Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/accessibility/speakup/speakup_dummy.c4
-rw-r--r--drivers/accessibility/speakup/speakup_soft.c32
-rw-r--r--drivers/accessibility/speakup/spk_types.h2
-rw-r--r--drivers/accessibility/speakup/varhandlers.c12
-rw-r--r--drivers/acpi/Kconfig5
-rw-r--r--drivers/acpi/ac.c5
-rw-r--r--drivers/acpi/acpi_amba.c6
-rw-r--r--drivers/acpi/acpi_apd.c9
-rw-r--r--drivers/acpi/acpi_fpdt.c22
-rw-r--r--drivers/acpi/acpi_lpss.c69
-rw-r--r--drivers/acpi/acpi_pcc.c28
-rw-r--r--drivers/acpi/acpi_platform.c22
-rw-r--r--drivers/acpi/acpi_video.c114
-rw-r--r--drivers/acpi/apei/apei-base.c5
-rw-r--r--drivers/acpi/apei/bert.c3
-rw-r--r--drivers/acpi/apei/erst.c6
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/arm64/dma.c28
-rw-r--r--drivers/acpi/bus.c37
-rw-r--r--drivers/acpi/cppc_acpi.c45
-rw-r--r--drivers/acpi/device_pm.c53
-rw-r--r--drivers/acpi/dptf/Kconfig3
-rw-r--r--drivers/acpi/ec.c6
-rw-r--r--drivers/acpi/fan_core.c58
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/irq.c12
-rw-r--r--drivers/acpi/numa/hmat.c25
-rw-r--r--drivers/acpi/osi.c24
-rw-r--r--drivers/acpi/pci_root.c75
-rw-r--r--drivers/acpi/power.c11
-rw-r--r--drivers/acpi/processor_idle.c31
-rw-r--r--drivers/acpi/property.c13
-rw-r--r--drivers/acpi/resource.c62
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/scan.c179
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/thermal.c211
-rw-r--r--drivers/acpi/utils.c30
-rw-r--r--drivers/acpi/video_detect.c449
-rw-r--r--drivers/acpi/viot.c1
-rw-r--r--drivers/acpi/x86/apple.c1
-rw-r--r--drivers/acpi/x86/s2idle.c173
-rw-r--r--drivers/acpi/x86/utils.c33
-rw-r--r--drivers/amba/bus.c8
-rw-r--r--drivers/android/binder.c16
-rw-r--r--drivers/android/binder_alloc.c55
-rw-r--r--drivers/android/binder_alloc.h12
-rw-r--r--drivers/android/binderfs.c31
-rw-r--r--drivers/ata/Kconfig18
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/ahci.h22
-rw-r--r--drivers/ata/ahci_da850.c47
-rw-r--r--drivers/ata/ahci_dm816.c4
-rw-r--r--drivers/ata/ahci_dwc.c493
-rw-r--r--drivers/ata/ahci_imx.c15
-rw-r--r--drivers/ata/ahci_mtk.c2
-rw-r--r--drivers/ata/ahci_platform.c5
-rw-r--r--drivers/ata/ahci_st.c4
-rw-r--r--drivers/ata/libahci.c63
-rw-r--r--drivers/ata/libahci_platform.c218
-rw-r--r--drivers/ata/libata-core.c68
-rw-r--r--drivers/ata/libata-eh.c38
-rw-r--r--drivers/ata/libata-sata.c30
-rw-r--r--drivers/ata/libata-scsi.c14
-rw-r--r--drivers/ata/libata-sff.c2
-rw-r--r--drivers/ata/libata.h7
-rw-r--r--drivers/ata/pata_macio.c3
-rw-r--r--drivers/auxdisplay/ht16k33.c4
-rw-r--r--drivers/auxdisplay/lcd2s.c3
-rw-r--r--drivers/base/arch_topology.c25
-rw-r--r--drivers/base/base.h2
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/dd.c42
-rw-r--r--drivers/base/devcoredump.c83
-rw-r--r--drivers/base/devres.c4
-rw-r--r--drivers/base/driver.c6
-rw-r--r--drivers/base/firmware_loader/sysfs.c7
-rw-r--r--drivers/base/firmware_loader/sysfs.h5
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.c12
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/power/domain.c6
-rw-r--r--drivers/base/power/runtime.c7
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/property.c4
-rw-r--r--drivers/base/regmap/regmap-mmio.c289
-rw-r--r--drivers/base/regmap/regmap-spi-avmm.c14
-rw-r--r--drivers/base/regmap/regmap-spi.c8
-rw-r--r--drivers/base/regmap/regmap.c167
-rw-r--r--drivers/base/regmap/trace.h61
-rw-r--r--drivers/bcma/driver_mips.c2
-rw-r--r--drivers/block/aoe/aoeblk.c15
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/drbd/drbd_int.h1
-rw-r--r--drivers/block/drbd/drbd_nl.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c3
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c12
-rw-r--r--drivers/block/nbd.c9
-rw-r--r--drivers/block/null_blk/main.c8
-rw-r--r--drivers/block/ps3vram.c2
-rw-r--r--drivers/block/rnbd/Makefile6
-rw-r--r--drivers/block/rnbd/rnbd-clt.c8
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c43
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h64
-rw-r--r--drivers/block/rnbd/rnbd-srv-trace.c17
-rw-r--r--drivers/block/rnbd/rnbd-srv-trace.h207
-rw-r--r--drivers/block/rnbd/rnbd-srv.c134
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/block/ublk_drv.c302
-rw-r--r--drivers/block/virtio_blk.c125
-rw-r--r--drivers/block/xen-blkback/common.h3
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/block/xen-blkfront.c20
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/btintel.c20
-rw-r--r--drivers/bluetooth/btusb.c38
-rw-r--r--drivers/bluetooth/hci_ldisc.c7
-rw-r--r--drivers/bluetooth/hci_serdev.c10
-rw-r--r--drivers/bus/hisi_lpc.c96
-rw-r--r--drivers/bus/mhi/host/main.c19
-rw-r--r--drivers/bus/mhi/host/pci_generic.c4
-rw-r--r--drivers/bus/mvebu-mbus.c26
-rw-r--r--drivers/char/hw_random/arm_smccc_trng.c4
-rw-r--r--drivers/char/hw_random/core.c55
-rw-r--r--drivers/char/hw_random/imx-rngc.c51
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c4
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c4
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c6
-rw-r--r--drivers/char/mem.c10
-rw-r--r--drivers/char/pcmcia/synclink_cs.c3
-rw-r--r--drivers/char/random.c139
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c4
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c3
-rw-r--r--drivers/char/tpm/tpm_ppi.c2
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c3
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c6
-rw-r--r--drivers/clk/Kconfig11
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/dt-compat.c108
-rw-r--r--drivers/clk/at91/sama5d2.c10
-rw-r--r--drivers/clk/baikal-t1/Kconfig12
-rw-r--r--drivers/clk/baikal-t1/Makefile1
-rw-r--r--drivers/clk/baikal-t1/ccu-div.c84
-rw-r--r--drivers/clk/baikal-t1/ccu-div.h17
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.h8
-rw-r--r--drivers/clk/baikal-t1/ccu-rst.c217
-rw-r--r--drivers/clk/baikal-t1/ccu-rst.h67
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-div.c260
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-pll.c123
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c43
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c12
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c27
-rw-r--r--drivers/clk/berlin/bg2.c5
-rw-r--r--drivers/clk/berlin/bg2q.c6
-rw-r--r--drivers/clk/clk-asm9260.c29
-rw-r--r--drivers/clk/clk-ast2600.c2
-rw-r--r--drivers/clk/clk-cdce706.c3
-rw-r--r--drivers/clk/clk-cs2000-cp.c4
-rw-r--r--drivers/clk/clk-fixed-rate.c28
-rw-r--r--drivers/clk/clk-lan966x.c2
-rw-r--r--drivers/clk/clk-lochnagar.c2
-rw-r--r--drivers/clk/clk-nomadik.c5
-rw-r--r--drivers/clk/clk-npcm7xx.c29
-rw-r--r--drivers/clk/clk-oxnas.c6
-rw-r--r--drivers/clk/clk-qoriq.c10
-rw-r--r--drivers/clk/clk-si514.c3
-rw-r--r--drivers/clk/clk-si5341.c4
-rw-r--r--drivers/clk/clk-si5351.c4
-rw-r--r--drivers/clk/clk-si570.c3
-rw-r--r--drivers/clk/clk-tps68470.c13
-rw-r--r--drivers/clk/clk-versaclock5.c167
-rw-r--r--drivers/clk/clk-versaclock7.c1309
-rw-r--r--drivers/clk/clk-xgene.c4
-rw-r--r--drivers/clk/clk.c34
-rw-r--r--drivers/clk/clkdev.c60
-rw-r--r--drivers/clk/davinci/Makefile4
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c3
-rw-r--r--drivers/clk/davinci/pll-dm644x.c81
-rw-r--r--drivers/clk/davinci/pll-dm646x.c85
-rw-r--r--drivers/clk/davinci/pll.c10
-rw-r--r--drivers/clk/davinci/pll.h6
-rw-r--r--drivers/clk/davinci/psc-dm644x.c85
-rw-r--r--drivers/clk/davinci/psc-dm646x.c82
-rw-r--r--drivers/clk/davinci/psc.c6
-rw-r--r--drivers/clk/davinci/psc.h6
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-composite-93.c171
-rw-r--r--drivers/clk/imx/clk-gate-93.c199
-rw-r--r--drivers/clk/imx/clk-imx6sx.c4
-rw-r--r--drivers/clk/imx/clk-imx8mp.c2
-rw-r--r--drivers/clk/imx/clk-imx93.c32
-rw-r--r--drivers/clk/imx/clk-scu.c6
-rw-r--r--drivers/clk/imx/clk.h9
-rw-r--r--drivers/clk/ingenic/tcu.c15
-rw-r--r--drivers/clk/mediatek/Kconfig87
-rw-r--r--drivers/clk/mediatek/Makefile13
-rw-r--r--drivers/clk/mediatek/clk-apmixed.c12
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c2
-rw-r--r--drivers/clk/mediatek/clk-gate.c1
-rw-r--r--drivers/clk/mediatek/clk-mt2701-bdp.c36
-rw-r--r--drivers/clk/mediatek/clk-mt2701-img.c36
-rw-r--r--drivers/clk/mediatek/clk-mt2701-vdec.c36
-rw-r--r--drivers/clk/mediatek/clk-mt2712-bdp.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-img.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-jpgdec.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mfg.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-vdec.c34
-rw-r--r--drivers/clk/mediatek/clk-mt2712-venc.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6765-audio.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6765-cam.c33
-rw-r--r--drivers/clk/mediatek/clk-mt6765-img.c33
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mipi0a.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mm.c33
-rw-r--r--drivers/clk/mediatek/clk-mt6765-vcodec.c34
-rw-r--r--drivers/clk/mediatek/clk-mt6779-aud.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-cam.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-img.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-ipe.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mfg.c27
-rw-r--r--drivers/clk/mediatek/clk-mt6779-vdec.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6779-venc.c29
-rw-r--r--drivers/clk/mediatek/clk-mt6795-apmixedsys.c157
-rw-r--r--drivers/clk/mediatek/clk-mt6795-infracfg.c151
-rw-r--r--drivers/clk/mediatek/clk-mt6795-mfg.c50
-rw-r--r--drivers/clk/mediatek/clk-mt6795-mm.c132
-rw-r--r--drivers/clk/mediatek/clk-mt6795-pericfg.c160
-rw-r--r--drivers/clk/mediatek/clk-mt6795-topckgen.c610
-rw-r--r--drivers/clk/mediatek/clk-mt6795-vdecsys.c55
-rw-r--r--drivers/clk/mediatek/clk-mt6795-vencsys.c50
-rw-r--r--drivers/clk/mediatek/clk-mt6797-img.c36
-rw-r--r--drivers/clk/mediatek/clk-mt6797-vdec.c36
-rw-r--r--drivers/clk/mediatek/clk-mt6797-venc.c36
-rw-r--r--drivers/clk/mediatek/clk-mt8183-cam.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-img.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu0.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu1.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_adl.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_conn.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c35
-rw-r--r--drivers/clk/mediatek/clk-mt8183-vdec.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183-venc.c27
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c28
-rw-r--r--drivers/clk/mediatek/clk-mt8192-cam.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-img.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-ipe.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mdp.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-mfg.c7
-rw-r--r--drivers/clk/mediatek/clk-mt8192-msdc.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-scp_adsp.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-vdec.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192-venc.c1
-rw-r--r--drivers/clk/mediatek/clk-mt8192.c234
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c16
-rw-r--r--drivers/clk/mediatek/clk-mt8195-mfg.c6
-rw-r--r--drivers/clk/mediatek/clk-mt8195-topckgen.c46
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo0.c7
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo1.c17
-rw-r--r--drivers/clk/mediatek/clk-mt8365-apu.c55
-rw-r--r--drivers/clk/mediatek/clk-mt8365-cam.c57
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mfg.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mm.c112
-rw-r--r--drivers/clk/mediatek/clk-mt8365-vdec.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8365-venc.c52
-rw-r--r--drivers/clk/mediatek/clk-mt8365.c1155
-rw-r--r--drivers/clk/mediatek/clk-mtk.c52
-rw-r--r--drivers/clk/mediatek/clk-mtk.h3
-rw-r--r--drivers/clk/mediatek/clk-mux.c38
-rw-r--r--drivers/clk/mediatek/clk-mux.h15
-rw-r--r--drivers/clk/mediatek/reset.c1
-rw-r--r--drivers/clk/meson/meson-aoclk.c5
-rw-r--r--drivers/clk/meson/meson-eeclk.c5
-rw-r--r--drivers/clk/meson/meson8b.c5
-rw-r--r--drivers/clk/microchip/Kconfig1
-rw-r--r--drivers/clk/microchip/Makefile1
-rw-r--r--drivers/clk/microchip/clk-mpfs-ccc.c290
-rw-r--r--drivers/clk/microchip/clk-mpfs.c384
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c113
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c6
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c2
-rw-r--r--drivers/clk/pistachio/clk.h4
-rw-r--r--drivers/clk/pxa/clk-pxa.c2
-rw-r--r--drivers/clk/qcom/Kconfig47
-rw-r--r--drivers/clk/qcom/Makefile5
-rw-r--r--drivers/clk/qcom/a53-pll.c4
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c33
-rw-r--r--drivers/clk/qcom/apss-ipq6018.c15
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c66
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h8
-rw-r--r--drivers/clk/qcom/clk-cpu-8996.c329
-rw-r--r--drivers/clk/qcom/clk-rcg.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c7
-rw-r--r--drivers/clk/qcom/clk-rpmh.c25
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c83
-rw-r--r--drivers/clk/qcom/dispcc-sm6115.c608
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c1829
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c330
-rw-r--r--drivers/clk/qcom/gcc-msm8909.c2731
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1020
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c552
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c436
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c56
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c2
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c6
-rw-r--r--drivers/clk/qcom/gcc-sc8280xp.c20
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c2
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c400
-rw-r--r--drivers/clk/qcom/gcc-sm6115.c48
-rw-r--r--drivers/clk/qcom/gcc-sm6350.c2
-rw-r--r--drivers/clk/qcom/gcc-sm6375.c3919
-rw-r--r--drivers/clk/qcom/gdsc.c35
-rw-r--r--drivers/clk/qcom/gdsc.h5
-rw-r--r--drivers/clk/qcom/gpucc-sc8280xp.c461
-rw-r--r--drivers/clk/qcom/kpss-xcc.c26
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c84
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c211
-rw-r--r--drivers/clk/qcom/lpassaudiocc-sc7280.c66
-rw-r--r--drivers/clk/qcom/lpasscc-sc7280.c44
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7280.c33
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c454
-rw-r--r--drivers/clk/qcom/reset.c4
-rw-r--r--drivers/clk/qcom/reset.h1
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c21
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c14
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c2
-rw-r--r--drivers/clk/renesas/r9a09g011-cpg.c4
-rw-r--r--drivers/clk/rockchip/Kconfig7
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-rv1126.c1138
-rw-r--r--drivers/clk/rockchip/clk.c27
-rw-r--r--drivers/clk/rockchip/clk.h36
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c6
-rw-r--r--drivers/clk/samsung/clk-exynos7885.c207
-rw-r--r--drivers/clk/samsung/clk-exynos850.c682
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c401
-rw-r--r--drivers/clk/sprd/Kconfig6
-rw-r--r--drivers/clk/sprd/Makefile1
-rw-r--r--drivers/clk/sprd/common.c9
-rw-r--r--drivers/clk/sprd/ums512-clk.c2202
-rw-r--r--drivers/clk/st/clkgen-fsyn.c5
-rw-r--r--drivers/clk/st/clkgen-mux.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun20i-d1.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c28
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c19
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c9
-rw-r--r--drivers/clk/tegra/clk-bpmp.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c1
-rw-r--r--drivers/clk/tegra/clk-tegra124.c1
-rw-r--r--drivers/clk/tegra/clk-tegra20.c1
-rw-r--r--drivers/clk/tegra/clk-tegra210.c1
-rw-r--r--drivers/clk/tegra/clk-tegra30.c1
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c9
-rw-r--r--drivers/clk/ti/clk.c6
-rw-r--r--drivers/clk/xilinx/Kconfig12
-rw-r--r--drivers/clk/xilinx/Makefile1
-rw-r--r--drivers/clk/xilinx/clk-xlnx-clock-wizard.c (renamed from drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c)35
-rw-r--r--drivers/clk/zynqmp/clkc.c19
-rw-r--r--drivers/clk/zynqmp/divider.c9
-rw-r--r--drivers/clk/zynqmp/pll.c31
-rw-r--r--drivers/clocksource/Kconfig2
-rw-r--r--drivers/clocksource/arm_arch_timer.c6
-rw-r--r--drivers/clocksource/exynos_mct.c83
-rw-r--r--drivers/clocksource/renesas-ostm.c2
-rw-r--r--drivers/clocksource/timer-gxp.c7
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c6
-rw-r--r--drivers/clocksource/timer-sun4i.c3
-rw-r--r--drivers/clocksource/timer-ti-dm.c681
-rw-r--r--drivers/comedi/comedi_fops.c8
-rw-r--r--drivers/counter/104-quad-8.c47
-rw-r--r--drivers/counter/Kconfig15
-rw-r--r--drivers/counter/Makefile1
-rw-r--r--drivers/counter/counter-chrdev.c137
-rw-r--r--drivers/counter/counter-core.c14
-rw-r--r--drivers/counter/counter-sysfs.c304
-rw-r--r--drivers/counter/ftm-quaddec.c1
-rw-r--r--drivers/counter/intel-qep.c1
-rw-r--r--drivers/counter/interrupt-cnt.c12
-rw-r--r--drivers/counter/microchip-tcb-capture.c1
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c1
-rw-r--r--drivers/counter/stm32-timer-cnt.c1
-rw-r--r--drivers/counter/ti-ecap-capture.c614
-rw-r--r--drivers/counter/ti-eqep.c1
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/Kconfig.x8615
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c293
-rw-r--r--drivers/cpufreq/amd-pstate.c99
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c10
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c27
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c24
-rw-r--r--drivers/cpufreq/sti-cpufreq.c2
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c35
-rw-r--r--drivers/cpufreq/ti-cpufreq.c2
-rw-r--r--drivers/cpuidle/coupled.c2
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c4
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c4
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c7
-rw-r--r--drivers/cpuidle/governor.c11
-rw-r--r--drivers/crypto/Kconfig3
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c16
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c6
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c6
-rw-r--r--drivers/crypto/aspeed/Kconfig48
-rw-r--r--drivers/crypto/aspeed/Makefile7
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-crypto.c1133
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c1391
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c284
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h298
-rw-r--r--drivers/crypto/atmel-ecc.c6
-rw-r--r--drivers/crypto/atmel-sha204a.c6
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c6
-rw-r--r--drivers/crypto/bcm/cipher.c4
-rw-r--r--drivers/crypto/bcm/cipher.h2
-rw-r--r--drivers/crypto/cavium/cpt/cpt_hw_types.h2
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c6
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c30
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c5
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c6
-rw-r--r--drivers/crypto/ccp/sev-dev.c78
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c2
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre.h8
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c250
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c216
-rw-r--r--drivers/crypto/hisilicon/qm.c906
-rw-r--r--drivers/crypto/hisilicon/sec2/sec.h34
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c456
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_main.c160
-rw-r--r--drivers/crypto/hisilicon/zip/zip.h3
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c134
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c266
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c60
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c67
-rw-r--r--drivers/crypto/keembay/Kconfig4
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c24
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_main.c8
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c20
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c4
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c5
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c5
-rw-r--r--drivers/crypto/qat/qat_common/adf_cfg.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c10
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen4_hw_data.h2
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_debug.c2
-rw-r--r--drivers/crypto/qat/qat_common/icp_qat_uclo.h3
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c18
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c24
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c56
-rw-r--r--drivers/crypto/qce/aead.c4
-rw-r--r--drivers/crypto/qce/sha.c8
-rw-r--r--drivers/crypto/qce/skcipher.c8
-rw-r--r--drivers/crypto/qcom-rng.c7
-rw-r--r--drivers/crypto/sahara.c22
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c4
-rw-r--r--drivers/dax/hmem/device.c1
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c7
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c8
-rw-r--r--drivers/dma-buf/dma-buf.c4
-rw-r--r--drivers/dma-buf/dma-fence.c22
-rw-r--r--drivers/dma-buf/dma-resv.c3
-rw-r--r--drivers/dma-buf/st-dma-fence-chain.c4
-rw-r--r--drivers/dma-buf/st-dma-fence-unwrap.c22
-rw-r--r--drivers/dma-buf/st-dma-fence.c16
-rw-r--r--drivers/dma-buf/st-dma-resv.c10
-rw-r--r--drivers/dma-buf/sync_file.c8
-rw-r--r--drivers/dma-buf/udmabuf.c11
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/apple-admac.c45
-rw-r--r--drivers/dma/at_xdmac.c5
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c12
-rw-r--r--drivers/dma/hisi_dma.c650
-rw-r--r--drivers/dma/hsu/hsu.c8
-rw-r--r--drivers/dma/hsu/hsu.h12
-rw-r--r--drivers/dma/hsu/pci.c47
-rw-r--r--drivers/dma/idxd/device.c38
-rw-r--r--drivers/dma/idxd/idxd.h10
-rw-r--r--drivers/dma/idxd/init.c36
-rw-r--r--drivers/dma/idxd/irq.c13
-rw-r--r--drivers/dma/idxd/registers.h35
-rw-r--r--drivers/dma/idxd/sysfs.c187
-rw-r--r--drivers/dma/ioat/dma.c6
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/mxs-dma.c11
-rw-r--r--drivers/dma/pl330.c4
-rw-r--r--drivers/dma/qcom/gpi.c7
-rw-r--r--drivers/dma/qcom/qcom_adm.c22
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c8
-rw-r--r--drivers/dma/sh/rcar-dmac.c4
-rw-r--r--drivers/dma/stm32-dma.c136
-rw-r--r--drivers/dma/stm32-dmamux.c12
-rw-r--r--drivers/dma/stm32-mdma.c70
-rw-r--r--drivers/dma/ti/edma.c40
-rw-r--r--drivers/dma/ti/k3-psil-j7200.c67
-rw-r--r--drivers/dma/ti/k3-psil-j721e.c79
-rw-r--r--drivers/dma/ti/k3-udma-private.c6
-rw-r--r--drivers/dma/ti/k3-udma.c37
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c21
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c14
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/edac/edac_module.h4
-rw-r--r--drivers/edac/i10nm_base.c287
-rw-r--r--drivers/edac/i7300_edac.c2
-rw-r--r--drivers/edac/ie31200_edac.c28
-rw-r--r--drivers/edac/ppc4xx_edac.c23
-rw-r--r--drivers/edac/sb_edac.c148
-rw-r--r--drivers/edac/skx_base.c9
-rw-r--r--drivers/edac/skx_common.c26
-rw-r--r--drivers/edac/skx_common.h16
-rw-r--r--drivers/edac/wq.c1
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-rt8973a.c4
-rw-r--r--drivers/extcon/extcon-usbc-tusb320.c232
-rw-r--r--drivers/firmware/arm_ffa/bus.c4
-rw-r--r--drivers/firmware/arm_ffa/driver.c132
-rw-r--r--drivers/firmware/arm_scmi/clock.c6
-rw-r--r--drivers/firmware/arm_scmi/optee.c1
-rw-r--r--drivers/firmware/arm_scmi/reset.c10
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c46
-rw-r--r--drivers/firmware/arm_scmi/sensors.c25
-rw-r--r--drivers/firmware/efi/Kconfig45
-rw-r--r--drivers/firmware/efi/capsule-loader.c31
-rw-r--r--drivers/firmware/efi/dev-path-parser.c10
-rw-r--r--drivers/firmware/efi/efi-init.c61
-rw-r--r--drivers/firmware/efi/efi.c15
-rw-r--r--drivers/firmware/efi/efibc.c3
-rw-r--r--drivers/firmware/efi/libstub/Makefile41
-rw-r--r--drivers/firmware/efi/libstub/Makefile.zboot70
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c31
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c290
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c118
-rw-r--r--drivers/firmware/efi/libstub/efistub.h69
-rw-r--r--drivers/firmware/efi/libstub/fdt.c175
-rw-r--r--drivers/firmware/efi/libstub/file.c23
-rw-r--r--drivers/firmware/efi/libstub/intrinsics.c30
-rw-r--r--drivers/firmware/efi/libstub/loongarch-stub.c102
-rw-r--r--drivers/firmware/efi/libstub/mem.c93
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c25
-rw-r--r--drivers/firmware/efi/libstub/relocate.c21
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c8
-rw-r--r--drivers/firmware/efi/libstub/systable.c8
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c41
-rw-r--r--drivers/firmware/efi/libstub/zboot-header.S143
-rw-r--r--drivers/firmware/efi/libstub/zboot.c302
-rw-r--r--drivers/firmware/efi/libstub/zboot.lds44
-rw-r--r--drivers/firmware/google/gsmi.c9
-rw-r--r--drivers/firmware/psci/psci.c130
-rw-r--r--drivers/firmware/qcom_scm.h2
-rw-r--r--drivers/firmware/sysfb.c4
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c13
-rw-r--r--drivers/firmware/xilinx/zynqmp.c31
-rw-r--r--drivers/fpga/dfl-pci.c18
-rw-r--r--drivers/fpga/dfl.c2
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c11
-rw-r--r--drivers/fpga/microchip-spi.c1
-rw-r--r--drivers/fsi/fsi-core.c11
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c2
-rw-r--r--drivers/fsi/fsi-master.h2
-rw-r--r--drivers/fsi/fsi-occ.c66
-rw-r--r--drivers/fsi/fsi-sbefifo.c15
-rw-r--r--drivers/gnss/core.c4
-rw-r--r--drivers/gpio/Kconfig7
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c15
-rw-r--r--drivers/gpio/gpio-104-idi-48.c15
-rw-r--r--drivers/gpio/gpio-104-idio-16.c23
-rw-r--r--drivers/gpio/gpio-adp5588.c4
-rw-r--r--drivers/gpio/gpio-exar.c40
-rw-r--r--drivers/gpio/gpio-f7188x.c275
-rw-r--r--drivers/gpio/gpio-ftgpio010.c22
-rw-r--r--drivers/gpio/gpio-imx-scu.c139
-rw-r--r--drivers/gpio/gpio-ixp4xx.c17
-rw-r--r--drivers/gpio/gpio-max7300.c4
-rw-r--r--drivers/gpio/gpio-mockup.c15
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-mt7621.c28
-rw-r--r--drivers/gpio/gpio-mvebu.c15
-rw-r--r--drivers/gpio/gpio-pca953x.c198
-rw-r--r--drivers/gpio/gpio-pcf857x.c4
-rw-r--r--drivers/gpio/gpio-pxa.c11
-rw-r--r--drivers/gpio/gpio-realtek-otto.c166
-rw-r--r--drivers/gpio/gpio-rockchip.c25
-rw-r--r--drivers/gpio/gpio-tc3589x.c8
-rw-r--r--drivers/gpio/gpio-tpic2810.c4
-rw-r--r--drivers/gpio/gpio-tqmx86.c4
-rw-r--r--drivers/gpio/gpio-twl4030.c26
-rw-r--r--drivers/gpio/gpio-ucb1400.c1
-rw-r--r--drivers/gpio/gpio-ws16c48.c15
-rw-r--r--drivers/gpio/gpiolib-acpi.c53
-rw-r--r--drivers/gpio/gpiolib-acpi.h2
-rw-r--r--drivers/gpio/gpiolib-cdev.c23
-rw-r--r--drivers/gpio/gpiolib-of.c184
-rw-r--r--drivers/gpio/gpiolib.c132
-rw-r--r--drivers/gpu/drm/Kconfig49
-rw-r--r--drivers/gpu/drm/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c1216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c170
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c162
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c181
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c272
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c168
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c281
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c372
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c220
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c511
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c303
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h (renamed from drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c343
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h771
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c54
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c89
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c82
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h3
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c171
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h47
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c109
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c181
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c439
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c164
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c153
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c417
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c776
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h1172
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c217
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c682
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c305
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c430
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c443
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c290
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c395
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c1884
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h69
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h15
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h108
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h1469
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h12086
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h44640
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h58
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h4
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c45
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c25
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c34
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c65
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c236
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c72
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c130
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h3
-rw-r--r--drivers/gpu/drm/arm/Kconfig4
-rw-r--r--drivers/gpu/drm/arm/display/Kconfig2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c5
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c12
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c11
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c77
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c9
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c8
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c35
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c1
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c6
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c8
-rw-r--r--drivers/gpu/drm/armada/armada_plane.c2
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c10
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c7
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c28
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h7
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c9
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c13
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c42
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.h6
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c48
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c4
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c4
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c31
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c42
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c7
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c4
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c12
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c4
-rw-r--r--drivers/gpu/drm/bridge/panel.c74
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c4
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c9
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c4
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c4
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c4
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c13
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c94
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c119
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c56
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c1149
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c13
-rw-r--r--drivers/gpu/drm/drm_aperture.c10
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c84
-rw-r--r--drivers/gpu/drm/drm_auth.c4
-rw-r--r--drivers/gpu/drm/drm_bridge.c4
-rw-r--r--drivers/gpu/drm/drm_client.c4
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c135
-rw-r--r--drivers/gpu/drm/drm_crtc.c94
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c14
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c11
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c76
-rw-r--r--drivers/gpu/drm/drm_encoder.c75
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c (renamed from drivers/gpu/drm/drm_fb_cma_helper.c)67
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c101
-rw-r--r--drivers/gpu/drm/drm_file.c143
-rw-r--r--drivers/gpu/drm/drm_format_helper.c702
-rw-r--r--drivers/gpu/drm/drm_fourcc.c55
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c70
-rw-r--r--drivers/gpu/drm/drm_gem.c170
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c (renamed from drivers/gpu/drm/drm_gem_cma_helper.c)302
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c12
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c8
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c49
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c5
-rw-r--r--drivers/gpu/drm/drm_mode_config.c7
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c66
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c18
-rw-r--r--drivers/gpu/drm/drm_plane.c70
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c114
-rw-r--r--drivers/gpu/drm/drm_print.c48
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c64
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c7
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c18
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/backlight.c102
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c54
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c4
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c30
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c70
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c6
-rw-r--r--drivers/gpu/drm/gma500/power.c166
-rw-r--r--drivers/gpu/drm/gma500/power.h18
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c73
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c52
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h25
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c62
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c30
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c34
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c15
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c3
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c12
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c9
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_proto.c2
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c7
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c4
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c3
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c4
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c2
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c7
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c102
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c84
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight_regs.h124
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c392
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c152
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c293
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c89
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c383
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h418
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c147
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c153
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c119
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c129
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h16
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c142
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c156
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c105
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c105
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c111
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c326
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c40
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus_regs.h81
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c163
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp_regs.h270
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c116
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c42
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c25
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c1116
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h346
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c14
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c11
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c3562
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h80
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c30
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c141
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_regs.h188
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c39
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c62
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c86
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c10
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c16
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h3
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c158
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c16
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c106
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c114
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c174
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c35
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h45
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c50
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c81
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c88
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.c47
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sa_media.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c85
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c16
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c12
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c115
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_slpc.c9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c94
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c113
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c45
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c229
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h42
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c98
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c69
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c476
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h39
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c40
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c159
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c63
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c147
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h531
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c59
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h43
-rw-r--r--drivers/gpu/drm/i915/i915_getparam.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c84
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h21
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c100
-rw-r--r--drivers/gpu/drm/i915/i915_params.c12
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c308
-rw-r--r--drivers/gpu/drm/i915/i915_pci.h6
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h557
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h6
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c73
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c43
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h18
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c4
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h42
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c97
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h97
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c41
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c4
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c9
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h4
-rw-r--r--drivers/gpu/drm/i915/intel_pci_config.h7
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c3708
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h65
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c112
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h40
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c19
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c14
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c70
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c9
-rw-r--r--drivers/gpu/drm/imx/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig2
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c6
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c18
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c10
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c3
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c33
-rw-r--r--drivers/gpu/drm/ingenic/Kconfig2
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c58
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-ipu.c17
-rw-r--r--drivers/gpu/drm/kmb/Kconfig2
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c6
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c15
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig4
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_crtc.c2
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c10
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_interface.c2
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_layer.c11
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_mode.c3
-rw-r--r--drivers/gpu/drm/mcde/Kconfig2
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c8
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c10
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig11
-rw-r--r--drivers/gpu/drm/mediatek/Makefile2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c2663
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp_reg.h350
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1
-rw-r--r--drivers/gpu/drm/meson/Kconfig2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c24
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h7
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c13
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.h1
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.h1
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c19
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c15
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c2
-rw-r--r--drivers/gpu/drm/mgag200/Makefile4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_bmc.c99
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c27
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h208
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200.c254
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh.c277
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200eh3.c181
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c315
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c316
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ew3.c192
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c431
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200wb.c326
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c726
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_pll.c997
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c83
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c37
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c78
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c74
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c94
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c65
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c9
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c150
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c37
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h31
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c172
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c299
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c288
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c162
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c185
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c87
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c14
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c145
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c102
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c188
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c25
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c50
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h89
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c179
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h123
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c164
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c78
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h14
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h36
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c22
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c101
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c3
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c1
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig4
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c27
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.h1
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c25
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c39
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c43
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c13
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c198
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/ovly507e.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c85
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_overlay.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c1
-rw-r--r--drivers/gpu/drm/panel/Kconfig4
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c4
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c4
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c4
-rw-r--r--drivers/gpu/drm/panel/panel-dsi-cm.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ebbg-ft8719.c4
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c45
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c4
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c4
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c7
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c4
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c4
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c4
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c4
-rw-r--r--drivers/gpu/drm/panel/panel-khadas-ts050.c4
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c4
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c4
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c4
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c9
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c4
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c4
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c4
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c4
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c4
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c4
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c90
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c547
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c4
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c4
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c3
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c3
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c4
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig1
-rw-r--r--drivers/gpu/drm/panfrost/Makefile3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c11
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.c249
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_dump.h12
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c44
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h42
-rw-r--r--drivers/gpu/drm/pl111/Kconfig2
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c8
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c9
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c66
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c61
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c778
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h41
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig2
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c34
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c41
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c35
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.h10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c69
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h31
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c5
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c51
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c122
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c24
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h1
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c6
-rw-r--r--drivers/gpu/drm/selftests/Makefile8
-rw-r--r--drivers/gpu/drm/selftests/drm_buddy_selftests.h15
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h68
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h28
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h40
-rw-r--r--drivers/gpu/drm/selftests/drm_selftest.c109
-rw-r--r--drivers/gpu/drm/selftests/drm_selftest.h41
-rw-r--r--drivers/gpu/drm/selftests/test-drm_buddy.c994
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c1141
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c668
-rw-r--r--drivers/gpu/drm/selftests/test-drm_format.c280
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.c32
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h52
-rw-r--r--drivers/gpu/drm/selftests/test-drm_rect.c223
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.h2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c21
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-i2c.c4
-rw-r--r--drivers/gpu/drm/solomon/ssd130x-spi.c21
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c263
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.h9
-rw-r--r--drivers/gpu/drm/sprd/Kconfig2
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c15
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c6
-rw-r--r--drivers/gpu/drm/sti/Kconfig2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c1
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c7
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c18
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c18
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c3
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h1
-rw-r--r--drivers/gpu/drm/stm/Kconfig2
-rw-r--r--drivers/gpu/drm/stm/drv.c11
-rw-r--r--drivers/gpu/drm/stm/ltdc.c17
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig28
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c26
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c64
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c27
-rw-r--r--drivers/gpu/drm/tegra/dc.c1
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/plane.c1
-rw-r--r--drivers/gpu/drm/tests/Makefile4
-rw-r--r--drivers/gpu/drm/tests/drm_buddy_test.c756
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c991
-rw-r--r--drivers/gpu/drm/tests/drm_damage_helper_test.c639
-rw-r--r--drivers/gpu/drm/tests/drm_dp_mst_helper_test.c (renamed from drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c)89
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c370
-rw-r--r--drivers/gpu/drm/tests/drm_format_test.c359
-rw-r--r--drivers/gpu/drm/tests/drm_framebuffer_test.c (renamed from drivers/gpu/drm/selftests/test-drm_framebuffer.c)120
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c (renamed from drivers/gpu/drm/selftests/test-drm_mm.c)1253
-rw-r--r--drivers/gpu/drm/tests/drm_plane_helper_test.c (renamed from drivers/gpu/drm/selftests/test-drm_plane_helper.c)155
-rw-r--r--drivers/gpu/drm/tests/drm_rect_test.c214
-rw-r--r--drivers/gpu/drm/tidss/Kconfig2
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c28
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c6
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c3
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c1
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c10
-rw-r--r--drivers/gpu/drm/tiny/Kconfig22
-rw-r--r--drivers/gpu/drm/tiny/arcpgu.c14
-rw-r--r--drivers/gpu/drm/tiny/bochs.c14
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c19
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c12
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c7
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c7
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c7
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c7
-rw-r--r--drivers/gpu/drm/tiny/repaper.c42
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c594
-rw-r--r--drivers/gpu/drm/tiny/st7586.c17
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c172
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c24
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c64
-rw-r--r--drivers/gpu/drm/tve200/Kconfig2
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c12
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c10
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c19
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h5
-rw-r--r--drivers/gpu/drm/udl/udl_main.c128
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c49
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c46
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c4
-rw-r--r--drivers/gpu/drm/v3d/v3d_perfmon.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c12
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c79
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c149
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c72
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c131
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h65
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c131
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c971
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h15
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c145
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c71
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c40
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c63
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c73
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c273
-rw-r--r--drivers/gpu/drm/via/Makefile2
-rw-r--r--drivers/gpu/drm/via/via_3d_reg.h349
-rw-r--r--drivers/gpu/drm/via/via_dma.c744
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c807
-rw-r--r--drivers/gpu/drm/via/via_dmablit.h140
-rw-r--r--drivers/gpu/drm/via/via_dri1.c3630
-rw-r--r--drivers/gpu/drm/via/via_drv.c124
-rw-r--r--drivers/gpu/drm/via/via_drv.h229
-rw-r--r--drivers/gpu/drm/via/via_irq.c388
-rw-r--r--drivers/gpu/drm/via/via_map.c132
-rw-r--r--drivers/gpu/drm/via/via_mm.c241
-rw-r--r--drivers/gpu/drm/via/via_verifier.c1110
-rw-r--r--drivers/gpu/drm/via/via_verifier.h62
-rw-r--r--drivers/gpu/drm/via/via_video.c94
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c53
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c65
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c40
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c21
-rw-r--r--drivers/gpu/drm/vkms/Makefile1
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c314
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h33
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c286
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h12
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c56
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c1
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c13
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c8
-rw-r--r--drivers/hid/Kconfig100
-rw-r--r--drivers/hid/Makefile7
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c18
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c8
-rw-r--r--drivers/hid/hid-asus.c7
-rw-r--r--drivers/hid/hid-core.c22
-rw-r--r--drivers/hid/hid-google-hammer.c4
-rw-r--r--drivers/hid/hid-ids.h9
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-logitech-hidpp.c164
-rw-r--r--drivers/hid/hid-multitouch.c8
-rw-r--r--drivers/hid/hid-nintendo.c66
-rw-r--r--drivers/hid/hid-playstation.c16
-rw-r--r--drivers/hid/hid-pxrc.c112
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-rmi.c6
-rw-r--r--drivers/hid/hid-roccat.c4
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/hid-steam.c18
-rw-r--r--drivers/hid/hid-thrustmaster.c3
-rw-r--r--drivers/hid/hid-topre.c49
-rw-r--r--drivers/hid/hid-uclogic-core.c5
-rw-r--r--drivers/hid/hid-uclogic-params-test.c192
-rw-r--r--drivers/hid/hid-uclogic-params.c230
-rw-r--r--drivers/hid/hid-uclogic-params.h10
-rw-r--r--drivers/hid/hid-uclogic-rdesc-test.c22
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c76
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h8
-rw-r--r--drivers/hid/hid-vivaldi-common.c29
-rw-r--r--drivers/hid/hid-vivaldi-common.h4
-rw-r--r--drivers/hid/hid-vivaldi.c4
-rw-r--r--drivers/hid/hid-vrc2.c91
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c68
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/usbhid/usbmouse.c2
-rw-r--r--drivers/hid/wacom.h5
-rw-r--r--drivers/hid/wacom_sys.c11
-rw-r--r--drivers/hid/wacom_wac.c13
-rw-r--r--drivers/hid/wacom_wac.h4
-rw-r--r--drivers/hsi/clients/cmt_speech.c2
-rw-r--r--drivers/hsi/clients/nokia-modem.c4
-rw-r--r--drivers/hsi/clients/ssi_protocol.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c1
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c8
-rw-r--r--drivers/hv/connection.c33
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/vmbus_drv.c79
-rw-r--r--drivers/hwmon/Kconfig48
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/abituguru.c9
-rw-r--r--drivers/hwmon/abituguru3.c9
-rw-r--r--drivers/hwmon/acpi_power_meter.c11
-rw-r--r--drivers/hwmon/adc128d818.c6
-rw-r--r--drivers/hwmon/adm1021.c2
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c2
-rw-r--r--drivers/hwmon/adm1029.c2
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adm9240.c10
-rw-r--r--drivers/hwmon/adt7310.c2
-rw-r--r--drivers/hwmon/adt7410.c2
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/adt7470.c3
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/adt7x10.c7
-rw-r--r--drivers/hwmon/adt7x10.h5
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c181
-rw-r--r--drivers/hwmon/asb100.c8
-rw-r--r--drivers/hwmon/asc7621.c6
-rw-r--r--drivers/hwmon/asus-ec-sensors.c408
-rw-r--r--drivers/hwmon/asus_wmi_ec_sensors.c622
-rw-r--r--drivers/hwmon/axi-fan-control.c15
-rw-r--r--drivers/hwmon/corsair-psu.c32
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c14
-rw-r--r--drivers/hwmon/dme1737.c6
-rw-r--r--drivers/hwmon/emc1403.c12
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc2305.c620
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/f71882fg.c2453
-rw-r--r--drivers/hwmon/f75375s.c7
-rw-r--r--drivers/hwmon/fschmd.c8
-rw-r--r--drivers/hwmon/ftsteutates.c5
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/gl520sm.c2
-rw-r--r--drivers/hwmon/gpio-fan.c14
-rw-r--r--drivers/hwmon/gsc-hwmon.c1
-rw-r--r--drivers/hwmon/hwmon.c14
-rw-r--r--drivers/hwmon/iio_hwmon.c8
-rw-r--r--drivers/hwmon/ina209.c4
-rw-r--r--drivers/hwmon/ina3221.c17
-rw-r--r--drivers/hwmon/it87.c8
-rw-r--r--drivers/hwmon/jc42.c5
-rw-r--r--drivers/hwmon/lm63.c6
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm77.c2
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm80.c2
-rw-r--r--drivers/hwmon/lm83.c2
-rw-r--r--drivers/hwmon/lm85.c2
-rw-r--r--drivers/hwmon/lm87.c2
-rw-r--r--drivers/hwmon/lm90.c10
-rw-r--r--drivers/hwmon/lm92.c2
-rw-r--r--drivers/hwmon/lm93.c2
-rw-r--r--drivers/hwmon/lm95234.c2
-rw-r--r--drivers/hwmon/lm95241.c2
-rw-r--r--drivers/hwmon/lm95245.c2
-rw-r--r--drivers/hwmon/ltc2947-core.c24
-rw-r--r--drivers/hwmon/ltc2947-i2c.c2
-rw-r--r--drivers/hwmon/ltc2947-spi.c2
-rw-r--r--drivers/hwmon/max1619.c2
-rw-r--r--drivers/hwmon/max1668.c2
-rw-r--r--drivers/hwmon/max31722.c8
-rw-r--r--drivers/hwmon/max31730.c10
-rw-r--r--drivers/hwmon/max31760.c596
-rw-r--r--drivers/hwmon/max31790.c38
-rw-r--r--drivers/hwmon/max6639.c8
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/mr75203.c447
-rw-r--r--drivers/hwmon/nct6683.c4
-rw-r--r--drivers/hwmon/nct6775-platform.c8
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c2
-rw-r--r--drivers/hwmon/nzxt-smart2.c1
-rw-r--r--drivers/hwmon/occ/common.c11
-rw-r--r--drivers/hwmon/occ/p8_i2c.c4
-rw-r--r--drivers/hwmon/occ/p9_sbe.c26
-rw-r--r--drivers/hwmon/pc87360.c1461
-rw-r--r--drivers/hwmon/pcf8591.c3
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/mp2888.c13
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c29
-rw-r--r--drivers/hwmon/pmbus/tps546d24.c71
-rw-r--r--drivers/hwmon/pwm-fan.c320
-rw-r--r--drivers/hwmon/scpi-hwmon.c14
-rw-r--r--drivers/hwmon/sht4x.c2
-rw-r--r--drivers/hwmon/sis5595.c187
-rw-r--r--drivers/hwmon/smm665.c3
-rw-r--r--drivers/hwmon/smsc47m192.c2
-rw-r--r--drivers/hwmon/sparx5-temp.c19
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/thmc50.c2
-rw-r--r--drivers/hwmon/tmp102.c6
-rw-r--r--drivers/hwmon/tmp103.c8
-rw-r--r--drivers/hwmon/tmp108.c8
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/tps23861.c107
-rw-r--r--drivers/hwmon/via686a.c206
-rw-r--r--drivers/hwmon/vt8231.c198
-rw-r--r--drivers/hwmon/w83627ehf.c8
-rw-r--r--drivers/hwmon/w83627hf.c1600
-rw-r--r--drivers/hwmon/w83781d.c6
-rw-r--r--drivers/hwmon/w83791d.c8
-rw-r--r--drivers/hwmon/w83792d.c8
-rw-r--r--drivers/hwmon/w83793.c8
-rw-r--r--drivers/hwmon/w83795.c6
-rw-r--r--drivers/hwmon/w83l785ts.c8
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/hwtracing/Kconfig2
-rw-r--r--drivers/hwtracing/coresight/Kconfig4
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h8
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c213
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c34
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c29
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h74
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c40
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c48
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.h3
-rw-r--r--drivers/hwtracing/ptt/Kconfig12
-rw-r--r--drivers/hwtracing/ptt/Makefile2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c1046
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.h200
-rw-r--r--drivers/i2c/busses/Kconfig14
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c27
-rw-r--r--drivers/i2c/busses/i2c-cadence.c20
-rw-r--r--drivers/i2c/busses/i2c-davinci.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-amdpsp.c67
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c37
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c24
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-ismt.c7
-rw-r--r--drivers/i2c/busses/i2c-mchp-pci1xxxx.c1210
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c926
-rw-r--r--drivers/i2c/busses/i2c-riic.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c9
-rw-r--r--drivers/i2c/busses/i2c-scmi.c48
-rw-r--r--drivers/i2c/busses/i2c-tegra.c11
-rw-r--r--drivers/i2c/i2c-core-acpi.c42
-rw-r--r--drivers/i2c/i2c-core-base.c12
-rw-r--r--drivers/i2c/i2c-core.h4
-rw-r--r--drivers/i2c/i2c-mux.c5
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/i2c/i2c-slave-testunit.c3
-rw-r--r--drivers/i2c/i2c-smbus.c3
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/idle/intel_idle.c53
-rw-r--r--drivers/iio/accel/Kconfig13
-rw-r--r--drivers/iio/accel/Makefile2
-rw-r--r--drivers/iio/accel/adxl313.h35
-rw-r--r--drivers/iio/accel/adxl313_core.c202
-rw-r--r--drivers/iio/accel/adxl313_i2c.c74
-rw-r--r--drivers/iio/accel/adxl313_spi.c63
-rw-r--r--drivers/iio/accel/adxl345_core.c7
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/accel/bma400.h14
-rw-r--r--drivers/iio/accel/bma400_core.c346
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c4
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c15
-rw-r--r--drivers/iio/accel/bmi088-accel-spi.c2
-rw-r--r--drivers/iio/accel/kxcjk-1013.c6
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c4
-rw-r--r--drivers/iio/accel/mc3230.c4
-rw-r--r--drivers/iio/accel/mma7455_i2c.c4
-rw-r--r--drivers/iio/accel/mma7660.c4
-rw-r--r--drivers/iio/accel/mma8452.c4
-rw-r--r--drivers/iio/accel/mma9551.c4
-rw-r--r--drivers/iio/accel/mma9553.c4
-rw-r--r--drivers/iio/accel/msa311.c1321
-rw-r--r--drivers/iio/accel/stk8312.c4
-rw-r--r--drivers/iio/accel/stk8ba50.c4
-rw-r--r--drivers/iio/adc/Kconfig47
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c27
-rw-r--r--drivers/iio/adc/ad7124.c15
-rw-r--r--drivers/iio/adc/ad7292.c4
-rw-r--r--drivers/iio/adc/ad7768-1.c17
-rw-r--r--drivers/iio/adc/ad7923.c11
-rw-r--r--drivers/iio/adc/ad799x.c4
-rw-r--r--drivers/iio/adc/ad9467.c17
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c714
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c8
-rw-r--r--drivers/iio/adc/ina2xx-adc.c4
-rw-r--r--drivers/iio/adc/ingenic-adc.c23
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c18
-rw-r--r--drivers/iio/adc/ltc2496.c9
-rw-r--r--drivers/iio/adc/ltc2497-core.c12
-rw-r--r--drivers/iio/adc/ltc2497.c81
-rw-r--r--drivers/iio/adc/ltc2497.h6
-rw-r--r--drivers/iio/adc/max11205.c183
-rw-r--r--drivers/iio/adc/max1363.c6
-rw-r--r--drivers/iio/adc/mcp3911.c285
-rw-r--r--drivers/iio/adc/mt6360-adc.c2
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c58
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c73
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c44
-rw-r--r--drivers/iio/adc/rtq6056.c661
-rw-r--r--drivers/iio/adc/stm32-adc-core.c59
-rw-r--r--drivers/iio/adc/stm32-adc-core.h31
-rw-r--r--drivers/iio/adc/stm32-adc.c128
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c14
-rw-r--r--drivers/iio/adc/ti-ads1015.c4
-rw-r--r--drivers/iio/adc/ti-ads131e08.c19
-rw-r--r--drivers/iio/adc/ti-tsc2046.c69
-rw-r--r--drivers/iio/adc/xilinx-ams.c15
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c18
-rw-r--r--drivers/iio/addac/Kconfig16
-rw-r--r--drivers/iio/addac/Makefile1
-rw-r--r--drivers/iio/addac/stx104.c (renamed from drivers/iio/adc/stx104.c)0
-rw-r--r--drivers/iio/cdc/Kconfig10
-rw-r--r--drivers/iio/cdc/Makefile1
-rw-r--r--drivers/iio/cdc/ad7746.c (renamed from drivers/staging/iio/cdc/ad7746.c)441
-rw-r--r--drivers/iio/chemical/atlas-sensor.c4
-rw-r--r--drivers/iio/chemical/ccs811.c4
-rw-r--r--drivers/iio/chemical/sgp30.c4
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c2
-rw-r--r--drivers/iio/dac/ad5380.c4
-rw-r--r--drivers/iio/dac/ad5446.c4
-rw-r--r--drivers/iio/dac/ad5593r.c54
-rw-r--r--drivers/iio/dac/ad5696-i2c.c4
-rw-r--r--drivers/iio/dac/ds4424.c4
-rw-r--r--drivers/iio/dac/m62332.c4
-rw-r--r--drivers/iio/dac/mcp4725.c4
-rw-r--r--drivers/iio/dac/ti-dac5571.c4
-rw-r--r--drivers/iio/frequency/adf4371.c17
-rw-r--r--drivers/iio/frequency/admv1014.c3
-rw-r--r--drivers/iio/frequency/adrf6780.c16
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c4
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c4
-rw-r--r--drivers/iio/gyro/itg3200_core.c4
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/health/afe4404.c4
-rw-r--r--drivers/iio/health/max30100.c4
-rw-r--r--drivers/iio/health/max30102.c4
-rw-r--r--drivers/iio/humidity/hdc2010.c4
-rw-r--r--drivers/iio/imu/Kconfig1
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis16475.c15
-rw-r--r--drivers/iio/imu/bno055/Kconfig25
-rw-r--r--drivers/iio/imu/bno055/Makefile10
-rw-r--r--drivers/iio/imu/bno055/bno055.c1685
-rw-r--r--drivers/iio/imu/bno055/bno055.h13
-rw-r--r--drivers/iio/imu/bno055/bno055_i2c.c57
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c560
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_trace.c14
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_trace.h104
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c4
-rw-r--r--drivers/iio/imu/kmx61.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c7
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/industrialio-buffer.c5
-rw-r--r--drivers/iio/industrialio-core.c29
-rw-r--r--drivers/iio/industrialio-event.c14
-rw-r--r--drivers/iio/industrialio-trigger.c1
-rw-r--r--drivers/iio/inkern.c272
-rw-r--r--drivers/iio/light/Kconfig11
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/apds9300.c4
-rw-r--r--drivers/iio/light/apds9960.c4
-rw-r--r--drivers/iio/light/bh1750.c4
-rw-r--r--drivers/iio/light/bh1780.c4
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm3232.c4
-rw-r--r--drivers/iio/light/cm3605.c6
-rw-r--r--drivers/iio/light/cm36651.c4
-rw-r--r--drivers/iio/light/gp2ap002.c4
-rw-r--r--drivers/iio/light/gp2ap020a00f.c4
-rw-r--r--drivers/iio/light/isl29028.c4
-rw-r--r--drivers/iio/light/isl29125.c4
-rw-r--r--drivers/iio/light/jsa1212.c4
-rw-r--r--drivers/iio/light/ltr501.c4
-rw-r--r--drivers/iio/light/ltrf216a.c550
-rw-r--r--drivers/iio/light/opt3001.c6
-rw-r--r--drivers/iio/light/pa12203001.c4
-rw-r--r--drivers/iio/light/rpr0521.c4
-rw-r--r--drivers/iio/light/st_uvis25_core.c9
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c2
-rw-r--r--drivers/iio/light/st_uvis25_spi.c2
-rw-r--r--drivers/iio/light/stk3310.c4
-rw-r--r--drivers/iio/light/tcs3472.c4
-rw-r--r--drivers/iio/light/tsl2563.c4
-rw-r--r--drivers/iio/light/tsl2583.c4
-rw-r--r--drivers/iio/light/tsl4531.c4
-rw-r--r--drivers/iio/light/us5182d.c4
-rw-r--r--drivers/iio/light/vcnl4000.c4
-rw-r--r--drivers/iio/light/vcnl4035.c4
-rw-r--r--drivers/iio/light/veml6070.c4
-rw-r--r--drivers/iio/magnetometer/Kconfig4
-rw-r--r--drivers/iio/magnetometer/ak8974.c4
-rw-r--r--drivers/iio/magnetometer/ak8975.c4
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c4
-rw-r--r--drivers/iio/magnetometer/hmc5843.h13
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c8
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c6
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c14
-rw-r--r--drivers/iio/magnetometer/mag3110.c4
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c859
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/Kconfig6
-rw-r--r--drivers/iio/pressure/bmp280-core.c974
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c15
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c55
-rw-r--r--drivers/iio/pressure/bmp280-spi.c5
-rw-r--r--drivers/iio/pressure/bmp280.h164
-rw-r--r--drivers/iio/pressure/dlhl60d.c5
-rw-r--r--drivers/iio/pressure/dps310.c262
-rw-r--r--drivers/iio/pressure/icp10100.c10
-rw-r--r--drivers/iio/pressure/mpl3115.c4
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/st_pressure.h2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c70
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c5
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c5
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c4
-rw-r--r--drivers/iio/proximity/srf04.c10
-rw-r--r--drivers/iio/proximity/sx9310.c8
-rw-r--r--drivers/iio/proximity/sx9324.c8
-rw-r--r--drivers/iio/proximity/sx9360.c8
-rw-r--r--drivers/iio/proximity/sx9500.c4
-rw-r--r--drivers/iio/temperature/mlx90614.c45
-rw-r--r--drivers/iio/temperature/mlx90632.c65
-rw-r--r--drivers/iio/test/iio-test-rescale.c4
-rw-r--r--drivers/infiniband/core/cm.c104
-rw-r--r--drivers/infiniband/core/cma.c92
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/device.c4
-rw-r--r--drivers/infiniband/core/lag.c5
-rw-r--r--drivers/infiniband/core/sa_query.c235
-rw-r--r--drivers/infiniband/core/ucma.c10
-rw-r--r--drivers/infiniband/core/umem_odp.c4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c5
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c8
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h3
-rw-r--r--drivers/infiniband/hw/efa/efa_io_defs.h289
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c11
-rw-r--r--drivers/infiniband/hw/erdma/erdma.h4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c8
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cmdq.c8
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cq.c4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_eq.c13
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h14
-rw-r--r--drivers/infiniband/hw/erdma/erdma_main.c17
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c15
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c35
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h9
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_rx.c5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c6
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h3
-rw-r--r--drivers/infiniband/hw/hns/Makefile2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c167
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h18
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c34
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c64
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c220
-rw-r--r--drivers/infiniband/hw/irdma/defs.h1
-rw-r--r--drivers/infiniband/hw/irdma/hw.c51
-rw-r--r--drivers/infiniband/hw/irdma/type.h1
-rw-r--r--drivers/infiniband/hw/irdma/uk.c7
-rw-r--r--drivers/infiniband/hw/irdma/user.h1
-rw-r--r--drivers/infiniband/hw/irdma/utils.c18
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c82
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c1
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c57
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c31
-rw-r--r--drivers/infiniband/hw/mlx5/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h49
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c4
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_icrc.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c61
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c106
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c53
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h3
-rw-r--r--drivers/infiniband/sw/siw/Kconfig5
-rw-r--r--drivers/infiniband/sw/siw/siw.h1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c2
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c27
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c18
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/rtrs/Makefile10
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c15
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h86
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c35
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h7
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h88
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c26
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h5
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c69
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c6
-rw-r--r--drivers/input/joystick/as5011.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c6
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c8
-rw-r--r--drivers/input/joystick/iforce/iforce.h6
-rw-r--r--drivers/input/keyboard/adp5588-keys.c3
-rw-r--r--drivers/input/keyboard/iqs62x-keys.c3
-rw-r--r--drivers/input/keyboard/lm8323.c4
-rw-r--r--drivers/input/keyboard/lm8333.c4
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c4
-rw-r--r--drivers/input/keyboard/qt1070.c4
-rw-r--r--drivers/input/keyboard/qt2160.c4
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c2
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c4
-rw-r--r--drivers/input/misc/adxl34x-i2c.c4
-rw-r--r--drivers/input/misc/bma150.c4
-rw-r--r--drivers/input/misc/cma3000_d0x_i2c.c4
-rw-r--r--drivers/input/misc/pcf8574_keypad.c4
-rw-r--r--drivers/input/misc/rk805-pwrkey.c1
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/mouse/synaptics_i2c.c4
-rw-r--r--drivers/input/rmi4/rmi_smbus.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c4
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c4
-rw-r--r--drivers/input/touchscreen/cyttsp4_i2c.c4
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c4
-rw-r--r--drivers/input/touchscreen/goodix.c6
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c2
-rw-r--r--drivers/input/touchscreen/migor_ts.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c4
-rw-r--r--drivers/input/touchscreen/stmfts.c4
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c10
-rw-r--r--drivers/input/touchscreen/tsc2004.c4
-rw-r--r--drivers/interconnect/core.c10
-rw-r--r--drivers/interconnect/imx/imx.c4
-rw-r--r--drivers/interconnect/imx/imx.h2
-rw-r--r--drivers/interconnect/imx/imx8mm.c4
-rw-r--r--drivers/interconnect/imx/imx8mn.c4
-rw-r--r--drivers/interconnect/imx/imx8mp.c4
-rw-r--r--drivers/interconnect/imx/imx8mq.c4
-rw-r--r--drivers/interconnect/qcom/Kconfig2
-rw-r--r--drivers/interconnect/qcom/icc-common.c3
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c4
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c4
-rw-r--r--drivers/interconnect/qcom/msm8974.c4
-rw-r--r--drivers/interconnect/qcom/osm-l3.c4
-rw-r--r--drivers/interconnect/qcom/sm8450.c4
-rw-r--r--drivers/iommu/Kconfig16
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd/Kconfig1
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h1
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h10
-rw-r--r--drivers/iommu/amd/init.c47
-rw-r--r--drivers/iommu/amd/io_pgtable.c76
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c415
-rw-r--r--drivers/iommu/amd/iommu.c167
-rw-r--r--drivers/iommu/amd/iommu_v2.c2
-rw-r--r--drivers/iommu/apple-dart.c57
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c6
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c83
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c97
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c6
-rw-r--r--drivers/iommu/dma-iommu.c18
-rw-r--r--drivers/iommu/dma-iommu.h42
-rw-r--r--drivers/iommu/exynos-iommu.c9
-rw-r--r--drivers/iommu/fsl_pamu_domain.c6
-rw-r--r--drivers/iommu/intel/Kconfig6
-rw-r--r--drivers/iommu/intel/cap_audit.c4
-rw-r--r--drivers/iommu/intel/iommu.c307
-rw-r--r--drivers/iommu/intel/iommu.h16
-rw-r--r--drivers/iommu/intel/irq_remapping.c6
-rw-r--r--drivers/iommu/intel/pasid.c12
-rw-r--r--drivers/iommu/intel/svm.c62
-rw-r--r--drivers/iommu/io-pgtable-arm.c71
-rw-r--r--drivers/iommu/io-pgtable-dart.c469
-rw-r--r--drivers/iommu/io-pgtable.c4
-rw-r--r--drivers/iommu/iommu.c175
-rw-r--r--drivers/iommu/iova.c13
-rw-r--r--drivers/iommu/ipmmu-vmsa.c35
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c45
-rw-r--r--drivers/iommu/mtk_iommu_v1.c13
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/omap-iommu-debug.c6
-rw-r--r--drivers/iommu/omap-iommu.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c2
-rw-r--r--drivers/iommu/s390-iommu.c15
-rw-r--r--drivers/iommu/sprd-iommu.c5
-rw-r--r--drivers/iommu/sun50i-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c29
-rw-r--r--drivers/iommu/virtio-iommu.c41
-rw-r--r--drivers/ipack/devices/ipoctal.c2
-rw-r--r--drivers/ipack/ipack.c5
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/irq-gic-v2m.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c16
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-gic-v4.c2
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c2
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/mISDN/l1oip.h1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c13
-rw-r--r--drivers/leds/flash/leds-as3645a.c4
-rw-r--r--drivers/leds/flash/leds-lm3601x.c13
-rw-r--r--drivers/leds/flash/leds-rt4505.c3
-rw-r--r--drivers/leds/leds-an30259a.c4
-rw-r--r--drivers/leds/leds-aw2013.c4
-rw-r--r--drivers/leds/leds-bd2802.c4
-rw-r--r--drivers/leds/leds-blinkm.c3
-rw-r--r--drivers/leds/leds-is31fl32xx.c4
-rw-r--r--drivers/leds/leds-lm3530.c3
-rw-r--r--drivers/leds/leds-lm3532.c4
-rw-r--r--drivers/leds/leds-lm355x.c4
-rw-r--r--drivers/leds/leds-lm3642.c3
-rw-r--r--drivers/leds/leds-lm3692x.c4
-rw-r--r--drivers/leds/leds-lm3697.c8
-rw-r--r--drivers/leds/leds-lp3944.c4
-rw-r--r--drivers/leds/leds-lp3952.c4
-rw-r--r--drivers/leds/leds-lp50xx.c4
-rw-r--r--drivers/leds/leds-lp5521.c4
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-lp5562.c4
-rw-r--r--drivers/leds/leds-lp8501.c4
-rw-r--r--drivers/leds/leds-lp8860.c4
-rw-r--r--drivers/leds/leds-pca9532.c6
-rw-r--r--drivers/leds/leds-tca6507.c4
-rw-r--r--drivers/leds/leds-turris-omnia.c4
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c42
-rw-r--r--drivers/macintosh/ams/ams-i2c.c6
-rw-r--r--drivers/macintosh/therm_adt746x.c4
-rw-r--r--drivers/macintosh/therm_windtunnel.c10
-rw-r--r--drivers/macintosh/windfarm_ad7417_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c3
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/mailbox/apple-mailbox.c63
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c8
-rw-r--r--drivers/mailbox/imx-mailbox.c10
-rw-r--r--drivers/mailbox/mailbox-mpfs.c25
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c2
-rw-r--r--drivers/mailbox/qcom-ipcc.c3
-rw-r--r--drivers/md/bcache/bcache.h2
-rw-r--r--drivers/md/bcache/bset.c2
-rw-r--r--drivers/md/bcache/stats.h1
-rw-r--r--drivers/md/bcache/writeback.c78
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-table.c4
-rw-r--r--drivers/md/dm-verity-loadpin.c8
-rw-r--r--drivers/md/dm-verity-target.c16
-rw-r--r--drivers/md/dm-verity.h1
-rw-r--r--drivers/md/md-cluster.c4
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid10.c151
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c11
-rw-r--r--drivers/md/raid5.c147
-rw-r--r--drivers/md/raid5.h32
-rw-r--r--drivers/media/cec/i2c/ch7322.c4
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c4
-rw-r--r--drivers/media/common/Kconfig1
-rw-r--r--drivers/media/common/Makefile2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c14
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dvb.c4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c17
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c11
-rw-r--r--drivers/media/dvb-frontends/a8293.c3
-rw-r--r--drivers/media/dvb-frontends/af9013.c4
-rw-r--r--drivers/media/dvb-frontends/af9033.c4
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c3
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_core.c4
-rw-r--r--drivers/media/dvb-frontends/dib8000.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c2
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c3
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c4
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c3
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c4
-rw-r--r--drivers/media/dvb-frontends/mn88472.c4
-rw-r--r--drivers/media/dvb-frontends/mn88473.c4
-rw-r--r--drivers/media/dvb-frontends/mxl692.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/dvb-frontends/si2165.c3
-rw-r--r--drivers/media/dvb-frontends/si2168.c4
-rw-r--r--drivers/media/dvb-frontends/sp2.c3
-rw-r--r--drivers/media/dvb-frontends/stv090x.c3
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c3
-rw-r--r--drivers/media/dvb-frontends/tc90522.c3
-rw-r--r--drivers/media/dvb-frontends/tda1002x.h2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c2
-rw-r--r--drivers/media/dvb-frontends/tda10071.c3
-rw-r--r--drivers/media/dvb-frontends/ts2020.c3
-rw-r--r--drivers/media/i2c/ad5820.c3
-rw-r--r--drivers/media/i2c/ad9389b.c3
-rw-r--r--drivers/media/i2c/adp1653.c4
-rw-r--r--drivers/media/i2c/adv7170.c3
-rw-r--r--drivers/media/i2c/adv7175.c3
-rw-r--r--drivers/media/i2c/adv7180.c4
-rw-r--r--drivers/media/i2c/adv7183.c3
-rw-r--r--drivers/media/i2c/adv7343.c4
-rw-r--r--drivers/media/i2c/adv7393.c4
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c4
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c7
-rw-r--r--drivers/media/i2c/adv7604.c7
-rw-r--r--drivers/media/i2c/adv7842.c7
-rw-r--r--drivers/media/i2c/ak7375.c4
-rw-r--r--drivers/media/i2c/ak881x.c4
-rw-r--r--drivers/media/i2c/ar0521.c3
-rw-r--r--drivers/media/i2c/bt819.c3
-rw-r--r--drivers/media/i2c/bt856.c3
-rw-r--r--drivers/media/i2c/bt866.c3
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c4
-rw-r--r--drivers/media/i2c/cs3308.c3
-rw-r--r--drivers/media/i2c/cs5345.c3
-rw-r--r--drivers/media/i2c/cs53l32a.c3
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c3
-rw-r--r--drivers/media/i2c/cx25840/cx25840-ir.c2
-rw-r--r--drivers/media/i2c/dw9714.c4
-rw-r--r--drivers/media/i2c/dw9768.c4
-rw-r--r--drivers/media/i2c/dw9807-vcm.c4
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c4
-rw-r--r--drivers/media/i2c/hi556.c4
-rw-r--r--drivers/media/i2c/hi846.c4
-rw-r--r--drivers/media/i2c/hi847.c4
-rw-r--r--drivers/media/i2c/imx208.c4
-rw-r--r--drivers/media/i2c/imx214.c4
-rw-r--r--drivers/media/i2c/imx219.c4
-rw-r--r--drivers/media/i2c/imx258.c4
-rw-r--r--drivers/media/i2c/imx274.c3
-rw-r--r--drivers/media/i2c/imx290.c4
-rw-r--r--drivers/media/i2c/imx319.c4
-rw-r--r--drivers/media/i2c/imx334.c4
-rw-r--r--drivers/media/i2c/imx335.c4
-rw-r--r--drivers/media/i2c/imx355.c4
-rw-r--r--drivers/media/i2c/imx412.c4
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c4
-rw-r--r--drivers/media/i2c/isl7998x.c4
-rw-r--r--drivers/media/i2c/ks0127.c3
-rw-r--r--drivers/media/i2c/lm3560.c4
-rw-r--r--drivers/media/i2c/lm3646.c4
-rw-r--r--drivers/media/i2c/m52790.c3
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c4
-rw-r--r--drivers/media/i2c/max2175.c4
-rw-r--r--drivers/media/i2c/max9286.c4
-rw-r--r--drivers/media/i2c/ml86v7667.c4
-rw-r--r--drivers/media/i2c/msp3400-driver.c3
-rw-r--r--drivers/media/i2c/mt9m001.c4
-rw-r--r--drivers/media/i2c/mt9m032.c3
-rw-r--r--drivers/media/i2c/mt9m111.c4
-rw-r--r--drivers/media/i2c/mt9p031.c4
-rw-r--r--drivers/media/i2c/mt9t001.c3
-rw-r--r--drivers/media/i2c/mt9t112.c4
-rw-r--r--drivers/media/i2c/mt9v011.c4
-rw-r--r--drivers/media/i2c/mt9v032.c4
-rw-r--r--drivers/media/i2c/mt9v111.c4
-rw-r--r--drivers/media/i2c/noon010pc30.c4
-rw-r--r--drivers/media/i2c/og01a1b.c4
-rw-r--r--drivers/media/i2c/ov02a10.c4
-rw-r--r--drivers/media/i2c/ov08d10.c4
-rw-r--r--drivers/media/i2c/ov13858.c4
-rw-r--r--drivers/media/i2c/ov13b10.c4
-rw-r--r--drivers/media/i2c/ov2640.c3
-rw-r--r--drivers/media/i2c/ov2659.c4
-rw-r--r--drivers/media/i2c/ov2680.c4
-rw-r--r--drivers/media/i2c/ov2685.c4
-rw-r--r--drivers/media/i2c/ov2740.c4
-rw-r--r--drivers/media/i2c/ov5640.c4
-rw-r--r--drivers/media/i2c/ov5645.c4
-rw-r--r--drivers/media/i2c/ov5647.c4
-rw-r--r--drivers/media/i2c/ov5648.c4
-rw-r--r--drivers/media/i2c/ov5670.c4
-rw-r--r--drivers/media/i2c/ov5675.c4
-rw-r--r--drivers/media/i2c/ov5693.c4
-rw-r--r--drivers/media/i2c/ov5695.c4
-rw-r--r--drivers/media/i2c/ov6650.c3
-rw-r--r--drivers/media/i2c/ov7251.c4
-rw-r--r--drivers/media/i2c/ov7640.c4
-rw-r--r--drivers/media/i2c/ov7670.c3
-rw-r--r--drivers/media/i2c/ov772x.c4
-rw-r--r--drivers/media/i2c/ov7740.c3
-rw-r--r--drivers/media/i2c/ov8856.c4
-rw-r--r--drivers/media/i2c/ov8865.c4
-rw-r--r--drivers/media/i2c/ov9282.c4
-rw-r--r--drivers/media/i2c/ov9640.c4
-rw-r--r--drivers/media/i2c/ov9650.c4
-rw-r--r--drivers/media/i2c/ov9734.c4
-rw-r--r--drivers/media/i2c/rdacm20.c4
-rw-r--r--drivers/media/i2c/rdacm21.c4
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c4
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c4
-rw-r--r--drivers/media/i2c/s5k4ecgx.c4
-rw-r--r--drivers/media/i2c/s5k5baf.c4
-rw-r--r--drivers/media/i2c/s5k6a3.c3
-rw-r--r--drivers/media/i2c/s5k6aa.c4
-rw-r--r--drivers/media/i2c/saa6588.c4
-rw-r--r--drivers/media/i2c/saa6752hs.c3
-rw-r--r--drivers/media/i2c/saa7110.c3
-rw-r--r--drivers/media/i2c/saa7115.c3
-rw-r--r--drivers/media/i2c/saa7127.c3
-rw-r--r--drivers/media/i2c/saa717x.c3
-rw-r--r--drivers/media/i2c/saa7185.c3
-rw-r--r--drivers/media/i2c/sony-btf-mpx.c4
-rw-r--r--drivers/media/i2c/sr030pc30.c3
-rw-r--r--drivers/media/i2c/st-mipid02.c4
-rw-r--r--drivers/media/i2c/tc358743.c6
-rw-r--r--drivers/media/i2c/tda1997x.c4
-rw-r--r--drivers/media/i2c/tda7432.c3
-rw-r--r--drivers/media/i2c/tda9840.c3
-rw-r--r--drivers/media/i2c/tea6415c.c3
-rw-r--r--drivers/media/i2c/tea6420.c3
-rw-r--r--drivers/media/i2c/ths7303.c4
-rw-r--r--drivers/media/i2c/ths8200.c4
-rw-r--r--drivers/media/i2c/tlv320aic23b.c3
-rw-r--r--drivers/media/i2c/tvaudio.c3
-rw-r--r--drivers/media/i2c/tvp514x.c3
-rw-r--r--drivers/media/i2c/tvp5150.c4
-rw-r--r--drivers/media/i2c/tvp7002.c3
-rw-r--r--drivers/media/i2c/tw2804.c3
-rw-r--r--drivers/media/i2c/tw9903.c3
-rw-r--r--drivers/media/i2c/tw9906.c3
-rw-r--r--drivers/media/i2c/tw9910.c4
-rw-r--r--drivers/media/i2c/uda1342.c3
-rw-r--r--drivers/media/i2c/upd64031a.c3
-rw-r--r--drivers/media/i2c/upd64083.c3
-rw-r--r--drivers/media/i2c/video-i2c.c4
-rw-r--r--drivers/media/i2c/vp27smpx.c3
-rw-r--r--drivers/media/i2c/vpx3220.c4
-rw-r--r--drivers/media/i2c/vs6624.c3
-rw-r--r--drivers/media/i2c/wm8739.c3
-rw-r--r--drivers/media/i2c/wm8775.c3
-rw-r--r--drivers/media/pci/Kconfig4
-rw-r--r--drivers/media/pci/Makefile6
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c3
-rw-r--r--drivers/media/pci/cx23885/cx23888-ir.c2
-rw-r--r--drivers/media/pci/cx88/cx88-dsp.c2
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c9
-rw-r--r--drivers/media/pci/cx88/cx88-video.c43
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c2
-rw-r--r--drivers/media/pci/ngene/ngene.h78
-rw-r--r--drivers/media/pci/pt3/pt3.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c2
-rw-r--r--drivers/media/pci/zoran/Kconfig (renamed from drivers/staging/media/zoran/Kconfig)0
-rw-r--r--drivers/media/pci/zoran/Makefile (renamed from drivers/staging/media/zoran/Makefile)0
-rw-r--r--drivers/media/pci/zoran/videocodec.c (renamed from drivers/staging/media/zoran/videocodec.c)7
-rw-r--r--drivers/media/pci/zoran/videocodec.h (renamed from drivers/staging/media/zoran/videocodec.h)190
-rw-r--r--drivers/media/pci/zoran/zoran.h (renamed from drivers/staging/media/zoran/zoran.h)30
-rw-r--r--drivers/media/pci/zoran/zoran_card.c (renamed from drivers/staging/media/zoran/zoran_card.c)56
-rw-r--r--drivers/media/pci/zoran/zoran_card.h (renamed from drivers/staging/media/zoran/zoran_card.h)9
-rw-r--r--drivers/media/pci/zoran/zoran_device.c (renamed from drivers/staging/media/zoran/zoran_device.c)37
-rw-r--r--drivers/media/pci/zoran/zoran_device.h60
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c (renamed from drivers/staging/media/zoran/zoran_driver.c)59
-rw-r--r--drivers/media/pci/zoran/zr36016.c (renamed from drivers/staging/media/zoran/zr36016.c)142
-rw-r--r--drivers/media/pci/zoran/zr36016.h (renamed from drivers/staging/media/zoran/zr36016.h)0
-rw-r--r--drivers/media/pci/zoran/zr36050.c (renamed from drivers/staging/media/zoran/zr36050.c)182
-rw-r--r--drivers/media/pci/zoran/zr36050.h (renamed from drivers/staging/media/zoran/zr36050.h)0
-rw-r--r--drivers/media/pci/zoran/zr36057.h (renamed from drivers/staging/media/zoran/zr36057.h)130
-rw-r--r--drivers/media/pci/zoran/zr36060.c (renamed from drivers/staging/media/zoran/zr36060.c)7
-rw-r--r--drivers/media/pci/zoran/zr36060.h (renamed from drivers/staging/media/zoran/zr36060.h)86
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c1
-rw-r--r--drivers/media/platform/amphion/vdec.c16
-rw-r--r--drivers/media/platform/amphion/venc.c2
-rw-r--r--drivers/media/platform/amphion/vpu.h1
-rw-r--r--drivers/media/platform/amphion/vpu_core.c84
-rw-r--r--drivers/media/platform/amphion/vpu_core.h1
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c9
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c2
-rw-r--r--drivers/media/platform/intel/pxa_camera.c8
-rw-r--r--drivers/media/platform/marvell/mcam-core.h2
-rw-r--r--drivers/media/platform/mediatek/Kconfig1
-rw-r--r--drivers/media/platform/mediatek/Makefile1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c1
-rw-r--r--drivers/media/platform/mediatek/mdp3/Kconfig21
-rw-r--r--drivers/media/platform/mediatek/mdp3/Makefile6
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h19
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h65
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h39
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h47
-rw-r--r--drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h55
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h290
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c466
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h43
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c1033
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h186
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c357
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h94
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c724
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h48
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c735
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h373
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c313
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h78
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h6
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c19
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c23
-rw-r--r--drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c200
-rw-r--r--drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h24
-rw-r--r--drivers/media/platform/mediatek/vcodec/venc_vpu_if.c76
-rw-r--r--drivers/media/platform/nxp/Kconfig13
-rw-r--r--drivers/media/platform/nxp/Makefile2
-rw-r--r--drivers/media/platform/nxp/dw100/Kconfig16
-rw-r--r--drivers/media/platform/nxp/dw100/Makefile3
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c1707
-rw-r--r--drivers/media/platform/nxp/dw100/dw100_regs.h117
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1.h4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drm.c2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c101
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lif.c12
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c2
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h2
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c1
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c5
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c2
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.h2
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c31
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c5
-rw-r--r--drivers/media/platform/ti/cal/cal.c139
-rw-r--r--drivers/media/platform/ti/cal/cal.h7
-rw-r--r--drivers/media/platform/ti/davinci/Kconfig49
-rw-r--r--drivers/media/platform/ti/davinci/Makefile4
-rw-r--r--drivers/media/platform/ti/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/ti/davinci/vpif.h60
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c6
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.h2
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c6
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.h6
-rw-r--r--drivers/media/platform/ti/omap/omap_voutlib.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c2
-rw-r--r--drivers/media/platform/verisilicon/Kconfig (renamed from drivers/staging/media/hantro/Kconfig)6
-rw-r--r--drivers/media/platform/verisilicon/Makefile (renamed from drivers/staging/media/hantro/Makefile)0
-rw-r--r--drivers/media/platform/verisilicon/hantro.h (renamed from drivers/staging/media/hantro/hantro.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c (renamed from drivers/staging/media/hantro/hantro_drv.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1.c (renamed from drivers/staging/media/hantro/hantro_g1.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_h264_dec.c (renamed from drivers/staging/media/hantro/hantro_g1_h264_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c (renamed from drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_regs.h (renamed from drivers/staging/media/hantro/hantro_g1_regs.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c (renamed from drivers/staging/media/hantro/hantro_g1_vp8_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c (renamed from drivers/staging/media/hantro/hantro_g2.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c (renamed from drivers/staging/media/hantro/hantro_g2_hevc_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_regs.h (renamed from drivers/staging/media/hantro/hantro_g2_regs.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c (renamed from drivers/staging/media/hantro/hantro_g2_vp9_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c (renamed from drivers/staging/media/hantro/hantro_h1_jpeg_enc.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_h1_regs.h (renamed from drivers/staging/media/hantro/hantro_h1_regs.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_h264.c (renamed from drivers/staging/media/hantro/hantro_h264.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_hevc.c (renamed from drivers/staging/media/hantro/hantro_hevc.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h (renamed from drivers/staging/media/hantro/hantro_hw.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_jpeg.c (renamed from drivers/staging/media/hantro/hantro_jpeg.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_jpeg.h (renamed from drivers/staging/media/hantro/hantro_jpeg.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_mpeg2.c (renamed from drivers/staging/media/hantro/hantro_mpeg2.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_postproc.c (renamed from drivers/staging/media/hantro/hantro_postproc.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c (renamed from drivers/staging/media/hantro/hantro_v4l2.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.h (renamed from drivers/staging/media/hantro/hantro_v4l2.h)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_vp8.c (renamed from drivers/staging/media/hantro/hantro_vp8.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_vp9.c (renamed from drivers/staging/media/hantro/hantro_vp9.c)0
-rw-r--r--drivers/media/platform/verisilicon/hantro_vp9.h (renamed from drivers/staging/media/hantro/hantro_vp9.h)0
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c (renamed from drivers/staging/media/hantro/imx8m_vpu_hw.c)0
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c (renamed from drivers/staging/media/hantro/rockchip_vpu2_hw_h264_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c (renamed from drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c)0
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c (renamed from drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c (renamed from drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c)0
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu2_regs.h (renamed from drivers/staging/media/hantro/rockchip_vpu2_regs.h)0
-rw-r--r--drivers/media/platform/verisilicon/rockchip_vpu_hw.c (renamed from drivers/staging/media/hantro/rockchip_vpu_hw.c)0
-rw-r--r--drivers/media/platform/verisilicon/sama5d4_vdec_hw.c (renamed from drivers/staging/media/hantro/sama5d4_vdec_hw.c)0
-rw-r--r--drivers/media/platform/verisilicon/sunxi_vpu_hw.c (renamed from drivers/staging/media/hantro/sunxi_vpu_hw.c)0
-rw-r--r--drivers/media/platform/xilinx/xilinx-csi2rxss.c1
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c9
-rw-r--r--drivers/media/radio/radio-tea5764.c3
-rw-r--r--drivers/media/radio/saa7706h.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c3
-rw-r--r--drivers/media/radio/si4713/si4713.c4
-rw-r--r--drivers/media/radio/tef6862.c3
-rw-r--r--drivers/media/rc/mceusb.c35
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_demod.c4
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_tuner.c4
-rw-r--r--drivers/media/test-drivers/vim2m.c2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c14
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c4
-rw-r--r--drivers/media/tuners/e4000.c4
-rw-r--r--drivers/media/tuners/fc2580.c3
-rw-r--r--drivers/media/tuners/m88rs6000t.c4
-rw-r--r--drivers/media/tuners/mt2060.c4
-rw-r--r--drivers/media/tuners/mxl301rf.c3
-rw-r--r--drivers/media/tuners/qm1d1b0004.c3
-rw-r--r--drivers/media/tuners/qm1d1c0042.c3
-rw-r--r--drivers/media/tuners/si2157.c4
-rw-r--r--drivers/media/tuners/tda18212.c4
-rw-r--r--drivers/media/tuners/tda18250.c4
-rw-r--r--drivers/media/tuners/tua9001.c3
-rw-r--r--drivers/media/usb/Kconfig3
-rw-r--r--drivers/media/usb/Makefile3
-rw-r--r--drivers/media/usb/airspy/airspy.c6
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c22
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/go7007/s2250-board.c3
-rw-r--r--drivers/media/usb/gspca/finepix.c2
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.c2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c118
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c309
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c14
-rw-r--r--drivers/media/usb/uvc/uvc_video.c2
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h147
-rw-r--r--drivers/media/v4l2-core/tuner-core.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c86
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c62
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c218
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c28
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c6
-rw-r--r--drivers/memory/Kconfig9
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/brcmstb_memc.c301
-rw-r--r--drivers/memory/dfl-emif.c62
-rw-r--r--drivers/memory/mtk-smi.c109
-rw-r--r--drivers/memory/of_memory.c2
-rw-r--r--drivers/memory/pl353-smc.c1
-rw-r--r--drivers/message/fusion/mptctl.c6
-rw-r--r--drivers/mfd/88pm800.c4
-rw-r--r--drivers/mfd/88pm805.c4
-rw-r--r--drivers/mfd/88pm860x-core.c3
-rw-r--r--drivers/mfd/Kconfig65
-rw-r--r--drivers/mfd/Makefile15
-rw-r--r--drivers/mfd/acer-ec-a500.c4
-rw-r--r--drivers/mfd/arizona-i2c.c4
-rw-r--r--drivers/mfd/axp20x-i2c.c4
-rw-r--r--drivers/mfd/da903x.c3
-rw-r--r--drivers/mfd/da9052-i2c.c3
-rw-r--r--drivers/mfd/da9055-i2c.c4
-rw-r--r--drivers/mfd/da9062-core.c5
-rw-r--r--drivers/mfd/da9150-core.c4
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/ene-kb3930.c4
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c34
-rw-r--r--drivers/mfd/gateworks-gsc.c4
-rw-r--r--drivers/mfd/htc-i2cpld.c60
-rw-r--r--drivers/mfd/intel-lpss-pci.c141
-rw-r--r--drivers/mfd/intel-m10-bmc.c1
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c8
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c160
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h25
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c139
-rw-r--r--drivers/mfd/iqs62x.c4
-rw-r--r--drivers/mfd/lm3533-core.c4
-rw-r--r--drivers/mfd/lp8788-irq.c3
-rw-r--r--drivers/mfd/lp8788.c15
-rw-r--r--drivers/mfd/lpc_ich.c2
-rw-r--r--drivers/mfd/madera-i2c.c4
-rw-r--r--drivers/mfd/max14577.c4
-rw-r--r--drivers/mfd/max77693.c4
-rw-r--r--drivers/mfd/max8907.c4
-rw-r--r--drivers/mfd/max8925-i2c.c3
-rw-r--r--drivers/mfd/mc13xxx-i2c.c3
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c9
-rw-r--r--drivers/mfd/mt6370.c312
-rw-r--r--drivers/mfd/mt6370.h99
-rw-r--r--drivers/mfd/ntxec.c4
-rw-r--r--drivers/mfd/ocelot-core.c161
-rw-r--r--drivers/mfd/ocelot-spi.c300
-rw-r--r--drivers/mfd/ocelot.h49
-rw-r--r--drivers/mfd/palmas.c4
-rw-r--r--drivers/mfd/pcf50633-core.c4
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c1
-rw-r--r--drivers/mfd/retu-mfd.c4
-rw-r--r--drivers/mfd/rk808.c20
-rw-r--r--drivers/mfd/rn5t618.c4
-rw-r--r--drivers/mfd/rsmu_i2c.c4
-rw-r--r--drivers/mfd/rt4831.c4
-rw-r--r--drivers/mfd/rt5120.c124
-rw-r--r--drivers/mfd/si476x-i2c.c4
-rw-r--r--drivers/mfd/sm501.c7
-rw-r--r--drivers/mfd/stmfx.c4
-rw-r--r--drivers/mfd/stmpe-i2c.c4
-rw-r--r--drivers/mfd/stmpe.c49
-rw-r--r--drivers/mfd/syscon.c8
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/tps6105x.c4
-rw-r--r--drivers/mfd/tps65010.c3
-rw-r--r--drivers/mfd/tps65086.c4
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/tps6586x.c3
-rw-r--r--drivers/mfd/tps65912-i2c.c4
-rw-r--r--drivers/mfd/twl-core.c5
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/mfd/twl6040.c4
-rw-r--r--drivers/mfd/wm8994-core.c4
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c3
-rw-r--r--drivers/misc/altera-stapl/altera.c8
-rw-r--r--drivers/misc/apds9802als.c3
-rw-r--r--drivers/misc/apds990x.c3
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_dev.c8
-rw-r--r--drivers/misc/bh1770glc.c4
-rw-r--r--drivers/misc/ds1682.c3
-rw-r--r--drivers/misc/eeprom/at24.c4
-rw-r--r--drivers/misc/eeprom/ee1004.c4
-rw-r--r--drivers/misc/eeprom/eeprom.c6
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c6
-rw-r--r--drivers/misc/eeprom/max6875.c4
-rw-r--r--drivers/misc/fastrpc.c16
-rw-r--r--drivers/misc/habanalabs/Kconfig1
-rw-r--r--drivers/misc/habanalabs/Makefile8
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c127
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c75
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c35
-rw-r--r--drivers/misc/habanalabs/common/device.c147
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c184
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h171
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c44
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c123
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c4
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c24
-rw-r--r--drivers/misc/habanalabs/common/memory.c57
-rw-r--r--drivers/misc/habanalabs/common/memory_mgr.c10
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c31
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c10
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c185
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2.c671
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2P.h10
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_masks.h21
-rw-r--r--drivers/misc/habanalabs/gaudi2/gaudi2_security.c26
-rw-r--r--drivers/misc/habanalabs/goya/goya.c62
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h103
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h37
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h2
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h185
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h57
-rw-r--r--drivers/misc/hmc6352.c3
-rw-r--r--drivers/misc/ibmvmc.c6
-rw-r--r--drivers/misc/ics932s401.c7
-rw-r--r--drivers/misc/isl29003.c3
-rw-r--r--drivers/misc/isl29020.c3
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c3
-rw-r--r--drivers/misc/lkdtm/cfi.c15
-rw-r--r--drivers/misc/lkdtm/fortify.c96
-rw-r--r--drivers/misc/lkdtm/usercopy.c2
-rw-r--r--drivers/misc/mchp_pci1xxxx/Kconfig13
-rw-r--r--drivers/misc/mchp_pci1xxxx/Makefile1
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c165
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h28
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c427
-rw-r--r--drivers/misc/mei/bus-fixup.c108
-rw-r--r--drivers/misc/mei/client.c16
-rw-r--r--drivers/misc/mei/debugfs.c19
-rw-r--r--drivers/misc/mei/gsc-me.c78
-rw-r--r--drivers/misc/mei/hbm.c14
-rw-r--r--drivers/misc/mei/hw-me-regs.h9
-rw-r--r--drivers/misc/mei/hw-me.c138
-rw-r--r--drivers/misc/mei/hw-me.h17
-rw-r--r--drivers/misc/mei/hw-txe.c6
-rw-r--r--drivers/misc/mei/hw.h7
-rw-r--r--drivers/misc/mei/init.c35
-rw-r--r--drivers/misc/mei/main.c4
-rw-r--r--drivers/misc/mei/mei_dev.h35
-rw-r--r--drivers/misc/mei/mkhi.h55
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c34
-rw-r--r--drivers/misc/sgi-xp/xp.h4
-rw-r--r--drivers/misc/tsl2550.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c16
-rw-r--r--drivers/misc/xilinx_sdfec.c3
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/sd.c49
-rw-r--r--drivers/mmc/core/sdio.c4
-rw-r--r--drivers/mmc/core/sdio_irq.c4
-rw-r--r--drivers/mmc/core/sdio_uart.c4
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/au1xmmc.c3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c84
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c4
-rw-r--r--drivers/mmc/host/mmc_hsq.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c17
-rw-r--r--drivers/mmc/host/mtk-sd.c109
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c5
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c7
-rw-r--r--drivers/mmc/host/sdhci-sprd.c6
-rw-r--r--drivers/mmc/host/sdhci.c88
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_am654.c5
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c5
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/devices/docg3.c21
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/inftlcore.c6
-rw-r--r--drivers/mtd/maps/physmap-core.c3
-rw-r--r--drivers/mtd/maps/pismo.c4
-rw-r--r--drivers/mtd/mtdchar.c139
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/mtdcore.c33
-rw-r--r--drivers/mtd/mtdpstore.c2
-rw-r--r--drivers/mtd/mtdswap.c6
-rw-r--r--drivers/mtd/nand/bbt.c7
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c16
-rw-r--r--drivers/mtd/nand/onenand/onenand_bbt.c2
-rw-r--r--drivers/mtd/nand/raw/Kconfig24
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/Kconfig49
-rw-r--r--drivers/mtd/nand/raw/brcmnand/Makefile8
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c28
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c28
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c8
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c90
-rw-r--r--drivers/mtd/nand/raw/nand_base.c15
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c8
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c17
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c13
-rw-r--r--drivers/mtd/nand/spi/core.c10
-rw-r--r--drivers/mtd/nftlcore.c6
-rw-r--r--drivers/mtd/parsers/Kconfig16
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/brcm_u-boot.c84
-rw-r--r--drivers/mtd/parsers/cmdlinepart.c4
-rw-r--r--drivers/mtd/sm_ftl.c4
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c8
-rw-r--r--drivers/mtd/tests/readtest.c2
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/Space.c2
-rw-r--r--drivers/net/amt.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c5
-rw-r--r--drivers/net/bonding/bond_main.c94
-rw-r--r--drivers/net/bonding/bond_sysfs.c106
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c28
-rw-r--r--drivers/net/can/c_can/c_can.h17
-rw-r--r--drivers/net/can/c_can/c_can_main.c11
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c3
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c1
-rw-r--r--drivers/net/can/dev/rx-offload.c4
-rw-r--r--drivers/net/can/dev/skb.c113
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c69
-rw-r--r--drivers/net/can/flexcan/flexcan.h20
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c7
-rw-r--r--drivers/net/can/m_can/m_can.c3
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c26
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c38
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.h2
-rw-r--r--drivers/net/can/usb/gs_usb.c682
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c20
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/can/vxcan.c8
-rw-r--r--drivers/net/dsa/Kconfig6
-rw-r--r--drivers/net/dsa/b53/b53_common.c2
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c2
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c2
-rw-r--r--drivers/net/dsa/b53/b53_srab.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c136
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/dsa_loop.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c99
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h7
-rw-r--r--drivers/net/dsa/lan9303-core.c34
-rw-r--r--drivers/net/dsa/lan9303_i2c.c8
-rw-r--r--drivers/net/dsa/lan9303_mdio.c3
-rw-r--r--drivers/net/dsa/lantiq_gswip.c10
-rw-r--r--drivers/net/dsa/microchip/ksz8.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c111
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c2
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c110
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h5
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c10
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c1073
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h136
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c15
-rw-r--r--drivers/net/dsa/microchip/lan937x.h6
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c118
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h18
-rw-r--r--drivers/net/dsa/mt7530.c71
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c39
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c19
-rw-r--r--drivers/net/dsa/ocelot/felix.c255
-rw-r--r--drivers/net/dsa/ocelot/felix.h16
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c845
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c518
-rw-r--r--drivers/net/dsa/qca/ar9331.c2
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c4
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c23
-rw-r--r--drivers/net/dsa/qca/qca8k.h3
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c2
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c2
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c5
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c8
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c2
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c6
-rw-r--r--drivers/net/ethernet/3com/typhoon.c8
-rw-r--r--drivers/net/ethernet/8390/ax88796.c6
-rw-r--r--drivers/net/ethernet/8390/etherh.c6
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c2
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c4
-rw-r--r--drivers/net/ethernet/adi/Kconfig28
-rw-r--r--drivers/net/ethernet/adi/Makefile6
-rw-r--r--drivers/net/ethernet/adi/adin1110.c1697
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/agere/et131x.c6
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c4
-rw-r--r--drivers/net/ethernet/altera/Kconfig2
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h19
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c23
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c456
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c8
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c49
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c4
-rw-r--r--drivers/net/ethernet/amd/atarilance.c10
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c8
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c20
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c16
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c4
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c57
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c6
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c17
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c18
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c8
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c30
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c5
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c5
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c9
-rw-r--r--drivers/net/ethernet/cortina/gemini.c26
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c32
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c4
-rw-r--r--drivers/net/ethernet/dlink/sundance.c4
-rw-r--r--drivers/net/ethernet/dnet.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c3
-rw-r--r--drivers/net/ethernet/engleder/Kconfig1
-rw-r--r--drivers/net/ethernet/engleder/Makefile2
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h48
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c40
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h16
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c465
-rw-r--r--drivers/net/ethernet/engleder/tsnep_rxnfc.c307
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c30
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c12
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.h12
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c62
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c84
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h26
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c239
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h116
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c59
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c117
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c17
-rw-r--r--drivers/net/ethernet/freescale/fec.h39
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c276
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c321
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h58
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.h29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h24
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c238
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h57
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h32
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.h28
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c164
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h54
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c497
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h45
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c2
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c15
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c16
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c15
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h42
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c103
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c109
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c89
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c66
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c327
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h25
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c415
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c50
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c35
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h25
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c12
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c303
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h23
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c20
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c89
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c20
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c202
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c372
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c288
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c333
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c807
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c98
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c242
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c242
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c226
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h15
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c131
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/jme.c8
-rw-r--r--drivers/net/ethernet/korina.c11
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c3
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c11
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c25
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h473
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.c1601
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.h246
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c214
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h1102
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c889
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c346
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c1668
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c60
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h131
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c300
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c84
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c103
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c110
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h14
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c51
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c54
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c179
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h18
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c54
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.c125
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.h17
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c1119
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c366
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h76
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c66
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.h12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c8
-rw-r--r--drivers/net/ethernet/marvell/sky2.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c122
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h97
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c302
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h81
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c46
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c64
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c479
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h89
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h158
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c188
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c559
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c227
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1870
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c1384
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c437
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c420
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c373
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c511
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c139
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c379
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h163
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c46
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c5
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c6
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c68
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c70
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ets.c96
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c155
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c363
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c104
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c20
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h119
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c235
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c24
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h356
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c528
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c85
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c133
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c95
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c11
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h21
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h165
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c513
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c271
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c125
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h15
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c14
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/mscc/Makefile11
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c808
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h12
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c95
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c481
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c458
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c419
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c3
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c8
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c8
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c242
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c74
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c262
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c61
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c12
-rw-r--r--drivers/net/ethernet/ni/nixge.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c3
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c16
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c19
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c3
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c8
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c6
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.h18
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c241
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c133
-rw-r--r--drivers/net/ethernet/renesas/ravb.h8
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c15
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c2
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c3
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c21
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h1
-rw-r--r--drivers/net/ethernet/sfc/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c4
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c43
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.h22
-rw-r--r--drivers/net/ethernet/sfc/mae.c165
-rw-r--r--drivers/net/ethernet/sfc/mae.h14
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h10
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c128
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c4
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c430
-rw-r--r--drivers/net/ethernet/sfc/tc.h36
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.c228
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.h29
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c6
-rw-r--r--drivers/net/ethernet/smsc/epic100.c8
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c12
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c8
-rw-r--r--drivers/net/ethernet/socionext/netsec.c6
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c280
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c53
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/sun/cassini.c8
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c8
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c8
-rw-r--r--drivers/net/ethernet/sun/sunhme.c669
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c7
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c6
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c10
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h2
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c7
-rw-r--r--drivers/net/ethernet/ti/cpmac.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c15
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c242
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c2
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c6
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c6
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c25
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c10
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig13
-rw-r--r--drivers/net/ethernet/wangxun/Makefile1
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h24
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c170
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h50
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c6
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c6
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h181
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c81
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c51
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c6
-rw-r--r--drivers/net/fjes/fjes_ethtool.c6
-rw-r--r--drivers/net/fjes/fjes_main.c1152
-rw-r--r--drivers/net/geneve.c13
-rw-r--r--drivers/net/gtp.c1
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hippi/rrunner.c1
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ieee802154/cc2520.c1
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c1
-rw-r--r--drivers/net/ipa/Makefile2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c2
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c10
-rw-r--r--drivers/net/ipa/gsi.c94
-rw-r--r--drivers/net/ipa/gsi.h26
-rw-r--r--drivers/net/ipa/gsi_private.h14
-rw-r--r--drivers/net/ipa/gsi_reg.h210
-rw-r--r--drivers/net/ipa/gsi_trans.c221
-rw-r--r--drivers/net/ipa/gsi_trans.h7
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_cmd.c11
-rw-r--r--drivers/net/ipa/ipa_cmd.h2
-rw-r--r--drivers/net/ipa/ipa_data.h4
-rw-r--r--drivers/net/ipa/ipa_endpoint.c494
-rw-r--r--drivers/net/ipa/ipa_endpoint.h2
-rw-r--r--drivers/net/ipa/ipa_interrupt.c47
-rw-r--r--drivers/net/ipa/ipa_interrupt.h2
-rw-r--r--drivers/net/ipa/ipa_main.c284
-rw-r--r--drivers/net/ipa/ipa_mem.c18
-rw-r--r--drivers/net/ipa/ipa_modem.c2
-rw-r--r--drivers/net/ipa/ipa_modem.h2
-rw-r--r--drivers/net/ipa/ipa_power.c2
-rw-r--r--drivers/net/ipa/ipa_power.h2
-rw-r--r--drivers/net/ipa/ipa_qmi.c10
-rw-r--r--drivers/net/ipa/ipa_qmi.h2
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c10
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h39
-rw-r--r--drivers/net/ipa/ipa_reg.c97
-rw-r--r--drivers/net/ipa/ipa_reg.h1121
-rw-r--r--drivers/net/ipa/ipa_resource.c65
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_smp2p.h2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c2
-rw-r--r--drivers/net/ipa/ipa_sysfs.h2
-rw-r--r--drivers/net/ipa/ipa_table.c31
-rw-r--r--drivers/net/ipa/ipa_table.h5
-rw-r--r--drivers/net/ipa/ipa_uc.c11
-rw-r--r--drivers/net/ipa/ipa_uc.h2
-rw-r--r--drivers/net/ipa/ipa_version.h30
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c478
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c456
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c512
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c456
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c533
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c509
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/macsec.c94
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/mctp/mctp-i2c.c4
-rw-r--r--drivers/net/mdio/fwnode_mdio.c62
-rw-r--r--drivers/net/mdio/mdio-i2c.c310
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c42
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c20
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c9
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c9
-rw-r--r--drivers/net/mdio/of_mdio.c1
-rw-r--r--drivers/net/net_failover.c4
-rw-r--r--drivers/net/netconsole.c10
-rw-r--r--drivers/net/netdevsim/dev.c20
-rw-r--r--drivers/net/netdevsim/hwstats.c6
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/ntb_netdev.c6
-rw-r--r--drivers/net/pcs/Kconfig6
-rw-r--r--drivers/net/pcs/Makefile1
-rw-r--r--drivers/net/pcs/pcs-altera-tse.c175
-rw-r--r--drivers/net/phy/adin.c2
-rw-r--r--drivers/net/phy/adin1100.c7
-rw-r--r--drivers/net/phy/aquantia_main.c121
-rw-r--r--drivers/net/phy/at803x.c28
-rw-r--r--drivers/net/phy/bcm-phy-lib.c2
-rw-r--r--drivers/net/phy/broadcom.c39
-rw-r--r--drivers/net/phy/marvell-88x2222.c3
-rw-r--r--drivers/net/phy/marvell.c5
-rw-r--r--drivers/net/phy/marvell10g.c133
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/meson-gxl.c8
-rw-r--r--drivers/net/phy/micrel.c221
-rw-r--r--drivers/net/phy/microchip_t1.c58
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c113
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/nxp-tja11xx.c83
-rw-r--r--drivers/net/phy/phy-core.c74
-rw-r--r--drivers/net/phy/phy.c28
-rw-r--r--drivers/net/phy/phy_device.c24
-rw-r--r--drivers/net/phy/phylink.c487
-rw-r--r--drivers/net/phy/realtek.c44
-rw-r--r--drivers/net/phy/sfp-bus.c175
-rw-r--r--drivers/net/phy/sfp.c397
-rw-r--r--drivers/net/phy/sfp.h11
-rw-r--r--drivers/net/phy/smsc.c30
-rw-r--r--drivers/net/phy/spi_ks8995.c69
-rw-r--r--drivers/net/pse-pd/Kconfig22
-rw-r--r--drivers/net/pse-pd/Makefile6
-rw-r--r--drivers/net/pse-pd/pse_core.c314
-rw-r--r--drivers/net/pse-pd/pse_regulator.c147
-rw-r--r--drivers/net/rionet.c8
-rw-r--r--drivers/net/team/team.c29
-rw-r--r--drivers/net/thunderbolt.c64
-rw-r--r--drivers/net/tun.c31
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/aqc111.c2
-rw-r--r--drivers/net/usb/asix.h3
-rw-r--r--drivers/net/usb/asix_common.c4
-rw-r--r--drivers/net/usb/asix_devices.c142
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/hso.c3
-rw-r--r--drivers/net/usb/lan78xx.c2
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c35
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c54
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c6
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c13
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireguard/peer.c3
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c25
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c68
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c188
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c132
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c488
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c28
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h20
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c21
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c118
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h23
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c165
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c118
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c30
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c54
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h28
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c246
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h72
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c62
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c118
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c25
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c434
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c23
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h7
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h13
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c668
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c376
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c2
-rw-r--r--drivers/net/wireless/intersil/p54/main.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c552
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c11
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c256
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c198
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c22
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c39
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c18
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c1753
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c108
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c9
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c88
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c101
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h21
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c220
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h31
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c23
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c65
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c235
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h64
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1887
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h6
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c489
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h551
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c107
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c702
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h299
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c338
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h63
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c161
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c410
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h73
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c453
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c78
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h148
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c244
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c77
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c94
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c411
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c76
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c28868
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c17
-rw-r--r--drivers/net/wireless/rndis_wlan.c25
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c1
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c18
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c8
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c4
-rw-r--r--drivers/net/wireless/wl3501_cs.c8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c9
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c2
-rw-r--r--drivers/net/wwan/wwan_core.c1
-rw-r--r--drivers/net/wwan/wwan_hwsim.c6
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c7
-rw-r--r--drivers/net/xen-netback/xenbus.c5
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nfc/fdp/i2c.c4
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c4
-rw-r--r--drivers/nfc/nxp-nci/i2c.c4
-rw-r--r--drivers/nfc/pn533/i2c.c4
-rw-r--r--drivers/nfc/pn544/i2c.c4
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c4
-rw-r--r--drivers/nfc/st-nci/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nvdimm/namespace_devs.c2
-rw-r--r--drivers/nvdimm/pmem.c6
-rw-r--r--drivers/nvme/host/core.c167
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fc.c124
-rw-r--r--drivers/nvme/host/ioctl.c317
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/nvme.h48
-rw-r--r--drivers/nvme/host/pci.c96
-rw-r--r--drivers/nvme/host/rdma.c171
-rw-r--r--drivers/nvme/host/tcp.c176
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/auth.c1
-rw-r--r--drivers/nvme/target/configfs.c29
-rw-r--r--drivers/nvme/target/core.c7
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c23
-rw-r--r--drivers/nvme/target/fabrics-cmd.c19
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c19
-rw-r--r--drivers/nvme/target/loop.c91
-rw-r--r--drivers/nvme/target/nvmet.h7
-rw-r--r--drivers/nvme/target/passthru.c12
-rw-r--r--drivers/nvme/target/tcp.c94
-rw-r--r--drivers/nvme/target/zns.c20
-rw-r--r--drivers/nvmem/Kconfig313
-rw-r--r--drivers/nvmem/Makefile120
-rw-r--r--drivers/nvmem/brcm_nvram.c2
-rw-r--r--drivers/nvmem/core.c27
-rw-r--r--drivers/nvmem/lan9662-otpc.c222
-rw-r--r--drivers/nvmem/u-boot-env.c219
-rw-r--r--drivers/of/address.c4
-rw-r--r--drivers/of/base.c8
-rw-r--r--drivers/of/device.c9
-rw-r--r--drivers/of/fdt.c19
-rw-r--r--drivers/of/irq.c3
-rw-r--r--drivers/of/of_private.h5
-rw-r--r--drivers/of/unittest.c11
-rw-r--r--drivers/opp/core.c2
-rw-r--r--drivers/parisc/ccio-dma.c1
-rw-r--r--drivers/parisc/iosapic.c11
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/pci/controller/Kconfig2
-rw-r--r--drivers/pcmcia/Kconfig13
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/at91_cf.c407
-rw-r--r--drivers/pcmcia/i82092.c4
-rw-r--r--drivers/pcmcia/omap_cf.c4
-rw-r--r--drivers/pcmcia/sa1100_generic.c10
-rw-r--r--drivers/pcmcia/vrc4171_card.c745
-rw-r--r--drivers/peci/controller/peci-aspeed.c2
-rw-r--r--drivers/peci/cpu.c3
-rw-r--r--drivers/perf/Kconfig7
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c810
-rw-r--r--drivers/perf/arm-cmn.c2
-rw-r--r--drivers/perf/arm_dsu_pmu.c4
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/arm_spe_pmu.c10
-rw-r--r--drivers/perf/qcom_l2_pmu.c10
-rw-r--r--drivers/perf/qcom_l3_pmu.c3
-rw-r--r--drivers/perf/riscv_pmu_legacy.c4
-rw-r--r--drivers/perf/riscv_pmu_sbi.c33
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c6
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c32
-rw-r--r--drivers/phy/broadcom/Kconfig4
-rw-r--r--drivers/phy/intel/phy-intel-lgm-combo.c45
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c87
-rw-r--r--drivers/phy/mediatek/phy-mtk-dp.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c238
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c246
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.c33
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h7
-rw-r--r--drivers/phy/mediatek/phy-mtk-io.h8
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c164
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c74
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c24
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.h5
-rw-r--r--drivers/phy/mediatek/phy-mtk-pcie.c17
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c223
-rw-r--r--drivers/phy/mediatek/phy-mtk-ufs.c78
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c46
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c102
-rw-r--r--drivers/phy/microchip/lan966x_serdes_regs.h42
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c221
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c819
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c251
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c360
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h333
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c270
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c468
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c27
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c273
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hsic.c6
-rw-r--r--drivers/phy/rockchip/Kconfig9
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c24
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c204
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c10
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c322
-rw-r--r--drivers/phy/sunplus/Kconfig12
-rw-r--r--drivers/phy/sunplus/Makefile2
-rw-r--r--drivers/phy/sunplus/phy-sunplus-usb2.c296
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c28
-rw-r--r--drivers/phy/tegra/xusb.c25
-rw-r--r--drivers/phy/tegra/xusb.h4
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c47
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c248
-rw-r--r--drivers/pinctrl/Kconfig5
-rw-r--r--drivers/pinctrl/bcm/Kconfig4
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c14
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c127
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c2
-rw-r--r--drivers/platform/chrome/Kconfig11
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c24
-rw-r--r--drivers/platform/chrome/cros_ec.c11
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c3
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c3
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c4
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c32
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c110
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c321
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c55
-rw-r--r--drivers/platform/surface/surface3_power.c6
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c3
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c47
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/acer-wmi.c77
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/amd/Kconfig2
-rw-r--r--drivers/platform/x86/amd/Makefile1
-rw-r--r--drivers/platform/x86/amd/hsmp.c2
-rw-r--r--drivers/platform/x86/amd/pmc.c131
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig16
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile9
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c304
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c305
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c395
-rw-r--r--drivers/platform/x86/amd/pmf/core.c412
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h417
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c146
-rw-r--r--drivers/platform/x86/amilo-rfkill.c3
-rw-r--r--drivers/platform/x86/apple-gmux.c3
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c59
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c667
-rw-r--r--drivers/platform/x86/asus-wmi.h12
-rw-r--r--drivers/platform/x86/compal-laptop.c153
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c2
-rw-r--r--drivers/platform/x86/dell/dcdbas.c2
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c2
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c4
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c3
-rw-r--r--drivers/platform/x86/dell/dell-wmi-privacy.c12
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c2
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/eeepc-wmi.c25
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c2
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c33
-rw-r--r--drivers/platform/x86/huawei-wmi.c2
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c8
-rw-r--r--drivers/platform/x86/intel/int3472/common.c2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c34
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.c80
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470.h3
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c54
-rw-r--r--drivers/platform/x86/intel/oaktrail.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c2
-rw-r--r--drivers/platform/x86/intel/wmi/thunderbolt.c24
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c106
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c82
-rw-r--r--drivers/platform/x86/p2sb.c18
-rw-r--r--drivers/platform/x86/panasonic-laptop.c2
-rw-r--r--drivers/platform/x86/pmc_atom.c44
-rw-r--r--drivers/platform/x86/samsung-laptop.c89
-rw-r--r--drivers/platform/x86/simatic-ipc.c10
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c15
-rw-r--r--drivers/platform/x86/topstar-laptop.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c337
-rw-r--r--drivers/platform/x86/winmate-fm07-keys.c2
-rw-r--r--drivers/platform/x86/wmi.c66
-rw-r--r--drivers/platform/x86/x86-android-tablets.c14
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c7
-rw-r--r--drivers/pnp/pnpbios/pnpbios.h1
-rw-r--r--drivers/power/reset/qcom-pon.c1
-rw-r--r--drivers/power/supply/Kconfig21
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_chargalg.c8
-rw-r--r--drivers/power/supply/adp5061.c9
-rw-r--r--drivers/power/supply/bq2415x_charger.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c4
-rw-r--r--drivers/power/supply/bq24257_charger.c4
-rw-r--r--drivers/power/supply/bq25890_charger.c34
-rw-r--r--drivers/power/supply/bq27xxx_battery.c60
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/power/supply/cpcap-charger.c2
-rw-r--r--drivers/power/supply/cw2015_battery.c14
-rw-r--r--drivers/power/supply/ds2782_battery.c4
-rw-r--r--drivers/power/supply/lp8727_charger.c3
-rw-r--r--drivers/power/supply/max1721x_battery.c2
-rw-r--r--drivers/power/supply/mt6370-charger.c961
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/rk817_charger.c1211
-rw-r--r--drivers/power/supply/rt5033_battery.c4
-rw-r--r--drivers/power/supply/rt9455_charger.c4
-rw-r--r--drivers/power/supply/smb347-charger.c4
-rw-r--r--drivers/power/supply/tps65217_charger.c2
-rw-r--r--drivers/power/supply/z2_battery.c4
-rw-r--r--drivers/powercap/idle_inject.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c5
-rw-r--r--drivers/ptp/ptp_clock.c6
-rw-r--r--drivers/ptp/ptp_ocp.c8
-rw-r--r--drivers/pwm/core.c37
-rw-r--r--drivers/pwm/pwm-lpss-pci.c48
-rw-r--r--drivers/pwm/pwm-lpss-platform.c40
-rw-r--r--drivers/pwm/pwm-lpss.c46
-rw-r--r--drivers/pwm/pwm-lpss.h12
-rw-r--r--drivers/pwm/pwm-pca9685.c4
-rw-r--r--drivers/pwm/pwm-rockchip.c18
-rw-r--r--drivers/pwm/sysfs.c20
-rw-r--r--drivers/ras/cec.c8
-rw-r--r--drivers/regulator/Kconfig28
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/bd71815-regulator.c7
-rw-r--r--drivers/regulator/bd9576-regulator.c17
-rw-r--r--drivers/regulator/core.c107
-rw-r--r--drivers/regulator/da9121-regulator.c3
-rw-r--r--drivers/regulator/devres.c164
-rw-r--r--drivers/regulator/gpio-regulator.c15
-rw-r--r--drivers/regulator/lp8755.c4
-rw-r--r--drivers/regulator/max597x-regulator.c5
-rw-r--r--drivers/regulator/max8973-regulator.c10
-rw-r--r--drivers/regulator/mt6331-regulator.c507
-rw-r--r--drivers/regulator/mt6332-regulator.c422
-rw-r--r--drivers/regulator/of_regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c71
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c24
-rw-r--r--drivers/regulator/qcom_smd-regulator.c400
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c378
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c4
-rw-r--r--drivers/regulator/ti-abb-regulator.c2
-rw-r--r--drivers/regulator/tps65219-regulator.c411
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c4
-rw-r--r--drivers/remoteproc/imx_rproc.c14
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c16
-rw-r--r--drivers/remoteproc/remoteproc_core.c223
-rw-r--r--drivers/remoteproc/remoteproc_internal.h38
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c183
-rw-r--r--drivers/reset/Kconfig9
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-imx7.c1
-rw-r--r--drivers/reset/reset-microchip-sparx5.c22
-rw-r--r--drivers/reset/reset-mpfs.c157
-rw-r--r--drivers/reset/reset-npcm.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c9
-rw-r--r--drivers/rtc/rtc-bq32k.c4
-rw-r--r--drivers/rtc/rtc-ds1374.c4
-rw-r--r--drivers/rtc/rtc-isl12026.c3
-rw-r--r--drivers/rtc/rtc-m41t80.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c3
-rw-r--r--drivers/rtc/rtc-x1205.c3
-rw-r--r--drivers/s390/block/dasd.c86
-rw-r--r--drivers/s390/block/dasd_3990_erp.c5
-rw-r--r--drivers/s390/block/dasd_alias.c9
-rw-r--r--drivers/s390/block/dasd_devmap.c611
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c294
-rw-r--r--drivers/s390/block/dasd_eckd.h9
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/dasd_fba.c2
-rw-r--r--drivers/s390/block/dasd_genhd.c29
-rw-r--r--drivers/s390/block/dasd_int.h75
-rw-r--r--drivers/s390/block/dasd_ioctl.c53
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/hmcdrv_cache.c2
-rw-r--r--drivers/s390/char/tape_class.c4
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/s390/char/zcore.c1
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c30
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/qeth_ethtool.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c6
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c2
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-xxxx.c14
-rw-r--r--drivers/scsi/3w-xxxx.h2
-rw-r--r--drivers/scsi/Kconfig7
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c10
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/esas2r/atioctl.h1
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c37
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c12
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c19
-rw-r--r--drivers/scsi/hosts.c28
-rw-r--r--drivers/scsi/hpsa.c12
-rw-r--r--drivers/scsi/hptiop.c9
-rw-r--r--drivers/scsi/hptiop.h4
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/initio.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c73
-rw-r--r--drivers/scsi/iscsi_tcp.h3
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h37
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c344
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c1092
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c61
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c234
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h59
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h34
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c441
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c273
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c30
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h12
-rw-r--r--drivers/scsi/mpi3mr/Makefile1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h171
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h6
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h5
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h22
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h3
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h252
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_debug.h27
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c1032
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c550
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c3291
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c217
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h39
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c124
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c28
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/qedf/qedf_main.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c50
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h43
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c93
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c59
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c77
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qlogicpti.c3
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_debug.c7
-rw-r--r--drivers/scsi/scsi_error.c22
-rw-r--r--drivers/scsi/scsi_ioctl.c22
-rw-r--r--drivers/scsi/scsi_lib.c56
-rw-r--r--drivers/scsi/scsi_priv.h13
-rw-r--r--drivers/scsi/scsi_scan.c10
-rw-r--r--drivers/scsi/scsi_sysfs.c32
-rw-r--r--drivers/scsi/scsi_transport_fc.c10
-rw-r--r--drivers/scsi/scsi_transport_spi.c7
-rw-r--r--drivers/scsi/sg.c31
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/stex.c17
-rw-r--r--drivers/scsi/storvsc_drv.c13
-rw-r--r--drivers/scsi/virtio_scsi.c8
-rw-r--r--drivers/scsi/wd33c93.c60
-rw-r--r--drivers/scsi/wd33c93.h5
-rw-r--r--drivers/scsi/xen-scsifront.c8
-rw-r--r--drivers/slimbus/Kconfig3
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c31
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c5
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c5
-rw-r--r--drivers/soc/apple/rtkit.c6
-rw-r--r--drivers/soc/bcm/bcm63xx/Kconfig4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c1
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c66
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/qbman/qman.c77
-rw-r--r--drivers/soc/imx/Kconfig8
-rw-r--r--drivers/soc/imx/Makefile2
-rw-r--r--drivers/soc/imx/gpcv2.c5
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c97
-rw-r--r--drivers/soc/imx/imx8mp-blk-ctrl.c89
-rw-r--r--drivers/soc/imx/imx93-blk-ctrl.c436
-rw-r--r--drivers/soc/imx/imx93-pd.c164
-rw-r--r--drivers/soc/imx/imx93-src.c33
-rw-r--r--drivers/soc/mediatek/Kconfig4
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h6
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c20
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c44
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c6
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c2
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c2
-rw-r--r--drivers/soc/mediatek/mtk-svs.c284
-rw-r--r--drivers/soc/pxa/ssp.c6
-rw-r--r--drivers/soc/qcom/Kconfig2
-rw-r--r--drivers/soc/qcom/icc-bwmon.c479
-rw-r--r--drivers/soc/qcom/llcc-qcom.c92
-rw-r--r--drivers/soc/qcom/qcom_stats.c9
-rw-r--r--drivers/soc/qcom/qmi_encdec.c50
-rw-r--r--drivers/soc/qcom/qmi_interface.c12
-rw-r--r--drivers/soc/qcom/rpmpd.c22
-rw-r--r--drivers/soc/qcom/smem_state.c3
-rw-r--r--drivers/soc/qcom/smsm.c20
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/renesas-soc.c14
-rw-r--r--drivers/soc/rockchip/io-domain.c20
-rw-r--r--drivers/soc/rockchip/pm_domains.c130
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c74
-rw-r--r--drivers/soc/tegra/Kconfig10
-rw-r--r--drivers/soc/tegra/Makefile1
-rw-r--r--drivers/soc/tegra/cbb/Makefile9
-rw-r--r--drivers/soc/tegra/cbb/tegra-cbb.c190
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c2364
-rw-r--r--drivers/soc/tegra/cbb/tegra234-cbb.c1113
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c1
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c36
-rw-r--r--drivers/soc/tegra/pmc.c45
-rw-r--r--drivers/soundwire/bus.c126
-rw-r--r--drivers/soundwire/cadence_master.c112
-rw-r--r--drivers/soundwire/cadence_master.h2
-rw-r--r--drivers/soundwire/dmi-quirks.c27
-rw-r--r--drivers/soundwire/intel.c735
-rw-r--r--drivers/soundwire/intel_init.c2
-rw-r--r--drivers/soundwire/qcom.c15
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-amd.c183
-rw-r--r--drivers/spi/spi-aspeed-smc.c4
-rw-r--r--drivers/spi/spi-bitbang-txrx.h6
-rw-r--r--drivers/spi/spi-cadence-quadspi.c41
-rw-r--r--drivers/spi/spi-cadence-xspi.c4
-rw-r--r--drivers/spi/spi-dw-bt1.c4
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c3
-rw-r--r--drivers/spi/spi-fsl-lpspi.c10
-rw-r--r--drivers/spi/spi-fsl-qspi.c3
-rw-r--r--drivers/spi/spi-fsl-spi.c157
-rw-r--r--drivers/spi/spi-gxp.c10
-rw-r--r--drivers/spi/spi-img-spfi.c6
-rw-r--r--drivers/spi/spi-intel.c164
-rw-r--r--drivers/spi/spi-loopback-test.c27
-rw-r--r--drivers/spi/spi-meson-spicc.c8
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c600
-rw-r--r--drivers/spi/spi-microchip-core.c4
-rw-r--r--drivers/spi/spi-mpc52xx.c35
-rw-r--r--drivers/spi/spi-mt65xx.c5
-rw-r--r--drivers/spi/spi-mt7621.c42
-rw-r--r--drivers/spi/spi-mux.c1
-rw-r--r--drivers/spi/spi-npcm-pspi.c1
-rw-r--r--drivers/spi/spi-nxp-fspi.c8
-rw-r--r--drivers/spi/spi-omap-100k.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c41
-rw-r--r--drivers/spi/spi-qup.c21
-rw-r--r--drivers/spi/spi-s3c24xx.c24
-rw-r--r--drivers/spi/spi-s3c64xx.c13
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c125
-rw-r--r--drivers/spi/spi-xilinx.c20
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c16
-rw-r--r--drivers/spi/spi.c164
-rw-r--r--drivers/spmi/spmi-pmic-arb.c91
-rw-r--r--drivers/spmi/spmi.c4
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/clocking-wizard/Kconfig10
-rw-r--r--drivers/staging/clocking-wizard/Makefile2
-rw-r--r--drivers/staging/clocking-wizard/TODO13
-rw-r--r--drivers/staging/clocking-wizard/dt-binding.txt30
-rw-r--r--drivers/staging/fwserial/Kconfig32
-rw-r--r--drivers/staging/fwserial/Makefile3
-rw-r--r--drivers/staging/fwserial/TODO14
-rw-r--r--drivers/staging/fwserial/dma_fifo.c294
-rw-r--r--drivers/staging/fwserial/dma_fifo.h117
-rw-r--r--drivers/staging/fwserial/fwserial.c2890
-rw-r--r--drivers/staging/fwserial/fwserial.h359
-rw-r--r--drivers/staging/greybus/audio_helper.c11
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/cdc/Kconfig17
-rw-r--r--drivers/staging/iio/cdc/Makefile6
-rw-r--r--drivers/staging/iio/frequency/ad9832.c4
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/iio/meter/ade7854.h2
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c4
-rw-r--r--drivers/staging/media/Kconfig29
-rw-r--r--drivers/staging/media/Makefile12
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c3
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c4
-rw-r--r--drivers/staging/media/av7110/TODO3
-rw-r--r--drivers/staging/media/deprecated/cpia2/Kconfig (renamed from drivers/media/usb/cpia2/Kconfig)5
-rw-r--r--drivers/staging/media/deprecated/cpia2/Makefile (renamed from drivers/media/usb/cpia2/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/cpia2/TODO6
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2.h (renamed from drivers/media/usb/cpia2/cpia2.h)0
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_core.c (renamed from drivers/media/usb/cpia2/cpia2_core.c)0
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_registers.h (renamed from drivers/media/usb/cpia2/cpia2_registers.h)0
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_usb.c (renamed from drivers/media/usb/cpia2/cpia2_usb.c)0
-rw-r--r--drivers/staging/media/deprecated/cpia2/cpia2_v4l.c (renamed from drivers/media/usb/cpia2/cpia2_v4l.c)0
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/Kconfig15
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/Makefile2
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/TODO7
-rw-r--r--drivers/staging/media/deprecated/fsl-viu/fsl-viu.c (renamed from drivers/media/platform/nxp/fsl-viu.c)0
-rw-r--r--drivers/staging/media/deprecated/meye/Kconfig (renamed from drivers/media/pci/meye/Kconfig)5
-rw-r--r--drivers/staging/media/deprecated/meye/Makefile (renamed from drivers/media/pci/meye/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/meye/TODO6
-rw-r--r--drivers/staging/media/deprecated/meye/meye.c (renamed from drivers/media/pci/meye/meye.c)0
-rw-r--r--drivers/staging/media/deprecated/meye/meye.h (renamed from drivers/media/pci/meye/meye.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/Kconfig5
-rw-r--r--drivers/staging/media/deprecated/saa7146/Makefile2
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/Kconfig (renamed from drivers/staging/media/av7110/Kconfig)20
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/Makefile (renamed from drivers/staging/media/av7110/Makefile)3
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/TODO9
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-bilingual-channel-select.rst (renamed from drivers/staging/media/av7110/audio-bilingual-channel-select.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-channel-select.rst (renamed from drivers/staging/media/av7110/audio-channel-select.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-clear-buffer.rst (renamed from drivers/staging/media/av7110/audio-clear-buffer.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-continue.rst (renamed from drivers/staging/media/av7110/audio-continue.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-fclose.rst (renamed from drivers/staging/media/av7110/audio-fclose.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-fopen.rst (renamed from drivers/staging/media/av7110/audio-fopen.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-fwrite.rst (renamed from drivers/staging/media/av7110/audio-fwrite.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-get-capabilities.rst (renamed from drivers/staging/media/av7110/audio-get-capabilities.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-get-status.rst (renamed from drivers/staging/media/av7110/audio-get-status.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-pause.rst (renamed from drivers/staging/media/av7110/audio-pause.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-play.rst (renamed from drivers/staging/media/av7110/audio-play.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-select-source.rst (renamed from drivers/staging/media/av7110/audio-select-source.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-set-av-sync.rst (renamed from drivers/staging/media/av7110/audio-set-av-sync.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-set-bypass-mode.rst (renamed from drivers/staging/media/av7110/audio-set-bypass-mode.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-set-id.rst (renamed from drivers/staging/media/av7110/audio-set-id.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-set-mixer.rst (renamed from drivers/staging/media/av7110/audio-set-mixer.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-set-mute.rst (renamed from drivers/staging/media/av7110/audio-set-mute.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-set-streamtype.rst (renamed from drivers/staging/media/av7110/audio-set-streamtype.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio-stop.rst (renamed from drivers/staging/media/av7110/audio-stop.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio.rst (renamed from drivers/staging/media/av7110/audio.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio_data_types.rst (renamed from drivers/staging/media/av7110/audio_data_types.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/audio_function_calls.rst (renamed from drivers/staging/media/av7110/audio_function_calls.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110.c (renamed from drivers/staging/media/av7110/av7110.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110.h (renamed from drivers/staging/media/av7110/av7110.h)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c (renamed from drivers/staging/media/av7110/av7110_av.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_av.h (renamed from drivers/staging/media/av7110/av7110_av.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.c (renamed from drivers/staging/media/av7110/av7110_ca.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.h (renamed from drivers/staging/media/av7110/av7110_ca.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.c (renamed from drivers/staging/media/av7110/av7110_hw.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.h (renamed from drivers/staging/media/av7110/av7110_hw.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.c (renamed from drivers/staging/media/av7110/av7110_ipack.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.h (renamed from drivers/staging/media/av7110/av7110_ipack.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_ir.c (renamed from drivers/staging/media/av7110/av7110_ir.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/av7110_v4l.c (renamed from drivers/staging/media/av7110/av7110_v4l.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/budget-patch.c (renamed from drivers/staging/media/av7110/budget-patch.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.c (renamed from drivers/staging/media/av7110/dvb_filter.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.h (renamed from drivers/staging/media/av7110/dvb_filter.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/sp8870.c (renamed from drivers/staging/media/av7110/sp8870.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/sp8870.h (renamed from drivers/staging/media/av7110/sp8870.h)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-clear-buffer.rst (renamed from drivers/staging/media/av7110/video-clear-buffer.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-command.rst (renamed from drivers/staging/media/av7110/video-command.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-continue.rst (renamed from drivers/staging/media/av7110/video-continue.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-fast-forward.rst (renamed from drivers/staging/media/av7110/video-fast-forward.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-fclose.rst (renamed from drivers/staging/media/av7110/video-fclose.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-fopen.rst (renamed from drivers/staging/media/av7110/video-fopen.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-freeze.rst (renamed from drivers/staging/media/av7110/video-freeze.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-fwrite.rst (renamed from drivers/staging/media/av7110/video-fwrite.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-get-capabilities.rst (renamed from drivers/staging/media/av7110/video-get-capabilities.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-get-event.rst (renamed from drivers/staging/media/av7110/video-get-event.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-get-frame-count.rst (renamed from drivers/staging/media/av7110/video-get-frame-count.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-get-pts.rst (renamed from drivers/staging/media/av7110/video-get-pts.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-get-size.rst (renamed from drivers/staging/media/av7110/video-get-size.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-get-status.rst (renamed from drivers/staging/media/av7110/video-get-status.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-play.rst (renamed from drivers/staging/media/av7110/video-play.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-select-source.rst (renamed from drivers/staging/media/av7110/video-select-source.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-set-blank.rst (renamed from drivers/staging/media/av7110/video-set-blank.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-set-display-format.rst (renamed from drivers/staging/media/av7110/video-set-display-format.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-set-format.rst (renamed from drivers/staging/media/av7110/video-set-format.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-set-streamtype.rst (renamed from drivers/staging/media/av7110/video-set-streamtype.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-slowmotion.rst (renamed from drivers/staging/media/av7110/video-slowmotion.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-stillpicture.rst (renamed from drivers/staging/media/av7110/video-stillpicture.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-stop.rst (renamed from drivers/staging/media/av7110/video-stop.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video-try-command.rst (renamed from drivers/staging/media/av7110/video-try-command.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video.rst (renamed from drivers/staging/media/av7110/video.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video_function_calls.rst (renamed from drivers/staging/media/av7110/video_function_calls.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/av7110/video_types.rst (renamed from drivers/staging/media/av7110/video_types.rst)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/Kconfig (renamed from drivers/media/common/saa7146/Kconfig)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/Makefile (renamed from drivers/media/common/saa7146/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146.h472
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_core.c (renamed from drivers/media/common/saa7146/saa7146_core.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_fops.c (renamed from drivers/media/common/saa7146/saa7146_fops.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_hlp.c (renamed from drivers/media/common/saa7146/saa7146_hlp.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_i2c.c (renamed from drivers/media/common/saa7146/saa7146_i2c.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_vbi.c (renamed from drivers/media/common/saa7146/saa7146_vbi.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_video.c (renamed from drivers/media/common/saa7146/saa7146_video.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h266
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/Kconfig (renamed from drivers/media/pci/saa7146/Kconfig)15
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/Makefile (renamed from drivers/media/pci/saa7146/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/TODO7
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/hexium_gemini.c (renamed from drivers/media/pci/saa7146/hexium_gemini.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/hexium_orion.c (renamed from drivers/media/pci/saa7146/hexium_orion.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/saa7146/mxb.c (renamed from drivers/media/pci/saa7146/mxb.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/Kconfig (renamed from drivers/media/pci/ttpci/Kconfig)17
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/Makefile (renamed from drivers/media/pci/ttpci/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/TODO7
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c (renamed from drivers/media/pci/ttpci/budget-av.c)2
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/budget-ci.c (renamed from drivers/media/pci/ttpci/budget-ci.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/budget-core.c (renamed from drivers/media/pci/ttpci/budget-core.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/budget.c (renamed from drivers/media/pci/ttpci/budget.c)0
-rw-r--r--drivers/staging/media/deprecated/saa7146/ttpci/budget.h (renamed from drivers/media/pci/ttpci/budget.h)2
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/Kconfig (renamed from drivers/staging/media/stkwebcam/Kconfig)0
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/Makefile (renamed from drivers/staging/media/stkwebcam/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/TODO (renamed from drivers/staging/media/stkwebcam/TODO)0
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-sensor.c (renamed from drivers/staging/media/stkwebcam/stk-sensor.c)0
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-webcam.c (renamed from drivers/staging/media/stkwebcam/stk-webcam.c)0
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-webcam.h (renamed from drivers/staging/media/stkwebcam/stk-webcam.h)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/Kconfig (renamed from drivers/media/usb/tm6000/Kconfig)5
-rw-r--r--drivers/staging/media/deprecated/tm6000/Makefile (renamed from drivers/media/usb/tm6000/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/TODO7
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-alsa.c (renamed from drivers/media/usb/tm6000/tm6000-alsa.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-cards.c (renamed from drivers/media/usb/tm6000/tm6000-cards.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-core.c (renamed from drivers/media/usb/tm6000/tm6000-core.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-dvb.c (renamed from drivers/media/usb/tm6000/tm6000-dvb.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-i2c.c (renamed from drivers/media/usb/tm6000/tm6000-i2c.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-input.c (renamed from drivers/media/usb/tm6000/tm6000-input.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-regs.h (renamed from drivers/media/usb/tm6000/tm6000-regs.h)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-stds.c (renamed from drivers/media/usb/tm6000/tm6000-stds.c)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-usb-isoc.h (renamed from drivers/media/usb/tm6000/tm6000-usb-isoc.h)0
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000-video.c (renamed from drivers/media/usb/tm6000/tm6000-video.c)2
-rw-r--r--drivers/staging/media/deprecated/tm6000/tm6000.h (renamed from drivers/media/usb/tm6000/tm6000.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/Kconfig58
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/Makefile4
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/TODO7
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h (renamed from drivers/media/platform/ti/davinci/ccdc_hw_device.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c (renamed from drivers/media/platform/ti/davinci/dm355_ccdc.c)2
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h308
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h (renamed from drivers/media/platform/ti/davinci/dm355_ccdc_regs.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c (renamed from drivers/media/platform/ti/davinci/dm644x_ccdc.c)2
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h171
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h (renamed from drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.c (renamed from drivers/media/platform/ti/davinci/isif.c)2
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.h518
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif_regs.h (renamed from drivers/media/platform/ti/davinci/isif_regs.h)0
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c (renamed from drivers/media/platform/ti/davinci/vpfe_capture.c)0
-rw-r--r--drivers/staging/media/deprecated/zr364xx/Kconfig (renamed from drivers/media/usb/zr364xx/Kconfig)7
-rw-r--r--drivers/staging/media/deprecated/zr364xx/Makefile (renamed from drivers/media/usb/zr364xx/Makefile)0
-rw-r--r--drivers/staging/media/deprecated/zr364xx/TODO7
-rw-r--r--drivers/staging/media/deprecated/zr364xx/zr364xx.c (renamed from drivers/media/usb/zr364xx/zr364xx.c)0
-rw-r--r--drivers/staging/media/hantro/TODO2
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c2
-rw-r--r--drivers/staging/media/max96712/max96712.c4
-rw-r--r--drivers/staging/media/meson/vdec/vdec_hevc.c6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h24
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c4
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c16
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c23
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c28
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_vp8.c43
-rw-r--r--drivers/staging/media/zoran/TODO19
-rw-r--r--drivers/staging/media/zoran/zoran_device.h60
-rw-r--r--drivers/staging/most/i2c/i2c.c4
-rw-r--r--drivers/staging/octeon/ethernet-tx.c4
-rw-r--r--drivers/staging/octeon/ethernet-tx.h4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c4
-rw-r--r--drivers/staging/pi433/pi433_if.c16
-rw-r--r--drivers/staging/pi433/rf69.c2
-rw-r--r--drivers/staging/qlge/qlge_main.c4
-rw-r--r--drivers/staging/r8188eu/Makefile6
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c42
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c1
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c91
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c13
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c285
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c153
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c84
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c19
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c204
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c28
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c45
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c269
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c68
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c14
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c39
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c173
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c65
-rw-r--r--drivers/staging/r8188eu/hal/odm_RegConfig8188E.c89
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c2
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c40
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c45
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c15
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_xmit.c22
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_recv.c91
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c205
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c15
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h1
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_BB.h4
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h3
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_RF.h2
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h7
-rw-r--r--drivers/staging/r8188eu/include/hal_com.h4
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h5
-rw-r--r--drivers/staging/r8188eu/include/ioctl_cfg80211.h89
-rw-r--r--drivers/staging/r8188eu/include/mlme_osdep.h19
-rw-r--r--drivers/staging/r8188eu/include/odm_HWConfig.h1
-rw-r--r--drivers/staging/r8188eu/include/odm_RegConfig8188E.h21
-rw-r--r--drivers/staging/r8188eu/include/odm_types.h5
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h3
-rw-r--r--drivers/staging/r8188eu/include/recv_osdep.h30
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h11
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_rf.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h12
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_xmit.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_ap.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h4
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h20
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h4
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h16
-rw-r--r--drivers/staging/r8188eu/include/wlan_bssdef.h4
-rw-r--r--drivers/staging/r8188eu/include/xmit_osdep.h49
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c16
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c205
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c17
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c11
-rw-r--r--drivers/staging/r8188eu/os_dep/recv_linux.c165
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c60
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c237
-rw-r--r--drivers/staging/rtl8192e/Kconfig12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c12
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c185
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c164
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c26
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c296
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h14
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c210
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c26
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c68
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c68
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c9
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h53
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c128
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h16
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c10
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c42
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c26
-rw-r--r--drivers/staging/rtl8192u/Makefile1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c6
-rw-r--r--drivers/staging/rtl8192u/r8192U.h9
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c226
-rw-r--r--drivers/staging/rtl8192u/r8192U_debugfs.c188
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c36
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c18
-rw-r--r--drivers/staging/rtl8712/xmit_osdep.h2
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c100
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c17
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c21
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c195
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c119
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c57
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c130
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h39
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_precomp.h1
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h3
-rw-r--r--drivers/staging/rtl8723bs/include/hal_btcoex.h1
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h9
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h2
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_odm.h28
-rw-r--r--drivers/staging/rtl8723bs/include/xmit_osdep.h4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c122
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c60
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c50
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c17
-rw-r--r--drivers/staging/sm750fb/sm750.c27
-rw-r--r--drivers/staging/vme_user/vme_tsi148.c70
-rw-r--r--drivers/staging/vt6655/baseband.c2
-rw-r--r--drivers/staging/vt6655/card.c30
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/device_main.c104
-rw-r--r--drivers/staging/vt6655/mac.c141
-rw-r--r--drivers/staging/vt6655/mac.h124
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c12
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h2
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_pscsi.c8
-rw-r--r--drivers/target/target_core_spc.c6
-rw-r--r--drivers/target/target_core_user.c1
-rw-r--r--drivers/tee/optee/ffa_abi.c46
-rw-r--r--drivers/tee/optee/optee_private.h1
-rw-r--r--drivers/tee/tee_shm.c1
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/amlogic_thermal.c16
-rw-r--r--drivers/thermal/armada_thermal.c12
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c14
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c14
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c20
-rw-r--r--drivers/thermal/broadcom/ns-thermal.c50
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c16
-rw-r--r--drivers/thermal/cpufreq_cooling.c12
-rw-r--r--drivers/thermal/da9062-thermal.c5
-rw-r--r--drivers/thermal/db8500_thermal.c8
-rw-r--r--drivers/thermal/gov_bang_bang.c10
-rw-r--r--drivers/thermal/gov_fair_share.c3
-rw-r--r--drivers/thermal/gov_power_allocator.c20
-rw-r--r--drivers/thermal/gov_step_wise.c10
-rw-r--r--drivers/thermal/gov_user_space.c5
-rw-r--r--drivers/thermal/hisi_thermal.c14
-rw-r--r--drivers/thermal/imx8mm_thermal.c14
-rw-r--r--drivers/thermal/imx_sc_thermal.c74
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c5
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c13
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c13
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c27
-rw-r--r--drivers/thermal/k3_bandgap.c12
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c12
-rw-r--r--drivers/thermal/max77620_thermal.c8
-rw-r--r--drivers/thermal/mtk_thermal.c10
-rw-r--r--drivers/thermal/qcom/Kconfig2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c26
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c12
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c2
-rw-r--r--drivers/thermal/qcom/tsens.c16
-rw-r--r--drivers/thermal/qoriq_thermal.c12
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c16
-rw-r--r--drivers/thermal/rcar_thermal.c13
-rw-r--r--drivers/thermal/rockchip_thermal.c14
-rw-r--r--drivers/thermal/rzg2l_thermal.c10
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c24
-rw-r--r--drivers/thermal/sprd_thermal.c18
-rw-r--r--drivers/thermal/st/stm_thermal.c18
-rw-r--r--drivers/thermal/sun8i_thermal.c14
-rw-r--r--drivers/thermal/tegra/soctherm.c21
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c19
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c12
-rw-r--r--drivers/thermal/thermal-generic-adc.c10
-rw-r--r--drivers/thermal/thermal_core.c82
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/thermal/thermal_helpers.c73
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thermal/thermal_mmio.c19
-rw-r--r--drivers/thermal/thermal_netlink.c1
-rw-r--r--drivers/thermal/thermal_of.c1192
-rw-r--r--drivers/thermal/thermal_sysfs.c19
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c16
-rw-r--r--drivers/thermal/uniphier_thermal.c10
-rw-r--r--drivers/thunderbolt/Kconfig13
-rw-r--r--drivers/thunderbolt/acpi.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/debugfs.c836
-rw-r--r--drivers/thunderbolt/domain.c10
-rw-r--r--drivers/thunderbolt/icm.c4
-rw-r--r--drivers/thunderbolt/nhi.c104
-rw-r--r--drivers/thunderbolt/nhi.h4
-rw-r--r--drivers/thunderbolt/nvm.c385
-rw-r--r--drivers/thunderbolt/retimer.c113
-rw-r--r--drivers/thunderbolt/sb_regs.h58
-rw-r--r--drivers/thunderbolt/switch.c456
-rw-r--r--drivers/thunderbolt/tb.c41
-rw-r--r--drivers/thunderbolt/tb.h58
-rw-r--r--drivers/thunderbolt/tb_regs.h3
-rw-r--r--drivers/thunderbolt/usb4.c148
-rw-r--r--drivers/thunderbolt/usb4_port.c2
-rw-r--r--drivers/thunderbolt/xdomain.c35
-rw-r--r--drivers/tty/amiserial.c6
-rw-r--r--drivers/tty/hvc/hvc_iucv.c11
-rw-r--r--drivers/tty/hvc/hvcs.c3
-rw-r--r--drivers/tty/moxa.c9
-rw-r--r--drivers/tty/mxser.c8
-rw-r--r--drivers/tty/n_gsm.c287
-rw-r--r--drivers/tty/n_hdlc.c28
-rw-r--r--drivers/tty/n_tty.c2
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/21285.c5
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c2
-rw-r--r--drivers/tty/serial/8250/8250_core.c16
-rw-r--r--drivers/tty/serial/8250/8250_dma.c7
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h2
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c2
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/8250/8250_mid.c5
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c10
-rw-r--r--drivers/tty/serial/8250/8250_pci.c14
-rw-r--r--drivers/tty/serial/8250/8250_port.c62
-rw-r--r--drivers/tty/serial/Kconfig9
-rw-r--r--drivers/tty/serial/altera_jtaguart.c36
-rw-r--r--drivers/tty/serial/altera_uart.c18
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c20
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c9
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/atmel_serial.c91
-rw-r--r--drivers/tty/serial/atmel_serial.h75
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/clps711x.c2
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h1
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c55
-rw-r--r--drivers/tty/serial/digicolor-usart.c2
-rw-r--r--drivers/tty/serial/dz.c11
-rw-r--r--drivers/tty/serial/earlycon.c6
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c2
-rw-r--r--drivers/tty/serial/fsl_lpuart.c33
-rw-r--r--drivers/tty/serial/icom.c5
-rw-r--r--drivers/tty/serial/imx.c10
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c3
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c4
-rw-r--r--drivers/tty/serial/lantiq.c25
-rw-r--r--drivers/tty/serial/liteuart.c2
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c12
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c6
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/men_z135_uart.c4
-rw-r--r--drivers/tty/serial/meson_uart.c31
-rw-r--r--drivers/tty/serial/milbeaut_usio.c3
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c30
-rw-r--r--drivers/tty/serial/mps2-uart.c2
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c2
-rw-r--r--drivers/tty/serial/omap-serial.c49
-rw-r--r--drivers/tty/serial/owl-uart.c2
-rw-r--r--drivers/tty/serial/pch_uart.c7
-rw-r--r--drivers/tty/serial/pic32_uart.c52
-rw-r--r--drivers/tty/serial/pmac_zilog.c4
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c8
-rw-r--r--drivers/tty/serial/rda-uart.c2
-rw-r--r--drivers/tty/serial/rp2.c5
-rw-r--r--drivers/tty/serial/sa1100.c2
-rw-r--r--drivers/tty/serial/samsung_tty.c2
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c6
-rw-r--r--drivers/tty/serial/sccnxp.c3
-rw-r--r--drivers/tty/serial/serial-tegra.c20
-rw-r--r--drivers/tty/serial/serial_core.c46
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c8
-rw-r--r--drivers/tty/serial/sifive.c4
-rw-r--r--drivers/tty/serial/sprd_serial.c5
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c108
-rw-r--r--drivers/tty/serial/sunhv.c2
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsab.c22
-rw-r--r--drivers/tty/serial/sunsu.c8
-rw-r--r--drivers/tty/serial/sunzilog.c8
-rw-r--r--drivers/tty/serial/tegra-tcu.c4
-rw-r--r--drivers/tty/serial/timbuart.c4
-rw-r--r--drivers/tty/serial/uartlite.c5
-rw-r--r--drivers/tty/serial/ucc_uart.c18
-rw-r--r--drivers/tty/serial/vt8500_serial.c17
-rw-r--r--drivers/tty/serial/xilinx_uartps.c62
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/synclink_gt.c11
-rw-r--r--drivers/tty/tty.h2
-rw-r--r--drivers/tty/tty_baudrate.c26
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/tty/tty_ioctl.c79
-rw-r--r--drivers/tty/tty_mutex.c6
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/vt.c23
-rw-r--r--drivers/ufs/core/ufs-sysfs.c85
-rw-r--r--drivers/ufs/core/ufshcd-priv.h11
-rw-r--r--drivers/ufs/core/ufshcd.c104
-rw-r--r--drivers/ufs/core/ufshpb.c8
-rw-r--r--drivers/ufs/host/ufs-mediatek-trace.h27
-rw-r--r--drivers/ufs/host/ufs-mediatek.c205
-rw-r--r--drivers/ufs/host/ufs-mediatek.h7
-rw-r--r--drivers/ufs/host/ufs-qcom.c2
-rw-r--r--drivers/uio/uio_dfl.c2
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c4
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c2
-rw-r--r--drivers/usb/chipidea/Kconfig10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c1
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/otg_fsm.c7
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/common/debug.c96
-rw-r--r--drivers/usb/common/ulpi.c20
-rw-r--r--drivers/usb/common/usb-conn-gpio.c6
-rw-r--r--drivers/usb/core/devio.c2
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c11
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c13
-rw-r--r--drivers/usb/dwc2/core.c30
-rw-r--r--drivers/usb/dwc2/core.h30
-rw-r--r--drivers/usb/dwc2/core_intr.c30
-rw-r--r--drivers/usb/dwc2/hcd.c30
-rw-r--r--drivers/usb/dwc2/hcd.h31
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c30
-rw-r--r--drivers/usb/dwc2/hcd_intr.c30
-rw-r--r--drivers/usb/dwc2/hcd_queue.c30
-rw-r--r--drivers/usb/dwc2/hw.h30
-rw-r--r--drivers/usb/dwc2/params.c30
-rw-r--r--drivers/usb/dwc2/pci.c30
-rw-r--r--drivers/usb/dwc2/platform.c38
-rw-r--r--drivers/usb/dwc3/core.c174
-rw-r--r--drivers/usb/dwc3/core.h7
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/drd.c50
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c18
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c118
-rw-r--r--drivers/usb/dwc3/dwc3-st.c2
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c63
-rw-r--r--drivers/usb/dwc3/ep0.c11
-rw-r--r--drivers/usb/dwc3/gadget.c81
-rw-r--r--drivers/usb/dwc3/host.c11
-rw-r--r--drivers/usb/dwc3/trace.h3
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c11
-rw-r--r--drivers/usb/gadget/function/f_ncm.c60
-rw-r--r--drivers/usb/gadget/function/f_printer.c12
-rw-r--r--drivers/usb/gadget/function/f_tcm.c4
-rw-r--r--drivers/usb/gadget/function/f_uac2.c16
-rw-r--r--drivers/usb/gadget/function/f_uvc.c37
-rw-r--r--drivers/usb/gadget/function/rndis.c4
-rw-r--r--drivers/usb/gadget/function/storage_common.c6
-rw-r--r--drivers/usb/gadget/function/u_ether.c8
-rw-r--r--drivers/usb/gadget/function/u_serial.c4
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.c4
-rw-r--r--drivers/usb/gadget/function/uvc.h3
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c294
-rw-r--r--drivers/usb/gadget/function/uvc_video.c9
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c10
-rw-r--r--drivers/usb/gadget/udc/core.c26
-rw-r--r--drivers/usb/gadget/udc/net2272.c4
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c4
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c131
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c78
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h3
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c6
-rw-r--r--drivers/usb/host/Kconfig8
-rw-r--r--drivers/usb/host/ehci-atmel.c3
-rw-r--r--drivers/usb/host/ehci-exynos.c19
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c53
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-orion.c4
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-spear.c4
-rw-r--r--drivers/usb/host/ehci-st.c4
-rw-r--r--drivers/usb/host/fhci-hcd.c63
-rw-r--r--drivers/usb/host/fhci-hub.c15
-rw-r--r--drivers/usb/host/fhci.h4
-rw-r--r--drivers/usb/host/fotg210-hcd.c1
-rw-r--r--drivers/usb/host/ohci-at91.c3
-rw-r--r--drivers/usb/host/ohci-da8xx.c1
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c1
-rw-r--r--drivers/usb/host/ohci-nxp.c2
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-pci.c4
-rw-r--r--drivers/usb/host/ohci-platform.c32
-rw-r--r--drivers/usb/host/ohci-pxa27x.c4
-rw-r--r--drivers/usb/host/ohci-s3c2410.c3
-rw-r--r--drivers/usb/host/ohci-spear.c3
-rw-r--r--drivers/usb/host/ohci-st.c4
-rw-r--r--drivers/usb/host/u132-hcd.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c2
-rw-r--r--drivers/usb/host/xhci-hub.c13
-rw-r--r--drivers/usb/host/xhci-mem.c7
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c15
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-plat.c29
-rw-r--r--drivers/usb/host/xhci.c27
-rw-r--r--drivers/usb/host/xhci.h9
-rw-r--r--drivers/usb/misc/idmouse.c8
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c5
-rw-r--r--drivers/usb/misc/usb251xb.c18
-rw-r--r--drivers/usb/misc/usb3503.c29
-rw-r--r--drivers/usb/misc/uss720.c8
-rw-r--r--drivers/usb/mon/mon_bin.c5
-rw-r--r--drivers/usb/mtu3/mtu3_core.c2
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c2
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/da8xx.c8
-rw-r--r--drivers/usb/musb/jz4740.c10
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_cppi41.c6
-rw-r--r--drivers/usb/musb/musb_gadget.c3
-rw-r--r--drivers/usb/musb/sunxi.c29
-rw-r--r--drivers/usb/phy/phy-generic.c9
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c4
-rw-r--r--drivers/usb/phy/phy-isp1301.c4
-rw-r--r--drivers/usb/phy/phy-jz4770.c25
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c14
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/ch341.c21
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/cp210x.c14
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/f81232.c3
-rw-r--r--drivers/usb/serial/f81534.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c493
-rw-r--r--drivers/usb/serial/ftdi_sio.h22
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h13
-rw-r--r--drivers/usb/serial/io_edgeport.c7
-rw-r--r--drivers/usb/serial/io_ti.c8
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/iuu_phoenix.c3
-rw-r--r--drivers/usb/serial/keyspan.c3
-rw-r--r--drivers/usb/serial/keyspan_pda.c3
-rw-r--r--drivers/usb/serial/kl5kusb105.c5
-rw-r--r--drivers/usb/serial/kobil_sct.c6
-rw-r--r--drivers/usb/serial/mct_u232.c5
-rw-r--r--drivers/usb/serial/mos7720.c5
-rw-r--r--drivers/usb/serial/mos7840.c5
-rw-r--r--drivers/usb/serial/mxuport.c4
-rw-r--r--drivers/usb/serial/option.c21
-rw-r--r--drivers/usb/serial/oti6858.c6
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/quatech2.c4
-rw-r--r--drivers/usb/serial/spcp8x5.c3
-rw-r--r--drivers/usb/serial/ssu100.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/upd78f0730.c4
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/serial/xr_serial.c20
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h13
-rw-r--r--drivers/usb/storage/unusual_uas.h28
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/anx7411.c8
-rw-r--r--drivers/usb/typec/class.c1
-rw-r--r--drivers/usb/typec/hd3ss3220.c4
-rw-r--r--drivers/usb/typec/mux.c4
-rw-r--r--drivers/usb/typec/mux/fsa4480.c4
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c12
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c3
-rw-r--r--drivers/usb/typec/qcom-pmic-typec.c5
-rw-r--r--drivers/usb/typec/retimer.c2
-rw-r--r--drivers/usb/typec/rt1719.c4
-rw-r--r--drivers/usb/typec/stusb160x.c11
-rw-r--r--drivers/usb/typec/tcpm/Kconfig11
-rw-r--r--drivers/usb/typec/tcpm/Makefile1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c26
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c207
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c153
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/tipd/core.c4
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c63
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c16
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c6
-rw-r--r--drivers/usb/typec/wusb3801.c4
-rw-r--r--drivers/usb/usbip/stub_main.c2
-rw-r--r--drivers/usb/usbip/stub_rx.c4
-rw-r--r--drivers/usb/usbip/usbip_common.c91
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c4
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c17
-rw-r--r--drivers/vdpa/vdpa.c74
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c12
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c2
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c5
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c9
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c22
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c13
-rw-r--r--drivers/vhost/net.c19
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/aperture.c69
-rw-r--r--drivers/video/backlight/Kconfig13
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp8860_bl.c4
-rw-r--r--drivers/video/backlight/adp8870_bl.c4
-rw-r--r--drivers/video/backlight/arcxcnn_bl.c4
-rw-r--r--drivers/video/backlight/bd6107.c4
-rw-r--r--drivers/video/backlight/lm3630a_bl.c3
-rw-r--r--drivers/video/backlight/lm3639_bl.c3
-rw-r--r--drivers/video/backlight/lp855x_bl.c4
-rw-r--r--drivers/video/backlight/lv5207lp.c4
-rw-r--r--drivers/video/backlight/mt6370-backlight.c351
-rw-r--r--drivers/video/backlight/tosa_bl.c3
-rw-r--r--drivers/video/fbdev/arkfb.c5
-rw-r--r--drivers/video/fbdev/asiliantfb.c5
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c57
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c7
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c83
-rw-r--r--drivers/video/fbdev/carminefb.c5
-rw-r--r--drivers/video/fbdev/chipsfb.c13
-rw-r--r--drivers/video/fbdev/cirrusfb.c5
-rw-r--r--drivers/video/fbdev/core/fbmem.c219
-rw-r--r--drivers/video/fbdev/cyber2000fb.c5
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c5
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c5
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c5
-rw-r--r--drivers/video/fbdev/gxt4500.c5
-rw-r--r--drivers/video/fbdev/hyperv_fb.c10
-rw-r--r--drivers/video/fbdev/i740fb.c5
-rw-r--r--drivers/video/fbdev/i810/i810_main.c315
-rw-r--r--drivers/video/fbdev/imsttfb.c36
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c5
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c5
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c5
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.c3
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c5
-rw-r--r--drivers/video/fbdev/neofb.c41
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c7
-rw-r--r--drivers/video/fbdev/pm2fb.c5
-rw-r--r--drivers/video/fbdev/pm3fb.c5
-rw-r--r--drivers/video/fbdev/pvr2fb.c5
-rw-r--r--drivers/video/fbdev/riva/fbdev.c67
-rw-r--r--drivers/video/fbdev/s3fb.c5
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c5
-rw-r--r--drivers/video/fbdev/sis/sis_main.c5
-rw-r--r--drivers/video/fbdev/skeletonfb.c210
-rw-r--r--drivers/video/fbdev/sm712fb.c5
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/sstfb.c43
-rw-r--r--drivers/video/fbdev/sunxvr2500.c5
-rw-r--r--drivers/video/fbdev/sunxvr500.c5
-rw-r--r--drivers/video/fbdev/tdfxfb.c5
-rw-r--r--drivers/video/fbdev/tgafb.c17
-rw-r--r--drivers/video/fbdev/tridentfb.c5
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c7
-rw-r--r--drivers/video/fbdev/vga16fb.c191
-rw-r--r--drivers/video/fbdev/via/via-core.c5
-rw-r--r--drivers/video/fbdev/vt8623fb.c5
-rw-r--r--drivers/video/hdmi.c82
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig2
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c16
-rw-r--r--drivers/virtio/virtio_pci_common.c3
-rw-r--r--drivers/virtio/virtio_ring.c8
-rw-r--r--drivers/w1/masters/ds2482.c3
-rw-r--r--drivers/w1/w1_netlink.c3
-rw-r--r--drivers/watchdog/Kconfig10
-rw-r--r--drivers/watchdog/booke_wdt.c8
-rw-r--r--drivers/watchdog/ziirave_wdt.c4
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--drivers/xen/xen-scsiback.c12
-rw-r--r--drivers/xen/xenbus/xenbus_client.c9
5513 files changed, 308369 insertions, 106416 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 057857258bfd..bdf1c66141c9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -175,6 +175,7 @@ obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/
+obj-$(CONFIG_HISI_PTT) += hwtracing/ptt/
obj-y += android/
obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/
diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c
index 34f11cd47073..56419dbb28d3 100644
--- a/drivers/accessibility/speakup/speakup_dummy.c
+++ b/drivers/accessibility/speakup/speakup_dummy.c
@@ -27,6 +27,7 @@ static struct var_t vars[] = {
{ INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } },
{ VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
{ TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } },
+ { PUNCT, .u.n = {"PUNCT %d\n", 0, 0, 3, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
@@ -42,6 +43,8 @@ static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute punct_attribute =
+ __ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
@@ -69,6 +72,7 @@ static struct attribute *synth_attrs[] = {
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&inflection_attribute.attr,
+ &punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
diff --git a/drivers/accessibility/speakup/speakup_soft.c b/drivers/accessibility/speakup/speakup_soft.c
index 99f1d4ac426a..28c8f60370cf 100644
--- a/drivers/accessibility/speakup/speakup_soft.c
+++ b/drivers/accessibility/speakup/speakup_soft.c
@@ -26,6 +26,7 @@
static int softsynth_probe(struct spk_synth *synth);
static void softsynth_release(struct spk_synth *synth);
static int softsynth_is_alive(struct spk_synth *synth);
+static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var);
static unsigned char get_index(struct spk_synth *synth);
static struct miscdevice synth_device, synthu_device;
@@ -33,6 +34,9 @@ static int init_pos;
static int misc_registered;
static struct var_t vars[] = {
+ /* DIRECT is put first so that module_param_named can access it easily */
+ { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
+
{ CAPS_START, .u.s = {"\x01+3p" } },
{ CAPS_STOP, .u.s = {"\x01-3p" } },
{ PAUSE, .u.n = {"\x01P" } },
@@ -41,10 +45,9 @@ static struct var_t vars[] = {
{ INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } },
{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
{ TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
- { PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } },
+ { PUNCT, .u.n = {"\x01%db", 0, 0, 3, 0, 0, NULL } },
{ VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
{ FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
- { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR
};
@@ -133,7 +136,7 @@ static struct spk_synth synth_soft = {
.catch_up = NULL,
.flush = NULL,
.is_alive = softsynth_is_alive,
- .synth_adjust = NULL,
+ .synth_adjust = softsynth_adjust,
.read_buff_add = NULL,
.get_index = get_index,
.indexing = {
@@ -426,9 +429,32 @@ static int softsynth_is_alive(struct spk_synth *synth)
return 0;
}
+static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var)
+{
+ struct st_var_header *punc_level_var;
+ struct var_t *var_data;
+
+ if (var->var_id != PUNC_LEVEL)
+ return 0;
+
+ /* We want to set the speech synthesis punctuation level
+ * accordingly, so it properly tunes speaking A_PUNC characters */
+ var_data = var->data;
+ if (!var_data)
+ return 0;
+ punc_level_var = spk_get_var_header(PUNCT);
+ if (!punc_level_var)
+ return 0;
+ spk_set_num_var(var_data->u.n.value, punc_level_var, E_SET);
+
+ return 1;
+}
+
module_param_named(start, synth_soft.startup, short, 0444);
+module_param_named(direct, vars[0].u.n.default_val, int, 0444);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
+MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_soft);
diff --git a/drivers/accessibility/speakup/spk_types.h b/drivers/accessibility/speakup/spk_types.h
index 6a96ad94bc3f..3a14d39bf896 100644
--- a/drivers/accessibility/speakup/spk_types.h
+++ b/drivers/accessibility/speakup/spk_types.h
@@ -195,7 +195,7 @@ struct spk_synth {
void (*catch_up)(struct spk_synth *synth);
void (*flush)(struct spk_synth *synth);
int (*is_alive)(struct spk_synth *synth);
- int (*synth_adjust)(struct st_var_header *var);
+ int (*synth_adjust)(struct spk_synth *synth, struct st_var_header *var);
void (*read_buff_add)(u_char c);
unsigned char (*get_index)(struct spk_synth *synth);
struct synth_indexing indexing;
diff --git a/drivers/accessibility/speakup/varhandlers.c b/drivers/accessibility/speakup/varhandlers.c
index 067c0da97dcb..e1c9f42e39f0 100644
--- a/drivers/accessibility/speakup/varhandlers.c
+++ b/drivers/accessibility/speakup/varhandlers.c
@@ -138,6 +138,7 @@ struct st_var_header *spk_get_var_header(enum var_id_t var_id)
return NULL;
return p_header;
}
+EXPORT_SYMBOL_GPL(spk_get_var_header);
struct st_var_header *spk_var_header_by_name(const char *name)
{
@@ -221,15 +222,17 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
*p_val = val;
if (var->var_id == PUNC_LEVEL) {
spk_punc_mask = spk_punc_masks[val];
- return 0;
}
if (var_data->u.n.multiplier != 0)
val *= var_data->u.n.multiplier;
val += var_data->u.n.offset;
- if (var->var_id < FIRST_SYNTH_VAR || !synth)
+
+ if (!synth)
+ return 0;
+ if (synth->synth_adjust && synth->synth_adjust(synth, var))
+ return 0;
+ if (var->var_id < FIRST_SYNTH_VAR)
return 0;
- if (synth->synth_adjust)
- return synth->synth_adjust(var);
if (!var_data->u.n.synth_fmt)
return 0;
@@ -245,6 +248,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
synth_printf("%s", cp);
return 0;
}
+EXPORT_SYMBOL_GPL(spk_set_num_var);
int spk_set_string_var(const char *page, struct st_var_header *var, int len)
{
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7802d8846a8d..473241b5193f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -27,9 +27,6 @@ menuconfig ACPI
Management (APM) specification. If both ACPI and APM support
are configured, ACPI is used.
- The project home page for the Linux ACPI subsystem is here:
- <https://01.org/linux-acpi>
-
Linux support for ACPI is based on Intel Corporation's ACPI
Component Architecture (ACPI CA). For more information on the
ACPI CA, see:
@@ -212,6 +209,7 @@ config ACPI_VIDEO
tristate "Video"
depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on ACPI_WMI || !X86
select THERMAL
help
This driver implements the ACPI Extensions For Display Adapters
@@ -347,7 +345,6 @@ config ACPI_CUSTOM_DSDT_FILE
depends on !STANDALONE
help
This option supports a custom DSDT by linking it into the kernel.
- See Documentation/admin-guide/acpi/dsdt-override.rst
Enter the full path name to the file which includes the AmlCode
or dsdt_aml_code declaration.
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index c29e41bfcf35..bb9fe7984b1a 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -36,11 +36,6 @@ static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
-struct acpi_ac_bl {
- const char *hid;
- int hrv;
-};
-
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
{"", 0},
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index ab8a4e0191b1..f5b443ab01c2 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -21,6 +21,7 @@
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
+ {"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */
{"ARMHC500", 0}, /* ARM CoreSight ETM4x */
{"ARMHC501", 0}, /* ARM CoreSight ETR */
{"ARMHC502", 0}, /* ARM CoreSight STM */
@@ -48,6 +49,7 @@ static void amba_register_dummy_clk(void)
static int amba_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
+ struct acpi_device *parent = acpi_dev_parent(adev);
struct amba_device *dev;
struct resource_entry *rentry;
struct list_head resource_list;
@@ -97,8 +99,8 @@ static int amba_handler_attach(struct acpi_device *adev,
* attached to it, that physical device should be the parent of
* the amba device we are about to create.
*/
- if (adev->parent)
- dev->dev.parent = acpi_get_first_physical_node(adev->parent);
+ if (parent)
+ dev->dev.parent = acpi_get_first_physical_node(parent);
ACPI_COMPANION_SET(&dev->dev, adev);
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ad245bbd965e..3bbe2276cac7 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -60,12 +60,6 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
}
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
-static int misc_check_res(struct acpi_resource *ares, void *data)
-{
- struct resource res;
-
- return !acpi_dev_resource_memory(ares, &res);
-}
static int fch_misc_setup(struct apd_private_data *pdata)
{
@@ -82,8 +76,7 @@ static int fch_misc_setup(struct apd_private_data *pdata)
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list, misc_check_res,
- NULL);
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
return -ENOENT;
diff --git a/drivers/acpi/acpi_fpdt.c b/drivers/acpi/acpi_fpdt.c
index 6922a44b3ce7..a2056c4c8cb7 100644
--- a/drivers/acpi/acpi_fpdt.c
+++ b/drivers/acpi/acpi_fpdt.c
@@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = {
static struct kobject *fpdt_kobj;
+#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT
+#include <linux/processor.h>
+static bool fpdt_address_valid(u64 address)
+{
+ /*
+ * On some systems the table contains invalid addresses
+ * with unsupported high address bits set, check for this.
+ */
+ return !(address >> boot_cpu_data.x86_phys_bits);
+}
+#else
+static bool fpdt_address_valid(u64 address)
+{
+ return true;
+}
+#endif
+
static int fpdt_process_subtable(u64 address, u32 subtable_type)
{
struct fpdt_subtable_header *subtable_header;
@@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
u32 length, offset;
int result;
+ if (!fpdt_address_valid(address)) {
+ pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address);
+ return -EINVAL;
+ }
+
subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
if (!subtable_header)
return -ENOMEM;
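
The check above simply rejects any table address with bits set above the CPU's reported physical-address width. Below is a standalone userspace sketch (not kernel code) of the same test; the 46-bit width is only an assumed example value, the kernel reads the real one from boot_cpu_data.x86_phys_bits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* phys_bits is passed in explicitly here; 46 is only an example value. */
static bool fpdt_address_valid(uint64_t address, unsigned int phys_bits)
{
	/* Any bit above the physical address width marks a bogus entry. */
	return !(address >> phys_bits);
}

int main(void)
{
	printf("%d\n", fpdt_address_valid(0xfed00000ULL, 46));         /* 1: plausible */
	printf("%d\n", fpdt_address_valid(0xffffff0000000000ULL, 46)); /* 0: firmware bug */
	return 0;
}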
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index c4d4d21391d7..f08ffa75f4a7 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -167,10 +167,10 @@ static struct pwm_lookup byt_pwm_lookup[] = {
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
- struct acpi_device *adev = pdata->adev;
+ u64 uid;
/* Only call pwm_add_table for the first PWM controller */
- if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
return;
pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
@@ -180,14 +180,13 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
static void byt_i2c_setup(struct lpss_private_data *pdata)
{
- const char *uid_str = acpi_device_uid(pdata->adev);
acpi_handle handle = pdata->adev->handle;
unsigned long long shared_host = 0;
acpi_status status;
- long uid = 0;
+ u64 uid;
- /* Expected to always be true, but better safe then sorry */
- if (uid_str && !kstrtol(uid_str, 10, &uid) && uid) {
+ /* Expected to always be successful, but better safe than sorry */
+ if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
/* Detect I2C bus shared with PUNIT and ignore its d3 status */
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
if (ACPI_SUCCESS(status) && shared_host)
@@ -211,10 +210,10 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
- struct acpi_device *adev = pdata->adev;
+ u64 uid;
/* Only call pwm_add_table for the first PWM controller */
- if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
return;
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
@@ -392,13 +391,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
#ifdef CONFIG_X86_INTEL_LPSS
-static int is_memory(struct acpi_resource *res, void *not_used)
-{
- struct resource r;
-
- return !acpi_dev_resource_memory(res, &r);
-}
-
/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;
@@ -659,29 +651,25 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
goto err_out;
- list_for_each_entry(rentry, &resource_list, node)
- if (resource_type(rentry->res) == IORESOURCE_MEM) {
- if (dev_desc->prv_size_override)
- pdata->mmio_size = dev_desc->prv_size_override;
- else
- pdata->mmio_size = resource_size(rentry->res);
- pdata->mmio_base = ioremap(rentry->res->start,
- pdata->mmio_size);
- break;
- }
+ rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
+ if (rentry) {
+ if (dev_desc->prv_size_override)
+ pdata->mmio_size = dev_desc->prv_size_override;
+ else
+ pdata->mmio_size = resource_size(rentry->res);
+ pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
+ }
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
adev->pnp.type.platform_id = 0;
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
+ goto out_free;
}
pdata->adev = adev;
@@ -692,11 +680,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
if (dev_desc->flags & LPSS_CLK) {
ret = register_device_clock(adev, pdata);
- if (ret) {
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
- }
+ if (ret)
+ goto out_free;
}
/*
@@ -708,15 +693,19 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
- if (!IS_ERR_OR_NULL(pdev)) {
- acpi_lpss_create_device_links(adev, pdev);
- return 1;
+ if (IS_ERR_OR_NULL(pdev)) {
+ adev->driver_data = NULL;
+ ret = PTR_ERR(pdev);
+ goto err_out;
}
- ret = PTR_ERR(pdev);
- adev->driver_data = NULL;
+ acpi_lpss_create_device_links(adev, pdev);
+ return 1;
- err_out:
+out_free:
+ /* Skip the device, but continue the namespace scan */
+ ret = 0;
+err_out:
kfree(pdata);
return ret;
}
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index a12b55d81209..ee4ce5ba1fb2 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -23,6 +23,12 @@
#include <acpi/pcc.h>
+/*
+ * Arbitrary retries in case the remote processor is slow to respond
+ * to PCC commands
+ */
+#define PCC_CMD_WAIT_RETRIES_NUM 500
+
struct pcc_data {
struct pcc_mbox_chan *pcc_chan;
void __iomem *pcc_comm_addr;
@@ -63,6 +69,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
if (IS_ERR(data->pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
ctx->subspace_id);
+ kfree(data);
return AE_NOT_FOUND;
}
@@ -72,6 +79,8 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
if (!data->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
ctx->subspace_id);
+ pcc_mbox_free_channel(data->pcc_chan);
+ kfree(data);
return AE_NO_MEMORY;
}
@@ -86,6 +95,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
{
int ret;
struct pcc_data *data = region_context;
+ u64 usecs_lat;
reinit_completion(&data->done);
@@ -96,10 +106,22 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
if (ret < 0)
return AE_ERROR;
- if (data->pcc_chan->mchan->mbox->txdone_irq)
- wait_for_completion(&data->done);
+ if (data->pcc_chan->mchan->mbox->txdone_irq) {
+ /*
+ * pcc_chan->latency is just a Nominal value. In reality the remote
+ * processor could be much slower to reply. So add an arbitrary
+ * amount of wait on top of Nominal.
+ */
+ usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency;
+ ret = wait_for_completion_timeout(&data->done,
+ usecs_to_jiffies(usecs_lat));
+ if (ret == 0) {
+ pr_err("PCC command executed timeout!\n");
+ return AE_TIME;
+ }
+ }
- mbox_client_txdone(data->pcc_chan->mchan, ret);
+ mbox_chan_txdone(data->pcc_chan->mchan, ret);
memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
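
The timeout above is just the channel's nominal latency multiplied by a fixed retry count and converted to jiffies. A standalone sketch of that arithmetic, assuming HZ=250 and a 100 us nominal latency purely as example numbers:

#include <stdint.h>
#include <stdio.h>

#define PCC_CMD_WAIT_RETRIES_NUM 500
#define HZ 250				/* assumed tick rate for the example */

static unsigned long usecs_to_jiffies_approx(uint64_t usecs)
{
	/* Round up so short timeouts never truncate to zero jiffies. */
	return (unsigned long)((usecs * HZ + 999999) / 1000000);
}

int main(void)
{
	uint64_t nominal_latency_us = 100;	/* from the PCC subspace descriptor */
	uint64_t usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * nominal_latency_us;

	printf("wait up to %llu us (~%lu jiffies)\n",
	       (unsigned long long)usecs_lat, usecs_to_jiffies_approx(usecs_lat));
	return 0;
}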
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index de3cbf152dee..fe00a5783f53 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -20,13 +20,13 @@
#include "internal.h"
static const struct acpi_device_id forbidden_id_list[] = {
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
{"PNP0000", 0}, /* PIC */
{"PNP0100", 0}, /* Timer */
{"PNP0200", 0}, /* AT DMA Controller */
- {"ACPI0009", 0}, /* IOxAPIC */
- {"ACPI000A", 0}, /* IOAPIC */
{"SMB0001", 0}, /* ACPI SMBUS virtual device */
- {"", 0},
+ { }
};
static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
@@ -78,7 +78,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
* If the device has parent we need to take its resources into
* account as well because this device might consume part of those.
*/
- parent = acpi_get_first_physical_node(adev->parent);
+ parent = acpi_get_first_physical_node(acpi_dev_parent(adev));
if (parent && dev_is_pci(parent))
dest->parent = pci_find_resource(to_pci_dev(parent), dest);
}
@@ -97,6 +97,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
const struct property_entry *properties)
{
+ struct acpi_device *parent = acpi_dev_parent(adev);
struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo;
struct resource_entry *rentry;
@@ -113,13 +114,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
- if (count < 0) {
+ if (count < 0)
return NULL;
- } else if (count > 0) {
- resources = kcalloc(count, sizeof(struct resource),
- GFP_KERNEL);
+ if (count > 0) {
+ resources = kcalloc(count, sizeof(*resources), GFP_KERNEL);
if (!resources) {
- dev_err(&adev->dev, "No memory for resources\n");
acpi_dev_free_resource_list(&resource_list);
return ERR_PTR(-ENOMEM);
}
@@ -137,10 +136,9 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
* attached to it, that physical device should be the parent of the
* platform device we are about to create.
*/
- pdevinfo.parent = adev->parent ?
- acpi_get_first_physical_node(adev->parent) : NULL;
+ pdevinfo.parent = parent ? acpi_get_first_physical_node(parent) : NULL;
pdevinfo.name = dev_name(&adev->dev);
- pdevinfo.id = -1;
+ pdevinfo.id = PLATFORM_DEVID_NONE;
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev);
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 5cbe2196176d..32953646caeb 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -47,9 +47,6 @@ module_param(brightness_switch_enabled, bool, 0644);
static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
-static int disable_backlight_sysfs_if = -1;
-module_param(disable_backlight_sysfs_if, int, 0444);
-
#define REPORT_OUTPUT_KEY_EVENTS 0x01
#define REPORT_BRIGHTNESS_KEY_EVENTS 0x02
static int report_key_events = -1;
@@ -73,6 +70,16 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
+/*
+ * Display probing is known to take up to 5 seconds, so delay the fallback
+ * backlight registration by 5 seconds + 3 seconds for some extra margin.
+ */
+static int register_backlight_delay = 8;
+module_param(register_backlight_delay, int, 0444);
+MODULE_PARM_DESC(register_backlight_delay,
+ "Delay in seconds before doing fallback (non GPU driver triggered) "
+ "backlight registration, set to 0 to disable.");
+
static bool may_report_brightness_keys;
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -81,7 +88,9 @@ static LIST_HEAD(video_bus_head);
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
-void acpi_video_detect_exit(void);
+static void acpi_video_bus_register_backlight_work(struct work_struct *ignored);
+static DECLARE_DELAYED_WORK(video_bus_register_backlight_work,
+ acpi_video_bus_register_backlight_work);
/*
* Indices in the _BCL method response: the first two items are special,
@@ -382,14 +391,6 @@ static int video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
-static int video_disable_backlight_sysfs_if(
- const struct dmi_system_id *d)
-{
- if (disable_backlight_sysfs_if == -1)
- disable_backlight_sysfs_if = 1;
- return 0;
-}
-
static int video_set_device_id_scheme(const struct dmi_system_id *d)
{
device_id_scheme = true;
@@ -463,40 +464,6 @@ static const struct dmi_system_id video_dmi_table[] = {
},
/*
- * Some machines have a broken acpi-video interface for brightness
- * control, but still need an acpi_video_device_lcd_set_level() call
- * on resume to turn the backlight power on. We Enable backlight
- * control on these systems, but do not register a backlight sysfs
- * as brightness control does not work.
- */
- {
- /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
- .callback = video_disable_backlight_sysfs_if,
- .ident = "Toshiba Portege R700",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
- },
- },
- {
- /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
- .callback = video_disable_backlight_sysfs_if,
- .ident = "Toshiba Portege R830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
- },
- },
- {
- /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
- .callback = video_disable_backlight_sysfs_if,
- .ident = "Toshiba Satellite R830",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
- },
- },
- /*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme.
*/
@@ -1758,9 +1725,6 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
if (result)
return;
- if (disable_backlight_sysfs_if > 0)
- return;
-
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
@@ -1859,8 +1823,6 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
if (video->backlight_registered)
return 0;
- acpi_video_run_bcl_for_osi(video);
-
if (acpi_video_get_backlight_type() != acpi_backlight_video)
return 0;
@@ -2030,7 +1992,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
- device->parent->handle, 1,
+ acpi_dev_parent(device)->handle, 1,
acpi_video_bus_match, NULL,
device, NULL);
if (status == AE_ALREADY_EXISTS) {
@@ -2086,7 +2048,11 @@ static int acpi_video_bus_add(struct acpi_device *device)
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
- acpi_video_bus_register_backlight(video);
+ /*
+ * The userspace visible backlight_device gets registered separately
+ * from acpi_video_register_backlight().
+ */
+ acpi_video_run_bcl_for_osi(video);
acpi_video_bus_add_notify_handler(video);
return 0;
@@ -2111,20 +2077,25 @@ static int acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device);
- acpi_video_bus_remove_notify_handler(video);
- acpi_video_bus_unregister_backlight(video);
- acpi_video_bus_put_devices(video);
-
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
+ acpi_video_bus_remove_notify_handler(video);
+ acpi_video_bus_unregister_backlight(video);
+ acpi_video_bus_put_devices(video);
+
kfree(video->attached_array);
kfree(video);
return 0;
}
+static void acpi_video_bus_register_backlight_work(struct work_struct *ignored)
+{
+ acpi_video_register_backlight();
+}
+
static int __init is_i740(struct pci_dev *dev)
{
if (dev->device == 0x00D1)
@@ -2235,6 +2206,18 @@ int acpi_video_register(void)
*/
register_count = 1;
+ /*
+ * acpi_video_bus_add() skips registering the userspace visible
+ * backlight_device. The intent is for this to be registered by the
+ * drm/kms driver calling acpi_video_register_backlight() *after* it is
+ * done setting up its own native backlight device. The delayed work
+ * ensures that acpi_video_register_backlight() always gets called
+ * eventually, in case there is no drm/kms driver or it is disabled.
+ */
+ if (register_backlight_delay)
+ schedule_delayed_work(&video_bus_register_backlight_work,
+ register_backlight_delay * HZ);
+
leave:
mutex_unlock(&register_count_mutex);
return ret;
@@ -2245,6 +2228,7 @@ void acpi_video_unregister(void)
{
mutex_lock(&register_count_mutex);
if (register_count) {
+ cancel_delayed_work_sync(&video_bus_register_backlight_work);
acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0;
may_report_brightness_keys = false;
@@ -2253,19 +2237,16 @@ void acpi_video_unregister(void)
}
EXPORT_SYMBOL(acpi_video_unregister);
-void acpi_video_unregister_backlight(void)
+void acpi_video_register_backlight(void)
{
struct acpi_video_bus *video;
- mutex_lock(&register_count_mutex);
- if (register_count) {
- mutex_lock(&video_list_lock);
- list_for_each_entry(video, &video_bus_head, entry)
- acpi_video_bus_unregister_backlight(video);
- mutex_unlock(&video_list_lock);
- }
- mutex_unlock(&register_count_mutex);
+ mutex_lock(&video_list_lock);
+ list_for_each_entry(video, &video_bus_head, entry)
+ acpi_video_bus_register_backlight(video);
+ mutex_unlock(&video_list_lock);
}
+EXPORT_SYMBOL(acpi_video_register_backlight);
bool acpi_video_handles_brightness_key_presses(void)
{
@@ -2302,7 +2283,6 @@ static int __init acpi_video_init(void)
static void __exit acpi_video_exit(void)
{
- acpi_video_detect_exit();
acpi_video_unregister();
}
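
The delayed work above implements a "register once, whoever gets there first" pattern: a drm/kms driver may call acpi_video_register_backlight() as soon as its native backlight is set up, and the delayed work is only a fallback. A userspace sketch of that pattern, with pthreads standing in for the kernel workqueue and the 8 second delay taken from the patch; everything else is illustrative only.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool backlight_registered;

static void register_backlight(const char *who)
{
	pthread_mutex_lock(&lock);
	if (!backlight_registered) {
		backlight_registered = true;
		printf("backlight registered by %s\n", who);
	}
	pthread_mutex_unlock(&lock);
}

static void *delayed_work(void *arg)
{
	sleep(*(unsigned int *)arg);		/* register_backlight_delay */
	register_backlight("delayed work (fallback)");
	return NULL;
}

int main(void)
{
	unsigned int delay = 8;
	pthread_t worker;

	pthread_create(&worker, NULL, delayed_work, &delay);
	sleep(1);				/* GPU driver finishes probing first */
	register_backlight("drm/kms driver");
	pthread_join(worker, NULL);
	return 0;
}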
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 9f49272cad39..9b52482b4ed5 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -125,12 +125,9 @@ EXPORT_SYMBOL_GPL(apei_exec_write_register);
int apei_exec_write_register_value(struct apei_exec_context *ctx,
struct acpi_whea_header *entry)
{
- int rc;
-
ctx->value = entry->value;
- rc = apei_exec_write_register(ctx, entry);
- return rc;
+ return apei_exec_write_register(ctx, entry);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c
index 45973aa6e06d..c23eb75866d0 100644
--- a/drivers/acpi/apei/bert.c
+++ b/drivers/acpi/apei/bert.c
@@ -90,6 +90,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
if (skipped)
pr_info(HW_ERR "Skipped %d error records\n", skipped);
+
+ if (printed + skipped)
+ pr_info("Total records found: %d\n", printed + skipped);
}
static int __init setup_bert_disable(char *str)
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 31b077eedb58..247989060e29 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1020,14 +1020,10 @@ static int reader_pos;
static int erst_open_pstore(struct pstore_info *psi)
{
- int rc;
-
if (erst_disable)
return -ENODEV;
- rc = erst_get_record_id_begin(&reader_pos);
-
- return rc;
+ return erst_get_record_id_begin(&reader_pos);
}
static int erst_close_pstore(struct pstore_info *psi)
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d91ad378c00d..80ad530583c9 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
ghes_estatus_cache_add(generic, estatus);
}
- if (task_work_pending && current->mm != &init_mm) {
+ if (task_work_pending && current->mm) {
estatus_node->task_work.func = ghes_kick_task_work;
estatus_node->task_work_cpu = smp_processor_id();
ret = task_work_add(current, &estatus_node->task_work,
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index f16739ad3cc0..93d796531af3 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -4,11 +4,12 @@
#include <linux/device.h>
#include <linux/dma-direct.h>
-void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
+void acpi_arch_dma_setup(struct device *dev)
{
int ret;
u64 end, mask;
- u64 dmaaddr = 0, size = 0, offset = 0;
+ u64 size = 0;
+ const struct bus_dma_region *map = NULL;
/*
* If @dev is expected to be DMA-capable then the bus code that created
@@ -26,7 +27,19 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
else
size = 1ULL << 32;
- ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ ret = acpi_dma_get_range(dev, &map);
+ if (!ret && map) {
+ const struct bus_dma_region *r = map;
+
+ for (end = 0; r->size; r++) {
+ if (r->dma_start + r->size - 1 > end)
+ end = r->dma_start + r->size - 1;
+ }
+
+ size = end + 1;
+ dev->dma_range_map = map;
+ }
+
if (ret == -ENODEV)
ret = iort_dma_get_ranges(dev, &size);
if (!ret) {
@@ -34,17 +47,10 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
* Limit coherent and dma mask based on size retrieved from
* firmware.
*/
- end = dmaaddr + size - 1;
+ end = size - 1;
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
*dev->dma_mask = min(*dev->dma_mask, mask);
}
-
- *dma_addr = dmaaddr;
- *dma_size = size;
-
- ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
-
- dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
}
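
With acpi_dma_get_range() now returning a bus_dma_region map, the limit is derived from the highest device-visible end address across all ranges. A standalone sketch of that computation with two made-up ranges:

#include <stdint.h>
#include <stdio.h>

struct bus_dma_region {
	uint64_t dma_start;
	uint64_t size;		/* size == 0 terminates the array */
};

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const struct bus_dma_region map[] = {
		{ 0x00000000, 0x40000000 },	/* 1 GiB at 0 */
		{ 0x80000000, 0x40000000 },	/* 1 GiB at 2 GiB */
		{ }
	};
	uint64_t end = 0;

	for (const struct bus_dma_region *r = map; r->size; r++)
		if (r->dma_start + r->size - 1 > end)
			end = r->dma_start + r->size - 1;

	printf("bus_dma_limit=%#llx mask=%#llx\n",
	       (unsigned long long)end,
	       (unsigned long long)DMA_BIT_MASK(ilog2_u64(end) + 1));
	return 0;
}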
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c0d20d997891..d466c8195314 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -456,7 +456,7 @@ out_free:
Notification Handling
-------------------------------------------------------------------------- */
-/**
+/*
* acpi_bus_notify
* ---------------
* Callback for all 'system-level' device notifications (values 0x00-0x7F).
@@ -511,7 +511,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
}
- adev = acpi_bus_get_acpi_device(handle);
+ adev = acpi_get_acpi_dev(handle);
if (!adev)
goto err;
@@ -524,14 +524,14 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
}
if (!hotplug_event) {
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
return;
}
if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return;
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
err:
acpi_evaluate_ost(handle, type, ost_code, NULL);
@@ -802,7 +802,7 @@ static bool acpi_of_modalias(struct acpi_device *adev,
str = obj->string.pointer;
chr = strchr(str, ',');
- strlcpy(modalias, chr ? chr + 1 : str, len);
+ strscpy(modalias, chr ? chr + 1 : str, len);
return true;
}
@@ -822,7 +822,7 @@ void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len)
{
if (!acpi_of_modalias(adev, modalias, len))
- strlcpy(modalias, default_id, len);
+ strscpy(modalias, default_id, len);
}
EXPORT_SYMBOL_GPL(acpi_set_modalias);
@@ -925,12 +925,13 @@ static const void *acpi_of_device_get_match_data(const struct device *dev)
const void *acpi_device_get_match_data(const struct device *dev)
{
+ const struct acpi_device_id *acpi_ids = dev->driver->acpi_match_table;
const struct acpi_device_id *match;
- if (!dev->driver->acpi_match_table)
+ if (!acpi_ids)
return acpi_of_device_get_match_data(dev);
- match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ match = acpi_match_device(acpi_ids, dev);
if (!match)
return NULL;
@@ -948,14 +949,13 @@ EXPORT_SYMBOL(acpi_match_device_ids);
bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
- if (!drv->acpi_match_table)
- return acpi_of_match_device(ACPI_COMPANION(dev),
- drv->of_match_table,
- NULL);
-
- return __acpi_match_device(acpi_companion_match(dev),
- drv->acpi_match_table, drv->of_match_table,
- NULL, NULL);
+ const struct acpi_device_id *acpi_ids = drv->acpi_match_table;
+ const struct of_device_id *of_ids = drv->of_match_table;
+
+ if (!acpi_ids)
+ return acpi_of_match_device(ACPI_COMPANION(dev), of_ids, NULL);
+
+ return __acpi_match_device(acpi_companion_match(dev), acpi_ids, of_ids, NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
@@ -973,16 +973,13 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device);
*/
int acpi_bus_register_driver(struct acpi_driver *driver)
{
- int ret;
-
if (acpi_disabled)
return -ENODEV;
driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type;
driver->drv.owner = driver->owner;
- ret = driver_register(&driver->drv);
- return ret;
+ return driver_register(&driver->drv);
}
EXPORT_SYMBOL(acpi_bus_register_driver);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1e15a9f25ae9..093675b1a1ff 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -424,6 +424,9 @@ bool acpi_cpc_valid(void)
struct cpc_desc *cpc_ptr;
int cpu;
+ if (acpi_disabled)
+ return false;
+
for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
@@ -1241,6 +1244,48 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
+ * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
+ *
+ * CPPC has flexibility about how CPU performance counters are accessed.
+ * One of the choices is PCC regions, which can have a high access latency. This
+ * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
+ *
+ * Return: true if any of the counters are in PCC regions, false otherwise
+ */
+bool cppc_perf_ctrs_in_pcc(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu) {
+ struct cpc_register_resource *ref_perf_reg;
+ struct cpc_desc *cpc_desc;
+
+ cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+
+ if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
+ CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
+ CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
+ return true;
+
+
+ ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
+
+ /*
+ * If reference perf register is not supported then we should
+ * use the nominal perf value
+ */
+ if (!CPC_SUPPORTED(ref_perf_reg))
+ ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
+
+ if (CPC_IN_PCC(ref_perf_reg))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
+
+/**
* cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters.
* @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
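
cppc_perf_ctrs_in_pcc() lets callers skip fast-path use of the feedback counters when any of them would require a slow PCC transaction. A minimal sketch of that kind of scan over simplified register descriptors; the descriptor layout here is illustrative, not the kernel's struct cpc_register_resource.

#include <stdbool.h>
#include <stdio.h>

#define ADR_SPACE_SYSTEM_MEMORY	0
#define ADR_SPACE_PLATFORM_COMM	10	/* PCC */

struct reg_desc {
	const char *name;
	int space_id;
};

static bool in_pcc(const struct reg_desc *r)
{
	return r->space_id == ADR_SPACE_PLATFORM_COMM;
}

int main(void)
{
	const struct reg_desc regs[] = {
		{ "delivered", ADR_SPACE_SYSTEM_MEMORY },
		{ "reference", ADR_SPACE_SYSTEM_MEMORY },
		{ "ctr_wrap",  ADR_SPACE_PLATFORM_COMM },
	};
	bool any_pcc = false;

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		any_pcc |= in_pcc(&regs[i]);

	printf("perf counters in PCC: %s\n", any_pcc ? "yes" : "no");
	return 0;
}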
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 9dce1245689c..97450f4003cc 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -75,15 +75,17 @@ static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state)
int acpi_device_get_power(struct acpi_device *device, int *state)
{
int result = ACPI_STATE_UNKNOWN;
+ struct acpi_device *parent;
int error;
if (!device || !state)
return -EINVAL;
+ parent = acpi_dev_parent(device);
+
if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
- *state = device->parent ?
- device->parent->power.state : ACPI_STATE_D0;
+ *state = parent ? parent->power.state : ACPI_STATE_D0;
goto out;
}
@@ -122,10 +124,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
* point, the fact that the device is in D0 implies that the parent has
* to be in D0 too, except if ignore_parent is set.
*/
- if (!device->power.flags.ignore_parent && device->parent
- && device->parent->power.state == ACPI_STATE_UNKNOWN
- && result == ACPI_STATE_D0)
- device->parent->power.state = ACPI_STATE_D0;
+ if (!device->power.flags.ignore_parent && parent &&
+ parent->power.state == ACPI_STATE_UNKNOWN &&
+ result == ACPI_STATE_D0)
+ parent->power.state = ACPI_STATE_D0;
*state = result;
@@ -191,13 +193,17 @@ int acpi_device_set_power(struct acpi_device *device, int state)
return -ENODEV;
}
- if (!device->power.flags.ignore_parent && device->parent &&
- state < device->parent->power.state) {
- acpi_handle_debug(device->handle,
- "Cannot transition to %s for parent in %s\n",
- acpi_power_state_string(state),
- acpi_power_state_string(device->parent->power.state));
- return -ENODEV;
+ if (!device->power.flags.ignore_parent) {
+ struct acpi_device *parent;
+
+ parent = acpi_dev_parent(device);
+ if (parent && state < parent->power.state) {
+ acpi_handle_debug(device->handle,
+ "Cannot transition to %s for parent in %s\n",
+ acpi_power_state_string(state),
+ acpi_power_state_string(parent->power.state));
+ return -ENODEV;
+ }
}
/*
@@ -497,7 +503,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
acpi_handle_debug(handle, "Wake notify\n");
- adev = acpi_bus_get_acpi_device(handle);
+ adev = acpi_get_acpi_dev(handle);
if (!adev)
return;
@@ -515,7 +521,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
mutex_unlock(&acpi_pm_notifier_lock);
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
}
/**
@@ -681,7 +687,22 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
+ } else if (device_may_wakeup(dev) && dev->power.wakeirq) {
+ /*
+ * The ACPI subsystem doesn't manage the wake bit for IRQs
+ * defined with ExclusiveAndWake and SharedAndWake. Instead we
+ * expect them to be managed via the PM subsystem. Drivers
+ * should call dev_pm_set_wake_irq to register an IRQ as a wake
+ * source.
+ *
+ * If a device has a wake IRQ attached we need to check the
+ * _S0W method to get the correct wake D-state. Otherwise we
+ * end up putting the device into D3Cold which will more than
+ * likely disable wake functionality.
+ */
+ wakeup = true;
} else {
+ /* ACPI GPE is specified in _PRW. */
wakeup = adev->wakeup.flags.valid;
}
@@ -1460,7 +1481,7 @@ EXPORT_SYMBOL_GPL(acpi_storage_d3);
* not valid to ask for the ACPI power state of the device in that time frame.
*
* This function is intended to be used in a driver's probe or remove
- * function. See Documentation/firmware-guide/acpi/low-power-probe.rst for
+ * function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for
* more information.
*/
bool acpi_dev_state_d0(struct device *dev)
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
index 1e8c7ce89bf1..4b3fdc03e4ed 100644
--- a/drivers/acpi/dptf/Kconfig
+++ b/drivers/acpi/dptf/Kconfig
@@ -11,9 +11,6 @@ menuconfig ACPI_DPTF
a coordinated approach for different policies to effect the hardware
state of a system.
- For more information see:
- <https://01.org/intel%C2%AE-dynamic-platform-and-thermal-framework-dptf-chromium-os/overview>
-
if ACPI_DPTF
config DPTF_POWER
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index c95e535035a0..9b42628cf21b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -917,14 +917,10 @@ EXPORT_SYMBOL(ec_read);
int ec_write(u8 addr, u8 val)
{
- int err;
-
if (!first_ec)
return -ENODEV;
- err = acpi_ec_write(first_ec, addr, val);
-
- return err;
+ return acpi_ec_write(first_ec, addr, val);
}
EXPORT_SYMBOL(ec_write);
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index b9a9a59ddcc1..52a0b303b70a 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -19,43 +19,12 @@
#include "fan.h"
-MODULE_AUTHOR("Paul Diefenbaugh");
-MODULE_DESCRIPTION("ACPI Fan Driver");
-MODULE_LICENSE("GPL");
-
-static int acpi_fan_probe(struct platform_device *pdev);
-static int acpi_fan_remove(struct platform_device *pdev);
-
static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS,
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
-#ifdef CONFIG_PM_SLEEP
-static int acpi_fan_suspend(struct device *dev);
-static int acpi_fan_resume(struct device *dev);
-static const struct dev_pm_ops acpi_fan_pm = {
- .resume = acpi_fan_resume,
- .freeze = acpi_fan_suspend,
- .thaw = acpi_fan_resume,
- .restore = acpi_fan_resume,
-};
-#define FAN_PM_OPS_PTR (&acpi_fan_pm)
-#else
-#define FAN_PM_OPS_PTR NULL
-#endif
-
-static struct platform_driver acpi_fan_driver = {
- .probe = acpi_fan_probe,
- .remove = acpi_fan_remove,
- .driver = {
- .name = "acpi-fan",
- .acpi_match_table = fan_device_ids,
- .pm = FAN_PM_OPS_PTR,
- },
-};
-
/* thermal cooling device callbacks */
static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
*state)
@@ -459,6 +428,33 @@ static int acpi_fan_resume(struct device *dev)
return result;
}
+
+static const struct dev_pm_ops acpi_fan_pm = {
+ .resume = acpi_fan_resume,
+ .freeze = acpi_fan_suspend,
+ .thaw = acpi_fan_resume,
+ .restore = acpi_fan_resume,
+};
+#define FAN_PM_OPS_PTR (&acpi_fan_pm)
+
+#else
+
+#define FAN_PM_OPS_PTR NULL
+
#endif
+static struct platform_driver acpi_fan_driver = {
+ .probe = acpi_fan_probe,
+ .remove = acpi_fan_remove,
+ .driver = {
+ .name = "acpi-fan",
+ .acpi_match_table = fan_device_ids,
+ .pm = FAN_PM_OPS_PTR,
+ },
+};
+
module_platform_driver(acpi_fan_driver);
+
+MODULE_AUTHOR("Paul Diefenbaugh");
+MODULE_DESCRIPTION("ACPI Fan Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 628bf8f18130..219c02df9a08 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -102,10 +102,10 @@ struct acpi_device_bus_id {
struct list_head node;
};
-int acpi_device_add(struct acpi_device *device,
- void (*release)(struct device *));
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
- int type);
+ int type, void (*release)(struct device *));
+int acpi_tie_acpi_dev(struct acpi_device *adev);
+int acpi_device_add(struct acpi_device *device);
int acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device);
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index dabe45eba055..1cc4647f78b8 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -118,12 +118,12 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
if (WARN_ON(ACPI_FAILURE(status)))
return NULL;
- device = acpi_bus_get_acpi_device(handle);
+ device = acpi_get_acpi_dev(handle);
if (WARN_ON(!device))
return NULL;
result = &device->fwnode;
- acpi_bus_put_acpi_device(device);
+ acpi_put_acpi_dev(device);
return result;
}
@@ -147,6 +147,7 @@ struct acpi_irq_parse_one_ctx {
* @polarity: polarity attributes of hwirq
* @polarity: polarity attributes of hwirq
* @shareable: shareable attributes of hwirq
+ * @wake_capable: wake capable attribute of hwirq
* @ctx: acpi_irq_parse_one_ctx updated by this function
*
* Description:
@@ -156,12 +157,13 @@ struct acpi_irq_parse_one_ctx {
static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
u32 hwirq, u8 triggering,
u8 polarity, u8 shareable,
+ u8 wake_capable,
struct acpi_irq_parse_one_ctx *ctx)
{
if (!fwnode)
return;
ctx->rc = 0;
- *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
ctx->fwspec->fwnode = fwnode;
ctx->fwspec->param[0] = hwirq;
ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
@@ -204,7 +206,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity,
- irq->shareable, ctx);
+ irq->shareable, irq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq;
@@ -218,7 +220,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity,
- eirq->shareable, ctx);
+ eirq->shareable, eirq->wake_capable, ctx);
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index c3d783aca196..23f49a2f4d14 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -9,7 +9,6 @@
*/
#define pr_fmt(fmt) "acpi/hmat: " fmt
-#define dev_fmt(fmt) "acpi/hmat: " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
@@ -302,7 +301,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
u8 type, mem_hier;
if (hmat_loc->header.length < sizeof(*hmat_loc)) {
- pr_notice("HMAT: Unexpected locality header length: %u\n",
+ pr_notice("Unexpected locality header length: %u\n",
hmat_loc->header.length);
return -EINVAL;
}
@@ -314,12 +313,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
sizeof(*inits) * ipds + sizeof(*targs) * tpds;
if (hmat_loc->header.length < total_size) {
- pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
+ pr_notice("Unexpected locality header length:%u, minimum required:%u\n",
hmat_loc->header.length, total_size);
return -EINVAL;
}
- pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
+ pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
hmat_loc->flags, hmat_data_type(type), ipds, tpds,
hmat_loc->entry_base_unit);
@@ -363,13 +362,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
u32 attrs;
if (cache->header.length < sizeof(*cache)) {
- pr_notice("HMAT: Unexpected cache header length: %u\n",
+ pr_notice("Unexpected cache header length: %u\n",
cache->header.length);
return -EINVAL;
}
attrs = cache->cache_attributes;
- pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
@@ -424,24 +423,24 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
struct memory_target *target = NULL;
if (p->header.length != sizeof(*p)) {
- pr_notice("HMAT: Unexpected address range header length: %u\n",
+ pr_notice("Unexpected address range header length: %u\n",
p->header.length);
return -EINVAL;
}
if (hmat_revision == 1)
- pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->reserved3, p->reserved4, p->flags, p->processor_PD,
p->memory_PD);
else
- pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
+ pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD);
if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
hmat_revision > 1) {
target = find_mem_target(p->memory_PD);
if (!target) {
- pr_debug("HMAT: Memory Domain missing from SRAT\n");
+ pr_debug("Memory Domain missing from SRAT\n");
return -EINVAL;
}
}
@@ -449,7 +448,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
int p_node = pxm_to_node(p->processor_PD);
if (p_node == NUMA_NO_NODE) {
- pr_debug("HMAT: Invalid Processor Domain\n");
+ pr_debug("Invalid Processor Domain\n");
return -EINVAL;
}
target->processor_pxm = p->processor_PD;
@@ -840,7 +839,7 @@ static __init int hmat_init(void)
case 2:
break;
default:
- pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
+ pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
goto out_put;
}
@@ -848,7 +847,7 @@ static __init int hmat_init(void)
if (acpi_table_parse_entries(ACPI_SIG_HMAT,
sizeof(struct acpi_table_hmat), i,
hmat_parse_subtable, 0) < 0) {
- pr_notice("Ignoring HMAT: Invalid table");
+ pr_notice("Ignoring: Invalid table");
goto out_put;
}
}
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 9f6853809138..d4405e1ca9b9 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -44,30 +44,6 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Processor Device", true},
{"3.0 _SCP Extensions", true},
{"Processor Aggregator Device", true},
- /*
- * Linux-Dell-Video is used by BIOS to disable RTD3 for NVidia graphics
- * cards as RTD3 is not supported by drivers now. Systems with NVidia
- * cards will hang without RTD3 disabled.
- *
- * Once NVidia drivers officially support RTD3, this _OSI strings can
- * be removed if both new and old graphics cards are supported.
- */
- {"Linux-Dell-Video", true},
- /*
- * Linux-Lenovo-NV-HDMI-Audio is used by BIOS to power on NVidia's HDMI
- * audio device which is turned off for power-saving in Windows OS.
- * This power management feature observed on some Lenovo Thinkpad
- * systems which will not be able to output audio via HDMI without
- * a BIOS workaround.
- */
- {"Linux-Lenovo-NV-HDMI-Audio", true},
- /*
- * Linux-HPI-Hybrid-Graphics is used by BIOS to enable dGPU to
- * output video directly to external monitors on HP Inc. mobile
- * workstations as Nvidia and AMD VGA drivers provide limited
- * hybrid graphics supports.
- */
- {"Linux-HPI-Hybrid-Graphics", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d57cf8454b93..c8385ef54c37 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -312,76 +312,25 @@ struct acpi_handle_node {
*/
struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
{
- int dev, fn;
- unsigned long long adr;
- acpi_status status;
- acpi_handle phandle;
- struct pci_bus *pbus;
- struct pci_dev *pdev = NULL;
- struct acpi_handle_node *node, *tmp;
- struct acpi_pci_root *root;
- LIST_HEAD(device_list);
-
- /*
- * Walk up the ACPI CA namespace until we reach a PCI root bridge.
- */
- phandle = handle;
- while (!acpi_is_root_bridge(phandle)) {
- node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
- if (!node)
- goto out;
-
- INIT_LIST_HEAD(&node->node);
- node->handle = phandle;
- list_add(&node->node, &device_list);
-
- status = acpi_get_parent(phandle, &phandle);
- if (ACPI_FAILURE(status))
- goto out;
- }
-
- root = acpi_pci_find_root(phandle);
- if (!root)
- goto out;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
+ struct acpi_device_physical_node *pn;
+ struct pci_dev *pci_dev = NULL;
- pbus = root->bus;
-
- /*
- * Now, walk back down the PCI device tree until we return to our
- * original handle. Assumes that everything between the PCI root
- * bridge and the device we're looking for must be a P2P bridge.
- */
- list_for_each_entry(node, &device_list, node) {
- acpi_handle hnd = node->handle;
- status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
- if (ACPI_FAILURE(status))
- goto out;
- dev = (adr >> 16) & 0xffff;
- fn = adr & 0xffff;
-
- pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
- if (!pdev || hnd == handle)
- break;
+ if (!adev)
+ return NULL;
- pbus = pdev->subordinate;
- pci_dev_put(pdev);
+ mutex_lock(&adev->physical_node_lock);
- /*
- * This function may be called for a non-PCI device that has a
- * PCI parent (eg. a disk under a PCI SATA controller). In that
- * case pdev->subordinate will be NULL for the parent.
- */
- if (!pbus) {
- dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
- pdev = NULL;
+ list_for_each_entry(pn, &adev->physical_node_list, node) {
+ if (dev_is_pci(pn->dev)) {
+ pci_dev = to_pci_dev(pn->dev);
break;
}
}
-out:
- list_for_each_entry_safe(node, tmp, &device_list, node)
- kfree(node);
- return pdev;
+ mutex_unlock(&adev->physical_node_lock);
+
+ return pci_dev;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
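
The rewritten acpi_get_pci_dev() no longer re-walks the ACPI namespace; it scans the ACPI device's list of physical companions for the first PCI device. A standalone sketch of that lookup over a toy list; the node layout and dev_is_pci() stand-in are illustrative only.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct physical_node {
	const char *name;
	bool is_pci;
	struct physical_node *next;
};

static const struct physical_node *first_pci_node(const struct physical_node *list)
{
	for (const struct physical_node *pn = list; pn; pn = pn->next)
		if (pn->is_pci)		/* dev_is_pci() in the kernel */
			return pn;
	return NULL;
}

int main(void)
{
	struct physical_node platform = { "platform companion", false, NULL };
	struct physical_node pci = { "0000:00:1f.3", true, &platform };
	const struct physical_node *found = first_pci_node(&pci);

	printf("PCI companion: %s\n", found ? found->name : "none");
	return 0;
}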
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 8c4a73a1351e..f2588aba8421 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -944,13 +944,15 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
return NULL;
device = &resource->device;
- acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
+ acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
+ acpi_release_power_resource);
mutex_init(&resource->resource_lock);
INIT_LIST_HEAD(&resource->list_node);
INIT_LIST_HEAD(&resource->dependents);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
device->power.state = ACPI_STATE_UNKNOWN;
+ device->flags.match_driver = true;
/* Evaluate the object to get the system level and resource order. */
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
@@ -967,8 +969,11 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
- device->flags.match_driver = true;
- result = acpi_device_add(device, acpi_release_power_resource);
+ result = acpi_tie_acpi_dev(device);
+ if (result)
+ goto err;
+
+ result = acpi_device_add(device);
if (result)
goto err;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 16a1663d02d4..acfabfe07c4f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -531,10 +531,27 @@ static void wait_for_freeze(void)
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return;
+ /*
+ * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+ * not this code. Assume that any Intel systems using this
+ * are ancient and may need the dummy wait. This also assumes
+ * that the motivating chipset issue was Intel-only.
+ */
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
#endif
- /* Dummy wait op - must do something useless after P_LVL2 read
- because chipsets cannot guarantee that STPCLK# signal
- gets asserted in time to freeze execution properly. */
+ /*
+ * Dummy wait op - must do something useless after P_LVL2 read
+ * because chipsets cannot guarantee that STPCLK# signal gets
+ * asserted in time to freeze execution properly
+ *
+ * This workaround has been in place since the original ACPI
+ * implementation was merged, circa 2002.
+ *
+ * If a profile is pointing to this instruction, please first
+ * consider moving your system to a more modern idle
+ * mechanism.
+ */
inl(acpi_gbl_FADT.xpm_timer_block.address);
}
@@ -787,7 +804,7 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
- strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor;
state->enter = acpi_idle_enter;
@@ -956,7 +973,7 @@ static int acpi_processor_evaluate_lpi(acpi_handle handle,
obj = pkg_elem + 9;
if (obj->type == ACPI_TYPE_STRING)
- strlcpy(lpi_state->desc, obj->string.pointer,
+ strscpy(lpi_state->desc, obj->string.pointer,
ACPI_CX_DESC_LEN);
lpi_state->index = state_idx;
@@ -1022,7 +1039,7 @@ static bool combine_lpi_states(struct acpi_lpi_state *local,
result->arch_flags = parent->arch_flags;
result->index = parent->index;
- strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
+ strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
return true;
@@ -1196,7 +1213,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
state = &drv->states[i];
snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
- strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
+ strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
state->exit_latency = lpi->wake_latency;
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index d4c168ce428c..b8d9eb9a433e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -304,8 +304,10 @@ static void acpi_init_of_compatible(struct acpi_device *adev)
ret = acpi_dev_get_property(adev, "compatible",
ACPI_TYPE_STRING, &of_compatible);
if (ret) {
- if (adev->parent
- && adev->parent->flags.of_compatible_ok)
+ struct acpi_device *parent;
+
+ parent = acpi_dev_parent(adev);
+ if (parent && parent->flags.of_compatible_ok)
goto out;
return;
@@ -1267,10 +1269,11 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode)
return to_acpi_data_node(fwnode)->parent;
}
if (is_acpi_device_node(fwnode)) {
- struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
+ struct acpi_device *parent;
- if (dev)
- return acpi_fwnode_handle(to_acpi_device(dev));
+ parent = acpi_dev_parent(to_acpi_device_node(fwnode));
+ if (parent)
+ return acpi_fwnode_handle(parent);
}
return NULL;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 510cdec375c4..6f9489edfb4e 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -336,8 +336,9 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
* @triggering: Triggering type as provided by ACPI.
* @polarity: Interrupt polarity as provided by ACPI.
* @shareable: Whether or not the interrupt is shareable.
+ * @wake_capable: Wake capability as provided by ACPI.
*/
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable)
{
unsigned long flags;
@@ -351,6 +352,9 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
if (shareable == ACPI_SHARED)
flags |= IORESOURCE_IRQ_SHAREABLE;
+ if (wake_capable == ACPI_WAKE_CAPABLE)
+ flags |= IORESOURCE_IRQ_WAKECAPABLE;
+
return flags | IORESOURCE_IRQ;
}
EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
@@ -399,6 +403,31 @@ static const struct dmi_system_id medion_laptop[] = {
{ }
};
+static const struct dmi_system_id asus_laptop[] = {
+ {
+ .ident = "Asus Vivobook K3402ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook K3502ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
+ },
+ },
+ {
+ .ident = "Asus Vivobook S5402ZA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
+ },
+ },
+ { }
+};
+
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
@@ -409,6 +438,7 @@ struct irq_override_cmp {
static const struct irq_override_cmp skip_override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+ { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@@ -442,7 +472,7 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
- bool check_override)
+ u8 wake_capable, bool check_override)
{
int irq, p, t;
@@ -475,7 +505,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
- res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
if (irq >= 0) {
res->start = irq;
@@ -523,7 +553,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
- irq->shareable, true);
+ irq->shareable, irq->wake_capable,
+ true);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;
@@ -534,7 +565,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
if (is_gsi(ext_irq))
acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity,
- ext_irq->shareable, false);
+ ext_irq->shareable, ext_irq->wake_capable,
+ false);
else
irqresource_disabled(res, 0);
break;
@@ -690,6 +722,9 @@ static int is_memory(struct acpi_resource *ares, void *not_used)
memset(&win, 0, sizeof(win));
+ if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM))
+ return 1;
+
return !(acpi_dev_resource_memory(ares, res)
|| acpi_dev_resource_address_space(ares, &win)
|| acpi_dev_resource_ext_address_space(ares, &win));
@@ -719,6 +754,23 @@ int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
+ * acpi_dev_get_memory_resources - Get current memory resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * This is a helper function that locates all memory type resources of @adev
+ * with acpi_dev_get_resources().
+ *
+ * The number of resources in the output list is returned on success; otherwise,
+ * an error code reflecting the error condition is returned.
+ */
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list)
+{
+ return acpi_dev_get_resources(adev, list, is_memory, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources);
+
+/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
* types
* @ares: Input ACPI resource object.
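A hedged sketch of the intended call pattern for the new helper, assuming @adev is a valid ACPI device; the wrapper function below is illustrative only.

/* Sketch: collect and log the memory resources of an ACPI device. */
static int example_list_memory_resources(struct acpi_device *adev)
{
	struct resource_entry *rentry;
	LIST_HEAD(list);
	int count;

	count = acpi_dev_get_memory_resources(adev, &list);
	if (count < 0)
		return count;

	list_for_each_entry(rentry, &list, node)
		acpi_handle_debug(adev->handle, "memory resource %pR\n", rentry->res);

	acpi_dev_free_resource_list(&list);

	return count;
}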
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 4938010fcac7..e6a01a8df1b8 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -632,7 +632,7 @@ static int acpi_sbs_add(struct acpi_device *device)
mutex_init(&sbs->lock);
- sbs->hc = acpi_driver_data(device->parent);
+ sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device;
strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SBS_CLASS);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 7c62e149a7a1..340e0b61587e 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -266,7 +266,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
mutex_init(&hc->lock);
init_waitqueue_head(&hc->wait);
- hc->ec = acpi_driver_data(device->parent);
+ hc->ec = acpi_driver_data(acpi_dev_parent(device));
hc->offset = (val >> 8) & 0xff;
hc->query_bit = val & 0xff;
device->driver_data = hc;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 42cec8120f18..558664d169fc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -20,6 +20,7 @@
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
#include <linux/crc32.h>
+#include <linux/dma-direct.h>
#include "internal.h"
@@ -29,8 +30,6 @@ extern struct acpi_device *acpi_root;
#define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus"
-#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
-
#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
static const char *dummy_hid = "device";
@@ -429,7 +428,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
out:
- acpi_bus_put_acpi_device(adev);
+ acpi_put_acpi_dev(adev);
mutex_unlock(&acpi_scan_lock);
unlock_device_hotplug();
}
@@ -599,11 +598,22 @@ static void get_acpi_device(void *dev)
acpi_dev_get(dev);
}
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
+/**
+ * acpi_get_acpi_dev - Retrieve ACPI device object and reference count it.
+ * @handle: ACPI handle associated with the requested ACPI device object.
+ *
+ * If present, return a pointer to the ACPI device object associated with
+ * @handle and bump up that object's reference counter (under the ACPI
+ * Namespace lock); return NULL otherwise.
+ *
+ * The ACPI device object reference acquired by this function needs to be
+ * dropped via acpi_dev_put().
+ */
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle)
{
return handle_to_device(handle, get_acpi_device);
}
-EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device);
+EXPORT_SYMBOL_GPL(acpi_get_acpi_dev);
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
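An illustrative sketch of the reference discipline the renamed helper expects from its callers; the function below is an example, not code added by this patch.

/* Sketch: acpi_get_acpi_dev() takes a reference that acpi_dev_put() must drop. */
static void example_inspect_handle(acpi_handle handle)
{
	struct acpi_device *adev = acpi_get_acpi_dev(handle);

	if (!adev)
		return;

	acpi_handle_debug(handle, "resolved to %s\n", dev_name(&adev->dev));

	acpi_dev_put(adev);
}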
@@ -632,7 +642,7 @@ static int acpi_device_set_name(struct acpi_device *device,
return 0;
}
-static int acpi_tie_acpi_dev(struct acpi_device *adev)
+int acpi_tie_acpi_dev(struct acpi_device *adev)
{
acpi_handle handle = adev->handle;
acpi_status status;
@@ -662,8 +672,7 @@ static void acpi_store_pld_crc(struct acpi_device *adev)
ACPI_FREE(pld);
}
-static int __acpi_device_add(struct acpi_device *device,
- void (*release)(struct device *))
+int acpi_device_add(struct acpi_device *device)
{
struct acpi_device_bus_id *acpi_device_bus_id;
int result;
@@ -719,11 +728,6 @@ static int __acpi_device_add(struct acpi_device *device,
mutex_unlock(&acpi_device_lock);
- if (device->parent)
- device->dev.parent = &device->parent->dev;
-
- device->dev.bus = &acpi_bus_type;
- device->dev.release = release;
result = device_add(&device->dev);
if (result) {
dev_err(&device->dev, "Error registering device\n");
@@ -750,17 +754,6 @@ err_unlock:
return result;
}
-int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
-{
- int ret;
-
- ret = acpi_tie_acpi_dev(adev);
- if (ret)
- return ret;
-
- return __acpi_device_add(adev, release);
-}
-
/* --------------------------------------------------------------------------
Device Enumeration
-------------------------------------------------------------------------- */
@@ -805,10 +798,9 @@ static const char * const acpi_honor_dep_ids[] = {
NULL
};
-static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
+static struct acpi_device *acpi_find_parent_acpi_dev(acpi_handle handle)
{
- struct acpi_device *device;
- acpi_status status;
+ struct acpi_device *adev;
/*
* Fixed hardware devices do not appear in the namespace and do not
@@ -819,13 +811,18 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
return acpi_root;
do {
+ acpi_status status;
+
status = acpi_get_parent(handle, &handle);
- if (ACPI_FAILURE(status))
- return status == AE_NULL_ENTRY ? NULL : acpi_root;
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NULL_ENTRY)
+ return acpi_root;
- device = acpi_fetch_acpi_dev(handle);
- } while (!device);
- return device;
+ return NULL;
+ }
+ adev = acpi_fetch_acpi_dev(handle);
+ } while (!adev);
+ return adev;
}
acpi_status
@@ -1112,7 +1109,7 @@ static void acpi_device_get_busid(struct acpi_device *device)
* The device's Bus ID is simply the object name.
* TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/
- if (ACPI_IS_ROOT_DEVICE(device)) {
+ if (!acpi_dev_parent(device)) {
strcpy(device->pnp.bus_id, "ACPI");
return;
}
@@ -1467,25 +1464,21 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
* acpi_dma_get_range() - Get device DMA parameters.
*
* @dev: device to configure
- * @dma_addr: pointer device DMA address result
- * @offset: pointer to the DMA offset result
- * @size: pointer to DMA range size result
+ * @map: pointer to DMA ranges result
*
- * Evaluate DMA regions and return respectively DMA region start, offset
- * and size in dma_addr, offset and size on parsing success; it does not
- * update the passed in values on failure.
+ * Evaluate DMA regions and, on parsing success, return a pointer to the
+ * resulting DMA region map through @map; @map is not updated on failure.
*
* Return 0 on success, < 0 on failure.
*/
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
- u64 *size)
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
struct acpi_device *adev;
LIST_HEAD(list);
struct resource_entry *rentry;
int ret;
struct device *dma_dev = dev;
- u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+ struct bus_dma_region *r;
/*
* Walk the device tree chasing an ACPI companion with a _DMA
@@ -1510,31 +1503,28 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
ret = acpi_dev_get_dma_resources(adev, &list);
if (ret > 0) {
+ r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL);
+ if (!r) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
list_for_each_entry(rentry, &list, node) {
- if (dma_offset && rentry->offset != dma_offset) {
+ if (rentry->res->start >= rentry->res->end) {
+ kfree(r);
ret = -EINVAL;
- dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+ dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
goto out;
}
- dma_offset = rentry->offset;
- /* Take lower and upper limits */
- if (rentry->res->start < dma_start)
- dma_start = rentry->res->start;
- if (rentry->res->end > dma_end)
- dma_end = rentry->res->end;
- }
-
- if (dma_start >= dma_end) {
- ret = -EINVAL;
- dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
- goto out;
+ r->cpu_start = rentry->res->start;
+ r->dma_start = rentry->res->start - rentry->offset;
+ r->size = resource_size(rentry->res);
+ r->offset = rentry->offset;
+ r++;
}
- *dma_addr = dma_start - dma_offset;
- len = dma_end - dma_start;
- *size = max(len, len + 1);
- *offset = dma_offset;
+ *map = r;
}
out:
acpi_dev_free_resource_list(&list);
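To show the new map-based contract from the caller's side, a hedged sketch follows; the assignment to dev->dma_range_map mirrors how architecture code is expected to consume the zero-terminated bus_dma_region array, and the helper name is hypothetical.

/* Sketch: fetch the _DMA ranges of a device and hand the map to the DMA core. */
static int example_apply_dma_ranges(struct device *dev)
{
	const struct bus_dma_region *map = NULL;
	int ret;

	ret = acpi_dma_get_range(dev, &map);
	if (ret < 0)
		return ret;

	/* No _DMA ranges is not an error; only install a map if one was built. */
	if (map)
		dev->dma_range_map = map;

	return 0;
}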
@@ -1624,20 +1614,19 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id)
{
const struct iommu_ops *iommu;
- u64 dma_addr = 0, size = 0;
if (attr == DEV_DMA_NOT_SUPPORTED) {
set_dma_ops(dev, &dma_dummy_ops);
return 0;
}
- acpi_arch_dma_setup(dev, &dma_addr, &size);
+ acpi_arch_dma_setup(dev);
iommu = acpi_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- arch_setup_dma_ops(dev, dma_addr, size,
+ arch_setup_dma_ops(dev, 0, U64_MAX,
iommu, attr == DEV_DMA_COHERENT);
return 0;
@@ -1648,7 +1637,7 @@ static void acpi_init_coherency(struct acpi_device *adev)
{
unsigned long long cca = 0;
acpi_status status;
- struct acpi_device *parent = adev->parent;
+ struct acpi_device *parent = acpi_dev_parent(adev);
if (parent && parent->flags.cca_seen) {
/*
@@ -1692,7 +1681,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{
- struct acpi_device *parent = device->parent;
+ struct acpi_device *parent = acpi_dev_parent(device);
static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0},
{}
@@ -1762,12 +1751,16 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
}
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
- int type)
+ int type, void (*release)(struct device *))
{
+ struct acpi_device *parent = acpi_find_parent_acpi_dev(handle);
+
INIT_LIST_HEAD(&device->pnp.ids);
device->device_type = type;
device->handle = handle;
- device->parent = acpi_bus_get_parent(handle);
+ device->dev.parent = parent ? &parent->dev : NULL;
+ device->dev.release = release;
+ device->dev.bus = &acpi_bus_type;
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, ACPI_STA_DEFAULT);
acpi_device_get_busid(device);
@@ -1821,7 +1814,7 @@ static int acpi_add_single_object(struct acpi_device **child,
if (!device)
return -ENOMEM;
- acpi_init_device_object(device, handle, type);
+ acpi_init_device_object(device, handle, type, acpi_device_release);
/*
* Getting the status is delayed till here so that we can call
* acpi_bus_get_status() and use its quirk handling. Note that
@@ -1851,7 +1844,7 @@ static int acpi_add_single_object(struct acpi_device **child,
mutex_unlock(&acpi_dep_list_lock);
if (!result)
- result = __acpi_device_add(device, acpi_device_release);
+ result = acpi_device_add(device);
if (result) {
acpi_device_release(&device->dev);
@@ -1862,8 +1855,8 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_device_add_finalize(device);
acpi_handle_debug(handle, "Added as %s, parent %s\n",
- dev_name(&device->dev), device->parent ?
- dev_name(&device->parent->dev) : "(null)");
+ dev_name(&device->dev), device->dev.parent ?
+ dev_name(device->dev.parent) : "(null)");
*child = device;
return 0;
@@ -2235,11 +2228,24 @@ ok:
return 0;
}
-static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
+static int acpi_dev_get_next_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
{
- struct acpi_device *adev;
+ struct acpi_device **adev_p = data;
+ struct acpi_device *adev = *adev_p;
- adev = acpi_bus_get_acpi_device(dep->consumer);
+ /*
+ * If we're passed a 'previous' consumer device then we need to skip
+ * any consumers until we meet the previous one, and then NULL @data
+ * so the next one can be returned.
+ */
+ if (adev) {
+ if (dep->consumer == adev->handle)
+ *adev_p = NULL;
+
+ return 0;
+ }
+
+ adev = acpi_get_acpi_dev(dep->consumer);
if (adev) {
*(struct acpi_device **)data = adev;
return 1;
@@ -2292,7 +2298,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
{
- struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer);
+ struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
if (adev) {
adev->dep_unmet--;
@@ -2368,25 +2374,32 @@ bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
/**
- * acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier
+ * acpi_dev_get_next_consumer_dev - Return the next adev dependent on @supplier
* @supplier: Pointer to the dependee device
+ * @start: Pointer to the current dependent device
*
- * Returns the first &struct acpi_device which declares itself dependent on
+ * Returns the next &struct acpi_device which declares itself dependent on
* @supplier via the _DEP buffer, parsed from the acpi_dep_list.
*
- * The caller is responsible for putting the reference to adev when it is no
- * longer needed.
+ * If the returned adev is not passed as @start to this function, the caller is
+ * responsible for putting the reference to adev when it is no longer needed.
*/
-struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier)
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+ struct acpi_device *start)
{
- struct acpi_device *adev = NULL;
+ struct acpi_device *adev = start;
acpi_walk_dep_device_list(supplier->handle,
- acpi_dev_get_first_consumer_dev_cb, &adev);
+ acpi_dev_get_next_consumer_dev_cb, &adev);
+
+ acpi_dev_put(start);
+
+ if (adev == start)
+ return NULL;
return adev;
}
-EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev);
+EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);
/**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
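An illustrative loop showing the cursor-style iteration the reworked helper enables; the function name and the debug message are placeholders.

/* Sketch: visit every consumer that lists 'supplier' in its _DEP buffer. */
static void example_walk_consumers(struct acpi_device *supplier)
{
	struct acpi_device *consumer = NULL;

	while ((consumer = acpi_dev_get_next_consumer_dev(supplier, consumer))) {
		/* The helper manages references while iterating to the end. */
		acpi_handle_debug(consumer->handle, "depends on %s\n",
				  dev_name(&supplier->dev));
	}
}

Note that a caller breaking out of such a loop early still holds a reference to the current consumer and must drop it with acpi_dev_put().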
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ad4b2987b3d6..0b557c0d405e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1088,6 +1088,14 @@ int __init acpi_sleep_init(void)
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off, NULL);
+
+ /*
+ * Windows uses S5 for reboot, so some BIOSes depend on it to
+		 * perform a proper reboot.
+ */
+ register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_FIRMWARE,
+ acpi_power_off_prepare, NULL);
} else {
acpi_no_s5 = true;
}
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 7fe41ee489d6..d960a238be4e 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -18,6 +18,7 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
extern int acpi_s2idle_begin(void);
extern int acpi_s2idle_prepare(void);
extern int acpi_s2idle_prepare_late(void);
+extern void acpi_s2idle_check(void);
extern bool acpi_s2idle_wake(void);
extern void acpi_s2idle_restore_early(void);
extern void acpi_s2idle_restore(void);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 539660ef93c7..40b07057983e 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -158,7 +158,7 @@ struct acpi_thermal_flags {
};
struct acpi_thermal {
- struct acpi_device * device;
+ struct acpi_device *device;
acpi_bus_id name;
unsigned long temperature;
unsigned long last_temperature;
@@ -262,7 +262,7 @@ do { \
static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
- acpi_status status = AE_OK;
+ acpi_status status;
unsigned long long tmp;
struct acpi_handle_list devices;
int valid = 0;
@@ -270,8 +270,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Shutdown */
if (flag & ACPI_TRIPS_CRITICAL) {
- status = acpi_evaluate_integer(tz->device->handle,
- "_CRT", NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL, &tmp);
tz->trips.critical.temperature = tmp;
/*
* Treat freezing temperatures as invalid as well; some
@@ -284,8 +283,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
acpi_handle_debug(tz->device->handle,
"No critical threshold\n");
} else if (tmp <= 2732) {
- pr_info(FW_BUG "Invalid critical threshold (%llu)\n",
- tmp);
+ pr_info(FW_BUG "Invalid critical threshold (%llu)\n", tmp);
tz->trips.critical.flags.valid = 0;
} else {
tz->trips.critical.flags.valid = 1;
@@ -312,8 +310,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Critical Sleep (optional) */
if (flag & ACPI_TRIPS_HOT) {
- status = acpi_evaluate_integer(tz->device->handle,
- "_HOT", NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle, "_HOT", NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.hot.flags.valid = 0;
acpi_handle_debug(tz->device->handle,
@@ -329,7 +326,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
/* Passive (optional) */
if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) ||
- (flag == ACPI_TRIPS_INIT)) {
+ flag == ACPI_TRIPS_INIT) {
valid = tz->trips.passive.flags.valid;
if (psv == -1) {
status = AE_SUPPORT;
@@ -338,32 +335,31 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
status = AE_OK;
} else {
status = acpi_evaluate_integer(tz->device->handle,
- "_PSV", NULL, &tmp);
+ "_PSV", NULL, &tmp);
}
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
tz->trips.passive.flags.valid = 0;
- else {
+ } else {
tz->trips.passive.temperature = tmp;
tz->trips.passive.flags.valid = 1;
if (flag == ACPI_TRIPS_INIT) {
- status = acpi_evaluate_integer(
- tz->device->handle, "_TC1",
- NULL, &tmp);
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TC1", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
tz->trips.passive.tc1 = tmp;
- status = acpi_evaluate_integer(
- tz->device->handle, "_TC2",
- NULL, &tmp);
+
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TC2", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
tz->trips.passive.tc2 = tmp;
- status = acpi_evaluate_integer(
- tz->device->handle, "_TSP",
- NULL, &tmp);
+
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
else
@@ -374,25 +370,25 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
- NULL, &devices);
+ NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid passive threshold\n");
tz->trips.passive.flags.valid = 0;
- }
- else
+ } else {
tz->trips.passive.flags.valid = 1;
+ }
if (memcmp(&tz->trips.passive.devices, &devices,
- sizeof(struct acpi_handle_list))) {
+ sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.passive.devices, &devices,
- sizeof(struct acpi_handle_list));
+ sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
if (valid != tz->trips.passive.flags.valid)
- ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
@@ -403,29 +399,31 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (act == -1)
break; /* disable all active trip points */
- if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) &&
- tz->trips.active[i].flags.valid)) {
+ if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) &&
+ tz->trips.active[i].flags.valid)) {
status = acpi_evaluate_integer(tz->device->handle,
- name, NULL, &tmp);
+ name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
tz->trips.active[i].flags.valid = 0;
if (i == 0)
break;
+
if (act <= 0)
break;
+
if (i == 1)
- tz->trips.active[0].temperature =
- celsius_to_deci_kelvin(act);
+ tz->trips.active[0].temperature = celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
* the next higher trip point
*/
- tz->trips.active[i - 1].temperature =
- (tz->trips.active[i - 2].temperature <
+ tz->trips.active[i-1].temperature =
+ (tz->trips.active[i-2].temperature <
celsius_to_deci_kelvin(act) ?
- tz->trips.active[i - 2].temperature :
+ tz->trips.active[i-2].temperature :
celsius_to_deci_kelvin(act));
+
break;
} else {
tz->trips.active[i].temperature = tmp;
@@ -434,22 +432,22 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
name[2] = 'L';
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid ) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle,
- name, NULL, &devices);
+ name, NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid active%d threshold\n", i);
tz->trips.active[i].flags.valid = 0;
- }
- else
+ } else {
tz->trips.active[i].flags.valid = 1;
+ }
if (memcmp(&tz->trips.active[i].devices, &devices,
- sizeof(struct acpi_handle_list))) {
+ sizeof(struct acpi_handle_list))) {
memcpy(&tz->trips.active[i].devices, &devices,
- sizeof(struct acpi_handle_list));
+ sizeof(struct acpi_handle_list));
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
@@ -464,9 +462,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (flag & ACPI_TRIPS_DEVICES) {
memset(&devices, 0, sizeof(devices));
status = acpi_evaluate_reference(tz->device->handle, "_TZD",
- NULL, &devices);
- if (ACPI_SUCCESS(status)
- && memcmp(&tz->devices, &devices, sizeof(devices))) {
+ NULL, &devices);
+ if (ACPI_SUCCESS(status) &&
+ memcmp(&tz->devices, &devices, sizeof(devices))) {
tz->devices = devices;
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
@@ -548,8 +546,7 @@ static int thermal_get_trip_type(struct thermal_zone_device *thermal,
trip--;
}
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++) {
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid; i++) {
if (!trip) {
*type = THERMAL_TRIP_ACTIVE;
return 0;
@@ -572,8 +569,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
+ tz->trips.critical.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -582,8 +579,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.hot.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.hot.temperature,
- tz->kelvin_offset);
+ tz->trips.hot.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -592,8 +589,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.passive.temperature,
- tz->kelvin_offset);
+ tz->trips.passive.temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -603,8 +600,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
tz->trips.active[i].flags.valid; i++) {
if (!trip) {
*temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.active[i].temperature,
- tz->kelvin_offset);
+ tz->trips.active[i].temperature,
+ tz->kelvin_offset);
return 0;
}
trip--;
@@ -620,15 +617,16 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
if (tz->trips.critical.flags.valid) {
*temperature = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
+ tz->trips.critical.temperature,
+ tz->kelvin_offset);
return 0;
- } else
- return -EINVAL;
+ }
+
+ return -EINVAL;
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
+ int trip, enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal->devdata;
enum thermal_trip_type type;
@@ -657,9 +655,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
* tz->temperature has already been updated by generic thermal layer,
* before this callback being invoked
*/
- i = (tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature))
- + (tz->trips.passive.tc2
- * (tz->temperature - tz->trips.passive.temperature));
+ i = tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature) +
+ tz->trips.passive.tc2 * (tz->temperature - tz->trips.passive.temperature);
if (i > 0)
*trend = THERMAL_TREND_RAISING;
@@ -667,6 +664,7 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
*trend = THERMAL_TREND_DROPPING;
else
*trend = THERMAL_TREND_STABLE;
+
return 0;
}
@@ -691,8 +689,8 @@ static void acpi_thermal_zone_device_critical(struct thermal_zone_device *therma
}
static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev,
- bool bind)
+ struct thermal_cooling_device *cdev,
+ bool bind)
{
struct acpi_device *device = cdev->devdata;
struct acpi_thermal *tz = thermal->devdata;
@@ -711,22 +709,23 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
if (tz->trips.passive.flags.valid) {
trip++;
- for (i = 0; i < tz->trips.passive.devices.count;
- i++) {
+ for (i = 0; i < tz->trips.passive.devices.count; i++) {
handle = tz->trips.passive.devices.handles[i];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
+
if (bind)
- result =
- thermal_zone_bind_cooling_device
- (thermal, trip, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
+ result = thermal_zone_bind_cooling_device(
+ thermal, trip, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
else
result =
- thermal_zone_unbind_cooling_device
- (thermal, trip, cdev);
+ thermal_zone_unbind_cooling_device(
+ thermal, trip, cdev);
+
if (result)
goto failed;
}
@@ -735,22 +734,24 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].flags.valid)
break;
+
trip++;
- for (j = 0;
- j < tz->trips.active[i].devices.count;
- j++) {
+ for (j = 0; j < tz->trips.active[i].devices.count; j++) {
handle = tz->trips.active[i].devices.handles[j];
dev = acpi_fetch_acpi_dev(handle);
if (dev != device)
continue;
+
if (bind)
- result = thermal_zone_bind_cooling_device
- (thermal, trip, cdev,
- THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,
- THERMAL_WEIGHT_DEFAULT);
+ result = thermal_zone_bind_cooling_device(
+ thermal, trip, cdev,
+ THERMAL_NO_LIMIT,
+ THERMAL_NO_LIMIT,
+ THERMAL_WEIGHT_DEFAULT);
else
- result = thermal_zone_unbind_cooling_device
- (thermal, trip, cdev);
+ result = thermal_zone_unbind_cooling_device(
+ thermal, trip, cdev);
+
if (result)
goto failed;
}
@@ -762,14 +763,14 @@ failed:
static int
acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, true);
}
static int
acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
+ struct thermal_cooling_device *cdev)
{
return acpi_thermal_cooling_device_cb(thermal, cdev, false);
}
@@ -802,20 +803,20 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
if (tz->trips.passive.flags.valid)
trips++;
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].flags.valid; i++, trips++);
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].flags.valid;
+ i++, trips++);
if (tz->trips.passive.flags.valid)
- tz->thermal_zone =
- thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- tz->trips.passive.tsp*100,
- tz->polling_frequency*100);
+ tz->thermal_zone = thermal_zone_device_register("acpitz", trips, 0, tz,
+ &acpi_thermal_zone_ops, NULL,
+ tz->trips.passive.tsp * 100,
+ tz->polling_frequency * 100);
else
tz->thermal_zone =
thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- 0, tz->polling_frequency*100);
+ &acpi_thermal_zone_ops, NULL,
+ 0, tz->polling_frequency * 100);
+
if (IS_ERR(tz->thermal_zone))
return -ENODEV;
@@ -881,7 +882,6 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
-
if (!tz)
return;
@@ -893,13 +893,13 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ dev_name(&device->dev), event, 0);
break;
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
@@ -942,8 +942,7 @@ static void acpi_thermal_aml_dependency_fix(struct acpi_thermal *tz)
static int acpi_thermal_get_info(struct acpi_thermal *tz)
{
- int result = 0;
-
+ int result;
if (!tz)
return -EINVAL;
@@ -1020,9 +1019,8 @@ static void acpi_thermal_check_fn(struct work_struct *work)
static int acpi_thermal_add(struct acpi_device *device)
{
- int result = 0;
- struct acpi_thermal *tz = NULL;
-
+ struct acpi_thermal *tz;
+ int result;
if (!device)
return -EINVAL;
@@ -1063,7 +1061,7 @@ end:
static int acpi_thermal_remove(struct acpi_device *device)
{
- struct acpi_thermal *tz = NULL;
+ struct acpi_thermal *tz;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1099,6 +1097,7 @@ static int acpi_thermal_resume(struct device *dev)
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!tz->trips.active[i].flags.valid)
break;
+
tz->trips.active[i].flags.enabled = 1;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
result = acpi_bus_update_power(
@@ -1119,7 +1118,6 @@ static int acpi_thermal_resume(struct device *dev)
#endif
static int thermal_act(const struct dmi_system_id *d) {
-
if (act == 0) {
pr_notice("%s detected: disabling all active thermal trip points\n",
d->ident);
@@ -1128,14 +1126,12 @@ static int thermal_act(const struct dmi_system_id *d) {
return 0;
}
static int thermal_nocrt(const struct dmi_system_id *d) {
-
pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
d->ident);
nocrt = 1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
-
if (tzp == 0) {
pr_notice("%s detected: enabling thermal zone polling\n",
d->ident);
@@ -1144,7 +1140,6 @@ static int thermal_tzp(const struct dmi_system_id *d) {
return 0;
}
static int thermal_psv(const struct dmi_system_id *d) {
-
if (psv == 0) {
pr_notice("%s detected: disabling all passive thermal trip points\n",
d->ident);
@@ -1195,7 +1190,7 @@ static const struct dmi_system_id thermal_dmi_table[] __initconst = {
static int __init acpi_thermal_init(void)
{
- int result = 0;
+ int result;
dmi_check_system(thermal_dmi_table);
@@ -1222,8 +1217,6 @@ static void __exit acpi_thermal_exit(void)
{
acpi_bus_unregister_driver(&acpi_thermal_driver);
destroy_workqueue(acpi_thermal_pm_queue);
-
- return;
}
module_init(acpi_thermal_init);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 5a7b8065e77f..2ea14648a661 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -794,6 +794,30 @@ bool acpi_dev_hid_uid_match(struct acpi_device *adev,
EXPORT_SYMBOL(acpi_dev_hid_uid_match);
/**
+ * acpi_dev_uid_to_integer - treat ACPI device _UID as integer
+ * @adev: ACPI device to get _UID from
+ * @integer: output buffer for integer
+ *
+ * Treats the _UID of @adev as an integer and converts it into @integer.
+ *
+ * Returns 0 on success, or negative error code otherwise.
+ */
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
+{
+ const char *uid;
+
+ if (!adev)
+ return -ENODEV;
+
+ uid = acpi_device_uid(adev);
+ if (!uid)
+ return -ENODATA;
+
+ return kstrtou64(uid, 0, integer);
+}
+EXPORT_SYMBOL(acpi_dev_uid_to_integer);
+
+/**
* acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
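A hedged sketch of a typical use of the new helper, e.g. deriving a numeric instance index from _UID with a fallback; the wrapper is illustrative.

/* Sketch: interpret an ACPI device's _UID as an index, falling back to 0. */
static u64 example_instance_index(struct acpi_device *adev)
{
	u64 uid;

	/* -ENODEV, -ENODATA or a non-numeric _UID all map to index 0 here. */
	if (acpi_dev_uid_to_integer(adev, &uid))
		return 0;

	return uid;
}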
@@ -878,7 +902,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_dev_match_info match = {};
struct device *dev;
- strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
@@ -911,7 +935,7 @@ acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const cha
struct acpi_dev_match_info match = {};
struct device *dev;
- strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ strscpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
@@ -961,7 +985,7 @@ EXPORT_SYMBOL(acpi_video_backlight_string);
static int __init acpi_backlight(char *str)
{
- strlcpy(acpi_video_backlight_string, str,
+ strscpy(acpi_video_backlight_string, str,
sizeof(acpi_video_backlight_string));
return 1;
}
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 5d7f38016a24..0d9064a9804c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -17,8 +17,9 @@
* Otherwise vendor specific drivers like thinkpad_acpi, asus-laptop,
* sony_acpi,... can take care about backlight brightness.
*
- * Backlight drivers can use acpi_video_get_backlight_type() to determine
- * which driver should handle the backlight.
+ * Backlight drivers can use acpi_video_get_backlight_type() to determine which
+ * driver should handle the backlight. RAW/GPU-driver backlight drivers must
+ * use the acpi_video_backlight_use_native() helper for this.
*
* If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m)
* this file will not be compiled and acpi_video_get_backlight_type() will
@@ -27,20 +28,16 @@
#include <linux/export.h>
#include <linux/acpi.h>
+#include <linux/apple-gmux.h>
#include <linux/backlight.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
-void acpi_video_unregister_backlight(void);
-
-static bool backlight_notifier_registered;
-static struct notifier_block backlight_nb;
-static struct work_struct backlight_notify_work;
-
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -78,6 +75,36 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
+/* This depends on ACPI_WMI which is X86 only */
+#ifdef CONFIG_X86
+static bool nvidia_wmi_ec_supported(void)
+{
+ struct wmi_brightness_args args = {
+ .mode = WMI_BRIGHTNESS_MODE_GET,
+ .val = 0,
+ .ret = 0,
+ };
+ struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0,
+ WMI_BRIGHTNESS_METHOD_SOURCE, &buf, &buf);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ /*
+ * If brightness is handled by the EC then nvidia-wmi-ec-backlight
+ * should be used, else the GPU driver(s) should be used.
+ */
+ return args.ret == WMI_BRIGHTNESS_SOURCE_EC;
+}
+#else
+static bool nvidia_wmi_ec_supported(void)
+{
+ return false;
+}
+#endif
+
/* Force to use vendor driver when the ACPI device is known to be
* buggy */
static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -105,63 +132,143 @@ static int video_detect_force_none(const struct dmi_system_id *d)
}
static const struct dmi_system_id video_detect_dmi_table[] = {
- /* On Samsung X360, the BIOS will set a flag (VDRV) if generic
- * ACPI backlight device is used. This flag will definitively break
- * the backlight interface (even the vendor interface) until next
- * reboot. It's why we should prevent video.ko from being used here
- * and we can't rely on a later call to acpi_video_unregister().
- */
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */
.callback = video_detect_force_vendor,
- /* X360 */
+ /* Acer KAV80 */
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
- DMI_MATCH(DMI_BOARD_NAME, "X360"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Asus UL30VT */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Asus UL30VT */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Asus UL30A */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Asus UL30A */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
{
- .callback = video_detect_force_vendor,
- /* GIGABYTE GB-BXBT-2807 */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Asus X55U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus X101CH */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus X401U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus X501U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Asus 1015CX */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* GIGABYTE GB-BXBT-2807 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Sony VPCEH3U1E */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Samsung N150/N210/N220 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Samsung NF110/NF210/NF310 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
+ DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Samsung NC210 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+ DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ /* Sony VPCEH3U1E */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
},
},
{
- .callback = video_detect_force_vendor,
- /* Xiaomi Mi Pad 2 */
- .matches = {
+ .callback = video_detect_force_vendor,
+ /* Xiaomi Mi Pad 2 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
/*
+	 * Toshiba models with a Transflective display; these need to use
+	 * the toshiba_acpi vendor driver for proper Transflective handling.
+ */
+ {
+ .callback = video_detect_force_vendor,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R500"),
+ },
+ },
+ {
+ .callback = video_detect_force_vendor,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R600"),
+ },
+ },
+
+ /*
* These models have a working acpi_video backlight control, and using
* native backlight causes a regression where backlight does not work
* when userspace is not handling brightness key events. Disable
@@ -390,6 +497,41 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1012674 */
+ .callback = video_detect_force_native,
+ /* Acer Aspire 5741 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
+ },
+ },
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42993 */
+ .callback = video_detect_force_native,
+ /* Acer Aspire 5750 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+ },
+ },
+ {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42833 */
+ .callback = video_detect_force_native,
+ /* Acer Extensa 5235 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* Acer TravelMate 4750 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
+ },
+ },
+ {
/* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */
.callback = video_detect_force_native,
/* Acer TravelMate 5735Z */
@@ -400,120 +542,109 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
- .callback = video_detect_force_native,
- /* ASUSTeK COMPUTER INC. GA401 */
- .matches = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=36322 */
+ .callback = video_detect_force_native,
+ /* Acer TravelMate 5760 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
+ /* ASUSTeK COMPUTER INC. GA401 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA401"),
},
},
{
- .callback = video_detect_force_native,
- /* ASUSTeK COMPUTER INC. GA502 */
- .matches = {
+ .callback = video_detect_force_native,
+ /* ASUSTeK COMPUTER INC. GA502 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA502"),
},
},
{
- .callback = video_detect_force_native,
- /* ASUSTeK COMPUTER INC. GA503 */
- .matches = {
+ .callback = video_detect_force_native,
+ /* ASUSTeK COMPUTER INC. GA503 */
+ .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GA503"),
},
},
- /*
- * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a
- * working native and video interface. However the default detection
- * mechanism first registers the video interface before unregistering
- * it again and switching to the native interface during boot. This
- * results in a dangling SBIOS request for backlight change for some
- * reason, causing the backlight to switch to ~2% once per boot on the
- * first power cord connect or disconnect event. Setting the native
- * interface explicitly circumvents this buggy behaviour, by avoiding
- * the unregistering process.
- */
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+ .callback = video_detect_force_native,
+ /* Asus UX303UB */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
+ .callback = video_detect_force_native,
+ /* Samsung N150P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150P"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xRU",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
+ .callback = video_detect_force_native,
+ /* Samsung N145P/N250P/N260P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "Clevo NL5xNU",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
+ .callback = video_detect_force_native,
+ /* Samsung N250P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+ DMI_MATCH(DMI_BOARD_NAME, "N250P"),
},
},
+
/*
- * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10,
- * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo
- * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description
- * above.
+ * These Toshibas have a broken acpi-video interface for brightness
+ * control. They also have an issue where the panel is off after
+ * suspend until a special firmware call is made to turn it back
+ * on. This is handled by the toshiba_acpi kernel module, so that
+ * module must be enabled for these models to work correctly.
*/
{
- .callback = video_detect_force_native,
- .ident = "TongFang PF5PU1G",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "TongFang PF4NU1F",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "TongFang PF4NU1F",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"),
- },
- },
- {
- .callback = video_detect_force_native,
- .ident = "TongFang PF5NU1G",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"),
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .callback = video_detect_force_native,
+ /* Toshiba Portégé R700 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "TongFang PF5NU1G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"),
+ /* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ /* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .callback = video_detect_force_native,
+ /* Toshiba Satellite/Portégé R830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
},
},
{
- .callback = video_detect_force_native,
- .ident = "TongFang PF5LUXG",
- .matches = {
- DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ .callback = video_detect_force_native,
+ /* Toshiba Satellite/Portégé Z830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
},
},
+
/*
* Desktops which falsely report a backlight and which our heuristics
* for this do not catch.
@@ -537,43 +668,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
{ },
};
-/* This uses a workqueue to avoid various locking ordering issues */
-static void acpi_video_backlight_notify_work(struct work_struct *work)
-{
- if (acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
-}
-
-static int acpi_video_backlight_notify(struct notifier_block *nb,
- unsigned long val, void *bd)
-{
- struct backlight_device *backlight = bd;
-
- /* A raw bl registering may change video -> native */
- if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED)
- schedule_work(&backlight_notify_work);
-
- return NOTIFY_OK;
-}
-
/*
* Determine which type of backlight interface to use on this system,
* First check cmdline, then dmi quirks, then do autodetect.
- *
- * The autodetect order is:
- * 1) Is the acpi-video backlight interface supported ->
- * no, use a vendor interface
- * 2) Is this a win8 "ready" BIOS and do we have a native interface ->
- * yes, use a native interface
- * 3) Else use the acpi-video interface
- *
- * Arguably the native on win8 check should be done first, but that would
- * be a behavior change, which may causes issues.
*/
-enum acpi_backlight_type acpi_video_get_backlight_type(void)
+static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
{
static DEFINE_MUTEX(init_mutex);
+ static bool nvidia_wmi_ec_present;
+ static bool native_available;
static bool init_done;
static long video_caps;
@@ -585,48 +688,60 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
- INIT_WORK(&backlight_notify_work,
- acpi_video_backlight_notify_work);
- backlight_nb.notifier_call = acpi_video_backlight_notify;
- backlight_nb.priority = 0;
- if (backlight_register_notifier(&backlight_nb) == 0)
- backlight_notifier_registered = true;
+ nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
init_done = true;
}
+ if (native)
+ native_available = true;
mutex_unlock(&init_mutex);
+ /*
+	 * The heuristics / detection steps below are in descending order of
+	 * precedence. The command line takes precedence over anything else.
+ */
if (acpi_backlight_cmdline != acpi_backlight_undef)
return acpi_backlight_cmdline;
+ /* DMI quirks override any autodetection. */
if (acpi_backlight_dmi != acpi_backlight_undef)
return acpi_backlight_dmi;
- if (!(video_caps & ACPI_VIDEO_BACKLIGHT))
- return acpi_backlight_vendor;
+ /* Special cases such as nvidia_wmi_ec and apple gmux. */
+ if (nvidia_wmi_ec_present)
+ return acpi_backlight_nvidia_wmi_ec;
- if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW))
- return acpi_backlight_native;
+ if (apple_gmux_present())
+ return acpi_backlight_apple_gmux;
+
+ /* On systems with ACPI video use either native or ACPI video. */
+ if (video_caps & ACPI_VIDEO_BACKLIGHT) {
+ /*
+ * Windows 8 and newer no longer use the ACPI video interface,
+ * so it often does not work. If the ACPI tables are written
+ * for win8 and native brightness ctl is available, use that.
+ *
+		 * The native check is deliberately placed inside the acpi-video
+		 * block: on older devices without acpi-video support, native is
+		 * usually not the best choice.
+ */
+ if (acpi_osi_is_win8() && native_available)
+ return acpi_backlight_native;
+ else
+ return acpi_backlight_video;
+ }
- return acpi_backlight_video;
+ /* No ACPI video (old hw), use vendor specific fw methods. */
+ return acpi_backlight_vendor;
}
-EXPORT_SYMBOL(acpi_video_get_backlight_type);
-/*
- * Set the preferred backlight interface type based on DMI info.
- * This function allows DMI blacklists to be implemented by external
- * platform drivers instead of putting a big blacklist in video_detect.c
- */
-void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
- acpi_backlight_dmi = type;
- /* Remove acpi-video backlight interface if it is no longer desired */
- if (acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ return __acpi_video_get_backlight_type(false);
}
-EXPORT_SYMBOL(acpi_video_set_dmi_backlight_type);
+EXPORT_SYMBOL(acpi_video_get_backlight_type);
-void __exit acpi_video_detect_exit(void)
+bool acpi_video_backlight_use_native(void)
{
- if (backlight_notifier_registered)
- backlight_unregister_notifier(&backlight_nb);
+ return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
}
+EXPORT_SYMBOL(acpi_video_backlight_use_native);
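For context, an illustrative sketch of the pattern native (GPU) backlight drivers are expected to follow with the new helper; the function name and message are placeholders, not part of this patch.

/* Sketch: gate native backlight registration on the ACPI video heuristics. */
static bool example_should_register_native_backlight(struct device *dev)
{
	/*
	 * Calling this also records that a native driver is present, which
	 * the win8 autodetection above relies on.
	 */
	if (acpi_video_backlight_use_native())
		return true;

	dev_info(dev, "another backlight interface is preferred, not registering native one\n");
	return false;
}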
diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
index 6132092dab2a..ed752cbbe636 100644
--- a/drivers/acpi/viot.c
+++ b/drivers/acpi/viot.c
@@ -19,7 +19,6 @@
#define pr_fmt(fmt) "ACPI: VIOT: " fmt
#include <linux/acpi_viot.h>
-#include <linux/dma-iommu.h>
#include <linux/fwnode.h>
#include <linux/iommu.h>
#include <linux/list.h>
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index c285c91a5e9c..8812ecd03d55 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -8,6 +8,7 @@
#include <linux/bitmap.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/uuid.h>
+#include "../internal.h"
/* Apple _DSM device properties GUID */
static const guid_t apple_prp_guid =
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index f9ac12b778e6..5350c73564b6 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/suspend.h>
#include "../sleep.h"
@@ -27,6 +28,10 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+static bool prefer_microsoft_dsm_guid __read_mostly;
+module_param(prefer_microsoft_dsm_guid, bool, 0644);
+MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
+
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -363,40 +368,132 @@ out:
return ret;
}
+struct amd_lps0_hid_device_data {
+ const unsigned int rev_id;
+ const bool check_off_by_one;
+ const bool prefer_amd_guid;
+};
+
+static const struct amd_lps0_hid_device_data amd_picasso = {
+ .rev_id = 0,
+ .check_off_by_one = true,
+ .prefer_amd_guid = false,
+};
+
+static const struct amd_lps0_hid_device_data amd_cezanne = {
+ .rev_id = 0,
+ .check_off_by_one = false,
+ .prefer_amd_guid = false,
+};
+
+static const struct amd_lps0_hid_device_data amd_rembrandt = {
+ .rev_id = 2,
+ .check_off_by_one = false,
+ .prefer_amd_guid = true,
+};
+
+static const struct acpi_device_id amd_hid_ids[] = {
+ {"AMD0004", (kernel_ulong_t)&amd_picasso, },
+ {"AMD0005", (kernel_ulong_t)&amd_picasso, },
+ {"AMDI0005", (kernel_ulong_t)&amd_picasso, },
+ {"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
+ {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
+ {}
+};
+
+static int lps0_prefer_microsoft(const struct dmi_system_id *id)
+{
+ pr_debug("Preferring Microsoft GUID.\n");
+ prefer_microsoft_dsm_guid = true;
+ return 0;
+}
+
+static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
+ {
+ /*
+ * ASUS TUF Gaming A17 FA707RE
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216101
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
+ },
+ },
+ {
+ /* ASUS ROG Zephyrus G14 (2022) */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
+ },
+ },
+ {
+ /*
+ * Lenovo Yoga Slim 7 Pro X 14ARH7
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82"),
+ },
+ },
+ {
+ /*
+ * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
+ },
+ },
+ {
+ /*
+ * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+ */
+ .callback = lps0_prefer_microsoft,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
+ },
+ },
+ {}
+};
+
static int lps0_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
if (lps0_device_handle)
return 0;
+ lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
+ ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
+ &lps0_dsm_guid_microsoft);
if (acpi_s2idle_vendor_amd()) {
- /* AMD0004, AMD0005, AMDI0005:
- * - Should use rev_id 0x0
- * - function mask > 0x3: Should use AMD method, but has off by one bug
- * - function mask = 0x3: Should use Microsoft method
- * AMDI0006:
- * - should use rev_id 0x0
- * - function mask = 0x3: Should use Microsoft method
- * AMDI0007:
- * - Should use rev_id 0x2
- * - Should only use AMD method
- */
- const char *hid = acpi_device_hid(adev);
- rev_id = strcmp(hid, "AMDI0007") ? 0 : 2;
+ static const struct acpi_device_id *dev_id;
+ const struct amd_lps0_hid_device_data *data;
+
+ for (dev_id = &amd_hid_ids[0]; dev_id->id[0]; dev_id++)
+ if (acpi_dev_hid_uid_match(adev, dev_id->id, NULL))
+ break;
+ if (dev_id->id[0])
+ data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
+ else
+ data = &amd_rembrandt;
+ rev_id = data->rev_id;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
- ACPI_LPS0_DSM_UUID_MICROSOFT, 0,
- &lps0_dsm_guid_microsoft);
- if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
- !strcmp(hid, "AMD0005") ||
- !strcmp(hid, "AMDI0005"))) {
+ if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
- } else if (lps0_dsm_func_mask_microsoft > 0 &&
- (!strcmp(hid, "AMDI0007") ||
- !strcmp(hid, "AMDI0008"))) {
+ } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
+ !prefer_microsoft_dsm_guid) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
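To make the check_off_by_one adjustment above concrete, here is the arithmetic on a hypothetical mask value; the quirk shifts every reported function bit up by one position and marks function 0 as present:

	/*
	 * reported lps0_dsm_func_mask = 0x7
	 * adjusted mask = (0x7 << 1) | 0x1 = 0xf
	 */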
@@ -404,7 +501,8 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ if (!prefer_microsoft_dsm_guid)
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
@@ -486,6 +584,19 @@ int acpi_s2idle_prepare_late(void)
return 0;
}
+void acpi_s2idle_check(void)
+{
+ struct acpi_s2idle_dev_ops *handler;
+
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+ list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
+ if (handler->check)
+ handler->check();
+ }
+}
+
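A minimal consumer-side sketch of the new check stage, not part of this patch and assuming struct acpi_s2idle_dev_ops now exposes the .check member used above; my_pmc_check and my_pmc_register_s2idle are hypothetical names:

#include <linux/acpi.h>

static void my_pmc_check(void)
{
	/* e.g. latch pre-suspend statistics right before s2idle entry */
}

static struct acpi_s2idle_dev_ops my_pmc_s2idle_dev_ops = {
	.check = my_pmc_check,
};

static int my_pmc_register_s2idle(void)
{
	/* Fails with -ENODEV if there is no usable LPS0 device */
	return acpi_register_lps0_dev(&my_pmc_s2idle_dev_ops);
}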
void acpi_s2idle_restore_early(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -527,26 +638,30 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
.prepare_late = acpi_s2idle_prepare_late,
+ .check = acpi_s2idle_check,
.wake = acpi_s2idle_wake,
.restore_early = acpi_s2idle_restore_early,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
-void acpi_s2idle_setup(void)
+void __init acpi_s2idle_setup(void)
{
+ dmi_check_system(s2idle_dmi_table);
acpi_scan_add_handler(&lps0_handler);
s2idle_set_ops(&acpi_s2idle_ops_lps0);
}
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
+ unsigned int sleep_flags;
+
if (!lps0_device_handle || sleep_no_lps0)
return -ENODEV;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
list_add(&arg->list_node, &lps0_s2idle_devops_head);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
return 0;
}
@@ -554,12 +669,14 @@ EXPORT_SYMBOL_GPL(acpi_register_lps0_dev);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
+ unsigned int sleep_flags;
+
if (!lps0_device_handle || sleep_no_lps0)
return;
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
list_del(&arg->list_node);
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL_GPL(acpi_unregister_lps0_dev);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 664070fc8349..f8a2cbdc0ce2 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -207,9 +207,26 @@ static const struct x86_cpu_id storage_d3_cpu_ids[] = {
{}
};
+static const struct dmi_system_id force_storage_d3_dmi[] = {
+ {
+ /*
+ * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
+ * but .NVME is needed to get StorageD3Enable node
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
+ }
+ },
+ {}
+};
+
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
+
+ return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
}
/*
@@ -351,11 +368,17 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
struct acpi_device *adev = ACPI_COMPANION(controller_parent);
const struct dmi_system_id *dmi_id;
long quirks = 0;
+ u64 uid;
+ int ret;
*skip = false;
- /* !dev_is_platform() to not match on PNP enumerated debug UARTs */
- if (!adev || !adev->pnp.unique_id || !dev_is_platform(controller_parent))
+ ret = acpi_dev_uid_to_integer(adev, &uid);
+ if (ret)
+ return 0;
+
+	/* Don't match on PNP enumerated debug UARTs */
+ if (!dev_is_platform(controller_parent))
return 0;
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
@@ -363,10 +386,10 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
quirks = (unsigned long)dmi_id->driver_data;
if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
- if (!strcmp(adev->pnp.unique_id, "1"))
+ if (uid == 1)
return -ENODEV; /* Create tty cdev instead of serdev */
- if (!strcmp(adev->pnp.unique_id, "2"))
+ if (uid == 2)
*skip = true;
}
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 32b0e0b930c1..110a535648d2 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -209,6 +209,7 @@ static int amba_match(struct device *dev, struct device_driver *drv)
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
+ mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
int ret = amba_read_periphid(pcdev);
@@ -218,11 +219,14 @@ static int amba_match(struct device *dev, struct device_driver *drv)
* permanent failure in reading pid and cid, simply map it to
* -EPROBE_DEFER.
*/
- if (ret)
+ if (ret) {
+ mutex_unlock(&pcdev->periphid_lock);
return -EPROBE_DEFER;
+ }
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
}
+ mutex_unlock(&pcdev->periphid_lock);
/* When driver_override is set, only bind to the matching driver */
if (pcdev->driver_override)
@@ -532,6 +536,7 @@ static void amba_device_release(struct device *dev)
if (d->res.parent)
release_resource(&d->res);
+ mutex_destroy(&d->periphid_lock);
kfree(d);
}
@@ -584,6 +589,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->res.name = dev_name(&dev->dev);
+ mutex_init(&dev->periphid_lock);
}
/**
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c964d7c8c384..880224ec6abb 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1385,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+		 * Clean up the failed reference here as the target
+		 * could already be dead and have released its
+		 * references. Calling binder_cleanup_ref_olocked() on
+		 * the new reference with strong=0 and a tmp_ref held
+		 * will not decrement the node. The new_ref gets
+		 * kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
@@ -4247,10 +4259,9 @@ static int binder_wait_for_work(struct binder_thread *thread,
struct binder_proc *proc = thread->proc;
int ret = 0;
- freezer_do_not_count();
binder_inner_proc_lock(proc);
for (;;) {
- prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
if (binder_has_work_ilocked(thread, do_proc_work))
break;
if (do_proc_work)
@@ -4267,7 +4278,6 @@ static int binder_wait_for_work(struct binder_thread *thread,
}
finish_wait(&thread->wait, &wait);
binder_inner_proc_unlock(proc);
- freezer_count();
return ret;
}
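As a side note, the freezer_do_not_count()/freezer_count() bracketing is replaced by marking the sleep itself freezable. A schematic, self-contained sketch of the resulting wait pattern, not taken from binder (wq, event and the function name are illustrative):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

static int wait_for_event_freezable_sketch(wait_queue_head_t *wq, bool *event)
{
	DEFINE_WAIT(wait);

	/* TASK_FREEZABLE lets the freezer treat the sleeping task as frozen */
	prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE | TASK_FREEZABLE);
	if (!*event)
		schedule();
	finish_wait(wq, &wait);

	return signal_pending(current) ? -ERESTARTSYS : 0;
}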
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 51f4e1c5cd01..1c39cfce32fa 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -208,8 +208,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
- mm = alloc->vma_vm_mm;
+ if (need_mm && mmget_not_zero(alloc->mm))
+ mm = alloc->mm;
if (mm) {
mmap_read_lock(mm);
@@ -309,35 +309,13 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}
-
-static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
-{
- unsigned long vm_start = 0;
-
- /*
- * Allow clearing the vma with holding just the read lock to allow
- * munmapping downgrade of the write lock before freeing and closing the
- * file using binder_alloc_vma_close().
- */
- if (vma) {
- vm_start = vma->vm_start;
- alloc->vma_vm_mm = vma->vm_mm;
- mmap_assert_write_locked(alloc->vma_vm_mm);
- } else {
- mmap_assert_locked(alloc->vma_vm_mm);
- }
-
- alloc->vma_addr = vm_start;
-}
-
static inline struct vm_area_struct *binder_alloc_get_vma(
struct binder_alloc *alloc)
{
struct vm_area_struct *vma = NULL;
if (alloc->vma_addr)
- vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+ vma = vma_lookup(alloc->mm, alloc->vma_addr);
return vma;
}
@@ -402,15 +380,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
- mmap_read_lock(alloc->vma_vm_mm);
+ mmap_read_lock(alloc->mm);
if (!binder_alloc_get_vma(alloc)) {
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -794,8 +772,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- binder_alloc_set_vma(alloc, vma);
- mmgrab(alloc->vma_vm_mm);
+ alloc->vma_addr = vma->vm_start;
return 0;
@@ -825,7 +802,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers = 0;
mutex_lock(&alloc->mutex);
BUG_ON(alloc->vma_addr &&
- vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
+ vma_lookup(alloc->mm, alloc->vma_addr));
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -875,8 +852,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
kfree(alloc->pages);
}
mutex_unlock(&alloc->mutex);
- if (alloc->vma_vm_mm)
- mmdrop(alloc->vma_vm_mm);
+ if (alloc->mm)
+ mmdrop(alloc->mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -933,13 +910,13 @@ void binder_alloc_print_pages(struct seq_file *m,
* read inconsistent state.
*/
- mmap_read_lock(alloc->vma_vm_mm);
+ mmap_read_lock(alloc->mm);
if (binder_alloc_get_vma(alloc) == NULL) {
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
goto uninitialized;
}
- mmap_read_unlock(alloc->vma_vm_mm);
+ mmap_read_unlock(alloc->mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
@@ -985,7 +962,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- binder_alloc_set_vma(alloc, NULL);
+ alloc->vma_addr = 0;
}
/**
@@ -1022,7 +999,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- mm = alloc->vma_vm_mm;
+ mm = alloc->mm;
if (!mmget_not_zero(mm))
goto err_mmget;
if (!mmap_read_trylock(mm))
@@ -1091,6 +1068,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
+ alloc->mm = current->mm;
+ mmgrab(alloc->mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 1e4fd37af5e0..0f811ac4bcff 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -74,11 +74,10 @@ struct binder_lru_page {
/**
* struct binder_alloc - per-binder proc state for binder allocator
- * @vma: vm_area_struct passed to mmap_handler
- * (invarient after mmap)
- * @tsk: tid for task that called init for this proc
- * (invariant after init)
- * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap)
+ * @mutex: protects binder_alloc fields
+ * @vma_addr: vm_area_struct->vm_start passed to mmap_handler
+ * (invariant after mmap)
+ * @mm: copy of task->mm (invariant after open)
* @buffer: base of per-proc address space mapped via mmap
* @buffers: list of all buffers for this proc
* @free_buffers: rb tree of buffers available for allocation
@@ -101,7 +100,7 @@ struct binder_lru_page {
struct binder_alloc {
struct mutex mutex;
unsigned long vma_addr;
- struct mm_struct *vma_vm_mm;
+ struct mm_struct *mm;
void __user *buffer;
struct list_head buffers;
struct rb_root free_buffers;
@@ -109,7 +108,6 @@ struct binder_alloc {
size_t free_async_space;
struct binder_lru_page *pages;
size_t buffer_size;
- uint32_t buffer_free;
int pid;
size_t pages_high;
bool oneway_spam_detected;
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 588d753a7a19..09b2ce7e4c34 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -39,7 +39,6 @@
#define FIRST_INODE 1
#define SECOND_INODE 2
#define INODE_OFFSET 3
-#define INTSTRLEN 21
#define BINDERFS_MAX_MINOR (1U << MINORBITS)
/* Ensure that the initial ipc namespace always has devices available. */
#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
@@ -340,22 +339,10 @@ static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-static void binderfs_put_super(struct super_block *sb)
-{
- struct binderfs_info *info = sb->s_fs_info;
-
- if (info && info->ipc_ns)
- put_ipc_ns(info->ipc_ns);
-
- kfree(info);
- sb->s_fs_info = NULL;
-}
-
static const struct super_operations binderfs_super_ops = {
.evict_inode = binderfs_evict_inode,
.show_options = binderfs_show_options,
.statfs = simple_statfs,
- .put_super = binderfs_put_super,
};
static inline bool is_binderfs_control_device(const struct dentry *dentry)
@@ -785,11 +772,27 @@ static int binderfs_init_fs_context(struct fs_context *fc)
return 0;
}
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+	 * struct binderfs_info is still needed during inode eviction,
+	 * so wipe the super_block first and only then free struct binderfs_info.
+ */
+ kill_litter_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
static struct file_system_type binder_fs_type = {
.name = "binder",
.init_fs_context = binderfs_init_fs_context,
.parameters = binderfs_fs_parameters,
- .kill_sb = kill_litter_super,
+ .kill_sb = binderfs_kill_super,
.fs_flags = FS_USERNS_MOUNT,
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1c9f4fb2595d..36833a862998 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -176,9 +176,19 @@ config AHCI_DM816
If unsure, say N.
+config AHCI_DWC
+ tristate "Synopsys DWC AHCI SATA support"
+ select SATA_HOST
+ select MFD_SYSCON if (MIPS_BAIKAL_T1 || COMPILE_TEST)
+ help
+ This option enables support for the Synopsys DWC AHCI SATA
+ controller implementation.
+
+ If unsure, say N.
+
config AHCI_ST
tristate "ST AHCI SATA support"
- depends on ARCH_STI
+ depends on ARCH_STI || COMPILE_TEST
select SATA_HOST
help
This option enables support for ST AHCI SATA controller.
@@ -1102,8 +1112,7 @@ config PATA_PCMCIA
If unsure, say N.
config PATA_PLATFORM
- tristate "Generic platform device PATA support"
- depends on EXPERT || PPC || HAVE_PATA_PLATFORM
+ tristate "Generic platform device PATA support" if HAVE_PATA_PLATFORM
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems.
@@ -1112,7 +1121,8 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && OF
+ depends on OF
+ select PATA_PLATFORM
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index b8aebfb14e82..34623365d9a6 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AHCI_BRCM) += ahci_brcm.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DM816) += ahci_dm816.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_DWC) += ahci_dwc.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MTK) += ahci_mtk.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c1eca72b4575..639de2d75d63 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -657,7 +657,7 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
{
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
- hpriv->force_port_map = 1;
+ hpriv->saved_port_map = 1;
}
/*
@@ -690,7 +690,7 @@ static void ahci_pci_init_controller(struct ata_host *host)
mv = 2;
else
mv = 4;
- port_mmio = __ahci_port_base(host, mv);
+ port_mmio = __ahci_port_base(hpriv, mv);
writel(0, port_mmio + PORT_IRQ_MASK);
@@ -1609,15 +1609,12 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap,
goto update_policy;
}
-#ifdef CONFIG_ACPI
- if (policy > ATA_LPM_MED_POWER &&
- (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ if (policy > ATA_LPM_MED_POWER && pm_suspend_default_s2idle()) {
if (hpriv->cap & HOST_CAP_PART)
policy = ATA_LPM_MIN_POWER_WITH_PARTIAL;
else if (hpriv->cap & HOST_CAP_SSC)
policy = ATA_LPM_MIN_POWER;
}
-#endif
update_policy:
if (policy >= ATA_LPM_UNKNOWN && policy <= ATA_LPM_MIN_POWER)
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ad11a4c52fbe..da7ee8bec165 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -38,7 +38,6 @@
enum {
AHCI_MAX_PORTS = 32,
- AHCI_MAX_CLKS = 5,
AHCI_MAX_SG = 168, /* hardware max is 64K */
AHCI_DMA_BOUNDARY = 0xffffffff,
AHCI_MAX_CMDS = 32,
@@ -139,7 +138,7 @@ enum {
PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
- PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
+ PORT_IRQ_DMPS = (1 << 7), /* mechanical presence status */
PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
@@ -167,6 +166,8 @@ enum {
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
PORT_CMD_ESP = (1 << 21), /* External Sata Port */
+ PORT_CMD_CPD = (1 << 20), /* Cold Presence Detection */
+ PORT_CMD_MPSP = (1 << 19), /* Mechanical Presence Switch */
PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
@@ -182,6 +183,10 @@ enum {
PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+ /* PORT_CMD capabilities mask */
+ PORT_CMD_CAP = PORT_CMD_HPCP | PORT_CMD_MPSP |
+ PORT_CMD_CPD | PORT_CMD_ESP | PORT_CMD_FBSCP,
+
/* PORT_FBS bits */
PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
@@ -323,7 +328,6 @@ struct ahci_port_priv {
struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
- u32 force_port_map; /* force port map */
u32 mask_port_map; /* mask out particular bits */
void __iomem * mmio; /* bus-independent mem map */
@@ -334,12 +338,15 @@ struct ahci_host_priv {
u32 saved_cap; /* saved initial cap */
u32 saved_cap2; /* saved initial cap2 */
u32 saved_port_map; /* saved initial port_map */
+ u32 saved_port_cap[AHCI_MAX_PORTS]; /* saved port_cap */
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
u32 remapped_nvme; /* NVMe remapped device count */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
- struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
+ unsigned int n_clks;
+ struct clk_bulk_data *clks; /* Optional */
+ unsigned int f_rsts;
struct reset_control *rsts; /* Optional */
struct regulator **target_pwrs; /* Optional */
struct regulator *ahci_regulator;/* Optional */
@@ -426,10 +433,9 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
void ahci_error_handler(struct ata_port *ap);
u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
-static inline void __iomem *__ahci_port_base(struct ata_host *host,
+static inline void __iomem *__ahci_port_base(struct ahci_host_priv *hpriv,
unsigned int port_no)
{
- struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
return mmio + 0x100 + (port_no * 0x80);
@@ -437,7 +443,9 @@ static inline void __iomem *__ahci_port_base(struct ata_host *host,
static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
- return __ahci_port_base(ap->host, ap->port_no);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
+ return __ahci_port_base(hpriv, ap->port_no);
}
static inline int ahci_nr_ports(u32 cap)
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 052c28e250aa..dc8a019b8340 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -163,7 +163,6 @@ static int ahci_da850_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
void __iomem *pwrdn_reg;
struct resource *res;
- struct clk *clk;
u32 mpy;
int rc;
@@ -172,36 +171,28 @@ static int ahci_da850_probe(struct platform_device *pdev)
return PTR_ERR(hpriv);
/*
- * Internally ahci_platform_get_resources() calls clk_get(dev, NULL)
- * when trying to obtain the functional clock. This SATA controller
- * uses two clocks for which we specify two connection ids. If we don't
- * have the functional clock at this point - call clk_get() again with
- * con_id = "fck".
+	 * Internally ahci_platform_get_resources() either gets the clocks
+	 * in bulk or falls back to a single clk_get_optional() call.
+	 * This AHCI SATA controller uses two clocks: the functional clock
+	 * with the "fck" connection id and the external reference clock
+	 * with the "refclk" id. If we didn't get both of them, retry the
+	 * clock lookup with the explicitly specified ids.
*/
- if (!hpriv->clks[0]) {
- clk = clk_get(dev, "fck");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- hpriv->clks[0] = clk;
- }
-
- /*
- * The second clock used by ahci-da850 is the external REFCLK. If we
- * didn't get it from ahci_platform_get_resources(), let's try to
- * specify the con_id in clk_get().
- */
- if (!hpriv->clks[1]) {
- clk = clk_get(dev, "refclk");
- if (IS_ERR(clk)) {
- dev_err(dev, "unable to obtain the reference clock");
- return -ENODEV;
- }
-
- hpriv->clks[1] = clk;
+ if (hpriv->n_clks < 2) {
+ hpriv->clks = devm_kcalloc(dev, 2, sizeof(*hpriv->clks), GFP_KERNEL);
+ if (!hpriv->clks)
+ return -ENOMEM;
+
+ hpriv->clks[0].id = "fck";
+ hpriv->clks[1].id = "refclk";
+ hpriv->n_clks = 2;
+
+ rc = devm_clk_bulk_get(dev, hpriv->n_clks, hpriv->clks);
+ if (rc)
+ return rc;
}
- mpy = ahci_da850_calculate_mpy(clk_get_rate(hpriv->clks[1]));
+ mpy = ahci_da850_calculate_mpy(clk_get_rate(hpriv->clks[1].clk));
if (mpy == 0) {
dev_err(dev, "invalid REFCLK multiplier value: 0x%x", mpy);
return -EINVAL;
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index 8a92112dcd59..d26efcd20f64 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -69,12 +69,12 @@ static int ahci_dm816_phy_init(struct ahci_host_priv *hpriv, struct device *dev)
* keep-alive clock and the external reference clock. We need the
* rate of the latter to calculate the correct value of MPY bits.
*/
- if (!hpriv->clks[1]) {
+ if (hpriv->n_clks < 2) {
dev_err(dev, "reference clock not supplied\n");
return -EINVAL;
}
- refclk_rate = clk_get_rate(hpriv->clks[1]);
+ refclk_rate = clk_get_rate(hpriv->clks[1].clk);
if ((refclk_rate % 100) != 0) {
dev_err(dev, "reference clock rate must be divisible by 100\n");
return -EINVAL;
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
new file mode 100644
index 000000000000..8fb66860db31
--- /dev/null
+++ b/drivers/ata/ahci_dwc.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DWC AHCI SATA Platform driver
+ *
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+#include "ahci.h"
+
+#define DRV_NAME "ahci-dwc"
+
+#define AHCI_DWC_FBS_PMPN_MAX 15
+
+/* DWC AHCI SATA controller specific registers */
+#define AHCI_DWC_HOST_OOBR 0xbc
+#define AHCI_DWC_HOST_OOB_WE BIT(31)
+#define AHCI_DWC_HOST_CWMIN_MASK GENMASK(30, 24)
+#define AHCI_DWC_HOST_CWMAX_MASK GENMASK(23, 16)
+#define AHCI_DWC_HOST_CIMIN_MASK GENMASK(15, 8)
+#define AHCI_DWC_HOST_CIMAX_MASK GENMASK(7, 0)
+
+#define AHCI_DWC_HOST_GPCR 0xd0
+#define AHCI_DWC_HOST_GPSR 0xd4
+
+#define AHCI_DWC_HOST_TIMER1MS 0xe0
+#define AHCI_DWC_HOST_TIMV_MASK GENMASK(19, 0)
+
+#define AHCI_DWC_HOST_GPARAM1R 0xe8
+#define AHCI_DWC_HOST_ALIGN_M BIT(31)
+#define AHCI_DWC_HOST_RX_BUFFER BIT(30)
+#define AHCI_DWC_HOST_PHY_DATA_MASK GENMASK(29, 28)
+#define AHCI_DWC_HOST_PHY_RST BIT(27)
+#define AHCI_DWC_HOST_PHY_CTRL_MASK GENMASK(26, 21)
+#define AHCI_DWC_HOST_PHY_STAT_MASK GENMASK(20, 15)
+#define AHCI_DWC_HOST_LATCH_M BIT(14)
+#define AHCI_DWC_HOST_PHY_TYPE_MASK GENMASK(13, 11)
+#define AHCI_DWC_HOST_RET_ERR BIT(10)
+#define AHCI_DWC_HOST_AHB_ENDIAN_MASK GENMASK(9, 8)
+#define AHCI_DWC_HOST_S_HADDR BIT(7)
+#define AHCI_DWC_HOST_M_HADDR BIT(6)
+#define AHCI_DWC_HOST_S_HDATA_MASK GENMASK(5, 3)
+#define AHCI_DWC_HOST_M_HDATA_MASK GENMASK(2, 0)
+
+#define AHCI_DWC_HOST_GPARAM2R 0xec
+#define AHCI_DWC_HOST_FBS_MEM_S BIT(19)
+#define AHCI_DWC_HOST_FBS_PMPN_MASK GENMASK(17, 16)
+#define AHCI_DWC_HOST_FBS_SUP BIT(15)
+#define AHCI_DWC_HOST_DEV_CP BIT(14)
+#define AHCI_DWC_HOST_DEV_MP BIT(13)
+#define AHCI_DWC_HOST_ENCODE_M BIT(12)
+#define AHCI_DWC_HOST_RXOOB_CLK_M BIT(11)
+#define AHCI_DWC_HOST_RXOOB_M BIT(10)
+#define AHCI_DWC_HOST_TXOOB_M BIT(9)
+#define AHCI_DWC_HOST_RXOOB_CLK_MASK GENMASK(8, 0)
+
+#define AHCI_DWC_HOST_PPARAMR 0xf0
+#define AHCI_DWC_HOST_TX_MEM_M BIT(11)
+#define AHCI_DWC_HOST_TX_MEM_S BIT(10)
+#define AHCI_DWC_HOST_RX_MEM_M BIT(9)
+#define AHCI_DWC_HOST_RX_MEM_S BIT(8)
+#define AHCI_DWC_HOST_TXFIFO_DEPTH GENMASK(7, 4)
+#define AHCI_DWC_HOST_RXFIFO_DEPTH GENMASK(3, 0)
+
+#define AHCI_DWC_HOST_TESTR 0xf4
+#define AHCI_DWC_HOST_PSEL_MASK GENMASK(18, 16)
+#define AHCI_DWC_HOST_TEST_IF BIT(0)
+
+#define AHCI_DWC_HOST_VERSIONR 0xf8
+#define AHCI_DWC_HOST_IDR 0xfc
+
+#define AHCI_DWC_PORT_DMACR 0x70
+#define AHCI_DWC_PORT_RXABL_MASK GENMASK(15, 12)
+#define AHCI_DWC_PORT_TXABL_MASK GENMASK(11, 8)
+#define AHCI_DWC_PORT_RXTS_MASK GENMASK(7, 4)
+#define AHCI_DWC_PORT_TXTS_MASK GENMASK(3, 0)
+#define AHCI_DWC_PORT_PHYCR 0x74
+#define AHCI_DWC_PORT_PHYSR 0x78
+
+/* Baikal-T1 AHCI SATA specific registers */
+#define AHCI_BT1_HOST_PHYCR AHCI_DWC_HOST_GPCR
+#define AHCI_BT1_HOST_MPLM_MASK GENMASK(29, 23)
+#define AHCI_BT1_HOST_LOSDT_MASK GENMASK(22, 20)
+#define AHCI_BT1_HOST_CRR BIT(19)
+#define AHCI_BT1_HOST_CRW BIT(18)
+#define AHCI_BT1_HOST_CRCD BIT(17)
+#define AHCI_BT1_HOST_CRCA BIT(16)
+#define AHCI_BT1_HOST_CRDI_MASK GENMASK(15, 0)
+
+#define AHCI_BT1_HOST_PHYSR AHCI_DWC_HOST_GPSR
+#define AHCI_BT1_HOST_CRA BIT(16)
+#define AHCI_BT1_HOST_CRDO_MASK GENMASK(15, 0)
+
+struct ahci_dwc_plat_data {
+ unsigned int pflags;
+ unsigned int hflags;
+ int (*init)(struct ahci_host_priv *hpriv);
+ int (*reinit)(struct ahci_host_priv *hpriv);
+ void (*clear)(struct ahci_host_priv *hpriv);
+};
+
+struct ahci_dwc_host_priv {
+ const struct ahci_dwc_plat_data *pdata;
+ struct platform_device *pdev;
+
+ u32 timv;
+ u32 dmacr[AHCI_MAX_PORTS];
+};
+
+static int ahci_bt1_init(struct ahci_host_priv *hpriv)
+{
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+ int ret;
+
+ /* APB, application and reference clocks are required */
+ if (!ahci_platform_find_clk(hpriv, "pclk") ||
+ !ahci_platform_find_clk(hpriv, "aclk") ||
+ !ahci_platform_find_clk(hpriv, "ref")) {
+ dev_err(&dpriv->pdev->dev, "No system clocks specified\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * Fully reset the SATA AXI and reference clocks domain so the state
+	 * machine starts from scratch, especially if the reference clock
+	 * source has been changed.
+ */
+ ret = ahci_platform_assert_rsts(hpriv);
+ if (ret) {
+ dev_err(&dpriv->pdev->dev, "Couldn't assert the resets\n");
+ return ret;
+ }
+
+ ret = ahci_platform_deassert_rsts(hpriv);
+ if (ret) {
+ dev_err(&dpriv->pdev->dev, "Couldn't de-assert the resets\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct ahci_host_priv *ahci_dwc_get_resources(struct platform_device *pdev)
+{
+ struct ahci_dwc_host_priv *dpriv;
+ struct ahci_host_priv *hpriv;
+
+ dpriv = devm_kzalloc(&pdev->dev, sizeof(*dpriv), GFP_KERNEL);
+ if (!dpriv)
+ return ERR_PTR(-ENOMEM);
+
+ dpriv->pdev = pdev;
+ dpriv->pdata = device_get_match_data(&pdev->dev);
+ if (!dpriv->pdata)
+ return ERR_PTR(-EINVAL);
+
+ hpriv = ahci_platform_get_resources(pdev, dpriv->pdata->pflags);
+ if (IS_ERR(hpriv))
+ return hpriv;
+
+ hpriv->flags |= dpriv->pdata->hflags;
+ hpriv->plat_data = (void *)dpriv;
+
+ return hpriv;
+}
+
+static void ahci_dwc_check_cap(struct ahci_host_priv *hpriv)
+{
+ unsigned long port_map = hpriv->saved_port_map | hpriv->mask_port_map;
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+ bool dev_mp, dev_cp, fbs_sup;
+ unsigned int fbs_pmp;
+ u32 param;
+ int i;
+
+ param = readl(hpriv->mmio + AHCI_DWC_HOST_GPARAM2R);
+ dev_mp = !!(param & AHCI_DWC_HOST_DEV_MP);
+ dev_cp = !!(param & AHCI_DWC_HOST_DEV_CP);
+ fbs_sup = !!(param & AHCI_DWC_HOST_FBS_SUP);
+ fbs_pmp = 5 * FIELD_GET(AHCI_DWC_HOST_FBS_PMPN_MASK, param);
+
+ if (!dev_mp && hpriv->saved_cap & HOST_CAP_MPS) {
+ dev_warn(&dpriv->pdev->dev, "MPS is unsupported\n");
+ hpriv->saved_cap &= ~HOST_CAP_MPS;
+ }
+
+ if (fbs_sup && fbs_pmp < AHCI_DWC_FBS_PMPN_MAX) {
+ dev_warn(&dpriv->pdev->dev, "PMPn is limited up to %u ports\n",
+ fbs_pmp);
+ }
+
+ for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
+ if (!dev_mp && hpriv->saved_port_cap[i] & PORT_CMD_MPSP) {
+ dev_warn(&dpriv->pdev->dev, "MPS incapable port %d\n", i);
+ hpriv->saved_port_cap[i] &= ~PORT_CMD_MPSP;
+ }
+
+ if (!dev_cp && hpriv->saved_port_cap[i] & PORT_CMD_CPD) {
+ dev_warn(&dpriv->pdev->dev, "CPD incapable port %d\n", i);
+ hpriv->saved_port_cap[i] &= ~PORT_CMD_CPD;
+ }
+
+ if (!fbs_sup && hpriv->saved_port_cap[i] & PORT_CMD_FBSCP) {
+ dev_warn(&dpriv->pdev->dev, "FBS incapable port %d\n", i);
+ hpriv->saved_port_cap[i] &= ~PORT_CMD_FBSCP;
+ }
+ }
+}
+
+static void ahci_dwc_init_timer(struct ahci_host_priv *hpriv)
+{
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+ unsigned long rate;
+ struct clk *aclk;
+ u32 cap, cap2;
+
+ /* 1ms tick is generated only for the CCC or DevSleep features */
+ cap = readl(hpriv->mmio + HOST_CAP);
+ cap2 = readl(hpriv->mmio + HOST_CAP2);
+ if (!(cap & HOST_CAP_CCC) && !(cap2 & HOST_CAP2_SDS))
+ return;
+
+ /*
+	 * The tick is generated from the AXI/AHB application clock signal,
+	 * so we need to be sure about the clock we are going to use.
+ */
+ aclk = ahci_platform_find_clk(hpriv, "aclk");
+ if (!aclk)
+ return;
+
+ /* 1ms timer interval is set as TIMV = AMBA_FREQ[MHZ] * 1000 */
+ dpriv->timv = readl(hpriv->mmio + AHCI_DWC_HOST_TIMER1MS);
+ dpriv->timv = FIELD_GET(AHCI_DWC_HOST_TIMV_MASK, dpriv->timv);
+ rate = clk_get_rate(aclk) / 1000UL;
+ if (rate == dpriv->timv)
+ return;
+
+ dev_info(&dpriv->pdev->dev, "Update CCC/DevSlp timer for Fapp %lu MHz\n",
+ rate / 1000UL);
+ dpriv->timv = FIELD_PREP(AHCI_DWC_HOST_TIMV_MASK, rate);
+ writel(dpriv->timv, hpriv->mmio + AHCI_DWC_HOST_TIMER1MS);
+}
+
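A quick worked example of the TIMV check above, assuming a hypothetical 100 MHz application clock:

	/*
	 * clk_get_rate(aclk) = 100000000 Hz
	 * rate = 100000000 / 1000 = 100000
	 * expected TIMV = AMBA_FREQ[MHZ] * 1000 = 100 * 1000 = 100000
	 * -> if the register already holds 100000, no update is needed
	 */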
+static int ahci_dwc_init_dmacr(struct ahci_host_priv *hpriv)
+{
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+ struct device_node *child;
+ void __iomem *port_mmio;
+ u32 port, dmacr, ts;
+
+ /*
+	 * Update the DMA Tx/Rx transaction sizes in accordance with the
+	 * platform setup. Note values outside the minimal/maximal limits are
+	 * automatically clamped. Also note the register isn't affected by the
+	 * HBA global reset, so it only needs to be initialized once and keeps
+	 * its value until the next system reset.
+ */
+ for_each_child_of_node(dpriv->pdev->dev.of_node, child) {
+ if (!of_device_is_available(child))
+ continue;
+
+ if (of_property_read_u32(child, "reg", &port)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ port_mmio = __ahci_port_base(hpriv, port);
+ dmacr = readl(port_mmio + AHCI_DWC_PORT_DMACR);
+
+ if (!of_property_read_u32(child, "snps,tx-ts-max", &ts)) {
+ ts = ilog2(ts);
+ dmacr &= ~AHCI_DWC_PORT_TXTS_MASK;
+ dmacr |= FIELD_PREP(AHCI_DWC_PORT_TXTS_MASK, ts);
+ }
+
+ if (!of_property_read_u32(child, "snps,rx-ts-max", &ts)) {
+ ts = ilog2(ts);
+ dmacr &= ~AHCI_DWC_PORT_RXTS_MASK;
+ dmacr |= FIELD_PREP(AHCI_DWC_PORT_RXTS_MASK, ts);
+ }
+
+ writel(dmacr, port_mmio + AHCI_DWC_PORT_DMACR);
+ dpriv->dmacr[port] = dmacr;
+ }
+
+ return 0;
+}
+
+static int ahci_dwc_init_host(struct ahci_host_priv *hpriv)
+{
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ if (dpriv->pdata->init) {
+ rc = dpriv->pdata->init(hpriv);
+ if (rc)
+ goto err_disable_resources;
+ }
+
+ ahci_dwc_check_cap(hpriv);
+
+ ahci_dwc_init_timer(hpriv);
+
+ rc = ahci_dwc_init_dmacr(hpriv);
+ if (rc)
+ goto err_clear_platform;
+
+ return 0;
+
+err_clear_platform:
+ if (dpriv->pdata->clear)
+ dpriv->pdata->clear(hpriv);
+
+err_disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
+}
+
+static int ahci_dwc_reinit_host(struct ahci_host_priv *hpriv)
+{
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+ unsigned long port_map = hpriv->port_map;
+ void __iomem *port_mmio;
+ int i, rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ if (dpriv->pdata->reinit) {
+ rc = dpriv->pdata->reinit(hpriv);
+ if (rc)
+ goto err_disable_resources;
+ }
+
+ writel(dpriv->timv, hpriv->mmio + AHCI_DWC_HOST_TIMER1MS);
+
+ for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
+ port_mmio = __ahci_port_base(hpriv, i);
+ writel(dpriv->dmacr[i], port_mmio + AHCI_DWC_PORT_DMACR);
+ }
+
+ return 0;
+
+err_disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
+}
+
+static void ahci_dwc_clear_host(struct ahci_host_priv *hpriv)
+{
+ struct ahci_dwc_host_priv *dpriv = hpriv->plat_data;
+
+ if (dpriv->pdata->clear)
+ dpriv->pdata->clear(hpriv);
+
+ ahci_platform_disable_resources(hpriv);
+}
+
+static void ahci_dwc_stop_host(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_dwc_clear_host(hpriv);
+}
+
+static struct ata_port_operations ahci_dwc_port_ops = {
+ .inherits = &ahci_platform_ops,
+ .host_stop = ahci_dwc_stop_host,
+};
+
+static const struct ata_port_info ahci_dwc_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_dwc_port_ops,
+};
+
+static struct scsi_host_template ahci_dwc_scsi_info = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static int ahci_dwc_probe(struct platform_device *pdev)
+{
+ struct ahci_host_priv *hpriv;
+ int rc;
+
+ hpriv = ahci_dwc_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ rc = ahci_dwc_init_host(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_init_host(pdev, hpriv, &ahci_dwc_port_info,
+ &ahci_dwc_scsi_info);
+ if (rc)
+ goto err_clear_host;
+
+ return 0;
+
+err_clear_host:
+ ahci_dwc_clear_host(hpriv);
+
+ return rc;
+}
+
+static int ahci_dwc_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_suspend_host(dev);
+ if (rc)
+ return rc;
+
+ ahci_dwc_clear_host(hpriv);
+
+ return 0;
+}
+
+static int ahci_dwc_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_dwc_reinit_host(hpriv);
+ if (rc)
+ return rc;
+
+ return ahci_platform_resume_host(dev);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ahci_dwc_pm_ops, ahci_dwc_suspend,
+ ahci_dwc_resume);
+
+static struct ahci_dwc_plat_data ahci_dwc_plat = {
+ .pflags = AHCI_PLATFORM_GET_RESETS,
+};
+
+static struct ahci_dwc_plat_data ahci_bt1_plat = {
+ .pflags = AHCI_PLATFORM_GET_RESETS | AHCI_PLATFORM_RST_TRIGGER,
+ .init = ahci_bt1_init,
+};
+
+static const struct of_device_id ahci_dwc_of_match[] = {
+ { .compatible = "snps,dwc-ahci", &ahci_dwc_plat },
+ { .compatible = "snps,spear-ahci", &ahci_dwc_plat },
+ { .compatible = "baikal,bt1-ahci", &ahci_bt1_plat },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_dwc_of_match);
+
+static struct platform_driver ahci_dwc_driver = {
+ .probe = ahci_dwc_probe,
+ .remove = ata_platform_remove_one,
+ .shutdown = ahci_platform_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ahci_dwc_of_match,
+ .pm = &ahci_dwc_pm_ops,
+ },
+};
+module_platform_driver(ahci_dwc_driver);
+
+MODULE_DESCRIPTION("DWC AHCI SATA platform driver");
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 79aa9f285312..b734e069034d 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -327,7 +327,7 @@ static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem * mmio)
}
/* SATA AHCI temperature monitor */
-static int sata_ahci_read_temperature(void *dev, int *temp)
+static int __sata_ahci_read_temperature(void *dev, int *temp)
{
u16 mpll_test_reg, rtune_ctl_reg, dac_ctl_reg, read_sum;
u32 str1, str2, str3, str4;
@@ -416,6 +416,11 @@ static int sata_ahci_read_temperature(void *dev, int *temp)
return 0;
}
+static int sata_ahci_read_temperature(struct thermal_zone_device *tz, int *temp)
+{
+ return __sata_ahci_read_temperature(tz->devdata, temp);
+}
+
static ssize_t sata_ahci_show_temp(struct device *dev,
struct device_attribute *da,
char *buf)
@@ -423,14 +428,14 @@ static ssize_t sata_ahci_show_temp(struct device *dev,
unsigned int temp = 0;
int err;
- err = sata_ahci_read_temperature(dev, &temp);
+ err = __sata_ahci_read_temperature(dev, &temp);
if (err < 0)
return err;
return sprintf(buf, "%u\n", temp);
}
-static const struct thermal_zone_of_device_ops fsl_sata_ahci_of_thermal_ops = {
+static const struct thermal_zone_device_ops fsl_sata_ahci_of_thermal_ops = {
.get_temp = sata_ahci_read_temperature,
};
@@ -1131,8 +1136,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
ret = PTR_ERR(hwmon_dev);
goto disable_clk;
}
- devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- &fsl_sata_ahci_of_thermal_ops);
+ devm_thermal_of_zone_register(hwmon_dev, 0, hwmon_dev,
+ &fsl_sata_ahci_of_thermal_ops);
dev_info(dev, "%s: sensor 'sata_ahci'\n", dev_name(hwmon_dev));
}
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 1f6c85fde983..c056378e3e72 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -118,8 +118,6 @@ static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
SYS_CFG_SATA_EN);
}
- of_property_read_u32(np, "ports-implemented", &hpriv->force_port_map);
-
return 0;
}
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 28a8de5b48b9..8f5572a9f8f1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -56,9 +56,6 @@ static int ahci_probe(struct platform_device *pdev)
if (rc)
return rc;
- of_property_read_u32(dev->of_node,
- "ports-implemented", &hpriv->force_port_map);
-
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
@@ -83,9 +80,7 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "generic-ahci", },
/* Keep the following compatibles for device tree compatibility */
- { .compatible = "snps,spear-ahci", },
{ .compatible = "ibm,476gtr-ahci", },
- { .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
{ .compatible = "cavium,octeon-7130-ahci", },
{ /* sentinel */ }
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 7526653c843b..5a2cac60a29a 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -144,7 +144,6 @@ static struct scsi_host_template ahci_platform_sht = {
static int st_ahci_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;
@@ -168,9 +167,6 @@ static int st_ahci_probe(struct platform_device *pdev)
st_ahci_configure_oob(hpriv->mmio);
- of_property_read_u32(dev->of_node,
- "ports-implemented", &hpriv->force_port_map);
-
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index cf8c7fd59ada..954386a2b500 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -16,6 +16,7 @@
* http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
@@ -443,17 +444,28 @@ static ssize_t ahci_show_em_supported(struct device *dev,
void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
{
void __iomem *mmio = hpriv->mmio;
- u32 cap, cap2, vers, port_map;
+ void __iomem *port_mmio;
+ unsigned long port_map;
+ u32 cap, cap2, vers;
int i;
/* make sure AHCI mode is enabled before accessing CAP */
ahci_enable_ahci(mmio);
- /* Values prefixed with saved_ are written back to host after
- * reset. Values without are used for driver operation.
+ /*
+ * Values prefixed with saved_ are written back to the HBA and ports
+ * registers after reset. Values without are used for driver operation.
+ */
+
+ /*
+ * Override HW-init HBA capability fields with the platform-specific
+ * values. The rest of the HBA capabilities are defined as Read-only
+ * and can't be modified in CSR anyway.
*/
- hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
- hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
+ cap = readl(mmio + HOST_CAP);
+ if (hpriv->saved_cap)
+ cap = (cap & ~(HOST_CAP_SSS | HOST_CAP_MPS)) | hpriv->saved_cap;
+ hpriv->saved_cap = cap;
/* CAP2 register is only defined for AHCI 1.2 and later */
vers = readl(mmio + HOST_VERSION);
@@ -517,15 +529,18 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
cap &= ~HOST_CAP_SXS;
}
- if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
- dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
- port_map, hpriv->force_port_map);
- port_map = hpriv->force_port_map;
+ /* Override the HBA ports mapping if the platform needs it */
+ port_map = readl(mmio + HOST_PORTS_IMPL);
+ if (hpriv->saved_port_map && port_map != hpriv->saved_port_map) {
+ dev_info(dev, "forcing port_map 0x%lx -> 0x%x\n",
+ port_map, hpriv->saved_port_map);
+ port_map = hpriv->saved_port_map;
+ } else {
hpriv->saved_port_map = port_map;
}
if (hpriv->mask_port_map) {
- dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
+ dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
port_map & hpriv->mask_port_map);
port_map &= hpriv->mask_port_map;
@@ -544,7 +559,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
*/
if (map_ports > ahci_nr_ports(cap)) {
dev_warn(dev,
- "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
+ "implemented port map (0x%lx) contains more ports than nr_ports (%u), using nr_ports\n",
port_map, ahci_nr_ports(cap));
port_map = 0;
}
@@ -553,16 +568,30 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
if (!port_map && vers < 0x10300) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
+ dev_warn(dev, "forcing PORTS_IMPL to 0x%lx\n", port_map);
/* write the fixed up value to the PI register */
hpriv->saved_port_map = port_map;
}
+ /*
+	 * Preserve the port capabilities defined by the platform. Note there
+	 * is no need to store the rest of the P#.CMD fields since they are
+	 * volatile.
+ */
+ for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
+ if (hpriv->saved_port_cap[i])
+ continue;
+
+ port_mmio = __ahci_port_base(hpriv, i);
+ hpriv->saved_port_cap[i] =
+ readl(port_mmio + PORT_CMD) & PORT_CMD_CAP;
+ }
+
/* record values to use during operation */
hpriv->cap = cap;
hpriv->cap2 = cap2;
- hpriv->version = readl(mmio + HOST_VERSION);
+ hpriv->version = vers;
hpriv->port_map = port_map;
if (!hpriv->start_engine)
@@ -588,13 +617,21 @@ EXPORT_SYMBOL_GPL(ahci_save_initial_config);
static void ahci_restore_initial_config(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
+ unsigned long port_map = hpriv->port_map;
void __iomem *mmio = hpriv->mmio;
+ void __iomem *port_mmio;
+ int i;
writel(hpriv->saved_cap, mmio + HOST_CAP);
if (hpriv->saved_cap2)
writel(hpriv->saved_cap2, mmio + HOST_CAP2);
writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
+
+ for_each_set_bit(i, &port_map, AHCI_MAX_PORTS) {
+ port_mmio = __ahci_port_base(hpriv, i);
+ writel(hpriv->saved_port_cap[i], port_mmio + PORT_CMD);
+ }
}
static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 32495ae96567..ddf17e2d266c 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -94,31 +94,41 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
/**
- * ahci_platform_enable_clks - Enable platform clocks
+ * ahci_platform_find_clk - Find platform clock
* @hpriv: host private area to store config values
+ * @con_id: clock connection ID
*
- * This function enables all the clks found in hpriv->clks, starting at
- * index 0. If any clk fails to enable it disables all the clks already
- * enabled in reverse order, and then returns an error.
+ * This function returns a pointer to the clock descriptor of the clock with
+ * the passed ID.
*
* RETURNS:
- * 0 on success otherwise a negative error code
+ * Pointer to the clock descriptor on success otherwise NULL
*/
-int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv, const char *con_id)
{
- int c, rc;
+ int i;
- for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
- rc = clk_prepare_enable(hpriv->clks[c]);
- if (rc)
- goto disable_unprepare_clk;
+ for (i = 0; i < hpriv->n_clks; i++) {
+ if (!strcmp(hpriv->clks[i].id, con_id))
+ return hpriv->clks[i].clk;
}
- return 0;
-disable_unprepare_clk:
- while (--c >= 0)
- clk_disable_unprepare(hpriv->clks[c]);
- return rc;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_find_clk);
+
+/**
+ * ahci_platform_enable_clks - Enable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the clks found for the AHCI device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
+{
+ return clk_bulk_prepare_enable(hpriv->n_clks, hpriv->clks);
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
@@ -126,20 +136,55 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
* ahci_platform_disable_clks - Disable platform clocks
* @hpriv: host private area to store config values
*
- * This function disables all the clks found in hpriv->clks, in reverse
- * order of ahci_platform_enable_clks (starting at the end of the array).
+ * This function disables all the clocks that were previously enabled
+ * (the bulk clocks disable helper does that in the reverse of the
+ * enabling order).
*/
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
{
- int c;
-
- for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
- if (hpriv->clks[c])
- clk_disable_unprepare(hpriv->clks[c]);
+ clk_bulk_disable_unprepare(hpriv->n_clks, hpriv->clks);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
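For context, the conversion above relies on the generic clk_bulk API. A minimal, self-contained sketch of that pattern, not part of the patch (sample_enable_clocks is a hypothetical helper):

#include <linux/clk.h>
#include <linux/device.h>

static int sample_enable_clocks(struct device *dev)
{
	struct clk_bulk_data *clks;
	int n_clks, ret;

	/* Grab every clock listed for the device (0 clocks is not an error) */
	n_clks = devm_clk_bulk_get_all(dev, &clks);
	if (n_clks < 0)
		return n_clks;

	ret = clk_bulk_prepare_enable(n_clks, clks);
	if (ret)
		return ret;

	/* ... use the hardware ... */

	clk_bulk_disable_unprepare(n_clks, clks);
	return 0;
}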
/**
+ * ahci_platform_deassert_rsts - Deassert/trigger platform resets
+ * @hpriv: host private area to store config values
+ *
+ * This function deasserts or triggers all the reset lines found for
+ * the AHCI device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv)
+{
+ if (hpriv->f_rsts & AHCI_PLATFORM_RST_TRIGGER)
+ return reset_control_reset(hpriv->rsts);
+
+ return reset_control_deassert(hpriv->rsts);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_deassert_rsts);
+
+/**
+ * ahci_platform_assert_rsts - Assert/rearm platform resets
+ * @hpriv: host private area to store config values
+ *
+ * This function asserts or rearms (for self-deasserting resets) all
+ * the reset controls found for the AHCI device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv)
+{
+ if (hpriv->f_rsts & AHCI_PLATFORM_RST_TRIGGER)
+ return reset_control_rearm(hpriv->rsts);
+
+ return reset_control_assert(hpriv->rsts);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_assert_rsts);
+
+/**
* ahci_platform_enable_regulators - Enable regulators
* @hpriv: host private area to store config values
*
@@ -236,18 +281,18 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
if (rc)
goto disable_regulator;
- rc = reset_control_deassert(hpriv->rsts);
+ rc = ahci_platform_deassert_rsts(hpriv);
if (rc)
goto disable_clks;
rc = ahci_platform_enable_phys(hpriv);
if (rc)
- goto disable_resets;
+ goto disable_rsts;
return 0;
-disable_resets:
- reset_control_assert(hpriv->rsts);
+disable_rsts:
+ ahci_platform_assert_rsts(hpriv);
disable_clks:
ahci_platform_disable_clks(hpriv);
@@ -274,7 +319,7 @@ void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
{
ahci_platform_disable_phys(hpriv);
- reset_control_assert(hpriv->rsts);
+ ahci_platform_assert_rsts(hpriv);
ahci_platform_disable_clks(hpriv);
@@ -292,8 +337,6 @@ static void ahci_platform_put_resources(struct device *dev, void *res)
pm_runtime_disable(dev);
}
- for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
- clk_put(hpriv->clks[c]);
/*
* The regulators are tied to child node device and not to the
* SATA device itself. So we can't use devm for automatically
@@ -363,6 +406,34 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
return rc;
}
+static int ahci_platform_get_firmware(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct device_node *child;
+ u32 port;
+
+ if (!of_property_read_u32(dev->of_node, "hba-cap", &hpriv->saved_cap))
+ hpriv->saved_cap &= (HOST_CAP_SSS | HOST_CAP_MPS);
+
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->saved_port_map);
+
+ for_each_child_of_node(dev->of_node, child) {
+ if (!of_device_is_available(child))
+ continue;
+
+ if (of_property_read_u32(child, "reg", &port)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_property_read_u32(child, "hba-port-cap", &hpriv->saved_port_cap[port]))
+ hpriv->saved_port_cap[port] &= PORT_CMD_CAP;
+ }
+
+ return 0;
+}
+
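For illustration only, a DT fragment (kept inside a C comment; node names, addresses and values are made up) showing the firmware properties consumed by ahci_platform_get_firmware() and by the per-port parsing in ahci_dwc_init_dmacr():

/*
 *	sata@300000 {
 *		compatible = "snps,dwc-ahci";
 *		reg = <0x300000 0x2000>;
 *		hba-cap = <0x08000000>;			// e.g. HOST_CAP_SSS
 *		ports-implemented = <0x1>;
 *
 *		sata-port@0 {
 *			reg = <0>;
 *			hba-port-cap = <0x00040000>;	// e.g. PORT_CMD_HPCP
 *			snps,tx-ts-max = <512>;
 *			snps,rx-ts-max = <512>;
 *		};
 *	};
 */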
/**
* ahci_platform_get_resources - Get platform resources
* @pdev: platform device to get resources for
@@ -374,8 +445,8 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
* 1) mmio registers (IORESOURCE_MEM 0, mandatory)
* 2) regulator for controlling the targets power (optional)
* regulator for controlling the AHCI controller (optional)
- * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
- * or for non devicetree enabled platforms a single clock
+ * 3) all clocks specified in the devicetree node, or a single
+ * clock for non-OF platforms (optional)
* 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional)
* 5) phys (optional)
*
@@ -385,11 +456,10 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
unsigned int flags)
{
+ int child_nodes, rc = -ENOMEM, enabled_ports = 0;
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
- struct clk *clk;
struct device_node *child;
- int i, enabled_ports = 0, rc = -ENOMEM, child_nodes;
u32 mask_port_map = 0;
if (!devres_open_group(dev, NULL, GFP_KERNEL))
@@ -402,32 +472,51 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
devres_add(dev, hpriv);
- hpriv->mmio = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ /*
+ * If the DT provided an "ahci" named resource, use it. Otherwise,
+	 * fall back to using the default first resource for the device node.
+ */
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci"))
+ hpriv->mmio = devm_platform_ioremap_resource_byname(pdev, "ahci");
+ else
+ hpriv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hpriv->mmio)) {
rc = PTR_ERR(hpriv->mmio);
goto err_out;
}
- for (i = 0; i < AHCI_MAX_CLKS; i++) {
+ /*
+	 * The bulk clocks lookup may fail to find any clock when running on a
+	 * non-OF platform or when the clocks are defined outside of the DT
+	 * firmware (like da850, spear13xx). In that case fall back to getting
+	 * a single clock source straight from the device clocks list.
+ */
+ rc = devm_clk_bulk_get_all(dev, &hpriv->clks);
+ if (rc < 0)
+ goto err_out;
+
+ if (rc > 0) {
+ /* Got clocks in bulk */
+ hpriv->n_clks = rc;
+ } else {
/*
- * For now we must use clk_get(dev, NULL) for the first clock,
- * because some platforms (da850, spear13xx) are not yet
- * converted to use devicetree for clocks. For new platforms
- * this is equivalent to of_clk_get(dev->of_node, 0).
+		 * No bulk clocks found: fall back to manually getting
+		 * the optional clock.
*/
- if (i == 0)
- clk = clk_get(dev, NULL);
- else
- clk = of_clk_get(dev->of_node, i);
-
- if (IS_ERR(clk)) {
- rc = PTR_ERR(clk);
- if (rc == -EPROBE_DEFER)
- goto err_out;
- break;
+ hpriv->clks = devm_kzalloc(dev, sizeof(*hpriv->clks), GFP_KERNEL);
+ if (!hpriv->clks) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ hpriv->clks->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(hpriv->clks->clk)) {
+ rc = PTR_ERR(hpriv->clks->clk);
+ goto err_out;
+ } else if (hpriv->clks->clk) {
+ hpriv->clks->id = "ahci";
+ hpriv->n_clks = 1;
}
- hpriv->clks[i] = clk;
}
hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
@@ -449,16 +538,28 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
rc = PTR_ERR(hpriv->rsts);
goto err_out;
}
+
+ hpriv->f_rsts = flags & AHCI_PLATFORM_RST_TRIGGER;
}
- hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
+ /*
+	 * Too many sub-nodes most likely means that something is wrong with
+	 * the firmware.
+ */
+ child_nodes = of_get_child_count(dev->of_node);
+ if (child_nodes > AHCI_MAX_PORTS) {
+ rc = -EINVAL;
+ goto err_out;
+ }
/*
* If no sub-node was found, we still need to set nports to
* one in order to be able to use the
* ahci_platform_[en|dis]able_[phys|regulators] functions.
*/
- if (!child_nodes)
+ if (child_nodes)
+ hpriv->nports = child_nodes;
+ else
hpriv->nports = 1;
hpriv->phys = devm_kcalloc(dev, hpriv->nports, sizeof(*hpriv->phys), GFP_KERNEL);
@@ -540,6 +641,15 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
if (rc == -EPROBE_DEFER)
goto err_out;
}
+
+ /*
+	 * Retrieve the firmware-specific flags which will then be used to
+	 * set up the HW-init fields of the HBA and its ports.
+ */
+ rc = ahci_platform_get_firmware(hpriv, dev);
+ if (rc)
+ goto err_out;
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
hpriv->got_runtime_pm = true;
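
For illustration only (not part of the series): the bulk-clock data filled in above is meant to be consumed through the clk_bulk helpers, roughly as in the sketch below. The function name is hypothetical and error handling is elided.

#include <linux/clk.h>
#include "ahci.h"	/* struct ahci_host_priv, local header */

/* Enable every clock collected by ahci_platform_get_resources() in one go. */
static int example_enable_hpriv_clks(struct ahci_host_priv *hpriv)
{
	return clk_bulk_prepare_enable(hpriv->n_clks, hpriv->clks);
}
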
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 826d41f341e4..d3ce5c383f3a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -665,33 +665,33 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
/**
* ata_build_rw_tf - Build ATA taskfile for given read/write request
- * @tf: Target ATA taskfile
- * @dev: ATA device @tf belongs to
+ * @qc: Metadata associated with the taskfile to build
* @block: Block address
* @n_block: Number of blocks
* @tf_flags: RW/FUA etc...
- * @tag: tag
* @class: IO priority class
*
* LOCKING:
* None.
*
- * Build ATA taskfile @tf for read/write request described by
- * @block, @n_block, @tf_flags and @tag on @dev.
+ * Build ATA taskfile for the command @qc for read/write request described
+ * by @block, @n_block, @tf_flags and @class.
*
* RETURNS:
*
* 0 on success, -ERANGE if the request is too large for @dev,
* -EINVAL if the request is invalid.
*/
-int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
- u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag, int class)
+int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
+ unsigned int tf_flags, int class)
{
+ struct ata_taskfile *tf = &qc->tf;
+ struct ata_device *dev = qc->dev;
+
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= tf_flags;
- if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
+ if (ata_ncq_enabled(dev)) {
/* yay, NCQ */
if (!lba_48_ok(block, n_block))
return -ERANGE;
@@ -704,7 +704,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
else
tf->command = ATA_CMD_FPDMA_READ;
- tf->nsect = tag << 3;
+ tf->nsect = qc->hw_tag << 3;
tf->hob_feature = (n_block >> 8) & 0xff;
tf->feature = n_block & 0xff;
@@ -719,7 +719,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
if (tf->flags & ATA_TFLAG_FUA)
tf->device |= 1 << 7;
- if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
+ if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
class == IOPRIO_CLASS_RT)
tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
} else if (dev->flags & ATA_DFLAG_LBA) {
@@ -1578,8 +1578,8 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
else
ata_qc_complete(qc);
- ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
- command);
+ ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
+ timeout, command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -2171,7 +2171,7 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
return;
not_supported:
- dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}
@@ -3021,7 +3021,8 @@ static void sata_print_link_status(struct ata_link *link)
if (sata_scr_read(link, SCR_STATUS, &sstatus))
return;
- sata_scr_read(link, SCR_CONTROL, &scontrol);
+ if (sata_scr_read(link, SCR_CONTROL, &scontrol))
+ return;
if (ata_phys_link_online(link)) {
tmp = (sstatus >> 4) & 0xf;
@@ -3988,6 +3989,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* These specific Pioneer models have LPM issues */
+ { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
+ { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
+
/* Crucial BX100 SSD 500GB has broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
@@ -4295,7 +4300,6 @@ static void ata_dev_xfermask(struct ata_device *dev)
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
struct ata_taskfile tf;
- unsigned int err_mask;
/* set up set-features taskfile */
ata_dev_dbg(dev, "set features - xfer mode\n");
@@ -4317,20 +4321,20 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
else /* In the ancient relic department - skip all of this */
return 0;
- /* On some disks, this command causes spin-up, so we need longer timeout */
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
-
- return err_mask;
+ /*
+ * On some disks, this command causes spin-up, so we need longer
+ * timeout.
+ */
+ return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
}
/**
- * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
+ * ata_dev_set_feature - Issue SET FEATURES
* @dev: Device to which command will be sent
- * @enable: Whether to enable or disable the feature
- * @feature: The sector count represents the feature to set
+ * @subcmd: The SET FEATURES subcommand to be sent
+ * @action: The sector count represents a subcommand specific action
*
- * Issue SET FEATURES - SATA FEATURES command to device @dev
- * on port @ap with sector count
+ * Issue SET FEATURES command to device @dev on port @ap with sector count
*
* LOCKING:
* PCI/etc. bus probe sem.
@@ -4338,28 +4342,26 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
* RETURNS:
* 0 on success, AC_ERR_* mask otherwise.
*/
-unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
+unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
{
struct ata_taskfile tf;
- unsigned int err_mask;
unsigned int timeout = 0;
/* set up set-features taskfile */
- ata_dev_dbg(dev, "set features - SATA features\n");
+ ata_dev_dbg(dev, "set features\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_SET_FEATURES;
- tf.feature = enable;
+ tf.feature = subcmd;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
- tf.nsect = feature;
+ tf.nsect = action;
- if (enable == SETFEATURES_SPINUP)
+ if (subcmd == SETFEATURES_SPINUP)
timeout = ata_probe_timeout ?
ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
- return err_mask;
+ return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
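
A hypothetical caller, only to show the renamed @subcmd/@action pair in use; SETFEATURES_WC_ON is an existing subcommand that takes no action value, and the wrapper below is a sketch, not code from the series.

/* Assumes the libata-internal declaration from drivers/ata/libata.h. */
static unsigned int example_enable_write_cache(struct ata_device *dev)
{
	/* subcmd = SETFEATURES_WC_ON, action = 0 (none needed) */
	return ata_dev_set_feature(dev, SETFEATURES_WC_ON, 0);
}
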
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7c128c89b454..08e11bc312c2 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -151,6 +151,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
+static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -1086,14 +1088,11 @@ static void __ata_port_freeze(struct ata_port *ap)
*/
int ata_port_freeze(struct ata_port *ap)
{
- int nr_aborted;
-
WARN_ON(!ap->ops->error_handler);
__ata_port_freeze(ap);
- nr_aborted = ata_port_abort(ap);
- return nr_aborted;
+ return ata_port_abort(ap);
}
EXPORT_SYMBOL_GPL(ata_port_freeze);
@@ -1393,7 +1392,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
* @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @cmd: scsi command for which the sense code should be set
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
* SENSE. This function is an EH helper.
@@ -1401,9 +1399,9 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
* LOCKING:
* Kernel thread context (may sleep).
*/
-static void ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
+static void ata_eh_request_sense(struct ata_queued_cmd *qc)
{
+ struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
struct ata_taskfile tf;
unsigned int err_mask;
@@ -1541,7 +1539,6 @@ static void ata_eh_analyze_serror(struct ata_link *link)
/**
* ata_eh_analyze_tf - analyze taskfile of a failed qc
* @qc: qc to analyze
- * @tf: Taskfile registers to analyze
*
* Analyze taskfile of @qc and further determine cause of
* failure. This function also requests ATAPI sense data if
@@ -1553,9 +1550,9 @@ static void ata_eh_analyze_serror(struct ata_link *link)
* RETURNS:
* Determined recovery action
*/
-static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
- const struct ata_taskfile *tf)
+static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
{
+ const struct ata_taskfile *tf = &qc->result_tf;
unsigned int tmp, action = 0;
u8 stat = tf->status, err = tf->error;
@@ -1579,7 +1576,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
switch (qc->dev->class) {
case ATA_DEV_ZAC:
if (stat & ATA_SENSE)
- ata_eh_request_sense(qc, qc->scsicmd);
+ ata_eh_request_sense(qc);
fallthrough;
case ATA_DEV_ATA:
if (err & ATA_ICRC)
@@ -1957,7 +1954,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
qc->err_mask |= ehc->i.err_mask;
/* analyze TF */
- ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
+ ehc->i.action |= ata_eh_analyze_tf(qc);
/* DEV errors are probably spurious in case of ATA_BUS error */
if (qc->err_mask & AC_ERR_ATA_BUS)
@@ -2940,6 +2937,23 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
+ /*
+ * The link may be in a deep sleep; wake it up.
+ *
+ * If the link is in deep sleep, ata_phys_link_offline()
+ * will return true, causing the revalidation to fail,
+ * which leads to a (potentially) needless hard reset.
+ *
+ * ata_eh_recover() will later restore the link policy
+ * to ap->target_lpm_policy after revalidation is done.
+ */
+ if (link->lpm_policy > ATA_LPM_MAX_POWER) {
+ rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
+ r_failed_dev);
+ if (rc)
+ goto err;
+ }
+
if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 7a5fe41aa5ae..b6806d41a8c5 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -870,7 +870,7 @@ static ssize_t ata_ncq_prio_enable_show(struct device *device,
if (!dev)
rc = -ENODEV;
else
- ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+ ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED;
spin_unlock_irq(ap->lock);
return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
@@ -905,9 +905,9 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
}
if (input)
- dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+ dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED;
else
- dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+ dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
unlock:
spin_unlock_irq(ap->lock);
@@ -1018,26 +1018,25 @@ DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
/**
- * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- * @ap: ATA port to which the device change the queue depth
+ * ata_change_queue_depth - Set a device maximum queue depth
+ * @ap: ATA port of the target device
+ * @dev: target ATA device
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
*
- * libsas and libata have different approaches for associating a sdev to
- * its ata_port.
+ * Helper to set a device maximum queue depth, usable with both libsas
+ * and libata.
*
*/
-int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth)
+int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth)
{
- struct ata_device *dev;
unsigned long flags;
- if (queue_depth < 1 || queue_depth == sdev->queue_depth)
+ if (!dev || !ata_dev_enabled(dev))
return sdev->queue_depth;
- dev = ata_scsi_find_dev(ap, sdev);
- if (!dev || !ata_dev_enabled(dev))
+ if (queue_depth < 1 || queue_depth == sdev->queue_depth)
return sdev->queue_depth;
/* NCQ enabled? */
@@ -1059,7 +1058,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
return scsi_change_queue_depth(sdev, queue_depth);
}
-EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
+EXPORT_SYMBOL_GPL(ata_change_queue_depth);
/**
* ata_scsi_change_queue_depth - SCSI callback for queue depth config
@@ -1080,7 +1079,8 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- return __ata_change_queue_depth(ap, sdev, queue_depth);
+ return ata_change_queue_depth(ap, ata_scsi_find_dev(ap, sdev),
+ sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
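
To sketch why @dev became an explicit argument: a libsas-style LLDD already knows its ata_device and can pass it directly instead of going through ata_scsi_find_dev(). The wrapper below is hypothetical and stands in for such a caller.

static int example_transport_change_queue_depth(struct ata_port *ap,
						struct ata_device *adev,
						struct scsi_device *sdev,
						int queue_depth)
{
	/* adev comes from the transport's own bookkeeping, not from SCSI */
	return ata_change_queue_depth(ap, adev, sdev, queue_depth);
}
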
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 29e2f55c6faa..e2ebb0b065e2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1055,6 +1055,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
struct request_queue *q = sdev->request_queue;
+ int depth = 1;
if (!ata_id_has_unload(dev->id))
dev->flags |= ATA_DFLAG_NO_UNLOAD;
@@ -1100,13 +1101,10 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
if (dev->flags & ATA_DFLAG_AN)
set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
- if (dev->flags & ATA_DFLAG_NCQ) {
- int depth;
-
+ if (dev->flags & ATA_DFLAG_NCQ)
depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
- depth = min(ATA_MAX_QUEUE, depth);
- scsi_change_queue_depth(sdev, depth);
- }
+ depth = min(ATA_MAX_QUEUE, depth);
+ scsi_change_queue_depth(sdev, depth);
if (dev->flags & ATA_DFLAG_TRUSTED)
sdev->security_supported = 1;
@@ -1605,9 +1603,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
qc->flags |= ATA_QCFLAG_IO;
qc->nbytes = n_block * scmd->device->sector_size;
- rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
- qc->hw_tag, class);
-
+ rc = ata_build_rw_tf(qc, block, n_block, tf_flags, class);
if (likely(rc == 0))
return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b1666adc1c3a..7916e369e15e 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -776,7 +776,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
* @qc: Command on going
* @bytes: number of bytes
*
- * Transfer Transfer data from/to the ATAPI device.
+ * Transfer data from/to the ATAPI device.
*
* LOCKING:
* Inherited from caller.
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 98bc8649c63f..2c5c8273af01 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -44,9 +44,8 @@ static inline void ata_force_cbl(struct ata_port *ap) { }
#endif
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
-extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
- u64 block, u32 n_block, unsigned int tf_flags,
- unsigned int tag, int class);
+extern int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
+ unsigned int tf_flags, int class);
extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
@@ -64,7 +63,7 @@ extern int ata_dev_configure(struct ata_device *dev);
extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
- u8 enable, u8 feature);
+ u8 subcmd, u8 action);
extern void ata_qc_free(struct ata_queued_cmd *qc);
extern void ata_qc_issue(struct ata_queued_cmd *qc);
extern void __ata_qc_complete(struct ata_queued_cmd *qc);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index bfea2be2959a..9ccaac9e2bc3 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -666,8 +666,7 @@ static u8 pata_macio_bmdma_status(struct ata_port *ap)
* a multi-block transfer.
*
* - The dbdma fifo hasn't yet finished flushing to
- * to system memory when the disk interrupt occurs.
- *
+ * system memory when the disk interrupt occurs.
*/
/* First check for errors */
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 4fab3b2c7023..02425991c159 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -775,7 +775,7 @@ static int ht16k33_probe(struct i2c_client *client)
return err;
}
-static int ht16k33_remove(struct i2c_client *client)
+static void ht16k33_remove(struct i2c_client *client)
{
struct ht16k33_priv *priv = i2c_get_clientdata(client);
struct ht16k33_fbdev *fbdev = &priv->fbdev;
@@ -796,8 +796,6 @@ static int ht16k33_remove(struct i2c_client *client)
device_remove_file(&client->dev, &dev_attr_map_seg14);
break;
}
-
- return 0;
}
static const struct i2c_device_id ht16k33_i2c_match[] = {
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index e465108d9998..135831a16514 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -340,13 +340,12 @@ fail1:
return err;
}
-static int lcd2s_i2c_remove(struct i2c_client *i2c)
+static void lcd2s_i2c_remove(struct i2c_client *i2c)
{
struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c);
charlcd_unregister(lcd2s->charlcd);
charlcd_free(lcd2s->charlcd);
- return 0;
}
static const struct i2c_device_id lcd2s_i2c_id[] = {
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 0424b59b695e..e7d6e6657ffa 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -353,7 +353,7 @@ void topology_init_cpu_capacity_cppc(void)
struct cppc_perf_caps perf_caps;
int cpu;
- if (likely(acpi_disabled || !acpi_cpc_valid()))
+ if (likely(!acpi_cpc_valid()))
return;
raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
@@ -724,7 +724,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
*/
if (cpumask_subset(cpu_coregroup_mask(cpu),
&cpu_topology[cpu].cluster_sibling))
- return get_cpu_mask(cpu);
+ return topology_sibling_cpumask(cpu);
return &cpu_topology[cpu].cluster_sibling;
}
@@ -735,7 +735,7 @@ void update_siblings_masks(unsigned int cpuid)
int cpu, ret;
ret = detect_cache_attributes(cpuid);
- if (ret)
+ if (ret && ret != -ENOENT)
pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */
@@ -841,4 +841,23 @@ void __init init_cpu_topology(void)
return;
}
}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
+
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: package %d core %d thread %d\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+}
#endif
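
store_cpu_topology() is intended to be called from an architecture's secondary-CPU bring-up path; a rough, hypothetical call site is sketched below (the surrounding function is architecture code, and the include location of the prototype is assumed).

#include <linux/arch_topology.h>
#include <linux/smp.h>

static void example_secondary_init_topology(void)
{
	/* Populate this CPU's topology and refresh the sibling masks. */
	store_cpu_topology(smp_processor_id());
}
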
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b3a43a164dcd..b902d1ecc247 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -154,8 +154,6 @@ extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
void device_driver_detach(struct device *dev);
-extern char *make_class_name(const char *name, struct kobject *kobj);
-
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8feb85e186e3..64f7b9a0970f 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -260,7 +260,7 @@ EXPORT_SYMBOL_GPL(__class_create);
*/
void class_destroy(struct class *cls)
{
- if ((cls == NULL) || (IS_ERR(cls)))
+ if (IS_ERR_OR_NULL(cls))
return;
class_unregister(cls);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 753e7cca0f40..d02501933467 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1625,7 +1625,7 @@ static int __init fw_devlink_setup(char *arg)
}
early_param("fw_devlink", fw_devlink_setup);
-static bool fw_devlink_strict = true;
+static bool fw_devlink_strict;
static int __init fw_devlink_strict_setup(char *arg)
{
return strtobool(arg, &fw_devlink_strict);
@@ -2509,7 +2509,7 @@ static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
rc = kobject_synth_uevent(&dev->kobj, buf, count);
if (rc) {
- dev_err(dev, "uevent: failed to send synthetic uevent\n");
+ dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
return rc;
}
@@ -4170,7 +4170,7 @@ device_create_groups_vargs(struct class *class, struct device *parent,
struct device *dev = NULL;
int retval = -ENODEV;
- if (class == NULL || IS_ERR(class))
+ if (IS_ERR_OR_NULL(class))
goto error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 70f79fc71539..3dda62503102 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -274,12 +274,42 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Return:
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
+ *
+ * Drivers or subsystems can opt in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
+ return -ENODEV;
+ }
+
+ if (!driver_deferred_probe_timeout && initcalls_done) {
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
+ return -ETIMEDOUT;
+ }
+
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
+
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
+ driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
@@ -806,7 +836,7 @@ static int __init save_async_options(char *buf)
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
- strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ strscpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
async_probe_default = parse_option_str(async_probe_drv_names, "*");
return 1;
@@ -881,6 +911,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -1120,6 +1155,11 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
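
A sketch of the consumer pattern the re-exported helper enables (the provider argument is made up): instead of returning -EPROBE_DEFER unconditionally, a subsystem that cannot resolve a dependency asks the driver core whether deferring still makes sense.

#include <linux/device.h>

static int example_attach_provider(struct device *dev, struct device *provider)
{
	if (!provider)
		return driver_deferred_probe_check_state(dev);

	return 0;
}
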
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index f4d794d6bb85..1c06781f7114 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -25,6 +25,47 @@ struct devcd_entry {
struct device devcd_dev;
void *data;
size_t datalen;
+ /*
+ * Here, a mutex is required to serialize the del_wk work between the
+ * kernel and user space: device_add() sends a uevent to user space,
+ * user space reads the uevent and writes to the devcd node, and
+ * devcd_data_write() then tries to modify a work item that devcoredump
+ * has not even initialized/queued yet.
+ *
+ *
+ *
+ * cpu0(X) cpu1(Y)
+ *
+ * dev_coredump() uevent sent to user space
+ * device_add() ======================> user space process Y reads the
+ * uevents writes to devcd fd
+ * which results into writes to
+ *
+ * devcd_data_write()
+ * mod_delayed_work()
+ * try_to_grab_pending()
+ * del_timer()
+ * debug_assert_init()
+ * INIT_DELAYED_WORK()
+ * schedule_delayed_work()
+ *
+ *
+ * Also, the mutex alone would not be enough to avoid scheduling the
+ * del_wk work after it has been flushed by a call to devcd_free(),
+ * as shown below.
+ *
+ * disabled_store()
+ * devcd_free()
+ * mutex_lock() devcd_data_write()
+ * flush_delayed_work()
+ * mutex_unlock()
+ * mutex_lock()
+ * mod_delayed_work()
+ * mutex_unlock()
+ * So, the delete_work flag is required.
+ */
+ struct mutex mutex;
+ bool delete_work;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
@@ -84,7 +125,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work) {
+ devcd->delete_work = true;
+ mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ }
+ mutex_unlock(&devcd->mutex);
return count;
}
@@ -112,7 +158,12 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work)
+ devcd->delete_work = true;
+
flush_delayed_work(&devcd->del_wk);
+ mutex_unlock(&devcd->mutex);
return 0;
}
@@ -122,6 +173,30 @@ static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
return sysfs_emit(buf, "%d\n", devcd_disabled);
}
+/*
+ *
+ * disabled_store() worker()
+ * class_for_each_device(&devcd_class,
+ * NULL, NULL, devcd_free)
+ * ...
+ * ...
+ * while ((dev = class_dev_iter_next(&iter))
+ * devcd_del()
+ * device_del()
+ * put_device() <- last reference
+ * error = fn(dev, data) devcd_dev_release()
+ * devcd_free(dev, data) kfree(devcd)
+ * mutex_lock(&devcd->mutex);
+ *
+ *
+ * In the above diagram, it looks like disabled_store() races with a concurrently
+ * running devcd_del() and ends in a memory abort when acquiring devcd->mutex,
+ * since that acquisition happens after the devcd memory has been kfree'd on
+ * dropping its last reference with put_device(). However, this cannot happen:
+ * fn(dev, data) runs with its own reference to the device via klist_node, so
+ * that is not the last reference and the situation above does not occur.
+ */
+
static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
@@ -278,13 +353,16 @@ void dev_coredumpm(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
+ devcd->delete_work = false;
+ mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
dev_set_name(&devcd->devcd_dev, "devcd%d",
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
+ mutex_lock(&devcd->mutex);
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -301,10 +379,11 @@ void dev_coredumpm(struct device *dev, struct module *owner,
INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
-
+ mutex_unlock(&devcd->mutex);
return;
put_device:
put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
put_module:
module_put(owner);
free:
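
For context, a minimal and purely hypothetical producer of a devcd entry: the buffer handed to dev_coredumpv() ends up in the devcd_entry whose del_wk work the new mutex/delete_work pair protects once user space starts writing to the devcd node.

#include <linux/devcoredump.h>
#include <linux/vmalloc.h>

static void example_dump_device_state(struct device *dev, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;
	/* ... fill buf with the device state to expose ... */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);	/* devcoredump now owns buf */
}
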
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 864d0b3f566e..4ab2b50ee38f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -117,7 +117,9 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
if (unlikely(!dr))
return NULL;
- memset(dr, 0, offsetof(struct devres, data));
+ /* No need to clear memory twice */
+ if (!(gfp & __GFP_ZERO))
+ memset(dr, 0, offsetof(struct devres, data));
INIT_LIST_HEAD(&dr->node.entry);
dr->node.release = release;
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 15a75afe6b84..676b6275d5b5 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override,
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
+ /*
+ * Compute the real length of the string in case userspace sends us a
+ * bunch of \0 characters like Python likes to do.
+ */
+ len = strlen(s);
+
if (!len) {
/* Empty string passed - clear override */
device_lock(dev);
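
For example (illustrative values, not taken from the patch): a sysfs write of "vfio-pci" followed by four padding NUL bytes arrives here with len == 12; recomputing the length with strlen() stores the intended 8-character override rather than the NUL-padded buffer.
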
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index 77bad32c481a..5b66b3d1fa16 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -93,10 +93,9 @@ static void fw_dev_release(struct device *dev)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- if (fw_sysfs->fw_upload_priv) {
- free_fw_priv(fw_sysfs->fw_priv);
- kfree(fw_sysfs->fw_upload_priv);
- }
+ if (fw_sysfs->fw_upload_priv)
+ fw_upload_free(fw_sysfs);
+
kfree(fw_sysfs);
}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 5d8ff1675c79..df1d5add698f 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -106,12 +106,17 @@ extern struct device_attribute dev_attr_cancel;
extern struct device_attribute dev_attr_remaining_size;
int fw_upload_start(struct fw_sysfs *fw_sysfs);
+void fw_upload_free(struct fw_sysfs *fw_sysfs);
umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
#else
static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
{
return 0;
}
+
+static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+}
#endif
#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 87044d52322a..a0af8f5f13d8 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -264,6 +264,15 @@ int fw_upload_start(struct fw_sysfs *fw_sysfs)
return 0;
}
+void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_upload_priv->fw_upload);
+ kfree(fw_upload_priv);
+}
+
/**
* firmware_upload_register() - register for the firmware upload sysfs API
* @module: kernel module of this device
@@ -377,6 +386,7 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
{
struct fw_sysfs *fw_sysfs = fw_upload->priv;
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+ struct module *module = fw_upload_priv->module;
mutex_lock(&fw_upload_priv->lock);
if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
@@ -392,6 +402,6 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
unregister:
device_unregister(&fw_sysfs->dev);
- module_put(fw_upload_priv->module);
+ module_put(module);
}
EXPORT_SYMBOL_GPL(firmware_upload_unregister);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 80b1e91b9608..faf3597a96da 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -434,6 +434,7 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d ShadowCallStack:%8lu kB\n"
#endif
"Node %d PageTables: %8lu kB\n"
+ "Node %d SecPageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d WritebackTmp: %8lu kB\n"
@@ -460,6 +461,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
+ nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5a2e0232862e..ead135c7044c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2085,8 +2085,10 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
/* Always-on domains must be powered on at initialization. */
if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
- !genpd_status_on(genpd))
+ !genpd_status_on(genpd)) {
+ pr_err("always-on PM domain %s is not on\n", genpd->name);
return -EINVAL;
+ }
/* Multiple states but no governor doesn't make sense. */
if (!gov && genpd->state_count > 1)
@@ -2733,7 +2735,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return -ENODEV;
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 997be3ac20a7..b52049098d4e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -792,10 +792,13 @@ static int rpm_resume(struct device *dev, int rpmflags)
DEFINE_WAIT(wait);
if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
- if (dev->power.runtime_status == RPM_SUSPENDING)
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
dev->power.deferred_resume = true;
- else
+ if (rpmflags & RPM_NOWAIT)
+ retval = -EINPROGRESS;
+ } else {
retval = -EINPROGRESS;
+ }
goto out;
}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e3befa2c1b66..7cc0c0cf8eaa 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -944,6 +944,8 @@ void pm_system_irq_wakeup(unsigned int irq_number)
else
irq_number = 0;
+ pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
+
raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
if (irq_number)
diff --git a/drivers/base/property.c b/drivers/base/property.c
index ed6f449f8e5c..4d6278a84868 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -17,7 +17,7 @@
#include <linux/property.h>
#include <linux/phy.h>
-struct fwnode_handle *dev_fwnode(struct device *dev)
+struct fwnode_handle *dev_fwnode(const struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode;
@@ -1200,7 +1200,7 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
-const void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(const struct device *dev)
{
return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 71f16be7e717..3ccdd86a97e7 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -10,13 +10,14 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/swab.h>
#include "internal.h"
struct regmap_mmio_context {
void __iomem *regs;
unsigned int val_bytes;
- bool relaxed_mmio;
+ bool big_endian;
bool attached_clk;
struct clk *clk;
@@ -33,9 +34,6 @@ static int regmap_mmio_regbits_check(size_t reg_bits)
case 8:
case 16:
case 32:
-#ifdef CONFIG_64BIT
- case 64:
-#endif
return 0;
default:
return -EINVAL;
@@ -50,18 +48,13 @@ static int regmap_mmio_get_min_stride(size_t val_bits)
case 8:
/* The core treats 0 as 1 */
min_stride = 0;
- return 0;
+ break;
case 16:
min_stride = 2;
break;
case 32:
min_stride = 4;
break;
-#ifdef CONFIG_64BIT
- case 64:
- min_stride = 8;
- break;
-#endif
default:
return -EINVAL;
}
@@ -83,6 +76,12 @@ static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
writeb_relaxed(val, ctx->regs + reg);
}
+static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite8(val, ctx->regs + reg);
+}
+
static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
@@ -97,10 +96,22 @@ static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
writew_relaxed(val, ctx->regs + reg);
}
+static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16(val, ctx->regs + reg);
+}
+
static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
+ writew(swab16(val), ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
iowrite16be(val, ctx->regs + reg);
}
@@ -118,28 +129,24 @@ static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
writel_relaxed(val, ctx->regs + reg);
}
-static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
- unsigned int reg,
- unsigned int val)
+static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
{
- iowrite32be(val, ctx->regs + reg);
+ iowrite32(val, ctx->regs + reg);
}
-#ifdef CONFIG_64BIT
-static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
unsigned int reg,
unsigned int val)
{
- writeq(val, ctx->regs + reg);
+ writel(swab32(val), ctx->regs + reg);
}
-static void regmap_mmio_write64le_relaxed(struct regmap_mmio_context *ctx,
- unsigned int reg,
- unsigned int val)
+static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
{
- writeq_relaxed(val, ctx->regs + reg);
+ iowrite32be(val, ctx->regs + reg);
}
-#endif
static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
@@ -160,6 +167,83 @@ static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
return 0;
}
+static int regmap_mmio_noinc_write(void *context, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+ int i;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * There are no native, assembly-optimized single-register write
+ * operations for big endian, so fall back to emulation if this
+ * is needed. (Single bytes are fine, they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ {
+ const u16 *valp = (const u16 *)val;
+ for (i = 0; i < val_count; i++)
+ writew(swab16(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+ case 4:
+ {
+ const u32 *valp = (const u32 *)val;
+ for (i = 0; i < val_count; i++)
+ writel(swab32(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#ifdef CONFIG_64BIT
+ case 8:
+ {
+ const u64 *valp = (const u64 *)val;
+ for (i = 0; i < val_count; i++)
+ writeq(swab64(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ writesb(ctx->regs + reg, (const u8 *)val, val_count);
+ break;
+ case 2:
+ writesw(ctx->regs + reg, (const u16 *)val, val_count);
+ break;
+ case 4:
+ writesl(ctx->regs + reg, (const u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ writesq(ctx->regs + reg, (const u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
unsigned int reg)
{
@@ -172,6 +256,12 @@ static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
return readb_relaxed(ctx->regs + reg);
}
+static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread8(ctx->regs + reg);
+}
+
static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
unsigned int reg)
{
@@ -184,9 +274,21 @@ static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx
return readw_relaxed(ctx->regs + reg);
}
+static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16(ctx->regs + reg);
+}
+
static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
+ return swab16(readw(ctx->regs + reg));
+}
+
+static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
return ioread16be(ctx->regs + reg);
}
@@ -202,25 +304,23 @@ static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx
return readl_relaxed(ctx->regs + reg);
}
-static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
- unsigned int reg)
+static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- return ioread32be(ctx->regs + reg);
+ return ioread32(ctx->regs + reg);
}
-#ifdef CONFIG_64BIT
-static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
unsigned int reg)
{
- return readq(ctx->regs + reg);
+ return swab32(readl(ctx->regs + reg));
}
-static unsigned int regmap_mmio_read64le_relaxed(struct regmap_mmio_context *ctx,
- unsigned int reg)
+static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- return readq_relaxed(ctx->regs + reg);
+ return ioread32be(ctx->regs + reg);
}
-#endif
static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
@@ -241,6 +341,71 @@ static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
return 0;
}
+static int regmap_mmio_noinc_read(void *context, unsigned int reg,
+ void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ readsb(ctx->regs + reg, (u8 *)val, val_count);
+ break;
+ case 2:
+ readsw(ctx->regs + reg, (u16 *)val, val_count);
+ break;
+ case 4:
+ readsl(ctx->regs + reg, (u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ readsq(ctx->regs + reg, (u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+ /*
+ * There are no native, assembly-optimized single-register read
+ * operations for big endian, so fall back to swapping the values
+ * after the read if this is needed. (Single bytes are fine, they
+ * are not affected by endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ swab16_array(val, val_count);
+ break;
+ case 4:
+ swab32_array(val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ swab64_array(val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
+
static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
@@ -257,6 +422,8 @@ static const struct regmap_bus regmap_mmio = {
.fast_io = true,
.reg_write = regmap_mmio_write,
.reg_read = regmap_mmio_read,
+ .reg_noinc_write = regmap_mmio_noinc_write,
+ .reg_noinc_read = regmap_mmio_noinc_read,
.free_context = regmap_mmio_free_context,
.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
@@ -284,13 +451,15 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
+ if (config->use_relaxed_mmio && config->io_port)
+ return ERR_PTR(-EINVAL);
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
- ctx->relaxed_mmio = config->use_relaxed_mmio;
ctx->clk = ERR_PTR(-ENODEV);
switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
@@ -301,7 +470,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
#endif
switch (config->val_bits) {
case 8:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read8_relaxed;
ctx->reg_write = regmap_mmio_write8_relaxed;
} else {
@@ -310,7 +482,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
}
break;
case 16:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16le;
+ ctx->reg_write = regmap_mmio_iowrite16le;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read16le_relaxed;
ctx->reg_write = regmap_mmio_write16le_relaxed;
} else {
@@ -319,7 +494,10 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
}
break;
case 32:
- if (ctx->relaxed_mmio) {
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32le;
+ ctx->reg_write = regmap_mmio_iowrite32le;
+ } else if (config->use_relaxed_mmio) {
ctx->reg_read = regmap_mmio_read32le_relaxed;
ctx->reg_write = regmap_mmio_write32le_relaxed;
} else {
@@ -327,17 +505,6 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->reg_write = regmap_mmio_write32le;
}
break;
-#ifdef CONFIG_64BIT
- case 64:
- if (ctx->relaxed_mmio) {
- ctx->reg_read = regmap_mmio_read64le_relaxed;
- ctx->reg_write = regmap_mmio_write64le_relaxed;
- } else {
- ctx->reg_read = regmap_mmio_read64le;
- ctx->reg_write = regmap_mmio_write64le;
- }
- break;
-#endif
default:
ret = -EINVAL;
goto err_free;
@@ -347,18 +514,34 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
#ifdef __BIG_ENDIAN
case REGMAP_ENDIAN_NATIVE:
#endif
+ ctx->big_endian = true;
switch (config->val_bits) {
case 8:
- ctx->reg_read = regmap_mmio_read8;
- ctx->reg_write = regmap_mmio_write8;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
break;
case 16:
- ctx->reg_read = regmap_mmio_read16be;
- ctx->reg_write = regmap_mmio_write16be;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16be;
+ ctx->reg_write = regmap_mmio_iowrite16be;
+ } else {
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ }
break;
case 32:
- ctx->reg_read = regmap_mmio_read32be;
- ctx->reg_write = regmap_mmio_write32be;
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32be;
+ ctx->reg_write = regmap_mmio_iowrite32be;
+ } else {
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ }
break;
default:
ret = -EINVAL;
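
A driver-side sketch of selecting the new accessors, assuming the regmap_config::io_port flag referenced above is available; the values are illustrative only.

static const struct regmap_config example_ioport_regmap_cfg = {
	.reg_bits	= 8,
	.val_bits	= 32,
	.reg_stride	= 4,
	.io_port	= true,	/* use the ioread32()/iowrite32() accessors */
};
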
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
index ad1da83e849f..4c2b94b3e30b 100644
--- a/drivers/base/regmap/regmap-spi-avmm.c
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/swab.h>
/*
* This driver implements the regmap operations for a generic SPI
@@ -162,19 +163,12 @@ struct spi_avmm_bridge {
/* bridge buffer used in translation between protocol layers */
char trans_buf[TRANS_BUF_SIZE];
char phy_buf[PHY_BUF_SIZE];
- void (*swap_words)(char *buf, unsigned int len);
+ void (*swap_words)(void *buf, unsigned int len);
};
-static void br_swap_words_32(char *buf, unsigned int len)
+static void br_swap_words_32(void *buf, unsigned int len)
{
- u32 *p = (u32 *)buf;
- unsigned int count;
-
- count = len / 4;
- while (count--) {
- *p = swab32p(p);
- p++;
- }
+ swab32_array(buf, len / 4);
}
/*
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 719323bc6c7f..37ab23a9d034 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
const struct regmap_config *config)
{
size_t max_size = spi_max_transfer_size(spi);
+ size_t max_msg_size, reg_reserve_size;
struct regmap_bus *bus;
if (max_size != SIZE_MAX) {
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
if (!bus)
return ERR_PTR(-ENOMEM);
+ max_msg_size = spi_max_message_size(spi);
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ + config->pad_bits / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
bus->free_on_exit = true;
bus->max_raw_read = max_size;
bus->max_raw_write = max_size;
+
return bus;
}
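
Worked example with illustrative numbers: with reg_bits = 8, pad_bits = 0 and a controller whose transfer and message limits are both 64 bytes, reg_reserve_size is 1, so max_raw_read/max_raw_write drop from 64 to 63 and the register byte still fits into a single SPI message.
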
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index fee221c5008c..c6d6d53e8cd3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -288,15 +288,9 @@ static void regmap_format_16_native(void *buf, unsigned int val,
memcpy(buf, &v, sizeof(v));
}
-static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
{
- u8 *b = buf;
-
- val <<= shift;
-
- b[0] = val >> 16;
- b[1] = val >> 8;
- b[2] = val;
+ put_unaligned_be24(val << shift, buf);
}
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
@@ -380,14 +374,9 @@ static unsigned int regmap_parse_16_native(const void *buf)
return v;
}
-static unsigned int regmap_parse_24(const void *buf)
+static unsigned int regmap_parse_24_be(const void *buf)
{
- const u8 *b = buf;
- unsigned int ret = b[2];
- ret |= ((unsigned int)b[1]) << 8;
- ret |= ((unsigned int)b[0]) << 16;
-
- return ret;
+ return get_unaligned_be24(buf);
}
static unsigned int regmap_parse_32_be(const void *buf)
@@ -991,9 +980,13 @@ struct regmap *__regmap_init(struct device *dev,
break;
case 24:
- if (reg_endian != REGMAP_ENDIAN_BIG)
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_24_be;
+ break;
+ default:
goto err_hwlock;
- map->format.format_reg = regmap_format_24;
+ }
break;
case 32:
@@ -1064,10 +1057,14 @@ struct regmap *__regmap_init(struct device *dev,
}
break;
case 24:
- if (val_endian != REGMAP_ENDIAN_BIG)
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_24_be;
+ map->format.parse_val = regmap_parse_24_be;
+ break;
+ default:
goto err_hwlock;
- map->format.format_val = regmap_format_24;
- map->format.parse_val = regmap_parse_24;
+ }
break;
case 32:
switch (val_endian) {
@@ -2132,6 +2129,99 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
+static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
+ void *val, unsigned int val_len, bool write)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ unsigned int lastval;
+ u8 *u8p;
+ u16 *u16p;
+ u32 *u32p;
+#ifdef CONFIG_64BIT
+ u64 *u64p;
+#endif
+ int ret;
+ int i;
+
+ switch (val_bytes) {
+ case 1:
+ u8p = val;
+ if (write)
+ lastval = (unsigned int)u8p[val_count - 1];
+ break;
+ case 2:
+ u16p = val;
+ if (write)
+ lastval = (unsigned int)u16p[val_count - 1];
+ break;
+ case 4:
+ u32p = val;
+ if (write)
+ lastval = (unsigned int)u32p[val_count - 1];
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ u64p = val;
+ if (write)
+ lastval = (unsigned int)u64p[val_count - 1];
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Update the cache with the last value we write; the rest has simply
+ * gone down into the hardware FIFO. We can't cache FIFOs. This makes
+ * sure a single read from the cache will work.
+ */
+ if (write) {
+ if (!map->cache_bypass && !map->defer_caching) {
+ ret = regcache_write(map, reg, lastval);
+ if (ret != 0)
+ return ret;
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+ ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
+ } else {
+ ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
+ }
+
+ if (!ret && regmap_should_log(map)) {
+ dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
+ for (i = 0; i < val_count; i++) {
+ switch (val_bytes) {
+ case 1:
+ pr_cont("%x", u8p[i]);
+ break;
+ case 2:
+ pr_cont("%x", u16p[i]);
+ break;
+ case 4:
+ pr_cont("%x", u32p[i]);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ pr_cont("%llx", u64p[i]);
+ break;
+#endif
+ default:
+ break;
+ }
+ if (i == (val_count - 1))
+ pr_cont("]\n");
+ else
+ pr_cont(",");
+ }
+ }
+
+ return 0;
+}
+
/**
* regmap_noinc_write(): Write data from a register without incrementing the
* register number
@@ -2159,9 +2249,8 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
size_t write_len;
int ret;
- if (!map->write)
- return -ENOTSUPP;
-
+ if (!map->write && !(map->bus && map->bus->reg_noinc_write))
+ return -EINVAL;
if (val_len % map->format.val_bytes)
return -EINVAL;
if (!IS_ALIGNED(reg, map->reg_stride))
@@ -2176,6 +2265,15 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg,
goto out_unlock;
}
+ /*
+ * Use the accelerated operation if we can. The cast on val drops the
+ * const qualifier in order to facilitate code reuse in regmap_noinc_readwrite().
+ */
+ if (map->bus->reg_noinc_write) {
+ ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
+ goto out_unlock;
+ }
+
while (val_len) {
if (map->max_raw_write && map->max_raw_write < val_len)
write_len = map->max_raw_write;
@@ -2350,6 +2448,10 @@ out:
kfree(wval);
}
+
+ if (!ret)
+ trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -2946,6 +3048,22 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
goto out_unlock;
}
+ /* Use the accelerated operation if we can */
+ if (map->bus->reg_noinc_read) {
+ /*
+ * We have not defined the FIFO semantics for cache, as the
+ * cache is just one value deep. Should we return the last
+ * written value? Avoid the question by always reading the FIFO
+ * even when using the cache. Cache-only mode will not work.
+ */
+ if (map->cache_only) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
+ goto out_unlock;
+ }
+
while (val_len) {
if (map->max_raw_read && map->max_raw_read < val_len)
read_len = map->max_raw_read;
@@ -3095,6 +3213,9 @@ out:
map->unlock(map->lock_arg);
}
+ if (!ret)
+ trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
+
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
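
To illustrate what the accelerated no-increment path is for (the register offset and names below are made up): a driver streaming a buffer into a FIFO data register keeps calling regmap_noinc_write() and, on an MMIO map, now reaches the writesl()-style fast path added above.

#define EXAMPLE_FIFO_DATA_REG	0x40	/* hypothetical FIFO data register */

static int example_push_fifo(struct regmap *map, const u32 *buf, size_t words)
{
	/* the whole buffer goes to one address; only the last value is cached */
	return regmap_noinc_write(map, EXAMPLE_FIFO_DATA_REG, buf,
				  words * sizeof(*buf));
}
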
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 9abee14df9ee..704e106e5dbd 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -32,9 +32,7 @@ DECLARE_EVENT_CLASS(regmap_reg,
__entry->val = val;
),
- TP_printk("%s reg=%x val=%x", __get_str(name),
- (unsigned int)__entry->reg,
- (unsigned int)__entry->val)
+ TP_printk("%s reg=%x val=%x", __get_str(name), __entry->reg, __entry->val)
);
DEFINE_EVENT(regmap_reg, regmap_reg_write,
@@ -43,7 +41,6 @@ DEFINE_EVENT(regmap_reg, regmap_reg_write,
unsigned int val),
TP_ARGS(map, reg, val)
-
);
DEFINE_EVENT(regmap_reg, regmap_reg_read,
@@ -52,7 +49,6 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read,
unsigned int val),
TP_ARGS(map, reg, val)
-
);
DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
@@ -61,7 +57,47 @@ DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
unsigned int val),
TP_ARGS(map, reg, val)
+);
+
+DECLARE_EVENT_CLASS(regmap_bulk,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len),
+
+ TP_STRUCT__entry(
+ __string(name, regmap_name(map))
+ __field(unsigned int, reg)
+ __dynamic_array(char, buf, val_len)
+ __field(int, val_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->reg = reg;
+ __entry->val_len = val_len;
+ memcpy(__get_dynamic_array(buf), val, val_len);
+ ),
+ TP_printk("%s reg=%x val=%s", __get_str(name), __entry->reg,
+ __print_hex(__get_dynamic_array(buf), __entry->val_len))
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_write,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_read,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
);
DECLARE_EVENT_CLASS(regmap_block,
@@ -82,9 +118,7 @@ DECLARE_EVENT_CLASS(regmap_block,
__entry->count = count;
),
- TP_printk("%s reg=%x count=%d", __get_str(name),
- (unsigned int)__entry->reg,
- (int)__entry->count)
+ TP_printk("%s reg=%x count=%d", __get_str(name), __entry->reg, __entry->count)
);
DEFINE_EVENT(regmap_block, regmap_hw_read_start,
@@ -154,8 +188,7 @@ DECLARE_EVENT_CLASS(regmap_bool,
__entry->flag = flag;
),
- TP_printk("%s flag=%d", __get_str(name),
- (int)__entry->flag)
+ TP_printk("%s flag=%d", __get_str(name), __entry->flag)
);
DEFINE_EVENT(regmap_bool, regmap_cache_only,
@@ -163,7 +196,6 @@ DEFINE_EVENT(regmap_bool, regmap_cache_only,
TP_PROTO(struct regmap *map, bool flag),
TP_ARGS(map, flag)
-
);
DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
@@ -171,7 +203,6 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
TP_PROTO(struct regmap *map, bool flag),
TP_ARGS(map, flag)
-
);
DECLARE_EVENT_CLASS(regmap_async,
@@ -203,7 +234,6 @@ DEFINE_EVENT(regmap_async, regmap_async_io_complete,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
DEFINE_EVENT(regmap_async, regmap_async_complete_start,
@@ -211,7 +241,6 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_start,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
DEFINE_EVENT(regmap_async, regmap_async_complete_done,
@@ -219,7 +248,6 @@ DEFINE_EVENT(regmap_async, regmap_async_complete_done,
TP_PROTO(struct regmap *map),
TP_ARGS(map)
-
);
TRACE_EVENT(regcache_drop_region,
@@ -241,8 +269,7 @@ TRACE_EVENT(regcache_drop_region,
__entry->to = to;
),
- TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from,
- (unsigned int)__entry->to)
+ TP_printk("%s %u-%u", __get_str(name), __entry->from, __entry->to)
);
#endif /* _TRACE_REGMAP_H */
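The regmap hunks above introduce a regmap_bulk event class whose payload is a per-event dynamic array printed as hex. As a rough, self-contained sketch of the same DECLARE_EVENT_CLASS/DEFINE_EVENT pattern (every mydrv_* name is hypothetical and not part of this patch), a driver-local trace header could look like this:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

#if !defined(_TRACE_MYDRV_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MYDRV_H

#include <linux/tracepoint.h>

/* One class, two events sharing it - the same shape as the regmap_bulk class. */
DECLARE_EVENT_CLASS(mydrv_xfer,
	TP_PROTO(const char *devname, unsigned int reg, const void *val, int val_len),
	TP_ARGS(devname, reg, val, val_len),
	TP_STRUCT__entry(
		__string(name, devname)
		__field(unsigned int, reg)
		__dynamic_array(char, buf, val_len)	/* sized per event instance */
		__field(int, val_len)
	),
	TP_fast_assign(
		__assign_str(name, devname);
		__entry->reg = reg;
		__entry->val_len = val_len;
		memcpy(__get_dynamic_array(buf), val, val_len);
	),
	TP_printk("%s reg=%x val=%s", __get_str(name), __entry->reg,
		  __print_hex(__get_dynamic_array(buf), __entry->val_len))
);

DEFINE_EVENT(mydrv_xfer, mydrv_bulk_write,
	TP_PROTO(const char *devname, unsigned int reg, const void *val, int val_len),
	TP_ARGS(devname, reg, val, val_len));

DEFINE_EVENT(mydrv_xfer, mydrv_bulk_read,
	TP_PROTO(const char *devname, unsigned int reg, const void *val, int val_len),
	TP_ARGS(devname, reg, val, val_len));

#endif /* _TRACE_MYDRV_H */

/* Assuming this header is named mydrv-trace.h and is included by a .c file
 * that defines CREATE_TRACE_POINTS first (the new rnbd-srv-trace.c further
 * down in this series uses the same arrangement). */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE mydrv-trace
#include <trace/define_trace.h>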
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 12aca34e8db0..4f01e6b17bb9 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -30,7 +30,7 @@ enum bcma_boot_dev {
BCMA_BOOT_DEV_NAND,
};
-/* The 47162a0 hangs when reading MIPS DMP registers registers */
+/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 12b3ca8f6f4a..128722cf6c3c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -108,7 +108,7 @@ static ssize_t aoedisk_show_payload(struct device *dev,
return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
-static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
+static int aoe_debugfs_show(struct seq_file *s, void *ignored)
{
struct aoedev *d;
struct aoetgt **t, **te;
@@ -151,11 +151,7 @@ static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
return 0;
}
-
-static int aoe_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, aoedisk_debugfs_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(aoe_debugfs);
static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
@@ -184,13 +180,6 @@ static const struct attribute_group *aoe_attr_groups[] = {
NULL,
};
-static const struct file_operations aoe_debugfs_fops = {
- .open = aoe_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void
aoedisk_add_debugfs(struct aoedev *d)
{
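The deleted aoe_debugfs_open()/aoe_debugfs_fops boilerplate is exactly what DEFINE_SHOW_ATTRIBUTE() generates from the _show function. A minimal, self-contained sketch of the pattern with made-up foo_* names (not from this patch):

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *foo_dir;

static int foo_state_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "up\n");
	return 0;
}
/* Expands to foo_state_open() (via single_open()) plus foo_state_fops. */
DEFINE_SHOW_ATTRIBUTE(foo_state);

static int __init foo_init(void)
{
	foo_dir = debugfs_create_dir("foo", NULL);
	debugfs_create_file("state", 0444, foo_dir, NULL, &foo_state_fops);
	return 0;
}

static void __exit foo_exit(void)
{
	debugfs_remove_recursive(foo_dir);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");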
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 859499cd1ff8..20acc4a1fd6d 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -397,7 +397,7 @@ static int brd_alloc(int i)
disk->minors = max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
- strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
+ strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
/*
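The strlcpy() to strscpy() conversions throughout this series share one rationale: strscpy() always NUL-terminates, returns -E2BIG on truncation, and does not have to walk the whole source string the way strlcpy() does to compute strlen(src). A small illustrative helper (names invented, not from the patch):

#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n;

	n = strscpy(dst, src, dst_size);	/* dst is NUL-terminated either way */
	if (n == -E2BIG)
		pr_warn("'%s' truncated to %zu bytes\n", src, dst_size);
}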
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f15f2f041596..4d661282ff41 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1529,7 +1529,6 @@ extern int w_send_read_req(struct drbd_work *, int);
extern int w_e_reissue(struct drbd_work *, int);
extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
-extern int w_start_resync(struct drbd_work *, int);
extern void resync_timer_fn(struct timer_list *t);
extern void start_resync_timer_fn(struct timer_list *t);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 013d355a2033..864c98e74875 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -4752,7 +4752,7 @@ void notify_helper(enum drbd_notification_type type,
struct drbd_genlmsghdr *dh;
int err;
- strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
+ strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
helper_info.helper_status = status;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index af4c7d65490b..c897c4572036 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2113,9 +2113,6 @@ static int receive_DataReply(struct drbd_connection *connection, struct packet_i
if (unlikely(!req))
return -EIO;
- /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
- * special casing it there for the various failure cases.
- * still no race with drbd_fail_pending_reads */
err = recv_dless_read(peer_device, req, sector, pi->size);
if (!err)
req_mod(req, DATA_RECEIVED);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 511f39a08de4..6237fa1dcb0e 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -266,8 +266,6 @@ struct bio_and_error {
extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref);
-extern void _req_may_be_done(struct drbd_request *req,
- struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 562725d222a7..815d77ba6381 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1397,15 +1397,15 @@ static void mtip_dump_identify(struct mtip_port *port)
if (!port->identify_valid)
return;
- strlcpy(cbuf, (char *)(port->identify+10), 21);
+ strscpy(cbuf, (char *)(port->identify + 10), 21);
dev_info(&port->dd->pdev->dev,
"Serial No.: %s\n", cbuf);
- strlcpy(cbuf, (char *)(port->identify+23), 9);
+ strscpy(cbuf, (char *)(port->identify + 23), 9);
dev_info(&port->dd->pdev->dev,
"Firmware Ver.: %s\n", cbuf);
- strlcpy(cbuf, (char *)(port->identify+27), 41);
+ strscpy(cbuf, (char *)(port->identify + 27), 41);
dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
@@ -1421,13 +1421,13 @@ static void mtip_dump_identify(struct mtip_port *port)
pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
switch (revid & 0xFF) {
case 0x1:
- strlcpy(cbuf, "A0", 3);
+ strscpy(cbuf, "A0", 3);
break;
case 0x3:
- strlcpy(cbuf, "A2", 3);
+ strscpy(cbuf, "A2", 3);
break;
default:
- strlcpy(cbuf, "?", 2);
+ strscpy(cbuf, "?", 2);
break;
}
dev_info(&port->dd->pdev->dev,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2a709daefbc4..5cffd96ef2d7 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -157,8 +157,6 @@ static struct dentry *nbd_dbg_dir;
#define nbd_name(nbd) ((nbd)->disk->disk_name)
-#define NBD_MAGIC 0x68797548
-
#define NBD_DEF_BLKSIZE_BITS 10
static unsigned int nbds_max = 16;
@@ -1413,10 +1411,12 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
- if (ret)
+ if (ret) {
sock_shutdown(nbd);
- flush_workqueue(nbd->recv_workq);
+ nbd_clear_que(nbd);
+ }
+ flush_workqueue(nbd->recv_workq);
mutex_lock(&nbd->config_lock);
nbd_bdev_reset(nbd);
/* user requested, ignore socket errors */
@@ -2322,6 +2322,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = nbd_connect_genl_ops,
.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
+ .resv_start_op = NBD_CMD_STATUS + 1,
.maxattr = NBD_ATTR_MAX,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index c451c477978f..1f154f92f4c2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1528,7 +1528,7 @@ static bool should_requeue_request(struct request *rq)
return false;
}
-static int null_map_queues(struct blk_mq_tag_set *set)
+static void null_map_queues(struct blk_mq_tag_set *set)
{
struct nullb *nullb = set->driver_data;
int i, qoff;
@@ -1555,7 +1555,9 @@ static int null_map_queues(struct blk_mq_tag_set *set)
} else {
pr_warn("tag set has unexpected nr_hw_queues: %d\n",
set->nr_hw_queues);
- return -EINVAL;
+ WARN_ON_ONCE(true);
+ submit_queues = 1;
+ poll_queues = 0;
}
}
@@ -1577,8 +1579,6 @@ static int null_map_queues(struct blk_mq_tag_set *set)
qoff += map->nr_queues;
blk_mq_map_queues(map);
}
-
- return 0;
}
static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
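null_blk, rnbd-clt and virtio_blk are all adjusted here for the blk-mq change that makes the .map_queues callback return void: the callback can no longer report failure and instead falls back to a sane mapping. A hedged sketch of a minimal callback under the new contract (mydrv_* names are illustrative):

#include <linux/blk-mq.h>

static void mydrv_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];

	qmap->nr_queues = set->nr_hw_queues;
	qmap->queue_offset = 0;
	blk_mq_map_queues(qmap);	/* default CPU-to-hctx spreading */
}

/* A real driver also fills in .queue_rq and friends. */
static const struct blk_mq_ops mydrv_mq_ops = {
	.map_queues	= mydrv_map_queues,
};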
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index e1d080f680ed..c76e0148eada 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -745,7 +745,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
gendisk->flags |= GENHD_FL_NO_PART;
gendisk->fops = &ps3vram_fops;
gendisk->private_data = dev;
- strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
+ strscpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
blk_queue_max_segments(gendisk->queue, BLK_MAX_SEGMENTS);
blk_queue_max_segment_size(gendisk->queue, BLK_MAX_SEGMENT_SIZE);
diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile
index 5bb1a7ad1ada..40b31630822c 100644
--- a/drivers/block/rnbd/Makefile
+++ b/drivers/block/rnbd/Makefile
@@ -6,10 +6,12 @@ rnbd-client-y := rnbd-clt.o \
rnbd-clt-sysfs.o \
rnbd-common.o
+CFLAGS_rnbd-srv-trace.o = -I$(src)
+
rnbd-server-y := rnbd-common.o \
rnbd-srv.o \
- rnbd-srv-dev.o \
- rnbd-srv-sysfs.o
+ rnbd-srv-sysfs.o \
+ rnbd-srv-trace.o
obj-$(CONFIG_BLK_DEV_RNBD_CLIENT) += rnbd-client.o
obj-$(CONFIG_BLK_DEV_RNBD_SERVER) += rnbd-server.o
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 04da33a22ef4..78334da74d8b 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1159,13 +1159,11 @@ static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct rnbd_queue *q = hctx->driver_data;
struct rnbd_clt_dev *dev = q->dev;
- int cnt;
- cnt = rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
- return cnt;
+ return rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
}
-static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
+static void rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct rnbd_clt_session *sess = set->driver_data;
@@ -1194,8 +1192,6 @@ static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
set->map[HCTX_TYPE_DEFAULT].nr_queues,
set->map[HCTX_TYPE_READ].nr_queues);
}
-
- return 0;
}
static struct blk_mq_ops rnbd_mq_ops = {
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
deleted file mode 100644
index c63017f6e421..000000000000
--- a/drivers/block/rnbd/rnbd-srv-dev.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RDMA Network Block Driver
- *
- * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
- * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
- * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
- */
-#undef pr_fmt
-#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
-
-#include "rnbd-srv-dev.h"
-#include "rnbd-log.h"
-
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags)
-{
- struct rnbd_dev *dev;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- dev->blk_open_flags = flags;
- dev->bdev = blkdev_get_by_path(path, flags, THIS_MODULE);
- ret = PTR_ERR_OR_ZERO(dev->bdev);
- if (ret)
- goto err;
-
- dev->blk_open_flags = flags;
-
- return dev;
-
-err:
- kfree(dev);
- return ERR_PTR(ret);
-}
-
-void rnbd_dev_close(struct rnbd_dev *dev)
-{
- blkdev_put(dev->bdev, dev->blk_open_flags);
- kfree(dev);
-}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
deleted file mode 100644
index 8407d12f70af..000000000000
--- a/drivers/block/rnbd/rnbd-srv-dev.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * RDMA Network Block Driver
- *
- * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
- * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
- * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
- */
-#ifndef RNBD_SRV_DEV_H
-#define RNBD_SRV_DEV_H
-
-#include <linux/fs.h>
-#include "rnbd-proto.h"
-
-struct rnbd_dev {
- struct block_device *bdev;
- fmode_t blk_open_flags;
-};
-
-/**
- * rnbd_dev_open() - Open a device
- * @path: path to open
- * @flags: open flags
- */
-struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags);
-
-/**
- * rnbd_dev_close() - Close a device
- */
-void rnbd_dev_close(struct rnbd_dev *dev);
-
-void rnbd_endio(void *priv, int error);
-
-static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
-{
- return queue_max_segments(bdev_get_queue(dev->bdev));
-}
-
-static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
-{
- return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
-}
-
-static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
-{
- return bdev_max_secure_erase_sectors(dev->bdev);
-}
-
-static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
-{
- return bdev_max_discard_sectors(dev->bdev);
-}
-
-static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
-{
- return bdev_get_queue(dev->bdev)->limits.discard_granularity;
-}
-
-static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
-{
- return bdev_discard_alignment(dev->bdev);
-}
-
-#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv-trace.c b/drivers/block/rnbd/rnbd-srv-trace.c
new file mode 100644
index 000000000000..30f0895c18f5
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-trace.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-srv.h"
+#include "rnbd-srv.h"
+#include "rnbd-proto.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rnbd-srv-trace.h"
diff --git a/drivers/block/rnbd/rnbd-srv-trace.h b/drivers/block/rnbd/rnbd-srv-trace.h
new file mode 100644
index 000000000000..8dedf73bdd28
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-trace.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rnbd_srv
+
+#if !defined(_TRACE_RNBD_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RNBD_SRV_H
+
+#include <linux/tracepoint.h>
+
+struct rnbd_srv_session;
+struct rtrs_srv_op;
+
+DECLARE_EVENT_CLASS(rnbd_srv_link_class,
+ TP_PROTO(struct rnbd_srv_session *srv),
+
+ TP_ARGS(srv),
+
+ TP_STRUCT__entry(
+ __field(int, qdepth)
+ __string(sessname, srv->sessname)
+ ),
+
+ TP_fast_assign(
+ __entry->qdepth = srv->queue_depth;
+ __assign_str(sessname, srv->sessname);
+ ),
+
+ TP_printk("sessname: %s qdepth: %d",
+ __get_str(sessname),
+ __entry->qdepth
+ )
+);
+
+#define DEFINE_LINK_EVENT(name) \
+DEFINE_EVENT(rnbd_srv_link_class, name, \
+ TP_PROTO(struct rnbd_srv_session *srv), \
+ TP_ARGS(srv))
+
+DEFINE_LINK_EVENT(create_sess);
+DEFINE_LINK_EVENT(destroy_sess);
+
+TRACE_DEFINE_ENUM(RNBD_OP_READ);
+TRACE_DEFINE_ENUM(RNBD_OP_WRITE);
+TRACE_DEFINE_ENUM(RNBD_OP_FLUSH);
+TRACE_DEFINE_ENUM(RNBD_OP_DISCARD);
+TRACE_DEFINE_ENUM(RNBD_OP_SECURE_ERASE);
+TRACE_DEFINE_ENUM(RNBD_F_SYNC);
+TRACE_DEFINE_ENUM(RNBD_F_FUA);
+
+#define show_rnbd_rw_flags(x) \
+ __print_flags(x, "|", \
+ { RNBD_OP_READ, "READ" }, \
+ { RNBD_OP_WRITE, "WRITE" }, \
+ { RNBD_OP_FLUSH, "FLUSH" }, \
+ { RNBD_OP_DISCARD, "DISCARD" }, \
+ { RNBD_OP_SECURE_ERASE, "SECURE_ERASE" }, \
+ { RNBD_F_SYNC, "SYNC" }, \
+ { RNBD_F_FUA, "FUA" })
+
+TRACE_EVENT(process_rdma,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_io *msg,
+ struct rtrs_srv_op *id,
+ u32 datalen,
+ size_t usrlen),
+
+ TP_ARGS(srv, msg, id, datalen, usrlen),
+
+ TP_STRUCT__entry(
+ __string(sessname, srv->sessname)
+ __field(u8, dir)
+ __field(u8, ver)
+ __field(u32, device_id)
+ __field(u64, sector)
+ __field(u32, flags)
+ __field(u32, bi_size)
+ __field(u16, ioprio)
+ __field(u32, datalen)
+ __field(size_t, usrlen)
+ ),
+
+ TP_fast_assign(
+ __assign_str(sessname, srv->sessname);
+ __entry->dir = id->dir;
+ __entry->ver = srv->ver;
+ __entry->device_id = le32_to_cpu(msg->device_id);
+ __entry->sector = le64_to_cpu(msg->sector);
+ __entry->bi_size = le32_to_cpu(msg->bi_size);
+ __entry->flags = le32_to_cpu(msg->rw);
+ __entry->ioprio = le16_to_cpu(msg->prio);
+ __entry->datalen = datalen;
+ __entry->usrlen = usrlen;
+ ),
+
+ TP_printk("I/O req: sess: %s, type: %s, ver: %d, devid: %u, sector: %llu, bsize: %u, flags: %s, ioprio: %d, datalen: %u, usrlen: %zu",
+ __get_str(sessname),
+ __print_symbolic(__entry->dir,
+ { READ, "READ" },
+ { WRITE, "WRITE" }),
+ __entry->ver,
+ __entry->device_id,
+ __entry->sector,
+ __entry->bi_size,
+ show_rnbd_rw_flags(__entry->flags),
+ __entry->ioprio,
+ __entry->datalen,
+ __entry->usrlen
+ )
+);
+
+TRACE_EVENT(process_msg_sess_info,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_sess_info *msg),
+
+ TP_ARGS(srv, msg),
+
+ TP_STRUCT__entry(
+ __field(u8, proto_ver)
+ __field(u8, clt_ver)
+ __field(u8, srv_ver)
+ __string(sessname, srv->sessname)
+ ),
+
+ TP_fast_assign(
+ __entry->proto_ver = srv->ver;
+ __entry->clt_ver = msg->ver;
+ __entry->srv_ver = RNBD_PROTO_VER_MAJOR;
+ __assign_str(sessname, srv->sessname);
+ ),
+
+ TP_printk("Session %s using proto-ver %d (clt-ver: %d, srv-ver: %d)",
+ __get_str(sessname),
+ __entry->proto_ver,
+ __entry->clt_ver,
+ __entry->srv_ver
+ )
+);
+
+TRACE_DEFINE_ENUM(RNBD_ACCESS_RO);
+TRACE_DEFINE_ENUM(RNBD_ACCESS_RW);
+TRACE_DEFINE_ENUM(RNBD_ACCESS_MIGRATION);
+
+#define show_rnbd_access_mode(x) \
+ __print_symbolic(x, \
+ { RNBD_ACCESS_RO, "RO" }, \
+ { RNBD_ACCESS_RW, "RW" }, \
+ { RNBD_ACCESS_MIGRATION, "MIGRATION" })
+
+TRACE_EVENT(process_msg_open,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_open *msg),
+
+ TP_ARGS(srv, msg),
+
+ TP_STRUCT__entry(
+ __field(u8, access_mode)
+ __string(sessname, srv->sessname)
+ __string(dev_name, msg->dev_name)
+ ),
+
+ TP_fast_assign(
+ __entry->access_mode = msg->access_mode;
+ __assign_str(sessname, srv->sessname);
+ __assign_str(dev_name, msg->dev_name);
+ ),
+
+ TP_printk("Open message received: session='%s' path='%s' access_mode=%s",
+ __get_str(sessname),
+ __get_str(dev_name),
+ show_rnbd_access_mode(__entry->access_mode)
+ )
+);
+
+TRACE_EVENT(process_msg_close,
+ TP_PROTO(struct rnbd_srv_session *srv,
+ const struct rnbd_msg_close *msg),
+
+ TP_ARGS(srv, msg),
+
+ TP_STRUCT__entry(
+ __field(u32, device_id)
+ __string(sessname, srv->sessname)
+ ),
+
+ TP_fast_assign(
+ __entry->device_id = le32_to_cpu(msg->device_id);
+ __assign_str(sessname, srv->sessname);
+ ),
+
+ TP_printk("Close message received: session='%s' device id='%d'",
+ __get_str(sessname),
+ __entry->device_id
+ )
+);
+
+#endif /* _TRACE_RNBD_SRV_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rnbd-srv-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 5e08da277ddf..2cfed2e58d64 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -13,7 +13,7 @@
#include <linux/blkdev.h>
#include "rnbd-srv.h"
-#include "rnbd-srv-dev.h"
+#include "rnbd-srv-trace.h"
MODULE_DESCRIPTION("RDMA Network Block Device Server");
MODULE_LICENSE("GPL");
@@ -84,18 +84,6 @@ static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
kref_put(&sess_dev->kref, rnbd_sess_dev_release);
}
-void rnbd_endio(void *priv, int error)
-{
- struct rnbd_io_private *rnbd_priv = priv;
- struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
-
- rnbd_put_sess_dev(sess_dev);
-
- rtrs_srv_resp_rdma(rnbd_priv->id, error);
-
- kfree(priv);
-}
-
static struct rnbd_srv_sess_dev *
rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
{
@@ -116,7 +104,13 @@ rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
static void rnbd_dev_bi_end_io(struct bio *bio)
{
- rnbd_endio(bio->bi_private, blk_status_to_errno(bio->bi_status));
+ struct rnbd_io_private *rnbd_priv = bio->bi_private;
+ struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
+
+ rnbd_put_sess_dev(sess_dev);
+ rtrs_srv_resp_rdma(rnbd_priv->id, blk_status_to_errno(bio->bi_status));
+
+ kfree(rnbd_priv);
bio_put(bio);
}
@@ -132,6 +126,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
struct bio *bio;
short prio;
+ trace_process_rdma(srv_sess, msg, id, datalen, usrlen);
+
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -149,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
+ bio = bio_alloc(sess_dev->bdev, 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@@ -223,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- rnbd_dev_close(sess_dev->rnbd_dev);
+ blkdev_put(sess_dev->bdev, sess_dev->open_flags);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (sess_dev->open_flags & FMODE_WRITE)
@@ -244,6 +240,8 @@ static void destroy_sess(struct rnbd_srv_session *srv_sess)
if (xa_empty(&srv_sess->index_idr))
goto out;
+ trace_destroy_sess(srv_sess);
+
mutex_lock(&srv_sess->lock);
xa_for_each(&srv_sess->index_idr, index, sess_dev)
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
@@ -290,6 +288,8 @@ static int create_sess(struct rtrs_srv_sess *rtrs)
rtrs_srv_set_sess_priv(rtrs, srv_sess);
+ trace_create_sess(srv_sess);
+
return 0;
}
@@ -332,23 +332,24 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev,
mutex_unlock(&sess->lock);
}
-static int process_msg_close(struct rnbd_srv_session *srv_sess,
+static void process_msg_close(struct rnbd_srv_session *srv_sess,
void *data, size_t datalen, const void *usr,
size_t usrlen)
{
const struct rnbd_msg_close *close_msg = usr;
struct rnbd_srv_sess_dev *sess_dev;
+ trace_process_msg_close(srv_sess, close_msg);
+
sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
srv_sess);
if (IS_ERR(sess_dev))
- return 0;
+ return;
rnbd_put_sess_dev(sess_dev);
mutex_lock(&srv_sess->lock);
rnbd_srv_destroy_dev_session_sysfs(sess_dev);
mutex_unlock(&srv_sess->lock);
- return 0;
}
static int process_msg_open(struct rnbd_srv_session *srv_sess,
@@ -359,10 +360,9 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
const void *msg, size_t len,
void *data, size_t datalen);
-static int rnbd_srv_rdma_ev(void *priv,
- struct rtrs_srv_op *id, int dir,
- void *data, size_t datalen, const void *usr,
- size_t usrlen)
+static int rnbd_srv_rdma_ev(void *priv, struct rtrs_srv_op *id,
+ void *data, size_t datalen,
+ const void *usr, size_t usrlen)
{
struct rnbd_srv_session *srv_sess = priv;
const struct rnbd_msg_hdr *hdr = usr;
@@ -378,7 +378,7 @@ static int rnbd_srv_rdma_ev(void *priv,
case RNBD_MSG_IO:
return process_rdma(srv_sess, id, data, datalen, usr, usrlen);
case RNBD_MSG_CLOSE:
- ret = process_msg_close(srv_sess, data, datalen, usr, usrlen);
+ process_msg_close(srv_sess, data, datalen, usr, usrlen);
break;
case RNBD_MSG_OPEN:
ret = process_msg_open(srv_sess, usr, usrlen, data, datalen);
@@ -388,11 +388,16 @@ static int rnbd_srv_rdma_ev(void *priv,
datalen);
break;
default:
- pr_warn("Received unexpected message type %d with dir %d from session %s\n",
- type, dir, srv_sess->sessname);
+ pr_warn("Received unexpected message type %d from session %s\n",
+ type, srv_sess->sessname);
return -EINVAL;
}
+ /*
+ * Since ret is passed to rtrs to handle the failure case, just
+ * return 0 at the end; otherwise callers in rtrs would call
+ * send_io_resp_imm() again and print a redundant error message.
+ */
rtrs_srv_resp_rdma(id, ret);
return 0;
}
@@ -504,14 +509,14 @@ static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
}
static struct rnbd_srv_dev *
-rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
+rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
struct rnbd_srv_session *srv_sess,
enum rnbd_access_mode access_mode)
{
int ret;
struct rnbd_srv_dev *new_dev, *dev;
- new_dev = rnbd_srv_init_srv_dev(rnbd_dev->bdev);
+ new_dev = rnbd_srv_init_srv_dev(bdev);
if (IS_ERR(new_dev))
return new_dev;
@@ -531,41 +536,32 @@ rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
- struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+ struct block_device *bdev = sess_dev->bdev;
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
- rsp->device_id =
- cpu_to_le32(sess_dev->device_id);
- rsp->nsectors =
- cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk));
- rsp->logical_block_size =
- cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev));
- rsp->physical_block_size =
- cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev));
- rsp->max_segments =
- cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
+ rsp->device_id = cpu_to_le32(sess_dev->device_id);
+ rsp->nsectors = cpu_to_le64(bdev_nr_sectors(bdev));
+ rsp->logical_block_size = cpu_to_le16(bdev_logical_block_size(bdev));
+ rsp->physical_block_size = cpu_to_le16(bdev_physical_block_size(bdev));
+ rsp->max_segments = cpu_to_le16(bdev_max_segments(bdev));
rsp->max_hw_sectors =
- cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
+ cpu_to_le32(queue_max_hw_sectors(bdev_get_queue(bdev)));
rsp->max_write_same_sectors = 0;
- rsp->max_discard_sectors =
- cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
- rsp->discard_granularity =
- cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
- rsp->discard_alignment =
- cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
- rsp->secure_discard =
- cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
+ rsp->max_discard_sectors = cpu_to_le32(bdev_max_discard_sectors(bdev));
+ rsp->discard_granularity = cpu_to_le32(bdev_discard_granularity(bdev));
+ rsp->discard_alignment = cpu_to_le32(bdev_discard_alignment(bdev));
+ rsp->secure_discard = cpu_to_le16(bdev_max_secure_erase_sectors(bdev));
rsp->cache_policy = 0;
- if (bdev_write_cache(rnbd_dev->bdev))
+ if (bdev_write_cache(bdev))
rsp->cache_policy |= RNBD_WRITEBACK;
- if (bdev_fua(rnbd_dev->bdev))
+ if (bdev_fua(bdev))
rsp->cache_policy |= RNBD_FUA;
}
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct rnbd_dev *rnbd_dev, fmode_t open_flags,
+ struct block_device *bdev, fmode_t open_flags,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -577,7 +573,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
- sdev->rnbd_dev = rnbd_dev;
+ sdev->bdev = bdev;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->open_flags = open_flags;
@@ -643,9 +639,8 @@ static int process_msg_sess_info(struct rnbd_srv_session *srv_sess,
struct rnbd_msg_sess_info_rsp *rsp = data;
srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
- pr_debug("Session %s using protocol version %d (client version: %d, server version: %d)\n",
- srv_sess->sessname, srv_sess->ver,
- sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+
+ trace_process_msg_sess_info(srv_sess, sess_info_msg);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
rsp->ver = srv_sess->ver;
@@ -685,14 +680,13 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
+ struct block_device *bdev;
fmode_t open_flags;
char *full_path;
- struct rnbd_dev *rnbd_dev;
struct rnbd_msg_open_rsp *rsp = data;
- pr_debug("Open message received: session='%s' path='%s' access_mode=%d\n",
- srv_sess->sessname, open_msg->dev_name,
- open_msg->access_mode);
+ trace_process_msg_open(srv_sess, open_msg);
+
open_flags = FMODE_READ;
if (open_msg->access_mode != RNBD_ACCESS_RO)
open_flags |= FMODE_WRITE;
@@ -725,25 +719,25 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- rnbd_dev = rnbd_dev_open(full_path, open_flags);
- if (IS_ERR(rnbd_dev)) {
- pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
- full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
- ret = PTR_ERR(rnbd_dev);
+ bdev = blkdev_get_by_path(full_path, open_flags, THIS_MODULE);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
+ pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
+ full_path, srv_sess->sessname, ret);
goto free_path;
}
- srv_dev = rnbd_srv_get_or_create_srv_dev(rnbd_dev, srv_sess,
+ srv_dev = rnbd_srv_get_or_create_srv_dev(bdev, srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
full_path, srv_sess->sessname, PTR_ERR(srv_dev));
ret = PTR_ERR(srv_dev);
- goto rnbd_dev_close;
+ goto blkdev_put;
}
srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
- rnbd_dev, open_flags,
+ bdev, open_flags,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
@@ -758,7 +752,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -800,8 +794,8 @@ srv_dev_put:
mutex_unlock(&srv_dev->lock);
}
rnbd_put_srv_dev(srv_dev);
-rnbd_dev_close:
- rnbd_dev_close(rnbd_dev);
+blkdev_put:
+ blkdev_put(bdev, open_flags);
free_path:
kfree(full_path);
reject:
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 081bceaf4ae9..f5962fd31d62 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- struct rnbd_dev *rnbd_dev;
+ struct block_device *bdev;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;
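With the rnbd_dev wrapper gone, the server holds a struct block_device directly and uses the generic block-layer helpers. A small sketch of the open/query/close sequence the patch switches to (the function name and the caller-supplied path are made up):

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/printk.h>

static int example_open_report_close(const char *path)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE;
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, mode, THIS_MODULE);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	pr_info("%s: %llu sectors, %u-byte logical blocks\n", path,
		(unsigned long long)bdev_nr_sectors(bdev),
		bdev_logical_block_size(bdev));

	blkdev_put(bdev, mode);	/* mode must match the one used to open */
	return 0;
}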
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6a4a94b4cdf4..2651bf41dde3 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -49,7 +49,9 @@
/* All UBLK_F_* have to be included into UBLK_F_ALL */
#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
| UBLK_F_URING_CMD_COMP_IN_TASK \
- | UBLK_F_NEED_GET_DATA)
+ | UBLK_F_NEED_GET_DATA \
+ | UBLK_F_USER_RECOVERY \
+ | UBLK_F_USER_RECOVERY_REISSUE)
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
@@ -119,7 +121,7 @@ struct ublk_queue {
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
- bool abort_work_pending;
+ bool force_abort;
unsigned short nr_io_ready; /* how many ios setup */
struct ublk_device *dev;
struct ublk_io ios[0];
@@ -161,6 +163,7 @@ struct ublk_device {
* monitor each queue's daemon periodically
*/
struct delayed_work monitor_work;
+ struct work_struct quiesce_work;
struct work_struct stop_work;
};
@@ -323,6 +326,30 @@ static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
PAGE_SIZE);
}
+static inline bool ublk_queue_can_use_recovery_reissue(
+ struct ublk_queue *ubq)
+{
+ if ((ubq->flags & UBLK_F_USER_RECOVERY) &&
+ (ubq->flags & UBLK_F_USER_RECOVERY_REISSUE))
+ return true;
+ return false;
+}
+
+static inline bool ublk_queue_can_use_recovery(
+ struct ublk_queue *ubq)
+{
+ if (ubq->flags & UBLK_F_USER_RECOVERY)
+ return true;
+ return false;
+}
+
+static inline bool ublk_can_use_recovery(struct ublk_device *ub)
+{
+ if (ub->dev_info.flags & UBLK_F_USER_RECOVERY)
+ return true;
+ return false;
+}
+
static void ublk_free_disk(struct gendisk *disk)
{
struct ublk_device *ub = disk->private_data;
@@ -612,13 +639,17 @@ static void ublk_complete_rq(struct request *req)
* Also aborting may not be started yet, keep in mind that one failed
* request may be issued by block layer again.
*/
-static void __ublk_fail_req(struct ublk_io *io, struct request *req)
+static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
io->flags |= UBLK_IO_FLAG_ABORTED;
- blk_mq_end_request(req, BLK_STS_IOERR);
+ if (ublk_queue_can_use_recovery_reissue(ubq))
+ blk_mq_requeue_request(req, false);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
}
}
@@ -639,22 +670,40 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
#define UBLK_REQUEUE_DELAY_MS 3
+static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ struct request *rq)
+{
+ /* We cannot process this rq so just requeue it. */
+ if (ublk_queue_can_use_recovery(ubq))
+ blk_mq_requeue_request(rq, false);
+ else
+ blk_mq_end_request(rq, BLK_STS_IOERR);
+
+ mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+}
+
static inline void __ublk_rq_task_work(struct request *req)
{
struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_device *ub = ubq->dev;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
- bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
unsigned int mapped_bytes;
pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
- if (unlikely(task_exiting)) {
- blk_mq_end_request(req, BLK_STS_IOERR);
- mod_delayed_work(system_wq, &ub->monitor_work, 0);
+ /*
+ * Task is exiting if either:
+ *
+ * (1) current != ubq_daemon.
+ * io_uring_cmd_complete_in_task() tries to run task_work
+ * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
+ *
+ * (2) current->flags & PF_EXITING.
+ */
+ if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
+ __ublk_abort_rq(ubq, req);
return;
}
@@ -739,13 +788,24 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
res = ublk_setup_iod(ubq, rq);
if (unlikely(res != BLK_STS_OK))
return BLK_STS_IOERR;
+ /* With recovery feature enabled, force_abort is set in
+ * ublk_stop_dev() before calling del_gendisk(). We have to
+ * abort all requeued and new rqs here to let del_gendisk()
+ * move on. Besides, we must not call io_uring_cmd_complete_in_task()
+ * here, to avoid a UAF on the io_uring ctx.
+ *
+ * Note: force_abort is guaranteed to be seen because it is set
+ * before the request queue is unquiesced.
+ */
+ if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
+ return BLK_STS_IOERR;
blk_mq_start_request(bd->rq);
if (unlikely(ubq_daemon_is_dying(ubq))) {
fail:
- mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
- return BLK_STS_IOERR;
+ __ublk_abort_rq(ubq, rq);
+ return BLK_STS_OK;
}
if (ublk_can_use_task_work(ubq)) {
@@ -916,7 +976,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
if (rq)
- __ublk_fail_req(io, rq);
+ __ublk_fail_req(ubq, io, rq);
}
}
ublk_put_device(ub);
@@ -932,7 +992,10 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
struct ublk_queue *ubq = ublk_get_queue(ub, i);
if (ubq_daemon_is_dying(ubq)) {
- schedule_work(&ub->stop_work);
+ if (ublk_queue_can_use_recovery(ubq))
+ schedule_work(&ub->quiesce_work);
+ else
+ schedule_work(&ub->stop_work);
/* abort queue is for making forward progress */
ublk_abort_queue(ub, ubq);
@@ -940,12 +1003,13 @@ static void ublk_daemon_monitor_work(struct work_struct *work)
}
/*
- * We can't schedule monitor work after ublk_remove() is started.
+ * We can't schedule monitor work once ub's state is no longer UBLK_S_DEV_LIVE,
+ * i.e. after ublk_remove() or __ublk_quiesce_dev() is started.
*
* No need ub->mutex, monitor work are canceled after state is marked
- * as DEAD, so DEAD state is observed reliably.
+ * as not LIVE, so new state is observed reliably.
*/
- if (ub->dev_info.state != UBLK_S_DEV_DEAD)
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
schedule_delayed_work(&ub->monitor_work,
UBLK_DAEMON_MONITOR_PERIOD);
}
@@ -982,12 +1046,97 @@ static void ublk_cancel_dev(struct ublk_device *ub)
ublk_cancel_queue(ublk_get_queue(ub, i));
}
-static void ublk_stop_dev(struct ublk_device *ub)
+static bool ublk_check_inflight_rq(struct request *rq, void *data)
+{
+ bool *idle = data;
+
+ if (blk_mq_request_started(rq)) {
+ *idle = false;
+ return false;
+ }
+ return true;
+}
+
+static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
{
+ bool idle;
+
+ WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
+ while (true) {
+ idle = true;
+ blk_mq_tagset_busy_iter(&ub->tag_set,
+ ublk_check_inflight_rq, &idle);
+ if (idle)
+ break;
+ msleep(UBLK_REQUEUE_DELAY_MS);
+ }
+}
+
+static void __ublk_quiesce_dev(struct ublk_device *ub)
+{
+ pr_devel("%s: quiesce ub: dev_id %d state %s\n",
+ __func__, ub->dev_info.dev_id,
+ ub->dev_info.state == UBLK_S_DEV_LIVE ?
+ "LIVE" : "QUIESCED");
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ublk_wait_tagset_rqs_idle(ub);
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+ ublk_cancel_dev(ub);
+ /* We are going to release the task_struct of ubq_daemon and reset
+ * ->ubq_daemon to NULL. So in monitor_work, checking ubq_daemon would cause a UAF.
+ * Besides, monitor_work is not necessary in QUIESCED state since we have
+ * already scheduled quiesce_work and quiesced all ubqs.
+ *
+ * Do not let monitor_work schedule itself if the state is QUIESCED. We cancel
+ * it here and re-schedule it in END_USER_RECOVERY to avoid a UAF.
+ */
+ cancel_delayed_work_sync(&ub->monitor_work);
+}
+
+static void ublk_quiesce_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, quiesce_work);
+
mutex_lock(&ub->mutex);
if (ub->dev_info.state != UBLK_S_DEV_LIVE)
goto unlock;
+ __ublk_quiesce_dev(ub);
+ unlock:
+ mutex_unlock(&ub->mutex);
+}
+
+static void ublk_unquiesce_dev(struct ublk_device *ub)
+{
+ int i;
+
+ pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
+ __func__, ub->dev_info.dev_id,
+ ub->dev_info.state == UBLK_S_DEV_LIVE ?
+ "LIVE" : "QUIESCED");
+ /* quiesce_work has run. We let requeued rqs be aborted
+ * before running fallback_wq. "force_abort" must be seen
+ * after the request queue is unquiesced. Then del_gendisk()
+ * can move on.
+ */
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->force_abort = true;
+
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ /* We may have requeued some rqs in ublk_quiesce_queue() */
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+}
+static void ublk_stop_dev(struct ublk_device *ub)
+{
+ mutex_lock(&ub->mutex);
+ if (ub->dev_info.state == UBLK_S_DEV_DEAD)
+ goto unlock;
+ if (ublk_can_use_recovery(ub)) {
+ if (ub->dev_info.state == UBLK_S_DEV_LIVE)
+ __ublk_quiesce_dev(ub);
+ ublk_unquiesce_dev(ub);
+ }
del_gendisk(ub->ub_disk);
ub->dev_info.state = UBLK_S_DEV_DEAD;
ub->dev_info.ublksrv_pid = -1;
@@ -1311,6 +1460,7 @@ static void ublk_remove(struct ublk_device *ub)
{
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
+ cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
put_device(&ub->cdev_dev);
}
@@ -1487,6 +1637,7 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->mm_lock);
+ INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
@@ -1607,6 +1758,7 @@ static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
+ cancel_work_sync(&ub->quiesce_work);
ublk_put_device(ub);
return 0;
@@ -1709,6 +1861,116 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
return ret;
}
+static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+ int i;
+
+ WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
+ /* All old ioucmds have to be completed */
+ WARN_ON_ONCE(ubq->nr_io_ready);
+ /* old daemon is PF_EXITING, put it now */
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+ ubq->ubq_daemon = NULL;
+
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+ /* forget everything now and be ready for new FETCH_REQ */
+ io->flags = 0;
+ io->cmd = NULL;
+ io->addr = 0;
+ }
+}
+
+static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ struct ublk_device *ub;
+ int ret = -EINVAL;
+ int i;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return ret;
+
+ mutex_lock(&ub->mutex);
+ if (!ublk_can_use_recovery(ub))
+ goto out_unlock;
+ /*
+ * START_RECOVERY is only allowed after:
+ *
+ * (1) UB_STATE_OPEN is not set, which means the dying process has exited
+ * and related io_uring ctx is freed so file struct of /dev/ublkcX is
+ * released.
+ *
+ * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
+ * (a) has quiesced the request queue,
+ * (b) has requeued every inflight rq whose io_flags is ACTIVE,
+ * (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE,
+ * (d) has completed/canceled all ioucmds owned by the dying process.
+ */
+ if (test_bit(UB_STATE_OPEN, &ub->state) ||
+ ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_queue_reinit(ub, ublk_get_queue(ub, i));
+ /* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ ub->mm = NULL;
+ ub->nr_queues_ready = 0;
+ init_completion(&ub->completion);
+ ret = 0;
+ out_unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+ return ret;
+}
+
+static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ int ublksrv_pid = (int)header->data[0];
+ struct ublk_device *ub;
+ int ret = -EINVAL;
+
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ return ret;
+
+ pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
+ __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+ /* wait until the new ubq_daemons have sent all FETCH_REQs */
+ wait_for_completion_interruptible(&ub->completion);
+ pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
+ __func__, ub->dev_info.nr_hw_queues, header->dev_id);
+
+ mutex_lock(&ub->mutex);
+ if (!ublk_can_use_recovery(ub))
+ goto out_unlock;
+
+ if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ ub->dev_info.ublksrv_pid = ublksrv_pid;
+ pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
+ __func__, ublksrv_pid, header->dev_id);
+ blk_mq_unquiesce_queue(ub->ub_disk->queue);
+ pr_devel("%s: queue unquiesced, dev id %d.\n",
+ __func__, header->dev_id);
+ blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ ub->dev_info.state = UBLK_S_DEV_LIVE;
+ schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
+ ret = 0;
+ out_unlock:
+ mutex_unlock(&ub->mutex);
+ ublk_put_device(ub);
+ return ret;
+}
+
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -1750,6 +2012,12 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
case UBLK_CMD_SET_PARAMS:
ret = ublk_ctrl_set_params(cmd);
break;
+ case UBLK_CMD_START_USER_RECOVERY:
+ ret = ublk_ctrl_start_recovery(cmd);
+ break;
+ case UBLK_CMD_END_USER_RECOVERY:
+ ret = ublk_ctrl_end_recovery(cmd);
+ break;
default:
break;
}
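The recovery path above quiesces the queue and then spins until every started request has drained before flipping the device to UBLK_S_DEV_QUIESCED. A self-contained sketch of that quiesce-then-drain idiom, mirroring ublk_wait_tagset_rqs_idle() with hypothetical mydrv_* names:

#include <linux/blk-mq.h>
#include <linux/delay.h>

static bool mydrv_rq_started(struct request *rq, void *data)
{
	bool *idle = data;

	if (blk_mq_request_started(rq)) {
		*idle = false;
		return false;		/* a started request was found - stop iterating */
	}
	return true;
}

static void mydrv_quiesce_and_drain(struct request_queue *q,
				    struct blk_mq_tag_set *set)
{
	bool idle;

	blk_mq_quiesce_queue(q);	/* no further ->queue_rq() calls */
	do {
		idle = true;
		blk_mq_tagset_busy_iter(set, mydrv_rq_started, &idle);
		if (!idle)
			msleep(3);	/* same spirit as UBLK_REQUEUE_DELAY_MS */
	} while (!idle);
}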
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30255fcaf181..19da5defd734 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -130,7 +130,7 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
+static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
unsigned short segments = blk_rq_nr_discard_segments(req);
unsigned short n = 0;
@@ -240,6 +240,9 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
type = VIRTIO_BLK_T_WRITE_ZEROES;
unmap = !(req->cmd_flags & REQ_NOUNMAP);
break;
+ case REQ_OP_SECURE_ERASE:
+ type = VIRTIO_BLK_T_SECURE_ERASE;
+ break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
@@ -251,8 +254,9 @@ static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
- if (virtblk_setup_discard_write_zeroes(req, unmap))
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
+ type == VIRTIO_BLK_T_SECURE_ERASE) {
+ if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
return BLK_STS_RESOURCE;
}
@@ -322,14 +326,14 @@ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(status))
return status;
- blk_mq_start_request(req);
-
vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
if (unlikely(vbr->sg_table.nents < 0)) {
virtblk_cleanup_cmd(req);
return BLK_STS_RESOURCE;
}
+ blk_mq_start_request(req);
+
return BLK_STS_OK;
}
@@ -391,8 +395,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
}
static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
- struct request **rqlist,
- struct request **requeue_list)
+ struct request **rqlist)
{
unsigned long flags;
int err;
@@ -408,7 +411,7 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
if (err) {
virtblk_unmap_data(req, vbr);
virtblk_cleanup_cmd(req);
- rq_list_add(requeue_list, req);
+ blk_mq_requeue_request(req, true);
}
}
@@ -436,7 +439,7 @@ static void virtio_queue_rqs(struct request **rqlist)
if (!next || req->mq_hctx != next->mq_hctx) {
req->rq_next = NULL;
- kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+ kick = virtblk_add_req_batch(vq, rqlist);
if (kick)
virtqueue_notify(vq->vq);
@@ -802,7 +805,7 @@ static const struct attribute_group *virtblk_attr_groups[] = {
NULL,
};
-static int virtblk_map_queues(struct blk_mq_tag_set *set)
+static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
int i, qoff;
@@ -827,8 +830,6 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
else
blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
}
-
- return 0;
}
static void virtblk_complete_batch(struct io_comp_batch *iob)
@@ -889,6 +890,8 @@ static int virtblk_probe(struct virtio_device *vdev)
int err, index;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
+ u32 max_discard_segs = 0;
+ u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
unsigned int queue_depth;
@@ -1046,27 +1049,14 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
- discard_sector_alignment, &v);
- if (v)
- q->limits.discard_granularity = v << SECTOR_SHIFT;
- else
- q->limits.discard_granularity = blk_size;
+ discard_sector_alignment, &discard_granularity);
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
- &v);
-
- /*
- * max_discard_seg == 0 is out of spec but we always
- * handled it.
- */
- if (!v)
- v = sg_elems;
- blk_queue_max_discard_segments(q,
- min(v, MAX_DISCARD_SEGMENTS));
+ &max_discard_segs);
}
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
@@ -1075,6 +1065,85 @@ static int virtblk_probe(struct virtio_device *vdev)
blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
}
+ /* The discard and secure erase limits are combined since the Linux
+ * block layer uses the same limit for both commands.
+ *
+ * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
+ * are negotiated, we will use the minimum between the limits.
+ *
+ * discard sector alignment is set to the minimum between discard_sector_alignment
+ * and secure_erase_sector_alignment.
+ *
+ * The maximum number of discard segments is set to the minimum between
+ * max_discard_seg and max_secure_erase_seg.
+ */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ secure_erase_sector_alignment, &v);
+
+ /* secure_erase_sector_alignment should not be zero; the device must
+ * report a valid number of sectors.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: secure_erase_sector_alignment can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ discard_granularity = min_not_zero(discard_granularity, v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_secure_erase_sectors, &v);
+
+ /* max_secure_erase_sectors should not be zero; the device must
+ * report a valid number of sectors.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: max_secure_erase_sectors can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ blk_queue_max_secure_erase_sectors(q, v);
+
+ virtio_cread(vdev, struct virtio_blk_config,
+ max_secure_erase_seg, &v);
+
+ /* max_secure_erase_seg should not be zero; the device must
+ * report a valid number of segments.
+ */
+ if (!v) {
+ dev_err(&vdev->dev,
+ "virtio_blk: max_secure_erase_seg can't be 0\n");
+ err = -EINVAL;
+ goto out_cleanup_disk;
+ }
+
+ max_discard_segs = min_not_zero(max_discard_segs, v);
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
+ virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
+ /* max_discard_segs and discard_granularity will be 0 only if the
+ * max_discard_seg and discard_sector_alignment fields in the virtio
+ * config are 0 and the VIRTIO_BLK_F_SECURE_ERASE feature is not
+ * negotiated. In that case, fall back to the default values.
+ */
+ if (!max_discard_segs)
+ max_discard_segs = sg_elems;
+
+ blk_queue_max_discard_segments(q,
+ min(max_discard_segs, MAX_DISCARD_SEGMENTS));
+
+ if (discard_granularity)
+ q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
+ else
+ q->limits.discard_granularity = blk_size;
+ }
+
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
@@ -1170,6 +1239,7 @@ static unsigned int features_legacy[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
+ VIRTIO_BLK_F_SECURE_ERASE,
}
;
static unsigned int features[] = {
@@ -1177,6 +1247,7 @@ static unsigned int features[] = {
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
+ VIRTIO_BLK_F_SECURE_ERASE,
};
static struct virtio_driver virtio_blk = {
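As the comment in the probe path explains, a zero limit means the device did not report that field, so the driver merges the discard and secure-erase limits with min_not_zero() and only then falls back to defaults. A tiny illustration (values invented):

#include <linux/minmax.h>
#include <linux/types.h>

static u32 merged_max_segs(u32 max_discard_segs, u32 max_secure_erase_seg)
{
	/* 0 means "not reported"; otherwise take the stricter (smaller) limit,
	 * since the block layer shares one limit for both commands. */
	return min_not_zero(max_discard_segs, max_secure_erase_seg);
}

/* merged_max_segs(0, 64) == 64, merged_max_segs(256, 64) == 64 */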
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index bda5c815e441..a28473470e66 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -226,6 +226,9 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ /* Connect-time cached feature_persistent parameter value */
+ unsigned int feature_gnt_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index ee7ad2fb432d..c0227dfa4688 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -907,7 +907,7 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- be->blkif->vbd.feature_gnt_persistent);
+ be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1085,7 +1085,9 @@ static int connect_ring(struct backend_info *be)
return -ENOSYS;
}
- blkif->vbd.feature_gnt_persistent = feature_persistent &&
+ blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
+ blkif->vbd.feature_gnt_persistent =
+ blkif->vbd.feature_gnt_persistent_parm &&
xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8e56e69fb4c4..35b9bcad9db9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -213,6 +213,9 @@ struct blkfront_info
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ /* Connect-time cached feature_persistent parameter */
+ unsigned int feature_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
@@ -1756,6 +1759,12 @@ abort_transaction:
return err;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1847,8 +1856,9 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- info->feature_persistent);
+ info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1916,12 +1926,6 @@ static int negotiate_mq(struct blkfront_info *info)
return 0;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2281,7 +2285,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (feature_persistent)
+ if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 43eeef2b9fbe..7c74d8cba44f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -502,7 +502,7 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- strlcpy(file_name, buf, PATH_MAX);
+ strscpy(file_name, buf, PATH_MAX);
/* ignore trailing newline */
sz = strlen(file_name);
if (sz > 0 && file_name[sz - 1] == '\n')
@@ -1034,7 +1034,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
- strlcpy(compressor, buf, sizeof(compressor));
+ strscpy(compressor, buf, sizeof(compressor));
/* ignore trailing newline */
sz = strlen(compressor);
if (sz > 0 && compressor[sz - 1] == '\n')
@@ -1982,7 +1982,7 @@ static int zram_add(void)
if (ret)
goto out_cleanup_disk;
- strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+ strscpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 818681c89db8..a657e9a3e96a 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -449,6 +449,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x17: /* TyP */
case 0x18: /* Slr */
case 0x19: /* Slr-F */
+ case 0x1b: /* Mgr */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -2330,6 +2331,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x17:
case 0x18:
case 0x19:
+ case 0x1b:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -2439,15 +2441,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
+ if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
+ set_bit(HCI_QUIRK_VALID_LE_STATES,
+ &hdev->quirks);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
case 0x0b: /* SfP */
- case 0x0c: /* WsP */
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ fallthrough;
+ case 0x0c: /* WsP */
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
@@ -2455,11 +2462,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
- /* Valid LE States quirk for JfP/ThP familiy */
- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
- set_bit(HCI_QUIRK_VALID_LE_STATES,
- &hdev->quirks);
-
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2530,9 +2532,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
- /* Valid LE States quirk for JfP/ThP familiy */
- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ /* Set Valid LE States quirk */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2542,6 +2543,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x17:
case 0x18:
case 0x19:
+ case 0x1b:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 15caa6469538..271963805a38 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -426,6 +426,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK |
@@ -438,6 +440,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -466,6 +470,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -478,9 +485,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -516,19 +532,17 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
- /* Additional Realtek 8761B Bluetooth devices */
+ /* Additional Realtek 8761BUV Bluetooth devices */
{ USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
-
- /* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
-
- /* Additional Realtek 8761BUV Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -2477,15 +2491,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ /* WMT cmd/event doesn't follow the generic HCI cmd/event handling;
+ * the control pipe has to be polled continuously until the host has
+ * received the WMT event. Take a PM reference on the USB interface so
+ * it cannot enter autosuspend while a WMT cmd/event is in progress.
+ */
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto err_free_wc;
+
err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ usb_autopm_put_interface(data->intf);
goto err_free_wc;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
+
+ usb_autopm_put_interface(data->intf);
+
if (err < 0)
goto err_free_wc;
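
The btusb change above brackets the MediaTek WMT command/poll sequence with usb_autopm_get_interface()/usb_autopm_put_interface(), so runtime PM cannot autosuspend the interface while the control pipe is still being polled for the WMT event. A stripped-down sketch of the pattern (example_send_polled_cmd() and the elided command code are hypothetical):

#include <linux/usb.h>

/* Hold a runtime-PM reference for the whole command/poll sequence. */
static int example_send_polled_cmd(struct usb_interface *intf)
{
	int err;

	err = usb_autopm_get_interface(intf);	/* resumes and pins the interface */
	if (err < 0)
		return err;

	/* ... send the vendor command and poll the control pipe here ... */

	usb_autopm_put_interface(intf);		/* allow autosuspend again */
	return 0;
}
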
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f537673ede17..865112e96ff9 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -493,6 +493,11 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
+ if (percpu_init_rwsem(&hu->proto_lock)) {
+ BT_ERR("Can't allocate semaphore structure");
+ kfree(hu);
+ return -ENOMEM;
+ }
tty->disc_data = hu;
hu->tty = tty;
@@ -505,8 +510,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- percpu_init_rwsem(&hu->proto_lock);
-
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index c0e5f42ec6b7..f16fd79bc02b 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -310,11 +310,12 @@ int hci_uart_register_device(struct hci_uart *hu,
serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops);
+ if (percpu_init_rwsem(&hu->proto_lock))
+ return -ENOMEM;
+
err = serdev_device_open(hu->serdev);
if (err)
- return err;
-
- percpu_init_rwsem(&hu->proto_lock);
+ goto err_rwsem;
err = p->open(hu);
if (err)
@@ -389,6 +390,8 @@ err_alloc:
p->close(hu);
err_open:
serdev_device_close(hu->serdev);
+err_rwsem:
+ percpu_free_rwsem(&hu->proto_lock);
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
@@ -410,5 +413,6 @@ void hci_uart_unregister_device(struct hci_uart *hu)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
+ percpu_free_rwsem(&hu->proto_lock);
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
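
Both the hci_ldisc and hci_serdev fixes above treat percpu_init_rwsem() as fallible — it allocates per-CPU state and can return -ENOMEM — and pair it with percpu_free_rwsem() on the error and teardown paths. A self-contained sketch of that pairing, with hypothetical example_uart helpers:

#include <linux/percpu-rwsem.h>
#include <linux/slab.h>

struct example_uart {
	struct percpu_rw_semaphore proto_lock;
};

/* Allocation and lock setup: back out cleanly if either step fails. */
static struct example_uart *example_uart_alloc(void)
{
	struct example_uart *hu;

	hu = kzalloc(sizeof(*hu), GFP_KERNEL);
	if (!hu)
		return NULL;

	if (percpu_init_rwsem(&hu->proto_lock)) {
		kfree(hu);
		return NULL;
	}
	return hu;
}

/* Teardown mirrors setup: free the rwsem before freeing the structure. */
static void example_uart_free(struct example_uart *hu)
{
	percpu_free_rwsem(&hu->proto_lock);
	kfree(hu);
}
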
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 2e564803e786..5b65a48f17e7 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -85,7 +85,7 @@ static int wait_lpc_idle(void __iomem *mbase, unsigned int waitcnt)
ndelay(LPC_NSEC_PERWAIT);
} while (--waitcnt);
- return -ETIME;
+ return -ETIMEDOUT;
}
/*
@@ -347,7 +347,7 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
unsigned long sys_port;
resource_size_t len = resource_size(res);
- sys_port = logic_pio_trans_hwaddr(&host->fwnode, res->start, len);
+ sys_port = logic_pio_trans_hwaddr(acpi_fwnode_handle(host), res->start, len);
if (sys_port == ~0UL)
return -EFAULT;
@@ -472,9 +472,7 @@ static int hisi_lpc_acpi_clear_enumerated(struct acpi_device *adev, void *not_us
struct hisi_lpc_acpi_cell {
const char *hid;
- const char *name;
- void *pdata;
- size_t pdata_size;
+ const struct platform_device_info *pdevinfo;
};
static void hisi_lpc_acpi_remove(struct device *hostdev)
@@ -505,28 +503,45 @@ static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
/* ipmi */
{
.hid = "IPI0001",
- .name = "hisi-lpc-ipmi",
+ .pdevinfo = (struct platform_device_info []) {
+ {
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "hisi-lpc-ipmi",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ },
+ },
},
/* 8250-compatible uart */
{
.hid = "HISI1031",
- .name = "serial8250",
- .pdata = (struct plat_serial8250_port []) {
+ .pdevinfo = (struct platform_device_info []) {
{
- .iobase = res->start,
- .uartclk = 1843200,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF,
+ .parent = hostdev,
+ .fwnode = acpi_fwnode_handle(child),
+ .name = "serial8250",
+ .id = PLATFORM_DEVID_AUTO,
+ .res = res,
+ .num_res = num_res,
+ .data = (struct plat_serial8250_port []) {
+ {
+ .iobase = res->start,
+ .uartclk = 1843200,
+ .iotype = UPIO_PORT,
+ .flags = UPF_BOOT_AUTOCONF,
+ },
+ {}
+ },
+ .size_data = 2 * sizeof(struct plat_serial8250_port),
},
- {}
},
- .pdata_size = 2 *
- sizeof(struct plat_serial8250_port),
},
{}
};
- for (; cell && cell->name; cell++) {
+ for (; cell && cell->hid; cell++) {
if (!strcmp(cell->hid, hid)) {
found = true;
break;
@@ -540,31 +555,12 @@ static int hisi_lpc_acpi_add_child(struct acpi_device *child, void *data)
return 0;
}
- pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
-
- pdev->dev.parent = hostdev;
- ACPI_COMPANION_SET(&pdev->dev, child);
-
- ret = platform_device_add_resources(pdev, res, num_res);
- if (ret)
- goto fail;
-
- ret = platform_device_add_data(pdev, cell->pdata, cell->pdata_size);
- if (ret)
- goto fail;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
+ pdev = platform_device_register_full(cell->pdevinfo);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
acpi_device_set_enumerated(child);
return 0;
-
-fail:
- platform_device_put(pdev);
- return ret;
}
/*
@@ -589,11 +585,6 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
return ret;
}
-
-static const struct acpi_device_id hisi_lpc_acpi_match[] = {
- {"HISI0191"},
- {}
-};
#else
static int hisi_lpc_acpi_probe(struct device *dev)
{
@@ -615,11 +606,9 @@ static void hisi_lpc_acpi_remove(struct device *hostdev)
static int hisi_lpc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_device = ACPI_COMPANION(dev);
struct logic_pio_hwaddr *range;
struct hisi_lpc_dev *lpcdev;
resource_size_t io_end;
- struct resource *res;
int ret;
lpcdev = devm_kzalloc(dev, sizeof(*lpcdev), GFP_KERNEL);
@@ -628,8 +617,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
spin_lock_init(&lpcdev->cycle_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpcdev->membase = devm_ioremap_resource(dev, res);
+ lpcdev->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpcdev->membase))
return PTR_ERR(lpcdev->membase);
@@ -637,7 +625,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
if (!range)
return -ENOMEM;
- range->fwnode = dev->fwnode;
+ range->fwnode = dev_fwnode(dev);
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
range->hostdata = lpcdev;
@@ -651,7 +639,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
}
/* register the LPC host PIO resources */
- if (acpi_device)
+ if (is_acpi_device_node(range->fwnode))
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -672,11 +660,10 @@ static int hisi_lpc_probe(struct platform_device *pdev)
static int hisi_lpc_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_device = ACPI_COMPANION(dev);
struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
struct logic_pio_hwaddr *range = lpcdev->io_host;
- if (acpi_device)
+ if (is_acpi_device_node(range->fwnode))
hisi_lpc_acpi_remove(dev);
else
of_platform_depopulate(dev);
@@ -692,11 +679,16 @@ static const struct of_device_id hisi_lpc_of_match[] = {
{}
};
+static const struct acpi_device_id hisi_lpc_acpi_match[] = {
+ {"HISI0191"},
+ {}
+};
+
static struct platform_driver hisi_lpc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = hisi_lpc_of_match,
- .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
+ .acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
.remove = hisi_lpc_remove,
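
The hisi_lpc rework above replaces the open-coded platform_device_alloc()/platform_device_add_resources()/platform_device_add_data()/platform_device_add() sequence with a single platform_device_register_full() call driven by a struct platform_device_info, which also carries the parent and fwnode. A reduced sketch of the declarative form (example_add_child() and the device name are hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>

/* The core copies the resources and data, registers the device and cleans
 * up on failure, so there is no manual platform_device_put() error path. */
static int example_add_child(struct device *parent, struct fwnode_handle *fwnode,
			     struct resource *res, unsigned int num_res)
{
	struct platform_device_info pdevinfo = {
		.parent = parent,
		.fwnode = fwnode,
		.name = "example-child",
		.id = PLATFORM_DEVID_AUTO,
		.res = res,
		.num_res = num_res,
	};
	struct platform_device *pdev;

	pdev = platform_device_register_full(&pdevinfo);
	return PTR_ERR_OR_ZERO(pdev);
}
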
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index f3aef77a6a4a..df0fbfee7b78 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -430,12 +430,25 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct mhi_event_ctxt *er_ctxt =
- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_event_ctxt *er_ctxt;
struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+ dma_addr_t ptr;
void *dev_rp;
+ /*
+ * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq(),
+ * by which time mhi_ctxt will already have been freed. So check that mhi_ctxt still
+ * exists before handling the IRQ.
+ */
+ if (!mhi_cntrl->mhi_ctxt) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev,
+ "mhi_ctxt has been freed\n");
+ return IRQ_HANDLED;
+ }
+
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ ptr = le64_to_cpu(er_ctxt->rp);
+
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9e545f2a5a26..caa4ce28cf9e 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -507,6 +507,8 @@ static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -841,7 +843,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct mhi_controller *mhi_cntrl;
int err;
- dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
+ dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
/* mhi_pdev.mhi_cntrl must be zero-initialized */
mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 5dc2669432ba..d51573ac525e 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -466,18 +466,7 @@ static int mvebu_sdram_debug_show(struct seq_file *seq, void *v)
struct mvebu_mbus_state *mbus = &mbus_state;
return mbus->soc->show_cpu_target(mbus, seq, v);
}
-
-static int mvebu_sdram_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvebu_sdram_debug_show, inode->i_private);
-}
-
-static const struct file_operations mvebu_sdram_debug_fops = {
- .open = mvebu_sdram_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvebu_sdram_debug);
static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
{
@@ -516,18 +505,7 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int mvebu_devs_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvebu_devs_debug_show, inode->i_private);
-}
-
-static const struct file_operations mvebu_devs_debug_fops = {
- .open = mvebu_devs_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvebu_devs_debug);
/*
* SoC-specific functions and definitions
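
The mvebu-mbus cleanup above collapses two hand-rolled single_open() file_operations into DEFINE_SHOW_ATTRIBUTE(), which generates the matching _open helper and _fops from a _show function. A minimal sketch (the example_debug names are hypothetical):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_debug_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "example state\n");
	return 0;
}
/* Generates example_debug_open() and example_debug_fops. */
DEFINE_SHOW_ATTRIBUTE(example_debug);

static void example_debugfs_init(struct dentry *parent)
{
	debugfs_create_file("example", 0444, parent, NULL, &example_debug_fops);
}
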
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index b24ac39a903b..e34c3ea692b6 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
- if ((int)res.a0 < 0)
- return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
+ default:
+ return -EIO;
}
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 16f227b995e8..cc002b0c2f0c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(default_quality,
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void hwrng_manage_rngd(struct hwrng *rng);
+static int hwrng_fillfn(void *unused);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
drop_current_rng();
current_rng = rng;
+ /* if necessary, start hwrng thread */
+ if (!hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
+ }
+
return 0;
}
@@ -167,8 +176,6 @@ skip_init:
rng->quality = 1024;
current_quality = rng->quality; /* obsolete */
- hwrng_manage_rngd(rng);
-
return 0;
}
@@ -454,10 +461,6 @@ static ssize_t rng_quality_store(struct device *dev,
/* the best available RNG may have changed */
ret = enable_best_rng();
- /* start/stop rngd if necessary */
- if (current_rng)
- hwrng_manage_rngd(current_rng);
-
out:
mutex_unlock(&rng_mutex);
return ret ? ret : len;
@@ -507,16 +510,14 @@ static int hwrng_fillfn(void *unused)
rng->quality = current_quality; /* obsolete */
quality = rng->quality;
mutex_unlock(&reading_mutex);
- put_rng(rng);
- if (!quality)
- break;
+ if (rc <= 0)
+ hwrng_msleep(rng, 10000);
- if (rc <= 0) {
- pr_warn("hwrng: no data available\n");
- msleep_interruptible(10000);
+ put_rng(rng);
+
+ if (rc <= 0)
continue;
- }
/* If we cannot credit at least one bit of entropy,
* keep track of the remainder for the next iteration
@@ -533,22 +534,6 @@ static int hwrng_fillfn(void *unused)
return 0;
}
-static void hwrng_manage_rngd(struct hwrng *rng)
-{
- if (WARN_ON(!mutex_is_locked(&rng_mutex)))
- return;
-
- if (rng->quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (rng->quality > 0 && !hwrng_fill) {
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
- }
- }
-}
-
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
@@ -570,6 +555,7 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ init_completion(&rng->dying);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
@@ -617,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
old_rng = current_rng;
list_del(&rng->list);
+ complete_all(&rng->dying);
if (current_rng == rng) {
err = enable_best_rng();
if (err) {
@@ -685,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
static int __init hwrng_modinit(void)
{
int ret;
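
In the hw_random core changes above, the fill thread now sleeps through hwrng_msleep(), which waits on a per-rng "dying" completion with a timeout; hwrng_unregister() completes it, so a sleeping thread wakes immediately instead of finishing a 10-second msleep. A sketch of that interruptible-sleep pattern with hypothetical example_rng types:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct example_rng {
	struct completion dying;	/* init_completion() at registration time */
};

/* Sleep up to @msecs, but return early if the device is being removed
 * (completion done) or the thread catches a signal. */
static long example_rng_msleep(struct example_rng *rng, unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}

static void example_rng_remove(struct example_rng *rng)
{
	complete_all(&rng->dying);	/* wake any sleeper right away */
}
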
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index b05d676ca814..a1c24148ed31 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(rngc->clk);
- if (ret)
- return ret;
-
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
- ret = -ENODEV;
- goto err;
- }
-
- ret = devm_request_irq(&pdev->dev,
- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- goto err;
- }
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ return -ENODEV;
init_completion(&rngc->rng_op_done);
@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
- goto err;
+ return ret;
}
}
- ret = hwrng_register(&rngc->rng);
+ ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
- goto err;
+ return ret;
}
dev_info(&pdev->dev,
@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
-
-err:
- clk_disable_unprepare(rngc->clk);
-
- return ret;
-}
-
-static int __exit imx_rngc_remove(struct platform_device *pdev)
-{
- struct imx_rngc *rngc = platform_get_drvdata(pdev);
-
- hwrng_unregister(&rngc->rng);
-
- clk_disable_unprepare(rngc->clk);
-
- return 0;
}
static int __maybe_unused imx_rngc_suspend(struct device *dev)
@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
- .name = "imx_rngc",
+ .name = KBUILD_MODNAME,
.pm = &imx_rngc_pm_ops,
.of_match_table = imx_rngc_dt_ids,
},
- .remove = __exit_p(imx_rngc_remove),
};
module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
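
The imx-rngc conversion above leans on managed helpers: devm_clk_get_enabled() acquires, prepares and enables the clock for the device's lifetime, and devm_hwrng_register() unregisters automatically on unbind, which is why the error labels and the remove() callback could be deleted. A condensed probe sketch under those assumptions (the example_rng_* names and the trivial read callback are hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	return 0;	/* would read from the hardware FIFO */
}

static int example_rng_probe(struct platform_device *pdev)
{
	struct hwrng *rng;
	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);	/* disabled/unprepared on unbind */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
	if (!rng)
		return -ENOMEM;

	rng->name = KBUILD_MODNAME;
	rng->read = example_rng_read;

	return devm_hwrng_register(&pdev->dev, rng);	/* unregistered on unbind */
}
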
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index db40037eb347..a0e9e80d92ee 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -341,14 +341,12 @@ static int ipmb_probe(struct i2c_client *client)
return 0;
}
-static int ipmb_remove(struct i2c_client *client)
+static void ipmb_remove(struct i2c_client *client)
{
struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
i2c_slave_unregister(client);
misc_deregister(&ipmb_dev->miscdev);
-
- return 0;
}
static const struct i2c_device_id ipmb_id[] = {
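
The ipmb remove() conversion above — repeated for the IPMI, TPM and other I2C drivers that follow — switches to the void-returning i2c remove() callback: the I2C core ignored the returned error anyway, so remove() now just performs its cleanup and logs any problems itself. A minimal sketch of the new shape (the example names are hypothetical):

#include <linux/i2c.h>

static void example_i2c_remove(struct i2c_client *client)
{
	void *priv = i2c_get_clientdata(client);

	/* undo whatever probe() set up; there is no status to return */
	(void)priv;
}

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name = "example",
	},
	.remove = example_i2c_remove,	/* void return type */
};
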
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index ab19b4b3317e..25c010c9ec25 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -424,7 +424,7 @@ static void ipmi_ipmb_request_events(void *send_info)
/* We don't fetch events here. */
}
-static int ipmi_ipmb_remove(struct i2c_client *client)
+static void ipmi_ipmb_remove(struct i2c_client *client)
{
struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
@@ -438,8 +438,6 @@ static int ipmi_ipmb_remove(struct i2c_client *client)
ipmi_ipmb_stop_thread(iidev);
ipmi_unregister_smi(iidev->intf);
-
- return 0;
}
static int ipmi_ipmb_probe(struct i2c_client *client)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index fc742ee9c046..13da021e7c6b 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1281,13 +1281,13 @@ static void shutdown_ssif(void *send_info)
}
}
-static int ssif_remove(struct i2c_client *client)
+static void ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
struct ssif_addr_info *addr_info;
if (!ssif_info)
- return 0;
+ return;
/*
* After this point, we won't deliver anything asychronously
@@ -1303,8 +1303,6 @@ static int ssif_remove(struct i2c_client *client)
}
kfree(ssif_info);
-
- return 0;
}
static int read_response(struct i2c_client *client, unsigned char *resp)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 84ca98ed1dad..5611d127363e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
@@ -706,8 +712,8 @@ static const struct memdev {
#endif
[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 },
- [8] = { "random", 0666, &random_fops, 0 },
- [9] = { "urandom", 0666, &urandom_fops, 0 },
+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8fc49b038372..b2735be81ab2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2274,7 +2274,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
* tty pointer to tty structure
* termios pointer to buffer to hold returned old termios
*/
-static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void mgslpc_set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
unsigned long flags;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 79d7d4e4e582..01acf235f263 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -96,8 +96,8 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
* Returns whether or not the input pool has been seeded and thus guaranteed
* to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
+ * u16,u32,u64,int,long} family of functions.
*
* Returns: true if the input pool has been seeded.
* false if the input pool has not been seeded.
@@ -119,9 +119,9 @@ static void try_to_generate_entropy(void);
/*
* Wait for the input pool to be seeded and thus guaranteed to supply
* cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
+ * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
+ * int,long} family of functions. Using any of these functions without first
+ * calling this function forfeits the guarantee of security.
*
* Returns: 0 if the input pool has been seeded.
* -ERESTARTSYS if the function was interrupted by a signal.
@@ -157,6 +157,8 @@ EXPORT_SYMBOL(wait_for_random_bytes);
* There are a few exported interfaces for use by other drivers:
*
* void get_random_bytes(void *buf, size_t len)
+ * u8 get_random_u8()
+ * u16 get_random_u16()
* u32 get_random_u32()
* u64 get_random_u64()
* unsigned int get_random_int()
@@ -164,10 +166,10 @@ EXPORT_SYMBOL(wait_for_random_bytes);
*
* These interfaces will return the requested number of random bytes
* into the given buffer or as a return value. This is equivalent to
- * a read from /dev/urandom. The u32, u64, int, and long family of
- * functions may be higher performance for one-off random integers,
- * because they do a bit of buffering and do not invoke reseeding
- * until the buffer is emptied.
+ * a read from /dev/urandom. The u8, u16, u32, u64, int, and long
+ * family of functions may be higher performance for one-off random
+ * integers, because they do a bit of buffering and do not invoke
+ * reseeding until the buffer is emptied.
*
*********************************************************************/
@@ -260,25 +262,23 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
}
/*
- * Return whether the crng seed is considered to be sufficiently old
- * that a reseeding is needed. This happens if the last reseeding
- * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
* proportional to the uptime.
*/
-static bool crng_has_old_seed(void)
+static unsigned int crng_reseed_interval(void)
{
static bool early_boot = true;
- unsigned long interval = CRNG_RESEED_INTERVAL;
if (unlikely(READ_ONCE(early_boot))) {
time64_t uptime = ktime_get_seconds();
if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
WRITE_ONCE(early_boot, false);
else
- interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
- (unsigned int)uptime / 2 * HZ);
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
}
- return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
+ return CRNG_RESEED_INTERVAL;
}
/*
@@ -320,7 +320,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
* If the base_crng is old enough, we reseed, which in turn bumps the
* generation counter that we check below.
*/
- if (unlikely(crng_has_old_seed()))
+ if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
crng_reseed();
local_lock_irqsave(&crngs.lock, flags);
@@ -384,11 +384,11 @@ static void _get_random_bytes(void *buf, size_t len)
}
/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. In order to ensure that the randomness
- * by this function is okay, the function wait_for_random_bytes()
- * should be called and return 0 at least once at any point prior.
+ * This function is the exported kernel interface. It returns some number of
+ * good random numbers, suitable for key generation, seeding TCP sequence
+ * numbers, etc. In order to ensure that the randomness returned by this
+ * function is okay, the function wait_for_random_bytes() should be called and
+ * return 0 at least once at any point prior.
*/
void get_random_bytes(void *buf, size_t len)
{
@@ -506,8 +506,10 @@ type get_random_ ##type(void) \
} \
EXPORT_SYMBOL(get_random_ ##type);
-DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u8)
+DEFINE_BATCHED_ENTROPY(u16)
DEFINE_BATCHED_ENTROPY(u32)
+DEFINE_BATCHED_ENTROPY(u64)
#ifdef CONFIG_SMP
/*
@@ -522,6 +524,8 @@ int __cold random_prepare_cpu(unsigned int cpu)
* randomness.
*/
per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
return 0;
@@ -774,18 +778,13 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
/*
- * The first collection of entropy occurs at system boot while interrupts
- * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
- * utsname(), and the command line. Depending on the above configuration knob,
- * RDSEED may be considered sufficient for initialization. Note that much
- * earlier setup may already have pushed entropy into the input pool by the
- * time we get here.
+ * This is called extremely early, before time keeping functionality is
+ * available, but arch randomness is. Interrupts are not yet enabled.
*/
-int __init random_init(const char *command_line)
+void __init random_init_early(const char *command_line)
{
- ktime_t now = ktime_get_real();
- size_t i, longs, arch_bits;
unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
+ size_t i, longs, arch_bits;
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
@@ -805,34 +804,49 @@ int __init random_init(const char *command_line)
i += longs;
continue;
}
- entropy[0] = random_get_entropy();
- _mix_pool_bytes(entropy, sizeof(*entropy));
arch_bits -= sizeof(*entropy) * 8;
++i;
}
- _mix_pool_bytes(&now, sizeof(now));
- _mix_pool_bytes(utsname(), sizeof(*(utsname())));
+
+ _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
_mix_pool_bytes(command_line, strlen(command_line));
+
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed();
+ else if (trust_cpu)
+ _credit_init_bits(arch_bits);
+}
+
+/*
+ * This is called a little bit after the prior function, and now there is
+ * access to timestamp counters. Interrupts are not yet enabled.
+ */
+void __init random_init(void)
+{
+ unsigned long entropy = random_get_entropy();
+ ktime_t now = ktime_get_real();
+
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
add_latent_entropy();
/*
- * If we were initialized by the bootloader before jump labels are
- * initialized, then we should enable the static branch here, where
+ * If we were initialized by the cpu or bootloader before jump labels
+ * are initialized, then we should enable the static branch here, where
* it's guaranteed that jump labels have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
+ /* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed();
- else if (trust_cpu)
- _credit_init_bits(arch_bits);
WARN_ON(register_pm_notifier(&pm_notifier));
- WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
- "entropy collection will consequently suffer.");
- return 0;
+ WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
+ "entropy collection will consequently suffer.");
}
/*
@@ -866,11 +880,11 @@ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
credit_init_bits(entropy);
/*
- * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
- * we're not yet initialized.
+ * Throttle writing to once every reseed interval, unless we're not yet
+ * initialized or no entropy is credited.
*/
- if (!kthread_should_stop() && crng_ready())
- schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
+ if (!kthread_should_stop() && (crng_ready() || !entropy))
+ schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
@@ -920,20 +934,23 @@ EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
struct fast_pool {
- struct work_struct mix;
unsigned long pool[4];
unsigned long last;
unsigned int count;
+ struct timer_list mix;
};
+static void mix_interrupt_randomness(struct timer_list *work);
+
static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
- .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
- .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};
/*
@@ -975,7 +992,7 @@ int __cold random_online_cpu(unsigned int cpu)
}
#endif
-static void mix_interrupt_randomness(struct work_struct *work)
+static void mix_interrupt_randomness(struct timer_list *work)
{
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
@@ -1006,7 +1023,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
local_irq_enable();
mix_pool_bytes(pool, sizeof(pool));
- credit_init_bits(max(1u, (count & U16_MAX) / 64));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
memzero_explicit(pool, sizeof(pool));
}
@@ -1029,10 +1046,11 @@ void add_interrupt_randomness(int irq)
if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;
- if (unlikely(!fast_pool->mix.func))
- INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
fast_pool->count |= MIX_INFLIGHT;
- queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1191,7 +1209,7 @@ static void __cold entropy_timer(struct timer_list *timer)
*/
static void __cold try_to_generate_entropy(void)
{
- enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
struct entropy_timer_state stack;
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
@@ -1210,7 +1228,7 @@ static void __cold try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies + 1);
+ mod_timer(&stack.timer, jiffies);
mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
schedule();
stack.entropy = random_get_entropy();
@@ -1347,6 +1365,11 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
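
Among the random.c changes above, the per-CPU fast pool's deferred mixing moves from a work_struct to a per-CPU timer_list initialized with __TIMER_INITIALIZER() and armed with add_timer_on() straight from hard-IRQ context. A reduced sketch of that deferral pattern, with hypothetical example_* names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/timer.h>

struct example_pool {
	unsigned long pool[4];
	unsigned int count;
	struct timer_list mix;
};

static void example_mix(struct timer_list *t);

static DEFINE_PER_CPU(struct example_pool, example_pools) = {
	.mix = __TIMER_INITIALIZER(example_mix, 0),
};

/* Runs in timer (softirq) context on the CPU that armed it. */
static void example_mix(struct timer_list *t)
{
	struct example_pool *p = container_of(t, struct example_pool, mix);

	/* ... drain p->pool into the main input pool ... */
	p->count = 0;
}

/* Hard-IRQ path: defer the heavier mixing to a timer on this CPU instead
 * of queueing work on the system workqueue. */
static void example_add_randomness(void)
{
	struct example_pool *p = this_cpu_ptr(&example_pools);

	if (!timer_pending(&p->mix)) {
		p->mix.expires = jiffies;
		add_timer_on(&p->mix, raw_smp_processor_id());
	}
}
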
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 3170d59d660c..a3aa411389e7 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -264,13 +264,11 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
* @param: client, the i2c_client description (TPM I2C description).
* @return: 0 in case of success.
*/
-static int st33zp24_i2c_remove(struct i2c_client *client)
+static void st33zp24_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
st33zp24_remove(chip);
-
- return 0;
}
static const struct i2c_device_id st33zp24_i2c_id[] = {
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index d5ac85558214..4be3677c1463 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -179,12 +179,11 @@ static int i2c_atmel_probe(struct i2c_client *client,
return tpm_chip_register(chip);
}
-static int i2c_atmel_remove(struct i2c_client *client)
+static void i2c_atmel_remove(struct i2c_client *client)
{
struct device *dev = &(client->dev);
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_chip_unregister(chip);
- return 0;
}
static const struct i2c_device_id i2c_atmel_id[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index a19d32cb4e94..fd3c3661e646 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -706,15 +706,13 @@ static int tpm_tis_i2c_probe(struct i2c_client *client,
return rc;
}
-static int tpm_tis_i2c_remove(struct i2c_client *client)
+static void tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = tpm_dev.chip;
tpm_chip_unregister(chip);
release_locality(chip, tpm_dev.locality, 1);
tpm_dev.client = NULL;
-
- return 0;
}
static struct i2c_driver tpm_tis_i2c_driver = {
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index b77c18e38662..95c37350cc8e 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -622,12 +622,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
return tpm_chip_register(chip);
}
-static int i2c_nuvoton_remove(struct i2c_client *client)
+static void i2c_nuvoton_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
tpm_chip_unregister(chip);
- return 0;
}
static const struct i2c_device_id i2c_nuvoton_id[] = {
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 40018a73b3cb..bc7b1b4501b3 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -380,7 +380,7 @@ void tpm_add_ppi(struct tpm_chip *chip)
TPM_PPI_FN_VERSION,
NULL, ACPI_TYPE_STRING);
if (obj) {
- strlcpy(chip->ppi_version, obj->string.pointer,
+ strscpy(chip->ppi_version, obj->string.pointer,
sizeof(chip->ppi_version));
ACPI_FREE(obj);
}
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index ba0911b1d1ff..0692510dfcab 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -351,13 +351,12 @@ static int tpm_tis_i2c_probe(struct i2c_client *dev,
NULL);
}
-static int tpm_tis_i2c_remove(struct i2c_client *client)
+static void tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
- return 0;
}
static const struct i2c_device_id tpm_tis_i2c_id[] = {
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index 974479a1ec5a..77cea5b31c6e 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -763,20 +763,18 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_remove(struct i2c_client *client)
+static void tpm_cr50_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
struct device *dev = &client->dev;
if (!chip) {
dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n");
- return 0;
+ return;
}
tpm_chip_unregister(chip);
tpm_cr50_release_locality(chip, true);
-
- return 0;
}
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 48f8f4221e21..d79905f3e174 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -249,7 +249,7 @@ config COMMON_CLK_GEMINI
platform, also known as SL3516 or CS3516.
config COMMON_CLK_LAN966X
- bool "Generic Clock Controller driver for LAN966X SoC"
+ tristate "Generic Clock Controller driver for LAN966X SoC"
depends on HAS_IOMEM
depends on OF
depends on SOC_LAN966 || COMPILE_TEST
@@ -377,6 +377,15 @@ config COMMON_CLK_VC5
This driver supports the IDT VersaClock 5 and VersaClock 6
programmable clock generators.
+config COMMON_CLK_VC7
+ tristate "Clock driver for Renesas Versaclock 7 devices"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ Renesas Versaclock7 is a family of configurable clock generator
+ and jitter attenuator ICs with fractional and integer dividers.
+
config COMMON_CLK_STM32MP135
def_bool COMMON_CLK && MACH_STM32MP13
help
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d5db170d38d2..e3ca0d058a25 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_RS9_PCIE) += clk-renesas-pcie.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
+obj-$(CONFIG_COMMON_CLK_VC7) += clk-versaclock7.o
obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index 8ca8bcacf66d..85a964cb2d89 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -33,8 +33,11 @@ static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -56,8 +59,11 @@ static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -79,8 +85,11 @@ static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -120,7 +129,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
struct clk_hw *hw;
unsigned int num_parents;
const char *parent_names[GENERATED_SOURCE_MAX];
- struct device_node *gcknp;
+ struct device_node *gcknp, *parent_np;
struct clk_range range = CLK_RANGE(0, 0);
struct regmap *regmap;
@@ -134,7 +143,9 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
if (!num || num > PERIPHERAL_MAX)
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -180,8 +191,11 @@ static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
const char *name = np->name;
const char *parent_name;
struct regmap *regmap;
+ struct device_node *parent_np;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -243,12 +257,15 @@ static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
const char *parent_name;
struct regmap *regmap;
bool bypass;
+ struct device_node *parent_np;
of_property_read_string(np, "clock-output-names", &name);
bypass = of_property_read_bool(np, "atmel,osc-bypass");
parent_name = of_clk_get_parent_name(np, 0);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -268,12 +285,15 @@ static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
u32 accuracy = 0;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
of_property_read_string(np, "clock-output-names", &name);
of_property_read_u32(np, "clock-frequency", &frequency);
of_property_read_u32(np, "clock-accuracy", &accuracy);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -292,11 +312,14 @@ static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -316,13 +339,16 @@ static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
unsigned int num_parents;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -373,6 +399,7 @@ of_at91_clk_master_setup(struct device_node *np,
const char *name = np->name;
struct clk_master_characteristics *characteristics;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX)
@@ -386,7 +413,9 @@ of_at91_clk_master_setup(struct device_node *np,
if (!characteristics)
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -433,6 +462,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
const char *name;
struct device_node *periphclknp;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -442,7 +472,9 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
if (!num || num > PERIPHERAL_MAX)
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -601,6 +633,7 @@ of_at91_clk_pll_setup(struct device_node *np,
struct regmap *regmap;
const char *parent_name;
const char *name = np->name;
+ struct device_node *parent_np;
struct clk_pll_characteristics *characteristics;
if (of_property_read_u32(np, "reg", &id))
@@ -610,7 +643,9 @@ of_at91_clk_pll_setup(struct device_node *np,
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -665,12 +700,15 @@ of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -694,7 +732,7 @@ of_at91_clk_prog_setup(struct device_node *np,
unsigned int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
const char *name;
- struct device_node *progclknp;
+ struct device_node *progclknp, *parent_np;
struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
@@ -707,7 +745,9 @@ of_at91_clk_prog_setup(struct device_node *np,
if (!num || num > (PROG_ID_MAX + 1))
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -756,13 +796,16 @@ static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
unsigned int num_parents;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents != 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -788,6 +831,7 @@ static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
const char *parent_names[SMD_SOURCE_MAX];
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > SMD_SOURCE_MAX)
@@ -797,7 +841,9 @@ static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -818,7 +864,7 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
u32 id;
struct clk_hw *hw;
const char *name;
- struct device_node *sysclknp;
+ struct device_node *sysclknp, *parent_np;
const char *parent_name;
struct regmap *regmap;
@@ -826,7 +872,9 @@ static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
if (num > (SYSTEM_MAX_ID + 1))
return;
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -859,6 +907,7 @@ static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > USB_SOURCE_MAX)
@@ -868,7 +917,9 @@ static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -888,6 +939,7 @@ static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -895,7 +947,9 @@ static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
@@ -915,6 +969,7 @@ static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
const char *name = np->name;
u32 divisors[4] = {0, 0, 0, 0};
struct regmap *regmap;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -926,7 +981,9 @@ static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap))
return;
hw = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
@@ -946,12 +1003,15 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
const char *parent_name;
const char *name = np->name;
struct regmap *regmap_pmc, *regmap_sfr;
+ struct device_node *parent_np;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap_pmc = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_pmc))
return;
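
Every at91 dt-compat hunk above applies the same fix: of_get_parent() returns the parent node with an elevated refcount, so the node must be released with of_node_put() once syscon_node_to_regmap() has looked it up, whether or not the lookup succeeded. A small helper sketch of the pattern (example_parent_regmap() is hypothetical):

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Look up the syscon regmap of @np's parent and drop the node reference
 * taken by of_get_parent() in all cases. */
static struct regmap *example_parent_regmap(struct device_node *np)
{
	struct device_node *parent_np;
	struct regmap *regmap;

	parent_np = of_get_parent(np);
	regmap = syscon_node_to_regmap(parent_np);
	of_node_put(parent_np);	/* of_node_put(NULL) is a no-op */

	return regmap;
}
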
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index cfd0f5e23b99..84156dc52bff 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -120,6 +120,16 @@ static const struct {
struct clk_range r;
int chg_pid;
} sama5d2_gck[] = {
+ { .n = "flx0_gclk", .id = 19, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx1_gclk", .id = 20, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx2_gclk", .id = 21, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx3_gclk", .id = 22, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "flx4_gclk", .id = 23, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart0_gclk", .id = 24, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart1_gclk", .id = 25, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart2_gclk", .id = 26, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart3_gclk", .id = 27, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
+ { .n = "uart4_gclk", .id = 28, .chg_pid = INT_MIN, .r = { .min = 0, .max = 27666666 }, },
{ .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, },
{ .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, },
{ .n = "tcb0_gclk", .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, },
diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig
index 03102f1094bc..f0b186830324 100644
--- a/drivers/clk/baikal-t1/Kconfig
+++ b/drivers/clk/baikal-t1/Kconfig
@@ -29,7 +29,6 @@ config CLK_BT1_CCU_PLL
config CLK_BT1_CCU_DIV
bool "Baikal-T1 CCU Dividers support"
- select RESET_CONTROLLER
select MFD_SYSCON
default MIPS_BAIKAL_T1
help
@@ -39,4 +38,15 @@ config CLK_BT1_CCU_DIV
either gateable or ungateable. Some of the CCU dividers can be as well
used to reset the domains they're supplying clock to.
+config CLK_BT1_CCU_RST
+ bool "Baikal-T1 CCU Resets support"
+ select RESET_CONTROLLER
+ select MFD_SYSCON
+ default MIPS_BAIKAL_T1
+ help
+ Enable this to support the CCU reset blocks responsible for the
+ AXI-bus and some subsystems reset. These are mainly the
+ self-deasserted reset controls but there are several lines which
+ can be directly asserted/de-asserted (PCIe and DDR sub-domains).
+
endif
diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile
index b3b9590b95ed..9c3637de9407 100644
--- a/drivers/clk/baikal-t1/Makefile
+++ b/drivers/clk/baikal-t1/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o
obj-$(CONFIG_CLK_BT1_CCU_DIV) += ccu-div.o clk-ccu-div.o
+obj-$(CONFIG_CLK_BT1_CCU_RST) += ccu-rst.o
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 4062092d67f9..8d5fc7158f33 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -34,9 +34,9 @@
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
+#define CCU_DIV_CTL_GATE_REF_BUF BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
-#define CCU_DIV_RST_DELAY_US 1
#define CCU_DIV_LOCK_CHECK_RETRIES 50
#define CCU_DIV_CLKDIV_MIN 0
@@ -170,6 +170,40 @@ static int ccu_div_gate_is_enabled(struct clk_hw *hw)
return !!(val & CCU_DIV_CTL_EN);
}
+static int ccu_div_buf_enable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_GATE_REF_BUF, 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static void ccu_div_buf_disable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
+ spin_unlock_irqrestore(&div->lock, flags);
+}
+
+static int ccu_div_buf_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+
+ return !(val & CCU_DIV_CTL_GATE_REF_BUF);
+}
+
static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -288,24 +322,6 @@ static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-int ccu_div_reset_domain(struct ccu_div *div)
-{
- unsigned long flags;
-
- if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
- return -EINVAL;
-
- spin_lock_irqsave(&div->lock, flags);
- regmap_update_bits(div->sys_regs, div->reg_ctl,
- CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
- spin_unlock_irqrestore(&div->lock, flags);
-
- /* The next delay must be enough to cover all the resets. */
- udelay(CCU_DIV_RST_DELAY_US);
-
- return 0;
-}
-
#ifdef CONFIG_DEBUG_FS
struct ccu_div_dbgfs_bit {
@@ -323,6 +339,7 @@ static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
+ CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};
@@ -441,6 +458,9 @@ static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
continue;
}
+ if (!strcmp("div_buf", name))
+ continue;
+
bits[didx] = ccu_div_bits[bidx];
bits[didx].div = div;
@@ -477,6 +497,21 @@ static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
&ccu_div_dbgfs_fixed_clkdiv_fops);
}
+static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bit;
+
+ bit = kmalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return;
+
+ *bit = ccu_div_bits[3];
+ bit->div = div;
+ debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
+ &ccu_div_dbgfs_bit_fops);
+}
+
static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
struct ccu_div *div = to_ccu_div(hw);
@@ -489,6 +524,7 @@ static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
+#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL
#endif /* !CONFIG_DEBUG_FS */
@@ -520,6 +556,13 @@ static const struct clk_ops ccu_div_gate_ops = {
.debug_init = ccu_div_gate_debug_init
};
+static const struct clk_ops ccu_div_buf_ops = {
+ .enable = ccu_div_buf_enable,
+ .disable = ccu_div_buf_disable,
+ .is_enabled = ccu_div_buf_is_enabled,
+ .debug_init = ccu_div_buf_debug_init
+};
+
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
.round_rate = ccu_div_fixed_round_rate,
@@ -566,6 +609,8 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
} else if (div_init->type == CCU_DIV_GATE) {
hw_init.ops = &ccu_div_gate_ops;
div->divider = div_init->divider;
+ } else if (div_init->type == CCU_DIV_BUF) {
+ hw_init.ops = &ccu_div_buf_ops;
} else if (div_init->type == CCU_DIV_FIXED) {
hw_init.ops = &ccu_div_fixed_ops;
div->divider = div_init->divider;
@@ -579,6 +624,7 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
goto err_free_div;
}
parent_data.fw_name = div_init->parent_name;
+ parent_data.name = div_init->parent_name;
hw_init.parent_data = &parent_data;
hw_init.num_parents = 1;
diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h
index 795665caefbd..76d8ee44d415 100644
--- a/drivers/clk/baikal-t1/ccu-div.h
+++ b/drivers/clk/baikal-t1/ccu-div.h
@@ -14,14 +14,25 @@
#include <linux/of.h>
/*
+ * CCU Divider private clock IDs
+ * @CCU_SYS_SATA_CLK: CCU SATA internal clock
+ * @CCU_SYS_XGMAC_CLK: CCU XGMAC internal clock
+ */
+#define CCU_SYS_SATA_CLK -1
+#define CCU_SYS_XGMAC_CLK -2
+
+/*
* CCU Divider private flags
+ * @CCU_DIV_BASIC: Basic divider clock required by the kernel as early as
+ * possible.
* @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1.
* It can be 0 though, which is functionally the same.
* @CCU_DIV_SKIP_ONE_TO_THREE: For some reason divider can't be within [1,3].
* It can be either 0 or greater than 3.
* @CCU_DIV_LOCK_SHIFTED: Find lock-bit at non-standard position.
- * @CCU_DIV_RESET_DOMAIN: Provide reset clock domain method.
+ * @CCU_DIV_RESET_DOMAIN: There is a clock domain reset handle.
*/
+#define CCU_DIV_BASIC BIT(0)
#define CCU_DIV_SKIP_ONE BIT(1)
#define CCU_DIV_SKIP_ONE_TO_THREE BIT(2)
#define CCU_DIV_LOCK_SHIFTED BIT(3)
@@ -31,11 +42,13 @@
* enum ccu_div_type - CCU Divider types
* @CCU_DIV_VAR: Clocks gate with variable divider.
* @CCU_DIV_GATE: Clocks gate with fixed divider.
+ * @CCU_DIV_BUF: Clock gate with no divider.
* @CCU_DIV_FIXED: Ungateable clock with fixed divider.
*/
enum ccu_div_type {
CCU_DIV_VAR,
CCU_DIV_GATE,
+ CCU_DIV_BUF,
CCU_DIV_FIXED
};
@@ -105,6 +118,4 @@ struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *init);
void ccu_div_hw_unregister(struct ccu_div *div);
-int ccu_div_reset_domain(struct ccu_div *div);
-
#endif /* __CLK_BT1_CCU_DIV_H__ */
diff --git a/drivers/clk/baikal-t1/ccu-pll.h b/drivers/clk/baikal-t1/ccu-pll.h
index 76cd9132a219..a71bfd7b90ec 100644
--- a/drivers/clk/baikal-t1/ccu-pll.h
+++ b/drivers/clk/baikal-t1/ccu-pll.h
@@ -14,6 +14,12 @@
#include <linux/of.h>
/*
+ * CCU PLL private flags
+ * @CCU_PLL_BASIC: Basic PLL required by the kernel as early as possible.
+ */
+#define CCU_PLL_BASIC BIT(0)
+
+/*
* struct ccu_pll_init_data - CCU PLL initialization data
* @id: Clock private identifier.
* @name: Clocks name.
@@ -22,6 +28,7 @@
* @sys_regs: Baikal-T1 System Controller registers map.
* @np: Pointer to the node describing the CCU PLLs.
* @flags: PLL clock flags.
+ * @features: PLL private features.
*/
struct ccu_pll_init_data {
unsigned int id;
@@ -31,6 +38,7 @@ struct ccu_pll_init_data {
struct regmap *sys_regs;
struct device_node *np;
unsigned long flags;
+ unsigned long features;
};
/*
diff --git a/drivers/clk/baikal-t1/ccu-rst.c b/drivers/clk/baikal-t1/ccu-rst.c
new file mode 100644
index 000000000000..40023ea67463
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-rst.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU Resets interface driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-rst: " fmt
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/reset/bt1-ccu.h>
+
+#include "ccu-rst.h"
+
+#define CCU_AXI_MAIN_BASE 0x030
+#define CCU_AXI_DDR_BASE 0x034
+#define CCU_AXI_SATA_BASE 0x038
+#define CCU_AXI_GMAC0_BASE 0x03C
+#define CCU_AXI_GMAC1_BASE 0x040
+#define CCU_AXI_XGMAC_BASE 0x044
+#define CCU_AXI_PCIE_M_BASE 0x048
+#define CCU_AXI_PCIE_S_BASE 0x04C
+#define CCU_AXI_USB_BASE 0x050
+#define CCU_AXI_HWA_BASE 0x054
+#define CCU_AXI_SRAM_BASE 0x058
+
+#define CCU_SYS_DDR_BASE 0x02c
+#define CCU_SYS_SATA_REF_BASE 0x060
+#define CCU_SYS_APB_BASE 0x064
+#define CCU_SYS_PCIE_BASE 0x144
+
+#define CCU_RST_DELAY_US 1
+
+#define CCU_RST_TRIG(_base, _ofs) \
+ { \
+ .type = CCU_RST_TRIG, \
+ .base = _base, \
+ .mask = BIT(_ofs), \
+ }
+
+#define CCU_RST_DIR(_base, _ofs) \
+ { \
+ .type = CCU_RST_DIR, \
+ .base = _base, \
+ .mask = BIT(_ofs), \
+ }
+
+struct ccu_rst_info {
+ enum ccu_rst_type type;
+ unsigned int base;
+ unsigned int mask;
+};
+
+/*
+ * Each AXI-bus clock divider is equipped with the corresponding clock-consumer
+ * domain reset (it's a self-deasserted reset control).
+ */
+static const struct ccu_rst_info axi_rst_info[] = {
+ [CCU_AXI_MAIN_RST] = CCU_RST_TRIG(CCU_AXI_MAIN_BASE, 1),
+ [CCU_AXI_DDR_RST] = CCU_RST_TRIG(CCU_AXI_DDR_BASE, 1),
+ [CCU_AXI_SATA_RST] = CCU_RST_TRIG(CCU_AXI_SATA_BASE, 1),
+ [CCU_AXI_GMAC0_RST] = CCU_RST_TRIG(CCU_AXI_GMAC0_BASE, 1),
+ [CCU_AXI_GMAC1_RST] = CCU_RST_TRIG(CCU_AXI_GMAC1_BASE, 1),
+ [CCU_AXI_XGMAC_RST] = CCU_RST_TRIG(CCU_AXI_XGMAC_BASE, 1),
+ [CCU_AXI_PCIE_M_RST] = CCU_RST_TRIG(CCU_AXI_PCIE_M_BASE, 1),
+ [CCU_AXI_PCIE_S_RST] = CCU_RST_TRIG(CCU_AXI_PCIE_S_BASE, 1),
+ [CCU_AXI_USB_RST] = CCU_RST_TRIG(CCU_AXI_USB_BASE, 1),
+ [CCU_AXI_HWA_RST] = CCU_RST_TRIG(CCU_AXI_HWA_BASE, 1),
+ [CCU_AXI_SRAM_RST] = CCU_RST_TRIG(CCU_AXI_SRAM_BASE, 1),
+};
+
+/*
+ * SATA reference clock domain and APB-bus domain are connected with the
+ * self-deasserted reset control, which can be activated via the corresponding
+ * clock divider register. DDR and PCIe sub-domains can be reset with directly
+ * controlled reset signals. Resetting the DDR controller though won't end up
+ * well while the Linux kernel is working.
+ */
+static const struct ccu_rst_info sys_rst_info[] = {
+ [CCU_SYS_SATA_REF_RST] = CCU_RST_TRIG(CCU_SYS_SATA_REF_BASE, 1),
+ [CCU_SYS_APB_RST] = CCU_RST_TRIG(CCU_SYS_APB_BASE, 1),
+ [CCU_SYS_DDR_FULL_RST] = CCU_RST_DIR(CCU_SYS_DDR_BASE, 1),
+ [CCU_SYS_DDR_INIT_RST] = CCU_RST_DIR(CCU_SYS_DDR_BASE, 2),
+ [CCU_SYS_PCIE_PCS_PHY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 0),
+ [CCU_SYS_PCIE_PIPE0_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 4),
+ [CCU_SYS_PCIE_CORE_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 8),
+ [CCU_SYS_PCIE_PWR_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 9),
+ [CCU_SYS_PCIE_STICKY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 10),
+ [CCU_SYS_PCIE_NSTICKY_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 11),
+ [CCU_SYS_PCIE_HOT_RST] = CCU_RST_DIR(CCU_SYS_PCIE_BASE, 12),
+};
+
+static int ccu_rst_reset(struct reset_controller_dev *rcdev, unsigned long idx)
+{
+ struct ccu_rst *rst = to_ccu_rst(rcdev);
+ const struct ccu_rst_info *info = &rst->rsts_info[idx];
+
+ if (info->type != CCU_RST_TRIG)
+ return -EOPNOTSUPP;
+
+ regmap_update_bits(rst->sys_regs, info->base, info->mask, info->mask);
+
+ /* The next delay must be enough to cover all the resets. */
+ udelay(CCU_RST_DELAY_US);
+
+ return 0;
+}
+
+static int ccu_rst_set(struct reset_controller_dev *rcdev,
+ unsigned long idx, bool high)
+{
+ struct ccu_rst *rst = to_ccu_rst(rcdev);
+ const struct ccu_rst_info *info = &rst->rsts_info[idx];
+
+ if (info->type != CCU_RST_DIR)
+ return high ? -EOPNOTSUPP : 0;
+
+ return regmap_update_bits(rst->sys_regs, info->base,
+ info->mask, high ? info->mask : 0);
+}
+
+static int ccu_rst_assert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return ccu_rst_set(rcdev, idx, true);
+}
+
+static int ccu_rst_deassert(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ return ccu_rst_set(rcdev, idx, false);
+}
+
+static int ccu_rst_status(struct reset_controller_dev *rcdev,
+ unsigned long idx)
+{
+ struct ccu_rst *rst = to_ccu_rst(rcdev);
+ const struct ccu_rst_info *info = &rst->rsts_info[idx];
+ u32 val;
+
+ if (info->type != CCU_RST_DIR)
+ return -EOPNOTSUPP;
+
+ regmap_read(rst->sys_regs, info->base, &val);
+
+ return !!(val & info->mask);
+}
+
+static const struct reset_control_ops ccu_rst_ops = {
+ .reset = ccu_rst_reset,
+ .assert = ccu_rst_assert,
+ .deassert = ccu_rst_deassert,
+ .status = ccu_rst_status,
+};
+
+struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *rst_init)
+{
+ struct ccu_rst *rst;
+ int ret;
+
+ if (!rst_init)
+ return ERR_PTR(-EINVAL);
+
+ rst = kzalloc(sizeof(*rst), GFP_KERNEL);
+ if (!rst)
+ return ERR_PTR(-ENOMEM);
+
+ rst->sys_regs = rst_init->sys_regs;
+ if (of_device_is_compatible(rst_init->np, "baikal,bt1-ccu-axi")) {
+ rst->rcdev.nr_resets = ARRAY_SIZE(axi_rst_info);
+ rst->rsts_info = axi_rst_info;
+ } else if (of_device_is_compatible(rst_init->np, "baikal,bt1-ccu-sys")) {
+ rst->rcdev.nr_resets = ARRAY_SIZE(sys_rst_info);
+ rst->rsts_info = sys_rst_info;
+ } else {
+ pr_err("Incompatible DT node '%s' specified\n",
+ of_node_full_name(rst_init->np));
+ ret = -EINVAL;
+ goto err_kfree_rst;
+ }
+
+ rst->rcdev.owner = THIS_MODULE;
+ rst->rcdev.ops = &ccu_rst_ops;
+ rst->rcdev.of_node = rst_init->np;
+
+ ret = reset_controller_register(&rst->rcdev);
+ if (ret) {
+ pr_err("Couldn't register '%s' reset controller\n",
+ of_node_full_name(rst_init->np));
+ goto err_kfree_rst;
+ }
+
+ return rst;
+
+err_kfree_rst:
+ kfree(rst);
+
+ return ERR_PTR(ret);
+}
+
+void ccu_rst_hw_unregister(struct ccu_rst *rst)
+{
+ reset_controller_unregister(&rst->rcdev);
+
+ kfree(rst);
+}
diff --git a/drivers/clk/baikal-t1/ccu-rst.h b/drivers/clk/baikal-t1/ccu-rst.h
new file mode 100644
index 000000000000..d6e8b2f671f4
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-rst.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU Resets interface driver
+ */
+#ifndef __CLK_BT1_CCU_RST_H__
+#define __CLK_BT1_CCU_RST_H__
+
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+struct ccu_rst_info;
+
+/*
+ * enum ccu_rst_type - CCU Reset types
+ * @CCU_RST_TRIG: Self-deasserted reset signal.
+ * @CCU_RST_DIR: Directly controlled reset signal.
+ */
+enum ccu_rst_type {
+ CCU_RST_TRIG,
+ CCU_RST_DIR,
+};
+
+/*
+ * struct ccu_rst_init_data - CCU Resets initialization data
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @np: Pointer to the node with the System CCU block.
+ */
+struct ccu_rst_init_data {
+ struct regmap *sys_regs;
+ struct device_node *np;
+};
+
+/*
+ * struct ccu_rst - CCU Reset descriptor
+ * @rcdev: Reset controller descriptor.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @rsts_info: Reset flag info (base address and mask).
+ */
+struct ccu_rst {
+ struct reset_controller_dev rcdev;
+ struct regmap *sys_regs;
+ const struct ccu_rst_info *rsts_info;
+};
+#define to_ccu_rst(_rcdev) container_of(_rcdev, struct ccu_rst, rcdev)
+
+#ifdef CONFIG_CLK_BT1_CCU_RST
+
+struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *init);
+
+void ccu_rst_hw_unregister(struct ccu_rst *rst);
+
+#else
+
+static inline
+struct ccu_rst *ccu_rst_hw_register(const struct ccu_rst_init_data *init)
+{
+ return NULL;
+}
+
+static inline void ccu_rst_hw_unregister(struct ccu_rst *rst) {}
+
+#endif
+
+#endif /* __CLK_BT1_CCU_RST_H__ */
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
index f141fda12b09..0e772e034812 100644
--- a/drivers/clk/baikal-t1/clk-ccu-div.c
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "bt1-ccu-div: " fmt
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
@@ -24,9 +25,9 @@
#include <linux/regmap.h>
#include <dt-bindings/clock/bt1-ccu.h>
-#include <dt-bindings/reset/bt1-ccu.h>
#include "ccu-div.h"
+#include "ccu-rst.h"
#define CCU_AXI_MAIN_BASE 0x030
#define CCU_AXI_DDR_BASE 0x034
@@ -76,6 +77,16 @@
.divider = _divider \
}
+#define CCU_DIV_BUF_INFO(_id, _name, _pname, _base, _flags) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_BUF, \
+ .flags = _flags \
+ }
+
#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \
{ \
.id = _id, \
@@ -85,12 +96,6 @@
.divider = _divider \
}
-#define CCU_DIV_RST_MAP(_rst_id, _clk_id) \
- { \
- .rst_id = _rst_id, \
- .clk_id = _clk_id \
- }
-
struct ccu_div_info {
unsigned int id;
const char *name;
@@ -105,11 +110,6 @@ struct ccu_div_info {
unsigned long features;
};
-struct ccu_div_rst_map {
- unsigned int rst_id;
- unsigned int clk_id;
-};
-
struct ccu_div_data {
struct device_node *np;
struct regmap *sys_regs;
@@ -118,11 +118,8 @@ struct ccu_div_data {
const struct ccu_div_info *divs_info;
struct ccu_div **divs;
- unsigned int rst_num;
- const struct ccu_div_rst_map *rst_map;
- struct reset_controller_dev rcdev;
+ struct ccu_rst *rsts;
};
-#define to_ccu_div_data(_rcdev) container_of(_rcdev, struct ccu_div_data, rcdev)
/*
* AXI Main Interconnect (axi_main_clk) and DDR AXI-bus (axi_ddr_clk) clocks
@@ -169,33 +166,22 @@ static const struct ccu_div_info axi_info[] = {
CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN)
};
-static const struct ccu_div_rst_map axi_rst_map[] = {
- CCU_DIV_RST_MAP(CCU_AXI_MAIN_RST, CCU_AXI_MAIN_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_DDR_RST, CCU_AXI_DDR_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_SATA_RST, CCU_AXI_SATA_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_GMAC0_RST, CCU_AXI_GMAC0_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_GMAC1_RST, CCU_AXI_GMAC1_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_XGMAC_RST, CCU_AXI_XGMAC_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_PCIE_M_RST, CCU_AXI_PCIE_M_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_PCIE_S_RST, CCU_AXI_PCIE_S_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_USB_RST, CCU_AXI_USB_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_HWA_RST, CCU_AXI_HWA_CLK),
- CCU_DIV_RST_MAP(CCU_AXI_SRAM_RST, CCU_AXI_SRAM_CLK)
-};
-
/*
* APB-bus clock is marked as critical since it's a main communication bus
* for the SoC devices registers IO-operations.
*/
static const struct ccu_div_info sys_info[] = {
- CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ CCU_DIV_VAR_INFO(CCU_SYS_SATA_CLK, "sys_sata_clk",
"sata_clk", CCU_SYS_SATA_REF_BASE, 4,
CLK_SET_RATE_GATE,
CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED |
CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_BUF_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ "sys_sata_clk", CCU_SYS_SATA_REF_BASE,
+ CLK_SET_RATE_PARENT),
CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk",
"pcie_clk", CCU_SYS_APB_BASE, 5,
- CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
+ CLK_IS_CRITICAL, CCU_DIV_BASIC | CCU_DIV_RESET_DOMAIN),
CCU_DIV_GATE_INFO(CCU_SYS_GMAC0_TX_CLK, "sys_gmac0_tx_clk",
"eth_clk", CCU_SYS_GMAC0_BASE, 5),
CCU_DIV_FIXED_INFO(CCU_SYS_GMAC0_PTP_CLK, "sys_gmac0_ptp_clk",
@@ -204,10 +190,12 @@ static const struct ccu_div_info sys_info[] = {
"eth_clk", CCU_SYS_GMAC1_BASE, 5),
CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk",
"eth_clk", 10),
- CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
- "eth_clk", CCU_SYS_XGMAC_BASE, 8),
+ CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_CLK, "sys_xgmac_clk",
+ "eth_clk", CCU_SYS_XGMAC_BASE, 1),
+ CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
+ "sys_xgmac_clk", 8),
CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk",
- "eth_clk", 10),
+ "sys_xgmac_clk", 8),
CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk",
"eth_clk", CCU_SYS_USB_BASE, 10),
CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk",
@@ -227,74 +215,58 @@ static const struct ccu_div_info sys_info[] = {
"ref_clk", 25),
CCU_DIV_VAR_INFO(CCU_SYS_TIMER0_CLK, "sys_timer0_clk",
"ref_clk", CCU_SYS_TIMER0_BASE, 17,
- CLK_SET_RATE_GATE, 0),
+ CLK_SET_RATE_GATE, CCU_DIV_BASIC),
CCU_DIV_VAR_INFO(CCU_SYS_TIMER1_CLK, "sys_timer1_clk",
"ref_clk", CCU_SYS_TIMER1_BASE, 17,
- CLK_SET_RATE_GATE, 0),
+ CLK_SET_RATE_GATE, CCU_DIV_BASIC),
CCU_DIV_VAR_INFO(CCU_SYS_TIMER2_CLK, "sys_timer2_clk",
"ref_clk", CCU_SYS_TIMER2_BASE, 17,
- CLK_SET_RATE_GATE, 0),
+ CLK_SET_RATE_GATE, CCU_DIV_BASIC),
CCU_DIV_VAR_INFO(CCU_SYS_WDT_CLK, "sys_wdt_clk",
"eth_clk", CCU_SYS_WDT_BASE, 17,
CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE_TO_THREE)
};
-static const struct ccu_div_rst_map sys_rst_map[] = {
- CCU_DIV_RST_MAP(CCU_SYS_SATA_REF_RST, CCU_SYS_SATA_REF_CLK),
- CCU_DIV_RST_MAP(CCU_SYS_APB_RST, CCU_SYS_APB_CLK),
-};
+static struct ccu_div_data *axi_data;
+static struct ccu_div_data *sys_data;
-static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data,
- unsigned int clk_id)
+static void ccu_div_set_data(struct ccu_div_data *data)
{
- struct ccu_div *div;
- int idx;
-
- for (idx = 0; idx < data->divs_num; ++idx) {
- div = data->divs[idx];
- if (div && div->id == clk_id)
- return div;
- }
-
- return ERR_PTR(-EINVAL);
+ struct device_node *np = data->np;
+
+ if (of_device_is_compatible(np, "baikal,bt1-ccu-axi"))
+ axi_data = data;
+ else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys"))
+ sys_data = data;
+ else
+ pr_err("Invalid DT node '%s' specified\n", of_node_full_name(np));
}
-static int ccu_div_reset(struct reset_controller_dev *rcdev,
- unsigned long rst_id)
+static struct ccu_div_data *ccu_div_get_data(struct device_node *np)
{
- struct ccu_div_data *data = to_ccu_div_data(rcdev);
- const struct ccu_div_rst_map *map;
- struct ccu_div *div;
- int idx, ret;
+ if (of_device_is_compatible(np, "baikal,bt1-ccu-axi"))
+ return axi_data;
+ else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys"))
+ return sys_data;
- for (idx = 0, map = data->rst_map; idx < data->rst_num; ++idx, ++map) {
- if (map->rst_id == rst_id)
- break;
- }
- if (idx == data->rst_num) {
- pr_err("Invalid reset ID %lu specified\n", rst_id);
- return -EINVAL;
- }
+ pr_err("Invalid DT node '%s' specified\n", of_node_full_name(np));
- div = ccu_div_find_desc(data, map->clk_id);
- if (IS_ERR(div)) {
- pr_err("Invalid clock ID %d in mapping\n", map->clk_id);
- return PTR_ERR(div);
- }
+ return NULL;
+}
- ret = ccu_div_reset_domain(div);
- if (ret) {
- pr_err("Reset isn't supported by divider %s\n",
- clk_hw_get_name(ccu_div_get_clk_hw(div)));
+static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data,
+ unsigned int clk_id)
+{
+ int idx;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ if (data->divs_info[idx].id == clk_id)
+ return data->divs[idx];
}
- return ret;
+ return ERR_PTR(-EINVAL);
}
-static const struct reset_control_ops ccu_div_rst_ops = {
- .reset = ccu_div_reset,
-};
-
static struct ccu_div_data *ccu_div_create_data(struct device_node *np)
{
struct ccu_div_data *data;
@@ -308,13 +280,9 @@ static struct ccu_div_data *ccu_div_create_data(struct device_node *np)
if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) {
data->divs_num = ARRAY_SIZE(axi_info);
data->divs_info = axi_info;
- data->rst_num = ARRAY_SIZE(axi_rst_map);
- data->rst_map = axi_rst_map;
} else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) {
data->divs_num = ARRAY_SIZE(sys_info);
data->divs_info = sys_info;
- data->rst_num = ARRAY_SIZE(sys_rst_map);
- data->rst_map = sys_rst_map;
} else {
pr_err("Incompatible DT node '%s' specified\n",
of_node_full_name(np));
@@ -365,14 +333,16 @@ static struct clk_hw *ccu_div_of_clk_hw_get(struct of_phandle_args *clkspec,
clk_id = clkspec->args[0];
div = ccu_div_find_desc(data, clk_id);
if (IS_ERR(div)) {
- pr_info("Invalid clock ID %d specified\n", clk_id);
+ if (div != ERR_PTR(-EPROBE_DEFER))
+ pr_info("Invalid clock ID %d specified\n", clk_id);
+
return ERR_CAST(div);
}
return ccu_div_get_clk_hw(div);
}
-static int ccu_div_clk_register(struct ccu_div_data *data)
+static int ccu_div_clk_register(struct ccu_div_data *data, bool defer)
{
int idx, ret;
@@ -380,6 +350,13 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
const struct ccu_div_info *info = &data->divs_info[idx];
struct ccu_div_init_data init = {0};
+ if (!!(info->features & CCU_DIV_BASIC) ^ defer) {
+ if (!data->divs[idx])
+ data->divs[idx] = ERR_PTR(-EPROBE_DEFER);
+
+ continue;
+ }
+
init.id = info->id;
init.name = info->name;
init.parent_name = info->parent_name;
@@ -396,6 +373,9 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
init.base = info->base;
init.sys_regs = data->sys_regs;
init.divider = info->divider;
+ } else if (init.type == CCU_DIV_BUF) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
} else {
init.divider = info->divider;
}
@@ -409,49 +389,104 @@ static int ccu_div_clk_register(struct ccu_div_data *data)
}
}
- ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
- if (ret) {
- pr_err("Couldn't register dividers '%s' clock provider\n",
- of_node_full_name(data->np));
- goto err_hw_unregister;
- }
-
return 0;
err_hw_unregister:
- for (--idx; idx >= 0; --idx)
+ for (--idx; idx >= 0; --idx) {
+ if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
+ continue;
+
ccu_div_hw_unregister(data->divs[idx]);
+ }
return ret;
}
-static void ccu_div_clk_unregister(struct ccu_div_data *data)
+static void ccu_div_clk_unregister(struct ccu_div_data *data, bool defer)
{
int idx;
- of_clk_del_provider(data->np);
+ /* Uninstall only the clocks registered on the specified stage */
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ if (!!(data->divs_info[idx].features & CCU_DIV_BASIC) ^ defer)
+ continue;
- for (idx = 0; idx < data->divs_num; ++idx)
ccu_div_hw_unregister(data->divs[idx]);
+ }
}
-static int ccu_div_rst_register(struct ccu_div_data *data)
+static int ccu_div_of_register(struct ccu_div_data *data)
{
int ret;
- data->rcdev.ops = &ccu_div_rst_ops;
- data->rcdev.of_node = data->np;
- data->rcdev.nr_resets = data->rst_num;
+ ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register dividers '%s' clock provider\n",
+ of_node_full_name(data->np));
+ }
+
+ return ret;
+}
- ret = reset_controller_register(&data->rcdev);
- if (ret)
+static int ccu_div_rst_register(struct ccu_div_data *data)
+{
+ struct ccu_rst_init_data init = {0};
+
+ init.sys_regs = data->sys_regs;
+ init.np = data->np;
+
+ data->rsts = ccu_rst_hw_register(&init);
+ if (IS_ERR(data->rsts)) {
pr_err("Couldn't register divider '%s' reset controller\n",
of_node_full_name(data->np));
+ return PTR_ERR(data->rsts);
+ }
+
+ return 0;
+}
+
+static int ccu_div_probe(struct platform_device *pdev)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = ccu_div_get_data(dev_of_node(&pdev->dev));
+ if (!data)
+ return -EINVAL;
+
+ ret = ccu_div_clk_register(data, false);
+ if (ret)
+ return ret;
+
+ ret = ccu_div_rst_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ return 0;
+
+err_clk_unregister:
+ ccu_div_clk_unregister(data, false);
return ret;
}
-static void ccu_div_init(struct device_node *np)
+static const struct of_device_id ccu_div_of_match[] = {
+ { .compatible = "baikal,bt1-ccu-axi" },
+ { .compatible = "baikal,bt1-ccu-sys" },
+ { }
+};
+
+static struct platform_driver ccu_div_driver = {
+ .probe = ccu_div_probe,
+ .driver = {
+ .name = "clk-ccu-div",
+ .of_match_table = ccu_div_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ccu_div_driver);
+
+static __init void ccu_div_init(struct device_node *np)
{
struct ccu_div_data *data;
int ret;
@@ -464,22 +499,23 @@ static void ccu_div_init(struct device_node *np)
if (ret)
goto err_free_data;
- ret = ccu_div_clk_register(data);
+ ret = ccu_div_clk_register(data, true);
if (ret)
goto err_free_data;
- ret = ccu_div_rst_register(data);
+ ret = ccu_div_of_register(data);
if (ret)
goto err_clk_unregister;
+ ccu_div_set_data(data);
+
return;
err_clk_unregister:
- ccu_div_clk_unregister(data);
+ ccu_div_clk_unregister(data, true);
err_free_data:
ccu_div_free_data(data);
}
-
-CLK_OF_DECLARE(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
-CLK_OF_DECLARE(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
+CLK_OF_DECLARE_DRIVER(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
+CLK_OF_DECLARE_DRIVER(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
index 2445d4b12baf..fce02ce77347 100644
--- a/drivers/clk/baikal-t1/clk-ccu-pll.c
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
@@ -31,13 +32,14 @@
#define CCU_PCIE_PLL_BASE 0x018
#define CCU_ETH_PLL_BASE 0x020
-#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags) \
- { \
- .id = _id, \
- .name = _name, \
- .parent_name = _pname, \
- .base = _base, \
- .flags = _flags \
+#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags, _features) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .flags = _flags, \
+ .features = _features, \
}
#define CCU_PLL_NUM ARRAY_SIZE(pll_info)
@@ -48,6 +50,7 @@ struct ccu_pll_info {
const char *parent_name;
unsigned int base;
unsigned long flags;
+ unsigned long features;
};
/*
@@ -61,15 +64,15 @@ struct ccu_pll_info {
*/
static const struct ccu_pll_info pll_info[] = {
CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE,
- CLK_IS_CRITICAL),
+ CLK_IS_CRITICAL, CCU_PLL_BASIC),
CCU_PLL_INFO(CCU_SATA_PLL, "sata_pll", "ref_clk", CCU_SATA_PLL_BASE,
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0),
CCU_PLL_INFO(CCU_DDR_PLL, "ddr_pll", "ref_clk", CCU_DDR_PLL_BASE,
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0),
CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE,
- CLK_IS_CRITICAL),
+ CLK_IS_CRITICAL, CCU_PLL_BASIC),
CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE,
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE)
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0)
};
struct ccu_pll_data {
@@ -78,16 +81,16 @@ struct ccu_pll_data {
struct ccu_pll *plls[CCU_PLL_NUM];
};
+static struct ccu_pll_data *pll_data;
+
static struct ccu_pll *ccu_pll_find_desc(struct ccu_pll_data *data,
unsigned int clk_id)
{
- struct ccu_pll *pll;
int idx;
for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
- pll = data->plls[idx];
- if (pll && pll->id == clk_id)
- return pll;
+ if (pll_info[idx].id == clk_id)
+ return data->plls[idx];
}
return ERR_PTR(-EINVAL);
@@ -133,14 +136,16 @@ static struct clk_hw *ccu_pll_of_clk_hw_get(struct of_phandle_args *clkspec,
clk_id = clkspec->args[0];
pll = ccu_pll_find_desc(data, clk_id);
if (IS_ERR(pll)) {
- pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+ if (pll != ERR_PTR(-EPROBE_DEFER))
+ pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+
return ERR_CAST(pll);
}
return ccu_pll_get_clk_hw(pll);
}
-static int ccu_pll_clk_register(struct ccu_pll_data *data)
+static int ccu_pll_clk_register(struct ccu_pll_data *data, bool defer)
{
int idx, ret;
@@ -148,6 +153,14 @@ static int ccu_pll_clk_register(struct ccu_pll_data *data)
const struct ccu_pll_info *info = &pll_info[idx];
struct ccu_pll_init_data init = {0};
+ /* Defer non-basic PLL allocation to the probe stage */
+ if (!!(info->features & CCU_PLL_BASIC) ^ defer) {
+ if (!data->plls[idx])
+ data->plls[idx] = ERR_PTR(-EPROBE_DEFER);
+
+ continue;
+ }
+
init.id = info->id;
init.name = info->name;
init.parent_name = info->parent_name;
@@ -155,6 +168,7 @@ static int ccu_pll_clk_register(struct ccu_pll_data *data)
init.sys_regs = data->sys_regs;
init.np = data->np;
init.flags = info->flags;
+ init.features = info->features;
data->plls[idx] = ccu_pll_hw_register(&init);
if (IS_ERR(data->plls[idx])) {
@@ -165,22 +179,70 @@ static int ccu_pll_clk_register(struct ccu_pll_data *data)
}
}
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx) {
+ if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
+ continue;
+
+ ccu_pll_hw_unregister(data->plls[idx]);
+ }
+
+ return ret;
+}
+
+static void ccu_pll_clk_unregister(struct ccu_pll_data *data, bool defer)
+{
+ int idx;
+
+ /* Uninstall only the clocks registered on the specified stage */
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ if (!!(pll_info[idx].features & CCU_PLL_BASIC) ^ defer)
+ continue;
+
+ ccu_pll_hw_unregister(data->plls[idx]);
+ }
+}
+
+static int ccu_pll_of_register(struct ccu_pll_data *data)
+{
+ int ret;
+
ret = of_clk_add_hw_provider(data->np, ccu_pll_of_clk_hw_get, data);
if (ret) {
pr_err("Couldn't register PLL provider of '%s'\n",
of_node_full_name(data->np));
- goto err_hw_unregister;
}
- return 0;
+ return ret;
+}
-err_hw_unregister:
- for (--idx; idx >= 0; --idx)
- ccu_pll_hw_unregister(data->plls[idx]);
+static int ccu_pll_probe(struct platform_device *pdev)
+{
+ struct ccu_pll_data *data = pll_data;
- return ret;
+ if (!data)
+ return -EINVAL;
+
+ return ccu_pll_clk_register(data, false);
}
+static const struct of_device_id ccu_pll_of_match[] = {
+ { .compatible = "baikal,bt1-ccu-pll" },
+ { }
+};
+
+static struct platform_driver ccu_pll_driver = {
+ .probe = ccu_pll_probe,
+ .driver = {
+ .name = "clk-ccu-pll",
+ .of_match_table = ccu_pll_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ccu_pll_driver);
+
static __init void ccu_pll_init(struct device_node *np)
{
struct ccu_pll_data *data;
@@ -194,13 +256,22 @@ static __init void ccu_pll_init(struct device_node *np)
if (ret)
goto err_free_data;
- ret = ccu_pll_clk_register(data);
+ ret = ccu_pll_clk_register(data, true);
if (ret)
goto err_free_data;
+ ret = ccu_pll_of_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ pll_data = data;
+
return;
+err_clk_unregister:
+ ccu_pll_clk_unregister(data, true);
+
err_free_data:
ccu_pll_free_data(data);
}
-CLK_OF_DECLARE(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init);
+CLK_OF_DECLARE_DRIVER(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 48a1eb9f2d55..e74fe6219d14 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -502,6 +503,8 @@ struct bcm2835_clock_data {
bool low_jitter;
u32 tcnt_mux;
+
+ bool round_up;
};
struct bcm2835_gate_data {
@@ -966,9 +969,9 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw,
return div;
}
-static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
- unsigned long parent_rate,
- u32 div)
+static unsigned long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
+ unsigned long parent_rate,
+ u32 div)
{
const struct bcm2835_clock_data *data = clock->data;
u64 temp;
@@ -993,12 +996,34 @@ static long bcm2835_clock_rate_from_divisor(struct bcm2835_clock *clock,
return temp;
}
+static unsigned long bcm2835_round_rate(unsigned long rate)
+{
+ unsigned long scaler;
+ unsigned long limit;
+
+ limit = rate / 100000;
+
+ scaler = 1;
+ while (scaler < limit)
+ scaler *= 10;
+
+ /*
+ * If increasing a clock by less than 0.1% changes it
+ * from ..999.. to ..000.., round up.
+ */
+ if ((rate + scaler - 1) / scaler % 1000 == 0)
+ rate = roundup(rate, scaler);
+
+ return rate;
+}
+
static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct bcm2835_cprman *cprman = clock->cprman;
const struct bcm2835_clock_data *data = clock->data;
+ unsigned long rate;
u32 div;
if (data->int_bits == 0 && data->frac_bits == 0)
@@ -1006,7 +1031,12 @@ static unsigned long bcm2835_clock_get_rate(struct clk_hw *hw,
div = cprman_read(cprman, data->div_reg);
- return bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
+ rate = bcm2835_clock_rate_from_divisor(clock, parent_rate, div);
+
+ if (data->round_up)
+ rate = bcm2835_round_rate(rate);
+
+ return rate;
}
static void bcm2835_clock_wait_busy(struct bcm2835_clock *clock)
@@ -1784,7 +1814,7 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.load_mask = CM_PLLC_LOADPER,
.hold_mask = CM_PLLC_HOLDPER,
.fixed_divider = 1,
- .flags = CLK_SET_RATE_PARENT),
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
/*
* PLLD is the display PLL, used to drive DSI display panels.
@@ -2143,7 +2173,8 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.div_reg = CM_UARTDIV,
.int_bits = 10,
.frac_bits = 12,
- .tcnt_mux = 28),
+ .tcnt_mux = 28,
+ .round_up = true),
/* TV encoder clock. Only operating frequency is 108Mhz. */
[BCM2835_CLOCK_VEC] = REGISTER_PER_CLK(
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 1a098db12062..680f9d8d357c 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -726,6 +726,7 @@ void iproc_pll_clk_setup(struct device_node *node,
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
+ const char *clk_name;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@@ -773,7 +774,12 @@ void iproc_pll_clk_setup(struct device_node *node,
iclk = &iclk_array[0];
iclk->pll = pll;
- init.name = node->name;
+ ret = of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ if (WARN_ON(ret))
+ goto err_pll_register;
+
+ init.name = clk_name;
init.ops = &iproc_pll_ops;
init.flags = 0;
parent_name = of_clk_get_parent_name(node, 0);
@@ -793,13 +799,11 @@ void iproc_pll_clk_setup(struct device_node *node,
goto err_pll_register;
clk_data->hws[0] = &iclk->hw;
+ parent_name = clk_name;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
- const char *clk_name;
-
memset(&init, 0, sizeof(init));
- parent_name = node->name;
ret = of_property_read_string_index(node, "clock-output-names",
i, &clk_name);
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 73518009a0f2..679f4649a7ef 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -33,6 +33,7 @@ enum rpi_firmware_clk_id {
RPI_FIRMWARE_EMMC2_CLK_ID,
RPI_FIRMWARE_M2MC_CLK_ID,
RPI_FIRMWARE_PIXEL_BVB_CLK_ID,
+ RPI_FIRMWARE_VEC_CLK_ID,
RPI_FIRMWARE_NUM_CLK_ID,
};
@@ -51,6 +52,7 @@ static char *rpi_firmware_clk_names[] = {
[RPI_FIRMWARE_EMMC2_CLK_ID] = "emmc2",
[RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc",
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb",
+ [RPI_FIRMWARE_VEC_CLK_ID] = "vec",
};
#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0)
@@ -129,9 +131,18 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
},
+ [RPI_FIRMWARE_PIXEL_CLK_ID] = {
+ .export = true,
+ },
+ [RPI_FIRMWARE_HEVC_CLK_ID] = {
+ .export = true,
+ },
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
},
+ [RPI_FIRMWARE_VEC_CLK_ID] = {
+ .export = true,
+ },
};
/*
@@ -203,7 +214,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
if (ret)
- return ret;
+ return 0;
return val;
}
@@ -220,7 +231,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
if (ret)
- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
clk_hw_get_name(hw), ret);
return ret;
@@ -288,7 +299,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
&min_rate);
if (ret) {
- dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
id, ret);
return ERR_PTR(ret);
}
@@ -344,8 +355,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct rpi_firmware_get_clocks_response *clks;
int ret;
+ /*
+ * The firmware doesn't guarantee that the last element of
+ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
+ * zero element as a sentinel.
+ */
clks = devm_kcalloc(rpi->dev,
- RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
@@ -360,7 +376,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct raspberrypi_clk_variant *variant;
if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
- dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
+ dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+ clks->id, RPI_FIRMWARE_NUM_CLK_ID);
return -EINVAL;
}
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index bccdfa00fd37..67a9edbba29c 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -500,12 +500,15 @@ static void __init berlin2_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!gbase)
return;
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index e9518d35f262..dd2784bb75b6 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -286,19 +286,23 @@ static void __init berlin2q_clock_setup(struct device_node *np)
int n, ret;
clk_data = kzalloc(struct_size(clk_data, hws, MAX_CLKS), GFP_KERNEL);
- if (!clk_data)
+ if (!clk_data) {
+ of_node_put(parent_np);
return;
+ }
clk_data->num = MAX_CLKS;
hws = clk_data->hws;
gbase = of_iomap(parent_np, 0);
if (!gbase) {
+ of_node_put(parent_np);
pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
+ of_node_put(parent_np);
if (!cpupll_base) {
pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index bacebd457e6f..8b3c059e19a1 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -80,7 +80,7 @@ struct asm9260_mux_clock {
u8 mask;
u32 *table;
const char *name;
- const char **parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long offset;
unsigned long flags;
@@ -232,10 +232,10 @@ static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
HW_AHBCLKCTRL1, 16 },
};
-static const char __initdata *main_mux_p[] = { NULL, NULL };
-static const char __initdata *i2s0_mux_p[] = { NULL, NULL, "i2s0m_div"};
-static const char __initdata *i2s1_mux_p[] = { NULL, NULL, "i2s1m_div"};
-static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
+static struct clk_parent_data __initdata main_mux_p[] = { { .index = 0, }, { .name = "pll" } };
+static struct clk_parent_data __initdata i2s0_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s0m_div"} };
+static struct clk_parent_data __initdata i2s1_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "i2s1m_div"} };
+static struct clk_parent_data __initdata clkout_mux_p[] = { { .index = 0, }, { .name = "pll" }, { .name = "rtc"} };
static u32 three_mux_table[] = {0, 1, 3};
static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
@@ -255,9 +255,10 @@ static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
static void __init asm9260_acc_init(struct device_node *np)
{
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_hw;
struct clk_hw **hws;
- const char *ref_clk, *pll_clk = "pll";
+ const char *pll_clk = "pll";
+ struct clk_parent_data pll_parent_data = { .index = 0 };
u32 rate;
int n;
@@ -274,21 +275,15 @@ static void __init asm9260_acc_init(struct device_node *np)
/* register pll */
rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
- /* TODO: Convert to DT parent scheme */
- ref_clk = of_clk_get_parent_name(np, 0);
- hw = __clk_hw_register_fixed_rate(NULL, NULL, pll_clk,
- ref_clk, NULL, NULL, 0, rate, 0,
- CLK_FIXED_RATE_PARENT_ACCURACY);
-
- if (IS_ERR(hw))
+ pll_hw = clk_hw_register_fixed_rate_parent_accuracy(NULL, pll_clk, &pll_parent_data,
+ 0, rate);
+ if (IS_ERR(pll_hw))
panic("%pOFn: can't register REFCLK. Check DT!", np);
for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
- mc->parent_names[0] = ref_clk;
- mc->parent_names[1] = pll_clk;
- hw = clk_hw_register_mux_table(NULL, mc->name, mc->parent_names,
+ hw = clk_hw_register_mux_table_parent_data(NULL, mc->name, mc->parent_data,
mc->num_parents, mc->flags, base + mc->offset,
0, mc->mask, 0, mc->table, &asm9260_clk_lock);
}
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 24dab2312bc6..9c3305bcb27a 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -622,7 +622,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */
/* P-Bus (BCLK) clock divider */
- hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
+ hw = clk_hw_register_divider_table(dev, "bclk", "epll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 20, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index 5467d941ddfd..1449d0537674 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -665,10 +665,9 @@ static int cdce706_probe(struct i2c_client *client)
cdce);
}
-static int cdce706_remove(struct i2c_client *client)
+static void cdce706_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
- return 0;
}
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index aa5c72bab83e..320d39922206 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -557,7 +557,7 @@ static int cs2000_version_print(struct cs2000_priv *priv)
return 0;
}
-static int cs2000_remove(struct i2c_client *client)
+static void cs2000_remove(struct i2c_client *client)
{
struct cs2000_priv *priv = i2c_get_clientdata(client);
struct device *dev = priv_to_dev(priv);
@@ -566,8 +566,6 @@ static int cs2000_remove(struct i2c_client *client)
of_clk_del_provider(np);
clk_hw_unregister(&priv->hw);
-
- return 0;
}
static int cs2000_probe(struct i2c_client *client)
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index ac68a6b40f0e..7d775954e26d 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -49,12 +49,24 @@ const struct clk_ops clk_fixed_rate_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+static void devm_clk_hw_register_fixed_rate_release(struct device *dev, void *res)
+{
+ struct clk_fixed_rate *fix = res;
+
+ /*
+ * We cannot use clk_hw_unregister_fixed_rate, since it will kfree()
+ * the hw, resulting in a double free. Just unregister the hw and let
+ * the devres code kfree() it.
+ */
+ clk_hw_unregister(&fix->hw);
+}
+
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
- unsigned long clk_fixed_flags)
+ unsigned long clk_fixed_flags, bool devm)
{
struct clk_fixed_rate *fixed;
struct clk_hw *hw;
@@ -62,7 +74,11 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
int ret = -EINVAL;
/* allocate fixed-rate clock */
- fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+ if (devm)
+ fixed = devres_alloc(devm_clk_hw_register_fixed_rate_release,
+ sizeof(*fixed), GFP_KERNEL);
+ else
+ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
if (!fixed)
return ERR_PTR(-ENOMEM);
@@ -90,9 +106,13 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
else
ret = of_clk_hw_register(np, hw);
if (ret) {
- kfree(fixed);
+ if (devm)
+ devres_free(fixed);
+ else
+ kfree(fixed);
hw = ERR_PTR(ret);
- }
+ } else if (devm)
+ devres_add(dev, fixed);
return hw;
}
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 81cb90955d68..460e7216bfa1 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -286,7 +286,7 @@ static struct platform_driver lan966x_clk_driver = {
.of_match_table = lan966x_clk_dt_ids,
},
};
-builtin_platform_driver(lan966x_clk_driver);
+module_platform_driver(lan966x_clk_driver);
MODULE_AUTHOR("Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>");
MODULE_DESCRIPTION("LAN966X clock driver");
diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c
index 565bcd0cdde9..80944bf482e9 100644
--- a/drivers/clk/clk-lochnagar.c
+++ b/drivers/clk/clk-lochnagar.c
@@ -19,7 +19,7 @@
#include <linux/mfd/lochnagar1_regs.h>
#include <linux/mfd/lochnagar2_regs.h>
-#include <dt-bindings/clk/lochnagar.h>
+#include <dt-bindings/clock/lochnagar.h>
#define LOCHNAGAR_NUM_CLOCKS (LOCHNAGAR_SPDIF_CLKOUT + 1)
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index bad2677e11ae..71fbe687fa7b 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -99,7 +99,7 @@ static void __init nomadik_src_init(void)
if (!src_base) {
pr_err("%s: must have src parent node with REGS (%pOFn)\n",
__func__, np);
- return;
+ goto out_put;
}
/* Set all timers to use the 2.4 MHz TIMCLK */
@@ -132,6 +132,9 @@ static void __init nomadik_src_init(void)
}
writel(val, src_base + SRC_XTALCR);
register_reboot_notifier(&nomadik_clk_reboot_notifier);
+
+out_put:
+ of_node_put(np);
}
/**
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index e677bb5a784b..e319cfa51a8a 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -129,20 +129,6 @@ npcm7xx_clk_register_pll(void __iomem *pllcon, const char *name,
#define NPCM7XX_SECCNT (0x68)
#define NPCM7XX_CNTR25M (0x6C)
-struct npcm7xx_clk_gate_data {
- u32 reg;
- u8 bit_idx;
- const char *name;
- const char *parent_name;
- unsigned long flags;
- /*
- * If this clock is exported via DT, set onecell_idx to constant
- * defined in include/dt-bindings/clock/nuvoton, NPCM7XX-clock.h for
- * this specific clock. Otherwise, set to -1.
- */
- int onecell_idx;
-};
-
struct npcm7xx_clk_mux_data {
u8 shift;
u8 mask;
@@ -160,21 +146,6 @@ struct npcm7xx_clk_mux_data {
};
-struct npcm7xx_clk_div_fixed_data {
- u8 mult;
- u8 div;
- const char *name;
- const char *parent_name;
- u8 clk_divider_flags;
- /*
- * If this clock is exported via DT, set onecell_idx to constant
- * defined in include/dt-bindings/clock/nuvoton, NPCM7XX-clock.h for
- * this specific clock. Otherwise, set to -1.
- */
- int onecell_idx;
-};
-
-
struct npcm7xx_clk_div_data {
u32 reg;
u8 shift;
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
index cda5e258355b..584e293156ad 100644
--- a/drivers/clk/clk-oxnas.c
+++ b/drivers/clk/clk-oxnas.c
@@ -207,7 +207,7 @@ static const struct of_device_id oxnas_stdclk_dt_ids[] = {
static int oxnas_stdclk_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *parent_np;
const struct oxnas_stdclk_data *data;
struct regmap *regmap;
int ret;
@@ -215,7 +215,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
- regmap = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ regmap = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "failed to have parent regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 88898b97a443..5eddb9f0d6bd 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1063,8 +1063,13 @@ static void __init _clockgen_init(struct device_node *np, bool legacy);
*/
static void __init legacy_init_clockgen(struct device_node *np)
{
- if (!clockgen.node)
- _clockgen_init(of_get_parent(np), true);
+ if (!clockgen.node) {
+ struct device_node *parent_np;
+
+ parent_np = of_get_parent(np);
+ _clockgen_init(parent_np, true);
+ of_node_put(parent_np);
+ }
}
/* Legacy node */
@@ -1159,6 +1164,7 @@ static struct clk * __init create_sysclk(const char *name)
sysclk = of_get_child_by_name(clockgen.node, "sysclk");
if (sysclk) {
clk = sysclk_from_fixed(sysclk, name);
+ of_node_put(sysclk);
if (!IS_ERR(clk))
return clk;
}
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 4481c4303534..c028fa103bed 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -370,10 +370,9 @@ static int si514_probe(struct i2c_client *client)
return 0;
}
-static int si514_remove(struct i2c_client *client)
+static void si514_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
- return 0;
}
static const struct i2c_device_id si514_id[] = {
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 4bca73212662..0e528d7ba656 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1796,7 +1796,7 @@ cleanup:
return err;
}
-static int si5341_remove(struct i2c_client *client)
+static void si5341_remove(struct i2c_client *client)
{
struct clk_si5341 *data = i2c_get_clientdata(client);
int i;
@@ -1807,8 +1807,6 @@ static int si5341_remove(struct i2c_client *client)
if (data->clk[i].vddo_reg)
regulator_disable(data->clk[i].vddo_reg);
}
-
- return 0;
}
static const struct i2c_device_id si5341_id[] = {
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index b9f088c4ba2f..9e939c98a455 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1651,11 +1651,9 @@ static int si5351_i2c_probe(struct i2c_client *client)
return 0;
}
-static int si5351_i2c_remove(struct i2c_client *client)
+static void si5351_i2c_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
-
- return 0;
}
static struct i2c_driver si5351_driver = {
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 1ff8f32f734d..0a6d70c49726 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -498,10 +498,9 @@ static int si570_probe(struct i2c_client *client)
return 0;
}
-static int si570_remove(struct i2c_client *client)
+static void si570_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
- return 0;
}
static const struct of_device_id clk_si570_of_match[] = {
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index e5fbefd6ac2d..38f44b5b9b1b 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -200,7 +200,9 @@ static int tps68470_clk_probe(struct platform_device *pdev)
.flags = CLK_SET_RATE_GATE,
};
struct tps68470_clkdata *tps68470_clkdata;
+ struct tps68470_clk_consumer *consumer;
int ret;
+ int i;
tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata),
GFP_KERNEL);
@@ -223,10 +225,13 @@ static int tps68470_clk_probe(struct platform_device *pdev)
return ret;
if (pdata) {
- ret = devm_clk_hw_register_clkdev(&pdev->dev,
- &tps68470_clkdata->clkout_hw,
- pdata->consumer_con_id,
- pdata->consumer_dev_name);
+ for (i = 0; i < pdata->n_consumers; i++) {
+ consumer = &pdata->consumers[i];
+ ret = devm_clk_hw_register_clkdev(&pdev->dev,
+ &tps68470_clkdata->clkout_hw,
+ consumer->consumer_con_id,
+ consumer->consumer_dev_name);
+ }
}
return ret;
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index e7be3e54b9be..88689415aff9 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -24,7 +24,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <dt-bindings/clk/versaclock.h>
+#include <dt-bindings/clock/versaclock.h>
/* VersaClock5 registers */
#define VC5_OTP_CONTROL 0x00
@@ -153,6 +153,7 @@ enum vc5_model {
IDT_VC5_5P49V5935,
IDT_VC6_5P49V6901,
IDT_VC6_5P49V6965,
+ IDT_VC6_5P49V6975,
};
/* Structure to describe features of a particular VC5 model */
@@ -230,8 +231,12 @@ static unsigned char vc5_mux_get_parent(struct clk_hw *hw)
container_of(hw, struct vc5_driver_data, clk_mux);
const u8 mask = VC5_PRIM_SRC_SHDN_EN_XTAL | VC5_PRIM_SRC_SHDN_EN_CLKIN;
unsigned int src;
+ int ret;
+
+ ret = regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &src);
+ if (ret)
+ return 0;
- regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &src);
src &= mask;
if (src == VC5_PRIM_SRC_SHDN_EN_XTAL)
@@ -286,8 +291,12 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_mul);
unsigned int premul;
+ int ret;
+
+ ret = regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul);
+ if (ret)
+ return 0;
- regmap_read(vc5->regmap, VC5_PRIM_SRC_SHDN, &premul);
if (premul & VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ)
parent_rate *= 2;
@@ -315,11 +324,9 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
else
mask = 0;
- regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN,
- VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ,
- mask);
-
- return 0;
+ return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN,
+ VC5_PRIM_SRC_SHDN_EN_DOUBLE_XTAL_FREQ,
+ mask);
}
static const struct clk_ops vc5_dbl_ops = {
@@ -334,14 +341,19 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned int prediv, div;
+ int ret;
- regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv);
+ ret = regmap_read(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV, &prediv);
+ if (ret)
+ return 0;
/* The bypass_prediv is set, PLL fed from Ref_in directly. */
if (prediv & VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV)
return parent_rate;
- regmap_read(vc5->regmap, VC5_REF_DIVIDER, &div);
+ ret = regmap_read(vc5->regmap, VC5_REF_DIVIDER, &div);
+ if (ret)
+ return 0;
/* The Sel_prediv2 is set, PLL fed from prediv2 (Ref_in / 2) */
if (div & VC5_REF_DIVIDER_SEL_PREDIV2)
@@ -376,15 +388,17 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
struct vc5_driver_data *vc5 =
container_of(hw, struct vc5_driver_data, clk_pfd);
unsigned long idiv;
+ int ret;
u8 div;
/* CLKIN within range of PLL input, feed directly to PLL. */
if (parent_rate <= 50000000) {
- regmap_update_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
- VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV,
- VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
- regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, 0x00);
- return 0;
+ ret = regmap_set_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, 0x00);
}
idiv = DIV_ROUND_UP(parent_rate, rate);
@@ -395,11 +409,12 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
else
div = VC5_REF_DIVIDER_REF_DIV(idiv);
- regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, div);
- regmap_update_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
- VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV, 0);
+ ret = regmap_update_bits(vc5->regmap, VC5_REF_DIVIDER, 0xff, div);
+ if (ret)
+ return ret;
- return 0;
+ return regmap_clear_bits(vc5->regmap, VC5_VCO_CTRL_AND_PREDIV,
+ VC5_VCO_CTRL_AND_PREDIV_BYPASS_PREDIV);
}
static const struct clk_ops vc5_pfd_ops = {
@@ -551,9 +566,12 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
hwdata->div_int >> 4, hwdata->div_int << 4,
0
};
+ int ret;
- regmap_bulk_write(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0),
- data, 14);
+ ret = regmap_bulk_write(vc5->regmap, VC5_OUT_DIV_FRAC(hwdata->num, 0),
+ data, 14);
+ if (ret)
+ return ret;
/*
* Toggle magic bit in undocumented register for unknown reason.
@@ -561,12 +579,13 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
* datasheet somewhat implies this is needed, but the register
* and the bit is not documented.
*/
- regmap_update_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
- VC5_GLOBAL_REGISTER_GLOBAL_RESET, 0);
- regmap_update_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
- VC5_GLOBAL_REGISTER_GLOBAL_RESET,
- VC5_GLOBAL_REGISTER_GLOBAL_RESET);
- return 0;
+ ret = regmap_clear_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(vc5->regmap, VC5_GLOBAL_REGISTER,
+ VC5_GLOBAL_REGISTER_GLOBAL_RESET);
}
static const struct clk_ops vc5_fod_ops = {
@@ -594,10 +613,9 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
* registers.
*/
if (vc5->chip_info->flags & VC5_HAS_BYPASS_SYNC_BIT) {
- ret = regmap_update_bits(vc5->regmap,
- VC5_RESERVED_X0(hwdata->num),
- VC5_RESERVED_X0_BYPASS_SYNC,
- VC5_RESERVED_X0_BYPASS_SYNC);
+ ret = regmap_set_bits(vc5->regmap,
+ VC5_RESERVED_X0(hwdata->num),
+ VC5_RESERVED_X0_BYPASS_SYNC);
if (ret)
return ret;
}
@@ -606,7 +624,10 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
* If the input mux is disabled, enable it first and
* select source from matching FOD.
*/
- regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ ret = regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ if (ret)
+ return ret;
+
if ((src & mask) == 0) {
src = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_EN_FOD;
ret = regmap_update_bits(vc5->regmap,
@@ -617,18 +638,22 @@ static int vc5_clk_out_prepare(struct clk_hw *hw)
}
/* Enable the clock buffer */
- regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
- VC5_CLK_OUTPUT_CFG1_EN_CLKBUF,
- VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
+ ret = regmap_set_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
+ if (ret)
+ return ret;
+
if (hwdata->clk_output_cfg0_mask) {
dev_dbg(&vc5->client->dev, "Update output %d mask 0x%0X val 0x%0X\n",
hwdata->num, hwdata->clk_output_cfg0_mask,
hwdata->clk_output_cfg0);
- regmap_update_bits(vc5->regmap,
- VC5_CLK_OUTPUT_CFG(hwdata->num, 0),
- hwdata->clk_output_cfg0_mask,
- hwdata->clk_output_cfg0);
+ ret = regmap_update_bits(vc5->regmap,
+ VC5_CLK_OUTPUT_CFG(hwdata->num, 0),
+ hwdata->clk_output_cfg0_mask,
+ hwdata->clk_output_cfg0);
+ if (ret)
+ return ret;
}
return 0;
@@ -640,8 +665,8 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw)
struct vc5_driver_data *vc5 = hwdata->vc5;
/* Disable the clock buffer */
- regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
- VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, 0);
+ regmap_clear_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1),
+ VC5_CLK_OUTPUT_CFG1_EN_CLKBUF);
}
static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
@@ -656,8 +681,12 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw)
const u8 extclk = VC5_OUT_DIV_CONTROL_SELB_NORM |
VC5_OUT_DIV_CONTROL_SEL_EXT;
unsigned int src;
+ int ret;
+
+ ret = regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
+ if (ret)
+ return 0;
- regmap_read(vc5->regmap, VC5_OUT_DIV_CONTROL(hwdata->num), &src);
src &= mask;
if (src == 0) /* Input mux set to DISABLED */
@@ -725,6 +754,7 @@ static int vc5_map_index_to_output(const enum vc5_model model,
case IDT_VC5_5P49V5935:
case IDT_VC6_5P49V6901:
case IDT_VC6_5P49V6965:
+ case IDT_VC6_5P49V6975:
default:
return n;
}
@@ -819,22 +849,27 @@ static int vc5_update_cap_load(struct device_node *node, struct vc5_driver_data
{
u32 value;
int mapped_value;
+ int ret;
- if (!of_property_read_u32(node, "idt,xtal-load-femtofarads", &value)) {
- mapped_value = vc5_map_cap_value(value);
- if (mapped_value < 0)
- return mapped_value;
-
- /*
- * The mapped_value is really the high 6 bits of
- * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
- * shift the value 2 places.
- */
- regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03, mapped_value << 2);
- regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03, mapped_value << 2);
- }
+ if (of_property_read_u32(node, "idt,xtal-load-femtofarads", &value))
+ return 0;
- return 0;
+ mapped_value = vc5_map_cap_value(value);
+ if (mapped_value < 0)
+ return mapped_value;
+
+ /*
+ * The mapped_value is really the high 6 bits of
+ * VC5_XTAL_X1_LOAD_CAP and VC5_XTAL_X2_LOAD_CAP, so
+ * shift the value 2 places.
+ */
+ ret = regmap_update_bits(vc5->regmap, VC5_XTAL_X1_LOAD_CAP, ~0x03,
+ mapped_value << 2);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(vc5->regmap, VC5_XTAL_X2_LOAD_CAP, ~0x03,
+ mapped_value << 2);
}
static int vc5_update_slew(struct device_node *np_output,
@@ -956,7 +991,10 @@ static int vc5_probe(struct i2c_client *client)
"could not read idt,output-enable-active\n");
}
- regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, src_mask, src_val);
+ ret = regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, src_mask,
+ src_val);
+ if (ret)
+ return ret;
/* Register clock input mux */
memset(&init, 0, sizeof(init));
@@ -1138,7 +1176,7 @@ err_clk:
return ret;
}
-static int vc5_remove(struct i2c_client *client)
+static void vc5_remove(struct i2c_client *client)
{
struct vc5_driver_data *vc5 = i2c_get_clientdata(client);
@@ -1146,8 +1184,6 @@ static int vc5_remove(struct i2c_client *client)
if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL)
clk_unregister_fixed_rate(vc5->pin_xin);
-
- return 0;
}
static int __maybe_unused vc5_suspend(struct device *dev)
@@ -1204,7 +1240,7 @@ static const struct vc5_chip_info idt_5p49v6901_info = {
.model = IDT_VC6_5P49V6901,
.clk_fod_cnt = 4,
.clk_out_cnt = 5,
- .flags = VC5_HAS_PFD_FREQ_DBL,
+ .flags = VC5_HAS_PFD_FREQ_DBL | VC5_HAS_BYPASS_SYNC_BIT,
};
static const struct vc5_chip_info idt_5p49v6965_info = {
@@ -1214,6 +1250,13 @@ static const struct vc5_chip_info idt_5p49v6965_info = {
.flags = VC5_HAS_BYPASS_SYNC_BIT,
};
+static const struct vc5_chip_info idt_5p49v6975_info = {
+ .model = IDT_VC6_5P49V6975,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = VC5_HAS_BYPASS_SYNC_BIT | VC5_HAS_INTERNAL_XTAL,
+};
+
static const struct i2c_device_id vc5_id[] = {
{ "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
{ "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
@@ -1221,6 +1264,7 @@ static const struct i2c_device_id vc5_id[] = {
{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
{ "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
{ "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
+ { "5p49v6975", .driver_data = IDT_VC6_5P49V6975 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vc5_id);
@@ -1232,6 +1276,7 @@ static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
{ .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
{ .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info },
+ { .compatible = "idt,5p49v6975", .data = &idt_5p49v6975_info },
{ },
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
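
Most of the versaclock5 hunks above follow one convention: regmap I/O errors are no longer dropped silently. Ops that return int now propagate the regmap error, while .recalc_rate and .get_parent, which cannot return a negative errno, fall back to 0 on a failed read. A minimal sketch of that convention (hypothetical clock driver, not part of this patch):

	#include <linux/clk-provider.h>
	#include <linux/regmap.h>

	#define EXAMPLE_DIV_REG		0x10	/* hypothetical register */

	struct example_clk {
		struct clk_hw hw;
		struct regmap *regmap;
	};

	static unsigned long example_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
	{
		struct example_clk *priv = container_of(hw, struct example_clk, hw);
		unsigned int div;

		/* No errno path here: report 0 ("rate unknown") on I/O failure. */
		if (regmap_read(priv->regmap, EXAMPLE_DIV_REG, &div))
			return 0;

		return parent_rate / (div ? div : 1);
	}
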
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
new file mode 100644
index 000000000000..8e4f86e852aa
--- /dev/null
+++ b/drivers/clk/clk-versaclock7.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common clock framework driver for the Versaclock7 family of timing devices.
+ *
+ * Copyright (c) 2022 Renesas Electronics Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/swab.h>
+
+/*
+ * 16-bit register address: the lower 8 bits of the register address come
+ * from the offset addr byte and the upper 8 bits come from the page register.
+ */
+#define VC7_PAGE_ADDR 0xFD
+#define VC7_PAGE_WINDOW 256
+#define VC7_MAX_REG 0x364
+
+/* Maximum number of banks supported by VC7 */
+#define VC7_NUM_BANKS 7
+
+/* Maximum number of FODs supported by VC7 */
+#define VC7_NUM_FOD 3
+
+/* Maximum number of IODs supported by VC7 */
+#define VC7_NUM_IOD 4
+
+/* Maximum number of outputs supported by VC7 */
+#define VC7_NUM_OUT 12
+
+/* VCO valid range is 9.5 GHz to 10.7 GHz */
+#define VC7_APLL_VCO_MIN 9500000000UL
+#define VC7_APLL_VCO_MAX 10700000000UL
+
+/* APLL denominator is fixed at 2^27 */
+#define VC7_APLL_DENOMINATOR_BITS 27
+
+/* FOD 1st stage denominator is fixed at 2^34 */
+#define VC7_FOD_DENOMINATOR_BITS 34
+
+/* IOD can operate between 1kHz and 650MHz */
+#define VC7_IOD_RATE_MIN 1000UL
+#define VC7_IOD_RATE_MAX 650000000UL
+#define VC7_IOD_MIN_DIVISOR 14
+#define VC7_IOD_MAX_DIVISOR 0x1ffffff /* 25-bit */
+
+#define VC7_FOD_RATE_MIN 1000UL
+#define VC7_FOD_RATE_MAX 650000000UL
+#define VC7_FOD_1ST_STAGE_RATE_MIN 33000000UL /* 33 MHz */
+#define VC7_FOD_1ST_STAGE_RATE_MAX 650000000UL /* 650 MHz */
+#define VC7_FOD_1ST_INT_MAX 324
+#define VC7_FOD_2ND_INT_MIN 2
+#define VC7_FOD_2ND_INT_MAX 0x1ffff /* 17-bit */
+
+/* VC7 Registers */
+
+#define VC7_REG_XO_CNFG 0x2C
+#define VC7_REG_XO_CNFG_COUNT 4
+#define VC7_REG_XO_IB_H_DIV_SHIFT 24
+#define VC7_REG_XO_IB_H_DIV_MASK GENMASK(28, VC7_REG_XO_IB_H_DIV_SHIFT)
+
+#define VC7_REG_APLL_FB_DIV_FRAC 0x120
+#define VC7_REG_APLL_FB_DIV_FRAC_COUNT 4
+#define VC7_REG_APLL_FB_DIV_FRAC_MASK GENMASK(26, 0)
+
+#define VC7_REG_APLL_FB_DIV_INT 0x124
+#define VC7_REG_APLL_FB_DIV_INT_COUNT 2
+#define VC7_REG_APLL_FB_DIV_INT_MASK GENMASK(9, 0)
+
+#define VC7_REG_APLL_CNFG 0x127
+#define VC7_REG_APLL_EN_DOUBLER BIT(0)
+
+#define VC7_REG_OUT_BANK_CNFG(idx) (0x280 + (0x4 * (idx)))
+#define VC7_REG_OUTPUT_BANK_SRC_MASK GENMASK(2, 0)
+
+#define VC7_REG_FOD_INT_CNFG(idx) (0x1E0 + (0x10 * (idx)))
+#define VC7_REG_FOD_INT_CNFG_COUNT 8
+#define VC7_REG_FOD_1ST_INT_MASK GENMASK(8, 0)
+#define VC7_REG_FOD_2ND_INT_SHIFT 9
+#define VC7_REG_FOD_2ND_INT_MASK GENMASK(25, VC7_REG_FOD_2ND_INT_SHIFT)
+#define VC7_REG_FOD_FRAC_SHIFT 26
+#define VC7_REG_FOD_FRAC_MASK GENMASK_ULL(59, VC7_REG_FOD_FRAC_SHIFT)
+
+#define VC7_REG_IOD_INT_CNFG(idx) (0x1C0 + (0x8 * (idx)))
+#define VC7_REG_IOD_INT_CNFG_COUNT 4
+#define VC7_REG_IOD_INT_MASK GENMASK(24, 0)
+
+#define VC7_REG_ODRV_EN(idx) (0x240 + (0x4 * (idx)))
+#define VC7_REG_OUT_DIS BIT(0)
+
+struct vc7_driver_data;
+static const struct regmap_config vc7_regmap_config;
+
+/* Supported Renesas VC7 models */
+enum vc7_model {
+ VC7_RC21008A,
+};
+
+struct vc7_chip_info {
+ const enum vc7_model model;
+ const unsigned int banks[VC7_NUM_BANKS];
+ const unsigned int num_banks;
+ const unsigned int outputs[VC7_NUM_OUT];
+ const unsigned int num_outputs;
+};
+
+/*
+ * Changing the APLL frequency is currently not supported.
+ * The APLL is treated as an opaque block between the XO and the FOD/IODs, and
+ * its frequency is computed from the current state of the device.
+ */
+struct vc7_apll_data {
+ struct clk *clk;
+ struct vc7_driver_data *vc7;
+ u8 xo_ib_h_div;
+ u8 en_doubler;
+ u16 apll_fb_div_int;
+ u32 apll_fb_div_frac;
+};
+
+struct vc7_fod_data {
+ struct clk_hw hw;
+ struct vc7_driver_data *vc7;
+ unsigned int num;
+ u32 fod_1st_int;
+ u32 fod_2nd_int;
+ u64 fod_frac;
+};
+
+struct vc7_iod_data {
+ struct clk_hw hw;
+ struct vc7_driver_data *vc7;
+ unsigned int num;
+ u32 iod_int;
+};
+
+struct vc7_out_data {
+ struct clk_hw hw;
+ struct vc7_driver_data *vc7;
+ unsigned int num;
+ unsigned int out_dis;
+};
+
+struct vc7_driver_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ const struct vc7_chip_info *chip_info;
+
+ struct clk *pin_xin;
+ struct vc7_apll_data clk_apll;
+ struct vc7_fod_data clk_fod[VC7_NUM_FOD];
+ struct vc7_iod_data clk_iod[VC7_NUM_IOD];
+ struct vc7_out_data clk_out[VC7_NUM_OUT];
+};
+
+struct vc7_bank_src_map {
+ enum vc7_bank_src_type {
+ VC7_FOD,
+ VC7_IOD,
+ } type;
+ union _divider {
+ struct vc7_iod_data *iod;
+ struct vc7_fod_data *fod;
+ } src;
+};
+
+static struct clk_hw *vc7_of_clk_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct vc7_driver_data *vc7 = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= vc7->chip_info->num_outputs)
+ return ERR_PTR(-EINVAL);
+
+ return &vc7->clk_out[idx].hw;
+}
+
+static const unsigned int RC21008A_index_to_output_mapping[] = {
+ 1, 2, 3, 6, 7, 8, 10, 11
+};
+
+static int vc7_map_index_to_output(const enum vc7_model model, const unsigned int i)
+{
+ switch (model) {
+ case VC7_RC21008A:
+ return RC21008A_index_to_output_mapping[i];
+ default:
+ return i;
+ }
+}
+
+/* bank to output mapping, same across all variants */
+static const unsigned int output_bank_mapping[] = {
+ 0, /* Output 0 */
+ 1, /* Output 1 */
+ 2, /* Output 2 */
+ 2, /* Output 3 */
+ 3, /* Output 4 */
+ 3, /* Output 5 */
+ 3, /* Output 6 */
+ 3, /* Output 7 */
+ 4, /* Output 8 */
+ 4, /* Output 9 */
+ 5, /* Output 10 */
+ 6 /* Output 11 */
+};
+
+/**
+ * vc7_64_mul_64_to_128() - Multiply two u64 and return an unsigned 128-bit integer
+ * as an upper and lower part.
+ *
+ * @left: The left argument.
+ * @right: The right argument.
+ * @hi: The upper 64-bits of the 128-bit product.
+ * @lo: The lower 64-bits of the 128-bit product.
+ *
+ * From mul_64_64 in crypto/ecc.c:350 in the linux kernel, accessed in v5.17.2.
+ */
+static void vc7_64_mul_64_to_128(u64 left, u64 right, u64 *hi, u64 *lo)
+{
+ u64 a0 = left & 0xffffffffull;
+ u64 a1 = left >> 32;
+ u64 b0 = right & 0xffffffffull;
+ u64 b1 = right >> 32;
+ u64 m0 = a0 * b0;
+ u64 m1 = a0 * b1;
+ u64 m2 = a1 * b0;
+ u64 m3 = a1 * b1;
+
+ m2 += (m0 >> 32);
+ m2 += m1;
+
+ /* Overflow */
+ if (m2 < m1)
+ m3 += 0x100000000ull;
+
+ *lo = (m0 & 0xffffffffull) | (m2 << 32);
+ *hi = m3 + (m2 >> 32);
+}
+
+/**
+ * vc7_128_div_64_to_64() - Divides a 128-bit uint by a 64-bit divisor, return a 64-bit quotient.
+ *
+ * @numhi: The uppper 64-bits of the dividend.
+ * @numlo: The lower 64-bits of the dividend.
+ * @den: The denominator (divisor).
+ * @r: The remainder, pass NULL if the remainder is not needed.
+ *
+ * Originally from libdivide, modified to use kernel u64/u32 types.
+ *
+ * See https://github.com/ridiculousfish/libdivide/blob/master/libdivide.h#L471.
+ *
+ * Return: The 64-bit quotient of the division.
+ *
+ * In case of overflow of division by zero, max(u64) is returned.
+ */
+static u64 vc7_128_div_64_to_64(u64 numhi, u64 numlo, u64 den, u64 *r)
+{
+ /*
+ * We work in base 2**32.
+ * A uint32 holds a single digit. A uint64 holds two digits.
+ * Our numerator is conceptually [num3, num2, num1, num0].
+ * Our denominator is [den1, den0].
+ */
+ const u64 b = ((u64)1 << 32);
+
+ /* The high and low digits of our computed quotient. */
+ u32 q1, q0;
+
+ /* The normalization shift factor */
+ int shift;
+
+ /*
+ * The high and low digits of our denominator (after normalizing).
+ * Also the low 2 digits of our numerator (after normalizing).
+ */
+ u32 den1, den0, num1, num0;
+
+ /* A partial remainder. */
+ u64 rem;
+
+ /*
+ * The estimated quotient, and its corresponding remainder (unrelated
+ * to true remainder).
+ */
+ u64 qhat, rhat;
+
+ /* Variables used to correct the estimated quotient. */
+ u64 c1, c2;
+
+ /* Check for overflow and divide by 0. */
+ if (numhi >= den) {
+ if (r)
+ *r = ~0ull;
+ return ~0ull;
+ }
+
+ /*
+ * Determine the normalization factor. We multiply den by this, so that
+ * its leading digit is at least half b. In binary this means just
+ * shifting left by the number of leading zeros, so that there's a 1 in
+ * the MSB.
+ *
+ * We also shift numer by the same amount. This cannot overflow because
+ * numhi < den. The expression (-shift & 63) is the same as (64 -
+ * shift), except it avoids the UB of shifting by 64. The funny bitwise
+ * 'and' ensures that numlo does not get shifted into numhi if shift is
+ * 0. clang 11 has an x86 codegen bug here: see LLVM bug 50118. The
+ * sequence below avoids it.
+ */
+ shift = __builtin_clzll(den);
+ den <<= shift;
+ numhi <<= shift;
+ numhi |= (numlo >> (-shift & 63)) & (-(s64)shift >> 63);
+ numlo <<= shift;
+
+ /*
+ * Extract the low digits of the numerator and both digits of the
+ * denominator.
+ */
+ num1 = (u32)(numlo >> 32);
+ num0 = (u32)(numlo & 0xFFFFFFFFu);
+ den1 = (u32)(den >> 32);
+ den0 = (u32)(den & 0xFFFFFFFFu);
+
+ /*
+ * We wish to compute q1 = [n3 n2 n1] / [d1 d0].
+ * Estimate q1 as [n3 n2] / [d1], and then correct it.
+ * Note while qhat may be 2 digits, q1 is always 1 digit.
+ */
+ qhat = div64_u64_rem(numhi, den1, &rhat);
+ c1 = qhat * den0;
+ c2 = rhat * b + num1;
+ if (c1 > c2)
+ qhat -= (c1 - c2 > den) ? 2 : 1;
+ q1 = (u32)qhat;
+
+ /* Compute the true (partial) remainder. */
+ rem = numhi * b + num1 - q1 * den;
+
+ /*
+ * We wish to compute q0 = [rem1 rem0 n0] / [d1 d0].
+ * Estimate q0 as [rem1 rem0] / [d1] and correct it.
+ */
+ qhat = div64_u64_rem(rem, den1, &rhat);
+ c1 = qhat * den0;
+ c2 = rhat * b + num0;
+ if (c1 > c2)
+ qhat -= (c1 - c2 > den) ? 2 : 1;
+ q0 = (u32)qhat;
+
+ /* Return remainder if requested. */
+ if (r)
+ *r = (rem * b + num0 - q0 * den) >> shift;
+ return ((u64)q1 << 32) | q0;
+}
+
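These two helpers exist so the FOD math can multiply and then divide without truncating the 128-bit intermediate product, which is how vc7_calc_fod_1st_stage_rate() uses them further down. A minimal illustration of the pattern (hypothetical helper, not part of this patch):

	/* (a * b) / c, keeping the full 128-bit product of a * b. */
	static u64 vc7_example_mul_div(u64 a, u64 b, u64 c)
	{
		u64 hi, lo;

		vc7_64_mul_64_to_128(a, b, &hi, &lo);
		return vc7_128_div_64_to_64(hi, lo, c, NULL);
	}
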
+static int vc7_get_bank_clk(struct vc7_driver_data *vc7,
+ unsigned int bank_idx,
+ unsigned int output_bank_src,
+ struct vc7_bank_src_map *map)
+{
+ /* Mapping from Table 38 in datasheet */
+ if (bank_idx == 0 || bank_idx == 1) {
+ switch (output_bank_src) {
+ case 0:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[0];
+ return 0;
+ case 1:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[1];
+ return 0;
+ case 4:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[0];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ default:
+ break;
+ }
+ } else if (bank_idx == 2) {
+ switch (output_bank_src) {
+ case 1:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[1];
+ return 0;
+ case 4:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[0];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ default:
+ break;
+ }
+ } else if (bank_idx == 3) {
+ switch (output_bank_src) {
+ case 4:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[0];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ default:
+ break;
+ }
+ } else if (bank_idx == 4) {
+ switch (output_bank_src) {
+ case 0:
+ /* CLKIN1 not supported in this driver */
+ break;
+ case 2:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[2];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ case 7:
+ /* CLKIN0 not supported in this driver */
+ break;
+ default:
+ break;
+ }
+ } else if (bank_idx == 5) {
+ switch (output_bank_src) {
+ case 0:
+ /* CLKIN1 not supported in this driver */
+ break;
+ case 1:
+ /* XIN_REFIN not supported in this driver */
+ break;
+ case 2:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[2];
+ return 0;
+ case 3:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[3];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ case 7:
+ /* CLKIN0 not supported in this driver */
+ break;
+ default:
+ break;
+ }
+ } else if (bank_idx == 6) {
+ switch (output_bank_src) {
+ case 0:
+ /* CLKIN1 not supported in this driver */
+ break;
+ case 2:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[2];
+ return 0;
+ case 3:
+ map->type = VC7_IOD,
+ map->src.iod = &vc7->clk_iod[3];
+ return 0;
+ case 5:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[1];
+ return 0;
+ case 6:
+ map->type = VC7_FOD,
+ map->src.fod = &vc7->clk_fod[2];
+ return 0;
+ case 7:
+ /* CLKIN0 not supported in this driver */
+ break;
+ default:
+ break;
+ }
+ }
+
+ pr_warn("bank_src%d = %d is not supported\n", bank_idx, output_bank_src);
+ return -1;
+}
+
+static int vc7_read_apll(struct vc7_driver_data *vc7)
+{
+ int err;
+ u32 val32;
+ u16 val16;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_XO_CNFG,
+ (u32 *)&val32,
+ VC7_REG_XO_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read XO_CNFG\n");
+ return err;
+ }
+
+ vc7->clk_apll.xo_ib_h_div = (val32 & VC7_REG_XO_IB_H_DIV_MASK)
+ >> VC7_REG_XO_IB_H_DIV_SHIFT;
+
+ err = regmap_read(vc7->regmap,
+ VC7_REG_APLL_CNFG,
+ &val32);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read APLL_CNFG\n");
+ return err;
+ }
+
+ vc7->clk_apll.en_doubler = val32 & VC7_REG_APLL_EN_DOUBLER;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_APLL_FB_DIV_FRAC,
+ (u32 *)&val32,
+ VC7_REG_APLL_FB_DIV_FRAC_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read APLL_FB_DIV_FRAC\n");
+ return err;
+ }
+
+ vc7->clk_apll.apll_fb_div_frac = val32 & VC7_REG_APLL_FB_DIV_FRAC_MASK;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_APLL_FB_DIV_INT,
+ (u16 *)&val16,
+ VC7_REG_APLL_FB_DIV_INT_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read APLL_FB_DIV_INT\n");
+ return err;
+ }
+
+ vc7->clk_apll.apll_fb_div_int = val16 & VC7_REG_APLL_FB_DIV_INT_MASK;
+
+ return 0;
+}
+
+static int vc7_read_fod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u64 val;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_FOD_INT_CNFG(idx),
+ (u64 *)&val,
+ VC7_REG_FOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read FOD%d\n", idx);
+ return err;
+ }
+
+ vc7->clk_fod[idx].fod_1st_int = (val & VC7_REG_FOD_1ST_INT_MASK);
+ vc7->clk_fod[idx].fod_2nd_int =
+ (val & VC7_REG_FOD_2ND_INT_MASK) >> VC7_REG_FOD_2ND_INT_SHIFT;
+ vc7->clk_fod[idx].fod_frac = (val & VC7_REG_FOD_FRAC_MASK)
+ >> VC7_REG_FOD_FRAC_SHIFT;
+
+ return 0;
+}
+
+static int vc7_write_fod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u64 val;
+
+ /*
+ * FOD dividers are part of an atomic group where fod_1st_int,
+ * fod_2nd_int, and fod_frac must be written together. The new divider
+ * is applied when the MSB of fod_frac is written.
+ */
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_FOD_INT_CNFG(idx),
+ (u64 *)&val,
+ VC7_REG_FOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read FOD%d\n", idx);
+ return err;
+ }
+
+ val = u64_replace_bits(val,
+ vc7->clk_fod[idx].fod_1st_int,
+ VC7_REG_FOD_1ST_INT_MASK);
+ val = u64_replace_bits(val,
+ vc7->clk_fod[idx].fod_2nd_int,
+ VC7_REG_FOD_2ND_INT_MASK);
+ val = u64_replace_bits(val,
+ vc7->clk_fod[idx].fod_frac,
+ VC7_REG_FOD_FRAC_MASK);
+
+ err = regmap_bulk_write(vc7->regmap,
+ VC7_REG_FOD_INT_CNFG(idx),
+ (u64 *)&val,
+ sizeof(u64));
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to write FOD%d\n", idx);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vc7_read_iod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u32 val;
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_IOD_INT_CNFG(idx),
+ (u32 *)&val,
+ VC7_REG_IOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read IOD%d\n", idx);
+ return err;
+ }
+
+ vc7->clk_iod[idx].iod_int = (val & VC7_REG_IOD_INT_MASK);
+
+ return 0;
+}
+
+static int vc7_write_iod(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ u32 val;
+
+ /*
+ * IOD divider field is atomic and all bits must be written.
+ * The new divider is applied when the MSB of iod_int is written.
+ */
+
+ err = regmap_bulk_read(vc7->regmap,
+ VC7_REG_IOD_INT_CNFG(idx),
+ (u32 *)&val,
+ VC7_REG_IOD_INT_CNFG_COUNT);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read IOD%d\n", idx);
+ return err;
+ }
+
+ val = u32_replace_bits(val,
+ vc7->clk_iod[idx].iod_int,
+ VC7_REG_IOD_INT_MASK);
+
+ err = regmap_bulk_write(vc7->regmap,
+ VC7_REG_IOD_INT_CNFG(idx),
+ (u32 *)&val,
+ sizeof(u32));
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to write IOD%d\n", idx);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vc7_read_output(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ unsigned int val, out_num;
+
+ out_num = vc7_map_index_to_output(vc7->chip_info->model, idx);
+ err = regmap_read(vc7->regmap,
+ VC7_REG_ODRV_EN(out_num),
+ &val);
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to read ODRV_EN[%d]\n", idx);
+ return err;
+ }
+
+ vc7->clk_out[idx].out_dis = val & VC7_REG_OUT_DIS;
+
+ return 0;
+}
+
+static int vc7_write_output(struct vc7_driver_data *vc7, unsigned int idx)
+{
+ int err;
+ unsigned int out_num;
+
+ out_num = vc7_map_index_to_output(vc7->chip_info->model, idx);
+ err = regmap_write_bits(vc7->regmap,
+ VC7_REG_ODRV_EN(out_num),
+ VC7_REG_OUT_DIS,
+ vc7->clk_out[idx].out_dis);
+
+ if (err) {
+ dev_err(&vc7->client->dev, "failed to write ODRV_EN[%d]\n", idx);
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned long vc7_get_apll_rate(struct vc7_driver_data *vc7)
+{
+ int err;
+ unsigned long xtal_rate;
+ u64 refin_div, apll_rate;
+
+ xtal_rate = clk_get_rate(vc7->pin_xin);
+ err = vc7_read_apll(vc7);
+ if (err) {
+ dev_err(&vc7->client->dev, "unable to read apll\n");
+ return err;
+ }
+
+ /* 0 is bypassed, 1 is reserved */
+ if (vc7->clk_apll.xo_ib_h_div < 2)
+ refin_div = xtal_rate;
+ else
+ refin_div = div64_u64(xtal_rate, vc7->clk_apll.xo_ib_h_div);
+
+ if (vc7->clk_apll.en_doubler)
+ refin_div *= 2;
+
+ /* divider = int + (frac / 2^27) */
+ apll_rate = (refin_div * vc7->clk_apll.apll_fb_div_int) +
+ ((refin_div * vc7->clk_apll.apll_fb_div_frac) >> VC7_APLL_DENOMINATOR_BITS);
+
+ pr_debug("%s - xo_ib_h_div: %u, apll_fb_div_int: %u, apll_fb_div_frac: %u\n",
+ __func__, vc7->clk_apll.xo_ib_h_div, vc7->clk_apll.apll_fb_div_int,
+ vc7->clk_apll.apll_fb_div_frac);
+ pr_debug("%s - refin_div: %llu, apll rate: %llu\n",
+ __func__, refin_div, apll_rate);
+
+ return apll_rate;
+}
+
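A worked example of the computation above, using hypothetical register values (not taken from a real board):

	xtal = 50 MHz, xo_ib_h_div = 2     ->  refin_div = 25 MHz
	en_doubler = 1                     ->  refin_div = 50 MHz
	apll_fb_div_int = 200
	apll_fb_div_frac = 2^26 (i.e. 0.5)

	apll_rate = 50 MHz * 200 + ((50 MHz * 2^26) >> 27)
	          = 10.000 GHz + 25 MHz
	          = 10.025 GHz   (inside the 9.5 - 10.7 GHz VCO range)
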
+static void vc7_calc_iod_divider(unsigned long rate, unsigned long parent_rate,
+ u32 *divider)
+{
+ *divider = DIV_ROUND_UP(parent_rate, rate);
+ if (*divider < VC7_IOD_MIN_DIVISOR)
+ *divider = VC7_IOD_MIN_DIVISOR;
+ if (*divider > VC7_IOD_MAX_DIVISOR)
+ *divider = VC7_IOD_MAX_DIVISOR;
+}
+
+static void vc7_calc_fod_1st_stage(unsigned long rate, unsigned long parent_rate,
+ u32 *div_int, u64 *div_frac)
+{
+ u64 rem;
+
+ *div_int = (u32)div64_u64_rem(parent_rate, rate, &rem);
+ *div_frac = div64_u64(rem << VC7_FOD_DENOMINATOR_BITS, rate);
+}
+
+static unsigned long vc7_calc_fod_1st_stage_rate(unsigned long parent_rate,
+ u32 fod_1st_int, u64 fod_frac)
+{
+ u64 numer, denom, hi, lo, divisor;
+
+ numer = fod_frac;
+ denom = BIT_ULL(VC7_FOD_DENOMINATOR_BITS);
+
+ if (fod_frac) {
+ vc7_64_mul_64_to_128(parent_rate, denom, &hi, &lo);
+ divisor = ((u64)fod_1st_int * denom) + numer;
+ return vc7_128_div_64_to_64(hi, lo, divisor, NULL);
+ }
+
+ return div64_u64(parent_rate, fod_1st_int);
+}
+
+static unsigned long vc7_calc_fod_2nd_stage_rate(unsigned long parent_rate,
+ u32 fod_1st_int, u32 fod_2nd_int, u64 fod_frac)
+{
+ unsigned long fod_1st_stage_rate;
+
+ fod_1st_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate, fod_1st_int, fod_frac);
+
+ if (fod_2nd_int < 2)
+ return fod_1st_stage_rate;
+
+ /*
+ * There is a div-by-2 preceding the 2nd stage integer divider
+ * (not shown on block diagram) so the actual 2nd stage integer
+ * divisor is 2 * N.
+ */
+ return div64_u64(fod_1st_stage_rate >> 1, fod_2nd_int);
+}
+
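A worked example of the two-stage division above, with hypothetical divider values: for a 10 GHz APLL and a 1 MHz output, the first stage alone cannot be used because its output must stay within 33-650 MHz, so the second stage takes over:

	fod_1st_int = 200, fod_frac = 0   ->  first stage: 10 GHz / 200 = 50 MHz
	fod_2nd_int = 25                  ->  output: 50 MHz / (2 * 25) = 1 MHz
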
+static void vc7_calc_fod_divider(unsigned long rate, unsigned long parent_rate,
+ u32 *fod_1st_int, u32 *fod_2nd_int, u64 *fod_frac)
+{
+ unsigned int allow_frac, i, best_frac_i;
+ unsigned long first_stage_rate;
+
+ vc7_calc_fod_1st_stage(rate, parent_rate, fod_1st_int, fod_frac);
+ first_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate, *fod_1st_int, *fod_frac);
+
+ *fod_2nd_int = 0;
+
+ /* Do we need the second stage integer divider? */
+ if (first_stage_rate < VC7_FOD_1ST_STAGE_RATE_MIN) {
+ allow_frac = 0;
+ best_frac_i = VC7_FOD_2ND_INT_MIN;
+
+ for (i = VC7_FOD_2ND_INT_MIN; i <= VC7_FOD_2ND_INT_MAX; i++) {
+ /*
+ * 1) There is a div-by-2 preceding the 2nd stage integer divider
+ * (not shown on block diagram) so the actual 2nd stage integer
+ * divisor is 2 * N.
+ * 2) Attempt to find an integer solution first. This means stepping
+ * through each 2nd stage integer and recalculating the 1st stage
+ * until the 1st stage frequency is out of bounds. If no integer
+ * solution is found, use the best fractional solution.
+ */
+ vc7_calc_fod_1st_stage(parent_rate, rate * 2 * i, fod_1st_int, fod_frac);
+ first_stage_rate = vc7_calc_fod_1st_stage_rate(parent_rate,
+ *fod_1st_int,
+ *fod_frac);
+
+ /* Remember the first viable fractional solution */
+ if (best_frac_i == VC7_FOD_2ND_INT_MIN &&
+ first_stage_rate > VC7_FOD_1ST_STAGE_RATE_MIN) {
+ best_frac_i = i;
+ }
+
+ /* Is the divider viable? Prefer integer solutions over fractional. */
+ if (*fod_1st_int < VC7_FOD_1ST_INT_MAX &&
+ first_stage_rate >= VC7_FOD_1ST_STAGE_RATE_MIN &&
+ (allow_frac || *fod_frac == 0)) {
+ *fod_2nd_int = i;
+ break;
+ }
+
+ /* Ran out of divisors or the 1st stage frequency is out of range */
+ if (i >= VC7_FOD_2ND_INT_MAX ||
+ first_stage_rate > VC7_FOD_1ST_STAGE_RATE_MAX) {
+ allow_frac = 1;
+ i = best_frac_i;
+
+ /* Restore the best frac and rerun the loop for the last time */
+ if (best_frac_i != VC7_FOD_2ND_INT_MIN)
+ i--;
+
+ continue;
+ }
+ }
+ }
+}
+
+static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
+ struct vc7_driver_data *vc7 = fod->vc7;
+ int err;
+ unsigned long fod_rate;
+
+ err = vc7_read_fod(vc7, fod->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error reading registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ pr_debug("%s - %s: parent_rate: %lu\n", __func__, clk_hw_get_name(hw), parent_rate);
+
+ fod_rate = vc7_calc_fod_2nd_stage_rate(parent_rate, fod->fod_1st_int,
+ fod->fod_2nd_int, fod->fod_frac);
+
+ pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
+ __func__, clk_hw_get_name(hw),
+ fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
+
+ return fod_rate;
+}
+
+static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+{
+ struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
+ unsigned long fod_rate;
+
+ pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, *parent_rate);
+
+ vc7_calc_fod_divider(rate, *parent_rate,
+ &fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
+ fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod->fod_2nd_int, fod->fod_frac);
+
+ pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
+ __func__, clk_hw_get_name(hw),
+ fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
+
+ return fod_rate;
+}
+
+static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
+{
+ struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
+ struct vc7_driver_data *vc7 = fod->vc7;
+ unsigned long fod_rate;
+
+ pr_debug("%s - %s: rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, parent_rate);
+
+ if (rate < VC7_FOD_RATE_MIN || rate > VC7_FOD_RATE_MAX) {
+ dev_err(&vc7->client->dev,
+ "requested frequency %lu Hz for %s is out of range\n",
+ rate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ vc7_write_fod(vc7, fod->num);
+
+ fod_rate = vc7_calc_fod_2nd_stage_rate(parent_rate, fod->fod_1st_int,
+ fod->fod_2nd_int, fod->fod_frac);
+
+ pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
+ __func__, clk_hw_get_name(hw),
+ fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
+
+ return 0;
+}
+
+static const struct clk_ops vc7_fod_ops = {
+ .recalc_rate = vc7_fod_recalc_rate,
+ .round_rate = vc7_fod_round_rate,
+ .set_rate = vc7_fod_set_rate,
+};
+
+static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
+ struct vc7_driver_data *vc7 = iod->vc7;
+ int err;
+ unsigned long iod_rate;
+
+ err = vc7_read_iod(vc7, iod->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error reading registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ iod_rate = div64_u64(parent_rate, iod->iod_int);
+
+ pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
+ pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), iod_rate);
+
+ return iod_rate;
+}
+
+static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+{
+ struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
+ unsigned long iod_rate;
+
+ pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, *parent_rate);
+
+ vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(*parent_rate, iod->iod_int);
+
+ pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
+ pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
+
+ return iod_rate;
+}
+
+static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
+{
+ struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
+ struct vc7_driver_data *vc7 = iod->vc7;
+ unsigned long iod_rate;
+
+ pr_debug("%s - %s: rate: %lu, parent_rate: %lu\n",
+ __func__, clk_hw_get_name(hw), rate, parent_rate);
+
+ if (rate < VC7_IOD_RATE_MIN || rate > VC7_IOD_RATE_MAX) {
+ dev_err(&vc7->client->dev,
+ "requested frequency %lu Hz for %s is out of range\n",
+ rate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ vc7_write_iod(vc7, iod->num);
+
+ iod_rate = div64_u64(parent_rate, iod->iod_int);
+
+ pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
+ pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
+
+ return 0;
+}
+
+static const struct clk_ops vc7_iod_ops = {
+ .recalc_rate = vc7_iod_recalc_rate,
+ .round_rate = vc7_iod_round_rate,
+ .set_rate = vc7_iod_set_rate,
+};
+
+static int vc7_clk_out_prepare(struct clk_hw *hw)
+{
+ struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
+ struct vc7_driver_data *vc7 = out->vc7;
+ int err;
+
+ out->out_dis = 0;
+
+ err = vc7_write_output(vc7, out->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error writing registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ pr_debug("%s - %s: clk prepared\n", __func__, clk_hw_get_name(hw));
+
+ return 0;
+}
+
+static void vc7_clk_out_unprepare(struct clk_hw *hw)
+{
+ struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
+ struct vc7_driver_data *vc7 = out->vc7;
+ int err;
+
+ out->out_dis = 1;
+
+ err = vc7_write_output(vc7, out->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error writing registers for %s\n",
+ clk_hw_get_name(hw));
+ return;
+ }
+
+ pr_debug("%s - %s: clk unprepared\n", __func__, clk_hw_get_name(hw));
+}
+
+static int vc7_clk_out_is_enabled(struct clk_hw *hw)
+{
+ struct vc7_out_data *out = container_of(hw, struct vc7_out_data, hw);
+ struct vc7_driver_data *vc7 = out->vc7;
+ int err, is_enabled;
+
+ err = vc7_read_output(vc7, out->num);
+ if (err) {
+ dev_err(&vc7->client->dev, "error reading registers for %s\n",
+ clk_hw_get_name(hw));
+ return err;
+ }
+
+ is_enabled = !out->out_dis;
+
+ pr_debug("%s - %s: is_enabled=%d\n", __func__, clk_hw_get_name(hw), is_enabled);
+
+ return is_enabled;
+}
+
+static const struct clk_ops vc7_clk_out_ops = {
+ .prepare = vc7_clk_out_prepare,
+ .unprepare = vc7_clk_out_unprepare,
+ .is_enabled = vc7_clk_out_is_enabled,
+};
+
+static int vc7_probe(struct i2c_client *client)
+{
+ struct vc7_driver_data *vc7;
+ struct clk_init_data clk_init;
+ struct vc7_bank_src_map bank_src_map;
+ const char *node_name, *apll_name;
+ const char *parent_names[1];
+ unsigned int i, val, bank_idx, out_num;
+ unsigned long apll_rate;
+ int ret;
+
+ vc7 = devm_kzalloc(&client->dev, sizeof(*vc7), GFP_KERNEL);
+ if (!vc7)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, vc7);
+ vc7->client = client;
+ vc7->chip_info = of_device_get_match_data(&client->dev);
+
+ vc7->pin_xin = devm_clk_get(&client->dev, "xin");
+ if (PTR_ERR(vc7->pin_xin) == -EPROBE_DEFER) {
+ return dev_err_probe(&client->dev, -EPROBE_DEFER,
+ "xin not specified\n");
+ }
+
+ vc7->regmap = devm_regmap_init_i2c(client, &vc7_regmap_config);
+ if (IS_ERR(vc7->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(vc7->regmap),
+ "failed to allocate register map\n");
+ }
+
+ if (of_property_read_string(client->dev.of_node, "clock-output-names",
+ &node_name))
+ node_name = client->dev.of_node->name;
+
+ /* Register APLL */
+ apll_rate = vc7_get_apll_rate(vc7);
+ apll_name = kasprintf(GFP_KERNEL, "%s_apll", node_name);
+ vc7->clk_apll.clk = clk_register_fixed_rate(&client->dev, apll_name,
+ __clk_get_name(vc7->pin_xin),
+ 0, apll_rate);
+ kfree(apll_name); /* ccf made a copy of the name */
+ if (IS_ERR(vc7->clk_apll.clk)) {
+ return dev_err_probe(&client->dev, PTR_ERR(vc7->clk_apll.clk),
+ "failed to register apll\n");
+ }
+
+ /* Register FODs */
+ for (i = 0; i < VC7_NUM_FOD; i++) {
+ memset(&clk_init, 0, sizeof(clk_init));
+ clk_init.name = kasprintf(GFP_KERNEL, "%s_fod%d", node_name, i);
+ clk_init.ops = &vc7_fod_ops;
+ clk_init.parent_names = parent_names;
+ parent_names[0] = __clk_get_name(vc7->clk_apll.clk);
+ clk_init.num_parents = 1;
+ vc7->clk_fod[i].num = i;
+ vc7->clk_fod[i].vc7 = vc7;
+ vc7->clk_fod[i].hw.init = &clk_init;
+ ret = devm_clk_hw_register(&client->dev, &vc7->clk_fod[i].hw);
+ if (ret)
+ goto err_clk_register;
+ kfree(clk_init.name); /* ccf made a copy of the name */
+ }
+
+ /* Register IODs */
+ for (i = 0; i < VC7_NUM_IOD; i++) {
+ memset(&clk_init, 0, sizeof(clk_init));
+ clk_init.name = kasprintf(GFP_KERNEL, "%s_iod%d", node_name, i);
+ clk_init.ops = &vc7_iod_ops;
+ clk_init.parent_names = parent_names;
+ parent_names[0] = __clk_get_name(vc7->clk_apll.clk);
+ clk_init.num_parents = 1;
+ vc7->clk_iod[i].num = i;
+ vc7->clk_iod[i].vc7 = vc7;
+ vc7->clk_iod[i].hw.init = &clk_init;
+ ret = devm_clk_hw_register(&client->dev, &vc7->clk_iod[i].hw);
+ if (ret)
+ goto err_clk_register;
+ kfree(clk_init.name); /* ccf made a copy of the name */
+ }
+
+ /* Register outputs */
+ for (i = 0; i < vc7->chip_info->num_outputs; i++) {
+ out_num = vc7_map_index_to_output(vc7->chip_info->model, i);
+
+ /*
+ * This driver does not support remapping FOD/IOD to banks.
+ * The device state is read and the driver is setup to match
+ * the device's existing mapping.
+ */
+ bank_idx = output_bank_mapping[out_num];
+
+ regmap_read(vc7->regmap, VC7_REG_OUT_BANK_CNFG(bank_idx), &val);
+ val &= VC7_REG_OUTPUT_BANK_SRC_MASK;
+
+ memset(&bank_src_map, 0, sizeof(bank_src_map));
+ ret = vc7_get_bank_clk(vc7, bank_idx, val, &bank_src_map);
+ if (ret) {
+ dev_err_probe(&client->dev, ret,
+ "unable to register output %d\n", i);
+ return ret;
+ }
+
+ switch (bank_src_map.type) {
+ case VC7_FOD:
+ parent_names[0] = clk_hw_get_name(&bank_src_map.src.fod->hw);
+ break;
+ case VC7_IOD:
+ parent_names[0] = clk_hw_get_name(&bank_src_map.src.iod->hw);
+ break;
+ }
+
+ memset(&clk_init, 0, sizeof(clk_init));
+ clk_init.name = kasprintf(GFP_KERNEL, "%s_out%d", node_name, i);
+ clk_init.ops = &vc7_clk_out_ops;
+ clk_init.flags = CLK_SET_RATE_PARENT;
+ clk_init.parent_names = parent_names;
+ clk_init.num_parents = 1;
+ vc7->clk_out[i].num = i;
+ vc7->clk_out[i].vc7 = vc7;
+ vc7->clk_out[i].hw.init = &clk_init;
+ ret = devm_clk_hw_register(&client->dev, &vc7->clk_out[i].hw);
+ if (ret)
+ goto err_clk_register;
+ kfree(clk_init.name); /* ccf made a copy of the name */
+ }
+
+ ret = of_clk_add_hw_provider(client->dev.of_node, vc7_of_clk_get, vc7);
+ if (ret) {
+ dev_err_probe(&client->dev, ret, "unable to add clk provider\n");
+ goto err_clk;
+ }
+
+ return ret;
+
+err_clk_register:
+ dev_err_probe(&client->dev, ret,
+ "unable to register %s\n", clk_init.name);
+ kfree(clk_init.name); /* ccf made a copy of the name */
+err_clk:
+ clk_unregister_fixed_rate(vc7->clk_apll.clk);
+ return ret;
+}
+
+static void vc7_remove(struct i2c_client *client)
+{
+ struct vc7_driver_data *vc7 = i2c_get_clientdata(client);
+
+ of_clk_del_provider(client->dev.of_node);
+ clk_unregister_fixed_rate(vc7->clk_apll.clk);
+}
+
+static bool vc7_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == VC7_PAGE_ADDR)
+ return false;
+
+ return true;
+}
+
+static const struct vc7_chip_info vc7_rc21008a_info = {
+ .model = VC7_RC21008A,
+ .num_banks = 6,
+ .num_outputs = 8,
+};
+
+static struct regmap_range_cfg vc7_range_cfg[] = {
+{
+ .range_min = 0,
+ .range_max = VC7_MAX_REG,
+ .selector_reg = VC7_PAGE_ADDR,
+ .selector_mask = 0xFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = VC7_PAGE_WINDOW,
+}};
+
+static const struct regmap_config vc7_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VC7_MAX_REG,
+ .ranges = vc7_range_cfg,
+ .num_ranges = ARRAY_SIZE(vc7_range_cfg),
+ .volatile_reg = vc7_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
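The range table above is what implements the page scheme described at the top of the file: the regmap core writes the page index to VC7_PAGE_ADDR and then performs the 8-bit access at the in-page offset. For example (regmap core behaviour, shown only for illustration):

	unsigned int val;

	/* Virtual register 0x127 (VC7_REG_APLL_CNFG): */
	regmap_read(vc7->regmap, VC7_REG_APLL_CNFG, &val);
	/*
	 * page   = 0x127 / VC7_PAGE_WINDOW = 0x01  -> written to VC7_PAGE_ADDR (0xFD)
	 * offset = 0x127 % VC7_PAGE_WINDOW = 0x27  -> 8-bit register accessed on the bus
	 */
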
+static const struct i2c_device_id vc7_i2c_id[] = {
+ { "rc21008a", VC7_RC21008A },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, vc7_i2c_id);
+
+static const struct of_device_id vc7_of_match[] = {
+ { .compatible = "renesas,rc21008a", .data = &vc7_rc21008a_info },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vc7_of_match);
+
+static struct i2c_driver vc7_i2c_driver = {
+ .driver = {
+ .name = "vc7",
+ .of_match_table = vc7_of_match,
+ },
+ .probe_new = vc7_probe,
+ .remove = vc7_remove,
+ .id_table = vc7_i2c_id,
+};
+module_i2c_driver(vc7_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alex Helms <alexander.helms.jy@renesas.com");
+MODULE_DESCRIPTION("Renesas Versaclock7 common clock framework driver");
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 857217cbcef8..0c3d0cee98c8 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -522,10 +522,10 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
data & pclk->param.reg_clk_mask ? "enabled" :
"disabled");
+ } else {
+ return 1;
}
- if (!pclk->param.csr_reg)
- return 1;
return data & pclk->param.reg_clk_mask ? 1 : 0;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7fc191c15507..dd810bcd2700 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -840,10 +840,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
@@ -2189,7 +2188,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate;
- int ret = 0;
+ int ret;
if (!core)
return 0;
@@ -3462,7 +3461,7 @@ static void clk_core_reparent_orphans_nolock(void)
/*
* We need to use __clk_set_parent_before() and _after() to
- * to properly migrate any prepare/enable count of the orphan
+ * properly migrate any prepare/enable count of the orphan
* clock. This is important for CLK_IS_CRITICAL clocks, which
* are enabled during init but might not have a parent yet.
*/
@@ -3672,7 +3671,6 @@ static int __clk_core_init(struct clk_core *core)
clk_core_reparent_orphans_nolock();
-
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
@@ -4751,32 +4749,6 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
-static int devm_clk_provider_match(struct device *dev, void *res, void *data)
-{
- struct device_node **np = res;
-
- if (WARN_ON(!np || !*np))
- return 0;
-
- return *np == data;
-}
-
-/**
- * devm_of_clk_del_provider() - Remove clock provider registered using devm
- * @dev: Device to whose lifetime the clock provider was bound
- */
-void devm_of_clk_del_provider(struct device *dev)
-{
- int ret;
- struct device_node *np = get_clk_provider_node(dev);
-
- ret = devres_release(dev, devm_of_clk_release_provider,
- devm_clk_provider_match, np);
-
- WARN_ON(ret);
-}
-EXPORT_SYMBOL(devm_of_clk_del_provider);
-
/**
* of_parse_clkspec() - Parse a DT clock specifier for a given device node
* @np: device node to parse clock specifier from
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 67f601a41023..ee37d0be6877 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -165,7 +165,7 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
cla->cl.clk_hw = hw;
if (con_id) {
- strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
+ strscpy(cla->con_id, con_id, sizeof(cla->con_id));
cla->cl.con_id = cla->con_id;
}
@@ -346,46 +346,12 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
}
EXPORT_SYMBOL(clk_hw_register_clkdev);
-static void devm_clkdev_release(struct device *dev, void *res)
+static void devm_clkdev_release(void *res)
{
- clkdev_drop(*(struct clk_lookup **)res);
-}
-
-static int devm_clk_match_clkdev(struct device *dev, void *res, void *data)
-{
- struct clk_lookup **l = res;
-
- return *l == data;
+ clkdev_drop(res);
}
/**
- * devm_clk_release_clkdev - Resource managed clkdev lookup release
- * @dev: device this lookup is bound
- * @con_id: connection ID string on device
- * @dev_id: format string describing device name
- *
- * Drop the clkdev lookup created with devm_clk_hw_register_clkdev.
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_clk_release_clkdev(struct device *dev, const char *con_id,
- const char *dev_id)
-{
- struct clk_lookup *cl;
- int rval;
-
- mutex_lock(&clocks_mutex);
- cl = clk_find(dev_id, con_id);
- mutex_unlock(&clocks_mutex);
-
- WARN_ON(!cl);
- rval = devres_release(dev, devm_clkdev_release,
- devm_clk_match_clkdev, cl);
- WARN_ON(rval);
-}
-EXPORT_SYMBOL(devm_clk_release_clkdev);
-
-/**
* devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw
* @dev: device this lookup is bound
* @hw: struct clk_hw to associate with all clk_lookups
@@ -403,17 +369,13 @@ EXPORT_SYMBOL(devm_clk_release_clkdev);
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id)
{
- int rval = -ENOMEM;
- struct clk_lookup **cl;
-
- cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL);
- if (cl) {
- rval = do_clk_register_clkdev(hw, cl, con_id, dev_id);
- if (!rval)
- devres_add(dev, cl);
- else
- devres_free(cl);
- }
- return rval;
+ struct clk_lookup *cl;
+ int rval;
+
+ rval = do_clk_register_clkdev(hw, &cl, con_id, dev_id);
+ if (rval)
+ return rval;
+
+ return devm_add_action_or_reset(dev, devm_clkdev_release, cl);
}
EXPORT_SYMBOL(devm_clk_hw_register_clkdev);
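
The rewrite above replaces the open-coded devres_alloc()/devres_release() pair with devm_add_action_or_reset(), which registers a cleanup callback that runs on driver detach and, if registration itself fails, runs immediately so no manual unwind is needed. A minimal sketch of the pattern in an unrelated, hypothetical driver:

	#include <linux/device.h>
	#include <linux/slab.h>

	static void example_release(void *data)
	{
		kfree(data);
	}

	static int example_setup(struct device *dev)
	{
		void *buf = kmalloc(64, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* Frees buf on detach, or right away if registration fails. */
		return devm_add_action_or_reset(dev, example_release, buf);
	}
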
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index 11178b79b483..be6f55d37b49 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -8,14 +8,10 @@ obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
obj-$(CONFIG_ARCH_DAVINCI_DM355) += pll-dm355.o
obj-$(CONFIG_ARCH_DAVINCI_DM365) += pll-dm365.o
-obj-$(CONFIG_ARCH_DAVINCI_DM644x) += pll-dm644x.o
-obj-$(CONFIG_ARCH_DAVINCI_DM646x) += pll-dm646x.o
obj-y += psc.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
obj-$(CONFIG_ARCH_DAVINCI_DM355) += psc-dm355.o
obj-$(CONFIG_ARCH_DAVINCI_DM365) += psc-dm365.o
-obj-$(CONFIG_ARCH_DAVINCI_DM644x) += psc-dm644x.o
-obj-$(CONFIG_ARCH_DAVINCI_DM646x) += psc-dm646x.o
endif
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index 77d18276bfe8..4103d605e804 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -510,8 +510,7 @@ da8xx_cfgchip_register_usb0_clk48(struct device *dev,
fck_clk = devm_clk_get(dev, "fck");
if (IS_ERR(fck_clk)) {
- if (PTR_ERR(fck_clk) != -EPROBE_DEFER)
- dev_err(dev, "Missing fck clock\n");
+ dev_err_probe(dev, PTR_ERR(fck_clk), "Missing fck clock\n");
return ERR_CAST(fck_clk);
}
diff --git a/drivers/clk/davinci/pll-dm644x.c b/drivers/clk/davinci/pll-dm644x.c
deleted file mode 100644
index 7650fadfaac8..000000000000
--- a/drivers/clk/davinci/pll-dm644x.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM644X
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/clk/davinci.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info dm644x_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 1,
- .pllm_max = 32,
- .pllout_min_rate = 400000000,
- .pllout_max_rate = 600000000, /* 810MHz @ 1.3V, -810 only */
- .flags = PLL_HAS_CLKMODE | PLL_HAS_POSTDIV,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-
-int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm644x_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm644x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm644x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm644x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
- clk_register_clkdev(clk, "pll1_sysclk5", "dm644x-psc");
-
- clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
- clk_register_clkdev(clk, "pll1_auxclk", "dm644x-psc");
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm644x_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 1,
- .pllm_max = 32,
- .pllout_min_rate = 400000000,
- .pllout_max_rate = 900000000,
- .flags = PLL_HAS_POSTDIV | PLL_POSTDIV_FIXED_DIV,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, 0);
-SYSCLK(2, pll2_sysclk2, pll2_pllen, 4, 0);
-
-int dm644x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- davinci_pll_clk_register(dev, &dm644x_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
-
- davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll-dm646x.c b/drivers/clk/davinci/pll-dm646x.c
deleted file mode 100644
index 26982970df0e..000000000000
--- a/drivers/clk/davinci/pll-dm646x.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM646X
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info dm646x_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 14,
- .pllm_max = 32,
- .flags = PLL_HAS_CLKMODE,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 4, SYSCLK_FIXED_DIV);
-SYSCLK(4, pll1_sysclk4, pll1_pllen, 4, 0);
-SYSCLK(5, pll1_sysclk5, pll1_pllen, 4, 0);
-SYSCLK(6, pll1_sysclk6, pll1_pllen, 4, 0);
-SYSCLK(8, pll1_sysclk8, pll1_pllen, 4, 0);
-SYSCLK(9, pll1_sysclk9, pll1_pllen, 4, 0);
-
-int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm646x_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm646x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm646x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm646x-psc");
- clk_register_clkdev(clk, NULL, "davinci-wdt");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm646x-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
- clk_register_clkdev(clk, "pll1_sysclk5", "dm646x-psc");
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm646x_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(4, 0),
- .pllm_min = 14,
- .pllm_max = 32,
- .flags = 0,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 4, SYSCLK_ALWAYS_ENABLED);
-
-int dm646x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- davinci_pll_clk_register(dev, &dm646x_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 0d750433eb42..f862f5e2b3fc 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -98,7 +98,7 @@
* @hw: clk_hw for the pll
* @base: Base memory address
* @pllm_min: The minimum allowable PLLM[PLLM] value
- * @pllm_max: The maxiumum allowable PLLM[PLLM] value
+ * @pllm_max: The maximum allowable PLLM[PLLM] value
* @pllm_mask: Bitmask for PLLM[PLLM] value
*/
struct davinci_pll_clk {
@@ -890,14 +890,6 @@ static const struct platform_device_id davinci_pll_id_table[] = {
{ .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
{ .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
- { .name = "dm644x-pll1", .driver_data = (kernel_ulong_t)dm644x_pll1_init },
- { .name = "dm644x-pll2", .driver_data = (kernel_ulong_t)dm644x_pll2_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
- { .name = "dm646x-pll1", .driver_data = (kernel_ulong_t)dm646x_pll1_init },
- { .name = "dm646x-pll2", .driver_data = (kernel_ulong_t)dm646x_pll2_init },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index c2a453caa131..1773277bc690 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -130,11 +130,5 @@ int of_da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cf
#ifdef CONFIG_ARCH_DAVINCI_DM355
int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-int dm644x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-int dm646x_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
#endif /* __CLK_DAVINCI_PLL_H___ */
diff --git a/drivers/clk/davinci/psc-dm644x.c b/drivers/clk/davinci/psc-dm644x.c
deleted file mode 100644
index 0cea6e0bd5f0..000000000000
--- a/drivers/clk/davinci/psc-dm644x.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM644x
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
-LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
-LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
-LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
- NULL, "ti-aemif");
-LPSC_CLKDEV1(mmcsd_clkdev, NULL, "dm6441-mmc.0");
-LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
-
-static const struct davinci_lpsc_clk_info dm644x_psc_info[] = {
- LPSC(0, 0, vpss_master, pll1_sysclk3, vpss_master_clkdev, 0),
- LPSC(1, 0, vpss_slave, pll1_sysclk3, vpss_slave_clkdev, 0),
- LPSC(6, 0, emac, pll1_sysclk5, emac_clkdev, 0),
- LPSC(9, 0, usb, pll1_sysclk5, usb_clkdev, 0),
- LPSC(10, 0, ide, pll1_sysclk5, ide_clkdev, 0),
- LPSC(11, 0, vlynq, pll1_sysclk5, NULL, 0),
- LPSC(14, 0, aemif, pll1_sysclk5, aemif_clkdev, 0),
- LPSC(15, 0, mmcsd, pll1_sysclk5, mmcsd_clkdev, 0),
- LPSC(17, 0, asp0, pll1_sysclk5, asp0_clkdev, 0),
- LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
- LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
- LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
- LPSC(21, 0, uart2, pll1_auxclk, uart2_clkdev, 0),
- LPSC(22, 0, spi, pll1_sysclk5, NULL, 0),
- LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
- LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
- LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
- LPSC(26, 0, gpio, pll1_sysclk5, gpio_clkdev, 0),
- LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
- /* REVISIT: why can't this be disabled? */
- LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- /* REVISIT how to disable? */
- LPSC(39, 1, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
- /* REVISIT how to disable? */
- LPSC(40, 1, vicp, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- { }
-};
-
-int dm644x_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm644x_psc_info, 41, base);
-}
-
-static struct clk_bulk_data dm644x_psc_parent_clks[] = {
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk2" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk5" },
- { .id = "pll1_auxclk" },
-};
-
-const struct davinci_psc_init_data dm644x_psc_init_data = {
- .parent_clks = dm644x_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm644x_psc_parent_clks),
- .psc_init = &dm644x_psc_init,
-};
diff --git a/drivers/clk/davinci/psc-dm646x.c b/drivers/clk/davinci/psc-dm646x.c
deleted file mode 100644
index 20012dc7471a..000000000000
--- a/drivers/clk/davinci/psc-dm646x.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM646x
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(ide_clkdev, NULL, "palm_bk3710");
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
- NULL, "ti-aemif");
-LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0");
-LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-
-static const struct davinci_lpsc_clk_info dm646x_psc_info[] = {
- LPSC(0, 0, arm, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- /* REVISIT how to disable? */
- LPSC(1, 0, dsp, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(4, 0, edma_cc, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(5, 0, edma_tc0, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(6, 0, edma_tc1, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(7, 0, edma_tc2, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(8, 0, edma_tc3, pll1_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(10, 0, ide, pll1_sysclk4, ide_clkdev, 0),
- LPSC(14, 0, emac, pll1_sysclk3, emac_clkdev, 0),
- LPSC(16, 0, vpif0, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(17, 0, vpif1, ref_clk, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(21, 0, aemif, pll1_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(22, 0, mcasp0, pll1_sysclk3, mcasp0_clkdev, 0),
- LPSC(23, 0, mcasp1, pll1_sysclk3, mcasp1_clkdev, 0),
- LPSC(26, 0, uart0, aux_clkin, uart0_clkdev, 0),
- LPSC(27, 0, uart1, aux_clkin, uart1_clkdev, 0),
- LPSC(28, 0, uart2, aux_clkin, uart2_clkdev, 0),
- /* REVIST: disabling hangs system */
- LPSC(29, 0, pwm0, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
- /* REVIST: disabling hangs system */
- LPSC(30, 0, pwm1, pll1_sysclk3, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, i2c, pll1_sysclk3, i2c_clkdev, 0),
- LPSC(33, 0, gpio, pll1_sysclk3, gpio_clkdev, 0),
- LPSC(34, 0, timer0, pll1_sysclk3, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(35, 0, timer1, pll1_sysclk3, NULL, 0),
- { }
-};
-
-int dm646x_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm646x_psc_info, 46, base);
-}
-
-static struct clk_bulk_data dm646x_psc_parent_clks[] = {
- { .id = "ref_clk" },
- { .id = "aux_clkin" },
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk2" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk4" },
- { .id = "pll1_sysclk5" },
-};
-
-const struct davinci_psc_init_data dm646x_psc_init_data = {
- .parent_clks = dm646x_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm646x_psc_parent_clks),
- .psc_init = &dm646x_psc_init,
-};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 7387e7f6276e..42a59dbd49c8 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -517,12 +517,6 @@ static const struct platform_device_id davinci_psc_id_table[] = {
#ifdef CONFIG_ARCH_DAVINCI_DM365
{ .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
- { .name = "dm644x-psc", .driver_data = (kernel_ulong_t)&dm644x_psc_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
- { .name = "dm646x-psc", .driver_data = (kernel_ulong_t)&dm646x_psc_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 69070f834391..5e382b675518 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -110,11 +110,5 @@ extern const struct davinci_psc_init_data dm355_psc_init_data;
#ifdef CONFIG_ARCH_DAVINCI_DM365
extern const struct davinci_psc_init_data dm365_psc_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-extern const struct davinci_psc_init_data dm644x_psc_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-extern const struct davinci_psc_init_data dm646x_psc_init_data;
-#endif
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 88b9b9285d22..e8aacb0ee6ac 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -12,6 +12,7 @@ mxc-clk-objs += clk-fixup-div.o
mxc-clk-objs += clk-fixup-mux.o
mxc-clk-objs += clk-frac-pll.o
mxc-clk-objs += clk-gate2.o
+mxc-clk-objs += clk-gate-93.o
mxc-clk-objs += clk-gate-exclusive.o
mxc-clk-objs += clk-pfd.o
mxc-clk-objs += clk-pfdv2.o
diff --git a/drivers/clk/imx/clk-composite-93.c b/drivers/clk/imx/clk-composite-93.c
index b44619aa5ca5..74a66b0203e4 100644
--- a/drivers/clk/imx/clk-composite-93.c
+++ b/drivers/clk/imx/clk-composite-93.c
@@ -9,22 +9,180 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include "clk.h"
+#define TIMEOUT_US 500U
+
#define CCM_DIV_SHIFT 0
#define CCM_DIV_WIDTH 8
#define CCM_MUX_SHIFT 8
#define CCM_MUX_MASK 3
#define CCM_OFF_SHIFT 24
+#define CCM_BUSY_SHIFT 28
+#define STAT_OFFSET 0x4
#define AUTHEN_OFFSET 0x30
#define TZ_NS_SHIFT 9
#define TZ_NS_MASK BIT(9)
+#define WHITE_LIST_SHIFT 16
+
+static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
+ 0, TIMEOUT_US);
+ if (ret)
+ pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 reg;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+
+ if (enable)
+ reg &= ~BIT(gate->bit_idx);
+ else
+ reg |= BIT(gate->bit_idx);
+
+ writel(reg, gate->reg);
+
+ imx93_clk_composite_wait_ready(hw, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
+{
+ imx93_clk_composite_gate_endisable(hw, 1);
+
+ return 0;
+}
+
+static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
+{
+ imx93_clk_composite_gate_endisable(hw, 0);
+}
+
+static const struct clk_ops imx93_clk_composite_gate_ops = {
+ .enable = imx93_clk_composite_gate_enable,
+ .disable = imx93_clk_composite_gate_disable,
+ .is_enabled = clk_gate_is_enabled,
+};
+
+static unsigned long
+imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
+}
+
+static long
+imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+{
+ return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+static int
+imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ return clk_divider_ops.determine_rate(hw, req);
+}
+
+static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ int value;
+ unsigned long flags = 0;
+ u32 val;
+ int ret;
+
+ value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl(divider->reg);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+ val |= (u32)value << divider->shift;
+ writel(val, divider->reg);
+
+ ret = imx93_clk_composite_wait_ready(hw, divider->reg);
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops imx93_clk_composite_divider_ops = {
+ .recalc_rate = imx93_clk_composite_divider_recalc_rate,
+ .round_rate = imx93_clk_composite_divider_round_rate,
+ .determine_rate = imx93_clk_composite_divider_determine_rate,
+ .set_rate = imx93_clk_composite_divider_set_rate,
+};
+
+static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ reg = readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
+ val = val << mux->shift;
+ reg |= val;
+ writel(reg, mux->reg);
+
+ ret = imx93_clk_composite_wait_ready(hw, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return ret;
+}
+
+static int
+imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ return clk_mux_ops.determine_rate(hw, req);
+}
+
+static const struct clk_ops imx93_clk_composite_mux_ops = {
+ .get_parent = imx93_clk_composite_mux_get_parent,
+ .set_parent = imx93_clk_composite_mux_set_parent,
+ .determine_rate = imx93_clk_composite_mux_determine_rate,
+};
+
struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
- int num_parents, void __iomem *reg,
+ int num_parents, void __iomem *reg, u32 domain_id,
unsigned long flags)
{
struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
@@ -33,6 +191,7 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
bool clk_ro = false;
+ u32 authen;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -55,7 +214,8 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
div->lock = &imx_ccm_lock;
div->flags = CLK_DIVIDER_ROUND_CLOSEST;
- if (!(readl(reg + AUTHEN_OFFSET) & TZ_NS_MASK))
+ authen = readl(reg + AUTHEN_OFFSET);
+ if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
clk_ro = true;
if (clk_ro) {
@@ -74,9 +234,10 @@ struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *p
gate->flags = CLK_GATE_SET_TO_DISABLE;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &clk_mux_ops, div_hw,
- &clk_divider_ops, gate_hw,
- &clk_gate_ops, flags | CLK_SET_RATE_NO_REPARENT);
+ mux_hw, &imx93_clk_composite_mux_ops, div_hw,
+ &imx93_clk_composite_divider_ops, gate_hw,
+ &imx93_clk_composite_gate_ops,
+ flags | CLK_SET_RATE_NO_REPARENT);
}
if (IS_ERR(hw))
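
All three of the new i.MX93 composite ops follow the same write-then-poll sequence: update the slice register, then spin on the slice STAT register until the busy bit clears. A minimal sketch of that pattern, reusing the offsets defined in the hunk above; the standalone helper and its name are illustrative, not part of the patch:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define STAT_OFFSET	0x4
#define CCM_BUSY_SHIFT	28
#define TIMEOUT_US	500U

/* Illustrative helper: write a new field value into a CCM slice register,
 * then wait for the slice's busy bit to clear before returning. */
static int ccm_slice_update(void __iomem *slice_base, u32 mask, u32 val)
{
	u32 reg;

	reg = readl(slice_base);
	reg = (reg & ~mask) | (val & mask);
	writel(reg, slice_base);

	/* no delay between polls, give up after 500 us, as in the driver */
	return readl_poll_timeout_atomic(slice_base + STAT_OFFSET, reg,
					 !(reg & BIT(CCM_BUSY_SHIFT)),
					 0, TIMEOUT_US);
}
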
diff --git a/drivers/clk/imx/clk-gate-93.c b/drivers/clk/imx/clk-gate-93.c
new file mode 100644
index 000000000000..ceb56b290394
--- /dev/null
+++ b/drivers/clk/imx/clk-gate-93.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 NXP
+ *
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define DIRECT_OFFSET 0x0
+
+/*
+ * 0b000 - LPCG will be OFF in any CPU mode.
+ * 0b100 - LPCG will be ON in any CPU mode.
+ */
+#define LPM_SETTING_OFF 0x0
+#define LPM_SETTING_ON 0x4
+
+#define LPM_CUR_OFFSET 0x1c
+
+#define AUTHEN_OFFSET 0x30
+#define CPULPM_EN BIT(2)
+#define TZ_NS_SHIFT 9
+#define TZ_NS_MASK BIT(9)
+
+#define WHITE_LIST_SHIFT 16
+
+struct imx93_clk_gate {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u32 bit_idx;
+ u32 val;
+ u32 mask;
+ spinlock_t *lock;
+ unsigned int *share_count;
+};
+
+#define to_imx93_clk_gate(_hw) container_of(_hw, struct imx93_clk_gate, hw)
+
+static void imx93_clk_gate_do_hardware(struct clk_hw *hw, bool enable)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ u32 val;
+
+ val = readl(gate->reg + AUTHEN_OFFSET);
+ if (val & CPULPM_EN) {
+ val = enable ? LPM_SETTING_ON : LPM_SETTING_OFF;
+ writel(val, gate->reg + LPM_CUR_OFFSET);
+ } else {
+ val = readl(gate->reg + DIRECT_OFFSET);
+ val &= ~(gate->mask << gate->bit_idx);
+ if (enable)
+ val |= (gate->val & gate->mask) << gate->bit_idx;
+ writel(val, gate->reg + DIRECT_OFFSET);
+ }
+}
+
+static int imx93_clk_gate_enable(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (gate->share_count && (*gate->share_count)++ > 0)
+ goto out;
+
+ imx93_clk_gate_do_hardware(hw, true);
+out:
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void imx93_clk_gate_disable(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (gate->share_count) {
+ if (WARN_ON(*gate->share_count == 0))
+ goto out;
+ else if (--(*gate->share_count) > 0)
+ goto out;
+ }
+
+ imx93_clk_gate_do_hardware(hw, false);
+out:
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int imx93_clk_gate_reg_is_enabled(struct imx93_clk_gate *gate)
+{
+ u32 val = readl(gate->reg + AUTHEN_OFFSET);
+
+ if (val & CPULPM_EN) {
+ val = readl(gate->reg + LPM_CUR_OFFSET);
+ if (val == LPM_SETTING_ON)
+ return 1;
+ } else {
+ val = readl(gate->reg);
+ if (((val >> gate->bit_idx) & gate->mask) == gate->val)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int imx93_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ ret = imx93_clk_gate_reg_is_enabled(gate);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return ret;
+}
+
+static void imx93_clk_gate_disable_unused(struct clk_hw *hw)
+{
+ struct imx93_clk_gate *gate = to_imx93_clk_gate(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (!gate->share_count || *gate->share_count == 0)
+ imx93_clk_gate_do_hardware(hw, false);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static const struct clk_ops imx93_clk_gate_ops = {
+ .enable = imx93_clk_gate_enable,
+ .disable = imx93_clk_gate_disable,
+ .disable_unused = imx93_clk_gate_disable_unused,
+ .is_enabled = imx93_clk_gate_is_enabled,
+};
+
+static const struct clk_ops imx93_clk_gate_ro_ops = {
+ .is_enabled = imx93_clk_gate_is_enabled,
+};
+
+struct clk_hw *imx93_clk_gate(struct device *dev, const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u32 bit_idx, u32 val,
+ u32 mask, u32 domain_id, unsigned int *share_count)
+{
+ struct imx93_clk_gate *gate;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+ u32 authen;
+
+ gate = kzalloc(sizeof(struct imx93_clk_gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->reg = reg;
+ gate->lock = &imx_ccm_lock;
+ gate->bit_idx = bit_idx;
+ gate->val = val;
+ gate->mask = mask;
+ gate->share_count = share_count;
+
+ init.name = name;
+ init.ops = &imx93_clk_gate_ops;
+ init.flags = flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ gate->hw.init = &init;
+ hw = &gate->hw;
+
+ authen = readl(reg + AUTHEN_OFFSET);
+ if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
+ init.ops = &imx93_clk_gate_ro_ops;
+
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(gate);
+ return ERR_PTR(ret);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(imx93_clk_gate);
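
The share_count pointer lets two clk_hws that sit on the same LPCG slice keep the gate open as long as either of them is enabled; only the first enable and the last disable touch the hardware. A hypothetical registration sketch mirroring the SAI1 entries added to the i.MX93 CCGR table later in this series (the wrapper function, names and counter are illustrative; the 0x9440 offset and the bit_idx/val/mask/domain_id values are taken from that table):

#include "clk.h"

static unsigned int share_count_sai1_example;

/* Hypothetical: two gates registered against the same LPCG slice share
 * one reference count, so the slice is ungated once and re-gated only
 * when the last user goes away. */
static void example_register_sai1_gates(void __iomem *ccm_base)
{
	imx93_clk_gate(NULL, "sai1", "sai1_root", 0,
		       ccm_base + 0x9440, 0, 1, 1, 3,
		       &share_count_sai1_example);
	imx93_clk_gate(NULL, "sai1_ipg_clk", "bus_aon_root", 0,
		       ccm_base + 0x9440, 0, 1, 1, 3,
		       &share_count_sai1_example);
}
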
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index fc1bd23d4583..598f3cf4eba4 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -280,13 +280,13 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
- hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels));
hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
- hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT);
+ hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels));
hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels));
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index e89db568f5a8..652ae58c2735 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -665,8 +665,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
- hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
+ hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0);
hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0);
hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
diff --git a/drivers/clk/imx/clk-imx93.c b/drivers/clk/imx/clk-imx93.c
index f5c9fa40491c..99cff1fd108b 100644
--- a/drivers/clk/imx/clk-imx93.c
+++ b/drivers/clk/imx/clk-imx93.c
@@ -28,6 +28,11 @@ enum clk_sel {
MAX_SEL
};
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+static u32 share_count_mub;
+
static const char *parent_names[MAX_SEL][4] = {
{"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "video_pll"},
{"osc_24m", "sys_pll_pfd0_div2", "sys_pll_pfd1_div2", "sys_pll_pfd2_div2"},
@@ -146,6 +151,7 @@ static const struct imx93_clk_ccgr {
char *parent_name;
u32 off;
unsigned long flags;
+ u32 *shared_count;
} ccgr_array[] = {
{ IMX93_CLK_A55_GATE, "a55", "a55_root", 0x8000, },
/* M33 critical clk for system run */
@@ -158,8 +164,10 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_WDOG5_GATE, "wdog5", "osc_24m", 0x8400, },
{ IMX93_CLK_SEMA1_GATE, "sema1", "bus_aon_root", 0x8440, },
{ IMX93_CLK_SEMA2_GATE, "sema2", "bus_wakeup_root", 0x8480, },
- { IMX93_CLK_MU_A_GATE, "mu_a", "bus_aon_root", 0x84c0, },
- { IMX93_CLK_MU_B_GATE, "mu_b", "bus_aon_root", 0x8500, },
+ { IMX93_CLK_MU1_A_GATE, "mu1_a", "bus_aon_root", 0x84c0, CLK_IGNORE_UNUSED },
+ { IMX93_CLK_MU2_A_GATE, "mu2_a", "bus_wakeup_root", 0x84c0, CLK_IGNORE_UNUSED },
+ { IMX93_CLK_MU1_B_GATE, "mu1_b", "bus_aon_root", 0x8500, 0, &share_count_mub },
+ { IMX93_CLK_MU2_B_GATE, "mu2_b", "bus_wakeup_root", 0x8500, 0, &share_count_mub },
{ IMX93_CLK_EDMA1_GATE, "edma1", "m33_root", 0x8540, },
{ IMX93_CLK_EDMA2_GATE, "edma2", "wakeup_axi_root", 0x8580, },
{ IMX93_CLK_FLEXSPI1_GATE, "flexspi", "flexspi_root", 0x8640, },
@@ -210,9 +218,12 @@ static const struct imx93_clk_ccgr {
{ IMX93_CLK_USDHC1_GATE, "usdhc1", "usdhc1_root", 0x9380, },
{ IMX93_CLK_USDHC2_GATE, "usdhc2", "usdhc2_root", 0x93c0, },
{ IMX93_CLK_USDHC3_GATE, "usdhc3", "usdhc3_root", 0x9400, },
- { IMX93_CLK_SAI1_GATE, "sai1", "sai1_root", 0x9440, },
- { IMX93_CLK_SAI2_GATE, "sai2", "sai2_root", 0x9480, },
- { IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, },
+ { IMX93_CLK_SAI1_GATE, "sai1", "sai1_root", 0x9440, 0, &share_count_sai1},
+ { IMX93_CLK_SAI1_IPG, "sai1_ipg_clk", "bus_aon_root", 0x9440, 0, &share_count_sai1},
+ { IMX93_CLK_SAI2_GATE, "sai2", "sai2_root", 0x9480, 0, &share_count_sai2},
+ { IMX93_CLK_SAI2_IPG, "sai2_ipg_clk", "bus_wakeup_root", 0x9480, 0, &share_count_sai2},
+ { IMX93_CLK_SAI3_GATE, "sai3", "sai3_root", 0x94c0, 0, &share_count_sai3},
+ { IMX93_CLK_SAI3_IPG, "sai3_ipg_clk", "bus_wakeup_root", 0x94c0, 0, &share_count_sai3},
{ IMX93_CLK_MIPI_CSI_GATE, "mipi_csi", "media_apb_root", 0x9580, },
{ IMX93_CLK_MIPI_DSI_GATE, "mipi_dsi", "media_apb_root", 0x95c0, },
{ IMX93_CLK_LVDS_GATE, "lvds", "media_ldb_root", 0x9600, },
@@ -293,16 +304,15 @@ static int imx93_clocks_probe(struct platform_device *pdev)
root = &root_array[i];
clks[root->clk] = imx93_clk_composite_flags(root->name,
parent_names[root->sel],
- 4, base + root->off,
+ 4, base + root->off, 3,
root->flags);
}
for (i = 0; i < ARRAY_SIZE(ccgr_array); i++) {
ccgr = &ccgr_array[i];
- clks[ccgr->clk] = imx_clk_hw_gate4_flags(ccgr->name,
- ccgr->parent_name,
- base + ccgr->off, 0,
- ccgr->flags);
+ clks[ccgr->clk] = imx93_clk_gate(NULL, ccgr->name, ccgr->parent_name,
+ ccgr->flags, base + ccgr->off, 0, 1, 1, 3,
+ ccgr->shared_count);
}
imx_check_clk_hws(clks, IMX93_CLK_END);
@@ -332,7 +342,7 @@ static struct platform_driver imx93_clk_driver = {
.driver = {
.name = "imx93-ccm",
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx93_clk_of_match),
+ .of_match_table = imx93_clk_of_match,
},
};
module_platform_driver(imx93_clk_driver);
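
From the consumer side, the shared count means a driver can enable the functional and the IPG clock of an SAI instance independently while the single LPCG slice is toggled only once. A small sketch under the assumption that the consumer's clock bindings map "mclk" and "ipg" to IMX93_CLK_SAI1_GATE and IMX93_CLK_SAI1_IPG; the function and those bindings are illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_sai1_enable(struct device *dev)
{
	struct clk *mclk, *ipg;
	int ret;

	mclk = devm_clk_get(dev, "mclk");	/* assumed: IMX93_CLK_SAI1_GATE */
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	ipg = devm_clk_get(dev, "ipg");		/* assumed: IMX93_CLK_SAI1_IPG */
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	ret = clk_prepare_enable(ipg);		/* first enable ungates the LPCG */
	if (ret)
		return ret;

	ret = clk_prepare_enable(mclk);		/* only the share count changes */
	if (ret)
		clk_disable_unprepare(ipg);

	return ret;
}
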
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index c56e406138db..1e6870f3671f 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -695,7 +695,11 @@ struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
pr_warn("%s: failed to attached the power domain %d\n",
name, ret);
- platform_device_add(pdev);
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
/* For API backwards compatiblilty, simply return NULL for success */
return NULL;
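
The clk-scu fix above follows the standard error path for platform_device_add(): once allocation has succeeded, a failed add must be rolled back with platform_device_put() so the device's reference is dropped, rather than freeing it directly. A generic sketch of that pattern (the helper name is illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative error handling for platform_device_add(): on failure the
 * device must be released with platform_device_put(), not kfree(). */
static struct platform_device *example_create_child(const char *name, int id)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, id);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);	/* drops the initial reference */
		return ERR_PTR(ret);
	}

	return pdev;
}
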
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 5061a06468df..dd49f90110e8 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -445,11 +445,16 @@ struct clk_hw *imx93_clk_composite_flags(const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
+ u32 domain_id,
unsigned long flags);
-#define imx93_clk_composite(name, parent_names, num_parents, reg) \
- imx93_clk_composite_flags(name, parent_names, num_parents, reg, \
+#define imx93_clk_composite(name, parent_names, num_parents, reg, domain_id) \
+	imx93_clk_composite_flags(name, parent_names, num_parents, reg, domain_id, \
CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+struct clk_hw *imx93_clk_gate(struct device *dev, const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u32 bit_idx, u32 val,
+ u32 mask, u32 domain_id, unsigned int *share_count);
+
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 201bf6e6b6e0..d5544cbc5c48 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -101,15 +101,11 @@ static bool ingenic_tcu_enable_regs(struct clk_hw *hw)
bool enabled = false;
/*
- * If the SoC has no global TCU clock, we must ungate the channel's
- * clock to be able to access its registers.
- * If we have a TCU clock, it will be enabled automatically as it has
- * been attached to the regmap.
+ * According to the programming manual, a timer channel's registers can
+ * only be accessed when the channel's stop bit is clear.
*/
- if (!tcu->clk) {
- enabled = !!ingenic_tcu_is_enabled(hw);
- regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
- }
+ enabled = !!ingenic_tcu_is_enabled(hw);
+ regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit));
return enabled;
}
@@ -120,8 +116,7 @@ static void ingenic_tcu_disable_regs(struct clk_hw *hw)
const struct ingenic_tcu_clk_info *info = tcu_clk->info;
struct ingenic_tcu *tcu = tcu_clk->tcu;
- if (!tcu->clk)
- regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
+ regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit));
}
static u8 ingenic_tcu_get_parent(struct clk_hw *hw)
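
The new comment captures the hardware rule behind this change: a TCU channel's registers are reachable only while its stop bit is clear, whether or not the SoC exposes a global TCU clock. A sketch of the access bracket the two helpers now implement unconditionally (the wrapper function is illustrative; the TSCR/TSSR registers come from the Ingenic TCU definitions):

#include <linux/bits.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/regmap.h>

/* Illustrative bracket around a channel register access. */
static void example_tcu_channel_access(struct regmap *map, unsigned int gate_bit)
{
	/* TSCR: clear the stop bit so the channel registers are clocked */
	regmap_write(map, TCU_REG_TSCR, BIT(gate_bit));

	/* ... read or modify the channel registers here ... */

	/* TSSR: set the stop bit again once the access is done */
	regmap_write(map, TCU_REG_TSSR, BIT(gate_bit));
}
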
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index d5936cfb3bee..843cea0c7a44 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -259,6 +259,43 @@ config COMMON_CLK_MT6779_AUDSYS
help
This driver supports Mediatek MT6779 audsys clocks.
+config COMMON_CLK_MT6795
+ tristate "Clock driver for MediaTek MT6795"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT6795 basic clocks and clocks
+ required for various peripherals found on MediaTek.
+
+config COMMON_CLK_MT6795_MFGCFG
+ tristate "Clock driver for MediaTek MT6795 mfgcfg"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 mfgcfg clocks.
+
+config COMMON_CLK_MT6795_MMSYS
+ tristate "Clock driver for MediaTek MT6795 mmsys"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 mmsys clocks.
+
+config COMMON_CLK_MT6795_VDECSYS
+ tristate "Clock driver for MediaTek MT6795 VDECSYS"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 vdecsys clocks.
+
+config COMMON_CLK_MT6795_VENCSYS
+ tristate "Clock driver for MediaTek MT6795 VENCSYS"
+ depends on COMMON_CLK_MT6795
+ default COMMON_CLK_MT6795
+ help
+ This driver supports MediaTek MT6795 vencsys clocks.
+
config COMMON_CLK_MT6797
bool "Clock driver for MediaTek MT6797"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -608,6 +645,56 @@ config COMMON_CLK_MT8195
help
This driver supports MediaTek MT8195 clocks.
+config COMMON_CLK_MT8365
+ tristate "Clock driver for MediaTek MT8365"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ help
+ This driver supports MediaTek MT8365 basic clocks.
+
+config COMMON_CLK_MT8365_APU
+ tristate "Clock driver for MediaTek MT8365 apu"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 apu clocks.
+
+config COMMON_CLK_MT8365_CAM
+ tristate "Clock driver for MediaTek MT8365 cam"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 cam clocks.
+
+config COMMON_CLK_MT8365_MFG
+ tristate "Clock driver for MediaTek MT8365 mfg"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 mfg clocks.
+
+config COMMON_CLK_MT8365_MMSYS
+ tristate "Clock driver for MediaTek MT8365 mmsys"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 mmsys clocks.
+
+config COMMON_CLK_MT8365_VDEC
+ tristate "Clock driver for MediaTek MT8365 vdec"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 vdec clocks.
+
+config COMMON_CLK_MT8365_VENC
+ tristate "Clock driver for MediaTek MT8365 venc"
+ depends on COMMON_CLK_MT8365
+ default COMMON_CLK_MT8365
+ help
+ This driver supports MediaTek MT8365 venc clocks.
+
config COMMON_CLK_MT8516
bool "Clock driver for MediaTek MT8516"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index caf2ce93d666..ea3b73240303 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -17,6 +17,12 @@ obj-$(CONFIG_COMMON_CLK_MT6779_VDECSYS) += clk-mt6779-vdec.o
obj-$(CONFIG_COMMON_CLK_MT6779_VENCSYS) += clk-mt6779-venc.o
obj-$(CONFIG_COMMON_CLK_MT6779_MFGCFG) += clk-mt6779-mfg.o
obj-$(CONFIG_COMMON_CLK_MT6779_AUDSYS) += clk-mt6779-aud.o
+obj-$(CONFIG_COMMON_CLK_MT6795) += clk-mt6795-apmixedsys.o clk-mt6795-infracfg.o \
+ clk-mt6795-pericfg.o clk-mt6795-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT6795_MFGCFG) += clk-mt6795-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT6795_MMSYS) += clk-mt6795-mm.o
+obj-$(CONFIG_COMMON_CLK_MT6795_VDECSYS) += clk-mt6795-vdecsys.o
+obj-$(CONFIG_COMMON_CLK_MT6795_VENCSYS) += clk-mt6795-vencsys.o
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
@@ -97,5 +103,12 @@ obj-$(CONFIG_COMMON_CLK_MT8195) += clk-mt8195-apmixedsys.o clk-mt8195-topckgen.o
clk-mt8195-venc.o clk-mt8195-vpp0.o clk-mt8195-vpp1.o \
clk-mt8195-wpe.o clk-mt8195-imp_iic_wrap.o \
clk-mt8195-apusys_pll.o
+obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365.o
+obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
+obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8365_MFG) += clk-mt8365-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8365_MMSYS) += clk-mt8365-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8365_VDEC) += clk-mt8365-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8365_VENC) += clk-mt8365-venc.o
obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c
index fc3d4146f482..60e34f124250 100644
--- a/drivers/clk/mediatek/clk-apmixed.c
+++ b/drivers/clk/mediatek/clk-apmixed.c
@@ -70,7 +70,7 @@ static const struct clk_ops mtk_ref2usb_tx_ops = {
.unprepare = mtk_ref2usb_tx_unprepare,
};
-struct clk_hw * __init mtk_clk_register_ref2usb_tx(const char *name,
+struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg)
{
struct mtk_ref2usb_tx *tx;
@@ -98,5 +98,15 @@ struct clk_hw * __init mtk_clk_register_ref2usb_tx(const char *name,
return &tx->hw;
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_ref2usb_tx);
+
+void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw)
+{
+ struct mtk_ref2usb_tx *tx = to_mtk_ref2usb_tx(hw);
+
+ clk_hw_unregister(hw);
+ kfree(tx);
+}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_ref2usb_tx);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 2b5d48591738..25618eff6f2a 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -150,6 +150,7 @@ err:
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_cpumuxes);
void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
struct clk_hw_onecell_data *clk_data)
@@ -166,5 +167,6 @@ void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num,
clk_data->hws[mux->id] = ERR_PTR(-ENOENT);
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_cpumuxes);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 421806236228..0c867136e49d 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -261,6 +261,7 @@ err:
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gates_with_dev);
int mtk_clk_register_gates(struct device_node *node,
const struct mtk_gate *clks, int num,
diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
index 662a8ab3fbb1..435ed4819d56 100644
--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
@@ -94,33 +94,23 @@ static const struct mtk_gate bdp_clks[] = {
GATE_BDP1(CLK_BDP_HDMI_MON, "hdmi_mon", "hdmi_0_pll340m", 16),
};
-static const struct of_device_id of_match_clk_mt2701_bdp[] = {
- { .compatible = "mediatek,mt2701-bdpsys", },
- {}
+static const struct mtk_clk_desc bdp_desc = {
+ .clks = bdp_clks,
+ .num_clks = ARRAY_SIZE(bdp_clks),
};
-static int clk_mt2701_bdp_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_BDP_NR);
-
- mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_bdp[] = {
+ {
+ .compatible = "mediatek,mt2701-bdpsys",
+ .data = &bdp_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt2701_bdp_drv = {
- .probe = clk_mt2701_bdp_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-bdp",
.of_match_table = of_match_clk_mt2701_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
index c4f3cd26df60..7e53deb7f990 100644
--- a/drivers/clk/mediatek/clk-mt2701-img.c
+++ b/drivers/clk/mediatek/clk-mt2701-img.c
@@ -36,33 +36,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_VENC, "img_venc", "mm_sel", 9),
};
-static const struct of_device_id of_match_clk_mt2701_img[] = {
- { .compatible = "mediatek,mt2701-imgsys", },
- {}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
};
-static int clk_mt2701_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_img[] = {
+ {
+ .compatible = "mediatek,mt2701-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt2701_img_drv = {
- .probe = clk_mt2701_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-img",
.of_match_table = of_match_clk_mt2701_img,
diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
index a2f18117f27a..d3089da0ab62 100644
--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
@@ -47,33 +47,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0),
};
-static const struct of_device_id of_match_clk_mt2701_vdec[] = {
- { .compatible = "mediatek,mt2701-vdecsys", },
- {}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
};
-static int clk_mt2701_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt2701_vdec[] = {
+ {
+ .compatible = "mediatek,mt2701-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt2701_vdec_drv = {
- .probe = clk_mt2701_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2701-vdec",
.of_match_table = of_match_clk_mt2701_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
index 9acab4357133..684d03e9f6de 100644
--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -58,33 +58,23 @@ static const struct mtk_gate bdp_clks[] = {
GATE_BDP(CLK_BDP_TVD_CBUS, "bdp_tvd_cbus", "mm_sel", 30),
};
-static int clk_mt2712_bdp_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_BDP_NR_CLK);
-
- mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc bdp_desc = {
+ .clks = bdp_clks,
+ .num_clks = ARRAY_SIZE(bdp_clks),
+};
static const struct of_device_id of_match_clk_mt2712_bdp[] = {
- { .compatible = "mediatek,mt2712-bdpsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-bdpsys",
+ .data = &bdp_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_bdp_drv = {
- .probe = clk_mt2712_bdp_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-bdp",
.of_match_table = of_match_clk_mt2712_bdp,
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
index 5cc143e65e42..335049cdc856 100644
--- a/drivers/clk/mediatek/clk-mt2712-img.c
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -36,33 +36,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_CAM_SV2_EN, "img_cam_sv2_en", "mm_sel", 11),
};
-static int clk_mt2712_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
static const struct of_device_id of_match_clk_mt2712_img[] = {
- { .compatible = "mediatek,mt2712-imgsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_img_drv = {
- .probe = clk_mt2712_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-img",
.of_match_table = of_match_clk_mt2712_img,
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
index 31fc30370d98..07ba7c5e80af 100644
--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -32,33 +32,23 @@ static const struct mtk_gate jpgdec_clks[] = {
GATE_JPGDEC(CLK_JPGDEC_JPGDEC, "jpgdec_jpgdec", "jpgdec_sel", 4),
};
-static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_JPGDEC_NR_CLK);
-
- mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc jpgdec_desc = {
+ .clks = jpgdec_clks,
+ .num_clks = ARRAY_SIZE(jpgdec_clks),
+};
static const struct of_device_id of_match_clk_mt2712_jpgdec[] = {
- { .compatible = "mediatek,mt2712-jpgdecsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-jpgdecsys",
+ .data = &jpgdec_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_jpgdec_drv = {
- .probe = clk_mt2712_jpgdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-jpgdec",
.of_match_table = of_match_clk_mt2712_jpgdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
index a4d09675bf18..42f8cf3ecf4c 100644
--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -31,33 +31,23 @@ static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
};
-static int clk_mt2712_mfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
-
- mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
static const struct of_device_id of_match_clk_mt2712_mfg[] = {
- { .compatible = "mediatek,mt2712-mfgcfg", },
- {}
+ {
+ .compatible = "mediatek,mt2712-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_mfg_drv = {
- .probe = clk_mt2712_mfg_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-mfg",
.of_match_table = of_match_clk_mt2712_mfg,
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
index af13f43dd831..6296ed5c5b55 100644
--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -50,33 +50,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1(CLK_VDEC_IMGRZ_CKEN, "vdec_imgrz_cken", "vdec_sel", 1),
};
-static int clk_mt2712_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
static const struct of_device_id of_match_clk_mt2712_vdec[] = {
- { .compatible = "mediatek,mt2712-vdecsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_vdec_drv = {
- .probe = clk_mt2712_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-vdec",
.of_match_table = of_match_clk_mt2712_vdec,
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
index abc08a029753..b9bfc35de629 100644
--- a/drivers/clk/mediatek/clk-mt2712-venc.c
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -33,33 +33,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SMI_LARB6, "venc_smi_larb6", "jpgdec_sel", 12),
};
-static int clk_mt2712_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r != 0)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
static const struct of_device_id of_match_clk_mt2712_venc[] = {
- { .compatible = "mediatek,mt2712-vencsys", },
- {}
+ {
+ .compatible = "mediatek,mt2712-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt2712_venc_drv = {
- .probe = clk_mt2712_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt2712-venc",
.of_match_table = of_match_clk_mt2712_venc,
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
index 9c6e9caad597..0aa6c0d352ca 100644
--- a/drivers/clk/mediatek/clk-mt6765-audio.c
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -64,33 +64,23 @@ static const struct mtk_gate audio_clks[] = {
"audio_ck", 7),
};
-static int clk_mt6765_audio_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
-
- mtk_clk_register_gates(node, audio_clks,
- ARRAY_SIZE(audio_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
+};
static const struct of_device_id of_match_clk_mt6765_audio[] = {
- { .compatible = "mediatek,mt6765-audsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-audsys",
+ .data = &audio_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_audio_drv = {
- .probe = clk_mt6765_audio_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-audio",
.of_match_table = of_match_clk_mt6765_audio,
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
index 2586d3ac4cd4..25f2bef38126 100644
--- a/drivers/clk/mediatek/clk-mt6765-cam.c
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -39,32 +39,23 @@ static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_CCU, "cam_ccu", "mm_ck", 12),
};
-static int clk_mt6765_cam_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
-
- mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
static const struct of_device_id of_match_clk_mt6765_cam[] = {
- { .compatible = "mediatek,mt6765-camsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-camsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_cam_drv = {
- .probe = clk_mt6765_cam_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-cam",
.of_match_table = of_match_clk_mt6765_cam,
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
index 8cc95b98921e..a62303ef4f41 100644
--- a/drivers/clk/mediatek/clk-mt6765-img.c
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -35,32 +35,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_RSC, "img_rsc", "mm_ck", 5),
};
-static int clk_mt6765_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
static const struct of_device_id of_match_clk_mt6765_img[] = {
- { .compatible = "mediatek,mt6765-imgsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_img_drv = {
- .probe = clk_mt6765_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-img",
.of_match_table = of_match_clk_mt6765_img,
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
index c816e26a95f9..25c829fc3866 100644
--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -32,33 +32,23 @@ static const struct mtk_gate mipi0a_clks[] = {
"mipi0a_csr_0a", "f_fseninf_ck", 1),
};
-static int clk_mt6765_mipi0a_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MIPI0A_NR_CLK);
-
- mtk_clk_register_gates(node, mipi0a_clks,
- ARRAY_SIZE(mipi0a_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc mipi0a_desc = {
+ .clks = mipi0a_clks,
+ .num_clks = ARRAY_SIZE(mipi0a_clks),
+};
static const struct of_device_id of_match_clk_mt6765_mipi0a[] = {
- { .compatible = "mediatek,mt6765-mipi0a", },
- {}
+ {
+ .compatible = "mediatek,mt6765-mipi0a",
+ .data = &mipi0a_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_mipi0a_drv = {
- .probe = clk_mt6765_mipi0a_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mipi0a",
.of_match_table = of_match_clk_mt6765_mipi0a,
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
index ee6d3b859a6c..bda774668a36 100644
--- a/drivers/clk/mediatek/clk-mt6765-mm.c
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -61,32 +61,23 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM(CLK_MM_F26M_HRTWT, "mm_hrtwt", "f_f26m_ck", 29),
};
-static int clk_mt6765_mm_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc mm_desc = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
static const struct of_device_id of_match_clk_mt6765_mm[] = {
- { .compatible = "mediatek,mt6765-mmsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-mmsys",
+ .data = &mm_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_mm_drv = {
- .probe = clk_mt6765_mm_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-mm",
.of_match_table = of_match_clk_mt6765_mm,
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
index d8045979d48a..2bc1fbde87da 100644
--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -34,33 +34,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_SET3_VDEC, "venc_set3_vdec", "mm_ck", 12),
};
-static int clk_mt6765_vcodec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks,
- ARRAY_SIZE(venc_clks), clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-
- return r;
-}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
static const struct of_device_id of_match_clk_mt6765_vcodec[] = {
- { .compatible = "mediatek,mt6765-vcodecsys", },
- {}
+ {
+ .compatible = "mediatek,mt6765-vcodecsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6765_vcodec_drv = {
- .probe = clk_mt6765_vcodec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6765-vcodec",
.of_match_table = of_match_clk_mt6765_vcodec,
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 97e44abb7e87..6e473ae1fd90 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -89,26 +89,23 @@ static const struct mtk_gate audio_clks[] = {
"audio_h_sel", 31),
};
-static const struct of_device_id of_match_clk_mt6779_aud[] = {
- { .compatible = "mediatek,mt6779-audio", },
- {}
+static const struct mtk_clk_desc audio_desc = {
+ .clks = audio_clks,
+ .num_clks = ARRAY_SIZE(audio_clks),
};
-static int clk_mt6779_aud_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
-
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_aud[] = {
+ {
+ .compatible = "mediatek,mt6779-audio",
+ .data = &audio_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_aud_drv = {
- .probe = clk_mt6779_aud_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-aud",
.of_match_table = of_match_clk_mt6779_aud,
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 9c5117aae146..7be3db90fa4a 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -38,26 +38,23 @@ static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_FAKE_ENG, "camsys_fake_eng", "cam_sel", 14),
};
-static const struct of_device_id of_match_clk_mt6779_cam[] = {
- { .compatible = "mediatek,mt6779-camsys", },
- {}
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
};
-static int clk_mt6779_cam_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
-
- mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_cam[] = {
+ {
+ .compatible = "mediatek,mt6779-camsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_cam_drv = {
- .probe = clk_mt6779_cam_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-cam",
.of_match_table = of_match_clk_mt6779_cam,
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 801271477d46..9bc51fc82dbd 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -30,26 +30,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_WPE_A, "imgsys_wpe_a", "img_sel", 7),
};
-static const struct of_device_id of_match_clk_mt6779_img[] = {
- { .compatible = "mediatek,mt6779-imgsys", },
- {}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
};
-static int clk_mt6779_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_img[] = {
+ {
+ .compatible = "mediatek,mt6779-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_img_drv = {
- .probe = clk_mt6779_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-img",
.of_match_table = of_match_clk_mt6779_img,
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index f67814ca7dfb..92e9d1ade422 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -32,26 +32,23 @@ static const struct mtk_gate ipe_clks[] = {
GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "ipe_sel", 6),
};
-static const struct of_device_id of_match_clk_mt6779_ipe[] = {
- { .compatible = "mediatek,mt6779-ipesys", },
- {}
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
};
-static int clk_mt6779_ipe_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPE_NR_CLK);
-
- mtk_clk_register_gates(node, ipe_clks, ARRAY_SIZE(ipe_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_ipe[] = {
+ {
+ .compatible = "mediatek,mt6779-ipesys",
+ .data = &ipe_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_ipe_drv = {
- .probe = clk_mt6779_ipe_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-ipe",
.of_match_table = of_match_clk_mt6779_ipe,
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index fc7387b59758..efc793a1969a 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -27,26 +27,23 @@ static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFGCFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
};
-static int clk_mt6779_mfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_MFGCFG_NR_CLK);
-
- mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
static const struct of_device_id of_match_clk_mt6779_mfg[] = {
- { .compatible = "mediatek,mt6779-mfgcfg", },
- {}
+ {
+ .compatible = "mediatek,mt6779-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt6779_mfg_drv = {
- .probe = clk_mt6779_mfg_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-mfg",
.of_match_table = of_match_clk_mt6779_mfg,
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index 7e195b082e86..3209a6518d5b 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -39,26 +39,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1_cken", "vdec_sel", 0),
};
-static const struct of_device_id of_match_clk_mt6779_vdec[] = {
- { .compatible = "mediatek,mt6779-vdecsys", },
- {}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
};
-static int clk_mt6779_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_GCON_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_vdec[] = {
+ {
+ .compatible = "mediatek,mt6779-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_vdec_drv = {
- .probe = clk_mt6779_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-vdec",
.of_match_table = of_match_clk_mt6779_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index 573efa87c9bd..c25035c0f334 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -30,26 +30,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC_I(CLK_VENC_GCON_GALS, "venc_gals", "venc_sel", 28),
};
-static const struct of_device_id of_match_clk_mt6779_venc[] = {
- { .compatible = "mediatek,mt6779-vencsys", },
- {}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
};
-static int clk_mt6779_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_GCON_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct of_device_id of_match_clk_mt6779_venc[] = {
+ {
+ .compatible = "mediatek,mt6779-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6779_venc_drv = {
- .probe = clk_mt6779_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6779-venc",
.of_match_table = of_match_clk_mt6779_venc,
diff --git a/drivers/clk/mediatek/clk-mt6795-apmixedsys.c b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
new file mode 100644
index 000000000000..59761c72d3bc
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-apmixedsys.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define REG_REF2USB 0x8
+#define REG_AP_PLL_CON7 0x1c
+ #define MD1_MTCMOS_OFF BIT(0)
+ #define MD1_MEM_OFF BIT(1)
+ #define MD1_CLK_OFF BIT(4)
+ #define MD1_ISO_OFF BIT(8)
+
+#define MT6795_PLL_FMAX (3000UL * MHZ)
+#define MT6795_CON0_EN BIT(0)
+#define MT6795_CON0_RST_BAR BIT(24)
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = MT6795_CON0_EN | _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = MT6795_CON0_RST_BAR, \
+ .fmax = MT6795_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = NULL, \
+ .pll_en_bit = 0, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMCA53PLL, "armca53pll", 0x200, 0x20c, 0, PLL_AO,
+ 21, 0x204, 24, 0x0, 0x204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR,
+ 21, 0x220, 4, 0x0, 0x224, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000101, HAVE_RST_BAR,
+ 7, 0x230, 4, 0x0, 0x234, 14),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0, 0, 21, 0x244, 24, 0x0, 0x244, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0, 0, 21, 0x250, 4, 0x0, 0x254, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0, 0, 21, 0x260, 4, 0x0, 0x264, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0, 0, 21, 0x270, 4, 0x0, 0x274, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0, 0, 21, 0x280, 4, 0x0, 0x284, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0, 0, 21, 0x290, 4, 0x0, 0x294, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0, 0, 31, 0x2a0, 4, 0x2a8, 0x2a4, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2bc, 0x2b8, 0),
+};
+
+static void clk_mt6795_apmixed_setup_md1(void __iomem *base)
+{
+ void __iomem *reg = base + REG_AP_PLL_CON7;
+
+ /* Turn on MD1 internal clock */
+ writel(readl(reg) & ~MD1_CLK_OFF, reg);
+
+ /* Unlock MD1's MTCMOS power path */
+ writel(readl(reg) & ~MD1_MTCMOS_OFF, reg);
+
+ /* Turn on ISO */
+ writel(readl(reg) & ~MD1_ISO_OFF, reg);
+
+ /* Turn on memory */
+ writel(readl(reg) & ~MD1_MEM_OFF, reg);
+}
+
+static const struct of_device_id of_match_clk_mt6795_apmixed[] = {
+ { .compatible = "mediatek,mt6795-apmixedsys" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ hw = mtk_clk_register_ref2usb_tx("ref2usb_tx", "clk26m", base + REG_REF2USB);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ dev_err(dev, "Failed to register ref2usb_tx: %d\n", ret);
+ goto unregister_plls;
+ }
+ clk_data->hws[CLK_APMIXED_REF2USB_TX] = hw;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret) {
+ dev_err(dev, "Cannot register clock provider: %d\n", ret);
+ goto unregister_ref2usb;
+ }
+
+ /* Setup MD1 to avoid random crashes */
+ dev_dbg(dev, "Performing initial setup for MD1\n");
+ clk_mt6795_apmixed_setup_md1(base);
+
+ return 0;
+
+unregister_ref2usb:
+ mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_apmixed_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_ref2usb_tx(clk_data->hws[CLK_APMIXED_REF2USB_TX]);
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_apmixed_drv = {
+ .probe = clk_mt6795_apmixed_probe,
+ .remove = clk_mt6795_apmixed_remove,
+ .driver = {
+ .name = "clk-mt6795-apmixed",
+ .of_match_table = of_match_clk_mt6795_apmixed,
+ },
+};
+module_platform_driver(clk_mt6795_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 apmixed clocks driver");
+MODULE_LICENSE("GPL");
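
clk_mt6795_apmixed_setup_md1() above clears four bits in AP_PLL_CON7 with back-to-back readl()/writel() pairs. A small local helper would make the read-modify-write explicit; the version below is only a possible refactor of the function above, and the helper name is invented:

static void apmixed_clr_bits(void __iomem *reg, u32 mask)
{
	/* Read-modify-write: clear @mask, leave every other bit untouched */
	writel(readl(reg) & ~mask, reg);
}

static void clk_mt6795_apmixed_setup_md1_alt(void __iomem *base)
{
	void __iomem *reg = base + REG_AP_PLL_CON7;

	/* Same ordering as the original: clock, MTCMOS, ISO, then memory */
	apmixed_clr_bits(reg, MD1_CLK_OFF);
	apmixed_clr_bits(reg, MD1_MTCMOS_OFF);
	apmixed_clr_bits(reg, MD1_ISO_OFF);
	apmixed_clr_bits(reg, MD1_MEM_OFF);
}
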
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
new file mode 100644
index 000000000000..df7eed6e071e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <dt-bindings/reset/mediatek,mt6795-resets.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-cpumux.h"
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_ICG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x0040,
+ .clr_ofs = 0x0044,
+ .sta_ofs = 0x0048,
+};
+
+static const char * const ca53_c0_parents[] = {
+ "clk26m",
+ "armca53pll",
+ "mainpll",
+ "univpll"
+};
+
+static const char * const ca53_c1_parents[] = {
+ "clk26m",
+ "armca53pll",
+ "mainpll",
+ "univpll"
+};
+
+static const struct mtk_composite cpu_muxes[] = {
+ MUX(CLK_INFRA_CA53_C0_SEL, "infra_ca53_c0_sel", ca53_c0_parents, 0x00, 0, 2),
+ MUX(CLK_INFRA_CA53_C1_SEL, "infra_ca53_c1_sel", ca53_c1_parents, 0x00, 2, 2),
+};
+
+static const struct mtk_gate infra_gates[] = {
+ GATE_ICG(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+ GATE_ICG(CLK_INFRA_SMI, "infra_smi", "mm_sel", 1),
+ GATE_ICG(CLK_INFRA_AUDIO, "infra_audio", "aud_intbus_sel", 5),
+ GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+ GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7),
+ GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+ GATE_ICG(CLK_INFRA_MD1MCU, "infra_md1mcu", "clk26m", 9),
+ GATE_ICG(CLK_INFRA_MD1BUS, "infra_md1bus", "axi_sel", 10),
+ GATE_ICG(CLK_INFRA_MD1DBB, "infra_dbb", "axi_sel", 11),
+ GATE_ICG(CLK_INFRA_DEVICE_APC, "infra_devapc", "clk26m", 12),
+ GATE_ICG(CLK_INFRA_TRNG, "infra_trng", "axi_sel", 13),
+ GATE_ICG(CLK_INFRA_MD1LTE, "infra_md1lte", "axi_sel", 14),
+ GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "cpum_ck", 15),
+ GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+};
+
+static u16 infra_ao_rst_ofs[] = { 0x30, 0x34 };
+
+static u16 infra_ao_idx_map[] = {
+ [MT6795_INFRA_RST0_SCPSYS_RST] = 0 * RST_NR_PER_BANK + 5,
+ [MT6795_INFRA_RST0_PMIC_WRAP_RST] = 0 * RST_NR_PER_BANK + 7,
+ [MT6795_INFRA_RST1_MIPI_DSI_RST] = 1 * RST_NR_PER_BANK + 4,
+ [MT6795_INFRA_RST1_MIPI_CSI_RST] = 1 * RST_NR_PER_BANK + 7,
+ [MT6795_INFRA_RST1_MM_IOMMU_RST] = 1 * RST_NR_PER_BANK + 15,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = infra_ao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(infra_ao_rst_ofs),
+ .rst_idx_map = infra_ao_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(infra_ao_idx_map),
+};
+
+static const struct of_device_id of_match_clk_mt6795_infracfg[] = {
+ { .compatible = "mediatek,mt6795-infracfg" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_infracfg_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_gates(node, infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_cpumuxes;
+
+ return 0;
+
+unregister_cpumuxes:
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+unregister_gates:
+ mtk_clk_unregister_gates(infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_infracfg_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_cpumuxes(cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data);
+ mtk_clk_unregister_gates(infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_infracfg_drv = {
+ .driver = {
+ .name = "clk-mt6795-infracfg",
+ .of_match_table = of_match_clk_mt6795_infracfg,
+ },
+ .probe = clk_mt6795_infracfg_probe,
+ .remove = clk_mt6795_infracfg_remove,
+};
+module_platform_driver(clk_mt6795_infracfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 infracfg clocks driver");
+MODULE_LICENSE("GPL");
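
infra_ao_idx_map translates the public MT6795_INFRA_RST* numbers from the DT binding into a flat bank * RST_NR_PER_BANK + bit encoding, and infra_ao_rst_ofs supplies the per-bank register offsets. The decode below is only an illustration of how such a mapped index resolves to a bank and a bit; it is an assumption about the shared MediaTek reset controller rather than a copy of reset.c, and it assumes RST_NR_PER_BANK is the number of reset lines per bank (32 on these SoCs):

static void mt6795_infra_rst_decode(unsigned long id, u16 *bank_ofs, u32 *bit)
{
	u32 raw = infra_ao_idx_map[id];	/* e.g. MIPI_DSI_RST -> 1 * RST_NR_PER_BANK + 4 */

	*bank_ofs = infra_ao_rst_ofs[raw / RST_NR_PER_BANK];	/* 0x30 or 0x34 */
	*bit = raw % RST_NR_PER_BANK;	/* bit within that bank's set/clear registers */
}
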
diff --git a/drivers/clk/mediatek/clk-mt6795-mfg.c b/drivers/clk/mediatek/clk-mt6795-mfg.c
new file mode 100644
index 000000000000..ee7aab24eb24
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-mfg.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "axi_mfg_in_sel", 0),
+ GATE_MFG(CLK_MFG_BMEM, "mfg_bmem", "mem_mfg_in_sel", 1),
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 2),
+ GATE_MFG(CLK_MFG_B26M, "mfg_b26m", "clk26m", 3),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6795_mfg[] = {
+ { .compatible = "mediatek,mt6795-mfgcfg", .data = &mfg_desc },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6795_mfg_drv = {
+ .driver = {
+ .name = "clk-mt6795-mfg",
+ .of_match_table = of_match_clk_mt6795_mfg,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt6795_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
new file mode 100644
index 000000000000..fd73f202f292
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+static const struct mtk_gate mm_gates[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_CROP, "mm_mdp_crop", "mm_sel", 10),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "clk32k", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+};
+
+static int clk_mt6795_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_gates(node, mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_mm_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_gates(mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_mm_drv = {
+ .driver = {
+ .name = "clk-mt6795-mm",
+ },
+ .probe = clk_mt6795_mm_probe,
+ .remove = clk_mt6795_mm_remove,
+};
+module_platform_driver(clk_mt6795_mm_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 MultiMedia clocks driver");
+MODULE_LICENSE("GPL");
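
Note that clk-mt6795-mm has no of_match_table and registers its clocks against dev->parent->of_node: it is meant to be spawned as a child platform device by the driver that owns the mmsys node. The snippet below is only a sketch of how a parent typically creates such a child; it is not the actual mtk-mmsys code and the function name is invented:

static int mmsys_spawn_clk_child(struct platform_device *pdev)
{
	struct platform_device *clks;

	/* The child's dev.parent is &pdev->dev, so in its probe
	 * dev->parent->of_node is the mmsys node carrying the MM0/MM1
	 * gates listed above. */
	clks = platform_device_register_data(&pdev->dev, "clk-mt6795-mm",
					     PLATFORM_DEVID_AUTO, NULL, 0);
	return PTR_ERR_OR_ZERO(clks);
}
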
diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
new file mode 100644
index 000000000000..cb28d35dad59
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <dt-bindings/reset/mediatek,mt6795-resets.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define GATE_PERI(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr)
+
+static DEFINE_SPINLOCK(mt6795_peri_clk_lock);
+
+static const struct mtk_gate_regs peri_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x0010,
+ .sta_ofs = 0x0018,
+};
+
+static const char * const uart_ck_sel_parents[] = {
+ "clk26m",
+ "uart_sel",
+};
+
+static const struct mtk_composite peri_clks[] = {
+ MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1),
+ MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1),
+ MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1),
+ MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
+};
+
+static const struct mtk_gate peri_gates[] = {
+ GATE_PERI(CLK_PERI_NFI, "peri_nfi", "axi_sel", 0),
+ GATE_PERI(CLK_PERI_THERM, "peri_therm", "axi_sel", 1),
+ GATE_PERI(CLK_PERI_PWM1, "peri_pwm1", "axi_sel", 2),
+ GATE_PERI(CLK_PERI_PWM2, "peri_pwm2", "axi_sel", 3),
+ GATE_PERI(CLK_PERI_PWM3, "peri_pwm3", "axi_sel", 4),
+ GATE_PERI(CLK_PERI_PWM4, "peri_pwm4", "axi_sel", 5),
+ GATE_PERI(CLK_PERI_PWM5, "peri_pwm5", "axi_sel", 6),
+ GATE_PERI(CLK_PERI_PWM6, "peri_pwm6", "axi_sel", 7),
+ GATE_PERI(CLK_PERI_PWM7, "peri_pwm7", "axi_sel", 8),
+ GATE_PERI(CLK_PERI_PWM, "peri_pwm", "axi_sel", 9),
+ GATE_PERI(CLK_PERI_USB0, "peri_usb0", "usb30_sel", 10),
+ GATE_PERI(CLK_PERI_USB1, "peri_usb1", "usb20_sel", 11),
+ GATE_PERI(CLK_PERI_AP_DMA, "peri_ap_dma", "axi_sel", 12),
+ GATE_PERI(CLK_PERI_MSDC30_0, "peri_msdc30_0", "msdc50_0_sel", 13),
+ GATE_PERI(CLK_PERI_MSDC30_1, "peri_msdc30_1", "msdc30_1_sel", 14),
+ GATE_PERI(CLK_PERI_MSDC30_2, "peri_msdc30_2", "msdc30_2_sel", 15),
+ GATE_PERI(CLK_PERI_MSDC30_3, "peri_msdc30_3", "msdc30_3_sel", 16),
+ GATE_PERI(CLK_PERI_NLI_ARB, "peri_nli_arb", "axi_sel", 17),
+ GATE_PERI(CLK_PERI_IRDA, "peri_irda", "irda_sel", 18),
+ GATE_PERI(CLK_PERI_UART0, "peri_uart0", "axi_sel", 19),
+ GATE_PERI(CLK_PERI_UART1, "peri_uart1", "axi_sel", 20),
+ GATE_PERI(CLK_PERI_UART2, "peri_uart2", "axi_sel", 21),
+ GATE_PERI(CLK_PERI_UART3, "peri_uart3", "axi_sel", 22),
+ GATE_PERI(CLK_PERI_I2C0, "peri_i2c0", "axi_sel", 23),
+ GATE_PERI(CLK_PERI_I2C1, "peri_i2c1", "axi_sel", 24),
+ GATE_PERI(CLK_PERI_I2C2, "peri_i2c2", "axi_sel", 25),
+ GATE_PERI(CLK_PERI_I2C3, "peri_i2c3", "axi_sel", 26),
+ GATE_PERI(CLK_PERI_I2C4, "peri_i2c4", "axi_sel", 27),
+ GATE_PERI(CLK_PERI_AUXADC, "peri_auxadc", "clk26m", 28),
+ GATE_PERI(CLK_PERI_SPI0, "peri_spi0", "spi_sel", 29),
+};
+
+static u16 peri_rst_ofs[] = { 0x0 };
+
+static u16 peri_idx_map[] = {
+ [MT6795_PERI_NFI_SW_RST] = 14,
+ [MT6795_PERI_THERM_SW_RST] = 16,
+ [MT6795_PERI_MSDC1_SW_RST] = 20,
+};
+
+static const struct mtk_clk_rst_desc clk_rst_desc = {
+ .version = MTK_RST_SIMPLE,
+ .rst_bank_ofs = peri_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(peri_rst_ofs),
+ .rst_idx_map = peri_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(peri_idx_map),
+};
+
+static const struct of_device_id of_match_clk_mt6795_pericfg[] = {
+ { .compatible = "mediatek,mt6795-pericfg" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_pericfg_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+ &mt6795_peri_clk_lock, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ return 0;
+
+unregister_composites:
+ mtk_clk_unregister_composites(peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+unregister_gates:
+ mtk_clk_unregister_gates(peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_pericfg_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_composites(peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+ mtk_clk_unregister_gates(peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_pericfg_drv = {
+ .driver = {
+ .name = "clk-mt6795-pericfg",
+ .of_match_table = of_match_clk_mt6795_pericfg,
+ },
+ .probe = clk_mt6795_pericfg_probe,
+ .remove = clk_mt6795_pericfg_remove,
+};
+module_platform_driver(clk_mt6795_pericfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 pericfg clocks driver");
+MODULE_LICENSE("GPL");
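
Each MUX() entry in peri_clks is a plain bitfield mux described by register offset, shift and width, so uart0_ck_sel is bit 0 of PERICFG + 0x40c selecting between clk26m (index 0) and uart_sel (index 1). At the register level the parent switch boils down to the sketch below; consumers go through clk_set_parent() rather than touching the register, and the helper here is purely illustrative:

static void uart0_ck_sel_write(void __iomem *pericfg, unsigned int parent_idx)
{
	u32 val = readl(pericfg + 0x40c);

	val &= ~BIT(0);			/* 1-bit field at shift 0 */
	if (parent_idx)			/* 0 = "clk26m", 1 = "uart_sel" */
		val |= BIT(0);
	writel(val, pericfg + 0x40c);
}

The mt6795_peri_clk_lock spinlock handed to mtk_clk_register_composites() exists because several of these selector bits share the 0x40c register, so the read-modify-write must be serialized.
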
diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
new file mode 100644
index 000000000000..2948dd1aee8f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/*
+ * For some clocks we don't care what the actual rate is, and the rate may
+ * differ between products or scenarios, so model these clocks' rate as 0
+ * to denote that it is not a real rate.
+ */
+#define DUMMY_RATE 0
+
+#define TOP_MUX_GATE_NOSR(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) \
+ MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _reg, \
+ (_reg + 0x4), (_reg + 0x8), _shift, _width, \
+ _gate, 0, -1, _flags)
+
+#define TOP_MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) \
+ TOP_MUX_GATE_NOSR(_id, _name, _parents, _reg, _shift, _width, \
+ _gate, CLK_SET_RATE_PARENT | _flags)
+
+static DEFINE_SPINLOCK(mt6795_top_clk_lock);
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d8",
+ "dmpll_d4",
+ "dmpll_d8"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const axi_mfg_in_parents[] = {
+ "clk26m",
+ "axi_sel",
+ "dmpll_d2"
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d2",
+ "syspll3_d2",
+ "syspll3_d4",
+ "univpll1_d4",
+ "dmpll_d8"
+};
+
+static const char * const cci400_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "clk26m",
+ "clk26m",
+ "univpll_d2",
+ "syspll_d2",
+ "msdcpll_ck",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "clk26m",
+ "clk26m",
+ "tvdpll_d8",
+ "tvdpll_d16"
+};
+
+static const char * const i2s0_m_ck_parents[] = {
+ "apll1_div1",
+ "apll2_div1"
+};
+
+static const char * const i2s1_m_ck_parents[] = {
+ "apll1_div2",
+ "apll2_div2"
+};
+
+static const char * const i2s2_m_ck_parents[] = {
+ "apll1_div3",
+ "apll2_div3"
+};
+
+static const char * const i2s3_m_ck_parents[] = {
+ "apll1_div4",
+ "apll2_div4"
+};
+
+static const char * const i2s3_b_ck_parents[] = {
+ "apll1_div5",
+ "apll2_div5"
+};
+
+static const char * const irda_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll2_d4",
+ "dmpll_d8",
+};
+
+static const char * const mem_mfg_in_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_ck"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "dmpll_ck",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll2_d2"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_d2",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_d2"
+};
+
+static const char * const mjc_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "syspll_d5",
+ "syspll1_d2",
+ "univpll_d5",
+ "univpll2_d2",
+ "dmpll_ck"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "msdcpll_d4",
+ "vencpll_d4",
+ "tvdpll_ck",
+ "univpll_d2",
+ "univpll1_d2",
+ "mmpll_ck"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d4"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "dmpll_d8",
+ "dmpll_d16"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "dmpll_d4"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll_d5",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d4",
+ "univpll1_d8"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const usb30_parents[] = {
+ "clk26m",
+ "univpll3_d2",
+ "usb_syspll_125m",
+ "univpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "univpll_d3",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "mmpll_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_445p5m",
+ "univpll_d3",
+ "vencpll_d2",
+ "syspll_d3",
+ "univpll1_d2",
+ "univpll2_d2",
+ "dmpll_d2",
+ "dmpll_d4"
+};
+
+static const struct mtk_fixed_clk fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_ADSYS_26M, "adsys_26m", "clk26m", 26 * MHZ),
+ FIXED_CLK(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk26m", 125 * MHZ),
+ FIXED_CLK(CLK_TOP_DSI0_DIG, "dsi0_dig", "clk26m", DUMMY_RATE),
+ FIXED_CLK(CLK_TOP_DSI1_DIG, "dsi1_dig", "clk26m", DUMMY_RATE),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_ARMCA53PLL_754M, "armca53pll_754m", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_ARMCA53PLL_502M, "armca53pll_502m", "clk26m", 1, 3),
+
+ FACTOR(CLK_TOP_MAIN_H546M, "main_h546m", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_MAIN_H364M, "main_h364m", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAIN_H218P4M, "main_h218p4m", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAIN_H156M, "main_h156m", "mainpll", 1, 7),
+
+ FACTOR(CLK_TOP_TVDPLL_445P5M, "tvdpll_445p5m", "tvdpll", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_594M, "tvdpll_594m", "tvdpll", 1, 3),
+
+ FACTOR(CLK_TOP_UNIV_624M, "univ_624m", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIV_416M, "univ_416m", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIV_249P6M, "univ_249p6m", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIV_178P3M, "univ_178p3m", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIV_48M, "univ_48m", "univpll", 1, 26),
+
+ FACTOR(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", "clk32k", 1, 1),
+ FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
+ FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
+
+ FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "clk26m", 1, 3),
+
+ FACTOR(CLK_TOP_ARMCA53PLL_D2, "armca53pll_d2", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_ARMCA53PLL_D3, "armca53pll_d3", "clk26m", 1, 1),
+
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "clkph_mck_o", 1, 1),
+ FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8),
+ FACTOR(CLK_TOP_DMPLL_D16, "dmpll_d16", "clkph_mck_o", 1, 16),
+
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2", 1, 4),
+
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "main_h546m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "main_h546m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "main_h546m", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "main_h546m", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "main_h546m", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "main_h364m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "main_h364m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "main_h364m", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "main_h218p4m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "main_h218p4m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "main_h218p4m", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "main_h156m", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "main_h156m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "main_h156m", 1, 4),
+
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll_594m", 1, 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_594m", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_594m", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_594m", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_594m", 1, 16),
+
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univ_624m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univ_624m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univ_624m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univ_624m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univ_416m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univ_416m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univ_416m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univ_416m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univ_249p6m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univ_249p6m", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univ_249p6m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univ_249p6m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univ_178p3m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univ_48m", 1, 1),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univ_48m", 1, 2),
+
+ FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 3),
+ FACTOR(CLK_TOP_VCODECPLL_370P5, "vcodecpll_370p5", "vcodecpll", 1, 4),
+
+ FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1),
+ FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll", 1, 2),
+ FACTOR(CLK_TOP_VENCPLL_D4, "vencpll_d4", "vencpll", 1, 4),
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ TOP_MUX_GATE_NOSR(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x40, 0, 3, 7, CLK_IS_CRITICAL),
+ TOP_MUX_GATE_NOSR(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x40, 8, 1, 15, CLK_IS_CRITICAL),
+ TOP_MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x40, 16, 1, 23, CLK_IS_CRITICAL),
+ TOP_MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x40, 24, 3, 31, 0),
+ /* CLK_CFG_1 */
+ TOP_MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x50, 0, 2, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x50, 8, 4, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x50, 16, 4, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x50, 24, 4, 31, 0),
+ /* CLK_CFG_2 */
+ TOP_MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x60, 0, 3, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x60, 8, 1, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x60, 16, 3, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x60, 24, 2, 31, 0),
+ /* CLK_CFG_3 */
+ TOP_MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x70, 0, 2, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel", msdc50_0_h_parents,
+ 0x70, 8, 3, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents, 0x70, 16, 4, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents, 0x70, 24, 3, 31, 0),
+ /* CLK_CFG_4 */
+ TOP_MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_2_parents, 0x80, 0, 3, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents, 0x80, 8, 3, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x80, 16, 2, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x80, 24, 3, 31, 0),
+ /* CLK_CFG_5 */
+ TOP_MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x90, 0, 3, 5, 0),
+ TOP_MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x90, 8, 3, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_MJC_SEL, "mjc_sel", mjc_parents, 0x90, 24, 4, 31, 0),
+ /* CLK_CFG_6 */
+ /*
+ * The dpi0_sel clock should not propagate rate changes to its parent
+ * clock so the dpi driver can have full control over PLL and divider.
+ */
+ TOP_MUX_GATE_NOSR(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0xa0, 0, 3, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0xa0, 8, 2, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents,
+ 0xa0, 16, 3, 23, CLK_IS_CRITICAL),
+ TOP_MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0xa0, 24, 2, 31, 0),
+ /* CLK_CFG_7 */
+ TOP_MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0xb0, 0, 2, 7, 0),
+ TOP_MUX_GATE(CLK_TOP_MEM_MFG_IN_SEL, "mem_mfg_in_sel", mem_mfg_in_parents,
+ 0xb0, 8, 2, 15, 0),
+ TOP_MUX_GATE(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents,
+ 0xb0, 16, 2, 23, 0),
+ TOP_MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0xb0, 24, 2, 31, 0),
+};
+
+static struct mtk_composite top_aud_divs[] = {
+ MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, 0x120, 4, 1),
+ MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, 0x120, 5, 1),
+ MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", i2s2_m_ck_parents, 0x120, 6, 1),
+ MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, 0x120, 7, 1),
+ MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1),
+
+ DIV_GATE(CLK_TOP_APLL1_DIV0, "apll1_div0", "aud_1_sel", 0x12c, 8, 0x120, 4, 24),
+ DIV_GATE(CLK_TOP_APLL1_DIV1, "apll1_div1", "aud_1_sel", 0x12c, 9, 0x124, 8, 0),
+ DIV_GATE(CLK_TOP_APLL1_DIV2, "apll1_div2", "aud_1_sel", 0x12c, 10, 0x124, 8, 8),
+ DIV_GATE(CLK_TOP_APLL1_DIV3, "apll1_div3", "aud_1_sel", 0x12c, 11, 0x124, 8, 16),
+ DIV_GATE(CLK_TOP_APLL1_DIV4, "apll1_div4", "aud_1_sel", 0x12c, 12, 0x124, 8, 24),
+ DIV_GATE(CLK_TOP_APLL1_DIV5, "apll1_div5", "apll1_div4", 0x12c, 13, 0x12c, 4, 0),
+
+ DIV_GATE(CLK_TOP_APLL2_DIV0, "apll2_div0", "aud_2_sel", 0x12c, 16, 0x120, 4, 28),
+ DIV_GATE(CLK_TOP_APLL2_DIV1, "apll2_div1", "aud_2_sel", 0x12c, 17, 0x128, 8, 0),
+ DIV_GATE(CLK_TOP_APLL2_DIV2, "apll2_div2", "aud_2_sel", 0x12c, 18, 0x128, 8, 8),
+ DIV_GATE(CLK_TOP_APLL2_DIV3, "apll2_div3", "aud_2_sel", 0x12c, 19, 0x128, 8, 16),
+ DIV_GATE(CLK_TOP_APLL2_DIV4, "apll2_div4", "aud_2_sel", 0x12c, 20, 0x128, 8, 24),
+ DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4),
+};
+
+static const struct of_device_id of_match_clk_mt6795_topckgen[] = {
+ { .compatible = "mediatek,mt6795-topckgen" },
+ { /* sentinel */ }
+};
+
+static int clk_mt6795_topckgen_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ if (ret)
+ goto unregister_fixed_clks;
+
+ ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt6795_top_clk_lock, clk_data);
+ if (ret)
+ goto unregister_factors;
+
+ ret = mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), base,
+ &mt6795_top_clk_lock, clk_data);
+ if (ret)
+ goto unregister_muxes;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ return 0;
+
+unregister_composites:
+ mtk_clk_unregister_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+ return ret;
+}
+
+static int clk_mt6795_topckgen_remove(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), clk_data);
+ mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_unregister_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
+ mtk_free_clk_data(clk_data);
+
+ return 0;
+}
+
+static struct platform_driver clk_mt6795_topckgen_drv = {
+ .driver = {
+ .name = "clk-mt6795-topckgen",
+ .of_match_table = of_match_clk_mt6795_topckgen,
+ },
+ .probe = clk_mt6795_topckgen_probe,
+ .remove = clk_mt6795_topckgen_remove,
+};
+module_platform_driver(clk_mt6795_topckgen_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 topckgen clocks driver");
+MODULE_LICENSE("GPL");
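
The TOP_MUX_GATE_NOSR() wrapper fixes the usual CLR/SET layout for these muxes: the mux value lives at _reg, the set register at _reg + 0x4, the clear register at _reg + 0x8, and the gate enable at bit _gate. Reading the first CLK_CFG_0 entry through the macro gives roughly the expansion below (the trailing 0, -1 pair, read here as "no update register", is an interpretation rather than something stated in this diff):

/* TOP_MUX_GATE_NOSR(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x40, 0, 3, 7, CLK_IS_CRITICAL) */
MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
			   0x40,	/* mux register (CLK_CFG_0)  */
			   0x44,	/* mux set register          */
			   0x48,	/* mux clear register        */
			   0,		/* selector shift            */
			   3,		/* selector width in bits    */
			   7,		/* gate bit                  */
			   0, -1,	/* update register: unused   */
			   CLK_IS_CRITICAL),
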
diff --git a/drivers/clk/mediatek/clk-mt6795-vdecsys.c b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
new file mode 100644
index 000000000000..d85d04e0d016
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-vdecsys.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define GATE_VDEC(_id, _name, _parent, _regs) \
+ GATE_MTK(_id, _name, _parent, _regs, 0, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x0000,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x0008,
+};
+
+static const struct mtk_gate vdec_clks[] = {
+ GATE_VDEC(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", &vdec0_cg_regs),
+ GATE_VDEC(CLK_VDEC_LARB_CKEN, "vdec_larb_cken", "mm_sel", &vdec1_cg_regs),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6795_vdecsys[] = {
+ { .compatible = "mediatek,mt6795-vdecsys", .data = &vdec_desc },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6795_vdecsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt6795-vdecsys",
+ .of_match_table = of_match_clk_mt6795_vdecsys,
+ },
+};
+module_platform_driver(clk_mt6795_vdecsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 vdecsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6795-vencsys.c b/drivers/clk/mediatek/clk-mt6795-vencsys.c
new file mode 100644
index 000000000000..de40a982ca96
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6795-vencsys.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <dt-bindings/clock/mediatek,mt6795-clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_LARB, "venc_larb", "venc_sel", 0),
+ GATE_VENC(CLK_VENC_VENC, "venc_venc", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_JPGENC, "venc_jpgenc", "venc_sel", 8),
+ GATE_VENC(CLK_VENC_JPGDEC, "venc_jpgdec", "venc_sel", 12),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt6795_vencsys[] = {
+ { .compatible = "mediatek,mt6795-vencsys", .data = &venc_desc },
+ { /* sentinel */ }
+};
+
+static struct platform_driver clk_mt6795_vencsys_drv = {
+ .driver = {
+ .name = "clk-mt6795-vencsys",
+ .of_match_table = of_match_clk_mt6795_vencsys,
+ },
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+};
+module_platform_driver(clk_mt6795_vencsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT6795 vdecsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
index 25d17db13bac..7c6a53fbb8be 100644
--- a/drivers/clk/mediatek/clk-mt6797-img.c
+++ b/drivers/clk/mediatek/clk-mt6797-img.c
@@ -32,33 +32,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_LARB6, "img_larb6", "mm_sel", 0),
};
-static const struct of_device_id of_match_clk_mt6797_img[] = {
- { .compatible = "mediatek,mt6797-imgsys", },
- {}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
};
-static int clk_mt6797_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt6797_img[] = {
+ {
+ .compatible = "mediatek,mt6797-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6797_img_drv = {
- .probe = clk_mt6797_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-img",
.of_match_table = of_match_clk_mt6797_img,
diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
index de857894e033..6120fccc859f 100644
--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
@@ -49,33 +49,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "mm_sel", 0),
};
-static const struct of_device_id of_match_clk_mt6797_vdec[] = {
- { .compatible = "mediatek,mt6797-vdecsys", },
- {}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
};
-static int clk_mt6797_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt6797_vdec[] = {
+ {
+ .compatible = "mediatek,mt6797-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6797_vdec_drv = {
- .probe = clk_mt6797_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-vdec",
.of_match_table = of_match_clk_mt6797_vdec,
diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
index 78b7ed55f979..834d3834d2bb 100644
--- a/drivers/clk/mediatek/clk-mt6797-venc.c
+++ b/drivers/clk/mediatek/clk-mt6797-venc.c
@@ -34,33 +34,23 @@ static const struct mtk_gate venc_clks[] = {
GATE_VENC(CLK_VENC_3, "venc_3", "venc_sel", 12),
};
-static const struct of_device_id of_match_clk_mt6797_venc[] = {
- { .compatible = "mediatek,mt6797-vencsys", },
- {}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
};
-static int clk_mt6797_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- int r;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
- if (r)
- dev_err(&pdev->dev,
- "could not register clock provider: %s: %d\n",
- pdev->name, r);
-
- return r;
-}
+static const struct of_device_id of_match_clk_mt6797_venc[] = {
+ {
+ .compatible = "mediatek,mt6797-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
static struct platform_driver clk_mt6797_venc_drv = {
- .probe = clk_mt6797_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt6797-venc",
.of_match_table = of_match_clk_mt6797_venc,
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
index fcc598a45165..6907b1a6a824 100644
--- a/drivers/clk/mediatek/clk-mt8183-cam.c
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -34,26 +34,23 @@ static const struct mtk_gate cam_clks[] = {
GATE_CAM(CLK_CAM_CCU, "cam_ccu", "cam_sel", 12),
};
-static int clk_mt8183_cam_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
-
- mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
static const struct of_device_id of_match_clk_mt8183_cam[] = {
- { .compatible = "mediatek,mt8183-camsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-camsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_cam_drv = {
- .probe = clk_mt8183_cam_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-cam",
.of_match_table = of_match_clk_mt8183_cam,
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
index eb2def2cf0ae..8d884425d79f 100644
--- a/drivers/clk/mediatek/clk-mt8183-img.c
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -34,26 +34,23 @@ static const struct mtk_gate img_clks[] = {
GATE_IMG(CLK_IMG_OWE, "img_owe", "img_sel", 9),
};
-static int clk_mt8183_img_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
-
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
static const struct of_device_id of_match_clk_mt8183_img[] = {
- { .compatible = "mediatek,mt8183-imgsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-imgsys",
+ .data = &img_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_img_drv = {
- .probe = clk_mt8183_img_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-img",
.of_match_table = of_match_clk_mt8183_img,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
index b30fc9f47518..953a8a33d048 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu0.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -27,26 +27,23 @@ static const struct mtk_gate ipu_core0_clks[] = {
GATE_IPU_CORE0(CLK_IPU_CORE0_IPU, "ipu_core0_ipu", "dsp_sel", 2),
};
-static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_core0_desc = {
+ .clks = ipu_core0_clks,
+ .num_clks = ARRAY_SIZE(ipu_core0_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
- { .compatible = "mediatek,mt8183-ipu_core0", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_core0",
+ .data = &ipu_core0_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_core0_drv = {
- .probe = clk_mt8183_ipu_core0_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core0",
.of_match_table = of_match_clk_mt8183_ipu_core0,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
index b378957e11d0..221d12265974 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu1.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -27,26 +27,23 @@ static const struct mtk_gate ipu_core1_clks[] = {
GATE_IPU_CORE1(CLK_IPU_CORE1_IPU, "ipu_core1_ipu", "dsp_sel", 2),
};
-static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_core1_desc = {
+ .clks = ipu_core1_clks,
+ .num_clks = ARRAY_SIZE(ipu_core1_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
- { .compatible = "mediatek,mt8183-ipu_core1", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_core1",
+ .data = &ipu_core1_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_core1_drv = {
- .probe = clk_mt8183_ipu_core1_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_core1",
.of_match_table = of_match_clk_mt8183_ipu_core1,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
index 941b43ac8bec..8c4fd96df821 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -25,26 +25,23 @@ static const struct mtk_gate ipu_adl_clks[] = {
GATE_IPU_ADL_I(CLK_IPU_ADL_CABGEN, "ipu_adl_cabgen", "dsp_sel", 24),
};
-static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_adl_desc = {
+ .clks = ipu_adl_clks,
+ .num_clks = ARRAY_SIZE(ipu_adl_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
- { .compatible = "mediatek,mt8183-ipu_adl", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_adl",
+ .data = &ipu_adl_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_adl_drv = {
- .probe = clk_mt8183_ipu_adl_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_adl",
.of_match_table = of_match_clk_mt8183_ipu_adl,
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
index ae82c2e17110..14a4c3ff82a1 100644
--- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -94,26 +94,23 @@ static const struct mtk_gate ipu_conn_clks[] = {
"ipu_conn_cab3to1_slice", "dsp1_sel", 17),
};
-static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK);
-
- mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc ipu_conn_desc = {
+ .clks = ipu_conn_clks,
+ .num_clks = ARRAY_SIZE(ipu_conn_clks),
+};
static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
- { .compatible = "mediatek,mt8183-ipu_conn", },
- {}
+ {
+ .compatible = "mediatek,mt8183-ipu_conn",
+ .data = &ipu_conn_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_ipu_conn_drv = {
- .probe = clk_mt8183_ipu_conn_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-ipu_conn",
.of_match_table = of_match_clk_mt8183_ipu_conn,
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
index d774edaf760b..730c9ae5ea12 100644
--- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -18,36 +18,31 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
- &mtk_clk_gate_ops_setclr)
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
};
-static int clk_mt8183_mfg_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- pm_runtime_enable(&pdev->dev);
-
- clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
-
- mtk_clk_register_gates_with_dev(node, mfg_clks, ARRAY_SIZE(mfg_clks),
- clk_data, &pdev->dev);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
static const struct of_device_id of_match_clk_mt8183_mfg[] = {
- { .compatible = "mediatek,mt8183-mfgcfg", },
- {}
+ {
+ .compatible = "mediatek,mt8183-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_mfg_drv = {
- .probe = clk_mt8183_mfg_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-mfg",
.of_match_table = of_match_clk_mt8183_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
index 0548cde159d0..c294e50b96b7 100644
--- a/drivers/clk/mediatek/clk-mt8183-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -38,26 +38,23 @@ static const struct mtk_gate vdec_clks[] = {
GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1", "mm_sel", 0),
};
-static int clk_mt8183_vdec_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
-
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
static const struct of_device_id of_match_clk_mt8183_vdec[] = {
- { .compatible = "mediatek,mt8183-vdecsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_vdec_drv = {
- .probe = clk_mt8183_vdec_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-vdec",
.of_match_table = of_match_clk_mt8183_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
index f86ec607d87a..0051c5d92fc5 100644
--- a/drivers/clk/mediatek/clk-mt8183-venc.c
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -30,26 +30,23 @@ static const struct mtk_gate venc_clks[] = {
"mm_sel", 8),
};
-static int clk_mt8183_venc_probe(struct platform_device *pdev)
-{
- struct clk_hw_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
-
- clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
-
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
-}
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
static const struct of_device_id of_match_clk_mt8183_venc[] = {
- { .compatible = "mediatek,mt8183-vencsys", },
- {}
+ {
+ .compatible = "mediatek,mt8183-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
};
static struct platform_driver clk_mt8183_venc_drv = {
- .probe = clk_mt8183_venc_probe,
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8183-venc",
.of_match_table = of_match_clk_mt8183_venc,
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 8512101e1189..1860a35a723a 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -1198,10 +1198,33 @@ static void clk_mt8183_top_init_early(struct device_node *node)
CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
clk_mt8183_top_init_early);
+/* Register mux notifier for MFG mux */
+static int clk_mt8183_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+ int i;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(top_muxes); i++)
+ if (top_muxes[i].id == CLK_TOP_MUX_MFG)
+ break;
+ if (i == ARRAY_SIZE(top_muxes))
+ return -EINVAL;
+
+ mfg_mux_nb->ops = top_muxes[i].ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to 26M crystal */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
void __iomem *base;
struct device_node *node = pdev->dev.of_node;
+ int ret;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -1227,6 +1250,11 @@ static int clk_mt8183_top_probe(struct platform_device *pdev)
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
top_clk_data);
+ ret = clk_mt8183_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MUX_MFG]->clk);
+ if (ret)
+ return ret;
+
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
top_clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt8192-cam.c b/drivers/clk/mediatek/clk-mt8192-cam.c
index fc74cd80b4b0..90b57d46eef7 100644
--- a/drivers/clk/mediatek/clk-mt8192-cam.c
+++ b/drivers/clk/mediatek/clk-mt8192-cam.c
@@ -98,6 +98,7 @@ static const struct of_device_id of_match_clk_mt8192_cam[] = {
static struct platform_driver clk_mt8192_cam_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-cam",
.of_match_table = of_match_clk_mt8192_cam,
diff --git a/drivers/clk/mediatek/clk-mt8192-img.c b/drivers/clk/mediatek/clk-mt8192-img.c
index 7ce3abe42577..da82d65a7650 100644
--- a/drivers/clk/mediatek/clk-mt8192-img.c
+++ b/drivers/clk/mediatek/clk-mt8192-img.c
@@ -61,6 +61,7 @@ static const struct of_device_id of_match_clk_mt8192_img[] = {
static struct platform_driver clk_mt8192_img_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-img",
.of_match_table = of_match_clk_mt8192_img,
diff --git a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
index 700356ac6a58..ff8e20bb44bb 100644
--- a/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
+++ b/drivers/clk/mediatek/clk-mt8192-imp_iic_wrap.c
@@ -110,6 +110,7 @@ static const struct of_device_id of_match_clk_mt8192_imp_iic_wrap[] = {
static struct platform_driver clk_mt8192_imp_iic_wrap_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-imp_iic_wrap",
.of_match_table = of_match_clk_mt8192_imp_iic_wrap,
diff --git a/drivers/clk/mediatek/clk-mt8192-ipe.c b/drivers/clk/mediatek/clk-mt8192-ipe.c
index 730d91b64b3f..0225abe4170a 100644
--- a/drivers/clk/mediatek/clk-mt8192-ipe.c
+++ b/drivers/clk/mediatek/clk-mt8192-ipe.c
@@ -48,6 +48,7 @@ static const struct of_device_id of_match_clk_mt8192_ipe[] = {
static struct platform_driver clk_mt8192_ipe_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-ipe",
.of_match_table = of_match_clk_mt8192_ipe,
diff --git a/drivers/clk/mediatek/clk-mt8192-mdp.c b/drivers/clk/mediatek/clk-mt8192-mdp.c
index 93c87ae2f332..4675788d7816 100644
--- a/drivers/clk/mediatek/clk-mt8192-mdp.c
+++ b/drivers/clk/mediatek/clk-mt8192-mdp.c
@@ -73,6 +73,7 @@ static const struct of_device_id of_match_clk_mt8192_mdp[] = {
static struct platform_driver clk_mt8192_mdp_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mdp",
.of_match_table = of_match_clk_mt8192_mdp,
diff --git a/drivers/clk/mediatek/clk-mt8192-mfg.c b/drivers/clk/mediatek/clk-mt8192-mfg.c
index 3bbc7469f0e4..ec5b44ffa458 100644
--- a/drivers/clk/mediatek/clk-mt8192-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8192-mfg.c
@@ -18,8 +18,10 @@ static const struct mtk_gate_regs mfg_cg_regs = {
.sta_ofs = 0x0,
};
-#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr, \
+ CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_pll_sel", 0),
@@ -41,6 +43,7 @@ static const struct of_device_id of_match_clk_mt8192_mfg[] = {
static struct platform_driver clk_mt8192_mfg_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-mfg",
.of_match_table = of_match_clk_mt8192_mfg,
diff --git a/drivers/clk/mediatek/clk-mt8192-msdc.c b/drivers/clk/mediatek/clk-mt8192-msdc.c
index 635f7a0b629a..a72e1b73fce8 100644
--- a/drivers/clk/mediatek/clk-mt8192-msdc.c
+++ b/drivers/clk/mediatek/clk-mt8192-msdc.c
@@ -55,6 +55,7 @@ static const struct of_device_id of_match_clk_mt8192_msdc[] = {
static struct platform_driver clk_mt8192_msdc_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-msdc",
.of_match_table = of_match_clk_mt8192_msdc,
diff --git a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
index 58725d79dd13..18a8679108b8 100644
--- a/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
+++ b/drivers/clk/mediatek/clk-mt8192-scp_adsp.c
@@ -41,6 +41,7 @@ static const struct of_device_id of_match_clk_mt8192_scp_adsp[] = {
static struct platform_driver clk_mt8192_scp_adsp_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-scp_adsp",
.of_match_table = of_match_clk_mt8192_scp_adsp,
diff --git a/drivers/clk/mediatek/clk-mt8192-vdec.c b/drivers/clk/mediatek/clk-mt8192-vdec.c
index b1d95cfbf22a..e149962dbbf9 100644
--- a/drivers/clk/mediatek/clk-mt8192-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8192-vdec.c
@@ -85,6 +85,7 @@ static const struct of_device_id of_match_clk_mt8192_vdec[] = {
static struct platform_driver clk_mt8192_vdec_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-vdec",
.of_match_table = of_match_clk_mt8192_vdec,
diff --git a/drivers/clk/mediatek/clk-mt8192-venc.c b/drivers/clk/mediatek/clk-mt8192-venc.c
index c0d867bff09e..80b8bb170996 100644
--- a/drivers/clk/mediatek/clk-mt8192-venc.c
+++ b/drivers/clk/mediatek/clk-mt8192-venc.c
@@ -44,6 +44,7 @@ static const struct of_device_id of_match_clk_mt8192_venc[] = {
static struct platform_driver clk_mt8192_venc_drv = {
.probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
.driver = {
.name = "clk-mt8192-venc",
.of_match_table = of_match_clk_mt8192_venc,
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index ebbd2798d9a3..d0f226931070 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -167,22 +167,7 @@ static const char * const mdp_parents[] = {
"mmpll_d5_d2"
};
-static const char * const img1_parents[] = {
- "clk26m",
- "univpll_d4",
- "tvdpll_ck",
- "mainpll_d4",
- "univpll_d5",
- "mmpll_d6",
- "univpll_d6",
- "mainpll_d6",
- "mmpll_d4_d2",
- "mainpll_d4_d2",
- "mmpll_d6_d2",
- "mmpll_d5_d2"
-};
-
-static const char * const img2_parents[] = {
+static const char * const img_parents[] = {
"clk26m",
"univpll_d4",
"tvdpll_ck",
@@ -280,61 +265,6 @@ static const char * const camtg_parents[] = {
"univpll_192m_d32"
};
-static const char * const camtg2_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg3_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg4_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg5_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
-static const char * const camtg6_parents[] = {
- "clk26m",
- "univpll_192m_d8",
- "univpll_d6_d8",
- "univpll_192m_d4",
- "univpll_d6_d16",
- "csw_f26m_d2",
- "univpll_192m_d16",
- "univpll_192m_d32"
-};
-
static const char * const uart_parents[] = {
"clk26m",
"univpll_d6_d8"
@@ -362,15 +292,7 @@ static const char * const msdc50_0_parents[] = {
"univpll_d4_d2"
};
-static const char * const msdc30_1_parents[] = {
- "clk26m",
- "univpll_d6_d2",
- "mainpll_d6_d2",
- "mainpll_d7_d2",
- "msdcpll_d2"
-};
-
-static const char * const msdc30_2_parents[] = {
+static const char * const msdc30_parents[] = {
"clk26m",
"univpll_d6_d2",
"mainpll_d6_d2",
@@ -457,39 +379,6 @@ static const char * const seninf_parents[] = {
"univpll_d5"
};
-static const char * const seninf1_parents[] = {
- "clk26m",
- "univpll_d4_d4",
- "univpll_d6_d2",
- "univpll_d4_d2",
- "univpll_d7",
- "univpll_d6",
- "mmpll_d6",
- "univpll_d5"
-};
-
-static const char * const seninf2_parents[] = {
- "clk26m",
- "univpll_d4_d4",
- "univpll_d6_d2",
- "univpll_d4_d2",
- "univpll_d7",
- "univpll_d6",
- "mmpll_d6",
- "univpll_d5"
-};
-
-static const char * const seninf3_parents[] = {
- "clk26m",
- "univpll_d4_d4",
- "univpll_d6_d2",
- "univpll_d4_d2",
- "univpll_d7",
- "univpll_d6",
- "mmpll_d6",
- "univpll_d5"
-};
-
static const char * const tl_parents[] = {
"clk26m",
"univpll_192m_d2",
@@ -649,52 +538,7 @@ static const char * const sflash_parents[] = {
"univpll_d5_d8"
};
-static const char * const apll_i2s0_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s1_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s2_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s3_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s4_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s5_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s6_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s7_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s8_m_parents[] = {
- "aud_1_sel",
- "aud_2_sel"
-};
-
-static const char * const apll_i2s9_m_parents[] = {
+static const char * const apll_i2s_m_parents[] = {
"aud_1_sel",
"aud_2_sel"
};
@@ -724,9 +568,9 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_MDP_SEL, "mdp_sel",
mdp_parents, 0x020, 0x024, 0x028, 8, 4, 15, 0x004, 5),
MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG1_SEL, "img1_sel",
- img1_parents, 0x020, 0x024, 0x028, 16, 4, 23, 0x004, 6),
+ img_parents, 0x020, 0x024, 0x028, 16, 4, 23, 0x004, 6),
MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG2_SEL, "img2_sel",
- img2_parents, 0x020, 0x024, 0x028, 24, 4, 31, 0x004, 7),
+ img_parents, 0x020, 0x024, 0x028, 24, 4, 31, 0x004, 7),
/* CLK_CFG_2 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE_SEL, "ipe_sel",
ipe_parents, 0x030, 0x034, 0x038, 0, 4, 7, 0x004, 8),
@@ -747,16 +591,16 @@ static const struct mtk_mux top_mtk_muxes[] = {
camtg_parents, 0x050, 0x054, 0x058, 24, 3, 31, 0x004, 19),
/* CLK_CFG_5 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2_SEL, "camtg2_sel",
- camtg2_parents, 0x060, 0x064, 0x068, 0, 3, 7, 0x004, 20),
+ camtg_parents, 0x060, 0x064, 0x068, 0, 3, 7, 0x004, 20),
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3_SEL, "camtg3_sel",
- camtg3_parents, 0x060, 0x064, 0x068, 8, 3, 15, 0x004, 21),
+ camtg_parents, 0x060, 0x064, 0x068, 8, 3, 15, 0x004, 21),
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4_SEL, "camtg4_sel",
- camtg4_parents, 0x060, 0x064, 0x068, 16, 3, 23, 0x004, 22),
+ camtg_parents, 0x060, 0x064, 0x068, 16, 3, 23, 0x004, 22),
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5_SEL, "camtg5_sel",
- camtg5_parents, 0x060, 0x064, 0x068, 24, 3, 31, 0x004, 23),
+ camtg_parents, 0x060, 0x064, 0x068, 24, 3, 31, 0x004, 23),
/* CLK_CFG_6 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG6_SEL, "camtg6_sel",
- camtg6_parents, 0x070, 0x074, 0x078, 0, 3, 7, 0x004, 24),
+ camtg_parents, 0x070, 0x074, 0x078, 0, 3, 7, 0x004, 24),
MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel",
uart_parents, 0x070, 0x074, 0x078, 8, 1, 15, 0x004, 25),
MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel",
@@ -767,9 +611,9 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
msdc50_0_parents, 0x080, 0x084, 0x088, 0, 3, 7, 0x004, 28),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
- msdc30_1_parents, 0x080, 0x084, 0x088, 8, 3, 15, 0x004, 29),
+ msdc30_parents, 0x080, 0x084, 0x088, 8, 3, 15, 0x004, 29),
MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
- msdc30_2_parents, 0x080, 0x084, 0x088, 16, 3, 23, 0x004, 30),
+ msdc30_parents, 0x080, 0x084, 0x088, 16, 3, 23, 0x004, 30),
MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel",
audio_parents, 0x080, 0x084, 0x088, 24, 2, 31, 0x008, 0),
/* CLK_CFG_8 */
@@ -796,12 +640,12 @@ static const struct mtk_mux top_mtk_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF_SEL, "seninf_sel",
seninf_parents, 0x0b0, 0x0b4, 0x0b8, 16, 3, 23, 0x008, 11),
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1_SEL, "seninf1_sel",
- seninf1_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31, 0x008, 12),
+ seninf_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31, 0x008, 12),
/* CLK_CFG_11 */
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2_SEL, "seninf2_sel",
- seninf2_parents, 0x0c0, 0x0c4, 0x0c8, 0, 3, 7, 0x008, 13),
+ seninf_parents, 0x0c0, 0x0c4, 0x0c8, 0, 3, 7, 0x008, 13),
MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3_SEL, "seninf3_sel",
- seninf3_parents, 0x0c0, 0x0c4, 0x0c8, 8, 3, 15, 0x008, 14),
+ seninf_parents, 0x0c0, 0x0c4, 0x0c8, 8, 3, 15, 0x008, 14),
MUX_GATE_CLR_SET_UPD(CLK_TOP_TL_SEL, "tl_sel",
tl_parents, 0x0c0, 0x0c4, 0x0c8, 16, 2, 23, 0x008, 15),
MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC_SEL, "dxcc_sel",
@@ -847,16 +691,16 @@ static const struct mtk_mux top_mtk_muxes[] = {
static struct mtk_composite top_muxes[] = {
/* CLK_AUDDIV_0 */
- MUX(CLK_TOP_APLL_I2S0_M_SEL, "apll_i2s0_m_sel", apll_i2s0_m_parents, 0x320, 16, 1),
- MUX(CLK_TOP_APLL_I2S1_M_SEL, "apll_i2s1_m_sel", apll_i2s1_m_parents, 0x320, 17, 1),
- MUX(CLK_TOP_APLL_I2S2_M_SEL, "apll_i2s2_m_sel", apll_i2s2_m_parents, 0x320, 18, 1),
- MUX(CLK_TOP_APLL_I2S3_M_SEL, "apll_i2s3_m_sel", apll_i2s3_m_parents, 0x320, 19, 1),
- MUX(CLK_TOP_APLL_I2S4_M_SEL, "apll_i2s4_m_sel", apll_i2s4_m_parents, 0x320, 20, 1),
- MUX(CLK_TOP_APLL_I2S5_M_SEL, "apll_i2s5_m_sel", apll_i2s5_m_parents, 0x320, 21, 1),
- MUX(CLK_TOP_APLL_I2S6_M_SEL, "apll_i2s6_m_sel", apll_i2s6_m_parents, 0x320, 22, 1),
- MUX(CLK_TOP_APLL_I2S7_M_SEL, "apll_i2s7_m_sel", apll_i2s7_m_parents, 0x320, 23, 1),
- MUX(CLK_TOP_APLL_I2S8_M_SEL, "apll_i2s8_m_sel", apll_i2s8_m_parents, 0x320, 24, 1),
- MUX(CLK_TOP_APLL_I2S9_M_SEL, "apll_i2s9_m_sel", apll_i2s9_m_parents, 0x320, 25, 1),
+ MUX(CLK_TOP_APLL_I2S0_M_SEL, "apll_i2s0_m_sel", apll_i2s_m_parents, 0x320, 16, 1),
+ MUX(CLK_TOP_APLL_I2S1_M_SEL, "apll_i2s1_m_sel", apll_i2s_m_parents, 0x320, 17, 1),
+ MUX(CLK_TOP_APLL_I2S2_M_SEL, "apll_i2s2_m_sel", apll_i2s_m_parents, 0x320, 18, 1),
+ MUX(CLK_TOP_APLL_I2S3_M_SEL, "apll_i2s3_m_sel", apll_i2s_m_parents, 0x320, 19, 1),
+ MUX(CLK_TOP_APLL_I2S4_M_SEL, "apll_i2s4_m_sel", apll_i2s_m_parents, 0x320, 20, 1),
+ MUX(CLK_TOP_APLL_I2S5_M_SEL, "apll_i2s5_m_sel", apll_i2s_m_parents, 0x320, 21, 1),
+ MUX(CLK_TOP_APLL_I2S6_M_SEL, "apll_i2s6_m_sel", apll_i2s_m_parents, 0x320, 22, 1),
+ MUX(CLK_TOP_APLL_I2S7_M_SEL, "apll_i2s7_m_sel", apll_i2s_m_parents, 0x320, 23, 1),
+ MUX(CLK_TOP_APLL_I2S8_M_SEL, "apll_i2s8_m_sel", apll_i2s_m_parents, 0x320, 24, 1),
+ MUX(CLK_TOP_APLL_I2S9_M_SEL, "apll_i2s9_m_sel", apll_i2s_m_parents, 0x320, 25, 1),
};
static const struct mtk_composite top_adj_divs[] = {
@@ -1224,6 +1068,28 @@ static void clk_mt8192_top_init_early(struct device_node *node)
CLK_OF_DECLARE_DRIVER(mt8192_topckgen, "mediatek,mt8192-topckgen",
clk_mt8192_top_init_early);
+/* Register mux notifier for MFG mux */
+static int clk_mt8192_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+ int i;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(top_mtk_muxes); i++)
+ if (top_mtk_muxes[i].id == CLK_TOP_MFG_PLL_SEL)
+ break;
+ if (i == ARRAY_SIZE(top_mtk_muxes))
+ return -EINVAL;
+
+ mfg_mux_nb->ops = top_mtk_muxes[i].ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to 26M crystal */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
static int clk_mt8192_top_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -1247,6 +1113,12 @@ static int clk_mt8192_top_probe(struct platform_device *pdev)
if (r)
return r;
+ r = clk_mt8192_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MFG_PLL_SEL]->clk);
+ if (r)
+ return r;
+
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
top_clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index 97657f255618..fcd410461d3b 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -55,8 +55,12 @@ static const struct mtk_gate_regs infra_ao4_cg_regs = {
#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \
GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0)
+#define GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &infra_ao2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, 0)
#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \
GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \
@@ -136,8 +140,11 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_SYS, "infra_ao_unipro_sys", "top_ufs", 11),
GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_TICK, "infra_ao_unipro_tick", "top_ufs_tick1us", 12),
GATE_INFRA_AO2(CLK_INFRA_AO_UFS_MP_SAP_B, "infra_ao_ufs_mp_sap_b", "top_ufs_mp_sap_cfg", 13),
- GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU, "infra_ao_pwrmcu", "top_pwrmcu", 15),
- GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU_BUS_H, "infra_ao_pwrmcu_bus_h", "top_axi", 17),
+ /* pwrmcu is used by ATF for platform PM: clocks must never be disabled by the kernel */
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_PWRMCU, "infra_ao_pwrmcu", "top_pwrmcu", 15,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_PWRMCU_BUS_H, "infra_ao_pwrmcu_bus_h", "top_axi", 17,
+ CLK_IS_CRITICAL),
GATE_INFRA_AO2(CLK_INFRA_AO_APDMA_B, "infra_ao_apdma_b", "top_axi", 18),
GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25),
GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26),
@@ -193,6 +200,9 @@ static u16 infra_ao_rst_ofs[] = {
static u16 infra_ao_idx_map[] = {
[MT8195_INFRA_RST0_THERM_CTRL_SWRST] = 0 * RST_NR_PER_BANK + 0,
+ [MT8195_INFRA_RST2_USBSIF_P1_SWRST] = 2 * RST_NR_PER_BANK + 18,
+ [MT8195_INFRA_RST2_PCIE_P0_SWRST] = 2 * RST_NR_PER_BANK + 26,
+ [MT8195_INFRA_RST2_PCIE_P1_SWRST] = 2 * RST_NR_PER_BANK + 27,
[MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST] = 3 * RST_NR_PER_BANK + 5,
[MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST] = 4 * RST_NR_PER_BANK + 10,
};
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
index 9411c556a5a9..c94cb71bd9b9 100644
--- a/drivers/clk/mediatek/clk-mt8195-mfg.c
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -17,10 +17,12 @@ static const struct mtk_gate_regs mfg_cg_regs = {
};
#define GATE_MFG(_id, _name, _parent, _shift) \
- GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ GATE_MTK_FLAGS(_id, _name, _parent, &mfg_cg_regs, \
+ _shift, &mtk_clk_gate_ops_setclr, \
+ CLK_SET_RATE_PARENT)
static const struct mtk_gate mfg_clks[] = {
- GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "top_mfg_core_tmp", 0),
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_ck_fast_ref", 0),
};
static const struct mtk_clk_desc mfg_desc = {
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index ec70e1f65eaf..8cbab5ca2e58 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -298,11 +298,14 @@ static const char * const ipu_if_parents[] = {
"mmpll_d4"
};
+/*
+ * MFG can also be parented to "univpll_d6" and "univpll_d7":
+ * these have been removed from the parents list to let us
+ * achieve GPU DVFS without any special clock handlers.
+ */
static const char * const mfg_parents[] = {
"clk26m",
- "mainpll_d5_d2",
- "univpll_d6",
- "univpll_d7"
+ "mainpll_d5_d2"
};
static const char * const camtg_parents[] = {
@@ -1149,11 +1152,6 @@ static const struct mtk_mux top_mtk_muxes[] = {
*/
};
-static struct mtk_composite top_muxes[] = {
- /* CLK_MISC_CFG_3 */
- MUX(CLK_TOP_MFG_CK_FAST_REF, "mfg_ck_fast_ref", mfg_fast_parents, 0x0250, 8, 1),
-};
-
static const struct mtk_composite top_adj_divs[] = {
DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "top_i2si1_mck", 0x0320, 0, 0x0328, 8, 0),
DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "top_i2si2_mck", 0x0320, 1, 0x0328, 8, 8),
@@ -1222,10 +1220,26 @@ static const struct of_device_id of_match_clk_mt8195_topck[] = {
{}
};
+/* Register mux notifier for MFG mux */
+static int clk_mt8195_reg_mfg_mux_notifier(struct device *dev, struct clk *clk)
+{
+ struct mtk_mux_nb *mfg_mux_nb;
+
+ mfg_mux_nb = devm_kzalloc(dev, sizeof(*mfg_mux_nb), GFP_KERNEL);
+ if (!mfg_mux_nb)
+ return -ENOMEM;
+
+ mfg_mux_nb->ops = &clk_mux_ops;
+ mfg_mux_nb->bypass_index = 0; /* Bypass to TOP_MFG_CORE_TMP */
+
+ return devm_mtk_clk_mux_notifier_register(dev, clk, mfg_mux_nb);
+}
+
static int clk_mt8195_topck_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *top_clk_data;
struct device_node *node = pdev->dev.of_node;
+ struct clk_hw *hw;
int r;
void __iomem *base;
@@ -1253,15 +1267,22 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_factors;
- r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8195_clk_lock, top_clk_data);
+ hw = devm_clk_hw_register_mux(&pdev->dev, "mfg_ck_fast_ref", mfg_fast_parents,
+ ARRAY_SIZE(mfg_fast_parents), CLK_SET_RATE_PARENT,
+ (base + 0x250), 8, 1, 0, &mt8195_clk_lock);
+ if (IS_ERR(hw)) {
+ r = PTR_ERR(hw);
+ goto unregister_muxes;
+ }
+ top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF] = hw;
+
+ r = clk_mt8195_reg_mfg_mux_notifier(&pdev->dev,
+ top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF]->clk);
if (r)
goto unregister_muxes;
r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
&mt8195_clk_lock, top_clk_data);
if (r)
- goto unregister_composite_muxes;
+ goto unregister_muxes;
r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
if (r)
@@ -1279,8 +1300,6 @@ unregister_gates:
mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
unregister_composite_divs:
mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
-unregister_composite_muxes:
- mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
unregister_muxes:
mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
unregister_factors:
@@ -1300,7 +1319,6 @@ static int clk_mt8195_topck_remove(struct platform_device *pdev)
of_clk_del_provider(node);
mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
- mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 261a7f76dd3c..07b46bfd5040 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -37,6 +37,10 @@ static const struct mtk_gate_regs vdo0_2_cg_regs = {
#define GATE_VDO0_2(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO0_2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &vdo0_2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
+
static const struct mtk_gate vdo0_clks[] = {
/* VDO0_0 */
GATE_VDO0_0(CLK_VDO0_DISP_OVL0, "vdo0_disp_ovl0", "top_vpp", 0),
@@ -85,7 +89,8 @@ static const struct mtk_gate vdo0_clks[] = {
/* VDO0_2 */
GATE_VDO0_2(CLK_VDO0_DSI0_DSI, "vdo0_dsi0_dsi", "top_dsi_occ", 0),
GATE_VDO0_2(CLK_VDO0_DSI1_DSI, "vdo0_dsi1_dsi", "top_dsi_occ", 8),
- GATE_VDO0_2(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf", "top_edp", 16),
+ GATE_VDO0_2_FLAGS(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf",
+ "top_edp", 16, CLK_SET_RATE_PARENT),
};
static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 3378487d2c90..835335b9d87b 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -34,6 +34,12 @@ static const struct mtk_gate_regs vdo1_3_cg_regs = {
.sta_ofs = 0x140,
};
+static const struct mtk_gate_regs vdo1_4_cg_regs = {
+ .set_ofs = 0x400,
+ .clr_ofs = 0x400,
+ .sta_ofs = 0x400,
+};
+
#define GATE_VDO1_0(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
@@ -43,9 +49,16 @@ static const struct mtk_gate_regs vdo1_3_cg_regs = {
#define GATE_VDO1_2(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_2_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &vdo1_2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flags)
+
#define GATE_VDO1_3(_id, _name, _parent, _shift) \
GATE_MTK(_id, _name, _parent, &vdo1_3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+#define GATE_VDO1_4(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_4_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
static const struct mtk_gate vdo1_clks[] = {
/* VDO1_0 */
GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
@@ -99,10 +112,12 @@ static const struct mtk_gate vdo1_clks[] = {
GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI0, "vdo1_disp_monitor_dpi0", "top_vpp", 1),
GATE_VDO1_2(CLK_VDO1_DPI1, "vdo1_dpi1", "top_vpp", 8),
GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI1, "vdo1_disp_monitor_dpi1", "top_vpp", 9),
- GATE_VDO1_2(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_vpp", 16),
+ GATE_VDO1_2_FLAGS(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_dp", 16, CLK_SET_RATE_PARENT),
GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf", "top_vpp", 17),
/* VDO1_3 */
GATE_VDO1_3(CLK_VDO1_26M_SLOW, "vdo1_26m_slow", "clk26m", 8),
+ /* VDO1_4 */
+ GATE_VDO1_4(CLK_VDO1_DPI1_HDMI, "vdo1_dpi1_hdmi", "hdmi_txpll", 0),
};
static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
diff --git a/drivers/clk/mediatek/clk-mt8365-apu.c b/drivers/clk/mediatek/clk-mt8365-apu.c
new file mode 100644
index 000000000000..91ffe89d9721
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-apu.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs apu_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_APU(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apu_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate apu_clks[] = {
+ GATE_APU(CLK_APU_AHB, "apu_ahb", "ifr_apu_axi", 5),
+ GATE_APU(CLK_APU_EDMA, "apu_edma", "apu_sel", 4),
+ GATE_APU(CLK_APU_IF_CK, "apu_if_ck", "apu_if_sel", 3),
+ GATE_APU(CLK_APU_JTAG, "apu_jtag", "clk26m", 2),
+ GATE_APU(CLK_APU_AXI, "apu_axi", "apu_sel", 1),
+ GATE_APU(CLK_APU_IPU_CK, "apu_ck", "apu_sel", 0),
+};
+
+static const struct mtk_clk_desc apu_desc = {
+ .clks = apu_clks,
+ .num_clks = ARRAY_SIZE(apu_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_apu[] = {
+ {
+ .compatible = "mediatek,mt8365-apu",
+ .data = &apu_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_apu_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-apu",
+ .of_match_table = of_match_clk_mt8365_apu,
+ },
+};
+builtin_platform_driver(clk_mt8365_apu_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-cam.c b/drivers/clk/mediatek/clk-mt8365-cam.c
new file mode 100644
index 000000000000..31d5b5cd6de1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-cam.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB2, "cam_larb2", "mm_sel", 0),
+ GATE_CAM(CLK_CAM, "cam", "mm_sel", 6),
+ GATE_CAM(CLK_CAMTG, "camtg", "mm_sel", 7),
+ GATE_CAM(CLK_CAM_SENIF, "cam_senif", "mm_sel", 8),
+ GATE_CAM(CLK_CAMSV0, "camsv0", "mm_sel", 9),
+ GATE_CAM(CLK_CAMSV1, "camsv1", "mm_sel", 10),
+ GATE_CAM(CLK_CAM_FDVT, "cam_fdvt", "mm_sel", 11),
+ GATE_CAM(CLK_CAM_WPE, "cam_wpe", "mm_sel", 12),
+};
+
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_cam[] = {
+ {
+ .compatible = "mediatek,mt8365-imgsys",
+ .data = &cam_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-cam",
+ .of_match_table = of_match_clk_mt8365_cam,
+ },
+};
+builtin_platform_driver(clk_mt8365_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mfg.c b/drivers/clk/mediatek/clk-mt8365-mfg.c
new file mode 100644
index 000000000000..587b49128b03
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-mfg.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mfg0_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs mfg1_cg_regs = {
+ .set_ofs = 0x280,
+ .clr_ofs = 0x280,
+ .sta_ofs = 0x280,
+};
+
+#define GATE_MFG0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_MFG1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ /* MFG0 */
+ GATE_MFG0(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+ /* MFG1 */
+ GATE_MFG1(CLK_MFG_MBIST_DIAG, "mfg_mbist_diag", "mbist_diag_sel", 24),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_mfg[] = {
+ {
+ .compatible = "mediatek,mt8365-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_mfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-mfg",
+ .of_match_table = of_match_clk_mt8365_mfg,
+ },
+};
+builtin_platform_driver(clk_mt8365_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
new file mode 100644
index 000000000000..5c8bf18ab1f1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre, SAS
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 0),
+ GATE_MM0(CLK_MM_MM_MDP_CCORR0, "mm_mdp_ccorr0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MM_DISP_RSZ0, "mm_disp_rsz0", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 10),
+ GATE_MM0(CLK_MM_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 13),
+ GATE_MM0(CLK_MM_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 15),
+ GATE_MM0(CLK_MM_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
+ GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
+ GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
+ GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
+ GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
+ GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_MM_SMI_COMM0, "mm_smi_comm0", "mm_sel", 24),
+ GATE_MM0(CLK_MM_MM_SMI_COMM1, "mm_smi_comm1", "mm_sel", 25),
+ GATE_MM0(CLK_MM_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 26),
+ GATE_MM0(CLK_MM_MM_SMI_IMG, "mm_smi_img", "mm_sel", 27),
+ GATE_MM0(CLK_MM_MM_SMI_CAM, "mm_smi_cam", "mm_sel", 28),
+ GATE_MM0(CLK_MM_IMG_IMG_DL_RELAY, "mm_dl_relay", "mm_sel", 29),
+ GATE_MM0(CLK_MM_IMG_IMG_DL_ASYNC_TOP, "mm_dl_async_top", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DSI0_DIG_DSI, "mm_dsi0_dig_dsi", "dsi0_lntc_dsick", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_26M_HRTWT, "mm_f26m_hrtwt", "clk26m", 0),
+ GATE_MM1(CLK_MM_MM_DPI0, "mm_dpi0", "mm_sel", 1),
+ GATE_MM1(CLK_MM_LVDSTX_PXL, "mm_flvdstx_pxl", "vpll_dpix", 2),
+ GATE_MM1(CLK_MM_LVDSTX_CTS, "mm_flvdstx_cts", "lvdstx_dig_cts", 3),
+};
+
+static int clk_mt8365_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_hw_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ ret = mtk_clk_register_gates_with_dev(node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data,
+ dev);
+ if (ret)
+ goto err_free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto err_unregister_gates;
+
+ return 0;
+
+err_unregister_gates:
+ mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+
+err_free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static struct platform_driver clk_mt8365_mm_drv = {
+ .probe = clk_mt8365_mm_probe,
+ .driver = {
+ .name = "clk-mt8365-mm",
+ },
+};
+builtin_platform_driver(clk_mt8365_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-vdec.c b/drivers/clk/mediatek/clk-mt8365-vdec.c
new file mode 100644
index 000000000000..cdc678e8941c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-vdec.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_VDEC, "vdec_fvdec_ck", "mm_sel", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LARB1, "vdec_flarb1_ck", "mm_sel", 0),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_vdec[] = {
+ {
+ .compatible = "mediatek,mt8365-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-vdec",
+ .of_match_table = of_match_clk_mt8365_vdec,
+ },
+};
+builtin_platform_driver(clk_mt8365_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365-venc.c b/drivers/clk/mediatek/clk-mt8365-venc.c
new file mode 100644
index 000000000000..0e080c22119d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365-venc.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ /* VENC */
+ GATE_VENC(CLK_VENC, "venc_fvenc_ck", "mm_sel", 4),
+ GATE_VENC(CLK_VENC_JPGENC, "venc_jpgenc_ck", "mm_sel", 8),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8365_venc[] = {
+ {
+ .compatible = "mediatek,mt8365-vencsys",
+ .data = &venc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8365_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8365-venc",
+ .of_match_table = of_match_clk_mt8365_venc,
+ },
+};
+builtin_platform_driver(clk_mt8365_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
new file mode 100644
index 000000000000..adfecb618f10
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -0,0 +1,1155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ */
+
+#include <dt-bindings/clock/mediatek,mt8365-clk.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+static DEFINE_SPINLOCK(mt8365_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_I2S0_BCK, "i2s0_bck", NULL, 26000000),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC_DSICK, "dsi0_lntc_dsick", "clk26m",
+ 75000000),
+ FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", "clk26m", 75000000),
+ FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx_dig_cts", "clk26m",
+ 52500000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_SYS_26M_D2, "sys_26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "mainpll", 1, 24),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ_en", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MFGPLL, "mfgpll_ck", "mfgpll", 1, 1),
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8),
+ FACTOR(CLK_TOP_LVDSPLL_D16, "lvdspll_d16", "lvdspll", 1, 16),
+ FACTOR(CLK_TOP_USB20_192M, "usb20_192m_ck", "usb20_en", 1, 13),
+ FACTOR(CLK_TOP_USB20_192M_D4, "usb20_192m_d4", "usb20_192m_ck", 1, 4),
+ FACTOR(CLK_TOP_USB20_192M_D8, "usb20_192m_d8", "usb20_192m_ck", 1, 8),
+ FACTOR(CLK_TOP_USB20_192M_D16, "usb20_192m_d16", "usb20_192m_ck",
+ 1, 16),
+ FACTOR(CLK_TOP_USB20_192M_D32, "usb20_192m_d32", "usb20_192m_ck",
+ 1, 32),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1, 8),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1, 8),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_DSPPLL, "dsppll_ck", "dsppll", 1, 1),
+ FACTOR(CLK_TOP_DSPPLL_D2, "dsppll_d2", "dsppll", 1, 2),
+ FACTOR(CLK_TOP_DSPPLL_D4, "dsppll_d4", "dsppll", 1, 4),
+ FACTOR(CLK_TOP_DSPPLL_D8, "dsppll_d8", "dsppll", 1, 8),
+ FACTOR(CLK_TOP_APUPLL, "apupll_ck", "apupll", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D52, "clk26m_d52", "clk26m", 1, 52),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll_d7",
+ "syspll1_d4",
+ "syspll3_d2"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll_d3",
+ "syspll1_d2"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "mmpll_d2"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "univpll2_d2",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll_ck",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll1_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "usb20_192m_d8",
+ "univpll2_d8",
+ "usb20_192m_d4",
+ "univpll2_d32",
+ "usb20_192m_d16",
+ "usb20_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const msdc50_0_hc_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll1_d4",
+ "syspll2_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll2_d2",
+ "univpll1_d4",
+ "syspll4_d2"
+};
+
+static const char * const msdc50_2_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll2_d2",
+ "syspll2_d2",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "univpll2_d2",
+ "syspll2_d2",
+ "univpll1_d4",
+ "syspll1_d4",
+ "syspll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+ "clk26m",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const aud_spdif_parents[] = {
+ "clk26m",
+ "univpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll1_d4",
+ "syspll1_d8"
+};
+
+static const char * const ssusb_sys_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "univpll2_d4",
+ "univpll3_d2"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "univpll3_d2",
+ "syspll1_d8",
+ "syspll2_d8"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "syspll1_d8"
+};
+
+static const char * const senif_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "univpll2_d2",
+ "univpll1_d2",
+ "syspll1_d2"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "lvdspll_d2",
+ "lvdspll_d4",
+ "lvdspll_d8",
+ "lvdspll_d16"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "sys_26m_d2",
+ "dsppll_ck",
+ "dsppll_d2",
+ "dsppll_d4",
+ "dsppll_d8"
+};
+
+static const char * const nfi2x_parents[] = {
+ "clk26m",
+ "syspll2_d2",
+ "syspll_d7",
+ "syspll_d3",
+ "syspll2_d4",
+ "msdcpll_d2",
+ "univpll1_d2",
+ "univpll_d5"
+};
+
+static const char * const nfiecc_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "univpll2_d4",
+ "syspll_d7",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const ecc_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "univpll1_d2",
+ "univpll_d3",
+ "syspll_d2"
+};
+
+static const char * const eth_parents[] = {
+ "clk26m",
+ "univpll2_d8",
+ "syspll4_d4",
+ "syspll1_d8",
+ "syspll4_d2"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "univpll2_d2",
+ "syspll_d3",
+ "syspll2_d2"
+};
+
+static const char * const gcpu_cpm_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "syspll2_d2"
+};
+
+static const char * const apu_parents[] = {
+ "clk26m",
+ "univpll_d2",
+ "apupll_ck",
+ "mmpll_ck",
+ "syspll_d3",
+ "univpll1_d2",
+ "syspll1_d2",
+ "syspll1_d4"
+};
+
+static const char * const mbist_diag_parents[] = {
+ "clk26m",
+ "syspll4_d4",
+ "univpll2_d8"
+};
+
+static const char * const apll_i2s0_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static struct mtk_composite top_misc_mux_gates[] = {
+ /* CLK_CFG_11 */
+ MUX_GATE(CLK_TOP_MBIST_DIAG_SEL, "mbist_diag_sel", mbist_diag_parents,
+ 0x0ec, 0, 2, 7),
+};
+
+struct mt8365_clk_audio_mux {
+ int id;
+ const char *name;
+ u8 shift;
+};
+
+static struct mt8365_clk_audio_mux top_misc_muxes[] = {
+ { CLK_TOP_APLL_I2S0_SEL, "apll_i2s0_sel", 11},
+ { CLK_TOP_APLL_I2S1_SEL, "apll_i2s1_sel", 12},
+ { CLK_TOP_APLL_I2S2_SEL, "apll_i2s2_sel", 13},
+ { CLK_TOP_APLL_I2S3_SEL, "apll_i2s3_sel", 14},
+ { CLK_TOP_APLL_TDMOUT_SEL, "apll_tdmout_sel", 15},
+ { CLK_TOP_APLL_TDMIN_SEL, "apll_tdmin_sel", 16},
+ { CLK_TOP_APLL_SPDIF_SEL, "apll_spdif_sel", 17},
+};
+
+#define CLK_CFG_UPDATE 0x004
+#define CLK_CFG_UPDATE1 0x008
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0x044, 0x048, 0, 2, 7, CLK_CFG_UPDATE,
+ 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x040,
+ 0x044, 0x048, 8, 2, 15, CLK_CFG_UPDATE, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x040, 0x044,
+ 0x048, 16, 3, 23, CLK_CFG_UPDATE, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x040,
+ 0x044, 0x048, 24, 3, 31, CLK_CFG_UPDATE, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x050,
+ 0x054, 0x058, 0, 2, 7, CLK_CFG_UPDATE, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x050,
+ 0x054, 0x058, 8, 2, 15, CLK_CFG_UPDATE, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents,
+ 0x050, 0x054, 0x058, 16, 3, 23, CLK_CFG_UPDATE, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1_SEL, "camtg1_sel", camtg_parents,
+ 0x050, 0x054, 0x058, 24, 3, 31, CLK_CFG_UPDATE, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x060,
+ 0x064, 0x068, 0, 1, 7, CLK_CFG_UPDATE, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x060,
+ 0x064, 0x068, 8, 2, 15, CLK_CFG_UPDATE, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HC_SEL, "msdc50_0_hc_sel",
+ msdc50_0_hc_parents, 0x060, 0x064, 0x068, 16, 2,
+ 23, CLK_CFG_UPDATE, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC2_2_HC_SEL, "msdc2_2_hc_sel",
+ msdc50_0_hc_parents, 0x060, 0x064, 0x068, 24, 2,
+ 31, CLK_CFG_UPDATE, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, 0x070, 0x074, 0x078, 0, 3, 7,
+ CLK_CFG_UPDATE, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_2_SEL, "msdc50_2_sel",
+ msdc50_2_parents, 0x070, 0x074, 0x078, 8, 3, 15,
+ CLK_CFG_UPDATE, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, 0x070, 0x074, 0x078, 16, 3, 23,
+ CLK_CFG_UPDATE, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x070, 0x074, 0x078, 24, 2, 31, CLK_CFG_UPDATE,
+ 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, 0x080, 0x084, 0x088, 0, 2, 7,
+ CLK_CFG_UPDATE, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents,
+ 0x080, 0x084, 0x088, 8, 1, 15, CLK_CFG_UPDATE, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents,
+ 0x080, 0x084, 0x088, 16, 1, 23, CLK_CFG_UPDATE,
+ 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel",
+ aud_engen1_parents, 0x080, 0x084, 0x088, 24, 2, 31,
+ CLK_CFG_UPDATE, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel",
+ aud_engen2_parents, 0x090, 0x094, 0x098, 0, 2, 7,
+ CLK_CFG_UPDATE, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_SPDIF_SEL, "aud_spdif_sel",
+ aud_spdif_parents, 0x090, 0x094, 0x098, 8, 1, 15,
+ CLK_CFG_UPDATE, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM_SEL, "disp_pwm_sel",
+ disp_pwm_parents, 0x090, 0x094, 0x098, 16, 2, 23,
+ CLK_CFG_UPDATE, 22),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DXCC_SEL, "dxcc_sel", dxcc_parents,
+ 0x0a0, 0x0a4, 0x0a8, 0, 2, 7, CLK_CFG_UPDATE,
+ 24, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_SYS_SEL, "ssusb_sys_sel",
+ ssusb_sys_parents, 0x0a0, 0x0a4, 0x0a8, 8, 2, 15,
+ CLK_CFG_UPDATE, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_SEL, "ssusb_xhci_sel",
+ ssusb_sys_parents, 0x0a0, 0x0a4, 0x0a8, 16, 2, 23,
+ CLK_CFG_UPDATE, 26),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM_SEL, "spm_sel", spm_parents,
+ 0x0a0, 0x0a4, 0x0a8, 24, 1, 31,
+ CLK_CFG_UPDATE, 27, CLK_IS_CRITICAL),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, 0x0b0,
+ 0x0b4, 0x0b8, 0, 3, 7, CLK_CFG_UPDATE, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x0b0,
+ 0x0b4, 0x0b8, 8, 2, 15, CLK_CFG_UPDATE, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENIF_SEL, "senif_sel", senif_parents,
+ 0x0b0, 0x0b4, 0x0b8, 16, 2, 23, CLK_CFG_UPDATE,
+ 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE_SEL, "aes_fde_sel",
+ aes_fde_parents, 0x0b0, 0x0b4, 0x0b8, 24, 3, 31,
+ CLK_CFG_UPDATE, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM_SEL, "camtm_sel", senif_parents,
+ 0x0c0, 0x0c4, 0x0c8, 0, 2, 7, CLK_CFG_UPDATE1, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x0c0,
+ 0x0c4, 0x0c8, 8, 3, 15, CLK_CFG_UPDATE1, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi0_parents, 0x0c0,
+ 0x0c4, 0x0c8, 16, 3, 23, CLK_CFG_UPDATE1, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP_SEL, "dsp_sel", dsp_parents, 0x0c0,
+ 0x0c4, 0x0c8, 24, 3, 31, CLK_CFG_UPDATE1, 3),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI2X_SEL, "nfi2x_sel", nfi2x_parents,
+ 0x0d0, 0x0d4, 0x0d8, 0, 3, 7, CLK_CFG_UPDATE1, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+ 0x0d0, 0x0d4, 0x0d8, 8, 3, 15, CLK_CFG_UPDATE1, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ECC_SEL, "ecc_sel", ecc_parents, 0x0d0,
+ 0x0d4, 0x0d8, 16, 3, 23, CLK_CFG_UPDATE1, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETH_SEL, "eth_sel", eth_parents, 0x0d0,
+ 0x0d4, 0x0d8, 24, 3, 31, CLK_CFG_UPDATE1, 7),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0e0,
+ 0x0e4, 0x0e8, 0, 3, 7, CLK_CFG_UPDATE1, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU_CPM_SEL, "gcpu_cpm_sel",
+ gcpu_cpm_parents, 0x0e0, 0x0e4, 0x0e8, 8, 2, 15,
+ CLK_CFG_UPDATE1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APU_SEL, "apu_sel", apu_parents, 0x0e0,
+ 0x0e4, 0x0e8, 16, 3, 23, CLK_CFG_UPDATE1, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APU_IF_SEL, "apu_if_sel", apu_parents,
+ 0x0e0, 0x0e4, 0x0e8, 24, 3, 31, CLK_CFG_UPDATE1,
+ 11),
+};
+
+static const char * const mcu_bus_parents[] = {
+ "clk26m",
+ "armpll",
+ "mainpll",
+ "univpll_d2"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+ /* bus_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0,
+ 9, 2, -1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
+};
+
+#define DIV_ADJ_F(_id, _name, _parent, _reg, _shift, _width, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .clk_divider_flags = _flags, \
+}
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "apll_i2s0_sel",
+ 0x324, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "apll_i2s1_sel",
+ 0x324, 8, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "apll_i2s2_sel",
+ 0x324, 16, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "apll_i2s3_sel",
+ 0x324, 24, 8, CLK_DIVIDER_ROUND_CLOSEST),
+ DIV_ADJ_F(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "apll_spdif_sel",
+ 0x32c, 0, 8, CLK_DIVIDER_ROUND_CLOSEST),
+};
+
+struct mtk_simple_gate {
+ int id;
+ const char *name;
+ const char *parent;
+ u32 reg;
+ u8 shift;
+ unsigned long gate_flags;
+};
+
+static const struct mtk_simple_gate top_clk_gates[] = {
+ { CLK_TOP_CONN_32K, "conn_32k", "clk32k", 0x0, 10, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_CONN_26M, "conn_26m", "clk26m", 0x0, 11, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_DSP_32K, "dsp_32k", "clk32k", 0x0, 16, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_DSP_26M, "dsp_26m", "clk26m", 0x0, 17, CLK_GATE_SET_TO_DISABLE },
+ { CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_192m_d4", 0x104, 8, 0 },
+ { CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "usb20_192m_d4", 0x104, 9, 0 },
+ { CLK_TOP_LVDSTX_CLKDIG_EN, "lvdstx_dig_en", "lvdstx_dig_cts", 0x104, 20, 0 },
+ { CLK_TOP_VPLL_DPIX_EN, "vpll_dpix_en", "vpll_dpix", 0x104, 21, 0 },
+ { CLK_TOP_SSUSB_TOP_CK_EN, "ssusb_top_ck_en", NULL, 0x104, 22, 0 },
+ { CLK_TOP_SSUSB_PHY_CK_EN, "ssusb_phy_ck_en", NULL, 0x104, 23, 0 },
+ { CLK_TOP_AUD_I2S0_M, "aud_i2s0_m_ck", "apll12_ck_div0", 0x320, 0, 0 },
+ { CLK_TOP_AUD_I2S1_M, "aud_i2s1_m_ck", "apll12_ck_div1", 0x320, 1, 0 },
+ { CLK_TOP_AUD_I2S2_M, "aud_i2s2_m_ck", "apll12_ck_div2", 0x320, 2, 0 },
+ { CLK_TOP_AUD_I2S3_M, "aud_i2s3_m_ck", "apll12_ck_div3", 0x320, 3, 0 },
+ { CLK_TOP_AUD_TDMOUT_M, "aud_tdmout_m_ck", "apll12_ck_div4", 0x320, 4, 0 },
+ { CLK_TOP_AUD_TDMOUT_B, "aud_tdmout_b_ck", "apll12_ck_div4b", 0x320, 5, 0 },
+ { CLK_TOP_AUD_TDMIN_M, "aud_tdmin_m_ck", "apll12_ck_div5", 0x320, 6, 0 },
+ { CLK_TOP_AUD_TDMIN_B, "aud_tdmin_b_ck", "apll12_ck_div5b", 0x320, 7, 0 },
+ { CLK_TOP_AUD_SPDIF_M, "aud_spdif_m_ck", "apll12_ck_div6", 0x320, 8, 0 },
+};
+
+static const struct mtk_gate_regs ifr2_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs ifr3_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs ifr4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs ifr5_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+static const struct mtk_gate_regs ifr6_cg_regs = {
+ .set_ofs = 0xd0,
+ .clr_ofs = 0xd4,
+ .sta_ofs = 0xd8,
+};
+
+#define GATE_IFR2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR4(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR6(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr6_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ifr_clks[] = {
+ /* IFR2 */
+ GATE_IFR2(CLK_IFR_PMIC_TMR, "ifr_pmic_tmr", "clk26m", 0),
+ GATE_IFR2(CLK_IFR_PMIC_AP, "ifr_pmic_ap", "clk26m", 1),
+ GATE_IFR2(CLK_IFR_PMIC_MD, "ifr_pmic_md", "clk26m", 2),
+ GATE_IFR2(CLK_IFR_PMIC_CONN, "ifr_pmic_conn", "clk26m", 3),
+ GATE_IFR2(CLK_IFR_ICUSB, "ifr_icusb", "axi_sel", 8),
+ GATE_IFR2(CLK_IFR_GCE, "ifr_gce", "axi_sel", 9),
+ GATE_IFR2(CLK_IFR_THERM, "ifr_therm", "axi_sel", 10),
+ GATE_IFR2(CLK_IFR_PWM_HCLK, "ifr_pwm_hclk", "axi_sel", 15),
+ GATE_IFR2(CLK_IFR_PWM1, "ifr_pwm1", "pwm_sel", 16),
+ GATE_IFR2(CLK_IFR_PWM2, "ifr_pwm2", "pwm_sel", 17),
+ GATE_IFR2(CLK_IFR_PWM3, "ifr_pwm3", "pwm_sel", 18),
+ GATE_IFR2(CLK_IFR_PWM4, "ifr_pwm4", "pwm_sel", 19),
+ GATE_IFR2(CLK_IFR_PWM5, "ifr_pwm5", "pwm_sel", 20),
+ GATE_IFR2(CLK_IFR_PWM, "ifr_pwm", "pwm_sel", 21),
+ GATE_IFR2(CLK_IFR_UART0, "ifr_uart0", "uart_sel", 22),
+ GATE_IFR2(CLK_IFR_UART1, "ifr_uart1", "uart_sel", 23),
+ GATE_IFR2(CLK_IFR_UART2, "ifr_uart2", "uart_sel", 24),
+ GATE_IFR2(CLK_IFR_DSP_UART, "ifr_dsp_uart", "uart_sel", 26),
+ GATE_IFR2(CLK_IFR_GCE_26M, "ifr_gce_26m", "clk26m", 27),
+ GATE_IFR2(CLK_IFR_CQ_DMA_FPC, "ifr_cq_dma_fpc", "axi_sel", 28),
+ GATE_IFR2(CLK_IFR_BTIF, "ifr_btif", "axi_sel", 31),
+ /* IFR3 */
+ GATE_IFR3(CLK_IFR_SPI0, "ifr_spi0", "spi_sel", 1),
+ GATE_IFR3(CLK_IFR_MSDC0_HCLK, "ifr_msdc0", "msdc50_0_hc_sel", 2),
+ GATE_IFR3(CLK_IFR_MSDC2_HCLK, "ifr_msdc2", "msdc2_2_hc_sel", 3),
+ GATE_IFR3(CLK_IFR_MSDC1_HCLK, "ifr_msdc1", "axi_sel", 4),
+ GATE_IFR3(CLK_IFR_DVFSRC, "ifr_dvfsrc", "clk26m", 7),
+ GATE_IFR3(CLK_IFR_GCPU, "ifr_gcpu", "axi_sel", 8),
+ GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_sel", 9),
+ GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "clk26m", 10),
+ GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "clk26m", 14),
+ GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_sel", 18),
+ GATE_IFR3(CLK_IFR_DEBUGSYS, "ifr_debugsys", "axi_sel", 24),
+ GATE_IFR3(CLK_IFR_AUDIO, "ifr_audio", "axi_sel", 25),
+ /* IFR4 */
+ GATE_IFR4(CLK_IFR_PWM_FBCLK6, "ifr_pwm_fbclk6", "pwm_sel", 0),
+ GATE_IFR4(CLK_IFR_DISP_PWM, "ifr_disp_pwm", "disp_pwm_sel", 2),
+ GATE_IFR4(CLK_IFR_AUD_26M_BK, "ifr_aud_26m_bk", "clk26m", 4),
+ GATE_IFR4(CLK_IFR_CQ_DMA, "ifr_cq_dma", "axi_sel", 27),
+ /* IFR5 */
+ GATE_IFR5(CLK_IFR_MSDC0_SF, "ifr_msdc0_sf", "msdc50_0_sel", 0),
+ GATE_IFR5(CLK_IFR_MSDC1_SF, "ifr_msdc1_sf", "msdc50_0_sel", 1),
+ GATE_IFR5(CLK_IFR_MSDC2_SF, "ifr_msdc2_sf", "msdc50_0_sel", 2),
+ GATE_IFR5(CLK_IFR_AP_MSDC0, "ifr_ap_msdc0", "msdc50_0_sel", 7),
+ GATE_IFR5(CLK_IFR_MD_MSDC0, "ifr_md_msdc0", "msdc50_0_sel", 8),
+ GATE_IFR5(CLK_IFR_MSDC0_SRC, "ifr_msdc0_src", "msdc50_0_sel", 9),
+ GATE_IFR5(CLK_IFR_MSDC1_SRC, "ifr_msdc1_src", "msdc30_1_sel", 10),
+ GATE_IFR5(CLK_IFR_MSDC2_SRC, "ifr_msdc2_src", "msdc50_2_sel", 11),
+ GATE_IFR5(CLK_IFR_PWRAP_TMR, "ifr_pwrap_tmr", "clk26m", 12),
+ GATE_IFR5(CLK_IFR_PWRAP_SPI, "ifr_pwrap_spi", "clk26m", 13),
+ GATE_IFR5(CLK_IFR_PWRAP_SYS, "ifr_pwrap_sys", "clk26m", 14),
+ GATE_IFR5(CLK_IFR_IRRX_26M, "ifr_irrx_26m", "clk26m", 22),
+ GATE_IFR5(CLK_IFR_IRRX_32K, "ifr_irrx_32k", "clk32k", 23),
+ GATE_IFR5(CLK_IFR_I2C0_AXI, "ifr_i2c0_axi", "i2c_sel", 24),
+ GATE_IFR5(CLK_IFR_I2C1_AXI, "ifr_i2c1_axi", "i2c_sel", 25),
+ GATE_IFR5(CLK_IFR_I2C2_AXI, "ifr_i2c2_axi", "i2c_sel", 26),
+ GATE_IFR5(CLK_IFR_I2C3_AXI, "ifr_i2c3_axi", "i2c_sel", 27),
+ GATE_IFR5(CLK_IFR_NIC_AXI, "ifr_nic_axi", "axi_sel", 28),
+ GATE_IFR5(CLK_IFR_NIC_SLV_AXI, "ifr_nic_slv_axi", "axi_sel", 29),
+ GATE_IFR5(CLK_IFR_APU_AXI, "ifr_apu_axi", "axi_sel", 30),
+ /* IFR6 */
+ GATE_IFR6(CLK_IFR_NFIECC, "ifr_nfiecc", "nfiecc_sel", 0),
+ GATE_IFR6(CLK_IFR_NFI1X_BK, "ifr_nfi1x_bk", "nfi2x_sel", 1),
+ GATE_IFR6(CLK_IFR_NFIECC_BK, "ifr_nfiecc_bk", "nfi2x_sel", 2),
+ GATE_IFR6(CLK_IFR_NFI_BK, "ifr_nfi_bk", "axi_sel", 3),
+ GATE_IFR6(CLK_IFR_MSDC2_AP_BK, "ifr_msdc2_ap_bk", "axi_sel", 4),
+ GATE_IFR6(CLK_IFR_MSDC2_MD_BK, "ifr_msdc2_md_bk", "axi_sel", 5),
+ GATE_IFR6(CLK_IFR_MSDC2_BK, "ifr_msdc2_bk", "axi_sel", 6),
+ GATE_IFR6(CLK_IFR_SUSB_133_BK, "ifr_susb_133_bk", "axi_sel", 7),
+ GATE_IFR6(CLK_IFR_SUSB_66_BK, "ifr_susb_66_bk", "axi_sel", 8),
+ GATE_IFR6(CLK_IFR_SSUSB_SYS, "ifr_ssusb_sys", "ssusb_sys_sel", 9),
+ GATE_IFR6(CLK_IFR_SSUSB_REF, "ifr_ssusb_ref", "ssusb_sys_sel", 10),
+ GATE_IFR6(CLK_IFR_SSUSB_XHCI, "ifr_ssusb_xhci", "ssusb_xhci_sel", 11),
+};
+
+static const struct mtk_simple_gate peri_clks[] = {
+ { CLK_PERIAXI, "periaxi", "axi_sel", 0x20c, 31, 0 },
+};
+
+#define MT8365_PLL_FMAX (3800UL * MHZ)
+#define MT8365_PLL_FMIN (1500UL * MHZ)
+#define CON0_MT8365_RST_BAR BIT(23)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table, \
+ _rst_bar_mask, _pcw_chg_reg) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8365_PLL_FMAX, \
+ .fmin = MT8365_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = 8, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift, _rst_bar_mask, _pcw_chg_reg) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, NULL, _rst_bar_mask, \
+ _pcw_chg_reg) \
+
+static const struct mtk_pll_div_table armpll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1500 * MHZ },
+ { .div = 2, .freq = 750 * MHZ },
+ { .div = 3, .freq = 375 * MHZ },
+ { .div = 4, .freq = 182500000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 800 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table dsppll_div_table[] = {
+ { .div = 0, .freq = MT8365_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 600 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_ARMPLL, "armpll", 0x030C, 0x0318, 0x00000001, PLL_AO,
+ 22, 0x0310, 24, 0, 0, 0, 0x0310, 0, armpll_div_table, 0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0228, 0x0234, 0xFF000001,
+ HAVE_RST_BAR, 22, 0x022C, 24, 0, 0, 0, 0x022C, 0,
+ CON0_MT8365_RST_BAR, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll2", 0x0208, 0x0214, 0xFF000001,
+ HAVE_RST_BAR, 22, 0x020C, 24, 0, 0, 0, 0x020C, 0,
+ CON0_MT8365_RST_BAR, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0218, 0x0224, 0x00000001, 0, 22,
+ 0x021C, 24, 0, 0, 0, 0x021C, 0, mfgpll_div_table, 0, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0350, 0x035C, 0x00000001, 0, 22,
+ 0x0354, 24, 0, 0, 0, 0x0354, 0, 0, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0330, 0x033C, 0x00000001, 0, 22,
+ 0x0334, 24, 0, 0, 0, 0x0334, 0, 0, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x031C, 0x032C, 0x00000001, 0, 32,
+ 0x0320, 24, 0x0040, 0x000C, 0, 0x0324, 0, 0, 0x0320),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0360, 0x0370, 0x00000001, 0, 32,
+ 0x0364, 24, 0x004C, 0x000C, 5, 0x0368, 0, 0, 0x0364),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0374, 0x0380, 0x00000001, 0, 22,
+ 0x0378, 24, 0, 0, 0, 0x0378, 0, 0, 0),
+ PLL_B(CLK_APMIXED_DSPPLL, "dsppll", 0x0390, 0x039C, 0x00000001, 0, 22,
+ 0x0394, 24, 0, 0, 0, 0x0394, 0, dsppll_div_table, 0, 0),
+ PLL(CLK_APMIXED_APUPLL, "apupll", 0x03A0, 0x03AC, 0x00000001, 0, 22,
+ 0x03A4, 24, 0, 0, 0, 0x03A4, 0, 0, 0),
+};
+
+static int clk_mt8365_apmixed_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct clk_hw *hw;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ hw = devm_clk_hw_register_gate(dev, "univ_en", "univpll2", 0,
+ base + 0x204, 0, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[CLK_APMIXED_UNIV_EN] = hw;
+
+ hw = devm_clk_hw_register_gate(dev, "usb20_en", "univ_en", 0,
+ base + 0x204, 1, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ clk_data->hws[CLK_APMIXED_USB20_EN] = hw;
+
+ ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_plls;
+
+ return 0;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+
+ return ret;
+}
+
+static int
+clk_mt8365_register_mtk_simple_gates(struct device *dev, void __iomem *base,
+ struct clk_hw_onecell_data *clk_data,
+ const struct mtk_simple_gate *gates,
+ unsigned int num_gates)
+{
+ unsigned int i;
+
+ for (i = 0; i != num_gates; ++i) {
+ const struct mtk_simple_gate *gate = &gates[i];
+ struct clk_hw *hw;
+
+ hw = devm_clk_hw_register_gate(dev, gate->name, gate->parent, 0,
+ base + gate->reg, gate->shift,
+ gate->gate_flags, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ clk_data->hws[gate->id] = hw;
+ }
+
+ return 0;
+}
+
+static int clk_mt8365_top_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+ int i;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_fixed_clks(top_fixed_clks,
+ ARRAY_SIZE(top_fixed_clks), clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+ if (ret)
+ goto unregister_fixed_clks;
+
+ ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto unregister_factors;
+
+ ret = mtk_clk_register_composites(top_misc_mux_gates,
+ ARRAY_SIZE(top_misc_mux_gates), base,
+ &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto unregister_muxes;
+
+ for (i = 0; i != ARRAY_SIZE(top_misc_muxes); ++i) {
+ struct mt8365_clk_audio_mux *mux = &top_misc_muxes[i];
+ struct clk_hw *hw;
+
+ hw = devm_clk_hw_register_mux(dev, mux->name, apll_i2s0_parents,
+ ARRAY_SIZE(apll_i2s0_parents),
+ CLK_SET_RATE_PARENT, base + 0x320,
+ mux->shift, 1, 0, NULL);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto unregister_composites;
+ }
+
+ clk_data->hws[mux->id] = hw;
+ }
+
+ ret = mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ ret = clk_mt8365_register_mtk_simple_gates(dev, base, clk_data,
+ top_clk_gates,
+ ARRAY_SIZE(top_clk_gates));
+ if (ret)
+ goto unregister_dividers;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_dividers;
+
+ return 0;
+unregister_dividers:
+ mtk_clk_unregister_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ clk_data);
+unregister_composites:
+ mtk_clk_unregister_composites(top_misc_mux_gates,
+ ARRAY_SIZE(top_misc_mux_gates), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_muxes, ARRAY_SIZE(top_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(top_fixed_clks,
+ ARRAY_SIZE(top_fixed_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static int clk_mt8365_infra_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+ clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_gates;
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(ifr_clks, ARRAY_SIZE(ifr_clks), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static int clk_mt8365_peri_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_devm_alloc_clk_data(dev, CLK_PERI_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = clk_mt8365_register_mtk_simple_gates(dev, base, clk_data,
+ peri_clks,
+ ARRAY_SIZE(peri_clks));
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+
+ return ret;
+}
+
+static int clk_mt8365_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ ret = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
+ base, &mt8365_clk_lock, clk_data);
+ if (ret)
+ goto free_clk_data;
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ goto unregister_composites;
+
+ return 0;
+
+unregister_composites:
+ mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
+ clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return ret;
+}
+
+static const struct of_device_id of_match_clk_mt8365[] = {
+ {
+ .compatible = "mediatek,mt8365-apmixedsys",
+ .data = clk_mt8365_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt8365-topckgen",
+ .data = clk_mt8365_top_probe,
+ }, {
+ .compatible = "mediatek,mt8365-infracfg",
+ .data = clk_mt8365_infra_probe,
+ }, {
+ .compatible = "mediatek,mt8365-pericfg",
+ .data = clk_mt8365_peri_probe,
+ }, {
+ .compatible = "mediatek,mt8365-mcucfg",
+ .data = clk_mt8365_mcu_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt8365_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *pdev);
+ int ret;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ ret = clk_probe(pdev);
+ if (ret)
+ dev_err(&pdev->dev,
+ "%s: could not register clock provider: %d\n",
+ pdev->name, ret);
+
+ return ret;
+}
+
+static struct platform_driver clk_mt8365_drv = {
+ .probe = clk_mt8365_probe,
+ .driver = {
+ .name = "clk-mt8365",
+ .of_match_table = of_match_clk_mt8365,
+ },
+};
+
+static int __init clk_mt8365_init(void)
+{
+ return platform_driver_register(&clk_mt8365_drv);
+}
+arch_initcall(clk_mt8365_init);
+MODULE_LICENSE("GPL");
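
Each FACTOR(id, name, parent, mult, div) entry in top_divs above registers a fixed-factor clock whose rate is parent_rate * mult / div. A worked sketch of that arithmetic follows; the 2 GHz "mainpll" figure is a placeholder for illustration, not a rate taken from this patch.

static unsigned long long example_factor_rate(unsigned long long parent_rate,
					      unsigned int mult, unsigned int div)
{
	return parent_rate * mult / div;	/* fixed-factor clock math */
}

/*
 * With a hypothetical 2 GHz "mainpll":
 *   example_factor_rate(2000000000ULL, 1, 4)  -> 500 MHz ("syspll1_d2")
 *   example_factor_rate(2000000000ULL, 1, 16) -> 125 MHz ("syspll1_d8")
 */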
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 05a188c62119..d31f01d0ba1c 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -18,19 +18,42 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data,
+ unsigned int clk_num)
{
int i;
+
+ clk_data->num = clk_num;
+
+ for (i = 0; i < clk_num; i++)
+ clk_data->hws[i] = ERR_PTR(-ENOENT);
+}
+
+struct clk_hw_onecell_data *mtk_devm_alloc_clk_data(struct device *dev,
+ unsigned int clk_num)
+{
struct clk_hw_onecell_data *clk_data;
- clk_data = kzalloc(struct_size(clk_data, hws, clk_num), GFP_KERNEL);
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, clk_num),
+ GFP_KERNEL);
if (!clk_data)
return NULL;
- clk_data->num = clk_num;
+ mtk_init_clk_data(clk_data, clk_num);
- for (i = 0; i < clk_num; i++)
- clk_data->hws[i] = ERR_PTR(-ENOENT);
+ return clk_data;
+}
+EXPORT_SYMBOL_GPL(mtk_devm_alloc_clk_data);
+
+struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num)
+{
+ struct clk_hw_onecell_data *clk_data;
+
+ clk_data = kzalloc(struct_size(clk_data, hws, clk_num), GFP_KERNEL);
+ if (!clk_data)
+ return NULL;
+
+ mtk_init_clk_data(clk_data, clk_num);
return clk_data;
}
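
mtk_devm_alloc_clk_data() ties the clk_hw_onecell_data lifetime to the device, which is what lets probes such as clk_mt8365_apmixed_probe() and clk_mt8365_peri_probe() above bail out on error without calling mtk_free_clk_data(). A hedged sketch of the resulting pattern; EXAMPLE_NR_CLK and example_probe() are hypothetical names, not identifiers from this series.

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

#include "clk-mtk.h"

#define EXAMPLE_NR_CLK	8	/* hypothetical number of clock IDs */

static int example_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *clk_data;

	clk_data = mtk_devm_alloc_clk_data(&pdev->dev, EXAMPLE_NR_CLK);
	if (!clk_data)
		return -ENOMEM;

	/* ... fill clk_data->hws[] with registered clocks here ... */

	/* No mtk_free_clk_data() on the error path: devm releases it. */
	return of_clk_add_hw_provider(pdev->dev.of_node,
				      of_clk_hw_onecell_get, clk_data);
}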
@@ -80,7 +103,7 @@ err:
if (IS_ERR_OR_NULL(clk_data->hws[rc->id]))
continue;
- clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk);
+ clk_hw_unregister_fixed_rate(clk_data->hws[rc->id]);
clk_data->hws[rc->id] = ERR_PTR(-ENOENT);
}
@@ -102,7 +125,7 @@ void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num,
if (IS_ERR_OR_NULL(clk_data->hws[rc->id]))
continue;
- clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk);
+ clk_hw_unregister_fixed_rate(clk_data->hws[rc->id]);
clk_data->hws[rc->id] = ERR_PTR(-ENOENT);
}
}
@@ -146,7 +169,7 @@ err:
if (IS_ERR_OR_NULL(clk_data->hws[ff->id]))
continue;
- clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk);
+ clk_hw_unregister_fixed_factor(clk_data->hws[ff->id]);
clk_data->hws[ff->id] = ERR_PTR(-ENOENT);
}
@@ -168,7 +191,7 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
if (IS_ERR_OR_NULL(clk_data->hws[ff->id]))
continue;
- clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk);
+ clk_hw_unregister_fixed_factor(clk_data->hws[ff->id]);
clk_data->hws[ff->id] = ERR_PTR(-ENOENT);
}
}
@@ -393,12 +416,13 @@ err:
if (IS_ERR_OR_NULL(clk_data->hws[mcd->id]))
continue;
- mtk_clk_unregister_composite(clk_data->hws[mcd->id]);
+ clk_hw_unregister_divider(clk_data->hws[mcd->id]);
clk_data->hws[mcd->id] = ERR_PTR(-ENOENT);
}
return PTR_ERR(hw);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_dividers);
void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
struct clk_hw_onecell_data *clk_data)
@@ -414,10 +438,11 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
if (IS_ERR_OR_NULL(clk_data->hws[mcd->id]))
continue;
- clk_unregister_divider(clk_data->hws[mcd->id]->clk);
+ clk_hw_unregister_divider(clk_data->hws[mcd->id]);
clk_data->hws[mcd->id] = ERR_PTR(-ENOENT);
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_unregister_dividers);
int mtk_clk_simple_probe(struct platform_device *pdev)
{
@@ -434,7 +459,8 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, mcd->clks, mcd->num_clks, clk_data);
+ r = mtk_clk_register_gates_with_dev(node, mcd->clks, mcd->num_clks,
+ clk_data, &pdev->dev);
if (r)
goto free_data;
@@ -459,6 +485,7 @@ free_data:
mtk_free_clk_data(clk_data);
return r;
}
+EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
int mtk_clk_simple_remove(struct platform_device *pdev)
{
@@ -472,5 +499,6 @@ int mtk_clk_simple_remove(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 1b95c484d5aa..63ae7941aa92 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -184,10 +184,13 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num,
struct clk_hw_onecell_data *clk_data);
struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
+struct clk_hw_onecell_data *mtk_devm_alloc_clk_data(struct device *dev,
+ unsigned int clk_num);
void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data);
struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
+void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw);
struct mtk_clk_desc {
const struct mtk_gate *clks;
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index cd5f9fd8cb98..4421e4859257 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -4,6 +4,7 @@
* Author: Owen Chen <owen.chen@mediatek.com>
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
@@ -259,4 +260,41 @@ void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes);
+/*
+ * This clock notifier is called when the frequency of the parent
+ * PLL clock is to be changed. The idea is to switch the parent to a
+ * stable clock, such as the main oscillator, while the PLL frequency
+ * stabilizes.
+ */
+static int mtk_clk_mux_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *_data)
+{
+ struct clk_notifier_data *data = _data;
+ struct clk_hw *hw = __clk_get_hw(data->clk);
+ struct mtk_mux_nb *mux_nb = to_mtk_mux_nb(nb);
+ int ret = 0;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ mux_nb->original_index = mux_nb->ops->get_parent(hw);
+ ret = mux_nb->ops->set_parent(hw, mux_nb->bypass_index);
+ break;
+ case POST_RATE_CHANGE:
+ case ABORT_RATE_CHANGE:
+ ret = mux_nb->ops->set_parent(hw, mux_nb->original_index);
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
+ struct mtk_mux_nb *mux_nb)
+{
+ mux_nb->nb.notifier_call = mtk_clk_mux_notifier_cb;
+
+ return devm_clk_notifier_register(dev, clk, &mux_nb->nb);
+}
+EXPORT_SYMBOL_GPL(devm_mtk_clk_mux_notifier_register);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 6539c58f5d7d..83ff420f4ebe 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -7,12 +7,14 @@
#ifndef __DRV_CLK_MTK_MUX_H
#define __DRV_CLK_MTK_MUX_H
+#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>
struct clk;
struct clk_hw_onecell_data;
struct clk_ops;
+struct device;
struct device_node;
struct mtk_mux {
@@ -89,4 +91,17 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num,
struct clk_hw_onecell_data *clk_data);
+struct mtk_mux_nb {
+ struct notifier_block nb;
+ const struct clk_ops *ops;
+
+ u8 bypass_index; /* Which parent to temporarily use */
+ u8 original_index; /* Set by notifier callback */
+};
+
+#define to_mtk_mux_nb(_nb) container_of(_nb, struct mtk_mux_nb, nb)
+
+int devm_mtk_clk_mux_notifier_register(struct device *dev, struct clk *clk,
+ struct mtk_mux_nb *mux_nb);
+
#endif /* __DRV_CLK_MTK_MUX_H */
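
The notifier added in clk-mux.c and declared here lets an SoC clock driver park a mux on a stable parent while the PLL it normally feeds from re-locks. A usage sketch under stated assumptions: the generic clk_mux_ops, the bypass index and the example_* names are illustrative, not taken from this series (a per-SoC driver would pass the ops matching its mux type).

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "clk-mux.h"

static struct mtk_mux_nb example_cpu_mux_nb = {
	.ops		= &clk_mux_ops,	/* ops of the mux being bypassed */
	.bypass_index	= 0,		/* e.g. the clk26m parent slot */
};

static int example_register_bypass(struct device *dev, struct clk *cpu_mux)
{
	/* Parent 0 is selected on PRE_RATE_CHANGE and restored afterwards. */
	return devm_mtk_clk_mux_notifier_register(dev, cpu_mux,
						  &example_cpu_mux_nb);
}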
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 179505549a7c..290ceda84ce4 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -228,5 +228,6 @@ int mtk_register_reset_controller_with_dev(struct device *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_register_reset_controller_with_dev);
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 27cd2c1f3f61..434cd8f9de82 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -38,6 +38,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
struct meson_aoclk_reset_controller *rstc;
struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *regmap;
int ret, clkid;
@@ -49,7 +50,9 @@ int meson_aoclkc_probe(struct platform_device *pdev)
if (!rstc)
return -ENOMEM;
- regmap = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index 8d5a5dab955a..0e5e6b57eb20 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -18,6 +18,7 @@ int meson_eeclkc_probe(struct platform_device *pdev)
{
const struct meson_eeclkc_data *data;
struct device *dev = &pdev->dev;
+ struct device_node *np;
struct regmap *map;
int ret, i;
@@ -26,7 +27,9 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return -EINVAL;
/* Get the hhi system controller node */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 8f3b7a94a667..827e78fb16a8 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -3792,12 +3792,15 @@ static void __init meson8b_clkc_init_common(struct device_node *np,
struct clk_hw_onecell_data *clk_hw_onecell_data)
{
struct meson8b_clk_reset *rstc;
+ struct device_node *parent_np;
const char *notifier_clk_name;
struct clk *notifier_clk;
struct regmap *map;
int i, ret;
- map = syscon_node_to_regmap(of_get_parent(np));
+ parent_np = of_get_parent(np);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(map)) {
pr_err("failed to get HHI regmap - Trying obsolete regs\n");
return;
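
The three hunks above apply the same reference-count fix: of_get_parent() returns the parent node with its refcount raised, so the caller must drop that reference once syscon_node_to_regmap() has done the lookup. Reduced to its core, as a sketch using the same calls as the hunks above:

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *example_parent_regmap(struct device *dev)
{
	struct device_node *np;
	struct regmap *regmap;

	np = of_get_parent(dev->of_node);	/* takes a reference */
	regmap = syscon_node_to_regmap(np);
	of_node_put(np);			/* balance of_get_parent() */

	return regmap;
}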
diff --git a/drivers/clk/microchip/Kconfig b/drivers/clk/microchip/Kconfig
index a5a99873c4f5..b46e864b3bd8 100644
--- a/drivers/clk/microchip/Kconfig
+++ b/drivers/clk/microchip/Kconfig
@@ -6,5 +6,6 @@ config COMMON_CLK_PIC32
config MCHP_CLK_MPFS
bool "Clk driver for PolarFire SoC"
depends on (RISCV && SOC_MICROCHIP_POLARFIRE) || COMPILE_TEST
+ select AUXILIARY_BUS
help
Supports Clock Configuration for PolarFire SoC
diff --git a/drivers/clk/microchip/Makefile b/drivers/clk/microchip/Makefile
index 5fa6dcf30a9a..13250e04e46c 100644
--- a/drivers/clk/microchip/Makefile
+++ b/drivers/clk/microchip/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_COMMON_CLK_PIC32) += clk-core.o
obj-$(CONFIG_PIC32MZDA) += clk-pic32mzda.o
obj-$(CONFIG_MCHP_CLK_MPFS) += clk-mpfs.o
+obj-$(CONFIG_MCHP_CLK_MPFS) += clk-mpfs-ccc.o
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
new file mode 100644
index 000000000000..7be028dced63
--- /dev/null
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ *
+ * Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries
+ */
+#include "asm-generic/errno-base.h"
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/microchip,mpfs-clock.h>
+
+/* address offset of control registers */
+#define MPFS_CCC_PLL_CR 0x04u
+#define MPFS_CCC_REF_CR 0x08u
+#define MPFS_CCC_SSCG_2_CR 0x2Cu
+#define MPFS_CCC_POSTDIV01_CR 0x10u
+#define MPFS_CCC_POSTDIV23_CR 0x14u
+
+#define MPFS_CCC_FBDIV_SHIFT 0x00u
+#define MPFS_CCC_FBDIV_WIDTH 0x0Cu
+#define MPFS_CCC_POSTDIV0_SHIFT 0x08u
+#define MPFS_CCC_POSTDIV1_SHIFT 0x18u
+#define MPFS_CCC_POSTDIV2_SHIFT MPFS_CCC_POSTDIV0_SHIFT
+#define MPFS_CCC_POSTDIV3_SHIFT MPFS_CCC_POSTDIV1_SHIFT
+#define MPFS_CCC_POSTDIV_WIDTH 0x06u
+#define MPFS_CCC_REFCLK_SEL BIT(6)
+#define MPFS_CCC_REFDIV_SHIFT 0x08u
+#define MPFS_CCC_REFDIV_WIDTH 0x06u
+
+#define MPFS_CCC_FIXED_DIV 4
+#define MPFS_CCC_OUTPUTS_PER_PLL 4
+#define MPFS_CCC_REFS_PER_PLL 2
+
+struct mpfs_ccc_data {
+ void __iomem **pll_base;
+ struct device *dev;
+ struct clk_hw_onecell_data hw_data;
+};
+
+struct mpfs_ccc_pll_hw_clock {
+ void __iomem *base;
+ const char *name;
+ const struct clk_parent_data *parents;
+ unsigned int id;
+ u32 reg_offset;
+ u32 shift;
+ u32 width;
+ u32 flags;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+#define to_mpfs_ccc_clk(_hw) container_of(_hw, struct mpfs_ccc_pll_hw_clock, hw)
+
+/*
+ * mpfs_ccc_lock prevents anything else from writing to a fabric ccc
+ * while a software locked register is being written.
+ */
+static DEFINE_SPINLOCK(mpfs_ccc_lock);
+
+static const struct clk_parent_data mpfs_ccc_pll0_refs[] = {
+ { .fw_name = "pll0_ref0" },
+ { .fw_name = "pll0_ref1" },
+};
+
+static const struct clk_parent_data mpfs_ccc_pll1_refs[] = {
+ { .fw_name = "pll1_ref0" },
+ { .fw_name = "pll1_ref1" },
+};
+
+static unsigned long mpfs_ccc_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct mpfs_ccc_pll_hw_clock *ccc_hw = to_mpfs_ccc_clk(hw);
+ void __iomem *mult_addr = ccc_hw->base + ccc_hw->reg_offset;
+ void __iomem *ref_div_addr = ccc_hw->base + MPFS_CCC_REF_CR;
+ u32 mult, ref_div;
+
+ mult = readl_relaxed(mult_addr) >> MPFS_CCC_FBDIV_SHIFT;
+ mult &= clk_div_mask(MPFS_CCC_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MPFS_CCC_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MPFS_CCC_REFDIV_WIDTH);
+
+ return prate * mult / (ref_div * MPFS_CCC_FIXED_DIV);
+}
+
+static u8 mpfs_ccc_pll_get_parent(struct clk_hw *hw)
+{
+ struct mpfs_ccc_pll_hw_clock *ccc_hw = to_mpfs_ccc_clk(hw);
+ void __iomem *pll_cr_addr = ccc_hw->base + MPFS_CCC_PLL_CR;
+
+ return !!(readl_relaxed(pll_cr_addr) & MPFS_CCC_REFCLK_SEL);
+}
+
+static const struct clk_ops mpfs_ccc_pll_ops = {
+ .recalc_rate = mpfs_ccc_pll_recalc_rate,
+ .get_parent = mpfs_ccc_pll_get_parent,
+};
+
+#define CLK_CCC_PLL(_id, _parents, _shift, _width, _flags, _offset) { \
+ .id = _id, \
+ .shift = _shift, \
+ .width = _width, \
+ .reg_offset = _offset, \
+ .flags = _flags, \
+ .parents = _parents, \
+}
+
+static struct mpfs_ccc_pll_hw_clock mpfs_ccc_pll_clks[] = {
+ CLK_CCC_PLL(CLK_CCC_PLL0, mpfs_ccc_pll0_refs, MPFS_CCC_FBDIV_SHIFT,
+ MPFS_CCC_FBDIV_WIDTH, 0, MPFS_CCC_SSCG_2_CR),
+ CLK_CCC_PLL(CLK_CCC_PLL1, mpfs_ccc_pll1_refs, MPFS_CCC_FBDIV_SHIFT,
+ MPFS_CCC_FBDIV_WIDTH, 0, MPFS_CCC_SSCG_2_CR),
+};
+
+struct mpfs_ccc_out_hw_clock {
+ struct clk_divider divider;
+ struct clk_init_data init;
+ unsigned int id;
+ u32 reg_offset;
+};
+
+#define CLK_CCC_OUT(_id, _shift, _width, _flags, _offset) { \
+ .id = _id, \
+ .divider.shift = _shift, \
+ .divider.width = _width, \
+ .reg_offset = _offset, \
+ .divider.flags = _flags, \
+ .divider.lock = &mpfs_ccc_lock, \
+}
+
+static struct mpfs_ccc_out_hw_clock mpfs_ccc_pll0out_clks[] = {
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT0, MPFS_CCC_POSTDIV0_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT1, MPFS_CCC_POSTDIV1_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT2, MPFS_CCC_POSTDIV2_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL0_OUT3, MPFS_CCC_POSTDIV3_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+};
+
+static struct mpfs_ccc_out_hw_clock mpfs_ccc_pll1out_clks[] = {
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT0, MPFS_CCC_POSTDIV0_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT1, MPFS_CCC_POSTDIV1_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV01_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT2, MPFS_CCC_POSTDIV2_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+ CLK_CCC_OUT(CLK_CCC_PLL1_OUT3, MPFS_CCC_POSTDIV3_SHIFT, MPFS_CCC_POSTDIV_WIDTH,
+ CLK_DIVIDER_ONE_BASED, MPFS_CCC_POSTDIV23_CR),
+};
+
+static struct mpfs_ccc_out_hw_clock *mpfs_ccc_pllout_clks[] = {
+ mpfs_ccc_pll0out_clks, mpfs_ccc_pll1out_clks
+};
+
+static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_clock *out_hws,
+ unsigned int num_clks, struct mpfs_ccc_data *data,
+ struct mpfs_ccc_pll_hw_clock *parent)
+{
+ int ret;
+
+ for (unsigned int i = 0; i < num_clks; i++) {
+ struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
+ char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+
+ snprintf(name, 23, "%s_out%u", parent->name, i);
+ out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
+ out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
+ out_hw->reg_offset;
+
+ ret = devm_clk_hw_register(dev, &out_hw->divider.hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
+ out_hw->id);
+
+ data->hw_data.hws[out_hw->id] = &out_hw->divider.hw;
+ }
+
+ return 0;
+}
+
+#define CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(_name, _parents, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = _parents, \
+ .num_parents = MPFS_CCC_REFS_PER_PLL, \
+ .ops = _ops, \
+ })
+
+static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clock *pll_hws,
+ unsigned int num_clks, struct mpfs_ccc_data *data)
+{
+ int ret;
+
+ for (unsigned int i = 0; i < num_clks; i++) {
+ struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
+ char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
+
+ pll_hw->base = data->pll_base[i];
+ snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
+ pll_hw->name = (const char *)name;
+ pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
+ pll_hw->parents,
+ &mpfs_ccc_pll_ops, 0);
+
+ ret = devm_clk_hw_register(dev, &pll_hw->hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register ccc id: %d\n",
+ pll_hw->id);
+
+ data->hw_data.hws[pll_hw->id] = &pll_hw->hw;
+
+ ret = mpfs_ccc_register_outputs(dev, mpfs_ccc_pllout_clks[i],
+ MPFS_CCC_OUTPUTS_PER_PLL, data, pll_hw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mpfs_ccc_probe(struct platform_device *pdev)
+{
+ struct mpfs_ccc_data *clk_data;
+ void __iomem *pll_base[ARRAY_SIZE(mpfs_ccc_pll_clks)];
+ unsigned int num_clks;
+ int ret;
+
+ num_clks = ARRAY_SIZE(mpfs_ccc_pll_clks) + ARRAY_SIZE(mpfs_ccc_pll0out_clks) +
+ ARRAY_SIZE(mpfs_ccc_pll1out_clks);
+
+ clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hw_data.hws, num_clks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ pll_base[0] = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pll_base[0]))
+ return PTR_ERR(pll_base[0]);
+
+ pll_base[1] = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(pll_base[1]))
+ return PTR_ERR(pll_base[1]);
+
+ clk_data->pll_base = pll_base;
+ clk_data->hw_data.num = num_clks;
+ clk_data->dev = &pdev->dev;
+
+ ret = mpfs_ccc_register_plls(clk_data->dev, mpfs_ccc_pll_clks,
+ ARRAY_SIZE(mpfs_ccc_pll_clks), clk_data);
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(clk_data->dev, of_clk_hw_onecell_get,
+ &clk_data->hw_data);
+}
+
+static const struct of_device_id mpfs_ccc_of_match_table[] = {
+ { .compatible = "microchip,mpfs-ccc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpfs_ccc_of_match_table);
+
+static struct platform_driver mpfs_ccc_driver = {
+ .probe = mpfs_ccc_probe,
+ .driver = {
+ .name = "microchip-mpfs-ccc",
+ .of_match_table = mpfs_ccc_of_match_table,
+ },
+};
+
+static int __init clk_ccc_init(void)
+{
+ return platform_driver_register(&mpfs_ccc_driver);
+}
+core_initcall(clk_ccc_init);
+
+static void __exit clk_ccc_exit(void)
+{
+ platform_driver_unregister(&mpfs_ccc_driver);
+}
+module_exit(clk_ccc_exit);
+
+MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
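
The rate computed by mpfs_ccc_pll_recalc_rate() above is prate * mult / (ref_div * MPFS_CCC_FIXED_DIV), and each output is then divided by its one-based post-divider. A worked sketch with illustrative register values; none of these numbers come from this patch.

/* Hypothetical configuration, for illustration only. */
static unsigned long long example_ccc_rates(void)
{
	unsigned long long prate = 100000000ULL;	/* 100 MHz reference */
	unsigned int mult = 48;				/* FBDIV field */
	unsigned int ref_div = 2;			/* REFDIV field */
	unsigned int postdiv = 6;			/* a POSTDIVx field (one-based) */
	unsigned long long vco, out;

	vco = prate * mult / (ref_div * 4 /* MPFS_CCC_FIXED_DIV */);	/* 600 MHz */
	out = vco / postdiv;						/* 100 MHz */

	return out;
}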
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index 070c3b896559..4f0a19db7ed7 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -1,14 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Daire McNamara,<daire.mcnamara@microchip.com>
- * Copyright (C) 2020 Microchip Technology Inc. All rights reserved.
+ * PolarFire SoC MSS/core complex clock control
+ *
+ * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
*/
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>
+#include <soc/microchip/mpfs.h>
/* address offset of control registers */
#define REG_MSSPLL_REF_CR 0x08u
@@ -28,6 +31,7 @@
#define MSSPLL_FIXED_DIV 4u
struct mpfs_clock_data {
+ struct device *dev;
void __iomem *base;
void __iomem *msspll_base;
struct clk_hw_onecell_data hw_data;
@@ -46,37 +50,18 @@ struct mpfs_msspll_hw_clock {
#define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw)
-struct mpfs_cfg_clock {
- const struct clk_div_table *table;
- unsigned int id;
- u32 reg_offset;
- u8 shift;
- u8 width;
- u8 flags;
-};
-
struct mpfs_cfg_hw_clock {
- struct mpfs_cfg_clock cfg;
- void __iomem *sys_base;
- struct clk_hw hw;
+ struct clk_divider cfg;
struct clk_init_data init;
-};
-
-#define to_mpfs_cfg_clk(_hw) container_of(_hw, struct mpfs_cfg_hw_clock, hw)
-
-struct mpfs_periph_clock {
unsigned int id;
- u8 shift;
+ u32 reg_offset;
};
struct mpfs_periph_hw_clock {
- struct mpfs_periph_clock periph;
- void __iomem *sys_base;
- struct clk_hw hw;
+ struct clk_gate periph;
+ unsigned int id;
};
-#define to_mpfs_periph_clk(_hw) container_of(_hw, struct mpfs_periph_hw_clock, hw)
-
/*
* mpfs_clk_lock prevents anything else from writing to the
* mpfs clk block while a software locked register is being written.
@@ -126,8 +111,62 @@ static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned lon
return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv);
}
+static long mpfs_clk_msspll_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+{
+ struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
+ void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
+ void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
+ u32 mult, ref_div;
+ unsigned long rate_before_ctrl;
+
+ mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
+ mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
+
+ rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult;
+
+ return divider_round_rate(hw, rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH,
+ msspll_hw->flags);
+}
+
+static int mpfs_clk_msspll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+ struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
+ void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
+ void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
+ void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR;
+ u32 mult, ref_div, postdiv;
+ int divider_setting;
+ unsigned long rate_before_ctrl, flags;
+
+ mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
+ mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
+
+ rate_before_ctrl = rate * (ref_div * MSSPLL_FIXED_DIV) / mult;
+ divider_setting = divider_get_val(rate_before_ctrl, prate, NULL, MSSPLL_POSTDIV_WIDTH,
+ msspll_hw->flags);
+
+ if (divider_setting < 0)
+ return divider_setting;
+
+ spin_lock_irqsave(&mpfs_clk_lock, flags);
+
+ postdiv = readl_relaxed(postdiv_addr);
+ postdiv &= ~(clk_div_mask(MSSPLL_POSTDIV_WIDTH) << MSSPLL_POSTDIV_SHIFT);
+ postdiv |= divider_setting << MSSPLL_POSTDIV_SHIFT;
+ writel_relaxed(postdiv, postdiv_addr);
+
+ spin_unlock_irqrestore(&mpfs_clk_lock, flags);
+
+ return 0;
+}
+
static const struct clk_ops mpfs_clk_msspll_ops = {
.recalc_rate = mpfs_clk_msspll_recalc_rate,
+ .round_rate = mpfs_clk_msspll_round_rate,
+ .set_rate = mpfs_clk_msspll_set_rate,
};
#define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \
@@ -144,25 +183,17 @@ static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = {
MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR),
};
-static int mpfs_clk_register_msspll(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hw,
- void __iomem *base)
-{
- msspll_hw->base = base;
-
- return devm_clk_hw_register(dev, &msspll_hw->hw);
-}
-
static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws,
unsigned int num_clks, struct mpfs_clock_data *data)
{
- void __iomem *base = data->msspll_base;
unsigned int i;
int ret;
for (i = 0; i < num_clks; i++) {
struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i];
- ret = mpfs_clk_register_msspll(dev, msspll_hw, base);
+ msspll_hw->base = data->msspll_base;
+ ret = devm_clk_hw_register(dev, &msspll_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register msspll id: %d\n",
CLK_MSSPLL);
@@ -177,68 +208,22 @@ static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_c
* "CFG" clocks
*/
-static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
-{
- struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
- struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
- void __iomem *base_addr = cfg_hw->sys_base;
- u32 val;
-
- val = readl_relaxed(base_addr + cfg->reg_offset) >> cfg->shift;
- val &= clk_div_mask(cfg->width);
-
- return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
-}
-
-static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
-{
- struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
- struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
-
- return divider_round_rate(hw, rate, prate, cfg->table, cfg->width, 0);
-}
-
-static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
-{
- struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
- struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
- void __iomem *base_addr = cfg_hw->sys_base;
- unsigned long flags;
- u32 val;
- int divider_setting;
-
- divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0);
-
- if (divider_setting < 0)
- return divider_setting;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
- val = readl_relaxed(base_addr + cfg->reg_offset);
- val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift);
- val |= divider_setting << cfg->shift;
- writel_relaxed(val, base_addr + cfg->reg_offset);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-
- return 0;
-}
-
-static const struct clk_ops mpfs_clk_cfg_ops = {
- .recalc_rate = mpfs_cfg_clk_recalc_rate,
- .round_rate = mpfs_cfg_clk_round_rate,
- .set_rate = mpfs_cfg_clk_set_rate,
-};
-
#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
- .cfg.id = _id, \
+ .id = _id, \
.cfg.shift = _shift, \
.cfg.width = _width, \
.cfg.table = _table, \
- .cfg.reg_offset = _offset, \
+ .reg_offset = _offset, \
.cfg.flags = _flags, \
- .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
+ .cfg.hw.init = CLK_HW_INIT(_name, _parent, &clk_divider_ops, 0), \
+ .cfg.lock = &mpfs_clk_lock, \
}
+#define CLK_CPU_OFFSET 0u
+#define CLK_AXI_OFFSET 1u
+#define CLK_AHB_OFFSET 2u
+#define CLK_RTCREF_OFFSET 3u
+
static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0,
REG_CLOCK_CONFIG_CR),
@@ -247,42 +232,34 @@ static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0,
REG_CLOCK_CONFIG_CR),
{
- .cfg.id = CLK_RTCREF,
+ .id = CLK_RTCREF,
.cfg.shift = 0,
.cfg.width = 12,
.cfg.table = mpfs_div_rtcref_table,
- .cfg.reg_offset = REG_RTC_CLOCK_CR,
+ .reg_offset = REG_RTC_CLOCK_CR,
.cfg.flags = CLK_DIVIDER_ONE_BASED,
- .hw.init =
- CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0),
+ .cfg.hw.init =
+ CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &clk_divider_ops, 0),
}
};
-static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw,
- void __iomem *sys_base)
-{
- cfg_hw->sys_base = sys_base;
-
- return devm_clk_hw_register(dev, &cfg_hw->hw);
-}
-
static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hws,
unsigned int num_clks, struct mpfs_clock_data *data)
{
- void __iomem *sys_base = data->base;
unsigned int i, id;
int ret;
for (i = 0; i < num_clks; i++) {
struct mpfs_cfg_hw_clock *cfg_hw = &cfg_hws[i];
- ret = mpfs_clk_register_cfg(dev, cfg_hw, sys_base);
+ cfg_hw->cfg.reg = data->base + cfg_hw->reg_offset;
+ ret = devm_clk_hw_register(dev, &cfg_hw->cfg.hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
- cfg_hw->cfg.id);
+ cfg_hw->id);
- id = cfg_hw->cfg.id;
- data->hw_data.hws[id] = &cfg_hw->hw;
+ id = cfg_hw->id;
+ data->hw_data.hws[id] = &cfg_hw->cfg.hw;
}
return 0;
@@ -292,77 +269,15 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
* peripheral clocks - devices connected to axi or ahb buses.
*/
-static int mpfs_periph_clk_enable(struct clk_hw *hw)
-{
- struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
- struct mpfs_periph_clock *periph = &periph_hw->periph;
- void __iomem *base_addr = periph_hw->sys_base;
- u32 reg, val;
- unsigned long flags;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
- val = reg & ~(1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR);
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
- val = reg | (1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-
- return 0;
-}
-
-static void mpfs_periph_clk_disable(struct clk_hw *hw)
-{
- struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
- struct mpfs_periph_clock *periph = &periph_hw->periph;
- void __iomem *base_addr = periph_hw->sys_base;
- u32 reg, val;
- unsigned long flags;
-
- spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
- val = reg & ~(1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
-
- spin_unlock_irqrestore(&mpfs_clk_lock, flags);
-}
-
-static int mpfs_periph_clk_is_enabled(struct clk_hw *hw)
-{
- struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
- struct mpfs_periph_clock *periph = &periph_hw->periph;
- void __iomem *base_addr = periph_hw->sys_base;
- u32 reg;
-
- reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
- if ((reg & (1u << periph->shift)) == 0u) {
- reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
- if (reg & (1u << periph->shift))
- return 1;
- }
-
- return 0;
-}
-
-static const struct clk_ops mpfs_periph_clk_ops = {
- .enable = mpfs_periph_clk_enable,
- .disable = mpfs_periph_clk_disable,
- .is_enabled = mpfs_periph_clk_is_enabled,
-};
-
#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \
- .periph.id = _id, \
- .periph.shift = _shift, \
- .hw.init = CLK_HW_INIT_HW(_name, _parent, &mpfs_periph_clk_ops, \
+ .id = _id, \
+ .periph.bit_idx = _shift, \
+ .periph.hw.init = CLK_HW_INIT_HW(_name, _parent, &clk_gate_ops, \
_flags), \
+ .periph.lock = &mpfs_clk_lock, \
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT].hw)
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].cfg.hw)
/*
* Critical clocks:
@@ -370,6 +285,8 @@ static const struct clk_ops mpfs_periph_clk_ops = {
* trap handler
* - CLK_MMUART0: reserved by the hss
* - CLK_DDRC: provides clock to the ddr subsystem
+ * - CLK_RTC: the onboard RTC's AHB bus clock must be kept running as the rtc will stop
+ * if the AHB interface clock is disabled
* - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect)
* clock domain crossers which provide the interface to the FPGA fabric. Disabling them
* causes the FPGA fabric to go into reset.
@@ -394,7 +311,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_CAN0, "clk_periph_can0", PARENT_CLK(AHB), 14, 0),
CLK_PERIPH(CLK_CAN1, "clk_periph_can1", PARENT_CLK(AHB), 15, 0),
CLK_PERIPH(CLK_USB, "clk_periph_usb", PARENT_CLK(AHB), 16, 0),
- CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, 0),
+ CLK_PERIPH(CLK_RTC, "clk_periph_rtc", PARENT_CLK(AHB), 18, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_QSPI, "clk_periph_qspi", PARENT_CLK(AHB), 19, 0),
CLK_PERIPH(CLK_GPIO0, "clk_periph_gpio0", PARENT_CLK(AHB), 20, 0),
CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
@@ -408,36 +325,116 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0),
};
-static int mpfs_clk_register_periph(struct device *dev, struct mpfs_periph_hw_clock *periph_hw,
- void __iomem *sys_base)
-{
- periph_hw->sys_base = sys_base;
-
- return devm_clk_hw_register(dev, &periph_hw->hw);
-}
-
static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_clock *periph_hws,
int num_clks, struct mpfs_clock_data *data)
{
- void __iomem *sys_base = data->base;
unsigned int i, id;
int ret;
for (i = 0; i < num_clks; i++) {
struct mpfs_periph_hw_clock *periph_hw = &periph_hws[i];
- ret = mpfs_clk_register_periph(dev, periph_hw, sys_base);
+ periph_hw->periph.reg = data->base + REG_SUBBLK_CLOCK_CR;
+ ret = devm_clk_hw_register(dev, &periph_hw->periph.hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
- periph_hw->periph.id);
+ periph_hw->id);
- id = periph_hws[i].periph.id;
- data->hw_data.hws[id] = &periph_hw->hw;
+ id = periph_hws[i].id;
+ data->hw_data.hws[id] = &periph_hw->periph.hw;
}
return 0;
}
+/*
+ * Peripheral clock resets
+ */
+
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
+
+u32 mpfs_reset_read(struct device *dev)
+{
+ struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent);
+
+ return readl_relaxed(clock_data->base + REG_SUBBLK_RESET_CR);
+}
+EXPORT_SYMBOL_NS_GPL(mpfs_reset_read, MCHP_CLK_MPFS);
+
+void mpfs_reset_write(struct device *dev, u32 val)
+{
+ struct mpfs_clock_data *clock_data = dev_get_drvdata(dev->parent);
+
+ writel_relaxed(val, clock_data->base + REG_SUBBLK_RESET_CR);
+}
+EXPORT_SYMBOL_NS_GPL(mpfs_reset_write, MCHP_CLK_MPFS);
+
+static void mpfs_reset_unregister_adev(void *_adev)
+{
+ struct auxiliary_device *adev = _adev;
+
+ auxiliary_device_delete(adev);
+}
+
+static void mpfs_reset_adev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ auxiliary_device_uninit(adev);
+
+ kfree(adev);
+}
+
+static struct auxiliary_device *mpfs_reset_adev_alloc(struct mpfs_clock_data *clk_data)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ adev->name = "reset-mpfs";
+ adev->dev.parent = clk_data->dev;
+ adev->dev.release = mpfs_reset_adev_release;
+ adev->id = 666u;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ERR_PTR(ret);
+ }
+
+ return adev;
+}
+
+static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = mpfs_reset_adev_alloc(clk_data);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(clk_data->dev, mpfs_reset_unregister_adev, adev);
+}
+
+#else /* !CONFIG_RESET_CONTROLLER */
+
+static int mpfs_reset_controller_register(struct mpfs_clock_data *clk_data)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_RESET_CONTROLLER */
+
static int mpfs_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -462,6 +459,8 @@ static int mpfs_clk_probe(struct platform_device *pdev)
return PTR_ERR(clk_data->msspll_base);
clk_data->hw_data.num = num_clks;
+ clk_data->dev = dev;
+ dev_set_drvdata(dev, clk_data);
ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks),
clk_data);
@@ -481,14 +480,14 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- return ret;
+ return mpfs_reset_controller_register(clk_data);
}
static const struct of_device_id mpfs_clk_of_match_table[] = {
{ .compatible = "microchip,mpfs-clkcfg", },
{}
};
-MODULE_DEVICE_TABLE(of, mpfs_clk_match_table);
+MODULE_DEVICE_TABLE(of, mpfs_clk_of_match_table);
static struct platform_driver mpfs_clk_driver = {
.probe = mpfs_clk_probe,
@@ -511,4 +510,7 @@ static void __exit clk_mpfs_exit(void)
module_exit(clk_mpfs_exit);
MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Driver");
-MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Padmarao Begari <padmarao.begari@microchip.com>");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
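
The reset support added above only creates an auxiliary device named "reset-mpfs"; the matching reset driver lives elsewhere in the tree and is not part of this diff. As a hypothetical sketch of the consumer side, assuming the exported helpers are declared in <soc/microchip/mpfs.h> (as the include added above suggests) and that the auxiliary id string is built from the registering module's name, a driver binding to that device could look roughly like this:

    #include <linux/auxiliary_bus.h>
    #include <linux/module.h>
    #include <soc/microchip/mpfs.h>

    static int example_reset_probe(struct auxiliary_device *adev,
                                   const struct auxiliary_device_id *id)
    {
            /* adev->dev.parent is the clock controller; its drvdata holds the base */
            u32 reset_cr = mpfs_reset_read(&adev->dev);

            dev_info(&adev->dev, "SUBBLK_RESET_CR at probe: %#x\n", reset_cr);
            return 0;
    }

    /* assumed match string: "<KBUILD_MODNAME of clk-mpfs>.<adev->name>" */
    static const struct auxiliary_device_id example_reset_id_table[] = {
            { .name = "clk_mpfs.reset-mpfs" },
            { }
    };
    MODULE_DEVICE_TABLE(auxiliary, example_reset_id_table);

    static struct auxiliary_driver example_reset_driver = {
            .probe = example_reset_probe,
            .id_table = example_reset_id_table,
    };
    module_auxiliary_driver(example_reset_driver);

    MODULE_IMPORT_NS(MCHP_CLK_MPFS);  /* mpfs_reset_read/write are namespaced exports */
    MODULE_LICENSE("GPL");
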
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 48dfb18b490e..130d1a723879 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -19,9 +19,6 @@
#include "clk.h"
#include "reset.h"
-#define APBC_RTC 0x28
-#define APBC_TWSI0 0x2c
-#define APBC_KPC 0x30
#define APBC_UART0 0x0
#define APBC_UART1 0x4
#define APBC_GPIO 0x8
@@ -29,20 +26,40 @@
#define APBC_PWM1 0x10
#define APBC_PWM2 0x14
#define APBC_PWM3 0x18
+#define APBC_RTC 0x28
+#define APBC_TWSI0 0x2c
+#define APBC_KPC 0x30
#define APBC_TIMER 0x34
+#define APBC_AIB 0x3c
+#define APBC_SW_JTAG 0x40
+#define APBC_ONEWIRE 0x48
+#define APBC_TWSI1 0x6c
+#define APBC_UART2 0x70
+#define APBC_AC97 0x84
#define APBC_SSP0 0x81c
#define APBC_SSP1 0x820
#define APBC_SSP2 0x84c
#define APBC_SSP3 0x858
#define APBC_SSP4 0x85c
-#define APBC_TWSI1 0x6c
-#define APBC_UART2 0x70
+#define APMU_DISP0 0x4c
+#define APMU_CCIC0 0x50
#define APMU_SDH0 0x54
#define APMU_SDH1 0x58
#define APMU_USB 0x5c
-#define APMU_DISP0 0x4c
-#define APMU_CCIC0 0x50
#define APMU_DFC 0x60
+#define APMU_DMA 0x64
+#define APMU_BUS 0x6c
+#define APMU_GC 0xcc
+#define APMU_SMC 0xd4
+#define APMU_XD 0xdc
+#define APMU_SDH2 0xe0
+#define APMU_SDH3 0xe4
+#define APMU_CF 0xf0
+#define APMU_MSP 0xf4
+#define APMU_CMU 0xf8
+#define APMU_FE 0xfc
+#define APMU_PCIE 0x100
+#define APMU_EPD 0x104
#define MPMU_UART_PLL 0x14
struct pxa168_clk_unit {
@@ -71,9 +88,12 @@ static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
{PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
{PXA168_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0},
{PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
- {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
- {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
+ {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 1, 5, 0},
+ {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 1, 5, 0},
{PXA168_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
+ {PXA168_CLK_PLL1_2_1_10, "pll1_2_1_10", "pll1_2", 1, 10, 0},
+ {PXA168_CLK_PLL1_2_3_16, "pll1_2_3_16", "pll1_2", 3, 16, 0},
+ {PXA168_CLK_CLK32_2, "clk32_2", "clk32", 1, 2, 0},
};
static struct mmp_clk_factor_masks uart_factor_masks = {
@@ -107,24 +127,44 @@ static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
mmp_clk_add(unit, PXA168_CLK_UART_PLL, clk);
}
+static DEFINE_SPINLOCK(twsi0_lock);
+static DEFINE_SPINLOCK(twsi1_lock);
+static const char * const twsi_parent_names[] = {"pll1_2_1_10", "pll1_2_1_5"};
+
+static DEFINE_SPINLOCK(kpc_lock);
+static const char * const kpc_parent_names[] = {"clk32", "clk32_2", "pll1_24"};
+
+static DEFINE_SPINLOCK(pwm0_lock);
+static DEFINE_SPINLOCK(pwm1_lock);
+static DEFINE_SPINLOCK(pwm2_lock);
+static DEFINE_SPINLOCK(pwm3_lock);
+static const char * const pwm_parent_names[] = {"pll1_48", "clk32"};
+
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
-static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
+static const char * const uart_parent_names[] = {"pll1_2_3_16", "uart_pll"};
static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static DEFINE_SPINLOCK(ssp2_lock);
static DEFINE_SPINLOCK(ssp3_lock);
static DEFINE_SPINLOCK(ssp4_lock);
-static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
+static const char * const ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
static DEFINE_SPINLOCK(timer_lock);
-static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96", "pll1_192"};
+static const char * const timer_parent_names[] = {"pll1_48", "clk32", "pll1_96", "pll1_192"};
static DEFINE_SPINLOCK(reset_lock);
static struct mmp_param_mux_clk apbc_mux_clks[] = {
+ {0, "twsi0_mux", twsi_parent_names, ARRAY_SIZE(twsi_parent_names), CLK_SET_RATE_PARENT, APBC_TWSI0, 4, 3, 0, &twsi0_lock},
+ {0, "twsi1_mux", twsi_parent_names, ARRAY_SIZE(twsi_parent_names), CLK_SET_RATE_PARENT, APBC_TWSI1, 4, 3, 0, &twsi1_lock},
+ {0, "kpc_mux", kpc_parent_names, ARRAY_SIZE(kpc_parent_names), CLK_SET_RATE_PARENT, APBC_KPC, 4, 3, 0, &kpc_lock},
+ {0, "pwm0_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM0, 4, 3, 0, &pwm0_lock},
+ {0, "pwm1_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM1, 4, 3, 0, &pwm1_lock},
+ {0, "pwm2_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM2, 4, 3, 0, &pwm2_lock},
+ {0, "pwm3_mux", pwm_parent_names, ARRAY_SIZE(pwm_parent_names), CLK_SET_RATE_PARENT, APBC_PWM3, 4, 3, 0, &pwm3_lock},
{0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
{0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
{0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
@@ -137,16 +177,15 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = {
};
static struct mmp_param_gate_clk apbc_gate_clks[] = {
- {PXA168_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+ {PXA168_CLK_TWSI0, "twsi0_clk", "twsi0_mux", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &twsi0_lock},
+ {PXA168_CLK_TWSI1, "twsi1_clk", "twsi1_mux", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &twsi1_lock},
+ {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x1, 0x1, 0x0, 0, &reset_lock},
+ {PXA168_CLK_KPC, "kpc_clk", "kpc_mux", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &kpc_lock},
{PXA168_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
- {PXA168_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
- {PXA168_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
- /* The gate clocks has mux parent. */
+ {PXA168_CLK_PWM0, "pwm0_clk", "pwm0_mux", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &pwm0_lock},
+ {PXA168_CLK_PWM1, "pwm1_clk", "pwm1_mux", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &pwm1_lock},
+ {PXA168_CLK_PWM2, "pwm2_clk", "pwm2_mux", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &pwm2_lock},
+ {PXA168_CLK_PWM3, "pwm3_clk", "pwm3_mux", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &pwm3_lock},
{PXA168_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
{PXA168_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
{PXA168_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
@@ -170,22 +209,30 @@ static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
}
+static DEFINE_SPINLOCK(dfc_lock);
+static const char * const dfc_parent_names[] = {"pll1_4", "pll1_8"};
+
static DEFINE_SPINLOCK(sdh0_lock);
static DEFINE_SPINLOCK(sdh1_lock);
-static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
+static DEFINE_SPINLOCK(sdh2_lock);
+static DEFINE_SPINLOCK(sdh3_lock);
+static const char * const sdh_parent_names[] = {"pll1_13", "pll1_12", "pll1_8"};
static DEFINE_SPINLOCK(usb_lock);
static DEFINE_SPINLOCK(disp0_lock);
-static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
+static const char * const disp_parent_names[] = {"pll1", "pll1_2"};
static DEFINE_SPINLOCK(ccic0_lock);
-static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
-static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
+static const char * const ccic_parent_names[] = {"pll1_4", "pll1_8"};
+static const char * const ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
static struct mmp_param_mux_clk apmu_mux_clks[] = {
- {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
- {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
+ {0, "dfc_mux", dfc_parent_names, ARRAY_SIZE(dfc_parent_names), CLK_SET_RATE_PARENT, APMU_DFC, 6, 1, 0, &dfc_lock},
+ {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 2, 0, &sdh0_lock},
+ {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 2, 0, &sdh1_lock},
+ {0, "sdh2_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH2, 6, 2, 0, &sdh2_lock},
+ {0, "sdh3_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH3, 6, 2, 0, &sdh3_lock},
{0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
{0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
{0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
@@ -196,12 +243,16 @@ static struct mmp_param_div_clk apmu_div_clks[] = {
};
static struct mmp_param_gate_clk apmu_gate_clks[] = {
- {PXA168_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
+ {PXA168_CLK_DFC, "dfc_clk", "dfc_mux", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, &dfc_lock},
{PXA168_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
{PXA168_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
- /* The gate clocks has mux parent. */
- {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
- {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+ {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x12, 0x12, 0x0, 0, &sdh0_lock},
+ {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x12, 0x12, 0x0, 0, &sdh1_lock},
+ {PXA168_CLK_SDH2, "sdh2_clk", "sdh2_mux", CLK_SET_RATE_PARENT, APMU_SDH2, 0x12, 0x12, 0x0, 0, &sdh2_lock},
+ {PXA168_CLK_SDH3, "sdh3_clk", "sdh3_mux", CLK_SET_RATE_PARENT, APMU_SDH3, 0x12, 0x12, 0x0, 0, &sdh3_lock},
+ /* SDH0/1 and 2/3 AXI clocks are also gated by common bits in SDH0 and SDH2 registers */
+ {PXA168_CLK_SDH01_AXI, "sdh01_axi_clk", NULL, CLK_SET_RATE_PARENT, APMU_SDH0, 0x9, 0x9, 0x0, 0, &sdh0_lock},
+ {PXA168_CLK_SDH23_AXI, "sdh23_axi_clk", NULL, CLK_SET_RATE_PARENT, APMU_SDH2, 0x9, 0x9, 0x0, 0, &sdh2_lock},
{PXA168_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
{PXA168_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
{PXA168_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
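
The pxa168 changes above put a parent mux in front of each functional-clock gate and make the pair share one spinlock, because mux and gate fields live in the same APBC/APMU register; CLK_SET_RATE_PARENT then lets rate requests propagate from the gate to the mux. A rough sketch of that arrangement using the generic basic clocks, with an invented register layout and clock names (the mmp helpers in the patch wrap these same primitives):

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_apbc_lock);
    static const char * const example_parents[] = { "pll1_2_1_10", "pll1_2_1_5" };

    static int example_register_twsi(struct device *dev, void __iomem *apbc_base)
    {
            struct clk_hw *mux, *gate;

            /* bits [6:4] of the (invented) TWSI register select the clock source */
            mux = clk_hw_register_mux(dev, "example_twsi_mux", example_parents,
                                      ARRAY_SIZE(example_parents), CLK_SET_RATE_PARENT,
                                      apbc_base + 0x2c, 4, 3, 0, &example_apbc_lock);
            if (IS_ERR(mux))
                    return PTR_ERR(mux);

            /* the enable bit sits in the same register, hence the shared lock */
            gate = clk_hw_register_gate(dev, "example_twsi_clk", "example_twsi_mux",
                                        CLK_SET_RATE_PARENT, apbc_base + 0x2c, 1, 0,
                                        &example_apbc_lock);
            return PTR_ERR_OR_ZERO(gate);
    }
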
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index 585a02e0b330..fc403ad735ad 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -87,7 +87,7 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
struct resource *res;
struct clk *parent;
void __iomem *reg;
- int i, ret;
+ int i;
hw_tbg_data = devm_kzalloc(&pdev->dev,
struct_size(hw_tbg_data, hws, NUM_TBG),
@@ -123,9 +123,7 @@ static int armada_3700_tbg_clock_probe(struct platform_device *pdev)
dev_err(dev, "Can't register TBG clock %s\n", name);
}
- ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data);
-
- return ret;
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, hw_tbg_data);
}
static int armada_3700_tbg_clock_remove(struct platform_device *pdev)
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 7e35c891e168..0a90452ee808 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -170,7 +170,7 @@ static struct clk *clk_register_dove_divider(struct device *dev,
.num_parents = num_parents,
};
- strlcpy(name, dc->name, sizeof(name));
+ strscpy(name, dc->name, sizeof(name));
dc->hw.init = &init;
dc->base = base;
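
The strlcpy() to strscpy() conversion above is the usual hardening change: strscpy() never reads more than the destination size from the source and signals truncation through its return value, whereas strlcpy() returns the full source length and so has to walk the entire source string. A small illustration with invented names:

    #include <linux/errno.h>
    #include <linux/string.h>

    static int example_copy_name(char *dst, size_t dst_len, const char *src)
    {
            ssize_t ret = strscpy(dst, src, dst_len);

            if (ret == -E2BIG)      /* destination too small, result was truncated */
                    return -ENAMETOOLONG;

            return 0;               /* ret is the number of characters copied */
    }
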
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index f253ef1996b1..69ebf65081b8 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -606,7 +606,7 @@ static void __init lpc18xx_cgu_register_source_clks(struct device_node *np,
if (IS_ERR(clk))
pr_warn("%s: failed to register irc clk\n", __func__);
- /* Register crystal oscillator controlller */
+ /* Register crystal oscillator controller */
parents[0] = of_clk_get_parent_name(np, 0);
clk = clk_register_gate(NULL, clk_src_names[CLK_SRC_OSC], parents[0],
0, base + LPC18XX_CGU_XTAL_OSC_CTRL,
diff --git a/drivers/clk/pistachio/clk.h b/drivers/clk/pistachio/clk.h
index f9c31e3a0e47..2f4ffbd98282 100644
--- a/drivers/clk/pistachio/clk.h
+++ b/drivers/clk/pistachio/clk.h
@@ -31,10 +31,10 @@ struct pistachio_mux {
unsigned int shift;
unsigned int num_parents;
const char *name;
- const char **parents;
+ const char *const *parents;
};
-#define PNAME(x) static const char *x[] __initconst
+#define PNAME(x) static const char *const x[] __initconst
#define MUX(_id, _name, _pnames, _reg, _shift) \
{ \
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 03de634efc52..374098ebbf2b 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -104,6 +104,8 @@ int __init clk_pxa_cken_init(const struct desc_clk_cken *clks,
for (i = 0; i < nb_clks; i++) {
pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
+ if (!pxa_clk)
+ return -ENOMEM;
pxa_clk->is_in_low_power = clks[i].is_in_low_power;
pxa_clk->lp = clks[i].lp;
pxa_clk->hp = clks[i].hp;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1cf1ef70e347..76e6dee450d5 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -180,6 +180,14 @@ config MSM_GCC_8660
Say Y if you want to use peripheral devices such as UART, SPI,
i2c, USB, SD/eMMC, etc.
+config MSM_GCC_8909
+ tristate "MSM8909 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8909 devices.
+ Say Y if you want to use devices such as UART, SPI, I2C, USB,
+ SD/eMMC, display, graphics, camera, etc.
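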
+
config MSM_GCC_8916
tristate "MSM8916 Global Clock Controller"
select QCOM_GDSC
@@ -445,6 +453,14 @@ config SC_GPUCC_7280
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_GPUCC_8280XP
+ tristate "SC8280XP Graphics Clock Controller"
+ select SC_GCC_8280XP
+ help
+ Support for the graphics clock controller on SC8280XP devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SC_LPASSCC_7280
tristate "SC7280 Low Power Audio Subsystem (LPASS) Clock Controller"
select SC_GCC_7280
@@ -545,10 +561,10 @@ config QCS_Q6SSTOP_404
controller to reset the Q6SSTOP subsystem.
config SDM_GCC_845
- tristate "SDM845 Global Clock Controller"
+ tristate "SDM845/SDM670 Global Clock Controller"
select QCOM_GDSC
help
- Support for the global clock controller on SDM845 devices.
+ Support for the global clock controller on SDM845 and SDM670 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
i2C, USB, UFS, SDDC, PCIe, etc.
@@ -616,6 +632,15 @@ config SM_CAMCC_8450
Support for the camera clock controller on SM8450 devices.
Say Y if you want to support camera devices and camera functionality.
+config SM_DISPCC_6115
+ tristate "SM6115 Display Clock Controller"
+ depends on SM_GCC_6115
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM6115/SM4250 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_DISPCC_6125
tristate "SM6125 Display Clock Controller"
depends on SM_GCC_6125
@@ -643,8 +668,18 @@ config SM_DISPCC_6350
Say Y if you want to support display devices and functionality such as
splash screen.
+config SM_DISPCC_8450
+ tristate "SM8450 Display Clock Controller"
+ depends on SM_GCC_8450
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc
+ SM8450 devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
config SM_GCC_6115
tristate "SM6115 and SM4250 Global Clock Controller"
+ select QCOM_GDSC
help
Support for the global clock controller on SM6115 and SM4250 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -665,6 +700,14 @@ config SM_GCC_6350
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_6375
+ tristate "SM6375 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM6375 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, etc.
+
config SM_GCC_8150
tristate "SM8150 Global Clock Controller"
help
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index fbcf04073f07..e6cecf9e0436 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
+obj-$(CONFIG_MSM_GCC_8909) += gcc-msm8909.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GCC_8280XP) += gcc-sc8280xp.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
+obj-$(CONFIG_SC_GPUCC_8280XP) += gpucc-sc8280xp.o
obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_LPASS_CORECC_7280) += lpasscorecc-sc7280.o lpassaudiocc-sc7280.o
@@ -90,12 +92,15 @@ obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
obj-$(CONFIG_SM_CAMCC_8450) += camcc-sm8450.o
+obj-$(CONFIG_SM_DISPCC_6115) += dispcc-sm6115.o
obj-$(CONFIG_SM_DISPCC_6125) += dispcc-sm6125.o
obj-$(CONFIG_SM_DISPCC_6350) += dispcc-sm6350.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
+obj-$(CONFIG_SM_DISPCC_8450) += dispcc-sm8450.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
+obj-$(CONFIG_SM_GCC_6375) += gcc-sm6375.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index 329d2c5356d8..f9c5e296dba2 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -127,7 +127,9 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
if (!init.name)
return -ENOMEM;
- init.parent_names = (const char *[]){ "xo" };
+ init.parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ };
init.num_parents = 1;
init.ops = &clk_pll_sr2_ops;
pll->clkr.hw.init = &init;
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index bef7899ad0d6..a5aea27eb867 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -2,6 +2,7 @@
// Copyright (c) 2018, The Linux Foundation. All rights reserved.
#include <linux/clk-provider.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -36,12 +37,28 @@ static struct clk_alpha_pll ipq_pll = {
},
};
-static const struct alpha_pll_config ipq_pll_config = {
+static const struct alpha_pll_config ipq6018_pll_config = {
.l = 0x37,
- .config_ctl_val = 0x04141200,
- .config_ctl_hi_val = 0x0,
+ .config_ctl_val = 0x240d4828,
+ .config_ctl_hi_val = 0x6,
.early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .aux_output_mask = BIT(1),
.main_output_mask = BIT(0),
+ .test_ctl_val = 0x1c0000C0,
+ .test_ctl_hi_val = 0x4000,
+};
+
+static const struct alpha_pll_config ipq8074_pll_config = {
+ .l = 0x48,
+ .config_ctl_val = 0x200d4828,
+ .config_ctl_hi_val = 0x6,
+ .early_output_mask = BIT(3),
+ .aux2_output_mask = BIT(2),
+ .aux_output_mask = BIT(1),
+ .main_output_mask = BIT(0),
+ .test_ctl_val = 0x1c000000,
+ .test_ctl_hi_val = 0x4000,
};
static const struct regmap_config ipq_pll_regmap_config = {
@@ -54,6 +71,7 @@ static const struct regmap_config ipq_pll_regmap_config = {
static int apss_ipq_pll_probe(struct platform_device *pdev)
{
+ const struct alpha_pll_config *ipq_pll_config;
struct device *dev = &pdev->dev;
struct regmap *regmap;
void __iomem *base;
@@ -67,7 +85,11 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- clk_alpha_pll_configure(&ipq_pll, regmap, &ipq_pll_config);
+ ipq_pll_config = of_device_get_match_data(&pdev->dev);
+ if (!ipq_pll_config)
+ return -ENODEV;
+
+ clk_alpha_pll_configure(&ipq_pll, regmap, ipq_pll_config);
ret = devm_clk_register_regmap(dev, &ipq_pll.clkr);
if (ret)
@@ -78,7 +100,8 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
}
static const struct of_device_id apss_ipq_pll_match_table[] = {
- { .compatible = "qcom,ipq6018-a53pll" },
+ { .compatible = "qcom,ipq6018-a53pll", .data = &ipq6018_pll_config },
+ { .compatible = "qcom,ipq8074-a53pll", .data = &ipq8074_pll_config },
{ }
};
MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
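
The apss-ipq-pll change above is the standard per-compatible match-data pattern: each of_device_id entry carries a pointer to its SoC-specific PLL configuration, and probe() fetches it with of_device_get_match_data(). A generic sketch with invented names:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include <linux/types.h>

    struct example_cfg {
            u32 l_val;
    };

    static const struct example_cfg example_cfg_a = { .l_val = 0x37 };
    static const struct example_cfg example_cfg_b = { .l_val = 0x48 };

    static const struct of_device_id example_match[] = {
            { .compatible = "vendor,soc-a-pll", .data = &example_cfg_a },
            { .compatible = "vendor,soc-b-pll", .data = &example_cfg_b },
            { }
    };

    static int example_probe(struct platform_device *pdev)
    {
            const struct example_cfg *cfg = of_device_get_match_data(&pdev->dev);

            if (!cfg)       /* no match data: unknown or unsupported compatible */
                    return -ENODEV;

            /* ...program the PLL using cfg->l_val... */
            return 0;
    }
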
diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c
index d78ff2f310bf..f2f502e2d5a4 100644
--- a/drivers/clk/qcom/apss-ipq6018.c
+++ b/drivers/clk/qcom/apss-ipq6018.c
@@ -16,7 +16,7 @@
#include "clk-regmap.h"
#include "clk-branch.h"
#include "clk-alpha-pll.h"
-#include "clk-regmap-mux.h"
+#include "clk-rcg.h"
enum {
P_XO,
@@ -33,16 +33,15 @@ static const struct parent_map parents_apcs_alias0_clk_src_map[] = {
{ P_APSS_PLL_EARLY, 5 },
};
-static struct clk_regmap_mux apcs_alias0_clk_src = {
- .reg = 0x0050,
- .width = 3,
- .shift = 7,
+static struct clk_rcg2 apcs_alias0_clk_src = {
+ .cmd_rcgr = 0x0050,
+ .hid_width = 5,
.parent_map = parents_apcs_alias0_clk_src_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "apcs_alias0_clk_src",
.parent_data = parents_apcs_alias0_clk_src,
- .num_parents = 2,
- .ops = &clk_regmap_mux_closest_ops,
+ .num_parents = ARRAY_SIZE(parents_apcs_alias0_clk_src),
+ .ops = &clk_rcg2_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -57,7 +56,7 @@ static struct clk_branch apcs_alias0_core_clk = {
.parent_hws = (const struct clk_hw *[]){
&apcs_alias0_clk_src.clkr.hw },
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index b42684703fbb..1973d79c9465 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -27,6 +27,7 @@
# define PLL_VOTE_FSM_RESET BIT(21)
# define PLL_UPDATE BIT(22)
# define PLL_UPDATE_BYPASS BIT(23)
+# define PLL_FSM_LEGACY_MODE BIT(24)
# define PLL_OFFLINE_ACK BIT(28)
# define ALPHA_PLL_ACK_LATCH BIT(29)
# define PLL_ACTIVE_FLAG BIT(30)
@@ -166,6 +167,27 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x28,
[PLL_OFF_TEST_CTL_U] = 0x2c,
},
+ [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1C,
+ [PLL_OFF_STATUS] = 0x20,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -1102,6 +1124,10 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
}
+ if (pll->flags & SUPPORTS_FSM_LEGACY_MODE)
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_FSM_LEGACY_MODE,
+ PLL_FSM_LEGACY_MODE);
+
regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
PLL_UPDATE_BYPASS);
@@ -2088,7 +2114,7 @@ static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
return ret;
}
-static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
+static void _alpha_pll_lucid_evo_disable(struct clk_hw *hw, bool reset)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
struct regmap *regmap = pll->clkr.regmap;
@@ -2117,9 +2143,12 @@ static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
/* Place the PLL mode in STANDBY */
regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+
+ if (reset)
+ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, 0);
}
-static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+static int _alpha_pll_lucid_evo_prepare(struct clk_hw *hw, bool reset)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
struct clk_hw *p;
@@ -2139,11 +2168,31 @@ static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
if (ret)
return ret;
- alpha_pll_lucid_evo_disable(hw);
+ _alpha_pll_lucid_evo_disable(hw, reset);
return 0;
}
+static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
+{
+ _alpha_pll_lucid_evo_disable(hw, false);
+}
+
+static int alpha_pll_lucid_evo_prepare(struct clk_hw *hw)
+{
+ return _alpha_pll_lucid_evo_prepare(hw, false);
+}
+
+static void alpha_pll_reset_lucid_evo_disable(struct clk_hw *hw)
+{
+ _alpha_pll_lucid_evo_disable(hw, true);
+}
+
+static int alpha_pll_reset_lucid_evo_prepare(struct clk_hw *hw)
+{
+ return _alpha_pll_lucid_evo_prepare(hw, true);
+}
+
static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -2191,6 +2240,17 @@ const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
+const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
+ .prepare = alpha_pll_reset_lucid_evo_prepare,
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_reset_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = alpha_pll_lucid_5lpe_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
+
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 447efb82fe59..f9524b3fce6b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -19,6 +19,8 @@ enum {
CLK_ALPHA_PLL_TYPE_ZONDA,
CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
+ CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
+ CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -70,9 +72,10 @@ struct clk_alpha_pll {
const struct pll_vco *vco_table;
size_t num_vco;
-#define SUPPORTS_OFFLINE_REQ BIT(0)
-#define SUPPORTS_FSM_MODE BIT(2)
+#define SUPPORTS_OFFLINE_REQ BIT(0)
+#define SUPPORTS_FSM_MODE BIT(2)
#define SUPPORTS_DYNAMIC_UPDATE BIT(3)
+#define SUPPORTS_FSM_LEGACY_MODE BIT(4)
u8 flags;
struct clk_regmap clkr;
@@ -155,6 +158,7 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index 4a4fde8dd12d..ee76ef958d31 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -49,6 +49,7 @@
* detect voltage droops.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
@@ -59,9 +60,10 @@
#include "clk-alpha-pll.h"
#include "clk-regmap.h"
+#include "clk-regmap-mux.h"
enum _pmux_input {
- DIV_2_INDEX = 0,
+ SMUX_INDEX = 0,
PLL_INDEX,
ACD_INDEX,
ALT_INDEX,
@@ -75,6 +77,8 @@ enum _pmux_input {
#define ALT_PLL_OFFSET 0x100
#define SSSCTL_OFFSET 0x160
+#define PMUX_MASK 0x3
+
static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
@@ -111,30 +115,90 @@ static const struct alpha_pll_config hfpll_config = {
.early_output_mask = BIT(3),
};
-static struct clk_alpha_pll perfcl_pll = {
- .offset = PERFCL_REG_OFFSET,
+static const struct clk_parent_data pll_parent[] = {
+ { .fw_name = "xo" },
+};
+
+static struct clk_alpha_pll pwrcl_pll = {
+ .offset = PWRCL_REG_OFFSET,
.regs = prim_pll_regs,
.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data){
- .name = "perfcl_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "pwrcl_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_huayra_ops,
},
};
-static struct clk_alpha_pll pwrcl_pll = {
- .offset = PWRCL_REG_OFFSET,
+static struct clk_alpha_pll perfcl_pll = {
+ .offset = PERFCL_REG_OFFSET,
.regs = prim_pll_regs,
.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data){
- .name = "pwrcl_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "perfcl_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_huayra_ops,
},
};
+static struct clk_fixed_factor pwrcl_pll_postdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pwrcl_pll_postdiv",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pwrcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor perfcl_pll_postdiv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "perfcl_pll_postdiv",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &perfcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor perfcl_pll_acd = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "perfcl_pll_acd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &perfcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor pwrcl_pll_acd = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "pwrcl_pll_acd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pwrcl_pll.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct pll_vco alt_pll_vco_modes[] = {
VCO(3, 250000000, 500000000),
VCO(2, 500000000, 750000000),
@@ -153,93 +217,87 @@ static const struct alpha_pll_config altpll_config = {
.early_output_mask = BIT(3),
};
-static struct clk_alpha_pll perfcl_alt_pll = {
- .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
+static struct clk_alpha_pll pwrcl_alt_pll = {
+ .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
.regs = alt_pll_regs,
.vco_table = alt_pll_vco_modes,
.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data) {
- .name = "perfcl_alt_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "pwrcl_alt_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_hwfsm_ops,
},
};
-static struct clk_alpha_pll pwrcl_alt_pll = {
- .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET,
+static struct clk_alpha_pll perfcl_alt_pll = {
+ .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET,
.regs = alt_pll_regs,
.vco_table = alt_pll_vco_modes,
.num_vco = ARRAY_SIZE(alt_pll_vco_modes),
.flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE,
.clkr.hw.init = &(struct clk_init_data) {
- .name = "pwrcl_alt_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
+ .name = "perfcl_alt_pll",
+ .parent_data = pll_parent,
+ .num_parents = ARRAY_SIZE(pll_parent),
.ops = &clk_alpha_pll_hwfsm_ops,
},
};
-struct clk_cpu_8996_mux {
+struct clk_cpu_8996_pmux {
u32 reg;
- u8 shift;
- u8 width;
struct notifier_block nb;
- struct clk_hw *pll;
- struct clk_hw *pll_div_2;
struct clk_regmap clkr;
};
static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
void *data);
-#define to_clk_cpu_8996_mux_nb(_nb) \
- container_of(_nb, struct clk_cpu_8996_mux, nb)
+#define to_clk_cpu_8996_pmux_nb(_nb) \
+ container_of(_nb, struct clk_cpu_8996_pmux, nb)
-static inline struct clk_cpu_8996_mux *to_clk_cpu_8996_mux_hw(struct clk_hw *hw)
+static inline struct clk_cpu_8996_pmux *to_clk_cpu_8996_pmux_hw(struct clk_hw *hw)
{
- return container_of(to_clk_regmap(hw), struct clk_cpu_8996_mux, clkr);
+ return container_of(to_clk_regmap(hw), struct clk_cpu_8996_pmux, clkr);
}
-static u8 clk_cpu_8996_mux_get_parent(struct clk_hw *hw)
+static u8 clk_cpu_8996_pmux_get_parent(struct clk_hw *hw)
{
struct clk_regmap *clkr = to_clk_regmap(hw);
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw);
- u32 mask = GENMASK(cpuclk->width - 1, 0);
+ struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
u32 val;
regmap_read(clkr->regmap, cpuclk->reg, &val);
- val >>= cpuclk->shift;
- return val & mask;
+ return FIELD_GET(PMUX_MASK, val);
}
-static int clk_cpu_8996_mux_set_parent(struct clk_hw *hw, u8 index)
+static int clk_cpu_8996_pmux_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_regmap *clkr = to_clk_regmap(hw);
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw);
- u32 mask = GENMASK(cpuclk->width + cpuclk->shift - 1, cpuclk->shift);
+ struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_hw(hw);
u32 val;
- val = index;
- val <<= cpuclk->shift;
+ val = FIELD_PREP(PMUX_MASK, index);
- return regmap_update_bits(clkr->regmap, cpuclk->reg, mask, val);
+ return regmap_update_bits(clkr->regmap, cpuclk->reg, PMUX_MASK, val);
}
-static int clk_cpu_8996_mux_determine_rate(struct clk_hw *hw,
+static int clk_cpu_8996_pmux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw);
- struct clk_hw *parent = cpuclk->pll;
+ struct clk_hw *parent;
- if (cpuclk->pll_div_2 && req->rate < DIV_2_THRESHOLD) {
- if (req->rate < (DIV_2_THRESHOLD / 2))
- return -EINVAL;
+ if (req->rate < (DIV_2_THRESHOLD / 2))
+ return -EINVAL;
- parent = cpuclk->pll_div_2;
- }
+ if (req->rate < DIV_2_THRESHOLD)
+ parent = clk_hw_get_parent_by_index(hw, SMUX_INDEX);
+ else
+ parent = clk_hw_get_parent_by_index(hw, ACD_INDEX);
+ if (!parent)
+ return -EINVAL;
req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
req->best_parent_hw = parent;
@@ -247,83 +305,83 @@ static int clk_cpu_8996_mux_determine_rate(struct clk_hw *hw,
return 0;
}
-static const struct clk_ops clk_cpu_8996_mux_ops = {
- .set_parent = clk_cpu_8996_mux_set_parent,
- .get_parent = clk_cpu_8996_mux_get_parent,
- .determine_rate = clk_cpu_8996_mux_determine_rate,
+static const struct clk_ops clk_cpu_8996_pmux_ops = {
+ .set_parent = clk_cpu_8996_pmux_set_parent,
+ .get_parent = clk_cpu_8996_pmux_get_parent,
+ .determine_rate = clk_cpu_8996_pmux_determine_rate,
+};
+
+static const struct clk_parent_data pwrcl_smux_parents[] = {
+ { .fw_name = "xo" },
+ { .hw = &pwrcl_pll_postdiv.hw },
};
-static struct clk_cpu_8996_mux pwrcl_smux = {
+static const struct clk_parent_data perfcl_smux_parents[] = {
+ { .fw_name = "xo" },
+ { .hw = &perfcl_pll_postdiv.hw },
+};
+
+static struct clk_regmap_mux pwrcl_smux = {
.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
.shift = 2,
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pwrcl_smux",
- .parent_names = (const char *[]){
- "xo",
- "pwrcl_pll_main",
- },
- .num_parents = 2,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_data = pwrcl_smux_parents,
+ .num_parents = ARRAY_SIZE(pwrcl_smux_parents),
+ .ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_cpu_8996_mux perfcl_smux = {
+static struct clk_regmap_mux perfcl_smux = {
.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
.shift = 2,
.width = 2,
.clkr.hw.init = &(struct clk_init_data) {
.name = "perfcl_smux",
- .parent_names = (const char *[]){
- "xo",
- "perfcl_pll_main",
- },
- .num_parents = 2,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_data = perfcl_smux_parents,
+ .num_parents = ARRAY_SIZE(perfcl_smux_parents),
+ .ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_cpu_8996_mux pwrcl_pmux = {
+static const struct clk_hw *pwrcl_pmux_parents[] = {
+ [SMUX_INDEX] = &pwrcl_smux.clkr.hw,
+ [PLL_INDEX] = &pwrcl_pll.clkr.hw,
+ [ACD_INDEX] = &pwrcl_pll_acd.hw,
+ [ALT_INDEX] = &pwrcl_alt_pll.clkr.hw,
+};
+
+static const struct clk_hw *perfcl_pmux_parents[] = {
+ [SMUX_INDEX] = &perfcl_smux.clkr.hw,
+ [PLL_INDEX] = &perfcl_pll.clkr.hw,
+ [ACD_INDEX] = &perfcl_pll_acd.hw,
+ [ALT_INDEX] = &perfcl_alt_pll.clkr.hw,
+};
+
+static struct clk_cpu_8996_pmux pwrcl_pmux = {
.reg = PWRCL_REG_OFFSET + MUX_OFFSET,
- .shift = 0,
- .width = 2,
- .pll = &pwrcl_pll.clkr.hw,
- .pll_div_2 = &pwrcl_smux.clkr.hw,
.nb.notifier_call = cpu_clk_notifier_cb,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pwrcl_pmux",
- .parent_names = (const char *[]){
- "pwrcl_smux",
- "pwrcl_pll",
- "pwrcl_pll_acd",
- "pwrcl_alt_pll",
- },
- .num_parents = 4,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_hws = pwrcl_pmux_parents,
+ .num_parents = ARRAY_SIZE(pwrcl_pmux_parents),
+ .ops = &clk_cpu_8996_pmux_ops,
/* CPU clock is critical and should never be gated */
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
},
};
-static struct clk_cpu_8996_mux perfcl_pmux = {
+static struct clk_cpu_8996_pmux perfcl_pmux = {
.reg = PERFCL_REG_OFFSET + MUX_OFFSET,
- .shift = 0,
- .width = 2,
- .pll = &perfcl_pll.clkr.hw,
- .pll_div_2 = &perfcl_smux.clkr.hw,
.nb.notifier_call = cpu_clk_notifier_cb,
.clkr.hw.init = &(struct clk_init_data) {
.name = "perfcl_pmux",
- .parent_names = (const char *[]){
- "perfcl_smux",
- "perfcl_pll",
- "perfcl_pll_acd",
- "perfcl_alt_pll",
- },
- .num_parents = 4,
- .ops = &clk_cpu_8996_mux_ops,
+ .parent_hws = perfcl_pmux_parents,
+ .num_parents = ARRAY_SIZE(perfcl_pmux_parents),
+ .ops = &clk_cpu_8996_pmux_ops,
/* CPU clock is critical and should never be gated */
.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
},
@@ -338,15 +396,22 @@ static const struct regmap_config cpu_msm8996_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static struct clk_hw *cpu_msm8996_hw_clks[] = {
+ &pwrcl_pll_postdiv.hw,
+ &perfcl_pll_postdiv.hw,
+ &pwrcl_pll_acd.hw,
+ &perfcl_pll_acd.hw,
+};
+
static struct clk_regmap *cpu_msm8996_clks[] = {
- &perfcl_pll.clkr,
&pwrcl_pll.clkr,
- &perfcl_alt_pll.clkr,
+ &perfcl_pll.clkr,
&pwrcl_alt_pll.clkr,
- &perfcl_smux.clkr,
+ &perfcl_alt_pll.clkr,
&pwrcl_smux.clkr,
- &perfcl_pmux.clkr,
+ &perfcl_smux.clkr,
&pwrcl_pmux.clkr,
+ &perfcl_pmux.clkr,
};
static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
@@ -354,67 +419,33 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
{
int i, ret;
- perfcl_smux.pll = clk_hw_register_fixed_factor(dev, "perfcl_pll_main",
- "perfcl_pll",
- CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(perfcl_smux.pll)) {
- dev_err(dev, "Failed to initialize perfcl_pll_main\n");
- return PTR_ERR(perfcl_smux.pll);
- }
-
- pwrcl_smux.pll = clk_hw_register_fixed_factor(dev, "pwrcl_pll_main",
- "pwrcl_pll",
- CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(pwrcl_smux.pll)) {
- dev_err(dev, "Failed to initialize pwrcl_pll_main\n");
- clk_hw_unregister(perfcl_smux.pll);
- return PTR_ERR(pwrcl_smux.pll);
+ for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
+ ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
+ if (ret)
+ return ret;
}
for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) {
ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]);
- if (ret) {
- clk_hw_unregister(perfcl_smux.pll);
- clk_hw_unregister(pwrcl_smux.pll);
+ if (ret)
return ret;
- }
}
- clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
- clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
+ clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
+ clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
/* Enable alt PLLs */
clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);
- clk_notifier_register(pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
- clk_notifier_register(perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
+ devm_clk_notifier_register(dev, pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
+ devm_clk_notifier_register(dev, perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
return ret;
}
-static int qcom_cpu_clk_msm8996_unregister_clks(void)
-{
- int ret = 0;
-
- ret = clk_notifier_unregister(pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb);
- if (ret)
- return ret;
-
- ret = clk_notifier_unregister(perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb);
- if (ret)
- return ret;
-
- clk_hw_unregister(perfcl_smux.pll);
- clk_hw_unregister(pwrcl_smux.pll);
-
- return 0;
-}
-
#define CPU_AFINITY_MASK 0xFFF
#define PWRCL_CPU_REG_MASK 0x3
#define PERFCL_CPU_REG_MASK 0x103
@@ -456,22 +487,22 @@ static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
- struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_nb(nb);
+ struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
struct clk_notifier_data *cnd = data;
int ret;
switch (event) {
case PRE_RATE_CHANGE:
- ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
+ ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
qcom_cpu_clk_msm8996_acd_init(base);
break;
case POST_RATE_CHANGE:
if (cnd->new_rate < DIV_2_THRESHOLD)
- ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw,
- DIV_2_INDEX);
+ ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
+ SMUX_INDEX);
else
- ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw,
- ACD_INDEX);
+ ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
+ ACD_INDEX);
break;
default:
ret = 0;
@@ -513,11 +544,6 @@ static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data);
}
-static int qcom_cpu_clk_msm8996_driver_remove(struct platform_device *pdev)
-{
- return qcom_cpu_clk_msm8996_unregister_clks();
-}
-
static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = {
{ .compatible = "qcom,msm8996-apcc" },
{}
@@ -526,7 +552,6 @@ MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table);
static struct platform_driver qcom_cpu_clk_msm8996_driver = {
.probe = qcom_cpu_clk_msm8996_driver_probe,
- .remove = qcom_cpu_clk_msm8996_driver_remove,
.driver = {
.name = "qcom-msm8996-apcc",
.of_match_table = qcom_cpu_clk_msm8996_match_table,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 012e745794fd..01581f4d2c39 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -167,6 +167,7 @@ struct clk_rcg2_gfx3d {
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
+extern const struct clk_ops clk_rcg2_mux_closest_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 28019edd2a50..609c10f8d0d9 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -509,6 +509,13 @@ const struct clk_ops clk_rcg2_floor_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
+const struct clk_ops clk_rcg2_mux_closest_ops = {
+ .determine_rate = __clk_mux_determine_rate_closest,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);
+
struct frac_entry {
int num;
int den;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index c07cab6905cb..0471bab82464 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -195,10 +195,6 @@ static int clk_rpmh_aggregate_state_send_command(struct clk_rpmh *c,
{
int ret;
- /* Nothing required to be done if already off or on */
- if (enable == c->state)
- return 0;
-
c->state = enable ? c->valid_state_mask : 0;
c->aggr_state = c->state | c->peer->state;
c->peer->aggr_state = c->aggr_state;
@@ -382,6 +378,26 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
+static struct clk_hw *sdm670_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &sdm845_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_CE_CLK] = &sdm845_ce.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdm670 = {
+ .clks = sdm670_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdm670_rpmh_clocks),
+};
+
DEFINE_CLK_RPMH_VRM(sdx55, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
DEFINE_CLK_RPMH_VRM(sdx55, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
DEFINE_CLK_RPMH_BCM(sdx55, qpic_clk, "QP0");
@@ -715,6 +731,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sc8280xp-rpmh-clk", .data = &clk_rpmh_sc8280xp},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { .compatible = "qcom,sdm670-rpmh-clk", .data = &clk_rpmh_sdm670},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65},
{ .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 10b4e6d8d10f..fea505876855 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -417,6 +417,7 @@ DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0,
DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2, 19200000);
@@ -427,6 +428,40 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2, 192
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5, 19200000);
+static struct clk_smd_rpm *msm8909_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8909 = {
+ .clks = msm8909_clks,
+ .num_clks = ARRAY_SIZE(msm8909_clks),
+};
+
static struct clk_smd_rpm *msm8916_clks[] = {
[RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
[RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
@@ -787,7 +822,6 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
};
DEFINE_CLK_SMD_RPM(qcs404, bimc_gpu_clk, bimc_gpu_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
-DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8, 19200000);
static struct clk_smd_rpm *qcs404_clks[] = {
@@ -1085,13 +1119,54 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
.num_clks = ARRAY_SIZE(sm6115_clks),
};
+/* SM6375 */
+DEFINE_CLK_SMD_RPM(sm6375, mmnrt_clk, mmnrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(sm6375, mmrt_clk, mmrt_a_clk, QCOM_SMD_RPM_MMXI_CLK, 1);
+DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(sm6375, bimc_freq_log, bimc_freq_log_a, QCOM_SMD_RPM_MISC_CLK, 4, 1);
+
+static struct clk_smd_rpm *sm6375_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
+ [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &sm6375_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &sm6375_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &sm6375_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &sm6375_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
+ [RPM_SMD_HWKM_CLK] = &qcm2290_hwkm_clk,
+ [RPM_SMD_HWKM_A_CLK] = &qcm2290_hwkm_a_clk,
+ [RPM_SMD_PKA_CLK] = &qcm2290_pka_clk,
+ [RPM_SMD_PKA_A_CLK] = &qcm2290_pka_a_clk,
+ [RPM_SMD_BIMC_FREQ_LOG] = &sm6375_bimc_freq_log,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_sm6375 = {
+ .clks = sm6375_clks,
+ .num_clks = ARRAY_SIZE(sm6375_clks),
+};
+
/* QCM2290 */
DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, ln_bb_clk2, ln_bb_clk2_a, 0x2, 19200000);
DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, rf_clk3, rf_clk3_a, 6, 38400000);
DEFINE_CLK_SMD_RPM(qcm2290, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
-DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
DEFINE_CLK_SMD_RPM(qcm2290, cpuss_gnoc_clk, cpuss_gnoc_a_clk,
QCOM_SMD_RPM_MEM_CLK, 1);
DEFINE_CLK_SMD_RPM(qcm2290, bimc_gpu_clk, bimc_gpu_a_clk,
@@ -1146,6 +1221,7 @@ static const struct rpm_smd_clk_desc rpm_clk_qcm2290 = {
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-mdm9607", .data = &rpm_clk_mdm9607 },
{ .compatible = "qcom,rpmcc-msm8226", .data = &rpm_clk_msm8974 },
+ { .compatible = "qcom,rpmcc-msm8909", .data = &rpm_clk_msm8909 },
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 },
{ .compatible = "qcom,rpmcc-msm8953", .data = &rpm_clk_msm8953 },
@@ -1160,6 +1236,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
{ .compatible = "qcom,rpmcc-sm6125", .data = &rpm_clk_sm6125 },
+ { .compatible = "qcom,rpmcc-sm6375", .data = &rpm_clk_sm6375 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
diff --git a/drivers/clk/qcom/dispcc-sm6115.c b/drivers/clk/qcom/dispcc-sm6115.c
new file mode 100644
index 000000000000..818bb8f4637c
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm6115.c
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on dispcc-qcm2290.c
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6115-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_GPLL0_DISP_DIV,
+};
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct clk_parent_data parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct pll_vco spark_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 768MHz configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x28,
+ .alpha = 0x0,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055B,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_main = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_disp_cc_pll0_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0_out_main",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_GPLL0_DISP_DIV },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0_out_main.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_SLEEP_CLK, },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ /* For set_rate and set_parent to succeed, parent(s) must be enabled */
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20d4,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x2154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x20d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x205c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ /* For set_rate and set_parent to succeed, parent(s) must be enabled */
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x208c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32764, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x6050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x2018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &disp_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x3000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc *disp_cc_sm6115_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+static struct clk_regmap *disp_cc_sm6115_clocks[] = {
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL0_OUT_MAIN] = &disp_cc_pll0_out_main.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_sm6115_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_sm6115_desc = {
+ .config = &disp_cc_sm6115_regmap_config,
+ .clks = disp_cc_sm6115_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm6115_clocks),
+ .gdscs = disp_cc_sm6115_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm6115_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm6115_match_table[] = {
+ { .compatible = "qcom,sm6115-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm6115_match_table);
+
+static int disp_cc_sm6115_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &disp_cc_sm6115_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ /* Keep DISP_CC_XO_CLK always-ON */
+ regmap_update_bits(regmap, 0x604c, BIT(0), BIT(0));
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm6115_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register DISP CC clocks\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm6115_driver = {
+ .probe = disp_cc_sm6115_probe,
+ .driver = {
+ .name = "dispcc-sm6115",
+ .of_match_table = disp_cc_sm6115_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_sm6115_driver);
+MODULE_DESCRIPTION("Qualcomm SM6115 Display Clock controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
new file mode 100644
index 000000000000..0cd7ebe90301
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -0,0 +1,1829 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <dt-bindings/clock/qcom,sm8450-dispcc.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_AHB_CLK,
+ DT_SLEEP_CLK,
+
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+};
+
+#define DISP_CC_MISC_CMD 0xF000
+
+enum {
+ P_BI_TCXO,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_evo_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xD,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32AA299C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1F,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00182261,
+ .config_ctl_hi1_val = 0x32AA299C,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_evo_vco,
+ .num_vco = ARRAY_SIZE(lucid_evo_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_reset_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8324,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8134,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x81ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
+ F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x81bc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x81d4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8254,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x826c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x828c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82a4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8308,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x82ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x82d4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x816c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x8184,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(172000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(375000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(500000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x80ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x80d4,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_DISP_CC_PLL1_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_DISP_CC_PLL1_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_DISP_CC_PLL1_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x8104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x811c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1_ao,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x814c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x8168,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x824c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x8284,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8304,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x802c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x802c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_crypto_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_crypto_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_crypto_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_crypto_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_crypto_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x8094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot1_clk = {
+ .halt_reg = 0xa00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rot1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x8014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_rot_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0xe078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &disp_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x9000,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc mdss_int2_gdsc = {
+ .gdscr = 0xb000,
+ .pd = {
+ .name = "mdss_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_sm8450_clocks[] = {
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_CRYPTO_CLK] = &disp_cc_mdss_dptx0_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_CRYPTO_CLK] = &disp_cc_mdss_dptx1_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_CRYPTO_CLK] = &disp_cc_mdss_dptx2_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_CRYPTO_CLK] = &disp_cc_mdss_dptx3_crypto_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_ROT1_CLK] = &disp_cc_mdss_rot1_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct qcom_reset_map disp_cc_sm8450_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct gdsc *disp_cc_sm8450_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [MDSS_INT2_GDSC] = &mdss_int2_gdsc,
+};
+
+static const struct regmap_config disp_cc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11008,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc disp_cc_sm8450_desc = {
+ .config = &disp_cc_sm8450_regmap_config,
+ .clks = disp_cc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_sm8450_clocks),
+ .resets = disp_cc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm8450_resets),
+ .gdscs = disp_cc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_sm8450_gdscs),
+};
+
+static const struct of_device_id disp_cc_sm8450_match_table[] = {
+ { .compatible = "qcom,sm8450-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_sm8450_match_table);
+
+static void disp_cc_sm8450_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static int disp_cc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, disp_cc_sm8450_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+	regmap = qcom_cc_map(pdev, &disp_cc_sm8450_desc);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		goto err_put_rpm;
+	}
+
+ clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+ clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);
+
+ /* Enable clock gating for MDP clocks */
+ regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);
+
+ /*
+ * Keep clocks always enabled:
+ * disp_cc_xo_clk
+ */
+ regmap_update_bits(regmap, 0xe05c, BIT(0), BIT(0));
+
+	ret = qcom_cc_really_probe(pdev, &disp_cc_sm8450_desc, regmap);
+
+err_put_rpm:
+	pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver disp_cc_sm8450_driver = {
+ .probe = disp_cc_sm8450_probe,
+ .driver = {
+ .name = "disp_cc-sm8450",
+ .of_match_table = disp_cc_sm8450_match_table,
+ },
+};
+
+static int __init disp_cc_sm8450_init(void)
+{
+ return platform_driver_register(&disp_cc_sm8450_driver);
+}
+subsys_initcall(disp_cc_sm8450_init);
+
+static void __exit disp_cc_sm8450_exit(void)
+{
+ platform_driver_unregister(&disp_cc_sm8450_driver);
+}
+module_exit(disp_cc_sm8450_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC SM8450 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index 94ea2d84d1b1..657e1154bb9b 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -34,7 +34,9 @@ static struct clk_pll pll8 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll8",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -45,7 +47,9 @@ static struct clk_regmap pll8_vote = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "pll8_vote",
- .parent_names = (const char *[]){ "pll8" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll8.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -62,9 +66,9 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char * const gcc_pxo_pll8[] = {
- "pxo",
- "pll8_vote",
+static const struct clk_parent_data gcc_pxo_pll8[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
};
static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
@@ -73,10 +77,10 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char * const gcc_pxo_pll8_cxo[] = {
- "pxo",
- "pll8_vote",
- "cxo",
+static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
+ { .fw_name = "cxo", .name = "cxo_board" },
};
static struct freq_tbl clk_tbl_gsbi_uart[] = {
@@ -122,8 +126,8 @@ static struct clk_rcg gsbi1_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -138,8 +142,8 @@ static struct clk_branch gsbi1_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_clk",
- .parent_names = (const char *[]){
- "gsbi1_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -173,8 +177,8 @@ static struct clk_rcg gsbi2_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -189,8 +193,8 @@ static struct clk_branch gsbi2_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_clk",
- .parent_names = (const char *[]){
- "gsbi2_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -224,8 +228,8 @@ static struct clk_rcg gsbi3_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -240,8 +244,8 @@ static struct clk_branch gsbi3_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_clk",
- .parent_names = (const char *[]){
- "gsbi3_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -275,8 +279,8 @@ static struct clk_rcg gsbi4_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -291,8 +295,8 @@ static struct clk_branch gsbi4_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_clk",
- .parent_names = (const char *[]){
- "gsbi4_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -326,8 +330,8 @@ static struct clk_rcg gsbi5_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -342,8 +346,8 @@ static struct clk_branch gsbi5_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_clk",
- .parent_names = (const char *[]){
- "gsbi5_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -377,8 +381,8 @@ static struct clk_rcg gsbi6_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -393,8 +397,8 @@ static struct clk_branch gsbi6_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_clk",
- .parent_names = (const char *[]){
- "gsbi6_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -428,8 +432,8 @@ static struct clk_rcg gsbi7_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -444,8 +448,8 @@ static struct clk_branch gsbi7_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_clk",
- .parent_names = (const char *[]){
- "gsbi7_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -479,8 +483,8 @@ static struct clk_rcg gsbi8_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -495,7 +499,9 @@ static struct clk_branch gsbi8_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_clk",
- .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -528,8 +534,8 @@ static struct clk_rcg gsbi9_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -544,7 +550,9 @@ static struct clk_branch gsbi9_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_clk",
- .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -577,8 +585,8 @@ static struct clk_rcg gsbi10_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -593,7 +601,9 @@ static struct clk_branch gsbi10_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_clk",
- .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -626,8 +636,8 @@ static struct clk_rcg gsbi11_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -642,7 +652,9 @@ static struct clk_branch gsbi11_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_clk",
- .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -675,8 +687,8 @@ static struct clk_rcg gsbi12_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -691,7 +703,9 @@ static struct clk_branch gsbi12_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_clk",
- .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -737,8 +751,8 @@ static struct clk_rcg gsbi1_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -753,7 +767,9 @@ static struct clk_branch gsbi1_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_clk",
- .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -786,8 +802,8 @@ static struct clk_rcg gsbi2_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -802,7 +818,9 @@ static struct clk_branch gsbi2_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_clk",
- .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -835,8 +853,8 @@ static struct clk_rcg gsbi3_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -851,7 +869,9 @@ static struct clk_branch gsbi3_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_clk",
- .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -884,8 +904,8 @@ static struct clk_rcg gsbi4_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -900,7 +920,9 @@ static struct clk_branch gsbi4_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_clk",
- .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -933,8 +955,8 @@ static struct clk_rcg gsbi5_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -949,7 +971,9 @@ static struct clk_branch gsbi5_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_clk",
- .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -982,8 +1006,8 @@ static struct clk_rcg gsbi6_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -998,7 +1022,9 @@ static struct clk_branch gsbi6_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_clk",
- .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1031,8 +1057,8 @@ static struct clk_rcg gsbi7_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1047,7 +1073,9 @@ static struct clk_branch gsbi7_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_clk",
- .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1080,8 +1108,8 @@ static struct clk_rcg gsbi8_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1096,7 +1124,9 @@ static struct clk_branch gsbi8_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_clk",
- .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1129,8 +1159,8 @@ static struct clk_rcg gsbi9_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1145,7 +1175,9 @@ static struct clk_branch gsbi9_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_clk",
- .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1178,8 +1210,8 @@ static struct clk_rcg gsbi10_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1194,7 +1226,9 @@ static struct clk_branch gsbi10_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_clk",
- .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1227,8 +1261,8 @@ static struct clk_rcg gsbi11_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1243,7 +1277,9 @@ static struct clk_branch gsbi11_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_clk",
- .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1276,8 +1312,8 @@ static struct clk_rcg gsbi12_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1292,7 +1328,9 @@ static struct clk_branch gsbi12_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_clk",
- .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1338,8 +1376,8 @@ static struct clk_rcg gp0_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp0_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1354,7 +1392,9 @@ static struct clk_branch gp0_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp0_clk",
- .parent_names = (const char *[]){ "gp0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1387,8 +1427,8 @@ static struct clk_rcg gp1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp1_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1403,7 +1443,9 @@ static struct clk_branch gp1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp1_clk",
- .parent_names = (const char *[]){ "gp1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1436,8 +1478,8 @@ static struct clk_rcg gp2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp2_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1452,7 +1494,9 @@ static struct clk_branch gp2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp2_clk",
- .parent_names = (const char *[]){ "gp2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1488,8 +1532,8 @@ static struct clk_rcg prng_src = {
.clkr.hw = {
.init = &(struct clk_init_data){
.name = "prng_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
},
@@ -1504,7 +1548,9 @@ static struct clk_branch prng_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "prng_clk",
- .parent_names = (const char *[]){ "prng_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &prng_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -1547,8 +1593,8 @@ static struct clk_rcg sdc1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc1_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1562,7 +1608,9 @@ static struct clk_branch sdc1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc1_clk",
- .parent_names = (const char *[]){ "sdc1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1595,8 +1643,8 @@ static struct clk_rcg sdc2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc2_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1610,7 +1658,9 @@ static struct clk_branch sdc2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc2_clk",
- .parent_names = (const char *[]){ "sdc2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1643,8 +1693,8 @@ static struct clk_rcg sdc3_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc3_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1658,7 +1708,9 @@ static struct clk_branch sdc3_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc3_clk",
- .parent_names = (const char *[]){ "sdc3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1691,8 +1743,8 @@ static struct clk_rcg sdc4_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc4_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1706,7 +1758,9 @@ static struct clk_branch sdc4_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc4_clk",
- .parent_names = (const char *[]){ "sdc4_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc4_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1739,8 +1793,8 @@ static struct clk_rcg sdc5_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc5_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1754,7 +1808,9 @@ static struct clk_branch sdc5_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc5_clk",
- .parent_names = (const char *[]){ "sdc5_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc5_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1792,8 +1848,8 @@ static struct clk_rcg tsif_ref_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1808,7 +1864,9 @@ static struct clk_branch tsif_ref_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk",
- .parent_names = (const char *[]){ "tsif_ref_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &tsif_ref_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1846,8 +1904,8 @@ static struct clk_rcg usb_hs1_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1862,7 +1920,9 @@ static struct clk_branch usb_hs1_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs1_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1895,16 +1955,14 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
-
static struct clk_branch usb_fs1_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 15,
@@ -1913,7 +1971,9 @@ static struct clk_branch usb_fs1_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_clk",
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1928,7 +1988,9 @@ static struct clk_branch usb_fs1_system_clk = {
.enable_reg = 0x296c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.name = "usb_fs1_system_clk",
.ops = &clk_branch_ops,
@@ -1962,16 +2024,14 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
-
static struct clk_branch usb_fs2_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 12,
@@ -1980,7 +2040,9 @@ static struct clk_branch usb_fs2_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1996,7 +2058,9 @@ static struct clk_branch usb_fs2_system_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_system_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/gcc-msm8909.c b/drivers/clk/qcom/gcc-msm8909.c
new file mode 100644
index 000000000000..2a00b11ce2cd
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8909.c
@@ -0,0 +1,2731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Kernkonzept GmbH.
+ *
+ * Based on gcc-msm8916.c:
+ * Copyright 2015 Linaro Limited
+ * adapted with data from clock-gcc-8909.c in Qualcomm's msm-3.18 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8909.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+/* Need to match the order of clocks in DT binding */
+enum {
+ DT_XO,
+ DT_SLEEP_CLK,
+ DT_DSI0PLL,
+ DT_DSI0PLL_BYTE,
+};
+
+enum {
+ P_XO,
+ P_SLEEP_CLK,
+ P_GPLL0,
+ P_GPLL1,
+ P_GPLL2,
+ P_BIMC,
+ P_DSI0PLL,
+ P_DSI0PLL_BYTE,
+};
+
+static const struct parent_map gcc_xo_map[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_xo_data[] = {
+ { .index = DT_XO },
+};
+
+static const struct clk_parent_data gcc_sleep_clk_data[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll0_early",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x21000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll0",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0_early.clkr.hw,
+ },
+ .num_parents = 1,
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20010,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll1",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll1_vote",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll2_early = {
+ .offset = 0x25000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gpll2_early",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll2 = {
+ .offset = 0x25000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll2",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2_early.clkr.hw,
+ },
+ .num_parents = 1,
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll bimc_pll_early = {
+ .offset = 0x23000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "bimc_pll_early",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_fixed_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv bimc_pll = {
+ .offset = 0x23000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "bimc_pll",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_pll_early.clkr.hw,
+ },
+ .num_parents = 1,
+ /* Avoid rate changes for shared clock */
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_BIMC, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_bimc_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &bimc_pll.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "bimc_ddr_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "bimc_gpu_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07024,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 72, 15625),
+ F(7372800, P_GPLL0, 1, 144, 15625),
+ F(14745600, P_GPLL0, 1, 288, 15625),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ { }
+};
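+
+/*
+ * The UART rates lean on the 16-bit M/N counter (mnd_width = 16):
+ * rate = source / pre_divider * M / N, so the first entry is
+ * 800 MHz / 1 * 72 / 15625 = 3.6864 MHz.
+ */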
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .hid_width = 5,
+ .mnd_width = 16,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_byte0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_byte_data[] = {
+ { .index = DT_XO },
+ { .index = DT_DSI0PLL_BYTE },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_byte0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "byte0_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
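+
+/*
+ * The MDSS byte and pixel clocks are driven by the DSI0 PHY PLL, referenced
+ * through the DT_DSI0PLL_BYTE/DT_DSI0PLL firmware indices instead of a clk_hw
+ * in this driver. CLK_SET_RATE_PARENT lets the display driver's rate requests
+ * propagate up to that PLL.
+ */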
+
+static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_camss_top_ahb_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "camss_top_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_csi_phytimer_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_esc0_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "esc0_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_gfx3d_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+};
+
+static const struct clk_parent_data gcc_gfx3d_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(307200000, P_GPLL1, 4, 0, 0),
+ F(409600000, P_GPLL1, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .parent_map = gcc_gfx3d_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_gfx3d_data,
+ .num_parents = ARRAY_SIZE(gcc_gfx3d_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_gp_clk_src[] = {
+ F(150000, P_XO, 1, 1, 128),
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gp_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_mclk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 3 },
+};
+
+static const struct clk_parent_data gcc_mclk_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll2.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(24000000, P_GPLL2, 1, 1, 33),
+ F(66667000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .parent_map = gcc_mclk_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_mclk_data,
+ .num_parents = ARRAY_SIZE(gcc_mclk_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_mdp_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 3 },
+};
+
+static const struct clk_parent_data gcc_mdp_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(307200000, P_GPLL1, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .parent_map = gcc_mdp_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "mdp_clk_src",
+ .parent_data = gcc_mdp_data,
+ .num_parents = ARRAY_SIZE(gcc_mdp_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_pclk0_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_pclk_data[] = {
+ { .index = DT_XO },
+ { .index = DT_DSI0PLL },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_2_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gcc_sdcc1_2_apps_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
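+
+/*
+ * The SDCC RCGs use clk_rcg2_floor_ops so a requested rate is rounded down to
+ * the nearest table entry rather than up, keeping the card clock no faster
+ * than what the SD/eMMC host asked for.
+ */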
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_gcc_sdcc1_2_apps_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_floor_ops,
+ }
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x26004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(57140000, P_GPLL0, 14, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct parent_map gcc_vcodec0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 3 },
+};
+
+static const struct clk_parent_data gcc_vcodec0_data[] = {
+ { .index = DT_XO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(307200000, P_GPLL1, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .parent_map = gcc_vcodec0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_vcodec0_data,
+ .num_parents = ARRAY_SIZE(gcc_vcodec0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_camss_vfe0_clk,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .parent_map = gcc_xo_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "vsync_clk_src",
+ .parent_data = gcc_xo_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_data),
+ .ops = &clk_rcg2_ops,
+ }
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_apss_tcu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x01004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_sleep_clk",
+ .parent_data = gcc_sleep_clk_data,
+ .num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_crypto_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gfx_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gfx_tcu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gtcu_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdp_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_smmu_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_vfe_tbu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gfx_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gpu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x06020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x07020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0601c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x56004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .parent_data = gcc_sleep_clk_data,
+ .num_parents = ARRAY_SIZE(gcc_sleep_clk_data),
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = {
+ .halt_reg = 0x41030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_phy_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_system_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+ }
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .cxcs = (unsigned int []) { 0x4d080, 0x4d088 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .cxcs = (unsigned int []) { 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .cxcs = (unsigned int []) { 0x4c024, 0x4c01c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .cxcs = (unsigned int []) { 0x4c02c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0_gdsc",
+ },
+ .flags = HW_CTRL,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .cxcs = (unsigned int []) { 0x58038, 0x58048, 0x58050 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "vfe_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
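+
+/*
+ * GDSC power domains: .gdscr is the domain's control register and .cxcs lists
+ * the CBCR registers of branch clocks tied to the domain (for mdss_gdsc these
+ * are the gcc_mdss_axi/mdp enable registers above), which the common gdsc
+ * code touches for retention handling while the domain is toggled. HW_CTRL on
+ * venus_core0_gdsc hands collapse/restore of that domain to hardware.
+ */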
+
+static struct clk_regmap *gcc_msm8909_clocks[] = {
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [GPLL2_EARLY] = &gpll2_early.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [BIMC_PLL_EARLY] = &bimc_pll_early.clkr,
+ [BIMC_PLL] = &bimc_pll.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8909_gdscs[] = {
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+};
+
+static const struct qcom_reset_map gcc_msm8909_resets[] = {
+ [GCC_AUDIO_CORE_BCR] = { 0x1c008 },
+ [GCC_BLSP1_BCR] = { 0x01000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04018 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05018 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06018 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07018 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028 },
+ [GCC_CAMSS_CSI0_BCR] = { 0x4e038 },
+ [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 },
+ [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 },
+ [GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c },
+ [GCC_CAMSS_CSI1_BCR] = { 0x4f038 },
+ [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 },
+ [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c },
+ [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c },
+ [GCC_CAMSS_GP0_BCR] = { 0x54014 },
+ [GCC_CAMSS_GP1_BCR] = { 0x55014 },
+ [GCC_CAMSS_ISPIF_BCR] = { 0x50000 },
+ [GCC_CAMSS_MCLK0_BCR] = { 0x52014 },
+ [GCC_CAMSS_MCLK1_BCR] = { 0x53014 },
+ [GCC_CAMSS_PHY0_BCR] = { 0x4e018 },
+ [GCC_CAMSS_TOP_BCR] = { 0x56000 },
+ [GCC_CAMSS_TOP_AHB_BCR] = { 0x5a018 },
+ [GCC_CAMSS_VFE_BCR] = { 0x58030 },
+ [GCC_CRYPTO_BCR] = { 0x16000 },
+ [GCC_MDSS_BCR] = { 0x4d074 },
+ [GCC_OXILI_BCR] = { 0x59018 },
+ [GCC_PDM_BCR] = { 0x44000 },
+ [GCC_PRNG_BCR] = { 0x13000 },
+ [GCC_QUSB2_PHY_BCR] = { 0x4103c },
+ [GCC_SDCC1_BCR] = { 0x42000 },
+ [GCC_SDCC2_BCR] = { 0x43000 },
+ [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 },
+ [GCC_USB2A_PHY_BCR] = { 0x41028 },
+ [GCC_USB2_HS_PHY_ONLY_BCR] = { .reg = 0x41034, .udelay = 15 },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_VENUS0_BCR] = { 0x4c014 },
+ /* Subsystem Restart */
+ [GCC_MSS_RESTART] = { 0x3e000 },
+};
+
+static const struct regmap_config gcc_msm8909_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8909_desc = {
+ .config = &gcc_msm8909_regmap_config,
+ .clks = gcc_msm8909_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8909_clocks),
+ .resets = gcc_msm8909_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8909_resets),
+ .gdscs = gcc_msm8909_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8909_gdscs),
+};
+
+static const struct of_device_id gcc_msm8909_match_table[] = {
+ { .compatible = "qcom,gcc-msm8909" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8909_match_table);
+
+static int gcc_msm8909_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_msm8909_desc);
+}
+
+static struct platform_driver gcc_msm8909_driver = {
+ .probe = gcc_msm8909_probe,
+ .driver = {
+ .name = "gcc-msm8909",
+ .of_match_table = gcc_msm8909_match_table,
+ },
+};
+
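+/* Registered at core_initcall time so GCC clocks exist before their early consumers probe. */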
+static int __init gcc_msm8909_init(void)
+{
+ return platform_driver_register(&gcc_msm8909_driver);
+}
+core_initcall(gcc_msm8909_init);
+
+static void __exit gcc_msm8909_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8909_driver);
+}
+module_exit(gcc_msm8909_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8909 Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gcc-msm8909");
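For context only (not part of this patch): a minimal consumer-side sketch, in plain C against the common clock framework, of how a peripheral driver would pick up one of the clocks the GCC provider above registers. The "iface" clock name and the probe function are illustrative placeholders, not bindings defined by this driver.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_peripheral_probe(struct platform_device *pdev)
{
	struct clk *iface;
	int ret;

	/* Resolves e.g. GCC_BLSP1_AHB_CLK through the node's clocks/clock-names properties */
	iface = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(iface))
		return PTR_ERR(iface);

	/* Prepare and enable the branch clock before touching the peripheral */
	ret = clk_prepare_enable(iface);
	if (ret)
		return ret;

	/* ... device setup ... */

	/* Drop the reference again once done */
	clk_disable_unprepare(iface);
	return 0;
}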
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 9a46794f6eb8..0c8fe19387a7 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -42,14 +42,138 @@ enum {
P_EXT_MCLK,
};
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21010,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20010,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a010,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll bimc_pll = {
+ .l_reg = 0x23004,
+ .m_reg = 0x23008,
+ .n_reg = 0x2300c,
+ .config_reg = 0x23010,
+ .mode_reg = 0x23000,
+ .status_reg = 0x2301c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_pll",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo", .name = "xo_board",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap bimc_pll_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_pll_vote",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
static const struct parent_map gcc_xo_gpll0_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
};
-static const char * const gcc_xo_gpll0[] = {
- "xo",
- "gpll0_vote",
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
@@ -58,10 +182,10 @@ static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
{ P_BIMC, 2 },
};
-static const char * const gcc_xo_gpll0_bimc[] = {
- "xo",
- "gpll0_vote",
- "bimc_pll_vote",
+static const struct clk_parent_data gcc_xo_gpll0_bimc[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &bimc_pll_vote.hw },
};
static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
@@ -71,11 +195,11 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
{ P_GPLL2_AUX, 2 },
};
-static const char * const gcc_xo_gpll0a_gpll1_gpll2a[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "gpll2_vote",
+static const struct clk_parent_data gcc_xo_gpll0a_gpll1_gpll2a[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .hw = &gpll2_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
@@ -84,10 +208,10 @@ static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
{ P_GPLL2, 2 },
};
-static const char * const gcc_xo_gpll0_gpll2[] = {
- "xo",
- "gpll0_vote",
- "gpll2_vote",
+static const struct clk_parent_data gcc_xo_gpll0_gpll2[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
};
static const struct parent_map gcc_xo_gpll0a_map[] = {
@@ -95,9 +219,9 @@ static const struct parent_map gcc_xo_gpll0a_map[] = {
{ P_GPLL0_AUX, 2 },
};
-static const char * const gcc_xo_gpll0a[] = {
- "xo",
- "gpll0_vote",
+static const struct clk_parent_data gcc_xo_gpll0a[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
@@ -107,11 +231,11 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
{ P_SLEEP_CLK, 6 },
};
-static const char * const gcc_xo_gpll0_gpll1a_sleep[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
@@ -120,10 +244,10 @@ static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
{ P_GPLL1_AUX, 2 },
};
-static const char * const gcc_xo_gpll0_gpll1a[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
};
static const struct parent_map gcc_xo_dsibyte_map[] = {
@@ -131,9 +255,9 @@ static const struct parent_map gcc_xo_dsibyte_map[] = {
{ P_DSI0_PHYPLL_BYTE, 2 },
};
-static const char * const gcc_xo_dsibyte[] = {
- "xo",
- "dsi0pllbyte",
+static const struct clk_parent_data gcc_xo_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
};
static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
@@ -142,10 +266,10 @@ static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
{ P_DSI0_PHYPLL_BYTE, 1 },
};
-static const char * const gcc_xo_gpll0a_dsibyte[] = {
- "xo",
- "gpll0_vote",
- "dsi0pllbyte",
+static const struct clk_parent_data gcc_xo_gpll0a_dsibyte[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
};
static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = {
@@ -154,10 +278,10 @@ static const struct parent_map gcc_xo_gpll0_dsiphy_map[] = {
{ P_DSI0_PHYPLL_DSI, 2 },
};
-static const char * const gcc_xo_gpll0_dsiphy[] = {
- "xo",
- "gpll0_vote",
- "dsi0pll",
+static const struct clk_parent_data gcc_xo_gpll0_dsiphy[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
};
static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
@@ -166,10 +290,10 @@ static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
{ P_DSI0_PHYPLL_DSI, 1 },
};
-static const char * const gcc_xo_gpll0a_dsiphy[] = {
- "xo",
- "gpll0_vote",
- "dsi0pll",
+static const struct clk_parent_data gcc_xo_gpll0a_dsiphy[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
};
static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = {
@@ -179,11 +303,11 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2_map[] = {
{ P_GPLL2, 2 },
};
-static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "gpll2_vote",
+static const struct clk_parent_data gcc_xo_gpll0a_gpll1_gpll2[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .hw = &gpll2_vote.hw },
};
static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
@@ -193,11 +317,11 @@ static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll0_gpll1_sleep[] = {
- "xo",
- "gpll0_vote",
- "gpll1_vote",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll0_gpll1_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
@@ -208,12 +332,12 @@ static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll1_epi2s_emclk_sleep[] = {
- "xo",
- "gpll1_vote",
- "ext_pri_i2s",
- "ext_mclk",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll1_epi2s_emclk_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_pri_i2s", .name = "ext_pri_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
@@ -224,12 +348,12 @@ static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll1_esi2s_emclk_sleep[] = {
- "xo",
- "gpll1_vote",
- "ext_sec_i2s",
- "ext_mclk",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_gpll1_esi2s_emclk_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_sec_i2s", .name = "ext_sec_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_sleep_map[] = {
@@ -237,9 +361,9 @@ static const struct parent_map gcc_xo_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_sleep[] = {
- "xo",
- "sleep_clk",
+static const struct clk_parent_data gcc_xo_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
@@ -249,119 +373,11 @@ static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
{ P_SLEEP_CLK, 6 }
};
-static const char * const gcc_xo_gpll1_emclk_sleep[] = {
- "xo",
- "gpll1_vote",
- "ext_mclk",
- "sleep_clk",
-};
-
-static struct clk_pll gpll0 = {
- .l_reg = 0x21004,
- .m_reg = 0x21008,
- .n_reg = 0x2100c,
- .config_reg = 0x21010,
- .mode_reg = 0x21000,
- .status_reg = 0x2101c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll0",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap gpll0_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gpll0_vote",
- .parent_names = (const char *[]){ "gpll0" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll gpll1 = {
- .l_reg = 0x20004,
- .m_reg = 0x20008,
- .n_reg = 0x2000c,
- .config_reg = 0x20010,
- .mode_reg = 0x20000,
- .status_reg = 0x2001c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll1",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap gpll1_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(1),
- .hw.init = &(struct clk_init_data){
- .name = "gpll1_vote",
- .parent_names = (const char *[]){ "gpll1" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll gpll2 = {
- .l_reg = 0x4a004,
- .m_reg = 0x4a008,
- .n_reg = 0x4a00c,
- .config_reg = 0x4a010,
- .mode_reg = 0x4a000,
- .status_reg = 0x4a01c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gpll2",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap gpll2_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(2),
- .hw.init = &(struct clk_init_data){
- .name = "gpll2_vote",
- .parent_names = (const char *[]){ "gpll2" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
-};
-
-static struct clk_pll bimc_pll = {
- .l_reg = 0x23004,
- .m_reg = 0x23008,
- .n_reg = 0x2300c,
- .config_reg = 0x23010,
- .mode_reg = 0x23000,
- .status_reg = 0x2301c,
- .status_bit = 17,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "bimc_pll",
- .parent_names = (const char *[]){ "xo" },
- .num_parents = 1,
- .ops = &clk_pll_ops,
- },
-};
-
-static struct clk_regmap bimc_pll_vote = {
- .enable_reg = 0x45000,
- .enable_mask = BIT(3),
- .hw.init = &(struct clk_init_data){
- .name = "bimc_pll_vote",
- .parent_names = (const char *[]){ "bimc_pll" },
- .num_parents = 1,
- .ops = &clk_pll_vote_ops,
- },
+static const struct clk_parent_data gcc_xo_gpll1_emclk_sleep[] = {
+ { .fw_name = "xo", .name = "xo_board" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
};
static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
@@ -370,8 +386,8 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcnoc_bfdcd_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.ops = &clk_rcg2_ops,
},
};
@@ -382,8 +398,8 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "system_noc_bfdcd_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.ops = &clk_rcg2_ops,
},
};
@@ -402,8 +418,8 @@ static struct clk_rcg2 camss_ahb_clk_src = {
.freq_tbl = ftbl_gcc_camss_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -423,8 +439,8 @@ static struct clk_rcg2 apss_ahb_clk_src = {
.freq_tbl = ftbl_apss_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_ahb_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -442,8 +458,8 @@ static struct clk_rcg2 csi0_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -455,8 +471,8 @@ static struct clk_rcg2 csi1_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -483,8 +499,8 @@ static struct clk_rcg2 gfx3d_clk_src = {
.freq_tbl = ftbl_gcc_oxili_gfx3d_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
- .parent_names = gcc_xo_gpll0a_gpll1_gpll2a,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0a_gpll1_gpll2a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_gpll1_gpll2a),
.ops = &clk_rcg2_ops,
},
};
@@ -510,8 +526,8 @@ static struct clk_rcg2 vfe0_clk_src = {
.freq_tbl = ftbl_gcc_camss_vfe0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
- .parent_names = gcc_xo_gpll0_gpll2,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
.ops = &clk_rcg2_ops,
},
};
@@ -529,8 +545,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -558,8 +574,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -571,8 +587,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -585,8 +601,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -598,8 +614,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -612,8 +628,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -625,8 +641,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -639,8 +655,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -652,8 +668,8 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -666,8 +682,8 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -679,8 +695,8 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -693,8 +709,8 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -726,8 +742,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -740,8 +756,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -759,8 +775,8 @@ static struct clk_rcg2 cci_clk_src = {
.freq_tbl = ftbl_gcc_camss_cci_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
- .parent_names = gcc_xo_gpll0a,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a),
.ops = &clk_rcg2_ops,
},
};
@@ -792,8 +808,8 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.freq_tbl = ftbl_gcc_camss_gp0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -806,8 +822,8 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.freq_tbl = ftbl_gcc_camss_gp0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -826,8 +842,8 @@ static struct clk_rcg2 jpeg0_clk_src = {
.freq_tbl = ftbl_gcc_camss_jpeg0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -847,8 +863,8 @@ static struct clk_rcg2 mclk0_clk_src = {
.freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -861,8 +877,8 @@ static struct clk_rcg2 mclk1_clk_src = {
.freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -880,8 +896,8 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a),
.ops = &clk_rcg2_ops,
},
};
@@ -893,8 +909,8 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a),
.ops = &clk_rcg2_ops,
},
};
@@ -913,8 +929,8 @@ static struct clk_rcg2 cpp_clk_src = {
.freq_tbl = ftbl_gcc_camss_cpp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
- .parent_names = gcc_xo_gpll0_gpll2,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2),
.ops = &clk_rcg2_ops,
},
};
@@ -934,8 +950,8 @@ static struct clk_rcg2 crypto_clk_src = {
.freq_tbl = ftbl_gcc_crypto_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "crypto_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -975,8 +991,8 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gcc_gp1_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -989,8 +1005,8 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gcc_gp1_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1003,8 +1019,8 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gcc_gp1_3_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1a_sleep,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,8 +1031,8 @@ static struct clk_rcg2 byte0_clk_src = {
.parent_map = gcc_xo_gpll0a_dsibyte_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
- .parent_names = gcc_xo_gpll0a_dsibyte,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0a_dsibyte,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1034,8 +1050,8 @@ static struct clk_rcg2 esc0_clk_src = {
.freq_tbl = ftbl_gcc_mdss_esc0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
- .parent_names = gcc_xo_dsibyte,
- .num_parents = 2,
+ .parent_data = gcc_xo_dsibyte,
+ .num_parents = ARRAY_SIZE(gcc_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -1059,8 +1075,8 @@ static struct clk_rcg2 mdp_clk_src = {
.freq_tbl = ftbl_gcc_mdss_mdp_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
- .parent_names = gcc_xo_gpll0_dsiphy,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_dsiphy,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_dsiphy),
.ops = &clk_rcg2_ops,
},
};
@@ -1072,8 +1088,8 @@ static struct clk_rcg2 pclk0_clk_src = {
.parent_map = gcc_xo_gpll0a_dsiphy_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
- .parent_names = gcc_xo_gpll0a_dsiphy,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0a_dsiphy,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsiphy),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1091,8 +1107,8 @@ static struct clk_rcg2 vsync_clk_src = {
.freq_tbl = ftbl_gcc_mdss_vsync_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
- .parent_names = gcc_xo_gpll0a,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0a,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a),
.ops = &clk_rcg2_ops,
},
};
@@ -1109,8 +1125,8 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_gcc_pdm2_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1134,8 +1150,8 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc1_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1159,8 +1175,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_gcc_sdcc2_apps_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1179,8 +1195,8 @@ static struct clk_rcg2 apss_tcu_clk_src = {
.freq_tbl = ftbl_gcc_apss_tcu_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_tcu_clk_src",
- .parent_names = gcc_xo_gpll0a_gpll1_gpll2,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0a_gpll1_gpll2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_gpll1_gpll2),
.ops = &clk_rcg2_ops,
},
};
@@ -1202,8 +1218,8 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
.freq_tbl = ftbl_gcc_bimc_gpu_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_gpu_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_rcg2_ops,
},
@@ -1221,8 +1237,8 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.freq_tbl = ftbl_gcc_usb_hs_system_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1247,8 +1263,8 @@ static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_ahbfabric_clk_src",
- .parent_names = gcc_xo_gpll0_gpll1_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll0_gpll1_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1260,8 +1276,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
- .parent_names = (const char *[]){
- "ultaudio_ahbfabric_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1277,8 +1293,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
- .parent_names = (const char *[]){
- "ultaudio_ahbfabric_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1326,8 +1342,8 @@ static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_pri_i2s_clk_src",
- .parent_names = gcc_xo_gpll1_epi2s_emclk_sleep,
- .num_parents = 5,
+ .parent_data = gcc_xo_gpll1_epi2s_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_epi2s_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1339,8 +1355,8 @@ static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_pri_i2s_clk",
- .parent_names = (const char *[]){
- "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1357,8 +1373,8 @@ static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_sec_i2s_clk_src",
- .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
- .num_parents = 5,
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1370,8 +1386,8 @@ static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_sec_i2s_clk",
- .parent_names = (const char *[]){
- "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1388,8 +1404,8 @@ static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_aux_i2s_clk_src",
- .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
- .num_parents = 5,
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1401,8 +1417,8 @@ static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_aux_i2s_clk",
- .parent_names = (const char *[]){
- "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1423,8 +1439,8 @@ static struct clk_rcg2 ultaudio_xo_clk_src = {
.freq_tbl = ftbl_gcc_ultaudio_xo_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_xo_clk_src",
- .parent_names = gcc_xo_sleep,
- .num_parents = 2,
+ .parent_data = gcc_xo_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1436,8 +1452,8 @@ static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_avsync_xo_clk",
- .parent_names = (const char *[]){
- "ultaudio_xo_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1453,8 +1469,8 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_stc_xo_clk",
- .parent_names = (const char *[]){
- "ultaudio_xo_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1479,8 +1495,8 @@ static struct clk_rcg2 codec_digcodec_clk_src = {
.freq_tbl = ftbl_codec_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "codec_digcodec_clk_src",
- .parent_names = gcc_xo_gpll1_emclk_sleep,
- .num_parents = 4,
+ .parent_data = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_emclk_sleep),
.ops = &clk_rcg2_ops,
},
};
@@ -1492,8 +1508,8 @@ static struct clk_branch gcc_codec_digcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_codec_digcodec_clk",
- .parent_names = (const char *[]){
- "codec_digcodec_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &codec_digcodec_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1509,8 +1525,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_mport_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1525,8 +1541,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_sway_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1549,8 +1565,8 @@ static struct clk_rcg2 vcodec0_clk_src = {
.freq_tbl = ftbl_gcc_venus0_vcodec0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "vcodec0_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1563,8 +1579,8 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1579,8 +1595,8 @@ static struct clk_branch gcc_blsp1_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1596,8 +1612,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1613,8 +1629,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1630,8 +1646,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1647,8 +1663,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1664,8 +1680,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1681,8 +1697,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1698,8 +1714,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1715,8 +1731,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1732,8 +1748,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1749,8 +1765,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1766,8 +1782,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1783,8 +1799,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1800,8 +1816,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1817,8 +1833,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1835,8 +1851,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1851,8 +1867,8 @@ static struct clk_branch gcc_camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1868,8 +1884,8 @@ static struct clk_branch gcc_camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_clk",
- .parent_names = (const char *[]){
- "cci_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1885,8 +1901,8 @@ static struct clk_branch gcc_camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1902,8 +1918,8 @@ static struct clk_branch gcc_camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1919,8 +1935,8 @@ static struct clk_branch gcc_camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phy_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1936,8 +1952,8 @@ static struct clk_branch gcc_camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0pix_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1953,8 +1969,8 @@ static struct clk_branch gcc_camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0rdi_clk",
- .parent_names = (const char *[]){
- "csi0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1970,8 +1986,8 @@ static struct clk_branch gcc_camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1987,8 +2003,8 @@ static struct clk_branch gcc_camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2004,8 +2020,8 @@ static struct clk_branch gcc_camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phy_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2021,8 +2037,8 @@ static struct clk_branch gcc_camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1pix_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2038,8 +2054,8 @@ static struct clk_branch gcc_camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1rdi_clk",
- .parent_names = (const char *[]){
- "csi1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2055,8 +2071,8 @@ static struct clk_branch gcc_camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi_vfe0_clk",
- .parent_names = (const char *[]){
- "vfe0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2072,8 +2088,8 @@ static struct clk_branch gcc_camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp0_clk",
- .parent_names = (const char *[]){
- "camss_gp0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2089,8 +2105,8 @@ static struct clk_branch gcc_camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp1_clk",
- .parent_names = (const char *[]){
- "camss_gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2106,8 +2122,8 @@ static struct clk_branch gcc_camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ispif_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2123,8 +2139,8 @@ static struct clk_branch gcc_camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg0_clk",
- .parent_names = (const char *[]){
- "jpeg0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2140,8 +2156,8 @@ static struct clk_branch gcc_camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2157,8 +2173,8 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2174,8 +2190,8 @@ static struct clk_branch gcc_camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk0_clk",
- .parent_names = (const char *[]){
- "mclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2191,8 +2207,8 @@ static struct clk_branch gcc_camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk1_clk",
- .parent_names = (const char *[]){
- "mclk1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2208,8 +2224,8 @@ static struct clk_branch gcc_camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_micro_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2225,8 +2241,8 @@ static struct clk_branch gcc_camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phytimer_clk",
- .parent_names = (const char *[]){
- "csi0phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2242,8 +2258,8 @@ static struct clk_branch gcc_camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phytimer_clk",
- .parent_names = (const char *[]){
- "csi1phytimer_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2259,8 +2275,8 @@ static struct clk_branch gcc_camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2276,8 +2292,8 @@ static struct clk_branch gcc_camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_top_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2293,8 +2309,8 @@ static struct clk_branch gcc_camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2310,8 +2326,8 @@ static struct clk_branch gcc_camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_clk",
- .parent_names = (const char *[]){
- "cpp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2327,8 +2343,8 @@ static struct clk_branch gcc_camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe0_clk",
- .parent_names = (const char *[]){
- "vfe0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2344,8 +2360,8 @@ static struct clk_branch gcc_camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_ahb_clk",
- .parent_names = (const char *[]){
- "camss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2361,8 +2377,8 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2379,8 +2395,8 @@ static struct clk_branch gcc_crypto_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2397,8 +2413,8 @@ static struct clk_branch gcc_crypto_axi_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_axi_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2415,8 +2431,8 @@ static struct clk_branch gcc_crypto_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_clk",
- .parent_names = (const char *[]){
- "crypto_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &crypto_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2432,8 +2448,8 @@ static struct clk_branch gcc_oxili_gmem_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gmem_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2449,8 +2465,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2466,8 +2482,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2483,8 +2499,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2500,8 +2516,8 @@ static struct clk_branch gcc_mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2517,8 +2533,8 @@ static struct clk_branch gcc_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2534,8 +2550,8 @@ static struct clk_branch gcc_mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte0_clk",
- .parent_names = (const char *[]){
- "byte0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2551,8 +2567,8 @@ static struct clk_branch gcc_mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc0_clk",
- .parent_names = (const char *[]){
- "esc0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2568,8 +2584,8 @@ static struct clk_branch gcc_mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_mdp_clk",
- .parent_names = (const char *[]){
- "mdp_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2585,8 +2601,8 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk0_clk",
- .parent_names = (const char *[]){
- "pclk0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2602,8 +2618,8 @@ static struct clk_branch gcc_mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_vsync_clk",
- .parent_names = (const char *[]){
- "vsync_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2619,25 +2635,8 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_cfg_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
- .halt_reg = 0x49004,
- .clkr = {
- .enable_reg = 0x49004,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_mss_q6_bimc_axi_clk",
- .parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2653,8 +2652,8 @@ static struct clk_branch gcc_oxili_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2670,8 +2669,8 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gfx3d_clk",
- .parent_names = (const char *[]){
- "gfx3d_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2687,8 +2686,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2704,8 +2703,8 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2722,8 +2721,8 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2738,8 +2737,8 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2755,8 +2754,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]){
- "sdcc1_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2772,8 +2771,8 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2789,8 +2788,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2805,13 +2804,30 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
.parent_map = gcc_xo_gpll0_bimc_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_ddr_clk_src",
- .parent_names = gcc_xo_gpll0_bimc,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_bimc,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc),
.ops = &clk_rcg2_ops,
.flags = CLK_GET_RATE_NOCACHE,
},
};

+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_apss_tcu_clk = {
.halt_reg = 0x12018,
.clkr = {
@@ -2819,8 +2835,8 @@ static struct clk_branch gcc_apss_tcu_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_apss_tcu_clk",
- .parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2835,8 +2851,8 @@ static struct clk_branch gcc_gfx_tcu_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_gfx_tcu_clk",
- .parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2851,8 +2867,8 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_gtcu_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2868,8 +2884,8 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .parent_names = (const char *[]){
- "bimc_gpu_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2885,8 +2901,8 @@ static struct clk_branch gcc_bimc_gpu_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gpu_clk",
- .parent_names = (const char *[]){
- "bimc_gpu_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2902,8 +2918,8 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2919,8 +2935,8 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2936,8 +2952,8 @@ static struct clk_branch gcc_smmu_cfg_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_smmu_cfg_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2953,8 +2969,8 @@ static struct clk_branch gcc_venus_tbu_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2970,8 +2986,8 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2987,8 +3003,8 @@ static struct clk_branch gcc_usb2a_phy_sleep_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2a_phy_sleep_clk",
- .parent_names = (const char *[]){
- "sleep_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep_clk", .name = "sleep_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3004,8 +3020,8 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3021,8 +3037,8 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_names = (const char *[]){
- "usb_hs_system_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3038,8 +3054,8 @@ static struct clk_branch gcc_venus0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_ahb_clk",
- .parent_names = (const char *[]){
- "pcnoc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3055,8 +3071,8 @@ static struct clk_branch gcc_venus0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
- .parent_names = (const char *[]){
- "system_noc_bfdcd_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &system_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3072,8 +3088,8 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_vcodec0_clk",
- .parent_names = (const char *[]){
- "vcodec0_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
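
The gcc-msm8916.c hunks above, and the gcc-msm8939.c hunks that follow, apply one conversion pattern: branch clocks drop the global string lookup in .parent_names and point at the parent's struct clk_hw directly via .parent_hws (or use a struct clk_parent_data with a DT .fw_name for board-level inputs such as sleep_clk), while RCG parent tables move to clk_parent_data arrays whose .num_parents is taken from ARRAY_SIZE() so the count cannot drift from the table. A minimal sketch of the resulting form is shown below; the names and register offsets (example_clk_src, gcc_example_ahb_clk, 0x1000) are hypothetical placeholders, not clocks from either driver.

/*
 * Sketch only: example_clk_src stands for an RCG defined earlier in the
 * same file; the offsets and names are placeholders, not real hardware.
 */
static struct clk_branch gcc_example_ahb_clk = {
	.halt_reg = 0x1000,
	.clkr = {
		.enable_reg = 0x1000,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "gcc_example_ahb_clk",
			/*
			 * Reference the parent clk_hw directly instead of
			 * looking up the "example_clk_src" name globally.
			 */
			.parent_hws = (const struct clk_hw*[]){
				&example_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

Resolving parents by clk_hw pointer (or by DT fw_name) avoids the single global clock-name namespace, so a parent is found even if its string name is reused or never registered under that name, and ARRAY_SIZE() keeps num_parents in sync whenever a parent table gains or loses an entry.
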
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
index 8e2d9fb98ad5..af608f165896 100644
--- a/drivers/clk/qcom/gcc-msm8939.c
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -614,7 +614,7 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pcnoc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -626,7 +626,7 @@ static struct clk_rcg2 system_noc_bfdcd_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "system_noc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll6a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -638,7 +638,7 @@ static struct clk_rcg2 bimc_ddr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_ddr_clk_src",
.parent_data = gcc_xo_gpll0_bimc_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_bimc_parent_data),
.ops = &clk_rcg2_ops,
.flags = CLK_GET_RATE_NOCACHE,
},
@@ -651,7 +651,7 @@ static struct clk_rcg2 system_mm_noc_bfdcd_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "system_mm_noc_bfdcd_clk_src",
.parent_data = gcc_xo_gpll0_gpll6a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll6a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -671,7 +671,7 @@ static struct clk_rcg2 camss_ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_ahb_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -692,7 +692,7 @@ static struct clk_rcg2 apss_ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_ahb_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -711,7 +711,7 @@ static struct clk_rcg2 csi0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -724,7 +724,7 @@ static struct clk_rcg2 csi1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -753,7 +753,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
.parent_data = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -782,7 +782,7 @@ static struct clk_rcg2 vfe0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_gpll4_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_gpll4_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -801,7 +801,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -826,7 +826,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -839,7 +839,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -853,7 +853,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -866,7 +866,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -880,7 +880,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -893,7 +893,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -907,7 +907,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -920,7 +920,7 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -934,7 +934,7 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -947,7 +947,7 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -961,7 +961,7 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -994,7 +994,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1008,7 +1008,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1028,7 +1028,7 @@ static struct clk_rcg2 cci_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
.parent_data = gcc_xo_gpll0a_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1048,7 +1048,7 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1062,7 +1062,7 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1082,7 +1082,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1102,7 +1102,7 @@ static struct clk_rcg2 mclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1116,7 +1116,7 @@ static struct clk_rcg2 mclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1135,7 +1135,7 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1148,7 +1148,7 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1171,7 +1171,7 @@ static struct clk_rcg2 cpp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
.parent_data = gcc_xo_gpll0_gpll2_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll2_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1193,7 +1193,7 @@ static struct clk_rcg2 crypto_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "crypto_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1212,7 +1212,7 @@ static struct clk_rcg2 gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1226,7 +1226,7 @@ static struct clk_rcg2 gp2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1240,7 +1240,7 @@ static struct clk_rcg2 gp3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
.parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1a_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1252,7 +1252,7 @@ static struct clk_rcg2 byte0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsibyte_parent_data),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1265,7 +1265,7 @@ static struct clk_rcg2 byte1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsibyte_parent_data),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1284,7 +1284,7 @@ static struct clk_rcg2 esc0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_data = gcc_xo_dsibyte_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_dsibyte_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1297,7 +1297,7 @@ static struct clk_rcg2 esc1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_data = gcc_xo_dsibyte_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_dsibyte_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1325,7 +1325,7 @@ static struct clk_rcg2 mdp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
.parent_data = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1338,7 +1338,7 @@ static struct clk_rcg2 pclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsiphy_parent_data),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1352,7 +1352,7 @@ static struct clk_rcg2 pclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_dsiphy_parent_data),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1371,7 +1371,7 @@ static struct clk_rcg2 vsync_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
.parent_data = gcc_xo_gpll0a_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1390,7 +1390,7 @@ static struct clk_rcg2 pdm2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1416,7 +1416,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1430,7 +1430,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1450,7 +1450,7 @@ static struct clk_rcg2 apss_tcu_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "apss_tcu_clk_src",
.parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1473,7 +1473,7 @@ static struct clk_rcg2 bimc_gpu_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "bimc_gpu_clk_src",
.parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data),
.flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_rcg2_ops,
},
@@ -1494,7 +1494,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1512,7 +1512,7 @@ static struct clk_rcg2 usb_fs_system_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_fs_system_clk_src",
.parent_data = gcc_xo_gpll6_gpll0_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll6_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1530,7 +1530,7 @@ static struct clk_rcg2 usb_fs_ic_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb_fs_ic_clk_src",
.parent_data = gcc_xo_gpll6_gpll0a_parent_data,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll6_gpll0a_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1556,7 +1556,7 @@ static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_ahbfabric_clk_src",
.parent_data = gcc_xo_gpll0_gpll1_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll1_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1568,8 +1568,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1585,8 +1585,8 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_ahbfabric_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1635,7 +1635,7 @@ static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_pri_i2s_clk_src",
.parent_data = gcc_xo_gpll1_epi2s_emclk_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_epi2s_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1647,8 +1647,8 @@ static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_pri_i2s_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1666,7 +1666,7 @@ static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_sec_i2s_clk_src",
.parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1678,8 +1678,8 @@ static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_sec_i2s_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1697,7 +1697,7 @@ static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_lpaif_aux_i2s_clk_src",
.parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_esi2s_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1709,8 +1709,8 @@ static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_lpaif_aux_i2s_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1732,7 +1732,7 @@ static struct clk_rcg2 ultaudio_xo_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ultaudio_xo_clk_src",
.parent_data = gcc_xo_sleep_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1744,8 +1744,8 @@ static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_avsync_xo_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1761,8 +1761,8 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_stc_xo_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &ultaudio_xo_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &ultaudio_xo_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1788,7 +1788,7 @@ static struct clk_rcg2 codec_digcodec_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "codec_digcodec_clk_src",
.parent_data = gcc_xo_gpll1_emclk_sleep_parent_data,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll1_emclk_sleep_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1800,8 +1800,8 @@ static struct clk_branch gcc_codec_digcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_codec_digcodec_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &codec_digcodec_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &codec_digcodec_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1817,8 +1817,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_mport_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1833,8 +1833,8 @@ static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ultaudio_pcnoc_sway_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1858,7 +1858,7 @@ static struct clk_rcg2 vcodec0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vcodec0_clk_src",
.parent_data = gcc_xo_gpll0_parent_data,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_parent_data),
.ops = &clk_rcg2_ops,
},
};
@@ -1871,8 +1871,8 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1899,8 +1899,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1916,8 +1916,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1933,8 +1933,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1950,8 +1950,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1967,8 +1967,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1984,8 +1984,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2001,8 +2001,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2018,8 +2018,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2035,8 +2035,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2052,8 +2052,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2069,8 +2069,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2086,8 +2086,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2103,8 +2103,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_uart1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2120,8 +2120,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &blsp1_uart2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2138,8 +2138,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -2154,8 +2154,8 @@ static struct clk_branch gcc_camss_cci_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2171,8 +2171,8 @@ static struct clk_branch gcc_camss_cci_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cci_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cci_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &cci_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2188,8 +2188,8 @@ static struct clk_branch gcc_camss_csi0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2205,8 +2205,8 @@ static struct clk_branch gcc_camss_csi0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2222,8 +2222,8 @@ static struct clk_branch gcc_camss_csi0phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phy_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2239,8 +2239,8 @@ static struct clk_branch gcc_camss_csi0pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0pix_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2256,8 +2256,8 @@ static struct clk_branch gcc_camss_csi0rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0rdi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2273,8 +2273,8 @@ static struct clk_branch gcc_camss_csi1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2290,8 +2290,8 @@ static struct clk_branch gcc_camss_csi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2307,8 +2307,8 @@ static struct clk_branch gcc_camss_csi1phy_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phy_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2324,8 +2324,8 @@ static struct clk_branch gcc_camss_csi1pix_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1pix_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2341,8 +2341,8 @@ static struct clk_branch gcc_camss_csi1rdi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1rdi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2358,8 +2358,8 @@ static struct clk_branch gcc_camss_csi_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi_vfe0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vfe0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2375,8 +2375,8 @@ static struct clk_branch gcc_camss_gp0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_gp0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2392,8 +2392,8 @@ static struct clk_branch gcc_camss_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2409,8 +2409,8 @@ static struct clk_branch gcc_camss_ispif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ispif_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2426,8 +2426,8 @@ static struct clk_branch gcc_camss_jpeg0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &jpeg0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &jpeg0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2443,8 +2443,8 @@ static struct clk_branch gcc_camss_jpeg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2460,8 +2460,8 @@ static struct clk_branch gcc_camss_jpeg_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_jpeg_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2477,8 +2477,8 @@ static struct clk_branch gcc_camss_mclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &mclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2494,8 +2494,8 @@ static struct clk_branch gcc_camss_mclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_mclk1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &mclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &mclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2511,8 +2511,8 @@ static struct clk_branch gcc_camss_micro_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_micro_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2528,8 +2528,8 @@ static struct clk_branch gcc_camss_csi0phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi0phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi0phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2545,8 +2545,8 @@ static struct clk_branch gcc_camss_csi1phytimer_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_csi1phytimer_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &csi1phytimer_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1phytimer_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2562,8 +2562,8 @@ static struct clk_branch gcc_camss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2579,8 +2579,8 @@ static struct clk_branch gcc_camss_top_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_top_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2596,8 +2596,8 @@ static struct clk_branch gcc_camss_cpp_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2613,8 +2613,8 @@ static struct clk_branch gcc_camss_cpp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_cpp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &cpp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &cpp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2630,8 +2630,8 @@ static struct clk_branch gcc_camss_vfe0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vfe0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2647,8 +2647,8 @@ static struct clk_branch gcc_camss_vfe_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &camss_ahb_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &camss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2664,8 +2664,8 @@ static struct clk_branch gcc_camss_vfe_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_camss_vfe_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2682,8 +2682,8 @@ static struct clk_branch gcc_crypto_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2700,8 +2700,8 @@ static struct clk_branch gcc_crypto_axi_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2718,8 +2718,8 @@ static struct clk_branch gcc_crypto_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_crypto_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &crypto_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &crypto_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2735,8 +2735,8 @@ static struct clk_branch gcc_oxili_gmem_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gmem_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gfx3d_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2752,8 +2752,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gp1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2769,8 +2769,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gp2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2786,8 +2786,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gp3_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2803,8 +2803,8 @@ static struct clk_branch gcc_mdss_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2820,8 +2820,8 @@ static struct clk_branch gcc_mdss_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2837,8 +2837,8 @@ static struct clk_branch gcc_mdss_byte0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &byte0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &byte0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2854,8 +2854,8 @@ static struct clk_branch gcc_mdss_byte1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_byte1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &byte1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2871,8 +2871,8 @@ static struct clk_branch gcc_mdss_esc0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &esc0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &esc0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2888,8 +2888,8 @@ static struct clk_branch gcc_mdss_esc1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_esc1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &esc1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2905,8 +2905,8 @@ static struct clk_branch gcc_mdss_mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_mdp_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &mdp_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2922,8 +2922,8 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pclk0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2939,8 +2939,8 @@ static struct clk_branch gcc_mdss_pclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_pclk1_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pclk1_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2956,8 +2956,8 @@ static struct clk_branch gcc_mdss_vsync_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdss_vsync_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vsync_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vsync_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2973,8 +2973,8 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_cfg_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2990,8 +2990,8 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3007,8 +3007,8 @@ static struct clk_branch gcc_oxili_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3024,8 +3024,8 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_oxili_gfx3d_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &gfx3d_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3041,8 +3041,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pdm2_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3058,8 +3058,8 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3076,8 +3076,8 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3092,8 +3092,8 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3109,8 +3109,8 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &sdcc1_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3126,8 +3126,8 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3143,8 +3143,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &sdcc2_apps_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3161,8 +3161,8 @@ static struct clk_branch gcc_apss_tcu_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_apss_tcu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3178,8 +3178,8 @@ static struct clk_branch gcc_gfx_tcu_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gcc_gfx_tcu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3195,8 +3195,8 @@ static struct clk_branch gcc_gfx_tbu_clk = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gcc_gfx_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_ddr_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_ddr_clk_src.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -3212,8 +3212,8 @@ static struct clk_branch gcc_mdp_tbu_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3230,8 +3230,8 @@ static struct clk_branch gcc_venus_tbu_clk = {
.enable_mask = BIT(5),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3248,8 +3248,8 @@ static struct clk_branch gcc_vfe_tbu_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gcc_vfe_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3266,8 +3266,8 @@ static struct clk_branch gcc_jpeg_tbu_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_jpeg_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3284,8 +3284,8 @@ static struct clk_branch gcc_smmu_cfg_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "gcc_smmu_cfg_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3302,8 +3302,8 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_gtcu_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3320,8 +3320,8 @@ static struct clk_branch gcc_cpp_tbu_clk = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "gcc_cpp_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3338,8 +3338,8 @@ static struct clk_branch gcc_mdp_rt_tbu_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_mdp_rt_tbu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3355,8 +3355,8 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_gpu_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3372,8 +3372,8 @@ static struct clk_branch gcc_bimc_gpu_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gpu_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &bimc_gpu_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &bimc_gpu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3401,8 +3401,8 @@ static struct clk_branch gcc_usb_fs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_fs_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3418,8 +3418,8 @@ static struct clk_branch gcc_usb_fs_ic_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_fs_ic_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &usb_fs_ic_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs_ic_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3435,8 +3435,8 @@ static struct clk_branch gcc_usb_fs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_fs_system_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &usb_fs_system_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3452,8 +3452,8 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3469,8 +3469,8 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &usb_hs_system_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs_system_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3486,8 +3486,8 @@ static struct clk_branch gcc_venus0_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_ahb_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &pcnoc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3503,8 +3503,8 @@ static struct clk_branch gcc_venus0_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_axi_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &system_mm_noc_bfdcd_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &system_mm_noc_bfdcd_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3520,8 +3520,8 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_vcodec0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vcodec0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3537,8 +3537,8 @@ static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_core0_vcodec0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vcodec0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3554,8 +3554,8 @@ static struct clk_branch gcc_venus0_core1_vcodec0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_venus0_core1_vcodec0_clk",
- .parent_data = &(const struct clk_parent_data){
- .hw = &vcodec0_clk_src.clkr.hw,
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
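Every gcc-msm8939.c hunk above makes the same mechanical substitution: a one-entry .parent_data initializer whose only populated field was .hw becomes a .parent_hws array. The clock framework accepts either form for naming a parent, but .parent_hws states the intra-driver dependency directly and drops the wrapper struct. A minimal before/after sketch of the pattern, with example_parent_src standing in for whichever source clock a given branch feeds from (an illustrative name, not one from the patch):

	/* before: one parent, expressed through a clk_parent_data wrapper */
	.parent_data = &(const struct clk_parent_data){
		.hw = &example_parent_src.clkr.hw,
	},
	.num_parents = 1,

	/* after: the same parent, referenced directly as a clk_hw pointer */
	.parent_hws = (const struct clk_hw *[]){
		&example_parent_src.clkr.hw,
	},
	.num_parents = 1,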
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index a6e13b91e4c8..9dd4e7ffa1f8 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -35,7 +35,9 @@ static struct clk_pll pll3 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll3",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -46,7 +48,9 @@ static struct clk_regmap pll4_vote = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "pll4_vote",
- .parent_names = (const char *[]){ "pll4" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pll4", .name = "pll4",
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -62,7 +66,9 @@ static struct clk_pll pll8 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll8",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -73,7 +79,9 @@ static struct clk_regmap pll8_vote = {
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
.name = "pll8_vote",
- .parent_names = (const char *[]){ "pll8" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll8.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -96,7 +104,9 @@ static struct hfpll_data hfpll0_data = {
static struct clk_hfpll hfpll0 = {
.d = &hfpll0_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll0",
.ops = &clk_ops_hfpll,
@@ -136,7 +146,9 @@ static struct hfpll_data hfpll1_data = {
static struct clk_hfpll hfpll1 = {
.d = &hfpll1_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll1",
.ops = &clk_ops_hfpll,
@@ -162,7 +174,9 @@ static struct hfpll_data hfpll2_data = {
static struct clk_hfpll hfpll2 = {
.d = &hfpll2_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll2",
.ops = &clk_ops_hfpll,
@@ -188,7 +202,9 @@ static struct hfpll_data hfpll3_data = {
static struct clk_hfpll hfpll3 = {
.d = &hfpll3_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll3",
.ops = &clk_ops_hfpll,
@@ -228,7 +244,9 @@ static struct hfpll_data hfpll_l2_data = {
static struct clk_hfpll hfpll_l2 = {
.d = &hfpll_l2_data,
.clkr.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.name = "hfpll_l2",
.ops = &clk_ops_hfpll,
@@ -247,7 +265,9 @@ static struct clk_pll pll14 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll14",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -258,7 +278,9 @@ static struct clk_regmap pll14_vote = {
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
.name = "pll14_vote",
- .parent_names = (const char *[]){ "pll14" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll14.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_pll_vote_ops,
},
@@ -276,9 +298,9 @@ static const struct parent_map gcc_pxo_pll8_map[] = {
{ P_PLL8, 3 }
};
-static const char * const gcc_pxo_pll8[] = {
- "pxo",
- "pll8_vote",
+static const struct clk_parent_data gcc_pxo_pll8[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
};
static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
@@ -287,10 +309,10 @@ static const struct parent_map gcc_pxo_pll8_cxo_map[] = {
{ P_CXO, 5 }
};
-static const char * const gcc_pxo_pll8_cxo[] = {
- "pxo",
- "pll8_vote",
- "cxo",
+static const struct clk_parent_data gcc_pxo_pll8_cxo[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
+ { .fw_name = "cxo", .name = "cxo_board" },
};
static const struct parent_map gcc_pxo_pll8_pll3_map[] = {
@@ -299,10 +321,10 @@ static const struct parent_map gcc_pxo_pll8_pll3_map[] = {
{ P_PLL3, 6 }
};
-static const char * const gcc_pxo_pll8_pll3[] = {
- "pxo",
- "pll8_vote",
- "pll3",
+static const struct clk_parent_data gcc_pxo_pll8_pll3[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .hw = &pll8_vote.hw },
+ { .hw = &pll3.clkr.hw },
};
static struct freq_tbl clk_tbl_gsbi_uart[] = {
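The three shared parent tables in gcc-msm8960.c change representation here: the const char * name arrays become arrays of struct clk_parent_data, in which externally supplied clocks are looked up via .fw_name (matched against the consumer's clock-names) with the old global string kept in .name as a fallback, while parents registered by this driver are referenced through .hw. The clock users in the hunks below also stop hard-coding the parent count and derive it with ARRAY_SIZE(), so the table and the count can no longer drift apart. The idiom, restated with the names from this file:

	static const struct clk_parent_data gcc_pxo_pll8[] = {
		{ .fw_name = "pxo", .name = "pxo_board" },	/* DT lookup, legacy-name fallback */
		{ .hw = &pll8_vote.hw },			/* parent owned by this driver */
	};

	/* in each clk_init_data that previously pointed at the string array */
	.parent_data = gcc_pxo_pll8,
	.num_parents = ARRAY_SIZE(gcc_pxo_pll8),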
@@ -348,8 +370,8 @@ static struct clk_rcg gsbi1_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -364,8 +386,8 @@ static struct clk_branch gsbi1_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_uart_clk",
- .parent_names = (const char *[]){
- "gsbi1_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -399,8 +421,8 @@ static struct clk_rcg gsbi2_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -415,8 +437,8 @@ static struct clk_branch gsbi2_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_uart_clk",
- .parent_names = (const char *[]){
- "gsbi2_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -450,8 +472,8 @@ static struct clk_rcg gsbi3_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -466,8 +488,8 @@ static struct clk_branch gsbi3_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_uart_clk",
- .parent_names = (const char *[]){
- "gsbi3_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -501,8 +523,8 @@ static struct clk_rcg gsbi4_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -517,8 +539,8 @@ static struct clk_branch gsbi4_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_uart_clk",
- .parent_names = (const char *[]){
- "gsbi4_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -552,8 +574,8 @@ static struct clk_rcg gsbi5_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -568,8 +590,8 @@ static struct clk_branch gsbi5_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_uart_clk",
- .parent_names = (const char *[]){
- "gsbi5_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -603,8 +625,8 @@ static struct clk_rcg gsbi6_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -619,8 +641,8 @@ static struct clk_branch gsbi6_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_uart_clk",
- .parent_names = (const char *[]){
- "gsbi6_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -654,8 +676,8 @@ static struct clk_rcg gsbi7_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -670,8 +692,8 @@ static struct clk_branch gsbi7_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_uart_clk",
- .parent_names = (const char *[]){
- "gsbi7_uart_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_uart_src.clkr.hw
},
.num_parents = 1,
.ops = &clk_branch_ops,
@@ -705,8 +727,8 @@ static struct clk_rcg gsbi8_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -721,7 +743,9 @@ static struct clk_branch gsbi8_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_uart_clk",
- .parent_names = (const char *[]){ "gsbi8_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -754,8 +778,8 @@ static struct clk_rcg gsbi9_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -770,7 +794,9 @@ static struct clk_branch gsbi9_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_uart_clk",
- .parent_names = (const char *[]){ "gsbi9_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -803,8 +829,8 @@ static struct clk_rcg gsbi10_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -819,7 +845,9 @@ static struct clk_branch gsbi10_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_uart_clk",
- .parent_names = (const char *[]){ "gsbi10_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -852,8 +880,8 @@ static struct clk_rcg gsbi11_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -868,7 +896,9 @@ static struct clk_branch gsbi11_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_uart_clk",
- .parent_names = (const char *[]){ "gsbi11_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -901,8 +931,8 @@ static struct clk_rcg gsbi12_uart_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -917,7 +947,9 @@ static struct clk_branch gsbi12_uart_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_uart_clk",
- .parent_names = (const char *[]){ "gsbi12_uart_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_uart_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -963,8 +995,8 @@ static struct clk_rcg gsbi1_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -979,7 +1011,9 @@ static struct clk_branch gsbi1_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi1_qup_clk",
- .parent_names = (const char *[]){ "gsbi1_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi1_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1012,8 +1046,8 @@ static struct clk_rcg gsbi2_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1028,7 +1062,9 @@ static struct clk_branch gsbi2_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi2_qup_clk",
- .parent_names = (const char *[]){ "gsbi2_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi2_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1061,8 +1097,8 @@ static struct clk_rcg gsbi3_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1077,7 +1113,9 @@ static struct clk_branch gsbi3_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi3_qup_clk",
- .parent_names = (const char *[]){ "gsbi3_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi3_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1110,8 +1148,8 @@ static struct clk_rcg gsbi4_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1126,7 +1164,9 @@ static struct clk_branch gsbi4_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi4_qup_clk",
- .parent_names = (const char *[]){ "gsbi4_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi4_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1159,8 +1199,8 @@ static struct clk_rcg gsbi5_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1175,7 +1215,9 @@ static struct clk_branch gsbi5_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi5_qup_clk",
- .parent_names = (const char *[]){ "gsbi5_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi5_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1208,8 +1250,8 @@ static struct clk_rcg gsbi6_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1224,7 +1266,9 @@ static struct clk_branch gsbi6_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi6_qup_clk",
- .parent_names = (const char *[]){ "gsbi6_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi6_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1257,8 +1301,8 @@ static struct clk_rcg gsbi7_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1273,7 +1317,9 @@ static struct clk_branch gsbi7_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi7_qup_clk",
- .parent_names = (const char *[]){ "gsbi7_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi7_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1306,8 +1352,8 @@ static struct clk_rcg gsbi8_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1322,7 +1368,9 @@ static struct clk_branch gsbi8_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi8_qup_clk",
- .parent_names = (const char *[]){ "gsbi8_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi8_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1355,8 +1403,8 @@ static struct clk_rcg gsbi9_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1371,7 +1419,9 @@ static struct clk_branch gsbi9_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi9_qup_clk",
- .parent_names = (const char *[]){ "gsbi9_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi9_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1404,8 +1454,8 @@ static struct clk_rcg gsbi10_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1420,7 +1470,9 @@ static struct clk_branch gsbi10_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi10_qup_clk",
- .parent_names = (const char *[]){ "gsbi10_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi10_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1453,8 +1505,8 @@ static struct clk_rcg gsbi11_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1469,7 +1521,9 @@ static struct clk_branch gsbi11_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi11_qup_clk",
- .parent_names = (const char *[]){ "gsbi11_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi11_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1502,8 +1556,8 @@ static struct clk_rcg gsbi12_qup_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1518,7 +1572,9 @@ static struct clk_branch gsbi12_qup_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gsbi12_qup_clk",
- .parent_names = (const char *[]){ "gsbi12_qup_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gsbi12_qup_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1564,8 +1620,8 @@ static struct clk_rcg gp0_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp0_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_PARENT_GATE,
},
@@ -1580,7 +1636,9 @@ static struct clk_branch gp0_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp0_clk",
- .parent_names = (const char *[]){ "gp0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1613,8 +1671,8 @@ static struct clk_rcg gp1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp1_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1629,7 +1687,9 @@ static struct clk_branch gp1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp1_clk",
- .parent_names = (const char *[]){ "gp1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1662,8 +1722,8 @@ static struct clk_rcg gp2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "gp2_src",
- .parent_names = gcc_pxo_pll8_cxo,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_cxo,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_cxo),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -1678,7 +1738,9 @@ static struct clk_branch gp2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "gp2_clk",
- .parent_names = (const char *[]){ "gp2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gp2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1714,8 +1776,8 @@ static struct clk_rcg prng_src = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "prng_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
},
@@ -1730,7 +1792,9 @@ static struct clk_branch prng_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "prng_clk",
- .parent_names = (const char *[]){ "prng_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &prng_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -1776,8 +1840,8 @@ static struct clk_rcg sdc1_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc1_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1791,7 +1855,9 @@ static struct clk_branch sdc1_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc1_clk",
- .parent_names = (const char *[]){ "sdc1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1824,8 +1890,8 @@ static struct clk_rcg sdc2_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc2_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1839,7 +1905,9 @@ static struct clk_branch sdc2_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc2_clk",
- .parent_names = (const char *[]){ "sdc2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1872,8 +1940,8 @@ static struct clk_rcg sdc3_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc3_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1887,7 +1955,9 @@ static struct clk_branch sdc3_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc3_clk",
- .parent_names = (const char *[]){ "sdc3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1920,8 +1990,8 @@ static struct clk_rcg sdc4_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc4_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1935,7 +2005,9 @@ static struct clk_branch sdc4_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc4_clk",
- .parent_names = (const char *[]){ "sdc4_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc4_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1968,8 +2040,8 @@ static struct clk_rcg sdc5_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "sdc5_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
},
}
@@ -1983,7 +2055,9 @@ static struct clk_branch sdc5_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "sdc5_clk",
- .parent_names = (const char *[]){ "sdc5_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sdc5_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2021,8 +2095,8 @@ static struct clk_rcg tsif_ref_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2037,7 +2111,9 @@ static struct clk_branch tsif_ref_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk",
- .parent_names = (const char *[]){ "tsif_ref_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &tsif_ref_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2075,8 +2151,8 @@ static struct clk_rcg usb_hs1_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2091,7 +2167,9 @@ static struct clk_branch usb_hs1_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs1_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2124,8 +2202,8 @@ static struct clk_rcg usb_hs3_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2140,7 +2218,9 @@ static struct clk_branch usb_hs3_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs3_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs3_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2173,8 +2253,8 @@ static struct clk_rcg usb_hs4_xcvr_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_xcvr_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2189,7 +2269,9 @@ static struct clk_branch usb_hs4_xcvr_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_xcvr_clk",
- .parent_names = (const char *[]){ "usb_hs4_xcvr_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hs4_xcvr_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2222,16 +2304,14 @@ static struct clk_rcg usb_hsic_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
-
static struct clk_branch usb_hsic_xcvr_fs_clk = {
.halt_reg = 0x2fc8,
.halt_bit = 2,
@@ -2240,7 +2320,9 @@ static struct clk_branch usb_hsic_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_xcvr_fs_clk",
- .parent_names = usb_hsic_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2255,7 +2337,9 @@ static struct clk_branch usb_hsic_system_clk = {
.enable_reg = 0x292c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
- .parent_names = usb_hsic_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_hsic_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.name = "usb_hsic_system_clk",
.ops = &clk_branch_ops,
@@ -2271,7 +2355,9 @@ static struct clk_branch usb_hsic_hsic_clk = {
.enable_reg = 0x2b44,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pll14_vote" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pll14_vote.hw
+ },
.num_parents = 1,
.name = "usb_hsic_hsic_clk",
.ops = &clk_branch_ops,
@@ -2317,16 +2403,14 @@ static struct clk_rcg usb_fs1_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
-
static struct clk_branch usb_fs1_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 15,
@@ -2335,7 +2419,9 @@ static struct clk_branch usb_fs1_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_xcvr_fs_clk",
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2350,7 +2436,9 @@ static struct clk_branch usb_fs1_system_clk = {
.enable_reg = 0x296c,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
- .parent_names = usb_fs1_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs1_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.name = "usb_fs1_system_clk",
.ops = &clk_branch_ops,
@@ -2384,16 +2472,14 @@ static struct clk_rcg usb_fs2_xcvr_fs_src = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_src",
- .parent_names = gcc_pxo_pll8,
- .num_parents = 2,
+ .parent_data = gcc_pxo_pll8,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
}
};
-static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
-
static struct clk_branch usb_fs2_xcvr_fs_clk = {
.halt_reg = 0x2fcc,
.halt_bit = 12,
@@ -2402,7 +2488,9 @@ static struct clk_branch usb_fs2_xcvr_fs_clk = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_xcvr_fs_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2418,7 +2506,9 @@ static struct clk_branch usb_fs2_system_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_system_clk",
- .parent_names = usb_fs2_xcvr_fs_src_p,
+ .parent_hws = (const struct clk_hw*[]){
+ &usb_fs2_xcvr_fs_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2872,8 +2962,8 @@ static struct clk_rcg ce3_src = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "ce3_src",
- .parent_names = gcc_pxo_pll8_pll3,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_pll3,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll3),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2888,7 +2978,9 @@ static struct clk_branch ce3_core_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_core_clk",
- .parent_names = (const char *[]){ "ce3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ce3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2904,7 +2996,9 @@ static struct clk_branch ce3_h_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_h_clk",
- .parent_names = (const char *[]){ "ce3_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ce3_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2934,8 +3028,8 @@ static struct clk_rcg sata_clk_src = {
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "sata_clk_src",
- .parent_names = gcc_pxo_pll8_pll3,
- .num_parents = 3,
+ .parent_data = gcc_pxo_pll8_pll3,
+ .num_parents = ARRAY_SIZE(gcc_pxo_pll8_pll3),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -2950,7 +3044,9 @@ static struct clk_branch sata_rxoob_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "sata_rxoob_clk",
- .parent_names = (const char *[]){ "sata_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_clk_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2966,7 +3062,9 @@ static struct clk_branch sata_pmalive_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "sata_pmalive_clk",
- .parent_names = (const char *[]){ "sata_clk_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &sata_clk_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2982,7 +3080,9 @@ static struct clk_branch sata_phy_ref_clk = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "sata_phy_ref_clk",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
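Taken as a whole, the gcc-msm8960.c portion removes every remaining parent_names string: rate sources use the clk_parent_data tables above, branch clocks gain parent_hws entries pointing straight at their *_src clocks, and the one-entry helper arrays that only shared a name between two branches (usb_hsic_xcvr_fs_src_p, usb_fs1_xcvr_fs_src_p, usb_fs2_xcvr_fs_src_p) are deleted. Only clocks provided outside this driver (pxo, cxo, pll4) stay string-based, and those resolve roughly in the order sketched below (a simplification of what clk_core_fill_parent_index() and clk_core_get() do, not text from the patch):

	/*
	 * How the core resolves one clk_parent_data entry:
	 *   1. .hw       - direct pointer, no lookup at all
	 *   2. .fw_name  - DT/firmware lookup against the consumer's clock-names
	 *   3. .name     - legacy lookup in the global clock namespace
	 * So "pxo"/"pxo_board" works on DT systems that wire the XO through
	 * clock-names and still falls back to the old global name elsewhere.
	 */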
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
index b6fa7b8e8006..7792b8f23704 100644
--- a/drivers/clk/qcom/gcc-qcm2290.c
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -54,33 +54,9 @@ static const struct pll_vco spark_vco[] = {
{ 750000000, 1500000000, 1 },
};
-static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
- [PLL_OFF_TEST_CTL] = 0x10,
- [PLL_OFF_TEST_CTL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_USER_CTL_U] = 0x1C,
- [PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_STATUS] = 0x24,
- },
- [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
- [PLL_OFF_TEST_CTL] = 0x10,
- [PLL_OFF_TEST_CTL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x1C,
- [PLL_OFF_STATUS] = 0x20,
- },
-};
-
static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(0),
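The gcc-qcm2290.c change is different in kind: the driver carried a private clk_alpha_pll_regs_offset[] table, but the offsets in its DEFAULT entry match the layout the common Alpha PLL code already exports as clk_alpha_pll_regs[] (declared in clk-alpha-pll.h), so the local copy is deleted and the PLLs and post-dividers shown here pick up the shared CLK_ALPHA_PLL_TYPE_DEFAULT_EVO entry instead. The hunks that follow repeat the same one-line substitution:

	/* removed: locally duplicated register layout */
	.regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],

	/* added: shared layout from the common Alpha PLL driver */
	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],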
@@ -106,7 +82,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
.post_div_table = post_div_table_gpll0_out_aux2,
.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_aux2",
.parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
@@ -117,7 +93,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
static struct clk_alpha_pll gpll1 = {
.offset = 0x1000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(1),
@@ -147,7 +123,7 @@ static struct clk_alpha_pll gpll10 = {
.offset = 0xa000,
.vco_table = spark_vco,
.num_vco = ARRAY_SIZE(spark_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(10),
@@ -179,7 +155,7 @@ static struct clk_alpha_pll gpll11 = {
.offset = 0xb000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x79000,
@@ -197,7 +173,7 @@ static struct clk_alpha_pll gpll11 = {
static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(3),
@@ -223,7 +199,7 @@ static struct clk_alpha_pll_postdiv gpll3_out_main = {
.post_div_table = post_div_table_gpll3_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll3.clkr.hw },
@@ -234,7 +210,7 @@ static struct clk_alpha_pll_postdiv gpll3_out_main = {
static struct clk_alpha_pll gpll4 = {
.offset = 0x4000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(4),
@@ -251,7 +227,7 @@ static struct clk_alpha_pll gpll4 = {
static struct clk_alpha_pll gpll5 = {
.offset = 0x5000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(5),
@@ -268,7 +244,7 @@ static struct clk_alpha_pll gpll5 = {
static struct clk_alpha_pll gpll6 = {
.offset = 0x6000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(6),
@@ -294,7 +270,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = {
.post_div_table = post_div_table_gpll6_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll6_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
@@ -305,7 +281,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = {
static struct clk_alpha_pll gpll7 = {
.offset = 0x7000,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(7),
@@ -340,7 +316,7 @@ static struct clk_alpha_pll gpll8 = {
.offset = 0x8000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x79000,
@@ -367,7 +343,7 @@ static struct clk_alpha_pll_postdiv gpll8_out_main = {
.post_div_table = post_div_table_gpll8_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll8_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
@@ -393,7 +369,7 @@ static struct clk_alpha_pll gpll9 = {
.offset = 0x9000,
.vco_table = brammo_vco,
.num_vco = ARRAY_SIZE(brammo_vco),
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(9),
@@ -419,7 +395,7 @@ static struct clk_alpha_pll_postdiv gpll9_out_main = {
.post_div_table = post_div_table_gpll9_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
.width = 2,
- .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll9_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index c2ea09945c47..2d3980251e78 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -2224,7 +2224,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 7ff64d4d5920..8afb7575e712 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -3108,7 +3108,7 @@ static struct gdsc gcc_pcie_1_gdsc = {
.pd = {
.name = "gcc_pcie_1_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
@@ -3126,7 +3126,7 @@ static struct gdsc gcc_usb30_prim_gdsc = {
.pd = {
.name = "gcc_usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
@@ -3135,7 +3135,7 @@ static struct gdsc gcc_usb30_sec_gdsc = {
.pd = {
.name = "gcc_usb30_sec_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
.flags = VOTABLE,
};
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index a2f3ffcc5849..a18ed88f3b82 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -6768,6 +6768,10 @@ static struct gdsc pcie_1_tunnel_gdsc = {
.flags = VOTABLE,
};
+/*
+ * The Qualcomm PCIe driver does not yet implement suspend, so keep the
+ * PCIe power domains always-on for now.
+ */
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
.collapse_ctrl = 0x52128,
@@ -6776,7 +6780,7 @@ static struct gdsc pcie_2a_gdsc = {
.name = "pcie_2a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_2b_gdsc = {
@@ -6787,7 +6791,7 @@ static struct gdsc pcie_2b_gdsc = {
.name = "pcie_2b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_3a_gdsc = {
@@ -6798,7 +6802,7 @@ static struct gdsc pcie_3a_gdsc = {
.name = "pcie_3a_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_3b_gdsc = {
@@ -6809,7 +6813,7 @@ static struct gdsc pcie_3b_gdsc = {
.name = "pcie_3b_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc pcie_4_gdsc = {
@@ -6820,7 +6824,7 @@ static struct gdsc pcie_4_gdsc = {
.name = "pcie_4_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
- .flags = VOTABLE,
+ .flags = VOTABLE | ALWAYS_ON,
};
static struct gdsc ufs_card_gdsc = {
@@ -6844,7 +6848,7 @@ static struct gdsc usb30_mp_gdsc = {
.pd = {
.name = "usb30_mp_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc usb30_prim_gdsc = {
@@ -6852,7 +6856,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc usb30_sec_gdsc = {
@@ -6860,7 +6864,7 @@ static struct gdsc usb30_sec_gdsc = {
.pd = {
.name = "usb30_sec_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct clk_regmap *gcc_sc8280xp_clocks[] = {
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 9b97425008ce..db918c92a522 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -757,7 +757,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
.num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 58aa3ec9a7fc..6af08e0ca847 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -31,6 +31,7 @@ enum {
P_GPLL0_OUT_EVEN,
P_GPLL0_OUT_MAIN,
P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_MAIN,
P_SLEEP_CLK,
};
@@ -68,6 +69,23 @@ static struct clk_alpha_pll gpll4 = {
},
};
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x13000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo", .name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ },
+ },
+};
+
static const struct clk_div_table post_div_table_fabia_even[] = {
{ 0x0, 1 },
{ 0x1, 2 },
@@ -194,6 +212,19 @@ static const struct clk_parent_data gcc_parent_data_10[] = {
{ .hw = &gpll0_out_even.clkr.hw },
};
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT_MAIN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "bi_tcxo", .name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
@@ -233,6 +264,26 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_sdm670_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdm670_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x4815c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sdm670_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_data = gcc_parent_data_8_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
@@ -656,6 +707,54 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
.clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
};
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x26028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x26010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(9600000, P_BI_TCXO, 2, 0, 0),
@@ -705,6 +804,31 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_sdm670_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdm670_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdm670_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = {
F(105495, P_BI_TCXO, 2, 1, 91),
{ }
@@ -1283,6 +1407,28 @@ static struct clk_branch gcc_cpuss_rbcpr_clk = {
},
};
+/*
+ * The source clock frequencies are different for SDM670; define a child clock
+ * pointing to the source clock that uses SDM670 frequencies.
+ */
+static struct clk_branch gcc_sdm670_cpuss_rbcpr_clk = {
+ .halt_reg = 0x48008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x48008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdm670_cpuss_rbcpr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ddrss_gpu_axi_clk = {
.halt_reg = 0x44038,
.halt_check = BRANCH_VOTED,
@@ -2353,6 +2499,55 @@ static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
},
};
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc2_ahb_clk = {
.halt_reg = 0x14008,
.halt_check = BRANCH_HALT,
@@ -2415,6 +2610,28 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
+/*
+ * The source clock frequencies are different for SDM670; define a child clock
+ * pointing to the source clock that uses SDM670 frequencies.
+ */
+static struct clk_branch gcc_sdm670_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_sdm670_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
.halt_reg = 0x414c,
.halt_check = BRANCH_HALT_VOTED,
@@ -3308,6 +3525,155 @@ static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf_gdsc = {
.flags = VOTABLE,
};
+static struct clk_regmap *gcc_sdm670_clocks[] = {
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_APC_VS_CLK] = &gcc_apc_vs_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_AXI_CLK] = &gcc_camera_axi_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_sdm670_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_sdm670_cpuss_rbcpr_clk_src.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_AXI_CLK] = &gcc_disp_axi_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_VS_CLK] = &gcc_gpu_vs_clk.clkr,
+ [GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
+ [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr,
+ [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
+ [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_MSS_VS_CLK] = &gcc_mss_vs_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdm670_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdm670_sdcc4_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] =
+ &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_VDDA_VS_CLK] = &gcc_vdda_vs_clk.clkr,
+ [GCC_VDDCX_VS_CLK] = &gcc_vddcx_vs_clk.clkr,
+ [GCC_VDDMX_VS_CLK] = &gcc_vddmx_vs_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_VS_CTRL_AHB_CLK] = &gcc_vs_ctrl_ahb_clk.clkr,
+ [GCC_VS_CTRL_CLK] = &gcc_vs_ctrl_clk.clkr,
+ [GCC_VS_CTRL_CLK_SRC] = &gcc_vs_ctrl_clk_src.clkr,
+ [GCC_VSENSOR_CLK_SRC] = &gcc_vsensor_clk_src.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GCC_CPUSS_DVM_BUS_CLK] = &gcc_cpuss_dvm_bus_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
+ [GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
+ [GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+};
+
static struct clk_regmap *gcc_sdm845_clocks[] = {
[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3533,6 +3899,22 @@ static const struct qcom_reset_map gcc_sdm845_resets[] = {
[GCC_PCIE_1_PHY_BCR] = { 0x8e01c },
};
+static struct gdsc *gcc_sdm670_gdscs[] = {
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC] =
+ &hlos1_vote_aggre_noc_mmu_tbu2_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] =
+ &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc,
+ [HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC] = &hlos1_vote_mmnoc_mmu_tbu_sf_gdsc,
+};
+
static struct gdsc *gcc_sdm845_gdscs[] = {
[PCIE_0_GDSC] = &pcie_0_gdsc,
[PCIE_1_GDSC] = &pcie_1_gdsc,
@@ -3563,6 +3945,17 @@ static const struct regmap_config gcc_sdm845_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc gcc_sdm670_desc = {
+ .config = &gcc_sdm845_regmap_config,
+ .clks = gcc_sdm670_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdm670_clocks),
+ /* Snapdragon 670 can function without its own exclusive resets. */
+ .resets = gcc_sdm845_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdm845_resets),
+ .gdscs = gcc_sdm670_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdm670_gdscs),
+};
+
static const struct qcom_cc_desc gcc_sdm845_desc = {
.config = &gcc_sdm845_regmap_config,
.clks = gcc_sdm845_clocks,
@@ -3574,7 +3967,8 @@ static const struct qcom_cc_desc gcc_sdm845_desc = {
};
static const struct of_device_id gcc_sdm845_match_table[] = {
- { .compatible = "qcom,gcc-sdm845" },
+ { .compatible = "qcom,gcc-sdm670", .data = &gcc_sdm670_desc },
+ { .compatible = "qcom,gcc-sdm845", .data = &gcc_sdm845_desc },
{ }
};
MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
@@ -3600,6 +3994,7 @@ static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
static int gcc_sdm845_probe(struct platform_device *pdev)
{
+ const struct qcom_cc_desc *gcc_desc;
struct regmap *regmap;
int ret;
@@ -3616,7 +4011,8 @@ static int gcc_sdm845_probe(struct platform_device *pdev)
if (ret)
return ret;
- return qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
+ gcc_desc = of_device_get_match_data(&pdev->dev);
+ return qcom_cc_really_probe(pdev, gcc_desc, regmap);
}
static struct platform_driver gcc_sdm845_driver = {
diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
index 68fe9f6f0d2f..565f9912039f 100644
--- a/drivers/clk/qcom/gcc-sm6115.c
+++ b/drivers/clk/qcom/gcc-sm6115.c
@@ -57,7 +57,7 @@ static struct clk_alpha_pll gpll0 = {
.offset = 0x0,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(0),
@@ -83,7 +83,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
.post_div_table = post_div_table_gpll0_out_aux2,
.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_aux2",
.parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
@@ -92,18 +92,6 @@ static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
},
};
-/* listed as BRAMMO, but it doesn't really match */
-static const u8 clk_gpll9_regs[PLL_OFF_MAX_REGS] = {
- [PLL_OFF_L_VAL] = 0x04,
- [PLL_OFF_ALPHA_VAL] = 0x08,
- [PLL_OFF_ALPHA_VAL_U] = 0x0c,
- [PLL_OFF_TEST_CTL] = 0x10,
- [PLL_OFF_TEST_CTL_U] = 0x14,
- [PLL_OFF_USER_CTL] = 0x18,
- [PLL_OFF_CONFIG_CTL] = 0x1C,
- [PLL_OFF_STATUS] = 0x20,
-};
-
static const struct clk_div_table post_div_table_gpll0_out_main[] = {
{ 0x0, 1 },
{ }
@@ -115,7 +103,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_main = {
.post_div_table = post_div_table_gpll0_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
@@ -137,7 +125,7 @@ static struct clk_alpha_pll gpll10 = {
.offset = 0xa000,
.vco_table = gpll10_vco,
.num_vco = ARRAY_SIZE(gpll10_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(10),
@@ -163,7 +151,7 @@ static struct clk_alpha_pll_postdiv gpll10_out_main = {
.post_div_table = post_div_table_gpll10_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll10_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll10_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll10.clkr.hw },
@@ -189,7 +177,7 @@ static struct clk_alpha_pll gpll11 = {
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
.flags = SUPPORTS_DYNAMIC_UPDATE,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(11),
@@ -215,7 +203,7 @@ static struct clk_alpha_pll_postdiv gpll11_out_main = {
.post_div_table = post_div_table_gpll11_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll11_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll11_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll11.clkr.hw },
@@ -229,7 +217,7 @@ static struct clk_alpha_pll gpll3 = {
.offset = 0x3000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(3),
@@ -248,7 +236,7 @@ static struct clk_alpha_pll gpll4 = {
.offset = 0x4000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(4),
@@ -274,7 +262,7 @@ static struct clk_alpha_pll_postdiv gpll4_out_main = {
.post_div_table = post_div_table_gpll4_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll4_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll4.clkr.hw },
@@ -287,7 +275,7 @@ static struct clk_alpha_pll gpll6 = {
.offset = 0x6000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(6),
@@ -313,7 +301,7 @@ static struct clk_alpha_pll_postdiv gpll6_out_main = {
.post_div_table = post_div_table_gpll6_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll6_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
@@ -326,7 +314,7 @@ static struct clk_alpha_pll gpll7 = {
.offset = 0x7000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(7),
@@ -352,7 +340,7 @@ static struct clk_alpha_pll_postdiv gpll7_out_main = {
.post_div_table = post_div_table_gpll7_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll7_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll7_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll7.clkr.hw },
@@ -380,7 +368,7 @@ static struct clk_alpha_pll gpll8 = {
.offset = 0x8000,
.vco_table = default_vco,
.num_vco = ARRAY_SIZE(default_vco),
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.flags = SUPPORTS_DYNAMIC_UPDATE,
.clkr = {
.enable_reg = 0x79000,
@@ -407,7 +395,7 @@ static struct clk_alpha_pll_postdiv gpll8_out_main = {
.post_div_table = post_div_table_gpll8_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
.width = 4,
- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll8_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
@@ -431,7 +419,7 @@ static struct clk_alpha_pll gpll9 = {
.offset = 0x9000,
.vco_table = gpll9_vco,
.num_vco = ARRAY_SIZE(gpll9_vco),
- .regs = clk_gpll9_regs,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr = {
.enable_reg = 0x79000,
.enable_mask = BIT(9),
@@ -457,7 +445,7 @@ static struct clk_alpha_pll_postdiv gpll9_out_main = {
.post_div_table = post_div_table_gpll9_out_main,
.num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
.width = 2,
- .regs = clk_gpll9_regs,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_BRAMMO_EVO],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll9_out_main",
.parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 69412400efa4..9b4e4bb05963 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -2316,7 +2316,7 @@ static struct gdsc usb30_prim_gdsc = {
.pd = {
.name = "usb30_prim_gdsc",
},
- .pwrsts = PWRSTS_OFF_ON,
+ .pwrsts = PWRSTS_RET_ON,
};
static struct gdsc ufs_phy_gdsc = {
diff --git a/drivers/clk/qcom/gcc-sm6375.c b/drivers/clk/qcom/gcc-sm6375.c
new file mode 100644
index 000000000000..89a1cc90b145
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm6375.c
@@ -0,0 +1,3919 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm6375-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK
+};
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_ODD,
+ P_GPLL10_OUT_EVEN,
+ P_GPLL11_OUT_EVEN,
+ P_GPLL11_OUT_ODD,
+ P_GPLL3_OUT_EVEN,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL4_OUT_EVEN,
+ P_GPLL5_OUT_EVEN,
+ P_GPLL6_OUT_EVEN,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_EVEN,
+ P_GPLL8_OUT_EVEN,
+ P_GPLL8_OUT_MAIN,
+ P_GPLL9_OUT_EARLY,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+ { 595200000, 3600000000UL, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_gpll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+/* 1152MHz Configuration */
+static const struct alpha_pll_config gpll10_config = {
+ .l = 0x3c,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpll10 = {
+ .offset = 0xa000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .flags = SUPPORTS_FSM_LEGACY_MODE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+/* 532MHz Configuration */
+static const struct alpha_pll_config gpll11_config = {
+ .l = 0x1b,
+ .alpha = 0xb555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpll11 = {
+ .offset = 0xb000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .flags = SUPPORTS_FSM_LEGACY_MODE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll11",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+/* 400MHz Configuration */
+static const struct alpha_pll_config gpll8_config = {
+ .l = 0x14,
+ .alpha = 0xd555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329a299c,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .flags = SUPPORTS_FSM_LEGACY_MODE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_even = {
+ .offset = 0x8000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll8_out_even",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll8.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 1440MHz Configuration */
+static const struct alpha_pll_config gpll9_config = {
+ .l = 0x4b,
+ .alpha = 0x0,
+ .config_ctl_val = 0x08200800,
+ .config_ctl_hi_val = 0x05022011,
+ .config_ctl_hi1_val = 0x08000000,
+ .user_ctl_val = 0x00000301,
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x9000,
+ .vco_table = zonda_vco,
+ .num_vco = ARRAY_SIZE(zonda_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+ .offset = 0x9000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll9_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll9_out_main",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpll9.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL6_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll6_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_ODD, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+};
+
+static const struct clk_parent_data gcc_parent_data_2_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL9_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_ODD, 4 },
+ { P_GPLL4_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL5_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll5.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_ODD, 4 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll0_out_odd.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL4_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL9_OUT_MAIN, 4 },
+ { P_GPLL8_OUT_EVEN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll8_out_even.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL8_OUT_MAIN, 2 },
+ { P_GPLL10_OUT_EVEN, 3 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_GPLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll3_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 2 },
+ { P_GPLL7_OUT_EVEN, 3 },
+ { P_GPLL4_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_12[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL11_OUT_ODD, 2 },
+ { P_GPLL11_OUT_EVEN, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpll11.clkr.hw },
+ { .hw = &gpll11.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ .cmd_rcgr = 0x5802c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_cci_0_clk_src = {
+ .cmd_rcgr = 0x56000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_cci_1_clk_src = {
+ .cmd_rcgr = 0x5c000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_1_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5901c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x59038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x59054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi3phytimer_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 15),
+ F(65454545, P_GPLL9_OUT_EARLY, 11, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ .cmd_rcgr = 0x5101c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ .cmd_rcgr = 0x51038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ .cmd_rcgr = 0x51054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk4_clk_src = {
+ .cmd_rcgr = 0x51070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk4_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ .cmd_rcgr = 0x55024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ F(266600000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ F(480000000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ F(580000000, P_GPLL8_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ .cmd_rcgr = 0x55004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_ope_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(144000000, P_GPLL9_OUT_MAIN, 2.5, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(180000000, P_GPLL9_OUT_MAIN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(329142857, P_GPLL10_OUT_EVEN, 3.5, 0, 0),
+ F(384000000, P_GPLL10_OUT_EVEN, 3, 0, 0),
+ F(460800000, P_GPLL10_OUT_EVEN, 2.5, 0, 0),
+ F(576000000, P_GPLL10_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ .cmd_rcgr = 0x52004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(266571429, P_GPLL5_OUT_EVEN, 3.5, 0, 0),
+ F(426400000, P_GPLL3_OUT_MAIN, 2.5, 0, 0),
+ F(466500000, P_GPLL5_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x52094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ .cmd_rcgr = 0x52024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x520b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+ .cmd_rcgr = 0x52044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x520d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(256000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x52064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_cphy_rx_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_EVEN, 7.5, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x58010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x2b13c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2_ao),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x4d004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x4e004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x4f004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x20010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
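+/*
+ * The QUP serial-engine (SE) clock sources below all reuse
+ * ftbl_gcc_qupv3_wrap0_s0_clk_src; they differ only in CMD_RCGR offset
+ * and name, with each init data kept in its own variable.
+ */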
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x1f148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x1f278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x1f3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x1f4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x1f608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x1f738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x5301c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x5314c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x5327c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x533ac,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x534dc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x5360c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x38028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x38010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_EVEN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_12,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_12),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_ODD, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_ODD, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x45020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x45048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x4507c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x45060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_ODD, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+ F(133000000, P_GPLL11_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_GPLL11_OUT_EVEN, 2.5, 0, 0),
+ F(300000000, P_GPLL11_OUT_EVEN, 2, 0, 0),
+ F(384000000, P_GPLL11_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+ .cmd_rcgr = 0x58060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_video_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
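+/* Read-only post-dividers behind the CPUSS AHB and USB30 mock UTMI RCGs. */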
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x2b154,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1a04c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
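+/*
+ * Branch (gate) clocks. Some are enabled through the shared vote
+ * registers at 0x79004/0x7900c rather than a local enable register;
+ * halt status is still read from each clock's own halt_reg.
+ */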
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_csi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_usb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_nrt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_rt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_axi_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+ .halt_reg = 0x56018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_1_clk = {
+ .halt_reg = 0x5c018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+ .halt_reg = 0x52088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+ .halt_reg = 0x5208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_2_clk = {
+ .halt_reg = 0x52090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_3_clk = {
+ .halt_reg = 0x520f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x59018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phytimer_clk = {
+ .halt_reg = 0x59050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi3phytimer_clk = {
+ .halt_reg = 0x5906c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5906c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x51034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x51050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5106c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk4_clk = {
+ .halt_reg = 0x51088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+ .halt_reg = 0x58054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+ .halt_reg = 0x5503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_ope_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+ .halt_reg = 0x5501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_ope_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+ .halt_reg = 0x5201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x5207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+ .halt_reg = 0x520ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+ .halt_reg = 0x5203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x52080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+ .halt_reg = 0x520cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_clk = {
+ .halt_reg = 0x5205c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x52084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_csid_clk = {
+ .halt_reg = 0x520ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_tfe_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1a084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+ .reg = 0x17058,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_names =
+ (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_disp_gpll0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sleep_clk = {
+ .halt_reg = 0x17074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17074,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x4d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x4e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x4f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+ .halt_reg = 0x36048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(31),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x21004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x17060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x17010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x1f014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1f144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1f274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1f3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1f4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x1f604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x1f734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x53014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x5300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x53018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x53148,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x53278,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x533a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x534d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x53608,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x53004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x53008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x53008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x38008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x3800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1a080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x45014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x45044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x45078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x4501c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x45018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x45018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x45040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x45040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_mem_clkref_clk = {
+ .halt_reg = 0x8c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_mem_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx5_pcie_clkref_en_clk = {
+ .halt_reg = 0x8c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx5_pcie_clkref_en_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x8c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+ .halt_reg = 0x6e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+ .halt_reg = 0x6e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+ .halt_reg = 0x580a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x580a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x580a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_vcodec0_sys_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_video_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+ .halt_reg = 0x5808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_video_venus_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x1a004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x45004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_top_gdsc = {
+ .gdscr = 0x58004,
+ .pd = {
+ .name = "camss_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x5807c,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vcodec0_gdsc = {
+ .gdscr = 0x58098,
+ .pd = {
+ .name = "vcodec0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
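+/*
+ * The HLOS1 vote GDSCs below only cast this master's vote (VOTABLE flag);
+ * the hardware collapses the domain once all masters have voted it off.
+ */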
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc = {
+ .gdscr = 0x7d074,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc = {
+ .gdscr = 0x7d078,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d07c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_sm6375_clocks[] = {
+ [GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+ [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+ [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+ [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+ [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+ [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+ [GCC_CAMSS_CCI_0_CLK_SRC] = &gcc_camss_cci_0_clk_src.clkr,
+ [GCC_CAMSS_CCI_1_CLK] = &gcc_camss_cci_1_clk.clkr,
+ [GCC_CAMSS_CCI_1_CLK_SRC] = &gcc_camss_cci_1_clk_src.clkr,
+ [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+ [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+ [GCC_CAMSS_CPHY_2_CLK] = &gcc_camss_cphy_2_clk.clkr,
+ [GCC_CAMSS_CPHY_3_CLK] = &gcc_camss_cphy_3_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK_SRC] = &gcc_camss_csi2phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI3PHYTIMER_CLK] = &gcc_camss_csi3phytimer_clk.clkr,
+ [GCC_CAMSS_CSI3PHYTIMER_CLK_SRC] = &gcc_camss_csi3phytimer_clk_src.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+ [GCC_CAMSS_MCLK4_CLK] = &gcc_camss_mclk4_clk.clkr,
+ [GCC_CAMSS_MCLK4_CLK_SRC] = &gcc_camss_mclk4_clk_src.clkr,
+ [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+ [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+ [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+ [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+ [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+ [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CLK] = &gcc_camss_tfe_2_clk.clkr,
+ [GCC_CAMSS_TFE_2_CLK_SRC] = &gcc_camss_tfe_2_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CPHY_RX_CLK] = &gcc_camss_tfe_2_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK] = &gcc_camss_tfe_2_csid_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK_SRC] = &gcc_camss_tfe_2_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SLEEP_CLK] = &gcc_disp_sleep_clk.clkr,
+ [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr,
+ [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+ [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+ [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+ [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+ [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+ [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GCC_UFS_MEM_CLKREF_CLK] = &gcc_ufs_mem_clkref_clk.clkr,
+ [GCC_RX5_PCIE_CLKREF_EN_CLK] = &gcc_rx5_pcie_clkref_en_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL0_OUT_ODD] = &gpll0_out_odd.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL10] = &gpll10.clkr,
+ [GPLL11] = &gpll11.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_OUT_EVEN] = &gpll3_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_EVEN] = &gpll6_out_even.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_EVEN] = &gpll8_out_even.clkr,
+ [GPLL9] = &gpll9.clkr,
+ [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
+static const struct qcom_reset_map gcc_sm6375_resets[] = {
+ [GCC_MMSS_BCR] = { 0x17000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x1b020 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x1c004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+ [GCC_SDCC2_BCR] = { 0x1e000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x1f000 },
+ [GCC_PDM_BCR] = { 0x20000 },
+ [GCC_GPU_BCR] = { 0x36000 },
+ [GCC_SDCC1_BCR] = { 0x38000 },
+ [GCC_UFS_PHY_BCR] = { 0x45000 },
+ [GCC_CAMSS_TFE_BCR] = { 0x52000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x53000 },
+ [GCC_CAMSS_OPE_BCR] = { 0x55000 },
+ [GCC_CAMSS_TOP_BCR] = { 0x58000 },
+ [GCC_VENUS_BCR] = { 0x58078 },
+ [GCC_VCODEC0_BCR] = { 0x58094 },
+ [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
+
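+/*
+ * These QUPv3 serial engine RCGs support Dynamic Frequency Switching (DFS);
+ * qcom_cc_register_rcg_dfs() populates their frequency tables from the
+ * hardware perf-level registers at probe when DFS is enabled.
+ */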
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+};
+
+static struct gdsc *gcc_sm6375_gdscs[] = {
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [CAMSS_TOP_GDSC] = &camss_top_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VCODEC0_GDSC] = &vcodec0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+};
+
+static const struct regmap_config gcc_sm6375_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm6375_desc = {
+ .config = &gcc_sm6375_regmap_config,
+ .clks = gcc_sm6375_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm6375_clocks),
+ .resets = gcc_sm6375_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm6375_resets),
+ .gdscs = gcc_sm6375_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm6375_gdscs),
+};
+
+static const struct of_device_id gcc_sm6375_match_table[] = {
+ { .compatible = "qcom,sm6375-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm6375_match_table);
+
+static int gcc_sm6375_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm6375_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /*
+ * Keep the following clocks always on:
+ * GCC_CAMERA_XO_CLK, GCC_CPUSS_GNOC_CLK, GCC_DISP_XO_CLK
+ */
+ regmap_update_bits(regmap, 0x17028, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x2b004, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x1702c, BIT(0), BIT(0));
+
+ clk_lucid_pll_configure(&gpll10, regmap, &gpll10_config);
+ clk_lucid_pll_configure(&gpll11, regmap, &gpll11_config);
+ clk_lucid_pll_configure(&gpll8, regmap, &gpll8_config);
+ clk_zonda_pll_configure(&gpll9, regmap, &gpll9_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_sm6375_desc, regmap);
+}
+
+static struct platform_driver gcc_sm6375_driver = {
+ .probe = gcc_sm6375_probe,
+ .driver = {
+ .name = "gcc-sm6375",
+ .of_match_table = gcc_sm6375_match_table,
+ },
+};
+
+static int __init gcc_sm6375_init(void)
+{
+ return platform_driver_register(&gcc_sm6375_driver);
+}
+subsys_initcall(gcc_sm6375_init);
+
+static void __exit gcc_sm6375_exit(void)
+{
+ platform_driver_unregister(&gcc_sm6375_driver);
+}
+module_exit(gcc_sm6375_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM6375 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index d3244006c661..7cf5e130e92f 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -368,6 +368,16 @@ static int _gdsc_disable(struct gdsc *sc)
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
+ /*
+ * If the only state the GDSC supports besides ON is Retention,
+ * leave it ON.
+ * There is no SW control to transition the GDSC into the
+ * Retention state; that transition happens in HW when the
+ * parent domain enters a low power state.
+ */
+ if (sc->pwrsts == PWRSTS_RET_ON)
+ return 0;
+
ret = gdsc_toggle_logic(sc, GDSC_OFF);
if (ret)
return ret;
@@ -439,11 +449,8 @@ static int gdsc_init(struct gdsc *sc)
/* ...and the power-domain */
ret = gdsc_pm_runtime_get(sc);
- if (ret) {
- if (sc->rsupply)
- regulator_disable(sc->rsupply);
- return ret;
- }
+ if (ret)
+ goto err_disable_supply;
/*
* Votable GDSCs can be ON due to Vote from other masters.
@@ -452,14 +459,14 @@ static int gdsc_init(struct gdsc *sc)
if (sc->flags & VOTABLE) {
ret = gdsc_update_collapse_bit(sc, false);
if (ret)
- return ret;
+ goto err_put_rpm;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
- return ret;
+ goto err_put_rpm;
}
/*
@@ -486,9 +493,21 @@ static int gdsc_init(struct gdsc *sc)
sc->pd.power_off = gdsc_disable;
if (!sc->pd.power_on)
sc->pd.power_on = gdsc_enable;
- pm_genpd_init(&sc->pd, NULL, !on);
+
+ ret = pm_genpd_init(&sc->pd, NULL, !on);
+ if (ret)
+ goto err_put_rpm;
return 0;
+
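+ /*
+ * The runtime PM reference and the supply are only held when the GDSC
+ * was found enabled ("on"), hence the conditional cleanup below.
+ */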
+err_put_rpm:
+ if (on)
+ gdsc_pm_runtime_put(sc);
+err_disable_supply:
+ if (on && sc->rsupply)
+ regulator_disable(sc->rsupply);
+
+ return ret;
}
int gdsc_register(struct gdsc_desc *desc,
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5de48c9439b2..981a12c8502d 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -49,6 +49,11 @@ struct gdsc {
const u8 pwrsts;
/* Powerdomain allowable state bitfields */
#define PWRSTS_OFF BIT(0)
+/*
+ * There is no SW control to transition a GDSC into
+ * PWRSTS_RET; that transition happens in HW when the
+ * parent domain enters a low power state.
+ */
#define PWRSTS_RET BIT(1)
#define PWRSTS_ON BIT(2)
#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
diff --git a/drivers/clk/qcom/gpucc-sc8280xp.c b/drivers/clk/qcom/gpucc-sc8280xp.c
new file mode 100644
index 000000000000..ea1e9505c335
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sc8280xp.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sc8280xp.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "gdsc.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_BI_TCXO,
+ DT_GCC_GPU_GPLL0_CLK_SRC,
+ DT_GCC_GPU_GPLL0_DIV_CLK_SRC,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPU_GPLL0_CLK_SRC,
+ P_GCC_GPU_GPLL0_DIV_CLK_SRC,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct clk_parent_data parent_data_tcxo = { .index = DT_BI_TCXO };
+
+static const struct pll_vco lucid_5lpe_vco[] = {
+ { 249600000, 1800000000, 0 },
+};
+
+static struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1c,
+ .alpha = 0xa555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaaa,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x2a9a699c,
+ .test_ctl_val = 0x00000000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x01800000,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_5lpe_vco,
+ .num_vco = ARRAY_SIZE(lucid_5lpe_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_data = &parent_data_tcxo,
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_5lpe_ops,
+ },
+ },
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll0.clkr.hw },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GCC_GPU_GPLL0_CLK_SRC, 5 },
+ { P_GCC_GPU_GPLL0_DIV_CLK_SRC, 6 },
+};
+
+static const struct clk_parent_data gpu_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gpu_cc_pll1.clkr.hw },
+ { .index = DT_GCC_GPU_GPLL0_CLK_SRC },
+ { .index = DT_GCC_GPU_GPLL0_DIV_CLK_SRC },
+};
+
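+/*
+ * The F() macro stores the pre-divider as (2 * div - 1), so fractional
+ * values such as 1.5 below are representable.
+ */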
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GCC_GPU_GPLL0_DIV_CLK_SRC, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_data = gpu_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_hub_clk_src[] = {
+ F(200000000, P_GCC_GPU_GPLL0_CLK_SRC, 3, 0, 0),
+ F(300000000, P_GCC_GPU_GPLL0_CLK_SRC, 2, 0, 0),
+ F(400000000, P_GCC_GPU_GPLL0_CLK_SRC, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_hub_clk_src = {
+ .cmd_rcgr = 0x117c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_hub_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_clk_src",
+ .parent_data = gpu_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
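+/* The hub dividers are not reprogrammed by Linux; clk_regmap_div_ro_ops keeps them read-only. */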
+static struct clk_regmap_div gpu_cc_hub_ahb_div_clk_src = {
+ .reg = 0x11c0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_ahb_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gpu_cc_hub_cx_int_div_clk_src = {
+ .reg = 0x11bc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpu_cc_hub_cx_int_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_ahb_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_gmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_aon_clk = {
+ .halt_reg = 0x1178,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hub_cx_int_clk = {
+ .halt_reg = 0x1204,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_hub_cx_int_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gpu_cc_sc8280xp_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+ [GPU_CC_HUB_AHB_DIV_CLK_SRC] = &gpu_cc_hub_ahb_div_clk_src.clkr,
+ [GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
+ [GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
+ [GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
+ [GPU_CC_HUB_CX_INT_DIV_CLK_SRC] = &gpu_cc_hub_cx_int_div_clk_src.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static struct gdsc cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE | RETAIN_FF_ENABLE,
+};
+
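+/*
+ * Powering up GX is left to the GPU driver, so the genpd power_on
+ * callback is a deliberate no-op (gdsc_gx_do_nothing_enable).
+ */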
+static struct gdsc gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gx_gdsc",
+ .power_on = gdsc_gx_do_nothing_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc *gpu_cc_sc8280xp_gdscs[] = {
+ [GPU_CC_CX_GDSC] = &cx_gdsc,
+ [GPU_CC_GX_GDSC] = &gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sc8280xp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8030,
+ .fast_io = true,
+};
+
+static struct qcom_cc_desc gpu_cc_sc8280xp_desc = {
+ .config = &gpu_cc_sc8280xp_regmap_config,
+ .clks = gpu_cc_sc8280xp_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sc8280xp_clocks),
+ .gdscs = gpu_cc_sc8280xp_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sc8280xp_gdscs),
+};
+
+static int gpu_cc_sc8280xp_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sc8280xp_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /*
+ * Keep the following clocks always on:
+ * GPU_CC_CB_CLK, GPU_CC_CXO_CLK
+ */
+ regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x109c, BIT(0), BIT(0));
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sc8280xp_desc, regmap);
+}
+
+static const struct of_device_id gpu_cc_sc8280xp_match_table[] = {
+ { .compatible = "qcom,sc8280xp-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sc8280xp_match_table);
+
+static struct platform_driver gpu_cc_sc8280xp_driver = {
+ .probe = gpu_cc_sc8280xp_probe,
+ .driver = {
+ .name = "gpu_cc-sc8280xp",
+ .of_match_table = gpu_cc_sc8280xp_match_table,
+ },
+};
+module_platform_driver(gpu_cc_sc8280xp_driver);
+
+MODULE_DESCRIPTION("Qualcomm SC8280XP GPU clock controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
index 88d4b33ac0cc..b1b370274ec4 100644
--- a/drivers/clk/qcom/kpss-xcc.c
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -12,9 +12,9 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-static const char *aux_parents[] = {
- "pll8_vote",
- "pxo",
+static const struct clk_parent_data aux_parents[] = {
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .fw_name = "pxo", .name = "pxo_board" },
};
static const u32 aux_parent_map[] = {
@@ -32,8 +32,8 @@ MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
static int kpss_xcc_driver_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
- struct clk *clk;
void __iomem *base;
+ struct clk_hw *hw;
const char *name;
id = of_match_device(kpss_xcc_match_table, &pdev->dev);
@@ -55,24 +55,16 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
base += 0x28;
}
- clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
- ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
- 0, aux_parent_map, NULL);
+ hw = devm_clk_hw_register_mux_parent_data_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0,
+ base, 0, 0x3,
+ 0, aux_parent_map, NULL);
- platform_set_drvdata(pdev, clk);
-
- return PTR_ERR_OR_ZERO(clk);
-}
-
-static int kpss_xcc_driver_remove(struct platform_device *pdev)
-{
- clk_unregister_mux(platform_get_drvdata(pdev));
- return 0;
+ return PTR_ERR_OR_ZERO(hw);
}
static struct platform_driver kpss_xcc_driver = {
.probe = kpss_xcc_driver_probe,
- .remove = kpss_xcc_driver_remove,
.driver = {
.name = "kpss-xcc",
.of_match_table = kpss_xcc_match_table,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 1a2be4aeb31d..81a44a9a9abc 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -22,6 +22,7 @@
#include "clk-branch.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
+#include "reset.h"
static struct clk_pll pll4 = {
.l_reg = 0x4,
@@ -33,7 +34,9 @@ static struct clk_pll pll4 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll4",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "pxo", .name = "pxo_board",
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -63,9 +66,9 @@ static const struct parent_map lcc_pxo_pll4_map[] = {
{ P_PLL4, 2 }
};
-static const char * const lcc_pxo_pll4[] = {
- "pxo",
- "pll4_vote",
+static const struct clk_parent_data lcc_pxo_pll4[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll4_vote", .name = "pll4_vote" },
};
static struct freq_tbl clk_tbl_aif_mi2s[] = {
@@ -130,18 +133,14 @@ static struct clk_rcg mi2s_osr_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "mi2s_osr_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
},
};
-static const char * const lcc_mi2s_parents[] = {
- "mi2s_osr_src",
-};
-
static struct clk_branch mi2s_osr_clk = {
.halt_reg = 0x50,
.halt_bit = 1,
@@ -151,7 +150,9 @@ static struct clk_branch mi2s_osr_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "mi2s_osr_clk",
- .parent_names = lcc_mi2s_parents,
+ .parent_hws = (const struct clk_hw*[]) {
+ &mi2s_osr_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -166,7 +167,9 @@ static struct clk_regmap_div mi2s_div_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "mi2s_div_clk",
- .parent_names = lcc_mi2s_parents,
+ .parent_hws = (const struct clk_hw*[]) {
+ &mi2s_osr_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_regmap_div_ops,
},
@@ -182,7 +185,9 @@ static struct clk_branch mi2s_bit_div_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "mi2s_bit_div_clk",
- .parent_names = (const char *[]){ "mi2s_div_clk" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &mi2s_div_clk.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -190,6 +195,10 @@ static struct clk_branch mi2s_bit_div_clk = {
},
};
+static const struct clk_parent_data lcc_mi2s_bit_div_codec_clk[] = {
+ { .hw = &mi2s_bit_div_clk.clkr.hw, },
+ { .fw_name = "mi2s_codec", .name = "mi2s_codec_clk" },
+};
static struct clk_regmap_mux mi2s_bit_clk = {
.reg = 0x48,
@@ -198,11 +207,8 @@ static struct clk_regmap_mux mi2s_bit_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "mi2s_bit_clk",
- .parent_names = (const char *[]){
- "mi2s_bit_div_clk",
- "mi2s_codec_clk",
- },
- .num_parents = 2,
+ .parent_data = lcc_mi2s_bit_div_codec_clk,
+ .num_parents = ARRAY_SIZE(lcc_mi2s_bit_div_codec_clk),
.ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -244,8 +250,8 @@ static struct clk_rcg pcm_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "pcm_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -261,7 +267,9 @@ static struct clk_branch pcm_clk_out = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "pcm_clk_out",
- .parent_names = (const char *[]){ "pcm_src" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &pcm_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -269,6 +277,11 @@ static struct clk_branch pcm_clk_out = {
},
};
+static const struct clk_parent_data lcc_pcm_clk_out_codec_clk[] = {
+ { .hw = &pcm_clk_out.clkr.hw, },
+ { .fw_name = "pcm_codec_clk", .name = "pcm_codec_clk" },
+};
+
static struct clk_regmap_mux pcm_clk = {
.reg = 0x54,
.shift = 10,
@@ -276,11 +289,8 @@ static struct clk_regmap_mux pcm_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "pcm_clk",
- .parent_names = (const char *[]){
- "pcm_clk_out",
- "pcm_codec_clk",
- },
- .num_parents = 2,
+ .parent_data = lcc_pcm_clk_out_codec_clk,
+ .num_parents = ARRAY_SIZE(lcc_pcm_clk_out_codec_clk),
.ops = &clk_regmap_mux_closest_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -324,18 +334,14 @@ static struct clk_rcg spdif_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "spdif_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
},
};
-static const char * const lcc_spdif_parents[] = {
- "spdif_src",
-};
-
static struct clk_branch spdif_clk = {
.halt_reg = 0xd4,
.halt_bit = 1,
@@ -345,7 +351,9 @@ static struct clk_branch spdif_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "spdif_clk",
- .parent_names = lcc_spdif_parents,
+ .parent_hws = (const struct clk_hw*[]) {
+ &spdif_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -383,8 +391,8 @@ static struct clk_rcg ahbix_clk = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "ahbix",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_lcc_ops,
},
},
@@ -405,6 +413,10 @@ static struct clk_regmap *lcc_ipq806x_clks[] = {
[AHBIX_CLK] = &ahbix_clk.clkr,
};
+static const struct qcom_reset_map lcc_ipq806x_resets[] = {
+ [LCC_PCM_RESET] = { 0x54, 13 },
+};
+
static const struct regmap_config lcc_ipq806x_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -417,6 +429,8 @@ static const struct qcom_cc_desc lcc_ipq806x_desc = {
.config = &lcc_ipq806x_regmap_config,
.clks = lcc_ipq806x_clks,
.num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
+ .resets = lcc_ipq806x_resets,
+ .num_resets = ARRAY_SIZE(lcc_ipq806x_resets),
};
static const struct of_device_id lcc_ipq806x_match_table[] = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index 84817cf2b6bd..3926184cc91b 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -33,7 +33,9 @@ static struct clk_pll pll4 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll4",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -49,9 +51,9 @@ static const struct parent_map lcc_pxo_pll4_map[] = {
{ P_PLL4, 2 }
};
-static const char * const lcc_pxo_pll4[] = {
- "pxo",
- "pll4_vote",
+static const struct clk_parent_data lcc_pxo_pll4[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll4_vote", .name = "pll4_vote" },
};
static struct freq_tbl clk_tbl_aif_osr_492[] = {
@@ -86,112 +88,7 @@ static struct freq_tbl clk_tbl_aif_osr_393[] = {
{ }
};
-static struct clk_rcg mi2s_osr_src = {
- .ns_reg = 0x48,
- .md_reg = 0x4c,
- .mn = {
- .mnctr_en_bit = 8,
- .mnctr_reset_bit = 7,
- .mnctr_mode_shift = 5,
- .n_val_shift = 24,
- .m_val_shift = 8,
- .width = 8,
- },
- .p = {
- .pre_div_shift = 3,
- .pre_div_width = 2,
- },
- .s = {
- .src_sel_shift = 0,
- .parent_map = lcc_pxo_pll4_map,
- },
- .freq_tbl = clk_tbl_aif_osr_393,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(9),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_osr_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
- .ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
- },
- },
-};
-
-static const char * const lcc_mi2s_parents[] = {
- "mi2s_osr_src",
-};
-
-static struct clk_branch mi2s_osr_clk = {
- .halt_reg = 0x50,
- .halt_bit = 1,
- .halt_check = BRANCH_HALT_ENABLE,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(17),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_osr_clk",
- .parent_names = lcc_mi2s_parents,
- .num_parents = 1,
- .ops = &clk_branch_ops,
- .flags = CLK_SET_RATE_PARENT,
- },
- },
-};
-
-static struct clk_regmap_div mi2s_div_clk = {
- .reg = 0x48,
- .shift = 10,
- .width = 4,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_div_clk",
- .parent_names = lcc_mi2s_parents,
- .num_parents = 1,
- .ops = &clk_regmap_div_ops,
- },
- },
-};
-
-static struct clk_branch mi2s_bit_div_clk = {
- .halt_reg = 0x50,
- .halt_bit = 0,
- .halt_check = BRANCH_HALT_ENABLE,
- .clkr = {
- .enable_reg = 0x48,
- .enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_bit_div_clk",
- .parent_names = (const char *[]){ "mi2s_div_clk" },
- .num_parents = 1,
- .ops = &clk_branch_ops,
- .flags = CLK_SET_RATE_PARENT,
- },
- },
-};
-
-static struct clk_regmap_mux mi2s_bit_clk = {
- .reg = 0x48,
- .shift = 14,
- .width = 1,
- .clkr = {
- .hw.init = &(struct clk_init_data){
- .name = "mi2s_bit_clk",
- .parent_names = (const char *[]){
- "mi2s_bit_div_clk",
- "mi2s_codec_clk",
- },
- .num_parents = 2,
- .ops = &clk_regmap_mux_closest_ops,
- .flags = CLK_SET_RATE_PARENT,
- },
- },
-};
-
-#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
+#define CLK_AIF_OSR_SRC(prefix, _ns, _md) \
static struct clk_rcg prefix##_osr_src = { \
.ns_reg = _ns, \
.md_reg = _md, \
@@ -217,85 +114,103 @@ static struct clk_rcg prefix##_osr_src = { \
.enable_mask = BIT(9), \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_osr_src", \
- .parent_names = lcc_pxo_pll4, \
- .num_parents = 2, \
+ .parent_data = lcc_pxo_pll4, \
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4), \
.ops = &clk_rcg_ops, \
.flags = CLK_SET_RATE_GATE, \
}, \
}, \
}; \
- \
-static const char * const lcc_##prefix##_parents[] = { \
- #prefix "_osr_src", \
-}; \
- \
+
+#define CLK_AIF_OSR_CLK(prefix, _ns, hr, en_bit) \
static struct clk_branch prefix##_osr_clk = { \
.halt_reg = hr, \
.halt_bit = 1, \
.halt_check = BRANCH_HALT_ENABLE, \
.clkr = { \
.enable_reg = _ns, \
- .enable_mask = BIT(21), \
+ .enable_mask = BIT(en_bit), \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_osr_clk", \
- .parent_names = lcc_##prefix##_parents, \
+ .parent_hws = (const struct clk_hw*[]){ \
+ &prefix##_osr_src.clkr.hw, \
+ }, \
.num_parents = 1, \
.ops = &clk_branch_ops, \
.flags = CLK_SET_RATE_PARENT, \
}, \
}, \
}; \
- \
+
+#define CLK_AIF_OSR_DIV_CLK(prefix, _ns, _width) \
static struct clk_regmap_div prefix##_div_clk = { \
.reg = _ns, \
.shift = 10, \
- .width = 8, \
+ .width = _width, \
.clkr = { \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_div_clk", \
- .parent_names = lcc_##prefix##_parents, \
+ .parent_hws = (const struct clk_hw*[]){ \
+ &prefix##_osr_src.clkr.hw, \
+ }, \
.num_parents = 1, \
.ops = &clk_regmap_div_ops, \
}, \
}, \
}; \
- \
+
+#define CLK_AIF_OSR_BIT_DIV_CLK(prefix, _ns, hr, en_bit) \
static struct clk_branch prefix##_bit_div_clk = { \
.halt_reg = hr, \
.halt_bit = 0, \
.halt_check = BRANCH_HALT_ENABLE, \
.clkr = { \
.enable_reg = _ns, \
- .enable_mask = BIT(19), \
+ .enable_mask = BIT(en_bit), \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_bit_div_clk", \
- .parent_names = (const char *[]){ \
- #prefix "_div_clk" \
- }, \
+ .parent_hws = (const struct clk_hw*[]){ \
+ &prefix##_div_clk.clkr.hw, \
+ }, \
.num_parents = 1, \
.ops = &clk_branch_ops, \
.flags = CLK_SET_RATE_PARENT, \
}, \
}, \
}; \
- \
+
+#define CLK_AIF_OSR_BIT_CLK(prefix, _ns, _shift) \
static struct clk_regmap_mux prefix##_bit_clk = { \
.reg = _ns, \
- .shift = 18, \
+ .shift = _shift, \
.width = 1, \
.clkr = { \
.hw.init = &(struct clk_init_data){ \
.name = #prefix "_bit_clk", \
- .parent_names = (const char *[]){ \
- #prefix "_bit_div_clk", \
- #prefix "_codec_clk", \
+ .parent_data = (const struct clk_parent_data[]){ \
+ { .hw = &prefix##_bit_div_clk.clkr.hw, }, \
+ { .fw_name = #prefix "_codec_clk", \
+ .name = #prefix "_codec_clk", }, \
}, \
.num_parents = 2, \
.ops = &clk_regmap_mux_closest_ops, \
.flags = CLK_SET_RATE_PARENT, \
}, \
}, \
-}
+};
+
+CLK_AIF_OSR_SRC(mi2s, 0x48, 0x4c)
+CLK_AIF_OSR_CLK(mi2s, 0x48, 0x50, 17)
+CLK_AIF_OSR_DIV_CLK(mi2s, 0x48, 4)
+CLK_AIF_OSR_BIT_DIV_CLK(mi2s, 0x48, 0x50, 15)
+CLK_AIF_OSR_BIT_CLK(mi2s, 0x48, 14)
+
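+/*
+ * MI2S uses different enable bits, divider width and mux shift than the
+ * other AIFs, so it is built from the individual macros above; the rest
+ * share the composite macro below.
+ */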
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \
+ CLK_AIF_OSR_SRC(prefix, _ns, _md) \
+ CLK_AIF_OSR_CLK(prefix, _ns, hr, 21) \
+ CLK_AIF_OSR_DIV_CLK(prefix, _ns, 8) \
+ CLK_AIF_OSR_BIT_DIV_CLK(prefix, _ns, hr, 19) \
+ CLK_AIF_OSR_BIT_CLK(prefix, _ns, 18)
CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
@@ -361,8 +276,8 @@ static struct clk_rcg pcm_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "pcm_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
@@ -378,7 +293,9 @@ static struct clk_branch pcm_clk_out = {
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "pcm_clk_out",
- .parent_names = (const char *[]){ "pcm_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &pcm_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -393,9 +310,9 @@ static struct clk_regmap_mux pcm_clk = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "pcm_clk",
- .parent_names = (const char *[]){
- "pcm_clk_out",
- "pcm_codec_clk",
+ .parent_data = (const struct clk_parent_data[]){
+ { .hw = &pcm_clk_out.clkr.hw },
+ { .fw_name = "pcm_codec_clk", .name = "pcm_codec_clk" },
},
.num_parents = 2,
.ops = &clk_regmap_mux_closest_ops,
@@ -429,18 +346,14 @@ static struct clk_rcg slimbus_src = {
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
.name = "slimbus_src",
- .parent_names = lcc_pxo_pll4,
- .num_parents = 2,
+ .parent_data = lcc_pxo_pll4,
+ .num_parents = ARRAY_SIZE(lcc_pxo_pll4),
.ops = &clk_rcg_ops,
.flags = CLK_SET_RATE_GATE,
},
},
};
-static const char * const lcc_slimbus_parents[] = {
- "slimbus_src",
-};
-
static struct clk_branch audio_slimbus_clk = {
.halt_reg = 0xd4,
.halt_bit = 0,
@@ -450,7 +363,9 @@ static struct clk_branch audio_slimbus_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "audio_slimbus_clk",
- .parent_names = lcc_slimbus_parents,
+ .parent_hws = (const struct clk_hw*[]){
+ &slimbus_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -467,7 +382,9 @@ static struct clk_branch sps_slimbus_clk = {
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
.name = "sps_slimbus_clk",
- .parent_names = lcc_slimbus_parents,
+ .parent_hws = (const struct clk_hw*[]){
+ &slimbus_src.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
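
The lcc-msm8960 hunks above follow the same pattern as the rest of this series: bare .parent_names string arrays are replaced either by .parent_hws (a direct clk_hw pointer for a parent registered in the same driver) or by .parent_data (a DT "clock-names" lookup via .fw_name, with .name kept as a fallback to the old global clock name). As a minimal sketch, assuming hypothetical example_* identifiers that are not part of this patch, the structured forms look like this:

/* Sketch only -- "example_*" identifiers are hypothetical, not from this patch. */
#include <linux/clk-provider.h>
#include <linux/kernel.h>

static struct clk_fixed_rate example_root;	/* stands in for a clock owned by the same driver */

/* Structured parent description: a clk_hw pointer, or a DT "clock-names"
 * entry (.fw_name) with a legacy global-name fallback (.name), replaces
 * bare string lookups.
 */
static const struct clk_parent_data example_parent_data[] = {
	{ .hw = &example_root.hw },
	{ .fw_name = "xo", .name = "xo_board" },
};

static const struct clk_init_data example_init = {
	.name = "example_clk",
	.parent_data = example_parent_data,
	.num_parents = ARRAY_SIZE(example_parent_data),
	.ops = &clk_mux_ops,	/* any suitable ops; chosen only to keep the sketch self-contained */
};
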
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 6ab6e5a34c72..063e0365f311 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -12,6 +12,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <dt-bindings/clock/qcom,lpass-sc7280.h>
#include <dt-bindings/clock/qcom,lpassaudiocc-sc7280.h>
#include "clk-alpha-pll.h"
@@ -22,6 +23,7 @@
#include "clk-regmap-mux.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
P_BI_TCXO,
@@ -38,6 +40,32 @@ static const struct pll_vco zonda_vco[] = {
{ 595200000UL, 3600000000UL, 0 },
};
+static struct clk_branch lpass_q6ss_ahbm_clk = {
+ .halt_reg = 0x901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_q6ss_ahbs_clk = {
+ .halt_reg = 0x9020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x9020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
/* 1128.96MHz configuration */
static const struct alpha_pll_config lpass_audio_cc_pll_config = {
.l = 0x3a,
@@ -221,7 +249,7 @@ static struct clk_rcg2 lpass_aon_cc_main_rcg_clk_src = {
.parent_data = lpass_aon_cc_parent_data_0,
.num_parents = ARRAY_SIZE(lpass_aon_cc_parent_data_0),
.flags = CLK_OPS_PARENT_ENABLE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_shared_ops,
},
};
@@ -614,6 +642,11 @@ static struct gdsc lpass_aon_cc_lpass_audio_hm_gdsc = {
.flags = RETAIN_FF_ENABLE,
};
+static struct clk_regmap *lpass_cc_sc7280_clocks[] = {
+ [LPASS_Q6SS_AHBM_CLK] = &lpass_q6ss_ahbm_clk.clkr,
+ [LPASS_Q6SS_AHBS_CLK] = &lpass_q6ss_ahbs_clk.clkr,
+};
+
static struct clk_regmap *lpass_aon_cc_sc7280_clocks[] = {
[LPASS_AON_CC_AUDIO_HM_H_CLK] = &lpass_aon_cc_audio_hm_h_clk.clkr,
[LPASS_AON_CC_VA_MEM0_CLK] = &lpass_aon_cc_va_mem0_clk.clkr,
@@ -659,12 +692,30 @@ static struct regmap_config lpass_audio_cc_sc7280_regmap_config = {
.fast_io = true,
};
+static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
+ .config = &lpass_audio_cc_sc7280_regmap_config,
+ .clks = lpass_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
+};
+
static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
.config = &lpass_audio_cc_sc7280_regmap_config,
.clks = lpass_audio_cc_sc7280_clocks,
.num_clks = ARRAY_SIZE(lpass_audio_cc_sc7280_clocks),
};
+static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
+ [LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
+};
+
+static const struct qcom_cc_desc lpass_audio_cc_reset_sc7280_desc = {
+ .config = &lpass_audio_cc_sc7280_regmap_config,
+ .resets = lpass_audio_cc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(lpass_audio_cc_sc7280_resets),
+};
+
static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = {
{ .compatible = "qcom,sc7280-lpassaudiocc" },
{ }
@@ -741,6 +792,13 @@ static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev)
return ret;
}
+ ret = qcom_cc_probe_by_index(pdev, 1, &lpass_audio_cc_reset_sc7280_desc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC Resets\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
@@ -785,6 +843,12 @@ static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
+ lpass_audio_cc_sc7280_regmap_config.name = "cc";
+ desc = &lpass_cc_sc7280_desc;
+ return qcom_cc_probe(pdev, desc);
+ }
+
lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon";
lpass_audio_cc_sc7280_regmap_config.max_register = 0xa0008;
desc = &lpass_aon_cc_sc7280_desc;
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index b39ee1c9647b..5c1e17bd0d76 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -17,32 +17,6 @@
#include "clk-branch.h"
#include "common.h"
-static struct clk_branch lpass_q6ss_ahbm_clk = {
- .halt_reg = 0x1c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x1c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "lpass_q6ss_ahbm_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch lpass_q6ss_ahbs_clk = {
- .halt_reg = 0x20,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x20,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "lpass_q6ss_ahbs_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch lpass_top_cc_lpi_q6_axim_hs_clk = {
.halt_reg = 0x0,
.halt_check = BRANCH_HALT,
@@ -105,17 +79,6 @@ static struct regmap_config lpass_regmap_config = {
.fast_io = true,
};
-static struct clk_regmap *lpass_cc_sc7280_clocks[] = {
- [LPASS_Q6SS_AHBM_CLK] = &lpass_q6ss_ahbm_clk.clkr,
- [LPASS_Q6SS_AHBS_CLK] = &lpass_q6ss_ahbs_clk.clkr,
-};
-
-static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
- .config = &lpass_regmap_config,
- .clks = lpass_cc_sc7280_clocks,
- .num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
-};
-
static struct clk_regmap *lpass_cc_top_sc7280_clocks[] = {
[LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK] =
&lpass_top_cc_lpi_q6_axim_hs_clk.clkr,
@@ -169,13 +132,6 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
if (ret)
goto destroy_pm_clk;
- lpass_regmap_config.name = "cc";
- desc = &lpass_cc_sc7280_desc;
-
- ret = qcom_cc_probe_by_index(pdev, 2, desc);
- if (ret)
- goto destroy_pm_clk;
-
return 0;
destroy_pm_clk:
diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c
index 1f1f1bd1b68e..6ad19b06b1ce 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7280.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7280.c
@@ -190,6 +190,19 @@ static struct clk_rcg2 lpass_core_cc_ext_if1_clk_src = {
},
};
+static struct clk_rcg2 lpass_core_cc_ext_mclk0_clk_src = {
+ .cmd_rcgr = 0x20000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = lpass_core_cc_parent_map_0,
+ .freq_tbl = ftbl_lpass_core_cc_ext_if0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_mclk0_clk_src",
+ .parent_data = lpass_core_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
static struct clk_branch lpass_core_cc_core_clk = {
.halt_reg = 0x1f000,
@@ -283,6 +296,24 @@ static struct clk_branch lpass_core_cc_lpm_mem0_core_clk = {
},
};
+static struct clk_branch lpass_core_cc_ext_mclk0_clk = {
+ .halt_reg = 0x20014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "lpass_core_cc_ext_mclk0_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &lpass_core_cc_ext_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch lpass_core_cc_sysnoc_mport_core_clk = {
.halt_reg = 0x23000,
.halt_check = BRANCH_HALT_VOTED,
@@ -326,6 +357,8 @@ static struct clk_regmap *lpass_core_cc_sc7280_clocks[] = {
[LPASS_CORE_CC_LPM_CORE_CLK] = &lpass_core_cc_lpm_core_clk.clkr,
[LPASS_CORE_CC_LPM_MEM0_CORE_CLK] = &lpass_core_cc_lpm_mem0_core_clk.clkr,
[LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK] = &lpass_core_cc_sysnoc_mport_core_clk.clkr,
+ [LPASS_CORE_CC_EXT_MCLK0_CLK] = &lpass_core_cc_ext_mclk0_clk.clkr,
+ [LPASS_CORE_CC_EXT_MCLK0_CLK_SRC] = &lpass_core_cc_ext_mclk0_clk_src.clkr,
};
static struct regmap_config lpass_core_cc_sc7280_regmap_config = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index aaaad65b6458..6bf908a51f53 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -41,70 +41,6 @@ enum {
#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
-static const struct parent_map mmcc_pxo_pll8_pll2_map[] = {
- { P_PXO, 0 },
- { P_PLL8, 2 },
- { P_PLL2, 1 }
-};
-
-static const char * const mmcc_pxo_pll8_pll2[] = {
- "pxo",
- "pll8_vote",
- "pll2",
-};
-
-static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = {
- { P_PXO, 0 },
- { P_PLL8, 2 },
- { P_PLL2, 1 },
- { P_PLL3, 3 }
-};
-
-static const char * const mmcc_pxo_pll8_pll2_pll15[] = {
- "pxo",
- "pll8_vote",
- "pll2",
- "pll15",
-};
-
-static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = {
- { P_PXO, 0 },
- { P_PLL8, 2 },
- { P_PLL2, 1 },
- { P_PLL15, 3 }
-};
-
-static const char * const mmcc_pxo_pll8_pll2_pll3[] = {
- "pxo",
- "pll8_vote",
- "pll2",
- "pll3",
-};
-
-static const struct parent_map mmcc_pxo_dsi2_dsi1_map[] = {
- { P_PXO, 0 },
- { P_DSI2_PLL_DSICLK, 1 },
- { P_DSI1_PLL_DSICLK, 3 },
-};
-
-static const char * const mmcc_pxo_dsi2_dsi1[] = {
- "pxo",
- "dsi2pll",
- "dsi1pll",
-};
-
-static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
- { P_PXO, 0 },
- { P_DSI1_PLL_BYTECLK, 1 },
- { P_DSI2_PLL_BYTECLK, 2 },
-};
-
-static const char * const mmcc_pxo_dsi1_dsi2_byte[] = {
- "pxo",
- "dsi1pllbyte",
- "dsi2pllbyte",
-};
-
static struct clk_pll pll2 = {
.l_reg = 0x320,
.m_reg = 0x324,
@@ -115,7 +51,9 @@ static struct clk_pll pll2 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll2",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -131,7 +69,9 @@ static struct clk_pll pll15 = {
.status_bit = 16,
.clkr.hw.init = &(struct clk_init_data){
.name = "pll15",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_pll_ops,
},
@@ -151,6 +91,70 @@ static const struct pll_config pll15_config = {
.main_output_mask = BIT(23),
};
+static const struct parent_map mmcc_pxo_pll8_pll2_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 }
+};
+
+static const struct clk_parent_data mmcc_pxo_pll8_pll2[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .hw = &pll2.clkr.hw },
+};
+
+static const struct parent_map mmcc_pxo_pll8_pll2_pll3_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 },
+ { P_PLL3, 3 }
+};
+
+static const struct clk_parent_data mmcc_pxo_pll8_pll2_pll15[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .hw = &pll2.clkr.hw },
+ { .hw = &pll15.clkr.hw },
+};
+
+static const struct parent_map mmcc_pxo_pll8_pll2_pll15_map[] = {
+ { P_PXO, 0 },
+ { P_PLL8, 2 },
+ { P_PLL2, 1 },
+ { P_PLL15, 3 }
+};
+
+static const struct clk_parent_data mmcc_pxo_pll8_pll2_pll3[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "pll8_vote", .name = "pll8_vote" },
+ { .hw = &pll2.clkr.hw },
+ { .fw_name = "pll3", .name = "pll3" },
+};
+
+static const struct parent_map mmcc_pxo_dsi2_dsi1_map[] = {
+ { P_PXO, 0 },
+ { P_DSI2_PLL_DSICLK, 1 },
+ { P_DSI1_PLL_DSICLK, 3 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi2_dsi1[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi2pll", .name = "dsi2pll" },
+ { .fw_name = "dsi1pll", .name = "dsi1pll" },
+};
+
+static const struct parent_map mmcc_pxo_dsi1_dsi2_byte_map[] = {
+ { P_PXO, 0 },
+ { P_DSI1_PLL_BYTECLK, 1 },
+ { P_DSI2_PLL_BYTECLK, 2 },
+};
+
+static const struct clk_parent_data mmcc_pxo_dsi1_dsi2_byte[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "dsi1pllbyte", .name = "dsi1pllbyte" },
+ { .fw_name = "dsi2pllbyte", .name = "dsi2pllbyte" },
+};
+
static struct freq_tbl clk_tbl_cam[] = {
{ 6000000, P_PLL8, 4, 1, 16 },
{ 8000000, P_PLL8, 4, 1, 12 },
@@ -192,8 +196,8 @@ static struct clk_rcg camclk0_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "camclk0_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -207,7 +211,9 @@ static struct clk_branch camclk0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camclk0_clk",
- .parent_names = (const char *[]){ "camclk0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camclk0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -241,8 +247,8 @@ static struct clk_rcg camclk1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "camclk1_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -256,7 +262,9 @@ static struct clk_branch camclk1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camclk1_clk",
- .parent_names = (const char *[]){ "camclk1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camclk1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -290,8 +298,8 @@ static struct clk_rcg camclk2_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "camclk2_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -305,7 +313,9 @@ static struct clk_branch camclk2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "camclk2_clk",
- .parent_names = (const char *[]){ "camclk2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &camclk2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
},
@@ -345,8 +355,8 @@ static struct clk_rcg csi0_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi0_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -359,7 +369,9 @@ static struct clk_branch csi0_clk = {
.enable_reg = 0x0040,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi0_clk",
.ops = &clk_branch_ops,
@@ -375,7 +387,9 @@ static struct clk_branch csi0_phy_clk = {
.enable_reg = 0x0040,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi0_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi0_phy_clk",
.ops = &clk_branch_ops,
@@ -409,8 +423,8 @@ static struct clk_rcg csi1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi1_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -423,7 +437,9 @@ static struct clk_branch csi1_clk = {
.enable_reg = 0x0024,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi1_clk",
.ops = &clk_branch_ops,
@@ -439,7 +455,9 @@ static struct clk_branch csi1_phy_clk = {
.enable_reg = 0x0024,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi1_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi1_phy_clk",
.ops = &clk_branch_ops,
@@ -473,8 +491,8 @@ static struct clk_rcg csi2_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi2_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -487,7 +505,9 @@ static struct clk_branch csi2_clk = {
.enable_reg = 0x022c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi2_clk",
.ops = &clk_branch_ops,
@@ -503,7 +523,9 @@ static struct clk_branch csi2_phy_clk = {
.enable_reg = 0x022c,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "csi2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &csi2_src.clkr.hw
+ },
.num_parents = 1,
.name = "csi2_phy_clk",
.ops = &clk_branch_ops,
@@ -602,10 +624,10 @@ static const struct clk_ops clk_ops_pix_rdi = {
.determine_rate = __clk_mux_determine_rate,
};
-static const char * const pix_rdi_parents[] = {
- "csi0_clk",
- "csi1_clk",
- "csi2_clk",
+static const struct clk_hw *pix_rdi_parents[] = {
+ &csi0_clk.clkr.hw,
+ &csi1_clk.clkr.hw,
+ &csi2_clk.clkr.hw,
};
static struct clk_pix_rdi csi_pix_clk = {
@@ -618,8 +640,8 @@ static struct clk_pix_rdi csi_pix_clk = {
.enable_mask = BIT(26),
.hw.init = &(struct clk_init_data){
.name = "csi_pix_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -635,8 +657,8 @@ static struct clk_pix_rdi csi_pix1_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "csi_pix1_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -652,8 +674,8 @@ static struct clk_pix_rdi csi_rdi_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "csi_rdi_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -669,8 +691,8 @@ static struct clk_pix_rdi csi_rdi1_clk = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csi_rdi1_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -686,8 +708,8 @@ static struct clk_pix_rdi csi_rdi2_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "csi_rdi2_clk",
- .parent_names = pix_rdi_parents,
- .num_parents = 3,
+ .parent_hws = pix_rdi_parents,
+ .num_parents = ARRAY_SIZE(pix_rdi_parents),
.ops = &clk_ops_pix_rdi,
},
},
@@ -725,15 +747,13 @@ static struct clk_rcg csiphytimer_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "csiphytimer_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
};
-static const char * const csixphy_timer_src[] = { "csiphytimer_src" };
-
static struct clk_branch csiphy0_timer_clk = {
.halt_reg = 0x01e8,
.halt_bit = 17,
@@ -741,7 +761,9 @@ static struct clk_branch csiphy0_timer_clk = {
.enable_reg = 0x0160,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = csixphy_timer_src,
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphytimer_src.clkr.hw,
+ },
.num_parents = 1,
.name = "csiphy0_timer_clk",
.ops = &clk_branch_ops,
@@ -757,7 +779,9 @@ static struct clk_branch csiphy1_timer_clk = {
.enable_reg = 0x0160,
.enable_mask = BIT(9),
.hw.init = &(struct clk_init_data){
- .parent_names = csixphy_timer_src,
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphytimer_src.clkr.hw,
+ },
.num_parents = 1,
.name = "csiphy1_timer_clk",
.ops = &clk_branch_ops,
@@ -773,7 +797,9 @@ static struct clk_branch csiphy2_timer_clk = {
.enable_reg = 0x0160,
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
- .parent_names = csixphy_timer_src,
+ .parent_hws = (const struct clk_hw*[]){
+ &csiphytimer_src.clkr.hw,
+ },
.num_parents = 1,
.name = "csiphy2_timer_clk",
.ops = &clk_branch_ops,
@@ -835,8 +861,8 @@ static struct clk_dyn_rcg gfx2d0_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -850,7 +876,9 @@ static struct clk_branch gfx2d0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_clk",
- .parent_names = (const char *[]){ "gfx2d0_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx2d0_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -895,8 +923,8 @@ static struct clk_dyn_rcg gfx2d1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -910,7 +938,9 @@ static struct clk_branch gfx2d1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_clk",
- .parent_names = (const char *[]){ "gfx2d1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx2d1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -996,8 +1026,8 @@ static struct clk_dyn_rcg gfx3d_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gfx3d_src",
- .parent_names = mmcc_pxo_pll8_pll2_pll3,
- .num_parents = 4,
+ .parent_data = mmcc_pxo_pll8_pll2_pll3,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2_pll3),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1005,8 +1035,8 @@ static struct clk_dyn_rcg gfx3d_src = {
static const struct clk_init_data gfx3d_8064_init = {
.name = "gfx3d_src",
- .parent_names = mmcc_pxo_pll8_pll2_pll15,
- .num_parents = 4,
+ .parent_data = mmcc_pxo_pll8_pll2_pll15,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2_pll15),
.ops = &clk_dyn_rcg_ops,
};
@@ -1018,7 +1048,9 @@ static struct clk_branch gfx3d_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk",
- .parent_names = (const char *[]){ "gfx3d_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1074,8 +1106,8 @@ static struct clk_dyn_rcg vcap_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vcap_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1089,7 +1121,9 @@ static struct clk_branch vcap_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vcap_clk",
- .parent_names = (const char *[]){ "vcap_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vcap_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1105,7 +1139,9 @@ static struct clk_branch vcap_npl_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "vcap_npl_clk",
- .parent_names = (const char *[]){ "vcap_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vcap_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1153,8 +1189,8 @@ static struct clk_rcg ijpeg_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "ijpeg_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1168,7 +1204,9 @@ static struct clk_branch ijpeg_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "ijpeg_clk",
- .parent_names = (const char *[]){ "ijpeg_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &ijpeg_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1201,8 +1239,8 @@ static struct clk_rcg jpegd_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "jpegd_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1216,7 +1254,9 @@ static struct clk_branch jpegd_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "jpegd_clk",
- .parent_names = (const char *[]){ "jpegd_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &jpegd_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1281,8 +1321,8 @@ static struct clk_dyn_rcg mdp_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "mdp_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1296,7 +1336,9 @@ static struct clk_branch mdp_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdp_clk",
- .parent_names = (const char *[]){ "mdp_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1311,7 +1353,9 @@ static struct clk_branch mdp_lut_clk = {
.enable_reg = 0x016c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "mdp_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &mdp_src.clkr.hw
+ },
.num_parents = 1,
.name = "mdp_lut_clk",
.ops = &clk_branch_ops,
@@ -1328,7 +1372,9 @@ static struct clk_branch mdp_vsync_clk = {
.enable_mask = BIT(6),
.hw.init = &(struct clk_init_data){
.name = "mdp_vsync_clk",
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.ops = &clk_branch_ops
},
@@ -1380,8 +1426,8 @@ static struct clk_dyn_rcg rot_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "rot_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1395,7 +1441,9 @@ static struct clk_branch rot_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "rot_clk",
- .parent_names = (const char *[]){ "rot_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &rot_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1408,9 +1456,9 @@ static const struct parent_map mmcc_pxo_hdmi_map[] = {
{ P_HDMI_PLL, 3 }
};
-static const char * const mmcc_pxo_hdmi[] = {
- "pxo",
- "hdmi_pll",
+static const struct clk_parent_data mmcc_pxo_hdmi[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
+ { .fw_name = "hdmipll", .name = "hdmi_pll" },
};
static struct freq_tbl clk_tbl_tv[] = {
@@ -1443,16 +1491,14 @@ static struct clk_rcg tv_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "tv_src",
- .parent_names = mmcc_pxo_hdmi,
- .num_parents = 2,
+ .parent_data = mmcc_pxo_hdmi,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_hdmi),
.ops = &clk_rcg_bypass_ops,
.flags = CLK_SET_RATE_PARENT,
},
},
};
-static const char * const tv_src_name[] = { "tv_src" };
-
static struct clk_branch tv_enc_clk = {
.halt_reg = 0x01d4,
.halt_bit = 9,
@@ -1460,7 +1506,9 @@ static struct clk_branch tv_enc_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(8),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "tv_enc_clk",
.ops = &clk_branch_ops,
@@ -1476,7 +1524,9 @@ static struct clk_branch tv_dac_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "tv_dac_clk",
.ops = &clk_branch_ops,
@@ -1492,7 +1542,9 @@ static struct clk_branch mdp_tv_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "mdp_tv_clk",
.ops = &clk_branch_ops,
@@ -1508,7 +1560,9 @@ static struct clk_branch hdmi_tv_clk = {
.enable_reg = 0x00ec,
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "hdmi_tv_clk",
.ops = &clk_branch_ops,
@@ -1524,7 +1578,9 @@ static struct clk_branch rgb_tv_clk = {
.enable_reg = 0x0124,
.enable_mask = BIT(14),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "rgb_tv_clk",
.ops = &clk_branch_ops,
@@ -1540,7 +1596,9 @@ static struct clk_branch npl_tv_clk = {
.enable_reg = 0x0124,
.enable_mask = BIT(16),
.hw.init = &(struct clk_init_data){
- .parent_names = tv_src_name,
+ .parent_hws = (const struct clk_hw*[]){
+ &tv_src.clkr.hw,
+ },
.num_parents = 1,
.name = "npl_tv_clk",
.ops = &clk_branch_ops,
@@ -1556,7 +1614,9 @@ static struct clk_branch hdmi_app_clk = {
.enable_reg = 0x005c,
.enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "pxo" },
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "pxo", .name = "pxo_board" },
+ },
.num_parents = 1,
.name = "hdmi_app_clk",
.ops = &clk_branch_ops,
@@ -1614,8 +1674,8 @@ static struct clk_dyn_rcg vcodec_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vcodec_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_dyn_rcg_ops,
},
},
@@ -1629,7 +1689,9 @@ static struct clk_branch vcodec_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vcodec_clk",
- .parent_names = (const char *[]){ "vcodec_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vcodec_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1665,8 +1727,8 @@ static struct clk_rcg vpe_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vpe_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1680,7 +1742,9 @@ static struct clk_branch vpe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vpe_clk",
- .parent_names = (const char *[]){ "vpe_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vpe_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1733,8 +1797,8 @@ static struct clk_rcg vfe_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "vfe_src",
- .parent_names = mmcc_pxo_pll8_pll2,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_pll8_pll2,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_pll8_pll2),
.ops = &clk_rcg_ops,
},
},
@@ -1748,7 +1812,9 @@ static struct clk_branch vfe_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "vfe_clk",
- .parent_names = (const char *[]){ "vfe_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -1763,7 +1829,9 @@ static struct clk_branch vfe_csi_clk = {
.enable_reg = 0x0104,
.enable_mask = BIT(12),
.hw.init = &(struct clk_init_data){
- .parent_names = (const char *[]){ "vfe_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &vfe_src.clkr.hw
+ },
.num_parents = 1,
.name = "vfe_csi_clk",
.ops = &clk_branch_ops,
@@ -2067,8 +2135,8 @@ static struct clk_rcg dsi1_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2083,7 +2151,9 @@ static struct clk_branch dsi1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi1_clk",
- .parent_names = (const char *[]){ "dsi1_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2115,8 +2185,8 @@ static struct clk_rcg dsi2_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2131,7 +2201,9 @@ static struct clk_branch dsi2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi2_clk",
- .parent_names = (const char *[]){ "dsi2_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2154,8 +2226,8 @@ static struct clk_rcg dsi1_byte_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_byte_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2170,7 +2242,9 @@ static struct clk_branch dsi1_byte_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi1_byte_clk",
- .parent_names = (const char *[]){ "dsi1_byte_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_byte_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2193,8 +2267,8 @@ static struct clk_rcg dsi2_byte_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_byte_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_bypass2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -2209,7 +2283,9 @@ static struct clk_branch dsi2_byte_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi2_byte_clk",
- .parent_names = (const char *[]){ "dsi2_byte_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_byte_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2232,8 +2308,8 @@ static struct clk_rcg dsi1_esc_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_esc_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_esc_ops,
},
},
@@ -2247,7 +2323,9 @@ static struct clk_branch dsi1_esc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi1_esc_clk",
- .parent_names = (const char *[]){ "dsi1_esc_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_esc_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2270,8 +2348,8 @@ static struct clk_rcg dsi2_esc_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_esc_src",
- .parent_names = mmcc_pxo_dsi1_dsi2_byte,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi1_dsi2_byte,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi1_dsi2_byte),
.ops = &clk_rcg_esc_ops,
},
},
@@ -2285,7 +2363,9 @@ static struct clk_branch dsi2_esc_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "dsi2_esc_clk",
- .parent_names = (const char *[]){ "dsi2_esc_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_esc_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2317,8 +2397,8 @@ static struct clk_rcg dsi1_pixel_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi1_pixel_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_pixel_ops,
},
},
@@ -2332,7 +2412,9 @@ static struct clk_branch dsi1_pixel_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk1_clk",
- .parent_names = (const char *[]){ "dsi1_pixel_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi1_pixel_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
@@ -2364,8 +2446,8 @@ static struct clk_rcg dsi2_pixel_src = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "dsi2_pixel_src",
- .parent_names = mmcc_pxo_dsi2_dsi1,
- .num_parents = 3,
+ .parent_data = mmcc_pxo_dsi2_dsi1,
+ .num_parents = ARRAY_SIZE(mmcc_pxo_dsi2_dsi1),
.ops = &clk_rcg_pixel_ops,
},
},
@@ -2379,7 +2461,9 @@ static struct clk_branch dsi2_pixel_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "mdp_pclk2_clk",
- .parent_names = (const char *[]){ "dsi2_pixel_src" },
+ .parent_hws = (const struct clk_hw*[]){
+ &dsi2_pixel_src.clkr.hw
+ },
.num_parents = 1,
.ops = &clk_branch_ops,
.flags = CLK_SET_RATE_PARENT,
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 819d194be8f7..2a16adb572d2 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -13,8 +13,10 @@
static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
+ struct qcom_reset_controller *rst = to_qcom_reset_controller(rcdev);
+
rcdev->ops->assert(rcdev, id);
- udelay(1);
+ udelay(rst->reset_map[id].udelay ?: 1); /* use 1 us as default */
rcdev->ops->deassert(rcdev, id);
return 0;
}
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 2a08b5e282c7..b8c113582072 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -11,6 +11,7 @@
struct qcom_reset_map {
unsigned int reg;
u8 bit;
+ u8 udelay;
};
struct regmap;
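
With the new udelay member, each qcom_reset_map entry can state how long qcom_reset() keeps the line asserted before deasserting it; entries that leave the field at zero keep the previous 1 us delay. A hypothetical entry using the field might look like this (sketch only, register offset, bit and delay are made up, not taken from the patch):

/* Hypothetical reset map entry exercising the new per-reset delay. */
static const struct qcom_reset_map example_cc_resets[] = {
	[0] = { .reg = 0x1000, .bit = 0, .udelay = 150 },	/* hold reset asserted for 150 us */
};
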
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index cd80b6084ece..4baf355e26d8 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -108,7 +108,13 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
DEF_FIXED("cbfusa", R8A779F0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1),
- DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_FIXED("sasyncrt", R8A779F0_CLK_SASYNCRT, CLK_PLL5_DIV4, 48, 1),
+ DEF_FIXED("sasyncperd1", R8A779F0_CLK_SASYNCPERD1, CLK_PLL5_DIV4, 3, 1),
+ DEF_FIXED("sasyncperd2", R8A779F0_CLK_SASYNCPERD2, R8A779F0_CLK_SASYNCPERD1, 2, 1),
+ DEF_FIXED("sasyncperd4", R8A779F0_CLK_SASYNCPERD4, R8A779F0_CLK_SASYNCPERD1, 4, 1),
+
+ DEF_GEN4_SDH("sdh0", R8A779F0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, R8A779F0_CLK_SD0H, 0x870),
DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
@@ -130,6 +136,10 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("i2c3", 521, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c4", 522, R8A779F0_CLK_S0D6_PER),
DEF_MOD("i2c5", 523, R8A779F0_CLK_S0D6_PER),
+ DEF_MOD("msiof0", 618, R8A779F0_CLK_MSO),
+ DEF_MOD("msiof1", 619, R8A779F0_CLK_MSO),
+ DEF_MOD("msiof2", 620, R8A779F0_CLK_MSO),
+ DEF_MOD("msiof3", 621, R8A779F0_CLK_MSO),
DEF_MOD("pcie0", 624, R8A779F0_CLK_S0D2),
DEF_MOD("pcie1", 625, R8A779F0_CLK_S0D2),
DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
@@ -139,7 +149,16 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("sdhi0", 706, R8A779F0_CLK_SD0),
DEF_MOD("sys-dmac0", 709, R8A779F0_CLK_S0D3_PER),
DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER),
+ DEF_MOD("tmu0", 713, R8A779F0_CLK_SASYNCRT),
+ DEF_MOD("tmu1", 714, R8A779F0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu2", 715, R8A779F0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu3", 716, R8A779F0_CLK_SASYNCPERD2),
+ DEF_MOD("tmu4", 717, R8A779F0_CLK_SASYNCPERD2),
DEF_MOD("wdt", 907, R8A779F0_CLK_R),
+ DEF_MOD("cmt0", 910, R8A779F0_CLK_R),
+ DEF_MOD("cmt1", 911, R8A779F0_CLK_R),
+ DEF_MOD("cmt2", 912, R8A779F0_CLK_R),
+ DEF_MOD("cmt3", 913, R8A779F0_CLK_R),
DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC),
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 3fc4233b1ead..9641122133b5 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -150,10 +150,24 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
+ DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC),
+ DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC),
+ DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC),
DEF_MOD("hscif0", 514, R8A779G0_CLK_S0D3_PER),
DEF_MOD("hscif1", 515, R8A779G0_CLK_S0D3_PER),
DEF_MOD("hscif2", 516, R8A779G0_CLK_S0D3_PER),
DEF_MOD("hscif3", 517, R8A779G0_CLK_S0D3_PER),
+ DEF_MOD("i2c0", 518, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c1", 519, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c2", 520, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c3", 521, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c4", 522, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("i2c5", 523, R8A779G0_CLK_S0D6_PER),
+ DEF_MOD("wdt1:wdt0", 907, R8A779G0_CLK_R),
+ DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
};
/*
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index fd7c4eecd398..02a4fc41bb6e 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -414,6 +414,7 @@ static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A07G044_DMAC_ACLK,
};
+#ifdef CONFIG_CLK_R9A07G044
const struct rzg2l_cpg_info r9a07g044_cpg_info = {
/* Core Clocks */
.core_clks = core_clks.common,
@@ -436,6 +437,7 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = {
.has_clk_mon_regs = true,
};
+#endif
#ifdef CONFIG_CLK_R9A07G054
const struct rzg2l_cpg_info r9a07g054_cpg_info = {
diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c
index b21915cf6648..fbef1b35d254 100644
--- a/drivers/clk/renesas/r9a09g011-cpg.c
+++ b/drivers/clk/renesas/r9a09g011-cpg.c
@@ -132,6 +132,8 @@ static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = {
DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8),
DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9),
DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12),
+ DEF_MOD("iic_pclk0", R9A09G011_IIC_PCLK0, CLK_SEL_E, 0x420, 12),
+ DEF_MOD("iic_pclk1", R9A09G011_IIC_PCLK1, CLK_SEL_E, 0x424, 12),
DEF_MOD("wdt0_pclk", R9A09G011_WDT0_PCLK, CLK_SEL_E, 0x428, 12),
DEF_MOD("wdt0_clk", R9A09G011_WDT0_CLK, CLK_MAIN, 0x428, 13),
DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4),
@@ -143,6 +145,8 @@ static const struct rzg2l_reset r9a09g011_resets[] = {
DEF_RST(R9A09G011_PFC_PRESETN, 0x600, 2),
DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11),
DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13),
+ DEF_RST(R9A09G011_IIC_GPA_PRESETN, 0x614, 8),
+ DEF_RST(R9A09G011_IIC_GPB_PRESETN, 0x614, 9),
DEF_RST_MON(R9A09G011_WDT0_PRESETN, 0x614, 12, 19),
};
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 3067bdb6e119..345a5d2a457c 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -23,6 +23,13 @@ config CLK_RV110X
help
Build the driver for RV110x Clock Driver.
+config CLK_RV1126
+ bool "Rockchip RV1126 clock controller support"
+ depends on ARM || COMPILE_TEST
+ default y
+ help
+ Build the driver for RV1126 Clock Driver.
+
config CLK_RK3036
bool "Rockchip RK3036 clock controller support"
depends on ARM || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 2b78f1247372..e8543876c056 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -17,6 +17,7 @@ clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
obj-$(CONFIG_CLK_RV110X) += clk-rv1108.o
+obj-$(CONFIG_CLK_RV1126) += clk-rv1126.o
obj-$(CONFIG_CLK_RK3036) += clk-rk3036.o
obj-$(CONFIG_CLK_RK312X) += clk-rk3128.o
obj-$(CONFIG_CLK_RK3188) += clk-rk3188.o
diff --git a/drivers/clk/rockchip/clk-rv1126.c b/drivers/clk/rockchip/clk-rv1126.c
new file mode 100644
index 000000000000..c18790f5d05b
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rv1126.c
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rv1126-cru.h>
+#include "clk.h"
+
+#define RV1126_GMAC_CON 0x460
+#define RV1126_GRF_IOFUNC_CON1 0x10264
+#define RV1126_GRF_SOC_STATUS0 0x10
+
+#define RV1126_FRAC_MAX_PRATE 1200000000
+#define RV1126_CSIOUT_FRAC_MAX_PRATE 300000000
+
+enum rv1126_pmu_plls {
+ gpll,
+};
+
+enum rv1126_plls {
+ apll, dpll, cpll, hpll,
+};
+
+static struct rockchip_pll_rate_table rv1126_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1600000000, 3, 200, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 132, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 130, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 128, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 126, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 124, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 122, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 120, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 118, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1400000000, 3, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 116, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 114, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 112, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 110, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 108, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 106, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 104, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 92, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 3, 275, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(900000000, 1, 75, 2, 1, 1, 0),
+ RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 3, 350, 4, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 116, 4, 1, 1, 0),
+ RK3036_PLL_RATE(624000000, 1, 104, 4, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 100, 4, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE(504000000, 1, 84, 4, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 1, 125, 6, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 96, 6, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RV1126_DIV_ACLK_CORE_MASK 0xf
+#define RV1126_DIV_ACLK_CORE_SHIFT 4
+#define RV1126_DIV_PCLK_DBG_MASK 0x7
+#define RV1126_DIV_PCLK_DBG_SHIFT 0
+
+#define RV1126_CLKSEL1(_aclk_core, _pclk_dbg) \
+{ \
+ .reg = RV1126_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_aclk_core, RV1126_DIV_ACLK_CORE_MASK, \
+ RV1126_DIV_ACLK_CORE_SHIFT) | \
+ HIWORD_UPDATE(_pclk_dbg, RV1126_DIV_PCLK_DBG_MASK, \
+ RV1126_DIV_PCLK_DBG_SHIFT), \
+}
+
+#define RV1126_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RV1126_CLKSEL1(_aclk_core, _pclk_dbg), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rv1126_cpuclk_rates[] __initdata = {
+ RV1126_CPUCLK_RATE(1608000000, 1, 7),
+ RV1126_CPUCLK_RATE(1584000000, 1, 7),
+ RV1126_CPUCLK_RATE(1560000000, 1, 7),
+ RV1126_CPUCLK_RATE(1536000000, 1, 7),
+ RV1126_CPUCLK_RATE(1512000000, 1, 7),
+ RV1126_CPUCLK_RATE(1488000000, 1, 5),
+ RV1126_CPUCLK_RATE(1464000000, 1, 5),
+ RV1126_CPUCLK_RATE(1440000000, 1, 5),
+ RV1126_CPUCLK_RATE(1416000000, 1, 5),
+ RV1126_CPUCLK_RATE(1392000000, 1, 5),
+ RV1126_CPUCLK_RATE(1368000000, 1, 5),
+ RV1126_CPUCLK_RATE(1344000000, 1, 5),
+ RV1126_CPUCLK_RATE(1320000000, 1, 5),
+ RV1126_CPUCLK_RATE(1296000000, 1, 5),
+ RV1126_CPUCLK_RATE(1272000000, 1, 5),
+ RV1126_CPUCLK_RATE(1248000000, 1, 5),
+ RV1126_CPUCLK_RATE(1224000000, 1, 5),
+ RV1126_CPUCLK_RATE(1200000000, 1, 5),
+ RV1126_CPUCLK_RATE(1104000000, 1, 5),
+ RV1126_CPUCLK_RATE(1008000000, 1, 5),
+ RV1126_CPUCLK_RATE(912000000, 1, 5),
+ RV1126_CPUCLK_RATE(816000000, 1, 3),
+ RV1126_CPUCLK_RATE(696000000, 1, 3),
+ RV1126_CPUCLK_RATE(600000000, 1, 3),
+ RV1126_CPUCLK_RATE(408000000, 1, 1),
+ RV1126_CPUCLK_RATE(312000000, 1, 1),
+ RV1126_CPUCLK_RATE(216000000, 1, 1),
+ RV1126_CPUCLK_RATE(96000000, 1, 1),
+};
+
+static const struct rockchip_cpuclk_reg_data rv1126_cpuclk_data = {
+ .core_reg[0] = RV1126_CLKSEL_CON(0),
+ .div_core_shift[0] = 0,
+ .div_core_mask[0] = 0x1f,
+ .num_cores = 1,
+ .mux_core_alt = 0,
+ .mux_core_main = 2,
+ .mux_core_shift = 6,
+ .mux_core_mask = 0x3,
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(mux_rtc32k_p) = { "clk_pmupvtm_divout", "xin32k", "clk_osc0_div32k" };
+PNAME(mux_wifi_p) = { "clk_wifi_osc0", "clk_wifi_div" };
+PNAME(mux_gpll_usb480m_cpll_xin24m_p) = { "gpll", "usb480m", "cpll", "xin24m" };
+PNAME(mux_uart1_p) = { "sclk_uart1_div", "sclk_uart1_fracdiv", "xin24m" };
+PNAME(mux_xin24m_gpll_p) = { "xin24m", "gpll" };
+PNAME(mux_gpll_xin24m_p) = { "gpll", "xin24m" };
+PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc32k" };
+PNAME(mux_usbphy_otg_ref_p) = { "clk_ref12m", "xin_osc0_div2_usbphyref_otg" };
+PNAME(mux_usbphy_host_ref_p) = { "clk_ref12m", "xin_osc0_div2_usbphyref_host" };
+PNAME(mux_mipidsiphy_ref_p) = { "clk_ref24m", "xin_osc0_mipiphyref" };
+PNAME(mux_usb480m_p) = { "xin24m", "usb480m_phy", "clk_rtc32k" };
+PNAME(mux_armclk_p) = { "gpll", "cpll", "apll" };
+PNAME(mux_gpll_cpll_dpll_p) = { "gpll", "cpll", "dummy_dpll" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_hclk_pclk_pdbus_p) = { "gpll", "dummy_cpll" };
+PNAME(mux_gpll_cpll_usb480m_xin24m_p) = { "gpll", "cpll", "usb480m", "xin24m" };
+PNAME(mux_uart0_p) = { "sclk_uart0_div", "sclk_uart0_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "sclk_uart2_div", "sclk_uart2_frac", "xin24m" };
+PNAME(mux_uart3_p) = { "sclk_uart3_div", "sclk_uart3_frac", "xin24m" };
+PNAME(mux_uart4_p) = { "sclk_uart4_div", "sclk_uart4_frac", "xin24m" };
+PNAME(mux_uart5_p) = { "sclk_uart5_div", "sclk_uart5_frac", "xin24m" };
+PNAME(mux_cpll_gpll_p) = { "cpll", "gpll" };
+PNAME(mux_i2s0_tx_p) = { "mclk_i2s0_tx_div", "mclk_i2s0_tx_fracdiv", "i2s0_mclkin", "xin12m" };
+PNAME(mux_i2s0_rx_p) = { "mclk_i2s0_rx_div", "mclk_i2s0_rx_fracdiv", "i2s0_mclkin", "xin12m" };
+PNAME(mux_i2s0_tx_out2io_p) = { "mclk_i2s0_tx", "xin12m" };
+PNAME(mux_i2s0_rx_out2io_p) = { "mclk_i2s0_rx", "xin12m" };
+PNAME(mux_i2s1_p) = { "mclk_i2s1_div", "mclk_i2s1_fracdiv", "i2s1_mclkin", "xin12m" };
+PNAME(mux_i2s1_out2io_p) = { "mclk_i2s1", "xin12m" };
+PNAME(mux_i2s2_p) = { "mclk_i2s2_div", "mclk_i2s2_fracdiv", "i2s2_mclkin", "xin12m" };
+PNAME(mux_i2s2_out2io_p) = { "mclk_i2s2", "xin12m" };
+PNAME(mux_gpll_cpll_xin24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(mux_audpwm_p) = { "sclk_audpwm_div", "sclk_audpwm_fracdiv", "xin24m" };
+PNAME(mux_usb480m_gpll_p) = { "usb480m", "gpll" };
+PNAME(clk_gmac_src_m0_p) = { "clk_gmac_div", "clk_gmac_rgmii_m0" };
+PNAME(clk_gmac_src_m1_p) = { "clk_gmac_div", "clk_gmac_rgmii_m1" };
+PNAME(mux_clk_gmac_src_p) = { "clk_gmac_src_m0", "clk_gmac_src_m1" };
+PNAME(mux_rgmii_clk_p) = { "clk_gmac_tx_div50", "clk_gmac_tx_div5", "clk_gmac_tx_src", "clk_gmac_tx_src"};
+PNAME(mux_rmii_clk_p) = { "clk_gmac_rx_div20", "clk_gmac_rx_div2" };
+PNAME(mux_gmac_tx_rx_p) = { "rgmii_mode_clk", "rmii_mode_clk" };
+PNAME(mux_dpll_gpll_p) = { "dpll", "gpll" };
+
+static u32 rgmii_mux_idx[] = { 2, 3, 0, 1 };
+
+static struct rockchip_pll_clock rv1126_pmu_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ 0, RV1126_PMU_PLL_CON(0),
+ RV1126_PMU_MODE, 0, 3, 0, rv1126_pll_rates),
+};
+
+static struct rockchip_pll_clock rv1126_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3328, PLL_APLL, "apll", mux_pll_p,
+ 0, RV1126_PLL_CON(0),
+ RV1126_MODE_CON, 0, 0, 0, rv1126_pll_rates),
+ [dpll] = PLL(pll_rk3328, PLL_DPLL, "dpll", mux_pll_p,
+ 0, RV1126_PLL_CON(8),
+ RV1126_MODE_CON, 2, 1, 0, NULL),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ 0, RV1126_PLL_CON(16),
+ RV1126_MODE_CON, 4, 2, 0, rv1126_pll_rates),
+ [hpll] = PLL(pll_rk3328, PLL_HPLL, "hpll", mux_pll_p,
+ 0, RV1126_PLL_CON(24),
+ RV1126_MODE_CON, 6, 4, 0, rv1126_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rv1126_rtc32k_fracmux __initdata =
+ MUX(CLK_RTC32K, "clk_rtc32k", mux_rtc32k_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(0), 7, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart1_fracmux __initdata =
+ MUX(SCLK_UART1_MUX, "sclk_uart1_mux", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(4), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart0_fracmux __initdata =
+ MUX(SCLK_UART0_MUX, "sclk_uart0_mux", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(10), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart2_fracmux __initdata =
+ MUX(SCLK_UART2_MUX, "sclk_uart2_mux", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(12), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart3_fracmux __initdata =
+ MUX(SCLK_UART3_MUX, "sclk_uart3_mux", mux_uart3_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(14), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart4_fracmux __initdata =
+ MUX(SCLK_UART4_MUX, "sclk_uart4_mux", mux_uart4_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(16), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_uart5_fracmux __initdata =
+ MUX(SCLK_UART5_MUX, "sclk_uart5_mux", mux_uart5_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(18), 10, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s0_tx_fracmux __initdata =
+ MUX(MCLK_I2S0_TX_MUX, "mclk_i2s0_tx_mux", mux_i2s0_tx_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(30), 0, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s0_rx_fracmux __initdata =
+ MUX(MCLK_I2S0_RX_MUX, "mclk_i2s0_rx_mux", mux_i2s0_rx_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(30), 2, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s1_fracmux __initdata =
+ MUX(MCLK_I2S1_MUX, "mclk_i2s1_mux", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(31), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_i2s2_fracmux __initdata =
+ MUX(MCLK_I2S2_MUX, "mclk_i2s2_mux", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(33), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_audpwm_fracmux __initdata =
+ MUX(SCLK_AUDPWM_MUX, "mclk_audpwm_mux", mux_audpwm_p, CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(36), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126_clk_pmu_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 2
+ */
+ /* PD_PMU */
+ COMPOSITE_NOMUX(PCLK_PDPMU, "pclk_pdpmu", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKSEL_CON(1), 0, 5, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 0, GFLAGS),
+
+ COMPOSITE_FRACMUX(CLK_OSC0_DIV32K, "clk_osc0_div32k", "xin24m", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKSEL_CON(13), 0,
+ RV1126_PMU_CLKGATE_CON(2), 9, GFLAGS,
+ &rv1126_rtc32k_fracmux),
+
+ COMPOSITE_NOMUX(CLK_WIFI_DIV, "clk_wifi_div", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(12), 0, 6, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(2), 10, GFLAGS),
+ GATE(CLK_WIFI_OSC0, "clk_wifi_osc0", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(2), 11, GFLAGS),
+ MUX(CLK_WIFI, "clk_wifi", mux_wifi_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(12), 8, 1, MFLAGS),
+
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 1, GFLAGS),
+
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 11, GFLAGS),
+ COMPOSITE(SCLK_UART1_DIV, "sclk_uart1_div", mux_gpll_usb480m_cpll_xin24m_p, 0,
+ RV1126_PMU_CLKSEL_CON(4), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART1_FRACDIV, "sclk_uart1_fracdiv", "sclk_uart1_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(5), 0,
+ RV1126_PMU_CLKGATE_CON(0), 13, GFLAGS,
+ &rv1126_uart1_fracmux),
+ GATE(SCLK_UART1, "sclk_uart1", "sclk_uart1_mux", 0,
+ RV1126_PMU_CLKGATE_CON(0), 14, GFLAGS),
+
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C0, "clk_i2c0", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(2), 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C2, "clk_i2c2", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(3), 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(0), 10, GFLAGS),
+
+ GATE(CLK_CAPTURE_PWM0, "clk_capture_pwm0", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(CLK_PWM0, "clk_pwm0", mux_xin24m_gpll_p, 0,
+ RV1126_PMU_CLKSEL_CON(6), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(CLK_CAPTURE_PWM1, "clk_capture_pwm1", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(CLK_PWM1, "clk_pwm1", mux_xin24m_gpll_p, 0,
+ RV1126_PMU_CLKSEL_CON(6), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 4, GFLAGS),
+
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE(CLK_SPI0, "clk_spi0", mux_gpll_xin24m_p, 0,
+ RV1126_PMU_CLKSEL_CON(9), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 12, GFLAGS),
+
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", mux_xin24m_32k_p, 0,
+ RV1126_PMU_CLKSEL_CON(8), 15, 1, MFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 10, GFLAGS),
+
+ GATE(PCLK_PMUPVTM, "pclk_pmupvtm", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(2), 6, GFLAGS),
+ GATE(CLK_PMUPVTM, "clk_pmupvtm", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(CLK_CORE_PMUPVTM, "clk_core_pmupvtm", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(2), 7, GFLAGS),
+
+ COMPOSITE_NOMUX(CLK_REF12M, "clk_ref12m", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(7), 8, 7, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 15, GFLAGS),
+ GATE(0, "xin_osc0_usbphyref_otg", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(0, "xin_osc0_usbphyref_host", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ FACTOR(0, "xin_osc0_div2_usbphyref_otg", "xin_osc0_usbphyref_otg", 0, 1, 2),
+ FACTOR(0, "xin_osc0_div2_usbphyref_host", "xin_osc0_usbphyref_host", 0, 1, 2),
+ MUX(CLK_USBPHY_OTG_REF, "clk_usbphy_otg_ref", mux_usbphy_otg_ref_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(7), 6, 1, MFLAGS),
+ MUX(CLK_USBPHY_HOST_REF, "clk_usbphy_host_ref", mux_usbphy_host_ref_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(7), 7, 1, MFLAGS),
+
+ COMPOSITE_NOMUX(CLK_REF24M, "clk_ref24m", "gpll", 0,
+ RV1126_PMU_CLKSEL_CON(7), 0, 6, DFLAGS,
+ RV1126_PMU_CLKGATE_CON(1), 14, GFLAGS),
+ GATE(0, "xin_osc0_mipiphyref", "xin24m", 0,
+ RV1126_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ MUX(CLK_MIPIDSIPHY_REF, "clk_mipidsiphy_ref", mux_mipidsiphy_ref_p, CLK_SET_RATE_PARENT,
+ RV1126_PMU_CLKSEL_CON(7), 15, 1, MFLAGS),
+
+ GATE(CLK_PMU, "clk_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 15, GFLAGS),
+
+ GATE(PCLK_PMUSGRF, "pclk_pmusgrf", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_PMUCRU, "pclk_pmucru", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(PCLK_CHIPVEROTP, "pclk_chipverotp", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(PCLK_PDPMU_NIU, "pclk_pdpmu_niu", "pclk_pdpmu", CLK_IGNORE_UNUSED,
+ RV1126_PMU_CLKGATE_CON(0), 2, GFLAGS),
+
+ GATE(PCLK_SCRKEYGEN, "pclk_scrkeygen", "pclk_pdpmu", 0,
+ RV1126_PMU_CLKGATE_CON(0), 7, GFLAGS),
+};
+
+static struct rockchip_clk_branch rv1126_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 1
+ */
+ MUX(USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RV1126_MODE_CON, 10, 2, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+ /* PD_CORE */
+ COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(1), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RV1126_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(CLK_CORE_CPUPVTM, "clk_core_cpupvtm", "armclk", 0,
+ RV1126_CLKGATE_CON(0), 12, GFLAGS),
+ GATE(PCLK_CPUPVTM, "pclk_cpupvtm", "pclk_dbg", 0,
+ RV1126_CLKGATE_CON(0), 10, GFLAGS),
+ GATE(CLK_CPUPVTM, "clk_cpupvtm", "xin24m", 0,
+ RV1126_CLKGATE_CON(0), 11, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PDCORE_NIU, "hclk_pdcore_niu", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(0), 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(0), 8, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 4
+ */
+ /* PD_BUS */
+ COMPOSITE(0, "aclk_pdbus_pre", mux_gpll_cpll_dpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(2), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(ACLK_PDBUS, "aclk_pdbus", "aclk_pdbus_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE(0, "hclk_pdbus_pre", mux_hclk_pclk_pdbus_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(2), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(2), 1, GFLAGS),
+ GATE(HCLK_PDBUS, "hclk_pdbus", "hclk_pdbus_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE(0, "pclk_pdbus_pre", mux_hclk_pclk_pdbus_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(3), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(PCLK_PDBUS, "pclk_pdbus", "pclk_pdbus_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 13, GFLAGS),
+ /* aclk_dmac is controlled by sgrf_clkgat_con. */
+ SGRF_GATE(ACLK_DMAC, "aclk_dmac", "hclk_pdbus"),
+ GATE(ACLK_DCF, "aclk_dcf", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(3), 6, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(3), 7, GFLAGS),
+ GATE(PCLK_WDT, "pclk_wdt", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 10, GFLAGS),
+
+ COMPOSITE(CLK_SCR1, "clk_scr1", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(3), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(0, "clk_scr1_niu", "clk_scr1", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 14, GFLAGS),
+ GATE(CLK_SCR1_CORE, "clk_scr1_core", "clk_scr1", 0,
+ RV1126_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(CLK_SCR1_RTC, "clk_scr1_rtc", "xin24m", 0,
+ RV1126_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(CLK_SCR1_JTAG, "clk_scr1_jtag", "clk_scr1_jtag_io", 0,
+ RV1126_CLKGATE_CON(4), 10, GFLAGS),
+
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 0, GFLAGS),
+ COMPOSITE(SCLK_UART0_DIV, "sclk_uart0_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(10), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART0_FRAC, "sclk_uart0_frac", "sclk_uart0_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(11), 0,
+ RV1126_CLKGATE_CON(5), 2, GFLAGS,
+ &rv1126_uart0_fracmux),
+ GATE(SCLK_UART0, "sclk_uart0", "sclk_uart0_mux", 0,
+ RV1126_CLKGATE_CON(5), 3, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 4, GFLAGS),
+ COMPOSITE(SCLK_UART2_DIV, "sclk_uart2_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(12), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 5, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART2_FRAC, "sclk_uart2_frac", "sclk_uart2_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(13), 0,
+ RV1126_CLKGATE_CON(5), 6, GFLAGS,
+ &rv1126_uart2_fracmux),
+ GATE(SCLK_UART2, "sclk_uart2", "sclk_uart2_mux", 0,
+ RV1126_CLKGATE_CON(5), 7, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 8, GFLAGS),
+ COMPOSITE(SCLK_UART3_DIV, "sclk_uart3_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(14), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 9, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART3_FRAC, "sclk_uart3_frac", "sclk_uart3_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(15), 0,
+ RV1126_CLKGATE_CON(5), 10, GFLAGS,
+ &rv1126_uart3_fracmux),
+ GATE(SCLK_UART3, "sclk_uart3", "sclk_uart3_mux", 0,
+ RV1126_CLKGATE_CON(5), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(5), 12, GFLAGS),
+ COMPOSITE(SCLK_UART4_DIV, "sclk_uart4_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(16), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(5), 13, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART4_FRAC, "sclk_uart4_frac", "sclk_uart4_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(17), 0,
+ RV1126_CLKGATE_CON(5), 14, GFLAGS,
+ &rv1126_uart4_fracmux),
+ GATE(SCLK_UART4, "sclk_uart4", "sclk_uart4_mux", 0,
+ RV1126_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE(SCLK_UART5_DIV, "sclk_uart5_div", mux_gpll_cpll_usb480m_xin24m_p, 0,
+ RV1126_CLKSEL_CON(18), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_UART5_FRAC, "sclk_uart5_frac", "sclk_uart5_div", CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(19), 0,
+ RV1126_CLKGATE_CON(6), 2, GFLAGS,
+ &rv1126_uart5_fracmux),
+ GATE(SCLK_UART5, "sclk_uart5", "sclk_uart5_mux", 0,
+ RV1126_CLKGATE_CON(6), 3, GFLAGS),
+
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(3), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C1, "clk_i2c1", "gpll", 0,
+ RV1126_CLKSEL_CON(5), 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(3), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C3, "clk_i2c3", "gpll", 0,
+ RV1126_CLKSEL_CON(5), 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(3), 13, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(3), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C4, "clk_i2c4", "gpll", 0,
+ RV1126_CLKSEL_CON(6), 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_I2C5, "clk_i2c5", "gpll", 0,
+ RV1126_CLKSEL_CON(6), 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(4), 1, GFLAGS),
+
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(CLK_SPI1, "clk_spi1", mux_gpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(8), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(4), 3, GFLAGS),
+
+ GATE(CLK_CAPTURE_PWM2, "clk_capture_pwm2", "xin24m", 0,
+ RV1126_CLKGATE_CON(4), 6, GFLAGS),
+ GATE(PCLK_PWM2, "pclk_pwm2", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(4), 4, GFLAGS),
+ COMPOSITE(CLK_PWM2, "clk_pwm2", mux_xin24m_gpll_p, 0,
+ RV1126_CLKSEL_CON(9), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(4), 5, GFLAGS),
+
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 0, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO1, "dbclk_gpio1", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(21), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 2, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO2, "dbclk_gpio2", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(22), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 4, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO3, "dbclk_gpio3", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(23), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO4, "dbclk_gpio4", mux_xin24m_32k_p, 0,
+ RV1126_CLKSEL_CON(24), 15, 1, MFLAGS,
+ RV1126_CLKGATE_CON(7), 7, GFLAGS),
+
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_SARADC, "clk_saradc", "xin24m", 0,
+ RV1126_CLKSEL_CON(20), 0, 11, DFLAGS,
+ RV1126_CLKGATE_CON(6), 5, GFLAGS),
+
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 7, GFLAGS),
+ GATE(CLK_TIMER0, "clk_timer0", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(CLK_TIMER1, "clk_timer1", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(CLK_TIMER2, "clk_timer2", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(CLK_TIMER3, "clk_timer3", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 11, GFLAGS),
+ GATE(CLK_TIMER4, "clk_timer4", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "xin24m", 0,
+ RV1126_CLKGATE_CON(6), 13, GFLAGS),
+
+ GATE(ACLK_SPINLOCK, "aclk_spinlock", "hclk_pdbus", 0,
+ RV1126_CLKGATE_CON(6), 6, GFLAGS),
+
+ GATE(ACLK_DECOM, "aclk_decom", "aclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(PCLK_DECOM, "pclk_decom", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE(DCLK_DECOM, "dclk_decom", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(25), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(7), 13, GFLAGS),
+
+ GATE(PCLK_CAN, "pclk_can", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CLK_CAN, "clk_can", mux_gpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(25), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(7), 9, GFLAGS),
+ /* pclk_otp and clk_otp are controlled by sgrf_clkgat_con. */
+ SGRF_GATE(CLK_OTP, "clk_otp", "xin24m"),
+ SGRF_GATE(PCLK_OTP, "pclk_otp", "pclk_pdbus"),
+
+ GATE(PCLK_NPU_TSADC, "pclk_npu_tsadc", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(24), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_NPU_TSADC, "clk_npu_tsadc", "xin24m", 0,
+ RV1126_CLKSEL_CON(71), 0, 11, DFLAGS,
+ RV1126_CLKGATE_CON(24), 4, GFLAGS),
+ GATE(CLK_NPU_TSADCPHY, "clk_npu_tsadcphy", "clk_npu_tsadc", 0,
+ RV1126_CLKGATE_CON(24), 5, GFLAGS),
+ GATE(PCLK_CPU_TSADC, "pclk_cpu_tsadc", "pclk_pdbus", 0,
+ RV1126_CLKGATE_CON(24), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPU_TSADC, "clk_cpu_tsadc", "xin24m", 0,
+ RV1126_CLKSEL_CON(70), 0, 11, DFLAGS,
+ RV1126_CLKGATE_CON(24), 1, GFLAGS),
+ GATE(CLK_CPU_TSADCPHY, "clk_cpu_tsadcphy", "clk_cpu_tsadc", 0,
+ RV1126_CLKGATE_CON(24), 2, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+ /* PD_AUDIO */
+ COMPOSITE_NOMUX(HCLK_PDAUDIO, "hclk_pdaudio", "gpll", 0,
+ RV1126_CLKSEL_CON(26), 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(9), 0, GFLAGS),
+
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE(MCLK_I2S0_TX_DIV, "mclk_i2s0_tx_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(27), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(9), 5, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S0_TX_FRACDIV, "mclk_i2s0_tx_fracdiv", "mclk_i2s0_tx_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(28), 0,
+ RV1126_CLKGATE_CON(9), 6, GFLAGS,
+ &rv1126_i2s0_tx_fracmux),
+ GATE(MCLK_I2S0_TX, "mclk_i2s0_tx", "mclk_i2s0_tx_mux", 0,
+ RV1126_CLKGATE_CON(9), 9, GFLAGS),
+ COMPOSITE(MCLK_I2S0_RX_DIV, "mclk_i2s0_rx_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(27), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RV1126_CLKGATE_CON(9), 7, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S0_RX_FRACDIV, "mclk_i2s0_rx_fracdiv", "mclk_i2s0_rx_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(29), 0,
+ RV1126_CLKGATE_CON(9), 8, GFLAGS,
+ &rv1126_i2s0_rx_fracmux),
+ GATE(MCLK_I2S0_RX, "mclk_i2s0_rx", "mclk_i2s0_rx_mux", 0,
+ RV1126_CLKGATE_CON(9), 10, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S0_TX_OUT2IO, "mclk_i2s0_tx_out2io", mux_i2s0_tx_out2io_p, 0,
+ RV1126_CLKSEL_CON(30), 6, 1, MFLAGS,
+ RV1126_CLKGATE_CON(9), 13, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S0_RX_OUT2IO, "mclk_i2s0_rx_out2io", mux_i2s0_rx_out2io_p, 0,
+ RV1126_CLKSEL_CON(30), 8, 1, MFLAGS,
+ RV1126_CLKGATE_CON(9), 14, GFLAGS),
+
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 0, GFLAGS),
+ COMPOSITE(MCLK_I2S1_DIV, "mclk_i2s1_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 1, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S1_FRACDIV, "mclk_i2s1_fracdiv", "mclk_i2s1_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(32), 0,
+ RV1126_CLKGATE_CON(10), 2, GFLAGS,
+ &rv1126_i2s1_fracmux),
+ GATE(MCLK_I2S1, "mclk_i2s1", "mclk_i2s1_mux", 0,
+ RV1126_CLKGATE_CON(10), 3, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S1_OUT2IO, "mclk_i2s1_out2io", mux_i2s1_out2io_p, 0,
+ RV1126_CLKSEL_CON(31), 12, 1, MFLAGS,
+ RV1126_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 5, GFLAGS),
+ COMPOSITE(MCLK_I2S2_DIV, "mclk_i2s2_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(33), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 6, GFLAGS),
+ COMPOSITE_FRACMUX(MCLK_I2S2_FRACDIV, "mclk_i2s2_fracdiv", "mclk_i2s2_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(34), 0,
+ RV1126_CLKGATE_CON(10), 7, GFLAGS,
+ &rv1126_i2s2_fracmux),
+ GATE(MCLK_I2S2, "mclk_i2s2", "mclk_i2s2_mux", 0,
+ RV1126_CLKGATE_CON(10), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_I2S2_OUT2IO, "mclk_i2s2_out2io", mux_i2s2_out2io_p, 0,
+ RV1126_CLKSEL_CON(33), 10, 1, MFLAGS,
+ RV1126_CLKGATE_CON(10), 9, GFLAGS),
+
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 10, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(35), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 11, GFLAGS),
+
+ GATE(HCLK_AUDPWM, "hclk_audpwm", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(10), 12, GFLAGS),
+ COMPOSITE(SCLK_ADUPWM_DIV, "sclk_audpwm_div", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(36), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(10), 13, GFLAGS),
+ COMPOSITE_FRACMUX(SCLK_AUDPWM_FRACDIV, "sclk_audpwm_fracdiv", "sclk_audpwm_div",
+ CLK_SET_RATE_PARENT,
+ RV1126_CLKSEL_CON(37), 0,
+ RV1126_CLKGATE_CON(10), 14, GFLAGS,
+ &rv1126_audpwm_fracmux),
+ GATE(SCLK_AUDPWM, "sclk_audpwm", "mclk_audpwm_mux", 0,
+ RV1126_CLKGATE_CON(10), 15, GFLAGS),
+
+ GATE(PCLK_ACDCDIG, "pclk_acdcdig", "hclk_pdaudio", 0,
+ RV1126_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(CLK_ACDCDIG_ADC, "clk_acdcdig_adc", "mclk_i2s0_rx", 0,
+ RV1126_CLKGATE_CON(11), 2, GFLAGS),
+ GATE(CLK_ACDCDIG_DAC, "clk_acdcdig_dac", "mclk_i2s0_tx", 0,
+ RV1126_CLKGATE_CON(11), 3, GFLAGS),
+ COMPOSITE(CLK_ACDCDIG_I2C, "clk_acdcdig_i2c", mux_gpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(72), 8, 1, MFLAGS, 0, 7, DFLAGS,
+ RV1126_CLKGATE_CON(11), 1, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 12
+ */
+ /* PD_PHP */
+ COMPOSITE(ACLK_PDPHP, "aclk_pdphp", mux_gpll_cpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(53), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(17), 0, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PDPHP, "hclk_pdphp", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(53), 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(17), 1, GFLAGS),
+ /* PD_SDCARD */
+ GATE(HCLK_PDSDMMC, "hclk_pdsdmmc", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_pdsdmmc", 0,
+ RV1126_CLKGATE_CON(18), 4, GFLAGS),
+ COMPOSITE(CLK_SDMMC, "clk_sdmmc", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(55), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 5, GFLAGS),
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RV1126_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", RV1126_SDMMC_CON1, 1),
+
+ /* PD_SDIO */
+ GATE(HCLK_PDSDIO, "hclk_pdsdio", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_pdsdio", 0,
+ RV1126_CLKGATE_CON(18), 6, GFLAGS),
+ COMPOSITE(CLK_SDIO, "clk_sdio", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(56), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 7, GFLAGS),
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RV1126_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", RV1126_SDIO_CON1, 1),
+
+ /* PD_NVM */
+ GATE(HCLK_PDNVM, "hclk_pdnvm", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(18), 1, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 8, GFLAGS),
+ COMPOSITE(CLK_EMMC, "clk_emmc", mux_gpll_cpll_xin24m_p, 0,
+ RV1126_CLKSEL_CON(57), 14, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 9, GFLAGS),
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 13, GFLAGS),
+ COMPOSITE(CLK_NANDC, "clk_nandc", mux_gpll_cpll_p, 0,
+ RV1126_CLKSEL_CON(59), 15, 1, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 14, GFLAGS),
+ GATE(HCLK_SFC, "hclk_sfc", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(HCLK_SFCXIP, "hclk_sfcxip", "hclk_pdnvm", 0,
+ RV1126_CLKGATE_CON(18), 11, GFLAGS),
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(58), 15, 1, MFLAGS, 0, 8, DFLAGS,
+ RV1126_CLKGATE_CON(18), 12, GFLAGS),
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RV1126_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", RV1126_EMMC_CON1, 1),
+
+ /* PD_USB */
+ GATE(ACLK_PDUSB, "aclk_pdusb", "aclk_pdphp", 0,
+ RV1126_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(HCLK_PDUSB, "hclk_pdusb", "hclk_pdphp", 0,
+ RV1126_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(HCLK_USBHOST, "hclk_usbhost", "hclk_pdusb", 0,
+ RV1126_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(HCLK_USBHOST_ARB, "hclk_usbhost_arb", "hclk_pdusb", 0,
+ RV1126_CLKGATE_CON(19), 5, GFLAGS),
+ COMPOSITE(CLK_USBHOST_UTMI_OHCI, "clk_usbhost_utmi_ohci", mux_usb480m_gpll_p, 0,
+ RV1126_CLKSEL_CON(61), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(19), 6, GFLAGS),
+ GATE(ACLK_USBOTG, "aclk_usbotg", "aclk_pdusb", 0,
+ RV1126_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(CLK_USBOTG_REF, "clk_usbotg_ref", "xin24m", 0,
+ RV1126_CLKGATE_CON(19), 8, GFLAGS),
+ /* PD_GMAC */
+ GATE(ACLK_PDGMAC, "aclk_pdgmac", "aclk_pdphp", 0,
+ RV1126_CLKGATE_CON(20), 0, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PDGMAC, "pclk_pdgmac", "aclk_pdgmac", 0,
+ RV1126_CLKSEL_CON(63), 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(20), 1, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_pdgmac", 0,
+ RV1126_CLKGATE_CON(20), 4, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_pdgmac", 0,
+ RV1126_CLKGATE_CON(20), 5, GFLAGS),
+
+ COMPOSITE(CLK_GMAC_DIV, "clk_gmac_div", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(63), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(20), 6, GFLAGS),
+ GATE(CLK_GMAC_RGMII_M0, "clk_gmac_rgmii_m0", "clk_gmac_rgmii_clkin_m0", 0,
+ RV1126_CLKGATE_CON(20), 12, GFLAGS),
+ MUX(CLK_GMAC_SRC_M0, "clk_gmac_src_m0", clk_gmac_src_m0_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 0, 1, MFLAGS),
+ GATE(CLK_GMAC_RGMII_M1, "clk_gmac_rgmii_m1", "clk_gmac_rgmii_clkin_m1", 0,
+ RV1126_CLKGATE_CON(20), 13, GFLAGS),
+ MUX(CLK_GMAC_SRC_M1, "clk_gmac_src_m1", clk_gmac_src_m1_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 5, 1, MFLAGS),
+ MUXGRF(CLK_GMAC_SRC, "clk_gmac_src", mux_clk_gmac_src_p, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ RV1126_GRF_IOFUNC_CON1, 12, 1, MFLAGS),
+
+ GATE(CLK_GMAC_REF, "clk_gmac_ref", "clk_gmac_src", 0,
+ RV1126_CLKGATE_CON(20), 7, GFLAGS),
+
+ GATE(CLK_GMAC_TX_SRC, "clk_gmac_tx_src", "clk_gmac_src", 0,
+ RV1126_CLKGATE_CON(20), 9, GFLAGS),
+ FACTOR(CLK_GMAC_TX_DIV5, "clk_gmac_tx_div5", "clk_gmac_tx_src", 0, 1, 5),
+ FACTOR(CLK_GMAC_TX_DIV50, "clk_gmac_tx_div50", "clk_gmac_tx_src", 0, 1, 50),
+ MUXTBL(RGMII_MODE_CLK, "rgmii_mode_clk", mux_rgmii_clk_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 2, 2, MFLAGS, rgmii_mux_idx),
+ GATE(CLK_GMAC_RX_SRC, "clk_gmac_rx_src", "clk_gmac_src", 0,
+ RV1126_CLKGATE_CON(20), 8, GFLAGS),
+ FACTOR(CLK_GMAC_RX_DIV2, "clk_gmac_rx_div2", "clk_gmac_rx_src", 0, 1, 2),
+ FACTOR(CLK_GMAC_RX_DIV20, "clk_gmac_rx_div20", "clk_gmac_rx_src", 0, 1, 20),
+ MUX(RMII_MODE_CLK, "rmii_mode_clk", mux_rmii_clk_p, CLK_SET_RATE_PARENT,
+ RV1126_GMAC_CON, 1, 1, MFLAGS),
+ MUX(CLK_GMAC_TX_RX, "clk_gmac_tx_rx", mux_gmac_tx_rx_p, CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT,
+ RV1126_GMAC_CON, 4, 1, MFLAGS),
+
+ GATE(CLK_GMAC_PTPREF, "clk_gmac_ptpref", "xin24m", 0,
+ RV1126_CLKGATE_CON(20), 10, GFLAGS),
+ COMPOSITE(CLK_GMAC_ETHERNET_OUT, "clk_gmac_ethernet_out2io", mux_cpll_gpll_p, 0,
+ RV1126_CLKSEL_CON(61), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(20), 11, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 15
+ */
+ GATE(PCLK_PDTOP, "pclk_pdtop", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(PCLK_DSIPHY, "pclk_dsiphy", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(PCLK_CSIPHY0, "pclk_csiphy0", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(23), 2, GFLAGS),
+ GATE(PCLK_CSIPHY1, "pclk_csiphy1", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(PCLK_USBPHY_HOST, "pclk_usbphy_host", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(19), 13, GFLAGS),
+ GATE(PCLK_USBPHY_OTG, "pclk_usbphy_otg", "pclk_pdtop", 0,
+ RV1126_CLKGATE_CON(19), 12, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+ /* PD_CORE */
+ COMPOSITE_NOMUX(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(1), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RV1126_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "pclk_dbg_daplite", "pclk_dbg", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(0, "clk_a7_jtag", "clk_jtag_ori", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(0, "aclk_core_niu", "aclk_core", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(0, "pclk_dbg_niu", "pclk_dbg", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(0), 4, GFLAGS),
+ /*
+ * Clock-Architecture Diagram 4
+ */
+ /* PD_BUS */
+ GATE(0, "aclk_pdbus_hold_niu1", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 10, GFLAGS),
+ GATE(0, "aclk_pdbus_niu1", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 3, GFLAGS),
+ GATE(0, "hclk_pdbus_niu1", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(0, "pclk_pdbus_niu1", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(0, "aclk_pdbus_niu2", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 6, GFLAGS),
+ GATE(0, "hclk_pdbus_niu2", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 7, GFLAGS),
+ GATE(0, "aclk_pdbus_niu3", "aclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 8, GFLAGS),
+ GATE(0, "hclk_pdbus_niu3", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(2), 9, GFLAGS),
+ GATE(0, "pclk_grf", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(6), 15, GFLAGS),
+ GATE(0, "pclk_sgrf", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(8), 4, GFLAGS),
+ GATE(0, "aclk_sysram", "hclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(3), 9, GFLAGS),
+ GATE(0, "pclk_intmux", "pclk_pdbus", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(7), 14, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 6
+ */
+ /* PD_AUDIO */
+ GATE(0, "hclk_pdaudio_niu", "hclk_pdaudio", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(0, "pclk_pdaudio_niu", "hclk_pdaudio", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(9), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 12
+ */
+ /* PD_PHP */
+ GATE(0, "aclk_pdphpmid", "aclk_pdphp", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(0, "hclk_pdphpmid", "hclk_pdphp", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 3, GFLAGS),
+ GATE(0, "aclk_pdphpmid_niu", "aclk_pdphpmid", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 4, GFLAGS),
+ GATE(0, "hclk_pdphpmid_niu", "hclk_pdphpmid", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 5, GFLAGS),
+
+ /* PD_SDCARD */
+ GATE(0, "hclk_pdsdmmc_niu", "hclk_pdsdmmc", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 7, GFLAGS),
+
+ /* PD_SDIO */
+ GATE(0, "hclk_pdsdio_niu", "hclk_pdsdio", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(17), 9, GFLAGS),
+
+ /* PD_NVM */
+ GATE(0, "hclk_pdnvm_niu", "hclk_pdnvm", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(18), 3, GFLAGS),
+
+ /* PD_USB */
+ GATE(0, "aclk_pdusb_niu", "aclk_pdusb", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(19), 2, GFLAGS),
+ GATE(0, "hclk_pdusb_niu", "hclk_pdusb", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(19), 3, GFLAGS),
+
+ /* PD_GMAC */
+ GATE(0, "aclk_pdgmac_niu", "aclk_pdgmac", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(20), 2, GFLAGS),
+ GATE(0, "pclk_pdgmac_niu", "pclk_pdgmac", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(20), 3, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 13
+ */
+ /* PD_DDR */
+ COMPOSITE_NOMUX(0, "pclk_pdddr_pre", "gpll", CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(64), 0, 5, DFLAGS,
+ RV1126_CLKGATE_CON(21), 0, GFLAGS),
+ GATE(PCLK_PDDDR, "pclk_pdddr", "pclk_pdddr_pre", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 15, GFLAGS),
+ GATE(0, "pclk_ddr_msch", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 6, GFLAGS),
+ COMPOSITE_NOGATE(SCLK_DDRCLK, "sclk_ddrc", mux_dpll_gpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(64), 15, 1, MFLAGS, 8, 5, DFLAGS |
+ CLK_DIVIDER_POWER_OF_TWO),
+ COMPOSITE(CLK_DDRPHY, "clk_ddrphy", mux_dpll_gpll_p, CLK_IGNORE_UNUSED,
+ RV1126_CLKSEL_CON(64), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(0, "clk1x_phy", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 1, GFLAGS),
+ GATE(0, "clk_ddr_msch", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 10, GFLAGS),
+ GATE(0, "pclk_ddr_dfictl", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(0, "clk_ddr_dfictl", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 13, GFLAGS),
+ GATE(0, "pclk_ddr_standby", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(0, "clk_ddr_standby", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 14, GFLAGS),
+ GATE(0, "aclk_ddr_split", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 9, GFLAGS),
+ GATE(0, "pclk_ddr_grf", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 5, GFLAGS),
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_pdddr", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 3, GFLAGS),
+ GATE(CLK_DDR_MON, "clk_ddr_mon", "clk_ddrphy", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(20), 15, GFLAGS),
+ GATE(TMCLK_DDR_MON, "tmclk_ddr_mon", "xin24m", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(21), 7, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 15
+ */
+ GATE(0, "pclk_topniu", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 9, GFLAGS),
+ GATE(PCLK_TOPCRU, "pclk_topcru", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 10, GFLAGS),
+ GATE(PCLK_TOPGRF, "pclk_topgrf", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 11, GFLAGS),
+ GATE(PCLK_CPUEMADET, "pclk_cpuemadet", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 12, GFLAGS),
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_pdtop", CLK_IGNORE_UNUSED,
+ RV1126_CLKGATE_CON(23), 0, GFLAGS),
+};
+
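+/*
+ * Clocks that must never be gated: rockchip_clk_protect_critical() takes a
+ * permanent enable reference on each of these at init, so the common clock
+ * framework's unused-clock cleanup leaves them running.
+ */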
+static const char *const rv1126_cru_critical_clocks[] __initconst = {
+ "gpll",
+ "cpll",
+ "hpll",
+ "armclk",
+ "pclk_dbg",
+ "pclk_pdpmu",
+ "aclk_pdbus",
+ "hclk_pdbus",
+ "pclk_pdbus",
+ "aclk_pdphp",
+ "hclk_pdphp",
+ "clk_ddrphy",
+ "pclk_pdddr",
+ "pclk_pdtop",
+ "clk_usbhost_utmi_ohci",
+ "aclk_pdjpeg_niu",
+ "hclk_pdjpeg_niu",
+ "aclk_pdvdec_niu",
+ "hclk_pdvdec_niu",
+};
+
+static void __init rv1126_pmu_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru pmu region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rv1126_pmu_pll_clks,
+ ARRAY_SIZE(rv1126_pmu_pll_clks),
+ RV1126_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_branches(ctx, rv1126_clk_pmu_branches,
+ ARRAY_SIZE(rv1126_clk_pmu_branches));
+
+ rockchip_register_softrst(np, 2, reg_base + RV1126_PMU_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+static void __init rv1126_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rv1126_pll_clks,
+ ARRAY_SIZE(rv1126_pll_clks),
+ RV1126_GRF_SOC_STATUS0);
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &rv1126_cpuclk_data, rv1126_cpuclk_rates,
+ ARRAY_SIZE(rv1126_cpuclk_rates));
+
+ rockchip_clk_register_branches(ctx, rv1126_clk_branches,
+ ARRAY_SIZE(rv1126_clk_branches));
+
+ rockchip_register_softrst(np, 15, reg_base + RV1126_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RV1126_GLB_SRST_FST, NULL);
+
+ rockchip_clk_protect_critical(rv1126_cru_critical_clocks,
+ ARRAY_SIZE(rv1126_cru_critical_clocks));
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
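+/*
+ * The RV1126 exposes two clock controller register blocks (CRU and PMUCRU);
+ * both compatibles are served by this driver and dispatched through the
+ * per-compatible init callback below.
+ */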
+struct clk_rv1126_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rv1126_inits clk_rv1126_pmucru_init = {
+ .inits = rv1126_pmu_clk_init,
+};
+
+static const struct clk_rv1126_inits clk_rv1126_cru_init = {
+ .inits = rv1126_clk_init,
+};
+
+static const struct of_device_id clk_rv1126_match_table[] = {
+ {
+ .compatible = "rockchip,rv1126-cru",
+ .data = &clk_rv1126_cru_init,
+ }, {
+ .compatible = "rockchip,rv1126-pmucru",
+ .data = &clk_rv1126_pmucru_init,
+ },
+ { }
+};
+
+static int __init clk_rv1126_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct clk_rv1126_inits *init_data;
+
+ init_data = of_device_get_match_data(&pdev->dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(np);
+
+ return 0;
+}
+
+static struct platform_driver clk_rv1126_driver = {
+ .driver = {
+ .name = "clk-rv1126",
+ .of_match_table = clk_rv1126_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rv1126_driver, clk_rv1126_probe);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index bb8a844309bf..e63d4f20b479 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -40,6 +40,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *base,
int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
+ u32 *mux_table,
int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
struct clk_div_table *div_table, int gate_offset,
u8 gate_shift, u8 gate_flags, unsigned long flags,
@@ -62,6 +63,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
mux->shift = mux_shift;
mux->mask = BIT(mux_width) - 1;
mux->flags = mux_flags;
+ mux->table = mux_table;
mux->lock = lock;
mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
: &clk_mux_ops;
@@ -270,6 +272,8 @@ static struct clk *rockchip_clk_register_frac_branch(
frac_mux->shift = child->mux_shift;
frac_mux->mask = BIT(child->mux_width) - 1;
frac_mux->flags = child->mux_flags;
+ if (child->mux_table)
+ frac_mux->table = child->mux_table;
frac_mux->lock = lock;
frac_mux->hw.init = &init;
@@ -444,11 +448,21 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
/* catch simple muxes */
switch (list->branch_type) {
case branch_mux:
- clk = clk_register_mux(NULL, list->name,
- list->parent_names, list->num_parents,
- flags, ctx->reg_base + list->muxdiv_offset,
- list->mux_shift, list->mux_width,
- list->mux_flags, &ctx->lock);
+ if (list->mux_table)
+ clk = clk_register_mux_table(NULL, list->name,
+ list->parent_names, list->num_parents,
+ flags,
+ ctx->reg_base + list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, list->mux_table,
+ &ctx->lock);
+ else
+ clk = clk_register_mux(NULL, list->name,
+ list->parent_names, list->num_parents,
+ flags,
+ ctx->reg_base + list->muxdiv_offset,
+ list->mux_shift, list->mux_width,
+ list->mux_flags, &ctx->lock);
break;
case branch_muxgrf:
clk = rockchip_clk_register_muxgrf(list->name,
@@ -506,7 +520,8 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
ctx->reg_base, list->muxdiv_offset,
list->mux_shift,
list->mux_width, list->mux_flags,
- list->div_offset, list->div_shift, list->div_width,
+ list->mux_table, list->div_offset,
+ list->div_shift, list->div_width,
list->div_flags, list->div_table,
list->gate_offset, list->gate_shift,
list->gate_flags, flags, &ctx->lock);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 7aa45cc70287..ee01739e4a7c 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -79,6 +79,25 @@ struct clk;
#define RV1108_EMMC_CON0 0x1e8
#define RV1108_EMMC_CON1 0x1ec
+#define RV1126_PMU_MODE 0x0
+#define RV1126_PMU_PLL_CON(x) ((x) * 0x4 + 0x10)
+#define RV1126_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RV1126_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x180)
+#define RV1126_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x200)
+#define RV1126_PLL_CON(x) ((x) * 0x4)
+#define RV1126_MODE_CON 0x90
+#define RV1126_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RV1126_CLKGATE_CON(x) ((x) * 0x4 + 0x280)
+#define RV1126_SOFTRST_CON(x) ((x) * 0x4 + 0x300)
+#define RV1126_GLB_SRST_FST 0x408
+#define RV1126_GLB_SRST_SND 0x40c
+#define RV1126_SDMMC_CON0 0x440
+#define RV1126_SDMMC_CON1 0x444
+#define RV1126_SDIO_CON0 0x448
+#define RV1126_SDIO_CON1 0x44c
+#define RV1126_EMMC_CON0 0x450
+#define RV1126_EMMC_CON1 0x454
+
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
@@ -448,6 +467,7 @@ struct rockchip_clk_branch {
u8 mux_shift;
u8 mux_width;
u8 mux_flags;
+ u32 *mux_table;
int div_offset;
u8 div_shift;
u8 div_width;
@@ -680,6 +700,22 @@ struct rockchip_clk_branch {
.gate_offset = -1, \
}
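+/*
+ * Mux branch whose register field values are translated through an explicit
+ * index table (registered via clk_register_mux_table()).
+ */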
+#define MUXTBL(_id, cname, pnames, f, o, s, w, mf, mt) \
+ { \
+ .id = _id, \
+ .branch_type = branch_mux, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = o, \
+ .mux_shift = s, \
+ .mux_width = w, \
+ .mux_flags = mf, \
+ .gate_offset = -1, \
+ .mux_table = mt, \
+ }
+
#define MUXGRF(_id, cname, pnames, f, o, s, w, mf) \
{ \
.id = _id, \
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index e6d6cbf8c4e6..273f77d54dab 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -81,19 +81,17 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
const struct exynos_clkout_variant *variant;
- const struct of_device_id *match;
if (!dev->parent) {
dev_err(dev, "not instantiated from MFD\n");
return -EINVAL;
}
- match = of_match_device(exynos_clkout_ids, dev->parent);
- if (!match) {
+ variant = of_device_get_match_data(dev->parent);
+ if (!variant) {
dev_err(dev, "cannot match parent device\n");
return -EINVAL;
}
- variant = match->data;
*mux_mask = variant->mux_mask;
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
index a7b106302706..62ce6814f141 100644
--- a/drivers/clk/samsung/clk-exynos7885.c
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -27,6 +27,11 @@
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_G3D 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_BUS 0x1028
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_CARD 0x102c
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_EMBD 0x1030
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_SDIO 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_FSYS_USB30DRD 0x1038
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1058
#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0 0x105c
#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1 0x1060
@@ -39,6 +44,11 @@
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x181c
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_G3D 0x1824
+#define CLK_CON_DIV_CLKCMU_FSYS_BUS 0x1844
+#define CLK_CON_DIV_CLKCMU_FSYS_MMC_CARD 0x1848
+#define CLK_CON_DIV_CLKCMU_FSYS_MMC_EMBD 0x184c
+#define CLK_CON_DIV_CLKCMU_FSYS_MMC_SDIO 0x1850
+#define CLK_CON_DIV_CLKCMU_FSYS_USB30DRD 0x1854
#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x1874
#define CLK_CON_DIV_CLKCMU_PERI_SPI0 0x1878
#define CLK_CON_DIV_CLKCMU_PERI_SPI1 0x187c
@@ -59,6 +69,11 @@
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_G3D 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_BUS 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_CARD 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_EMBD 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_SDIO 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_FSYS_USB30DRD 0x2054
#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x207c
#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0 0x2080
#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1 0x2084
@@ -76,6 +91,11 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_G3D,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_SDIO,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_USB30DRD,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0,
CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1,
@@ -88,6 +108,11 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_G3D,
+ CLK_CON_DIV_CLKCMU_FSYS_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_EMBD,
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_SDIO,
+ CLK_CON_DIV_CLKCMU_FSYS_USB30DRD,
CLK_CON_DIV_CLKCMU_PERI_BUS,
CLK_CON_DIV_CLKCMU_PERI_SPI0,
CLK_CON_DIV_CLKCMU_PERI_SPI1,
@@ -108,6 +133,11 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_G3D,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_SDIO,
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_USB30DRD,
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0,
CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1,
@@ -146,6 +176,13 @@ PNAME(mout_peri_usi0_p) = { "oscclk", "dout_shared0_div4" };
PNAME(mout_peri_usi1_p) = { "oscclk", "dout_shared0_div4" };
PNAME(mout_peri_usi2_p) = { "oscclk", "dout_shared0_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_FSYS */
+PNAME(mout_fsys_bus_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_mmc_card_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_mmc_embd_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_mmc_sdio_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_fsys_usb30drd_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
@@ -174,6 +211,18 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_PERI_USI1, 0, 1),
MUX(CLK_MOUT_PERI_USI2, "mout_peri_usi2", mout_peri_usi2_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_USI2, 0, 1),
+
+ /* FSYS */
+ MUX(CLK_MOUT_FSYS_BUS, "mout_fsys_bus", mout_fsys_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_BUS, 0, 1),
+ MUX(CLK_MOUT_FSYS_MMC_CARD, "mout_fsys_mmc_card", mout_fsys_mmc_card_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_CARD, 0, 1),
+ MUX(CLK_MOUT_FSYS_MMC_EMBD, "mout_fsys_mmc_embd", mout_fsys_mmc_embd_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_EMBD, 0, 1),
+ MUX(CLK_MOUT_FSYS_MMC_SDIO, "mout_fsys_mmc_sdio", mout_fsys_mmc_sdio_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_MMC_SDIO, 0, 1),
+ MUX(CLK_MOUT_FSYS_USB30DRD, "mout_fsys_usb30drd", mout_fsys_usb30drd_p,
+ CLK_CON_MUX_MUX_CLKCMU_FSYS_USB30DRD, 0, 1),
};
static const struct samsung_div_clock top_div_clks[] __initconst = {
@@ -220,6 +269,18 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_PERI_USI1, 0, 4),
DIV(CLK_DOUT_PERI_USI2, "dout_peri_usi2", "gout_peri_usi2",
CLK_CON_DIV_CLKCMU_PERI_USI2, 0, 4),
+
+ /* FSYS */
+ DIV(CLK_DOUT_FSYS_BUS, "dout_fsys_bus", "gout_fsys_bus",
+ CLK_CON_DIV_CLKCMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_FSYS_MMC_CARD, "dout_fsys_mmc_card", "gout_fsys_mmc_card",
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_CARD, 0, 9),
+ DIV(CLK_DOUT_FSYS_MMC_EMBD, "dout_fsys_mmc_embd", "gout_fsys_mmc_embd",
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_EMBD, 0, 9),
+ DIV(CLK_DOUT_FSYS_MMC_SDIO, "dout_fsys_mmc_sdio", "gout_fsys_mmc_sdio",
+ CLK_CON_DIV_CLKCMU_FSYS_MMC_SDIO, 0, 9),
+ DIV(CLK_DOUT_FSYS_USB30DRD, "dout_fsys_usb30drd", "gout_fsys_usb30drd",
+ CLK_CON_DIV_CLKCMU_FSYS_USB30DRD, 0, 4),
};
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
@@ -250,6 +311,18 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_PERI_USI1, 21, 0, 0),
GATE(CLK_GOUT_PERI_USI2, "gout_peri_usi2", "mout_peri_usi2",
CLK_CON_GAT_GATE_CLKCMU_PERI_USI2, 21, 0, 0),
+
+ /* FSYS */
+ GATE(CLK_GOUT_FSYS_BUS, "gout_fsys_bus", "mout_fsys_bus",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_MMC_CARD, "gout_fsys_mmc_card", "mout_fsys_mmc_card",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_CARD, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_MMC_EMBD, "gout_fsys_mmc_embd", "mout_fsys_mmc_embd",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_EMBD, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_MMC_SDIO, "gout_fsys_mmc_sdio", "mout_fsys_mmc_sdio",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_MMC_SDIO, 21, 0, 0),
+ GATE(CLK_GOUT_FSYS_USB30DRD, "gout_fsys_usb30drd", "mout_fsys_usb30drd",
+ CLK_CON_GAT_GATE_CLKCMU_FSYS_USB30DRD, 21, 0, 0),
};
static const struct samsung_cmu_info top_cmu_info __initconst = {
@@ -498,13 +571,20 @@ CLK_OF_DECLARE(exynos7885_cmu_peri, "samsung,exynos7885-cmu-peri",
/* ---- CMU_CORE ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CORE (0x12000000) */
-#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0100
-#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0120
-#define PLL_CON0_MUX_CLKCMU_CORE_G3D_USER 0x0140
-#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
-#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
-#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2054
-#define CLK_CON_GAT_GOUT_CORE_GIC400_CLK 0x2058
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_CORE_G3D_USER 0x0140
+#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2054
+#define CLK_CON_GAT_GOUT_CORE_GIC400_CLK 0x2058
+#define CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_ACLK 0x215c
+#define CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_GCLK 0x2160
+#define CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_PCLK 0x2164
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_ACLK_P_CORE 0x2168
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_CCLK_P_CORE 0x216c
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK 0x2170
+#define CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK_P_CORE 0x2174
static const unsigned long core_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
@@ -514,6 +594,13 @@ static const unsigned long core_clk_regs[] __initconst = {
CLK_CON_DIV_DIV_CLK_CORE_BUSP,
CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
CLK_CON_GAT_GOUT_CORE_GIC400_CLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_ACLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_GCLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_PCLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_ACLK_P_CORE,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_CCLK_P_CORE,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK,
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK_P_CORE,
};
/* List of parent clocks for Muxes in CMU_CORE */
@@ -545,6 +632,27 @@ static const struct samsung_gate_clock core_gate_clks[] __initconst = {
/* GIC (interrupt controller) clock must be always running */
GATE(CLK_GOUT_GIC400_CLK, "gout_gic400_clk", "mout_core_gic",
CLK_CON_GAT_GOUT_CORE_GIC400_CLK, 21, CLK_IS_CRITICAL, 0),
+ /*
+ * TREX D and P Core (seems to be related to "bus traffic shaper")
+ * clocks must always be running
+ */
+ GATE(CLK_GOUT_TREX_D_CORE_ACLK, "gout_trex_d_core_aclk", "mout_core_bus_user",
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_ACLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_D_CORE_GCLK, "gout_trex_d_core_gclk", "mout_core_g3d_user",
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_GCLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_D_CORE_PCLK, "gout_trex_d_core_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_TREX_D_CORE_PCLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_ACLK_P_CORE, "gout_trex_p_core_aclk_p_core",
+ "mout_core_bus_user", CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_ACLK_P_CORE, 21,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_CCLK_P_CORE, "gout_trex_p_core_cclk_p_core",
+ "mout_core_cci_user", CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_CCLK_P_CORE, 21,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_PCLK, "gout_trex_p_core_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_TREX_P_CORE_PCLK_P_CORE, "gout_trex_p_core_pclk_p_core",
+ "dout_core_busp", CLK_CON_GAT_GOUT_CORE_TREX_P_CORE_PCLK_P_CORE, 21,
+ CLK_IS_CRITICAL, 0),
};
static const struct samsung_cmu_info core_cmu_info __initconst = {
@@ -560,6 +668,88 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
.clk_name = "dout_core_bus",
};
+/* ---- CMU_FSYS ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_FSYS (0x13400000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER 0x0140
+#define PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER 0x0160
+#define PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER 0x0180
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK 0x2030
+#define CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN 0x2034
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN 0x203c
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN 0x2044
+
+static const unsigned long fsys_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
+ CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK,
+ CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN,
+ CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK,
+ CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN,
+ CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK,
+ CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS */
+PNAME(mout_fsys_bus_user_p) = { "oscclk", "dout_fsys_bus" };
+PNAME(mout_fsys_mmc_card_user_p) = { "oscclk", "dout_fsys_mmc_card" };
+PNAME(mout_fsys_mmc_embd_user_p) = { "oscclk", "dout_fsys_mmc_embd" };
+PNAME(mout_fsys_mmc_sdio_user_p) = { "oscclk", "dout_fsys_mmc_sdio" };
+PNAME(mout_fsys_usb30drd_user_p) = { "oscclk", "dout_fsys_usb30drd" };
+
+static const struct samsung_mux_clock fsys_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user", mout_fsys_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_FSYS_BUS_USER, 4, 1),
+ MUX_F(CLK_MOUT_FSYS_MMC_CARD_USER, "mout_fsys_mmc_card_user",
+ mout_fsys_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_CARD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_FSYS_MMC_EMBD_USER, "mout_fsys_mmc_embd_user",
+ mout_fsys_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_EMBD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_FSYS_MMC_SDIO_USER, "mout_fsys_mmc_sdio_user",
+ mout_fsys_mmc_sdio_user_p, PLL_CON0_MUX_CLKCMU_FSYS_MMC_SDIO_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_FSYS_USB30DRD_USER, "mout_fsys_usb30drd_user",
+ mout_fsys_usb30drd_user_p, PLL_CON0_MUX_CLKCMU_FSYS_USB30DRD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_gate_clock fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_fsys_bus_user",
+ CLK_CON_GAT_GOUT_FSYS_MMC_CARD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
+ "mout_fsys_mmc_card_user", CLK_CON_GAT_GOUT_FSYS_MMC_CARD_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "mout_fsys_bus_user",
+ CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
+ "mout_fsys_mmc_embd_user", CLK_CON_GAT_GOUT_FSYS_MMC_EMBD_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_MMC_SDIO_ACLK, "gout_mmc_sdio_aclk", "mout_fsys_bus_user",
+ CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_SDIO_SDCLKIN, "gout_mmc_sdio_sdclkin",
+ "mout_fsys_mmc_sdio_user", CLK_CON_GAT_GOUT_FSYS_MMC_SDIO_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info fsys_cmu_info __initconst = {
+ .mux_clks = fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys_mux_clks),
+ .gate_clks = fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys_gate_clks),
+ .nr_clk_ids = FSYS_NR_CLK,
+ .clk_regs = fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+ .clk_name = "dout_fsys_bus",
+};
+
/* ---- platform_driver ----------------------------------------------------- */
static int __init exynos7885_cmu_probe(struct platform_device *pdev)
@@ -578,6 +768,9 @@ static const struct of_device_id exynos7885_cmu_of_match[] = {
.compatible = "samsung,exynos7885-cmu-core",
.data = &core_cmu_info,
}, {
+ .compatible = "samsung,exynos7885-cmu-fsys",
+ .data = &fsys_cmu_info,
+ }, {
},
};
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index cd9725f1dbf7..541761e96aeb 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -30,6 +30,7 @@
#define PLL_CON0_PLL_SHARED1 0x0180
#define PLL_CON3_PLL_SHARED1 0x018c
#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLKCMU_AUD 0x1004
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
@@ -38,10 +39,19 @@
#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
#define CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD 0x1040
#define CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_IS_BUS 0x1048
+#define CLK_CON_MUX_MUX_CLKCMU_IS_GDC 0x104c
+#define CLK_CON_MUX_MUX_CLKCMU_IS_ITP 0x1050
+#define CLK_CON_MUX_MUX_CLKCMU_IS_VRA 0x1054
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC 0x1064
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
#define CLK_CON_DIV_CLKCMU_APM_BUS 0x180c
+#define CLK_CON_DIV_CLKCMU_AUD 0x1810
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
@@ -50,6 +60,14 @@
#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
#define CLK_CON_DIV_CLKCMU_HSI_MMC_CARD 0x184c
#define CLK_CON_DIV_CLKCMU_HSI_USB20DRD 0x1850
+#define CLK_CON_DIV_CLKCMU_IS_BUS 0x1854
+#define CLK_CON_DIV_CLKCMU_IS_GDC 0x1858
+#define CLK_CON_DIV_CLKCMU_IS_ITP 0x185c
+#define CLK_CON_DIV_CLKCMU_IS_VRA 0x1860
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG 0x1864
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_M2M 0x1868
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC 0x186c
+#define CLK_CON_DIV_CLKCMU_MFCMSCL_MFC 0x1870
#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x187c
#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1880
#define CLK_CON_DIV_CLKCMU_PERI_UART 0x1884
@@ -60,6 +78,7 @@
#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
+#define CLK_CON_GAT_GATE_CLKCMU_AUD 0x200c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
@@ -68,6 +87,14 @@
#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
#define CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD 0x2048
#define CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_IS_BUS 0x2050
+#define CLK_CON_GAT_GATE_CLKCMU_IS_GDC 0x2054
+#define CLK_CON_GAT_GATE_CLKCMU_IS_ITP 0x2058
+#define CLK_CON_GAT_GATE_CLKCMU_IS_VRA 0x205c
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG 0x2060
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M 0x2064
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC 0x2068
+#define CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC 0x206c
#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x2080
#define CLK_CON_GAT_GATE_CLKCMU_PERI_IP 0x2084
#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART 0x2088
@@ -83,6 +110,7 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_AUD,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
@@ -91,10 +119,19 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD,
+ CLK_CON_MUX_MUX_CLKCMU_IS_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_IS_GDC,
+ CLK_CON_MUX_MUX_CLKCMU_IS_ITP,
+ CLK_CON_MUX_MUX_CLKCMU_IS_VRA,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
CLK_CON_DIV_CLKCMU_APM_BUS,
+ CLK_CON_DIV_CLKCMU_AUD,
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
@@ -103,6 +140,14 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_HSI_BUS,
CLK_CON_DIV_CLKCMU_HSI_MMC_CARD,
CLK_CON_DIV_CLKCMU_HSI_USB20DRD,
+ CLK_CON_DIV_CLKCMU_IS_BUS,
+ CLK_CON_DIV_CLKCMU_IS_GDC,
+ CLK_CON_DIV_CLKCMU_IS_ITP,
+ CLK_CON_DIV_CLKCMU_IS_VRA,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_M2M,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC,
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MFC,
CLK_CON_DIV_CLKCMU_PERI_BUS,
CLK_CON_DIV_CLKCMU_PERI_IP,
CLK_CON_DIV_CLKCMU_PERI_UART,
@@ -113,6 +158,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_PLL_SHARED1_DIV3,
CLK_CON_DIV_PLL_SHARED1_DIV4,
CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_AUD,
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
@@ -121,6 +167,14 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD,
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD,
+ CLK_CON_GAT_GATE_CLKCMU_IS_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_IS_GDC,
+ CLK_CON_GAT_GATE_CLKCMU_IS_ITP,
+ CLK_CON_GAT_GATE_CLKCMU_IS_VRA,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC,
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC,
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
CLK_CON_GAT_GATE_CLKCMU_PERI_IP,
CLK_CON_GAT_GATE_CLKCMU_PERI_UART,
@@ -148,6 +202,9 @@ PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_APM */
PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared0_div4", "pll_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_AUD */
+PNAME(mout_aud_p) = { "fout_shared1_pll", "dout_shared0_div2",
+ "dout_shared1_div2", "dout_shared0_div3" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
@@ -167,13 +224,30 @@ PNAME(mout_hsi_mmc_card_p) = { "oscclk", "dout_shared0_div2",
"oscclk", "oscclk" };
PNAME(mout_hsi_usb20drd_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_IS */
+PNAME(mout_is_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_is_itp_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_is_vra_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_is_gdc_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_MFCMSCL */
+PNAME(mout_mfcmscl_mfc_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_mfcmscl_m2m_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_mfcmscl_mcsc_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_mfcmscl_jpeg_p) = { "dout_shared0_div3", "dout_shared1_div3",
+ "dout_shared0_div4", "dout_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
PNAME(mout_peri_uart_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
PNAME(mout_peri_ip_p) = { "oscclk", "dout_shared0_div4",
"dout_shared1_div4", "oscclk" };
-
/* List of parent clocks for Muxes in CMU_TOP: for CMU_DPU */
PNAME(mout_dpu_p) = { "dout_shared0_div3", "dout_shared1_div3",
"dout_shared0_div4", "dout_shared1_div4" };
@@ -191,6 +265,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus",
mout_clkcmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+ /* AUD */
+ MUX(CLK_MOUT_AUD, "mout_aud", mout_aud_p,
+ CLK_CON_MUX_MUX_CLKCMU_AUD, 0, 2),
+
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
@@ -213,6 +291,26 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_HSI_USB20DRD, "mout_hsi_usb20drd", mout_hsi_usb20drd_p,
CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD, 0, 2),
+ /* IS */
+ MUX(CLK_MOUT_IS_BUS, "mout_is_bus", mout_is_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_BUS, 0, 2),
+ MUX(CLK_MOUT_IS_ITP, "mout_is_itp", mout_is_itp_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_ITP, 0, 2),
+ MUX(CLK_MOUT_IS_VRA, "mout_is_vra", mout_is_vra_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_VRA, 0, 2),
+ MUX(CLK_MOUT_IS_GDC, "mout_is_gdc", mout_is_gdc_p,
+ CLK_CON_MUX_MUX_CLKCMU_IS_GDC, 0, 2),
+
+ /* MFCMSCL */
+ MUX(CLK_MOUT_MFCMSCL_MFC, "mout_mfcmscl_mfc", mout_mfcmscl_mfc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MFC, 0, 2),
+ MUX(CLK_MOUT_MFCMSCL_M2M, "mout_mfcmscl_m2m", mout_mfcmscl_m2m_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_M2M, 0, 2),
+ MUX(CLK_MOUT_MFCMSCL_MCSC, "mout_mfcmscl_mcsc", mout_mfcmscl_mcsc_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_MCSC, 0, 2),
+ MUX(CLK_MOUT_MFCMSCL_JPEG, "mout_mfcmscl_jpeg", mout_mfcmscl_jpeg_p,
+ CLK_CON_MUX_MUX_CLKCMU_MFCMSCL_JPEG, 0, 2),
+
/* PERI */
MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
@@ -241,6 +339,10 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus",
"gout_clkcmu_apm_bus", CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ /* AUD */
+ DIV(CLK_DOUT_AUD, "dout_aud", "gout_aud",
+ CLK_CON_DIV_CLKCMU_AUD, 0, 4),
+
/* CORE */
DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
@@ -263,6 +365,26 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_HSI_USB20DRD, "dout_hsi_usb20drd", "gout_hsi_usb20drd",
CLK_CON_DIV_CLKCMU_HSI_USB20DRD, 0, 4),
+ /* IS */
+ DIV(CLK_DOUT_IS_BUS, "dout_is_bus", "gout_is_bus",
+ CLK_CON_DIV_CLKCMU_IS_BUS, 0, 4),
+ DIV(CLK_DOUT_IS_ITP, "dout_is_itp", "gout_is_itp",
+ CLK_CON_DIV_CLKCMU_IS_ITP, 0, 4),
+ DIV(CLK_DOUT_IS_VRA, "dout_is_vra", "gout_is_vra",
+ CLK_CON_DIV_CLKCMU_IS_VRA, 0, 4),
+ DIV(CLK_DOUT_IS_GDC, "dout_is_gdc", "gout_is_gdc",
+ CLK_CON_DIV_CLKCMU_IS_GDC, 0, 4),
+
+ /* MFCMSCL */
+ DIV(CLK_DOUT_MFCMSCL_MFC, "dout_mfcmscl_mfc", "gout_mfcmscl_mfc",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MFC, 0, 4),
+ DIV(CLK_DOUT_MFCMSCL_M2M, "dout_mfcmscl_m2m", "gout_mfcmscl_m2m",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_M2M, 0, 4),
+ DIV(CLK_DOUT_MFCMSCL_MCSC, "dout_mfcmscl_mcsc", "gout_mfcmscl_mcsc",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_MCSC, 0, 4),
+ DIV(CLK_DOUT_MFCMSCL_JPEG, "dout_mfcmscl_jpeg", "gout_mfcmscl_jpeg",
+ CLK_CON_DIV_CLKCMU_MFCMSCL_JPEG, 0, 4),
+
/* PERI */
DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
@@ -287,6 +409,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus",
"mout_clkcmu_apm_bus", CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, 0, 0),
+ /* AUD */
+ GATE(CLK_GOUT_AUD, "gout_aud", "mout_aud",
+ CLK_CON_GAT_GATE_CLKCMU_AUD, 21, 0, 0),
+
/* DPU */
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
@@ -299,6 +425,28 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_HSI_USB20DRD, "gout_hsi_usb20drd", "mout_hsi_usb20drd",
CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD, 21, 0, 0),
+ /* IS */
+ /* TODO: These clocks must always be enabled to access CMU_IS regs */
+ GATE(CLK_GOUT_IS_BUS, "gout_is_bus", "mout_is_bus",
+ CLK_CON_GAT_GATE_CLKCMU_IS_BUS, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IS_ITP, "gout_is_itp", "mout_is_itp",
+ CLK_CON_GAT_GATE_CLKCMU_IS_ITP, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IS_VRA, "gout_is_vra", "mout_is_vra",
+ CLK_CON_GAT_GATE_CLKCMU_IS_VRA, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IS_GDC, "gout_is_gdc", "mout_is_gdc",
+ CLK_CON_GAT_GATE_CLKCMU_IS_GDC, 21, CLK_IS_CRITICAL, 0),
+
+ /* MFCMSCL */
+ /* TODO: These must always be enabled to access CMU_MFCMSCL regs */
+ GATE(CLK_GOUT_MFCMSCL_MFC, "gout_mfcmscl_mfc", "mout_mfcmscl_mfc",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MFC, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_MFCMSCL_M2M, "gout_mfcmscl_m2m", "mout_mfcmscl_m2m",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_M2M, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_MFCMSCL_MCSC, "gout_mfcmscl_mcsc", "mout_mfcmscl_mcsc",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_MCSC, 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_MFCMSCL_JPEG, "gout_mfcmscl_jpeg", "mout_mfcmscl_jpeg",
+ CLK_CON_GAT_GATE_CLKCMU_MFCMSCL_JPEG, 21, CLK_IS_CRITICAL, 0),
+
/* PERI */
GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
@@ -463,6 +611,284 @@ static const struct samsung_cmu_info apm_cmu_info __initconst = {
.clk_name = "dout_clkcmu_apm_bus",
};
+/* ---- CMU_AUD ------------------------------------------------------------- */
+
+#define PLL_LOCKTIME_PLL_AUD 0x0000
+#define PLL_CON0_PLL_AUD 0x0100
+#define PLL_CON3_PLL_AUD 0x010c
+#define PLL_CON0_MUX_CLKCMU_AUD_CPU_USER 0x0600
+#define PLL_CON0_MUX_TICK_USB_USER 0x0610
+#define CLK_CON_MUX_MUX_CLK_AUD_CPU 0x1000
+#define CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH 0x1004
+#define CLK_CON_MUX_MUX_CLK_AUD_FM 0x1008
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF0 0x100c
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF1 0x1010
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF2 0x1014
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF3 0x1018
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF4 0x101c
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF5 0x1020
+#define CLK_CON_MUX_MUX_CLK_AUD_UAIF6 0x1024
+#define CLK_CON_DIV_DIV_CLK_AUD_MCLK 0x1800
+#define CLK_CON_DIV_DIV_CLK_AUD_AUDIF 0x1804
+#define CLK_CON_DIV_DIV_CLK_AUD_BUSD 0x1808
+#define CLK_CON_DIV_DIV_CLK_AUD_BUSP 0x180c
+#define CLK_CON_DIV_DIV_CLK_AUD_CNT 0x1810
+#define CLK_CON_DIV_DIV_CLK_AUD_CPU 0x1814
+#define CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK 0x1818
+#define CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG 0x181c
+#define CLK_CON_DIV_DIV_CLK_AUD_FM 0x1820
+#define CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY 0x1824
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF0 0x1828
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF1 0x182c
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF2 0x1830
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF3 0x1834
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF4 0x1838
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF5 0x183c
+#define CLK_CON_DIV_DIV_CLK_AUD_UAIF6 0x1840
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT 0x2000
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0 0x2004
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1 0x2008
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2 0x200c
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3 0x2010
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4 0x2014
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5 0x2018
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6 0x201c
+#define CLK_CON_GAT_GOUT_AUD_ABOX_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY 0x204c
+#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB 0x2050
+#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32 0x2054
+#define CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP 0x2058
+#define CLK_CON_GAT_GOUT_AUD_CODEC_MCLK 0x206c
+#define CLK_CON_GAT_GOUT_AUD_TZPC_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_AUD_GPIO_PCLK 0x2074
+#define CLK_CON_GAT_GOUT_AUD_PPMU_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_AUD_PPMU_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1 0x20b4
+#define CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK 0x20b8
+#define CLK_CON_GAT_GOUT_AUD_WDT_PCLK 0x20bc
+
+static const unsigned long aud_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_AUD,
+ PLL_CON0_PLL_AUD,
+ PLL_CON3_PLL_AUD,
+ PLL_CON0_MUX_CLKCMU_AUD_CPU_USER,
+ PLL_CON0_MUX_TICK_USB_USER,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH,
+ CLK_CON_MUX_MUX_CLK_AUD_FM,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF0,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF1,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF2,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF3,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF4,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF5,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF6,
+ CLK_CON_DIV_DIV_CLK_AUD_MCLK,
+ CLK_CON_DIV_DIV_CLK_AUD_AUDIF,
+ CLK_CON_DIV_DIV_CLK_AUD_BUSD,
+ CLK_CON_DIV_DIV_CLK_AUD_BUSP,
+ CLK_CON_DIV_DIV_CLK_AUD_CNT,
+ CLK_CON_DIV_DIV_CLK_AUD_CPU,
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK,
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG,
+ CLK_CON_DIV_DIV_CLK_AUD_FM,
+ CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF0,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF1,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF2,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF3,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF4,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF5,
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF6,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6,
+ CLK_CON_GAT_GOUT_AUD_ABOX_ACLK,
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY,
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB,
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32,
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP,
+ CLK_CON_GAT_GOUT_AUD_CODEC_MCLK,
+ CLK_CON_GAT_GOUT_AUD_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_AUD_GPIO_PCLK,
+ CLK_CON_GAT_GOUT_AUD_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_AUD_PPMU_PCLK,
+ CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1,
+ CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK,
+ CLK_CON_GAT_GOUT_AUD_WDT_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_AUD */
+PNAME(mout_aud_pll_p) = { "oscclk", "fout_aud_pll" };
+PNAME(mout_aud_cpu_user_p) = { "oscclk", "dout_aud" };
+PNAME(mout_aud_cpu_p) = { "dout_aud_cpu", "mout_aud_cpu_user" };
+PNAME(mout_aud_cpu_hch_p) = { "mout_aud_cpu", "oscclk" };
+PNAME(mout_aud_uaif0_p) = { "dout_aud_uaif0", "ioclk_audiocdclk0" };
+PNAME(mout_aud_uaif1_p) = { "dout_aud_uaif1", "ioclk_audiocdclk1" };
+PNAME(mout_aud_uaif2_p) = { "dout_aud_uaif2", "ioclk_audiocdclk2" };
+PNAME(mout_aud_uaif3_p) = { "dout_aud_uaif3", "ioclk_audiocdclk3" };
+PNAME(mout_aud_uaif4_p) = { "dout_aud_uaif4", "ioclk_audiocdclk4" };
+PNAME(mout_aud_uaif5_p) = { "dout_aud_uaif5", "ioclk_audiocdclk5" };
+PNAME(mout_aud_uaif6_p) = { "dout_aud_uaif6", "ioclk_audiocdclk6" };
+PNAME(mout_aud_tick_usb_user_p) = { "oscclk", "tick_usb" };
+PNAME(mout_aud_fm_p) = { "oscclk", "dout_aud_fm_spdy" };
+
+/*
+ * Do not provide a PLL table for PLL_AUD, as the MANUAL_PLL_CTRL bit is not
+ * set for that PLL by default, so the set_rate operation would fail.
+ */
+static const struct samsung_pll_clock aud_pll_clks[] __initconst = {
+ PLL(pll_0831x, CLK_FOUT_AUD_PLL, "fout_aud_pll", "oscclk",
+ PLL_LOCKTIME_PLL_AUD, PLL_CON3_PLL_AUD, NULL),
+};
+
+static const struct samsung_fixed_rate_clock aud_fixed_clks[] __initconst = {
+ FRATE(IOCLK_AUDIOCDCLK0, "ioclk_audiocdclk0", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK1, "ioclk_audiocdclk1", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK2, "ioclk_audiocdclk2", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK3, "ioclk_audiocdclk3", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK4, "ioclk_audiocdclk4", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK5, "ioclk_audiocdclk5", NULL, 0, 25000000),
+ FRATE(IOCLK_AUDIOCDCLK6, "ioclk_audiocdclk6", NULL, 0, 25000000),
+ FRATE(TICK_USB, "tick_usb", NULL, 0, 60000000),
+};
+
+static const struct samsung_mux_clock aud_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p,
+ PLL_CON0_PLL_AUD, 4, 1),
+ MUX(CLK_MOUT_AUD_CPU_USER, "mout_aud_cpu_user", mout_aud_cpu_user_p,
+ PLL_CON0_MUX_CLKCMU_AUD_CPU_USER, 4, 1),
+ MUX(CLK_MOUT_AUD_TICK_USB_USER, "mout_aud_tick_usb_user",
+ mout_aud_tick_usb_user_p,
+ PLL_CON0_MUX_TICK_USB_USER, 4, 1),
+ MUX(CLK_MOUT_AUD_CPU, "mout_aud_cpu", mout_aud_cpu_p,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU, 0, 1),
+ MUX(CLK_MOUT_AUD_CPU_HCH, "mout_aud_cpu_hch", mout_aud_cpu_hch_p,
+ CLK_CON_MUX_MUX_CLK_AUD_CPU_HCH, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF0, "mout_aud_uaif0", mout_aud_uaif0_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF0, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF1, "mout_aud_uaif1", mout_aud_uaif1_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF1, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF2, "mout_aud_uaif2", mout_aud_uaif2_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF2, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF3, "mout_aud_uaif3", mout_aud_uaif3_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF3, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF4, "mout_aud_uaif4", mout_aud_uaif4_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF4, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF5, "mout_aud_uaif5", mout_aud_uaif5_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF5, 0, 1),
+ MUX(CLK_MOUT_AUD_UAIF6, "mout_aud_uaif6", mout_aud_uaif6_p,
+ CLK_CON_MUX_MUX_CLK_AUD_UAIF6, 0, 1),
+ MUX(CLK_MOUT_AUD_FM, "mout_aud_fm", mout_aud_fm_p,
+ CLK_CON_MUX_MUX_CLK_AUD_FM, 0, 1),
+};
+
+static const struct samsung_div_clock aud_div_clks[] __initconst = {
+ DIV(CLK_DOUT_AUD_CPU, "dout_aud_cpu", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_CPU, 0, 4),
+ DIV(CLK_DOUT_AUD_BUSD, "dout_aud_busd", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_BUSD, 0, 4),
+ DIV(CLK_DOUT_AUD_BUSP, "dout_aud_busp", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_BUSP, 0, 4),
+ DIV(CLK_DOUT_AUD_AUDIF, "dout_aud_audif", "mout_aud_pll",
+ CLK_CON_DIV_DIV_CLK_AUD_AUDIF, 0, 9),
+ DIV(CLK_DOUT_AUD_CPU_ACLK, "dout_aud_cpu_aclk", "mout_aud_cpu_hch",
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_ACLK, 0, 3),
+ DIV(CLK_DOUT_AUD_CPU_PCLKDBG, "dout_aud_cpu_pclkdbg",
+ "mout_aud_cpu_hch",
+ CLK_CON_DIV_DIV_CLK_AUD_CPU_PCLKDBG, 0, 3),
+ DIV(CLK_DOUT_AUD_MCLK, "dout_aud_mclk", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_MCLK, 0, 2),
+ DIV(CLK_DOUT_AUD_CNT, "dout_aud_cnt", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_CNT, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF0, "dout_aud_uaif0", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF0, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF1, "dout_aud_uaif1", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF1, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF2, "dout_aud_uaif2", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF2, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF3, "dout_aud_uaif3", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF3, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF4, "dout_aud_uaif4", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF4, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF5, "dout_aud_uaif5", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF5, 0, 10),
+ DIV(CLK_DOUT_AUD_UAIF6, "dout_aud_uaif6", "dout_aud_audif",
+ CLK_CON_DIV_DIV_CLK_AUD_UAIF6, 0, 10),
+ DIV(CLK_DOUT_AUD_FM_SPDY, "dout_aud_fm_spdy", "mout_aud_tick_usb_user",
+ CLK_CON_DIV_DIV_CLK_AUD_FM_SPDY, 0, 1),
+ DIV(CLK_DOUT_AUD_FM, "dout_aud_fm", "mout_aud_fm",
+ CLK_CON_DIV_DIV_CLK_AUD_FM, 0, 10),
+};
+
+static const struct samsung_gate_clock aud_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_AUD_CA32_CCLK, "gout_aud_ca32_cclk", "mout_aud_cpu_hch",
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_CA32, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_ASB_CCLK, "gout_aud_asb_cclk", "dout_aud_cpu_aclk",
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_ASB, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_DAP_CCLK, "gout_aud_dap_cclk", "dout_aud_cpu_pclkdbg",
+ CLK_CON_GAT_GOUT_AUD_ABOX_CCLK_DAP, 21, 0, 0),
+ /* TODO: Should be enabled in ABOX driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_AUD_ABOX_ACLK, "gout_aud_abox_aclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_ABOX_ACLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_AUD_GPIO_PCLK, "gout_aud_gpio_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_GPIO_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_PPMU_ACLK, "gout_aud_ppmu_aclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_PPMU_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_PPMU_PCLK, "gout_aud_ppmu_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_PPMU_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_SYSMMU_CLK, "gout_aud_sysmmu_clk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_SYSMMU_CLK_S1, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_SYSREG_PCLK, "gout_aud_sysreg_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_SYSREG_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_WDT_PCLK, "gout_aud_wdt_pclk", "dout_aud_busd",
+ CLK_CON_GAT_GOUT_AUD_WDT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_TZPC_PCLK, "gout_aud_tzpc_pclk", "dout_aud_busp",
+ CLK_CON_GAT_GOUT_AUD_TZPC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_CODEC_MCLK, "gout_aud_codec_mclk", "dout_aud_mclk",
+ CLK_CON_GAT_GOUT_AUD_CODEC_MCLK, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_CNT_BCLK, "gout_aud_cnt_bclk", "dout_aud_cnt",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_CNT, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF0_BCLK, "gout_aud_uaif0_bclk", "mout_aud_uaif0",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF0, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF1_BCLK, "gout_aud_uaif1_bclk", "mout_aud_uaif1",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF1, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF2_BCLK, "gout_aud_uaif2_bclk", "mout_aud_uaif2",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF2, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF3_BCLK, "gout_aud_uaif3_bclk", "mout_aud_uaif3",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF3, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF4_BCLK, "gout_aud_uaif4_bclk", "mout_aud_uaif4",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF4, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF5_BCLK, "gout_aud_uaif5_bclk", "mout_aud_uaif5",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF5, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_UAIF6_BCLK, "gout_aud_uaif6_bclk", "mout_aud_uaif6",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_UAIF6, 21, 0, 0),
+ GATE(CLK_GOUT_AUD_SPDY_BCLK, "gout_aud_spdy_bclk", "dout_aud_fm",
+ CLK_CON_GAT_GOUT_AUD_ABOX_BCLK_SPDY, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info aud_cmu_info __initconst = {
+ .pll_clks = aud_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(aud_pll_clks),
+ .mux_clks = aud_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(aud_mux_clks),
+ .div_clks = aud_div_clks,
+ .nr_div_clks = ARRAY_SIZE(aud_div_clks),
+ .gate_clks = aud_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(aud_gate_clks),
+ .fixed_clks = aud_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(aud_fixed_clks),
+ .nr_clk_ids = AUD_NR_CLK,
+ .clk_regs = aud_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
+ .clk_name = "dout_aud",
+};
+
/* ---- CMU_CMGP ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CMGP (0x11c00000) */
@@ -599,7 +1025,7 @@ static const unsigned long hsi_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY,
};
-/* List of parent clocks for Muxes in CMU_PERI */
+/* List of parent clocks for Muxes in CMU_HSI */
PNAME(mout_hsi_bus_user_p) = { "oscclk", "dout_hsi_bus" };
PNAME(mout_hsi_mmc_card_user_p) = { "oscclk", "dout_hsi_mmc_card" };
PNAME(mout_hsi_usb20drd_user_p) = { "oscclk", "dout_hsi_usb20drd" };
@@ -654,6 +1080,247 @@ static const struct samsung_cmu_info hsi_cmu_info __initconst = {
.clk_name = "dout_hsi_bus",
};
+/* ---- CMU_IS -------------------------------------------------------------- */
+
+#define PLL_CON0_MUX_CLKCMU_IS_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_IS_GDC_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_IS_ITP_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_IS_VRA_USER 0x0630
+#define CLK_CON_DIV_DIV_CLK_IS_BUSP 0x1800
+#define CLK_CON_GAT_CLK_IS_CMU_IS_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_IS_CSIS0_ACLK 0x2040
+#define CLK_CON_GAT_GOUT_IS_CSIS1_ACLK 0x2044
+#define CLK_CON_GAT_GOUT_IS_CSIS2_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_IS_TZPC_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA 0x2050
+#define CLK_CON_GAT_GOUT_IS_CLK_GDC 0x2054
+#define CLK_CON_GAT_GOUT_IS_CLK_IPP 0x2058
+#define CLK_CON_GAT_GOUT_IS_CLK_ITP 0x205c
+#define CLK_CON_GAT_GOUT_IS_CLK_MCSC 0x2060
+#define CLK_CON_GAT_GOUT_IS_CLK_VRA 0x2064
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK 0x2074
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK 0x207c
+#define CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1 0x2098
+#define CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1 0x209c
+#define CLK_CON_GAT_GOUT_IS_SYSREG_PCLK 0x20a0
+
+static const unsigned long is_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_IS_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_IS_GDC_USER,
+ PLL_CON0_MUX_CLKCMU_IS_ITP_USER,
+ PLL_CON0_MUX_CLKCMU_IS_VRA_USER,
+ CLK_CON_DIV_DIV_CLK_IS_BUSP,
+ CLK_CON_GAT_CLK_IS_CMU_IS_PCLK,
+ CLK_CON_GAT_GOUT_IS_CSIS0_ACLK,
+ CLK_CON_GAT_GOUT_IS_CSIS1_ACLK,
+ CLK_CON_GAT_GOUT_IS_CSIS2_ACLK,
+ CLK_CON_GAT_GOUT_IS_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA,
+ CLK_CON_GAT_GOUT_IS_CLK_GDC,
+ CLK_CON_GAT_GOUT_IS_CLK_IPP,
+ CLK_CON_GAT_GOUT_IS_CLK_ITP,
+ CLK_CON_GAT_GOUT_IS_CLK_MCSC,
+ CLK_CON_GAT_GOUT_IS_CLK_VRA,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK,
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK,
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1,
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1,
+ CLK_CON_GAT_GOUT_IS_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_IS */
+PNAME(mout_is_bus_user_p) = { "oscclk", "dout_is_bus" };
+PNAME(mout_is_itp_user_p) = { "oscclk", "dout_is_itp" };
+PNAME(mout_is_vra_user_p) = { "oscclk", "dout_is_vra" };
+PNAME(mout_is_gdc_user_p) = { "oscclk", "dout_is_gdc" };
+
+static const struct samsung_mux_clock is_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_IS_BUS_USER, "mout_is_bus_user", mout_is_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_IS_ITP_USER, "mout_is_itp_user", mout_is_itp_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_ITP_USER, 4, 1),
+ MUX(CLK_MOUT_IS_VRA_USER, "mout_is_vra_user", mout_is_vra_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_VRA_USER, 4, 1),
+ MUX(CLK_MOUT_IS_GDC_USER, "mout_is_gdc_user", mout_is_gdc_user_p,
+ PLL_CON0_MUX_CLKCMU_IS_GDC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock is_div_clks[] __initconst = {
+ DIV(CLK_DOUT_IS_BUSP, "dout_is_busp", "mout_is_bus_user",
+ CLK_CON_DIV_DIV_CLK_IS_BUSP, 0, 2),
+};
+
+static const struct samsung_gate_clock is_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in IS driver */
+ GATE(CLK_GOUT_IS_CMU_IS_PCLK, "gout_is_cmu_is_pclk", "dout_is_busp",
+ CLK_CON_GAT_CLK_IS_CMU_IS_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_IS_CSIS0_ACLK, "gout_is_csis0_aclk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CSIS0_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_CSIS1_ACLK, "gout_is_csis1_aclk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CSIS1_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_CSIS2_ACLK, "gout_is_csis2_aclk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CSIS2_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_TZPC_PCLK, "gout_is_tzpc_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_TZPC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_CSIS_DMA_CLK, "gout_is_csis_dma_clk",
+ "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CLK_CSIS_DMA, 21, 0, 0),
+ GATE(CLK_GOUT_IS_GDC_CLK, "gout_is_gdc_clk", "mout_is_gdc_user",
+ CLK_CON_GAT_GOUT_IS_CLK_GDC, 21, 0, 0),
+ GATE(CLK_GOUT_IS_IPP_CLK, "gout_is_ipp_clk", "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_CLK_IPP, 21, 0, 0),
+ GATE(CLK_GOUT_IS_ITP_CLK, "gout_is_itp_clk", "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_CLK_ITP, 21, 0, 0),
+ GATE(CLK_GOUT_IS_MCSC_CLK, "gout_is_mcsc_clk", "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_CLK_MCSC, 21, 0, 0),
+ GATE(CLK_GOUT_IS_VRA_CLK, "gout_is_vra_clk", "mout_is_vra_user",
+ CLK_CON_GAT_GOUT_IS_CLK_VRA, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS0_ACLK, "gout_is_ppmu_is0_aclk",
+ "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS0_PCLK, "gout_is_ppmu_is0_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS1_ACLK, "gout_is_ppmu_is1_aclk",
+ "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_PPMU_IS1_PCLK, "gout_is_ppmu_is1_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_PPMU_IS1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_IS_SYSMMU_IS0_CLK, "gout_is_sysmmu_is0_clk",
+ "mout_is_bus_user",
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS0_CLK_S1, 21, 0, 0),
+ GATE(CLK_GOUT_IS_SYSMMU_IS1_CLK, "gout_is_sysmmu_is1_clk",
+ "mout_is_itp_user",
+ CLK_CON_GAT_GOUT_IS_SYSMMU_IS1_CLK_S1, 21, 0, 0),
+ GATE(CLK_GOUT_IS_SYSREG_PCLK, "gout_is_sysreg_pclk", "dout_is_busp",
+ CLK_CON_GAT_GOUT_IS_SYSREG_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info is_cmu_info __initconst = {
+ .mux_clks = is_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(is_mux_clks),
+ .div_clks = is_div_clks,
+ .nr_div_clks = ARRAY_SIZE(is_div_clks),
+ .gate_clks = is_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(is_gate_clks),
+ .nr_clk_ids = IS_NR_CLK,
+ .clk_regs = is_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(is_clk_regs),
+ .clk_name = "dout_is_bus",
+};
+
+/* ---- CMU_MFCMSCL --------------------------------------------------------- */
+
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER 0x0630
+#define CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP 0x1800
+#define CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK 0x2000
+#define CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK 0x203c
+#define CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK 0x2048
+#define CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK 0x204c
+#define CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK 0x2050
+#define CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK 0x2054
+#define CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1 0x2074
+#define CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK 0x2078
+
+static const unsigned long mfcmscl_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER,
+ CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP,
+ CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK,
+ CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1,
+ CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_MFCMSCL */
+PNAME(mout_mfcmscl_mfc_user_p) = { "oscclk", "dout_mfcmscl_mfc" };
+PNAME(mout_mfcmscl_m2m_user_p) = { "oscclk", "dout_mfcmscl_m2m" };
+PNAME(mout_mfcmscl_mcsc_user_p) = { "oscclk", "dout_mfcmscl_mcsc" };
+PNAME(mout_mfcmscl_jpeg_user_p) = { "oscclk", "dout_mfcmscl_jpeg" };
+
+static const struct samsung_mux_clock mfcmscl_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MFCMSCL_MFC_USER, "mout_mfcmscl_mfc_user",
+ mout_mfcmscl_mfc_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MFC_USER, 4, 1),
+ MUX(CLK_MOUT_MFCMSCL_M2M_USER, "mout_mfcmscl_m2m_user",
+ mout_mfcmscl_m2m_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_M2M_USER, 4, 1),
+ MUX(CLK_MOUT_MFCMSCL_MCSC_USER, "mout_mfcmscl_mcsc_user",
+ mout_mfcmscl_mcsc_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_MCSC_USER, 4, 1),
+ MUX(CLK_MOUT_MFCMSCL_JPEG_USER, "mout_mfcmscl_jpeg_user",
+ mout_mfcmscl_jpeg_user_p,
+ PLL_CON0_MUX_CLKCMU_MFCMSCL_JPEG_USER, 4, 1),
+};
+
+static const struct samsung_div_clock mfcmscl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MFCMSCL_BUSP, "dout_mfcmscl_busp", "mout_mfcmscl_mfc_user",
+ CLK_CON_DIV_DIV_CLK_MFCMSCL_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock mfcmscl_gate_clks[] __initconst = {
+ /* TODO: Should be enabled in MFC driver */
+ GATE(CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK, "gout_mfcmscl_cmu_mfcmscl_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_CLK_MFCMSCL_CMU_MFCMSCL_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_MFCMSCL_TZPC_PCLK, "gout_mfcmscl_tzpc_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_TZPC_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_JPEG_ACLK, "gout_mfcmscl_jpeg_aclk",
+ "mout_mfcmscl_jpeg_user", CLK_CON_GAT_GOUT_MFCMSCL_JPEG_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_M2M_ACLK, "gout_mfcmscl_m2m_aclk",
+ "mout_mfcmscl_m2m_user", CLK_CON_GAT_GOUT_MFCMSCL_M2M_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_MCSC_CLK, "gout_mfcmscl_mcsc_clk",
+ "mout_mfcmscl_mcsc_user", CLK_CON_GAT_GOUT_MFCMSCL_MCSC_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_MFC_ACLK, "gout_mfcmscl_mfc_aclk",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_MFC_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_PPMU_ACLK, "gout_mfcmscl_ppmu_aclk",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_PPMU_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_PPMU_PCLK, "gout_mfcmscl_ppmu_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_PPMU_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_SYSMMU_CLK, "gout_mfcmscl_sysmmu_clk",
+ "mout_mfcmscl_mfc_user", CLK_CON_GAT_GOUT_MFCMSCL_SYSMMU_CLK_S1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_MFCMSCL_SYSREG_PCLK, "gout_mfcmscl_sysreg_pclk",
+ "dout_mfcmscl_busp", CLK_CON_GAT_GOUT_MFCMSCL_SYSREG_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info mfcmscl_cmu_info __initconst = {
+ .mux_clks = mfcmscl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfcmscl_mux_clks),
+ .div_clks = mfcmscl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfcmscl_div_clks),
+ .gate_clks = mfcmscl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(mfcmscl_gate_clks),
+ .nr_clk_ids = MFCMSCL_NR_CLK,
+ .clk_regs = mfcmscl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfcmscl_clk_regs),
+ .clk_name = "dout_mfcmscl_mfc",
+};
+
/* ---- CMU_PERI ------------------------------------------------------------ */
/* Register Offset definitions for CMU_PERI (0x10030000) */
@@ -963,7 +1630,7 @@ static const unsigned long dpu_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK,
};
-/* List of parent clocks for Muxes in CMU_CORE */
+/* List of parent clocks for Muxes in CMU_DPU */
PNAME(mout_dpu_user_p) = { "oscclk", "dout_dpu" };
static const struct samsung_mux_clock dpu_mux_clks[] __initconst = {
@@ -1028,12 +1695,21 @@ static const struct of_device_id exynos850_cmu_of_match[] = {
.compatible = "samsung,exynos850-cmu-apm",
.data = &apm_cmu_info,
}, {
+ .compatible = "samsung,exynos850-cmu-aud",
+ .data = &aud_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-cmgp",
.data = &cmgp_cmu_info,
}, {
.compatible = "samsung,exynos850-cmu-hsi",
.data = &hsi_cmu_info,
}, {
+ .compatible = "samsung,exynos850-cmu-is",
+ .data = &is_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-mfcmscl",
+ .data = &mfcmscl_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-core",
.data = &core_cmu_info,
}, {
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index d9e1f8e4a7b4..7b16320bba66 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -1067,6 +1067,373 @@ static const struct samsung_cmu_info core_cmu_info __initconst = {
.clk_name = "dout_clkcmu_core_bus",
};
+/* ---- CMU_FSYS0 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS0 (0x17700000) */
+#define PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_FSYS0_PCIE_USER 0x0610
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK 0x2000
+
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_PHY_REFCLK_IN 0x2004
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PHY_REFCLK_IN 0x2008
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_PHY_REFCLK_IN 0x200c
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PHY_REFCLK_IN 0x2010
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_PHY_REFCLK_IN 0x2014
+#define CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PHY_REFCLK_IN 0x2018
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_DBI_ACLK 0x205c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_MSTR_ACLK 0x2060
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_SLV_ACLK 0x2064
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_DBI_ACLK 0x206c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_MSTR_ACLK 0x2070
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_SLV_ACLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PIPE_CLK 0x207c
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_DBI_ACLK 0x2084
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_MSTR_ACLK 0x2088
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_SLV_ACLK 0x208c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_DBI_ACLK 0x2094
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_MSTR_ACLK 0x2098
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_SLV_ACLK 0x209c
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PIPE_CLK 0x20a4
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_DBI_ACLK 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_MSTR_ACLK 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_SLV_ACLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_DBI_ACLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_MSTR_ACLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_SLV_ACLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PIPE_CLK 0x20cc
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L0_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L1_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_4L_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L0_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L1_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_4L_CLK 0x20e8
+
+static const unsigned long fsys0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_FSYS0_PCIE_USER,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_PHY_REFCLK_IN,
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PHY_REFCLK_IN,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PIPE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PIPE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_DBI_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_MSTR_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_SLV_ACLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PIPE_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L0_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L1_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_4L_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L0_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L1_CLK,
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_4L_CLK,
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS0 */
+PNAME(mout_fsys0_bus_user_p) = { "oscclk", "dout_clkcmu_fsys0_bus" };
+PNAME(mout_fsys0_pcie_user_p) = { "oscclk", "dout_clkcmu_fsys0_pcie" };
+
+static const struct samsung_mux_clock fsys0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS0_BUS_USER, "mout_fsys0_bus_user",
+ mout_fsys0_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS0_PCIE_USER, "mout_fsys0_pcie_user",
+ mout_fsys0_pcie_user_p, PLL_CON0_MUX_CLKCMU_FSYS0_PCIE_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock fsys0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS0_BUS_PCLK, "gout_fsys0_bus_pclk",
+ "mout_fsys0_bus_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_FSYS0_CMU_FSYS0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+
+ /* Gen3 2L0 */
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_REFCLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_REFCLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x1_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X1_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l0_x2_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L0_X2_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3A_2L0_CLK,
+ "gout_fsys0_pcie_gen3a_2l0_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L0_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3B_2L0_CLK,
+ "gout_fsys0_pcie_gen3b_2l0_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L0_CLK,
+ 21, 0, 0),
+
+ /* Gen3 2L1 */
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_REFCLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_REFCLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x1_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X1_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_2l1_x2_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_2L1_X2_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3A_2L1_CLK,
+ "gout_fsys0_pcie_gen3a_2l1_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_2L1_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3B_2L1_CLK,
+ "gout_fsys0_pcie_gen3b_2l1_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_2L1_CLK,
+ 21, 0, 0),
+
+ /* Gen3 4L */
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_REFCLK,
+ "gout_fsys0_pcie_gen3_4l_x2_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_REFCLK,
+ "gout_fsys0_pcie_gen3_4l_x4_refclk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_CLK_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_PHY_REFCLK_IN,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x2_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x2_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x2_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X2_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_DBI_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x4_dbi_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_DBI_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_MSTR_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x4_mstr_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_MSTR_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_SLV_ACLK,
+ "gout_fsys0_pcie_gen3_4l_x4_slv_aclk", "mout_fsys0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3_4L_X4_SLV_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK,
+ "gout_fsys0_pcie_gen3a_4l_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3A_4L_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK,
+ "gout_fsys0_pcie_gen3b_4l_clk", "mout_fsys0_pcie_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS0_UID_PCIE_GEN3B_4L_CLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info fsys0_cmu_info __initconst = {
+ .mux_clks = fsys0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
+ .gate_clks = fsys0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
+ .nr_clk_ids = FSYS0_NR_CLK,
+ .clk_regs = fsys0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
+ .clk_name = "dout_clkcmu_fsys0_bus",
+};
+
+/* ---- CMU_FSYS1 ---------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_FSYS1 (0x17040000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0000
+#define PLL_CON0_PLL_MMC 0x0100
+#define PLL_CON3_PLL_MMC 0x010c
+#define PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_FSYS1_USBDRD_USER 0x0620
+
+#define CLK_CON_MUX_MUX_CLK_FSYS1_MMC_CARD 0x1000
+#define CLK_CON_DIV_DIV_CLK_FSYS1_MMC_CARD 0x1800
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN 0x202c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK 0x2028
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_0_REF_CLK_40 0x204c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_1_REF_CLK_40 0x2058
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_0_REF_CLK_40 0x2064
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_1_REF_CLK_40 0x2070
+
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_0_IPCLKPORT_ACLK 0x2074
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_1_IPCLKPORT_ACLK 0x2078
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_0_IPCLKPORT_ACLK 0x207c
+#define CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_1_IPCLKPORT_ACLK 0x2080
+
+static const unsigned long fsys1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER,
+};
+
+static const struct samsung_pll_clock fsys1_pll_clks[] __initconst = {
+ PLL(pll_0831x, FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_FSYS1 */
+PNAME(mout_fsys1_bus_user_p) = { "oscclk", "dout_clkcmu_fsys1_bus" };
+PNAME(mout_fsys1_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+PNAME(mout_fsys1_mmc_card_user_p) = { "oscclk", "gout_clkcmu_fsys1_mmc_card" };
+PNAME(mout_fsys1_usbdrd_user_p) = { "oscclk", "dout_clkcmu_fsys1_usbdrd" };
+PNAME(mout_fsys1_mmc_card_p) = { "mout_fsys1_mmc_card_user",
+ "mout_fsys1_mmc_pll" };
+
+static const struct samsung_mux_clock fsys1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_FSYS1_BUS_USER, "mout_fsys1_bus_user",
+ mout_fsys1_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS1_MMC_PLL, "mout_fsys1_mmc_pll", mout_fsys1_mmc_pll_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+ MUX(CLK_MOUT_FSYS1_MMC_CARD_USER, "mout_fsys1_mmc_card_user",
+ mout_fsys1_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_MMC_CARD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_FSYS1_USBDRD_USER, "mout_fsys1_usbdrd_user",
+ mout_fsys1_usbdrd_user_p, PLL_CON0_MUX_CLKCMU_FSYS1_USBDRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_FSYS1_MMC_CARD, "mout_fsys1_mmc_card",
+ mout_fsys1_mmc_card_p, CLK_CON_MUX_MUX_CLK_FSYS1_MMC_CARD,
+ 0, 1),
+};
+
+static const struct samsung_div_clock fsys1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_FSYS1_MMC_CARD, "dout_fsys1_mmc_card",
+ "mout_fsys1_mmc_card",
+ CLK_CON_DIV_DIV_CLK_FSYS1_MMC_CARD, 0, 9),
+};
+
+static const struct samsung_gate_clock fsys1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS1_PCLK, "gout_fsys1_pclk", "mout_fsys1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_FSYS1_CMU_FSYS1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN, "gout_fsys1_mmc_card_sdclkin",
+ "dout_fsys1_mmc_card",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS1_MMC_CARD_ACLK, "gout_fsys1_mmc_card_aclk",
+ "dout_fsys1_mmc_card",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_MMC_CARD_IPCLKPORT_I_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20DRD_0_REFCLK, "gout_fsys1_usb20drd_0_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_0_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20DRD_1_REFCLK, "gout_fsys1_usb20drd_1_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB20DRD_1_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30DRD_0_REFCLK, "gout_fsys1_usb30drd_0_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_0_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30DRD_1_REFCLK, "gout_fsys1_usb30drd_1_refclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_USB30DRD_1_REF_CLK_40,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20_0_ACLK, "gout_fsys1_usb20_0_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_0_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB20_1_ACLK, "gout_fsys1_usb20_1_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB2_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30_0_ACLK, "gout_fsys1_usb30_0_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_0_IPCLKPORT_ACLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_FSYS1_USB30_1_ACLK, "gout_fsys1_usb30_1_aclk",
+ "mout_fsys1_usbdrd_user",
+ CLK_CON_GAT_GOUT_BLK_FSYS1_UID_US_D_USB3_1_IPCLKPORT_ACLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info fsys1_cmu_info __initconst = {
+ .pll_clks = fsys1_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(fsys1_pll_clks),
+ .mux_clks = fsys1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
+ .div_clks = fsys1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(fsys1_div_clks),
+ .gate_clks = fsys1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
+ .nr_clk_ids = FSYS1_NR_CLK,
+ .clk_regs = fsys1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
+ .clk_name = "dout_clkcmu_fsys1_bus",
+};
+
/* ---- CMU_FSYS2 ---------------------------------------------------------- */
/* Register Offset definitions for CMU_FSYS2 (0x17c00000) */
@@ -1170,9 +1537,9 @@ static const struct samsung_cmu_info fsys2_cmu_info __initconst = {
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2 0x2058
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3 0x205c
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
-#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x204c
@@ -1330,6 +1697,10 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
"mout_peric0_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0,
21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_PCLK_1, "gout_peric0_pclk_1",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
GATE(CLK_GOUT_PERIC0_PCLK_2, "gout_peric0_pclk_2",
"mout_peric0_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2,
@@ -1418,14 +1789,14 @@ static const struct samsung_cmu_info peric0_cmu_info __initconst = {
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2020
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0 0x2044
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1 0x2048
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2058
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x205c
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2060
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7 0x206c
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2064
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2068
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x2070
-#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9 0x2070
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x204c
#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2050
@@ -1463,9 +1834,9 @@ static const unsigned long peric1_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
- CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9,
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
@@ -1581,6 +1952,10 @@ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
"mout_peric1_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0,
21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_PCLK_1, "gout_peric1_pclk_1",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
GATE(CLK_GOUT_PERIC1_PCLK_2, "gout_peric1_pclk_2",
"mout_peric1_bus_user",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2,
@@ -1702,6 +2077,12 @@ static const struct of_device_id exynosautov9_cmu_of_match[] = {
.compatible = "samsung,exynosautov9-cmu-core",
.data = &core_cmu_info,
}, {
+ .compatible = "samsung,exynosautov9-cmu-fsys0",
+ .data = &fsys0_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov9-cmu-fsys1",
+ .data = &fsys1_cmu_info,
+ }, {
.compatible = "samsung,exynosautov9-cmu-fsys2",
.data = &fsys2_cmu_info,
}, {
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index e18c80fbe804..c744bd9d2f96 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -21,4 +21,10 @@ config SPRD_SC9863A_CLK
help
Support for the global clock controller on sc9863a devices.
Say Y if you want to use peripheral devices on sc9863a SoC.
+
+config SPRD_UMS512_CLK
+ tristate "Support for the Spreadtrum UMS512 clocks"
+ help
+ Support for the global clock controller on ums512 devices.
+ Say Y if you want to use peripheral devices on ums512 SoC.
endif
diff --git a/drivers/clk/sprd/Makefile b/drivers/clk/sprd/Makefile
index 41d90e0d7863..f25b2c3904fb 100644
--- a/drivers/clk/sprd/Makefile
+++ b/drivers/clk/sprd/Makefile
@@ -11,3 +11,4 @@ clk-sprd-y += pll.o
## SoC support
obj-$(CONFIG_SPRD_SC9860_CLK) += sc9860-clk.o
obj-$(CONFIG_SPRD_SC9863A_CLK) += sc9863a-clk.o
+obj-$(CONFIG_SPRD_UMS512_CLK) += ums512-clk.o
diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
index d620bbbcdfc8..ce81e4087a8f 100644
--- a/drivers/clk/sprd/common.c
+++ b/drivers/clk/sprd/common.c
@@ -41,7 +41,7 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
{
void __iomem *base;
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
+ struct device_node *node = dev->of_node, *np;
struct regmap *regmap;
if (of_find_property(node, "sprd,syscon", NULL)) {
@@ -50,9 +50,10 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
pr_err("%s: failed to get syscon regmap\n", __func__);
return PTR_ERR(regmap);
}
- } else if (of_device_is_compatible(of_get_parent(dev->of_node),
- "syscon")) {
- regmap = device_node_to_regmap(of_get_parent(dev->of_node));
+ } else if (of_device_is_compatible(np = of_get_parent(node), "syscon") ||
+ (of_node_put(np), 0)) {
+ regmap = device_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(regmap)) {
dev_err(dev, "failed to get regmap from its parent.\n");
return PTR_ERR(regmap);
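
The common.c hunk above is a reference-count fix: of_get_parent() returns the parent node with its refcount raised, the (of_node_put(np), 0) comma expression drops that reference when the "syscon" compatible check fails, and the explicit of_node_put(np) after device_node_to_regmap() drops it on the success path. A minimal stand-alone sketch of the same pattern follows, using only the core OF and regmap APIs; the helper name example_parent_syscon_regmap is illustrative and not part of the driver.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Illustrative helper: look up a regmap from a "syscon" parent node while
 * keeping the reference taken by of_get_parent() balanced on every path.
 */
static struct regmap *example_parent_syscon_regmap(struct device_node *node)
{
	struct device_node *parent;
	struct regmap *regmap = ERR_PTR(-ENODEV);

	parent = of_get_parent(node);	/* takes a reference on the parent */
	if (!parent)
		return regmap;

	if (of_device_is_compatible(parent, "syscon"))
		regmap = device_node_to_regmap(parent);

	of_node_put(parent);		/* drop the reference in all cases */
	return regmap;
}
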
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
new file mode 100644
index 000000000000..fc25bdd85e4e
--- /dev/null
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -0,0 +1,2202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Unisoc UMS512 clock driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Xiaolong Zhang <xiaolong.zhang@unisoc.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/sprd,ums512-clk.h>
+
+#include "common.h"
+#include "composite.h"
+#include "div.h"
+#include "gate.h"
+#include "mux.h"
+#include "pll.h"
+
+#define UMS512_MUX_FLAG \
+ (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT)
+
+/* pll gate clock */
+/* Some pll clocks set CLK_IGNORE_UNUSED because hw dvfs does not go through
+ * the clock framework and therefore cannot gate these pll clocks itself.
+ */
+static CLK_FIXED_FACTOR_FW_NAME(clk_26m_aud, "clk-26m-aud", "ext-26m", 1, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_13m, "clk-13m", "ext-26m", 2, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_6m5, "clk-6m5", "ext-26m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_4m3, "clk-4m3", "ext-26m", 6, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_2m, "clk-2m", "ext-26m", 13, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_1m, "clk-1m", "ext-26m", 26, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(clk_250k, "clk-250k", "ext-26m", 104, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_25m, "rco-25m", "rco-100m", 4, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_4m, "rco-4m", "rco-100m", 25, 1, 0);
+static CLK_FIXED_FACTOR_FW_NAME(rco_2m, "rco-2m", "rco-100m", 50, 1, 0);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x8c,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x9c,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0xa0,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(twpll_gate, "twpll-gate", "ext-26m", 0xa4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(rpll_gate, "rpll-gate", "ext-26m", 0xac,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(cppll_gate, "cppll-gate", "ext-26m", 0xe4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x190,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x194,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x198,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+
+static struct sprd_clk_common *ums512_pmu_gate_clks[] = {
+ /* address base is 0x327e0000 */
+ &isppll_gate.common,
+ &dpll0_gate.common,
+ &dpll1_gate.common,
+ &lpll_gate.common,
+ &twpll_gate.common,
+ &gpll_gate.common,
+ &rpll_gate.common,
+ &cppll_gate.common,
+ &mpll0_gate.common,
+ &mpll1_gate.common,
+ &mpll2_gate.common,
+};
+
+static struct clk_hw_onecell_data ums512_pmu_gate_hws = {
+ .hws = {
+ [CLK_26M_AUD] = &clk_26m_aud.hw,
+ [CLK_13M] = &clk_13m.hw,
+ [CLK_6M5] = &clk_6m5.hw,
+ [CLK_4M3] = &clk_4m3.hw,
+ [CLK_2M] = &clk_2m.hw,
+ [CLK_1M] = &clk_1m.hw,
+ [CLK_250K] = &clk_250k.hw,
+ [CLK_RCO_25M] = &rco_25m.hw,
+ [CLK_RCO_4M] = &rco_4m.hw,
+ [CLK_RCO_2M] = &rco_2m.hw,
+ [CLK_ISPPLL_GATE] = &isppll_gate.common.hw,
+ [CLK_DPLL0_GATE] = &dpll0_gate.common.hw,
+ [CLK_DPLL1_GATE] = &dpll1_gate.common.hw,
+ [CLK_LPLL_GATE] = &lpll_gate.common.hw,
+ [CLK_TWPLL_GATE] = &twpll_gate.common.hw,
+ [CLK_GPLL_GATE] = &gpll_gate.common.hw,
+ [CLK_RPLL_GATE] = &rpll_gate.common.hw,
+ [CLK_CPPLL_GATE] = &cppll_gate.common.hw,
+ [CLK_MPLL0_GATE] = &mpll0_gate.common.hw,
+ [CLK_MPLL1_GATE] = &mpll1_gate.common.hw,
+ [CLK_MPLL2_GATE] = &mpll2_gate.common.hw,
+ },
+ .num = CLK_PMU_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_pmu_gate_desc = {
+ .clk_clks = ums512_pmu_gate_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_pmu_gate_clks),
+ .hw_clks = &ums512_pmu_gate_hws,
+};
+
+/* pll clock at g0 */
+static const u64 itable_dpll0[7] = { 6, 0, 0,
+ 1173000000ULL, 1475000000ULL,
+ 1855000000ULL, 1866000000ULL };
+
+static struct clk_bit_field f_dpll0[PLL_FACT_MAX] = {
+ { .shift = 18, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 4, .width = 3 }, /* icp */
+ { .shift = 7, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 0, .width = 0 }, /* postdiv */
+};
+static SPRD_PLL_HW(dpll0, "dpll0", &dpll0_gate.common.hw, 0x4, 3,
+ itable_dpll0, f_dpll0, 240, 1000, 1000, 0, 0);
+static CLK_FIXED_FACTOR_HW(dpll0_58m31, "dpll0-58m31", &dpll0.common.hw,
+ 32, 1, 0);
+
+static struct sprd_clk_common *ums512_g0_pll_clks[] = {
+ /* address base is 0x32390000 */
+ &dpll0.common,
+};
+
+static struct clk_hw_onecell_data ums512_g0_pll_hws = {
+ .hws = {
+ [CLK_DPLL0] = &dpll0.common.hw,
+ [CLK_DPLL0_58M31] = &dpll0_58m31.hw,
+ },
+ .num = CLK_ANLG_PHY_G0_NUM,
+};
+
+static struct sprd_clk_desc ums512_g0_pll_desc = {
+ .clk_clks = ums512_g0_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_g0_pll_clks),
+ .hw_clks = &ums512_g0_pll_hws,
+};
+
+/* pll clock at g2 */
+static const u64 itable_mpll[8] = { 7, 0,
+ 1400000000ULL, 1600000000ULL,
+ 1800000000ULL, 2000000000ULL,
+ 2200000000ULL, 2500000000ULL };
+
+static struct clk_bit_field f_mpll[PLL_FACT_MAX] = {
+ { .shift = 17, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 2, .width = 3 }, /* icp */
+ { .shift = 5, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 77, .width = 1 }, /* postdiv */
+};
+static SPRD_PLL_HW(mpll1, "mpll1", &mpll1_gate.common.hw, 0x0, 3,
+ itable_mpll, f_mpll, 240, 1000, 1000, 1, 1200000000);
+static CLK_FIXED_FACTOR_HW(mpll1_63m38, "mpll1-63m38", &mpll1.common.hw,
+ 32, 1, 0);
+
+static struct sprd_clk_common *ums512_g2_pll_clks[] = {
+ /* address base is 0x323B0000 */
+ &mpll1.common,
+};
+
+static struct clk_hw_onecell_data ums512_g2_pll_hws = {
+ .hws = {
+ [CLK_MPLL1] = &mpll1.common.hw,
+ [CLK_MPLL1_63M38] = &mpll1_63m38.hw,
+ },
+ .num = CLK_ANLG_PHY_G2_NUM,
+};
+
+static struct sprd_clk_desc ums512_g2_pll_desc = {
+ .clk_clks = ums512_g2_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_g2_pll_clks),
+ .hw_clks = &ums512_g2_pll_hws,
+};
+
+/* pll at g3 */
+static const u64 itable[8] = { 7, 0, 0,
+ 900000000ULL, 1100000000ULL,
+ 1300000000ULL, 1500000000ULL,
+ 1600000000ULL };
+
+static struct clk_bit_field f_pll[PLL_FACT_MAX] = {
+ { .shift = 18, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 2, .width = 3 }, /* icp */
+ { .shift = 5, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 77, .width = 1 }, /* postdiv */
+};
+
+static SPRD_PLL_FW_NAME(rpll, "rpll", "ext-26m", 0x0, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+
+static SPRD_SC_GATE_CLK_FW_NAME(audio_gate, "audio-gate", "ext-26m", 0x24,
+ 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+
+static struct clk_bit_field f_mpll2[PLL_FACT_MAX] = {
+ { .shift = 16, .width = 1 }, /* lock_done */
+ { .shift = 0, .width = 1 }, /* div_s */
+ { .shift = 67, .width = 1 }, /* mod_en */
+ { .shift = 1, .width = 1 }, /* sdm_en */
+ { .shift = 0, .width = 0 }, /* refin */
+ { .shift = 2, .width = 3 }, /* icp */
+ { .shift = 5, .width = 11 }, /* n */
+ { .shift = 55, .width = 7 }, /* nint */
+ { .shift = 32, .width = 23}, /* kint */
+ { .shift = 0, .width = 0 }, /* prediv */
+ { .shift = 77, .width = 1 }, /* postdiv */
+};
+static SPRD_PLL_HW(mpll0, "mpll0", &mpll0_gate.common.hw, 0x54, 3,
+ itable_mpll, f_mpll, 240, 1000, 1000, 1, 1200000000);
+static CLK_FIXED_FACTOR_HW(mpll0_56m88, "mpll0-56m88", &mpll0.common.hw,
+ 32, 1, 0);
+
+static const u64 itable_mpll2[6] = { 5,
+ 1200000000ULL, 1400000000ULL,
+ 1600000000ULL, 1800000000ULL,
+ 2000000000ULL };
+
+static SPRD_PLL_HW(mpll2, "mpll2", &mpll2_gate.common.hw, 0x9c, 3,
+ itable_mpll2, f_mpll2, 240, 1000, 1000, 1, 1000000000);
+static CLK_FIXED_FACTOR_HW(mpll2_47m13, "mpll2-47m13", &mpll2.common.hw,
+ 32, 1, 0);
+
+static struct sprd_clk_common *ums512_g3_pll_clks[] = {
+ /* address base is 0x323c0000 */
+ &rpll.common,
+ &audio_gate.common,
+ &mpll0.common,
+ &mpll2.common,
+};
+
+static struct clk_hw_onecell_data ums512_g3_pll_hws = {
+ .hws = {
+ [CLK_RPLL] = &rpll.common.hw,
+ [CLK_AUDIO_GATE] = &audio_gate.common.hw,
+ [CLK_MPLL0] = &mpll0.common.hw,
+ [CLK_MPLL0_56M88] = &mpll0_56m88.hw,
+ [CLK_MPLL2] = &mpll2.common.hw,
+ [CLK_MPLL2_47M13] = &mpll2_47m13.hw,
+ },
+ .num = CLK_ANLG_PHY_G3_NUM,
+};
+
+static struct sprd_clk_desc ums512_g3_pll_desc = {
+ .clk_clks = ums512_g3_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_g3_pll_clks),
+ .hw_clks = &ums512_g3_pll_hws,
+};
+
+/* pll clock at gc */
+static SPRD_PLL_FW_NAME(twpll, "twpll", "ext-26m", 0x0, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(twpll_768m, "twpll-768m", &twpll.common.hw,
+ 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_384m, "twpll-384m", &twpll.common.hw,
+ 4, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_192m, "twpll-192m", &twpll.common.hw,
+ 8, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_96m, "twpll-96m", &twpll.common.hw,
+ 16, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_48m, "twpll-48m", &twpll.common.hw,
+ 32, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_24m, "twpll-24m", &twpll.common.hw,
+ 64, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_12m, "twpll-12m", &twpll.common.hw,
+ 128, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_512m, "twpll-512m", &twpll.common.hw,
+ 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_256m, "twpll-256m", &twpll.common.hw,
+ 6, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_128m, "twpll-128m", &twpll.common.hw,
+ 12, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_64m, "twpll-64m", &twpll.common.hw,
+ 24, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_307m2, "twpll-307m2", &twpll.common.hw,
+ 5, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_219m4, "twpll-219m4", &twpll.common.hw,
+ 7, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_170m6, "twpll-170m6", &twpll.common.hw,
+ 9, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_153m6, "twpll-153m6", &twpll.common.hw,
+ 10, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_76m8, "twpll-76m8", &twpll.common.hw,
+ 20, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_51m2, "twpll-51m2", &twpll.common.hw,
+ 30, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_38m4, "twpll-38m4", &twpll.common.hw,
+ 40, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_19m2, "twpll-19m2", &twpll.common.hw,
+ 80, 1, 0);
+static CLK_FIXED_FACTOR_HW(twpll_12m29, "twpll-12m29", &twpll.common.hw,
+ 125, 1, 0);
+
+static SPRD_PLL_FW_NAME(lpll, "lpll", "ext-26m", 0x18, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(lpll_614m4, "lpll-614m4", &lpll.common.hw,
+ 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_409m6, "lpll-409m6", &lpll.common.hw,
+ 3, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_245m76, "lpll-245m76", &lpll.common.hw,
+ 5, 1, 0);
+static CLK_FIXED_FACTOR_HW(lpll_30m72, "lpll-30m72", &lpll.common.hw,
+ 40, 1, 0);
+
+static SPRD_PLL_FW_NAME(isppll, "isppll", "ext-26m", 0x30, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(isppll_468m, "isppll-468m", &isppll.common.hw,
+ 2, 1, 0);
+static CLK_FIXED_FACTOR_HW(isppll_78m, "isppll-78m", &isppll.common.hw,
+ 12, 1, 0);
+
+static SPRD_PLL_HW(gpll, "gpll", &gpll_gate.common.hw, 0x48, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(gpll_40m, "gpll-40m", &gpll.common.hw,
+ 20, 1, 0);
+
+static SPRD_PLL_HW(cppll, "cppll", &cppll_gate.common.hw, 0x60, 3,
+ itable, f_pll, 240, 1000, 1000, 1, 750000000);
+static CLK_FIXED_FACTOR_HW(cppll_39m32, "cppll-39m32", &cppll.common.hw,
+ 26, 1, 0);
+
+static struct sprd_clk_common *ums512_gc_pll_clks[] = {
+ /* address base is 0x323e0000 */
+ &twpll.common,
+ &lpll.common,
+ &isppll.common,
+ &gpll.common,
+ &cppll.common,
+};
+
+static struct clk_hw_onecell_data ums512_gc_pll_hws = {
+ .hws = {
+ [CLK_TWPLL] = &twpll.common.hw,
+ [CLK_TWPLL_768M] = &twpll_768m.hw,
+ [CLK_TWPLL_384M] = &twpll_384m.hw,
+ [CLK_TWPLL_192M] = &twpll_192m.hw,
+ [CLK_TWPLL_96M] = &twpll_96m.hw,
+ [CLK_TWPLL_48M] = &twpll_48m.hw,
+ [CLK_TWPLL_24M] = &twpll_24m.hw,
+ [CLK_TWPLL_12M] = &twpll_12m.hw,
+ [CLK_TWPLL_512M] = &twpll_512m.hw,
+ [CLK_TWPLL_256M] = &twpll_256m.hw,
+ [CLK_TWPLL_128M] = &twpll_128m.hw,
+ [CLK_TWPLL_64M] = &twpll_64m.hw,
+ [CLK_TWPLL_307M2] = &twpll_307m2.hw,
+ [CLK_TWPLL_219M4] = &twpll_219m4.hw,
+ [CLK_TWPLL_170M6] = &twpll_170m6.hw,
+ [CLK_TWPLL_153M6] = &twpll_153m6.hw,
+ [CLK_TWPLL_76M8] = &twpll_76m8.hw,
+ [CLK_TWPLL_51M2] = &twpll_51m2.hw,
+ [CLK_TWPLL_38M4] = &twpll_38m4.hw,
+ [CLK_TWPLL_19M2] = &twpll_19m2.hw,
+ [CLK_TWPLL_12M29] = &twpll_12m29.hw,
+ [CLK_LPLL] = &lpll.common.hw,
+ [CLK_LPLL_614M4] = &lpll_614m4.hw,
+ [CLK_LPLL_409M6] = &lpll_409m6.hw,
+ [CLK_LPLL_245M76] = &lpll_245m76.hw,
+ [CLK_LPLL_30M72] = &lpll_30m72.hw,
+ [CLK_ISPPLL] = &isppll.common.hw,
+ [CLK_ISPPLL_468M] = &isppll_468m.hw,
+ [CLK_ISPPLL_78M] = &isppll_78m.hw,
+ [CLK_GPLL] = &gpll.common.hw,
+ [CLK_GPLL_40M] = &gpll_40m.hw,
+ [CLK_CPPLL] = &cppll.common.hw,
+ [CLK_CPPLL_39M32] = &cppll_39m32.hw,
+ },
+ .num = CLK_ANLG_PHY_GC_NUM,
+};
+
+static struct sprd_clk_desc ums512_gc_pll_desc = {
+ .clk_clks = ums512_gc_pll_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_gc_pll_clks),
+ .hw_clks = &ums512_gc_pll_hws,
+};
+
+/* ap ahb gates */
+static SPRD_SC_GATE_CLK_FW_NAME(dsi_eb, "dsi-eb", "ext-26m",
+ 0x0, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dispc_eb, "dispc-eb", "ext-26m",
+ 0x0, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(vsp_eb, "vsp-eb", "ext-26m",
+ 0x0, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(vdma_eb, "vdma-eb", "ext-26m",
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dma_pub_eb, "dma-pub-eb", "ext-26m",
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dma_sec_eb, "dma-sec-eb", "ext-26m",
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ipi_eb, "ipi-eb", "ext-26m",
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ahb_ckg_eb, "ahb-ckg-eb", "ext-26m",
+ 0x0, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(bm_clk_eb, "bm-clk-eb", "ext-26m",
+ 0x0, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+
+static struct sprd_clk_common *ums512_apahb_gate[] = {
+ /* address base is 0x20100000 */
+ &dsi_eb.common,
+ &dispc_eb.common,
+ &vsp_eb.common,
+ &vdma_eb.common,
+ &dma_pub_eb.common,
+ &dma_sec_eb.common,
+ &ipi_eb.common,
+ &ahb_ckg_eb.common,
+ &bm_clk_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_apahb_gate_hws = {
+ .hws = {
+ [CLK_DSI_EB] = &dsi_eb.common.hw,
+ [CLK_DISPC_EB] = &dispc_eb.common.hw,
+ [CLK_VSP_EB] = &vsp_eb.common.hw,
+ [CLK_VDMA_EB] = &vdma_eb.common.hw,
+ [CLK_DMA_PUB_EB] = &dma_pub_eb.common.hw,
+ [CLK_DMA_SEC_EB] = &dma_sec_eb.common.hw,
+ [CLK_IPI_EB] = &ipi_eb.common.hw,
+ [CLK_AHB_CKG_EB] = &ahb_ckg_eb.common.hw,
+ [CLK_BM_CLK_EB] = &bm_clk_eb.common.hw,
+ },
+ .num = CLK_AP_AHB_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_apahb_gate_desc = {
+ .clk_clks = ums512_apahb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_apahb_gate),
+ .hw_clks = &ums512_apahb_gate_hws,
+};
+
+/* ap clks */
+static const struct clk_parent_data ap_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_apb_clk, "ap-apb-clk", ap_apb_parents,
+ 0x20, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data ipi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(ipi_clk, "ipi-clk", ipi_parents,
+ 0x24, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data ap_uart_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_uart0_clk, "ap-uart0-clk", ap_uart_parents,
+ 0x28, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart1_clk, "ap-uart1-clk", ap_uart_parents,
+ 0x2c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_uart2_clk, "ap-uart2-clk", ap_uart_parents,
+ 0x30, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data i2c_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_i2c0_clk, "ap-i2c0-clk", i2c_parents,
+ 0x34, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c1_clk, "ap-i2c1-clk", i2c_parents,
+ 0x38, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c2_clk, "ap-i2c2-clk", i2c_parents,
+ 0x3c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c3_clk, "ap-i2c3-clk", i2c_parents,
+ 0x40, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_i2c4_clk, "ap-i2c4-clk", i2c_parents,
+ 0x44, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_spi0_clk, "ap-spi0-clk", spi_parents,
+ 0x48, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi1_clk, "ap-spi1-clk", spi_parents,
+ 0x4c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi2_clk, "ap-spi2-clk", spi_parents,
+ 0x50, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_spi3_clk, "ap-spi3-clk", spi_parents,
+ 0x54, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data iis_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_iis0_clk, "ap-iis0-clk", iis_parents,
+ 0x58, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis1_clk, "ap-iis1-clk", iis_parents,
+ 0x5c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(ap_iis2_clk, "ap-iis2-clk", iis_parents,
+ 0x60, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data sim_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_64m.hw },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(ap_sim_clk, "ap-sim-clk", sim_parents,
+ 0x64, 0, 3, 8, 3, 0);
+
+static const struct clk_parent_data ap_ce_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_ce_clk, "ap-ce-clk", ap_ce_parents,
+ 0x68, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data sdio_parents[] = {
+ { .hw = &clk_1m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &rpll.common.hw },
+ { .hw = &lpll_409m6.hw },
+};
+static SPRD_MUX_CLK_DATA(sdio0_2x_clk, "sdio0-2x", sdio_parents,
+ 0x80, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(sdio1_2x_clk, "sdio1-2x", sdio_parents,
+ 0x88, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(emmc_2x_clk, "emmc-2x", sdio_parents,
+ 0x90, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data vsp_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(vsp_clk, "vsp-clk", vsp_parents,
+ 0x98, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dispc0_parents[] = {
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(dispc0_clk, "dispc0-clk", dispc0_parents,
+ 0x9c, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dispc0_dpi_parents[] = {
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_COMP_CLK_DATA(dispc0_dpi_clk, "dispc0-dpi-clk", dispc0_dpi_parents,
+ 0xa0, 0, 3, 8, 4, 0);
+
+static const struct clk_parent_data dsi_apb_parents[] = {
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_MUX_CLK_DATA(dsi_apb_clk, "dsi-apb-clk", dsi_apb_parents,
+ 0xa4, 0, 2, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(dsi_rxesc, "dsi-rxesc", "ext-26m",
+ 0xa8, BIT(16), 0, 0);
+
+static SPRD_GATE_CLK_FW_NAME(dsi_lanebyte, "dsi-lanebyte", "ext-26m",
+ 0xac, BIT(16), 0, 0);
+
+static const struct clk_parent_data vdsp_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &lpll_614m4.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &isppll.common.hw },
+};
+static SPRD_MUX_CLK_DATA(vdsp_clk, "vdsp-clk", vdsp_parents,
+ 0xb0, 0, 3, UMS512_MUX_FLAG);
+static SPRD_DIV_CLK_HW(vdsp_m_clk, "vdsp-m-clk", &vdsp_clk.common.hw,
+ 0xb4, 8, 2, 0);
+
+static struct sprd_clk_common *ums512_ap_clks[] = {
+ /* address base is 0x20200000 */
+ &ap_apb_clk.common,
+ &ipi_clk.common,
+ &ap_uart0_clk.common,
+ &ap_uart1_clk.common,
+ &ap_uart2_clk.common,
+ &ap_i2c0_clk.common,
+ &ap_i2c1_clk.common,
+ &ap_i2c2_clk.common,
+ &ap_i2c3_clk.common,
+ &ap_i2c4_clk.common,
+ &ap_spi0_clk.common,
+ &ap_spi1_clk.common,
+ &ap_spi2_clk.common,
+ &ap_spi3_clk.common,
+ &ap_iis0_clk.common,
+ &ap_iis1_clk.common,
+ &ap_iis2_clk.common,
+ &ap_sim_clk.common,
+ &ap_ce_clk.common,
+ &sdio0_2x_clk.common,
+ &sdio1_2x_clk.common,
+ &emmc_2x_clk.common,
+ &vsp_clk.common,
+ &dispc0_clk.common,
+ &dispc0_dpi_clk.common,
+ &dsi_apb_clk.common,
+ &dsi_rxesc.common,
+ &dsi_lanebyte.common,
+ &vdsp_clk.common,
+ &vdsp_m_clk.common,
+};
+
+static struct clk_hw_onecell_data ums512_ap_clk_hws = {
+ .hws = {
+ [CLK_AP_APB] = &ap_apb_clk.common.hw,
+ [CLK_IPI] = &ipi_clk.common.hw,
+ [CLK_AP_UART0] = &ap_uart0_clk.common.hw,
+ [CLK_AP_UART1] = &ap_uart1_clk.common.hw,
+ [CLK_AP_UART2] = &ap_uart2_clk.common.hw,
+ [CLK_AP_I2C0] = &ap_i2c0_clk.common.hw,
+ [CLK_AP_I2C1] = &ap_i2c1_clk.common.hw,
+ [CLK_AP_I2C2] = &ap_i2c2_clk.common.hw,
+ [CLK_AP_I2C3] = &ap_i2c3_clk.common.hw,
+ [CLK_AP_I2C4] = &ap_i2c4_clk.common.hw,
+ [CLK_AP_SPI0] = &ap_spi0_clk.common.hw,
+ [CLK_AP_SPI1] = &ap_spi1_clk.common.hw,
+ [CLK_AP_SPI2] = &ap_spi2_clk.common.hw,
+ [CLK_AP_SPI3] = &ap_spi3_clk.common.hw,
+ [CLK_AP_IIS0] = &ap_iis0_clk.common.hw,
+ [CLK_AP_IIS1] = &ap_iis1_clk.common.hw,
+ [CLK_AP_IIS2] = &ap_iis2_clk.common.hw,
+ [CLK_AP_SIM] = &ap_sim_clk.common.hw,
+ [CLK_AP_CE] = &ap_ce_clk.common.hw,
+ [CLK_SDIO0_2X] = &sdio0_2x_clk.common.hw,
+ [CLK_SDIO1_2X] = &sdio1_2x_clk.common.hw,
+ [CLK_EMMC_2X] = &emmc_2x_clk.common.hw,
+ [CLK_VSP] = &vsp_clk.common.hw,
+ [CLK_DISPC0] = &dispc0_clk.common.hw,
+ [CLK_DISPC0_DPI] = &dispc0_dpi_clk.common.hw,
+ [CLK_DSI_APB] = &dsi_apb_clk.common.hw,
+ [CLK_DSI_RXESC] = &dsi_rxesc.common.hw,
+ [CLK_DSI_LANEBYTE] = &dsi_lanebyte.common.hw,
+ [CLK_VDSP] = &vdsp_clk.common.hw,
+ [CLK_VDSP_M] = &vdsp_m_clk.common.hw,
+ },
+ .num = CLK_AP_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_ap_clk_desc = {
+ .clk_clks = ums512_ap_clks,
+ .num_clk_clks = ARRAY_SIZE(ums512_ap_clks),
+ .hw_clks = &ums512_ap_clk_hws,
+};
+
+/* aon apb clks */
+static const struct clk_parent_data aon_apb_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-4m" },
+ { .hw = &clk_13m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(aon_apb_clk, "aon-apb-clk", aon_apb_parents,
+ 0x220, 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data adi_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_25m.hw },
+ { .hw = &twpll_38m4.hw },
+ { .hw = &twpll_51m2.hw },
+};
+static SPRD_MUX_CLK_DATA(adi_clk, "adi-clk", adi_parents,
+ 0x224, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aux_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &clk_26m_aud.hw },
+ { .hw = &rco_25m.hw },
+ { .hw = &cppll_39m32.hw },
+ { .hw = &mpll0_56m88.hw },
+ { .hw = &mpll1_63m38.hw },
+ { .hw = &mpll2_47m13.hw },
+ { .hw = &dpll0_58m31.hw },
+ { .hw = &gpll_40m.hw },
+ { .hw = &twpll_48m.hw },
+};
+static const struct clk_parent_data aux1_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &clk_26m_aud.hw },
+ { .hw = &rco_25m.hw },
+ { .hw = &cppll_39m32.hw },
+ { .hw = &mpll0_56m88.hw },
+ { .hw = &mpll1_63m38.hw },
+ { .hw = &mpll2_47m13.hw },
+ { .hw = &dpll0_58m31.hw },
+ { .hw = &gpll_40m.hw },
+ { .hw = &twpll_19m2.hw },
+ { .hw = &lpll_30m72.hw },
+ { .hw = &rpll.common.hw },
+ { .hw = &twpll_12m29.hw },
+};
+static SPRD_COMP_CLK_DATA(aux0_clk, "aux0-clk", aux_parents,
+ 0x228, 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux1_clk, "aux1-clk", aux1_parents,
+ 0x22c, 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(aux2_clk, "aux2-clk", aux_parents,
+ 0x230, 0, 5, 8, 4, 0);
+static SPRD_COMP_CLK_DATA(probe_clk, "probe-clk", aux_parents,
+ 0x234, 0, 5, 8, 4, 0);
+
+static const struct clk_parent_data pwm_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_MUX_CLK_DATA(pwm0_clk, "pwm0-clk", pwm_parents,
+ 0x238, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm1_clk, "pwm1-clk", pwm_parents,
+ 0x23c, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm2_clk, "pwm2-clk", pwm_parents,
+ 0x240, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(pwm3_clk, "pwm3-clk", pwm_parents,
+ 0x244, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data efuse_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(efuse_clk, "efuse-clk", efuse_parents,
+ 0x248, 0, 1, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data uart_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(uart0_clk, "uart0-clk", uart_parents,
+ 0x24c, 0, 3, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", uart_parents,
+ 0x250, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data thm_parents[] = {
+ { .fw_name = "ext-32m" },
+ { .hw = &clk_250k.hw },
+};
+static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
+ 0x260, 0, 1, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(thm1_clk, "thm1-clk", thm_parents,
+ 0x264, 0, 1, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(thm2_clk, "thm2-clk", thm_parents,
+ 0x268, 0, 1, UMS512_MUX_FLAG);
+static SPRD_MUX_CLK_DATA(thm3_clk, "thm3-clk", thm_parents,
+ 0x26c, 0, 1, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aon_i2c_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(aon_i2c_clk, "aon-i2c-clk", aon_i2c_parents,
+ 0x27c, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aon_iis_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(aon_iis_clk, "aon-iis-clk", aon_iis_parents,
+ 0x280, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data scc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_51m2.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_MUX_CLK_DATA(scc_clk, "scc-clk", scc_parents,
+ 0x284, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data apcpu_dap_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &rco_4m.hw },
+ { .hw = &twpll_76m8.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(apcpu_dap_clk, "apcpu-dap-clk", apcpu_dap_parents,
+ 0x288, 0, 3, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(apcpu_dap_mtck, "apcpu-dap-mtck", "ext-26m",
+ 0x28c, BIT(16), 0, 0);
+
+static const struct clk_parent_data apcpu_ts_parents[] = {
+ { .fw_name = "ext-32m" },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(apcpu_ts_clk, "apcpu-ts-clk", apcpu_ts_parents,
+ 0x290, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data debug_ts_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_192m.hw },
+};
+static SPRD_MUX_CLK_DATA(debug_ts_clk, "debug-ts-clk", debug_ts_parents,
+ 0x294, 0, 2, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(dsi_test_s, "dsi-test-s", "ext-26m",
+ 0x298, BIT(16), 0, 0);
+
+static const struct clk_parent_data djtag_tck_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(djtag_tck_clk, "djtag-tck-clk", djtag_tck_parents,
+ 0x2b4, 0, 1, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(djtag_tck_hw, "djtag-tck-hw", "ext-26m",
+ 0x2b8, BIT(16), 0, 0);
+
+static const struct clk_parent_data aon_tmr_parents[] = {
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(aon_tmr_clk, "aon-tmr-clk", aon_tmr_parents,
+ 0x2c0, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data aon_pmu_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rco_4m.hw },
+ { .fw_name = "ext-4m" },
+};
+static SPRD_MUX_CLK_DATA(aon_pmu_clk, "aon-pmu-clk", aon_pmu_parents,
+ 0x2c8, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data debounce_parents[] = {
+ { .fw_name = "ext-32k" },
+ { .hw = &rco_4m.hw },
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(debounce_clk, "debounce-clk", debounce_parents,
+ 0x2cc, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data apcpu_pmu_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(apcpu_pmu_clk, "apcpu-pmu-clk", apcpu_pmu_parents,
+ 0x2d0, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data top_dvfs_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_MUX_CLK_DATA(top_dvfs_clk, "top-dvfs-clk", top_dvfs_parents,
+ 0x2d8, 0, 2, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_FW_NAME(otg_utmi, "otg-utmi", "ext-26m", 0x2dc,
+ BIT(16), 0, 0);
+
+static const struct clk_parent_data otg_ref_parents[] = {
+ { .hw = &twpll_12m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(otg_ref_clk, "otg-ref-clk", otg_ref_parents,
+ 0x2e0, 0, 1, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data cssys_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_153m6.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_COMP_CLK_DATA(cssys_clk, "cssys-clk", cssys_parents,
+ 0x2e4, 0, 3, 8, 2, 0);
+static SPRD_DIV_CLK_HW(cssys_pub_clk, "cssys-pub-clk", &cssys_clk.common.hw,
+ 0x2e8, 8, 2, 0);
+static SPRD_DIV_CLK_HW(cssys_apb_clk, "cssys-apb-clk", &cssys_clk.common.hw,
+ 0x2ec, 8, 3, 0);
+
+static const struct clk_parent_data ap_axi_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_axi_clk, "ap-axi-clk", ap_axi_parents,
+ 0x2f0, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data ap_mm_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(ap_mm_clk, "ap-mm-clk", ap_mm_parents,
+ 0x2f4, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data sdio2_2x_parents[] = {
+ { .hw = &clk_1m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &rpll.common.hw },
+ { .hw = &lpll_409m6.hw },
+};
+static SPRD_MUX_CLK_DATA(sdio2_2x_clk, "sdio2-2x-clk", sdio2_2x_parents,
+ 0x2f8, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data analog_io_apb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+};
+static SPRD_COMP_CLK_DATA(analog_io_apb, "analog-io-apb", analog_io_apb_parents,
+ 0x300, 0, 1, 8, 2, 0);
+
+static const struct clk_parent_data dmc_ref_parents[] = {
+ { .hw = &clk_6m5.hw },
+ { .hw = &clk_13m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(dmc_ref_clk, "dmc-ref-clk", dmc_ref_parents,
+ 0x304, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data emc_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &twpll_768m.hw },
+};
+static SPRD_MUX_CLK_DATA(emc_clk, "emc-clk", emc_parents,
+ 0x30c, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data usb_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_96m.hw },
+ { .fw_name = "rco-100m" },
+ { .hw = &twpll_128m.hw },
+};
+static SPRD_COMP_CLK_DATA(usb_clk, "usb-clk", usb_parents,
+ 0x310, 0, 3, 8, 2, 0);
+
+static const struct clk_parent_data pmu_26m_parents[] = {
+ { .hw = &rco_25m.hw },
+ { .fw_name = "ext-26m" },
+};
+static SPRD_MUX_CLK_DATA(pmu_26m_clk, "26m-pmu-clk", pmu_26m_parents,
+ 0x318, 0, 1, UMS512_MUX_FLAG);
+
+static struct sprd_clk_common *ums512_aon_apb[] = {
+ /* address base is 0x32080200 */
+ &aon_apb_clk.common,
+ &adi_clk.common,
+ &aux0_clk.common,
+ &aux1_clk.common,
+ &aux2_clk.common,
+ &probe_clk.common,
+ &pwm0_clk.common,
+ &pwm1_clk.common,
+ &pwm2_clk.common,
+ &pwm3_clk.common,
+ &efuse_clk.common,
+ &uart0_clk.common,
+ &uart1_clk.common,
+ &thm0_clk.common,
+ &thm1_clk.common,
+ &thm2_clk.common,
+ &thm3_clk.common,
+ &aon_i2c_clk.common,
+ &aon_iis_clk.common,
+ &scc_clk.common,
+ &apcpu_dap_clk.common,
+ &apcpu_dap_mtck.common,
+ &apcpu_ts_clk.common,
+ &debug_ts_clk.common,
+ &dsi_test_s.common,
+ &djtag_tck_clk.common,
+ &djtag_tck_hw.common,
+ &aon_tmr_clk.common,
+ &aon_pmu_clk.common,
+ &debounce_clk.common,
+ &apcpu_pmu_clk.common,
+ &top_dvfs_clk.common,
+ &otg_utmi.common,
+ &otg_ref_clk.common,
+ &cssys_clk.common,
+ &cssys_pub_clk.common,
+ &cssys_apb_clk.common,
+ &ap_axi_clk.common,
+ &ap_mm_clk.common,
+ &sdio2_2x_clk.common,
+ &analog_io_apb.common,
+ &dmc_ref_clk.common,
+ &emc_clk.common,
+ &usb_clk.common,
+ &pmu_26m_clk.common,
+};
+
+static struct clk_hw_onecell_data ums512_aon_apb_hws = {
+ .hws = {
+ [CLK_AON_APB] = &aon_apb_clk.common.hw,
+ [CLK_ADI] = &adi_clk.common.hw,
+ [CLK_AUX0] = &aux0_clk.common.hw,
+ [CLK_AUX1] = &aux1_clk.common.hw,
+ [CLK_AUX2] = &aux2_clk.common.hw,
+ [CLK_PROBE] = &probe_clk.common.hw,
+ [CLK_PWM0] = &pwm0_clk.common.hw,
+ [CLK_PWM1] = &pwm1_clk.common.hw,
+ [CLK_PWM2] = &pwm2_clk.common.hw,
+ [CLK_PWM3] = &pwm3_clk.common.hw,
+ [CLK_EFUSE] = &efuse_clk.common.hw,
+ [CLK_UART0] = &uart0_clk.common.hw,
+ [CLK_UART1] = &uart1_clk.common.hw,
+ [CLK_THM0] = &thm0_clk.common.hw,
+ [CLK_THM1] = &thm1_clk.common.hw,
+ [CLK_THM2] = &thm2_clk.common.hw,
+ [CLK_THM3] = &thm3_clk.common.hw,
+ [CLK_AON_I2C] = &aon_i2c_clk.common.hw,
+ [CLK_AON_IIS] = &aon_iis_clk.common.hw,
+ [CLK_SCC] = &scc_clk.common.hw,
+ [CLK_APCPU_DAP] = &apcpu_dap_clk.common.hw,
+ [CLK_APCPU_DAP_MTCK] = &apcpu_dap_mtck.common.hw,
+ [CLK_APCPU_TS] = &apcpu_ts_clk.common.hw,
+ [CLK_DEBUG_TS] = &debug_ts_clk.common.hw,
+ [CLK_DSI_TEST_S] = &dsi_test_s.common.hw,
+ [CLK_DJTAG_TCK] = &djtag_tck_clk.common.hw,
+ [CLK_DJTAG_TCK_HW] = &djtag_tck_hw.common.hw,
+ [CLK_AON_TMR] = &aon_tmr_clk.common.hw,
+ [CLK_AON_PMU] = &aon_pmu_clk.common.hw,
+ [CLK_DEBOUNCE] = &debounce_clk.common.hw,
+ [CLK_APCPU_PMU] = &apcpu_pmu_clk.common.hw,
+ [CLK_TOP_DVFS] = &top_dvfs_clk.common.hw,
+ [CLK_OTG_UTMI] = &otg_utmi.common.hw,
+ [CLK_OTG_REF] = &otg_ref_clk.common.hw,
+ [CLK_CSSYS] = &cssys_clk.common.hw,
+ [CLK_CSSYS_PUB] = &cssys_pub_clk.common.hw,
+ [CLK_CSSYS_APB] = &cssys_apb_clk.common.hw,
+ [CLK_AP_AXI] = &ap_axi_clk.common.hw,
+ [CLK_AP_MM] = &ap_mm_clk.common.hw,
+ [CLK_SDIO2_2X] = &sdio2_2x_clk.common.hw,
+ [CLK_ANALOG_IO_APB] = &analog_io_apb.common.hw,
+ [CLK_DMC_REF_CLK] = &dmc_ref_clk.common.hw,
+ [CLK_EMC] = &emc_clk.common.hw,
+ [CLK_USB] = &usb_clk.common.hw,
+ [CLK_26M_PMU] = &pmu_26m_clk.common.hw,
+ },
+ .num = CLK_AON_APB_NUM,
+};
+
+static struct sprd_clk_desc ums512_aon_apb_desc = {
+ .clk_clks = ums512_aon_apb,
+ .num_clk_clks = ARRAY_SIZE(ums512_aon_apb),
+ .hw_clks = &ums512_aon_apb_hws,
+};
+
+/* aon apb gates */
+static SPRD_SC_GATE_CLK_FW_NAME(rc100m_cal_eb, "rc100m-cal-eb", "ext-26m",
+ 0x0, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(djtag_tck_eb, "djtag-tck-eb", "ext-26m",
+ 0x0, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(djtag_eb, "djtag-eb", "ext-26m",
+ 0x0, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aux0_eb, "aux0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aux1_eb, "aux1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aux2_eb, "aux2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(probe_eb, "probe-eb", "ext-26m",
+ 0x0, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(mm_eb, "mm-eb", "ext-26m",
+ 0x0, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(gpu_eb, "gpu-eb", "ext-26m",
+ 0x0, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(mspi_eb, "mspi-eb", "ext-26m",
+ 0x0, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apcpu_dap_eb, "apcpu-dap-eb", "ext-26m",
+ 0x0, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_cssys_eb, "aon-cssys-eb", "ext-26m",
+ 0x0, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(cssys_apb_eb, "cssys-apb-eb", "ext-26m",
+ 0x0, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(cssys_pub_eb, "cssys-pub-eb", "ext-26m",
+ 0x0, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdphy_cfg_eb, "sdphy-cfg-eb", "ext-26m",
+ 0x0, 0x1000, BIT(19), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdphy_ref_eb, "sdphy-ref-eb", "ext-26m",
+ 0x0, 0x1000, BIT(20), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(efuse_eb, "efuse-eb", "ext-26m",
+ 0x4, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(gpio_eb, "gpio-eb", "ext-26m",
+ 0x4, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(mbox_eb, "mbox-eb", "ext-26m",
+ 0x4, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(kpd_eb, "kpd-eb", "ext-26m",
+ 0x4, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_syst_eb, "aon-syst-eb", "ext-26m",
+ 0x4, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_syst_eb, "ap-syst-eb", "ext-26m",
+ 0x4, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_tmr_eb, "aon-tmr-eb", "ext-26m",
+ 0x4, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(otg_utmi_eb, "otg-utmi-eb", "ext-26m",
+ 0x4, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(otg_phy_eb, "otg-phy-eb", "ext-26m",
+ 0x4, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(splk_eb, "splk-eb", "ext-26m",
+ 0x4, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pin_eb, "pin-eb", "ext-26m",
+ 0x4, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ana_eb, "ana-eb", "ext-26m",
+ 0x4, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apcpu_ts0_eb, "apcpu-ts0-eb", "ext-26m",
+ 0x4, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apb_busmon_eb, "apb-busmon-eb", "ext-26m",
+ 0x4, 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_iis_eb, "aon-iis-eb", "ext-26m",
+ 0x4, 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(scc_eb, "scc-eb", "ext-26m",
+ 0x4, 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(thm0_eb, "thm0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(thm1_eb, "thm1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(thm2_eb, "thm2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(asim_top_eb, "asim-top", "ext-26m",
+ 0x8, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c_eb, "i2c-eb", "ext-26m",
+ 0x8, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pmu_eb, "pmu-eb", "ext-26m",
+ 0x8, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(adi_eb, "adi-eb", "ext-26m",
+ 0x8, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(eic_eb, "eic-eb", "ext-26m",
+ 0x8, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc0_eb, "ap-intc0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc1_eb, "ap-intc1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc2_eb, "ap-intc2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc3_eb, "ap-intc3-eb", "ext-26m",
+ 0x8, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc4_eb, "ap-intc4-eb", "ext-26m",
+ 0x8, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_intc5_eb, "ap-intc5-eb", "ext-26m",
+ 0x8, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(audcp_intc_eb, "audcp-intc-eb", "ext-26m",
+ 0x8, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr0_eb, "ap-tmr0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(22), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr1_eb, "ap-tmr1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(23), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr2_eb, "ap-tmr2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(24), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm0_eb, "pwm0-eb", "ext-26m",
+ 0x8, 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm1_eb, "pwm1-eb", "ext-26m",
+ 0x8, 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm2_eb, "pwm2-eb", "ext-26m",
+ 0x8, 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pwm3_eb, "pwm3-eb", "ext-26m",
+ 0x8, 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_wdg_eb, "ap-wdg-eb", "ext-26m",
+ 0x8, 0x1000, BIT(29), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apcpu_wdg_eb, "apcpu-wdg-eb", "ext-26m",
+ 0x8, 0x1000, BIT(30), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(serdes_eb, "serdes-eb", "ext-26m",
+ 0x8, 0x1000, BIT(31), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(arch_rtc_eb, "arch-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(kpd_rtc_eb, "kpd-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_syst_rtc_eb, "aon-syst-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_syst_rtc_eb, "ap-syst-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(eic_rtc_eb, "eic-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(eic_rtcdv5_eb, "eic-rtcdv5-eb", "ext-26m",
+ 0x18, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ac_wdg_rtc_eb, "ac-wdg-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dcxo_lc_rtc_eb, "dcxo-lc-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(bb_cal_rtc_eb, "bb-cal-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_emmc_rtc_eb, "ap-emmc-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_sdio0_rtc_eb, "ap-sdio0-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_sdio1_rtc_eb, "ap-sdio1-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_sdio2_rtc_eb, "ap-sdio2-rtc-eb", "ext-26m",
+ 0x18, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dsi_csi_test_eb, "dsi-csi-test-eb", "ext-26m",
+ 0x138, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(djtag_tck_en, "djtag-tck-en", "ext-26m",
+ 0x138, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dphy_ref_eb, "dphy-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(dmc_ref_eb, "dmc-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(otg_ref_eb, "otg-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(12), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(tsen_eb, "tsen-eb", "ext-26m",
+ 0x138, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(tmr_eb, "tmr-eb", "ext-26m",
+ 0x138, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(rc100m_ref_eb, "rc100m-ref-eb", "ext-26m",
+ 0x138, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(rc100m_fdk_eb, "rc100m-fdk-eb", "ext-26m",
+ 0x138, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(debounce_eb, "debounce-eb", "ext-26m",
+ 0x138, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(det_32k_eb, "det-32k-eb", "ext-26m",
+ 0x138, 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(top_cssys_en, "top-cssys-en", "ext-26m",
+ 0x13c, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(ap_axi_en, "ap-axi-en", "ext-26m",
+ 0x13c, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_2x_en, "sdio0-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_1x_en, "sdio0-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_2x_en, "sdio1-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_1x_en, "sdio1-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_2x_en, "sdio2-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_1x_en, "sdio2-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_2x_en, "emmc-2x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_1x_en, "emmc-1x-en", "ext-26m",
+ 0x13c, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(pll_test_en, "pll-test-en", "ext-26m",
+ 0x13c, 0x1000, BIT(14), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(cphy_cfg_en, "cphy-cfg-en", "ext-26m",
+ 0x13c, 0x1000, BIT(15), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(debug_ts_en, "debug-ts-en", "ext-26m",
+ 0x13c, 0x1000, BIT(18), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(access_aud_en, "access-aud-en",
+ "ext-26m", 0x14c, 0x1000, BIT(0), 0, 0);
+
+static struct sprd_clk_common *ums512_aon_gate[] = {
+ /* address base is 0x327d0000 */
+ &rc100m_cal_eb.common,
+ &djtag_tck_eb.common,
+ &djtag_eb.common,
+ &aux0_eb.common,
+ &aux1_eb.common,
+ &aux2_eb.common,
+ &probe_eb.common,
+ &mm_eb.common,
+ &gpu_eb.common,
+ &mspi_eb.common,
+ &apcpu_dap_eb.common,
+ &aon_cssys_eb.common,
+ &cssys_apb_eb.common,
+ &cssys_pub_eb.common,
+ &sdphy_cfg_eb.common,
+ &sdphy_ref_eb.common,
+ &efuse_eb.common,
+ &gpio_eb.common,
+ &mbox_eb.common,
+ &kpd_eb.common,
+ &aon_syst_eb.common,
+ &ap_syst_eb.common,
+ &aon_tmr_eb.common,
+ &otg_utmi_eb.common,
+ &otg_phy_eb.common,
+ &splk_eb.common,
+ &pin_eb.common,
+ &ana_eb.common,
+ &apcpu_ts0_eb.common,
+ &apb_busmon_eb.common,
+ &aon_iis_eb.common,
+ &scc_eb.common,
+ &thm0_eb.common,
+ &thm1_eb.common,
+ &thm2_eb.common,
+ &asim_top_eb.common,
+ &i2c_eb.common,
+ &pmu_eb.common,
+ &adi_eb.common,
+ &eic_eb.common,
+ &ap_intc0_eb.common,
+ &ap_intc1_eb.common,
+ &ap_intc2_eb.common,
+ &ap_intc3_eb.common,
+ &ap_intc4_eb.common,
+ &ap_intc5_eb.common,
+ &audcp_intc_eb.common,
+ &ap_tmr0_eb.common,
+ &ap_tmr1_eb.common,
+ &ap_tmr2_eb.common,
+ &pwm0_eb.common,
+ &pwm1_eb.common,
+ &pwm2_eb.common,
+ &pwm3_eb.common,
+ &ap_wdg_eb.common,
+ &apcpu_wdg_eb.common,
+ &serdes_eb.common,
+ &arch_rtc_eb.common,
+ &kpd_rtc_eb.common,
+ &aon_syst_rtc_eb.common,
+ &ap_syst_rtc_eb.common,
+ &aon_tmr_rtc_eb.common,
+ &eic_rtc_eb.common,
+ &eic_rtcdv5_eb.common,
+ &ap_wdg_rtc_eb.common,
+ &ac_wdg_rtc_eb.common,
+ &ap_tmr0_rtc_eb.common,
+ &ap_tmr1_rtc_eb.common,
+ &ap_tmr2_rtc_eb.common,
+ &dcxo_lc_rtc_eb.common,
+ &bb_cal_rtc_eb.common,
+ &ap_emmc_rtc_eb.common,
+ &ap_sdio0_rtc_eb.common,
+ &ap_sdio1_rtc_eb.common,
+ &ap_sdio2_rtc_eb.common,
+ &dsi_csi_test_eb.common,
+ &djtag_tck_en.common,
+ &dphy_ref_eb.common,
+ &dmc_ref_eb.common,
+ &otg_ref_eb.common,
+ &tsen_eb.common,
+ &tmr_eb.common,
+ &rc100m_ref_eb.common,
+ &rc100m_fdk_eb.common,
+ &debounce_eb.common,
+ &det_32k_eb.common,
+ &top_cssys_en.common,
+ &ap_axi_en.common,
+ &sdio0_2x_en.common,
+ &sdio0_1x_en.common,
+ &sdio1_2x_en.common,
+ &sdio1_1x_en.common,
+ &sdio2_2x_en.common,
+ &sdio2_1x_en.common,
+ &emmc_2x_en.common,
+ &emmc_1x_en.common,
+ &pll_test_en.common,
+ &cphy_cfg_en.common,
+ &debug_ts_en.common,
+ &access_aud_en.common,
+};
+
+static struct clk_hw_onecell_data ums512_aon_gate_hws = {
+ .hws = {
+ [CLK_RC100M_CAL_EB] = &rc100m_cal_eb.common.hw,
+ [CLK_DJTAG_TCK_EB] = &djtag_tck_eb.common.hw,
+ [CLK_DJTAG_EB] = &djtag_eb.common.hw,
+ [CLK_AUX0_EB] = &aux0_eb.common.hw,
+ [CLK_AUX1_EB] = &aux1_eb.common.hw,
+ [CLK_AUX2_EB] = &aux2_eb.common.hw,
+ [CLK_PROBE_EB] = &probe_eb.common.hw,
+ [CLK_MM_EB] = &mm_eb.common.hw,
+ [CLK_GPU_EB] = &gpu_eb.common.hw,
+ [CLK_MSPI_EB] = &mspi_eb.common.hw,
+ [CLK_APCPU_DAP_EB] = &apcpu_dap_eb.common.hw,
+ [CLK_AON_CSSYS_EB] = &aon_cssys_eb.common.hw,
+ [CLK_CSSYS_APB_EB] = &cssys_apb_eb.common.hw,
+ [CLK_CSSYS_PUB_EB] = &cssys_pub_eb.common.hw,
+ [CLK_SDPHY_CFG_EB] = &sdphy_cfg_eb.common.hw,
+ [CLK_SDPHY_REF_EB] = &sdphy_ref_eb.common.hw,
+ [CLK_EFUSE_EB] = &efuse_eb.common.hw,
+ [CLK_GPIO_EB] = &gpio_eb.common.hw,
+ [CLK_MBOX_EB] = &mbox_eb.common.hw,
+ [CLK_KPD_EB] = &kpd_eb.common.hw,
+ [CLK_AON_SYST_EB] = &aon_syst_eb.common.hw,
+ [CLK_AP_SYST_EB] = &ap_syst_eb.common.hw,
+ [CLK_AON_TMR_EB] = &aon_tmr_eb.common.hw,
+ [CLK_OTG_UTMI_EB] = &otg_utmi_eb.common.hw,
+ [CLK_OTG_PHY_EB] = &otg_phy_eb.common.hw,
+ [CLK_SPLK_EB] = &splk_eb.common.hw,
+ [CLK_PIN_EB] = &pin_eb.common.hw,
+ [CLK_ANA_EB] = &ana_eb.common.hw,
+ [CLK_APCPU_TS0_EB] = &apcpu_ts0_eb.common.hw,
+ [CLK_APB_BUSMON_EB] = &apb_busmon_eb.common.hw,
+ [CLK_AON_IIS_EB] = &aon_iis_eb.common.hw,
+ [CLK_SCC_EB] = &scc_eb.common.hw,
+ [CLK_THM0_EB] = &thm0_eb.common.hw,
+ [CLK_THM1_EB] = &thm1_eb.common.hw,
+ [CLK_THM2_EB] = &thm2_eb.common.hw,
+ [CLK_ASIM_TOP_EB] = &asim_top_eb.common.hw,
+ [CLK_I2C_EB] = &i2c_eb.common.hw,
+ [CLK_PMU_EB] = &pmu_eb.common.hw,
+ [CLK_ADI_EB] = &adi_eb.common.hw,
+ [CLK_EIC_EB] = &eic_eb.common.hw,
+ [CLK_AP_INTC0_EB] = &ap_intc0_eb.common.hw,
+ [CLK_AP_INTC1_EB] = &ap_intc1_eb.common.hw,
+ [CLK_AP_INTC2_EB] = &ap_intc2_eb.common.hw,
+ [CLK_AP_INTC3_EB] = &ap_intc3_eb.common.hw,
+ [CLK_AP_INTC4_EB] = &ap_intc4_eb.common.hw,
+ [CLK_AP_INTC5_EB] = &ap_intc5_eb.common.hw,
+ [CLK_AUDCP_INTC_EB] = &audcp_intc_eb.common.hw,
+ [CLK_AP_TMR0_EB] = &ap_tmr0_eb.common.hw,
+ [CLK_AP_TMR1_EB] = &ap_tmr1_eb.common.hw,
+ [CLK_AP_TMR2_EB] = &ap_tmr2_eb.common.hw,
+ [CLK_PWM0_EB] = &pwm0_eb.common.hw,
+ [CLK_PWM1_EB] = &pwm1_eb.common.hw,
+ [CLK_PWM2_EB] = &pwm2_eb.common.hw,
+ [CLK_PWM3_EB] = &pwm3_eb.common.hw,
+ [CLK_AP_WDG_EB] = &ap_wdg_eb.common.hw,
+ [CLK_APCPU_WDG_EB] = &apcpu_wdg_eb.common.hw,
+ [CLK_SERDES_EB] = &serdes_eb.common.hw,
+ [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw,
+ [CLK_KPD_RTC_EB] = &kpd_rtc_eb.common.hw,
+ [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw,
+ [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw,
+ [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw,
+ [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw,
+ [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw,
+ [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw,
+ [CLK_AC_WDG_RTC_EB] = &ac_wdg_rtc_eb.common.hw,
+ [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw,
+ [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw,
+ [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw,
+ [CLK_DCXO_LC_RTC_EB] = &dcxo_lc_rtc_eb.common.hw,
+ [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw,
+ [CLK_AP_EMMC_RTC_EB] = &ap_emmc_rtc_eb.common.hw,
+ [CLK_AP_SDIO0_RTC_EB] = &ap_sdio0_rtc_eb.common.hw,
+ [CLK_AP_SDIO1_RTC_EB] = &ap_sdio1_rtc_eb.common.hw,
+ [CLK_AP_SDIO2_RTC_EB] = &ap_sdio2_rtc_eb.common.hw,
+ [CLK_DSI_CSI_TEST_EB] = &dsi_csi_test_eb.common.hw,
+ [CLK_DJTAG_TCK_EN] = &djtag_tck_en.common.hw,
+ [CLK_DPHY_REF_EB] = &dphy_ref_eb.common.hw,
+ [CLK_DMC_REF_EB] = &dmc_ref_eb.common.hw,
+ [CLK_OTG_REF_EB] = &otg_ref_eb.common.hw,
+ [CLK_TSEN_EB] = &tsen_eb.common.hw,
+ [CLK_TMR_EB] = &tmr_eb.common.hw,
+ [CLK_RC100M_REF_EB] = &rc100m_ref_eb.common.hw,
+ [CLK_RC100M_FDK_EB] = &rc100m_fdk_eb.common.hw,
+ [CLK_DEBOUNCE_EB] = &debounce_eb.common.hw,
+ [CLK_DET_32K_EB] = &det_32k_eb.common.hw,
+ [CLK_TOP_CSSYS_EB] = &top_cssys_en.common.hw,
+ [CLK_AP_AXI_EN] = &ap_axi_en.common.hw,
+ [CLK_SDIO0_2X_EN] = &sdio0_2x_en.common.hw,
+ [CLK_SDIO0_1X_EN] = &sdio0_1x_en.common.hw,
+ [CLK_SDIO1_2X_EN] = &sdio1_2x_en.common.hw,
+ [CLK_SDIO1_1X_EN] = &sdio1_1x_en.common.hw,
+ [CLK_SDIO2_2X_EN] = &sdio2_2x_en.common.hw,
+ [CLK_SDIO2_1X_EN] = &sdio2_1x_en.common.hw,
+ [CLK_EMMC_2X_EN] = &emmc_2x_en.common.hw,
+ [CLK_EMMC_1X_EN] = &emmc_1x_en.common.hw,
+ [CLK_PLL_TEST_EN] = &pll_test_en.common.hw,
+ [CLK_CPHY_CFG_EN] = &cphy_cfg_en.common.hw,
+ [CLK_DEBUG_TS_EN] = &debug_ts_en.common.hw,
+ [CLK_ACCESS_AUD_EN] = &access_aud_en.common.hw,
+ },
+ .num = CLK_AON_APB_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_aon_gate_desc = {
+ .clk_clks = ums512_aon_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_aon_gate),
+ .hw_clks = &ums512_aon_gate_hws,
+};
+
+/* audcp apb gates */
+/* Audcp apb clocks are marked CLK_IGNORE_UNUSED because these clocks may be
+ * controlled by the audcp sys at the same time. It may cause an exception if
+ * the kernel gates these clocks.
+ */
+static SPRD_SC_GATE_CLK_HW(audcp_wdg_eb, "audcp-wdg-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(1),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_rtc_wdg_eb, "audcp-rtc-wdg-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(2),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_tmr0_eb, "audcp-tmr0-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(5),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_tmr1_eb, "audcp-tmr1-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(6),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+
+static struct sprd_clk_common *ums512_audcpapb_gate[] = {
+ /* address base is 0x3350d000 */
+ &audcp_wdg_eb.common,
+ &audcp_rtc_wdg_eb.common,
+ &audcp_tmr0_eb.common,
+ &audcp_tmr1_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_audcpapb_gate_hws = {
+ .hws = {
+ [CLK_AUDCP_WDG_EB] = &audcp_wdg_eb.common.hw,
+ [CLK_AUDCP_RTC_WDG_EB] = &audcp_rtc_wdg_eb.common.hw,
+ [CLK_AUDCP_TMR0_EB] = &audcp_tmr0_eb.common.hw,
+ [CLK_AUDCP_TMR1_EB] = &audcp_tmr1_eb.common.hw,
+ },
+ .num = CLK_AUDCP_APB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc ums512_audcpapb_gate_desc = {
+ .clk_clks = ums512_audcpapb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_audcpapb_gate),
+ .hw_clks = &ums512_audcpapb_gate_hws,
+};
+
+/* audcp ahb gates */
+/* Audcp ahb clocks are marked CLK_IGNORE_UNUSED because these clocks may be
+ * controlled by the audcp sys at the same time. It may cause an exception if
+ * the kernel gates these clocks.
+ */
+static SPRD_SC_GATE_CLK_HW(audcp_iis0_eb, "audcp-iis0-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(0),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_iis1_eb, "audcp-iis1-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(1),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_iis2_eb, "audcp-iis2-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(2),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_uart_eb, "audcp-uart-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(4),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_dma_cp_eb, "audcp-dma-cp-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(5),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_dma_ap_eb, "audcp-dma-ap-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(6),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_src48k_eb, "audcp-src48k-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(10),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_mcdt_eb, "audcp-mcdt-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(12),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_vbcifd_eb, "audcp-vbcifd-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(13),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_vbc_eb, "audcp-vbc-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(14),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_splk_eb, "audcp-splk-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(15),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_icu_eb, "audcp-icu-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(16),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(dma_ap_ashb_eb, "dma-ap-ashb-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(17),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(dma_cp_ashb_eb, "dma-cp-ashb-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(18),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_aud_eb, "audcp-aud-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(19),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_vbc_24m_eb, "audcp-vbc-24m-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(21),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_tmr_26m_eb, "audcp-tmr-26m-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(22),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+static SPRD_SC_GATE_CLK_HW(audcp_dvfs_ashb_eb, "audcp-dvfs-ashb-eb",
+ &access_aud_en.common.hw, 0x0, 0x100, BIT(23),
+ CLK_IGNORE_UNUSED, SPRD_GATE_NON_AON);
+
+static struct sprd_clk_common *ums512_audcpahb_gate[] = {
+ /* address base is 0x335e0000 */
+ &audcp_iis0_eb.common,
+ &audcp_iis1_eb.common,
+ &audcp_iis2_eb.common,
+ &audcp_uart_eb.common,
+ &audcp_dma_cp_eb.common,
+ &audcp_dma_ap_eb.common,
+ &audcp_src48k_eb.common,
+ &audcp_mcdt_eb.common,
+ &audcp_vbcifd_eb.common,
+ &audcp_vbc_eb.common,
+ &audcp_splk_eb.common,
+ &audcp_icu_eb.common,
+ &dma_ap_ashb_eb.common,
+ &dma_cp_ashb_eb.common,
+ &audcp_aud_eb.common,
+ &audcp_vbc_24m_eb.common,
+ &audcp_tmr_26m_eb.common,
+ &audcp_dvfs_ashb_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_audcpahb_gate_hws = {
+ .hws = {
+ [CLK_AUDCP_IIS0_EB] = &audcp_iis0_eb.common.hw,
+ [CLK_AUDCP_IIS1_EB] = &audcp_iis1_eb.common.hw,
+ [CLK_AUDCP_IIS2_EB] = &audcp_iis2_eb.common.hw,
+ [CLK_AUDCP_UART_EB] = &audcp_uart_eb.common.hw,
+ [CLK_AUDCP_DMA_CP_EB] = &audcp_dma_cp_eb.common.hw,
+ [CLK_AUDCP_DMA_AP_EB] = &audcp_dma_ap_eb.common.hw,
+ [CLK_AUDCP_SRC48K_EB] = &audcp_src48k_eb.common.hw,
+ [CLK_AUDCP_MCDT_EB] = &audcp_mcdt_eb.common.hw,
+ [CLK_AUDCP_VBCIFD_EB] = &audcp_vbcifd_eb.common.hw,
+ [CLK_AUDCP_VBC_EB] = &audcp_vbc_eb.common.hw,
+ [CLK_AUDCP_SPLK_EB] = &audcp_splk_eb.common.hw,
+ [CLK_AUDCP_ICU_EB] = &audcp_icu_eb.common.hw,
+ [CLK_AUDCP_DMA_AP_ASHB_EB] = &dma_ap_ashb_eb.common.hw,
+ [CLK_AUDCP_DMA_CP_ASHB_EB] = &dma_cp_ashb_eb.common.hw,
+ [CLK_AUDCP_AUD_EB] = &audcp_aud_eb.common.hw,
+ [CLK_AUDCP_VBC_24M_EB] = &audcp_vbc_24m_eb.common.hw,
+ [CLK_AUDCP_TMR_26M_EB] = &audcp_tmr_26m_eb.common.hw,
+ [CLK_AUDCP_DVFS_ASHB_EB] = &audcp_dvfs_ashb_eb.common.hw,
+ },
+ .num = CLK_AUDCP_AHB_GATE_NUM,
+};
+
+static const struct sprd_clk_desc ums512_audcpahb_gate_desc = {
+ .clk_clks = ums512_audcpahb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_audcpahb_gate),
+ .hw_clks = &ums512_audcpahb_gate_hws,
+};
+
+/* gpu clocks */
+static SPRD_GATE_CLK_HW(gpu_core_gate, "gpu-core-gate", &gpu_eb.common.hw,
+ 0x4, BIT(0), 0, 0);
+
+static const struct clk_parent_data gpu_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_384m.hw },
+ { .hw = &twpll_512m.hw },
+ { .hw = &lpll_614m4.hw },
+ { .hw = &twpll_768m.hw },
+ { .hw = &gpll.common.hw },
+};
+
+static SPRD_COMP_CLK_DATA(gpu_core_clk, "gpu-core-clk", gpu_parents,
+ 0x4, 4, 3, 8, 3, 0);
+
+static SPRD_GATE_CLK_HW(gpu_mem_gate, "gpu-mem-gate", &gpu_eb.common.hw,
+ 0x8, BIT(0), 0, 0);
+
+static SPRD_COMP_CLK_DATA(gpu_mem_clk, "gpu-mem-clk", gpu_parents,
+ 0x8, 4, 3, 8, 3, 0);
+
+static SPRD_GATE_CLK_HW(gpu_sys_gate, "gpu-sys-gate", &gpu_eb.common.hw,
+ 0xc, BIT(0), 0, 0);
+
+static SPRD_DIV_CLK_HW(gpu_sys_clk, "gpu-sys-clk", &gpu_eb.common.hw,
+ 0xc, 4, 3, 0);
+
+static struct sprd_clk_common *ums512_gpu_clk[] = {
+ /* address base is 0x60100000 */
+ &gpu_core_gate.common,
+ &gpu_core_clk.common,
+ &gpu_mem_gate.common,
+ &gpu_mem_clk.common,
+ &gpu_sys_gate.common,
+ &gpu_sys_clk.common,
+};
+
+static struct clk_hw_onecell_data ums512_gpu_clk_hws = {
+ .hws = {
+ [CLK_GPU_CORE_EB] = &gpu_core_gate.common.hw,
+ [CLK_GPU_CORE] = &gpu_core_clk.common.hw,
+ [CLK_GPU_MEM_EB] = &gpu_mem_gate.common.hw,
+ [CLK_GPU_MEM] = &gpu_mem_clk.common.hw,
+ [CLK_GPU_SYS_EB] = &gpu_sys_gate.common.hw,
+ [CLK_GPU_SYS] = &gpu_sys_clk.common.hw,
+ },
+ .num = CLK_GPU_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_gpu_clk_desc = {
+ .clk_clks = ums512_gpu_clk,
+ .num_clk_clks = ARRAY_SIZE(ums512_gpu_clk),
+ .hw_clks = &ums512_gpu_clk_hws,
+};
+
+/* mm clocks */
+static const struct clk_parent_data mm_ahb_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_96m.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_153m6.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_ahb_clk, "mm-ahb-clk", mm_ahb_parents,
+ 0x20, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data mm_mtx_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_MUX_CLK_DATA(mm_mtx_clk, "mm-mtx-clk", mm_mtx_parents,
+ 0x24, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data sensor_parents[] = {
+ { .fw_name = "ext-26m" },
+ { .hw = &twpll_48m.hw },
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_96m.hw },
+};
+static SPRD_COMP_CLK_DATA(sensor0_clk, "sensor0-clk", sensor_parents,
+ 0x28, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor1_clk, "sensor1-clk", sensor_parents,
+ 0x2c, 0, 2, 8, 3, 0);
+static SPRD_COMP_CLK_DATA(sensor2_clk, "sensor2-clk", sensor_parents,
+ 0x30, 0, 2, 8, 3, 0);
+
+static const struct clk_parent_data cpp_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(cpp_clk, "cpp-clk", cpp_parents,
+ 0x34, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data jpg_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_128m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(jpg_clk, "jpg-clk", jpg_parents,
+ 0x38, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data fd_parents[] = {
+ { .hw = &twpll_76m8.hw },
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+};
+static SPRD_MUX_CLK_DATA(fd_clk, "fd-clk", fd_parents,
+ 0x3c, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dcam_if_parents[] = {
+ { .hw = &twpll_192m.hw },
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(dcam_if_clk, "dcam-if-clk", dcam_if_parents,
+ 0x40, 0, 3, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data dcam_axi_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+};
+static SPRD_MUX_CLK_DATA(dcam_axi_clk, "dcam-axi-clk", dcam_axi_parents,
+ 0x44, 0, 2, UMS512_MUX_FLAG);
+
+static const struct clk_parent_data isp_parents[] = {
+ { .hw = &twpll_256m.hw },
+ { .hw = &twpll_307m2.hw },
+ { .hw = &twpll_384m.hw },
+ { .hw = &isppll_468m.hw },
+ { .hw = &twpll_512m.hw },
+};
+static SPRD_MUX_CLK_DATA(isp_clk, "isp-clk", isp_parents,
+ 0x48, 0, 3, UMS512_MUX_FLAG);
+
+static SPRD_GATE_CLK_HW(mipi_csi0, "mipi-csi0", &mm_eb.common.hw,
+ 0x4c, BIT(16), CLK_IGNORE_UNUSED, 0);
+
+static SPRD_GATE_CLK_HW(mipi_csi1, "mipi-csi1", &mm_eb.common.hw,
+ 0x50, BIT(16), CLK_IGNORE_UNUSED, 0);
+
+static SPRD_GATE_CLK_HW(mipi_csi2, "mipi-csi2", &mm_eb.common.hw,
+ 0x54, BIT(16), CLK_IGNORE_UNUSED, 0);
+
+static struct sprd_clk_common *ums512_mm_clk[] = {
+ /* address base is 0x62100000 */
+ &mm_ahb_clk.common,
+ &mm_mtx_clk.common,
+ &sensor0_clk.common,
+ &sensor1_clk.common,
+ &sensor2_clk.common,
+ &cpp_clk.common,
+ &jpg_clk.common,
+ &fd_clk.common,
+ &dcam_if_clk.common,
+ &dcam_axi_clk.common,
+ &isp_clk.common,
+ &mipi_csi0.common,
+ &mipi_csi1.common,
+ &mipi_csi2.common,
+};
+
+static struct clk_hw_onecell_data ums512_mm_clk_hws = {
+ .hws = {
+ [CLK_MM_AHB] = &mm_ahb_clk.common.hw,
+ [CLK_MM_MTX] = &mm_mtx_clk.common.hw,
+ [CLK_SENSOR0] = &sensor0_clk.common.hw,
+ [CLK_SENSOR1] = &sensor1_clk.common.hw,
+ [CLK_SENSOR2] = &sensor2_clk.common.hw,
+ [CLK_CPP] = &cpp_clk.common.hw,
+ [CLK_JPG] = &jpg_clk.common.hw,
+ [CLK_FD] = &fd_clk.common.hw,
+ [CLK_DCAM_IF] = &dcam_if_clk.common.hw,
+ [CLK_DCAM_AXI] = &dcam_axi_clk.common.hw,
+ [CLK_ISP] = &isp_clk.common.hw,
+ [CLK_MIPI_CSI0] = &mipi_csi0.common.hw,
+ [CLK_MIPI_CSI1] = &mipi_csi1.common.hw,
+ [CLK_MIPI_CSI2] = &mipi_csi2.common.hw,
+ },
+ .num = CLK_MM_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_mm_clk_desc = {
+ .clk_clks = ums512_mm_clk,
+ .num_clk_clks = ARRAY_SIZE(ums512_mm_clk),
+ .hw_clks = &ums512_mm_clk_hws,
+};
+
+/* mm gate clocks */
+static SPRD_SC_GATE_CLK_HW(mm_cpp_eb, "mm-cpp-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_jpg_eb, "mm-jpg-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_dcam_eb, "mm-dcam-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_isp_eb, "mm-isp-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_csi2_eb, "mm-csi2-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_csi1_eb, "mm-csi1-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_csi0_eb, "mm-csi0-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_ckg_eb, "mm-ckg-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_isp_ahb_eb, "mm-isp-ahb-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_dvfs_eb, "mm-dvfs-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_fd_eb, "mm-fd-eb", &mm_eb.common.hw,
+ 0x0, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_sensor2_en, "mm-sensor2-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_sensor1_en, "mm-sensor1-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_sensor0_en, "mm-sensor0-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_mipi_csi2_en, "mm-mipi-csi2-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_mipi_csi1_en, "mm-mipi-csi1-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(4), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_mipi_csi0_en, "mm-mipi-csi0-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_dcam_axi_en, "mm-dcam-axi-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_isp_axi_en, "mm-isp-axi-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_HW(mm_cphy_en, "mm-cphy-en", &mm_eb.common.hw,
+ 0x8, 0x1000, BIT(8), 0, 0);
+
+static struct sprd_clk_common *ums512_mm_gate_clk[] = {
+ /* address base is 0x62200000 */
+ &mm_cpp_eb.common,
+ &mm_jpg_eb.common,
+ &mm_dcam_eb.common,
+ &mm_isp_eb.common,
+ &mm_csi2_eb.common,
+ &mm_csi1_eb.common,
+ &mm_csi0_eb.common,
+ &mm_ckg_eb.common,
+ &mm_isp_ahb_eb.common,
+ &mm_dvfs_eb.common,
+ &mm_fd_eb.common,
+ &mm_sensor2_en.common,
+ &mm_sensor1_en.common,
+ &mm_sensor0_en.common,
+ &mm_mipi_csi2_en.common,
+ &mm_mipi_csi1_en.common,
+ &mm_mipi_csi0_en.common,
+ &mm_dcam_axi_en.common,
+ &mm_isp_axi_en.common,
+ &mm_cphy_en.common,
+};
+
+static struct clk_hw_onecell_data ums512_mm_gate_clk_hws = {
+ .hws = {
+ [CLK_MM_CPP_EB] = &mm_cpp_eb.common.hw,
+ [CLK_MM_JPG_EB] = &mm_jpg_eb.common.hw,
+ [CLK_MM_DCAM_EB] = &mm_dcam_eb.common.hw,
+ [CLK_MM_ISP_EB] = &mm_isp_eb.common.hw,
+ [CLK_MM_CSI2_EB] = &mm_csi2_eb.common.hw,
+ [CLK_MM_CSI1_EB] = &mm_csi1_eb.common.hw,
+ [CLK_MM_CSI0_EB] = &mm_csi0_eb.common.hw,
+ [CLK_MM_CKG_EB] = &mm_ckg_eb.common.hw,
+ [CLK_ISP_AHB_EB] = &mm_isp_ahb_eb.common.hw,
+ [CLK_MM_DVFS_EB] = &mm_dvfs_eb.common.hw,
+ [CLK_MM_FD_EB] = &mm_fd_eb.common.hw,
+ [CLK_MM_SENSOR2_EB] = &mm_sensor2_en.common.hw,
+ [CLK_MM_SENSOR1_EB] = &mm_sensor1_en.common.hw,
+ [CLK_MM_SENSOR0_EB] = &mm_sensor0_en.common.hw,
+ [CLK_MM_MIPI_CSI2_EB] = &mm_mipi_csi2_en.common.hw,
+ [CLK_MM_MIPI_CSI1_EB] = &mm_mipi_csi1_en.common.hw,
+ [CLK_MM_MIPI_CSI0_EB] = &mm_mipi_csi0_en.common.hw,
+ [CLK_DCAM_AXI_EB] = &mm_dcam_axi_en.common.hw,
+ [CLK_ISP_AXI_EB] = &mm_isp_axi_en.common.hw,
+ [CLK_MM_CPHY_EB] = &mm_cphy_en.common.hw,
+ },
+ .num = CLK_MM_GATE_CLK_NUM,
+};
+
+static struct sprd_clk_desc ums512_mm_gate_clk_desc = {
+ .clk_clks = ums512_mm_gate_clk,
+ .num_clk_clks = ARRAY_SIZE(ums512_mm_gate_clk),
+ .hw_clks = &ums512_mm_gate_clk_hws,
+};
+
+/* ap apb gates */
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(0), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(1), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis1_eb, "iis1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(2), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(iis2_eb, "iis2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(3), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(apb_reg_eb, "apb-reg-eb", "ext-26m",
+ 0x0, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi0_eb, "spi0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(5), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi1_eb, "spi1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(6), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi2_eb, "spi2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(7), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi3_eb, "spi3-eb", "ext-26m",
+ 0x0, 0x1000, BIT(8), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c0_eb, "i2c0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(9), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c1_eb, "i2c1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(10), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c2_eb, "i2c2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(11), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c3_eb, "i2c3-eb", "ext-26m",
+ 0x0, 0x1000, BIT(12), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb, "i2c4-eb", "ext-26m",
+ 0x0, 0x1000, BIT(13), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb, "uart0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb, "uart1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb, "uart2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sim0_32k_eb, "sim0-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi0_lfin_eb, "spi0-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi1_lfin_eb, "spi1-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi2_lfin_eb, "spi2-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(spi3_lfin_eb, "spi3-lfin-eb", "ext-26m",
+ 0x0, 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_eb, "sdio0-eb", "ext-26m",
+ 0x0, 0x1000, BIT(22), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_eb, "sdio1-eb", "ext-26m",
+ 0x0, 0x1000, BIT(23), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_eb, "sdio2-eb", "ext-26m",
+ 0x0, 0x1000, BIT(24), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_eb, "emmc-eb", "ext-26m",
+ 0x0, 0x1000, BIT(25), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio0_32k_eb, "sdio0-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(26), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio1_32k_eb, "sdio1-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(27), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(sdio2_32k_eb, "sdio2-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(28), 0, 0);
+static SPRD_SC_GATE_CLK_FW_NAME(emmc_32k_eb, "emmc-32k-eb", "ext-26m",
+ 0x0, 0x1000, BIT(29), 0, 0);
+
+static struct sprd_clk_common *ums512_apapb_gate[] = {
+ /* address base is 0x71000000 */
+ &sim0_eb.common,
+ &iis0_eb.common,
+ &iis1_eb.common,
+ &iis2_eb.common,
+ &apb_reg_eb.common,
+ &spi0_eb.common,
+ &spi1_eb.common,
+ &spi2_eb.common,
+ &spi3_eb.common,
+ &i2c0_eb.common,
+ &i2c1_eb.common,
+ &i2c2_eb.common,
+ &i2c3_eb.common,
+ &i2c4_eb.common,
+ &uart0_eb.common,
+ &uart1_eb.common,
+ &uart2_eb.common,
+ &sim0_32k_eb.common,
+ &spi0_lfin_eb.common,
+ &spi1_lfin_eb.common,
+ &spi2_lfin_eb.common,
+ &spi3_lfin_eb.common,
+ &sdio0_eb.common,
+ &sdio1_eb.common,
+ &sdio2_eb.common,
+ &emmc_eb.common,
+ &sdio0_32k_eb.common,
+ &sdio1_32k_eb.common,
+ &sdio2_32k_eb.common,
+ &emmc_32k_eb.common,
+};
+
+static struct clk_hw_onecell_data ums512_apapb_gate_hws = {
+ .hws = {
+ [CLK_SIM0_EB] = &sim0_eb.common.hw,
+ [CLK_IIS0_EB] = &iis0_eb.common.hw,
+ [CLK_IIS1_EB] = &iis1_eb.common.hw,
+ [CLK_IIS2_EB] = &iis2_eb.common.hw,
+ [CLK_APB_REG_EB] = &apb_reg_eb.common.hw,
+ [CLK_SPI0_EB] = &spi0_eb.common.hw,
+ [CLK_SPI1_EB] = &spi1_eb.common.hw,
+ [CLK_SPI2_EB] = &spi2_eb.common.hw,
+ [CLK_SPI3_EB] = &spi3_eb.common.hw,
+ [CLK_I2C0_EB] = &i2c0_eb.common.hw,
+ [CLK_I2C1_EB] = &i2c1_eb.common.hw,
+ [CLK_I2C2_EB] = &i2c2_eb.common.hw,
+ [CLK_I2C3_EB] = &i2c3_eb.common.hw,
+ [CLK_I2C4_EB] = &i2c4_eb.common.hw,
+ [CLK_UART0_EB] = &uart0_eb.common.hw,
+ [CLK_UART1_EB] = &uart1_eb.common.hw,
+ [CLK_UART2_EB] = &uart2_eb.common.hw,
+ [CLK_SIM0_32K_EB] = &sim0_32k_eb.common.hw,
+ [CLK_SPI0_LFIN_EB] = &spi0_lfin_eb.common.hw,
+ [CLK_SPI1_LFIN_EB] = &spi1_lfin_eb.common.hw,
+ [CLK_SPI2_LFIN_EB] = &spi2_lfin_eb.common.hw,
+ [CLK_SPI3_LFIN_EB] = &spi3_lfin_eb.common.hw,
+ [CLK_SDIO0_EB] = &sdio0_eb.common.hw,
+ [CLK_SDIO1_EB] = &sdio1_eb.common.hw,
+ [CLK_SDIO2_EB] = &sdio2_eb.common.hw,
+ [CLK_EMMC_EB] = &emmc_eb.common.hw,
+ [CLK_SDIO0_32K_EB] = &sdio0_32k_eb.common.hw,
+ [CLK_SDIO1_32K_EB] = &sdio1_32k_eb.common.hw,
+ [CLK_SDIO2_32K_EB] = &sdio2_32k_eb.common.hw,
+ [CLK_EMMC_32K_EB] = &emmc_32k_eb.common.hw,
+ },
+ .num = CLK_AP_APB_GATE_NUM,
+};
+
+static struct sprd_clk_desc ums512_apapb_gate_desc = {
+ .clk_clks = ums512_apapb_gate,
+ .num_clk_clks = ARRAY_SIZE(ums512_apapb_gate),
+ .hw_clks = &ums512_apapb_gate_hws,
+};
+
+static const struct of_device_id sprd_ums512_clk_ids[] = {
+ { .compatible = "sprd,ums512-pmu-gate", /* 0x327e0000 */
+ .data = &ums512_pmu_gate_desc },
+ { .compatible = "sprd,ums512-g0-pll", /* 0x32390000 */
+ .data = &ums512_g0_pll_desc },
+ { .compatible = "sprd,ums512-g2-pll", /* 0x323b0000 */
+ .data = &ums512_g2_pll_desc },
+ { .compatible = "sprd,ums512-g3-pll", /* 0x323c0000 */
+ .data = &ums512_g3_pll_desc },
+ { .compatible = "sprd,ums512-gc-pll", /* 0x323e0000 */
+ .data = &ums512_gc_pll_desc },
+ { .compatible = "sprd,ums512-apahb-gate", /* 0x20100000 */
+ .data = &ums512_apahb_gate_desc },
+ { .compatible = "sprd,ums512-ap-clk", /* 0x20200000 */
+ .data = &ums512_ap_clk_desc },
+ { .compatible = "sprd,ums512-aonapb-clk", /* 0x32080200 */
+ .data = &ums512_aon_apb_desc },
+ { .compatible = "sprd,ums512-aon-gate", /* 0x327d0000 */
+ .data = &ums512_aon_gate_desc },
+ { .compatible = "sprd,ums512-audcpapb-gate", /* 0x3350d000 */
+ .data = &ums512_audcpapb_gate_desc },
+ { .compatible = "sprd,ums512-audcpahb-gate", /* 0x335e0000 */
+ .data = &ums512_audcpahb_gate_desc },
+ { .compatible = "sprd,ums512-gpu-clk", /* 0x60100000 */
+ .data = &ums512_gpu_clk_desc },
+ { .compatible = "sprd,ums512-mm-clk", /* 0x62100000 */
+ .data = &ums512_mm_clk_desc },
+ { .compatible = "sprd,ums512-mm-gate-clk", /* 0x62200000 */
+ .data = &ums512_mm_gate_clk_desc },
+ { .compatible = "sprd,ums512-apapb-gate", /* 0x71000000 */
+ .data = &ums512_apapb_gate_desc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sprd_ums512_clk_ids);
+
+static int ums512_clk_probe(struct platform_device *pdev)
+{
+ const struct sprd_clk_desc *desc;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -ENODEV;
+
+ ret = sprd_clk_regmap_init(pdev, desc);
+ if (ret)
+ return ret;
+
+ return sprd_clk_probe(&pdev->dev, desc->hw_clks);
+}
+
+static struct platform_driver ums512_clk_driver = {
+ .probe = ums512_clk_probe,
+ .driver = {
+ .name = "ums512-clk",
+ .of_match_table = sprd_ums512_clk_ids,
+ },
+};
+module_platform_driver(ums512_clk_driver);
+
+MODULE_DESCRIPTION("Unisoc UMS512 Clock Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 582a22c04919..d820292a381d 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -987,6 +987,7 @@ static void __init st_of_quadfs_setup(struct device_node *np,
const char *pll_name, *clk_parent_name;
void __iomem *reg;
spinlock_t *lock;
+ struct device_node *parent_np;
/*
* First check for reg property within the node to keep backward
@@ -994,7 +995,9 @@ static void __init st_of_quadfs_setup(struct device_node *np,
*/
reg = of_iomap(np, 0);
if (!reg) {
- reg = of_iomap(of_get_parent(np), 0);
+ parent_np = of_get_parent(np);
+ reg = of_iomap(parent_np, 0);
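+ /* The mapping keeps no reference to parent_np, so release it right away */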
+ of_node_put(parent_np);
if (!reg) {
pr_err("%s: Failed to get base address\n", __func__);
return;
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index ee39af7a0b72..596e939ad905 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -56,6 +56,7 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np,
void __iomem *reg;
const char **parents;
int num_parents = 0;
+ struct device_node *parent_np;
/*
* First check for reg property within the node to keep backward
@@ -63,7 +64,9 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np,
*/
reg = of_iomap(np, 0);
if (!reg) {
- reg = of_iomap(of_get_parent(np), 0);
+ parent_np = of_get_parent(np);
+ reg = of_iomap(parent_np, 0);
+ of_node_put(parent_np);
if (!reg) {
pr_err("%s: Failed to get base address\n", __func__);
return;
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
index 51058ba4db4d..8ef3cdeb7962 100644
--- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -104,6 +104,8 @@ static struct ccu_nm pll_video0_4x_clk = {
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .min_rate = 252000000U,
+ .max_rate = 2400000000U,
.common = {
.reg = 0x040,
.hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video0-4x", osc24M,
@@ -126,6 +128,8 @@ static struct ccu_nm pll_video1_4x_clk = {
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .min_rate = 252000000U,
+ .max_rate = 2400000000U,
.common = {
.reg = 0x048,
.hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video1-4x", osc24M,
@@ -175,6 +179,8 @@ static struct ccu_nm pll_audio0_4x_clk = {
.m = _SUNXI_CCU_DIV(16, 6),
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
0x178, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3000000000U,
.common = {
.reg = 0x078,
.features = CCU_FEATURE_SIGMA_DELTA_MOD,
@@ -202,6 +208,8 @@ static struct ccu_nm pll_audio1_clk = {
.lock = BIT(28),
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1),
+ .min_rate = 180000000U,
+ .max_rate = 3000000000U,
.common = {
.reg = 0x080,
.hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1", osc24M,
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 30056da3e0af..42568c616181 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1191,9 +1191,13 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- /* Force PLL_GPU output divider bits to 0 */
+ /*
+ * Force PLL_GPU output divider bits to 0 and adjust the
+ * multiplier so the PLL runs at a sensible default of 432 MHz.
+ */
val = readl(reg + SUN50I_H6_PLL_GPU_REG);
- val &= ~BIT(0);
+ val &= ~(GENMASK(15, 8) | BIT(0));
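+ /* N = 17 gives a x18 multiplier: 24 MHz * 18 = 432 MHz */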
+ val |= 17 << 8;
writel(val, reg + SUN50I_H6_PLL_GPU_REG);
/* Force GPU_CLK divider bits to 0 */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 2f6f02f00be2..b70b312e7483 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -256,29 +256,19 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg);
bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(bus_clk)) {
- ret = PTR_ERR(bus_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bus_clk),
+ "Couldn't get bus clk\n");
mod_clk = devm_clk_get(&pdev->dev, "mod");
- if (IS_ERR(mod_clk)) {
- ret = PTR_ERR(mod_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get mod clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(mod_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mod_clk),
+ "Couldn't get mod clk\n");
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Couldn't get reset control: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Couldn't get reset control\n");
/* The clocks need to be enabled for us to access the registers */
ret = clk_prepare_enable(bus_clk);
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index f2fe0e1cc3c0..1d8b1ae1619d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -213,21 +213,14 @@ static int sun9i_a80_de_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg);
bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(bus_clk)) {
- ret = PTR_ERR(bus_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bus_clk),
+ "Couldn't get bus clk\n");
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(rstc)) {
- ret = PTR_ERR(rstc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Couldn't get reset control: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "Couldn't get reset control\n");
/* The bus clock needs to be enabled for us to access the registers */
ret = clk_prepare_enable(bus_clk);
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index 575ae4ccc65f..a0fb0da8f356 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -101,12 +101,9 @@ static int sun9i_a80_usb_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg);
bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (IS_ERR(bus_clk)) {
- ret = PTR_ERR(bus_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bus_clk),
+ "Couldn't get bus clk\n");
/* The bus clock needs to be enabled for us to access the registers */
ret = clk_prepare_enable(bus_clk);
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 3748a39dae7c..d82a71f10c2c 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -349,7 +349,7 @@ static int tegra_bpmp_clk_get_info(struct tegra_bpmp *bpmp, unsigned int id,
if (err < 0)
return err;
- strlcpy(info->name, response.name, MRQ_CLK_NAME_MAXLEN);
+ strscpy(info->name, response.name, MRQ_CLK_NAME_MAXLEN);
info->num_parents = response.num_parents;
for (i = 0; i < info->num_parents; i++)
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index ef718c4b3826..f7405a58877e 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1317,6 +1317,7 @@ static void __init tegra114_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 934520aab6e3..a9d4efcef2d4 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1471,6 +1471,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index be3c33441cfc..8a4514f6d503 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -1131,6 +1131,7 @@ static void __init tegra20_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
BUG();
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b9099012dc7b..499f999e91e1 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3748,6 +3748,7 @@ static void __init tegra210_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
WARN_ON(1);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 04b496123820..168c07d5a5f2 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1320,6 +1320,7 @@ static void __init tegra30_clock_init(struct device_node *np)
}
pmc_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmc_base) {
pr_err("Can't map pmc registers\n");
BUG();
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index f0f5bf68b6d2..ff4d6a951681 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -245,14 +245,16 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
if (rc) {
pr_err("%s: failed to lookup atl clock %d\n", __func__,
i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto pm_put;
}
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
pr_err("%s: failed to get atl clock %d from provider\n",
__func__, i);
- return PTR_ERR(clk);
+ ret = PTR_ERR(clk);
+ goto pm_put;
}
cdesc = to_atl_desc(__clk_get_hw(clk));
@@ -285,8 +287,9 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
if (cdesc->enabled)
atl_clk_enable(__clk_get_hw(clk));
}
- pm_runtime_put_sync(cinfo->dev);
+pm_put:
+ pm_runtime_put_sync(cinfo->dev);
return ret;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ef2a445c63a3..1dc2f15fb75b 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -135,15 +135,17 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
continue;
if (!strncmp(n, tmp, strlen(tmp))) {
+ of_node_get(np);
found = true;
break;
}
}
- of_node_put(from);
kfree(tmp);
- if (found)
+ if (found) {
+ of_node_put(from);
return np;
+ }
/* Fall back to using old node name base provider name */
return of_find_node_by_name(from, name);
diff --git a/drivers/clk/xilinx/Kconfig b/drivers/clk/xilinx/Kconfig
index 5224114176ed..f205522c40ff 100644
--- a/drivers/clk/xilinx/Kconfig
+++ b/drivers/clk/xilinx/Kconfig
@@ -17,3 +17,15 @@ config XILINX_VCU
To compile this driver as a module, choose M here: the
module will be called xlnx_vcu.
+config COMMON_CLK_XLNX_CLKWZRD
+ tristate "Xilinx Clocking Wizard"
+ depends on COMMON_CLK && OF
+ depends on HAS_IOMEM
+ help
+ Support for the Xilinx Clocking Wizard IP core clock generator.
+ This driver supports the Xilinx Clocking Wizard programmable clock
+ synthesizer and compatible IP cores. The number of outputs is
+ configurable in the design.
+
+ If unsure, say N.
+
diff --git a/drivers/clk/xilinx/Makefile b/drivers/clk/xilinx/Makefile
index dee8fd51e303..7ac1789c6b1b 100644
--- a/drivers/clk/xilinx/Makefile
+++ b/drivers/clk/xilinx/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 39367712ef54..eb1dfe7ecc1b 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -2,9 +2,10 @@
/*
* Xilinx 'Clocking Wizard' driver
*
- * Copyright (C) 2013 - 2014 Xilinx
+ * Copyright (C) 2013 - 2021 Xilinx
*
* Sören Brinkmann <soren.brinkmann@xilinx.com>
+ *
*/
#include <linux/platform_device.h>
@@ -43,6 +44,8 @@
#define WZRD_DR_INIT_REG_OFFSET 0x25C
#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
+#define WZRD_DR_BEGIN_DYNA_RECONF_5_2 0x07
+#define WZRD_DR_BEGIN_DYNA_RECONF1_5_2 0x02
#define WZRD_USEC_POLL 10
#define WZRD_TIMEOUT_POLL 1000
@@ -164,7 +167,9 @@ static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
goto err_reconfig;
/* Initiate reconfiguration */
- writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+ writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
divider->base + WZRD_DR_INIT_REG_OFFSET);
/* Check status register */
@@ -223,7 +228,7 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
void __iomem *div_addr = divider->base + divider->offset;
- rate_div = ((parent_rate * 1000) / rate);
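+ /* Keep the divider scaled by 1000 so its fractional part is not lost */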
+ rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
clockout0_div = rate_div / 1000;
pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
@@ -245,7 +250,9 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
return err;
/* Initiate reconfiguration */
- writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+ writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
divider->base + WZRD_DR_INIT_REG_OFFSET);
/* Check status register */
@@ -441,18 +448,14 @@ static int clk_wzrd_probe(struct platform_device *pdev)
}
clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
- if (IS_ERR(clk_wzrd->clk_in1)) {
- if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "clk_in1 not found\n");
- return PTR_ERR(clk_wzrd->clk_in1);
- }
+ if (IS_ERR(clk_wzrd->clk_in1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->clk_in1),
+ "clk_in1 not found\n");
clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
- if (IS_ERR(clk_wzrd->axi_clk)) {
- if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
- dev_err(&pdev->dev, "s_axi_aclk not found\n");
- return PTR_ERR(clk_wzrd->axi_clk);
- }
+ if (IS_ERR(clk_wzrd->axi_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk_wzrd->axi_clk),
+ "s_axi_aclk not found\n");
ret = clk_prepare_enable(clk_wzrd->axi_clk);
if (ret) {
dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
@@ -479,7 +482,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- ret = of_property_read_u32(np, "nr-outputs", &nr_outputs);
+ ret = of_property_read_u32(np, "xlnx,nr-outputs", &nr_outputs);
if (ret || nr_outputs > WZRD_NUM_OUTPUTS) {
ret = -EINVAL;
goto err_disable_clk;
@@ -614,6 +617,8 @@ static int clk_wzrd_remove(struct platform_device *pdev)
static const struct of_device_id clk_wzrd_ids[] = {
{ .compatible = "xlnx,clocking-wizard" },
+ { .compatible = "xlnx,clocking-wizard-v5.2" },
+ { .compatible = "xlnx,clocking-wizard-v6.0" },
{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index eb25303eefed..5636ff1ce552 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -163,7 +163,7 @@ static int zynqmp_get_clock_name(u32 clk_id, char *clk_name)
ret = zynqmp_is_valid_clock(clk_id);
if (ret == 1) {
- strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
+ strscpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
return 0;
}
@@ -220,18 +220,22 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
* This function is used to get name of clock specified by given
* clock ID.
*
- * Return: Returns 0
+ * Return: 0 on success, or a negative error code on failure
*/
static int zynqmp_pm_clock_get_name(u32 clock_id,
struct name_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
qdata.qid = PM_QID_CLOCK_GET_NAME;
qdata.arg1 = clock_id;
- zynqmp_pm_query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
+ if (ret)
+ return ret;
+
memcpy(response, ret_payload, sizeof(*response));
return 0;
@@ -710,9 +714,16 @@ static void zynqmp_get_clock_info(void)
FIELD_PREP(CLK_ATTR_NODE_INDEX, i);
zynqmp_pm_clock_get_name(clock[i].clk_id, &name);
+
+ /*
+ * Terminate with a NUL character in case the name provided by the
+ * firmware is longer and was truncated due to the size limit.
+ */
+ name.name[sizeof(name.name) - 1] = '\0';
+
if (!strcmp(name.name, RESERVED_CLK_NAME))
continue;
- strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
+ strscpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
}
/* Get topology of all clock */
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 422ea79907dd..33a3b2a22659 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -113,17 +113,20 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
static void zynqmp_get_divider2_val(struct clk_hw *hw,
unsigned long rate,
struct zynqmp_clk_divider *divider,
- int *bestdiv)
+ u32 *bestdiv)
{
int div1;
int div2;
long error = LONG_MAX;
unsigned long div1_prate;
struct clk_hw *div1_parent_hw;
+ struct zynqmp_clk_divider *pdivider;
struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
- struct zynqmp_clk_divider *pdivider =
- to_zynqmp_clk_divider(div2_parent_hw);
+ if (!div2_parent_hw)
+ return;
+
+ pdivider = to_zynqmp_clk_divider(div2_parent_hw);
if (!pdivider)
return;
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 91a6b4cc910e..0d3e1377b092 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -102,26 +102,25 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 fbdiv;
- long rate_div, f;
+ u32 mult, div;
- /* Enable the fractional mode if needed */
- rate_div = (rate * FRAC_DIV) / *prate;
- f = rate_div % FRAC_DIV;
- if (f) {
- if (rate > PS_PLL_VCO_MAX) {
- fbdiv = rate / PS_PLL_VCO_MAX;
- rate = rate / (fbdiv + 1);
- }
- if (rate < PS_PLL_VCO_MIN) {
- fbdiv = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * fbdiv;
- }
- return rate;
+ /* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
+ if (rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
+ rate = rate / div;
+ }
+ if (rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
+ rate = rate * mult;
}
fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
- fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- return *prate * fbdiv;
+ if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
+ fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
+ rate = *prate * fbdiv;
+ }
+
+ return rate;
}
/**
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4f2bb7315b67..4469e7f555e9 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -434,7 +434,7 @@ config ATMEL_TCB_CLKSRC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
- depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on ARCH_ARTPEC || ARCH_EXYNOS || COMPILE_TEST
help
Support for Multi Core Timer controller on Exynos SoCs.
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9ab8221ee3c6..a7ff77550e17 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,8 +44,8 @@
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
-#define CNTVCT_LO 0x00
-#define CNTPCT_LO 0x08
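+/* CNTBaseN frame layout: CNTPCT at 0x00, CNTVCT at 0x08 */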
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
#define CNTFRQ 0x10
#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
@@ -473,6 +473,8 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "ARM erratum 858921",
.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index f29c812b70c9..bfd60093ee1c 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -33,7 +33,7 @@
#define EXYNOS4_MCT_G_INT_ENB EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE EXYNOS4_MCTREG(0x300)
-#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * x))
+#define EXYNOS4_MCT_L_BASE(x) (_EXYNOS4_MCT_L_BASE + (0x100 * (x)))
#define EXYNOS4_MCT_L_MASK (0xffffff00)
#define MCT_L_TCNTB_OFFSET (0x00)
@@ -66,6 +66,8 @@
#define MCT_L0_IRQ 4
/* Max number of IRQ as per DT binding document */
#define MCT_NR_IRQS 20
+/* Max number of local timers */
+#define MCT_NR_LOCAL (MCT_NR_IRQS - MCT_L0_IRQ)
enum {
MCT_INT_SPI,
@@ -233,9 +235,16 @@ static cycles_t exynos4_read_current_timer(void)
}
#endif
-static int __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(bool frc_shared)
{
- exynos4_mct_frc_start();
+ /*
+ * When the FRC is shared, the main processor should have already
+ * turned it on and we shouldn't be writing to TCON.
+ */
+ if (frc_shared)
+ mct_frc.resume = NULL;
+ else
+ exynos4_mct_frc_start();
#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
@@ -449,7 +458,6 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- mevt->base = EXYNOS4_MCT_L_BASE(cpu);
snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
evt->name = mevt->name;
@@ -520,8 +528,17 @@ static int __init exynos4_timer_resources(struct device_node *np)
return 0;
}
+/**
+ * exynos4_timer_interrupts - initialize MCT interrupts
+ * @np: device node for MCT
+ * @int_type: interrupt type, MCT_INT_PPI or MCT_INT_SPI
+ * @local_idx: array mapping CPU numbers to local timer indices
+ * @nr_local: size of @local_idx array
+ */
static int __init exynos4_timer_interrupts(struct device_node *np,
- unsigned int int_type)
+ unsigned int int_type,
+ const u32 *local_idx,
+ size_t nr_local)
{
int nr_irqs, i, err, cpu;
@@ -554,13 +571,21 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
} else {
for_each_possible_cpu(cpu) {
int mct_irq;
+ unsigned int irq_idx;
struct mct_clock_event_device *pcpu_mevt =
per_cpu_ptr(&percpu_mct_tick, cpu);
+ if (cpu >= nr_local) {
+ err = -EINVAL;
+ goto out_irq;
+ }
+
+ irq_idx = MCT_L0_IRQ + local_idx[cpu];
+
pcpu_mevt->evt.irq = -1;
- if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs))
+ if (irq_idx >= ARRAY_SIZE(mct_irqs))
break;
- mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+ mct_irq = mct_irqs[irq_idx];
irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
if (request_irq(mct_irq,
@@ -576,6 +601,17 @@ static int __init exynos4_timer_interrupts(struct device_node *np,
}
}
+ for_each_possible_cpu(cpu) {
+ struct mct_clock_event_device *mevt = per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ if (cpu >= nr_local) {
+ err = -EINVAL;
+ goto out_irq;
+ }
+
+ mevt->base = EXYNOS4_MCT_L_BASE(local_idx[cpu]);
+ }
+
/* Install hotplug callbacks which configure the timer on this CPU */
err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
"clockevents/exynos4/mct_timer:starting",
@@ -605,20 +641,49 @@ out_irq:
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
+ bool frc_shared = of_property_read_bool(np, "samsung,frc-shared");
+ u32 local_idx[MCT_NR_LOCAL] = {0};
+ int nr_local;
int ret;
+ nr_local = of_property_count_u32_elems(np, "samsung,local-timers");
+ if (nr_local == 0)
+ return -EINVAL;
+ if (nr_local > 0) {
+ if (nr_local > ARRAY_SIZE(local_idx))
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(np, "samsung,local-timers",
+ local_idx, nr_local);
+ if (ret)
+ return ret;
+ } else {
+ int i;
+
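+ /* Property absent: default to a 1:1 CPU to local timer mapping */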
+ nr_local = ARRAY_SIZE(local_idx);
+ for (i = 0; i < nr_local; i++)
+ local_idx[i] = i;
+ }
+
ret = exynos4_timer_resources(np);
if (ret)
return ret;
- ret = exynos4_timer_interrupts(np, int_type);
+ ret = exynos4_timer_interrupts(np, int_type, local_idx, nr_local);
if (ret)
return ret;
- ret = exynos4_clocksource_init();
+ ret = exynos4_clocksource_init(frc_shared);
if (ret)
return ret;
+ /*
+ * When the FRC is shared with a main processor, this secondary
+ * processor cannot use the global comparator.
+ */
+ if (frc_shared)
+ return ret;
+
return exynos4_clockevent_init();
}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 21d1392637b8..8da972dc1713 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -224,7 +224,7 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#ifdef CONFIG_ARCH_R9A07G044
+#ifdef CONFIG_ARCH_RZG2L
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
index 8b38b3212388..fe4fa8d7b3f1 100644
--- a/drivers/clocksource/timer-gxp.c
+++ b/drivers/clocksource/timer-gxp.c
@@ -171,6 +171,7 @@ static int gxp_timer_probe(struct platform_device *pdev)
{
struct platform_device *gxp_watchdog_device;
struct device *dev = &pdev->dev;
+ int ret;
if (!gxp_timer) {
pr_err("Gxp Timer not initialized, cannot create watchdog");
@@ -187,7 +188,11 @@ static int gxp_timer_probe(struct platform_device *pdev)
gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
gxp_watchdog_device->dev.parent = dev;
- return platform_device_add(gxp_watchdog_device);
+ ret = platform_device_add(gxp_watchdog_device);
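+ /* On failure, release the device instead of leaking it */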
+ if (ret)
+ platform_device_put(gxp_watchdog_device);
+
+ return ret;
}
static const struct of_device_id gxp_timer_of_match[] = {
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 523e37662a6e..5a7a951c4efc 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -134,8 +134,10 @@ static int __init sysctr_timer_init(struct device_node *np)
if (ret)
return ret;
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
sys_ctr_base = timer_of_base(&to_sysctr);
cmpcr = readl(sys_ctr_base + CMPCR);
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 94dc6e42e983..e5a70aa1deb4 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -26,6 +26,7 @@
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
#define TIMER_IRQ_ST_REG 0x04
+#define TIMER_IRQ_CLEAR(val) BIT(val)
#define TIMER_CTL_REG(val) (0x10 * val + 0x10)
#define TIMER_CTL_ENABLE BIT(0)
#define TIMER_CTL_RELOAD BIT(1)
@@ -123,7 +124,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
static void sun4i_timer_clear_interrupt(void __iomem *base)
{
- writel(TIMER_IRQ_EN(0), base + TIMER_IRQ_ST_REG);
+ writel(TIMER_IRQ_CLEAR(0), base + TIMER_IRQ_ST_REG);
}
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 469f7c91564b..cad29ded3a48 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -33,6 +33,116 @@
#include <clocksource/timer-ti-dm.h>
+/*
+ * timer errata flags
+ *
+ * Errata i103/i767 impact all OMAP3/4/5 devices, including AM33xx. These
+ * errata prevent us from using posted mode on these devices, unless the
+ * timer counter register is never read. For more details please refer to
+ * the OMAP3/4/5 errata documents.
+ */
+#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
+
+/* posted mode types */
+#define OMAP_TIMER_NONPOSTED 0x00
+#define OMAP_TIMER_POSTED 0x01
+
+/* register offsets with the write pending bit encoded */
+#define WPSHIFT 16
+
+#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
+ | (WP_TCLR << WPSHIFT))
+
+#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
+ | (WP_TCRR << WPSHIFT))
+
+#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
+ | (WP_TLDR << WPSHIFT))
+
+#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
+ | (WP_TTGR << WPSHIFT))
+
+#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
+ | (WP_TMAR << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
+ | (WP_NONE << WPSHIFT))
+
+#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
+ | (WP_TPIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
+ | (WP_TNIR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
+ | (WP_TCVR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
+
+#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
+ (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
+
+struct timer_regs {
+ u32 ocp_cfg;
+ u32 tidr;
+ u32 tier;
+ u32 twer;
+ u32 tclr;
+ u32 tcrr;
+ u32 tldr;
+ u32 ttrg;
+ u32 twps;
+ u32 tmar;
+ u32 tcar1;
+ u32 tsicr;
+ u32 tcar2;
+ u32 tpir;
+ u32 tnir;
+ u32 tcvr;
+ u32 tocr;
+ u32 towr;
+};
+
+struct dmtimer {
+ struct omap_dm_timer cookie;
+ int id;
+ int irq;
+ struct clk *fclk;
+
+ void __iomem *io_base;
+ int irq_stat; /* TISR/IRQSTATUS interrupt status */
+ int irq_ena; /* irq enable */
+ int irq_dis; /* irq disable, only on v2 ip */
+ void __iomem *pend; /* write pending */
+ void __iomem *func_base; /* function register base */
+
+ atomic_t enabled;
+ unsigned long rate;
+ unsigned reserved:1;
+ unsigned posted:1;
+ unsigned omap1:1;
+ struct timer_regs context;
+ int revision;
+ u32 capability;
+ u32 errata;
+ struct platform_device *pdev;
+ struct list_head node;
+ struct notifier_block nb;
+};
+
static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);
@@ -44,27 +154,56 @@ enum {
REQUEST_BY_NODE,
};
-static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
- int posted)
+/**
+ * dmtimer_read - read timer registers in posted and non-posted mode
+ * @timer: timer pointer on which the read operation is performed
+ * @reg: lowest byte holds the register offset
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode, the write
+ * pending bit must be checked. Otherwise a read issued while a write is still
+ * pending will produce an error.
+ */
+static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg)
{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ u16 wp, offset;
+
+ wp = reg >> WPSHIFT;
+ offset = reg & 0xff;
+
+ /* Wait for a possible write pending bit in posted mode */
+ if (wp && timer->posted)
+ while (readl_relaxed(timer->pend) & wp)
cpu_relax();
- return readl_relaxed(timer->func_base + (reg & 0xff));
+ return readl_relaxed(timer->func_base + offset);
}
-static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
- u32 reg, u32 val, int posted)
+/**
+ * dmtimer_write - write timer registers in posted and non-posted mode
+ * @timer: timer pointer on which the write operation is performed
+ * @reg: lowest byte holds the register offset
+ * @val: data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode, the write
+ * pending bit must be checked. Otherwise a write to a register that still has
+ * a pending write will be lost.
+ */
+static inline void dmtimer_write(struct dmtimer *timer, u32 reg, u32 val)
{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
+ u16 wp, offset;
+
+ wp = reg >> WPSHIFT;
+ offset = reg & 0xff;
+
+ /* Wait for a possible write pending bit in posted mode */
+ if (wp && timer->posted)
+ while (readl_relaxed(timer->pend) & wp)
cpu_relax();
- writel_relaxed(val, timer->func_base + (reg & 0xff));
+ writel_relaxed(val, timer->func_base + offset);
}
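The write path mirrors the read path; a sketch under the same assumptions (new_match stands in for whatever value the caller computes):

/*
 * Sketch: update the match value. In posted mode dmtimer_write() waits
 * for the WP_TMAR write-pending flag to clear first, so an earlier
 * posted write to TMAR is not lost.
 */
dmtimer_write(timer, OMAP_TIMER_MATCH_REG, new_match);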
-static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
+static inline void __omap_dm_timer_init_regs(struct dmtimer *timer)
{
u32 tidr;
@@ -72,16 +211,16 @@ static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
tidr = readl_relaxed(timer->io_base);
if (!(tidr >> 16)) {
timer->revision = 1;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
+ timer->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
+ timer->irq_dis = OMAP_TIMER_V1_INT_EN_OFFSET;
timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
timer->func_base = timer->io_base;
} else {
timer->revision = 2;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
+ timer->irq_stat = OMAP_TIMER_V2_IRQSTATUS - OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET - OMAP_TIMER_V2_FUNC_OFFSET;
+ timer->irq_dis = OMAP_TIMER_V2_IRQENABLE_CLR - OMAP_TIMER_V2_FUNC_OFFSET;
timer->pend = timer->io_base +
_OMAP_TIMER_WRITE_PEND_OFFSET +
OMAP_TIMER_V2_FUNC_OFFSET;
@@ -99,35 +238,34 @@ static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
* complete. Enabling this feature can improve performance for writing to the
* timer registers.
*/
-static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
+static inline void __omap_dm_timer_enable_posted(struct dmtimer *timer)
{
if (timer->posted)
return;
if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
timer->posted = OMAP_TIMER_NONPOSTED;
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0);
return;
}
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED, 0);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, OMAP_TIMER_CTRL_POSTED);
timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
timer->posted = OMAP_TIMER_POSTED;
}
-static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
- int posted, unsigned long rate)
+static inline void __omap_dm_timer_stop(struct dmtimer *timer,
+ unsigned long rate)
{
u32 l;
- l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
l &= ~0x1;
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
#ifdef CONFIG_ARCH_OMAP2PLUS
/* Readback to make sure write has completed */
- __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
+ dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
/*
* Wait for functional clock period x 3.5 to make sure that
* timer is stopped
@@ -137,104 +275,59 @@ static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
}
/* Ack possibly pending interrupt */
- writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
+ dmtimer_write(timer, timer->irq_stat, OMAP_TIMER_INT_OVERFLOW);
}
-static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
+static inline void __omap_dm_timer_int_enable(struct dmtimer *timer,
+ unsigned int value)
{
- writel_relaxed(value, timer->irq_ena);
- __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
+ dmtimer_write(timer, timer->irq_ena, value);
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value);
}
static inline unsigned int
-__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
+__omap_dm_timer_read_counter(struct dmtimer *timer)
{
- return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
+ return dmtimer_read(timer, OMAP_TIMER_COUNTER_REG);
}
-static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
+static inline void __omap_dm_timer_write_status(struct dmtimer *timer,
unsigned int value)
{
- writel_relaxed(value, timer->irq_stat);
+ dmtimer_write(timer, timer->irq_stat, value);
}
-/**
- * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
- * @timer: timer pointer over which read operation to perform
- * @reg: lowest byte holds the register offset
- *
- * The posted mode bit is encoded in reg. Note that in posted mode write
- * pending bit must be checked. Otherwise a read of a non completed write
- * will produce an error.
- */
-static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+static void omap_timer_restore_context(struct dmtimer *timer)
{
- WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
- return __omap_dm_timer_read(timer, reg, timer->posted);
+ dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, timer->context.ocp_cfg);
+
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, timer->context.twer);
+ dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, timer->context.tcrr);
+ dmtimer_write(timer, OMAP_TIMER_LOAD_REG, timer->context.tldr);
+ dmtimer_write(timer, OMAP_TIMER_MATCH_REG, timer->context.tmar);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, timer->context.tsicr);
+ dmtimer_write(timer, timer->irq_ena, timer->context.tier);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, timer->context.tclr);
}
-/**
- * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
- * @timer: timer pointer over which write operation is to perform
- * @reg: lowest byte holds the register offset
- * @value: data to write into the register
- *
- * The posted mode bit is encoded in reg. Note that in posted mode the write
- * pending bit must be checked. Otherwise a write on a register which has a
- * pending write will be lost.
- */
-static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
- u32 value)
+static void omap_timer_save_context(struct dmtimer *timer)
{
- WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
- __omap_dm_timer_write(timer, reg, value, timer->posted);
-}
-
-static void omap_timer_restore_context(struct omap_dm_timer *timer)
-{
- __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
- timer->context.ocp_cfg, 0);
-
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
- timer->context.twer);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
- timer->context.tcrr);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
- timer->context.tldr);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
- timer->context.tmar);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
- timer->context.tsicr);
- writel_relaxed(timer->context.tier, timer->irq_ena);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
- timer->context.tclr);
-}
-
-static void omap_timer_save_context(struct omap_dm_timer *timer)
-{
- timer->context.ocp_cfg =
- __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
-
- timer->context.tclr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- timer->context.twer =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG);
- timer->context.tldr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_LOAD_REG);
- timer->context.tmar =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_MATCH_REG);
- timer->context.tier = readl_relaxed(timer->irq_ena);
- timer->context.tsicr =
- omap_dm_timer_read_reg(timer, OMAP_TIMER_IF_CTRL_REG);
+ timer->context.ocp_cfg = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
+
+ timer->context.tclr = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+ timer->context.twer = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG);
+ timer->context.tldr = dmtimer_read(timer, OMAP_TIMER_LOAD_REG);
+ timer->context.tmar = dmtimer_read(timer, OMAP_TIMER_MATCH_REG);
+ timer->context.tier = dmtimer_read(timer, timer->irq_ena);
+ timer->context.tsicr = dmtimer_read(timer, OMAP_TIMER_IF_CTRL_REG);
}
static int omap_timer_context_notifier(struct notifier_block *nb,
unsigned long cmd, void *v)
{
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
- timer = container_of(nb, struct omap_dm_timer, nb);
+ timer = container_of(nb, struct dmtimer, nb);
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
@@ -256,18 +349,17 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int omap_dm_timer_reset(struct omap_dm_timer *timer)
+static int omap_dm_timer_reset(struct dmtimer *timer)
{
u32 l, timeout = 100000;
if (timer->revision != 1)
return -EINVAL;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+ dmtimer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
do {
- l = __omap_dm_timer_read(timer,
- OMAP_TIMER_V1_SYS_STAT_OFFSET, 0);
+ l = dmtimer_read(timer, OMAP_TIMER_V1_SYS_STAT_OFFSET);
} while (!l && timeout--);
if (!timeout) {
@@ -276,22 +368,38 @@ static int omap_dm_timer_reset(struct omap_dm_timer *timer)
}
/* Configure timer for smart-idle mode */
- l = __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+ l = dmtimer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET);
l |= 0x2 << 0x3;
- __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l, 0);
+ dmtimer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l);
timer->posted = 0;
return 0;
}
-static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+/*
+ * Functions exposed to PWM and remoteproc drivers via platform_data.
+ * Do not use these in the driver; they will be deprecated and replaced
+ * by Linux generic framework functions such as chained interrupts and
+ * the clock framework.
+ */
+static struct dmtimer *to_dmtimer(struct omap_dm_timer *cookie)
+{
+ if (!cookie)
+ return NULL;
+
+ return container_of(cookie, struct dmtimer, cookie);
+}
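The cookie indirection above is the usual container_of() pattern: struct dmtimer embeds struct omap_dm_timer as its first member, the request helpers hand out &timer->cookie, and every exported callback recovers the private state through to_dmtimer(). A minimal sketch (the helper name is made up for illustration):

/* Sketch: the cookie/container_of round trip is the identity. */
static bool example_cookie_roundtrip(struct dmtimer *timer)
{
	struct omap_dm_timer *cookie = &timer->cookie;	/* what callers see */

	return to_dmtimer(cookie) == timer;		/* always true */
}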
+
+static int omap_dm_timer_set_source(struct omap_dm_timer *cookie, int source)
{
int ret;
const char *parent_name;
struct clk *parent;
struct dmtimer_platform_data *pdata;
+ struct dmtimer *timer;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer) || IS_ERR(timer->fclk))
return -EINVAL;
@@ -316,7 +424,7 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
* use the clock framework to set the parent clock. To be removed
* once OMAP1 migrated to using clock framework for dmtimers
*/
- if (pdata && pdata->set_timer_src)
+ if (timer->omap1 && pdata && pdata->set_timer_src)
return pdata->set_timer_src(timer->pdev, source);
#if defined(CONFIG_COMMON_CLK)
@@ -341,44 +449,44 @@ static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
return ret;
}
-static void omap_dm_timer_enable(struct omap_dm_timer *timer)
+static void omap_dm_timer_enable(struct omap_dm_timer *cookie)
{
- pm_runtime_get_sync(&timer->pdev->dev);
+ struct dmtimer *timer = to_dmtimer(cookie);
+ struct device *dev = &timer->pdev->dev;
+ int rc;
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ dev_err(dev, "could not enable timer\n");
}
-static void omap_dm_timer_disable(struct omap_dm_timer *timer)
+static void omap_dm_timer_disable(struct omap_dm_timer *cookie)
{
- pm_runtime_put_sync(&timer->pdev->dev);
+ struct dmtimer *timer = to_dmtimer(cookie);
+ struct device *dev = &timer->pdev->dev;
+
+ pm_runtime_put_sync(dev);
}
-static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+static int omap_dm_timer_prepare(struct dmtimer *timer)
{
+ struct device *dev = &timer->pdev->dev;
int rc;
- /*
- * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
- * do not call clk_get() for these devices.
- */
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
- timer->fclk = clk_get(&timer->pdev->dev, "fck");
- if (WARN_ON_ONCE(IS_ERR(timer->fclk))) {
- dev_err(&timer->pdev->dev, ": No fclk handle.\n");
- return -EINVAL;
- }
- }
-
- omap_dm_timer_enable(timer);
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
rc = omap_dm_timer_reset(timer);
if (rc) {
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
return rc;
}
}
__omap_dm_timer_enable_posted(timer);
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -388,19 +496,9 @@ static inline u32 omap_dm_timer_reserved_systimer(int id)
return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
}
-int omap_dm_timer_reserve_systimer(int id)
+static struct dmtimer *_omap_dm_timer_request(int req_type, void *data)
{
- if (omap_dm_timer_reserved_systimer(id))
- return -ENODEV;
-
- omap_reserved_systimers |= (1 << (id - 1));
-
- return 0;
-}
-
-static struct omap_dm_timer *_omap_dm_timer_request(int req_type, void *data)
-{
- struct omap_dm_timer *timer = NULL, *t;
+ struct dmtimer *timer = NULL, *t;
struct device_node *np = NULL;
unsigned long flags;
u32 cap = 0;
@@ -484,11 +582,19 @@ found:
static struct omap_dm_timer *omap_dm_timer_request(void)
{
- return _omap_dm_timer_request(REQUEST_ANY, NULL);
+ struct dmtimer *timer;
+
+ timer = _omap_dm_timer_request(REQUEST_ANY, NULL);
+ if (!timer)
+ return NULL;
+
+ return &timer->cookie;
}
static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
{
+ struct dmtimer *timer;
+
/* Requesting timer by ID is not supported when device tree is used */
if (of_have_populated_dt()) {
pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
@@ -496,21 +602,11 @@ static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
return NULL;
}
- return _omap_dm_timer_request(REQUEST_BY_ID, &id);
-}
+ timer = _omap_dm_timer_request(REQUEST_BY_ID, &id);
+ if (!timer)
+ return NULL;
-/**
- * omap_dm_timer_request_by_cap - Request a timer by capability
- * @cap: Bit mask of capabilities to match
- *
- * Find a timer based upon capabilities bit mask. Callers of this function
- * should use the definitions found in the plat/dmtimer.h file under the
- * comment "timer capabilities used in hwmod database". Returns pointer to
- * timer handle on success and a NULL pointer on failure.
- */
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
-{
- return _omap_dm_timer_request(REQUEST_BY_CAP, &cap);
+ return &timer->cookie;
}
/**
@@ -522,26 +618,34 @@ struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
*/
static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
{
+ struct dmtimer *timer;
+
if (!np)
return NULL;
- return _omap_dm_timer_request(REQUEST_BY_NODE, np);
+ timer = _omap_dm_timer_request(REQUEST_BY_NODE, np);
+ if (!timer)
+ return NULL;
+
+ return &timer->cookie;
}
-static int omap_dm_timer_free(struct omap_dm_timer *timer)
+static int omap_dm_timer_free(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- clk_put(timer->fclk);
-
WARN_ON(!timer->reserved);
timer->reserved = 0;
return 0;
}
-int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+int omap_dm_timer_get_irq(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer = to_dmtimer(cookie);
if (timer)
return timer->irq;
return -EINVAL;
@@ -550,7 +654,7 @@ int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
#if defined(CONFIG_ARCH_OMAP1)
#include <linux/soc/ti/omap1-io.h>
-static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
return NULL;
}
@@ -562,7 +666,7 @@ static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
{
int i = 0;
- struct omap_dm_timer *timer = NULL;
+ struct dmtimer *timer = NULL;
unsigned long flags;
/* If ARMXOR cannot be idled this function call is unnecessary */
@@ -574,7 +678,7 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
list_for_each_entry(timer, &omap_timer_list, node) {
u32 l;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (l & OMAP_TIMER_CTRL_ST) {
if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
inputmask &= ~(1 << 1);
@@ -590,8 +694,10 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
#else
-static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer = to_dmtimer(cookie);
+
if (timer && !IS_ERR(timer->fclk))
return timer->fclk;
return NULL;
@@ -606,95 +712,125 @@ __u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
#endif
-int omap_dm_timer_trigger(struct omap_dm_timer *timer)
-{
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
- return -EINVAL;
- }
-
- omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
- return 0;
-}
-
-static int omap_dm_timer_start(struct omap_dm_timer *timer)
+static int omap_dm_timer_start(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (!(l & OMAP_TIMER_CTRL_ST)) {
l |= OMAP_TIMER_CTRL_ST;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
}
return 0;
}
-static int omap_dm_timer_stop(struct omap_dm_timer *timer)
+static int omap_dm_timer_stop(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
unsigned long rate = 0;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
+ dev = &timer->pdev->dev;
+
+ if (!timer->omap1)
rate = clk_get_rate(timer->fclk);
- __omap_dm_timer_stop(timer, timer->posted, rate);
+ __omap_dm_timer_stop(timer, rate);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_load(struct omap_dm_timer *timer,
+static int omap_dm_timer_set_load(struct omap_dm_timer *cookie,
unsigned int load)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ dmtimer_write(timer, OMAP_TIMER_LOAD_REG, load);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
unsigned int match)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
if (enable)
l |= OMAP_TIMER_CTRL_CE;
else
l &= ~OMAP_TIMER_CTRL_CE;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_MATCH_REG, match);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
OMAP_TIMER_CTRL_PT | (0x03 << 10) | OMAP_TIMER_CTRL_AR);
if (def_on)
@@ -704,57 +840,86 @@ static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
l |= trigger << 10;
if (autoreload)
l |= OMAP_TIMER_CTRL_AR;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *timer)
+static int omap_dm_timer_get_pwm_status(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
- omap_dm_timer_disable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ pm_runtime_put_sync(dev);
return l;
}
-static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
- int prescaler)
+static int omap_dm_timer_set_prescaler(struct omap_dm_timer *cookie,
+ int prescaler)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
u32 l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
return -EINVAL;
- omap_dm_timer_enable(timer);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
if (prescaler >= 0) {
l |= OMAP_TIMER_CTRL_PRE;
l |= prescaler << 2;
}
- omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie,
unsigned int value)
{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+
__omap_dm_timer_int_enable(timer, value);
- omap_dm_timer_disable(timer);
+ pm_runtime_put_sync(dev);
+
return 0;
}
@@ -765,42 +930,55 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
*
* Disables the specified timer interrupts for a timer.
*/
-static int omap_dm_timer_set_int_disable(struct omap_dm_timer *timer, u32 mask)
+static int omap_dm_timer_set_int_disable(struct omap_dm_timer *cookie, u32 mask)
{
+ struct dmtimer *timer;
+ struct device *dev;
u32 l = mask;
+ int rc;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer))
return -EINVAL;
- omap_dm_timer_enable(timer);
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
if (timer->revision == 1)
- l = readl_relaxed(timer->irq_ena) & ~mask;
+ l = dmtimer_read(timer, timer->irq_ena) & ~mask;
+
+ dmtimer_write(timer, timer->irq_dis, l);
+ l = dmtimer_read(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
+ dmtimer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
- writel_relaxed(l, timer->irq_dis);
- l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
- omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
+ pm_runtime_put_sync(dev);
- omap_dm_timer_disable(timer);
return 0;
}
-static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
unsigned int l;
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return 0;
}
- l = readl_relaxed(timer->irq_stat);
+ l = dmtimer_read(timer, timer->irq_stat);
return l;
}
-static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+static int omap_dm_timer_write_status(struct omap_dm_timer *cookie, unsigned int value)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled)))
return -EINVAL;
@@ -809,49 +987,39 @@ static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int
return 0;
}
-static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not iavailable or enabled.\n", __func__);
return 0;
}
- return __omap_dm_timer_read_counter(timer, timer->posted);
+ return __omap_dm_timer_read_counter(timer);
}
-static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
+ struct dmtimer *timer;
+
+ timer = to_dmtimer(cookie);
if (unlikely(!timer || !atomic_read(&timer->enabled))) {
pr_err("%s: timer not available or enabled.\n", __func__);
return -EINVAL;
}
- omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+ dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
-int omap_dm_timers_active(void)
-{
- struct omap_dm_timer *timer;
-
- list_for_each_entry(timer, &omap_timer_list, node) {
- if (!timer->reserved)
- continue;
-
- if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
- OMAP_TIMER_CTRL_ST) {
- return 1;
- }
- }
- return 0;
-}
-
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
- struct omap_dm_timer *timer = dev_get_drvdata(dev);
+ struct dmtimer *timer = dev_get_drvdata(dev);
atomic_set(&timer->enabled, 0);
@@ -865,7 +1033,7 @@ static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
static int __maybe_unused omap_dm_timer_runtime_resume(struct device *dev)
{
- struct omap_dm_timer *timer = dev_get_drvdata(dev);
+ struct dmtimer *timer = dev_get_drvdata(dev);
if (!(timer->capability & OMAP_TIMER_ALWON) && timer->func_base)
omap_timer_restore_context(timer);
@@ -892,7 +1060,7 @@ static const struct of_device_id omap_timer_match[];
static int omap_dm_timer_probe(struct platform_device *pdev)
{
unsigned long flags;
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
struct device *dev = &pdev->dev;
const struct dmtimer_platform_data *pdata;
int ret;
@@ -916,7 +1084,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
if (timer->irq < 0)
return timer->irq;
- timer->fclk = ERR_PTR(-ENODEV);
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
return PTR_ERR(timer->io_base);
@@ -938,6 +1105,17 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
}
+ timer->omap1 = timer->capability & OMAP_TIMER_NEEDS_RESET;
+
+ /* OMAP1 devices do not yet use the clock framework for dmtimers */
+ if (!timer->omap1) {
+ timer->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(timer->fclk))
+ return PTR_ERR(timer->fclk);
+ } else {
+ timer->fclk = ERR_PTR(-ENODEV);
+ }
+
if (!(timer->capability & OMAP_TIMER_ALWON)) {
timer->nb.notifier_call = omap_timer_context_notifier;
cpu_pm_register_notifier(&timer->nb);
@@ -950,11 +1128,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
if (!timer->reserved) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
__func__);
- goto err_get_sync;
+ goto err_disable;
}
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
@@ -969,8 +1147,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return 0;
-err_get_sync:
- pm_runtime_put_noidle(dev);
+err_disable:
pm_runtime_disable(dev);
return ret;
}
@@ -985,7 +1162,7 @@ err_get_sync:
*/
static int omap_dm_timer_remove(struct platform_device *pdev)
{
- struct omap_dm_timer *timer;
+ struct dmtimer *timer;
unsigned long flags;
int ret = -EINVAL;
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 55a0cae04b8d..e2114bcf815a 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -396,7 +396,7 @@ static ssize_t max_read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t max_read_buffer_kb_store(struct device *csdev,
@@ -452,7 +452,7 @@ static ssize_t read_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t read_buffer_kb_store(struct device *csdev,
@@ -509,7 +509,7 @@ static ssize_t max_write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t max_write_buffer_kb_store(struct device *csdev,
@@ -565,7 +565,7 @@ static ssize_t write_buffer_kb_show(struct device *csdev,
mutex_unlock(&dev->mutex);
comedi_dev_put(dev);
- return snprintf(buf, PAGE_SIZE, "%u\n", size);
+ return sysfs_emit(buf, "%u\n", size);
}
static ssize_t write_buffer_kb_store(struct device *csdev,
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 62c2b7ac4339..77a863b7eefe 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -28,7 +28,8 @@ module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
static unsigned int irq[max_num_isa_dev(QUAD8_EXTENT)];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
#define QUAD8_NUM_COUNTERS 8
@@ -449,6 +450,9 @@ static int quad8_events_configure(struct counter_device *counter)
return -EINVAL;
}
+ /* Enable IRQ line */
+ irq_enabled |= BIT(event_node->channel);
+
/* Skip configuration if it is the same as previously set */
if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
continue;
@@ -462,9 +466,6 @@ static int quad8_events_configure(struct counter_device *counter)
priv->irq_trigger[event_node->channel] << 3;
iowrite8(QUAD8_CTR_IOR | ior_cfg,
&priv->reg->channel[event_node->channel].control);
-
- /* Enable IRQ line */
- irq_enabled |= BIT(event_node->channel);
}
iowrite8(irq_enabled, &priv->reg->index_interrupt);
@@ -549,6 +550,32 @@ static int quad8_index_polarity_set(struct counter_device *counter,
return 0;
}
+static int quad8_polarity_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_polarity *polarity)
+{
+ int err;
+ u32 index_polarity;
+
+ err = quad8_index_polarity_get(counter, signal, &index_polarity);
+ if (err)
+ return err;
+
+ *polarity = (index_polarity) ? COUNTER_SIGNAL_POLARITY_POSITIVE :
+ COUNTER_SIGNAL_POLARITY_NEGATIVE;
+
+ return 0;
+}
+
+static int quad8_polarity_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ enum counter_signal_polarity polarity)
+{
+ const u32 pol = (polarity == COUNTER_SIGNAL_POLARITY_POSITIVE) ? 1 : 0;
+
+ return quad8_index_polarity_set(counter, signal, pol);
+}
+
static const char *const quad8_synchronous_modes[] = {
"non-synchronous",
"synchronous"
@@ -977,6 +1004,13 @@ static struct counter_comp quad8_signal_ext[] = {
quad8_signal_fck_prescaler_write)
};
+static const enum counter_signal_polarity quad8_polarities[] = {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
+static DEFINE_COUNTER_AVAILABLE(quad8_polarity_available, quad8_polarities);
+
static DEFINE_COUNTER_ENUM(quad8_index_pol_enum, quad8_index_polarity_modes);
static DEFINE_COUNTER_ENUM(quad8_synch_mode_enum, quad8_synchronous_modes);
@@ -984,6 +1018,8 @@ static struct counter_comp quad8_index_ext[] = {
COUNTER_COMP_SIGNAL_ENUM("index_polarity", quad8_index_polarity_get,
quad8_index_polarity_set,
quad8_index_pol_enum),
+ COUNTER_COMP_POLARITY(quad8_polarity_read, quad8_polarity_write,
+ quad8_polarity_available),
COUNTER_COMP_SIGNAL_ENUM("synchronous_mode", quad8_synchronous_mode_get,
quad8_synchronous_mode_set,
quad8_synch_mode_enum),
@@ -1236,8 +1272,9 @@ static struct isa_driver quad8_driver = {
}
};
-module_isa_driver(quad8_driver, num_quad8);
+module_isa_driver_with_irq(quad8_driver, num_quad8, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-QUAD-8 driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 5edd155f1911..d388bf26f4dc 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -101,4 +101,19 @@ config INTEL_QEP
To compile this driver as a module, choose M here: the module
will be called intel-qep.
+config TI_ECAP_CAPTURE
+ tristate "TI eCAP capture driver"
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Capture
+ (eCAP) driver in input mode.
+
+ It can be used to timestamp events (falling/rising edges) detected
+ on the ECAP input signal.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti-ecap-capture.
+
endif # COUNTER
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
index 8fde6c100ebc..b9a369e0d4fc 100644
--- a/drivers/counter/Makefile
+++ b/drivers/counter/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_TI_EQEP) += ti-eqep.o
obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
obj-$(CONFIG_MICROCHIP_TCB_CAPTURE) += microchip-tcb-capture.o
obj-$(CONFIG_INTEL_QEP) += intel-qep.o
+obj-$(CONFIG_TI_ECAP_CAPTURE) += ti-ecap-capture.o
diff --git a/drivers/counter/counter-chrdev.c b/drivers/counter/counter-chrdev.c
index 69d340be9c93..80acdf62794a 100644
--- a/drivers/counter/counter-chrdev.c
+++ b/drivers/counter/counter-chrdev.c
@@ -40,7 +40,11 @@ struct counter_comp_node {
a.signal_u32_read == b.signal_u32_read || \
a.device_u64_read == b.device_u64_read || \
a.count_u64_read == b.count_u64_read || \
- a.signal_u64_read == b.signal_u64_read)
+ a.signal_u64_read == b.signal_u64_read || \
+ a.signal_array_u32_read == b.signal_array_u32_read || \
+ a.device_array_u64_read == b.device_array_u64_read || \
+ a.count_array_u64_read == b.count_array_u64_read || \
+ a.signal_array_u64_read == b.signal_array_u64_read)
#define counter_comp_read_is_set(comp) \
(comp.action_read || \
@@ -52,7 +56,11 @@ struct counter_comp_node {
comp.signal_u32_read || \
comp.device_u64_read || \
comp.count_u64_read || \
- comp.signal_u64_read)
+ comp.signal_u64_read || \
+ comp.signal_array_u32_read || \
+ comp.device_array_u64_read || \
+ comp.count_array_u64_read || \
+ comp.signal_array_u64_read)
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
size_t len, loff_t *f_ps)
@@ -228,6 +236,31 @@ static int counter_disable_events(struct counter_device *const counter)
return err;
}
+static int counter_get_ext(const struct counter_comp *const ext,
+ const size_t num_ext, const size_t component_id,
+ size_t *const ext_idx, size_t *const id)
+{
+ struct counter_array *element;
+
+ *id = 0;
+ for (*ext_idx = 0; *ext_idx < num_ext; (*ext_idx)++) {
+ if (*id == component_id)
+ return 0;
+
+ if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
+ element = ext[*ext_idx].priv;
+
+ if (component_id - *id < element->length)
+ return 0;
+
+ *id += element->length;
+ } else
+ (*id)++;
+ }
+
+ return -EINVAL;
+}
+
static int counter_add_watch(struct counter_device *const counter,
const unsigned long arg)
{
@@ -237,6 +270,7 @@ static int counter_add_watch(struct counter_device *const counter,
size_t parent, id;
struct counter_comp *ext;
size_t num_ext;
+ size_t ext_idx, ext_id;
int err = 0;
if (copy_from_user(&watch, uwatch, sizeof(watch)))
@@ -314,11 +348,11 @@ static int counter_add_watch(struct counter_device *const counter,
comp_node.comp.priv = counter->counts[parent].synapses + id;
break;
case COUNTER_COMPONENT_EXTENSION:
- if (id >= num_ext)
- return -EINVAL;
- id = array_index_nospec(id, num_ext);
+ err = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
+ if (err < 0)
+ return err;
- comp_node.comp = ext[id];
+ comp_node.comp = ext[ext_idx];
break;
default:
return -EINVAL;
@@ -451,14 +485,56 @@ void counter_chrdev_remove(struct counter_device *const counter)
kfifo_free(&counter->events);
}
+static int counter_get_array_data(struct counter_device *const counter,
+ const enum counter_scope scope,
+ void *const parent,
+ const struct counter_comp *const comp,
+ const size_t idx, u64 *const value)
+{
+ const struct counter_array *const element = comp->priv;
+ u32 value_u32 = 0;
+ int ret;
+
+ switch (element->type) {
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ if (scope != COUNTER_SCOPE_SIGNAL)
+ return -EINVAL;
+ ret = comp->signal_array_u32_read(counter, parent, idx,
+ &value_u32);
+ *value = value_u32;
+ return ret;
+ case COUNTER_COMP_U64:
+ switch (scope) {
+ case COUNTER_SCOPE_DEVICE:
+ return comp->device_array_u64_read(counter, idx, value);
+ case COUNTER_SCOPE_SIGNAL:
+ return comp->signal_array_u64_read(counter, parent, idx,
+ value);
+ case COUNTER_SCOPE_COUNT:
+ return comp->count_array_u64_read(counter, parent, idx,
+ value);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
static int counter_get_data(struct counter_device *const counter,
const struct counter_comp_node *const comp_node,
u64 *const value)
{
const struct counter_comp *const comp = &comp_node->comp;
- void *const parent = comp_node->parent;
+ const enum counter_scope scope = comp_node->component.scope;
+ const size_t id = comp_node->component.id;
+ struct counter_signal *const signal = comp_node->parent;
+ struct counter_count *const count = comp_node->parent;
u8 value_u8 = 0;
u32 value_u32 = 0;
+ const struct counter_comp *ext;
+ size_t num_ext;
+ size_t ext_idx, ext_id;
int ret;
if (comp_node->component.type == COUNTER_COMPONENT_NONE)
@@ -467,15 +543,15 @@ static int counter_get_data(struct counter_device *const counter,
switch (comp->type) {
case COUNTER_COMP_U8:
case COUNTER_COMP_BOOL:
- switch (comp_node->component.scope) {
+ switch (scope) {
case COUNTER_SCOPE_DEVICE:
ret = comp->device_u8_read(counter, &value_u8);
break;
case COUNTER_SCOPE_SIGNAL:
- ret = comp->signal_u8_read(counter, parent, &value_u8);
+ ret = comp->signal_u8_read(counter, signal, &value_u8);
break;
case COUNTER_SCOPE_COUNT:
- ret = comp->count_u8_read(counter, parent, &value_u8);
+ ret = comp->count_u8_read(counter, count, &value_u8);
break;
default:
return -EINVAL;
@@ -487,16 +563,17 @@ static int counter_get_data(struct counter_device *const counter,
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_DIRECTION:
case COUNTER_COMP_COUNT_MODE:
- switch (comp_node->component.scope) {
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ switch (scope) {
case COUNTER_SCOPE_DEVICE:
ret = comp->device_u32_read(counter, &value_u32);
break;
case COUNTER_SCOPE_SIGNAL:
- ret = comp->signal_u32_read(counter, parent,
+ ret = comp->signal_u32_read(counter, signal,
&value_u32);
break;
case COUNTER_SCOPE_COUNT:
- ret = comp->count_u32_read(counter, parent, &value_u32);
+ ret = comp->count_u32_read(counter, count, &value_u32);
break;
default:
return -EINVAL;
@@ -504,21 +581,43 @@ static int counter_get_data(struct counter_device *const counter,
*value = value_u32;
return ret;
case COUNTER_COMP_U64:
- switch (comp_node->component.scope) {
+ switch (scope) {
case COUNTER_SCOPE_DEVICE:
return comp->device_u64_read(counter, value);
case COUNTER_SCOPE_SIGNAL:
- return comp->signal_u64_read(counter, parent, value);
+ return comp->signal_u64_read(counter, signal, value);
case COUNTER_SCOPE_COUNT:
- return comp->count_u64_read(counter, parent, value);
+ return comp->count_u64_read(counter, count, value);
default:
return -EINVAL;
}
case COUNTER_COMP_SYNAPSE_ACTION:
- ret = comp->action_read(counter, parent, comp->priv,
- &value_u32);
+ ret = comp->action_read(counter, count, comp->priv, &value_u32);
*value = value_u32;
return ret;
+ case COUNTER_COMP_ARRAY:
+ switch (scope) {
+ case COUNTER_SCOPE_DEVICE:
+ ext = counter->ext;
+ num_ext = counter->num_ext;
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ ext = signal->ext;
+ num_ext = signal->num_ext;
+ break;
+ case COUNTER_SCOPE_COUNT:
+ ext = count->ext;
+ num_ext = count->num_ext;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = counter_get_ext(ext, num_ext, id, &ext_idx, &ext_id);
+ if (ret < 0)
+ return ret;
+
+ return counter_get_array_data(counter, scope, comp_node->parent,
+ comp, id - ext_id, value);
default:
return -EINVAL;
}
@@ -574,4 +673,4 @@ exit_early:
if (copied)
wake_up_poll(&counter->events_wait, EPOLLIN);
}
-EXPORT_SYMBOL_GPL(counter_push_event);
+EXPORT_SYMBOL_NS_GPL(counter_push_event, COUNTER);
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 938651f9e9e0..09c77afb33ca 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -73,7 +73,7 @@ void *counter_priv(const struct counter_device *const counter)
return &ch->privdata;
}
-EXPORT_SYMBOL_GPL(counter_priv);
+EXPORT_SYMBOL_NS_GPL(counter_priv, COUNTER);
/**
* counter_alloc - allocate a counter_device
@@ -133,13 +133,13 @@ err_ida_alloc:
return NULL;
}
-EXPORT_SYMBOL_GPL(counter_alloc);
+EXPORT_SYMBOL_NS_GPL(counter_alloc, COUNTER);
void counter_put(struct counter_device *counter)
{
put_device(&counter->dev);
}
-EXPORT_SYMBOL_GPL(counter_put);
+EXPORT_SYMBOL_NS_GPL(counter_put, COUNTER);
/**
* counter_add - complete registration of a counter
@@ -166,7 +166,7 @@ int counter_add(struct counter_device *counter)
/* implies device_add(dev) */
return cdev_device_add(&counter->chrdev, dev);
}
-EXPORT_SYMBOL_GPL(counter_add);
+EXPORT_SYMBOL_NS_GPL(counter_add, COUNTER);
/**
* counter_unregister - unregister Counter from the system
@@ -188,7 +188,7 @@ void counter_unregister(struct counter_device *const counter)
mutex_unlock(&counter->ops_exist_lock);
}
-EXPORT_SYMBOL_GPL(counter_unregister);
+EXPORT_SYMBOL_NS_GPL(counter_unregister, COUNTER);
static void devm_counter_release(void *counter)
{
@@ -223,7 +223,7 @@ struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv
return counter;
}
-EXPORT_SYMBOL_GPL(devm_counter_alloc);
+EXPORT_SYMBOL_NS_GPL(devm_counter_alloc, COUNTER);
/**
* devm_counter_add - complete registration of a counter
@@ -244,7 +244,7 @@ int devm_counter_add(struct device *dev,
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
-EXPORT_SYMBOL_GPL(devm_counter_add);
+EXPORT_SYMBOL_NS_GPL(devm_counter_add, COUNTER);
#define COUNTER_DEV_MAX 256
diff --git a/drivers/counter/counter-sysfs.c b/drivers/counter/counter-sysfs.c
index 04eac41dad33..b9efe66f9f8d 100644
--- a/drivers/counter/counter-sysfs.c
+++ b/drivers/counter/counter-sysfs.c
@@ -91,6 +91,11 @@ static const char *const counter_count_mode_str[] = {
[COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
};
+static const char *const counter_signal_polarity_str[] = {
+ [COUNTER_SIGNAL_POLARITY_POSITIVE] = "positive",
+ [COUNTER_SIGNAL_POLARITY_NEGATIVE] = "negative"
+};
+
static ssize_t counter_comp_u8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -201,6 +206,8 @@ static ssize_t counter_comp_u32_show(struct device *dev,
return sysfs_emit(buf, "%s\n", counter_count_direction_str[data]);
case COUNTER_COMP_COUNT_MODE:
return sysfs_emit(buf, "%s\n", counter_count_mode_str[data]);
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ return sysfs_emit(buf, "%s\n", counter_signal_polarity_str[data]);
default:
return sysfs_emit(buf, "%u\n", (unsigned int)data);
}
@@ -252,6 +259,10 @@ static ssize_t counter_comp_u32_store(struct device *dev,
err = counter_find_enum(&data, avail->enums, avail->num_items,
buf, counter_count_mode_str);
break;
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ err = counter_find_enum(&data, avail->enums, avail->num_items,
+ buf, counter_signal_polarity_str);
+ break;
default:
err = kstrtou32(buf, 0, &data);
break;
@@ -341,6 +352,124 @@ static ssize_t counter_comp_u64_store(struct device *dev,
return len;
}
+static ssize_t counter_comp_array_u32_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ if (a->scope != COUNTER_SCOPE_SIGNAL ||
+ element->type != COUNTER_COMP_SIGNAL_POLARITY)
+ return -EINVAL;
+
+ err = a->comp.signal_array_u32_read(counter, a->parent, element->idx,
+ &data);
+ if (err < 0)
+ return err;
+
+ return sysfs_emit(buf, "%s\n", counter_signal_polarity_str[data]);
+}
+
+static ssize_t counter_comp_array_u32_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u32 data = 0;
+
+ if (element->type != COUNTER_COMP_SIGNAL_POLARITY ||
+ a->scope != COUNTER_SCOPE_SIGNAL)
+ return -EINVAL;
+
+ err = counter_find_enum(&data, element->avail->enums,
+ element->avail->num_items, buf,
+ counter_signal_polarity_str);
+ if (err < 0)
+ return err;
+
+ err = a->comp.signal_array_u32_write(counter, a->parent, element->idx,
+ data);
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
+static ssize_t counter_comp_array_u64_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u64 data = 0;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_array_u64_read(counter, element->idx,
+ &data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_array_u64_read(counter, a->parent,
+ element->idx, &data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_array_u64_read(counter, a->parent,
+ element->idx, &data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return sysfs_emit(buf, "%llu\n", (unsigned long long)data);
+}
+
+static ssize_t counter_comp_array_u64_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_attribute *const a = to_counter_attribute(attr);
+ struct counter_device *const counter = counter_from_dev(dev);
+ const struct counter_array *const element = a->comp.priv;
+ int err;
+ u64 data = 0;
+
+ err = kstrtou64(buf, 0, &data);
+ if (err < 0)
+ return err;
+
+ switch (a->scope) {
+ case COUNTER_SCOPE_DEVICE:
+ err = a->comp.device_array_u64_write(counter, element->idx,
+ data);
+ break;
+ case COUNTER_SCOPE_SIGNAL:
+ err = a->comp.signal_array_u64_write(counter, a->parent,
+ element->idx, data);
+ break;
+ case COUNTER_SCOPE_COUNT:
+ err = a->comp.count_array_u64_write(counter, a->parent,
+ element->idx, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+
+ return len;
+}
+
static ssize_t enums_available_show(const u32 *const enums,
const size_t num_enums,
const char *const strs[], char *buf)
@@ -435,6 +564,7 @@ static int counter_attr_create(struct device *const dev,
const enum counter_scope scope,
void *const parent)
{
+ const struct counter_array *const array = comp->priv;
struct counter_attribute *counter_attr;
struct device_attribute *dev_attr;
@@ -469,6 +599,7 @@ static int counter_attr_create(struct device *const dev,
case COUNTER_COMP_ENUM:
case COUNTER_COMP_COUNT_DIRECTION:
case COUNTER_COMP_COUNT_MODE:
+ case COUNTER_COMP_SIGNAL_POLARITY:
if (comp->device_u32_read) {
dev_attr->attr.mode |= 0444;
dev_attr->show = counter_comp_u32_show;
@@ -488,6 +619,32 @@ static int counter_attr_create(struct device *const dev,
dev_attr->store = counter_comp_u64_store;
}
break;
+ case COUNTER_COMP_ARRAY:
+ switch (array->type) {
+ case COUNTER_COMP_SIGNAL_POLARITY:
+ if (comp->signal_array_u32_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_array_u32_show;
+ }
+ if (comp->signal_array_u32_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_array_u32_store;
+ }
+ break;
+ case COUNTER_COMP_U64:
+ if (comp->device_array_u64_read) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = counter_comp_array_u64_show;
+ }
+ if (comp->device_array_u64_write) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = counter_comp_array_u64_store;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
@@ -580,6 +737,95 @@ static int counter_comp_id_attr_create(struct device *const dev,
return 0;
}
+static int counter_ext_attrs_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const ext,
+ const enum counter_scope scope,
+ void *const parent, const size_t id)
+{
+ int err;
+
+ /* Create main extension attribute */
+ err = counter_attr_create(dev, group, ext, scope, parent);
+ if (err < 0)
+ return err;
+
+ /* Create extension id attribute */
+ return counter_comp_id_attr_create(dev, group, ext->name, id);
+}
+
+static int counter_array_attrs_create(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const comp,
+ const enum counter_scope scope,
+ void *const parent, const size_t id)
+{
+ const struct counter_array *const array = comp->priv;
+ struct counter_comp ext = *comp;
+ struct counter_array *element;
+ size_t idx;
+ int err;
+
+ /* Create an attribute for each array element */
+ for (idx = 0; idx < array->length; idx++) {
+ /* Generate array element attribute name */
+ ext.name = devm_kasprintf(dev, GFP_KERNEL, "%s%zu", comp->name,
+ idx);
+ if (!ext.name)
+ return -ENOMEM;
+
+ /* Allocate and configure array element */
+ element = devm_kzalloc(dev, sizeof(*element), GFP_KERNEL);
+ if (!element)
+ return -ENOMEM;
+ element->type = array->type;
+ element->avail = array->avail;
+ element->idx = idx;
+ ext.priv = element;
+
+ /* Create all attributes associated with the array element */
+ err = counter_ext_attrs_create(dev, group, &ext, scope, parent,
+ id + idx);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int counter_sysfs_exts_add(struct device *const dev,
+ struct counter_attribute_group *const group,
+ const struct counter_comp *const exts,
+ const size_t num_ext,
+ const enum counter_scope scope,
+ void *const parent)
+{
+ size_t i;
+ const struct counter_comp *ext;
+ int err;
+ size_t id = 0;
+ const struct counter_array *array;
+
+ /* Create attributes for each extension */
+ for (i = 0; i < num_ext; i++) {
+ ext = &exts[i];
+ if (ext->type == COUNTER_COMP_ARRAY) {
+ err = counter_array_attrs_create(dev, group, ext, scope,
+ parent, id);
+ array = ext->priv;
+ id += array->length;
+ } else {
+ err = counter_ext_attrs_create(dev, group, ext, scope,
+ parent, id);
+ id++;
+ }
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static struct counter_comp counter_signal_comp = {
.type = COUNTER_COMP_SIGNAL_LEVEL,
.name = "signal",
@@ -593,8 +839,6 @@ static int counter_signal_attrs_create(struct counter_device *const counter,
struct device *const dev = &counter->dev;
int err;
struct counter_comp comp;
- size_t i;
- struct counter_comp *ext;
/* Create main Signal attribute */
comp = counter_signal_comp;
@@ -608,21 +852,9 @@ static int counter_signal_attrs_create(struct counter_device *const counter,
if (err < 0)
return err;
- /* Create an attribute for each extension */
- for (i = 0; i < signal->num_ext; i++) {
- ext = &signal->ext[i];
-
- err = counter_attr_create(dev, cattr_group, ext, scope, signal);
- if (err < 0)
- return err;
-
- err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
- i);
- if (err < 0)
- return err;
- }
-
- return 0;
+ /* Add Signal extensions */
+ return counter_sysfs_exts_add(dev, cattr_group, signal->ext,
+ signal->num_ext, scope, signal);
}
static int counter_sysfs_signals_add(struct counter_device *const counter,
@@ -707,8 +939,6 @@ static int counter_count_attrs_create(struct counter_device *const counter,
struct device *const dev = &counter->dev;
int err;
struct counter_comp comp;
- size_t i;
- struct counter_comp *ext;
/* Create main Count attribute */
comp = counter_count_comp;
@@ -731,21 +961,9 @@ static int counter_count_attrs_create(struct counter_device *const counter,
if (err < 0)
return err;
- /* Create an attribute for each extension */
- for (i = 0; i < count->num_ext; i++) {
- ext = &count->ext[i];
-
- err = counter_attr_create(dev, cattr_group, ext, scope, count);
- if (err < 0)
- return err;
-
- err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
- i);
- if (err < 0)
- return err;
- }
-
- return 0;
+ /* Add Count extensions */
+ return counter_sysfs_exts_add(dev, cattr_group, count->ext,
+ count->num_ext, scope, count);
}
static int counter_sysfs_counts_add(struct counter_device *const counter,
@@ -838,8 +1056,6 @@ static int counter_sysfs_attr_add(struct counter_device *const counter,
const enum counter_scope scope = COUNTER_SCOPE_DEVICE;
struct device *const dev = &counter->dev;
int err;
- size_t i;
- struct counter_comp *ext;
/* Add Signals sysfs attributes */
err = counter_sysfs_signals_add(counter, cattr_group);
@@ -876,19 +1092,9 @@ static int counter_sysfs_attr_add(struct counter_device *const counter,
if (err < 0)
return err;
- /* Create an attribute for each extension */
- for (i = 0; i < counter->num_ext; i++) {
- ext = &counter->ext[i];
-
- err = counter_attr_create(dev, cattr_group, ext, scope, NULL);
- if (err < 0)
- return err;
-
- err = counter_comp_id_attr_create(dev, cattr_group, ext->name,
- i);
- if (err < 0)
- return err;
- }
+ /* Add device extensions */
+ return counter_sysfs_exts_add(dev, cattr_group, counter->ext,
+ counter->num_ext, scope, NULL);
return 0;
}
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 2a58582a9df4..aea6622a9b13 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -325,3 +325,4 @@ module_platform_driver(ftm_quaddec_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com>");
MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com>");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index 47a6a9dfc9e8..af5942e66f7d 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -523,3 +523,4 @@ MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 3b13f56bbb11..229473855c5b 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -139,12 +139,23 @@ static int interrupt_cnt_signal_read(struct counter_device *counter,
return 0;
}
+static int interrupt_cnt_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel != 0 ||
+ watch->event != COUNTER_EVENT_CHANGE_OF_STATE)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct counter_ops interrupt_cnt_ops = {
.action_read = interrupt_cnt_action_read,
.count_read = interrupt_cnt_read,
.count_write = interrupt_cnt_write,
.function_read = interrupt_cnt_function_read,
.signal_read = interrupt_cnt_signal_read,
+ .watch_validate = interrupt_cnt_watch_validate,
};
static int interrupt_cnt_probe(struct platform_device *pdev)
@@ -242,3 +253,4 @@ MODULE_ALIAS("platform:interrupt-counter");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Interrupt counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 00844445143b..f9dee15d9777 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -394,3 +394,4 @@ module_platform_driver(mchp_tc_driver);
MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
MODULE_DESCRIPTION("Microchip TCB Capture driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 68031d93ce89..d6b80b6dfc28 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -520,3 +520,4 @@ MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
MODULE_ALIAS("platform:stm32-lptimer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 5779ae7c73cf..9bf20a5d6bda 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -417,3 +417,4 @@ MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
MODULE_ALIAS("platform:stm32-timer-counter");
MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
new file mode 100644
index 000000000000..af10de30aba5
--- /dev/null
+++ b/drivers/counter/ti-ecap-capture.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ECAP Capture driver
+ *
+ * Copyright (C) 2022 Julien Panis <jpanis@baylibre.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/counter.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define ECAP_DRV_NAME "ecap"
+
+/* ECAP event IDs */
+#define ECAP_CEVT1 0
+#define ECAP_CEVT2 1
+#define ECAP_CEVT3 2
+#define ECAP_CEVT4 3
+#define ECAP_CNTOVF 4
+
+#define ECAP_CEVT_LAST ECAP_CEVT4
+#define ECAP_NB_CEVT (ECAP_CEVT_LAST + 1)
+
+#define ECAP_EVT_LAST ECAP_CNTOVF
+#define ECAP_NB_EVT (ECAP_EVT_LAST + 1)
+
+/* Registers */
+#define ECAP_TSCNT_REG 0x00
+
+#define ECAP_CAP_REG(i) (((i) << 2) + 0x08)
+
+#define ECAP_ECCTL_REG 0x28
+#define ECAP_CAPPOL_BIT(i) BIT((i) << 1)
+#define ECAP_EV_MODE_MASK GENMASK(7, 0)
+#define ECAP_CAPLDEN_BIT BIT(8)
+#define ECAP_CONT_ONESHT_BIT BIT(16)
+#define ECAP_STOPVALUE_MASK GENMASK(18, 17)
+#define ECAP_TSCNTSTP_BIT BIT(20)
+#define ECAP_SYNCO_DIS_MASK GENMASK(23, 22)
+#define ECAP_CAP_APWM_BIT BIT(25)
+#define ECAP_ECCTL_EN_MASK (ECAP_CAPLDEN_BIT | ECAP_TSCNTSTP_BIT)
+#define ECAP_ECCTL_CFG_MASK (ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK \
+ | ECAP_ECCTL_EN_MASK | ECAP_CAP_APWM_BIT \
+ | ECAP_CONT_ONESHT_BIT)
+
+#define ECAP_ECINT_EN_FLG_REG 0x2c
+#define ECAP_EVT_EN_MASK GENMASK(ECAP_NB_EVT, ECAP_NB_CEVT)
+#define ECAP_EVT_FLG_BIT(i) BIT((i) + 17)
+
+#define ECAP_ECINT_CLR_FRC_REG 0x30
+#define ECAP_INT_CLR_BIT BIT(0)
+#define ECAP_EVT_CLR_BIT(i) BIT((i) + 1)
+#define ECAP_EVT_CLR_MASK GENMASK(ECAP_NB_EVT, 0)
+
+#define ECAP_PID_REG 0x5c
+
+/* ECAP signals */
+#define ECAP_CLOCK_SIG 0
+#define ECAP_INPUT_SIG 1
+
+static const struct regmap_config ecap_cnt_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = ECAP_PID_REG,
+};
+
+/**
+ * struct ecap_cnt_dev - device private data structure
+ * @enabled: device state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @clk: device clock
+ * @regmap: device register map
+ * @nb_ovf: number of overflows since capture start
+ * @pm_ctx: device context for PM operations
+ * @pm_ctx.ev_mode: event mode bits
+ * @pm_ctx.time_cntr: timestamp counter value
+ */
+struct ecap_cnt_dev {
+ bool enabled;
+ struct mutex lock;
+ struct clk *clk;
+ struct regmap *regmap;
+ atomic_t nb_ovf;
+ struct {
+ u8 ev_mode;
+ u32 time_cntr;
+ } pm_ctx;
+};
+
+static u8 ecap_cnt_capture_get_evmode(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ unsigned int regval;
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_read(ecap_dev->regmap, ECAP_ECCTL_REG, &regval);
+ pm_runtime_put_sync(counter->parent);
+
+ return regval;
+}
+
+static void ecap_cnt_capture_set_evmode(struct counter_device *counter, u8 ev_mode)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_EV_MODE_MASK, ev_mode);
+ pm_runtime_put_sync(counter->parent);
+}
+
+static void ecap_cnt_capture_enable(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+
+ /* Enable interrupts on events */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG,
+ ECAP_EVT_EN_MASK, ECAP_EVT_EN_MASK);
+
+ /* Run counter */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_CFG_MASK,
+ ECAP_SYNCO_DIS_MASK | ECAP_STOPVALUE_MASK | ECAP_ECCTL_EN_MASK);
+}
+
+static void ecap_cnt_capture_disable(struct counter_device *counter)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ /* Stop counter */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_ECCTL_EN_MASK, 0);
+
+ /* Disable interrupts on events */
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, ECAP_EVT_EN_MASK, 0);
+
+ pm_runtime_put_sync(counter->parent);
+}
+
+static u32 ecap_cnt_count_get_val(struct counter_device *counter, unsigned int reg)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ unsigned int regval;
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_read(ecap_dev->regmap, reg, &regval);
+ pm_runtime_put_sync(counter->parent);
+
+ return regval;
+}
+
+static void ecap_cnt_count_set_val(struct counter_device *counter, unsigned int reg, u32 val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ regmap_write(ecap_dev->regmap, reg, val);
+ pm_runtime_put_sync(counter->parent);
+}
+
+static int ecap_cnt_count_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ *val = ecap_cnt_count_get_val(counter, ECAP_TSCNT_REG);
+
+ return 0;
+}
+
+static int ecap_cnt_count_write(struct counter_device *counter,
+ struct counter_count *count, u64 val)
+{
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ ecap_cnt_count_set_val(counter, ECAP_TSCNT_REG, val);
+
+ return 0;
+}
+
+static int ecap_cnt_function_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function)
+{
+ *function = COUNTER_FUNCTION_INCREASE;
+
+ return 0;
+}
+
+static int ecap_cnt_action_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action)
+{
+ *action = (synapse->signal->id == ECAP_CLOCK_SIG) ?
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE :
+ COUNTER_SYNAPSE_ACTION_NONE;
+
+ return 0;
+}
+
+static int ecap_cnt_watch_validate(struct counter_device *counter,
+ const struct counter_watch *watch)
+{
+ if (watch->channel > ECAP_CEVT_LAST)
+ return -EINVAL;
+
+ switch (watch->event) {
+ case COUNTER_EVENT_CAPTURE:
+ case COUNTER_EVENT_OVERFLOW:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ecap_cnt_clk_get_freq(struct counter_device *counter,
+ struct counter_signal *signal, u64 *freq)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *freq = clk_get_rate(ecap_dev->clk);
+
+ return 0;
+}
+
+static int ecap_cnt_pol_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, enum counter_signal_polarity *pol)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+ int bitval;
+
+ pm_runtime_get_sync(counter->parent);
+ bitval = regmap_test_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ pm_runtime_put_sync(counter->parent);
+
+ *pol = bitval ? COUNTER_SIGNAL_POLARITY_NEGATIVE : COUNTER_SIGNAL_POLARITY_POSITIVE;
+
+ return 0;
+}
+
+static int ecap_cnt_pol_write(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, enum counter_signal_polarity pol)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ pm_runtime_get_sync(counter->parent);
+ if (pol == COUNTER_SIGNAL_POLARITY_NEGATIVE)
+ regmap_set_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ else
+ regmap_clear_bits(ecap_dev->regmap, ECAP_ECCTL_REG, ECAP_CAPPOL_BIT(idx));
+ pm_runtime_put_sync(counter->parent);
+
+ return 0;
+}
+
+static int ecap_cnt_cap_read(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *cap)
+{
+ *cap = ecap_cnt_count_get_val(counter, ECAP_CAP_REG(idx));
+
+ return 0;
+}
+
+static int ecap_cnt_cap_write(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 cap)
+{
+ if (cap > U32_MAX)
+ return -ERANGE;
+
+ ecap_cnt_count_set_val(counter, ECAP_CAP_REG(idx), cap);
+
+ return 0;
+}
+
+static int ecap_cnt_nb_ovf_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *val = atomic_read(&ecap_dev->nb_ovf);
+
+ return 0;
+}
+
+static int ecap_cnt_nb_ovf_write(struct counter_device *counter,
+ struct counter_count *count, u64 val)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ atomic_set(&ecap_dev->nb_ovf, val);
+
+ return 0;
+}
+
+static int ecap_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, u64 *val)
+{
+ *val = U32_MAX;
+
+ return 0;
+}
+
+static int ecap_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count, u8 *enable)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ *enable = ecap_dev->enabled;
+
+ return 0;
+}
+
+static int ecap_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count, u8 enable)
+{
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter);
+
+ mutex_lock(&ecap_dev->lock);
+
+ if (enable == ecap_dev->enabled)
+ goto out;
+
+ if (enable)
+ ecap_cnt_capture_enable(counter);
+ else
+ ecap_cnt_capture_disable(counter);
+ ecap_dev->enabled = enable;
+
+out:
+ mutex_unlock(&ecap_dev->lock);
+
+ return 0;
+}
+
+static const struct counter_ops ecap_cnt_ops = {
+ .count_read = ecap_cnt_count_read,
+ .count_write = ecap_cnt_count_write,
+ .function_read = ecap_cnt_function_read,
+ .action_read = ecap_cnt_action_read,
+ .watch_validate = ecap_cnt_watch_validate,
+};
+
+static const enum counter_function ecap_cnt_functions[] = {
+ COUNTER_FUNCTION_INCREASE,
+};
+
+static const enum counter_synapse_action ecap_cnt_clock_actions[] = {
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+};
+
+static const enum counter_synapse_action ecap_cnt_input_actions[] = {
+ COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static struct counter_comp ecap_cnt_clock_ext[] = {
+ COUNTER_COMP_SIGNAL_U64("frequency", ecap_cnt_clk_get_freq, NULL),
+};
+
+static const enum counter_signal_polarity ecap_cnt_pol_avail[] = {
+ COUNTER_SIGNAL_POLARITY_POSITIVE,
+ COUNTER_SIGNAL_POLARITY_NEGATIVE,
+};
+
+static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_avail, ECAP_NB_CEVT);
+
+static struct counter_comp ecap_cnt_signal_ext[] = {
+ COUNTER_COMP_ARRAY_POLARITY(ecap_cnt_pol_read, ecap_cnt_pol_write, ecap_cnt_pol_array),
+};
+
+static struct counter_signal ecap_cnt_signals[] = {
+ {
+ .id = ECAP_CLOCK_SIG,
+ .name = "Clock Signal",
+ .ext = ecap_cnt_clock_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_clock_ext),
+ },
+ {
+ .id = ECAP_INPUT_SIG,
+ .name = "Input Signal",
+ .ext = ecap_cnt_signal_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_signal_ext),
+ },
+};
+
+static struct counter_synapse ecap_cnt_synapses[] = {
+ {
+ .actions_list = ecap_cnt_clock_actions,
+ .num_actions = ARRAY_SIZE(ecap_cnt_clock_actions),
+ .signal = &ecap_cnt_signals[ECAP_CLOCK_SIG],
+ },
+ {
+ .actions_list = ecap_cnt_input_actions,
+ .num_actions = ARRAY_SIZE(ecap_cnt_input_actions),
+ .signal = &ecap_cnt_signals[ECAP_INPUT_SIG],
+ },
+};
+
+static DEFINE_COUNTER_ARRAY_CAPTURE(ecap_cnt_cap_array, ECAP_NB_CEVT);
+
+static struct counter_comp ecap_cnt_count_ext[] = {
+ COUNTER_COMP_ARRAY_CAPTURE(ecap_cnt_cap_read, ecap_cnt_cap_write, ecap_cnt_cap_array),
+ COUNTER_COMP_COUNT_U64("num_overflows", ecap_cnt_nb_ovf_read, ecap_cnt_nb_ovf_write),
+ COUNTER_COMP_CEILING(ecap_cnt_ceiling_read, NULL),
+ COUNTER_COMP_ENABLE(ecap_cnt_enable_read, ecap_cnt_enable_write),
+};
+
+static struct counter_count ecap_cnt_counts[] = {
+ {
+ .name = "Timestamp Counter",
+ .functions_list = ecap_cnt_functions,
+ .num_functions = ARRAY_SIZE(ecap_cnt_functions),
+ .synapses = ecap_cnt_synapses,
+ .num_synapses = ARRAY_SIZE(ecap_cnt_synapses),
+ .ext = ecap_cnt_count_ext,
+ .num_ext = ARRAY_SIZE(ecap_cnt_count_ext),
+ },
+};
+
+static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
+{
+ struct counter_device *counter_dev = dev_id;
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+ unsigned int clr = 0;
+ unsigned int flg;
+ int i;
+
+ regmap_read(ecap_dev->regmap, ECAP_ECINT_EN_FLG_REG, &flg);
+
+ /* Check capture events */
+ for (i = 0 ; i < ECAP_NB_CEVT ; i++) {
+ if (flg & ECAP_EVT_FLG_BIT(i)) {
+ counter_push_event(counter_dev, COUNTER_EVENT_CAPTURE, i);
+ clr |= ECAP_EVT_CLR_BIT(i);
+ }
+ }
+
+ /* Check counter overflow */
+ if (flg & ECAP_EVT_FLG_BIT(ECAP_CNTOVF)) {
+ atomic_inc(&ecap_dev->nb_ovf);
+ for (i = 0 ; i < ECAP_NB_CEVT ; i++)
+ counter_push_event(counter_dev, COUNTER_EVENT_OVERFLOW, i);
+ clr |= ECAP_EVT_CLR_BIT(ECAP_CNTOVF);
+ }
+
+ clr |= ECAP_INT_CLR_BIT;
+ regmap_update_bits(ecap_dev->regmap, ECAP_ECINT_CLR_FRC_REG, ECAP_EVT_CLR_MASK, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void ecap_cnt_pm_disable(void *dev)
+{
+ pm_runtime_disable(dev);
+}
+
+static int ecap_cnt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ecap_cnt_dev *ecap_dev;
+ struct counter_device *counter_dev;
+ void __iomem *mmio_base;
+ unsigned long clk_rate;
+ int ret;
+
+ counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev));
+ if (IS_ERR(counter_dev))
+ return PTR_ERR(counter_dev);
+
+ counter_dev->name = ECAP_DRV_NAME;
+ counter_dev->parent = dev;
+ counter_dev->ops = &ecap_cnt_ops;
+ counter_dev->signals = ecap_cnt_signals;
+ counter_dev->num_signals = ARRAY_SIZE(ecap_cnt_signals);
+ counter_dev->counts = ecap_cnt_counts;
+ counter_dev->num_counts = ARRAY_SIZE(ecap_cnt_counts);
+
+ ecap_dev = counter_priv(counter_dev);
+
+ mutex_init(&ecap_dev->lock);
+
+ ecap_dev->clk = devm_clk_get_enabled(dev, "fck");
+ if (IS_ERR(ecap_dev->clk))
+ return dev_err_probe(dev, PTR_ERR(ecap_dev->clk), "failed to get clock\n");
+
+ clk_rate = clk_get_rate(ecap_dev->clk);
+ if (!clk_rate) {
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
+
+ ecap_dev->regmap = devm_regmap_init_mmio(dev, mmio_base, &ecap_cnt_regmap_config);
+ if (IS_ERR(ecap_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ecap_dev->regmap), "failed to init regmap\n");
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get irq\n");
+
+ ret = devm_request_irq(dev, ret, ecap_cnt_isr, 0, pdev->name, counter_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq\n");
+
+ platform_set_drvdata(pdev, counter_dev);
+
+ pm_runtime_enable(dev);
+
+ /* Register a cleanup callback to care for disabling PM */
+ ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+
+ ret = devm_counter_add(dev, counter_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add counter\n");
+
+ return 0;
+}
+
+static int ecap_cnt_remove(struct platform_device *pdev)
+{
+ struct counter_device *counter_dev = platform_get_drvdata(pdev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ if (ecap_dev->enabled)
+ ecap_cnt_capture_disable(counter_dev);
+
+ return 0;
+}
+
+static int ecap_cnt_suspend(struct device *dev)
+{
+ struct counter_device *counter_dev = dev_get_drvdata(dev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ /* If eCAP is running, stop capture then save timestamp counter */
+ if (ecap_dev->enabled) {
+ /*
+ * Disabling capture has the following effects:
+ * - interrupts are disabled
+ * - loading of capture registers is disabled
+ * - timebase counter is stopped
+ */
+ ecap_cnt_capture_disable(counter_dev);
+ ecap_dev->pm_ctx.time_cntr = ecap_cnt_count_get_val(counter_dev, ECAP_TSCNT_REG);
+ }
+
+ ecap_dev->pm_ctx.ev_mode = ecap_cnt_capture_get_evmode(counter_dev);
+
+ clk_disable(ecap_dev->clk);
+
+ return 0;
+}
+
+static int ecap_cnt_resume(struct device *dev)
+{
+ struct counter_device *counter_dev = dev_get_drvdata(dev);
+ struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev);
+
+ clk_enable(ecap_dev->clk);
+
+ ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode);
+
+ /* If eCAP was running, restore timestamp counter then run capture */
+ if (ecap_dev->enabled) {
+ ecap_cnt_count_set_val(counter_dev, ECAP_TSCNT_REG, ecap_dev->pm_ctx.time_cntr);
+ ecap_cnt_capture_enable(counter_dev);
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ecap_cnt_pm_ops, ecap_cnt_suspend, ecap_cnt_resume);
+
+static const struct of_device_id ecap_cnt_of_match[] = {
+ { .compatible = "ti,am62-ecap-capture" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ecap_cnt_of_match);
+
+static struct platform_driver ecap_cnt_driver = {
+ .probe = ecap_cnt_probe,
+ .remove = ecap_cnt_remove,
+ .driver = {
+ .name = "ecap-capture",
+ .of_match_table = ecap_cnt_of_match,
+ .pm = pm_sleep_ptr(&ecap_cnt_pm_ops),
+ },
+};
+module_platform_driver(ecap_cnt_driver);
+
+MODULE_DESCRIPTION("ECAP Capture driver");
+MODULE_AUTHOR("Julien Panis <jpanis@baylibre.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(COUNTER);
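Since the timestamp counter handled above is 32 bits wide (writes beyond U32_MAX are rejected) and every wrap increments nb_ovf, an extended timestamp can be reconstructed from the two exposed values. A sketch, assuming both are sampled coherently (for instance while capture is disabled):

static inline u64 ecap_extended_timestamp(u32 num_overflows, u32 tscnt)
{
	/* num_overflows counts full wraps of the 32-bit TSCNT register */
	return ((u64)num_overflows << 32) | tscnt;
}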
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 0489d26eb47c..b0f24cf3e891 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -456,3 +456,4 @@ module_platform_driver(ti_eqep_driver);
MODULE_AUTHOR("David Lechner <david@lechnology.com>");
MODULE_DESCRIPTION("TI eQEP counter driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(COUNTER);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 954749afb5fe..82e5de1f6f8c 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -153,7 +153,7 @@ config ARM_OMAP2PLUS_CPUFREQ
config ARM_QCOM_CPUFREQ_NVMEM
tristate "Qualcomm nvmem based CPUFreq"
depends on ARCH_QCOM
- depends on QCOM_QFPROM
+ depends on NVMEM_QCOM_QFPROM
depends on QCOM_SMEM
select PM_OPP
help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 55516043b656..310779b07daf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -51,6 +51,21 @@ config X86_AMD_PSTATE
If in doubt, say N.
+config X86_AMD_PSTATE_UT
+ tristate "selftest for AMD Processor P-State driver"
+ depends on X86 && ACPI_PROCESSOR
+ default n
+ help
+ This kernel module is used for testing the amd-pstate driver. It is
+ safe to say M here.
+
+ It can also be built in with X86_AMD_PSTATE disabled; in that case
+ the tests only report that they require the amd-pstate driver and
+ that X86_AMD_PSTATE should be enabled. Currently only tests for
+ amd-pstate are supported.
+ In the future, comparison tests will be added: amd-pstate can be
+ disabled and acpi-cpufreq enabled to run the same test cases and
+ compare the results.

+
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
depends on ACPI_PROCESSOR
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 285de70af877..49b98c62c5af 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -30,6 +30,7 @@ amd_pstate-y := amd-pstate.o amd-pstate-trace.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
obj-$(CONFIG_X86_AMD_PSTATE) += amd_pstate.o
+obj-$(CONFIG_X86_AMD_PSTATE_UT) += amd-pstate-ut.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
new file mode 100644
index 000000000000..e4a5b4d90f83
--- /dev/null
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-1.0-or-later
+/*
+ * AMD Processor P-state Frequency Driver Unit Test
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Author: Meng Li <li.meng@amd.com>
+ *
+ * The AMD P-State Unit Test is a test module for the amd-pstate driver.
+ * 1) It helps users verify that their processor (SBIOS/firmware or
+ * hardware) supports the driver. 2) It gives the kernel a basic
+ * functional test to catch regressions introduced by updates. 3) More
+ * functional or performance tests can be added later and their results
+ * compared, which benefits power and performance scaling optimization.
+ *
+ * This driver implements a basic framework, with plans to add test
+ * cases that improve the depth and coverage of the testing.
+ *
+ * See the "Unit Tests for amd-pstate" section of
+ * Documentation/admin-guide/pm/amd-pstate.rst for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/fs.h>
+#include <linux/amd-pstate.h>
+
+#include <acpi/cppc_acpi.h>
+
+/*
+ * Abbreviations:
+ * amd_pstate_ut: a short form of "AMD P-State unit test".
+ * It helps keep variable names smaller and simpler.
+ */
+enum amd_pstate_ut_result {
+ AMD_PSTATE_UT_RESULT_PASS,
+ AMD_PSTATE_UT_RESULT_FAIL,
+};
+
+struct amd_pstate_ut_struct {
+ const char *name;
+ void (*func)(u32 index);
+ enum amd_pstate_ut_result result;
+};
+
+/*
+ * Forward declarations of the AMD P-State unit test cases
+ */
+static void amd_pstate_ut_acpi_cpc_valid(u32 index);
+static void amd_pstate_ut_check_enabled(u32 index);
+static void amd_pstate_ut_check_perf(u32 index);
+static void amd_pstate_ut_check_freq(u32 index);
+
+static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
+ {"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
+ {"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
+ {"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
+ {"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
+};
+
+static bool get_shared_mem(void)
+{
+ bool result = false;
+ char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
+ char buf[5] = {0};
+ struct file *filp = NULL;
+ loff_t pos = 0;
+ ssize_t ret;
+
+ if (!boot_cpu_has(X86_FEATURE_CPPC)) {
+ filp = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(filp))
+ pr_err("%s unable to open %s file!\n", __func__, path);
+ else {
+ ret = kernel_read(filp, &buf, sizeof(buf), &pos);
+ if (ret < 0)
+ pr_err("%s read %s file fail ret=%ld!\n",
+ __func__, path, (long)ret);
+ filp_close(filp, NULL);
+ }
+
+ if ('Y' == *buf)
+ result = true;
+ }
+
+ return result;
+}
+
+/*
+ * Check that the _CPC object is present in the SBIOS.
+ */
+static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+{
+ if (acpi_cpc_valid())
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ }
+}
+
+static void amd_pstate_ut_pstate_enable(u32 index)
+{
+ int ret = 0;
+ u64 cppc_enable = 0;
+
+ ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
+ return;
+ }
+ if (cppc_enable)
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s amd pstate must be enabled!\n", __func__);
+ }
+}
+
+/*
+ * check if amd pstate is enabled
+ */
+static void amd_pstate_ut_check_enabled(u32 index)
+{
+ if (get_shared_mem())
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else
+ amd_pstate_ut_pstate_enable(index);
+}
+
+/*
+ * check if performance values are reasonable.
+ * highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
+ */
+static void amd_pstate_ut_check_perf(u32 index)
+{
+ int cpu = 0, ret = 0;
+ u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
+ u64 cap1 = 0;
+ struct cppc_perf_caps cppc_perf;
+ struct cpufreq_policy *policy = NULL;
+ struct amd_cpudata *cpudata = NULL;
+
+ highest_perf = amd_get_highest_perf();
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ break;
+ cpudata = policy->driver_data;
+
+ if (get_shared_mem()) {
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
+ return;
+ }
+
+ nominal_perf = cppc_perf.nominal_perf;
+ lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ lowest_perf = cppc_perf.lowest_perf;
+ } else {
+ ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
+ if (ret) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
+ return;
+ }
+
+ nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
+ lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
+ lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ }
+
+ if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
+ (nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
+ (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
+ (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
+ __func__, cpu, highest_perf, cpudata->highest_perf,
+ nominal_perf, cpudata->nominal_perf,
+ lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
+ lowest_perf, cpudata->lowest_perf);
+ return;
+ }
+
+ if (!((highest_perf >= nominal_perf) &&
+ (nominal_perf > lowest_nonlinear_perf) &&
+ (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_perf > 0))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
+ __func__, cpu, highest_perf, nominal_perf,
+ lowest_nonlinear_perf, lowest_perf);
+ return;
+ }
+ }
+
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+}
+
+/*
+ * Check if frequency values are reasonable.
+ * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
+ * Also check the max frequency when boost mode is supported.
+ */
+static void amd_pstate_ut_check_freq(u32 index)
+{
+ int cpu = 0;
+ struct cpufreq_policy *policy = NULL;
+ struct amd_cpudata *cpudata = NULL;
+
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ break;
+ cpudata = policy->driver_data;
+
+ if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
+ (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
+ (cpudata->min_freq > 0))) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
+ __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, cpudata->min_freq);
+ return;
+ }
+
+ if (cpudata->min_freq != policy->min) {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
+ __func__, cpu, cpudata->min_freq, policy->min);
+ return;
+ }
+
+ if (cpudata->boost_supported) {
+ if ((policy->max == cpudata->max_freq) ||
+ (policy->max == cpudata->nominal_freq))
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
+ __func__, cpu, policy->max, cpudata->max_freq,
+ cpudata->nominal_freq);
+ return;
+ }
+ } else {
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ pr_err("%s cpu%d must support boost!\n", __func__, cpu);
+ return;
+ }
+ }
+
+ amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+}
+
+static int __init amd_pstate_ut_init(void)
+{
+ u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
+
+ for (i = 0; i < arr_size; i++) {
+ amd_pstate_ut_cases[i].func(i);
+ switch (amd_pstate_ut_cases[i].result) {
+ case AMD_PSTATE_UT_RESULT_PASS:
+ pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
+ break;
+ case AMD_PSTATE_UT_RESULT_FAIL:
+ default:
+ pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void __exit amd_pstate_ut_exit(void)
+{
+}
+
+module_init(amd_pstate_ut_init);
+module_exit(amd_pstate_ut_exit);
+
+MODULE_AUTHOR("Meng Li <li.meng@amd.com>");
+MODULE_DESCRIPTION("AMD P-state driver Test module");
+MODULE_LICENSE("GPL");
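All cases run once from amd_pstate_ut_init() when the module is loaded, and the pass/fail summary lands in the kernel log via pr_info(). Extending the table with another case means adding a function and an initializer; the sketch below uses a hypothetical amd_pstate_ut_check_foo() that is not part of the module:

static void amd_pstate_ut_check_foo(u32 index)
{
	/* hypothetical check: pass when the CPPC feature flag is present */
	if (boot_cpu_has(X86_FEATURE_CPPC))
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
}

/* ...plus one more entry in amd_pstate_ut_cases[]:
 *	{"amd_pstate_ut_check_foo", amd_pstate_ut_check_foo },
 */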
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9ac75c1cde9c..ace7d50cf2ac 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
+#include <linux/amd-pstate.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
@@ -46,8 +47,8 @@
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"
-#define AMD_PSTATE_TRANSITION_LATENCY 0x20000
-#define AMD_PSTATE_TRANSITION_DELAY 500
+#define AMD_PSTATE_TRANSITION_LATENCY 20000
+#define AMD_PSTATE_TRANSITION_DELAY 1000
/*
* TODO: We need more time to fine tune processors with shared memory solution
@@ -65,65 +66,6 @@ MODULE_PARM_DESC(shared_mem,
static struct cpufreq_driver amd_pstate_driver;
-/**
- * struct amd_aperf_mperf
- * @aperf: actual performance frequency clock count
- * @mperf: maximum performance frequency clock count
- * @tsc: time stamp counter
- */
-struct amd_aperf_mperf {
- u64 aperf;
- u64 mperf;
- u64 tsc;
-};
-
-/**
- * struct amd_cpudata - private CPU data for AMD P-State
- * @cpu: CPU number
- * @req: constraint request to apply
- * @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
- * @max_freq: the frequency that mapped to highest_perf
- * @min_freq: the frequency that mapped to lowest_perf
- * @nominal_freq: the frequency that mapped to nominal_perf
- * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf
- * @cur: Difference of Aperf/Mperf/tsc count between last and current sample
- * @prev: Last Aperf/Mperf/tsc count value read from register
- * @freq: current cpu frequency value
- * @boost_supported: check whether the Processor or SBIOS supports boost mode
- *
- * The amd_cpudata is key private data for each CPU thread in AMD P-State, and
- * represents all the attributes and goals that AMD P-State requests at runtime.
- */
-struct amd_cpudata {
- int cpu;
-
- struct freq_qos_request req[2];
- u64 cppc_req_cached;
-
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
-
- u32 max_freq;
- u32 min_freq;
- u32 nominal_freq;
- u32 lowest_nonlinear_freq;
-
- struct amd_aperf_mperf cur;
- struct amd_aperf_mperf prev;
-
- u64 freq;
- bool boost_supported;
-};
-
static inline int pstate_enable(bool enable)
{
return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
@@ -152,6 +94,7 @@ static inline int amd_pstate_enable(bool enable)
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
u64 cap1;
+ u32 highest_perf;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
&cap1);
@@ -163,7 +106,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
*
* CPPC entry doesn't indicate the highest performance in some ASICs.
*/
- WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+ highest_perf = amd_get_highest_perf();
+ if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
@@ -175,12 +122,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+ highest_perf = amd_get_highest_perf();
+ if (highest_perf > cppc_perf.highest_perf)
+ highest_perf = cppc_perf.highest_perf;
+
+ WRITE_ONCE(cpudata->highest_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
@@ -269,6 +221,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
+ des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
value &= ~AMD_CPPC_MIN_PERF(~0L);
value |= AMD_CPPC_MIN_PERF(min_perf);
@@ -312,7 +265,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
return -ENODEV;
cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
freqs.old = policy->cur;
@@ -357,8 +310,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
}
@@ -555,9 +506,7 @@ free_cpudata1:
static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata;
-
- cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
freq_qos_remove_request(&cpudata->req[1]);
freq_qos_remove_request(&cpudata->req[0]);
@@ -599,9 +548,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
int max_freq;
- struct amd_cpudata *cpudata;
-
- cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
max_freq = amd_get_max_freq(cpudata);
if (max_freq < 0)
@@ -614,9 +561,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
char *buf)
{
int freq;
- struct amd_cpudata *cpudata;
-
- cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata = policy->driver_data;
freq = amd_get_lowest_nonlinear_freq(cpudata);
if (freq < 0)
@@ -662,7 +607,7 @@ static struct cpufreq_driver amd_pstate_driver = {
.resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.name = "amd-pstate",
- .attr = amd_pstate_attr,
+ .attr = amd_pstate_attr,
};
static int __init amd_pstate_init(void)
@@ -673,7 +618,7 @@ static int __init amd_pstate_init(void)
return -ENODEV;
if (!acpi_cpc_valid()) {
- pr_debug("the _CPC object is not present in SBIOS\n");
+ pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
return -ENODEV;
}
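Two of the changes above are easy to misread. The transition macros switch from 0x20000 (decimal 131072) to 20000 for the latency and from 500 to 1000 for the delay, so the old latency value was likely a stray hex literal. And the des_perf clamp moves from amd_pstate_adjust_perf() into amd_pstate_update(), so every caller, including amd_pstate_target(), now gets the requested performance bounded. A worked example of the clamp semantics, with illustrative numbers only:

/*
 *	clamp_t(unsigned long, 10,  40, 160) == 40	raised to min_perf
 *	clamp_t(unsigned long, 200, 40, 160) == 160	capped at max_perf
 *	clamp_t(unsigned long, 90,  40, 160) == 90	already in range
 */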
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index f7c23fa468f0..39221a9a187a 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -156,7 +156,7 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.name = BMIPS_CPUFREQ_PREFIX,
};
-static int __init bmips_cpufreq_probe(void)
+static int __init bmips_cpufreq_driver_init(void)
{
struct cpufreq_compat *cc;
struct device_node *np;
@@ -176,7 +176,13 @@ static int __init bmips_cpufreq_probe(void)
return cpufreq_register_driver(&bmips_cpufreq_driver);
}
-device_initcall(bmips_cpufreq_probe);
+module_init(bmips_cpufreq_driver_init);
+
+static void __exit bmips_cpufreq_driver_exit(void)
+{
+ cpufreq_unregister_driver(&bmips_cpufreq_driver);
+}
+module_exit(bmips_cpufreq_driver_exit);
MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 24eaf0ec344d..432dfb4e8027 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -63,7 +63,15 @@ static struct cppc_workaround_oem_info wa_info[] = {
static struct cpufreq_driver cppc_cpufreq_driver;
+static enum {
+ FIE_UNSET = -1,
+ FIE_ENABLED,
+ FIE_DISABLED
+} fie_disabled = FIE_UNSET;
+
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
+module_param(fie_disabled, int, 0444);
+MODULE_PARM_DESC(fie_disabled, "Disable Frequency Invariance Engine (FIE)");
/* Frequency invariance support */
struct cppc_freq_invariance {
@@ -158,7 +166,7 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
struct cppc_freq_invariance *cppc_fi;
int cpu, ret;
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled)
return;
for_each_cpu(cpu, policy->cpus) {
@@ -199,7 +207,7 @@ static void cppc_cpufreq_cpu_fie_exit(struct cpufreq_policy *policy)
struct cppc_freq_invariance *cppc_fi;
int cpu;
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled)
return;
/* policy->cpus will be empty here, use related_cpus instead */
@@ -229,7 +237,15 @@ static void __init cppc_freq_invariance_init(void)
};
int ret;
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled != FIE_ENABLED && fie_disabled != FIE_DISABLED) {
+ fie_disabled = FIE_ENABLED;
+ if (cppc_perf_ctrs_in_pcc()) {
+ pr_info("FIE not enabled on systems with registers in PCC\n");
+ fie_disabled = FIE_DISABLED;
+ }
+ }
+
+ if (fie_disabled)
return;
kworker_fie = kthread_create_worker(0, "cppc_fie");
@@ -247,7 +263,7 @@ static void __init cppc_freq_invariance_init(void)
static void cppc_freq_invariance_exit(void)
{
- if (cppc_cpufreq_driver.get == hisi_cppc_cpufreq_get_rate)
+ if (fie_disabled)
return;
kthread_destroy_worker(kworker_fie);
@@ -936,6 +952,7 @@ static void cppc_check_hisi_workaround(void)
wa_info[i].oem_revision == tbl->oem_revision) {
/* Overwrite the get() callback */
cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
+ fie_disabled = FIE_DISABLED;
break;
}
}
@@ -947,7 +964,7 @@ static int __init cppc_cpufreq_init(void)
{
int ret;
- if ((acpi_disabled) || !acpi_cpc_valid())
+ if (!acpi_cpc_valid())
return -ENODEV;
cppc_check_hisi_workaround();
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 2c96de3f2d83..6ac3800db450 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -146,6 +146,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index ac57cddc5f2f..a45864701143 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -55,7 +55,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
.notifier_call = hb_cpufreq_clk_notify,
};
-static int hb_cpufreq_driver_init(void)
+static int __init hb_cpufreq_driver_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct device *cpu_dev;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 57cdb3679885..fc3ebeb0bbe5 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2416,6 +2416,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(COMETLAKE, core_funcs),
X86_MATCH(ICELAKE_X, core_funcs),
+ X86_MATCH(TIGERLAKE, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d5ef3c66c762..833589bc95e4 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -13,6 +13,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>
@@ -56,6 +57,8 @@ struct qcom_cpufreq_data {
struct cpufreq_policy *policy;
bool per_core_dcvs;
+
+ struct freq_qos_request throttle_freq_req;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -316,14 +319,16 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
if (IS_ERR(opp)) {
dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
} else {
- throttled_freq = freq_hz / HZ_PER_KHZ;
-
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
-
dev_pm_opp_put(opp);
}
+ throttled_freq = freq_hz / HZ_PER_KHZ;
+
+ freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
+
+ /* Update thermal pressure (the boost frequencies are accepted) */
+ arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+
/*
* In the unlikely case policy is unregistered do not enable
* polling or h/w interrupt
@@ -413,6 +418,14 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
if (data->throttle_irq < 0)
return data->throttle_irq;
+ ret = freq_qos_add_request(&policy->constraints,
+ &data->throttle_freq_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
+ return ret;
+ }
+
data->cancel_throttle = false;
data->policy = policy;
@@ -479,6 +492,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
if (data->throttle_irq <= 0)
return;
+ freq_qos_remove_request(&data->throttle_freq_req);
free_irq(data->throttle_irq, data);
}
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index a67df90848c2..1a63aeea8711 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -252,7 +252,7 @@ static int sti_cpufreq_fetch_syscon_registers(void)
return 0;
}
-static int sti_cpufreq_init(void)
+static int __init sti_cpufreq_init(void)
{
int ret;
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 1216046cf4c2..c2004cae3f02 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -38,14 +38,6 @@
/* cpufreq transisition latency */
#define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
-enum cluster {
- CLUSTER0,
- CLUSTER1,
- CLUSTER2,
- CLUSTER3,
- MAX_CLUSTERS,
-};
-
struct tegra_cpu_ctr {
u32 cpu;
u32 coreclk_cnt, last_coreclk_cnt;
@@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
struct tegra_cpufreq_soc {
struct tegra_cpufreq_ops *ops;
int maxcpus_per_cluster;
+ unsigned int num_clusters;
phys_addr_t actmon_cntr_base;
};
struct tegra194_cpufreq_data {
void __iomem *regs;
- size_t num_clusters;
struct cpufreq_frequency_table **tables;
const struct tegra_cpufreq_soc *soc;
};
@@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
.ops = &tegra234_cpufreq_ops,
.actmon_cntr_base = 0x9000,
.maxcpus_per_cluster = 4,
+ .num_clusters = 3,
+};
+
+static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
+ .ops = &tegra234_cpufreq_ops,
+ .actmon_cntr_base = 0x4000,
+ .maxcpus_per_cluster = 8,
+ .num_clusters = 1,
};
static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
@@ -314,11 +314,7 @@ static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
{
- int ret;
-
- ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
-
- return ret;
+ return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
}
static void tegra194_set_cpu_ndiv_sysreg(void *data)
@@ -382,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
- if (clusterid >= data->num_clusters || !data->tables[clusterid])
+ if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
return -EINVAL;
start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
@@ -433,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
.ops = &tegra194_cpufreq_ops,
.maxcpus_per_cluster = 2,
+ .num_clusters = 4,
};
static void tegra194_cpufreq_free_resources(void)
@@ -525,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
soc = of_device_get_match_data(&pdev->dev);
- if (soc->ops && soc->maxcpus_per_cluster) {
+ if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
data->soc = soc;
} else {
dev_err(&pdev->dev, "soc data missing\n");
return -EINVAL;
}
- data->num_clusters = MAX_CLUSTERS;
- data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
+ data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
sizeof(*data->tables), GFP_KERNEL);
if (!data->tables)
return -ENOMEM;
@@ -558,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
goto put_bpmp;
}
- for (i = 0; i < data->num_clusters; i++) {
+ for (i = 0; i < data->soc->num_clusters; i++) {
data->tables[i] = init_freq_table(pdev, bpmp, i);
if (IS_ERR(data->tables[i])) {
err = PTR_ERR(data->tables[i]);
@@ -590,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
static const struct of_device_id tegra194_cpufreq_of_match[] = {
{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
+ { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
{ /* sentinel */ }
};
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index df85a77d476b..f64180dd2005 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -398,7 +398,7 @@ fail_put_node:
return ret;
}
-static int ti_cpufreq_init(void)
+static int __init ti_cpufreq_init(void)
{
const struct of_device_id *match;
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 74068742cef3..9acde71558d5 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -54,7 +54,7 @@
* variable is not locked. It is only written from the cpu that
* it stores (or by the on/offlining cpu if that cpu is offline),
* and only read after all the cpus are ready for the coupled idle
- * state are are no longer updating it.
+ * state are no longer updating it.
*
* Three atomic counters are used. alive_count tracks the number
* of cpus in the coupled set that are currently or soon will be
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index c32c600b3cf8..0b5461b3d7dd 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -233,8 +233,8 @@ static inline void add_powernv_state(int index, const char *name,
unsigned int exit_latency,
u64 psscr_val, u64 psscr_mask)
{
- strlcpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
- strlcpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
+ strscpy(powernv_states[index].name, name, CPUIDLE_NAME_LEN);
+ strscpy(powernv_states[index].desc, name, CPUIDLE_NAME_LEN);
powernv_states[index].flags = flags;
powernv_states[index].target_residency = target_residency;
powernv_states[index].exit_latency = exit_latency;
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index 3db4fca1172b..821984947ed9 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -124,10 +124,8 @@ static bool psci_pd_try_set_osi_mode(void)
return false;
ret = psci_set_osi_mode(true);
- if (ret) {
- pr_warn("failed to enable OSI mode: %d\n", ret);
+ if (ret)
return false;
- }
return true;
}
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 862a2876f1c9..05fe2902df9a 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -97,8 +97,13 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
+ u32 state = states[idx];
- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
+ idx, state);
}
static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 29acaf48e575..0d0f9751ff8f 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -63,12 +63,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
cpuidle_curr_governor = gov;
- if (gov) {
- list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
- cpuidle_enable_device(dev);
- cpuidle_install_idle_handler();
- printk(KERN_INFO "cpuidle: using governor %s\n", gov->name);
- }
+ list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
+ cpuidle_enable_device(dev);
+
+ cpuidle_install_idle_handler();
+ pr_info("cpuidle: using governor %s\n", gov->name);
return 0;
}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 3e6aa319920b..55e75fbb658e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -802,9 +802,7 @@ source "drivers/crypto/amlogic/Kconfig"
config CRYPTO_DEV_SA2UL
tristate "Support for TI security accelerator"
depends on ARCH_K3 || COMPILE_TEST
- select ARM64_CRYPTO
select CRYPTO_AES
- select CRYPTO_AES_ARM64
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_SHA1
@@ -818,5 +816,6 @@ config CRYPTO_DEV_SA2UL
acceleration for cryptographic algorithms on these devices.
source "drivers/crypto/keembay/Kconfig"
+source "drivers/crypto/aspeed/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index f81703a86b98..116de173a66c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed/
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 44b8fc4b786d..006e40133c28 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -235,7 +235,7 @@ static struct sun4i_ss_alg_template ss_algs[] = {
#endif
};
-static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
+static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v)
{
unsigned int i;
@@ -266,19 +266,7 @@ static int sun4i_ss_dbgfs_read(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int sun4i_ss_dbgfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sun4i_ss_dbgfs_read, inode->i_private);
-}
-
-static const struct file_operations sun4i_ss_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sun4i_ss_dbgfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs);
/*
* Power management strategy: The device is suspended unless a TFM exists for
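The rename to sun4i_ss_debugfs_show() pairs with DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs), which expects a <name>_show() function and generates roughly the open-coded boilerplate this hunk deletes:

static int sun4i_ss_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sun4i_ss_debugfs_show, inode->i_private);
}

static const struct file_operations sun4i_ss_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = sun4i_ss_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};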
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index 19cd2e52f89d..c4b0a8b58842 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -54,11 +54,9 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
goto err_dst;
}
- err = pm_runtime_get_sync(ce->dev);
- if (err < 0) {
- pm_runtime_put_noidle(ce->dev);
+ err = pm_runtime_resume_and_get(ce->dev);
+ if (err < 0)
goto err_pm;
- }
mutex_lock(&ce->rnglock);
chan = &ce->chanlist[flow];
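pm_runtime_resume_and_get() behaves roughly like the open-coded pattern removed above; on failure it drops the usage count that the resume attempt took, so callers no longer have to remember the pm_runtime_put_noidle() themselves:

static inline int resume_and_get_sketch(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* takes a usage count even on failure */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the count without scheduling idle */
		return ret;
	}

	return 0;
}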
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index e79514fce731..af017a087ebf 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -177,7 +177,7 @@ static int meson_cipher(struct skcipher_request *areq)
if (areq->src == areq->dst) {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_BIDIRECTIONAL);
- if (nr_sgs < 0) {
+ if (!nr_sgs) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
@@ -186,14 +186,14 @@ static int meson_cipher(struct skcipher_request *areq)
} else {
nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
DMA_TO_DEVICE);
- if (nr_sgs < 0 || nr_sgs > MAXDESC - 3) {
+ if (!nr_sgs || nr_sgs > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
err = -EINVAL;
goto theend;
}
nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
DMA_FROM_DEVICE);
- if (nr_sgd < 0 || nr_sgd > MAXDESC - 3) {
+ if (!nr_sgd || nr_sgd > MAXDESC - 3) {
dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
err = -EINVAL;
goto theend;
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
new file mode 100644
index 000000000000..ae2710ae8d8f
--- /dev/null
+++ b/drivers/crypto/aspeed/Kconfig
@@ -0,0 +1,48 @@
+config CRYPTO_DEV_ASPEED
+ tristate "Support for Aspeed cryptographic engine driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select CRYPTO_ENGINE
+ help
+ The Hash and Crypto Engine (HACE) is designed to accelerate the
+ throughput of hash digest calculation, encryption and decryption.
+
+ Select y here to have support for the cryptographic driver
+ available on Aspeed SoCs.
+
+config CRYPTO_DEV_ASPEED_DEBUG
+ bool "Enable Aspeed crypto debug messages"
+ depends on CRYPTO_DEV_ASPEED
+ help
+ Enable this option to print Aspeed crypto debugging
+ messages.
+ Avoid enabling it in production builds, since the extra
+ messages add overhead to the driver's timing.
+
+config CRYPTO_DEV_ASPEED_HACE_HASH
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_HMAC
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ hash driver.
+ Supports multiple message digest standards, including
+ SHA-1, SHA-224, SHA-256, SHA-384, SHA-512, and so on.
+
+config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_CFB
+ select CRYPTO_OFB
+ select CRYPTO_CTR
+ help
+ Select here to enable Aspeed Hash & Crypto Engine (HACE)
+ crypto driver.
+ Supports AES/DES symmetric-key encryption and decryption
+ with ECB/CBC/CFB/OFB/CTR options.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
new file mode 100644
index 000000000000..a0ed40ddaad1
--- /dev/null
+++ b/drivers/crypto/aspeed/Makefile
@@ -0,0 +1,7 @@
+hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
+hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
+aspeed_crypto-objs := aspeed-hace.o \
+ $(hace-hash-y) \
+ $(hace-crypto-y)
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
new file mode 100644
index 000000000000..ef73b0028b4d
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define CIPHER_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+ areq->base.complete, areq->base.data);
+ skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+
+ if (rctx->enc_cmd & HACE_CMD_ENCRYPT)
+ err = crypto_skcipher_encrypt(&rctx->fallback_req);
+ else
+ err = crypto_skcipher_decrypt(&rctx->fallback_req);
+
+ return err;
+}
+
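+/*
+ * The AST2500 engine cannot handle zero-length or non-block-aligned
+ * requests; aspeed_hace_crypto_handle_queue() redirects such requests
+ * to the software fallback skcipher.
+ */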
+static bool aspeed_crypto_need_fallback(struct skcipher_request *areq)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+
+ if (areq->cryptlen == 0)
+ return true;
+
+ if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) &&
+ !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE))
+ return true;
+
+ if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) &&
+ !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE))
+ return true;
+
+ return false;
+}
+
+static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct skcipher_request *req)
+{
+ if (hace_dev->version == AST2500_VERSION &&
+ aspeed_crypto_need_fallback(req)) {
+ CIPHER_DBG(hace_dev, "SW fallback\n");
+ return aspeed_crypto_do_fallback(req);
+ }
+
+ return crypto_transfer_skcipher_request_to_engine(
+ hace_dev->crypt_engine_crypto, req);
+}
+
+static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct aspeed_engine_crypto *crypto_engine;
+ int rc;
+
+ crypto_engine = &hace_dev->crypto_engine;
+ crypto_engine->req = req;
+ crypto_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ rc = ctx->start(hace_dev);
+
+ if (rc != -EINPROGRESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(req->iv, crypto_engine->cipher_ctx +
+ DES_KEY_SIZE, DES_KEY_SIZE);
+ else
+ memcpy(req->iv, crypto_engine->cipher_ctx,
+ AES_BLOCK_SIZE);
+ }
+
+ crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req,
+ err);
+
+ return err;
+}
+
+static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct device *dev = hace_dev->dev;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE);
+ }
+
+ return aspeed_sk_complete(hace_dev, 0);
+}
+
+static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *out_sg;
+ int nbytes = 0;
+ int rc = 0;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ out_sg = req->dst;
+
+ /* Copy output buffer to dst scatter-gather lists */
+ nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ rc = -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_out_sg", rctx->dst_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ return aspeed_sk_complete(hace_dev, rc);
+}
+
+static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *in_sg;
+ int nbytes;
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ in_sg = req->src;
+
+ nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents,
+ crypto_engine->cipher_addr, req->cryptlen);
+
+ CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n",
+ "nbytes", nbytes, "req->cryptlen", req->cryptlen,
+ "nb_in_sg", rctx->src_nents,
+ "cipher addr", crypto_engine->cipher_addr);
+
+ if (!nbytes) {
+ dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n",
+ "nbytes", nbytes, "cryptlen", req->cryptlen);
+ return -EINVAL;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer;
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+}
+
+static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_sg_list *src_list, *dst_list;
+ dma_addr_t src_dma_addr, dst_dma_addr;
+ struct aspeed_cipher_reqctx *rctx;
+ struct skcipher_request *req;
+ struct scatterlist *s;
+ int src_sg_len;
+ int dst_sg_len;
+ int total, i;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+
+ rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
+ HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
+
+ /* BIDIRECTIONAL */
+ if (req->dst == req->src) {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_BIDIRECTIONAL);
+ dst_sg_len = src_sg_len;
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ } else {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+ rctx->src_nents, DMA_TO_DEVICE);
+ if (!src_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ return -EINVAL;
+ }
+
+ dst_sg_len = dma_map_sg(hace_dev->dev, req->dst,
+ rctx->dst_nents, DMA_FROM_DEVICE);
+ if (!dst_sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() dst error\n");
+ rc = -EINVAL;
+ goto free_req_src;
+ }
+ }
+
+ src_list = (struct aspeed_sg_list *)crypto_engine->cipher_addr;
+ src_dma_addr = crypto_engine->cipher_dma_addr;
+ total = req->cryptlen;
+
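+ /*
+ * Build the source descriptor list; BIT(31) set in the length word
+ * marks the last descriptor of the list.
+ */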
+ for_each_sg(req->src, s, src_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len)
+ total -= len;
+ else {
+ /* last sg list */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ if (req->dst == req->src) {
+ dst_list = src_list;
+ dst_dma_addr = src_dma_addr;
+
+ } else {
+ dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr;
+ dst_dma_addr = crypto_engine->dst_sg_dma_addr;
+ total = req->cryptlen;
+
+ for_each_sg(req->dst, s, dst_sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (total > len)
+ total -= len;
+ else {
+ /* last sg list */
+ len = total;
+ len |= BIT(31);
+ total = 0;
+ }
+
+ dst_list[i].phy_addr = cpu_to_le32(phy_addr);
+ dst_list[i].len = cpu_to_le32(len);
+
+ }
+
+ dst_list[dst_sg_len].phy_addr = 0;
+ dst_list[dst_sg_len].len = 0;
+ }
+
+ if (total != 0) {
+ rc = -EINVAL;
+ goto free_req;
+ }
+
+ crypto_engine->resume = aspeed_sk_transfer_sg;
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ /* Trigger engines */
+ ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+ return -EINPROGRESS;
+
+free_req:
+ if (req->dst == req->src) {
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_BIDIRECTIONAL);
+
+ } else {
+ dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
+ DMA_TO_DEVICE);
+ }
+
+ return rc;
+
+free_req_src:
+ dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_cipher_reqctx *rctx;
+ struct crypto_skcipher *cipher;
+ struct aspeed_cipher_ctx *ctx;
+ struct skcipher_request *req;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
+ rctx = skcipher_request_ctx(req);
+ cipher = crypto_skcipher_reqtfm(req);
+ ctx = crypto_skcipher_ctx(cipher);
+
+ /* enable interrupt */
+ rctx->enc_cmd |= HACE_CMD_ISR_EN;
+
+ rctx->dst_nents = sg_nents(req->dst);
+ rctx->src_nents = sg_nents(req->src);
+
+ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
+ ASPEED_HACE_CONTEXT);
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+ memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE,
+ req->iv, DES_BLOCK_SIZE);
+ else
+ memcpy(crypto_engine->cipher_ctx, req->iv,
+ AES_BLOCK_SIZE);
+ }
+
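+ /*
+ * AST2600 drives the request scatterlists directly via scatter-gather
+ * DMA; the AST2500 path below stages the data through a single DMA
+ * bounce buffer instead.
+ */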
+ if (hace_dev->version == AST2600_VERSION) {
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);
+
+ return aspeed_sk_start_sg(hace_dev);
+ }
+
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH);
+
+ return aspeed_sk_start(hace_dev);
+}
+
+static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_DES | HACE_CMD_CONTEXT_LOAD_ENABLE |
+ HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ int rc;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", keylen);
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+ dev_warn(hace_dev->dev, "invalid keylen: %d bits\n", keylen);
+ return -EINVAL;
+ }
+
+ if (keylen == DES_KEY_SIZE) {
+ rc = crypto_des_verify_key(tfm, key);
+ if (rc)
+ return rc;
+
+ } else if (keylen == DES3_EDE_KEY_SIZE) {
+ rc = crypto_des3_ede_verify_key(tfm, key);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_TRIPLE_DES);
+}
+
+static int aspeed_des_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_des_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB |
+ HACE_CMD_SINGLE_DES);
+}
+
+static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd)
+{
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK;
+
+ if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) {
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+ }
+
+ CIPHER_DBG(hace_dev, "%s\n",
+ (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt");
+
+ cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE |
+ HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE;
+
+ switch (ctx->key_len) {
+ case AES_KEYSIZE_128:
+ cmd |= HACE_CMD_AES128;
+ break;
+ case AES_KEYSIZE_192:
+ cmd |= HACE_CMD_AES192;
+ break;
+ case AES_KEYSIZE_256:
+ cmd |= HACE_CMD_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->enc_cmd = cmd;
+
+ return aspeed_hace_crypto_handle_queue(hace_dev, req);
+}
+
+static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+ struct crypto_aes_ctx gen_aes_key;
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ if (ctx->hace_dev->version == AST2500_VERSION) {
+ aes_expandkey(&gen_aes_key, key, keylen);
+ memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
+
+ } else {
+ memcpy(ctx->key, key, keylen);
+ }
+
+ ctx->key_len = keylen;
+
+ crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
+}
+
+static int aspeed_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
+}
+
+static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
+}
+
+static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
+}
+
+static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC);
+}
+
+static int aspeed_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB);
+}
+
+static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct aspeed_hace_alg *crypto_alg;
+
+ crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+ ctx->hace_dev = crypto_alg->hace_dev;
+ ctx->start = aspeed_hace_skcipher_trigger;
+
+ CIPHER_DBG(ctx->hace_dev, "%s\n", name);
+
+ ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
+
+ ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
+{
+ struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = ctx->hace_dev;
+
+ CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base));
+ crypto_free_skcipher(ctx->fallback_tfm);
+}
+
+static struct aspeed_hace_alg aspeed_crypto_algs[] = {
+ {
+ .alg.skcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ecb_encrypt,
+ .decrypt = aspeed_aes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aspeed-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cbc_encrypt,
+ .decrypt = aspeed_aes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aspeed-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_cfb_encrypt,
+ .decrypt = aspeed_aes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "aspeed-cfb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ofb_encrypt,
+ .decrypt = aspeed_aes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "aspeed-ofb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ecb_encrypt,
+ .decrypt = aspeed_des_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "aspeed-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cbc_encrypt,
+ .decrypt = aspeed_des_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "aspeed-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_cfb_encrypt,
+ .decrypt = aspeed_des_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des)",
+ .cra_driver_name = "aspeed-cfb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ofb_encrypt,
+ .decrypt = aspeed_des_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des)",
+ .cra_driver_name = "aspeed-ofb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ecb_encrypt,
+ .decrypt = aspeed_tdes_ecb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "aspeed-ecb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cbc_encrypt,
+ .decrypt = aspeed_tdes_cbc_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "aspeed-cbc-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_cfb_encrypt,
+ .decrypt = aspeed_tdes_cfb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "cfb(des3_ede)",
+ .cra_driver_name = "aspeed-cfb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ofb_encrypt,
+ .decrypt = aspeed_tdes_ofb_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ofb(des3_ede)",
+ .cra_driver_name = "aspeed-ofb-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
+ {
+ .alg.skcipher = {
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aspeed_aes_setkey,
+ .encrypt = aspeed_aes_ctr_encrypt,
+ .decrypt = aspeed_aes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aspeed-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_des_ctr_encrypt,
+ .decrypt = aspeed_des_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des)",
+ .cra_driver_name = "aspeed-ctr-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .alg.skcipher = {
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = aspeed_des_setkey,
+ .encrypt = aspeed_tdes_ctr_encrypt,
+ .decrypt = aspeed_tdes_ctr_decrypt,
+ .init = aspeed_crypto_cra_init,
+ .exit = aspeed_crypto_cra_exit,
+ .base = {
+ .cra_name = "ctr(des3_ede)",
+ .cra_driver_name = "aspeed-ctr-tdes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
+ crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+}
+
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
+ aspeed_crypto_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
+ aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ if (rc) {
+ CIPHER_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
new file mode 100644
index 000000000000..935135229ebd
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define AHASH_DBG(h, fmt, ...) \
+ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define AHASH_DBG(h, fmt, ...) \
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* Initialization Vectors for SHA-family */
+static const __be32 sha1_iv[8] = {
+ cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
+ cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
+ cpu_to_be32(SHA1_H4), 0, 0, 0
+};
+
+static const __be32 sha224_iv[8] = {
+ cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
+ cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
+ cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
+ cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
+};
+
+static const __be32 sha256_iv[8] = {
+ cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
+ cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
+ cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
+ cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
+};
+
+static const __be64 sha384_iv[8] = {
+ cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+ cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+ cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+ cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
+};
+
+static const __be64 sha512_iv[8] = {
+ cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+ cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+ cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+ cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
+};
+
+static const __be32 sha512_224_iv[16] = {
+ cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
+ cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
+ cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
+ cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
+ cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
+ cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
+ cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
+ cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
+};
+
+static const __be32 sha512_256_iv[16] = {
+ cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
+ cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
+ cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
+ cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
+ cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
+ cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
+ cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
+ cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
+};
+
+/* The purpose of this padding is to ensure that the padded message is a
+ * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
+ * The bit "1" is appended at the end of the message, followed by
+ * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
+ * 128-bit block (SHA384/SHA512) containing the message length in bits
+ * is appended.
+ *
+ * For SHA1/SHA224/SHA256, padlen is calculated as follows:
+ * - if message length < 56 bytes then padlen = 56 - message length
+ * - else padlen = 64 + 56 - message length
+ *
+ * For SHA384/SHA512, padlen is calculated as follows:
+ * - if message length < 112 bytes then padlen = 112 - message length
+ * - else padlen = 128 + 112 - message length
+ */
+static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx)
+{
+ unsigned int index, padlen;
+ __be64 bits[2];
+
+ AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
+
+ switch (rctx->flags & SHA_FLAGS_MASK) {
+ case SHA_FLAGS_SHA1:
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
+ index = rctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
+ rctx->bufcnt += padlen + 8;
+ break;
+ default:
+ bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
+ bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
+ rctx->digcnt[0] >> 61);
+ index = rctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+ *(rctx->buffer + rctx->bufcnt) = 0x80;
+ memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+ memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
+ rctx->bufcnt += padlen + 16;
+ break;
+ }
+}
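
A quick worked check of the padlen rule above (a standalone sketch, not part of the patch; the helper name is hypothetical): for SHA-256, whose block size is 64 bytes, a 20-byte residual gives padlen = 56 - 20 = 36, so 20 + 36 + 8 = 64 bytes in total, while a 60-byte residual crosses the 56-byte boundary and gives padlen = (64 + 56) - 60 = 60, i.e. two full blocks.

/*
 * Standalone restatement of the SHA-1/SHA-224/SHA-256 padlen rule;
 * "index" is the residual message length modulo the 64-byte block size.
 */
static unsigned int example_sha256_padlen(unsigned int index)
{
	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}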
+
+/*
+ * Prepare DMA buffer before hardware engine
+ * processing.
+ */
+static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int length, remain;
+
+ length = rctx->total + rctx->bufcnt;
+ remain = length % rctx->block_size;
+
+ AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
+
+ if (rctx->bufcnt)
+ memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
+
+ if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+ rctx->bufcnt, rctx->src_sg,
+ rctx->offset, rctx->total - remain, 0);
+ rctx->offset += rctx->total - remain;
+
+ } else {
+ dev_warn(hace_dev->dev, "Hash data length is too large\n");
+ return -EINVAL;
+ }
+
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
+ rctx->offset, remain, 0);
+
+ rctx->bufcnt = remain;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ return -ENOMEM;
+ }
+
+ hash_engine->src_length = length - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+}
+
+/*
+ * Prepare a DMA buffer laid out as an SG descriptor list before
+ * hardware engine processing.
+ */
+static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct aspeed_sg_list *src_list;
+ struct scatterlist *s;
+ int length, remain, sg_len, i;
+ int rc = 0;
+
+ remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
+ length = rctx->total + rctx->bufcnt - remain;
+
+ AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
+ "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+ "length", length, "remain", remain);
+
+ sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+ if (!sg_len) {
+ dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto free_src_sg;
+ }
+
+ if (rctx->bufcnt != 0) {
+ u32 phy_addr;
+ u32 len;
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ phy_addr = rctx->buffer_dma_addr;
+ len = rctx->bufcnt;
+ length -= len;
+
+ /* Last sg list */
+ if (length == 0)
+ len |= HASH_SG_LAST_LIST;
+
+ src_list[0].phy_addr = cpu_to_le32(phy_addr);
+ src_list[0].len = cpu_to_le32(len);
+ src_list++;
+ }
+
+ if (length != 0) {
+ for_each_sg(rctx->src_sg, s, sg_len, i) {
+ u32 phy_addr = sg_dma_address(s);
+ u32 len = sg_dma_len(s);
+
+ if (length > len)
+ length -= len;
+ else {
+ /* Last sg list */
+ len = length;
+ len |= HASH_SG_LAST_LIST;
+ length = 0;
+ }
+
+ src_list[i].phy_addr = cpu_to_le32(phy_addr);
+ src_list[i].len = cpu_to_le32(len);
+ }
+ }
+
+ if (length != 0) {
+ rc = -EINVAL;
+ goto free_rctx_buffer;
+ }
+
+ rctx->offset = rctx->total - remain;
+ hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+
+free_rctx_buffer:
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+free_src_sg:
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
+
+ return 0;
+}
+
+/*
+ * Copy the digest into the request result.
+ * This function is called at the final() stage.
+ */
+static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ memcpy(req->result, rctx->digest, rctx->digsize);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+/*
+ * Trigger hardware engines to do the math.
+ */
+static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+ aspeed_hace_fn_t resume)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
+ &hash_engine->src_dma, &hash_engine->digest_dma,
+ hash_engine->src_length);
+
+ rctx->cmd |= HASH_CMD_INT_ENABLE;
+ hash_engine->resume = resume;
+
+ ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_DIGEST_BUFF);
+ ast_hace_write(hace_dev, hash_engine->digest_dma,
+ ASPEED_HACE_HASH_KEY_BUFF);
+ ast_hace_write(hace_dev, hash_engine->src_length,
+ ASPEED_HACE_HASH_DATA_LEN);
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+
+ ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
+
+ return -EINPROGRESS;
+}
+
+/*
+ * HMAC resume performs the second pass, which produces the
+ * final HMAC code from the inner hash result and the
+ * outer key pad.
+ */
+static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2, DMA_TO_DEVICE);
+
+ /* o key pad + hash sum 1 */
+ memcpy(rctx->buffer, bctx->opad, rctx->block_size);
+ memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
+
+ rctx->bufcnt = rctx->block_size + rctx->digsize;
+ rctx->digcnt[0] = rctx->block_size + rctx->digsize;
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+ memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
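
For orientation, the standard two-pass HMAC construction (RFC 2104) that this resume step completes, stated compactly rather than as driver code, with H the selected hash, K the block-sized key pad and || denoting concatenation:

    inner = H((K xor ipad) || message)   /* first pass, finished before this resume runs */
    HMAC  = H((K xor opad) || inner)     /* second pass, set up by this function */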
+
+static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+
+ rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->digest,
+ SHA512_DIGEST_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+ rctx->buffer,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+ dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+ rc = -ENOMEM;
+ goto free_rctx_digest;
+ }
+
+ hash_engine->src_dma = rctx->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+ hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ if (rctx->flags & SHA_FLAGS_HMAC)
+ return aspeed_hace_ahash_trigger(hace_dev,
+ aspeed_ahash_hmac_resume);
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+
+free_rctx_digest:
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+end:
+ return rc;
+}
+
+static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+ if (rctx->bufcnt != 0)
+ dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+ rctx->block_size * 2,
+ DMA_TO_DEVICE);
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
+ rctx->total - rctx->offset, 0);
+
+ rctx->bufcnt = rctx->total - rctx->offset;
+ rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ AHASH_DBG(hace_dev, "\n");
+
+ dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+ SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+ return aspeed_ahash_complete(hace_dev);
+}
+
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
+{
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ aspeed_hace_fn_t resume;
+ int ret;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ if (hace_dev->version == AST2600_VERSION) {
+ rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
+ resume = aspeed_ahash_update_resume_sg;
+
+ } else {
+ resume = aspeed_ahash_update_resume;
+ }
+
+ ret = hash_engine->dma_prepare(hace_dev);
+ if (ret)
+ return ret;
+
+ return aspeed_hace_ahash_trigger(hace_dev, resume);
+}
+
+static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(
+ hace_dev->crypt_engine_hash, req);
+}
+
+static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+ int ret = 0;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->flags |= CRYPTO_FLAGS_BUSY;
+
+ if (rctx->op == SHA_OP_UPDATE)
+ ret = aspeed_ahash_req_update(hace_dev);
+ else if (rctx->op == SHA_OP_FINAL)
+ ret = aspeed_ahash_req_final(hace_dev);
+
+ if (ret != -EINPROGRESS)
+ return ret;
+
+ return 0;
+}
+
+static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_engine_hash *hash_engine;
+
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->req = req;
+
+ if (hace_dev->version == AST2600_VERSION)
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+ else
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+
+ return 0;
+}
+
+static int aspeed_sham_update(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->total = req->nbytes;
+ rctx->src_sg = req->src;
+ rctx->offset = 0;
+ rctx->src_nents = sg_nents(req->src);
+ rctx->op = SHA_OP_UPDATE;
+
+ rctx->digcnt[0] += rctx->total;
+ if (rctx->digcnt[0] < rctx->total)
+ rctx->digcnt[1]++;
+
+ if (rctx->bufcnt + rctx->total < rctx->block_size) {
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
+ rctx->src_sg, rctx->offset,
+ rctx->total, 0);
+ rctx->bufcnt += rctx->total;
+
+ return 0;
+ }
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+static int aspeed_sham_final(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
+ req->nbytes, rctx->total);
+ rctx->op = SHA_OP_FINAL;
+
+ return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
+static int aspeed_sham_finup(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ int rc1, rc2;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+ rctx->flags |= SHA_FLAGS_FINUP;
+
+ rc1 = aspeed_sham_update(req);
+ if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
+ return rc1;
+
+ /*
+ * final() always has to be called to clean up resources,
+ * even if update() failed, unless update() returned
+ * -EINPROGRESS or -EBUSY
+ */
+ rc2 = aspeed_sham_final(req);
+
+ return rc1 ? : rc2;
+}
+
+static int aspeed_sham_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "%s: digest size:%d\n",
+ crypto_tfm_alg_name(&tfm->base),
+ crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA1;
+ rctx->digsize = SHA1_DIGEST_SIZE;
+ rctx->block_size = SHA1_BLOCK_SIZE;
+ rctx->sha_iv = sha1_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha1_iv, rctx->ivsize);
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA224_BLOCK_SIZE;
+ rctx->sha_iv = sha224_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA256_BLOCK_SIZE;
+ rctx->sha_iv = sha256_iv;
+ rctx->ivsize = 32;
+ memcpy(rctx->digest, sha256_iv, rctx->ivsize);
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA384;
+ rctx->digsize = SHA384_DIGEST_SIZE;
+ rctx->block_size = SHA384_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha384_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha384_iv, rctx->ivsize);
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512;
+ rctx->digsize = SHA512_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha512_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_iv, rctx->ivsize);
+ break;
+ default:
+ dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sha512s_init(struct ahash_request *req)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
+
+ rctx->cmd = HASH_CMD_ACC_MODE;
+ rctx->flags = 0;
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_224;
+ rctx->digsize = SHA224_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_224_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
+ HASH_CMD_SHA_SWAP;
+ rctx->flags |= SHA_FLAGS_SHA512_256;
+ rctx->digsize = SHA256_DIGEST_SIZE;
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = sha512_256_iv;
+ rctx->ivsize = 64;
+ memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
+ break;
+ default:
+ dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
+ crypto_ahash_digestsize(tfm));
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+ memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+ return 0;
+}
+
+static int aspeed_sham_digest(struct ahash_request *req)
+{
+ return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
+}
+
+static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+ int ds = crypto_shash_digestsize(bctx->shash);
+ int bs = crypto_shash_blocksize(bctx->shash);
+ int err = 0;
+ int i;
+
+ AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
+ keylen);
+
+ if (keylen > bs) {
+ err = aspeed_sham_shash_digest(bctx->shash,
+ crypto_shash_get_flags(bctx->shash),
+ key, keylen, bctx->ipad);
+ if (err)
+ return err;
+ keylen = ds;
+
+ } else {
+ memcpy(bctx->ipad, key, keylen);
+ }
+
+ memset(bctx->ipad + keylen, 0, bs - keylen);
+ memcpy(bctx->opad, bctx->ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ bctx->ipad[i] ^= HMAC_IPAD_VALUE;
+ bctx->opad[i] ^= HMAC_OPAD_VALUE;
+ }
+
+ return err;
+}
+
+static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
+{
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_alg *ast_alg;
+
+ ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+ tctx->hace_dev = ast_alg->hace_dev;
+ tctx->flags = 0;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct aspeed_sham_reqctx));
+
+ if (ast_alg->alg_base) {
+ /* hmac related */
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ tctx->flags |= SHA_FLAGS_HMAC;
+ bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(bctx->shash)) {
+ dev_warn(ast_alg->hace_dev->dev,
+ "base driver '%s' could not be loaded.\n",
+ ast_alg->alg_base);
+ return PTR_ERR(bctx->shash);
+ }
+ }
+
+ tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
+ tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
+ tctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
+{
+ struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+ AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
+
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+
+ crypto_free_shash(bctx->shash);
+ }
+}
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ return 0;
+}
+
+static struct aspeed_hace_alg aspeed_ahash_algs[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "aspeed-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "aspeed-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "aspeed-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha1",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "aspeed-hmac-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha224",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "aspeed-hmac-sha224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha256",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "aspeed-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
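+/* SHA-384/512 family algorithms, registered only on AST2600 (G6) hardware */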
+static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "aspeed-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "aspeed-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_224",
+ .cra_driver_name = "aspeed-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "sha512_256",
+ .cra_driver_name = "aspeed-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha384",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "aspeed-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512",
+ .alg.ahash = {
+ .init = aspeed_sham_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "aspeed-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_224",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_224)",
+ .cra_driver_name = "aspeed-hmac-sha512_224",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+ {
+ .alg_base = "sha512_256",
+ .alg.ahash = {
+ .init = aspeed_sha512s_init,
+ .update = aspeed_sham_update,
+ .final = aspeed_sham_final,
+ .finup = aspeed_sham_finup,
+ .digest = aspeed_sham_digest,
+ .setkey = aspeed_sham_setkey,
+ .export = aspeed_sham_export,
+ .import = aspeed_sham_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct aspeed_sham_reqctx),
+ .base = {
+ .cra_name = "hmac(sha512_256)",
+ .cra_driver_name = "aspeed-hmac-sha512_256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
+ sizeof(struct aspeed_sha_hmac_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = aspeed_sham_cra_init,
+ .cra_exit = aspeed_sham_cra_exit,
+ }
+ }
+ },
+ },
+};
+
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
+ crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+}
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+{
+ int rc, i;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
+ aspeed_ahash_algs[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+
+ if (hace_dev->version != AST2600_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
+ aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
+ rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ if (rc) {
+ AHASH_DBG(hace_dev, "Failed to register %s\n",
+ aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+ }
+ }
+}
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
new file mode 100644
index 000000000000..656cb92c8bb6
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "aspeed-hace.h"
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define HACE_DBG(d, fmt, ...) \
+ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define HACE_DBG(d, fmt, ...) \
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/* HACE interrupt service routine */
+static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ u32 sts;
+
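+ /* Latch the interrupt status and write it back to acknowledge the sources */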
+ sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
+ ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);
+
+ HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);
+
+ if (sts & HACE_HASH_ISR) {
+ if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&hash_engine->done_task);
+ else
+ dev_warn(hace_dev->dev, "HASH no active requests.\n");
+ }
+
+ if (sts & HACE_CRYPTO_ISR) {
+ if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&crypto_engine->done_task);
+ else
+ dev_warn(hace_dev->dev, "CRYPTO no active requests.\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void aspeed_hace_crypto_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+
+ crypto_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_hash_done_task(unsigned long data)
+{
+ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ hash_engine->resume(hace_dev);
+}
+
+static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_register_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_register_hace_crypto_algs(hace_dev);
+#endif
+}
+
+static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
+{
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ aspeed_unregister_hace_hash_algs(hace_dev);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ aspeed_unregister_hace_crypto_algs(hace_dev);
+#endif
+}
+
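+/* The OF match data encodes the SoC generation (AST2500 = 5, AST2600 = 6) */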
+static const struct of_device_id aspeed_hace_of_matches[] = {
+ { .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
+ { .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
+ {},
+};
+
+static int aspeed_hace_probe(struct platform_device *pdev)
+{
+ struct aspeed_engine_crypto *crypto_engine;
+ const struct of_device_id *hace_dev_id;
+ struct aspeed_engine_hash *hash_engine;
+ struct aspeed_hace_dev *hace_dev;
+ struct resource *res;
+ int rc;
+
+ hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
+ GFP_KERNEL);
+ if (!hace_dev)
+ return -ENOMEM;
+
+ hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
+ if (!hace_dev_id) {
+ dev_err(&pdev->dev, "Failed to match hace dev id\n");
+ return -EINVAL;
+ }
+
+ hace_dev->dev = &pdev->dev;
+ hace_dev->version = (unsigned long)hace_dev_id->data;
+ hash_engine = &hace_dev->hash_engine;
+ crypto_engine = &hace_dev->crypto_engine;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, hace_dev);
+
+ hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hace_dev->regs))
+ return PTR_ERR(hace_dev->regs);
+
+ /* Get irq number and register it */
+ hace_dev->irq = platform_get_irq(pdev, 0);
+ if (hace_dev->irq < 0)
+ return -ENXIO;
+
+ rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
+ dev_name(&pdev->dev), hace_dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to request interrupt\n");
+ return rc;
+ }
+
+ /* Get clk and enable it */
+ hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hace_dev->clk)) {
+ dev_err(&pdev->dev, "Failed to get clk\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(hace_dev->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc);
+ return rc;
+ }
+
+ /* Initialize crypto hardware engine structure for hash */
+ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_hash) {
+ rc = -ENOMEM;
+ goto clk_exit;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_hash);
+ if (rc)
+ goto err_engine_hash_start;
+
+ tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
+ (unsigned long)hace_dev);
+
+ /* Initialize crypto hardware engine structure for crypto */
+ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
+ true);
+ if (!hace_dev->crypt_engine_crypto) {
+ rc = -ENOMEM;
+ goto err_engine_hash_start;
+ }
+
+ rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
+ if (rc)
+ goto err_engine_crypto_start;
+
+ tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
+ (unsigned long)hace_dev);
+
+ /* Allocate DMA buffer used for hash engine input */
+ hash_engine->ahash_src_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_HASH_SRC_DMA_BUF_LEN,
+ &hash_engine->ahash_src_dma_addr,
+ GFP_KERNEL);
+ if (!hash_engine->ahash_src_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+ /* Allocate DMA buffer used for the crypto engine context */
+ crypto_engine->cipher_ctx =
+ dmam_alloc_coherent(&pdev->dev,
+ PAGE_SIZE,
+ &crypto_engine->cipher_ctx_dma,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_ctx) {
+ dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+ /* Allocate DMA buffer used for crypto engine input */
+ crypto_engine->cipher_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
+ &crypto_engine->cipher_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->cipher_addr) {
+ dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+
+ /* Allocate DMA buffer used for crypto engine output */
+ if (hace_dev->version == AST2600_VERSION) {
+ crypto_engine->dst_sg_addr =
+ dmam_alloc_coherent(&pdev->dev,
+ ASPEED_CRYPTO_DST_DMA_BUF_LEN,
+ &crypto_engine->dst_sg_dma_addr,
+ GFP_KERNEL);
+ if (!crypto_engine->dst_sg_addr) {
+ dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
+ rc = -ENOMEM;
+ goto err_engine_crypto_start;
+ }
+ }
+
+ aspeed_hace_register(hace_dev);
+
+ dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
+
+ return 0;
+
+err_engine_crypto_start:
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+err_engine_hash_start:
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+clk_exit:
+ clk_disable_unprepare(hace_dev->clk);
+
+ return rc;
+}
+
+static int aspeed_hace_remove(struct platform_device *pdev)
+{
+ struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
+ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ aspeed_hace_unregister(hace_dev);
+
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+
+ tasklet_kill(&hash_engine->done_task);
+ tasklet_kill(&crypto_engine->done_task);
+
+ clk_disable_unprepare(hace_dev->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
+
+static struct platform_driver aspeed_hace_driver = {
+ .probe = aspeed_hace_probe,
+ .remove = aspeed_hace_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_hace_of_matches,
+ },
+};
+
+module_platform_driver(aspeed_hace_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("Aspeed HACE Crypto Accelerator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
new file mode 100644
index 000000000000..f2cde23b56ae
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __ASPEED_HACE_H__
+#define __ASPEED_HACE_H__
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fips.h>
+#include <linux/dma-mapping.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+
+/*****************************
+ *                           *
+ * HACE register definitions *
+ *                           *
+ *****************************/
+#define ASPEED_HACE_SRC 0x00 /* Crypto Data Source Base Address Register */
+#define ASPEED_HACE_DEST 0x04 /* Crypto Data Destination Base Address Register */
+#define ASPEED_HACE_CONTEXT 0x08 /* Crypto Context Buffer Base Address Register */
+#define ASPEED_HACE_DATA_LEN 0x0C /* Crypto Data Length Register */
+#define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */
+
+/* G5 */
+#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */
+/* G6 */
+#define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */
+#define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */
+
+#define ASPEED_HACE_STS 0x1C /* HACE Status Register */
+
+#define ASPEED_HACE_HASH_SRC 0x20 /* Hash Data Source Base Address Register */
+#define ASPEED_HACE_HASH_DIGEST_BUFF 0x24 /* Hash Digest Write Buffer Base Address Register */
+#define ASPEED_HACE_HASH_KEY_BUFF 0x28 /* Hash HMAC Key Buffer Base Address Register */
+#define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */
+#define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */
+
+/* crypto cmd */
+#define HACE_CMD_SINGLE_DES 0
+#define HACE_CMD_TRIPLE_DES BIT(17)
+#define HACE_CMD_AES_SELECT 0
+#define HACE_CMD_DES_SELECT BIT(16)
+#define HACE_CMD_ISR_EN BIT(12)
+#define HACE_CMD_CONTEXT_SAVE_ENABLE (0)
+#define HACE_CMD_CONTEXT_SAVE_DISABLE BIT(9)
+#define HACE_CMD_AES (0)
+#define HACE_CMD_DES (0)
+#define HACE_CMD_RC4 BIT(8)
+#define HACE_CMD_DECRYPT (0)
+#define HACE_CMD_ENCRYPT BIT(7)
+
+#define HACE_CMD_ECB (0x0 << 4)
+#define HACE_CMD_CBC (0x1 << 4)
+#define HACE_CMD_CFB (0x2 << 4)
+#define HACE_CMD_OFB (0x3 << 4)
+#define HACE_CMD_CTR (0x4 << 4)
+#define HACE_CMD_OP_MODE_MASK (0x7 << 4)
+
+#define HACE_CMD_AES128 (0x0 << 2)
+#define HACE_CMD_AES192 (0x1 << 2)
+#define HACE_CMD_AES256 (0x2 << 2)
+#define HACE_CMD_OP_CASCADE (0x3)
+#define HACE_CMD_OP_INDEPENDENT (0x1)
+
+/* G5 */
+#define HACE_CMD_RI_WO_DATA_ENABLE (0)
+#define HACE_CMD_RI_WO_DATA_DISABLE BIT(11)
+#define HACE_CMD_CONTEXT_LOAD_ENABLE (0)
+#define HACE_CMD_CONTEXT_LOAD_DISABLE BIT(10)
+/* G6 */
+#define HACE_CMD_AES_KEY_FROM_OTP BIT(24)
+#define HACE_CMD_GHASH_TAG_XOR_EN BIT(23)
+#define HACE_CMD_GHASH_PAD_LEN_INV BIT(22)
+#define HACE_CMD_GCM_TAG_ADDR_SEL BIT(21)
+#define HACE_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HACE_CMD_DES_SG_CTRL BIT(19)
+#define HACE_CMD_SRC_SG_CTRL BIT(18)
+#define HACE_CMD_CTR_IV_AES_96 (0x1 << 14)
+#define HACE_CMD_CTR_IV_DES_32 (0x1 << 14)
+#define HACE_CMD_CTR_IV_AES_64 (0x2 << 14)
+#define HACE_CMD_CTR_IV_AES_32 (0x3 << 14)
+#define HACE_CMD_AES_KEY_HW_EXP BIT(13)
+#define HACE_CMD_GCM (0x5 << 4)
+
+/* interrupt status reg */
+#define HACE_CRYPTO_ISR BIT(12)
+#define HACE_HASH_ISR BIT(9)
+#define HACE_HASH_BUSY BIT(0)
+
+/* hash cmd reg */
+#define HASH_CMD_MBUS_REQ_SYNC_EN BIT(20)
+#define HASH_CMD_HASH_SRC_SG_CTRL BIT(18)
+#define HASH_CMD_SHA512_224 (0x3 << 10)
+#define HASH_CMD_SHA512_256 (0x2 << 10)
+#define HASH_CMD_SHA384 (0x1 << 10)
+#define HASH_CMD_SHA512 (0)
+#define HASH_CMD_INT_ENABLE BIT(9)
+#define HASH_CMD_HMAC (0x1 << 7)
+#define HASH_CMD_ACC_MODE (0x2 << 7)
+#define HASH_CMD_HMAC_KEY (0x3 << 7)
+#define HASH_CMD_SHA1 (0x2 << 4)
+#define HASH_CMD_SHA224 (0x4 << 4)
+#define HASH_CMD_SHA256 (0x5 << 4)
+#define HASH_CMD_SHA512_SER (0x6 << 4)
+#define HASH_CMD_SHA_SWAP (0x2 << 2)
+
+#define HASH_SG_LAST_LIST BIT(31)
+
+#define CRYPTO_FLAGS_BUSY BIT(1)
+
+#define SHA_OP_UPDATE 1
+#define SHA_OP_FINAL 2
+
+#define SHA_FLAGS_SHA1 BIT(0)
+#define SHA_FLAGS_SHA224 BIT(1)
+#define SHA_FLAGS_SHA256 BIT(2)
+#define SHA_FLAGS_SHA384 BIT(3)
+#define SHA_FLAGS_SHA512 BIT(4)
+#define SHA_FLAGS_SHA512_224 BIT(5)
+#define SHA_FLAGS_SHA512_256 BIT(6)
+#define SHA_FLAGS_HMAC BIT(8)
+#define SHA_FLAGS_FINUP BIT(9)
+#define SHA_FLAGS_MASK (0xff)
+
+#define ASPEED_CRYPTO_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_DST_DMA_BUF_LEN 0xa000
+#define ASPEED_CRYPTO_GCM_TAG_OFFSET 0x9ff0
+#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000
+#define ASPEED_HASH_QUEUE_LENGTH 50
+
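+/* Cipher block modes that require an IV to be programmed */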
+#define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \
+ HACE_CMD_OFB | HACE_CMD_CTR)
+
+struct aspeed_hace_dev;
+
+typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
+
+struct aspeed_sg_list {
+ __le32 len;
+ __le32 phy_addr;
+};
+
+struct aspeed_engine_hash {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct ahash_request *req;
+
+ /* input buffer */
+ void *ahash_src_addr;
+ dma_addr_t ahash_src_dma_addr;
+
+ dma_addr_t src_dma;
+ dma_addr_t digest_dma;
+
+ size_t src_length;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+ aspeed_hace_fn_t dma_prepare;
+};
+
+struct aspeed_sha_hmac_ctx {
+ struct crypto_shash *shash;
+ u8 ipad[SHA512_BLOCK_SIZE];
+ u8 opad[SHA512_BLOCK_SIZE];
+};
+
+struct aspeed_sham_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ unsigned long flags; /* hmac flag */
+
+ struct aspeed_sha_hmac_ctx base[0];
+};
+
+struct aspeed_sham_reqctx {
+ unsigned long flags; /* SHA_FLAGS_* operation flags */
+ unsigned long op; /* final or update */
+ u32 cmd; /* trigger cmd */
+
+ /* walk state */
+ struct scatterlist *src_sg;
+ int src_nents;
+ unsigned int offset; /* offset in current sg */
+ unsigned int total; /* per update length */
+
+ size_t digsize;
+ size_t block_size;
+ size_t ivsize;
+ const __be32 *sha_iv;
+
+ /* remain data buffer */
+ u8 buffer[SHA512_BLOCK_SIZE * 2];
+ dma_addr_t buffer_dma_addr;
+ size_t bufcnt; /* buffer counter */
+
+ /* output buffer */
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+ dma_addr_t digest_dma_addr;
+ u64 digcnt[2];
+};
+
+struct aspeed_engine_crypto {
+ struct tasklet_struct done_task;
+ unsigned long flags;
+ struct skcipher_request *req;
+
+ /* context buffer */
+ void *cipher_ctx;
+ dma_addr_t cipher_ctx_dma;
+
+ /* input buffer, could be single/scatter-gather lists */
+ void *cipher_addr;
+ dma_addr_t cipher_dma_addr;
+
+ /* output buffer, only used in scatter-gather lists */
+ void *dst_sg_addr;
+ dma_addr_t dst_sg_dma_addr;
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
+};
+
+struct aspeed_cipher_ctx {
+ struct crypto_engine_ctx enginectx;
+
+ struct aspeed_hace_dev *hace_dev;
+ int key_len;
+ u8 key[AES_MAX_KEYLENGTH];
+
+ /* callback func */
+ aspeed_hace_fn_t start;
+
+ struct crypto_skcipher *fallback_tfm;
+};
+
+struct aspeed_cipher_reqctx {
+ int enc_cmd;
+ int src_nents;
+ int dst_nents;
+
+ struct skcipher_request fallback_req; /* keep at the end */
+};
+
+struct aspeed_hace_dev {
+ void __iomem *regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ unsigned long version;
+
+ struct crypto_engine *crypt_engine_hash;
+ struct crypto_engine *crypt_engine_crypto;
+
+ struct aspeed_engine_hash hash_engine;
+ struct aspeed_engine_crypto crypto_engine;
+};
+
+struct aspeed_hace_alg {
+ struct aspeed_hace_dev *hace_dev;
+
+ const char *alg_base;
+
+ union {
+ struct skcipher_alg skcipher;
+ struct ahash_alg ahash;
+ } alg;
+};
+
+enum aspeed_version {
+ AST2500_VERSION = 5,
+ AST2600_VERSION
+};
+
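+/* MMIO accessors for the HACE register block */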
+#define ast_hace_write(hace, val, offset) \
+ writel((val), (hace)->regs + (offset))
+#define ast_hace_read(hace, offset) \
+ readl((hace)->regs + (offset))
+
+void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+
+#endif
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index a4b13d326cfc..82bf15d49561 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -343,7 +343,7 @@ static int atmel_ecc_probe(struct i2c_client *client,
return ret;
}
-static int atmel_ecc_remove(struct i2c_client *client)
+static void atmel_ecc_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
@@ -358,7 +358,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
* accessing the freed memory.
*/
dev_emerg(&client->dev, "Device is busy, expect memory corruption.\n");
- return 0;
+ return;
}
crypto_unregister_kpp(&atmel_ecdh_nist_p256);
@@ -366,8 +366,6 @@ static int atmel_ecc_remove(struct i2c_client *client)
spin_lock(&driver_data.i2c_list_lock);
list_del(&i2c_priv->i2c_client_list_node);
spin_unlock(&driver_data.i2c_list_lock);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index e4087bdd2475..a84b657598c6 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -116,18 +116,16 @@ static int atmel_sha204a_probe(struct i2c_client *client,
return ret;
}
-static int atmel_sha204a_remove(struct i2c_client *client)
+static void atmel_sha204a_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
if (atomic_read(&i2c_priv->tfm_count)) {
dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
- return 0;
+ return;
}
kfree((void *)i2c_priv->hwrng.priv);
-
- return 0;
}
static const struct of_device_id atmel_sha204a_dt_ids[] = {
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 9ad188cffd0d..51c66afbe677 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1712,7 +1712,7 @@ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
cipher_len = regk_crypto_key_256;
break;
default:
- pr_err("%s: Invalid key length %d!\n",
+ pr_err("%s: Invalid key length %zu!\n",
MODULE_NAME, ctx->key_length);
return -EINVAL;
}
@@ -2091,7 +2091,7 @@ static void artpec6_crypto_task(unsigned long data)
return;
}
- spin_lock_bh(&ac->queue_lock);
+ spin_lock(&ac->queue_lock);
list_for_each_entry_safe(req, n, &ac->pending, list) {
struct artpec6_crypto_dma_descriptors *dma = req->dma;
@@ -2128,7 +2128,7 @@ static void artpec6_crypto_task(unsigned long data)
artpec6_crypto_process_queue(ac, &complete_in_progress);
- spin_unlock_bh(&ac->queue_lock);
+ spin_unlock(&ac->queue_lock);
/* Perform the completion callbacks without holding the queue lock
* to allow new request submissions from the callbacks.
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 053315e260c2..c8c799428fe0 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1928,7 +1928,7 @@ static int ahash_enqueue(struct ahash_request *req)
/* SPU2 hardware does not compute hash of zero length data */
if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
(iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
flow_log("Doing %sfinal %s zero-len hash request in software\n",
rctx->is_final ? "" : "non-", alg_name);
err = do_shash((unsigned char *)alg_name, req->result,
@@ -2029,7 +2029,7 @@ static int ahash_init(struct ahash_request *req)
* supported by the hardware, we need to handle it in software
* by calling synchronous hash functions.
*/
- alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+ alg_name = crypto_ahash_alg_name(tfm);
hash = crypto_alloc_shash(alg_name, 0, 0);
if (IS_ERR(hash)) {
ret = PTR_ERR(hash);
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 71281a3bdbdc..d6d87332140a 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -231,7 +231,7 @@ struct iproc_ctx_s {
/*
* shash descriptor - needed to perform incremental hashing in
- * in software, when hw doesn't support it.
+ * software, when hw doesn't support it.
*/
struct shash_desc *shash;
diff --git a/drivers/crypto/cavium/cpt/cpt_hw_types.h b/drivers/crypto/cavium/cpt/cpt_hw_types.h
index 8ec6edc69f3f..ae4791a8ec4a 100644
--- a/drivers/crypto/cavium/cpt/cpt_hw_types.h
+++ b/drivers/crypto/cavium/cpt/cpt_hw_types.h
@@ -396,7 +396,7 @@ union cptx_vqx_misc_ena_w1s {
* Word0
* reserved_20_63:44 [63:20] Reserved.
* dbell_cnt:20 [19:0](R/W/H) Number of instruction queue 64-bit words to add
- * to the CPT instruction doorbell count. Readback value is the the
+ * to the CPT instruction doorbell count. Readback value is the
* current number of pending doorbell requests. If counter overflows
* CPT()_VQ()_MISC_INT[DBELL_DOVF] is set. To reset the count back to
* zero, write one to clear CPT()_VQ()_MISC_INT_ENA_W1C[DBELL_DOVF],
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 8c32d0eb8fcf..6872ac344001 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -253,6 +253,7 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
const struct firmware *fw_entry;
struct device *dev = &cpt->pdev->dev;
struct ucode_header *ucode;
+ unsigned int code_length;
struct microcode *mcode;
int j, ret = 0;
@@ -263,11 +264,12 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ucode = (struct ucode_header *)fw_entry->data;
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
- mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size) {
+ code_length = ntohl(ucode->code_length);
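+ /* Reject a zero length and guard the doubling below against overflow */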
+ if (code_length == 0 || code_length >= INT_MAX / 2) {
ret = -EINVAL;
goto fw_release;
}
+ mcode->code_size = code_length * 2;
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 7df71fcebe8f..1046a746d36f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -198,22 +198,16 @@ static int zip_decompress(const u8 *src, unsigned int slen,
/* Legacy Compress framework start */
int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 0);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 0);
}
int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_ctx_init(zip_ctx, 1);
-
- return ret;
+ return zip_ctx_init(zip_ctx, 1);
}
void zip_free_comp_ctx(struct crypto_tfm *tfm)
@@ -227,24 +221,18 @@ int zip_comp_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_comp_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm);
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* Legacy compress framework end */
/* SCOMP framework start */
@@ -298,22 +286,16 @@ int zip_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_compress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_compress(src, slen, dst, dlen, zip_ctx);
}
int zip_scomp_decompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
- int ret;
struct zip_kernel_ctx *zip_ctx = ctx;
- ret = zip_decompress(src, slen, dst, dlen, zip_ctx);
-
- return ret;
+ return zip_decompress(src, slen, dst, dlen, zip_ctx);
} /* SCOMP framework end */
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index ec97daf0fcb7..278636ed251a 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -64,7 +64,6 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
struct scatterlist *iv_sg = NULL;
unsigned int iv_len = 0;
- int ret;
if (!ctx->u.des3.key_len)
return -EINVAL;
@@ -100,9 +99,7 @@ static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
rctx->cmd.u.des3.src_len = req->cryptlen;
rctx->cmd.u.des3.dst = req->dst;
- ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
-
- return ret;
+ return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
static int ccp_des3_encrypt(struct skcipher_request *req)
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 7d4b4ad1db1f..9f753cb4f5f1 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -641,6 +641,10 @@ static void ccp_dma_release(struct ccp_device *ccp)
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
@@ -766,8 +770,8 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- dma_async_device_unregister(dma_dev);
ccp_dma_release(ccp);
+ dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 9f588c9728f8..06fc7156c04f 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -211,18 +211,24 @@ static int sev_read_init_ex_file(void)
if (IS_ERR(fp)) {
int ret = PTR_ERR(fp);
- dev_err(sev->dev,
- "SEV: could not open %s for read, error %d\n",
- init_ex_path, ret);
+ if (ret == -ENOENT) {
+ dev_info(sev->dev,
+ "SEV: %s does not exist and will be created later.\n",
+ init_ex_path);
+ ret = 0;
+ } else {
+ dev_err(sev->dev,
+ "SEV: could not open %s for read, error %d\n",
+ init_ex_path, ret);
+ }
return ret;
}
nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL);
if (nread != NV_LENGTH) {
- dev_err(sev->dev,
- "SEV: failed to read %u bytes to non volatile memory area, ret %ld\n",
+ dev_info(sev->dev,
+ "SEV: could not read %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nread);
- return -EIO;
}
dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread);
@@ -231,7 +237,7 @@ static int sev_read_init_ex_file(void)
return 0;
}
-static void sev_write_init_ex_file(void)
+static int sev_write_init_ex_file(void)
{
struct sev_device *sev = psp_master->sev_data;
struct file *fp;
@@ -241,14 +247,16 @@ static void sev_write_init_ex_file(void)
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600);
if (IS_ERR(fp)) {
+ int ret = PTR_ERR(fp);
+
dev_err(sev->dev,
- "SEV: could not open file for write, error %ld\n",
- PTR_ERR(fp));
- return;
+ "SEV: could not open file for write, error %d\n",
+ ret);
+ return ret;
}
nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset);
@@ -259,18 +267,20 @@ static void sev_write_init_ex_file(void)
dev_err(sev->dev,
"SEV: failed to write %u bytes to non volatile memory area, ret %ld\n",
NV_LENGTH, nwrite);
- return;
+ return -EIO;
}
dev_dbg(sev->dev, "SEV: write successful to NV file\n");
+
+ return 0;
}
-static void sev_write_init_ex_file_if_required(int cmd_id)
+static int sev_write_init_ex_file_if_required(int cmd_id)
{
lockdep_assert_held(&sev_cmd_mutex);
if (!sev_init_ex_buffer)
- return;
+ return 0;
/*
* Only a few platform commands modify the SPI/NV area, but none of the
@@ -285,10 +295,10 @@ static void sev_write_init_ex_file_if_required(int cmd_id)
case SEV_CMD_PEK_GEN:
break;
default:
- return;
+ return 0;
}
- sev_write_init_ex_file();
+ return sev_write_init_ex_file();
}
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
@@ -361,7 +371,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
cmd, reg & PSP_CMDRESP_ERR_MASK);
ret = -EIO;
} else {
- sev_write_init_ex_file_if_required(cmd);
+ ret = sev_write_init_ex_file_if_required(cmd);
}
print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
@@ -410,17 +420,12 @@ static int __sev_init_locked(int *error)
static int __sev_init_ex_locked(int *error)
{
struct sev_data_init_ex data;
- int ret;
memset(&data, 0, sizeof(data));
data.length = sizeof(data);
data.nv_address = __psp_pa(sev_init_ex_buffer);
data.nv_len = NV_LENGTH;
- ret = sev_read_init_ex_file();
- if (ret)
- return ret;
-
if (sev_es_tmr) {
/*
* Do not include the encryption mask on the physical
@@ -439,7 +444,7 @@ static int __sev_platform_init_locked(int *error)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc, psp_ret = -1;
+ int rc = 0, psp_ret = -1;
int (*init_function)(int *error);
if (!psp || !psp->sev_data)
@@ -450,8 +455,15 @@ static int __sev_platform_init_locked(int *error)
if (sev->state == SEV_STATE_INIT)
return 0;
- init_function = sev_init_ex_buffer ? __sev_init_ex_locked :
- __sev_init_locked;
+ if (sev_init_ex_buffer) {
+ init_function = __sev_init_ex_locked;
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+ } else {
+ init_function = __sev_init_locked;
+ }
+
rc = init_function(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
@@ -744,6 +756,11 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
+ if (!sev_version_greater_or_equal(0, 15)) {
+ dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
+ return -1;
+ }
+
if (sev_get_firmware(dev, &firmware) == -ENOENT) {
dev_dbg(dev, "No SEV firmware file present\n");
return -1;
@@ -776,6 +793,14 @@ static int sev_update_firmware(struct device *dev)
data->len = firmware->size;
ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
+ /*
+ * A quirk for fixing the committed TCB version, when upgrading from
+ * a firmware version earlier than 1.50.
+ */
+ if (!ret && !sev_version_greater_or_equal(1, 50))
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
else
@@ -1285,8 +1310,7 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
- if (sev_version_greater_or_equal(0, 15) &&
- sev_update_firmware(sev->dev) == 0)
+ if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
/* If an init_ex_path is provided rely on INIT_EX for PSP initialization
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 6140e4927322..9efd88f871d1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -274,7 +274,7 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
}
ret = dma_map_sg(dev, sg, *nents, direction);
- if (dma_mapping_error(dev, ret)) {
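+ /* dma_map_sg() reports failure by returning 0, not via dma_mapping_error() */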
+ if (!ret) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 9a0558ed82f9..9f0b94c8e03d 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -22,7 +22,8 @@ enum {
HPRE_CLUSTER0,
HPRE_CLUSTER1,
HPRE_CLUSTER2,
- HPRE_CLUSTER3
+ HPRE_CLUSTER3,
+ HPRE_CLUSTERS_NUM_MAX
};
enum hpre_ctrl_dbgfs_file {
@@ -42,9 +43,6 @@ enum hpre_dfx_dbgfs_file {
HPRE_DFX_FILE_NUM
};
-#define HPRE_CLUSTERS_NUM_V2 (HPRE_CLUSTER3 + 1)
-#define HPRE_CLUSTERS_NUM_V3 1
-#define HPRE_CLUSTERS_NUM_MAX HPRE_CLUSTERS_NUM_V2
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM_MAX - 1)
struct hpre_debugfs_file {
@@ -105,5 +103,5 @@ struct hpre_sqe {
struct hisi_qp *hpre_create_qp(u8 type);
int hpre_algs_register(struct hisi_qm *qm);
void hpre_algs_unregister(struct hisi_qm *qm);
-
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 3ba6f15deafc..ef02dadd6217 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -51,6 +51,12 @@ struct hpre_ctx;
#define HPRE_ECC_HW256_KSZ_B 32
#define HPRE_ECC_HW384_KSZ_B 48
+/* capability register mask of driver */
+#define HPRE_DRV_RSA_MASK_CAP BIT(0)
+#define HPRE_DRV_DH_MASK_CAP BIT(1)
+#define HPRE_DRV_ECDH_MASK_CAP BIT(2)
+#define HPRE_DRV_X25519_MASK_CAP BIT(5)
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -147,7 +153,7 @@ static int hpre_alloc_req_id(struct hpre_ctx *ctx)
int id;
spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
+ id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_irqrestore(&ctx->req_lock, flags);
return id;
@@ -488,7 +494,7 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
+ ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
if (ret)
hpre_stop_qp_and_put(qp);
@@ -2002,55 +2008,53 @@ static struct kpp_alg dh = {
},
};
-static struct kpp_alg ecdh_nist_p192 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p192_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh-nist-p192",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p256 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p256_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh-nist-p256",
- .cra_module = THIS_MODULE,
- },
-};
-
-static struct kpp_alg ecdh_nist_p384 = {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p384_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p384",
- .cra_driver_name = "hpre-ecdh-nist-p384",
- .cra_module = THIS_MODULE,
- },
+static struct kpp_alg ecdh_curves[] = {
+ {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p192_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p192",
+ .cra_driver_name = "hpre-ecdh-nist-p192",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p256_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "hpre-ecdh-nist-p256",
+ .cra_module = THIS_MODULE,
+ },
+ }, {
+ .set_secret = hpre_ecdh_set_secret,
+ .generate_public_key = hpre_ecdh_compute_value,
+ .compute_shared_secret = hpre_ecdh_compute_value,
+ .max_size = hpre_ecdh_max_size,
+ .init = hpre_ecdh_nist_p384_init_tfm,
+ .exit = hpre_ecdh_exit_tfm,
+ .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
+ .base = {
+ .cra_ctxsize = sizeof(struct hpre_ctx),
+ .cra_priority = HPRE_CRYPTO_ALG_PRI,
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "hpre-ecdh-nist-p384",
+ .cra_module = THIS_MODULE,
+ },
+ }
};
static struct kpp_alg curve25519_alg = {
@@ -2070,78 +2074,144 @@ static struct kpp_alg curve25519_alg = {
},
};
-
-static int hpre_register_ecdh(void)
+static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_kpp(&ecdh_nist_p192);
- if (ret)
- return ret;
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return 0;
- ret = crypto_register_kpp(&ecdh_nist_p256);
+ rsa.base.cra_flags = 0;
+ ret = crypto_register_akcipher(&rsa);
if (ret)
- goto unregister_ecdh_p192;
+ dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
- ret = crypto_register_kpp(&ecdh_nist_p384);
+ return ret;
+}
+
+static void hpre_unregister_rsa(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
+ return;
+
+ crypto_unregister_akcipher(&rsa);
+}
+
+static int hpre_register_dh(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&dh);
if (ret)
- goto unregister_ecdh_p256;
+ dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_dh(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&dh);
+}
+
+static int hpre_register_ecdh(struct hisi_qm *qm)
+{
+ int ret, i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
+ ret = crypto_register_kpp(&ecdh_curves[i]);
+ if (ret) {
+ dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
+ ecdh_curves[i].base.cra_name, ret);
+ goto unreg_kpp;
+ }
+ }
return 0;
-unregister_ecdh_p256:
- crypto_unregister_kpp(&ecdh_nist_p256);
-unregister_ecdh_p192:
- crypto_unregister_kpp(&ecdh_nist_p192);
+unreg_kpp:
+ for (--i; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+
return ret;
}
-static void hpre_unregister_ecdh(void)
+static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
- crypto_unregister_kpp(&ecdh_nist_p384);
- crypto_unregister_kpp(&ecdh_nist_p256);
- crypto_unregister_kpp(&ecdh_nist_p192);
+ int i;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
+ return;
+
+ for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
+ crypto_unregister_kpp(&ecdh_curves[i]);
+}
+
+static int hpre_register_x25519(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return 0;
+
+ ret = crypto_register_kpp(&curve25519_alg);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hpre_unregister_x25519(struct hisi_qm *qm)
+{
+ if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
+ return;
+
+ crypto_unregister_kpp(&curve25519_alg);
}
int hpre_algs_register(struct hisi_qm *qm)
{
int ret;
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
+ ret = hpre_register_rsa(qm);
if (ret)
return ret;
- ret = crypto_register_kpp(&dh);
+ ret = hpre_register_dh(qm);
if (ret)
goto unreg_rsa;
- if (qm->ver >= QM_HW_V3) {
- ret = hpre_register_ecdh();
- if (ret)
- goto unreg_dh;
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- goto unreg_ecdh;
- }
- return 0;
+ ret = hpre_register_ecdh(qm);
+ if (ret)
+ goto unreg_dh;
+
+ ret = hpre_register_x25519(qm);
+ if (ret)
+ goto unreg_ecdh;
+
+ return ret;
unreg_ecdh:
- hpre_unregister_ecdh();
+ hpre_unregister_ecdh(qm);
unreg_dh:
- crypto_unregister_kpp(&dh);
+ hpre_unregister_dh(qm);
unreg_rsa:
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_rsa(qm);
return ret;
}
void hpre_algs_unregister(struct hisi_qm *qm)
{
- if (qm->ver >= QM_HW_V3) {
- crypto_unregister_kpp(&curve25519_alg);
- hpre_unregister_ecdh();
- }
-
- crypto_unregister_kpp(&dh);
- crypto_unregister_akcipher(&rsa);
+ hpre_unregister_x25519(qm);
+ hpre_unregister_ecdh(qm);
+ hpre_unregister_dh(qm);
+ hpre_unregister_rsa(qm);
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 9d529df0eab9..471e5ca720f5 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -53,9 +53,7 @@
#define HPRE_CORE_IS_SCHD_OFFSET 0x90
#define HPRE_RAS_CE_ENB 0x301410
-#define HPRE_HAC_RAS_CE_ENABLE (BIT(0) | BIT(22) | BIT(23))
#define HPRE_RAS_NFE_ENB 0x301414
-#define HPRE_HAC_RAS_NFE_ENABLE 0x3ffffe
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0
@@ -79,8 +77,6 @@
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
#define HPRE_BD_USR_MASK GENMASK(1, 0)
-#define HPRE_CLUSTER_CORE_MASK_V2 GENMASK(3, 0)
-#define HPRE_CLUSTER_CORE_MASK_V3 GENMASK(7, 0)
#define HPRE_PREFETCH_CFG 0x301130
#define HPRE_SVA_PREFTCH_DFX 0x30115C
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
@@ -122,6 +118,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
+#define HPRE_DEV_ALG_MAX_LEN 256
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -137,6 +135,38 @@ struct hpre_hw_error {
const char *msg;
};
+struct hpre_dev_alg {
+ u32 alg_msk;
+ const char *alg;
+};
+
+static const struct hpre_dev_alg hpre_dev_algs[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = "rsa\n"
+ }, {
+ .alg_msk = BIT(1),
+ .alg = "dh\n"
+ }, {
+ .alg_msk = BIT(2),
+ .alg = "ecdh\n"
+ }, {
+ .alg_msk = BIT(3),
+ .alg = "ecdsa\n"
+ }, {
+ .alg_msk = BIT(4),
+ .alg = "sm2\n"
+ }, {
+ .alg_msk = BIT(5),
+ .alg = "x25519\n"
+ }, {
+ .alg_msk = BIT(6),
+ .alg = "x448\n"
+ }, {
+ /* sentinel */
+ }
+};
+
static struct hisi_qm_list hpre_devices = {
.register_to_crypto = hpre_algs_register,
.unregister_from_crypto = hpre_algs_unregister,
@@ -147,6 +177,62 @@ static const char * const hpre_debug_file_name[] = {
[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};
+enum hpre_cap_type {
+ HPRE_QM_NFE_MASK_CAP,
+ HPRE_QM_RESET_MASK_CAP,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_QM_CE_MASK_CAP,
+ HPRE_NFE_MASK_CAP,
+ HPRE_RESET_MASK_CAP,
+ HPRE_OOO_SHUTDOWN_MASK_CAP,
+ HPRE_CE_MASK_CAP,
+ HPRE_CLUSTER_NUM_CAP,
+ HPRE_CORE_TYPE_NUM_CAP,
+ HPRE_CORE_NUM_CAP,
+ HPRE_CLUSTER_CORE_NUM_CAP,
+ HPRE_CORE_ENABLE_BITMAP_CAP,
+ HPRE_DRV_ALG_BITMAP_CAP,
+ HPRE_DEV_ALG_BITMAP_CAP,
+ HPRE_CORE1_ALG_BITMAP_CAP,
+ HPRE_CORE2_ALG_BITMAP_CAP,
+ HPRE_CORE3_ALG_BITMAP_CAP,
+ HPRE_CORE4_ALG_BITMAP_CAP,
+ HPRE_CORE5_ALG_BITMAP_CAP,
+ HPRE_CORE6_ALG_BITMAP_CAP,
+ HPRE_CORE7_ALG_BITMAP_CAP,
+ HPRE_CORE8_ALG_BITMAP_CAP,
+ HPRE_CORE9_ALG_BITMAP_CAP,
+ HPRE_CORE10_ALG_BITMAP_CAP
+};
+
+static const struct hisi_qm_cap_info hpre_basic_info[] = {
+ {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
+ {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
+ {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
+ {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+ {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
+ {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
+ {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
+ {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
+ {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
+ {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
+ {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
+ {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
+ {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
+ {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
+ {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
+ {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
+};
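Each hpre_basic_info[] row is laid out as {cap type, register offset, shift, mask, v1 default, v2 default, v3 default}: when the capability registers are readable the value comes from the device, otherwise the per-version default applies. A minimal sketch of the decode performed for one row (hpre_read_cluster_num is a hypothetical name; the real lookup goes through hisi_qm_get_hw_info()):

/* Sketch for the HPRE_CLUSTER_NUM_CAP entry {0x313c, shift 20, GENMASK(3, 0)}.
 * Without readable capability registers the table default is used instead
 * (0x4 clusters for v2 hardware, 0x1 for v3).
 */
static u32 hpre_read_cluster_num(struct hisi_qm *qm)
{
	u32 val = readl(qm->io_base + 0x313c);

	return (val >> 20) & GENMASK(3, 0);
}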
+
static const struct hpre_hw_error hpre_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -262,6 +348,46 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver);
+ if (alg & cap_val)
+ return true;
+
+ return false;
+}
+
+static int hpre_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_msk;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++)
+ if (alg_msk & hpre_dev_algs[i].alg_msk)
+ strcat(algs, hpre_dev_algs[i].alg);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
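Worked example for hpre_set_qm_algs(): with alg_msk == 0x03 (the v2 default of HPRE_DEV_ALG_BITMAP_CAP, i.e. BIT(0) | BIT(1)), the loop concatenates "rsa\n" and "dh\n" into "rsa\ndh\n", and the final strrchr()/'\0' step trims the trailing newline so qm->uacce->algs points at "rsa\ndh". With the v3 default of 0x7F all seven entries of hpre_dev_algs[] are appended.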
+
static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -330,14 +456,12 @@ MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static inline int hpre_cluster_num(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ? HPRE_CLUSTERS_NUM_V3 :
- HPRE_CLUSTERS_NUM_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}
static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
- return (qm->ver >= QM_HW_V3) ?
- HPRE_CLUSTER_CORE_MASK_V3 : HPRE_CLUSTER_CORE_MASK_V2;
+ return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}
struct hisi_qp *hpre_create_qp(u8 type)
@@ -457,7 +581,7 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -478,7 +602,7 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
@@ -630,7 +754,8 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = HPRE_HAC_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -644,21 +769,30 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- /* disable hpre hw error interrupts */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ /* disable hpre hw error interrupts */
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
+ ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+
/* clear HPRE hw error source if having */
- writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB);
- writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
@@ -708,7 +842,7 @@ static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}
-static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
+static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
int cluster_index = file->index - HPRE_CLUSTER_CTRL;
@@ -716,8 +850,6 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
HPRE_CLSTR_ADDR_INTRVL;
writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
-
- return 0;
}
static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -792,9 +924,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
goto err_input;
break;
case HPRE_CLUSTER_CTRL:
- ret = hpre_cluster_inqry_write(file, val);
- if (ret)
- goto err_input;
+ hpre_cluster_inqry_write(file, val);
break;
default:
ret = -EINVAL;
@@ -1006,15 +1136,13 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
if (pdev->revision == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
- else
- qm->algs = "rsa\ndh";
qm->mode = uacce_mode;
qm->pdev = pdev;
qm->ver = pdev->revision;
@@ -1030,7 +1158,19 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qm_list = &hpre_devices;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to init hpre qm configures!\n");
+ return ret;
+ }
+
+ ret = hpre_set_qm_algs(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to set hpre algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static int hpre_show_last_regs_init(struct hisi_qm *qm)
@@ -1129,7 +1269,11 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
+ nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
}
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
@@ -1147,14 +1291,20 @@ static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+ err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
+ err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hpre_err_ini = {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index ad83c194d664..8b387de69d22 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -22,20 +22,17 @@
#define QM_VF_AEQ_INT_MASK 0x4
#define QM_VF_EQ_INT_SOURCE 0x8
#define QM_VF_EQ_INT_MASK 0xc
-#define QM_IRQ_NUM_V1 1
-#define QM_IRQ_NUM_PF_V2 4
-#define QM_IRQ_NUM_VF_V2 2
-#define QM_IRQ_NUM_VF_V3 3
-#define QM_EQ_EVENT_IRQ_VECTOR 0
-#define QM_AEQ_EVENT_IRQ_VECTOR 1
-#define QM_CMD_EVENT_IRQ_VECTOR 2
-#define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
+#define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_MASK GENMASK(15, 0)
+#define QM_IRQ_TYPE_SHIFT 16
+#define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
/* mailbox */
#define QM_MB_PING_ALL_VFS 0xffff
#define QM_MB_CMD_DATA_SHIFT 32
#define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
+#define QM_MB_STATUS_MASK GENMASK(12, 9)
/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT 0
@@ -77,6 +74,9 @@
#define QM_EQ_OVERFLOW 1
#define QM_CQE_ERROR 2
+#define QM_XQ_DEPTH_SHIFT 16
+#define QM_XQ_DEPTH_MASK GENMASK(15, 0)
+
#define QM_DOORBELL_CMD_SQ 0
#define QM_DOORBELL_CMD_CQ 1
#define QM_DOORBELL_CMD_EQ 2
@@ -86,11 +86,7 @@
#define QM_DB_CMD_SHIFT_V1 16
#define QM_DB_INDEX_SHIFT_V1 32
#define QM_DB_PRIORITY_SHIFT_V1 48
-#define QM_QUE_ISO_CFG_V 0x0030
#define QM_PAGE_SIZE 0x0034
-#define QM_QUE_ISO_EN 0x100154
-#define QM_CAPBILITY 0x100158
-#define QM_QP_NUN_MASK GENMASK(10, 0)
#define QM_QP_DB_INTERVAL 0x10000
#define QM_MEM_START_INIT 0x100040
@@ -126,7 +122,6 @@
#define QM_DFX_CNT_CLR_CE 0x100118
#define QM_ABNORMAL_INT_SOURCE 0x100000
-#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
#define QM_ABNORMAL_INT_STATUS 0x100008
@@ -144,8 +139,10 @@
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
-#define QM_RAS_MSI_INT_SEL 0x1040f4
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
+#define QM_ECC_MBIT BIT(2)
+#define QM_DB_TIMEOUT BIT(10)
+#define QM_OF_FIFO_OF BIT(11)
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -205,6 +202,8 @@
#define MAX_WAIT_COUNTS 1000
#define QM_CACHE_WB_START 0x204
#define QM_CACHE_WB_DONE 0x208
+#define QM_FUNC_CAPS_REG 0x3100
+#define QM_CAPBILITY_VERSION GENMASK(7, 0)
#define PCI_BAR_2 2
#define PCI_BAR_4 4
@@ -221,7 +220,6 @@
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
#define QM_SQE_ADDR_MASK GENMASK(7, 0)
-#define QM_EQ_DEPTH (1024 * 2)
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
@@ -270,8 +268,8 @@
((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
-#define QM_MK_CQC_DW3_V2(cqe_sz) \
- ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
+#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
+ ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_SQC_W13(priority, orders, alg_type) \
(((priority) << QM_SQ_PRIORITY_SHIFT) | \
@@ -284,8 +282,8 @@
((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define QM_MK_SQC_DW3_V2(sqe_sz) \
- ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
+#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
+ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
#define INIT_QC_COMMON(qc, base, pasid) do { \
(qc)->head = 0; \
@@ -329,6 +327,48 @@ enum qm_mb_cmd {
QM_VF_GET_QOS,
};
+enum qm_basic_type {
+ QM_TOTAL_QP_NUM_CAP = 0x0,
+ QM_FUNC_MAX_QP_CAP,
+ QM_XEQ_DEPTH_CAP,
+ QM_QP_DEPTH_CAP,
+ QM_EQ_IRQ_TYPE_CAP,
+ QM_AEQ_IRQ_TYPE_CAP,
+ QM_ABN_IRQ_TYPE_CAP,
+ QM_PF2VF_IRQ_TYPE_CAP,
+ QM_PF_IRQ_NUM_CAP,
+ QM_VF_IRQ_NUM_CAP,
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
+ {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
+ {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
+ {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
+};
+
+static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
+ {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
+};
+
+static const struct hisi_qm_cap_info qm_basic_info[] = {
+ {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
+ {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(15, 0), 0x800, 0x4000800, 0x4000800},
+ {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
+ {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
+ {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
+ {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
+ {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
+ {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
+ {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
+};
+
struct qm_cqe {
__le32 rsvd0;
__le16 cmd_id;
@@ -421,15 +461,11 @@ struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
u8 cmd, u16 index, u8 priority);
- u32 (*get_irq_num)(struct hisi_qm *qm);
int (*debug_init)(struct hisi_qm *qm);
- void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
+ void (*hw_error_init)(struct hisi_qm *qm);
void (*hw_error_uninit)(struct hisi_qm *qm);
enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
- int (*stop_qp)(struct hisi_qp *qp);
int (*set_msi)(struct hisi_qm *qm, bool set);
- int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
- int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
};
struct qm_dfx_item {
@@ -533,6 +569,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
{50100, 100000, 19}
};
+static void qm_irqs_unregister(struct hisi_qm *qm);
+
static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
enum qm_state curr = atomic_read(&qm->status.flags);
@@ -623,22 +661,17 @@ static u32 qm_get_dev_err_status(struct hisi_qm *qm)
}
/* Check if the error causes the master ooo block */
-static int qm_check_dev_error(struct hisi_qm *qm)
+static bool qm_check_dev_error(struct hisi_qm *qm)
{
u32 val, dev_val;
if (qm->fun_type == QM_HW_VF)
- return 0;
+ return false;
- val = qm_get_hw_error_status(qm);
- dev_val = qm_get_dev_err_status(qm);
+ val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
+ dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;
- if (qm->ver < QM_HW_V3)
- return (val & QM_ECC_MBIT) ||
- (dev_val & qm->err_info.ecc_2bits_mask);
-
- return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
- (dev_val & (~qm->err_info.dev_ce_mask));
+ return val || dev_val;
}
static int qm_wait_reset_finish(struct hisi_qm *qm)
@@ -728,8 +761,12 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src)
static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
+ int ret;
+ u32 val;
+
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
+ ret = -EBUSY;
goto mb_busy;
}
@@ -737,6 +774,14 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
if (unlikely(hisi_qm_wait_mb_ready(qm))) {
dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
+ ret = -ETIMEDOUT;
+ goto mb_busy;
+ }
+
+ val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
+ if (val & QM_MB_STATUS_MASK) {
+ dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
+ ret = -EIO;
goto mb_busy;
}
@@ -744,7 +789,7 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
mb_busy:
atomic64_inc(&qm->debug.dfx.mb_err_cnt);
- return -EBUSY;
+ return ret;
}
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
@@ -828,25 +873,52 @@ static int qm_dev_mem_reset(struct hisi_qm *qm)
POLL_TIMEOUT);
}
-static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
+/**
+ * hisi_qm_get_hw_info() - Get device information.
+ * @qm: The qm whose hardware information is queried.
+ * @info_table: Array for storing device information.
+ * @index: Index in info_table.
+ * @is_read: Whether to read the value from the hardware register; false means the register cannot be read and the per-version default is returned.
+ *
+ * This function returns device information the caller needs.
+ */
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read)
{
- return QM_IRQ_NUM_V1;
+ u32 val;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ return info_table[index].v1_val;
+ case QM_HW_V2:
+ return info_table[index].v2_val;
+ default:
+ if (!is_read)
+ return info_table[index].v3_val;
+
+ val = readl(qm->io_base + info_table[index].offset);
+ return (val >> info_table[index].shift) & info_table[index].mask;
+ }
}
+EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
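hisi_qm_get_hw_info() centralizes the hardware-version fallback: V1/V2 devices never touch the capability registers, and V3 devices read them only when cap_ver is non-zero. A minimal usage sketch with values taken from the qm_basic_info[] table above (qm_example_pf_irq_num is a hypothetical wrapper; qm_get_irq_num() below is the real caller):

/* On QM_HW_V2 this returns the table's v2 default (4) with no MMIO access;
 * on QM_HW_V3 with a non-zero cap_ver it reads offset 0x311c and extracts
 * bits 31:16.
 */
static u32 qm_example_pf_irq_num(struct hisi_qm *qm)
{
	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP,
				   qm->cap_ver);
}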
-static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
+static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
+ u16 *high_bits, enum qm_basic_type type)
{
- if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
- else
- return QM_IRQ_NUM_VF_V2;
+ u32 depth;
+
+ depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
+ *low_bits = depth & QM_XQ_DEPTH_MASK;
+ *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
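qm_get_xqc_depth() splits one 32-bit capability word into two 16-bit depths. A short sketch assuming a qm handle in scope and the QM_XEQ_DEPTH_CAP default of 0x4000800 from qm_basic_info[]:

	u16 eq_depth, aeq_depth;

	qm_get_xqc_depth(qm, &eq_depth, &aeq_depth, QM_XEQ_DEPTH_CAP);
	/* eq_depth  == 0x800 (2048 EQ entries), from the low 16 bits  */
	/* aeq_depth == 0x400 (1024 AEQ entries), from the high 16 bits */

The QM_QP_DEPTH_CAP default of 0x4000400 decodes the same way into an SQ depth and a CQ depth of 1024 each.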
-static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
+static u32 qm_get_irq_num(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
- return QM_IRQ_NUM_PF_V2;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
- return QM_IRQ_NUM_VF_V3;
+ return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}
static int qm_pm_get_sync(struct hisi_qm *qm)
@@ -854,7 +926,7 @@ static int qm_pm_get_sync(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
int ret;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return 0;
ret = pm_runtime_resume_and_get(dev);
@@ -870,7 +942,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_mark_last_busy(dev);
@@ -879,7 +951,7 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
static void qm_cq_head_update(struct hisi_qp *qp)
{
- if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
+ if (qp->qp_status.cq_head == qp->cq_depth - 1) {
qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
qp->qp_status.cq_head = 0;
} else {
@@ -911,6 +983,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
struct hisi_qm *qm = poll_data->qm;
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+ u16 eq_depth = qm->eq_depth;
int eqe_num = 0;
u16 cqn;
@@ -919,7 +992,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
poll_data->qp_finish_id[eqe_num] = cqn;
eqe_num++;
- if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
+ if (qm->status.eq_head == eq_depth - 1) {
qm->status.eqc_phase = !qm->status.eqc_phase;
eqe = qm->eqe;
qm->status.eq_head = 0;
@@ -928,7 +1001,7 @@ static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
qm->status.eq_head++;
}
- if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+ if (eqe_num == (eq_depth >> 1) - 1)
break;
}
@@ -1068,6 +1141,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
@@ -1092,7 +1166,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
break;
}
- if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
+ if (qm->status.aeq_head == aeq_depth - 1) {
qm->status.aeqc_phase = !qm->status.aeqc_phase;
aeqe = qm->aeqe;
qm->status.aeq_head = 0;
@@ -1118,24 +1192,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-static void qm_irq_unregister(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
-
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->ver > QM_HW_V1) {
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
- }
-
- if (qm->ver > QM_HW_V2)
- free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
-}
-
static void qm_init_qp_status(struct hisi_qp *qp)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -1151,7 +1207,7 @@ static void qm_init_prefetch(struct hisi_qm *qm)
struct device *dev = &qm->pdev->dev;
u32 page_type = 0x0;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
switch (PAGE_SIZE) {
@@ -1270,7 +1326,7 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
}
break;
case SHAPER_VFT:
- if (qm->ver >= QM_HW_V3) {
+ if (factor) {
tmp = factor->cir_b |
(factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
(factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
@@ -1288,10 +1344,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
u32 fun_num, u32 base, u32 number)
{
- struct qm_shaper_factor *factor = &qm->factor[fun_num];
+ struct qm_shaper_factor *factor = NULL;
unsigned int val;
int ret;
+ if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ factor = &qm->factor[fun_num];
+
ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
val & BIT(0), POLL_PERIOD,
POLL_TIMEOUT);
@@ -1349,7 +1408,7 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
}
/* init default shaper qos val */
- if (qm->ver >= QM_HW_V3) {
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
ret = qm_shaper_init_vft(qm, fun_num);
if (ret)
goto back_sqc_cqc;
@@ -1357,11 +1416,9 @@ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return 0;
back_sqc_cqc:
- for (i = SQC_VFT; i <= CQC_VFT; i++) {
- ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
- if (ret)
- return ret;
- }
+ for (i = SQC_VFT; i <= CQC_VFT; i++)
+ qm_set_vft_common(qm, i, fun_num, 0, 0);
+
return ret;
}
@@ -1857,39 +1914,19 @@ static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
kfree(ctx_addr);
}
-static int dump_show(struct hisi_qm *qm, void *info,
+static void dump_show(struct hisi_qm *qm, void *info,
unsigned int info_size, char *info_name)
{
struct device *dev = &qm->pdev->dev;
- u8 *info_buf, *info_curr = info;
+ u8 *info_curr = info;
u32 i;
#define BYTE_PER_DW 4
- info_buf = kzalloc(info_size, GFP_KERNEL);
- if (!info_buf)
- return -ENOMEM;
-
- for (i = 0; i < info_size; i++, info_curr++) {
- if (i % BYTE_PER_DW == 0)
- info_buf[i + 3UL] = *info_curr;
- else if (i % BYTE_PER_DW == 1)
- info_buf[i + 1UL] = *info_curr;
- else if (i % BYTE_PER_DW == 2)
- info_buf[i - 1] = *info_curr;
- else if (i % BYTE_PER_DW == 3)
- info_buf[i - 3] = *info_curr;
- }
-
dev_info(dev, "%s DUMP\n", info_name);
- for (i = 0; i < info_size; i += BYTE_PER_DW) {
+ for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) {
pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
- info_buf[i], info_buf[i + 1UL],
- info_buf[i + 2UL], info_buf[i + 3UL]);
+ *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr));
}
-
- kfree(info_buf);
-
- return 0;
}
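The simplified dump_show() above prints the bytes of each 32-bit word in reverse order, which is exactly what the removed staging buffer used to achieve by copying. For example, a context structure starting with the bytes 0x01 0x02 0x03 0x04 is printed as "DW0: 0403 0201".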
static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
@@ -1929,23 +1966,18 @@ static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
if (qm->sqc) {
sqc_curr = qm->sqc + qp_id;
- ret = dump_show(qm, sqc_curr, sizeof(*sqc),
- "SOFT SQC");
- if (ret)
- dev_info(dev, "Show soft sqc failed!\n");
+ dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
- if (ret)
- dev_info(dev, "Show hw sqc failed!\n");
+ dump_show(qm, sqc, sizeof(*sqc), "SQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
- return ret;
+ return 0;
}
static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
@@ -1975,23 +2007,18 @@ static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
if (qm->cqc) {
cqc_curr = qm->cqc + qp_id;
- ret = dump_show(qm, cqc_curr, sizeof(*cqc),
- "SOFT CQC");
- if (ret)
- dev_info(dev, "Show soft cqc failed!\n");
+ dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
}
up_read(&qm->qps_lock);
- goto err_free_ctx;
+ goto free_ctx;
}
- ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
- if (ret)
- dev_info(dev, "Show hw cqc failed!\n");
+ dump_show(qm, cqc, sizeof(*cqc), "CQC");
-err_free_ctx:
+free_ctx:
qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
- return ret;
+ return 0;
}
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
@@ -2015,9 +2042,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
if (ret)
goto err_free_ctx;
- ret = dump_show(qm, xeqc, size, name);
- if (ret)
- dev_info(dev, "Show hw %s failed!\n", name);
+ dump_show(qm, xeqc, size, name);
err_free_ctx:
qm_ctx_free(qm, size, xeqc, &xeqc_dma);
@@ -2025,7 +2050,7 @@ err_free_ctx:
}
static int q_dump_param_parse(struct hisi_qm *qm, char *s,
- u32 *e_id, u32 *q_id)
+ u32 *e_id, u32 *q_id, u16 q_depth)
{
struct device *dev = &qm->pdev->dev;
unsigned int qp_num = qm->qp_num;
@@ -2051,8 +2076,8 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
}
ret = kstrtou32(presult, 0, e_id);
- if (ret || *e_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
+ if (ret || *e_id >= q_depth) {
+ dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
return -EINVAL;
}
@@ -2066,54 +2091,49 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
static int qm_sq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
+ u16 sq_depth = qm->qp_array->sq_depth;
void *sqe, *sqe_curr;
struct hisi_qp *qp;
u32 qp_id, sqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth);
if (ret)
return ret;
- sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
+ sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
if (!sqe)
return -ENOMEM;
qp = &qm->qp_array[qp_id];
- memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
+ memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
qm->debug.sqe_mask_len);
- ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
- if (ret)
- dev_info(dev, "Show sqe failed!\n");
+ dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
kfree(sqe);
- return ret;
+ return 0;
}
static int qm_cq_dump(struct hisi_qm *qm, char *s)
{
- struct device *dev = &qm->pdev->dev;
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
u32 qp_id, cqe_id;
int ret;
- ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
+ ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth);
if (ret)
return ret;
qp = &qm->qp_array[qp_id];
cqe_curr = qp->cqe + cqe_id;
- ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- if (ret)
- dev_info(dev, "Show cqe failed!\n");
+ dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
- return ret;
+ return 0;
}
static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
@@ -2131,11 +2151,11 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
if (ret)
return -EINVAL;
- if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
- dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
+ if (!strcmp(name, "EQE") && xeqe_id >= qm->eq_depth) {
+ dev_err(dev, "Please input eqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
- } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
- dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
+ } else if (!strcmp(name, "AEQE") && xeqe_id >= qm->aeq_depth) {
+ dev_err(dev, "Please input aeqe num (0-%u)", qm->eq_depth - 1);
return -EINVAL;
}
@@ -2150,9 +2170,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
goto err_unlock;
}
- ret = dump_show(qm, xeqe, size, name);
- if (ret)
- dev_info(dev, "Show %s failed!\n", name);
+ dump_show(qm, xeqe, size, name);
err_unlock:
up_read(&qm->qps_lock);
@@ -2245,8 +2263,10 @@ static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
return ret;
/* Judge if the instance is being reset. */
- if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
- return 0;
+ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) {
+ ret = 0;
+ goto put_dfx_access;
+ }
if (count > QM_DBG_WRITE_LEN) {
ret = -ENOSPC;
@@ -2300,58 +2320,65 @@ static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
file->debug = &qm->debug;
}
-static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- qm->error_mask = ce | nfe | fe;
+ struct hisi_qm_err_info *err_info = &qm->err_info;
+
+ qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR,
- qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
-static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
-static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
+static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
- u32 irq_enable = ce | nfe | fe;
- u32 irq_unmask = ~irq_enable;
+ u32 irq_unmask;
- qm_hw_error_cfg(qm, ce, nfe, fe);
+ qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}
static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
- writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
+ u32 irq_mask = qm->error_mask;
+
+ irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
+ writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
/* disable close master ooo when hardware error happened */
writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
@@ -2396,7 +2423,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
- u32 error_status, tmp, val;
+ u32 error_status, tmp;
/* read err sts */
tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -2407,17 +2434,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
- /* ce error does not need to be reset */
- if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
- writel(error_status, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe,
- qm->io_base + QM_RAS_NFE_ENABLE);
- return ACC_ERR_RECOVERED;
- }
+ if (error_status & qm->err_info.qm_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_NEED_RESET;
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -2493,7 +2514,7 @@ static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
u64 val;
u32 i;
- if (!qm->vfs_num || qm->ver < QM_HW_V3)
+ if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return 0;
while (true) {
@@ -2756,7 +2777,6 @@ static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
.qm_db = qm_db_v1,
- .get_irq_num = qm_get_irq_num_v1,
.hw_error_init = qm_hw_error_init_v1,
.set_msi = qm_set_msi,
};
@@ -2764,7 +2784,6 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v2,
.hw_error_init = qm_hw_error_init_v2,
.hw_error_uninit = qm_hw_error_uninit_v2,
.hw_error_handle = qm_hw_error_handle_v2,
@@ -2774,14 +2793,10 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
.get_vft = qm_get_vft_v2,
.qm_db = qm_db_v2,
- .get_irq_num = qm_get_irq_num_v3,
.hw_error_init = qm_hw_error_init_v3,
.hw_error_uninit = qm_hw_error_uninit_v3,
.hw_error_handle = qm_hw_error_handle_v2,
- .stop_qp = qm_stop_qp,
.set_msi = qm_set_msi_v3,
- .ping_all_vfs = qm_ping_all_vfs,
- .ping_pf = qm_ping_pf,
};
static void *qm_get_avail_sqe(struct hisi_qp *qp)
@@ -2789,7 +2804,7 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
+ if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
return NULL;
return qp->sqe + sq_tail * qp->qm->sqe_size;
@@ -2830,7 +2845,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp = &qm->qp_array[qp_id];
hisi_qm_unset_hw_reset(qp);
- memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
+ memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
qp->event_cb = NULL;
qp->req_cb = NULL;
@@ -2911,9 +2926,9 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
- sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
} else {
- sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
+ sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
sqc->w8 = 0; /* rand_qc */
}
sqc->cq_num = cpu_to_le16(qp_id);
@@ -2954,9 +2969,9 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
if (ver == QM_HW_V1) {
cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
QM_QC_CQE_SIZE));
- cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
+ cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
} else {
- cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
+ cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
cqc->w8 = 0; /* rand_qc */
}
cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
@@ -3043,13 +3058,14 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
{
int qp_used = atomic_read(&qp->qp_status.used);
u16 cur_tail = qp->qp_status.sq_tail;
- u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
+ u16 sq_depth = qp->sq_depth;
+ u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
struct hisi_qm *qm = qp->qm;
u16 pos;
int i;
for (i = 0; i < qp_used; i++) {
- pos = (i + cur_head) % QM_Q_DEPTH;
+ pos = (i + cur_head) % sq_depth;
qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
atomic_dec(&qp->qp_status.used);
}
@@ -3078,8 +3094,8 @@ static int qm_drain_qp(struct hisi_qp *qp)
return 0;
/* Kunpeng930 supports drain qp by device */
- if (qm->ops->stop_qp) {
- ret = qm->ops->stop_qp(qp);
+ if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+ ret = qm_stop_qp(qp);
if (ret)
dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
return ret;
@@ -3197,7 +3213,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
u16 sq_tail = qp_status->sq_tail;
- u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
+ u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
void *sqe = qm_get_avail_sqe(qp);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
@@ -3286,7 +3302,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
{
struct hisi_qp *qp = q->priv;
- hisi_qm_cache_wb(qp->qm);
hisi_qm_release_qp(qp);
}
@@ -3310,7 +3325,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
if (qm->ver == QM_HW_V1) {
if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
return -EINVAL;
- } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
+ } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
return -EINVAL;
@@ -3387,6 +3402,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
unsigned long arg)
{
struct hisi_qp *qp = q->priv;
+ struct hisi_qp_info qp_info;
struct hisi_qp_ctx qp_ctx;
if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
@@ -3403,11 +3419,25 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
if (copy_to_user((void __user *)arg, &qp_ctx,
sizeof(struct hisi_qp_ctx)))
return -EFAULT;
- } else {
- return -EINVAL;
+
+ return 0;
+ } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
+ if (copy_from_user(&qp_info, (void __user *)arg,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ qp_info.sqe_size = qp->qm->sqe_size;
+ qp_info.sq_depth = qp->sq_depth;
+ qp_info.cq_depth = qp->cq_depth;
+
+ if (copy_to_user((void __user *)arg, &qp_info,
+ sizeof(struct hisi_qp_info)))
+ return -EFAULT;
+
+ return 0;
}
- return 0;
+ return -EINVAL;
}
static const struct uacce_ops uacce_qm_ops = {
@@ -3427,6 +3457,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
struct uacce_device *uacce;
unsigned long mmio_page_nr;
unsigned long dus_page_nr;
+ u16 sq_depth, cq_depth;
struct uacce_interface interface = {
.flags = UACCE_DEV_SVA,
.ops = &uacce_qm_ops,
@@ -3453,7 +3484,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->is_vf = pdev->is_virtfn;
uacce->priv = qm;
- uacce->algs = qm->algs;
if (qm->ver == QM_HW_V1)
uacce->api_ver = HISI_QM_API_VER_BASE;
@@ -3464,15 +3494,17 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
- else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
+ else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* Add one more page for device or qp status */
- dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
+ sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
PAGE_SHIFT;
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
@@ -3577,10 +3609,11 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
kfree(qm->qp_array);
}
-static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
+static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
+ u16 sq_depth, u16 cq_depth)
{
struct device *dev = &qm->pdev->dev;
- size_t off = qm->sqe_size * QM_Q_DEPTH;
+ size_t off = qm->sqe_size * sq_depth;
struct hisi_qp *qp;
int ret = -ENOMEM;
@@ -3600,6 +3633,8 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
qp->cqe = qp->qdma.va + off;
qp->cqe_dma = qp->qdma.dma + off;
qp->qdma.size = dma_size;
+ qp->sq_depth = sq_depth;
+ qp->cq_depth = cq_depth;
qp->qm = qm;
qp->qp_id = id;
@@ -3626,7 +3661,7 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
init_rwsem(&qm->qps_lock);
qm->qp_in_used = 0;
qm->misc_ctl = false;
- if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
+ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
}
@@ -3636,7 +3671,7 @@ static void qm_cmd_uninit(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
val = readl(qm->io_base + QM_IFC_INT_MASK);
@@ -3648,7 +3683,7 @@ static void qm_cmd_init(struct hisi_qm *qm)
{
u32 val;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
return;
/* Clear communication interrupt source */
@@ -3664,7 +3699,7 @@ static void qm_put_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
iounmap(qm->io_base);
@@ -3714,7 +3749,9 @@ static void hisi_qm_memory_uninit(struct hisi_qm *qm)
}
idr_destroy(&qm->qp_idr);
- kfree(qm->factor);
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
}
/**
@@ -3740,7 +3777,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
if (qm->use_sva) {
uacce_remove(qm->uacce);
@@ -3841,7 +3878,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm)
eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
DMA_TO_DEVICE);
@@ -3870,7 +3907,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
DMA_TO_DEVICE);
@@ -4136,14 +4173,12 @@ DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
static void qm_hw_error_init(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
-
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
return;
}
- qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
+ qm->ops->hw_error_init(qm);
}
static void qm_hw_error_uninit(struct hisi_qm *qm)
@@ -4497,12 +4532,10 @@ static int qm_vf_read_qos(struct hisi_qm *qm)
qm->mb_qos = 0;
/* vf ping pf to get function qos */
- if (qm->ops->ping_pf) {
- ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
- if (ret) {
- pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
- return ret;
- }
+ ret = qm_ping_pf(qm, QM_VF_GET_QOS);
+ if (ret) {
+ pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
+ return ret;
}
while (true) {
@@ -4674,14 +4707,14 @@ static const struct file_operations qm_algqos_fops = {
* hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
* @qm: The qm for which we want to add debugfs files.
*
- * Create function qos debugfs files.
+ * Create function qos debugfs files; the VF pings the PF to get the function qos.
*/
static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF)
debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
qm, &qm_algqos_fops);
- else
+ else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
qm, &qm_algqos_fops);
}
@@ -4729,7 +4762,7 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
&qm_atomic64_ops);
}
- if (qm->ver >= QM_HW_V3)
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
hisi_qm_set_algqos_init(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
@@ -4768,6 +4801,14 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
}
EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
+static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
+{
+ int i;
+
+ for (i = 1; i <= total_func; i++)
+ qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+}
+
/**
* hisi_qm_sriov_enable() - enable virtual functions
* @pdev: the PCIe device
@@ -4794,7 +4835,17 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
- num_vfs = min_t(int, max_vfs, total_vfs);
+ if (max_vfs > total_vfs) {
+ pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
+ ret = -ERANGE;
+ goto err_put_sync;
+ }
+
+ num_vfs = max_vfs;
+
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ hisi_qm_init_vf_qos(qm, num_vfs);
+
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
@@ -4830,7 +4881,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
int ret;
if (pci_vfs_assigned(pdev)) {
@@ -4845,8 +4895,7 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
- /* clear vf function shaper configure array */
- memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
+
ret = qm_clear_vft_config(qm);
if (ret)
return ret;
@@ -4891,17 +4940,11 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
if (qm->err_ini->log_dev_hw_err)
qm->err_ini->log_dev_hw_err(qm, err_sts);
- /* ce error does not need to be reset */
- if ((err_sts | qm->err_info.dev_ce_mask) ==
- qm->err_info.dev_ce_mask) {
- if (qm->err_ini->clear_dev_hw_err_status)
- qm->err_ini->clear_dev_hw_err_status(qm,
- err_sts);
+ if (err_sts & qm->err_info.dev_reset_mask)
+ return ACC_ERR_NEED_RESET;
- return ACC_ERR_RECOVERED;
- }
-
- return ACC_ERR_NEED_RESET;
+ if (qm->err_ini->clear_dev_hw_err_status)
+ qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
}
return ACC_ERR_RECOVERED;
@@ -5070,8 +5113,8 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
return 0;
/* Kunpeng930 supports to notify VFs to stop before PF reset */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
} else {
@@ -5262,8 +5305,8 @@ static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
}
/* Kunpeng930 supports to notify VFs to start after PF reset. */
- if (qm->ops->ping_all_vfs) {
- ret = qm->ops->ping_all_vfs(qm, cmd);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
+ ret = qm_ping_all_vfs(qm, cmd);
if (ret)
pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
} else {
@@ -5466,8 +5509,6 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
if (pdev->is_virtfn)
return PCI_ERS_RESULT_RECOVERED;
- pci_aer_clear_nonfatal_status(pdev);
-
/* reset pcie device controller */
ret = qm_controller_reset(qm);
if (ret) {
@@ -5599,51 +5640,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int qm_irq_register(struct hisi_qm *qm)
-{
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
- qm_irq, 0, qm->dev_name, qm);
- if (ret)
- return ret;
-
- if (qm->ver > QM_HW_V1) {
- ret = request_threaded_irq(pci_irq_vector(pdev,
- QM_AEQ_EVENT_IRQ_VECTOR),
- qm_aeq_irq, qm_aeq_thread,
- 0, qm->dev_name, qm);
- if (ret)
- goto err_aeq_irq;
-
- if (qm->fun_type == QM_HW_PF) {
- ret = request_irq(pci_irq_vector(pdev,
- QM_ABNORMAL_EVENT_IRQ_VECTOR),
- qm_abnormal_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_abonormal_irq;
- }
- }
-
- if (qm->ver > QM_HW_V2) {
- ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
- qm_mb_cmd_irq, 0, qm->dev_name, qm);
- if (ret)
- goto err_mb_cmd_irq;
- }
-
- return 0;
-
-err_mb_cmd_irq:
- if (qm->fun_type == QM_HW_PF)
- free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
-err_abonormal_irq:
- free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
-err_aeq_irq:
- free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
- return ret;
-}
/**
* hisi_qm_dev_shutdown() - Shutdown device.
@@ -5711,7 +5707,7 @@ err_prepare:
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
pci_save_state(pdev);
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}
@@ -5729,7 +5725,7 @@ static void qm_pf_reset_vf_done(struct hisi_qm *qm)
cmd = QM_VF_START_FAIL;
}
- ret = qm->ops->ping_pf(qm, cmd);
+ ret = qm_ping_pf(qm, cmd);
if (ret)
dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");
@@ -5924,21 +5920,193 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
+static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ if (qm->fun_type == QM_HW_VF)
+ return;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_abnormal_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ if (qm->fun_type == QM_HW_VF)
+ return 0;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_aeq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
+ qm_aeq_thread, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_unregister_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ free_irq(pci_irq_vector(pdev, irq_vector), qm);
+}
+
+static int qm_register_eq_irq(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ u32 irq_vector, val;
+ int ret;
+
+ val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
+ if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
+ return 0;
+
+ irq_vector = val & QM_IRQ_VECTOR_MASK;
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+ if (ret)
+ dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
+
+ return ret;
+}
+
+static void qm_irqs_unregister(struct hisi_qm *qm)
+{
+ qm_unregister_mb_cmd_irq(qm);
+ qm_unregister_abnormal_irq(qm);
+ qm_unregister_aeq_irq(qm);
+ qm_unregister_eq_irq(qm);
+}
+
+static int qm_irqs_register(struct hisi_qm *qm)
+{
+ int ret;
+
+ ret = qm_register_eq_irq(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_register_aeq_irq(qm);
+ if (ret)
+ goto free_eq_irq;
+
+ ret = qm_register_abnormal_irq(qm);
+ if (ret)
+ goto free_aeq_irq;
+
+ ret = qm_register_mb_cmd_irq(qm);
+ if (ret)
+ goto free_abnormal_irq;
+
+ return 0;
+
+free_abnormal_irq:
+ qm_unregister_abnormal_irq(qm);
+free_aeq_irq:
+ qm_unregister_aeq_irq(qm);
+free_eq_irq:
+ qm_unregister_eq_irq(qm);
+ return ret;
+}
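All of the per-source IRQ helpers above decode the same capability layout: bits 31:16 carry the interrupt type/enable field and bits 15:0 the MSI vector. Worked example with the V3 defaults from qm_basic_info[]: QM_ABN_IRQ_TYPE_CAP = 0x10003 gives type (0x10003 >> 16) & QM_ABN_IRQ_TYPE_MASK = 1 and vector 3, QM_AEQ_IRQ_TYPE_CAP = 0x10001 gives vector 1, and QM_PF2VF_IRQ_TYPE_CAP = 0x10002 gives vector 2, while its V1/V2 value of 0 makes the mailbox-command helpers a no-op on older hardware.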
+
static int qm_get_qp_num(struct hisi_qm *qm)
{
- if (qm->ver == QM_HW_V1)
- qm->ctrl_qp_num = QM_QNUM_V1;
- else if (qm->ver == QM_HW_V2)
- qm->ctrl_qp_num = QM_QNUM_V2;
- else
- qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
- QM_QP_NUN_MASK;
+ bool is_db_isolation;
- if (qm->use_db_isolation)
- qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
- QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
- else
- qm->max_qp_num = qm->ctrl_qp_num;
+ /* The VF's qp_num is assigned by the PF; from v2 the VF can read it back via the vft. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver != QM_HW_V1)
+ /* v2 and later support getting the vft via mailbox */
+ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+
+ return 0;
+ }
+
+ is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+ qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
+ qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
+ QM_FUNC_MAX_QP_CAP, is_db_isolation);
/* check if qp number is valid */
if (qm->qp_num > qm->max_qp_num) {
@@ -5950,6 +6118,39 @@ static int qm_get_qp_num(struct hisi_qm *qm)
return 0;
}
+static void qm_get_hw_caps(struct hisi_qm *qm)
+{
+ const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
+ qm_cap_info_pf : qm_cap_info_vf;
+ u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
+ ARRAY_SIZE(qm_cap_info_vf);
+ u32 val, i;
+
+ /* The doorbell isolation register is an independent register. */
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
+ if (val)
+ set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
+
+ if (qm->ver >= QM_HW_V3) {
+ val = readl(qm->io_base + QM_FUNC_CAPS_REG);
+ qm->cap_ver = val & QM_CAPBILITY_VERSION;
+ }
+
+ /* Get the capabilities common to PF and VF */
+ for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
+ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
+ if (val)
+ set_bit(qm_cap_info_comm[i].type, &qm->caps);
+ }
+
+ /* Get the PF- or VF-specific capabilities */
+ for (i = 0; i < size; i++) {
+ val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
+ if (val)
+ set_bit(cap_info[i].type, &qm->caps);
+ }
+}
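For example, on a Kunpeng930-class (QM_HW_V3) PF the table defaults (or the capability register, once cap_ver is read as non-zero) set QM_SUPPORT_FUNC_QOS, QM_SUPPORT_STOP_QP, QM_SUPPORT_MB_COMMAND, QM_SUPPORT_SVA_PREFETCH and QM_SUPPORT_RPM in qm->caps, while on QM_HW_V2 hardware the v2 defaults are all zero and the test_bit() checks throughout this patch fall back to the older code paths. QM_SUPPORT_DB_ISOLATION stands apart because its register lives outside the 0x31xx capability block, which is why qm_get_hw_caps() queries it with is_read forced to true before cap_ver has been read.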
+
static int qm_get_pci_res(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -5969,16 +6170,8 @@ static int qm_get_pci_res(struct hisi_qm *qm)
goto err_request_mem_regions;
}
- if (qm->ver > QM_HW_V2) {
- if (qm->fun_type == QM_HW_PF)
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_EN) & BIT(0);
- else
- qm->use_db_isolation = readl(qm->io_base +
- QM_QUE_ISO_CFG_V) & BIT(0);
- }
-
- if (qm->use_db_isolation) {
+ qm_get_hw_caps(qm);
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
qm->db_interval = QM_QP_DB_INTERVAL;
qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
qm->db_io_base = ioremap(qm->db_phys_base,
@@ -5993,16 +6186,14 @@ static int qm_get_pci_res(struct hisi_qm *qm)
qm->db_interval = 0;
}
- if (qm->fun_type == QM_HW_PF) {
- ret = qm_get_qp_num(qm);
- if (ret)
- goto err_db_ioremap;
- }
+ ret = qm_get_qp_num(qm);
+ if (ret)
+ goto err_db_ioremap;
return 0;
err_db_ioremap:
- if (qm->use_db_isolation)
+ if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
iounmap(qm->db_io_base);
err_ioremap:
iounmap(qm->io_base);
@@ -6033,11 +6224,7 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
pci_set_master(pdev);
- if (!qm->ops->get_irq_num) {
- ret = -EOPNOTSUPP;
- goto err_get_pci_res;
- }
- num_vec = qm->ops->get_irq_num(qm);
+ num_vec = qm_get_irq_num(qm);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -6080,6 +6267,7 @@ static int hisi_qm_init_work(struct hisi_qm *qm)
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
+ u16 sq_depth, cq_depth;
size_t qp_dma_size;
int i, ret;
@@ -6093,13 +6281,14 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
return -ENOMEM;
}
+ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
+
/* one more page for device or qp statuses */
- qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH;
+ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
for (i = 0; i < qm->qp_num; i++) {
qm->poll_data[i].qm = qm;
- ret = hisi_qp_memory_init(qm, qp_dma_size, i);
+ ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
if (ret)
goto err_init_qp_mem;
@@ -6116,15 +6305,18 @@ err_init_qp_mem:
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret, total_func, i;
+ int ret, total_func;
size_t off = 0;
- total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
- qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
- if (!qm->factor)
- return -ENOMEM;
- for (i = 0; i < total_func; i++)
- qm->factor[i].func_qos = QM_QOS_MAX_VAL;
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
+ total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
+ qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
+ if (!qm->factor)
+ return -ENOMEM;
+
+ /* Only the PF value needs to be initialized */
+ qm->factor[0].func_qos = QM_QOS_MAX_VAL;
+ }
#define QM_INIT_BUF(qm, type, num) do { \
(qm)->type = ((qm)->qdma.va + (off)); \
@@ -6133,20 +6325,21 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
} while (0)
idr_init(&qm->qp_idr);
- qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
- QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
+ qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
+ qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
+ QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
GFP_ATOMIC);
dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
if (!qm->qdma.va) {
- ret = -ENOMEM;
- goto err_alloc_qdma;
+ ret = -ENOMEM;
+ goto err_destroy_idr;
}
- QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
- QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
+ QM_INIT_BUF(qm, eqe, qm->eq_depth);
+ QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
@@ -6158,8 +6351,10 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
err_alloc_qp_array:
dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
-err_alloc_qdma:
- kfree(qm->factor);
+err_destroy_idr:
+ idr_destroy(&qm->qp_idr);
+ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
+ kfree(qm->factor);
return ret;
}
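QM_INIT_BUF() above carves the eqe/aeqe/sqc/cqc rings out of the single qm->qdma coherent allocation by handing out consecutive, alignment-rounded slices of it. A userspace sketch of the same carve-out; 32-byte rounding and the element sizes are assumptions made for illustration, not the driver's actual QMC_ALIGN() definition:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QMC_ALIGN(sz)  (((sz) + 31) & ~(size_t)31)   /* assumed 32-byte rounding */

struct region { void *va; size_t size; };

/* Hand out the next aligned slice of one big buffer. */
static void carve(void *base, size_t *off, struct region *r, size_t sz)
{
	r->va = (char *)base + *off;
	r->size = sz;
	*off += QMC_ALIGN(sz);
}

int main(void)
{
	size_t eq_depth = 1024, aeq_depth = 1024, qp_num = 256;
	size_t eqe = 16, aeqe = 16, sqc = 32, cqc = 32;   /* illustrative element sizes */
	size_t total = QMC_ALIGN(eqe * eq_depth) + QMC_ALIGN(aeqe * aeq_depth) +
		       QMC_ALIGN(sqc * qp_num) + QMC_ALIGN(cqc * qp_num);
	struct region eq, aeq, sq, cq;
	void *base = calloc(1, total);
	size_t off = 0;

	if (!base)
		return 1;

	carve(base, &off, &eq, eqe * eq_depth);
	carve(base, &off, &aeq, aeqe * aeq_depth);
	carve(base, &off, &sq, sqc * qp_num);
	carve(base, &off, &cq, cqc * qp_num);
	printf("total %zu bytes, cqc slice starts at offset %zu\n",
	       total, (size_t)((char *)cq.va - (char *)base));
	free(base);
	return 0;
}

The important change in the hunk above is only that the EQ/AEQ depths now come from the capability registers instead of the fixed QM_EQ_DEPTH/QM_Q_DEPTH constants; the carve-out itself is unchanged.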
@@ -6202,17 +6397,10 @@ int hisi_qm_init(struct hisi_qm *qm)
if (ret)
return ret;
- ret = qm_irq_register(qm);
+ ret = qm_irqs_register(qm);
if (ret)
goto err_pci_init;
- if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
- /* v2 starts to support get vft by mailbox */
- ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
- if (ret)
- goto err_irq_register;
- }
-
if (qm->fun_type == QM_HW_PF) {
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
@@ -6251,7 +6439,7 @@ err_alloc_uacce:
qm->uacce = NULL;
}
err_irq_register:
- qm_irq_unregister(qm);
+ qm_irqs_unregister(qm);
err_pci_init:
hisi_qm_pci_uninit(qm);
return ret;
@@ -6302,7 +6490,7 @@ void hisi_qm_pm_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
@@ -6321,7 +6509,7 @@ void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
return;
pm_runtime_get_noresume(dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index d2a0bc93e752..3e57fc04b377 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -17,6 +17,7 @@ struct sec_alg_res {
dma_addr_t a_ivin_dma;
u8 *out_mac;
dma_addr_t out_mac_dma;
+ u16 depth;
};
/* Cipher request of SEC private */
@@ -115,9 +116,9 @@ struct sec_cipher_ctx {
/* SEC queue context which defines queue's relatives */
struct sec_qp_ctx {
struct hisi_qp *qp;
- struct sec_req *req_list[QM_Q_DEPTH];
+ struct sec_req **req_list;
struct idr req_idr;
- struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_alg_res *res;
struct sec_ctx *ctx;
spinlock_t req_lock;
struct list_head backlog;
@@ -191,8 +192,37 @@ struct sec_dev {
bool iommu_used;
};
+enum sec_cap_type {
+ SEC_QM_NFE_MASK_CAP = 0x0,
+ SEC_QM_RESET_MASK_CAP,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP,
+ SEC_QM_CE_MASK_CAP,
+ SEC_NFE_MASK_CAP,
+ SEC_RESET_MASK_CAP,
+ SEC_OOO_SHUTDOWN_MASK_CAP,
+ SEC_CE_MASK_CAP,
+ SEC_CLUSTER_NUM_CAP,
+ SEC_CORE_TYPE_NUM_CAP,
+ SEC_CORE_NUM_CAP,
+ SEC_CORES_PER_CLUSTER_NUM_CAP,
+ SEC_CORE_ENABLE_BITMAP,
+ SEC_DRV_ALG_BITMAP_LOW,
+ SEC_DRV_ALG_BITMAP_HIGH,
+ SEC_DEV_ALG_BITMAP_LOW,
+ SEC_DEV_ALG_BITMAP_HIGH,
+ SEC_CORE1_ALG_BITMAP_LOW,
+ SEC_CORE1_ALG_BITMAP_HIGH,
+ SEC_CORE2_ALG_BITMAP_LOW,
+ SEC_CORE2_ALG_BITMAP_HIGH,
+ SEC_CORE3_ALG_BITMAP_LOW,
+ SEC_CORE3_ALG_BITMAP_HIGH,
+ SEC_CORE4_ALG_BITMAP_LOW,
+ SEC_CORE4_ALG_BITMAP_HIGH,
+};
+
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 77c9f13cf69a..84ae8ddd1a13 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -59,14 +59,14 @@
#define SEC_ICV_MASK 0x000E
#define SEC_SQE_LEN_RATE_MASK 0x3
-#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
#define SEC_SGL_SGE_NR 128
#define SEC_CIPHER_AUTH 0xfe
#define SEC_AUTH_CIPHER 0x1
#define SEC_MAX_MAC_LEN 64
#define SEC_MAX_AAD_LEN 65535
#define SEC_MAX_CCM_AAD_LEN 65279
-#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+#define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
#define SEC_PBUF_SZ 512
#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
@@ -74,11 +74,11 @@
#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
SEC_MAX_MAC_LEN * 2)
#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
-#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
-#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
- SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
-#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
- SEC_PBUF_LEFT_SZ)
+#define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \
+ SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
+ SEC_PBUF_LEFT_SZ(depth))
#define SEC_SQE_LEN_RATE 4
#define SEC_SQE_CFLAG 2
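The depth-parameterised SEC_PBUF_* macros above pack SEC_PBUF_NUM per-request packages into each page, consume SEC_PBUF_PAGE_NUM(depth) whole pages, and account for the packages that no longer fill a page as SEC_PBUF_LEFT_SZ(depth). A quick standalone check of that arithmetic, with PAGE_SIZE taken as 4096 and an assumed package size standing in for SEC_PBUF_PKG:

#include <stdio.h>

#define PAGE_SIZE   4096
#define PBUF_PKG    672                    /* assumed pbuf + IV + two MACs */
#define PBUF_NUM    (PAGE_SIZE / PBUF_PKG)            /* packages per page */
#define PAGE_NUM(d) ((d) / PBUF_NUM)                  /* fully used pages  */
#define LEFT_SZ(d)  (PBUF_PKG * ((d) - PAGE_NUM(d) * PBUF_NUM))
#define TOTAL_SZ(d) (PAGE_SIZE * PAGE_NUM(d) + LEFT_SZ(d))

int main(void)
{
	unsigned int depth;

	for (depth = 256; depth <= 1024; depth <<= 1)
		printf("depth %4u -> %u full pages + %u leftover bytes = %u total\n",
		       depth, PAGE_NUM(depth), LEFT_SZ(depth), TOTAL_SZ(depth));
	return 0;
}

Packages never straddle a page boundary, which is why the pool is sized as whole pages plus a tail rather than simply depth * SEC_PBUF_PKG.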
@@ -104,6 +104,16 @@
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+struct sec_skcipher {
+ u64 alg_msk;
+ struct skcipher_alg alg;
+};
+
+struct sec_aead {
+ u64 alg_msk;
+ struct aead_alg alg;
+};
+
/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
@@ -128,9 +138,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
int req_id;
spin_lock_bh(&qp_ctx->req_lock);
-
- req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
- 0, QM_Q_DEPTH, GFP_ATOMIC);
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
spin_unlock_bh(&qp_ctx->req_lock);
if (unlikely(req_id < 0)) {
dev_err(req->ctx->dev, "alloc req id fail!\n");
@@ -148,7 +156,7 @@ static void sec_free_req_id(struct sec_req *req)
struct sec_qp_ctx *qp_ctx = req->qp_ctx;
int req_id = req->req_id;
- if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
dev_err(req->ctx->dev, "free request id invalid!\n");
return;
}
@@ -300,14 +308,15 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
/* Get DMA memory resources */
static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->c_ivin_dma, GFP_KERNEL);
if (!res->c_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
}
@@ -318,20 +327,21 @@ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->c_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->c_ivin, res->c_ivin_dma);
}
static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
&res->a_ivin_dma, GFP_KERNEL);
if (!res->a_ivin)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
}
@@ -342,20 +352,21 @@ static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->a_ivin)
- dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
res->a_ivin, res->a_ivin_dma);
}
static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
int i;
- res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
&res->out_mac_dma, GFP_KERNEL);
if (!res->out_mac)
return -ENOMEM;
- for (i = 1; i < QM_Q_DEPTH; i++) {
+ for (i = 1; i < q_depth; i++) {
res[i].out_mac_dma = res->out_mac_dma +
i * (SEC_MAX_MAC_LEN << 1);
res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
@@ -367,14 +378,14 @@ static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->out_mac)
- dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
+ dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
res->out_mac, res->out_mac_dma);
}
static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
if (res->pbuf)
- dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
res->pbuf, res->pbuf_dma);
}
@@ -384,10 +395,12 @@ static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
*/
static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
{
+ u16 q_depth = res->depth;
+ int size = SEC_PBUF_PAGE_NUM(q_depth);
int pbuf_page_offset;
int i, j, k;
- res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
&res->pbuf_dma, GFP_KERNEL);
if (!res->pbuf)
return -ENOMEM;
@@ -400,11 +413,11 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
* So we need SEC_PBUF_PAGE_NUM numbers of PAGE
* for the SEC_TOTAL_PBUF_SZ
*/
- for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ for (i = 0; i <= size; i++) {
pbuf_page_offset = PAGE_SIZE * i;
for (j = 0; j < SEC_PBUF_NUM; j++) {
k = i * SEC_PBUF_NUM + j;
- if (k == QM_Q_DEPTH)
+ if (k == q_depth)
break;
res[k].pbuf = res->pbuf +
j * SEC_PBUF_PKG + pbuf_page_offset;
@@ -470,36 +483,29 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
sec_free_mac_resource(dev, qp_ctx->res);
}
-static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
- int qp_ctx_id, int alg_type)
+static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
+ u16 q_depth = qp_ctx->qp->sq_depth;
struct device *dev = ctx->dev;
- struct sec_qp_ctx *qp_ctx;
- struct hisi_qp *qp;
int ret = -ENOMEM;
- qp_ctx = &ctx->qp_ctx[qp_ctx_id];
- qp = ctx->qps[qp_ctx_id];
- qp->req_type = 0;
- qp->qp_ctx = qp_ctx;
- qp_ctx->qp = qp;
- qp_ctx->ctx = ctx;
-
- qp->req_cb = sec_req_cb;
+ qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
+ if (!qp_ctx->req_list)
+ return ret;
- spin_lock_init(&qp_ctx->req_lock);
- idr_init(&qp_ctx->req_idr);
- INIT_LIST_HEAD(&qp_ctx->backlog);
+ qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
+ if (!qp_ctx->res)
+ goto err_free_req_list;
+ qp_ctx->res->depth = q_depth;
- qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
dev_err(dev, "fail to create sgl pool for input!\n");
- goto err_destroy_idr;
+ goto err_free_res;
}
- qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- SEC_SGL_SGE_NR);
+ qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
@@ -509,34 +515,72 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
if (ret)
goto err_free_c_out_pool;
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0)
- goto err_queue_free;
-
return 0;
-err_queue_free:
- sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_destroy_idr:
- idr_destroy(&qp_ctx->req_idr);
+err_free_res:
+ kfree(qp_ctx->res);
+err_free_req_list:
+ kfree(qp_ctx->req_list);
return ret;
}
-static void sec_release_qp_ctx(struct sec_ctx *ctx,
- struct sec_qp_ctx *qp_ctx)
+static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
{
struct device *dev = ctx->dev;
- hisi_qm_stop_qp(qp_ctx->qp);
sec_alg_resource_free(ctx, qp_ctx);
-
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
+ kfree(qp_ctx->res);
+ kfree(qp_ctx->req_list);
+}
+
+static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
+ int qp_ctx_id, int alg_type)
+{
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret;
+ qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
+ qp->req_type = 0;
+ qp->qp_ctx = qp_ctx;
+ qp_ctx->qp = qp;
+ qp_ctx->ctx = ctx;
+
+ qp->req_cb = sec_req_cb;
+
+ spin_lock_init(&qp_ctx->req_lock);
+ idr_init(&qp_ctx->req_idr);
+ INIT_LIST_HEAD(&qp_ctx->backlog);
+
+ ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx);
+ if (ret)
+ goto err_destroy_idr;
+
+ ret = hisi_qm_start_qp(qp, 0);
+ if (ret < 0)
+ goto err_resource_free;
+
+ return 0;
+
+err_resource_free:
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
+ return ret;
+}
+
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ hisi_qm_stop_qp(qp_ctx->qp);
+ sec_free_qp_ctx_resource(ctx, qp_ctx);
idr_destroy(&qp_ctx->req_idr);
}
@@ -559,7 +603,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
ctx->pbuf_supported = ctx->sec->iommu_used;
/* Half of queue depth is taken as fake requests limit in the queue. */
- ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
GFP_KERNEL);
if (!ctx->qp_ctx) {
@@ -1679,7 +1723,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
aead_req->out_mac,
authsize, a_req->cryptlen +
a_req->assoclen);
-
if (unlikely(sz != authsize)) {
dev_err(c->dev, "copy out mac err!\n");
err = -EINVAL;
@@ -1966,7 +2009,6 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
return sec_aead_ctx_init(tfm, "sha512");
}
-
static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx,
struct sec_req *sreq)
{
@@ -2126,67 +2168,80 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.min_keysize = sec_min_key_size,\
.max_keysize = sec_max_key_size,\
.ivsize = iv_size,\
-},
+}
#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-static struct skcipher_alg sec_skciphers[] = {
- SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, 0)
-
- SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
- DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-};
-
-static struct skcipher_alg sec_skciphers_v3[] = {
- SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
-
- SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
- SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
+static struct sec_skcipher sec_skciphers[] = {
+ {
+ .alg_msk = BIT(0),
+ .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(1),
+ .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(2),
+ .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(3),
+ .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(4),
+ .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(5),
+ .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
+ AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(12),
+ .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(13),
+ .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(14),
+ .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
+ SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(15),
+ .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(16),
+ .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
+ AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(23),
+ .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+ },
+ {
+ .alg_msk = BIT(24),
+ .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
+ SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
+ DES3_EDE_BLOCK_SIZE),
+ },
};
static int aead_iv_demension_check(struct aead_request *aead_req)
@@ -2380,90 +2435,135 @@ static int sec_aead_decrypt(struct aead_request *a_req)
.maxauthsize = max_authsize,\
}
-static struct aead_alg sec_aeads[] = {
- SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
- sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+static struct sec_aead sec_aeads[] = {
+ {
+ .alg_msk = BIT(6),
+ .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(7),
+ .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(17),
+ .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(18),
+ .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+ sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
+ AES_BLOCK_SIZE),
+ },
+ {
+ .alg_msk = BIT(43),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
+ sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(44),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
+ sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+ },
+ {
+ .alg_msk = BIT(45),
+ .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
+ sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ },
+};
- SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
- sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+static void sec_unregister_skcipher(u64 alg_mask, int end)
+{
+ int i;
- SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
- sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
- sec_aead_ctx_exit, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+ for (i = 0; i < end; i++)
+ if (sec_skciphers[i].alg_msk & alg_mask)
+ crypto_unregister_skcipher(&sec_skciphers[i].alg);
+}
- SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+static int sec_register_skcipher(u64 alg_mask)
+{
+ int i, ret, count;
- SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ count = ARRAY_SIZE(sec_skciphers);
-static struct aead_alg sec_aeads_v3[] = {
- SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+ for (i = 0; i < count; i++) {
+ if (!(sec_skciphers[i].alg_msk & alg_mask))
+ continue;
- SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
- sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
- SEC_AIV_SIZE, AES_BLOCK_SIZE)
-};
+ ret = crypto_register_skcipher(&sec_skciphers[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_skcipher(alg_mask, i);
+
+ return ret;
+}
+
+static void sec_unregister_aead(u64 alg_mask, int end)
+{
+ int i;
+
+ for (i = 0; i < end; i++)
+ if (sec_aeads[i].alg_msk & alg_mask)
+ crypto_unregister_aead(&sec_aeads[i].alg);
+}
+
+static int sec_register_aead(u64 alg_mask)
+{
+ int i, ret, count;
+
+ count = ARRAY_SIZE(sec_aeads);
+
+ for (i = 0; i < count; i++) {
+ if (!(sec_aeads[i].alg_msk & alg_mask))
+ continue;
+
+ ret = crypto_register_aead(&sec_aeads[i].alg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ sec_unregister_aead(alg_mask, i);
+
+ return ret;
+}
int sec_register_to_crypto(struct hisi_qm *qm)
{
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
int ret;
- /* To avoid repeat register */
- ret = crypto_register_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ ret = sec_register_skcipher(alg_mask);
if (ret)
return ret;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- if (ret)
- goto reg_skcipher_fail;
- }
-
- ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ ret = sec_register_aead(alg_mask);
if (ret)
- goto reg_aead_fail;
- if (qm->ver > QM_HW_V2) {
- ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
- if (ret)
- goto reg_aead_v3_fail;
- }
- return ret;
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
-reg_aead_v3_fail:
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
-reg_aead_fail:
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
-reg_skcipher_fail:
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
return ret;
}
void sec_unregister_from_crypto(struct hisi_qm *qm)
{
- if (qm->ver > QM_HW_V2)
- crypto_unregister_aeads(sec_aeads_v3,
- ARRAY_SIZE(sec_aeads_v3));
- crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+ u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW);
- if (qm->ver > QM_HW_V2)
- crypto_unregister_skciphers(sec_skciphers_v3,
- ARRAY_SIZE(sec_skciphers_v3));
- crypto_unregister_skciphers(sec_skciphers,
- ARRAY_SIZE(sec_skciphers));
+ sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 2c0be91c0b09..3705412bac5f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -27,7 +27,6 @@
#define SEC_BD_ERR_CHK_EN3 0xffffbfff
#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 256
#define SEC_PF_DEF_Q_BASE 0
#define SEC_CTX_Q_NUM_DEF 2
@@ -42,16 +41,11 @@
#define SEC_ECC_NUM 16
#define SEC_ECC_MASH 0xFF
#define SEC_CORE_INT_DISABLE 0x0
-#define SEC_CORE_INT_ENABLE 0x7c1ff
-#define SEC_CORE_INT_CLEAR 0x7c1ff
-#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
#define SEC_RAS_NFE_REG 0x301058
-#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
-#define SEC_RAS_NFE_ENB_MSK 0x7c177
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
#define SEC_MEM_START_INIT_REG 0x301100
@@ -119,6 +113,16 @@
#define SEC_DFX_COMMON1_LEN 0x45
#define SEC_DFX_COMMON2_LEN 0xBA
+#define SEC_ALG_BITMAP_SHIFT 32
+
+#define SEC_CIPHER_BITMAP (GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
+ GENMASK(24, 21))
+#define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
+ GENMASK_ULL(42, 25))
+#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
+ GENMASK_ULL(45, 43))
+#define SEC_DEV_ALG_MAX_LEN 256
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -129,6 +133,11 @@ struct sec_dfx_item {
u32 offset;
};
+struct sec_dev_alg {
+ u64 alg_msk;
+ const char *algs;
+};
+
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
@@ -137,6 +146,46 @@ static struct hisi_qm_list sec_devices = {
.unregister_from_crypto = sec_unregister_from_crypto,
};
+static const struct hisi_qm_cap_info sec_basic_info[] = {
+ {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
+ {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
+ {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
+ {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
+ {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
+ {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
+ {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
+ {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
+ {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+ {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
+ {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+ {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
+};
+
+static const struct sec_dev_alg sec_dev_algs[] = { {
+ .alg_msk = SEC_CIPHER_BITMAP,
+ .algs = "cipher\n",
+ }, {
+ .alg_msk = SEC_DIGEST_BITMAP,
+ .algs = "digest\n",
+ }, {
+ .alg_msk = SEC_AEAD_BITMAP,
+ .algs = "aead\n",
+ },
+};
+
static const struct sec_hw_error sec_hw_errors[] = {
{
.int_msk = BIT(0),
@@ -339,6 +388,16 @@ struct hisi_qp **sec_create_qps(void)
return NULL;
}
+u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
+{
+ u32 cap_val_h, cap_val_l;
+
+ cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver);
+ cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver);
+
+ return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
+}
+
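sec_get_alg_bitmap() above glues two 32-bit capability words into a single 64-bit algorithm mask; sec_register_skcipher()/sec_register_aead() in sec_crypto.c then gate each table entry on it. A small sketch of the composition and the per-bit test — the sample values are the v3 column of the sec_basic_info table (0x187F0FF low, 0x395C high), and the bit positions follow the sec_skciphers/sec_aeads tables:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)  (1ULL << (n))

static uint64_t make_alg_mask(uint32_t high, uint32_t low)
{
	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	uint64_t mask = make_alg_mask(0x395c, 0x187f0ff);

	if (mask & BIT_ULL(0))      /* sec_skciphers[]: ecb(aes) */
		printf("ecb(aes) handled by the driver\n");
	if (mask & BIT_ULL(43))     /* sec_aeads[]: authenc(hmac(sha1),cbc(aes)) */
		printf("authenc(hmac(sha1),cbc(aes)) handled by the driver\n");
	return 0;
}

Keeping the mask as a plain u64 lets the register paths skip unsupported algorithms with a single AND instead of maintaining per-version algorithm arrays.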
static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
@@ -415,7 +474,7 @@ static void sec_open_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -435,7 +494,7 @@ static void sec_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
@@ -506,7 +565,8 @@ static int sec_engine_init(struct hisi_qm *qm)
writel(SEC_SINGLE_PORT_MAX_TRANS,
qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);
+ reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
+ writel(reg, qm->io_base + SEC_SAA_EN_REG);
if (qm->ver < QM_HW_V3) {
/* HW V2 enable sm4 extra mode, as ctr/ecb */
@@ -576,7 +636,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = SEC_RAS_NFE_ENB_MSK;
+ val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -590,25 +651,30 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
+ u32 ce, nfe;
+
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
+ ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+
/* clear SEC hw error source if having */
- writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(ce, qm->io_base + SEC_RAS_CE_REG);
writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -939,7 +1005,11 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm)
static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
}
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
@@ -955,14 +1025,20 @@ static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = SEC_RAS_FE_ENB_MSK;
+ err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
}
static const struct hisi_qm_err_ini sec_err_ini = {
@@ -1001,11 +1077,41 @@ static int sec_pf_probe_init(struct sec_dev *sec)
return ret;
}
+static int sec_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u64 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW);
+
+ for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++)
+ if (alg_mask & sec_dev_algs[i].alg_msk)
+ strcat(algs, sec_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
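sec_set_qm_algs() above assembles the uacce algorithm string by appending a newline-terminated name for each advertised algorithm class and then trimming the trailing newline with strrchr(); hisi_zip_set_qm_algs() later in this series does the same for zlib/gzip/deflate/lz77_zstd. A userspace sketch of that string assembly — the masks are simple placeholders, not the real SEC_*_BITMAP values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dev_alg { uint64_t msk; const char *name; };

int main(void)
{
	/* Placeholder masks; the driver derives its masks from GENMASK_ULL() ranges. */
	static const struct dev_alg tbl[] = {
		{ 1ULL << 0, "cipher\n" },
		{ 1ULL << 1, "digest\n" },
		{ 1ULL << 2, "aead\n"   },
	};
	uint64_t dev_mask = 0x7;   /* pretend all three classes are advertised */
	char algs[64] = "";
	char *nl;
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (dev_mask & tbl[i].msk)
			strcat(algs, tbl[i].name);

	nl = strrchr(algs, '\n');   /* drop the trailing newline, as the driver does */
	if (nl)
		*nl = '\0';

	printf("%s\n", algs);   /* cipher, digest and aead on separate lines */
	return 0;
}

Building the string from the capability mask tells userspace what the specific silicon can actually accelerate, instead of the old hard-coded "cipher\ndigest\naead" assignment.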
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- qm->algs = "cipher\ndigest\naead";
qm->mode = uacce_mode;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
@@ -1028,7 +1134,19 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init sec qm configuration!\n");
+ return ret;
+ }
+
+ ret = sec_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set sec algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void sec_qm_uninit(struct hisi_qm *qm)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 3dfd3bac5a33..f2e6da3240ae 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -81,7 +81,8 @@ struct hisi_zip_sqe {
u32 rsvd1[4];
};
-int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node);
+int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
int hisi_zip_register_to_crypto(struct hisi_qm *qm);
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index ad35434a3fdb..6608971d10cd 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -39,6 +39,9 @@
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
+#define HZIP_ALG_ZLIB GENMASK(1, 0)
+#define HZIP_ALG_GZIP GENMASK(3, 2)
+
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
@@ -123,19 +126,19 @@ static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
return -EINVAL;
- return param_set_int(val, kp);
+ return param_set_ushort(val, kp);
}
static const struct kernel_param_ops sgl_sge_nr_ops = {
.set = sgl_sge_nr_set,
- .get = param_get_int,
+ .get = param_get_ushort,
};
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static u16 get_extra_field_size(const u8 *start)
+static u32 get_extra_field_size(const u8 *start)
{
return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
}
@@ -167,7 +170,7 @@ static u32 __get_gzip_head_size(const u8 *src)
return size;
}
-static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
+static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
char buf[HZIP_GZIP_HEAD_BUF];
@@ -183,7 +186,7 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
int ret;
ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (ret != head_size) {
+ if (unlikely(ret != head_size)) {
pr_err("the head size of buffer is wrong (%d)!\n", ret);
return -ENOMEM;
}
@@ -193,11 +196,11 @@ static int add_comp_head(struct scatterlist *dst, u8 req_type)
static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
- if (!acomp_req->src || !acomp_req->slen)
+ if (unlikely(!acomp_req->src || !acomp_req->slen))
return -EINVAL;
- if (req_type == HZIP_ALG_TYPE_GZIP &&
- acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+ if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
+ acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
return -EINVAL;
switch (req_type) {
@@ -230,6 +233,8 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
}
set_bit(req_id, req_q->req_bitmap);
+ write_unlock(&req_q->req_lock);
+
req_cache = q + req_id;
req_cache->req_id = req_id;
req_cache->req = req;
@@ -242,8 +247,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache->dskip = 0;
}
- write_unlock(&req_q->req_lock);
-
return req_cache;
}
@@ -254,7 +257,6 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
write_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
- memset(req, 0, sizeof(struct hisi_zip_req));
write_unlock(&req_q->req_lock);
}
@@ -339,7 +341,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
struct hisi_zip_sqe zip_sqe;
int ret;
- if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+ if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
return -EINVAL;
req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
@@ -365,7 +367,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
ret = hisi_qp_send(qp, &zip_sqe);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
dev_dbg_ratelimited(dev, "failed to send request!\n");
@@ -417,7 +419,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
atomic64_inc(&dfx->recv_cnt);
status = ops->get_status(sqe);
- if (status != 0 && status != HZIP_NC_ERR) {
+ if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
sqe->produced);
@@ -450,7 +452,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
/* let's output compression head now */
head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
head_size);
return head_size;
@@ -461,7 +463,7 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
}
@@ -478,7 +480,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
int head_size, ret;
head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (head_size < 0) {
+ if (unlikely(head_size < 0)) {
dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
head_size);
return head_size;
@@ -489,7 +491,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return PTR_ERR(req);
ret = hisi_zip_do_work(req, qp_ctx);
- if (ret != -EINPROGRESS) {
+ if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
ret);
hisi_zip_remove_req(qp_ctx, req);
@@ -498,7 +500,7 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
return ret;
}
-static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
int alg_type, int req_type)
{
struct device *dev = &qp->qm->pdev->dev;
@@ -506,7 +508,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
qp->req_type = req_type;
qp->alg_type = alg_type;
- qp->qp_ctx = ctx;
+ qp->qp_ctx = qp_ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
@@ -514,15 +516,15 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
return ret;
}
- ctx->qp = qp;
+ qp_ctx->qp = qp;
return 0;
}
-static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
+static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
- hisi_qm_stop_qp(ctx->qp);
- hisi_qm_free_qps(&ctx->qp, 1);
+ hisi_qm_stop_qp(qp_ctx->qp);
+ hisi_qm_free_qps(&qp_ctx->qp, 1);
}
static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
@@ -594,18 +596,19 @@ static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
int i;
- for (i = 1; i >= 0; i--)
+ for (i = 0; i < HZIP_CTX_Q_NUM; i++)
hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_req_q *req_q;
int i, ret;
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
req_q = &ctx->qp_ctx[i].req_q;
- req_q->size = QM_Q_DEPTH;
+ req_q->size = q_depth;
req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
if (!req_q->req_bitmap) {
@@ -613,7 +616,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (i == 0)
return ret;
- goto err_free_loop0;
+ goto err_free_comp_q;
}
rwlock_init(&req_q->req_lock);
@@ -622,19 +625,19 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
if (!req_q->q) {
ret = -ENOMEM;
if (i == 0)
- goto err_free_bitmap;
+ goto err_free_comp_bitmap;
else
- goto err_free_loop1;
+ goto err_free_decomp_bitmap;
}
}
return 0;
-err_free_loop1:
+err_free_decomp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
-err_free_loop0:
+err_free_comp_q:
kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
-err_free_bitmap:
+err_free_comp_bitmap:
bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
return ret;
}
@@ -651,6 +654,7 @@ static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
+ u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
struct hisi_zip_qp_ctx *tmp;
struct device *dev;
int i;
@@ -658,7 +662,7 @@ static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
tmp = &ctx->qp_ctx[i];
dev = &tmp->qp->qm->pdev->dev;
- tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH << 1,
+ tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
sgl_sge_nr);
if (IS_ERR(tmp->sgl_pool)) {
if (i == 1)
@@ -755,6 +759,28 @@ static struct acomp_alg hisi_zip_acomp_zlib = {
}
};
+static int hisi_zip_register_zlib(struct hisi_qm *qm)
+{
+ int ret;
+
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return 0;
+
+ ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register zlib (%d)!\n", ret);
+
+ return ret;
+}
+
+static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
+{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
+ return;
+
+ crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
static struct acomp_alg hisi_zip_acomp_gzip = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
@@ -769,27 +795,45 @@ static struct acomp_alg hisi_zip_acomp_gzip = {
}
};
-int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+static int hisi_zip_register_gzip(struct hisi_qm *qm)
{
int ret;
- ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
- if (ret) {
- pr_err("failed to register to zlib (%d)!\n", ret);
- return ret;
- }
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return 0;
ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
- if (ret) {
- pr_err("failed to register to gzip (%d)!\n", ret);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
- }
+ if (ret)
+ dev_err(&qm->pdev->dev, "failed to register gzip (%d)!\n", ret);
return ret;
}
-void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
{
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ return;
+
crypto_unregister_acomp(&hisi_zip_acomp_gzip);
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
+}
+
+int hisi_zip_register_to_crypto(struct hisi_qm *qm)
+{
+ int ret = 0;
+
+ ret = hisi_zip_register_zlib(qm);
+ if (ret)
+ return ret;
+
+ ret = hisi_zip_register_gzip(qm);
+ if (ret)
+ hisi_zip_unregister_zlib(qm);
+
+ return ret;
+}
+
+void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
+{
+ hisi_zip_unregister_zlib(qm);
+ hisi_zip_unregister_gzip(qm);
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index c3303d99acac..c863435e8c75 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -20,18 +20,6 @@
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_CLOCK_GATE_CTRL 0x301004
-#define COMP0_ENABLE BIT(0)
-#define COMP1_ENABLE BIT(1)
-#define DECOMP0_ENABLE BIT(2)
-#define DECOMP1_ENABLE BIT(3)
-#define DECOMP2_ENABLE BIT(4)
-#define DECOMP3_ENABLE BIT(5)
-#define DECOMP4_ENABLE BIT(6)
-#define DECOMP5_ENABLE BIT(7)
-#define HZIP_ALL_COMP_DECOMP_EN (COMP0_ENABLE | COMP1_ENABLE | \
- DECOMP0_ENABLE | DECOMP1_ENABLE | \
- DECOMP2_ENABLE | DECOMP3_ENABLE | \
- DECOMP4_ENABLE | DECOMP5_ENABLE)
#define HZIP_DECOMP_CHECK_ENABLE BIT(16)
#define HZIP_FSM_MAX_CNT 0x301008
@@ -69,20 +57,14 @@
#define HZIP_CORE_INT_STATUS_M_ECC BIT(1)
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
-#define HZIP_CORE_INT_RAS_CE_ENABLE 0x1
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
+#define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0
#define HZIP_OOO_SHUTDOWN_SEL 0x30120C
-#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
-#define HZIP_COMP_CORE_NUM 2
-#define HZIP_DECOMP_CORE_NUM 6
-#define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \
- HZIP_DECOMP_CORE_NUM)
#define HZIP_SQE_SIZE 128
-#define HZIP_SQ_SIZE (HZIP_SQE_SIZE * QM_Q_DEPTH)
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -92,6 +74,12 @@
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_WR_PORT BIT(11)
+#define HZIP_DEV_ALG_MAX_LEN 256
+#define HZIP_ALG_ZLIB_BIT GENMASK(1, 0)
+#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
+#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
+#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
#define HZIP_SQE_MASK_LEN 48
@@ -132,6 +120,26 @@ struct zip_dfx_item {
u32 offset;
};
+struct zip_dev_alg {
+ u32 alg_msk;
+ const char *algs;
+};
+
+static const struct zip_dev_alg zip_dev_algs[] = { {
+ .alg_msk = HZIP_ALG_ZLIB_BIT,
+ .algs = "zlib\n",
+ }, {
+ .alg_msk = HZIP_ALG_GZIP_BIT,
+ .algs = "gzip\n",
+ }, {
+ .alg_msk = HZIP_ALG_DEFLATE_BIT,
+ .algs = "deflate\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .algs = "lz77_zstd\n",
+ },
+};
+
static struct hisi_qm_list zip_devices = {
.register_to_crypto = hisi_zip_register_to_crypto,
.unregister_from_crypto = hisi_zip_unregister_from_crypto,
@@ -187,6 +195,58 @@ struct hisi_zip_ctrl {
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
+enum zip_cap_type {
+ ZIP_QM_NFE_MASK_CAP = 0x0,
+ ZIP_QM_RESET_MASK_CAP,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_QM_CE_MASK_CAP,
+ ZIP_NFE_MASK_CAP,
+ ZIP_RESET_MASK_CAP,
+ ZIP_OOO_SHUTDOWN_MASK_CAP,
+ ZIP_CE_MASK_CAP,
+ ZIP_CLUSTER_NUM_CAP,
+ ZIP_CORE_TYPE_NUM_CAP,
+ ZIP_CORE_NUM_CAP,
+ ZIP_CLUSTER_COMP_NUM_CAP,
+ ZIP_CLUSTER_DECOMP_NUM_CAP,
+ ZIP_DECOMP_ENABLE_BITMAP,
+ ZIP_COMP_ENABLE_BITMAP,
+ ZIP_DRV_ALG_BITMAP,
+ ZIP_DEV_ALG_BITMAP,
+ ZIP_CORE1_ALG_BITMAP,
+ ZIP_CORE2_ALG_BITMAP,
+ ZIP_CORE3_ALG_BITMAP,
+ ZIP_CORE4_ALG_BITMAP,
+ ZIP_CORE5_ALG_BITMAP,
+ ZIP_CAP_MAX
+};
+
+static struct hisi_qm_cap_info zip_basic_cap_info[] = {
+ {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77},
+ {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77},
+ {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
+ {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
+ {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE},
+ {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE},
+ {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE},
+ {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
+ {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1},
+ {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2},
+ {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5},
+ {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2},
+ {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
+ {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
+ {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
+ {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
+ {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
+ {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
+ {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
+ {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0}
+};
+
enum {
HZIP_COMP_CORE0,
HZIP_COMP_CORE1,
@@ -343,12 +403,52 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)
return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);
}
+bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
+{
+ u32 cap_val;
+
+ cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver);
+ if ((alg & cap_val) == alg)
+ return true;
+
+ return false;
+}
+
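hisi_zip_alg_support() above uses the standard subset test: (alg & cap_val) == alg holds only when every requested bit is present in the capability word, which matters because HZIP_ALG_ZLIB and HZIP_ALG_GZIP are two-bit fields rather than single flags. A standalone illustration with a made-up capability value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ALG_ZLIB  0x3u   /* bits 1:0 */
#define ALG_GZIP  0xcu   /* bits 3:2 */

static bool alg_support(uint32_t cap, uint32_t alg)
{
	return (alg & cap) == alg;   /* all requested bits must be set */
}

int main(void)
{
	uint32_t cap = 0x7;   /* zlib fully enabled, gzip only half enabled */

	printf("zlib: %s\n", alg_support(cap, ALG_ZLIB) ? "supported" : "not supported");
	printf("gzip: %s\n", alg_support(cap, ALG_GZIP) ? "supported" : "not supported");
	return 0;
}

A plain (alg & cap_val) != 0 check would wrongly report gzip as supported here, which is why the driver compares the result against alg itself.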
+static int hisi_zip_set_qm_algs(struct hisi_qm *qm)
+{
+ struct device *dev = &qm->pdev->dev;
+ char *algs, *ptr;
+ u32 alg_mask;
+ int i;
+
+ if (!qm->use_sva)
+ return 0;
+
+ algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
+ if (!algs)
+ return -ENOMEM;
+
+ alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver);
+
+ for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++)
+ if (alg_mask & zip_dev_algs[i].alg_msk)
+ strcat(algs, zip_dev_algs[i].algs);
+
+ ptr = strrchr(algs, '\n');
+ if (ptr)
+ *ptr = '\0';
+
+ qm->uacce->algs = algs;
+
+ return 0;
+}
+
static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
/* Enable prefetch */
@@ -368,7 +468,7 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
u32 val;
int ret;
- if (qm->ver < QM_HW_V3)
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
@@ -401,6 +501,7 @@ static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
void __iomem *base = qm->io_base;
+ u32 dcomp_bm, comp_bm;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -438,8 +539,11 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
}
/* let's open all compression/decompression cores */
- writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- base + HZIP_CLOCK_GATE_CTRL);
+ dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver);
+ comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_COMP_ENABLE_BITMAP, qm->cap_ver);
+ writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
@@ -458,7 +562,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = HZIP_CORE_INT_RAS_NFE_ENABLE;
+ val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -472,6 +577,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
qm->io_base + HZIP_CORE_INT_MASK_REG);
@@ -479,17 +586,17 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+
/* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(HZIP_CORE_INT_RAS_CE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* enable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
@@ -498,10 +605,13 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
+ u32 nfe, ce;
+
/* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
- /* disable ZIP block master OOO when nfe occurs on Kunpeng930 */
hisi_zip_master_ooo_ctrl(qm, false);
}
@@ -586,8 +696,9 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
return len;
tbuf[len] = '\0';
- if (kstrtoul(tbuf, 0, &val))
- return -EFAULT;
+ ret = kstrtoul(tbuf, 0, &val);
+ if (ret)
+ return ret;
ret = hisi_qm_get_dfx_access(qm);
if (ret)
@@ -651,18 +762,23 @@ DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
+ u32 zip_core_num, zip_comp_core_num;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d;
char buf[HZIP_BUF_SIZE];
int i;
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "comp_core%d", i);
else
scnprintf(buf, sizeof(buf), "decomp_core%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -675,7 +791,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
debugfs_create_file("regs", 0444, tmp_d, regset,
- &hisi_zip_regs_fops);
+ &hisi_zip_regs_fops);
}
return 0;
@@ -795,10 +911,13 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
struct qm_debug *debug = &qm->debug;
void __iomem *io_base;
+ u32 zip_core_num;
int i, j, idx;
- debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM +
- com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+
+ debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num,
+ sizeof(unsigned int), GFP_KERNEL);
if (!debug->last_words)
return -ENOMEM;
@@ -807,7 +926,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
debug->last_words[i] = readl_relaxed(io_base);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
+ for (i = 0; i < zip_core_num; i++) {
io_base = qm->io_base + core_offsets[i];
for (j = 0; j < core_dfx_regs_num; j++) {
idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
@@ -834,6 +953,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
{
int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs);
int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs);
+ u32 zip_core_num, zip_comp_core_num;
struct qm_debug *debug = &qm->debug;
char buf[HZIP_BUF_SIZE];
void __iomem *base;
@@ -847,15 +967,18 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset);
if (debug->last_words[i] != val)
pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n",
- hzip_com_dfx_regs[i].name, debug->last_words[i], val);
+ hzip_com_dfx_regs[i].name, debug->last_words[i], val);
}
- for (i = 0; i < HZIP_CORE_NUM; i++) {
- if (i < HZIP_COMP_CORE_NUM)
+ zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver);
+ zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP,
+ qm->cap_ver);
+ for (i = 0; i < zip_core_num; i++) {
+ if (i < zip_comp_core_num)
scnprintf(buf, sizeof(buf), "Comp_core-%d", i);
else
scnprintf(buf, sizeof(buf), "Decomp_core-%d",
- i - HZIP_COMP_CORE_NUM);
+ i - zip_comp_core_num);
base = qm->io_base + core_offsets[i];
pci_info(qm->pdev, "==>%s:\n", buf);
@@ -865,7 +988,8 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset);
if (debug->last_words[idx] != val)
pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
- hzip_dump_dfx_regs[j].name, debug->last_words[idx], val);
+ hzip_dump_dfx_regs[j].name,
+ debug->last_words[idx], val);
}
}
}
@@ -900,7 +1024,11 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
+ u32 nfe;
+
writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
@@ -934,16 +1062,21 @@ static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
- err_info->ce = QM_BASE_CE;
- err_info->fe = 0;
+ err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE;
+ err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
- err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
-
- if (qm->ver >= QM_HW_V3)
- err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT;
}
static const struct hisi_qm_err_ini hisi_zip_err_ini = {
@@ -976,7 +1109,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
qm->err_ini = &hisi_zip_err_ini;
qm->err_ini->err_info_init(qm);
- hisi_zip_set_user_domain_and_cache(qm);
+ ret = hisi_zip_set_user_domain_and_cache(qm);
+ if (ret)
+ return ret;
+
hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -990,12 +1126,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
+ int ret;
+
qm->pdev = pdev;
qm->ver = pdev->revision;
- if (pdev->revision >= QM_HW_V3)
- qm->algs = "zlib\ngzip\ndeflate\nlz77_zstd";
- else
- qm->algs = "zlib\ngzip";
qm->mode = uacce_mode;
qm->sqe_size = HZIP_SQE_SIZE;
qm->dev_name = hisi_zip_name;
@@ -1019,7 +1153,19 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
}
- return hisi_qm_init(qm);
+ ret = hisi_qm_init(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to init zip qm configures!\n");
+ return ret;
+ }
+
+ ret = hisi_zip_set_qm_algs(qm);
+ if (ret) {
+ pci_err(qm->pdev, "Failed to set zip algs!\n");
+ hisi_qm_uninit(qm);
+ }
+
+ return ret;
}
static void hisi_zip_qm_uninit(struct hisi_qm *qm)
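Note: hisi_zip_alg_support() above is non-static, presumably so the algorithm registration side of the driver can gate algorithms on the per-device capability bitmap rather than on the hardware revision. A minimal usage sketch follows; HZIP_ALG_BITMAP_DEFLATE and hisi_zip_register_deflate() are hypothetical names used only for this illustration.

	/* Register deflate only when the capability bitmap advertises it.
	 * HZIP_ALG_BITMAP_DEFLATE and hisi_zip_register_deflate() are
	 * hypothetical names for this sketch.
	 */
	if (hisi_zip_alg_support(qm, HZIP_ALG_BITMAP_DEFLATE)) {
		ret = hisi_zip_register_deflate(qm);
		if (ret)
			return ret;
	}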
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d68ef16650d4..32a37e3850c5 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -63,7 +63,6 @@ struct safexcel_cipher_ctx {
u32 hash_alg;
u32 state_sz;
- struct crypto_cipher *hkaes;
struct crypto_aead *fback;
};
@@ -642,10 +641,16 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
safexcel_complete(priv, ring);
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
/*
@@ -737,23 +742,29 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
max(totlen_src, totlen_dst));
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
totlen_src);
return -EINVAL;
}
- dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+
+ if (sreq->nr_src > 0)
+ dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
totlen_dst);
- dma_unmap_sg(priv->dev, src, sreq->nr_src,
- DMA_TO_DEVICE);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unmap;
}
- dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+
+ if (sreq->nr_dst > 0)
+ dma_map_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
@@ -883,12 +894,18 @@ rdesc_rollback:
cdesc_rollback:
for (i = 0; i < n_cdesc; i++)
safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
-
+unmap:
if (src == dst) {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_BIDIRECTIONAL);
} else {
- dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
- dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
+ if (sreq->nr_src > 0)
+ dma_unmap_sg(priv->dev, src, sreq->nr_src,
+ DMA_TO_DEVICE);
+ if (sreq->nr_dst > 0)
+ dma_unmap_sg(priv->dev, dst, sreq->nr_dst,
+ DMA_FROM_DEVICE);
}
return ret;
@@ -2589,15 +2606,8 @@ static int safexcel_aead_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
ctx->key_len = len;
/* Compute hash key by encrypting zeroes with cipher key */
- crypto_cipher_clear_flags(ctx->hkaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->hkaes, key, len);
- if (ret)
- return ret;
-
memset(hashkey, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->hkaes, (u8 *)hashkey, (u8 *)hashkey);
+ aes_encrypt(&aes, (u8 *)hashkey, (u8 *)hashkey);
if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
@@ -2626,15 +2636,11 @@ static int safexcel_aead_gcm_cra_init(struct crypto_tfm *tfm)
ctx->xcm = EIP197_XCM_MODE_GCM;
ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
- ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->hkaes);
+ return 0;
}
static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(ctx->hkaes);
safexcel_aead_cra_exit(tfm);
}
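Note: the setkey hunk above drops the "aes" crypto_cipher fallback and derives the GCM hash key by encrypting a zero block with the AES library directly. A minimal, self-contained sketch of that derivation, assuming only <crypto/aes.h> and <linux/string.h>; the function name is illustrative.

	#include <crypto/aes.h>
	#include <linux/string.h>

	/* Derive the GCM hash key H = E_K(0^128) with the AES library,
	 * as the setkey path above now does (sketch, key length assumed valid).
	 */
	static int derive_ghash_key(const u8 *key, unsigned int keylen,
				    u8 hashkey[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes;
		int ret;

		ret = aes_expandkey(&aes, key, keylen);
		if (ret)
			return ret;

		memset(hashkey, 0, AES_BLOCK_SIZE);
		aes_encrypt(&aes, hashkey, hashkey);

		memzero_explicit(&aes, sizeof(aes));
		return 0;
	}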
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index bc60b5802256..103fc551d2af 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -30,7 +30,7 @@ struct safexcel_ahash_ctx {
bool fb_init_done;
bool fb_do_setkey;
- struct crypto_cipher *kaes;
+ struct crypto_aes_ctx *aes;
struct crypto_ahash *fback;
struct crypto_shash *shpre;
struct shash_desc *shdesc;
@@ -383,7 +383,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
u32 x;
x = ipad[i] ^ ipad[i + 4];
- cache[i] ^= swab(x);
+ cache[i] ^= swab32(x);
}
}
cache_len = AES_BLOCK_SIZE;
@@ -821,10 +821,10 @@ static int safexcel_ahash_final(struct ahash_request *areq)
u32 *result = (void *)areq->result;
/* K3 */
- result[i] = swab(ctx->base.ipad.word[i + 4]);
+ result[i] = swab32(ctx->base.ipad.word[i + 4]);
}
areq->result[0] ^= 0x80; // 10- padding
- crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
+ aes_encrypt(ctx->aes, areq->result, areq->result);
return 0;
} else if (unlikely(req->hmac &&
(req->len == req->block_sz) &&
@@ -2083,37 +2083,26 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
/* precompute the XCBC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
-
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
- "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
- "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp,
+ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
+ aes_encrypt(ctx->aes, (u8 *)key_tmp + AES_BLOCK_SIZE,
+ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
- ctx->base.ipad.word[i] = swab(key_tmp[i]);
-
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes,
- (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
- AES_MIN_KEY_SIZE);
+ ctx->base.ipad.word[i] = swab32(key_tmp[i]);
+
+ ret = aes_expandkey(ctx->aes,
+ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
+ AES_MIN_KEY_SIZE);
if (ret)
return ret;
@@ -2121,7 +2110,6 @@ static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
@@ -2130,15 +2118,15 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
safexcel_ahash_cra_init(tfm);
- ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
- return PTR_ERR_OR_ZERO(ctx->kaes);
+ ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL);
+ return ctx->aes ? 0 : -ENOMEM;
}
static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->kaes);
+ kfree(ctx->aes);
safexcel_ahash_cra_exit(tfm);
}
@@ -2178,31 +2166,23 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct crypto_aes_ctx aes;
__be64 consts[4];
u64 _const[2];
u8 msb_mask, gfmask;
int ret, i;
- ret = aes_expandkey(&aes, key, len);
+ /* precompute the CMAC key material */
+ ret = aes_expandkey(ctx->aes, key, len);
if (ret)
return ret;
for (i = 0; i < len / sizeof(u32); i++)
- ctx->base.ipad.word[i + 8] = swab(aes.key_enc[i]);
-
- /* precompute the CMAC key material */
- crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
- crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- ret = crypto_cipher_setkey(ctx->kaes, key, len);
- if (ret)
- return ret;
+ ctx->base.ipad.word[i + 8] = swab32(ctx->aes->key_enc[i]);
/* code below borrowed from crypto/cmac.c */
/* encrypt the zero block */
memset(consts, 0, AES_BLOCK_SIZE);
- crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
+ aes_encrypt(ctx->aes, (u8 *)consts, (u8 *)consts);
gfmask = 0x87;
_const[0] = be64_to_cpu(consts[1]);
@@ -2234,7 +2214,6 @@ static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
}
ctx->cbcmac = false;
- memzero_explicit(&aes, sizeof(aes));
return 0;
}
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 7942b48dd55a..1cd62f9c3e3a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -42,7 +42,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
config CRYPTO_DEV_KEEMBAY_OCS_ECC
tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select CRYPTO_ECDH
select CRYPTO_ENGINE
@@ -64,7 +64,7 @@ config CRYPTO_DEV_KEEMBAY_OCS_HCU
select CRYPTO_ENGINE
depends on HAS_IOMEM
depends on ARCH_KEEMBAY || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) Hash
Control Unit (HCU) hardware acceleration for use with Crypto API.
diff --git a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
index b8bdb9f134f3..205eacac4a34 100644
--- a/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx/otx_cpt_hw_types.h
@@ -403,7 +403,7 @@ union otx_cptx_pf_exe_bist_status {
* big-endian format in memory.
* iqb_ldwb:1 [7:7](R/W) Instruction load don't write back.
* 0 = The hardware issues NCB transient load (LDT) towards the cache,
- * which if the line hits and is is dirty will cause the line to be
+ * which if the line hits and is dirty will cause the line to be
* written back before being replaced.
* 1 = The hardware issues NCB LDWB read-and-invalidate command towards
* the cache when fetching the last word of instructions; as a result the
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 40b482198ebc..df9c2b8747e6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -97,7 +97,7 @@ static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
static void set_ucode_filename(struct otx_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -138,7 +138,7 @@ static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
u32 i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
@@ -286,6 +286,7 @@ static int process_tar_file(struct device *dev,
struct tar_ucode_info_t *tar_info;
struct otx_cpt_ucode_hdr *ucode_hdr;
int ucode_type, ucode_size;
+ unsigned int code_length;
/*
* If size is less than microcode header size then don't report
@@ -303,7 +304,13 @@ static int process_tar_file(struct device *dev,
if (get_ucode_type(ucode_hdr, &ucode_type))
return 0;
- ucode_size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Invalid code_length %u\n", code_length);
+ return -EINVAL;
+ }
+
+ ucode_size = code_length * 2;
if (!ucode_size || (size < round_up(ucode_size, 16) +
sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", filename);
@@ -886,6 +893,7 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
{
struct otx_cpt_ucode_hdr *ucode_hdr;
const struct firmware *fw;
+ unsigned int code_length;
int ret;
set_ucode_filename(ucode, ucode_filename);
@@ -896,7 +904,13 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
ucode->ver_num = ucode_hdr->ver_num;
- ucode->size = ntohl(ucode_hdr->code_length) * 2;
+ code_length = ntohl(ucode_hdr->code_length);
+ if (code_length >= INT_MAX / 2) {
+ dev_err(dev, "Ucode invalid code_length %u\n", code_length);
+ ret = -EINVAL;
+ goto release_fw;
+ }
+ ucode->size = code_length * 2;
if (!ucode->size || (fw->size < round_up(ucode->size, 16)
+ sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
@@ -1328,7 +1342,7 @@ static ssize_t ucode_load_store(struct device *dev,
eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
err_msg = "Invalid engine group format";
- strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
+ strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
start = tmp_buf;
has_se = has_ie = has_ae = false;
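Note: both process_tar_file() and ucode_load() above now validate code_length before doubling it, since a crafted firmware header could otherwise overflow the signed ucode size. A condensed sketch of the added guard; the helper name is illustrative.

	/* Reject headers whose doubled code_length would overflow an int. */
	static int otx_ucode_size(struct device *dev, __be32 raw_len,
				  unsigned int *ucode_size)
	{
		unsigned int code_length = ntohl(raw_len);

		if (code_length >= INT_MAX / 2) {
			dev_err(dev, "Invalid code_length %u\n", code_length);
			return -EINVAL;
		}

		*ucode_size = code_length * 2;
		return 0;
	}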
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
index 36d72e35ebeb..88a41d1ca5f6 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c
@@ -661,7 +661,7 @@ static ssize_t vf_type_show(struct device *dev,
msg = "Invalid";
}
- return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
+ return sysfs_emit(buf, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
@@ -670,7 +670,7 @@ static ssize_t vf_engine_group_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
+ return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
@@ -706,7 +706,7 @@ static ssize_t vf_coalesc_time_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_timewait(cptvf));
}
@@ -716,7 +716,7 @@ static ssize_t vf_coalesc_num_wait_show(struct device *dev,
{
struct otx_cptvf *cptvf = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
cptvf_read_vq_done_numwait(cptvf));
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
index 5663787c7a62..90fdafb7c468 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_mbox.c
@@ -159,12 +159,10 @@ static int cptvf_send_msg_to_pf_timeout(struct otx_cptvf *cptvf,
int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_READY;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -174,13 +172,11 @@ int otx_cptvf_check_pf_ready(struct otx_cptvf *cptvf)
int otx_cptvf_send_vq_size_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_QLEN;
mbx.data = cptvf->qsize;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -208,14 +204,12 @@ int otx_cptvf_send_vf_to_grp_msg(struct otx_cptvf *cptvf, int group)
int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VQ_PRIORITY;
/* Convey group of the VF */
mbx.data = cptvf->priority;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -224,12 +218,10 @@ int otx_cptvf_send_vf_priority_msg(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_UP;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
/*
@@ -238,10 +230,8 @@ int otx_cptvf_send_vf_up(struct otx_cptvf *cptvf)
int otx_cptvf_send_vf_down(struct otx_cptvf *cptvf)
{
struct otx_cpt_mbox mbx = {};
- int ret;
mbx.msg = OTX_CPT_MSG_VF_DOWN;
- ret = cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
- return ret;
+ return cptvf_send_msg_to_pf_timeout(cptvf, &mbx);
}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index f10050fead16..1577986677f6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -68,7 +68,7 @@ static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
const char *filename)
{
- strlcpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
+ strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}
static char *get_eng_type_str(int eng_type)
@@ -126,7 +126,7 @@ static int get_ucode_type(struct device *dev,
int i, val = 0;
u8 nn;
- strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
+ strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 02cb9e44afd8..75c403f2b1d9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -191,7 +191,6 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
struct pci_dev *pdev = cptvf->pdev;
struct mbox_msghdr *req;
- int ret;
req = (struct mbox_msghdr *)
otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
@@ -204,7 +203,5 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
req->sig = OTX2_MBOX_REQ_SIG;
req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
- ret = otx2_cpt_send_mbox_msg(mbox, pdev);
-
- return ret;
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 3b0bf6fea491..31e24df18877 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1494,7 +1494,7 @@ static void n2_unregister_algs(void)
*
* So we have to back-translate, going through the 'intr' and 'ino'
* property tables of the n2cp MDESC node, matching it with the OF
- * 'interrupts' property entries, in order to to figure out which
+ * 'interrupts' property entries, in order to figure out which
* devino goes to which already-translated IRQ.
*/
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 3793885f928d..c843f4c6f684 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -134,7 +134,6 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
unsigned int cryptlen, u8 *b0)
{
unsigned int l, lp, m = authsize;
- int rc;
memcpy(b0, iv, 16);
@@ -148,9 +147,7 @@ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
if (assoclen)
*b0 |= 64;
- rc = set_msg_len(b0 + 16 - l, cryptlen, l);
-
- return rc;
+ return set_msg_len(b0 + 16 - l, cryptlen, l);
}
static int generate_pat(u8 *iv,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index e61b3e13db3b..1931e5b37f2b 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -251,13 +251,13 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
return -ENOMEM;
INIT_LIST_HEAD(&key_val->list);
- strlcpy(key_val->key, key, sizeof(key_val->key));
+ strscpy(key_val->key, key, sizeof(key_val->key));
if (type == ADF_DEC) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"%ld", (*((long *)val)));
} else if (type == ADF_STR) {
- strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+ strscpy(key_val->val, (char *)val, sizeof(key_val->val));
} else if (type == ADF_HEX) {
snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"0x%lx", (unsigned long)val);
@@ -315,7 +315,7 @@ int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
if (!sec)
return -ENOMEM;
- strlcpy(sec->name, name, sizeof(sec->name));
+ strscpy(sec->name, name, sizeof(sec->name));
INIT_LIST_HEAD(&sec->param_head);
down_write(&cfg->lock);
list_add_tail(&sec->list, &cfg->sec_list);
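Note: the strlcpy() to strscpy() conversions in this series trade the BSD-style semantics (return value is the would-be source length, and the source may be read past the destination size) for a bounded copy that reports truncation directly. A small usage sketch; the buffer size is arbitrary.

	char name[64];
	ssize_t n;

	n = strscpy(name, src, sizeof(name));
	if (n == -E2BIG)	/* source did not fit; dest is still NUL-terminated */
		pr_warn("name truncated to %zu bytes\n", sizeof(name) - 1);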
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e8ac932bbaab..82b69e1f725b 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -16,6 +16,9 @@
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
+#define ADF_CFG_MAX_SECTION 512
+#define ADF_CFG_MAX_KEY_VAL 256
+
#define DEVICE_NAME "qat_adf_ctl"
static DEFINE_MUTEX(adf_ctl_lock);
@@ -137,10 +140,11 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
struct adf_user_cfg_key_val key_val;
struct adf_user_cfg_key_val *params_head;
struct adf_user_cfg_section section, *section_head;
+ int i, j;
section_head = ctl_data->config_section;
- while (section_head) {
+ for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
if (copy_from_user(&section, (void __user *)section_head,
sizeof(*section_head))) {
dev_err(&GET_DEV(accel_dev),
@@ -156,7 +160,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
params_head = section.params;
- while (params_head) {
+ for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
if (copy_from_user(&key_val, (void __user *)params_head,
sizeof(key_val))) {
dev_err(&GET_DEV(accel_dev),
@@ -363,7 +367,7 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
dev_info.num_logical_accel = hw_data->num_logical_accel;
dev_info.banks_per_accel = hw_data->num_banks
/ hw_data->num_logical_accel;
- strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+ strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
dev_info.instance_id = hw_data->instance_id;
dev_info.type = hw_data->dev_class->type;
dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 43b8f864806b..4fb4b3df5a18 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -107,7 +107,7 @@ do { \
* Timeout is in cycles. Clock speed may vary across products but this
* value should be a few milli-seconds.
*/
-#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000
+#define ADF_SSM_WDT_DEFAULT_VALUE 0x7000000ULL
#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000
#define ADF_SSMWDTL_OFFSET 0x54
#define ADF_SSMWDTH_OFFSET 0x5C
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
index e69e5907f595..08bca1c506c0 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
+++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
@@ -96,7 +96,7 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
if (!ring_debug)
return -ENOMEM;
- strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+ strscpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
snprintf(entry_name, sizeof(entry_name), "ring_%02d",
ring->ring_number);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index 4b36869bf460..69482abdb8b9 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -86,7 +86,8 @@
ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \
ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \
ICP_QAT_CSS_SIGNATURE_LEN(handle))
-#define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN 0x40000
+#define ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN 0x30000
#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index fb45fa83841c..cad9c58caab1 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -673,11 +673,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
size_t sz_out = qat_req->buf.sz_out;
+ int bl_dma_dir;
int i;
+ bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_BIDIRECTIONAL);
+ bl->bufers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -691,7 +694,7 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -716,6 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n);
int node = dev_to_node(&GET_DEV(inst->accel_dev));
+ int bufl_dma_dir;
if (unlikely(!n))
return -EINVAL;
@@ -733,6 +737,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sgl_src_valid = true;
}
+ bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -744,7 +750,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err_in;
@@ -787,7 +793,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err_out;
bufers[y].len = sg->length;
@@ -817,7 +823,7 @@ err_out:
if (!dma_mapping_error(dev, buflout->bufers[i].addr))
dma_unmap_single(dev, buflout->bufers[i].addr,
buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (!qat_req->buf.sgl_dst_valid)
kfree(buflout);
@@ -831,7 +837,7 @@ err_in:
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
- DMA_BIDIRECTIONAL);
+ bufl_dma_dir);
if (!qat_req->buf.sgl_src_valid)
kfree(bufl);
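Note: the qat_algs.c hunks above stop mapping every buffer DMA_BIDIRECTIONAL: when the source and destination scatterlists differ, the source is mapped to-device and the destination from-device, and the unmap side mirrors the same choice. A condensed sketch of the pairing rule; variable names are illustrative.

	/* Pick the direction once and reuse it for both map and unmap. */
	int dir = (sgl == sglout) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	dma_addr_t addr;

	addr = dma_map_single(dev, sg_virt(sg), sg->length, dir);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand the buffer to the device ... */

	dma_unmap_single(dev, addr, sg->length, dir);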
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 095ed2a404d2..94a26702aeae 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,14 +332,14 @@ static int qat_dh_compute_value(struct kpp_request *req)
qat_req->in.dh.in_tab[n_input_params] = 0;
qat_req->out.dh.out_tab[1] = 0;
/* Mapping in.in.b or in.in_g2.xa is the same */
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
- sizeof(qat_req->in.dh.in.b),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
+ sizeof(struct qat_dh_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
- sizeof(qat_req->out.dh.r),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
+ sizeof(struct qat_dh_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -729,14 +729,14 @@ static int qat_rsa_enc(struct akcipher_request *req)
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
- sizeof(qat_req->in.rsa.enc.m),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
- sizeof(qat_req->out.rsa.enc.c),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
@@ -875,14 +875,14 @@ static int qat_rsa_dec(struct akcipher_request *req)
else
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
- sizeof(qat_req->in.rsa.dec.c),
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
+ sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
- sizeof(qat_req->out.rsa.dec.m),
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
+ sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
goto unmap_in_params;
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 0fe5a474aa45..b7f7869ef8b2 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -1367,6 +1367,48 @@ static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
}
}
+static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
+ char *image, unsigned int size,
+ unsigned int fw_type)
+{
+ char *fw_type_name = fw_type ? "MMP" : "AE";
+ unsigned int css_dword_size = sizeof(u32);
+
+ if (handle->chip_info->fw_auth) {
+ struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
+ unsigned int header_len = ICP_QAT_AE_IMG_OFFSET(handle);
+
+ if ((css_hdr->header_len * css_dword_size) != header_len)
+ goto err;
+ if ((css_hdr->size * css_dword_size) != size)
+ goto err;
+ if (fw_type != css_hdr->fw_type)
+ goto err;
+ if (size <= header_len)
+ goto err;
+ size -= header_len;
+ }
+
+ if (fw_type == CSS_AE_FIRMWARE) {
+ if (size < sizeof(struct icp_qat_simg_ae_mode *) +
+ ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
+ goto err;
+ if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
+ goto err;
+ } else if (fw_type == CSS_MMP_FIRMWARE) {
+ if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
+ goto err;
+ } else {
+ pr_err("QAT: Unsupported firmware type\n");
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ pr_err("QAT: Invalid %s firmware image\n", fw_type_name);
+ return -EINVAL;
+}
+
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
char *image, unsigned int size,
struct icp_qat_fw_auth_desc **desc)
@@ -1379,7 +1421,7 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_simg_ae_mode *simg_ae_mode;
struct icp_firml_dram_desc img_desc;
- if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
+ if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return -EINVAL;
}
@@ -1547,6 +1589,11 @@ int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
{
struct icp_qat_fw_auth_desc *desc = NULL;
int status = 0;
+ int ret;
+
+ ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
+ if (ret)
+ return ret;
if (handle->chip_info->fw_auth) {
status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
@@ -2018,8 +2065,15 @@ static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
struct icp_qat_fw_auth_desc *desc = NULL;
struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
+ int ret;
for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
+ ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
+ simg_hdr[i].simg_len,
+ CSS_AE_FIRMWARE);
+ if (ret)
+ return ret;
+
if (qat_uclo_map_auth_fw(handle,
(char *)simg_hdr[i].simg_buf,
(unsigned int)
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 97a530171f07..6eb4d2e35629 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -450,8 +450,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
if (ret)
return ret;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 59159f5e64e5..37bafd7aeb79 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -97,14 +97,16 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
}
ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return -EIO;
sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
- if (ret < 0)
+ if (!ret) {
+ ret = -EIO;
goto error_unmap_src;
+ }
ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
&rctx->result_sg, 1, qce_ahash_done, async_req);
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 3d27cd5210ef..5b493fdc1e74 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -124,15 +124,15 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
rctx->dst_sg = rctx->dst_tbl.sgl;
dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- if (dst_nents < 0) {
- ret = dst_nents;
+ if (!dst_nents) {
+ ret = -EIO;
goto error_free;
}
if (diff_dst) {
src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- if (src_nents < 0) {
- ret = src_nents;
+ if (!src_nents) {
+ ret = -EIO;
goto error_unmap_dst;
}
rctx->src_sg = req->src;
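Note: dma_map_sg() returns the number of mapped entries, or 0 on failure — never a negative errno — so the `< 0` checks replaced in the qce handlers above could never fire. A minimal sketch of the corrected pattern.

	int nents;

	nents = dma_map_sg(dev, sgl, sg_count, DMA_TO_DEVICE);
	if (!nents)		/* 0 means the mapping failed */
		return -EIO;

	/* ... submit the first 'nents' mapped entries to the engine ... */

	dma_unmap_sg(dev, sgl, sg_count, DMA_TO_DEVICE);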
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 031b5f701a0a..72dd1a4ebac4 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -9,6 +9,7 @@
#include <linux/crypto.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -201,15 +202,13 @@ static int qcom_rng_remove(struct platform_device *pdev)
return 0;
}
-#if IS_ENABLED(CONFIG_ACPI)
-static const struct acpi_device_id qcom_rng_acpi_match[] = {
+static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
{ .id = "QCOM8160", .driver_data = 1 },
{}
};
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
-#endif
-static const struct of_device_id qcom_rng_of_match[] = {
+static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
{ .compatible = "qcom,prng", .data = (void *)0},
{ .compatible = "qcom,prng-ee", .data = (void *)1},
{}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 457084b344c1..7ab20fb95166 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -26,10 +26,10 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#define SHA_BUFFER_LEN PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -196,7 +196,7 @@ struct sahara_dev {
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- struct mutex queue_mutex;
+ spinlock_t queue_spinlock;
struct task_struct *kthread;
struct completion dma_completion;
@@ -487,13 +487,13 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_TO_DEVICE);
- if (ret != dev->nb_in_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map in sg\n");
goto unmap_in;
}
ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
DMA_FROM_DEVICE);
- if (ret != dev->nb_out_sg) {
+ if (!ret) {
dev_err(dev->device, "couldn't map out sg\n");
goto unmap_out;
}
@@ -642,9 +642,9 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
rctx->mode = mode;
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
err = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1043,10 +1043,10 @@ static int sahara_queue_manage(void *data)
do {
__set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -1092,9 +1092,9 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
rctx->first = 1;
}
- mutex_lock(&dev->queue_mutex);
+ spin_lock_bh(&dev->queue_spinlock);
ret = crypto_enqueue_request(&dev->queue, &req->base);
- mutex_unlock(&dev->queue_mutex);
+ spin_unlock_bh(&dev->queue_spinlock);
wake_up_process(dev->kthread);
@@ -1449,7 +1449,7 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- mutex_init(&dev->queue_mutex);
+ spin_lock_init(&dev->queue_spinlock);
dev_ptr = dev;
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2a60d0525cde..168195672e2e 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -56,6 +56,10 @@ static void virtio_crypto_akcipher_finalize_req(
struct virtio_crypto_akcipher_request *vc_akcipher_req,
struct akcipher_request *req, int err)
{
+ kfree(vc_akcipher_req->src_buf);
+ kfree(vc_akcipher_req->dst_buf);
+ vc_akcipher_req->src_buf = NULL;
+ vc_akcipher_req->dst_buf = NULL;
virtcrypto_clear_request(&vc_akcipher_req->base);
crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index cb6401c9e9a4..acf31cc1dbcc 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -15,6 +15,7 @@ void hmem_register_device(int target_nid, struct resource *r)
.start = r->start,
.end = r->end,
.flags = IORESOURCE_MEM,
+ .desc = IORES_DESC_SOFT_RESERVED,
};
struct platform_device *pdev;
struct memregion_info info;
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 9a88faaf8b27..39ac069cabc7 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -189,10 +189,9 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
data->clk = devm_clk_get(dev, "pclk_ddr_mon");
- if (IS_ERR(data->clk)) {
- dev_err(dev, "Cannot get the clk dmc_clk\n");
- return PTR_ERR(data->clk);
- }
+ if (IS_ERR(data->clk))
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "Cannot get the clk pclk_ddr_mon\n");
/* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 71abb3fbd042..e5458ada5197 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -291,9 +291,13 @@ static int mtk_ccifreq_probe(struct platform_device *pdev)
}
drv->sram_reg = devm_regulator_get_optional(dev, "sram");
- if (IS_ERR(drv->sram_reg))
+ if (IS_ERR(drv->sram_reg)) {
+ ret = PTR_ERR(drv->sram_reg);
+ if (ret == -EPROBE_DEFER)
+ goto out_free_resources;
+
drv->sram_reg = NULL;
- else {
+ } else {
ret = regulator_enable(drv->sram_reg);
if (ret) {
dev_err(dev, "failed to enable sram regulator\n");
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index efb4990b29e1..dd0f83ee505b 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -53,7 +53,7 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
spin_unlock(&dmabuf->name_lock);
- return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
+ return dynamic_dname(buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
}
@@ -531,11 +531,11 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
* value.
*/
inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ flags &= O_ACCMODE | O_NONBLOCK;
file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
- file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = dmabuf;
file->f_path.dentry->d_fsdata = dmabuf;
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 066400ed8841..406b4e26f538 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -136,6 +136,10 @@ struct dma_fence *dma_fence_get_stub(void)
&dma_fence_stub_ops,
&dma_fence_stub_lock,
0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
dma_fence_signal_locked(&dma_fence_stub);
}
spin_unlock(&dma_fence_stub_lock);
@@ -161,6 +165,10 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
&dma_fence_stub_ops,
&dma_fence_stub_lock,
0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
dma_fence_signal(fence);
return fence;
@@ -500,6 +508,8 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
__dma_fence_might_wait();
+ dma_fence_enable_sw_signaling(fence);
+
trace_dma_fence_wait_start(fence);
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
@@ -601,9 +611,6 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
unsigned long flags;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return;
-
spin_lock_irqsave(fence->lock, flags);
__dma_fence_enable_signaling(fence);
spin_unlock_irqrestore(fence->lock, flags);
@@ -756,19 +763,16 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
unsigned long flags;
signed long ret = timeout ? timeout : 1;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return ret;
-
spin_lock_irqsave(fence->lock, flags);
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ goto out;
+
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
goto out;
}
- if (!__dma_fence_enable_signaling(fence))
- goto out;
-
if (!timeout) {
ret = 0;
goto out;
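Note: with this change dma_fence_wait_timeout() enables software signaling itself before waiting, and the signaled-bit fast paths move under the lock. Code that only polls a fence without a blocking wait appears to need an explicit opt-in now, which is what the selftest hunks later in this patch add. A minimal sketch of that polling pattern; the completion handler is a placeholder.

	/* Opt in to software signaling before polling; without a blocking
	 * wait nothing else arms the fence's signaling path.
	 */
	dma_fence_enable_sw_signaling(fence);

	if (dma_fence_is_signaled(fence))
		handle_fence_done(fence);	/* hypothetical helper */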
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 205acb2c744d..e3885c90a3ac 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
enum dma_resv_usage old_usage;
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
- if ((old->context == fence->context && old_usage >= usage) ||
+ if ((old->context == fence->context && old_usage >= usage &&
+ dma_fence_is_later(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 8ce1ea59d31b..0a9b099d0518 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -87,6 +87,8 @@ static int sanitycheck(void *arg)
if (!chain)
err = -ENOMEM;
+ dma_fence_enable_sw_signaling(chain);
+
dma_fence_signal(f);
dma_fence_put(f);
@@ -143,6 +145,8 @@ static int fence_chains_init(struct fence_chains *fc, unsigned int count,
}
fc->tail = fc->chains[i];
+
+ dma_fence_enable_sw_signaling(fc->chains[i]);
}
fc->chain_length = i;
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index 4105d5ea8dde..f0cee984b6c7 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -102,6 +102,8 @@ static int sanitycheck(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
array = mock_array(1, f);
if (!array)
return -ENOMEM;
@@ -124,12 +126,16 @@ static int unwrap_array(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
dma_fence_put(f1);
return -ENOMEM;
}
+ dma_fence_enable_sw_signaling(f2);
+
array = mock_array(2, f1, f2);
if (!array)
return -ENOMEM;
@@ -164,12 +170,16 @@ static int unwrap_chain(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
dma_fence_put(f1);
return -ENOMEM;
}
+ dma_fence_enable_sw_signaling(f2);
+
chain = mock_chain(f1, f2);
if (!chain)
return -ENOMEM;
@@ -204,12 +214,16 @@ static int unwrap_chain_array(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
dma_fence_put(f1);
return -ENOMEM;
}
+ dma_fence_enable_sw_signaling(f2);
+
array = mock_array(2, f1, f2);
if (!array)
return -ENOMEM;
@@ -248,12 +262,16 @@ static int unwrap_merge(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2) {
err = -ENOMEM;
goto error_put_f1;
}
+ dma_fence_enable_sw_signaling(f2);
+
f3 = dma_fence_unwrap_merge(f1, f2);
if (!f3) {
err = -ENOMEM;
@@ -296,10 +314,14 @@ static int unwrap_merge_complex(void *arg)
if (!f1)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f1);
+
f2 = mock_fence();
if (!f2)
goto error_put_f1;
+ dma_fence_enable_sw_signaling(f2);
+
f3 = dma_fence_unwrap_merge(f1, f2);
if (!f3)
goto error_put_f2;
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index c8a12d7ad71a..fb6e0a6ae2c9 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -102,6 +102,8 @@ static int sanitycheck(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_signal(f);
dma_fence_put(f);
@@ -117,6 +119,8 @@ static int test_signaling(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
if (dma_fence_is_signaled(f)) {
pr_err("Fence unexpectedly signaled on creation\n");
goto err_free;
@@ -190,6 +194,8 @@ static int test_late_add_callback(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_signal(f);
if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
@@ -282,6 +288,8 @@ static int test_status(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
if (dma_fence_get_status(f)) {
pr_err("Fence unexpectedly has signaled status on creation\n");
goto err_free;
@@ -308,6 +316,8 @@ static int test_error(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_set_error(f, -EIO);
if (dma_fence_get_status(f)) {
@@ -337,6 +347,8 @@ static int test_wait(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
pr_err("Wait reported complete before being signaled\n");
goto err_free;
@@ -379,6 +391,8 @@ static int test_wait_timeout(void *arg)
if (!wt.f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(wt.f);
+
if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
pr_err("Wait reported complete before being signaled\n");
goto err_free;
@@ -458,6 +472,8 @@ static int thread_signal_callback(void *arg)
break;
}
+ dma_fence_enable_sw_signaling(f1);
+
rcu_assign_pointer(t->fences[t->id], f1);
smp_wmb();
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index 813779e3c9be..15dbea1462ed 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -45,6 +45,8 @@ static int sanitycheck(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_fence_signal(f);
dma_fence_put(f);
@@ -69,6 +71,8 @@ static int test_signaling(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
@@ -114,6 +118,8 @@ static int test_for_each(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
@@ -173,6 +179,8 @@ static int test_for_each_unlocked(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
@@ -244,6 +252,8 @@ static int test_get_fences(void *arg)
if (!f)
return -ENOMEM;
+ dma_fence_enable_sw_signaling(f);
+
dma_resv_init(&resv);
r = dma_resv_lock(&resv, NULL);
if (r) {
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 3ebec19a8e02..af57799c86ce 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -132,7 +132,7 @@ EXPORT_SYMBOL(sync_file_get_fence);
char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
{
if (sync_file->user_name[0]) {
- strlcpy(buf, sync_file->user_name, len);
+ strscpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
@@ -172,7 +172,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
return NULL;
}
sync_file->fence = fence;
- strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
+ strscpy(sync_file->user_name, name, sizeof(sync_file->user_name));
return sync_file;
}
@@ -262,9 +262,9 @@ err_put_fd:
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+ strscpy(info->driver_name, fence->ops->get_driver_name(fence),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
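
The strlcpy() to strscpy() conversions above rely on strscpy() always NUL-terminating the destination and returning either the number of bytes copied or -E2BIG on truncation, instead of the source length. A small sketch of how a caller can act on that return value:

#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Sketch: copy a user-visible name and detect truncation explicitly. */
static void example_copy_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_len);

	if (n == -E2BIG)
		pr_debug("name \"%s\" truncated to %zu bytes\n", src, dst_len - 1);
}
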
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 38e8767ec371..2bcdb935a3ac 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -124,17 +124,20 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
{
struct udmabuf *ubuf = buf->priv;
struct device *dev = ubuf->device->this_device;
+ int ret = 0;
if (!ubuf->sg) {
ubuf->sg = get_sg_table(dev, buf, direction);
- if (IS_ERR(ubuf->sg))
- return PTR_ERR(ubuf->sg);
+ if (IS_ERR(ubuf->sg)) {
+ ret = PTR_ERR(ubuf->sg);
+ ubuf->sg = NULL;
+ }
} else {
dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
direction);
}
- return 0;
+ return ret;
}
static int end_cpu_udmabuf(struct dma_buf *buf,
@@ -210,7 +213,7 @@ static long udmabuf_create(struct miscdevice *device,
memfd = fget(list[i].memfd);
if (!memfd)
goto err;
- mapping = file_inode(memfd)->i_mapping;
+ mapping = memfd->f_mapping;
if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
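
The begin_cpu_udmabuf() change above matters because ubuf->sg is a lazily created cache: if the error pointer from a failed get_sg_table() were left in place, the next call would dereference it as a valid table. A minimal sketch of that cache-or-fail pattern, with map_buffer() as a hypothetical stand-in for the real mapping helper:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_buf {
	struct sg_table *sg;	/* NULL until the first successful mapping */
};

static int example_begin_cpu_access(struct example_buf *buf, struct device *dev,
				    enum dma_data_direction dir)
{
	int ret = 0;

	if (!buf->sg) {
		struct sg_table *sgt = map_buffer(dev, buf, dir); /* hypothetical */

		if (IS_ERR(sgt))
			ret = PTR_ERR(sgt);	/* cache stays NULL, later calls may retry */
		else
			buf->sg = sgt;
	} else {
		dma_sync_sg_for_cpu(dev, buf->sg->sgl, buf->sg->nents, dir);
	}

	return ret;
}
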
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a06d2a7627aa..7524b62a8870 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -180,7 +180,7 @@ config DMA_SUN6I
config DW_AXI_DMAC
tristate "Synopsys DesignWare AXI DMA support"
- depends on OF || COMPILE_TEST
+ depends on OF
depends on HAS_IOMEM
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 487a01aa207d..eea8bd33b4b7 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2367,7 +2367,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
INIT_LIST_HEAD(&dmadev->channels);
/*
- * Register as many many memcpy as we have physical channels,
+ * Register as many memcpy as we have physical channels,
* we won't always be able to use all but the code will have
* to cope with that situation.
*/
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index d1f74a3aa999..317ca76ccafd 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -12,8 +12,9 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
-#include <linux/interrupt.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include "dmaengine.h"
@@ -95,7 +96,9 @@ struct admac_data {
struct dma_device dma;
struct device *dev;
__iomem void *base;
+ struct reset_control *rstc;
+ int irq;
int irq_index;
int nchannels;
struct admac_chan channels[];
@@ -724,18 +727,17 @@ static int admac_probe(struct platform_device *pdev)
if (irq < 0)
return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");
-
- err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
- 0, dev_name(&pdev->dev), ad);
- if (err)
- return dev_err_probe(&pdev->dev, err,
- "unable to register interrupt\n");
+ ad->irq = irq;
ad->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ad->base))
return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
"unable to obtain MMIO resource\n");
+ ad->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+ if (IS_ERR(ad->rstc))
+ return PTR_ERR(ad->rstc);
+
dma = &ad->dma;
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
@@ -774,17 +776,38 @@ static int admac_probe(struct platform_device *pdev)
tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
}
- err = dma_async_device_register(&ad->dma);
+ err = reset_control_reset(ad->rstc);
if (err)
- return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+ return dev_err_probe(&pdev->dev, err,
+ "unable to trigger reset\n");
+
+ err = request_irq(irq, admac_interrupt, 0, dev_name(&pdev->dev), ad);
+ if (err) {
+ dev_err_probe(&pdev->dev, err,
+ "unable to register interrupt\n");
+ goto free_reset;
+ }
+
+ err = dma_async_device_register(&ad->dma);
+ if (err) {
+ dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");
+ goto free_irq;
+ }
err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
if (err) {
dma_async_device_unregister(&ad->dma);
- return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
+ goto free_irq;
}
return 0;
+
+free_irq:
+ free_irq(ad->irq, ad);
+free_reset:
+ reset_control_rearm(ad->rstc);
+ return err;
}
static int admac_remove(struct platform_device *pdev)
@@ -793,6 +816,8 @@ static int admac_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&ad->dma);
+ free_irq(ad->irq, ad);
+ reset_control_rearm(ad->rstc);
return 0;
}
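
The admac probe rework above switches from a devm-managed IRQ to explicit request_irq()/free_irq() so the teardown order relative to the shared reset line stays under the driver's control. A hedged sketch of that acquire-in-order, unwind-in-reverse shape, with the example_* stubs standing in for driver-specific pieces:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/err.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;			/* placeholder handler */
}

static int example_register(struct platform_device *pdev)
{
	return 0;				/* placeholder registration step */
}

static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;
	int irq, err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	err = reset_control_reset(rstc);
	if (err)
		return err;

	err = request_irq(irq, example_irq_handler, 0, dev_name(&pdev->dev), pdev);
	if (err)
		goto rearm_reset;

	err = example_register(pdev);
	if (err)
		goto free_irq;

	return 0;

free_irq:
	free_irq(irq, pdev);
rearm_reset:
	reset_control_rearm(rstc);	/* undo reset_control_reset() on a shared line */
	return err;
}
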
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b102d8eb5d83..d6c9781cd46a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1470,10 +1470,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_COMPLETE)
- return ret;
-
- if (!txstate)
+ if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&atchan->lock, flags);
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index 07f756479663..c54b24ff5206 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -682,15 +681,12 @@ static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->status != EDMA_ST_IDLE)
return -EBUSY;
- pm_runtime_get(chan->dw->chip->dev);
-
return 0;
}
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5000);
- struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
int ret;
while (time_before(jiffies, timeout)) {
@@ -703,8 +699,6 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
cpu_relax();
}
-
- pm_runtime_put(chan->dw->chip->dev);
}
static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
@@ -977,9 +971,6 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
goto err_irq_free;
- /* Power management */
- pm_runtime_enable(dev);
-
/* Turn debugfs on */
dw_edma_v0_core_debugfs_on(dw);
@@ -1009,9 +1000,6 @@ int dw_edma_remove(struct dw_edma_chip *chip)
for (i = (dw->nr_irqs - 1); i >= 0; i--)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
- /* Power management */
- pm_runtime_disable(dev);
-
/* Deregister eDMA device */
dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index 43817ced3a3e..c1350a36fddd 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2019 HiSilicon Limited. */
+/* Copyright(c) 2019-2022 HiSilicon Limited. */
+
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
@@ -9,32 +10,87 @@
#include <linux/spinlock.h>
#include "virt-dma.h"
-#define HISI_DMA_SQ_BASE_L 0x0
-#define HISI_DMA_SQ_BASE_H 0x4
-#define HISI_DMA_SQ_DEPTH 0x8
-#define HISI_DMA_SQ_TAIL_PTR 0xc
-#define HISI_DMA_CQ_BASE_L 0x10
-#define HISI_DMA_CQ_BASE_H 0x14
-#define HISI_DMA_CQ_DEPTH 0x18
-#define HISI_DMA_CQ_HEAD_PTR 0x1c
-#define HISI_DMA_CTRL0 0x20
-#define HISI_DMA_CTRL0_QUEUE_EN_S 0
-#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4
-#define HISI_DMA_CTRL1 0x24
-#define HISI_DMA_CTRL1_QUEUE_RESET_S 0
-#define HISI_DMA_Q_FSM_STS 0x30
-#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0)
-#define HISI_DMA_INT_STS 0x40
-#define HISI_DMA_INT_STS_MASK GENMASK(12, 0)
-#define HISI_DMA_INT_MSK 0x44
-#define HISI_DMA_MODE 0x217c
-#define HISI_DMA_OFFSET 0x100
-
-#define HISI_DMA_MSI_NUM 32
-#define HISI_DMA_CHAN_NUM 30
-#define HISI_DMA_Q_DEPTH_VAL 1024
-
-#define PCI_BAR_2 2
+/* HiSilicon DMA register common field define */
+#define HISI_DMA_Q_SQ_BASE_L 0x0
+#define HISI_DMA_Q_SQ_BASE_H 0x4
+#define HISI_DMA_Q_SQ_DEPTH 0x8
+#define HISI_DMA_Q_SQ_TAIL_PTR 0xc
+#define HISI_DMA_Q_CQ_BASE_L 0x10
+#define HISI_DMA_Q_CQ_BASE_H 0x14
+#define HISI_DMA_Q_CQ_DEPTH 0x18
+#define HISI_DMA_Q_CQ_HEAD_PTR 0x1c
+#define HISI_DMA_Q_CTRL0 0x20
+#define HISI_DMA_Q_CTRL0_QUEUE_EN BIT(0)
+#define HISI_DMA_Q_CTRL0_QUEUE_PAUSE BIT(4)
+#define HISI_DMA_Q_CTRL1 0x24
+#define HISI_DMA_Q_CTRL1_QUEUE_RESET BIT(0)
+#define HISI_DMA_Q_FSM_STS 0x30
+#define HISI_DMA_Q_FSM_STS_MASK GENMASK(3, 0)
+#define HISI_DMA_Q_ERR_INT_NUM0 0x84
+#define HISI_DMA_Q_ERR_INT_NUM1 0x88
+#define HISI_DMA_Q_ERR_INT_NUM2 0x8c
+
+/* HiSilicon IP08 DMA register and field define */
+#define HISI_DMA_HIP08_MODE 0x217C
+#define HISI_DMA_HIP08_Q_BASE 0x0
+#define HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN BIT(2)
+#define HISI_DMA_HIP08_Q_INT_STS 0x40
+#define HISI_DMA_HIP08_Q_INT_MSK 0x44
+#define HISI_DMA_HIP08_Q_INT_STS_MASK GENMASK(14, 0)
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM3 0x90
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM4 0x94
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM5 0x98
+#define HISI_DMA_HIP08_Q_ERR_INT_NUM6 0x48
+#define HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT BIT(24)
+
+/* HiSilicon IP09 DMA register and field define */
+#define HISI_DMA_HIP09_DMA_FLR_DISABLE 0xA00
+#define HISI_DMA_HIP09_DMA_FLR_DISABLE_B BIT(0)
+#define HISI_DMA_HIP09_Q_BASE 0x2000
+#define HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN GENMASK(31, 28)
+#define HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT BIT(26)
+#define HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT BIT(27)
+#define HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE BIT(2)
+#define HISI_DMA_HIP09_Q_INT_STS 0x40
+#define HISI_DMA_HIP09_Q_INT_MSK 0x44
+#define HISI_DMA_HIP09_Q_INT_STS_MASK 0x1
+#define HISI_DMA_HIP09_Q_ERR_INT_STS 0x48
+#define HISI_DMA_HIP09_Q_ERR_INT_MSK 0x4C
+#define HISI_DMA_HIP09_Q_ERR_INT_STS_MASK GENMASK(18, 1)
+#define HISI_DMA_HIP09_PORT_CFG_REG(port_id) (0x800 + \
+ (port_id) * 0x20)
+#define HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B BIT(16)
+
+#define HISI_DMA_HIP09_MAX_PORT_NUM 16
+
+#define HISI_DMA_HIP08_MSI_NUM 32
+#define HISI_DMA_HIP08_CHAN_NUM 30
+#define HISI_DMA_HIP09_MSI_NUM 4
+#define HISI_DMA_HIP09_CHAN_NUM 4
+#define HISI_DMA_REVISION_HIP08B 0x21
+#define HISI_DMA_REVISION_HIP09A 0x30
+
+#define HISI_DMA_Q_OFFSET 0x100
+#define HISI_DMA_Q_DEPTH_VAL 1024
+
+#define PCI_BAR_2 2
+
+#define HISI_DMA_POLL_Q_STS_DELAY_US 10
+#define HISI_DMA_POLL_Q_STS_TIME_OUT_US 1000
+
+#define HISI_DMA_MAX_DIR_NAME_LEN 128
+
+/*
+ * The HIP08B (HiSilicon IP08) and HIP09A (HiSilicon IP09) are DMA iEPs.
+ * They have the same PCI device ID but different PCI revisions.
+ * Unfortunately, they have different register layouts, so two layout
+ * enumerations are defined.
+ */
+enum hisi_dma_reg_layout {
+ HISI_DMA_REG_LAYOUT_INVALID = 0,
+ HISI_DMA_REG_LAYOUT_HIP08,
+ HISI_DMA_REG_LAYOUT_HIP09
+};
enum hisi_dma_mode {
EP = 0,
@@ -105,9 +161,162 @@ struct hisi_dma_dev {
struct dma_device dma_dev;
u32 chan_num;
u32 chan_depth;
+ enum hisi_dma_reg_layout reg_layout;
+ void __iomem *queue_base; /* queue region start of register */
struct hisi_dma_chan chan[];
};
+#ifdef CONFIG_DEBUG_FS
+
+static const struct debugfs_reg32 hisi_dma_comm_chan_regs[] = {
+ {"DMA_QUEUE_SQ_DEPTH ", 0x0008ull},
+ {"DMA_QUEUE_SQ_TAIL_PTR ", 0x000Cull},
+ {"DMA_QUEUE_CQ_DEPTH ", 0x0018ull},
+ {"DMA_QUEUE_CQ_HEAD_PTR ", 0x001Cull},
+ {"DMA_QUEUE_CTRL0 ", 0x0020ull},
+ {"DMA_QUEUE_CTRL1 ", 0x0024ull},
+ {"DMA_QUEUE_FSM_STS ", 0x0030ull},
+ {"DMA_QUEUE_SQ_STS ", 0x0034ull},
+ {"DMA_QUEUE_CQ_TAIL_PTR ", 0x003Cull},
+ {"DMA_QUEUE_INT_STS ", 0x0040ull},
+ {"DMA_QUEUE_INT_MSK ", 0x0044ull},
+ {"DMA_QUEUE_INT_RO ", 0x006Cull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip08_chan_regs[] = {
+ {"DMA_QUEUE_BYTE_CNT ", 0x0038ull},
+ {"DMA_ERR_INT_NUM6 ", 0x0048ull},
+ {"DMA_QUEUE_DESP0 ", 0x0050ull},
+ {"DMA_QUEUE_DESP1 ", 0x0054ull},
+ {"DMA_QUEUE_DESP2 ", 0x0058ull},
+ {"DMA_QUEUE_DESP3 ", 0x005Cull},
+ {"DMA_QUEUE_DESP4 ", 0x0074ull},
+ {"DMA_QUEUE_DESP5 ", 0x0078ull},
+ {"DMA_QUEUE_DESP6 ", 0x007Cull},
+ {"DMA_QUEUE_DESP7 ", 0x0080ull},
+ {"DMA_ERR_INT_NUM0 ", 0x0084ull},
+ {"DMA_ERR_INT_NUM1 ", 0x0088ull},
+ {"DMA_ERR_INT_NUM2 ", 0x008Cull},
+ {"DMA_ERR_INT_NUM3 ", 0x0090ull},
+ {"DMA_ERR_INT_NUM4 ", 0x0094ull},
+ {"DMA_ERR_INT_NUM5 ", 0x0098ull},
+ {"DMA_QUEUE_SQ_STS2 ", 0x00A4ull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip09_chan_regs[] = {
+ {"DMA_QUEUE_ERR_INT_STS ", 0x0048ull},
+ {"DMA_QUEUE_ERR_INT_MSK ", 0x004Cull},
+ {"DFX_SQ_READ_ERR_PTR ", 0x0068ull},
+ {"DFX_DMA_ERR_INT_NUM0 ", 0x0084ull},
+ {"DFX_DMA_ERR_INT_NUM1 ", 0x0088ull},
+ {"DFX_DMA_ERR_INT_NUM2 ", 0x008Cull},
+ {"DFX_DMA_QUEUE_SQ_STS2 ", 0x00A4ull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip08_comm_regs[] = {
+ {"DMA_ECC_ERR_ADDR ", 0x2004ull},
+ {"DMA_ECC_ECC_CNT ", 0x2014ull},
+ {"COMMON_AND_CH_ERR_STS ", 0x2030ull},
+ {"LOCAL_CPL_ID_STS_0 ", 0x20E0ull},
+ {"LOCAL_CPL_ID_STS_1 ", 0x20E4ull},
+ {"LOCAL_CPL_ID_STS_2 ", 0x20E8ull},
+ {"LOCAL_CPL_ID_STS_3 ", 0x20ECull},
+ {"LOCAL_TLP_NUM ", 0x2158ull},
+ {"SQCQ_TLP_NUM ", 0x2164ull},
+ {"CPL_NUM ", 0x2168ull},
+ {"INF_BACK_PRESS_STS ", 0x2170ull},
+ {"DMA_CH_RAS_LEVEL ", 0x2184ull},
+ {"DMA_CM_RAS_LEVEL ", 0x2188ull},
+ {"DMA_CH_ERR_STS ", 0x2190ull},
+ {"DMA_CH_DONE_STS ", 0x2194ull},
+ {"DMA_SQ_TAG_STS_0 ", 0x21A0ull},
+ {"DMA_SQ_TAG_STS_1 ", 0x21A4ull},
+ {"DMA_SQ_TAG_STS_2 ", 0x21A8ull},
+ {"DMA_SQ_TAG_STS_3 ", 0x21ACull},
+ {"LOCAL_P_ID_STS_0 ", 0x21B0ull},
+ {"LOCAL_P_ID_STS_1 ", 0x21B4ull},
+ {"LOCAL_P_ID_STS_2 ", 0x21B8ull},
+ {"LOCAL_P_ID_STS_3 ", 0x21BCull},
+ {"DMA_PREBUFF_INFO_0 ", 0x2200ull},
+ {"DMA_CM_TABLE_INFO_0 ", 0x2220ull},
+ {"DMA_CM_CE_RO ", 0x2244ull},
+ {"DMA_CM_NFE_RO ", 0x2248ull},
+ {"DMA_CM_FE_RO ", 0x224Cull},
+};
+
+static const struct debugfs_reg32 hisi_dma_hip09_comm_regs[] = {
+ {"COMMON_AND_CH_ERR_STS ", 0x0030ull},
+ {"DMA_PORT_IDLE_STS ", 0x0150ull},
+ {"DMA_CH_RAS_LEVEL ", 0x0184ull},
+ {"DMA_CM_RAS_LEVEL ", 0x0188ull},
+ {"DMA_CM_CE_RO ", 0x0244ull},
+ {"DMA_CM_NFE_RO ", 0x0248ull},
+ {"DMA_CM_FE_RO ", 0x024Cull},
+ {"DFX_INF_BACK_PRESS_STS0 ", 0x1A40ull},
+ {"DFX_INF_BACK_PRESS_STS1 ", 0x1A44ull},
+ {"DFX_INF_BACK_PRESS_STS2 ", 0x1A48ull},
+ {"DFX_DMA_WRR_DISABLE ", 0x1A4Cull},
+ {"DFX_PA_REQ_TLP_NUM ", 0x1C00ull},
+ {"DFX_PA_BACK_TLP_NUM ", 0x1C04ull},
+ {"DFX_PA_RETRY_TLP_NUM ", 0x1C08ull},
+ {"DFX_LOCAL_NP_TLP_NUM ", 0x1C0Cull},
+ {"DFX_LOCAL_CPL_HEAD_TLP_NUM ", 0x1C10ull},
+ {"DFX_LOCAL_CPL_DATA_TLP_NUM ", 0x1C14ull},
+ {"DFX_LOCAL_CPL_EXT_DATA_TLP_NUM ", 0x1C18ull},
+ {"DFX_LOCAL_P_HEAD_TLP_NUM ", 0x1C1Cull},
+ {"DFX_LOCAL_P_ACK_TLP_NUM ", 0x1C20ull},
+ {"DFX_BUF_ALOC_PORT_REQ_NUM ", 0x1C24ull},
+ {"DFX_BUF_ALOC_PORT_RESULT_NUM ", 0x1C28ull},
+ {"DFX_BUF_FAIL_SIZE_NUM ", 0x1C2Cull},
+ {"DFX_BUF_ALOC_SIZE_NUM ", 0x1C30ull},
+ {"DFX_BUF_NP_RELEASE_SIZE_NUM ", 0x1C34ull},
+ {"DFX_BUF_P_RELEASE_SIZE_NUM ", 0x1C38ull},
+ {"DFX_BUF_PORT_RELEASE_SIZE_NUM ", 0x1C3Cull},
+ {"DFX_DMA_PREBUF_MEM0_ECC_ERR_ADDR ", 0x1CA8ull},
+ {"DFX_DMA_PREBUF_MEM0_ECC_CNT ", 0x1CACull},
+ {"DFX_DMA_LOC_NP_OSTB_ECC_ERR_ADDR ", 0x1CB0ull},
+ {"DFX_DMA_LOC_NP_OSTB_ECC_CNT ", 0x1CB4ull},
+ {"DFX_DMA_PREBUF_MEM1_ECC_ERR_ADDR ", 0x1CC0ull},
+ {"DFX_DMA_PREBUF_MEM1_ECC_CNT ", 0x1CC4ull},
+ {"DMA_CH_DONE_STS ", 0x02E0ull},
+ {"DMA_CH_ERR_STS ", 0x0320ull},
+};
+#endif /* CONFIG_DEBUG_FS */
+
+static enum hisi_dma_reg_layout hisi_dma_get_reg_layout(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_REG_LAYOUT_HIP08;
+ else if (pdev->revision >= HISI_DMA_REVISION_HIP09A)
+ return HISI_DMA_REG_LAYOUT_HIP09;
+
+ return HISI_DMA_REG_LAYOUT_INVALID;
+}
+
+static u32 hisi_dma_get_chan_num(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_HIP08_CHAN_NUM;
+
+ return HISI_DMA_HIP09_CHAN_NUM;
+}
+
+static u32 hisi_dma_get_msi_num(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_HIP08_MSI_NUM;
+
+ return HISI_DMA_HIP09_MSI_NUM;
+}
+
+static u32 hisi_dma_get_queue_base(struct pci_dev *pdev)
+{
+ if (pdev->revision == HISI_DMA_REVISION_HIP08B)
+ return HISI_DMA_HIP08_Q_BASE;
+
+ return HISI_DMA_HIP09_Q_BASE;
+}
+
static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c)
{
return container_of(c, struct hisi_dma_chan, vc.chan);
@@ -121,7 +330,7 @@ static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd)
static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index,
u32 val)
{
- writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET);
+ writel_relaxed(val, base + reg + index * HISI_DMA_Q_OFFSET);
}
static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
@@ -129,70 +338,103 @@ static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
u32 tmp;
tmp = readl_relaxed(addr);
- tmp = val ? tmp | BIT(pos) : tmp & ~BIT(pos);
+ tmp = val ? tmp | pos : tmp & ~pos;
writel_relaxed(tmp, addr);
}
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool pause)
{
- void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
- HISI_DMA_OFFSET;
+ void __iomem *addr;
- hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause);
+ addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
+ index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_PAUSE, pause);
}
static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool enable)
{
- void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index *
- HISI_DMA_OFFSET;
+ void __iomem *addr;
- hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable);
+ addr = hdma_dev->queue_base + HISI_DMA_Q_CTRL0 +
+ index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL0_QUEUE_EN, enable);
}
static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index,
- HISI_DMA_INT_STS_MASK);
+ void __iomem *q_base = hdma_dev->queue_base;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
+ qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
+ else {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
+ qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
+ qp_index,
+ HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
+ }
}
static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- void __iomem *base = hdma_dev->base;
-
- hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index,
- HISI_DMA_INT_STS_MASK);
- hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0);
+ void __iomem *q_base = hdma_dev->queue_base;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_STS,
+ qp_index, HISI_DMA_HIP08_Q_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_INT_MSK,
+ qp_index, 0);
+ } else {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_STS,
+ qp_index, HISI_DMA_HIP09_Q_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_STS,
+ qp_index,
+ HISI_DMA_HIP09_Q_ERR_INT_STS_MASK);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_INT_MSK,
+ qp_index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP09_Q_ERR_INT_MSK,
+ qp_index, 0);
+ }
}
static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index)
{
- void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index *
- HISI_DMA_OFFSET;
+ void __iomem *addr;
- hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1);
+ addr = hdma_dev->queue_base +
+ HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_Q_CTRL1_QUEUE_RESET, 1);
}
static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index)
{
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0);
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+ void __iomem *q_base = hdma_dev->queue_base;
+
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);
}
-static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
+static void hisi_dma_reset_or_disable_hw_chan(struct hisi_dma_chan *chan,
+ bool disable)
{
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
u32 index = chan->qp_num, tmp;
+ void __iomem *addr;
int ret;
hisi_dma_pause_dma(hdma_dev, index, true);
hisi_dma_enable_dma(hdma_dev, index, false);
hisi_dma_mask_irq(hdma_dev, index);
- ret = readl_relaxed_poll_timeout(hdma_dev->base +
- HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
- FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000);
+ addr = hdma_dev->queue_base +
+ HISI_DMA_Q_FSM_STS + index * HISI_DMA_Q_OFFSET;
+
+ ret = readl_relaxed_poll_timeout(addr, tmp,
+ FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) != RUN,
+ HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
if (ret) {
dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n");
WARN_ON(1);
@@ -201,12 +443,15 @@ static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan)
hisi_dma_do_reset(hdma_dev, index);
hisi_dma_reset_qp_point(hdma_dev, index);
hisi_dma_pause_dma(hdma_dev, index, false);
- hisi_dma_enable_dma(hdma_dev, index, true);
- hisi_dma_unmask_irq(hdma_dev, index);
- ret = readl_relaxed_poll_timeout(hdma_dev->base +
- HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp,
- FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000);
+ if (!disable) {
+ hisi_dma_enable_dma(hdma_dev, index, true);
+ hisi_dma_unmask_irq(hdma_dev, index);
+ }
+
+ ret = readl_relaxed_poll_timeout(addr, tmp,
+ FIELD_GET(HISI_DMA_Q_FSM_STS_MASK, tmp) == IDLE,
+ HISI_DMA_POLL_Q_STS_DELAY_US, HISI_DMA_POLL_Q_STS_TIME_OUT_US);
if (ret) {
dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n");
WARN_ON(1);
@@ -218,7 +463,7 @@ static void hisi_dma_free_chan_resources(struct dma_chan *c)
struct hisi_dma_chan *chan = to_hisi_dma_chan(c);
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
- hisi_dma_reset_hw_chan(chan);
+ hisi_dma_reset_or_disable_hw_chan(chan, false);
vchan_free_chan_resources(&chan->vc);
memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth);
@@ -267,7 +512,6 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
vd = vchan_next_desc(&chan->vc);
if (!vd) {
- dev_err(&hdma_dev->pdev->dev, "no issued task!\n");
chan->desc = NULL;
return;
}
@@ -288,8 +532,8 @@ static void hisi_dma_start_transfer(struct hisi_dma_chan *chan)
chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth;
/* update sq_tail to trigger a new task */
- hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num,
- chan->sq_tail);
+ hisi_dma_chan_write(hdma_dev->queue_base, HISI_DMA_Q_SQ_TAIL_PTR,
+ chan->qp_num, chan->sq_tail);
}
static void hisi_dma_issue_pending(struct dma_chan *c)
@@ -299,7 +543,7 @@ static void hisi_dma_issue_pending(struct dma_chan *c)
spin_lock_irqsave(&chan->vc.lock, flags);
- if (vchan_issue_pending(&chan->vc))
+ if (vchan_issue_pending(&chan->vc) && !chan->desc)
hisi_dma_start_transfer(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -363,26 +607,86 @@ static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev)
static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index)
{
struct hisi_dma_chan *chan = &hdma_dev->chan[index];
+ void __iomem *q_base = hdma_dev->queue_base;
u32 hw_depth = hdma_dev->chan_depth - 1;
- void __iomem *base = hdma_dev->base;
+ void __iomem *addr;
+ u32 tmp;
/* set sq, cq base */
- hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_L, index,
lower_32_bits(chan->sq_dma));
- hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_BASE_H, index,
upper_32_bits(chan->sq_dma));
- hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_L, index,
lower_32_bits(chan->cq_dma));
- hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index,
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_BASE_H, index,
upper_32_bits(chan->cq_dma));
/* set sq, cq depth */
- hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, hw_depth);
- hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_DEPTH, index, hw_depth);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_DEPTH, index, hw_depth);
/* init sq tail and cq head */
- hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0);
- hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_SQ_TAIL_PTR, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR, index, 0);
+
+ /* init error interrupt stats */
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM0, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM1, index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_ERR_INT_NUM2, index, 0);
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM3,
+ index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM4,
+ index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM5,
+ index, 0);
+ hisi_dma_chan_write(q_base, HISI_DMA_HIP08_Q_ERR_INT_NUM6,
+ index, 0);
+ /*
+ * init SQ/CQ direction selecting register.
+ * "0" selects the local side and "1" the remote side.
+ */
+ addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_HIP08_Q_CTRL0_SQCQ_DRCT, 0);
+
+ /*
+ * 0 - Continue to next descriptor if error occurs.
+ * 1 - Abort the DMA queue if error occurs.
+ */
+ hisi_dma_update_bit(addr,
+ HISI_DMA_HIP08_Q_CTRL0_ERR_ABORT_EN, 0);
+ } else {
+ addr = q_base + HISI_DMA_Q_CTRL0 + index * HISI_DMA_Q_OFFSET;
+
+ /*
+ * init SQ/CQ direction selecting register.
+ * "0" selects the local side and "1" the remote side.
+ */
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_SQ_DRCT, 0);
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL0_CQ_DRCT, 0);
+
+ /*
+ * 0 - Continue to next descriptor if error occurs.
+ * 1 - Abort the DMA queue if error occurs.
+ */
+
+ tmp = readl_relaxed(addr);
+ tmp &= ~HISI_DMA_HIP09_Q_CTRL0_ERR_ABORT_EN;
+ writel_relaxed(tmp, addr);
+
+ /*
+ * 0 - DMA processes FLR together with the CPU.
+ * 1 - DMA does not process FLR; only the CPU processes FLR.
+ */
+ addr = q_base + HISI_DMA_HIP09_DMA_FLR_DISABLE +
+ index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_DMA_FLR_DISABLE_B, 0);
+
+ addr = q_base + HISI_DMA_Q_CTRL1 + index * HISI_DMA_Q_OFFSET;
+ hisi_dma_update_bit(addr, HISI_DMA_HIP09_Q_CTRL1_VA_ENABLE, 1);
+ }
}
static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
@@ -394,7 +698,7 @@ static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index)
{
- hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]);
+ hisi_dma_reset_or_disable_hw_chan(&hdma_dev->chan[qp_index], true);
}
static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev)
@@ -426,24 +730,23 @@ static irqreturn_t hisi_dma_irq(int irq, void *data)
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
struct hisi_dma_desc *desc;
struct hisi_dma_cqe *cqe;
+ void __iomem *q_base;
spin_lock(&chan->vc.lock);
desc = chan->desc;
cqe = chan->cq + chan->cq_head;
+ q_base = hdma_dev->queue_base;
if (desc) {
+ chan->cq_head = (chan->cq_head + 1) % hdma_dev->chan_depth;
+ hisi_dma_chan_write(q_base, HISI_DMA_Q_CQ_HEAD_PTR,
+ chan->qp_num, chan->cq_head);
if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) {
- chan->cq_head = (chan->cq_head + 1) %
- hdma_dev->chan_depth;
- hisi_dma_chan_write(hdma_dev->base,
- HISI_DMA_CQ_HEAD_PTR, chan->qp_num,
- chan->cq_head);
vchan_cookie_complete(&desc->vd);
+ hisi_dma_start_transfer(chan);
} else {
dev_err(&hdma_dev->pdev->dev, "task error!\n");
}
-
- chan->desc = NULL;
}
spin_unlock(&chan->vc.lock);
@@ -497,16 +800,169 @@ static void hisi_dma_disable_hw_channels(void *data)
static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev,
enum hisi_dma_mode mode)
{
- writel_relaxed(mode == RC ? 1 : 0, hdma_dev->base + HISI_DMA_MODE);
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ writel_relaxed(mode == RC ? 1 : 0,
+ hdma_dev->base + HISI_DMA_HIP08_MODE);
}
+static void hisi_dma_init_hw(struct hisi_dma_dev *hdma_dev)
+{
+ void __iomem *addr;
+ int i;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) {
+ for (i = 0; i < HISI_DMA_HIP09_MAX_PORT_NUM; i++) {
+ addr = hdma_dev->base + HISI_DMA_HIP09_PORT_CFG_REG(i);
+ hisi_dma_update_bit(addr,
+ HISI_DMA_HIP09_PORT_CFG_LINK_DOWN_MASK_B, 1);
+ }
+ }
+}
+
+static void hisi_dma_init_dma_dev(struct hisi_dma_dev *hdma_dev)
+{
+ struct dma_device *dma_dev;
+
+ dma_dev = &hdma_dev->dma_dev;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
+ dma_dev->device_tx_status = hisi_dma_tx_status;
+ dma_dev->device_issue_pending = hisi_dma_issue_pending;
+ dma_dev->device_terminate_all = hisi_dma_terminate_all;
+ dma_dev->device_synchronize = hisi_dma_synchronize;
+ dma_dev->directions = BIT(DMA_MEM_TO_MEM);
+ dma_dev->dev = &hdma_dev->pdev->dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+}
+
+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+static struct debugfs_reg32 *hisi_dma_get_ch_regs(struct hisi_dma_dev *hdma_dev,
+ u32 *regs_sz)
+{
+ struct device *dev = &hdma_dev->pdev->dev;
+ struct debugfs_reg32 *regs;
+ u32 regs_sz_comm;
+
+ regs_sz_comm = ARRAY_SIZE(hisi_dma_comm_chan_regs);
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ *regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip08_chan_regs);
+ else
+ *regs_sz = regs_sz_comm + ARRAY_SIZE(hisi_dma_hip09_chan_regs);
+
+ regs = devm_kcalloc(dev, *regs_sz, sizeof(struct debugfs_reg32),
+ GFP_KERNEL);
+ if (!regs)
+ return NULL;
+ memcpy(regs, hisi_dma_comm_chan_regs, sizeof(hisi_dma_comm_chan_regs));
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08)
+ memcpy(regs + regs_sz_comm, hisi_dma_hip08_chan_regs,
+ sizeof(hisi_dma_hip08_chan_regs));
+ else
+ memcpy(regs + regs_sz_comm, hisi_dma_hip09_chan_regs,
+ sizeof(hisi_dma_hip09_chan_regs));
+
+ return regs;
+}
+
+static int hisi_dma_create_chan_dir(struct hisi_dma_dev *hdma_dev)
+{
+ char dir_name[HISI_DMA_MAX_DIR_NAME_LEN];
+ struct debugfs_regset32 *regsets;
+ struct debugfs_reg32 *regs;
+ struct dentry *chan_dir;
+ struct device *dev;
+ u32 regs_sz;
+ int ret;
+ int i;
+
+ dev = &hdma_dev->pdev->dev;
+
+ regsets = devm_kcalloc(dev, hdma_dev->chan_num,
+ sizeof(*regsets), GFP_KERNEL);
+ if (!regsets)
+ return -ENOMEM;
+
+ regs = hisi_dma_get_ch_regs(hdma_dev, &regs_sz);
+ if (!regs)
+ return -ENOMEM;
+
+ for (i = 0; i < hdma_dev->chan_num; i++) {
+ regsets[i].regs = regs;
+ regsets[i].nregs = regs_sz;
+ regsets[i].base = hdma_dev->queue_base + i * HISI_DMA_Q_OFFSET;
+ regsets[i].dev = dev;
+
+ memset(dir_name, 0, HISI_DMA_MAX_DIR_NAME_LEN);
+ ret = sprintf(dir_name, "channel%d", i);
+ if (ret < 0)
+ return ret;
+
+ chan_dir = debugfs_create_dir(dir_name,
+ hdma_dev->dma_dev.dbg_dev_root);
+ debugfs_create_regset32("regs", 0444, chan_dir, &regsets[i]);
+ }
+
+ return 0;
+}
+
+static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev)
+{
+ struct debugfs_regset32 *regset;
+ struct device *dev;
+ int ret;
+
+ dev = &hdma_dev->pdev->dev;
+
+ if (hdma_dev->dma_dev.dbg_dev_root == NULL)
+ return;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return;
+
+ if (hdma_dev->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) {
+ regset->regs = hisi_dma_hip08_comm_regs;
+ regset->nregs = ARRAY_SIZE(hisi_dma_hip08_comm_regs);
+ } else {
+ regset->regs = hisi_dma_hip09_comm_regs;
+ regset->nregs = ARRAY_SIZE(hisi_dma_hip09_comm_regs);
+ }
+ regset->base = hdma_dev->base;
+ regset->dev = dev;
+
+ debugfs_create_regset32("regs", 0444,
+ hdma_dev->dma_dev.dbg_dev_root, regset);
+
+ ret = hisi_dma_create_chan_dir(hdma_dev);
+ if (ret < 0)
+ dev_info(&hdma_dev->pdev->dev, "failed to create debugfs for channels!\n");
+}
+#else
+static void hisi_dma_create_debugfs(struct hisi_dma_dev *hdma_dev) { }
+#endif /* CONFIG_DEBUG_FS */
+/* --- debugfs implementation --- */
+
static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ enum hisi_dma_reg_layout reg_layout;
struct device *dev = &pdev->dev;
struct hisi_dma_dev *hdma_dev;
struct dma_device *dma_dev;
+ u32 chan_num;
+ u32 msi_num;
int ret;
+ reg_layout = hisi_dma_get_reg_layout(pdev);
+ if (reg_layout == HISI_DMA_REG_LAYOUT_INVALID) {
+ dev_err(dev, "unsupported device!\n");
+ return -EINVAL;
+ }
+
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(dev, "failed to enable device mem!\n");
@@ -523,40 +979,37 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, HISI_DMA_CHAN_NUM), GFP_KERNEL);
+ chan_num = hisi_dma_get_chan_num(pdev);
+ hdma_dev = devm_kzalloc(dev, struct_size(hdma_dev, chan, chan_num),
+ GFP_KERNEL);
if (!hdma_dev)
return -EINVAL;
hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2];
hdma_dev->pdev = pdev;
- hdma_dev->chan_num = HISI_DMA_CHAN_NUM;
hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL;
+ hdma_dev->chan_num = chan_num;
+ hdma_dev->reg_layout = reg_layout;
+ hdma_dev->queue_base = hdma_dev->base + hisi_dma_get_queue_base(pdev);
pci_set_drvdata(pdev, hdma_dev);
pci_set_master(pdev);
+ msi_num = hisi_dma_get_msi_num(pdev);
+
/* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
- ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
- PCI_IRQ_MSI);
+ ret = pci_alloc_irq_vectors(pdev, msi_num, msi_num, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to allocate MSI vectors!\n");
return ret;
}
- dma_dev = &hdma_dev->dma_dev;
- dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
- dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
- dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy;
- dma_dev->device_tx_status = hisi_dma_tx_status;
- dma_dev->device_issue_pending = hisi_dma_issue_pending;
- dma_dev->device_terminate_all = hisi_dma_terminate_all;
- dma_dev->device_synchronize = hisi_dma_synchronize;
- dma_dev->directions = BIT(DMA_MEM_TO_MEM);
- dma_dev->dev = dev;
- INIT_LIST_HEAD(&dma_dev->channels);
+ hisi_dma_init_dma_dev(hdma_dev);
hisi_dma_set_mode(hdma_dev, RC);
+ hisi_dma_init_hw(hdma_dev);
+
ret = hisi_dma_enable_hw_channels(hdma_dev);
if (ret < 0) {
dev_err(dev, "failed to enable hw channel!\n");
@@ -568,11 +1021,16 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ dma_dev = &hdma_dev->dma_dev;
ret = dmaenginem_async_device_register(dma_dev);
- if (ret < 0)
+ if (ret < 0) {
dev_err(dev, "failed to register device!\n");
+ return ret;
+ }
+
+ hisi_dma_create_debugfs(hdma_dev);
- return ret;
+ return 0;
}
static const struct pci_device_id hisi_dma_pci_tbl[] = {
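
A key enabler for supporting both register layouts above is that hisi_dma_update_bit() now takes a ready-made mask instead of a bit position, so BIT() and GENMASK() fields go through the same read-modify-write. A short sketch of that helper shape, assuming set/clear of the whole mask is the desired semantic:

#include <linux/io.h>
#include <linux/bits.h>

/* Set or clear every bit covered by @mask at @addr. */
static void example_update_bits(void __iomem *addr, u32 mask, bool set)
{
	u32 tmp = readl_relaxed(addr);

	tmp = set ? (tmp | mask) : (tmp & ~mask);
	writel_relaxed(tmp, addr);
}

/* Works the same for a single flag or a multi-bit field, e.g.:      */
/*   example_update_bits(ctrl_addr, BIT(4), true);                   */
/*   example_update_bits(ctrl_addr, GENMASK(31, 28), false);         */
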
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 92caae55aece..af5a2e252c25 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -16,12 +16,20 @@
* port 3, and so on.
*/
+#include <linux/bits.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/percpu-defs.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
#include "hsu.h"
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 9e5956345748..3bca577b98a1 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -10,7 +10,11 @@
#ifndef __DMA_HSU_H__
#define __DMA_HSU_H__
-#include <linux/spinlock.h>
+#include <linux/bits.h>
+#include <linux/container_of.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
#include <linux/dma/hsu.h>
#include "../virt-dma.h"
@@ -36,11 +40,11 @@
/* Bits in HSU_CH_SR */
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
-#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define HSU_CH_SR_DESCTO_ANY GENMASK(11, 8)
#define HSU_CH_SR_CHE BIT(15)
#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
-#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
-#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
+#define HSU_CH_SR_DESCE_ANY GENMASK(19, 16)
+#define HSU_CH_SR_CDESC_ANY GENMASK(31, 30)
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index 6a2df3dd78d0..0fcc0c0c22fc 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -26,29 +27,32 @@
static irqreturn_t hsu_pci_irq(int irq, void *dev)
{
struct hsu_dma_chip *chip = dev;
- u32 dmaisr;
- u32 status;
+ unsigned long dmaisr;
unsigned short i;
+ u32 status;
int ret = 0;
int err;
dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
- for (i = 0; i < chip->hsu->nr_channels; i++) {
- if (dmaisr & 0x1) {
- err = hsu_dma_get_status(chip, i, &status);
- if (err > 0)
- ret |= 1;
- else if (err == 0)
- ret |= hsu_dma_do_irq(chip, i, status);
- }
- dmaisr >>= 1;
+ for_each_set_bit(i, &dmaisr, chip->hsu->nr_channels) {
+ err = hsu_dma_get_status(chip, i, &status);
+ if (err > 0)
+ ret |= 1;
+ else if (err == 0)
+ ret |= hsu_dma_do_irq(chip, i, status);
}
return IRQ_RETVAL(ret);
}
+static void hsu_pci_dma_remove(void *chip)
+{
+ hsu_dma_remove(chip);
+}
+
static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct hsu_dma_chip *chip;
int ret;
@@ -87,9 +91,13 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- ret = request_irq(chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
+ ret = devm_add_action_or_reset(dev, hsu_pci_dma_remove, chip);
if (ret)
- goto err_register_irq;
+ return ret;
+
+ ret = devm_request_irq(dev, chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip);
+ if (ret)
+ return ret;
/*
* On Intel Tangier B0 and Anniedale the interrupt line, disregarding
@@ -105,18 +113,6 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, chip);
return 0;
-
-err_register_irq:
- hsu_dma_remove(chip);
- return ret;
-}
-
-static void hsu_pci_remove(struct pci_dev *pdev)
-{
- struct hsu_dma_chip *chip = pci_get_drvdata(pdev);
-
- free_irq(chip->irq, chip);
- hsu_dma_remove(chip);
}
static const struct pci_device_id hsu_pci_id_table[] = {
@@ -130,7 +126,6 @@ static struct pci_driver hsu_pci_driver = {
.name = "hsu_dma_pci",
.id_table = hsu_pci_id_table,
.probe = hsu_pci_probe,
- .remove = hsu_pci_remove,
};
module_pci_driver(hsu_pci_driver);
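
Dropping hsu_pci_remove() above works because devm actions run in reverse registration order on unbind: the IRQ requested with devm_request_irq() after devm_add_action_or_reset() is released first, and only then is the DMA controller removed. A hedged sketch of that ordering, with the example_* names standing in for driver-specific pieces:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;		/* placeholder handler */
}

static void example_teardown(void *data)
{
	/* Runs on unbind, after the devm-managed IRQ has been freed;
	 * a real driver would remove its controller here. */
}

static int example_setup(struct pci_dev *pdev, void *ctrl)
{
	int ret;

	ret = devm_add_action_or_reset(&pdev->dev, example_teardown, ctrl);
	if (ret)
		return ret;

	return devm_request_irq(&pdev->dev, pdev->irq, example_irq, 0,
				"example", ctrl);
}
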
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 5a8cc52c1abf..2c1e6f6daa62 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -196,6 +196,7 @@ int idxd_wq_enable(struct idxd_wq *wq)
}
wq->state = IDXD_WQ_ENABLED;
+ set_bit(wq->id, idxd->wq_enable_map);
dev_dbg(dev, "WQ %d enabled\n", wq->id);
return 0;
}
@@ -223,6 +224,7 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
if (reset_config)
idxd_wq_disable_cleanup(wq);
+ clear_bit(wq->id, idxd->wq_enable_map);
wq->state = IDXD_WQ_DISABLED;
dev_dbg(dev, "WQ %d disabled\n", wq->id);
return 0;
@@ -258,7 +260,6 @@ void idxd_wq_reset(struct idxd_wq *wq)
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
idxd_wq_disable_cleanup(wq);
- wq->state = IDXD_WQ_DISABLED;
}
int idxd_wq_map_portal(struct idxd_wq *wq)
@@ -378,17 +379,20 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
lockdep_assert_held(&wq->wq_lock);
+ wq->state = IDXD_WQ_DISABLED;
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
wq->threshold = 0;
wq->priority = 0;
- wq->ats_dis = 0;
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+ clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+ if (wq->opcap_bmap)
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
@@ -705,6 +709,8 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->tc_a = -1;
group->tc_b = -1;
}
+ group->desc_progress_limit = 0;
+ group->batch_progress_limit = 0;
}
}
@@ -761,10 +767,10 @@ static void idxd_group_config_write(struct idxd_group *group)
/* setup GRPFLAGS */
grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
- iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
- dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
group->id, grpcfg_offset,
- ioread32(idxd->reg_base + grpcfg_offset));
+ ioread64(idxd->reg_base + grpcfg_offset));
}
static int idxd_groups_config_write(struct idxd_device *idxd)
@@ -807,7 +813,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
u32 wq_offset;
- int i;
+ int i, n;
if (!wq->group)
return 0;
@@ -859,12 +865,23 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->bof = 1;
if (idxd->hw.wq_cap.wq_ats_support)
- wq->wqcfg->wq_ats_disable = wq->ats_dis;
+ wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+ /* bytes 32-63 */
+ if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
+ memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
+ for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
+ int pos = n % BITS_PER_LONG_LONG;
+ int idx = n / BITS_PER_LONG_LONG;
+
+ wq->wqcfg->op_config[idx] |= BIT(pos);
+ }
+ }
+
dev_dbg(dev, "WQ %d CFGs\n", wq->id);
for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
@@ -914,6 +931,9 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
else
group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
+
+ group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
+ group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
}
}
@@ -1096,8 +1116,8 @@ static void idxd_group_load_config(struct idxd_group *group)
}
grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
- group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
- dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+ group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
+ dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
group->id, grpcfg_offset, group->grpcfg.flags.bits);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index fed0dfc1eaa8..1196ab342f01 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -11,6 +11,7 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/ioasid.h>
+#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -95,6 +96,8 @@ struct idxd_group {
u8 rdbufs_reserved;
int tc_a;
int tc_b;
+ int desc_progress_limit;
+ int batch_progress_limit;
};
struct idxd_pmu {
@@ -132,6 +135,7 @@ enum idxd_wq_state {
enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
WQ_FLAG_BLOCK_ON_FAULT,
+ WQ_FLAG_ATS_DISABLE,
};
enum idxd_wq_type {
@@ -194,6 +198,8 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
+ unsigned long *opcap_bmap;
+
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
@@ -208,7 +214,6 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
- bool ats_dis;
};
struct idxd_engine {
@@ -299,6 +304,7 @@ struct idxd_device {
int rdbuf_limit;
int nr_rdbufs; /* non-reserved read buffers */
unsigned int wqcfg_size;
+ unsigned long *wq_enable_map;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
@@ -308,6 +314,8 @@ struct idxd_device {
struct work_struct work;
struct idxd_pmu *idxd_pmu;
+
+ unsigned long *opcap_bmap;
};
/* IDXD software descriptor */
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index aa3478257ddb..2b18d512cbfc 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -151,6 +151,12 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
if (!idxd->wqs)
return -ENOMEM;
+ idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->wq_enable_map) {
+ kfree(idxd->wqs);
+ return -ENOMEM;
+ }
+
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
@@ -185,6 +191,16 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
rc = -ENOMEM;
goto err;
}
+
+ if (idxd->hw.wq_cap.op_config) {
+ wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!wq->opcap_bmap) {
+ put_device(conf_dev);
+ rc = -ENOMEM;
+ goto err;
+ }
+ bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
+ }
idxd->wqs[i] = wq;
}
@@ -369,6 +385,19 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
+static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
+{
+ int i, j, nr;
+
+ for (i = 0, nr = 0; i < count; i++) {
+ for (j = 0; j < BITS_PER_LONG_LONG; j++) {
+ if (val[i] & BIT(j))
+ set_bit(nr, bmap);
+ nr++;
+ }
+ }
+}
+
static void idxd_read_caps(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
@@ -427,6 +456,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
IDXD_OPCAP_OFFSET + i * sizeof(u64));
dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
}
+ multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
}
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
@@ -448,6 +478,12 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (idxd->id < 0)
return NULL;
+ idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
+ if (!idxd->opcap_bmap) {
+ ida_free(&idxd_ida, idxd->id);
+ return NULL;
+ }
+
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
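
multi_u64_to_bmap() above goes the other way: it expands raw 64-bit capability registers into a bitmap so the rest of the driver (and the sysfs code later in this diff) can use test_bit() and the %*pb printf format. A usage sketch, assuming it sits in the same file as the helper:

#include <linux/bitmap.h>

#define EXAMPLE_OPCAP_BITS	256U

static bool example_op_supported(const u64 *cap_words, unsigned int op)
{
	DECLARE_BITMAP(opcap, EXAMPLE_OPCAP_BITS);

	bitmap_zero(opcap, EXAMPLE_OPCAP_BITS);
	multi_u64_to_bmap(opcap, (u64 *)cap_words, 4);	/* helper defined above */

	return op < EXAMPLE_OPCAP_BITS && test_bit(op, opcap);
}
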
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 743ead5ebc57..aa314ebec587 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -17,12 +17,6 @@ enum irq_work_type {
IRQ_WORK_PROCESS_FAULT,
};
-struct idxd_fault {
- struct work_struct work;
- u64 addr;
- struct idxd_device *idxd;
-};
-
struct idxd_resubmit {
struct work_struct work;
struct idxd_desc *desc;
@@ -49,11 +43,12 @@ static void idxd_device_reinit(struct work_struct *work)
goto out;
for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = idxd->wqs[i];
+ if (test_bit(i, idxd->wq_enable_map)) {
+ struct idxd_wq *wq = idxd->wqs[i];
- if (wq->state == IDXD_WQ_ENABLED) {
rc = idxd_wq_enable(wq);
if (rc < 0) {
+ clear_bit(i, idxd->wq_enable_map);
dev_warn(dev, "Unable to re-enable wq %s\n",
dev_name(wq_confdev(wq)));
}
@@ -324,13 +319,11 @@ halt:
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
- spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
- spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 02449aa9c454..fe3b8d04f9db 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -54,7 +54,8 @@ union wq_cap_reg {
u64 priority:1;
u64 occupancy:1;
u64 occupancy_int:1;
- u64 rsvd3:10;
+ u64 op_config:1;
+ u64 rsvd3:9;
};
u64 bits;
} __packed;
@@ -67,7 +68,8 @@ union group_cap_reg {
u64 total_rdbufs:8; /* formerly total_tokens */
u64 rdbuf_ctrl:1; /* formerly token_en */
u64 rdbuf_limit:1; /* formerly token_limit */
- u64 rsvd:46;
+ u64 progress_limit:1; /* descriptor and batch descriptor */
+ u64 rsvd:45;
};
u64 bits;
} __packed;
@@ -90,6 +92,8 @@ struct opcap {
u64 bits[4];
};
+#define IDXD_MAX_OPCAP_BITS 256U
+
#define IDXD_OPCAP_OFFSET 0x40
#define IDXD_TABLE_OFFSET 0x60
@@ -285,16 +289,20 @@ union msix_perm {
union group_flags {
struct {
- u32 tc_a:3;
- u32 tc_b:3;
- u32 rsvd:1;
- u32 use_rdbuf_limit:1;
- u32 rdbufs_reserved:8;
- u32 rsvd2:4;
- u32 rdbufs_allowed:8;
- u32 rsvd3:4;
+ u64 tc_a:3;
+ u64 tc_b:3;
+ u64 rsvd:1;
+ u64 use_rdbuf_limit:1;
+ u64 rdbufs_reserved:8;
+ u64 rsvd2:4;
+ u64 rdbufs_allowed:8;
+ u64 rsvd3:4;
+ u64 desc_progress_limit:2;
+ u64 rsvd4:2;
+ u64 batch_progress_limit:2;
+ u64 rsvd5:26;
};
- u32 bits;
+ u64 bits;
} __packed;
struct grpcfg {
@@ -348,8 +356,11 @@ union wqcfg {
/* bytes 28-31 */
u32 rsvd8;
+
+ /* bytes 32-63 */
+ u64 op_config[4];
};
- u32 bits[8];
+ u32 bits[16];
} __packed;
#define WQCFG_PASID_IDX 2
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3f262a57441b..bdaccf9e0436 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -443,6 +443,67 @@ static struct device_attribute dev_attr_group_traffic_class_b =
__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
group_traffic_class_b_store);
+static ssize_t group_desc_progress_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%d\n", group->desc_progress_limit);
+}
+
+static ssize_t group_desc_progress_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+ int val, rc;
+
+ rc = kstrtoint(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (val & ~GENMASK(1, 0))
+ return -EINVAL;
+
+ group->desc_progress_limit = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_desc_progress_limit =
+ __ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show,
+ group_desc_progress_limit_store);
+
+static ssize_t group_batch_progress_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%d\n", group->batch_progress_limit);
+}
+
+static ssize_t group_batch_progress_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+ int val, rc;
+
+ rc = kstrtoint(buf, 10, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (val & ~GENMASK(1, 0))
+ return -EINVAL;
+
+ group->batch_progress_limit = val;
+ return count;
+}
+
+static struct device_attribute dev_attr_group_batch_progress_limit =
+ __ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show,
+ group_batch_progress_limit_store);
static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_work_queues.attr,
&dev_attr_group_engines.attr,
@@ -454,11 +515,35 @@ static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_read_buffers_reserved.attr,
&dev_attr_group_traffic_class_a.attr,
&dev_attr_group_traffic_class_b.attr,
+ &dev_attr_group_desc_progress_limit.attr,
+ &dev_attr_group_batch_progress_limit.attr,
NULL,
};
+static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return (attr == &dev_attr_group_desc_progress_limit.attr ||
+ attr == &dev_attr_group_batch_progress_limit.attr) &&
+ !idxd->hw.group_cap.progress_limit;
+}
+
+static umode_t idxd_group_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct idxd_group *group = confdev_to_group(dev);
+ struct idxd_device *idxd = group->idxd;
+
+ if (idxd_group_attr_progress_limit_invisible(attr, idxd))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group idxd_group_attribute_group = {
.attrs = idxd_group_attributes,
+ .is_visible = idxd_group_attr_visible,
};
static const struct attribute_group *idxd_group_attribute_groups[] = {
@@ -973,7 +1058,7 @@ static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *
{
struct idxd_wq *wq = confdev_to_wq(dev);
- return sysfs_emit(buf, "%u\n", wq->ats_dis);
+ return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags));
}
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
@@ -994,7 +1079,10 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (rc < 0)
return rc;
- wq->ats_dis = ats_dis;
+ if (ats_dis)
+ set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
+ else
+ clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
return count;
}
@@ -1055,6 +1143,68 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib
static struct device_attribute dev_attr_wq_enqcmds_retries =
__ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+static ssize_t wq_op_config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap);
+}
+
+static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask)
+{
+ int bit;
+
+ /*
+ * The OPCAP is defined as 256 bits, one per operation the device can
+ * support. Iterate through the input mask and check whether it sets any
+ * bit that is not set in the device's OPCAP; if so, the requested
+ * operation is unsupported and an error is returned.
+ */
+ for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) {
+ if (!test_bit(bit, idxd->opcap_bmap))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+ unsigned long *opmask;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
+ if (!opmask)
+ return -ENOMEM;
+
+ rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS);
+ if (rc < 0)
+ goto err;
+
+ rc = idxd_verify_supported_opcap(idxd, opmask);
+ if (rc < 0)
+ goto err;
+
+ bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS);
+
+ bitmap_free(opmask);
+ return count;
+
+err:
+ bitmap_free(opmask);
+ return rc;
+}
+
+static struct device_attribute dev_attr_wq_op_config =
+ __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1072,11 +1222,33 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_ats_disable.attr,
&dev_attr_wq_occupancy.attr,
&dev_attr_wq_enqcmds_retries.attr,
+ &dev_attr_wq_op_config.attr,
NULL,
};
+static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
+ struct idxd_device *idxd)
+{
+ return attr == &dev_attr_wq_op_config.attr &&
+ !idxd->hw.wq_cap.op_config;
+}
+
+static umode_t idxd_wq_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ struct idxd_device *idxd = wq->idxd;
+
+ if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group idxd_wq_attribute_group = {
.attrs = idxd_wq_attributes,
+ .is_visible = idxd_wq_attr_visible,
};
static const struct attribute_group *idxd_wq_attribute_groups[] = {
@@ -1088,6 +1260,7 @@ static void idxd_conf_wq_release(struct device *dev)
{
struct idxd_wq *wq = confdev_to_wq(dev);
+ bitmap_free(wq->opcap_bmap);
kfree(wq->wqcfg);
kfree(wq);
}
@@ -1177,14 +1350,8 @@ static ssize_t op_cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- int i, rc = 0;
-
- for (i = 0; i < 4; i++)
- rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
- rc--;
- rc += sysfs_emit_at(buf, rc, "\n");
- return rc;
+ return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap);
}
static DEVICE_ATTR_RO(op_cap);
@@ -1405,9 +1572,11 @@ static void idxd_conf_device_release(struct device *dev)
struct idxd_device *idxd = confdev_to_idxd(dev);
kfree(idxd->groups);
+ bitmap_free(idxd->wq_enable_map);
kfree(idxd->wqs);
kfree(idxd->engines);
ida_free(&idxd_ida, idxd->id);
+ bitmap_free(idxd->opcap_bmap);
kfree(idxd);
}
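
The wq_op_config_store() path above parses the user string into a bitmap and rejects any bit that is not also set in the device's OPCAP. A minimal user-space sketch of that validation, assuming illustrative sizes and names rather than the driver's actual types:

/*
 * Every bit set in the requested mask must also be set in the device
 * capability mask; one requested bit outside the capabilities fails.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OPCAP_BITS  256
#define OPCAP_WORDS (OPCAP_BITS / 64)

static bool opmask_supported(const uint64_t *opcap, const uint64_t *req)
{
	for (int w = 0; w < OPCAP_WORDS; w++) {
		/* any requested bit outside the capability mask is an error */
		if (req[w] & ~opcap[w])
			return false;
	}
	return true;
}

int main(void)
{
	uint64_t opcap[OPCAP_WORDS] = { 0x00ff };	/* device supports ops 0..7 */
	uint64_t ok[OPCAP_WORDS]    = { 0x0005 };	/* ops 0 and 2: accepted    */
	uint64_t bad[OPCAP_WORDS]   = { 0x0100 };	/* op 8: rejected           */

	printf("ok:  %d\n", opmask_supported(opcap, ok));	/* prints 1 */
	printf("bad: %d\n", opmask_supported(opcap, bad));	/* prints 0 */
	return 0;
}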
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 37ff4ec7db76..e2070df6cad2 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -656,7 +656,7 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
if (active - i == 0) {
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
__func__);
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
/* microsecond delay by sysfs variable per pending descriptor */
@@ -682,7 +682,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
if (chanerr &
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
ioat_eh(ioat_chan);
}
}
@@ -879,7 +879,7 @@ static void check_active(struct ioatdma_chan *ioat_chan)
}
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+ mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
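
For context on the mod_timer() to mod_timer_pending() changes above: mod_timer() (re)arms a timer even when it is not currently pending, whereas mod_timer_pending() only updates a timer that is already pending, so it cannot resurrect a timer that has been deactivated. A toy user-space model of that distinction (not the kernel timer API):

#include <stdbool.h>
#include <stdio.h>

struct toy_timer {
	bool pending;
	unsigned long expires;
};

static void mod_timer(struct toy_timer *t, unsigned long expires)
{
	t->expires = expires;
	t->pending = true;		/* arms the timer unconditionally */
}

static bool mod_timer_pending(struct toy_timer *t, unsigned long expires)
{
	if (!t->pending)
		return false;		/* an inactive timer stays inactive */
	t->expires = expires;
	return true;
}

int main(void)
{
	struct toy_timer t = { .pending = false };

	printf("mod_timer_pending on idle timer: %d (pending=%d)\n",
	       mod_timer_pending(&t, 100), t.pending);
	mod_timer(&t, 100);
	printf("after mod_timer: pending=%d expires=%lu\n", t.pending, t.expires);
	return 0;
}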
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 140cfe3782fb..35e06b382603 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -196,10 +196,8 @@ extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
-extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;
static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 994fc4d2aca4..dc147cc2436e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -670,7 +670,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
return mxs_chan->status;
}
-static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
+static int mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
@@ -741,7 +741,7 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
ofdma->of_node);
}
-static int __init mxs_dma_probe(struct platform_device *pdev)
+static int mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
@@ -839,10 +839,7 @@ static struct platform_driver mxs_dma_driver = {
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
+ .probe = mxs_dma_probe,
};
-static int __init mxs_dma_module_init(void)
-{
- return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
-}
-subsys_initcall(mxs_dma_module_init);
+builtin_platform_driver(mxs_dma_driver);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 09915a5cba3e..0d9257fbdfb0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2752,7 +2752,6 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
return NULL;
pch->cyclic = true;
- desc->txd.flags = flags;
return &desc->txd;
}
@@ -2804,8 +2803,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->bytes_requested = len;
- desc->txd.flags = flags;
-
return &desc->txd;
}
@@ -2889,7 +2886,6 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
/* Return the last desc in the chain */
- desc->txd.flags = flg;
return &desc->txd;
}
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 8f0c9c4e2efd..3f56514bbef8 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1150,9 +1150,9 @@ static void gpi_ev_tasklet(unsigned long data)
{
struct gpii *gpii = (struct gpii *)data;
- read_lock_bh(&gpii->pm_lock);
+ read_lock(&gpii->pm_lock);
if (!REG_ACCESS_VALID(gpii->pm_state)) {
- read_unlock_bh(&gpii->pm_lock);
+ read_unlock(&gpii->pm_lock);
dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
TO_GPI_PM_STR(gpii->pm_state));
return;
@@ -1163,7 +1163,7 @@ static void gpi_ev_tasklet(unsigned long data)
/* enable IEOB, switching back to interrupts */
gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
- read_unlock_bh(&gpii->pm_lock);
+ read_unlock(&gpii->pm_lock);
}
/* marks all pending events for the channel as stale */
@@ -2288,6 +2288,7 @@ static int gpi_probe(struct platform_device *pdev)
static const struct of_device_id gpi_of_match[] = {
{ .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 },
+ { .compatible = "qcom,sm6350-gpi-dma", .data = (void *)0x10000 },
{ .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 },
{ .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 },
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index facdacf8aede..d56caf1681ff 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -379,13 +379,13 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
if (blk_size < 0) {
dev_err(adev->dev, "invalid burst value: %d\n",
burst);
- return ERR_PTR(-EINVAL);
+ return NULL;
}
crci = achan->crci & 0xf;
if (!crci || achan->crci > 0x1f) {
dev_err(adev->dev, "invalid crci value\n");
- return ERR_PTR(-EINVAL);
+ return NULL;
}
}
@@ -403,8 +403,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
}
async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
- if (!async_desc)
- return ERR_PTR(-ENOMEM);
+ if (!async_desc) {
+ dev_err(adev->dev, "not enough memory for async_desc struct\n");
+ return NULL;
+ }
async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
async_desc->crci = crci;
@@ -414,8 +416,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
sizeof(*cple) + 2 * ADM_DESC_ALIGN;
async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
- if (!async_desc->cpl)
+ if (!async_desc->cpl) {
+ dev_err(adev->dev, "not enough memory for cpl struct\n");
goto free;
+ }
async_desc->adev = adev;
@@ -437,8 +441,10 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
async_desc->dma_len,
DMA_TO_DEVICE);
- if (dma_mapping_error(adev->dev, async_desc->dma_addr))
+ if (dma_mapping_error(adev->dev, async_desc->dma_addr)) {
+ dev_err(adev->dev, "dma mapping error for cpl\n");
goto free;
+ }
cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
@@ -454,7 +460,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
free:
kfree(async_desc);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
/**
@@ -494,7 +500,7 @@ static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
spin_lock_irqsave(&achan->vc.lock, flag);
memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
- if (cfg->peripheral_size == sizeof(config))
+ if (cfg->peripheral_size == sizeof(*config))
achan->crci = config->crci;
spin_unlock_irqrestore(&achan->vc.lock, flag);
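
The last hunk above fixes a classic C pitfall: with a pointer variable, sizeof(config) yields the pointer size rather than the structure size, so the peripheral_size comparison could never match. A small stand-alone illustration, using a stand-in struct name rather than the driver's real type:

#include <stdio.h>

struct peripheral_config {	/* stand-in for the real peripheral config */
	unsigned int crci;
	unsigned int mux;
};

int main(void)
{
	struct peripheral_config *config = 0;

	/* sizeof() does not evaluate its operand, so this is safe */
	printf("sizeof(config)  = %zu (pointer size)\n", sizeof(config));
	printf("sizeof(*config) = %zu (struct size)\n", sizeof(*config));
	return 0;
}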
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f6ed7e889781..a09eeb545f7d 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1094,7 +1094,7 @@ static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
INIT_LIST_HEAD(&dmadev->channels);
/*
- * Register as many many memcpy as we have physical channels,
+ * Register as many memcpy as we have physical channels,
* we won't always be able to use all but the code will have
* to cope with that situation.
*/
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 4f8b8498c5c6..6b524eb6bcf3 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -405,10 +405,8 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
chan = &pdma->chans[i];
irq = platform_get_irq(pdev, i * 2);
- if (irq < 0) {
- dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
+ if (irq < 0)
return -EINVAL;
- }
r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
dev_name(&pdev->dev), (void *)chan);
@@ -420,10 +418,8 @@ static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
chan->txirq = irq;
irq = platform_get_irq(pdev, (i * 2) + 1);
- if (irq < 0) {
- dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
+ if (irq < 0)
return -EINVAL;
- }
r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
dev_name(&pdev->dev), (void *)chan);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 13d12d660cc2..641d689d17ff 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -103,8 +103,8 @@ struct rcar_dmac_desc_page {
struct list_head node;
union {
- struct rcar_dmac_desc descs[0];
- struct rcar_dmac_xfer_chunk chunks[0];
+ DECLARE_FLEX_ARRAY(struct rcar_dmac_desc, descs);
+ DECLARE_FLEX_ARRAY(struct rcar_dmac_xfer_chunk, chunks);
};
};
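
DECLARE_FLEX_ARRAY() exists because a flexible array member cannot sit directly inside a union; the kernel helper wraps it in a small struct so the two descriptor arrays can still share storage without relying on deprecated zero-length arrays. A rough user-space analogue, using a length field instead of the kernel's empty padding struct and purely illustrative element types:

#include <stdio.h>
#include <stdlib.h>

union desc_page {
	/* each flexible array is wrapped in a struct with another member */
	struct { size_t used; long descs[]; } d;
	struct { size_t used; int chunks[]; } c;
};

int main(void)
{
	/* allocate one page-like object and use it as an array of descs */
	union desc_page *p = malloc(sizeof(*p) + 4 * sizeof(long));

	if (!p)
		return 1;
	p->d.used = 4;
	for (size_t i = 0; i < p->d.used; i++)
		p->d.descs[i] = (long)i;
	printf("last desc: %ld\n", p->d.descs[p->d.used - 1]);
	free(p);
	return 0;
}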
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index adb25a11c70f..4891a1767e5a 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -9,6 +9,7 @@
* Pierre-Yves Mordret <pierre-yves.mordret@st.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -32,8 +33,10 @@
#define STM32_DMA_LISR 0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR 0x0004 /* DMA High Int Status Reg */
+#define STM32_DMA_ISR(n) (((n) & 4) ? STM32_DMA_HISR : STM32_DMA_LISR)
#define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */
+#define STM32_DMA_IFCR(n) (((n) & 4) ? STM32_DMA_HIFCR : STM32_DMA_LIFCR)
#define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */
@@ -43,23 +46,22 @@
| STM32_DMA_TEI \
| STM32_DMA_DMEI \
| STM32_DMA_FEI)
+/*
+ * If (n % 4) is 2 or 3, left shift the mask by 16 bits;
+ * if (n % 4) is 1 or 3, additionally left shift the mask by 6 bits.
+ */
+#define STM32_DMA_FLAGS_SHIFT(n) ({ typeof(n) (_n) = (n); \
+ (((_n) & 2) << 3) | (((_n) & 1) * 6); })
/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */
-#define STM32_DMA_SCR_REQ(n) ((n & 0x7) << 25)
+#define STM32_DMA_SCR_REQ_MASK GENMASK(27, 25)
#define STM32_DMA_SCR_MBURST_MASK GENMASK(24, 23)
-#define STM32_DMA_SCR_MBURST(n) ((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK GENMASK(22, 21)
-#define STM32_DMA_SCR_PBURST(n) ((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK GENMASK(17, 16)
-#define STM32_DMA_SCR_PL(n) ((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK GENMASK(14, 13)
-#define STM32_DMA_SCR_MSIZE(n) ((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK GENMASK(12, 11)
-#define STM32_DMA_SCR_PSIZE(n) ((n & 0x3) << 11)
-#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
-#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
#define STM32_DMA_SCR_TRBUFF BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
@@ -96,7 +98,6 @@
/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x) (0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK GENMASK(1, 0)
-#define STM32_DMA_SFCR_FTH(n) (n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK (STM32_DMA_SFCR_FEIE \
@@ -137,11 +138,9 @@
/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
-#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
-#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
#define STM32_DMA_ALT_ACK_MODE_MASK BIT(4)
-#define STM32_DMA_ALT_ACK_MODE_GET(n) (((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
+#define STM32_DMA_MDMA_STREAM_ID_MASK GENMASK(19, 16)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -195,6 +194,19 @@ struct stm32_dma_desc {
struct stm32_dma_sg_req sg_req[];
};
+/**
+ * struct stm32_dma_mdma_config - STM32 DMA MDMA configuration
+ * @stream_id: DMA request to trigger STM32 MDMA transfer
+ * @ifcr: DMA interrupt flag clear register address,
+ * used by STM32 MDMA to clear DMA Transfer Complete flag
+ * @tcf: DMA Transfer Complete flag
+ */
+struct stm32_dma_mdma_config {
+ u32 stream_id;
+ u32 ifcr;
+ u32 tcf;
+};
+
struct stm32_dma_chan {
struct virt_dma_chan vchan;
bool config_init;
@@ -209,6 +221,8 @@ struct stm32_dma_chan {
u32 mem_burst;
u32 mem_width;
enum dma_status status;
+ bool trig_mdma;
+ struct stm32_dma_mdma_config mdma_config;
};
struct stm32_dma_device {
@@ -388,6 +402,13 @@ static int stm32_dma_slave_config(struct dma_chan *c,
memcpy(&chan->dma_sconfig, config, sizeof(*config));
+ /* Check if user is requesting DMA to trigger STM32 MDMA */
+ if (config->peripheral_size) {
+ config->peripheral_config = &chan->mdma_config;
+ config->peripheral_size = sizeof(chan->mdma_config);
+ chan->trig_mdma = true;
+ }
+
chan->config_init = true;
return 0;
@@ -401,17 +422,10 @@ static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
/*
* Read "flags" from DMA_xISR register corresponding to the selected
* DMA channel at the correct bit offset inside that register.
- *
- * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
- * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
*/
- if (chan->id & 4)
- dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
- else
- dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);
-
- flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+ dma_isr = stm32_dma_read(dmadev, STM32_DMA_ISR(chan->id));
+ flags = dma_isr >> STM32_DMA_FLAGS_SHIFT(chan->id);
return flags & STM32_DMA_MASKI;
}
@@ -424,17 +438,11 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
/*
* Write "flags" to the DMA_xIFCR register corresponding to the selected
* DMA channel at the correct bit offset inside that register.
- *
- * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
- * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
*/
flags &= STM32_DMA_MASKI;
- dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));
+ dma_ifcr = flags << STM32_DMA_FLAGS_SHIFT(chan->id);
- if (chan->id & 4)
- stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
- else
- stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
+ stm32_dma_write(dmadev, STM32_DMA_IFCR(chan->id), dma_ifcr);
}
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
@@ -576,6 +584,10 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
sg_req = &chan->desc->sg_req[chan->next_sg];
reg = &sg_req->chan_reg;
+ /* When DMA triggers STM32 MDMA, DMA Transfer Complete is managed by STM32 MDMA */
+ if (chan->trig_mdma && chan->dma_sconfig.direction != DMA_MEM_TO_DEV)
+ reg->dma_scr &= ~STM32_DMA_SCR_TCIE;
+
reg->dma_scr &= ~STM32_DMA_SCR_EN;
stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
@@ -725,6 +737,8 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
if (chan->desc->cyclic) {
vchan_cyclic_callback(&chan->desc->vdesc);
+ if (chan->trig_mdma)
+ return;
stm32_dma_sg_inc(chan);
/* cyclic while CIRC/DBM disable => post resume reconfiguration needed */
if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
@@ -861,7 +875,8 @@ static int stm32_dma_resume(struct dma_chan *c)
sg_req = &chan->desc->sg_req[chan->next_sg - 1];
ndtr = sg_req->chan_reg.dma_sndtr;
- offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
+ offset = (ndtr - chan_reg.dma_sndtr);
+ offset <<= FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, chan_reg.dma_scr);
spar = sg_req->chan_reg.dma_spar;
sm0ar = sg_req->chan_reg.dma_sm0ar;
sm1ar = sg_req->chan_reg.dma_sm1ar;
@@ -973,16 +988,16 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
if (src_burst_size < 0)
return src_burst_size;
- dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
- STM32_DMA_SCR_PSIZE(dst_bus_width) |
- STM32_DMA_SCR_MSIZE(src_bus_width) |
- STM32_DMA_SCR_PBURST(dst_burst_size) |
- STM32_DMA_SCR_MBURST(src_burst_size);
+ dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_DEV) |
+ FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, dst_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, src_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dst_burst_size) |
+ FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, src_burst_size);
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
+ chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
@@ -1030,16 +1045,16 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
if (dst_burst_size < 0)
return dst_burst_size;
- dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
- STM32_DMA_SCR_PSIZE(src_bus_width) |
- STM32_DMA_SCR_MSIZE(dst_bus_width) |
- STM32_DMA_SCR_PBURST(src_burst_size) |
- STM32_DMA_SCR_MBURST(dst_burst_size);
+ dma_scr = FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_DEV_TO_MEM) |
+ FIELD_PREP(STM32_DMA_SCR_PSIZE_MASK, src_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_MSIZE_MASK, dst_bus_width) |
+ FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, src_burst_size) |
+ FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dst_burst_size);
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
+ chan->chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
@@ -1099,6 +1114,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
else
chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
+ /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
+ if (chan->trig_mdma && sg_len > 1)
+ chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
sg_dma_len(sg),
@@ -1120,6 +1139,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
+ if (chan->trig_mdma)
+ desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
}
@@ -1207,8 +1228,11 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
+ if (chan->trig_mdma)
+ desc->sg_req[i].chan_reg.dma_sm1ar += period_len;
desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
- buf_addr += period_len;
+ if (!chan->trig_mdma)
+ buf_addr += period_len;
}
desc->num_sgs = num_periods;
@@ -1247,16 +1271,15 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
desc->sg_req[i].chan_reg.dma_scr =
- STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
- STM32_DMA_SCR_PBURST(dma_burst) |
- STM32_DMA_SCR_MBURST(dma_burst) |
+ FIELD_PREP(STM32_DMA_SCR_DIR_MASK, STM32_DMA_MEM_TO_MEM) |
+ FIELD_PREP(STM32_DMA_SCR_PBURST_MASK, dma_burst) |
+ FIELD_PREP(STM32_DMA_SCR_MBURST_MASK, dma_burst) |
STM32_DMA_SCR_MINC |
STM32_DMA_SCR_PINC |
STM32_DMA_SCR_TCIE |
STM32_DMA_SCR_TEIE;
desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
- desc->sg_req[i].chan_reg.dma_sfcr |=
- STM32_DMA_SFCR_FTH(threshold);
+ desc->sg_req[i].chan_reg.dma_sfcr |= FIELD_PREP(STM32_DMA_SFCR_FTH_MASK, threshold);
desc->sg_req[i].chan_reg.dma_spar = src + offset;
desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
@@ -1275,7 +1298,7 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
- width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+ width = FIELD_GET(STM32_DMA_SCR_PSIZE_MASK, dma_scr);
ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
return ndtr << width;
@@ -1481,16 +1504,17 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
stm32_dma_clear_reg(&chan->chan_reg);
chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
- chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);
+ chan->chan_reg.dma_scr |= FIELD_PREP(STM32_DMA_SCR_REQ_MASK, cfg->request_line);
/* Enable Interrupts */
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
- chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
- if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+ chan->threshold = FIELD_GET(STM32_DMA_THRESHOLD_FTR_MASK, cfg->features);
+ if (FIELD_GET(STM32_DMA_DIRECT_MODE_MASK, cfg->features))
chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
- if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
+ if (FIELD_GET(STM32_DMA_ALT_ACK_MODE_MASK, cfg->features))
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
+ chan->mdma_config.stream_id = FIELD_GET(STM32_DMA_MDMA_STREAM_ID_MASK, cfg->features);
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -1630,6 +1654,12 @@ static int stm32_dma_probe(struct platform_device *pdev)
chan->id = i;
chan->vchan.desc_free = stm32_dma_desc_free;
vchan_init(&chan->vchan, dd);
+
+ chan->mdma_config.ifcr = res->start;
+ chan->mdma_config.ifcr += STM32_DMA_IFCR(chan->id);
+
+ chan->mdma_config.tcf = STM32_DMA_TCI;
+ chan->mdma_config.tcf <<= STM32_DMA_FLAGS_SHIFT(chan->id);
}
ret = dma_async_device_register(dd);
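
The FIELD_PREP()/FIELD_GET() conversion above works because both helpers derive the shift amount from the mask itself, which makes the hand-written shift macros redundant; the new STM32_DMA_FLAGS_SHIFT() likewise encodes the per-channel ISR/IFCR bit offsets. A user-space approximation (not the kernel's <linux/bitfield.h>, and assuming the GCC/Clang __builtin_ctz builtin) that also prints the shift values for channels 0..7:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;	/* shift comes from the mask */
}

static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

#define SCR_PSIZE_MASK	GENMASK(12, 11)
#define FLAGS_SHIFT(n)	((((n) & 2) << 3) | (((n) & 1) * 6))

int main(void)
{
	uint32_t scr = field_prep(SCR_PSIZE_MASK, 2);	/* old form: (2 & 0x3) << 11 */

	printf("scr=%#x psize=%u\n", (unsigned int)scr,
	       (unsigned int)field_get(SCR_PSIZE_MASK, scr));
	for (int id = 0; id < 8; id++)			/* prints 0, 6, 16, 22, 0, 6, 16, 22 */
		printf("chan %d -> shift %d\n", id, FLAGS_SHIFT(id));
	return 0;
}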
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index eee0c5aa5fb5..ee3cbbf51006 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -39,13 +39,13 @@ struct stm32_dmamux_data {
u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
spinlock_t lock; /* Protects register access */
- unsigned long *dma_inuse; /* Used DMA channel */
+ DECLARE_BITMAP(dma_inuse, STM32_DMAMUX_MAX_DMA_REQUESTS); /* Used DMA channel */
u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
* in suspend
*/
u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
* [0] holds number of DMA Masters.
- * To be kept at very end end of this structure
+ * To be kept at very end of this structure
*/
};
@@ -147,7 +147,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
mux->request = dma_spec->args[0];
/* craft DMA spec */
- dma_spec->args[3] = dma_spec->args[2];
+ dma_spec->args[3] = dma_spec->args[2] | mux->chan_id << 16;
dma_spec->args[2] = dma_spec->args[1];
dma_spec->args[1] = 0;
dma_spec->args[0] = mux->chan_id - min;
@@ -229,12 +229,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
stm32_dmamux->dma_requests = dma_req;
stm32_dmamux->dma_reqs[0] = count;
- stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
- BITS_TO_LONGS(dma_req),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!stm32_dmamux->dma_inuse)
- return -ENOMEM;
if (device_property_read_u32(&pdev->dev, "dma-requests",
&stm32_dmamux->dmamux_requests)) {
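
The dma_spec rework above packs the DMAMUX output channel into the upper bits of the fourth DMA cell, which stm32-dma.c then extracts with STM32_DMA_MDMA_STREAM_ID_MASK (GENMASK(19, 16)). A simplified sketch of that pack/unpack round trip, with made-up cell values:

#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define STREAM_ID_MASK	GENMASK(19, 16)

int main(void)
{
	unsigned int features = 0x5;			/* original third DT cell (example) */
	unsigned int chan_id = 9;			/* DMAMUX output channel (example)  */
	unsigned int cell = features | (chan_id << 16);	/* what the mux driver crafts       */

	printf("cell = %#x, stream id = %u\n",
	       cell, (cell & STREAM_ID_MASK) >> 16);	/* stream id = 9 */
	return 0;
}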
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index b11927ed4367..e28acbcb53f4 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,6 +199,7 @@ struct stm32_mdma_chan_config {
u32 transfer_config;
u32 mask_addr;
u32 mask_data;
+ bool m2m_hw; /* True when MDMA is triggered by STM32 DMA */
};
struct stm32_mdma_hwdesc {
@@ -227,6 +228,12 @@ struct stm32_mdma_desc {
struct stm32_mdma_desc_node node[];
};
+struct stm32_mdma_dma_config {
+ u32 request; /* STM32 DMA channel stream id, triggering MDMA */
+ u32 cmar; /* STM32 DMA interrupt flag clear register address */
+ u32 cmdr; /* STM32 DMA Transfer Complete flag */
+};
+
struct stm32_mdma_chan {
struct virt_dma_chan vchan;
struct dma_pool *desc_pool;
@@ -539,13 +546,23 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
dst_addr = chan->dma_config.dst_addr;
/* Set device data size */
+ if (chan_config->m2m_hw)
+ dst_addr_width = stm32_mdma_get_max_width(dst_addr, buf_len,
+ STM32_MDMA_MAX_BUF_LEN);
dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
return dst_bus_width;
ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
+ if (chan_config->m2m_hw) {
+ ctcr &= ~STM32_MDMA_CTCR_DINCOS_MASK;
+ ctcr |= STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+ }
/* Set device burst value */
+ if (chan_config->m2m_hw)
+ dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
+
dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
dst_maxburst,
dst_addr_width);
@@ -588,13 +605,24 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
src_addr = chan->dma_config.src_addr;
/* Set device data size */
+ if (chan_config->m2m_hw)
+ src_addr_width = stm32_mdma_get_max_width(src_addr, buf_len,
+ STM32_MDMA_MAX_BUF_LEN);
+
src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
return src_bus_width;
ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
+ if (chan_config->m2m_hw) {
+ ctcr &= ~STM32_MDMA_CTCR_SINCOS_MASK;
+ ctcr |= STM32_MDMA_CTCR_SINCOS(src_bus_width);
+ }
/* Set device burst value */
+ if (chan_config->m2m_hw)
+ src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
+
src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
src_maxburst,
src_addr_width);
@@ -702,11 +730,15 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct dma_slave_config *dma_config = &chan->dma_config;
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct scatterlist *sg;
dma_addr_t src_addr, dst_addr;
- u32 ccr, ctcr, ctbr;
+ u32 m2m_hw_period, ccr, ctcr, ctbr;
int i, ret = 0;
+ if (chan_config->m2m_hw)
+ m2m_hw_period = sg_dma_len(sgl);
+
for_each_sg(sgl, sg, sg_len, i) {
if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
dev_err(chan2dev(chan), "Invalid block len\n");
@@ -716,6 +748,8 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = dma_config->dst_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ dst_addr += m2m_hw_period;
ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
&ctcr, &ctbr, src_addr,
sg_dma_len(sg));
@@ -723,6 +757,8 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
src_addr);
} else {
src_addr = dma_config->src_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ src_addr += m2m_hw_period;
dst_addr = sg_dma_address(sg);
ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
&ctcr, &ctbr, dst_addr,
@@ -755,6 +791,7 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct stm32_mdma_desc *desc;
int i, ret;
@@ -777,6 +814,21 @@ stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
if (ret < 0)
goto xfer_setup_err;
+ /*
+ * For an M2M HW transfer triggered by STM32 DMA, the MDMA must not clear the
+ * DMA transfer complete flag in hardware, so that the CPU can rearm the STM32
+ * DMA with the next sg element and update the dmaengine framework state.
+ */
+ if (chan_config->m2m_hw && direction == DMA_MEM_TO_DEV) {
+ struct stm32_mdma_hwdesc *hwdesc;
+
+ for (i = 0; i < sg_len; i++) {
+ hwdesc = desc->node[i].hwdesc;
+ hwdesc->cmar = 0;
+ hwdesc->cmdr = 0;
+ }
+ }
+
desc->cyclic = false;
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -798,6 +850,7 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
struct dma_slave_config *dma_config = &chan->dma_config;
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
struct stm32_mdma_desc *desc;
dma_addr_t src_addr, dst_addr;
u32 ccr, ctcr, ctbr, count;
@@ -858,8 +911,12 @@ stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
if (direction == DMA_MEM_TO_DEV) {
src_addr = buf_addr + i * period_len;
dst_addr = dma_config->dst_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ dst_addr += period_len;
} else {
src_addr = dma_config->src_addr;
+ if (chan_config->m2m_hw && (i & 1))
+ src_addr += period_len;
dst_addr = buf_addr + i * period_len;
}
@@ -1244,6 +1301,17 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
memcpy(&chan->dma_config, config, sizeof(*config));
+ /* Check if user is requesting STM32 DMA to trigger MDMA */
+ if (config->peripheral_size) {
+ struct stm32_mdma_dma_config *mdma_config;
+
+ mdma_config = (struct stm32_mdma_dma_config *)chan->dma_config.peripheral_config;
+ chan->chan_config.request = mdma_config->request;
+ chan->chan_config.mask_addr = mdma_config->cmar;
+ chan->chan_config.mask_data = mdma_config->cmdr;
+ chan->chan_config.m2m_hw = true;
+ }
+
return 0;
}
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 4cbca80ee16e..fa06d7e6d8e3 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -352,12 +352,6 @@ static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
edma_modify(ecc, offset + (i << 2), and, or);
}
-static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
- unsigned or)
-{
- edma_or(ecc, offset + (i << 2), or);
-}
-
static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
unsigned or)
{
@@ -370,11 +364,6 @@ static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}
-static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
-{
- return edma_read(ecc, EDMA_SHADOW0 + offset);
-}
-
static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
int offset, int i)
{
@@ -393,36 +382,12 @@ static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}
-static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
- int param_no)
-{
- return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
-}
-
-static inline void edma_param_write(struct edma_cc *ecc, int offset,
- int param_no, unsigned val)
-{
- edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
-}
-
static inline void edma_param_modify(struct edma_cc *ecc, int offset,
int param_no, unsigned and, unsigned or)
{
edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}
-static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
- unsigned and)
-{
- edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
-}
-
-static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
- unsigned or)
-{
- edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
-}
-
static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
int priority)
{
@@ -743,11 +708,6 @@ static void edma_free_channel(struct edma_chan *echan)
edma_setup_interrupt(echan, false);
}
-static inline struct edma_cc *to_edma_cc(struct dma_device *d)
-{
- return container_of(d, struct edma_cc, dma_slave);
-}
-
static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
return container_of(c, struct edma_chan, vchan.chan);
diff --git a/drivers/dma/ti/k3-psil-j7200.c b/drivers/dma/ti/k3-psil-j7200.c
index 5ea63ea74822..e3feff869991 100644
--- a/drivers/dma/ti/k3-psil-j7200.c
+++ b/drivers/dma/ti/k3-psil-j7200.c
@@ -143,6 +143,57 @@ static struct psil_ep j7200_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j7200_dst_ep_map[] = {
+ /* PDMA_MCASP - McASP0-2 */
+ PSIL_PDMA_MCASP(0xc400),
+ PSIL_PDMA_MCASP(0xc401),
+ PSIL_PDMA_MCASP(0xc402),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ PSIL_PDMA_XY_PKT(0xc608),
+ PSIL_PDMA_XY_PKT(0xc609),
+ PSIL_PDMA_XY_PKT(0xc60a),
+ PSIL_PDMA_XY_PKT(0xc60b),
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0xc610),
+ PSIL_PDMA_XY_PKT(0xc611),
+ PSIL_PDMA_XY_PKT(0xc612),
+ PSIL_PDMA_XY_PKT(0xc613),
+ PSIL_PDMA_XY_PKT(0xc614),
+ PSIL_PDMA_XY_PKT(0xc615),
+ PSIL_PDMA_XY_PKT(0xc616),
+ PSIL_PDMA_XY_PKT(0xc617),
+ PSIL_PDMA_XY_PKT(0xc618),
+ PSIL_PDMA_XY_PKT(0xc619),
+ PSIL_PDMA_XY_PKT(0xc61a),
+ PSIL_PDMA_XY_PKT(0xc61b),
+ PSIL_PDMA_XY_PKT(0xc61c),
+ PSIL_PDMA_XY_PKT(0xc61d),
+ PSIL_PDMA_XY_PKT(0xc61e),
+ PSIL_PDMA_XY_PKT(0xc61f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0xc700),
+ PSIL_PDMA_XY_PKT(0xc701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0xc702),
+ PSIL_PDMA_XY_PKT(0xc703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0xc704),
+ PSIL_PDMA_XY_PKT(0xc705),
+ PSIL_PDMA_XY_PKT(0xc706),
+ PSIL_PDMA_XY_PKT(0xc707),
+ PSIL_PDMA_XY_PKT(0xc708),
+ PSIL_PDMA_XY_PKT(0xc709),
/* CPSW5 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
@@ -161,6 +212,22 @@ static struct psil_ep j7200_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA_MISC_G0 - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA_MISC_G1 - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_PDMA_MISC_G2 - UART0 */
+ PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c
index 34e3fc565a37..e7c83d668bb6 100644
--- a/drivers/dma/ti/k3-psil-j721e.c
+++ b/drivers/dma/ti/k3-psil-j721e.c
@@ -266,6 +266,69 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xc205),
PSIL_ETHERNET(0xc206),
PSIL_ETHERNET(0xc207),
+ /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */
+ PSIL_PDMA_MCASP(0xc400),
+ PSIL_PDMA_MCASP(0xc401),
+ PSIL_PDMA_MCASP(0xc402),
+ /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ PSIL_PDMA_MCASP(0xc503),
+ PSIL_PDMA_MCASP(0xc504),
+ PSIL_PDMA_MCASP(0xc505),
+ PSIL_PDMA_MCASP(0xc506),
+ PSIL_PDMA_MCASP(0xc507),
+ PSIL_PDMA_MCASP(0xc508),
+ /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */
+ PSIL_PDMA_XY_PKT(0xc600),
+ PSIL_PDMA_XY_PKT(0xc601),
+ PSIL_PDMA_XY_PKT(0xc602),
+ PSIL_PDMA_XY_PKT(0xc603),
+ PSIL_PDMA_XY_PKT(0xc604),
+ PSIL_PDMA_XY_PKT(0xc605),
+ PSIL_PDMA_XY_PKT(0xc606),
+ PSIL_PDMA_XY_PKT(0xc607),
+ /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */
+ PSIL_PDMA_XY_PKT(0xc60c),
+ PSIL_PDMA_XY_PKT(0xc60d),
+ PSIL_PDMA_XY_PKT(0xc60e),
+ PSIL_PDMA_XY_PKT(0xc60f),
+ PSIL_PDMA_XY_PKT(0xc610),
+ PSIL_PDMA_XY_PKT(0xc611),
+ PSIL_PDMA_XY_PKT(0xc612),
+ PSIL_PDMA_XY_PKT(0xc613),
+ /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */
+ PSIL_PDMA_XY_PKT(0xc618),
+ PSIL_PDMA_XY_PKT(0xc619),
+ PSIL_PDMA_XY_PKT(0xc61a),
+ PSIL_PDMA_XY_PKT(0xc61b),
+ PSIL_PDMA_XY_PKT(0xc61c),
+ PSIL_PDMA_XY_PKT(0xc61d),
+ PSIL_PDMA_XY_PKT(0xc61e),
+ PSIL_PDMA_XY_PKT(0xc61f),
+ /* PDMA11 (PDMA_MISC_G3) */
+ PSIL_PDMA_XY_PKT(0xc624),
+ PSIL_PDMA_XY_PKT(0xc625),
+ PSIL_PDMA_XY_PKT(0xc626),
+ PSIL_PDMA_XY_PKT(0xc627),
+ PSIL_PDMA_XY_PKT(0xc628),
+ PSIL_PDMA_XY_PKT(0xc629),
+ PSIL_PDMA_XY_PKT(0xc630),
+ PSIL_PDMA_XY_PKT(0xc63a),
+ /* PDMA13 (PDMA_USART_G0) - UART0-1 */
+ PSIL_PDMA_XY_PKT(0xc700),
+ PSIL_PDMA_XY_PKT(0xc701),
+ /* PDMA14 (PDMA_USART_G1) - UART2-3 */
+ PSIL_PDMA_XY_PKT(0xc702),
+ PSIL_PDMA_XY_PKT(0xc703),
+ /* PDMA15 (PDMA_USART_G2) - UART4-9 */
+ PSIL_PDMA_XY_PKT(0xc704),
+ PSIL_PDMA_XY_PKT(0xc705),
+ PSIL_PDMA_XY_PKT(0xc706),
+ PSIL_PDMA_XY_PKT(0xc707),
+ PSIL_PDMA_XY_PKT(0xc708),
+ PSIL_PDMA_XY_PKT(0xc709),
/* CPSW9 */
PSIL_ETHERNET(0xca00),
PSIL_ETHERNET(0xca01),
@@ -284,6 +347,22 @@ static struct psil_ep j721e_dst_ep_map[] = {
PSIL_ETHERNET(0xf005),
PSIL_ETHERNET(0xf006),
PSIL_ETHERNET(0xf007),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0xf100),
+ PSIL_PDMA_XY_PKT(0xf101),
+ PSIL_PDMA_XY_PKT(0xf102),
+ PSIL_PDMA_XY_PKT(0xf103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0xf200),
+ PSIL_PDMA_XY_PKT(0xf201),
+ PSIL_PDMA_XY_PKT(0xf202),
+ PSIL_PDMA_XY_PKT(0xf203),
+ PSIL_PDMA_XY_PKT(0xf204),
+ PSIL_PDMA_XY_PKT(0xf205),
+ PSIL_PDMA_XY_PKT(0xf206),
+ PSIL_PDMA_XY_PKT(0xf207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0xf300),
/* SA2UL */
PSIL_SA2UL(0xf500, 1),
PSIL_SA2UL(0xf501, 1),
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index d4f1e4e9603a..85e00701473c 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -31,14 +31,14 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
pdev = of_find_device_by_node(udma_node);
+ if (np != udma_node)
+ of_node_put(udma_node);
+
if (!pdev) {
pr_debug("UDMA device not found\n");
return ERR_PTR(-EPROBE_DEFER);
}
- if (np != udma_node)
- of_node_put(udma_node);
-
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 2f0d2c68c93c..7b5081989b3d 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -263,6 +263,7 @@ struct udma_chan_config {
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
u32 tr_trigger_type;
+ unsigned long tx_flags;
/* PKDMA mapped channel */
int mapped_channel_id;
@@ -300,8 +301,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
- u32 bcnt; /* number of bytes completed since the start of the channel */
-
/* Channel configuration parameters */
struct udma_chan_config config;
@@ -757,6 +756,20 @@ static void udma_reset_rings(struct udma_chan *uc)
}
}
+static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
+{
+ if (uc->desc->dir == DMA_DEV_TO_MEM) {
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ } else {
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
+ if (!uc->bchan)
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
+}
+
static void udma_reset_counters(struct udma_chan *uc)
{
u32 val;
@@ -790,8 +803,6 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
}
-
- uc->bcnt = 0;
}
static int udma_reset_chan(struct udma_chan *uc, bool hard)
@@ -1045,9 +1056,14 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
u32 peer_bcnt, bcnt;
- /* Only TX towards PDMA is affected */
+ /*
+ * Only TX towards PDMA is affected.
+ * If DMA_PREP_INTERRUPT is not set by the consumer, skip the transfer
+ * completion calculation; in that case the consumer must ensure there is
+ * no stale data left in the DMA fabric.
+ */
if (uc->config.ep_type == PSIL_EP_NATIVE ||
- uc->config.dir != DMA_MEM_TO_DEV)
+ uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
return true;
peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
@@ -1115,7 +1131,7 @@ static void udma_check_tx_completion(struct work_struct *work)
if (uc->desc) {
struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
break;
@@ -1168,7 +1184,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
if (udma_is_desc_really_done(uc, d)) {
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
} else {
@@ -1204,7 +1220,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
vchan_cyclic_callback(&d->vd);
} else {
/* TODO: figure out the real amount of data */
- uc->bcnt += d->residue;
+ udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
}
@@ -3408,6 +3424,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!burst)
burst = 1;
+ uc->config.tx_flags = tx_flags;
+
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
@@ -3809,7 +3827,6 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
}
- bcnt -= uc->bcnt;
if (bcnt && !(bcnt % uc->desc->residue))
residue = 0;
else
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 6276934d4d2b..8cd4e69dc7b4 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -3040,9 +3040,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
/* Request and map I/O memory */
xdev->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xdev->regs))
- return PTR_ERR(xdev->regs);
-
+ if (IS_ERR(xdev->regs)) {
+ err = PTR_ERR(xdev->regs);
+ goto disable_clks;
+ }
/* Retrieve the DMA engine properties from the device tree */
xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
@@ -3070,7 +3071,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(xdev->dev,
"missing xlnx,num-fstores property\n");
- return err;
+ goto disable_clks;
}
err = of_property_read_u32(node, "xlnx,flush-fsync",
@@ -3090,7 +3091,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->ext_addr = false;
/* Set the dma mask bits */
- dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
+ if (err < 0) {
+ dev_err(xdev->dev, "DMA mask error %d\n", err);
+ goto disable_clks;
+ }
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
@@ -3137,7 +3142,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
for_each_child_of_node(node, child) {
err = xilinx_dma_child_probe(xdev, child);
if (err < 0)
- goto disable_clks;
+ goto error;
}
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -3172,12 +3177,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
return 0;
-disable_clks:
- xdma_disable_allclks(xdev);
error:
for (i = 0; i < xdev->dma_config->max_channels; i++)
if (xdev->chan[i])
xilinx_dma_chan_remove(xdev->chan[i]);
+disable_clks:
+ xdma_disable_allclks(xdev);
return err;
}
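
The xilinx_dma_probe() hunks above reorder the error labels so that cleanup undoes work in reverse order: early failures now jump straight to disabling clocks, while channel removal only runs once channels actually exist. A generic sketch of that goto-based cleanup idiom, with stand-in resource functions rather than the driver's API:

#include <stdio.h>

static int init_clocks(void)       { puts("clocks on");     return 0; }
static void disable_clocks(void)   { puts("clocks off"); }
static int map_registers(void)     { puts("map regs: fail"); return -1; } /* simulated failure */
static int add_channels(void)      { puts("channels added"); return 0; }
static void remove_channels(void)  { puts("channels removed"); }

static int probe(void)
{
	int err;

	err = init_clocks();
	if (err)
		return err;

	err = map_registers();
	if (err)
		goto disable_clks;	/* no channels to undo yet */

	err = add_channels();
	if (err)
		goto error;

	return 0;

error:
	remove_channels();		/* undo in reverse order of setup */
disable_clks:
	disable_clocks();
	return err;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}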
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index dc299ab36818..21472a5d7636 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -796,6 +796,17 @@ static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
}
/**
+ * zynqmp_dma_synchronize - Synchronizes the termination of transfers to the current context.
+ * @dchan: DMA channel pointer
+ */
+static void zynqmp_dma_synchronize(struct dma_chan *dchan)
+{
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+ tasklet_kill(&chan->tasklet);
+}
+
+/**
* zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
* @dchan: DMA channel
* @dma_dst: Destination buffer address
@@ -849,7 +860,7 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
zynqmp_dma_desc_config_eod(chan, desc);
async_tx_ack(&first->async_tx);
- first->async_tx.flags = flags;
+ first->async_tx.flags = (enum dma_ctrl_flags)flags;
return &first->async_tx;
}
@@ -1057,6 +1068,7 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p = &zdev->common;
p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
p->device_terminate_all = zynqmp_dma_device_terminate_all;
+ p->device_synchronize = zynqmp_dma_synchronize;
p->device_issue_pending = zynqmp_dma_issue_pending;
p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index eb58644bb019..6faeb2ab3960 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -103,7 +103,6 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm)
edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
- edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 96f6de0c8ff6..50ed9f2425bb 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -28,13 +28,9 @@ void edac_mc_sysfs_exit(void);
extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups);
extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
-extern int edac_get_log_ue(void);
-extern int edac_get_log_ce(void);
-extern int edac_get_panic_on_ue(void);
extern int edac_mc_get_log_ue(void);
extern int edac_mc_get_log_ce(void);
extern int edac_mc_get_panic_on_ue(void);
-extern int edac_get_poll_msec(void);
extern unsigned int edac_mc_get_poll_msec(void);
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 6cf50ee0b77c..a22ea053f8e1 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -74,31 +74,47 @@ static struct list_head *i10nm_edac_list;
static struct res_config *res_cfg;
static int retry_rd_err_log;
+static int decoding_via_mca;
+static bool mem_cfg_2lm;
static u32 offsets_scrub_icx[] = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[] = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
+static u32 offsets_scrub_spr_hbm0[] = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
+static u32 offsets_scrub_spr_hbm1[] = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
+static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
+static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
+static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
-static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable)
+static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
+ u32 *offsets_scrub, u32 *offsets_demand,
+ u32 *offsets_demand2)
{
- u32 s, d;
+ u32 s, d, d2;
- if (!imc->mbase)
- return;
-
- s = I10NM_GET_REG32(imc, chan, res_cfg->offsets_scrub[0]);
- d = I10NM_GET_REG32(imc, chan, res_cfg->offsets_demand[0]);
+ s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
+ d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
+ if (offsets_demand2)
+ d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);
if (enable) {
/* Save default configurations */
imc->chan[chan].retry_rd_err_log_s = s;
imc->chan[chan].retry_rd_err_log_d = d;
+ if (offsets_demand2)
+ imc->chan[chan].retry_rd_err_log_d2 = d2;
s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
s |= RETRY_RD_ERR_LOG_EN;
d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
d |= RETRY_RD_ERR_LOG_EN;
+
+ if (offsets_demand2) {
+ d2 &= ~RETRY_RD_ERR_LOG_UC;
+ d2 |= RETRY_RD_ERR_LOG_NOOVER;
+ d2 |= RETRY_RD_ERR_LOG_EN;
+ }
} else {
/* Restore default configurations */
if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
@@ -113,23 +129,55 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
d |= RETRY_RD_ERR_LOG_NOOVER;
if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
d &= ~RETRY_RD_ERR_LOG_EN;
+
+ if (offsets_demand2) {
+ if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
+ d2 |= RETRY_RD_ERR_LOG_UC;
+ if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
+ d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
+ if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
+ d2 &= ~RETRY_RD_ERR_LOG_EN;
+ }
}
- I10NM_SET_REG32(imc, chan, res_cfg->offsets_scrub[0], s);
- I10NM_SET_REG32(imc, chan, res_cfg->offsets_demand[0], d);
+ I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
+ I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
+ if (offsets_demand2)
+ I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}
static void enable_retry_rd_err_log(bool enable)
{
+ struct skx_imc *imc;
struct skx_dev *d;
int i, j;
edac_dbg(2, "\n");
list_for_each_entry(d, i10nm_edac_list, list)
- for (i = 0; i < I10NM_NUM_IMC; i++)
- for (j = 0; j < I10NM_NUM_CHANNELS; j++)
- __enable_retry_rd_err_log(&d->imc[i], j, enable);
+ for (i = 0; i < I10NM_NUM_IMC; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase)
+ continue;
+
+ for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
+ if (imc->hbm_mc) {
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm0,
+ res_cfg->offsets_demand_hbm0,
+ NULL);
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm1,
+ res_cfg->offsets_demand_hbm1,
+ NULL);
+ } else {
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub,
+ res_cfg->offsets_demand,
+ res_cfg->offsets_demand2);
+ }
+ }
+ }
}
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
@@ -138,14 +186,33 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
struct skx_imc *imc = &res->dev->imc[res->imc];
u32 log0, log1, log2, log3, log4;
u32 corr0, corr1, corr2, corr3;
+ u32 lxg0, lxg1, lxg3, lxg4;
+ u32 *xffsets = NULL;
u64 log2a, log5;
+ u64 lxg2a, lxg5;
u32 *offsets;
- int n;
+ int n, pch;
if (!imc->mbase)
return;
- offsets = scrub_err ? res_cfg->offsets_scrub : res_cfg->offsets_demand;
+ if (imc->hbm_mc) {
+ pch = res->cs & 1;
+
+ if (pch)
+ offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
+ res_cfg->offsets_demand_hbm1;
+ else
+ offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
+ res_cfg->offsets_demand_hbm0;
+ } else {
+ if (scrub_err) {
+ offsets = res_cfg->offsets_scrub;
+ } else {
+ offsets = res_cfg->offsets_demand;
+ xffsets = res_cfg->offsets_demand2;
+ }
+ }
log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
@@ -153,20 +220,52 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);
+ if (xffsets) {
+ lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
+ lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
+ lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
+ lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
+ lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
+ }
+
if (res_cfg->type == SPR) {
log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
- n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx]",
+ n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
log0, log1, log2a, log3, log4, log5);
+
+ if (len - n > 0) {
+ if (xffsets) {
+ lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
+ n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
+ lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
+ } else {
+ n += snprintf(msg + n, len - n, "]");
+ }
+ }
} else {
log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
log0, log1, log2, log3, log4, log5);
}
- corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
- corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
- corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
- corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+ if (imc->hbm_mc) {
+ if (pch) {
+ corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
+ corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
+ corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
+ corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
+ } else {
+ corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
+ corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
+ corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
+ corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
+ }
+ } else {
+ corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
+ corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
+ corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
+ corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
+ }
if (len - n > 0)
snprintf(msg + n, len - n,
@@ -177,9 +276,16 @@ static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
corr3 & 0xffff, corr3 >> 16);
/* Clear status bits */
- if (retry_rd_err_log == 2 && (log0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
- log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
- I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ if (retry_rd_err_log == 2) {
+ if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
+ log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+ I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
+ }
+
+ if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
+ lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
+ I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
+ }
}
}
@@ -231,6 +337,103 @@ static bool i10nm_check_2lm(struct res_config *cfg)
return false;
}
+/*
+ * Check whether the error comes from DDRT based on the ICX/Tremont model-specific
+ * error code. Refer to SDM vol3B 16.11.3, Intel IMC MC error codes for IA32_MCi_STATUS.
+ */
+static bool i10nm_mscod_is_ddrt(u32 mscod)
+{
+ switch (mscod) {
+ case 0x0106: case 0x0107:
+ case 0x0800: case 0x0804:
+ case 0x0806 ... 0x0808:
+ case 0x080a ... 0x080e:
+ case 0x0810: case 0x0811:
+ case 0x0816: case 0x081e:
+ case 0x081f:
+ return true;
+ }
+
+ return false;
+}
+
+static bool i10nm_mc_decode_available(struct mce *mce)
+{
+ u8 bank;
+
+ if (!decoding_via_mca || mem_cfg_2lm)
+ return false;
+
+ if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
+ != (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
+ return false;
+
+ bank = mce->bank;
+
+ switch (res_cfg->type) {
+ case I10NM:
+ if (bank < 13 || bank > 26)
+ return false;
+
+ /* DDRT errors can't be decoded from MCA bank registers */
+ if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
+ return false;
+
+ if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
+ return false;
+
+ /* Check whether the bank is one of {13, 14, 17, 18, 21, 22, 25, 26} */
+ return ((bank - 13) & BIT(1)) == 0;
+ default:
+ return false;
+ }
+}
+
+static bool i10nm_mc_decode(struct decoded_addr *res)
+{
+ struct mce *m = res->mce;
+ struct skx_dev *d;
+ u8 bank;
+
+ if (!i10nm_mc_decode_available(m))
+ return false;
+
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ if (d->imc[0].src_id == m->socketid) {
+ res->socket = m->socketid;
+ res->dev = d;
+ break;
+ }
+ }
+
+ switch (res_cfg->type) {
+ case I10NM:
+ bank = m->bank - 13;
+ res->imc = bank / 4;
+ res->channel = bank % 2;
+ break;
+ default:
+ return false;
+ }
+
+ if (!res->dev) {
+ skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
+ m->socketid, res->imc);
+ return false;
+ }
+
+ res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
+ res->row = GET_BITFIELD(m->misc, 19, 39);
+ res->bank_group = GET_BITFIELD(m->misc, 40, 41);
+ res->bank_address = GET_BITFIELD(m->misc, 42, 43);
+ res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
+ res->rank = GET_BITFIELD(m->misc, 56, 58);
+ res->dimm = res->rank >> 2;
+ res->rank = res->rank % 4;
+
+ return true;
+}
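
For reference, a minimal user-space sketch of the IA32_MCi_MISC field extraction above, using the same bit positions; the sample register value and the bitfield() helper are illustrative stand-ins, not the kernel's GET_BITFIELD().

#include <stdint.h>
#include <stdio.h>

static uint64_t bitfield(uint64_t v, int lo, int hi)
{
	return (v >> lo) & ((1ULL << (hi - lo + 1)) - 1);
}

int main(void)
{
	uint64_t misc = 0x0123456789abcdefULL;	/* hypothetical IA32_MCi_MISC value */
	uint64_t column = bitfield(misc, 9, 18) << 2;
	uint64_t row = bitfield(misc, 19, 39);
	uint64_t bank_group = bitfield(misc, 40, 41) | (bitfield(misc, 44, 44) << 2);
	uint64_t bank_address = bitfield(misc, 42, 43);
	uint64_t rank = bitfield(misc, 56, 58);

	printf("col 0x%llx row 0x%llx bg %llu ba %llu dimm %llu rank %llu\n",
	       (unsigned long long)column, (unsigned long long)row,
	       (unsigned long long)bank_group, (unsigned long long)bank_address,
	       (unsigned long long)(rank >> 2), (unsigned long long)(rank % 4));
	return 0;
}
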
+
static int i10nm_get_ddr_munits(void)
{
struct pci_dev *mdev;
@@ -420,7 +623,12 @@ static struct res_config spr_cfg = {
.sad_all_devfn = PCI_DEVFN(10, 0),
.sad_all_offset = 0x300,
.offsets_scrub = offsets_scrub_spr,
+ .offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
+ .offsets_scrub_hbm1 = offsets_scrub_spr_hbm1,
.offsets_demand = offsets_demand_spr,
+ .offsets_demand2 = offsets_demand2_spr,
+ .offsets_demand_hbm0 = offsets_demand_spr_hbm0,
+ .offsets_demand_hbm1 = offsets_demand_spr_hbm1,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
@@ -574,7 +782,8 @@ static int __init i10nm_init(void)
return -ENODEV;
}
- skx_set_mem_cfg(i10nm_check_2lm(cfg));
+ mem_cfg_2lm = i10nm_check_2lm(cfg);
+ skx_set_mem_cfg(mem_cfg_2lm);
rc = i10nm_get_ddr_munits();
@@ -626,9 +835,11 @@ static int __init i10nm_init(void)
setup_i10nm_debug();
if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
- skx_set_decode(NULL, show_retry_rd_err_log);
+ skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
if (retry_rd_err_log == 2)
enable_retry_rd_err_log(true);
+ } else {
+ skx_set_decode(i10nm_mc_decode, NULL);
}
i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
@@ -658,6 +869,34 @@ static void __exit i10nm_exit(void)
module_init(i10nm_init);
module_exit(i10nm_exit);
+static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &val);
+
+ if (ret || val > 1)
+ return -EINVAL;
+
+ if (val && mem_cfg_2lm) {
+ i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
+ return -EIO;
+ }
+
+ ret = param_set_int(buf, kp);
+
+ return ret;
+}
+
+static const struct kernel_param_ops decoding_via_mca_param_ops = {
+ .set = set_decoding_via_mca,
+ .get = param_get_int,
+};
+
+module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
+MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");
+
module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values.), 2=linux(Linux tries to take control and resets mode bits, clear valid/UC bits after reading.)");
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 4f28b8c8d378..61adaa872ba7 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1193,7 +1193,7 @@ static int __init i7300_init(void)
}
/**
- * i7300_init() - Unregisters the driver
+ * i7300_exit() - Unregisters the driver
*/
static void __exit i7300_exit(void)
{
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 9a9ff5ad611a..9ef13570f2e5 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -20,11 +20,15 @@
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
* 5918: Xeon E3-1200 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
+ * 190f: 6th Gen Core Dual-Core Processor Host Bridge/DRAM Registers
+ * 191f: 6th Gen Core Quad-Core Processor Host Bridge/DRAM Registers
* 3e..: 8th/9th Gen Core Processor Host Bridge/DRAM Registers
*
* Based on Intel specification:
* https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/desktop-6th-gen-core-family-datasheet-vol-2.pdf
+ * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v6-vol-2-datasheet.pdf
* https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
* https://www.intel.com/content/www/us/en/products/docs/processors/core/8th-gen-core-family-datasheet-vol-2.html
*
@@ -53,15 +57,17 @@
#define ie31200_printk(level, fmt, arg...) \
edac_printk(level, "ie31200", fmt, ##arg)
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
-#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x190F
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x1918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_10 0x191F
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_11 0x5918
/* Coffee Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK 0x3e00
@@ -80,6 +86,8 @@
#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
(((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
+ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
+ ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
(((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
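
As a sanity check on the mask test in DEVICE_ID_SKYLAKE_OR_LATER(), a small stand-alone sketch (the device IDs below are only examples): any 0x3exx Coffee Lake-S ID satisfies (did & 0x3e00) == 0x3e00, while the Skylake/Kaby Lake IDs added here are matched explicitly.

/* Stand-alone check of the Coffee Lake mask match used above. */
#include <stdio.h>

#define CFL_MASK 0x3e00

int main(void)
{
	unsigned int ids[] = { 0x190f, 0x1918, 0x191f, 0x5918, 0x3e1f, 0x3e30 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("0x%04x: mask match %s\n", ids[i],
		       (ids[i] & CFL_MASK) == CFL_MASK ? "yes" : "no");
	return 0;
}
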
@@ -577,6 +585,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
{ PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 0bc670778c99..046969b4e82e 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -178,11 +178,6 @@ struct ppc4xx_ecc_status {
u32 wmirq;
};
-/* Function Prototypes */
-
-static int ppc4xx_edac_probe(struct platform_device *device);
-static int ppc4xx_edac_remove(struct platform_device *device);
-
/* Global Variables */
/*
@@ -197,15 +192,6 @@ static const struct of_device_id ppc4xx_edac_match[] = {
};
MODULE_DEVICE_TABLE(of, ppc4xx_edac_match);
-static struct platform_driver ppc4xx_edac_driver = {
- .probe = ppc4xx_edac_probe,
- .remove = ppc4xx_edac_remove,
- .driver = {
- .name = PPC4XX_EDAC_MODULE_NAME,
- .of_match_table = ppc4xx_edac_match,
- },
-};
-
/*
* TODO: The row and channel parameters likely need to be dynamically
* set based on the aforementioned variant controller realizations.
@@ -1391,6 +1377,15 @@ ppc4xx_edac_opstate_init(void)
EDAC_OPSTATE_UNKNOWN_STR)));
}
+static struct platform_driver ppc4xx_edac_driver = {
+ .probe = ppc4xx_edac_probe,
+ .remove = ppc4xx_edac_remove,
+ .driver = {
+ .name = PPC4XX_EDAC_MODULE_NAME,
+ .of_match_table = ppc4xx_edac_match,
+ },
+};
+
/**
* ppc4xx_edac_init - driver/module insertion entry point
*
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 9678ab97c7ac..8e39370fdb5c 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -335,6 +335,12 @@ struct sbridge_info {
struct sbridge_channel {
u32 ranks;
u32 dimms;
+ struct dimm {
+ u32 rowbits;
+ u32 colbits;
+ u32 bank_xor_enable;
+ u32 amap_fine;
+ } dimm[MAX_DIMMS];
};
struct pci_id_descr {
@@ -1603,7 +1609,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
banks = 8;
for (i = 0; i < channels; i++) {
- u32 mtr;
+ u32 mtr, amap = 0;
int max_dimms_per_channel;
@@ -1615,6 +1621,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
if (!pvt->pci_tad[i])
continue;
+ pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
}
for (j = 0; j < max_dimms_per_channel; j++) {
@@ -1627,6 +1634,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
mtr_regs[j], &mtr);
}
edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
+
if (IS_DIMM_PRESENT(mtr)) {
if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
@@ -1661,6 +1669,11 @@ static int __populate_dimms(struct mem_ctl_info *mci,
dimm->dtype = pvt->info.get_width(pvt, mtr);
dimm->mtype = mtype;
dimm->edac_mode = mode;
+ pvt->channel[i].dimm[j].rowbits = order_base_2(rows);
+ pvt->channel[i].dimm[j].colbits = order_base_2(cols);
+ pvt->channel[i].dimm[j].bank_xor_enable =
+ GET_BITFIELD(pvt->info.mcmtr, 9, 9);
+ pvt->channel[i].dimm[j].amap_fine = GET_BITFIELD(amap, 0, 0);
snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
@@ -1922,6 +1935,99 @@ static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
return NULL;
}
+static u8 sb_close_row[] = {
+ 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
+};
+
+static u8 sb_close_column[] = {
+ 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
+};
+
+static u8 sb_open_row[] = {
+ 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
+};
+
+static u8 sb_open_column[] = {
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+};
+
+static u8 sb_open_fine_column[] = {
+ 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
+};
+
+static int sb_bits(u64 addr, int nbits, u8 *bits)
+{
+ int i, res = 0;
+
+ for (i = 0; i < nbits; i++)
+ res |= ((addr >> bits[i]) & 1) << i;
+ return res;
+}
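
The bit-gather above is easier to see with a stand-alone example (the address value is made up): bit bits[i] of the rank address becomes bit i of the result, so sb_close_row[] simply lists which physical address bits feed row bits 0..16.

#include <stdint.h>
#include <stdio.h>

static int gather_bits(uint64_t addr, int nbits, const uint8_t *bits)
{
	int i, res = 0;

	for (i = 0; i < nbits; i++)
		res |= ((addr >> bits[i]) & 1) << i;
	return res;
}

int main(void)
{
	static const uint8_t close_row[] = {
		15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
	};
	uint64_t rank_addr = (1ULL << 15) | (1ULL << 28);	/* hypothetical address */

	/* address bits 15 and 28 map to row bits 0 and 7 -> row 0x81 */
	printf("row = 0x%x\n", gather_bits(rank_addr, 17, close_row));
	return 0;
}
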
+
+static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
+{
+ int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
+
+ if (do_xor)
+ ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
+
+ return ret;
+}
+
+static bool sb_decode_ddr4(struct mem_ctl_info *mci, int ch, u8 rank,
+ u64 rank_addr, char *msg)
+{
+ int dimmno = 0;
+ int row, col, bank_address, bank_group;
+ struct sbridge_pvt *pvt;
+ u32 bg0 = 0, rowbits = 0, colbits = 0;
+ u32 amap_fine = 0, bank_xor_enable = 0;
+
+ dimmno = (rank < 12) ? rank / 4 : 2;
+ pvt = mci->pvt_info;
+ amap_fine = pvt->channel[ch].dimm[dimmno].amap_fine;
+ bg0 = amap_fine ? 6 : 13;
+ rowbits = pvt->channel[ch].dimm[dimmno].rowbits;
+ colbits = pvt->channel[ch].dimm[dimmno].colbits;
+ bank_xor_enable = pvt->channel[ch].dimm[dimmno].bank_xor_enable;
+
+ if (pvt->is_lockstep) {
+ pr_warn_once("LockStep row/column decode is not supported yet!\n");
+ msg[0] = '\0';
+ return false;
+ }
+
+ if (pvt->is_close_pg) {
+ row = sb_bits(rank_addr, rowbits, sb_close_row);
+ col = sb_bits(rank_addr, colbits, sb_close_column);
+ col |= 0x400; /* C10 is autoprecharge, always set */
+ bank_address = sb_bank_bits(rank_addr, 8, 9, bank_xor_enable, 22, 28);
+ bank_group = sb_bank_bits(rank_addr, 6, 7, bank_xor_enable, 20, 21);
+ } else {
+ row = sb_bits(rank_addr, rowbits, sb_open_row);
+ if (amap_fine)
+ col = sb_bits(rank_addr, colbits, sb_open_fine_column);
+ else
+ col = sb_bits(rank_addr, colbits, sb_open_column);
+ bank_address = sb_bank_bits(rank_addr, 18, 19, bank_xor_enable, 22, 23);
+ bank_group = sb_bank_bits(rank_addr, bg0, 17, bank_xor_enable, 20, 21);
+ }
+
+ row &= (1u << rowbits) - 1;
+
+ sprintf(msg, "row:0x%x col:0x%x bank_addr:%d bank_group:%d",
+ row, col, bank_address, bank_group);
+ return true;
+}
+
+static bool sb_decode_ddr3(struct mem_ctl_info *mci, int ch, u8 rank,
+ u64 rank_addr, char *msg)
+{
+	pr_warn_once("DDR3 row/column decode not supported yet!\n");
+ msg[0] = '\0';
+ return false;
+}
+
static int get_memory_error_data(struct mem_ctl_info *mci,
u64 addr,
u8 *socket, u8 *ha,
@@ -1937,12 +2043,13 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
int interleave_mode, shiftup = 0;
unsigned int sad_interleave[MAX_INTERLEAVE];
u32 reg, dram_rule;
- u8 ch_way, sck_way, pkg, sad_ha = 0;
+ u8 ch_way, sck_way, pkg, sad_ha = 0, rankid = 0;
u32 tad_offset;
u32 rir_way;
u32 mb, gb;
u64 ch_addr, offset, limit = 0, prv = 0;
-
+ u64 rank_addr;
+ enum mem_type mtype;
/*
* Step 0) Check if the address is at special memory ranges
@@ -2226,6 +2333,28 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
*rank = RIR_RNK_TGT(pvt->info.type, reg);
+ if (pvt->info.type == BROADWELL) {
+ if (pvt->is_close_pg)
+ shiftup = 6;
+ else
+ shiftup = 13;
+
+ rank_addr = ch_addr >> shiftup;
+ rank_addr /= (1 << rir_way);
+ rank_addr <<= shiftup;
+ rank_addr |= ch_addr & GENMASK_ULL(shiftup - 1, 0);
+ rank_addr -= RIR_OFFSET(pvt->info.type, reg);
+
+ mtype = pvt->info.get_memory_type(pvt);
+ rankid = *rank;
+ if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
+ sb_decode_ddr4(mci, base_ch, rankid, rank_addr, msg);
+ else
+ sb_decode_ddr3(mci, base_ch, rankid, rank_addr, msg);
+ } else {
+ msg[0] = '\0';
+ }
+
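
A hedged, stand-alone sketch of the channel-to-rank address step above (all numbers are made up, and rir_way is used as a shift count exactly as in the code): the bits above shiftup are divided across the rank-interleave ways, the low shiftup bits pass through unchanged, and the RIR offset would then be subtracted.

/* Stand-alone illustration of the de-interleave arithmetic above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ch_addr = 0x12345678;	/* hypothetical channel address */
	int shiftup = 6;		/* close page */
	int rir_way = 1;		/* as above: divide by 1 << rir_way */
	uint64_t rank_addr;

	rank_addr = (ch_addr >> shiftup) / (1 << rir_way);
	rank_addr = (rank_addr << shiftup) | (ch_addr & ((1ULL << shiftup) - 1));

	printf("ch_addr 0x%llx -> rank_addr 0x%llx (before RIR offset)\n",
	       (unsigned long long)ch_addr, (unsigned long long)rank_addr);
	return 0;
}
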
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
ch_addr,
@@ -2950,7 +3079,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
enum hw_event_mc_err_type tp_event;
- char *optype, msg[256];
+ char *optype, msg[256], msg_full[512];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -3089,18 +3218,17 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
*/
if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
channel = first_channel;
-
- snprintf(msg, sizeof(msg),
- "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
+ snprintf(msg_full, sizeof(msg_full),
+ "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
area_type,
mscod, errcode,
socket, ha,
channel_mask,
- rank);
+ rank, msg);
- edac_dbg(0, "%s\n", msg);
+ edac_dbg(0, "%s\n", msg_full);
/* FIXME: need support for channel mask */
@@ -3111,7 +3239,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, dimm, -1,
- optype, msg);
+ optype, msg_full);
return;
err_parsing:
edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 1abc020d49ab..7e2762f62eec 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -714,8 +714,13 @@ static int __init skx_init(void)
skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
- if (nvdimm_count && skx_adxl_get() == -ENODEV)
- skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
+ if (nvdimm_count && skx_adxl_get() != -ENODEV) {
+ skx_set_decode(NULL, skx_show_retry_rd_err_log);
+ } else {
+ if (nvdimm_count)
+ skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
+ skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
+ }
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 19c17c5198c5..f0f8e98f6efb 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -27,9 +27,11 @@ static const char * const component_names[] = {
[INDEX_MEMCTRL] = "MemoryControllerId",
[INDEX_CHANNEL] = "ChannelId",
[INDEX_DIMM] = "DimmSlotId",
+ [INDEX_CS] = "ChipSelect",
[INDEX_NM_MEMCTRL] = "NmMemoryControllerId",
[INDEX_NM_CHANNEL] = "NmChannelId",
[INDEX_NM_DIMM] = "NmDimmSlotId",
+ [INDEX_NM_CS] = "NmChipSelect",
};
static int component_indices[ARRAY_SIZE(component_names)];
@@ -40,7 +42,7 @@ static char *adxl_msg;
static unsigned long adxl_nm_bitmap;
static char skx_msg[MSG_SIZE];
-static skx_decode_f skx_decode;
+static skx_decode_f driver_decode;
static skx_show_retry_log_f skx_show_retry_rd_err_log;
static u64 skx_tolm, skx_tohm;
static LIST_HEAD(dev_edac_list);
@@ -139,10 +141,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
(int)adxl_values[component_indices[INDEX_NM_CHANNEL]] : -1;
res->dimm = (adxl_nm_bitmap & BIT_NM_DIMM) ?
(int)adxl_values[component_indices[INDEX_NM_DIMM]] : -1;
+ res->cs = (adxl_nm_bitmap & BIT_NM_CS) ?
+ (int)adxl_values[component_indices[INDEX_NM_CS]] : -1;
} else {
res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];
+ res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
if (res->imc > NUM_IMC - 1 || res->imc < 0) {
@@ -173,6 +178,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, bool error_in_1st_level_me
break;
}
+ res->decoded_by_adxl = true;
+
return true;
}
@@ -183,7 +190,7 @@ void skx_set_mem_cfg(bool mem_cfg_2lm)
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log)
{
- skx_decode = decode;
+ driver_decode = decode;
skx_show_retry_rd_err_log = show_retry_log;
}
@@ -591,19 +598,19 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
break;
}
}
- if (adxl_component_count) {
+ if (res->decoded_by_adxl) {
len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
len = snprintf(skx_msg, MSG_SIZE,
- "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x",
+ "%s%s err_code:0x%04x:0x%04x ProcessorSocketId:0x%x MemoryControllerId:0x%x PhysicalRankId:0x%x Row:0x%x Column:0x%x Bank:0x%x BankGroup:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode,
res->socket, res->imc, res->rank,
- res->bank_group, res->bank_address, res->row, res->column);
+ res->row, res->column, res->bank_address, res->bank_group);
}
if (skx_show_retry_rd_err_log)
@@ -649,13 +656,14 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
+ res.mce = mce;
res.addr = mce->addr;
- if (adxl_component_count) {
- if (!skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce)))
+ /* Try driver decoder first */
+ if (!(driver_decode && driver_decode(&res))) {
+ /* Then try firmware decoder (ACPI DSM methods) */
+ if (!(adxl_component_count && skx_adxl_decode(&res, skx_error_in_1st_level_mem(mce))))
return NOTIFY_DONE;
- } else if (!skx_decode || !skx_decode(&res)) {
- return NOTIFY_DONE;
}
mci = res.dev->imc[res.imc].mci;
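
The net effect of the notifier change above is a fixed decode order; a trivial stand-alone sketch with stand-in decoders: the driver decoder working from the MCA bank registers is tried first, and the firmware ADXL/DSM path only runs when it declines.

#include <stdbool.h>
#include <stdio.h>

static bool driver_decode_stub(void) { return false; }	/* e.g. 2LM or DDRT error */
static bool adxl_decode_stub(void)   { return true; }

int main(void)
{
	if (driver_decode_stub())
		printf("decoded from MCA bank registers\n");
	else if (adxl_decode_stub())
		printf("decoded via firmware ADXL\n");
	else
		printf("not decoded: NOTIFY_DONE\n");
	return 0;
}
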
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 03ac067a80b9..0cbadd3d2cd3 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -10,6 +10,7 @@
#define _SKX_COMM_EDAC_H
#include <linux/bits.h>
+#include <asm/mce.h>
#define MSG_SIZE 1024
@@ -52,6 +53,9 @@
#define IS_DIMM_PRESENT(r) GET_BITFIELD(r, 15, 15)
#define IS_NVDIMM_PRESENT(r, i) GET_BITFIELD(r, i, i)
+#define MCI_MISC_ECC_MODE(m) (((m) >> 59) & 15)
+#define MCI_MISC_ECC_DDRT 8 /* read from DDRT */
+
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -82,6 +86,7 @@ struct skx_dev {
struct pci_dev *edev;
u32 retry_rd_err_log_s;
u32 retry_rd_err_log_d;
+ u32 retry_rd_err_log_d2;
struct skx_dimm {
u8 close_pg;
u8 bank_xor_enable;
@@ -108,18 +113,22 @@ enum {
INDEX_MEMCTRL,
INDEX_CHANNEL,
INDEX_DIMM,
+ INDEX_CS,
INDEX_NM_FIRST,
INDEX_NM_MEMCTRL = INDEX_NM_FIRST,
INDEX_NM_CHANNEL,
INDEX_NM_DIMM,
+ INDEX_NM_CS,
INDEX_MAX
};
#define BIT_NM_MEMCTRL BIT_ULL(INDEX_NM_MEMCTRL)
#define BIT_NM_CHANNEL BIT_ULL(INDEX_NM_CHANNEL)
#define BIT_NM_DIMM BIT_ULL(INDEX_NM_DIMM)
+#define BIT_NM_CS BIT_ULL(INDEX_NM_CS)
struct decoded_addr {
+ struct mce *mce;
struct skx_dev *dev;
u64 addr;
int socket;
@@ -129,6 +138,7 @@ struct decoded_addr {
int sktways;
int chanways;
int dimm;
+ int cs;
int rank;
int channel_rank;
u64 rank_address;
@@ -136,6 +146,7 @@ struct decoded_addr {
int column;
int bank_address;
int bank_group;
+ bool decoded_by_adxl;
};
struct res_config {
@@ -154,7 +165,12 @@ struct res_config {
int sad_all_offset;
/* Offsets of retry_rd_err_log registers */
u32 *offsets_scrub;
+ u32 *offsets_scrub_hbm0;
+ u32 *offsets_scrub_hbm1;
u32 *offsets_demand;
+ u32 *offsets_demand2;
+ u32 *offsets_demand_hbm0;
+ u32 *offsets_demand_hbm1;
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
diff --git a/drivers/edac/wq.c b/drivers/edac/wq.c
index d021d287eaec..ad3f516627c5 100644
--- a/drivers/edac/wq.c
+++ b/drivers/edac/wq.c
@@ -37,7 +37,6 @@ int edac_workqueue_setup(void)
void edac_workqueue_teardown(void)
{
- flush_workqueue(wq);
destroy_workqueue(wq);
wq = NULL;
}
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index dca7cecb37e3..290186e44e6b 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -183,7 +183,7 @@ config EXTCON_USBC_CROS_EC
config EXTCON_USBC_TUSB320
tristate "TI TUSB320 USB-C extcon support"
- depends on I2C
+ depends on I2C && TYPEC
select REGMAP_I2C
help
Say Y here to enable support for USB Type C cable detection extcon
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 02ba770acb27..e6e448f6ea2f 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -646,13 +646,11 @@ static int rt8973a_muic_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int rt8973a_muic_i2c_remove(struct i2c_client *i2c)
+static void rt8973a_muic_i2c_remove(struct i2c_client *i2c)
{
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
regmap_del_irq_chip(info->irq, info->irq_data);
-
- return 0;
}
static const struct of_device_id rt8973a_dt_match[] = {
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index 6ba3d89b106d..41041ff0fadb 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* drivers/extcon/extcon-tusb320.c - TUSB320 extcon driver
*
* Copyright (C) 2020 National Instruments Corporation
* Author: Michael Auchter <michael.auchter@ni.com>
*/
+#include <linux/bitfield.h>
#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -13,6 +14,24 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/usb/typec.h>
+
+#define TUSB320_REG8 0x8
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6)
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB 0x0
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A 0x1
+#define TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A 0x2
+#define TUSB320_REG8_CURRENT_MODE_DETECT GENMASK(5, 4)
+#define TUSB320_REG8_CURRENT_MODE_DETECT_DEF 0x0
+#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1
+#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2
+#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3
+#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2)
+#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0
+#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4
+#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5
+#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6
+#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0)
#define TUSB320_REG9 0x9
#define TUSB320_REG9_ATTACHED_STATE_SHIFT 6
@@ -55,6 +74,10 @@ struct tusb320_priv {
struct extcon_dev *edev;
struct tusb320_ops *ops;
enum tusb320_attached_state state;
+ struct typec_port *port;
+ struct typec_capability cap;
+ enum typec_port_type port_type;
+ enum typec_pwr_opmode pwr_opmode;
};
static const char * const tusb_attached_states[] = {
@@ -184,19 +207,47 @@ static struct tusb320_ops tusb320l_ops = {
.get_revision = tusb320l_get_revision,
};
-static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+static int tusb320_set_adv_pwr_mode(struct tusb320_priv *priv)
{
- struct tusb320_priv *priv = dev_id;
- int state, polarity;
- unsigned reg;
+ u8 mode;
+
+ if (priv->pwr_opmode == TYPEC_PWR_MODE_USB)
+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_USB;
+ else if (priv->pwr_opmode == TYPEC_PWR_MODE_1_5A)
+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_15A;
+ else if (priv->pwr_opmode == TYPEC_PWR_MODE_3_0A)
+ mode = TUSB320_REG8_CURRENT_MODE_ADVERTISE_30A;
+ else /* No other mode is supported. */
+ return -EINVAL;
- if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
- dev_err(priv->dev, "error during i2c read!\n");
- return IRQ_NONE;
- }
+ return regmap_write_bits(priv->regmap, TUSB320_REG8,
+ TUSB320_REG8_CURRENT_MODE_ADVERTISE,
+ FIELD_PREP(TUSB320_REG8_CURRENT_MODE_ADVERTISE,
+ mode));
+}
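
A small stand-alone illustration of the FIELD_PREP() write above (the macro is open-coded here purely for the demo): the two-bit advertise value is shifted into bits 7:6 of register 8 before regmap_write_bits() masks it in.

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0xc0;			/* GENMASK(7, 6), CURRENT_MODE_ADVERTISE */
	unsigned int adv_30a = 0x2;			/* the 3.0 A advertise code */
	unsigned int field = (adv_30a << 6) & mask;	/* what FIELD_PREP() produces */

	printf("reg8 advertise bits: 0x%02x\n", field);	/* prints 0x80 */
	return 0;
}
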
- if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
- return IRQ_NONE;
+static int tusb320_port_type_set(struct typec_port *port,
+ enum typec_port_type type)
+{
+ struct tusb320_priv *priv = typec_get_drvdata(port);
+
+ if (type == TYPEC_PORT_SRC)
+ return priv->ops->set_mode(priv, TUSB320_MODE_DFP);
+ else if (type == TYPEC_PORT_SNK)
+ return priv->ops->set_mode(priv, TUSB320_MODE_UFP);
+ else if (type == TYPEC_PORT_DRP)
+ return priv->ops->set_mode(priv, TUSB320_MODE_DRP);
+ else
+ return priv->ops->set_mode(priv, TUSB320_MODE_PORT);
+}
+
+static const struct typec_operations tusb320_typec_ops = {
+ .port_type_set = tusb320_port_type_set,
+};
+
+static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg)
+{
+ int state, polarity;
state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
TUSB320_REG9_ATTACHED_STATE_MASK;
@@ -219,6 +270,64 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
extcon_sync(priv->edev, EXTCON_USB_HOST);
priv->state = state;
+}
+
+static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
+{
+ struct typec_port *port = priv->port;
+ struct device *dev = priv->dev;
+ u8 mode, role, state;
+ int ret, reg8;
+ bool ori;
+
+ ori = reg9 & TUSB320_REG9_CABLE_DIRECTION;
+ typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE :
+ TYPEC_ORIENTATION_NORMAL);
+
+ state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
+ TUSB320_REG9_ATTACHED_STATE_MASK;
+ if (state == TUSB320_ATTACHED_STATE_DFP)
+ role = TYPEC_SOURCE;
+ else
+ role = TYPEC_SINK;
+
+ typec_set_vconn_role(port, role);
+ typec_set_pwr_role(port, role);
+ typec_set_data_role(port, role == TYPEC_SOURCE ?
+ TYPEC_HOST : TYPEC_DEVICE);
+
+ ret = regmap_read(priv->regmap, TUSB320_REG8, &reg8);
+ if (ret) {
+ dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
+ return;
+ }
+
+ mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8);
+ if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
+ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_MED)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_1_5A);
+ else if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_HI)
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_3_0A);
+ else /* Charge through accessory */
+ typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
+}
+
+static irqreturn_t tusb320_irq_handler(int irq, void *dev_id)
+{
+ struct tusb320_priv *priv = dev_id;
+ unsigned int reg;
+
+ if (regmap_read(priv->regmap, TUSB320_REG9, &reg)) {
+ dev_err(priv->dev, "error during i2c read!\n");
+ return IRQ_NONE;
+ }
+
+ if (!(reg & TUSB320_REG9_INTERRUPT_STATUS))
+ return IRQ_NONE;
+
+ tusb320_extcon_irq_handler(priv, reg);
+ tusb320_typec_irq_handler(priv, reg);
regmap_write(priv->regmap, TUSB320_REG9, reg);
@@ -230,8 +339,84 @@ static const struct regmap_config tusb320_regmap_config = {
.val_bits = 8,
};
-static int tusb320_extcon_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tusb320_extcon_probe(struct tusb320_priv *priv)
+{
+ int ret;
+
+ priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
+ if (IS_ERR(priv->edev)) {
+ dev_err(priv->dev, "failed to allocate extcon device\n");
+ return PTR_ERR(priv->edev);
+ }
+
+ ret = devm_extcon_dev_register(priv->dev, priv->edev);
+ if (ret < 0) {
+ dev_err(priv->dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ extcon_set_property_capability(priv->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+
+ return 0;
+}
+
+static int tusb320_typec_probe(struct i2c_client *client,
+ struct tusb320_priv *priv)
+{
+ struct fwnode_handle *connector;
+ const char *cap_str;
+ int ret;
+
+ /* The Type-C connector is optional, for backward compatibility. */
+ connector = device_get_named_child_node(&client->dev, "connector");
+ if (!connector)
+ return 0;
+
+ /* Type-C connector found. */
+ ret = typec_get_fw_cap(&priv->cap, connector);
+ if (ret)
+ return ret;
+
+ priv->port_type = priv->cap.type;
+
+ /* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */
+ ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
+ if (ret)
+ return ret;
+
+ ret = typec_find_pwr_opmode(cap_str);
+ if (ret < 0)
+ return ret;
+ if (ret == TYPEC_PWR_MODE_PD)
+ return -EINVAL;
+
+ priv->pwr_opmode = ret;
+
+ /* Initialize the hardware with the devicetree settings. */
+ ret = tusb320_set_adv_pwr_mode(priv);
+ if (ret)
+ return ret;
+
+ priv->cap.revision = USB_TYPEC_REV_1_1;
+ priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
+ priv->cap.accessory[1] = TYPEC_ACCESSORY_DEBUG;
+ priv->cap.orientation_aware = true;
+ priv->cap.driver_data = priv;
+ priv->cap.ops = &tusb320_typec_ops;
+ priv->cap.fwnode = connector;
+
+ priv->port = typec_register_port(&client->dev, &priv->cap);
+ if (IS_ERR(priv->port))
+ return PTR_ERR(priv->port);
+
+ return 0;
+}
+
+static int tusb320_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tusb320_priv *priv;
const void *match_data;
@@ -257,12 +442,6 @@ static int tusb320_extcon_probe(struct i2c_client *client,
priv->ops = (struct tusb320_ops*)match_data;
- priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
- if (IS_ERR(priv->edev)) {
- dev_err(priv->dev, "failed to allocate extcon device\n");
- return PTR_ERR(priv->edev);
- }
-
if (priv->ops->get_revision) {
ret = priv->ops->get_revision(priv, &revision);
if (ret)
@@ -272,16 +451,13 @@ static int tusb320_extcon_probe(struct i2c_client *client,
dev_info(priv->dev, "chip revision %d\n", revision);
}
- ret = devm_extcon_dev_register(priv->dev, priv->edev);
- if (ret < 0) {
- dev_err(priv->dev, "failed to register extcon device\n");
+ ret = tusb320_extcon_probe(priv);
+ if (ret)
return ret;
- }
- extcon_set_property_capability(priv->edev, EXTCON_USB,
- EXTCON_PROP_USB_TYPEC_POLARITY);
- extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
- EXTCON_PROP_USB_TYPEC_POLARITY);
+ ret = tusb320_typec_probe(client, priv);
+ if (ret)
+ return ret;
/* update initial state */
tusb320_irq_handler(client->irq, priv);
@@ -313,7 +489,7 @@ static const struct of_device_id tusb320_extcon_dt_match[] = {
MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
static struct i2c_driver tusb320_extcon_driver = {
- .probe = tusb320_extcon_probe,
+ .probe = tusb320_probe,
.driver = {
.name = "extcon-tusb320",
.of_match_table = tusb320_extcon_dt_match,
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 641a91819088..99d439480612 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -167,7 +167,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
return valid;
}
-struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
+struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
+ const struct ffa_ops *ops)
{
int ret;
struct device *dev;
@@ -183,6 +184,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id)
dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
ffa_dev->vm_id = vm_id;
+ ffa_dev->ops = ops;
uuid_copy(&ffa_dev->uuid, uuid);
ret = device_register(&ffa_dev->dev);
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index ec731e9e942b..d5e86ef40b89 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -163,6 +163,7 @@ struct ffa_drv_info {
struct mutex tx_lock; /* lock to protect Tx buffer */
void *rx_buffer;
void *tx_buffer;
+ bool mem_ops_native;
};
static struct ffa_drv_info *drv_info;
@@ -263,18 +264,24 @@ static int ffa_rxtx_unmap(u16 vm_id)
return 0;
}
+#define PARTITION_INFO_GET_RETURN_COUNT_ONLY BIT(0)
+
/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_partitions)
{
- int count;
+ int idx, count, flags = 0, sz, buf_sz;
ffa_value_t partition_info;
+ if (!buffer || !num_partitions) /* Just get the count for now */
+ flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
+
mutex_lock(&drv_info->rx_lock);
invoke_ffa_fn((ffa_value_t){
.a0 = FFA_PARTITION_INFO_GET,
.a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
+ .a5 = flags,
}, &partition_info);
if (partition_info.a0 == FFA_ERROR) {
@@ -284,8 +291,19 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
count = partition_info.a2;
+ if (drv_info->version > FFA_VERSION_1_0) {
+ buf_sz = sz = partition_info.a3;
+ if (sz > sizeof(*buffer))
+ buf_sz = sizeof(*buffer);
+ } else {
+ /* FFA_VERSION_1_0 lacks size in the response */
+ buf_sz = sz = 8;
+ }
+
if (buffer && count <= num_partitions)
- memcpy(buffer, drv_info->rx_buffer, sizeof(*buffer) * count);
+ for (idx = 0; idx < count; idx++)
+ memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
+ buf_sz);
ffa_rx_release();
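
The per-entry copy above guards against firmware records larger than this kernel's struct ffa_partition_info; a hedged user-space sketch (the record layout and sizes here are purely illustrative): only the first min(record size, sizeof(entry)) bytes of each record are kept, with the stride taken from the firmware-reported size.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct part_info {			/* stand-in for the kernel's view of one record */
	uint16_t id;
	uint16_t exec_ctxt;
	uint32_t properties;
};

int main(void)
{
	size_t sz = 24;			/* pretend the firmware reports 24-byte records */
	size_t buf_sz = sz > sizeof(struct part_info) ? sizeof(struct part_info) : sz;
	uint8_t rx[2 * 24] = { 0 };	/* fake Rx buffer holding two records */
	struct part_info src = { .id = 1, .exec_ctxt = 2, .properties = 4 }, out[2];
	int i;

	memcpy(rx, &src, sizeof(src));
	src.id = 2;
	memcpy(rx + sz, &src, sizeof(src));

	for (i = 0; i < 2; i++)		/* same pattern as the copy loop above */
		memcpy(&out[i], rx + i * sz, buf_sz);

	printf("partition 0 id %u, partition 1 id %u\n",
	       (unsigned)out[0].id, (unsigned)out[1].id);
	return 0;
}
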
@@ -571,6 +589,39 @@ static int ffa_memory_reclaim(u64 g_handle, u32 flags)
return 0;
}
+static int ffa_features(u32 func_feat_id, u32 input_props,
+ u32 *if_props_1, u32 *if_props_2)
+{
+ ffa_value_t id;
+
+ if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
+ pr_err("%s: Invalid Parameters: %x, %x", __func__,
+ func_feat_id, input_props);
+ return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
+ }
+
+ invoke_ffa_fn((ffa_value_t){
+ .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
+ }, &id);
+
+ if (id.a0 == FFA_ERROR)
+ return ffa_to_linux_errno((int)id.a2);
+
+ if (if_props_1)
+ *if_props_1 = id.a2;
+ if (if_props_2)
+ *if_props_2 = id.a3;
+
+ return 0;
+}
+
+static void ffa_set_up_mem_ops_native_flag(void)
+{
+ if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
+ !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
+ drv_info->mem_ops_native = true;
+}
+
static u32 ffa_api_version_get(void)
{
return drv_info->version;
@@ -597,11 +648,19 @@ static int ffa_partition_info_get(const char *uuid_str,
return 0;
}
-static void ffa_mode_32bit_set(struct ffa_device *dev)
+static void _ffa_mode_32bit_set(struct ffa_device *dev)
{
dev->mode_32bit = true;
}
+static void ffa_mode_32bit_set(struct ffa_device *dev)
+{
+ if (drv_info->version > FFA_VERSION_1_0)
+ return;
+
+ _ffa_mode_32bit_set(dev);
+}
+
static int ffa_sync_send_receive(struct ffa_device *dev,
struct ffa_send_direct_data *data)
{
@@ -609,17 +668,15 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
dev->mode_32bit, data);
}
-static int
-ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
- if (dev->mode_32bit)
- return ffa_memory_ops(FFA_MEM_SHARE, args);
+ if (drv_info->mem_ops_native)
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
- return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
+ return ffa_memory_ops(FFA_MEM_SHARE, args);
}
-static int
-ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
/* Note that upon a successful MEM_LEND request the caller
* must ensure that the memory region specified is not accessed
@@ -628,36 +685,47 @@ ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
* however on systems without a hypervisor the responsibility
* falls to the calling kernel driver to prevent access.
*/
- if (dev->mode_32bit)
- return ffa_memory_ops(FFA_MEM_LEND, args);
+ if (drv_info->mem_ops_native)
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
- return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+ return ffa_memory_ops(FFA_MEM_LEND, args);
}
-static const struct ffa_dev_ops ffa_ops = {
+static const struct ffa_info_ops ffa_drv_info_ops = {
.api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get,
+};
+
+static const struct ffa_msg_ops ffa_drv_msg_ops = {
.mode_32bit_set = ffa_mode_32bit_set,
.sync_send_receive = ffa_sync_send_receive,
+};
+
+static const struct ffa_mem_ops ffa_drv_mem_ops = {
.memory_reclaim = ffa_memory_reclaim,
.memory_share = ffa_memory_share,
.memory_lend = ffa_memory_lend,
};
-const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
-{
- if (ffa_device_is_valid(dev))
- return &ffa_ops;
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(ffa_dev_ops_get);
+static const struct ffa_ops ffa_drv_ops = {
+ .info_ops = &ffa_drv_info_ops,
+ .msg_ops = &ffa_drv_msg_ops,
+ .mem_ops = &ffa_drv_mem_ops,
+};
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
int count, idx;
struct ffa_partition_info *pbuf, *tpbuf;
+ /*
+	 * FF-A v1.1 and later provide the UUID for each partition as part of
+	 * the discovery API, so the device's UUID is already populated and
+	 * there is no need to copy it from the driver table.
+ */
+ if (drv_info->version > FFA_VERSION_1_0)
+ return;
+
count = ffa_partition_probe(uuid, &pbuf);
if (count <= 0)
return;
@@ -671,6 +739,7 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
static void ffa_setup_partitions(void)
{
int count, idx;
+ uuid_t uuid;
struct ffa_device *ffa_dev;
struct ffa_partition_info *pbuf, *tpbuf;
@@ -681,19 +750,24 @@ static void ffa_setup_partitions(void)
}
for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
- /* Note that the &uuid_null parameter will require
+ import_uuid(&uuid, (u8 *)tpbuf->uuid);
+
+		/* Note that if the UUID is uuid_null, that will require
* ffa_device_match() to find the UUID of this partition id
- * with help of ffa_device_match_uuid(). Once the FF-A spec
- * is updated to provide correct UUID here for each partition
- * as part of the discovery API, we need to pass the
- * discovered UUID here instead.
+		 * with the help of ffa_device_match_uuid(). FF-A v1.1 and above
+		 * provide the UUID for each partition as part of the discovery
+		 * API, so it is passed here directly.
*/
- ffa_dev = ffa_device_register(&uuid_null, tpbuf->id);
+ ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
if (!ffa_dev) {
pr_err("%s: failed to register partition ID 0x%x\n",
__func__, tpbuf->id);
continue;
}
+
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
+ _ffa_mode_32bit_set(ffa_dev);
}
kfree(pbuf);
}
@@ -751,6 +825,8 @@ static int __init ffa_init(void)
ffa_setup_partitions();
+ ffa_set_up_mem_ops_native_flag();
+
return 0;
free_pages:
if (drv_info->tx_buffer)
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 3ed7ae0d6781..96060bf90a24 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -450,9 +450,13 @@ static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
+ struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
- struct scmi_clock_info *clk = ci->clk + clk_id;
+ if (clk_id >= ci->num_clocks)
+ return NULL;
+
+ clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 8abace56b958..f42dad997ac9 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -106,6 +106,7 @@ enum scmi_optee_pta_cmd {
* @channel_id: OP-TEE channel ID used for this transport
* @tee_session: TEE session identifier
* @caps: OP-TEE SCMI channel capabilities
+ * @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 673f3eb498f4..e9afa8cab730 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -166,9 +166,13 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
struct scmi_xfer *t;
struct scmi_msg_reset_domain_reset *dom;
struct scmi_reset_info *pi = ph->get_priv(ph);
- struct reset_dom_info *rdom = pi->dom_info + domain;
+ struct reset_dom_info *rdom;
- if (rdom->async_reset)
+ if (domain >= pi->num_domains)
+ return -EINVAL;
+
+ rdom = pi->dom_info + domain;
+ if (rdom->async_reset && flags & AUTONOMOUS_RESET)
flags |= ASYNCHRONOUS_RESET;
ret = ph->xops->xfer_get_init(ph, RESET, sizeof(*dom), 0, &t);
@@ -180,7 +184,7 @@ static int scmi_domain_reset(const struct scmi_protocol_handle *ph, u32 domain,
dom->flags = cpu_to_le32(flags);
dom->reset_state = cpu_to_le32(state);
- if (rdom->async_reset)
+ if (flags & ASYNCHRONOUS_RESET)
ret = ph->xops->do_xfer_with_response(ph, t);
else
ret = ph->xops->do_xfer(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 581d34c95769..0e05a79de82d 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>
@@ -53,27 +52,6 @@ static int scmi_pd_power_off(struct generic_pm_domain *domain)
return scmi_pd_power(domain, false);
}
-static int scmi_pd_attach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- int ret;
-
- ret = pm_clk_create(dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clks(dev);
- if (ret >= 0)
- return 0;
-
- pm_clk_destroy(dev);
- return ret;
-}
-
-static void scmi_pd_detach_dev(struct generic_pm_domain *pd, struct device *dev)
-{
- pm_clk_destroy(dev);
-}
-
static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
int num_domains, i;
@@ -124,10 +102,6 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd->genpd.name = scmi_pd->name;
scmi_pd->genpd.power_off = scmi_pd_power_off;
scmi_pd->genpd.power_on = scmi_pd_power_on;
- scmi_pd->genpd.attach_dev = scmi_pd_attach_dev;
- scmi_pd->genpd.detach_dev = scmi_pd_detach_dev;
- scmi_pd->genpd.flags = GENPD_FLAG_PM_CLK |
- GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&scmi_pd->genpd, NULL,
state == SCMI_POWER_STATE_GENERIC_OFF);
@@ -138,9 +112,28 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
+ dev_set_drvdata(dev, scmi_pd_data);
+
return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
+static void scmi_pm_domain_remove(struct scmi_device *sdev)
+{
+ int i;
+ struct genpd_onecell_data *scmi_pd_data;
+ struct device *dev = &sdev->dev;
+ struct device_node *np = dev->of_node;
+
+ of_genpd_del_provider(np);
+
+ scmi_pd_data = dev_get_drvdata(dev);
+ for (i = 0; i < scmi_pd_data->num_domains; i++) {
+ if (!scmi_pd_data->domains[i])
+ continue;
+ pm_genpd_remove(scmi_pd_data->domains[i]);
+ }
+}
+
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWER, "genpd" },
{ },
@@ -150,6 +143,7 @@ MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_power_domain_driver = {
.name = "scmi-power-domain",
.probe = scmi_pm_domain_probe,
+ .remove = scmi_pm_domain_remove,
.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 7288c6117838..0b5853fa9d87 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -762,6 +762,10 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
{
int ret;
struct scmi_xfer *t;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_GET,
sizeof(__le32), sizeof(__le32), &t);
@@ -771,7 +775,6 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
*sensor_config = get_unaligned_le64(t->rx.buf);
@@ -788,6 +791,10 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_config_set *msg;
+ struct sensors_info *si = ph->get_priv(ph);
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_CONFIG_SET,
sizeof(*msg), 0, &t);
@@ -800,7 +807,6 @@ static int scmi_sensor_config_set(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
- struct sensors_info *si = ph->get_priv(ph);
struct scmi_sensor_info *s = si->sensors + sensor_id;
s->sensor_config = sensor_config;
@@ -831,8 +837,11 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
ret = ph->xops->xfer_get_init(ph, SENSOR_READING_GET,
sizeof(*sensor), 0, &t);
@@ -841,6 +850,7 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph,
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
+ s = si->sensors + sensor_id;
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = ph->xops->do_xfer_with_response(ph, t);
@@ -895,9 +905,13 @@ scmi_sensor_reading_get_timestamped(const struct scmi_protocol_handle *ph,
int ret;
struct scmi_xfer *t;
struct scmi_msg_sensor_reading_get *sensor;
+ struct scmi_sensor_info *s;
struct sensors_info *si = ph->get_priv(ph);
- struct scmi_sensor_info *s = si->sensors + sensor_id;
+ if (sensor_id >= si->num_sensors)
+ return -EINVAL;
+
+ s = si->sensors + sensor_id;
if (!count || !readings ||
(!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
return -EINVAL;
@@ -948,6 +962,9 @@ scmi_sensor_info_get(const struct scmi_protocol_handle *ph, u32 sensor_id)
{
struct sensors_info *si = ph->get_priv(ph);
+ if (sensor_id >= si->num_sensors)
+ return NULL;
+
return si->sensors + sensor_id;
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 6cb7384ad2ac..5b79a4a4a88d 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -105,9 +105,50 @@ config EFI_RUNTIME_WRAPPERS
config EFI_GENERIC_STUB
bool
+config EFI_ZBOOT
+ bool "Enable the generic EFI decompressor"
+ depends on EFI_GENERIC_STUB && !ARM
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
+ select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
+ select HAVE_KERNEL_ZSTD
+ help
+ Create the bootable image as an EFI application that carries the
+ actual kernel image in compressed form, and decompresses it into
+ memory before executing it via LoadImage/StartImage EFI boot service
+ calls. For compatibility with non-EFI loaders, the payload can be
+ decompressed and executed by the loader as well, provided that the
+ loader implements the decompression algorithm and that non-EFI boot
+ is supported by the encapsulated image. (The compression algorithm
+ used is described in the zboot image header)
+
+config EFI_ZBOOT_SIGNED
+ def_bool y
+ depends on EFI_ZBOOT_SIGNING_CERT != ""
+ depends on EFI_ZBOOT_SIGNING_KEY != ""
+
+config EFI_ZBOOT_SIGNING
+ bool "Sign the EFI decompressor for UEFI secure boot"
+ depends on EFI_ZBOOT
+ help
+ Use the 'sbsign' command line tool (which must exist on the host
+ path) to sign both the EFI decompressor PE/COFF image, as well as the
+ encapsulated PE/COFF image, which is subsequently compressed and
+ wrapped by the former image.
+
+config EFI_ZBOOT_SIGNING_CERT
+ string "Certificate to use for signing the compressed EFI boot image"
+ depends on EFI_ZBOOT_SIGNING
+
+config EFI_ZBOOT_SIGNING_KEY
+ string "Private key to use for signing the compressed EFI boot image"
+ depends on EFI_ZBOOT_SIGNING
+
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
- depends on EFI_GENERIC_STUB && !RISCV
+ depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH
default y
help
Select this config option to add support for the dtb= command
@@ -124,7 +165,7 @@ config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
bool "Enable the command line initrd loader" if !X86
depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
default y if X86
- depends on !RISCV
+ depends on !RISCV && !LOONGARCH
help
Select this config option to add support for the initrd= command
line parameter, allowing an initrd that resides on the same volume
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 4dde8edd53b6..3e8d4b51a814 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -243,29 +243,6 @@ failed:
}
/**
- * efi_capsule_flush - called by file close or file flush
- * @file: file pointer
- * @id: not used
- *
- * If a capsule is being partially uploaded then calling this function
- * will be treated as upload termination and will free those completed
- * buffer pages and -ECANCELED will be returned.
- **/
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
-{
- int ret = 0;
- struct capsule_info *cap_info = file->private_data;
-
- if (cap_info->index > 0) {
- pr_err("capsule upload not complete\n");
- efi_free_all_buff_pages(cap_info);
- ret = -ECANCELED;
- }
-
- return ret;
-}
-
-/**
* efi_capsule_release - called by file close
* @inode: not used
* @file: file pointer
@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
{
struct capsule_info *cap_info = file->private_data;
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
kfree(cap_info->pages);
kfree(cap_info->phys);
kfree(file->private_data);
@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
.owner = THIS_MODULE,
.open = efi_capsule_open,
.write = efi_capsule_write,
- .flush = efi_capsule_flush,
.release = efi_capsule_release,
.llseek = no_llseek,
};
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index eb9c65f97841..f80d87c199c3 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -15,9 +15,11 @@
static long __init parse_acpi_path(const struct efi_dev_path *node,
struct device *parent, struct device **child)
{
- char hid[ACPI_ID_LEN], uid[11]; /* UINT_MAX + null byte */
struct acpi_device *adev;
struct device *phys_dev;
+ char hid[ACPI_ID_LEN];
+ u64 uid;
+ int ret;
if (node->header.length != 12)
return -EINVAL;
@@ -27,12 +29,12 @@ static long __init parse_acpi_path(const struct efi_dev_path *node,
'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
node->acpi.hid >> 16);
- sprintf(uid, "%u", node->acpi.uid);
for_each_acpi_dev_match(adev, hid, NULL, -1) {
- if (adev->pnp.unique_id && !strcmp(adev->pnp.unique_id, uid))
+ ret = acpi_dev_uid_to_integer(adev, &uid);
+ if (ret == 0 && node->acpi.uid == uid)
break;
- if (!adev->pnp.unique_id && node->acpi.uid == 0)
+ if (ret == -ENODATA && node->acpi.uid == 0)
break;
}
if (!adev)
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 3928dbff76d0..2fd770b499a3 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -51,34 +51,10 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
return addr;
}
-static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
-static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
-
-static const efi_config_table_type_t arch_tables[] __initconst = {
- {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
- {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
- {}
-};
+extern __weak const efi_config_table_type_t efi_arch_tables[];
static void __init init_screen_info(void)
{
- struct screen_info *si;
-
- if (IS_ENABLED(CONFIG_ARM) &&
- screen_info_table != EFI_INVALID_TABLE_ADDR) {
- si = early_memremap_ro(screen_info_table, sizeof(*si));
- if (!si) {
- pr_err("Could not map screen_info config table\n");
- return;
- }
- screen_info = *si;
- early_memunmap(si, sizeof(*si));
-
- /* dummycon on ARM needs non-zero values for columns/lines */
- screen_info.orig_video_cols = 80;
- screen_info.orig_video_lines = 25;
- }
-
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
memblock_is_map_memory(screen_info.lfb_base))
memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
@@ -119,8 +95,7 @@ static int __init uefi_init(u64 efi_system_table)
goto out;
}
retval = efi_config_parse_tables(config_tables, systab->nr_tables,
- IS_ENABLED(CONFIG_ARM) ? arch_tables
- : NULL);
+ efi_arch_tables);
early_memunmap(config_tables, table_size);
out:
@@ -248,36 +223,4 @@ void __init efi_init(void)
PAGE_ALIGN(data.size + (data.phys_map & ~PAGE_MASK)));
init_screen_info();
-
-#ifdef CONFIG_ARM
- /* ARM does not permit early mappings to persist across paging_init() */
- efi_memmap_unmap();
-
- if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
- struct efi_arm_entry_state *state;
- bool dump_state = true;
-
- state = early_memremap_ro(cpu_state_table,
- sizeof(struct efi_arm_entry_state));
- if (state == NULL) {
- pr_warn("Unable to map CPU entry state table.\n");
- return;
- }
-
- if ((state->sctlr_before_ebs & 1) == 0)
- pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
- else if ((state->sctlr_after_ebs & 1) == 0)
- pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
- else
- dump_state = false;
-
- if (dump_state || efi_enabled(EFI_DBG)) {
- pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs);
- pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs);
- pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
- pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
- }
- early_memunmap(state, sizeof(struct efi_arm_entry_state));
- }
-#endif
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 042a3ef4db1c..9624735f1575 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -21,6 +21,7 @@
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/of.h>
+#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/kexec.h>
#include <linux/platform_device.h>
@@ -55,6 +56,7 @@ EXPORT_SYMBOL(efi);
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
struct mm_struct efi_mm = {
.mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
@@ -532,6 +534,7 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
{LINUX_EFI_TPM_FINAL_LOG_GUID, &efi.tpm_final_log, "TPMFinalLog" },
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
+ {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
@@ -674,6 +677,18 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
}
}
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
+ initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
+ struct linux_efi_initrd *tbl;
+
+ tbl = early_memremap(initrd, sizeof(*tbl));
+ if (tbl) {
+ phys_initrd_start = tbl->base;
+ phys_initrd_size = tbl->size;
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+ }
+
return 0;
}
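
The hunk above is the consumer half of an initrd handover: the stub (see the reworked efi_load_initrd() later in this diff) publishes the location of the initrd it loaded as a LINUX_EFI_INITRD_MEDIA_GUID configuration table, and efi_config_parse_tables() copies it into phys_initrd_start/phys_initrd_size. A minimal sketch of the shared record, with the two-field layout implied by the tbl->base/tbl->size accesses above; the real declaration lives in the EFI headers, not in this diff:

/* Sketch only: layout implied by the accesses above. */
struct linux_efi_initrd {
        unsigned long   base;   /* physical address of the loaded initrd */
        unsigned long   size;   /* size of the initrd in bytes */
};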
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 8ced7af8e56d..4f9fb086eab7 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -48,6 +48,9 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier,
return NOTIFY_DONE;
wdata = kmalloc(MAX_DATA_LEN * sizeof(efi_char16_t), GFP_KERNEL);
+ if (!wdata)
+ return NOTIFY_DONE;
+
for (l = 0; l < MAX_DATA_LEN - 1 && str[l] != '\0'; l++)
wdata[l] = str[l];
wdata[l] = L'\0';
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 81432d0c904b..b1601aad7e1a 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -26,8 +26,10 @@ cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
$(call cc-option,-mno-single-pic-base)
cflags-$(CONFIG_RISCV) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fpic
+cflags-$(CONFIG_LOONGARCH) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpie
-cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt
+cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
-include $(srctree)/include/linux/hidden.h \
@@ -37,8 +39,17 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# disable CFI
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# disable LTO
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO), $(KBUILD_CFLAGS))
@@ -58,21 +69,32 @@ lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o \
skip_spaces.o lib-cmdline.o lib-ctype.o \
alignedmem.o relocate.o vsprintf.o
-# include the stub's generic dependencies from lib/ when building for ARM/arm64
-efi-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+# include the stub's libfdt dependencies from lib/ when needed
+libfdt-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c \
+ fdt_empty_tree.c fdt_sw.c
+
+lib-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdt.o \
+ $(patsubst %.c,lib-%.o,$(libfdt-deps))
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o fdt.o string.o \
- $(patsubst %.c,lib-%.o,$(efi-deps-y))
+lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_RISCV) += riscv-stub.o
+lib-$(CONFIG_LOONGARCH) += loongarch-stub.o
+
CFLAGS_arm32-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+zboot-obj-$(CONFIG_RISCV) := lib-clz_ctz.o lib-ashldi3.o
+lib-$(CONFIG_EFI_ZBOOT) += zboot.o $(zboot-obj-y)
+
+extra-y := $(lib-y)
+lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
+
# Even when -mbranch-protection=none is set, Clang will generate a
# .note.gnu.property for code-less object files (like lib/ctype.c),
# so work around this by explicitly removing the unwanted section.
@@ -112,9 +134,6 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
# a verification pass to see if any absolute relocations exist in any of the
# object files.
#
-extra-y := $(lib-y)
-lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
-
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -126,6 +145,12 @@ STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+# For LoongArch, keep all the symbols in .init section and make sure that no
+# absolute symbol references exist.
+STUBCOPY_FLAGS-$(CONFIG_LOONGARCH) += --prefix-alloc-sections=.init \
+ --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_LOONGARCH) := R_LARCH_MARK_LA
+
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,stubcopy)
diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot
new file mode 100644
index 000000000000..35f234ad8738
--- /dev/null
+++ b/drivers/firmware/efi/libstub/Makefile.zboot
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# to be include'd by arch/$(ARCH)/boot/Makefile after setting
+# EFI_ZBOOT_PAYLOAD, EFI_ZBOOT_BFD_TARGET and EFI_ZBOOT_MACH_TYPE
+
+comp-type-$(CONFIG_KERNEL_GZIP) := gzip
+comp-type-$(CONFIG_KERNEL_LZ4) := lz4
+comp-type-$(CONFIG_KERNEL_LZMA) := lzma
+comp-type-$(CONFIG_KERNEL_LZO) := lzo
+comp-type-$(CONFIG_KERNEL_XZ) := xzkern
+comp-type-$(CONFIG_KERNEL_ZSTD) := zstd22
+
+# in GZIP, the appended le32 carrying the uncompressed size is part of the
+# format, but in other cases, we just append it at the end for convenience,
+# causing the original tools to complain when checking image integrity.
+# So disregard it when calculating the payload size in the zimage header.
+zboot-method-y := $(comp-type-y)_with_size
+zboot-size-len-y := 4
+
+zboot-method-$(CONFIG_KERNEL_GZIP) := gzip
+zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
+
+quiet_cmd_sbsign = SBSIGN $@
+ cmd_sbsign = sbsign --out $@ $< \
+ --key $(CONFIG_EFI_ZBOOT_SIGNING_KEY) \
+ --cert $(CONFIG_EFI_ZBOOT_SIGNING_CERT)
+
+$(obj)/$(EFI_ZBOOT_PAYLOAD).signed: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE
+ $(call if_changed,sbsign)
+
+ZBOOT_PAYLOAD-y := $(EFI_ZBOOT_PAYLOAD)
+ZBOOT_PAYLOAD-$(CONFIG_EFI_ZBOOT_SIGNED) := $(EFI_ZBOOT_PAYLOAD).signed
+
+$(obj)/vmlinuz: $(obj)/$(ZBOOT_PAYLOAD-y) FORCE
+ $(call if_changed,$(zboot-method-y))
+
+OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \
+ --rename-section .data=.gzdata,load,alloc,readonly,contents
+$(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
+ $(call if_changed,objcopy)
+
+AFLAGS_zboot-header.o += -DMACHINE_TYPE=IMAGE_FILE_MACHINE_$(EFI_ZBOOT_MACH_TYPE) \
+ -DZBOOT_EFI_PATH="\"$(realpath $(obj)/vmlinuz.efi.elf)\"" \
+ -DZBOOT_SIZE_LEN=$(zboot-size-len-y) \
+ -DCOMP_TYPE="\"$(comp-type-y)\""
+
+$(obj)/zboot-header.o: $(srctree)/drivers/firmware/efi/libstub/zboot-header.S FORCE
+ $(call if_changed_rule,as_o_S)
+
+ZBOOT_DEPS := $(obj)/zboot-header.o $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds
+$(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE
+ $(call if_changed,ld)
+
+ZBOOT_EFI-y := vmlinuz.efi
+ZBOOT_EFI-$(CONFIG_EFI_ZBOOT_SIGNED) := vmlinuz.efi.unsigned
+
+OBJCOPYFLAGS_$(ZBOOT_EFI-y) := -O binary
+$(obj)/$(ZBOOT_EFI-y): $(obj)/vmlinuz.efi.elf FORCE
+ $(call if_changed,objcopy)
+
+targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi
+
+ifneq ($(CONFIG_EFI_ZBOOT_SIGNED),)
+$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.unsigned FORCE
+ $(call if_changed,sbsign)
+endif
+
+targets += $(EFI_ZBOOT_PAYLOAD).signed vmlinuz.efi.unsigned
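
Per the header comment, this file is meant to be include'd from an architecture's boot Makefile once the three EFI_ZBOOT_* variables are set. A minimal sketch of such a consumer follows; the payload name, BFD target and machine type are illustrative assumptions rather than values taken from this patch:

# arch/$(ARCH)/boot/Makefile fragment (sketch; values are assumptions)
EFI_ZBOOT_PAYLOAD       := vmlinux.efi
EFI_ZBOOT_BFD_TARGET    := elf64-loongarch
EFI_ZBOOT_MACH_TYPE     := LOONGARCH64

# the include turns the payload into a compressed, self-decompressing
# $(obj)/vmlinuz.efi (sbsign-ed when CONFIG_EFI_ZBOOT_SIGNED is set)
include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot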
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 577173ee1f83..259e4b852d63 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -19,12 +19,20 @@ efi_status_t check_platform_features(void)
{
u64 tg;
+ /*
+ * If we have 48 bits of VA space for TTBR0 mappings, we can map the
+ * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is
+ * unnecessary.
+ */
+ if (VA_BITS_MIN >= 48)
+ efi_novamap = true;
+
/* UEFI mandates support for 4 KB granularity, no need to check */
if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return EFI_SUCCESS;
- tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
- if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
+ if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
efi_err("This 64 KB granular kernel is not supported by your CPU\n");
else
@@ -42,26 +50,17 @@ efi_status_t check_platform_features(void)
*/
static bool check_image_region(u64 base, u64 size)
{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *memory_map;
- struct efi_boot_memmap map;
+ struct efi_boot_memmap *map;
efi_status_t status;
bool ret = false;
int map_offset;
- map.map = &memory_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = NULL;
- map.key_ptr = NULL;
- map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&map);
+ status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
return false;
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
u64 end = md->phys_addr + md->num_pages * EFI_PAGE_SIZE;
/*
@@ -74,7 +73,7 @@ static bool check_image_region(u64 base, u64 size)
}
}
- efi_bs_call(free_pool, memory_map);
+ efi_bs_call(free_pool, map);
return ret;
}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 3d972061c1b0..0c493521b25b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -218,7 +218,7 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_noinitrd = true;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
- efi_novamap = parse_option_str(val, "novamap");
+ efi_novamap |= parse_option_str(val, "novamap");
efi_nosoftreserve = IS_ENABLED(CONFIG_EFI_SOFT_RESERVE) &&
parse_option_str(val, "nosoftreserve");
@@ -310,7 +310,7 @@ bool efi_load_option_unpack(efi_load_option_unpacked_t *dest,
*
* Detect this case and extract OptionalData.
*/
-void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size)
+void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size)
{
const efi_load_option_t *load_option = *load_options;
efi_load_option_unpacked_t load_option_unpacked;
@@ -334,6 +334,85 @@ void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_si
*load_options_size = load_option_unpacked.optional_data_size;
}
+enum efistub_event {
+ EFISTUB_EVT_INITRD,
+ EFISTUB_EVT_LOAD_OPTIONS,
+ EFISTUB_EVT_COUNT,
+};
+
+#define STR_WITH_SIZE(s) sizeof(s), s
+
+static const struct {
+ u32 pcr_index;
+ u32 event_id;
+ u32 event_data_len;
+ u8 event_data[52];
+} events[] = {
+ [EFISTUB_EVT_INITRD] = {
+ 9,
+ INITRD_EVENT_TAG_ID,
+ STR_WITH_SIZE("Linux initrd")
+ },
+ [EFISTUB_EVT_LOAD_OPTIONS] = {
+ 9,
+ LOAD_OPTIONS_EVENT_TAG_ID,
+ STR_WITH_SIZE("LOADED_IMAGE::LoadOptions")
+ },
+};
+
+static efi_status_t efi_measure_tagged_event(unsigned long load_addr,
+ unsigned long load_size,
+ enum efistub_event event)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_tcg2_protocol_t *tcg2 = NULL;
+ efi_status_t status;
+
+ efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
+ if (tcg2) {
+ struct efi_measured_event {
+ efi_tcg2_event_t event_data;
+ efi_tcg2_tagged_event_t tagged_event;
+ u8 tagged_event_data[];
+ } *evt;
+ int size = sizeof(*evt) + events[event].event_data_len;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)&evt);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ evt->event_data = (struct efi_tcg2_event){
+ .event_size = size,
+ .event_header.header_size = sizeof(evt->event_data.event_header),
+ .event_header.header_version = EFI_TCG2_EVENT_HEADER_VERSION,
+ .event_header.pcr_index = events[event].pcr_index,
+ .event_header.event_type = EV_EVENT_TAG,
+ };
+
+ evt->tagged_event = (struct efi_tcg2_tagged_event){
+ .tagged_event_id = events[event].event_id,
+ .tagged_event_data_size = events[event].event_data_len,
+ };
+
+ memcpy(evt->tagged_event_data, events[event].event_data,
+ events[event].event_data_len);
+
+ status = efi_call_proto(tcg2, hash_log_extend_event, 0,
+ load_addr, load_size, &evt->event_data);
+ efi_bs_call(free_pool, evt);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+ return EFI_SUCCESS;
+ }
+
+ return EFI_UNSUPPORTED;
+fail:
+ efi_warn("Failed to measure data for event %d: 0x%lx\n", event, status);
+ return status;
+}
+
/*
* Convert the Unicode UEFI command line to ASCII to pass to the kernel.
* Size of memory allocated is returned in *cmd_line_len.
@@ -341,21 +420,26 @@ void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_si
*/
char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
{
- const u16 *s2;
- unsigned long cmdline_addr = 0;
- int options_chars = efi_table_attr(image, load_options_size);
- const u16 *options = efi_table_attr(image, load_options);
+ const efi_char16_t *options = efi_table_attr(image, load_options);
+ u32 options_size = efi_table_attr(image, load_options_size);
int options_bytes = 0, safe_options_bytes = 0; /* UTF-8 bytes */
+ unsigned long cmdline_addr = 0;
+ const efi_char16_t *s2;
bool in_quote = false;
efi_status_t status;
+ u32 options_chars;
+
+ if (options_size > 0)
+ efi_measure_tagged_event((unsigned long)options, options_size,
+ EFISTUB_EVT_LOAD_OPTIONS);
- efi_apply_loadoptions_quirk((const void **)&options, &options_chars);
- options_chars /= sizeof(*options);
+ efi_apply_loadoptions_quirk((const void **)&options, &options_size);
+ options_chars = options_size / sizeof(efi_char16_t);
if (options) {
s2 = options;
while (options_bytes < COMMAND_LINE_SIZE && options_chars--) {
- u16 c = *s2++;
+ efi_char16_t c = *s2++;
if (c < 0x80) {
if (c == L'\0' || c == L'\n')
@@ -419,7 +503,6 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
/**
* efi_exit_boot_services() - Exit boot services
* @handle: handle of the exiting image
- * @map: pointer to receive the memory map
* @priv: argument to be passed to @priv_func
* @priv_func: function to process the memory map before exiting boot services
*
@@ -432,26 +515,26 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
*
* Return: status code
*/
-efi_status_t efi_exit_boot_services(void *handle,
- struct efi_boot_memmap *map,
- void *priv,
+efi_status_t efi_exit_boot_services(void *handle, void *priv,
efi_exit_boot_map_processing priv_func)
{
+ struct efi_boot_memmap *map;
efi_status_t status;
- status = efi_get_memory_map(map);
-
+ status = efi_get_memory_map(&map, true);
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
status = priv_func(map, priv);
- if (status != EFI_SUCCESS)
- goto free_map;
+ if (status != EFI_SUCCESS) {
+ efi_bs_call(free_pool, map);
+ return status;
+ }
if (efi_disable_pci_dma)
efi_pci_disable_bridge_busmaster();
- status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
if (status == EFI_INVALID_PARAMETER) {
/*
@@ -467,35 +550,26 @@ efi_status_t efi_exit_boot_services(void *handle,
* buffer should account for any changes in the map so the call
* to get_memory_map() is expected to succeed here.
*/
- *map->map_size = *map->buff_size;
+ map->map_size = map->buff_size;
status = efi_bs_call(get_memory_map,
- map->map_size,
- *map->map,
- map->key_ptr,
- map->desc_size,
- map->desc_ver);
+ &map->map_size,
+ &map->map,
+ &map->map_key,
+ &map->desc_size,
+ &map->desc_ver);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
status = priv_func(map, priv);
/* exit_boot_services() was called, thus cannot free */
if (status != EFI_SUCCESS)
- goto fail;
+ return status;
- status = efi_bs_call(exit_boot_services, handle, *map->key_ptr);
+ status = efi_bs_call(exit_boot_services, handle, map->map_key);
}
- /* exit_boot_services() was called, thus cannot free */
- if (status != EFI_SUCCESS)
- goto fail;
-
- return EFI_SUCCESS;
-
-free_map:
- efi_bs_call(free_pool, *map->map);
-fail:
return status;
}
@@ -560,20 +634,16 @@ static const struct {
* * %EFI_SUCCESS if the initrd was loaded successfully, in which
* case @load_addr and @load_size are assigned accordingly
* * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
- * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
* * %EFI_OUT_OF_RESOURCES if memory allocation failed
* * %EFI_LOAD_ERROR in all other cases
*/
static
-efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
- unsigned long *load_size,
+efi_status_t efi_load_initrd_dev_path(struct linux_efi_initrd *initrd,
unsigned long max)
{
efi_guid_t lf2_proto_guid = EFI_LOAD_FILE2_PROTOCOL_GUID;
efi_device_path_protocol_t *dp;
efi_load_file2_protocol_t *lf2;
- unsigned long initrd_addr;
- unsigned long initrd_size;
efi_handle_t handle;
efi_status_t status;
@@ -587,124 +657,98 @@ efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(lf2, load_file, dp, false, &initrd_size, NULL);
+ initrd->size = 0;
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd->size, NULL);
if (status != EFI_BUFFER_TOO_SMALL)
return EFI_LOAD_ERROR;
- status = efi_allocate_pages(initrd_size, &initrd_addr, max);
+ status = efi_allocate_pages(initrd->size, &initrd->base, max);
if (status != EFI_SUCCESS)
return status;
- status = efi_call_proto(lf2, load_file, dp, false, &initrd_size,
- (void *)initrd_addr);
+ status = efi_call_proto(lf2, load_file, dp, false, &initrd->size,
+ (void *)initrd->base);
if (status != EFI_SUCCESS) {
- efi_free(initrd_size, initrd_addr);
+ efi_free(initrd->size, initrd->base);
return EFI_LOAD_ERROR;
}
-
- *load_addr = initrd_addr;
- *load_size = initrd_size;
return EFI_SUCCESS;
}
static
efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
+ struct linux_efi_initrd *initrd,
unsigned long soft_limit,
unsigned long hard_limit)
{
if (!IS_ENABLED(CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER) ||
- (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL))) {
- *load_addr = *load_size = 0;
- return EFI_SUCCESS;
- }
+ (IS_ENABLED(CONFIG_X86) && (!efi_is_native() || image == NULL)))
+ return EFI_UNSUPPORTED;
return handle_cmdline_files(image, L"initrd=", sizeof(L"initrd=") - 2,
soft_limit, hard_limit,
- load_addr, load_size);
-}
-
-static const struct {
- efi_tcg2_event_t event_data;
- efi_tcg2_tagged_event_t tagged_event;
- u8 tagged_event_data[];
-} initrd_tcg2_event = {
- {
- sizeof(initrd_tcg2_event) + sizeof("Linux initrd"),
- {
- sizeof(initrd_tcg2_event.event_data.event_header),
- EFI_TCG2_EVENT_HEADER_VERSION,
- 9,
- EV_EVENT_TAG,
- },
- },
- {
- INITRD_EVENT_TAG_ID,
- sizeof("Linux initrd"),
- },
- { "Linux initrd" },
-};
-
-static void efi_measure_initrd(unsigned long load_addr, unsigned long load_size)
-{
- efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
- efi_tcg2_protocol_t *tcg2 = NULL;
- efi_status_t status;
-
- efi_bs_call(locate_protocol, &tcg2_guid, NULL, (void **)&tcg2);
- if (tcg2) {
- status = efi_call_proto(tcg2, hash_log_extend_event,
- 0, load_addr, load_size,
- &initrd_tcg2_event.event_data);
- if (status != EFI_SUCCESS)
- efi_warn("Failed to measure initrd data: 0x%lx\n",
- status);
- else
- efi_info("Measured initrd data into PCR %d\n",
- initrd_tcg2_event.event_data.event_header.pcr_index);
- }
+ &initrd->base, &initrd->size);
}
/**
* efi_load_initrd() - Load initial RAM disk
* @image: EFI loaded image protocol
- * @load_addr: pointer to loaded initrd
- * @load_size: size of loaded initrd
* @soft_limit: preferred address for loading the initrd
* @hard_limit: upper limit address for loading the initrd
*
* Return: status code
*/
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
unsigned long soft_limit,
- unsigned long hard_limit)
+ unsigned long hard_limit,
+ const struct linux_efi_initrd **out)
{
- efi_status_t status;
+ efi_guid_t tbl_guid = LINUX_EFI_INITRD_MEDIA_GUID;
+ efi_status_t status = EFI_SUCCESS;
+ struct linux_efi_initrd initrd, *tbl;
- if (efi_noinitrd) {
- *load_addr = *load_size = 0;
- status = EFI_SUCCESS;
- } else {
- status = efi_load_initrd_dev_path(load_addr, load_size, hard_limit);
- if (status == EFI_SUCCESS) {
- efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
- if (*load_size > 0)
- efi_measure_initrd(*load_addr, *load_size);
- } else if (status == EFI_NOT_FOUND) {
- status = efi_load_initrd_cmdline(image, load_addr, load_size,
- soft_limit, hard_limit);
- if (status == EFI_SUCCESS && *load_size > 0)
- efi_info("Loaded initrd from command line option\n");
- }
- if (status != EFI_SUCCESS) {
- efi_err("Failed to load initrd: 0x%lx\n", status);
- *load_addr = *load_size = 0;
- }
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || efi_noinitrd)
+ return EFI_SUCCESS;
+
+ status = efi_load_initrd_dev_path(&initrd, hard_limit);
+ if (status == EFI_SUCCESS) {
+ efi_info("Loaded initrd from LINUX_EFI_INITRD_MEDIA_GUID device path\n");
+ if (initrd.size > 0 &&
+ efi_measure_tagged_event(initrd.base, initrd.size,
+ EFISTUB_EVT_INITRD) == EFI_SUCCESS)
+ efi_info("Measured initrd data into PCR 9\n");
+ } else if (status == EFI_NOT_FOUND) {
+ status = efi_load_initrd_cmdline(image, &initrd, soft_limit,
+ hard_limit);
+ /* command line loader disabled or no initrd= passed? */
+ if (status == EFI_UNSUPPORTED || status == EFI_NOT_READY)
+ return EFI_SUCCESS;
+ if (status == EFI_SUCCESS)
+ efi_info("Loaded initrd from command line option\n");
}
+ if (status != EFI_SUCCESS)
+ goto failed;
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA, sizeof(initrd),
+ (void **)&tbl);
+ if (status != EFI_SUCCESS)
+ goto free_initrd;
+
+ *tbl = initrd;
+ status = efi_bs_call(install_configuration_table, &tbl_guid, tbl);
+ if (status != EFI_SUCCESS)
+ goto free_tbl;
+
+ if (out)
+ *out = tbl;
+ return EFI_SUCCESS;
+free_tbl:
+ efi_bs_call(free_pool, tbl);
+free_initrd:
+ efi_free(initrd.size, initrd.base);
+failed:
+ efi_err("Failed to load initrd: 0x%lx\n", status);
return status;
}
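
Under the reworked contract, efi_load_initrd() both loads the initrd and installs the LINUX_EFI_INITRD_MEDIA_GUID configuration table itself; callers that still need the base/size get a pointer to that table through @out, and a NULL @out (as in the generic stub change that follows) means the caller only wants the side effects. A short caller sketch, assuming the two-field linux_efi_initrd layout; the wrapper name is invented for illustration:

/* Illustrative caller of the new efi_load_initrd() interface (sketch). */
static efi_status_t stub_load_initrd(efi_loaded_image_t *image,
                                     unsigned long image_addr)
{
        const struct linux_efi_initrd *initrd = NULL;
        efi_status_t status;

        status = efi_load_initrd(image, ULONG_MAX,
                                 efi_get_max_initrd_addr(image_addr), &initrd);
        if (status != EFI_SUCCESS)
                return status;          /* efi_load_initrd() already logged it */

        if (initrd)                     /* left NULL when no initrd was found */
                efi_info("initrd at 0x%lx, %lu bytes\n",
                         initrd->base, initrd->size);
        return EFI_SUCCESS;
}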
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index f515394cce6e..cf474f0dd261 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -10,7 +10,6 @@
*/
#include <linux/efi.h>
-#include <linux/libfdt.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -40,16 +39,22 @@
#ifdef CONFIG_ARM64
# define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64
-#elif defined(CONFIG_RISCV)
+#elif defined(CONFIG_RISCV) || defined(CONFIG_LOONGARCH)
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN
-#else
+#else /* Only if TASK_SIZE is a constant */
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE
#endif
-static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
-static bool flat_va_mapping;
+/*
+ * Some architectures map the EFI regions into the kernel's linear map using a
+ * fixed offset.
+ */
+#ifndef EFI_RT_VIRTUAL_OFFSET
+#define EFI_RT_VIRTUAL_OFFSET 0
+#endif
-const efi_system_table_t *efi_system_table;
+static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
static struct screen_info *setup_graphics(void)
{
@@ -124,16 +129,11 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
unsigned long image_addr;
unsigned long image_size = 0;
/* address and size pairs for memory management */
- unsigned long initrd_addr = 0;
- unsigned long initrd_size = 0;
- unsigned long fdt_addr = 0; /* Original DTB */
- unsigned long fdt_size = 0;
char *cmdline_ptr = NULL;
int cmdline_size = 0;
efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
unsigned long reserve_addr = 0;
unsigned long reserve_size = 0;
- enum efi_secureboot_mode secure_boot;
struct screen_info *si;
efi_properties_table_t *prop_tbl;
@@ -154,8 +154,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
* information about the running image, such as size and the command
* line.
*/
- status = efi_system_table->boottime->handle_protocol(handle,
- &loaded_image_proto, (void *)&image);
+ status = efi_bs_call(handle_protocol, handle, &loaded_image_proto,
+ (void *)&image);
if (status != EFI_SUCCESS) {
efi_err("Failed to get loaded image protocol\n");
goto fail;
@@ -209,40 +209,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation();
- secure_boot = efi_get_secureboot();
-
- /*
- * Unauthenticated device tree data is a security hazard, so ignore
- * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
- * boot is enabled if we can't determine its state.
- */
- if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
- secure_boot != efi_secureboot_mode_disabled) {
- if (strstr(cmdline_ptr, "dtb="))
- efi_err("Ignoring DTB from command line.\n");
- } else {
- status = efi_load_dtb(image, &fdt_addr, &fdt_size);
-
- if (status != EFI_SUCCESS) {
- efi_err("Failed to load device tree!\n");
- goto fail_free_image;
- }
- }
-
- if (fdt_addr) {
- efi_info("Using DTB from command line\n");
- } else {
- /* Look for a device tree configuration table entry. */
- fdt_addr = (uintptr_t)get_fdt(&fdt_size);
- if (fdt_addr)
- efi_info("Using DTB from configuration table\n");
- }
-
- if (!fdt_addr)
- efi_info("Generating empty DTB\n");
-
- efi_load_initrd(image, &initrd_addr, &initrd_size, ULONG_MAX,
- efi_get_max_initrd_addr(image_addr));
+ efi_load_initrd(image, ULONG_MAX, efi_get_max_initrd_addr(image_addr),
+ NULL);
efi_random_get_seed();
@@ -254,8 +222,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
* The easiest way to achieve that is to simply use a 1:1 mapping.
*/
prop_tbl = get_efi_config_table(EFI_PROPERTIES_TABLE_GUID);
- flat_va_mapping = prop_tbl &&
- (prop_tbl->memory_protection_attribute &
+ flat_va_mapping |= prop_tbl &&
+ (prop_tbl->memory_protection_attribute &
EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
/* force efi_novamap if SetVirtualAddressMap() is unsupported */
@@ -284,25 +252,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
install_memreserve_table();
- status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
- initrd_addr, initrd_size,
- cmdline_ptr, fdt_addr, fdt_size);
- if (status != EFI_SUCCESS)
- goto fail_free_initrd;
-
- if (IS_ENABLED(CONFIG_ARM))
- efi_handle_post_ebs_state();
-
- efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
- /* not reached */
-
-fail_free_initrd:
- efi_err("Failed to update FDT and exit boot services\n");
+ status = efi_boot_kernel(handle, image, image_addr, cmdline_ptr);
- efi_free(initrd_size, initrd_addr);
- efi_free(fdt_size, fdt_addr);
-
-fail_free_image:
efi_free(image_size, image_addr);
efi_free(reserve_size, reserve_addr);
fail_free_screeninfo:
@@ -314,6 +265,35 @@ fail:
}
/*
+ * efi_alloc_virtmap() - create a pool allocation for the virtmap
+ *
+ * Create an allocation that is of sufficient size to hold all the memory
+ * descriptors that will be passed to SetVirtualAddressMap() to inform the
+ * firmware about the virtual mapping that will be used under the OS to call
+ * into the firmware.
+ */
+efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
+ unsigned long *desc_size, u32 *desc_ver)
+{
+ unsigned long size, mmap_key;
+ efi_status_t status;
+
+ /*
+ * Use the size of the current memory map as an upper bound for the
+ * size of the buffer we need to pass to SetVirtualAddressMap() to
+ * cover all EFI_MEMORY_RUNTIME regions.
+ */
+ size = 0;
+ status = efi_bs_call(get_memory_map, &size, NULL, &mmap_key, desc_size,
+ desc_ver);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ return efi_bs_call(allocate_pool, EFI_LOADER_DATA, size,
+ (void **)virtmap);
+}
+
+/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
* This function populates the virt_addr fields of all memory region descriptors
@@ -328,6 +308,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
efi_memory_desc_t *in, *out = runtime_map;
int l;
+ *count = 0;
+
for (l = 0; l < map_size; l += desc_size) {
u64 paddr, size;
@@ -338,7 +320,7 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
paddr = in->phys_addr;
size = in->num_pages * EFI_PAGE_SIZE;
- in->virt_addr = in->phys_addr;
+ in->virt_addr = in->phys_addr + EFI_RT_VIRTUAL_OFFSET;
if (efi_novamap) {
continue;
}
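
The EFI_RT_VIRTUAL_OFFSET hook introduced at the top of this file lets an architecture opt out of the randomized EFI_RT_VIRTUAL_BASE placement: any non-zero offset pre-sets flat_va_mapping and is simply added to each region's physical address in efi_get_virtmap(). A hypothetical <asm/efi.h> opt-in could look like this; the base address is invented for illustration:

/*
 * Hypothetical arch override (sketch): keep EFI runtime regions at a fixed
 * offset inside the kernel's own mapping window. The constant below is an
 * illustration only, not taken from any real architecture header.
 */
#define EFI_RT_VIRTUAL_OFFSET   0x8000000000000000UL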
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b0ae0a454404..a30fb5d8ef05 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -160,16 +160,24 @@ void efi_set_u64_split(u64 data, u32 *lo, u32 *hi)
*/
#define EFI_MMAP_NR_SLACK_SLOTS 8
-struct efi_boot_memmap {
- efi_memory_desc_t **map;
- unsigned long *map_size;
- unsigned long *desc_size;
- u32 *desc_ver;
- unsigned long *key_ptr;
- unsigned long *buff_size;
+typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+
+union efi_device_path_to_text_protocol {
+ struct {
+ efi_char16_t *(__efiapi *convert_device_node_to_text)(
+ const efi_device_path_protocol_t *,
+ bool, bool);
+ efi_char16_t *(__efiapi *convert_device_path_to_text)(
+ const efi_device_path_protocol_t *,
+ bool, bool);
+ };
+ struct {
+ u32 convert_device_node_to_text;
+ u32 convert_device_path_to_text;
+ } mixed_mode;
};
-typedef struct efi_generic_dev_path efi_device_path_protocol_t;
+typedef union efi_device_path_to_text_protocol efi_device_path_to_text_protocol_t;
typedef void *efi_event_t;
/* Note that notifications won't work in mixed mode */
@@ -254,13 +262,17 @@ union efi_boot_services {
efi_handle_t *);
efi_status_t (__efiapi *install_configuration_table)(efi_guid_t *,
void *);
- void *load_image;
- void *start_image;
+ efi_status_t (__efiapi *load_image)(bool, efi_handle_t,
+ efi_device_path_protocol_t *,
+ void *, unsigned long,
+ efi_handle_t *);
+ efi_status_t (__efiapi *start_image)(efi_handle_t, unsigned long *,
+ efi_char16_t **);
efi_status_t __noreturn (__efiapi *exit)(efi_handle_t,
efi_status_t,
unsigned long,
efi_char16_t *);
- void *unload_image;
+ efi_status_t (__efiapi *unload_image)(efi_handle_t);
efi_status_t (__efiapi *exit_boot_services)(efi_handle_t,
unsigned long);
void *get_next_monotonic_count;
@@ -277,11 +289,11 @@ union efi_boot_services {
void *locate_handle_buffer;
efi_status_t (__efiapi *locate_protocol)(efi_guid_t *, void *,
void **);
- void *install_multiple_protocol_interfaces;
- void *uninstall_multiple_protocol_interfaces;
+ efi_status_t (__efiapi *install_multiple_protocol_interfaces)(efi_handle_t *, ...);
+ efi_status_t (__efiapi *uninstall_multiple_protocol_interfaces)(efi_handle_t, ...);
void *calculate_crc32;
- void *copy_mem;
- void *set_mem;
+ void (__efiapi *copy_mem)(void *, const void *, unsigned long);
+ void (__efiapi *set_mem)(void *, unsigned long, unsigned char);
void *create_event_ex;
};
struct {
@@ -741,6 +753,7 @@ union apple_properties_protocol {
typedef u32 efi_tcg2_event_log_format;
#define INITRD_EVENT_TAG_ID 0x8F3B22ECU
+#define LOAD_OPTIONS_EVENT_TAG_ID 0x8F3B22EDU
#define EV_EVENT_TAG 0x00000006U
#define EFI_TCG2_EVENT_HEADER_VERSION 0x1
@@ -840,7 +853,7 @@ typedef struct {
u16 file_path_list_length;
const efi_char16_t *description;
const efi_device_path_protocol_t *file_path_list;
- size_t optional_data_size;
+ u32 optional_data_size;
const void *optional_data;
} efi_load_option_unpacked_t;
@@ -850,20 +863,16 @@ typedef efi_status_t (*efi_exit_boot_map_processing)(
struct efi_boot_memmap *map,
void *priv);
-efi_status_t efi_exit_boot_services(void *handle,
- struct efi_boot_memmap *map,
- void *priv,
+efi_status_t efi_exit_boot_services(void *handle, void *priv,
efi_exit_boot_map_processing priv_func);
-efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
- unsigned long *new_fdt_addr,
- u64 initrd_addr, u64 initrd_size,
- char *cmdline_ptr,
- unsigned long fdt_addr,
- unsigned long fdt_size);
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr);
void *get_fdt(unsigned long *fdt_size);
+efi_status_t efi_alloc_virtmap(efi_memory_desc_t **virtmap,
+ unsigned long *desc_size, u32 *desc_ver);
void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
@@ -885,11 +894,12 @@ __printf(1, 2) int efi_printk(char const *fmt, ...);
void efi_free(unsigned long size, unsigned long addr);
-void efi_apply_loadoptions_quirk(const void **load_options, int *load_options_size);
+void efi_apply_loadoptions_quirk(const void **load_options, u32 *load_options_size);
char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len);
-efi_status_t efi_get_memory_map(struct efi_boot_memmap *map);
+efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
+ bool install_cfg_tbl);
efi_status_t efi_allocate_pages(unsigned long size, unsigned long *addr,
unsigned long max);
@@ -932,10 +942,9 @@ static inline efi_status_t efi_load_dtb(efi_loaded_image_t *image,
}
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
- unsigned long *load_addr,
- unsigned long *load_size,
unsigned long soft_limit,
- unsigned long hard_limit);
+ unsigned long hard_limit,
+ const struct linux_efi_initrd **out);
/*
* This function handles the architecture-specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index fe567be0f118..4f4d98e51fbf 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -28,8 +28,7 @@ static void fdt_update_cell_size(void *fdt)
}
static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size)
+ void *fdt, int new_fdt_size, char *cmdline_ptr)
{
int node, num_rsv;
int status;
@@ -93,21 +92,6 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
goto fdt_set_fail;
}
- /* Set initrd address/end in device tree, if present */
- if (initrd_size != 0) {
- u64 initrd_image_end;
- u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
-
- status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
- if (status)
- goto fdt_set_fail;
-
- initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
- status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end);
- if (status)
- goto fdt_set_fail;
- }
-
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
fdt_val64 = cpu_to_fdt64((u64)(unsigned long)efi_system_table);
@@ -170,25 +154,25 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
if (node < 0)
return EFI_LOAD_ERROR;
- fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+ fdt_val64 = cpu_to_fdt64((unsigned long)map->map);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
if (err)
return EFI_LOAD_ERROR;
- fdt_val32 = cpu_to_fdt32(*map->map_size);
+ fdt_val32 = cpu_to_fdt32(map->map_size);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
- fdt_val32 = cpu_to_fdt32(*map->desc_size);
+ fdt_val32 = cpu_to_fdt32(map->desc_size);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
- fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+ fdt_val32 = cpu_to_fdt32(map->desc_ver);
err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
if (err)
@@ -198,22 +182,25 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
}
struct exit_boot_struct {
+ struct efi_boot_memmap *boot_memmap;
efi_memory_desc_t *runtime_map;
- int *runtime_entry_count;
+ int runtime_entry_count;
void *new_fdt_addr;
};
-static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
- void *priv)
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
{
struct exit_boot_struct *p = priv;
+
+ p->boot_memmap = map;
+
/*
* Update the memory map with virtual addresses. The function will also
* populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
* entries so that we can pass it straight to SetVirtualAddressMap()
*/
- efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
- p->runtime_map, p->runtime_entry_count);
+ efi_get_virtmap(map->map, map->map_size, map->desc_size,
+ p->runtime_map, &p->runtime_entry_count);
return update_fdt_memmap(p->new_fdt_addr, map);
}
@@ -223,86 +210,86 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
#endif
/*
- * Allocate memory for a new FDT, then add EFI, commandline, and
- * initrd related fields to the FDT. This routine increases the
- * FDT allocation size until the allocated memory is large
- * enough. EFI allocations are in EFI_PAGE_SIZE granules,
- * which are fixed at 4K bytes, so in most cases the first
- * allocation should succeed.
- * EFI boot services are exited at the end of this function.
- * There must be no allocations between the get_memory_map()
- * call and the exit_boot_services() call, so the exiting of
- * boot services is very tightly tied to the creation of the FDT
- * with the final memory map in it.
+ * Allocate memory for a new FDT, then add EFI and commandline related fields
+ * to the FDT. This routine increases the FDT allocation size until the
+ * allocated memory is large enough. EFI allocations are in EFI_PAGE_SIZE
+ * granules, which are fixed at 4K bytes, so in most cases the first allocation
+ * should succeed. EFI boot services are exited at the end of this function.
+ * There must be no allocations between the get_memory_map() call and the
+ * exit_boot_services() call, so the exiting of boot services is very tightly
+ * tied to the creation of the FDT with the final memory map in it.
*/
-
+static
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
+ efi_loaded_image_t *image,
unsigned long *new_fdt_addr,
- u64 initrd_addr, u64 initrd_size,
- char *cmdline_ptr,
- unsigned long fdt_addr,
- unsigned long fdt_size)
+ char *cmdline_ptr)
{
- unsigned long map_size, desc_size, buff_size;
+ unsigned long desc_size;
u32 desc_ver;
- unsigned long mmap_key;
- efi_memory_desc_t *memory_map, *runtime_map;
efi_status_t status;
- int runtime_entry_count;
- struct efi_boot_memmap map;
struct exit_boot_struct priv;
+ unsigned long fdt_addr = 0;
+ unsigned long fdt_size = 0;
- map.map = &runtime_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_ver;
- map.key_ptr = &mmap_key;
- map.buff_size = &buff_size;
+ if (!efi_novamap) {
+ status = efi_alloc_virtmap(&priv.runtime_map, &desc_size,
+ &desc_ver);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+ }
/*
- * Get a copy of the current memory map that we will use to prepare
- * the input for SetVirtualAddressMap(). We don't have to worry about
- * subsequent allocations adding entries, since they could not affect
- * the number of EFI_MEMORY_RUNTIME regions.
+ * Unauthenticated device tree data is a security hazard, so ignore
+ * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
+ * boot is enabled if we can't determine its state.
*/
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS) {
- efi_err("Unable to retrieve UEFI memory map.\n");
- return status;
+ if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+ efi_get_secureboot() != efi_secureboot_mode_disabled) {
+ if (strstr(cmdline_ptr, "dtb="))
+ efi_err("Ignoring DTB from command line.\n");
+ } else {
+ status = efi_load_dtb(image, &fdt_addr, &fdt_size);
+
+ if (status != EFI_SUCCESS && status != EFI_NOT_READY) {
+ efi_err("Failed to load device tree!\n");
+ goto fail;
+ }
}
+ if (fdt_addr) {
+ efi_info("Using DTB from command line\n");
+ } else {
+ /* Look for a device tree configuration table entry. */
+ fdt_addr = (uintptr_t)get_fdt(&fdt_size);
+ if (fdt_addr)
+ efi_info("Using DTB from configuration table\n");
+ }
+
+ if (!fdt_addr)
+ efi_info("Generating empty DTB\n");
+
efi_info("Exiting boot services...\n");
- map.map = &memory_map;
status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
if (status != EFI_SUCCESS) {
efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for exit_boot_services().
- */
- status = efi_get_memory_map(&map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt((void *)fdt_addr, fdt_size,
- (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
- initrd_addr, initrd_size);
+ (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr);
if (status != EFI_SUCCESS) {
efi_err("Unable to construct new device tree.\n");
goto fail_free_new_fdt;
}
- runtime_entry_count = 0;
- priv.runtime_map = runtime_map;
- priv.runtime_entry_count = &runtime_entry_count;
- priv.new_fdt_addr = (void *)*new_fdt_addr;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
@@ -312,8 +299,8 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
/* Install the new virtual address map */
svam = efi_system_table->runtime->set_virtual_address_map;
- status = svam(runtime_entry_count * desc_size, desc_size,
- desc_ver, runtime_map);
+ status = svam(priv.runtime_entry_count * desc_size, desc_size,
+ desc_ver, priv.runtime_map);
/*
* We are beyond the point of no return here, so if the call to
@@ -321,6 +308,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
* incoming kernel but proceed normally otherwise.
*/
if (status != EFI_SUCCESS) {
+ efi_memory_desc_t *p;
int l;
/*
@@ -329,8 +317,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
* the incoming kernel that no virtual translation has
* been installed.
*/
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *p = (void *)memory_map + l;
+ for (l = 0; l < priv.boot_memmap->map_size;
+ l += priv.boot_memmap->desc_size) {
+ p = (void *)priv.boot_memmap->map + l;
if (p->attribute & EFI_MEMORY_RUNTIME)
p->virt_addr = 0;
@@ -345,11 +334,33 @@ fail_free_new_fdt:
efi_free(MAX_FDT_SIZE, *new_fdt_addr);
fail:
- efi_system_table->boottime->free_pool(runtime_map);
+ efi_free(fdt_size, fdt_addr);
+
+ efi_bs_call(free_pool, priv.runtime_map);
return EFI_LOAD_ERROR;
}
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr)
+{
+ unsigned long fdt_addr;
+ efi_status_t status;
+
+ status = allocate_new_fdt_and_exit_boot(handle, image, &fdt_addr,
+ cmdline_ptr);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to update FDT and exit boot services\n");
+ return status;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_handle_post_ebs_state();
+
+ efi_enter_kernel(kernel_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
+ /* not reached */
+}
+
void *get_fdt(unsigned long *fdt_size)
{
void *fdt;
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index dd95f330fe6e..f756c61396e9 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -66,10 +66,28 @@ static efi_status_t efi_open_file(efi_file_protocol_t *volume,
static efi_status_t efi_open_volume(efi_loaded_image_t *image,
efi_file_protocol_t **fh)
{
+ struct efi_vendor_dev_path *dp = image->file_path;
+ efi_guid_t li_proto = LOADED_IMAGE_PROTOCOL_GUID;
efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
efi_simple_file_system_protocol_t *io;
efi_status_t status;
+ // If we are using EFI zboot, we should look for the file system
+ // protocol on the parent image's handle instead
+ if (IS_ENABLED(CONFIG_EFI_ZBOOT) &&
+ image->parent_handle != NULL &&
+ dp != NULL &&
+ dp->header.type == EFI_DEV_MEDIA &&
+ dp->header.sub_type == EFI_DEV_MEDIA_VENDOR &&
+ !efi_guidcmp(dp->vendorguid, LINUX_EFI_ZBOOT_MEDIA_GUID)) {
+ status = efi_bs_call(handle_protocol, image->parent_handle,
+ &li_proto, (void *)&image);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to locate parent image handle\n");
+ return status;
+ }
+ }
+
status = efi_bs_call(handle_protocol, image->device_handle, &fs_proto,
(void **)&io);
if (status != EFI_SUCCESS) {
@@ -136,7 +154,7 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
unsigned long *load_size)
{
const efi_char16_t *cmdline = image->load_options;
- int cmdline_len = image->load_options_size;
+ u32 cmdline_len = image->load_options_size;
unsigned long efi_chunk_size = ULONG_MAX;
efi_file_protocol_t *volume = NULL;
efi_file_protocol_t *file;
@@ -238,6 +256,9 @@ efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
if (volume)
volume->close(volume);
+
+ if (*load_size == 0)
+ return EFI_NOT_READY;
return EFI_SUCCESS;
err_close_file:
diff --git a/drivers/firmware/efi/libstub/intrinsics.c b/drivers/firmware/efi/libstub/intrinsics.c
new file mode 100644
index 000000000000..a04ab39292b6
--- /dev/null
+++ b/drivers/firmware/efi/libstub/intrinsics.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/string.h>
+
+#include "efistub.h"
+
+#ifdef CONFIG_KASAN
+#undef memcpy
+#undef memmove
+#undef memset
+void *__memcpy(void *__dest, const void *__src, size_t __n) __alias(memcpy);
+void *__memmove(void *__dest, const void *__src, size_t count) __alias(memmove);
+void *__memset(void *s, int c, size_t count) __alias(memset);
+#endif
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ efi_bs_call(copy_mem, dst, src, len);
+ return dst;
+}
+
+extern void *memmove(void *dst, const void *src, size_t len) __alias(memcpy);
+
+void *memset(void *dst, int c, size_t len)
+{
+ efi_bs_call(set_mem, dst, len, c & U8_MAX);
+ return dst;
+}
diff --git a/drivers/firmware/efi/libstub/loongarch-stub.c b/drivers/firmware/efi/libstub/loongarch-stub.c
new file mode 100644
index 000000000000..32329f2a92f9
--- /dev/null
+++ b/drivers/firmware/efi/libstub/loongarch-stub.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Yun Liu <liuyun@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/efi.h>
+#include <asm/addrspace.h>
+#include "efistub.h"
+
+typedef void __noreturn (*kernel_entry_t)(bool efi, unsigned long cmdline,
+ unsigned long systab);
+
+extern int kernel_asize;
+extern int kernel_fsize;
+extern int kernel_offset;
+extern kernel_entry_t kernel_entry;
+
+efi_status_t check_platform_features(void)
+{
+ return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ efi_loaded_image_t *image,
+ efi_handle_t image_handle)
+{
+ efi_status_t status;
+ unsigned long kernel_addr = 0;
+
+ kernel_addr = (unsigned long)&kernel_offset - kernel_offset;
+
+ status = efi_relocate_kernel(&kernel_addr, kernel_fsize, kernel_asize,
+ PHYSADDR(VMLINUX_LOAD_ADDRESS), SZ_2M, 0x0);
+
+ *image_addr = kernel_addr;
+ *image_size = kernel_asize;
+
+ return status;
+}
+
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
+{
+ struct exit_boot_struct *p = priv;
+
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(map->map, map->map_size, map->desc_size,
+ p->runtime_map, &p->runtime_entry_count);
+
+ return EFI_SUCCESS;
+}
+
+efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
+ unsigned long kernel_addr, char *cmdline_ptr)
+{
+ kernel_entry_t real_kernel_entry;
+ struct exit_boot_struct priv;
+ unsigned long desc_size;
+ efi_status_t status;
+ u32 desc_ver;
+
+ status = efi_alloc_virtmap(&priv.runtime_map, &desc_size, &desc_ver);
+ if (status != EFI_SUCCESS) {
+ efi_err("Unable to retrieve UEFI memory map.\n");
+ return status;
+ }
+
+ efi_info("Exiting boot services\n");
+
+ efi_novamap = false;
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /* Install the new virtual address map */
+ efi_rt_call(set_virtual_address_map,
+ priv.runtime_entry_count * desc_size, desc_size,
+ desc_ver, priv.runtime_map);
+
+ /* Config Direct Mapping */
+ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0);
+ csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1);
+
+ real_kernel_entry = (kernel_entry_t)
+ ((unsigned long)&kernel_entry - kernel_addr + VMLINUX_LOAD_ADDRESS);
+
+ real_kernel_entry(true, (unsigned long)cmdline_ptr,
+ (unsigned long)efi_system_table);
+}
diff --git a/drivers/firmware/efi/libstub/mem.c b/drivers/firmware/efi/libstub/mem.c
index feef8d4be113..45841ef55a9f 100644
--- a/drivers/firmware/efi/libstub/mem.c
+++ b/drivers/firmware/efi/libstub/mem.c
@@ -5,71 +5,66 @@
#include "efistub.h"
-static inline bool mmap_has_headroom(unsigned long buff_size,
- unsigned long map_size,
- unsigned long desc_size)
-{
- unsigned long slack = buff_size - map_size;
-
- return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
-}
-
/**
* efi_get_memory_map() - get memory map
- * @map: on return pointer to memory map
+ * @map: pointer to memory map pointer to which to assign the
+ * newly allocated memory map
+ * @install_cfg_tbl: whether or not to install the boot memory map as a
+ * configuration table
*
* Retrieve the UEFI memory map. The allocated memory leaves room for
* up to EFI_MMAP_NR_SLACK_SLOTS additional memory map entries.
*
* Return: status code
*/
-efi_status_t efi_get_memory_map(struct efi_boot_memmap *map)
+efi_status_t efi_get_memory_map(struct efi_boot_memmap **map,
+ bool install_cfg_tbl)
{
- efi_memory_desc_t *m = NULL;
+ int memtype = install_cfg_tbl ? EFI_ACPI_RECLAIM_MEMORY
+ : EFI_LOADER_DATA;
+ efi_guid_t tbl_guid = LINUX_EFI_BOOT_MEMMAP_GUID;
+ struct efi_boot_memmap *m, tmp;
efi_status_t status;
- unsigned long key;
- u32 desc_version;
-
- *map->desc_size = sizeof(*m);
- *map->map_size = *map->desc_size * 32;
- *map->buff_size = *map->map_size;
-again:
- status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
- *map->map_size, (void **)&m);
+ unsigned long size;
+
+ tmp.map_size = 0;
+ status = efi_bs_call(get_memory_map, &tmp.map_size, NULL, &tmp.map_key,
+ &tmp.desc_size, &tmp.desc_ver);
+ if (status != EFI_BUFFER_TOO_SMALL)
+ return EFI_LOAD_ERROR;
+
+ size = tmp.map_size + tmp.desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ status = efi_bs_call(allocate_pool, memtype, sizeof(*m) + size,
+ (void **)&m);
if (status != EFI_SUCCESS)
- goto fail;
-
- *map->desc_size = 0;
- key = 0;
- status = efi_bs_call(get_memory_map, map->map_size, m,
- &key, map->desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL ||
- !mmap_has_headroom(*map->buff_size, *map->map_size,
- *map->desc_size)) {
- efi_bs_call(free_pool, m);
+ return status;
+
+ if (install_cfg_tbl) {
/*
- * Make sure there is some entries of headroom so that the
- * buffer can be reused for a new map after allocations are
- * no longer permitted. Its unlikely that the map will grow to
- * exceed this headroom once we are ready to trigger
- * ExitBootServices()
+ * Installing a configuration table might allocate memory, and
+ * this may modify the memory map. This means we should install
+ * the configuration table first, and re-install or delete it
+ * as needed.
*/
- *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
- *map->buff_size = *map->map_size;
- goto again;
+ status = efi_bs_call(install_configuration_table, &tbl_guid, m);
+ if (status != EFI_SUCCESS)
+ goto free_map;
}
- if (status == EFI_SUCCESS) {
- if (map->key_ptr)
- *map->key_ptr = key;
- if (map->desc_ver)
- *map->desc_ver = desc_version;
- } else {
- efi_bs_call(free_pool, m);
- }
+ m->buff_size = m->map_size = size;
+ status = efi_bs_call(get_memory_map, &m->map_size, m->map, &m->map_key,
+ &m->desc_size, &m->desc_ver);
+ if (status != EFI_SUCCESS)
+ goto uninstall_table;
+
+ *map = m;
+ return EFI_SUCCESS;
-fail:
- *map->map = m;
+uninstall_table:
+ if (install_cfg_tbl)
+ efi_bs_call(install_configuration_table, &tbl_guid, NULL);
+free_map:
+ efi_bs_call(free_pool, m);
return status;
}
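
After this rework, efi_get_memory_map() hands back a single pool allocation holding both the efi_boot_memmap header and the descriptor array, so a caller walks map->map in map->desc_size strides and releases everything with one free_pool call, as check_image_region() and efi_random_alloc() do elsewhere in this diff. A minimal sketch of that pattern; the helper name is invented for illustration:

/* Sketch: count EFI_CONVENTIONAL_MEMORY pages with the new interface. */
static efi_status_t count_free_pages(u64 *pages)
{
        struct efi_boot_memmap *map;
        efi_status_t status;
        int offset;

        status = efi_get_memory_map(&map, false);      /* no config table */
        if (status != EFI_SUCCESS)
                return status;

        *pages = 0;
        for (offset = 0; offset < map->map_size; offset += map->desc_size) {
                efi_memory_desc_t *md = (void *)map->map + offset;

                if (md->type == EFI_CONVENTIONAL_MEMORY)
                        *pages += md->num_pages;
        }

        efi_bs_call(free_pool, map);    /* one allocation, one free */
        return EFI_SUCCESS;
}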
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 715f37479154..9fb5869896be 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -55,22 +55,13 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long *addr,
unsigned long random_seed)
{
- unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
- unsigned long buff_size;
+ struct efi_boot_memmap *map;
efi_status_t status;
- efi_memory_desc_t *memory_map;
int map_offset;
- struct efi_boot_memmap map;
- map.map = &memory_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = NULL;
- map.key_ptr = NULL;
- map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&map);
+ status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
return status;
@@ -80,8 +71,8 @@ efi_status_t efi_random_alloc(unsigned long size,
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
slots = get_entry_num_slots(md, size, ilog2(align));
@@ -109,8 +100,8 @@ efi_status_t efi_random_alloc(unsigned long size,
* to calculate the randomly chosen address, and allocate it directly
* using EFI_ALLOCATE_ADDRESS.
*/
- for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
- efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) {
+ efi_memory_desc_t *md = (void *)map->map + map_offset;
efi_physical_addr_t target;
unsigned long pages;
@@ -133,7 +124,7 @@ efi_status_t efi_random_alloc(unsigned long size,
break;
}
- efi_bs_call(free_pool, memory_map);
+ efi_bs_call(free_pool, map);
return status;
}
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index 8ee9eb2b9039..bf6fbd5d22a1 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -23,21 +23,12 @@
efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long min)
{
- unsigned long map_size, desc_size, buff_size;
- efi_memory_desc_t *map;
+ struct efi_boot_memmap *map;
efi_status_t status;
unsigned long nr_pages;
int i;
- struct efi_boot_memmap boot_map;
- boot_map.map = &map;
- boot_map.map_size = &map_size;
- boot_map.desc_size = &desc_size;
- boot_map.desc_ver = NULL;
- boot_map.key_ptr = NULL;
- boot_map.buff_size = &buff_size;
-
- status = efi_get_memory_map(&boot_map);
+ status = efi_get_memory_map(&map, false);
if (status != EFI_SUCCESS)
goto fail;
@@ -52,12 +43,12 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
size = round_up(size, EFI_ALLOC_ALIGN);
nr_pages = size / EFI_PAGE_SIZE;
- for (i = 0; i < map_size / desc_size; i++) {
+ for (i = 0; i < map->map_size / map->desc_size; i++) {
efi_memory_desc_t *desc;
- unsigned long m = (unsigned long)map;
+ unsigned long m = (unsigned long)map->map;
u64 start, end;
- desc = efi_early_memdesc_ptr(m, desc_size, i);
+ desc = efi_early_memdesc_ptr(m, map->desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
@@ -87,7 +78,7 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
}
}
- if (i == map_size / desc_size)
+ if (i == map->map_size / map->desc_size)
status = EFI_NOT_FOUND;
efi_bs_call(free_pool, map);
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 8a18930f3eb6..516f4f0069bd 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -14,7 +14,7 @@
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
-static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static const efi_char16_t shim_MokSBState_name[] = L"MokSBStateRT";
static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
@@ -43,8 +43,8 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/*
* See if a user has put the shim into insecure mode. If so, and if the
- * variable doesn't have the runtime attribute set, we might as well
- * honor that.
+ * variable doesn't have the non-volatile attribute set, we might as
+ * well honor that.
*/
size = sizeof(moksbstate);
status = get_efi_var(shim_MokSBState_name, &shim_guid,
@@ -53,7 +53,7 @@ enum efi_secureboot_mode efi_get_secureboot(void)
/* If it fails, we don't care why. Default to secure */
if (status != EFI_SUCCESS)
goto secure_boot_enabled;
- if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1)
+ if (!(attr & EFI_VARIABLE_NON_VOLATILE) && moksbstate == 1)
return efi_secureboot_mode_disabled;
secure_boot_enabled:
diff --git a/drivers/firmware/efi/libstub/systable.c b/drivers/firmware/efi/libstub/systable.c
new file mode 100644
index 000000000000..91d016b02f8c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/systable.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+const efi_system_table_t *efi_system_table;
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 05ae8bcc9d67..b9ce6393e353 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -220,7 +220,6 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unsigned long end, next;
unsigned long rounded_start, rounded_end;
unsigned long unprotect_start, unprotect_size;
- int has_system_memory = 0;
if (efi_dxe_table == NULL)
return;
@@ -517,6 +516,13 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
+ /*
+ * Disregard any setup data that was provided by the bootloader:
+ * setup_data could be pointing anywhere, and we have no way of
+ * authenticating or validating the payload.
+ */
+ hdr->setup_data = 0;
+
efi_stub_entry(handle, sys_table_arg, boot_params);
/* not reached */
@@ -716,32 +722,22 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
efi_set_u64_split((unsigned long)efi_system_table,
&p->efi->efi_systab, &p->efi->efi_systab_hi);
- p->efi->efi_memdesc_size = *map->desc_size;
- p->efi->efi_memdesc_version = *map->desc_ver;
- efi_set_u64_split((unsigned long)*map->map,
+ p->efi->efi_memdesc_size = map->desc_size;
+ p->efi->efi_memdesc_version = map->desc_ver;
+ efi_set_u64_split((unsigned long)map->map,
&p->efi->efi_memmap, &p->efi->efi_memmap_hi);
- p->efi->efi_memmap_size = *map->map_size;
+ p->efi->efi_memmap_size = map->map_size;
return EFI_SUCCESS;
}
static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
- unsigned long map_sz, key, desc_size, buff_size;
- efi_memory_desc_t *mem_map;
struct setup_data *e820ext = NULL;
__u32 e820ext_size = 0;
efi_status_t status;
- __u32 desc_version;
- struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &mem_map;
- map.map_size = &map_sz;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_version;
- map.key_ptr = &key;
- map.buff_size = &buff_size;
priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
@@ -750,7 +746,7 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
return status;
/* Might as well exit boot services now */
- status = efi_exit_boot_services(handle, &map, &priv, exit_boot_func);
+ status = efi_exit_boot_services(handle, &priv, exit_boot_func);
if (status != EFI_SUCCESS)
return status;
@@ -776,7 +772,7 @@ unsigned long efi_main(efi_handle_t handle,
unsigned long bzimage_addr = (unsigned long)startup_32;
unsigned long buffer_start, buffer_end;
struct setup_header *hdr = &boot_params->hdr;
- unsigned long addr, size;
+ const struct linux_efi_initrd *initrd = NULL;
efi_status_t status;
efi_system_table = sys_table_arg;
@@ -871,17 +867,18 @@ unsigned long efi_main(efi_handle_t handle,
* arguments will be processed only if image is not NULL, which will be
* the case only if we were loaded via the PE entry point.
*/
- status = efi_load_initrd(image, &addr, &size, hdr->initrd_addr_max,
- ULONG_MAX);
+ status = efi_load_initrd(image, hdr->initrd_addr_max, ULONG_MAX,
+ &initrd);
if (status != EFI_SUCCESS)
goto fail;
- if (size > 0) {
- efi_set_u64_split(addr, &hdr->ramdisk_image,
+ if (initrd && initrd->size > 0) {
+ efi_set_u64_split(initrd->base, &hdr->ramdisk_image,
&boot_params->ext_ramdisk_image);
- efi_set_u64_split(size, &hdr->ramdisk_size,
+ efi_set_u64_split(initrd->size, &hdr->ramdisk_size,
&boot_params->ext_ramdisk_size);
}
+
/*
* If the boot loader gave us a value for secure_boot then we use that,
* otherwise we ask the BIOS.
diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
new file mode 100644
index 000000000000..9e6fe061ab07
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot-header.S
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/pe.h>
+
+#ifdef CONFIG_64BIT
+ .set .Lextra_characteristics, 0x0
+ .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32PLUS
+#else
+ .set .Lextra_characteristics, IMAGE_FILE_32BIT_MACHINE
+ .set .Lpe_opt_magic, PE_OPT_MAGIC_PE32
+#endif
+
+ .section ".head", "a"
+ .globl __efistub_efi_zboot_header
+__efistub_efi_zboot_header:
+.Ldoshdr:
+ .long MZ_MAGIC
+ .ascii "zimg" // image type
+ .long __efistub__gzdata_start - .Ldoshdr // payload offset
+ .long __efistub__gzdata_size - ZBOOT_SIZE_LEN // payload size
+ .long 0, 0 // reserved
+ .asciz COMP_TYPE // compression type
+ .org .Ldoshdr + 0x3c
+ .long .Lpehdr - .Ldoshdr // PE header offset
+
+.Lpehdr:
+ .long PE_MAGIC
+ .short MACHINE_TYPE
+ .short .Lsection_count
+ .long 0
+ .long 0
+ .long 0
+ .short .Lsection_table - .Loptional_header
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED |\
+ .Lextra_characteristics
+
+.Loptional_header:
+ .short .Lpe_opt_magic
+ .byte 0, 0
+ .long _etext - .Lefi_header_end
+ .long __data_size
+ .long 0
+ .long __efistub_efi_zboot_entry - .Ldoshdr
+ .long .Lefi_header_end - .Ldoshdr
+
+#ifdef CONFIG_64BIT
+ .quad 0
+#else
+ .long _etext - .Ldoshdr, 0x0
+#endif
+ .long 4096
+ .long 512
+ .short 0, 0
+ .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
+ .short 0, 0
+ .long 0
+ .long _end - .Ldoshdr
+
+ .long .Lefi_header_end - .Ldoshdr
+ .long 0
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION
+ .short 0
+#ifdef CONFIG_64BIT
+ .quad 0, 0, 0, 0
+#else
+ .long 0, 0, 0, 0
+#endif
+ .long 0
+ .long (.Lsection_table - .) / 8
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+#ifdef CONFIG_DEBUG_EFI
+ .long .Lefi_debug_table - .Ldoshdr // DebugTable
+ .long .Lefi_debug_table_size
+#endif
+
+.Lsection_table:
+ .ascii ".text\0\0\0"
+ .long _etext - .Lefi_header_end
+ .long .Lefi_header_end - .Ldoshdr
+ .long _etext - .Lefi_header_end
+ .long .Lefi_header_end - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE
+
+ .ascii ".data\0\0\0"
+ .long __data_size
+ .long _etext - .Ldoshdr
+ .long __data_rawsize
+ .long _etext - .Ldoshdr
+
+ .long 0, 0
+ .short 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE
+
+ .set .Lsection_count, (. - .Lsection_table) / 40
+
+#ifdef CONFIG_DEBUG_EFI
+ .section ".rodata", "a"
+ .align 2
+.Lefi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type
+ .long .Lefi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long .Lefi_debug_entry - .Ldoshdr // FileOffset
+
+ .set .Lefi_debug_table_size, . - .Lefi_debug_table
+ .previous
+
+.Lefi_debug_entry:
+ // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
+ .ascii "NB10" // Signature
+ .long 0 // Unknown
+ .long 0 // Unknown2
+ .long 0 // Unknown3
+
+ .asciz ZBOOT_EFI_PATH
+
+ .set .Lefi_debug_entry_size, . - .Lefi_debug_entry
+#endif
+
+ .p2align 12
+.Lefi_header_end:
+
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
new file mode 100644
index 000000000000..ea72c8f27da6
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/pe.h>
+#include <asm/efi.h>
+#include <asm/unaligned.h>
+
+#include "efistub.h"
+
+static unsigned char zboot_heap[SZ_256K] __aligned(64);
+static unsigned long free_mem_ptr, free_mem_end_ptr;
+
+#define STATIC static
+#if defined(CONFIG_KERNEL_GZIP)
+#include "../../../../lib/decompress_inflate.c"
+#elif defined(CONFIG_KERNEL_LZ4)
+#include "../../../../lib/decompress_unlz4.c"
+#elif defined(CONFIG_KERNEL_LZMA)
+#include "../../../../lib/decompress_unlzma.c"
+#elif defined(CONFIG_KERNEL_LZO)
+#include "../../../../lib/decompress_unlzo.c"
+#elif defined(CONFIG_KERNEL_XZ)
+#undef memcpy
+#define memcpy memcpy
+#undef memmove
+#define memmove memmove
+#include "../../../../lib/decompress_unxz.c"
+#elif defined(CONFIG_KERNEL_ZSTD)
+#include "../../../../lib/decompress_unzstd.c"
+#endif
+
+extern char efi_zboot_header[];
+extern char _gzdata_start[], _gzdata_end[];
+
+static void log(efi_char16_t str[])
+{
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, L"EFI decompressor: ");
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, str);
+ efi_call_proto(efi_table_attr(efi_system_table, con_out),
+ output_string, L"\n");
+}
+
+static void error(char *x)
+{
+ log(L"error() called from decompressor library\n");
+}
+
+// Local version to avoid pulling in memcmp()
+static bool guids_eq(const efi_guid_t *a, const efi_guid_t *b)
+{
+ const u32 *l = (u32 *)a;
+ const u32 *r = (u32 *)b;
+
+ return l[0] == r[0] && l[1] == r[1] && l[2] == r[2] && l[3] == r[3];
+}
+
+static efi_status_t __efiapi
+load_file(efi_load_file_protocol_t *this, efi_device_path_protocol_t *rem,
+ bool boot_policy, unsigned long *bufsize, void *buffer)
+{
+ unsigned long compressed_size = _gzdata_end - _gzdata_start;
+ struct efi_vendor_dev_path *vendor_dp;
+ bool decompress = false;
+ unsigned long size;
+ int ret;
+
+ if (rem == NULL || bufsize == NULL)
+ return EFI_INVALID_PARAMETER;
+
+ if (boot_policy)
+ return EFI_UNSUPPORTED;
+
+ // Look for our vendor media device node in the remaining file path
+ if (rem->type == EFI_DEV_MEDIA &&
+ rem->sub_type == EFI_DEV_MEDIA_VENDOR) {
+ vendor_dp = container_of(rem, struct efi_vendor_dev_path, header);
+ if (!guids_eq(&vendor_dp->vendorguid, &LINUX_EFI_ZBOOT_MEDIA_GUID))
+ return EFI_NOT_FOUND;
+
+ decompress = true;
+ rem = (void *)(vendor_dp + 1);
+ }
+
+ if (rem->type != EFI_DEV_END_PATH ||
+ rem->sub_type != EFI_DEV_END_ENTIRE)
+ return EFI_NOT_FOUND;
+
+ // The uncompressed size of the payload is appended to the raw bit
+ // stream, and may therefore appear misaligned in memory
+ size = decompress ? get_unaligned_le32(_gzdata_end - 4)
+ : compressed_size;
+ if (buffer == NULL || *bufsize < size) {
+ *bufsize = size;
+ return EFI_BUFFER_TOO_SMALL;
+ }
+
+ if (decompress) {
+ ret = __decompress(_gzdata_start, compressed_size, NULL, NULL,
+ buffer, size, NULL, error);
+ if (ret < 0) {
+ log(L"Decompression failed");
+ return EFI_DEVICE_ERROR;
+ }
+ } else {
+ memcpy(buffer, _gzdata_start, compressed_size);
+ }
+
+ return EFI_SUCCESS;
+}
+
+// Return the length in bytes of the device path up to the first end node.
+static int device_path_length(const efi_device_path_protocol_t *dp)
+{
+ int len = 0;
+
+ while (dp->type != EFI_DEV_END_PATH) {
+ len += dp->length;
+ dp = (void *)((u8 *)dp + dp->length);
+ }
+ return len;
+}
+
+static void append_rel_offset_node(efi_device_path_protocol_t **dp,
+ unsigned long start, unsigned long end)
+{
+ struct efi_rel_offset_dev_path *rodp = (void *)*dp;
+
+ rodp->header.type = EFI_DEV_MEDIA;
+ rodp->header.sub_type = EFI_DEV_MEDIA_REL_OFFSET;
+ rodp->header.length = sizeof(struct efi_rel_offset_dev_path);
+ rodp->reserved = 0;
+ rodp->starting_offset = start;
+ rodp->ending_offset = end;
+
+ *dp = (void *)(rodp + 1);
+}
+
+static void append_ven_media_node(efi_device_path_protocol_t **dp,
+ efi_guid_t *guid)
+{
+ struct efi_vendor_dev_path *vmdp = (void *)*dp;
+
+ vmdp->header.type = EFI_DEV_MEDIA;
+ vmdp->header.sub_type = EFI_DEV_MEDIA_VENDOR;
+ vmdp->header.length = sizeof(struct efi_vendor_dev_path);
+ vmdp->vendorguid = *guid;
+
+ *dp = (void *)(vmdp + 1);
+}
+
+static void append_end_node(efi_device_path_protocol_t **dp)
+{
+ (*dp)->type = EFI_DEV_END_PATH;
+ (*dp)->sub_type = EFI_DEV_END_ENTIRE;
+ (*dp)->length = sizeof(struct efi_generic_dev_path);
+
+ ++*dp;
+}
+
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
+{
+ struct efi_mem_mapped_dev_path mmdp = {
+ .header.type = EFI_DEV_HW,
+ .header.sub_type = EFI_DEV_MEM_MAPPED,
+ .header.length = sizeof(struct efi_mem_mapped_dev_path)
+ };
+ efi_device_path_protocol_t *parent_dp, *dpp, *lf2_dp, *li_dp;
+ efi_load_file2_protocol_t zboot_load_file2;
+ efi_loaded_image_t *parent, *child;
+ unsigned long exit_data_size;
+ efi_handle_t child_handle;
+ efi_handle_t zboot_handle;
+ efi_char16_t *exit_data;
+ efi_status_t status;
+ void *dp_alloc;
+ int dp_len;
+
+ WRITE_ONCE(efi_system_table, systab);
+
+ free_mem_ptr = (unsigned long)&zboot_heap;
+ free_mem_end_ptr = free_mem_ptr + sizeof(zboot_heap);
+
+ exit_data = NULL;
+ exit_data_size = 0;
+
+ status = efi_bs_call(handle_protocol, handle,
+ &LOADED_IMAGE_PROTOCOL_GUID, (void **)&parent);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to locate parent's loaded image protocol");
+ return status;
+ }
+
+ status = efi_bs_call(handle_protocol, handle,
+ &LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID,
+ (void **)&parent_dp);
+ if (status != EFI_SUCCESS || parent_dp == NULL) {
+ // Create a MemoryMapped() device path node to describe
+ // the parent image if no device path was provided.
+ mmdp.memory_type = parent->image_code_type;
+ mmdp.starting_addr = (unsigned long)parent->image_base;
+ mmdp.ending_addr = (unsigned long)parent->image_base +
+ parent->image_size - 1;
+ parent_dp = &mmdp.header;
+ dp_len = sizeof(mmdp);
+ } else {
+ dp_len = device_path_length(parent_dp);
+ }
+
+ // Allocate some pool memory for device path protocol data
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ 2 * (dp_len + sizeof(struct efi_rel_offset_dev_path) +
+ sizeof(struct efi_generic_dev_path)) +
+ sizeof(struct efi_vendor_dev_path),
+ (void **)&dp_alloc);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to allocate device path pool memory");
+ return status;
+ }
+
+ // Create a device path describing the compressed payload in this image
+ // <...parent_dp...>/Offset(<start>, <end>)
+ lf2_dp = memcpy(dp_alloc, parent_dp, dp_len);
+ dpp = (void *)((u8 *)lf2_dp + dp_len);
+ append_rel_offset_node(&dpp,
+ (unsigned long)(_gzdata_start - efi_zboot_header),
+ (unsigned long)(_gzdata_end - efi_zboot_header - 1));
+ append_end_node(&dpp);
+
+ // Create a device path describing the decompressed payload in this image
+ // <...parent_dp...>/Offset(<start>, <end>)/VenMedia(ZBOOT_MEDIA_GUID)
+ dp_len += sizeof(struct efi_rel_offset_dev_path);
+ li_dp = memcpy(dpp, lf2_dp, dp_len);
+ dpp = (void *)((u8 *)li_dp + dp_len);
+ append_ven_media_node(&dpp, &LINUX_EFI_ZBOOT_MEDIA_GUID);
+ append_end_node(&dpp);
+
+ zboot_handle = NULL;
+ zboot_load_file2.load_file = load_file;
+ status = efi_bs_call(install_multiple_protocol_interfaces,
+ &zboot_handle,
+ &EFI_DEVICE_PATH_PROTOCOL_GUID, lf2_dp,
+ &EFI_LOAD_FILE2_PROTOCOL_GUID, &zboot_load_file2,
+ NULL);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to install LoadFile2 protocol and device path");
+ goto free_dpalloc;
+ }
+
+ status = efi_bs_call(load_image, false, handle, li_dp, NULL, 0,
+ &child_handle);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to load image");
+ goto uninstall_lf2;
+ }
+
+ status = efi_bs_call(handle_protocol, child_handle,
+ &LOADED_IMAGE_PROTOCOL_GUID, (void **)&child);
+ if (status != EFI_SUCCESS) {
+ log(L"Failed to locate child's loaded image protocol");
+ goto unload_image;
+ }
+
+ // Copy the kernel command line
+ child->load_options = parent->load_options;
+ child->load_options_size = parent->load_options_size;
+
+ status = efi_bs_call(start_image, child_handle, &exit_data_size,
+ &exit_data);
+ if (status != EFI_SUCCESS) {
+ log(L"StartImage() returned with error");
+ if (exit_data_size > 0)
+ log(exit_data);
+
+ // If StartImage() returns EFI_SECURITY_VIOLATION, the image is
+ // not unloaded so we need to do it by hand.
+ if (status == EFI_SECURITY_VIOLATION)
+unload_image:
+ efi_bs_call(unload_image, child_handle);
+ }
+
+uninstall_lf2:
+ efi_bs_call(uninstall_multiple_protocol_interfaces,
+ zboot_handle,
+ &EFI_DEVICE_PATH_PROTOCOL_GUID, lf2_dp,
+ &EFI_LOAD_FILE2_PROTOCOL_GUID, &zboot_load_file2,
+ NULL);
+
+free_dpalloc:
+ efi_bs_call(free_pool, dp_alloc);
+
+ efi_bs_call(exit, handle, status, exit_data_size, exit_data);
+
+ // Free ExitData in case Exit() returned with a failure code,
+ // but return the original status code.
+ log(L"Exit() returned with failure code");
+ if (exit_data != NULL)
+ efi_bs_call(free_pool, exit_data);
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds
new file mode 100644
index 000000000000..87a62765bafd
--- /dev/null
+++ b/drivers/firmware/efi/libstub/zboot.lds
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ENTRY(__efistub_efi_zboot_header);
+
+SECTIONS
+{
+ .head : ALIGN(4096) {
+ *(.head)
+ }
+
+ .text : {
+ *(.text* .init.text*)
+ }
+
+ .rodata : ALIGN(8) {
+ __efistub__gzdata_start = .;
+ *(.gzdata)
+ __efistub__gzdata_end = .;
+ *(.rodata* .init.rodata* .srodata*)
+ _etext = ALIGN(4096);
+ . = _etext;
+ }
+
+ .data : ALIGN(4096) {
+ *(.data* .init.data*)
+ _edata = ALIGN(512);
+ . = _edata;
+ }
+
+ .bss : {
+ *(.bss* .init.bss*)
+ _end = ALIGN(512);
+ . = _end;
+ }
+
+ /DISCARD/ : {
+ *(.modinfo .init.modinfo)
+ }
+}
+
+PROVIDE(__efistub__gzdata_size = ABSOLUTE(. - __efistub__gzdata_start));
+
+PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext));
+PROVIDE(__data_size = ABSOLUTE(_end - _etext));
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index adaa492c3d2d..4e2575dfeb90 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -681,6 +681,15 @@ static struct notifier_block gsmi_die_notifier = {
static int gsmi_panic_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{
+
+ /*
+ * Panic callbacks are executed with all other CPUs stopped,
+ * so we must not attempt to spin waiting for gsmi_dev.lock
+ * to be released.
+ */
+ if (spin_is_locked(&gsmi_dev.lock))
+ return NOTIFY_DONE;
+
gsmi_shutdown_reason(GSMI_SHUTDOWN_PANIC);
return NOTIFY_DONE;
}
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index cfb448eabdaa..e7bcfca4159f 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpuidle.h>
+#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
@@ -163,6 +164,8 @@ int psci_set_osi_mode(bool enable)
PSCI_1_0_SUSPEND_MODE_PC;
err = invoke_psci_fn(PSCI_1_0_FN_SET_SUSPEND_MODE, suspend_mode, 0, 0);
+ if (err < 0)
+ pr_warn("failed to set %s mode: %d\n", enable ? "OSI" : "PC", err);
return psci_to_linux_errno(err);
}
@@ -274,7 +277,7 @@ static void set_conduit(enum arm_smccc_conduit conduit)
psci_conduit = conduit;
}
-static int get_set_conduit_method(struct device_node *np)
+static int get_set_conduit_method(const struct device_node *np)
{
const char *method;
@@ -324,17 +327,130 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
-static int __init psci_features(u32 psci_func_id)
+static int psci_features(u32 psci_func_id)
{
return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES,
psci_func_id, 0, 0);
}
+#ifdef CONFIG_DEBUG_FS
+
+#define PSCI_ID(ver, _name) \
+ { .fn = PSCI_##ver##_FN_##_name, .name = #_name, }
+#define PSCI_ID_NATIVE(ver, _name) \
+ { .fn = PSCI_FN_NATIVE(ver, _name), .name = #_name, }
+
+/* A table of all optional functions */
+static const struct {
+ u32 fn;
+ const char *name;
+} psci_fn_ids[] = {
+ PSCI_ID_NATIVE(0_2, MIGRATE),
+ PSCI_ID(0_2, MIGRATE_INFO_TYPE),
+ PSCI_ID_NATIVE(0_2, MIGRATE_INFO_UP_CPU),
+ PSCI_ID(1_0, CPU_FREEZE),
+ PSCI_ID_NATIVE(1_0, CPU_DEFAULT_SUSPEND),
+ PSCI_ID_NATIVE(1_0, NODE_HW_STATE),
+ PSCI_ID_NATIVE(1_0, SYSTEM_SUSPEND),
+ PSCI_ID(1_0, SET_SUSPEND_MODE),
+ PSCI_ID_NATIVE(1_0, STAT_RESIDENCY),
+ PSCI_ID_NATIVE(1_0, STAT_COUNT),
+ PSCI_ID_NATIVE(1_1, SYSTEM_RESET2),
+ PSCI_ID(1_1, MEM_PROTECT),
+ PSCI_ID_NATIVE(1_1, MEM_PROTECT_CHECK_RANGE),
+};
+
+static int psci_debugfs_read(struct seq_file *s, void *data)
+{
+ int feature, type, i;
+ u32 ver;
+
+ ver = psci_ops.get_version();
+ seq_printf(s, "PSCIv%d.%d\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ /* PSCI_FEATURES is available only starting from 1.0 */
+ if (PSCI_VERSION_MAJOR(ver) < 1)
+ return 0;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ ver = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ seq_printf(s, "SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+ } else {
+ seq_puts(s, "SMC Calling Convention v1.0 is assumed\n");
+ }
+
+ feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
+ if (feature < 0) {
+ seq_printf(s, "PSCI_FEATURES(CPU_SUSPEND) error (%d)\n", feature);
+ } else {
+ seq_printf(s, "OSI is %ssupported\n",
+ (feature & BIT(0)) ? "" : "not ");
+ seq_printf(s, "%s StateID format is used\n",
+ (feature & BIT(1)) ? "Extended" : "Original");
+ }
+
+ type = psci_ops.migrate_info_type();
+ if (type == PSCI_0_2_TOS_UP_MIGRATE ||
+ type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ unsigned long cpuid;
+
+ seq_printf(s, "Trusted OS %smigrate capable\n",
+ type == PSCI_0_2_TOS_UP_NO_MIGRATE ? "not " : "");
+ cpuid = psci_migrate_info_up_cpu();
+ seq_printf(s, "Trusted OS resident on physical CPU 0x%lx (#%d)\n",
+ cpuid, resident_cpu);
+ } else if (type == PSCI_0_2_TOS_MP) {
+ seq_puts(s, "Trusted OS migration not required\n");
+ } else {
+ if (type != PSCI_RET_NOT_SUPPORTED)
+ seq_printf(s, "MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(psci_fn_ids); i++) {
+ feature = psci_features(psci_fn_ids[i].fn);
+ if (feature == PSCI_RET_NOT_SUPPORTED)
+ continue;
+ if (feature < 0)
+ seq_printf(s, "PSCI_FEATURES(%s) error (%d)\n",
+ psci_fn_ids[i].name, feature);
+ else
+ seq_printf(s, "%s is supported\n", psci_fn_ids[i].name);
+ }
+
+ return 0;
+}
+
+static int psci_debugfs_open(struct inode *inode, struct file *f)
+{
+ return single_open(f, psci_debugfs_read, NULL);
+}
+
+static const struct file_operations psci_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psci_debugfs_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek
+};
+
+static int __init psci_debugfs_init(void)
+{
+ return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
+ &psci_debugfs_ops));
+}
+late_initcall(psci_debugfs_init)
+#endif
+
#ifdef CONFIG_CPU_IDLE
static int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
- phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume));
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
@@ -359,7 +475,7 @@ int psci_cpu_suspend_enter(u32 state)
static int psci_system_suspend(unsigned long unused)
{
- phys_addr_t pa_cpu_resume = __pa_symbol(function_nocfi(cpu_resume));
+ phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
@@ -528,7 +644,7 @@ typedef int (*psci_initcall_t)(const struct device_node *);
*
* Probe based on PSCI PSCI_VERSION function
*/
-static int __init psci_0_2_init(struct device_node *np)
+static int __init psci_0_2_init(const struct device_node *np)
{
int err;
@@ -549,7 +665,7 @@ static int __init psci_0_2_init(struct device_node *np)
/*
* PSCI < v0.2 get PSCI Function IDs via DT.
*/
-static int __init psci_0_1_init(struct device_node *np)
+static int __init psci_0_1_init(const struct device_node *np)
{
u32 id;
int err;
@@ -585,7 +701,7 @@ static int __init psci_0_1_init(struct device_node *np)
return 0;
}
-static int __init psci_1_0_init(struct device_node *np)
+static int __init psci_1_0_init(const struct device_node *np)
{
int err;
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 0d51eef2472f..db3d08a01209 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -129,8 +129,6 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
-extern void __qcom_scm_init(void);
-
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 1f276f108cc9..3fd3563d962b 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -94,6 +94,10 @@ static __init int sysfb_init(void)
name = "efi-framebuffer";
else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
name = "vesa-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_VGAC)
+ name = "vga-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_EGAC)
+ name = "ega-framebuffer";
else
name = "platform-framebuffer";
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 0c440afd5224..9d3874cdaaee 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -377,18 +377,11 @@ static ssize_t bpmp_debug_store(struct file *file, const char __user *buf,
if (!filename)
return -ENOENT;
- databuf = kmalloc(count, GFP_KERNEL);
- if (!databuf)
- return -ENOMEM;
-
- if (copy_from_user(databuf, buf, count)) {
- err = -EFAULT;
- goto free_ret;
- }
+ databuf = memdup_user(buf, count);
+ if (IS_ERR(databuf))
+ return PTR_ERR(databuf);
err = mrq_debug_write(bpmp, filename, databuf, count);
-
-free_ret:
kfree(databuf);
return err ?: count;
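
For reference, memdup_user() folds the kmalloc()/copy_from_user() pair into a single call and returns an ERR_PTR on failure, which is what lets the error label above go away. A minimal sketch of the pattern (example_write() and its arguments are illustrative, not part of the driver):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t example_write(const char __user *buf, size_t count)
{
	char *kbuf;

	/* Allocates count bytes and copies them from user space in one step. */
	kbuf = memdup_user(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);	/* -ENOMEM or -EFAULT */

	/* ... consume kbuf ... */

	kfree(kbuf);
	return count;
}
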
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index d1f652802181..ff5cabe70a2b 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1312,6 +1312,37 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_set_sd_config - PM call to set value of SD config registers
+ * @node: SD node ID
+ * @config: The config type of SD registers
+ * @value: Value to be set
+ *
+ * Return: 0 on success or an error value on failure.
+ */
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
+
+/**
+ * zynqmp_pm_set_gem_config - PM call to set value of GEM config registers
+ * @node: GEM node ID
+ * @config: The config type of GEM registers
+ * @value: Value to be set
+ *
+ * Return: 0 on success or an error value on failure.
+ */
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
+
+/**
* struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
* @subtype: Shutdown subtype
* @name: Matching string for scope argument
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index fd1fa55c9113..0914e7328b1a 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -77,12 +77,18 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
#define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
#define PCIE_DEVICE_ID_SILICOM_PAC_N5010 0x1000
#define PCIE_DEVICE_ID_SILICOM_PAC_N5011 0x1001
+#define PCIE_DEVICE_ID_INTEL_DFL 0xbcce
+/* PCI Subdevice ID for PCIE_DEVICE_ID_INTEL_DFL */
+#define PCIE_SUBDEVICE_ID_INTEL_N6000 0x1770
+#define PCIE_SUBDEVICE_ID_INTEL_N6001 0x1771
+#define PCIE_SUBDEVICE_ID_INTEL_C6100 0x17d4
/* VF Device */
#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
+#define PCIE_DEVICE_ID_INTEL_DFL_VF 0xbccf
static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
@@ -96,6 +102,18 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5010),},
{PCI_DEVICE(PCI_VENDOR_ID_SILICOM_DENMARK, PCIE_DEVICE_ID_SILICOM_PAC_N5011),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6001),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL_VF,
+ PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_C6100),},
{0,}
};
MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
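
The new entries use PCI_DEVICE_SUB() because the 0xbcce/0xbccf device IDs are shared across boards, so the subsystem vendor/device pair is what identifies the actual card. A minimal sketch of such a match entry, reusing the N6000 values from the hunk above (the table name is illustrative):

static const struct pci_device_id example_id_tbl[] = {
	/* Match only when the subsystem IDs identify an N6000 board. */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_DFL,
			 PCI_VENDOR_ID_INTEL, PCIE_SUBDEVICE_ID_INTEL_N6000) },
	{ }
};
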
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 5498bc337f8b..b9aae85ba930 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1866,7 +1866,7 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
return -EINVAL;
fds = memdup_user((void __user *)(arg + sizeof(hdr)),
- hdr.count * sizeof(s32));
+ array_size(hdr.count, sizeof(s32)));
if (IS_ERR(fds))
return PTR_ERR(fds);
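
array_size() (from <linux/overflow.h>) replaces the open-coded multiply above because it saturates to SIZE_MAX on overflow, so an oversized hdr.count makes memdup_user() fail cleanly instead of under-allocating. A small sketch of the same idea with an illustrative helper:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *alloc_s32_array(size_t count)
{
	/* Overflow-safe equivalent of kmalloc(count * sizeof(s32), ...). */
	return kmalloc(array_size(count, sizeof(s32)), GFP_KERNEL);
}
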
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 72c677c910de..79d48852825e 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -148,10 +148,6 @@ static ssize_t flash_count_show(struct device *dev,
stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
- flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
- if (!flash_buf)
- return -ENOMEM;
-
if (FLASH_COUNT_SIZE % stride) {
dev_err(sec->dev,
"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
@@ -160,6 +156,10 @@ static ssize_t flash_count_show(struct device *dev,
return -EINVAL;
}
+ flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
+ if (!flash_buf)
+ return -ENOMEM;
+
ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
flash_buf, FLASH_COUNT_SIZE / stride);
if (ret) {
@@ -605,6 +605,9 @@ static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
},
+ {
+ .name = "d5005bmc-sec-update",
+ },
{ }
};
MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids);
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
index bd284c7b8dc9..7436976ea904 100644
--- a/drivers/fpga/microchip-spi.c
+++ b/drivers/fpga/microchip-spi.c
@@ -395,4 +395,5 @@ static struct spi_driver mpf_driver = {
module_spi_driver(mpf_driver);
MODULE_DESCRIPTION("Microchip Polarfire SPI FPGA Manager");
+MODULE_AUTHOR("Ivan Bornyakov <i.bornyakov@metrotek.ru>");
MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 3a7b78e36701..694e80c06665 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -392,8 +392,8 @@ int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
}
EXPORT_SYMBOL_GPL(fsi_slave_write);
-extern int fsi_slave_claim_range(struct fsi_slave *slave,
- uint32_t addr, uint32_t size)
+int fsi_slave_claim_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size)
{
if (addr + size < addr)
return -EINVAL;
@@ -406,8 +406,8 @@ extern int fsi_slave_claim_range(struct fsi_slave *slave,
}
EXPORT_SYMBOL_GPL(fsi_slave_claim_range);
-extern void fsi_slave_release_range(struct fsi_slave *slave,
- uint32_t addr, uint32_t size)
+void fsi_slave_release_range(struct fsi_slave *slave,
+ uint32_t addr, uint32_t size)
{
}
EXPORT_SYMBOL_GPL(fsi_slave_release_range);
@@ -1314,6 +1314,9 @@ int fsi_master_register(struct fsi_master *master)
mutex_init(&master->scan_lock);
master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+ if (master->idx < 0)
+ return master->idx;
+
dev_set_name(&master->dev, "fsi%d", master->idx);
master->dev.class = &fsi_master_class;
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 24292acdbaf8..5f608ef8b53c 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1324,12 +1324,14 @@ static int fsi_master_acf_probe(struct platform_device *pdev)
}
master->cvic = devm_of_iomap(&pdev->dev, np, 0, NULL);
if (IS_ERR(master->cvic)) {
+ of_node_put(np);
rc = PTR_ERR(master->cvic);
dev_err(&pdev->dev, "Error %d mapping CVIC\n", rc);
goto err_free;
}
rc = of_property_read_u32(np, "copro-sw-interrupts",
&master->cvic_sw_irq);
+ of_node_put(np);
if (rc) {
dev_err(&pdev->dev, "Can't find coprocessor SW interrupt\n");
goto err_free;
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index cd6bee5e12a7..4762315a46ba 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -51,7 +51,7 @@
#define FSI_MMODE_CRS1SHFT 8 /* Clk rate selection 1 shift */
#define FSI_MMODE_CRS1MASK 0x3ff /* Clk rate selection 1 mask */
-/* MRESB: Reset brindge */
+/* MRESB: Reset bridge */
#define FSI_MRESB_RST_GEN 0x80000000 /* General reset */
#define FSI_MRESB_RST_ERR 0x40000000 /* Error Reset */
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index c9cc75fbdfb9..abdd37d5507f 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -44,6 +44,7 @@ struct occ {
struct device *sbefifo;
char name[32];
int idx;
+ bool platform_hwmon;
u8 sequence_number;
void *buffer;
void *client_buffer;
@@ -94,6 +95,7 @@ static int occ_open(struct inode *inode, struct file *file)
client->occ = occ;
mutex_init(&client->lock);
file->private_data = client;
+ get_device(occ->dev);
/* We allocate a 1-page buffer, make sure it all fits */
BUILD_BUG_ON((OCC_CMD_DATA_BYTES + 3) > PAGE_SIZE);
@@ -197,6 +199,7 @@ static int occ_release(struct inode *inode, struct file *file)
{
struct occ_client *client = file->private_data;
+ put_device(client->occ->dev);
free_page((unsigned long)client->buffer);
kfree(client);
@@ -246,7 +249,7 @@ static int occ_verify_checksum(struct occ *occ, struct occ_response *resp,
if (checksum != checksum_resp) {
dev_err(occ->dev, "Bad checksum: %04x!=%04x\n", checksum,
checksum_resp);
- return -EBADMSG;
+ return -EBADE;
}
return 0;
@@ -493,12 +496,19 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
for (i = 1; i < req_len - 2; ++i)
checksum += byte_request[i];
- mutex_lock(&occ->occ_lock);
+ rc = mutex_lock_interruptible(&occ->occ_lock);
+ if (rc)
+ return rc;
occ->client_buffer = response;
occ->client_buffer_size = user_resp_len;
occ->client_response_size = 0;
+ if (!occ->buffer) {
+ rc = -ENOENT;
+ goto done;
+ }
+
/*
* Get a sequence number and update the counter. Avoid a sequence
* number of 0 which would pass the response check below even if the
@@ -575,8 +585,11 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
dev_dbg(dev, "resp_status=%02x resp_data_len=%d\n",
resp->return_status, resp_data_length);
- occ->client_response_size = resp_data_length + 7;
rc = occ_verify_checksum(occ, resp, resp_data_length);
+ if (rc)
+ goto done;
+
+ occ->client_response_size = resp_data_length + 7;
done:
*resp_len = occ->client_response_size;
@@ -586,7 +599,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
}
EXPORT_SYMBOL_GPL(fsi_occ_submit);
-static int occ_unregister_child(struct device *dev, void *data)
+static int occ_unregister_platform_child(struct device *dev, void *data)
{
struct platform_device *hwmon_dev = to_platform_device(dev);
@@ -595,12 +608,25 @@ static int occ_unregister_child(struct device *dev, void *data)
return 0;
}
+static int occ_unregister_of_child(struct device *dev, void *data)
+{
+ struct platform_device *hwmon_dev = to_platform_device(dev);
+
+ of_device_unregister(hwmon_dev);
+ if (dev->of_node)
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+
+ return 0;
+}
+
static int occ_probe(struct platform_device *pdev)
{
int rc;
u32 reg;
+ char child_name[32];
struct occ *occ;
- struct platform_device *hwmon_dev;
+ struct platform_device *hwmon_dev = NULL;
+ struct device_node *hwmon_node;
struct device *dev = &pdev->dev;
struct platform_device_info hwmon_dev_info = {
.parent = dev,
@@ -659,10 +685,20 @@ static int occ_probe(struct platform_device *pdev)
return rc;
}
- hwmon_dev_info.id = occ->idx;
- hwmon_dev = platform_device_register_full(&hwmon_dev_info);
- if (IS_ERR(hwmon_dev))
- dev_warn(dev, "failed to create hwmon device\n");
+ hwmon_node = of_get_child_by_name(dev->of_node, hwmon_dev_info.name);
+ if (hwmon_node) {
+ snprintf(child_name, sizeof(child_name), "%s.%d", hwmon_dev_info.name, occ->idx);
+ hwmon_dev = of_platform_device_create(hwmon_node, child_name, dev);
+ of_node_put(hwmon_node);
+ }
+
+ if (!hwmon_dev) {
+ occ->platform_hwmon = true;
+ hwmon_dev_info.id = occ->idx;
+ hwmon_dev = platform_device_register_full(&hwmon_dev_info);
+ if (IS_ERR(hwmon_dev))
+ dev_warn(dev, "failed to create hwmon device\n");
+ }
return 0;
}
@@ -671,11 +707,17 @@ static int occ_remove(struct platform_device *pdev)
{
struct occ *occ = platform_get_drvdata(pdev);
- kvfree(occ->buffer);
-
misc_deregister(&occ->mdev);
- device_for_each_child(&pdev->dev, NULL, occ_unregister_child);
+ mutex_lock(&occ->occ_lock);
+ kvfree(occ->buffer);
+ occ->buffer = NULL;
+ mutex_unlock(&occ->occ_lock);
+
+ if (occ->platform_hwmon)
+ device_for_each_child(&pdev->dev, NULL, occ_unregister_platform_child);
+ else
+ device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);
ida_simple_remove(&occ_ida, occ->idx);
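
Taken together, the fsi_occ_submit() and occ_remove() hunks form a simple unbind-safe handshake: the shared buffer is freed and cleared under occ_lock at remove time, and submitters re-check it under the same lock. A compressed sketch of that pattern (example_submit() is illustrative, not the driver's function):

static int example_submit(struct occ *occ)
{
	int rc = mutex_lock_interruptible(&occ->occ_lock);

	if (rc)
		return rc;

	if (!occ->buffer) {		/* device is being removed */
		rc = -ENOENT;
		goto done;
	}

	/* ... build the command in occ->buffer and submit it ... */

done:
	mutex_unlock(&occ->occ_lock);
	return rc;
}
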
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index f52a912cdf16..5f93a53846aa 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -477,7 +477,8 @@ static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
if (!ready) {
sysfs_notify(&sbefifo->dev.kobj, NULL, dev_attr_timeout.attr.name);
sbefifo->timed_out = true;
- dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts);
+ dev_err(dev, "%s FIFO Timeout (%u ms)! status=%08x\n",
+ up ? "UP" : "DOWN", jiffies_to_msecs(timeout), sts);
return -ETIMEDOUT;
}
dev_vdbg(dev, "End of wait status: %08x\n", sts);
@@ -497,8 +498,8 @@ static int sbefifo_send_command(struct sbefifo *sbefifo,
u32 status;
int rc;
- dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
- cmd_len, be32_to_cpu(command[1]));
+ dev_dbg(dev, "sending command (%zd words, cmd=%04x)\n",
+ cmd_len, be32_to_cpu(command[1]));
/* As long as there's something to send */
timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
@@ -551,21 +552,23 @@ static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *respo
size_t len;
int rc;
- dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
+ dev_dbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));
timeout = msecs_to_jiffies(sbefifo->timeout_start_rsp_ms);
for (;;) {
/* Grab FIFO status (this will handle parity errors) */
rc = sbefifo_wait(sbefifo, false, &status, timeout);
- if (rc < 0)
+ if (rc < 0) {
+ dev_dbg(dev, "timeout waiting (%u ms)\n", jiffies_to_msecs(timeout));
return rc;
+ }
timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);
/* Decode status */
len = sbefifo_populated(status);
eot_set = sbefifo_eot_set(status);
- dev_vdbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
+ dev_dbg(dev, " chunk size %zd eot_set=0x%x\n", len, eot_set);
/* Go through the chunk */
while(len--) {
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index e6f94501cb28..1e82b7967570 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -217,7 +217,7 @@ static void gnss_device_release(struct device *dev)
kfree(gdev->write_buf);
kfifo_free(&gdev->read_fifo);
- ida_simple_remove(&gnss_minors, gdev->id);
+ ida_free(&gnss_minors, gdev->id);
kfree(gdev);
}
@@ -232,7 +232,7 @@ struct gnss_device *gnss_allocate_device(struct device *parent)
if (!gdev)
return NULL;
- id = ida_simple_get(&gnss_minors, 0, GNSS_MINORS, GFP_KERNEL);
+ id = ida_alloc_max(&gnss_minors, GNSS_MINORS - 1, GFP_KERNEL);
if (id < 0) {
kfree(gdev);
return NULL;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0642f579196f..ed9e71d6713e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -341,6 +341,10 @@ config GPIO_ICH
If unsure, say N.
+config GPIO_IMX_SCU
+ def_bool y
+ depends on IMX_SCU
+
config GPIO_IOP
tristate "Intel IOP GPIO"
depends on ARCH_IOP32X || COMPILE_TEST
@@ -874,10 +878,11 @@ config GPIO_104_IDI_48
module parameter.
config GPIO_F7188X
- tristate "F71869, F71869A, F71882FG, F71889F and F81866 GPIO support"
+ tristate "Fintek and Nuvoton Super-I/O GPIO support"
help
This option enables support for GPIOs found on Fintek Super-I/O
chips F71869, F71869A, F71882FG, F71889F and F81866.
+ It also enables support for the Nuvoton Super-I/O chip NCT6116D.
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index a0985d30f51b..b67e29d348cf 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_I8255) += gpio-i8255.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
+obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index a41551870759..7b8829c8e423 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -34,7 +34,8 @@ module_param_hw_array(base, uint, ioport, &num_dio48e, 0);
MODULE_PARM_DESC(base, "ACCES 104-DIO-48E base addresses");
static unsigned int irq[MAX_NUM_DIO48E];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
#define DIO48E_NUM_PPI 2
@@ -164,6 +165,7 @@ static void dio48e_irq_mask(struct irq_data *data)
dio48egpio->irq_mask &= ~BIT(0);
else
dio48egpio->irq_mask &= ~BIT(1);
+ gpiochip_disable_irq(chip, offset);
if (!dio48egpio->irq_mask)
/* disable interrupts */
@@ -191,6 +193,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
+ gpiochip_enable_irq(chip, offset);
if (offset == 19)
dio48egpio->irq_mask |= BIT(0);
else
@@ -213,12 +216,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip dio48e_irqchip = {
+static const struct irq_chip dio48e_irqchip = {
.name = "104-dio-48e",
.irq_ack = dio48e_irq_ack,
.irq_mask = dio48e_irq_mask,
.irq_unmask = dio48e_irq_unmask,
- .irq_set_type = dio48e_irq_set_type
+ .irq_set_type = dio48e_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
@@ -322,7 +327,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
girq = &dio48egpio->chip.irq;
- girq->chip = &dio48e_irqchip;
+ gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -358,7 +363,7 @@ static struct isa_driver dio48e_driver = {
.name = "104-dio-48e"
},
};
-module_isa_driver(dio48e_driver, num_dio48e);
+module_isa_driver_with_irq(dio48e_driver, num_dio48e, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 40be76efeed7..c5e231fde1af 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -34,7 +34,8 @@ module_param_hw_array(base, uint, ioport, &num_idi_48, 0);
MODULE_PARM_DESC(base, "ACCES 104-IDI-48 base addresses");
static unsigned int irq[MAX_NUM_IDI_48];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
/**
@@ -113,6 +114,7 @@ static void idi_48_irq_mask(struct irq_data *data)
spin_lock_irqsave(&idi48gpio->lock, flags);
idi48gpio->irq_mask[boundary] &= ~mask;
+ gpiochip_disable_irq(chip, offset);
/* Exit early if there are still input lines with IRQ unmasked */
if (idi48gpio->irq_mask[boundary])
@@ -140,6 +142,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
prev_irq_mask = idi48gpio->irq_mask[boundary];
+ gpiochip_enable_irq(chip, offset);
idi48gpio->irq_mask[boundary] |= mask;
/* Exit early if IRQ was already unmasked for this boundary */
@@ -164,12 +167,14 @@ static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idi_48_irqchip = {
+static const struct irq_chip idi_48_irqchip = {
.name = "104-idi-48",
.irq_ack = idi_48_irq_ack,
.irq_mask = idi_48_irq_mask,
.irq_unmask = idi_48_irq_unmask,
- .irq_set_type = idi_48_irq_set_type
+ .irq_set_type = idi_48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
@@ -267,7 +272,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
girq = &idi48gpio->chip.irq;
- girq->chip = &idi_48_irqchip;
+ gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -300,7 +305,7 @@ static struct isa_driver idi_48_driver = {
.name = "104-idi-48"
},
};
-module_isa_driver(idi_48_driver, num_idi_48);
+module_isa_driver_with_irq(idi_48_driver, num_idi_48, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDI-48 GPIO driver");
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 65a5f581d981..718bd54e2a25 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -30,7 +30,8 @@ module_param_hw_array(base, uint, ioport, &num_idio_16, 0);
MODULE_PARM_DESC(base, "ACCES 104-IDIO-16 base addresses");
static unsigned int irq[MAX_NUM_IDIO_16];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
/**
@@ -174,10 +175,11 @@ static void idio_16_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
unsigned long flags;
- idio16gpio->irq_mask &= ~mask;
+ idio16gpio->irq_mask &= ~BIT(offset);
+ gpiochip_disable_irq(chip, offset);
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -192,11 +194,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
const unsigned long prev_irq_mask = idio16gpio->irq_mask;
unsigned long flags;
- idio16gpio->irq_mask |= mask;
+ gpiochip_enable_irq(chip, offset);
+ idio16gpio->irq_mask |= BIT(offset);
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -217,12 +220,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idio_16_irqchip = {
+static const struct irq_chip idio_16_irqchip = {
.name = "104-idio-16",
.irq_ack = idio_16_irq_ack,
.irq_mask = idio_16_irq_mask,
.irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type
+ .irq_set_type = idio_16_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
@@ -299,7 +304,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
idio16gpio->out_state = 0xFFFF;
girq = &idio16gpio->chip.irq;
- girq->chip = &idio_16_irqchip;
+ gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -333,7 +338,7 @@ static struct isa_driver idio_16_driver = {
},
};
-module_isa_driver(idio_16_driver, num_idio_16);
+module_isa_driver_with_irq(idio_16_driver, num_idio_16, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d49f12560cde..9b562dbbd733 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -409,14 +409,12 @@ static int adp5588_gpio_probe(struct i2c_client *client)
return 0;
}
-static int adp5588_gpio_remove(struct i2c_client *client)
+static void adp5588_gpio_remove(struct i2c_client *client)
{
struct adp5588_gpio *dev = i2c_get_clientdata(client);
if (dev->client->irq)
free_irq(dev->client->irq, dev);
-
- return 0;
}
static const struct i2c_device_id adp5588_gpio_id[] = {
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index d37de78247a6..482f678c893e 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -21,6 +21,12 @@
#define EXAR_OFFSET_MPIOLVL_HI 0x96
#define EXAR_OFFSET_MPIOSEL_HI 0x99
+/*
+ * The Device Configuration and UART Configuration Registers
+ * for each UART channel take 1KB of memory address space.
+ */
+#define EXAR_UART_CHANNEL_SIZE 0x400
+
#define DRIVER_NAME "gpio_exar"
static DEFINE_IDA(ida_index);
@@ -31,26 +37,39 @@ struct exar_gpio_chip {
int index;
char name[20];
unsigned int first_pin;
+ /*
+ * The offset to the cascaded device's (if existing)
+ * Device Configuration Registers.
+ */
+ unsigned int cascaded_offset;
};
static unsigned int
exar_offset_to_sel_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOSEL_HI
- : EXAR_OFFSET_MPIOSEL_LO;
+ unsigned int pin = exar_gpio->first_pin + (offset % 16);
+ unsigned int cascaded = offset / 16;
+ unsigned int addr = pin / 8 ? EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
+
+ return addr + (cascaded ? exar_gpio->cascaded_offset : 0);
}
static unsigned int
exar_offset_to_lvl_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOLVL_HI
- : EXAR_OFFSET_MPIOLVL_LO;
+ unsigned int pin = exar_gpio->first_pin + (offset % 16);
+ unsigned int cascaded = offset / 16;
+ unsigned int addr = pin / 8 ? EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
+
+ return addr + (cascaded ? exar_gpio->cascaded_offset : 0);
}
static unsigned int
exar_offset_to_bit(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- return (offset + exar_gpio->first_pin) % 8;
+ unsigned int pin = exar_gpio->first_pin + (offset % 16);
+
+ return pin % 8;
}
static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -154,6 +173,17 @@ static int gpio_exar_probe(struct platform_device *pdev)
return -ENOMEM;
/*
+ * If cascaded, a secondary xr17v354 or xr17v358 has the same number
+ * of MPIOs as its primary, and the last 4 bits of the primary's
+ * PCI Device ID encode the number of its UART channels.
+ */
+ if (pcidev->device & GENMASK(15, 12)) {
+ ngpios += ngpios;
+ exar_gpio->cascaded_offset = (pcidev->device & GENMASK(3, 0)) *
+ EXAR_UART_CHANNEL_SIZE;
+ }
+
+ /*
* We don't need to check the return values of mmio regmap operations (unless
* the regmap has a clock attached which is not the case here).
*/
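
As a worked example of the cascade arithmetic above: a primary whose device ID reports 8 UART channels in its low nibble places the secondary's Device Configuration Registers 8 * EXAR_UART_CHANNEL_SIZE = 0x2000 bytes further in. A one-line helper expressing the same computation (illustrative, not part of the driver):

static unsigned int example_cascaded_offset(u16 device_id)
{
	/* Low nibble of the device ID = number of UART channels. */
	return (device_id & GENMASK(3, 0)) * EXAR_UART_CHANNEL_SIZE;
}
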
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 18a3147f5a42..9effa7769bef 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -1,12 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882, F71889 and F81866
+ * GPIO driver for Fintek and Nuvoton Super-I/O chips
*
* Copyright (C) 2010-2013 LaCie
*
* Author: Simon Guinot <simon.guinot@sequanux.org>
*/
+#define DRVNAME "gpio-f7188x"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -14,30 +17,41 @@
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
-#define DRVNAME "gpio-f7188x"
-
/*
* Super-I/O registers
*/
#define SIO_LDSEL 0x07 /* Logical device select */
#define SIO_DEVID 0x20 /* Device ID (2 bytes) */
-#define SIO_DEVREV 0x22 /* Device revision */
-#define SIO_MANID 0x23 /* Fintek ID (2 bytes) */
-#define SIO_LD_GPIO 0x06 /* GPIO logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
-#define SIO_FINTEK_ID 0x1934 /* Manufacturer ID */
+/*
+ * Fintek devices.
+ */
+#define SIO_FINTEK_DEVREV 0x22 /* Fintek Device revision */
+#define SIO_FINTEK_MANID 0x23 /* Fintek ID (2 bytes) */
+
+#define SIO_FINTEK_ID 0x1934 /* Manufacturer ID */
+
#define SIO_F71869_ID 0x0814 /* F71869 chipset ID */
#define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */
#define SIO_F71882_ID 0x0541 /* F71882 chipset ID */
#define SIO_F71889_ID 0x0909 /* F71889 chipset ID */
#define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */
#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
-#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for f81966 */
+#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for F81966 */
#define SIO_F81865_ID 0x0704 /* F81865 chipset ID */
+#define SIO_LD_GPIO_FINTEK 0x06 /* GPIO logical device */
+
+/*
+ * Nuvoton devices.
+ */
+#define SIO_NCT6116D_ID 0xD283 /* NCT6116D chipset ID */
+
+#define SIO_LD_GPIO_NUVOTON 0x07 /* GPIO logical device */
+
enum chips {
f71869,
@@ -48,6 +62,7 @@ enum chips {
f81866,
f81804,
f81865,
+ nct6116d,
};
static const char * const f7188x_names[] = {
@@ -59,10 +74,12 @@ static const char * const f7188x_names[] = {
"f81866",
"f81804",
"f81865",
+ "nct6116d",
};
struct f7188x_sio {
int addr;
+ int device;
enum chips type;
};
@@ -110,7 +127,7 @@ static inline int superio_enter(int base)
{
/* Don't step on other drivers' I/O space by accident. */
if (!request_muxed_region(base, 2, DRVNAME)) {
- pr_err(DRVNAME "I/O address 0x%04x already in use\n", base);
+ pr_err("I/O address 0x%04x already in use\n", base);
return -EBUSY;
}
@@ -146,10 +163,10 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
unsigned long config);
-#define F7188X_GPIO_BANK(_base, _ngpio, _regbase) \
+#define F7188X_GPIO_BANK(_base, _ngpio, _regbase, _label) \
{ \
.chip = { \
- .label = DRVNAME, \
+ .label = _label, \
.owner = THIS_MODULE, \
.get_direction = f7188x_gpio_get_direction, \
.direction_input = f7188x_gpio_direction_in, \
@@ -164,94 +181,108 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.regbase = _regbase, \
}
-#define gpio_dir(base) (base + 0)
-#define gpio_data_out(base) (base + 1)
-#define gpio_data_in(base) (base + 2)
+#define f7188x_gpio_dir(base) ((base) + 0)
+#define f7188x_gpio_data_out(base) ((base) + 1)
+#define f7188x_gpio_data_in(base) ((base) + 2)
/* Output mode register (0:open drain 1:push-pull). */
-#define gpio_out_mode(base) (base + 3)
+#define f7188x_gpio_out_mode(base) ((base) + 3)
+
+#define f7188x_gpio_dir_invert(type) ((type) == nct6116d)
+#define f7188x_gpio_data_single(type) ((type) == nct6116d)
static struct f7188x_gpio_bank f71869_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 6, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 6, 0x90),
+ F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 6, 0x90, DRVNAME "-6"),
};
static struct f7188x_gpio_bank f71869a_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 6, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
};
static struct f7188x_gpio_bank f71882_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 4, 0xC0),
- F7188X_GPIO_BANK(40, 4, 0xB0),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 4, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 4, 0xB0, DRVNAME "-4"),
};
static struct f7188x_gpio_bank f71889a_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 7, 0xF0),
- F7188X_GPIO_BANK(10, 7, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(0, 7, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 7, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
};
static struct f7188x_gpio_bank f71889_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 7, 0xF0),
- F7188X_GPIO_BANK(10, 7, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 5, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(0, 7, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 7, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 5, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
};
static struct f7188x_gpio_bank f81866_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 8, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
- F7188X_GPIO_BANK(80, 8, 0x88),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-7"),
+ F7188X_GPIO_BANK(80, 8, 0x88, DRVNAME "-8"),
};
static struct f7188x_gpio_bank f81804_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(50, 8, 0xA0),
- F7188X_GPIO_BANK(60, 8, 0x90),
- F7188X_GPIO_BANK(70, 8, 0x80),
- F7188X_GPIO_BANK(90, 8, 0x98),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(60, 8, 0x90, DRVNAME "-4"),
+ F7188X_GPIO_BANK(70, 8, 0x80, DRVNAME "-5"),
+ F7188X_GPIO_BANK(90, 8, 0x98, DRVNAME "-6"),
};
static struct f7188x_gpio_bank f81865_gpio_bank[] = {
- F7188X_GPIO_BANK(0, 8, 0xF0),
- F7188X_GPIO_BANK(10, 8, 0xE0),
- F7188X_GPIO_BANK(20, 8, 0xD0),
- F7188X_GPIO_BANK(30, 8, 0xC0),
- F7188X_GPIO_BANK(40, 8, 0xB0),
- F7188X_GPIO_BANK(50, 8, 0xA0),
- F7188X_GPIO_BANK(60, 5, 0x90),
+ F7188X_GPIO_BANK(0, 8, 0xF0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE0, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xD0, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xC0, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xB0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 8, 0xA0, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"),
+};
+
+static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"),
+ F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"),
+ F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"),
+ F7188X_GPIO_BANK(30, 8, 0xEC, DRVNAME "-3"),
+ F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"),
+ F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"),
+ F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"),
+ F7188X_GPIO_BANK(70, 1, 0xFC, DRVNAME "-7"),
};
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -264,13 +295,16 @@ static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
superio_exit(sio->addr);
- if (dir & 1 << offset)
+ if (f7188x_gpio_dir_invert(sio->type))
+ dir = ~dir;
+
+ if (dir & BIT(offset))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
@@ -286,11 +320,15 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir &= ~BIT(offset);
- superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
+
+ if (f7188x_gpio_dir_invert(sio->type))
+ dir |= BIT(offset);
+ else
+ dir &= ~BIT(offset);
+ superio_outb(sio->addr, f7188x_gpio_dir(bank->regbase), dir);
superio_exit(sio->addr);
@@ -307,14 +345,14 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset)
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
dir = !!(dir & BIT(offset));
- if (dir)
- data = superio_inb(sio->addr, gpio_data_out(bank->regbase));
+ if (f7188x_gpio_data_single(sio->type) || dir)
+ data = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
else
- data = superio_inb(sio->addr, gpio_data_in(bank->regbase));
+ data = superio_inb(sio->addr, f7188x_gpio_data_in(bank->regbase));
superio_exit(sio->addr);
@@ -332,18 +370,21 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
+ data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
if (value)
data_out |= BIT(offset);
else
data_out &= ~BIT(offset);
- superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
+ superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
- dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir |= BIT(offset);
- superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
+ dir = superio_inb(sio->addr, f7188x_gpio_dir(bank->regbase));
+ if (f7188x_gpio_dir_invert(sio->type))
+ dir &= ~BIT(offset);
+ else
+ dir |= BIT(offset);
+ superio_outb(sio->addr, f7188x_gpio_dir(bank->regbase), dir);
superio_exit(sio->addr);
@@ -360,14 +401,14 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
err = superio_enter(sio->addr);
if (err)
return;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
+ data_out = superio_inb(sio->addr, f7188x_gpio_data_out(bank->regbase));
if (value)
data_out |= BIT(offset);
else
data_out &= ~BIT(offset);
- superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
+ superio_outb(sio->addr, f7188x_gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
}
@@ -388,14 +429,14 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
err = superio_enter(sio->addr);
if (err)
return err;
- superio_select(sio->addr, SIO_LD_GPIO);
+ superio_select(sio->addr, sio->device);
- data = superio_inb(sio->addr, gpio_out_mode(bank->regbase));
+ data = superio_inb(sio->addr, f7188x_gpio_out_mode(bank->regbase));
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
data &= ~BIT(offset);
else
data |= BIT(offset);
- superio_outb(sio->addr, gpio_out_mode(bank->regbase), data);
+ superio_outb(sio->addr, f7188x_gpio_out_mode(bank->regbase), data);
superio_exit(sio->addr);
return 0;
@@ -449,6 +490,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
data->bank = f81865_gpio_bank;
break;
+ case nct6116d:
+ data->nr_bank = ARRAY_SIZE(nct6116d_gpio_bank);
+ data->bank = nct6116d_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -479,18 +524,15 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
{
int err;
u16 devid;
+ u16 manid;
err = superio_enter(addr);
if (err)
return err;
err = -ENODEV;
- devid = superio_inw(addr, SIO_MANID);
- if (devid != SIO_FINTEK_ID) {
- pr_debug(DRVNAME ": Not a Fintek device at 0x%08x\n", addr);
- goto err;
- }
+ sio->device = SIO_LD_GPIO_FINTEK;
devid = superio_inw(addr, SIO_DEVID);
switch (devid) {
case SIO_F71869_ID:
@@ -517,17 +559,30 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F81865_ID:
sio->type = f81865;
break;
+ case SIO_NCT6116D_ID:
+ sio->device = SIO_LD_GPIO_NUVOTON;
+ sio->type = nct6116d;
+ break;
default:
- pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
+ pr_info("Unsupported Fintek device 0x%04x\n", devid);
goto err;
}
+
+ /* Double-check the manufacturer where possible. */
+ if (sio->type != nct6116d) {
+ manid = superio_inw(addr, SIO_FINTEK_MANID);
+ if (manid != SIO_FINTEK_ID) {
+ pr_debug("Not a Fintek device at 0x%08x\n", addr);
+ goto err;
+ }
+ }
+
sio->addr = addr;
err = 0;
- pr_info(DRVNAME ": Found %s at %#x, revision %d\n",
- f7188x_names[sio->type],
- (unsigned int) addr,
- (int) superio_inb(addr, SIO_DEVREV));
+ pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr);
+ if (sio->type != nct6116d)
+ pr_info(" revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV));
err:
superio_exit(addr);
@@ -548,13 +603,13 @@ f7188x_gpio_device_add(const struct f7188x_sio *sio)
err = platform_device_add_data(f7188x_gpio_pdev,
sio, sizeof(*sio));
if (err) {
- pr_err(DRVNAME "Platform data allocation failed\n");
+ pr_err("Platform data allocation failed\n");
goto err;
}
err = platform_device_add(f7188x_gpio_pdev);
if (err) {
- pr_err(DRVNAME "Device addition failed\n");
+ pr_err("Device addition failed\n");
goto err;
}
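
The NCT6116D support above hinges on two small predicates, f7188x_gpio_dir_invert() and f7188x_gpio_data_single(): on the Nuvoton part a set direction bit means "input" rather than "output", and pin state is always read back from the single data register. A minimal standalone sketch of the inverted-direction idea (plain C, hypothetical names, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	#define BIT(n)	(1u << (n))

	enum family { FAMILY_FINTEK, FAMILY_NUVOTON };

	/* Returns true when the line is configured as an output. */
	static bool line_is_output(enum family f, uint8_t dir_reg, unsigned int offset)
	{
		/* Nuvoton: a set bit means "input", so flip the register first. */
		if (f == FAMILY_NUVOTON)
			dir_reg = ~dir_reg;

		return dir_reg & BIT(offset);
	}
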
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index f422c3e129a0..f77a965f5780 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -41,14 +41,12 @@
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
* @gc: gpiochip for this instance
- * @irq: irqchip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
struct gpio_chip gc;
- struct irq_chip irq;
void __iomem *base;
struct clk *clk;
};
@@ -70,6 +68,7 @@ static void ftgpio_gpio_mask_irq(struct irq_data *d)
val = readl(g->base + GPIO_INT_EN);
val &= ~BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
+ gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
static void ftgpio_gpio_unmask_irq(struct irq_data *d)
@@ -78,6 +77,7 @@ static void ftgpio_gpio_unmask_irq(struct irq_data *d)
struct ftgpio_gpio *g = gpiochip_get_data(gc);
u32 val;
+ gpiochip_enable_irq(gc, irqd_to_hwirq(d));
val = readl(g->base + GPIO_INT_EN);
val |= BIT(irqd_to_hwirq(d));
writel(val, g->base + GPIO_INT_EN);
@@ -221,6 +221,16 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
return 0;
}
+static const struct irq_chip ftgpio_irq_chip = {
+ .name = "FTGPIO010",
+ .irq_ack = ftgpio_gpio_ack_irq,
+ .irq_mask = ftgpio_gpio_mask_irq,
+ .irq_unmask = ftgpio_gpio_unmask_irq,
+ .irq_set_type = ftgpio_gpio_set_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -277,14 +287,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
if (!IS_ERR(g->clk))
g->gc.set_config = ftgpio_gpio_set_config;
- g->irq.name = "FTGPIO010";
- g->irq.irq_ack = ftgpio_gpio_ack_irq;
- g->irq.irq_mask = ftgpio_gpio_mask_irq;
- g->irq.irq_unmask = ftgpio_gpio_unmask_irq;
- g->irq.irq_set_type = ftgpio_gpio_set_irq_type;
-
girq = &g->gc.irq;
- girq->chip = &g->irq;
+ gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
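
The ftgpio010 conversion above follows the generic "immutable irqchip" recipe: make the struct irq_chip const, set IRQCHIP_IMMUTABLE and GPIOCHIP_IRQ_RESOURCE_HELPERS, call gpiochip_enable_irq()/gpiochip_disable_irq() from the unmask/mask callbacks, and register it with gpio_irq_chip_set_chip() instead of assigning girq->chip. A condensed sketch of that shape (driver names are placeholders, not from this patch):

	static void foo_gpio_irq_mask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		/* ...mask the interrupt in hardware... */
		gpiochip_disable_irq(gc, irqd_to_hwirq(d));
	}

	static void foo_gpio_irq_unmask(struct irq_data *d)
	{
		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

		gpiochip_enable_irq(gc, irqd_to_hwirq(d));
		/* ...unmask the interrupt in hardware... */
	}

	static const struct irq_chip foo_irq_chip = {
		.name		= "foo-gpio",
		.irq_mask	= foo_gpio_irq_mask,
		.irq_unmask	= foo_gpio_irq_unmask,
		.flags		= IRQCHIP_IMMUTABLE,
		GPIOCHIP_IRQ_RESOURCE_HELPERS,
	};

	/* in probe: */
	gpio_irq_chip_set_chip(&gc->irq, &foo_irq_chip);
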
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
new file mode 100644
index 000000000000..17be21b8f3b7
--- /dev/null
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021~2022 NXP
+ *
+ * This driver exports a standard gpiochip interface
+ * to control the pin resources in the SCU domain.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/imx/svc/rm.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+struct scu_gpio_priv {
+ struct gpio_chip chip;
+ struct mutex lock;
+ struct device *dev;
+ struct imx_sc_ipc *handle;
+};
+
+static unsigned int scu_rsrc_arr[] = {
+ IMX_SC_R_BOARD_R0,
+ IMX_SC_R_BOARD_R1,
+ IMX_SC_R_BOARD_R2,
+ IMX_SC_R_BOARD_R3,
+ IMX_SC_R_BOARD_R4,
+ IMX_SC_R_BOARD_R5,
+ IMX_SC_R_BOARD_R6,
+ IMX_SC_R_BOARD_R7,
+};
+
+static int imx_scu_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct scu_gpio_priv *priv = gpiochip_get_data(chip);
+ int level;
+ int err;
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ /* Read the pin state via the SCU API. */
+ err = imx_sc_misc_get_control(priv->handle,
+ scu_rsrc_arr[offset], 0, &level);
+ mutex_unlock(&priv->lock);
+
+ if (err) {
+ dev_err(priv->dev, "SCU get failed: %d\n", err);
+ return err;
+ }
+
+ return level;
+}
+
+static void imx_scu_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct scu_gpio_priv *priv = gpiochip_get_data(chip);
+ int err;
+
+ if (offset >= chip->ngpio)
+ return;
+
+ mutex_lock(&priv->lock);
+
+ /* Set the pin output level via the SCU API. */
+ err = imx_sc_misc_set_control(priv->handle,
+ scu_rsrc_arr[offset], 0, value);
+ mutex_unlock(&priv->lock);
+
+ if (err)
+ dev_err(priv->dev, "SCU set (%d) failed: %d\n",
+ scu_rsrc_arr[offset], err);
+}
+
+static int imx_scu_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int imx_scu_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct scu_gpio_priv *priv;
+ struct gpio_chip *gc;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = imx_scu_get_handle(&priv->handle);
+ if (ret)
+ return ret;
+
+ priv->dev = dev;
+ mutex_init(&priv->lock);
+
+ gc = &priv->chip;
+ gc->base = -1;
+ gc->parent = dev;
+ gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
+ gc->label = dev_name(dev);
+ gc->get = imx_scu_gpio_get;
+ gc->set = imx_scu_gpio_set;
+ gc->get_direction = imx_scu_gpio_get_direction;
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+
+static const struct of_device_id imx_scu_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-sc-gpio" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver imx_scu_gpio_driver = {
+ .driver = {
+ .name = "gpio-imx-scu",
+ .of_match_table = imx_scu_gpio_dt_ids,
+ },
+ .probe = imx_scu_gpio_probe,
+};
+
+static int __init _imx_scu_gpio_init(void)
+{
+ return platform_driver_register(&imx_scu_gpio_driver);
+}
+
+subsys_initcall_sync(_imx_scu_gpio_init);
+
+MODULE_AUTHOR("Shenwei Wang <shenwei.wang@nxp.com>");
+MODULE_DESCRIPTION("NXP GPIO over IMX SCU API");
+MODULE_LICENSE("GPL");
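
For the new gpio-imx-scu driver above, consumers use the ordinary gpiod API; the chip only offers output lines, since get_direction() unconditionally reports GPIO_LINE_DIRECTION_OUT. A hedged consumer-side sketch (the "enable" line name is purely illustrative, not defined by this patch):

	struct gpio_desc *line;

	/* "enable" is a made-up con_id for illustration only. */
	line = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(line))
		return PTR_ERR(line);

	gpiod_set_value_cansleep(line, 1);	/* routed through SCU firmware */
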
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 312309be0287..56656fb519f8 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
__raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
}
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
if (!(g->irq_edge & BIT(d->hwirq)))
ixp4xx_gpio_irq_ack(d);
+ gpiochip_enable_irq(gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
.name = "IXP4GPIO",
.irq_ack = ixp4xx_gpio_irq_ack,
- .irq_mask = irq_chip_mask_parent,
+ .irq_mask = ixp4xx_gpio_mask_irq,
.irq_unmask = ixp4xx_gpio_irq_unmask,
.irq_set_type = ixp4xx_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
g->gc.owner = THIS_MODULE;
girq = &g->gc.irq;
- girq->chip = &ixp4xx_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = g->fwnode;
girq->parent_domain = parent;
girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
diff --git a/drivers/gpio/gpio-max7300.c b/drivers/gpio/gpio-max7300.c
index b2b547dd6e84..43da381a4d7e 100644
--- a/drivers/gpio/gpio-max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -48,11 +48,9 @@ static int max7300_probe(struct i2c_client *client,
return __max730x_probe(ts);
}
-static int max7300_remove(struct i2c_client *client)
+static void max7300_remove(struct i2c_client *client)
{
__max730x_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id max7300_id[] = {
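
The max7300 hunk is part of the tree-wide conversion of the I2C remove() callback from int to void: error codes returned from remove() were only logged and then ignored by the I2C core, so drivers now report problems themselves and return nothing. A sketch of the new shape (hypothetical names):

	static void foo_remove(struct i2c_client *client)
	{
		struct foo *chip = i2c_get_clientdata(client);

		/* Undo whatever probe set up; there is no error code to return. */
		gpiochip_remove(&chip->gc);
	}

	static struct i2c_driver foo_driver = {
		.driver	= { .name = "foo" },
		.remove	= foo_remove,	/* now returns void */
	};
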
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8943cea92764..523dfd17dd92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
}
}
+static void gpio_mockup_debugfs_cleanup(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+
+ debugfs_remove_recursive(chip->dbg_dir);
+}
+
static void gpio_mockup_dispose_mappings(void *data)
{
struct gpio_mockup_chip *chip = data;
@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gpio_mockup_debugfs_setup(dev, chip);
- return 0;
+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
}
static const struct of_device_id gpio_mockup_of_match[] = {
@@ -526,8 +533,10 @@ static int __init gpio_mockup_register_chip(int idx)
}
fwnode = fwnode_create_software_node(properties, NULL);
- if (IS_ERR(fwnode))
+ if (IS_ERR(fwnode)) {
+ kfree_strarray(line_names, ngpio);
return PTR_ERR(fwnode);
+ }
pdevinfo.name = "gpio-mockup";
pdevinfo.id = idx;
@@ -590,9 +599,9 @@ static int __init gpio_mockup_init(void)
static void __exit gpio_mockup_exit(void)
{
+ gpio_mockup_unregister_pdevs();
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- gpio_mockup_unregister_pdevs();
}
module_init(gpio_mockup_init);
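
The gpio-mockup changes tie the debugfs directory's lifetime to the device with devm_add_action_or_reset(), so the directory is removed automatically on unbind, or immediately if registering the action fails. The general pattern, sketched with placeholder names:

	static void foo_debugfs_cleanup(void *data)
	{
		struct foo_chip *chip = data;

		debugfs_remove_recursive(chip->dbg_dir);
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_chip *chip;

		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->dbg_dir = debugfs_create_dir("foo", NULL);

		/* Cleanup runs on driver detach, or right away if registration fails. */
		return devm_add_action_or_reset(&pdev->dev, foo_debugfs_cleanup, chip);
	}
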
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 15049822937a..3eb08cd1fdc0 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d8a26e503ca5..93facbebb80e 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -112,6 +111,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
unsigned long flags;
u32 rise, fall, high, low;
+ gpiochip_enable_irq(gc, d->hwirq);
+
spin_lock_irqsave(&rg->lock, flags);
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +144,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
spin_unlock_irqrestore(&rg->lock, flags);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
static int
@@ -204,6 +207,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
return gpio % MTK_BANK_WIDTH;
}
+static const struct irq_chip mt7621_irq_chip = {
+ .name = "mt7621-gpio",
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_set_type = mediatek_gpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
@@ -238,11 +251,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return -ENOMEM;
rg->chip.offset = bank * MTK_BANK_WIDTH;
- rg->irq_chip.name = dev_name(dev);
- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -262,7 +270,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
}
girq = &rg->chip.irq;
- girq->chip = &rg->irq_chip;
+ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -290,7 +298,6 @@ static int
mediatek_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct mtk *mtk;
int i;
int ret;
@@ -303,7 +310,10 @@ mediatek_gpio_probe(struct platform_device *pdev)
if (IS_ERR(mtk->base))
return PTR_ERR(mtk->base);
- mtk->gpio_irq = irq_of_parse_and_map(np, 0);
+ mtk->gpio_irq = platform_get_irq(pdev, 0);
+ if (mtk->gpio_irq < 0)
+ return mtk->gpio_irq;
+
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
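
The mt7621 probe now uses platform_get_irq() instead of irq_of_parse_and_map(): besides dropping the OF dependency, it returns a negative errno (including -EPROBE_DEFER) that can be propagated directly, whereas irq_of_parse_and_map() returns 0 on failure. Sketch of the idiom:

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; just pass it up */

	priv->irq = irq;
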
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index aa126ab80f0c..1bb317b8dcce 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -790,8 +790,12 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
u32 offset;
u32 set;
- if (of_device_is_compatible(mvchip->chip.of_node,
- "marvell,armada-370-gpio")) {
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
+ int ret = of_property_read_u32(dev->of_node,
+ "marvell,pwm-offset", &offset);
+ if (ret < 0)
+ return 0;
+ } else {
/*
* There are only two sets of PWM configuration registers for
* all the GPIO lines on those SoCs which this driver reserves
@@ -801,13 +805,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
return 0;
offset = 0;
- } else if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K) {
- int ret = of_property_read_u32(dev->of_node,
- "marvell,pwm-offset", &offset);
- if (ret < 0)
- return 0;
- } else {
- return 0;
}
if (IS_ERR(mvchip->clk))
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index ecd7d169470b..ebe1943b85dd 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -66,6 +66,7 @@
#define PCA_LATCH_INT (PCA_PCAL | PCA_INT)
#define PCA953X_TYPE BIT(12)
#define PCA957X_TYPE BIT(13)
+#define PCAL653X_TYPE BIT(14)
#define PCA_TYPE_MASK GENMASK(15, 12)
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
@@ -89,8 +90,10 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca9575", 16 | PCA957X_TYPE | PCA_INT, },
{ "pca9698", 40 | PCA953X_TYPE, },
+ { "pcal6408", 8 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal6416", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal6524", 24 | PCA953X_TYPE | PCA_LATCH_INT, },
+ { "pcal6534", 34 | PCAL653X_TYPE | PCA_LATCH_INT, },
{ "pcal9535", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal9554b", 8 | PCA953X_TYPE | PCA_LATCH_INT, },
{ "pcal9555a", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
@@ -211,6 +214,10 @@ struct pca953x_chip {
struct regulator *regulator;
const struct pca953x_reg_config *regs;
+
+ u8 (*recalc_addr)(struct pca953x_chip *chip, int reg, int off);
+ bool (*check_reg)(struct pca953x_chip *chip, unsigned int reg,
+ u32 checkbank);
};
static int pca953x_bank_shift(struct pca953x_chip *chip)
@@ -288,18 +295,67 @@ static bool pca953x_check_register(struct pca953x_chip *chip, unsigned int reg,
return true;
}
+/*
+ * Unfortunately, while the PCAL6534 (and compatible) chips broadly follow the
+ * same register layout as the PCAL6524, their registers are packed more
+ * tightly and no longer obey the same rules, so the bank can no longer be
+ * derived by bit shifting. These chips hence need special handling here.
+ */
+static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
+ u32 checkbank)
+{
+ int bank;
+ int offset;
+
+ if (reg >= 0x54) {
+ /*
+ * Handle lack of reserved registers after output port
+ * configuration register to form a bank.
+ */
+ int temp = reg - 0x54;
+
+ bank = temp / NBANK(chip);
+ offset = temp - (bank * NBANK(chip));
+ bank += 16;
+ } else if (reg >= 0x30) {
+ /*
+ * Reserved block between 14h and 2Fh does not align on
+ * expected bank boundaries like other devices.
+ */
+ int temp = reg - 0x30;
+
+ bank = temp / NBANK(chip);
+ offset = temp - (bank * NBANK(chip));
+ bank += 8;
+ } else {
+ bank = reg / NBANK(chip);
+ offset = reg - (bank * NBANK(chip));
+ }
+
+ /* Register is not in the matching bank. */
+ if (!(BIT(bank) & checkbank))
+ return false;
+
+ /* Register is not within allowed range of bank. */
+ if (offset >= NBANK(chip))
+ return false;
+
+ return true;
+}
+
static bool pca953x_readable_register(struct device *dev, unsigned int reg)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
- bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
- PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
- } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
bank = PCA957x_BANK_INPUT | PCA957x_BANK_OUTPUT |
PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG |
PCA957x_BANK_BUSHOLD;
+ } else {
+ bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
+ PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
}
if (chip->driver_data & PCA_PCAL) {
@@ -308,7 +364,7 @@ static bool pca953x_readable_register(struct device *dev, unsigned int reg)
PCAL9xxx_BANK_IRQ_STAT;
}
- return pca953x_check_register(chip, reg, bank);
+ return chip->check_reg(chip, reg, bank);
}
static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
@@ -316,19 +372,19 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
- bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
- PCA953x_BANK_CONFIG;
- } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
bank = PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY |
PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD;
+ } else {
+ bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
+ PCA953x_BANK_CONFIG;
}
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IN_LATCH | PCAL9xxx_BANK_PULL_EN |
PCAL9xxx_BANK_PULL_SEL | PCAL9xxx_BANK_IRQ_MASK;
- return pca953x_check_register(chip, reg, bank);
+ return chip->check_reg(chip, reg, bank);
}
static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
@@ -336,15 +392,15 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE)
- bank = PCA953x_BANK_INPUT;
- else
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
bank = PCA957x_BANK_INPUT;
+ else
+ bank = PCA953x_BANK_INPUT;
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IRQ_STAT;
- return pca953x_check_register(chip, reg, bank);
+ return chip->check_reg(chip, reg, bank);
}
static const struct regmap_config pca953x_i2c_regmap = {
@@ -389,9 +445,42 @@ static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off)
return regaddr;
}
+/*
+ * The PCAL6534 and compatible chips have altered bank alignment that doesn't
+ * fit within the bit shifting scheme used for other devices.
+ */
+static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
+{
+ int addr;
+ int pinctrl;
+
+ addr = (reg & PCAL_GPIO_MASK) * NBANK(chip);
+
+ switch (reg) {
+ case PCAL953X_OUT_STRENGTH:
+ case PCAL953X_IN_LATCH:
+ case PCAL953X_PULL_EN:
+ case PCAL953X_PULL_SEL:
+ case PCAL953X_INT_MASK:
+ case PCAL953X_INT_STAT:
+ case PCAL953X_OUT_CONF:
+ pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x20;
+ break;
+ case PCAL6524_INT_EDGE:
+ case PCAL6524_INT_CLR:
+ case PCAL6524_IN_STATUS:
+ case PCAL6524_OUT_INDCONF:
+ case PCAL6524_DEBOUNCE:
+ pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
+ break;
+ }
+
+ return pinctrl + addr + (off / BANK_SZ);
+}
+
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
+ u8 regaddr = chip->recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -409,7 +498,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
+ u8 regaddr = chip->recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -428,7 +517,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -442,8 +531,8 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
+ u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
+ u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -463,7 +552,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off);
+ u8 inreg = chip->recalc_addr(chip, chip->regs->input, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -480,7 +569,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
+ u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
mutex_lock(&chip->i2c_lock);
@@ -491,7 +580,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -548,8 +637,10 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
unsigned int offset,
unsigned long config)
{
- u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset);
- u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset);
+ enum pin_config_param param = pinconf_to_config_param(config);
+
+ u8 pull_en_reg = chip->recalc_addr(chip, PCAL953X_PULL_EN, offset);
+ u8 pull_sel_reg = chip->recalc_addr(chip, PCAL953X_PULL_SEL, offset);
u8 bit = BIT(offset % BANK_SZ);
int ret;
@@ -563,9 +654,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
mutex_lock(&chip->i2c_lock);
/* Configure pull-up/pull-down */
- if (config == PIN_CONFIG_BIAS_PULL_UP)
+ if (param == PIN_CONFIG_BIAS_PULL_UP)
ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
- else if (config == PIN_CONFIG_BIAS_PULL_DOWN)
+ else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
else
ret = 0;
@@ -573,7 +664,7 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
goto exit;
/* Disable/Enable pull-up/pull-down */
- if (config == PIN_CONFIG_BIAS_DISABLE)
+ if (param == PIN_CONFIG_BIAS_DISABLE)
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
else
ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
@@ -912,13 +1003,13 @@ static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
u8 regaddr;
int ret;
- regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->output, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret)
goto out;
- regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->direction, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret)
@@ -1037,6 +1128,14 @@ static int pca953x_probe(struct i2c_client *client,
regmap_config = &pca953x_i2c_regmap;
}
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCAL653X_TYPE) {
+ chip->recalc_addr = pcal6534_recalc_addr;
+ chip->check_reg = pcal6534_check_register;
+ } else {
+ chip->recalc_addr = pca953x_recalc_addr;
+ chip->check_reg = pca953x_check_register;
+ }
+
chip->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
@@ -1068,13 +1167,12 @@ static int pca953x_probe(struct i2c_client *client,
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
-
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
- chip->regs = &pca953x_regs;
- ret = device_pca95xx_init(chip, invert);
- } else {
+ if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
chip->regs = &pca957x_regs;
ret = device_pca957x_init(chip, invert);
+ } else {
+ chip->regs = &pca953x_regs;
+ ret = device_pca95xx_init(chip, invert);
}
if (ret)
goto err_exit;
@@ -1101,24 +1199,17 @@ err_exit:
return ret;
}
-static int pca953x_remove(struct i2c_client *client)
+static void pca953x_remove(struct i2c_client *client)
{
struct pca953x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pca953x_chip *chip = i2c_get_clientdata(client);
- int ret;
if (pdata && pdata->teardown) {
- ret = pdata->teardown(client, chip->gpio_chip.base,
- chip->gpio_chip.ngpio, pdata->context);
- if (ret < 0)
- dev_err(&client->dev, "teardown failed, %d\n", ret);
- } else {
- ret = 0;
+ pdata->teardown(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
}
regulator_disable(chip->regulator);
-
- return ret;
}
#ifdef CONFIG_PM_SLEEP
@@ -1132,14 +1223,14 @@ static int pca953x_regcache_sync(struct device *dev)
* The ordering between direction and output is important,
* sync these registers first and only then sync the rest.
*/
- regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->direction, 0);
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
return ret;
}
- regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+ regaddr = chip->recalc_addr(chip, chip->regs->output, 0);
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
if (ret) {
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
@@ -1148,7 +1239,7 @@ static int pca953x_regcache_sync(struct device *dev)
#ifdef CONFIG_GPIO_PCA953X_IRQ
if (chip->driver_data & PCA_PCAL) {
- regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
+ regaddr = chip->recalc_addr(chip, PCAL953X_IN_LATCH, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret) {
@@ -1157,7 +1248,7 @@ static int pca953x_regcache_sync(struct device *dev)
return ret;
}
- regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
+ regaddr = chip->recalc_addr(chip, PCAL953X_INT_MASK, 0);
ret = regcache_sync_region(chip->regmap, regaddr,
regaddr + NBANK(chip) - 1);
if (ret) {
@@ -1175,7 +1266,9 @@ static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@@ -1198,13 +1291,17 @@ static int pca953x_resume(struct device *dev)
}
}
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&chip->i2c_lock);
return ret;
+ }
ret = regcache_sync(chip->regmap);
+ mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
@@ -1215,6 +1312,7 @@ static int pca953x_resume(struct device *dev)
#endif
/* convenience to stop overlong match-table lines */
+#define OF_653X(__nrgpio, __int) ((void *)(__nrgpio | PCAL653X_TYPE | __int))
#define OF_953X(__nrgpio, __int) (void *)(__nrgpio | PCA953X_TYPE | __int)
#define OF_957X(__nrgpio, __int) (void *)(__nrgpio | PCA957X_TYPE | __int)
@@ -1237,8 +1335,10 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9575", .data = OF_957X(16, PCA_INT), },
{ .compatible = "nxp,pca9698", .data = OF_953X(40, 0), },
+ { .compatible = "nxp,pcal6408", .data = OF_953X(8, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6416", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
+ { .compatible = "nxp,pcal6534", .data = OF_653X(34, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9535", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9554b", .data = OF_953X( 8, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 59cc27e4de51..e98ea47d7237 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -399,7 +399,7 @@ fail:
return status;
}
-static int pcf857x_remove(struct i2c_client *client)
+static void pcf857x_remove(struct i2c_client *client)
{
struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pcf857x *gpio = i2c_get_clientdata(client);
@@ -407,8 +407,6 @@ static int pcf857x_remove(struct i2c_client *client)
if (pdata && pdata->teardown)
pdata->teardown(client, gpio->chip.base, gpio->chip.ngpio,
pdata->context);
-
- return 0;
}
static void pcf857x_shutdown(struct i2c_client *client)
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index c7fbfa3ae43b..1198ab0305d0 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -661,24 +661,17 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
/* Initialize GPIO chips */
ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base);
- if (ret) {
- clk_put(clk);
+ if (ret)
return ret;
- }
/* clear all GPIO edge detects */
for_each_gpio_bank(gpio, c, pchip) {
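
The pxa probe is simplified with devm_clk_get_enabled(), which looks up the clock, prepares and enables it, and registers disable/unprepare and put for automatic cleanup, removing the clk_put() calls from every error path. Sketch of the pattern:

	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");
	/* no clk_disable_unprepare()/clk_put() needed in error or remove paths */
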
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index 63dcf42f7c20..d6418f89d3f6 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -46,10 +46,20 @@
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
* @intr_type: Interrupt type selection
+ * @bank_read: Read a bank setting as a single 32-bit value
+ * @bank_write: Write a bank setting as a single 32-bit value
+ * @line_imr_pos: Bit shift of an IRQ line's IMR field.
+ *
+ * The DIR, DATA, and ISR registers consist of four 8-bit port values, packed
+ * into a single 32-bit register. Use @bank_read (@bank_write) to get (assign)
+ * a value from (to) these registers. The IMR register consists of four 16-bit
+ * port values, packed into two 32-bit registers. Use @line_imr_pos to get the
+ * bit shift of the 2-bit field for a line's IMR settings; shifts of 32 or
+ * more overflow into the second register.
*
* Because the interrupt mask register (IMR) combines the function of IRQ type
* selection and masking, two extra values are stored. @intr_mask is used to
- * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * mask/unmask the interrupts for a GPIO line, and @intr_type is used to store
* the selected interrupt types. The logical AND of these values is written to
* IMR on changes.
*/
@@ -59,10 +69,11 @@ struct realtek_gpio_ctrl {
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
raw_spinlock_t lock;
- u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
- u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
- unsigned int (*port_offset_u8)(unsigned int port);
- unsigned int (*port_offset_u16)(unsigned int port);
+ u8 intr_mask[REALTEK_GPIO_MAX];
+ u8 intr_type[REALTEK_GPIO_MAX];
+ u32 (*bank_read)(void __iomem *reg);
+ void (*bank_write)(void __iomem *reg, u32 value);
+ unsigned int (*line_imr_pos)(unsigned int line);
};
/* Expand with more flags as devices with other quirks are added */
@@ -101,14 +112,22 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
* port. The two interrupt mask registers store two bits per GPIO, so use u16
* values.
*/
-static unsigned int realtek_gpio_port_offset_u8(unsigned int port)
+static u32 realtek_gpio_bank_read_swapped(void __iomem *reg)
{
- return port;
+ return ioread32be(reg);
}
-static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
+static void realtek_gpio_bank_write_swapped(void __iomem *reg, u32 value)
{
- return 2 * port;
+ iowrite32be(value, reg);
+}
+
+static unsigned int realtek_gpio_line_imr_pos_swapped(unsigned int line)
+{
+ unsigned int port_pin = line % 8;
+ unsigned int port = line / 8;
+
+ return 2 * (8 * (port ^ 1) + port_pin);
}
/*
@@ -119,66 +138,67 @@ static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
* per GPIO, so use u16 values. The first register contains ports 1 and 0, the
* second ports 3 and 2.
*/
-static unsigned int realtek_gpio_port_offset_u8_rev(unsigned int port)
+static u32 realtek_gpio_bank_read(void __iomem *reg)
{
- return 3 - port;
+ return ioread32(reg);
}
-static unsigned int realtek_gpio_port_offset_u16_rev(unsigned int port)
+static void realtek_gpio_bank_write(void __iomem *reg, u32 value)
{
- return 2 * (port ^ 1);
+ iowrite32(value, reg);
}
-static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u16 irq_type, u16 irq_mask)
+static unsigned int realtek_gpio_line_imr_pos(unsigned int line)
{
- iowrite16(irq_type & irq_mask,
- ctrl->base + REALTEK_GPIO_REG_IMR + ctrl->port_offset_u16(port));
+ return 2 * line;
}
-static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u8 mask)
+static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, u32 mask)
{
- iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ ctrl->bank_write(ctrl->base + REALTEK_GPIO_REG_ISR, mask);
}
-static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
+static u32 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl)
{
- return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ return ctrl->bank_read(ctrl->base + REALTEK_GPIO_REG_ISR);
}
-/* Set the rising and falling edge mask bits for a GPIO port pin */
-static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
+/* Set the rising and falling edge mask bits for a GPIO pin */
+static void realtek_gpio_update_line_imr(struct realtek_gpio_ctrl *ctrl, unsigned int line)
{
- return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
+ void __iomem *reg = ctrl->base + REALTEK_GPIO_REG_IMR;
+ unsigned int line_shift = ctrl->line_imr_pos(line);
+ unsigned int shift = line_shift % 32;
+ u32 irq_type = ctrl->intr_type[line];
+ u32 irq_mask = ctrl->intr_mask[line];
+ u32 reg_val;
+
+ reg += 4 * (line_shift / 32);
+ reg_val = ioread32(reg);
+ reg_val &= ~(REALTEK_GPIO_IMR_LINE_MASK << shift);
+ reg_val |= (irq_type & irq_mask & REALTEK_GPIO_IMR_LINE_MASK) << shift;
+ iowrite32(reg_val, reg);
}
static void realtek_gpio_irq_ack(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
irq_hw_number_t line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
- realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
+ realtek_gpio_clear_isr(ctrl, BIT(line));
}
static void realtek_gpio_irq_unmask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
gpiochip_enable_irq(&ctrl->gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
@@ -186,16 +206,11 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = 0;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
gpiochip_disable_irq(&ctrl->gc, line);
@@ -205,10 +220,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 type, t;
+ u8 type;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -227,11 +240,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
irq_set_handler_locked(data, handle_edge_irq);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- t = ctrl->intr_type[port];
- t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- t |= realtek_gpio_imr_bits(port_pin, type);
- ctrl->intr_type[port] = t;
- realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
+ ctrl->intr_type[line] = type;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
@@ -242,28 +252,21 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned int lines_done;
- unsigned int port_pin_count;
unsigned long status;
int offset;
chained_irq_enter(irq_chip, desc);
- for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
- status = realtek_gpio_read_isr(ctrl, lines_done / 8);
- port_pin_count = min(gc->ngpio - lines_done, 8U);
- for_each_set_bit(offset, &status, port_pin_count)
- generic_handle_domain_irq(gc->irq.domain, offset + lines_done);
- }
+ status = realtek_gpio_read_isr(ctrl);
+ for_each_set_bit(offset, &status, gc->ngpio)
+ generic_handle_domain_irq(gc->irq.domain, offset);
chained_irq_exit(irq_chip, desc);
}
-static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, int cpu)
+static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
{
- return ctrl->cpumask_base + ctrl->port_offset_u8(port) +
- REALTEK_GPIO_PORTS_PER_BANK * cpu;
+ return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
}
static int realtek_gpio_irq_set_affinity(struct irq_data *data,
@@ -271,12 +274,10 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
void __iomem *irq_cpu_mask;
unsigned long flags;
int cpu;
- u8 v;
+ u32 v;
if (!ctrl->cpumask_base)
return -ENXIO;
@@ -284,15 +285,15 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
raw_spin_lock_irqsave(&ctrl->lock, flags);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
- irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, port, cpu);
- v = ioread8(irq_cpu_mask);
+ irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
+ v = ctrl->bank_read(irq_cpu_mask);
if (cpumask_test_cpu(cpu, dest))
- v |= BIT(port_pin);
+ v |= BIT(line);
else
- v &= ~BIT(port_pin);
+ v &= ~BIT(line);
- iowrite8(v, irq_cpu_mask);
+ ctrl->bank_write(irq_cpu_mask, v);
}
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -305,16 +306,17 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
static int realtek_gpio_irq_init(struct gpio_chip *gc)
{
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned int port;
+ u32 mask_all = GENMASK(gc->ngpio - 1, 0);
+ unsigned int line;
int cpu;
- for (port = 0; (port * 8) < gc->ngpio; port++) {
- realtek_gpio_write_imr(ctrl, port, 0, 0);
- realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
+ for (line = 0; line < gc->ngpio; line++)
+ realtek_gpio_update_line_imr(ctrl, line);
- for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
- iowrite8(GENMASK(7, 0), realtek_gpio_irq_cpu_mask(ctrl, port, cpu));
- }
+ realtek_gpio_clear_isr(ctrl, mask_all);
+
+ for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
+ ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
return 0;
}
@@ -387,12 +389,14 @@ static int realtek_gpio_probe(struct platform_device *pdev)
if (dev_flags & GPIO_PORTS_REVERSED) {
bgpio_flags = 0;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8_rev;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16_rev;
+ ctrl->bank_read = realtek_gpio_bank_read;
+ ctrl->bank_write = realtek_gpio_bank_write;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16;
+ ctrl->bank_read = realtek_gpio_bank_read_swapped;
+ ctrl->bank_write = realtek_gpio_bank_write_swapped;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
err = bgpio_init(&ctrl->gc, dev, 4,
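
To see what realtek_gpio_line_imr_pos_swapped() computes above: IMR holds a 2-bit field per line and 16 bits per 8-line port, and on the byte-swapped layout ports are paired via (port ^ 1). Two worked examples, following the code as written:

	line 13: port = 1, pin = 5  ->  pos = 2 * (8 * (1 ^ 1) + 5) = 10  (first IMR register, shift 10)
	line 20: port = 2, pin = 4  ->  pos = 2 * (8 * (2 ^ 1) + 4) = 56  (second IMR register, shift 56 % 32 = 24)

realtek_gpio_update_line_imr() then selects the register with line_shift / 32 and masks in the 2-bit value at line_shift % 32.
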
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index f91e876fd969..6765477edb06 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -325,26 +325,15 @@ static void rockchip_irq_demux(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
- u32 pend;
+ unsigned long pending;
+ unsigned int irq;
dev_dbg(bank->dev, "got irq for bank %s\n", bank->name);
chained_irq_enter(chip, desc);
- pend = readl_relaxed(bank->reg_base + bank->gpio_regs->int_status);
-
- while (pend) {
- unsigned int irq, virq;
-
- irq = __ffs(pend);
- pend &= ~BIT(irq);
- virq = irq_find_mapping(bank->domain, irq);
-
- if (!virq) {
- dev_err(bank->dev, "unmapped irq %d\n", irq);
- continue;
- }
-
+ pending = readl_relaxed(bank->reg_base + bank->gpio_regs->int_status);
+ for_each_set_bit(irq, &pending, 32) {
dev_dbg(bank->dev, "handling irq %d\n", irq);
/*
@@ -378,7 +367,7 @@ static void rockchip_irq_demux(struct irq_desc *desc)
} while ((data & BIT(irq)) != (data_old & BIT(irq)));
}
- generic_handle_irq(virq);
+ generic_handle_domain_irq(bank->domain, irq);
}
chained_irq_exit(chip, desc);
@@ -419,11 +408,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
goto out;
} else {
bank->toggle_edge_mode |= mask;
- level |= mask;
+ level &= ~mask;
/*
* Determine gpio state. If 1 next interrupt should be
- * falling otherwise rising.
+ * low otherwise high.
*/
data = readl(bank->reg_base + bank->gpio_regs->ext_port);
if (data & mask)
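
The rockchip demux rewrite above swaps the manual __ffs()/clear-bit loop and irq_find_mapping() for the standard idiom: read the pending mask once, walk it with for_each_set_bit(), and let generic_handle_domain_irq() translate hwirq to virq internally. A sketch of that idiom (register and variable names are placeholders):

	unsigned long pending;
	unsigned int hwirq;

	pending = readl_relaxed(base + INT_STATUS);	/* INT_STATUS is illustrative */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(domain, hwirq);
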
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 443fe975bf13..e62ee7e56908 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -230,6 +230,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
tc3589x_gpio->regs[REG_DIRECT][regoffset] |= mask;
+ gpiochip_disable_irq(gc, offset);
}
static void tc3589x_gpio_irq_unmask(struct irq_data *d)
@@ -240,17 +241,20 @@ static void tc3589x_gpio_irq_unmask(struct irq_data *d)
int regoffset = offset / 8;
int mask = BIT(offset % 8);
+ gpiochip_enable_irq(gc, offset);
tc3589x_gpio->regs[REG_IE][regoffset] |= mask;
tc3589x_gpio->regs[REG_DIRECT][regoffset] &= ~mask;
}
-static struct irq_chip tc3589x_gpio_irq_chip = {
+static const struct irq_chip tc3589x_gpio_irq_chip = {
.name = "tc3589x-gpio",
.irq_bus_lock = tc3589x_gpio_irq_lock,
.irq_bus_sync_unlock = tc3589x_gpio_irq_sync_unlock,
.irq_mask = tc3589x_gpio_irq_mask,
.irq_unmask = tc3589x_gpio_irq_unmask,
.irq_set_type = tc3589x_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
@@ -321,7 +325,7 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
tc3589x_gpio->chip.base = -1;
girq = &tc3589x_gpio->chip.irq;
- girq->chip = &tc3589x_gpio_irq_chip;
+ gpio_irq_chip_set_chip(girq, &tc3589x_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
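The tc3589x hunks above are the standard conversion to an immutable GPIO irqchip: mask/unmask additionally call gpiochip_disable_irq()/gpiochip_enable_irq(), the irq_chip becomes const with IRQCHIP_IMMUTABLE plus GPIOCHIP_IRQ_RESOURCE_HELPERS, and probe uses gpio_irq_chip_set_chip() rather than writing girq->chip directly. A condensed sketch of the pattern for a hypothetical driver; the state struct and the software mask cache are placeholders.

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct foo_gpio {
        struct gpio_chip gc;
        u32 irq_enabled;        /* software cache of the hardware mask */
};

static void foo_gpio_irq_mask(struct irq_data *d)
{
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct foo_gpio *foo = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        foo->irq_enabled &= ~BIT(hwirq);
        /* Tell gpiolib the line is no longer in use as an IRQ. */
        gpiochip_disable_irq(gc, hwirq);
}

static void foo_gpio_irq_unmask(struct irq_data *d)
{
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct foo_gpio *foo = gpiochip_get_data(gc);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        gpiochip_enable_irq(gc, hwirq);
        foo->irq_enabled |= BIT(hwirq);
}

static const struct irq_chip foo_gpio_irq_chip = {
        .name           = "foo-gpio",
        .irq_mask       = foo_gpio_irq_mask,
        .irq_unmask     = foo_gpio_irq_unmask,
        .flags          = IRQCHIP_IMMUTABLE,
        GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* In probe, instead of "girq->chip = &foo_gpio_irq_chip": */
static void foo_gpio_setup_irqchip(struct foo_gpio *foo)
{
        gpio_irq_chip_set_chip(&foo->gc.irq, &foo_gpio_irq_chip);
}
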
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index a09b1e69b072..d642c35cb97c 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -126,13 +126,11 @@ static int tpic2810_probe(struct i2c_client *client,
return 0;
}
-static int tpic2810_remove(struct i2c_client *client)
+static void tpic2810_remove(struct i2c_client *client)
{
struct tpic2810 *gpio = i2c_get_clientdata(client);
gpiochip_remove(&gpio->chip);
-
- return 0;
}
static const struct i2c_device_id tpic2810_id_table[] = {
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index fa4bc7481f9a..e739dcea61b2 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -307,6 +307,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->init_valid_mask = tqmx86_init_irq_valid_mask;
+
+ irq_domain_set_pm_device(girq->domain, dev);
}
ret = devm_gpiochip_add_data(dev, chip, gpio);
@@ -315,8 +317,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
goto out_pm_dis;
}
- irq_domain_set_pm_device(girq->domain, dev);
-
dev_info(dev, "GPIO functionality initialized with %d pins\n",
chip->ngpio);
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 5046e51af8df..c1bb2c3ca6f2 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -465,8 +465,6 @@ static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
REG_GPIO_DEBEN1, 3);
}
-static int gpio_twl4030_remove(struct platform_device *pdev);
-
static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
struct twl4030_gpio_platform_data *pdata)
{
@@ -494,6 +492,18 @@ static struct twl4030_gpio_platform_data *of_gpio_twl4030(struct device *dev,
return omap_twl_info;
}
+/* Cannot use as gpio_twl4030_probe() calls us */
+static int gpio_twl4030_remove(struct platform_device *pdev)
+{
+ struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&priv->gpio_chip);
+
+ /* REVISIT no support yet for deregistering all the IRQs */
+ WARN_ON(!is_module());
+ return 0;
+}
+
static int gpio_twl4030_probe(struct platform_device *pdev)
{
struct twl4030_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -590,18 +600,6 @@ out:
return ret;
}
-/* Cannot use as gpio_twl4030_probe() calls us */
-static int gpio_twl4030_remove(struct platform_device *pdev)
-{
- struct gpio_twl4030_priv *priv = platform_get_drvdata(pdev);
-
- gpiochip_remove(&priv->gpio_chip);
-
- /* REVISIT no support yet for deregistering all the IRQs */
- WARN_ON(!is_module());
- return 0;
-}
-
static const struct of_device_id twl_gpio_match[] = {
{ .compatible = "ti,twl4030-gpio", },
{ },
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 386e69300332..676adf1f198a 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/ucb1400.h>
+#include <linux/gpio/driver.h>
static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off)
{
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index b098f2dc196b..e73885a4dc32 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -27,7 +27,8 @@ module_param_hw_array(base, uint, ioport, &num_ws16c48, 0);
MODULE_PARM_DESC(base, "WinSystems WS16C48 base addresses");
static unsigned int irq[MAX_NUM_WS16C48];
-module_param_hw_array(irq, uint, irq, NULL, 0);
+static unsigned int num_irq;
+module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
/**
@@ -265,6 +266,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ gpiochip_disable_irq(chip, offset);
port_state = ws16c48gpio->irq_mask >> (8 * port);
/* Select Register Page 2; Unlock all I/O ports */
@@ -295,6 +297,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ gpiochip_enable_irq(chip, offset);
ws16c48gpio->irq_mask |= mask;
port_state = ws16c48gpio->irq_mask >> (8 * port);
@@ -356,12 +359,14 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return 0;
}
-static struct irq_chip ws16c48_irqchip = {
+static const struct irq_chip ws16c48_irqchip = {
.name = "ws16c48",
.irq_ack = ws16c48_irq_ack,
.irq_mask = ws16c48_irq_mask,
.irq_unmask = ws16c48_irq_unmask,
- .irq_set_type = ws16c48_irq_set_type
+ .irq_set_type = ws16c48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
@@ -463,7 +468,7 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
girq = &ws16c48gpio->chip.irq;
- girq->chip = &ws16c48_irqchip;
+ gpio_irq_chip_set_chip(girq, &ws16c48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -497,7 +502,7 @@ static struct isa_driver ws16c48_driver = {
},
};
-module_isa_driver(ws16c48_driver, num_ws16c48);
+module_isa_driver_with_irq(ws16c48_driver, num_ws16c48, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("WinSystems WS16C48 GPIO driver");
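The ws16c48 changes above start counting how many IRQ parameters were supplied (num_irq) and register via module_isa_driver_with_irq(), so the ISA core is given both the base-address and IRQ counts. A minimal parameter/registration sketch for a hypothetical ISA card; the "foo" names and the FOO_MAX_DEVS limit are illustrative.

#include <linux/device.h>
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define FOO_MAX_DEVS 4  /* assumed per-system card limit */

static unsigned int base[FOO_MAX_DEVS];
static unsigned int num_foo;
module_param_hw_array(base, uint, ioport, &num_foo, 0);
MODULE_PARM_DESC(base, "Foo ISA card base addresses");

static unsigned int irq[FOO_MAX_DEVS];
static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "Foo ISA card interrupt line numbers");

static int foo_probe(struct device *dev, unsigned int id)
{
        /* base[id] and irq[id] describe the id-th card given on load. */
        return 0;
}

static struct isa_driver foo_driver = {
        .probe = foo_probe,
        .driver = {
                .name = "foo",
        },
};

/* Registers the driver with both the device and IRQ parameter counts. */
module_isa_driver_with_irq(foo_driver, num_foo, num_irq);

MODULE_LICENSE("GPL");
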
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 9be1376f9a62..a7d2358736fe 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -32,9 +32,16 @@ MODULE_PARM_DESC(ignore_wake,
"controller@pin combos on which to ignore the ACPI wake flag "
"ignore_wake=controller@pin[,controller@pin[,...]]");
+static char *ignore_interrupt;
+module_param(ignore_interrupt, charp, 0444);
+MODULE_PARM_DESC(ignore_interrupt,
+ "controller@pin combos on which to ignore interrupt "
+ "ignore_interrupt=controller@pin[,controller@pin[,...]]");
+
struct acpi_gpiolib_dmi_quirk {
bool no_edge_events_on_boot;
char *ignore_wake;
+ char *ignore_interrupt;
};
/**
@@ -317,14 +324,15 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *ignore_list, const char *controller_in,
+ unsigned int pin_in)
{
const char *controller, *pin_str;
unsigned int pin;
char *endp;
int len;
- controller = ignore_wake;
+ controller = ignore_list;
while (controller) {
pin_str = strchr(controller, '@');
if (!pin_str)
@@ -348,7 +356,7 @@ static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin
return false;
err:
- pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_wake: %s\n", ignore_wake);
+ pr_err_once("Error: Invalid value for gpiolib_acpi.ignore_...: %s\n", ignore_list);
return false;
}
@@ -360,7 +368,7 @@ static bool acpi_gpio_irq_is_wake(struct device *parent,
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
- if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
+ if (acpi_gpio_in_ignore_list(ignore_wake, dev_name(parent), pin)) {
dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -427,6 +435,11 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
goto fail_unlock_irq;
}
+ if (acpi_gpio_in_ignore_list(ignore_interrupt, dev_name(chip->parent), pin)) {
+ dev_info(chip->parent, "Ignoring interrupt on pin %u\n", pin);
+ return AE_OK;
+ }
+
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
goto fail_unlock_irq;
@@ -741,6 +754,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.pin_config = agpio->pin_config;
lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
+ lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
/*
* Polarity and triggering are only specified for GpioInt
@@ -987,10 +1001,11 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
}
/**
- * acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number
+ * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number
* @adev: pointer to a ACPI device to get IRQ from
* @name: optional name of GpioInt resource
* @index: index of GpioInt resource (starting from %0)
+ * @wake_capable: Set to true if the IRQ is wake capable
*
* If the device has one or more GpioInt resources, this function can be
* used to translate from the GPIO offset in the resource to the Linux IRQ
@@ -1002,9 +1017,13 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* The function takes optional @name parameter. If the resource has a property
* name, then only those will be taken into account.
*
+ * The GPIO is considered wake capable if the GpioInt resource specifies
+ * SharedAndWake or ExclusiveAndWake.
+ *
* Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
-int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index)
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index,
+ bool *wake_capable)
{
int idx, i;
unsigned int irq_flags;
@@ -1061,13 +1080,16 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind
dev_dbg(&adev->dev, "IRQ %d already in use\n", irq);
}
+ if (wake_capable)
+ *wake_capable = info.wake_capable;
+
return irq;
}
}
return -ENOENT;
}
-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by);
+EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_wake_get_by);
static acpi_status
acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
@@ -1563,6 +1585,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "INT33FF:01@0",
},
},
+ {
+ /*
+ * Interrupt storm caused from edge triggered floating pin
+ * Found in BIOS UX325UAZ.300
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216208
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UAZ_UM325UAZ"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@18",
+ },
+ },
{} /* Terminating entry */
};
@@ -1585,6 +1621,9 @@ static int __init acpi_gpio_setup_params(void)
if (ignore_wake == NULL && quirk && quirk->ignore_wake)
ignore_wake = quirk->ignore_wake;
+ if (ignore_interrupt == NULL && quirk && quirk->ignore_interrupt)
+ ignore_interrupt = quirk->ignore_interrupt;
+
return 0;
}
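Two user-visible pieces fall out of the gpiolib-acpi changes above: a gpiolib_acpi.ignore_interrupt=controller@pin[,...] parameter that mirrors ignore_wake (and is pre-seeded by the ASUS ZenBook DMI quirk with "AMDI0030:00@18"), and the renamed acpi_dev_gpio_irq_wake_get_by(), which also reports whether the GpioInt was declared SharedAndWake/ExclusiveAndWake. A hedged consumer-side sketch for a hypothetical "foo" driver follows; arming the wake IRQ with device_init_wakeup()/dev_pm_set_wake_irq() is just one plausible use of the new flag, and error handling is trimmed.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_setup_irq(struct device *dev)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        bool wake_capable;
        int irq;

        if (!adev)
                return -ENODEV;

        /* First GpioInt resource of the companion, no property name. */
        irq = acpi_dev_gpio_irq_wake_get_by(adev, NULL, 0, &wake_capable);
        if (irq < 0)
                return irq;

        if (wake_capable) {
                device_init_wakeup(dev, true);
                dev_pm_set_wake_irq(dev, irq);
        }

        return irq;
}
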
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index e476558d9471..1ac6816839db 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -18,6 +18,7 @@ struct acpi_device;
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @wake_capable: wake capability as provided by ACPI
* @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
@@ -28,6 +29,7 @@ struct acpi_gpio_info {
int pin_config;
int polarity;
int triggering;
+ bool wake_capable;
unsigned int debounce;
unsigned int quirks;
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index f8041d4898d1..0cb6b468f364 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -1497,6 +1497,21 @@ static int linereq_release(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
+{
+ struct linereq *lr = file->private_data;
+ struct device *dev = &lr->gdev->dev;
+ u16 i;
+
+ seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
+
+ for (i = 0; i < lr->num_lines; i++)
+ seq_printf(out, "gpio-line:\t%d\n",
+ gpio_chip_hwgpio(lr->lines[i].desc));
+}
+#endif
+
static const struct file_operations line_fileops = {
.release = linereq_release,
.read = linereq_read,
@@ -1507,6 +1522,9 @@ static const struct file_operations line_fileops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = linereq_ioctl_compat,
#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = linereq_show_fdinfo,
+#endif
};
static int linereq_create(struct gpio_device *gdev, void __user *ip)
@@ -1986,7 +2004,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
ret = -ENODEV;
goto out_free_le;
}
- le->irq = irq;
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
@@ -2000,7 +2017,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
init_waitqueue_head(&le->wait);
/* Request a thread to read the events */
- ret = request_threaded_irq(le->irq,
+ ret = request_threaded_irq(irq,
lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
@@ -2009,6 +2026,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
+ le->irq = irq;
+
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
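With the linereq_show_fdinfo() hook added above, the anonymous file behind a GPIO line request now identifies its chip and line offsets through procfs, which makes it possible to see which process holds which lines. Hypothetical output for a request on gpiochip0 holding offsets 4 and 7 (generic fdinfo fields elided):

        # cat /proc/<pid>/fdinfo/<fd>
        ...
        gpio-chip:      gpiochip0
        gpio-line:      4
        gpio-line:      7
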
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a037b50bef33..0e4e1291604d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -289,6 +289,36 @@ int of_get_named_gpio_flags(const struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
+/* Converts gpio_lookup_flags into bitmask of GPIO_* values */
+static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags)
+{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (flags & OF_GPIO_SINGLE_ENDED) {
+ if (flags & OF_GPIO_OPEN_DRAIN)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (flags & OF_GPIO_TRANSITORY)
+ lflags |= GPIO_TRANSITORY;
+
+ if (flags & OF_GPIO_PULL_UP)
+ lflags |= GPIO_PULL_UP;
+
+ if (flags & OF_GPIO_PULL_DOWN)
+ lflags |= GPIO_PULL_DOWN;
+
+ if (flags & OF_GPIO_PULL_DISABLE)
+ lflags |= GPIO_PULL_DISABLE;
+
+ return lflags;
+}
+
/**
* gpiod_get_from_of_node() - obtain a GPIO from an OF node
* @node: handle of the OF node
@@ -308,26 +338,14 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
enum gpiod_flags dflags,
const char *label)
{
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ unsigned long lflags;
struct gpio_desc *desc;
- enum of_gpio_flags flags;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
- bool transitory = false;
+ enum of_gpio_flags of_flags;
int ret;
- desc = of_get_named_gpiod_flags(node, propname,
- index, &flags);
-
- if (!desc || IS_ERR(desc)) {
+ desc = of_get_named_gpiod_flags(node, propname, index, &of_flags);
+ if (!desc || IS_ERR(desc))
return desc;
- }
-
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- transitory = flags & OF_GPIO_TRANSITORY;
ret = gpiod_request(desc, label);
if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
@@ -335,27 +353,7 @@ struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
if (ret)
return ERR_PTR(ret);
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
- if (transitory)
- lflags |= GPIO_TRANSITORY;
-
- if (flags & OF_GPIO_PULL_UP)
- lflags |= GPIO_PULL_UP;
-
- if (flags & OF_GPIO_PULL_DOWN)
- lflags |= GPIO_PULL_DOWN;
-
- if (flags & OF_GPIO_PULL_DISABLE)
- lflags |= GPIO_PULL_DISABLE;
+ lflags = of_convert_gpio_flags(of_flags);
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
@@ -372,12 +370,12 @@ EXPORT_SYMBOL_GPL(gpiod_get_from_of_node);
* properties should be named "foo-gpios" so we have this special kludge for
* them.
*/
-static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *of_find_spi_gpio(struct device_node *np,
+ const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
char prop_name[32]; /* 32 is max size of property name */
- const struct device_node *np = dev->of_node;
- struct gpio_desc *desc;
/*
* Hopefully the compiler stubs the rest of the function if this
@@ -393,8 +391,7 @@ static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id
/* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */
snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id);
- desc = of_get_named_gpiod_flags(np, prop_name, 0, of_flags);
- return desc;
+ return of_get_named_gpiod_flags(np, prop_name, idx, of_flags);
}
/*
@@ -402,13 +399,11 @@ static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id
* lines rather than "cs-gpios" like all other SPI hardware. Account for this
* with a special quirk.
*/
-static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
+static struct gpio_desc *of_find_spi_cs_gpio(struct device_node *np,
const char *con_id,
unsigned int idx,
- unsigned long *flags)
+ enum of_gpio_flags *of_flags)
{
- const struct device_node *np = dev->of_node;
-
if (!IS_ENABLED(CONFIG_SPI_MASTER))
return ERR_PTR(-ENOENT);
@@ -426,7 +421,7 @@ static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
* uses just "gpios" so translate to that when "cs-gpios" is
* requested.
*/
- return of_find_gpio(dev, NULL, idx, flags);
+ return of_get_named_gpiod_flags(np, "gpios", idx, of_flags);
}
/*
@@ -434,7 +429,9 @@ static struct gpio_desc *of_find_spi_cs_gpio(struct device *dev,
* properties should be named "foo-gpios" so we have this special kludge for
* them.
*/
-static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *of_find_regulator_gpio(struct device_node *np,
+ const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
/* These are the connection IDs we accept as legacy GPIO phandles */
@@ -443,8 +440,6 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
"wlf,ldo1ena", /* WM8994 */
"wlf,ldo2ena", /* WM8994 */
};
- const struct device_node *np = dev->of_node;
- struct gpio_desc *desc;
int i;
if (!IS_ENABLED(CONFIG_REGULATOR))
@@ -457,12 +452,12 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
if (i < 0)
return ERR_PTR(-ENOENT);
- desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
- return desc;
+ return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}
-static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
+static struct gpio_desc *of_find_arizona_gpio(struct device_node *np,
const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
if (!IS_ENABLED(CONFIG_MFD_ARIZONA))
@@ -471,17 +466,18 @@ static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
if (!con_id || strcmp(con_id, "wlf,reset"))
return ERR_PTR(-ENOENT);
- return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+ return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}
-static struct gpio_desc *of_find_usb_gpio(struct device *dev,
+static struct gpio_desc *of_find_usb_gpio(struct device_node *np,
const char *con_id,
+ unsigned int idx,
enum of_gpio_flags *of_flags)
{
/*
- * Currently this USB quirk is only for the Fairchild FUSB302 host which is using
- * an undocumented DT GPIO line named "fcs,int_n" without the compulsory "-gpios"
- * suffix.
+ * Currently this USB quirk is only for the Fairchild FUSB302 host
+ * which is using an undocumented DT GPIO line named "fcs,int_n"
+ * without the compulsory "-gpios" suffix.
*/
if (!IS_ENABLED(CONFIG_TYPEC_FUSB302))
return ERR_PTR(-ENOENT);
@@ -489,14 +485,28 @@ static struct gpio_desc *of_find_usb_gpio(struct device *dev,
if (!con_id || strcmp(con_id, "fcs,int_n"))
return ERR_PTR(-ENOENT);
- return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+ return of_get_named_gpiod_flags(np, con_id, idx, of_flags);
}
+typedef struct gpio_desc *(*of_find_gpio_quirk)(struct device_node *np,
+ const char *con_id,
+ unsigned int idx,
+ enum of_gpio_flags *of_flags);
+static const of_find_gpio_quirk of_find_gpio_quirks[] = {
+ of_find_spi_gpio,
+ of_find_spi_cs_gpio,
+ of_find_regulator_gpio,
+ of_find_arizona_gpio,
+ of_find_usb_gpio,
+ NULL
+};
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
+ const of_find_gpio_quirk *q;
struct gpio_desc *desc;
unsigned int i;
@@ -516,51 +526,14 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
- if (gpiod_not_found(desc)) {
- /* Special handling for SPI GPIOs if used */
- desc = of_find_spi_gpio(dev, con_id, &of_flags);
- }
-
- if (gpiod_not_found(desc)) {
- /* This quirk looks up flags and all */
- desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
- if (!IS_ERR(desc))
- return desc;
- }
-
- if (gpiod_not_found(desc)) {
- /* Special handling for regulator GPIOs if used */
- desc = of_find_regulator_gpio(dev, con_id, &of_flags);
- }
-
- if (gpiod_not_found(desc))
- desc = of_find_arizona_gpio(dev, con_id, &of_flags);
-
- if (gpiod_not_found(desc))
- desc = of_find_usb_gpio(dev, con_id, &of_flags);
+ /* Properly named GPIO was not found, try workarounds */
+ for (q = of_find_gpio_quirks; gpiod_not_found(desc) && *q; q++)
+ desc = (*q)(dev->of_node, con_id, idx, &of_flags);
if (IS_ERR(desc))
return desc;
- if (of_flags & OF_GPIO_ACTIVE_LOW)
- *flags |= GPIO_ACTIVE_LOW;
-
- if (of_flags & OF_GPIO_SINGLE_ENDED) {
- if (of_flags & OF_GPIO_OPEN_DRAIN)
- *flags |= GPIO_OPEN_DRAIN;
- else
- *flags |= GPIO_OPEN_SOURCE;
- }
-
- if (of_flags & OF_GPIO_TRANSITORY)
- *flags |= GPIO_TRANSITORY;
-
- if (of_flags & OF_GPIO_PULL_UP)
- *flags |= GPIO_PULL_UP;
- if (of_flags & OF_GPIO_PULL_DOWN)
- *flags |= GPIO_PULL_DOWN;
- if (of_flags & OF_GPIO_PULL_DISABLE)
- *flags |= GPIO_PULL_DISABLE;
+ *flags = of_convert_gpio_flags(of_flags);
return desc;
}
@@ -618,16 +591,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (IS_ERR(desc))
return desc;
- if (xlate_flags & OF_GPIO_ACTIVE_LOW)
- *lflags |= GPIO_ACTIVE_LOW;
- if (xlate_flags & OF_GPIO_TRANSITORY)
- *lflags |= GPIO_TRANSITORY;
- if (xlate_flags & OF_GPIO_PULL_UP)
- *lflags |= GPIO_PULL_UP;
- if (xlate_flags & OF_GPIO_PULL_DOWN)
- *lflags |= GPIO_PULL_DOWN;
- if (xlate_flags & OF_GPIO_PULL_DISABLE)
- *lflags |= GPIO_PULL_DISABLE;
+ *lflags = of_convert_gpio_flags(xlate_flags);
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
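The gpiolib-of rework above gives every quirk lookup the same (np, con_id, idx, of_flags) signature so of_find_gpio() can walk a NULL-terminated callback table, and funnels all flag translation through of_convert_gpio_flags(). A generic sketch of the table-walk pattern; the foo_* names are placeholders and the loop condition spells out what gpiolib's gpiod_not_found() helper checks.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

/* Placeholder quirk callbacks; real code would implement each lookup. */
static struct gpio_desc *foo_lookup_primary(struct device_node *np,
                                            const char *con_id,
                                            unsigned int idx,
                                            enum of_gpio_flags *of_flags)
{
        return ERR_PTR(-ENOENT);
}

static struct gpio_desc *foo_lookup_legacy(struct device_node *np,
                                           const char *con_id,
                                           unsigned int idx,
                                           enum of_gpio_flags *of_flags)
{
        return ERR_PTR(-ENOENT);
}

typedef struct gpio_desc *(*foo_lookup_fn)(struct device_node *np,
                                           const char *con_id,
                                           unsigned int idx,
                                           enum of_gpio_flags *of_flags);

static const foo_lookup_fn foo_lookups[] = {
        foo_lookup_primary,
        foo_lookup_legacy,
        NULL    /* sentinel ends the walk */
};

static struct gpio_desc *foo_find_gpio(struct device_node *np,
                                       const char *con_id, unsigned int idx,
                                       enum of_gpio_flags *of_flags)
{
        struct gpio_desc *desc = ERR_PTR(-ENOENT);
        const foo_lookup_fn *fn;

        /* -ENOENT means "not found yet"; anything else stops the walk. */
        for (fn = foo_lookups;
             *fn && IS_ERR(desc) && PTR_ERR(desc) == -ENOENT; fn++)
                desc = (*fn)(np, con_id, idx, of_flags);

        return desc;
}
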
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cc9c0a12259e..4756ea08894f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3799,6 +3799,72 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
}
/**
+ * fwnode_get_named_gpiod - obtain a GPIO from firmware node
+ * @fwnode: handle of the firmware node
+ * @propname: name of the firmware property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * This function can be used for drivers that get their configuration
+ * from opaque firmware.
+ *
+ * The function properly finds the corresponding GPIO using whatever is the
+ * underlying firmware interface and then makes sure that the GPIO
+ * descriptor is requested before it is returned to the caller.
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+static struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ struct gpio_desc *desc = ERR_PTR(-ENODEV);
+ int ret;
+
+ if (is_of_node(fwnode)) {
+ desc = gpiod_get_from_of_node(to_of_node(fwnode),
+ propname, index,
+ dflags,
+ label);
+ return desc;
+ } else if (is_acpi_node(fwnode)) {
+ struct acpi_gpio_info info;
+
+ desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
+ if (IS_ERR(desc))
+ return desc;
+
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Currently only ACPI takes this path */
+ ret = gpiod_request(desc, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ blocking_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
+ return desc;
+}
+
+/**
* fwnode_gpiod_get_index - obtain a GPIO from firmware node
* @fwnode: handle of the firmware node
* @con_id: function within the GPIO consumer
@@ -4064,72 +4130,6 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
- * fwnode_get_named_gpiod - obtain a GPIO from firmware node
- * @fwnode: handle of the firmware node
- * @propname: name of the firmware property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * This function can be used for drivers that get their configuration
- * from opaque firmware.
- *
- * The function properly finds the corresponding GPIO using whatever is the
- * underlying firmware interface and then makes sure that the GPIO
- * descriptor is requested before it is returned to the caller.
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- struct gpio_desc *desc = ERR_PTR(-ENODEV);
- int ret;
-
- if (is_of_node(fwnode)) {
- desc = gpiod_get_from_of_node(to_of_node(fwnode),
- propname, index,
- dflags,
- label);
- return desc;
- } else if (is_acpi_node(fwnode)) {
- struct acpi_gpio_info info;
-
- desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (IS_ERR(desc))
- return desc;
-
- acpi_gpio_update_gpiod_flags(&dflags, &info);
- acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
- } else
- return ERR_PTR(-EINVAL);
-
- /* Currently only ACPI takes this path */
- ret = gpiod_request(desc, label);
- if (ret)
- return ERR_PTR(ret);
-
- ret = gpiod_configure_flags(desc, propname, lflags, dflags);
- if (ret < 0) {
- gpiod_put(desc);
- return ERR_PTR(ret);
- }
-
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
-
-/**
* gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO
* function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
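The gpiolib.c changes above make fwnode_get_named_gpiod() static and drop its export, so consumers outside gpiolib are expected to use the exported fwnode helpers instead. A sketch of the equivalent lookup through fwnode_gpiod_get_index() for a hypothetical "foo" driver; the "reset" con_id (which resolves to a "reset-gpios" property) and the label string are examples.

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/property.h>

static int foo_assert_reset(struct device *dev)
{
        struct fwnode_handle *fwnode = dev_fwnode(dev);
        struct gpio_desc *reset;

        reset = fwnode_gpiod_get_index(fwnode, "reset", 0,
                                       GPIOD_OUT_LOW, "foo-reset");
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        gpiod_set_value_cansleep(reset, 1);
        gpiod_put(reset);       /* a real driver would keep the descriptor */
        return 0;
}
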
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6c2256e8474b..34f5a092c99e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -31,6 +31,7 @@ menuconfig DRM
config DRM_MIPI_DBI
tristate
depends on DRM
+ select DRM_KMS_HELPER
config DRM_MIPI_DSI
bool
@@ -50,10 +51,21 @@ config DRM_DEBUG_MM
If in doubt, say "N".
-config DRM_DEBUG_SELFTEST
- tristate "kselftests for DRM"
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default y
depends on DRM
- depends on DEBUG_KERNEL
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
+config DRM_KUNIT_TEST
+ tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
+ depends on DRM && KUNIT
select PRIME_NUMBERS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
@@ -61,19 +73,6 @@ config DRM_DEBUG_SELFTEST
select DRM_KMS_HELPER
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
- default n
- help
- This option provides kernel modules that can be used to run
- various selftests on parts of the DRM api. This option is not
- useful for distributions or general kernels, but only for kernel
- developers working on DRM and associated drivers.
-
- If in doubt, say "N".
-
-config DRM_KUNIT_TEST
- tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
- depends on DRM && KUNIT=y
- select DRM_KMS_HELPER
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -214,11 +213,11 @@ config DRM_TTM_HELPER
help
Helpers for ttm-based gem objects
-config DRM_GEM_CMA_HELPER
+config DRM_GEM_DMA_HELPER
tristate
depends on DRM
help
- Choose this if you need the GEM CMA helper functions
+ Choose this if you need the GEM DMA helper functions
config DRM_GEM_SHMEM_HELPER
tristate
@@ -248,6 +247,13 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ # radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -273,6 +279,13 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
+ # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
help
Choose this option if you have a recent AMD Radeon graphics card.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e7af358e6dda..0b283e46f28b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,6 +3,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
+
drm-y := drm_aperture.o drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o \
drm_drv.o \
@@ -40,9 +42,9 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
-drm_cma_helper-y := drm_gem_cma_helper.o
-drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o
-obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
+drm_dma_helper-y := drm_gem_dma_helper.o
+drm_dma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_dma_helper.o
+obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
@@ -75,7 +77,6 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
# Drivers and the rest
#
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
obj-$(CONFIG_DRM_KUNIT_TEST) += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 5a283d12f8e1..6ad39cf71bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -75,7 +75,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
- nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
+ sienna_cichlid.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
# add DF block
amdgpu-y += \
@@ -89,7 +89,7 @@ amdgpu-y += \
gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
- mmhub_v3_0_1.o
+ mmhub_v3_0_1.o gfxhub_v3_0_3.o
# add UMC block
amdgpu-y += \
@@ -134,7 +134,8 @@ amdgpu-y += \
gfx_v9_4_2.o \
gfx_v10_0.o \
imu_v11_0.o \
- gfx_v11_0.o
+ gfx_v11_0.o \
+ imu_v11_0_3.o
# add async DMA block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d597e2656c47..ae9371b172e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -274,6 +274,9 @@ extern int amdgpu_vcnfw_log;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
+#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0)
+#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1)
+
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
@@ -882,6 +885,7 @@ struct amdgpu_device {
u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+ struct dma_fence __rcu *gang_submit;
bool ib_pool_ready;
struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
@@ -1060,6 +1064,9 @@ struct amdgpu_device {
uint32_t scpm_status;
struct work_struct reset_work;
+
+ uint32_t amdgpu_reset_level_mask;
+ bool job_hang;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1288,6 +1295,8 @@ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 130060834b4e..b14800ac179e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -849,6 +850,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
+
if (dm->backlight_dev[0])
atif->bd = dm->backlight_dev[0];
#endif
@@ -863,6 +865,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
enc->enc_priv) {
struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
if (dig->bl_dev) {
atif->bd = dig->bl_dev;
break;
@@ -919,9 +922,9 @@ static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
return false;
- }
+
amdgpu_acpi_priv.atif.handle = atif_handle;
acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
@@ -954,9 +957,9 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
return false;
- }
+
amdgpu_acpi_priv.atcs.handle = atcs_handle;
acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
@@ -1050,6 +1053,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
@@ -1066,6 +1073,12 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
+ /*
+ * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
+ * risky to do any special firmware-related preparations for entering
+ * S0ix even though the system is suspending to idle, so return false
+ * in that case.
+ */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
dev_warn_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5e53a5293935..9e98f3866edc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@@ -130,11 +131,13 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
kfd.reset_work);
struct amdgpu_reset_context reset_context;
+
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
@@ -683,6 +686,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
ib->length_dw = ib_len;
/* This works for NO_HWS. TODO: need to handle without knowing VMID */
job->vmid = vmid;
+ job->num_ibs = 1;
ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
@@ -752,11 +756,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo
{
struct ras_err_data err_data = {0, 0, 0, NULL};
- /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
- if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_poison_handler(adev, &err_data, reset);
- else if (reset)
- amdgpu_amdkfd_gpu_reset(adev);
+ amdgpu_umc_poison_handler(adev, &err_data, reset);
}
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index cbd593f7d553..978d3970b5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014-2018 Advanced Micro Devices, Inc.
*
@@ -297,7 +298,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
- replacement, DMA_RESV_USAGE_READ);
+ replacement, DMA_RESV_USAGE_BOOKKEEP);
dma_fence_put(replacement);
return 0;
}
@@ -1390,8 +1391,9 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(vm->root.bo,
- &vm->process_info->eviction_fence->base, true);
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv,
+ &vm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(vm->root.bo);
/* Update process info */
@@ -1612,6 +1614,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
+
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- adev->kfd.vram_used_aligned
@@ -1728,7 +1731,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+ pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
@@ -1987,9 +1990,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
}
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
- amdgpu_bo_fence(bo,
- &avm->process_info->eviction_fence->base,
- true);
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &avm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
ret = unreserve_bo_and_vms(&ctx, false, false);
goto out;
@@ -2216,7 +2219,7 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
{
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb();
+ mb(); /* make sure read happened */
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
@@ -2758,15 +2761,18 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (mem->bo->tbo.pin_count)
continue;
- amdgpu_bo_fence(mem->bo,
- &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(mem->bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
}
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *bo = peer_vm->root.bo;
- amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
}
validate_map_fail:
@@ -2820,7 +2826,9 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(gws_bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(gws_bo);
mutex_unlock(&(*mem)->process_info->lock);
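The amdkfd hunks above replace amdgpu_bo_fence() with dma_resv_add_fence(..., DMA_RESV_USAGE_BOOKKEEP), so the eviction fence is tracked on the BO without acting as an implicit-sync dependency for readers or writers. A minimal sketch of the reserve-then-add pairing; the foo_* wrapper is hypothetical, "amdgpu_object.h" is the amdgpu-internal header providing struct amdgpu_bo, and the caller is assumed to already hold the BO reservation.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

#include "amdgpu_object.h"      /* amdgpu-internal: struct amdgpu_bo */

static int foo_attach_eviction_fence(struct amdgpu_bo *bo,
                                     struct dma_fence *fence)
{
        struct dma_resv *resv = bo->tbo.base.resv;
        int r;

        /* Caller holds the reservation, e.g. via amdgpu_bo_reserve(). */
        r = dma_resv_reserve_fences(resv, 1);
        if (r)
                return r;

        /* BOOKKEEP: tracked for eviction, ignored by implicit sync. */
        dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
        return 0;
}
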
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index b7933c2ce765..491d4846fc02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1674,10 +1674,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
- if (amdgpu_audio != 0)
+ if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1799,6 +1801,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1852,6 +1855,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1902,6 +1906,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b7bae833c804..1bbd39b3b0fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,9 +39,82 @@
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
-static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_cs_chunk_fence *data,
- uint32_t *offset)
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
+ struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_cs *cs)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+ if (cs->in.num_chunks == 0)
+ return -EINVAL;
+
+ memset(p, 0, sizeof(*p));
+ p->adev = adev;
+ p->filp = filp;
+
+ p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
+ if (!p->ctx)
+ return -EINVAL;
+
+ if (atomic_read(&p->ctx->guilty)) {
+ amdgpu_ctx_put(p->ctx);
+ return -ECANCELED;
+ }
+ return 0;
+}
+
+static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib)
+{
+ struct drm_sched_entity *entity;
+ unsigned int i;
+ int r;
+
+ r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
+ chunk_ib->ip_instance,
+ chunk_ib->ring, &entity);
+ if (r)
+ return r;
+
+ /*
+ * Abort if there is no run queue associated with this entity.
+ * Possibly because of disabled HW IP.
+ */
+ if (entity->rq == NULL)
+ return -EINVAL;
+
+ /* Check if we can add this IB to some existing job */
+ for (i = 0; i < p->gang_size; ++i)
+ if (p->entities[i] == entity)
+ return i;
+
+ /* If not increase the gang size if possible */
+ if (i == AMDGPU_CS_GANG_SIZE)
+ return -EINVAL;
+
+ p->entities[i] = entity;
+ p->gang_size = i + 1;
+ return i;
+}
+
+static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib,
+ unsigned int *num_ibs)
+{
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ ++(num_ibs[r]);
+ return 0;
+}
+
+static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *data,
+ uint32_t *offset)
{
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
@@ -80,11 +153,11 @@ error_unref:
return r;
}
-static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_bo_list_in *data)
+static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
{
+ struct drm_amdgpu_bo_list_entry *info;
int r;
- struct drm_amdgpu_bo_list_entry *info = NULL;
r = amdgpu_bo_create_list_entry_array(data, &info);
if (r)
@@ -104,38 +177,25 @@ error_free:
return r;
}
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
+/* Copy the data from userspace and go over it the first time */
+static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
- unsigned size, num_ibs = 0;
uint32_t uf_offset = 0;
- int i;
+ unsigned int size;
int ret;
+ int i;
- if (cs->in.num_chunks == 0)
- return -EINVAL;
-
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
+ GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
- p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
- if (!p->ctx) {
- ret = -EINVAL;
- goto free_chunk;
- }
-
- mutex_lock(&p->ctx->lock);
-
- /* skip guilty context job */
- if (atomic_read(&p->ctx->guilty) == 1) {
- ret = -ECANCELED;
- goto free_chunk;
- }
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -170,7 +230,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
size = p->chunks[i].length_dw;
cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
+ GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
i--;
@@ -182,36 +243,35 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_partial_kdata;
}
+ /* Assume the worst on the following checks */
+ ret = -EINVAL;
switch (p->chunks[i].chunk_id) {
case AMDGPU_CHUNK_ID_IB:
- ++num_ibs;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
+ goto free_partial_kdata;
+
+ ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
+ if (ret)
+ goto free_partial_kdata;
break;
case AMDGPU_CHUNK_ID_FENCE:
- size = sizeof(struct drm_amdgpu_cs_chunk_fence);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
- &uf_offset);
+ ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
+ &uf_offset);
if (ret)
goto free_partial_kdata;
-
break;
case AMDGPU_CHUNK_ID_BO_HANDLES:
- size = sizeof(struct drm_amdgpu_bo_list_in);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_bo_list_in))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+ ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
-
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -223,22 +283,32 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
break;
default:
- ret = -EINVAL;
goto free_partial_kdata;
}
}
- ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
- if (ret)
- goto free_all_kdata;
+ if (!p->gang_size)
+ return -EINVAL;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
+ if (ret)
+ goto free_all_kdata;
+
+ ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
+ &fpriv->vm);
+ if (ret)
+ goto free_all_kdata;
+ }
+ p->gang_leader = p->jobs[p->gang_size - 1];
- if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+ if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
ret = -ECANCELED;
goto free_all_kdata;
}
if (p->uf_entry.tv.bo)
- p->job->uf_addr = uf_offset;
+ p->gang_leader->uf_addr = uf_offset;
kvfree(chunk_array);
/* Use this opportunity to fill in task info for the vm */
@@ -260,6 +330,297 @@ free_chunk:
return ret;
}
+static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk,
+ unsigned int *ce_preempt,
+ unsigned int *de_preempt)
+{
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_ring *ring;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ job = p->jobs[r];
+ ring = amdgpu_job_ring(job);
+ ib = &job->ibs[job->num_ibs++];
+
+ /* MM engine doesn't support user fences */
+ if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
+ return -EINVAL;
+
+ if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
+ (*ce_preempt)++;
+ else
+ (*de_preempt)++;
+
+ /* Each GFX command submit allows only 1 IB max
+ * preemptible for CE & DE */
+ if (*ce_preempt > 1 || *de_preempt > 1)
+ return -EINVAL;
+ }
+
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+
+ r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+
+ ib->gpu_addr = chunk_ib->va_start;
+ ib->length_dw = chunk_ib->ib_bytes / 4;
+ ib->flags = chunk_ib->flags;
+ return 0;
+}
+
+static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_ctx *ctx;
+ struct drm_sched_entity *entity;
+ struct dma_fence *fence;
+
+ ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
+ r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
+ deps[i].ip_instance,
+ deps[i].ring, &entity);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ struct drm_sched_fence *s_fence;
+ struct dma_fence *old = fence;
+
+ s_fence = to_drm_sched_fence(fence);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
+ uint32_t handle, u64 point,
+ u64 flags)
+{
+ struct dma_fence *fence;
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
+ return r;
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ dma_fence_put(fence);
+
+ return r;
+}
+
+static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+
+ for (i = 0; i < num_deps; ++i) {
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
+ return -EINVAL;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = dma_fence_chain_alloc();
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ dma_fence_chain_free(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
+{
+ unsigned int ce_preempt = 0, de_preempt = 0;
+ int i, r;
+
+ for (i = 0; i < p->nchunks; ++i) {
+ struct amdgpu_cs_chunk *chunk;
+
+ chunk = &p->chunks[i];
+
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_IB:
+ r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ r = amdgpu_cs_p2_dependencies(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ r = amdgpu_cs_p2_syncobj_in(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+ r = amdgpu_cs_p2_syncobj_out(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
+ if (r)
+ return r;
+ break;
+ }
+ }
+
+ return 0;
+}
+
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
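amdgpu_cs_job_idx() above is a small find-or-append lookup: reuse the slot of a scheduler entity the submission has already seen, otherwise grow the gang by one, up to AMDGPU_CS_GANG_SIZE. A generic sketch of that lookup with placeholder names and a placeholder MAX_GANG limit:

#include <linux/errno.h>

#define MAX_GANG 4      /* stand-in for AMDGPU_CS_GANG_SIZE */

static int foo_gang_idx(void *entities[MAX_GANG], unsigned int *gang_size,
                        void *entity)
{
        unsigned int i;

        /* Reuse an existing slot if this entity was seen before. */
        for (i = 0; i < *gang_size; ++i)
                if (entities[i] == entity)
                        return i;

        /* Otherwise append, if the gang still has room. */
        if (i == MAX_GANG)
                return -EINVAL;

        entities[i] = entity;
        *gang_size = i + 1;
        return i;
}
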
@@ -495,9 +856,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- struct amdgpu_bo *gds;
- struct amdgpu_bo *gws;
- struct amdgpu_bo *oa;
+ unsigned int i;
int r;
INIT_LIST_HEAD(&p->validated);
@@ -581,16 +940,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->bo_va = amdgpu_vm_bo_find(vm, bo);
}
- /* Move fence waiting after getting reservation lock of
- * PD root. Then there is no need on a ctx mutex lock.
- */
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- goto error_validate;
- }
-
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
@@ -611,197 +960,139 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (r)
goto error_validate;
- amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
- p->bytes_moved_vis);
+ if (p->uf_entry.tv.bo) {
+ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
+ r = amdgpu_ttm_alloc_gart(&uf->tbo);
+ if (r)
+ goto error_validate;
- if (gds) {
- p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
- p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
- }
- if (gws) {
- p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
- p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
- }
- if (oa) {
- p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
- p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
}
- if (!r && p->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
- r = amdgpu_ttm_alloc_gart(&uf->tbo);
- p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
- }
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
+ p->bo_list->gws_obj,
+ p->bo_list->oa_obj);
+ return 0;
error_validate:
- if (r)
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out_free_user_pages:
- if (r) {
- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- if (!e->user_pages)
- continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
- kvfree(e->user_pages);
- e->user_pages = NULL;
- }
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (!e->user_pages)
+ continue;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
}
return r;
}
-static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_bo_list_entry *e;
- int r;
+ int i, j;
- list_for_each_entry(e, &p->validated, tv.head) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- struct dma_resv *resv = bo->tbo.base.resv;
- enum amdgpu_sync_mode sync_mode;
+ if (!trace_amdgpu_cs_enabled())
+ return;
- sync_mode = amdgpu_bo_explicit_sync(bo) ?
- AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
- &fpriv->vm);
- if (r)
- return r;
+ for (i = 0; i < p->gang_size; ++i) {
+ struct amdgpu_job *job = p->jobs[i];
+
+ for (j = 0; j < job->num_ibs; ++j)
+ trace_amdgpu_cs(p, job, &job->ibs[j]);
}
- return 0;
}
-/**
- * amdgpu_cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- * @backoff: indicator to backoff the reservation
- *
- * If error is set then unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
- bool backoff)
+static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
- unsigned i;
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ unsigned int i;
+ int r;
- if (error && backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
- mutex_unlock(&parser->bo_list->bo_list_mutex);
- }
+ /* Only for UVD/VCE VM emulation */
+ if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
+ return 0;
- for (i = 0; i < parser->num_post_deps; i++) {
- drm_syncobj_put(parser->post_deps[i].syncobj);
- kfree(parser->post_deps[i].chain);
- }
- kfree(parser->post_deps);
+ for (i = 0; i < job->num_ibs; ++i) {
+ struct amdgpu_ib *ib = &job->ibs[i];
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj;
+ uint64_t va_start;
+ uint8_t *kptr;
- dma_fence_put(parser->fence);
+ va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+ return r;
+ }
- if (parser->ctx) {
- mutex_unlock(&parser->ctx->lock);
- amdgpu_ctx_put(parser->ctx);
+ if ((va_start + ib->length_dw * 4) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
+
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r) {
+ return r;
+ }
+
+ kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
+
+ if (ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, ib->length_dw * 4);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, job, ib);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
}
- if (parser->bo_list)
- amdgpu_bo_list_put(parser->bo_list);
- for (i = 0; i < parser->nchunks; i++)
- kvfree(parser->chunks[i].kdata);
- kvfree(parser->chunks);
- if (parser->job)
- amdgpu_job_free(parser->job);
- if (parser->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
+ return 0;
+}
- amdgpu_bo_unref(&uf);
+static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
+{
+ unsigned int i;
+ int r;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
+ if (r)
+ return r;
}
+ return 0;
}
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_job *job = p->gang_leader;
struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
+ unsigned int i;
int r;
- /* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
- unsigned i, j;
-
- for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- struct amdgpu_cs_chunk *chunk;
- uint64_t offset, va_start;
- struct amdgpu_ib *ib;
- uint8_t *kptr;
-
- chunk = &p->chunks[i];
- ib = &p->job->ibs[j];
- chunk_ib = chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
- if (r) {
- DRM_ERROR("IB va_start is invalid\n");
- return r;
- }
-
- if ((va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
-
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
-
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += va_start - offset;
-
- if (ring->funcs->parse_cs) {
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
- if (r)
- return r;
- } else {
- ib->ptr = (uint32_t *)kptr;
- r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
- amdgpu_bo_kunmap(aobj);
- if (r)
- return r;
- }
-
- j++;
- }
- }
-
- if (!p->job->vm)
- return amdgpu_cs_sync_rings(p);
-
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -810,18 +1101,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+ if (fpriv->csa_va) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -840,7 +1131,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -853,11 +1144,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
+ r = amdgpu_sync_fence(&job->sync, vm->last_update);
if (r)
return r;
- p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ for (i = 0; i < p->gang_size; ++i) {
+ job = p->jobs[i];
+
+ if (!job->vm)
+ continue;
+
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ }
if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
@@ -872,331 +1170,40 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
- return amdgpu_cs_sync_rings(p);
-}
-
-static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
-{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- int r, ce_preempt = 0, de_preempt = 0;
- struct amdgpu_ring *ring;
- int i, j;
-
- for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
- struct amdgpu_cs_chunk *chunk;
- struct amdgpu_ib *ib;
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct drm_sched_entity *entity;
-
- chunk = &parser->chunks[i];
- ib = &parser->job->ibs[j];
- chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
- (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
- ce_preempt++;
- else
- de_preempt++;
- }
-
- /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
- if (ce_preempt > 1 || de_preempt > 1)
- return -EINVAL;
- }
-
- r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &entity);
- if (r)
- return r;
-
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
- parser->job->preamble_status |=
- AMDGPU_PREAMBLE_IB_PRESENT;
-
- if (parser->entity && parser->entity != entity)
- return -EINVAL;
-
- /* Return if there is no run queue associated with this entity.
- * Possibly because of disabled HW IP*/
- if (entity->rq == NULL)
- return -EINVAL;
-
- parser->entity = entity;
-
- ring = to_amdgpu_ring(entity->rq->sched);
- r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0,
- AMDGPU_IB_POOL_DELAYED, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
- ib->gpu_addr = chunk_ib->va_start;
- ib->length_dw = chunk_ib->ib_bytes / 4;
- ib->flags = chunk_ib->flags;
-
- j++;
- }
-
- /* MM engine doesn't support user fences */
- ring = to_amdgpu_ring(parser->entity->rq->sched);
- if (parser->job->uf_addr && ring->funcs->no_user_fence)
- return -EINVAL;
-
return 0;
}
-static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_dep *deps;
-
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_dep);
-
- for (i = 0; i < num_deps; ++i) {
- struct amdgpu_ctx *ctx;
- struct drm_sched_entity *entity;
- struct dma_fence *fence;
-
- ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
- if (ctx == NULL)
- return -EINVAL;
-
- r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
- deps[i].ip_instance,
- deps[i].ring, &entity);
- if (r) {
- amdgpu_ctx_put(ctx);
- return r;
- }
-
- fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
- amdgpu_ctx_put(ctx);
-
- if (IS_ERR(fence))
- return PTR_ERR(fence);
- else if (!fence)
- continue;
-
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
- struct drm_sched_fence *s_fence;
- struct dma_fence *old = fence;
-
- s_fence = to_drm_sched_fence(fence);
- fence = dma_fence_get(&s_fence->scheduled);
- dma_fence_put(old);
- }
-
- r = amdgpu_sync_fence(&p->job->sync, fence);
- dma_fence_put(fence);
- if (r)
- return r;
- }
- return 0;
-}
-
-static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
- uint32_t handle, u64 point,
- u64 flags)
-{
- struct dma_fence *fence;
+ struct amdgpu_job *leader = p->gang_leader;
+ struct amdgpu_bo_list_entry *e;
+ unsigned int i;
int r;
- r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
- if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
- handle, point, r);
- return r;
- }
-
- r = amdgpu_sync_fence(&p->job->sync, fence);
- dma_fence_put(fence);
-
- return r;
-}
-
-static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_sem *deps;
- unsigned num_deps;
- int i, r;
+ list_for_each_entry(e, &p->validated, tv.head) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
- 0, 0);
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
- return 0;
-}
-
-
-static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
- unsigned num_deps;
- int i, r;
-
- syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_syncobj);
- for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p,
- syncobj_deps[i].handle,
- syncobj_deps[i].point,
- syncobj_deps[i].flags);
+ for (i = 0; i < p->gang_size - 1; ++i) {
+ r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
if (r)
return r;
}
- return 0;
-}
-
-static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_sem *deps;
- unsigned num_deps;
- int i;
-
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_sem);
-
- if (p->post_deps)
- return -EINVAL;
-
- p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
- GFP_KERNEL);
- p->num_post_deps = 0;
-
- if (!p->post_deps)
- return -ENOMEM;
-
-
- for (i = 0; i < num_deps; ++i) {
- p->post_deps[i].syncobj =
- drm_syncobj_find(p->filp, deps[i].handle);
- if (!p->post_deps[i].syncobj)
- return -EINVAL;
- p->post_deps[i].chain = NULL;
- p->post_deps[i].point = 0;
- p->num_post_deps++;
- }
-
- return 0;
-}
-
-
-static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
- unsigned num_deps;
- int i;
-
- syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_syncobj);
-
- if (p->post_deps)
- return -EINVAL;
-
- p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
- GFP_KERNEL);
- p->num_post_deps = 0;
-
- if (!p->post_deps)
- return -ENOMEM;
-
- for (i = 0; i < num_deps; ++i) {
- struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
-
- dep->chain = NULL;
- if (syncobj_deps[i].point) {
- dep->chain = dma_fence_chain_alloc();
- if (!dep->chain)
- return -ENOMEM;
- }
-
- dep->syncobj = drm_syncobj_find(p->filp,
- syncobj_deps[i].handle);
- if (!dep->syncobj) {
- dma_fence_chain_free(dep->chain);
- return -EINVAL;
- }
- dep->point = syncobj_deps[i].point;
- p->num_post_deps++;
- }
-
- return 0;
-}
-
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *p)
-{
- int i, r;
-
- /* TODO: Investigate why we still need the context lock */
- mutex_unlock(&p->ctx->lock);
-
- for (i = 0; i < p->nchunks; ++i) {
- struct amdgpu_cs_chunk *chunk;
-
- chunk = &p->chunks[i];
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+ if (r && r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- switch (chunk->chunk_id) {
- case AMDGPU_CHUNK_ID_DEPENDENCIES:
- case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
- r = amdgpu_cs_process_fence_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
- r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
- r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
- r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
- r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
- if (r)
- goto out;
- break;
- }
- }
-
-out:
- mutex_lock(&p->ctx->lock);
return r;
}
@@ -1221,20 +1228,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct drm_sched_entity *entity = p->entity;
+ struct amdgpu_job *leader = p->gang_leader;
struct amdgpu_bo_list_entry *e;
- struct amdgpu_job *job;
+ unsigned int i;
uint64_t seq;
int r;
- job = p->job;
- p->job = NULL;
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_arm(&p->jobs[i]->base);
- r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
- if (r)
- goto error_unlock;
+ for (i = 0; i < (p->gang_size - 1); ++i) {
+ struct dma_fence *fence;
- drm_sched_job_arm(&job->base);
+ fence = &p->jobs[i]->base.s_fence->scheduled;
+ r = amdgpu_sync_fence(&leader->sync, fence);
+ if (r)
+ goto error_cleanup;
+ }
+
+ if (p->gang_size > 1) {
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_gang_leader(p->jobs[i], leader);
+ }
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
@@ -1245,6 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
* -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
*/
+ r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
@@ -1252,67 +1268,96 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
if (r) {
r = -EAGAIN;
- goto error_abort;
+ goto error_unlock;
}
- p->fence = dma_fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&leader->base.s_fence->finished);
+ list_for_each_entry(e, &p->validated, tv.head) {
+
+ /* Everybody except for the gang leader uses READ */
+ for (i = 0; i < (p->gang_size - 1); ++i) {
+ dma_resv_add_fence(e->tv.bo->base.resv,
+ &p->jobs[i]->base.s_fence->finished,
+ DMA_RESV_USAGE_READ);
+ }
- seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
+ /* The gang leader is remembered as the writer */
+ e->tv.num_shared = 0;
+ }
+
+ seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+ p->fence);
amdgpu_cs_post_dependencies(p);
- if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
!p->ctx->preamble_presented) {
- job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
p->ctx->preamble_presented = true;
}
cs->out.handle = seq;
- job->uf_sequence = seq;
-
- amdgpu_job_free_resources(job);
+ leader->uf_sequence = seq;
- trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- drm_sched_entity_push_job(&job->base);
+ for (i = 0; i < p->gang_size; ++i) {
+ amdgpu_job_free_resources(p->jobs[i]);
+ trace_amdgpu_cs_ioctl(p->jobs[i]);
+ drm_sched_entity_push_job(&p->jobs[i]->base);
+ p->jobs[i] = NULL;
+ }
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
-
- /* Make sure all BOs are remembered as writers */
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->tv.num_shared = 0;
-
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+
mutex_unlock(&p->adev->notifier_lock);
mutex_unlock(&p->bo_list->bo_list_mutex);
-
return 0;
-error_abort:
- drm_sched_job_cleanup(&job->base);
+error_unlock:
mutex_unlock(&p->adev->notifier_lock);
-error_unlock:
- amdgpu_job_free(job);
+error_cleanup:
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_cleanup(&p->jobs[i]->base);
return r;
}
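For context on the -EAGAIN comment above: libdrm's drmIoctl() transparently restarts the submission, so user space never sees the invalidated-userptr case. A sketch of that retry loop, as I understand libdrm's behaviour (not part of this patch):

#include <errno.h>
#include <sys/ioctl.h>

/* Roughly what drmIoctl() does: retry the ioctl on EINTR and EAGAIN. */
static int drm_ioctl_retry(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}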
-static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+/* Cleanup the parser structure */
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
- int i;
+ unsigned i;
- if (!trace_amdgpu_cs_enabled())
- return;
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
+ }
+ kfree(parser->post_deps);
+
+ dma_fence_put(parser->fence);
+
+ if (parser->ctx)
+ amdgpu_ctx_put(parser->ctx);
+ if (parser->bo_list)
+ amdgpu_bo_list_put(parser->bo_list);
+
+ for (i = 0; i < parser->nchunks; i++)
+ kvfree(parser->chunks[i].kdata);
+ kvfree(parser->chunks);
+ for (i = 0; i < parser->gang_size; ++i) {
+ if (parser->jobs[i])
+ amdgpu_job_free(parser->jobs[i]);
+ }
+ if (parser->uf_entry.tv.bo) {
+ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
- for (i = 0; i < parser->job->num_ibs; i++)
- trace_amdgpu_cs(parser, i);
+ amdgpu_bo_unref(&uf);
+ }
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- union drm_amdgpu_cs *cs = data;
- struct amdgpu_cs_parser parser = {};
- bool reserved_buffers = false;
+ struct amdgpu_cs_parser parser;
int r;
if (amdgpu_ras_intr_triggered())
@@ -1321,25 +1366,20 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
- parser.adev = adev;
- parser.filp = filp;
-
- r = amdgpu_cs_parser_init(&parser, data);
+ r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
if (printk_ratelimit())
DRM_ERROR("Failed to initialize parser %d!\n", r);
- goto out;
+ return r;
}
- r = amdgpu_cs_ib_fill(adev, &parser);
+ r = amdgpu_cs_pass1(&parser, data);
if (r)
- goto out;
+ goto error_fini;
- r = amdgpu_cs_dependencies(adev, &parser);
- if (r) {
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
- goto out;
- }
+ r = amdgpu_cs_pass2(&parser);
+ if (r)
+ goto error_fini;
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
@@ -1347,22 +1387,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
- goto out;
+ goto error_fini;
}
- reserved_buffers = true;
+ r = amdgpu_cs_patch_jobs(&parser);
+ if (r)
+ goto error_backoff;
+
+ r = amdgpu_cs_vm_handling(&parser);
+ if (r)
+ goto error_backoff;
+
+ r = amdgpu_cs_sync_rings(&parser);
+ if (r)
+ goto error_backoff;
trace_amdgpu_cs_ibs(&parser);
- r = amdgpu_cs_vm_handling(&parser);
+ r = amdgpu_cs_submit(&parser, data);
if (r)
- goto out;
+ goto error_backoff;
- r = amdgpu_cs_submit(&parser, cs);
+ amdgpu_cs_parser_fini(&parser);
+ return 0;
-out:
- amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+error_backoff:
+ ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
+ mutex_unlock(&parser.bo_list->bo_list_mutex);
+error_fini:
+ amdgpu_cs_parser_fini(&parser);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index 30ecc4917f81..cbaa19b2b8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -27,6 +27,8 @@
#include "amdgpu_bo_list.h"
#include "amdgpu_ring.h"
+#define AMDGPU_CS_GANG_SIZE 4
+
struct amdgpu_bo_va_mapping;
struct amdgpu_cs_chunk {
@@ -50,9 +52,11 @@ struct amdgpu_cs_parser {
unsigned nchunks;
struct amdgpu_cs_chunk *chunks;
- /* scheduler job object */
- struct amdgpu_job *job;
- struct drm_sched_entity *entity;
+ /* scheduler job objects */
+ unsigned int gang_size;
+ struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *gang_leader;
/* buffer objects */
struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8ee4e8491f39..f6d9d5da53cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -315,7 +315,6 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
kref_init(&ctx->refcount);
ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@@ -402,12 +401,11 @@ static void amdgpu_ctx_fini(struct kref *ref)
}
}
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
drm_dev_exit(idx);
}
- mutex_destroy(&ctx->lock);
kfree(ctx);
}
@@ -848,7 +846,7 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
mgr->adev = adev;
mutex_init(&mgr->lock);
- idr_init(&mgr->ctx_handles);
+ idr_init_base(&mgr->ctx_handles, 1);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
atomic64_set(&mgr->time_spend[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index cc7c8afff414..0fa0e56daf67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -53,7 +53,6 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
- struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index cb00c7d6f50b..6066aebf491c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1043,6 +1043,157 @@ err:
}
/**
+ * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * Read the last logged residency value. It does not update automatically;
+ * logging must be stopped before the current value can be read.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = amdgpu_get_gfx_off_residency(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
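For illustration only (not part of the patch): a minimal user-space sketch of reading the last logged residency value through this debugfs file. The file name matches the "amdgpu_gfxoff_residency" entry added below; the /sys/kernel/debug/dri/0/ path and the need to stop logging first are assumptions taken from the comments above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption; the card index may differ on a given system. */
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_gfxoff_residency";
	uint32_t residency;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	/* Reads must be 4 bytes and 4-byte aligned, matching the kernel check. */
	if (read(fd, &residency, sizeof(residency)) == sizeof(residency))
		printf("GFXOFF residency: %u\n", residency);
	close(fd);
	return 0;
}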
+
+/**
+ * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit non-zero value to start logging; write a 32-bit zero to stop it.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u32 value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ amdgpu_set_gfx_off_residency(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
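A companion sketch (again illustrative, not part of the patch): start residency logging, let it accumulate, then stop it, after which the read path above returns the logged value. The 1/0 semantics come from the comment on this function; the debugfs path is the same assumption as before.

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint32_t on = 1, off = 0;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gfxoff_residency", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, &on, sizeof(on));	/* non-zero 32-bit word: start logging */
	sleep(1);			/* accumulate some GFXOFF residency */
	write(fd, &off, sizeof(off));	/* zero: stop logging */
	close(fd);
	return 0;
}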
+
+
+/**
+ * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u64 value = 0;
+
+ r = amdgpu_get_gfx_off_entrycount(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (u64 *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
* amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
*
* @f: open file handle
@@ -1249,6 +1400,19 @@ static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_count_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_residency_read,
+ .write = amdgpu_debugfs_gfxoff_residency_write,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs2_fops,
@@ -1261,6 +1425,8 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gpr_fops,
&amdgpu_debugfs_gfxoff_fops,
&amdgpu_debugfs_gfxoff_status_fops,
+ &amdgpu_debugfs_gfxoff_count_fops,
+ &amdgpu_debugfs_gfxoff_residency_fops,
};
static const char *debugfs_regs_names[] = {
@@ -1275,6 +1441,8 @@ static const char *debugfs_regs_names[] = {
"amdgpu_gpr",
"amdgpu_gfxoff",
"amdgpu_gfxoff_status",
+ "amdgpu_gfxoff_count",
+ "amdgpu_gfxoff_residency",
};
/**
@@ -1786,6 +1954,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return PTR_ERR(ent);
}
+ debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask);
+
/* Register debugfs entries for amdgpu_ttm */
amdgpu_ttm_debugfs_init(adev);
amdgpu_debugfs_pm_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f095a2513aff..ab8f970b2849 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ /* need to do common hw init early so everything is set up for gmc */
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ if (r) {
+ DRM_ERROR("hw_init %d failed %d\n", i, r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* need to do gmc hw init early so we can allocate gpu mem */
/* Try to reserve bad pages early */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
@@ -2451,19 +2459,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
*/
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (amdgpu_xgmi_add_device(adev) == 0) {
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ if (!amdgpu_sriov_vf(adev)) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+ if (!hive->reset_domain ||
+ !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
+ goto init_failed;
+ }
- if (!hive->reset_domain ||
- !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
- r = -ENOENT;
+ /* Drop the early temporary reset domain we created for device */
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = hive->reset_domain;
amdgpu_put_xgmi_hive(hive);
- goto init_failed;
}
-
- /* Drop the early temporary reset domain we created for device */
- amdgpu_reset_put_reset_domain(adev->reset_domain);
- adev->reset_domain = hive->reset_domain;
- amdgpu_put_xgmi_hive(hive);
}
}
@@ -3052,8 +3062,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
int i, r;
static enum amd_ip_block_type ip_order[] = {
- AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -3144,7 +3154,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -3501,6 +3512,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
+ RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
@@ -3579,6 +3591,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
adev->gfx.gfx_off_req_count = 1;
+ adev->gfx.gfx_off_residency = 0;
+ adev->gfx.gfx_off_entrycount = 0;
adev->pm.ac_power = power_supply_is_system_supplied() > 0;
atomic_set(&adev->throttling_logging_enabled, 1);
@@ -3967,8 +3981,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_gart_dummy_page_fini(adev);
- if (drm_dev_is_unplugged(adev_to_drm(adev)))
- amdgpu_device_unmap_mmio(adev);
+ amdgpu_device_unmap_mmio(adev);
}
@@ -3981,6 +3994,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
+ dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
amdgpu_reset_fini(adev);
@@ -4056,12 +4070,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+ if (r)
+ return r;
+ }
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
@@ -4085,6 +4107,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -4103,6 +4128,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4117,6 +4148,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
r = amdgpu_device_ip_resume(adev);
+
+ /* No matter what r is, the full GPU must always be released properly */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
+
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
@@ -4509,14 +4547,15 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
*/
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
- if (!amdgpu_device_ip_check_soft_reset(adev)) {
- dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
- return false;
- }
if (amdgpu_gpu_recovery == 0)
goto disabled;
+ if (!amdgpu_device_ip_check_soft_reset(adev)) {
+ dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
+ return false;
+ }
+
if (amdgpu_sriov_vf(adev))
return true;
@@ -4641,7 +4680,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!need_full_reset)
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- if (!need_full_reset) {
+ if (!need_full_reset && amdgpu_gpu_recovery) {
amdgpu_device_ip_pre_soft_reset(adev);
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
@@ -4737,6 +4776,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ bool gpu_reset_for_dev_remove = 0;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -4756,6 +4796,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+ gpu_reset_for_dev_remove =
+ test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
/*
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
@@ -4800,6 +4844,18 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_ras_intr_cleared();
}
+ /* Since the mode1 reset affects base ip blocks, the
+ * phase1 ip blocks need to be resumed. Otherwise there
+ * will be a BIOS signature error and the psp bootloader
+ * can't load kdb on the next amdgpu install.
+ */
+ if (gpu_reset_for_dev_remove) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_device_ip_resume_phase1(tmp_adev);
+
+ goto end;
+ }
+
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
@@ -5039,6 +5095,7 @@ static void amdgpu_device_recheck_guilty_jobs(
/* set guilty */
drm_sched_increase_karma(s_job);
+ amdgpu_reset_prepare_hwcontext(adev, reset_context);
retry:
/* do hw reset */
if (amdgpu_sriov_vf(adev)) {
@@ -5121,6 +5178,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false;
bool audio_suspended = false;
int tmp_vram_lost_counter;
+ bool gpu_reset_for_dev_remove = false;
+
+ gpu_reset_for_dev_remove =
+ test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
/*
* Special case: RAS triggered and full reset isn't supported
@@ -5148,6 +5210,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->job = job;
reset_context->hive = hive;
+
/*
* Build list of devices to reset.
* In case we are in XGMI hive mode, resort the device list
@@ -5155,8 +5218,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (gpu_reset_for_dev_remove && adev->shutdown)
+ tmp_adev->shutdown = true;
+ }
if (!list_is_first(&adev->reset_list, &device_list))
list_rotate_to_front(&adev->reset_list, &device_list);
device_list_handle = &device_list;
@@ -5239,6 +5305,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ if (gpu_reset_for_dev_remove) {
+ /* Workaround for ASICs that need to disable SMC first */
+ amdgpu_device_smu_fini_early(tmp_adev);
+ }
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -5267,8 +5337,14 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
amdgpu_ras_resume(adev);
} else {
r = amdgpu_do_asic_reset(device_list_handle, reset_context);
- if (r && r == -EAGAIN)
+ if (r && r == -EAGAIN) {
+ set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags);
+ adev->asic_reset_res = 0;
goto retry;
+ }
+
+ if (!r && gpu_reset_for_dev_remove)
+ goto recover_end;
}
skip_hw_reset:
@@ -5342,6 +5418,7 @@ skip_sched_resume:
amdgpu_device_unset_mp1_state(tmp_adev);
}
+recover_end:
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
@@ -5524,8 +5601,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
- &peer_adev->dev, 1, true) < 0);
+ bool p2p_access =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
@@ -5699,6 +5777,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(adev, &reset_context);
@@ -5908,3 +5987,36 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
(void)RREG32(data);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if the switch to the new gang succeeded, or a reference to the
+ * still-running current gang leader otherwise.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang)
+{
+ struct dma_fence *old = NULL;
+
+ do {
+ dma_fence_put(old);
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(&adev->gang_submit);
+ rcu_read_unlock();
+
+ if (old == gang)
+ break;
+
+ if (!dma_fence_is_signaled(old))
+ return old;
+
+ } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+ old, gang) != old);
+
+ dma_fence_put(old);
+ return NULL;
+}
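A hedged usage sketch for the helper above (illustrative only; the function name below is hypothetical and the code assumes the usual amdgpu.h context): a caller that wants its submission to run under a new gang fence waits out whatever gang is still installed and then retries the switch.

/* Hypothetical caller: block until the new gang fence could be installed. */
static int example_switch_to_gang(struct amdgpu_device *adev,
				  struct dma_fence *new_gang)
{
	struct dma_fence *old;

	while ((old = amdgpu_device_switch_gang(adev, new_gang))) {
		/* A previous gang is still running; wait for it, then retry. */
		long r = dma_fence_wait(old, true);

		dma_fence_put(old);
		if (r < 0)
			return r;
	}
	return 0;
}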
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 95d34590cad1..3993e6134914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -229,7 +229,7 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, ui
return r;
}
- memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
release_firmware(fw);
return 0;
@@ -1506,6 +1506,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
default:
@@ -1549,6 +1550,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
default:
@@ -1633,6 +1635,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
@@ -1682,6 +1685,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
@@ -1780,6 +1784,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
default:
@@ -1823,6 +1828,7 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
@@ -1903,7 +1909,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
break;
default:
dev_err(adev->dev,
@@ -1940,6 +1947,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@@ -2165,6 +2173,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->family = AMDGPU_FAMILY_GC_11_0_0;
break;
case IP_VERSION(11, 0, 1):
@@ -2234,7 +2243,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
break;
case IP_VERSION(4, 3, 0):
case IP_VERSION(4, 3, 1):
- adev->nbio.funcs = &nbio_v4_3_funcs;
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
break;
case IP_VERSION(7, 7, 0):
@@ -2332,6 +2344,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c20922a5af9f..23998f727c7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -498,6 +500,12 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.create_handle = drm_gem_fb_create_handle,
};
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = drm_atomic_helper_dirtyfb,
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1100,7 +1108,10 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 782cbca37538..7bd8e33b14be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -58,7 +58,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 429fcdf28836..3c9fecdd6b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,8 @@
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
#include <linux/cc_platform.h>
+#include <linux/fb.h>
+#include <linux/dynamic_debug.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -102,9 +104,10 @@
* - 3.46.0 - To enable hot plug amdgpu tests in libdrm
* - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
* - 3.48.0 - Add IP discovery version info to HW INFO
+ * - 3.49.0 - Add gang submit into CS IOCTL
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 48
+#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
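For user space, the minor bump above is the feature gate for gang submit. A hedged libdrm sketch (not part of the patch) of how an application might test for it before packing multiple IB chunks into one CS:

#include <xf86drm.h>

/* Returns non-zero when the kernel advertises DRM version >= 3.49. */
static int amdgpu_kernel_has_gang_submit(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	int ok;

	if (!v)
		return 0;
	ok = v->version_major > 3 ||
	     (v->version_major == 3 && v->version_minor >= 49);
	drmFreeVersion(v);
	return ok;
}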
int amdgpu_vram_limit;
@@ -185,6 +188,18 @@ int amdgpu_vcnfw_log;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
.delayed_reset_work = __DELAYED_WORK_INITIALIZER(
@@ -2181,15 +2196,46 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- drm_dev_unplug(dev);
-
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
}
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+ bool need_to_reset_gpu = false;
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive->device_remove_count == 0)
+ need_to_reset_gpu = true;
+ hive->device_remove_count++;
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ need_to_reset_gpu = true;
+ }
+
+ /* Workaround for ASICs that need to reset the SMU.
+ * Called only when the first device is removed.
+ */
+ if (need_to_reset_gpu) {
+ struct amdgpu_reset_context reset_context;
+
+ adev->shutdown = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
+ }
+
amdgpu_driver_unload_kms(dev);
+ drm_dev_unplug(dev);
+
/*
* Flush any in flight DMA operations from device.
* Clear the Bus Master Enable bit and then wait on the PCIe Device
@@ -2563,8 +2609,11 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- if (ret)
+ if (ret) {
+ if (amdgpu_device_supports_px(drm_dev))
+ pci_disable_device(pdev);
return ret;
+ }
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8adeb7469f1e..d0d99ed607dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ecada5eadfe3..e325150879df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
+ sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
- return true;
- else
+ return false;
+ else
+ return true;
+ } else {
return false;
+ }
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 222d3d7ea076..9546adc8a76f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
@@ -477,7 +478,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- if (adev->gfx.kiq.ring.sched.ready)
+ if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
@@ -610,6 +611,45 @@ unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_set_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
@@ -826,3 +866,142 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
}
return amdgpu_num_kcq;
}
+
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ uint32_t ucode_id)
+{
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct firmware *ucode_fw;
+ unsigned int fw_size;
+
+ switch (ucode_id) {
+ case AMDGPU_UCODE_ID_CP_PFP:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.ce_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ default:
+ break;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[ucode_id];
+ info->ucode_id = ucode_id;
+ info->fw = ucode_fw;
+ adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
+ }
+}
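The amdgpu_gfx.c hunk above centralizes CP microcode bookkeeping: for each ucode ID it reads version and size fields out of a little-endian firmware header and, when the PSP front-door loader is in use, reserves a page-aligned slice of the firmware region. The standalone sketch below is not part of the patch; the header layout and the 4 KiB page size are invented for illustration of that parse-then-align pattern.

    /*
     * Illustrative sketch only -- mimics the shape of
     * amdgpu_gfx_cp_init_microcode(): pull little-endian fields out of a
     * firmware header and round the reported size up to a page.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_fw_header {            /* hypothetical on-disk layout, all LE */
            uint8_t ucode_version[4];
            uint8_t ucode_size_bytes[4];
    };

    static uint32_t le32_get(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static uint32_t align_up(uint32_t v, uint32_t a)
    {
            return (v + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            /* pretend this blob came from request_firmware() */
            uint8_t blob[8] = { 0x2a, 0x00, 0x00, 0x00,   /* version 42   */
                                0x34, 0x12, 0x00, 0x00 }; /* 0x1234 bytes */
            struct demo_fw_header hdr;
            memcpy(&hdr, blob, sizeof(hdr));

            uint32_t ver  = le32_get(hdr.ucode_version);
            uint32_t size = le32_get(hdr.ucode_size_bytes);

            printf("ucode version %u, size %u, reserved %u bytes\n",
                   ver, size, align_up(size, 4096));
            return 0;
    }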
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 23a696d38390..832b3807f1d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -304,6 +304,10 @@ struct amdgpu_gfx {
uint32_t rlc_srlg_feature_version;
uint32_t rlc_srls_fw_version;
uint32_t rlc_srls_feature_version;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t mec_feature_version;
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
@@ -332,10 +336,12 @@ struct amdgpu_gfx {
uint32_t srbm_soft_reset;
/* gfx off */
- bool gfx_off_state; /* true: enabled, false: disabled */
- struct mutex gfx_off_mutex;
- uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
- struct delayed_work gfx_off_delay_work;
+ bool gfx_off_state; /* true: enabled, false: disabled */
+ struct mutex gfx_off_mutex; /* mutex to change gfxoff state */
+ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
+ struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */
+ uint32_t gfx_off_residency; /* last logged residency */
+ uint64_t gfx_off_entrycount; /* count of times GPU has get into GFXOFF state */
/* pipe reservation */
struct mutex pipe_reserve_mutex;
@@ -407,6 +413,10 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value);
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency);
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
@@ -416,4 +426,6 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
index beabab515836..c7b44aeb671b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -35,6 +35,9 @@ struct amdgpu_gfxhub_funcs {
void (*init)(struct amdgpu_device *adev);
int (*get_xgmi_info)(struct amdgpu_device *adev);
void (*utcl2_harvest)(struct amdgpu_device *adev);
+ void (*mode2_save_regs)(struct amdgpu_device *adev);
+ void (*mode2_restore_regs)(struct amdgpu_device *adev);
+ void (*halt)(struct amdgpu_device *adev);
};
struct amdgpu_gfxhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index aebc384531ac..34233a74248c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -572,45 +572,15 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
struct amdgpu_gmc *gmc = &adev->gmc;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- case IP_VERSION(9, 3, 0):
- case IP_VERSION(9, 4, 0):
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- case IP_VERSION(10, 3, 3):
- case IP_VERSION(10, 3, 4):
- case IP_VERSION(10, 3, 5):
- case IP_VERSION(10, 3, 6):
- case IP_VERSION(10, 3, 7):
- /*
- * noretry = 0 will cause kfd page fault tests fail
- * for some ASICs, so set default to 1 for these ASICs.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 1;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- default:
- /* Raven currently has issues with noretry
- * regardless of what we decide for other
- * asics, we should leave raven with
- * noretry = 0 until we root cause the
- * issues.
- *
- * default this to 0 for now, but we may want
- * to change this in the future for certain
- * GPUs as it can increase performance in
- * certain cases.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 0;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- }
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+ gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(9, 4, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver >= IP_VERSION(10, 3, 0));
+
+ gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
}
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
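The simplified amdgpu_gmc_noretry_set() above reduces a long switch to one rule: an amdgpu_noretry module parameter of -1 means "use the per-ASIC default", anything else overrides it. A minimal sketch of that "-1 is auto" convention follows; the default policy below is an assumption, not the driver's exact version table.

    /* Illustrative sketch only: module parameter -1 selects a computed default. */
    #include <stdio.h>

    static int pick_noretry(int param, unsigned int gc_major)
    {
            int def = (gc_major >= 9);          /* assumed default policy */
            return (param == -1) ? def : param; /* explicit value wins */
    }

    int main(void)
    {
            printf("auto on gfx10: %d\n", pick_noretry(-1, 10)); /* -> 1 */
            printf("forced off:    %d\n", pick_noretry(0, 10));  /* -> 0 */
            return 0;
    }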
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 008eaca27151..0305b660cd17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -264,6 +264,32 @@ struct amdgpu_gmc {
u64 mall_size;
/* number of UMC instances */
int num_umc;
+ /* mode2 save restore */
+ u64 VM_L2_CNTL;
+ u64 VM_L2_CNTL2;
+ u64 VM_DUMMY_PAGE_FAULT_CNTL;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_LO32;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_HI32;
+ u64 VM_L2_PROTECTION_FAULT_CNTL;
+ u64 VM_L2_PROTECTION_FAULT_CNTL2;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL3;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL4;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_LO32;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_HI32;
+ u64 VM_DEBUG;
+ u64 VM_L2_MM_GROUP_RT_CLASSES;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID2;
+ u64 VM_L2_CACHE_PARITY_CNTL;
+ u64 VM_L2_IH_LOG_CNTL;
+ u64 VM_CONTEXT_CNTL[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16];
+ u64 MC_VM_MX_L1_TLB_CNTL;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 8c6b2284cf56..1f3302aebeff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -205,6 +205,42 @@ void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
}
/**
+ * amdgpu_gtt_mgr_intersects - test for intersection
+ *
+ * @man: Our manager object
+ * @res: The resource to test
+ * @place: The place for the new allocation
+ * @size: The size of the new allocation
+ *
+ * Simplified intersection test, only interesting if we need GART or not.
+ */
+static bool amdgpu_gtt_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+}
+
+/**
+ * amdgpu_gtt_mgr_compatible - test for compatibility
+ *
+ * @man: Our manager object
+ * @res: The resource to test
+ * @place: The place for the new allocation
+ * @size: The size of the new allocation
+ *
+ * Simplified compatibility test.
+ */
+static bool amdgpu_gtt_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+}
+
+/**
* amdgpu_gtt_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
@@ -225,6 +261,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
.alloc = amdgpu_gtt_mgr_new,
.free = amdgpu_gtt_mgr_del,
+ .intersects = amdgpu_gtt_mgr_intersects,
+ .compatible = amdgpu_gtt_mgr_compatible,
.debug = amdgpu_gtt_mgr_debug
};
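The GTT manager now provides the TTM .intersects and .compatible callbacks, and both reduce to the same question: does the placement demand a GART-addressable range, and does the resource already have one? Below is a minimal sketch of predicate callbacks wired through a const ops table; the types are stand-ins, not the TTM structures.

    /* Illustrative sketch only: two ops-table predicates sharing one test. */
    #include <stdbool.h>
    #include <stdio.h>

    struct place    { unsigned long lpfn; };   /* 0 == "anywhere" */
    struct resource { bool has_gart_addr; };

    struct mgr_ops {
            bool (*intersects)(const struct resource *res, const struct place *p);
            bool (*compatible)(const struct resource *res, const struct place *p);
    };

    static bool gtt_ok(const struct resource *res, const struct place *p)
    {
            /* only interesting when the caller needs a GART-addressable range */
            return !p->lpfn || res->has_gart_addr;
    }

    static const struct mgr_ops gtt_mgr_ops = {
            .intersects = gtt_ok,
            .compatible = gtt_ok,
    };

    int main(void)
    {
            struct resource res = { .has_gart_addr = false };
            struct place anywhere = { 0 }, pinned = { 256 };

            printf("%d %d\n", gtt_mgr_ops.intersects(&res, &anywhere),
                   gtt_mgr_ops.compatible(&res, &pinned)); /* prints: 1 0 */
            return 0;
    }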
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index b1099ee79c50..46c99331d7f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -49,6 +49,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
memset(&ti, 0, sizeof(struct amdgpu_task_info));
+ adev->job_hang = true;
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
@@ -71,6 +72,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
@@ -82,6 +84,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
exit:
+ adev->job_hang = false;
drm_dev_exit(idx);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -102,7 +105,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
*/
(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
- (*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->sched_sync);
@@ -122,6 +124,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
+ (*job)->num_ibs = 1;
r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -129,6 +132,23 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
return r;
}
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa)
+{
+ if (gds) {
+ job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+ job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+ }
+ if (gws) {
+ job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
+ job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+ }
+ if (oa) {
+ job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
+ job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+}
+
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
@@ -153,13 +173,34 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
dma_fence_put(&job->hw_fence);
}
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader)
+{
+ struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+ WARN_ON(job->gang_submit);
+
+ /*
+ * Don't add a reference when we are the gang leader to avoid circle
+ * dependency.
+ */
+ if (job != leader)
+ dma_fence_get(fence);
+ job->gang_submit = fence;
+}
+
void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
+ if (job->gang_submit != &job->base.s_fence->scheduled)
+ dma_fence_put(job->gang_submit);
- dma_fence_put(&job->hw_fence);
+ if (!job->hw_fence.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -224,12 +265,16 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
fence = amdgpu_sync_get_fence(&job->sync);
}
+ if (!fence && job->gang_submit)
+ fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
return fence;
}
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
int r = 0;
@@ -241,8 +286,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
- if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
- dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+ /* Skip job if VRAM is lost and never resubmit gangs */
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+ (job->job_run_counter && job->gang_submit))
+ dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
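The gang-submit changes above hang the leader's scheduled fence off every member job but deliberately skip taking a reference when a job is its own leader, so the leader never keeps itself alive through its own fence. A small refcounting sketch of that rule; the fence and job structs are simplified stand-ins, not the dma_fence API.

    /* Illustrative sketch only: skip the self-reference to avoid a refcount cycle. */
    #include <stdio.h>

    struct fence { int refcount; };
    struct job   { struct fence scheduled; struct fence *gang_submit; };

    static void fence_get(struct fence *f) { f->refcount++; }

    static void job_set_gang_leader(struct job *job, struct job *leader)
    {
            struct fence *f = &leader->scheduled;

            if (job != leader)        /* no self-reference -> no cycle */
                    fence_get(f);
            job->gang_submit = f;
    }

    int main(void)
    {
            struct job leader = { .scheduled = { .refcount = 1 } };
            struct job member = { 0 };

            job_set_gang_leader(&leader, &leader); /* refcount stays 1 */
            job_set_gang_leader(&member, &leader); /* refcount becomes 2 */
            printf("leader fence refcount: %d\n", leader.scheduled.refcount);
            return 0;
    }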
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index babc0af751c2..ab7b150e5d50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -50,6 +50,7 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
struct dma_fence hw_fence;
+ struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
bool vm_needs_flush;
@@ -72,11 +73,20 @@ struct amdgpu_job {
struct amdgpu_ib ibs[];
};
+static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
+{
+ return to_amdgpu_ring(job->base.entity->rq->sched);
+}
+
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1369c25448dc..fe23e09eec98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -247,6 +247,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->gfx.rlc_srls_fw_version;
fw_info->feature = adev->gfx.rlc_srls_feature_version;
break;
+ case AMDGPU_INFO_FW_GFX_RLCP:
+ fw_info->ver = adev->gfx.rlcp_ucode_version;
+ fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLCV:
+ fw_info->ver = adev->gfx.rlcv_ucode_version;
+ fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
+ break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
@@ -328,6 +336,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->psp.cap_fw_version;
fw_info->feature = adev->psp.cap_feature_version;
break;
+ case AMDGPU_INFO_FW_MES_KIQ:
+ fw_info->ver = adev->mes.ucode_fw_version[0];
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_MES:
+ fw_info->ver = adev->mes.ucode_fw_version[1];
+ fw_info->feature = 0;
+ break;
default:
return -EINVAL;
}
@@ -1160,7 +1176,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
mutex_init(&fpriv->bo_list_lock);
- idr_init(&fpriv->bo_list_handles);
+ idr_init_base(&fpriv->bo_list_handles, 1);
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
@@ -1469,6 +1485,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* RLCP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLCV */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
@@ -1581,6 +1613,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
fw_info.feature, fw_info.ver);
}
+ /* MES_KIQ */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MES */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
return 0;
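The amdgpu_kms.c additions repeat one step per new firmware type: set query_fw.fw_type, call amdgpu_firmware_info(), print feature and version. The sketch below is not the driver's code; the query helper and the values are invented, it only shows that query-then-print shape as a table-driven loop.

    /* Illustrative sketch only: table-driven firmware version reporting. */
    #include <stdio.h>

    enum fw_type { FW_RLCP, FW_RLCV, FW_MES_KIQ, FW_MES };

    struct fw_info { unsigned int feature, ver; };

    /* stand-in query: pretend to ask the device, fill in fake data */
    static int query_fw_info(enum fw_type type, struct fw_info *out)
    {
            out->feature = 1;
            out->ver = 0x1000 + (unsigned int)type;
            return 0;
    }

    int main(void)
    {
            static const struct { enum fw_type type; const char *name; } tbl[] = {
                    { FW_RLCP, "RLCP" }, { FW_RLCV, "RLCV" },
                    { FW_MES_KIQ, "MES_KIQ" }, { FW_MES, "MES" },
            };

            for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
                    struct fw_info info;

                    if (query_fw_info(tbl[i].type, &info))
                            return 1;
                    printf("%s feature version: %u, firmware version: 0x%08x\n",
                           tbl[i].name, info.feature, info.ver);
            }
            return 0;
    }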
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index fe82b8b19a4e..0c546245793b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -181,6 +181,9 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
+ /* zero sdma_hqd_mask for non-existent engine */
+ else if (adev->sdma.num_instances == 1)
+ adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
else
adev->mes.sdma_hqd_mask[i] = 0xfc;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7b46f6bf4187..ad980f4b66e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
uint64_t tba_addr;
uint64_t tma_addr;
uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
};
struct mes_remove_queue_input {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d788a00043a5..37322550d750 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4570ad449390..e6a9b9fc9e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -591,7 +591,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!bp->destroy)
bp->destroy = &amdgpu_bo_destroy;
- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
&bo->placement, page_align, &ctx, NULL,
bp->resv, bp->destroy);
if (unlikely(r != 0))
@@ -1309,7 +1309,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->resource->mem_type != TTM_PL_VRAM ||
+ if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || adev->shutdown)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 1036446abc30..effa7df3ddbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -138,6 +138,7 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
@@ -327,23 +328,32 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "vega10");
break;
case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "navi12");
break;
case IP_VERSION(11, 0, 7):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "sienna_cichlid");
break;
case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, "aldebaran");
ret &= psp_init_ta_microcode(psp, "aldebaran");
break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ break;
default:
BUG();
break;
}
-
return ret;
}
@@ -486,11 +496,14 @@ static int psp_sw_fini(void *handle)
release_firmware(psp->ta_fw);
psp->ta_fw = NULL;
}
- if (adev->psp.cap_fw) {
+ if (psp->cap_fw) {
release_firmware(psp->cap_fw);
psp->cap_fw = NULL;
}
-
+ if (psp->toc_fw) {
+ release_firmware(psp->toc_fw);
+ psp->toc_fw = NULL;
+ }
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
@@ -498,6 +511,11 @@ static int psp_sw_fini(void *handle)
kfree(cmd);
cmd = NULL;
+ if (psp->km_ring.ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &psp->km_ring.ring_mem_mc_addr,
+ (void **)&psp->km_ring.ring_mem);
+
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -753,7 +771,7 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
@@ -766,6 +784,7 @@ static bool psp_skip_tmr(struct psp_context *psp)
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 10):
return true;
default:
return false;
@@ -812,7 +831,7 @@ static int psp_tmr_unload(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_unload_cmd_buf(psp, cmd);
- DRM_INFO("free PSP TMR buffer\n");
+ dev_info(psp->adev->dev, "free PSP TMR buffer\n");
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -2041,6 +2060,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->ras_drv)) &&
+ (psp->funcs->bootloader_load_ras_drv != NULL)) {
+ ret = psp_bootloader_load_ras_drv(psp);
+ if (ret) {
+ DRM_ERROR("PSP load ras_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -2401,7 +2429,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
- if (!ucode->fw)
+ if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
@@ -2411,20 +2439,7 @@ static bool fw_load_skip_check(struct psp_context *psp,
return true;
if (amdgpu_sriov_vf(psp->adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
- /*skip ucode loading in SRIOV VF */
+ amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
return true;
if (psp->autoload_supported &&
@@ -2498,7 +2513,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
/* Start rlc autoload after psp recieved all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
- AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
+ adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
@@ -3039,6 +3054,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dbg_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c32b74bd970f..58ce3ebb446c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -36,6 +36,7 @@
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
enum psp_shared_mem_size {
@@ -71,6 +72,7 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SOCDRV = 0xB0000,
PSP_BL__LOAD_DBGDRV = 0xC0000,
PSP_BL__LOAD_INTFDRV = 0xD0000,
+ PSP_BL__LOAD_RASDRV = 0xE0000,
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@@ -114,6 +116,7 @@ struct psp_funcs
int (*bootloader_load_soc_drv)(struct psp_context *psp);
int (*bootloader_load_intf_drv)(struct psp_context *psp);
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
+ int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp,
@@ -323,6 +326,7 @@ struct psp_context
struct psp_bin_desc soc_drv;
struct psp_bin_desc intf_drv;
struct psp_bin_desc dbg_drv;
+ struct psp_bin_desc ras_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -403,6 +407,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
#define psp_bootloader_load_dbg_drv(psp) \
((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
+#define psp_bootloader_load_ras_drv(psp) \
+ ((psp)->funcs->bootloader_load_ras_drv ? \
+ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ff5361f5c2d4..ccebd8e2a2d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
amdgpu_ras_query_error_status(adev, &info);
if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
if (amdgpu_ras_reset_error_status(adev, info.head.block))
dev_warn(adev->dev, "Failed to reset error counter and error status");
}
@@ -1949,6 +1950,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
}
@@ -2718,7 +2720,8 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
/* Need disable ras on all IPs here before ip [hw/sw]fini */
- amdgpu_ras_disable_all_features(adev, 0);
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
return 0;
}
@@ -2831,11 +2834,8 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
struct mce *m = (struct mce *)data;
struct amdgpu_device *adev = NULL;
uint32_t gpu_id = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst, channel_index = 0;
+ uint32_t umc_inst = 0, ch_inst = 0;
struct ras_err_data err_data = {0, 0, 0, NULL};
- struct eeprom_table_record err_rec;
- uint64_t retired_page;
/*
* If the error was generated in UMC_V2, which belongs to GPU UMCs,
@@ -2874,21 +2874,22 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
umc_inst, ch_inst);
+ err_data.err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if(!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record in mca notifier!\n");
+ return NOTIFY_DONE;
+ }
+
/*
* Translate UMC channel address to Physical address
*/
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
- + ch_inst];
-
- retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(m->addr);
-
- memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
- err_data.err_addr = &err_rec;
- amdgpu_umc_fill_error_record(&err_data, m->addr,
- retired_page, channel_index, umc_inst);
+ if (adev->umc.ras &&
+ adev->umc.ras->convert_ras_error_address)
+ adev->umc.ras->convert_ras_error_address(adev,
+ &err_data, 0, ch_inst, umc_inst, m->addr);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -2896,6 +2897,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
amdgpu_ras_save_bad_pages(adev);
}
+ kfree(err_data.err_addr);
return NOTIFY_OK;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c4283987bb1e..84c241b9a2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -38,6 +38,7 @@
#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
#define EEPROM_I2C_MADDR_ALDEBARAN 0x0
+#define EEPROM_I2C_MADDR_SMU_13_0_0 (0x54UL << 16)
/*
* The 2 macros bellow represent the actual size in bytes that
@@ -156,6 +157,15 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return false;
}
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ control->i2c_address = EEPROM_I2C_MADDR_SMU_13_0_0;
+ break;
+
+ default:
+ break;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index 32c86a0b145c..9da5ead50c90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -23,6 +23,7 @@
#include "amdgpu_reset.h"
#include "aldebaran.h"
+#include "sienna_cichlid.h"
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_handler *handler)
@@ -36,10 +37,15 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
{
int ret = 0;
+ adev->amdgpu_reset_level_mask = 0x1;
+
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_init(adev);
break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_init(adev);
+ break;
default:
break;
}
@@ -55,6 +61,9 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 2):
ret = aldebaran_reset_fini(adev);
break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_fini(adev);
+ break;
default:
break;
}
@@ -67,6 +76,12 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
{
struct amdgpu_reset_handler *reset_handler = NULL;
+ if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
+ return -ENOSYS;
+
+ if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
+ return -ENOSYS;
+
if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
@@ -83,6 +98,12 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
int ret;
struct amdgpu_reset_handler *reset_handler = NULL;
+ if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2))
+ return -ENOSYS;
+
+ if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags))
+ return -ENOSYS;
+
if (adev->reset_cntl)
reset_handler = adev->reset_cntl->get_reset_handler(
adev->reset_cntl, reset_context);
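amdgpu_reset_prepare_hwcontext() and amdgpu_reset_perform_reset() are now gated twice before any handler runs: a global reset level mask and a per-request skip flag, each bailing out with -ENOSYS so callers can fall back to another reset method. A minimal sketch of that double gate; the bit values below are assumptions, not the driver's definitions.

    /* Illustrative sketch only: capability mask plus per-request opt-out. */
    #include <errno.h>
    #include <stdio.h>

    #define LEVEL_MODE2     (1u << 1)   /* assumed bit layout */
    #define FLAG_SKIP_MODE2 (1u << 2)

    static int try_mode2_reset(unsigned int level_mask, unsigned long req_flags)
    {
            if (!(level_mask & LEVEL_MODE2))
                    return -ENOSYS;          /* mode2 disabled globally */
            if (req_flags & FLAG_SKIP_MODE2)
                    return -ENOSYS;          /* this request opted out */
            return 0;                        /* would run the handler here */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   try_mode2_reset(LEVEL_MODE2, 0),
                   try_mode2_reset(0, 0),
                   try_mode2_reset(LEVEL_MODE2, FLAG_SKIP_MODE2));
            /* prints: 0, -ENOSYS, -ENOSYS */
            return 0;
    }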
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index ffda1560c648..f5318fedf2f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -30,6 +30,8 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_MODE2_RESET = 2,
+ AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
};
struct amdgpu_reset_context {
@@ -111,7 +113,8 @@ static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *dom
static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
{
- kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+ if (domain)
+ kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
}
static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index d3558c34d406..3e316b013fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -405,6 +405,9 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
+ if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY))
+ return false;
+
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 6373bfb47d55..012b72d00e04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -272,3 +272,275 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ if (version_minor >= 0) {
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
+ }
+ }
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
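The new amdgpu_gfx_rlc_init_microcode() layers its parsers by header minor version: every v2.x blob gets the v2.0 pass, minors >= 1 and >= 2 add the save/restore and IRAM/DRAM passes, and 2.3 / 2.4 each add exactly one more. A stub sketch of that cumulative dispatch follows; the stubs only trace the calls and parse nothing.

    /* Illustrative sketch only: cumulative dispatch on a header minor version. */
    #include <stdio.h>

    static void init_v2_0(void) { puts("parse v2.0 core fields"); }
    static void init_v2_1(void) { puts("parse v2.1 save/restore lists"); }
    static void init_v2_2(void) { puts("parse v2.2 iram/dram images"); }
    static void init_v2_3(void) { puts("parse v2.3 rlcp/rlcv images"); }
    static void init_v2_4(void) { puts("parse v2.4 tap delay images"); }

    static int rlc_init_microcode(unsigned int major, unsigned int minor)
    {
            if (major < 2)
                    return -1;              /* only v2.x headers supported */

            init_v2_0();                    /* always */
            if (minor >= 1) init_v2_1();
            if (minor >= 2) init_v2_2();
            if (minor == 3) init_v2_3();
            if (minor == 4) init_v2_4();
            return 0;
    }

    int main(void)
    {
            return rlc_init_microcode(2, 4);
    }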
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 03ac36b2c2cf..23f060db9255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
-
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 42c1f050542f..3949b7e3907f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
@@ -150,3 +151,135 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
+{
+ int err = 0;
+ uint16_t version_major;
+ const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const struct sdma_firmware_header_v2_0 *hdr_v2;
+
+ err = amdgpu_ucode_validate(sdma_inst->fw);
+ if (err)
+ return err;
+
+ header = (const struct common_firmware_header *)
+ sdma_inst->fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ switch (version_major) {
+ case 1:
+ hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ break;
+ case 2:
+ hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sdma_inst->feature_version >= 20)
+ sdma_inst->burst_nop = true;
+
+ return 0;
+}
+
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ if (duplicate)
+ break;
+ }
+
+ memset((void *)adev->sdma.instance, 0,
+ sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
+}
+
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance,
+ bool duplicate)
+{
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ int err = 0, i;
+ const struct sdma_firmware_header_v2_0 *sdma_hdr;
+ uint16_t version_major;
+
+ err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ header = (const struct common_firmware_header *)
+ adev->sdma.instance[instance].fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ if ((duplicate && instance) || (!duplicate && version_major > 1))
+ return -EINVAL;
+
+ err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
+ if (err)
+ goto out;
+
+ if (duplicate) {
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ DRM_DEBUG("psp_load == '%s'\n",
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (version_major) {
+ case 1:
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!duplicate && (instance != i))
+ continue;
+ else {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+ info->fw = adev->sdma.instance[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+ break;
+ case 2:
+ sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
+ adev->sdma.instance[0].fw->data;
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
+ }
+ return err;
+}
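amdgpu_sdma_init_microcode() above loads one firmware blob, dispatches on the header's major version, and, when "duplicate" is set, clones instance 0's parsed state into the remaining SDMA instances. A simplified sketch of that dispatch-and-clone flow; the structs and values are stand-ins, not the driver's types.

    /* Illustrative sketch only: version dispatch, then clone instance 0. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_INSTANCES 8

    struct sdma_instance { uint32_t fw_version; int burst_nop; };

    static int init_instances(struct sdma_instance *inst, unsigned int num,
                              uint16_t version_major, uint32_t fw_version,
                              int duplicate)
    {
            switch (version_major) {
            case 1:
            case 2:
                    inst[0].fw_version = fw_version;
                    inst[0].burst_nop = 1;
                    break;
            default:
                    return -1;              /* unknown header layout */
            }

            if (duplicate)                  /* clone instance 0 into the rest */
                    for (unsigned int i = 1; i < num; i++)
                            memcpy(&inst[i], &inst[0], sizeof(inst[0]));
            return 0;
    }

    int main(void)
    {
            struct sdma_instance inst[MAX_INSTANCES] = { 0 };

            if (init_instances(inst, 2, 2, 0x1842, 1))
                    return 1;
            printf("instance1 fw 0x%x\n", inst[1].fw_version);
            return 0;
    }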
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 53ac3ebae8d6..d2d88279fefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,4 +124,8 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance, bool duplicate);
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 504af1b93bfa..090e66a1b284 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
@@ -315,6 +316,7 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
struct hlist_node *tmp;
struct dma_fence *f;
int i;
+
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
@@ -392,7 +394,7 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- unsigned i;
+ unsigned int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 06dfcf297a8d..5e6ddc7e101c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -140,8 +140,10 @@ TRACE_EVENT(amdgpu_bo_create,
);
TRACE_EVENT(amdgpu_cs,
- TP_PROTO(struct amdgpu_cs_parser *p, int i),
- TP_ARGS(p, i),
+ TP_PROTO(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib),
+ TP_ARGS(p, job, ib),
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, bo_list)
__field(u32, ring)
@@ -151,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
- __entry->dw = p->job->ibs[i].length_dw;
+ __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+ __entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- to_amdgpu_ring(p->entity->rq->sched));
+ to_amdgpu_ring(job->base.sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 134575a3893c..b1c455329023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -471,7 +471,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
adev = amdgpu_ttm_adev(bo->bdev);
- if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
@@ -1329,11 +1330,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->resource->num_pages;
struct dma_resv_iter resv_cursor;
- struct amdgpu_res_cursor cursor;
struct dma_fence *f;
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return ttm_bo_eviction_valuable(bo, place);
+
/* Swapout? */
if (bo->resource->mem_type == TTM_PL_SYSTEM)
return true;
@@ -1352,40 +1354,20 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return false;
}
- switch (bo->resource->mem_type) {
- case AMDGPU_PL_PREEMPT:
- /* Preemptible BOs don't own system resources managed by the
- * driver (pages, VRAM, GART space). They point to resources
- * owned by someone else (e.g. pageable memory in user mode
- * or a DMABuf). They are used in a preemptible context so we
- * can guarantee no deadlocks and good QoS in case of MMU
- * notifiers or DMABuf move notifiers from the resource owner.
- */
+ /* Preemptible BOs don't own system resources managed by the
+ * driver (pages, VRAM, GART space). They point to resources
+ * owned by someone else (e.g. pageable memory in user mode
+ * or a DMABuf). They are used in a preemptible context so we
+ * can guarantee no deadlocks and good QoS in case of MMU
+ * notifiers or DMABuf move notifiers from the resource owner.
+ */
+ if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
return false;
- case TTM_PL_TT:
- if (amdgpu_bo_is_amdgpu_bo(bo) &&
- amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
- return false;
- return true;
- case TTM_PL_VRAM:
- /* Check each drm MM node individually */
- amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
- &cursor);
- while (cursor.remaining) {
- if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
- && !(place->lpfn &&
- place->lpfn <= PFN_DOWN(cursor.start)))
- return true;
-
- amdgpu_res_next(&cursor, cursor.size);
- }
+ if (bo->resource->mem_type == TTM_PL_TT &&
+ amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
return false;
- default:
- break;
- }
-
return ttm_bo_eviction_valuable(bo, place);
}
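
The reworked amdgpu_ttm_bo_eviction_valuable() above replaces the per-memory-type switch with a series of early returns. A compact, self-contained sketch of the resulting decision order follows; bo_state and mem_type are illustrative types, not TTM structures, and the final return simply treats everything else as evictable.

#include <stdbool.h>
#include <stdio.h>

enum mem_type { MEM_SYSTEM, MEM_PREEMPT, MEM_TT, MEM_VRAM };

struct bo_state {
	enum mem_type mem_type;
	bool is_driver_bo;	/* BO owned by this driver */
	bool encrypted;		/* TT content must not be moved */
	bool busy;		/* unsignaled kernel fences pending */
};

/* Return true if the BO may be evicted to satisfy a new placement. */
static bool eviction_valuable(const struct bo_state *bo)
{
	if (!bo->is_driver_bo)
		return true;		/* defer to the generic policy */
	if (bo->mem_type == MEM_SYSTEM)
		return true;		/* swapout is always acceptable */
	if (bo->busy)
		return false;		/* still in active use */
	if (bo->mem_type == MEM_PREEMPT)
		return false;		/* resources owned by someone else */
	if (bo->mem_type == MEM_TT && bo->encrypted)
		return false;		/* encrypted GTT pages stay put */
	return true;			/* this sketch treats the rest as evictable */
}

int main(void)
{
	struct bo_state bo = { MEM_PREEMPT, true, false, false };

	printf("evictable: %d\n", eviction_valuable(&bo));
	return 0;
}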
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 939c8614f0e3..dd0bc649a57d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -164,70 +164,138 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
} else if (version_major == 2) {
const struct rlc_firmware_header_v2_0 *rlc_hdr =
container_of(hdr, struct rlc_firmware_header_v2_0, header);
+ const struct rlc_firmware_header_v2_1 *rlc_hdr_v2_1 =
+ container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ const struct rlc_firmware_header_v2_2 *rlc_hdr_v2_2 =
+ container_of(rlc_hdr_v2_1, struct rlc_firmware_header_v2_2, v2_1);
+ const struct rlc_firmware_header_v2_3 *rlc_hdr_v2_3 =
+ container_of(rlc_hdr_v2_2, struct rlc_firmware_header_v2_3, v2_2);
+ const struct rlc_firmware_header_v2_4 *rlc_hdr_v2_4 =
+ container_of(rlc_hdr_v2_3, struct rlc_firmware_header_v2_4, v2_3);
- DRM_DEBUG("ucode_feature_version: %u\n",
- le32_to_cpu(rlc_hdr->ucode_feature_version));
- DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
- DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
- DRM_DEBUG("save_and_restore_offset: %u\n",
- le32_to_cpu(rlc_hdr->save_and_restore_offset));
- DRM_DEBUG("clear_state_descriptor_offset: %u\n",
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
- DRM_DEBUG("avail_scratch_ram_locations: %u\n",
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
- DRM_DEBUG("reg_restore_list_size: %u\n",
- le32_to_cpu(rlc_hdr->reg_restore_list_size));
- DRM_DEBUG("reg_list_format_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_start));
- DRM_DEBUG("reg_list_format_separate_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
- DRM_DEBUG("starting_offsets_start: %u\n",
- le32_to_cpu(rlc_hdr->starting_offsets_start));
- DRM_DEBUG("reg_list_format_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
- DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- DRM_DEBUG("reg_list_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_size_bytes));
- DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
- DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
- DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
- DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
- if (version_minor == 1) {
- const struct rlc_firmware_header_v2_1 *v2_1 =
- container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ switch (version_minor) {
+ case 0:
+ /* rlc_hdr v2_0 */
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("reg_restore_list_size: %u\n",
+ le32_to_cpu(rlc_hdr->reg_restore_list_size));
+ DRM_DEBUG("reg_list_format_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_start));
+ DRM_DEBUG("reg_list_format_separate_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
+ DRM_DEBUG("starting_offsets_start: %u\n",
+ le32_to_cpu(rlc_hdr->starting_offsets_start));
+ DRM_DEBUG("reg_list_format_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
+ DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ DRM_DEBUG("reg_list_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes));
+ DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
+ DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
+ DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+ DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+ break;
+ case 1:
+ /* rlc_hdr v2_1 */
DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
- le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
+ le32_to_cpu(rlc_hdr_v2_1->reg_list_format_direct_reg_list_length));
DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_ucode_ver));
DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_feature_ver));
DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_size_bytes));
DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_offset_bytes));
DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_ucode_ver));
DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_feature_ver));
DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_size_bytes));
DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_offset_bytes));
DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_ucode_ver));
DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_feature_ver));
DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_size_bytes));
DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_offset_bytes));
+ break;
+ case 2:
+ /* rlc_hdr v2_2 */
+ DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_size_bytes));
+ DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_offset_bytes));
+ DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_size_bytes));
+ DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_offset_bytes));
+ break;
+ case 3:
+ /* rlc_hdr v2_3 */
+ DRM_DEBUG("rlcp_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_version));
+ DRM_DEBUG("rlcp_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_feature_version));
+ DRM_DEBUG("rlcp_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_size_bytes));
+ DRM_DEBUG("rlcp_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_offset_bytes));
+ DRM_DEBUG("rlcv_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_version));
+ DRM_DEBUG("rlcv_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_feature_version));
+ DRM_DEBUG("rlcv_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_size_bytes));
+ DRM_DEBUG("rlcv_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_offset_bytes));
+ break;
+ case 4:
+ /* rlc_hdr v2_4 */
+ DRM_DEBUG("global_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("global_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_offset_bytes));
+ break;
+ default:
+ DRM_ERROR("Unknown RLC v2 ucode: v2.%u\n", version_minor);
+ break;
}
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index ebed3f5226db..1c36235b4539 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -124,6 +124,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_SOC_DRV,
PSP_FW_TYPE_PSP_INTF_DRV,
PSP_FW_TYPE_PSP_DBG_DRV,
+ PSP_FW_TYPE_PSP_RAS_DRV,
};
/* version_major=2, version_minor=0 */
@@ -260,8 +261,12 @@ struct rlc_firmware_header_v2_2 {
/* version_major=2, version_minor=3 */
struct rlc_firmware_header_v2_3 {
struct rlc_firmware_header_v2_2 v2_2;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
uint32_t rlcp_ucode_size_bytes;
uint32_t rlcp_ucode_offset_bytes;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t rlcv_ucode_size_bytes;
uint32_t rlcv_ucode_offset_bytes;
};
@@ -390,6 +395,7 @@ union amdgpu_firmware_header {
struct rlc_firmware_header_v2_1 rlc_v2_1;
struct rlc_firmware_header_v2_2 rlc_v2_2;
struct rlc_firmware_header_v2_3 rlc_v2_3;
+ struct rlc_firmware_header_v2_4 rlc_v2_4;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct sdma_firmware_header_v2_0 sdma_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3629d8f292ef..2fb4951a6433 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -22,6 +22,8 @@
#define __AMDGPU_UMC_H__
#include "amdgpu_ras.h"
+#define UMC_INVALID_ADDR 0x1ULL
+
/*
* (addr / 256) * 4096, the higher 26 bits in ErrorAddr
* is the index of 4KB block
@@ -51,6 +53,10 @@ struct amdgpu_umc_ras {
struct amdgpu_ras_block_object ras_block;
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*convert_ras_error_address)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset, uint32_t ch_inst,
+ uint32_t umc_inst, uint64_t mca_addr);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f36e4f08db6d..0b52af415b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = false;
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 60c608144480..253ea6b159df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,6 +161,8 @@
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -170,6 +172,9 @@
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -317,12 +322,26 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
+struct amdgpu_fw_shared_rb_setup {
+ uint32_t is_rb_enabled_flags;
+ uint32_t rb_addr_lo;
+ uint32_t rb_addr_hi;
+ uint32_t rb_size;
+ uint32_t rb4_addr_lo;
+ uint32_t rb4_addr_hi;
+ uint32_t rb4_size;
+ uint32_t reserved[6];
+};
+
struct amdgpu_vcn4_fw_shared {
uint32_t present_flag_0;
uint8_t pad[12];
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
+ struct amdgpu_fw_shared_rb_setup rb_setup;
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
};
struct amdgpu_vcn_fwlog {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9be57389301b..e4af40b9a8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -690,7 +690,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
}
}
-
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -707,6 +706,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
+ case CHIP_IP_DISCOVERY:
reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
break;
default: /* other chip doesn't support SRIOV */
@@ -750,6 +750,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_NAVI10:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
+ case CHIP_IP_DISCOVERY:
nv_set_virt_ops(adev);
/* try send GPU_INIT_DATA request to host */
amdgpu_virt_request_init_data(adev);
@@ -807,6 +808,60 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
return mode;
}
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
+{
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ /* no VF autoload; whitelist */
+ if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
+ ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ case IP_VERSION(13, 0, 10):
+ /* whitelist */
+ if (ucode_id == AMDGPU_UCODE_ID_CAP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
+ || ucode_id == AMDGPU_UCODE_ID_VCN1
+ || ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ default:
+ /* legacy blacklist */
+ if (ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
+ }
+}
+
void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
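
amdgpu_virt_fw_load_skip_check() above switches between a per-IP whitelist (newer MP0 versions: only the listed ucodes are still loaded by the guest) and a legacy blacklist (the listed ucodes are provided by the host). The following is a small sketch of that dual policy with made-up ucode IDs and version numbers, not the driver's actual tables.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ucode IDs; the real enum lives in amdgpu_ucode.h. */
enum ucode_id { UCODE_VCN, UCODE_SDMA0, UCODE_SMC, UCODE_MES };

/*
 * Newer IP versions keep a whitelist of ucodes the guest still loads itself;
 * older ones keep a blacklist of ucodes the host loads on the guest's behalf.
 */
static bool fw_load_skip(unsigned int mp0_major, enum ucode_id id)
{
	if (mp0_major >= 13) {
		/* whitelist: skip everything that is not explicitly listed */
		return !(id == UCODE_VCN || id == UCODE_MES);
	}
	/* legacy blacklist: skip only what the host provides */
	return id == UCODE_SDMA0 || id == UCODE_SMC;
}

int main(void)
{
	printf("skip SMC on legacy: %d\n", fw_load_skip(11, UCODE_SMC));
	printf("skip VCN on v13:    %d\n", fw_load_skip(13, UCODE_VCN));
	return 0;
}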
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 239f232f9c02..d94c31e68a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -253,6 +253,9 @@ struct amdgpu_virt {
uint32_t decode_max_frame_pixels;
uint32_t encode_max_dimension_pixels;
uint32_t encode_max_frame_pixels;
+
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
};
struct amdgpu_video_codec_info;
@@ -343,4 +346,6 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
u32 acc_flags, u32 hwip);
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
u32 offset, u32 acc_flags, u32 hwip);
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
+ uint32_t ucode_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 576849e95296..f4b5301ea2a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -282,8 +282,8 @@ static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 59cac347baa3..83b0c5d86e48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -183,10 +183,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -198,7 +200,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -211,7 +215,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -225,9 +231,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -240,10 +246,13 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
- if (vm_bo->bo->parent)
+ if (vm_bo->bo->parent) {
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- else
+ spin_unlock(&vm_bo->vm->status_lock);
+ } else {
amdgpu_vm_bo_idle(vm_bo);
+ }
}
/**
@@ -256,9 +265,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -363,12 +372,20 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
- struct amdgpu_vm_bo_base *bo_base, *tmp;
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *shadow;
+ struct amdgpu_bo *bo;
int r;
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
- struct amdgpu_bo *bo = bo_base->bo;
- struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+ shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
@@ -385,7 +402,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -406,13 +425,18 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
+ bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
- return ret && list_empty(&vm->evicted);
+ spin_lock(&vm->status_lock);
+ empty = list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
+
+ return ret && empty;
}
/**
@@ -680,9 +704,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm_update_params params;
struct amdgpu_vm_bo_base *entry;
bool flush_tlb_needed = false;
+ LIST_HEAD(relocated);
int r, idx;
- if (list_empty(&vm->relocated))
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->relocated, &relocated);
+ spin_unlock(&vm->status_lock);
+
+ if (list_empty(&relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -697,7 +726,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &vm->relocated, vm_status) {
+ list_for_each_entry(entry, &relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -713,9 +742,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- while (!list_empty(&vm->relocated)) {
- entry = list_first_entry(&vm->relocated,
- struct amdgpu_vm_bo_base,
+ while (!list_empty(&relocated)) {
+ entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
vm_status);
amdgpu_vm_bo_idle(entry);
}
@@ -912,6 +940,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
{
struct amdgpu_bo_va *bo_va, *tmp;
+ spin_lock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -936,7 +965,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -949,7 +977,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
@@ -1278,24 +1306,29 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_bo_va *bo_va, *tmp;
+ struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
bool clear;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
/* Per VM BOs never need to be cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
+ spin_lock(&vm->status_lock);
}
- spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!amdgpu_vm_debug && dma_resv_trylock(resv))
@@ -1310,9 +1343,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (!clear)
dma_resv_unlock(resv);
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -1387,7 +1420,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!bo_va->base.moved) {
- list_move(&bo_va->base.vm_status, &vm->moved);
+ amdgpu_vm_bo_moved(&bo_va->base);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@@ -1763,9 +1796,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2019,9 +2052,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->invalidated);
- spin_lock_init(&vm->invalidated_lock);
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
@@ -2223,6 +2258,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ flush_work(&vm->pt_free_work);
+
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_set_pasid(adev, vm, 0);
@@ -2484,8 +2521,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
- flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
- AMDGPU_PTE_TF;
+ flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
@@ -2548,6 +2584,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -2585,7 +2622,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -2600,7 +2636,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
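
The amdgpu_vm changes above fold the old invalidated_lock into a single status_lock and take it around every move between the per-VM status lists; long-running loops drop the lock while processing an entry and reacquire it before touching the list again. Below is a minimal pthread sketch of that lock-around-list-move pattern; list_node, vm_state and bo_mark_moved are illustrative names only.

#include <pthread.h>
#include <stdio.h>

/* Minimal intrusive doubly linked list, standing in for struct list_head. */
struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *head)
{
	head->prev = head;
	head->next = head;
}

static void list_del_node(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail_node(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct vm_state {
	pthread_mutex_t status_lock;	/* one lock for every status list */
	struct list_node moved, idle, invalidated;
};

/* Move a BO's status node to the "moved" list under the lock. */
static void bo_mark_moved(struct vm_state *vm, struct list_node *bo_status)
{
	pthread_mutex_lock(&vm->status_lock);
	list_del_node(bo_status);
	list_add_tail_node(bo_status, &vm->moved);
	pthread_mutex_unlock(&vm->status_lock);
}

int main(void)
{
	struct vm_state vm = { .status_lock = PTHREAD_MUTEX_INITIALIZER };
	struct list_node bo;

	list_init(&vm.moved);
	list_init(&vm.idle);
	list_init(&vm.invalidated);

	/* start the BO on the idle list, then mark it moved */
	list_add_tail_node(&bo, &vm.idle);
	bo_mark_moved(&vm, &bo);

	printf("moved list non-empty: %d\n", vm.moved.next == &bo);
	return 0;
}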
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 9ecb7f663e19..83acb7bd80fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -254,6 +254,9 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
+ /* Lock to protect vm_bo add/del/move on all lists of vm */
+ spinlock_t status_lock;
+
/* BOs who needs a validation */
struct list_head evicted;
@@ -268,7 +271,6 @@ struct amdgpu_vm {
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- spinlock_t invalidated_lock;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
@@ -276,6 +278,10 @@ struct amdgpu_vm {
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
+ /* PT BOs scheduled to be freed and filled with zero if vm_resv is not held */
+ struct list_head pt_freed;
+ struct work_struct pt_free_work;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -471,6 +477,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
+void amdgpu_vm_pt_free_work(struct work_struct *work);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 88de9f0d4728..358b91243e37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -637,10 +637,34 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
entry->bo->vm_bo = NULL;
+
+ spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
+ spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
+void amdgpu_vm_pt_free_work(struct work_struct *work)
+{
+ struct amdgpu_vm_bo_base *entry, *next;
+ struct amdgpu_vm *vm;
+ LIST_HEAD(pt_freed);
+
+ vm = container_of(work, struct amdgpu_vm, pt_free_work);
+
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->pt_freed, &pt_freed);
+ spin_unlock(&vm->status_lock);
+
+ /* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
+ amdgpu_bo_reserve(vm->root.bo, true);
+
+ list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
+ amdgpu_vm_pt_free(entry);
+
+ amdgpu_bo_unreserve(vm->root.bo);
+}
+
/**
* amdgpu_vm_pt_free_dfs - free PD/PT levels
*
@@ -652,11 +676,24 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
*/
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start)
+ struct amdgpu_vm_pt_cursor *start,
+ bool unlocked)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;
+ if (unlocked) {
+ spin_lock(&vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ list_move(&entry->vm_status, &vm->pt_freed);
+
+ if (start)
+ list_move(&start->entry->vm_status, &vm->pt_freed);
+ spin_unlock(&vm->status_lock);
+ schedule_work(&vm->pt_free_work);
+ return;
+ }
+
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
amdgpu_vm_pt_free(entry);
@@ -673,7 +710,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL);
+ amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
}
/**
@@ -966,7 +1003,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
if (cursor.entry->bo) {
params->table_freed = true;
amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor);
+ &cursor,
+ params->unlocked);
}
amdgpu_vm_pt_next(adev, &cursor);
}
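
amdgpu_vm_pt_free_work() above defers freeing page-table BOs to a workqueue: callers that cannot take the root reservation splice entries onto vm->pt_freed under status_lock, and the worker later splices them back out under the same lock and frees them with the root reserved. The following is a userspace sketch of that queue-then-splice pattern; the names are illustrative and no kernel APIs are involved.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A singly linked pending list is enough for this sketch. */
struct pt_entry {
	struct pt_entry *next;
	int id;
};

struct vm_sketch {
	pthread_mutex_t status_lock;
	struct pt_entry *pt_freed;	/* entries queued for deferred free */
};

/* Producer side: queue an entry instead of freeing it in place. */
static void queue_pt_free(struct vm_sketch *vm, struct pt_entry *e)
{
	pthread_mutex_lock(&vm->status_lock);
	e->next = vm->pt_freed;
	vm->pt_freed = e;
	pthread_mutex_unlock(&vm->status_lock);
}

/* Worker side: splice the whole list under the lock, free outside of it. */
static void pt_free_work(struct vm_sketch *vm)
{
	struct pt_entry *local, *next;

	pthread_mutex_lock(&vm->status_lock);
	local = vm->pt_freed;
	vm->pt_freed = NULL;
	pthread_mutex_unlock(&vm->status_lock);

	for (; local; local = next) {
		next = local->next;
		printf("freeing PT entry %d\n", local->id);
		free(local);
	}
}

int main(void)
{
	struct vm_sketch vm = { .status_lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	for (i = 0; i < 3; i++) {
		struct pt_entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->id = i;
		queue_pt_free(&vm, e);
	}
	pt_free_work(&vm);
	return 0;
}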
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 1fd3cbca20a2..2b0669c464f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -112,7 +112,8 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
swap(p->vm->last_unlocked, tmp);
dma_fence_put(tmp);
} else {
- amdgpu_bo_fence(p->vm->root.bo, f, true);
+ dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
+ DMA_RESV_USAGE_BOOKKEEP);
}
if (fence && !p->immediate)
@@ -211,12 +212,15 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
int r;
/* Wait for PD/PT moves to be completed */
- dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
- DMA_RESV_USAGE_KERNEL, fence) {
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
r = amdgpu_sync_fence(&p->job->sync, fence);
- if (r)
+ if (r) {
+ dma_resv_iter_end(&cursor);
return r;
+ }
}
+ dma_resv_iter_end(&cursor);
do {
ndw = p->num_dw_left;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 28ec5f8ac1c1..73a517bcf5c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -721,6 +721,72 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
+ *
+ * @man: TTM memory type manager
+ * @res: The resource to test
+ * @place: The place to test against
+ * @size: Size of the new allocation
+ *
+ * Test each drm buddy block for intersection with the given place, for the
+ * eviction decision.
+ */
+static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &mgr->blocks, link) {
+ unsigned long fpfn =
+ amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
+
+ if (place->fpfn < lpfn &&
+ (!place->lpfn || place->lpfn > fpfn))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
+ *
+ * @man: TTM memory type manager
+ * @res: The resource to test
+ * @place: The place to test against
+ * @size: Size of the new allocation
+ *
+ * Test each drm buddy block for placement compatibility.
+ */
+static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &mgr->blocks, link) {
+ unsigned long fpfn =
+ amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
+
+ if (fpfn < place->fpfn ||
+ (place->lpfn && lpfn > place->lpfn))
+ return false;
+ }
+
+ return true;
+}
+
+/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
@@ -753,6 +819,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
+ .intersects = amdgpu_vram_mgr_intersects,
+ .compatible = amdgpu_vram_mgr_compatible,
.debug = amdgpu_vram_mgr_debug
};
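
The two new VRAM manager callbacks above walk every drm buddy block of a resource and compare its page-frame range against the requested place: intersects() looks for any overlap, compatible() requires every block to fit inside the place. Below is a small sketch of those two range tests on plain integer ranges; pfn_range is a simplified stand-in, not the TTM or buddy allocator types.

#include <stdbool.h>
#include <stdio.h>

struct pfn_range {
	unsigned long fpfn;	/* first page frame, inclusive */
	unsigned long lpfn;	/* last page frame, exclusive; 0 = unbounded */
};

/* True if the block overlaps the requested place at all. */
static bool block_intersects(const struct pfn_range *block,
			     const struct pfn_range *place)
{
	return place->fpfn < block->lpfn &&
	       (!place->lpfn || place->lpfn > block->fpfn);
}

/* True if the block lies entirely inside the requested place. */
static bool block_compatible(const struct pfn_range *block,
			     const struct pfn_range *place)
{
	return block->fpfn >= place->fpfn &&
	       (!place->lpfn || block->lpfn <= place->lpfn);
}

int main(void)
{
	struct pfn_range block = { 256, 512 };
	struct pfn_range visible = { 0, 384 };	/* e.g. CPU-visible VRAM */

	printf("intersects: %d\n", block_intersects(&block, &visible));
	printf("compatible: %d\n", block_compatible(&block, &visible));
	return 0;
}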
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index f2aebbf3fbe3..47159e9a0884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -392,12 +392,20 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
}
/**
+ * Only init hive->reset_domain for non-SRIOV configurations. For SRIOV, the
+ * host driver decides how to reset the GPU, either through FLR or chain
+ * reset. The guest side will get individual notifications from the host for
+ * the FLR if necessary.
+ */
+ if (!amdgpu_sriov_vf(adev)) {
+ /**
* Avoid recreating reset domain when hive is reconstructed for the case
- * of reset the devices in the XGMI hive during probe for SRIOV
+ * of resetting the devices in the XGMI hive during probe for a passthrough GPU
* See https://www.spinics.net/lists/amd-gfx/msg58836.html
*/
- if (adev->reset_domain->type != XGMI_HIVE) {
- hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+ if (adev->reset_domain->type != XGMI_HIVE) {
+ hive->reset_domain =
+ amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
if (!hive->reset_domain) {
dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
ret = -ENOMEM;
@@ -406,9 +414,10 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
hive = NULL;
goto pro_end;
}
- } else {
- amdgpu_reset_get_reset_domain(adev->reset_domain);
- hive->reset_domain = adev->reset_domain;
+ } else {
+ amdgpu_reset_get_reset_domain(adev->reset_domain);
+ hive->reset_domain = adev->reset_domain;
+ }
}
hive->hive_id = adev->gmc.xgmi.hive_id;
@@ -504,6 +513,9 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
{
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
/* Each psp need to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
atomic_read(&hive->number_devices),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 552e6fb55aa8..30dcc1681b4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -43,6 +43,7 @@ struct amdgpu_hive_info {
} pstate;
struct amdgpu_reset_domain *reset_domain;
+ uint32_t device_remove_count;
};
struct amdgpu_pcs_ras_field {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index fa7421afb9a6..6be9ac2b9c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -26,6 +26,8 @@
#include <linux/pci.h>
+#include <acpi/video.h>
+
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -182,7 +184,12 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
return;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
- return;
+ goto register_acpi_backlight;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n");
+ goto register_acpi_backlight;
+ }
pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
if (!pdata) {
@@ -218,6 +225,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
error:
kfree(pdata);
return;
+
+register_acpi_backlight:
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
}
void
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a2a4dc1844c0..af94ac580d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3943,56 +3943,6 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
-
-static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v10_0_init_tap_delays_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_4 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
-}
-
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
bool ret = false;
@@ -4028,12 +3978,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
char fw_name[40];
char *wks = "";
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -4091,9 +4036,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -4102,9 +4045,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -4113,69 +4054,27 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
+ /* don't treat a validation failure as fatal; there are apparently
+ * firmware files in the wild with an incorrect size in the header
+ */
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ dev_dbg(adev->dev,
+ "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v10_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 4) {
- gfx_v10_0_init_tap_delays_microcode(adev);
- }
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
@@ -4185,9 +4084,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -4195,154 +4093,18 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
-
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
dev_err(adev->dev,
- "gfx10: Failed to load firmware \"%s\"\n",
+ "gfx10: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -5971,6 +5733,9 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
}
+ if (adev->job_hang && !enable)
+ return 0;
+
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
break;
@@ -7569,8 +7334,10 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
-
- return amdgpu_ring_test_helper(kiq_ring);
+ if (!adev->job_hang)
+ return amdgpu_ring_test_helper(kiq_ring);
+ else
+ return 0;
}
#endif
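Note on the two gfx_v10_0 hunks above: when adev->job_hang is set, the disable paths skip the CP_STAT idle poll and the KIQ unmap-queues ring test, since a hung engine cannot complete either handshake and the reset path takes over. A minimal illustrative sketch of the guard (not part of the patch; names are taken from the hunks above):

	/* Skip hardware handshakes that cannot succeed while recovering
	 * from a job hang; the caller falls through to the reset path.
	 */
	if (adev->job_hang)
		return 0;

	return amdgpu_ring_test_helper(kiq_ring);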
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index f6b1bb40e503..251109723ab6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -73,21 +73,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
-
-static const struct soc15_reg_golden golden_settings_gc_11_0[] =
-{
- /* Pending on emulation bring up */
-};
-
-static const struct soc15_reg_golden golden_settings_gc_11_0_0[] =
-{
- /* Pending on emulation bring up */
-};
-
-static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] =
-{
- /* Pending on emulation bring up */
-};
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
@@ -269,42 +258,17 @@ static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
}
-static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_rlc_spm_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0));
- break;
- default:
- break;
- }
-}
-
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
- soc15_program_register_sequence(adev,
- golden_settings_gc_11_0_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0_0));
- break;
case IP_VERSION(11, 0, 1):
soc15_program_register_sequence(adev,
- golden_settings_gc_11_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
- soc15_program_register_sequence(adev,
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
break;
default:
break;
}
- gfx_v11_0_init_spm_golden_registers(adev);
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@@ -474,61 +438,12 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_3 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
- adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
- adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
- adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
-}
-
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
char ucode_prefix[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
- const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -549,14 +464,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw->data, 2, 0);
if (adev->gfx.rs64_enable) {
dev_info(adev->dev, "CP RS64 enable\n");
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
@@ -567,14 +479,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
}
if (!amdgpu_sriov_vf(adev)) {
@@ -583,58 +492,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v11_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v11_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 3)
- gfx_v11_0_init_rlcp_rlcv_microcode(adev);
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
@@ -645,190 +510,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
- } else {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
- }
- }
-
out:
if (err) {
dev_err(adev->dev,
- "gfx11: Failed to load firmware \"%s\"\n",
+ "gfx11: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
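The deletions in this hunk drop the open-coded population of adev->firmware.ucode[] in favor of the shared helpers amdgpu_gfx_cp_init_microcode() and amdgpu_gfx_rlc_init_microcode() visible in the added lines. For reference, a simplified sketch of the bookkeeping those helpers now centralize (illustrative only; the field names come from the removed lines, the helper name is hypothetical):

	static void ucode_table_add(struct amdgpu_device *adev, int id,
				    const struct firmware *fw, u32 size_bytes)
	{
		struct amdgpu_firmware_info *info = &adev->firmware.ucode[id];

		/* record the blob and account for its page-aligned size */
		info->ucode_id = id;
		info->fw = fw;
		adev->firmware.fw_size += ALIGN(size_bytes, PAGE_SIZE);
	}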
@@ -1140,7 +838,6 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
- .init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
};
@@ -1151,6 +848,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -1586,6 +1284,7 @@ static int gfx_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -2760,6 +2459,21 @@ static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
mec_hdr->ucode_start_addr_hi >> 2);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
+
+ /* reset mec pipe */
+ tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
+
+ /* clear mec pipe reset */
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
+ tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
+ WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
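The added block above pulses all four MEC pipe reset bits: one read-modify-write asserts MEC_PIPE0..3_RESET, and a second write clears them again. A minimal sketch of the assert/release pattern for a single pipe (register and field names as in the hunk):

	u32 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);	/* assert reset */

	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);	/* release reset */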
@@ -5260,6 +4974,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5273,6 +4989,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
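gfx_v11_0_update_spm_vmid() now brackets the RLC_SPM_MC_CNTL access with amdgpu_gfx_off_ctrl(), presumably so the GFX block cannot enter GFXOFF while the register is read and written. The bracketing pattern, in sketch form (the register programming in the middle is elided, as in the hunks above):

	amdgpu_gfx_off_ctrl(adev, false);	/* keep GFX out of GFXOFF */

	/* ... read-modify-write regRLC_SPM_MC_CNTL here ... */

	amdgpu_gfx_off_ctrl(adev, true);	/* allow GFXOFF again */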
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc9c1043244c..0320be4a5fc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -126,6 +126,8 @@ MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
@@ -1089,27 +1091,6 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.me_fw_write_wait = false;
@@ -1271,9 +1252,6 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
@@ -1282,9 +1260,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -1293,9 +1269,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -1304,37 +1278,12 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -1351,11 +1300,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
uint32_t smu_version;
@@ -1384,92 +1329,17 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
- goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_0_init_rlc_ext_microcode(adev);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->gfx.rlc.is_rlc_v2_1 &&
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
- }
-
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
@@ -1492,35 +1362,34 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1530,49 +1399,12 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- /* TODO: Determine if MEC2 JT FW loading can be removed
- for all GFX V9 asic and above */
- if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
- }
-
out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
@@ -5597,7 +5429,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
- cur = (ring->wptr & ring->buf_mask) - 1;
+ cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
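The one-line gfx_v9_0 fix above matters when the write pointer sits exactly on a ring-buffer boundary: with wptr a multiple of (buf_mask + 1), the old expression (ring->wptr & ring->buf_mask) - 1 underflows to 0xffffffff instead of pointing at the last slot, while (ring->wptr - 1) & ring->buf_mask wraps correctly. For example, with buf_mask = 0xff and wptr = 0x100: old form (0x100 & 0xff) - 1 = 0xffffffff; new form (0x100 - 1) & 0xff = 0xff.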
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index d8c531581116..8cf53e039c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -576,6 +576,111 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
}
}
+static void gfxhub_v2_1_save_regs(struct amdgpu_device *adev)
+{
+ int i;
+ adev->gmc.VM_L2_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
+ adev->gmc.VM_L2_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_LO32 = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_LO32);
+ adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_HI32 = RREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_HI32);
+ adev->gmc.VM_L2_PROTECTION_FAULT_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
+ adev->gmc.VM_L2_PROTECTION_FAULT_CNTL2 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL2);
+ adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL3 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL3);
+ adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL4 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL4);
+ adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_LO32 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_LO32);
+ adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_HI32 = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_HI32);
+ adev->gmc.VM_DEBUG = RREG32_SOC15(GC, 0, mmGCVM_DEBUG);
+ adev->gmc.VM_L2_MM_GROUP_RT_CLASSES = RREG32_SOC15(GC, 0, mmGCVM_L2_MM_GROUP_RT_CLASSES);
+ adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID = RREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID);
+ adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID2 = RREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID2);
+ adev->gmc.VM_L2_CACHE_PARITY_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_CACHE_PARITY_CNTL);
+ adev->gmc.VM_L2_IH_LOG_CNTL = RREG32_SOC15(GC, 0, mmGCVM_L2_IH_LOG_CNTL);
+
+ for (i = 0; i <= 15; i++) {
+ adev->gmc.VM_CONTEXT_CNTL[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, i * 2);
+ adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[i] = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, i * 2);
+ }
+
+ adev->gmc.MC_VM_MX_L1_TLB_CNTL = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
+}
+
+static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev)
+{
+ int i;
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, adev->gmc.VM_L2_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, adev->gmc.VM_L2_CNTL2);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_CNTL, adev->gmc.VM_DUMMY_PAGE_FAULT_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_LO32, adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_LO32);
+ WREG32_SOC15(GC, 0, mmGCVM_DUMMY_PAGE_FAULT_ADDR_HI32, adev->gmc.VM_DUMMY_PAGE_FAULT_ADDR_HI32);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, adev->gmc.VM_L2_PROTECTION_FAULT_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL2, adev->gmc.VM_L2_PROTECTION_FAULT_CNTL2);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL3, adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL3);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_MM_CNTL4, adev->gmc.VM_L2_PROTECTION_FAULT_MM_CNTL4);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_LO32, adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_LO32);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_ADDR_HI32, adev->gmc.VM_L2_PROTECTION_FAULT_ADDR_HI32);
+ WREG32_SOC15(GC, 0, mmGCVM_DEBUG, adev->gmc.VM_DEBUG);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_MM_GROUP_RT_CLASSES, adev->gmc.VM_L2_MM_GROUP_RT_CLASSES);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID, adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_BANK_SELECT_RESERVED_CID2, adev->gmc.VM_L2_BANK_SELECT_RESERVED_CID2);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CACHE_PARITY_CNTL, adev->gmc.VM_L2_CACHE_PARITY_CNTL);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_IH_LOG_CNTL, adev->gmc.VM_L2_IH_LOG_CNTL);
+
+ for (i = 0; i <= 15; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL, i, adev->gmc.VM_CONTEXT_CNTL[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[i]);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, i * 2, adev->gmc.VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[i]);
+ }
+
+ WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, adev->gmc.vram_start >> 24);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, adev->gmc.vram_end >> 24);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, adev->gmc.MC_VM_MX_L1_TLB_CNTL);
+}
+
+static void gfxhub_v2_1_halt(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
+ uint32_t tmp;
+ int time = 1000;
+
+ gfxhub_v2_1_set_fault_enable_default(adev, false);
+
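+ /* Make every VM context's address range empty (start above end) so the
+ * hub accepts no new translations, then wait below for the EA clients
+ * to go idle.
+ */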
+ for (i = 0; i <= 14; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, ~0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, ~0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ 0);
+ }
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+ while ((tmp & (GRBM_STATUS2__EA_BUSY_MASK |
+ GRBM_STATUS2__EA_LINK_BUSY_MASK)) != 0 &&
+ time) {
+ udelay(100);
+ time--;
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
+ }
+
+ if (!time) {
+ DRM_WARN("failed to wait for GRBM(EA) idle\n");
+ }
+}
+
const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.get_fb_location = gfxhub_v2_1_get_fb_location,
.get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
@@ -586,4 +691,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
.init = gfxhub_v2_1_init,
.get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
.utcl2_harvest = gfxhub_v2_1_utcl2_harvest,
+ .mode2_save_regs = gfxhub_v2_1_save_regs,
+ .mode2_restore_regs = gfxhub_v2_1_restore_regs,
+ .halt = gfxhub_v2_1_halt,
};
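The new gfxhub_v2_1 mode2_save_regs/mode2_restore_regs callbacks snapshot and replay the GC VM/L2 configuration around a mode-2 reset, and halt() blocks new translations before polling GRBM_STATUS2 for EA idle on a bounded budget of 1000 x 100 us (100 ms). A minimal sketch of that bounded-poll idiom (illustrative only; names as in the hunk above):

	int time = 1000;
	u32 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);

	while ((tmp & (GRBM_STATUS2__EA_BUSY_MASK |
		       GRBM_STATUS2__EA_LINK_BUSY_MASK)) && time) {
		udelay(100);
		time--;
		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
	}
	if (!time)
		DRM_WARN("failed to wait for GRBM(EA) idle\n");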
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
new file mode 100644
index 000000000000..5d3fffd4929f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "gfxhub_v3_0_3.h"
+
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+#include "navi10_enum.h"
+#include "soc15_common.h"
+
+#define regGCVM_L2_CNTL3_DEFAULT 0x80100007
+#define regGCVM_L2_CNTL4_DEFAULT 0x000000c1
+#define regGCVM_L2_CNTL5_DEFAULT 0x00003fe0
+
+static const char *gfxhub_client_ids[] = {
+ "CB/DB",
+ "Reserved",
+ "GE1",
+ "GE2",
+ "CPF",
+ "CPC",
+ "CPG",
+ "RLC",
+ "TCP",
+ "SQC (inst)",
+ "SQC (data)",
+ "SQG",
+ "Reserved",
+ "SDMA0",
+ "SDMA1",
+ "GCR",
+ "SDMA2",
+ "SDMA3",
+};
+
+static uint32_t gfxhub_v3_0_3_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vmid */
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
+static void
+gfxhub_v3_0_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ u32 cid = REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID);
+
+ dev_err(adev->dev,
+ "GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
+ status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
+ cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
+ cid);
+ dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
+ dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
+ dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
+ dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
+ dev_err(adev->dev, "\t RW: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, RW));
+}
+
+static u64 gfxhub_v3_0_3_get_fb_location(struct amdgpu_device *adev)
+{
+ u64 base = RREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE);
+
+ base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ return base;
+}
+
+static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev)
+{
+ return (u64)RREG32_SOC15(GC, 0, regGCMC_VM_FB_OFFSET) << 24;
+}
+
+static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+}
+
+static void gfxhub_v3_0_3_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v3_0_3_setup_vm_pt_regs(adev, 0, pt_base);
+
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+}
+
+static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+}
+
+
+static void gfxhub_v3_0_3_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC); /* UC, uncached */
+
+ WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void gfxhub_v3_0_3_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
+ ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
+ /* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
+ L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL2, tmp);
+
+ tmp = regGCVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, tmp);
+
+ tmp = regGCVM_L2_CNTL4_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL4, tmp);
+
+ tmp = regGCVM_L2_CNTL5_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL5, tmp);
+}
+
+static void gfxhub_v3_0_3_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL, tmp);
+}
+
+static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0xFFFFFFFF);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+
+}
+
+static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ adev->vm_manager.num_level);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !amdgpu_noretry);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+
+ hub->vm_cntx_cntl = tmp;
+}
+
+static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ unsigned i;
+
+ for (i = 0 ; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ /*
+ * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+ * VF copy registers, so the vbios post doesn't program them;
+ * the SRIOV driver needs to program them instead.
+ */
+ WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ gfxhub_v3_0_3_init_gart_aperture_regs(adev);
+ gfxhub_v3_0_3_init_system_aperture_regs(adev);
+ gfxhub_v3_0_3_init_tlb_regs(adev);
+ gfxhub_v3_0_3_init_cache_regs(adev);
+
+ gfxhub_v3_0_3_enable_system_domain(adev);
+ gfxhub_v3_0_3_disable_identity_aperture(adev);
+ gfxhub_v3_0_3_setup_vmid_config(adev);
+ gfxhub_v3_0_3_program_invalidation(adev);
+
+ return 0;
+}
+
+static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 0);
+ WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ /* Setup L2 cache */
+ WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, 0);
+}
+
+/**
+ * gfxhub_v3_0_3_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void gfxhub_v3_0_3_set_fault_enable_default(struct amdgpu_device *adev,
+ bool value)
+{
+ u32 tmp;
+
+ /* These registers are not accessible to VF-SRIOV.
+ * The PF will program them instead.
+ */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+ WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = {
+ .print_l2_protection_fault_status = gfxhub_v3_0_3_print_l2_protection_fault_status,
+ .get_invalidate_req = gfxhub_v3_0_3_get_invalidate_req,
+};
+
+static void gfxhub_v3_0_3_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(GC, 0,
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_SEM);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regGCVM_CONTEXT1_CNTL - regGCVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regGCVM_INVALIDATE_ENG1_REQ -
+ regGCVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+
+ hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
+ hub->vmhub_funcs = &gfxhub_v3_0_3_vmhub_funcs;
+}
+
+const struct amdgpu_gfxhub_funcs gfxhub_v3_0_3_funcs = {
+ .get_fb_location = gfxhub_v3_0_3_get_fb_location,
+ .get_mc_fb_offset = gfxhub_v3_0_3_get_mc_fb_offset,
+ .setup_vm_pt_regs = gfxhub_v3_0_3_setup_vm_pt_regs,
+ .gart_enable = gfxhub_v3_0_3_gart_enable,
+ .gart_disable = gfxhub_v3_0_3_gart_disable,
+ .set_fault_enable_default = gfxhub_v3_0_3_set_fault_enable_default,
+ .init = gfxhub_v3_0_3_init,
+};
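
For context, the table above is only reached through the generic amdgpu_gfxhub_funcs hooks. A minimal hedged sketch of how the GMC layer is assumed to drive them once gfxhub_v3_0_3_funcs has been installed; the real call sites are in gmc_v11_0.c (see the hunk further below) and pass through additional state.

/* Illustrative only: exercises the amdgpu_gfxhub_funcs vtable the way the
 * GMC bring-up path is assumed to. Not part of this patch.
 */
static int example_gfxhub_bringup(struct amdgpu_device *adev)
{
	int r;

	adev->gfxhub.funcs->init(adev);		/* fill struct amdgpu_vmhub */

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Route VM faults to the default page unless retry faults are wanted */
	adev->gfxhub.funcs->set_fault_enable_default(adev, true);
	return 0;
}
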
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h
new file mode 100644
index 000000000000..6153bd5e3083
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFXHUB_V3_0_3_H__
+#define __GFXHUB_V3_0_3_H__
+
+extern const struct amdgpu_gfxhub_funcs gfxhub_v3_0_3_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 1471bfb9ae38..846ccb6cf07d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -39,6 +39,7 @@
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
+#include "gfxhub_v3_0_3.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
@@ -233,7 +234,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
/* Issue additional private vm invalidation to MMHUB */
if ((vmhub != AMDGPU_GFXHUB_0) &&
- (hub->vm_l2_bank_select_reserved_cid2)) {
+ (hub->vm_l2_bank_select_reserved_cid2) &&
+ !amdgpu_sriov_vf(adev)) {
inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
inv_req |= (1 << 25);
@@ -590,7 +592,14 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 3):
+ adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
+ break;
+ default:
+ adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
+ break;
+ }
}
static int gmc_v11_0_early_init(void *handle)
@@ -640,7 +649,10 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_gart_location(adev, mc);
/* base offset of vram pages */
- adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
+ if (amdgpu_sriov_vf(adev))
+ adev->vm_manager.vram_base_offset = 0;
+ else
+ adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}
/**
@@ -732,6 +744,7 @@ static int gmc_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 4603653916f5..67ca16a8027c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1103,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
*flags |= AMDGPU_PDE_BFS(0x9);
} else if (level == AMDGPU_VM_PDB0) {
- if (*flags & AMDGPU_PDE_PTE)
+ if (*flags & AMDGPU_PDE_PTE) {
*flags &= ~AMDGPU_PDE_PTE;
- else
+ if (!(*flags & AMDGPU_PTE_VALID))
+ *addr |= 1 << PAGE_SHIFT;
+ } else {
*flags |= AMDGPU_PTE_TF;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 085e613f3646..7cd79a3844b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -105,7 +105,13 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_USED_INT_THRESHOLD, threshold);
- WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
+ return;
+ } else {
+ WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+
WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}
@@ -132,7 +138,13 @@ static int ih_v6_0_toggle_ring_interrupts(struct amdgpu_device *adev,
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
- WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
+ return -ETIMEDOUT;
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
if (enable) {
ih->enabled = true;
@@ -242,7 +254,15 @@ static int ih_v6_0_enable_ring(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
- WREG32(ih_regs->ih_rb_cntl, tmp);
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
+ if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32(ih_regs->ih_rb_cntl, tmp);
+ }
if (ih == &adev->irq.ih) {
/* set the ih ring 0 writeback address whether it's enabled or not */
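
The three ih_v6_0.c hunks above repeat the same pattern: when running as an SR-IOV VF with indirect IH register access, IH_RB_CNTL-class registers must be programmed through the PSP rather than by direct MMIO. A hedged sketch of that pattern as a helper; the helper itself is hypothetical and not part of this patch, while psp_reg_program() and ih_regs->psp_reg_id are taken from the code above.

/* Hypothetical helper, assuming struct amdgpu_ih_regs carries psp_reg_id
 * as used above. Returns 0 on success, -ETIMEDOUT if the PSP path fails.
 */
static int ih_write_rb_cntl(struct amdgpu_device *adev,
			    struct amdgpu_ih_regs *ih_regs, u32 val)
{
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
		/* VF is not allowed to touch the register directly */
		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, val))
			return -ETIMEDOUT;
		return 0;
	}

	WREG32(ih_regs->ih_rb_cntl, val);
	return 0;
}
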
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 76383baa3929..95548c512f4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -26,12 +26,15 @@
#include "amdgpu_imu.h"
#include "amdgpu_dpm.h"
+#include "imu_v11_0_3.h"
+
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin");
static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
{
@@ -360,6 +363,9 @@ static void imu_v11_0_program_rlc_ram(struct amdgpu_device *adev)
program_imu_rlc_ram(adev, imu_rlc_ram_golden_11_0_2,
(const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_2));
break;
+ case IP_VERSION(11, 0, 3):
+ imu_v11_0_3_program_rlc_ram(adev);
+ break;
default:
BUG();
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
new file mode 100644
index 000000000000..fc69c1a29e23
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_imu.h"
+#include "imu_v11_0_3.h"
+
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+
+static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_3[] = {
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_RD_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_IO_WR_COMBINE_FLUSH, 0x00055555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_DRAM_COMBINE_FLUSH, 0x00555555, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC2, 0x00001ffe, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_CREDITS, 0x003f3fff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_TAG_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE0, 0x00041000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCC_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE0, 0x00040000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_VCD_RESERVE1, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_MISC, 0x00000017, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGUS_SDP_ENABLE, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_CREDITS, 0x003f3fbf, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE0, 0x10200800, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_TAG_RESERVE1, 0x00000088, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE0, 0x1d041040, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_VCC_RESERVE1, 0x80000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_IO_PRIORITY, 0x88888888, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MAM_CTRL, 0x0000d800, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ARB_FINAL, 0x000007ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_DRAM_PAGE_BURST, 0x20080200, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_SDP_ENABLE, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END, 0x000fffff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCEA_MISC, 0x0c48bff0, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SA_UNIT_DISABLE, 0x00fffc01, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_PRIM_CONFIG, 0x000fffe1, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_RB_BACKEND_DISABLE, 0xffffff01, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0x40000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xfffe0001, 0x42000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x44000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x46000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x48000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG, 0xffff0001, 0x4A000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCGTS_TCC_DISABLE, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_SHADER_RATE_CONFIG, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCC_GC_EDC_CONFIG, 0x00000001, 0x00000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000500, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_START, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_LOCAL_FB_ADDRESS_END, 0x000005ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_BASE, 0x00006000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_FB_LOCATION_TOP, 0x000065ff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_TOP_OF_DRAM_SLOT1, 0xff800000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_LOWER_TOP_OF_DRAM2, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_NB_UPPER_TOP_OF_DRAM2, 0x00000fff, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, 0x00001ffc, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, 0x00000551, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL, 0x00080603, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL2, 0x00000003, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL3, 0x00100003, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CNTL5, 0x00003fe0, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT0_CNTL, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_CONTEXT1_CNTL, 0x00000001, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES, 0x00000c00, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000444, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_0, 0x54105410, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGL2_PIPE_STEER_2, 0x76323276, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0x00000244, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCUTCL2_HARVEST_BYPASS_GROUPS, 0x00000006, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_APT_CNTL, 0x0000000c, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BASE, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_BOT, 0x00000002, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCMC_VM_AGP_TOP, 0x00000000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL2, 0x00020000, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA0_UCODE_SELFLOAD_CONTROL, 0x00000210, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regSDMA1_UCODE_SELFLOAD_CONTROL, 0x00000210, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPC_PSP_DEBUG, CPC_PSP_DEBUG__GPA_OVERRIDE_MASK, 0xe0000000),
+ IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0xe0000000),
+};
+
+static void program_rlc_ram_register_setting(struct amdgpu_device *adev,
+ const struct imu_rlc_ram_golden *regs,
+ const u32 array_size)
+{
+ const struct imu_rlc_ram_golden *entry;
+ u32 reg, data;
+ int i;
+
+ for (i = 0; i < array_size; ++i) {
+ entry = &regs[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ reg |= entry->addr_mask;
+
+ data = entry->data;
+ if (entry->reg == regGCMC_VM_AGP_BASE)
+ data = 0x00ffffff;
+ else if (entry->reg == regGCMC_VM_AGP_TOP)
+ data = 0x0;
+ else if (entry->reg == regGCMC_VM_FB_LOCATION_BASE)
+ data = adev->gmc.vram_start >> 24;
+ else if (entry->reg == regGCMC_VM_FB_LOCATION_TOP)
+ data = adev->gmc.vram_end >> 24;
+
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, reg);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, data);
+ }
+ /* Indicate the latest entry */
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_HIGH, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_ADDR_LOW, 0);
+ WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_DATA, 0);
+}
+
+void imu_v11_0_3_program_rlc_ram(struct amdgpu_device *adev)
+{
+ program_rlc_ram_register_setting(adev,
+ imu_rlc_ram_golden_11_0_3,
+ (const u32)ARRAY_SIZE(imu_rlc_ram_golden_11_0_3));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h
new file mode 100644
index 000000000000..702be568f26b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __IMU_V11_0_3_H__
+#define __IMU_V11_0_3_H__
+
+void imu_v11_0_3_program_rlc_ram(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 120ea294abef..5cec6b259b7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -38,6 +38,8 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
+MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
@@ -183,6 +185,20 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+ if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) &&
+ (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3))))
+ mes_add_queue_pkt.trap_en = 1;
+
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+ mes_add_queue_pkt.gds_size = input->queue_size;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index bc11b2de37ae..a1d26c4d80b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -169,17 +169,17 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- /* Disable AGP. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
-
if (!amdgpu_sriov_vf(adev)) {
/*
* the new L1 policy will block SRIOV guest from writing
* these regs, and they will be programed at host.
* so skip programing these regs.
*/
+ /* Disable AGP. */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 18);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
new file mode 100644
index 000000000000..f772bb499f3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V4_0_H__
+#define __MMSCH_V4_0_H__
+
+#include "amdgpu_vcn.h"
+
+#define MMSCH_VERSION_MAJOR 4
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+
+#define RB_ENABLED (1 << 0)
+#define RB4_ENABLED (1 << 1)
+#define MMSCH_DOORBELL_OFFSET 0x8
+
+#define MMSCH_VF_ENGINE_STATUS__PASS 0x1
+
+#define MMSCH_VF_MAILBOX_RESP__OK 0x1
+#define MMSCH_VF_MAILBOX_RESP__INCOMPLETE 0x2
+
+enum mmsch_v4_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v4_0_table_info {
+ uint32_t init_status;
+ uint32_t table_offset;
+ uint32_t table_size;
+};
+
+struct mmsch_v4_0_init_header {
+ uint32_t version;
+ uint32_t total_size;
+ struct mmsch_v4_0_table_info inst[AMDGPU_MAX_VCN_INSTANCES];
+ struct mmsch_v4_0_table_info jpegdec;
+};
+
+struct mmsch_v4_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v4_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v4_0_cmd_direct_write {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v4_0_cmd_direct_read_modify_write {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v4_0_cmd_direct_polling {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v4_0_cmd_end {
+ struct mmsch_v4_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v4_0_cmd_indirect_write {
+ struct mmsch_v4_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+#define MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_read_modify_write); \
+ size_dw = size / 4; \
+ direct_rd_mod_wt.cmd_header.reg_offset = reg; \
+ direct_rd_mod_wt.mask_value = mask; \
+ direct_rd_mod_wt.write_data = data; \
+ memcpy((void *)table_loc, &direct_rd_mod_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_DIRECT_WT(reg, value) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_write); \
+ size_dw = size / 4; \
+ direct_wt.cmd_header.reg_offset = reg; \
+ direct_wt.reg_value = value; \
+ memcpy((void *)table_loc, &direct_wt, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ size = sizeof(struct mmsch_v4_0_cmd_direct_polling); \
+ size_dw = size / 4; \
+ direct_poll.cmd_header.reg_offset = reg; \
+ direct_poll.mask_value = mask; \
+ direct_poll.wait_value = wait; \
+ memcpy((void *)table_loc, &direct_poll, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#define MMSCH_V4_0_INSERT_END() { \
+ size = sizeof(struct mmsch_v4_0_cmd_end); \
+ size_dw = size / 4; \
+ memcpy((void *)table_loc, &end, size); \
+ table_loc += size_dw; \
+ table_size += size_dw; \
+}
+
+#endif
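
The MMSCH_V4_0_INSERT_* macros above rely on a fixed set of local variable names in the caller (size, size_dw, table_loc, table_size, and the command structs). A minimal sketch of that implicit contract follows; the function and register offsets are placeholders, the real table builder lives in the VCN 4.0 SR-IOV start path.

/* Illustrative only: shows the locals the macros expect to find in scope. */
static uint32_t example_build_mmsch_table(uint32_t *table_loc)
{
	struct mmsch_v4_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	uint32_t table_size = 0, size, size_dw;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	/* Each macro copies one command into the table and advances
	 * table_loc/table_size by the command size in dwords.
	 */
	MMSCH_V4_0_INSERT_DIRECT_WT(0x100 /* placeholder reg */, 0x1);
	MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(0x101 /* placeholder reg */, 0xff, 0x2);
	MMSCH_V4_0_INSERT_END();

	return table_size;
}
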
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 12906ba74462..a2f04b249132 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -290,6 +290,7 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e07757eea7ad..a977f0027928 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -317,6 +317,7 @@ flr_done:
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 288c414babdf..fd14fa9b9cd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -529,6 +529,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b465baa26762..aa761ff3a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v2_3_program_ltr(adev);
+ /* Don't bother about LTR if LTR is not enabled
+ * in the path */
+ if (adev->pdev->ltr_path)
+ nbio_v2_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 982a89f841d5..15eb3658d70e 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -488,3 +488,47 @@ const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
.get_rom_offset = nbio_v4_3_get_rom_offset,
.program_aspm = nbio_v4_3_program_aspm,
};
+
+
+static void nbio_v4_3_sriov_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
+{
+}
+
+static void nbio_v4_3_sriov_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index,
+ int doorbell_size)
+{
+}
+
+static void nbio_v4_3_sriov_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance)
+{
+}
+
+static void nbio_v4_3_sriov_gc_doorbell_init(struct amdgpu_device *adev)
+{
+}
+
+const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = {
+ .get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset,
+ .get_rev_id = nbio_v4_3_get_rev_id,
+ .mc_access_enable = nbio_v4_3_mc_access_enable,
+ .get_memsize = nbio_v4_3_get_memsize,
+ .sdma_doorbell_range = nbio_v4_3_sriov_sdma_doorbell_range,
+ .vcn_doorbell_range = nbio_v4_3_sriov_vcn_doorbell_range,
+ .gc_doorbell_init = nbio_v4_3_sriov_gc_doorbell_init,
+ .enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v4_3_sriov_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v4_3_get_clockgating_state,
+ .ih_control = nbio_v4_3_ih_control,
+ .init_registers = nbio_v4_3_init_registers,
+ .remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
+ .get_rom_offset = nbio_v4_3_get_rom_offset,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
index ade43661d7a9..711999ceedf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h
@@ -28,5 +28,6 @@
extern const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v4_3_funcs;
+extern const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs;
#endif
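
A hedged sketch of how the new SR-IOV variant is presumably wired up; the real selection happens in the SoC21 code and may key off more than amdgpu_sriov_vf().

/* Illustrative only: the SR-IOV table above stubs out the doorbell-range
 * callbacks because the host PF programs those ranges for the VF.
 */
static void example_set_nbio_funcs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
	else
		adev->nbio.funcs = &nbio_v4_3_funcs;

	adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
}
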
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index f7f6ddebd3e4..37615a77287b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v6_1_program_ltr(adev);
+ /* Don't bother about LTR if LTR is not enabled
+ * in the path */
+ if (adev->pdev->ltr_path)
+ nbio_v6_1_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 11848d1e238b..19455a725939 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
};
+#ifdef CONFIG_PCIEASPM
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v7_4_program_ltr(adev);
+ /* Don't bother about LTR if LTR is not enabled
+ * in the path */
+ if (adev->pdev->ltr_path)
+ nbio_v7_4_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 1dc95ef21da6..def89379b51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -28,6 +28,14 @@
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
@@ -68,12 +76,6 @@ static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instan
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
SIZE, doorbell_size);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- SIZE, doorbell_size);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_SDMA0_DOORBELL_RANGE,
@@ -342,4 +344,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
+ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};
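
A hedged sketch of what the remap buys: once BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL points into the remapped MMIO window, flushing HDP reduces to a dummy write into that window. The real flush helper lives in the HDP IP code and may differ in detail.

/* Illustrative only, reusing the KFD_MMIO_REMAP_* offsets from the code above. */
static void example_hdp_flush_via_remap(struct amdgpu_device *adev)
{
	WREG32((adev->rmmio_remap.reg_offset +
		KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
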
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index a75a286e1ecf..21d822b1d589 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@@ -109,6 +110,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -222,6 +224,12 @@ static int psp_v13_0_bootloader_load_dbg_drv(struct psp_context *psp)
return psp_v13_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
}
+static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
+{
+ return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
+}
+
static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
{
int ret;
@@ -718,6 +726,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_soc_drv = psp_v13_0_bootloader_load_soc_drv,
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
+ .bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_init = psp_v13_0_ring_init,
.ring_create = psp_v13_0_ring_create,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 65181efba50e..7241a9fb0121 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -561,44 +561,6 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
}
}
-static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- /* arcturus shares the same FW memory across
- all SDMA isntances */
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
- adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
- break;
- }
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v4_0_init_microcode - load ucode images from disk
*
@@ -615,9 +577,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
+ int ret, i;
DRM_DEBUG("\n");
@@ -656,58 +616,25 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
-
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Acturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
- else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
-
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
}
-out:
- if (err) {
- DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v4_0_destroy_inst_ctx(adev);
- }
- return err;
+ return ret;
}
/**
@@ -1504,6 +1431,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1995,14 +1927,17 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- sdma_v4_0_destroy_inst_ctx(adev);
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
static int sdma_v4_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
@@ -2011,9 +1946,7 @@ static int sdma_v4_0_hw_init(void *handle)
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
- r = sdma_v4_0_start(adev);
-
- return r;
+ return sdma_v4_0_start(adev);
}
static int sdma_v4_0_hw_fini(void *handle)
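
The SDMA hunks in this series (v4.0 above, v5.0/v5.2/v6.0 below) all drop their private firmware loaders in favour of common helpers whose bodies are not part of this diff. A hedged sketch of the shape those helpers are assumed to take, with signatures inferred from the call sites; the real implementations are in amdgpu_sdma.c and handle both v1 and v2 SDMA ucode headers.

/* Sketch only; PSP ucode registration and v2 header support omitted. */
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
			       char *fw_name, u32 instance, bool duplicate)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	int err, i;

	err = request_firmware(&adev->sdma.instance[instance].fw, fw_name,
			       adev->dev);
	if (err)
		return err;

	err = amdgpu_ucode_validate(adev->sdma.instance[instance].fw);
	if (err)
		return err;

	hdr = (const struct sdma_firmware_header_v1_0 *)
		adev->sdma.instance[instance].fw->data;
	adev->sdma.instance[instance].fw_version =
		le32_to_cpu(hdr->header.ucode_version);
	adev->sdma.instance[instance].feature_version =
		le32_to_cpu(hdr->ucode_feature_version);

	/* "duplicate" means every instance shares instance 0's image */
	if (duplicate)
		for (i = 1; i < adev->sdma.num_instances; i++)
			memcpy(&adev->sdma.instance[i], &adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));

	return 0;
}

void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev, bool duplicate)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		if (duplicate)
			break;	/* shared image: release only once */
	}

	memset(adev->sdma.instance, 0,
	       sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
}
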
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index a019ac92edb7..c05c3eebde4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -240,10 +240,7 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct sdma_firmware_header_v1_0 *hdr;
+ int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
@@ -272,38 +269,12 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
- if (err)
- goto out;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
- if (adev->sdma.instance[i].feature_version >= 20)
- adev->sdma.instance[i].burst_nop = true;
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- }
-out:
- if (err) {
- DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
- return err;
+
+ return ret;
}
static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -1465,12 +1436,10 @@ static int sdma_v5_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
+ for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- }
+
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 83c6ccaaa9e4..f136fec7b4f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,33 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v5_2_init_microcode - load ucode images from disk
*
@@ -132,9 +105,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
DRM_DEBUG("\n");
@@ -169,42 +139,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++)
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
-
- if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0)))
- return 0;
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v5_2: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v5_2_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -1406,19 +1341,16 @@ static int sdma_v5_2_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v5_2_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
static int sdma_v5_2_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = sdma_v5_2_start(adev);
-
- return r;
+ return sdma_v5_2_start(adev);
}
static int sdma_v5_2_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 0200cb3a31a4..db51230163c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -47,6 +47,7 @@
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
+MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
@@ -77,33 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v6_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v2_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void*)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v6_0_init_microcode - load ucode images from disk
*
@@ -113,16 +87,10 @@ static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
-
-// emulation only, won't work on real chip
-// sdma 6.0.0 real chip need to use PSP to load firmware
static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30];
char ucode_prefix[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct sdma_firmware_header_v2_0 *sdma_hdr;
DRM_DEBUG("\n");
@@ -130,43 +98,7 @@ static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v6_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
- memcpy((void*)&adev->sdma.instance[i],
- (void*)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- sdma_hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v6_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v6_0_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -559,7 +491,8 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -593,7 +526,10 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev))
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
+ else
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
@@ -1365,19 +1301,16 @@ static int sdma_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v6_0_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
static int sdma_v6_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = sdma_v6_0_start(adev);
-
- return r;
+ return sdma_v6_0_start(adev);
}
static int sdma_v6_0_hw_fini(void *handle)
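The sdma_v6_0 hunks above drop the open-coded firmware request and per-instance context duplication in favor of the shared amdgpu_sdma_init_microcode() helper. For readers unfamiliar with the removed code, the pattern it implemented was roughly: parse the instance-0 image once, then clone that parsed context to the remaining SDMA instances. A minimal standalone sketch of that idea, using simplified stand-in types (sdma_ctx, parse_image and NUM_INST are illustrative, not amdgpu symbols):

#include <stdio.h>
#include <string.h>

#define NUM_INST 2               /* illustrative instance count */

struct sdma_ctx {                /* stand-in for amdgpu_sdma_instance */
	unsigned int fw_version;
	unsigned int feature_version;
	int burst_nop;
};

/* Pretend to parse a firmware header into the instance-0 context. */
static int parse_image(struct sdma_ctx *ctx)
{
	ctx->fw_version = 0x15;
	ctx->feature_version = 0x20;
	ctx->burst_nop = (ctx->feature_version >= 20);
	return 0;
}

int main(void)
{
	struct sdma_ctx inst[NUM_INST];
	int i, err;

	err = parse_image(&inst[0]);
	if (err)
		return err;

	/* All instances share one image, so clone the parsed context. */
	for (i = 1; i < NUM_INST; i++)
		memcpy(&inst[i], &inst[0], sizeof(inst[0]));

	for (i = 0; i < NUM_INST; i++)
		printf("inst %d: fw 0x%x, burst_nop %d\n",
		       i, inst[i].fw_version, inst[i].burst_nop);
	return 0;
}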
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
new file mode 100644
index 000000000000..7aa570c1ce4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "sienna_cichlid.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ } else {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2 &&
+ adev->pm.fw_version >= 0x3a5500 &&
+ !amdgpu_sriov_vf(adev)) {
+ reset_context->method = AMD_RESET_METHOD_MODE2;
+ return handler;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static int sienna_cichlid_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+sienna_cichlid_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->gfxhub.funcs->mode2_save_regs)
+ adev->gfxhub.funcs->mode2_save_regs(adev);
+ if (adev->gfxhub.funcs->halt)
+ adev->gfxhub.funcs->halt(adev);
+ r = sienna_cichlid_mode2_suspend_ip(adev);
+ }
+
+ return r;
+}
+
+static void sienna_cichlid_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+
+static int sienna_cichlid_mode2_reset(struct amdgpu_device *adev)
+{
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ return amdgpu_dpm_mode2_reset(adev);
+}
+
+static int
+sienna_cichlid_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r;
+
+ r = sienna_cichlid_mode2_reset(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "ASIC reset failed with error, %d ", r);
+ }
+ return r;
+}
+
+static int sienna_cichlid_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ int i, r;
+ struct psp_context *psp = &adev->psp;
+
+ r = psp_rlc_autoload_start(psp);
+ if (r) {
+ dev_err(adev->dev, "Failed to start rlc autoload\n");
+ return r;
+ }
+
+ /* Reinit GFXHUB */
+ if (adev->gfxhub.funcs->mode2_restore_regs)
+ adev->gfxhub.funcs->mode2_restore_regs(adev);
+ adev->gfxhub.funcs->init(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r) {
+ dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+sienna_cichlid_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = sienna_cichlid_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ /*
+ * Add this ASIC as tracked as reset was already
+ * complete successfully.
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ goto end;
+ }
+
+end:
+ if (r)
+ return -EAGAIN;
+ else
+ return r;
+}
+
+static struct amdgpu_reset_handler sienna_cichlid_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = sienna_cichlid_mode2_prepare_hwcontext,
+ .perform_reset = sienna_cichlid_mode2_perform_reset,
+ .restore_hwcontext = sienna_cichlid_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = sienna_cichlid_mode2_reset,
+};
+
+int sienna_cichlid_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = sienna_cichlid_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = sienna_cichlid_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &sienna_cichlid_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int sienna_cichlid_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
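sienna_cichlid_get_reset_handler() in the new file above walks the controller's handler list and either matches an explicitly requested method or falls back to MODE2 when the PMFW version and SR-IOV state allow it. A minimal user-space sketch of that lookup pattern, using a plain array instead of the kernel list and made-up names (reset_handler, find_handler, METHOD_*) that are not amdgpu symbols:

#include <stdio.h>
#include <stddef.h>

enum reset_method { METHOD_NONE, METHOD_MODE1, METHOD_MODE2 };

struct reset_handler {
	enum reset_method method;
	int (*do_reset)(void);
};

static int mode2_reset(void) { printf("mode2 reset\n"); return 0; }

static const struct reset_handler handlers[] = {
	{ METHOD_MODE2, mode2_reset },
};

/* Return the handler for 'wanted', or fall back to MODE2 if allowed. */
static const struct reset_handler *
find_handler(enum reset_method wanted, int mode2_allowed)
{
	size_t i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (wanted != METHOD_NONE && handlers[i].method == wanted)
			return &handlers[i];
		if (wanted == METHOD_NONE && mode2_allowed &&
		    handlers[i].method == METHOD_MODE2)
			return &handlers[i];
	}
	return NULL;
}

int main(void)
{
	const struct reset_handler *h = find_handler(METHOD_NONE, 1);

	return h ? h->do_reset() : -1;
}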
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h
index 5dcfbd8e2697..5213b162dacd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
+++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef DML_WRAPPER_H_
-#define DML_WRAPPER_H_
+#ifndef __SIENNA_CICHLID_H__
+#define __SIENNA_CICHLID_H__
-#include "dc.h"
-#include "dml/display_mode_vba.h"
+#include "amdgpu.h"
-bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate);
+int sienna_cichlid_reset_init(struct amdgpu_device *adev);
+int sienna_cichlid_reset_fini(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index fde6154f2009..183024d7c184 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_ring *ring;
-
- /* sdma/ih doorbell range are programed by hypervisor */
- if (!amdgpu_sriov_vf(adev)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
- }
-
- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
- adev->irq.ih.doorbell_index);
- }
-}
-
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
- /* HW doorbell routing policy: doorbell writing not
- * in SDMA/IH/MM/ACV range will be routed to CP. So
- * we need to init SDMA/IH/MM/ACV doorbell range prior
- * to CP ip block init and ring test.
- */
- soc15_doorbell_range_init(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 55284b24f113..16b757664a35 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -179,7 +179,7 @@ void soc21_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL), grbm_gfx_cntl);
+ WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
return false;
default:
@@ -582,6 +583,10 @@ static int soc21_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_ATHUB |
AMD_PG_SUPPORT_MMHUB;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ }
adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
break;
case IP_VERSION(11, 0, 2):
@@ -624,9 +629,23 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
+ case IP_VERSION(11, 0, 3):
+ adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
+ AMD_CG_SUPPORT_JPEG_MGCG;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG;
+ if (amdgpu_sriov_vf(adev)) {
+ /* hypervisor control CG and PG enablement */
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ }
+ adev->external_rev_id = adev->rev_id + 0x20;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index bf7524f16b66..a0d19b768346 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -452,41 +452,47 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset,
- uint32_t ch_inst,
- uint32_t umc_inst)
+ uint32_t umc_reg_offset, uint32_t ch_inst,
+ uint32_t umc_inst, uint64_t mca_addr)
{
uint32_t mc_umc_status_addr;
uint32_t channel_index;
- uint64_t mc_umc_status, mc_umc_addrt0;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0;
uint64_t err_addr, soc_pa, retired_page, column;
- mc_umc_status_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- mc_umc_addrt0 =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+ if (mca_addr == UMC_INVALID_ADDR) {
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
- if (mc_umc_status == 0)
- return;
+ if (mc_umc_status == 0)
+ return;
- if (!err_data->err_addr) {
- /* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
- return;
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
}
channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
/* calculate error address if ue/ce error is detected */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-
- err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) ||
+ mca_addr != UMC_INVALID_ADDR) {
+ if (mca_addr == UMC_INVALID_ADDR) {
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ } else {
+ err_addr = mca_addr;
+ }
/* translate umc channel address to soc pa, 3 parts are included */
soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
@@ -501,7 +507,8 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
/* we only save ue error information currently, ce is skipped */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
+ == 1 ||
+ mca_addr != UMC_INVALID_ADDR) {
/* loop for all possibilities of [C4 C3 C2] */
for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
@@ -519,7 +526,8 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
}
/* clear umc status */
- WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ if (mca_addr == UMC_INVALID_ADDR)
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -540,9 +548,8 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
ch_inst);
umc_v6_7_query_error_address(adev,
err_data,
- umc_reg_offset,
- ch_inst,
- umc_inst);
+ umc_reg_offset, ch_inst,
+ umc_inst, UMC_INVALID_ADDR);
}
}
@@ -583,4 +590,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
+ .convert_ras_error_address = umc_v6_7_query_error_address,
};
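The umc_v6_7 change above reuses one address-translation path for two callers: the RAS query path, which reads the MCA address from hardware, and the new convert_ras_error_address hook, which passes an address in directly; the two are distinguished by the UMC_INVALID_ADDR sentinel. A small sketch of that sentinel-dispatch pattern with illustrative names (INVALID_ADDR, read_hw_addr, translate_addr are not the real amdgpu symbols):

#include <stdio.h>
#include <stdint.h>

#define INVALID_ADDR ((uint64_t)~0ULL)   /* sentinel: "read it from HW" */

static uint64_t read_hw_addr(void)
{
	return 0x12345000ULL;            /* stand-in for a register read */
}

/* Translate either a caller-supplied address or the one read from HW. */
static uint64_t translate_addr(uint64_t mca_addr)
{
	uint64_t err_addr;

	if (mca_addr == INVALID_ADDR)
		err_addr = read_hw_addr();  /* legacy query path */
	else
		err_addr = mca_addr;        /* caller already has the address */

	return err_addr >> 12;              /* pretend page-frame conversion */
}

int main(void)
{
	printf("from hw:     0x%llx\n",
	       (unsigned long long)translate_addr(INVALID_ADDR));
	printf("from caller: 0x%llx\n",
	       (unsigned long long)translate_addr(0xabcde000ULL));
	return 0;
}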
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index 36a2053f2e8b..a8cbda81828d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -101,22 +101,16 @@ static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
{
- uint32_t ecc_err_cnt, ecc_err_cnt_addr;
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
/* UMC 8_10 registers */
- ecc_err_cnt_addr =
- SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
- *error_count +=
- (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
- UMC_V8_10_CE_CNT_INIT);
-
- /* Check for SRAM correctable error, MCUMC_STATUS is a 64 bit register */
+ /* Rely on MCUMC_STATUS for correctable error counter
+ * MCUMC_STATUS is a 64 bit register
+ */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
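umc_v8_10_query_correctable_error_count() above now derives the correctable count from fields of the 64-bit MCUMC_STATUS register instead of the GeccErrCnt counter. The kernel does this with REG_GET_FIELD(); the sketch below shows the equivalent mask-and-shift extraction with hypothetical field positions (VAL_SHIFT and CECC_SHIFT are illustrative, not the real register layout):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical bit positions for two single-bit status fields. */
#define VAL_SHIFT   63
#define CECC_SHIFT  46

static unsigned int get_bit(uint64_t reg, unsigned int shift)
{
	return (unsigned int)((reg >> shift) & 0x1);
}

int main(void)
{
	/* Pretend this was read from the 64-bit status register. */
	uint64_t mc_umc_status = (1ULL << VAL_SHIFT) | (1ULL << CECC_SHIFT);
	unsigned long error_count = 0;

	/* Count a correctable error only when the entry is valid. */
	if (get_bit(mc_umc_status, VAL_SHIFT) == 1 &&
	    get_bit(mc_umc_status, CECC_SHIFT) == 1)
		error_count++;

	printf("correctable errors: %lu\n", error_count);
	return 0;
}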
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 39405f0db824..9c8b5fd99037 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1761,21 +1761,23 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1846,7 +1848,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v3_0_limit_sched(p);
+ r = vcn_v3_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1860,7 +1862,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
uint32_t msg_lo = 0, msg_hi = 0;
unsigned i;
int r;
@@ -1879,7 +1881,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
val == 0) {
- r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ r = vcn_v3_0_dec_msg(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index fb2d74f30448..897a5ce9c9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -30,6 +30,7 @@
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
+#include "mmsch_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
@@ -45,6 +46,8 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN_HARVEST_MMSCH 0
+
#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001
@@ -53,12 +56,14 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
};
+static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
+static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
* vcn_v4_0_early_init - set function pointers
@@ -71,6 +76,9 @@ static int vcn_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
+
/* re-use enc ring as unified ring */
adev->vcn.num_enc_rings = 1;
@@ -92,6 +100,7 @@ static int vcn_v4_0_sw_init(void *handle)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r;
+ int vcn_doorbell_index = 0;
r = amdgpu_vcn_sw_init(adev);
if (r)
@@ -103,6 +112,12 @@ static int vcn_v4_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_sriov_vf(adev)) {
+ vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 - MMSCH_DOORBELL_OFFSET;
+ /* get DWORD offset */
+ vcn_doorbell_index = vcn_doorbell_index << 1;
+ }
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -119,7 +134,10 @@ static int vcn_v4_0_sw_init(void *handle)
ring = &adev->vcn.inst[i].ring_enc[0];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
+ if (amdgpu_sriov_vf(adev))
+ ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.num_enc_rings + 1) + 1;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
sprintf(ring->name, "vcn_unified_%d", i);
@@ -132,10 +150,23 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
+ if (amdgpu_sriov_vf(adev))
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
}
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+ }
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;
@@ -169,6 +200,9 @@ static int vcn_v4_0_sw_fini(void *handle)
drm_dev_exit(idx);
}
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -191,18 +225,42 @@ static int vcn_v4_0_hw_init(void *handle)
struct amdgpu_ring *ring;
int i, r;
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
+ if (amdgpu_sriov_vf(adev)) {
+ r = vcn_v4_0_start_sriov(adev);
+ if (r)
+ goto done;
- ring = &adev->vcn.inst[i].ring_enc[0];
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
+ ring->sched.ready = false;
+ ring->no_scheduler = true;
+ dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
+ } else {
+ ring->wptr = 0;
+ ring->wptr_old = 0;
+ vcn_v4_0_unified_ring_set_wptr(ring);
+ ring->sched.ready = true;
+ }
+ }
+ } else {
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- r = amdgpu_ring_test_helper(ring);
- if (r)
- goto done;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ goto done;
+
+ }
}
done:
@@ -230,12 +288,14 @@ static int vcn_v4_0_hw_fini(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, regUVD_STATUS))) {
vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
}
+
}
return 0;
@@ -1107,6 +1167,214 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
return 0;
}
+static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
+{
+ int i;
+ struct amdgpu_ring *ring_enc;
+ uint64_t cache_addr;
+ uint64_t rb_enc_addr;
+ uint64_t ctx_addr;
+ uint32_t param, resp, expected;
+ uint32_t offset, cache_size;
+ uint32_t tmp, timeout;
+
+ struct amdgpu_mm_table *table = &adev->virt.mm_table;
+ uint32_t *table_loc;
+ uint32_t table_size;
+ uint32_t size, size_dw;
+ uint32_t init_status;
+ uint32_t enabled_vcn;
+
+ struct mmsch_v4_0_cmd_direct_write
+ direct_wt = { {0} };
+ struct mmsch_v4_0_cmd_direct_read_modify_write
+ direct_rd_mod_wt = { {0} };
+ struct mmsch_v4_0_cmd_end end = { {0} };
+ struct mmsch_v4_0_init_header header;
+
+ volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+
+ direct_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ end.cmd_header.command_type =
+ MMSCH_COMMAND__END;
+
+ header.version = MMSCH_VERSION;
+ header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
+ for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
+ header.inst[i].init_status = 0;
+ header.inst[i].table_offset = 0;
+ header.inst[i].table_size = 0;
+ }
+
+ table_loc = (uint32_t *)table->cpu_addr;
+ table_loc += header.total_size;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ table_size = 0;
+
+ MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_STATUS),
+ ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
+
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
+ offset = 0;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ } else {
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].gpu_addr));
+ offset = cache_size;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE0),
+ cache_size);
+
+ cache_addr = adev->vcn.inst[i].gpu_addr + offset;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ cache_addr = adev->vcn.inst[i].gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE;
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(cache_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
+ rb_setup = &fw_shared->rb_setup;
+
+ ring_enc = &adev->vcn.inst[i].ring_enc[0];
+ ring_enc->wptr = 0;
+ rb_enc_addr = ring_enc->gpu_addr;
+
+ rb_setup->is_rb_enabled_flags |= RB_ENABLED;
+ rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+ rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+ rb_setup->rb_size = ring_enc->ring_size / 4;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
+ MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
+ regUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
+
+ /* add end packet */
+ MMSCH_V4_0_INSERT_END();
+
+ /* refine header */
+ header.inst[i].init_status = 0;
+ header.inst[i].table_offset = header.total_size;
+ header.inst[i].table_size = table_size;
+ header.total_size += table_size;
+ }
+
+ /* Update init table header in memory */
+ size = sizeof(struct mmsch_v4_0_init_header);
+ table_loc = (uint32_t *)table->cpu_addr;
+ memcpy((void *)table_loc, &header, size);
+
+ /* message MMSCH (in VCN[0]) to initialize this client
+ * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ ctx_addr = table->gpu_addr;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
+
+ /* 2, update vmid of descriptor */
+ tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
+ tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ size = header.total_size;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
+
+ /* 5, kick off the initialization and wait until
+ * MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ param = 0x00000001;
+ WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
+ tmp = 0;
+ timeout = 1000;
+ resp = 0;
+ expected = MMSCH_VF_MAILBOX_RESP__OK;
+ while (resp != expected) {
+ resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
+ if (resp != 0)
+ break;
+
+ udelay(10);
+ tmp = tmp + 10;
+ if (tmp >= timeout) {
+ DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
+ " waiting for regMMSCH_VF_MAILBOX_RESP "\
+ "(expected=0x%08x, readback=0x%08x)\n",
+ tmp, expected, resp);
+ return -EBUSY;
+ }
+ }
+ enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
+ init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
+ if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
+ && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
+ DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
+ "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);
+
+ return 0;
+}
+
/**
* vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
*
@@ -1327,21 +1595,23 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1412,7 +1682,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v4_0_limit_sched(p);
+ r = vcn_v4_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1425,32 +1695,34 @@ out:
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
- struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ struct amdgpu_vcn_decode_buffer *decode_buffer;
+ uint64_t addr;
uint32_t val;
- int r = 0;
/* The first instance can decode anything */
if (!ring->me)
- return r;
+ return 0;
/* unified queue ib header has 8 double words. */
if (ib->length_dw < 8)
- return r;
+ return 0;
val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE
+ if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
+ return 0;
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
- if (decode_buffer->valid_buf_flag & 0x1)
- r = vcn_v4_0_dec_msg(p, ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo);
- }
- return r;
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
}
static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
@@ -1596,6 +1868,15 @@ static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_sta
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
+ /* for SRIOV, guest should not control VCN Power-gating
+ * MMSCH FW should control Power-gating and clock-gating
+ * guest should avoid touching CGC and PG
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if(state == adev->vcn.cur_state)
return 0;
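vcn_v4_0_start_sriov() above ends by polling regMMSCH_VF_MAILBOX_RESP until it becomes non-zero or a timeout budget is exhausted. The same poll-with-timeout structure, reduced to a standalone sketch (read_resp and poll_resp are illustrative stand-ins, not amdgpu or MMSCH APIs):

#include <stdio.h>
#include <stdint.h>

#define RESP_OK      1u
#define TIMEOUT_US   1000u
#define STEP_US      10u

static uint32_t fake_resp;               /* stand-in for the mailbox register */

static uint32_t read_resp(void)
{
	/* Pretend the firmware answers after a few reads. */
	static int reads;

	if (++reads >= 3)
		fake_resp = RESP_OK;
	return fake_resp;
}

/* Poll until the response becomes non-zero or the budget is exhausted. */
static int poll_resp(uint32_t expected)
{
	uint32_t waited = 0, resp = 0;

	while (resp == 0) {
		resp = read_resp();
		if (resp != 0)
			break;
		waited += STEP_US;       /* udelay(STEP_US) in the driver */
		if (waited >= TIMEOUT_US)
			return -1;       /* -EBUSY in the driver */
	}
	return resp == expected ? 0 : 1; /* 1: responded, but not OK */
}

int main(void)
{
	printf("poll result: %d\n", poll_resp(RESP_OK));
	return 0;
}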
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 03b7066471f9..1e83db0c5438 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 2022ffbb8dba..59dfca093155 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 60a81649cf12..c7118843db05 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -742,7 +742,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf88fffe, 0x877aff7f,
0x04000000, 0x8f7a857a,
0x886d7a6d, 0xb97b02dc,
- 0x8f7b997b, 0xb97a2a05,
+ 0x8f7b997b, 0xb97a3a05,
0x807a817a, 0xbf0d997b,
0xbf850002, 0x8f7a897a,
0xbf820001, 0x8f7a8a7a,
@@ -819,7 +819,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbefe037c, 0xbefc0370,
0xf4611c7a, 0xf8000000,
0x80708470, 0xbefc037e,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -1069,7 +1069,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9f9f816, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2114,7 +2114,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x007a0000, 0x7e000280,
0xbefe037a, 0xbeff037b,
0xb97b02dc, 0x8f7b997b,
- 0xb97a2a05, 0x807a817a,
+ 0xb97a3a05, 0x807a817a,
0xbf0d997b, 0xbf850002,
0x8f7a897a, 0xbf820001,
0x8f7a8a7a, 0xb97b1e06,
@@ -2157,7 +2157,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x01000000, 0xe0704100,
0x705d0100, 0xe0704200,
0x705d0200, 0xe0704300,
- 0x705d0300, 0xb9702a05,
+ 0x705d0300, 0xb9703a05,
0x80708170, 0xbf0d9973,
0xbf850002, 0x8f708970,
0xbf820001, 0x8f708a70,
@@ -2189,7 +2189,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbefe03ff, 0x0000ffff,
0xbeff0380, 0xe0704000,
0x705d0200, 0xbefe03c1,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -2475,7 +2475,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb9ef4803, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2494,438 +2494,441 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
-
static const uint32_t cwsr_trap_gfx11_hex[] = {
- 0xbfa00001, 0xbfa0021b,
+ 0xbfa00001, 0xbfa0021e,
0xb0804006, 0xb8f8f802,
- 0x91788678, 0xb8fbf803,
- 0x8b6eff78, 0x00002000,
- 0xbfa10009, 0x8b6eff6d,
- 0x00ff0000, 0xbfa2001e,
- 0x8b6eff7b, 0x00000400,
- 0xbfa20041, 0xbf830010,
- 0xb8fbf803, 0xbfa0fffa,
- 0x8b6eff7b, 0x00000900,
- 0xbfa20015, 0x8b6eff7b,
- 0x000071ff, 0xbfa10008,
- 0x8b6fff7b, 0x00007080,
- 0xbfa10001, 0xbeee1287,
- 0xb8eff801, 0x846e8c6e,
- 0x8b6e6f6e, 0xbfa2000a,
+ 0x9178ff78, 0x00020006,
+ 0xb8fbf803, 0xbf0d9f6d,
+ 0xbfa20006, 0x8b6eff78,
+ 0x00002000, 0xbfa10009,
0x8b6eff6d, 0x00ff0000,
- 0xbfa20007, 0xb8eef801,
- 0x8b6eff6e, 0x00000800,
- 0xbfa20003, 0x8b6eff7b,
- 0x00000400, 0xbfa20026,
- 0xbefa4d82, 0xbf89fc07,
- 0x84fa887a, 0xf4005bbd,
- 0xf8000010, 0xbf89fc07,
- 0x846e976e, 0x9177ff77,
- 0x00800000, 0x8c776e77,
- 0xf4045bbd, 0xf8000000,
- 0xbf89fc07, 0xf4045ebd,
- 0xf8000008, 0xbf89fc07,
- 0x8bee6e6e, 0xbfa10001,
- 0xbe80486e, 0x8b6eff6d,
- 0x01ff0000, 0xbfa20005,
- 0x8c78ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbfa00005, 0x8b6eff6d,
- 0x01000000, 0xbfa20002,
- 0x806c846c, 0x826d806d,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
- 0xb978f802, 0xbe804a6c,
- 0x8b6dff6d, 0x0000ffff,
- 0xbefa0080, 0xb97a0283,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbefe4d84,
- 0xbf89fc07, 0x8b7aff7f,
- 0x04000000, 0x847a857a,
- 0x8c6d7a6d, 0xbefa007e,
- 0x8b7bff7f, 0x0000ffff,
- 0xbefe00c1, 0xbeff00c1,
- 0xdca6c000, 0x007a0000,
- 0x7e000280, 0xbefe007a,
- 0xbeff007b, 0xb8fb02dc,
- 0x847b997b, 0xb8fa3b05,
- 0x807a817a, 0xbf0d997b,
- 0xbfa20002, 0x847a897a,
- 0xbfa00001, 0x847a8a7a,
- 0xb8fb1e06, 0x847b8a7b,
- 0x807a7b7a, 0x8b7bff7f,
- 0x0000ffff, 0x807aff7a,
- 0x00000200, 0x807a7e7a,
- 0x827b807b, 0xd7610000,
- 0x00010870, 0xd7610000,
- 0x00010a71, 0xd7610000,
- 0x00010c72, 0xd7610000,
- 0x00010e73, 0xd7610000,
- 0x00011074, 0xd7610000,
- 0x00011275, 0xd7610000,
- 0x00011476, 0xd7610000,
- 0x00011677, 0xd7610000,
- 0x00011a79, 0xd7610000,
- 0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
- 0x00003fff, 0xbeff0080,
- 0xdca6c040, 0x007a0000,
- 0xd760007a, 0x00011d00,
- 0xd760007b, 0x00011f00,
+ 0xbfa2001e, 0x8b6eff7b,
+ 0x00000400, 0xbfa20041,
+ 0xbf830010, 0xb8fbf803,
+ 0xbfa0fffa, 0x8b6eff7b,
+ 0x00000900, 0xbfa20015,
+ 0x8b6eff7b, 0x000071ff,
+ 0xbfa10008, 0x8b6fff7b,
+ 0x00007080, 0xbfa10001,
+ 0xbeee1287, 0xb8eff801,
+ 0x846e8c6e, 0x8b6e6f6e,
+ 0xbfa2000a, 0x8b6eff6d,
+ 0x00ff0000, 0xbfa20007,
+ 0xb8eef801, 0x8b6eff6e,
+ 0x00000800, 0xbfa20003,
+ 0x8b6eff7b, 0x00000400,
+ 0xbfa20026, 0xbefa4d82,
+ 0xbf89fc07, 0x84fa887a,
+ 0xf4005bbd, 0xf8000010,
+ 0xbf89fc07, 0x846e976e,
+ 0x9177ff77, 0x00800000,
+ 0x8c776e77, 0xf4045bbd,
+ 0xf8000000, 0xbf89fc07,
+ 0xf4045ebd, 0xf8000008,
+ 0xbf89fc07, 0x8bee6e6e,
+ 0xbfa10001, 0xbe80486e,
+ 0x8b6eff6d, 0x01ff0000,
+ 0xbfa20005, 0x8c78ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbfa00005,
+ 0x8b6eff6d, 0x01000000,
+ 0xbfa20002, 0x806c846c,
+ 0x826d806d, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb978f802,
+ 0xbe804a6c, 0x8b6dff6d,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbefe4d84, 0xbf89fc07,
+ 0x8b7aff7f, 0x04000000,
+ 0x847a857a, 0x8c6d7a6d,
+ 0xbefa007e, 0x8b7bff7f,
+ 0x0000ffff, 0xbefe00c1,
+ 0xbeff00c1, 0xdca6c000,
+ 0x007a0000, 0x7e000280,
0xbefe007a, 0xbeff007b,
- 0xbef4007e, 0x8b75ff7f,
- 0x0000ffff, 0x8c75ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x10807fac,
- 0xbef1007d, 0xbef00080,
- 0xb8f302dc, 0x84739973,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00002, 0xbeff00c1,
- 0xbfa00009, 0xbef600ff,
- 0x01000000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
- 0x701d0300, 0xbfa00008,
+ 0xb8fb02dc, 0x847b997b,
+ 0xb8fa3b05, 0x807a817a,
+ 0xbf0d997b, 0xbfa20002,
+ 0x847a897a, 0xbfa00001,
+ 0x847a8a7a, 0xb8fb1e06,
+ 0x847b8a7b, 0x807a7b7a,
+ 0x8b7bff7f, 0x0000ffff,
+ 0x807aff7a, 0x00000200,
+ 0x807a7e7a, 0x827b807b,
+ 0xd7610000, 0x00010870,
+ 0xd7610000, 0x00010a71,
+ 0xd7610000, 0x00010c72,
+ 0xd7610000, 0x00010e73,
+ 0xd7610000, 0x00011074,
+ 0xd7610000, 0x00011275,
+ 0xd7610000, 0x00011476,
+ 0xd7610000, 0x00011677,
+ 0xd7610000, 0x00011a79,
+ 0xd7610000, 0x00011c7e,
+ 0xd7610000, 0x00011e7f,
+ 0xbefe00ff, 0x00003fff,
+ 0xbeff0080, 0xdca6c040,
+ 0x007a0000, 0xd760007a,
+ 0x00011d00, 0xd760007b,
+ 0x00011f00, 0xbefe007a,
+ 0xbeff007b, 0xbef4007e,
+ 0x8b75ff7f, 0x0000ffff,
+ 0x8c75ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x10807fac, 0xbef1007d,
+ 0xbef00080, 0xb8f302dc,
+ 0x84739973, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00002,
+ 0xbeff00c1, 0xbfa00009,
0xbef600ff, 0x01000000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0xbfa00008, 0xbef600ff,
+ 0x01000000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
+ 0x701d0300, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0xbef600ff,
+ 0x01000000, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0xbefd0080, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xd7610002, 0x0000fa6c,
+ 0x807d817d, 0x917aff6d,
+ 0x80000000, 0xd7610002,
+ 0x0000fa7a, 0x807d817d,
+ 0xd7610002, 0x0000fa6e,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6f, 0x807d817d,
+ 0xd7610002, 0x0000fa78,
+ 0x807d817d, 0xb8faf803,
+ 0xd7610002, 0x0000fa7a,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa7b, 0x807d817d,
+ 0xb8f1f801, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f814, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f815, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xbefe00ff, 0x0000ffff,
+ 0xbeff0080, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
0xb8f03b05, 0x80708170,
0xbf0d9973, 0xbfa20002,
0x84708970, 0xbfa00001,
0x84708a70, 0xb8fa1e06,
0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
0xbef600ff, 0x01000000,
- 0x7e000280, 0x7e020280,
- 0x7e040280, 0xbefd0080,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xd7610002,
- 0x0000fa6c, 0x807d817d,
- 0x917aff6d, 0x80000000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa6e, 0x807d817d,
- 0xd7610002, 0x0000fa6f,
- 0x807d817d, 0xd7610002,
- 0x0000fa78, 0x807d817d,
- 0xb8faf803, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa7b,
- 0x807d817d, 0xb8f1f801,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f814,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f815,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xbefe00ff,
- 0x0000ffff, 0xbeff0080,
- 0xe0685000, 0x701d0200,
- 0xbefe00c1, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0xbef600ff,
- 0x01000000, 0xbef90080,
- 0xbefd0080, 0xbf800000,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xbe8c410c, 0xbe8e410e,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0xbef90080, 0xbefd0080,
+ 0xbf800000, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xbe8c410c,
+ 0xbe8e410e, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xd7610002, 0x0000f20c,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20d, 0x80798179,
- 0xd7610002, 0x0000f20e,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
- 0x0000f20f, 0x80798179,
- 0xbf06a079, 0xbfa10006,
- 0xe0685000, 0x701d0200,
- 0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbc,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
+ 0x0000f20c, 0x80798179,
+ 0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
+ 0x0000f20e, 0x80798179,
+ 0xd7610002, 0x0000f20f,
+ 0x80798179, 0xbf06a079,
+ 0xbfa10006, 0xe0685000,
+ 0x701d0200, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ffbc, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xe0685000, 0x701d0200,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
+ 0x80798179, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x8b7bc17b, 0xbfa10044,
+ 0xbfbd0000, 0x8b7aff6d,
+ 0x80000000, 0xbfa10040,
+ 0x847b867b, 0x847b827b,
+ 0xbef6007b, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xd71f0000,
+ 0x000100c1, 0xd7200000,
+ 0x000200c1, 0x16000084,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbefd0080,
+ 0xbfa20012, 0xbe8300ff,
+ 0x00000080, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7d, 0xbfa2fff4,
+ 0xbfa00011, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000100,
+ 0xbf0a7b7d, 0xbfa2fff4,
0xbefe00c1, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00001, 0xbeff00c1,
- 0xb8fb4306, 0x8b7bc17b,
- 0xbfa10044, 0xbfbd0000,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10040, 0x847b867b,
- 0x847b827b, 0xbef6007b,
- 0xb8f03b05, 0x80708170,
- 0xbf0d9973, 0xbfa20002,
- 0x84708970, 0xbfa00001,
- 0x84708a70, 0xb8fa1e06,
- 0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xd71f0000, 0x000100c1,
- 0xd7200000, 0x000200c1,
- 0x16000084, 0x857d9973,
+ 0xbfa20004, 0xbef000ff,
+ 0x00000200, 0xbeff0080,
+ 0xbfa00003, 0xbef000ff,
+ 0x00000400, 0xbeff00c1,
+ 0xb8fb3b05, 0x807b817b,
+ 0x847b827b, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbefd0080, 0xbfa20012,
- 0xbe8300ff, 0x00000080,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbfa00011,
- 0xbe8300ff, 0x00000100,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000100, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20004,
- 0xbef000ff, 0x00000200,
- 0xbeff0080, 0xbfa00003,
- 0xbef000ff, 0x00000400,
- 0xbeff00c1, 0xb8fb3b05,
- 0x807b817b, 0x847b827b,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20017,
+ 0xbfa20017, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10037,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0685000, 0x701d0000,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0x807d847d, 0x8070ff70,
+ 0x00000200, 0xbf0a7b7d,
+ 0xbfa2ffef, 0xbfa00025,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10037, 0x7e008700,
+ 0xbfa10011, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0685000,
- 0x701d0000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
+ 0x701d0000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
0x701d0300, 0x807d847d,
- 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000400,
0xbf0a7b7d, 0xbfa2ffef,
- 0xbfa00025, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10011,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
+ 0xb8fb1e06, 0x8b7bc17b,
+ 0xbfa1000c, 0x847b837b,
+ 0x807b7d7b, 0xbefe00c1,
+ 0xbeff0080, 0x7e008700,
0xe0685000, 0x701d0000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
- 0x807d847d, 0x8070ff70,
- 0x00000400, 0xbf0a7b7d,
- 0xbfa2ffef, 0xb8fb1e06,
- 0x8b7bc17b, 0xbfa1000c,
- 0x847b837b, 0x807b7d7b,
- 0xbefe00c1, 0xbeff0080,
- 0x7e008700, 0xe0685000,
- 0x701d0000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff8,
- 0xbfa00141, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xb8f202dc,
- 0x84729972, 0x8b6eff7f,
- 0x04000000, 0xbfa1003a,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff8, 0xbfa00146,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xb8f202dc, 0x84729972,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1003a, 0xbefe00c1,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x8b6fc16f, 0xbfa1002f,
+ 0x846f866f, 0x846f826f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000c,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbfa0000b,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbef80080,
0xbefe00c1, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
- 0xb8ef4306, 0x8b6fc16f,
- 0xbfa1002f, 0x846f866f,
- 0x846f826f, 0xbef6006f,
- 0xb8f83b05, 0x80788178,
- 0xbf0d9972, 0xbfa20002,
- 0x84788978, 0xbfa00001,
- 0x84788a78, 0xb8ee1e06,
- 0x846e8a6e, 0x80786e78,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20024, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000c, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbfa0000b, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20024,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10050,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10050, 0xe0505000,
+ 0x781d0000, 0xe0505080,
+ 0x781d0100, 0xe0505100,
+ 0x781d0200, 0xe0505180,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xe0505000,
+ 0x6e1d0000, 0xe0505080,
+ 0x6e1d0100, 0xe0505100,
+ 0x6e1d0200, 0xe0505180,
+ 0x6e1d0300, 0xbf8903f7,
+ 0xbfa00034, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10012, 0xe0505000,
+ 0x781d0000, 0xe0505100,
+ 0x781d0100, 0xe0505200,
+ 0x781d0200, 0xe0505300,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000e,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
0xe0505000, 0x781d0000,
- 0xe0505080, 0x781d0100,
- 0xe0505100, 0x781d0200,
- 0xe0505180, 0x781d0300,
0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffee,
+ 0x807d817d, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff7, 0xbeff00c1,
0xe0505000, 0x6e1d0000,
- 0xe0505080, 0x6e1d0100,
- 0xe0505100, 0x6e1d0200,
- 0xe0505180, 0x6e1d0300,
- 0xbf8903f7, 0xbfa00034,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10012,
- 0xe0505000, 0x781d0000,
- 0xe0505100, 0x781d0100,
- 0xe0505200, 0x781d0200,
- 0xe0505300, 0x781d0300,
- 0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffee,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000e, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xe0505000,
- 0x781d0000, 0xbf8903f7,
- 0x7e008500, 0x807d817d,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff7,
- 0xbeff00c1, 0xe0505000,
- 0x6e1d0000, 0xe0505100,
- 0x6e1d0100, 0xe0505200,
- 0x6e1d0200, 0xe0505300,
- 0x6e1d0300, 0xbf8903f7,
+ 0xe0505100, 0x6e1d0100,
+ 0xe0505200, 0x6e1d0200,
+ 0xe0505300, 0x6e1d0300,
+ 0xbf8903f7, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef600ff,
+ 0x01000000, 0xbefd00ff,
+ 0x0000006c, 0x80f89078,
+ 0xf428403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd847d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0x80f8a078,
+ 0xf42c403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd887d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0x80f8c078,
+ 0xf430403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd907d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0xbe884308,
+ 0xbe8a430a, 0xbe8c430c,
+ 0xbe8e430e, 0xbf06807d,
+ 0xbfa1fff0, 0xb980f801,
+ 0x00000000, 0xbfbd0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbefd00ff, 0x0000006c,
- 0x80f89078, 0xf428403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd847d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0x80f8a078, 0xf42c403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd887d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0x80f8c078, 0xf430403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd907d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0xbe884308, 0xbe8a430a,
- 0xbe8c430c, 0xbe8e430e,
- 0xbf06807d, 0xbfa1fff0,
- 0xb980f801, 0x00000000,
- 0xbfbd0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xf4205bfa,
+ 0xf4205bfa, 0xf0000000,
+ 0x80788478, 0xf4205b3a,
0xf0000000, 0x80788478,
- 0xf4205b3a, 0xf0000000,
- 0x80788478, 0xf4205b7a,
+ 0xf4205b7a, 0xf0000000,
+ 0x80788478, 0xf4205c3a,
0xf0000000, 0x80788478,
- 0xf4205c3a, 0xf0000000,
- 0x80788478, 0xf4205c7a,
+ 0xf4205c7a, 0xf0000000,
+ 0x80788478, 0xf4205eba,
0xf0000000, 0x80788478,
- 0xf4205eba, 0xf0000000,
- 0x80788478, 0xf4205efa,
+ 0xf4205efa, 0xf0000000,
+ 0x80788478, 0xf4205e7a,
0xf0000000, 0x80788478,
- 0xf4205e7a, 0xf0000000,
- 0x80788478, 0xf4205cfa,
+ 0xf4205cfa, 0xf0000000,
+ 0x80788478, 0xf4205bba,
0xf0000000, 0x80788478,
+ 0xbf89fc07, 0xb96ef814,
0xf4205bba, 0xf0000000,
0x80788478, 0xbf89fc07,
- 0xb96ef814, 0xf4205bba,
- 0xf0000000, 0x80788478,
- 0xbf89fc07, 0xb96ef815,
- 0xbefd006f, 0xbefe0070,
- 0xbeff0071, 0x8b6f7bff,
- 0x000003ff, 0xb96f4803,
- 0x8b6f7bff, 0xfffff800,
- 0x856f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee3b05,
- 0x806e816e, 0xbf0d9972,
- 0xbfa20002, 0x846e896e,
- 0xbfa00001, 0x846e8a6e,
- 0xb8ef1e06, 0x846f8a6f,
- 0x806e6f6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x8b6fff6f,
- 0x0000ffff, 0xf4085c37,
- 0xf8000050, 0xf4085d37,
- 0xf8000060, 0xf4005e77,
- 0xf8000074, 0xbf89fc07,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb96ef815, 0xbefd006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x8b6f7bff, 0x000003ff,
+ 0xb96f4803, 0x8b6f7bff,
+ 0xfffff800, 0x856f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee3b05, 0x806e816e,
+ 0xbf0d9972, 0xbfa20002,
+ 0x846e896e, 0xbfa00001,
+ 0x846e8a6e, 0xb8ef1e06,
+ 0x846f8a6f, 0x806e6f6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x8b6fff6f, 0x0000ffff,
+ 0xf4085c37, 0xf8000050,
+ 0xf4085d37, 0xf8000060,
+ 0xf4005e77, 0xf8000074,
+ 0xbf89fc07, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb8eef802,
+ 0xbf0d866e, 0xbfa20002,
+ 0xb97af802, 0xbe80486c,
0xb97af802, 0xbe804a6c,
0xbfb00000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 250ab007399b..0f81670f6f9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -43,12 +43,14 @@
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
+#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
+var SQ_WAVE_STATUS_TRAP_EN_SHIFT = 6
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -183,6 +185,13 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+#if SW_SA_TRAP
+ // If ttmp1[31] is set then trap may occur early.
+ // Spin wait until SAVECTX exception is raised.
+ s_bitcmp1_b32 s_save_pc_hi, 31
+ s_cbranch_scc1 L_CHECK_SAVE
+#endif
+
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
s_cbranch_scc0 L_NOT_HALTED
@@ -1061,8 +1070,20 @@ L_RESTORE_HWREG:
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+#if SW_SA_TRAP
+ // If traps are enabled then return to the shader with PRIV=0.
+ // Otherwise retain PRIV=1 for subsequent context save requests.
+ s_getreg_b32 s_restore_tmp, hwreg(HW_REG_STATUS)
+ s_bitcmp1_b32 s_restore_tmp, SQ_WAVE_STATUS_TRAP_EN_SHIFT
+ s_cbranch_scc1 L_RETURN_WITHOUT_PRIV
+
s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ s_setpc_b64 [s_restore_pc_lo, s_restore_pc_hi]
+L_RETURN_WITHOUT_PRIV:
+#endif
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index dc774ddf3445..5feaba6a77de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -327,6 +327,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+ err = -ENOMEM;
+ goto err_alloc_doorbells;
+ }
+
/* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work
* on unmapped queues for usermode queue oversubscription (no aggregated doorbell)
*/
@@ -404,6 +410,7 @@ err_create_queue:
if (wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
+err_alloc_doorbells:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
@@ -869,14 +876,11 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_wait_events_args *args = data;
- int err;
- err = kfd_wait_on_events(p, args->num_events,
+ return kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
&args->timeout, &args->wait_result);
-
- return err;
}
static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
struct kfd_process *p, void *data)
@@ -1092,6 +1096,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
offset = kfd_get_process_doorbells(pdd);
+ if (!offset) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
if (args->size != PAGE_SIZE) {
err = -EINVAL;
@@ -1576,6 +1584,8 @@ static int kfd_ioctl_smi_events(struct file *filep,
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
static int kfd_ioctl_set_xnack_mode(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1586,22 +1596,29 @@ static int kfd_ioctl_set_xnack_mode(struct file *filep,
if (args->xnack_enabled >= 0) {
if (!list_empty(&p->pqm.queues)) {
pr_debug("Process has user queues running\n");
- mutex_unlock(&p->mutex);
- return -EBUSY;
+ r = -EBUSY;
+ goto out_unlock;
}
- if (args->xnack_enabled && !kfd_process_xnack_mode(p, true))
+
+ if (p->xnack_enabled == args->xnack_enabled)
+ goto out_unlock;
+
+ if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
r = -EPERM;
- else
- p->xnack_enabled = args->xnack_enabled;
+ goto out_unlock;
+ }
+
+ r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
} else {
args->xnack_enabled = p->xnack_enabled;
}
+
+out_unlock:
mutex_unlock(&p->mutex);
return r;
}
-#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_svm_args *args = data;
@@ -1621,6 +1638,11 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
return r;
}
#else
+static int kfd_ioctl_set_xnack_mode(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ return -EPERM;
+}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
return -EPERM;
@@ -2145,6 +2167,12 @@ static int criu_restore_devices(struct kfd_process *p,
ret = PTR_ERR(pdd);
goto exit;
}
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
}
/*
@@ -2173,6 +2201,8 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
+ if (!offset)
+ return -ENOMEM;
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
/* MMIO BOs need remapped bus address */
if (bo_bucket->size != PAGE_SIZE) {
@@ -2847,7 +2877,6 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
- int ret;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2867,12 +2896,11 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
process->pasid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
- ret = io_remap_pfn_range(vma,
+ return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
PAGE_SIZE,
vma->vm_page_prot);
- return ret;
}
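
The new err_alloc_doorbells label plugs into the goto-based unwind that kfd_ioctl_create_queue() already uses: each acquisition gets a matching label, and a failure jumps past the cleanup for anything that was never acquired. A minimal standalone sketch of that idiom, with made-up resource names rather than the driver's own:

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *bufs;
	void *doorbells;	/* hypothetical second resource */
};

/* Returns 0 on success; on failure nothing acquired so far is leaked. */
static int ctx_setup(struct ctx *c)
{
	c->bufs = malloc(64);
	if (!c->bufs)
		goto err_alloc_bufs;

	c->doorbells = malloc(128);	/* second step, added later in the function's life */
	if (!c->doorbells)
		goto err_alloc_doorbells;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_alloc_doorbells:
	free(c->bufs);
err_alloc_bufs:
	return -1;
}

int main(void)
{
	struct ctx c = { 0 };
	int r = ctx_setup(&c);

	printf("setup: %d\n", r);
	if (r == 0) {
		free(c.doorbells);
		free(c.bufs);
	}
	return 0;
}
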
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index a5409531a2fd..cd5f8b219bf9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1522,6 +1522,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
pcache_info = cache_info;
num_of_cache_types =
kfd_fill_gpu_cache_info_from_gfx_config(kdev, pcache_info);
@@ -2283,7 +2284,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
/* Fill in Subtype: IO_LINKS
* Only direct links are added here which is Link from GPU to
- * to its NUMA node. Indirect links are added by userspace.
+ * its NUMA node. Indirect links are added by userspace.
*/
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
cache_mem_filled);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 22c0929d410b..65a1d4f9004b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -91,6 +91,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
kfd->device_info.num_sdma_queues_per_engine = 8;
break;
default:
@@ -103,6 +104,7 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
switch (sdma_version) {
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
/* Reserve 1 for paging and 1 for gfx */
kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
@@ -150,6 +152,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
break;
default:
@@ -399,6 +402,11 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
gfx_target_version = 110002;
f2g = &gfx_v11_kfd2kgd;
break;
+ case IP_VERSION(11, 0, 3):
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
+ f2g = &gfx_v11_kfd2kgd;
+ break;
default:
break;
}
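
The "BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ..." comment above describes an interleaved layout where bit index = queue * num_engines + engine. The driver computes its actual mask in get_reserved_sdma_queues_bitmap(); the sketch below only illustrates the layout the comment describes, with made-up engine and queue counts:

#include <stdint.h>
#include <stdio.h>

/*
 * Interleaved layout: BIT(0) = engine-0 queue-0, BIT(1) = engine-1 queue-0,
 * BIT(2) = engine-0 queue-1, ...  i.e. bit = queue * num_engines + engine.
 */
static uint64_t reserved_sdma_bitmap(unsigned int num_engines,
				     unsigned int reserved_per_engine)
{
	uint64_t mask = 0;
	unsigned int q, e;

	for (q = 0; q < reserved_per_engine; q++)
		for (e = 0; e < num_engines; e++)
			mask |= 1ULL << (q * num_engines + e);
	return mask;
}

int main(void)
{
	/* Illustrative part with 2 SDMA engines, reserving queue 0 on each. */
	printf("reserved bitmap: 0x%llx\n",
	       (unsigned long long)reserved_sdma_bitmap(2, 1));
	return 0;
}
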
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e83725a28106..ecb4c3abc629 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.is_kfd_process = 1;
+ queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+ queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
@@ -1240,6 +1242,24 @@ static void init_interrupts(struct device_queue_manager *dqm)
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
}
+static void init_sdma_bitmaps(struct device_queue_manager *dqm)
+{
+ unsigned int num_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
+ get_num_sdma_queues(dqm));
+ unsigned int num_xgmi_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
+ get_num_xgmi_sdma_queues(dqm));
+
+ if (num_sdma_queues)
+ dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
+ if (num_xgmi_sdma_queues)
+ dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
+
+ dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
+ pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
+}
+
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
int pipe, queue;
@@ -1268,11 +1288,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
- dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
+ init_sdma_bitmaps(dqm);
return 0;
}
@@ -1450,9 +1466,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
- uint64_t num_sdma_queues;
- uint64_t num_xgmi_sdma_queues;
-
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
@@ -1461,24 +1474,10 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
-
- num_sdma_queues = get_num_sdma_queues(dqm);
- if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
- dqm->sdma_bitmap = ULLONG_MAX;
- else
- dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
-
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
- if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
- dqm->xgmi_sdma_bitmap = ULLONG_MAX;
- else
- dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
-
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ init_sdma_bitmaps(dqm);
+
return 0;
}
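
The new init_sdma_bitmaps() helper replaces the open-coded "~0ULL >> (64 - n)" masks, which shift a 64-bit value by 64 (undefined behaviour) whenever a queue count is zero, with a guarded GENMASK_ULL() followed by clearing the reserved queues. A standalone sketch of that idiom; the reserved mask and queue counts are illustrative, and the macro is a local stand-in for the kernel's GENMASK_ULL():

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(h, l), valid for 0 <= l <= h <= 63. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint64_t build_queue_bitmap(unsigned int num_queues,
				   uint64_t reserved_mask)
{
	uint64_t bitmap = 0;

	/* Cap at the width of the bitmap word. */
	if (num_queues > 64)
		num_queues = 64;

	/* GENMASK_ULL(n - 1, 0) is only well defined for n >= 1. */
	if (num_queues)
		bitmap = GENMASK_ULL(num_queues - 1, 0);

	/* Reserved queues are never handed out. */
	return bitmap & ~reserved_mask;
}

int main(void)
{
	/* Illustrative: 8 queues with queues 0 and 1 reserved. */
	printf("bitmap: 0x%llx\n",
	       (unsigned long long)build_queue_bitmap(8, 0x3));
	return 0;
}
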
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cb3d2ccc5100..cd4e61bf0493 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -157,6 +157,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(pdd);
+ if (!address)
+ return -ENOMEM;
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP;
@@ -275,6 +277,13 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
{
+ if (!pdd->doorbell_index) {
+ int r = kfd_alloc_process_doorbells(pdd->dev,
+ &pdd->doorbell_index);
+ if (r)
+ return 0;
+ }
+
return pdd->dev->doorbell_base +
pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
}
@@ -294,6 +303,9 @@ int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_inde
if (r > 0)
*doorbell_index = r;
+ if (r < 0)
+ pr_err("Failed to allocate process doorbells\n");
+
return r;
}
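
kfd_get_process_doorbells() now allocates the process doorbell index lazily on first use and returns 0 when that allocation fails, which is why the callers in this series start treating a zero offset as -ENOMEM. A minimal sketch of that allocate-on-first-use shape; the structure and allocator below are hypothetical, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct proc_ctx {
	uint64_t base;		/* non-zero once the doorbell aperture is known */
	unsigned int index;	/* 0 means "not allocated yet" */
};

/* Hypothetical index allocator: returns 0 on success, negative on failure. */
static int alloc_index(unsigned int *index)
{
	static unsigned int next = 1;

	*index = next++;
	return 0;
}

/*
 * Returns the per-process doorbell address, allocating the index on first
 * use.  A return value of 0 signals failure, which callers map to -ENOMEM.
 */
static uint64_t get_doorbell_addr(struct proc_ctx *p, unsigned int slice)
{
	if (!p->index && alloc_index(&p->index) < 0)
		return 0;

	return p->base + (uint64_t)p->index * slice;
}

int main(void)
{
	struct proc_ctx p = { .base = 0x1000, .index = 0 };

	printf("doorbell at 0x%llx\n",
	       (unsigned long long)get_doorbell_addr(&p, 0x800));
	return 0;
}
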
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index a6fcbeeb7428..0d53f6067422 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
- NULL, 0))*/)
- return;
+ NULL, 0)))
+ return;*/
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b059a77b6081..c70c026c9a93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -322,12 +322,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
for (i = j = 0; i < npages; i++) {
struct page *spage;
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
@@ -522,9 +523,6 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
prange->start, prange->last, best_loc);
- /* FIXME: workaround for page locking bug with invalid pages */
- svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
-
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
@@ -886,7 +884,7 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
unsigned long addr = vmf->address;
- struct vm_area_struct *vma;
+ struct svm_range_bo *svm_bo;
enum svm_work_list_ops op;
struct svm_range *parent;
struct svm_range *prange;
@@ -894,29 +892,42 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
struct mm_struct *mm;
int r = 0;
- vma = vmf->vma;
- mm = vma->vm_mm;
+ svm_bo = vmf->page->zone_device_data;
+ if (!svm_bo) {
+ pr_debug("failed get device page at addr 0x%lx\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+ if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+
+ mm = svm_bo->eviction_fence->mm;
+ if (mm != vmf->vma->vm_mm)
+ pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
- p = kfd_lookup_process_by_mm(vma->vm_mm);
+ p = kfd_lookup_process_by_mm(mm);
if (!p) {
pr_debug("failed find process at fault address 0x%lx\n", addr);
- return VM_FAULT_SIGBUS;
+ r = VM_FAULT_SIGBUS;
+ goto out_mmput;
}
if (READ_ONCE(p->svms.faulting_task) == current) {
pr_debug("skipping ram migration\n");
- kfd_unref_process(p);
- return 0;
+ r = 0;
+ goto out_unref_process;
}
- addr >>= PAGE_SHIFT;
+
pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+ addr >>= PAGE_SHIFT;
mutex_lock(&p->svms.lock);
prange = svm_range_from_addr(&p->svms, addr, &parent);
if (!prange) {
- pr_debug("cannot find svm range at 0x%lx\n", addr);
+ pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
- goto out;
+ goto out_unlock_svms;
}
mutex_lock(&parent->migrate_mutex);
@@ -938,10 +949,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
goto out_unlock_prange;
}
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
if (r)
- pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
- prange, prange->start, prange->last);
+ pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
+ r, prange->svms, prange, prange->start, prange->last);
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && parent == prange)
@@ -955,9 +967,12 @@ out_unlock_prange:
if (prange != parent)
mutex_unlock(&prange->migrate_mutex);
mutex_unlock(&parent->migrate_mutex);
-out:
+out_unlock_svms:
mutex_unlock(&p->svms.lock);
+out_unref_process:
kfd_unref_process(p);
+out_mmput:
+ mmput(mm);
pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index b8e14c2cc295..26b53b6d673e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -177,14 +181,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
-static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- struct queue_properties *p, struct mm_struct *mms)
-{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
- queue_id, p->doorbell_off);
-}
-
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
@@ -256,31 +252,6 @@ static uint32_t read_doorbell_id(void *mqd)
return m->queue_doorbell_id0;
}
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->adev, mqd, type, timeout,
- pipe_id, queue_id);
-}
-
-static void free_mqd(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->adev, queue_address,
- pipe_id, queue_id);
-}
-
static int get_wave_state(struct mqd_manager *mm, void *mqd,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
@@ -349,15 +320,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm->update_mqd(mm, m, q, NULL);
}
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- struct queue_properties *p, struct mm_struct *mms)
-{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
- (uint32_t __user *)p->write_ptr,
- mms);
-}
-
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -389,25 +351,6 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-/*
- * * preempt type here is ignored because there is only one way
- * * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
-}
-
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
-}
-
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
@@ -445,11 +388,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
- mqd->free_mqd = free_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
@@ -462,10 +405,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -476,11 +419,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
- mqd->free_mqd = free_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -491,10 +434,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd_sdma;
+ mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
- mqd->destroy_mqd = destroy_mqd_sdma;
- mqd->is_occupied = is_occupied_sdma;
+ mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+ mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 6c83a519b3a1..951b63677248 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1499,11 +1499,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
if (!pdd)
return NULL;
- if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
- pr_err("Failed to alloc doorbell for pdd\n");
- goto err_free_pdd;
- }
-
if (init_doorbell_bitmap(&pdd->qpd, dev)) {
pr_err("Failed to init doorbell for process\n");
goto err_free_pdd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6e3e7f54381b..5137476ec18e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -857,6 +857,13 @@ int kfd_criu_restore_queue(struct kfd_process *p,
ret = -EINVAL;
goto exit;
}
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
/* data stored in this order: mqd, ctl_stack */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 11074cc8c333..f5913ba22174 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -278,7 +278,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
svm_range_free_dma_mappings(prange);
if (update_mem_usage && !p->xnack_enabled) {
- pr_debug("unreserve mem limit: %lld\n", size);
+ pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
}
@@ -2956,6 +2956,64 @@ out:
return r;
}
+int
+svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
+{
+ struct svm_range *prange, *pchild;
+ uint64_t reserved_size = 0;
+ uint64_t size;
+ int r = 0;
+
+ pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
+
+ mutex_lock(&p->svms.lock);
+
+ list_for_each_entry(prange, &p->svms.list, list) {
+ svm_range_lock(prange);
+ list_for_each_entry(pchild, &prange->child_list, child_list) {
+ size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+ }
+
+ size = (prange->last - prange->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+out_unlock:
+ svm_range_unlock(prange);
+ if (r)
+ break;
+ }
+
+ if (r)
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ else
+ /* Changing the xnack mode must be done inside the svms lock to avoid
+ * racing with svm_range_deferred_list_work unreserving memory in parallel.
+ */
+ p->xnack_enabled = xnack_enabled;
+
+ mutex_unlock(&p->svms.lock);
+ return r;
+}
+
void svm_range_list_fini(struct kfd_process *p)
{
struct svm_range *prange;
@@ -3181,28 +3239,6 @@ out:
return best_loc;
}
-/* FIXME: This is a workaround for page locking bug when some pages are
- * invalid during migration to VRAM
- */
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
- void *owner)
-{
- struct hmm_range *hmm_range;
- int r;
-
- if (prange->validated_once)
- return;
-
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- prange->start << PAGE_SHIFT,
- prange->npages, &hmm_range,
- false, true, owner);
- if (!r) {
- amdgpu_hmm_range_get_pages_done(hmm_range);
- prange->validated_once = true;
- }
-}
-
/* svm_range_trigger_migration - start page migration if prefetch loc changed
* @mm: current process mm_struct
* @prange: svm range structure
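
svm_range_switch_xnack_reserve_mem() reserves memory range by range, accumulates reserved_size as it goes, and on the first failure unreserves only what it actually managed to reserve. A compact sketch of that accumulate-then-roll-back pattern, with placeholder reserve/unreserve helpers standing in for the amdgpu_amdkfd_* accounting calls:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder accounting helpers; the real ones track a global memory limit. */
static uint64_t budget = 3000;

static int reserve(uint64_t size)
{
	if (size > budget)
		return -1;
	budget -= size;
	return 0;
}

static void unreserve(uint64_t size)
{
	budget += size;
}

static int reserve_all(const uint64_t *sizes, size_t n)
{
	uint64_t reserved = 0;
	size_t i;
	int r = 0;

	for (i = 0; i < n; i++) {
		r = reserve(sizes[i]);
		if (r)
			break;
		reserved += sizes[i];
	}

	/* On failure, give back exactly what was taken so far. */
	if (r)
		unreserve(reserved);
	return r;
}

int main(void)
{
	uint64_t sizes[] = { 1000, 1500, 1000 };	/* the third request fails */
	int r = reserve_all(sizes, 3);

	printf("reserve_all: %d, budget left: %llu\n",
	       r, (unsigned long long)budget);
	return 0;
}
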
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index cfac13ad06ef..7a33b93f9df6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -181,8 +181,6 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
- void *owner);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -205,6 +203,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
void svm_range_set_max_pages(struct amdgpu_device *adev);
+int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);
#else
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 413d8c6d592f..6925e0280dbe 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -28,7 +28,6 @@ config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
depends on DRM_AMDGPU_SI
depends on DRM_AMD_DC
- default n
help
Choose this option to enable new AMD DC support for SI asics
by default. This includes Tahiti, Pitcairn, Cape Verde, Oland.
@@ -43,7 +42,6 @@ config DEBUG_KERNEL_DC
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
- default n
depends on DEBUG_FS
depends on DRM_AMD_DC_DCN
help
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5140d9c2bf3b..4c73727e0b7d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -88,6 +88,9 @@
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <acpi/video.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
@@ -97,8 +100,6 @@
#include "soc15_common.h"
#include "vega10_ip_offset.h"
-#include "soc15_common.h"
-
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -1295,13 +1296,21 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
if (hpd_rx_offload_wq[i].wq == NULL) {
DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
- return NULL;
+ goto out_err;
}
spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
}
return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
}
struct amdgpu_stutter_quirk {
@@ -1529,7 +1538,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
- adev->dm.dc->debug.disable_dsc_edp = true;
}
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
@@ -2807,20 +2815,18 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
- .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
- u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct dc_link *link = NULL;
- static const u8 pre_computed_values[] = {
- 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
- 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+ struct drm_luminance_range_info *luminance_range;
int i;
if (!aconnector || !aconnector->dc_link)
@@ -2842,8 +2848,6 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps = &dm->backlight_caps[i];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
- min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 /*||
caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
@@ -2855,31 +2859,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
else if (amdgpu_backlight == 1)
caps->aux_support = true;
- /* From the specification (CTA-861-G), for calculating the maximum
- * luminance we need to use:
- * Luminance = 50*2**(CV/32)
- * Where CV is a one-byte value.
- * For calculating this expression we may need float point precision;
- * to avoid this complexity level, we take advantage that CV is divided
- * by a constant. From the Euclids division algorithm, we know that CV
- * can be written as: CV = 32*q + r. Next, we replace CV in the
- * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
- * need to pre-compute the value of r/32. For pre-computing the values
- * We just used the following Ruby line:
- * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
- * The results of the above expressions can be verified at
- * pre_computed_values.
- */
- q = max_avg >> 5;
- r = max_avg % 32;
- max = (1 << q) * pre_computed_values[r];
-
- // min luminance: maxLum * (CV/255)^2 / 100
- q = DIV_ROUND_CLOSEST(min_cll, 255);
- min = max * DIV_ROUND_CLOSEST((q * q), 100);
-
- caps->aux_max_input_signal = max;
- caps->aux_min_input_signal = min;
+ luminance_range = &conn_base->display_info.luminance_range;
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
}
void amdgpu_dm_update_connector_after_detect(
@@ -4058,6 +4040,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
+ }
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
@@ -4759,7 +4748,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
- plane_info->layer_index = 0;
+ plane_info->layer_index = plane_state->normalized_zpos;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
@@ -4827,7 +4816,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->layer_index = plane_info.layer_index;
dc_plane_state->flip_int_enabled = true;
/*
@@ -5629,7 +5618,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
@@ -6321,10 +6311,17 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
drm_atomic_get_old_connector_state(state, conn);
struct drm_crtc *crtc = new_con_state->crtc;
struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
trace_amdgpu_dm_connector_atomic_check(new_con_state);
+ if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
if (!crtc)
return 0;
@@ -6408,6 +6405,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
enum dc_color_depth color_depth;
int clock, bpp = 0;
bool is_y420 = false;
@@ -6421,6 +6419,13 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
return 0;
+ mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ if (!mst_state->pbn_div)
+ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
@@ -6432,11 +6437,10 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
clock = adjusted_mode->clock;
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
}
- dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
- mst_mgr,
- mst_port,
- dm_new_connector_state->pbn,
- dm_mst_get_pbn_divider(aconnector->dc_link));
+
+ dm_new_connector_state->vcpi_slots =
+ drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+ dm_new_connector_state->pbn);
if (dm_new_connector_state->vcpi_slots < 0) {
DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
return dm_new_connector_state->vcpi_slots;
@@ -6506,18 +6510,12 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
- drm_dp_mst_atomic_enable_dsc(state,
- aconnector->port,
- dm_conn_state->pbn,
- 0,
+ drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
false);
continue;
}
- vcpi = drm_dp_mst_atomic_enable_dsc(state,
- aconnector->port,
- pbn, pbn_div,
- true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
if (vcpi < 0)
return vcpi;
@@ -7387,11 +7385,6 @@ static void update_freesync_state_on_stream(
&vrr_infopacket,
pack_sdp_v1_3);
- new_crtc_state->freesync_timing_changed |=
- (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
- &vrr_params.adjust,
- sizeof(vrr_params.adjust)) != 0);
-
new_crtc_state->freesync_vrr_info_changed |=
(memcmp(&new_crtc_state->vrr_infopacket,
&vrr_infopacket,
@@ -7400,7 +7393,6 @@ static void update_freesync_state_on_stream(
acrtc->dm_irq_params.vrr_params = vrr_params;
new_crtc_state->vrr_infopacket = vrr_infopacket;
- new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
new_stream->vrr_infopacket = vrr_infopacket;
if (new_crtc_state->freesync_vrr_info_changed)
@@ -7463,10 +7455,6 @@ static void update_stream_irq_parameters(
new_stream,
&config, &vrr_params);
- new_crtc_state->freesync_timing_changed |=
- (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
- &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
-
new_crtc_state->freesync_config = config;
/* Copy state for access from DM IRQ handler */
acrtc->dm_irq_params.freesync_config = config;
@@ -7992,6 +7980,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_ERROR("Waiting for fences timed out!");
drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
dm_state = dm_atomic_get_new_state(state);
if (dm_state && dm_state->context) {
@@ -8390,7 +8379,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_release_state(dc_state_temp);
}
-
static int dm_force_atomic_commit(struct drm_connector *connector)
{
int ret = 0;
@@ -9321,6 +9309,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ *
* @dev: The DRM device
* @state: The atomic state to commit
*
@@ -9361,8 +9350,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
- struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_mst_topology_mgr *mgr;
#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -9379,9 +9366,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
/* Skip connectors that are disabled or part of modeset already. */
- if (!old_con_state->crtc && !new_con_state->crtc)
- continue;
-
if (!new_con_state->crtc)
continue;
@@ -9485,6 +9469,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ drm_atomic_normalize_zpos(dev, state);
+
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -9601,33 +9593,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* set the slot info for each mst_state based on the link encoding format */
- for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
- struct amdgpu_dm_connector *aconnector;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u8 link_coding_cap;
-
- if (!mgr->mst_state )
- continue;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- int id = connector->index;
-
- if (id == mst_state->mgr->conn_base_id) {
- aconnector = to_amdgpu_dm_connector(connector);
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
- drm_dp_mst_update_slots(mst_state, link_coding_cap);
-
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- }
-#endif
/**
* Streams and planes are reset when there are changes that affect
* bandwidth. Anything that affects bandwidth needs to go through
@@ -9935,8 +9900,19 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
return valid_vsdb_found ? i : -ENODEV;
}
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * Amdgpu supports FreeSync on DP and HDMI displays, and it needs to keep track
+ * of some of the display information in the internal data struct used by
+ * amdgpu_dm. This function checks the connector type to determine how to set
+ * the FreeSync parameters.
+ */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid)
{
int i = 0;
struct detailed_timing *timing;
@@ -9949,8 +9925,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
- bool freesync_capable = false;
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
@@ -9979,7 +9955,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
-
if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
|| sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 90b306a1dd68..b5ce15c43bcc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -598,6 +598,10 @@ struct amdgpu_dm_connector {
* The 'current' sink is in dc_link->sink. */
struct dc_sink *dc_sink;
struct dc_link *dc_link;
+
+ /**
+ * @dc_em_sink: Reference to the emulated (virtual) sink.
+ */
struct dc_sink *dc_em_sink;
/* DM only */
@@ -610,7 +614,16 @@ struct amdgpu_dm_connector {
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
- int min_vfreq ;
+ /**
+ * @min_vfreq: Minimum frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
+ int min_vfreq;
+
+ /**
+ * @max_vfreq: Maximum frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
int max_vfreq ;
int pixel_clock_mhz;
@@ -668,7 +681,6 @@ struct dm_crtc_state {
int crc_skip_count;
- bool freesync_timing_changed;
bool freesync_vrr_info_changed;
bool dsc_force_changed;
@@ -705,11 +717,34 @@ struct dm_connector_state {
uint64_t pbn;
};
+/**
+ * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
+ *
+ * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
+ * struct is useful to keep track of the display-specific information about
+ * FreeSync.
+ */
struct amdgpu_hdmi_vsdb_info {
- unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */
- bool freesync_supported; /* FreeSync Supported */
- unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */
- unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */
+ /**
+ * @amd_vsdb_version: Vendor Specific Data Block Version, should be
+ * used to determine which Vendor Specific InfoFrame (VSIF) to send.
+ */
+ unsigned int amd_vsdb_version;
+
+ /**
+ * @freesync_supported: FreeSync Supported.
+ */
+ bool freesync_supported;
+
+ /**
+ * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
+ */
+ unsigned int min_refresh_rate_hz;
+
+ /**
+ * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
+ */
+ unsigned int max_refresh_rate_hz;
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a71177305bcd..a4cb23d059bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -29,7 +29,9 @@
#include "modules/color/color_gamma.h"
#include "basics/conversion.h"
-/*
+/**
+ * DOC: overview
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -71,8 +73,8 @@
#define MAX_DRM_LUT_VALUE 0xFFFF
-/*
- * Initialize the color module.
+/**
+ * amdgpu_dm_init_color_mod - Initialize the color module.
*
* We're not using the full color module, only certain components.
* Only call setup functions for components that we need.
@@ -82,7 +84,14 @@ void amdgpu_dm_init_color_mod(void)
setup_x_points_distribution();
}
-/* Extracts the DRM lut and lut size from a blob. */
+/**
+ * __extract_blob_lut - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
static const struct drm_color_lut *
__extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
{
@@ -90,13 +99,18 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
return blob ? (struct drm_color_lut *)blob->data : NULL;
}
-/*
- * Return true if the given lut is a linear mapping of values, i.e. it acts
- * like a bypass LUT.
+/**
+ * __is_lut_linear - check if the given lut is a linear mapping of values
+ * @lut: given lut to check values
+ * @size: lut size
*
* It is considered linear if the lut represents:
- * f(a) = (0xFF00/MAX_COLOR_LUT_ENTRIES-1)a; for integer a in
- * [0, MAX_COLOR_LUT_ENTRIES)
+ * f(a) = (0xFF00 / (MAX_COLOR_LUT_ENTRIES - 1)) * a; for integer a in
+ * [0, MAX_COLOR_LUT_ENTRIES)
+ *
+ * Returns:
+ * True if the given lut is a linear mapping of values, i.e. it acts like a
+ * bypass LUT. Otherwise, false.
*/
static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
{
@@ -119,9 +133,13 @@ static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
return true;
}
-/*
- * Convert the drm_color_lut to dc_gamma. The conversion depends on the size
- * of the lut - whether or not it's legacy.
+/**
+ * __drm_lut_to_dc_gamma - convert the drm_color_lut to dc_gamma.
+ * @lut: DRM lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ * @is_legacy: legacy or atomic gamma
+ *
+ * The conversion depends on the size of the lut - whether or not it's legacy.
*/
static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
struct dc_gamma *gamma, bool is_legacy)
@@ -154,8 +172,11 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
}
-/*
- * Converts a DRM CTM to a DC CSC float matrix.
+/**
+ * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
+ * @ctm: DRM color transformation matrix
+ * @matrix: DC CSC float matrix
+ *
* The matrix needs to be a 3x4 (12 entry) matrix.
*/
static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
@@ -189,7 +210,18 @@ static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
}
}
-/* Calculates the legacy transfer function - only for sRGB input space. */
+/**
+ * __set_legacy_tf - Calculates the legacy transfer function
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Only for sRGB input space
+ *
+ * Returns:
+ * 0 on success, -ENOMEM on failure
+ */
static int __set_legacy_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size,
bool has_rom)
@@ -218,7 +250,16 @@ static int __set_legacy_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-/* Calculates the output transfer function based on expected input space. */
+/**
+ * __set_output_tf - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 on success. -ENOMEM on failure.
+ */
static int __set_output_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size,
bool has_rom)
@@ -262,7 +303,16 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-/* Caculates the input transfer function based on expected input space. */
+/**
+ * __set_input_tf - calculates the input transfer function based on expected
+ * input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 on success. -ENOMEM on failure.
+ */
static int __set_input_tf(struct dc_transfer_func *func,
const struct drm_color_lut *lut, uint32_t lut_size)
{
@@ -285,13 +335,14 @@ static int __set_input_tf(struct dc_transfer_func *func,
}
/**
- * amdgpu_dm_verify_lut_sizes
+ * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes
* @crtc_state: the DRM CRTC state
*
- * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
- * the expected size.
+ * Verifies that the Degamma and Gamma LUTs attached to the &crtc_state
+ * are of the expected size.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. -EINVAL if any lut sizes are invalid.
*/
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
{
@@ -327,9 +378,9 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
* of the HW blocks as long as the CRTC CTM always comes before the
* CRTC RGM and after the CRTC DGM.
*
- * The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
@@ -338,7 +389,8 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
* management at once we have to either restrict the usage of CRTC properties
* or blend adjustments together.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. Error code if setup fails.
*/
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
{
@@ -393,7 +445,7 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
if (r)
return r;
} else if (has_regamma) {
- /* CRTC RGM goes into RGM LUT. */
+ /* If atomic regamma, CRTC RGM goes into RGM LUT. */
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
@@ -450,9 +502,10 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* Update the underlying dc_stream_state's input transfer function (ITF) in
* preparation for hardware commit. The transfer function used depends on
- * the prepartion done on the stream for color management.
+ * the preparation done on the stream for color management.
*
- * Returns 0 on success.
+ * Returns:
+ * 0 on success. -ENOMEM if memory allocation fails.
*/
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0e48824f55e3..ee242d9d8b06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3288,6 +3288,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
+ dput(dir);
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index a0154a5f7183..f0b01c8dc4a6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>
@@ -153,41 +154,28 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
return result;
}
-static void get_payload_table(
- struct amdgpu_dm_connector *aconnector,
- struct dp_mst_stream_allocation_table *proposed_table)
+static void
+fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state,
+ struct amdgpu_dm_connector *aconnector,
+ struct dc_dp_mst_stream_allocation_table *table)
{
- int i;
- struct drm_dp_mst_topology_mgr *mst_mgr =
- &aconnector->mst_port->mst_mgr;
-
- mutex_lock(&mst_mgr->payload_lock);
-
- proposed_table->stream_count = 0;
-
- /* number of active streams */
- for (i = 0; i < mst_mgr->max_payloads; i++) {
- if (mst_mgr->payloads[i].num_slots == 0)
- break; /* end of vcp_id table */
-
- ASSERT(mst_mgr->payloads[i].payload_state !=
- DP_PAYLOAD_DELETE_LOCAL);
-
- if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
- mst_mgr->payloads[i].payload_state ==
- DP_PAYLOAD_REMOTE) {
-
- struct dp_mst_stream_allocation *sa =
- &proposed_table->stream_allocations[
- proposed_table->stream_count];
-
- sa->slot_count = mst_mgr->payloads[i].num_slots;
- sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
- proposed_table->stream_count++;
- }
+ struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+ struct dc_dp_mst_stream_allocation *sa;
+ struct drm_dp_mst_atomic_payload *payload;
+
+ /* Fill payload info */
+ list_for_each_entry(payload, &mst_state->payloads, next) {
+ if (payload->delete)
+ continue;
+
+ sa = &new_table.stream_allocations[new_table.stream_count];
+ sa->slot_count = payload->time_slots;
+ sa->vcp_id = payload->vcpi;
+ new_table.stream_count++;
}
- mutex_unlock(&mst_mgr->payload_lock);
+ /* Overwrite the old table */
+ *table = new_table;
}
void dm_helpers_dp_update_branch_info(
@@ -201,15 +189,13 @@ void dm_helpers_dp_update_branch_info(
bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dc_context *ctx,
const struct dc_stream_state *stream,
- struct dp_mst_stream_allocation_table *proposed_table,
+ struct dc_dp_mst_stream_allocation_table *proposed_table,
bool enable)
{
struct amdgpu_dm_connector *aconnector;
- struct dm_connector_state *dm_conn_state;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
- bool ret;
- u8 link_coding_cap = DP_8b_10b_ENCODING;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
/* Accessing the connector state is required for vcpi_slots allocation
@@ -220,40 +206,21 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
if (!aconnector || !aconnector->mst_port)
return false;
- dm_conn_state = to_dm_connector_state(aconnector->base.state);
-
mst_mgr = &aconnector->mst_port->mst_mgr;
-
- if (!mst_mgr->mst_state)
- return false;
-
- mst_port = aconnector->port;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
-#endif
-
- if (enable) {
-
- ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
- dm_conn_state->pbn,
- dm_conn_state->vcpi_slots);
- if (!ret)
- return false;
-
- } else {
- drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
- }
+ mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1);
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+ if (enable)
+ drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+ else
+ drm_dp_remove_payload(mst_mgr, mst_state, payload);
/* mst_mgr->payloads holds the VC payloads used to notify the MST branch via
* DPCD or AUX messages. Slots 1-63 are allocated in sequence for each
* stream, and the AMD ASIC stream slot allocation must follow the same
* sequence. Copy the DRM MST allocation to dc. */
-
- get_payload_table(aconnector, proposed_table);
+ fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table);
return true;
}
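For reference, the calls this hunk switches to are the atomic MST payload helpers added to drm around this kernel; a condensed sketch of the enable/disable sequence they imply, with error handling omitted:

	struct drm_dp_mst_topology_state *mst_state =
		to_drm_dp_mst_topology_state(mst_mgr->base.state);
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, aconnector->port);

	if (enable) {
		/* Step 1: program the DPCD VC payload table on the branch device. */
		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
		/* Step 2 (later, from dm_helpers_dp_mst_send_payload_allocation):
		 * trigger ACT and send the allocation message downstream. */
		drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload);
	} else {
		/* Teardown only needs the single remove call. */
		drm_dp_remove_payload(mst_mgr, mst_state, payload);
	}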
@@ -310,8 +277,9 @@ bool dm_helpers_dp_mst_send_payload_allocation(
bool enable)
{
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_atomic_payload *payload;
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
@@ -320,19 +288,16 @@ bool dm_helpers_dp_mst_send_payload_allocation(
if (!aconnector || !aconnector->mst_port)
return false;
- mst_port = aconnector->port;
-
mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
- if (!mst_mgr->mst_state)
- return false;
-
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
if (!enable) {
set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
}
- if (drm_dp_update_payload_part2(mst_mgr)) {
+ if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
set_flag, false);
} else {
@@ -342,9 +307,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
clr_flag, false);
}
- if (!enable)
- drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
-
return true;
}
@@ -729,8 +691,14 @@ bool dm_helpers_dp_write_dsc_enable(
const struct dc_stream_state *stream,
bool enable)
{
- uint8_t enable_dsc = enable ? 1 : 0;
+ static const uint8_t DSC_DISABLE;
+ static const uint8_t DSC_DECODING = 0x01;
+ static const uint8_t DSC_PASSTHROUGH = 0x02;
+
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_port *port;
+ uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE;
+ uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE;
uint8_t ret = 0;
if (!stream)
@@ -750,8 +718,39 @@ bool dm_helpers_dp_write_dsc_enable(
aconnector->dsc_aux, stream, enable_dsc);
#endif
- ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
- DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable");
+ port = aconnector->port;
+
+ if (enable) {
+ if (port->passthrough_aux) {
+ ret = drm_dp_dpcd_write(port->passthrough_aux,
+ DP_DSC_ENABLE,
+ &enable_passthrough, 1);
+ DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
+ ret);
+ }
+
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux,
+ DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n",
+ (port->passthrough_aux) ? "remote RX" :
+ "virtual dpcd",
+ ret);
+ } else {
+ ret = drm_dp_dpcd_write(aconnector->dsc_aux,
+ DP_DSC_ENABLE, &enable_dsc, 1);
+ DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n",
+ (port->passthrough_aux) ? "remote RX" :
+ "virtual dpcd",
+ ret);
+
+ if (port->passthrough_aux) {
+ ret = drm_dp_dpcd_write(port->passthrough_aux,
+ DP_DSC_ENABLE,
+ &enable_passthrough, 1);
+ DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
+ ret);
+ }
+ }
}
if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
@@ -768,7 +767,7 @@ bool dm_helpers_dp_write_dsc_enable(
#endif
}
- return (ret > 0);
+ return ret;
}
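The ordering encoded above is worth restating: on enable, pass-through is switched on at the branch device before decoding is switched on at the sink, and the two DPCD writes happen in the reverse order on disable. A condensed sketch of just that sequence (the helper name and its arguments are illustrative, not part of this patch):

static void dsc_passthrough_sequence(struct drm_dp_aux *dsc_aux,
				     struct drm_dp_aux *passthrough_aux,
				     bool enable)
{
	u8 decode = enable ? 0x01 : 0x00;	/* DSC_DECODING / DSC_DISABLE */
	u8 pass = enable ? 0x02 : 0x00;		/* DSC_PASSTHROUGH / DSC_DISABLE */

	if (enable) {
		/* Enable pass-through on the branch first, then decoding at the sink. */
		if (passthrough_aux)
			drm_dp_dpcd_write(passthrough_aux, DP_DSC_ENABLE, &pass, 1);
		drm_dp_dpcd_write(dsc_aux, DP_DSC_ENABLE, &decode, 1);
	} else {
		/* Tear down in the reverse order. */
		drm_dp_dpcd_write(dsc_aux, DP_DSC_ENABLE, &decode, 1);
		if (passthrough_aux)
			drm_dp_dpcd_write(passthrough_aux, DP_DSC_ENABLE, &pass, 1);
	}
}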
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
@@ -879,6 +878,34 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
//amdgpu_device_gpu_recover(dc_context->driver-context, NULL);
}
+void dm_helpers_init_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *panel_config,
+ struct dc_sink *sink)
+{
+ // Extra Panel Power Sequence
+ panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+ panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
+ panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
+ panel_config->pps.extra_post_t7_ms = 0;
+ panel_config->pps.extra_pre_t11_ms = 0;
+ panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
+ panel_config->pps.extra_post_OUI_ms = 0;
+ // Feature DSC
+ panel_config->dsc.disable_dsc_edp = false;
+ panel_config->dsc.force_dsc_edp_policy = 0;
+}
+
+void dm_helpers_override_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *panel_config)
+{
+ // Feature DSC
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ panel_config->dsc.disable_dsc_edp = true;
+ }
+}
+
void *dm_helpers_allocate_gpu_mem(
struct dc_context *ctx,
enum dc_gpu_mem_alloc_type type,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2e74ccf7df5b..6ff96b4bdda5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,6 +36,7 @@
#include "dm_helpers.h"
#include "dc_link_ddc.h"
+#include "dc_link_dp.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"
@@ -447,34 +448,13 @@ dm_dp_mst_detect(struct drm_connector *connector,
}
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct drm_crtc_state *new_crtc_state;
- struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr;
+ struct drm_dp_mst_port *mst_port = aconnector->port;
- mst_port = aconnector->port;
- mst_mgr = &aconnector->mst_port->mst_mgr;
-
- if (!old_conn_state->crtc)
- return 0;
-
- if (new_conn_state->crtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
- if (!new_crtc_state ||
- !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
- new_crtc_state->enable)
- return 0;
- }
-
- return drm_dp_atomic_release_vcpi_slots(state,
- mst_mgr,
- mst_port);
+ return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
}
static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
@@ -618,15 +598,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
aconnector->mst_mgr.cbs = &dm_mst_cbs;
- drm_dp_mst_topology_mgr_init(
- &aconnector->mst_mgr,
- adev_to_drm(dm->adev),
- &aconnector->dm_dp_aux.aux,
- 16,
- 4,
- max_link_enc_cap.lane_count,
- drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
- aconnector->connector_id);
+ drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
+ &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
@@ -731,6 +704,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
}
static bool increase_dsc_bpp(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_params *params,
struct dsc_mst_fairness_vars *vars,
@@ -743,12 +717,9 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
int min_initial_slack;
int next_index;
int remaining_to_increase = 0;
- int pbn_per_timeslot;
int link_timeslots_used;
int fair_pbn_alloc;
- pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);
-
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
@@ -779,46 +750,43 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state,
link_timeslots_used = 0;
for (i = 0; i < count; i++)
- link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);
+ link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div);
- fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;
+ fair_pbn_alloc =
+ (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div;
if (initial_slack[next_index] > fair_pbn_alloc) {
vars[next_index].pbn += fair_pbn_alloc;
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
} else {
vars[next_index].pbn -= fair_pbn_alloc;
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
}
} else {
vars[next_index].pbn += initial_slack[next_index];
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
} else {
vars[next_index].pbn -= initial_slack[next_index];
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- pbn_per_timeslot) < 0)
+ vars[next_index].pbn) < 0)
return false;
}
}
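To make the fair-share arithmetic above concrete, a short worked example with made-up numbers (63 is the number of usable MST time slots on the link):

	/* Hypothetical values, not taken from the patch: */
	int link_timeslots_used = 20;	/* slots already consumed by all streams */
	int remaining_to_increase = 2;	/* streams still eligible to grow */
	int pbn_div = 60;		/* mst_state->pbn_div: PBN per time slot */

	int fair_pbn_alloc =
		(63 - link_timeslots_used) / remaining_to_increase * pbn_div;
	/* (63 - 20) / 2 * 60 = 21 * 60 = 1260 PBN offered to the next stream */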
@@ -872,11 +840,10 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
break;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ vars[next_index].pbn) < 0)
return false;
if (!drm_dp_mst_atomic_check(state)) {
@@ -884,11 +851,10 @@ static bool try_disable_dsc(struct drm_atomic_state *state,
vars[next_index].bpp_x16 = 0;
} else {
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
- if (drm_dp_atomic_find_vcpi_slots(state,
+ if (drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
- vars[next_index].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ vars[next_index].pbn) < 0)
return false;
}
@@ -902,17 +868,27 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
struct dsc_mst_fairness_vars *vars,
+ struct drm_dp_mst_topology_mgr *mgr,
int *link_vars_start_index)
{
- int i, k;
struct dc_stream_state *stream;
struct dsc_mst_fairness_params params[MAX_PIPES];
struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr);
int count = 0;
+ int i, k;
bool debugfs_overwrite = false;
memset(params, 0, sizeof(params));
+ if (IS_ERR(mst_state))
+ return false;
+
+ mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link));
+#endif
+
/* Set up params */
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -971,11 +947,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_vcpi_slots(state,
- params[i].port->mgr,
- params[i].port,
- vars[i + k].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+ vars[i + k].pbn) < 0)
return false;
}
if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
@@ -989,21 +962,15 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
- if (drm_dp_atomic_find_vcpi_slots(state,
- params[i].port->mgr,
- params[i].port,
- vars[i + k].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn) < 0)
return false;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
- if (drm_dp_atomic_find_vcpi_slots(state,
- params[i].port->mgr,
- params[i].port,
- vars[i + k].pbn,
- dm_mst_get_pbn_divider(dc_link)) < 0)
+ if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+ params[i].port, vars[i + k].pbn) < 0)
return false;
}
}
@@ -1011,7 +978,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return false;
/* Optimize degree of compression */
- if (!increase_dsc_bpp(state, dc_link, params, vars, count, k))
+ if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k))
return false;
if (!try_disable_dsc(state, dc_link, params, vars, count, k))
@@ -1157,8 +1124,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
continue;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
- vars, &link_vars_start_index)) {
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
+ &aconnector->mst_mgr,
+ &link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
}
@@ -1216,10 +1184,8 @@ static bool
continue;
mutex_lock(&aconnector->mst_mgr.lock);
- if (!compute_mst_dsc_configs_for_link(state,
- dc_state,
- stream->link,
- vars,
+ if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars,
+ &aconnector->mst_mgr,
&link_vars_start_index)) {
mutex_unlock(&aconnector->mst_mgr.lock);
return false;
@@ -1386,19 +1352,90 @@ clean_exit:
return (ret == 0);
}
-#endif
+static unsigned int kbps_from_pbn(unsigned int pbn)
+{
+ unsigned int kbps = pbn;
+
+ kbps *= (1000000 / PEAK_FACTOR_X1000);
+ kbps *= 8;
+ kbps *= 54;
+ kbps /= 64;
+
+ return kbps;
+}
+
+static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
+ struct dc_dsc_bw_range *bw_range)
+{
+ struct dc_dsc_policy dsc_policy = {0};
+
+ dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
+ dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
+ stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
+ &stream->sink->dsc_caps.dsc_dec_caps,
+ &stream->timing, bw_range);
+
+ return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
+}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
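kbps_from_pbn() above inverts kbps_to_peak_pbn() used elsewhere in this file: one PBN unit represents 54/64 MB/s of peak bandwidth, and the 1000000/PEAK_FACTOR_X1000 term both scales to kbps and strips the ~0.6% margin folded in when the PBN was computed. A standalone restatement with a worked number, assuming PEAK_FACTOR_X1000 is 1006:

/* Standalone check of the conversion above; PEAK_FACTOR_X1000 == 1006 is an
 * assumption (the usual 0.6% DP capacity margin), not restated from the hunk. */
#include <stdio.h>

#define PEAK_FACTOR_X1000 1006

static unsigned int kbps_from_pbn_example(unsigned int pbn)
{
	unsigned int kbps = pbn;

	kbps *= (1000000 / PEAK_FACTOR_X1000);	/* x1000 to kbps, /1.006 margin */
	kbps *= 8;				/* bytes to bits */
	kbps *= 54;
	kbps /= 64;				/* 1 PBN = 54/64 MB/s of peak bandwidth */

	return kbps;
}

int main(void)
{
	/* 1000 PBN maps back to 1000 * 994 * 8 * 54 / 64 = 6709500 kbps, ~6.7 Gbps */
	printf("%u\n", kbps_from_pbn_example(1000));
	return 0;
}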
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
int bpp, pbn, branch_max_throughput_mps = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings cur_link_settings;
+ unsigned int end_to_end_bw_in_kbps = 0;
+ unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
+ unsigned int max_compressed_bw_in_kbps = 0;
+ struct dc_dsc_bw_range bw_range = {0};
- /* check if mode could be supported within fUll_pbn */
- bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
- pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
- if (pbn > aconnector->port->full_pbn)
- return DC_FAIL_BANDWIDTH_VALIDATE;
+ /*
+ * Check whether the mode could be supported when DSC pass-through is
+ * supported AND whether there is enough bandwidth available to support
+ * the mode with DSC enabled.
+ */
+ if (is_dsc_common_config_possible(stream, &bw_range) &&
+ aconnector->port->passthrough_aux) {
+ mutex_lock(&aconnector->mst_mgr.lock);
+
+ cur_link_settings = stream->link->verified_link_cap;
+
+ upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ &cur_link_settings);
+ down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn);
+
+ /* pick the bottleneck */
+ end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
+ down_link_bw_in_kbps);
+
+ mutex_unlock(&aconnector->mst_mgr.lock);
+
+ /*
+ * Use the bandwidth at maximum DSC compression (bw_range.min_kbps)
+ * as the required bandwidth for the mode.
+ */
+ max_compressed_bw_in_kbps = bw_range.min_kbps;
+
+ if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
+ } else {
+#endif
+ /* check if mode could be supported within full_pbn */
+ bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
+ pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
+
+ if (pbn > aconnector->port->full_pbn)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+#endif
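Stripped of the driver plumbing, the gate added above is a min() over the two hops compared against the bandwidth of the stream at maximum DSC compression. A standalone restatement (the function and parameter names are illustrative):

static bool mode_fits_with_dsc(unsigned int upper_link_kbps,
			       unsigned int down_link_kbps,
			       unsigned int min_compressed_kbps)
{
	/* The end-to-end bandwidth is bounded by the narrower hop. */
	unsigned int end_to_end_kbps =
		upper_link_kbps < down_link_kbps ? upper_link_kbps : down_link_kbps;

	return end_to_end_kbps >= min_compressed_kbps;
}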
/* Check the MST DSC output bandwidth against branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 987bde4dca3d..dfd3be49eac8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1563,7 +1563,7 @@ int dm_drm_plane_get_property(struct drm_plane *plane,
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..8ca10ab3dfc1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -170,7 +170,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- power_opt |= psr_power_opt_z10_static_screen;
+ /*
+ * Only enable static-screen optimizations for PSR1. For PSR SU, this
+ * causes issues with the vstartup interrupt, which amdgpu_dm uses to
+ * send vblank events.
+ */
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ power_opt |= psr_power_opt_z10_static_screen;
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 09fbb7ad5362..53b077b40d72 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "core_types.h"
#include "ObjectID.h"
#include "atomfirmware.h"
@@ -44,25 +45,6 @@
#include "bios_parser_common.h"
-/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
-#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
-#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
-#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
-
-#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
-#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
-#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
-
-#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
-#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
-#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
-
#define DC_LOGGER \
bp->base.ctx->logger
@@ -868,6 +850,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -876,6 +860,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO LVDS not support anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@@ -885,6 +871,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only get data from dce_info table.
@@ -898,13 +886,15 @@ static enum bp_result get_ss_info_v4_1(
DATA_TABLES(smu_info));
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
smu_info->waflclk_ss_percentage;
ss_info->spread_spectrum_range =
smu_info->gpuclk_ss_rate_10hz * 10;
if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_XGMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
default:
result = BP_RESULT_UNSUPPORTED;
@@ -941,6 +931,7 @@ static enum bp_result get_ss_info_v4_2(
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
@@ -954,6 +945,8 @@ static enum bp_result get_ss_info_v4_2(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -962,6 +955,8 @@ static enum bp_result get_ss_info_v4_2(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO LVDS not support anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@@ -971,6 +966,8 @@ static enum bp_result get_ss_info_v4_2(
smu_info->gpuclk_ss_rate_10hz * 10;
if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only get data from dce_info table.
@@ -1019,6 +1016,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -1027,6 +1026,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
@@ -1035,6 +1036,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_smu_info_v4_0 does not have fields for SS for SMU Display PLL anymore.
@@ -1372,7 +1375,7 @@ static enum bp_result bios_parser_get_lttpr_interop(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
return result;
}
@@ -1388,6 +1391,7 @@ static enum bp_result bios_parser_get_lttpr_caps(
if (!DATA_TABLES(dce_info))
return BP_RESULT_UNSUPPORTED;
+ *dce_caps = 0;
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(dce_info));
get_atom_data_table_revision(header, &tbl_revision);
@@ -1421,7 +1425,11 @@ static enum bp_result bios_parser_get_lttpr_caps(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
+ if (dcb->ctx->dc->config.force_bios_enable_lttpr && *dce_caps == 0) {
+ *dce_caps = 1;
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: forced enabled");
+ }
return result;
}
@@ -1859,7 +1867,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
-
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
@@ -1868,7 +1876,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
-
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
@@ -2010,7 +2018,7 @@ static enum bp_result get_firmware_info_v3_4(
if (!smu_info_v3_5)
return BP_RESULT_BADBIOSTABLE;
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_5->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_5->bootup_dcefclk_10khz * 10;
break;
@@ -2416,6 +2424,7 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2630,6 +2639,7 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2791,6 +2801,8 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2942,6 +2954,27 @@ static enum bp_result construct_integrated_info(
default:
return result;
}
+ if (result == BP_RESULT_OK) {
+
+ DC_LOG_BIOS("edp1:\n"
+ "\tedp_pwr_on_off_delay = %d\n"
+ "\tedp_pwr_on_vary_bl_to_blon = %d\n"
+ "\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
+ "\tedp_bootup_bl_level = %d\n",
+ info->edp1_info.edp_pwr_on_off_delay,
+ info->edp1_info.edp_pwr_on_vary_bl_to_blon,
+ info->edp1_info.edp_pwr_down_bloff_to_vary_bloff,
+ info->edp1_info.edp_bootup_bl_level);
+ DC_LOG_BIOS("edp2:\n"
+ "\tedp_pwr_on_off_delayv = %d\n"
+ "\tedp_pwr_on_vary_bl_to_blon = %d\n"
+ "\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
+ "\tedp_bootup_bl_level = %d\n",
+ info->edp2_info.edp_pwr_on_off_delay,
+ info->edp2_info.edp_pwr_on_vary_bl_to_blon,
+ info->edp2_info.edp_pwr_down_bloff_to_vary_bloff,
+ info->edp2_info.edp_bootup_bl_level);
+ }
}
if (result != BP_RESULT_OK)
@@ -2967,13 +3000,22 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
+ if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
+ info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ }
}
-
// Log the Checksum and Voltage Swing
DC_LOG_BIOS("Integrated info table CHECKSUM: %d\n"
"Integrated info table FIX_DP_VOLTAGE_SWING: %d\n",
info->ext_disp_conn_info.checksum,
info->ext_disp_conn_info.fixdpvoltageswing);
+ if (bp->base.ctx->dc->config.force_bios_fixed_vs && info->ext_disp_conn_info.fixdpvoltageswing == 0) {
+ info->ext_disp_conn_info.fixdpvoltageswing = bp->base.ctx->dc->config.force_bios_fixed_vs & 0xF;
+ DC_LOG_BIOS("driver forced fixdpvoltageswing = %d\n", info->ext_disp_conn_info.fixdpvoltageswing);
+ }
}
/* Sort voltage table from low to high*/
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
@@ -3319,6 +3361,7 @@ static enum bp_result bios_get_board_layout_info(
struct bios_parser *bp;
static enum bp_result record_result;
+ unsigned int max_slots;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
@@ -3335,8 +3378,14 @@ static enum bp_result bios_get_board_layout_info(
}
board_layout_info->num_of_slots = 0;
+ max_slots = MAX_BOARD_SLOTS;
+
+ // Assume single slot on v1_5
+ if (bp->object_info_tbl.revision.minor == 5) {
+ max_slots = 1;
+ }
- for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ for (i = 0; i < max_slots; ++i) {
record_result = get_bracket_layout_record(dcb,
slot_index_to_vbios_id[i],
&board_layout_info->slots[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index c09be3f15fe6..c1eaf571407a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -48,6 +48,11 @@
#include "dc_dmub_srv.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
#include "yellow_carp_offset.h"
#define regCLK1_CLK_PLL_REQ 0x0237
@@ -99,7 +104,7 @@ static int dcn31_get_active_display_cnt_wa(
return display_count;
}
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -110,9 +115,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -211,11 +217,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn31_disable_otg_wa(clk_mgr_base, true);
+ dcn31_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn31_disable_otg_wa(clk_mgr_base, false);
+ dcn31_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -737,8 +743,49 @@ void dcn31_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn31_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn31_clk_mgr_helper_populate_bw_params(
&clk_mgr->base,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index beb025cd3dc2..1131c6d73f6c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -51,6 +51,13 @@
#include "dc_link_dp.h"
#include "dcn314_smu.h"
+
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
+
#define MAX_INSTANCE 7
#define MAX_SEGMENT 8
@@ -119,7 +126,7 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -129,12 +136,21 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
+ if (disable) {
+ if (stream_enc && stream_enc->funcs->disable_fifo)
+ pipe->stream_res.stream_enc->funcs->disable_fifo(stream_enc);
+
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+ if (stream_enc && stream_enc->funcs->enable_fifo)
+ pipe->stream_res.stream_enc->funcs->enable_fifo(stream_enc);
+ }
}
}
}
@@ -233,11 +249,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -614,7 +630,7 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
clock_table->DfPstateTable[min_pstate].WckRatio);
- };
+ }
/* Make sure to include at least one entry at highest pstate */
if (max_pstate != min_pstate || i == 0) {
@@ -670,6 +686,8 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
}
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
+
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
@@ -775,7 +793,48 @@ void dcn314_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn314_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
dcn314_clk_mgr_helper_populate_bw_params(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index cc076621f5e6..893991a0eb97 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -41,11 +41,19 @@
#include "dc_dmub_srv.h"
+#include "logger_types.h"
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
#include "dc_link_dp.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK 100000
+
static int dcn315_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
@@ -79,7 +87,7 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -91,9 +99,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -146,6 +155,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
+ /* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+ if (!new_clocks->p_state_change_support)
+ new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
@@ -159,10 +171,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
+ if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+ if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -175,12 +187,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, true);
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -275,7 +287,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -283,7 +295,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -291,7 +303,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -299,7 +311,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -507,7 +519,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].dispclk_mhz = clock_table->DispClocks[i];
bw_params->clk_table.entries[i].dppclk_mhz = clock_table->DppClocks[i];
bw_params->clk_table.entries[i].wck_ratio = 1;
- };
+ }
/* Make sure to include at least one entry and highest pstate */
if (max_pstate != min_pstate || i == 0) {
@@ -556,8 +568,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
- if (!bw_params->num_channels)
- bw_params->num_channels = 2;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
@@ -666,7 +677,48 @@ void dcn315_clk_mgr_construct(
clk_mgr->base.base.bw_params = &dcn315_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+
dcn315_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "NumDfPst atesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDfPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < NUM_DF_PSTATE_LEVELS; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks.DfPstateTable[%d].FClk = %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].MemClk= %d\n"
+ "smu_dpm_clks.dpm_clks->DfPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].FClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->DfPstateTable[i].Voltage);
+ }
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
dcn315_clk_mgr_helper_populate_bw_params(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 0cd3d2eb7ac7..187f5b27fdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index c6785969eb1a..f0f3f66629cc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
+ unsigned int num_dcfclk_levels, num_dtbclk_levels, num_dispclk_levels;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
+ clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
@@ -179,6 +181,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_levels);
+ num_dcfclk_levels = num_levels;
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
@@ -189,11 +192,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_levels);
+ num_dtbclk_levels = num_levels;
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_levels);
+ num_dispclk_levels = num_levels;
+
+ if (num_dcfclk_levels && num_dtbclk_levels && num_dispclk_levels)
+ clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
unsigned int i;
@@ -658,6 +666,12 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ if (clk_mgr->dpm_present && !num_levels)
+ clk_mgr->dpm_present = false;
+
+ if (!clk_mgr->dpm_present)
+ dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index aeecca68dea7..258ba5a872b1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -401,6 +401,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
+ return true;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -638,14 +641,17 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
/**
* dc_stream_get_crc() - Get CRC values for the given stream.
- * @dc: DC object
+ *
+ * @dc: DC object.
* @stream: The DC stream state of the stream to get CRCs from.
- * @r_cr: CRC value for the first of the 3 channels stored here.
- * @g_y: CRC value for the second of the 3 channels stored here.
- * @b_cb: CRC value for the third of the 3 channels stored here.
+ * @r_cr: CRC value for the red component.
+ * @g_y: CRC value for the green component.
+ * @b_cb: CRC value for the blue component.
*
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
- * Return false if stream is not found, or if CRCs are not enabled.
+ *
+ * Return:
+ * false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
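A hedged usage sketch of the ordering the kernel-doc describes: CRC capture has to be armed with dc_stream_configure_crc() before dc_stream_get_crc() returns anything useful. The NULL crc_window and continuous=true follow common amdgpu_dm usage, but treat the exact parameters as an assumption:

	uint32_t r_cr, g_y, b_cb;

	/* Arm CRC capture on the stream; NULL window = whole frame (assumed). */
	if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
		return false;

	/* Later, e.g. from the vblank worker, read back the three components. */
	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
		DRM_DEBUG_DRIVER("CRC R/Cr=%#x G/Y=%#x B/Cb=%#x\n", r_cr, g_y, b_cb);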
@@ -1094,7 +1100,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->current_state->stream_count != context->stream_count)
should_disable = true;
- if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *old_pipe, *new_pipe;
old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1194,7 +1201,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state)
+ if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1743,6 +1750,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1833,6 +1843,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context);
}
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
else
@@ -1996,6 +2009,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
dc->optimized_required = false;
dc->wm_optimized_required = false;
}
@@ -2315,9 +2331,13 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_scaling_info_update_type(u);
elevate_update_type(&overall_type, type);
- if (u->flip_addr)
+ if (u->flip_addr) {
update_flags->bits.addr_update = 1;
-
+ if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
+ update_flags->bits.tmz_changed = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+ }
if (u->in_transfer_func)
update_flags->bits.in_transfer_func_change = 1;
@@ -2752,11 +2772,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->abm_level)
stream->abm_level = *update->abm_level;
- if (update->periodic_interrupt0)
- stream->periodic_interrupt0 = *update->periodic_interrupt0;
-
- if (update->periodic_interrupt1)
- stream->periodic_interrupt1 = *update->periodic_interrupt1;
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
if (update->gamut_remap)
stream->gamut_remap_matrix = *update->gamut_remap;
@@ -2841,16 +2858,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
-void dc_reset_state(struct dc *dc, struct dc_state *context)
-{
- dc_resource_state_destruct(context);
-
- /* clear the structure, but don't reset the reference count */
- memset(context, 0, offsetof(struct dc_state, refcount));
-
- init_state(dc, context);
-}
-
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -2986,13 +2993,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
- if (stream_update->periodic_interrupt0 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
-
- if (stream_update->periodic_interrupt1 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
@@ -3070,7 +3072,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
-
core_link_enable_stream(dc->current_state, pipe_ctx);
}
}
@@ -3098,11 +3099,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
{
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
-
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
- dc->debug.enable_sw_cntl_psr)
+ if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
+ || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
return false;
@@ -3204,6 +3203,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
context_clock_trace(dc, context);
}
@@ -3318,10 +3320,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
} else {
@@ -3329,16 +3327,15 @@ static void commit_planes_for_stream(struct dc *dc,
}
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- } else {
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- }
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
return;
}
@@ -3462,10 +3459,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
- if (update_type != UPDATE_TYPE_FAST)
- if (dc->hwss.commit_subvp_config)
- dc->hwss.commit_subvp_config(dc, context);
-
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
dc->hwss.interdependent_update_lock(dc, context, false);
} else {
@@ -3503,6 +3496,10 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
@@ -3532,19 +3529,72 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+/* Determines if the incoming context requires applying a transition state with unnecessary
+ * pipe splitting and ODM disabled, due to hardware limitations. In a case where
+ * the OPP associated with an MPCC might change due to plane additions, this function
+ * returns true.
+ */
+static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ struct dc_stream_state *stream,
+ int surface_count,
+ bool *is_plane_addition)
+{
+
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+
+ *is_plane_addition = false;
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count > 0 &&
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+ /* determine if minimal transition is required due to MPC*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 1 &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ /* determine if minimal transition is required due to dynamic ODM*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ return force_minimal_pipe_splitting;
+}
+
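The dynamic-ODM branch above is easy to misread because the two comparisons are asymmetric (the removal case keys off the current plane count, the addition case off the incoming surface count); the following stand-alone model with hypothetical plane counts reproduces just that check, for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors only the dynamic-ODM branch of could_mpcc_tree_change_for_active_pipes():
 * with single-display 2:1 ODM enabled, a transition state is forced when removing
 * planes from a configuration of more than two, or when adding planes so that more
 * than two become active. */
static bool odm_needs_minimal_transition(int cur_planes, int new_planes,
					 bool *is_plane_addition)
{
	*is_plane_addition = false;
	if (new_planes <= 0)
		return false;
	if (cur_planes > 2 && cur_planes > new_planes)
		return true;               /* plane removal case */
	if (new_planes > 2 && cur_planes < new_planes) {
		*is_plane_addition = true; /* plane addition case */
		return true;
	}
	return false;
}

int main(void)
{
	bool add;
	/* 3 planes -> 1 plane: forces the transition (removal path). */
	printf("%d\n", odm_needs_minimal_transition(3, 1, &add));
	/* 1 plane -> 3 planes: forces the transition (addition path). */
	printf("%d\n", odm_needs_minimal_transition(1, 3, &add));
	/* 1 plane -> 2 planes: no forced transition. */
	printf("%d\n", odm_needs_minimal_transition(1, 2, &add));
	return 0;
}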
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_policy;
+ enum pipe_split_policy tmp_mpc_policy;
+ bool temp_dynamic_odm_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
if (!transition_context)
return false;
- tmp_policy = dc->debug.pipe_split_policy;
- dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ if (!dc->config.is_vmin_only_asic) {
+ tmp_mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
dc_resource_state_copy_construct(transition_base_context, transition_context);
@@ -3566,19 +3616,22 @@ static bool commit_minimal_transition_state(struct dc *dc,
ret = dc_commit_state_no_check(dc, transition_context);
}
- //always release as dc_commit_state_no_check retains in good case
+ /*always release as dc_commit_state_no_check retains in good case*/
dc_release_state(transition_context);
- //restore previous pipe split policy
- dc->debug.pipe_split_policy = tmp_policy;
+ /*restore previous pipe split and odm policy*/
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = tmp_mpc_policy;
+
+ dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
if (ret != DC_OK) {
- //this should never happen
+ /*this should never happen*/
BREAK_TO_DEBUGGER();
return false;
}
- //force full surface update
+ /*force full surface update*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
@@ -3601,22 +3654,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting = false;
- bool is_plane_addition = false;
-
- struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting;
+ bool is_plane_addition;
- if (cur_stream_status &&
- dc->current_state->stream_count > 0 &&
- dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
- /* determine if minimal transition is required */
- if (cur_stream_status->plane_count > surface_count) {
- force_minimal_pipe_splitting = true;
- } else if (cur_stream_status->plane_count < surface_count) {
- force_minimal_pipe_splitting = true;
- is_plane_addition = true;
- }
- }
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ surface_count,
+ &is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
@@ -3633,7 +3678,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
&context))
return false;
- /* on plane addition, minimal state is the new one */
+ /* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state(dc, context)) {
dc_release_state(context);
@@ -4020,7 +4065,7 @@ struct dc_sink *dc_link_add_remote_sink(
* Treat device as no EDID device if EDID
* parsing fails
*/
- if (edid_status != EDID_OK) {
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
dc_sink->dc_edid.length = 0;
dm_error("Bad EDID, status%d!\n", edid_status);
}
@@ -4275,8 +4320,8 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
/*
*****************************************************************************
* Function: dc_is_dmub_outbox_supported -
- *
- * @brief
+ *
+ * @brief
* Checks whether DMUB FW supports outbox notifications, if supported
* DM should register outbox interrupt prior to actually enabling interrupts
* via dc_enable_dmub_outbox
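A hedged sketch of the ordering the brief above prescribes, as a fragment in the caller's context; dm_register_outbox_irq() is a hypothetical DM-side placeholder, and dc_enable_dmub_outbox() is assumed to take the dc pointer since only its name appears in the doc.

	/* Sketch of the documented ordering, not a real DM function. */
	if (dc_is_dmub_outbox_supported(dc)) {
		dm_register_outbox_irq();  /* hypothetical: DM-specific IRQ hookup */
		dc_enable_dmub_outbox(dc); /* assumed signature; only named above */
	}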
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 2a8007928210..7c2e3b8dc26a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -402,6 +402,44 @@ void get_hdr_visual_confirm_color(
}
}
+void get_subvp_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+ bool enable_subvp = false;
+ int i;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ /* SubVP enable - red */
+ color->color_r_cr = color_value;
+ enable_subvp = true;
+
+ if (pipe_ctx->stream == pipe->stream)
+ return;
+ break;
+ }
+ }
+
+ if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
+ color->color_r_cr = 0;
+ if (pipe_ctx->stream->ignore_msa_timing_param == 1)
+ /* SubVP enable and DRR on - green */
+ color->color_g_y = color_value;
+ else
+ /* SubVP enable and No DRR - blue */
+ color->color_b_cb = color_value;
+ }
+}
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 66d2ae7aacf5..3d19fb92333b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -832,8 +832,9 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
- apply_dpia_mst_dsc_always_on_wa(link);
link->type = dc_connection_mst_branch;
+ apply_dpia_mst_dsc_always_on_wa(link);
+
dm_helpers_dp_update_branch_info(link->ctx, link);
if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
@@ -847,20 +848,13 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
bool reset_cur_dp_mst_topology(struct dc_link *link)
{
- bool result = false;
DC_LOGGER_INIT(link->ctx->logger);
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
revert_dpia_mst_dsc_always_on_wa(link);
- result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-
- link->mst_stream_alloc_table.stream_count = 0;
- memset(link->mst_stream_alloc_table.stream_allocations,
- 0,
- sizeof(link->mst_stream_alloc_table.stream_allocations));
- return result;
+ return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
}
static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
@@ -1311,6 +1305,14 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink->edid_caps.audio_modes[i].sample_rate,
sink->edid_caps.audio_modes[i].sample_size);
}
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ // Init dc_panel_config
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
+ }
+
} else {
/* From Connected-to-Disconnected. */
link->type = dc_connection_none;
@@ -1975,7 +1977,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int i;
bool apply_seamless_boot_optimization = false;
uint32_t bl_oled_enable_delay = 50; // in ms
- const uint32_t post_oui_delay = 30; // 30ms
+ uint32_t post_oui_delay = 30; // 30ms
/* Reduce link bandwidth between failed link training attempts. */
bool do_fallback = false;
@@ -2022,8 +2024,10 @@ static enum dc_status enable_link_dp(struct dc_state *state,
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
- if (link->dpcd_sink_ext_caps.raw != 0)
+ if (link->dpcd_sink_ext_caps.raw != 0) {
+ post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
msleep(post_oui_delay);
+ }
// similarly, mode switch can cause loss of cable ID
dpcd_write_cable_id_to_dprx(link);
@@ -2069,11 +2073,7 @@ static enum dc_status enable_link_edp(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- enum dc_status status;
-
- status = enable_link_dp(state, pipe_ctx);
-
- return status;
+ return enable_link_dp(state, pipe_ctx);
}
static enum dc_status enable_link_dp_mst(
@@ -2639,9 +2639,8 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
dp_set_fec_ready(link, link_res, false);
}
}
- } else {
- if (signal != SIGNAL_TYPE_VIRTUAL)
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else if (signal != SIGNAL_TYPE_VIRTUAL) {
+ link->dc->hwss.disable_link_output(link, link_res, signal);
}
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
@@ -2663,6 +2662,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_over_340mhz = false;
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2702,11 +2702,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;
- link->link_enc->funcs->enable_tmds_output(
- link->link_enc,
+ dc->hwss.enable_tmds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->stream->signal,
pipe_ctx->clock_source->id,
display_color_depth,
- pipe_ctx->stream->signal,
stream->phy_pix_clk);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
@@ -2717,15 +2718,16 @@ static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dc *dc = stream->ctx->dc;
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
memset(&stream->link->cur_link_settings, 0,
sizeof(struct dc_link_settings));
-
- link->link_enc->funcs->enable_lvds_output(
- link->link_enc,
+ dc->hwss.enable_lvds_link_output(
+ link,
+ &pipe_ctx->link_res,
pipe_ctx->clock_source->id,
stream->phy_pix_clk);
@@ -3516,7 +3518,7 @@ static void update_mst_stream_alloc_table(
struct dc_link *link,
struct stream_encoder *stream_enc,
struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
- const struct dp_mst_stream_allocation_table *proposed_table)
+ const struct dc_dp_mst_stream_allocation_table *proposed_table)
{
struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
struct link_mst_stream_allocation *dc_alloc;
@@ -3563,6 +3565,35 @@ static void update_mst_stream_alloc_table(
work_table[i];
}
+static void remove_stream_from_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *dio_stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
+{
+ int i = 0;
+ struct link_mst_stream_allocation_table *table =
+ &link->mst_stream_alloc_table;
+
+ if (hpo_dp_stream_enc) {
+ for (; i < table->stream_count; i++)
+ if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
+ break;
+ } else {
+ for (; i < table->stream_count; i++)
+ if (dio_stream_enc == table->stream_allocations[i].stream_enc)
+ break;
+ }
+
+ if (i < table->stream_count) {
+ i++;
+ for (; i < table->stream_count; i++)
+ table->stream_allocations[i-1] = table->stream_allocations[i];
+ memset(&table->stream_allocations[table->stream_count-1], 0,
+ sizeof(struct link_mst_stream_allocation));
+ table->stream_count--;
+ }
+}
+
static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
const uint32_t VCP_Y_PRECISION = 1000;
@@ -3679,7 +3710,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
@@ -3784,7 +3815,7 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
uint8_t i;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
DC_LOGGER_INIT(link->ctx->logger);
@@ -3873,7 +3904,7 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
uint8_t i;
enum act_return_status ret;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
@@ -3957,7 +3988,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
int i;
bool mst_mode = (link->type == dc_connection_mst_branch);
@@ -3980,26 +4011,32 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&empty_link_settings,
avg_time_slots_per_mtp);
- /* TODO: which component is responsible for remove payload table? */
if (mst_mode) {
+ /* when link is in mst mode, rely on mst manager to remove
+ * payload
+ */
if (dm_helpers_dp_mst_write_payload_allocation_table(
stream->ctx,
stream,
&proposed_table,
- false)) {
+ false))
update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- }
- else {
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- }
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ } else {
+ /* when link is no longer in mst mode (mst hub unplugged),
+ * remove payload with default dc logic
+ */
+ remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
}
DC_LOG_MST("%s"
@@ -4303,8 +4340,9 @@ void core_link_enable_stream(
*/
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+
}
status = enable_link(state, pipe_ctx);
@@ -4736,7 +4774,7 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
else if (link->connector_signal == SIGNAL_TYPE_EDP
&& (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
dsc_support.DSC_SUPPORT == false
- || link->dc->debug.disable_dsc_edp
+ || link->panel_config.dsc.disable_dsc_edp
|| !link->dc->caps.edp_dsc_support))
force_disable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index d01d2eeed813..651231387043 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -35,6 +35,8 @@
#include "dc_link_ddc.h"
#include "dce/dce_aux.h"
#include "dmub/inc/dmub_cmd.h"
+#include "link_dpcd.h"
+#include "include/dal_asic_id.h"
#define DC_LOGGER_INIT(logger)
@@ -683,6 +685,21 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
+ if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
+ ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ /* Fixed VS workaround for AUX timeout */
+ const uint32_t fixed_vs_address = 0xF004F;
+ const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+
+ core_link_write_dpcd(ddc->link,
+ fixed_vs_address,
+ fixed_vs_data,
+ sizeof(fixed_vs_data));
+
+ timeout = 3072;
+ }
+
/* Do not try to access nonexistent DDC pin. */
if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY)
return true;
@@ -691,6 +708,7 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
result = true;
}
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 48dad093ae8b..c57df45e83ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -526,9 +526,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
return disable_scrabled_data_symbols;
}
-static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
{
- return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
+ return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -545,7 +545,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -561,7 +561,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
__func__,
offset,
@@ -584,7 +584,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
lt_settings->dpcd_lane_settings,
size_in_bytes);
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_128b_132b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -873,7 +873,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
uint32_t lane;
enum dc_status status;
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -906,7 +906,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
ln_align->raw = dpcd_buf[2];
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
__func__,
@@ -954,7 +954,7 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(link_training_setting, offset))
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -963,7 +963,7 @@ enum dc_status dpcd_set_lane_settings(
(uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
DP_128b_132b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -1172,7 +1172,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* Note: also check that TPS4 is a supported feature*/
tr_pattern = lt_settings->pattern_for_eq;
- if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
@@ -1198,7 +1198,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 3. wait for receiver to lock-on*/
wait_time_microsec = lt_settings->eq_pattern_time;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
wait_time_microsec =
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
@@ -1469,7 +1469,6 @@ static inline void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->lttpr_mode = link->lttpr_mode;
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
@@ -1478,6 +1477,7 @@ static inline void decide_8b_10b_training_settings(
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1501,9 +1501,8 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link,
lt_settings->cds_pattern_time = 2500;
lt_settings->cds_wait_time_limit = (dp_convert_to_count(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
- lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
- LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
lt_settings->disallow_per_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1543,7 +1542,7 @@ static void override_training_settings(
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
lt_settings->always_match_dpcd_with_hw_lane_settings = false;
@@ -1584,6 +1583,15 @@ static void override_training_settings(
if (link->preferred_training_settings.fec_enable != NULL)
lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+
+ #if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+
+#endif
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
}
uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
@@ -1649,7 +1657,7 @@ static enum dc_status configure_lttpr_mode_non_transparent(
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -2099,7 +2107,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2216,7 +2224,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
link->vendor_specific_lttpr_link_rate_wa = target_rate;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2288,7 +2296,7 @@ static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
return status;
}
@@ -2635,6 +2643,7 @@ enum link_training_result dc_link_dp_perform_link_training(
link,
link_settings,
&lt_settings);
+
override_training_settings(
link,
&link->preferred_training_settings,
@@ -2652,7 +2661,7 @@ enum link_training_result dc_link_dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
@@ -2758,8 +2767,14 @@ bool perform_link_training_with_retries(
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ /* Update verified link settings to the current ones,
+ * because DPIA LT might fall back to a lower link setting.
+ */
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ }
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
@@ -3080,7 +3095,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
+ if (dp_is_lttpr_present(link)) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
@@ -3234,7 +3249,7 @@ static bool dp_verify_link_cap(
cur_link_settings = max_link_settings;
/* Grant extended timeout request */
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
+ if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -3743,7 +3758,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
unsigned int policy = 0;
- policy = link->ctx->dc->debug.force_dsc_edp_policy;
+ policy = link->panel_config.dsc.force_dsc_edp_policy;
if (max_link_rate == LINK_RATE_UNKNOWN)
max_link_rate = link->verified_link_cap.link_rate;
/*
@@ -3909,7 +3924,7 @@ bool decide_link_settings(struct dc_stream_state *stream,
if (stream->timing.flags.DSC) {
enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
- if (link->ctx->dc->debug.force_dsc_edp_policy) {
+ if (link->panel_config.dsc.force_dsc_edp_policy) {
/* calculate link max link rate cap*/
struct dc_link_settings tmp_link_setting;
struct dc_crtc_timing tmp_timing = stream->timing;
@@ -4095,8 +4110,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
+ /* prepare link training settings */
+ link_training_settings.link_settings = link->cur_link_settings;
+
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
+
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
link_training_settings.dpcd_lane_settings);
@@ -4203,9 +4223,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- /* prepare link training settings */
- link_training_settings.link_settings = link->cur_link_settings;
-
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
@@ -4518,17 +4535,15 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
- }
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
}
}
@@ -5017,121 +5032,136 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-/* Logic to determine LTTPR mode */
-static void determine_lttpr_mode(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
- bool allow_lttpr_non_transparent_mode = 0;
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
+
+ /* Logic to determine LTTPR support*/
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
+ if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
+ return false;
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- allow_lttpr_non_transparent_mode = 1;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- allow_lttpr_non_transparent_mode = 1;
+ /* By reading LTTPR capability, RX assumes that we will enable
+ * LTTPR extended aux timeout if LTTPR is present.
+ */
+ status = core_link_read_dpcd(link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* If this chip cap is set, at least one retimer must exist in the chain
+ * Override count to 1 if we receive a known bad count (0 or an invalid value)
+ */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- if (vbios_lttpr_enable && vbios_lttpr_interop)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
- if (allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
- if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- else
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- }
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = dp_is_lttpr_present(link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Check DP tunnel LTTPR mode debug option. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+
+ return is_lttpr_present;
}
-bool dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_is_lttpr_present(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
+ return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+}
- memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting);
- /* Logic to determine LTTPR mode*/
- determine_lttpr_mode(link);
+ if (encoding == DP_8b_10b_ENCODING)
+ return dp_decide_8b_10b_lttpr_mode(link);
+ else if (encoding == DP_128b_132b_ENCODING)
+ return dp_decide_128b_132b_lttpr_mode(link);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- !link->dc->debug.disable_fixed_vs_aux_timeout_wa) {
- /* Fixed VS workaround for AUX timeout */
- const uint32_t fixed_vs_address = 0xF004F;
- const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+ ASSERT(0);
+ return LTTPR_MODE_NON_LTTPR;
+}
- core_link_write_dpcd(
- link,
- fixed_vs_address,
- fixed_vs_data,
- sizeof(fixed_vs_data));
- }
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
+{
+ if (!dp_is_lttpr_present(link))
+ return;
- /* By reading LTTPR capability, RX assumes that we will enable
- * LTTPR extended aux timeout if LTTPR is present.
- */
- status = core_link_read_dpcd(
- link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- lttpr_dpcd_data,
- sizeof(lttpr_dpcd_data));
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
+ *override = LTTPR_MODE_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
+ *override = LTTPR_MODE_NON_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
+ *override = LTTPR_MODE_NON_LTTPR;
}
- return is_lttpr_present;
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent)
+ return LTTPR_MODE_NON_TRANSPARENT;
+ else
+ return LTTPR_MODE_TRANSPARENT;
+ }
+
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support)
+ return LTTPR_MODE_NON_TRANSPARENT;
+
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
+{
+ return dp_is_lttpr_present(link) ? LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;
}
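A stand-alone model of dp_decide_8b_10b_lttpr_mode() above, with the link and dc state reduced to booleans (the DP1.4A non-transparent allowance and extended AUX timeout support are folded into one flag); illustration only, not driver code.

#include <stdbool.h>
#include <stdio.h>

enum lttpr_mode_model { NON_LTTPR, TRANSPARENT, NON_TRANSPARENT };

/* Same decision order as the hunk above: no LTTPR wins, then the VBIOS-aware
 * path, then the DP1.4A non-transparent allowance. */
static enum lttpr_mode_model decide_8b_10b(bool lttpr_present, bool vbios_aware,
					   bool vbios_force_non_transparent,
					   bool dp14a_allowed_and_ext_timeout)
{
	if (!lttpr_present)
		return NON_LTTPR;
	if (vbios_aware)
		return vbios_force_non_transparent ? NON_TRANSPARENT : TRANSPARENT;
	if (dp14a_allowed_and_ext_timeout)
		return NON_TRANSPARENT;
	return NON_LTTPR;
}

int main(void)
{
	/* LTTPR present, VBIOS aware but not forcing: transparent mode. */
	printf("%d\n", decide_8b_10b(true, true, false, false));
	/* LTTPR present, VBIOS unaware, DP1.4A non-transparent allowed: non-transparent. */
	printf("%d\n", decide_8b_10b(true, false, false, true));
	return 0;
}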
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5193,13 +5223,16 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
uint64_t current_ts = 0;
uint64_t time_taken_ms = 0;
enum dc_connection_type type = dc_connection_none;
+ bool lttpr_present;
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- determine_lttpr_mode(link);
+ lttpr_present = dp_is_lttpr_present(link) ||
+ (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
*/
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ if (lttpr_present)
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
@@ -5267,6 +5300,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t aux_channel_retry_cnt = 0;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -5294,20 +5328,46 @@ static bool retrieve_link_cap(struct dc_link *link)
status = wa_try_to_wake_dprx(link, timeout_ms);
}
+ while (status != DC_OK && aux_channel_retry_cnt < 10) {
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ /* Delay 1 ms if AUX CH is in power down state. Based on spec
+ * section 2.3.1.2, the AUX CH may be powered down due to a
+ * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
+ * signal and may need up to 1 ms before being able to reply.
+ */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
+ udelay(1000);
+ aux_channel_retry_cnt++;
+ }
+ }
+
+ /* If aux channel is not active, return false and trigger another detect*/
+ if (status != DC_OK) {
+ dpcd_power_state = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+
+ dpcd_power_state = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+ return false;
+ }
+
is_lttpr_present = dp_retrieve_lttpr_cap(link);
- /* Read DP tunneling information. */
- status = dpcd_get_tunneling_device_data(link);
- status = core_link_read_dpcd(link, DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
+ if (is_lttpr_present)
+ configure_lttpr_mode_transparent(link);
- /* Delay 1 ms if AUX CH is in power down state. Based on spec
- * section 2.3.1.2, if AUX CH may be powered down due to
- * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
- * signal and may need up to 1 ms before being able to reply.
- */
- if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3)
- udelay(1000);
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some
@@ -6057,7 +6117,7 @@ bool dc_link_dp_set_test_pattern(
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
p_link_settings->dpcd_lane_settings,
@@ -7034,68 +7094,16 @@ void dp_enable_link_phy(
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct pipe_ctx *pipes =
- link->dc->current_state->res_ctx.pipe_ctx;
- struct clock_source *dp_cs =
- link->dc->res_pool->dp_clock_source;
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- unsigned int i;
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- /* If the current pixel clock source is not DTO(happens after
- * switching from HDMI passive dongle to DP on the same connector),
- * switch the pixel clock source to DTO.
- */
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream != NULL &&
- pipes[i].stream->link == link) {
- if (pipes[i].clock_source != NULL &&
- pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
- pipes[i].clock_source = dp_cs;
- pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
- pipes[i].stream->timing.pix_clk_100hz;
- pipes[i].clock_source->funcs->program_pix_clk(
- pipes[i].clock_source,
- &pipes[i].stream_res.pix_clk_params,
- dp_get_link_encoding_format(link_settings),
- &pipes[i].pll_settings);
- }
- }
- }
-
link->cur_link_settings = *link_settings;
-
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
- if (dc->clk_mgr->funcs->notify_link_rate_change)
- dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
- }
-
- if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
-
- if (link_hwss->ext.enable_dp_link_output)
- link_hwss->ext.enable_dp_link_output(link, link_res, signal,
- clock_source, link_settings);
-
- if (dmcu != NULL && dmcu->funcs->unlock_phy)
- dmcu->funcs->unlock_phy(dmcu);
-
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link->dc->hwss.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
dp_receiver_power_ctrl(link, true);
}
void edp_add_delay_for_T9(struct dc_link *link)
{
- if (link->local_sink &&
- link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
- udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
+ if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
+ udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
}
bool edp_receiver_ready_T9(struct dc_link *link)
@@ -7151,9 +7159,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
} while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
}
- if (link->local_sink &&
- link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
- udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
+ if (link && link->panel_config.pps.extra_t7_ms > 0)
+ udelay(link->panel_config.pps.extra_t7_ms * 1000);
return result;
}
@@ -7162,29 +7169,11 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
- if (signal == SIGNAL_TYPE_EDP) {
- if (link->dc->hwss.edp_backlight_control)
- link->dc->hwss.edp_backlight_control(link, false);
- if (link_hwss->ext.disable_dp_link_output)
- link_hwss->ext.disable_dp_link_output(link, link_res, signal);
- link->dc->hwss.edp_power_control(link, false);
- } else {
- if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
- if (link_hwss->ext.disable_dp_link_output)
- link_hwss->ext.disable_dp_link_output(link, link_res, signal);
- if (dmcu != NULL && dmcu->funcs->unlock_phy)
- dmcu->funcs->unlock_phy(dmcu);
- }
-
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
-
+ dc->hwss.disable_link_output(link, link_res, signal);
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
@@ -7250,7 +7239,7 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
+ if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
return;
if (link_hwss->ext.set_dp_lane_settings)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index 468e39589ed8..74e36b34d3f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -115,12 +115,14 @@ static enum link_training_result dpia_configure_link(
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
dp_decide_training_settings(link,
link_setting,
lt_settings);
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
status = dpcd_configure_channel_coding(link, lt_settings);
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
@@ -178,7 +180,7 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
switch (type) {
case DPIA_SET_CFG_SET_LINK:
- data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
+ data.set_link.mode = lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
break;
case DPIA_SET_CFG_SET_PHY_TEST_MODE:
break;
@@ -553,7 +555,7 @@ static enum link_training_result dpia_training_cr_phase(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_cr_transparent(link, link_res, lt_settings);
@@ -830,7 +832,7 @@ static enum link_training_result dpia_training_eq_phase(
{
enum link_training_result result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_eq_transparent(link, link_res, lt_settings);
@@ -870,13 +872,14 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
* @param hop The Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_end(struct dc_link *link,
+ struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
enum dc_status status;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
if (hop == repeater_cnt) { /* DPTX-to-DPIA */
@@ -916,7 +919,7 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
link->link_id.enum_id - ENUM_ID_1,
hop,
result,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
return result;
}
@@ -928,7 +931,9 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
* @param link DPIA link being trained.
* @param hop The Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(struct dc_link *link, uint32_t hop)
+static void dpia_training_abort(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
{
uint8_t data = 0;
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
@@ -936,7 +941,7 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode,
+ lt_settings->lttpr_mode,
link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
@@ -964,12 +969,16 @@ enum link_training_result dc_link_dpia_perform_link_training(
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
int8_t repeater_id; /* Current hop. */
+ struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in
+
+ lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
+
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
if (result != LINK_TRAINING_SUCCESS)
return result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Train each hop in turn starting with the one closest to DPTX.
@@ -987,7 +996,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
break;
/* Stop training hop. */
- result = dpia_training_end(link, repeater_id);
+ result = dpia_training_end(link, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
}
@@ -1001,9 +1010,9 @@ enum link_training_result dc_link_dpia_perform_link_training(
msleep(5);
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT) {
- dpia_training_abort(link, repeater_id);
+ dpia_training_abort(link, &lt_settings, repeater_id);
} else {
- dpia_training_end(link, repeater_id);
+ dpia_training_end(link, &lt_settings, repeater_id);
}
return result;
}
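Taken together, the dc_link_dpia.c hunks move the LTTPR mode out of struct dc_link and into the per-training settings. A rough sketch of the resulting flow, simplified from the hunks above rather than copied from the driver, is:

    /* Sketch only: lttpr_mode now travels with the training settings. */
    struct dc_link_settings ls = *link_setting;                 /* non-const copy */
    lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &ls);
    dp_get_lttpr_mode_override(link, &lt_settings.lttpr_mode);  /* optional debug override */
    if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
        repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);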
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 7dbab15bfa68..8ee0d946bb2f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1904,9 +1904,6 @@ bool dc_is_stream_unchanged(
if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
return false;
- if (old_stream->odm_2to1_policy_applied != stream->odm_2to1_policy_applied)
- return false;
-
return true;
}
@@ -3584,6 +3581,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
}
}
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx_reset;
+
+ /* reset the otg sync context for the pipe and its slave pipes if any */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+ if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+ }
+}
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
@@ -3648,3 +3662,25 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
else
return get_virtual_link_hwss();
}
+
+bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
+{
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (stream) {
+ h_blank_start = stream->timing.h_total - stream->timing.h_front_porch;
+ h_blank_end = h_blank_start - stream->timing.h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (stream->timing.h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (stream->timing.h_sync_width % 2 == 0);
+ }
+ return divisible;
+} \ No newline at end of file
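As an illustrative check with numbers not taken from this patch: a standard 1920x1080 CEA timing has h_total 2200, h_front_porch 88, h_addressable 1920 and h_sync_width 44, so h_blank_start = 2200 - 88 = 2112 and h_blank_end = 2112 - 1920 = 192; all four checked values are even, and the helper would report the horizontal timing as divisible by 2, which is presumably what the ODM 2:1 paths elsewhere in this series rely on.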
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f62d50901d92..ae13887756bf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,7 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER dc->ctx->logger
@@ -329,7 +330,7 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- if (attributes->height * attributes->width * 4 > 16384)
+ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
if (stream->mall_stream_config.type == SUBVP_MAIN)
return false;
@@ -519,7 +520,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
}
/* remove writeback info for disabled writeback pipes from stream */
- for (i = 0, j = 0; i < stream->num_wb_info; i++) {
+ for (i = 0, j = 0; i < stream->num_wb_info && j < MAX_DWB_PIPES; i++) {
if (stream->writeback_info[i].wb_enabled) {
if (i != j)
/* trim the array */
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5908b60db313..2ecf36e6329b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.198"
+#define DC_VER "3.2.205"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -118,7 +118,26 @@ struct dc_plane_cap {
uint32_t min_height;
};
-// Color management caps (DPP and MPC)
+/**
+ * DOC: color-management-caps
+ *
+ * **Color management caps (DPP and MPC)**
+ *
+ * Modules/color calculates various color operations which are translated to
+ * abstracted HW. DCE 5-12 had almost no important changes, but starting with
+ * DCN1, every new generation comes with fairly major differences in color
+ * pipeline. Therefore, we abstract color pipe capabilities so modules/DM can
+ * decide mapping to HW block based on logical capabilities.
+ */
+
+/**
+ * struct rom_curve_caps - predefined transfer function caps for degamma and regamma
+ * @srgb: RGB color space transfer func
+ * @bt2020: BT.2020 transfer func
+ * @gamma2_2: standard gamma
+ * @pq: perceptual quantizer transfer function
+ * @hlg: hybrid log–gamma transfer function
+ */
struct rom_curve_caps {
uint16_t srgb : 1;
uint16_t bt2020 : 1;
@@ -127,36 +146,68 @@ struct rom_curve_caps {
uint16_t hlg : 1;
};
+/**
+ * struct dpp_color_caps - color pipeline capabilities for display pipe and
+ * plane blocks
+ *
+ * @dcn_arch: all DCE generations treated the same
+ * @input_lut_shared: shared with DGAM. Input LUT is different from most LUTs,
+ * just plain 256-entry lookup
+ * @icsc: input color space conversion
+ * @dgam_ram: programmable degamma LUT
+ * @post_csc: post color space conversion, before gamut remap
+ * @gamma_corr: degamma correction
+ * @hw_3d_lut: 3D LUT support. It implies a shaper LUT before. It may be shared
+ * with MPC by setting mpc:shared_3d_lut flag
+ * @ogam_ram: programmable out/blend gamma LUT
+ * @ocsc: output color space conversion
+ * @dgam_rom_for_yuv: pre-defined degamma LUT for YUV planes
+ * @dgam_rom_caps: pre-defined curve caps for degamma 1D LUT
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ *
+ * Note: hdr_mult and gamut remap (CTM) are always available in DPP (in that order)
+ */
struct dpp_color_caps {
- uint16_t dcn_arch : 1; // all DCE generations treated the same
- // input lut is different than most LUTs, just plain 256-entry lookup
- uint16_t input_lut_shared : 1; // shared with DGAM
+ uint16_t dcn_arch : 1;
+ uint16_t input_lut_shared : 1;
uint16_t icsc : 1;
uint16_t dgam_ram : 1;
- uint16_t post_csc : 1; // before gamut remap
+ uint16_t post_csc : 1;
uint16_t gamma_corr : 1;
-
- // hdr_mult and gamut remap always available in DPP (in that order)
- // 3d lut implies shaper LUT,
- // it may be shared with MPC - check MPC:shared_3d_lut flag
uint16_t hw_3d_lut : 1;
- uint16_t ogam_ram : 1; // blnd gam
+ uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
uint16_t dgam_rom_for_yuv : 1;
struct rom_curve_caps dgam_rom_caps;
struct rom_curve_caps ogam_rom_caps;
};
+/**
+ * struct mpc_color_caps - color pipeline capabilities for multiple pipe and
+ * plane combined blocks
+ *
+ * @gamut_remap: color transformation matrix
+ * @ogam_ram: programmable out gamma LUT
+ * @ocsc: output color space conversion matrix
+ * @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
+ * instance
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
+ */
struct mpc_color_caps {
uint16_t gamut_remap : 1;
uint16_t ogam_ram : 1;
uint16_t ocsc : 1;
- uint16_t num_3dluts : 3; //3d lut always assumes a preceding shaper LUT
- uint16_t shared_3d_lut:1; //can be in either DPP or MPC, but single instance
-
+ uint16_t num_3dluts : 3;
+ uint16_t shared_3d_lut:1;
struct rom_curve_caps ogam_rom_caps;
};
+/**
+ * struct dc_color_caps - color pipes capabilities for DPP and MPC hw blocks
+ * @dpp: color pipes caps for DPP
+ * @mpc: color pipes caps for MPC
+ */
struct dc_color_caps {
struct dpp_color_caps dpp;
struct mpc_color_caps mpc;
@@ -350,10 +401,14 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool is_vmin_only_asic;
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
+ bool force_bios_enable_lttpr;
+ uint8_t force_bios_fixed_vs;
+
};
enum visual_confirm {
@@ -365,6 +420,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SWAPCHAIN = 6,
VISUAL_CONFIRM_FAMS = 7,
VISUAL_CONFIRM_SWIZZLE = 9,
+ VISUAL_CONFIRM_SUBVP = 14,
};
enum dc_psr_power_opts {
@@ -386,9 +442,31 @@ enum dcc_option {
DCC_HALF_REQ_DISALBE = 2,
};
+/**
+ * enum pipe_split_policy - Pipe split strategy supported by DCN
+ *
+ * This enum is used to define the pipe split policy supported by DCN. By
+ * default, DC favors MPC_SPLIT_DYNAMIC.
+ */
enum pipe_split_policy {
+ /**
+ * @MPC_SPLIT_DYNAMIC: DC will automatically decide how to split the
+ * pipe in order to bring the best trade-off between performance and
+ * power consumption. This is the recommended option.
+ */
MPC_SPLIT_DYNAMIC = 0,
+
+ /**
+ * @MPC_SPLIT_AVOID: Avoid pipe split, which means that DC will not
+ * try any sort of split optimization.
+ */
MPC_SPLIT_AVOID = 1,
+
+ /**
+ * @MPC_SPLIT_AVOID_MULT_DISP: With this option, DC will only try to optimize
+ * the pipe utilization when using a single display; if the user
+ * connects to a second display, DC will avoid pipe split.
+ */
MPC_SPLIT_AVOID_MULT_DISP = 2,
};
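The policy is consumed through dc_debug_options (see the pipe_split_policy member further down in this header). A minimal, hypothetical override from driver debug code might look like:

    /* Hypothetical sketch: keep one pipe per plane regardless of display count. */
    dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;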
@@ -623,6 +701,14 @@ struct dc_state;
struct resource_pool;
struct dce_hwseq;
+/**
+ * struct dc_debug_options - DC debug struct
+ *
+ * This struct provides a simple mechanism for developers to change some
+ * configurations, enable/disable features, and activate extra debug options.
+ * This can be very handy to narrow down whether some specific feature is
+ * causing an issue or not.
+ */
struct dc_debug_options {
bool native422_support;
bool disable_dsc;
@@ -642,6 +728,11 @@ struct dc_debug_options {
bool disable_stutter;
bool use_max_lb;
enum dcc_option disable_dcc;
+
+ /**
+ * @pipe_split_policy: Define which pipe split policy is used by the
+ * display core.
+ */
enum pipe_split_policy pipe_split_policy;
bool force_single_disp_pipe_split;
bool voltage_align_fclk;
@@ -715,8 +806,6 @@ struct dc_debug_options {
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
- bool disable_dsc_edp;
- unsigned int force_dsc_edp_policy;
bool enable_dram_clock_change_one_display_vactive;
/* TODO - remove once tested */
bool legacy_dp2_lt;
@@ -740,11 +829,14 @@ struct dc_debug_options {
int crb_alloc_policy_min_disp_count;
bool disable_z10;
bool enable_z9_disable_interface;
- bool enable_sw_cntl_psr;
union dpia_debug_options dpia_debug;
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
+ bool allow_sw_cursor_fallback;
+ unsigned int force_subvp_num_ways;
+ unsigned int force_mall_ss_num_ways;
+ bool alloc_extra_way_for_cursor;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -758,7 +850,9 @@ struct dc_debug_options {
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
+ bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
+ enum lttpr_mode lttpr_mode_override;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -814,6 +908,17 @@ struct dc {
uint32_t *dcn_reg_offsets;
uint32_t *nbio_reg_offsets;
+
+ /* Scratch memory */
+ struct {
+ struct {
+ /*
+ * For matching clock_limits table in driver with table
+ * from PMFW.
+ */
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ } update_bw_bounding_box;
+ } scratch;
};
enum frame_buffer_mode {
@@ -1017,6 +1122,7 @@ union surface_update_flags {
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
uint32_t lut_3d:1;
+ uint32_t tmz_changed:1;
uint32_t full_update:1;
} bits;
@@ -1085,6 +1191,7 @@ struct dc_plane_state {
/* private to dc_surface.c */
enum dc_irq_source irq_source;
struct kref refcount;
+ struct tg_color visual_confirm_color;
};
struct dc_plane_info {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 09b304507bad..89d7d3fd3321 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -323,11 +323,13 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
int i = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
- uint8_t visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
+ uint8_t visual_confirm_enabled;
if (dc == NULL)
return false;
+ visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;
+
// Format command.
cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
@@ -387,6 +389,37 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
}
}
+void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ enum dmub_status status;
+ unsigned int panel_inst = 0;
+
+ dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ // Prepare fw command
+ cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
+ cmd.visual_confirm_color.header.sub_type = 0;
+ cmd.visual_confirm_color.header.ret_status = 1;
+ cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
+ cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
+
+ // Send command to fw
+ status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd);
+
+ ASSERT(status == DMUB_STATUS_OK);
+
+ // If command was processed, copy feature caps to dmub srv
+ if (status == DMUB_STATUS_OK &&
+ cmd.visual_confirm_color.header.ret_status == 0) {
+ memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
+ &cmd.visual_confirm_color.visual_confirm_color_data,
+ sizeof(struct dmub_visual_confirm_color));
+ }
+}
+
#ifdef CONFIG_DRM_AMD_DC_DCN
/**
* ***********************************************************************************************
@@ -417,44 +450,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
- int16_t drr_frame_us = 0;
- int16_t min_drr_supported_us = 0;
- int16_t max_drr_supported_us = 0;
- int16_t max_drr_vblank_us = 0;
- int16_t max_drr_mallregion_us = 0;
- int16_t mall_region_us = 0;
- int16_t prefetch_us = 0;
- int16_t subvp_active_us = 0;
- int16_t drr_active_us = 0;
- int16_t min_vtotal_supported = 0;
- int16_t max_vtotal_supported = 0;
+ uint16_t drr_frame_us = 0;
+ uint16_t min_drr_supported_us = 0;
+ uint16_t max_drr_supported_us = 0;
+ uint16_t max_drr_vblank_us = 0;
+ uint16_t max_drr_mallregion_us = 0;
+ uint16_t mall_region_us = 0;
+ uint16_t prefetch_us = 0;
+ uint16_t subvp_active_us = 0;
+ uint16_t drr_active_us = 0;
+ uint16_t min_vtotal_supported = 0;
+ uint16_t max_vtotal_supported = 0;
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
- drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+ drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
// P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+ mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
- (div64_s64((int64_t)min_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
-
- prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
- (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
- drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
- max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+ min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
+
+ prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+ (((uint64_t)main_timing->pix_clk_100hz * 100)));
+ drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+ max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
- max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
+ max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
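A quick sanity check of the reworked math, with illustrative numbers only: for a 2200 x 1125 timing at pix_clk_100hz = 1,485,000 (148.5 MHz), drr_frame_us = 1125 * 2200 * 1,000,000 / 148,500,000, which is roughly 16,667 us, i.e. a 60 Hz frame. The old expression kept the 1,000,000 factor in the denominator and therefore truncated to 0, and the int16_t locals could not have held lower-refresh frame times anyway (a 30 Hz frame is 33,333 us, past INT16_MAX), hence the switch to uint16_t results computed with div64_u64 over 64-bit intermediates.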
@@ -548,10 +579,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
- subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
- (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
- (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+ subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+ (uint64_t)phantom_timing0->h_total * 1000000),
+ (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+ (uint64_t)phantom_timing1->h_total * 1000000),
+ (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
// should increase its prefetch time to match the other
@@ -559,16 +592,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
- (int64_t)phantom_timing1->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing1->h_total * 1000000));
+
} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
- (int64_t)phantom_timing0->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing0->h_total * 1000000));
}
}
@@ -601,7 +635,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
- uint32_t out_num, out_den;
+ uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
pipe_data->mode = SUBVP;
pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
@@ -618,11 +652,16 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
/* Calculate the scaling factor from the src and dst height.
* e.g. If 3840x2160 being downscaled to 1920x1080, the scaling factor is 1/2.
* Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
+ *
+ * Make sure to combine stream and plane scaling together.
*/
- reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height, &out_num, &out_den);
- // TODO: Uncomment below lines once DMCUB include headers are promoted
- //pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
- //pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
+ reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
+ &out_num_stream, &out_den_stream);
+ reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
+ &out_num_plane, &out_den_plane);
+ reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
+ pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
+ pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;
// Prefetch lines is equal to VACTIVE + BP + VSYNC
pipe_data->pipe_config.subvp_data.prefetch_lines =
@@ -630,19 +669,33 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
// Round up
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
pipe_data->pipe_config.subvp_data.processing_delay_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
+
+ if (subvp_pipe->bottom_pipe) {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
+ } else if (subvp_pipe->next_odm_pipe) {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+ }
+
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
+ if (phantom_pipe->bottom_pipe) {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
+ } else if (phantom_pipe->next_odm_pipe) {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+ }
break;
}
}
@@ -687,7 +740,9 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && !pipe->top_pipe &&
+ /* For SubVP pipe count, only count the top most (ODM / MPC) pipe
+ */
+ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -700,7 +755,12 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;
+ /* When populating subvp cmd info, only pass in the top most (ODM / MPC) pipe.
+ * Any ODM or MPC splits being used in SubVP will be handled internally in
+ * populate_subvp_cmd_pipe_info
+ */
if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 159782cd6659..7e438345b1a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -78,12 +78,14 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst);
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);
void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);
+void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 584aaf6967fd..848db8676adf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -417,19 +417,43 @@ enum dc_scan_direction {
SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
};
+/**
+ * struct dc_cursor_position: Hardware cursor data.
+ *
+ * This struct keeps the action information related to the cursor that will be
+ * sent and received from our DC core.
+ */
struct dc_cursor_position {
+ /**
+ * @x: It represents the top left abscissa coordinate of the cursor.
+ */
uint32_t x;
+
+ /**
+ * @y: It is the top ordinate of the cursor coordinate.
+ */
uint32_t y;
+ /**
+ * @x_hotspot: Define the abscissa point where mouse click happens.
+ */
uint32_t x_hotspot;
+
+ /**
+ * @y_hotspot: Define the ordinate point where mouse click happens.
+ */
uint32_t y_hotspot;
- /*
- * This parameter indicates whether HW cursor should be enabled
+ /**
+ * @enable: This parameter indicates whether hardware cursor should be
+ * enabled.
*/
bool enable;
- /* Translate cursor x/y by the source rectangle for each plane. */
+ /**
+ * @translate_by_source: Translate cursor x/y by the source rectangle
+ * for each plane.
+ */
bool translate_by_source;
};
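Reading this together with the hotspot offset math in the hubp/dpp hunks later in this patch, the hardware appears to place the cursor surface at (x - x_hotspot, y - y_hotspot) relative to the viewport so that the hotspot pixel lands at the reported position; with illustrative values x = 100 and x_hotspot = 4, the left edge of the cursor bitmap would sit at 96.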
@@ -494,7 +518,9 @@ struct dc_gamma {
/* Used by both ipp amd opp functions*/
/* TODO: to be consolidated with enum color_space */
-/*
+/**
+ * enum dc_cursor_color_format - DC cursor programming mode
+ *
* This enum is for programming CURSOR_MODE register field. What this register
* should be programmed to depends on OS requested cursor shape flags and what
* we stored in the cursor surface.
@@ -530,17 +556,39 @@ union dc_cursor_attribute_flags {
};
struct dc_cursor_attributes {
+ /**
+ * @address: This field represents the framebuffer address associated
+ * with the cursor. It is important to highlight that this address is
+ * divided into a high and low parts.
+ */
PHYSICAL_ADDRESS_LOC address;
+
+ /**
+ * @pitch: Cursor line stride.
+ */
uint32_t pitch;
- /* Width and height should correspond to cursor surface width x heigh */
+ /**
+ * @width: Width should correspond to cursor surface width.
+ */
uint32_t width;
+ /**
+ * @height: Height should correspond to cursor surface height.
+ */
uint32_t height;
+ /**
+ * @color_format: DC cursor programming mode.
+ */
enum dc_cursor_color_format color_format;
- uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
+ /**
+ * @sdr_white_level: Boosting (SDR) cursor in HDR mode.
+ */
+ uint32_t sdr_white_level;
- /* In case we support HW Cursor rotation in the future */
+ /**
+ * @rotation_angle: In case we support HW Cursor rotation in the future.
+ */
enum dc_rotation_angle rotation_angle;
union dc_cursor_attribute_flags attribute_flags;
@@ -764,22 +812,108 @@ struct dc_dsc_config {
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
};
+
+/**
+ * struct dc_crtc_timing - Timing parameters used to configure DCN blocks
+ *
+ * DCN provides multiple signals and parameters that can be used to adjust
+ * timing parameters; this struct aggregates several of these values for easy
+ * access. In this struct, fields prefixed with h_* are related to horizontal
+ * timing, and v_* to vertical timing. Keep in mind that when we talk about
+ * vertical timings, the values, in general, are described in the number of
+ * lines; on the other hand, the horizontal values are in pixels.
+ */
struct dc_crtc_timing {
+ /**
+ * @h_total: The total number of pixels from the rising edge of HSync
+ * until the rising edge of the current HSync.
+ */
uint32_t h_total;
+
+ /**
+ * @h_border_left: The black pixels related to the left border
+ */
uint32_t h_border_left;
+
+ /**
+ * @h_addressable: It is the range of pixels displayed horizontally.
+ * For example, if the display resolution is 3840@2160, the horizontal
+ * addressable area is 3840.
+ */
uint32_t h_addressable;
+
+ /**
+ * @h_border_right: The black pixels related to the right border
+ */
uint32_t h_border_right;
+
+ /**
+ * @h_front_porch: Period (in pixels) between HBlank start and the
+ * rising edge of HSync.
+ */
uint32_t h_front_porch;
+
+ /**
+ * @h_sync_width: HSync duration in pixels.
+ */
uint32_t h_sync_width;
+ /**
+ * @v_total: It is the total number of lines from the rising edge of
+ * the previous VSync until the rising edge of the current VSync.
+ *
+ * |--------------------------|
+ * +-+ V_TOTAL +-+
+ * | | | |
+ * VSync ---+ +--------- // -----------+ +---
+ */
uint32_t v_total;
+
+ /**
+ * @v_border_top: The black border on the top.
+ */
uint32_t v_border_top;
+
+ /**
+ * @v_addressable: It is the range of the scanout at which the
+ * framebuffer is displayed. For example, if the display resolution is
+ * 3840x2160, the addressable area is 2160 lines, or if the resolution
+ * is 1920x1080, the addressable area is 1080 lines.
+ */
uint32_t v_addressable;
+
+ /**
+ * @v_border_bottom: The black border on the bottom.
+ */
uint32_t v_border_bottom;
+
+ /**
+ * @v_front_porch: Period (in lines) between VBlank start and rising
+ * edge of VSync.
+ * +-+
+ * VSync | |
+ * ----------+ +--------...
+ * +------------------...
+ * VBlank |
+ * --+
+ * |-------|
+ * v_front_porch
+ */
uint32_t v_front_porch;
+
+ /**
+ * @v_sync_width: VSync signal width in lines.
+ */
uint32_t v_sync_width;
+ /**
+ * @pix_clk_100hz: Pipe pixel precision
+ *
+ * This field is used to communicate pixel clocks with 100 Hz accuracy
+ * from dc_crtc_timing to BIOS command table.
+ */
uint32_t pix_clk_100hz;
+
uint32_t min_refresh_in_uhz;
uint32_t vic;
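To tie the fields together with illustrative numbers not taken from this patch: the refresh rate is (pix_clk_100hz * 100) / (h_total * v_total), so pix_clk_100hz = 1,485,000 (148.5 MHz) with h_total = 2200 and v_total = 1125 gives 148,500,000 / 2,475,000 = 60 Hz, and the horizontal blanking is h_total - h_border_left - h_addressable - h_border_right = 2200 - 0 - 1920 - 0 = 280 pixels.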
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 9544abf75e84..bf5f9e2773bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -113,6 +113,32 @@ struct psr_settings {
unsigned int psr_power_opt;
};
+/* To split out "global" and "per-panel" config settings.
+ * Add a struct dc_panel_config under dc_link
+ */
+struct dc_panel_config {
+ // extra panel power sequence parameters
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+ unsigned int extra_delay_backlight_off;
+ unsigned int extra_post_t7_ms;
+ unsigned int extra_pre_t11_ms;
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+ // ABM
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
+ // edp DSC
+ struct dsc {
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
+ } dsc;
+};
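The new fields are read directly off the link elsewhere in this patch: extra_t3_ms in dce110_edp_wait_for_hpd_ready, extra_t7_ms in the eDP T7 wait, extra_t12_ms in dce110_edp_power_control, while the dsc block takes over the disable_dsc_edp/force_dsc_edp_policy options removed from dc_debug_options above. A hypothetical DM-side override, with made-up values, could look like:

    /* Hypothetical per-panel tuning; field names from the struct above, values invented. */
    link->panel_config.pps.extra_t7_ms = 10;       /* stretch T7 for a slow eDP panel */
    link->panel_config.pps.extra_t12_ms = 50;      /* enforce a longer power-off gap  */
    link->panel_config.dsc.disable_dsc_edp = true; /* opt this panel out of eDP DSC   */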
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -131,7 +157,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- enum lttpr_mode lttpr_mode;
bool is_internal_display;
/* TODO: Rename. Flag an endpoint as having a programmable mapping to a
@@ -224,6 +249,7 @@ struct dc_link {
bool dpia_mst_dsc_always_on;
/* Forced DPIA into TBT3 compatibility mode. */
bool dpia_forced_tbt3_mode;
+ bool dongle_mode_timing_override;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -232,6 +258,8 @@ struct dc_link {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ struct dc_panel_config panel_config;
+ struct phy_state phy_state;
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index f87f852d4829..9e6025c98db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -212,8 +212,7 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- struct periodic_interrupt_config periodic_interrupt0;
- struct periodic_interrupt_config periodic_interrupt1;
+ struct periodic_interrupt_config periodic_interrupt;
/* from core_stream struct */
struct dc_context *ctx;
@@ -268,8 +267,6 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
struct mall_stream_config mall_stream_config;
-
- bool odm_2to1_policy_applied;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -283,8 +280,7 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- struct periodic_interrupt_config *periodic_interrupt0;
- struct periodic_interrupt_config *periodic_interrupt1;
+ struct periodic_interrupt_config *periodic_interrupt;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index bdb6bac8dd97..c94a966c6612 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -300,7 +300,7 @@ static void set_high_bit_rate_capable(
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
}
-/* set video latency in in ms/2+1 */
+/* set video latency in ms/2+1 */
static void set_video_latency(
struct audio *audio,
int latency_in_ms)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 919c2c2ba84b..32782ef9ef77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -814,12 +814,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER");
retry_on_defer = true;
- fallthrough;
- case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
- if (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK)
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES
&& defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) {
@@ -848,7 +842,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
}
}
break;
-
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ LOG_FLAG_I2cAux_DceAux,
+ "dce_aux_transfer_with_retries: FAILURE: AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK");
+ goto fail;
case AUX_TRANSACTION_REPLY_I2C_DEFER:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 0df06740ec39..bec5e9f787fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -393,17 +393,18 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
if (copy_settings_data->dsc_enable_status &&
link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
- sizeof(link->dpcd_caps.sink_dev_id_str)))
+ sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
else
link->psr_settings.force_ffu_mode = 0;
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
if (link->fec_state == dc_link_fec_enabled &&
+ link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
- sizeof(link->dpcd_caps.sink_dev_id_str)) ||
+ sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
- sizeof(link->dpcd_caps.sink_dev_id_str))))
+ sizeof(DP_SINK_DEVICE_STR_ID_2))))
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
else
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 38a67051d470..d260eaa1509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -722,7 +722,6 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
struct gpio *hpd;
- struct dc_sink *sink = link->local_sink;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
@@ -755,9 +754,9 @@ void dce110_edp_wait_for_hpd_ready(
return;
}
- if (sink != NULL) {
- if (sink->edid_caps.panel_patch.extra_t3_ms > 0) {
- int extra_t3_in_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+ if (link != NULL) {
+ if (link->panel_config.pps.extra_t3_ms > 0) {
+ int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;
msleep(extra_t3_in_ms);
}
@@ -842,7 +841,7 @@ void dce110_edp_power_control(
/* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */
if (link->local_sink != NULL)
remaining_min_edp_poweroff_time_ms +=
- link->local_sink->edid_caps.panel_patch.extra_t12_ms;
+ link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
@@ -946,7 +945,7 @@ void dce110_edp_wait_for_T12(
current_ts,
dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
- t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+ t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
if (time_since_edp_poweroff_ms < t12_duration)
msleep(t12_duration - time_since_edp_poweroff_ms);
@@ -965,6 +964,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t panel_instance;
+ unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
+ unsigned int post_T7_delay = OLED_POST_T7_DELAY;
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -1043,8 +1044,10 @@ void dce110_edp_backlight_control(
link_transmitter_control(ctx->dc_bios, &cntl);
- if (enable && link->dpcd_sink_ext_caps.bits.oled)
- msleep(OLED_POST_T7_DELAY);
+ if (enable && link->dpcd_sink_ext_caps.bits.oled) {
+ post_T7_delay += link->panel_config.pps.extra_post_t7_ms;
+ msleep(post_T7_delay);
+ }
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
@@ -1066,8 +1069,10 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled)
- msleep(OLED_PRE_T11_DELAY);
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
+ msleep(pre_T11_delay);
+ }
}
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -1441,6 +1446,14 @@ static enum dc_status dce110_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -2114,6 +2127,7 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
@@ -2992,6 +3006,124 @@ void dce110_set_pipe(struct pipe_ctx *pipe_ctx)
abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst);
}
+void dce110_enable_lvds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock)
+{
+ link->link_enc->funcs->enable_lvds_output(
+ link->link_enc,
+ clock_source,
+ pixel_clock);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+}
+
+void dce110_enable_tmds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock)
+{
+ link->link_enc->funcs->enable_tmds_output(
+ link->link_enc,
+ clock_source,
+ color_depth,
+ signal,
+ pixel_clock);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+}
+
+void dce110_enable_dp_link_output(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct pipe_ctx *pipes =
+ link->dc->current_state->res_ctx.pipe_ctx;
+ struct clock_source *dp_cs =
+ link->dc->res_pool->dp_clock_source;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ unsigned int i;
+
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+ /* If the current pixel clock source is not DTO(happens after
+ * switching from HDMI passive dongle to DP on the same connector),
+ * switch the pixel clock source to DTO.
+ */
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->link == link) {
+ if (pipes[i].clock_source != NULL &&
+ pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ pipes[i].clock_source = dp_cs;
+ pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
+ pipes[i].stream->timing.pix_clk_100hz;
+ pipes[i].clock_source->funcs->program_pix_clk(
+ pipes[i].clock_source,
+ &pipes[i].stream_res.pix_clk_params,
+ dp_get_link_encoding_format(link_settings),
+ &pipes[i].pll_settings);
+ }
+ }
+ }
+
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+ }
+
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ if (link_hwss->ext.enable_dp_link_output)
+ link_hwss->ext.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
+
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+void dce110_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
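With these hooks in place, the earlier dc_link_dp.c hunk collapses its per-signal teardown into a single call; the caller side, mirroring that hunk, is simply:

    /* Sketch: the hw sequencer now owns backlight/DMCU/PHY ordering and symclk bookkeeping. */
    dc->hwss.disable_link_output(link, link_res, signal);
    /* link->phy_state.symclk_state is left at SYMCLK_OFF_TX_OFF afterwards. */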
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -3031,6 +3163,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index b6f3843d3d05..758f4b3b0087 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -90,6 +90,24 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t frame_ramp);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
-
+void dce110_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+void dce110_enable_lvds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock);
+void dce110_enable_tmds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock);
+void dce110_enable_dp_link_output(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index db7ca4b0cdb9..897f412f539e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -448,11 +448,12 @@ void dpp1_set_cursor_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
+
src_y_offset = pos->y - param->viewport.y;
}
-
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 564e061ccb58..52e201e9b091 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1208,13 +1208,10 @@ void hubp1_cursor_set_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ src_y_offset = pos->y - param->viewport.y;
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 5b5d952b2b8c..72521749c01d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -899,6 +899,14 @@ enum dc_status dcn10_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -1017,6 +1025,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -2151,8 +2160,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
grouped_pipes[i]->stream_res.tg->inst, &pclk);
- grouped_pipes[i]->stream->timing.pix_clk_100hz =
- pclk*get_clock_divider(grouped_pipes[i], false);
+ grouped_pipes[i]->stream->timing.pix_clk_100hz =
+ pclk*get_clock_divider(grouped_pipes[i], false);
if (master == -1)
master = i;
}
@@ -2199,14 +2208,14 @@ void dcn10_enable_vblanks_synchronization(
if (master >= 0) {
for (i = 0; i < group_size; i++) {
if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
- grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
- grouped_pipes[master]->stream_res.tg,
- grouped_pipes[i]->stream_res.tg,
- grouped_pipes[master]->stream->timing.pix_clk_100hz,
- grouped_pipes[i]->stream->timing.pix_clk_100hz,
- get_clock_divider(grouped_pipes[master], false),
- get_clock_divider(grouped_pipes[i], false));
- grouped_pipes[i]->stream->vblank_synchronized = true;
+ grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
+ grouped_pipes[master]->stream_res.tg,
+ grouped_pipes[i]->stream_res.tg,
+ grouped_pipes[master]->stream->timing.pix_clk_100hz,
+ grouped_pipes[i]->stream->timing.pix_clk_100hz,
+ get_clock_divider(grouped_pipes[master], false),
+ get_clock_divider(grouped_pipes[i], false));
+ grouped_pipes[i]->stream->vblank_synchronized = true;
}
grouped_pipes[master]->stream->vblank_synchronized = true;
DC_SYNC_INFO("Sync complete\n");
@@ -2539,8 +2548,10 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
color_space_to_black_color(
dc, pipe_ctx->stream->output_color_space, color);
- if (mpc->funcs->set_bg_color)
+ if (mpc->funcs->set_bg_color) {
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ }
}
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -3340,11 +3351,11 @@ static bool dcn10_dmub_should_update_cursor_data(
if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
return false;
- if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- return true;
+ if (dcn10_can_pipe_disable_cursor(pipe_ctx))
+ return false;
- if (pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
- debug->enable_sw_cntl_psr)
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
return true;
return false;
@@ -3468,8 +3479,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
- (pipe_ctx->bottom_pipe != NULL);
+ bool pipe_split_on = false;
bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
(pipe_ctx->prev_odm_pipe != NULL);
@@ -3478,6 +3488,13 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
+ if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
+ (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
+ pipe_split_on = true;
+ }
+ }
+
/**
* DC cursor is stream space, HW cursor is plane space and drawn
* as part of the framebuffer.
@@ -3549,8 +3566,36 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
pos_cpy.enable = false;
+
+ if (param.rotation == ROTATION_ANGLE_0) {
+ int viewport_width =
+ pipe_ctx->plane_res.scl_data.viewport.width;
+ int viewport_x =
+ pipe_ctx->plane_res.scl_data.viewport.x;
+
+ if (param.mirror) {
+ if (pipe_split_on || odm_combine_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
+ }
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
+ }
+ }
+ }
// Swap axis and mirror horizontally
- if (param.rotation == ROTATION_ANGLE_90) {
+ else if (param.rotation == ROTATION_ANGLE_90) {
uint32_t temp_x = pos_cpy.x;
pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
@@ -3621,23 +3666,25 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int viewport_x =
pipe_ctx->plane_res.scl_data.viewport.x;
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ if (!param.mirror) {
+ if (pipe_split_on || odm_combine_on) {
+ if (pos_cpy.x >= viewport_width + viewport_x) {
+ pos_cpy.x = 2 * viewport_width
+ - pos_cpy.x + 2 * viewport_x;
+ } else {
+ uint32_t temp_x = pos_cpy.x;
+
+ pos_cpy.x = 2 * viewport_x - pos_cpy.x;
+ if (temp_x >= viewport_x +
+ (int)hubp->curs_attr.width || pos_cpy.x
+ <= (int)hubp->curs_attr.width +
+ pipe_ctx->plane_state->src_rect.x) {
+ pos_cpy.x = temp_x + viewport_width;
+ }
}
+ } else {
+ pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
}
/**
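
[Annotation] The cursor changes above add a mirrored-position remap for ROTATION_ANGLE_0: without pipe split or ODM combine, the cursor x coordinate is reflected about the viewport centre. A small standalone illustration of that arithmetic is below (function and variable names are illustrative, not the driver's).

/* Standalone illustration of the mirrored-cursor remap used above for
 * ROTATION_ANGLE_0 without pipe split / ODM combine. */
#include <assert.h>

static int mirror_cursor_x(int x, int viewport_x, int viewport_width)
{
	/* Reflect x about the viewport centre:
	 * centre = viewport_x + viewport_width / 2, so
	 * x' = 2*centre - x == viewport_width - x + 2*viewport_x. */
	return viewport_width - x + 2 * viewport_x;
}

int main(void)
{
	/* A cursor 10px from the left edge of a 1920-wide viewport at x=0
	 * ends up 10px from the right edge after mirroring. */
	assert(mirror_cursor_x(10, 0, 1920) == 1910);
	return 0;
}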
@@ -3738,7 +3785,6 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
int vesa_sync_start;
int asic_blank_end;
int interlace_factor;
- int vertical_line_start;
patched_crtc_timing = *dc_crtc_timing;
apply_front_porch_workaround(&patched_crtc_timing);
@@ -3754,10 +3800,8 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
patched_crtc_timing.v_border_top)
* interlace_factor;
- vertical_line_start = asic_blank_end -
+ return asic_blank_end -
pipe_ctx->pipe_dlg_param.vstartup_start + 1;
-
- return vertical_line_start;
}
void dcn10_calc_vupdate_position(
@@ -3768,7 +3812,7 @@ void dcn10_calc_vupdate_position(
{
const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt0.lines_offset;
+ pipe_ctx->stream->periodic_interrupt.lines_offset;
int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
int start_position;
@@ -3793,18 +3837,10 @@ void dcn10_calc_vupdate_position(
static void dcn10_cal_vline_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
- enum vline_select vline,
uint32_t *start_line,
uint32_t *end_line)
{
- enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
-
- if (vline == VLINE0)
- ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
- else if (vline == VLINE1)
- ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
-
- switch (ref_point) {
+ switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
case START_V_UPDATE:
dcn10_calc_vupdate_position(
dc,
@@ -3813,7 +3849,9 @@ static void dcn10_cal_vline_position(
end_line);
break;
case START_V_SYNC:
- // Suppose to do nothing because vsync is 0;
+ // vsync is line 0 so start_line is just the requested line offset
+ *start_line = pipe_ctx->stream->periodic_interrupt.lines_offset;
+ *end_line = *start_line + 2;
break;
default:
ASSERT(0);
@@ -3823,24 +3861,15 @@ static void dcn10_cal_vline_position(
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline)
+ struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
- if (vline == VLINE0) {
- uint32_t start_line = 0;
- uint32_t end_line = 0;
-
- dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
+ dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
- tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
-
- } else if (vline == VLINE1) {
- pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
- tg,
- pipe_ctx->stream->periodic_interrupt1.lines_offset);
- }
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
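
[Annotation] The hunks above collapse the VLINE0/VLINE1 split into a single periodic_interrupt configuration: the vline_select parameter disappears and only vertical interrupt 0 is programmed, with the start/end lines derived from the configured reference point. A rough standalone model of that dispatch is below; the types, field names, and the simplified V_UPDATE arithmetic are assumptions for illustration only (the real vupdate calculation also handles frame wrap-around).

/* Simplified model of the consolidated vline calculation above. */
#include <stdio.h>

enum ref_point { START_V_UPDATE, START_V_SYNC };

struct periodic_interrupt_cfg {
	enum ref_point ref_point;
	unsigned int lines_offset;
};

static void calc_vline(const struct periodic_interrupt_cfg *cfg,
		       unsigned int vupdate_offset_from_vsync, /* stand-in for the vupdate calc */
		       unsigned int *start, unsigned int *end)
{
	switch (cfg->ref_point) {
	case START_V_UPDATE:
		*start = vupdate_offset_from_vsync + cfg->lines_offset;
		*end = *start + 2;
		break;
	case START_V_SYNC:
		/* vsync is line 0, so the offset is the line itself */
		*start = cfg->lines_offset;
		*end = *start + 2;
		break;
	}
}

int main(void)
{
	struct periodic_interrupt_cfg cfg = { START_V_SYNC, 100 };
	unsigned int s, e;

	calc_vline(&cfg, 0, &s, &e);
	printf("program vertical interrupt 0 for lines %u..%u\n", s, e);
	return 0;
}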
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 9ae07c77fdc0..0ef7bf7ddb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -175,8 +175,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 10e613ec7d24..f2371c948822 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -82,6 +82,10 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 3fc300cd1ce9..ea7739255119 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -312,6 +312,20 @@ void optc1_program_timing(
}
}
+/**
+ * optc1_set_vtg_params - Set Vertical Timing Generator (VTG) parameters
+ *
+ * @optc: timing_generator struct used to extract the optc parameters
+ * @dc_crtc_timing: Timing parameters configured
+ * @program_fp2: Boolean value indicating if FP2 will be programmed or not
+ *
+ * OTG is responsible for generating the global sync signals, including
+ * vertical timing information for each HUBP in the dcfclk domain. Each VTG is
+ * associated with one OTG that provides HUBP with vertical timing information
+ * (i.e., there is 1:1 correspondence between OTG and VTG). This function is
+ * responsible for setting the OTG parameters to the VTG during the pipe
+ * programming.
+ */
void optc1_set_vtg_params(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
@@ -1072,7 +1086,7 @@ static void optc1_set_test_pattern(
src_color[index] >> (src_bpc - dst_bpc);
/* CRTC_TEST_PATTERN_DATA has 16 bits,
* lowest 6 are hardwired to ZERO
- * color bits should be left aligned aligned to MSB
+ * color bits should be left aligned to MSB
* XXXXXXXXXX000000 for 10 bit,
* XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6
*/
@@ -1379,6 +1393,12 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+ REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
+ OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
+
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
@@ -1498,8 +1518,23 @@ bool optc1_configure_crc(struct timing_generator *optc,
return true;
}
+/**
+ * optc1_get_crc - Capture CRC result per component
+ *
+ * @optc: timing_generator instance.
+ * @r_cr: 16-bit primary CRC signature for red data.
+ * @g_y: 16-bit primary CRC signature for green data.
+ * @b_cb: 16-bit primary CRC signature for blue data.
+ *
+ * This function reads the CRC signature from the OPTC registers. Notice that
+ * we have three registers to keep the CRC result per color component (RGB).
+ *
+ * Returns:
+ * If CRC is disabled, return false; otherwise, return true, and the CRC
+ * results in the parameters.
+ */
bool optc1_get_crc(struct timing_generator *optc,
- uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
uint32_t field = 0;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1510,12 +1545,14 @@ bool optc1_get_crc(struct timing_generator *optc,
if (!field)
return false;
+ /* OTG_CRC0_DATA_RG has the CRC16 results for the red and green component */
REG_GET_2(OTG_CRC0_DATA_RG,
- CRC0_R_CR, r_cr,
- CRC0_G_Y, g_y);
+ CRC0_R_CR, r_cr,
+ CRC0_G_Y, g_y);
+ /* OTG_CRC0_DATA_B has the CRC16 results for the blue component */
REG_GET(OTG_CRC0_DATA_B,
- CRC0_B_CB, b_cb);
+ CRC0_B_CB, b_cb);
return true;
}
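
[Annotation] The kernel-doc added above describes the CRC readback: one 16-bit CRC per colour component, with R/Cr and G/Y packed together and B/Cb read separately, and an early-out when CRC generation is disabled. A toy standalone version of that read path is below; the register layout and names here are purely illustrative, not the OPTC register map.

/* Toy model of the per-component CRC readback documented above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_crc_regs {
	bool crc_en;
	uint32_t data_rg;	/* [15:0] R/Cr, [31:16] G/Y (illustrative packing) */
	uint32_t data_b;	/* [15:0] B/Cb */
};

static bool get_crc(const struct fake_crc_regs *r,
		    uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	if (!r->crc_en)
		return false;	/* CRC disabled: nothing valid to report */

	*r_cr = r->data_rg & 0xffff;
	*g_y = (r->data_rg >> 16) & 0xffff;
	*b_cb = r->data_b & 0xffff;
	return true;
}

int main(void)
{
	struct fake_crc_regs regs = { true, 0xBEEF1234, 0x0000CAFE };
	uint32_t r, g, b;

	if (get_crc(&regs, &r, &g, &b))
		printf("crc r=%04x g=%04x b=%04x\n",
		       (unsigned int)r, (unsigned int)g, (unsigned int)b);
	return 0;
}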
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 3fe5882ed018..6323ca6dc3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -583,6 +583,8 @@ struct dcn_otg_state {
uint32_t underflow_occurred_status;
uint32_t otg_enabled;
uint32_t blank_enabled;
+ uint32_t vertical_interrupt1_en;
+ uint32_t vertical_interrupt1_line;
uint32_t vertical_interrupt2_en;
uint32_t vertical_interrupt2_line;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 174eebbe8b4f..831080b9eb87 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1495,6 +1495,24 @@ static bool dcn10_resource_construct(
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
+ if (!dc->config.is_vmin_only_asic)
+ if (ASICREV_IS_RAVEN2(dc->ctx->asic_id.hw_internal_rev))
+ switch (dc->ctx->asic_id.pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ dc->config.is_vmin_only_asic = true;
+ break;
+ default:
+ break;
+ }
+
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 2b9d3e63191b..915a20461c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -274,6 +274,7 @@ struct dccg_registers {
uint32_t DSCCLK2_DTO_PARAM;
uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
uint32_t DCCG_GATE_DISABLE_CNTL2;
uint32_t DCCG_GATE_DISABLE_CNTL3;
uint32_t HDMISTREAMCLK0_DTO_PARAM;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index cd2671161ef1..7ce64a3c1b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -445,226 +445,6 @@
type DSCRM_DSC_FORWARD_EN; \
type DSCRM_DSC_OPP_PIPE_SOURCE
-#define DSC_REG_LIST_DCN314(id) \
- SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
- SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
- SRI(DSCC_CONFIG0, DSCC, id),\
- SRI(DSCC_CONFIG1, DSCC, id),\
- SRI(DSCC_STATUS, DSCC, id),\
- SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
- SRI(DSCC_PPS_CONFIG0, DSCC, id),\
- SRI(DSCC_PPS_CONFIG1, DSCC, id),\
- SRI(DSCC_PPS_CONFIG2, DSCC, id),\
- SRI(DSCC_PPS_CONFIG3, DSCC, id),\
- SRI(DSCC_PPS_CONFIG4, DSCC, id),\
- SRI(DSCC_PPS_CONFIG5, DSCC, id),\
- SRI(DSCC_PPS_CONFIG6, DSCC, id),\
- SRI(DSCC_PPS_CONFIG7, DSCC, id),\
- SRI(DSCC_PPS_CONFIG8, DSCC, id),\
- SRI(DSCC_PPS_CONFIG9, DSCC, id),\
- SRI(DSCC_PPS_CONFIG10, DSCC, id),\
- SRI(DSCC_PPS_CONFIG11, DSCC, id),\
- SRI(DSCC_PPS_CONFIG12, DSCC, id),\
- SRI(DSCC_PPS_CONFIG13, DSCC, id),\
- SRI(DSCC_PPS_CONFIG14, DSCC, id),\
- SRI(DSCC_PPS_CONFIG15, DSCC, id),\
- SRI(DSCC_PPS_CONFIG16, DSCC, id),\
- SRI(DSCC_PPS_CONFIG17, DSCC, id),\
- SRI(DSCC_PPS_CONFIG18, DSCC, id),\
- SRI(DSCC_PPS_CONFIG19, DSCC, id),\
- SRI(DSCC_PPS_CONFIG20, DSCC, id),\
- SRI(DSCC_PPS_CONFIG21, DSCC, id),\
- SRI(DSCC_PPS_CONFIG22, DSCC, id),\
- SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
- SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCCIF_CONFIG0, DSCCIF, id),\
- SRI(DSCCIF_CONFIG1, DSCCIF, id),\
- SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
-
-#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
- DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
- DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
- DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
-
-
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
uint32_t DSC_DEBUG_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index 9570c2118ccc..b1ec0e6f7f58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -987,13 +987,10 @@ void hubp2_cursor_set_position(
src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
}
} else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
+ if (!param->mirror)
+ src_x_offset = pos->x - param->viewport.x;
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+ src_y_offset = pos->y - param->viewport.y;
}
dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 884fa060f375..e1d271fe9e64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -706,6 +706,14 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
@@ -1565,6 +1573,7 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in dc interface, just need
* to apply existing for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
@@ -1898,8 +1907,14 @@ void dcn20_post_unlock_program_front_end(
* can underflow due to HUBP_VTG_SEL programming if done in the regular front end
* programming sequence).
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ while (pipe) {
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
}
}
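
[Annotation] The hunk above replaces a single check of the top pipe with a walk down the bottom_pipe chain, so every phantom (SubVP) pipe in a split gets its viewport position updated and programmed. A minimal sketch of that traversal is below; the pipe struct and helpers are stand-ins, not the driver's pipe_ctx.

/* Minimal sketch of the new traversal: walk the bottom_pipe chain and
 * (re)program every phantom pipe rather than only the first one. */
#include <stdbool.h>
#include <stdio.h>

struct pipe {
	bool is_phantom;           /* stands in for SUBVP_PHANTOM */
	struct pipe *bottom_pipe;  /* next pipe in the vertical split chain */
};

static void program_pipe(struct pipe *p) { printf("program %p\n", (void *)p); }

static void program_phantom_pipes(struct pipe *pipe)
{
	while (pipe) {
		if (pipe->is_phantom)
			program_pipe(pipe);
		pipe = pipe->bottom_pipe;
	}
}

int main(void)
{
	struct pipe bottom = { true, NULL };
	struct pipe top = { false, &bottom };

	program_phantom_pipes(&top);	/* only the phantom bottom pipe is programmed */
	return 0;
}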
@@ -2346,7 +2361,9 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
- struct dc_link *link;
+ struct dc_link *link = pipe_ctx->stream->link;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -2354,7 +2371,6 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- link = pipe_ctx->stream->link;
/* DPMS may already disable or */
/* dpms_off status is incorrect due to fastboot
* feature. When system resume from S4 with second
@@ -2403,6 +2419,16 @@ static void dcn20_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
+ /* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
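
[Annotation] The TODO above proposes tracking OTG users of a shared symclk as a bit map instead of the current single count, so one symclk shared by several OTG instances is only turned off once the last user is gone. A minimal sketch of what that bookkeeping could look like follows; it is purely illustrative and not part of the driver.

/* Illustrative per-OTG symclk user bit map, as suggested by the TODO. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct symclk_users {
	uint32_t otg_bits;	/* bit n set => OTG instance n is using symclk */
};

static void symclk_get(struct symclk_users *u, unsigned int otg_inst)
{
	u->otg_bits |= 1u << otg_inst;
}

static bool symclk_put(struct symclk_users *u, unsigned int otg_inst)
{
	u->otg_bits &= ~(1u << otg_inst);
	return u->otg_bits == 0;	/* true when the last user is gone */
}

int main(void)
{
	struct symclk_users u = { 0 };

	symclk_get(&u, 0);
	symclk_get(&u, 2);		/* two OTGs share the same symclk */
	printf("last user gone: %d\n", symclk_put(&u, 0));	/* 0 */
	printf("last user gone: %d\n", symclk_put(&u, 2));	/* 1 */
	return 0;
}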
@@ -2462,9 +2488,13 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
get_mpctree_visual_confirm_color(pipe_ctx, color);
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
get_surface_tile_visual_confirm_color(pipe_ctx, color);
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
+ get_subvp_visual_confirm_color(dc, pipe_ctx, color);
- if (mpc->funcs->set_bg_color)
+ if (mpc->funcs->set_bg_color) {
+ memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
mpc->funcs->set_bg_color(mpc, color, mpcc_id);
+ }
}
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 91e4885b743e..7c5817c426fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -96,6 +96,10 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
index 694260c10a01..ccd91792991b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -215,7 +215,8 @@ void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en);
REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en);
- REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
+ if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN)
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
}
void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 05b3fba9ccce..61bcfa03c4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -82,7 +82,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
+static bool gpu_addr_to_uma(struct dce_hwseq *hwseq,
PHYSICAL_ADDRESS_LOC *addr)
{
bool is_in_uma;
@@ -98,6 +98,7 @@ static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
} else {
is_in_uma = false;
}
+ return is_in_uma;
}
static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
index 1826dd7f3da1..9c16633e473a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
@@ -86,6 +86,10 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index b270f0b194dc..fe1a8e2e08ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index fb59fed8f425..8c5045711264 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -939,13 +939,32 @@ bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, s
void dcn30_hardware_release(struct dc *dc)
{
+ bool subvp_in_use = false;
+ uint32_t i;
+
dc_dmub_srv_p_state_delegate(dc, false, NULL);
+ dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false);
+
+ /* SubVP treated the same way as FPO. If driver disable and
+ * we are using a SubVP config, disable and force on DCN side
+ * to prevent P-State hang on driver enable.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ subvp_in_use = true;
+ break;
+ }
+ }
/* If pstate unsupported, or still supported
* by firmware, force it supported by dcn
*/
if (dc->current_state)
- if ((!dc->clk_mgr->clks.p_state_change_support ||
+ if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 4c06e6e1ba4a..3216d10c58ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
index f2580e65196c..7446e54bf5aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
@@ -227,11 +227,7 @@
SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
@@ -363,11 +359,7 @@
SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 64320e0ca446..3a3b2ac791c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -724,7 +724,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.disable_psr = false,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1916,7 +1917,7 @@ static int get_refresh_rate(struct dc_state *context)
*/
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
-int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
+static int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
{
struct dc_crtc_timing *timing = NULL;
uint32_t sec_per_100_lines;
@@ -1946,7 +1947,7 @@ int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
return scaled_refresh_rate;
}
-bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
+static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
{
int refresh_rate_max_stretch_100hz;
int min_refresh_100hz;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 3d42a1a337ec..6192851c59ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.optimize_pwr_state = dcn21_optimize_pwr_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index db172677d613..559e563d5bc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -634,7 +634,7 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
-static const struct resource_caps res_cap_dcn301 = {
+static struct resource_caps res_cap_dcn301 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
@@ -700,6 +700,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.use_max_lb = false,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1429,6 +1430,8 @@ static bool dcn301_resource_construct(
ctx->dc_bios->regs = &bios_regs;
+ if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435)
+ res_cap_dcn301.num_pll = 2;
pool->base.res_cap = &res_cap_dcn301;
pool->base.funcs = &dcn301_res_pool_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 4fab537e822f..b925b6ddde5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -93,7 +93,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index d97076648acb..527d5c902878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -77,6 +77,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
+ .exit_idle_opt_for_cursor_updates = true,
.disable_idle_power_optimizations = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index a788d160953b..ab70ebd8f223 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+ return true;
+
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 23621ff08c90..52fb2bf3d578 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -150,9 +150,9 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
* 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
- // VID_STREAM_STATUS, 0,
- // 10, 5000);
+ REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_STATUS, 0,
+ 10, 5000);
/* Disable SDP transmission */

REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 51c5f3685470..6360dc9502e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -876,7 +876,7 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
return true;
}
-static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
index e3a654bf04e8..70c60de448ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
@@ -122,6 +122,8 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub31_construct(struct dcn20_hubbub *hubbub3,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 1ed1404e969d..bdf101547484 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -535,11 +535,11 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
-
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index e708f07fe75a..3a32810bbe38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index aedff18aff56..8c1a6fb36306 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -889,9 +889,8 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.disable_z10 = true,
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
- .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
+ .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE,
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 232cc15979dd..1bd7e0f327d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+ return;
switch (otg_inst) {
case 0:
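
[Annotation] The guard added above reads back the currently programmed K1/K2 pixel-rate dividers and skips reprogramming when the requested values are "not applicable" or already match the hardware. A standalone sketch of that pattern is below; the enum values and helper names are simplified, not the DCCG API.

/* Standalone sketch of the read-before-program guard added above. */
#include <stdbool.h>
#include <stdio.h>

enum pixel_rate_div { DIV_BY_1, DIV_BY_2, DIV_BY_4, PIXEL_RATE_DIV_NA };

struct otg_div { enum pixel_rate_div k1, k2; };	/* current hardware state */

static bool set_pixel_rate_div(struct otg_div *cur,
			       enum pixel_rate_div k1, enum pixel_rate_div k2)
{
	if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA ||
	    (k1 == cur->k1 && k2 == cur->k2))
		return false;		/* nothing to program */

	cur->k1 = k1;
	cur->k2 = k2;
	return true;			/* the register write would happen here */
}

int main(void)
{
	struct otg_div otg0 = { DIV_BY_1, DIV_BY_2 };

	printf("%d\n", set_pixel_rate_div(&otg0, DIV_BY_1, DIV_BY_2));	/* 0: unchanged */
	printf("%d\n", set_pixel_rate_div(&otg0, DIV_BY_2, DIV_BY_2));	/* 1: programmed */
	return 0;
}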
@@ -137,7 +184,7 @@ static void dccg314_set_dtbclk_p_src(
}
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
-void dccg314_set_dtbclk_dto(
+static void dccg314_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
@@ -181,7 +228,7 @@ void dccg314_set_dtbclk_dto(
}
}
-void dccg314_set_dpstreamclk(
+static void dccg314_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -220,7 +267,7 @@ void dccg314_set_dpstreamclk(
}
}
-void dccg314_set_valid_pixel_rate(
+static void dccg314_set_valid_pixel_rate(
struct dccg *dccg,
int ref_dtbclk_khz,
int otg_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
index 9a4a9efc0203..6a35986307af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
@@ -63,34 +63,28 @@
DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL2),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
SR(DTBCLK_P_CNTL),\
SR(DCCG_AUDIO_DTO_SOURCE)
-
-#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, mask_sh),\
@@ -100,7 +94,6 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK3_SRC_SEL, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN, mask_sh),\
- DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
@@ -148,7 +141,48 @@
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh)
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
+
+#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
+ DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh)
struct dccg *dccg314_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index b384f30395d3..0d2ffb692957 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -67,8 +68,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
- DIG_FIFO_READ_START_LEVEL, 0);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
static void enc314_dp_set_odm_combine(
@@ -81,7 +81,7 @@ static void enc314_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc314_stream_encoder_dvi_set_stream_attribute(
+static void enc314_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -262,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+void enc314_stream_encoder_dp_blank(
+ struct dc_link *link,
+ struct stream_encoder *enc)
+{
+ /* New to DCN314 - disable the FIFO before VID stream disable. */
+ enc314_disable_fifo(enc);
+
+ enc1_stream_encoder_dp_blank(link, enc);
+}
+
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -322,9 +332,6 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
- /* DIG Resync FIFO now needs to be explicitly enabled. */
- enc314_enable_fifo(enc);
-
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
@@ -340,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+ /*
+ * DIG Resync FIFO now needs to be explicitly enabled.
+ * This should come after DP_VID_STREAM_ENABLE per HW docs.
+ */
+ enc314_enable_fifo(enc);
+
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
@@ -408,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
- enc1_stream_encoder_dp_blank,
+ enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 39931d48f385..588c1c71241f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,12 +343,14 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return odm_combine_factor;
+
if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -364,7 +366,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -384,21 +386,10 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
return;
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
- || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
pix_per_cycle = 2;
if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}
-
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
- dc->debug.enable_dp_dig_pixel_rate_div_policy)
- return true;
- return false;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index d014580592ac..244280298212 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -41,6 +41,4 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
-
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index fcf67eb3478f..5b6c2d94ec71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -102,6 +102,10 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
@@ -146,7 +150,6 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 0c7980266b85..47eb162f1a75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
- REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}
@@ -149,7 +150,7 @@ static bool optc314_disable_crtc(struct timing_generator *optc)
return true;
}
-void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
+static void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 3a9e3870b3a9..24ec71cbd3e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -87,6 +87,9 @@
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
@@ -454,6 +457,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
@@ -578,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
#define dsc_regsDCN314(id)\
[id] = {\
- DSC_REG_LIST_DCN314(id)\
+ DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
@@ -589,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
};
static const struct dcn20_dsc_shift dsc_shift = {
- DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
- DSC_REG_LIST_SH_MASK_DCN314(_MASK)
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
@@ -843,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
- .num_dsc = 4,
+ .num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
@@ -911,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.seamless_boot_odm_combine = true
};
@@ -1643,6 +1646,7 @@ static struct clock_source *dcn31_clock_source_create(
}
BREAK_TO_DEBUGGER();
+ kfree(clk_src);
return NULL;
}
@@ -1715,6 +1719,7 @@ static struct clock_source *dcn30_clock_source_create(
}
BREAK_TO_DEBUGGER();
+ kfree(clk_src);
return NULL;
}
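
The two kfree() additions above close a leak on the clock-source construct failure path. A minimal standalone sketch of that allocate/construct/free-on-failure pattern, using illustrative names rather than the driver's actual types:

#include <stdbool.h>
#include <stdlib.h>

struct clk_src { int placeholder; };

/* Illustrative stand-in for the clock-source construct helper. */
static bool clk_src_construct(struct clk_src *cs)
{
	(void)cs;
	return false;	/* pretend construction failed */
}

static struct clk_src *clock_source_create(void)
{
	struct clk_src *clk_src = calloc(1, sizeof(*clk_src));

	if (!clk_src)
		return NULL;

	if (clk_src_construct(clk_src))
		return clk_src;

	/* Construction failed: free the allocation instead of leaking it,
	 * which is what the added kfree(clk_src) calls above do.
	 */
	free(clk_src);
	return NULL;
}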
@@ -1814,8 +1819,6 @@ static bool dcn314_resource_construct(
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
- else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS)
- dc->debug = debug_defaults_diags;
else
dc->debug = debug_defaults_diags;
// Init the vm_helper
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 7463b12ae4a3..eebb42c9ddd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -886,7 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
.psr_power_use_phy_fsm = 0,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index d56a212e065c..f4b52a35ad84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -886,7 +886,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.optimize_edp_link_rate = true,
- .enable_sw_cntl_psr = true,
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index a31c64b50410..e4daed44ef5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg32_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ // Don't program 0xF into the register field. Not valid since
+ // K1 / K2 field is only 1 / 2 bits wide
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ return;
+
+ dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == cur_k1 && k2 == cur_k2)
+ return;
+
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
@@ -133,7 +186,7 @@ static void dccg32_set_dtbclk_p_src(
}
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
-void dccg32_set_dtbclk_dto(
+static void dccg32_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
@@ -208,7 +261,7 @@ static void dccg32_get_dccg_ref_freq(struct dccg *dccg,
return;
}
-void dccg32_set_dpstreamclk(
+static void dccg32_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -225,19 +278,19 @@ void dccg32_set_dpstreamclk(
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();
@@ -245,7 +298,7 @@ void dccg32_set_dpstreamclk(
}
}
-void dccg32_otg_add_pixel(struct dccg *dccg,
+static void dccg32_otg_add_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -254,7 +307,7 @@ void dccg32_otg_add_pixel(struct dccg *dccg,
OTG_ADD_PIXEL[otg_inst], 1);
}
-void dccg32_otg_drop_pixel(struct dccg *dccg,
+static void dccg32_otg_drop_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d6855d4f749b..fdae6aa89908 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -118,7 +118,7 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t dp_alt_mode_disable = 0;
@@ -133,7 +133,7 @@ bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
return is_usb_c_alt_mode;
}
-void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 26648ce772da..0e9dce414641 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -60,7 +60,7 @@ static void enc32_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc32_stream_encoder_dvi_set_stream_attribute(
+static void enc32_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
+ /* A read start level of 0 would cause underflow/overflow and DIG_FIFO_ERROR = 1,
+ * so set it to 1/2 full = 7 before reset, as suggested by the hardware team.
+ */
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
index f349cbe2a0f0..dcf12a0b031c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_cm_common.h"
/* Compute the maximum number of lines that we can fit in the line buffer */
-void dscl32_calc_lb_num_partitions(
+static void dscl32_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 99eb239bbc7b..f6d3da475835 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -68,7 +68,7 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
-static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
+void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
- /* Should never be hit, if it is we have an erroneous hw config*/
- ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
- + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+ if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+ /* This may happen during a seamless transition from ODM 2:1 to ODM 4:1 */
+ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+ hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+ hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+ }
}
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
@@ -140,7 +144,7 @@ static uint32_t convert_and_clamp(
return ret_val;
}
-static bool hubbub32_program_urgent_watermarks(
+bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -330,7 +334,7 @@ static bool hubbub32_program_urgent_watermarks(
return wm_pending;
}
-static bool hubbub32_program_stutter_watermarks(
+bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -476,7 +480,7 @@ static bool hubbub32_program_stutter_watermarks(
}
-static bool hubbub32_program_pstate_watermarks(
+bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -629,7 +633,7 @@ static bool hubbub32_program_pstate_watermarks(
}
-static bool hubbub32_program_usr_watermarks(
+bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -769,7 +773,7 @@ static bool hubbub32_program_watermarks(
}
/* Copy values from WM set A to all other sets */
-void hubbub32_init_watermarks(struct hubbub *hubbub)
+static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t reg;
@@ -820,7 +824,7 @@ void hubbub32_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}
-void hubbub32_wm_read_state(struct hubbub *hubbub,
+static void hubbub32_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index 3bae6e558971..cda94e0e31bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -161,6 +161,35 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+bool hubbub32_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_usr_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);
+
+void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+
+void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 6ec1c52535b9..2038cbda33f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes(
enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
attr->width, attr->color_format);
+ // Round cursor width up to the next multiple of 64
+ uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+ uint32_t cursor_height = attr->height;
+ uint32_t cursor_size = cursor_width * cursor_height;
+
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes(
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
- if (attr->width * attr->height * 4 > 16384)
+ switch (attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
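
The cursor handling above keeps repeating the same sizing arithmetic: round the width up to a multiple of 64, scale by the per-format byte count, and compare against the 16384-byte threshold that decides whether the cursor goes through MALL. A minimal standalone sketch of that arithmetic, with an illustrative helper name and a simplified format enum (not the driver's types):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the cursor color formats handled above. */
enum cursor_fmt { FMT_MONO, FMT_ARGB32, FMT_FP64 };

static bool cursor_needs_mall(uint32_t width, uint32_t height, enum cursor_fmt fmt)
{
	/* Round the cursor width up to the next multiple of 64, as the HUBP code does. */
	uint32_t cursor_width = ((width + 63) / 64) * 64;
	uint32_t cursor_size = cursor_width * height;

	switch (fmt) {
	case FMT_MONO:
		cursor_size /= 2;	/* mirrors the CURSOR_MODE_MONO case */
		break;
	case FMT_ARGB32:
		cursor_size *= 4;	/* 4 bytes per pixel */
		break;
	case FMT_FP64:
		cursor_size *= 8;	/* 8 bytes per pixel */
		break;
	}

	/* Example: a 96x96 ARGB cursor rounds to 128 wide, 128 * 96 * 4 = 49152 > 16384. */
	return cursor_size > 16384;
}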
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index ebd3945c71f1..a750343ca521 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -49,6 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "dcn32_resource.h"
#include "dc_link_dp.h"
#include "dmub/inc/dmub_subvp_state.h"
@@ -198,42 +199,6 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
return false;
}
-/* This function takes in the start address and surface size to be cached in CAB
- * and calculates the total number of cache lines required to store the surface.
- * The number of cache lines used for each surface is calculated independently of
- * one another. For example, if there is a primary surface(1), meta surface(2), and
- * cursor(3), this function should be called 3 times to calculate the number of cache
- * lines used for each of those surfaces.
- */
-static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_size, uint64_t start_address)
-{
- uint32_t lines_used = 1;
- uint32_t num_cached_bytes = 0;
- uint32_t remaining_size = 0;
- uint32_t cache_line_size = dc->caps.cache_line_size;
- uint32_t remainder = 0;
-
- /* 1. Calculate surface size minus the number of bytes stored
- * in the first cache line (all bytes in first cache line might
- * not be fully used).
- */
- div_u64_rem(start_address, cache_line_size, &remainder);
- num_cached_bytes = cache_line_size - remainder;
- remaining_size = surface_size - num_cached_bytes;
-
- /* 2. Calculate number of cache lines that will be fully used with
- * the remaining number of bytes to be stored.
- */
- lines_used += (remaining_size / cache_line_size);
-
- /* 3. Check if we need an extra line due to the remaining size not being
- * a multiple of CACHE_LINE_SIZE.
- */
- if (remaining_size % cache_line_size > 0)
- lines_used++;
-
- return lines_used;
-}
/* This function loops through every surface that needs to be cached in CAB for SS,
* and calculates the total number of ways required to store all surfaces (primary,
@@ -241,80 +206,116 @@ static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_si
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i, j;
+ uint8_t i;
+ int j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
- uint32_t surface_size = 0;
uint32_t cursor_size = 0;
- uint32_t cache_lines_used = 0;
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
- uint32_t num_ways = 0;
- uint32_t prev_addr_low = 0;
+ uint8_t num_ways = 0;
+ uint8_t bytes_per_pixel = 0;
+ uint8_t cursor_bpp = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint16_t mall_alloc_width_blk_aligned = 0;
+ uint16_t mall_alloc_height_blk_aligned = 0;
+ uint16_t num_mblks = 0;
+ uint32_t bytes_in_mall = 0;
+ uint32_t cache_lines_used = 0;
+ uint32_t cache_lines_per_plane = 0;
- for (i = 0; i < ctx->stream_count; i++) {
- stream = ctx->streams[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- // Don't include PSR surface in the total surface size for CAB allocation
- if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ if (!pipe->stream || !pipe->plane_state ||
+ pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED ||
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
- if (ctx->stream_status[i].plane_count == 0)
- continue;
+ bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
- // For each stream, loop through each plane to calculate the number of cache
- // lines required to store the surface in CAB
- for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
- plane = ctx->stream_status[i].plane_states[j];
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ *
+ * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
+ */
+ mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ *
+ * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
+ */
+ mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
- // Calculate total surface size
- if (prev_addr_low != plane->address.grph.addr.u.low_part) {
- /* if plane address are different from prev FB, then userspace allocated separate FBs*/
- surface_size += plane->plane_size.surface_pitch *
- plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
- prev_addr_low = plane->address.grph.addr.u.low_part;
- } else {
- /* We have the same fb for all the planes.
- * Xorg always creates one giant fb that holds all surfaces,
- * so allocating it once is sufficient.
- * */
- continue;
- }
- // Convert surface size + starting address to number of cache lines required
- // (alignment accounted for)
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.addr.quad_part);
-
- if (plane->address.grph.meta_addr.quad_part) {
- // Meta surface
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.meta_addr.quad_part);
- }
- }
+ /* For DCC:
+ * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1)
+ */
+ if (pipe->plane_state->dcc.enable)
+ num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_height_blk_aligned * bytes_per_pixel +
+ (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
- // Include cursor size for CAB allocation
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
- cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
+ /* Cache lines used is total bytes / cache_line size. Add +2 for worst-case
+ * alignment (MALL is 64-byte aligned).
+ */
+ cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
+ cache_lines_used += cache_lines_per_plane;
+ }
+
+ // Include cursor size for CAB allocation
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp)
+ /* Find the cursor plane and use the exact size instead of
+ * using the max for the calculation */
+
+ if (hubp->curs_attr.width > 0) {
+ // Round cursor width up to the next multiple of 64
+ cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+
+ switch (pipe->stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ cursor_bpp = 4;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ cursor_bpp = 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ cursor_bpp = 8;
+ break;
+ }
+
+ if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
+ /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_height*cursor_Bpe/mblk_bytes, 1)
+ */
+ cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
+ DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
+ DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ }
break;
}
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.cursor_cache_addr.quad_part);
- }
}
// Convert number of cache lines required to number of ways
@@ -325,6 +326,28 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ for (i = 0; i < ctx->stream_count; i++) {
+ stream = ctx->streams[i];
+ for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+ plane = ctx->stream_status[i].plane_states[j];
+
+ if (stream->cursor_position.enable && plane &&
+ dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
+ /* Cursor caching is not supported since it won't be on the same line.
+ * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
+ * this case triggers corruption. If we're at the edge, then don't trigger display refresh
+ * from MALL. We only need to cache the cursor if it is greater than 64x64 at 4 bpp.
+ */
+ num_ways++;
+ /* We only expect one cursor plane */
+ break;
+ }
+ }
+ }
+ if (dc->debug.force_mall_ss_num_ways > 0) {
+ num_ways = dc->debug.force_mall_ss_num_ways;
+ }
return num_ways;
}
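
The tail of dcn32_calculate_cab_allocation() converts the accumulated cache-line count into MALL ways by ceiling division against the lines available per way. A minimal sketch of that conversion; the assumption that lines_per_way comes from the total line count divided by dc->caps.cache_num_ways is inferred from context and is not visible in this hunk:

#include <stdint.h>

static uint32_t cab_lines_to_ways(uint32_t cache_lines_used,
				  uint32_t total_cache_lines,
				  uint32_t cache_num_ways)
{
	uint32_t lines_per_way, num_ways;

	if (cache_num_ways == 0)
		return 0;

	lines_per_way = total_cache_lines / cache_num_ways;
	if (lines_per_way == 0)
		return 0;

	num_ways = cache_lines_used / lines_per_way;
	/* Any remainder spills into one more way, as in the check above. */
	if (cache_lines_used % lines_per_way > 0)
		num_ways++;

	return num_ways;
}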
@@ -333,7 +356,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
union dmub_rb_cmd cmd;
uint8_t ways, i;
int j;
- bool stereo_in_use = false;
+ bool mall_ss_unsupported = false;
struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
@@ -364,22 +387,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- /* MALL not supported with Stereo3D. If any plane is using stereo,
- * don't try to enter MALL.
+ /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
+ * or TMZ surface, don't try to enter MALL.
*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
plane = dc->current_state->stream_status[i].plane_states[j];
- if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
- stereo_in_use = true;
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
+ plane->address.tmz_surface) {
+ mall_ss_unsupported = true;
break;
}
}
- if (stereo_in_use)
+ if (mall_ss_unsupported)
break;
}
- if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
+ if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -417,7 +441,6 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
{
-/*
int i;
bool enable_subvp = false;
@@ -435,7 +458,6 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
}
}
dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp);
-*/
}
/* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:
@@ -641,9 +663,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
stream->out_transfer_func,
&mpc->blender_params, false))
params = &mpc->blender_params;
- /* there are no ROM LUTs in OUTGAM */
- if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
- BREAK_TO_DEBUGGER();
+ /* there are no ROM LUTs in OUTGAM */
+ if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED)
+ BREAK_TO_DEBUGGER();
}
}
@@ -707,7 +729,29 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+ // Round cursor width up to the next multiple of 64
+ int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+ int cursor_height = hubp->curs_attr.height;
+ int cursor_size = cursor_width * cursor_height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
cache_cursor = true;
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
@@ -717,7 +761,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
- pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface ? 2 : 0,
cache_cursor);
}
}
@@ -827,6 +872,7 @@ void dcn32_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
@@ -1125,6 +1171,9 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return odm_combine_factor;
+
if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -1218,3 +1267,155 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
return true;
return false;
}
+
+static void apply_symclk_on_tx_off_wa(struct dc_link *link)
+{
+ /* There are use cases where SYMCLK is referenced by OTG. For instance
+ * for a TMDS signal, the OTG relies on SYMCLK even if TX video output is off.
+ * However, the current link interface will power off the PHY when disabling link
+ * output. This will turn off SYMCLK generated by PHY. The workaround is
+ * to identify such case where SYMCLK is still in use by OTG when we
+ * power off PHY. When this is detected, we will temporarily power PHY
+ * back on and move PHY's SYMCLK state to SYMCLK_ON_TX_OFF by calling
+ * program_pix_clk interface. When OTG is disabled, we will then power
+ * off PHY by calling disable link output again.
+ *
+ * In future dcn generations, we plan to rework transmitter control
+ * interface so that we could have an option to set SYMCLK ON TX OFF
+ * state in one step without this workaround
+ */
+
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ if (link->phy_state.symclk_ref_cnts.otg > 0) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ break;
+ }
+ }
+ }
+}
+
+void dcn32_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+ apply_symclk_on_tx_off_wa(link);
+}
+
+/* For SubVP the main pipe can have a viewport position change
+ * without a full update. In this case we must also update the
+ * viewport positions for the phantom pipe accordingly.
+ */
+void dcn32_update_phantom_vp_position(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe)
+{
+ uint32_t i;
+ struct dc_plane_state *phantom_plane = phantom_pipe->plane_state;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+ pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+ if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
+
+ phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
+ phantom_plane->src_rect.y = pipe->plane_state->src_rect.y;
+ phantom_plane->clip_rect.x = pipe->plane_state->clip_rect.x;
+ phantom_plane->dst_rect.x = pipe->plane_state->dst_rect.x;
+ phantom_plane->dst_rect.y = pipe->plane_state->dst_rect.y;
+
+ phantom_pipe->plane_state->update_flags.bits.position_change = 1;
+ resource_build_scaling_params(phantom_pipe);
+ return;
+ }
+ }
+ }
+}
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst)
+{
+ uint32_t pwr_status = 0;
+
+ switch (dsc_inst) {
+ case 0: /* DSC0 */
+ REG_GET(DOMAIN16_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 1: /* DSC1 */
+ REG_GET(DOMAIN17_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 2: /* DSC2 */
+ REG_GET(DOMAIN18_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 3: /* DSC3 */
+ REG_GET(DOMAIN19_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ return pwr_status == 0;
+}
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst);
+
+ if (context->res_ctx.is_dsc_acquired[i]) {
+ if (!is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, true);
+ }
+ } else if (safe_to_disable) {
+ if (is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, false);
+ }
+ }
+ }
+}
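
dcn32_update_dsc_pg() above walks every DSC instance and either ungates an acquired-but-gated instance or, when safe_to_disable is set, gates an unused-but-ungated one. A compact sketch of just that decision, with illustrative names:

#include <stdbool.h>

enum dsc_pg_action { DSC_PG_NO_CHANGE, DSC_PG_UNGATE, DSC_PG_GATE };

/* Mirror of the per-instance decision in dcn32_update_dsc_pg():
 * ungate a DSC the new state uses, gate an idle one only when safe.
 */
static enum dsc_pg_action dsc_pg_decision(bool is_acquired, bool is_ungated,
					  bool safe_to_disable)
{
	if (is_acquired)
		return is_ungated ? DSC_PG_NO_CHANGE : DSC_PG_UNGATE;

	if (safe_to_disable && is_ungated)
		return DSC_PG_GATE;

	return DSC_PG_NO_CHANGE;
}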
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 083f3aeb54f0..ac3657a5b9ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -84,4 +84,20 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+void dcn32_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
+void dcn32_update_phantom_vp_position(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst);
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index c279a25ea293..45a949ba6f3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -99,11 +99,17 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dcn32_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
@@ -133,6 +139,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn32_update_odm,
.dsc_pg_control = dcn32_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
index adf93cc8359c..41b0baf8e183 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
@@ -100,7 +100,7 @@ static void mmhubbub32_warmup_mcif(struct mcif_wb *mcif_wb,
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
-void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
+static void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
index 22355051f5f7..e460cf8d9041 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
@@ -90,7 +90,6 @@
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\
@@ -101,7 +100,6 @@
SF(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\
@@ -116,7 +114,6 @@
SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\
@@ -131,7 +128,6 @@
SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\
@@ -146,7 +142,6 @@
SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\
@@ -172,11 +167,6 @@
SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 357bd2461bc9..4edd0655965b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -701,7 +701,7 @@ static void mpc32_power_on_shaper_3dlut(
}
-bool mpc32_program_shaper(
+static bool mpc32_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -726,7 +726,7 @@ bool mpc32_program_shaper(
else
next_mode = LUT_RAM_A;
- mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A ? true:false, mpcc_id);
+ mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);
if (next_mode == LUT_RAM_A)
mpc32_program_shaper_luta_settings(mpc, params, mpcc_id);
@@ -897,7 +897,7 @@ static void mpc32_set_3dlut_mode(
}
-bool mpc32_program_3dlut(
+static bool mpc32_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index 1fad7b48bd5b..ec3989d37086 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -156,7 +156,7 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
return true;
}
-void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
+static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -190,7 +190,7 @@ static void optc32_set_odm_bypass(struct timing_generator *optc,
optc1->opp_count = 1;
}
-void optc32_setup_manual_trigger(struct timing_generator *optc)
+static void optc32_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct dc *dc = optc->ctx->dc;
@@ -215,7 +215,7 @@ void optc32_setup_manual_trigger(struct timing_generator *optc)
}
}
-void optc32_set_drr(
+static void optc32_set_drr(
struct timing_generator *optc,
const struct drr_params *params)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 8b887b552f2c..05de97ea855f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -90,29 +90,6 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"
-#define DCN_BASE__INST0_SEG1 0x000000C0
-#define DCN_BASE__INST0_SEG2 0x000034C0
-#define DCN_BASE__INST0_SEG3 0x00009000
-#define NBIO_BASE__INST0_SEG1 0x00000014
-
-#define MAX_INSTANCE 6
-#define MAX_SEGMENT 6
-
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-
#define DC_LOGGER_INIT(logger)
enum dcn32_clk_src_array_id {
@@ -131,79 +108,103 @@ enum dcn32_clk_src_array_id {
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
-#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
reg ## reg_name
+#define SR_ARR(reg_name, id) \
+ REG_STRUCT[id].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SR_ARR_INIT(reg_name, id, value) \
+ REG_STRUCT[id].reg_name = value
#define SRI(reg_name, block, id)\
- .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## temp_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
#define DCCG_SRII(reg_name, block, id)\
- .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
- reg ## reg_name ## _ ## block ## id
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
/* NBIO */
-#define NBIO_BASE_INNER(seg) \
- NBIO_BASE__INST0_SEG ## seg
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX0_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
#undef CTX
#define CTX ctx
#define REG(reg_name) \
- (DCN_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
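
The hunk above is the core of the rework: the old SR()/SRI()/REG() macros folded the DCN_BASE segment addresses in at compile time, while the replacements read per-ASIC offsets from ctx->dcn_reg_offsets[] (and ctx->nbio_reg_offsets[]) at run time and assign into whatever REG_STRUCT currently names. A minimal, self-contained sketch of that pattern, with invented register names and offsets rather than the real DCN list, compiles as plain C:

    /* Hypothetical stand-ins for the real SR()/BASE() macros, only to show the
     * shape of the run-time init: segment bases come from the context, and the
     * macro assigns into whatever REG_STRUCT expands to at the call site.
     */
    #include <stdio.h>

    #define regFOO          0x10   /* register offset within its segment   */
    #define regFOO_BASE_IDX 2      /* which segment base the register uses */

    struct fake_ctx  { unsigned int dcn_reg_offsets[6]; }; /* filled at probe time */
    struct fake_regs { unsigned int FOO; };

    #define BASE(seg) (ctx->dcn_reg_offsets[seg])
    #define SR(reg_name) \
        (REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name)

    static struct fake_regs demo_regs;

    int main(void)
    {
        struct fake_ctx ctx_storage = { .dcn_reg_offsets = { 0, 0, 0x34C0 } };
        struct fake_ctx *ctx = &ctx_storage;

    #define REG_STRUCT demo_regs
        SR(FOO);                        /* demo_regs.FOO = 0x34C0 + 0x10 */
    #undef REG_STRUCT

        printf("FOO lives at 0x%x\n", demo_regs.FOO);
        return 0;
    }

The apparent payoff is that a single dcn32 build can serve parts whose register apertures differ; the cost is the explicit *_init() calls added to the create functions further down.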
-static const struct bios_registers bios_regs = {
- NBIO_SR(BIOS_SCRATCH_3),
- NBIO_SR(BIOS_SCRATCH_6)
-};
+static struct bios_registers bios_regs;
-#define clk_src_regs(index, pllid)\
-[index] = {\
- CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
-}
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
-static const struct dce110_clk_src_regs clk_src_regs[] = {
- clk_src_regs(0, A),
- clk_src_regs(1, B),
- clk_src_regs(2, C),
- clk_src_regs(3, D),
- clk_src_regs(4, E)
-};
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -213,17 +214,10 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define abm_regs(id)\
-[id] = {\
- ABM_DCN32_REG_LIST(id)\
-}
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
-static const struct dce_abm_registers abm_regs[] = {
- abm_regs(0),
- abm_regs(1),
- abm_regs(2),
- abm_regs(3),
-};
+static struct dce_abm_registers abm_regs[4];
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN32(__SHIFT)
@@ -233,18 +227,10 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN32(_MASK)
};
-#define audio_regs(id)\
-[id] = {\
- AUD_COMMON_REG_LIST(id)\
-}
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
-static const struct dce_audio_registers audio_regs[] = {
- audio_regs(0),
- audio_regs(1),
- audio_regs(2),
- audio_regs(3),
- audio_regs(4)
-};
+static struct dce_audio_registers audio_regs[5];
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
@@ -259,23 +245,10 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
-#define vpg_regs(id)\
-[id] = {\
- VPG_DCN3_REG_LIST(id)\
-}
+#define vpg_regs_init(id)\
+ VPG_DCN3_REG_LIST_RI(id)
-static const struct dcn30_vpg_registers vpg_regs[] = {
- vpg_regs(0),
- vpg_regs(1),
- vpg_regs(2),
- vpg_regs(3),
- vpg_regs(4),
- vpg_regs(5),
- vpg_regs(6),
- vpg_regs(7),
- vpg_regs(8),
- vpg_regs(9),
-};
+static struct dcn30_vpg_registers vpg_regs[10];
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
@@ -285,19 +258,10 @@ static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
-#define afmt_regs(id)\
-[id] = {\
- AFMT_DCN3_REG_LIST(id)\
-}
+#define afmt_regs_init(id)\
+ AFMT_DCN3_REG_LIST_RI(id)
-static const struct dcn30_afmt_registers afmt_regs[] = {
- afmt_regs(0),
- afmt_regs(1),
- afmt_regs(2),
- afmt_regs(3),
- afmt_regs(4),
- afmt_regs(5)
-};
+static struct dcn30_afmt_registers afmt_regs[6];
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
@@ -307,17 +271,10 @@ static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
-#define apg_regs(id)\
-[id] = {\
- APG_DCN31_REG_LIST(id)\
-}
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
-static const struct dcn31_apg_registers apg_regs[] = {
- apg_regs(0),
- apg_regs(1),
- apg_regs(2),
- apg_regs(3)
-};
+static struct dcn31_apg_registers apg_regs[4];
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
@@ -327,18 +284,10 @@ static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
-#define stream_enc_regs(id)\
-[id] = {\
- SE_DCN32_REG_LIST(id)\
-}
+#define stream_enc_regs_init(id)\
+ SE_DCN32_REG_LIST_RI(id)
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
- stream_enc_regs(0),
- stream_enc_regs(1),
- stream_enc_regs(2),
- stream_enc_regs(3),
- stream_enc_regs(4)
-};
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -349,46 +298,24 @@ static const struct dcn10_stream_encoder_mask se_mask = {
};
-#define aux_regs(id)\
-[id] = {\
- DCN2_AUX_REG_LIST(id)\
-}
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
-static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
- aux_regs(0),
- aux_regs(1),
- aux_regs(2),
- aux_regs(3),
- aux_regs(4)
-};
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
-#define hpd_regs(id)\
-[id] = {\
- HPD_REG_LIST(id)\
-}
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
-static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
- hpd_regs(0),
- hpd_regs(1),
- hpd_regs(2),
- hpd_regs(3),
- hpd_regs(4)
-};
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
-#define link_regs(id, phyid)\
-[id] = {\
- LE_DCN31_REG_LIST(id), \
- UNIPHY_DCN2_REG_LIST(phyid), \
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN31_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
/*DPCS_DCN31_REG_LIST(id),*/ \
-}
-static const struct dcn10_link_enc_registers link_enc_regs[] = {
- link_regs(0, A),
- link_regs(1, B),
- link_regs(2, C),
- link_regs(3, D),
- link_regs(4, E)
-};
+static struct dcn10_link_enc_registers link_enc_regs[5];
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
@@ -401,17 +328,10 @@ static const struct dcn10_link_enc_mask le_mask = {
//DPCS_DCN31_MASK_SH_LIST(_MASK)
};
-#define hpo_dp_stream_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
-}
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
-static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
- hpo_dp_stream_encoder_reg_list(0),
- hpo_dp_stream_encoder_reg_list(1),
- hpo_dp_stream_encoder_reg_list(2),
- hpo_dp_stream_encoder_reg_list(3),
-};
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
@@ -422,20 +342,14 @@ static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
};
-#define hpo_dp_link_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
- /*DCN3_1_RDPCSTX_REG_LIST(0),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(1),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(2),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(3),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(4)*/\
-}
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
-static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
- hpo_dp_link_encoder_reg_list(0),
- hpo_dp_link_encoder_reg_list(1),
-};
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
@@ -445,17 +359,10 @@ static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
-#define dpp_regs(id)\
-[id] = {\
- DPP_REG_LIST_DCN30_COMMON(id),\
-}
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN30_COMMON_RI(id)
-static const struct dcn3_dpp_registers dpp_regs[] = {
- dpp_regs(0),
- dpp_regs(1),
- dpp_regs(2),
- dpp_regs(3)
-};
+static struct dcn3_dpp_registers dpp_regs[4];
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
@@ -466,17 +373,10 @@ static const struct dcn3_dpp_mask tf_mask = {
};
-#define opp_regs(id)\
-[id] = {\
- OPP_REG_LIST_DCN30(id),\
-}
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN30_RI(id)
-static const struct dcn20_opp_registers opp_regs[] = {
- opp_regs(0),
- opp_regs(1),
- opp_regs(2),
- opp_regs(3)
-};
+static struct dcn20_opp_registers opp_regs[4];
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
@@ -486,21 +386,16 @@ static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
-#define aux_engine_regs(id)\
-[id] = {\
- AUX_COMMON_REG_LIST0(id), \
- .AUXN_IMPCAL = 0, \
- .AUXP_IMPCAL = 0, \
- .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
-}
+#define aux_engine_regs_init(id)\
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), \
+ SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
+ )
-static const struct dce110_aux_registers aux_engine_regs[] = {
- aux_engine_regs(0),
- aux_engine_regs(1),
- aux_engine_regs(2),
- aux_engine_regs(3),
- aux_engine_regs(4)
-};
+static struct dce110_aux_registers aux_engine_regs[5];
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
@@ -510,15 +405,10 @@ static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
-#define dwbc_regs_dcn3(id)\
-[id] = {\
- DWBC_COMMON_REG_LIST_DCN30(id),\
-}
-
-static const struct dcn30_dwbc_registers dwbc30_regs[] = {
- dwbc_regs_dcn3(0),
-};
+static struct dcn30_dwbc_registers dwbc30_regs[1];
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -528,14 +418,10 @@ static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
-#define mcif_wb_regs_dcn3(id)\
-[id] = {\
- MCIF_WB_COMMON_REG_LIST_DCN32(id),\
-}
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)
-static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
- mcif_wb_regs_dcn3(0)
-};
+static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -545,17 +431,10 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define dsc_regsDCN20(id)\
-[id] = {\
- DSC_REG_LIST_DCN20(id)\
-}
+#define dsc_regsDCN20_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
-static const struct dcn20_dsc_registers dsc_regs[] = {
- dsc_regsDCN20(0),
- dsc_regsDCN20(1),
- dsc_regsDCN20(2),
- dsc_regsDCN20(3)
-};
+static struct dcn20_dsc_registers dsc_regs[4];
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
@@ -565,17 +444,18 @@ static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-static const struct dcn30_mpc_registers mpc_regs = {
- MPC_REG_LIST_DCN3_2(0),
- MPC_REG_LIST_DCN3_2(1),
- MPC_REG_LIST_DCN3_2(2),
- MPC_REG_LIST_DCN3_2(3),
- MPC_OUT_MUX_REG_LIST_DCN3_0(0),
- MPC_OUT_MUX_REG_LIST_DCN3_0(1),
- MPC_OUT_MUX_REG_LIST_DCN3_0(2),
- MPC_OUT_MUX_REG_LIST_DCN3_0(3),
- MPC_DWB_MUX_REG_LIST_DCN3_0(0),
-};
+static struct dcn30_mpc_registers mpc_regs;
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -585,19 +465,10 @@ static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define optc_regs(id)\
-[id] = {OPTC_COMMON_REG_LIST_DCN3_2(id)}
-
-//#ifdef DIAGS_BUILD
-//static struct dcn_optc_registers optc_regs[] = {
-//#else
-static const struct dcn_optc_registers optc_regs[] = {
-//#endif
- optc_regs(0),
- optc_regs(1),
- optc_regs(2),
- optc_regs(3)
-};
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_2_RI(id)
+
+static struct dcn_optc_registers optc_regs[4];
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -607,17 +478,10 @@ static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define hubp_regs(id)\
-[id] = {\
- HUBP_REG_LIST_DCN32(id)\
-}
+#define hubp_regs_init(id)\
+ HUBP_REG_LIST_DCN32_RI(id)
-static const struct dcn_hubp2_registers hubp_regs[] = {
- hubp_regs(0),
- hubp_regs(1),
- hubp_regs(2),
- hubp_regs(3)
-};
+static struct dcn_hubp2_registers hubp_regs[4];
static const struct dcn_hubp2_shift hubp_shift = {
@@ -627,9 +491,10 @@ static const struct dcn_hubp2_shift hubp_shift = {
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dcn_hubbub_registers hubbub_reg = {
- HUBBUB_REG_LIST_DCN32(0)
-};
+
+static struct dcn_hubbub_registers hubbub_reg;
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN32_RI(0)
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
@@ -639,9 +504,10 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dccg_registers dccg_regs = {
- DCCG_REG_LIST_DCN32()
-};
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN32_RI()
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN32(__SHIFT)
@@ -714,9 +580,10 @@ static const struct dccg_mask dccg_mask = {
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)
-static const struct dce_hwseq_registers hwseq_reg = {
- HWSEQ_DCN32_REG_LIST()
-};
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN32_REG_LIST()
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -759,29 +626,10 @@ static const struct dce_hwseq_shift hwseq_shift = {
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
-#define vmid_regs(id)\
-[id] = {\
- DCN20_VMID_REG_LIST(id)\
-}
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
-static const struct dcn_vmid_registers vmid_regs[] = {
- vmid_regs(0),
- vmid_regs(1),
- vmid_regs(2),
- vmid_regs(3),
- vmid_regs(4),
- vmid_regs(5),
- vmid_regs(6),
- vmid_regs(7),
- vmid_regs(8),
- vmid_regs(9),
- vmid_regs(10),
- vmid_regs(11),
- vmid_regs(12),
- vmid_regs(13),
- vmid_regs(14),
- vmid_regs(15)
-};
+static struct dcn_vmid_registers vmid_regs[16];
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
@@ -870,7 +718,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+ /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
+ .alloc_extra_way_for_cursor = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -903,6 +756,14 @@ static struct dce_aux *dcn32_aux_engine_create(
if (!aux_engine)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
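The chained call sequence just above (aux_engine_regs_init(0), ..., aux_engine_regs_init(4);) reads strangely but is ordinary C: each *_RI list macro expands to a parenthesized comma expression of assignments, so the whole chain is a single expression statement, and the #undef/#define of REG_STRUCT retargets which static array receives the writes. A standalone sketch of the same trick, with made-up names:

    struct demo_registers { unsigned int CTRL, STATUS; };

    static struct demo_registers demo_array[3];

    /* Expands to a parenthesized comma expression, so invocations can be
     * chained with commas and closed with one semicolon.
     */
    #define demo_regs_init(id) \
        ( \
            REG_STRUCT[id].CTRL   = 0x1000 + 0x10 * (id), \
            REG_STRUCT[id].STATUS = 0x1004 + 0x10 * (id)  \
        )

    void demo_init_all(void)
    {
    #undef REG_STRUCT
    #define REG_STRUCT demo_array
        demo_regs_init(0),
        demo_regs_init(1),
        demo_regs_init(2);
    }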
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
@@ -912,15 +773,10 @@ static struct dce_aux *dcn32_aux_engine_create(
return &aux_engine->base;
}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -940,6 +796,14 @@ static struct dce_i2c_hw *dcn32_i2c_hw_create(
if (!dce_i2c_hw)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
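One detail worth noting here: the register headers number the hardware I2C engines from 1, so the I2C-specific macros defined earlier in this file (SR_ARR_I2C()/SRI_ARR_I2C()) store into REG_STRUCT[id - 1], and the init calls above run from 1 to 5 while filling i2c_hw_regs[0..4]. That matches the old positional initializer, which also placed instance 1 at index 0. Index mapping, for reference:

    /* i2c_inst_regs_init(1) -> SRI_ARR_I2C(..., 1) -> i2c_hw_regs[1 - 1] == [0] */
    /* i2c_inst_regs_init(5) -> SRI_ARR_I2C(..., 5) -> i2c_hw_regs[5 - 1] == [4] */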
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -979,6 +843,29 @@ static struct hubbub *dcn32_hubbub_create(struct dc_context *ctx)
if (!hubbub2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
hubbub32_construct(hubbub2, ctx,
&hubbub_reg,
&hubbub_shift,
@@ -1011,6 +898,13 @@ static struct hubp *dcn32_hubp_create(
if (!hubp2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
@@ -1036,6 +930,13 @@ static struct dpp *dcn32_dpp_create(
if (!dpp3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
if (dpp32_construct(dpp3, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp3->base;
@@ -1056,6 +957,10 @@ static struct mpc *dcn32_mpc_create(
if (!mpc30)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
dcn32_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
@@ -1077,6 +982,13 @@ static struct output_pixel_processor *dcn32_opp_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
dcn20_opp_construct(opp2, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp2->base;
@@ -1093,6 +1005,13 @@ static struct timing_generator *dcn32_timing_generator_create(
if (!tgn10)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
@@ -1127,6 +1046,30 @@ static struct link_encoder *dcn32_link_encoder_create(
if (!enc20)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
dcn32_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
@@ -1156,7 +1099,7 @@ static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
- generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
@@ -1164,6 +1107,15 @@ static void read_dce_straps(
static struct audio *dcn32_create_audio(
struct dc_context *ctx, unsigned int inst)
{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
@@ -1177,6 +1129,19 @@ static struct vpg *dcn32_vpg_create(
if (!vpg3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
@@ -1194,6 +1159,15 @@ static struct afmt *dcn32_afmt_create(
if (!afmt3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
@@ -1211,6 +1185,13 @@ static struct apg *dcn31_apg_create(
if (!apg31)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
@@ -1247,6 +1228,14 @@ static struct stream_encoder *dcn32_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1297,6 +1286,13 @@ static struct hpo_dp_stream_encoder *dcn32_hpo_dp_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
@@ -1314,6 +1310,11 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
@@ -1326,6 +1327,10 @@ static struct dce_hwseq *dcn32_hwseq_create(
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
@@ -1517,6 +1522,10 @@ static bool dcn32_dwbc_create(struct dc_context *ctx, struct resource_pool *pool
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT dwbc30_regs
+ dwbc_regs_dcn3_init(0);
+
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
@@ -1542,6 +1551,10 @@ static bool dcn32_mmhubbub_create(struct dc_context *ctx, struct resource_pool *
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb30_regs
+ mcif_wb_regs_dcn3_init(0);
+
dcn32_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
@@ -1564,6 +1577,13 @@ static struct display_stream_compressor *dcn32_dsc_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN20_init(0),
+ dsc_regsDCN20_init(1),
+ dsc_regsDCN20_init(2),
+ dsc_regsDCN20_init(3);
+
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
dsc->max_image_width = 6016;
@@ -1701,13 +1721,26 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
{
int i;
bool removed_pipe = false;
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_stream_state *phantom_stream = NULL;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// build scaling params for phantom pipes
if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ phantom_plane = pipe->plane_state;
+ phantom_stream = pipe->stream;
+
dc_rem_all_planes_for_stream(dc, pipe->stream, context);
dc_remove_stream_from_ctx(dc, context, pipe->stream);
+
+ /* Ref count is incremented on allocation and also when added to the context.
+ * Therefore we must call release for the phantom plane and stream once
+ * they are removed from the ctx to finally decrement the refcount to 0 and free them.
+ */
+ dc_plane_state_release(phantom_plane);
+ dc_stream_release(phantom_stream);
+
removed_pipe = true;
}
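The comment above carries the substance of this leak fix: the phantom plane and stream each hold one reference from allocation and a second from being added to the context, so removing them from the context alone leaves a dangling reference. A toy model of that accounting, using a hypothetical refcounted object rather than the real dc_plane_state/dc_stream_state types:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_obj { int refcount; };

    static struct demo_obj *demo_create(void)
    {
        struct demo_obj *o = calloc(1, sizeof(*o));

        if (o)
            o->refcount = 1;              /* reference taken at allocation */
        return o;
    }

    static void demo_retain(struct demo_obj *o)  { o->refcount++; }

    static void demo_release(struct demo_obj *o)
    {
        if (--o->refcount == 0) {
            printf("freed\n");
            free(o);
        }
    }

    int main(void)
    {
        struct demo_obj *phantom = demo_create();      /* refcount = 1 */

        if (!phantom)
            return 1;
        demo_retain(phantom);    /* added to the context:     refcount = 2 */
        demo_release(phantom);   /* removed from the context: refcount = 1 */
        demo_release(phantom);   /* explicit release:         refcount = 0, freed */
        return 0;
    }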
@@ -1807,12 +1840,6 @@ validate_out:
return out;
}
-
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1821,12 +1848,37 @@ int dcn32_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
- bool subvp_in_use = false, is_pipe_split_expected[MAX_PIPES];
- int plane_count = 0;
+ bool subvp_in_use = false;
+ uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ /* Determine whether we will apply ODM 2to1 policy:
+ * It applies to single-display configurations where the number of planes is less than 3.
+ * For the 3-plane case (2 MPO planes), we will not set the policy for the MPO pipes.
+ *
+ * Apply pipe split policy first so we can predict the pipe split correctly
+ * (dcn32_predict_pipe_split).
+ */
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (context->stream_count == 1 &&
+ context->stream_status[0].plane_count <= 1 &&
+ !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
+ pipe_cnt++;
+ }
+
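The pixel clock comparison in the loop above is easy to misread because of the units: pix_clk_100hz is stored in units of 100 Hz. A quick check with example clock values (not taken from the patch):

    /* pix_clk_100hz * 100  >  DCN3_2_VMIN_DISPCLK_HZ (717,000,000 Hz)
     * 1188.0 MHz pixel clock: 11,880,000 * 100 = 1,188,000,000 Hz -> eligible for ODM 2to1
     *  148.5 MHz pixel clock:  1,485,000 * 100 =   148,500,000 Hz -> not eligible
     */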
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1879,59 +1931,18 @@ int dcn32_populate_dml_pipes_from_context(
}
}
- /* Calculate the number of planes we have so we can determine
- * whether to apply ODM 2to1 policy or not
- */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
-
DC_FP_START();
- is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, pipes[i].pipe, i);
+ is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
}
- /* Determine whether we will apply ODM 2to1 policy
- * Applies to single display and where the number of planes is less than 3
- * For 3 plane case ( 2 MPO planes ), we will not set the policy for the MPO pipes
- */
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
- res_ctx->pipe_ctx[i].stream->odm_2to1_policy_applied = false;
- if (context->stream_count == 1 && timing->dsc_cfg.num_slices_h != 1) {
- if (dc->debug.enable_single_display_2to1_odm_policy) {
- if (!((plane_count > 2) && pipe->top_pipe))
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
- }
- res_ctx->pipe_ctx[i].stream->odm_2to1_policy_applied = true;
- }
- pipe_cnt++;
- }
-
/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
* the DET available for each pipe). Use the DET override input to maintain our driver
* policy.
*/
- if (pipe_cnt == 1 && !is_pipe_split_expected[0]) {
- pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
- if (pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (!is_dual_plane(pipe->plane_state->format)) {
- pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
- pipes[0].pipe.src.unbounded_req_mode = true;
- if (pipe->plane_state->src_rect.width >= 5120 &&
- pipe->plane_state->src_rect.height >= 2880)
- pipes[0].pipe.src.det_size_override = 320; // 5K or higher
- }
- }
- } else
- dcn32_determine_det_override(context, pipes, is_pipe_split_expected, dc->res_pool->pipe_count);
+ dcn32_set_det_allocations(dc, context, pipes);
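With this change the per-pipe DET special cases move behind dcn32_set_det_allocations(), and the constants added to dcn32_resource.h in this patch make the granularity explicit. Assuming the sizes are in KB, as the DET segment/size names suggest, the segment arithmetic works out to:

    /* DET is handed out in DCN3_2_DET_SEG_SIZE (64 KB) segments:
     *   DCN3_2_MAX_DET_SIZE     1152 KB -> 18 segments
     *   DCN3_2_DEFAULT_DET_SIZE  256 KB ->  4 segments
     *   DCN3_2_MIN_DET_SIZE      128 KB ->  2 segments
     */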
// In general cases we want to keep the dram clock change requirement
// (prefer configs that support MCLK switch). Only override to false
@@ -2002,6 +2013,28 @@ static bool dcn32_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
+ #undef REG_STRUCT
+ #define REG_STRUCT bios_regs
+ bios_regs_init();
+
+ #undef REG_STRUCT
+ #define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+ #undef REG_STRUCT
+ #define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+ #undef REG_STRUCT
+ #define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
DC_FP_START();
ctx->dc_bios->regs = &bios_regs;
@@ -2039,7 +2072,8 @@ static bool dcn32_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 1e7e6201c880..55945cca2260 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -28,8 +28,16 @@
#include "core_types.h"
+#define DCN3_2_DEFAULT_DET_SIZE 256
+#define DCN3_2_MAX_DET_SIZE 1152
+#define DCN3_2_MIN_DET_SIZE 128
+#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
#define DCN3_2_DET_SEG_SIZE 64
#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
+#define DCN3_2_VMIN_DISPCLK_HZ 717000000
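The new MALL block geometry defines are internally consistent with the existing DCN3_2_MALL_MBLK_SIZE_BYTES if 4BPE/8BPE are read as bytes per element:

    /* 128 (MBLK_WIDTH) * 128 (MBLK_HEIGHT_4BPE) * 4 bytes = 65536 = DCN3_2_MALL_MBLK_SIZE_BYTES
     * 128 (MBLK_WIDTH) *  64 (MBLK_HEIGHT_8BPE) * 8 bytes = 65536 = DCN3_2_MALL_MBLK_SIZE_BYTES
     */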
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -106,7 +114,1167 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_stream_state *stream,
struct pipe_ctx *head_pipe);
-void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
- bool *is_pipe_split_expected, int pipe_cnt);
+void dcn32_determine_det_override(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+
+void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+/* definitions for run time init of reg offsets */
+
+/* CLK SRC */
+#define CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid) \
+ ( \
+ SRI_ARR_ALPHABET(PIXCLK_RESYNC_CNTL, PHYPLL, index, pllid), \
+ SRII_ARR_2(PHASE, DP_DTO, 0, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 1, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 2, index), \
+ SRII_ARR_2(PHASE, DP_DTO, 3, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 0, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 1, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 2, index), \
+ SRII_ARR_2(MODULO, DP_DTO, 3, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 0, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 1, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 2, index), \
+ SRII_ARR_2(PIXEL_RATE_CNTL, OTG, 3, index) \
+ )
+
+/* ABM */
+#define ABM_DCN32_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI_ARR(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI_ARR(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI_ARR(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI_ARR(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI_ARR(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI_ARR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI_ARR(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI_ARR(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI_ARR(DC_ABM1_ACE_THRES_12, ABM, id), NBIO_SR_ARR(BIOS_SCRATCH_2, id) \
+ )
+
+/* Audio */
+#define AUD_COMMON_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id), \
+ SRI_ARR(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, id), \
+ SR_ARR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, id), \
+ SR_ARR(DCCG_AUDIO_DTO_SOURCE, id), SR_ARR(DCCG_AUDIO_DTO0_MODULE, id), \
+ SR_ARR(DCCG_AUDIO_DTO0_PHASE, id), SR_ARR(DCCG_AUDIO_DTO1_MODULE, id), \
+ SR_ARR(DCCG_AUDIO_DTO1_PHASE, id) \
+ )
+
+/* VPG */
+
+#define VPG_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(VPG_GENERIC_STATUS, VPG, id), \
+ SRI_ARR(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI_ARR(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI_ARR(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
+ SRI_ARR(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id) \
+ )
+
+/* AFMT */
+#define AFMT_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI_ARR(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI_ARR(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI_ARR(AFMT_60958_0, AFMT, id), SRI_ARR(AFMT_60958_1, AFMT, id), \
+ SRI_ARR(AFMT_60958_2, AFMT, id), SRI_ARR(AFMT_MEM_PWR, AFMT, id) \
+ )
+
+/* APG */
+#define APG_DCN31_REG_LIST_RI(id) \
+ (\
+ SRI_ARR(APG_CONTROL, APG, id), SRI_ARR(APG_CONTROL2, APG, id), \
+ SRI_ARR(APG_MEM_PWR, APG, id), SRI_ARR(APG_DBG_GEN_CONTROL, APG, id) \
+ )
+
+/* Stream encoder */
+#define SE_DCN32_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AFMT_CNTL, DIG, id), SRI_ARR(DIG_FE_CNTL, DIG, id), \
+ SRI_ARR(HDMI_CONTROL, DIG, id), SRI_ARR(HDMI_DB_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_GC, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL4, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL5, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL6, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL7, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL8, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL9, DIG, id), \
+ SRI_ARR(HDMI_GENERIC_PACKET_CONTROL10, DIG, id), \
+ SRI_ARR(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI_ARR(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI_ARR(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_ACR_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(HDMI_ACR_32_0, DIG, id), SRI_ARR(HDMI_ACR_32_1, DIG, id), \
+ SRI_ARR(HDMI_ACR_44_0, DIG, id), SRI_ARR(HDMI_ACR_44_1, DIG, id), \
+ SRI_ARR(HDMI_ACR_48_0, DIG, id), SRI_ARR(HDMI_ACR_48_1, DIG, id), \
+ SRI_ARR(DP_DB_CNTL, DP, id), SRI_ARR(DP_MSA_MISC, DP, id), \
+ SRI_ARR(DP_MSA_VBID_MISC, DP, id), SRI_ARR(DP_MSA_COLORIMETRY, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \
+ SRI_ARR(DP_SEC_CNTL1, DP, id), SRI_ARR(DP_SEC_CNTL2, DP, id), \
+ SRI_ARR(DP_SEC_CNTL5, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \
+ SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \
+ SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
+ SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \
+ SRI_ARR(DP_SEC_TIMESTAMP, DP, id), SRI_ARR(DP_DSC_CNTL, DP, id), \
+ SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(DP_SEC_FRAMING4, DP, id), SRI_ARR(DP_GSP11_CNTL, DP, id), \
+ SRI_ARR(DME_CONTROL, DME, id), \
+ SRI_ARR(DP_SEC_METADATA_TRANSMISSION, DP, id), \
+ SRI_ARR(HDMI_METADATA_PACKET_CONTROL, DIG, id), \
+ SRI_ARR(DIG_FE_CNTL, DIG, id), SRI_ARR(DIG_CLOCK_PATTERN, DIG, id), \
+ SRI_ARR(DIG_FIFO_CTRL0, DIG, id) \
+ )
+
+/* Aux regs */
+
+#define AUX_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \
+ SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id) \
+ )
+
+#define DCN2_AUX_REG_LIST_RI(id) \
+ ( \
+ AUX_REG_LIST_RI(id), SRI_ARR(AUX_DPHY_TX_CONTROL, DP_AUX, id) \
+ )
+
+/* HPD */
+#define HPD_REG_LIST_RI(id) SRI_ARR(DC_HPD_CONTROL, HPD, id)
+
+/* Link encoder */
+#define LE_DCN3_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DIG_BE_CNTL, DIG, id), SRI_ARR(DIG_BE_EN_CNTL, DIG, id), \
+ SRI_ARR(TMDS_CTL_BITS, DIG, id), \
+ SRI_ARR(TMDS_DCBALANCER_CONTROL, DIG, id), SRI_ARR(DP_CONFIG, DP, id), \
+ SRI_ARR(DP_DPHY_CNTL, DP, id), SRI_ARR(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_SCRAM_CNTL, DP, id), SRI_ARR(DP_DPHY_SYM0, DP, id), \
+ SRI_ARR(DP_DPHY_SYM1, DP, id), SRI_ARR(DP_DPHY_SYM2, DP, id), \
+ SRI_ARR(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI_ARR(DP_LINK_CNTL, DP, id), SRI_ARR(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI_ARR(DP_MSE_SAT0, DP, id), SRI_ARR(DP_MSE_SAT1, DP, id), \
+ SRI_ARR(DP_MSE_SAT2, DP, id), SRI_ARR(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI_ARR(DP_SEC_CNTL, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_FAST_TRAINING, DP, id), SRI_ARR(DP_SEC_CNTL1, DP, id), \
+ SRI_ARR(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI_ARR(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) \
+ )
+
+#define LE_DCN31_REG_LIST_RI(id) \
+ ( \
+ LE_DCN3_REG_LIST_RI(id), SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR_ARR(DIO_LINKA_CNTL, id), SR_ARR(DIO_LINKB_CNTL, id), \
+ SR_ARR(DIO_LINKC_CNTL, id), SR_ARR(DIO_LINKD_CNTL, id), \
+ SR_ARR(DIO_LINKE_CNTL, id), SR_ARR(DIO_LINKF_CNTL, id) \
+ )
+
+#define UNIPHY_DCN2_REG_LIST_RI(id, phyid) \
+ ( \
+ SRI_ARR_ALPHABET(CLOCK_ENABLE, SYMCLK, id, phyid), \
+ SRI_ARR_ALPHABET(CHANNEL_XBAR_CNTL, UNIPHY, id, phyid) \
+ )
+
+/* HPO DP stream encoder */
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id) \
+ ( \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL0, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL1, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL2, id), \
+ SR_ARR(DP_STREAM_MAPPER_CONTROL3, id), \
+ SRI_ARR(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \
+ SRI_ARR(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) \
+ )
+
+/* HPO DP link encoder regs */
+#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
+ SRI_ARR(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
+ SRI_ARR(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) \
+ )
+
+/* DPP */
+#define DPP_REG_LIST_DCN30_COMMON_RI(id) \
+ ( \
+ SRI_ARR(CM_DEALPHA, CM, id), SRI_ARR(CM_MEM_PWR_STATUS, CM, id), \
+ SRI_ARR(CM_BIAS_CR_R, CM, id), SRI_ARR(CM_BIAS_Y_G_CB_B, CM, id), \
+ SRI_ARR(PRE_DEGAM, CNVC_CFG, id), SRI_ARR(CM_GAMCOR_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_INDEX, CM, id), \
+ SRI_ARR(CM_GAMCOR_LUT_DATA, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_REGION_0_1, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_REGION_32_33, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_OFFSET_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_REGION_0_1, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_REGION_32_33, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_OFFSET_R, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id), \
+ SRI_ARR(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_CONTROL, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C11_C12, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C13_C14, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C21_C22, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C23_C24, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C31_C32, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_C33_C34, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C11_C12, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C13_C14, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C21_C22, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C23_C24, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C31_C32, CM, id), \
+ SRI_ARR(CM_GAMUT_REMAP_B_C33_C34, CM, id), \
+ SRI_ARR(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI_ARR(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI_ARR(OTG_H_BLANK, DSCL, id), SRI_ARR(OTG_V_BLANK, DSCL, id), \
+ SRI_ARR(SCL_MODE, DSCL, id), SRI_ARR(LB_DATA_FORMAT, DSCL, id), \
+ SRI_ARR(LB_MEMORY_CTRL, DSCL, id), SRI_ARR(DSCL_AUTOCAL, DSCL, id), \
+ SRI_ARR(SCL_TAP_CONTROL, DSCL, id), \
+ SRI_ARR(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI_ARR(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI_ARR(DSCL_2TAP_CONTROL, DSCL, id), SRI_ARR(MPC_SIZE, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI_ARR(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI_ARR(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI_ARR(RECOUT_START, DSCL, id), SRI_ARR(RECOUT_SIZE, DSCL, id), \
+ SRI_ARR(PRE_DEALPHA, CNVC_CFG, id), SRI_ARR(PRE_REALPHA, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_MODE, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_C11_C12, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_C33_C34, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_B_C11_C12, CNVC_CFG, id), \
+ SRI_ARR(PRE_CSC_B_C33_C34, CNVC_CFG, id), \
+ SRI_ARR(CM_POST_CSC_CONTROL, CM, id), \
+ SRI_ARR(CM_POST_CSC_C11_C12, CM, id), \
+ SRI_ARR(CM_POST_CSC_C33_C34, CM, id), \
+ SRI_ARR(CM_POST_CSC_B_C11_C12, CM, id), \
+ SRI_ARR(CM_POST_CSC_B_C33_C34, CM, id), \
+ SRI_ARR(CM_MEM_PWR_CTRL, CM, id), SRI_ARR(CM_CONTROL, CM, id), \
+ SRI_ARR(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI_ARR(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI_ARR(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_COLOR1, CNVC_CUR, id), \
+ SRI_ARR(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
+ SRI_ARR(DPP_CONTROL, DPP_TOP, id), SRI_ARR(CM_HDR_MULT_COEF, CM, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(ALPHA_2BIT_LUT, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_R, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_G, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_BIAS_B, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_R, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_G, CNVC_CFG, id), \
+ SRI_ARR(FCNV_FP_SCALE_B, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_CONTROL, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_ALPHA, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_RED, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_GREEN, CNVC_CFG, id), \
+ SRI_ARR(COLOR_KEYER_BLUE, CNVC_CFG, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(OBUF_MEM_PWR_CTRL, DSCL, id), \
+ SRI_ARR(DSCL_MEM_PWR_STATUS, DSCL, id), \
+ SRI_ARR(DSCL_MEM_PWR_CTRL, DSCL, id) \
+ )
+
+/* OPP */
+#define OPP_REG_LIST_DCN_RI(id) \
+ ( \
+ SRI_ARR(FMT_BIT_DEPTH_CONTROL, FMT, id), SRI_ARR(FMT_CONTROL, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI_ARR(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI_ARR(FMT_CLAMP_CNTL, FMT, id), \
+ SRI_ARR(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI_ARR(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
+ SRI_ARR(OPPBUF_CONTROL, OPPBUF, id), \
+ SRI_ARR(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
+ SRI_ARR(OPPBUF_3D_PARAMETERS_1, OPPBUF, id), \
+ SRI_ARR(OPP_PIPE_CONTROL, OPP_PIPE, id) \
+ )
+
+#define OPP_REG_LIST_DCN10_RI(id) OPP_REG_LIST_DCN_RI(id)
+
+#define OPP_DPG_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(DPG_CONTROL, DPG, id), SRI_ARR(DPG_DIMENSIONS, DPG, id), \
+ SRI_ARR(DPG_OFFSET_SEGMENT, DPG, id), SRI_ARR(DPG_COLOUR_B_CB, DPG, id), \
+ SRI_ARR(DPG_COLOUR_G_Y, DPG, id), SRI_ARR(DPG_COLOUR_R_CR, DPG, id), \
+ SRI_ARR(DPG_RAMP_CONTROL, DPG, id), SRI_ARR(DPG_STATUS, DPG, id) \
+ )
+
+#define OPP_REG_LIST_DCN30_RI(id) \
+ ( \
+ OPP_REG_LIST_DCN10_RI(id), OPP_DPG_REG_LIST_RI(id), \
+ SRI_ARR(FMT_422_CONTROL, FMT, id) \
+ )
+
+/* Aux engine regs */
+#define AUX_COMMON_REG_LIST0_RI(id) \
+ ( \
+ SRI_ARR(AUX_CONTROL, DP_AUX, id), SRI_ARR(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_SW_DATA, DP_AUX, id), SRI_ARR(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI_ARR(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \
+ SRI_ARR(AUX_SW_STATUS, DP_AUX, id) \
+ )
+
+/* DWBC */
+#define DWBC_COMMON_REG_LIST_DCN30_RI(id) \
+ ( \
+ SR_ARR(DWB_ENABLE_CLK_CTRL, id), SR_ARR(DWB_MEM_PWR_CTRL, id), \
+ SR_ARR(FC_MODE_CTRL, id), SR_ARR(FC_FLOW_CTRL, id), \
+ SR_ARR(FC_WINDOW_START, id), SR_ARR(FC_WINDOW_SIZE, id), \
+ SR_ARR(FC_SOURCE_SIZE, id), SR_ARR(DWB_UPDATE_CTRL, id), \
+ SR_ARR(DWB_CRC_CTRL, id), SR_ARR(DWB_CRC_MASK_R_G, id), \
+ SR_ARR(DWB_CRC_MASK_B_A, id), SR_ARR(DWB_CRC_VAL_R_G, id), \
+ SR_ARR(DWB_CRC_VAL_B_A, id), SR_ARR(DWB_OUT_CTRL, id), \
+ SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT_EN, id), \
+ SR_ARR(DWB_MMHUBBUB_BACKPRESSURE_CNT, id), \
+ SR_ARR(DWB_HOST_READ_CONTROL, id), SR_ARR(DWB_SOFT_RESET, id), \
+ SR_ARR(DWB_HDR_MULT_COEF, id), SR_ARR(DWB_GAMUT_REMAP_MODE, id), \
+ SR_ARR(DWB_GAMUT_REMAP_COEF_FORMAT, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C11_C12, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C13_C14, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C21_C22, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C23_C24, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C31_C32, id), \
+ SR_ARR(DWB_GAMUT_REMAPA_C33_C34, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C11_C12, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C13_C14, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C21_C22, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C23_C24, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C31_C32, id), \
+ SR_ARR(DWB_GAMUT_REMAPB_C33_C34, id), SR_ARR(DWB_OGAM_CONTROL, id), \
+ SR_ARR(DWB_OGAM_LUT_INDEX, id), SR_ARR(DWB_OGAM_LUT_DATA, id), \
+ SR_ARR(DWB_OGAM_LUT_CONTROL, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_BASE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_START_SLOPE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_B, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL1_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_END_CNTL2_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMA_OFFSET_G, id), \
+ SR_ARR(DWB_OGAM_RAMA_OFFSET_R, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_0_1, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_2_3, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_4_5, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_6_7, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_8_9, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_10_11, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_12_13, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_14_15, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_16_17, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_18_19, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_20_21, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_22_23, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_24_25, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_26_27, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_28_29, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_30_31, id), \
+ SR_ARR(DWB_OGAM_RAMA_REGION_32_33, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_BASE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_START_SLOPE_CNTL_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_B, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL1_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_END_CNTL2_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_OFFSET_B, id), SR_ARR(DWB_OGAM_RAMB_OFFSET_G, id), \
+ SR_ARR(DWB_OGAM_RAMB_OFFSET_R, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_0_1, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_2_3, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_4_5, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_6_7, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_8_9, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_10_11, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_12_13, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_14_15, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_16_17, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_18_19, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_20_21, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_22_23, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_24_25, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_26_27, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_28_29, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_30_31, id), \
+ SR_ARR(DWB_OGAM_RAMB_REGION_32_33, id) \
+ )
+
+/* MCIF */
+
+#define MCIF_WB_COMMON_REG_LIST_DCN32_RI(inst) \
+ ( \
+ SRI2_ARR(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUFMGR_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_PITCH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_STATUS, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_STATUS2, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_ARBITRATION_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SCLK_CHANGE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_TEST_DEBUG_INDEX, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_TEST_DEBUG_DATA, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_C, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, MMHUBBUB, inst), \
+ SRI2_ARR(MCIF_WB_NB_PSTATE_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_WATERMARK, MMHUBBUB, inst), \
+ SRI2_ARR(MCIF_WB_CLOCK_GATER_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SELF_REFRESH_CONTROL, MCIF_WB, inst), \
+ SRI2_ARR(MULTI_LEVEL_QOS_CTRL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_SECURITY_LEVEL, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_LUMA_SIZE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_CHROMA_SIZE, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_Y_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_ADDR_C_HIGH, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_1_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_2_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_3_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MCIF_WB_BUF_4_RESOLUTION, MCIF_WB, inst), \
+ SRI2_ARR(MMHUBBUB_MEM_PWR_CNTL, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_ADDR_REGION, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_HIGH, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_BASE_ADDR_LOW, MMHUBBUB, inst), \
+ SRI2_ARR(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB, inst) \
+ )
+
+/* DSC */
+
+#define DSC_REG_LIST_DCN20_RI(id) \
+ ( \
+ SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id), \
+ SRI_ARR(DSC_DEBUG_CONTROL, DSC_TOP, id), \
+ SRI_ARR(DSCC_CONFIG0, DSCC, id), SRI_ARR(DSCC_CONFIG1, DSCC, id), \
+ SRI_ARR(DSCC_STATUS, DSCC, id), \
+ SRI_ARR(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG0, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG1, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG2, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG3, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG4, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG5, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG6, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG7, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG8, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG9, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG10, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG11, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG12, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG13, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG14, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG15, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG16, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG17, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG18, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG19, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG20, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG21, DSCC, id), \
+ SRI_ARR(DSCC_PPS_CONFIG22, DSCC, id), \
+ SRI_ARR(DSCC_MEM_POWER_CONTROL, DSCC, id), \
+ SRI_ARR(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id), \
+ SRI_ARR(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id), \
+ SRI_ARR(DSCC_MAX_ABS_ERROR0, DSCC, id), \
+ SRI_ARR(DSCC_MAX_ABS_ERROR1, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id), \
+ SRI_ARR(DSCCIF_CONFIG0, DSCCIF, id), \
+ SRI_ARR(DSCCIF_CONFIG1, DSCCIF, id), \
+ SRI_ARR(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id) \
+ )
+
+/* MPC */
+
+#define MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst) \
+ SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst)
+
+#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \
+ ( \
+ SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) \
+ )
+
+#define MPC_OUT_MUX_REG_LIST_DCN3_0_RI(inst) \
+ ( \
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(CSC_MODE, MPC_OUT, inst), \
+ SRII(CSC_C11_C12_A, MPC_OUT, inst), SRII(CSC_C33_C34_A, MPC_OUT, inst), \
+ SRII(CSC_C11_C12_B, MPC_OUT, inst), SRII(CSC_C33_C34_B, MPC_OUT, inst), \
+ SRII(DENORM_CONTROL, MPC_OUT, inst), \
+ SRII(DENORM_CLAMP_G_Y, MPC_OUT, inst), \
+ SRII(DENORM_CLAMP_B_CB, MPC_OUT, inst), SR(MPC_OUT_CSC_COEF_FORMAT) \
+ )
+
+#define MPC_COMMON_REG_LIST_DCN1_0_RI(inst) \
+ ( \
+ SRII(MPCC_TOP_SEL, MPCC, inst), SRII(MPCC_BOT_SEL, MPCC, inst), \
+ SRII(MPCC_CONTROL, MPCC, inst), SRII(MPCC_STATUS, MPCC, inst), \
+ SRII(MPCC_OPP_ID, MPCC, inst), SRII(MPCC_BG_G_Y, MPCC, inst), \
+ SRII(MPCC_BG_R_CR, MPCC, inst), SRII(MPCC_BG_B_CB, MPCC, inst), \
+ SRII(MPCC_SM_CONTROL, MPCC, inst), \
+ SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) \
+ )
+
+#define MPC_REG_LIST_DCN3_0_RI(inst) \
+ ( \
+ MPC_COMMON_REG_LIST_DCN1_0_RI(inst), SRII(MPCC_TOP_GAIN, MPCC, inst), \
+ SRII(MPCC_BOT_GAIN_INSIDE, MPCC, inst), \
+ SRII(MPCC_BOT_GAIN_OUTSIDE, MPCC, inst), \
+ SRII(MPCC_MEM_PWR_CTRL, MPCC, inst), \
+ SRII(MPCC_OGAM_LUT_INDEX, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_LUT_DATA, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_OGAM, inst), \
+ SRII(MPCC_GAMUT_REMAP_MODE, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C11_C12_A, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C33_C34_A, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C11_C12_B, MPCC_OGAM, inst), \
+ SRII(MPC_GAMUT_REMAP_C33_C34_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL1_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_END_CNTL2_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_REGION_32_33, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMA_START_BASE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_SLOPE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL1_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_END_CNTL2_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_REGION_0_1, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_REGION_32_33, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_OFFSET_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_B, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_G, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_RAMB_START_BASE_CNTL_R, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_CONTROL, MPCC_OGAM, inst), \
+ SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) \
+ )
+
+#define MPC_REG_LIST_DCN3_2_RI(inst) \
+ MPC_REG_LIST_DCN3_0_RI(inst),\
+ SRII(MPCC_MOVABLE_CM_LOCATION_CONTROL, MPCC, inst),\
+ SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), /*TODO: may need to add other 3DLUT regs*/\
+ SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst)
+
+/* OPTC */
+
+#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \
+ ( \
+ SRI_ARR(OTG_VSTARTUP_PARAM, OTG, inst), \
+ SRI_ARR(OTG_VUPDATE_PARAM, OTG, inst), \
+ SRI_ARR(OTG_VREADY_PARAM, OTG, inst), \
+ SRI_ARR(OTG_MASTER_UPDATE_LOCK, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL0, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL4, OTG, inst), \
+ SRI_ARR(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_H_TOTAL, OTG, inst), \
+ SRI_ARR(OTG_H_BLANK_START_END, OTG, inst), \
+ SRI_ARR(OTG_H_SYNC_A, OTG, inst), SRI_ARR(OTG_H_SYNC_A_CNTL, OTG, inst), \
+ SRI_ARR(OTG_H_TIMING_CNTL, OTG, inst), SRI_ARR(OTG_V_TOTAL, OTG, inst), \
+ SRI_ARR(OTG_V_BLANK_START_END, OTG, inst), \
+ SRI_ARR(OTG_V_SYNC_A, OTG, inst), SRI_ARR(OTG_V_SYNC_A_CNTL, OTG, inst), \
+ SRI_ARR(OTG_CONTROL, OTG, inst), SRI_ARR(OTG_STEREO_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_3D_STRUCTURE_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_STEREO_STATUS, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst), \
+ SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_TRIGA_CNTL, OTG, inst), \
+ SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst), \
+ SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_STATUS_FRAME_COUNT, OTG, inst), \
+ SRI_ARR(OTG_STATUS, OTG, inst), SRI_ARR(OTG_STATUS_POSITION, OTG, inst), \
+ SRI_ARR(OTG_NOM_VERT_POSITION, OTG, inst), \
+ SRI_ARR(OTG_M_CONST_DTO0, OTG, inst), \
+ SRI_ARR(OTG_M_CONST_DTO1, OTG, inst), \
+ SRI_ARR(OTG_CLOCK_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst), \
+ SRI_ARR(OPTC_INPUT_CLOCK_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_DATA_SOURCE_SELECT, ODM, inst), \
+ SRI_ARR(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst), \
+ SRI_ARR(CONTROL, VTG, inst), SRI_ARR(OTG_VERT_SYNC_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_GSL_CONTROL, OTG, inst), SRI_ARR(OTG_CRC_CNTL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_DATA_RG, OTG, inst), \
+ SRI_ARR(OTG_CRC0_DATA_B, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst), \
+ SRI_ARR(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst), \
+ SR_ARR(GSL_SOURCE_SELECT, inst), \
+ SRI_ARR(OTG_TRIGA_MANUAL_TRIG, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL1, OTG, inst), \
+ SRI_ARR(OTG_GLOBAL_CONTROL2, OTG, inst), \
+ SRI_ARR(OTG_GSL_WINDOW_X, OTG, inst), \
+ SRI_ARR(OTG_GSL_WINDOW_Y, OTG, inst), \
+ SRI_ARR(OTG_VUPDATE_KEEPOUT, OTG, inst), \
+ SRI_ARR(OTG_DSC_START_POSITION, OTG, inst), \
+ SRI_ARR(OTG_DRR_TRIGGER_WINDOW, OTG, inst), \
+ SRI_ARR(OTG_DRR_V_TOTAL_CHANGE, OTG, inst), \
+ SRI_ARR(OPTC_DATA_FORMAT_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_BYTES_PER_PIXEL, ODM, inst), \
+ SRI_ARR(OPTC_WIDTH_CONTROL, ODM, inst), \
+ SRI_ARR(OPTC_MEMORY_CONFIG, ODM, inst), \
+ SRI_ARR(OTG_DRR_CONTROL, OTG, inst) \
+ )
+
+/* HUBP */
+
+#define HUBP_REG_LIST_DCN_VM_RI(id) \
+ ( \
+ SRI_ARR(NOM_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN_RI(id) \
+ ( \
+ SRI_ARR(DCHUBP_CNTL, HUBP, id), SRI_ARR(HUBPREQ_DEBUG_DB, HUBP, id), \
+ SRI_ARR(HUBPREQ_DEBUG, HUBP, id), SRI_ARR(DCSURF_ADDR_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_TILING_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_SURFACE_PITCH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_PITCH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_CONFIG, HUBP, id), \
+ SRI_ARR(DCSURF_FLIP_CONTROL, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI_ARR(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI_ARR(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_CONTROL, HUBPREQ, id), \
+ SRI_ARR(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id), \
+ SRI_ARR(HUBPRET_CONTROL, HUBPRET, id), \
+ SRI_ARR(HUBPRET_READ_LINE_STATUS, HUBPRET, id), \
+ SRI_ARR(DCN_EXPANSION_MODE, HUBPREQ, id), \
+ SRI_ARR(DCHUBP_REQ_SIZE_CONFIG, HUBP, id), \
+ SRI_ARR(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id), \
+ SRI_ARR(BLANK_OFFSET_0, HUBPREQ, id), \
+ SRI_ARR(BLANK_OFFSET_1, HUBPREQ, id), \
+ SRI_ARR(DST_DIMENSIONS, HUBPREQ, id), \
+ SRI_ARR(DST_AFTER_SCALER, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(PER_LINE_DELIVERY_PRE, HUBPREQ, id), \
+ SRI_ARR(PER_LINE_DELIVERY, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_6, HUBPREQ, id), \
+ SRI_ARR(NOM_PARAMETERS_7, HUBPREQ, id), \
+ SRI_ARR(DCN_TTU_QOS_WM, HUBPREQ, id), \
+ SRI_ARR(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF0_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF0_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF1_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id) \
+ )
+
+#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
+ SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
+ SRI_ARR(PREFETCH_SETTINGS_C, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_SYSTEM_APERTURE_LOW_ADDR, HUBPREQ, id), \
+ SRI_ARR(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR, HUBPREQ, id), \
+ SRI_ARR(CURSOR_SETTINGS, HUBPREQ, id), \
+ SRI_ARR(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI_ARR(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI_ARR(CURSOR_SIZE, CURSOR0_, id), \
+ SRI_ARR(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI_ARR(CURSOR_POSITION, CURSOR0_, id), \
+ SRI_ARR(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI_ARR(CURSOR_DST_OFFSET, CURSOR0_, id), \
+ SRI_ARR(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI_ARR(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
+ SRI_ARR(DMDATA_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_SW_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_QOS_CNTL, CURSOR0_, id), \
+ SRI_ARR(DMDATA_SW_DATA, CURSOR0_, id), \
+ SRI_ARR(DMDATA_STATUS, CURSOR0_, id), \
+ SRI_ARR(FLIP_PARAMETERS_0, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_1, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_2, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR1_TTU_CNTL0, HUBPREQ, id), \
+ SRI_ARR(DCN_CUR1_TTU_CNTL1, HUBPREQ, id), \
+ SRI_ARR(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
+ SRI_ARR(VMID_SETTINGS_0, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN21_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN2_COMMON_RI(id), SRI_ARR(FLIP_PARAMETERS_3, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_4, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(FLIP_PARAMETERS_6, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_5, HUBPREQ, id), \
+ SRI_ARR(VBLANK_PARAMETERS_6, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN30_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN21_RI(id), SRI_ARR(DCN_DMDATA_VM_CNTL, HUBPREQ, id) \
+ )
+
+#define HUBP_REG_LIST_DCN32_RI(id) \
+ ( \
+ HUBP_REG_LIST_DCN30_RI(id), SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
+ SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
+ SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id) \
+ )
+
+/* HUBBUB */
+
+#define HUBBUB_REG_LIST_DCN32_RI(id) \
+ ( \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL), \
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL), SR(DCHUBBUB_ARB_SAT_LEVEL), \
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND), SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_SOFT_RESET), SR(DCHUBBUB_CRC_CTRL), \
+ SR(DCN_VM_FB_LOCATION_BASE), SR(DCN_VM_FB_LOCATION_TOP), \
+ SR(DCN_VM_FB_OFFSET), SR(DCN_VM_AGP_BOT), SR(DCN_VM_AGP_TOP), \
+ SR(DCN_VM_AGP_BASE), HUBBUB_SR_WATERMARK_REG_LIST(), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C), SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C), \
+ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C), \
+ SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D), SR(DCHUBBUB_DET0_CTRL), \
+ SR(DCHUBBUB_DET1_CTRL), SR(DCHUBBUB_DET2_CTRL), SR(DCHUBBUB_DET3_CTRL), \
+ SR(DCHUBBUB_COMPBUF_CTRL), SR(COMPBUF_RESERVED_SPACE), \
+ SR(DCHUBBUB_DEBUG_CTRL_0), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C), \
+ SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D), \
+ SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
+ SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS) \
+ )
+
+/* DCCG */
+
+#define DCCG_REG_LIST_DCN32_RI() \
+ ( \
+ SR(DPPCLK_DTO_CTRL), DCCG_SRII(DTO_PARAM, DPPCLK, 0), \
+ DCCG_SRII(DTO_PARAM, DPPCLK, 1), DCCG_SRII(DTO_PARAM, DPPCLK, 2), \
+ DCCG_SRII(DTO_PARAM, DPPCLK, 3), DCCG_SRII(CLOCK_CNTL, HDMICHARCLK, 0), \
+ SR(PHYASYMCLK_CLOCK_CNTL), SR(PHYBSYMCLK_CLOCK_CNTL), \
+ SR(PHYCSYMCLK_CLOCK_CNTL), SR(PHYDSYMCLK_CLOCK_CNTL), \
+ SR(PHYESYMCLK_CLOCK_CNTL), SR(DPSTREAMCLK_CNTL), SR(HDMISTREAMCLK_CNTL), \
+ SR(SYMCLK32_SE_CNTL), SR(SYMCLK32_LE_CNTL), \
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1), \
+ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2), DCCG_SRII(PIXEL_RATE_CNTL, OTG, 3), \
+ DCCG_SRII(MODULO, DTBCLK_DTO, 0), DCCG_SRII(MODULO, DTBCLK_DTO, 1), \
+ DCCG_SRII(MODULO, DTBCLK_DTO, 2), DCCG_SRII(MODULO, DTBCLK_DTO, 3), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
+ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
+ SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE) \
+ )
+
+/* VMID */
+#define DCN20_VMID_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR(CNTL, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_BASE_ADDR_LO32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_START_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_START_ADDR_LO32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_END_ADDR_HI32, DCN_VM_CONTEXT, id), \
+ SRI_ARR(PAGE_TABLE_END_ADDR_LO32, DCN_VM_CONTEXT, id) \
+ )
+
+/* I2C HW */
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \
+ ( \
+ SRI_ARR_I2C(SETUP, DC_I2C_DDC, id), SRI_ARR_I2C(SPEED, DC_I2C_DDC, id), \
+ SRI_ARR_I2C(HW_STATUS, DC_I2C_DDC, id), \
+ SR_ARR_I2C(DC_I2C_ARBITRATION, id), \
+ SR_ARR_I2C(DC_I2C_CONTROL, id), SR_ARR_I2C(DC_I2C_SW_STATUS, id), \
+ SR_ARR_I2C(DC_I2C_TRANSACTION0, id), SR_ARR_I2C(DC_I2C_TRANSACTION1, id),\
+ SR_ARR_I2C(DC_I2C_TRANSACTION2, id), SR_ARR_I2C(DC_I2C_TRANSACTION3, id),\
+ SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id) \
+ )
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \
+ ( \
+ I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR_I2C(DIO_MEM_PWR_CTRL, id), \
+ SR_ARR_I2C(DIO_MEM_PWR_STATUS, id) \
+ )
#endif /* _DCN32_RESOURCE_H_ */
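Editor's aside on how the register-init ("_RI") lists above are consumed: each entry such as SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, id) is only a name at this point; the per-SoC resource files (such as the dcn321_resource.c changes later in this patch) define SRI_ARR so the entry pastes together the generated regBLOCKid_NAME and _BASE_IDX symbols, looks the segment offset up in a runtime table, and assigns the sum into REG_STRUCT[id]. The following is a hedged, self-contained toy of that expansion for a single entry; the register value, base index, and offset table are made up for illustration and only echo the removed DCN_BASE constants:

    /* Toy stand-ins for the generated register headers (values invented). */
    #include <stdio.h>

    #define regDSC_TOP0_DSC_TOP_CONTROL           0x0042
    #define regDSC_TOP0_DSC_TOP_CONTROL_BASE_IDX  2

    struct toy_dsc_registers { unsigned int DSC_TOP_CONTROL; };
    static struct toy_dsc_registers dsc_regs[1];
    static const unsigned int dcn_reg_offsets[4] = { 0x12, 0xC0, 0x34C0, 0x9000 };

    #define BASE(seg) (dcn_reg_offsets[seg])
    /* Same shape as the real SRI_ARR: paste block/id/name, look up the base, add the offset. */
    #define SRI_ARR(reg_name, block, id) \
        (REG_STRUCT[id].reg_name = \
            BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
            reg ## block ## id ## _ ## reg_name)

    int main(void)
    {
    #define REG_STRUCT dsc_regs
        SRI_ARR(DSC_TOP_CONTROL, DSC_TOP, 0);   /* pastes regDSC_TOP0_DSC_TOP_CONTROL{,_BASE_IDX} */
        printf("DSC_TOP_CONTROL at 0x%x\n", dsc_regs[0].DSC_TOP_CONTROL);   /* 0x34C0 + 0x42 */
        return 0;
    }

With the toy values the lookup resolves to 0x34C0 + 0x42, the same base-index-plus-offset arithmetic the real macro performs against the runtime offset table.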
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 955f52e6064d..a2a70a1572b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -28,6 +28,11 @@
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
@@ -46,7 +51,6 @@
uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
{
uint32_t num_ways = 0;
- uint32_t mall_region_pixels = 0;
uint32_t bytes_per_pixel = 0;
uint32_t cache_lines_used = 0;
uint32_t lines_per_way = 0;
@@ -54,28 +58,77 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
uint32_t bytes_in_mall = 0;
uint32_t num_mblks = 0;
uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0;
+ uint32_t i = 0, j = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint32_t full_vp_width_blk_aligned = 0;
+ uint32_t full_vp_height_blk_aligned = 0;
+ uint32_t mall_alloc_width_blk_aligned = 0;
+ uint32_t mall_alloc_height_blk_aligned = 0;
+ uint16_t full_vp_height = 0;
+ bool subvp_in_use = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // Find the phantom pipes
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+ /* Find the phantom pipes.
+ * - For the pipe split case we need to loop through the bottom and next ODM
+ * pipes, otherwise only half the viewport size is counted.
+ */

+ if (pipe->stream && pipe->plane_state &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct pipe_ctx *main_pipe = NULL;
+
+ subvp_in_use = true;
+ /* Get full viewport height from main pipe (required for MBLK calculation) */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ main_pipe = &context->res_ctx.pipe_ctx[j];
+ if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+ full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+ break;
+ }
+ }
+
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ */
+ full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) +
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ */
+ full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ full_vp_height + mblk_height - 1) / mblk_height * mblk_height) +
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+ /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+ mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
- // For bytes required in MALL, calculate based on number of MBlks required
- num_mblks = (mall_region_pixels * bytes_per_pixel +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+ /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+ mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+ mblk_height * mblk_height + mblk_height;
+
+ /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+ * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+ * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+ * (Should be divisible, but round up if not)
+ */
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
// (MALL is 64-byte aligned)
cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
- // For DCC we must cache the meat surface, so double cache lines required
+ /* For DCC divide by 256 */
if (pipe->plane_state->dcc.enable)
- cache_lines_per_plane *= 2;
+ cache_lines_per_plane = cache_lines_per_plane + (cache_lines_per_plane / 256) + 1;
cache_lines_used += cache_lines_per_plane;
}
}
@@ -86,6 +139,9 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
+ num_ways = dc->debug.force_subvp_num_ways;
+
return num_ways;
}
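To make the MALL sizing math in the hunk above concrete: the block-aligned allocation is carved into MBLKs, the MBLK count sets bytes_in_mall, and that is converted to cache lines and finally to ways. Below is a hedged, stand-alone sketch with illustrative numbers; the MBLK dimensions, MBLK size, and lines_per_way are assumptions for the example, not the values the driver derives from the cache configuration:

    /* Hedged sketch of the num_ways arithmetic above, with illustrative constants. */
    #include <stdio.h>

    #define MBLK_WIDTH            128     /* stand-in for DCN3_2_MBLK_WIDTH */
    #define MBLK_HEIGHT_4BPE      128     /* stand-in for DCN3_2_MBLK_HEIGHT_4BPE */
    #define MALL_MBLK_SIZE_BYTES  65536   /* stand-in for DCN3_2_MALL_MBLK_SIZE_BYTES */
    #define CACHE_LINE_SIZE       64
    #define DIV_ROUND_UP(a, b)    (((a) + (b) - 1) / (b))

    int main(void)
    {
        /* Example phantom pipe: 4 bytes/pixel, block-aligned allocation of 3840 x 384 pixels. */
        unsigned int mall_alloc_width_blk_aligned  = 3840;
        unsigned int mall_alloc_height_blk_aligned = 384;

        unsigned int num_mblks = DIV_ROUND_UP(mall_alloc_width_blk_aligned, MBLK_WIDTH) *
                                 DIV_ROUND_UP(mall_alloc_height_blk_aligned, MBLK_HEIGHT_4BPE);
        unsigned int bytes_in_mall = num_mblks * MALL_MBLK_SIZE_BYTES;
        /* +2 cache lines for worst-case 64-byte alignment, as in the driver code. */
        unsigned int cache_lines_used = bytes_in_mall / CACHE_LINE_SIZE + 2;

        unsigned int lines_per_way = 65536;   /* assumed: total MALL / ways / cache line size */
        unsigned int num_ways = cache_lines_used / lines_per_way;

        if (cache_lines_used % lines_per_way > 0)
            num_ways++;

        /* 30 * 3 = 90 MBLKs -> ~5.6 MiB -> 92162 cache lines -> 2 ways here. */
        printf("num_mblks=%u bytes_in_mall=%u num_ways=%u\n",
               num_mblks, bytes_in_mall, num_ways);
        return 0;
    }

The DCC branch in the hunk then adds roughly 1/256 of those cache lines for metadata before the per-plane totals are summed.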
@@ -144,7 +200,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
- continue;
+ return false;
if (!pipe->plane_state)
return false;
@@ -177,36 +233,133 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
-void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
- bool *is_pipe_split_expected, int pipe_cnt)
+/**
+ * *******************************************************************************************
+ * dcn32_determine_det_override: Determine DET allocation for each pipe
+ *
+ * This function determines how much DET to allocate for each pipe. The total number of
+ * DET segments will be split equally among each of the streams, and after that the DET
+ * segments per stream will be split equally among the planes for the given stream.
+ *
+ * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
+ * number of DET for that given plane will be split among the pipes driving that plane.
+ *
+ *
+ * High level algorithm:
+ * 1. Split total DET among number of streams
+ * 2. For each stream, split DET among the planes
+ * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
+ * among those pipes.
+ * 4. Assign the DET override to the DML pipes.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [in]: pipes: Array of DML pipes
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_determine_det_override(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
{
- int i, j, count, stream_segments, pipe_segments[MAX_PIPES];
+ uint32_t i, j, k;
+ uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
+ uint8_t pipe_counted[MAX_PIPES] = {0};
+ uint8_t pipe_cnt = 0;
+ struct dc_plane_state *current_plane = NULL;
+ uint8_t stream_count = 0;
+
+ for (i = 0; i < context->stream_count; i++) {
+ /* Don't count SubVP streams for DET allocation */
+ if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+ stream_count++;
+ }
+ }
- if (context->stream_count > 0) {
- stream_segments = 18 / context->stream_count;
+ if (stream_count > 0) {
+ stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
- count = 0;
- for (j = 0; j < pipe_cnt; j++) {
- if (context->res_ctx.pipe_ctx[j].stream == context->streams[i]) {
- count++;
- if (is_pipe_split_expected[j])
- count++;
+ if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+ if (context->stream_status[i].plane_count > 0)
+ plane_segments = stream_segments / context->stream_status[i].plane_count;
+ else
+ plane_segments = stream_segments;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_plane_count = 0;
+ if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
+ pipe_counted[j] != 1) {
+ /* Note: pipe_plane_count indicates the number of pipes to be used for a
+ * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
+ * pipe_plane_count = 2 means 2:1 split, etc.
+ */
+ pipe_plane_count++;
+ pipe_counted[j] = 1;
+ current_plane = context->res_ctx.pipe_ctx[j].plane_state;
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
+ context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
+ pipe_plane_count++;
+ pipe_counted[k] = 1;
+ }
+ }
+
+ pipe_segments[j] = plane_segments / pipe_plane_count;
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
+ context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
+ pipe_segments[k] = plane_segments / pipe_plane_count;
+ }
+ }
}
}
- pipe_segments[i] = stream_segments / count;
}
- for (i = 0; i < pipe_cnt; i++) {
- pipes[i].pipe.src.det_size_override = 0;
- for (j = 0; j < context->stream_count; j++) {
- if (context->res_ctx.pipe_ctx[i].stream == context->streams[j]) {
- pipes[i].pipe.src.det_size_override = pipe_segments[j] * DCN3_2_DET_SEG_SIZE;
- break;
- }
- }
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
+ pipe_cnt++;
}
} else {
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
}
}
+
+void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ pipe_cnt++;
+ }
+
+ /* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
+ * the DET available for each pipe). Use the DET override input to maintain our driver
+ * policy.
+ */
+ if (pipe_cnt == 1) {
+ pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
+ if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (!is_dual_plane(pipe->plane_state->format)) {
+ pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ if (pipe->plane_state->src_rect.width >= 5120 &&
+ pipe->plane_state->src_rect.height >= 2880)
+ pipes[0].pipe.src.det_size_override = 320; // 5K or higher
+ }
+ }
+ } else
+ dcn32_determine_det_override(dc, context, pipes);
+}
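The DET override written here follows the hierarchy spelled out in the comment block above dcn32_determine_det_override: 18 segments are divided among the non-phantom streams, each stream's share among its planes, and a split plane's share among the pipes driving it (the single-pipe fast path in dcn32_set_det_allocations instead grants DCN3_2_MAX_DET_SIZE or the unbounded-request defaults). A stand-alone sketch of just the integer splitting follows; DET_SEG_SIZE_KB is an assumed illustrative value, not the driver's DCN3_2_DET_SEG_SIZE:

    /* Illustrative only: the stream -> plane -> pipe DET-segment split described above. */
    #include <stdio.h>

    #define TOTAL_DET_SEGMENTS 18
    #define DET_SEG_SIZE_KB    64   /* assumed stand-in for DCN3_2_DET_SEG_SIZE */

    static unsigned int det_segments_for_pipe(unsigned int stream_count,
                                              unsigned int planes_in_stream,
                                              unsigned int pipes_driving_plane)
    {
        unsigned int stream_segments = TOTAL_DET_SEGMENTS / stream_count;
        unsigned int plane_segments  = stream_segments / planes_in_stream;

        return plane_segments / pipes_driving_plane;
    }

    int main(void)
    {
        /* Two non-phantom streams; the second is MPO with 2 planes, one of them split 2:1. */
        printf("stream A, 1 plane, no split  : %u segs\n", det_segments_for_pipe(2, 1, 1));  /* 9 */
        printf("stream B, 2 planes, no split : %u segs\n", det_segments_for_pipe(2, 2, 1));  /* 4 */
        printf("stream B, 2 planes, 2:1 split: %u segs (%u KB per pipe)\n",
               det_segments_for_pipe(2, 2, 2),
               det_segments_for_pipe(2, 2, 2) * DET_SEG_SIZE_KB);                            /* 2, 128 */
        return 0;
    }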
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index c8b7d6ff38f4..aed0f689cbbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -93,31 +93,6 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#define DCN_BASE__INST0_SEG1 0x000000C0
-#define DCN_BASE__INST0_SEG2 0x000034C0
-#define DCN_BASE__INST0_SEG3 0x00009000
-#define NBIO_BASE__INST0_SEG1 0x00000014
-
-#define MAX_INSTANCE 8
-#define MAX_SEGMENT 6
-
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-
#define DC_LOGGER_INIT(logger)
#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
@@ -138,78 +113,102 @@ enum dcn321_clk_src_array_id {
/* DCN */
/* TODO awful hack. fixup dcn20_dwb.h */
#undef BASE_INNER
-#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
+#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define BASE(seg) BASE_INNER(seg)
#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
+#define SR_ARR_INIT(reg_name, id, value)\
+ REG_STRUCT[id].reg_name = value
#define SRI(reg_name, block, id)\
- .reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
+ REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRI2(reg_name, block, id)\
.reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+ reg ## reg_name
+#define SRI2_ARR(reg_name, block, id)\
+ REG_STRUCT[id].reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
+ reg ## reg_name
#define SRIR(var_name, reg_name, block, id)\
.var_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
+#define SRII_ARR_2(reg_name, block, id, inst)\
+ REG_STRUCT[inst].reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define SRII_MPC_RMU(reg_name, block, id)\
.RMU##_##reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ reg ## block ## id ## _ ## reg_name
#define SRII_DWB(reg_name, temp_name, block, id)\
- .reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## temp_name
+ REG_STRUCT.reg_name[id] = BASE(reg ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## temp_name
#define DCCG_SRII(reg_name, block, id)\
- .block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- reg ## block ## id ## _ ## reg_name
+ REG_STRUCT.block ## _ ## reg_name[id] = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
#define VUPDATE_SRII(reg_name, block, id)\
- .reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
- reg ## reg_name ## _ ## block ## id
+ REG_STRUCT.reg_name[id] = BASE(reg ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ reg ## reg_name ## _ ## block ## id
/* NBIO */
-#define NBIO_BASE_INNER(seg) \
- NBIO_BASE__INST0_SEG ## seg
+#define NBIO_BASE_INNER(seg) ctx->nbio_reg_offsets[seg]
#define NBIO_BASE(seg) \
NBIO_BASE_INNER(seg)
#define NBIO_SR(reg_name)\
- .reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
- regBIF_BX0_ ## reg_name
+ REG_STRUCT.reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
+#define NBIO_SR_ARR(reg_name, id)\
+ REG_STRUCT[id].reg_name = NBIO_BASE(regBIF_BX0_ ## reg_name ## _BASE_IDX) + \
+ regBIF_BX0_ ## reg_name
#define CTX ctx
#define REG(reg_name) \
- (DCN_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+ (ctx->dcn_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
-static const struct bios_registers bios_regs = {
- NBIO_SR(BIOS_SCRATCH_3),
- NBIO_SR(BIOS_SCRATCH_6)
-};
+static struct bios_registers bios_regs;
-#define clk_src_regs(index, pllid)\
-[index] = {\
- CS_COMMON_REG_LIST_DCN3_0(index, pllid),\
-}
+#define bios_regs_init() \
+ ( \
+ NBIO_SR(BIOS_SCRATCH_3),\
+ NBIO_SR(BIOS_SCRATCH_6)\
+ )
-static const struct dce110_clk_src_regs clk_src_regs[] = {
- clk_src_regs(0, A),
- clk_src_regs(1, B),
- clk_src_regs(2, C),
- clk_src_regs(3, D),
- clk_src_regs(4, E)
-};
+#define clk_src_regs_init(index, pllid)\
+ CS_COMMON_REG_LIST_DCN3_0_RI(index, pllid)
+
+static struct dce110_clk_src_regs clk_src_regs[5];
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
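The hunk above is the other half of the register-init rework: the tables become writable arrays (for example clk_src_regs[5]) filled at create time, BASE(seg) now reads ctx->dcn_reg_offsets[seg] instead of the removed DCN_BASE constants, and every SR/SRI/SRII variant assigns into whatever REG_STRUCT currently names. The create functions that do the retargeting are outside this hunk, so the following is only a hedged, self-contained toy of the expected call pattern, with invented names (toy_ctx, toy_regs, toy_regs_init); the reg##block##id token pasting shown in the header-file sketch earlier is skipped here to keep the focus on the retargeting:

    /* Toy model of the REG_STRUCT retargeting pattern; all names and values invented. */
    #include <stdio.h>

    struct toy_ctx { unsigned int dcn_reg_offsets[4]; };
    struct toy_registers { unsigned int CTRL; unsigned int STATUS; };

    static struct toy_registers toy_regs[2];   /* like clk_src_regs[5] above: filled later */

    #define TOY_BASE(seg)              (ctx->dcn_reg_offsets[seg])
    #define TOY_SR_ARR(field, seg, off, id) \
        (REG_STRUCT[id].field = TOY_BASE(seg) + (off))

    #define toy_regs_init(id) \
        ( \
            TOY_SR_ARR(CTRL,   1, 0x10, id), \
            TOY_SR_ARR(STATUS, 1, 0x14, id) \
        )

    static void toy_create(struct toy_ctx *ctx)
    {
        /* Point REG_STRUCT at the table to populate, then expand the init list per instance. */
    #undef  REG_STRUCT
    #define REG_STRUCT toy_regs
        toy_regs_init(0);
        toy_regs_init(1);
    }

    int main(void)
    {
        struct toy_ctx ctx = { .dcn_reg_offsets = { 0x12, 0xC0, 0x34C0, 0x9000 } };

        toy_create(&ctx);
        printf("inst0 CTRL=0x%x, inst1 STATUS=0x%x\n",
               toy_regs[0].CTRL, toy_regs[1].STATUS);   /* 0xD0 and 0xD4 */
        return 0;
    }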
@@ -219,17 +218,10 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define abm_regs(id)\
-[id] = {\
- ABM_DCN32_REG_LIST(id)\
-}
+#define abm_regs_init(id)\
+ ABM_DCN32_REG_LIST_RI(id)
-static const struct dce_abm_registers abm_regs[] = {
- abm_regs(0),
- abm_regs(1),
- abm_regs(2),
- abm_regs(3),
-};
+static struct dce_abm_registers abm_regs[4];
static const struct dce_abm_shift abm_shift = {
ABM_MASK_SH_LIST_DCN32(__SHIFT)
@@ -239,18 +231,10 @@ static const struct dce_abm_mask abm_mask = {
ABM_MASK_SH_LIST_DCN32(_MASK)
};
-#define audio_regs(id)\
-[id] = {\
- AUD_COMMON_REG_LIST(id)\
-}
+#define audio_regs_init(id)\
+ AUD_COMMON_REG_LIST_RI(id)
-static const struct dce_audio_registers audio_regs[] = {
- audio_regs(0),
- audio_regs(1),
- audio_regs(2),
- audio_regs(3),
- audio_regs(4)
-};
+static struct dce_audio_registers audio_regs[5];
#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
@@ -265,23 +249,10 @@ static const struct dce_audio_mask audio_mask = {
DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
};
-#define vpg_regs(id)\
-[id] = {\
- VPG_DCN3_REG_LIST(id)\
-}
+#define vpg_regs_init(id)\
+ VPG_DCN3_REG_LIST_RI(id)
-static const struct dcn30_vpg_registers vpg_regs[] = {
- vpg_regs(0),
- vpg_regs(1),
- vpg_regs(2),
- vpg_regs(3),
- vpg_regs(4),
- vpg_regs(5),
- vpg_regs(6),
- vpg_regs(7),
- vpg_regs(8),
- vpg_regs(9),
-};
+static struct dcn30_vpg_registers vpg_regs[10];
static const struct dcn30_vpg_shift vpg_shift = {
DCN3_VPG_MASK_SH_LIST(__SHIFT)
@@ -291,19 +262,10 @@ static const struct dcn30_vpg_mask vpg_mask = {
DCN3_VPG_MASK_SH_LIST(_MASK)
};
-#define afmt_regs(id)\
-[id] = {\
- AFMT_DCN3_REG_LIST(id)\
-}
+#define afmt_regs_init(id)\
+ AFMT_DCN3_REG_LIST_RI(id)
-static const struct dcn30_afmt_registers afmt_regs[] = {
- afmt_regs(0),
- afmt_regs(1),
- afmt_regs(2),
- afmt_regs(3),
- afmt_regs(4),
- afmt_regs(5)
-};
+static struct dcn30_afmt_registers afmt_regs[6];
static const struct dcn30_afmt_shift afmt_shift = {
DCN3_AFMT_MASK_SH_LIST(__SHIFT)
@@ -313,17 +275,10 @@ static const struct dcn30_afmt_mask afmt_mask = {
DCN3_AFMT_MASK_SH_LIST(_MASK)
};
-#define apg_regs(id)\
-[id] = {\
- APG_DCN31_REG_LIST(id)\
-}
+#define apg_regs_init(id)\
+ APG_DCN31_REG_LIST_RI(id)
-static const struct dcn31_apg_registers apg_regs[] = {
- apg_regs(0),
- apg_regs(1),
- apg_regs(2),
- apg_regs(3)
-};
+static struct dcn31_apg_registers apg_regs[4];
static const struct dcn31_apg_shift apg_shift = {
DCN31_APG_MASK_SH_LIST(__SHIFT)
@@ -333,18 +288,10 @@ static const struct dcn31_apg_mask apg_mask = {
DCN31_APG_MASK_SH_LIST(_MASK)
};
-#define stream_enc_regs(id)\
-[id] = {\
- SE_DCN32_REG_LIST(id)\
-}
+#define stream_enc_regs_init(id)\
+ SE_DCN32_REG_LIST_RI(id)
-static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
- stream_enc_regs(0),
- stream_enc_regs(1),
- stream_enc_regs(2),
- stream_enc_regs(3),
- stream_enc_regs(4)
-};
+static struct dcn10_stream_enc_registers stream_enc_regs[5];
static const struct dcn10_stream_encoder_shift se_shift = {
SE_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -355,46 +302,24 @@ static const struct dcn10_stream_encoder_mask se_mask = {
};
-#define aux_regs(id)\
-[id] = {\
- DCN2_AUX_REG_LIST(id)\
-}
+#define aux_regs_init(id)\
+ DCN2_AUX_REG_LIST_RI(id)
-static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
- aux_regs(0),
- aux_regs(1),
- aux_regs(2),
- aux_regs(3),
- aux_regs(4)
-};
+static struct dcn10_link_enc_aux_registers link_enc_aux_regs[5];
-#define hpd_regs(id)\
-[id] = {\
- HPD_REG_LIST(id)\
-}
+#define hpd_regs_init(id)\
+ HPD_REG_LIST_RI(id)
-static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
- hpd_regs(0),
- hpd_regs(1),
- hpd_regs(2),
- hpd_regs(3),
- hpd_regs(4)
-};
+static struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[5];
-#define link_regs(id, phyid)\
-[id] = {\
- LE_DCN31_REG_LIST(id), \
- UNIPHY_DCN2_REG_LIST(phyid), \
+#define link_regs_init(id, phyid)\
+ ( \
+ LE_DCN31_REG_LIST_RI(id), \
+ UNIPHY_DCN2_REG_LIST_RI(id, phyid)\
+ )
/*DPCS_DCN31_REG_LIST(id),*/ \
-}
-static const struct dcn10_link_enc_registers link_enc_regs[] = {
- link_regs(0, A),
- link_regs(1, B),
- link_regs(2, C),
- link_regs(3, D),
- link_regs(4, E)
-};
+static struct dcn10_link_enc_registers link_enc_regs[5];
static const struct dcn10_link_enc_shift le_shift = {
LINK_ENCODER_MASK_SH_LIST_DCN31(__SHIFT), \
@@ -406,17 +331,10 @@ static const struct dcn10_link_enc_mask le_mask = {
// DPCS_DCN31_MASK_SH_LIST(_MASK)
};
-#define hpo_dp_stream_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
-}
+#define hpo_dp_stream_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST_RI(id)
-static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
- hpo_dp_stream_encoder_reg_list(0),
- hpo_dp_stream_encoder_reg_list(1),
- hpo_dp_stream_encoder_reg_list(2),
- hpo_dp_stream_encoder_reg_list(3),
-};
+static struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[4];
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
@@ -427,20 +345,14 @@ static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
};
-#define hpo_dp_link_encoder_reg_list(id)\
-[id] = {\
- DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
- /*DCN3_1_RDPCSTX_REG_LIST(0),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(1),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(2),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(3),*/\
- /*DCN3_1_RDPCSTX_REG_LIST(4)*/\
-}
+#define hpo_dp_link_encoder_reg_init(id)\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST_RI(id)
+ /*DCN3_1_RDPCSTX_REG_LIST(0),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(1),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(2),*/
+ /*DCN3_1_RDPCSTX_REG_LIST(3),*/
-static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
- hpo_dp_link_encoder_reg_list(0),
- hpo_dp_link_encoder_reg_list(1),
-};
+static struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[2];
static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
@@ -450,17 +362,10 @@ static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
DCN3_2_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
};
-#define dpp_regs(id)\
-[id] = {\
- DPP_REG_LIST_DCN30_COMMON(id),\
-}
+#define dpp_regs_init(id)\
+ DPP_REG_LIST_DCN30_COMMON_RI(id)
-static const struct dcn3_dpp_registers dpp_regs[] = {
- dpp_regs(0),
- dpp_regs(1),
- dpp_regs(2),
- dpp_regs(3)
-};
+static struct dcn3_dpp_registers dpp_regs[4];
static const struct dcn3_dpp_shift tf_shift = {
DPP_REG_LIST_SH_MASK_DCN30_COMMON(__SHIFT)
@@ -471,17 +376,10 @@ static const struct dcn3_dpp_mask tf_mask = {
};
-#define opp_regs(id)\
-[id] = {\
- OPP_REG_LIST_DCN30(id),\
-}
+#define opp_regs_init(id)\
+ OPP_REG_LIST_DCN30_RI(id)
-static const struct dcn20_opp_registers opp_regs[] = {
- opp_regs(0),
- opp_regs(1),
- opp_regs(2),
- opp_regs(3)
-};
+static struct dcn20_opp_registers opp_regs[4];
static const struct dcn20_opp_shift opp_shift = {
OPP_MASK_SH_LIST_DCN20(__SHIFT)
@@ -491,21 +389,15 @@ static const struct dcn20_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN20(_MASK)
};
-#define aux_engine_regs(id)\
-[id] = {\
- AUX_COMMON_REG_LIST0(id), \
- .AUXN_IMPCAL = 0, \
- .AUXP_IMPCAL = 0, \
- .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \
-}
+#define aux_engine_regs_init(id) \
+ ( \
+ AUX_COMMON_REG_LIST0_RI(id), SR_ARR_INIT(AUXN_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUXP_IMPCAL, id, 0), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK), \
+ SR_ARR_INIT(AUX_RESET_MASK, id, DP_AUX0_AUX_CONTROL__AUX_RESET_MASK)\
+ )
-static const struct dce110_aux_registers aux_engine_regs[] = {
- aux_engine_regs(0),
- aux_engine_regs(1),
- aux_engine_regs(2),
- aux_engine_regs(3),
- aux_engine_regs(4)
-};
+static struct dce110_aux_registers aux_engine_regs[5];
static const struct dce110_aux_registers_shift aux_shift = {
DCN_AUX_MASK_SH_LIST(__SHIFT)
@@ -515,15 +407,10 @@ static const struct dce110_aux_registers_mask aux_mask = {
DCN_AUX_MASK_SH_LIST(_MASK)
};
+#define dwbc_regs_dcn3_init(id)\
+ DWBC_COMMON_REG_LIST_DCN30_RI(id)
-#define dwbc_regs_dcn3(id)\
-[id] = {\
- DWBC_COMMON_REG_LIST_DCN30(id),\
-}
-
-static const struct dcn30_dwbc_registers dwbc30_regs[] = {
- dwbc_regs_dcn3(0),
-};
+static struct dcn30_dwbc_registers dwbc30_regs[1];
static const struct dcn30_dwbc_shift dwbc30_shift = {
DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -533,14 +420,10 @@ static const struct dcn30_dwbc_mask dwbc30_mask = {
DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
-#define mcif_wb_regs_dcn3(id)\
-[id] = {\
- MCIF_WB_COMMON_REG_LIST_DCN32(id),\
-}
+#define mcif_wb_regs_dcn3_init(id)\
+ MCIF_WB_COMMON_REG_LIST_DCN32_RI(id)
-static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = {
- mcif_wb_regs_dcn3(0)
-};
+static struct dcn30_mmhubbub_registers mcif_wb30_regs[1];
static const struct dcn30_mmhubbub_shift mcif_wb30_shift = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -550,17 +433,10 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
MCIF_WB_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define dsc_regsDCN20(id)\
-[id] = {\
- DSC_REG_LIST_DCN20(id)\
-}
+#define dsc_regsDCN20_init(id)\
+ DSC_REG_LIST_DCN20_RI(id)
-static const struct dcn20_dsc_registers dsc_regs[] = {
- dsc_regsDCN20(0),
- dsc_regsDCN20(1),
- dsc_regsDCN20(2),
- dsc_regsDCN20(3)
-};
+static struct dcn20_dsc_registers dsc_regs[4];
static const struct dcn20_dsc_shift dsc_shift = {
DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
@@ -570,17 +446,17 @@ static const struct dcn20_dsc_mask dsc_mask = {
DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
-static const struct dcn30_mpc_registers mpc_regs = {
- MPC_REG_LIST_DCN3_2(0),
- MPC_REG_LIST_DCN3_2(1),
- MPC_REG_LIST_DCN3_2(2),
- MPC_REG_LIST_DCN3_2(3),
- MPC_OUT_MUX_REG_LIST_DCN3_0(0),
- MPC_OUT_MUX_REG_LIST_DCN3_0(1),
- MPC_OUT_MUX_REG_LIST_DCN3_0(2),
- MPC_OUT_MUX_REG_LIST_DCN3_0(3),
- MPC_DWB_MUX_REG_LIST_DCN3_0(0),
-};
+static struct dcn30_mpc_registers mpc_regs;
+#define dcn_mpc_regs_init()\
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -590,15 +466,10 @@ static const struct dcn30_mpc_mask mpc_mask = {
MPC_COMMON_MASK_SH_LIST_DCN32(_MASK)
};
-#define optc_regs(id)\
-[id] = {OPTC_COMMON_REG_LIST_DCN3_2(id)}
+#define optc_regs_init(id)\
+ OPTC_COMMON_REG_LIST_DCN3_2_RI(id)
-static const struct dcn_optc_registers optc_regs[] = {
- optc_regs(0),
- optc_regs(1),
- optc_regs(2),
- optc_regs(3)
-};
+static struct dcn_optc_registers optc_regs[4];
static const struct dcn_optc_shift optc_shift = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(__SHIFT)
@@ -608,18 +479,10 @@ static const struct dcn_optc_mask optc_mask = {
OPTC_COMMON_MASK_SH_LIST_DCN3_2(_MASK)
};
-#define hubp_regs(id)\
-[id] = {\
- HUBP_REG_LIST_DCN32(id)\
-}
-
-static const struct dcn_hubp2_registers hubp_regs[] = {
- hubp_regs(0),
- hubp_regs(1),
- hubp_regs(2),
- hubp_regs(3)
-};
+#define hubp_regs_init(id) \
+ HUBP_REG_LIST_DCN32_RI(id)
+static struct dcn_hubp2_registers hubp_regs[4];
static const struct dcn_hubp2_shift hubp_shift = {
HUBP_MASK_SH_LIST_DCN32(__SHIFT)
@@ -628,9 +491,10 @@ static const struct dcn_hubp2_shift hubp_shift = {
static const struct dcn_hubp2_mask hubp_mask = {
HUBP_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dcn_hubbub_registers hubbub_reg = {
- HUBBUB_REG_LIST_DCN32(0)
-};
+
+static struct dcn_hubbub_registers hubbub_reg;
+#define hubbub_reg_init()\
+ HUBBUB_REG_LIST_DCN32_RI(0)
static const struct dcn_hubbub_shift hubbub_shift = {
HUBBUB_MASK_SH_LIST_DCN32(__SHIFT)
@@ -640,9 +504,10 @@ static const struct dcn_hubbub_mask hubbub_mask = {
HUBBUB_MASK_SH_LIST_DCN32(_MASK)
};
-static const struct dccg_registers dccg_regs = {
- DCCG_REG_LIST_DCN32()
-};
+static struct dccg_registers dccg_regs;
+
+#define dccg_regs_init()\
+ DCCG_REG_LIST_DCN32_RI()
static const struct dccg_shift dccg_shift = {
DCCG_MASK_SH_LIST_DCN32(__SHIFT)
@@ -715,9 +580,10 @@ static const struct dccg_mask dccg_mask = {
SR(AZALIA_AUDIO_DTO), \
SR(AZALIA_CONTROLLER_CLOCK_GATING)
-static const struct dce_hwseq_registers hwseq_reg = {
- HWSEQ_DCN32_REG_LIST()
-};
+static struct dce_hwseq_registers hwseq_reg;
+
+#define hwseq_reg_init()\
+ HWSEQ_DCN32_REG_LIST()
#define HWSEQ_DCN32_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -760,29 +626,10 @@ static const struct dce_hwseq_shift hwseq_shift = {
static const struct dce_hwseq_mask hwseq_mask = {
HWSEQ_DCN32_MASK_SH_LIST(_MASK)
};
-#define vmid_regs(id)\
-[id] = {\
- DCN20_VMID_REG_LIST(id)\
-}
+#define vmid_regs_init(id)\
+ DCN20_VMID_REG_LIST_RI(id)
-static const struct dcn_vmid_registers vmid_regs[] = {
- vmid_regs(0),
- vmid_regs(1),
- vmid_regs(2),
- vmid_regs(3),
- vmid_regs(4),
- vmid_regs(5),
- vmid_regs(6),
- vmid_regs(7),
- vmid_regs(8),
- vmid_regs(9),
- vmid_regs(10),
- vmid_regs(11),
- vmid_regs(12),
- vmid_regs(13),
- vmid_regs(14),
- vmid_regs(15)
-};
+static struct dcn_vmid_registers vmid_regs[16];
static const struct dcn20_vmid_shift vmid_shifts = {
DCN20_VMID_MASK_SH_LIST(__SHIFT)
@@ -871,7 +718,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+ /*must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
+ .alloc_extra_way_for_cursor = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -905,6 +757,14 @@ static struct dce_aux *dcn321_aux_engine_create(
if (!aux_engine)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT aux_engine_regs
+ aux_engine_regs_init(0),
+ aux_engine_regs_init(1),
+ aux_engine_regs_init(2),
+ aux_engine_regs_init(3),
+ aux_engine_regs_init(4);
+
dce110_aux_engine_construct(aux_engine, ctx, inst,
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
&aux_engine_regs[inst],
@@ -914,15 +774,10 @@ static struct dce_aux *dcn321_aux_engine_create(
return &aux_engine->base;
}
-#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST_DCN30(id) }
-
-static const struct dce_i2c_registers i2c_hw_regs[] = {
- i2c_inst_regs(1),
- i2c_inst_regs(2),
- i2c_inst_regs(3),
- i2c_inst_regs(4),
- i2c_inst_regs(5),
-};
+#define i2c_inst_regs_init(id)\
+ I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
+
+static struct dce_i2c_registers i2c_hw_regs[5];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -942,6 +797,14 @@ static struct dce_i2c_hw *dcn321_i2c_hw_create(
if (!dce_i2c_hw)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT i2c_hw_regs
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
+
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -981,6 +844,29 @@ static struct hubbub *dcn321_hubbub_create(struct dc_context *ctx)
if (!hubbub2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubbub_reg
+ hubbub_reg_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT vmid_regs
+ vmid_regs_init(0),
+ vmid_regs_init(1),
+ vmid_regs_init(2),
+ vmid_regs_init(3),
+ vmid_regs_init(4),
+ vmid_regs_init(5),
+ vmid_regs_init(6),
+ vmid_regs_init(7),
+ vmid_regs_init(8),
+ vmid_regs_init(9),
+ vmid_regs_init(10),
+ vmid_regs_init(11),
+ vmid_regs_init(12),
+ vmid_regs_init(13),
+ vmid_regs_init(14),
+ vmid_regs_init(15);
+
hubbub32_construct(hubbub2, ctx,
&hubbub_reg,
&hubbub_shift,
@@ -1013,6 +899,13 @@ static struct hubp *dcn321_hubp_create(
if (!hubp2)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT hubp_regs
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
+
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
return &hubp2->base;
@@ -1038,6 +931,13 @@ static struct dpp *dcn321_dpp_create(
if (!dpp3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT dpp_regs
+ dpp_regs_init(0),
+ dpp_regs_init(1),
+ dpp_regs_init(2),
+ dpp_regs_init(3);
+
if (dpp32_construct(dpp3, ctx, inst,
&dpp_regs[inst], &tf_shift, &tf_mask))
return &dpp3->base;
@@ -1058,6 +958,10 @@ static struct mpc *dcn321_mpc_create(
if (!mpc30)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT mpc_regs
+ dcn_mpc_regs_init();
+
dcn32_mpc_construct(mpc30, ctx,
&mpc_regs,
&mpc_shift,
@@ -1079,6 +983,13 @@ static struct output_pixel_processor *dcn321_opp_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT opp_regs
+ opp_regs_init(0),
+ opp_regs_init(1),
+ opp_regs_init(2),
+ opp_regs_init(3);
+
dcn20_opp_construct(opp2, ctx, inst,
&opp_regs[inst], &opp_shift, &opp_mask);
return &opp2->base;
@@ -1095,6 +1006,13 @@ static struct timing_generator *dcn321_timing_generator_create(
if (!tgn10)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT optc_regs
+ optc_regs_init(0),
+ optc_regs_init(1),
+ optc_regs_init(2),
+ optc_regs_init(3);
+
tgn10->base.inst = instance;
tgn10->base.ctx = ctx;
@@ -1129,6 +1047,30 @@ static struct link_encoder *dcn321_link_encoder_create(
if (!enc20)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_aux_regs
+ aux_regs_init(0),
+ aux_regs_init(1),
+ aux_regs_init(2),
+ aux_regs_init(3),
+ aux_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_hpd_regs
+ hpd_regs_init(0),
+ hpd_regs_init(1),
+ hpd_regs_init(2),
+ hpd_regs_init(3),
+ hpd_regs_init(4);
+
+#undef REG_STRUCT
+#define REG_STRUCT link_enc_regs
+ link_regs_init(0, A),
+ link_regs_init(1, B),
+ link_regs_init(2, C),
+ link_regs_init(3, D),
+ link_regs_init(4, E);
+
dcn321_link_encoder_construct(enc20,
enc_init_data,
&link_enc_feature,
@@ -1145,7 +1087,7 @@ static void read_dce_straps(
struct dc_context *ctx,
struct resource_straps *straps)
{
- generic_reg_get(ctx, regDC_PINSTRAPS + BASE(regDC_PINSTRAPS_BASE_IDX),
+ generic_reg_get(ctx, ctx->dcn_reg_offsets[regDC_PINSTRAPS_BASE_IDX] + regDC_PINSTRAPS,
FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
}
@@ -1153,6 +1095,15 @@ static void read_dce_straps(
static struct audio *dcn321_create_audio(
struct dc_context *ctx, unsigned int inst)
{
+
+#undef REG_STRUCT
+#define REG_STRUCT audio_regs
+ audio_regs_init(0),
+ audio_regs_init(1),
+ audio_regs_init(2),
+ audio_regs_init(3),
+ audio_regs_init(4);
+
return dce_audio_create(ctx, inst,
&audio_regs[inst], &audio_shift, &audio_mask);
}
@@ -1166,6 +1117,19 @@ static struct vpg *dcn321_vpg_create(
if (!vpg3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT vpg_regs
+ vpg_regs_init(0),
+ vpg_regs_init(1),
+ vpg_regs_init(2),
+ vpg_regs_init(3),
+ vpg_regs_init(4),
+ vpg_regs_init(5),
+ vpg_regs_init(6),
+ vpg_regs_init(7),
+ vpg_regs_init(8),
+ vpg_regs_init(9);
+
vpg3_construct(vpg3, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
@@ -1183,6 +1147,15 @@ static struct afmt *dcn321_afmt_create(
if (!afmt3)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT afmt_regs
+ afmt_regs_init(0),
+ afmt_regs_init(1),
+ afmt_regs_init(2),
+ afmt_regs_init(3),
+ afmt_regs_init(4),
+ afmt_regs_init(5);
+
afmt3_construct(afmt3, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
@@ -1200,6 +1173,13 @@ static struct apg *dcn321_apg_create(
if (!apg31)
return NULL;
+#undef REG_STRUCT
+#define REG_STRUCT apg_regs
+ apg_regs_init(0),
+ apg_regs_init(1),
+ apg_regs_init(2),
+ apg_regs_init(3);
+
apg31_construct(apg31, ctx, inst,
&apg_regs[inst],
&apg_shift,
@@ -1236,6 +1216,14 @@ static struct stream_encoder *dcn321_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT stream_enc_regs
+ stream_enc_regs_init(0),
+ stream_enc_regs_init(1),
+ stream_enc_regs_init(2),
+ stream_enc_regs_init(3),
+ stream_enc_regs_init(4);
+
dcn32_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
&stream_enc_regs[eng_id],
@@ -1286,6 +1274,13 @@ static struct hpo_dp_stream_encoder *dcn321_hpo_dp_stream_encoder_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_stream_enc_regs
+ hpo_dp_stream_encoder_reg_init(0),
+ hpo_dp_stream_encoder_reg_init(1),
+ hpo_dp_stream_encoder_reg_init(2),
+ hpo_dp_stream_encoder_reg_init(3);
+
dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
hpo_dp_inst, eng_id, vpg, apg,
&hpo_dp_stream_enc_regs[hpo_dp_inst],
@@ -1303,6 +1298,11 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create(
/* allocate HPO link encoder */
hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hpo_dp_link_enc_regs
+ hpo_dp_link_encoder_reg_init(0),
+ hpo_dp_link_encoder_reg_init(1);
+
hpo_dp_link_encoder32_construct(hpo_dp_enc31, ctx, inst,
&hpo_dp_link_enc_regs[inst],
&hpo_dp_le_shift, &hpo_dp_le_mask);
@@ -1315,6 +1315,10 @@ static struct dce_hwseq *dcn321_hwseq_create(
{
struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+#undef REG_STRUCT
+#define REG_STRUCT hwseq_reg
+ hwseq_reg_init();
+
if (hws) {
hws->ctx = ctx;
hws->regs = &hwseq_reg;
@@ -1505,6 +1509,10 @@ static bool dcn321_dwbc_create(struct dc_context *ctx, struct resource_pool *poo
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT dwbc30_regs
+ dwbc_regs_dcn3_init(0);
+
dcn30_dwbc_construct(dwbc30, ctx,
&dwbc30_regs[i],
&dwbc30_shift,
@@ -1530,6 +1538,10 @@ static bool dcn321_mmhubbub_create(struct dc_context *ctx, struct resource_pool
return false;
}
+#undef REG_STRUCT
+#define REG_STRUCT mcif_wb30_regs
+ mcif_wb_regs_dcn3_init(0);
+
dcn32_mmhubbub_construct(mcif_wb30, ctx,
&mcif_wb30_regs[i],
&mcif_wb30_shift,
@@ -1552,6 +1564,13 @@ static struct display_stream_compressor *dcn321_dsc_create(
return NULL;
}
+#undef REG_STRUCT
+#define REG_STRUCT dsc_regs
+ dsc_regsDCN20_init(0),
+ dsc_regsDCN20_init(1),
+ dsc_regsDCN20_init(2),
+ dsc_regsDCN20_init(3);
+
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask);
dsc->max_image_width = 6016;
@@ -1616,6 +1635,30 @@ static bool dcn321_resource_construct(
uint32_t pipe_fuses = 0;
uint32_t num_pipes = 4;
+#undef REG_STRUCT
+#define REG_STRUCT bios_regs
+ bios_regs_init();
+
+#undef REG_STRUCT
+#define REG_STRUCT clk_src_regs
+ clk_src_regs_init(0, A),
+ clk_src_regs_init(1, B),
+ clk_src_regs_init(2, C),
+ clk_src_regs_init(3, D),
+ clk_src_regs_init(4, E);
+
+#undef REG_STRUCT
+#define REG_STRUCT abm_regs
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
+
+#undef REG_STRUCT
+#define REG_STRUCT dccg_regs
+ dccg_regs_init();
+
+
ctx->dc_bios->regs = &bios_regs;
pool->base.res_cap = &res_cap_dcn321;
@@ -1651,7 +1694,8 @@ static bool dcn321_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
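
The register tables in this file switch from large const initializers to zero-filled arrays that are populated at construction time: each create() routine points REG_STRUCT at its array and invokes the matching *_RI init macros, which expand to assignments computing register addresses at runtime (the read_dce_straps() change shows the same ctx->dcn_reg_offsets lookup written out by hand). A minimal sketch of that pattern, using simplified stand-in names rather than the real dcn32 macros:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins: the real driver uses SR_ARR_INIT()/..._RI()
 * macros and per-ASIC offsets from ctx->dcn_reg_offsets. */
struct aux_regs { uint32_t AUX_CONTROL; };

static uint32_t base_offset[2] = { 0x1000, 0x2000 }; /* discovered at probe time */
static struct aux_regs aux_regs[2];                  /* zero-initialized, filled lazily */

#define REG_STRUCT aux_regs
#define ARR_INIT(field, id, value) (REG_STRUCT[id].field = (value))
#define aux_regs_init(id) ARR_INIT(AUX_CONTROL, id, base_offset[id] + 0x10)

int main(void)
{
	/* same comma-chained style as the patch: each init macro is an
	 * assignment expression into REG_STRUCT[id] */
	aux_regs_init(0), aux_regs_init(1);
	printf("%#x %#x\n", (unsigned)aux_regs[0].AUX_CONTROL,
	       (unsigned)aux_regs[1].AUX_CONTROL);
	return 0;
}

Because REG_STRUCT is re-#defined before each init sequence, the same *_RI macro bodies can target whichever static array the surrounding create() routine is filling.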
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index fb6a2d7b6470..e3e5c39895a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -33,7 +33,7 @@
#include "dc_types.h"
#include "dc.h"
-struct dp_mst_stream_allocation_table;
+struct dc_dp_mst_stream_allocation_table;
struct aux_payload;
enum aux_return_code_type;
@@ -77,7 +77,7 @@ void dm_helpers_dp_update_branch_info(
bool dm_helpers_dp_mst_write_payload_allocation_table(
struct dc_context *ctx,
const struct dc_stream_state *stream,
- struct dp_mst_stream_allocation_table *proposed_table,
+ struct dc_dp_mst_stream_allocation_table *proposed_table,
bool enable);
/*
@@ -171,7 +171,13 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
// 0x1 = Result_OK, 0xFE = Result_UnknownCmd, 0x0 = Status_Busy
#define IS_SMU_TIMEOUT(result) \
(result == 0x0)
-
+void dm_helpers_init_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *config,
+ struct dc_sink *sink);
+void dm_helpers_override_panel_settings(
+ struct dc_context *ctx,
+ struct dc_panel_config *config);
int dm_helper_dmub_aux_transfer_sync(
struct dc_context *ctx,
const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 86a3b5bfd699..d70838edba80 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -34,7 +34,7 @@ dml_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+ifneq ($(call gcc-min-version, 70100),y)
IS_OLD_GCC = 1
endif
endif
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
@@ -123,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
DML += dcn31/dcn31_fpu.o
DML += dcn32/dcn32_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
index 6ca288fb5fb9..3aa8dd0acd5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c
@@ -25,12 +25,11 @@
#include "dm_services.h"
#include "bw_fixed.h"
+#define MAX_I64 \
+ ((int64_t)((1ULL << 63) - 1))
#define MIN_I64 \
- (int64_t)(-(1LL << 63))
-
-#define MAX_I64 \
- (int64_t)((1ULL << 63) - 1)
+ (-MAX_I64 - 1)
#define FRACTIONAL_PART_MASK \
((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
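
The reordering of MIN_I64/MAX_I64 is more than cosmetic: the old MIN_I64 evaluated -(1LL << 63), a signed shift into the sign bit, which is undefined behaviour; the new form derives the minimum from the maximum, the same idiom <stdint.h> uses for INT64_MIN. A standalone illustration with hypothetical BW_-prefixed names:

#include <stdint.h>
#include <stdio.h>

/* Only unsigned shifts are used, and the minimum is built as "-max - 1"
 * so no signed expression overflows while it is being evaluated. */
#define BW_MAX_I64 ((int64_t)((1ULL << 63) - 1))
#define BW_MIN_I64 (-BW_MAX_I64 - 1)

int main(void)
{
	printf("min=%lld max=%lld\n",
	       (long long)BW_MIN_I64, (long long)BW_MAX_I64);
	return 0;
}

Fully parenthesising MAX_I64 also lets it be used safely inside larger expressions, which the previous bare cast did not guarantee.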
@@ -49,6 +48,7 @@ static uint64_t abs_i64(int64_t arg)
struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
{
struct bw_fixed res;
+
ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
return res;
@@ -78,14 +78,12 @@ struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
{
uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
- do
- {
+ do {
remainder <<= 1;
res_value <<= 1;
- if (remainder >= arg2_value)
- {
+ if (remainder >= arg2_value) {
res_value |= 1;
remainder -= arg2_value;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
index 41284e263325..288d22a16cf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
@@ -526,10 +526,10 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
}
if (v->max_swath_height_c[k] > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
- }
- v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
- if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
- v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
@@ -552,14 +552,14 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
}
v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
- v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
- v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
}
else {
- v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+ v->effective_lb_latency_hiding_source_lines_chroma = dcn_bw_min2(v->max_line_buffer_lines, dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 / dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_detlb_lines_chroma = dcn_bw_floor2(v->lines_in_det_chroma + dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+ v->urgent_latency_support_us_per_state[i][j][k] = dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] * dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 * dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
}
}
}
@@ -1146,10 +1146,10 @@ void display_pipe_configuration(struct dcn_bw_internal_vars *v)
}
if (v->maximum_swath_height_c > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
- }
- v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
- if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
- v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_y[k] = v->maximum_swath_height_y;
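
Both hunks above, and the matching changes in display_mode_vba_20v2.c and display_mode_vba_21.c further down, move the chroma-side calculations inside the existing "chroma plane present" checks, so a luma-only format never divides by a zero granularity or consumes an unset chroma value. A reduced, made-up sketch of the guard (not the actual DML arithmetic):

#include <stdio.h>

/* Made-up, reduced version of the guard: chroma-derived values are only
 * computed (and later read) when the surface actually has a chroma plane. */
static double rounded_swath_bytes(double swath_width, double bpp_chroma,
				  double max_swath_height_chroma)
{
	double granularity, bytes = 0.0;

	if (max_swath_height_chroma > 0.0) {
		granularity = 256.0 / bpp_chroma / max_swath_height_chroma;
		bytes = (swath_width / 2.0) * bpp_chroma * max_swath_height_chroma
			+ granularity; /* placeholder arithmetic, not the DML formula */
	}
	return bytes;
}

int main(void)
{
	printf("%f\n", rounded_swath_bytes(1920.0, 1.0, 4.0)); /* 4:2:0-style surface */
	printf("%f\n", rounded_swath_bytes(1920.0, 0.0, 0.0)); /* luma-only: stays 0 */
	return 0;
}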
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
index 07d18e78de49..cac72413a097 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
@@ -23,6 +23,7 @@
*
*/
+#include "os_types.h"
#include "dcn_calc_math.h"
#define isNaN(number) ((number) != (number))
@@ -69,8 +70,8 @@ float dcn_bw_max2(const float arg1, const float arg2)
float dcn_bw_floor2(const float arg, const float significance)
{
- if (significance == 0)
- return 0;
+ ASSERT(significance != 0);
+
return ((int) (arg / significance)) * significance;
}
float dcn_bw_floor(const float arg)
@@ -80,17 +81,14 @@ float dcn_bw_floor(const float arg)
float dcn_bw_ceil(const float arg)
{
- float flr = dcn_bw_floor2(arg, 1);
-
- return flr + 0.00001 >= arg ? arg : flr + 1;
+ return (int) (arg + 0.99999);
}
float dcn_bw_ceil2(const float arg, const float significance)
{
- float flr = dcn_bw_floor2(arg, significance);
- if (significance == 0)
- return 0;
- return flr + 0.00001 >= arg ? arg : flr + significance;
+ ASSERT(significance != 0);
+
+ return ((int) (arg / significance + 0.99999)) * significance;
}
float dcn_bw_max3(float v1, float v2, float v3)
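
dcn_bw_ceil2() and dcn_bw_ceil() now round up by adding just under one before the integer truncation instead of going through dcn_bw_floor2(), and a zero significance trips ASSERT() (presumably why os_types.h is newly included) rather than silently returning 0. A standalone check of the new rounding, with assert() standing in for the driver's ASSERT():

#include <assert.h>
#include <stdio.h>

/* Local copy of the new helper for illustration; the driver's version
 * lives in dcn_calc_math.c and uses ASSERT() from os_types.h. */
static float bw_ceil2(const float arg, const float significance)
{
	assert(significance != 0);
	return ((int)(arg / significance + 0.99999)) * significance;
}

int main(void)
{
	printf("%.1f\n", bw_ceil2(5.1f, 2.0f)); /* 6.0: next multiple of 2 */
	printf("%.1f\n", bw_ceil2(6.0f, 2.0f)); /* 6.0: exact multiples are preserved */
	return 0;
}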
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index db3b16b77034..d46adc849d2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -736,30 +736,13 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
- uint32_t hw_internal_rev,
- uint32_t pci_revision_id)
+static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
- if ((chip_family == FAMILY_RV) &&
- ASICREV_IS_RAVEN2(hw_internal_rev))
- switch (pci_revision_id) {
- case PRID_DALI_DE:
- case PRID_DALI_DF:
- case PRID_DALI_E3:
- case PRID_DALI_E4:
- case PRID_POLLOCK_94:
- case PRID_POLLOCK_95:
- case PRID_POLLOCK_E9:
- case PRID_POLLOCK_EA:
- case PRID_POLLOCK_EB:
- return 0;
- default:
- break;
- }
-
- /* we are ok with all levels */
- return 4;
+ if (is_vmin_only_asic)
+ return 0;
+ else /* we are ok with all levels */
+ return 4;
}
bool dcn_validate_bandwidth(
@@ -1323,10 +1306,7 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
- dc->ctx->asic_id.chip_family,
- dc->ctx->asic_id.hw_internal_rev,
- dc->ctx->asic_id.pci_revision_id))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->config.is_vmin_only_asic))
return true;
else
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index d34e0f1314d9..d680f1c5b69f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -2234,6 +2234,7 @@ static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_li
void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl = 0, k = 0;
@@ -2247,8 +2248,7 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
ASSERT(clk_table->num_entries);
/* Copy dcn2_1_soc.clock_limits to clock_limits to avoid copying over null states later */
- memcpy(&dcn2_1_soc._clock_tmp, &dcn2_1_soc.clock_limits,
- sizeof(dcn2_1_soc.clock_limits));
+ memcpy(s, dcn2_1_soc.clock_limits, sizeof(dcn2_1_soc.clock_limits));
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
@@ -2263,25 +2263,25 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
if (i == 1)
k++;
- dcn2_1_soc._clock_tmp[k].state = k;
- dcn2_1_soc._clock_tmp[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc._clock_tmp[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc._clock_tmp[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc._clock_tmp[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn2_1_soc._clock_tmp[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn2_1_soc._clock_tmp[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn2_1_soc._clock_tmp[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn2_1_soc._clock_tmp[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn2_1_soc._clock_tmp[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn2_1_soc._clock_tmp[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn2_1_soc._clock_tmp[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[k].state = k;
+ s[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[k].dram_bw_per_chan_gbps =
+ dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
k++;
}
- memcpy(&dcn2_1_soc.clock_limits, &dcn2_1_soc._clock_tmp,
- sizeof(dcn2_1_soc.clock_limits));
+ memcpy(dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
if (clk_table->num_entries) {
dcn2_1_soc.num_states = clk_table->num_entries + 1;
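
dcn21_update_bw_bounding_box(), like the dcn301 and dcn31 variants further down, drops the _clock_tmp shadow array in favour of a preallocated scratch buffer, dc->scratch.update_bw_bounding_box.clock_limits: the clock table is rebuilt in scratch space and copied back once. A reduced sketch of the pattern, with hypothetical types in place of _vcs_dpi_voltage_scaling_st and dc->scratch:

#include <string.h>
#include <stdio.h>

struct clock_limit { int state; int dcfclk_mhz; };

struct soc_bb     { struct clock_limit clock_limits[4]; };
struct dc_scratch { struct clock_limit clock_limits[4]; };

static void update_bounding_box(struct soc_bb *soc, struct dc_scratch *scratch,
				const int *new_dcfclk, int n)
{
	struct clock_limit *s = scratch->clock_limits;
	int i;

	/* start from the current table, patch it in scratch space... */
	memcpy(s, soc->clock_limits, sizeof(soc->clock_limits));
	for (i = 0; i < n; i++) {
		s[i].state = i;
		s[i].dcfclk_mhz = new_dcfclk[i];
	}
	/* ...then publish the result with a single copy back */
	memcpy(soc->clock_limits, s, sizeof(soc->clock_limits));
}

int main(void)
{
	struct soc_bb soc = { { { 0, 400 }, { 1, 600 }, { 2, 800 }, { 3, 1000 } } };
	struct dc_scratch scratch;
	const int new_dcfclk[2] = { 500, 900 };

	update_bounding_box(&soc, &scratch, new_dcfclk, 2);
	printf("%d %d\n", soc.clock_limits[0].dcfclk_mhz, soc.clock_limits[1].dcfclk_mhz);
	return 0;
}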
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 63bbdf8b8678..edd098c7eb92 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -4478,17 +4478,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
- locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
- locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
- locals->EffectiveLBLatencyHidingSourceLinesChroma),
- locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
+ locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
+ locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesChroma),
+ locals->SwathHeightCPerState[i][j][k]);
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 8a7485e21d53..1d84ae50311d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -806,10 +806,12 @@ static bool CalculatePrefetchSchedule(
if (myPipe->SourceScan == dm_horz) {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockWidth256BytesY) + myPipe->BlockWidth256BytesY;
- *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
+ if (myPipe->BlockWidth256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
} else {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
- *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
+ if (myPipe->BlockWidth256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
}
prefetch_bw_oto = (PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) / Tsw_oto;
@@ -2634,7 +2636,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
- locals->XFCRemoteSurfaceFlipLatency[k] =
+ locals->XFCRemoteSurfaceFlipLatency[k] =
dml_floor(
mode_lib->vba.XFCRemoteSurfaceFlipDelay
/ (mode_lib->vba.HTotal[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 876b321b30ca..479e2c1a1301 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -396,64 +396,10 @@ static void CalculateUrgentBurstFactor(
static void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
+ struct vba_vars_st *v,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2]);
+ int ReorderingBytes);
+
static void CalculatePixelDeliveryTimes(
unsigned int NumberOfActivePlanes,
double VRatio[],
@@ -4692,66 +4638,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
if (v->UseMinimumRequiredDCFCLK == true) {
- UseMinimumDCFCLK(
- mode_lib,
- v->MaxInterDCNTileRepeaters,
- MaxPrefetchMode,
- v->FinalDRAMClockChangeLatency,
- v->SREnterPlusExitTime,
- v->ReturnBusWidth,
- v->RoundTripPingLatencyCycles,
- ReorderingBytes,
- v->PixelChunkSizeInKByte,
- v->MetaChunkSize,
- v->GPUVMEnable,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->NumberOfActivePlanes,
- v->HostVMMinPageSize,
- v->HostVMMaxNonCachedPageTableLevels,
- v->DynamicMetadataVMEnabled,
- v->ImmediateFlipRequirement[0],
- v->ProgressiveToInterlaceUnitInOPP,
- v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- v->VTotal,
- v->VActive,
- v->DynamicMetadataTransmittedBytes,
- v->DynamicMetadataLinesBeforeActiveRequired,
- v->Interlace,
- v->RequiredDPPCLK,
- v->RequiredDISPCLK,
- v->UrgLatency,
- v->NoOfDPP,
- v->ProjectedDCFCLKDeepSleep,
- v->MaximumVStartup,
- v->TotalVActivePixelBandwidth,
- v->TotalVActiveCursorBandwidth,
- v->TotalMetaRowBandwidth,
- v->TotalDPTERowBandwidth,
- v->TotalNumberOfActiveDPP,
- v->TotalNumberOfDCCActiveDPP,
- v->dpte_group_bytes,
- v->PrefetchLinesY,
- v->PrefetchLinesC,
- v->swath_width_luma_ub_all_states,
- v->swath_width_chroma_ub_all_states,
- v->BytePerPixelY,
- v->BytePerPixelC,
- v->HTotal,
- v->PixelClock,
- v->PDEAndMetaPTEBytesPerFrame,
- v->DPTEBytesPerRow,
- v->MetaRowBytes,
- v->DynamicMetadataEnable,
- v->VActivePixelBandwidth,
- v->VActiveCursorBandwidth,
- v->ReadBandwidthLuma,
- v->ReadBandwidthChroma,
- v->DCFCLKPerState,
- v->DCFCLKState);
+ UseMinimumDCFCLK(mode_lib, v, MaxPrefetchMode, ReorderingBytes);
if (v->ClampMinDCFCLK) {
/* Clamp calculated values to actual minimum */
@@ -6435,10 +6322,6 @@ static void CalculateSwathWidth(
for (k = 0; k < NumberOfActivePlanes; ++k) {
enum odm_combine_mode MainPlaneODMCombine = 0;
- surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
- surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (SourceScan[k] != dm_vert) {
SwathWidthSingleDPPY[k] = ViewportWidth[k];
@@ -6478,8 +6361,6 @@ static void CalculateSwathWidth(
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (SourceScan[k] != dm_vert) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
@@ -6487,6 +6368,7 @@ static void CalculateSwathWidth(
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
} else {
@@ -6498,6 +6380,7 @@ static void CalculateSwathWidth(
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
} else {
@@ -6610,77 +6493,21 @@ static double CalculateUrgentLatency(
return ret;
}
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
- int MaxInterDCNTileRepeaters,
+ struct vba_vars_st *v,
int MaxPrefetchMode,
- double FinalDRAMClockChangeLatency,
- double SREnterPlusExitTime,
- int ReturnBusWidth,
- int RoundTripPingLatencyCycles,
- int ReorderingBytes,
- int PixelChunkSizeInKByte,
- int MetaChunkSize,
- bool GPUVMEnable,
- int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- int NumberOfActivePlanes,
- double HostVMMinPageSize,
- int HostVMMaxNonCachedPageTableLevels,
- bool DynamicMetadataVMEnabled,
- enum immediate_flip_requirement ImmediateFlipRequirement,
- bool ProgressiveToInterlaceUnitInOPP,
- double MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly,
- int VTotal[],
- int VActive[],
- int DynamicMetadataTransmittedBytes[],
- int DynamicMetadataLinesBeforeActiveRequired[],
- bool Interlace[],
- double RequiredDPPCLK[][2][DC__NUM_DPP__MAX],
- double RequiredDISPCLK[][2],
- double UrgLatency[],
- unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX],
- double ProjectedDCFCLKDeepSleep[][2],
- double MaximumVStartup[][2][DC__NUM_DPP__MAX],
- double TotalVActivePixelBandwidth[][2],
- double TotalVActiveCursorBandwidth[][2],
- double TotalMetaRowBandwidth[][2],
- double TotalDPTERowBandwidth[][2],
- unsigned int TotalNumberOfActiveDPP[][2],
- unsigned int TotalNumberOfDCCActiveDPP[][2],
- int dpte_group_bytes[],
- double PrefetchLinesY[][2][DC__NUM_DPP__MAX],
- double PrefetchLinesC[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX],
- unsigned int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX],
- int BytePerPixelY[],
- int BytePerPixelC[],
- int HTotal[],
- double PixelClock[],
- double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX],
- double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX],
- double MetaRowBytes[][2][DC__NUM_DPP__MAX],
- bool DynamicMetadataEnable[],
- double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX],
- double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX],
- double ReadBandwidthLuma[],
- double ReadBandwidthChroma[],
- double DCFCLKPerState[],
- double DCFCLKState[][2])
+ int ReorderingBytes)
{
double NormalEfficiency = 0;
double PTEEfficiency = 0;
double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2] = { { 0 } };
unsigned int i, j, k;
- NormalEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
- : PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
- PTEEfficiency = (HostVMEnable == true ? PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
- / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
+ NormalEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData
+ : v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly) / 100.0;
+ PTEEfficiency = (v->HostVMEnable == true ? v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly
+ / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData : 1.0);
for (i = 0; i < mode_lib->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX] = { 0 };
@@ -6698,58 +6525,58 @@ static void UseMinimumDCFCLK(
double MinimumTvmPlus2Tr0 = 0;
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j]
- + NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]);
+ + v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
- NoOfDPPState[k] = NoOfDPP[i][j][k];
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
+ NoOfDPPState[k] = v->NoOfDPP[i][j][k];
}
- MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime);
- NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j];
- DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ?
- TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j];
- DCFCLKRequiredForAverageBandwidth = dml_max3(ProjectedDCFCLKDeepSleep[i][j],
- (NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth / (MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
- (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / ReturnBusWidth);
-
- ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, TotalNumberOfActiveDPP[i][j], PixelChunkSizeInKByte, TotalNumberOfDCCActiveDPP[i][j],
- MetaChunkSize, GPUVMEnable, HostVMEnable, NumberOfActivePlanes, NoOfDPPState, dpte_group_bytes,
- PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- HostVMMinPageSize, HostVMMaxNonCachedPageTableLevels);
- ExtraLatencyCycles = RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime);
+ NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j];
+ DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ?
+ TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];
+ DCFCLKRequiredForAverageBandwidth = dml_max3(v->ProjectedDCFCLKDeepSleep[i][j],
+ (NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth / (v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100),
+ (NonDPTEBandwidth + DPTEBandwidth / PTEEfficiency) / NormalEfficiency / v->ReturnBusWidth);
+
+ ExtraLatencyBytes = CalculateExtraLatencyBytes(ReorderingBytes, v->TotalNumberOfActiveDPP[i][j], v->PixelChunkSizeInKByte, v->TotalNumberOfDCCActiveDPP[i][j],
+ v->MetaChunkSize, v->GPUVMEnable, v->HostVMEnable, v->NumberOfActivePlanes, NoOfDPPState, v->dpte_group_bytes,
+ v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ v->HostVMMinPageSize, v->HostVMMaxNonCachedPageTableLevels);
+ ExtraLatencyCycles = v->RoundTripPingLatencyCycles + 32 + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth;
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double DCFCLKCyclesRequiredInPrefetch = { 0 };
double ExpectedPrefetchBWAcceleration = { 0 };
double PrefetchTime = { 0 };
- PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k]
- + PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth;
- DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
- / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * DPTEBytesPerRow[i][j][k] / PTEEfficiency
- / NormalEfficiency / ReturnBusWidth + 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
- PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k];
- ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) / (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]);
- DynamicMetadataVMExtraLatency[k] = (GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ?
- UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
- PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait - UrgLatency[i] * ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels
- : GPUVMMaxPageTableLevels - 2) * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
+ PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k]
+ + v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;
+ DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] + v->PDEAndMetaPTEBytesPerFrame[i][j][k] / PTEEfficiency
+ / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 1 : 0) + 2 * v->DPTEBytesPerRow[i][j][k] / PTEEfficiency
+ / NormalEfficiency / v->ReturnBusWidth + 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k];
+ PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k];
+ ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) / (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);
+ DynamicMetadataVMExtraLatency[k] = (v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ?
+ v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0;
+ PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - v->UrgLatency[i] * ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels
+ : v->GPUVMMaxPageTableLevels - 2) * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) - DynamicMetadataVMExtraLatency[k];
if (PrefetchTime > 0) {
double ExpectedVRatioPrefetch = { 0 };
ExpectedVRatioPrefetch = PrefetchPixelLinesTime[k] / (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);
DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]
* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration;
- if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) {
+ if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k]
- + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / ReturnBusWidth;
+ + NoOfDPPState[k] * DPTEBandwidth / PTEEfficiency / NormalEfficiency / v->ReturnBusWidth;
}
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
- if (DynamicMetadataEnable[k] == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
double TsetupPipe = { 0 };
double TdmbfPipe = { 0 };
double TdmsksPipe = { 0 };
@@ -6757,49 +6584,49 @@ static void UseMinimumDCFCLK(
double AllowedTimeForUrgentExtraLatency = { 0 };
CalculateDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
- RequiredDPPCLK[i][j][k],
- RequiredDISPCLK[i][j],
- ProjectedDCFCLKDeepSleep[i][j],
- PixelClock[k],
- HTotal[k],
- VTotal[k] - VActive[k],
- DynamicMetadataTransmittedBytes[k],
- DynamicMetadataLinesBeforeActiveRequired[k],
- Interlace[k],
- ProgressiveToInterlaceUnitInOPP,
+ v->MaxInterDCNTileRepeaters,
+ v->RequiredDPPCLK[i][j][k],
+ v->RequiredDISPCLK[i][j],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->PixelClock[k],
+ v->HTotal[k],
+ v->VTotal[k] - v->VActive[k],
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
+ v->Interlace[k],
+ v->ProgressiveToInterlaceUnitInOPP,
&TsetupPipe,
&TdmbfPipe,
&TdmecPipe,
&TdmsksPipe);
- AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TsetupPipe
+ AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TsetupPipe
- TdmbfPipe - TdmecPipe - TdmsksPipe - DynamicMetadataVMExtraLatency[k];
if (AllowedTimeForUrgentExtraLatency > 0) {
DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(DCFCLKRequiredForPeakBandwidthPerPlane[k],
ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);
} else {
- DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];
}
}
}
DCFCLKRequiredForPeakBandwidth = 0;
- for (k = 0; k <= NumberOfActivePlanes - 1; ++k) {
+ for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {
DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];
}
- MinimumTvmPlus2Tr0 = UrgLatency[i] * (GPUVMEnable == true ? (HostVMEnable == true ?
- (GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : 0);
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ MinimumTvmPlus2Tr0 = v->UrgLatency[i] * (v->GPUVMEnable == true ? (v->HostVMEnable == true ?
+ (v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) : 0);
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double MaximumTvmPlus2Tr0PlusTsw = { 0 };
- MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
+ MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];
if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) {
- DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i];
+ DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];
} else {
DCFCLKRequiredForPeakBandwidth = dml_max3(DCFCLKRequiredForPeakBandwidth, 2 * ExtraLatencyCycles
/ (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0 - PrefetchPixelLinesTime[k] / 4),
(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));
}
}
- DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
+ v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * (1 + mode_lib->vba.PercentMarginOverMinimumRequiredDCFCLK / 100)
* dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));
}
}
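
UseMinimumDCFCLK() loses its long per-field parameter list in favour of a single struct vba_vars_st pointer and is marked noinline_for_stack, so the caller no longer has to marshal dozens of arrays and scalars per call and the function's own frame stays out of the already large caller. A toy sketch of the refactor with invented fields (the real struct has far more members):

#include <stdio.h>

struct vba_state {
	int ReturnBusWidth;
	double UrgLatency;
	double DCFCLKState;
};

/* After the refactor: one pointer instead of many by-value arguments. */
static void use_minimum_dcfclk(struct vba_state *v, int reordering_bytes)
{
	/* invented arithmetic, purely to show fields being read through v */
	v->DCFCLKState = v->UrgLatency * v->ReturnBusWidth + reordering_bytes;
}

int main(void)
{
	struct vba_state v = { .ReturnBusWidth = 64, .UrgLatency = 4.0 };

	use_minimum_dcfclk(&v, 128);
	printf("%f\n", v.DCFCLKState); /* 384.0 */
	return 0;
}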
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index d211cf6d234c..422f17aefd4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -322,6 +322,7 @@ static void calculate_wm_set_for_vlevel(int vlevel,
void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
@@ -329,8 +330,7 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dc_assert_fp_enabled();
- memcpy(&dcn3_01_soc._clock_tmp, &dcn3_01_soc.clock_limits,
- sizeof(dcn3_01_soc.clock_limits));
+ memcpy(s, dcn3_01_soc.clock_limits, sizeof(dcn3_01_soc.clock_limits));
/* Default clock levels are used for diags, which may lead to overclocking. */
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -348,35 +348,42 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
}
}
- dcn3_01_soc._clock_tmp[i].state = i;
- dcn3_01_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn3_01_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_01_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_01_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn3_01_soc._clock_tmp[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_01_soc._clock_tmp[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_01_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_01_soc._clock_tmp[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_01_soc._clock_tmp[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_01_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_01_soc._clock_tmp[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].state = i;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_01_soc.num_states = clk_table->num_entries;
/* duplicate last level */
- dcn3_01_soc._clock_tmp[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- dcn3_01_soc._clock_tmp[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+ s[dcn3_01_soc.num_states] =
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ s[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
}
}
- memcpy(&dcn3_01_soc.clock_limits, &dcn3_01_soc._clock_tmp,
- sizeof(dcn3_01_soc.clock_limits));
+ memcpy(dcn3_01_soc.clock_limits, s, sizeof(dcn3_01_soc.clock_limits));
dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ if ((int)(dcn3_01_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_01_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
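The dcn301 hunk above stops staging the updated clock table in a static "_clock_tmp" member of the SoC bounding box and instead builds it in a scratch buffer owned by the dc (dc->scratch.update_bw_bounding_box.clock_limits), copying the result back in one memcpy. A minimal sketch of that shape follows, with made-up names (soc_limits, scratch, update_limits); it is not the driver code.

#include <string.h>

#define NUM_STATES 8

struct limit { double dcfclk_mhz, dispclk_mhz; };   /* hypothetical entry type */

static struct limit soc_limits[NUM_STATES];          /* plays the role of dcn3_01_soc.clock_limits */

static void update_limits(struct limit *scratch,     /* caller-owned buffer, like dc->scratch */
			  const double *new_dcfclk, int n)
{
	int i;

	/* build the new table in the scratch copy, then publish it in one step */
	memcpy(scratch, soc_limits, sizeof(soc_limits));
	for (i = 0; i < n && i < NUM_STATES; i++)
		scratch[i].dcfclk_mhz = new_dcfclk[i];
	memcpy(soc_limits, scratch, sizeof(soc_limits));
}

Moving the temporary out of the bounding-box struct presumably keeps each static SoC definition from carrying a duplicate of its own clock-limits array purely as working storage.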
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 149a1b17cdf3..b6e99eefe869 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -597,14 +598,14 @@ void dcn31_calculate_wm_and_dlg_fp(
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int j;
dc_assert_fp_enabled();
- memcpy(&dcn3_1_soc._clock_tmp, &dcn3_1_soc.clock_limits,
- sizeof(dcn3_1_soc.clock_limits));
+ memcpy(s, dcn3_1_soc.clock_limits, sizeof(dcn3_1_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -633,38 +634,46 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
}
}
- dcn3_1_soc._clock_tmp[i].state = i;
+ s[i].state = i;
/* Clocks dependent on voltage level. */
- dcn3_1_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn3_1_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_1_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_1_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
- dcn3_1_soc._clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_1_soc._clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_1_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_1_soc._clock_tmp[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_1_soc._clock_tmp[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_1_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_1_soc._clock_tmp[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_1_soc.num_states = clk_table->num_entries;
}
}
- memcpy(&dcn3_1_soc.clock_limits, &dcn3_1_soc._clock_tmp,
- sizeof(dcn3_1_soc.clock_limits));
+ memcpy(dcn3_1_soc.clock_limits, s, sizeof(dcn3_1_soc.clock_limits));
dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ if ((int)(dcn3_1_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
else
@@ -680,7 +689,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ if (bw_params->num_channels > 0)
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
ASSERT(clk_table->num_entries);
@@ -719,6 +732,12 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
*/
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ if ((int)(dcn3_15_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
else
@@ -727,6 +746,7 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
+ struct _vcs_dpi_voltage_scaling_st *s = dc->scratch.update_bw_bounding_box.clock_limits;
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
@@ -734,8 +754,7 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dc_assert_fp_enabled();
- memcpy(&dcn3_16_soc._clock_tmp, &dcn3_16_soc.clock_limits,
- sizeof(dcn3_16_soc.clock_limits));
+ memcpy(s, dcn3_16_soc.clock_limits, sizeof(dcn3_16_soc.clock_limits));
// Default clock levels are used for diags, which may lead to overclocking.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
@@ -757,7 +776,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
@@ -768,44 +788,53 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
closest_clk_lvl = dcn3_16_soc.num_states - 1;
}
- dcn3_16_soc._clock_tmp[i].state = i;
+ s[i].state = i;
/* Clocks dependent on voltage level. */
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ s[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
if (clk_table->num_entries == 1 &&
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ s[i].dcfclk_mhz <
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
/*SMU fix not released yet*/
- dcn3_16_soc._clock_tmp[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ s[i].dcfclk_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
}
- dcn3_16_soc._clock_tmp[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn3_16_soc._clock_tmp[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn3_16_soc._clock_tmp[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+ s[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz *
+ 2 * clk_table->entries[i].wck_ratio;
/* Clocks independent of voltage level. */
- dcn3_16_soc._clock_tmp[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn3_16_soc._clock_tmp[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn3_16_soc._clock_tmp[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn3_16_soc._clock_tmp[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn3_16_soc._clock_tmp[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn3_16_soc._clock_tmp[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn3_16_soc._clock_tmp[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ s[i].dram_bw_per_chan_gbps =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ s[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ s[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ s[i].phyclk_d18_mhz =
+ dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ s[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
}
if (clk_table->num_entries) {
dcn3_16_soc.num_states = clk_table->num_entries;
}
}
- memcpy(&dcn3_16_soc.clock_limits, &dcn3_16_soc._clock_tmp,
- sizeof(dcn3_16_soc.clock_limits));
+ memcpy(dcn3_16_soc.clock_limits, s, sizeof(dcn3_16_soc.clock_limits));
if (max_dispclk_mhz) {
dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
+ if ((int)(dcn3_16_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
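The dcn31, dcn315 and dcn316 hunks above all add the same guard: if a nonzero debug value for DRAM clock-change latency (in nanoseconds) differs from the value cached in the SoC table (in microseconds), the table is refreshed from the debug knob. A standalone sketch of that check, with hypothetical names (soc_latency_us, apply_debug_latency), is below.

static double soc_latency_us = 11.72;     /* cached bounding-box value, microseconds */

static void apply_debug_latency(long debug_latency_ns)
{
	/* override only when the knob is set and actually differs from the table */
	if (debug_latency_ns &&
	    (long)(soc_latency_us * 1000) != debug_latency_ns)
		soc_latency_us = debug_latency_ns / 1000.0;
}

The sketch divides by 1000.0 to keep fractional microseconds, as the dcn301 hunk does; the dcn31/315/316/314 hunks use the integer literal 1000.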
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index d63b4209b14c..8dfe639b6508 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -251,33 +251,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1107,10 +1051,10 @@ static bool CalculatePrefetchSchedule(
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
/*rev 99*/
prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane);
- max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+ max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
+ prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
@@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency ;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -6933,8 +6711,6 @@ static void CalculateSwathWidth(
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
@@ -6945,6 +6721,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
@@ -6956,6 +6734,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
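The display_mode_vba_31.c changes shrink two very wide helpers: instead of forwarding dozens of per-plane values and output pointers, the callers now pass the plane index k and the helper dereferences mode_lib->vba directly, writing results into the per-plane arrays of vba_vars_st. A reduced sketch of that shape follows, with invented names (planes_state, calc_flip); it is not the DML routine.

#define MAX_PLANES 8

struct planes_state {                    /* stands in for struct vba_vars_st */
	double vratio[MAX_PLANES];
	double line_time_us[MAX_PLANES];
	unsigned int dpte_row_height[MAX_PLANES];
	int flip_supported[MAX_PLANES];  /* result array, filled in place */
};

/* Before: calc_flip(double vratio, unsigned int dpte_row_height, ..., int *supported) */
/* After:  pass the shared state plus the plane index and write through it.            */
static void calc_flip(struct planes_state *v, unsigned int k, double fetch_time_us)
{
	double min_row_time = v->dpte_row_height[k] * v->line_time_us[k] / v->vratio[k];

	v->flip_supported[k] = (fetch_time_us <= min_row_time);
}

Besides keeping each call site to a handful of lines, not copying the arguments presumably trims the stack frames of the mode-support functions, which are already among the largest in the driver.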
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 34a5d0f87b5f..cf420ad2b8dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -194,6 +194,9 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
if (bw_params->num_channels > 0)
dcn3_14_soc.num_chans = bw_params->num_channels;
@@ -261,8 +264,13 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
+ if ((int)(dcn3_14_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_14_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
else
dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
}
@@ -315,6 +323,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
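In the dcn314_fpu.c hunk, vblank_nom is derived from a nominal vblank duration: pix_clk_100hz / 10000.0 converts the pixel clock to MHz, h_total divided by that is the line time in microseconds, and dividing VBlankNomDefaultUS by the line time gives the nominal vblank expressed in lines. A small worked sketch with made-up numbers:

/* Example: 148.5 MHz pixel clock and a 2200-px line give ~14.81 us per  */
/* line, so a 668 us nominal vblank corresponds to roughly 45 lines.     */
static double vblank_nom_lines(double vblank_nom_default_us,
			       unsigned int h_total, unsigned int pix_clk_100hz)
{
	double pix_clk_mhz = pix_clk_100hz / 10000.0;
	double line_time_us = h_total / pix_clk_mhz;

	return vblank_nom_default_us / line_time_us;   /* e.g. 668 / 14.81 ~ 45 */
}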
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index fc4d7474c111..0d12fd079cd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -61,7 +61,7 @@
// fudge factor for min dcfclk calclation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
-struct {
+typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
@@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1362,7 +1306,7 @@ static bool CalculatePrefetchSchedule(
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || myPipe->DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) : 0.0); // TODO: Did someone else add this??
#else
- LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#endif
#ifdef __DML_VBA_DEBUG__
@@ -1599,7 +1543,7 @@ static void CalculateDCCConfiguration(
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
- enum {
+ typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
@@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -4071,9 +3941,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
- || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
- || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+ if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
@@ -5414,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5498,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5681,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5758,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5864,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5879,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5891,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -7049,8 +6825,6 @@ static void CalculateSwathWidth(
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
@@ -7061,6 +6835,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
@@ -7072,6 +6848,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
@@ -7157,12 +6935,13 @@ static double CalculateExtraLatencyBytes(
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
- else
+ } else {
HostVMDynamicLevels = 0;
+ }
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
- if (GPUVMEnable == true)
+ if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k)
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
@@ -7406,7 +7185,7 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = HTotal / PixelClock;
unsigned int vblank_actual = VTotal - VActive;
unsigned int vblank_nom_default_in_line = dml_floor(VBlankNomDefaultUS / line_time_us, 1.0);
- unsigned int vblank_nom_input = dml_min(VBlankNom, vblank_nom_default_in_line);
+ unsigned int vblank_nom_input = VBlankNom; //dml_min(VBlankNom, vblank_nom_default_in_line);
unsigned int vblank_avail = vblank_nom_input == 0 ? vblank_nom_default_in_line : vblank_nom_input;
vblank_size = (unsigned int) dml_min(vblank_actual, vblank_avail);
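The display_mode_vba_314.c hunks above repeatedly replace long per-plane argument lists with a pointer to the shared vba_vars_st state plus a plane index k, presumably to shrink the stack frames of these FPU-context functions. Below is a minimal, self-contained sketch of that pattern; demo_vars and its fields are hypothetical stand-ins, not the driver's types.

#include <stdio.h>

/* Hypothetical stand-in for vba_vars_st; not the driver's type. */
struct demo_vars {
	double HTotal[4];
	double PixelClock[4];
};

/* Old shape: every per-plane value arrives as its own scalar parameter. */
static double line_time_from_args(double htotal, double pixel_clock)
{
	return htotal / pixel_clock;
}

/* New shape: pass the shared state and the plane index, look up fields inside. */
static double line_time_from_state(const struct demo_vars *v, unsigned int k)
{
	return v->HTotal[k] / v->PixelClock[k];
}

int main(void)
{
	struct demo_vars v = {
		.HTotal     = { 2200.0 },
		.PixelClock = { 148.5 },	/* MHz, so the line time comes out in microseconds */
	};

	printf("from args:  %f us\n", line_time_from_args(v.HTotal[0], v.PixelClock[0]));
	printf("from state: %f us\n", line_time_from_state(&v, 0));
	return 0;
}

Both calls print the same value; the second form is what CalculateFlipSchedule() and CalculateWatermarksAndDRAMSpeedChangeSupport() switch to in this patch.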
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 8118cfc5b405..0571700f53f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
},
},
.num_states = 1,
- .sr_exit_time_us = 20.16,
- .sr_enter_plus_exit_time_us = 27.13,
+ .sr_exit_time_us = 42.97,
+ .sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
@@ -244,6 +244,50 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
}
/**
+ * Finds dummy_latency_index when MCLK switching using firmware-based
+ * vblank stretch is enabled. This function iterates through the table
+ * of dummy p-state latencies until it finds the lowest value that allows
+ * dm_allow_self_refresh_and_mclk_switch to happen.
+ */
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ const int max_latency_table_entries = 4;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ int dummy_latency_index = 0;
+
+ dc_assert_fp_enabled();
+
+ while (dummy_latency_index < max_latency_table_entries) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+
+ if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
+ break;
+
+ dummy_latency_index++;
+ }
+
+ if (dummy_latency_index == max_latency_table_entries) {
+ ASSERT(dummy_latency_index != max_latency_table_entries);
+ /* If the execution gets here, it means dummy p_states are
+ * not possible. This should never happen and would mean
+ * something is severely wrong.
+ * Here we reset dummy_latency_index to 3, because it is
+ * better to have underflows than system crashes.
+ */
+ dummy_latency_index = max_latency_table_entries - 1;
+ }
+
+ return dummy_latency_index;
+}
+
+/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
*
@@ -286,41 +330,92 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
}
-bool dcn32_predict_pipe_split(struct dc_state *context, display_pipe_params_st pipe, int index)
+/**
+ * *******************************************************************************************
+ * dcn32_predict_pipe_split: Predict if pipe split will occur for a given DML pipe
+ *
+ * This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
+ * ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
+ * determined by DPPClk requirements
+ *
+ * This function follows the same policy as DML:
+ * - Check for ODM combine requirements / policy first
+ * - MPC combine is only chosen if there is no ODM combine requirements / policy in place, and
+ * MPC is required
+ *
+ * @param [in]: context: New DC state to be programmed
+ * @param [in]: pipe_e2e: DML pipe end to end context
+ *
+ * @return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
+ *
+ * *******************************************************************************************
+ */
+uint8_t dcn32_predict_pipe_split(struct dc_state *context,
+ display_e2e_pipe_params_st *pipe_e2e)
{
double pscl_throughput;
double pscl_throughput_chroma;
double dpp_clk_single_dpp, clock;
double clk_frequency = 0.0;
double vco_speed = context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz;
+ bool total_available_pipes_support = false;
+ uint32_t number_of_dpp = 0;
+ enum odm_combine_mode odm_mode = dm_odm_combine_mode_disabled;
+ double req_dispclk_per_surface = 0;
+ uint8_t num_splits = 0;
dc_assert_fp_enabled();
- dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe.scale_ratio_depth.hscl_ratio,
- pipe.scale_ratio_depth.hscl_ratio_c,
- pipe.scale_ratio_depth.vscl_ratio,
- pipe.scale_ratio_depth.vscl_ratio_c,
- context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
- context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
- pipe.dest.pixel_rate_mhz,
- pipe.src.source_format,
- pipe.scale_taps.htaps,
- pipe.scale_taps.htaps_c,
- pipe.scale_taps.vtaps,
- pipe.scale_taps.vtaps_c,
- /* Output */
- &pscl_throughput, &pscl_throughput_chroma,
- &dpp_clk_single_dpp);
+ dml32_CalculateODMMode(context->bw_ctx.dml.ip.maximum_pixels_per_line_per_dsc_unit,
+ pipe_e2e->pipe.dest.hactive,
+ pipe_e2e->dout.output_format,
+ pipe_e2e->dout.output_type,
+ pipe_e2e->pipe.dest.odm_combine_policy,
+ context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
+ context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
+ pipe_e2e->dout.dsc_enable != 0,
+ 0, /* TotalNumberOfActiveDPP can be 0 since we're predicting pipe split requirement */
+ context->bw_ctx.dml.ip.max_num_dpp,
+ pipe_e2e->pipe.dest.pixel_rate_mhz,
+ context->bw_ctx.dml.soc.dcn_downspread_percent,
+ context->bw_ctx.dml.ip.dispclk_ramp_margin_percent,
+ context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz,
+ pipe_e2e->dout.dsc_slices,
+ /* Output */
+ &total_available_pipes_support,
+ &number_of_dpp,
+ &odm_mode,
+ &req_dispclk_per_surface);
+
+ dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe_e2e->pipe.scale_ratio_depth.hscl_ratio,
+ pipe_e2e->pipe.scale_ratio_depth.hscl_ratio_c,
+ pipe_e2e->pipe.scale_ratio_depth.vscl_ratio,
+ pipe_e2e->pipe.scale_ratio_depth.vscl_ratio_c,
+ context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
+ context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
+ pipe_e2e->pipe.dest.pixel_rate_mhz,
+ pipe_e2e->pipe.src.source_format,
+ pipe_e2e->pipe.scale_taps.htaps,
+ pipe_e2e->pipe.scale_taps.htaps_c,
+ pipe_e2e->pipe.scale_taps.vtaps,
+ pipe_e2e->pipe.scale_taps.vtaps_c,
+ /* Output */
+ &pscl_throughput, &pscl_throughput_chroma,
+ &dpp_clk_single_dpp);
clock = dpp_clk_single_dpp * (1 + context->bw_ctx.dml.soc.dcn_downspread_percent / 100);
if (clock > 0)
- clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0));
+ clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0) / clock);
- if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[index].dppclk_mhz)
- return true;
- else
- return false;
+ if (odm_mode == dm_odm_combine_mode_2to1)
+ num_splits = 1;
+ else if (odm_mode == dm_odm_combine_mode_4to1)
+ num_splits = 3;
+ else if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dppclk_mhz)
+ num_splits = 1;
+
+ return num_splits;
}
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
@@ -560,6 +655,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
bool valid_assignment_found = false;
unsigned int free_pipes = dcn32_get_num_free_pipes(dc, context);
bool current_assignment_freesync = false;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -573,8 +669,16 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ /* SubVP pipe candidate requirements:
+ * - Refresh rate < 120Hz
+ * - Not able to switch in vactive naturally (switching in active means the
+ * DET provides enough buffer to hide the P-State switch latency -- trying
+ * to combine this with SubVP can cause issues with the scheduling).
+ * - Not TMZ surface
+ */
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120) {
+ pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
+ vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
@@ -998,8 +1102,10 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
/* This may adjust vlevel and maxMpcComb */
- if (*vlevel < context->bw_ctx.dml.soc.num_states)
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
+ }
/* Conditions for setting up phantom pipes for SubVP:
* 1. Not force disable SubVP
@@ -1014,6 +1120,17 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
+ /* to re-initialize viewport after the pipe merge */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
@@ -1082,17 +1199,31 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->res_pool->funcs->remove_phantom_pipes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
- } else {
- // only call dcn20_validate_apply_pipe_split_flags if we found a supported config
- memset(split, 0, MAX_PIPES * sizeof(int));
- memset(merge, 0, MAX_PIPES * sizeof(bool));
- *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* This may adjust vlevel and maxMpcComb */
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
+ }
+ } else {
// Must populate phantom DLG params before programming hardware / timing for phantom pipe
DC_FP_START();
dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
DC_FP_END();
+ /* Call validate_apply_pipe_split_flags after calling DML getters for
+ * phantom dlg params, or some of the VBA params indicating pipe split
+ * can be overwritten by the getters.
+ *
+ * When setting up SubVP config, all pipes are merged before attempting to
+ * add phantom pipes. If pipe split (ODM / MPC) is required, both the main
+ * and phantom pipes will be split in the regular pipe splitting sequence.
+ */
+ memset(split, 0, MAX_PIPES * sizeof(int));
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+ *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
// until driver has acquired the DMCUB lock to do it safely.
}
@@ -1416,6 +1547,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
memset(split, 0, sizeof(split));
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML
+ vba->VoltageLevel = vlevel;
}
}
@@ -1458,6 +1591,28 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+ /* 2:1 ODM + MPC Split MPO to Single Pipe + MPC Split MPO */
+ if (pipe->bottom_pipe) {
+ if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
+ /*MPC split rules will handle this case*/
+ pipe->bottom_pipe->top_pipe = NULL;
+ } else {
+ if (pipe->prev_odm_pipe->bottom_pipe) {
+ /* 3 plane MPO*/
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
+ pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
+ } else {
+ /* 2 plane MPO*/
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
+ pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
+ }
+ }
+ }
+
+ if (pipe->top_pipe) {
+ pipe->top_pipe->bottom_pipe = NULL;
+ }
+
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
@@ -1590,8 +1745,20 @@ bool dcn32_internal_validate_bw(struct dc *dc,
goto validate_fail;
}
- if (repopulate_pipes)
+ if (repopulate_pipes) {
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+
+ /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
+ * we have to re-calculate the DET allocation and run through DML once more to
+ * ensure all the params are calculated correctly. We do not need to run the
+ * pipe split check again after this call (pipes are already split / merged).
+ */
+ if (!fast_validate) {
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ }
+ }
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -1637,7 +1804,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+ dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn30_find_dummy_latency_index_for_fw_based_mclk_switch
@@ -1776,7 +1943,11 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ /* On DCN32/321, PMFW will set PSTATE_CHANGE_TYPE = 1 (FCLK) for UCLK dummy p-state.
+ * In this case we must program FCLK WM Set C to use the UCLK dummy p-state WM
+ * value.
+ */
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
@@ -1873,6 +2044,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+ int i;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+ max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+ max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
+
+ /* Scan through the clock values we currently have and, if any of them are 0,
+ * populate them with the corresponding dcn3_2_soc.clock_limits[] value.
+ *
+ * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being 0
+ * will cause building the clock table to be skipped.
+ */
+ if (max_dcfclk_mhz == 0)
+ bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ if (max_dispclk_mhz == 0)
+ bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+ if (max_dtbclk_mhz == 0)
+ bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+ if (max_uclk_mhz == 0)
+ bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
+
static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
@@ -2107,6 +2317,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
@@ -2136,13 +2347,16 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_2_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_2_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_2_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_2_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_2_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
}
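dcn32_patch_dpm_table() above scans the DPM clock table for per-clock maxima and backfills level 0 from the SoC defaults when a clock only ever reports 0. A trimmed-down sketch of that backfill pattern, using a hypothetical two-clock table rather than clk_bw_params:

#include <stdio.h>

#define DEMO_NUM_DPM_LVL 4

/* Hypothetical, reduced clock table entry; not the driver's struct. */
struct demo_clk_entry {
	unsigned int dcfclk_mhz;
	unsigned int dispclk_mhz;
};

/*
 * Find the maximum reported value for each clock; if a clock never reports a
 * non-zero value, seed level 0 with a default so a later table build is not
 * skipped. Mirrors the shape of dcn32_patch_dpm_table() with fewer clocks.
 */
static void demo_patch_dpm_table(struct demo_clk_entry *entries,
				 unsigned int default_dcfclk_mhz,
				 unsigned int default_dispclk_mhz)
{
	unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0;
	int i;

	for (i = 0; i < DEMO_NUM_DPM_LVL; i++) {
		if (entries[i].dcfclk_mhz > max_dcfclk_mhz)
			max_dcfclk_mhz = entries[i].dcfclk_mhz;
		if (entries[i].dispclk_mhz > max_dispclk_mhz)
			max_dispclk_mhz = entries[i].dispclk_mhz;
	}

	if (max_dcfclk_mhz == 0)
		entries[0].dcfclk_mhz = default_dcfclk_mhz;
	if (max_dispclk_mhz == 0)
		entries[0].dispclk_mhz = default_dispclk_mhz;
}

int main(void)
{
	/* DCFCLK is all zeroes here, DISPCLK has one valid level. */
	struct demo_clk_entry entries[DEMO_NUM_DPM_LVL] = {
		{ .dcfclk_mhz = 0, .dispclk_mhz = 600 },
	};

	demo_patch_dpm_table(entries, 1564, 2150);	/* defaults are made-up values */
	printf("level 0: dcfclk=%u dispclk=%u\n",
	       entries[0].dcfclk_mhz, entries[0].dispclk_mhz);
	return 0;
}

Only DCFCLK gets backfilled in this run, because DISPCLK already had a non-zero maximum.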
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 3ed06ab855be..3a3dc2ce4c73 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -29,11 +29,6 @@
#include "clk_mgr_internal.h"
-#define DCN3_2_DEFAULT_DET_SIZE 256
-#define DCN3_2_MAX_DET_SIZE 1152
-#define DCN3_2_MIN_DET_SIZE 128
-#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
-
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr);
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
@@ -41,9 +36,8 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn32_predict_pipe_split(struct dc_state *context,
- display_pipe_params_st pipe,
- int index);
+uint8_t dcn32_predict_pipe_split(struct dc_state *context,
+ display_e2e_pipe_params_st *pipe_e2e);
void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
@@ -71,4 +65,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
#endif
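The newly exported dcn32_find_dummy_latency_index_for_fw_based_mclk_switch() walks a small table of dummy p-state latencies and keeps the first entry the mode-support check still accepts, falling back to the last entry otherwise. A standalone sketch of that search, where validate() is a hypothetical stand-in for the dcn32_internal_validate_bw() call plus the DRAMClockChangeSupport lookup:

#include <stdbool.h>
#include <stdio.h>

/*
 * Return the index of the first latency the validator accepts; if none is
 * accepted, fall back to the last entry rather than failing, mirroring the
 * "better to have underflows than system crashes" fallback above.
 */
static int find_dummy_latency_index(const double *latency_table_us, int table_entries,
				    bool (*validate)(double latency_us))
{
	int i;

	for (i = 0; i < table_entries; i++) {
		if (validate(latency_table_us[i]))
			return i;
	}

	return table_entries - 1;
}

/* Toy validator: pretend anything at or below 40 us can still switch. */
static bool toy_validate(double latency_us)
{
	return latency_us <= 40.0;
}

int main(void)
{
	const double table_us[4] = { 50.0, 38.0, 9.0, 5.0 };
	int idx = find_dummy_latency_index(table_us, 4, toy_validate);

	printf("selected dummy latency index: %d (%.1f us)\n", idx, table_us[idx]);
	return 0;
}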
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index cb2025771646..75be1e1ce543 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -677,9 +677,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
dml_ceil((double) v->WritebackDelay[mode_lib->vba.VoltageLevel][k]
/ (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1));
- // Clamp to max OTG vstartup register limit
- if (v->MaxVStartupLines[k] > 1023)
- v->MaxVStartupLines[k] = 1023;
+ // Clamp to max OTG vstartup register limit
+ if (v->MaxVStartupLines[k] > 1023)
+ v->MaxVStartupLines[k] = 1023;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d MaxVStartupLines = %d\n", __func__, k, v->MaxVStartupLines[k]);
@@ -755,30 +755,18 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
- &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
- mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
+ v,
+ k,
+ v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+ v->DSCDelay[k],
+ (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
- mode_lib->vba.TCalc,
+ v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
@@ -792,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.SwathHeightC[k],
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
TWait,
/* Output */
&v->DSTXAfterScaler[k],
@@ -1163,58 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLK,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- v->dpte_group_bytes,
- v->meta_row_height,
- v->meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+ v->DCFCLK,
+ v->ReturnBW,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLK,
+ v->SOCCLK,
v->DCFCLKDeepSleep,
- mode_lib->vba.DETBufferSizeY,
- mode_lib->vba.DETBufferSizeC,
- mode_lib->vba.SwathHeightY,
- mode_lib->vba.SwathHeightC,
- mode_lib->vba.LBBitPerPixel,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.DPPPerPlane,
+ v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
/* Output */
- &v->Watermark,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
v->MaxActiveDRAMClockChangeLatencySupported,
v->SubViewportLinesNeededInMALL,
@@ -1806,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.Read256BlockHeightC[k],
&mode_lib->vba.Read256BlockWidthY[k],
&mode_lib->vba.Read256BlockWidthC[k],
- &mode_lib->vba.MicroTileHeightY[k],
- &mode_lib->vba.MicroTileHeightC[k],
- &mode_lib->vba.MicroTileWidthY[k],
- &mode_lib->vba.MicroTileWidthC[k]);
+ &mode_lib->vba.MacroTileHeightY[k],
+ &mode_lib->vba.MacroTileHeightC[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MacroTileWidthC[k]);
}
/*Bandwidth Support Check*/
@@ -2034,6 +1992,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2045,6 +2004,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
+ mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportNoDSC,
@@ -2055,6 +2015,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2066,6 +2027,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading,
mode_lib->vba.DISPCLKRampingMargin,
mode_lib->vba.DISPCLKDPPCLKVCOSpeed,
+ mode_lib->vba.NumberOfDSCSlices[k],
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalAvailablePipesSupportDSC,
@@ -2659,10 +2621,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
- mode_lib->vba.MicroTileWidthY,
- mode_lib->vba.MicroTileWidthC,
- mode_lib->vba.MicroTileHeightY,
- mode_lib->vba.MicroTileHeightC,
+ mode_lib->vba.MacroTileWidthY,
+ mode_lib->vba.MacroTileWidthC,
+ mode_lib->vba.MacroTileHeightY,
+ mode_lib->vba.MacroTileHeightC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
@@ -2709,10 +2671,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -3258,63 +3220,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
+ v,
+ k,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
- mode_lib->vba.DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal +
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- mode_lib->vba.SwathWidthYThisState[k] /
- mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup,
- mode_lib->vba.MaximumVStartup[i][j][k]),
- mode_lib->vba.MaximumVStartup[i][j][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.UrgLatency[i],
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
- mode_lib->vba.MetaRowBytes[i][j][k],
- mode_lib->vba.DPTEBytesPerRow[i][j][k],
- mode_lib->vba.PrefetchLinesY[i][j][k],
- mode_lib->vba.SwathWidthYThisState[k],
- mode_lib->vba.PrefillY[k],
- mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[i][j][k],
- mode_lib->vba.SwathWidthCThisState[k],
- mode_lib->vba.PrefillC[k],
- mode_lib->vba.MaxNumSwC[k],
- mode_lib->vba.swath_width_luma_ub_this_state[k],
- mode_lib->vba.swath_width_chroma_ub_this_state[k],
- mode_lib->vba.SwathHeightYThisState[k],
- mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+ v->DSCDelayPerState[i][k],
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
- &mode_lib->vba.LineTimesForPrefetch[k],
- &mode_lib->vba.PrefetchBW[k],
- &mode_lib->vba.LinesForMetaPTE[k],
- &mode_lib->vba.LinesForMetaAndDPTERow[k],
- &mode_lib->vba.VRatioPreY[i][j][k],
- &mode_lib->vba.VRatioPreC[i][j][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
- &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.prefetch_vmrow_bw[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+ &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup
@@ -3557,65 +3503,35 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[i][j],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLKState[i][j],
- mode_lib->vba.ReturnBWPerState[i][j],
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- mode_lib->vba.dpte_group_bytes,
- mode_lib->vba.meta_row_height,
- mode_lib->vba.meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[i][j],
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLKPerState[i],
- mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
- mode_lib->vba.DETBufferSizeYThisState,
- mode_lib->vba.DETBufferSizeCThisState,
- mode_lib->vba.SwathHeightYThisState,
- mode_lib->vba.SwathHeightCThisState,
- mode_lib->vba.LBBitPerPixel,
- mode_lib->vba.SwathWidthYThisState, // 24
- mode_lib->vba.SwathWidthCThisState,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.NoOfDPPThisState,
- mode_lib->vba.BytePerPixelInDETY,
- mode_lib->vba.BytePerPixelInDETC,
+ v->SOCCLKPerState[i],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->SwathWidthYThisState, // 24
+ v->SwathWidthCThisState,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
- mode_lib->vba.UnboundedRequestEnabledThisState,
- mode_lib->vba.CompressedBufferSizeInkByteThisState,
+ v->UnboundedRequestEnabledThisState,
+ v->CompressedBufferSizeInkByteThisState,
/* Output */
- &mode_lib->vba.Watermark, // Store the values in vba
- &mode_lib->vba.DRAMClockChangeSupport[i][j],
+ &v->DRAMClockChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
- &mode_lib->vba.FCLKChangeSupport[i][j],
+ &v->FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
- mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMarginPerState[i][j]);
}
}
} // End of Prefetch Check
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 05fc14a47fba..ad66e241f9ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -27,6 +27,8 @@
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
+#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
+
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
@@ -719,8 +721,8 @@ void dml32_CalculateSwathWidth(
unsigned int surface_width_ub_l;
unsigned int surface_height_ub_l;
- unsigned int surface_width_ub_c;
- unsigned int surface_height_ub_c;
+ unsigned int surface_width_ub_c = 0;
+ unsigned int surface_height_ub_c = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -784,21 +786,6 @@ void dml32_CalculateSwathWidth(
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
- dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
- dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
- dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
- dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
- dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
-#endif
if (!IsVertical(SourceRotation[k])) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
@@ -818,6 +805,7 @@ void dml32_CalculateSwathWidth(
Read256BytesBlockWidthY[k]);
}
if (BytePerPixC[k] > 0) {
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c,
dml_floor(ViewportXStartC[k] + SwathWidthC[k] +
@@ -848,6 +836,7 @@ void dml32_CalculateSwathWidth(
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
}
if (BytePerPixC[k] > 0) {
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c,
dml_floor(ViewportYStartC[k] + SwathWidthC[k] +
@@ -866,6 +855,16 @@ void dml32_CalculateSwathWidth(
}
#ifdef __DML_VBA_DEBUG__
+ dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
+ dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
+ dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
+ dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
+ dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
+ dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
+ dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%d swath_width_luma_ub=%0d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%d swath_width_chroma_ub=%0d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%d MaximumSwathHeightY=%0d\n", __func__, k, MaximumSwathHeightY[k]);
@@ -1182,6 +1181,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -1193,6 +1193,7 @@ void dml32_CalculateODMMode(
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
+ unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
@@ -1228,7 +1229,8 @@ void dml32_CalculateODMMode(
if (!(Output == dm_hdmi || Output == dm_dp || Output == dm_edp) && (ODMUse == dm_odm_combine_policy_4to1 ||
((SurfaceRequiredDISPCLKWithODMCombineTwoToOne > StateDispclk ||
- (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > 2 * MaximumPixelsPerLinePerDSCUnit))
+ || NumberOfDSCSlices > 8)))) {
if (TotalNumberOfActiveDPP + 4 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_4to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
@@ -1239,7 +1241,8 @@ void dml32_CalculateODMMode(
} else if (Output != dm_hdmi && (ODMUse == dm_odm_combine_policy_2to1 ||
(((SurfaceRequiredDISPCLKWithoutODMCombine > StateDispclk &&
SurfaceRequiredDISPCLKWithODMCombineTwoToOne <= StateDispclk) ||
- (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit)))))) {
+ (DSCEnable && (HActive > MaximumPixelsPerLinePerDSCUnit))
+ || (NumberOfDSCSlices <= 8 && NumberOfDSCSlices > 4))))) {
if (TotalNumberOfActiveDPP + 2 <= MaxNumDPP) {
*ODMMode = dm_odm_combine_mode_2to1;
*RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
@@ -1253,6 +1256,29 @@ void dml32_CalculateODMMode(
else
*TotalAvailablePipesSupport = false;
}
+ if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
+ ODMUse != dm_odm_combine_policy_4to1) {
+ if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
+ *ODMMode == dm_odm_combine_mode_4to1) {
+ *ODMMode = dm_odm_combine_mode_4to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
+ *NumberOfDPP = 4;
+ } else {
+ *ODMMode = dm_odm_combine_mode_2to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
+ *NumberOfDPP = 2;
+ }
+ }
+ if (Output == dm_hdmi && OutFormat == dm_420 &&
+ HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ }
}
double dml32_CalculateRequiredDispclk(
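The DCN32_MAX_FMT_420_BUFFER_WIDTH branches added above cap how wide a single 4:2:0 formatter buffer may be (4096 pixels) and force ODM combine, or mark the mode unsupported, when HActive exceeds that width. The following is only an illustrative sketch of that decision ladder, not code from the patch: the helper name, the simplified enum, and main() are invented for the example, and the ODM-policy and already-selected-mode checks from the real function are omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

#define MAX_FMT_420_BUFFER_WIDTH 4096

enum odm_mode { ODM_DISABLED, ODM_2TO1, ODM_4TO1 };

/* Returns false when the 4:2:0 timing cannot be supported at all. */
static bool pick_odm_for_420(unsigned int hactive, bool is_hdmi,
			     enum odm_mode *mode)
{
	if (hactive <= MAX_FMT_420_BUFFER_WIDTH)
		return true;		/* a single formatter buffer is enough */
	if (is_hdmi)
		return false;		/* HDMI cannot fall back to ODM combine here */
	if (hactive > MAX_FMT_420_BUFFER_WIDTH * 4)
		return false;		/* even four segments are too narrow */
	if (hactive > MAX_FMT_420_BUFFER_WIDTH * 2)
		*mode = ODM_4TO1;	/* need four pipe segments */
	else
		*mode = ODM_2TO1;	/* two segments are sufficient */
	return true;
}

int main(void)
{
	enum odm_mode mode = ODM_DISABLED;
	bool ok = pick_odm_for_420(7680, false, &mode); /* e.g. 8K-wide 4:2:0 */

	printf("supported=%d mode=%d\n", ok, mode);	/* expect 2:1 combine */
	return 0;
}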
@@ -1870,7 +1896,7 @@ void dml32_CalculateSurfaceSizeInMall(
if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
}
- *ExceededMALLSize = (TotalSurfaceSizeInMALL <= MALLAllocatedForDCN * 1024 * 1024 ? false : true);
+ *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024);
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
@@ -3363,28 +3389,14 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -3425,6 +3437,7 @@ bool dml32_CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
@@ -3461,27 +3474,27 @@ bool dml32_CalculatePrefetchSchedule(
double Tsw_est1 = 0;
double Tsw_est3 = 0;
- if (GPUVMEnable == true && HostVMEnable == true)
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true)
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
else
HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
- dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+ dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+ dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
- dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
- __func__, HostVMEnable, HostVMInefficiencyFactor);
+ dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+ __func__, v->HostVMEnable, HostVMInefficiencyFactor);
#endif
dml32_CalculateVUpdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->Dppclk,
myPipe->Dispclk,
myPipe->DCFClkDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
@@ -3496,19 +3509,19 @@ bool dml32_CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
- Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true)
+ if (v->DynamicMetadataVMEnabled == true)
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
#ifdef __DML_VBA_ALLOW_DELTA__
- if (DynamicMetadataEnable == false)
+ if (v->DynamicMetadataEnable[k] == false)
*Tdmdl = 0.0;
#endif
- if (DynamicMetadataEnable == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
@@ -3528,17 +3541,17 @@ bool dml32_CalculatePrefetchSchedule(
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + Tvm_trips : 0);
+ *Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+ v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
@@ -3564,7 +3577,7 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
#endif
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
@@ -3581,13 +3594,13 @@ bool dml32_CalculatePrefetchSchedule(
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
- if (GPUVMPageTableLevels >= 3) {
+ if (v->GPUVMMaxPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
- } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
+ (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+ } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
@@ -3622,7 +3635,7 @@ bool dml32_CalculatePrefetchSchedule(
min_Lsw = dml_max(min_Lsw, 1.0);
Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_oto = dml_max3(
Tvm_trips,
*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
@@ -3630,7 +3643,7 @@ bool dml32_CalculatePrefetchSchedule(
} else
Tvm_oto = LineTime / 4.0;
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max4(
Tr0_trips,
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
@@ -3833,7 +3846,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor / prefetch_bw_equ,
Tvm_trips, LineTime / 4);
@@ -3841,7 +3854,7 @@ bool dml32_CalculatePrefetchSchedule(
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
(LineTime - Tvm_equ) / 2, LineTime / 4);
@@ -4206,58 +4219,28 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
@@ -4277,7 +4260,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
double ActiveClockChangeLatencyHidingY;
double ActiveClockChangeLatencyHidingC;
double ActiveClockChangeLatencyHiding;
- double EffectiveDETBufferSizeY;
+ double EffectiveDETBufferSizeY;
double ActiveFCLKChangeLatencyMargin[DC__NUM_DPP__MAX];
double USRRetrainingLatencyMargin[DC__NUM_DPP__MAX];
double TotalPixelBW = 0.0;
@@ -4299,136 +4282,136 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
- Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
- Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+ v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
- Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
- Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
- Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
- dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
- dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
- dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
- dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
- dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+ dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+ dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+ dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+ dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
- __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+ __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
#endif
TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (WritebackEnable[k] == true)
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->WritebackEnable[k] == true)
TotalActiveWriteback = TotalActiveWriteback + 1;
}
if (TotalActiveWriteback <= 1) {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
- + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+ + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
if (TotalActiveWriteback <= 1) {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
- __func__, Watermark->WritebackDRAMClockChangeWatermark);
- dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
- dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
- dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+ __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+ dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+ dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+ dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
#endif
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
- SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+ SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
- LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
- dml_print("DML::%s: k=%d, LineBufferSize = %d\n", __func__, k, LineBufferSize);
- dml_print("DML::%s: k=%d, LBBitPerPixel = %d\n", __func__, k, LBBitPerPixel[k]);
- dml_print("DML::%s: k=%d, HRatio = %f\n", __func__, k, HRatio[k]);
- dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
+ dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+ dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal);
+ dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]);
+ dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]);
+ dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]);
#endif
- EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
- * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+ / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
+ if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
- / PixelClock[k] / VRatio[k];
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
- / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+ / v->VRatioChroma[k];
ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
- / PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+ / v->PixelClock[k];
+ if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
- / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatioChroma[k];
}
ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
ActiveClockChangeLatencyHidingC);
@@ -4436,24 +4419,24 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->DRAMClockChangeWatermark;
- ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->FCLKChangeWatermark;
- USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
-
- if (WritebackEnable[k]) {
- WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
- / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64)
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.DRAMClockChangeWatermark;
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.FCLKChangeWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
+
+ if (v->WritebackEnable[k]) {
+ WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64)
WritebackLatencyHiding = WritebackLatencyHiding / 2;
WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- - Watermark->WritebackDRAMClockChangeWatermark;
+ - v->Watermark.WritebackDRAMClockChangeWatermark;
WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- - Watermark->WritebackFCLKChangeWatermark;
+ - v->Watermark.WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
WritebackFCLKChangeLatencyMargin);
@@ -4461,22 +4444,22 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
- (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+ (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
0 :
(ActiveDRAMClockChangeLatencyMargin[k]
+ mmSOCParameters.DRAMClockChangeLatency);
}
- for (i = 0; i < NumberOfActiveSurfaces; ++i) {
- for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+ for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+ for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
if (i == j ||
- (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
- (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
- (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
- (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
- HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
- VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
- (DRRDisplay[i] || DRRDisplay[j]))) {
+ (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+ (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+ (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+ (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+ v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+ v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+ (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
SynchronizedSurfaces[i][j] = true;
} else {
SynchronizedSurfaces[i][j] = false;
@@ -4484,8 +4467,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
@@ -4497,9 +4480,9 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
SameTimingForFCLKChange = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(SameTimingForFCLKChange ||
ActiveFCLKChangeLatencyMargin[k] <
SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
@@ -4519,17 +4502,17 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
*USRRetrainingSupport = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
DRAMClockChangeSupportNumber = 2;
@@ -4543,10 +4526,10 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
DRAMClockChangeMethod = 1;
- else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+ else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
DRAMClockChangeMethod = 2;
}
@@ -4573,16 +4556,16 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
unsigned int dst_y_pstate;
unsigned int src_y_pstate_l;
unsigned int src_y_pstate_c;
unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
- dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
- src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
+ dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+ src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
- sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+ sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
@@ -4593,21 +4576,21 @@ dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBL
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l);
#endif
SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
if (BytePerPixelDETC[k] > 0) {
- src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
+ src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
- sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+ sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c);
dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c);
#endif
}
@@ -4660,10 +4643,6 @@ void dml32_CalculateMinAndMaxPrefetchMode(
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
- } else if (AllowForPStateChangeOrStutterInVBlankFinal ==
- dm_prefetch_support_uclk_fclk_and_stutter_if_possible) {
- *MinPrefetchMode = 0;
- *MaxPrefetchMode = 3;
} else {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index d293856ba906..55cead0d4237 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,6 +30,7 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
+#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -215,6 +216,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -226,6 +228,7 @@ void dml32_CalculateODMMode(
double DISPCLKDPPCLKDSCCLKDownSpreading,
double DISPCLKRampingMargin,
double DISPCLKDPPCLKVCOSpeed,
+ unsigned int NumberOfDSCSlices,
/* Output */
bool *TotalAvailablePipesSupport,
@@ -712,28 +715,14 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -807,58 +796,28 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index c87091683b5d..dd90f241e906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -489,6 +489,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
@@ -518,13 +519,16 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_21_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+ dcn3_21_soc.dram_clock_change_latency_us =
+ bb_info.dram_clock_change_latency_100ns * 10;
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_21_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_enter_plus_exit_time_us =
+ bb_info.dram_sr_enter_exit_latency_100ns * 10;
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_21_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_21_soc.sr_exit_time_us =
+ bb_info.dram_sr_exit_latency_100ns * 10;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 5d27ff0ebb5f..f5400eda07a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,6 +35,8 @@
#include "dcn30/display_rq_dlg_calc_30.h"
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
#include "dcn32/display_mode_vba_32.h"
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
+const struct dml_funcs dml314_funcs = {
+ .validate = dml314_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml314_recalculate,
+ .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
.recalculate = dml32_recalculate,
@@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_DCN31_FPGA:
lib->funcs = dml31_funcs;
break;
+ case DML_PROJECT_DCN314:
+ lib->funcs = dml314_funcs;
+ break;
case DML_PROJECT_DCN32:
lib->funcs = dml32_funcs;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 2bdd6ed22611..b1878a1440e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,6 +41,7 @@ enum dml_project {
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
DML_PROJECT_DCN31_FPGA,
+ DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index e8b094006d95..f33a8879b05a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -26,6 +26,16 @@
#include "dc_features.h"
#include "display_mode_enums.h"
+/**
+ * DOC: overview
+ *
+ * Most of the DML code is automatically generated and tested via hardware
+ * description language. Usually, we use the prefix _vcs_dpi in the code,
+ * where VCS means "Verilog Compiled Simulator" and DPI stands for "Direct
+ * Programmer Interface". In other words, those structs can be used to
+ * interface Verilog with other languages such as C.
+ */
+
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
@@ -159,13 +169,20 @@ struct _vcs_dpi_voltage_scaling_st {
double dtbclk_mhz;
};
+/**
+ * struct _vcs_dpi_soc_bounding_box_st - SOC definitions
+ *
+ * This struct maintains the SOC Bounding Box information for the ASIC; it
+ * defines things such as clock, voltage, performance, etc. Usually, we load
+ * these values from VBIOS; if something goes wrong, we use some hard-coded
+ * values, which will enable the ASIC to light up with limitations.
+ */
struct _vcs_dpi_soc_bounding_box_st {
struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- /*
- * This is a temporary stash for updating @clock_limits with the PMFW
- * clock table. Do not use outside of *update_bw_boudning_box functions.
+ /**
+ * @num_states: It represents the total number of Display Power Management
+ * (DPM) states supported by the specific ASIC.
*/
- struct _vcs_dpi_voltage_scaling_st _clock_tmp[DC__VOLTAGE_STATES];
unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
@@ -231,6 +248,14 @@ struct _vcs_dpi_soc_bounding_box_st {
enum self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank;
};
+/**
+ * struct _vcs_dpi_ip_params_st - IP configuration for DCN blocks
+ *
+ * In this struct you can find the DCN configuration associated with the
+ * specific ASIC. For example, here we can store how many DPPs the ASIC is
+ * using and how many of them are available.
+ *
+ */
struct _vcs_dpi_ip_params_st {
bool use_min_dcfclk;
bool clamp_min_dcfclk;
@@ -283,6 +308,9 @@ struct _vcs_dpi_ip_params_st {
unsigned int writeback_line_buffer_chroma_buffer_size;
unsigned int max_page_table_levels;
+ /**
+ * @max_num_dpp: Maximum number of DPPs supported by the target ASIC.
+ */
unsigned int max_num_dpp;
unsigned int max_num_otg;
unsigned int cursor_chunk_size;
@@ -482,6 +510,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int htotal;
unsigned int vtotal;
unsigned int vfront_porch;
+ unsigned int vblank_nom;
unsigned int vactive;
unsigned int hactive;
unsigned int vstartup_start;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 503e7d984ff0..03924aed8d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -597,6 +597,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
mode_lib->vba.VFrontPorch[mode_lib->vba.NumberOfActivePlanes] = dst->vfront_porch;
+ mode_lib->vba.VBlankNom[mode_lib->vba.NumberOfActivePlanes] = dst->vblank_nom;
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_luma;
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_chroma;
mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 492aec634b68..630f3395e90a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -312,6 +312,7 @@ struct vba_vars_st {
unsigned int ActiveDPPs;
unsigned int LBLatencyHidingSourceLinesY;
unsigned int LBLatencyHidingSourceLinesC;
+ double ActiveDRAMClockChangeLatencyMarginPerState[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];// DML doesn't save active margin per state
double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
double CachedActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; // Cache in dml_get_voltage_level for debug purposes only
double MinActiveDRAMClockChangeMargin;
@@ -651,10 +652,10 @@ struct vba_vars_st {
unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
bool ImmediateFlipRequiredFinal;
bool DCCProgrammingAssumesScanDirectionUnknownFinal;
bool EnoughWritebackUnits;
@@ -800,8 +801,6 @@ struct vba_vars_st {
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
double AlignedYPitch[DC__NUM_DPP__MAX];
double AlignedCPitch[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 479d7d83220c..072bd0539605 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -76,14 +76,9 @@ static inline double dml_floor(double a, double granularity)
static inline double dml_round(double a)
{
- double round_pt = 0.5;
- double ceil = dml_ceil(a, 1);
- double floor = dml_floor(a, 1);
+ const double round_pt = 0.5;
- if (a - floor >= round_pt)
- return ceil;
- else
- return floor;
+ return dml_floor(a + round_pt, 1);
}
/* float
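The rewritten dml_round() relies on floor(a + 0.5) matching the previous ceil/floor comparison for the finite, non-negative values DML feeds it. A standalone check of that identity (plain C, outside the driver, illustrative samples only; link with -lm):

#include <assert.h>
#include <math.h>

/* Old and new formulations of dml_round(), reproduced for comparison only. */
static double round_old(double a)
{
	return (a - floor(a) >= 0.5) ? ceil(a) : floor(a);
}

static double round_new(double a)
{
	return floor(a + 0.5);
}

int main(void)
{
	const double samples[] = { 0.0, 0.25, 0.5, 1.49, 1.5, 2.999, 4096.5 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(round_old(samples[i]) == round_new(samples[i]));
	return 0;
}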
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
deleted file mode 100644
index b4b51e51fc25..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
+++ /dev/null
@@ -1,1884 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "resource.h"
-#include "core_types.h"
-#include "dsc.h"
-#include "clk_mgr.h"
-
-#ifndef DC_LOGGER_INIT
-#define DC_LOGGER_INIT
-#undef DC_LOG_WARNING
-#define DC_LOG_WARNING
-#endif
-
-#define DML_WRAPPER_TRANSLATION_
-#include "dml_wrapper_translation.c"
-#undef DML_WRAPPER_TRANSLATION_
-
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
-static void build_clamping_params(struct dc_stream_state *stream)
-{
- stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
- stream->clamping.c_depth = stream->timing.display_color_depth;
- stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
-}
-
-static void get_pixel_clock_parameters(
- const struct pipe_ctx *pipe_ctx,
- struct pixel_clk_params *pixel_clk_params)
-{
- const struct dc_stream_state *stream = pipe_ctx->stream;
-
- /*TODO: is this halved for YCbCr 420? in that case we might want to move
- * the pixel clock normalization for hdmi up to here instead of doing it
- * in pll_adjust_pix_clk
- */
- pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
- pixel_clk_params->signal_type = pipe_ctx->stream->signal;
- pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
- /* TODO: un-hardcode*/
- pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
- LINK_RATE_REF_FREQ_IN_KHZ;
- pixel_clk_params->flags.ENABLE_SS = 0;
- pixel_clk_params->color_depth =
- stream->timing.display_color_depth;
- pixel_clk_params->flags.DISPLAY_BLANKED = 1;
- pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
- PIXEL_ENCODING_YCBCR420);
- pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- pixel_clk_params->color_depth = COLOR_DEPTH_888;
- }
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
- }
- if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- pixel_clk_params->requested_pix_clk_100hz *= 2;
-
-}
-
-static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
- struct bit_depth_reduction_params *fmt_bit_depth)
-{
- enum dc_dither_option option = stream->dither_option;
- enum dc_pixel_encoding pixel_encoding =
- stream->timing.pixel_encoding;
-
- memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
-
- if (option == DITHER_OPTION_DEFAULT) {
- switch (stream->timing.display_color_depth) {
- case COLOR_DEPTH_666:
- option = DITHER_OPTION_SPATIAL6;
- break;
- case COLOR_DEPTH_888:
- option = DITHER_OPTION_SPATIAL8;
- break;
- case COLOR_DEPTH_101010:
- option = DITHER_OPTION_SPATIAL10;
- break;
- default:
- option = DITHER_OPTION_DISABLE;
- }
- }
-
- if (option == DITHER_OPTION_DISABLE)
- return;
-
- if (option == DITHER_OPTION_TRUN6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
- } else if (option == DITHER_OPTION_TRUN8 ||
- option == DITHER_OPTION_TRUN8_SPATIAL6 ||
- option == DITHER_OPTION_TRUN8_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
- } else if (option == DITHER_OPTION_TRUN10 ||
- option == DITHER_OPTION_TRUN10_SPATIAL6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8 ||
- option == DITHER_OPTION_TRUN10_FM8 ||
- option == DITHER_OPTION_TRUN10_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
- }
-
- /* special case - Formatter can only reduce by 4 bits at most.
- * When reducing from 12 to 6 bits,
- * HW recommends we use trunc with round mode
- * (if we did nothing, trunc to 10 bits would be used)
- * note that any 12->10 bit reduction is ignored prior to DCE8,
- * as the input was 10 bits.
- */
- if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_FM6) {
- fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
- fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
- fmt_bit_depth->flags.TRUNCATE_MODE = 1;
- }
-
- /* spatial dither
- * note that spatial modes 1-3 are never used
- */
- if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL6 ||
- option == DITHER_OPTION_TRUN8_SPATIAL6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL8 ||
- option == DITHER_OPTION_SPATIAL8_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
- option == DITHER_OPTION_SPATIAL10 ||
- option == DITHER_OPTION_SPATIAL10_FM8 ||
- option == DITHER_OPTION_SPATIAL10_FM6) {
- fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
- fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
- fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
- fmt_bit_depth->flags.RGB_RANDOM =
- (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
- }
-
- if (option == DITHER_OPTION_SPATIAL6 ||
- option == DITHER_OPTION_SPATIAL8 ||
- option == DITHER_OPTION_SPATIAL10) {
- fmt_bit_depth->flags.FRAME_RANDOM = 0;
- } else {
- fmt_bit_depth->flags.FRAME_RANDOM = 1;
- }
-
- //////////////////////
- //// temporal dither
- //////////////////////
- if (option == DITHER_OPTION_FM6 ||
- option == DITHER_OPTION_SPATIAL8_FM6 ||
- option == DITHER_OPTION_SPATIAL10_FM6 ||
- option == DITHER_OPTION_TRUN10_FM6 ||
- option == DITHER_OPTION_TRUN8_FM6 ||
- option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
- } else if (option == DITHER_OPTION_FM8 ||
- option == DITHER_OPTION_SPATIAL10_FM8 ||
- option == DITHER_OPTION_TRUN10_FM8) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
- } else if (option == DITHER_OPTION_FM10) {
- fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
- fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
- }
-
- fmt_bit_depth->pixel_encoding = pixel_encoding;
-}
-
-/* Move this after the above function as VS complains about
- * declaration issues for resource_build_bit_depth_reduction_params.
- */
-
-static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
-{
-
- get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
-
- if (pipe_ctx->clock_source)
- pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings);
-
- pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &pipe_ctx->stream->bit_depth_params);
- build_clamping_params(pipe_ctx->stream);
-
- return DC_OK;
-}
-
-bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
-{
- int i;
-
- /* Validate DSC config, dsc count validation is already done */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dsc_config dsc_cfg;
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- /* Only need to validate top pipe */
- if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
- continue;
-
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
- + stream->timing.h_border_right) / opp_cnt;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
- + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
- dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
-
- if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
- return false;
- }
- return true;
-}
-
-enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
-{
- enum dc_status status = DC_OK;
- struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
-
- if (!pipe_ctx)
- return DC_ERROR_UNEXPECTED;
-
-
- status = build_pipe_hw_param(pipe_ctx);
-
- return status;
-}
-
-void dml_acquire_dsc(const struct dc *dc,
- struct resource_context *res_ctx,
- struct display_stream_compressor **dsc,
- int pipe_idx)
-{
- int i;
- const struct resource_pool *pool = dc->res_pool;
- struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
-
- ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
- *dsc = NULL;
-
- /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
- if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
- *dsc = pool->dscs[pipe_idx];
- res_ctx->is_dsc_acquired[pipe_idx] = true;
- return;
- }
-
- /* Return old DSC to avoid the need for redo it */
- if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
- *dsc = dsc_old;
- res_ctx->is_dsc_acquired[dsc_old->inst] = true;
- return ;
- }
-
- /* Find first free DSC */
- for (i = 0; i < pool->res_cap->num_dsc; i++)
- if (!res_ctx->is_dsc_acquired[i]) {
- *dsc = pool->dscs[i];
- res_ctx->is_dsc_acquired[i] = true;
- break;
- }
-}
-
-static bool dml_split_stream_for_mpc_or_odm(
- const struct dc *dc,
- struct resource_context *res_ctx,
- struct pipe_ctx *pri_pipe,
- struct pipe_ctx *sec_pipe,
- bool odm)
-{
- int pipe_idx = sec_pipe->pipe_idx;
- const struct resource_pool *pool = dc->res_pool;
-
- *sec_pipe = *pri_pipe;
-
- sec_pipe->pipe_idx = pipe_idx;
- sec_pipe->plane_res.mi = pool->mis[pipe_idx];
- sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
- sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
- sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
- sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
- sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
- sec_pipe->stream_res.dsc = NULL;
- if (odm) {
- if (pri_pipe->next_odm_pipe) {
- ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
- sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
- sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
- }
- if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
- pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
- sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
- }
- if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
- pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
- sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
- }
- pri_pipe->next_odm_pipe = sec_pipe;
- sec_pipe->prev_odm_pipe = pri_pipe;
- ASSERT(sec_pipe->top_pipe == NULL);
-
- if (!sec_pipe->top_pipe)
- sec_pipe->stream_res.opp = pool->opps[pipe_idx];
- else
- sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
- if (sec_pipe->stream->timing.flags.DSC == 1) {
- dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
- ASSERT(sec_pipe->stream_res.dsc);
- if (sec_pipe->stream_res.dsc == NULL)
- return false;
- }
- } else {
- if (pri_pipe->bottom_pipe) {
- ASSERT(pri_pipe->bottom_pipe != sec_pipe);
- sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
- sec_pipe->bottom_pipe->top_pipe = sec_pipe;
- }
- pri_pipe->bottom_pipe = sec_pipe;
- sec_pipe->top_pipe = pri_pipe;
-
- ASSERT(pri_pipe->plane_state);
- }
-
- return true;
-}
-
-static struct pipe_ctx *dml_find_split_pipe(
- struct dc *dc,
- struct dc_state *context,
- int old_index)
-{
- struct pipe_ctx *pipe = NULL;
- int i;
-
- if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[old_index];
- pipe->pipe_idx = old_index;
- }
-
- if (!pipe)
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
- && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
- if (context->res_ctx.pipe_ctx[i].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[i];
- pipe->pipe_idx = i;
- break;
- }
- }
- }
-
- /*
- * May need to fix pipes getting tossed from 1 opp to another on flip
- * Add for debugging transient underflow during topology updates:
- * ASSERT(pipe);
- */
- if (!pipe)
- for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
- if (context->res_ctx.pipe_ctx[i].stream == NULL) {
- pipe = &context->res_ctx.pipe_ctx[i];
- pipe->pipe_idx = i;
- break;
- }
- }
-
- return pipe;
-}
-
-static void dml_release_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
- struct display_stream_compressor **dsc)
-{
- int i;
-
- for (i = 0; i < pool->res_cap->num_dsc; i++)
- if (pool->dscs[i] == *dsc) {
- res_ctx->is_dsc_acquired[i] = false;
- *dsc = NULL;
- break;
- }
-}
-
-static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
-{
- int mpc_split_count = 0;
- struct pipe_ctx *other_pipe = pipe->bottom_pipe;
-
- while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
- mpc_split_count++;
- other_pipe = other_pipe->bottom_pipe;
- }
- other_pipe = pipe->top_pipe;
- while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
- mpc_split_count++;
- other_pipe = other_pipe->top_pipe;
- }
-
- return mpc_split_count;
-}
-
-static bool dml_enough_pipes_for_subvp(struct dc *dc,
- struct dc_state *context)
-{
- int i = 0;
- int num_pipes = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream && pipe->plane_state)
- num_pipes++;
- }
-
- // Sub-VP only possible if the number of "real" pipes is
- // less than or equal to half the number of available pipes
- if (num_pipes * 2 > dc->res_pool->pipe_count)
- return false;
-
- return true;
-}
-
-static int dml_validate_apply_pipe_split_flags(
- struct dc *dc,
- struct dc_state *context,
- int vlevel,
- int *split,
- bool *merge)
-{
- int i, pipe_idx, vlevel_split;
- int plane_count = 0;
- bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- struct vba_vars_st *v = &context->bw_ctx.dml.vba;
- int max_mpc_comb = v->maxMpcComb;
-
- if (context->stream_count > 1) {
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
- avoid_split = true;
- } else if (dc->debug.force_single_disp_pipe_split)
- force_split = true;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /**
- * Workaround for avoiding pipe-split in cases where we'd split
- * planes that are too small, resulting in splits that aren't
- * valid for the scaler.
- */
- if (pipe->plane_state &&
- (pipe->plane_state->dst_rect.width <= 16 ||
- pipe->plane_state->dst_rect.height <= 16 ||
- pipe->plane_state->src_rect.width <= 16 ||
- pipe->plane_state->src_rect.height <= 16))
- avoid_split = true;
-
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
- }
- if (plane_count > dc->res_pool->pipe_count / 2)
- avoid_split = true;
-
- /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct dc_crtc_timing timing;
-
- if (!pipe->stream)
- continue;
- else {
- timing = pipe->stream->timing;
- if (timing.h_border_left + timing.h_border_right
- + timing.v_border_top + timing.v_border_bottom > 0) {
- avoid_split = true;
- break;
- }
- }
- }
-
- /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
- if (avoid_split) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
- if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
- v->ModeSupport[vlevel][0])
- break;
- /* Impossible to not split this pipe */
- if (vlevel > context->bw_ctx.dml.soc.num_states)
- vlevel = vlevel_split;
- else
- max_mpc_comb = 0;
- pipe_idx++;
- }
- v->maxMpcComb = max_mpc_comb;
- }
-
- /* Split loop sets which pipe should be split based on dml outputs and dc flags */
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- int pipe_plane = v->pipe_plane[pipe_idx];
- bool split4mpc = context->stream_count == 1 && plane_count == 1
- && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
-
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
- split[i] = 4;
- else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
- split[i] = 2;
-
- if ((pipe->stream->view_format ==
- VIEW_3D_FORMAT_SIDE_BY_SIDE ||
- pipe->stream->view_format ==
- VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
- (pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
- pipe->stream->timing.timing_3d_format ==
- TIMING_3D_FORMAT_SIDE_BY_SIDE))
- split[i] = 2;
- if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- split[i] = 2;
- v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
- }
- if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
- split[i] = 4;
- v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
- }
- /*420 format workaround*/
- if (pipe->stream->timing.h_addressable > 7680 &&
- pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- split[i] = 4;
- }
-
- v->ODMCombineEnabled[pipe_plane] =
- v->ODMCombineEnablePerState[vlevel][pipe_plane];
-
- if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- if (dml_get_num_mpc_splits(pipe) == 1) {
- /*If need split for mpc but 2 way split already*/
- if (split[i] == 4)
- split[i] = 2; /* 2 -> 4 MPC */
- else if (split[i] == 2)
- split[i] = 0; /* 2 -> 2 MPC */
- else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
- merge[i] = true; /* 2 -> 1 MPC */
- } else if (dml_get_num_mpc_splits(pipe) == 3) {
- /*If need split for mpc but 4 way split already*/
- if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
- || !pipe->bottom_pipe)) {
- merge[i] = true; /* 4 -> 2 MPC */
- } else if (split[i] == 0 && pipe->top_pipe &&
- pipe->top_pipe->plane_state == pipe->plane_state)
- merge[i] = true; /* 4 -> 1 MPC */
- split[i] = 0;
- } else if (dml_get_num_mpc_splits(pipe)) {
- /* ODM -> MPC transition */
- if (pipe->prev_odm_pipe) {
- split[i] = 0;
- merge[i] = true;
- }
- }
- } else {
- if (dml_get_num_mpc_splits(pipe) == 1) {
- /*If need split for odm but 2 way split already*/
- if (split[i] == 4)
- split[i] = 2; /* 2 -> 4 ODM */
- else if (split[i] == 2)
- split[i] = 0; /* 2 -> 2 ODM */
- else if (pipe->prev_odm_pipe) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* exit ODM */
- }
- } else if (dml_get_num_mpc_splits(pipe) == 3) {
- /*If need split for odm but 4 way split already*/
- if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
- || !pipe->next_odm_pipe)) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* 4 -> 2 ODM */
- } else if (split[i] == 0 && pipe->prev_odm_pipe) {
- ASSERT(0); /* NOT expected yet */
- merge[i] = true; /* exit ODM */
- }
- split[i] = 0;
- } else if (dml_get_num_mpc_splits(pipe)) {
- /* MPC -> ODM transition */
- ASSERT(0); /* NOT expected yet */
- if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
- split[i] = 0;
- merge[i] = true;
- }
- }
- }
-
- /* Adjust dppclk when split is forced, do not bother with dispclk */
- if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
- v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
- pipe_idx++;
- }
-
- return vlevel;
-}
-
-static void dml_set_phantom_stream_timing(struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *ref_pipe,
- struct dc_stream_state *phantom_stream)
-{
- // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
- uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
- dc->caps.subvp_fw_processing_delay_us +
- dc->caps.subvp_pstate_allow_width_us;
- uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
- (ref_pipe->stream->timing.pix_clk_100hz * 100) /
- (double)ref_pipe->stream->timing.h_total;
- uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;
-
- phantom_stream->dst.y = 0;
- phantom_stream->dst.height = phantom_vactive;
- phantom_stream->src.y = 0;
- phantom_stream->src.height = phantom_vactive;
-
- phantom_stream->timing.v_addressable = phantom_vactive;
- phantom_stream->timing.v_front_porch = 1;
- phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
- phantom_stream->timing.v_front_porch +
- phantom_stream->timing.v_sync_width +
- phantom_bp;
-}
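The removed dml_set_phantom_stream_timing() above converts a microsecond blackout budget into lines of phantom vactive: lines = (budget_us / 1e6) * pixel_clock_hz / h_total. As a worked example with assumed numbers (a 30 us budget, 594 MHz pixel clock and h_total of 4400, i.e. a common 4k@60 timing), that gives (30e-6 * 594e6) / 4400, roughly 4 lines of active; the front porch, sync width and a vstartup-derived back porch are then added to form the phantom v_total.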
-
-static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *ref_pipe)
-{
- struct dc_stream_state *phantom_stream = NULL;
-
- phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
-
- /* stream has limited viewport and small timing */
- memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
- memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
- memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
- dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);
-
- dc_add_stream_to_ctx(dc, context, phantom_stream);
- dc->hwss.apply_ctx_to_hw(dc, context);
- return phantom_stream;
-}
-
-static void dml_enable_phantom_plane(struct dc *dc,
- struct dc_state *context,
- struct dc_stream_state *phantom_stream,
- struct pipe_ctx *main_pipe)
-{
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_plane_state *prev_phantom_plane = NULL;
- struct pipe_ctx *curr_pipe = main_pipe;
-
- while (curr_pipe) {
- if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
- phantom_plane = prev_phantom_plane;
- else
- phantom_plane = dc_create_plane_state(dc);
-
- memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
- memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
- sizeof(phantom_plane->scaling_quality));
- memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
- memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
- memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
- memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
- sizeof(phantom_plane->plane_size));
- memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
- sizeof(phantom_plane->tiling_info));
- memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
- /* Currently compat_level is undefined in dc_state
- * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
- */
- phantom_plane->format = curr_pipe->plane_state->format;
- phantom_plane->rotation = curr_pipe->plane_state->rotation;
- phantom_plane->visible = curr_pipe->plane_state->visible;
-
- /* Shadow pipe has small viewport. */
- phantom_plane->clip_rect.y = 0;
- phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
-
- dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
-
- curr_pipe = curr_pipe->bottom_pipe;
- prev_phantom_plane = phantom_plane;
- }
-}
-
-static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i = 0;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct dc_stream_state *ref_stream = pipe->stream;
- // Only construct phantom stream for top pipes that have plane enabled
- if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
- struct dc_stream_state *phantom_stream = NULL;
-
- phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
- dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state && pipe->stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- pipe->stream->use_dynamic_meta = false;
- pipe->plane_state->flip_immediate = false;
- if (!resource_build_scaling_params(pipe)) {
- // Log / remove phantom pipes since failed to build scaling params
- }
- }
- }
-}
-
-static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i;
- bool removed_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- dc_rem_all_planes_for_stream(dc, pipe->stream, context);
- dc_remove_stream_from_ctx(dc, context, pipe->stream);
- removed_pipe = true;
- }
-
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
- }
- if (removed_pipe)
- dc->hwss.apply_ctx_to_hw(dc, context);
-}
-
-/*
- * If the input state contains no upstream planes for a particular pipe (i.e. only timing)
- * we need to populate some "conservative" plane information as DML cannot handle "no planes"
- */
-static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
-{
- pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
- pipe->src.source_scan = dm_horz;
- pipe->src.sw_mode = dm_sw_4kb_s;
- pipe->src.macro_tile_size = dm_64k_tile;
- pipe->src.viewport_width = timing->h_addressable;
- if (pipe->src.viewport_width > 1920)
- pipe->src.viewport_width = 1920;
- pipe->src.viewport_height = timing->v_addressable;
- if (pipe->src.viewport_height > 1080)
- pipe->src.viewport_height = 1080;
- pipe->src.surface_height_y = pipe->src.viewport_height;
- pipe->src.surface_width_y = pipe->src.viewport_width;
- pipe->src.surface_height_c = pipe->src.viewport_height;
- pipe->src.surface_width_c = pipe->src.viewport_width;
- pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
- pipe->src.source_format = dm_444_32;
- pipe->dest.recout_width = pipe->src.viewport_width;
- pipe->dest.recout_height = pipe->src.viewport_height;
- pipe->dest.full_recout_width = pipe->dest.recout_width;
- pipe->dest.full_recout_height = pipe->dest.recout_height;
- pipe->scale_ratio_depth.lb_depth = dm_lb_16;
- pipe->scale_ratio_depth.hscl_ratio = 1.0;
- pipe->scale_ratio_depth.vscl_ratio = 1.0;
- pipe->scale_ratio_depth.scl_enable = 0;
- pipe->scale_taps.htaps = 1;
- pipe->scale_taps.vtaps = 1;
- pipe->dest.vtotal_min = timing->v_total;
- pipe->dest.vtotal_max = timing->v_total;
-
- if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
- pipe->src.viewport_width /= 2;
- pipe->dest.recout_width /= 2;
- } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
- pipe->src.viewport_width /= 4;
- pipe->dest.recout_width /= 4;
- }
-
- pipe->src.dcc = false;
- pipe->src.dcc_rate = 1;
-}
-
-/*
- * If the pipe is not blending (i.e. pipe_ctx->top pipe == null) then its
- * hsplit group is equal to its own pipe ID
- * Otherwise, all pipes part of the same blending tree have the same hsplit group
- * ID as the top most pipe
- *
- * If the pipe ctx is ODM combined, then similar logic follows
- */
-static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
-{
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
-
- if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
- == dc_pipe_ctx->plane_state) {
- struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
- int split_idx = 0;
-
- while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
- == dc_pipe_ctx->plane_state) {
- first_pipe = first_pipe->top_pipe;
- split_idx++;
- }
-
- /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */
- if (split_idx == 0)
- e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
- else if (split_idx == 1)
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
- else if (split_idx == 2)
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;
-
- } else if (dc_pipe_ctx->prev_odm_pipe) {
- struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;
-
- while (first_pipe->prev_odm_pipe)
- first_pipe = first_pipe->prev_odm_pipe;
- e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
- }
-}
-
-static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
-{
- const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
- const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;
-
- e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
- e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
- || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
- || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
-
- /* stereo is not split */
- if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
- pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
- e2e_pipe->pipe.src.is_hsplit = false;
- e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
- }
-
- e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
- || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
- e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
- e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
- e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
- e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
- e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
- e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
- e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
- e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
- e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
- e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
- e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
-
- if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
- || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
- e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
- e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
- e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
- } else {
- e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
- e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
- }
- e2e_pipe->pipe.src.dcc = pln->dcc.enable;
- e2e_pipe->pipe.src.dcc_rate = 1;
- e2e_pipe->pipe.dest.recout_width = scl->recout.width;
- e2e_pipe->pipe.dest.recout_height = scl->recout.height;
- e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
- e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
- if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
- e2e_pipe->pipe.dest.full_recout_width *= 2;
- else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
- e2e_pipe->pipe.dest.full_recout_width *= 4;
- else {
- struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;
-
- while (split_pipe && split_pipe->plane_state == pln) {
- e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
- split_pipe = split_pipe->bottom_pipe;
- }
- split_pipe = dc_pipe_ctx->top_pipe;
- while (split_pipe && split_pipe->plane_state == pln) {
- e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
- split_pipe = split_pipe->top_pipe;
- }
- }
-
- e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
- e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
- e2e_pipe->pipe.scale_ratio_depth.scl_enable =
- scl->ratios.vert.value != dc_fixpt_one.value
- || scl->ratios.horz.value != dc_fixpt_one.value
- || scl->ratios.vert_c.value != dc_fixpt_one.value
- || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
- || always_scale; /*support always scale*/
- e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
- e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
- e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
- e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
-
- /* Currently compat_level is not defined. Commenting it until further resolution
- * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
- swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
- &e2e_pipe->pipe.src.sw_mode);
- e2e_pipe->pipe.src.macro_tile_size =
- swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
- } else {
- gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
- pln->compat_level,
- &e2e_pipe->pipe.src.sw_mode);
- e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
- }*/
-
- e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
-}
-
-static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
-{
- /*
- * For graphic plane, cursor number is 1, nv12 is 0
- * bw calculations due to cursor on/off
- */
- if (dc_pipe_ctx->plane_state &&
- (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
- e2e_pipe->pipe.src.num_cursors = 0;
- else
- e2e_pipe->pipe.src.num_cursors = 1;
-
- e2e_pipe->pipe.src.cur0_src_width = 256;
- e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
-}
-
-static int populate_dml_pipes_from_context_base(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
-{
- int pipe_cnt, i;
- bool synchronized_vblank = true;
- struct resource_context *res_ctx = &context->res_ctx;
-
- for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (pipe_cnt < 0) {
- pipe_cnt = i;
- continue;
- }
-
- if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
- continue;
-
- if (dc->debug.disable_timing_sync ||
- (!resource_are_streams_timing_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream) &&
- !resource_are_vblanks_synchronizable(
- res_ctx->pipe_ctx[pipe_cnt].stream,
- res_ctx->pipe_ctx[i].stream))) {
- synchronized_vblank = false;
- break;
- }
- }
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;
-
- struct audio_check aud_check = {0};
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
-
- /* todo:
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
- pipes[pipe_cnt].pipe.src.dcc = 0;
- pipes[pipe_cnt].pipe.src.vm = 0;*/
-
- pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
-
- pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
- /* todo: rotation?*/
- pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
- if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
- pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
- /* 1/2 vblank */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
- (timing->v_total - timing->v_addressable
- - timing->v_border_top - timing->v_border_bottom) / 2;
- /* 36 bytes dp, 32 hdmi */
- pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
- dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
- }
- pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
-
- dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
- pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
- pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
-
- pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
-
- pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);
-
- populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
-
- pipes[pipe_cnt].dout.dp_lanes = 4;
- pipes[pipe_cnt].dout.is_virtual = 0;
- pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
- if (pipes[pipe_cnt].dout.output_type < 0) {
- pipes[pipe_cnt].dout.output_type = dm_dp;
- pipes[pipe_cnt].dout.is_virtual = 1;
- }
-
- populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);
-
- if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
- pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
-
- /* todo: default max for now, until there is logic reflecting this in dc*/
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- /*fill up the audio sample rate (unit in kHz)*/
- get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
- pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
-
- populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
-
- if (!res_ctx->pipe_ctx[i].plane_state) {
- populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
- } else {
- populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
- }
-
- pipe_cnt++;
- }
-
- /* populate writeback information */
- if (dc->res_pool)
- dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
-
- return pipe_cnt;
-}
-
-static int dml_populate_dml_pipes_from_context(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- bool fast_validate)
-{
- int i, pipe_cnt;
- struct resource_context *res_ctx = &context->res_ctx;
- struct pipe_ctx *pipe = NULL; // Fix potentially uninitialized error from VS
-
- populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);
-
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- struct dc_crtc_timing *timing;
-
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- pipes[pipe_cnt].pipe.src.gpuvm = true;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
- pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
- pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
-
- pipes[pipe_cnt].dout.dsc_input_bpc = 0;
- if (pipes[pipe_cnt].dout.dsc_enable) {
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_888:
- pipes[pipe_cnt].dout.dsc_input_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- pipes[pipe_cnt].dout.dsc_input_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- pipes[pipe_cnt].dout.dsc_input_bpc = 12;
- break;
- default:
- ASSERT(0);
- break;
- }
- }
- pipe_cnt++;
- }
- dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (is_dual_plane(pipe->plane_state->format)
- && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
- dc->config.enable_4to1MPC = true;
- } else if (!is_dual_plane(pipe->plane_state->format)) {
- context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
- pipes[0].pipe.src.unbounded_req_mode = true;
- }
- }
-
- return pipe_cnt;
-}
-
-static void dml_full_validate_bw_helper(struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *vlevel,
- int *split,
- bool *merge,
- int *pipe_cnt)
-{
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- /*
- * DML favors voltage over p-state, but we're more interested in
- * supporting p-state over voltage. We can't support p-state in
- * prefetch mode > 0 so try capping the prefetch mode to start.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh_and_mclk_switch;
- *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
- /* This may adjust vlevel and maxMpcComb */
- if (*vlevel < context->bw_ctx.dml.soc.num_states)
- *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
-
- /* Conditions for setting up phantom pipes for SubVP:
- * 1. Not force disable SubVP
- * 2. Full update (i.e. !fast_validate)
- * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
- * 4. Display configuration passes validation
- * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
- */
- if (!dc->debug.force_disable_subvp &&
- dml_enough_pipes_for_subvp(dc, context) &&
- *vlevel < context->bw_ctx.dml.soc.num_states &&
- (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
- dc->debug.force_subvp_mclk_switch)) {
-
- dml_add_phantom_pipes(dc, context);
-
- /* Create input to DML based on new context which includes phantom pipes
- * TODO: Input to DML should mark which pipes are phantom
- */
- *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
- *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
- if (*vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, MAX_PIPES * sizeof(*split));
- memset(merge, 0, MAX_PIPES * sizeof(*merge));
- *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
- }
-
- // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
- // remove phantom pipes and repopulate dml pipes
- if (*vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- dml_remove_phantom_pipes(dc, context);
- *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
- }
- }
-}
-
-static void dcn20_adjust_adaptive_sync_v_startup(
- const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
-{
- struct dc_crtc_timing patched_crtc_timing;
- uint32_t asic_blank_end = 0;
- uint32_t asic_blank_start = 0;
- uint32_t newVstartup = 0;
-
- patched_crtc_timing = *dc_crtc_timing;
-
- if (patched_crtc_timing.flags.INTERLACE == 1) {
- if (patched_crtc_timing.v_front_porch < 2)
- patched_crtc_timing.v_front_porch = 2;
- } else {
- if (patched_crtc_timing.v_front_porch < 1)
- patched_crtc_timing.v_front_porch = 1;
- }
-
- /* blank_start = frame end - front porch */
- asic_blank_start = patched_crtc_timing.v_total -
- patched_crtc_timing.v_front_porch;
-
- /* blank_end = blank_start - active */
- asic_blank_end = asic_blank_start -
- patched_crtc_timing.v_border_bottom -
- patched_crtc_timing.v_addressable -
- patched_crtc_timing.v_border_top;
-
- newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
-
- *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
-}
-
-static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
-{
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal));
-}
-
-static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
-{
- int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
- return true;
- }
- return false;
-}
-
-static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
-{
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
- context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us;
- }
-}
-
-static bool dml_internal_validate(
- struct dc *dc,
- struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int *pipe_cnt_out,
- int *vlevel_out,
- bool fast_validate)
-{
- bool out = false;
- bool repopulate_pipes = false;
- int split[MAX_PIPES] = { 0 };
- bool merge[MAX_PIPES] = { false };
- bool newly_split[MAX_PIPES] = { false };
- int pipe_cnt, i, pipe_idx, vlevel;
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- ASSERT(pipes);
- if (!pipes)
- return false;
-
- // For each full update, remove all existing phantom pipes first
- dml_remove_phantom_pipes(dc, context);
-
- dml_update_soc_for_wm_a(dc, context);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state) {
- // On initial pass through DML, we intend to use MALL for SS on all
- // (non-PSR) surfaces with none using MALL for P-State
- // 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out till mall_plane_config gets supported in dc_plant_state
- //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
- // pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
- }
- }
- pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
-
- if (!pipe_cnt) {
- out = true;
- goto validate_out;
- }
-
- dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
-
- if (!fast_validate) {
- dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
- }
-
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- /*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
- *
- * We don't actually support prefetch mode 2, so require that we
- * at least support prefetch mode 1.
- */
- context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
- dm_allow_self_refresh;
-
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (vlevel < context->bw_ctx.dml.soc.num_states) {
- memset(split, 0, sizeof(split));
- memset(merge, 0, sizeof(merge));
- vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
- }
- }
-
- dml_log_mode_support_params(&context->bw_ctx.dml);
-
- if (vlevel == context->bw_ctx.dml.soc.num_states)
- goto validate_fail;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
-
- if (!pipe->stream)
- continue;
-
- /* We only support full screen mpo with ODM */
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
- }
- pipe_idx++;
- }
-
- /* merge pipes if necessary */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /*skip pipes that don't need merging*/
- if (!merge[i])
- continue;
-
- /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
- if (pipe->prev_odm_pipe) {
- /*split off odm pipe*/
- pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
- if (pipe->next_odm_pipe)
- pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
-
- pipe->bottom_pipe = NULL;
- pipe->next_odm_pipe = NULL;
- pipe->plane_state = NULL;
- pipe->stream = NULL;
- pipe->top_pipe = NULL;
- pipe->prev_odm_pipe = NULL;
- if (pipe->stream_res.dsc)
- dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
- memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
- memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
- repopulate_pipes = true;
- } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
- struct pipe_ctx *top_pipe = pipe->top_pipe;
- struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
-
- top_pipe->bottom_pipe = bottom_pipe;
- if (bottom_pipe)
- bottom_pipe->top_pipe = top_pipe;
-
- pipe->top_pipe = NULL;
- pipe->bottom_pipe = NULL;
- pipe->plane_state = NULL;
- pipe->stream = NULL;
- memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
- memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
- repopulate_pipes = true;
- } else
- ASSERT(0); /* Should never try to merge master pipe */
-
- }
-
- for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- struct pipe_ctx *hsplit_pipe = NULL;
- bool odm;
- int old_index = -1;
-
- if (!pipe->stream || newly_split[i])
- continue;
-
- pipe_idx++;
- odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
-
- if (!pipe->plane_state && !odm)
- continue;
-
- if (split[i]) {
- if (odm) {
- if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (old_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->pipe_idx;
- } else {
- if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
- else if (old_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->pipe_idx;
- }
- hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
- ASSERT(hsplit_pipe);
- if (!hsplit_pipe)
- goto validate_fail;
-
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- pipe, hsplit_pipe, odm))
- goto validate_fail;
-
- newly_split[hsplit_pipe->pipe_idx] = true;
- repopulate_pipes = true;
- }
- if (split[i] == 4) {
- struct pipe_ctx *pipe_4to1;
-
- if (odm && old_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->pipe_idx;
- else
- old_index = -1;
- pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
- ASSERT(pipe_4to1);
- if (!pipe_4to1)
- goto validate_fail;
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- pipe, pipe_4to1, odm))
- goto validate_fail;
- newly_split[pipe_4to1->pipe_idx] = true;
-
- if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
- && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
- old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
- else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
- old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
- old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
- else
- old_index = -1;
- pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
- ASSERT(pipe_4to1);
- if (!pipe_4to1)
- goto validate_fail;
- if (!dml_split_stream_for_mpc_or_odm(
- dc, &context->res_ctx,
- hsplit_pipe, pipe_4to1, odm))
- goto validate_fail;
- newly_split[pipe_4to1->pipe_idx] = true;
- }
- if (odm)
- dml_build_mapped_resource(dc, context, pipe->stream);
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->plane_state) {
- if (!resource_build_scaling_params(pipe))
- goto validate_fail;
- }
- }
-
- /* Actual dsc count per stream dsc validation*/
- if (!dml_validate_dsc(dc, context)) {
- vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
- goto validate_fail;
- }
-
- if (repopulate_pipes)
- pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
- *vlevel_out = vlevel;
- *pipe_cnt_out = pipe_cnt;
-
- out = true;
- goto validate_out;
-
-validate_fail:
- out = false;
-
-validate_out:
- return out;
-}
-
-static void dml_calculate_dlg_params(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx;
- int plane_count;
-
- /* Writeback MCIF_WB arbitration parameters */
- if (dc->res_pool)
- dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
-
- context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
- context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
- context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
- context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
- context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
- context->bw_ctx.bw.dcn.clk.p_state_change_support =
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
- != dm_dram_clock_change_unsupported;
-
- context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
- /* 'z9_support': is not a member of 'dc_clocks' - Commenting out till we have this support in dc_clocks
- * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
- DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
- */
- plane_count = 0;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].plane_state)
- plane_count++;
- }
-
- /* Commented out as per above error for now.
- if (plane_count == 0)
- context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
- */
- context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
- context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support =
- context->bw_ctx.dml.vba.FCLKChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
- context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
- // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
- context->res_ctx.pipe_ctx[i].unbounded_req = false;
- } else {
- context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
- context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
- }
-
- if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
- pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
- context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
- pipe_idx++;
- }
- /*save a original dppclock copy*/
- context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
- context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
- context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
- context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
- context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
- - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
-
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].dlg_regs,
- &context->res_ctx.pipe_ctx[i].ttu_regs,
- pipes,
- pipe_cnt,
- pipe_idx,
- cstate_en,
- context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, true);
-
- context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
- &context->res_ctx.pipe_ctx[i].rq_regs,
- &pipes[pipe_idx].pipe);
- pipe_idx++;
- }
-}
-
-static void dml_calculate_wm_and_dlg(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx, vlevel_temp = 0;
-
- double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
- unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
- bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
- dm_dram_clock_change_unsupported;
-
- /* Set B:
- * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present,
- * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark
- * calculations to cover bootup clocks.
- * DCFCLK: soc.clock_limits[2] when available
- * UCLK: soc.clock_limits[2] when available
- */
- if (context->bw_ctx.dml.soc.num_states > 2) {
- vlevel_temp = 2;
- dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
- } else
- dcfclk = 615; //DCFCLK Vmin_lv
-
- pipes[0].clks_cfg.voltage = vlevel_temp;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
-
- /* Set D:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW when available
- * UCLK : Min, as reported by PM FW when available
- * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
- */
-
- if (context->bw_ctx.dml.soc.num_states > 2) {
- vlevel_temp = 0;
- dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
- } else
- dcfclk = 615; //DCFCLK Vmin_lv
-
- pipes[0].clks_cfg.voltage = vlevel_temp;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
-
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
- /* Set C, for Dummy P-State:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW, when available
- * UCLK : Min, as reported by PM FW, when available
- * pstate latency as per UCLK state dummy pstate latency
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- unsigned int min_dram_speed_mts_margin = 160;
-
- if ((!pstate_en))
- min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
-
- /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */
- for (i = 3; i > 0; i--)
- if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
- break;
-
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
- //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
- if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
- /* The only difference between A and C is p-state latency, if p-state is not supported
- * with full p-state latency we want to calculate DLG based on dummy p-state latency,
- * Set A p-state watermark set to 0 previously, when p-state unsupported, for now keep as previous implementation.
- */
- context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
- } else {
- /* Set A:
- * All clocks min.
- * DCFCLK: Min, as reported by PM FW, when available
- * UCLK: Min, as reported by PM FW, when available
- */
- dml_update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- }
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
-
- dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
-
- if (!pstate_en)
- /* Restore full p-state latency */
- context->bw_ctx.dml.soc.dram_clock_change_latency_us =
- dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-}
-
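The Set B, D, C and A blocks above all follow the same shape; a rough sketch of that pattern (illustrative only -- vlevel_for_set, dcfclk_for_set, wm_entry, wm_set and soc are placeholder names, while the get_wm_*() helpers are the ones used above):

	/* pick the voltage level and DCFCLK for this watermark set */
	pipes[0].clks_cfg.voltage = vlevel_for_set;
	pipes[0].clks_cfg.dcfclk_mhz = dcfclk_for_set;
	pipes[0].clks_cfg.socclk_mhz = soc->clock_limits[vlevel_for_set].socclk_mhz;

	/* load the matching WM_x latency entry into the DML soc struct, if valid */
	if (wm_entry->valid) {
		soc->dram_clock_change_latency_us = wm_entry->dml_input.pstate_latency_us;
		soc->sr_enter_plus_exit_time_us = wm_entry->dml_input.sr_enter_plus_exit_time_us;
		soc->sr_exit_time_us = wm_entry->dml_input.sr_exit_time_us;
	}

	/* sample the DML helpers; they return microseconds, the context stores ns */
	wm_set->urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;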
-bool dml_validate(struct dc *dc,
- struct dc_state *context,
- bool fast_validate)
-{
- bool out = false;
-
- BW_VAL_TRACE_SETUP();
-
- int vlevel = 0;
- int pipe_cnt = 0;
- display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- BW_VAL_TRACE_COUNT();
-
- out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
-
- if (pipe_cnt == 0)
- goto validate_out;
-
- if (!out)
- goto validate_fail;
-
- BW_VAL_TRACE_END_VOLTAGE_LEVEL();
-
- if (fast_validate) {
- BW_VAL_TRACE_SKIP(fast);
- goto validate_out;
- }
-
- dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
-
- BW_VAL_TRACE_END_WATERMARKS();
-
- goto validate_out;
-
-validate_fail:
- DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
- dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
-
- BW_VAL_TRACE_SKIP(fail);
- out = false;
-
-validate_out:
- BW_VAL_TRACE_FINISH();
-
- return out;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
deleted file mode 100644
index 4ec5310a2962..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifdef DML_WRAPPER_TRANSLATION_
-
-static void gfx10array_mode_to_dml_params(
- enum array_mode_values array_mode,
- enum legacy_tiling_compat_level compat_level,
- unsigned int *sw_mode)
-{
- switch (array_mode) {
- case DC_ARRAY_LINEAR_ALLIGNED:
- case DC_ARRAY_LINEAR_GENERAL:
- *sw_mode = dm_sw_linear;
- break;
- case DC_ARRAY_2D_TILED_THIN1:
-// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed
-#if 0
- if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO)
- *sw_mode = dm_sw_gfx7_2d_thin_l_vp;
- else
- *sw_mode = dm_sw_gfx7_2d_thin_gl;
-#endif
- break;
- default:
- ASSERT(0); /* Not supported */
- break;
- }
-}
-
-static void swizzle_to_dml_params(
- enum swizzle_mode_values swizzle,
- unsigned int *sw_mode)
-{
- switch (swizzle) {
- case DC_SW_LINEAR:
- *sw_mode = dm_sw_linear;
- break;
- case DC_SW_4KB_S:
- *sw_mode = dm_sw_4kb_s;
- break;
- case DC_SW_4KB_S_X:
- *sw_mode = dm_sw_4kb_s_x;
- break;
- case DC_SW_4KB_D:
- *sw_mode = dm_sw_4kb_d;
- break;
- case DC_SW_4KB_D_X:
- *sw_mode = dm_sw_4kb_d_x;
- break;
- case DC_SW_64KB_S:
- *sw_mode = dm_sw_64kb_s;
- break;
- case DC_SW_64KB_S_X:
- *sw_mode = dm_sw_64kb_s_x;
- break;
- case DC_SW_64KB_S_T:
- *sw_mode = dm_sw_64kb_s_t;
- break;
- case DC_SW_64KB_D:
- *sw_mode = dm_sw_64kb_d;
- break;
- case DC_SW_64KB_D_X:
- *sw_mode = dm_sw_64kb_d_x;
- break;
- case DC_SW_64KB_D_T:
- *sw_mode = dm_sw_64kb_d_t;
- break;
- case DC_SW_64KB_R_X:
- *sw_mode = dm_sw_64kb_r_x;
- break;
- case DC_SW_VAR_S:
- *sw_mode = dm_sw_var_s;
- break;
- case DC_SW_VAR_S_X:
- *sw_mode = dm_sw_var_s_x;
- break;
- case DC_SW_VAR_D:
- *sw_mode = dm_sw_var_d;
- break;
- case DC_SW_VAR_D_X:
- *sw_mode = dm_sw_var_d_x;
- break;
-
- default:
- ASSERT(0); /* Not supported */
- break;
- }
-}
-
-static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest)
-{
- dest->hblank_start = timing->h_total - timing->h_front_porch;
- dest->hblank_end = dest->hblank_start
- - timing->h_addressable
- - timing->h_border_left
- - timing->h_border_right;
- dest->vblank_start = timing->v_total - timing->v_front_porch;
- dest->vblank_end = dest->vblank_start
- - timing->v_addressable
- - timing->v_border_top
- - timing->v_border_bottom;
- dest->htotal = timing->h_total;
- dest->vtotal = timing->v_total;
- dest->hactive = timing->h_addressable;
- dest->vactive = timing->v_addressable;
- dest->interlaced = timing->flags.INTERLACE;
- dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
- if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
- dest->pixel_rate_mhz *= 2;
-}
-
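To make the blank-interval bookkeeping above concrete, a worked example using standard CEA-861 1080p60 timing values (illustrative numbers, not taken from this file):

	/* h_total = 2200, h_front_porch = 88, h_addressable = 1920, no borders */
	dest->hblank_start = 2200 - 88;           /* = 2112 */
	dest->hblank_end   = 2112 - 1920 - 0 - 0; /* = 192, i.e. hsync + back porch */
	/* v_total = 1125, v_front_porch = 4, v_addressable = 1080 */
	dest->vblank_start = 1125 - 4;            /* = 1121 */
	dest->vblank_end   = 1121 - 1080 - 0 - 0; /* = 41 */
	/* pix_clk_100hz = 1485000 -> pixel_rate_mhz = 148.5 */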
-static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe)
-{
- int odm_split_count = 0;
- enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
-
- // Traverse pipe tree to determine odm split count
- while (next_pipe) {
- odm_split_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_split_count++;
- pipe = pipe->prev_odm_pipe;
- }
-
- // Translate split to DML odm combine factor
- switch (odm_split_count) {
- case 1:
- combine_mode = dm_odm_combine_mode_2to1;
- break;
- case 3:
- combine_mode = dm_odm_combine_mode_4to1;
- break;
- default:
- combine_mode = dm_odm_combine_mode_disabled;
- }
-
- return combine_mode;
-}
-
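For a chain of N pipes combined through ODM, the two traversals above always yield odm_split_count = N - 1, which is what the switch then maps (derived from the code above, shown here for reference):

	/* pipes in ODM chain -> odm_split_count -> DML combine mode    */
	/*        1           ->        0        -> ..._mode_disabled   */
	/*        2           ->        1        -> ..._mode_2to1       */
	/*        4           ->        3        -> ..._mode_4to1       */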
-static int get_dml_output_type(enum signal_type dc_signal)
-{
- int dml_output_type = -1;
-
- switch (dc_signal) {
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- case SIGNAL_TYPE_DISPLAY_PORT:
- dml_output_type = dm_dp;
- break;
- case SIGNAL_TYPE_EDP:
- dml_output_type = dm_edp;
- break;
- case SIGNAL_TYPE_HDMI_TYPE_A:
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- dml_output_type = dm_hdmi;
- break;
- default:
- break;
- }
-
- return dml_output_type;
-}
-
-static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout)
-{
- int output_bpc = 0;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- output_bpc = 6;
- break;
- case COLOR_DEPTH_888:
- output_bpc = 8;
- break;
- case COLOR_DEPTH_101010:
- output_bpc = 10;
- break;
- case COLOR_DEPTH_121212:
- output_bpc = 12;
- break;
- case COLOR_DEPTH_141414:
- output_bpc = 14;
- break;
- case COLOR_DEPTH_161616:
- output_bpc = 16;
- break;
- case COLOR_DEPTH_999:
- output_bpc = 9;
- break;
- case COLOR_DEPTH_111111:
- output_bpc = 11;
- break;
- default:
- output_bpc = 8;
- break;
- }
-
- switch (timing->pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- dout->output_format = dm_444;
- dout->output_bpp = output_bpc * 3;
- break;
- case PIXEL_ENCODING_YCBCR420:
- dout->output_format = dm_420;
- dout->output_bpp = (output_bpc * 3.0) / 2;
- break;
- case PIXEL_ENCODING_YCBCR422:
- if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple)
- dout->output_format = dm_n422;
- else
- dout->output_format = dm_s422;
- dout->output_bpp = output_bpc * 2;
- break;
- default:
- dout->output_format = dm_444;
- dout->output_bpp = output_bpc * 3;
- }
-}
-
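A few worked values for the output_bpp computation above (illustrative, derived directly from the switch cases):

	/* 4:4:4 at  8 bpc -> output_bpp =  8 * 3     = 24 */
	/* 4:2:0 at 10 bpc -> output_bpp = 10 * 3 / 2 = 15 */
	/* 4:2:2 at 12 bpc -> output_bpp = 12 * 2     = 24 */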
-static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format)
-{
- enum source_format_class dml_format = dm_444_32;
-
- switch (dc_format) {
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
- dml_format = dm_420_8;
- break;
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
- case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
- dml_format = dm_420_10;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
- case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
- dml_format = dm_444_64;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
- case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
- dml_format = dm_444_16;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
- dml_format = dm_444_8;
- break;
- case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
- dml_format = dm_rgbe_alpha;
- break;
- default:
- dml_format = dm_444_32;
- break;
- }
-
- return dml_format;
-}
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index b3d0a4ea2446..8919a2092ac5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -399,6 +399,10 @@ struct pipe_ctx {
struct dc_stream_state *stream;
struct plane_resource plane_res;
+
+ /**
+ * @stream_res: Reference to DCN resource components such as OPP and DSC.
+ */
struct stream_resource stream_res;
struct link_resource link_res;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 6682d9e181c6..b304d450b038 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -194,6 +194,11 @@ enum dc_status dpcd_configure_lttpr_mode(
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
+bool dp_is_lttpr_present(struct dc_link *link);
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting);
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override);
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
bool dpcd_write_128b_132b_sst_payload_allocation_table(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 5d2b028e5dad..d9f1b0a4fbd4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -214,6 +214,7 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 68c2ed434d2c..cff5fd55a0ad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
+
+ bool dpm_present;
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index d89bd55f110f..cd2be729846b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -268,6 +268,20 @@ enum dc_lut_mode {
LUT_RAM_B
};
+enum symclk_state {
+ SYMCLK_OFF_TX_OFF,
+ SYMCLK_ON_TX_ON,
+ SYMCLK_ON_TX_OFF,
+};
+
+struct phy_state {
+ struct {
+ uint8_t otg : 1;
+ uint8_t reserved : 7;
+ } symclk_ref_cnts;
+ enum symclk_state symclk_state;
+};
+
/**
* speakersToChannels
*
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 5097037e3962..8d86159d9de0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -22,6 +22,16 @@
*
*/
+/**
+ * DOC: mpc-overview
+ *
+ * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * that performs blending of multiple planes, using global and per-pixel alpha.
+ * It also performs post-blending color correction operations according to the
+ * hardware capabilities, such as color transformation matrix and gamma 1D and
+ * 3D LUT.
+ */
+
#ifndef __DC_MPCC_H__
#define __DC_MPCC_H__
@@ -48,14 +58,39 @@ enum mpcc_blend_mode {
MPCC_BLEND_MODE_TOP_BOT_BLENDING
};
+/**
+ * enum mpcc_alpha_blend_mode - define the alpha blend mode regarding pixel
+ * alpha and plane alpha values
+ */
enum mpcc_alpha_blend_mode {
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA: per pixel alpha using DPP
+ * alpha value
+ */
MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN: per
+ * pixel alpha using DPP alpha value multiplied by a global gain (plane
+ * alpha)
+ */
MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
+ /**
+ * @MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA: global alpha value, ignores
+ * pixel alpha and considers only plane alpha
+ */
MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
};
-/*
- * MPCC blending configuration
+/**
+ * struct mpcc_blnd_cfg - MPCC blending configuration
+ *
+ * @black_color: background color
+ * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
+ * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
+ * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
+ * @global_gain: used when the blend mode considers both pixel alpha and plane
+ * alpha; in that case it takes the global (plane) alpha value.
+ * @global_alpha: plane alpha value
*/
struct mpcc_blnd_cfg {
struct tg_color black_color; /* background color */
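A minimal sketch of what the three alpha blend modes amount to, using only the mpcc_blnd_cfg fields documented above; mpcc_effective_alpha is a hypothetical helper (not part of this header) and 8-bit alpha values are assumed:

	static unsigned int mpcc_effective_alpha(const struct mpcc_blnd_cfg *cfg,
						 unsigned int pixel_alpha)
	{
		switch (cfg->alpha_mode) {
		case MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA:
			return pixel_alpha;                          /* DPP per-pixel alpha only */
		case MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN:
			return pixel_alpha * cfg->global_gain / 255; /* pixel alpha scaled by plane alpha */
		case MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA:
		default:
			return cfg->global_alpha;                    /* plane alpha only */
		}
	}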
@@ -107,8 +142,15 @@ struct mpc_dwb_flow_control {
int flow_ctrl_cnt1;
};
-/*
- * MPCC connection and blending configuration for a single MPCC instance.
+/**
+ * struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
+ * @mpcc_id: MPCC physical instance
+ * @dpp_id: DPP input to this MPCC
+ * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
+ * @blnd_cfg: the blending configuration for this MPCC
+ * @sm_cfg: stereo mix setting for this MPCC
+ * @shared_bottom: true if the MPCC outputs to both OPP and DWB endpoints; false otherwise.
+ *
* This struct is used as a node in an MPC tree.
*/
struct mpcc {
@@ -120,8 +162,12 @@ struct mpcc {
bool shared_bottom; /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
};
-/*
- * MPC tree represents all MPCC connections for a pipe.
+/**
+ * struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
+ *
+ * @opp_id: the OPP instance that owns this MPC tree
+ * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
+ *
*/
struct mpc_tree {
int opp_id; /* The OPP instance that owns this MPC tree */
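A short sketch (not from this header) of walking an MPC tree from the top blending layer down to the background, using the opp_list and mpcc_bot fields documented above:

	static int mpc_tree_depth(const struct mpc_tree *tree)
	{
		const struct mpcc *layer;
		int depth = 0;

		/* opp_list is the top layer; mpcc_bot is NULL at the background layer */
		for (layer = tree->opp_list; layer; layer = layer->mpcc_bot)
			depth++;

		return depth;
	}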
@@ -149,13 +195,18 @@ struct mpcc_state {
uint32_t busy;
};
+/**
+ * struct mpc_funcs - MPC hardware control functions
+ */
struct mpc_funcs {
void (*read_mpcc_state)(
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
- /*
+ /**
+ * @insert_plane:
+ *
* Insert DPP into MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for OPP output
*
@@ -180,7 +231,9 @@ struct mpc_funcs {
int dpp_id,
int mpcc_id);
- /*
+ /**
+ * @remove_mpcc:
+ *
* Remove a specified MPCC from the MPC tree.
*
* Parameters:
@@ -195,7 +248,9 @@ struct mpc_funcs {
struct mpc_tree *tree,
struct mpcc *mpcc);
- /*
+ /**
+ * @mpc_init:
+ *
* Reset the MPCC HW status by disconnecting all muxes.
*
* Parameters:
@@ -208,7 +263,9 @@ struct mpc_funcs {
struct mpc *mpc,
unsigned int mpcc_id);
- /*
+ /**
+ * @update_blending:
+ *
* Update the blending configuration for a specified MPCC.
*
* Parameters:
@@ -223,7 +280,9 @@ struct mpc_funcs {
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id);
- /*
+ /**
+ * @cursor_lock:
+ *
* Lock cursor updates for the specified OPP.
* OPP defines the set of MPCC that are locked together for cursor.
*
@@ -239,8 +298,10 @@ struct mpc_funcs {
int opp_id,
bool lock);
- /*
- * Add DPP into 'secondary' MPC tree based on specified blending position.
+ /**
+ * @insert_plane_to_secondary:
+ *
+ * Add DPP into secondary MPC tree based on specified blending position.
* Only used for planes that are part of blending chain for DWB output
*
* Parameters:
@@ -264,7 +325,9 @@ struct mpc_funcs {
int dpp_id,
int mpcc_id);
- /*
+ /**
+ * @remove_mpcc_from_secondary:
+ *
* Remove a specified DPP from the 'secondary' MPC tree.
*
* Parameters:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 4cfa733cf96f..72eef7a5ed83 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -137,7 +137,13 @@ struct crc_params {
bool enable;
};
+/**
+ * struct timing_generator - Entry point to Output Timing Generator feature.
+ */
struct timing_generator {
+ /**
+ * @funcs: Timing generator control functions
+ */
const struct timing_generator_funcs *funcs;
struct dc_bios *bp;
struct dc_context *ctx;
@@ -148,7 +154,9 @@ struct dc_crtc_timing;
struct drr_params;
-
+/**
+ * struct timing_generator_funcs - Control timing generator on a given device.
+ */
struct timing_generator_funcs {
bool (*validate_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing);
@@ -273,8 +281,8 @@ struct timing_generator_funcs {
const struct crc_params *params);
/**
- * Get CRCs for the given timing generator. Return false if CRCs are
- * not enabled (via configure_crc).
+ * @get_crc: Get CRCs for the given timing generator. Return false if
+ * CRCs are not enabled (via configure_crc).
*/
bool (*get_crc)(struct timing_generator *tg,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index ccb3c719fc4d..d04b68dad413 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,11 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum vline_select {
- VLINE0,
- VLINE1
-};
-
struct pipe_ctx;
struct dc_state;
struct dc_stream_status;
@@ -48,6 +43,7 @@ struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
struct dpp;
struct dce_hwseq;
+struct link_resource;
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
@@ -88,6 +84,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*power_down)(struct dc *dc);
+ void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -116,8 +113,7 @@ struct hw_sequencer_funcs {
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*setup_periodic_interrupt)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct dc_crtc_timing_adjust adjust);
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
@@ -218,6 +214,25 @@ struct hw_sequencer_funcs {
void (*set_pipe)(struct pipe_ctx *pipe_ctx);
+ void (*enable_dp_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+ void (*enable_tmds_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock);
+ void (*enable_lvds_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock);
+ void (*disable_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);
/* Idle Optimization Related */
@@ -245,6 +260,10 @@ struct hw_sequencer_funcs {
struct tg_color *color,
int mpcc_id);
+ void (*update_phantom_vp_position)(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
@@ -271,6 +290,11 @@ void get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_subvp_visual_confirm_color(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 1cdea0efe5c1..a4d61bb724b6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -124,6 +124,8 @@ struct hwseq_private_funcs {
void (*dsc_pg_control)(struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on);
+ bool (*dsc_pg_status)(struct dce_hwseq *hws,
+ unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 3482a877b6af..89964c980b87 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -55,9 +55,6 @@ struct link_hwss_ext {
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
- void (*disable_dp_link_output)(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal);
void (*set_dp_link_test_pattern)(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params);
@@ -79,6 +76,9 @@ struct link_hwss {
void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*setup_stream_attribute)(struct pipe_ctx *pipe_ctx);
+ void (*disable_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
};
#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 58158764adc0..c37d1141febe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -219,9 +219,15 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context,
uint8_t disabled_master_pipe_idx);
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx);
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
const struct link_hwss *get_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
+bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
index 5e92019539c8..4227adbc646a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
@@ -130,7 +130,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
-void disable_dio_dp_link_output(struct dc_link *link,
+void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
@@ -174,10 +174,10 @@ static const struct link_hwss dio_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
.update_stream_allocation_table = update_dio_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
index 08f22b32df48..126d37f847a1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
@@ -40,7 +40,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
-void disable_dio_dp_link_output(struct dc_link *link,
+void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);
void set_dio_dp_link_test_pattern(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
index 89d4e8159138..64f7ea6a9aa3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
@@ -56,10 +56,10 @@ static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index db7b0b155374..7d3147175ca2 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->disable(stream_enc);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
@@ -266,11 +266,11 @@ static const struct link_hwss hpo_dp_link_hwss = {
.setup_stream_encoder = setup_hpo_dp_stream_encoder,
.reset_stream_encoder = reset_hpo_dp_stream_encoder,
.setup_stream_attribute = setup_hpo_dp_stream_attribute,
+ .disable_link_output = disable_hpo_dp_link_output,
.ext = {
.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
.enable_dp_link_output = enable_hpo_dp_link_output,
- .disable_dp_link_output = disable_hpo_dp_link_output,
.set_dp_link_test_pattern = set_hpo_dp_link_test_pattern,
.set_dp_lane_settings = set_hpo_dp_lane_settings,
.update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 501173ce270e..9522fe0b36c9 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -36,10 +36,18 @@ void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx)
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
+
+void virtual_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+}
+
static const struct link_hwss virtual_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
.setup_stream_attribute = virtual_setup_stream_attribute,
+ .disable_link_output = virtual_disable_link_output,
};
const struct link_hwss *get_virtual_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index ced176d17bae..f34c45b19fcb 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -441,6 +441,7 @@ struct dmub_srv {
/* Feature capabilities reported by fw */
struct dmub_feature_caps feature_caps;
+ struct dmub_visual_confirm_color visual_confirm_color;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index d7f3619352f0..5d1aadade8a5 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -234,8 +234,7 @@ union dmub_psr_debug_flags {
};
/**
- * DMUB feature capabilities.
- * After DMUB init, driver will query FW capabilities prior to enabling certain features.
+ * DMUB visual confirm color
*/
struct dmub_feature_caps {
/**
@@ -246,6 +245,16 @@ struct dmub_feature_caps {
uint8_t reserved[6];
};
+struct dmub_visual_confirm_color {
+ /**
+ * Maximum 10-bit color value
+ */
+ uint16_t color_r_cr;
+ uint16_t color_g_y;
+ uint16_t color_b_cb;
+ uint16_t panel_inst;
+};
+
#if defined(__cplusplus)
}
#endif
@@ -645,6 +654,10 @@ enum dmub_cmd_type {
*/
DMUB_CMD__QUERY_FEATURE_CAPS = 6,
/**
+ * Command type used to get visual confirm color.
+ */
+ DMUB_CMD__GET_VISUAL_CONFIRM_COLOR = 8,
+ /**
* Command type used for all PSR commands.
*/
DMUB_CMD__PSR = 64,
@@ -747,6 +760,11 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
+enum dmub_cmd_header_sub_type {
+ DMUB_CMD__SUB_TYPE_GENERAL = 0,
+ DMUB_CMD__SUB_TYPE_CURSOR_POSITION = 1
+};
+
#pragma pack(push, 1)
/**
@@ -976,8 +994,17 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
uint16_t vtotal;
uint8_t main_pipe_index;
uint8_t phantom_pipe_index;
+ /* Since the microschedule is calculated in terms of OTG lines,
+ * include any scaling factors to make sure we get an accurate
+ * conversion when programming MALL_START_LINE (which is in terms
+ * of HUBP lines). If 4K is being downscaled to 1080p, scale factor
+ * is 1/2 (numerator = 1, denominator = 2).
+ */
+ uint8_t scale_factor_numerator;
+ uint8_t scale_factor_denominator;
uint8_t is_drr;
- uint8_t padding;
+ uint8_t main_split_pipe_index;
+ uint8_t phantom_split_pipe_index;
} subvp_data;
struct {
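A quick worked conversion for the scale factor fields above (illustrative values): for a 4K surface downscaled to 1080p, scale_factor_numerator = 1 and scale_factor_denominator = 2, so OTG line counts are halved when translated to HUBP lines:

	/* hubp_lines = otg_lines * scale_factor_numerator / scale_factor_denominator */
	/* e.g. 1200 OTG lines * 1 / 2 = 600 HUBP lines when programming MALL_START_LINE */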
@@ -999,7 +1026,11 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
} vblank_data;
} pipe_config;
- enum mclk_switch_mode mode;
+ /* - subvp_data in the union (pipe_config) takes up 27 bytes.
+ * - Make the "mode" field a uint8_t instead of an enum so it only uses 1 byte (this
+ * applies only to the DMCUB command; cast to the enum once the DMCUB SubVP state is populated).
+ */
+ uint8_t mode; // enum mclk_switch_mode
};
/**
@@ -2766,6 +2797,31 @@ struct dmub_rb_cmd_query_feature_caps {
struct dmub_cmd_query_feature_caps_data query_feature_caps_data;
};
+/**
+ * Data passed from driver to FW in a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+struct dmub_cmd_visual_confirm_color_data {
+ /**
+ * Visual confirm color.
+ */
+struct dmub_visual_confirm_color visual_confirm_color;
+};
+
+/**
+ * Definition of a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+struct dmub_rb_cmd_get_visual_confirm_color {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+ struct dmub_cmd_visual_confirm_color_data visual_confirm_color_data;
+};
+
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
@@ -3138,6 +3194,11 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_rb_cmd_query_feature_caps query_feature_caps;
+
+ /**
+ * Definition of a DMUB_CMD__GET_VISUAL_CONFIRM_COLOR command.
+ */
+ struct dmub_rb_cmd_get_visual_confirm_color visual_confirm_color;
struct dmub_rb_cmd_drr_update drr_update;
struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 9f3558c0ef11..c3089c673975 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -215,6 +215,7 @@ enum {
#define DEVICE_ID_NV_143F 0x143F
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
+#define DEVICE_ID_VGH_1435 0x1435
#define VANGOGH_A0 0x01
#define VANGOGH_UNKNOWN 0xFF
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 05096c644a60..a7ba5bd8dc16 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -128,8 +128,8 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
-static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
-static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 79fabc51c991..d1e91d31d151 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -83,6 +83,7 @@ enum link_training_result {
};
enum lttpr_mode {
+ LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
LTTPR_MODE_TRANSPARENT,
LTTPR_MODE_NON_TRANSPARENT,
@@ -246,8 +247,16 @@ union dpcd_training_lane_set {
};
+/* AMD's copy of various payload data for MST. We have two copies of the payload table (one in DRM,
+ * one in DC) since DRM's MST helpers can't be accessed here. This stream allocation table should
+ * _ONLY_ be filled out from DM and then passed to DC; do NOT use these for _any_ kind of atomic
+ * state calculations in DM, or you will break something.
+ */
+
+struct drm_dp_mst_port;
+
/* DP MST stream allocation (payload bandwidth number) */
-struct dp_mst_stream_allocation {
+struct dc_dp_mst_stream_allocation {
uint8_t vcp_id;
/* number of slots required for the DP stream in
* transport packet */
@@ -255,11 +264,11 @@ struct dp_mst_stream_allocation {
};
/* DP MST stream allocation table */
-struct dp_mst_stream_allocation_table {
+struct dc_dp_mst_stream_allocation_table {
/* number of DP video streams */
int stream_count;
/* array of stream allocations */
- struct dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+ struct dc_dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
#endif /*__DAL_LINK_SERVICE_TYPES_H__*/
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 859ffd8725c5..04f7656906ca 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index f21554a1c86c..3973110f149c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -3129,6 +3129,8 @@
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x15cc
#define mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define mmGCVM_DEBUG 0x15cd
+#define mmGCVM_DEBUG_BASE_IDX 0
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x15ce
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
#define mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x15cf
@@ -3151,6 +3153,8 @@
#define mmGCVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
#define mmGCVM_L2_CACHE_PARITY_CNTL 0x15d8
#define mmGCVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define mmGCVM_L2_IH_LOG_CNTL 0x15d9
+#define mmGCVM_L2_IH_LOG_CNTL_BASE_IDX 0
#define mmGCVM_L2_CNTL5 0x15dc
#define mmGCVM_L2_CNTL5_BASE_IDX 0
#define mmGCVM_L2_GCR_CNTL 0x15dd
@@ -9796,14 +9800,118 @@
// addressBlock: gc_pwrdec
// base address: 0x3c000
+#define mmCGTS_RD_CTRL_REG 0x5004
+#define mmCGTS_RD_CTRL_REG_BASE_IDX 1
+#define mmCGTS_RD_REG 0x5005
+#define mmCGTS_RD_REG_BASE_IDX 1
+#define mmCGTS_TCC_DISABLE 0x5006
+#define mmCGTS_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_USER_TCC_DISABLE 0x5007
+#define mmCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_STATUS_REG 0x5008
+#define mmCGTS_STATUS_REG_BASE_IDX 1
+#define mmCGTT_SPI_CGTSSM_CLK_CTRL 0x5009
+#define mmCGTT_SPI_CGTSSM_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_PS_CLK_CTRL 0x507d
+#define mmCGTT_SPI_PS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPIS_CLK_CTRL 0x507e
+#define mmCGTT_SPIS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_CLK_CTRL 0x5080
+#define mmCGTT_SPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PC_CLK_CTRL 0x5081
+#define mmCGTT_PC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_BCI_CLK_CTRL 0x5082
+#define mmCGTT_BCI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_VGT_CLK_CTRL 0x5084
+#define mmCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_IA_CLK_CTRL 0x5085
+#define mmCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_WD_CLK_CTRL 0x5086
+#define mmCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GS_NGG_CLK_CTRL 0x5087
+#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PA_CLK_CTRL 0x5088
+#define mmCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL0 0x5089
+#define mmCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL1 0x508a
+#define mmCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL2 0x508b
+#define mmCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SQ_CLK_CTRL 0x508c
+#define mmCGTT_SQ_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SQG_CLK_CTRL 0x508d
+#define mmCGTT_SQG_CLK_CTRL_BASE_IDX 1
#define mmSQ_ALU_CLK_CTRL 0x508e
#define mmSQ_ALU_CLK_CTRL_BASE_IDX 1
#define mmSQ_TEX_CLK_CTRL 0x508f
#define mmSQ_TEX_CLK_CTRL_BASE_IDX 1
#define mmSQ_LDS_CLK_CTRL 0x5090
#define mmSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL0 0x5094
+#define mmCGTT_SX_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL1 0x5095
+#define mmCGTT_SX_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL2 0x5096
+#define mmCGTT_SX_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL3 0x5097
+#define mmCGTT_SX_CLK_CTRL3_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL4 0x5098
+#define mmCGTT_SX_CLK_CTRL4_BASE_IDX 1
+#define mmTD_CGTT_CTRL 0x509c
+#define mmTD_CGTT_CTRL_BASE_IDX 1
+#define mmTA_CGTT_CTRL 0x509d
+#define mmTA_CGTT_CTRL_BASE_IDX 1
+#define mmCGTT_TCPI_CLK_CTRL 0x5109
+#define mmCGTT_TCPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GDS_CLK_CTRL 0x50a0
+#define mmCGTT_GDS_CLK_CTRL_BASE_IDX 1
+#define mmDB_CGTT_CLK_CTRL_0 0x50a4
+#define mmDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define mmCB_CGTT_SCLK_CTRL 0x50a8
+#define mmCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2C_CGTT_SCLK_CTRL 0x50fc
+#define mmGL2C_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2A_CGTT_SCLK_CTRL 0x50ac
+#define mmGL2A_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2A_CGTT_SCLK_CTRL_1 0x50ad
+#define mmGL2A_CGTT_SCLK_CTRL_1_BASE_IDX 1
+#define mmCGTT_CP_CLK_CTRL 0x50b0
+#define mmCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPF_CLK_CTRL 0x50b1
+#define mmCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPC_CLK_CTRL 0x50b2
+#define mmCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_RLC_CLK_CTRL 0x50b5
+#define mmCGTT_RLC_CLK_CTRL_BASE_IDX 1
#define mmRLC_GFX_RM_CNTL 0x50b6
#define mmRLC_GFX_RM_CNTL_BASE_IDX 1
+#define mmRMI_CGTT_SCLK_CTRL 0x50c0
+#define mmRMI_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmCGTT_TCPF_CLK_CTRL 0x5111
+#define mmCGTT_TCPF_CLK_CTRL_BASE_IDX 1
+#define mmGCR_CGTT_SCLK_CTRL 0x50c2
+#define mmGCR_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmUTCL1_CGTT_CLK_CTRL 0x50c3
+#define mmUTCL1_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGCEA_CGTT_CLK_CTRL 0x50c4
+#define mmGCEA_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmSE_CAC_CGTT_CLK_CTRL 0x50d0
+#define mmSE_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGC_CAC_CGTT_CLK_CTRL 0x50d8
+#define mmGC_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGRBM_CGTT_CLK_CNTL 0x50e0
+#define mmGRBM_CGTT_CLK_CNTL_BASE_IDX 1
+#define mmGUS_CGTT_CLK_CTRL 0x50f4
+#define mmGUS_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL0 0x50f8
+#define mmCGTT_PH_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL1 0x50f9
+#define mmCGTT_PH_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL2 0x50fa
+#define mmCGTT_PH_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL3 0x50fb
+#define mmCGTT_PH_CLK_CTRL3_BASE_IDX 1
// addressBlock: gc_hypdec
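
For orientation: each mm* value above is a dword offset within a register segment, and the matching *_BASE_IDX selects which segment base it is added to (roughly what amdgpu's SOC15_REG_OFFSET() lookup does). The sketch below shows that addition with hypothetical segment bases; the gc_base[] values are placeholders, not the real GC 10.3 aperture layout.

/* Illustration of offset + BASE_IDX composition; bases are made-up placeholders. */
#include <stdint.h>
#include <stdio.h>

#define mmCGTS_TCC_DISABLE		0x5006
#define mmCGTS_TCC_DISABLE_BASE_IDX	1

/* Hypothetical per-segment bases; the driver fills these from SoC/IP data. */
static const uint32_t gc_base[] = { 0x00001260, 0x0000A000, 0x0001C000 };

static uint32_t reg_offset(uint32_t base_idx, uint32_t offset)
{
	return gc_base[base_idx] + offset;	/* absolute dword offset */
}

int main(void)
{
	printf("CGTS_TCC_DISABLE -> dword offset 0x%08x\n",
	       reg_offset(mmCGTS_TCC_DISABLE_BASE_IDX, mmCGTS_TCC_DISABLE));
	return 0;
}
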
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index a827b0ff8905..d4e8ff22ecb8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -34547,6 +34547,503 @@
// addressBlock: gc_pwrdec
+//CGTS_RD_CTRL_REG
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x4
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000000FL
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x000000F0L
+//CGTS_RD_REG
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
+#define CGTS_RD_REG__READ_DATA_MASK 0xFFFFFFFFL
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTS_STATUS_REG
+#define CGTS_STATUS_REG__SA0_QUAD0_MGCG_ENABLED__SHIFT 0x0
+#define CGTS_STATUS_REG__SA0_QUAD0_CG_STATUS__SHIFT 0x1
+#define CGTS_STATUS_REG__SA1_QUAD0_MGCG_ENABLED__SHIFT 0x8
+#define CGTS_STATUS_REG__SA1_QUAD0_CG_STATUS__SHIFT 0x9
+#define CGTS_STATUS_REG__SA0_QUAD0_MGCG_ENABLED_MASK 0x00000001L
+#define CGTS_STATUS_REG__SA0_QUAD0_CG_STATUS_MASK 0x00000006L
+#define CGTS_STATUS_REG__SA1_QUAD0_MGCG_ENABLED_MASK 0x00000100L
+#define CGTS_STATUS_REG__SA1_QUAD0_CG_STATUS_MASK 0x00000600L
+//CGTT_SPI_CGTSSM_CLK_CTRL
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+//CGTT_SPI_PS_CLK_CTRL
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPIS_CLK_CTRL
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPI_CLK_CTRL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PC_CLK_CTRL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE__SHIFT 0x11
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0xd
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0xe
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE_MASK 0x00020000L
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x00002000L
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x00004000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+//CGTT_BCI_CLK_CTRL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE__SHIFT 0x1d
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE__SHIFT 0x1e
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE_MASK 0x20000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE_MASK 0x40000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__DBR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__DBR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQ_CLK_CTRL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
//SQ_ALU_CLK_CTRL
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
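
The *__SHIFT / *_MASK pairs added above follow the usual pattern for extracting and updating named bit-fields (amdgpu wraps this in REG_GET_FIELD()/REG_SET_FIELD()). The helpers below are simplified stand-ins written only for this sketch, reusing one field from the new definitions.

/* Simplified field accessors; not the driver's macros. */
#include <stdint.h>
#include <stdio.h>

#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT	0x10
#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK	0xFFFF0000L

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;		/* extract a named bit-field */
}

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);	/* update it in place */
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, CGTS_TCC_DISABLE__TCC_DISABLE_MASK,
			CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT, 0x3);
	printf("TCC_DISABLE = 0x%x\n",
	       get_field(reg, CGTS_TCC_DISABLE__TCC_DISABLE_MASK,
			 CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT));
	return 0;
}
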
@@ -34562,12 +35059,982 @@
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//CGTT_SX_CLK_CTRL0
+#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL1
+#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL2
+#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL3
+#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL4
+#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
+//TD_CGTT_CTRL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPI_CLK_CTRL
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0xf
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x17
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x18
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x00007000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00008000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x00800000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x01000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x40000000L
+//CGTT_GDS_CLK_CTRL
+#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GDS_CLK_CTRL__UNUSED__SHIFT 0xc
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GDS_CLK_CTRL__UNUSED_MASK 0x0000F000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2C_CGTT_SCLK_CTRL
+#define GL2C_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GL2C_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2C_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GL2C_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2A_CGTT_SCLK_CTRL
+#define GL2A_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GL2A_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2A_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GL2A_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2A_CGTT_SCLK_CTRL_1
+#define GL2A_CGTT_SCLK_CTRL_1__ON_DELAY__SHIFT 0x0
+#define GL2A_CGTT_SCLK_CTRL_1__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2A_CGTT_SCLK_CTRL_1__ON_DELAY_MASK 0x0000000FL
+#define GL2A_CGTT_SCLK_CTRL_1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1a
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT__SHIFT 0x1b
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP__SHIFT 0x1c
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x04000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT_MASK 0x08000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP_MASK 0x10000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__RESERVED__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_RLC_CLK_CTRL__RESERVED_MASK 0x0000000FL
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
//RLC_GFX_RM_CNTL
#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
-
+//RMI_CGTT_SCLK_CTRL
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPF_CLK_CTRL
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0xf
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x17
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x18
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x00007000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00008000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x00800000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x01000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x40000000L
+//GCR_CGTT_SCLK_CTRL
+#define GCR_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCR_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GCR_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCR_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//UTCL1_CGTT_CLK_CTRL
+#define UTCL1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define UTCL1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GCEA_CGTT_CLK_CTRL
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCEA_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GCEA_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCEA_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GCEA_CGTT_CLK_CTRL__SPARE1_MASK 0x0F800000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//SE_CAC_CGTT_CLK_CTRL
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GC_CAC_CGTT_CLK_CTRL
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GRBM_CGTT_CLK_CNTL
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0x0000000FL
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+//GUS_CGTT_CLK_CTRL
+#define GUS_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GUS_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GUS_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_DRAM__SHIFT 0x13
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GUS_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_DRAM__SHIFT 0x1b
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GUS_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GUS_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GUS_CGTT_CLK_CTRL__SPARE0_MASK 0x0007F000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_DRAM_MASK 0x00080000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GUS_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_DRAM_MASK 0x08000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL0
+#define CGTT_PH_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PH_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL1
+#define CGTT_PH_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL2
+#define CGTT_PH_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL3
+#define CGTT_PH_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
// addressBlock: gc_hypdec
//CP_HYP_PFP_UCODE_ADDR
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
new file mode 100644
index 000000000000..3b95a59b196c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
@@ -0,0 +1,12086 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_11_0_3_OFFSET_HEADER
+#define _gc_11_0_3_OFFSET_HEADER
+
+
+
+// addressBlock: gc_sdma0_sdma0dec
+// base address: 0x4980
+#define regSDMA0_DEC_START 0x0000
+#define regSDMA0_DEC_START_BASE_IDX 0
+#define regSDMA0_F32_MISC_CNTL 0x000b
+#define regSDMA0_F32_MISC_CNTL_BASE_IDX 0
+#define regSDMA0_GLOBAL_TIMESTAMP_LO 0x000f
+#define regSDMA0_GLOBAL_TIMESTAMP_LO_BASE_IDX 0
+#define regSDMA0_GLOBAL_TIMESTAMP_HI 0x0010
+#define regSDMA0_GLOBAL_TIMESTAMP_HI_BASE_IDX 0
+#define regSDMA0_POWER_CNTL 0x001a
+#define regSDMA0_POWER_CNTL_BASE_IDX 0
+#define regSDMA0_CNTL 0x001c
+#define regSDMA0_CNTL_BASE_IDX 0
+#define regSDMA0_CHICKEN_BITS 0x001d
+#define regSDMA0_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA0_GB_ADDR_CONFIG 0x001e
+#define regSDMA0_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA0_GB_ADDR_CONFIG_READ 0x001f
+#define regSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA0_RB_RPTR_FETCH 0x0020
+#define regSDMA0_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA0_RB_RPTR_FETCH_HI 0x0021
+#define regSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0022
+#define regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA0_IB_OFFSET_FETCH 0x0023
+#define regSDMA0_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA0_PROGRAM 0x0024
+#define regSDMA0_PROGRAM_BASE_IDX 0
+#define regSDMA0_STATUS_REG 0x0025
+#define regSDMA0_STATUS_REG_BASE_IDX 0
+#define regSDMA0_STATUS1_REG 0x0026
+#define regSDMA0_STATUS1_REG_BASE_IDX 0
+#define regSDMA0_CNTL1 0x0027
+#define regSDMA0_CNTL1_BASE_IDX 0
+#define regSDMA0_HBM_PAGE_CONFIG 0x0028
+#define regSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA0_UCODE_CHECKSUM 0x0029
+#define regSDMA0_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA0_FREEZE 0x002b
+#define regSDMA0_FREEZE_BASE_IDX 0
+#define regSDMA0_PROCESS_QUANTUM0 0x002c
+#define regSDMA0_PROCESS_QUANTUM0_BASE_IDX 0
+#define regSDMA0_PROCESS_QUANTUM1 0x002d
+#define regSDMA0_PROCESS_QUANTUM1_BASE_IDX 0
+#define regSDMA0_WATCHDOG_CNTL 0x002e
+#define regSDMA0_WATCHDOG_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE_STATUS0 0x002f
+#define regSDMA0_QUEUE_STATUS0_BASE_IDX 0
+#define regSDMA0_EDC_CONFIG 0x0032
+#define regSDMA0_EDC_CONFIG_BASE_IDX 0
+#define regSDMA0_BA_THRESHOLD 0x0033
+#define regSDMA0_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA0_ID 0x0034
+#define regSDMA0_ID_BASE_IDX 0
+#define regSDMA0_VERSION 0x0035
+#define regSDMA0_VERSION_BASE_IDX 0
+#define regSDMA0_EDC_COUNTER 0x0036
+#define regSDMA0_EDC_COUNTER_BASE_IDX 0
+#define regSDMA0_EDC_COUNTER_CLEAR 0x0037
+#define regSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define regSDMA0_STATUS2_REG 0x0038
+#define regSDMA0_STATUS2_REG_BASE_IDX 0
+#define regSDMA0_ATOMIC_CNTL 0x0039
+#define regSDMA0_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA0_ATOMIC_PREOP_LO 0x003a
+#define regSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA0_ATOMIC_PREOP_HI 0x003b
+#define regSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA0_UTCL1_CNTL 0x003c
+#define regSDMA0_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA0_UTCL1_WATERMK 0x003d
+#define regSDMA0_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA0_UTCL1_TIMEOUT 0x003e
+#define regSDMA0_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA0_UTCL1_PAGE 0x003f
+#define regSDMA0_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_STATUS 0x0040
+#define regSDMA0_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_STATUS 0x0041
+#define regSDMA0_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA0_UTCL1_INV0 0x0042
+#define regSDMA0_UTCL1_INV0_BASE_IDX 0
+#define regSDMA0_UTCL1_INV1 0x0043
+#define regSDMA0_UTCL1_INV1_BASE_IDX 0
+#define regSDMA0_UTCL1_INV2 0x0044
+#define regSDMA0_UTCL1_INV2_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_XNACK0 0x0045
+#define regSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA0_UTCL1_RD_XNACK1 0x0046
+#define regSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_XNACK0 0x0047
+#define regSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA0_UTCL1_WR_XNACK1 0x0048
+#define regSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA0_RELAX_ORDERING_LUT 0x004a
+#define regSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA0_CHICKEN_BITS_2 0x004b
+#define regSDMA0_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA0_STATUS3_REG 0x004c
+#define regSDMA0_STATUS3_REG_BASE_IDX 0
+#define regSDMA0_PHYSICAL_ADDR_LO 0x004d
+#define regSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_PHYSICAL_ADDR_HI 0x004e
+#define regSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_GLOBAL_QUANTUM 0x004f
+#define regSDMA0_GLOBAL_QUANTUM_BASE_IDX 0
+#define regSDMA0_ERROR_LOG 0x0050
+#define regSDMA0_ERROR_LOG_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG0 0x0051
+#define regSDMA0_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG1 0x0052
+#define regSDMA0_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG2 0x0053
+#define regSDMA0_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA0_PUB_DUMMY_REG3 0x0054
+#define regSDMA0_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA0_F32_COUNTER 0x0055
+#define regSDMA0_F32_COUNTER_BASE_IDX 0
+#define regSDMA0_CRD_CNTL 0x005b
+#define regSDMA0_CRD_CNTL_BASE_IDX 0
+#define regSDMA0_RLC_CGCG_CTRL 0x005c
+#define regSDMA0_RLC_CGCG_CTRL_BASE_IDX 0
+#define regSDMA0_GPU_IOV_VIOLATION_LOG 0x005d
+#define regSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA0_AQL_STATUS 0x005f
+#define regSDMA0_AQL_STATUS_BASE_IDX 0
+#define regSDMA0_EA_DBIT_ADDR_DATA 0x0060
+#define regSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA0_EA_DBIT_ADDR_INDEX 0x0061
+#define regSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA0_TLBI_GCR_CNTL 0x0062
+#define regSDMA0_TLBI_GCR_CNTL_BASE_IDX 0
+#define regSDMA0_TILING_CONFIG 0x0063
+#define regSDMA0_TILING_CONFIG_BASE_IDX 0
+#define regSDMA0_HASH 0x0064
+#define regSDMA0_HASH_BASE_IDX 0
+#define regSDMA0_INT_STATUS 0x0070
+#define regSDMA0_INT_STATUS_BASE_IDX 0
+#define regSDMA0_GPU_IOV_VIOLATION_LOG2 0x0071
+#define regSDMA0_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA0_HOLE_ADDR_LO 0x0072
+#define regSDMA0_HOLE_ADDR_LO_BASE_IDX 0
+#define regSDMA0_HOLE_ADDR_HI 0x0073
+#define regSDMA0_HOLE_ADDR_HI_BASE_IDX 0
+#define regSDMA0_CLOCK_GATING_STATUS 0x0075
+#define regSDMA0_CLOCK_GATING_STATUS_BASE_IDX 0
+#define regSDMA0_STATUS4_REG 0x0076
+#define regSDMA0_STATUS4_REG_BASE_IDX 0
+#define regSDMA0_SCRATCH_RAM_DATA 0x0077
+#define regSDMA0_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA0_SCRATCH_RAM_ADDR 0x0078
+#define regSDMA0_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA0_TIMESTAMP_CNTL 0x0079
+#define regSDMA0_TIMESTAMP_CNTL_BASE_IDX 0
+#define regSDMA0_STATUS5_REG 0x007a
+#define regSDMA0_STATUS5_REG_BASE_IDX 0
+#define regSDMA0_QUEUE_RESET_REQ 0x007b
+#define regSDMA0_QUEUE_RESET_REQ_BASE_IDX 0
+#define regSDMA0_STATUS6_REG 0x007c
+#define regSDMA0_STATUS6_REG_BASE_IDX 0
+#define regSDMA0_UCODE1_CHECKSUM 0x007d
+#define regSDMA0_UCODE1_CHECKSUM_BASE_IDX 0
+#define regSDMA0_CE_CTRL 0x007e
+#define regSDMA0_CE_CTRL_BASE_IDX 0
+#define regSDMA0_FED_STATUS 0x007f
+#define regSDMA0_FED_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_CNTL 0x0080
+#define regSDMA0_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_BASE 0x0081
+#define regSDMA0_QUEUE0_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_BASE_HI 0x0082
+#define regSDMA0_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR 0x0083
+#define regSDMA0_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_HI 0x0084
+#define regSDMA0_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR 0x0085
+#define regSDMA0_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_HI 0x0086
+#define regSDMA0_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_HI 0x0088
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_LO 0x0089
+#define regSDMA0_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_CNTL 0x008a
+#define regSDMA0_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_RPTR 0x008b
+#define regSDMA0_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_OFFSET 0x008c
+#define regSDMA0_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_BASE_LO 0x008d
+#define regSDMA0_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_BASE_HI 0x008e
+#define regSDMA0_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_SIZE 0x008f
+#define regSDMA0_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE0_SKIP_CNTL 0x0090
+#define regSDMA0_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_CONTEXT_STATUS 0x0091
+#define regSDMA0_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL 0x0092
+#define regSDMA0_QUEUE0_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL_LOG 0x00a9
+#define regSDMA0_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE0_DOORBELL_OFFSET 0x00ab
+#define regSDMA0_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE0_CSA_ADDR_LO 0x00ac
+#define regSDMA0_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_CSA_ADDR_HI 0x00ad
+#define regSDMA0_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_SCHEDULE_CNTL 0x00ae
+#define regSDMA0_QUEUE0_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_IB_SUB_REMAIN 0x00af
+#define regSDMA0_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE0_PREEMPT 0x00b0
+#define regSDMA0_QUEUE0_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE0_DUMMY_REG 0x00b1
+#define regSDMA0_QUEUE0_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x00b2
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x00b3
+#define regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_AQL_CNTL 0x00b4
+#define regSDMA0_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE0_MINOR_PTR_UPDATE 0x00b5
+#define regSDMA0_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE0_RB_PREEMPT 0x00b6
+#define regSDMA0_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA0 0x00c0
+#define regSDMA0_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA1 0x00c1
+#define regSDMA0_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA2 0x00c2
+#define regSDMA0_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA3 0x00c3
+#define regSDMA0_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA4 0x00c4
+#define regSDMA0_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA5 0x00c5
+#define regSDMA0_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA6 0x00c6
+#define regSDMA0_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA7 0x00c7
+#define regSDMA0_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA8 0x00c8
+#define regSDMA0_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA9 0x00c9
+#define regSDMA0_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_DATA10 0x00ca
+#define regSDMA0_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE0_MIDCMD_CNTL 0x00cb
+#define regSDMA0_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_CNTL 0x00d8
+#define regSDMA0_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_BASE 0x00d9
+#define regSDMA0_QUEUE1_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_BASE_HI 0x00da
+#define regSDMA0_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR 0x00db
+#define regSDMA0_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_HI 0x00dc
+#define regSDMA0_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR 0x00dd
+#define regSDMA0_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_HI 0x00de
+#define regSDMA0_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_HI 0x00e0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_LO 0x00e1
+#define regSDMA0_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_CNTL 0x00e2
+#define regSDMA0_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_RPTR 0x00e3
+#define regSDMA0_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_OFFSET 0x00e4
+#define regSDMA0_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_BASE_LO 0x00e5
+#define regSDMA0_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_BASE_HI 0x00e6
+#define regSDMA0_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_SIZE 0x00e7
+#define regSDMA0_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE1_SKIP_CNTL 0x00e8
+#define regSDMA0_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_CONTEXT_STATUS 0x00e9
+#define regSDMA0_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL 0x00ea
+#define regSDMA0_QUEUE1_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL_LOG 0x0101
+#define regSDMA0_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE1_DOORBELL_OFFSET 0x0103
+#define regSDMA0_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE1_CSA_ADDR_LO 0x0104
+#define regSDMA0_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_CSA_ADDR_HI 0x0105
+#define regSDMA0_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_SCHEDULE_CNTL 0x0106
+#define regSDMA0_QUEUE1_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_IB_SUB_REMAIN 0x0107
+#define regSDMA0_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE1_PREEMPT 0x0108
+#define regSDMA0_QUEUE1_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE1_DUMMY_REG 0x0109
+#define regSDMA0_QUEUE1_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x010a
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x010b
+#define regSDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_AQL_CNTL 0x010c
+#define regSDMA0_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE1_MINOR_PTR_UPDATE 0x010d
+#define regSDMA0_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE1_RB_PREEMPT 0x010e
+#define regSDMA0_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA0 0x0118
+#define regSDMA0_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA1 0x0119
+#define regSDMA0_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA2 0x011a
+#define regSDMA0_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA3 0x011b
+#define regSDMA0_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA4 0x011c
+#define regSDMA0_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA5 0x011d
+#define regSDMA0_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA6 0x011e
+#define regSDMA0_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA7 0x011f
+#define regSDMA0_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA8 0x0120
+#define regSDMA0_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA9 0x0121
+#define regSDMA0_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_DATA10 0x0122
+#define regSDMA0_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE1_MIDCMD_CNTL 0x0123
+#define regSDMA0_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_CNTL 0x0130
+#define regSDMA0_QUEUE2_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_BASE 0x0131
+#define regSDMA0_QUEUE2_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_BASE_HI 0x0132
+#define regSDMA0_QUEUE2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR 0x0133
+#define regSDMA0_QUEUE2_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_HI 0x0134
+#define regSDMA0_QUEUE2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR 0x0135
+#define regSDMA0_QUEUE2_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_HI 0x0136
+#define regSDMA0_QUEUE2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_HI 0x0138
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_LO 0x0139
+#define regSDMA0_QUEUE2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_CNTL 0x013a
+#define regSDMA0_QUEUE2_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_RPTR 0x013b
+#define regSDMA0_QUEUE2_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_OFFSET 0x013c
+#define regSDMA0_QUEUE2_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_BASE_LO 0x013d
+#define regSDMA0_QUEUE2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_BASE_HI 0x013e
+#define regSDMA0_QUEUE2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_SIZE 0x013f
+#define regSDMA0_QUEUE2_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE2_SKIP_CNTL 0x0140
+#define regSDMA0_QUEUE2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_CONTEXT_STATUS 0x0141
+#define regSDMA0_QUEUE2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL 0x0142
+#define regSDMA0_QUEUE2_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL_LOG 0x0159
+#define regSDMA0_QUEUE2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE2_DOORBELL_OFFSET 0x015b
+#define regSDMA0_QUEUE2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE2_CSA_ADDR_LO 0x015c
+#define regSDMA0_QUEUE2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_CSA_ADDR_HI 0x015d
+#define regSDMA0_QUEUE2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_SCHEDULE_CNTL 0x015e
+#define regSDMA0_QUEUE2_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_IB_SUB_REMAIN 0x015f
+#define regSDMA0_QUEUE2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE2_PREEMPT 0x0160
+#define regSDMA0_QUEUE2_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE2_DUMMY_REG 0x0161
+#define regSDMA0_QUEUE2_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI 0x0162
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO 0x0163
+#define regSDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_AQL_CNTL 0x0164
+#define regSDMA0_QUEUE2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE2_MINOR_PTR_UPDATE 0x0165
+#define regSDMA0_QUEUE2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE2_RB_PREEMPT 0x0166
+#define regSDMA0_QUEUE2_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA0 0x0170
+#define regSDMA0_QUEUE2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA1 0x0171
+#define regSDMA0_QUEUE2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA2 0x0172
+#define regSDMA0_QUEUE2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA3 0x0173
+#define regSDMA0_QUEUE2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA4 0x0174
+#define regSDMA0_QUEUE2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA5 0x0175
+#define regSDMA0_QUEUE2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA6 0x0176
+#define regSDMA0_QUEUE2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA7 0x0177
+#define regSDMA0_QUEUE2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA8 0x0178
+#define regSDMA0_QUEUE2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA9 0x0179
+#define regSDMA0_QUEUE2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_DATA10 0x017a
+#define regSDMA0_QUEUE2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE2_MIDCMD_CNTL 0x017b
+#define regSDMA0_QUEUE2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_CNTL 0x0188
+#define regSDMA0_QUEUE3_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_BASE 0x0189
+#define regSDMA0_QUEUE3_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_BASE_HI 0x018a
+#define regSDMA0_QUEUE3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR 0x018b
+#define regSDMA0_QUEUE3_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_HI 0x018c
+#define regSDMA0_QUEUE3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR 0x018d
+#define regSDMA0_QUEUE3_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_HI 0x018e
+#define regSDMA0_QUEUE3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_HI 0x0190
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_LO 0x0191
+#define regSDMA0_QUEUE3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_CNTL 0x0192
+#define regSDMA0_QUEUE3_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_RPTR 0x0193
+#define regSDMA0_QUEUE3_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_OFFSET 0x0194
+#define regSDMA0_QUEUE3_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_BASE_LO 0x0195
+#define regSDMA0_QUEUE3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_BASE_HI 0x0196
+#define regSDMA0_QUEUE3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_SIZE 0x0197
+#define regSDMA0_QUEUE3_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE3_SKIP_CNTL 0x0198
+#define regSDMA0_QUEUE3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_CONTEXT_STATUS 0x0199
+#define regSDMA0_QUEUE3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL 0x019a
+#define regSDMA0_QUEUE3_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL_LOG 0x01b1
+#define regSDMA0_QUEUE3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE3_DOORBELL_OFFSET 0x01b3
+#define regSDMA0_QUEUE3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE3_CSA_ADDR_LO 0x01b4
+#define regSDMA0_QUEUE3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_CSA_ADDR_HI 0x01b5
+#define regSDMA0_QUEUE3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_SCHEDULE_CNTL 0x01b6
+#define regSDMA0_QUEUE3_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_IB_SUB_REMAIN 0x01b7
+#define regSDMA0_QUEUE3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE3_PREEMPT 0x01b8
+#define regSDMA0_QUEUE3_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE3_DUMMY_REG 0x01b9
+#define regSDMA0_QUEUE3_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI 0x01ba
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO 0x01bb
+#define regSDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_AQL_CNTL 0x01bc
+#define regSDMA0_QUEUE3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE3_MINOR_PTR_UPDATE 0x01bd
+#define regSDMA0_QUEUE3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE3_RB_PREEMPT 0x01be
+#define regSDMA0_QUEUE3_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA0 0x01c8
+#define regSDMA0_QUEUE3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA1 0x01c9
+#define regSDMA0_QUEUE3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA2 0x01ca
+#define regSDMA0_QUEUE3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA3 0x01cb
+#define regSDMA0_QUEUE3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA4 0x01cc
+#define regSDMA0_QUEUE3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA5 0x01cd
+#define regSDMA0_QUEUE3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA6 0x01ce
+#define regSDMA0_QUEUE3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA7 0x01cf
+#define regSDMA0_QUEUE3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA8 0x01d0
+#define regSDMA0_QUEUE3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA9 0x01d1
+#define regSDMA0_QUEUE3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_DATA10 0x01d2
+#define regSDMA0_QUEUE3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE3_MIDCMD_CNTL 0x01d3
+#define regSDMA0_QUEUE3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_CNTL 0x01e0
+#define regSDMA0_QUEUE4_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_BASE 0x01e1
+#define regSDMA0_QUEUE4_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_BASE_HI 0x01e2
+#define regSDMA0_QUEUE4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR 0x01e3
+#define regSDMA0_QUEUE4_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_HI 0x01e4
+#define regSDMA0_QUEUE4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR 0x01e5
+#define regSDMA0_QUEUE4_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_HI 0x01e6
+#define regSDMA0_QUEUE4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_HI 0x01e8
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_LO 0x01e9
+#define regSDMA0_QUEUE4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_CNTL 0x01ea
+#define regSDMA0_QUEUE4_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_RPTR 0x01eb
+#define regSDMA0_QUEUE4_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_OFFSET 0x01ec
+#define regSDMA0_QUEUE4_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_BASE_LO 0x01ed
+#define regSDMA0_QUEUE4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_BASE_HI 0x01ee
+#define regSDMA0_QUEUE4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_SIZE 0x01ef
+#define regSDMA0_QUEUE4_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE4_SKIP_CNTL 0x01f0
+#define regSDMA0_QUEUE4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_CONTEXT_STATUS 0x01f1
+#define regSDMA0_QUEUE4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL 0x01f2
+#define regSDMA0_QUEUE4_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL_LOG 0x0209
+#define regSDMA0_QUEUE4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE4_DOORBELL_OFFSET 0x020b
+#define regSDMA0_QUEUE4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE4_CSA_ADDR_LO 0x020c
+#define regSDMA0_QUEUE4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_CSA_ADDR_HI 0x020d
+#define regSDMA0_QUEUE4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_SCHEDULE_CNTL 0x020e
+#define regSDMA0_QUEUE4_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_IB_SUB_REMAIN 0x020f
+#define regSDMA0_QUEUE4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE4_PREEMPT 0x0210
+#define regSDMA0_QUEUE4_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE4_DUMMY_REG 0x0211
+#define regSDMA0_QUEUE4_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI 0x0212
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO 0x0213
+#define regSDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_AQL_CNTL 0x0214
+#define regSDMA0_QUEUE4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE4_MINOR_PTR_UPDATE 0x0215
+#define regSDMA0_QUEUE4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE4_RB_PREEMPT 0x0216
+#define regSDMA0_QUEUE4_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA0 0x0220
+#define regSDMA0_QUEUE4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA1 0x0221
+#define regSDMA0_QUEUE4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA2 0x0222
+#define regSDMA0_QUEUE4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA3 0x0223
+#define regSDMA0_QUEUE4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA4 0x0224
+#define regSDMA0_QUEUE4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA5 0x0225
+#define regSDMA0_QUEUE4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA6 0x0226
+#define regSDMA0_QUEUE4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA7 0x0227
+#define regSDMA0_QUEUE4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA8 0x0228
+#define regSDMA0_QUEUE4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA9 0x0229
+#define regSDMA0_QUEUE4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_DATA10 0x022a
+#define regSDMA0_QUEUE4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE4_MIDCMD_CNTL 0x022b
+#define regSDMA0_QUEUE4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_CNTL 0x0238
+#define regSDMA0_QUEUE5_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_BASE 0x0239
+#define regSDMA0_QUEUE5_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_BASE_HI 0x023a
+#define regSDMA0_QUEUE5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR 0x023b
+#define regSDMA0_QUEUE5_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_HI 0x023c
+#define regSDMA0_QUEUE5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR 0x023d
+#define regSDMA0_QUEUE5_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_HI 0x023e
+#define regSDMA0_QUEUE5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_HI 0x0240
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_LO 0x0241
+#define regSDMA0_QUEUE5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_CNTL 0x0242
+#define regSDMA0_QUEUE5_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_RPTR 0x0243
+#define regSDMA0_QUEUE5_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_OFFSET 0x0244
+#define regSDMA0_QUEUE5_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_BASE_LO 0x0245
+#define regSDMA0_QUEUE5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_BASE_HI 0x0246
+#define regSDMA0_QUEUE5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_SIZE 0x0247
+#define regSDMA0_QUEUE5_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE5_SKIP_CNTL 0x0248
+#define regSDMA0_QUEUE5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_CONTEXT_STATUS 0x0249
+#define regSDMA0_QUEUE5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL 0x024a
+#define regSDMA0_QUEUE5_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL_LOG 0x0261
+#define regSDMA0_QUEUE5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE5_DOORBELL_OFFSET 0x0263
+#define regSDMA0_QUEUE5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE5_CSA_ADDR_LO 0x0264
+#define regSDMA0_QUEUE5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_CSA_ADDR_HI 0x0265
+#define regSDMA0_QUEUE5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_SCHEDULE_CNTL 0x0266
+#define regSDMA0_QUEUE5_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_IB_SUB_REMAIN 0x0267
+#define regSDMA0_QUEUE5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE5_PREEMPT 0x0268
+#define regSDMA0_QUEUE5_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE5_DUMMY_REG 0x0269
+#define regSDMA0_QUEUE5_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI 0x026a
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO 0x026b
+#define regSDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_AQL_CNTL 0x026c
+#define regSDMA0_QUEUE5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE5_MINOR_PTR_UPDATE 0x026d
+#define regSDMA0_QUEUE5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE5_RB_PREEMPT 0x026e
+#define regSDMA0_QUEUE5_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA0 0x0278
+#define regSDMA0_QUEUE5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA1 0x0279
+#define regSDMA0_QUEUE5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA2 0x027a
+#define regSDMA0_QUEUE5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA3 0x027b
+#define regSDMA0_QUEUE5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA4 0x027c
+#define regSDMA0_QUEUE5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA5 0x027d
+#define regSDMA0_QUEUE5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA6 0x027e
+#define regSDMA0_QUEUE5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA7 0x027f
+#define regSDMA0_QUEUE5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA8 0x0280
+#define regSDMA0_QUEUE5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA9 0x0281
+#define regSDMA0_QUEUE5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_DATA10 0x0282
+#define regSDMA0_QUEUE5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE5_MIDCMD_CNTL 0x0283
+#define regSDMA0_QUEUE5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_CNTL 0x0290
+#define regSDMA0_QUEUE6_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_BASE 0x0291
+#define regSDMA0_QUEUE6_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_BASE_HI 0x0292
+#define regSDMA0_QUEUE6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR 0x0293
+#define regSDMA0_QUEUE6_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_HI 0x0294
+#define regSDMA0_QUEUE6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR 0x0295
+#define regSDMA0_QUEUE6_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_HI 0x0296
+#define regSDMA0_QUEUE6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_HI 0x0298
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_LO 0x0299
+#define regSDMA0_QUEUE6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_CNTL 0x029a
+#define regSDMA0_QUEUE6_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_RPTR 0x029b
+#define regSDMA0_QUEUE6_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_OFFSET 0x029c
+#define regSDMA0_QUEUE6_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_BASE_LO 0x029d
+#define regSDMA0_QUEUE6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_BASE_HI 0x029e
+#define regSDMA0_QUEUE6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_SIZE 0x029f
+#define regSDMA0_QUEUE6_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE6_SKIP_CNTL 0x02a0
+#define regSDMA0_QUEUE6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_CONTEXT_STATUS 0x02a1
+#define regSDMA0_QUEUE6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL 0x02a2
+#define regSDMA0_QUEUE6_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL_LOG 0x02b9
+#define regSDMA0_QUEUE6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE6_DOORBELL_OFFSET 0x02bb
+#define regSDMA0_QUEUE6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE6_CSA_ADDR_LO 0x02bc
+#define regSDMA0_QUEUE6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_CSA_ADDR_HI 0x02bd
+#define regSDMA0_QUEUE6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_SCHEDULE_CNTL 0x02be
+#define regSDMA0_QUEUE6_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_IB_SUB_REMAIN 0x02bf
+#define regSDMA0_QUEUE6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE6_PREEMPT 0x02c0
+#define regSDMA0_QUEUE6_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE6_DUMMY_REG 0x02c1
+#define regSDMA0_QUEUE6_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI 0x02c2
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO 0x02c3
+#define regSDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_AQL_CNTL 0x02c4
+#define regSDMA0_QUEUE6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE6_MINOR_PTR_UPDATE 0x02c5
+#define regSDMA0_QUEUE6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE6_RB_PREEMPT 0x02c6
+#define regSDMA0_QUEUE6_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA0 0x02d0
+#define regSDMA0_QUEUE6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA1 0x02d1
+#define regSDMA0_QUEUE6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA2 0x02d2
+#define regSDMA0_QUEUE6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA3 0x02d3
+#define regSDMA0_QUEUE6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA4 0x02d4
+#define regSDMA0_QUEUE6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA5 0x02d5
+#define regSDMA0_QUEUE6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA6 0x02d6
+#define regSDMA0_QUEUE6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA7 0x02d7
+#define regSDMA0_QUEUE6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA8 0x02d8
+#define regSDMA0_QUEUE6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA9 0x02d9
+#define regSDMA0_QUEUE6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_DATA10 0x02da
+#define regSDMA0_QUEUE6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE6_MIDCMD_CNTL 0x02db
+#define regSDMA0_QUEUE6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_CNTL 0x02e8
+#define regSDMA0_QUEUE7_RB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_BASE 0x02e9
+#define regSDMA0_QUEUE7_RB_BASE_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_BASE_HI 0x02ea
+#define regSDMA0_QUEUE7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR 0x02eb
+#define regSDMA0_QUEUE7_RB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_HI 0x02ec
+#define regSDMA0_QUEUE7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR 0x02ed
+#define regSDMA0_QUEUE7_RB_WPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_HI 0x02ee
+#define regSDMA0_QUEUE7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_HI 0x02f0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_LO 0x02f1
+#define regSDMA0_QUEUE7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_CNTL 0x02f2
+#define regSDMA0_QUEUE7_IB_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_RPTR 0x02f3
+#define regSDMA0_QUEUE7_IB_RPTR_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_OFFSET 0x02f4
+#define regSDMA0_QUEUE7_IB_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_BASE_LO 0x02f5
+#define regSDMA0_QUEUE7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_BASE_HI 0x02f6
+#define regSDMA0_QUEUE7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_SIZE 0x02f7
+#define regSDMA0_QUEUE7_IB_SIZE_BASE_IDX 0
+#define regSDMA0_QUEUE7_SKIP_CNTL 0x02f8
+#define regSDMA0_QUEUE7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_CONTEXT_STATUS 0x02f9
+#define regSDMA0_QUEUE7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL 0x02fa
+#define regSDMA0_QUEUE7_DOORBELL_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL_LOG 0x0311
+#define regSDMA0_QUEUE7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA0_QUEUE7_DOORBELL_OFFSET 0x0313
+#define regSDMA0_QUEUE7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA0_QUEUE7_CSA_ADDR_LO 0x0314
+#define regSDMA0_QUEUE7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_CSA_ADDR_HI 0x0315
+#define regSDMA0_QUEUE7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_SCHEDULE_CNTL 0x0316
+#define regSDMA0_QUEUE7_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_IB_SUB_REMAIN 0x0317
+#define regSDMA0_QUEUE7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA0_QUEUE7_PREEMPT 0x0318
+#define regSDMA0_QUEUE7_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE7_DUMMY_REG 0x0319
+#define regSDMA0_QUEUE7_DUMMY_REG_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI 0x031a
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO 0x031b
+#define regSDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_AQL_CNTL 0x031c
+#define regSDMA0_QUEUE7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA0_QUEUE7_MINOR_PTR_UPDATE 0x031d
+#define regSDMA0_QUEUE7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA0_QUEUE7_RB_PREEMPT 0x031e
+#define regSDMA0_QUEUE7_RB_PREEMPT_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA0 0x0328
+#define regSDMA0_QUEUE7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA1 0x0329
+#define regSDMA0_QUEUE7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA2 0x032a
+#define regSDMA0_QUEUE7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA3 0x032b
+#define regSDMA0_QUEUE7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA4 0x032c
+#define regSDMA0_QUEUE7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA5 0x032d
+#define regSDMA0_QUEUE7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA6 0x032e
+#define regSDMA0_QUEUE7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA7 0x032f
+#define regSDMA0_QUEUE7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA8 0x0330
+#define regSDMA0_QUEUE7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA9 0x0331
+#define regSDMA0_QUEUE7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_DATA10 0x0332
+#define regSDMA0_QUEUE7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA0_QUEUE7_MIDCMD_CNTL 0x0333
+#define regSDMA0_QUEUE7_MIDCMD_CNTL_BASE_IDX 0
+
+
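+// Editor's note (illustrative, not part of the generated register list): the
+// SDMA0_QUEUE<n> banks above repeat at a fixed stride of 0x58 dwords
+// (QUEUE3_RB_CNTL 0x0188, QUEUE4 0x01e0, ... QUEUE7 0x02e8), and each
+// SDMA1_QUEUE<n> register in the block below sits exactly 0x600 dwords above
+// its SDMA0 counterpart (e.g. QUEUE3_RB_CNTL: 0x0188 vs 0x0788).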
+// addressBlock: gc_sdma0_sdma1dec
+// base address: 0x6180
+#define regSDMA1_DEC_START 0x0600
+#define regSDMA1_DEC_START_BASE_IDX 0
+#define regSDMA1_F32_MISC_CNTL 0x060b
+#define regSDMA1_F32_MISC_CNTL_BASE_IDX 0
+#define regSDMA1_GLOBAL_TIMESTAMP_LO 0x060f
+#define regSDMA1_GLOBAL_TIMESTAMP_LO_BASE_IDX 0
+#define regSDMA1_GLOBAL_TIMESTAMP_HI 0x0610
+#define regSDMA1_GLOBAL_TIMESTAMP_HI_BASE_IDX 0
+#define regSDMA1_POWER_CNTL 0x061a
+#define regSDMA1_POWER_CNTL_BASE_IDX 0
+#define regSDMA1_CNTL 0x061c
+#define regSDMA1_CNTL_BASE_IDX 0
+#define regSDMA1_CHICKEN_BITS 0x061d
+#define regSDMA1_CHICKEN_BITS_BASE_IDX 0
+#define regSDMA1_GB_ADDR_CONFIG 0x061e
+#define regSDMA1_GB_ADDR_CONFIG_BASE_IDX 0
+#define regSDMA1_GB_ADDR_CONFIG_READ 0x061f
+#define regSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regSDMA1_RB_RPTR_FETCH 0x0620
+#define regSDMA1_RB_RPTR_FETCH_BASE_IDX 0
+#define regSDMA1_RB_RPTR_FETCH_HI 0x0621
+#define regSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0
+#define regSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0622
+#define regSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0
+#define regSDMA1_IB_OFFSET_FETCH 0x0623
+#define regSDMA1_IB_OFFSET_FETCH_BASE_IDX 0
+#define regSDMA1_PROGRAM 0x0624
+#define regSDMA1_PROGRAM_BASE_IDX 0
+#define regSDMA1_STATUS_REG 0x0625
+#define regSDMA1_STATUS_REG_BASE_IDX 0
+#define regSDMA1_STATUS1_REG 0x0626
+#define regSDMA1_STATUS1_REG_BASE_IDX 0
+#define regSDMA1_CNTL1 0x0627
+#define regSDMA1_CNTL1_BASE_IDX 0
+#define regSDMA1_HBM_PAGE_CONFIG 0x0628
+#define regSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0
+#define regSDMA1_UCODE_CHECKSUM 0x0629
+#define regSDMA1_UCODE_CHECKSUM_BASE_IDX 0
+#define regSDMA1_FREEZE 0x062b
+#define regSDMA1_FREEZE_BASE_IDX 0
+#define regSDMA1_PROCESS_QUANTUM0 0x062c
+#define regSDMA1_PROCESS_QUANTUM0_BASE_IDX 0
+#define regSDMA1_PROCESS_QUANTUM1 0x062d
+#define regSDMA1_PROCESS_QUANTUM1_BASE_IDX 0
+#define regSDMA1_WATCHDOG_CNTL 0x062e
+#define regSDMA1_WATCHDOG_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE_STATUS0 0x062f
+#define regSDMA1_QUEUE_STATUS0_BASE_IDX 0
+#define regSDMA1_EDC_CONFIG 0x0632
+#define regSDMA1_EDC_CONFIG_BASE_IDX 0
+#define regSDMA1_BA_THRESHOLD 0x0633
+#define regSDMA1_BA_THRESHOLD_BASE_IDX 0
+#define regSDMA1_ID 0x0634
+#define regSDMA1_ID_BASE_IDX 0
+#define regSDMA1_VERSION 0x0635
+#define regSDMA1_VERSION_BASE_IDX 0
+#define regSDMA1_EDC_COUNTER 0x0636
+#define regSDMA1_EDC_COUNTER_BASE_IDX 0
+#define regSDMA1_EDC_COUNTER_CLEAR 0x0637
+#define regSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0
+#define regSDMA1_STATUS2_REG 0x0638
+#define regSDMA1_STATUS2_REG_BASE_IDX 0
+#define regSDMA1_ATOMIC_CNTL 0x0639
+#define regSDMA1_ATOMIC_CNTL_BASE_IDX 0
+#define regSDMA1_ATOMIC_PREOP_LO 0x063a
+#define regSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0
+#define regSDMA1_ATOMIC_PREOP_HI 0x063b
+#define regSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0
+#define regSDMA1_UTCL1_CNTL 0x063c
+#define regSDMA1_UTCL1_CNTL_BASE_IDX 0
+#define regSDMA1_UTCL1_WATERMK 0x063d
+#define regSDMA1_UTCL1_WATERMK_BASE_IDX 0
+#define regSDMA1_UTCL1_TIMEOUT 0x063e
+#define regSDMA1_UTCL1_TIMEOUT_BASE_IDX 0
+#define regSDMA1_UTCL1_PAGE 0x063f
+#define regSDMA1_UTCL1_PAGE_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_STATUS 0x0640
+#define regSDMA1_UTCL1_RD_STATUS_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_STATUS 0x0641
+#define regSDMA1_UTCL1_WR_STATUS_BASE_IDX 0
+#define regSDMA1_UTCL1_INV0 0x0642
+#define regSDMA1_UTCL1_INV0_BASE_IDX 0
+#define regSDMA1_UTCL1_INV1 0x0643
+#define regSDMA1_UTCL1_INV1_BASE_IDX 0
+#define regSDMA1_UTCL1_INV2 0x0644
+#define regSDMA1_UTCL1_INV2_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_XNACK0 0x0645
+#define regSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0
+#define regSDMA1_UTCL1_RD_XNACK1 0x0646
+#define regSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_XNACK0 0x0647
+#define regSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0
+#define regSDMA1_UTCL1_WR_XNACK1 0x0648
+#define regSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0
+#define regSDMA1_RELAX_ORDERING_LUT 0x064a
+#define regSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0
+#define regSDMA1_CHICKEN_BITS_2 0x064b
+#define regSDMA1_CHICKEN_BITS_2_BASE_IDX 0
+#define regSDMA1_STATUS3_REG 0x064c
+#define regSDMA1_STATUS3_REG_BASE_IDX 0
+#define regSDMA1_PHYSICAL_ADDR_LO 0x064d
+#define regSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_PHYSICAL_ADDR_HI 0x064e
+#define regSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_GLOBAL_QUANTUM 0x064f
+#define regSDMA1_GLOBAL_QUANTUM_BASE_IDX 0
+#define regSDMA1_ERROR_LOG 0x0650
+#define regSDMA1_ERROR_LOG_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG0 0x0651
+#define regSDMA1_PUB_DUMMY_REG0_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG1 0x0652
+#define regSDMA1_PUB_DUMMY_REG1_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG2 0x0653
+#define regSDMA1_PUB_DUMMY_REG2_BASE_IDX 0
+#define regSDMA1_PUB_DUMMY_REG3 0x0654
+#define regSDMA1_PUB_DUMMY_REG3_BASE_IDX 0
+#define regSDMA1_F32_COUNTER 0x0655
+#define regSDMA1_F32_COUNTER_BASE_IDX 0
+#define regSDMA1_CRD_CNTL 0x065b
+#define regSDMA1_CRD_CNTL_BASE_IDX 0
+#define regSDMA1_RLC_CGCG_CTRL 0x065c
+#define regSDMA1_RLC_CGCG_CTRL_BASE_IDX 0
+#define regSDMA1_GPU_IOV_VIOLATION_LOG 0x065d
+#define regSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0
+#define regSDMA1_AQL_STATUS 0x065f
+#define regSDMA1_AQL_STATUS_BASE_IDX 0
+#define regSDMA1_EA_DBIT_ADDR_DATA 0x0660
+#define regSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0
+#define regSDMA1_EA_DBIT_ADDR_INDEX 0x0661
+#define regSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0
+#define regSDMA1_TLBI_GCR_CNTL 0x0662
+#define regSDMA1_TLBI_GCR_CNTL_BASE_IDX 0
+#define regSDMA1_TILING_CONFIG 0x0663
+#define regSDMA1_TILING_CONFIG_BASE_IDX 0
+#define regSDMA1_HASH 0x0664
+#define regSDMA1_HASH_BASE_IDX 0
+#define regSDMA1_INT_STATUS 0x0670
+#define regSDMA1_INT_STATUS_BASE_IDX 0
+#define regSDMA1_GPU_IOV_VIOLATION_LOG2 0x0671
+#define regSDMA1_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0
+#define regSDMA1_HOLE_ADDR_LO 0x0672
+#define regSDMA1_HOLE_ADDR_LO_BASE_IDX 0
+#define regSDMA1_HOLE_ADDR_HI 0x0673
+#define regSDMA1_HOLE_ADDR_HI_BASE_IDX 0
+#define regSDMA1_CLOCK_GATING_STATUS 0x0675
+#define regSDMA1_CLOCK_GATING_STATUS_BASE_IDX 0
+#define regSDMA1_STATUS4_REG 0x0676
+#define regSDMA1_STATUS4_REG_BASE_IDX 0
+#define regSDMA1_SCRATCH_RAM_DATA 0x0677
+#define regSDMA1_SCRATCH_RAM_DATA_BASE_IDX 0
+#define regSDMA1_SCRATCH_RAM_ADDR 0x0678
+#define regSDMA1_SCRATCH_RAM_ADDR_BASE_IDX 0
+#define regSDMA1_TIMESTAMP_CNTL 0x0679
+#define regSDMA1_TIMESTAMP_CNTL_BASE_IDX 0
+#define regSDMA1_STATUS5_REG 0x067a
+#define regSDMA1_STATUS5_REG_BASE_IDX 0
+#define regSDMA1_QUEUE_RESET_REQ 0x067b
+#define regSDMA1_QUEUE_RESET_REQ_BASE_IDX 0
+#define regSDMA1_STATUS6_REG 0x067c
+#define regSDMA1_STATUS6_REG_BASE_IDX 0
+#define regSDMA1_UCODE1_CHECKSUM 0x067d
+#define regSDMA1_UCODE1_CHECKSUM_BASE_IDX 0
+#define regSDMA1_CE_CTRL 0x067e
+#define regSDMA1_CE_CTRL_BASE_IDX 0
+#define regSDMA1_FED_STATUS 0x067f
+#define regSDMA1_FED_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_CNTL 0x0680
+#define regSDMA1_QUEUE0_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_BASE 0x0681
+#define regSDMA1_QUEUE0_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_BASE_HI 0x0682
+#define regSDMA1_QUEUE0_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR 0x0683
+#define regSDMA1_QUEUE0_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_HI 0x0684
+#define regSDMA1_QUEUE0_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR 0x0685
+#define regSDMA1_QUEUE0_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_HI 0x0686
+#define regSDMA1_QUEUE0_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_HI 0x0688
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_LO 0x0689
+#define regSDMA1_QUEUE0_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_CNTL 0x068a
+#define regSDMA1_QUEUE0_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_RPTR 0x068b
+#define regSDMA1_QUEUE0_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_OFFSET 0x068c
+#define regSDMA1_QUEUE0_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_BASE_LO 0x068d
+#define regSDMA1_QUEUE0_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_BASE_HI 0x068e
+#define regSDMA1_QUEUE0_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_SIZE 0x068f
+#define regSDMA1_QUEUE0_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE0_SKIP_CNTL 0x0690
+#define regSDMA1_QUEUE0_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_CONTEXT_STATUS 0x0691
+#define regSDMA1_QUEUE0_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL 0x0692
+#define regSDMA1_QUEUE0_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL_LOG 0x06a9
+#define regSDMA1_QUEUE0_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE0_DOORBELL_OFFSET 0x06ab
+#define regSDMA1_QUEUE0_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE0_CSA_ADDR_LO 0x06ac
+#define regSDMA1_QUEUE0_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_CSA_ADDR_HI 0x06ad
+#define regSDMA1_QUEUE0_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_SCHEDULE_CNTL 0x06ae
+#define regSDMA1_QUEUE0_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_IB_SUB_REMAIN 0x06af
+#define regSDMA1_QUEUE0_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE0_PREEMPT 0x06b0
+#define regSDMA1_QUEUE0_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE0_DUMMY_REG 0x06b1
+#define regSDMA1_QUEUE0_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI 0x06b2
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO 0x06b3
+#define regSDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_AQL_CNTL 0x06b4
+#define regSDMA1_QUEUE0_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE0_MINOR_PTR_UPDATE 0x06b5
+#define regSDMA1_QUEUE0_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE0_RB_PREEMPT 0x06b6
+#define regSDMA1_QUEUE0_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA0 0x06c0
+#define regSDMA1_QUEUE0_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA1 0x06c1
+#define regSDMA1_QUEUE0_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA2 0x06c2
+#define regSDMA1_QUEUE0_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA3 0x06c3
+#define regSDMA1_QUEUE0_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA4 0x06c4
+#define regSDMA1_QUEUE0_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA5 0x06c5
+#define regSDMA1_QUEUE0_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA6 0x06c6
+#define regSDMA1_QUEUE0_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA7 0x06c7
+#define regSDMA1_QUEUE0_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA8 0x06c8
+#define regSDMA1_QUEUE0_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA9 0x06c9
+#define regSDMA1_QUEUE0_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_DATA10 0x06ca
+#define regSDMA1_QUEUE0_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE0_MIDCMD_CNTL 0x06cb
+#define regSDMA1_QUEUE0_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_CNTL 0x06d8
+#define regSDMA1_QUEUE1_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_BASE 0x06d9
+#define regSDMA1_QUEUE1_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_BASE_HI 0x06da
+#define regSDMA1_QUEUE1_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR 0x06db
+#define regSDMA1_QUEUE1_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_HI 0x06dc
+#define regSDMA1_QUEUE1_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR 0x06dd
+#define regSDMA1_QUEUE1_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_HI 0x06de
+#define regSDMA1_QUEUE1_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_HI 0x06e0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_LO 0x06e1
+#define regSDMA1_QUEUE1_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_CNTL 0x06e2
+#define regSDMA1_QUEUE1_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_RPTR 0x06e3
+#define regSDMA1_QUEUE1_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_OFFSET 0x06e4
+#define regSDMA1_QUEUE1_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_BASE_LO 0x06e5
+#define regSDMA1_QUEUE1_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_BASE_HI 0x06e6
+#define regSDMA1_QUEUE1_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_SIZE 0x06e7
+#define regSDMA1_QUEUE1_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE1_SKIP_CNTL 0x06e8
+#define regSDMA1_QUEUE1_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_CONTEXT_STATUS 0x06e9
+#define regSDMA1_QUEUE1_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL 0x06ea
+#define regSDMA1_QUEUE1_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL_LOG 0x0701
+#define regSDMA1_QUEUE1_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE1_DOORBELL_OFFSET 0x0703
+#define regSDMA1_QUEUE1_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE1_CSA_ADDR_LO 0x0704
+#define regSDMA1_QUEUE1_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_CSA_ADDR_HI 0x0705
+#define regSDMA1_QUEUE1_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_SCHEDULE_CNTL 0x0706
+#define regSDMA1_QUEUE1_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_IB_SUB_REMAIN 0x0707
+#define regSDMA1_QUEUE1_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE1_PREEMPT 0x0708
+#define regSDMA1_QUEUE1_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE1_DUMMY_REG 0x0709
+#define regSDMA1_QUEUE1_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI 0x070a
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO 0x070b
+#define regSDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_AQL_CNTL 0x070c
+#define regSDMA1_QUEUE1_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE1_MINOR_PTR_UPDATE 0x070d
+#define regSDMA1_QUEUE1_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE1_RB_PREEMPT 0x070e
+#define regSDMA1_QUEUE1_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA0 0x0718
+#define regSDMA1_QUEUE1_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA1 0x0719
+#define regSDMA1_QUEUE1_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA2 0x071a
+#define regSDMA1_QUEUE1_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA3 0x071b
+#define regSDMA1_QUEUE1_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA4 0x071c
+#define regSDMA1_QUEUE1_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA5 0x071d
+#define regSDMA1_QUEUE1_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA6 0x071e
+#define regSDMA1_QUEUE1_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA7 0x071f
+#define regSDMA1_QUEUE1_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA8 0x0720
+#define regSDMA1_QUEUE1_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA9 0x0721
+#define regSDMA1_QUEUE1_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_DATA10 0x0722
+#define regSDMA1_QUEUE1_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE1_MIDCMD_CNTL 0x0723
+#define regSDMA1_QUEUE1_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_CNTL 0x0730
+#define regSDMA1_QUEUE2_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_BASE 0x0731
+#define regSDMA1_QUEUE2_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_BASE_HI 0x0732
+#define regSDMA1_QUEUE2_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR 0x0733
+#define regSDMA1_QUEUE2_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_HI 0x0734
+#define regSDMA1_QUEUE2_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR 0x0735
+#define regSDMA1_QUEUE2_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_HI 0x0736
+#define regSDMA1_QUEUE2_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_HI 0x0738
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_LO 0x0739
+#define regSDMA1_QUEUE2_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_CNTL 0x073a
+#define regSDMA1_QUEUE2_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_RPTR 0x073b
+#define regSDMA1_QUEUE2_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_OFFSET 0x073c
+#define regSDMA1_QUEUE2_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_BASE_LO 0x073d
+#define regSDMA1_QUEUE2_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_BASE_HI 0x073e
+#define regSDMA1_QUEUE2_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_SIZE 0x073f
+#define regSDMA1_QUEUE2_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE2_SKIP_CNTL 0x0740
+#define regSDMA1_QUEUE2_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_CONTEXT_STATUS 0x0741
+#define regSDMA1_QUEUE2_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL 0x0742
+#define regSDMA1_QUEUE2_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL_LOG 0x0759
+#define regSDMA1_QUEUE2_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE2_DOORBELL_OFFSET 0x075b
+#define regSDMA1_QUEUE2_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE2_CSA_ADDR_LO 0x075c
+#define regSDMA1_QUEUE2_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_CSA_ADDR_HI 0x075d
+#define regSDMA1_QUEUE2_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_SCHEDULE_CNTL 0x075e
+#define regSDMA1_QUEUE2_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_IB_SUB_REMAIN 0x075f
+#define regSDMA1_QUEUE2_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE2_PREEMPT 0x0760
+#define regSDMA1_QUEUE2_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE2_DUMMY_REG 0x0761
+#define regSDMA1_QUEUE2_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI 0x0762
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO 0x0763
+#define regSDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_AQL_CNTL 0x0764
+#define regSDMA1_QUEUE2_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE2_MINOR_PTR_UPDATE 0x0765
+#define regSDMA1_QUEUE2_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE2_RB_PREEMPT 0x0766
+#define regSDMA1_QUEUE2_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA0 0x0770
+#define regSDMA1_QUEUE2_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA1 0x0771
+#define regSDMA1_QUEUE2_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA2 0x0772
+#define regSDMA1_QUEUE2_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA3 0x0773
+#define regSDMA1_QUEUE2_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA4 0x0774
+#define regSDMA1_QUEUE2_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA5 0x0775
+#define regSDMA1_QUEUE2_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA6 0x0776
+#define regSDMA1_QUEUE2_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA7 0x0777
+#define regSDMA1_QUEUE2_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA8 0x0778
+#define regSDMA1_QUEUE2_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA9 0x0779
+#define regSDMA1_QUEUE2_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_DATA10 0x077a
+#define regSDMA1_QUEUE2_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE2_MIDCMD_CNTL 0x077b
+#define regSDMA1_QUEUE2_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_CNTL 0x0788
+#define regSDMA1_QUEUE3_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_BASE 0x0789
+#define regSDMA1_QUEUE3_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_BASE_HI 0x078a
+#define regSDMA1_QUEUE3_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR 0x078b
+#define regSDMA1_QUEUE3_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_HI 0x078c
+#define regSDMA1_QUEUE3_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR 0x078d
+#define regSDMA1_QUEUE3_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_HI 0x078e
+#define regSDMA1_QUEUE3_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_HI 0x0790
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_LO 0x0791
+#define regSDMA1_QUEUE3_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_CNTL 0x0792
+#define regSDMA1_QUEUE3_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_RPTR 0x0793
+#define regSDMA1_QUEUE3_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_OFFSET 0x0794
+#define regSDMA1_QUEUE3_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_BASE_LO 0x0795
+#define regSDMA1_QUEUE3_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_BASE_HI 0x0796
+#define regSDMA1_QUEUE3_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_SIZE 0x0797
+#define regSDMA1_QUEUE3_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE3_SKIP_CNTL 0x0798
+#define regSDMA1_QUEUE3_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_CONTEXT_STATUS 0x0799
+#define regSDMA1_QUEUE3_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL 0x079a
+#define regSDMA1_QUEUE3_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL_LOG 0x07b1
+#define regSDMA1_QUEUE3_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE3_DOORBELL_OFFSET 0x07b3
+#define regSDMA1_QUEUE3_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE3_CSA_ADDR_LO 0x07b4
+#define regSDMA1_QUEUE3_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_CSA_ADDR_HI 0x07b5
+#define regSDMA1_QUEUE3_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_SCHEDULE_CNTL 0x07b6
+#define regSDMA1_QUEUE3_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_IB_SUB_REMAIN 0x07b7
+#define regSDMA1_QUEUE3_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE3_PREEMPT 0x07b8
+#define regSDMA1_QUEUE3_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE3_DUMMY_REG 0x07b9
+#define regSDMA1_QUEUE3_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI 0x07ba
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO 0x07bb
+#define regSDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_AQL_CNTL 0x07bc
+#define regSDMA1_QUEUE3_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE3_MINOR_PTR_UPDATE 0x07bd
+#define regSDMA1_QUEUE3_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE3_RB_PREEMPT 0x07be
+#define regSDMA1_QUEUE3_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA0 0x07c8
+#define regSDMA1_QUEUE3_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA1 0x07c9
+#define regSDMA1_QUEUE3_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA2 0x07ca
+#define regSDMA1_QUEUE3_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA3 0x07cb
+#define regSDMA1_QUEUE3_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA4 0x07cc
+#define regSDMA1_QUEUE3_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA5 0x07cd
+#define regSDMA1_QUEUE3_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA6 0x07ce
+#define regSDMA1_QUEUE3_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA7 0x07cf
+#define regSDMA1_QUEUE3_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA8 0x07d0
+#define regSDMA1_QUEUE3_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA9 0x07d1
+#define regSDMA1_QUEUE3_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_DATA10 0x07d2
+#define regSDMA1_QUEUE3_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE3_MIDCMD_CNTL 0x07d3
+#define regSDMA1_QUEUE3_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_CNTL 0x07e0
+#define regSDMA1_QUEUE4_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_BASE 0x07e1
+#define regSDMA1_QUEUE4_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_BASE_HI 0x07e2
+#define regSDMA1_QUEUE4_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR 0x07e3
+#define regSDMA1_QUEUE4_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_HI 0x07e4
+#define regSDMA1_QUEUE4_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR 0x07e5
+#define regSDMA1_QUEUE4_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_HI 0x07e6
+#define regSDMA1_QUEUE4_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_HI 0x07e8
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_LO 0x07e9
+#define regSDMA1_QUEUE4_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_CNTL 0x07ea
+#define regSDMA1_QUEUE4_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_RPTR 0x07eb
+#define regSDMA1_QUEUE4_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_OFFSET 0x07ec
+#define regSDMA1_QUEUE4_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_BASE_LO 0x07ed
+#define regSDMA1_QUEUE4_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_BASE_HI 0x07ee
+#define regSDMA1_QUEUE4_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_SIZE 0x07ef
+#define regSDMA1_QUEUE4_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE4_SKIP_CNTL 0x07f0
+#define regSDMA1_QUEUE4_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_CONTEXT_STATUS 0x07f1
+#define regSDMA1_QUEUE4_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL 0x07f2
+#define regSDMA1_QUEUE4_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL_LOG 0x0809
+#define regSDMA1_QUEUE4_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE4_DOORBELL_OFFSET 0x080b
+#define regSDMA1_QUEUE4_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE4_CSA_ADDR_LO 0x080c
+#define regSDMA1_QUEUE4_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_CSA_ADDR_HI 0x080d
+#define regSDMA1_QUEUE4_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_SCHEDULE_CNTL 0x080e
+#define regSDMA1_QUEUE4_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_IB_SUB_REMAIN 0x080f
+#define regSDMA1_QUEUE4_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE4_PREEMPT 0x0810
+#define regSDMA1_QUEUE4_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE4_DUMMY_REG 0x0811
+#define regSDMA1_QUEUE4_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI 0x0812
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO 0x0813
+#define regSDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_AQL_CNTL 0x0814
+#define regSDMA1_QUEUE4_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE4_MINOR_PTR_UPDATE 0x0815
+#define regSDMA1_QUEUE4_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE4_RB_PREEMPT 0x0816
+#define regSDMA1_QUEUE4_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA0 0x0820
+#define regSDMA1_QUEUE4_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA1 0x0821
+#define regSDMA1_QUEUE4_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA2 0x0822
+#define regSDMA1_QUEUE4_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA3 0x0823
+#define regSDMA1_QUEUE4_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA4 0x0824
+#define regSDMA1_QUEUE4_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA5 0x0825
+#define regSDMA1_QUEUE4_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA6 0x0826
+#define regSDMA1_QUEUE4_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA7 0x0827
+#define regSDMA1_QUEUE4_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA8 0x0828
+#define regSDMA1_QUEUE4_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA9 0x0829
+#define regSDMA1_QUEUE4_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_DATA10 0x082a
+#define regSDMA1_QUEUE4_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE4_MIDCMD_CNTL 0x082b
+#define regSDMA1_QUEUE4_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_CNTL 0x0838
+#define regSDMA1_QUEUE5_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_BASE 0x0839
+#define regSDMA1_QUEUE5_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_BASE_HI 0x083a
+#define regSDMA1_QUEUE5_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR 0x083b
+#define regSDMA1_QUEUE5_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_HI 0x083c
+#define regSDMA1_QUEUE5_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR 0x083d
+#define regSDMA1_QUEUE5_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_HI 0x083e
+#define regSDMA1_QUEUE5_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_HI 0x0840
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_LO 0x0841
+#define regSDMA1_QUEUE5_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_CNTL 0x0842
+#define regSDMA1_QUEUE5_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_RPTR 0x0843
+#define regSDMA1_QUEUE5_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_OFFSET 0x0844
+#define regSDMA1_QUEUE5_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_BASE_LO 0x0845
+#define regSDMA1_QUEUE5_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_BASE_HI 0x0846
+#define regSDMA1_QUEUE5_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_SIZE 0x0847
+#define regSDMA1_QUEUE5_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE5_SKIP_CNTL 0x0848
+#define regSDMA1_QUEUE5_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_CONTEXT_STATUS 0x0849
+#define regSDMA1_QUEUE5_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL 0x084a
+#define regSDMA1_QUEUE5_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL_LOG 0x0861
+#define regSDMA1_QUEUE5_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE5_DOORBELL_OFFSET 0x0863
+#define regSDMA1_QUEUE5_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE5_CSA_ADDR_LO 0x0864
+#define regSDMA1_QUEUE5_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_CSA_ADDR_HI 0x0865
+#define regSDMA1_QUEUE5_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_SCHEDULE_CNTL 0x0866
+#define regSDMA1_QUEUE5_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_IB_SUB_REMAIN 0x0867
+#define regSDMA1_QUEUE5_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE5_PREEMPT 0x0868
+#define regSDMA1_QUEUE5_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE5_DUMMY_REG 0x0869
+#define regSDMA1_QUEUE5_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI 0x086a
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO 0x086b
+#define regSDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_AQL_CNTL 0x086c
+#define regSDMA1_QUEUE5_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE5_MINOR_PTR_UPDATE 0x086d
+#define regSDMA1_QUEUE5_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE5_RB_PREEMPT 0x086e
+#define regSDMA1_QUEUE5_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA0 0x0878
+#define regSDMA1_QUEUE5_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA1 0x0879
+#define regSDMA1_QUEUE5_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA2 0x087a
+#define regSDMA1_QUEUE5_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA3 0x087b
+#define regSDMA1_QUEUE5_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA4 0x087c
+#define regSDMA1_QUEUE5_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA5 0x087d
+#define regSDMA1_QUEUE5_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA6 0x087e
+#define regSDMA1_QUEUE5_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA7 0x087f
+#define regSDMA1_QUEUE5_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA8 0x0880
+#define regSDMA1_QUEUE5_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA9 0x0881
+#define regSDMA1_QUEUE5_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_DATA10 0x0882
+#define regSDMA1_QUEUE5_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE5_MIDCMD_CNTL 0x0883
+#define regSDMA1_QUEUE5_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_CNTL 0x0890
+#define regSDMA1_QUEUE6_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_BASE 0x0891
+#define regSDMA1_QUEUE6_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_BASE_HI 0x0892
+#define regSDMA1_QUEUE6_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR 0x0893
+#define regSDMA1_QUEUE6_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_HI 0x0894
+#define regSDMA1_QUEUE6_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR 0x0895
+#define regSDMA1_QUEUE6_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_HI 0x0896
+#define regSDMA1_QUEUE6_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_HI 0x0898
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_LO 0x0899
+#define regSDMA1_QUEUE6_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_CNTL 0x089a
+#define regSDMA1_QUEUE6_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_RPTR 0x089b
+#define regSDMA1_QUEUE6_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_OFFSET 0x089c
+#define regSDMA1_QUEUE6_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_BASE_LO 0x089d
+#define regSDMA1_QUEUE6_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_BASE_HI 0x089e
+#define regSDMA1_QUEUE6_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_SIZE 0x089f
+#define regSDMA1_QUEUE6_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE6_SKIP_CNTL 0x08a0
+#define regSDMA1_QUEUE6_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_CONTEXT_STATUS 0x08a1
+#define regSDMA1_QUEUE6_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL 0x08a2
+#define regSDMA1_QUEUE6_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL_LOG 0x08b9
+#define regSDMA1_QUEUE6_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE6_DOORBELL_OFFSET 0x08bb
+#define regSDMA1_QUEUE6_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE6_CSA_ADDR_LO 0x08bc
+#define regSDMA1_QUEUE6_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_CSA_ADDR_HI 0x08bd
+#define regSDMA1_QUEUE6_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_SCHEDULE_CNTL 0x08be
+#define regSDMA1_QUEUE6_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_IB_SUB_REMAIN 0x08bf
+#define regSDMA1_QUEUE6_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE6_PREEMPT 0x08c0
+#define regSDMA1_QUEUE6_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE6_DUMMY_REG 0x08c1
+#define regSDMA1_QUEUE6_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI 0x08c2
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO 0x08c3
+#define regSDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_AQL_CNTL 0x08c4
+#define regSDMA1_QUEUE6_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE6_MINOR_PTR_UPDATE 0x08c5
+#define regSDMA1_QUEUE6_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE6_RB_PREEMPT 0x08c6
+#define regSDMA1_QUEUE6_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA0 0x08d0
+#define regSDMA1_QUEUE6_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA1 0x08d1
+#define regSDMA1_QUEUE6_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA2 0x08d2
+#define regSDMA1_QUEUE6_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA3 0x08d3
+#define regSDMA1_QUEUE6_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA4 0x08d4
+#define regSDMA1_QUEUE6_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA5 0x08d5
+#define regSDMA1_QUEUE6_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA6 0x08d6
+#define regSDMA1_QUEUE6_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA7 0x08d7
+#define regSDMA1_QUEUE6_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA8 0x08d8
+#define regSDMA1_QUEUE6_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA9 0x08d9
+#define regSDMA1_QUEUE6_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_DATA10 0x08da
+#define regSDMA1_QUEUE6_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE6_MIDCMD_CNTL 0x08db
+#define regSDMA1_QUEUE6_MIDCMD_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_CNTL 0x08e8
+#define regSDMA1_QUEUE7_RB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_BASE 0x08e9
+#define regSDMA1_QUEUE7_RB_BASE_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_BASE_HI 0x08ea
+#define regSDMA1_QUEUE7_RB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR 0x08eb
+#define regSDMA1_QUEUE7_RB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_HI 0x08ec
+#define regSDMA1_QUEUE7_RB_RPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR 0x08ed
+#define regSDMA1_QUEUE7_RB_WPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_HI 0x08ee
+#define regSDMA1_QUEUE7_RB_WPTR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_HI 0x08f0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_LO 0x08f1
+#define regSDMA1_QUEUE7_RB_RPTR_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_CNTL 0x08f2
+#define regSDMA1_QUEUE7_IB_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_RPTR 0x08f3
+#define regSDMA1_QUEUE7_IB_RPTR_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_OFFSET 0x08f4
+#define regSDMA1_QUEUE7_IB_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_BASE_LO 0x08f5
+#define regSDMA1_QUEUE7_IB_BASE_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_BASE_HI 0x08f6
+#define regSDMA1_QUEUE7_IB_BASE_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_SIZE 0x08f7
+#define regSDMA1_QUEUE7_IB_SIZE_BASE_IDX 0
+#define regSDMA1_QUEUE7_SKIP_CNTL 0x08f8
+#define regSDMA1_QUEUE7_SKIP_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_CONTEXT_STATUS 0x08f9
+#define regSDMA1_QUEUE7_CONTEXT_STATUS_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL 0x08fa
+#define regSDMA1_QUEUE7_DOORBELL_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL_LOG 0x0911
+#define regSDMA1_QUEUE7_DOORBELL_LOG_BASE_IDX 0
+#define regSDMA1_QUEUE7_DOORBELL_OFFSET 0x0913
+#define regSDMA1_QUEUE7_DOORBELL_OFFSET_BASE_IDX 0
+#define regSDMA1_QUEUE7_CSA_ADDR_LO 0x0914
+#define regSDMA1_QUEUE7_CSA_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_CSA_ADDR_HI 0x0915
+#define regSDMA1_QUEUE7_CSA_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_SCHEDULE_CNTL 0x0916
+#define regSDMA1_QUEUE7_SCHEDULE_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_IB_SUB_REMAIN 0x0917
+#define regSDMA1_QUEUE7_IB_SUB_REMAIN_BASE_IDX 0
+#define regSDMA1_QUEUE7_PREEMPT 0x0918
+#define regSDMA1_QUEUE7_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE7_DUMMY_REG 0x0919
+#define regSDMA1_QUEUE7_DUMMY_REG_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI 0x091a
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO 0x091b
+#define regSDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_AQL_CNTL 0x091c
+#define regSDMA1_QUEUE7_RB_AQL_CNTL_BASE_IDX 0
+#define regSDMA1_QUEUE7_MINOR_PTR_UPDATE 0x091d
+#define regSDMA1_QUEUE7_MINOR_PTR_UPDATE_BASE_IDX 0
+#define regSDMA1_QUEUE7_RB_PREEMPT 0x091e
+#define regSDMA1_QUEUE7_RB_PREEMPT_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA0 0x0928
+#define regSDMA1_QUEUE7_MIDCMD_DATA0_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA1 0x0929
+#define regSDMA1_QUEUE7_MIDCMD_DATA1_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA2 0x092a
+#define regSDMA1_QUEUE7_MIDCMD_DATA2_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA3 0x092b
+#define regSDMA1_QUEUE7_MIDCMD_DATA3_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA4 0x092c
+#define regSDMA1_QUEUE7_MIDCMD_DATA4_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA5 0x092d
+#define regSDMA1_QUEUE7_MIDCMD_DATA5_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA6 0x092e
+#define regSDMA1_QUEUE7_MIDCMD_DATA6_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA7 0x092f
+#define regSDMA1_QUEUE7_MIDCMD_DATA7_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA8 0x0930
+#define regSDMA1_QUEUE7_MIDCMD_DATA8_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA9 0x0931
+#define regSDMA1_QUEUE7_MIDCMD_DATA9_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_DATA10 0x0932
+#define regSDMA1_QUEUE7_MIDCMD_DATA10_BASE_IDX 0
+#define regSDMA1_QUEUE7_MIDCMD_CNTL 0x0933
+#define regSDMA1_QUEUE7_MIDCMD_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sdma0_sdma0hypdec
+// base address: 0x3e200
+#define regSDMA0_UCODE_ADDR 0x5880
+#define regSDMA0_UCODE_ADDR_BASE_IDX 1
+#define regSDMA0_UCODE_DATA 0x5881
+#define regSDMA0_UCODE_DATA_BASE_IDX 1
+#define regSDMA0_UCODE_SELFLOAD_CONTROL 0x5882
+#define regSDMA0_UCODE_SELFLOAD_CONTROL_BASE_IDX 1
+#define regSDMA0_BROADCAST_UCODE_ADDR 0x5886
+#define regSDMA0_BROADCAST_UCODE_ADDR_BASE_IDX 1
+#define regSDMA0_BROADCAST_UCODE_DATA 0x5887
+#define regSDMA0_BROADCAST_UCODE_DATA_BASE_IDX 1
+#define regSDMA0_VM_CTX_LO 0x588c
+#define regSDMA0_VM_CTX_LO_BASE_IDX 1
+#define regSDMA0_VM_CTX_HI 0x588d
+#define regSDMA0_VM_CTX_HI_BASE_IDX 1
+#define regSDMA0_ACTIVE_FCN_ID 0x588e
+#define regSDMA0_ACTIVE_FCN_ID_BASE_IDX 1
+#define regSDMA0_VM_CTX_CNTL 0x588f
+#define regSDMA0_VM_CTX_CNTL_BASE_IDX 1
+#define regSDMA0_VIRT_RESET_REQ 0x5890
+#define regSDMA0_VIRT_RESET_REQ_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE0 0x5891
+#define regSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE1 0x5892
+#define regSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define regSDMA0_CONTEXT_REG_TYPE2 0x5893
+#define regSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE0 0x5894
+#define regSDMA0_PUB_REG_TYPE0_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE1 0x5895
+#define regSDMA0_PUB_REG_TYPE1_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE2 0x5896
+#define regSDMA0_PUB_REG_TYPE2_BASE_IDX 1
+#define regSDMA0_PUB_REG_TYPE3 0x5897
+#define regSDMA0_PUB_REG_TYPE3_BASE_IDX 1
+#define regSDMA0_VM_CNTL 0x5899
+#define regSDMA0_VM_CNTL_BASE_IDX 1
+#define regSDMA0_F32_CNTL 0x589a
+#define regSDMA0_F32_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1hypdec
+// base address: 0x3e280
+#define regSDMA1_UCODE_ADDR 0x58a0
+#define regSDMA1_UCODE_ADDR_BASE_IDX 1
+#define regSDMA1_UCODE_DATA 0x58a1
+#define regSDMA1_UCODE_DATA_BASE_IDX 1
+#define regSDMA1_UCODE_SELFLOAD_CONTROL 0x58a2
+#define regSDMA1_UCODE_SELFLOAD_CONTROL_BASE_IDX 1
+#define regSDMA1_BROADCAST_UCODE_ADDR 0x58a6
+#define regSDMA1_BROADCAST_UCODE_ADDR_BASE_IDX 1
+#define regSDMA1_BROADCAST_UCODE_DATA 0x58a7
+#define regSDMA1_BROADCAST_UCODE_DATA_BASE_IDX 1
+#define regSDMA1_VM_CTX_LO 0x58ac
+#define regSDMA1_VM_CTX_LO_BASE_IDX 1
+#define regSDMA1_VM_CTX_HI 0x58ad
+#define regSDMA1_VM_CTX_HI_BASE_IDX 1
+#define regSDMA1_ACTIVE_FCN_ID 0x58ae
+#define regSDMA1_ACTIVE_FCN_ID_BASE_IDX 1
+#define regSDMA1_VM_CTX_CNTL 0x58af
+#define regSDMA1_VM_CTX_CNTL_BASE_IDX 1
+#define regSDMA1_VIRT_RESET_REQ 0x58b0
+#define regSDMA1_VIRT_RESET_REQ_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE0 0x58b1
+#define regSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE1 0x58b2
+#define regSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 1
+#define regSDMA1_CONTEXT_REG_TYPE2 0x58b3
+#define regSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE0 0x58b4
+#define regSDMA1_PUB_REG_TYPE0_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE1 0x58b5
+#define regSDMA1_PUB_REG_TYPE1_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE2 0x58b6
+#define regSDMA1_PUB_REG_TYPE2_BASE_IDX 1
+#define regSDMA1_PUB_REG_TYPE3 0x58b7
+#define regSDMA1_PUB_REG_TYPE3_BASE_IDX 1
+#define regSDMA1_VM_CNTL 0x58b9
+#define regSDMA1_VM_CNTL_BASE_IDX 1
+#define regSDMA1_F32_CNTL 0x58ba
+#define regSDMA1_F32_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma0perfsdec
+// base address: 0x37880
+#define regSDMA0_PERFCNT_PERFCOUNTER0_CFG 0x3e20
+#define regSDMA0_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER1_CFG 0x3e21
+#define regSDMA0_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x3e22
+#define regSDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSDMA0_PERFCNT_MISC_CNTL 0x3e23
+#define regSDMA0_PERFCNT_MISC_CNTL_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_SELECT 0x3e24
+#define regSDMA0_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_SELECT1 0x3e25
+#define regSDMA0_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_SELECT 0x3e26
+#define regSDMA0_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_SELECT1 0x3e27
+#define regSDMA0_PERFCOUNTER1_SELECT1_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1perfsdec
+// base address: 0x378b0
+#define regSDMA1_PERFCNT_PERFCOUNTER0_CFG 0x3e2c
+#define regSDMA1_PERFCNT_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER1_CFG 0x3e2d
+#define regSDMA1_PERFCNT_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL 0x3e2e
+#define regSDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSDMA1_PERFCNT_MISC_CNTL 0x3e2f
+#define regSDMA1_PERFCNT_MISC_CNTL_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_SELECT 0x3e30
+#define regSDMA1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_SELECT1 0x3e31
+#define regSDMA1_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_SELECT 0x3e32
+#define regSDMA1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_SELECT1 0x3e33
+#define regSDMA1_PERFCOUNTER1_SELECT1_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma0perfddec
+// base address: 0x35980
+#define regSDMA0_PERFCNT_PERFCOUNTER_LO 0x3660
+#define regSDMA0_PERFCNT_PERFCOUNTER_LO_BASE_IDX 1
+#define regSDMA0_PERFCNT_PERFCOUNTER_HI 0x3661
+#define regSDMA0_PERFCNT_PERFCOUNTER_HI_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_LO 0x3662
+#define regSDMA0_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER0_HI 0x3663
+#define regSDMA0_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_LO 0x3664
+#define regSDMA0_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSDMA0_PERFCOUNTER1_HI 0x3665
+#define regSDMA0_PERFCOUNTER1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_sdma0_sdma1perfddec
+// base address: 0x359b0
+#define regSDMA1_PERFCNT_PERFCOUNTER_LO 0x366c
+#define regSDMA1_PERFCNT_PERFCOUNTER_LO_BASE_IDX 1
+#define regSDMA1_PERFCNT_PERFCOUNTER_HI 0x366d
+#define regSDMA1_PERFCNT_PERFCOUNTER_HI_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_LO 0x366e
+#define regSDMA1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER0_HI 0x366f
+#define regSDMA1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_LO 0x3670
+#define regSDMA1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSDMA1_PERFCOUNTER1_HI 0x3671
+#define regSDMA1_PERFCOUNTER1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_grbmdec
+// base address: 0x8000
+#define regGRBM_CNTL 0x0da0
+#define regGRBM_CNTL_BASE_IDX 0
+#define regGRBM_SKEW_CNTL 0x0da1
+#define regGRBM_SKEW_CNTL_BASE_IDX 0
+#define regGRBM_STATUS2 0x0da2
+#define regGRBM_STATUS2_BASE_IDX 0
+#define regGRBM_PWR_CNTL 0x0da3
+#define regGRBM_PWR_CNTL_BASE_IDX 0
+#define regGRBM_STATUS 0x0da4
+#define regGRBM_STATUS_BASE_IDX 0
+#define regGRBM_STATUS_SE0 0x0da5
+#define regGRBM_STATUS_SE0_BASE_IDX 0
+#define regGRBM_STATUS_SE1 0x0da6
+#define regGRBM_STATUS_SE1_BASE_IDX 0
+#define regGRBM_STATUS3 0x0da7
+#define regGRBM_STATUS3_BASE_IDX 0
+#define regGRBM_SOFT_RESET 0x0da8
+#define regGRBM_SOFT_RESET_BASE_IDX 0
+#define regGRBM_GFX_CLKEN_CNTL 0x0dac
+#define regGRBM_GFX_CLKEN_CNTL_BASE_IDX 0
+#define regGRBM_WAIT_IDLE_CLOCKS 0x0dad
+#define regGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0
+#define regGRBM_STATUS_SE2 0x0dae
+#define regGRBM_STATUS_SE2_BASE_IDX 0
+#define regGRBM_READ_ERROR 0x0db6
+#define regGRBM_READ_ERROR_BASE_IDX 0
+#define regGRBM_READ_ERROR2 0x0db7
+#define regGRBM_READ_ERROR2_BASE_IDX 0
+#define regGRBM_INT_CNTL 0x0db8
+#define regGRBM_INT_CNTL_BASE_IDX 0
+#define regGRBM_TRAP_OP 0x0db9
+#define regGRBM_TRAP_OP_BASE_IDX 0
+#define regGRBM_TRAP_ADDR 0x0dba
+#define regGRBM_TRAP_ADDR_BASE_IDX 0
+#define regGRBM_TRAP_ADDR_MSK 0x0dbb
+#define regGRBM_TRAP_ADDR_MSK_BASE_IDX 0
+#define regGRBM_TRAP_WD 0x0dbc
+#define regGRBM_TRAP_WD_BASE_IDX 0
+#define regGRBM_TRAP_WD_MSK 0x0dbd
+#define regGRBM_TRAP_WD_MSK_BASE_IDX 0
+#define regGRBM_DSM_BYPASS 0x0dbe
+#define regGRBM_DSM_BYPASS_BASE_IDX 0
+#define regGRBM_WRITE_ERROR 0x0dbf
+#define regGRBM_WRITE_ERROR_BASE_IDX 0
+#define regGRBM_CHIP_REVISION 0x0dc1
+#define regGRBM_CHIP_REVISION_BASE_IDX 0
+#define regGRBM_RSMU_CFG 0x0dc3
+#define regGRBM_RSMU_CFG_BASE_IDX 0
+#define regGRBM_IH_CREDIT 0x0dc4
+#define regGRBM_IH_CREDIT_BASE_IDX 0
+#define regGRBM_PWR_CNTL2 0x0dc5
+#define regGRBM_PWR_CNTL2_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_START 0x0dc6
+#define regGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0
+#define regGRBM_UTCL2_INVAL_RANGE_END 0x0dc7
+#define regGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0
+#define regGRBM_RSMU_READ_ERROR 0x0dc8
+#define regGRBM_RSMU_READ_ERROR_BASE_IDX 0
+#define regGRBM_INVALID_PIPE 0x0dc9
+#define regGRBM_INVALID_PIPE_BASE_IDX 0
+#define regGRBM_FENCE_RANGE0 0x0dca
+#define regGRBM_FENCE_RANGE0_BASE_IDX 0
+#define regGRBM_FENCE_RANGE1 0x0dcb
+#define regGRBM_FENCE_RANGE1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG0 0x0de0
+#define regGRBM_SCRATCH_REG0_BASE_IDX 0
+#define regGRBM_SCRATCH_REG1 0x0de1
+#define regGRBM_SCRATCH_REG1_BASE_IDX 0
+#define regGRBM_SCRATCH_REG2 0x0de2
+#define regGRBM_SCRATCH_REG2_BASE_IDX 0
+#define regGRBM_SCRATCH_REG3 0x0de3
+#define regGRBM_SCRATCH_REG3_BASE_IDX 0
+#define regGRBM_SCRATCH_REG4 0x0de4
+#define regGRBM_SCRATCH_REG4_BASE_IDX 0
+#define regGRBM_SCRATCH_REG5 0x0de5
+#define regGRBM_SCRATCH_REG5_BASE_IDX 0
+#define regGRBM_SCRATCH_REG6 0x0de6
+#define regGRBM_SCRATCH_REG6_BASE_IDX 0
+#define regGRBM_SCRATCH_REG7 0x0de7
+#define regGRBM_SCRATCH_REG7_BASE_IDX 0
+#define regVIOLATION_DATA_ASYNC_VF_PROG 0x0df1
+#define regVIOLATION_DATA_ASYNC_VF_PROG_BASE_IDX 0
+
+
+// addressBlock: gc_cpdec
+// base address: 0x8200
+#define regCP_CPC_DEBUG_CNTL 0x0e20
+#define regCP_CPC_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPF_DEBUG_CNTL 0x0e22
+#define regCP_CPF_DEBUG_CNTL_BASE_IDX 0
+#define regCP_CPC_STATUS 0x0e24
+#define regCP_CPC_STATUS_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT 0x0e25
+#define regCP_CPC_BUSY_STAT_BASE_IDX 0
+#define regCP_CPC_STALLED_STAT1 0x0e26
+#define regCP_CPC_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPF_STATUS 0x0e27
+#define regCP_CPF_STATUS_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT 0x0e28
+#define regCP_CPF_BUSY_STAT_BASE_IDX 0
+#define regCP_CPF_STALLED_STAT1 0x0e29
+#define regCP_CPF_STALLED_STAT1_BASE_IDX 0
+#define regCP_CPC_BUSY_STAT2 0x0e2a
+#define regCP_CPC_BUSY_STAT2_BASE_IDX 0
+#define regCP_CPC_GRBM_FREE_COUNT 0x0e2b
+#define regCP_CPC_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPC_PRIV_VIOLATION_ADDR 0x0e2c
+#define regCP_CPC_PRIV_VIOLATION_ADDR_BASE_IDX 0
+#define regCP_MEC_ME1_HEADER_DUMP 0x0e2e
+#define regCP_MEC_ME1_HEADER_DUMP_BASE_IDX 0
+#define regCP_MEC_ME2_HEADER_DUMP 0x0e2f
+#define regCP_MEC_ME2_HEADER_DUMP_BASE_IDX 0
+#define regCP_CPC_SCRATCH_INDEX 0x0e30
+#define regCP_CPC_SCRATCH_INDEX_BASE_IDX 0
+#define regCP_CPC_SCRATCH_DATA 0x0e31
+#define regCP_CPC_SCRATCH_DATA_BASE_IDX 0
+#define regCP_CPF_GRBM_FREE_COUNT 0x0e32
+#define regCP_CPF_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_CPF_BUSY_STAT2 0x0e33
+#define regCP_CPF_BUSY_STAT2_BASE_IDX 0
+#define regCP_CPC_HALT_HYST_COUNT 0x0e47
+#define regCP_CPC_HALT_HYST_COUNT_BASE_IDX 0
+#define regCP_STALLED_STAT3 0x0f3c
+#define regCP_STALLED_STAT3_BASE_IDX 0
+#define regCP_STALLED_STAT1 0x0f3d
+#define regCP_STALLED_STAT1_BASE_IDX 0
+#define regCP_STALLED_STAT2 0x0f3e
+#define regCP_STALLED_STAT2_BASE_IDX 0
+#define regCP_BUSY_STAT 0x0f3f
+#define regCP_BUSY_STAT_BASE_IDX 0
+#define regCP_STAT 0x0f40
+#define regCP_STAT_BASE_IDX 0
+#define regCP_ME_HEADER_DUMP 0x0f41
+#define regCP_ME_HEADER_DUMP_BASE_IDX 0
+#define regCP_PFP_HEADER_DUMP 0x0f42
+#define regCP_PFP_HEADER_DUMP_BASE_IDX 0
+#define regCP_GRBM_FREE_COUNT 0x0f43
+#define regCP_GRBM_FREE_COUNT_BASE_IDX 0
+#define regCP_PFP_INSTR_PNTR 0x0f45
+#define regCP_PFP_INSTR_PNTR_BASE_IDX 0
+#define regCP_ME_INSTR_PNTR 0x0f46
+#define regCP_ME_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC1_INSTR_PNTR 0x0f48
+#define regCP_MEC1_INSTR_PNTR_BASE_IDX 0
+#define regCP_MEC2_INSTR_PNTR 0x0f49
+#define regCP_MEC2_INSTR_PNTR_BASE_IDX 0
+#define regCP_CSF_STAT 0x0f54
+#define regCP_CSF_STAT_BASE_IDX 0
+#define regCP_CNTX_STAT 0x0f58
+#define regCP_CNTX_STAT_BASE_IDX 0
+#define regCP_ME_PREEMPTION 0x0f59
+#define regCP_ME_PREEMPTION_BASE_IDX 0
+#define regCP_RB1_RPTR 0x0f5f
+#define regCP_RB1_RPTR_BASE_IDX 0
+#define regCP_RB0_RPTR 0x0f60
+#define regCP_RB0_RPTR_BASE_IDX 0
+#define regCP_RB_RPTR 0x0f60
+#define regCP_RB_RPTR_BASE_IDX 0
+#define regCP_RB_WPTR_DELAY 0x0f61
+#define regCP_RB_WPTR_DELAY_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_CNTL 0x0f62
+#define regCP_RB_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_ROQ1_THRESHOLDS 0x0f75
+#define regCP_ROQ1_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ2_THRESHOLDS 0x0f76
+#define regCP_ROQ2_THRESHOLDS_BASE_IDX 0
+#define regCP_STQ_THRESHOLDS 0x0f77
+#define regCP_STQ_THRESHOLDS_BASE_IDX 0
+#define regCP_MEQ_THRESHOLDS 0x0f79
+#define regCP_MEQ_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_AVAIL 0x0f7a
+#define regCP_ROQ_AVAIL_BASE_IDX 0
+#define regCP_STQ_AVAIL 0x0f7b
+#define regCP_STQ_AVAIL_BASE_IDX 0
+#define regCP_ROQ2_AVAIL 0x0f7c
+#define regCP_ROQ2_AVAIL_BASE_IDX 0
+#define regCP_MEQ_AVAIL 0x0f7d
+#define regCP_MEQ_AVAIL_BASE_IDX 0
+#define regCP_CMD_INDEX 0x0f7e
+#define regCP_CMD_INDEX_BASE_IDX 0
+#define regCP_CMD_DATA 0x0f7f
+#define regCP_CMD_DATA_BASE_IDX 0
+#define regCP_ROQ_RB_STAT 0x0f80
+#define regCP_ROQ_RB_STAT_BASE_IDX 0
+#define regCP_ROQ_IB1_STAT 0x0f81
+#define regCP_ROQ_IB1_STAT_BASE_IDX 0
+#define regCP_ROQ_IB2_STAT 0x0f82
+#define regCP_ROQ_IB2_STAT_BASE_IDX 0
+#define regCP_STQ_STAT 0x0f83
+#define regCP_STQ_STAT_BASE_IDX 0
+#define regCP_STQ_WR_STAT 0x0f84
+#define regCP_STQ_WR_STAT_BASE_IDX 0
+#define regCP_MEQ_STAT 0x0f85
+#define regCP_MEQ_STAT_BASE_IDX 0
+#define regCP_ROQ3_THRESHOLDS 0x0f8c
+#define regCP_ROQ3_THRESHOLDS_BASE_IDX 0
+#define regCP_ROQ_DB_STAT 0x0f8d
+#define regCP_ROQ_DB_STAT_BASE_IDX 0
+#define regCP_INT_STAT_DEBUG 0x0f97
+#define regCP_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_DEBUG_CNTL 0x0f98
+#define regCP_DEBUG_CNTL_BASE_IDX 0
+#define regCP_PRIV_VIOLATION_ADDR 0x0f9a
+#define regCP_PRIV_VIOLATION_ADDR_BASE_IDX 0
+
+
+// addressBlock: gc_padec
+// base address: 0x8800
+#define regVGT_DMA_DATA_FIFO_DEPTH 0x0fcd
+#define regVGT_DMA_DATA_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DMA_REQ_FIFO_DEPTH 0x0fce
+#define regVGT_DMA_REQ_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_DRAW_INIT_FIFO_DEPTH 0x0fcf
+#define regVGT_DRAW_INIT_FIFO_DEPTH_BASE_IDX 0
+#define regVGT_MC_LAT_CNTL 0x0fd6
+#define regVGT_MC_LAT_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS_2 0x0fd7
+#define regIA_UTCL1_STATUS_2_BASE_IDX 0
+#define regWD_CNTL_STATUS 0x0fdf
+#define regWD_CNTL_STATUS_BASE_IDX 0
+#define regCC_GC_PRIM_CONFIG 0x0fe0
+#define regCC_GC_PRIM_CONFIG_BASE_IDX 0
+#define regWD_QOS 0x0fe2
+#define regWD_QOS_BASE_IDX 0
+#define regWD_UTCL1_CNTL 0x0fe3
+#define regWD_UTCL1_CNTL_BASE_IDX 0
+#define regWD_UTCL1_STATUS 0x0fe4
+#define regWD_UTCL1_STATUS_BASE_IDX 0
+#define regIA_UTCL1_CNTL 0x0fe6
+#define regIA_UTCL1_CNTL_BASE_IDX 0
+#define regIA_UTCL1_STATUS 0x0fe7
+#define regIA_UTCL1_STATUS_BASE_IDX 0
+#define regCC_GC_SA_UNIT_DISABLE 0x0fe9
+#define regCC_GC_SA_UNIT_DISABLE_BASE_IDX 0
+#define regGE_RATE_CNTL_1 0x0ff4
+#define regGE_RATE_CNTL_1_BASE_IDX 0
+#define regGE_RATE_CNTL_2 0x0ff5
+#define regGE_RATE_CNTL_2_BASE_IDX 0
+#define regVGT_SYS_CONFIG 0x1003
+#define regVGT_SYS_CONFIG_BASE_IDX 0
+#define regGE_PRIV_CONTROL 0x1004
+#define regGE_PRIV_CONTROL_BASE_IDX 0
+#define regGE_STATUS 0x1005
+#define regGE_STATUS_BASE_IDX 0
+#define regVGT_GS_MAX_WAVE_ID 0x1009
+#define regVGT_GS_MAX_WAVE_ID_BASE_IDX 0
+#define regGFX_PIPE_CONTROL 0x100d
+#define regGFX_PIPE_CONTROL_BASE_IDX 0
+#define regCC_GC_SHADER_ARRAY_CONFIG 0x100f
+#define regCC_GC_SHADER_ARRAY_CONFIG_BASE_IDX 0
+#define regGE2_SE_CNTL_STATUS 0x1011
+#define regGE2_SE_CNTL_STATUS_BASE_IDX 0
+#define regVGT_RESET_DEBUG 0x1014
+#define regVGT_RESET_DEBUG_BASE_IDX 0
+#define regGE_SPI_IF_SAFE_REG 0x1018
+#define regGE_SPI_IF_SAFE_REG_BASE_IDX 0
+#define regGE_PA_IF_SAFE_REG 0x1019
+#define regGE_PA_IF_SAFE_REG_BASE_IDX 0
+#define regPA_CL_CNTL_STATUS 0x1024
+#define regPA_CL_CNTL_STATUS_BASE_IDX 0
+#define regPA_CL_ENHANCE 0x1025
+#define regPA_CL_ENHANCE_BASE_IDX 0
+#define regPA_CL_RESET_DEBUG 0x1026
+#define regPA_CL_RESET_DEBUG_BASE_IDX 0
+#define regPA_SU_CNTL_STATUS 0x1034
+#define regPA_SU_CNTL_STATUS_BASE_IDX 0
+#define regPA_SC_FIFO_DEPTH_CNTL 0x1035
+#define regPA_SC_FIFO_DEPTH_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_sqdec
+// base address: 0x8c00
+#define regSQ_CONFIG 0x10a0
+#define regSQ_CONFIG_BASE_IDX 0
+#define regSQC_CONFIG 0x10a1
+#define regSQC_CONFIG_BASE_IDX 0
+#define regLDS_CONFIG 0x10a2
+#define regLDS_CONFIG_BASE_IDX 0
+#define regSQ_RANDOM_WAVE_PRI 0x10a3
+#define regSQ_RANDOM_WAVE_PRI_BASE_IDX 0
+#define regSQG_STATUS 0x10a4
+#define regSQG_STATUS_BASE_IDX 0
+#define regSQ_FIFO_SIZES 0x10a5
+#define regSQ_FIFO_SIZES_BASE_IDX 0
+#define regSQ_DSM_CNTL 0x10a6
+#define regSQ_DSM_CNTL_BASE_IDX 0
+#define regSQ_DSM_CNTL2 0x10a7
+#define regSQ_DSM_CNTL2_BASE_IDX 0
+#define regSP_CONFIG 0x10ab
+#define regSP_CONFIG_BASE_IDX 0
+#define regSQ_ARB_CONFIG 0x10ac
+#define regSQ_ARB_CONFIG_BASE_IDX 0
+#define regSQ_DEBUG_HOST_TRAP_STATUS 0x10b6
+#define regSQ_DEBUG_HOST_TRAP_STATUS_BASE_IDX 0
+#define regSQG_GL1H_STATUS 0x10b9
+#define regSQG_GL1H_STATUS_BASE_IDX 0
+#define regSQG_CONFIG 0x10ba
+#define regSQG_CONFIG_BASE_IDX 0
+#define regSQ_PERF_SNAPSHOT_CTRL 0x10bb
+#define regSQ_PERF_SNAPSHOT_CTRL_BASE_IDX 0
+#define regCC_GC_SHADER_RATE_CONFIG 0x10bc
+#define regCC_GC_SHADER_RATE_CONFIG_BASE_IDX 0
+#define regSQ_INTERRUPT_AUTO_MASK 0x10be
+#define regSQ_INTERRUPT_AUTO_MASK_BASE_IDX 0
+#define regSQ_INTERRUPT_MSG_CTRL 0x10bf
+#define regSQ_INTERRUPT_MSG_CTRL_BASE_IDX 0
+#define regSQ_WATCH0_ADDR_H 0x10d0
+#define regSQ_WATCH0_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH0_ADDR_L 0x10d1
+#define regSQ_WATCH0_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH0_CNTL 0x10d2
+#define regSQ_WATCH0_CNTL_BASE_IDX 0
+#define regSQ_WATCH1_ADDR_H 0x10d3
+#define regSQ_WATCH1_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH1_ADDR_L 0x10d4
+#define regSQ_WATCH1_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH1_CNTL 0x10d5
+#define regSQ_WATCH1_CNTL_BASE_IDX 0
+#define regSQ_WATCH2_ADDR_H 0x10d6
+#define regSQ_WATCH2_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH2_ADDR_L 0x10d7
+#define regSQ_WATCH2_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH2_CNTL 0x10d8
+#define regSQ_WATCH2_CNTL_BASE_IDX 0
+#define regSQ_WATCH3_ADDR_H 0x10d9
+#define regSQ_WATCH3_ADDR_H_BASE_IDX 0
+#define regSQ_WATCH3_ADDR_L 0x10da
+#define regSQ_WATCH3_ADDR_L_BASE_IDX 0
+#define regSQ_WATCH3_CNTL 0x10db
+#define regSQ_WATCH3_CNTL_BASE_IDX 0
+#define regSQ_IND_INDEX 0x1118
+#define regSQ_IND_INDEX_BASE_IDX 0
+#define regSQ_IND_DATA 0x1119
+#define regSQ_IND_DATA_BASE_IDX 0
+#define regSQ_CMD 0x111b
+#define regSQ_CMD_BASE_IDX 0
+#define regSQC_MISC_CONFIG 0x1179
+#define regSQC_MISC_CONFIG_BASE_IDX 0
+
+
+// addressBlock: gc_shsdec
+// base address: 0x9000
+#define regSX_DEBUG_BUSY 0x11b4
+#define regSX_DEBUG_BUSY_BASE_IDX 0
+#define regSX_DEBUG_BUSY_2 0x11b5
+#define regSX_DEBUG_BUSY_2_BASE_IDX 0
+#define regSX_DEBUG_BUSY_3 0x11b6
+#define regSX_DEBUG_BUSY_3_BASE_IDX 0
+#define regSX_DEBUG_BUSY_4 0x11b7
+#define regSX_DEBUG_BUSY_4_BASE_IDX 0
+#define regSX_DEBUG_1 0x11b8
+#define regSX_DEBUG_1_BASE_IDX 0
+#define regSX_DEBUG_BUSY_5 0x11b9
+#define regSX_DEBUG_BUSY_5_BASE_IDX 0
+#define regSX_DEBUG_BUSY_6 0x11ba
+#define regSX_DEBUG_BUSY_6_BASE_IDX 0
+#define regSX_DEBUG_BUSY_7 0x11bb
+#define regSX_DEBUG_BUSY_7_BASE_IDX 0
+#define regSX_DEBUG_BUSY_8 0x11bc
+#define regSX_DEBUG_BUSY_8_BASE_IDX 0
+#define regSX_DEBUG_BUSY_9 0x11bd
+#define regSX_DEBUG_BUSY_9_BASE_IDX 0
+#define regSX_DEBUG_BUSY_10 0x11be
+#define regSX_DEBUG_BUSY_10_BASE_IDX 0
+#define regSPI_PS_MAX_WAVE_ID 0x11da
+#define regSPI_PS_MAX_WAVE_ID_BASE_IDX 0
+#define regSPI_GFX_CNTL 0x11dc
+#define regSPI_GFX_CNTL_BASE_IDX 0
+#define regSPI_DEBUG_READ 0x11e2
+#define regSPI_DEBUG_READ_BASE_IDX 0
+#define regSPI_DSM_CNTL 0x11e3
+#define regSPI_DSM_CNTL_BASE_IDX 0
+#define regSPI_DSM_CNTL2 0x11e4
+#define regSPI_DSM_CNTL2_BASE_IDX 0
+#define regSPI_EDC_CNT 0x11e5
+#define regSPI_EDC_CNT_BASE_IDX 0
+#define regSPI_DEBUG_BUSY 0x11f0
+#define regSPI_DEBUG_BUSY_BASE_IDX 0
+#define regSPI_CONFIG_PS_CU_EN 0x11f2
+#define regSPI_CONFIG_PS_CU_EN_BASE_IDX 0
+#define regSPI_WF_LIFETIME_CNTL 0x124a
+#define regSPI_WF_LIFETIME_CNTL_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_0 0x124b
+#define regSPI_WF_LIFETIME_LIMIT_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_1 0x124c
+#define regSPI_WF_LIFETIME_LIMIT_1_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_2 0x124d
+#define regSPI_WF_LIFETIME_LIMIT_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_3 0x124e
+#define regSPI_WF_LIFETIME_LIMIT_3_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_4 0x124f
+#define regSPI_WF_LIFETIME_LIMIT_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_LIMIT_5 0x1250
+#define regSPI_WF_LIFETIME_LIMIT_5_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_0 0x1255
+#define regSPI_WF_LIFETIME_STATUS_0_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_2 0x1257
+#define regSPI_WF_LIFETIME_STATUS_2_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_4 0x1259
+#define regSPI_WF_LIFETIME_STATUS_4_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_6 0x125b
+#define regSPI_WF_LIFETIME_STATUS_6_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_7 0x125c
+#define regSPI_WF_LIFETIME_STATUS_7_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_9 0x125e
+#define regSPI_WF_LIFETIME_STATUS_9_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_11 0x1260
+#define regSPI_WF_LIFETIME_STATUS_11_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_13 0x1262
+#define regSPI_WF_LIFETIME_STATUS_13_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_14 0x1263
+#define regSPI_WF_LIFETIME_STATUS_14_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_15 0x1264
+#define regSPI_WF_LIFETIME_STATUS_15_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_16 0x1265
+#define regSPI_WF_LIFETIME_STATUS_16_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_17 0x1266
+#define regSPI_WF_LIFETIME_STATUS_17_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_18 0x1267
+#define regSPI_WF_LIFETIME_STATUS_18_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_19 0x1268
+#define regSPI_WF_LIFETIME_STATUS_19_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_20 0x1269
+#define regSPI_WF_LIFETIME_STATUS_20_BASE_IDX 0
+#define regSPI_WF_LIFETIME_DEBUG 0x126a
+#define regSPI_WF_LIFETIME_DEBUG_BASE_IDX 0
+#define regSPI_WF_LIFETIME_STATUS_21 0x126b
+#define regSPI_WF_LIFETIME_STATUS_21_BASE_IDX 0
+#define regSPI_LB_CTR_CTRL 0x1274
+#define regSPI_LB_CTR_CTRL_BASE_IDX 0
+#define regSPI_LB_WGP_MASK 0x1275
+#define regSPI_LB_WGP_MASK_BASE_IDX 0
+#define regSPI_LB_DATA_REG 0x1276
+#define regSPI_LB_DATA_REG_BASE_IDX 0
+#define regSPI_PG_ENABLE_STATIC_WGP_MASK 0x1277
+#define regSPI_PG_ENABLE_STATIC_WGP_MASK_BASE_IDX 0
+#define regSPI_GDS_CREDITS 0x1278
+#define regSPI_GDS_CREDITS_BASE_IDX 0
+#define regSPI_SX_EXPORT_BUFFER_SIZES 0x1279
+#define regSPI_SX_EXPORT_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES 0x127a
+#define regSPI_SX_SCOREBOARD_BUFFER_SIZES_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_STATUS 0x127b
+#define regSPI_CSQ_WF_ACTIVE_STATUS_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0 0x127c
+#define regSPI_CSQ_WF_ACTIVE_COUNT_0_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1 0x127d
+#define regSPI_CSQ_WF_ACTIVE_COUNT_1_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2 0x127e
+#define regSPI_CSQ_WF_ACTIVE_COUNT_2_BASE_IDX 0
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3 0x127f
+#define regSPI_CSQ_WF_ACTIVE_COUNT_3_BASE_IDX 0
+#define regSPI_LB_DATA_WAVES 0x1284
+#define regSPI_LB_DATA_WAVES_BASE_IDX 0
+#define regSPI_LB_DATA_PERWGP_WAVE_HSGS 0x1285
+#define regSPI_LB_DATA_PERWGP_WAVE_HSGS_BASE_IDX 0
+#define regSPI_LB_DATA_PERWGP_WAVE_CS 0x1287
+#define regSPI_LB_DATA_PERWGP_WAVE_CS_BASE_IDX 0
+#define regSPIS_DEBUG_READ 0x128a
+#define regSPIS_DEBUG_READ_BASE_IDX 0
+#define regBCI_DEBUG_READ 0x128b
+#define regBCI_DEBUG_READ_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO 0x128c
+#define regSPI_P0_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI 0x128d
+#define regSPI_P0_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO 0x128e
+#define regSPI_P0_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI 0x128f
+#define regSPI_P0_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN 0x1290
+#define regSPI_P0_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO 0x1291
+#define regSPI_P1_TRAP_SCREEN_PSBA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI 0x1292
+#define regSPI_P1_TRAP_SCREEN_PSBA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO 0x1293
+#define regSPI_P1_TRAP_SCREEN_PSMA_LO_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI 0x1294
+#define regSPI_P1_TRAP_SCREEN_PSMA_HI_BASE_IDX 0
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN 0x1295
+#define regSPI_P1_TRAP_SCREEN_GPR_MIN_BASE_IDX 0
+
+
+// addressBlock: gc_tpdec
+// base address: 0x9400
+#define regTD_CNTL 0x12c5
+#define regTD_CNTL_BASE_IDX 0
+#define regTD_STATUS 0x12c6
+#define regTD_STATUS_BASE_IDX 0
+#define regTD_POWER_CNTL 0x12ca
+#define regTD_POWER_CNTL_BASE_IDX 0
+#define regTD_CNTL2 0x12cb
+#define regTD_CNTL2_BASE_IDX 0
+#define regTD_DSM_CNTL 0x12cf
+#define regTD_DSM_CNTL_BASE_IDX 0
+#define regTD_DSM_CNTL2 0x12d0
+#define regTD_DSM_CNTL2_BASE_IDX 0
+#define regTD_SCRATCH 0x12d3
+#define regTD_SCRATCH_BASE_IDX 0
+#define regTA_CNTL 0x12e1
+#define regTA_CNTL_BASE_IDX 0
+#define regTA_CNTL_AUX 0x12e2
+#define regTA_CNTL_AUX_BASE_IDX 0
+#define regTA_CNTL2 0x12e5
+#define regTA_CNTL2_BASE_IDX 0
+#define regTA_STATUS 0x12e8
+#define regTA_STATUS_BASE_IDX 0
+#define regTA_SCRATCH 0x1304
+#define regTA_SCRATCH_BASE_IDX 0
+
+
+// addressBlock: gc_gdsdec
+// base address: 0x9700
+#define regGDS_CONFIG 0x1360
+#define regGDS_CONFIG_BASE_IDX 0
+#define regGDS_CNTL_STATUS 0x1361
+#define regGDS_CNTL_STATUS_BASE_IDX 0
+#define regGDS_ENHANCE 0x1362
+#define regGDS_ENHANCE_BASE_IDX 0
+#define regGDS_PROTECTION_FAULT 0x1363
+#define regGDS_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_VM_PROTECTION_FAULT 0x1364
+#define regGDS_VM_PROTECTION_FAULT_BASE_IDX 0
+#define regGDS_EDC_CNT 0x1365
+#define regGDS_EDC_CNT_BASE_IDX 0
+#define regGDS_EDC_GRBM_CNT 0x1366
+#define regGDS_EDC_GRBM_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_DED 0x1367
+#define regGDS_EDC_OA_DED_BASE_IDX 0
+#define regGDS_DSM_CNTL 0x136a
+#define regGDS_DSM_CNTL_BASE_IDX 0
+#define regGDS_EDC_OA_PHY_CNT 0x136b
+#define regGDS_EDC_OA_PHY_CNT_BASE_IDX 0
+#define regGDS_EDC_OA_PIPE_CNT 0x136c
+#define regGDS_EDC_OA_PIPE_CNT_BASE_IDX 0
+#define regGDS_DSM_CNTL2 0x136d
+#define regGDS_DSM_CNTL2_BASE_IDX 0
+
+
+// addressBlock: gc_rbdec
+// base address: 0x9800
+#define regDB_DEBUG 0x13ac
+#define regDB_DEBUG_BASE_IDX 0
+#define regDB_DEBUG2 0x13ad
+#define regDB_DEBUG2_BASE_IDX 0
+#define regDB_DEBUG3 0x13ae
+#define regDB_DEBUG3_BASE_IDX 0
+#define regDB_DEBUG4 0x13af
+#define regDB_DEBUG4_BASE_IDX 0
+#define regDB_ETILE_STUTTER_CONTROL 0x13b0
+#define regDB_ETILE_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_LTILE_STUTTER_CONTROL 0x13b1
+#define regDB_LTILE_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_EQUAD_STUTTER_CONTROL 0x13b2
+#define regDB_EQUAD_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_LQUAD_STUTTER_CONTROL 0x13b3
+#define regDB_LQUAD_STUTTER_CONTROL_BASE_IDX 0
+#define regDB_CREDIT_LIMIT 0x13b4
+#define regDB_CREDIT_LIMIT_BASE_IDX 0
+#define regDB_WATERMARKS 0x13b5
+#define regDB_WATERMARKS_BASE_IDX 0
+#define regDB_SUBTILE_CONTROL 0x13b6
+#define regDB_SUBTILE_CONTROL_BASE_IDX 0
+#define regDB_FREE_CACHELINES 0x13b7
+#define regDB_FREE_CACHELINES_BASE_IDX 0
+#define regDB_FIFO_DEPTH1 0x13b8
+#define regDB_FIFO_DEPTH1_BASE_IDX 0
+#define regDB_FIFO_DEPTH2 0x13b9
+#define regDB_FIFO_DEPTH2_BASE_IDX 0
+#define regDB_LAST_OF_BURST_CONFIG 0x13ba
+#define regDB_LAST_OF_BURST_CONFIG_BASE_IDX 0
+#define regDB_RING_CONTROL 0x13bb
+#define regDB_RING_CONTROL_BASE_IDX 0
+#define regDB_MEM_ARB_WATERMARKS 0x13bc
+#define regDB_MEM_ARB_WATERMARKS_BASE_IDX 0
+#define regDB_FIFO_DEPTH3 0x13bd
+#define regDB_FIFO_DEPTH3_BASE_IDX 0
+#define regDB_DEBUG6 0x13be
+#define regDB_DEBUG6_BASE_IDX 0
+#define regDB_EXCEPTION_CONTROL 0x13bf
+#define regDB_EXCEPTION_CONTROL_BASE_IDX 0
+#define regDB_DEBUG7 0x13d0
+#define regDB_DEBUG7_BASE_IDX 0
+#define regDB_DEBUG5 0x13d1
+#define regDB_DEBUG5_BASE_IDX 0
+#define regDB_FGCG_SRAMS_CLK_CTRL 0x13d7
+#define regDB_FGCG_SRAMS_CLK_CTRL_BASE_IDX 0
+#define regDB_FGCG_INTERFACES_CLK_CTRL 0x13d8
+#define regDB_FGCG_INTERFACES_CLK_CTRL_BASE_IDX 0
+#define regDB_FIFO_DEPTH4 0x13d9
+#define regDB_FIFO_DEPTH4_BASE_IDX 0
+#define regCC_RB_REDUNDANCY 0x13dc
+#define regCC_RB_REDUNDANCY_BASE_IDX 0
+#define regCC_RB_BACKEND_DISABLE 0x13dd
+#define regCC_RB_BACKEND_DISABLE_BASE_IDX 0
+#define regGB_ADDR_CONFIG 0x13de
+#define regGB_ADDR_CONFIG_BASE_IDX 0
+#define regGB_BACKEND_MAP 0x13df
+#define regGB_BACKEND_MAP_BASE_IDX 0
+#define regGB_GPU_ID 0x13e0
+#define regGB_GPU_ID_BASE_IDX 0
+#define regCC_RB_DAISY_CHAIN 0x13e1
+#define regCC_RB_DAISY_CHAIN_BASE_IDX 0
+#define regGB_ADDR_CONFIG_READ 0x13e2
+#define regGB_ADDR_CONFIG_READ_BASE_IDX 0
+#define regCB_HW_CONTROL_4 0x1422
+#define regCB_HW_CONTROL_4_BASE_IDX 0
+#define regCB_HW_CONTROL_3 0x1423
+#define regCB_HW_CONTROL_3_BASE_IDX 0
+#define regCB_HW_CONTROL 0x1424
+#define regCB_HW_CONTROL_BASE_IDX 0
+#define regCB_HW_CONTROL_1 0x1425
+#define regCB_HW_CONTROL_1_BASE_IDX 0
+#define regCB_HW_CONTROL_2 0x1426
+#define regCB_HW_CONTROL_2_BASE_IDX 0
+#define regCB_DCC_CONFIG 0x1427
+#define regCB_DCC_CONFIG_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_RD 0x1428
+#define regCB_HW_MEM_ARBITER_RD_BASE_IDX 0
+#define regCB_HW_MEM_ARBITER_WR 0x1429
+#define regCB_HW_MEM_ARBITER_WR_BASE_IDX 0
+#define regCB_FGCG_SRAM_OVERRIDE 0x142a
+#define regCB_FGCG_SRAM_OVERRIDE_BASE_IDX 0
+#define regCB_DCC_CONFIG2 0x142b
+#define regCB_DCC_CONFIG2_BASE_IDX 0
+#define regCHICKEN_BITS 0x142d
+#define regCHICKEN_BITS_BASE_IDX 0
+#define regCB_CACHE_EVICT_POINTS 0x142e
+#define regCB_CACHE_EVICT_POINTS_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec
+// base address: 0xa800
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0 0x17a0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1 0x17a1
+#define regGCEA_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0 0x17a2
+#define regGCEA_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1 0x17a3
+#define regGCEA_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_DRAM_RD_GRP2VC_MAP 0x17a4
+#define regGCEA_DRAM_RD_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_WR_GRP2VC_MAP 0x17a5
+#define regGCEA_DRAM_WR_GRP2VC_MAP_BASE_IDX 0
+#define regGCEA_DRAM_RD_LAZY 0x17a6
+#define regGCEA_DRAM_RD_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_WR_LAZY 0x17a7
+#define regGCEA_DRAM_WR_LAZY_BASE_IDX 0
+#define regGCEA_DRAM_RD_CAM_CNTL 0x17a8
+#define regGCEA_DRAM_RD_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_WR_CAM_CNTL 0x17a9
+#define regGCEA_DRAM_WR_CAM_CNTL_BASE_IDX 0
+#define regGCEA_DRAM_PAGE_BURST 0x17aa
+#define regGCEA_DRAM_PAGE_BURST_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_AGE 0x17ab
+#define regGCEA_DRAM_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_AGE 0x17ac
+#define regGCEA_DRAM_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUEUING 0x17ad
+#define regGCEA_DRAM_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUEUING 0x17ae
+#define regGCEA_DRAM_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_FIXED 0x17af
+#define regGCEA_DRAM_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_FIXED 0x17b0
+#define regGCEA_DRAM_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_URGENCY 0x17b1
+#define regGCEA_DRAM_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_URGENCY 0x17b2
+#define regGCEA_DRAM_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1 0x17b3
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2 0x17b4
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3 0x17b5
+#define regGCEA_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1 0x17b6
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2 0x17b7
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3 0x17b8
+#define regGCEA_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP0 0x187d
+#define regGCEA_IO_RD_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_RD_CLI2GRP_MAP1 0x187e
+#define regGCEA_IO_RD_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP0 0x187f
+#define regGCEA_IO_WR_CLI2GRP_MAP0_BASE_IDX 0
+#define regGCEA_IO_WR_CLI2GRP_MAP1 0x1880
+#define regGCEA_IO_WR_CLI2GRP_MAP1_BASE_IDX 0
+#define regGCEA_IO_RD_COMBINE_FLUSH 0x1881
+#define regGCEA_IO_RD_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_WR_COMBINE_FLUSH 0x1882
+#define regGCEA_IO_WR_COMBINE_FLUSH_BASE_IDX 0
+#define regGCEA_IO_GROUP_BURST 0x1883
+#define regGCEA_IO_GROUP_BURST_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_AGE 0x1884
+#define regGCEA_IO_RD_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_AGE 0x1885
+#define regGCEA_IO_WR_PRI_AGE_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUEUING 0x1886
+#define regGCEA_IO_RD_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUEUING 0x1887
+#define regGCEA_IO_WR_PRI_QUEUING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_FIXED 0x1888
+#define regGCEA_IO_RD_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_FIXED 0x1889
+#define regGCEA_IO_WR_PRI_FIXED_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY 0x188a
+#define regGCEA_IO_RD_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY 0x188b
+#define regGCEA_IO_WR_PRI_URGENCY_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING 0x188c
+#define regGCEA_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING 0x188d
+#define regGCEA_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI1 0x188e
+#define regGCEA_IO_RD_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI2 0x188f
+#define regGCEA_IO_RD_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_RD_PRI_QUANT_PRI3 0x1890
+#define regGCEA_IO_RD_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI1 0x1891
+#define regGCEA_IO_WR_PRI_QUANT_PRI1_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI2 0x1892
+#define regGCEA_IO_WR_PRI_QUANT_PRI2_BASE_IDX 0
+#define regGCEA_IO_WR_PRI_QUANT_PRI3 0x1893
+#define regGCEA_IO_WR_PRI_QUANT_PRI3_BASE_IDX 0
+#define regGCEA_SDP_ARB_DRAM 0x1894
+#define regGCEA_SDP_ARB_DRAM_BASE_IDX 0
+#define regGCEA_SDP_ARB_FINAL 0x1896
+#define regGCEA_SDP_ARB_FINAL_BASE_IDX 0
+#define regGCEA_SDP_DRAM_PRIORITY 0x1897
+#define regGCEA_SDP_DRAM_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_IO_PRIORITY 0x1899
+#define regGCEA_SDP_IO_PRIORITY_BASE_IDX 0
+#define regGCEA_SDP_CREDITS 0x189a
+#define regGCEA_SDP_CREDITS_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE0 0x189b
+#define regGCEA_SDP_TAG_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_TAG_RESERVE1 0x189c
+#define regGCEA_SDP_TAG_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE0 0x189d
+#define regGCEA_SDP_VCC_RESERVE0_BASE_IDX 0
+#define regGCEA_SDP_VCC_RESERVE1 0x189e
+#define regGCEA_SDP_VCC_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_VCD_RESERVE0 0x189f
+#define regGCEA_SDP_VCD_RESERVE0_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec2
+// base address: 0x9c00
+#define regGCEA_SDP_VCD_RESERVE1 0x14a0
+#define regGCEA_SDP_VCD_RESERVE1_BASE_IDX 0
+#define regGCEA_SDP_REQ_CNTL 0x14a1
+#define regGCEA_SDP_REQ_CNTL_BASE_IDX 0
+#define regGCEA_MISC 0x14a2
+#define regGCEA_MISC_BASE_IDX 0
+#define regGCEA_LATENCY_SAMPLING 0x14a3
+#define regGCEA_LATENCY_SAMPLING_BASE_IDX 0
+#define regGCEA_MAM_CTRL2 0x14a9
+#define regGCEA_MAM_CTRL2_BASE_IDX 0
+#define regGCEA_MAM_CTRL 0x14ab
+#define regGCEA_MAM_CTRL_BASE_IDX 0
+#define regGCEA_EDC_CNT 0x14b2
+#define regGCEA_EDC_CNT_BASE_IDX 0
+#define regGCEA_EDC_CNT2 0x14b3
+#define regGCEA_EDC_CNT2_BASE_IDX 0
+#define regGCEA_DSM_CNTL 0x14b4
+#define regGCEA_DSM_CNTL_BASE_IDX 0
+#define regGCEA_DSM_CNTLA 0x14b5
+#define regGCEA_DSM_CNTLA_BASE_IDX 0
+#define regGCEA_DSM_CNTLB 0x14b6
+#define regGCEA_DSM_CNTLB_BASE_IDX 0
+#define regGCEA_DSM_CNTL2 0x14b7
+#define regGCEA_DSM_CNTL2_BASE_IDX 0
+#define regGCEA_DSM_CNTL2A 0x14b8
+#define regGCEA_DSM_CNTL2A_BASE_IDX 0
+#define regGCEA_DSM_CNTL2B 0x14b9
+#define regGCEA_DSM_CNTL2B_BASE_IDX 0
+#define regGCEA_GL2C_XBR_CREDITS 0x14ba
+#define regGCEA_GL2C_XBR_CREDITS_BASE_IDX 0
+#define regGCEA_GL2C_XBR_MAXBURST 0x14bb
+#define regGCEA_GL2C_XBR_MAXBURST_BASE_IDX 0
+#define regGCEA_PROBE_CNTL 0x14bc
+#define regGCEA_PROBE_CNTL_BASE_IDX 0
+#define regGCEA_PROBE_MAP 0x14bd
+#define regGCEA_PROBE_MAP_BASE_IDX 0
+#define regGCEA_ERR_STATUS 0x14be
+#define regGCEA_ERR_STATUS_BASE_IDX 0
+#define regGCEA_MISC2 0x14bf
+#define regGCEA_MISC2_BASE_IDX 0
+
+
+// addressBlock: gc_gceadec3
+// base address: 0x9dc0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0 0x1512
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1 0x1513
+#define regGCEA_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0 0x1514
+#define regGCEA_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1 0x1515
+#define regGCEA_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 0
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS 0x1516
+#define regGCEA_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 0
+#define regGCEA_RRET_MEM_RESERVE 0x1518
+#define regGCEA_RRET_MEM_RESERVE_BASE_IDX 0
+#define regGCEA_EDC_CNT3 0x151a
+#define regGCEA_EDC_CNT3_BASE_IDX 0
+#define regGCEA_SDP_ENABLE 0x151e
+#define regGCEA_SDP_ENABLE_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec2
+// base address: 0x9c80
+#define regSPI_PQEV_CTRL 0x14c0
+#define regSPI_PQEV_CTRL_BASE_IDX 0
+#define regSPI_EXP_THROTTLE_CTRL 0x14c3
+#define regSPI_EXP_THROTTLE_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_rmi_rmidec
+// base address: 0x2e200
+#define regRMI_GENERAL_CNTL 0x1880
+#define regRMI_GENERAL_CNTL_BASE_IDX 1
+#define regRMI_GENERAL_CNTL1 0x1881
+#define regRMI_GENERAL_CNTL1_BASE_IDX 1
+#define regRMI_GENERAL_STATUS 0x1882
+#define regRMI_GENERAL_STATUS_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS0 0x1883
+#define regRMI_SUBBLOCK_STATUS0_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS1 0x1884
+#define regRMI_SUBBLOCK_STATUS1_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS2 0x1885
+#define regRMI_SUBBLOCK_STATUS2_BASE_IDX 1
+#define regRMI_SUBBLOCK_STATUS3 0x1886
+#define regRMI_SUBBLOCK_STATUS3_BASE_IDX 1
+#define regRMI_XBAR_CONFIG 0x1887
+#define regRMI_XBAR_CONFIG_BASE_IDX 1
+#define regRMI_PROBE_POP_LOGIC_CNTL 0x1888
+#define regRMI_PROBE_POP_LOGIC_CNTL_BASE_IDX 1
+#define regRMI_UTC_XNACK_N_MISC_CNTL 0x1889
+#define regRMI_UTC_XNACK_N_MISC_CNTL_BASE_IDX 1
+#define regRMI_DEMUX_CNTL 0x188a
+#define regRMI_DEMUX_CNTL_BASE_IDX 1
+#define regRMI_UTCL1_CNTL1 0x188b
+#define regRMI_UTCL1_CNTL1_BASE_IDX 1
+#define regRMI_UTCL1_CNTL2 0x188c
+#define regRMI_UTCL1_CNTL2_BASE_IDX 1
+#define regRMI_UTC_UNIT_CONFIG 0x188d
+#define regRMI_UTC_UNIT_CONFIG_BASE_IDX 1
+#define regRMI_TCIW_FORMATTER0_CNTL 0x188e
+#define regRMI_TCIW_FORMATTER0_CNTL_BASE_IDX 1
+#define regRMI_TCIW_FORMATTER1_CNTL 0x188f
+#define regRMI_TCIW_FORMATTER1_CNTL_BASE_IDX 1
+#define regRMI_SCOREBOARD_CNTL 0x1890
+#define regRMI_SCOREBOARD_CNTL_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS0 0x1891
+#define regRMI_SCOREBOARD_STATUS0_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS1 0x1892
+#define regRMI_SCOREBOARD_STATUS1_BASE_IDX 1
+#define regRMI_SCOREBOARD_STATUS2 0x1893
+#define regRMI_SCOREBOARD_STATUS2_BASE_IDX 1
+#define regRMI_XBAR_ARBITER_CONFIG 0x1894
+#define regRMI_XBAR_ARBITER_CONFIG_BASE_IDX 1
+#define regRMI_XBAR_ARBITER_CONFIG_1 0x1895
+#define regRMI_XBAR_ARBITER_CONFIG_1_BASE_IDX 1
+#define regRMI_CLOCK_CNTRL 0x1896
+#define regRMI_CLOCK_CNTRL_BASE_IDX 1
+#define regRMI_UTCL1_STATUS 0x1897
+#define regRMI_UTCL1_STATUS_BASE_IDX 1
+#define regRMI_RB_GLX_CID_MAP 0x1898
+#define regRMI_RB_GLX_CID_MAP_BASE_IDX 1
+#define regRMI_XNACK_DEBUG 0x189e
+#define regRMI_XNACK_DEBUG_BASE_IDX 1
+#define regRMI_SPARE 0x189f
+#define regRMI_SPARE_BASE_IDX 1
+#define regRMI_SPARE_1 0x18a0
+#define regRMI_SPARE_1_BASE_IDX 1
+#define regRMI_SPARE_2 0x18a1
+#define regRMI_SPARE_2_BASE_IDX 1
+#define regCC_RMI_REDUNDANCY 0x18a2
+#define regCC_RMI_REDUNDANCY_BASE_IDX 1
+
+
+// addressBlock: gc_pmmdec
+// base address: 0x9f80
+#define regGCR_PIO_CNTL 0x1580
+#define regGCR_PIO_CNTL_BASE_IDX 0
+#define regGCR_PIO_DATA 0x1581
+#define regGCR_PIO_DATA_BASE_IDX 0
+#define regPMM_CNTL 0x1582
+#define regPMM_CNTL_BASE_IDX 0
+#define regPMM_STATUS 0x1583
+#define regPMM_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_utcl1dec
+// base address: 0x9fb0
+#define regUTCL1_CTRL_1 0x158c
+#define regUTCL1_CTRL_1_BASE_IDX 0
+#define regUTCL1_ALOG 0x158f
+#define regUTCL1_ALOG_BASE_IDX 0
+#define regUTCL1_STATUS 0x1594
+#define regUTCL1_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_gcvmsharedpfdec
+// base address: 0xa000
+#define regGCMC_VM_NB_MMIOBASE 0x15a0
+#define regGCMC_VM_NB_MMIOBASE_BASE_IDX 0
+#define regGCMC_VM_NB_MMIOLIMIT 0x15a1
+#define regGCMC_VM_NB_MMIOLIMIT_BASE_IDX 0
+#define regGCMC_VM_NB_PCI_CTRL 0x15a2
+#define regGCMC_VM_NB_PCI_CTRL_BASE_IDX 0
+#define regGCMC_VM_NB_PCI_ARB 0x15a3
+#define regGCMC_VM_NB_PCI_ARB_BASE_IDX 0
+#define regGCMC_VM_NB_TOP_OF_DRAM_SLOT1 0x15a4
+#define regGCMC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 0
+#define regGCMC_VM_NB_LOWER_TOP_OF_DRAM2 0x15a5
+#define regGCMC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 0
+#define regGCMC_VM_NB_UPPER_TOP_OF_DRAM2 0x15a6
+#define regGCMC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 0
+#define regGCMC_VM_FB_OFFSET 0x15a7
+#define regGCMC_VM_FB_OFFSET_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x15a8
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x15a9
+#define regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 0
+#define regGCMC_VM_STEERING 0x15aa
+#define regGCMC_VM_STEERING_BASE_IDX 0
+#define regGCMC_SHARED_VIRT_RESET_REQ 0x15ab
+#define regGCMC_SHARED_VIRT_RESET_REQ_BASE_IDX 0
+#define regGCMC_MEM_POWER_LS 0x15ac
+#define regGCMC_MEM_POWER_LS_BASE_IDX 0
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_START 0x15ad
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END 0x15ae
+#define regGCMC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_START 0x15af
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_END 0x15b0
+#define regGCMC_VM_LOCAL_SYSMEM_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_APT_CNTL 0x15b1
+#define regGCMC_VM_APT_CNTL_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_START 0x15b2
+#define regGCMC_VM_LOCAL_FB_ADDRESS_START_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_END 0x15b3
+#define regGCMC_VM_LOCAL_FB_ADDRESS_END_BASE_IDX 0
+#define regGCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL 0x15b4
+#define regGCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL_BASE_IDX 0
+#define regGCUTCL2_ICG_CTRL 0x15b5
+#define regGCUTCL2_ICG_CTRL_BASE_IDX 0
+#define regGCMC_SHARED_ACTIVE_FCN_ID 0x15b6
+#define regGCMC_SHARED_ACTIVE_FCN_ID_BASE_IDX 0
+#define regGCUTCL2_CGTT_BUSY_CTRL 0x15b7
+#define regGCUTCL2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regGCMC_VM_FB_NOALLOC_CNTL 0x15b8
+#define regGCMC_VM_FB_NOALLOC_CNTL_BASE_IDX 0
+#define regGCUTCL2_HARVEST_BYPASS_GROUPS 0x15b9
+#define regGCUTCL2_HARVEST_BYPASS_GROUPS_BASE_IDX 0
+#define regGCUTCL2_GROUP_RET_FAULT_STATUS 0x15bb
+#define regGCUTCL2_GROUP_RET_FAULT_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2pfdec
+// base address: 0xa080
+#define regGCVM_L2_CNTL 0x15c0
+#define regGCVM_L2_CNTL_BASE_IDX 0
+#define regGCVM_L2_CNTL2 0x15c1
+#define regGCVM_L2_CNTL2_BASE_IDX 0
+#define regGCVM_L2_CNTL3 0x15c2
+#define regGCVM_L2_CNTL3_BASE_IDX 0
+#define regGCVM_L2_STATUS 0x15c3
+#define regGCVM_L2_STATUS_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_CNTL 0x15c4
+#define regGCVM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_LO32 0x15c5
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_HI32 0x15c6
+#define regGCVM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_CNTL 0x15c7
+#define regGCVM_INVALIDATE_CNTL_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_CNTL 0x15c8
+#define regGCVM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_CNTL2 0x15c9
+#define regGCVM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL3 0x15ca
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL4 0x15cb
+#define regGCVM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_STATUS 0x15cc
+#define regGCVM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_LO32 0x15cd
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_HI32 0x15ce
+#define regGCVM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x15cf
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x15d0
+#define regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x15d2
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x15d3
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x15d4
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x15d5
+#define regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x15d6
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 0
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x15d7
+#define regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 0
+#define regGCVM_L2_CNTL4 0x15d8
+#define regGCVM_L2_CNTL4_BASE_IDX 0
+#define regGCVM_L2_MM_GROUP_RT_CLASSES 0x15d9
+#define regGCVM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID 0x15da
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID2 0x15db
+#define regGCVM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 0
+#define regGCVM_L2_CACHE_PARITY_CNTL 0x15dc
+#define regGCVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0
+#define regGCVM_L2_ICG_CTRL 0x15dd
+#define regGCVM_L2_ICG_CTRL_BASE_IDX 0
+#define regGCVM_L2_CNTL5 0x15de
+#define regGCVM_L2_CNTL5_BASE_IDX 0
+#define regGCVM_L2_GCR_CNTL 0x15df
+#define regGCVM_L2_GCR_CNTL_BASE_IDX 0
+#define regGCVML2_WALKER_MACRO_THROTTLE_TIME 0x15e0
+#define regGCVML2_WALKER_MACRO_THROTTLE_TIME_BASE_IDX 0
+#define regGCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT 0x15e1
+#define regGCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT_BASE_IDX 0
+#define regGCVML2_WALKER_MICRO_THROTTLE_TIME 0x15e2
+#define regGCVML2_WALKER_MICRO_THROTTLE_TIME_BASE_IDX 0
+#define regGCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT 0x15e3
+#define regGCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT_BASE_IDX 0
+#define regGCVM_L2_CGTT_BUSY_CTRL 0x15e4
+#define regGCVM_L2_CGTT_BUSY_CTRL_BASE_IDX 0
+#define regGCVM_L2_PTE_CACHE_DUMP_CNTL 0x15e5
+#define regGCVM_L2_PTE_CACHE_DUMP_CNTL_BASE_IDX 0
+#define regGCVM_L2_PTE_CACHE_DUMP_READ 0x15e6
+#define regGCVM_L2_PTE_CACHE_DUMP_READ_BASE_IDX 0
+#define regGCVM_L2_BANK_SELECT_MASKS 0x15e9
+#define regGCVM_L2_BANK_SELECT_MASKS_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC 0x15ea
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC 0x15eb
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC_BASE_IDX 0
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC 0x15ec
+#define regGCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC_BASE_IDX 0
+#define regGCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT 0x15ed
+#define regGCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT_BASE_IDX 0
+#define regGCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ 0x15ee
+#define regGCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ_BASE_IDX 0
+
+
+// addressBlock: gc_gcatcl2dec
+// base address: 0xa300
+#define regGC_ATC_L2_CNTL 0x1660
+#define regGC_ATC_L2_CNTL_BASE_IDX 0
+#define regGC_ATC_L2_CNTL2 0x1661
+#define regGC_ATC_L2_CNTL2_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA0 0x1664
+#define regGC_ATC_L2_CACHE_DATA0_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA1 0x1665
+#define regGC_ATC_L2_CACHE_DATA1_BASE_IDX 0
+#define regGC_ATC_L2_CACHE_DATA2 0x1666
+#define regGC_ATC_L2_CACHE_DATA2_BASE_IDX 0
+#define regGC_ATC_L2_CNTL3 0x1667
+#define regGC_ATC_L2_CNTL3_BASE_IDX 0
+#define regGC_ATC_L2_STATUS 0x1668
+#define regGC_ATC_L2_STATUS_BASE_IDX 0
+#define regGC_ATC_L2_STATUS2 0x1669
+#define regGC_ATC_L2_STATUS2_BASE_IDX 0
+#define regGC_ATC_L2_MISC_CG 0x166a
+#define regGC_ATC_L2_MISC_CG_BASE_IDX 0
+#define regGC_ATC_L2_MEM_POWER_LS 0x166b
+#define regGC_ATC_L2_MEM_POWER_LS_BASE_IDX 0
+#define regGC_ATC_L2_SDPPORT_CTRL 0x166f
+#define regGC_ATC_L2_SDPPORT_CTRL_BASE_IDX 0
+
+
+// addressBlock: gc_gcl2tlbpfdec
+// base address: 0xa380
+#define regGCL2TLB_TLB0_STATUS 0x1681
+#define regGCL2TLB_TLB0_STATUS_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO 0x1683
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI 0x1684
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO 0x1685
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO_BASE_IDX 0
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI 0x1686
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI_BASE_IDX 0
+
+
+// addressBlock: gc_gcvmsharedvcdec
+// base address: 0xa3a0
+#define regGCMC_VM_FB_LOCATION_BASE 0x1688
+#define regGCMC_VM_FB_LOCATION_BASE_BASE_IDX 0
+#define regGCMC_VM_FB_LOCATION_TOP 0x1689
+#define regGCMC_VM_FB_LOCATION_TOP_BASE_IDX 0
+#define regGCMC_VM_AGP_TOP 0x168a
+#define regGCMC_VM_AGP_TOP_BASE_IDX 0
+#define regGCMC_VM_AGP_BOT 0x168b
+#define regGCMC_VM_AGP_BOT_BASE_IDX 0
+#define regGCMC_VM_AGP_BASE 0x168c
+#define regGCMC_VM_AGP_BASE_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR 0x168d
+#define regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 0
+#define regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x168e
+#define regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 0
+#define regGCMC_VM_MX_L1_TLB_CNTL 0x168f
+#define regGCMC_VM_MX_L1_TLB_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2vcdec
+// base address: 0xa3e0
+#define regGCVM_CONTEXT0_CNTL 0x1698
+#define regGCVM_CONTEXT0_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT1_CNTL 0x1699
+#define regGCVM_CONTEXT1_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT2_CNTL 0x169a
+#define regGCVM_CONTEXT2_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT3_CNTL 0x169b
+#define regGCVM_CONTEXT3_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT4_CNTL 0x169c
+#define regGCVM_CONTEXT4_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT5_CNTL 0x169d
+#define regGCVM_CONTEXT5_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT6_CNTL 0x169e
+#define regGCVM_CONTEXT6_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT7_CNTL 0x169f
+#define regGCVM_CONTEXT7_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT8_CNTL 0x16a0
+#define regGCVM_CONTEXT8_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT9_CNTL 0x16a1
+#define regGCVM_CONTEXT9_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT10_CNTL 0x16a2
+#define regGCVM_CONTEXT10_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT11_CNTL 0x16a3
+#define regGCVM_CONTEXT11_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT12_CNTL 0x16a4
+#define regGCVM_CONTEXT12_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT13_CNTL 0x16a5
+#define regGCVM_CONTEXT13_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT14_CNTL 0x16a6
+#define regGCVM_CONTEXT14_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXT15_CNTL 0x16a7
+#define regGCVM_CONTEXT15_CNTL_BASE_IDX 0
+#define regGCVM_CONTEXTS_DISABLE 0x16a8
+#define regGCVM_CONTEXTS_DISABLE_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_SEM 0x16a9
+#define regGCVM_INVALIDATE_ENG0_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_SEM 0x16aa
+#define regGCVM_INVALIDATE_ENG1_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_SEM 0x16ab
+#define regGCVM_INVALIDATE_ENG2_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_SEM 0x16ac
+#define regGCVM_INVALIDATE_ENG3_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_SEM 0x16ad
+#define regGCVM_INVALIDATE_ENG4_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_SEM 0x16ae
+#define regGCVM_INVALIDATE_ENG5_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_SEM 0x16af
+#define regGCVM_INVALIDATE_ENG6_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_SEM 0x16b0
+#define regGCVM_INVALIDATE_ENG7_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_SEM 0x16b1
+#define regGCVM_INVALIDATE_ENG8_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_SEM 0x16b2
+#define regGCVM_INVALIDATE_ENG9_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_SEM 0x16b3
+#define regGCVM_INVALIDATE_ENG10_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_SEM 0x16b4
+#define regGCVM_INVALIDATE_ENG11_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_SEM 0x16b5
+#define regGCVM_INVALIDATE_ENG12_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_SEM 0x16b6
+#define regGCVM_INVALIDATE_ENG13_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_SEM 0x16b7
+#define regGCVM_INVALIDATE_ENG14_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_SEM 0x16b8
+#define regGCVM_INVALIDATE_ENG15_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_SEM 0x16b9
+#define regGCVM_INVALIDATE_ENG16_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_SEM 0x16ba
+#define regGCVM_INVALIDATE_ENG17_SEM_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_REQ 0x16bb
+#define regGCVM_INVALIDATE_ENG0_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_REQ 0x16bc
+#define regGCVM_INVALIDATE_ENG1_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_REQ 0x16bd
+#define regGCVM_INVALIDATE_ENG2_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_REQ 0x16be
+#define regGCVM_INVALIDATE_ENG3_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_REQ 0x16bf
+#define regGCVM_INVALIDATE_ENG4_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_REQ 0x16c0
+#define regGCVM_INVALIDATE_ENG5_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_REQ 0x16c1
+#define regGCVM_INVALIDATE_ENG6_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_REQ 0x16c2
+#define regGCVM_INVALIDATE_ENG7_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_REQ 0x16c3
+#define regGCVM_INVALIDATE_ENG8_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_REQ 0x16c4
+#define regGCVM_INVALIDATE_ENG9_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_REQ 0x16c5
+#define regGCVM_INVALIDATE_ENG10_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_REQ 0x16c6
+#define regGCVM_INVALIDATE_ENG11_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_REQ 0x16c7
+#define regGCVM_INVALIDATE_ENG12_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_REQ 0x16c8
+#define regGCVM_INVALIDATE_ENG13_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_REQ 0x16c9
+#define regGCVM_INVALIDATE_ENG14_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_REQ 0x16ca
+#define regGCVM_INVALIDATE_ENG15_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_REQ 0x16cb
+#define regGCVM_INVALIDATE_ENG16_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_REQ 0x16cc
+#define regGCVM_INVALIDATE_ENG17_REQ_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ACK 0x16cd
+#define regGCVM_INVALIDATE_ENG0_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ACK 0x16ce
+#define regGCVM_INVALIDATE_ENG1_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ACK 0x16cf
+#define regGCVM_INVALIDATE_ENG2_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ACK 0x16d0
+#define regGCVM_INVALIDATE_ENG3_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ACK 0x16d1
+#define regGCVM_INVALIDATE_ENG4_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ACK 0x16d2
+#define regGCVM_INVALIDATE_ENG5_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ACK 0x16d3
+#define regGCVM_INVALIDATE_ENG6_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ACK 0x16d4
+#define regGCVM_INVALIDATE_ENG7_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ACK 0x16d5
+#define regGCVM_INVALIDATE_ENG8_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ACK 0x16d6
+#define regGCVM_INVALIDATE_ENG9_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ACK 0x16d7
+#define regGCVM_INVALIDATE_ENG10_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ACK 0x16d8
+#define regGCVM_INVALIDATE_ENG11_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ACK 0x16d9
+#define regGCVM_INVALIDATE_ENG12_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ACK 0x16da
+#define regGCVM_INVALIDATE_ENG13_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ACK 0x16db
+#define regGCVM_INVALIDATE_ENG14_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ACK 0x16dc
+#define regGCVM_INVALIDATE_ENG15_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ACK 0x16dd
+#define regGCVM_INVALIDATE_ENG16_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ACK 0x16de
+#define regGCVM_INVALIDATE_ENG17_ACK_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x16df
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x16e0
+#define regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x16e1
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x16e2
+#define regGCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x16e3
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x16e4
+#define regGCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x16e5
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x16e6
+#define regGCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x16e7
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x16e8
+#define regGCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x16e9
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x16ea
+#define regGCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x16eb
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x16ec
+#define regGCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x16ed
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x16ee
+#define regGCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x16ef
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x16f0
+#define regGCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x16f1
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x16f2
+#define regGCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x16f3
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x16f4
+#define regGCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x16f5
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x16f6
+#define regGCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x16f7
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x16f8
+#define regGCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x16f9
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x16fa
+#define regGCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x16fb
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x16fc
+#define regGCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x16fd
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x16fe
+#define regGCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x16ff
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x1700
+#define regGCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x1701
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 0
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x1702
+#define regGCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x1703
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x1704
+#define regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x1705
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x1706
+#define regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x1707
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x1708
+#define regGCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x1709
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x170a
+#define regGCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x170b
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x170c
+#define regGCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x170d
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x170e
+#define regGCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x170f
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x1710
+#define regGCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x1711
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x1712
+#define regGCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x1713
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x1714
+#define regGCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x1715
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x1716
+#define regGCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x1717
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x1718
+#define regGCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x1719
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x171a
+#define regGCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x171b
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x171c
+#define regGCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x171d
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x171e
+#define regGCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x171f
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x1720
+#define regGCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x1721
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x1722
+#define regGCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x1723
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x1724
+#define regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x1725
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x1726
+#define regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x1727
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x1728
+#define regGCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x1729
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x172a
+#define regGCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x172b
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x172c
+#define regGCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x172d
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x172e
+#define regGCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x172f
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x1730
+#define regGCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x1731
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x1732
+#define regGCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x1733
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x1734
+#define regGCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x1735
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x1736
+#define regGCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x1737
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x1738
+#define regGCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x1739
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x173a
+#define regGCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x173b
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x173c
+#define regGCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x173d
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x173e
+#define regGCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x173f
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x1740
+#define regGCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x1741
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x1742
+#define regGCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x1743
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x1744
+#define regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x1745
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x1746
+#define regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x1747
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x1748
+#define regGCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x1749
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x174a
+#define regGCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x174b
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x174c
+#define regGCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x174d
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x174e
+#define regGCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x174f
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x1750
+#define regGCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x1751
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x1752
+#define regGCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x1753
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x1754
+#define regGCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x1755
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x1756
+#define regGCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x1757
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x1758
+#define regGCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x1759
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x175a
+#define regGCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x175b
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x175c
+#define regGCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x175d
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x175e
+#define regGCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x175f
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x1760
+#define regGCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x1761
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x1762
+#define regGCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
+#define regGCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1763
+#define regGCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1764
+#define regGCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1765
+#define regGCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1766
+#define regGCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1767
+#define regGCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1768
+#define regGCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1769
+#define regGCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176a
+#define regGCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176b
+#define regGCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176c
+#define regGCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176d
+#define regGCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176e
+#define regGCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x176f
+#define regGCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1770
+#define regGCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1771
+#define regGCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1772
+#define regGCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+#define regGCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES 0x1773
+#define regGCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES_BASE_IDX 0
+
+
+// addressBlock: gc_gcvml2perfddec
+// base address: 0x35380
+#define regGCVML2_PERFCOUNTER2_0_LO 0x34e0
+#define regGCVML2_PERFCOUNTER2_0_LO_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_LO 0x34e1
+#define regGCVML2_PERFCOUNTER2_1_LO_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_HI 0x34e2
+#define regGCVML2_PERFCOUNTER2_0_HI_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_HI 0x34e3
+#define regGCVML2_PERFCOUNTER2_1_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2prdec
+// base address: 0x35390
+#define regGCMC_VM_L2_PERFCOUNTER_LO 0x34e4
+#define regGCMC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER_HI 0x34e5
+#define regGCMC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_LO 0x34e6
+#define regGCUTCL2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_HI 0x34e7
+#define regGCUTCL2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2perfddec
+// base address: 0x353d0
+#define regGC_ATC_L2_PERFCOUNTER2_LO 0x34f4
+#define regGC_ATC_L2_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_HI 0x34f5
+#define regGC_ATC_L2_PERFCOUNTER2_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2pfcntrdec
+// base address: 0x353e0
+#define regGC_ATC_L2_PERFCOUNTER_LO 0x34f8
+#define regGC_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER_HI 0x34f9
+#define regGC_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbprdec
+// base address: 0x353e8
+#define regGCL2TLB_PERFCOUNTER_LO 0x34fa
+#define regGCL2TLB_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER_HI 0x34fb
+#define regGCL2TLB_PERFCOUNTER_HI_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2perfsdec
+// base address: 0x37480
+#define regGCVML2_PERFCOUNTER2_0_SELECT 0x3d20
+#define regGCVML2_PERFCOUNTER2_0_SELECT_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_SELECT 0x3d21
+#define regGCVML2_PERFCOUNTER2_1_SELECT_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_SELECT1 0x3d22
+#define regGCVML2_PERFCOUNTER2_0_SELECT1_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_SELECT1 0x3d23
+#define regGCVML2_PERFCOUNTER2_1_SELECT1_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_0_MODE 0x3d24
+#define regGCVML2_PERFCOUNTER2_0_MODE_BASE_IDX 1
+#define regGCVML2_PERFCOUNTER2_1_MODE 0x3d25
+#define regGCVML2_PERFCOUNTER2_1_MODE_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2pldec
+// base address: 0x374c0
+#define regGCMC_VM_L2_PERFCOUNTER0_CFG 0x3d30
+#define regGCMC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER1_CFG 0x3d31
+#define regGCMC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER2_CFG 0x3d32
+#define regGCMC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER3_CFG 0x3d33
+#define regGCMC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER4_CFG 0x3d34
+#define regGCMC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER5_CFG 0x3d35
+#define regGCMC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER6_CFG 0x3d36
+#define regGCMC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER7_CFG 0x3d37
+#define regGCMC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define regGCMC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3d38
+#define regGCMC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER0_CFG 0x3d39
+#define regGCUTCL2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER1_CFG 0x3d3a
+#define regGCUTCL2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER2_CFG 0x3d3b
+#define regGCUTCL2_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER3_CFG 0x3d3c
+#define regGCUTCL2_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCUTCL2_PERFCOUNTER_RSLT_CNTL 0x3d3d
+#define regGCUTCL2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2perfsdec
+// base address: 0x37500
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT 0x3d40
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT1 0x3d41
+#define regGC_ATC_L2_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER2_MODE 0x3d42
+#define regGC_ATC_L2_PERFCOUNTER2_MODE_BASE_IDX 1
+
+
+// addressBlock: gc_gcatcl2pfcntldec
+// base address: 0x37510
+#define regGC_ATC_L2_PERFCOUNTER0_CFG 0x3d44
+#define regGC_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER1_CFG 0x3d45
+#define regGC_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGC_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x3d46
+#define regGC_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbpldec
+// base address: 0x37528
+#define regGCL2TLB_PERFCOUNTER0_CFG 0x3d4a
+#define regGCL2TLB_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER1_CFG 0x3d4b
+#define regGCL2TLB_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER2_CFG 0x3d4c
+#define regGCL2TLB_PERFCOUNTER2_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER3_CFG 0x3d4d
+#define regGCL2TLB_PERFCOUNTER3_CFG_BASE_IDX 1
+#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL 0x3d4e
+#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcvml2pspdec
+// base address: 0x3f900
+#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41
+#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID_BASE_IDX 1
+#define regGCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE 0x5e43
+#define regGCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE_BASE_IDX 1
+#define regGCVM_IOMMU_CONTROL_REGISTER 0x5e44
+#define regGCVM_IOMMU_CONTROL_REGISTER_BASE_IDX 1
+#define regGCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x5e45
+#define regGCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1
+#define regGCVM_IOMMU_MMIO_CNTRL_1 0x5e46
+#define regGCVM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_0 0x5e47
+#define regGCMC_VM_MARC_BASE_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_1 0x5e48
+#define regGCMC_VM_MARC_BASE_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_2 0x5e49
+#define regGCMC_VM_MARC_BASE_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_3 0x5e4a
+#define regGCMC_VM_MARC_BASE_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_4 0x5e4b
+#define regGCMC_VM_MARC_BASE_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_5 0x5e4c
+#define regGCMC_VM_MARC_BASE_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_6 0x5e4d
+#define regGCMC_VM_MARC_BASE_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_7 0x5e4e
+#define regGCMC_VM_MARC_BASE_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_8 0x5e4f
+#define regGCMC_VM_MARC_BASE_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_9 0x5e50
+#define regGCMC_VM_MARC_BASE_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_10 0x5e51
+#define regGCMC_VM_MARC_BASE_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_11 0x5e52
+#define regGCMC_VM_MARC_BASE_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_12 0x5e53
+#define regGCMC_VM_MARC_BASE_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_13 0x5e54
+#define regGCMC_VM_MARC_BASE_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_14 0x5e55
+#define regGCMC_VM_MARC_BASE_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_LO_15 0x5e56
+#define regGCMC_VM_MARC_BASE_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_0 0x5e57
+#define regGCMC_VM_MARC_BASE_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_1 0x5e58
+#define regGCMC_VM_MARC_BASE_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_2 0x5e59
+#define regGCMC_VM_MARC_BASE_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_3 0x5e5a
+#define regGCMC_VM_MARC_BASE_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_4 0x5e5b
+#define regGCMC_VM_MARC_BASE_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_5 0x5e5c
+#define regGCMC_VM_MARC_BASE_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_6 0x5e5d
+#define regGCMC_VM_MARC_BASE_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_7 0x5e5e
+#define regGCMC_VM_MARC_BASE_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_8 0x5e5f
+#define regGCMC_VM_MARC_BASE_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_9 0x5e60
+#define regGCMC_VM_MARC_BASE_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_10 0x5e61
+#define regGCMC_VM_MARC_BASE_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_11 0x5e62
+#define regGCMC_VM_MARC_BASE_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_12 0x5e63
+#define regGCMC_VM_MARC_BASE_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_13 0x5e64
+#define regGCMC_VM_MARC_BASE_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_14 0x5e65
+#define regGCMC_VM_MARC_BASE_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_BASE_HI_15 0x5e66
+#define regGCMC_VM_MARC_BASE_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_0 0x5e67
+#define regGCMC_VM_MARC_RELOC_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_1 0x5e68
+#define regGCMC_VM_MARC_RELOC_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_2 0x5e69
+#define regGCMC_VM_MARC_RELOC_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_3 0x5e6a
+#define regGCMC_VM_MARC_RELOC_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_4 0x5e6b
+#define regGCMC_VM_MARC_RELOC_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_5 0x5e6c
+#define regGCMC_VM_MARC_RELOC_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_6 0x5e6d
+#define regGCMC_VM_MARC_RELOC_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_7 0x5e6e
+#define regGCMC_VM_MARC_RELOC_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_8 0x5e6f
+#define regGCMC_VM_MARC_RELOC_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_9 0x5e70
+#define regGCMC_VM_MARC_RELOC_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_10 0x5e71
+#define regGCMC_VM_MARC_RELOC_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_11 0x5e72
+#define regGCMC_VM_MARC_RELOC_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_12 0x5e73
+#define regGCMC_VM_MARC_RELOC_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_13 0x5e74
+#define regGCMC_VM_MARC_RELOC_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_14 0x5e75
+#define regGCMC_VM_MARC_RELOC_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_LO_15 0x5e76
+#define regGCMC_VM_MARC_RELOC_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_0 0x5e77
+#define regGCMC_VM_MARC_RELOC_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_1 0x5e78
+#define regGCMC_VM_MARC_RELOC_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_2 0x5e79
+#define regGCMC_VM_MARC_RELOC_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_3 0x5e7a
+#define regGCMC_VM_MARC_RELOC_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_4 0x5e7b
+#define regGCMC_VM_MARC_RELOC_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_5 0x5e7c
+#define regGCMC_VM_MARC_RELOC_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_6 0x5e7d
+#define regGCMC_VM_MARC_RELOC_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_7 0x5e7e
+#define regGCMC_VM_MARC_RELOC_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_8 0x5e7f
+#define regGCMC_VM_MARC_RELOC_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_9 0x5e80
+#define regGCMC_VM_MARC_RELOC_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_10 0x5e81
+#define regGCMC_VM_MARC_RELOC_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_11 0x5e82
+#define regGCMC_VM_MARC_RELOC_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_12 0x5e83
+#define regGCMC_VM_MARC_RELOC_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_13 0x5e84
+#define regGCMC_VM_MARC_RELOC_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_14 0x5e85
+#define regGCMC_VM_MARC_RELOC_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_RELOC_HI_15 0x5e86
+#define regGCMC_VM_MARC_RELOC_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_0 0x5e87
+#define regGCMC_VM_MARC_LEN_LO_0_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_1 0x5e88
+#define regGCMC_VM_MARC_LEN_LO_1_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_2 0x5e89
+#define regGCMC_VM_MARC_LEN_LO_2_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_3 0x5e8a
+#define regGCMC_VM_MARC_LEN_LO_3_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_4 0x5e8b
+#define regGCMC_VM_MARC_LEN_LO_4_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_5 0x5e8c
+#define regGCMC_VM_MARC_LEN_LO_5_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_6 0x5e8d
+#define regGCMC_VM_MARC_LEN_LO_6_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_7 0x5e8e
+#define regGCMC_VM_MARC_LEN_LO_7_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_8 0x5e8f
+#define regGCMC_VM_MARC_LEN_LO_8_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_9 0x5e90
+#define regGCMC_VM_MARC_LEN_LO_9_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_10 0x5e91
+#define regGCMC_VM_MARC_LEN_LO_10_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_11 0x5e92
+#define regGCMC_VM_MARC_LEN_LO_11_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_12 0x5e93
+#define regGCMC_VM_MARC_LEN_LO_12_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_13 0x5e94
+#define regGCMC_VM_MARC_LEN_LO_13_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_14 0x5e95
+#define regGCMC_VM_MARC_LEN_LO_14_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_LO_15 0x5e96
+#define regGCMC_VM_MARC_LEN_LO_15_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_0 0x5e97
+#define regGCMC_VM_MARC_LEN_HI_0_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_1 0x5e98
+#define regGCMC_VM_MARC_LEN_HI_1_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_2 0x5e99
+#define regGCMC_VM_MARC_LEN_HI_2_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_3 0x5e9a
+#define regGCMC_VM_MARC_LEN_HI_3_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_4 0x5e9b
+#define regGCMC_VM_MARC_LEN_HI_4_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_5 0x5e9c
+#define regGCMC_VM_MARC_LEN_HI_5_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_6 0x5e9d
+#define regGCMC_VM_MARC_LEN_HI_6_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_7 0x5e9e
+#define regGCMC_VM_MARC_LEN_HI_7_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_8 0x5e9f
+#define regGCMC_VM_MARC_LEN_HI_8_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_9 0x5ea0
+#define regGCMC_VM_MARC_LEN_HI_9_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_10 0x5ea1
+#define regGCMC_VM_MARC_LEN_HI_10_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_11 0x5ea2
+#define regGCMC_VM_MARC_LEN_HI_11_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_12 0x5ea3
+#define regGCMC_VM_MARC_LEN_HI_12_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_13 0x5ea4
+#define regGCMC_VM_MARC_LEN_HI_13_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_14 0x5ea5
+#define regGCMC_VM_MARC_LEN_HI_14_BASE_IDX 1
+#define regGCMC_VM_MARC_LEN_HI_15 0x5ea6
+#define regGCMC_VM_MARC_LEN_HI_15_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_0 0x5ea7
+#define regGCMC_VM_MARC_PFVF_MAPPING_0_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_1 0x5ea8
+#define regGCMC_VM_MARC_PFVF_MAPPING_1_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_2 0x5ea9
+#define regGCMC_VM_MARC_PFVF_MAPPING_2_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_3 0x5eaa
+#define regGCMC_VM_MARC_PFVF_MAPPING_3_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_4 0x5eab
+#define regGCMC_VM_MARC_PFVF_MAPPING_4_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_5 0x5eac
+#define regGCMC_VM_MARC_PFVF_MAPPING_5_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_6 0x5ead
+#define regGCMC_VM_MARC_PFVF_MAPPING_6_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_7 0x5eae
+#define regGCMC_VM_MARC_PFVF_MAPPING_7_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_8 0x5eaf
+#define regGCMC_VM_MARC_PFVF_MAPPING_8_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_9 0x5eb0
+#define regGCMC_VM_MARC_PFVF_MAPPING_9_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_10 0x5eb1
+#define regGCMC_VM_MARC_PFVF_MAPPING_10_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_11 0x5eb2
+#define regGCMC_VM_MARC_PFVF_MAPPING_11_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_12 0x5eb3
+#define regGCMC_VM_MARC_PFVF_MAPPING_12_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_13 0x5eb4
+#define regGCMC_VM_MARC_PFVF_MAPPING_13_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_14 0x5eb5
+#define regGCMC_VM_MARC_PFVF_MAPPING_14_BASE_IDX 1
+#define regGCMC_VM_MARC_PFVF_MAPPING_15 0x5eb6
+#define regGCMC_VM_MARC_PFVF_MAPPING_15_BASE_IDX 1
+#define regGCUTC_TRANSLATION_FAULT_CNTL0 0x5eb7
+#define regGCUTC_TRANSLATION_FAULT_CNTL0_BASE_IDX 1
+#define regGCUTC_TRANSLATION_FAULT_CNTL1 0x5eb8
+#define regGCUTC_TRANSLATION_FAULT_CNTL1_BASE_IDX 1
+
+
+// addressBlock: gc_gcl2tlbpspdec
+// base address: 0x3fb10
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL 0x5ec4
+#define regGCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_shdec
+// base address: 0xb000
+#define regSPI_SHADER_PGM_RSRC4_PS 0x19a1
+#define regSPI_SHADER_PGM_RSRC4_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_PS 0x19a6
+#define regSPI_SHADER_PGM_CHKSUM_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_PS 0x19a7
+#define regSPI_SHADER_PGM_RSRC3_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_PS 0x19a8
+#define regSPI_SHADER_PGM_LO_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_PS 0x19a9
+#define regSPI_SHADER_PGM_HI_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_PS 0x19aa
+#define regSPI_SHADER_PGM_RSRC1_PS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_PS 0x19ab
+#define regSPI_SHADER_PGM_RSRC2_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_0 0x19ac
+#define regSPI_SHADER_USER_DATA_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_1 0x19ad
+#define regSPI_SHADER_USER_DATA_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_2 0x19ae
+#define regSPI_SHADER_USER_DATA_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_3 0x19af
+#define regSPI_SHADER_USER_DATA_PS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_4 0x19b0
+#define regSPI_SHADER_USER_DATA_PS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_5 0x19b1
+#define regSPI_SHADER_USER_DATA_PS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_6 0x19b2
+#define regSPI_SHADER_USER_DATA_PS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_7 0x19b3
+#define regSPI_SHADER_USER_DATA_PS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_8 0x19b4
+#define regSPI_SHADER_USER_DATA_PS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_9 0x19b5
+#define regSPI_SHADER_USER_DATA_PS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_10 0x19b6
+#define regSPI_SHADER_USER_DATA_PS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_11 0x19b7
+#define regSPI_SHADER_USER_DATA_PS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_12 0x19b8
+#define regSPI_SHADER_USER_DATA_PS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_13 0x19b9
+#define regSPI_SHADER_USER_DATA_PS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_14 0x19ba
+#define regSPI_SHADER_USER_DATA_PS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_15 0x19bb
+#define regSPI_SHADER_USER_DATA_PS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_16 0x19bc
+#define regSPI_SHADER_USER_DATA_PS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_17 0x19bd
+#define regSPI_SHADER_USER_DATA_PS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_18 0x19be
+#define regSPI_SHADER_USER_DATA_PS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_19 0x19bf
+#define regSPI_SHADER_USER_DATA_PS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_20 0x19c0
+#define regSPI_SHADER_USER_DATA_PS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_21 0x19c1
+#define regSPI_SHADER_USER_DATA_PS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_22 0x19c2
+#define regSPI_SHADER_USER_DATA_PS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_23 0x19c3
+#define regSPI_SHADER_USER_DATA_PS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_24 0x19c4
+#define regSPI_SHADER_USER_DATA_PS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_25 0x19c5
+#define regSPI_SHADER_USER_DATA_PS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_26 0x19c6
+#define regSPI_SHADER_USER_DATA_PS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_27 0x19c7
+#define regSPI_SHADER_USER_DATA_PS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_28 0x19c8
+#define regSPI_SHADER_USER_DATA_PS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_29 0x19c9
+#define regSPI_SHADER_USER_DATA_PS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_30 0x19ca
+#define regSPI_SHADER_USER_DATA_PS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_PS_31 0x19cb
+#define regSPI_SHADER_USER_DATA_PS_31_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_PS 0x19d0
+#define regSPI_SHADER_REQ_CTRL_PS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_0 0x19d2
+#define regSPI_SHADER_USER_ACCUM_PS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_1 0x19d3
+#define regSPI_SHADER_USER_ACCUM_PS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_2 0x19d4
+#define regSPI_SHADER_USER_ACCUM_PS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_PS_3 0x19d5
+#define regSPI_SHADER_USER_ACCUM_PS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_GS 0x1a20
+#define regSPI_SHADER_PGM_CHKSUM_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_GS 0x1a21
+#define regSPI_SHADER_PGM_RSRC4_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS 0x1a22
+#define regSPI_SHADER_USER_DATA_ADDR_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS 0x1a23
+#define regSPI_SHADER_USER_DATA_ADDR_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES_GS 0x1a24
+#define regSPI_SHADER_PGM_LO_ES_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES_GS 0x1a25
+#define regSPI_SHADER_PGM_HI_ES_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_GS 0x1a27
+#define regSPI_SHADER_PGM_RSRC3_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_GS 0x1a28
+#define regSPI_SHADER_PGM_LO_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_GS 0x1a29
+#define regSPI_SHADER_PGM_HI_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_GS 0x1a2a
+#define regSPI_SHADER_PGM_RSRC1_GS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_GS 0x1a2b
+#define regSPI_SHADER_PGM_RSRC2_GS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_0 0x1a2c
+#define regSPI_SHADER_USER_DATA_GS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_1 0x1a2d
+#define regSPI_SHADER_USER_DATA_GS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_2 0x1a2e
+#define regSPI_SHADER_USER_DATA_GS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_3 0x1a2f
+#define regSPI_SHADER_USER_DATA_GS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_4 0x1a30
+#define regSPI_SHADER_USER_DATA_GS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_5 0x1a31
+#define regSPI_SHADER_USER_DATA_GS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_6 0x1a32
+#define regSPI_SHADER_USER_DATA_GS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_7 0x1a33
+#define regSPI_SHADER_USER_DATA_GS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_8 0x1a34
+#define regSPI_SHADER_USER_DATA_GS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_9 0x1a35
+#define regSPI_SHADER_USER_DATA_GS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_10 0x1a36
+#define regSPI_SHADER_USER_DATA_GS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_11 0x1a37
+#define regSPI_SHADER_USER_DATA_GS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_12 0x1a38
+#define regSPI_SHADER_USER_DATA_GS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_13 0x1a39
+#define regSPI_SHADER_USER_DATA_GS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_14 0x1a3a
+#define regSPI_SHADER_USER_DATA_GS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_15 0x1a3b
+#define regSPI_SHADER_USER_DATA_GS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_16 0x1a3c
+#define regSPI_SHADER_USER_DATA_GS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_17 0x1a3d
+#define regSPI_SHADER_USER_DATA_GS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_18 0x1a3e
+#define regSPI_SHADER_USER_DATA_GS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_19 0x1a3f
+#define regSPI_SHADER_USER_DATA_GS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_20 0x1a40
+#define regSPI_SHADER_USER_DATA_GS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_21 0x1a41
+#define regSPI_SHADER_USER_DATA_GS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_22 0x1a42
+#define regSPI_SHADER_USER_DATA_GS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_23 0x1a43
+#define regSPI_SHADER_USER_DATA_GS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_24 0x1a44
+#define regSPI_SHADER_USER_DATA_GS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_25 0x1a45
+#define regSPI_SHADER_USER_DATA_GS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_26 0x1a46
+#define regSPI_SHADER_USER_DATA_GS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_27 0x1a47
+#define regSPI_SHADER_USER_DATA_GS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_28 0x1a48
+#define regSPI_SHADER_USER_DATA_GS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_29 0x1a49
+#define regSPI_SHADER_USER_DATA_GS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_30 0x1a4a
+#define regSPI_SHADER_USER_DATA_GS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_GS_31 0x1a4b
+#define regSPI_SHADER_USER_DATA_GS_31_BASE_IDX 0
+#define regSPI_SHADER_GS_MESHLET_DIM 0x1a4c
+#define regSPI_SHADER_GS_MESHLET_DIM_BASE_IDX 0
+#define regSPI_SHADER_GS_MESHLET_EXP_ALLOC 0x1a4d
+#define regSPI_SHADER_GS_MESHLET_EXP_ALLOC_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_ESGS 0x1a50
+#define regSPI_SHADER_REQ_CTRL_ESGS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_0 0x1a52
+#define regSPI_SHADER_USER_ACCUM_ESGS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_1 0x1a53
+#define regSPI_SHADER_USER_ACCUM_ESGS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_2 0x1a54
+#define regSPI_SHADER_USER_ACCUM_ESGS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_ESGS_3 0x1a55
+#define regSPI_SHADER_USER_ACCUM_ESGS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_ES 0x1a68
+#define regSPI_SHADER_PGM_LO_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_ES 0x1a69
+#define regSPI_SHADER_PGM_HI_ES_BASE_IDX 0
+#define regSPI_SHADER_PGM_CHKSUM_HS 0x1aa0
+#define regSPI_SHADER_PGM_CHKSUM_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC4_HS 0x1aa1
+#define regSPI_SHADER_PGM_RSRC4_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS 0x1aa2
+#define regSPI_SHADER_USER_DATA_ADDR_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS 0x1aa3
+#define regSPI_SHADER_USER_DATA_ADDR_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS_HS 0x1aa4
+#define regSPI_SHADER_PGM_LO_LS_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS_HS 0x1aa5
+#define regSPI_SHADER_PGM_HI_LS_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC3_HS 0x1aa7
+#define regSPI_SHADER_PGM_RSRC3_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_HS 0x1aa8
+#define regSPI_SHADER_PGM_LO_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_HS 0x1aa9
+#define regSPI_SHADER_PGM_HI_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC1_HS 0x1aaa
+#define regSPI_SHADER_PGM_RSRC1_HS_BASE_IDX 0
+#define regSPI_SHADER_PGM_RSRC2_HS 0x1aab
+#define regSPI_SHADER_PGM_RSRC2_HS_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_0 0x1aac
+#define regSPI_SHADER_USER_DATA_HS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_1 0x1aad
+#define regSPI_SHADER_USER_DATA_HS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_2 0x1aae
+#define regSPI_SHADER_USER_DATA_HS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_3 0x1aaf
+#define regSPI_SHADER_USER_DATA_HS_3_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_4 0x1ab0
+#define regSPI_SHADER_USER_DATA_HS_4_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_5 0x1ab1
+#define regSPI_SHADER_USER_DATA_HS_5_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_6 0x1ab2
+#define regSPI_SHADER_USER_DATA_HS_6_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_7 0x1ab3
+#define regSPI_SHADER_USER_DATA_HS_7_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_8 0x1ab4
+#define regSPI_SHADER_USER_DATA_HS_8_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_9 0x1ab5
+#define regSPI_SHADER_USER_DATA_HS_9_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_10 0x1ab6
+#define regSPI_SHADER_USER_DATA_HS_10_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_11 0x1ab7
+#define regSPI_SHADER_USER_DATA_HS_11_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_12 0x1ab8
+#define regSPI_SHADER_USER_DATA_HS_12_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_13 0x1ab9
+#define regSPI_SHADER_USER_DATA_HS_13_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_14 0x1aba
+#define regSPI_SHADER_USER_DATA_HS_14_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_15 0x1abb
+#define regSPI_SHADER_USER_DATA_HS_15_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_16 0x1abc
+#define regSPI_SHADER_USER_DATA_HS_16_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_17 0x1abd
+#define regSPI_SHADER_USER_DATA_HS_17_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_18 0x1abe
+#define regSPI_SHADER_USER_DATA_HS_18_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_19 0x1abf
+#define regSPI_SHADER_USER_DATA_HS_19_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_20 0x1ac0
+#define regSPI_SHADER_USER_DATA_HS_20_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_21 0x1ac1
+#define regSPI_SHADER_USER_DATA_HS_21_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_22 0x1ac2
+#define regSPI_SHADER_USER_DATA_HS_22_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_23 0x1ac3
+#define regSPI_SHADER_USER_DATA_HS_23_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_24 0x1ac4
+#define regSPI_SHADER_USER_DATA_HS_24_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_25 0x1ac5
+#define regSPI_SHADER_USER_DATA_HS_25_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_26 0x1ac6
+#define regSPI_SHADER_USER_DATA_HS_26_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_27 0x1ac7
+#define regSPI_SHADER_USER_DATA_HS_27_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_28 0x1ac8
+#define regSPI_SHADER_USER_DATA_HS_28_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_29 0x1ac9
+#define regSPI_SHADER_USER_DATA_HS_29_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_30 0x1aca
+#define regSPI_SHADER_USER_DATA_HS_30_BASE_IDX 0
+#define regSPI_SHADER_USER_DATA_HS_31 0x1acb
+#define regSPI_SHADER_USER_DATA_HS_31_BASE_IDX 0
+#define regSPI_SHADER_REQ_CTRL_LSHS 0x1ad0
+#define regSPI_SHADER_REQ_CTRL_LSHS_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_0 0x1ad2
+#define regSPI_SHADER_USER_ACCUM_LSHS_0_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_1 0x1ad3
+#define regSPI_SHADER_USER_ACCUM_LSHS_1_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_2 0x1ad4
+#define regSPI_SHADER_USER_ACCUM_LSHS_2_BASE_IDX 0
+#define regSPI_SHADER_USER_ACCUM_LSHS_3 0x1ad5
+#define regSPI_SHADER_USER_ACCUM_LSHS_3_BASE_IDX 0
+#define regSPI_SHADER_PGM_LO_LS 0x1ae8
+#define regSPI_SHADER_PGM_LO_LS_BASE_IDX 0
+#define regSPI_SHADER_PGM_HI_LS 0x1ae9
+#define regSPI_SHADER_PGM_HI_LS_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INITIATOR 0x1ba0
+#define regCOMPUTE_DISPATCH_INITIATOR_BASE_IDX 0
+#define regCOMPUTE_DIM_X 0x1ba1
+#define regCOMPUTE_DIM_X_BASE_IDX 0
+#define regCOMPUTE_DIM_Y 0x1ba2
+#define regCOMPUTE_DIM_Y_BASE_IDX 0
+#define regCOMPUTE_DIM_Z 0x1ba3
+#define regCOMPUTE_DIM_Z_BASE_IDX 0
+#define regCOMPUTE_START_X 0x1ba4
+#define regCOMPUTE_START_X_BASE_IDX 0
+#define regCOMPUTE_START_Y 0x1ba5
+#define regCOMPUTE_START_Y_BASE_IDX 0
+#define regCOMPUTE_START_Z 0x1ba6
+#define regCOMPUTE_START_Z_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_X 0x1ba7
+#define regCOMPUTE_NUM_THREAD_X_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Y 0x1ba8
+#define regCOMPUTE_NUM_THREAD_Y_BASE_IDX 0
+#define regCOMPUTE_NUM_THREAD_Z 0x1ba9
+#define regCOMPUTE_NUM_THREAD_Z_BASE_IDX 0
+#define regCOMPUTE_PIPELINESTAT_ENABLE 0x1baa
+#define regCOMPUTE_PIPELINESTAT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PERFCOUNT_ENABLE 0x1bab
+#define regCOMPUTE_PERFCOUNT_ENABLE_BASE_IDX 0
+#define regCOMPUTE_PGM_LO 0x1bac
+#define regCOMPUTE_PGM_LO_BASE_IDX 0
+#define regCOMPUTE_PGM_HI 0x1bad
+#define regCOMPUTE_PGM_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO 0x1bae
+#define regCOMPUTE_DISPATCH_PKT_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI 0x1baf
+#define regCOMPUTE_DISPATCH_PKT_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO 0x1bb0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_LO_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI 0x1bb1
+#define regCOMPUTE_DISPATCH_SCRATCH_BASE_HI_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC1 0x1bb2
+#define regCOMPUTE_PGM_RSRC1_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC2 0x1bb3
+#define regCOMPUTE_PGM_RSRC2_BASE_IDX 0
+#define regCOMPUTE_VMID 0x1bb4
+#define regCOMPUTE_VMID_BASE_IDX 0
+#define regCOMPUTE_RESOURCE_LIMITS 0x1bb5
+#define regCOMPUTE_RESOURCE_LIMITS_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE0 0x1bb6
+#define regCOMPUTE_DESTINATION_EN_SE0_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0 0x1bb6
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE0_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE1 0x1bb7
+#define regCOMPUTE_DESTINATION_EN_SE1_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1 0x1bb7
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE1_BASE_IDX 0
+#define regCOMPUTE_TMPRING_SIZE 0x1bb8
+#define regCOMPUTE_TMPRING_SIZE_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE2 0x1bb9
+#define regCOMPUTE_DESTINATION_EN_SE2_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2 0x1bb9
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0
+#define regCOMPUTE_DESTINATION_EN_SE3 0x1bba
+#define regCOMPUTE_DESTINATION_EN_SE3_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3 0x1bba
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0
+#define regCOMPUTE_RESTART_X 0x1bbb
+#define regCOMPUTE_RESTART_X_BASE_IDX 0
+#define regCOMPUTE_RESTART_Y 0x1bbc
+#define regCOMPUTE_RESTART_Y_BASE_IDX 0
+#define regCOMPUTE_RESTART_Z 0x1bbd
+#define regCOMPUTE_RESTART_Z_BASE_IDX 0
+#define regCOMPUTE_THREAD_TRACE_ENABLE 0x1bbe
+#define regCOMPUTE_THREAD_TRACE_ENABLE_BASE_IDX 0
+#define regCOMPUTE_MISC_RESERVED 0x1bbf
+#define regCOMPUTE_MISC_RESERVED_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_ID 0x1bc0
+#define regCOMPUTE_DISPATCH_ID_BASE_IDX 0
+#define regCOMPUTE_THREADGROUP_ID 0x1bc1
+#define regCOMPUTE_THREADGROUP_ID_BASE_IDX 0
+#define regCOMPUTE_REQ_CTRL 0x1bc2
+#define regCOMPUTE_REQ_CTRL_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_0 0x1bc4
+#define regCOMPUTE_USER_ACCUM_0_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_1 0x1bc5
+#define regCOMPUTE_USER_ACCUM_1_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_2 0x1bc6
+#define regCOMPUTE_USER_ACCUM_2_BASE_IDX 0
+#define regCOMPUTE_USER_ACCUM_3 0x1bc7
+#define regCOMPUTE_USER_ACCUM_3_BASE_IDX 0
+#define regCOMPUTE_PGM_RSRC3 0x1bc8
+#define regCOMPUTE_PGM_RSRC3_BASE_IDX 0
+#define regCOMPUTE_DDID_INDEX 0x1bc9
+#define regCOMPUTE_DDID_INDEX_BASE_IDX 0
+#define regCOMPUTE_SHADER_CHKSUM 0x1bca
+#define regCOMPUTE_SHADER_CHKSUM_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE4 0x1bcb
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE5 0x1bcc
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE6 0x1bcd
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE7 0x1bce
+#define regCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_INTERLEAVE 0x1bcf
+#define regCOMPUTE_DISPATCH_INTERLEAVE_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH 0x1bd0
+#define regCOMPUTE_RELAUNCH_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO 0x1bd1
+#define regCOMPUTE_WAVE_RESTORE_ADDR_LO_BASE_IDX 0
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI 0x1bd2
+#define regCOMPUTE_WAVE_RESTORE_ADDR_HI_BASE_IDX 0
+#define regCOMPUTE_RELAUNCH2 0x1bd3
+#define regCOMPUTE_RELAUNCH2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_0 0x1be0
+#define regCOMPUTE_USER_DATA_0_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_1 0x1be1
+#define regCOMPUTE_USER_DATA_1_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_2 0x1be2
+#define regCOMPUTE_USER_DATA_2_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_3 0x1be3
+#define regCOMPUTE_USER_DATA_3_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_4 0x1be4
+#define regCOMPUTE_USER_DATA_4_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_5 0x1be5
+#define regCOMPUTE_USER_DATA_5_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_6 0x1be6
+#define regCOMPUTE_USER_DATA_6_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_7 0x1be7
+#define regCOMPUTE_USER_DATA_7_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_8 0x1be8
+#define regCOMPUTE_USER_DATA_8_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_9 0x1be9
+#define regCOMPUTE_USER_DATA_9_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_10 0x1bea
+#define regCOMPUTE_USER_DATA_10_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_11 0x1beb
+#define regCOMPUTE_USER_DATA_11_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_12 0x1bec
+#define regCOMPUTE_USER_DATA_12_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_13 0x1bed
+#define regCOMPUTE_USER_DATA_13_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_14 0x1bee
+#define regCOMPUTE_USER_DATA_14_BASE_IDX 0
+#define regCOMPUTE_USER_DATA_15 0x1bef
+#define regCOMPUTE_USER_DATA_15_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_TUNNEL 0x1c1d
+#define regCOMPUTE_DISPATCH_TUNNEL_BASE_IDX 0
+#define regCOMPUTE_DISPATCH_END 0x1c1e
+#define regCOMPUTE_DISPATCH_END_BASE_IDX 0
+#define regCOMPUTE_NOWHERE 0x1c1f
+#define regCOMPUTE_NOWHERE_BASE_IDX 0
+#define regSH_RESERVED_REG0 0x1c20
+#define regSH_RESERVED_REG0_BASE_IDX 0
+#define regSH_RESERVED_REG1 0x1c21
+#define regSH_RESERVED_REG1_BASE_IDX 0
+
+
+// addressBlock: gc_cppdec
+// base address: 0xc080
+#define regCP_CU_MASK_ADDR_LO 0x1dd2
+#define regCP_CU_MASK_ADDR_LO_BASE_IDX 0
+#define regCP_CU_MASK_ADDR_HI 0x1dd3
+#define regCP_CU_MASK_ADDR_HI_BASE_IDX 0
+#define regCP_CU_MASK_CNTL 0x1dd4
+#define regCP_CU_MASK_CNTL_BASE_IDX 0
+#define regCP_EOPQ_WAIT_TIME 0x1dd5
+#define regCP_EOPQ_WAIT_TIME_BASE_IDX 0
+#define regCP_CPC_MGCG_SYNC_CNTL 0x1dd6
+#define regCP_CPC_MGCG_SYNC_CNTL_BASE_IDX 0
+#define regCPC_INT_INFO 0x1dd7
+#define regCPC_INT_INFO_BASE_IDX 0
+#define regCP_VIRT_STATUS 0x1dd8
+#define regCP_VIRT_STATUS_BASE_IDX 0
+#define regCPC_INT_ADDR 0x1dd9
+#define regCPC_INT_ADDR_BASE_IDX 0
+#define regCPC_INT_PASID 0x1dda
+#define regCPC_INT_PASID_BASE_IDX 0
+#define regCP_GFX_ERROR 0x1ddb
+#define regCP_GFX_ERROR_BASE_IDX 0
+#define regCPG_UTCL1_CNTL 0x1ddc
+#define regCPG_UTCL1_CNTL_BASE_IDX 0
+#define regCPC_UTCL1_CNTL 0x1ddd
+#define regCPC_UTCL1_CNTL_BASE_IDX 0
+#define regCPF_UTCL1_CNTL 0x1dde
+#define regCPF_UTCL1_CNTL_BASE_IDX 0
+#define regCP_AQL_SMM_STATUS 0x1ddf
+#define regCP_AQL_SMM_STATUS_BASE_IDX 0
+#define regCP_RB0_BASE 0x1de0
+#define regCP_RB0_BASE_BASE_IDX 0
+#define regCP_RB_BASE 0x1de0
+#define regCP_RB_BASE_BASE_IDX 0
+#define regCP_RB0_CNTL 0x1de1
+#define regCP_RB0_CNTL_BASE_IDX 0
+#define regCP_RB_CNTL 0x1de1
+#define regCP_RB_CNTL_BASE_IDX 0
+#define regCP_RB_RPTR_WR 0x1de2
+#define regCP_RB_RPTR_WR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR 0x1de3
+#define regCP_RB0_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR 0x1de3
+#define regCP_RB_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB0_RPTR_ADDR_HI 0x1de4
+#define regCP_RB0_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_RPTR_ADDR_HI 0x1de4
+#define regCP_RB_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB0_BUFSZ_MASK 0x1de5
+#define regCP_RB0_BUFSZ_MASK_BASE_IDX 0
+#define regCP_RB_BUFSZ_MASK 0x1de5
+#define regCP_RB_BUFSZ_MASK_BASE_IDX 0
+#define regGC_PRIV_MODE 0x1de8
+#define regGC_PRIV_MODE_BASE_IDX 0
+#define regCP_INT_CNTL 0x1de9
+#define regCP_INT_CNTL_BASE_IDX 0
+#define regCP_INT_STATUS 0x1dea
+#define regCP_INT_STATUS_BASE_IDX 0
+#define regCP_DEVICE_ID 0x1deb
+#define regCP_DEVICE_ID_BASE_IDX 0
+#define regCP_ME0_PIPE_PRIORITY_CNTS 0x1dec
+#define regCP_ME0_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_RING_PRIORITY_CNTS 0x1dec
+#define regCP_RING_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME0_PIPE0_PRIORITY 0x1ded
+#define regCP_ME0_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_RING0_PRIORITY 0x1ded
+#define regCP_RING0_PRIORITY_BASE_IDX 0
+#define regCP_ME0_PIPE1_PRIORITY 0x1dee
+#define regCP_ME0_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_RING1_PRIORITY 0x1dee
+#define regCP_RING1_PRIORITY_BASE_IDX 0
+#define regCP_FATAL_ERROR 0x1df0
+#define regCP_FATAL_ERROR_BASE_IDX 0
+#define regCP_RB_VMID 0x1df1
+#define regCP_RB_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE0_VMID 0x1df2
+#define regCP_ME0_PIPE0_VMID_BASE_IDX 0
+#define regCP_ME0_PIPE1_VMID 0x1df3
+#define regCP_ME0_PIPE1_VMID_BASE_IDX 0
+#define regCP_RB0_WPTR 0x1df4
+#define regCP_RB0_WPTR_BASE_IDX 0
+#define regCP_RB_WPTR 0x1df4
+#define regCP_RB_WPTR_BASE_IDX 0
+#define regCP_RB0_WPTR_HI 0x1df5
+#define regCP_RB0_WPTR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_HI 0x1df5
+#define regCP_RB_WPTR_HI_BASE_IDX 0
+#define regCP_RB1_WPTR 0x1df6
+#define regCP_RB1_WPTR_BASE_IDX 0
+#define regCP_RB1_WPTR_HI 0x1df7
+#define regCP_RB1_WPTR_HI_BASE_IDX 0
+#define regCP_PROCESS_QUANTUM 0x1df9
+#define regCP_PROCESS_QUANTUM_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_LOWER 0x1dfa
+#define regCP_RB_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_RB_DOORBELL_RANGE_UPPER 0x1dfb
+#define regCP_RB_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_LOWER 0x1dfc
+#define regCP_MEC_DOORBELL_RANGE_LOWER_BASE_IDX 0
+#define regCP_MEC_DOORBELL_RANGE_UPPER 0x1dfd
+#define regCP_MEC_DOORBELL_RANGE_UPPER_BASE_IDX 0
+#define regCPG_UTCL1_ERROR 0x1dfe
+#define regCPG_UTCL1_ERROR_BASE_IDX 0
+#define regCPC_UTCL1_ERROR 0x1dff
+#define regCPC_UTCL1_ERROR_BASE_IDX 0
+#define regCP_RB1_BASE 0x1e00
+#define regCP_RB1_BASE_BASE_IDX 0
+#define regCP_RB1_CNTL 0x1e01
+#define regCP_RB1_CNTL_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR 0x1e02
+#define regCP_RB1_RPTR_ADDR_BASE_IDX 0
+#define regCP_RB1_RPTR_ADDR_HI 0x1e03
+#define regCP_RB1_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB1_BUFSZ_MASK 0x1e04
+#define regCP_RB1_BUFSZ_MASK_BASE_IDX 0
+#define regCP_INT_CNTL_RING0 0x1e0a
+#define regCP_INT_CNTL_RING0_BASE_IDX 0
+#define regCP_INT_CNTL_RING1 0x1e0b
+#define regCP_INT_CNTL_RING1_BASE_IDX 0
+#define regCP_INT_STATUS_RING0 0x1e0d
+#define regCP_INT_STATUS_RING0_BASE_IDX 0
+#define regCP_INT_STATUS_RING1 0x1e0e
+#define regCP_INT_STATUS_RING1_BASE_IDX 0
+#define regCP_ME_F32_INTERRUPT 0x1e13
+#define regCP_ME_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PFP_F32_INTERRUPT 0x1e14
+#define regCP_PFP_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC1_F32_INTERRUPT 0x1e16
+#define regCP_MEC1_F32_INTERRUPT_BASE_IDX 0
+#define regCP_MEC2_F32_INTERRUPT 0x1e17
+#define regCP_MEC2_F32_INTERRUPT_BASE_IDX 0
+#define regCP_PWR_CNTL 0x1e18
+#define regCP_PWR_CNTL_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE 0x1e1a
+#define regCP_ECC_FIRSTOCCURRENCE_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING0 0x1e1b
+#define regCP_ECC_FIRSTOCCURRENCE_RING0_BASE_IDX 0
+#define regCP_ECC_FIRSTOCCURRENCE_RING1 0x1e1c
+#define regCP_ECC_FIRSTOCCURRENCE_RING1_BASE_IDX 0
+#define regGB_EDC_MODE 0x1e1e
+#define regGB_EDC_MODE_BASE_IDX 0
+#define regCP_DEBUG 0x1e1f
+#define regCP_DEBUG_BASE_IDX 0
+#define regCP_CPF_DEBUG 0x1e20
+#define regCP_CPF_DEBUG_BASE_IDX 0
+#define regCP_CPC_DEBUG 0x1e21
+#define regCP_CPC_DEBUG_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL 0x1e23
+#define regCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0
+#define regCP_PQ_WPTR_POLL_CNTL1 0x1e24
+#define regCP_PQ_WPTR_POLL_CNTL1_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_CNTL 0x1e25
+#define regCP_ME1_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_CNTL 0x1e26
+#define regCP_ME1_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_CNTL 0x1e27
+#define regCP_ME1_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_CNTL 0x1e28
+#define regCP_ME1_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_CNTL 0x1e29
+#define regCP_ME2_PIPE0_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_CNTL 0x1e2a
+#define regCP_ME2_PIPE1_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_CNTL 0x1e2b
+#define regCP_ME2_PIPE2_INT_CNTL_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_CNTL 0x1e2c
+#define regCP_ME2_PIPE3_INT_CNTL_BASE_IDX 0
+#define regCP_ME1_PIPE0_INT_STATUS 0x1e2d
+#define regCP_ME1_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE1_INT_STATUS 0x1e2e
+#define regCP_ME1_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE2_INT_STATUS 0x1e2f
+#define regCP_ME1_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_PIPE3_INT_STATUS 0x1e30
+#define regCP_ME1_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE0_INT_STATUS 0x1e31
+#define regCP_ME2_PIPE0_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE1_INT_STATUS 0x1e32
+#define regCP_ME2_PIPE1_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE2_INT_STATUS 0x1e33
+#define regCP_ME2_PIPE2_INT_STATUS_BASE_IDX 0
+#define regCP_ME2_PIPE3_INT_STATUS 0x1e34
+#define regCP_ME2_PIPE3_INT_STATUS_BASE_IDX 0
+#define regCP_ME1_INT_STAT_DEBUG 0x1e35
+#define regCP_ME1_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_ME2_INT_STAT_DEBUG 0x1e36
+#define regCP_ME2_INT_STAT_DEBUG_BASE_IDX 0
+#define regCP_GFX_QUEUE_INDEX 0x1e37
+#define regCP_GFX_QUEUE_INDEX_BASE_IDX 0
+#define regCC_GC_EDC_CONFIG 0x1e38
+#define regCC_GC_EDC_CONFIG_BASE_IDX 0
+#define regCP_ME1_PIPE_PRIORITY_CNTS 0x1e39
+#define regCP_ME1_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME1_PIPE0_PRIORITY 0x1e3a
+#define regCP_ME1_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE1_PRIORITY 0x1e3b
+#define regCP_ME1_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE2_PRIORITY 0x1e3c
+#define regCP_ME1_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME1_PIPE3_PRIORITY 0x1e3d
+#define regCP_ME1_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE_PRIORITY_CNTS 0x1e3e
+#define regCP_ME2_PIPE_PRIORITY_CNTS_BASE_IDX 0
+#define regCP_ME2_PIPE0_PRIORITY 0x1e3f
+#define regCP_ME2_PIPE0_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE1_PRIORITY 0x1e40
+#define regCP_ME2_PIPE1_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE2_PRIORITY 0x1e41
+#define regCP_ME2_PIPE2_PRIORITY_BASE_IDX 0
+#define regCP_ME2_PIPE3_PRIORITY 0x1e42
+#define regCP_ME2_PIPE3_PRIORITY_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START 0x1e44
+#define regCP_PFP_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START 0x1e45
+#define regCP_ME_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC1_PRGRM_CNTR_START 0x1e46
+#define regCP_MEC1_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_MEC2_PRGRM_CNTR_START 0x1e47
+#define regCP_MEC2_PRGRM_CNTR_START_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START 0x1e49
+#define regCP_PFP_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START 0x1e4a
+#define regCP_ME_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC1_INTR_ROUTINE_START 0x1e4b
+#define regCP_MEC1_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_MEC2_INTR_ROUTINE_START 0x1e4c
+#define regCP_MEC2_INTR_ROUTINE_START_BASE_IDX 0
+#define regCP_CONTEXT_CNTL 0x1e4d
+#define regCP_CONTEXT_CNTL_BASE_IDX 0
+#define regCP_MAX_CONTEXT 0x1e4e
+#define regCP_MAX_CONTEXT_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME1 0x1e4f
+#define regCP_IQ_WAIT_TIME1_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME2 0x1e50
+#define regCP_IQ_WAIT_TIME2_BASE_IDX 0
+#define regCP_RB0_BASE_HI 0x1e51
+#define regCP_RB0_BASE_HI_BASE_IDX 0
+#define regCP_RB1_BASE_HI 0x1e52
+#define regCP_RB1_BASE_HI_BASE_IDX 0
+#define regCP_VMID_RESET 0x1e53
+#define regCP_VMID_RESET_BASE_IDX 0
+#define regCPC_INT_CNTL 0x1e54
+#define regCPC_INT_CNTL_BASE_IDX 0
+#define regCPC_INT_STATUS 0x1e55
+#define regCPC_INT_STATUS_BASE_IDX 0
+#define regCP_VMID_PREEMPT 0x1e56
+#define regCP_VMID_PREEMPT_BASE_IDX 0
+#define regCPC_INT_CNTX_ID 0x1e57
+#define regCPC_INT_CNTX_ID_BASE_IDX 0
+#define regCP_PQ_STATUS 0x1e58
+#define regCP_PQ_STATUS_BASE_IDX 0
+#define regCP_PFP_PRGRM_CNTR_START_HI 0x1e59
+#define regCP_PFP_PRGRM_CNTR_START_HI_BASE_IDX 0
+#define regCP_MAX_DRAW_COUNT 0x1e5c
+#define regCP_MAX_DRAW_COUNT_BASE_IDX 0
+#define regCP_MEC1_F32_INT_DIS 0x1e5d
+#define regCP_MEC1_F32_INT_DIS_BASE_IDX 0
+#define regCP_MEC2_F32_INT_DIS 0x1e5e
+#define regCP_MEC2_F32_INT_DIS_BASE_IDX 0
+#define regCP_VMID_STATUS 0x1e5f
+#define regCP_VMID_STATUS_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO 0x1e60
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI 0x1e61
+#define regCPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_CONTROL 0x1e62
+#define regCPC_SUSPEND_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCPC_SUSPEND_CNTL_STACK_OFFSET 0x1e63
+#define regCPC_SUSPEND_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCPC_SUSPEND_CNTL_STACK_SIZE 0x1e64
+#define regCPC_SUSPEND_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCPC_SUSPEND_WG_STATE_OFFSET 0x1e65
+#define regCPC_SUSPEND_WG_STATE_OFFSET_BASE_IDX 0
+#define regCPC_SUSPEND_CTX_SAVE_SIZE 0x1e66
+#define regCPC_SUSPEND_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCPC_OS_PIPES 0x1e67
+#define regCPC_OS_PIPES_BASE_IDX 0
+#define regCP_SUSPEND_RESUME_REQ 0x1e68
+#define regCP_SUSPEND_RESUME_REQ_BASE_IDX 0
+#define regCP_SUSPEND_CNTL 0x1e69
+#define regCP_SUSPEND_CNTL_BASE_IDX 0
+#define regCP_IQ_WAIT_TIME3 0x1e6a
+#define regCP_IQ_WAIT_TIME3_BASE_IDX 0
+#define regCPC_DDID_BASE_ADDR_LO 0x1e6b
+#define regCPC_DDID_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_DDID_BASE_ADDR_LO 0x1e6b
+#define regCP_DDID_BASE_ADDR_LO_BASE_IDX 0
+#define regCPC_DDID_BASE_ADDR_HI 0x1e6c
+#define regCPC_DDID_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_DDID_BASE_ADDR_HI 0x1e6c
+#define regCP_DDID_BASE_ADDR_HI_BASE_IDX 0
+#define regCPC_DDID_CNTL 0x1e6d
+#define regCPC_DDID_CNTL_BASE_IDX 0
+#define regCP_DDID_CNTL 0x1e6d
+#define regCP_DDID_CNTL_BASE_IDX 0
+#define regCP_GFX_DDID_INFLIGHT_COUNT 0x1e6e
+#define regCP_GFX_DDID_INFLIGHT_COUNT_BASE_IDX 0
+#define regCP_GFX_DDID_WPTR 0x1e6f
+#define regCP_GFX_DDID_WPTR_BASE_IDX 0
+#define regCP_GFX_DDID_RPTR 0x1e70
+#define regCP_GFX_DDID_RPTR_BASE_IDX 0
+#define regCP_GFX_DDID_DELTA_RPT_COUNT 0x1e71
+#define regCP_GFX_DDID_DELTA_RPT_COUNT_BASE_IDX 0
+#define regCP_GFX_HPD_STATUS0 0x1e72
+#define regCP_GFX_HPD_STATUS0_BASE_IDX 0
+#define regCP_GFX_HPD_CONTROL0 0x1e73
+#define regCP_GFX_HPD_CONTROL0_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_LO 0x1e74
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_LO_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_HI 0x1e75
+#define regCP_GFX_HPD_OSPRE_FENCE_ADDR_HI_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_LO 0x1e76
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_LO_BASE_IDX 0
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_HI 0x1e77
+#define regCP_GFX_HPD_OSPRE_FENCE_DATA_HI_BASE_IDX 0
+#define regCP_GFX_INDEX_MUTEX 0x1e78
+#define regCP_GFX_INDEX_MUTEX_BASE_IDX 0
+#define regCP_ME_PRGRM_CNTR_START_HI 0x1e79
+#define regCP_ME_PRGRM_CNTR_START_HI_BASE_IDX 0
+#define regCP_PFP_INTR_ROUTINE_START_HI 0x1e7a
+#define regCP_PFP_INTR_ROUTINE_START_HI_BASE_IDX 0
+#define regCP_ME_INTR_ROUTINE_START_HI 0x1e7b
+#define regCP_ME_INTR_ROUTINE_START_HI_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR 0x1e7e
+#define regCP_GFX_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_GFX_MQD_BASE_ADDR_HI 0x1e7f
+#define regCP_GFX_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_GFX_HQD_ACTIVE 0x1e80
+#define regCP_GFX_HQD_ACTIVE_BASE_IDX 0
+#define regCP_GFX_HQD_VMID 0x1e81
+#define regCP_GFX_HQD_VMID_BASE_IDX 0
+#define regCP_GFX_HQD_QUEUE_PRIORITY 0x1e84
+#define regCP_GFX_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_GFX_HQD_QUANTUM 0x1e85
+#define regCP_GFX_HQD_QUANTUM_BASE_IDX 0
+#define regCP_GFX_HQD_BASE 0x1e86
+#define regCP_GFX_HQD_BASE_BASE_IDX 0
+#define regCP_GFX_HQD_BASE_HI 0x1e87
+#define regCP_GFX_HQD_BASE_HI_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR 0x1e88
+#define regCP_GFX_HQD_RPTR_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR_ADDR 0x1e89
+#define regCP_GFX_HQD_RPTR_ADDR_BASE_IDX 0
+#define regCP_GFX_HQD_RPTR_ADDR_HI 0x1e8a
+#define regCP_GFX_HQD_RPTR_ADDR_HI_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_LO 0x1e8b
+#define regCP_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0
+#define regCP_RB_WPTR_POLL_ADDR_HI 0x1e8c
+#define regCP_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_RB_DOORBELL_CONTROL 0x1e8d
+#define regCP_RB_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_GFX_HQD_OFFSET 0x1e8e
+#define regCP_GFX_HQD_OFFSET_BASE_IDX 0
+#define regCP_GFX_HQD_CNTL 0x1e8f
+#define regCP_GFX_HQD_CNTL_BASE_IDX 0
+#define regCP_GFX_HQD_CSMD_RPTR 0x1e90
+#define regCP_GFX_HQD_CSMD_RPTR_BASE_IDX 0
+#define regCP_GFX_HQD_WPTR 0x1e91
+#define regCP_GFX_HQD_WPTR_BASE_IDX 0
+#define regCP_GFX_HQD_WPTR_HI 0x1e92
+#define regCP_GFX_HQD_WPTR_HI_BASE_IDX 0
+#define regCP_GFX_HQD_DEQUEUE_REQUEST 0x1e93
+#define regCP_GFX_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_GFX_HQD_MAPPED 0x1e94
+#define regCP_GFX_HQD_MAPPED_BASE_IDX 0
+#define regCP_GFX_HQD_QUE_MGR_CONTROL 0x1e95
+#define regCP_GFX_HQD_QUE_MGR_CONTROL_BASE_IDX 0
+#define regCP_GFX_HQD_IQ_TIMER 0x1e96
+#define regCP_GFX_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_GFX_HQD_HQ_STATUS0 0x1e98
+#define regCP_GFX_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_GFX_HQD_HQ_CONTROL0 0x1e99
+#define regCP_GFX_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_GFX_MQD_CONTROL 0x1e9a
+#define regCP_GFX_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_CONTROL 0x1e9f
+#define regCP_HQD_GFX_CONTROL_BASE_IDX 0
+#define regCP_HQD_GFX_STATUS 0x1ea0
+#define regCP_HQD_GFX_STATUS_BASE_IDX 0
+#define regCP_DMA_WATCH0_ADDR_LO 0x1ec0
+#define regCP_DMA_WATCH0_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH0_ADDR_HI 0x1ec1
+#define regCP_DMA_WATCH0_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH0_MASK 0x1ec2
+#define regCP_DMA_WATCH0_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH0_CNTL 0x1ec3
+#define regCP_DMA_WATCH0_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH1_ADDR_LO 0x1ec4
+#define regCP_DMA_WATCH1_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH1_ADDR_HI 0x1ec5
+#define regCP_DMA_WATCH1_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH1_MASK 0x1ec6
+#define regCP_DMA_WATCH1_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH1_CNTL 0x1ec7
+#define regCP_DMA_WATCH1_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH2_ADDR_LO 0x1ec8
+#define regCP_DMA_WATCH2_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH2_ADDR_HI 0x1ec9
+#define regCP_DMA_WATCH2_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH2_MASK 0x1eca
+#define regCP_DMA_WATCH2_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH2_CNTL 0x1ecb
+#define regCP_DMA_WATCH2_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH3_ADDR_LO 0x1ecc
+#define regCP_DMA_WATCH3_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH3_ADDR_HI 0x1ecd
+#define regCP_DMA_WATCH3_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH3_MASK 0x1ece
+#define regCP_DMA_WATCH3_MASK_BASE_IDX 0
+#define regCP_DMA_WATCH3_CNTL 0x1ecf
+#define regCP_DMA_WATCH3_CNTL_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT_ADDR_LO 0x1ed0
+#define regCP_DMA_WATCH_STAT_ADDR_LO_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT_ADDR_HI 0x1ed1
+#define regCP_DMA_WATCH_STAT_ADDR_HI_BASE_IDX 0
+#define regCP_DMA_WATCH_STAT 0x1ed2
+#define regCP_DMA_WATCH_STAT_BASE_IDX 0
+#define regCP_PFP_JT_STAT 0x1ed3
+#define regCP_PFP_JT_STAT_BASE_IDX 0
+#define regCP_MEC_JT_STAT 0x1ed5
+#define regCP_MEC_JT_STAT_BASE_IDX 0
+#define regCP_CPC_BUSY_HYSTERESIS 0x1edb
+#define regCP_CPC_BUSY_HYSTERESIS_BASE_IDX 0
+#define regCP_CPF_BUSY_HYSTERESIS1 0x1edc
+#define regCP_CPF_BUSY_HYSTERESIS1_BASE_IDX 0
+#define regCP_CPF_BUSY_HYSTERESIS2 0x1edd
+#define regCP_CPF_BUSY_HYSTERESIS2_BASE_IDX 0
+#define regCP_CPG_BUSY_HYSTERESIS1 0x1ede
+#define regCP_CPG_BUSY_HYSTERESIS1_BASE_IDX 0
+#define regCP_CPG_BUSY_HYSTERESIS2 0x1edf
+#define regCP_CPG_BUSY_HYSTERESIS2_BASE_IDX 0
+#define regCP_RB_DOORBELL_CLEAR 0x1f28
+#define regCP_RB_DOORBELL_CLEAR_BASE_IDX 0
+#define regCP_RB0_ACTIVE 0x1f40
+#define regCP_RB0_ACTIVE_BASE_IDX 0
+#define regCP_RB_ACTIVE 0x1f40
+#define regCP_RB_ACTIVE_BASE_IDX 0
+#define regCP_RB1_ACTIVE 0x1f41
+#define regCP_RB1_ACTIVE_BASE_IDX 0
+#define regCP_RB_STATUS 0x1f43
+#define regCP_RB_STATUS_BASE_IDX 0
+#define regCPG_RCIU_CAM_INDEX 0x1f44
+#define regCPG_RCIU_CAM_INDEX_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA 0x1f45
+#define regCPG_RCIU_CAM_DATA_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE0 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE0_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE1 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE1_BASE_IDX 0
+#define regCPG_RCIU_CAM_DATA_PHASE2 0x1f45
+#define regCPG_RCIU_CAM_DATA_PHASE2_BASE_IDX 0
+#define regCP_GPU_TIMESTAMP_OFFSET_LO 0x1f4c
+#define regCP_GPU_TIMESTAMP_OFFSET_LO_BASE_IDX 0
+#define regCP_GPU_TIMESTAMP_OFFSET_HI 0x1f4d
+#define regCP_GPU_TIMESTAMP_OFFSET_HI_BASE_IDX 0
+#define regCP_SDMA_DMA_DONE 0x1f4e
+#define regCP_SDMA_DMA_DONE_BASE_IDX 0
+#define regCP_PFP_SDMA_CS 0x1f4f
+#define regCP_PFP_SDMA_CS_BASE_IDX 0
+#define regCP_ME_SDMA_CS 0x1f50
+#define regCP_ME_SDMA_CS_BASE_IDX 0
+#define regCPF_GCR_CNTL 0x1f53
+#define regCPF_GCR_CNTL_BASE_IDX 0
+#define regCPG_UTCL1_STATUS 0x1f54
+#define regCPG_UTCL1_STATUS_BASE_IDX 0
+#define regCPC_UTCL1_STATUS 0x1f55
+#define regCPC_UTCL1_STATUS_BASE_IDX 0
+#define regCPF_UTCL1_STATUS 0x1f56
+#define regCPF_UTCL1_STATUS_BASE_IDX 0
+#define regCP_SD_CNTL 0x1f57
+#define regCP_SD_CNTL_BASE_IDX 0
+#define regCP_SOFT_RESET_CNTL 0x1f59
+#define regCP_SOFT_RESET_CNTL_BASE_IDX 0
+#define regCP_CPC_GFX_CNTL 0x1f5a
+#define regCP_CPC_GFX_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_spipdec
+// base address: 0xc700
+#define regSPI_ARB_PRIORITY 0x1f60
+#define regSPI_ARB_PRIORITY_BASE_IDX 0
+#define regSPI_ARB_CYCLES_0 0x1f61
+#define regSPI_ARB_CYCLES_0_BASE_IDX 0
+#define regSPI_ARB_CYCLES_1 0x1f62
+#define regSPI_ARB_CYCLES_1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_GFX 0x1f67
+#define regSPI_WCL_PIPE_PERCENT_GFX_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_HP3D 0x1f68
+#define regSPI_WCL_PIPE_PERCENT_HP3D_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS0 0x1f69
+#define regSPI_WCL_PIPE_PERCENT_CS0_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS1 0x1f6a
+#define regSPI_WCL_PIPE_PERCENT_CS1_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS2 0x1f6b
+#define regSPI_WCL_PIPE_PERCENT_CS2_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS3 0x1f6c
+#define regSPI_WCL_PIPE_PERCENT_CS3_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS4 0x1f6d
+#define regSPI_WCL_PIPE_PERCENT_CS4_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS5 0x1f6e
+#define regSPI_WCL_PIPE_PERCENT_CS5_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS6 0x1f6f
+#define regSPI_WCL_PIPE_PERCENT_CS6_BASE_IDX 0
+#define regSPI_WCL_PIPE_PERCENT_CS7 0x1f70
+#define regSPI_WCL_PIPE_PERCENT_CS7_BASE_IDX 0
+#define regSPI_USER_ACCUM_VMID_CNTL 0x1f71
+#define regSPI_USER_ACCUM_VMID_CNTL_BASE_IDX 0
+#define regSPI_GDBG_PER_VMID_CNTL 0x1f72
+#define regSPI_GDBG_PER_VMID_CNTL_BASE_IDX 0
+#define regSPI_COMPUTE_QUEUE_RESET 0x1f73
+#define regSPI_COMPUTE_QUEUE_RESET_BASE_IDX 0
+#define regSPI_COMPUTE_WF_CTX_SAVE 0x1f74
+#define regSPI_COMPUTE_WF_CTX_SAVE_BASE_IDX 0
+
+
+// addressBlock: gc_cpphqddec
+// base address: 0xc800
+#define regCP_HPD_UTCL1_CNTL 0x1fa3
+#define regCP_HPD_UTCL1_CNTL_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR 0x1fa7
+#define regCP_HPD_UTCL1_ERROR_BASE_IDX 0
+#define regCP_HPD_UTCL1_ERROR_ADDR 0x1fa8
+#define regCP_HPD_UTCL1_ERROR_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR 0x1fa9
+#define regCP_MQD_BASE_ADDR_BASE_IDX 0
+#define regCP_MQD_BASE_ADDR_HI 0x1faa
+#define regCP_MQD_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_ACTIVE 0x1fab
+#define regCP_HQD_ACTIVE_BASE_IDX 0
+#define regCP_HQD_VMID 0x1fac
+#define regCP_HQD_VMID_BASE_IDX 0
+#define regCP_HQD_PERSISTENT_STATE 0x1fad
+#define regCP_HQD_PERSISTENT_STATE_BASE_IDX 0
+#define regCP_HQD_PIPE_PRIORITY 0x1fae
+#define regCP_HQD_PIPE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUEUE_PRIORITY 0x1faf
+#define regCP_HQD_QUEUE_PRIORITY_BASE_IDX 0
+#define regCP_HQD_QUANTUM 0x1fb0
+#define regCP_HQD_QUANTUM_BASE_IDX 0
+#define regCP_HQD_PQ_BASE 0x1fb1
+#define regCP_HQD_PQ_BASE_BASE_IDX 0
+#define regCP_HQD_PQ_BASE_HI 0x1fb2
+#define regCP_HQD_PQ_BASE_HI_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR 0x1fb3
+#define regCP_HQD_PQ_RPTR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR 0x1fb4
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI 0x1fb5
+#define regCP_HQD_PQ_RPTR_REPORT_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR 0x1fb6
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI 0x1fb7
+#define regCP_HQD_PQ_WPTR_POLL_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_PQ_DOORBELL_CONTROL 0x1fb8
+#define regCP_HQD_PQ_DOORBELL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_CONTROL 0x1fba
+#define regCP_HQD_PQ_CONTROL_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR 0x1fbb
+#define regCP_HQD_IB_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_IB_BASE_ADDR_HI 0x1fbc
+#define regCP_HQD_IB_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_IB_RPTR 0x1fbd
+#define regCP_HQD_IB_RPTR_BASE_IDX 0
+#define regCP_HQD_IB_CONTROL 0x1fbe
+#define regCP_HQD_IB_CONTROL_BASE_IDX 0
+#define regCP_HQD_IQ_TIMER 0x1fbf
+#define regCP_HQD_IQ_TIMER_BASE_IDX 0
+#define regCP_HQD_IQ_RPTR 0x1fc0
+#define regCP_HQD_IQ_RPTR_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_REQUEST 0x1fc1
+#define regCP_HQD_DEQUEUE_REQUEST_BASE_IDX 0
+#define regCP_HQD_DMA_OFFLOAD 0x1fc2
+#define regCP_HQD_DMA_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_OFFLOAD 0x1fc2
+#define regCP_HQD_OFFLOAD_BASE_IDX 0
+#define regCP_HQD_SEMA_CMD 0x1fc3
+#define regCP_HQD_SEMA_CMD_BASE_IDX 0
+#define regCP_HQD_MSG_TYPE 0x1fc4
+#define regCP_HQD_MSG_TYPE_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_LO 0x1fc5
+#define regCP_HQD_ATOMIC0_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC0_PREOP_HI 0x1fc6
+#define regCP_HQD_ATOMIC0_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_LO 0x1fc7
+#define regCP_HQD_ATOMIC1_PREOP_LO_BASE_IDX 0
+#define regCP_HQD_ATOMIC1_PREOP_HI 0x1fc8
+#define regCP_HQD_ATOMIC1_PREOP_HI_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER0 0x1fc9
+#define regCP_HQD_HQ_SCHEDULER0_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS0 0x1fc9
+#define regCP_HQD_HQ_STATUS0_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL0 0x1fca
+#define regCP_HQD_HQ_CONTROL0_BASE_IDX 0
+#define regCP_HQD_HQ_SCHEDULER1 0x1fca
+#define regCP_HQD_HQ_SCHEDULER1_BASE_IDX 0
+#define regCP_MQD_CONTROL 0x1fcb
+#define regCP_MQD_CONTROL_BASE_IDX 0
+#define regCP_HQD_HQ_STATUS1 0x1fcc
+#define regCP_HQD_HQ_STATUS1_BASE_IDX 0
+#define regCP_HQD_HQ_CONTROL1 0x1fcd
+#define regCP_HQD_HQ_CONTROL1_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR 0x1fce
+#define regCP_HQD_EOP_BASE_ADDR_BASE_IDX 0
+#define regCP_HQD_EOP_BASE_ADDR_HI 0x1fcf
+#define regCP_HQD_EOP_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_EOP_CONTROL 0x1fd0
+#define regCP_HQD_EOP_CONTROL_BASE_IDX 0
+#define regCP_HQD_EOP_RPTR 0x1fd1
+#define regCP_HQD_EOP_RPTR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR 0x1fd2
+#define regCP_HQD_EOP_WPTR_BASE_IDX 0
+#define regCP_HQD_EOP_EVENTS 0x1fd3
+#define regCP_HQD_EOP_EVENTS_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO 0x1fd4
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_LO_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI 0x1fd5
+#define regCP_HQD_CTX_SAVE_BASE_ADDR_HI_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_CONTROL 0x1fd6
+#define regCP_HQD_CTX_SAVE_CONTROL_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_OFFSET 0x1fd7
+#define regCP_HQD_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_CNTL_STACK_SIZE 0x1fd8
+#define regCP_HQD_CNTL_STACK_SIZE_BASE_IDX 0
+#define regCP_HQD_WG_STATE_OFFSET 0x1fd9
+#define regCP_HQD_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_CTX_SAVE_SIZE 0x1fda
+#define regCP_HQD_CTX_SAVE_SIZE_BASE_IDX 0
+#define regCP_HQD_GDS_RESOURCE_STATE 0x1fdb
+#define regCP_HQD_GDS_RESOURCE_STATE_BASE_IDX 0
+#define regCP_HQD_ERROR 0x1fdc
+#define regCP_HQD_ERROR_BASE_IDX 0
+#define regCP_HQD_EOP_WPTR_MEM 0x1fdd
+#define regCP_HQD_EOP_WPTR_MEM_BASE_IDX 0
+#define regCP_HQD_AQL_CONTROL 0x1fde
+#define regCP_HQD_AQL_CONTROL_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_LO 0x1fdf
+#define regCP_HQD_PQ_WPTR_LO_BASE_IDX 0
+#define regCP_HQD_PQ_WPTR_HI 0x1fe0
+#define regCP_HQD_PQ_WPTR_HI_BASE_IDX 0
+#define regCP_HQD_SUSPEND_CNTL_STACK_OFFSET 0x1fe1
+#define regCP_HQD_SUSPEND_CNTL_STACK_OFFSET_BASE_IDX 0
+#define regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT 0x1fe2
+#define regCP_HQD_SUSPEND_CNTL_STACK_DW_CNT_BASE_IDX 0
+#define regCP_HQD_SUSPEND_WG_STATE_OFFSET 0x1fe3
+#define regCP_HQD_SUSPEND_WG_STATE_OFFSET_BASE_IDX 0
+#define regCP_HQD_DDID_RPTR 0x1fe4
+#define regCP_HQD_DDID_RPTR_BASE_IDX 0
+#define regCP_HQD_DDID_WPTR 0x1fe5
+#define regCP_HQD_DDID_WPTR_BASE_IDX 0
+#define regCP_HQD_DDID_INFLIGHT_COUNT 0x1fe6
+#define regCP_HQD_DDID_INFLIGHT_COUNT_BASE_IDX 0
+#define regCP_HQD_DDID_DELTA_RPT_COUNT 0x1fe7
+#define regCP_HQD_DDID_DELTA_RPT_COUNT_BASE_IDX 0
+#define regCP_HQD_DEQUEUE_STATUS 0x1fe8
+#define regCP_HQD_DEQUEUE_STATUS_BASE_IDX 0
+
+
+// addressBlock: gc_tcpdec
+// base address: 0xca80
+#define regTCP_WATCH0_ADDR_H 0x2048
+#define regTCP_WATCH0_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH0_ADDR_L 0x2049
+#define regTCP_WATCH0_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH0_CNTL 0x204a
+#define regTCP_WATCH0_CNTL_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_H 0x204b
+#define regTCP_WATCH1_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH1_ADDR_L 0x204c
+#define regTCP_WATCH1_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH1_CNTL 0x204d
+#define regTCP_WATCH1_CNTL_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_H 0x204e
+#define regTCP_WATCH2_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH2_ADDR_L 0x204f
+#define regTCP_WATCH2_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH2_CNTL 0x2050
+#define regTCP_WATCH2_CNTL_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_H 0x2051
+#define regTCP_WATCH3_ADDR_H_BASE_IDX 0
+#define regTCP_WATCH3_ADDR_L 0x2052
+#define regTCP_WATCH3_ADDR_L_BASE_IDX 0
+#define regTCP_WATCH3_CNTL 0x2053
+#define regTCP_WATCH3_CNTL_BASE_IDX 0
+
+
+// addressBlock: gc_gdspdec
+// base address: 0xcc00
+#define regGDS_VMID0_BASE 0x20a0
+#define regGDS_VMID0_BASE_BASE_IDX 0
+#define regGDS_VMID0_SIZE 0x20a1
+#define regGDS_VMID0_SIZE_BASE_IDX 0
+#define regGDS_VMID1_BASE 0x20a2
+#define regGDS_VMID1_BASE_BASE_IDX 0
+#define regGDS_VMID1_SIZE 0x20a3
+#define regGDS_VMID1_SIZE_BASE_IDX 0
+#define regGDS_VMID2_BASE 0x20a4
+#define regGDS_VMID2_BASE_BASE_IDX 0
+#define regGDS_VMID2_SIZE 0x20a5
+#define regGDS_VMID2_SIZE_BASE_IDX 0
+#define regGDS_VMID3_BASE 0x20a6
+#define regGDS_VMID3_BASE_BASE_IDX 0
+#define regGDS_VMID3_SIZE 0x20a7
+#define regGDS_VMID3_SIZE_BASE_IDX 0
+#define regGDS_VMID4_BASE 0x20a8
+#define regGDS_VMID4_BASE_BASE_IDX 0
+#define regGDS_VMID4_SIZE 0x20a9
+#define regGDS_VMID4_SIZE_BASE_IDX 0
+#define regGDS_VMID5_BASE 0x20aa
+#define regGDS_VMID5_BASE_BASE_IDX 0
+#define regGDS_VMID5_SIZE 0x20ab
+#define regGDS_VMID5_SIZE_BASE_IDX 0
+#define regGDS_VMID6_BASE 0x20ac
+#define regGDS_VMID6_BASE_BASE_IDX 0
+#define regGDS_VMID6_SIZE 0x20ad
+#define regGDS_VMID6_SIZE_BASE_IDX 0
+#define regGDS_VMID7_BASE 0x20ae
+#define regGDS_VMID7_BASE_BASE_IDX 0
+#define regGDS_VMID7_SIZE 0x20af
+#define regGDS_VMID7_SIZE_BASE_IDX 0
+#define regGDS_VMID8_BASE 0x20b0
+#define regGDS_VMID8_BASE_BASE_IDX 0
+#define regGDS_VMID8_SIZE 0x20b1
+#define regGDS_VMID8_SIZE_BASE_IDX 0
+#define regGDS_VMID9_BASE 0x20b2
+#define regGDS_VMID9_BASE_BASE_IDX 0
+#define regGDS_VMID9_SIZE 0x20b3
+#define regGDS_VMID9_SIZE_BASE_IDX 0
+#define regGDS_VMID10_BASE 0x20b4
+#define regGDS_VMID10_BASE_BASE_IDX 0
+#define regGDS_VMID10_SIZE 0x20b5
+#define regGDS_VMID10_SIZE_BASE_IDX 0
+#define regGDS_VMID11_BASE 0x20b6
+#define regGDS_VMID11_BASE_BASE_IDX 0
+#define regGDS_VMID11_SIZE 0x20b7
+#define regGDS_VMID11_SIZE_BASE_IDX 0
+#define regGDS_VMID12_BASE 0x20b8
+#define regGDS_VMID12_BASE_BASE_IDX 0
+#define regGDS_VMID12_SIZE 0x20b9
+#define regGDS_VMID12_SIZE_BASE_IDX 0
+#define regGDS_VMID13_BASE 0x20ba
+#define regGDS_VMID13_BASE_BASE_IDX 0
+#define regGDS_VMID13_SIZE 0x20bb
+#define regGDS_VMID13_SIZE_BASE_IDX 0
+#define regGDS_VMID14_BASE 0x20bc
+#define regGDS_VMID14_BASE_BASE_IDX 0
+#define regGDS_VMID14_SIZE 0x20bd
+#define regGDS_VMID14_SIZE_BASE_IDX 0
+#define regGDS_VMID15_BASE 0x20be
+#define regGDS_VMID15_BASE_BASE_IDX 0
+#define regGDS_VMID15_SIZE 0x20bf
+#define regGDS_VMID15_SIZE_BASE_IDX 0
+#define regGDS_GWS_VMID0 0x20c0
+#define regGDS_GWS_VMID0_BASE_IDX 0
+#define regGDS_GWS_VMID1 0x20c1
+#define regGDS_GWS_VMID1_BASE_IDX 0
+#define regGDS_GWS_VMID2 0x20c2
+#define regGDS_GWS_VMID2_BASE_IDX 0
+#define regGDS_GWS_VMID3 0x20c3
+#define regGDS_GWS_VMID3_BASE_IDX 0
+#define regGDS_GWS_VMID4 0x20c4
+#define regGDS_GWS_VMID4_BASE_IDX 0
+#define regGDS_GWS_VMID5 0x20c5
+#define regGDS_GWS_VMID5_BASE_IDX 0
+#define regGDS_GWS_VMID6 0x20c6
+#define regGDS_GWS_VMID6_BASE_IDX 0
+#define regGDS_GWS_VMID7 0x20c7
+#define regGDS_GWS_VMID7_BASE_IDX 0
+#define regGDS_GWS_VMID8 0x20c8
+#define regGDS_GWS_VMID8_BASE_IDX 0
+#define regGDS_GWS_VMID9 0x20c9
+#define regGDS_GWS_VMID9_BASE_IDX 0
+#define regGDS_GWS_VMID10 0x20ca
+#define regGDS_GWS_VMID10_BASE_IDX 0
+#define regGDS_GWS_VMID11 0x20cb
+#define regGDS_GWS_VMID11_BASE_IDX 0
+#define regGDS_GWS_VMID12 0x20cc
+#define regGDS_GWS_VMID12_BASE_IDX 0
+#define regGDS_GWS_VMID13 0x20cd
+#define regGDS_GWS_VMID13_BASE_IDX 0
+#define regGDS_GWS_VMID14 0x20ce
+#define regGDS_GWS_VMID14_BASE_IDX 0
+#define regGDS_GWS_VMID15 0x20cf
+#define regGDS_GWS_VMID15_BASE_IDX 0
+#define regGDS_OA_VMID0 0x20d0
+#define regGDS_OA_VMID0_BASE_IDX 0
+#define regGDS_OA_VMID1 0x20d1
+#define regGDS_OA_VMID1_BASE_IDX 0
+#define regGDS_OA_VMID2 0x20d2
+#define regGDS_OA_VMID2_BASE_IDX 0
+#define regGDS_OA_VMID3 0x20d3
+#define regGDS_OA_VMID3_BASE_IDX 0
+#define regGDS_OA_VMID4 0x20d4
+#define regGDS_OA_VMID4_BASE_IDX 0
+#define regGDS_OA_VMID5 0x20d5
+#define regGDS_OA_VMID5_BASE_IDX 0
+#define regGDS_OA_VMID6 0x20d6
+#define regGDS_OA_VMID6_BASE_IDX 0
+#define regGDS_OA_VMID7 0x20d7
+#define regGDS_OA_VMID7_BASE_IDX 0
+#define regGDS_OA_VMID8 0x20d8
+#define regGDS_OA_VMID8_BASE_IDX 0
+#define regGDS_OA_VMID9 0x20d9
+#define regGDS_OA_VMID9_BASE_IDX 0
+#define regGDS_OA_VMID10 0x20da
+#define regGDS_OA_VMID10_BASE_IDX 0
+#define regGDS_OA_VMID11 0x20db
+#define regGDS_OA_VMID11_BASE_IDX 0
+#define regGDS_OA_VMID12 0x20dc
+#define regGDS_OA_VMID12_BASE_IDX 0
+#define regGDS_OA_VMID13 0x20dd
+#define regGDS_OA_VMID13_BASE_IDX 0
+#define regGDS_OA_VMID14 0x20de
+#define regGDS_OA_VMID14_BASE_IDX 0
+#define regGDS_OA_VMID15 0x20df
+#define regGDS_OA_VMID15_BASE_IDX 0
+#define regGDS_GWS_RESET0 0x20e4
+#define regGDS_GWS_RESET0_BASE_IDX 0
+#define regGDS_GWS_RESET1 0x20e5
+#define regGDS_GWS_RESET1_BASE_IDX 0
+#define regGDS_GWS_RESOURCE_RESET 0x20e6
+#define regGDS_GWS_RESOURCE_RESET_BASE_IDX 0
+#define regGDS_COMPUTE_MAX_WAVE_ID 0x20e8
+#define regGDS_COMPUTE_MAX_WAVE_ID_BASE_IDX 0
+#define regGDS_OA_RESET_MASK 0x20e9
+#define regGDS_OA_RESET_MASK_BASE_IDX 0
+#define regGDS_OA_RESET 0x20ea
+#define regGDS_OA_RESET_BASE_IDX 0
+#define regGDS_CS_CTXSW_STATUS 0x20ed
+#define regGDS_CS_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT0 0x20ee
+#define regGDS_CS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT1 0x20ef
+#define regGDS_CS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT2 0x20f0
+#define regGDS_CS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_CS_CTXSW_CNT3 0x20f1
+#define regGDS_CS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_GFX_CTXSW_STATUS 0x20f2
+#define regGDS_GFX_CTXSW_STATUS_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT0 0x20f7
+#define regGDS_PS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT1 0x20f8
+#define regGDS_PS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT2 0x20f9
+#define regGDS_PS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_PS_CTXSW_CNT3 0x20fa
+#define regGDS_PS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_PS_CTXSW_IDX 0x20fb
+#define regGDS_PS_CTXSW_IDX_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT0 0x2117
+#define regGDS_GS_CTXSW_CNT0_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT1 0x2118
+#define regGDS_GS_CTXSW_CNT1_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT2 0x2119
+#define regGDS_GS_CTXSW_CNT2_BASE_IDX 0
+#define regGDS_GS_CTXSW_CNT3 0x211a
+#define regGDS_GS_CTXSW_CNT3_BASE_IDX 0
+#define regGDS_MEMORY_CLEAN 0x211f
+#define regGDS_MEMORY_CLEAN_BASE_IDX 0
+
+
+// addressBlock: gc_rasdec
+// base address: 0xce00
+#define regRAS_SIGNATURE_CONTROL 0x2120
+#define regRAS_SIGNATURE_CONTROL_BASE_IDX 0
+#define regRAS_SIGNATURE_MASK 0x2121
+#define regRAS_SIGNATURE_MASK_BASE_IDX 0
+#define regRAS_SX_SIGNATURE0 0x2122
+#define regRAS_SX_SIGNATURE0_BASE_IDX 0
+#define regRAS_SX_SIGNATURE1 0x2123
+#define regRAS_SX_SIGNATURE1_BASE_IDX 0
+#define regRAS_SX_SIGNATURE2 0x2124
+#define regRAS_SX_SIGNATURE2_BASE_IDX 0
+#define regRAS_SX_SIGNATURE3 0x2125
+#define regRAS_SX_SIGNATURE3_BASE_IDX 0
+#define regRAS_DB_SIGNATURE0 0x212b
+#define regRAS_DB_SIGNATURE0_BASE_IDX 0
+#define regRAS_PA_SIGNATURE0 0x212c
+#define regRAS_PA_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE0 0x212f
+#define regRAS_SC_SIGNATURE0_BASE_IDX 0
+#define regRAS_SC_SIGNATURE1 0x2130
+#define regRAS_SC_SIGNATURE1_BASE_IDX 0
+#define regRAS_SC_SIGNATURE2 0x2131
+#define regRAS_SC_SIGNATURE2_BASE_IDX 0
+#define regRAS_SC_SIGNATURE3 0x2132
+#define regRAS_SC_SIGNATURE3_BASE_IDX 0
+#define regRAS_SC_SIGNATURE4 0x2133
+#define regRAS_SC_SIGNATURE4_BASE_IDX 0
+#define regRAS_SC_SIGNATURE5 0x2134
+#define regRAS_SC_SIGNATURE5_BASE_IDX 0
+#define regRAS_SC_SIGNATURE6 0x2135
+#define regRAS_SC_SIGNATURE6_BASE_IDX 0
+#define regRAS_SC_SIGNATURE7 0x2136
+#define regRAS_SC_SIGNATURE7_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE0 0x2139
+#define regRAS_SPI_SIGNATURE0_BASE_IDX 0
+#define regRAS_SPI_SIGNATURE1 0x213a
+#define regRAS_SPI_SIGNATURE1_BASE_IDX 0
+#define regRAS_CB_SIGNATURE0 0x213d
+#define regRAS_CB_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE0 0x213e
+#define regRAS_BCI_SIGNATURE0_BASE_IDX 0
+#define regRAS_BCI_SIGNATURE1 0x213f
+#define regRAS_BCI_SIGNATURE1_BASE_IDX 0
+
+
+// addressBlock: gc_gusdec
+// base address: 0x33000
+#define regGUS_IO_RD_COMBINE_FLUSH 0x2c00
+#define regGUS_IO_RD_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_IO_WR_COMBINE_FLUSH 0x2c01
+#define regGUS_IO_WR_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_IO_RD_PRI_AGE_RATE 0x2c02
+#define regGUS_IO_RD_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_IO_WR_PRI_AGE_RATE 0x2c03
+#define regGUS_IO_WR_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_IO_RD_PRI_AGE_COEFF 0x2c04
+#define regGUS_IO_RD_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_IO_WR_PRI_AGE_COEFF 0x2c05
+#define regGUS_IO_WR_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUEUING 0x2c06
+#define regGUS_IO_RD_PRI_QUEUING_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUEUING 0x2c07
+#define regGUS_IO_WR_PRI_QUEUING_BASE_IDX 1
+#define regGUS_IO_RD_PRI_FIXED 0x2c08
+#define regGUS_IO_RD_PRI_FIXED_BASE_IDX 1
+#define regGUS_IO_WR_PRI_FIXED 0x2c09
+#define regGUS_IO_WR_PRI_FIXED_BASE_IDX 1
+#define regGUS_IO_RD_PRI_URGENCY_COEFF 0x2c0a
+#define regGUS_IO_RD_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_IO_WR_PRI_URGENCY_COEFF 0x2c0b
+#define regGUS_IO_WR_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_IO_RD_PRI_URGENCY_MODE 0x2c0c
+#define regGUS_IO_RD_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_IO_WR_PRI_URGENCY_MODE 0x2c0d
+#define regGUS_IO_WR_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI1 0x2c0e
+#define regGUS_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI2 0x2c0f
+#define regGUS_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI3 0x2c10
+#define regGUS_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT_PRI4 0x2c11
+#define regGUS_IO_RD_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI1 0x2c12
+#define regGUS_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI2 0x2c13
+#define regGUS_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI3 0x2c14
+#define regGUS_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT_PRI4 0x2c15
+#define regGUS_IO_WR_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI1 0x2c16
+#define regGUS_IO_RD_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI2 0x2c17
+#define regGUS_IO_RD_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI3 0x2c18
+#define regGUS_IO_RD_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_IO_RD_PRI_QUANT1_PRI4 0x2c19
+#define regGUS_IO_RD_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI1 0x2c1a
+#define regGUS_IO_WR_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI2 0x2c1b
+#define regGUS_IO_WR_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI3 0x2c1c
+#define regGUS_IO_WR_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_IO_WR_PRI_QUANT1_PRI4 0x2c1d
+#define regGUS_IO_WR_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_DRAM_COMBINE_FLUSH 0x2c1e
+#define regGUS_DRAM_COMBINE_FLUSH_BASE_IDX 1
+#define regGUS_DRAM_COMBINE_RD_WR_EN 0x2c1f
+#define regGUS_DRAM_COMBINE_RD_WR_EN_BASE_IDX 1
+#define regGUS_DRAM_PRI_AGE_RATE 0x2c20
+#define regGUS_DRAM_PRI_AGE_RATE_BASE_IDX 1
+#define regGUS_DRAM_PRI_AGE_COEFF 0x2c21
+#define regGUS_DRAM_PRI_AGE_COEFF_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUEUING 0x2c22
+#define regGUS_DRAM_PRI_QUEUING_BASE_IDX 1
+#define regGUS_DRAM_PRI_FIXED 0x2c23
+#define regGUS_DRAM_PRI_FIXED_BASE_IDX 1
+#define regGUS_DRAM_PRI_URGENCY_COEFF 0x2c24
+#define regGUS_DRAM_PRI_URGENCY_COEFF_BASE_IDX 1
+#define regGUS_DRAM_PRI_URGENCY_MODE 0x2c25
+#define regGUS_DRAM_PRI_URGENCY_MODE_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI1 0x2c26
+#define regGUS_DRAM_PRI_QUANT_PRI1_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI2 0x2c27
+#define regGUS_DRAM_PRI_QUANT_PRI2_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI3 0x2c28
+#define regGUS_DRAM_PRI_QUANT_PRI3_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI4 0x2c29
+#define regGUS_DRAM_PRI_QUANT_PRI4_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT_PRI5 0x2c2a
+#define regGUS_DRAM_PRI_QUANT_PRI5_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI1 0x2c2b
+#define regGUS_DRAM_PRI_QUANT1_PRI1_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI2 0x2c2c
+#define regGUS_DRAM_PRI_QUANT1_PRI2_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI3 0x2c2d
+#define regGUS_DRAM_PRI_QUANT1_PRI3_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI4 0x2c2e
+#define regGUS_DRAM_PRI_QUANT1_PRI4_BASE_IDX 1
+#define regGUS_DRAM_PRI_QUANT1_PRI5 0x2c2f
+#define regGUS_DRAM_PRI_QUANT1_PRI5_BASE_IDX 1
+#define regGUS_IO_GROUP_BURST 0x2c30
+#define regGUS_IO_GROUP_BURST_BASE_IDX 1
+#define regGUS_DRAM_GROUP_BURST 0x2c31
+#define regGUS_DRAM_GROUP_BURST_BASE_IDX 1
+#define regGUS_SDP_ARB_FINAL 0x2c32
+#define regGUS_SDP_ARB_FINAL_BASE_IDX 1
+#define regGUS_SDP_QOS_VC_PRIORITY 0x2c33
+#define regGUS_SDP_QOS_VC_PRIORITY_BASE_IDX 1
+#define regGUS_SDP_CREDITS 0x2c34
+#define regGUS_SDP_CREDITS_BASE_IDX 1
+#define regGUS_SDP_TAG_RESERVE0 0x2c35
+#define regGUS_SDP_TAG_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_TAG_RESERVE1 0x2c36
+#define regGUS_SDP_TAG_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_VCC_RESERVE0 0x2c37
+#define regGUS_SDP_VCC_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_VCC_RESERVE1 0x2c38
+#define regGUS_SDP_VCC_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_VCD_RESERVE0 0x2c39
+#define regGUS_SDP_VCD_RESERVE0_BASE_IDX 1
+#define regGUS_SDP_VCD_RESERVE1 0x2c3a
+#define regGUS_SDP_VCD_RESERVE1_BASE_IDX 1
+#define regGUS_SDP_REQ_CNTL 0x2c3b
+#define regGUS_SDP_REQ_CNTL_BASE_IDX 1
+#define regGUS_MISC 0x2c3c
+#define regGUS_MISC_BASE_IDX 1
+#define regGUS_LATENCY_SAMPLING 0x2c3d
+#define regGUS_LATENCY_SAMPLING_BASE_IDX 1
+#define regGUS_ERR_STATUS 0x2c3e
+#define regGUS_ERR_STATUS_BASE_IDX 1
+#define regGUS_MISC2 0x2c3f
+#define regGUS_MISC2_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_CMDCREDITS0 0x2c40
+#define regGUS_SDP_BACKDOOR_CMDCREDITS0_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_CMDCREDITS1 0x2c41
+#define regGUS_SDP_BACKDOOR_CMDCREDITS1_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_DATACREDITS0 0x2c42
+#define regGUS_SDP_BACKDOOR_DATACREDITS0_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_DATACREDITS1 0x2c43
+#define regGUS_SDP_BACKDOOR_DATACREDITS1_BASE_IDX 1
+#define regGUS_SDP_BACKDOOR_MISCCREDITS 0x2c44
+#define regGUS_SDP_BACKDOOR_MISCCREDITS_BASE_IDX 1
+#define regGUS_SDP_ENABLE 0x2c45
+#define regGUS_SDP_ENABLE_BASE_IDX 1
+#define regGUS_L1_CH0_CMD_IN 0x2c46
+#define regGUS_L1_CH0_CMD_IN_BASE_IDX 1
+#define regGUS_L1_CH0_CMD_OUT 0x2c47
+#define regGUS_L1_CH0_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_IN 0x2c48
+#define regGUS_L1_CH0_DATA_IN_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_OUT 0x2c49
+#define regGUS_L1_CH0_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_U_IN 0x2c4a
+#define regGUS_L1_CH0_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_CH0_DATA_U_OUT 0x2c4b
+#define regGUS_L1_CH0_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_CMD_IN 0x2c4c
+#define regGUS_L1_CH1_CMD_IN_BASE_IDX 1
+#define regGUS_L1_CH1_CMD_OUT 0x2c4d
+#define regGUS_L1_CH1_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_IN 0x2c4e
+#define regGUS_L1_CH1_DATA_IN_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_OUT 0x2c4f
+#define regGUS_L1_CH1_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_U_IN 0x2c50
+#define regGUS_L1_CH1_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_CH1_DATA_U_OUT 0x2c51
+#define regGUS_L1_CH1_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_CMD_IN 0x2c52
+#define regGUS_L1_SA0_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA0_CMD_OUT 0x2c53
+#define regGUS_L1_SA0_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_IN 0x2c54
+#define regGUS_L1_SA0_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_OUT 0x2c55
+#define regGUS_L1_SA0_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_U_IN 0x2c56
+#define regGUS_L1_SA0_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA0_DATA_U_OUT 0x2c57
+#define regGUS_L1_SA0_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_CMD_IN 0x2c58
+#define regGUS_L1_SA1_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA1_CMD_OUT 0x2c59
+#define regGUS_L1_SA1_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_IN 0x2c5a
+#define regGUS_L1_SA1_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_OUT 0x2c5b
+#define regGUS_L1_SA1_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_U_IN 0x2c5c
+#define regGUS_L1_SA1_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA1_DATA_U_OUT 0x2c5d
+#define regGUS_L1_SA1_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_CMD_IN 0x2c5e
+#define regGUS_L1_SA2_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA2_CMD_OUT 0x2c5f
+#define regGUS_L1_SA2_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_IN 0x2c60
+#define regGUS_L1_SA2_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_OUT 0x2c61
+#define regGUS_L1_SA2_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_U_IN 0x2c62
+#define regGUS_L1_SA2_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA2_DATA_U_OUT 0x2c63
+#define regGUS_L1_SA2_DATA_U_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_CMD_IN 0x2c64
+#define regGUS_L1_SA3_CMD_IN_BASE_IDX 1
+#define regGUS_L1_SA3_CMD_OUT 0x2c65
+#define regGUS_L1_SA3_CMD_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_IN 0x2c66
+#define regGUS_L1_SA3_DATA_IN_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_OUT 0x2c67
+#define regGUS_L1_SA3_DATA_OUT_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_U_IN 0x2c68
+#define regGUS_L1_SA3_DATA_U_IN_BASE_IDX 1
+#define regGUS_L1_SA3_DATA_U_OUT 0x2c69
+#define regGUS_L1_SA3_DATA_U_OUT_BASE_IDX 1
+#define regGUS_MISC3 0x2c6a
+#define regGUS_MISC3_BASE_IDX 1
+#define regGUS_WRRSP_FIFO_CNTL 0x2c6b
+#define regGUS_WRRSP_FIFO_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gfxdec0
+// base address: 0x28000
+#define regDB_RENDER_CONTROL 0x0000
+#define regDB_RENDER_CONTROL_BASE_IDX 1
+#define regDB_COUNT_CONTROL 0x0001
+#define regDB_COUNT_CONTROL_BASE_IDX 1
+#define regDB_DEPTH_VIEW 0x0002
+#define regDB_DEPTH_VIEW_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE 0x0003
+#define regDB_RENDER_OVERRIDE_BASE_IDX 1
+#define regDB_RENDER_OVERRIDE2 0x0004
+#define regDB_RENDER_OVERRIDE2_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE 0x0005
+#define regDB_HTILE_DATA_BASE_BASE_IDX 1
+#define regDB_DEPTH_SIZE_XY 0x0007
+#define regDB_DEPTH_SIZE_XY_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MIN 0x0008
+#define regDB_DEPTH_BOUNDS_MIN_BASE_IDX 1
+#define regDB_DEPTH_BOUNDS_MAX 0x0009
+#define regDB_DEPTH_BOUNDS_MAX_BASE_IDX 1
+#define regDB_STENCIL_CLEAR 0x000a
+#define regDB_STENCIL_CLEAR_BASE_IDX 1
+#define regDB_DEPTH_CLEAR 0x000b
+#define regDB_DEPTH_CLEAR_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_TL 0x000c
+#define regPA_SC_SCREEN_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_SCREEN_SCISSOR_BR 0x000d
+#define regPA_SC_SCREEN_SCISSOR_BR_BASE_IDX 1
+#define regDB_RESERVED_REG_2 0x000f
+#define regDB_RESERVED_REG_2_BASE_IDX 1
+#define regDB_Z_INFO 0x0010
+#define regDB_Z_INFO_BASE_IDX 1
+#define regDB_STENCIL_INFO 0x0011
+#define regDB_STENCIL_INFO_BASE_IDX 1
+#define regDB_Z_READ_BASE 0x0012
+#define regDB_Z_READ_BASE_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE 0x0013
+#define regDB_STENCIL_READ_BASE_BASE_IDX 1
+#define regDB_Z_WRITE_BASE 0x0014
+#define regDB_Z_WRITE_BASE_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE 0x0015
+#define regDB_STENCIL_WRITE_BASE_BASE_IDX 1
+#define regDB_RESERVED_REG_1 0x0016
+#define regDB_RESERVED_REG_1_BASE_IDX 1
+#define regDB_RESERVED_REG_3 0x0017
+#define regDB_RESERVED_REG_3_BASE_IDX 1
+#define regDB_Z_READ_BASE_HI 0x001a
+#define regDB_Z_READ_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_READ_BASE_HI 0x001b
+#define regDB_STENCIL_READ_BASE_HI_BASE_IDX 1
+#define regDB_Z_WRITE_BASE_HI 0x001c
+#define regDB_Z_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_STENCIL_WRITE_BASE_HI 0x001d
+#define regDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
+#define regDB_HTILE_DATA_BASE_HI 0x001e
+#define regDB_HTILE_DATA_BASE_HI_BASE_IDX 1
+#define regDB_RMI_L2_CACHE_CONTROL 0x001f
+#define regDB_RMI_L2_CACHE_CONTROL_BASE_IDX 1
+#define regTA_BC_BASE_ADDR 0x0020
+#define regTA_BC_BASE_ADDR_BASE_IDX 1
+#define regTA_BC_BASE_ADDR_HI 0x0021
+#define regTA_BC_BASE_ADDR_HI_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_0 0x007a
+#define regCOHER_DEST_BASE_HI_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_1 0x007b
+#define regCOHER_DEST_BASE_HI_1_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_2 0x007c
+#define regCOHER_DEST_BASE_HI_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_HI_3 0x007d
+#define regCOHER_DEST_BASE_HI_3_BASE_IDX 1
+#define regCOHER_DEST_BASE_2 0x007e
+#define regCOHER_DEST_BASE_2_BASE_IDX 1
+#define regCOHER_DEST_BASE_3 0x007f
+#define regCOHER_DEST_BASE_3_BASE_IDX 1
+#define regPA_SC_WINDOW_OFFSET 0x0080
+#define regPA_SC_WINDOW_OFFSET_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_TL 0x0081
+#define regPA_SC_WINDOW_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_WINDOW_SCISSOR_BR 0x0082
+#define regPA_SC_WINDOW_SCISSOR_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_RULE 0x0083
+#define regPA_SC_CLIPRECT_RULE_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_TL 0x0084
+#define regPA_SC_CLIPRECT_0_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_0_BR 0x0085
+#define regPA_SC_CLIPRECT_0_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_TL 0x0086
+#define regPA_SC_CLIPRECT_1_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_1_BR 0x0087
+#define regPA_SC_CLIPRECT_1_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_TL 0x0088
+#define regPA_SC_CLIPRECT_2_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_2_BR 0x0089
+#define regPA_SC_CLIPRECT_2_BR_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_TL 0x008a
+#define regPA_SC_CLIPRECT_3_TL_BASE_IDX 1
+#define regPA_SC_CLIPRECT_3_BR 0x008b
+#define regPA_SC_CLIPRECT_3_BR_BASE_IDX 1
+#define regPA_SC_EDGERULE 0x008c
+#define regPA_SC_EDGERULE_BASE_IDX 1
+#define regPA_SU_HARDWARE_SCREEN_OFFSET 0x008d
+#define regPA_SU_HARDWARE_SCREEN_OFFSET_BASE_IDX 1
+#define regCB_TARGET_MASK 0x008e
+#define regCB_TARGET_MASK_BASE_IDX 1
+#define regCB_SHADER_MASK 0x008f
+#define regCB_SHADER_MASK_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_TL 0x0090
+#define regPA_SC_GENERIC_SCISSOR_TL_BASE_IDX 1
+#define regPA_SC_GENERIC_SCISSOR_BR 0x0091
+#define regPA_SC_GENERIC_SCISSOR_BR_BASE_IDX 1
+#define regCOHER_DEST_BASE_0 0x0092
+#define regCOHER_DEST_BASE_0_BASE_IDX 1
+#define regCOHER_DEST_BASE_1 0x0093
+#define regCOHER_DEST_BASE_1_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_TL 0x0094
+#define regPA_SC_VPORT_SCISSOR_0_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_0_BR 0x0095
+#define regPA_SC_VPORT_SCISSOR_0_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_TL 0x0096
+#define regPA_SC_VPORT_SCISSOR_1_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_1_BR 0x0097
+#define regPA_SC_VPORT_SCISSOR_1_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_TL 0x0098
+#define regPA_SC_VPORT_SCISSOR_2_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_2_BR 0x0099
+#define regPA_SC_VPORT_SCISSOR_2_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_TL 0x009a
+#define regPA_SC_VPORT_SCISSOR_3_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_3_BR 0x009b
+#define regPA_SC_VPORT_SCISSOR_3_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_TL 0x009c
+#define regPA_SC_VPORT_SCISSOR_4_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_4_BR 0x009d
+#define regPA_SC_VPORT_SCISSOR_4_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_TL 0x009e
+#define regPA_SC_VPORT_SCISSOR_5_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_5_BR 0x009f
+#define regPA_SC_VPORT_SCISSOR_5_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_TL 0x00a0
+#define regPA_SC_VPORT_SCISSOR_6_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_6_BR 0x00a1
+#define regPA_SC_VPORT_SCISSOR_6_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_TL 0x00a2
+#define regPA_SC_VPORT_SCISSOR_7_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_7_BR 0x00a3
+#define regPA_SC_VPORT_SCISSOR_7_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_TL 0x00a4
+#define regPA_SC_VPORT_SCISSOR_8_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_8_BR 0x00a5
+#define regPA_SC_VPORT_SCISSOR_8_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_TL 0x00a6
+#define regPA_SC_VPORT_SCISSOR_9_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_9_BR 0x00a7
+#define regPA_SC_VPORT_SCISSOR_9_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_TL 0x00a8
+#define regPA_SC_VPORT_SCISSOR_10_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_10_BR 0x00a9
+#define regPA_SC_VPORT_SCISSOR_10_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_TL 0x00aa
+#define regPA_SC_VPORT_SCISSOR_11_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_11_BR 0x00ab
+#define regPA_SC_VPORT_SCISSOR_11_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_TL 0x00ac
+#define regPA_SC_VPORT_SCISSOR_12_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_12_BR 0x00ad
+#define regPA_SC_VPORT_SCISSOR_12_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_TL 0x00ae
+#define regPA_SC_VPORT_SCISSOR_13_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_13_BR 0x00af
+#define regPA_SC_VPORT_SCISSOR_13_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_TL 0x00b0
+#define regPA_SC_VPORT_SCISSOR_14_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_14_BR 0x00b1
+#define regPA_SC_VPORT_SCISSOR_14_BR_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_TL 0x00b2
+#define regPA_SC_VPORT_SCISSOR_15_TL_BASE_IDX 1
+#define regPA_SC_VPORT_SCISSOR_15_BR 0x00b3
+#define regPA_SC_VPORT_SCISSOR_15_BR_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_0 0x00b4
+#define regPA_SC_VPORT_ZMIN_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_0 0x00b5
+#define regPA_SC_VPORT_ZMAX_0_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_1 0x00b6
+#define regPA_SC_VPORT_ZMIN_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_1 0x00b7
+#define regPA_SC_VPORT_ZMAX_1_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_2 0x00b8
+#define regPA_SC_VPORT_ZMIN_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_2 0x00b9
+#define regPA_SC_VPORT_ZMAX_2_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_3 0x00ba
+#define regPA_SC_VPORT_ZMIN_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_3 0x00bb
+#define regPA_SC_VPORT_ZMAX_3_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_4 0x00bc
+#define regPA_SC_VPORT_ZMIN_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_4 0x00bd
+#define regPA_SC_VPORT_ZMAX_4_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_5 0x00be
+#define regPA_SC_VPORT_ZMIN_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_5 0x00bf
+#define regPA_SC_VPORT_ZMAX_5_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_6 0x00c0
+#define regPA_SC_VPORT_ZMIN_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_6 0x00c1
+#define regPA_SC_VPORT_ZMAX_6_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_7 0x00c2
+#define regPA_SC_VPORT_ZMIN_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_7 0x00c3
+#define regPA_SC_VPORT_ZMAX_7_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_8 0x00c4
+#define regPA_SC_VPORT_ZMIN_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_8 0x00c5
+#define regPA_SC_VPORT_ZMAX_8_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_9 0x00c6
+#define regPA_SC_VPORT_ZMIN_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_9 0x00c7
+#define regPA_SC_VPORT_ZMAX_9_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_10 0x00c8
+#define regPA_SC_VPORT_ZMIN_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_10 0x00c9
+#define regPA_SC_VPORT_ZMAX_10_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_11 0x00ca
+#define regPA_SC_VPORT_ZMIN_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_11 0x00cb
+#define regPA_SC_VPORT_ZMAX_11_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_12 0x00cc
+#define regPA_SC_VPORT_ZMIN_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_12 0x00cd
+#define regPA_SC_VPORT_ZMAX_12_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_13 0x00ce
+#define regPA_SC_VPORT_ZMIN_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_13 0x00cf
+#define regPA_SC_VPORT_ZMAX_13_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_14 0x00d0
+#define regPA_SC_VPORT_ZMIN_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_14 0x00d1
+#define regPA_SC_VPORT_ZMAX_14_BASE_IDX 1
+#define regPA_SC_VPORT_ZMIN_15 0x00d2
+#define regPA_SC_VPORT_ZMIN_15_BASE_IDX 1
+#define regPA_SC_VPORT_ZMAX_15 0x00d3
+#define regPA_SC_VPORT_ZMAX_15_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG 0x00d4
+#define regPA_SC_RASTER_CONFIG_BASE_IDX 1
+#define regPA_SC_RASTER_CONFIG_1 0x00d5
+#define regPA_SC_RASTER_CONFIG_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_CONTROL 0x00d6
+#define regPA_SC_SCREEN_EXTENT_CONTROL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_OVERRIDE 0x00d7
+#define regPA_SC_TILE_STEERING_OVERRIDE_BASE_IDX 1
+#define regCP_PERFMON_CNTX_CNTL 0x00d8
+#define regCP_PERFMON_CNTX_CNTL_BASE_IDX 1
+#define regCP_PIPEID 0x00d9
+#define regCP_PIPEID_BASE_IDX 1
+#define regCP_RINGID 0x00d9
+#define regCP_RINGID_BASE_IDX 1
+#define regCP_VMID 0x00da
+#define regCP_VMID_BASE_IDX 1
+#define regCONTEXT_RESERVED_REG0 0x00db
+#define regCONTEXT_RESERVED_REG0_BASE_IDX 1
+#define regCONTEXT_RESERVED_REG1 0x00dc
+#define regCONTEXT_RESERVED_REG1_BASE_IDX 1
+#define regPA_SC_VRS_OVERRIDE_CNTL 0x00f4
+#define regPA_SC_VRS_OVERRIDE_CNTL_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE 0x00f5
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_EXT 0x00f6
+#define regPA_SC_VRS_RATE_FEEDBACK_BASE_EXT_BASE_IDX 1
+#define regPA_SC_VRS_RATE_FEEDBACK_SIZE_XY 0x00f7
+#define regPA_SC_VRS_RATE_FEEDBACK_SIZE_XY_BASE_IDX 1
+#define regPA_SC_VRS_RATE_CACHE_CNTL 0x00f9
+#define regPA_SC_VRS_RATE_CACHE_CNTL_BASE_IDX 1
+#define regPA_SC_VRS_RATE_BASE 0x00fc
+#define regPA_SC_VRS_RATE_BASE_BASE_IDX 1
+#define regPA_SC_VRS_RATE_BASE_EXT 0x00fd
+#define regPA_SC_VRS_RATE_BASE_EXT_BASE_IDX 1
+#define regPA_SC_VRS_RATE_SIZE_XY 0x00fe
+#define regPA_SC_VRS_RATE_SIZE_XY_BASE_IDX 1
+#define regVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
+#define regVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
+#define regCB_RMI_GL2_CACHE_CONTROL 0x0104
+#define regCB_RMI_GL2_CACHE_CONTROL_BASE_IDX 1
+#define regCB_BLEND_RED 0x0105
+#define regCB_BLEND_RED_BASE_IDX 1
+#define regCB_BLEND_GREEN 0x0106
+#define regCB_BLEND_GREEN_BASE_IDX 1
+#define regCB_BLEND_BLUE 0x0107
+#define regCB_BLEND_BLUE_BASE_IDX 1
+#define regCB_BLEND_ALPHA 0x0108
+#define regCB_BLEND_ALPHA_BASE_IDX 1
+#define regCB_FDCC_CONTROL 0x0109
+#define regCB_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COVERAGE_OUT_CONTROL 0x010a
+#define regCB_COVERAGE_OUT_CONTROL_BASE_IDX 1
+#define regDB_STENCIL_CONTROL 0x010b
+#define regDB_STENCIL_CONTROL_BASE_IDX 1
+#define regDB_STENCILREFMASK 0x010c
+#define regDB_STENCILREFMASK_BASE_IDX 1
+#define regDB_STENCILREFMASK_BF 0x010d
+#define regDB_STENCILREFMASK_BF_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE 0x010f
+#define regPA_CL_VPORT_XSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET 0x0110
+#define regPA_CL_VPORT_XOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE 0x0111
+#define regPA_CL_VPORT_YSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET 0x0112
+#define regPA_CL_VPORT_YOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE 0x0113
+#define regPA_CL_VPORT_ZSCALE_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET 0x0114
+#define regPA_CL_VPORT_ZOFFSET_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_1 0x0115
+#define regPA_CL_VPORT_XSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_1 0x0116
+#define regPA_CL_VPORT_XOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_1 0x0117
+#define regPA_CL_VPORT_YSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_1 0x0118
+#define regPA_CL_VPORT_YOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_1 0x0119
+#define regPA_CL_VPORT_ZSCALE_1_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_1 0x011a
+#define regPA_CL_VPORT_ZOFFSET_1_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_2 0x011b
+#define regPA_CL_VPORT_XSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_2 0x011c
+#define regPA_CL_VPORT_XOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_2 0x011d
+#define regPA_CL_VPORT_YSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_2 0x011e
+#define regPA_CL_VPORT_YOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_2 0x011f
+#define regPA_CL_VPORT_ZSCALE_2_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_2 0x0120
+#define regPA_CL_VPORT_ZOFFSET_2_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_3 0x0121
+#define regPA_CL_VPORT_XSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_3 0x0122
+#define regPA_CL_VPORT_XOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_3 0x0123
+#define regPA_CL_VPORT_YSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_3 0x0124
+#define regPA_CL_VPORT_YOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_3 0x0125
+#define regPA_CL_VPORT_ZSCALE_3_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_3 0x0126
+#define regPA_CL_VPORT_ZOFFSET_3_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_4 0x0127
+#define regPA_CL_VPORT_XSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_4 0x0128
+#define regPA_CL_VPORT_XOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_4 0x0129
+#define regPA_CL_VPORT_YSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_4 0x012a
+#define regPA_CL_VPORT_YOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_4 0x012b
+#define regPA_CL_VPORT_ZSCALE_4_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_4 0x012c
+#define regPA_CL_VPORT_ZOFFSET_4_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_5 0x012d
+#define regPA_CL_VPORT_XSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_5 0x012e
+#define regPA_CL_VPORT_XOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_5 0x012f
+#define regPA_CL_VPORT_YSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_5 0x0130
+#define regPA_CL_VPORT_YOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_5 0x0131
+#define regPA_CL_VPORT_ZSCALE_5_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_5 0x0132
+#define regPA_CL_VPORT_ZOFFSET_5_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_6 0x0133
+#define regPA_CL_VPORT_XSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_6 0x0134
+#define regPA_CL_VPORT_XOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_6 0x0135
+#define regPA_CL_VPORT_YSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_6 0x0136
+#define regPA_CL_VPORT_YOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_6 0x0137
+#define regPA_CL_VPORT_ZSCALE_6_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_6 0x0138
+#define regPA_CL_VPORT_ZOFFSET_6_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_7 0x0139
+#define regPA_CL_VPORT_XSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_7 0x013a
+#define regPA_CL_VPORT_XOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_7 0x013b
+#define regPA_CL_VPORT_YSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_7 0x013c
+#define regPA_CL_VPORT_YOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_7 0x013d
+#define regPA_CL_VPORT_ZSCALE_7_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_7 0x013e
+#define regPA_CL_VPORT_ZOFFSET_7_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_8 0x013f
+#define regPA_CL_VPORT_XSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_8 0x0140
+#define regPA_CL_VPORT_XOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_8 0x0141
+#define regPA_CL_VPORT_YSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_8 0x0142
+#define regPA_CL_VPORT_YOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_8 0x0143
+#define regPA_CL_VPORT_ZSCALE_8_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_8 0x0144
+#define regPA_CL_VPORT_ZOFFSET_8_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_9 0x0145
+#define regPA_CL_VPORT_XSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_9 0x0146
+#define regPA_CL_VPORT_XOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_9 0x0147
+#define regPA_CL_VPORT_YSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_9 0x0148
+#define regPA_CL_VPORT_YOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_9 0x0149
+#define regPA_CL_VPORT_ZSCALE_9_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_9 0x014a
+#define regPA_CL_VPORT_ZOFFSET_9_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_10 0x014b
+#define regPA_CL_VPORT_XSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_10 0x014c
+#define regPA_CL_VPORT_XOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_10 0x014d
+#define regPA_CL_VPORT_YSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_10 0x014e
+#define regPA_CL_VPORT_YOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_10 0x014f
+#define regPA_CL_VPORT_ZSCALE_10_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_10 0x0150
+#define regPA_CL_VPORT_ZOFFSET_10_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_11 0x0151
+#define regPA_CL_VPORT_XSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_11 0x0152
+#define regPA_CL_VPORT_XOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_11 0x0153
+#define regPA_CL_VPORT_YSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_11 0x0154
+#define regPA_CL_VPORT_YOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_11 0x0155
+#define regPA_CL_VPORT_ZSCALE_11_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_11 0x0156
+#define regPA_CL_VPORT_ZOFFSET_11_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_12 0x0157
+#define regPA_CL_VPORT_XSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_12 0x0158
+#define regPA_CL_VPORT_XOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_12 0x0159
+#define regPA_CL_VPORT_YSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_12 0x015a
+#define regPA_CL_VPORT_YOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_12 0x015b
+#define regPA_CL_VPORT_ZSCALE_12_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_12 0x015c
+#define regPA_CL_VPORT_ZOFFSET_12_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_13 0x015d
+#define regPA_CL_VPORT_XSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_13 0x015e
+#define regPA_CL_VPORT_XOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_13 0x015f
+#define regPA_CL_VPORT_YSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_13 0x0160
+#define regPA_CL_VPORT_YOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_13 0x0161
+#define regPA_CL_VPORT_ZSCALE_13_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_13 0x0162
+#define regPA_CL_VPORT_ZOFFSET_13_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_14 0x0163
+#define regPA_CL_VPORT_XSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_14 0x0164
+#define regPA_CL_VPORT_XOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_14 0x0165
+#define regPA_CL_VPORT_YSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_14 0x0166
+#define regPA_CL_VPORT_YOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_14 0x0167
+#define regPA_CL_VPORT_ZSCALE_14_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_14 0x0168
+#define regPA_CL_VPORT_ZOFFSET_14_BASE_IDX 1
+#define regPA_CL_VPORT_XSCALE_15 0x0169
+#define regPA_CL_VPORT_XSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_XOFFSET_15 0x016a
+#define regPA_CL_VPORT_XOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_YSCALE_15 0x016b
+#define regPA_CL_VPORT_YSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_YOFFSET_15 0x016c
+#define regPA_CL_VPORT_YOFFSET_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZSCALE_15 0x016d
+#define regPA_CL_VPORT_ZSCALE_15_BASE_IDX 1
+#define regPA_CL_VPORT_ZOFFSET_15 0x016e
+#define regPA_CL_VPORT_ZOFFSET_15_BASE_IDX 1
+#define regPA_CL_UCP_0_X 0x016f
+#define regPA_CL_UCP_0_X_BASE_IDX 1
+#define regPA_CL_UCP_0_Y 0x0170
+#define regPA_CL_UCP_0_Y_BASE_IDX 1
+#define regPA_CL_UCP_0_Z 0x0171
+#define regPA_CL_UCP_0_Z_BASE_IDX 1
+#define regPA_CL_UCP_0_W 0x0172
+#define regPA_CL_UCP_0_W_BASE_IDX 1
+#define regPA_CL_UCP_1_X 0x0173
+#define regPA_CL_UCP_1_X_BASE_IDX 1
+#define regPA_CL_UCP_1_Y 0x0174
+#define regPA_CL_UCP_1_Y_BASE_IDX 1
+#define regPA_CL_UCP_1_Z 0x0175
+#define regPA_CL_UCP_1_Z_BASE_IDX 1
+#define regPA_CL_UCP_1_W 0x0176
+#define regPA_CL_UCP_1_W_BASE_IDX 1
+#define regPA_CL_UCP_2_X 0x0177
+#define regPA_CL_UCP_2_X_BASE_IDX 1
+#define regPA_CL_UCP_2_Y 0x0178
+#define regPA_CL_UCP_2_Y_BASE_IDX 1
+#define regPA_CL_UCP_2_Z 0x0179
+#define regPA_CL_UCP_2_Z_BASE_IDX 1
+#define regPA_CL_UCP_2_W 0x017a
+#define regPA_CL_UCP_2_W_BASE_IDX 1
+#define regPA_CL_UCP_3_X 0x017b
+#define regPA_CL_UCP_3_X_BASE_IDX 1
+#define regPA_CL_UCP_3_Y 0x017c
+#define regPA_CL_UCP_3_Y_BASE_IDX 1
+#define regPA_CL_UCP_3_Z 0x017d
+#define regPA_CL_UCP_3_Z_BASE_IDX 1
+#define regPA_CL_UCP_3_W 0x017e
+#define regPA_CL_UCP_3_W_BASE_IDX 1
+#define regPA_CL_UCP_4_X 0x017f
+#define regPA_CL_UCP_4_X_BASE_IDX 1
+#define regPA_CL_UCP_4_Y 0x0180
+#define regPA_CL_UCP_4_Y_BASE_IDX 1
+#define regPA_CL_UCP_4_Z 0x0181
+#define regPA_CL_UCP_4_Z_BASE_IDX 1
+#define regPA_CL_UCP_4_W 0x0182
+#define regPA_CL_UCP_4_W_BASE_IDX 1
+#define regPA_CL_UCP_5_X 0x0183
+#define regPA_CL_UCP_5_X_BASE_IDX 1
+#define regPA_CL_UCP_5_Y 0x0184
+#define regPA_CL_UCP_5_Y_BASE_IDX 1
+#define regPA_CL_UCP_5_Z 0x0185
+#define regPA_CL_UCP_5_Z_BASE_IDX 1
+#define regPA_CL_UCP_5_W 0x0186
+#define regPA_CL_UCP_5_W_BASE_IDX 1
+#define regPA_CL_PROG_NEAR_CLIP_Z 0x0187
+#define regPA_CL_PROG_NEAR_CLIP_Z_BASE_IDX 1
+#define regPA_RATE_CNTL 0x0188
+#define regPA_RATE_CNTL_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_0 0x0191
+#define regSPI_PS_INPUT_CNTL_0_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_1 0x0192
+#define regSPI_PS_INPUT_CNTL_1_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_2 0x0193
+#define regSPI_PS_INPUT_CNTL_2_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_3 0x0194
+#define regSPI_PS_INPUT_CNTL_3_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_4 0x0195
+#define regSPI_PS_INPUT_CNTL_4_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_5 0x0196
+#define regSPI_PS_INPUT_CNTL_5_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_6 0x0197
+#define regSPI_PS_INPUT_CNTL_6_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_7 0x0198
+#define regSPI_PS_INPUT_CNTL_7_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_8 0x0199
+#define regSPI_PS_INPUT_CNTL_8_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_9 0x019a
+#define regSPI_PS_INPUT_CNTL_9_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_10 0x019b
+#define regSPI_PS_INPUT_CNTL_10_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_11 0x019c
+#define regSPI_PS_INPUT_CNTL_11_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_12 0x019d
+#define regSPI_PS_INPUT_CNTL_12_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_13 0x019e
+#define regSPI_PS_INPUT_CNTL_13_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_14 0x019f
+#define regSPI_PS_INPUT_CNTL_14_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_15 0x01a0
+#define regSPI_PS_INPUT_CNTL_15_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_16 0x01a1
+#define regSPI_PS_INPUT_CNTL_16_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_17 0x01a2
+#define regSPI_PS_INPUT_CNTL_17_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_18 0x01a3
+#define regSPI_PS_INPUT_CNTL_18_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_19 0x01a4
+#define regSPI_PS_INPUT_CNTL_19_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_20 0x01a5
+#define regSPI_PS_INPUT_CNTL_20_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_21 0x01a6
+#define regSPI_PS_INPUT_CNTL_21_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_22 0x01a7
+#define regSPI_PS_INPUT_CNTL_22_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_23 0x01a8
+#define regSPI_PS_INPUT_CNTL_23_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_24 0x01a9
+#define regSPI_PS_INPUT_CNTL_24_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_25 0x01aa
+#define regSPI_PS_INPUT_CNTL_25_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_26 0x01ab
+#define regSPI_PS_INPUT_CNTL_26_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_27 0x01ac
+#define regSPI_PS_INPUT_CNTL_27_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_28 0x01ad
+#define regSPI_PS_INPUT_CNTL_28_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_29 0x01ae
+#define regSPI_PS_INPUT_CNTL_29_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_30 0x01af
+#define regSPI_PS_INPUT_CNTL_30_BASE_IDX 1
+#define regSPI_PS_INPUT_CNTL_31 0x01b0
+#define regSPI_PS_INPUT_CNTL_31_BASE_IDX 1
+#define regSPI_VS_OUT_CONFIG 0x01b1
+#define regSPI_VS_OUT_CONFIG_BASE_IDX 1
+#define regSPI_PS_INPUT_ENA 0x01b3
+#define regSPI_PS_INPUT_ENA_BASE_IDX 1
+#define regSPI_PS_INPUT_ADDR 0x01b4
+#define regSPI_PS_INPUT_ADDR_BASE_IDX 1
+#define regSPI_INTERP_CONTROL_0 0x01b5
+#define regSPI_INTERP_CONTROL_0_BASE_IDX 1
+#define regSPI_PS_IN_CONTROL 0x01b6
+#define regSPI_PS_IN_CONTROL_BASE_IDX 1
+#define regSPI_BARYC_CNTL 0x01b8
+#define regSPI_BARYC_CNTL_BASE_IDX 1
+#define regSPI_TMPRING_SIZE 0x01ba
+#define regSPI_TMPRING_SIZE_BASE_IDX 1
+#define regSPI_GFX_SCRATCH_BASE_LO 0x01bb
+#define regSPI_GFX_SCRATCH_BASE_LO_BASE_IDX 1
+#define regSPI_GFX_SCRATCH_BASE_HI 0x01bc
+#define regSPI_GFX_SCRATCH_BASE_HI_BASE_IDX 1
+#define regSPI_SHADER_IDX_FORMAT 0x01c2
+#define regSPI_SHADER_IDX_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_POS_FORMAT 0x01c3
+#define regSPI_SHADER_POS_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_Z_FORMAT 0x01c4
+#define regSPI_SHADER_Z_FORMAT_BASE_IDX 1
+#define regSPI_SHADER_COL_FORMAT 0x01c5
+#define regSPI_SHADER_COL_FORMAT_BASE_IDX 1
+#define regSX_PS_DOWNCONVERT_CONTROL 0x01d4
+#define regSX_PS_DOWNCONVERT_CONTROL_BASE_IDX 1
+#define regSX_PS_DOWNCONVERT 0x01d5
+#define regSX_PS_DOWNCONVERT_BASE_IDX 1
+#define regSX_BLEND_OPT_EPSILON 0x01d6
+#define regSX_BLEND_OPT_EPSILON_BASE_IDX 1
+#define regSX_BLEND_OPT_CONTROL 0x01d7
+#define regSX_BLEND_OPT_CONTROL_BASE_IDX 1
+#define regSX_MRT0_BLEND_OPT 0x01d8
+#define regSX_MRT0_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT1_BLEND_OPT 0x01d9
+#define regSX_MRT1_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT2_BLEND_OPT 0x01da
+#define regSX_MRT2_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT3_BLEND_OPT 0x01db
+#define regSX_MRT3_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT4_BLEND_OPT 0x01dc
+#define regSX_MRT4_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT5_BLEND_OPT 0x01dd
+#define regSX_MRT5_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT6_BLEND_OPT 0x01de
+#define regSX_MRT6_BLEND_OPT_BASE_IDX 1
+#define regSX_MRT7_BLEND_OPT 0x01df
+#define regSX_MRT7_BLEND_OPT_BASE_IDX 1
+#define regCB_BLEND0_CONTROL 0x01e0
+#define regCB_BLEND0_CONTROL_BASE_IDX 1
+#define regCB_BLEND1_CONTROL 0x01e1
+#define regCB_BLEND1_CONTROL_BASE_IDX 1
+#define regCB_BLEND2_CONTROL 0x01e2
+#define regCB_BLEND2_CONTROL_BASE_IDX 1
+#define regCB_BLEND3_CONTROL 0x01e3
+#define regCB_BLEND3_CONTROL_BASE_IDX 1
+#define regCB_BLEND4_CONTROL 0x01e4
+#define regCB_BLEND4_CONTROL_BASE_IDX 1
+#define regCB_BLEND5_CONTROL 0x01e5
+#define regCB_BLEND5_CONTROL_BASE_IDX 1
+#define regCB_BLEND6_CONTROL 0x01e6
+#define regCB_BLEND6_CONTROL_BASE_IDX 1
+#define regCB_BLEND7_CONTROL 0x01e7
+#define regCB_BLEND7_CONTROL_BASE_IDX 1
+#define regGFX_COPY_STATE 0x01f4
+#define regGFX_COPY_STATE_BASE_IDX 1
+#define regPA_CL_POINT_X_RAD 0x01f5
+#define regPA_CL_POINT_X_RAD_BASE_IDX 1
+#define regPA_CL_POINT_Y_RAD 0x01f6
+#define regPA_CL_POINT_Y_RAD_BASE_IDX 1
+#define regPA_CL_POINT_SIZE 0x01f7
+#define regPA_CL_POINT_SIZE_BASE_IDX 1
+#define regPA_CL_POINT_CULL_RAD 0x01f8
+#define regPA_CL_POINT_CULL_RAD_BASE_IDX 1
+#define regVGT_DMA_BASE_HI 0x01f9
+#define regVGT_DMA_BASE_HI_BASE_IDX 1
+#define regVGT_DMA_BASE 0x01fa
+#define regVGT_DMA_BASE_BASE_IDX 1
+#define regVGT_DRAW_INITIATOR 0x01fc
+#define regVGT_DRAW_INITIATOR_BASE_IDX 1
+#define regVGT_EVENT_ADDRESS_REG 0x01fe
+#define regVGT_EVENT_ADDRESS_REG_BASE_IDX 1
+#define regGE_MAX_OUTPUT_PER_SUBGROUP 0x01ff
+#define regGE_MAX_OUTPUT_PER_SUBGROUP_BASE_IDX 1
+#define regDB_DEPTH_CONTROL 0x0200
+#define regDB_DEPTH_CONTROL_BASE_IDX 1
+#define regDB_EQAA 0x0201
+#define regDB_EQAA_BASE_IDX 1
+#define regCB_COLOR_CONTROL 0x0202
+#define regCB_COLOR_CONTROL_BASE_IDX 1
+#define regDB_SHADER_CONTROL 0x0203
+#define regDB_SHADER_CONTROL_BASE_IDX 1
+#define regPA_CL_CLIP_CNTL 0x0204
+#define regPA_CL_CLIP_CNTL_BASE_IDX 1
+#define regPA_SU_SC_MODE_CNTL 0x0205
+#define regPA_SU_SC_MODE_CNTL_BASE_IDX 1
+#define regPA_CL_VTE_CNTL 0x0206
+#define regPA_CL_VTE_CNTL_BASE_IDX 1
+#define regPA_CL_VS_OUT_CNTL 0x0207
+#define regPA_CL_VS_OUT_CNTL_BASE_IDX 1
+#define regPA_CL_NANINF_CNTL 0x0208
+#define regPA_CL_NANINF_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_CNTL 0x0209
+#define regPA_SU_LINE_STIPPLE_CNTL_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_SCALE 0x020a
+#define regPA_SU_LINE_STIPPLE_SCALE_BASE_IDX 1
+#define regPA_SU_PRIM_FILTER_CNTL 0x020b
+#define regPA_SU_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL 0x020c
+#define regPA_SU_SMALL_PRIM_FILTER_CNTL_BASE_IDX 1
+#define regPA_CL_NGG_CNTL 0x020e
+#define regPA_CL_NGG_CNTL_BASE_IDX 1
+#define regPA_SU_OVER_RASTERIZATION_CNTL 0x020f
+#define regPA_SU_OVER_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_STEREO_CNTL 0x0210
+#define regPA_STEREO_CNTL_BASE_IDX 1
+#define regPA_STATE_STEREO_X 0x0211
+#define regPA_STATE_STEREO_X_BASE_IDX 1
+#define regPA_CL_VRS_CNTL 0x0212
+#define regPA_CL_VRS_CNTL_BASE_IDX 1
+#define regPA_SU_POINT_SIZE 0x0280
+#define regPA_SU_POINT_SIZE_BASE_IDX 1
+#define regPA_SU_POINT_MINMAX 0x0281
+#define regPA_SU_POINT_MINMAX_BASE_IDX 1
+#define regPA_SU_LINE_CNTL 0x0282
+#define regPA_SU_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE 0x0283
+#define regPA_SC_LINE_STIPPLE_BASE_IDX 1
+#define regVGT_HOS_MAX_TESS_LEVEL 0x0286
+#define regVGT_HOS_MAX_TESS_LEVEL_BASE_IDX 1
+#define regVGT_HOS_MIN_TESS_LEVEL 0x0287
+#define regVGT_HOS_MIN_TESS_LEVEL_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_0 0x0292
+#define regPA_SC_MODE_CNTL_0_BASE_IDX 1
+#define regPA_SC_MODE_CNTL_1 0x0293
+#define regPA_SC_MODE_CNTL_1_BASE_IDX 1
+#define regVGT_ENHANCE 0x0294
+#define regVGT_ENHANCE_BASE_IDX 1
+#define regIA_ENHANCE 0x029c
+#define regIA_ENHANCE_BASE_IDX 1
+#define regVGT_DMA_SIZE 0x029d
+#define regVGT_DMA_SIZE_BASE_IDX 1
+#define regVGT_DMA_MAX_SIZE 0x029e
+#define regVGT_DMA_MAX_SIZE_BASE_IDX 1
+#define regVGT_DMA_INDEX_TYPE 0x029f
+#define regVGT_DMA_INDEX_TYPE_BASE_IDX 1
+#define regWD_ENHANCE 0x02a0
+#define regWD_ENHANCE_BASE_IDX 1
+#define regVGT_PRIMITIVEID_EN 0x02a1
+#define regVGT_PRIMITIVEID_EN_BASE_IDX 1
+#define regVGT_DMA_NUM_INSTANCES 0x02a2
+#define regVGT_DMA_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_PRIMITIVEID_RESET 0x02a3
+#define regVGT_PRIMITIVEID_RESET_BASE_IDX 1
+#define regVGT_EVENT_INITIATOR 0x02a4
+#define regVGT_EVENT_INITIATOR_BASE_IDX 1
+#define regVGT_DRAW_PAYLOAD_CNTL 0x02a6
+#define regVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
+#define regVGT_ESGS_RING_ITEMSIZE 0x02ab
+#define regVGT_ESGS_RING_ITEMSIZE_BASE_IDX 1
+#define regVGT_REUSE_OFF 0x02ad
+#define regVGT_REUSE_OFF_BASE_IDX 1
+#define regDB_HTILE_SURFACE 0x02af
+#define regDB_HTILE_SURFACE_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE0 0x02b0
+#define regDB_SRESULTS_COMPARE_STATE0_BASE_IDX 1
+#define regDB_SRESULTS_COMPARE_STATE1 0x02b1
+#define regDB_SRESULTS_COMPARE_STATE1_BASE_IDX 1
+#define regDB_PRELOAD_CONTROL 0x02b2
+#define regDB_PRELOAD_CONTROL_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET 0x02ca
+#define regVGT_STRMOUT_DRAW_OPAQUE_OFFSET_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE 0x02cb
+#define regVGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE_BASE_IDX 1
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE 0x02cc
+#define regVGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE_BASE_IDX 1
+#define regVGT_GS_MAX_VERT_OUT 0x02ce
+#define regVGT_GS_MAX_VERT_OUT_BASE_IDX 1
+#define regGE_NGG_SUBGRP_CNTL 0x02d3
+#define regGE_NGG_SUBGRP_CNTL_BASE_IDX 1
+#define regVGT_TESS_DISTRIBUTION 0x02d4
+#define regVGT_TESS_DISTRIBUTION_BASE_IDX 1
+#define regVGT_SHADER_STAGES_EN 0x02d5
+#define regVGT_SHADER_STAGES_EN_BASE_IDX 1
+#define regVGT_LS_HS_CONFIG 0x02d6
+#define regVGT_LS_HS_CONFIG_BASE_IDX 1
+#define regVGT_TF_PARAM 0x02db
+#define regVGT_TF_PARAM_BASE_IDX 1
+#define regDB_ALPHA_TO_MASK 0x02dc
+#define regDB_ALPHA_TO_MASK_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL 0x02de
+#define regPA_SU_POLY_OFFSET_DB_FMT_CNTL_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_CLAMP 0x02df
+#define regPA_SU_POLY_OFFSET_CLAMP_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE 0x02e0
+#define regPA_SU_POLY_OFFSET_FRONT_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET 0x02e1
+#define regPA_SU_POLY_OFFSET_FRONT_OFFSET_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_SCALE 0x02e2
+#define regPA_SU_POLY_OFFSET_BACK_SCALE_BASE_IDX 1
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET 0x02e3
+#define regPA_SU_POLY_OFFSET_BACK_OFFSET_BASE_IDX 1
+#define regVGT_GS_INSTANCE_CNT 0x02e4
+#define regVGT_GS_INSTANCE_CNT_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_0 0x02f5
+#define regPA_SC_CENTROID_PRIORITY_0_BASE_IDX 1
+#define regPA_SC_CENTROID_PRIORITY_1 0x02f6
+#define regPA_SC_CENTROID_PRIORITY_1_BASE_IDX 1
+#define regPA_SC_LINE_CNTL 0x02f7
+#define regPA_SC_LINE_CNTL_BASE_IDX 1
+#define regPA_SC_AA_CONFIG 0x02f8
+#define regPA_SC_AA_CONFIG_BASE_IDX 1
+#define regPA_SU_VTX_CNTL 0x02f9
+#define regPA_SU_VTX_CNTL_BASE_IDX 1
+#define regPA_CL_GB_VERT_CLIP_ADJ 0x02fa
+#define regPA_CL_GB_VERT_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_VERT_DISC_ADJ 0x02fb
+#define regPA_CL_GB_VERT_DISC_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_CLIP_ADJ 0x02fc
+#define regPA_CL_GB_HORZ_CLIP_ADJ_BASE_IDX 1
+#define regPA_CL_GB_HORZ_DISC_ADJ 0x02fd
+#define regPA_CL_GB_HORZ_DISC_ADJ_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 0x02fe
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1 0x02ff
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2 0x0300
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3 0x0301
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0 0x0302
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1 0x0303
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2 0x0304
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3 0x0305
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0 0x0306
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1 0x0307
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2 0x0308
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3 0x0309
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0 0x030a
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1 0x030b
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2 0x030c
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2_BASE_IDX 1
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3 0x030d
+#define regPA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y0_X1Y0 0x030e
+#define regPA_SC_AA_MASK_X0Y0_X1Y0_BASE_IDX 1
+#define regPA_SC_AA_MASK_X0Y1_X1Y1 0x030f
+#define regPA_SC_AA_MASK_X0Y1_X1Y1_BASE_IDX 1
+#define regPA_SC_SHADER_CONTROL 0x0310
+#define regPA_SC_SHADER_CONTROL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_0 0x0311
+#define regPA_SC_BINNER_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_1 0x0312
+#define regPA_SC_BINNER_CNTL_1_BASE_IDX 1
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL 0x0313
+#define regPA_SC_CONSERVATIVE_RASTERIZATION_CNTL_BASE_IDX 1
+#define regPA_SC_NGG_MODE_CNTL 0x0314
+#define regPA_SC_NGG_MODE_CNTL_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_2 0x0315
+#define regPA_SC_BINNER_CNTL_2_BASE_IDX 1
+#define regCB_COLOR0_BASE 0x0318
+#define regCB_COLOR0_BASE_BASE_IDX 1
+#define regCB_COLOR0_VIEW 0x031b
+#define regCB_COLOR0_VIEW_BASE_IDX 1
+#define regCB_COLOR0_INFO 0x031c
+#define regCB_COLOR0_INFO_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB 0x031d
+#define regCB_COLOR0_ATTRIB_BASE_IDX 1
+#define regCB_COLOR0_FDCC_CONTROL 0x031e
+#define regCB_COLOR0_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE 0x0325
+#define regCB_COLOR0_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR1_BASE 0x0327
+#define regCB_COLOR1_BASE_BASE_IDX 1
+#define regCB_COLOR1_VIEW 0x032a
+#define regCB_COLOR1_VIEW_BASE_IDX 1
+#define regCB_COLOR1_INFO 0x032b
+#define regCB_COLOR1_INFO_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB 0x032c
+#define regCB_COLOR1_ATTRIB_BASE_IDX 1
+#define regCB_COLOR1_FDCC_CONTROL 0x032d
+#define regCB_COLOR1_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE 0x0334
+#define regCB_COLOR1_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR2_BASE 0x0336
+#define regCB_COLOR2_BASE_BASE_IDX 1
+#define regCB_COLOR2_VIEW 0x0339
+#define regCB_COLOR2_VIEW_BASE_IDX 1
+#define regCB_COLOR2_INFO 0x033a
+#define regCB_COLOR2_INFO_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB 0x033b
+#define regCB_COLOR2_ATTRIB_BASE_IDX 1
+#define regCB_COLOR2_FDCC_CONTROL 0x033c
+#define regCB_COLOR2_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE 0x0343
+#define regCB_COLOR2_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR3_BASE 0x0345
+#define regCB_COLOR3_BASE_BASE_IDX 1
+#define regCB_COLOR3_VIEW 0x0348
+#define regCB_COLOR3_VIEW_BASE_IDX 1
+#define regCB_COLOR3_INFO 0x0349
+#define regCB_COLOR3_INFO_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB 0x034a
+#define regCB_COLOR3_ATTRIB_BASE_IDX 1
+#define regCB_COLOR3_FDCC_CONTROL 0x034b
+#define regCB_COLOR3_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE 0x0352
+#define regCB_COLOR3_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR4_BASE 0x0354
+#define regCB_COLOR4_BASE_BASE_IDX 1
+#define regCB_COLOR4_VIEW 0x0357
+#define regCB_COLOR4_VIEW_BASE_IDX 1
+#define regCB_COLOR4_INFO 0x0358
+#define regCB_COLOR4_INFO_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB 0x0359
+#define regCB_COLOR4_ATTRIB_BASE_IDX 1
+#define regCB_COLOR4_FDCC_CONTROL 0x035a
+#define regCB_COLOR4_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE 0x0361
+#define regCB_COLOR4_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR5_BASE 0x0363
+#define regCB_COLOR5_BASE_BASE_IDX 1
+#define regCB_COLOR5_VIEW 0x0366
+#define regCB_COLOR5_VIEW_BASE_IDX 1
+#define regCB_COLOR5_INFO 0x0367
+#define regCB_COLOR5_INFO_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB 0x0368
+#define regCB_COLOR5_ATTRIB_BASE_IDX 1
+#define regCB_COLOR5_FDCC_CONTROL 0x0369
+#define regCB_COLOR5_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE 0x0370
+#define regCB_COLOR5_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR6_BASE 0x0372
+#define regCB_COLOR6_BASE_BASE_IDX 1
+#define regCB_COLOR6_VIEW 0x0375
+#define regCB_COLOR6_VIEW_BASE_IDX 1
+#define regCB_COLOR6_INFO 0x0376
+#define regCB_COLOR6_INFO_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB 0x0377
+#define regCB_COLOR6_ATTRIB_BASE_IDX 1
+#define regCB_COLOR6_FDCC_CONTROL 0x0378
+#define regCB_COLOR6_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE 0x037f
+#define regCB_COLOR6_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR7_BASE 0x0381
+#define regCB_COLOR7_BASE_BASE_IDX 1
+#define regCB_COLOR7_VIEW 0x0384
+#define regCB_COLOR7_VIEW_BASE_IDX 1
+#define regCB_COLOR7_INFO 0x0385
+#define regCB_COLOR7_INFO_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB 0x0386
+#define regCB_COLOR7_ATTRIB_BASE_IDX 1
+#define regCB_COLOR7_FDCC_CONTROL 0x0387
+#define regCB_COLOR7_FDCC_CONTROL_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE 0x038e
+#define regCB_COLOR7_DCC_BASE_BASE_IDX 1
+#define regCB_COLOR0_BASE_EXT 0x0390
+#define regCB_COLOR0_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_BASE_EXT 0x0391
+#define regCB_COLOR1_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_BASE_EXT 0x0392
+#define regCB_COLOR2_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_BASE_EXT 0x0393
+#define regCB_COLOR3_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_BASE_EXT 0x0394
+#define regCB_COLOR4_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_BASE_EXT 0x0395
+#define regCB_COLOR5_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_BASE_EXT 0x0396
+#define regCB_COLOR6_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_BASE_EXT 0x0397
+#define regCB_COLOR7_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_DCC_BASE_EXT 0x03a8
+#define regCB_COLOR0_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR1_DCC_BASE_EXT 0x03a9
+#define regCB_COLOR1_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR2_DCC_BASE_EXT 0x03aa
+#define regCB_COLOR2_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR3_DCC_BASE_EXT 0x03ab
+#define regCB_COLOR3_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR4_DCC_BASE_EXT 0x03ac
+#define regCB_COLOR4_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR5_DCC_BASE_EXT 0x03ad
+#define regCB_COLOR5_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR6_DCC_BASE_EXT 0x03ae
+#define regCB_COLOR6_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR7_DCC_BASE_EXT 0x03af
+#define regCB_COLOR7_DCC_BASE_EXT_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB2 0x03b0
+#define regCB_COLOR0_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB2 0x03b1
+#define regCB_COLOR1_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB2 0x03b2
+#define regCB_COLOR2_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB2 0x03b3
+#define regCB_COLOR3_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB2 0x03b4
+#define regCB_COLOR4_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB2 0x03b5
+#define regCB_COLOR5_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB2 0x03b6
+#define regCB_COLOR6_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB2 0x03b7
+#define regCB_COLOR7_ATTRIB2_BASE_IDX 1
+#define regCB_COLOR0_ATTRIB3 0x03b8
+#define regCB_COLOR0_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR1_ATTRIB3 0x03b9
+#define regCB_COLOR1_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR2_ATTRIB3 0x03ba
+#define regCB_COLOR2_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR3_ATTRIB3 0x03bb
+#define regCB_COLOR3_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR4_ATTRIB3 0x03bc
+#define regCB_COLOR4_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR5_ATTRIB3 0x03bd
+#define regCB_COLOR5_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR6_ATTRIB3 0x03be
+#define regCB_COLOR6_ATTRIB3_BASE_IDX 1
+#define regCB_COLOR7_ATTRIB3 0x03bf
+#define regCB_COLOR7_ATTRIB3_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_cpdec
+// base address: 0x2a000
+#define regCONFIG_RESERVED_REG0 0x0800
+#define regCONFIG_RESERVED_REG0_BASE_IDX 1
+#define regCONFIG_RESERVED_REG1 0x0801
+#define regCONFIG_RESERVED_REG1_BASE_IDX 1
+#define regCP_MEC_CNTL 0x0802
+#define regCP_MEC_CNTL_BASE_IDX 1
+#define regCP_ME_CNTL 0x0803
+#define regCP_ME_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_grbmdec
+// base address: 0x2a400
+#define regGRBM_GFX_CNTL 0x0900
+#define regGRBM_GFX_CNTL_BASE_IDX 1
+#define regGRBM_NOWHERE 0x0901
+#define regGRBM_NOWHERE_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_padec
+// base address: 0x2a500
+#define regPA_SC_VRS_SURFACE_CNTL 0x0940
+#define regPA_SC_VRS_SURFACE_CNTL_BASE_IDX 1
+#define regPA_SC_ENHANCE 0x0941
+#define regPA_SC_ENHANCE_BASE_IDX 1
+#define regPA_SC_ENHANCE_1 0x0942
+#define regPA_SC_ENHANCE_1_BASE_IDX 1
+#define regPA_SC_ENHANCE_2 0x0943
+#define regPA_SC_ENHANCE_2_BASE_IDX 1
+#define regPA_SC_ENHANCE_3 0x0944
+#define regPA_SC_ENHANCE_3_BASE_IDX 1
+#define regPA_SC_BINNER_CNTL_OVERRIDE 0x0946
+#define regPA_SC_BINNER_CNTL_OVERRIDE_BASE_IDX 1
+#define regPA_SC_PBB_OVERRIDE_FLAG 0x0947
+#define regPA_SC_PBB_OVERRIDE_FLAG_BASE_IDX 1
+#define regPA_SC_DSM_CNTL 0x0948
+#define regPA_SC_DSM_CNTL_BASE_IDX 1
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE 0x0949
+#define regPA_SC_TILE_STEERING_CREST_OVERRIDE_BASE_IDX 1
+#define regPA_SC_FIFO_SIZE 0x094a
+#define regPA_SC_FIFO_SIZE_BASE_IDX 1
+#define regPA_SC_IF_FIFO_SIZE 0x094b
+#define regPA_SC_IF_FIFO_SIZE_BASE_IDX 1
+#define regPA_SC_PACKER_WAVE_ID_CNTL 0x094c
+#define regPA_SC_PACKER_WAVE_ID_CNTL_BASE_IDX 1
+#define regPA_SC_ATM_CNTL 0x094d
+#define regPA_SC_ATM_CNTL_BASE_IDX 1
+#define regPA_SC_PKR_WAVE_TABLE_CNTL 0x094e
+#define regPA_SC_PKR_WAVE_TABLE_CNTL_BASE_IDX 1
+#define regPA_SC_FORCE_EOV_MAX_CNTS 0x094f
+#define regPA_SC_FORCE_EOV_MAX_CNTS_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_0 0x0950
+#define regPA_SC_BINNER_EVENT_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_1 0x0951
+#define regPA_SC_BINNER_EVENT_CNTL_1_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_2 0x0952
+#define regPA_SC_BINNER_EVENT_CNTL_2_BASE_IDX 1
+#define regPA_SC_BINNER_EVENT_CNTL_3 0x0953
+#define regPA_SC_BINNER_EVENT_CNTL_3_BASE_IDX 1
+#define regPA_SC_BINNER_TIMEOUT_COUNTER 0x0954
+#define regPA_SC_BINNER_TIMEOUT_COUNTER_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_0 0x0955
+#define regPA_SC_BINNER_PERF_CNTL_0_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_1 0x0956
+#define regPA_SC_BINNER_PERF_CNTL_1_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_2 0x0957
+#define regPA_SC_BINNER_PERF_CNTL_2_BASE_IDX 1
+#define regPA_SC_BINNER_PERF_CNTL_3 0x0958
+#define regPA_SC_BINNER_PERF_CNTL_3_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK 0x095b
+#define regPA_SC_P3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK 0x095c
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_LOCK 0x095d
+#define regPA_SC_TRAP_SCREEN_HV_LOCK_BASE_IDX 1
+#define regPA_PH_INTERFACE_FIFO_SIZE 0x095e
+#define regPA_PH_INTERFACE_FIFO_SIZE_BASE_IDX 1
+#define regPA_PH_ENHANCE 0x095f
+#define regPA_PH_ENHANCE_BASE_IDX 1
+#define regPA_SC_VRS_SURFACE_CNTL_1 0x0960
+#define regPA_SC_VRS_SURFACE_CNTL_1_BASE_IDX 1
+
+
+// addressBlock: gc_pfvf_sqdec
+// base address: 0x2a780
+#define regSQ_RUNTIME_CONFIG 0x09e0
+#define regSQ_RUNTIME_CONFIG_BASE_IDX 1
+#define regSQ_DEBUG_STS_GLOBAL 0x09e1
+#define regSQ_DEBUG_STS_GLOBAL_BASE_IDX 1
+#define regSQ_DEBUG_STS_GLOBAL2 0x09e2
+#define regSQ_DEBUG_STS_GLOBAL2_BASE_IDX 1
+#define regSH_MEM_BASES 0x09e3
+#define regSH_MEM_BASES_BASE_IDX 1
+#define regSH_MEM_CONFIG 0x09e4
+#define regSH_MEM_CONFIG_BASE_IDX 1
+#define regSQ_DEBUG 0x09e5
+#define regSQ_DEBUG_BASE_IDX 1
+#define regSQ_SHADER_TBA_LO 0x09e6
+#define regSQ_SHADER_TBA_LO_BASE_IDX 1
+#define regSQ_SHADER_TBA_HI 0x09e7
+#define regSQ_SHADER_TBA_HI_BASE_IDX 1
+#define regSQ_SHADER_TMA_LO 0x09e8
+#define regSQ_SHADER_TMA_LO_BASE_IDX 1
+#define regSQ_SHADER_TMA_HI 0x09e9
+#define regSQ_SHADER_TMA_HI_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_cpdec
+// base address: 0x2e000
+#define regCP_DEBUG_2 0x1800
+#define regCP_DEBUG_2_BASE_IDX 1
+#define regCP_FETCHER_SOURCE 0x1801
+#define regCP_FETCHER_SOURCE_BASE_IDX 1
+#define regCP_DFY_CNTL 0x1804
+#define regCP_DFY_CNTL_BASE_IDX 1
+#define regCP_DFY_STAT 0x1805
+#define regCP_DFY_STAT_BASE_IDX 1
+#define regCP_DFY_ADDR_HI 0x1806
+#define regCP_DFY_ADDR_HI_BASE_IDX 1
+#define regCP_DFY_ADDR_LO 0x1807
+#define regCP_DFY_ADDR_LO_BASE_IDX 1
+#define regCP_DFY_DATA_0 0x1808
+#define regCP_DFY_DATA_0_BASE_IDX 1
+#define regCP_DFY_DATA_1 0x1809
+#define regCP_DFY_DATA_1_BASE_IDX 1
+#define regCP_DFY_DATA_2 0x180a
+#define regCP_DFY_DATA_2_BASE_IDX 1
+#define regCP_DFY_DATA_3 0x180b
+#define regCP_DFY_DATA_3_BASE_IDX 1
+#define regCP_DFY_DATA_4 0x180c
+#define regCP_DFY_DATA_4_BASE_IDX 1
+#define regCP_DFY_DATA_5 0x180d
+#define regCP_DFY_DATA_5_BASE_IDX 1
+#define regCP_DFY_DATA_6 0x180e
+#define regCP_DFY_DATA_6_BASE_IDX 1
+#define regCP_DFY_DATA_7 0x180f
+#define regCP_DFY_DATA_7_BASE_IDX 1
+#define regCP_DFY_DATA_8 0x1810
+#define regCP_DFY_DATA_8_BASE_IDX 1
+#define regCP_DFY_DATA_9 0x1811
+#define regCP_DFY_DATA_9_BASE_IDX 1
+#define regCP_DFY_DATA_10 0x1812
+#define regCP_DFY_DATA_10_BASE_IDX 1
+#define regCP_DFY_DATA_11 0x1813
+#define regCP_DFY_DATA_11_BASE_IDX 1
+#define regCP_DFY_DATA_12 0x1814
+#define regCP_DFY_DATA_12_BASE_IDX 1
+#define regCP_DFY_DATA_13 0x1815
+#define regCP_DFY_DATA_13_BASE_IDX 1
+#define regCP_DFY_DATA_14 0x1816
+#define regCP_DFY_DATA_14_BASE_IDX 1
+#define regCP_DFY_DATA_15 0x1817
+#define regCP_DFY_DATA_15_BASE_IDX 1
+#define regCP_DFY_CMD 0x1818
+#define regCP_DFY_CMD_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_cpphqddec
+// base address: 0x2e080
+#define regCP_HPD_MES_ROQ_OFFSETS 0x1821
+#define regCP_HPD_MES_ROQ_OFFSETS_BASE_IDX 1
+#define regCP_HPD_ROQ_OFFSETS 0x1821
+#define regCP_HPD_ROQ_OFFSETS_BASE_IDX 1
+#define regCP_HPD_STATUS0 0x1822
+#define regCP_HPD_STATUS0_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_didtdec
+// base address: 0x2e400
+#define regDIDT_INDEX_AUTO_INCR_EN 0x1900
+#define regDIDT_INDEX_AUTO_INCR_EN_BASE_IDX 1
+#define regDIDT_EDC_CTRL 0x1901
+#define regDIDT_EDC_CTRL_BASE_IDX 1
+#define regDIDT_EDC_THROTTLE_CTRL 0x1902
+#define regDIDT_EDC_THROTTLE_CTRL_BASE_IDX 1
+#define regDIDT_EDC_THRESHOLD 0x1903
+#define regDIDT_EDC_THRESHOLD_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_1_2 0x1904
+#define regDIDT_EDC_STALL_PATTERN_1_2_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_3_4 0x1905
+#define regDIDT_EDC_STALL_PATTERN_3_4_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_5_6 0x1906
+#define regDIDT_EDC_STALL_PATTERN_5_6_BASE_IDX 1
+#define regDIDT_EDC_STALL_PATTERN_7 0x1907
+#define regDIDT_EDC_STALL_PATTERN_7_BASE_IDX 1
+#define regDIDT_EDC_STATUS 0x1908
+#define regDIDT_EDC_STATUS_BASE_IDX 1
+#define regDIDT_EDC_DYNAMIC_THRESHOLD_RO 0x1909
+#define regDIDT_EDC_DYNAMIC_THRESHOLD_RO_BASE_IDX 1
+#define regDIDT_EDC_OVERFLOW 0x190a
+#define regDIDT_EDC_OVERFLOW_BASE_IDX 1
+#define regDIDT_EDC_ROLLING_POWER_DELTA 0x190b
+#define regDIDT_EDC_ROLLING_POWER_DELTA_BASE_IDX 1
+#define regDIDT_IND_INDEX 0x190c
+#define regDIDT_IND_INDEX_BASE_IDX 1
+#define regDIDT_IND_DATA 0x190d
+#define regDIDT_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_spidec
+// base address: 0x2e500
+#define regSPI_CDBG_SYS_GFX 0x1940
+#define regSPI_CDBG_SYS_GFX_BASE_IDX 1
+#define regSPI_CDBG_SYS_HP3D 0x1941
+#define regSPI_CDBG_SYS_HP3D_BASE_IDX 1
+#define regSPI_CDBG_SYS_CS0 0x1942
+#define regSPI_CDBG_SYS_CS0_BASE_IDX 1
+#define regSPI_GDBG_WAVE_CNTL 0x1943
+#define regSPI_GDBG_WAVE_CNTL_BASE_IDX 1
+#define regSPI_GDBG_TRAP_CONFIG 0x1944
+#define regSPI_GDBG_TRAP_CONFIG_BASE_IDX 1
+#define regSPI_GDBG_WAVE_CNTL3 0x1945
+#define regSPI_GDBG_WAVE_CNTL3_BASE_IDX 1
+#define regSPI_RESET_DEBUG 0x1946
+#define regSPI_RESET_DEBUG_BASE_IDX 1
+#define regSPI_ARB_CNTL_0 0x1949
+#define regSPI_ARB_CNTL_0_BASE_IDX 1
+#define regSPI_FEATURE_CTRL 0x194a
+#define regSPI_FEATURE_CTRL_BASE_IDX 1
+#define regSPI_SHADER_RSRC_LIMIT_CTRL 0x194b
+#define regSPI_SHADER_RSRC_LIMIT_CTRL_BASE_IDX 1
+#define regSPI_COMPUTE_WF_CTX_SAVE_STATUS 0x194e
+#define regSPI_COMPUTE_WF_CTX_SAVE_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_tcpdec
+// base address: 0x2e680
+#define regTCP_INVALIDATE 0x19a0
+#define regTCP_INVALIDATE_BASE_IDX 1
+#define regTCP_STATUS 0x19a1
+#define regTCP_STATUS_BASE_IDX 1
+#define regTCP_CNTL 0x19a2
+#define regTCP_CNTL_BASE_IDX 1
+#define regTCP_CNTL2 0x19a3
+#define regTCP_CNTL2_BASE_IDX 1
+#define regTCP_CREDIT 0x19a4
+#define regTCP_CREDIT_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_gdsdec
+// base address: 0x2e6c0
+#define regGDS_ENHANCE2 0x19b0
+#define regGDS_ENHANCE2_BASE_IDX 1
+#define regGDS_OA_CGPG_RESTORE 0x19b1
+#define regGDS_OA_CGPG_RESTORE_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_utcl1dec
+// base address: 0x2e600
+#define regUTCL1_CTRL_0 0x1980
+#define regUTCL1_CTRL_0_BASE_IDX 1
+#define regUTCL1_UTCL0_INVREQ_DISABLE 0x1984
+#define regUTCL1_UTCL0_INVREQ_DISABLE_BASE_IDX 1
+#define regUTCL1_CTRL_2 0x1985
+#define regUTCL1_CTRL_2_BASE_IDX 1
+#define regUTCL1_FIFO_SIZING 0x1986
+#define regUTCL1_FIFO_SIZING_BASE_IDX 1
+#define regGCRD_SA0_TARGETS_DISABLE 0x1987
+#define regGCRD_SA0_TARGETS_DISABLE_BASE_IDX 1
+#define regGCRD_SA1_TARGETS_DISABLE 0x1989
+#define regGCRD_SA1_TARGETS_DISABLE_BASE_IDX 1
+#define regGCRD_CREDIT_SAFE 0x198a
+#define regGCRD_CREDIT_SAFE_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_pmmdec
+// base address: 0x2e640
+#define regGCR_GENERAL_CNTL 0x1990
+#define regGCR_GENERAL_CNTL_BASE_IDX 1
+#define regGCR_TARGET_DISABLE 0x1991
+#define regGCR_TARGET_DISABLE_BASE_IDX 1
+#define regGCR_CMD_STATUS 0x1992
+#define regGCR_CMD_STATUS_BASE_IDX 1
+#define regGCR_SPARE 0x1993
+#define regGCR_SPARE_BASE_IDX 1
+#define regPMM_CNTL2 0x1999
+#define regPMM_CNTL2_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly_gccacdec
+// base address: 0x2eb40
+#define regGC_CAC_CTRL_1 0x1ad0
+#define regGC_CAC_CTRL_1_BASE_IDX 1
+#define regGC_CAC_CTRL_2 0x1ad1
+#define regGC_CAC_CTRL_2_BASE_IDX 1
+#define regGC_CAC_AGGR_LOWER 0x1ad2
+#define regGC_CAC_AGGR_LOWER_BASE_IDX 1
+#define regGC_CAC_AGGR_UPPER 0x1ad3
+#define regGC_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE0_CAC_AGGR_LOWER 0x1ad4
+#define regSE0_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE0_CAC_AGGR_UPPER 0x1ad5
+#define regSE0_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE1_CAC_AGGR_LOWER 0x1ad6
+#define regSE1_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE1_CAC_AGGR_UPPER 0x1ad7
+#define regSE1_CAC_AGGR_UPPER_BASE_IDX 1
+#define regSE2_CAC_AGGR_LOWER 0x1ad8
+#define regSE2_CAC_AGGR_LOWER_BASE_IDX 1
+#define regSE2_CAC_AGGR_UPPER 0x1ad9
+#define regSE2_CAC_AGGR_UPPER_BASE_IDX 1
+#define regGC_CAC_AGGR_GFXCLK_CYCLE 0x1ae4
+#define regGC_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE0_CAC_AGGR_GFXCLK_CYCLE 0x1ae5
+#define regSE0_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE1_CAC_AGGR_GFXCLK_CYCLE 0x1ae6
+#define regSE1_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE2_CAC_AGGR_GFXCLK_CYCLE 0x1ae7
+#define regSE2_CAC_AGGR_GFXCLK_CYCLE_BASE_IDX 1
+#define regGC_EDC_CTRL 0x1aed
+#define regGC_EDC_CTRL_BASE_IDX 1
+#define regGC_EDC_THRESHOLD 0x1aee
+#define regGC_EDC_THRESHOLD_BASE_IDX 1
+#define regGC_EDC_STRETCH_CTRL 0x1aef
+#define regGC_EDC_STRETCH_CTRL_BASE_IDX 1
+#define regGC_EDC_STRETCH_THRESHOLD 0x1af0
+#define regGC_EDC_STRETCH_THRESHOLD_BASE_IDX 1
+#define regEDC_HYSTERESIS_CNTL 0x1af1
+#define regEDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGC_THROTTLE_CTRL 0x1af2
+#define regGC_THROTTLE_CTRL_BASE_IDX 1
+#define regGC_THROTTLE_CTRL1 0x1af3
+#define regGC_THROTTLE_CTRL1_BASE_IDX 1
+#define regPCC_STALL_PATTERN_CTRL 0x1af4
+#define regPCC_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_CTRL 0x1af5
+#define regPWRBRK_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regPCC_STALL_PATTERN_1_2 0x1af6
+#define regPCC_STALL_PATTERN_1_2_BASE_IDX 1
+#define regPCC_STALL_PATTERN_3_4 0x1af7
+#define regPCC_STALL_PATTERN_3_4_BASE_IDX 1
+#define regPCC_STALL_PATTERN_5_6 0x1af8
+#define regPCC_STALL_PATTERN_5_6_BASE_IDX 1
+#define regPCC_STALL_PATTERN_7 0x1af9
+#define regPCC_STALL_PATTERN_7_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_1_2 0x1afa
+#define regPWRBRK_STALL_PATTERN_1_2_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_3_4 0x1afb
+#define regPWRBRK_STALL_PATTERN_3_4_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_5_6 0x1afc
+#define regPWRBRK_STALL_PATTERN_5_6_BASE_IDX 1
+#define regPWRBRK_STALL_PATTERN_7 0x1afd
+#define regPWRBRK_STALL_PATTERN_7_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_CTRL 0x1afe
+#define regDIDT_STALL_PATTERN_CTRL_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_1_2 0x1aff
+#define regDIDT_STALL_PATTERN_1_2_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_3_4 0x1b00
+#define regDIDT_STALL_PATTERN_3_4_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_5_6 0x1b01
+#define regDIDT_STALL_PATTERN_5_6_BASE_IDX 1
+#define regDIDT_STALL_PATTERN_7 0x1b02
+#define regDIDT_STALL_PATTERN_7_BASE_IDX 1
+#define regPCC_PWRBRK_HYSTERESIS_CTRL 0x1b03
+#define regPCC_PWRBRK_HYSTERESIS_CTRL_BASE_IDX 1
+#define regEDC_STRETCH_PERF_COUNTER 0x1b04
+#define regEDC_STRETCH_PERF_COUNTER_BASE_IDX 1
+#define regEDC_UNSTRETCH_PERF_COUNTER 0x1b05
+#define regEDC_UNSTRETCH_PERF_COUNTER_BASE_IDX 1
+#define regEDC_STRETCH_NUM_PERF_COUNTER 0x1b06
+#define regEDC_STRETCH_NUM_PERF_COUNTER_BASE_IDX 1
+#define regGC_EDC_STATUS 0x1b07
+#define regGC_EDC_STATUS_BASE_IDX 1
+#define regGC_EDC_OVERFLOW 0x1b08
+#define regGC_EDC_OVERFLOW_BASE_IDX 1
+#define regGC_EDC_ROLLING_POWER_DELTA 0x1b09
+#define regGC_EDC_ROLLING_POWER_DELTA_BASE_IDX 1
+#define regGC_THROTTLE_STATUS 0x1b0a
+#define regGC_THROTTLE_STATUS_BASE_IDX 1
+#define regEDC_PERF_COUNTER 0x1b0b
+#define regEDC_PERF_COUNTER_BASE_IDX 1
+#define regPCC_PERF_COUNTER 0x1b0c
+#define regPCC_PERF_COUNTER_BASE_IDX 1
+#define regPWRBRK_PERF_COUNTER 0x1b0d
+#define regPWRBRK_PERF_COUNTER_BASE_IDX 1
+#define regEDC_HYSTERESIS_STAT 0x1b0e
+#define regEDC_HYSTERESIS_STAT_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CP_0 0x1b10
+#define regGC_CAC_WEIGHT_CP_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CP_1 0x1b11
+#define regGC_CAC_WEIGHT_CP_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_0 0x1b12
+#define regGC_CAC_WEIGHT_EA_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_1 0x1b13
+#define regGC_CAC_WEIGHT_EA_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_EA_2 0x1b14
+#define regGC_CAC_WEIGHT_EA_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_0 0x1b15
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_1 0x1b16
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_2 0x1b17
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_3 0x1b18
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_4 0x1b19
+#define regGC_CAC_WEIGHT_UTCL2_ROUTER_4_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_0 0x1b1a
+#define regGC_CAC_WEIGHT_UTCL2_VML2_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_1 0x1b1b
+#define regGC_CAC_WEIGHT_UTCL2_VML2_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_VML2_2 0x1b1c
+#define regGC_CAC_WEIGHT_UTCL2_VML2_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_0 0x1b1d
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_1 0x1b1e
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_2 0x1b1f
+#define regGC_CAC_WEIGHT_UTCL2_WALKER_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_0 0x1b20
+#define regGC_CAC_WEIGHT_GDS_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_1 0x1b21
+#define regGC_CAC_WEIGHT_GDS_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GDS_2 0x1b22
+#define regGC_CAC_WEIGHT_GDS_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_0 0x1b23
+#define regGC_CAC_WEIGHT_GE_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_1 0x1b24
+#define regGC_CAC_WEIGHT_GE_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_2 0x1b25
+#define regGC_CAC_WEIGHT_GE_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GE_3 0x1b26
+#define regGC_CAC_WEIGHT_GE_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PMM_0 0x1b2e
+#define regGC_CAC_WEIGHT_PMM_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_0 0x1b2f
+#define regGC_CAC_WEIGHT_GL2C_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_1 0x1b30
+#define regGC_CAC_WEIGHT_GL2C_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GL2C_2 0x1b31
+#define regGC_CAC_WEIGHT_GL2C_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_0 0x1b32
+#define regGC_CAC_WEIGHT_PH_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_1 0x1b33
+#define regGC_CAC_WEIGHT_PH_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_2 0x1b34
+#define regGC_CAC_WEIGHT_PH_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_PH_3 0x1b35
+#define regGC_CAC_WEIGHT_PH_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_0 0x1b36
+#define regGC_CAC_WEIGHT_SDMA_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_1 0x1b37
+#define regGC_CAC_WEIGHT_SDMA_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_2 0x1b38
+#define regGC_CAC_WEIGHT_SDMA_2_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_3 0x1b39
+#define regGC_CAC_WEIGHT_SDMA_3_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_4 0x1b3a
+#define regGC_CAC_WEIGHT_SDMA_4_BASE_IDX 1
+#define regGC_CAC_WEIGHT_SDMA_5 0x1b3b
+#define regGC_CAC_WEIGHT_SDMA_5_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CHC_0 0x1b3c
+#define regGC_CAC_WEIGHT_CHC_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_CHC_1 0x1b3d
+#define regGC_CAC_WEIGHT_CHC_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GUS_0 0x1b3e
+#define regGC_CAC_WEIGHT_GUS_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GUS_1 0x1b3f
+#define regGC_CAC_WEIGHT_GUS_1_BASE_IDX 1
+#define regGC_CAC_WEIGHT_RLC_0 0x1b40
+#define regGC_CAC_WEIGHT_RLC_0_BASE_IDX 1
+#define regGC_CAC_WEIGHT_GRBM_0 0x1b44
+#define regGC_CAC_WEIGHT_GRBM_0_BASE_IDX 1
+#define regGC_EDC_CLK_MONITOR_CTRL 0x1b56
+#define regGC_EDC_CLK_MONITOR_CTRL_BASE_IDX 1
+#define regGC_CAC_IND_INDEX 0x1b58
+#define regGC_CAC_IND_INDEX_BASE_IDX 1
+#define regGC_CAC_IND_DATA 0x1b59
+#define regGC_CAC_IND_DATA_BASE_IDX 1
+#define regSE_CAC_CTRL_1 0x1b70
+#define regSE_CAC_CTRL_1_BASE_IDX 1
+#define regSE_CAC_CTRL_2 0x1b71
+#define regSE_CAC_CTRL_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TA_0 0x1b72
+#define regSE_CAC_WEIGHT_TA_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_0 0x1b73
+#define regSE_CAC_WEIGHT_TD_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_1 0x1b74
+#define regSE_CAC_WEIGHT_TD_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_2 0x1b75
+#define regSE_CAC_WEIGHT_TD_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_3 0x1b76
+#define regSE_CAC_WEIGHT_TD_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_4 0x1b77
+#define regSE_CAC_WEIGHT_TD_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TD_5 0x1b78
+#define regSE_CAC_WEIGHT_TD_5_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_0 0x1b79
+#define regSE_CAC_WEIGHT_TCP_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_1 0x1b7a
+#define regSE_CAC_WEIGHT_TCP_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_2 0x1b7b
+#define regSE_CAC_WEIGHT_TCP_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_TCP_3 0x1b7c
+#define regSE_CAC_WEIGHT_TCP_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_0 0x1b7d
+#define regSE_CAC_WEIGHT_SQ_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_1 0x1b7e
+#define regSE_CAC_WEIGHT_SQ_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQ_2 0x1b7f
+#define regSE_CAC_WEIGHT_SQ_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SP_0 0x1b80
+#define regSE_CAC_WEIGHT_SP_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SP_1 0x1b81
+#define regSE_CAC_WEIGHT_SP_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_0 0x1b82
+#define regSE_CAC_WEIGHT_LDS_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_1 0x1b83
+#define regSE_CAC_WEIGHT_LDS_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_2 0x1b84
+#define regSE_CAC_WEIGHT_LDS_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_LDS_3 0x1b85
+#define regSE_CAC_WEIGHT_LDS_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQC_0 0x1b87
+#define regSE_CAC_WEIGHT_SQC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SQC_1 0x1b88
+#define regSE_CAC_WEIGHT_SQC_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CU_0 0x1b89
+#define regSE_CAC_WEIGHT_CU_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_BCI_0 0x1b8a
+#define regSE_CAC_WEIGHT_BCI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_0 0x1b8b
+#define regSE_CAC_WEIGHT_CB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_1 0x1b8c
+#define regSE_CAC_WEIGHT_CB_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_2 0x1b8d
+#define regSE_CAC_WEIGHT_CB_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_3 0x1b8e
+#define regSE_CAC_WEIGHT_CB_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_4 0x1b8f
+#define regSE_CAC_WEIGHT_CB_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_5 0x1b90
+#define regSE_CAC_WEIGHT_CB_5_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_6 0x1b91
+#define regSE_CAC_WEIGHT_CB_6_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_7 0x1b92
+#define regSE_CAC_WEIGHT_CB_7_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_8 0x1b93
+#define regSE_CAC_WEIGHT_CB_8_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_9 0x1b94
+#define regSE_CAC_WEIGHT_CB_9_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_10 0x1b95
+#define regSE_CAC_WEIGHT_CB_10_BASE_IDX 1
+#define regSE_CAC_WEIGHT_CB_11 0x1b96
+#define regSE_CAC_WEIGHT_CB_11_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_0 0x1b97
+#define regSE_CAC_WEIGHT_DB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_1 0x1b98
+#define regSE_CAC_WEIGHT_DB_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_2 0x1b99
+#define regSE_CAC_WEIGHT_DB_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_3 0x1b9a
+#define regSE_CAC_WEIGHT_DB_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_DB_4 0x1b9b
+#define regSE_CAC_WEIGHT_DB_4_BASE_IDX 1
+#define regSE_CAC_WEIGHT_RMI_0 0x1b9c
+#define regSE_CAC_WEIGHT_RMI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_RMI_1 0x1b9d
+#define regSE_CAC_WEIGHT_RMI_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SX_0 0x1b9e
+#define regSE_CAC_WEIGHT_SX_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SXRB_0 0x1b9f
+#define regSE_CAC_WEIGHT_SXRB_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_UTCL1_0 0x1ba0
+#define regSE_CAC_WEIGHT_UTCL1_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_0 0x1ba1
+#define regSE_CAC_WEIGHT_GL1C_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_1 0x1ba2
+#define regSE_CAC_WEIGHT_GL1C_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_GL1C_2 0x1ba3
+#define regSE_CAC_WEIGHT_GL1C_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_0 0x1ba4
+#define regSE_CAC_WEIGHT_SPI_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_1 0x1ba5
+#define regSE_CAC_WEIGHT_SPI_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SPI_2 0x1ba6
+#define regSE_CAC_WEIGHT_SPI_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PC_0 0x1ba7
+#define regSE_CAC_WEIGHT_PC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_0 0x1ba8
+#define regSE_CAC_WEIGHT_PA_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_1 0x1ba9
+#define regSE_CAC_WEIGHT_PA_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_2 0x1baa
+#define regSE_CAC_WEIGHT_PA_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_PA_3 0x1bab
+#define regSE_CAC_WEIGHT_PA_3_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_0 0x1bac
+#define regSE_CAC_WEIGHT_SC_0_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_1 0x1bad
+#define regSE_CAC_WEIGHT_SC_1_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_2 0x1bae
+#define regSE_CAC_WEIGHT_SC_2_BASE_IDX 1
+#define regSE_CAC_WEIGHT_SC_3 0x1baf
+#define regSE_CAC_WEIGHT_SC_3_BASE_IDX 1
+#define regSE_CAC_WINDOW_AGGR_VALUE 0x1bb0
+#define regSE_CAC_WINDOW_AGGR_VALUE_BASE_IDX 1
+#define regSE_CAC_WINDOW_GFXCLK_CYCLE 0x1bb1
+#define regSE_CAC_WINDOW_GFXCLK_CYCLE_BASE_IDX 1
+#define regSE_CAC_IND_INDEX 0x1bce
+#define regSE_CAC_IND_INDEX_BASE_IDX 1
+#define regSE_CAC_IND_DATA 0x1bcf
+#define regSE_CAC_IND_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_pfonly2_spidec
+// base address: 0x2f000
+#define regSPI_RESOURCE_RESERVE_CU_0 0x1c00
+#define regSPI_RESOURCE_RESERVE_CU_0_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_1 0x1c01
+#define regSPI_RESOURCE_RESERVE_CU_1_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_2 0x1c02
+#define regSPI_RESOURCE_RESERVE_CU_2_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_3 0x1c03
+#define regSPI_RESOURCE_RESERVE_CU_3_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_4 0x1c04
+#define regSPI_RESOURCE_RESERVE_CU_4_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_5 0x1c05
+#define regSPI_RESOURCE_RESERVE_CU_5_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_6 0x1c06
+#define regSPI_RESOURCE_RESERVE_CU_6_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_7 0x1c07
+#define regSPI_RESOURCE_RESERVE_CU_7_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_8 0x1c08
+#define regSPI_RESOURCE_RESERVE_CU_8_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_9 0x1c09
+#define regSPI_RESOURCE_RESERVE_CU_9_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_10 0x1c0a
+#define regSPI_RESOURCE_RESERVE_CU_10_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_11 0x1c0b
+#define regSPI_RESOURCE_RESERVE_CU_11_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_12 0x1c0c
+#define regSPI_RESOURCE_RESERVE_CU_12_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_13 0x1c0d
+#define regSPI_RESOURCE_RESERVE_CU_13_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_14 0x1c0e
+#define regSPI_RESOURCE_RESERVE_CU_14_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_CU_15 0x1c0f
+#define regSPI_RESOURCE_RESERVE_CU_15_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_0 0x1c10
+#define regSPI_RESOURCE_RESERVE_EN_CU_0_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_1 0x1c11
+#define regSPI_RESOURCE_RESERVE_EN_CU_1_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_2 0x1c12
+#define regSPI_RESOURCE_RESERVE_EN_CU_2_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_3 0x1c13
+#define regSPI_RESOURCE_RESERVE_EN_CU_3_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_4 0x1c14
+#define regSPI_RESOURCE_RESERVE_EN_CU_4_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_5 0x1c15
+#define regSPI_RESOURCE_RESERVE_EN_CU_5_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_6 0x1c16
+#define regSPI_RESOURCE_RESERVE_EN_CU_6_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_7 0x1c17
+#define regSPI_RESOURCE_RESERVE_EN_CU_7_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_8 0x1c18
+#define regSPI_RESOURCE_RESERVE_EN_CU_8_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_9 0x1c19
+#define regSPI_RESOURCE_RESERVE_EN_CU_9_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_10 0x1c1a
+#define regSPI_RESOURCE_RESERVE_EN_CU_10_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_11 0x1c1b
+#define regSPI_RESOURCE_RESERVE_EN_CU_11_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_12 0x1c1c
+#define regSPI_RESOURCE_RESERVE_EN_CU_12_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_13 0x1c1d
+#define regSPI_RESOURCE_RESERVE_EN_CU_13_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_14 0x1c1e
+#define regSPI_RESOURCE_RESERVE_EN_CU_14_BASE_IDX 1
+#define regSPI_RESOURCE_RESERVE_EN_CU_15 0x1c1f
+#define regSPI_RESOURCE_RESERVE_EN_CU_15_BASE_IDX 1
+
+
+// addressBlock: gc_gfxudec
+// base address: 0x30000
+#define regCP_EOP_DONE_ADDR_LO 0x2000
+#define regCP_EOP_DONE_ADDR_LO_BASE_IDX 1
+#define regCP_EOP_DONE_ADDR_HI 0x2001
+#define regCP_EOP_DONE_ADDR_HI_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_LO 0x2002
+#define regCP_EOP_DONE_DATA_LO_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_HI 0x2003
+#define regCP_EOP_DONE_DATA_HI_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_LO 0x2004
+#define regCP_EOP_LAST_FENCE_LO_BASE_IDX 1
+#define regCP_EOP_LAST_FENCE_HI 0x2005
+#define regCP_EOP_LAST_FENCE_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_LO 0x2018
+#define regCP_PIPE_STATS_ADDR_LO_BASE_IDX 1
+#define regCP_PIPE_STATS_ADDR_HI 0x2019
+#define regCP_PIPE_STATS_ADDR_HI_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_LO 0x201a
+#define regCP_VGT_IAVERT_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAVERT_COUNT_HI 0x201b
+#define regCP_VGT_IAVERT_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_LO 0x201c
+#define regCP_VGT_IAPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_IAPRIM_COUNT_HI 0x201d
+#define regCP_VGT_IAPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_LO 0x201e
+#define regCP_VGT_GSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSPRIM_COUNT_HI 0x201f
+#define regCP_VGT_GSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_LO 0x2020
+#define regCP_VGT_VSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_VSINVOC_COUNT_HI 0x2021
+#define regCP_VGT_VSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_LO 0x2022
+#define regCP_VGT_GSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_GSINVOC_COUNT_HI 0x2023
+#define regCP_VGT_GSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_LO 0x2024
+#define regCP_VGT_HSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_HSINVOC_COUNT_HI 0x2025
+#define regCP_VGT_HSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_LO 0x2026
+#define regCP_VGT_DSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_DSINVOC_COUNT_HI 0x2027
+#define regCP_VGT_DSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_LO 0x2028
+#define regCP_PA_CINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CINVOC_COUNT_HI 0x2029
+#define regCP_PA_CINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_LO 0x202a
+#define regCP_PA_CPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_CPRIM_COUNT_HI 0x202b
+#define regCP_PA_CPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_LO 0x202c
+#define regCP_SC_PSINVOC_COUNT0_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT0_HI 0x202d
+#define regCP_SC_PSINVOC_COUNT0_HI_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_LO 0x202e
+#define regCP_SC_PSINVOC_COUNT1_LO_BASE_IDX 1
+#define regCP_SC_PSINVOC_COUNT1_HI 0x202f
+#define regCP_SC_PSINVOC_COUNT1_HI_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_LO 0x2030
+#define regCP_VGT_CSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_CSINVOC_COUNT_HI 0x2031
+#define regCP_VGT_CSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_VGT_ASINVOC_COUNT_LO 0x2032
+#define regCP_VGT_ASINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_VGT_ASINVOC_COUNT_HI 0x2033
+#define regCP_VGT_ASINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_PIPE_STATS_CONTROL 0x203d
+#define regCP_PIPE_STATS_CONTROL_BASE_IDX 1
+#define regSCRATCH_REG0 0x2040
+#define regSCRATCH_REG0_BASE_IDX 1
+#define regSCRATCH_REG1 0x2041
+#define regSCRATCH_REG1_BASE_IDX 1
+#define regSCRATCH_REG2 0x2042
+#define regSCRATCH_REG2_BASE_IDX 1
+#define regSCRATCH_REG3 0x2043
+#define regSCRATCH_REG3_BASE_IDX 1
+#define regSCRATCH_REG4 0x2044
+#define regSCRATCH_REG4_BASE_IDX 1
+#define regSCRATCH_REG5 0x2045
+#define regSCRATCH_REG5_BASE_IDX 1
+#define regSCRATCH_REG6 0x2046
+#define regSCRATCH_REG6_BASE_IDX 1
+#define regSCRATCH_REG7 0x2047
+#define regSCRATCH_REG7_BASE_IDX 1
+#define regSCRATCH_REG_ATOMIC 0x2048
+#define regSCRATCH_REG_ATOMIC_BASE_IDX 1
+#define regSCRATCH_REG_CMPSWAP_ATOMIC 0x2048
+#define regSCRATCH_REG_CMPSWAP_ATOMIC_BASE_IDX 1
+#define regCP_APPEND_DDID_CNT 0x204b
+#define regCP_APPEND_DDID_CNT_BASE_IDX 1
+#define regCP_APPEND_DATA_HI 0x204c
+#define regCP_APPEND_DATA_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_HI 0x204d
+#define regCP_APPEND_LAST_CS_FENCE_HI_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_HI 0x204e
+#define regCP_APPEND_LAST_PS_FENCE_HI_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_LO 0x2052
+#define regCP_PFP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_ATOMIC_PREOP_HI 0x2053
+#define regCP_PFP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO 0x2054
+#define regCP_PFP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI 0x2055
+#define regCP_PFP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO 0x2056
+#define regCP_PFP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI 0x2057
+#define regCP_PFP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_APPEND_ADDR_LO 0x2058
+#define regCP_APPEND_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_ADDR_HI 0x2059
+#define regCP_APPEND_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_DATA 0x205a
+#define regCP_APPEND_DATA_BASE_IDX 1
+#define regCP_APPEND_DATA_LO 0x205a
+#define regCP_APPEND_DATA_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_BASE_IDX 1
+#define regCP_APPEND_LAST_CS_FENCE_LO 0x205b
+#define regCP_APPEND_LAST_CS_FENCE_LO_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_BASE_IDX 1
+#define regCP_APPEND_LAST_PS_FENCE_LO 0x205c
+#define regCP_APPEND_LAST_PS_FENCE_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_LO 0x205d
+#define regCP_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_LO 0x205d
+#define regCP_ME_ATOMIC_PREOP_LO_BASE_IDX 1
+#define regCP_ATOMIC_PREOP_HI 0x205e
+#define regCP_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_ME_ATOMIC_PREOP_HI 0x205e
+#define regCP_ME_ATOMIC_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO 0x205f
+#define regCP_ME_GDS_ATOMIC0_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI 0x2060
+#define regCP_ME_GDS_ATOMIC0_PREOP_HI_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO 0x2061
+#define regCP_ME_GDS_ATOMIC1_PREOP_LO_BASE_IDX 1
+#define regCP_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI 0x2062
+#define regCP_ME_GDS_ATOMIC1_PREOP_HI_BASE_IDX 1
+#define regCP_ME_MC_WADDR_LO 0x2069
+#define regCP_ME_MC_WADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_WADDR_HI 0x206a
+#define regCP_ME_MC_WADDR_HI_BASE_IDX 1
+#define regCP_ME_MC_WDATA_LO 0x206b
+#define regCP_ME_MC_WDATA_LO_BASE_IDX 1
+#define regCP_ME_MC_WDATA_HI 0x206c
+#define regCP_ME_MC_WDATA_HI_BASE_IDX 1
+#define regCP_ME_MC_RADDR_LO 0x206d
+#define regCP_ME_MC_RADDR_LO_BASE_IDX 1
+#define regCP_ME_MC_RADDR_HI 0x206e
+#define regCP_ME_MC_RADDR_HI_BASE_IDX 1
+#define regCP_SEM_WAIT_TIMER 0x206f
+#define regCP_SEM_WAIT_TIMER_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_LO 0x2070
+#define regCP_SIG_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_SIG_SEM_ADDR_HI 0x2071
+#define regCP_SIG_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_WAIT_REG_MEM_TIMEOUT 0x2074
+#define regCP_WAIT_REG_MEM_TIMEOUT_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_LO 0x2075
+#define regCP_WAIT_SEM_ADDR_LO_BASE_IDX 1
+#define regCP_WAIT_SEM_ADDR_HI 0x2076
+#define regCP_WAIT_SEM_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CONTROL 0x2077
+#define regCP_DMA_PFP_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_CONTROL 0x2078
+#define regCP_DMA_ME_CONTROL_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR 0x2080
+#define regCP_DMA_ME_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_SRC_ADDR_HI 0x2081
+#define regCP_DMA_ME_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR 0x2082
+#define regCP_DMA_ME_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_ME_DST_ADDR_HI 0x2083
+#define regCP_DMA_ME_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_ME_COMMAND 0x2084
+#define regCP_DMA_ME_COMMAND_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR 0x2085
+#define regCP_DMA_PFP_SRC_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_SRC_ADDR_HI 0x2086
+#define regCP_DMA_PFP_SRC_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR 0x2087
+#define regCP_DMA_PFP_DST_ADDR_BASE_IDX 1
+#define regCP_DMA_PFP_DST_ADDR_HI 0x2088
+#define regCP_DMA_PFP_DST_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_COMMAND 0x2089
+#define regCP_DMA_PFP_COMMAND_BASE_IDX 1
+#define regCP_DMA_CNTL 0x208a
+#define regCP_DMA_CNTL_BASE_IDX 1
+#define regCP_DMA_READ_TAGS 0x208b
+#define regCP_DMA_READ_TAGS_BASE_IDX 1
+#define regCP_PFP_IB_CONTROL 0x208d
+#define regCP_PFP_IB_CONTROL_BASE_IDX 1
+#define regCP_PFP_LOAD_CONTROL 0x208e
+#define regCP_PFP_LOAD_CONTROL_BASE_IDX 1
+#define regCP_SCRATCH_INDEX 0x208f
+#define regCP_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_SCRATCH_DATA 0x2090
+#define regCP_SCRATCH_DATA_BASE_IDX 1
+#define regCP_RB_OFFSET 0x2091
+#define regCP_RB_OFFSET_BASE_IDX 1
+#define regCP_IB1_OFFSET 0x2092
+#define regCP_IB1_OFFSET_BASE_IDX 1
+#define regCP_IB2_OFFSET 0x2093
+#define regCP_IB2_OFFSET_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_BEGIN 0x2094
+#define regCP_IB1_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB1_PREAMBLE_END 0x2095
+#define regCP_IB1_PREAMBLE_END_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_BEGIN 0x2096
+#define regCP_IB2_PREAMBLE_BEGIN_BASE_IDX 1
+#define regCP_IB2_PREAMBLE_END 0x2097
+#define regCP_IB2_PREAMBLE_END_BASE_IDX 1
+#define regCP_DMA_ME_CMD_ADDR_LO 0x209c
+#define regCP_DMA_ME_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_DMA_ME_CMD_ADDR_HI 0x209d
+#define regCP_DMA_ME_CMD_ADDR_HI_BASE_IDX 1
+#define regCP_DMA_PFP_CMD_ADDR_LO 0x209e
+#define regCP_DMA_PFP_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_DMA_PFP_CMD_ADDR_HI 0x209f
+#define regCP_DMA_PFP_CMD_ADDR_HI_BASE_IDX 1
+#define regCP_APPEND_CMD_ADDR_LO 0x20a0
+#define regCP_APPEND_CMD_ADDR_LO_BASE_IDX 1
+#define regCP_APPEND_CMD_ADDR_HI 0x20a1
+#define regCP_APPEND_CMD_ADDR_HI_BASE_IDX 1
+#define regUCONFIG_RESERVED_REG0 0x20a2
+#define regUCONFIG_RESERVED_REG0_BASE_IDX 1
+#define regUCONFIG_RESERVED_REG1 0x20a3
+#define regUCONFIG_RESERVED_REG1_BASE_IDX 1
+#define regCP_PA_MSPRIM_COUNT_LO 0x20a4
+#define regCP_PA_MSPRIM_COUNT_LO_BASE_IDX 1
+#define regCP_PA_MSPRIM_COUNT_HI 0x20a5
+#define regCP_PA_MSPRIM_COUNT_HI_BASE_IDX 1
+#define regCP_GE_MSINVOC_COUNT_LO 0x20a6
+#define regCP_GE_MSINVOC_COUNT_LO_BASE_IDX 1
+#define regCP_GE_MSINVOC_COUNT_HI 0x20a7
+#define regCP_GE_MSINVOC_COUNT_HI_BASE_IDX 1
+#define regCP_IB1_CMD_BUFSZ 0x20c0
+#define regCP_IB1_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB2_CMD_BUFSZ 0x20c1
+#define regCP_IB2_CMD_BUFSZ_BASE_IDX 1
+#define regCP_ST_CMD_BUFSZ 0x20c2
+#define regCP_ST_CMD_BUFSZ_BASE_IDX 1
+#define regCP_IB1_BASE_LO 0x20cc
+#define regCP_IB1_BASE_LO_BASE_IDX 1
+#define regCP_IB1_BASE_HI 0x20cd
+#define regCP_IB1_BASE_HI_BASE_IDX 1
+#define regCP_IB1_BUFSZ 0x20ce
+#define regCP_IB1_BUFSZ_BASE_IDX 1
+#define regCP_IB2_BASE_LO 0x20cf
+#define regCP_IB2_BASE_LO_BASE_IDX 1
+#define regCP_IB2_BASE_HI 0x20d0
+#define regCP_IB2_BASE_HI_BASE_IDX 1
+#define regCP_IB2_BUFSZ 0x20d1
+#define regCP_IB2_BUFSZ_BASE_IDX 1
+#define regCP_ST_BASE_LO 0x20d2
+#define regCP_ST_BASE_LO_BASE_IDX 1
+#define regCP_ST_BASE_HI 0x20d3
+#define regCP_ST_BASE_HI_BASE_IDX 1
+#define regCP_ST_BUFSZ 0x20d4
+#define regCP_ST_BUFSZ_BASE_IDX 1
+#define regCP_EOP_DONE_EVENT_CNTL 0x20d5
+#define regCP_EOP_DONE_EVENT_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_DATA_CNTL 0x20d6
+#define regCP_EOP_DONE_DATA_CNTL_BASE_IDX 1
+#define regCP_EOP_DONE_CNTX_ID 0x20d7
+#define regCP_EOP_DONE_CNTX_ID_BASE_IDX 1
+#define regCP_DB_BASE_LO 0x20d8
+#define regCP_DB_BASE_LO_BASE_IDX 1
+#define regCP_DB_BASE_HI 0x20d9
+#define regCP_DB_BASE_HI_BASE_IDX 1
+#define regCP_DB_BUFSZ 0x20da
+#define regCP_DB_BUFSZ_BASE_IDX 1
+#define regCP_DB_CMD_BUFSZ 0x20db
+#define regCP_DB_CMD_BUFSZ_BASE_IDX 1
+#define regCP_PFP_COMPLETION_STATUS 0x20ec
+#define regCP_PFP_COMPLETION_STATUS_BASE_IDX 1
+#define regCP_PRED_NOT_VISIBLE 0x20ee
+#define regCP_PRED_NOT_VISIBLE_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR 0x20f0
+#define regCP_PFP_METADATA_BASE_ADDR_BASE_IDX 1
+#define regCP_PFP_METADATA_BASE_ADDR_HI 0x20f1
+#define regCP_PFP_METADATA_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR 0x20f4
+#define regCP_DRAW_INDX_INDR_ADDR_BASE_IDX 1
+#define regCP_DRAW_INDX_INDR_ADDR_HI 0x20f5
+#define regCP_DRAW_INDX_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR 0x20f6
+#define regCP_DISPATCH_INDR_ADDR_BASE_IDX 1
+#define regCP_DISPATCH_INDR_ADDR_HI 0x20f7
+#define regCP_DISPATCH_INDR_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR 0x20f8
+#define regCP_INDEX_BASE_ADDR_BASE_IDX 1
+#define regCP_INDEX_BASE_ADDR_HI 0x20f9
+#define regCP_INDEX_BASE_ADDR_HI_BASE_IDX 1
+#define regCP_INDEX_TYPE 0x20fa
+#define regCP_INDEX_TYPE_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR 0x20fb
+#define regCP_GDS_BKUP_ADDR_BASE_IDX 1
+#define regCP_GDS_BKUP_ADDR_HI 0x20fc
+#define regCP_GDS_BKUP_ADDR_HI_BASE_IDX 1
+#define regCP_SAMPLE_STATUS 0x20fd
+#define regCP_SAMPLE_STATUS_BASE_IDX 1
+#define regCP_ME_COHER_CNTL 0x20fe
+#define regCP_ME_COHER_CNTL_BASE_IDX 1
+#define regCP_ME_COHER_SIZE 0x20ff
+#define regCP_ME_COHER_SIZE_BASE_IDX 1
+#define regCP_ME_COHER_SIZE_HI 0x2100
+#define regCP_ME_COHER_SIZE_HI_BASE_IDX 1
+#define regCP_ME_COHER_BASE 0x2101
+#define regCP_ME_COHER_BASE_BASE_IDX 1
+#define regCP_ME_COHER_BASE_HI 0x2102
+#define regCP_ME_COHER_BASE_HI_BASE_IDX 1
+#define regCP_ME_COHER_STATUS 0x2103
+#define regCP_ME_COHER_STATUS_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_0 0x2140
+#define regRLC_GPM_PERF_COUNT_0_BASE_IDX 1
+#define regRLC_GPM_PERF_COUNT_1 0x2141
+#define regRLC_GPM_PERF_COUNT_1_BASE_IDX 1
+#define regGRBM_GFX_INDEX 0x2200
+#define regGRBM_GFX_INDEX_BASE_IDX 1
+#define regVGT_PRIMITIVE_TYPE 0x2242
+#define regVGT_PRIMITIVE_TYPE_BASE_IDX 1
+#define regVGT_INDEX_TYPE 0x2243
+#define regVGT_INDEX_TYPE_BASE_IDX 1
+#define regGE_MIN_VTX_INDX 0x2249
+#define regGE_MIN_VTX_INDX_BASE_IDX 1
+#define regGE_INDX_OFFSET 0x224a
+#define regGE_INDX_OFFSET_BASE_IDX 1
+#define regGE_MULTI_PRIM_IB_RESET_EN 0x224b
+#define regGE_MULTI_PRIM_IB_RESET_EN_BASE_IDX 1
+#define regVGT_NUM_INDICES 0x224c
+#define regVGT_NUM_INDICES_BASE_IDX 1
+#define regVGT_NUM_INSTANCES 0x224d
+#define regVGT_NUM_INSTANCES_BASE_IDX 1
+#define regVGT_TF_RING_SIZE 0x224e
+#define regVGT_TF_RING_SIZE_BASE_IDX 1
+#define regVGT_HS_OFFCHIP_PARAM 0x224f
+#define regVGT_HS_OFFCHIP_PARAM_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE 0x2250
+#define regVGT_TF_MEMORY_BASE_BASE_IDX 1
+#define regGE_MAX_VTX_INDX 0x2259
+#define regGE_MAX_VTX_INDX_BASE_IDX 1
+#define regVGT_INSTANCE_BASE_ID 0x225a
+#define regVGT_INSTANCE_BASE_ID_BASE_IDX 1
+#define regGE_CNTL 0x225b
+#define regGE_CNTL_BASE_IDX 1
+#define regGE_USER_VGPR1 0x225c
+#define regGE_USER_VGPR1_BASE_IDX 1
+#define regGE_USER_VGPR2 0x225d
+#define regGE_USER_VGPR2_BASE_IDX 1
+#define regGE_USER_VGPR3 0x225e
+#define regGE_USER_VGPR3_BASE_IDX 1
+#define regGE_STEREO_CNTL 0x225f
+#define regGE_STEREO_CNTL_BASE_IDX 1
+#define regGE_PC_ALLOC 0x2260
+#define regGE_PC_ALLOC_BASE_IDX 1
+#define regVGT_TF_MEMORY_BASE_HI 0x2261
+#define regVGT_TF_MEMORY_BASE_HI_BASE_IDX 1
+#define regGE_USER_VGPR_EN 0x2262
+#define regGE_USER_VGPR_EN_BASE_IDX 1
+#define regGE_GS_FAST_LAUNCH_WG_DIM 0x2264
+#define regGE_GS_FAST_LAUNCH_WG_DIM_BASE_IDX 1
+#define regGE_GS_FAST_LAUNCH_WG_DIM_1 0x2265
+#define regGE_GS_FAST_LAUNCH_WG_DIM_1_BASE_IDX 1
+#define regVGT_GS_OUT_PRIM_TYPE 0x2266
+#define regVGT_GS_OUT_PRIM_TYPE_BASE_IDX 1
+#define regPA_SU_LINE_STIPPLE_VALUE 0x2280
+#define regPA_SU_LINE_STIPPLE_VALUE_BASE_IDX 1
+#define regPA_SC_LINE_STIPPLE_STATE 0x2281
+#define regPA_SC_LINE_STIPPLE_STATE_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_0 0x2284
+#define regPA_SC_SCREEN_EXTENT_MIN_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_0 0x2285
+#define regPA_SC_SCREEN_EXTENT_MAX_0_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MIN_1 0x2286
+#define regPA_SC_SCREEN_EXTENT_MIN_1_BASE_IDX 1
+#define regPA_SC_SCREEN_EXTENT_MAX_1 0x228b
+#define regPA_SC_SCREEN_EXTENT_MAX_1_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN 0x22a0
+#define regPA_SC_P3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_H 0x22a1
+#define regPA_SC_P3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_V 0x22a2
+#define regPA_SC_P3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE 0x22a3
+#define regPA_SC_P3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT 0x22a4
+#define regPA_SC_P3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN 0x22a8
+#define regPA_SC_HP3D_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_H 0x22a9
+#define regPA_SC_HP3D_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_V 0x22aa
+#define regPA_SC_HP3D_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE 0x22ab
+#define regPA_SC_HP3D_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT 0x22ac
+#define regPA_SC_HP3D_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_HV_EN 0x22b0
+#define regPA_SC_TRAP_SCREEN_HV_EN_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_H 0x22b1
+#define regPA_SC_TRAP_SCREEN_H_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_V 0x22b2
+#define regPA_SC_TRAP_SCREEN_V_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE 0x22b3
+#define regPA_SC_TRAP_SCREEN_OCCURRENCE_BASE_IDX 1
+#define regPA_SC_TRAP_SCREEN_COUNT 0x22b4
+#define regPA_SC_TRAP_SCREEN_COUNT_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_0 0x2340
+#define regSQ_THREAD_TRACE_USERDATA_0_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_1 0x2341
+#define regSQ_THREAD_TRACE_USERDATA_1_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_2 0x2342
+#define regSQ_THREAD_TRACE_USERDATA_2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_3 0x2343
+#define regSQ_THREAD_TRACE_USERDATA_3_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_4 0x2344
+#define regSQ_THREAD_TRACE_USERDATA_4_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_5 0x2345
+#define regSQ_THREAD_TRACE_USERDATA_5_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_6 0x2346
+#define regSQ_THREAD_TRACE_USERDATA_6_BASE_IDX 1
+#define regSQ_THREAD_TRACE_USERDATA_7 0x2347
+#define regSQ_THREAD_TRACE_USERDATA_7_BASE_IDX 1
+#define regSQC_CACHES 0x2348
+#define regSQC_CACHES_BASE_IDX 1
+#define regTA_CS_BC_BASE_ADDR 0x2380
+#define regTA_CS_BC_BASE_ADDR_BASE_IDX 1
+#define regTA_CS_BC_BASE_ADDR_HI 0x2381
+#define regTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_LOW 0x23c0
+#define regDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT0_HI 0x23c1
+#define regDB_OCCLUSION_COUNT0_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_LOW 0x23c2
+#define regDB_OCCLUSION_COUNT1_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT1_HI 0x23c3
+#define regDB_OCCLUSION_COUNT1_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_LOW 0x23c4
+#define regDB_OCCLUSION_COUNT2_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT2_HI 0x23c5
+#define regDB_OCCLUSION_COUNT2_HI_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_LOW 0x23c6
+#define regDB_OCCLUSION_COUNT3_LOW_BASE_IDX 1
+#define regDB_OCCLUSION_COUNT3_HI 0x23c7
+#define regDB_OCCLUSION_COUNT3_HI_BASE_IDX 1
+#define regGDS_RD_ADDR 0x2400
+#define regGDS_RD_ADDR_BASE_IDX 1
+#define regGDS_RD_DATA 0x2401
+#define regGDS_RD_DATA_BASE_IDX 1
+#define regGDS_RD_BURST_ADDR 0x2402
+#define regGDS_RD_BURST_ADDR_BASE_IDX 1
+#define regGDS_RD_BURST_COUNT 0x2403
+#define regGDS_RD_BURST_COUNT_BASE_IDX 1
+#define regGDS_RD_BURST_DATA 0x2404
+#define regGDS_RD_BURST_DATA_BASE_IDX 1
+#define regGDS_WR_ADDR 0x2405
+#define regGDS_WR_ADDR_BASE_IDX 1
+#define regGDS_WR_DATA 0x2406
+#define regGDS_WR_DATA_BASE_IDX 1
+#define regGDS_WR_BURST_ADDR 0x2407
+#define regGDS_WR_BURST_ADDR_BASE_IDX 1
+#define regGDS_WR_BURST_DATA 0x2408
+#define regGDS_WR_BURST_DATA_BASE_IDX 1
+#define regGDS_WRITE_COMPLETE 0x2409
+#define regGDS_WRITE_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_CNTL 0x240a
+#define regGDS_ATOM_CNTL_BASE_IDX 1
+#define regGDS_ATOM_COMPLETE 0x240b
+#define regGDS_ATOM_COMPLETE_BASE_IDX 1
+#define regGDS_ATOM_BASE 0x240c
+#define regGDS_ATOM_BASE_BASE_IDX 1
+#define regGDS_ATOM_SIZE 0x240d
+#define regGDS_ATOM_SIZE_BASE_IDX 1
+#define regGDS_ATOM_OFFSET0 0x240e
+#define regGDS_ATOM_OFFSET0_BASE_IDX 1
+#define regGDS_ATOM_OFFSET1 0x240f
+#define regGDS_ATOM_OFFSET1_BASE_IDX 1
+#define regGDS_ATOM_DST 0x2410
+#define regGDS_ATOM_DST_BASE_IDX 1
+#define regGDS_ATOM_OP 0x2411
+#define regGDS_ATOM_OP_BASE_IDX 1
+#define regGDS_ATOM_SRC0 0x2412
+#define regGDS_ATOM_SRC0_BASE_IDX 1
+#define regGDS_ATOM_SRC0_U 0x2413
+#define regGDS_ATOM_SRC0_U_BASE_IDX 1
+#define regGDS_ATOM_SRC1 0x2414
+#define regGDS_ATOM_SRC1_BASE_IDX 1
+#define regGDS_ATOM_SRC1_U 0x2415
+#define regGDS_ATOM_SRC1_U_BASE_IDX 1
+#define regGDS_ATOM_READ0 0x2416
+#define regGDS_ATOM_READ0_BASE_IDX 1
+#define regGDS_ATOM_READ0_U 0x2417
+#define regGDS_ATOM_READ0_U_BASE_IDX 1
+#define regGDS_ATOM_READ1 0x2418
+#define regGDS_ATOM_READ1_BASE_IDX 1
+#define regGDS_ATOM_READ1_U 0x2419
+#define regGDS_ATOM_READ1_U_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNTL 0x241a
+#define regGDS_GWS_RESOURCE_CNTL_BASE_IDX 1
+#define regGDS_GWS_RESOURCE 0x241b
+#define regGDS_GWS_RESOURCE_BASE_IDX 1
+#define regGDS_GWS_RESOURCE_CNT 0x241c
+#define regGDS_GWS_RESOURCE_CNT_BASE_IDX 1
+#define regGDS_OA_CNTL 0x241d
+#define regGDS_OA_CNTL_BASE_IDX 1
+#define regGDS_OA_COUNTER 0x241e
+#define regGDS_OA_COUNTER_BASE_IDX 1
+#define regGDS_OA_ADDRESS 0x241f
+#define regGDS_OA_ADDRESS_BASE_IDX 1
+#define regGDS_OA_INCDEC 0x2420
+#define regGDS_OA_INCDEC_BASE_IDX 1
+#define regGDS_OA_RING_SIZE 0x2421
+#define regGDS_OA_RING_SIZE_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_0 0x2422
+#define regGDS_STRMOUT_DWORDS_WRITTEN_0_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_1 0x2423
+#define regGDS_STRMOUT_DWORDS_WRITTEN_1_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_2 0x2424
+#define regGDS_STRMOUT_DWORDS_WRITTEN_2_BASE_IDX 1
+#define regGDS_STRMOUT_DWORDS_WRITTEN_3 0x2425
+#define regGDS_STRMOUT_DWORDS_WRITTEN_3_BASE_IDX 1
+#define regGDS_GS_0 0x2426
+#define regGDS_GS_0_BASE_IDX 1
+#define regGDS_GS_1 0x2427
+#define regGDS_GS_1_BASE_IDX 1
+#define regGDS_GS_2 0x2428
+#define regGDS_GS_2_BASE_IDX 1
+#define regGDS_GS_3 0x2429
+#define regGDS_GS_3_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_LO 0x242a
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_HI 0x242b
+#define regGDS_STRMOUT_PRIMS_NEEDED_0_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_LO 0x242c
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_HI 0x242d
+#define regGDS_STRMOUT_PRIMS_WRITTEN_0_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_LO 0x242e
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_HI 0x242f
+#define regGDS_STRMOUT_PRIMS_NEEDED_1_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_LO 0x2430
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_HI 0x2431
+#define regGDS_STRMOUT_PRIMS_WRITTEN_1_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_LO 0x2432
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_HI 0x2433
+#define regGDS_STRMOUT_PRIMS_NEEDED_2_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_LO 0x2434
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_HI 0x2435
+#define regGDS_STRMOUT_PRIMS_WRITTEN_2_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_LO 0x2436
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_HI 0x2437
+#define regGDS_STRMOUT_PRIMS_NEEDED_3_HI_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_LO 0x2438
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_LO_BASE_IDX 1
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_HI 0x2439
+#define regGDS_STRMOUT_PRIMS_WRITTEN_3_HI_BASE_IDX 1
+#define regSPI_CONFIG_CNTL 0x2440
+#define regSPI_CONFIG_CNTL_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_1 0x2441
+#define regSPI_CONFIG_CNTL_1_BASE_IDX 1
+#define regSPI_CONFIG_CNTL_2 0x2442
+#define regSPI_CONFIG_CNTL_2_BASE_IDX 1
+#define regSPI_WAVE_LIMIT_CNTL 0x2443
+#define regSPI_WAVE_LIMIT_CNTL_BASE_IDX 1
+#define regSPI_GS_THROTTLE_CNTL1 0x2444
+#define regSPI_GS_THROTTLE_CNTL1_BASE_IDX 1
+#define regSPI_GS_THROTTLE_CNTL2 0x2445
+#define regSPI_GS_THROTTLE_CNTL2_BASE_IDX 1
+#define regSPI_ATTRIBUTE_RING_BASE 0x2446
+#define regSPI_ATTRIBUTE_RING_BASE_BASE_IDX 1
+#define regSPI_ATTRIBUTE_RING_SIZE 0x2447
+#define regSPI_ATTRIBUTE_RING_SIZE_BASE_IDX 1
+
+
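Editorial aside (not part of the patch): the reg*/*_BASE_IDX pairs above only become usable MMIO offsets once combined with a per-segment base table, and the minimal, self-contained C sketch below shows that arithmetic. The segment table values and the helper name reg_dword_offset() are assumptions, chosen so that regCP_EOP_DONE_ADDR_LO resolves to byte address 0x30000, the base address stated in the gc_gfxudec block comment; a real driver looks the segment base up per IP block and instance rather than hard-coding it.

    /* Minimal sketch, assuming a two-entry segment base table; entry 1 is
     * set to 0xa000 dwords so the first gc_gfxudec register lands on the
     * block's stated byte base of 0x30000.  Illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define regCP_EOP_DONE_ADDR_LO          0x2000
    #define regCP_EOP_DONE_ADDR_LO_BASE_IDX 1

    /* hypothetical per-segment bases, indexed by *_BASE_IDX (dword units) */
    static const uint32_t gc_base_segment[] = { 0x00001260, 0x0000a000 };

    /* absolute dword offset = segment base + per-block register offset */
    static uint32_t reg_dword_offset(uint32_t reg, uint32_t base_idx)
    {
            return gc_base_segment[base_idx] + reg;
    }

    int main(void)
    {
            uint32_t dw = reg_dword_offset(regCP_EOP_DONE_ADDR_LO,
                                           regCP_EOP_DONE_ADDR_LO_BASE_IDX);

            /* prints: dword 0xc000, byte 0x30000 */
            printf("CP_EOP_DONE_ADDR_LO: dword 0x%x, byte 0x%x\n", dw, dw * 4);
            return 0;
    }

The same pattern applies to every reg/BASE_IDX pair in this header; only the per-segment base table differs between IP blocks and chip revisions.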
+// addressBlock: gc_cprs64dec
+// base address: 0x32000
+#define regCP_MES_PRGRM_CNTR_START 0x2800
+#define regCP_MES_PRGRM_CNTR_START_BASE_IDX 1
+#define regCP_MES_INTR_ROUTINE_START 0x2801
+#define regCP_MES_INTR_ROUTINE_START_BASE_IDX 1
+#define regCP_MES_MTVEC_LO 0x2801
+#define regCP_MES_MTVEC_LO_BASE_IDX 1
+#define regCP_MES_INTR_ROUTINE_START_HI 0x2802
+#define regCP_MES_INTR_ROUTINE_START_HI_BASE_IDX 1
+#define regCP_MES_MTVEC_HI 0x2802
+#define regCP_MES_MTVEC_HI_BASE_IDX 1
+#define regCP_MES_CNTL 0x2807
+#define regCP_MES_CNTL_BASE_IDX 1
+#define regCP_MES_PIPE_PRIORITY_CNTS 0x2808
+#define regCP_MES_PIPE_PRIORITY_CNTS_BASE_IDX 1
+#define regCP_MES_PIPE0_PRIORITY 0x2809
+#define regCP_MES_PIPE0_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE1_PRIORITY 0x280a
+#define regCP_MES_PIPE1_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE2_PRIORITY 0x280b
+#define regCP_MES_PIPE2_PRIORITY_BASE_IDX 1
+#define regCP_MES_PIPE3_PRIORITY 0x280c
+#define regCP_MES_PIPE3_PRIORITY_BASE_IDX 1
+#define regCP_MES_HEADER_DUMP 0x280d
+#define regCP_MES_HEADER_DUMP_BASE_IDX 1
+#define regCP_MES_MIE_LO 0x280e
+#define regCP_MES_MIE_LO_BASE_IDX 1
+#define regCP_MES_MIE_HI 0x280f
+#define regCP_MES_MIE_HI_BASE_IDX 1
+#define regCP_MES_INTERRUPT 0x2810
+#define regCP_MES_INTERRUPT_BASE_IDX 1
+#define regCP_MES_SCRATCH_INDEX 0x2811
+#define regCP_MES_SCRATCH_INDEX_BASE_IDX 1
+#define regCP_MES_SCRATCH_DATA 0x2812
+#define regCP_MES_SCRATCH_DATA_BASE_IDX 1
+#define regCP_MES_INSTR_PNTR 0x2813
+#define regCP_MES_INSTR_PNTR_BASE_IDX 1
+#define regCP_MES_MSCRATCH_HI 0x2814
+#define regCP_MES_MSCRATCH_HI_BASE_IDX 1
+#define regCP_MES_MSCRATCH_LO 0x2815
+#define regCP_MES_MSCRATCH_LO_BASE_IDX 1
+#define regCP_MES_MSTATUS_LO 0x2816
+#define regCP_MES_MSTATUS_LO_BASE_IDX 1
+#define regCP_MES_MSTATUS_HI 0x2817
+#define regCP_MES_MSTATUS_HI_BASE_IDX 1
+#define regCP_MES_MEPC_LO 0x2818
+#define regCP_MES_MEPC_LO_BASE_IDX 1
+#define regCP_MES_MEPC_HI 0x2819
+#define regCP_MES_MEPC_HI_BASE_IDX 1
+#define regCP_MES_MCAUSE_LO 0x281a
+#define regCP_MES_MCAUSE_LO_BASE_IDX 1
+#define regCP_MES_MCAUSE_HI 0x281b
+#define regCP_MES_MCAUSE_HI_BASE_IDX 1
+#define regCP_MES_MBADADDR_LO 0x281c
+#define regCP_MES_MBADADDR_LO_BASE_IDX 1
+#define regCP_MES_MBADADDR_HI 0x281d
+#define regCP_MES_MBADADDR_HI_BASE_IDX 1
+#define regCP_MES_MIP_LO 0x281e
+#define regCP_MES_MIP_LO_BASE_IDX 1
+#define regCP_MES_MIP_HI 0x281f
+#define regCP_MES_MIP_HI_BASE_IDX 1
+#define regCP_MES_IC_OP_CNTL 0x2820
+#define regCP_MES_IC_OP_CNTL_BASE_IDX 1
+#define regCP_MES_MCYCLE_LO 0x2826
+#define regCP_MES_MCYCLE_LO_BASE_IDX 1
+#define regCP_MES_MCYCLE_HI 0x2827
+#define regCP_MES_MCYCLE_HI_BASE_IDX 1
+#define regCP_MES_MTIME_LO 0x2828
+#define regCP_MES_MTIME_LO_BASE_IDX 1
+#define regCP_MES_MTIME_HI 0x2829
+#define regCP_MES_MTIME_HI_BASE_IDX 1
+#define regCP_MES_MINSTRET_LO 0x282a
+#define regCP_MES_MINSTRET_LO_BASE_IDX 1
+#define regCP_MES_MINSTRET_HI 0x282b
+#define regCP_MES_MINSTRET_HI_BASE_IDX 1
+#define regCP_MES_MISA_LO 0x282c
+#define regCP_MES_MISA_LO_BASE_IDX 1
+#define regCP_MES_MISA_HI 0x282d
+#define regCP_MES_MISA_HI_BASE_IDX 1
+#define regCP_MES_MVENDORID_LO 0x282e
+#define regCP_MES_MVENDORID_LO_BASE_IDX 1
+#define regCP_MES_MVENDORID_HI 0x282f
+#define regCP_MES_MVENDORID_HI_BASE_IDX 1
+#define regCP_MES_MARCHID_LO 0x2830
+#define regCP_MES_MARCHID_LO_BASE_IDX 1
+#define regCP_MES_MARCHID_HI 0x2831
+#define regCP_MES_MARCHID_HI_BASE_IDX 1
+#define regCP_MES_MIMPID_LO 0x2832
+#define regCP_MES_MIMPID_LO_BASE_IDX 1
+#define regCP_MES_MIMPID_HI 0x2833
+#define regCP_MES_MIMPID_HI_BASE_IDX 1
+#define regCP_MES_MHARTID_LO 0x2834
+#define regCP_MES_MHARTID_LO_BASE_IDX 1
+#define regCP_MES_MHARTID_HI 0x2835
+#define regCP_MES_MHARTID_HI_BASE_IDX 1
+#define regCP_MES_DC_BASE_CNTL 0x2836
+#define regCP_MES_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_DC_OP_CNTL 0x2837
+#define regCP_MES_DC_OP_CNTL_BASE_IDX 1
+#define regCP_MES_MTIMECMP_LO 0x2838
+#define regCP_MES_MTIMECMP_LO_BASE_IDX 1
+#define regCP_MES_MTIMECMP_HI 0x2839
+#define regCP_MES_MTIMECMP_HI_BASE_IDX 1
+#define regCP_MES_PROCESS_QUANTUM_PIPE0 0x283a
+#define regCP_MES_PROCESS_QUANTUM_PIPE0_BASE_IDX 1
+#define regCP_MES_PROCESS_QUANTUM_PIPE1 0x283b
+#define regCP_MES_PROCESS_QUANTUM_PIPE1_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL1 0x283c
+#define regCP_MES_DOORBELL_CONTROL1_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL2 0x283d
+#define regCP_MES_DOORBELL_CONTROL2_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL3 0x283e
+#define regCP_MES_DOORBELL_CONTROL3_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL4 0x283f
+#define regCP_MES_DOORBELL_CONTROL4_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL5 0x2840
+#define regCP_MES_DOORBELL_CONTROL5_BASE_IDX 1
+#define regCP_MES_DOORBELL_CONTROL6 0x2841
+#define regCP_MES_DOORBELL_CONTROL6_BASE_IDX 1
+#define regCP_MES_GP0_LO 0x2843
+#define regCP_MES_GP0_LO_BASE_IDX 1
+#define regCP_MES_GP0_HI 0x2844
+#define regCP_MES_GP0_HI_BASE_IDX 1
+#define regCP_MES_GP1_LO 0x2845
+#define regCP_MES_GP1_LO_BASE_IDX 1
+#define regCP_MES_GP1_HI 0x2846
+#define regCP_MES_GP1_HI_BASE_IDX 1
+#define regCP_MES_GP2_LO 0x2847
+#define regCP_MES_GP2_LO_BASE_IDX 1
+#define regCP_MES_GP2_HI 0x2848
+#define regCP_MES_GP2_HI_BASE_IDX 1
+#define regCP_MES_GP3_LO 0x2849
+#define regCP_MES_GP3_LO_BASE_IDX 1
+#define regCP_MES_GP3_HI 0x284a
+#define regCP_MES_GP3_HI_BASE_IDX 1
+#define regCP_MES_GP4_LO 0x284b
+#define regCP_MES_GP4_LO_BASE_IDX 1
+#define regCP_MES_GP4_HI 0x284c
+#define regCP_MES_GP4_HI_BASE_IDX 1
+#define regCP_MES_GP5_LO 0x284d
+#define regCP_MES_GP5_LO_BASE_IDX 1
+#define regCP_MES_GP5_HI 0x284e
+#define regCP_MES_GP5_HI_BASE_IDX 1
+#define regCP_MES_GP6_LO 0x284f
+#define regCP_MES_GP6_LO_BASE_IDX 1
+#define regCP_MES_GP6_HI 0x2850
+#define regCP_MES_GP6_HI_BASE_IDX 1
+#define regCP_MES_GP7_LO 0x2851
+#define regCP_MES_GP7_LO_BASE_IDX 1
+#define regCP_MES_GP7_HI 0x2852
+#define regCP_MES_GP7_HI_BASE_IDX 1
+#define regCP_MES_GP8_LO 0x2853
+#define regCP_MES_GP8_LO_BASE_IDX 1
+#define regCP_MES_GP8_HI 0x2854
+#define regCP_MES_GP8_HI_BASE_IDX 1
+#define regCP_MES_GP9_LO 0x2855
+#define regCP_MES_GP9_LO_BASE_IDX 1
+#define regCP_MES_GP9_HI 0x2856
+#define regCP_MES_GP9_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_BASE0_LO 0x2883
+#define regCP_MES_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_BASE0_HI 0x2884
+#define regCP_MES_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_MASK0_LO 0x2885
+#define regCP_MES_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_MASK0_HI 0x2886
+#define regCP_MES_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_APERTURE 0x2887
+#define regCP_MES_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_BASE_LO 0x2888
+#define regCP_MES_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_BASE_HI 0x2889
+#define regCP_MES_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_MASK_LO 0x288a
+#define regCP_MES_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_MASK_HI 0x288b
+#define regCP_MES_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_MES_LOCAL_INSTR_APERTURE 0x288c
+#define regCP_MES_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_APERTURE 0x288d
+#define regCP_MES_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_BASE_LO 0x288e
+#define regCP_MES_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_MES_LOCAL_SCRATCH_BASE_HI 0x288f
+#define regCP_MES_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_MES_PERFCOUNT_CNTL 0x2899
+#define regCP_MES_PERFCOUNT_CNTL_BASE_IDX 1
+#define regCP_MES_PENDING_INTERRUPT 0x289a
+#define regCP_MES_PENDING_INTERRUPT_BASE_IDX 1
+#define regCP_MES_PRGRM_CNTR_START_HI 0x289d
+#define regCP_MES_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_16 0x289f
+#define regCP_MES_INTERRUPT_DATA_16_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_17 0x28a0
+#define regCP_MES_INTERRUPT_DATA_17_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_18 0x28a1
+#define regCP_MES_INTERRUPT_DATA_18_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_19 0x28a2
+#define regCP_MES_INTERRUPT_DATA_19_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_20 0x28a3
+#define regCP_MES_INTERRUPT_DATA_20_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_21 0x28a4
+#define regCP_MES_INTERRUPT_DATA_21_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_22 0x28a5
+#define regCP_MES_INTERRUPT_DATA_22_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_23 0x28a6
+#define regCP_MES_INTERRUPT_DATA_23_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_24 0x28a7
+#define regCP_MES_INTERRUPT_DATA_24_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_25 0x28a8
+#define regCP_MES_INTERRUPT_DATA_25_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_26 0x28a9
+#define regCP_MES_INTERRUPT_DATA_26_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_27 0x28aa
+#define regCP_MES_INTERRUPT_DATA_27_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_28 0x28ab
+#define regCP_MES_INTERRUPT_DATA_28_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_29 0x28ac
+#define regCP_MES_INTERRUPT_DATA_29_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_30 0x28ad
+#define regCP_MES_INTERRUPT_DATA_30_BASE_IDX 1
+#define regCP_MES_INTERRUPT_DATA_31 0x28ae
+#define regCP_MES_INTERRUPT_DATA_31_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_BASE 0x28af
+#define regCP_MES_DC_APERTURE0_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_MASK 0x28b0
+#define regCP_MES_DC_APERTURE0_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE0_CNTL 0x28b1
+#define regCP_MES_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_BASE 0x28b2
+#define regCP_MES_DC_APERTURE1_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_MASK 0x28b3
+#define regCP_MES_DC_APERTURE1_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE1_CNTL 0x28b4
+#define regCP_MES_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_BASE 0x28b5
+#define regCP_MES_DC_APERTURE2_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_MASK 0x28b6
+#define regCP_MES_DC_APERTURE2_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE2_CNTL 0x28b7
+#define regCP_MES_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_BASE 0x28b8
+#define regCP_MES_DC_APERTURE3_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_MASK 0x28b9
+#define regCP_MES_DC_APERTURE3_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE3_CNTL 0x28ba
+#define regCP_MES_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_BASE 0x28bb
+#define regCP_MES_DC_APERTURE4_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_MASK 0x28bc
+#define regCP_MES_DC_APERTURE4_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE4_CNTL 0x28bd
+#define regCP_MES_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_BASE 0x28be
+#define regCP_MES_DC_APERTURE5_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_MASK 0x28bf
+#define regCP_MES_DC_APERTURE5_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE5_CNTL 0x28c0
+#define regCP_MES_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_BASE 0x28c1
+#define regCP_MES_DC_APERTURE6_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_MASK 0x28c2
+#define regCP_MES_DC_APERTURE6_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE6_CNTL 0x28c3
+#define regCP_MES_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_BASE 0x28c4
+#define regCP_MES_DC_APERTURE7_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_MASK 0x28c5
+#define regCP_MES_DC_APERTURE7_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE7_CNTL 0x28c6
+#define regCP_MES_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_BASE 0x28c7
+#define regCP_MES_DC_APERTURE8_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_MASK 0x28c8
+#define regCP_MES_DC_APERTURE8_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE8_CNTL 0x28c9
+#define regCP_MES_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_BASE 0x28ca
+#define regCP_MES_DC_APERTURE9_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_MASK 0x28cb
+#define regCP_MES_DC_APERTURE9_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE9_CNTL 0x28cc
+#define regCP_MES_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_BASE 0x28cd
+#define regCP_MES_DC_APERTURE10_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_MASK 0x28ce
+#define regCP_MES_DC_APERTURE10_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE10_CNTL 0x28cf
+#define regCP_MES_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_BASE 0x28d0
+#define regCP_MES_DC_APERTURE11_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_MASK 0x28d1
+#define regCP_MES_DC_APERTURE11_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE11_CNTL 0x28d2
+#define regCP_MES_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_BASE 0x28d3
+#define regCP_MES_DC_APERTURE12_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_MASK 0x28d4
+#define regCP_MES_DC_APERTURE12_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE12_CNTL 0x28d5
+#define regCP_MES_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_BASE 0x28d6
+#define regCP_MES_DC_APERTURE13_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_MASK 0x28d7
+#define regCP_MES_DC_APERTURE13_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE13_CNTL 0x28d8
+#define regCP_MES_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_BASE 0x28d9
+#define regCP_MES_DC_APERTURE14_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_MASK 0x28da
+#define regCP_MES_DC_APERTURE14_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE14_CNTL 0x28db
+#define regCP_MES_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_BASE 0x28dc
+#define regCP_MES_DC_APERTURE15_BASE_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_MASK 0x28dd
+#define regCP_MES_DC_APERTURE15_MASK_BASE_IDX 1
+#define regCP_MES_DC_APERTURE15_CNTL 0x28de
+#define regCP_MES_DC_APERTURE15_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_PRGRM_CNTR_START 0x2900
+#define regCP_MEC_RS64_PRGRM_CNTR_START_BASE_IDX 1
+#define regCP_MEC_MTVEC_LO 0x2901
+#define regCP_MEC_MTVEC_LO_BASE_IDX 1
+#define regCP_MEC_MTVEC_HI 0x2902
+#define regCP_MEC_MTVEC_HI_BASE_IDX 1
+#define regCP_MEC_ISA_CNTL 0x2903
+#define regCP_MEC_ISA_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_CNTL 0x2904
+#define regCP_MEC_RS64_CNTL_BASE_IDX 1
+#define regCP_MEC_MIE_LO 0x2905
+#define regCP_MEC_MIE_LO_BASE_IDX 1
+#define regCP_MEC_MIE_HI 0x2906
+#define regCP_MEC_MIE_HI_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT 0x2907
+#define regCP_MEC_RS64_INTERRUPT_BASE_IDX 1
+#define regCP_MEC_RS64_INSTR_PNTR 0x2908
+#define regCP_MEC_RS64_INSTR_PNTR_BASE_IDX 1
+#define regCP_MEC_MIP_LO 0x2909
+#define regCP_MEC_MIP_LO_BASE_IDX 1
+#define regCP_MEC_MIP_HI 0x290a
+#define regCP_MEC_MIP_HI_BASE_IDX 1
+#define regCP_MEC_DC_BASE_CNTL 0x290b
+#define regCP_MEC_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_OP_CNTL 0x290c
+#define regCP_MEC_DC_OP_CNTL_BASE_IDX 1
+#define regCP_MEC_MTIMECMP_LO 0x290d
+#define regCP_MEC_MTIMECMP_LO_BASE_IDX 1
+#define regCP_MEC_MTIMECMP_HI 0x290e
+#define regCP_MEC_MTIMECMP_HI_BASE_IDX 1
+#define regCP_MEC_GP0_LO 0x2910
+#define regCP_MEC_GP0_LO_BASE_IDX 1
+#define regCP_MEC_GP0_HI 0x2911
+#define regCP_MEC_GP0_HI_BASE_IDX 1
+#define regCP_MEC_GP1_LO 0x2912
+#define regCP_MEC_GP1_LO_BASE_IDX 1
+#define regCP_MEC_GP1_HI 0x2913
+#define regCP_MEC_GP1_HI_BASE_IDX 1
+#define regCP_MEC_GP2_LO 0x2914
+#define regCP_MEC_GP2_LO_BASE_IDX 1
+#define regCP_MEC_GP2_HI 0x2915
+#define regCP_MEC_GP2_HI_BASE_IDX 1
+#define regCP_MEC_GP3_LO 0x2916
+#define regCP_MEC_GP3_LO_BASE_IDX 1
+#define regCP_MEC_GP3_HI 0x2917
+#define regCP_MEC_GP3_HI_BASE_IDX 1
+#define regCP_MEC_GP4_LO 0x2918
+#define regCP_MEC_GP4_LO_BASE_IDX 1
+#define regCP_MEC_GP4_HI 0x2919
+#define regCP_MEC_GP4_HI_BASE_IDX 1
+#define regCP_MEC_GP5_LO 0x291a
+#define regCP_MEC_GP5_LO_BASE_IDX 1
+#define regCP_MEC_GP5_HI 0x291b
+#define regCP_MEC_GP5_HI_BASE_IDX 1
+#define regCP_MEC_GP6_LO 0x291c
+#define regCP_MEC_GP6_LO_BASE_IDX 1
+#define regCP_MEC_GP6_HI 0x291d
+#define regCP_MEC_GP6_HI_BASE_IDX 1
+#define regCP_MEC_GP7_LO 0x291e
+#define regCP_MEC_GP7_LO_BASE_IDX 1
+#define regCP_MEC_GP7_HI 0x291f
+#define regCP_MEC_GP7_HI_BASE_IDX 1
+#define regCP_MEC_GP8_LO 0x2920
+#define regCP_MEC_GP8_LO_BASE_IDX 1
+#define regCP_MEC_GP8_HI 0x2921
+#define regCP_MEC_GP8_HI_BASE_IDX 1
+#define regCP_MEC_GP9_LO 0x2922
+#define regCP_MEC_GP9_LO_BASE_IDX 1
+#define regCP_MEC_GP9_HI 0x2923
+#define regCP_MEC_GP9_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_BASE0_LO 0x2927
+#define regCP_MEC_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_BASE0_HI 0x2928
+#define regCP_MEC_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_MASK0_LO 0x2929
+#define regCP_MEC_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_MASK0_HI 0x292a
+#define regCP_MEC_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_APERTURE 0x292b
+#define regCP_MEC_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_BASE_LO 0x292c
+#define regCP_MEC_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_BASE_HI 0x292d
+#define regCP_MEC_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_MASK_LO 0x292e
+#define regCP_MEC_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_MASK_HI 0x292f
+#define regCP_MEC_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_MEC_LOCAL_INSTR_APERTURE 0x2930
+#define regCP_MEC_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_APERTURE 0x2931
+#define regCP_MEC_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_BASE_LO 0x2932
+#define regCP_MEC_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_MEC_LOCAL_SCRATCH_BASE_HI 0x2933
+#define regCP_MEC_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_MEC_RS64_PERFCOUNT_CNTL 0x2934
+#define regCP_MEC_RS64_PERFCOUNT_CNTL_BASE_IDX 1
+#define regCP_MEC_RS64_PENDING_INTERRUPT 0x2935
+#define regCP_MEC_RS64_PENDING_INTERRUPT_BASE_IDX 1
+#define regCP_MEC_RS64_PRGRM_CNTR_START_HI 0x2938
+#define regCP_MEC_RS64_PRGRM_CNTR_START_HI_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_16 0x293a
+#define regCP_MEC_RS64_INTERRUPT_DATA_16_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_17 0x293b
+#define regCP_MEC_RS64_INTERRUPT_DATA_17_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_18 0x293c
+#define regCP_MEC_RS64_INTERRUPT_DATA_18_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_19 0x293d
+#define regCP_MEC_RS64_INTERRUPT_DATA_19_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_20 0x293e
+#define regCP_MEC_RS64_INTERRUPT_DATA_20_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_21 0x293f
+#define regCP_MEC_RS64_INTERRUPT_DATA_21_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_22 0x2940
+#define regCP_MEC_RS64_INTERRUPT_DATA_22_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_23 0x2941
+#define regCP_MEC_RS64_INTERRUPT_DATA_23_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_24 0x2942
+#define regCP_MEC_RS64_INTERRUPT_DATA_24_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_25 0x2943
+#define regCP_MEC_RS64_INTERRUPT_DATA_25_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_26 0x2944
+#define regCP_MEC_RS64_INTERRUPT_DATA_26_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_27 0x2945
+#define regCP_MEC_RS64_INTERRUPT_DATA_27_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_28 0x2946
+#define regCP_MEC_RS64_INTERRUPT_DATA_28_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_29 0x2947
+#define regCP_MEC_RS64_INTERRUPT_DATA_29_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_30 0x2948
+#define regCP_MEC_RS64_INTERRUPT_DATA_30_BASE_IDX 1
+#define regCP_MEC_RS64_INTERRUPT_DATA_31 0x2949
+#define regCP_MEC_RS64_INTERRUPT_DATA_31_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_BASE 0x294a
+#define regCP_MEC_DC_APERTURE0_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_MASK 0x294b
+#define regCP_MEC_DC_APERTURE0_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE0_CNTL 0x294c
+#define regCP_MEC_DC_APERTURE0_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_BASE 0x294d
+#define regCP_MEC_DC_APERTURE1_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_MASK 0x294e
+#define regCP_MEC_DC_APERTURE1_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE1_CNTL 0x294f
+#define regCP_MEC_DC_APERTURE1_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_BASE 0x2950
+#define regCP_MEC_DC_APERTURE2_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_MASK 0x2951
+#define regCP_MEC_DC_APERTURE2_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE2_CNTL 0x2952
+#define regCP_MEC_DC_APERTURE2_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_BASE 0x2953
+#define regCP_MEC_DC_APERTURE3_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_MASK 0x2954
+#define regCP_MEC_DC_APERTURE3_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE3_CNTL 0x2955
+#define regCP_MEC_DC_APERTURE3_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_BASE 0x2956
+#define regCP_MEC_DC_APERTURE4_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_MASK 0x2957
+#define regCP_MEC_DC_APERTURE4_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE4_CNTL 0x2958
+#define regCP_MEC_DC_APERTURE4_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_BASE 0x2959
+#define regCP_MEC_DC_APERTURE5_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_MASK 0x295a
+#define regCP_MEC_DC_APERTURE5_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE5_CNTL 0x295b
+#define regCP_MEC_DC_APERTURE5_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_BASE 0x295c
+#define regCP_MEC_DC_APERTURE6_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_MASK 0x295d
+#define regCP_MEC_DC_APERTURE6_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE6_CNTL 0x295e
+#define regCP_MEC_DC_APERTURE6_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_BASE 0x295f
+#define regCP_MEC_DC_APERTURE7_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_MASK 0x2960
+#define regCP_MEC_DC_APERTURE7_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE7_CNTL 0x2961
+#define regCP_MEC_DC_APERTURE7_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_BASE 0x2962
+#define regCP_MEC_DC_APERTURE8_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_MASK 0x2963
+#define regCP_MEC_DC_APERTURE8_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE8_CNTL 0x2964
+#define regCP_MEC_DC_APERTURE8_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_BASE 0x2965
+#define regCP_MEC_DC_APERTURE9_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_MASK 0x2966
+#define regCP_MEC_DC_APERTURE9_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE9_CNTL 0x2967
+#define regCP_MEC_DC_APERTURE9_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_BASE 0x2968
+#define regCP_MEC_DC_APERTURE10_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_MASK 0x2969
+#define regCP_MEC_DC_APERTURE10_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE10_CNTL 0x296a
+#define regCP_MEC_DC_APERTURE10_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_BASE 0x296b
+#define regCP_MEC_DC_APERTURE11_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_MASK 0x296c
+#define regCP_MEC_DC_APERTURE11_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE11_CNTL 0x296d
+#define regCP_MEC_DC_APERTURE11_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_BASE 0x296e
+#define regCP_MEC_DC_APERTURE12_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_MASK 0x296f
+#define regCP_MEC_DC_APERTURE12_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE12_CNTL 0x2970
+#define regCP_MEC_DC_APERTURE12_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_BASE 0x2971
+#define regCP_MEC_DC_APERTURE13_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_MASK 0x2972
+#define regCP_MEC_DC_APERTURE13_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE13_CNTL 0x2973
+#define regCP_MEC_DC_APERTURE13_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_BASE 0x2974
+#define regCP_MEC_DC_APERTURE14_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_MASK 0x2975
+#define regCP_MEC_DC_APERTURE14_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE14_CNTL 0x2976
+#define regCP_MEC_DC_APERTURE14_CNTL_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_BASE 0x2977
+#define regCP_MEC_DC_APERTURE15_BASE_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_MASK 0x2978
+#define regCP_MEC_DC_APERTURE15_MASK_BASE_IDX 1
+#define regCP_MEC_DC_APERTURE15_CNTL 0x2979
+#define regCP_MEC_DC_APERTURE15_CNTL_BASE_IDX 1
+#define regCP_CPC_IC_OP_CNTL 0x297a
+#define regCP_CPC_IC_OP_CNTL_BASE_IDX 1
+#define regCP_GFX_CNTL 0x2a00
+#define regCP_GFX_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_INTERRUPT0 0x2a01
+#define regCP_GFX_RS64_INTERRUPT0_BASE_IDX 1
+#define regCP_GFX_RS64_INTR_EN0 0x2a02
+#define regCP_GFX_RS64_INTR_EN0_BASE_IDX 1
+#define regCP_GFX_RS64_INTR_EN1 0x2a03
+#define regCP_GFX_RS64_INTR_EN1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE_CNTL 0x2a08
+#define regCP_GFX_RS64_DC_BASE_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_DC_OP_CNTL 0x2a09
+#define regCP_GFX_RS64_DC_OP_CNTL_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_BASE0_LO 0x2a0a
+#define regCP_GFX_RS64_LOCAL_BASE0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_BASE0_HI 0x2a0b
+#define regCP_GFX_RS64_LOCAL_BASE0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_MASK0_LO 0x2a0c
+#define regCP_GFX_RS64_LOCAL_MASK0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_MASK0_HI 0x2a0d
+#define regCP_GFX_RS64_LOCAL_MASK0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_APERTURE 0x2a0e
+#define regCP_GFX_RS64_LOCAL_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_LO 0x2a0f
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_HI 0x2a10
+#define regCP_GFX_RS64_LOCAL_INSTR_BASE_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_LO 0x2a11
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_HI 0x2a12
+#define regCP_GFX_RS64_LOCAL_INSTR_MASK_HI_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_INSTR_APERTURE 0x2a13
+#define regCP_GFX_RS64_LOCAL_INSTR_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_APERTURE 0x2a14
+#define regCP_GFX_RS64_LOCAL_SCRATCH_APERTURE_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_LO 0x2a15
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_LO_BASE_IDX 1
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_HI 0x2a16
+#define regCP_GFX_RS64_LOCAL_SCRATCH_BASE_HI_BASE_IDX 1
+#define regCP_GFX_RS64_PERFCOUNT_CNTL0 0x2a1a
+#define regCP_GFX_RS64_PERFCOUNT_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_PERFCOUNT_CNTL1 0x2a1b
+#define regCP_GFX_RS64_PERFCOUNT_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_LO0 0x2a1c
+#define regCP_GFX_RS64_MIP_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_LO1 0x2a1d
+#define regCP_GFX_RS64_MIP_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_HI0 0x2a1e
+#define regCP_GFX_RS64_MIP_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_MIP_HI1 0x2a1f
+#define regCP_GFX_RS64_MIP_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_LO0 0x2a20
+#define regCP_GFX_RS64_MTIMECMP_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_LO1 0x2a21
+#define regCP_GFX_RS64_MTIMECMP_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_HI0 0x2a22
+#define regCP_GFX_RS64_MTIMECMP_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_MTIMECMP_HI1 0x2a23
+#define regCP_GFX_RS64_MTIMECMP_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_LO0 0x2a24
+#define regCP_GFX_RS64_GP0_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_LO1 0x2a25
+#define regCP_GFX_RS64_GP0_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_HI0 0x2a26
+#define regCP_GFX_RS64_GP0_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP0_HI1 0x2a27
+#define regCP_GFX_RS64_GP0_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_LO0 0x2a28
+#define regCP_GFX_RS64_GP1_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_LO1 0x2a29
+#define regCP_GFX_RS64_GP1_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_HI0 0x2a2a
+#define regCP_GFX_RS64_GP1_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP1_HI1 0x2a2b
+#define regCP_GFX_RS64_GP1_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_LO0 0x2a2c
+#define regCP_GFX_RS64_GP2_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_LO1 0x2a2d
+#define regCP_GFX_RS64_GP2_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_HI0 0x2a2e
+#define regCP_GFX_RS64_GP2_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP2_HI1 0x2a2f
+#define regCP_GFX_RS64_GP2_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_LO0 0x2a30
+#define regCP_GFX_RS64_GP3_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_LO1 0x2a31
+#define regCP_GFX_RS64_GP3_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_HI0 0x2a32
+#define regCP_GFX_RS64_GP3_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP3_HI1 0x2a33
+#define regCP_GFX_RS64_GP3_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_LO0 0x2a34
+#define regCP_GFX_RS64_GP4_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_LO1 0x2a35
+#define regCP_GFX_RS64_GP4_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_HI0 0x2a36
+#define regCP_GFX_RS64_GP4_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP4_HI1 0x2a37
+#define regCP_GFX_RS64_GP4_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_LO0 0x2a38
+#define regCP_GFX_RS64_GP5_LO0_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_LO1 0x2a39
+#define regCP_GFX_RS64_GP5_LO1_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_HI0 0x2a3a
+#define regCP_GFX_RS64_GP5_HI0_BASE_IDX 1
+#define regCP_GFX_RS64_GP5_HI1 0x2a3b
+#define regCP_GFX_RS64_GP5_HI1_BASE_IDX 1
+#define regCP_GFX_RS64_GP6_LO 0x2a3c
+#define regCP_GFX_RS64_GP6_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP6_HI 0x2a3d
+#define regCP_GFX_RS64_GP6_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP7_LO 0x2a3e
+#define regCP_GFX_RS64_GP7_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP7_HI 0x2a3f
+#define regCP_GFX_RS64_GP7_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP8_LO 0x2a40
+#define regCP_GFX_RS64_GP8_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP8_HI 0x2a41
+#define regCP_GFX_RS64_GP8_HI_BASE_IDX 1
+#define regCP_GFX_RS64_GP9_LO 0x2a42
+#define regCP_GFX_RS64_GP9_LO_BASE_IDX 1
+#define regCP_GFX_RS64_GP9_HI 0x2a43
+#define regCP_GFX_RS64_GP9_HI_BASE_IDX 1
+#define regCP_GFX_RS64_INSTR_PNTR0 0x2a44
+#define regCP_GFX_RS64_INSTR_PNTR0_BASE_IDX 1
+#define regCP_GFX_RS64_INSTR_PNTR1 0x2a45
+#define regCP_GFX_RS64_INSTR_PNTR1_BASE_IDX 1
+#define regCP_GFX_RS64_PENDING_INTERRUPT0 0x2a46
+#define regCP_GFX_RS64_PENDING_INTERRUPT0_BASE_IDX 1
+#define regCP_GFX_RS64_PENDING_INTERRUPT1 0x2a47
+#define regCP_GFX_RS64_PENDING_INTERRUPT1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_BASE0 0x2a49
+#define regCP_GFX_RS64_DC_APERTURE0_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_MASK0 0x2a4a
+#define regCP_GFX_RS64_DC_APERTURE0_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL0 0x2a4b
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_BASE0 0x2a4c
+#define regCP_GFX_RS64_DC_APERTURE1_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_MASK0 0x2a4d
+#define regCP_GFX_RS64_DC_APERTURE1_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL0 0x2a4e
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_BASE0 0x2a4f
+#define regCP_GFX_RS64_DC_APERTURE2_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_MASK0 0x2a50
+#define regCP_GFX_RS64_DC_APERTURE2_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL0 0x2a51
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_BASE0 0x2a52
+#define regCP_GFX_RS64_DC_APERTURE3_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_MASK0 0x2a53
+#define regCP_GFX_RS64_DC_APERTURE3_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL0 0x2a54
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_BASE0 0x2a55
+#define regCP_GFX_RS64_DC_APERTURE4_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_MASK0 0x2a56
+#define regCP_GFX_RS64_DC_APERTURE4_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL0 0x2a57
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_BASE0 0x2a58
+#define regCP_GFX_RS64_DC_APERTURE5_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_MASK0 0x2a59
+#define regCP_GFX_RS64_DC_APERTURE5_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL0 0x2a5a
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_BASE0 0x2a5b
+#define regCP_GFX_RS64_DC_APERTURE6_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_MASK0 0x2a5c
+#define regCP_GFX_RS64_DC_APERTURE6_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL0 0x2a5d
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_BASE0 0x2a5e
+#define regCP_GFX_RS64_DC_APERTURE7_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_MASK0 0x2a5f
+#define regCP_GFX_RS64_DC_APERTURE7_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL0 0x2a60
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_BASE0 0x2a61
+#define regCP_GFX_RS64_DC_APERTURE8_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_MASK0 0x2a62
+#define regCP_GFX_RS64_DC_APERTURE8_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL0 0x2a63
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_BASE0 0x2a64
+#define regCP_GFX_RS64_DC_APERTURE9_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_MASK0 0x2a65
+#define regCP_GFX_RS64_DC_APERTURE9_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL0 0x2a66
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_BASE0 0x2a67
+#define regCP_GFX_RS64_DC_APERTURE10_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_MASK0 0x2a68
+#define regCP_GFX_RS64_DC_APERTURE10_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL0 0x2a69
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_BASE0 0x2a6a
+#define regCP_GFX_RS64_DC_APERTURE11_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_MASK0 0x2a6b
+#define regCP_GFX_RS64_DC_APERTURE11_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL0 0x2a6c
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_BASE0 0x2a6d
+#define regCP_GFX_RS64_DC_APERTURE12_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_MASK0 0x2a6e
+#define regCP_GFX_RS64_DC_APERTURE12_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL0 0x2a6f
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_BASE0 0x2a70
+#define regCP_GFX_RS64_DC_APERTURE13_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK0 0x2a71
+#define regCP_GFX_RS64_DC_APERTURE13_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL0 0x2a72
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_BASE0 0x2a73
+#define regCP_GFX_RS64_DC_APERTURE14_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_MASK0 0x2a74
+#define regCP_GFX_RS64_DC_APERTURE14_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL0 0x2a75
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_BASE0 0x2a76
+#define regCP_GFX_RS64_DC_APERTURE15_BASE0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_MASK0 0x2a77
+#define regCP_GFX_RS64_DC_APERTURE15_MASK0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL0 0x2a78
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL0_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_BASE1 0x2a79
+#define regCP_GFX_RS64_DC_APERTURE0_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_MASK1 0x2a7a
+#define regCP_GFX_RS64_DC_APERTURE0_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL1 0x2a7b
+#define regCP_GFX_RS64_DC_APERTURE0_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_BASE1 0x2a7c
+#define regCP_GFX_RS64_DC_APERTURE1_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_MASK1 0x2a7d
+#define regCP_GFX_RS64_DC_APERTURE1_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL1 0x2a7e
+#define regCP_GFX_RS64_DC_APERTURE1_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_BASE1 0x2a7f
+#define regCP_GFX_RS64_DC_APERTURE2_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_MASK1 0x2a80
+#define regCP_GFX_RS64_DC_APERTURE2_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL1 0x2a81
+#define regCP_GFX_RS64_DC_APERTURE2_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_BASE1 0x2a82
+#define regCP_GFX_RS64_DC_APERTURE3_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_MASK1 0x2a83
+#define regCP_GFX_RS64_DC_APERTURE3_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL1 0x2a84
+#define regCP_GFX_RS64_DC_APERTURE3_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_BASE1 0x2a85
+#define regCP_GFX_RS64_DC_APERTURE4_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_MASK1 0x2a86
+#define regCP_GFX_RS64_DC_APERTURE4_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL1 0x2a87
+#define regCP_GFX_RS64_DC_APERTURE4_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_BASE1 0x2a88
+#define regCP_GFX_RS64_DC_APERTURE5_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_MASK1 0x2a89
+#define regCP_GFX_RS64_DC_APERTURE5_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL1 0x2a8a
+#define regCP_GFX_RS64_DC_APERTURE5_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_BASE1 0x2a8b
+#define regCP_GFX_RS64_DC_APERTURE6_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_MASK1 0x2a8c
+#define regCP_GFX_RS64_DC_APERTURE6_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL1 0x2a8d
+#define regCP_GFX_RS64_DC_APERTURE6_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_BASE1 0x2a8e
+#define regCP_GFX_RS64_DC_APERTURE7_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_MASK1 0x2a8f
+#define regCP_GFX_RS64_DC_APERTURE7_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL1 0x2a90
+#define regCP_GFX_RS64_DC_APERTURE7_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_BASE1 0x2a91
+#define regCP_GFX_RS64_DC_APERTURE8_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_MASK1 0x2a92
+#define regCP_GFX_RS64_DC_APERTURE8_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL1 0x2a93
+#define regCP_GFX_RS64_DC_APERTURE8_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_BASE1 0x2a94
+#define regCP_GFX_RS64_DC_APERTURE9_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_MASK1 0x2a95
+#define regCP_GFX_RS64_DC_APERTURE9_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL1 0x2a96
+#define regCP_GFX_RS64_DC_APERTURE9_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_BASE1 0x2a97
+#define regCP_GFX_RS64_DC_APERTURE10_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_MASK1 0x2a98
+#define regCP_GFX_RS64_DC_APERTURE10_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL1 0x2a99
+#define regCP_GFX_RS64_DC_APERTURE10_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_BASE1 0x2a9a
+#define regCP_GFX_RS64_DC_APERTURE11_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_MASK1 0x2a9b
+#define regCP_GFX_RS64_DC_APERTURE11_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL1 0x2a9c
+#define regCP_GFX_RS64_DC_APERTURE11_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_BASE1 0x2a9d
+#define regCP_GFX_RS64_DC_APERTURE12_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_MASK1 0x2a9e
+#define regCP_GFX_RS64_DC_APERTURE12_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL1 0x2a9f
+#define regCP_GFX_RS64_DC_APERTURE12_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_BASE1 0x2aa0
+#define regCP_GFX_RS64_DC_APERTURE13_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK1 0x2aa1
+#define regCP_GFX_RS64_DC_APERTURE13_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL1 0x2aa2
+#define regCP_GFX_RS64_DC_APERTURE13_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_BASE1 0x2aa3
+#define regCP_GFX_RS64_DC_APERTURE14_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_MASK1 0x2aa4
+#define regCP_GFX_RS64_DC_APERTURE14_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL1 0x2aa5
+#define regCP_GFX_RS64_DC_APERTURE14_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_BASE1 0x2aa6
+#define regCP_GFX_RS64_DC_APERTURE15_BASE1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_MASK1 0x2aa7
+#define regCP_GFX_RS64_DC_APERTURE15_MASK1_BASE_IDX 1
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL1 0x2aa8
+#define regCP_GFX_RS64_DC_APERTURE15_CNTL1_BASE_IDX 1
+#define regCP_GFX_RS64_INTERRUPT1 0x2aac
+#define regCP_GFX_RS64_INTERRUPT1_BASE_IDX 1
+
+
+// addressBlock: gc_gl1dec
+// base address: 0x33400
+#define regGL1_ARB_CTRL 0x2d00
+#define regGL1_ARB_CTRL_BASE_IDX 1
+#define regGL1_DRAM_BURST_MASK 0x2d02
+#define regGL1_DRAM_BURST_MASK_BASE_IDX 1
+#define regGL1_ARB_STATUS 0x2d03
+#define regGL1_ARB_STATUS_BASE_IDX 1
+#define regGL1_DRAM_BURST_CTRL 0x2d04
+#define regGL1_DRAM_BURST_CTRL_BASE_IDX 1
+#define regGL1I_GL1R_REP_FGCG_OVERRIDE 0x2d05
+#define regGL1I_GL1R_REP_FGCG_OVERRIDE_BASE_IDX 1
+#define regGL1C_CTRL 0x2d40
+#define regGL1C_CTRL_BASE_IDX 1
+#define regGL1C_STATUS 0x2d41
+#define regGL1C_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_CNTL2 0x2d43
+#define regGL1C_UTCL0_CNTL2_BASE_IDX 1
+#define regGL1C_UTCL0_STATUS 0x2d44
+#define regGL1C_UTCL0_STATUS_BASE_IDX 1
+#define regGL1C_UTCL0_RETRY 0x2d45
+#define regGL1C_UTCL0_RETRY_BASE_IDX 1
+#define regGL1C_CTRL2 0x2d46
+#define regGL1C_CTRL2_BASE_IDX 1
+
+
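Each register in these address blocks is published as a dword offset plus a *_BASE_IDX selector rather than as an absolute MMIO address; the "base address" comment only records where the block sits for instance 0. A minimal sketch of how such a pair is usually resolved against a per-block base table follows (the table and helper names here are assumptions modeled on the SOC15-style reg_offset accessors used elsewhere in amdgpu, not anything defined by this header):

#include <stdint.h>

/*
 * Hypothetical helper: 'base' stands in for the per-IP, per-instance
 * base-address table kept by the driver; 'reg' and 'base_idx' are a pair
 * such as regGL1_ARB_CTRL and regGL1_ARB_CTRL_BASE_IDX from the block
 * above.  The result is the absolute dword offset to hand to whatever
 * MMIO read/write routine the caller uses.
 */
static inline uint32_t gc_reg_dw_offset(const uint32_t *base,
                                        uint32_t reg, uint32_t base_idx)
{
        return base[base_idx] + reg;
}

A call would look like gc_reg_dw_offset(base, regGL1_ARB_CTRL, regGL1_ARB_CTRL_BASE_IDX).
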
+// addressBlock: gc_chdec
+// base address: 0x33600
+#define regCH_ARB_CTRL 0x2d80
+#define regCH_ARB_CTRL_BASE_IDX 1
+#define regCH_DRAM_BURST_MASK 0x2d82
+#define regCH_DRAM_BURST_MASK_BASE_IDX 1
+#define regCH_ARB_STATUS 0x2d83
+#define regCH_ARB_STATUS_BASE_IDX 1
+#define regCH_DRAM_BURST_CTRL 0x2d84
+#define regCH_DRAM_BURST_CTRL_BASE_IDX 1
+#define regCHA_CHC_CREDITS 0x2d88
+#define regCHA_CHC_CREDITS_BASE_IDX 1
+#define regCHA_CLIENT_FREE_DELAY 0x2d89
+#define regCHA_CLIENT_FREE_DELAY_BASE_IDX 1
+#define regCHI_CHR_REP_FGCG_OVERRIDE 0x2d8c
+#define regCHI_CHR_REP_FGCG_OVERRIDE_BASE_IDX 1
+#define regCH_VC5_ENABLE 0x2d94
+#define regCH_VC5_ENABLE_BASE_IDX 1
+#define regCHC_CTRL 0x2dc0
+#define regCHC_CTRL_BASE_IDX 1
+#define regCHC_STATUS 0x2dc1
+#define regCHC_STATUS_BASE_IDX 1
+#define regCHCG_CTRL 0x2dc2
+#define regCHCG_CTRL_BASE_IDX 1
+#define regCHCG_STATUS 0x2dc3
+#define regCHCG_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_gl2dec
+// base address: 0x33800
+#define regGL2C_CTRL 0x2e00
+#define regGL2C_CTRL_BASE_IDX 1
+#define regGL2C_CTRL2 0x2e01
+#define regGL2C_CTRL2_BASE_IDX 1
+#define regGL2C_STATUS 0x2e02
+#define regGL2C_STATUS_BASE_IDX 1
+#define regGL2C_ADDR_MATCH_MASK 0x2e03
+#define regGL2C_ADDR_MATCH_MASK_BASE_IDX 1
+#define regGL2C_ADDR_MATCH_SIZE 0x2e04
+#define regGL2C_ADDR_MATCH_SIZE_BASE_IDX 1
+#define regGL2C_WBINVL2 0x2e05
+#define regGL2C_WBINVL2_BASE_IDX 1
+#define regGL2C_SOFT_RESET 0x2e06
+#define regGL2C_SOFT_RESET_BASE_IDX 1
+#define regGL2C_CM_CTRL0 0x2e07
+#define regGL2C_CM_CTRL0_BASE_IDX 1
+#define regGL2C_CM_CTRL1 0x2e08
+#define regGL2C_CM_CTRL1_BASE_IDX 1
+#define regGL2C_CM_STALL 0x2e09
+#define regGL2C_CM_STALL_BASE_IDX 1
+#define regGL2C_CM_CTRL2 0x2e0b
+#define regGL2C_CM_CTRL2_BASE_IDX 1
+#define regGL2C_CTRL3 0x2e0c
+#define regGL2C_CTRL3_BASE_IDX 1
+#define regGL2C_LB_CTR_CTRL 0x2e0d
+#define regGL2C_LB_CTR_CTRL_BASE_IDX 1
+#define regGL2C_LB_DATA0 0x2e0e
+#define regGL2C_LB_DATA0_BASE_IDX 1
+#define regGL2C_LB_DATA1 0x2e0f
+#define regGL2C_LB_DATA1_BASE_IDX 1
+#define regGL2C_LB_DATA2 0x2e10
+#define regGL2C_LB_DATA2_BASE_IDX 1
+#define regGL2C_LB_DATA3 0x2e11
+#define regGL2C_LB_DATA3_BASE_IDX 1
+#define regGL2C_LB_CTR_SEL0 0x2e12
+#define regGL2C_LB_CTR_SEL0_BASE_IDX 1
+#define regGL2C_LB_CTR_SEL1 0x2e13
+#define regGL2C_LB_CTR_SEL1_BASE_IDX 1
+#define regGL2C_CTRL4 0x2e17
+#define regGL2C_CTRL4_BASE_IDX 1
+#define regGL2C_DISCARD_STALL_CTRL 0x2e18
+#define regGL2C_DISCARD_STALL_CTRL_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_CTRL 0x2e20
+#define regGL2A_ADDR_MATCH_CTRL_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_MASK 0x2e21
+#define regGL2A_ADDR_MATCH_MASK_BASE_IDX 1
+#define regGL2A_ADDR_MATCH_SIZE 0x2e22
+#define regGL2A_ADDR_MATCH_SIZE_BASE_IDX 1
+#define regGL2A_PRIORITY_CTRL 0x2e23
+#define regGL2A_PRIORITY_CTRL_BASE_IDX 1
+#define regGL2A_CTRL 0x2e24
+#define regGL2A_CTRL_BASE_IDX 1
+#define regGL2A_RESP_THROTTLE_CTRL 0x2e2a
+#define regGL2A_RESP_THROTTLE_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_gl1hdec
+// base address: 0x33900
+#define regGL1H_ARB_CTRL 0x2e40
+#define regGL1H_ARB_CTRL_BASE_IDX 1
+#define regGL1H_GL1_CREDITS 0x2e41
+#define regGL1H_GL1_CREDITS_BASE_IDX 1
+#define regGL1H_BURST_MASK 0x2e42
+#define regGL1H_BURST_MASK_BASE_IDX 1
+#define regGL1H_BURST_CTRL 0x2e43
+#define regGL1H_BURST_CTRL_BASE_IDX 1
+#define regGL1H_ARB_STATUS 0x2e44
+#define regGL1H_ARB_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_perfddec
+// base address: 0x34000
+#define regCPG_PERFCOUNTER1_LO 0x3000
+#define regCPG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER1_HI 0x3001
+#define regCPG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_LO 0x3002
+#define regCPG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_HI 0x3003
+#define regCPG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_LO 0x3004
+#define regCPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_HI 0x3005
+#define regCPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_LO 0x3006
+#define regCPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_HI 0x3007
+#define regCPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_LO 0x3008
+#define regCPF_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_HI 0x3009
+#define regCPF_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_LO 0x300a
+#define regCPF_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_HI 0x300b
+#define regCPF_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCPF_LATENCY_STATS_DATA 0x300c
+#define regCPF_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPG_LATENCY_STATS_DATA 0x300d
+#define regCPG_LATENCY_STATS_DATA_BASE_IDX 1
+#define regCPC_LATENCY_STATS_DATA 0x300e
+#define regCPC_LATENCY_STATS_DATA_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_LO 0x3040
+#define regGRBM_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_HI 0x3041
+#define regGRBM_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_LO 0x3043
+#define regGRBM_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_HI 0x3044
+#define regGRBM_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_LO 0x3045
+#define regGRBM_SE0_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_HI 0x3046
+#define regGRBM_SE0_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_LO 0x3047
+#define regGRBM_SE1_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_HI 0x3048
+#define regGRBM_SE1_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_LO 0x3049
+#define regGRBM_SE2_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_HI 0x304a
+#define regGRBM_SE2_PERFCOUNTER_HI_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_LO 0x304b
+#define regGRBM_SE3_PERFCOUNTER_LO_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_HI 0x304c
+#define regGRBM_SE3_PERFCOUNTER_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_LO 0x30a4
+#define regGE1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_HI 0x30a5
+#define regGE1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_LO 0x30a6
+#define regGE1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_HI 0x30a7
+#define regGE1_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_LO 0x30a8
+#define regGE1_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_HI 0x30a9
+#define regGE1_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_LO 0x30aa
+#define regGE1_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_HI 0x30ab
+#define regGE1_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_LO 0x30ac
+#define regGE2_DIST_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_HI 0x30ad
+#define regGE2_DIST_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_LO 0x30ae
+#define regGE2_DIST_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_HI 0x30af
+#define regGE2_DIST_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_LO 0x30b0
+#define regGE2_DIST_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_HI 0x30b1
+#define regGE2_DIST_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_LO 0x30b2
+#define regGE2_DIST_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_HI 0x30b3
+#define regGE2_DIST_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_LO 0x30b4
+#define regGE2_SE_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_HI 0x30b5
+#define regGE2_SE_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_LO 0x30b6
+#define regGE2_SE_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_HI 0x30b7
+#define regGE2_SE_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_LO 0x30b8
+#define regGE2_SE_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_HI 0x30b9
+#define regGE2_SE_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_LO 0x30ba
+#define regGE2_SE_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_HI 0x30bb
+#define regGE2_SE_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_LO 0x3100
+#define regPA_SU_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_HI 0x3101
+#define regPA_SU_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_LO 0x3102
+#define regPA_SU_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_HI 0x3103
+#define regPA_SU_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_LO 0x3104
+#define regPA_SU_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_HI 0x3105
+#define regPA_SU_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_LO 0x3106
+#define regPA_SU_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_HI 0x3107
+#define regPA_SU_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_LO 0x3140
+#define regPA_SC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_HI 0x3141
+#define regPA_SC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_LO 0x3142
+#define regPA_SC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_HI 0x3143
+#define regPA_SC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_LO 0x3144
+#define regPA_SC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_HI 0x3145
+#define regPA_SC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_LO 0x3146
+#define regPA_SC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_HI 0x3147
+#define regPA_SC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_LO 0x3148
+#define regPA_SC_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_HI 0x3149
+#define regPA_SC_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_LO 0x314a
+#define regPA_SC_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_HI 0x314b
+#define regPA_SC_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_LO 0x314c
+#define regPA_SC_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_HI 0x314d
+#define regPA_SC_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_LO 0x314e
+#define regPA_SC_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_HI 0x314f
+#define regPA_SC_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_HI 0x3180
+#define regSPI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_LO 0x3181
+#define regSPI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_HI 0x3182
+#define regSPI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_LO 0x3183
+#define regSPI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_HI 0x3184
+#define regSPI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_LO 0x3185
+#define regSPI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_HI 0x3186
+#define regSPI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_LO 0x3187
+#define regSPI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_HI 0x3188
+#define regSPI_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_LO 0x3189
+#define regSPI_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_HI 0x318a
+#define regSPI_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_LO 0x318b
+#define regSPI_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER0_HI 0x318c
+#define regPC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER0_LO 0x318d
+#define regPC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER1_HI 0x318e
+#define regPC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER1_LO 0x318f
+#define regPC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER2_HI 0x3190
+#define regPC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER2_LO 0x3191
+#define regPC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPC_PERFCOUNTER3_HI 0x3192
+#define regPC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPC_PERFCOUNTER3_LO 0x3193
+#define regPC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_LO 0x31c0
+#define regSQ_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_LO 0x31c2
+#define regSQ_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_LO 0x31c4
+#define regSQ_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_LO 0x31c6
+#define regSQ_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_LO 0x31c8
+#define regSQ_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_LO 0x31ca
+#define regSQ_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_LO 0x31cc
+#define regSQ_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_LO 0x31ce
+#define regSQ_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_LO 0x31e4
+#define regSQG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_HI 0x31e5
+#define regSQG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_LO 0x31e6
+#define regSQG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_HI 0x31e7
+#define regSQG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_LO 0x31e8
+#define regSQG_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_HI 0x31e9
+#define regSQG_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_LO 0x31ea
+#define regSQG_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_HI 0x31eb
+#define regSQG_PERFCOUNTER3_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_LO 0x31ec
+#define regSQG_PERFCOUNTER4_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_HI 0x31ed
+#define regSQG_PERFCOUNTER4_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_LO 0x31ee
+#define regSQG_PERFCOUNTER5_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_HI 0x31ef
+#define regSQG_PERFCOUNTER5_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_LO 0x31f0
+#define regSQG_PERFCOUNTER6_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_HI 0x31f1
+#define regSQG_PERFCOUNTER6_HI_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_LO 0x31f2
+#define regSQG_PERFCOUNTER7_LO_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_HI 0x31f3
+#define regSQG_PERFCOUNTER7_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER0_LO 0x3240
+#define regSX_PERFCOUNTER0_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER0_HI 0x3241
+#define regSX_PERFCOUNTER0_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER1_LO 0x3242
+#define regSX_PERFCOUNTER1_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER1_HI 0x3243
+#define regSX_PERFCOUNTER1_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER2_LO 0x3244
+#define regSX_PERFCOUNTER2_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER2_HI 0x3245
+#define regSX_PERFCOUNTER2_HI_BASE_IDX 1
+#define regSX_PERFCOUNTER3_LO 0x3246
+#define regSX_PERFCOUNTER3_LO_BASE_IDX 1
+#define regSX_PERFCOUNTER3_HI 0x3247
+#define regSX_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_LO 0x3260
+#define regGCEA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_HI 0x3261
+#define regGCEA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_LO 0x3262
+#define regGCEA_PERFCOUNTER_LO_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_HI 0x3263
+#define regGCEA_PERFCOUNTER_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_LO 0x3280
+#define regGDS_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_HI 0x3281
+#define regGDS_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_LO 0x3282
+#define regGDS_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_HI 0x3283
+#define regGDS_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_LO 0x3284
+#define regGDS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_HI 0x3285
+#define regGDS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_LO 0x3286
+#define regGDS_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_HI 0x3287
+#define regGDS_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER0_LO 0x32c0
+#define regTA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER0_HI 0x32c1
+#define regTA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTA_PERFCOUNTER1_LO 0x32c2
+#define regTA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTA_PERFCOUNTER1_HI 0x32c3
+#define regTA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER0_LO 0x3300
+#define regTD_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER0_HI 0x3301
+#define regTD_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTD_PERFCOUNTER1_LO 0x3302
+#define regTD_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTD_PERFCOUNTER1_HI 0x3303
+#define regTD_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_LO 0x3340
+#define regTCP_PERFCOUNTER0_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_HI 0x3341
+#define regTCP_PERFCOUNTER0_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_LO 0x3342
+#define regTCP_PERFCOUNTER1_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_HI 0x3343
+#define regTCP_PERFCOUNTER1_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_LO 0x3344
+#define regTCP_PERFCOUNTER2_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_HI 0x3345
+#define regTCP_PERFCOUNTER2_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_LO 0x3346
+#define regTCP_PERFCOUNTER3_LO_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_HI 0x3347
+#define regTCP_PERFCOUNTER3_HI_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER 0x3348
+#define regTCP_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER2 0x3349
+#define regTCP_PERFCOUNTER_FILTER2_BASE_IDX 1
+#define regTCP_PERFCOUNTER_FILTER_EN 0x334a
+#define regTCP_PERFCOUNTER_FILTER_EN_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_LO 0x3380
+#define regGL2C_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_HI 0x3381
+#define regGL2C_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_LO 0x3382
+#define regGL2C_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_HI 0x3383
+#define regGL2C_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_LO 0x3384
+#define regGL2C_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_HI 0x3385
+#define regGL2C_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_LO 0x3386
+#define regGL2C_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_HI 0x3387
+#define regGL2C_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_LO 0x3390
+#define regGL2A_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_HI 0x3391
+#define regGL2A_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_LO 0x3392
+#define regGL2A_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_HI 0x3393
+#define regGL2A_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_LO 0x3394
+#define regGL2A_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_HI 0x3395
+#define regGL2A_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_LO 0x3396
+#define regGL2A_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_HI 0x3397
+#define regGL2A_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_LO 0x33a0
+#define regGL1C_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_HI 0x33a1
+#define regGL1C_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_LO 0x33a2
+#define regGL1C_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_HI 0x33a3
+#define regGL1C_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_LO 0x33a4
+#define regGL1C_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_HI 0x33a5
+#define regGL1C_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_LO 0x33a6
+#define regGL1C_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_HI 0x33a7
+#define regGL1C_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_LO 0x33c0
+#define regCHC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_HI 0x33c1
+#define regCHC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_LO 0x33c2
+#define regCHC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_HI 0x33c3
+#define regCHC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_LO 0x33c4
+#define regCHC_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_HI 0x33c5
+#define regCHC_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_LO 0x33c6
+#define regCHC_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_HI 0x33c7
+#define regCHC_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_LO 0x33c8
+#define regCHCG_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_HI 0x33c9
+#define regCHCG_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_LO 0x33ca
+#define regCHCG_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_HI 0x33cb
+#define regCHCG_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_LO 0x33cc
+#define regCHCG_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_HI 0x33cd
+#define regCHCG_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_LO 0x33ce
+#define regCHCG_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_HI 0x33cf
+#define regCHCG_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER0_LO 0x3406
+#define regCB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER0_HI 0x3407
+#define regCB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER1_LO 0x3408
+#define regCB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER1_HI 0x3409
+#define regCB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER2_LO 0x340a
+#define regCB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER2_HI 0x340b
+#define regCB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCB_PERFCOUNTER3_LO 0x340c
+#define regCB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCB_PERFCOUNTER3_HI 0x340d
+#define regCB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER0_LO 0x3440
+#define regDB_PERFCOUNTER0_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER0_HI 0x3441
+#define regDB_PERFCOUNTER0_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER1_LO 0x3442
+#define regDB_PERFCOUNTER1_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER1_HI 0x3443
+#define regDB_PERFCOUNTER1_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER2_LO 0x3444
+#define regDB_PERFCOUNTER2_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER2_HI 0x3445
+#define regDB_PERFCOUNTER2_HI_BASE_IDX 1
+#define regDB_PERFCOUNTER3_LO 0x3446
+#define regDB_PERFCOUNTER3_LO_BASE_IDX 1
+#define regDB_PERFCOUNTER3_HI 0x3447
+#define regDB_PERFCOUNTER3_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_LO 0x3480
+#define regRLC_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_HI 0x3481
+#define regRLC_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_LO 0x3482
+#define regRLC_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_HI 0x3483
+#define regRLC_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_LO 0x34c0
+#define regRMI_PERFCOUNTER0_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_HI 0x34c1
+#define regRMI_PERFCOUNTER0_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_LO 0x34c2
+#define regRMI_PERFCOUNTER1_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_HI 0x34c3
+#define regRMI_PERFCOUNTER1_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_LO 0x34c4
+#define regRMI_PERFCOUNTER2_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_HI 0x34c5
+#define regRMI_PERFCOUNTER2_HI_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_LO 0x34c6
+#define regRMI_PERFCOUNTER3_LO_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_HI 0x34c7
+#define regRMI_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_LO 0x3520
+#define regGCR_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_HI 0x3521
+#define regGCR_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_LO 0x3522
+#define regGCR_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_HI 0x3523
+#define regGCR_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_LO 0x3580
+#define regPA_PH_PERFCOUNTER0_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_HI 0x3581
+#define regPA_PH_PERFCOUNTER0_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_LO 0x3582
+#define regPA_PH_PERFCOUNTER1_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_HI 0x3583
+#define regPA_PH_PERFCOUNTER1_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_LO 0x3584
+#define regPA_PH_PERFCOUNTER2_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_HI 0x3585
+#define regPA_PH_PERFCOUNTER2_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_LO 0x3586
+#define regPA_PH_PERFCOUNTER3_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_HI 0x3587
+#define regPA_PH_PERFCOUNTER3_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_LO 0x3588
+#define regPA_PH_PERFCOUNTER4_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_HI 0x3589
+#define regPA_PH_PERFCOUNTER4_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_LO 0x358a
+#define regPA_PH_PERFCOUNTER5_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_HI 0x358b
+#define regPA_PH_PERFCOUNTER5_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_LO 0x358c
+#define regPA_PH_PERFCOUNTER6_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_HI 0x358d
+#define regPA_PH_PERFCOUNTER6_HI_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_LO 0x358e
+#define regPA_PH_PERFCOUNTER7_LO_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_HI 0x358f
+#define regPA_PH_PERFCOUNTER7_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_LO 0x35a0
+#define regUTCL1_PERFCOUNTER0_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_HI 0x35a1
+#define regUTCL1_PERFCOUNTER0_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_LO 0x35a2
+#define regUTCL1_PERFCOUNTER1_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_HI 0x35a3
+#define regUTCL1_PERFCOUNTER1_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_LO 0x35a4
+#define regUTCL1_PERFCOUNTER2_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_HI 0x35a5
+#define regUTCL1_PERFCOUNTER2_HI_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_LO 0x35a6
+#define regUTCL1_PERFCOUNTER3_LO_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_HI 0x35a7
+#define regUTCL1_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_LO 0x35c0
+#define regGL1A_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_HI 0x35c1
+#define regGL1A_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_LO 0x35c2
+#define regGL1A_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_HI 0x35c3
+#define regGL1A_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_LO 0x35c4
+#define regGL1A_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_HI 0x35c5
+#define regGL1A_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_LO 0x35c6
+#define regGL1A_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_HI 0x35c7
+#define regGL1A_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_LO 0x35d0
+#define regGL1H_PERFCOUNTER0_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_HI 0x35d1
+#define regGL1H_PERFCOUNTER0_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_LO 0x35d2
+#define regGL1H_PERFCOUNTER1_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_HI 0x35d3
+#define regGL1H_PERFCOUNTER1_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_LO 0x35d4
+#define regGL1H_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_HI 0x35d5
+#define regGL1H_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_LO 0x35d6
+#define regGL1H_PERFCOUNTER3_LO_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_HI 0x35d7
+#define regGL1H_PERFCOUNTER3_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_LO 0x3600
+#define regCHA_PERFCOUNTER0_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_HI 0x3601
+#define regCHA_PERFCOUNTER0_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_LO 0x3602
+#define regCHA_PERFCOUNTER1_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_HI 0x3603
+#define regCHA_PERFCOUNTER1_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_LO 0x3604
+#define regCHA_PERFCOUNTER2_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_HI 0x3605
+#define regCHA_PERFCOUNTER2_HI_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_LO 0x3606
+#define regCHA_PERFCOUNTER3_LO_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_HI 0x3607
+#define regCHA_PERFCOUNTER3_HI_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_LO 0x3640
+#define regGUS_PERFCOUNTER2_LO_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_HI 0x3641
+#define regGUS_PERFCOUNTER2_HI_BASE_IDX 1
+#define regGUS_PERFCOUNTER_LO 0x3642
+#define regGUS_PERFCOUNTER_LO_BASE_IDX 1
+#define regGUS_PERFCOUNTER_HI 0x3643
+#define regGUS_PERFCOUNTER_HI_BASE_IDX 1
+
+
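The counters in the gc_perfddec block above are split into *_LO/*_HI register pairs holding the low and high 32 bits of a 64-bit count. A minimal sketch of combining the two halves once both registers have been read (pure arithmetic on already-read values; the helper name is hypothetical, and any retry-on-rollover policy between the two MMIO reads is left to the caller):

#include <stdint.h>

/*
 * Hypothetical helper: merge the values read from a *_PERFCOUNTERn_LO /
 * *_PERFCOUNTERn_HI pair (e.g. regGRBM_PERFCOUNTER0_LO/_HI above) into
 * one 64-bit counter value.
 */
static inline uint64_t gc_perfcounter64(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}
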
+// addressBlock: gc_perfsdec
+// base address: 0x36000
+#define regCPG_PERFCOUNTER1_SELECT 0x3800
+#define regCPG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT1 0x3801
+#define regCPG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPG_PERFCOUNTER0_SELECT 0x3802
+#define regCPG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER1_SELECT 0x3803
+#define regCPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT1 0x3804
+#define regCPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER1_SELECT 0x3805
+#define regCPF_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT1 0x3806
+#define regCPF_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCPF_PERFCOUNTER0_SELECT 0x3807
+#define regCPF_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCP_PERFMON_CNTL 0x3808
+#define regCP_PERFMON_CNTL_BASE_IDX 1
+#define regCPC_PERFCOUNTER0_SELECT 0x3809
+#define regCPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT 0x380a
+#define regCPF_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT 0x380b
+#define regCPG_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCPF_LATENCY_STATS_SELECT 0x380c
+#define regCPF_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPG_LATENCY_STATS_SELECT 0x380d
+#define regCPG_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_LATENCY_STATS_SELECT 0x380e
+#define regCPC_LATENCY_STATS_SELECT_BASE_IDX 1
+#define regCPC_TC_PERF_COUNTER_WINDOW_SELECT 0x380f
+#define regCPC_TC_PERF_COUNTER_WINDOW_SELECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT 0x3810
+#define regCP_DRAW_OBJECT_BASE_IDX 1
+#define regCP_DRAW_OBJECT_COUNTER 0x3811
+#define regCP_DRAW_OBJECT_COUNTER_BASE_IDX 1
+#define regCP_DRAW_WINDOW_MASK_HI 0x3812
+#define regCP_DRAW_WINDOW_MASK_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_HI 0x3813
+#define regCP_DRAW_WINDOW_HI_BASE_IDX 1
+#define regCP_DRAW_WINDOW_LO 0x3814
+#define regCP_DRAW_WINDOW_LO_BASE_IDX 1
+#define regCP_DRAW_WINDOW_CNTL 0x3815
+#define regCP_DRAW_WINDOW_CNTL_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT 0x3840
+#define regGRBM_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT 0x3841
+#define regGRBM_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGRBM_SE0_PERFCOUNTER_SELECT 0x3842
+#define regGRBM_SE0_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE1_PERFCOUNTER_SELECT 0x3843
+#define regGRBM_SE1_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE2_PERFCOUNTER_SELECT 0x3844
+#define regGRBM_SE2_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_SE3_PERFCOUNTER_SELECT 0x3845
+#define regGRBM_SE3_PERFCOUNTER_SELECT_BASE_IDX 1
+#define regGRBM_PERFCOUNTER0_SELECT_HI 0x384d
+#define regGRBM_PERFCOUNTER0_SELECT_HI_BASE_IDX 1
+#define regGRBM_PERFCOUNTER1_SELECT_HI 0x384e
+#define regGRBM_PERFCOUNTER1_SELECT_HI_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_SELECT 0x38a4
+#define regGE1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER0_SELECT1 0x38a5
+#define regGE1_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_SELECT 0x38a6
+#define regGE1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER1_SELECT1 0x38a7
+#define regGE1_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_SELECT 0x38a8
+#define regGE1_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER2_SELECT1 0x38a9
+#define regGE1_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_SELECT 0x38aa
+#define regGE1_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE1_PERFCOUNTER3_SELECT1 0x38ab
+#define regGE1_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_SELECT 0x38ac
+#define regGE2_DIST_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER0_SELECT1 0x38ad
+#define regGE2_DIST_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_SELECT 0x38ae
+#define regGE2_DIST_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER1_SELECT1 0x38af
+#define regGE2_DIST_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_SELECT 0x38b0
+#define regGE2_DIST_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER2_SELECT1 0x38b1
+#define regGE2_DIST_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_SELECT 0x38b2
+#define regGE2_DIST_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE2_DIST_PERFCOUNTER3_SELECT1 0x38b3
+#define regGE2_DIST_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_SELECT 0x38b4
+#define regGE2_SE_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER0_SELECT1 0x38b5
+#define regGE2_SE_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_SELECT 0x38b6
+#define regGE2_SE_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER1_SELECT1 0x38b7
+#define regGE2_SE_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_SELECT 0x38b8
+#define regGE2_SE_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER2_SELECT1 0x38b9
+#define regGE2_SE_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_SELECT 0x38ba
+#define regGE2_SE_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGE2_SE_PERFCOUNTER3_SELECT1 0x38bb
+#define regGE2_SE_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT 0x3900
+#define regPA_SU_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER0_SELECT1 0x3901
+#define regPA_SU_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT 0x3902
+#define regPA_SU_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER1_SELECT1 0x3903
+#define regPA_SU_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT 0x3904
+#define regPA_SU_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER2_SELECT1 0x3905
+#define regPA_SU_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT 0x3906
+#define regPA_SU_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SU_PERFCOUNTER3_SELECT1 0x3907
+#define regPA_SU_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT 0x3940
+#define regPA_SC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER0_SELECT1 0x3941
+#define regPA_SC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER1_SELECT 0x3942
+#define regPA_SC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER2_SELECT 0x3943
+#define regPA_SC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER3_SELECT 0x3944
+#define regPA_SC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER4_SELECT 0x3945
+#define regPA_SC_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER5_SELECT 0x3946
+#define regPA_SC_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER6_SELECT 0x3947
+#define regPA_SC_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_SC_PERFCOUNTER7_SELECT 0x3948
+#define regPA_SC_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT 0x3980
+#define regSPI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT 0x3981
+#define regSPI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT 0x3982
+#define regSPI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT 0x3983
+#define regSPI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER0_SELECT1 0x3984
+#define regSPI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER1_SELECT1 0x3985
+#define regSPI_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER2_SELECT1 0x3986
+#define regSPI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER3_SELECT1 0x3987
+#define regSPI_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSPI_PERFCOUNTER4_SELECT 0x3988
+#define regSPI_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER5_SELECT 0x3989
+#define regSPI_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSPI_PERFCOUNTER_BINS 0x398a
+#define regSPI_PERFCOUNTER_BINS_BASE_IDX 1
+#define regPC_PERFCOUNTER0_SELECT 0x398c
+#define regPC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER1_SELECT 0x398d
+#define regPC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER2_SELECT 0x398e
+#define regPC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER3_SELECT 0x398f
+#define regPC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPC_PERFCOUNTER0_SELECT1 0x3990
+#define regPC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER1_SELECT1 0x3991
+#define regPC_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER2_SELECT1 0x3992
+#define regPC_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPC_PERFCOUNTER3_SELECT1 0x3993
+#define regPC_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regSQ_PERFCOUNTER0_SELECT 0x39c0
+#define regSQ_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER1_SELECT 0x39c1
+#define regSQ_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER2_SELECT 0x39c2
+#define regSQ_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER3_SELECT 0x39c3
+#define regSQ_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER4_SELECT 0x39c4
+#define regSQ_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER5_SELECT 0x39c5
+#define regSQ_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER6_SELECT 0x39c6
+#define regSQ_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER7_SELECT 0x39c7
+#define regSQ_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER8_SELECT 0x39c8
+#define regSQ_PERFCOUNTER8_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER9_SELECT 0x39c9
+#define regSQ_PERFCOUNTER9_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER10_SELECT 0x39ca
+#define regSQ_PERFCOUNTER10_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER11_SELECT 0x39cb
+#define regSQ_PERFCOUNTER11_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER12_SELECT 0x39cc
+#define regSQ_PERFCOUNTER12_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER13_SELECT 0x39cd
+#define regSQ_PERFCOUNTER13_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER14_SELECT 0x39ce
+#define regSQ_PERFCOUNTER14_SELECT_BASE_IDX 1
+#define regSQ_PERFCOUNTER15_SELECT 0x39cf
+#define regSQ_PERFCOUNTER15_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER0_SELECT 0x39d0
+#define regSQG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER1_SELECT 0x39d1
+#define regSQG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER2_SELECT 0x39d2
+#define regSQG_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER3_SELECT 0x39d3
+#define regSQG_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER4_SELECT 0x39d4
+#define regSQG_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER5_SELECT 0x39d5
+#define regSQG_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER6_SELECT 0x39d6
+#define regSQG_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER7_SELECT 0x39d7
+#define regSQG_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regSQG_PERFCOUNTER_CTRL 0x39d8
+#define regSQG_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQG_PERFCOUNTER_CTRL2 0x39da
+#define regSQG_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSQG_PERF_SAMPLE_FINISH 0x39db
+#define regSQG_PERF_SAMPLE_FINISH_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL 0x39e0
+#define regSQ_PERFCOUNTER_CTRL_BASE_IDX 1
+#define regSQ_PERFCOUNTER_CTRL2 0x39e2
+#define regSQ_PERFCOUNTER_CTRL2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF0_BASE 0x39e8
+#define regSQ_THREAD_TRACE_BUF0_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF0_SIZE 0x39e9
+#define regSQ_THREAD_TRACE_BUF0_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF1_BASE 0x39ea
+#define regSQ_THREAD_TRACE_BUF1_BASE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_BUF1_SIZE 0x39eb
+#define regSQ_THREAD_TRACE_BUF1_SIZE_BASE_IDX 1
+#define regSQ_THREAD_TRACE_CTRL 0x39ec
+#define regSQ_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regSQ_THREAD_TRACE_MASK 0x39ed
+#define regSQ_THREAD_TRACE_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_TOKEN_MASK 0x39ee
+#define regSQ_THREAD_TRACE_TOKEN_MASK_BASE_IDX 1
+#define regSQ_THREAD_TRACE_WPTR 0x39ef
+#define regSQ_THREAD_TRACE_WPTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS 0x39f4
+#define regSQ_THREAD_TRACE_STATUS_BASE_IDX 1
+#define regSQ_THREAD_TRACE_STATUS2 0x39f5
+#define regSQ_THREAD_TRACE_STATUS2_BASE_IDX 1
+#define regSQ_THREAD_TRACE_GFX_DRAW_CNTR 0x39f6
+#define regSQ_THREAD_TRACE_GFX_DRAW_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_GFX_MARKER_CNTR 0x39f7
+#define regSQ_THREAD_TRACE_GFX_MARKER_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HP3D_DRAW_CNTR 0x39f8
+#define regSQ_THREAD_TRACE_HP3D_DRAW_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_HP3D_MARKER_CNTR 0x39f9
+#define regSQ_THREAD_TRACE_HP3D_MARKER_CNTR_BASE_IDX 1
+#define regSQ_THREAD_TRACE_DROPPED_CNTR 0x39fa
+#define regSQ_THREAD_TRACE_DROPPED_CNTR_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_SELECT 0x3a00
+#define regGCEA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_SELECT1 0x3a01
+#define regGCEA_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGCEA_PERFCOUNTER2_MODE 0x3a02
+#define regGCEA_PERFCOUNTER2_MODE_BASE_IDX 1
+#define regGCEA_PERFCOUNTER0_CFG 0x3a03
+#define regGCEA_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGCEA_PERFCOUNTER1_CFG 0x3a04
+#define regGCEA_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGCEA_PERFCOUNTER_RSLT_CNTL 0x3a05
+#define regGCEA_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT 0x3a40
+#define regSX_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT 0x3a41
+#define regSX_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER2_SELECT 0x3a42
+#define regSX_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER3_SELECT 0x3a43
+#define regSX_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regSX_PERFCOUNTER0_SELECT1 0x3a44
+#define regSX_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regSX_PERFCOUNTER1_SELECT1 0x3a45
+#define regSX_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT 0x3a80
+#define regGDS_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT 0x3a81
+#define regGDS_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT 0x3a82
+#define regGDS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT 0x3a83
+#define regGDS_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGDS_PERFCOUNTER0_SELECT1 0x3a84
+#define regGDS_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER1_SELECT1 0x3a85
+#define regGDS_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER2_SELECT1 0x3a86
+#define regGDS_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGDS_PERFCOUNTER3_SELECT1 0x3a87
+#define regGDS_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT 0x3ac0
+#define regTA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTA_PERFCOUNTER0_SELECT1 0x3ac1
+#define regTA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTA_PERFCOUNTER1_SELECT 0x3ac2
+#define regTA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT 0x3b00
+#define regTD_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTD_PERFCOUNTER0_SELECT1 0x3b01
+#define regTD_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTD_PERFCOUNTER1_SELECT 0x3b02
+#define regTD_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT 0x3b40
+#define regTCP_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER0_SELECT1 0x3b41
+#define regTCP_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT 0x3b42
+#define regTCP_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER1_SELECT1 0x3b43
+#define regTCP_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regTCP_PERFCOUNTER2_SELECT 0x3b44
+#define regTCP_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regTCP_PERFCOUNTER3_SELECT 0x3b45
+#define regTCP_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_SELECT 0x3b80
+#define regGL2C_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER0_SELECT1 0x3b81
+#define regGL2C_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_SELECT 0x3b82
+#define regGL2C_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER1_SELECT1 0x3b83
+#define regGL2C_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGL2C_PERFCOUNTER2_SELECT 0x3b84
+#define regGL2C_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL2C_PERFCOUNTER3_SELECT 0x3b85
+#define regGL2C_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_SELECT 0x3b90
+#define regGL2A_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER0_SELECT1 0x3b91
+#define regGL2A_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_SELECT 0x3b92
+#define regGL2A_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER1_SELECT1 0x3b93
+#define regGL2A_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regGL2A_PERFCOUNTER2_SELECT 0x3b94
+#define regGL2A_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL2A_PERFCOUNTER3_SELECT 0x3b95
+#define regGL2A_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_SELECT 0x3ba0
+#define regGL1C_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER0_SELECT1 0x3ba1
+#define regGL1C_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1C_PERFCOUNTER1_SELECT 0x3ba2
+#define regGL1C_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER2_SELECT 0x3ba3
+#define regGL1C_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1C_PERFCOUNTER3_SELECT 0x3ba4
+#define regGL1C_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_SELECT 0x3bc0
+#define regCHC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER0_SELECT1 0x3bc1
+#define regCHC_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHC_PERFCOUNTER1_SELECT 0x3bc2
+#define regCHC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER2_SELECT 0x3bc3
+#define regCHC_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHC_PERFCOUNTER3_SELECT 0x3bc4
+#define regCHC_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_SELECT 0x3bc6
+#define regCHCG_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER0_SELECT1 0x3bc7
+#define regCHCG_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHCG_PERFCOUNTER1_SELECT 0x3bc8
+#define regCHCG_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER2_SELECT 0x3bc9
+#define regCHCG_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHCG_PERFCOUNTER3_SELECT 0x3bca
+#define regCHCG_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER_FILTER 0x3c00
+#define regCB_PERFCOUNTER_FILTER_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT 0x3c01
+#define regCB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER0_SELECT1 0x3c02
+#define regCB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCB_PERFCOUNTER1_SELECT 0x3c03
+#define regCB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER2_SELECT 0x3c04
+#define regCB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCB_PERFCOUNTER3_SELECT 0x3c05
+#define regCB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT 0x3c40
+#define regDB_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER0_SELECT1 0x3c41
+#define regDB_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT 0x3c42
+#define regDB_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER1_SELECT1 0x3c43
+#define regDB_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regDB_PERFCOUNTER2_SELECT 0x3c44
+#define regDB_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regDB_PERFCOUNTER3_SELECT 0x3c46
+#define regDB_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRLC_SPM_PERFMON_CNTL 0x3c80
+#define regRLC_SPM_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_LO 0x3c81
+#define regRLC_SPM_PERFMON_RING_BASE_LO_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_BASE_HI 0x3c82
+#define regRLC_SPM_PERFMON_RING_BASE_HI_BASE_IDX 1
+#define regRLC_SPM_PERFMON_RING_SIZE 0x3c83
+#define regRLC_SPM_PERFMON_RING_SIZE_BASE_IDX 1
+#define regRLC_SPM_RING_WRPTR 0x3c84
+#define regRLC_SPM_RING_WRPTR_BASE_IDX 1
+#define regRLC_SPM_RING_RDPTR 0x3c85
+#define regRLC_SPM_RING_RDPTR_BASE_IDX 1
+#define regRLC_SPM_SEGMENT_THRESHOLD 0x3c86
+#define regRLC_SPM_SEGMENT_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE 0x3c87
+#define regRLC_SPM_PERFMON_SEGMENT_SIZE_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR 0x3c88
+#define regRLC_SPM_GLOBAL_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA 0x3c89
+#define regRLC_SPM_GLOBAL_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_ADDR 0x3c8a
+#define regRLC_SPM_SE_MUXSEL_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_MUXSEL_DATA 0x3c8b
+#define regRLC_SPM_SE_MUXSEL_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_ADDR 0x3c92
+#define regRLC_SPM_ACCUM_DATARAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_DATA 0x3c93
+#define regRLC_SPM_ACCUM_DATARAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SWA_DATARAM_ADDR 0x3c94
+#define regRLC_SPM_ACCUM_SWA_DATARAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SWA_DATARAM_DATA 0x3c95
+#define regRLC_SPM_ACCUM_SWA_DATARAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR 0x3c96
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_DATA 0x3c97
+#define regRLC_SPM_ACCUM_CTRLRAM_DATA_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET 0x3c98
+#define regRLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET_BASE_IDX 1
+#define regRLC_SPM_ACCUM_STATUS 0x3c99
+#define regRLC_SPM_ACCUM_STATUS_BASE_IDX 1
+#define regRLC_SPM_ACCUM_CTRL 0x3c9a
+#define regRLC_SPM_ACCUM_CTRL_BASE_IDX 1
+#define regRLC_SPM_ACCUM_MODE 0x3c9b
+#define regRLC_SPM_ACCUM_MODE_BASE_IDX 1
+#define regRLC_SPM_ACCUM_THRESHOLD 0x3c9c
+#define regRLC_SPM_ACCUM_THRESHOLD_BASE_IDX 1
+#define regRLC_SPM_ACCUM_SAMPLES_REQUESTED 0x3c9d
+#define regRLC_SPM_ACCUM_SAMPLES_REQUESTED_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_WRCOUNT 0x3c9e
+#define regRLC_SPM_ACCUM_DATARAM_WRCOUNT_BASE_IDX 1
+#define regRLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS 0x3c9f
+#define regRLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS_BASE_IDX 1
+#define regRLC_SPM_PAUSE 0x3ca2
+#define regRLC_SPM_PAUSE_BASE_IDX 1
+#define regRLC_SPM_STATUS 0x3ca3
+#define regRLC_SPM_STATUS_BASE_IDX 1
+#define regRLC_SPM_GFXCLOCK_LOWCOUNT 0x3ca4
+#define regRLC_SPM_GFXCLOCK_LOWCOUNT_BASE_IDX 1
+#define regRLC_SPM_GFXCLOCK_HIGHCOUNT 0x3ca5
+#define regRLC_SPM_GFXCLOCK_HIGHCOUNT_BASE_IDX 1
+#define regRLC_SPM_MODE 0x3cad
+#define regRLC_SPM_MODE_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_DATA_LO 0x3cae
+#define regRLC_SPM_RSPM_REQ_DATA_LO_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_DATA_HI 0x3caf
+#define regRLC_SPM_RSPM_REQ_DATA_HI_BASE_IDX 1
+#define regRLC_SPM_RSPM_REQ_OP 0x3cb0
+#define regRLC_SPM_RSPM_REQ_OP_BASE_IDX 1
+#define regRLC_SPM_RSPM_RET_DATA 0x3cb1
+#define regRLC_SPM_RSPM_RET_DATA_BASE_IDX 1
+#define regRLC_SPM_RSPM_RET_OP 0x3cb2
+#define regRLC_SPM_RSPM_RET_OP_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_DATA_LO 0x3cb3
+#define regRLC_SPM_SE_RSPM_REQ_DATA_LO_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_DATA_HI 0x3cb4
+#define regRLC_SPM_SE_RSPM_REQ_DATA_HI_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_REQ_OP 0x3cb5
+#define regRLC_SPM_SE_RSPM_REQ_OP_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_RET_DATA 0x3cb6
+#define regRLC_SPM_SE_RSPM_RET_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_RSPM_RET_OP 0x3cb7
+#define regRLC_SPM_SE_RSPM_RET_OP_BASE_IDX 1
+#define regRLC_SPM_RSPM_CMD 0x3cb8
+#define regRLC_SPM_RSPM_CMD_BASE_IDX 1
+#define regRLC_SPM_RSPM_CMD_ACK 0x3cb9
+#define regRLC_SPM_RSPM_CMD_ACK_BASE_IDX 1
+#define regRLC_SPM_SPARE 0x3cbf
+#define regRLC_SPM_SPARE_BASE_IDX 1
+#define regRLC_PERFMON_CNTL 0x3cc0
+#define regRLC_PERFMON_CNTL_BASE_IDX 1
+#define regRLC_PERFCOUNTER0_SELECT 0x3cc1
+#define regRLC_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRLC_PERFCOUNTER1_SELECT 0x3cc2
+#define regRLC_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_CNTL 0x3cc3
+#define regRLC_GPU_IOV_PERF_CNT_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR 0x3cc4
+#define regRLC_GPU_IOV_PERF_CNT_WR_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA 0x3cc5
+#define regRLC_GPU_IOV_PERF_CNT_WR_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR 0x3cc6
+#define regRLC_GPU_IOV_PERF_CNT_RD_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA 0x3cc7
+#define regRLC_GPU_IOV_PERF_CNT_RD_DATA_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT 0x3d00
+#define regRMI_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER0_SELECT1 0x3d01
+#define regRMI_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER1_SELECT 0x3d02
+#define regRMI_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT 0x3d03
+#define regRMI_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regRMI_PERFCOUNTER2_SELECT1 0x3d04
+#define regRMI_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regRMI_PERFCOUNTER3_SELECT 0x3d05
+#define regRMI_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regRMI_PERF_COUNTER_CNTL 0x3d06
+#define regRMI_PERF_COUNTER_CNTL_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_SELECT 0x3d60
+#define regGCR_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGCR_PERFCOUNTER0_SELECT1 0x3d61
+#define regGCR_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGCR_PERFCOUNTER1_SELECT 0x3d62
+#define regGCR_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_SELECT 0x3d80
+#define regPA_PH_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER0_SELECT1 0x3d81
+#define regPA_PH_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_SELECT 0x3d82
+#define regPA_PH_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_SELECT 0x3d83
+#define regPA_PH_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_SELECT 0x3d84
+#define regPA_PH_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER4_SELECT 0x3d85
+#define regPA_PH_PERFCOUNTER4_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER5_SELECT 0x3d86
+#define regPA_PH_PERFCOUNTER5_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER6_SELECT 0x3d87
+#define regPA_PH_PERFCOUNTER6_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER7_SELECT 0x3d88
+#define regPA_PH_PERFCOUNTER7_SELECT_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER1_SELECT1 0x3d90
+#define regPA_PH_PERFCOUNTER1_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER2_SELECT1 0x3d91
+#define regPA_PH_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regPA_PH_PERFCOUNTER3_SELECT1 0x3d92
+#define regPA_PH_PERFCOUNTER3_SELECT1_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER0_SELECT 0x3da0
+#define regUTCL1_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER1_SELECT 0x3da1
+#define regUTCL1_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER2_SELECT 0x3da2
+#define regUTCL1_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regUTCL1_PERFCOUNTER3_SELECT 0x3da3
+#define regUTCL1_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_SELECT 0x3dc0
+#define regGL1A_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER0_SELECT1 0x3dc1
+#define regGL1A_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1A_PERFCOUNTER1_SELECT 0x3dc2
+#define regGL1A_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER2_SELECT 0x3dc3
+#define regGL1A_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1A_PERFCOUNTER3_SELECT 0x3dc4
+#define regGL1A_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_SELECT 0x3dd0
+#define regGL1H_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER0_SELECT1 0x3dd1
+#define regGL1H_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regGL1H_PERFCOUNTER1_SELECT 0x3dd2
+#define regGL1H_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER2_SELECT 0x3dd3
+#define regGL1H_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGL1H_PERFCOUNTER3_SELECT 0x3dd4
+#define regGL1H_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_SELECT 0x3de0
+#define regCHA_PERFCOUNTER0_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER0_SELECT1 0x3de1
+#define regCHA_PERFCOUNTER0_SELECT1_BASE_IDX 1
+#define regCHA_PERFCOUNTER1_SELECT 0x3de2
+#define regCHA_PERFCOUNTER1_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER2_SELECT 0x3de3
+#define regCHA_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regCHA_PERFCOUNTER3_SELECT 0x3de4
+#define regCHA_PERFCOUNTER3_SELECT_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_SELECT 0x3e00
+#define regGUS_PERFCOUNTER2_SELECT_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_SELECT1 0x3e01
+#define regGUS_PERFCOUNTER2_SELECT1_BASE_IDX 1
+#define regGUS_PERFCOUNTER2_MODE 0x3e02
+#define regGUS_PERFCOUNTER2_MODE_BASE_IDX 1
+#define regGUS_PERFCOUNTER0_CFG 0x3e03
+#define regGUS_PERFCOUNTER0_CFG_BASE_IDX 1
+#define regGUS_PERFCOUNTER1_CFG 0x3e04
+#define regGUS_PERFCOUNTER1_CFG_BASE_IDX 1
+#define regGUS_PERFCOUNTER_RSLT_CNTL 0x3e05
+#define regGUS_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gdfll_gdfll_dec
+// base address: 0x3a000
+#define regGDFLL_EDC_HYSTERESIS_CNTL 0x4828
+#define regGDFLL_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_EDC_HYSTERESIS_STAT 0x4829
+#define regGDFLL_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: gc_gdfll_se_gdfll_dec
+// base address: 0x3a300
+#define regGDFLL_SE_EDC_HYSTERESIS_CNTL 0x48e8
+#define regGDFLL_SE_EDC_HYSTERESIS_CNTL_BASE_IDX 1
+#define regGDFLL_SE_EDC_HYSTERESIS_STAT 0x48e9
+#define regGDFLL_SE_EDC_HYSTERESIS_STAT_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfs_grtavfs_dec
+// base address: 0x3ac00
+#define regGRTAVFS_RTAVFS_REG_ADDR 0x4b00
+#define regGRTAVFS_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_WR_DATA 0x4b01
+#define regGRTAVFS_RTAVFS_WR_DATA_BASE_IDX 1
+#define regGRTAVFS_GENERAL_0 0x4b02
+#define regGRTAVFS_GENERAL_0_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_RD_DATA 0x4b03
+#define regGRTAVFS_RTAVFS_RD_DATA_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_REG_CTRL 0x4b04
+#define regGRTAVFS_RTAVFS_REG_CTRL_BASE_IDX 1
+#define regGRTAVFS_RTAVFS_REG_STATUS 0x4b05
+#define regGRTAVFS_RTAVFS_REG_STATUS_BASE_IDX 1
+#define regGRTAVFS_TARG_FREQ 0x4b06
+#define regGRTAVFS_TARG_FREQ_BASE_IDX 1
+#define regGRTAVFS_TARG_VOLT 0x4b07
+#define regGRTAVFS_TARG_VOLT_BASE_IDX 1
+#define regGRTAVFS_SOFT_RESET 0x4b0c
+#define regGRTAVFS_SOFT_RESET_BASE_IDX 1
+#define regGRTAVFS_PSM_CNTL 0x4b0d
+#define regGRTAVFS_PSM_CNTL_BASE_IDX 1
+#define regGRTAVFS_CLK_CNTL 0x4b0e
+#define regGRTAVFS_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfs_se_grtavfs_dec
+// base address: 0x3ad00
+#define regGRTAVFS_SE_RTAVFS_REG_ADDR 0x4b40
+#define regGRTAVFS_SE_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_WR_DATA 0x4b41
+#define regGRTAVFS_SE_RTAVFS_WR_DATA_BASE_IDX 1
+#define regGRTAVFS_SE_GENERAL_0 0x4b42
+#define regGRTAVFS_SE_GENERAL_0_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_RD_DATA 0x4b43
+#define regGRTAVFS_SE_RTAVFS_RD_DATA_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_REG_CTRL 0x4b44
+#define regGRTAVFS_SE_RTAVFS_REG_CTRL_BASE_IDX 1
+#define regGRTAVFS_SE_RTAVFS_REG_STATUS 0x4b45
+#define regGRTAVFS_SE_RTAVFS_REG_STATUS_BASE_IDX 1
+#define regGRTAVFS_SE_TARG_FREQ 0x4b46
+#define regGRTAVFS_SE_TARG_FREQ_BASE_IDX 1
+#define regGRTAVFS_SE_TARG_VOLT 0x4b47
+#define regGRTAVFS_SE_TARG_VOLT_BASE_IDX 1
+#define regGRTAVFS_SE_SOFT_RESET 0x4b4c
+#define regGRTAVFS_SE_SOFT_RESET_BASE_IDX 1
+#define regGRTAVFS_SE_PSM_CNTL 0x4b4d
+#define regGRTAVFS_SE_PSM_CNTL_BASE_IDX 1
+#define regGRTAVFS_SE_CLK_CNTL 0x4b4e
+#define regGRTAVFS_SE_CLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_grtavfsdec
+// base address: 0x3ac00
+#define regRTAVFS_RTAVFS_REG_ADDR 0x4b00
+#define regRTAVFS_RTAVFS_REG_ADDR_BASE_IDX 1
+#define regRTAVFS_RTAVFS_WR_DATA 0x4b01
+#define regRTAVFS_RTAVFS_WR_DATA_BASE_IDX 1
+
+
+// addressBlock: gc_hypdec
+// base address: 0x3e000
+#define regGFX_PIPE_PRIORITY 0x587f
+#define regGFX_PIPE_PRIORITY_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_ENABLE 0x5b00
+#define regRLC_GPU_IOV_VF_ENABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG6 0x5b06
+#define regRLC_GPU_IOV_CFG_REG6_BASE_IDX 1
+#define regRLC_SDMA0_STATUS 0x5b18
+#define regRLC_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_SDMA1_STATUS 0x5b19
+#define regRLC_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_SDMA2_STATUS 0x5b1a
+#define regRLC_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_SDMA3_STATUS 0x5b1b
+#define regRLC_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_SDMA0_BUSY_STATUS 0x5b1c
+#define regRLC_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA1_BUSY_STATUS 0x5b1d
+#define regRLC_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA2_BUSY_STATUS 0x5b1e
+#define regRLC_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_SDMA3_BUSY_STATUS 0x5b1f
+#define regRLC_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG8 0x5b20
+#define regRLC_GPU_IOV_CFG_REG8_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_0 0x5b25
+#define regRLC_RLCV_TIMER_INT_0_BASE_IDX 1
+#define regRLC_RLCV_TIMER_INT_1 0x5b26
+#define regRLC_RLCV_TIMER_INT_1_BASE_IDX 1
+#define regRLC_RLCV_TIMER_CTRL 0x5b27
+#define regRLC_RLCV_TIMER_CTRL_BASE_IDX 1
+#define regRLC_RLCV_TIMER_STAT 0x5b28
+#define regRLC_RLCV_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS 0x5b2a
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET 0x5b2b
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_SET_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR 0x5b2c
+#define regRLC_GPU_IOV_VF_DOORBELL_STATUS_CLR_BASE_IDX 1
+#define regRLC_GPU_IOV_VF_MASK 0x5b2d
+#define regRLC_GPU_IOV_VF_MASK_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_0 0x5b2e
+#define regRLC_HYP_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_1 0x5b2f
+#define regRLC_HYP_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_BUSY_CLK_CNTL 0x5b30
+#define regRLC_BUSY_CLK_CNTL_BASE_IDX 1
+#define regRLC_CLK_CNTL 0x5b31
+#define regRLC_CLK_CNTL_BASE_IDX 1
+#define regRLC_PACE_TIMER_STAT 0x5b33
+#define regRLC_PACE_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_BLOCK 0x5b34
+#define regRLC_GPU_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG1 0x5b35
+#define regRLC_GPU_IOV_CFG_REG1_BASE_IDX 1
+#define regRLC_GPU_IOV_CFG_REG2 0x5b36
+#define regRLC_GPU_IOV_CFG_REG2_BASE_IDX 1
+#define regRLC_GPU_IOV_VM_BUSY_STATUS 0x5b37
+#define regRLC_GPU_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_0 0x5b38
+#define regRLC_GPU_IOV_SCH_0_BASE_IDX 1
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID 0x5b39
+#define regRLC_GPU_IOV_ACTIVE_FCN_ID_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_3 0x5b3a
+#define regRLC_GPU_IOV_SCH_3_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_1 0x5b3b
+#define regRLC_GPU_IOV_SCH_1_BASE_IDX 1
+#define regRLC_GPU_IOV_SCH_2 0x5b3c
+#define regRLC_GPU_IOV_SCH_2_BASE_IDX 1
+#define regRLC_PACE_INT_FORCE 0x5b3d
+#define regRLC_PACE_INT_FORCE_BASE_IDX 1
+#define regRLC_PACE_INT_CLEAR 0x5b3e
+#define regRLC_PACE_INT_CLEAR_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_STAT 0x5b3f
+#define regRLC_GPU_IOV_INT_STAT_BASE_IDX 1
+#define regRLC_IH_COOKIE 0x5b41
+#define regRLC_IH_COOKIE_BASE_IDX 1
+#define regRLC_IH_COOKIE_CNTL 0x5b42
+#define regRLC_IH_COOKIE_CNTL_BASE_IDX 1
+#define regRLC_HYP_RLCG_UCODE_CHKSUM 0x5b43
+#define regRLC_HYP_RLCG_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_HYP_RLCP_UCODE_CHKSUM 0x5b44
+#define regRLC_HYP_RLCP_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_HYP_RLCV_UCODE_CHKSUM 0x5b45
+#define regRLC_HYP_RLCV_UCODE_CHKSUM_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_CNTL 0x5b46
+#define regRLC_GPU_IOV_F32_CNTL_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_RESET 0x5b47
+#define regRLC_GPU_IOV_F32_RESET_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_ADDR 0x5b48
+#define regRLC_GPU_IOV_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_UCODE_DATA 0x5b49
+#define regRLC_GPU_IOV_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPU_IOV_SMU_RESPONSE 0x5b4a
+#define regRLC_GPU_IOV_SMU_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_F32_INVALIDATE_CACHE 0x5b4b
+#define regRLC_GPU_IOV_F32_INVALIDATE_CACHE_BASE_IDX 1
+#define regRLC_GPU_IOV_VIRT_RESET_REQ 0x5b4c
+#define regRLC_GPU_IOV_VIRT_RESET_REQ_BASE_IDX 1
+#define regRLC_GPU_IOV_RLC_RESPONSE 0x5b4d
+#define regRLC_GPU_IOV_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_DISABLE 0x5b4e
+#define regRLC_GPU_IOV_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPU_IOV_INT_FORCE 0x5b4f
+#define regRLC_GPU_IOV_INT_FORCE_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_ADDR 0x5b50
+#define regRLC_GPU_IOV_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPU_IOV_SCRATCH_DATA 0x5b51
+#define regRLC_GPU_IOV_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_2 0x5b52
+#define regRLC_HYP_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_HYP_SEMAPHORE_3 0x5b53
+#define regRLC_HYP_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_LX6_SCRATCH_ADDR 0x5b59
+#define regRLC_LX6_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_LX6_CORE1_SCRATCH_ADDR 0x5b5b
+#define regRLC_LX6_CORE1_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_ADDR 0x5b60
+#define regRLC_GPM_UCODE_ADDR_BASE_IDX 1
+#define regRLC_GPM_UCODE_DATA 0x5b61
+#define regRLC_GPM_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPM_IRAM_ADDR 0x5b62
+#define regRLC_GPM_IRAM_ADDR_BASE_IDX 1
+#define regRLC_GPM_IRAM_DATA 0x5b63
+#define regRLC_GPM_IRAM_DATA_BASE_IDX 1
+#define regRLC_RLCP_IRAM_ADDR 0x5b64
+#define regRLC_RLCP_IRAM_ADDR_BASE_IDX 1
+#define regRLC_RLCP_IRAM_DATA 0x5b65
+#define regRLC_RLCP_IRAM_DATA_BASE_IDX 1
+#define regRLC_RLCV_IRAM_ADDR 0x5b66
+#define regRLC_RLCV_IRAM_ADDR_BASE_IDX 1
+#define regRLC_RLCV_IRAM_DATA 0x5b67
+#define regRLC_RLCV_IRAM_DATA_BASE_IDX 1
+#define regRLC_LX6_DRAM_ADDR 0x5b68
+#define regRLC_LX6_DRAM_ADDR_BASE_IDX 1
+#define regRLC_LX6_DRAM_DATA 0x5b69
+#define regRLC_LX6_DRAM_DATA_BASE_IDX 1
+#define regRLC_LX6_IRAM_ADDR 0x5b6a
+#define regRLC_LX6_IRAM_ADDR_BASE_IDX 1
+#define regRLC_LX6_IRAM_DATA 0x5b6b
+#define regRLC_LX6_IRAM_DATA_BASE_IDX 1
+#define regRLC_PACE_UCODE_ADDR 0x5b6c
+#define regRLC_PACE_UCODE_ADDR_BASE_IDX 1
+#define regRLC_PACE_UCODE_DATA 0x5b6d
+#define regRLC_PACE_UCODE_DATA_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_ADDR 0x5b6e
+#define regRLC_GPM_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_GPM_SCRATCH_DATA 0x5b6f
+#define regRLC_GPM_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_SRM_DRAM_ADDR 0x5b71
+#define regRLC_SRM_DRAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_DRAM_DATA 0x5b72
+#define regRLC_SRM_DRAM_DATA_BASE_IDX 1
+#define regRLC_SRM_ARAM_ADDR 0x5b73
+#define regRLC_SRM_ARAM_ADDR_BASE_IDX 1
+#define regRLC_SRM_ARAM_DATA 0x5b74
+#define regRLC_SRM_ARAM_DATA_BASE_IDX 1
+#define regRLC_PACE_SCRATCH_ADDR 0x5b77
+#define regRLC_PACE_SCRATCH_ADDR_BASE_IDX 1
+#define regRLC_PACE_SCRATCH_DATA 0x5b78
+#define regRLC_PACE_SCRATCH_DATA_BASE_IDX 1
+#define regRLC_GTS_OFFSET_LSB 0x5b79
+#define regRLC_GTS_OFFSET_LSB_BASE_IDX 1
+#define regRLC_GTS_OFFSET_MSB 0x5b7a
+#define regRLC_GTS_OFFSET_MSB_BASE_IDX 1
+#define regGL2_PIPE_STEER_0 0x5b80
+#define regGL2_PIPE_STEER_0_BASE_IDX 1
+#define regGL2_PIPE_STEER_1 0x5b81
+#define regGL2_PIPE_STEER_1_BASE_IDX 1
+#define regGL2_PIPE_STEER_2 0x5b82
+#define regGL2_PIPE_STEER_2_BASE_IDX 1
+#define regGL2_PIPE_STEER_3 0x5b83
+#define regGL2_PIPE_STEER_3_BASE_IDX 1
+#define regGL1_PIPE_STEER 0x5b84
+#define regGL1_PIPE_STEER_BASE_IDX 1
+#define regCH_PIPE_STEER 0x5b88
+#define regCH_PIPE_STEER_BASE_IDX 1
+#define regGC_USER_SHADER_ARRAY_CONFIG 0x5b90
+#define regGC_USER_SHADER_ARRAY_CONFIG_BASE_IDX 1
+#define regGC_USER_PRIM_CONFIG 0x5b91
+#define regGC_USER_PRIM_CONFIG_BASE_IDX 1
+#define regGC_USER_SA_UNIT_DISABLE 0x5b92
+#define regGC_USER_SA_UNIT_DISABLE_BASE_IDX 1
+#define regGC_USER_RB_REDUNDANCY 0x5b93
+#define regGC_USER_RB_REDUNDANCY_BASE_IDX 1
+#define regGC_USER_RB_BACKEND_DISABLE 0x5b94
+#define regGC_USER_RB_BACKEND_DISABLE_BASE_IDX 1
+#define regGC_USER_RMI_REDUNDANCY 0x5b95
+#define regGC_USER_RMI_REDUNDANCY_BASE_IDX 1
+#define regCGTS_USER_TCC_DISABLE 0x5b96
+#define regCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define regGC_USER_SHADER_RATE_CONFIG 0x5b97
+#define regGC_USER_SHADER_RATE_CONFIG_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_STATUS 0x5bc0
+#define regRLC_GPU_IOV_SDMA0_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_STATUS 0x5bc1
+#define regRLC_GPU_IOV_SDMA1_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_STATUS 0x5bc2
+#define regRLC_GPU_IOV_SDMA2_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_STATUS 0x5bc3
+#define regRLC_GPU_IOV_SDMA3_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_STATUS 0x5bc4
+#define regRLC_GPU_IOV_SDMA4_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_STATUS 0x5bc5
+#define regRLC_GPU_IOV_SDMA5_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_STATUS 0x5bc6
+#define regRLC_GPU_IOV_SDMA6_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_STATUS 0x5bc7
+#define regRLC_GPU_IOV_SDMA7_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS 0x5bc8
+#define regRLC_GPU_IOV_SDMA0_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS 0x5bc9
+#define regRLC_GPU_IOV_SDMA1_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS 0x5bca
+#define regRLC_GPU_IOV_SDMA2_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS 0x5bcb
+#define regRLC_GPU_IOV_SDMA3_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS 0x5bcc
+#define regRLC_GPU_IOV_SDMA4_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS 0x5bcd
+#define regRLC_GPU_IOV_SDMA5_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS 0x5bce
+#define regRLC_GPU_IOV_SDMA6_BUSY_STATUS_BASE_IDX 1
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS 0x5bcf
+#define regRLC_GPU_IOV_SDMA7_BUSY_STATUS_BASE_IDX 1
+
+
+// addressBlock: gc_cphypdec
+// base address: 0x3e000
+#define regCP_HYP_PFP_UCODE_ADDR 0x5814
+#define regCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_PFP_UCODE_ADDR 0x5814
+#define regCP_PFP_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_DATA 0x5815
+#define regCP_HYP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_PFP_UCODE_DATA 0x5815
+#define regCP_PFP_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_ADDR 0x5816
+#define regCP_HYP_ME_UCODE_ADDR_BASE_IDX 1
+#define regCP_ME_RAM_RADDR 0x5816
+#define regCP_ME_RAM_RADDR_BASE_IDX 1
+#define regCP_ME_RAM_WADDR 0x5816
+#define regCP_ME_RAM_WADDR_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_DATA 0x5817
+#define regCP_HYP_ME_UCODE_DATA_BASE_IDX 1
+#define regCP_ME_RAM_DATA 0x5817
+#define regCP_ME_RAM_DATA_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_ADDR 0x581a
+#define regCP_HYP_MEC1_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_ADDR 0x581a
+#define regCP_MEC_ME1_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC1_UCODE_DATA 0x581b
+#define regCP_HYP_MEC1_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME1_UCODE_DATA 0x581b
+#define regCP_MEC_ME1_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_ADDR 0x581c
+#define regCP_HYP_MEC2_UCODE_ADDR_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_ADDR 0x581c
+#define regCP_MEC_ME2_UCODE_ADDR_BASE_IDX 1
+#define regCP_HYP_MEC2_UCODE_DATA 0x581d
+#define regCP_HYP_MEC2_UCODE_DATA_BASE_IDX 1
+#define regCP_MEC_ME2_UCODE_DATA 0x581d
+#define regCP_MEC_ME2_UCODE_DATA_BASE_IDX 1
+#define regCP_HYP_PFP_UCODE_CHKSUM 0x581e
+#define regCP_HYP_PFP_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_ME_UCODE_CHKSUM 0x5820
+#define regCP_HYP_ME_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM 0x5821
+#define regCP_HYP_MEC_ME1_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM 0x5822
+#define regCP_HYP_MEC_ME2_UCODE_CHKSUM_BASE_IDX 1
+#define regCP_PFP_IC_BASE_LO 0x5840
+#define regCP_PFP_IC_BASE_LO_BASE_IDX 1
+#define regCP_PFP_IC_BASE_HI 0x5841
+#define regCP_PFP_IC_BASE_HI_BASE_IDX 1
+#define regCP_PFP_IC_BASE_CNTL 0x5842
+#define regCP_PFP_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_PFP_IC_OP_CNTL 0x5843
+#define regCP_PFP_IC_OP_CNTL_BASE_IDX 1
+#define regCP_ME_IC_BASE_LO 0x5844
+#define regCP_ME_IC_BASE_LO_BASE_IDX 1
+#define regCP_ME_IC_BASE_HI 0x5845
+#define regCP_ME_IC_BASE_HI_BASE_IDX 1
+#define regCP_ME_IC_BASE_CNTL 0x5846
+#define regCP_ME_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_ME_IC_OP_CNTL 0x5847
+#define regCP_ME_IC_OP_CNTL_BASE_IDX 1
+#define regCP_CPC_IC_BASE_LO 0x584c
+#define regCP_CPC_IC_BASE_LO_BASE_IDX 1
+#define regCP_CPC_IC_BASE_HI 0x584d
+#define regCP_CPC_IC_BASE_HI_BASE_IDX 1
+#define regCP_CPC_IC_BASE_CNTL 0x584e
+#define regCP_CPC_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_IC_BASE_LO 0x5850
+#define regCP_MES_IC_BASE_LO_BASE_IDX 1
+#define regCP_MES_MIBASE_LO 0x5850
+#define regCP_MES_MIBASE_LO_BASE_IDX 1
+#define regCP_MES_IC_BASE_HI 0x5851
+#define regCP_MES_IC_BASE_HI_BASE_IDX 1
+#define regCP_MES_MIBASE_HI 0x5851
+#define regCP_MES_MIBASE_HI_BASE_IDX 1
+#define regCP_MES_IC_BASE_CNTL 0x5852
+#define regCP_MES_IC_BASE_CNTL_BASE_IDX 1
+#define regCP_MES_DC_BASE_LO 0x5854
+#define regCP_MES_DC_BASE_LO_BASE_IDX 1
+#define regCP_MES_MDBASE_LO 0x5854
+#define regCP_MES_MDBASE_LO_BASE_IDX 1
+#define regCP_MES_DC_BASE_HI 0x5855
+#define regCP_MES_DC_BASE_HI_BASE_IDX 1
+#define regCP_MES_MDBASE_HI 0x5855
+#define regCP_MES_MDBASE_HI_BASE_IDX 1
+#define regCP_MES_MIBOUND_LO 0x585b
+#define regCP_MES_MIBOUND_LO_BASE_IDX 1
+#define regCP_MES_MIBOUND_HI 0x585c
+#define regCP_MES_MIBOUND_HI_BASE_IDX 1
+#define regCP_MES_MDBOUND_LO 0x585d
+#define regCP_MES_MDBOUND_LO_BASE_IDX 1
+#define regCP_MES_MDBOUND_HI 0x585e
+#define regCP_MES_MDBOUND_HI_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE0_LO 0x5863
+#define regCP_GFX_RS64_DC_BASE0_LO_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE1_LO 0x5864
+#define regCP_GFX_RS64_DC_BASE1_LO_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE0_HI 0x5865
+#define regCP_GFX_RS64_DC_BASE0_HI_BASE_IDX 1
+#define regCP_GFX_RS64_DC_BASE1_HI 0x5866
+#define regCP_GFX_RS64_DC_BASE1_HI_BASE_IDX 1
+#define regCP_GFX_RS64_MIBOUND_LO 0x586c
+#define regCP_GFX_RS64_MIBOUND_LO_BASE_IDX 1
+#define regCP_GFX_RS64_MIBOUND_HI 0x586d
+#define regCP_GFX_RS64_MIBOUND_HI_BASE_IDX 1
+#define regCP_MEC_DC_BASE_LO 0x5870
+#define regCP_MEC_DC_BASE_LO_BASE_IDX 1
+#define regCP_MEC_MDBASE_LO 0x5870
+#define regCP_MEC_MDBASE_LO_BASE_IDX 1
+#define regCP_MEC_DC_BASE_HI 0x5871
+#define regCP_MEC_DC_BASE_HI_BASE_IDX 1
+#define regCP_MEC_MDBASE_HI 0x5871
+#define regCP_MEC_MDBASE_HI_BASE_IDX 1
+#define regCP_MEC_MIBOUND_LO 0x5872
+#define regCP_MEC_MIBOUND_LO_BASE_IDX 1
+#define regCP_MEC_MIBOUND_HI 0x5873
+#define regCP_MEC_MIBOUND_HI_BASE_IDX 1
+#define regCP_MEC_MDBOUND_LO 0x5874
+#define regCP_MEC_MDBOUND_LO_BASE_IDX 1
+#define regCP_MEC_MDBOUND_HI 0x5875
+#define regCP_MEC_MDBOUND_HI_BASE_IDX 1
+
+
+// addressBlock: gc_grbm_hypdec
+// base address: 0x3e800
+#define regGRBM_GFX_INDEX_SR_SELECT 0x5a00
+#define regGRBM_GFX_INDEX_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_INDEX_SR_DATA 0x5a01
+#define regGRBM_GFX_INDEX_SR_DATA_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_SELECT 0x5a02
+#define regGRBM_GFX_CNTL_SR_SELECT_BASE_IDX 1
+#define regGRBM_GFX_CNTL_SR_DATA 0x5a03
+#define regGRBM_GFX_CNTL_SR_DATA_BASE_IDX 1
+#define regGC_IH_COOKIE_0_PTR 0x5a07
+#define regGC_IH_COOKIE_0_PTR_BASE_IDX 1
+#define regGRBM_SE_REMAP_CNTL 0x5a08
+#define regGRBM_SE_REMAP_CNTL_BASE_IDX 1
+
+
+// addressBlock: gc_gcvmsharedhvdec
+// base address: 0x3ea00
+#define regGCMC_VM_FB_SIZE_OFFSET_VF0 0x5a80
+#define regGCMC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF1 0x5a81
+#define regGCMC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF2 0x5a82
+#define regGCMC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF3 0x5a83
+#define regGCMC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF4 0x5a84
+#define regGCMC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF5 0x5a85
+#define regGCMC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF6 0x5a86
+#define regGCMC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF7 0x5a87
+#define regGCMC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF8 0x5a88
+#define regGCMC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF9 0x5a89
+#define regGCMC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF10 0x5a8a
+#define regGCMC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF11 0x5a8b
+#define regGCMC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF12 0x5a8c
+#define regGCMC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF13 0x5a8d
+#define regGCMC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF14 0x5a8e
+#define regGCMC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1
+#define regGCMC_VM_FB_SIZE_OFFSET_VF15 0x5a8f
+#define regGCMC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1
+
+
+// addressBlock: gc_rlcdec
+// base address: 0x3b000
+#define regRLC_CNTL 0x4c00
+#define regRLC_CNTL_BASE_IDX 1
+#define regRLC_F32_UCODE_VERSION 0x4c03
+#define regRLC_F32_UCODE_VERSION_BASE_IDX 1
+#define regRLC_STAT 0x4c04
+#define regRLC_STAT_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_LSB 0x4c0c
+#define regRLC_REFCLOCK_TIMESTAMP_LSB_BASE_IDX 1
+#define regRLC_REFCLOCK_TIMESTAMP_MSB 0x4c0d
+#define regRLC_REFCLOCK_TIMESTAMP_MSB_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_0 0x4c0e
+#define regRLC_GPM_TIMER_INT_0_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_1 0x4c0f
+#define regRLC_GPM_TIMER_INT_1_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_2 0x4c10
+#define regRLC_GPM_TIMER_INT_2_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_3 0x4c11
+#define regRLC_GPM_TIMER_INT_3_BASE_IDX 1
+#define regRLC_GPM_TIMER_INT_4 0x4c12
+#define regRLC_GPM_TIMER_INT_4_BASE_IDX 1
+#define regRLC_GPM_TIMER_CTRL 0x4c13
+#define regRLC_GPM_TIMER_CTRL_BASE_IDX 1
+#define regRLC_GPM_TIMER_STAT 0x4c14
+#define regRLC_GPM_TIMER_STAT_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_STAT 0x4c16
+#define regRLC_GPM_LEGACY_INT_STAT_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_CLEAR 0x4c17
+#define regRLC_GPM_LEGACY_INT_CLEAR_BASE_IDX 1
+#define regRLC_INT_STAT 0x4c18
+#define regRLC_INT_STAT_BASE_IDX 1
+#define regRLC_MGCG_CTRL 0x4c1a
+#define regRLC_MGCG_CTRL_BASE_IDX 1
+#define regRLC_JUMP_TABLE_RESTORE 0x4c1e
+#define regRLC_JUMP_TABLE_RESTORE_BASE_IDX 1
+#define regRLC_PG_DELAY_2 0x4c1f
+#define regRLC_PG_DELAY_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB 0x4c24
+#define regRLC_GPU_CLOCK_COUNT_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB 0x4c25
+#define regRLC_GPU_CLOCK_COUNT_MSB_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT 0x4c26
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_BASE_IDX 1
+#define regRLC_UCODE_CNTL 0x4c27
+#define regRLC_UCODE_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_RESET 0x4c28
+#define regRLC_GPM_THREAD_RESET_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T0 0x4c29
+#define regRLC_GPM_CP_DMA_COMPLETE_T0_BASE_IDX 1
+#define regRLC_GPM_CP_DMA_COMPLETE_T1 0x4c2a
+#define regRLC_GPM_CP_DMA_COMPLETE_T1_BASE_IDX 1
+#define regRLC_GPM_THREAD_INVALIDATE_CACHE 0x4c2b
+#define regRLC_GPM_THREAD_INVALIDATE_CACHE_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_LSB 0x4c30
+#define regRLC_CLK_COUNT_GFXCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_GFXCLK_MSB 0x4c31
+#define regRLC_CLK_COUNT_GFXCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_LSB 0x4c32
+#define regRLC_CLK_COUNT_REFCLK_LSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_REFCLK_MSB 0x4c33
+#define regRLC_CLK_COUNT_REFCLK_MSB_BASE_IDX 1
+#define regRLC_CLK_COUNT_CTRL 0x4c34
+#define regRLC_CLK_COUNT_CTRL_BASE_IDX 1
+#define regRLC_CLK_COUNT_STAT 0x4c35
+#define regRLC_CLK_COUNT_STAT_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_CNTL 0x4c36
+#define regRLC_RLCG_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_STAT 0x4c37
+#define regRLC_RLCG_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_0_DATA_LO 0x4c38
+#define regRLC_RLCG_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_0_DATA_HI 0x4c39
+#define regRLC_RLCG_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_1_DATA_LO 0x4c3a
+#define regRLC_RLCG_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_1_DATA_HI 0x4c3b
+#define regRLC_RLCG_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_2_DATA_LO 0x4c3c
+#define regRLC_RLCG_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_2_DATA_HI 0x4c3d
+#define regRLC_RLCG_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_3_DATA_LO 0x4c3e
+#define regRLC_RLCG_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_3_DATA_HI 0x4c3f
+#define regRLC_RLCG_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32_RES_SEL 0x4c41
+#define regRLC_GPU_CLOCK_32_RES_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_32 0x4c42
+#define regRLC_GPU_CLOCK_32_BASE_IDX 1
+#define regRLC_PG_CNTL 0x4c43
+#define regRLC_PG_CNTL_BASE_IDX 1
+#define regRLC_GPM_THREAD_PRIORITY 0x4c44
+#define regRLC_GPM_THREAD_PRIORITY_BASE_IDX 1
+#define regRLC_GPM_THREAD_ENABLE 0x4c45
+#define regRLC_GPM_THREAD_ENABLE_BASE_IDX 1
+#define regRLC_RLCG_DOORBELL_RANGE 0x4c47
+#define regRLC_RLCG_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_CGTT_MGCG_OVERRIDE 0x4c48
+#define regRLC_CGTT_MGCG_OVERRIDE_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL 0x4c49
+#define regRLC_CGCG_CGLS_CTRL_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL 0x4c4a
+#define regRLC_CGCG_RAMP_CTRL_BASE_IDX 1
+#define regRLC_DYN_PG_STATUS 0x4c4b
+#define regRLC_DYN_PG_STATUS_BASE_IDX 1
+#define regRLC_DYN_PG_REQUEST 0x4c4c
+#define regRLC_DYN_PG_REQUEST_BASE_IDX 1
+#define regRLC_PG_DELAY 0x4c4d
+#define regRLC_PG_DELAY_BASE_IDX 1
+#define regRLC_WGP_STATUS 0x4c4e
+#define regRLC_WGP_STATUS_BASE_IDX 1
+#define regRLC_PG_ALWAYS_ON_WGP_MASK 0x4c53
+#define regRLC_PG_ALWAYS_ON_WGP_MASK_BASE_IDX 1
+#define regRLC_MAX_PG_WGP 0x4c54
+#define regRLC_MAX_PG_WGP_BASE_IDX 1
+#define regRLC_AUTO_PG_CTRL 0x4c55
+#define regRLC_AUTO_PG_CTRL_BASE_IDX 1
+#define regRLC_SERDES_RD_INDEX 0x4c59
+#define regRLC_SERDES_RD_INDEX_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_0 0x4c5a
+#define regRLC_SERDES_RD_DATA_0_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_1 0x4c5b
+#define regRLC_SERDES_RD_DATA_1_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_2 0x4c5c
+#define regRLC_SERDES_RD_DATA_2_BASE_IDX 1
+#define regRLC_SERDES_RD_DATA_3 0x4c5d
+#define regRLC_SERDES_RD_DATA_3_BASE_IDX 1
+#define regRLC_SERDES_MASK 0x4c5e
+#define regRLC_SERDES_MASK_BASE_IDX 1
+#define regRLC_SERDES_CTRL 0x4c5f
+#define regRLC_SERDES_CTRL_BASE_IDX 1
+#define regRLC_SERDES_DATA 0x4c60
+#define regRLC_SERDES_DATA_BASE_IDX 1
+#define regRLC_SERDES_BUSY 0x4c61
+#define regRLC_SERDES_BUSY_BASE_IDX 1
+#define regRLC_GPM_GENERAL_0 0x4c63
+#define regRLC_GPM_GENERAL_0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_1 0x4c64
+#define regRLC_GPM_GENERAL_1_BASE_IDX 1
+#define regRLC_GPM_GENERAL_2 0x4c65
+#define regRLC_GPM_GENERAL_2_BASE_IDX 1
+#define regRLC_GPM_GENERAL_3 0x4c66
+#define regRLC_GPM_GENERAL_3_BASE_IDX 1
+#define regRLC_GPM_GENERAL_4 0x4c67
+#define regRLC_GPM_GENERAL_4_BASE_IDX 1
+#define regRLC_GPM_GENERAL_5 0x4c68
+#define regRLC_GPM_GENERAL_5_BASE_IDX 1
+#define regRLC_GPM_GENERAL_6 0x4c69
+#define regRLC_GPM_GENERAL_6_BASE_IDX 1
+#define regRLC_GPM_GENERAL_7 0x4c6a
+#define regRLC_GPM_GENERAL_7_BASE_IDX 1
+#define regRLC_STATIC_PG_STATUS 0x4c6e
+#define regRLC_STATIC_PG_STATUS_BASE_IDX 1
+#define regRLC_GPM_GENERAL_16 0x4c76
+#define regRLC_GPM_GENERAL_16_BASE_IDX 1
+#define regRLC_PG_DELAY_3 0x4c78
+#define regRLC_PG_DELAY_3_BASE_IDX 1
+#define regRLC_GPR_REG1 0x4c79
+#define regRLC_GPR_REG1_BASE_IDX 1
+#define regRLC_GPR_REG2 0x4c7a
+#define regRLC_GPR_REG2_BASE_IDX 1
+#define regRLC_GPM_INT_DISABLE_TH0 0x4c7c
+#define regRLC_GPM_INT_DISABLE_TH0_BASE_IDX 1
+#define regRLC_GPM_LEGACY_INT_DISABLE 0x4c7d
+#define regRLC_GPM_LEGACY_INT_DISABLE_BASE_IDX 1
+#define regRLC_GPM_INT_FORCE_TH0 0x4c7e
+#define regRLC_GPM_INT_FORCE_TH0_BASE_IDX 1
+#define regRLC_SRM_CNTL 0x4c80
+#define regRLC_SRM_CNTL_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND_STATUS 0x4c88
+#define regRLC_SRM_GPM_COMMAND_STATUS_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_0 0x4c8b
+#define regRLC_SRM_INDEX_CNTL_ADDR_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_1 0x4c8c
+#define regRLC_SRM_INDEX_CNTL_ADDR_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_2 0x4c8d
+#define regRLC_SRM_INDEX_CNTL_ADDR_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_3 0x4c8e
+#define regRLC_SRM_INDEX_CNTL_ADDR_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_4 0x4c8f
+#define regRLC_SRM_INDEX_CNTL_ADDR_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_5 0x4c90
+#define regRLC_SRM_INDEX_CNTL_ADDR_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_6 0x4c91
+#define regRLC_SRM_INDEX_CNTL_ADDR_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_ADDR_7 0x4c92
+#define regRLC_SRM_INDEX_CNTL_ADDR_7_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_0 0x4c93
+#define regRLC_SRM_INDEX_CNTL_DATA_0_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_1 0x4c94
+#define regRLC_SRM_INDEX_CNTL_DATA_1_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_2 0x4c95
+#define regRLC_SRM_INDEX_CNTL_DATA_2_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_3 0x4c96
+#define regRLC_SRM_INDEX_CNTL_DATA_3_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_4 0x4c97
+#define regRLC_SRM_INDEX_CNTL_DATA_4_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_5 0x4c98
+#define regRLC_SRM_INDEX_CNTL_DATA_5_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_6 0x4c99
+#define regRLC_SRM_INDEX_CNTL_DATA_6_BASE_IDX 1
+#define regRLC_SRM_INDEX_CNTL_DATA_7 0x4c9a
+#define regRLC_SRM_INDEX_CNTL_DATA_7_BASE_IDX 1
+#define regRLC_SRM_STAT 0x4c9b
+#define regRLC_SRM_STAT_BASE_IDX 1
+#define regRLC_GPM_GENERAL_8 0x4cad
+#define regRLC_GPM_GENERAL_8_BASE_IDX 1
+#define regRLC_GPM_GENERAL_9 0x4cae
+#define regRLC_GPM_GENERAL_9_BASE_IDX 1
+#define regRLC_GPM_GENERAL_10 0x4caf
+#define regRLC_GPM_GENERAL_10_BASE_IDX 1
+#define regRLC_GPM_GENERAL_11 0x4cb0
+#define regRLC_GPM_GENERAL_11_BASE_IDX 1
+#define regRLC_GPM_GENERAL_12 0x4cb1
+#define regRLC_GPM_GENERAL_12_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_0 0x4cb2
+#define regRLC_GPM_UTCL1_CNTL_0_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_1 0x4cb3
+#define regRLC_GPM_UTCL1_CNTL_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_CNTL_2 0x4cb4
+#define regRLC_GPM_UTCL1_CNTL_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_CNTL 0x4cb5
+#define regRLC_SPM_UTCL1_CNTL_BASE_IDX 1
+#define regRLC_UTCL1_STATUS_2 0x4cb6
+#define regRLC_UTCL1_STATUS_2_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_1 0x4cbc
+#define regRLC_SPM_UTCL1_ERROR_1_BASE_IDX 1
+#define regRLC_SPM_UTCL1_ERROR_2 0x4cbd
+#define regRLC_SPM_UTCL1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_1 0x4cbe
+#define regRLC_GPM_UTCL1_TH0_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH0_ERROR_2 0x4cc0
+#define regRLC_GPM_UTCL1_TH0_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1 0x4cc1
+#define regRLC_GPM_UTCL1_TH1_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH1_ERROR_2 0x4cc2
+#define regRLC_GPM_UTCL1_TH1_ERROR_2_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_1 0x4cc3
+#define regRLC_GPM_UTCL1_TH2_ERROR_1_BASE_IDX 1
+#define regRLC_GPM_UTCL1_TH2_ERROR_2 0x4cc4
+#define regRLC_GPM_UTCL1_TH2_ERROR_2_BASE_IDX 1
+#define regRLC_CGCG_CGLS_CTRL_3D 0x4cc5
+#define regRLC_CGCG_CGLS_CTRL_3D_BASE_IDX 1
+#define regRLC_CGCG_RAMP_CTRL_3D 0x4cc6
+#define regRLC_CGCG_RAMP_CTRL_3D_BASE_IDX 1
+#define regRLC_SEMAPHORE_0 0x4cc7
+#define regRLC_SEMAPHORE_0_BASE_IDX 1
+#define regRLC_SEMAPHORE_1 0x4cc8
+#define regRLC_SEMAPHORE_1_BASE_IDX 1
+#define regRLC_SEMAPHORE_2 0x4cc9
+#define regRLC_SEMAPHORE_2_BASE_IDX 1
+#define regRLC_SEMAPHORE_3 0x4cca
+#define regRLC_SEMAPHORE_3_BASE_IDX 1
+#define regRLC_PACE_INT_STAT 0x4ccc
+#define regRLC_PACE_INT_STAT_BASE_IDX 1
+#define regRLC_UTCL1_STATUS 0x4cd4
+#define regRLC_UTCL1_STATUS_BASE_IDX 1
+#define regRLC_R2I_CNTL_0 0x4cd5
+#define regRLC_R2I_CNTL_0_BASE_IDX 1
+#define regRLC_R2I_CNTL_1 0x4cd6
+#define regRLC_R2I_CNTL_1_BASE_IDX 1
+#define regRLC_R2I_CNTL_2 0x4cd7
+#define regRLC_R2I_CNTL_2_BASE_IDX 1
+#define regRLC_R2I_CNTL_3 0x4cd8
+#define regRLC_R2I_CNTL_3_BASE_IDX 1
+#define regRLC_GPM_INT_STAT_TH0 0x4cdc
+#define regRLC_GPM_INT_STAT_TH0_BASE_IDX 1
+#define regRLC_GPM_GENERAL_13 0x4cdd
+#define regRLC_GPM_GENERAL_13_BASE_IDX 1
+#define regRLC_GPM_GENERAL_14 0x4cde
+#define regRLC_GPM_GENERAL_14_BASE_IDX 1
+#define regRLC_GPM_GENERAL_15 0x4cdf
+#define regRLC_GPM_GENERAL_15_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1 0x4cea
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_2 0x4ceb
+#define regRLC_GPU_CLOCK_COUNT_LSB_2_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_2 0x4cec
+#define regRLC_GPU_CLOCK_COUNT_MSB_2_BASE_IDX 1
+#define regRLC_PACE_INT_DISABLE 0x4ced
+#define regRLC_PACE_INT_DISABLE_BASE_IDX 1
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2 0x4cef
+#define regRLC_CAPTURE_GPU_CLOCK_COUNT_2_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_RANGE 0x4cf0
+#define regRLC_RLCV_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_CNTL 0x4cf1
+#define regRLC_RLCV_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_STAT 0x4cf2
+#define regRLC_RLCV_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_0_DATA_LO 0x4cf3
+#define regRLC_RLCV_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_0_DATA_HI 0x4cf4
+#define regRLC_RLCV_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_1_DATA_LO 0x4cf5
+#define regRLC_RLCV_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_1_DATA_HI 0x4cf6
+#define regRLC_RLCV_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_2_DATA_LO 0x4cf7
+#define regRLC_RLCV_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_2_DATA_HI 0x4cf8
+#define regRLC_RLCV_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_3_DATA_LO 0x4cf9
+#define regRLC_RLCV_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCV_DOORBELL_3_DATA_HI 0x4cfa
+#define regRLC_RLCV_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_LSB_1 0x4cfb
+#define regRLC_GPU_CLOCK_COUNT_LSB_1_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_MSB_1 0x4cfc
+#define regRLC_GPU_CLOCK_COUNT_MSB_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT 0x4d00
+#define regRLC_RLCV_SPARE_INT_BASE_IDX 1
+#define regRLC_FIREWALL_VIOLATION 0x4d02
+#define regRLC_FIREWALL_VIOLATION_BASE_IDX 1
+#define regRLC_PACE_TIMER_INT_0 0x4d04
+#define regRLC_PACE_TIMER_INT_0_BASE_IDX 1
+#define regRLC_PACE_TIMER_INT_1 0x4d05
+#define regRLC_PACE_TIMER_INT_1_BASE_IDX 1
+#define regRLC_PACE_TIMER_CTRL 0x4d06
+#define regRLC_PACE_TIMER_CTRL_BASE_IDX 1
+#define regRLC_SMU_CLK_REQ 0x4d08
+#define regRLC_SMU_CLK_REQ_BASE_IDX 1
+#define regRLC_CP_STAT_INVAL_STAT 0x4d09
+#define regRLC_CP_STAT_INVAL_STAT_BASE_IDX 1
+#define regRLC_CP_STAT_INVAL_CTRL 0x4d0a
+#define regRLC_CP_STAT_INVAL_CTRL_BASE_IDX 1
+#define regRLC_SPARE 0x4d0b
+#define regRLC_SPARE_BASE_IDX 1
+#define regRLC_SPP_CTRL 0x4d0c
+#define regRLC_SPP_CTRL_BASE_IDX 1
+#define regRLC_SPP_SHADER_PROFILE_EN 0x4d0d
+#define regRLC_SPP_SHADER_PROFILE_EN_BASE_IDX 1
+#define regRLC_SPP_SSF_CAPTURE_EN 0x4d0e
+#define regRLC_SPP_SSF_CAPTURE_EN_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_0 0x4d0f
+#define regRLC_SPP_SSF_THRESHOLD_0_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_1 0x4d10
+#define regRLC_SPP_SSF_THRESHOLD_1_BASE_IDX 1
+#define regRLC_SPP_SSF_THRESHOLD_2 0x4d11
+#define regRLC_SPP_SSF_THRESHOLD_2_BASE_IDX 1
+#define regRLC_SPP_INFLIGHT_RD_ADDR 0x4d12
+#define regRLC_SPP_INFLIGHT_RD_ADDR_BASE_IDX 1
+#define regRLC_SPP_INFLIGHT_RD_DATA 0x4d13
+#define regRLC_SPP_INFLIGHT_RD_DATA_BASE_IDX 1
+#define regRLC_SPP_PROF_INFO_1 0x4d18
+#define regRLC_SPP_PROF_INFO_1_BASE_IDX 1
+#define regRLC_SPP_PROF_INFO_2 0x4d19
+#define regRLC_SPP_PROF_INFO_2_BASE_IDX 1
+#define regRLC_SPP_GLOBAL_SH_ID 0x4d1a
+#define regRLC_SPP_GLOBAL_SH_ID_BASE_IDX 1
+#define regRLC_SPP_GLOBAL_SH_ID_VALID 0x4d1b
+#define regRLC_SPP_GLOBAL_SH_ID_VALID_BASE_IDX 1
+#define regRLC_SPP_STATUS 0x4d1c
+#define regRLC_SPP_STATUS_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_0 0x4d1d
+#define regRLC_SPP_PVT_STAT_0_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_1 0x4d1e
+#define regRLC_SPP_PVT_STAT_1_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_2 0x4d1f
+#define regRLC_SPP_PVT_STAT_2_BASE_IDX 1
+#define regRLC_SPP_PVT_STAT_3 0x4d20
+#define regRLC_SPP_PVT_STAT_3_BASE_IDX 1
+#define regRLC_SPP_PVT_LEVEL_MAX 0x4d21
+#define regRLC_SPP_PVT_LEVEL_MAX_BASE_IDX 1
+#define regRLC_SPP_STALL_STATE_UPDATE 0x4d22
+#define regRLC_SPP_STALL_STATE_UPDATE_BASE_IDX 1
+#define regRLC_SPP_PBB_INFO 0x4d23
+#define regRLC_SPP_PBB_INFO_BASE_IDX 1
+#define regRLC_SPP_RESET 0x4d24
+#define regRLC_SPP_RESET_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_RANGE 0x4d26
+#define regRLC_RLCP_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_CNTL 0x4d27
+#define regRLC_RLCP_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_STAT 0x4d28
+#define regRLC_RLCP_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_0_DATA_LO 0x4d29
+#define regRLC_RLCP_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_0_DATA_HI 0x4d2a
+#define regRLC_RLCP_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_1_DATA_LO 0x4d2b
+#define regRLC_RLCP_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_1_DATA_HI 0x4d2c
+#define regRLC_RLCP_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_2_DATA_LO 0x4d2d
+#define regRLC_RLCP_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_2_DATA_HI 0x4d2e
+#define regRLC_RLCP_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_3_DATA_LO 0x4d2f
+#define regRLC_RLCP_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_RLCP_DOORBELL_3_DATA_HI 0x4d30
+#define regRLC_RLCP_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_CAC_MASK_CNTL 0x4d45
+#define regRLC_CAC_MASK_CNTL_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_CNTR_CTRL 0x4d48
+#define regRLC_POWER_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_CNTR_CTRL 0x4d49
+#define regRLC_CLK_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_CNTR_CTRL 0x4d4a
+#define regRLC_DS_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_CNTR_CTRL 0x4d4b
+#define regRLC_ULV_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_CNTR_CTRL 0x4d4c
+#define regRLC_PCC_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_CNTR_CTRL 0x4d4d
+#define regRLC_GENERAL_RESIDENCY_CNTR_CTRL_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_EVENT_CNTR 0x4d50
+#define regRLC_POWER_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_EVENT_CNTR 0x4d51
+#define regRLC_CLK_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_EVENT_CNTR 0x4d52
+#define regRLC_DS_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_EVENT_CNTR 0x4d53
+#define regRLC_ULV_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_EVENT_CNTR 0x4d54
+#define regRLC_PCC_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_EVENT_CNTR 0x4d55
+#define regRLC_GENERAL_RESIDENCY_EVENT_CNTR_BASE_IDX 1
+#define regRLC_POWER_RESIDENCY_REF_CNTR 0x4d58
+#define regRLC_POWER_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_CLK_RESIDENCY_REF_CNTR 0x4d59
+#define regRLC_CLK_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_DS_RESIDENCY_REF_CNTR 0x4d5a
+#define regRLC_DS_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_ULV_RESIDENCY_REF_CNTR 0x4d5b
+#define regRLC_ULV_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_PCC_RESIDENCY_REF_CNTR 0x4d5c
+#define regRLC_PCC_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_GENERAL_RESIDENCY_REF_CNTR 0x4d5d
+#define regRLC_GENERAL_RESIDENCY_REF_CNTR_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_CTRL 0x4d5e
+#define regRLC_GFX_IH_CLIENT_CTRL_BASE_IDX 1
+#define regRLC_GFX_IH_ARBITER_STAT 0x4d5f
+#define regRLC_GFX_IH_ARBITER_STAT_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SE_STAT_L 0x4d60
+#define regRLC_GFX_IH_CLIENT_SE_STAT_L_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SE_STAT_H 0x4d61
+#define regRLC_GFX_IH_CLIENT_SE_STAT_H_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_SDMA_STAT 0x4d62
+#define regRLC_GFX_IH_CLIENT_SDMA_STAT_BASE_IDX 1
+#define regRLC_GFX_IH_CLIENT_OTHER_STAT 0x4d63
+#define regRLC_GFX_IH_CLIENT_OTHER_STAT_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_DELAY_IND_ADDR 0x4d64
+#define regRLC_SPM_GLOBAL_DELAY_IND_ADDR_BASE_IDX 1
+#define regRLC_SPM_GLOBAL_DELAY_IND_DATA 0x4d65
+#define regRLC_SPM_GLOBAL_DELAY_IND_DATA_BASE_IDX 1
+#define regRLC_SPM_SE_DELAY_IND_ADDR 0x4d66
+#define regRLC_SPM_SE_DELAY_IND_ADDR_BASE_IDX 1
+#define regRLC_SPM_SE_DELAY_IND_DATA 0x4d67
+#define regRLC_SPM_SE_DELAY_IND_DATA_BASE_IDX 1
+#define regRLC_LX6_CNTL 0x4d80
+#define regRLC_LX6_CNTL_BASE_IDX 1
+#define regRLC_XT_CORE_STATUS 0x4dd4
+#define regRLC_XT_CORE_STATUS_BASE_IDX 1
+#define regRLC_XT_CORE_INTERRUPT 0x4dd5
+#define regRLC_XT_CORE_INTERRUPT_BASE_IDX 1
+#define regRLC_XT_CORE_FAULT_INFO 0x4dd6
+#define regRLC_XT_CORE_FAULT_INFO_BASE_IDX 1
+#define regRLC_XT_CORE_ALT_RESET_VEC 0x4dd7
+#define regRLC_XT_CORE_ALT_RESET_VEC_BASE_IDX 1
+#define regRLC_XT_CORE_RESERVED 0x4dd8
+#define regRLC_XT_CORE_RESERVED_BASE_IDX 1
+#define regRLC_XT_INT_VEC_FORCE 0x4dd9
+#define regRLC_XT_INT_VEC_FORCE_BASE_IDX 1
+#define regRLC_XT_INT_VEC_CLEAR 0x4dda
+#define regRLC_XT_INT_VEC_CLEAR_BASE_IDX 1
+#define regRLC_XT_INT_VEC_MUX_SEL 0x4ddb
+#define regRLC_XT_INT_VEC_MUX_SEL_BASE_IDX 1
+#define regRLC_XT_INT_VEC_MUX_INT_SEL 0x4ddc
+#define regRLC_XT_INT_VEC_MUX_INT_SEL_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_SPM_LSB 0x4de4
+#define regRLC_GPU_CLOCK_COUNT_SPM_LSB_BASE_IDX 1
+#define regRLC_GPU_CLOCK_COUNT_SPM_MSB 0x4de5
+#define regRLC_GPU_CLOCK_COUNT_SPM_MSB_BASE_IDX 1
+#define regRLC_SPM_THREAD_TRACE_CTRL 0x4de6
+#define regRLC_SPM_THREAD_TRACE_CTRL_BASE_IDX 1
+#define regRLC_SPP_CAM_ADDR 0x4de8
+#define regRLC_SPP_CAM_ADDR_BASE_IDX 1
+#define regRLC_SPP_CAM_DATA 0x4de9
+#define regRLC_SPP_CAM_DATA_BASE_IDX 1
+#define regRLC_SPP_CAM_EXT_ADDR 0x4dea
+#define regRLC_SPP_CAM_EXT_ADDR_BASE_IDX 1
+#define regRLC_SPP_CAM_EXT_DATA 0x4deb
+#define regRLC_SPP_CAM_EXT_DATA_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_CTRL 0x4df1
+#define regRLC_CPAXI_DOORBELL_MON_CTRL_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_STAT 0x4df2
+#define regRLC_CPAXI_DOORBELL_MON_STAT_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_DATA_LSB 0x4df3
+#define regRLC_CPAXI_DOORBELL_MON_DATA_LSB_BASE_IDX 1
+#define regRLC_CPAXI_DOORBELL_MON_DATA_MSB 0x4df4
+#define regRLC_CPAXI_DOORBELL_MON_DATA_MSB_BASE_IDX 1
+#define regRLC_XT_DOORBELL_RANGE 0x4df5
+#define regRLC_XT_DOORBELL_RANGE_BASE_IDX 1
+#define regRLC_XT_DOORBELL_CNTL 0x4df6
+#define regRLC_XT_DOORBELL_CNTL_BASE_IDX 1
+#define regRLC_XT_DOORBELL_STAT 0x4df7
+#define regRLC_XT_DOORBELL_STAT_BASE_IDX 1
+#define regRLC_XT_DOORBELL_0_DATA_LO 0x4df8
+#define regRLC_XT_DOORBELL_0_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_0_DATA_HI 0x4df9
+#define regRLC_XT_DOORBELL_0_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_1_DATA_LO 0x4dfa
+#define regRLC_XT_DOORBELL_1_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_1_DATA_HI 0x4dfb
+#define regRLC_XT_DOORBELL_1_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_2_DATA_LO 0x4dfc
+#define regRLC_XT_DOORBELL_2_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_2_DATA_HI 0x4dfd
+#define regRLC_XT_DOORBELL_2_DATA_HI_BASE_IDX 1
+#define regRLC_XT_DOORBELL_3_DATA_LO 0x4dfe
+#define regRLC_XT_DOORBELL_3_DATA_LO_BASE_IDX 1
+#define regRLC_XT_DOORBELL_3_DATA_HI 0x4dff
+#define regRLC_XT_DOORBELL_3_DATA_HI_BASE_IDX 1
+#define regRLC_MEM_SLP_CNTL 0x4e00
+#define regRLC_MEM_SLP_CNTL_BASE_IDX 1
+#define regSMU_RLC_RESPONSE 0x4e01
+#define regSMU_RLC_RESPONSE_BASE_IDX 1
+#define regRLC_RLCV_SAFE_MODE 0x4e02
+#define regRLC_RLCV_SAFE_MODE_BASE_IDX 1
+#define regRLC_SMU_SAFE_MODE 0x4e03
+#define regRLC_SMU_SAFE_MODE_BASE_IDX 1
+#define regRLC_RLCV_COMMAND 0x4e04
+#define regRLC_RLCV_COMMAND_BASE_IDX 1
+#define regRLC_SMU_MESSAGE 0x4e05
+#define regRLC_SMU_MESSAGE_BASE_IDX 1
+#define regRLC_SMU_MESSAGE_1 0x4e06
+#define regRLC_SMU_MESSAGE_1_BASE_IDX 1
+#define regRLC_SMU_MESSAGE_2 0x4e07
+#define regRLC_SMU_MESSAGE_2_BASE_IDX 1
+#define regRLC_SRM_GPM_COMMAND 0x4e08
+#define regRLC_SRM_GPM_COMMAND_BASE_IDX 1
+#define regRLC_SRM_GPM_ABORT 0x4e09
+#define regRLC_SRM_GPM_ABORT_BASE_IDX 1
+#define regRLC_SMU_COMMAND 0x4e0a
+#define regRLC_SMU_COMMAND_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_1 0x4e0b
+#define regRLC_SMU_ARGUMENT_1_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_2 0x4e0c
+#define regRLC_SMU_ARGUMENT_2_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_3 0x4e0d
+#define regRLC_SMU_ARGUMENT_3_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_4 0x4e0e
+#define regRLC_SMU_ARGUMENT_4_BASE_IDX 1
+#define regRLC_SMU_ARGUMENT_5 0x4e0f
+#define regRLC_SMU_ARGUMENT_5_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_ADDR_HI 0x4e10
+#define regRLC_IMU_BOOTLOAD_ADDR_HI_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_ADDR_LO 0x4e11
+#define regRLC_IMU_BOOTLOAD_ADDR_LO_BASE_IDX 1
+#define regRLC_IMU_BOOTLOAD_SIZE 0x4e12
+#define regRLC_IMU_BOOTLOAD_SIZE_BASE_IDX 1
+#define regRLC_IMU_MISC 0x4e16
+#define regRLC_IMU_MISC_BASE_IDX 1
+#define regRLC_IMU_RESET_VECTOR 0x4e17
+#define regRLC_IMU_RESET_VECTOR_BASE_IDX 1
+
+
+// addressBlock: gc_rlcsdec
+// base address: 0x3b980
+#define regRLC_RLCS_DEC_START 0x4e60
+#define regRLC_RLCS_DEC_START_BASE_IDX 1
+#define regRLC_RLCS_DEC_DUMP_ADDR 0x4e61
+#define regRLC_RLCS_DEC_DUMP_ADDR_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_1 0x4e62
+#define regRLC_RLCS_EXCEPTION_REG_1_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_2 0x4e63
+#define regRLC_RLCS_EXCEPTION_REG_2_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_3 0x4e64
+#define regRLC_RLCS_EXCEPTION_REG_3_BASE_IDX 1
+#define regRLC_RLCS_EXCEPTION_REG_4 0x4e65
+#define regRLC_RLCS_EXCEPTION_REG_4_BASE_IDX 1
+#define regRLC_RLCS_CGCG_REQUEST 0x4e66
+#define regRLC_RLCS_CGCG_REQUEST_BASE_IDX 1
+#define regRLC_RLCS_CGCG_STATUS 0x4e67
+#define regRLC_RLCS_CGCG_STATUS_BASE_IDX 1
+#define regRLC_RLCS_SOC_DS_CNTL 0x4e68
+#define regRLC_RLCS_SOC_DS_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_DS_CNTL 0x4e69
+#define regRLC_RLCS_GFX_DS_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_DS_ALLOW_MASK_CNTL 0x4e6a
+#define regRLC_RLCS_GFX_DS_ALLOW_MASK_CNTL_BASE_IDX 1
+#define regRLC_GPM_STAT 0x4e6b
+#define regRLC_GPM_STAT_BASE_IDX 1
+#define regRLC_RLCS_GPM_STAT 0x4e6b
+#define regRLC_RLCS_GPM_STAT_BASE_IDX 1
+#define regRLC_RLCS_ABORTED_PD_SEQUENCE 0x4e6c
+#define regRLC_RLCS_ABORTED_PD_SEQUENCE_BASE_IDX 1
+#define regRLC_RLCS_DIDT_FORCE_STALL 0x4e6d
+#define regRLC_RLCS_DIDT_FORCE_STALL_BASE_IDX 1
+#define regRLC_RLCS_IOV_CMD_STATUS 0x4e6e
+#define regRLC_RLCS_IOV_CMD_STATUS_BASE_IDX 1
+#define regRLC_RLCS_IOV_CNTX_LOC_SIZE 0x4e6f
+#define regRLC_RLCS_IOV_CNTX_LOC_SIZE_BASE_IDX 1
+#define regRLC_RLCS_IOV_SCH_BLOCK 0x4e70
+#define regRLC_RLCS_IOV_SCH_BLOCK_BASE_IDX 1
+#define regRLC_RLCS_IOV_VM_BUSY_STATUS 0x4e71
+#define regRLC_RLCS_IOV_VM_BUSY_STATUS_BASE_IDX 1
+#define regRLC_RLCS_GPM_STAT_2 0x4e72
+#define regRLC_RLCS_GPM_STAT_2_BASE_IDX 1
+#define regRLC_RLCS_GRBM_SOFT_RESET 0x4e73
+#define regRLC_RLCS_GRBM_SOFT_RESET_BASE_IDX 1
+#define regRLC_RLCS_PG_CHANGE_STATUS 0x4e74
+#define regRLC_RLCS_PG_CHANGE_STATUS_BASE_IDX 1
+#define regRLC_RLCS_PG_CHANGE_READ 0x4e75
+#define regRLC_RLCS_PG_CHANGE_READ_BASE_IDX 1
+#define regRLC_RLCS_IH_SEMAPHORE 0x4e76
+#define regRLC_RLCS_IH_SEMAPHORE_BASE_IDX 1
+#define regRLC_RLCS_IH_COOKIE_SEMAPHORE 0x4e77
+#define regRLC_RLCS_IH_COOKIE_SEMAPHORE_BASE_IDX 1
+#define regRLC_RLCS_WGP_STATUS 0x4e78
+#define regRLC_RLCS_WGP_STATUS_BASE_IDX 1
+#define regRLC_RLCS_WGP_READ 0x4e79
+#define regRLC_RLCS_WGP_READ_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_CTRL_1 0x4e7a
+#define regRLC_RLCS_CP_INT_CTRL_1_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_CTRL_2 0x4e7b
+#define regRLC_RLCS_CP_INT_CTRL_2_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_INFO_1 0x4e7c
+#define regRLC_RLCS_CP_INT_INFO_1_BASE_IDX 1
+#define regRLC_RLCS_CP_INT_INFO_2 0x4e7d
+#define regRLC_RLCS_CP_INT_INFO_2_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_CTRL 0x4e7e
+#define regRLC_RLCS_SPM_INT_CTRL_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_INFO_1 0x4e7f
+#define regRLC_RLCS_SPM_INT_INFO_1_BASE_IDX 1
+#define regRLC_RLCS_SPM_INT_INFO_2 0x4e80
+#define regRLC_RLCS_SPM_INT_INFO_2_BASE_IDX 1
+#define regRLC_RLCS_DSM_TRIG 0x4e81
+#define regRLC_RLCS_DSM_TRIG_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_STATUS 0x4e82
+#define regRLC_RLCS_BOOTLOAD_STATUS_BASE_IDX 1
+#define regRLC_RLCS_POWER_BRAKE_CNTL 0x4e83
+#define regRLC_RLCS_POWER_BRAKE_CNTL_BASE_IDX 1
+#define regRLC_RLCS_POWER_BRAKE_CNTL_TH1 0x4e84
+#define regRLC_RLCS_POWER_BRAKE_CNTL_TH1_BASE_IDX 1
+#define regRLC_RLCS_GRBM_IDLE_BUSY_STAT 0x4e85
+#define regRLC_RLCS_GRBM_IDLE_BUSY_STAT_BASE_IDX 1
+#define regRLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL 0x4e86
+#define regRLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL_BASE_IDX 1
+#define regRLC_RLCS_CMP_IDLE_CNTL 0x4e87
+#define regRLC_RLCS_CMP_IDLE_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_0 0x4e88
+#define regRLC_RLCS_GENERAL_0_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_1 0x4e89
+#define regRLC_RLCS_GENERAL_1_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_2 0x4e8a
+#define regRLC_RLCS_GENERAL_2_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_3 0x4e8b
+#define regRLC_RLCS_GENERAL_3_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_4 0x4e8c
+#define regRLC_RLCS_GENERAL_4_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_5 0x4e8d
+#define regRLC_RLCS_GENERAL_5_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_6 0x4e8e
+#define regRLC_RLCS_GENERAL_6_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_7 0x4e8f
+#define regRLC_RLCS_GENERAL_7_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_8 0x4e90
+#define regRLC_RLCS_GENERAL_8_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_9 0x4e91
+#define regRLC_RLCS_GENERAL_9_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_10 0x4e92
+#define regRLC_RLCS_GENERAL_10_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_11 0x4e93
+#define regRLC_RLCS_GENERAL_11_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_12 0x4e94
+#define regRLC_RLCS_GENERAL_12_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_13 0x4e95
+#define regRLC_RLCS_GENERAL_13_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_14 0x4e96
+#define regRLC_RLCS_GENERAL_14_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_15 0x4e97
+#define regRLC_RLCS_GENERAL_15_BASE_IDX 1
+#define regRLC_RLCS_GENERAL_16 0x4e98
+#define regRLC_RLCS_GENERAL_16_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_1 0x4ec5
+#define regRLC_RLCS_AUXILIARY_REG_1_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_2 0x4ec6
+#define regRLC_RLCS_AUXILIARY_REG_2_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_3 0x4ec7
+#define regRLC_RLCS_AUXILIARY_REG_3_BASE_IDX 1
+#define regRLC_RLCS_AUXILIARY_REG_4 0x4ec8
+#define regRLC_RLCS_AUXILIARY_REG_4_BASE_IDX 1
+#define regRLC_RLCS_SPM_SQTT_MODE 0x4ec9
+#define regRLC_RLCS_SPM_SQTT_MODE_BASE_IDX 1
+#define regRLC_RLCS_CP_DMA_SRCID_OVER 0x4eca
+#define regRLC_RLCS_CP_DMA_SRCID_OVER_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS1 0x4ecb
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS1_BASE_IDX 1
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS2 0x4ecc
+#define regRLC_RLCS_BOOTLOAD_ID_STATUS2_BASE_IDX 1
+#define regRLC_RLCS_IMU_VIDCHG_CNTL 0x4ecd
+#define regRLC_RLCS_IMU_VIDCHG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_EDC_INT_CNTL 0x4ece
+#define regRLC_RLCS_EDC_INT_CNTL_BASE_IDX 1
+#define regRLC_RLCS_KMD_LOG_CNTL1 0x4ecf
+#define regRLC_RLCS_KMD_LOG_CNTL1_BASE_IDX 1
+#define regRLC_RLCS_KMD_LOG_CNTL2 0x4ed0
+#define regRLC_RLCS_KMD_LOG_CNTL2_BASE_IDX 1
+#define regRLC_RLCS_GPM_LEGACY_INT_STAT 0x4ed1
+#define regRLC_RLCS_GPM_LEGACY_INT_STAT_BASE_IDX 1
+#define regRLC_RLCS_GPM_LEGACY_INT_DISABLE 0x4ed2
+#define regRLC_RLCS_GPM_LEGACY_INT_DISABLE_BASE_IDX 1
+#define regRLC_RLCS_SRM_SRCID_CNTL 0x4ed3
+#define regRLC_RLCS_SRM_SRCID_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_0 0x4ed4
+#define regRLC_RLCS_GCR_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_1 0x4ed5
+#define regRLC_RLCS_GCR_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_2 0x4ed6
+#define regRLC_RLCS_GCR_DATA_2_BASE_IDX 1
+#define regRLC_RLCS_GCR_DATA_3 0x4ed7
+#define regRLC_RLCS_GCR_DATA_3_BASE_IDX 1
+#define regRLC_RLCS_GCR_STATUS 0x4ed8
+#define regRLC_RLCS_GCR_STATUS_BASE_IDX 1
+#define regRLC_RLCS_PERFMON_CLK_CNTL_UCODE 0x4ed9
+#define regRLC_RLCS_PERFMON_CLK_CNTL_UCODE_BASE_IDX 1
+#define regRLC_RLCS_UTCL2_CNTL 0x4eda
+#define regRLC_RLCS_UTCL2_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA0 0x4edb
+#define regRLC_RLCS_IMU_RLC_MSG_DATA0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA1 0x4edc
+#define regRLC_RLCS_IMU_RLC_MSG_DATA1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA2 0x4edd
+#define regRLC_RLCS_IMU_RLC_MSG_DATA2_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA3 0x4ede
+#define regRLC_RLCS_IMU_RLC_MSG_DATA3_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_DATA4 0x4edf
+#define regRLC_RLCS_IMU_RLC_MSG_DATA4_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_CONTROL 0x4ee0
+#define regRLC_RLCS_IMU_RLC_MSG_CONTROL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MSG_CNTL 0x4ee1
+#define regRLC_RLCS_IMU_RLC_MSG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_DATA0 0x4ee2
+#define regRLC_RLCS_RLC_IMU_MSG_DATA0_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_CONTROL 0x4ee3
+#define regRLC_RLCS_RLC_IMU_MSG_CONTROL_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_MSG_CNTL 0x4ee4
+#define regRLC_RLCS_RLC_IMU_MSG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_0 0x4ee5
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_1 0x4ee6
+#define regRLC_RLCS_IMU_RLC_TELEMETRY_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_MUTEX_CNTL 0x4ee7
+#define regRLC_RLCS_IMU_RLC_MUTEX_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_RLC_STATUS 0x4ee8
+#define regRLC_RLCS_IMU_RLC_STATUS_BASE_IDX 1
+#define regRLC_RLCS_RLC_IMU_STATUS 0x4ee9
+#define regRLC_RLCS_RLC_IMU_STATUS_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_DATA_1 0x4eea
+#define regRLC_RLCS_IMU_RAM_DATA_1_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_1_LSB 0x4eeb
+#define regRLC_RLCS_IMU_RAM_ADDR_1_LSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_1_MSB 0x4eec
+#define regRLC_RLCS_IMU_RAM_ADDR_1_MSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_DATA_0 0x4eed
+#define regRLC_RLCS_IMU_RAM_DATA_0_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_0_LSB 0x4eee
+#define regRLC_RLCS_IMU_RAM_ADDR_0_LSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_ADDR_0_MSB 0x4eef
+#define regRLC_RLCS_IMU_RAM_ADDR_0_MSB_BASE_IDX 1
+#define regRLC_RLCS_IMU_RAM_CNTL 0x4ef0
+#define regRLC_RLCS_IMU_RAM_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IMU_GFX_DOORBELL_FENCE 0x4ef1
+#define regRLC_RLCS_IMU_GFX_DOORBELL_FENCE_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_CNTL_1 0x4ef3
+#define regRLC_RLCS_SDMA_INT_CNTL_1_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_CNTL_2 0x4ef4
+#define regRLC_RLCS_SDMA_INT_CNTL_2_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_STAT 0x4ef5
+#define regRLC_RLCS_SDMA_INT_STAT_BASE_IDX 1
+#define regRLC_RLCS_SDMA_INT_INFO 0x4ef6
+#define regRLC_RLCS_SDMA_INT_INFO_BASE_IDX 1
+#define regRLC_RLCS_PMM_CGCG_CNTL 0x4ef7
+#define regRLC_RLCS_PMM_CGCG_CNTL_BASE_IDX 1
+#define regRLC_RLCS_GFX_MEM_POWER_CTRL_LO 0x4ef8
+#define regRLC_RLCS_GFX_MEM_POWER_CTRL_LO_BASE_IDX 1
+#define regRLC_RLCS_GFX_RM_CNTL 0x4efa
+#define regRLC_RLCS_GFX_RM_CNTL_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_1 0x4efb
+#define regRLC_RLCS_IH_CTRL_1_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_2 0x4efc
+#define regRLC_RLCS_IH_CTRL_2_BASE_IDX 1
+#define regRLC_RLCS_IH_CTRL_3 0x4efd
+#define regRLC_RLCS_IH_CTRL_3_BASE_IDX 1
+#define regRLC_RLCS_IH_STATUS 0x4efe
+#define regRLC_RLCS_IH_STATUS_BASE_IDX 1
+#define regRLC_RLCS_DEC_END 0x4fff
+#define regRLC_RLCS_DEC_END_BASE_IDX 1
+
+
+// addressBlock: gc_pfvfdec_rlc
+// base address: 0x2a600
+#define regRLC_SAFE_MODE 0x0980
+#define regRLC_SAFE_MODE_BASE_IDX 1
+#define regRLC_SPM_SAMPLE_CNT 0x0981
+#define regRLC_SPM_SAMPLE_CNT_BASE_IDX 1
+#define regRLC_SPM_MC_CNTL 0x0982
+#define regRLC_SPM_MC_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_CNTL 0x0983
+#define regRLC_SPM_INT_CNTL_BASE_IDX 1
+#define regRLC_SPM_INT_STATUS 0x0984
+#define regRLC_SPM_INT_STATUS_BASE_IDX 1
+#define regRLC_SPM_INT_INFO_1 0x0985
+#define regRLC_SPM_INT_INFO_1_BASE_IDX 1
+#define regRLC_SPM_INT_INFO_2 0x0986
+#define regRLC_SPM_INT_INFO_2_BASE_IDX 1
+#define regRLC_CSIB_ADDR_LO 0x0987
+#define regRLC_CSIB_ADDR_LO_BASE_IDX 1
+#define regRLC_CSIB_ADDR_HI 0x0988
+#define regRLC_CSIB_ADDR_HI_BASE_IDX 1
+#define regRLC_CSIB_LENGTH 0x0989
+#define regRLC_CSIB_LENGTH_BASE_IDX 1
+#define regRLC_CP_SCHEDULERS 0x098a
+#define regRLC_CP_SCHEDULERS_BASE_IDX 1
+#define regRLC_CP_EOF_INT 0x098b
+#define regRLC_CP_EOF_INT_BASE_IDX 1
+#define regRLC_CP_EOF_INT_CNT 0x098c
+#define regRLC_CP_EOF_INT_CNT_BASE_IDX 1
+#define regRLC_SPARE_INT_0 0x098d
+#define regRLC_SPARE_INT_0_BASE_IDX 1
+#define regRLC_SPARE_INT_1 0x098e
+#define regRLC_SPARE_INT_1_BASE_IDX 1
+#define regRLC_SPARE_INT_2 0x098f
+#define regRLC_SPARE_INT_2_BASE_IDX 1
+#define regRLC_PACE_SPARE_INT 0x0990
+#define regRLC_PACE_SPARE_INT_BASE_IDX 1
+#define regRLC_PACE_SPARE_INT_1 0x0991
+#define regRLC_PACE_SPARE_INT_1_BASE_IDX 1
+#define regRLC_RLCV_SPARE_INT_1 0x0992
+#define regRLC_RLCV_SPARE_INT_1_BASE_IDX 1
+
+
+// addressBlock: gc_pwrdec
+// base address: 0x3c000
+#define regCGTS_TCC_DISABLE 0x5006
+#define regCGTS_TCC_DISABLE_BASE_IDX 1
+#define regCGTX_SPI_DEBUG_CLK_CTRL 0x507f
+#define regCGTX_SPI_DEBUG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_VGT_CLK_CTRL 0x5084
+#define regCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define regCGTT_IA_CLK_CTRL 0x5085
+#define regCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_WD_CLK_CTRL 0x5086
+#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define regCGTT_GS_NGG_CLK_CTRL 0x5087
+#define regCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define regCGTT_PA_CLK_CTRL 0x5088
+#define regCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL0 0x5089
+#define regCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL1 0x508a
+#define regCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL2 0x508b
+#define regCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_SQG_CLK_CTRL 0x508d
+#define regCGTT_SQG_CLK_CTRL_BASE_IDX 1
+#define regSQ_ALU_CLK_CTRL 0x508e
+#define regSQ_ALU_CLK_CTRL_BASE_IDX 1
+#define regSQ_TEX_CLK_CTRL 0x508f
+#define regSQ_TEX_CLK_CTRL_BASE_IDX 1
+#define regSQ_LDS_CLK_CTRL 0x5090
+#define regSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define regICG_SP_CLK_CTRL 0x5093
+#define regICG_SP_CLK_CTRL_BASE_IDX 1
+#define regTA_CGTT_CTRL 0x509d
+#define regTA_CGTT_CTRL_BASE_IDX 1
+#define regDB_CGTT_CLK_CTRL_0 0x50a4
+#define regDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define regCB_CGTT_SCLK_CTRL 0x50a8
+#define regCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define regGFX_ICG_GL2A_CTRL 0x50ac
+#define regGFX_ICG_GL2A_CTRL_BASE_IDX 1
+#define regCGTT_CP_CLK_CTRL 0x50b0
+#define regCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPF_CLK_CTRL 0x50b1
+#define regCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define regCGTT_CPC_CLK_CTRL 0x50b2
+#define regCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_RLC_CLK_CTRL 0x50b5
+#define regCGTT_RLC_CLK_CTRL_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL3 0x50bc
+#define regCGTT_SC_CLK_CTRL3_BASE_IDX 1
+#define regCGTT_SC_CLK_CTRL4 0x50bd
+#define regCGTT_SC_CLK_CTRL4_BASE_IDX 1
+#define regGCEA_ICG_CTRL 0x50c4
+#define regGCEA_ICG_CTRL_BASE_IDX 1
+#define regGL1I_GL1R_MGCG_OVERRIDE 0x50e4
+#define regGL1I_GL1R_MGCG_OVERRIDE_BASE_IDX 1
+#define regGL1H_ICG_CTRL 0x50e8
+#define regGL1H_ICG_CTRL_BASE_IDX 1
+#define regCHI_CHR_MGCG_OVERRIDE 0x50e9
+#define regCHI_CHR_MGCG_OVERRIDE_BASE_IDX 1
+#define regICG_GL1C_CLK_CTRL 0x50ec
+#define regICG_GL1C_CLK_CTRL_BASE_IDX 1
+#define regICG_GL1A_CTRL 0x50f0
+#define regICG_GL1A_CTRL_BASE_IDX 1
+#define regICG_CHA_CTRL 0x50f1
+#define regICG_CHA_CTRL_BASE_IDX 1
+#define regGUS_ICG_CTRL 0x50f4
+#define regGUS_ICG_CTRL_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL0 0x50f8
+#define regCGTT_PH_CLK_CTRL0_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL1 0x50f9
+#define regCGTT_PH_CLK_CTRL1_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL2 0x50fa
+#define regCGTT_PH_CLK_CTRL2_BASE_IDX 1
+#define regCGTT_PH_CLK_CTRL3 0x50fb
+#define regCGTT_PH_CLK_CTRL3_BASE_IDX 1
+#define regGFX_ICG_GL2C_CTRL 0x50fc
+#define regGFX_ICG_GL2C_CTRL_BASE_IDX 1
+#define regGFX_ICG_GL2C_CTRL1 0x50fd
+#define regGFX_ICG_GL2C_CTRL1_BASE_IDX 1
+#define regICG_LDS_CLK_CTRL 0x5114
+#define regICG_LDS_CLK_CTRL_BASE_IDX 1
+#define regGFX_ICG_UTCL1_CTRL 0x511c
+#define regGFX_ICG_UTCL1_CTRL_BASE_IDX 1
+#define regICG_CHC_CLK_CTRL 0x5140
+#define regICG_CHC_CLK_CTRL_BASE_IDX 1
+#define regICG_CHCG_CLK_CTRL 0x5144
+#define regICG_CHCG_CLK_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_pspdec
+// base address: 0x3f000
+#define regCP_MES_DM_INDEX_ADDR 0x5c00
+#define regCP_MES_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_MES_DM_INDEX_DATA 0x5c01
+#define regCP_MES_DM_INDEX_DATA_BASE_IDX 1
+#define regCP_MEC_DM_INDEX_ADDR 0x5c02
+#define regCP_MEC_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_MEC_DM_INDEX_DATA 0x5c03
+#define regCP_MEC_DM_INDEX_DATA_BASE_IDX 1
+#define regCP_GFX_RS64_DM_INDEX_ADDR 0x5c04
+#define regCP_GFX_RS64_DM_INDEX_ADDR_BASE_IDX 1
+#define regCP_GFX_RS64_DM_INDEX_DATA 0x5c05
+#define regCP_GFX_RS64_DM_INDEX_DATA_BASE_IDX 1
+#define regCPG_PSP_DEBUG 0x5c10
+#define regCPG_PSP_DEBUG_BASE_IDX 1
+#define regCPC_PSP_DEBUG 0x5c11
+#define regCPC_PSP_DEBUG_BASE_IDX 1
+#define regGRBM_IOV_ERROR_FIFO 0x5e07
+#define regGRBM_IOV_ERROR_FIFO_BASE_IDX 1
+#define regGRBM_SEC_CNTL 0x5e0d
+#define regGRBM_SEC_CNTL_BASE_IDX 1
+#define regGRBM_CAM_INDEX 0x5e10
+#define regGRBM_CAM_INDEX_BASE_IDX 1
+#define regGRBM_HYP_CAM_INDEX 0x5e10
+#define regGRBM_HYP_CAM_INDEX_BASE_IDX 1
+#define regGRBM_CAM_DATA 0x5e11
+#define regGRBM_CAM_DATA_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA 0x5e11
+#define regGRBM_HYP_CAM_DATA_BASE_IDX 1
+#define regGRBM_CAM_DATA_UPPER 0x5e12
+#define regGRBM_CAM_DATA_UPPER_BASE_IDX 1
+#define regGRBM_HYP_CAM_DATA_UPPER 0x5e12
+#define regGRBM_HYP_CAM_DATA_UPPER_BASE_IDX 1
+#define regRLC_FWL_FIRST_VIOL_ADDR 0x5f26
+#define regRLC_FWL_FIRST_VIOL_ADDR_BASE_IDX 1
+
+
+// addressBlock: gc_gfx_imu_gfx_imudec
+// base address: 0x38000
+#define regGFX_IMU_C2PMSG_0 0x4000
+#define regGFX_IMU_C2PMSG_0_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_1 0x4001
+#define regGFX_IMU_C2PMSG_1_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_2 0x4002
+#define regGFX_IMU_C2PMSG_2_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_3 0x4003
+#define regGFX_IMU_C2PMSG_3_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_4 0x4004
+#define regGFX_IMU_C2PMSG_4_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_5 0x4005
+#define regGFX_IMU_C2PMSG_5_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_6 0x4006
+#define regGFX_IMU_C2PMSG_6_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_7 0x4007
+#define regGFX_IMU_C2PMSG_7_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_8 0x4008
+#define regGFX_IMU_C2PMSG_8_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_9 0x4009
+#define regGFX_IMU_C2PMSG_9_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_10 0x400a
+#define regGFX_IMU_C2PMSG_10_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_11 0x400b
+#define regGFX_IMU_C2PMSG_11_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_12 0x400c
+#define regGFX_IMU_C2PMSG_12_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_13 0x400d
+#define regGFX_IMU_C2PMSG_13_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_14 0x400e
+#define regGFX_IMU_C2PMSG_14_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_15 0x400f
+#define regGFX_IMU_C2PMSG_15_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_16 0x4010
+#define regGFX_IMU_C2PMSG_16_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_17 0x4011
+#define regGFX_IMU_C2PMSG_17_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_18 0x4012
+#define regGFX_IMU_C2PMSG_18_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_19 0x4013
+#define regGFX_IMU_C2PMSG_19_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_20 0x4014
+#define regGFX_IMU_C2PMSG_20_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_21 0x4015
+#define regGFX_IMU_C2PMSG_21_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_22 0x4016
+#define regGFX_IMU_C2PMSG_22_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_23 0x4017
+#define regGFX_IMU_C2PMSG_23_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_24 0x4018
+#define regGFX_IMU_C2PMSG_24_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_25 0x4019
+#define regGFX_IMU_C2PMSG_25_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_26 0x401a
+#define regGFX_IMU_C2PMSG_26_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_27 0x401b
+#define regGFX_IMU_C2PMSG_27_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_28 0x401c
+#define regGFX_IMU_C2PMSG_28_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_29 0x401d
+#define regGFX_IMU_C2PMSG_29_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_30 0x401e
+#define regGFX_IMU_C2PMSG_30_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_31 0x401f
+#define regGFX_IMU_C2PMSG_31_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_32 0x4020
+#define regGFX_IMU_C2PMSG_32_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_33 0x4021
+#define regGFX_IMU_C2PMSG_33_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_34 0x4022
+#define regGFX_IMU_C2PMSG_34_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_35 0x4023
+#define regGFX_IMU_C2PMSG_35_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_36 0x4024
+#define regGFX_IMU_C2PMSG_36_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_37 0x4025
+#define regGFX_IMU_C2PMSG_37_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_38 0x4026
+#define regGFX_IMU_C2PMSG_38_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_39 0x4027
+#define regGFX_IMU_C2PMSG_39_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_40 0x4028
+#define regGFX_IMU_C2PMSG_40_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_41 0x4029
+#define regGFX_IMU_C2PMSG_41_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_42 0x402a
+#define regGFX_IMU_C2PMSG_42_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_43 0x402b
+#define regGFX_IMU_C2PMSG_43_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_44 0x402c
+#define regGFX_IMU_C2PMSG_44_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_45 0x402d
+#define regGFX_IMU_C2PMSG_45_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_46 0x402e
+#define regGFX_IMU_C2PMSG_46_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_47 0x402f
+#define regGFX_IMU_C2PMSG_47_BASE_IDX 1
+#define regGFX_IMU_MSG_FLAGS 0x403f
+#define regGFX_IMU_MSG_FLAGS_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL0 0x4040
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL0_BASE_IDX 1
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL1 0x4041
+#define regGFX_IMU_C2PMSG_ACCESS_CTRL1_BASE_IDX 1
+#define regGFX_IMU_PWRMGT_IRQ_CTRL 0x4042
+#define regGFX_IMU_PWRMGT_IRQ_CTRL_BASE_IDX 1
+#define regGFX_IMU_MP1_MUTEX 0x4043
+#define regGFX_IMU_MP1_MUTEX_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_4 0x4046
+#define regGFX_IMU_RLC_DATA_4_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_3 0x4047
+#define regGFX_IMU_RLC_DATA_3_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_2 0x4048
+#define regGFX_IMU_RLC_DATA_2_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_1 0x4049
+#define regGFX_IMU_RLC_DATA_1_BASE_IDX 1
+#define regGFX_IMU_RLC_DATA_0 0x404a
+#define regGFX_IMU_RLC_DATA_0_BASE_IDX 1
+#define regGFX_IMU_RLC_CMD 0x404b
+#define regGFX_IMU_RLC_CMD_BASE_IDX 1
+#define regGFX_IMU_RLC_MUTEX 0x404c
+#define regGFX_IMU_RLC_MUTEX_BASE_IDX 1
+#define regGFX_IMU_RLC_MSG_STATUS 0x404f
+#define regGFX_IMU_RLC_MSG_STATUS_BASE_IDX 1
+#define regRLC_GFX_IMU_DATA_0 0x4052
+#define regRLC_GFX_IMU_DATA_0_BASE_IDX 1
+#define regRLC_GFX_IMU_CMD 0x4053
+#define regRLC_GFX_IMU_CMD_BASE_IDX 1
+#define regGFX_IMU_RLC_STATUS 0x4054
+#define regGFX_IMU_RLC_STATUS_BASE_IDX 1
+#define regGFX_IMU_STATUS 0x4055
+#define regGFX_IMU_STATUS_BASE_IDX 1
+#define regGFX_IMU_SOC_DATA 0x4059
+#define regGFX_IMU_SOC_DATA_BASE_IDX 1
+#define regGFX_IMU_SOC_ADDR 0x405a
+#define regGFX_IMU_SOC_ADDR_BASE_IDX 1
+#define regGFX_IMU_SOC_REQ 0x405b
+#define regGFX_IMU_SOC_REQ_BASE_IDX 1
+#define regGFX_IMU_VF_CTRL 0x405c
+#define regGFX_IMU_VF_CTRL_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY 0x4060
+#define regGFX_IMU_TELEMETRY_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY_DATA 0x4061
+#define regGFX_IMU_TELEMETRY_DATA_BASE_IDX 1
+#define regGFX_IMU_TELEMETRY_TEMPERATURE 0x4062
+#define regGFX_IMU_TELEMETRY_TEMPERATURE_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_0 0x4068
+#define regGFX_IMU_SCRATCH_0_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_1 0x4069
+#define regGFX_IMU_SCRATCH_1_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_2 0x406a
+#define regGFX_IMU_SCRATCH_2_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_3 0x406b
+#define regGFX_IMU_SCRATCH_3_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_4 0x406c
+#define regGFX_IMU_SCRATCH_4_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_5 0x406d
+#define regGFX_IMU_SCRATCH_5_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_6 0x406e
+#define regGFX_IMU_SCRATCH_6_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_7 0x406f
+#define regGFX_IMU_SCRATCH_7_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_8 0x4070
+#define regGFX_IMU_SCRATCH_8_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_9 0x4071
+#define regGFX_IMU_SCRATCH_9_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_10 0x4072
+#define regGFX_IMU_SCRATCH_10_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_11 0x4073
+#define regGFX_IMU_SCRATCH_11_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_12 0x4074
+#define regGFX_IMU_SCRATCH_12_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_13 0x4075
+#define regGFX_IMU_SCRATCH_13_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_14 0x4076
+#define regGFX_IMU_SCRATCH_14_BASE_IDX 1
+#define regGFX_IMU_SCRATCH_15 0x4077
+#define regGFX_IMU_SCRATCH_15_BASE_IDX 1
+#define regGFX_IMU_FW_GTS_LO 0x4078
+#define regGFX_IMU_FW_GTS_LO_BASE_IDX 1
+#define regGFX_IMU_FW_GTS_HI 0x4079
+#define regGFX_IMU_FW_GTS_HI_BASE_IDX 1
+#define regGFX_IMU_GTS_OFFSET_LO 0x407a
+#define regGFX_IMU_GTS_OFFSET_LO_BASE_IDX 1
+#define regGFX_IMU_GTS_OFFSET_HI 0x407b
+#define regGFX_IMU_GTS_OFFSET_HI_BASE_IDX 1
+#define regGFX_IMU_RLC_GTS_OFFSET_LO 0x407c
+#define regGFX_IMU_RLC_GTS_OFFSET_LO_BASE_IDX 1
+#define regGFX_IMU_RLC_GTS_OFFSET_HI 0x407d
+#define regGFX_IMU_RLC_GTS_OFFSET_HI_BASE_IDX 1
+#define regGFX_IMU_CORE_INT_STATUS 0x407f
+#define regGFX_IMU_CORE_INT_STATUS_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_MASK 0x4080
+#define regGFX_IMU_PIC_INT_MASK_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_LVL 0x4081
+#define regGFX_IMU_PIC_INT_LVL_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_EDGE 0x4082
+#define regGFX_IMU_PIC_INT_EDGE_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_0 0x4083
+#define regGFX_IMU_PIC_INT_PRI_0_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_1 0x4084
+#define regGFX_IMU_PIC_INT_PRI_1_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_2 0x4085
+#define regGFX_IMU_PIC_INT_PRI_2_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_3 0x4086
+#define regGFX_IMU_PIC_INT_PRI_3_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_4 0x4087
+#define regGFX_IMU_PIC_INT_PRI_4_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_5 0x4088
+#define regGFX_IMU_PIC_INT_PRI_5_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_6 0x4089
+#define regGFX_IMU_PIC_INT_PRI_6_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_PRI_7 0x408a
+#define regGFX_IMU_PIC_INT_PRI_7_BASE_IDX 1
+#define regGFX_IMU_PIC_INT_STATUS 0x408b
+#define regGFX_IMU_PIC_INT_STATUS_BASE_IDX 1
+#define regGFX_IMU_PIC_INTR 0x408c
+#define regGFX_IMU_PIC_INTR_BASE_IDX 1
+#define regGFX_IMU_PIC_INTR_ID 0x408d
+#define regGFX_IMU_PIC_INTR_ID_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_1 0x4090
+#define regGFX_IMU_IH_CTRL_1_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_2 0x4091
+#define regGFX_IMU_IH_CTRL_2_BASE_IDX 1
+#define regGFX_IMU_IH_CTRL_3 0x4092
+#define regGFX_IMU_IH_CTRL_3_BASE_IDX 1
+#define regGFX_IMU_IH_STATUS 0x4093
+#define regGFX_IMU_IH_STATUS_BASE_IDX 1
+#define regGFX_IMU_FUSESTRAP 0x4094
+#define regGFX_IMU_FUSESTRAP_BASE_IDX 1
+#define regGFX_IMU_SMUIO_VIDCHG_CTRL 0x4098
+#define regGFX_IMU_SMUIO_VIDCHG_CTRL_BASE_IDX 1
+#define regGFX_IMU_GFXCLK_BYPASS_CTRL 0x409c
+#define regGFX_IMU_GFXCLK_BYPASS_CTRL_BASE_IDX 1
+#define regGFX_IMU_CLK_CTRL 0x409d
+#define regGFX_IMU_CLK_CTRL_BASE_IDX 1
+#define regGFX_IMU_DOORBELL_CONTROL 0x409e
+#define regGFX_IMU_DOORBELL_CONTROL_BASE_IDX 1
+#define regGFX_IMU_RLC_CG_CTRL 0x40a0
+#define regGFX_IMU_RLC_CG_CTRL_BASE_IDX 1
+#define regGFX_IMU_RLC_THROTTLE_GFX 0x40a1
+#define regGFX_IMU_RLC_THROTTLE_GFX_BASE_IDX 1
+#define regGFX_IMU_RLC_RESET_VECTOR 0x40a2
+#define regGFX_IMU_RLC_RESET_VECTOR_BASE_IDX 1
+#define regGFX_IMU_RLC_OVERRIDE 0x40a3
+#define regGFX_IMU_RLC_OVERRIDE_BASE_IDX 1
+#define regGFX_IMU_DPM_CONTROL 0x40a8
+#define regGFX_IMU_DPM_CONTROL_BASE_IDX 1
+#define regGFX_IMU_DPM_ACC 0x40a9
+#define regGFX_IMU_DPM_ACC_BASE_IDX 1
+#define regGFX_IMU_DPM_REF_COUNTER 0x40aa
+#define regGFX_IMU_DPM_REF_COUNTER_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_INDEX 0x40ac
+#define regGFX_IMU_RLC_RAM_INDEX_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_ADDR_HIGH 0x40ad
+#define regGFX_IMU_RLC_RAM_ADDR_HIGH_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_ADDR_LOW 0x40ae
+#define regGFX_IMU_RLC_RAM_ADDR_LOW_BASE_IDX 1
+#define regGFX_IMU_RLC_RAM_DATA 0x40af
+#define regGFX_IMU_RLC_RAM_DATA_BASE_IDX 1
+#define regGFX_IMU_FENCE_CTRL 0x40b0
+#define regGFX_IMU_FENCE_CTRL_BASE_IDX 1
+#define regGFX_IMU_FENCE_LOG_INIT 0x40b1
+#define regGFX_IMU_FENCE_LOG_INIT_BASE_IDX 1
+#define regGFX_IMU_FENCE_LOG_ADDR 0x40b2
+#define regGFX_IMU_FENCE_LOG_ADDR_BASE_IDX 1
+#define regGFX_IMU_PROGRAM_CTR 0x40b5
+#define regGFX_IMU_PROGRAM_CTR_BASE_IDX 1
+#define regGFX_IMU_CORE_CTRL 0x40b6
+#define regGFX_IMU_CORE_CTRL_BASE_IDX 1
+#define regGFX_IMU_CORE_STATUS 0x40b7
+#define regGFX_IMU_CORE_STATUS_BASE_IDX 1
+#define regGFX_IMU_PWROKRAW 0x40b8
+#define regGFX_IMU_PWROKRAW_BASE_IDX 1
+#define regGFX_IMU_PWROK 0x40b9
+#define regGFX_IMU_PWROK_BASE_IDX 1
+#define regGFX_IMU_GAP_PWROK 0x40ba
+#define regGFX_IMU_GAP_PWROK_BASE_IDX 1
+#define regGFX_IMU_RESETn 0x40bb
+#define regGFX_IMU_RESETn_BASE_IDX 1
+#define regGFX_IMU_GFX_RESET_CTRL 0x40bc
+#define regGFX_IMU_GFX_RESET_CTRL_BASE_IDX 1
+#define regGFX_IMU_AEB_OVERRIDE 0x40bd
+#define regGFX_IMU_AEB_OVERRIDE_BASE_IDX 1
+#define regGFX_IMU_VDCI_RESET_CTRL 0x40be
+#define regGFX_IMU_VDCI_RESET_CTRL_BASE_IDX 1
+#define regGFX_IMU_GFX_ISO_CTRL 0x40bf
+#define regGFX_IMU_GFX_ISO_CTRL_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CTRL0 0x40c0
+#define regGFX_IMU_TIMER0_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CTRL1 0x40c1
+#define regGFX_IMU_TIMER0_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP_AUTOINC 0x40c2
+#define regGFX_IMU_TIMER0_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP_INTEN 0x40c3
+#define regGFX_IMU_TIMER0_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP0 0x40c4
+#define regGFX_IMU_TIMER0_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP1 0x40c5
+#define regGFX_IMU_TIMER0_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER0_CMP3 0x40c7
+#define regGFX_IMU_TIMER0_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER0_VALUE 0x40c8
+#define regGFX_IMU_TIMER0_VALUE_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CTRL0 0x40c9
+#define regGFX_IMU_TIMER1_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CTRL1 0x40ca
+#define regGFX_IMU_TIMER1_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP_AUTOINC 0x40cb
+#define regGFX_IMU_TIMER1_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP_INTEN 0x40cc
+#define regGFX_IMU_TIMER1_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP0 0x40cd
+#define regGFX_IMU_TIMER1_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP1 0x40ce
+#define regGFX_IMU_TIMER1_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER1_CMP3 0x40d0
+#define regGFX_IMU_TIMER1_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER1_VALUE 0x40d1
+#define regGFX_IMU_TIMER1_VALUE_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CTRL0 0x40d2
+#define regGFX_IMU_TIMER2_CTRL0_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CTRL1 0x40d3
+#define regGFX_IMU_TIMER2_CTRL1_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP_AUTOINC 0x40d4
+#define regGFX_IMU_TIMER2_CMP_AUTOINC_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP_INTEN 0x40d5
+#define regGFX_IMU_TIMER2_CMP_INTEN_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP0 0x40d6
+#define regGFX_IMU_TIMER2_CMP0_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP1 0x40d7
+#define regGFX_IMU_TIMER2_CMP1_BASE_IDX 1
+#define regGFX_IMU_TIMER2_CMP3 0x40d9
+#define regGFX_IMU_TIMER2_CMP3_BASE_IDX 1
+#define regGFX_IMU_TIMER2_VALUE 0x40da
+#define regGFX_IMU_TIMER2_VALUE_BASE_IDX 1
+#define regGFX_IMU_FUSE_CTRL 0x40e0
+#define regGFX_IMU_FUSE_CTRL_BASE_IDX 1
+#define regGFX_IMU_D_RAM_ADDR 0x40fc
+#define regGFX_IMU_D_RAM_ADDR_BASE_IDX 1
+#define regGFX_IMU_D_RAM_DATA 0x40fd
+#define regGFX_IMU_D_RAM_DATA_BASE_IDX 1
+#define regGFX_IMU_GFX_IH_GASKET_CTRL 0x40ff
+#define regGFX_IMU_GFX_IH_GASKET_CTRL_BASE_IDX 1
+
+
+// addressBlock: gc_gfx_imu_gfx_imu_pspdec
+// base address: 0x3fe00
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_HI 0x5f81
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_HI_BASE_IDX 1
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_LO 0x5f82
+#define regGFX_IMU_RLC_BOOTLOADER_ADDR_LO_BASE_IDX 1
+#define regGFX_IMU_RLC_BOOTLOADER_SIZE 0x5f83
+#define regGFX_IMU_RLC_BOOTLOADER_SIZE_BASE_IDX 1
+#define regGFX_IMU_I_RAM_ADDR 0x5f90
+#define regGFX_IMU_I_RAM_ADDR_BASE_IDX 1
+#define regGFX_IMU_I_RAM_DATA 0x5f91
+#define regGFX_IMU_I_RAM_DATA_BASE_IDX 1
+
+
+// addressBlock: gccacind
+// base address: 0x0
+#define ixGC_CAC_ID 0x0000
+#define ixGC_CAC_CNTL 0x0001
+#define ixGC_CAC_ACC_CP0 0x0010
+#define ixGC_CAC_ACC_CP1 0x0011
+#define ixGC_CAC_ACC_CP2 0x0012
+#define ixGC_CAC_ACC_EA0 0x0013
+#define ixGC_CAC_ACC_EA1 0x0014
+#define ixGC_CAC_ACC_EA2 0x0015
+#define ixGC_CAC_ACC_EA3 0x0016
+#define ixGC_CAC_ACC_EA4 0x0017
+#define ixGC_CAC_ACC_EA5 0x0018
+#define ixGC_CAC_ACC_UTCL2_ROUTER0 0x0019
+#define ixGC_CAC_ACC_UTCL2_ROUTER1 0x001a
+#define ixGC_CAC_ACC_UTCL2_ROUTER2 0x001b
+#define ixGC_CAC_ACC_UTCL2_ROUTER3 0x001c
+#define ixGC_CAC_ACC_UTCL2_ROUTER4 0x001d
+#define ixGC_CAC_ACC_UTCL2_ROUTER5 0x001e
+#define ixGC_CAC_ACC_UTCL2_ROUTER6 0x001f
+#define ixGC_CAC_ACC_UTCL2_ROUTER7 0x0020
+#define ixGC_CAC_ACC_UTCL2_ROUTER8 0x0021
+#define ixGC_CAC_ACC_UTCL2_ROUTER9 0x0022
+#define ixGC_CAC_ACC_UTCL2_VML20 0x0023
+#define ixGC_CAC_ACC_UTCL2_VML21 0x0024
+#define ixGC_CAC_ACC_UTCL2_VML22 0x0025
+#define ixGC_CAC_ACC_UTCL2_VML23 0x0026
+#define ixGC_CAC_ACC_UTCL2_VML24 0x0027
+#define ixGC_CAC_ACC_UTCL2_WALKER0 0x0028
+#define ixGC_CAC_ACC_UTCL2_WALKER1 0x0029
+#define ixGC_CAC_ACC_UTCL2_WALKER2 0x002a
+#define ixGC_CAC_ACC_UTCL2_WALKER3 0x002b
+#define ixGC_CAC_ACC_UTCL2_WALKER4 0x002c
+#define ixGC_CAC_ACC_GDS0 0x002d
+#define ixGC_CAC_ACC_GDS1 0x002e
+#define ixGC_CAC_ACC_GDS2 0x002f
+#define ixGC_CAC_ACC_GDS3 0x0030
+#define ixGC_CAC_ACC_GDS4 0x0031
+#define ixGC_CAC_ACC_GE0 0x0032
+#define ixGC_CAC_ACC_GE1 0x0033
+#define ixGC_CAC_ACC_GE2 0x0034
+#define ixGC_CAC_ACC_GE3 0x0035
+#define ixGC_CAC_ACC_GE4 0x0036
+#define ixGC_CAC_ACC_GE5 0x0037
+#define ixGC_CAC_ACC_GE6 0x0038
+#define ixGC_CAC_ACC_GE7 0x0039
+#define ixGC_CAC_ACC_GE8 0x003a
+#define ixGC_CAC_ACC_GE9 0x003b
+#define ixGC_CAC_ACC_GE10 0x003c
+#define ixGC_CAC_ACC_GE11 0x003d
+#define ixGC_CAC_ACC_GE12 0x003e
+#define ixGC_CAC_ACC_GE13 0x003f
+#define ixGC_CAC_ACC_GE14 0x0040
+#define ixGC_CAC_ACC_GE15 0x0041
+#define ixGC_CAC_ACC_GE16 0x0042
+#define ixGC_CAC_ACC_GE17 0x0043
+#define ixGC_CAC_ACC_GE18 0x0044
+#define ixGC_CAC_ACC_GE19 0x0045
+#define ixGC_CAC_ACC_GE20 0x0046
+#define ixGC_CAC_ACC_PMM0 0x0047
+#define ixGC_CAC_ACC_GL2C0 0x0048
+#define ixGC_CAC_ACC_GL2C1 0x0049
+#define ixGC_CAC_ACC_GL2C2 0x004a
+#define ixGC_CAC_ACC_GL2C3 0x004b
+#define ixGC_CAC_ACC_GL2C4 0x004c
+#define ixGC_CAC_ACC_PH0 0x004d
+#define ixGC_CAC_ACC_PH1 0x004e
+#define ixGC_CAC_ACC_PH2 0x004f
+#define ixGC_CAC_ACC_PH3 0x0050
+#define ixGC_CAC_ACC_PH4 0x0051
+#define ixGC_CAC_ACC_PH5 0x0052
+#define ixGC_CAC_ACC_PH6 0x0053
+#define ixGC_CAC_ACC_PH7 0x0054
+#define ixGC_CAC_ACC_SDMA0 0x0055
+#define ixGC_CAC_ACC_SDMA1 0x0056
+#define ixGC_CAC_ACC_SDMA2 0x0057
+#define ixGC_CAC_ACC_SDMA3 0x0058
+#define ixGC_CAC_ACC_SDMA4 0x0059
+#define ixGC_CAC_ACC_SDMA5 0x005a
+#define ixGC_CAC_ACC_SDMA6 0x005b
+#define ixGC_CAC_ACC_SDMA7 0x005c
+#define ixGC_CAC_ACC_SDMA8 0x005d
+#define ixGC_CAC_ACC_SDMA9 0x005e
+#define ixGC_CAC_ACC_SDMA10 0x005f
+#define ixGC_CAC_ACC_SDMA11 0x0060
+#define ixGC_CAC_ACC_CHC0 0x0061
+#define ixGC_CAC_ACC_CHC1 0x0062
+#define ixGC_CAC_ACC_CHC2 0x0063
+#define ixGC_CAC_ACC_GUS0 0x0064
+#define ixGC_CAC_ACC_GUS1 0x0065
+#define ixGC_CAC_ACC_GUS2 0x0066
+#define ixGC_CAC_ACC_RLC0 0x0067
+#define ixGC_CAC_ACC_UTCL2_ATCL20 0x0068
+#define ixGC_CAC_ACC_UTCL2_ATCL21 0x0069
+#define ixGC_CAC_ACC_UTCL2_ATCL22 0x006a
+#define ixGC_CAC_ACC_UTCL2_ATCL23 0x006b
+#define ixGC_CAC_ACC_UTCL2_ATCL24 0x006c
+#define ixRELEASE_TO_STALL_LUT_1_8 0x0100
+#define ixRELEASE_TO_STALL_LUT_9_16 0x0101
+#define ixRELEASE_TO_STALL_LUT_17_20 0x0102
+#define ixSTALL_TO_RELEASE_LUT_1_4 0x0103
+#define ixSTALL_TO_RELEASE_LUT_5_7 0x0104
+#define ixSTALL_TO_PWRBRK_LUT_1_4 0x0105
+#define ixSTALL_TO_PWRBRK_LUT_5_7 0x0106
+#define ixPWRBRK_STALL_TO_RELEASE_LUT_1_4 0x0107
+#define ixPWRBRK_STALL_TO_RELEASE_LUT_5_7 0x0108
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_1_8 0x0109
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_9_16 0x010a
+#define ixPWRBRK_RELEASE_TO_STALL_LUT_17_20 0x010b
+#define ixFIXED_PATTERN_PERF_COUNTER_1 0x010c
+#define ixFIXED_PATTERN_PERF_COUNTER_2 0x010d
+#define ixFIXED_PATTERN_PERF_COUNTER_3 0x010e
+#define ixFIXED_PATTERN_PERF_COUNTER_4 0x010f
+#define ixFIXED_PATTERN_PERF_COUNTER_5 0x0110
+#define ixFIXED_PATTERN_PERF_COUNTER_6 0x0111
+#define ixFIXED_PATTERN_PERF_COUNTER_7 0x0112
+#define ixFIXED_PATTERN_PERF_COUNTER_8 0x0113
+#define ixFIXED_PATTERN_PERF_COUNTER_9 0x0114
+#define ixFIXED_PATTERN_PERF_COUNTER_10 0x0115
+#define ixHW_LUT_UPDATE_STATUS 0x0116
+
+
+// addressBlock: secacind
+// base address: 0x0
+#define ixSE_CAC_ID 0x0000
+#define ixSE_CAC_CNTL 0x0001
+
+
+// addressBlock: grtavfsind
+// base address: 0x0
+#define ixRTAVFS_REG0 0x0000
+#define ixRTAVFS_REG1 0x0001
+#define ixRTAVFS_REG2 0x0002
+#define ixRTAVFS_REG3 0x0003
+#define ixRTAVFS_REG4 0x0004
+#define ixRTAVFS_REG5 0x0005
+#define ixRTAVFS_REG6 0x0006
+#define ixRTAVFS_REG7 0x0007
+#define ixRTAVFS_REG8 0x0008
+#define ixRTAVFS_REG9 0x0009
+#define ixRTAVFS_REG10 0x000a
+#define ixRTAVFS_REG11 0x000b
+#define ixRTAVFS_REG12 0x000c
+#define ixRTAVFS_REG13 0x000d
+#define ixRTAVFS_REG14 0x000e
+#define ixRTAVFS_REG15 0x000f
+#define ixRTAVFS_REG16 0x0010
+#define ixRTAVFS_REG17 0x0011
+#define ixRTAVFS_REG18 0x0012
+#define ixRTAVFS_REG19 0x0013
+#define ixRTAVFS_REG20 0x0014
+#define ixRTAVFS_REG21 0x0015
+#define ixRTAVFS_REG22 0x0016
+#define ixRTAVFS_REG23 0x0017
+#define ixRTAVFS_REG24 0x0018
+#define ixRTAVFS_REG25 0x0019
+#define ixRTAVFS_REG26 0x001a
+#define ixRTAVFS_REG27 0x001b
+#define ixRTAVFS_REG28 0x001c
+#define ixRTAVFS_REG29 0x001d
+#define ixRTAVFS_REG30 0x001e
+#define ixRTAVFS_REG31 0x001f
+#define ixRTAVFS_REG32 0x0020
+#define ixRTAVFS_REG33 0x0021
+#define ixRTAVFS_REG34 0x0022
+#define ixRTAVFS_REG35 0x0023
+#define ixRTAVFS_REG36 0x0024
+#define ixRTAVFS_REG37 0x0025
+#define ixRTAVFS_REG38 0x0026
+#define ixRTAVFS_REG39 0x0027
+#define ixRTAVFS_REG40 0x0028
+#define ixRTAVFS_REG41 0x0029
+#define ixRTAVFS_REG42 0x002a
+#define ixRTAVFS_REG43 0x002b
+#define ixRTAVFS_REG44 0x002c
+#define ixRTAVFS_REG45 0x002d
+#define ixRTAVFS_REG46 0x002e
+#define ixRTAVFS_REG47 0x002f
+#define ixRTAVFS_REG48 0x0030
+#define ixRTAVFS_REG49 0x0031
+#define ixRTAVFS_REG50 0x0032
+#define ixRTAVFS_REG51 0x0033
+#define ixRTAVFS_REG52 0x0034
+#define ixRTAVFS_REG53 0x0035
+#define ixRTAVFS_REG54 0x0036
+#define ixRTAVFS_REG55 0x0037
+#define ixRTAVFS_REG56 0x0038
+#define ixRTAVFS_REG57 0x0039
+#define ixRTAVFS_REG58 0x003a
+#define ixRTAVFS_REG59 0x003b
+#define ixRTAVFS_REG60 0x003c
+#define ixRTAVFS_REG61 0x003d
+#define ixRTAVFS_REG62 0x003e
+#define ixRTAVFS_REG63 0x003f
+#define ixRTAVFS_REG64 0x0040
+#define ixRTAVFS_REG65 0x0041
+#define ixRTAVFS_REG66 0x0042
+#define ixRTAVFS_REG67 0x0043
+#define ixRTAVFS_REG68 0x0044
+#define ixRTAVFS_REG69 0x0045
+#define ixRTAVFS_REG70 0x0046
+#define ixRTAVFS_REG71 0x0047
+#define ixRTAVFS_REG72 0x0048
+#define ixRTAVFS_REG73 0x0049
+#define ixRTAVFS_REG74 0x004a
+#define ixRTAVFS_REG75 0x004b
+#define ixRTAVFS_REG76 0x004c
+#define ixRTAVFS_REG77 0x004d
+#define ixRTAVFS_REG78 0x004e
+#define ixRTAVFS_REG79 0x004f
+#define ixRTAVFS_REG80 0x0050
+#define ixRTAVFS_REG81 0x0051
+#define ixRTAVFS_REG82 0x0052
+#define ixRTAVFS_REG83 0x0053
+#define ixRTAVFS_REG84 0x0054
+#define ixRTAVFS_REG85 0x0055
+#define ixRTAVFS_REG86 0x0056
+#define ixRTAVFS_REG87 0x0057
+#define ixRTAVFS_REG88 0x0058
+#define ixRTAVFS_REG89 0x0059
+#define ixRTAVFS_REG90 0x005a
+#define ixRTAVFS_REG91 0x005b
+#define ixRTAVFS_REG92 0x005c
+#define ixRTAVFS_REG93 0x005d
+#define ixRTAVFS_REG94 0x005e
+#define ixRTAVFS_REG95 0x005f
+#define ixRTAVFS_REG96 0x0060
+#define ixRTAVFS_REG97 0x0061
+#define ixRTAVFS_REG98 0x0062
+#define ixRTAVFS_REG99 0x0063
+#define ixRTAVFS_REG100 0x0064
+#define ixRTAVFS_REG101 0x0065
+#define ixRTAVFS_REG102 0x0066
+#define ixRTAVFS_REG103 0x0067
+#define ixRTAVFS_REG104 0x0068
+#define ixRTAVFS_REG105 0x0069
+#define ixRTAVFS_REG106 0x006a
+#define ixRTAVFS_REG107 0x006b
+#define ixRTAVFS_REG108 0x006c
+#define ixRTAVFS_REG109 0x006d
+#define ixRTAVFS_REG110 0x006e
+#define ixRTAVFS_REG111 0x006f
+#define ixRTAVFS_REG112 0x0070
+#define ixRTAVFS_REG113 0x0071
+#define ixRTAVFS_REG114 0x0072
+#define ixRTAVFS_REG115 0x0073
+#define ixRTAVFS_REG116 0x0074
+#define ixRTAVFS_REG117 0x0075
+#define ixRTAVFS_REG118 0x0076
+#define ixRTAVFS_REG119 0x0077
+#define ixRTAVFS_REG120 0x0078
+#define ixRTAVFS_REG121 0x0079
+#define ixRTAVFS_REG122 0x007a
+#define ixRTAVFS_REG123 0x007b
+#define ixRTAVFS_REG124 0x007c
+#define ixRTAVFS_REG125 0x007d
+#define ixRTAVFS_REG126 0x007e
+#define ixRTAVFS_REG127 0x007f
+#define ixRTAVFS_REG128 0x0080
+#define ixRTAVFS_REG129 0x0081
+#define ixRTAVFS_REG130 0x0082
+#define ixRTAVFS_REG131 0x0083
+#define ixRTAVFS_REG132 0x0084
+#define ixRTAVFS_REG133 0x0085
+#define ixRTAVFS_REG134 0x0086
+#define ixRTAVFS_REG135 0x0087
+#define ixRTAVFS_REG136 0x0088
+#define ixRTAVFS_REG137 0x0089
+#define ixRTAVFS_REG138 0x008a
+#define ixRTAVFS_REG139 0x008b
+#define ixRTAVFS_REG140 0x008c
+#define ixRTAVFS_REG141 0x008d
+#define ixRTAVFS_REG142 0x008e
+#define ixRTAVFS_REG143 0x008f
+#define ixRTAVFS_REG144 0x0090
+#define ixRTAVFS_REG145 0x0091
+#define ixRTAVFS_REG146 0x0092
+#define ixRTAVFS_REG147 0x0093
+#define ixRTAVFS_REG148 0x0094
+#define ixRTAVFS_REG149 0x0095
+#define ixRTAVFS_REG150 0x0096
+#define ixRTAVFS_REG151 0x0097
+#define ixRTAVFS_REG152 0x0098
+#define ixRTAVFS_REG153 0x0099
+#define ixRTAVFS_REG154 0x009a
+#define ixRTAVFS_REG155 0x009b
+#define ixRTAVFS_REG156 0x009c
+#define ixRTAVFS_REG157 0x009d
+#define ixRTAVFS_REG158 0x009e
+#define ixRTAVFS_REG159 0x009f
+#define ixRTAVFS_REG160 0x00a0
+#define ixRTAVFS_REG161 0x00a1
+#define ixRTAVFS_REG162 0x00a2
+#define ixRTAVFS_REG163 0x00a3
+#define ixRTAVFS_REG164 0x00a4
+#define ixRTAVFS_REG165 0x00a5
+#define ixRTAVFS_REG166 0x00a6
+#define ixRTAVFS_REG167 0x00a7
+#define ixRTAVFS_REG168 0x00a8
+#define ixRTAVFS_REG169 0x00a9
+#define ixRTAVFS_REG170 0x00aa
+#define ixRTAVFS_REG171 0x00ab
+#define ixRTAVFS_REG172 0x00ac
+#define ixRTAVFS_REG173 0x00ad
+#define ixRTAVFS_REG174 0x00ae
+#define ixRTAVFS_REG175 0x00af
+#define ixRTAVFS_REG176 0x00b0
+#define ixRTAVFS_REG177 0x00b1
+#define ixRTAVFS_REG178 0x00b2
+#define ixRTAVFS_REG179 0x00b3
+#define ixRTAVFS_REG180 0x00b4
+#define ixRTAVFS_REG181 0x00b5
+#define ixRTAVFS_REG182 0x00b6
+#define ixRTAVFS_REG183 0x00b7
+#define ixRTAVFS_REG184 0x00b8
+#define ixRTAVFS_REG185 0x00b9
+#define ixRTAVFS_REG186 0x00ba
+#define ixRTAVFS_REG187 0x00bb
+#define ixRTAVFS_REG189 0x00bd
+#define ixRTAVFS_REG190 0x00be
+#define ixRTAVFS_REG191 0x00bf
+#define ixRTAVFS_REG192 0x00c0
+#define ixRTAVFS_REG193 0x00c1
+#define ixRTAVFS_REG194 0x00c2
+
+
+// addressBlock: sqind
+// base address: 0x0
+#define ixSQ_DEBUG_STS_LOCAL 0x0008
+#define ixSQ_DEBUG_CTRL_LOCAL 0x0009
+#define ixSQ_WAVE_ACTIVE 0x000a
+#define ixSQ_WAVE_VALID_AND_IDLE 0x000b
+#define ixSQ_WAVE_MODE 0x0101
+#define ixSQ_WAVE_STATUS 0x0102
+#define ixSQ_WAVE_TRAPSTS 0x0103
+#define ixSQ_WAVE_GPR_ALLOC 0x0105
+#define ixSQ_WAVE_LDS_ALLOC 0x0106
+#define ixSQ_WAVE_IB_STS 0x0107
+#define ixSQ_WAVE_PC_LO 0x0108
+#define ixSQ_WAVE_PC_HI 0x0109
+#define ixSQ_WAVE_IB_DBG1 0x010d
+#define ixSQ_WAVE_FLUSH_IB 0x010e
+#define ixSQ_WAVE_FLAT_SCRATCH_LO 0x0114
+#define ixSQ_WAVE_FLAT_SCRATCH_HI 0x0115
+#define ixSQ_WAVE_HW_ID1 0x0117
+#define ixSQ_WAVE_HW_ID2 0x0118
+#define ixSQ_WAVE_POPS_PACKER 0x0119
+#define ixSQ_WAVE_SCHED_MODE 0x011a
+#define ixSQ_WAVE_IB_STS2 0x011c
+#define ixSQ_WAVE_SHADER_CYCLES 0x011d
+#define ixSQ_WAVE_TTMP0 0x026c
+#define ixSQ_WAVE_TTMP1 0x026d
+#define ixSQ_WAVE_TTMP2 0x026e
+#define ixSQ_WAVE_TTMP3 0x026f
+#define ixSQ_WAVE_TTMP4 0x0270
+#define ixSQ_WAVE_TTMP5 0x0271
+#define ixSQ_WAVE_TTMP6 0x0272
+#define ixSQ_WAVE_TTMP7 0x0273
+#define ixSQ_WAVE_TTMP8 0x0274
+#define ixSQ_WAVE_TTMP9 0x0275
+#define ixSQ_WAVE_TTMP10 0x0276
+#define ixSQ_WAVE_TTMP11 0x0277
+#define ixSQ_WAVE_TTMP12 0x0278
+#define ixSQ_WAVE_TTMP13 0x0279
+#define ixSQ_WAVE_TTMP14 0x027a
+#define ixSQ_WAVE_TTMP15 0x027b
+#define ixSQ_WAVE_M0 0x027d
+#define ixSQ_WAVE_EXEC_LO 0x027e
+#define ixSQ_WAVE_EXEC_HI 0x027f
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
new file mode 100644
index 000000000000..ae3ef8a9e702
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
@@ -0,0 +1,44640 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _gc_11_0_3_SH_MASK_HEADER
+#define _gc_11_0_3_SH_MASK_HEADER
+
+
+// addressBlock: gc_sdma0_sdma0dec
+//SDMA0_DEC_START
+#define SDMA0_DEC_START__START__SHIFT 0x0
+#define SDMA0_DEC_START__START_MASK 0xFFFFFFFFL
+//SDMA0_F32_MISC_CNTL
+#define SDMA0_F32_MISC_CNTL__F32_WAKEUP__SHIFT 0x0
+#define SDMA0_F32_MISC_CNTL__F32_WAKEUP_MASK 0x00000001L
+//SDMA0_GLOBAL_TIMESTAMP_LO
+#define SDMA0_GLOBAL_TIMESTAMP_LO__DATA__SHIFT 0x0
+#define SDMA0_GLOBAL_TIMESTAMP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_GLOBAL_TIMESTAMP_HI
+#define SDMA0_GLOBAL_TIMESTAMP_HI__DATA__SHIFT 0x0
+#define SDMA0_GLOBAL_TIMESTAMP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_POWER_CNTL
+#define SDMA0_POWER_CNTL__LS_ENABLE__SHIFT 0x8
+#define SDMA0_POWER_CNTL__LS_ENABLE_MASK 0x00000100L
+//SDMA0_CNTL
+#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA0_CNTL__PIO_DONE_ACK_ENABLE__SHIFT 0x6
+#define SDMA0_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE__SHIFT 0x8
+#define SDMA0_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x9
+#define SDMA0_CNTL__CP_MES_INT_ENABLE__SHIFT 0xa
+#define SDMA0_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA0_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA0_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA0_CNTL__CH_PERFCNT_ENABLE__SHIFT 0x10
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA0_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA0_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA0_CNTL__PIO_DONE_ACK_ENABLE_MASK 0x00000040L
+#define SDMA0_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE_MASK 0x00000100L
+#define SDMA0_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000200L
+#define SDMA0_CNTL__CP_MES_INT_ENABLE_MASK 0x00000400L
+#define SDMA0_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA0_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA0_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA0_CNTL__CH_PERFCNT_ENABLE_MASK 0x00010000L
+#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA0_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA0_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA0_CHICKEN_BITS__BACK_COMPAT_ENABLE__SHIFT 0x3
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x5
+#define SDMA0_CHICKEN_BITS__RD_BURST__SHIFT 0x6
+#define SDMA0_CHICKEN_BITS__WR_BURST__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE__SHIFT 0xa
+#define SDMA0_CHICKEN_BITS__WR_COMBINE_256B_ENABLE__SHIFT 0xe
+#define SDMA0_CHICKEN_BITS__RD_COMBINE_256B_ENABLE__SHIFT 0xf
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA0_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG__SHIFT 0x13
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG__SHIFT 0x15
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG__SHIFT 0x16
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x18
+#define SDMA0_CHICKEN_BITS__SW_FREEZE_ENABLE__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x1a
+#define SDMA0_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA0_CHICKEN_BITS__BACK_COMPAT_ENABLE_MASK 0x00000008L
+#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00000020L
+#define SDMA0_CHICKEN_BITS__RD_BURST_MASK 0x000000C0L
+#define SDMA0_CHICKEN_BITS__WR_BURST_MASK 0x00000300L
+#define SDMA0_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE_MASK 0x00003C00L
+#define SDMA0_CHICKEN_BITS__WR_COMBINE_256B_ENABLE_MASK 0x00004000L
+#define SDMA0_CHICKEN_BITS__RD_COMBINE_256B_ENABLE_MASK 0x00008000L
+#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA0_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG_MASK 0x00080000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG_MASK 0x00100000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG_MASK 0x00200000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG_MASK 0x00400000L
+#define SDMA0_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG_MASK 0x00800000L
+#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x01000000L
+#define SDMA0_CHICKEN_BITS__SW_FREEZE_ENABLE_MASK 0x02000000L
+#define SDMA0_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL_MASK 0x04000000L
+#define SDMA0_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA0_GB_ADDR_CONFIG
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA0_GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA0_GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA0_GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA0_GB_ADDR_CONFIG_READ
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA0_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA0_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA0_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA0_RB_RPTR_FETCH
+#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA0_RB_RPTR_FETCH_HI
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA0_IB_OFFSET_FETCH
+#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA0_PROGRAM
+#define SDMA0_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA0_STATUS_REG
+#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA0_STATUS_REG__CGCG_FENCE__SHIFT 0xb
+#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA0_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA0_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA0_STATUS_REG__CGCG_FENCE_MASK 0x00000800L
+#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA0_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA0_STATUS1_REG
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA0_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA0_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xd
+#define SDMA0_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define SDMA0_STATUS1_REG__SEC_INTR_STATUS__SHIFT 0x11
+#define SDMA0_STATUS1_REG__WPTR_POLL_IDLE__SHIFT 0x12
+#define SDMA0_STATUS1_REG__SDMA_IDLE__SHIFT 0x13
+#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA0_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA0_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define SDMA0_STATUS1_REG__EX_START_MASK 0x00002000L
+#define SDMA0_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00004000L
+#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+#define SDMA0_STATUS1_REG__SEC_INTR_STATUS_MASK 0x00020000L
+#define SDMA0_STATUS1_REG__WPTR_POLL_IDLE_MASK 0x00040000L
+#define SDMA0_STATUS1_REG__SDMA_IDLE_MASK 0x00080000L
+//SDMA0_CNTL1
+#define SDMA0_CNTL1__WPTR_POLL_FREQUENCY__SHIFT 0x2
+#define SDMA0_CNTL1__WPTR_POLL_FREQUENCY_MASK 0x0000FFFCL
+//SDMA0_HBM_PAGE_CONFIG
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA0_UCODE_CHECKSUM
+#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_FREEZE
+#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA0_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA0_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA0_PROCESS_QUANTUM0
+#define SDMA0_PROCESS_QUANTUM0__PROCESS0_QUANTUM__SHIFT 0x0
+#define SDMA0_PROCESS_QUANTUM0__PROCESS1_QUANTUM__SHIFT 0x8
+#define SDMA0_PROCESS_QUANTUM0__PROCESS2_QUANTUM__SHIFT 0x10
+#define SDMA0_PROCESS_QUANTUM0__PROCESS3_QUANTUM__SHIFT 0x18
+#define SDMA0_PROCESS_QUANTUM0__PROCESS0_QUANTUM_MASK 0x000000FFL
+#define SDMA0_PROCESS_QUANTUM0__PROCESS1_QUANTUM_MASK 0x0000FF00L
+#define SDMA0_PROCESS_QUANTUM0__PROCESS2_QUANTUM_MASK 0x00FF0000L
+#define SDMA0_PROCESS_QUANTUM0__PROCESS3_QUANTUM_MASK 0xFF000000L
+//SDMA0_PROCESS_QUANTUM1
+#define SDMA0_PROCESS_QUANTUM1__PROCESS4_QUANTUM__SHIFT 0x0
+#define SDMA0_PROCESS_QUANTUM1__PROCESS5_QUANTUM__SHIFT 0x8
+#define SDMA0_PROCESS_QUANTUM1__PROCESS6_QUANTUM__SHIFT 0x10
+#define SDMA0_PROCESS_QUANTUM1__PROCESS7_QUANTUM__SHIFT 0x18
+#define SDMA0_PROCESS_QUANTUM1__PROCESS4_QUANTUM_MASK 0x000000FFL
+#define SDMA0_PROCESS_QUANTUM1__PROCESS5_QUANTUM_MASK 0x0000FF00L
+#define SDMA0_PROCESS_QUANTUM1__PROCESS6_QUANTUM_MASK 0x00FF0000L
+#define SDMA0_PROCESS_QUANTUM1__PROCESS7_QUANTUM_MASK 0xFF000000L
+//SDMA0_WATCHDOG_CNTL
+#define SDMA0_WATCHDOG_CNTL__QUEUE_HANG_COUNT__SHIFT 0x0
+#define SDMA0_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT__SHIFT 0x8
+#define SDMA0_WATCHDOG_CNTL__QUEUE_HANG_COUNT_MASK 0x000000FFL
+#define SDMA0_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT_MASK 0x0000FF00L
+//SDMA0_QUEUE_STATUS0
+#define SDMA0_QUEUE_STATUS0__QUEUE0_STATUS__SHIFT 0x0
+#define SDMA0_QUEUE_STATUS0__QUEUE1_STATUS__SHIFT 0x4
+#define SDMA0_QUEUE_STATUS0__QUEUE2_STATUS__SHIFT 0x8
+#define SDMA0_QUEUE_STATUS0__QUEUE3_STATUS__SHIFT 0xc
+#define SDMA0_QUEUE_STATUS0__QUEUE4_STATUS__SHIFT 0x10
+#define SDMA0_QUEUE_STATUS0__QUEUE5_STATUS__SHIFT 0x14
+#define SDMA0_QUEUE_STATUS0__QUEUE6_STATUS__SHIFT 0x18
+#define SDMA0_QUEUE_STATUS0__QUEUE7_STATUS__SHIFT 0x1c
+#define SDMA0_QUEUE_STATUS0__QUEUE0_STATUS_MASK 0x0000000FL
+#define SDMA0_QUEUE_STATUS0__QUEUE1_STATUS_MASK 0x000000F0L
+#define SDMA0_QUEUE_STATUS0__QUEUE2_STATUS_MASK 0x00000F00L
+#define SDMA0_QUEUE_STATUS0__QUEUE3_STATUS_MASK 0x0000F000L
+#define SDMA0_QUEUE_STATUS0__QUEUE4_STATUS_MASK 0x000F0000L
+#define SDMA0_QUEUE_STATUS0__QUEUE5_STATUS_MASK 0x00F00000L
+#define SDMA0_QUEUE_STATUS0__QUEUE6_STATUS_MASK 0x0F000000L
+#define SDMA0_QUEUE_STATUS0__QUEUE7_STATUS_MASK 0xF0000000L
+//SDMA0_EDC_CONFIG
+#define SDMA0_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA0_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA0_BA_THRESHOLD
+#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA0_ID
+#define SDMA0_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA0_VERSION
+#define SDMA0_VERSION__MINVER__SHIFT 0x0
+#define SDMA0_VERSION__MAJVER__SHIFT 0x8
+#define SDMA0_VERSION__REV__SHIFT 0x10
+#define SDMA0_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA0_VERSION__REV_MASK 0x003F0000L
+//SDMA0_EDC_COUNTER
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA0_EDC_COUNTER_CLEAR
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA0_STATUS2_REG
+#define SDMA0_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS2_REG__TH0F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA0_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS2_REG__TH0F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA0_ATOMIC_CNTL
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA0_ATOMIC_PREOP_LO
+#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA0_ATOMIC_PREOP_HI
+#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_CNTL
+#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x0
+#define SDMA0_UTCL1_CNTL__PAGE_WAIT_DELAY__SHIFT 0x5
+#define SDMA0_UTCL1_CNTL__RESP_MODE__SHIFT 0x9
+#define SDMA0_UTCL1_CNTL__FORCE_INVALIDATION__SHIFT 0xe
+#define SDMA0_UTCL1_CNTL__FORCE_INVREQ_HEAVY__SHIFT 0xf
+#define SDMA0_UTCL1_CNTL__WR_EXE_PERMS_CTRL__SHIFT 0x10
+#define SDMA0_UTCL1_CNTL__RD_EXE_PERMS_CTRL__SHIFT 0x11
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0x12
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x0000001FL
+#define SDMA0_UTCL1_CNTL__PAGE_WAIT_DELAY_MASK 0x000001E0L
+#define SDMA0_UTCL1_CNTL__RESP_MODE_MASK 0x00000600L
+#define SDMA0_UTCL1_CNTL__FORCE_INVALIDATION_MASK 0x00004000L
+#define SDMA0_UTCL1_CNTL__FORCE_INVREQ_HEAVY_MASK 0x00008000L
+#define SDMA0_UTCL1_CNTL__WR_EXE_PERMS_CTRL_MASK 0x00010000L
+#define SDMA0_UTCL1_CNTL__RD_EXE_PERMS_CTRL_MASK 0x00020000L
+#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x003C0000L
+#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x3F000000L
+//SDMA0_UTCL1_WATERMK
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK__SHIFT 0x0
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP__SHIFT 0x4
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK__SHIFT 0x6
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP__SHIFT 0xa
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK__SHIFT 0xc
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP__SHIFT 0x10
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK__SHIFT 0x12
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP__SHIFT 0x16
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK_MASK 0x0000000FL
+#define SDMA0_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP_MASK 0x00000030L
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK_MASK 0x000003C0L
+#define SDMA0_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP_MASK 0x00000C00L
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK_MASK 0x0000F000L
+#define SDMA0_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP_MASK 0x00030000L
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK_MASK 0x003C0000L
+#define SDMA0_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP_MASK 0x00C00000L
+//SDMA0_UTCL1_TIMEOUT
+#define SDMA0_UTCL1_TIMEOUT__XNACK_LIMIT__SHIFT 0x0
+#define SDMA0_UTCL1_TIMEOUT__XNACK_LIMIT_MASK 0x0000FFFFL
+//SDMA0_UTCL1_PAGE
+#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA0_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0xa
+#define SDMA0_UTCL1_PAGE__USE_IO__SHIFT 0xb
+#define SDMA0_UTCL1_PAGE__RD_L2_POLICY__SHIFT 0xc
+#define SDMA0_UTCL1_PAGE__WR_L2_POLICY__SHIFT 0xe
+#define SDMA0_UTCL1_PAGE__DMA_PAGE_SIZE__SHIFT 0x10
+#define SDMA0_UTCL1_PAGE__USE_BC__SHIFT 0x16
+#define SDMA0_UTCL1_PAGE__ADDR_IS_PA__SHIFT 0x17
+#define SDMA0_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0x18
+#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA0_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000003C0L
+#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000400L
+#define SDMA0_UTCL1_PAGE__USE_IO_MASK 0x00000800L
+#define SDMA0_UTCL1_PAGE__RD_L2_POLICY_MASK 0x00003000L
+#define SDMA0_UTCL1_PAGE__WR_L2_POLICY_MASK 0x0000C000L
+#define SDMA0_UTCL1_PAGE__DMA_PAGE_SIZE_MASK 0x003F0000L
+#define SDMA0_UTCL1_PAGE__USE_BC_MASK 0x00400000L
+#define SDMA0_UTCL1_PAGE__ADDR_IS_PA_MASK 0x00800000L
+#define SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK 0x01000000L
+//SDMA0_UTCL1_RD_STATUS
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_RD_STATUS__RESERVED0__SHIFT 0x5
+#define SDMA0_UTCL1_RD_STATUS__RESERVED1__SHIFT 0x6
+#define SDMA0_UTCL1_RD_STATUS__META_Q_EMPTY__SHIFT 0x7
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_RD_STATUS__RESERVED2__SHIFT 0xd
+#define SDMA0_UTCL1_RD_STATUS__RESERVED3__SHIFT 0xe
+#define SDMA0_UTCL1_RD_STATUS__META_Q_FULL__SHIFT 0xf
+#define SDMA0_UTCL1_RD_STATUS__RD_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA0_UTCL1_RD_STATUS__RD_REQRET_IDLE__SHIFT 0x11
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_IDLE__SHIFT 0x12
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_TYPE__SHIFT 0x13
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REG_READY__SHIFT 0x17
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA0_UTCL1_RD_STATUS__RESERVED4__SHIFT 0x1a
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_IN_RTR__SHIFT 0x1c
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_RD_STATUS__INV_BUSY__SHIFT 0x1e
+#define SDMA0_UTCL1_RD_STATUS__DBIT_REQ_IDLE__SHIFT 0x1f
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED0_MASK 0x00000020L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED1_MASK 0x00000040L
+#define SDMA0_UTCL1_RD_STATUS__META_Q_EMPTY_MASK 0x00000080L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA0_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED2_MASK 0x00002000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED3_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_STATUS__META_Q_FULL_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_STATUS__RD_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQRET_IDLE_MASK 0x00020000L
+#define SDMA0_UTCL1_RD_STATUS__RD_REQ_IDLE_MASK 0x00040000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_TYPE_MASK 0x00180000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA0_UTCL1_RD_STATUS__RESERVED4_MASK 0x04000000L
+#define SDMA0_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_IN_RTR_MASK 0x10000000L
+#define SDMA0_UTCL1_RD_STATUS__RDREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_RD_STATUS__INV_BUSY_MASK 0x40000000L
+#define SDMA0_UTCL1_RD_STATUS__DBIT_REQ_IDLE_MASK 0x80000000L
+//SDMA0_UTCL1_WR_STATUS
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_EMPTY__SHIFT 0x5
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_EMPTY__SHIFT 0x6
+#define SDMA0_UTCL1_WR_STATUS__RESERVED0__SHIFT 0x7
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_FULL__SHIFT 0xd
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_FULL__SHIFT 0xe
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0xf
+#define SDMA0_UTCL1_WR_STATUS__WR_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA0_UTCL1_WR_STATUS__WR_REQRET_IDLE__SHIFT 0x11
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_IDLE__SHIFT 0x12
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_TYPE__SHIFT 0x13
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REG_READY__SHIFT 0x17
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL__SHIFT 0x1a
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_RTR__SHIFT 0x1c
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR__SHIFT 0x1e
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR__SHIFT 0x1f
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_EMPTY_MASK 0x00000020L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_EMPTY_MASK 0x00000040L
+#define SDMA0_UTCL1_WR_STATUS__RESERVED0_MASK 0x00000080L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA0_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA0_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA0_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA2_FULL_MASK 0x00002000L
+#define SDMA0_UTCL1_WR_STATUS__WR_DATA1_FULL_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_STATUS__WR_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQRET_IDLE_MASK 0x00020000L
+#define SDMA0_UTCL1_WR_STATUS__WR_REQ_IDLE_MASK 0x00040000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_TYPE_MASK 0x00180000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL_MASK 0x04000000L
+#define SDMA0_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_RTR_MASK 0x10000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR_MASK 0x40000000L
+#define SDMA0_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR_MASK 0x80000000L
+//SDMA0_UTCL1_INV0
+#define SDMA0_UTCL1_INV0__INV_PROC_BUSY__SHIFT 0x0
+#define SDMA0_UTCL1_INV0__GPUVM_FRAG_SIZE__SHIFT 0x1
+#define SDMA0_UTCL1_INV0__GPUVM_VMID__SHIFT 0x7
+#define SDMA0_UTCL1_INV0__GPUVM_MODE__SHIFT 0xb
+#define SDMA0_UTCL1_INV0__GPUVM_HIGH__SHIFT 0xd
+#define SDMA0_UTCL1_INV0__GPUVM_TAG__SHIFT 0xe
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_HIGH__SHIFT 0x12
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_LOW__SHIFT 0x16
+#define SDMA0_UTCL1_INV0__INV_TYPE__SHIFT 0x1a
+#define SDMA0_UTCL1_INV0__INV_PROC_BUSY_MASK 0x00000001L
+#define SDMA0_UTCL1_INV0__GPUVM_FRAG_SIZE_MASK 0x0000007EL
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_MASK 0x00000780L
+#define SDMA0_UTCL1_INV0__GPUVM_MODE_MASK 0x00001800L
+#define SDMA0_UTCL1_INV0__GPUVM_HIGH_MASK 0x00002000L
+#define SDMA0_UTCL1_INV0__GPUVM_TAG_MASK 0x0003C000L
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_HIGH_MASK 0x003C0000L
+#define SDMA0_UTCL1_INV0__GPUVM_VMID_LOW_MASK 0x03C00000L
+#define SDMA0_UTCL1_INV0__INV_TYPE_MASK 0x0C000000L
+//SDMA0_UTCL1_INV1
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_INV2
+#define SDMA0_UTCL1_INV2__CPF_VMID__SHIFT 0x0
+#define SDMA0_UTCL1_INV2__CPF_FLUSH_TYPE__SHIFT 0x10
+#define SDMA0_UTCL1_INV2__CPF_FRAG_SIZE__SHIFT 0x11
+#define SDMA0_UTCL1_INV2__CPF_VMID_MASK 0x0000FFFFL
+#define SDMA0_UTCL1_INV2__CPF_FLUSH_TYPE_MASK 0x00010000L
+#define SDMA0_UTCL1_INV2__CPF_FRAG_SIZE_MASK 0x007E0000L
+//SDMA0_UTCL1_RD_XNACK0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_RD_XNACK1
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA0_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA0_UTCL1_WR_XNACK0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA0_UTCL1_WR_XNACK1
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA0_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA0_RELAX_ORDERING_LUT
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA0_CHICKEN_BITS_2
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA0_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA0_CHICKEN_BITS_2__UCODE_BUF_DS_EN__SHIFT 0x6
+#define SDMA0_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP__SHIFT 0x7
+#define SDMA0_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING__SHIFT 0x8
+#define SDMA0_CHICKEN_BITS_2__RESERVED_14_12__SHIFT 0xc
+#define SDMA0_CHICKEN_BITS_2__RESERVED_15__SHIFT 0xf
+#define SDMA0_CHICKEN_BITS_2__RB_FIFO_WATERMARK__SHIFT 0x10
+#define SDMA0_CHICKEN_BITS_2__IB_FIFO_WATERMARK__SHIFT 0x12
+#define SDMA0_CHICKEN_BITS_2__RESERVED_22_20__SHIFT 0x14
+#define SDMA0_CHICKEN_BITS_2__CH_RD_WATERMARK__SHIFT 0x17
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK__SHIFT 0x19
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB__SHIFT 0x1e
+#define SDMA0_CHICKEN_BITS_2__PIO_VFID_SOURCE__SHIFT 0x1f
+#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA0_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+#define SDMA0_CHICKEN_BITS_2__UCODE_BUF_DS_EN_MASK 0x00000040L
+#define SDMA0_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP_MASK 0x00000080L
+#define SDMA0_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING_MASK 0x00000F00L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_14_12_MASK 0x00007000L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_15_MASK 0x00008000L
+#define SDMA0_CHICKEN_BITS_2__RB_FIFO_WATERMARK_MASK 0x00030000L
+#define SDMA0_CHICKEN_BITS_2__IB_FIFO_WATERMARK_MASK 0x000C0000L
+#define SDMA0_CHICKEN_BITS_2__RESERVED_22_20_MASK 0x00700000L
+#define SDMA0_CHICKEN_BITS_2__CH_RD_WATERMARK_MASK 0x01800000L
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_MASK 0x3E000000L
+#define SDMA0_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB_MASK 0x40000000L
+#define SDMA0_CHICKEN_BITS_2__PIO_VFID_SOURCE_MASK 0x80000000L
+//SDMA0_STATUS3_REG
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA0_STATUS3_REG__AQL_PREV_CMD_IDLE__SHIFT 0x15
+#define SDMA0_STATUS3_REG__TLBI_IDLE__SHIFT 0x16
+#define SDMA0_STATUS3_REG__GCR_IDLE__SHIFT 0x17
+#define SDMA0_STATUS3_REG__INVREQ_IDLE__SHIFT 0x18
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x19
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x1a
+#define SDMA0_STATUS3_REG__TMZ_MTYPE_STATUS__SHIFT 0x1e
+#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA0_STATUS3_REG__AQL_PREV_CMD_IDLE_MASK 0x00200000L
+#define SDMA0_STATUS3_REG__TLBI_IDLE_MASK 0x00400000L
+#define SDMA0_STATUS3_REG__GCR_IDLE_MASK 0x00800000L
+#define SDMA0_STATUS3_REG__INVREQ_IDLE_MASK 0x01000000L
+#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x02000000L
+#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x3C000000L
+#define SDMA0_STATUS3_REG__TMZ_MTYPE_STATUS_MASK 0xC0000000L
+//SDMA0_PHYSICAL_ADDR_LO
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA0_PHYSICAL_ADDR_HI
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA0_GLOBAL_QUANTUM
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM__SHIFT 0x0
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM__SHIFT 0x8
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM_MASK 0x000000FFL
+#define SDMA0_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM_MASK 0x0000FF00L
+//SDMA0_ERROR_LOG
+#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA0_PUB_DUMMY_REG0
+#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG1
+#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG2
+#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_PUB_DUMMY_REG3
+#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_F32_COUNTER
+#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_CRD_CNTL
+#define SDMA0_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA0_CRD_CNTL__CH_WRREQ_CREDIT__SHIFT 0x13
+#define SDMA0_CRD_CNTL__CH_RDREQ_CREDIT__SHIFT 0x19
+#define SDMA0_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+#define SDMA0_CRD_CNTL__CH_WRREQ_CREDIT_MASK 0x01F80000L
+#define SDMA0_CRD_CNTL__CH_RDREQ_CREDIT_MASK 0x7E000000L
+//SDMA0_RLC_CGCG_CTRL
+#define SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE__SHIFT 0x1
+#define SDMA0_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS__SHIFT 0x10
+#define SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK 0x00000002L
+#define SDMA0_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS_MASK 0xFFFF0000L
+//SDMA0_GPU_IOV_VIOLATION_LOG
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA0_AQL_STATUS
+#define SDMA0_AQL_STATUS__COMPLETE_SIGNAL_EMPTY__SHIFT 0x0
+#define SDMA0_AQL_STATUS__INVALID_CMD_EMPTY__SHIFT 0x1
+#define SDMA0_AQL_STATUS__COMPLETE_SIGNAL_EMPTY_MASK 0x00000001L
+#define SDMA0_AQL_STATUS__INVALID_CMD_EMPTY_MASK 0x00000002L
+//SDMA0_EA_DBIT_ADDR_DATA
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_EA_DBIT_ADDR_INDEX
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA0_TLBI_GCR_CNTL
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CMD_DW__SHIFT 0x0
+#define SDMA0_TLBI_GCR_CNTL__GCR_CMD_DW__SHIFT 0x4
+#define SDMA0_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE__SHIFT 0x8
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CREDIT__SHIFT 0x10
+#define SDMA0_TLBI_GCR_CNTL__GCR_CREDIT__SHIFT 0x18
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CMD_DW_MASK 0x0000000FL
+#define SDMA0_TLBI_GCR_CNTL__GCR_CMD_DW_MASK 0x000000F0L
+#define SDMA0_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE_MASK 0x00000F00L
+#define SDMA0_TLBI_GCR_CNTL__TLBI_CREDIT_MASK 0x00FF0000L
+#define SDMA0_TLBI_GCR_CNTL__GCR_CREDIT_MASK 0xFF000000L
+//SDMA0_TILING_CONFIG
+#define SDMA0_TILING_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define SDMA0_TILING_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//SDMA0_HASH
+#define SDMA0_HASH__CHANNEL_BITS__SHIFT 0x0
+#define SDMA0_HASH__BANK_BITS__SHIFT 0x4
+#define SDMA0_HASH__CHANNEL_XOR_COUNT__SHIFT 0x8
+#define SDMA0_HASH__BANK_XOR_COUNT__SHIFT 0xc
+#define SDMA0_HASH__CHANNEL_BITS_MASK 0x00000007L
+#define SDMA0_HASH__BANK_BITS_MASK 0x00000070L
+#define SDMA0_HASH__CHANNEL_XOR_COUNT_MASK 0x00000700L
+#define SDMA0_HASH__BANK_XOR_COUNT_MASK 0x00007000L
+//SDMA0_INT_STATUS
+#define SDMA0_INT_STATUS__DATA__SHIFT 0x0
+#define SDMA0_INT_STATUS__DATA_MASK 0xFFFFFFFFL
+//SDMA0_GPU_IOV_VIOLATION_LOG2
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA0_HOLE_ADDR_LO
+#define SDMA0_HOLE_ADDR_LO__VALUE__SHIFT 0x0
+#define SDMA0_HOLE_ADDR_LO__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_HOLE_ADDR_HI
+#define SDMA0_HOLE_ADDR_HI__VALUE__SHIFT 0x0
+#define SDMA0_HOLE_ADDR_HI__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_CLOCK_GATING_STATUS
+#define SDMA0_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS__SHIFT 0x0
+#define SDMA0_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS__SHIFT 0x2
+#define SDMA0_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS__SHIFT 0x3
+#define SDMA0_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS__SHIFT 0x4
+#define SDMA0_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS__SHIFT 0x5
+#define SDMA0_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS__SHIFT 0x6
+#define SDMA0_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS_MASK 0x00000001L
+#define SDMA0_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS_MASK 0x00000004L
+#define SDMA0_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS_MASK 0x00000008L
+#define SDMA0_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS_MASK 0x00000010L
+#define SDMA0_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS_MASK 0x00000020L
+#define SDMA0_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS_MASK 0x00000040L
+//SDMA0_STATUS4_REG
+#define SDMA0_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA0_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA0_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA0_STATUS4_REG__CH_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA0_STATUS4_REG__CH_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA0_STATUS4_REG__GCR_OUTSTANDING__SHIFT 0x6
+#define SDMA0_STATUS4_REG__TLBI_OUTSTANDING__SHIFT 0x7
+#define SDMA0_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x8
+#define SDMA0_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x9
+#define SDMA0_STATUS4_REG__REG_POLLING__SHIFT 0xa
+#define SDMA0_STATUS4_REG__MEM_POLLING__SHIFT 0xb
+#define SDMA0_STATUS4_REG__RESERVED_13_12__SHIFT 0xc
+#define SDMA0_STATUS4_REG__RESERVED_15_14__SHIFT 0xe
+#define SDMA0_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA0_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x14
+#define SDMA0_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x15
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_FAULT__SHIFT 0x16
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_NULL__SHIFT 0x17
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT__SHIFT 0x18
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_FAULT__SHIFT 0x19
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_NULL__SHIFT 0x1a
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT__SHIFT 0x1b
+#define SDMA0_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA0_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA0_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA0_STATUS4_REG__CH_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA0_STATUS4_REG__CH_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA0_STATUS4_REG__GCR_OUTSTANDING_MASK 0x00000040L
+#define SDMA0_STATUS4_REG__TLBI_OUTSTANDING_MASK 0x00000080L
+#define SDMA0_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000100L
+#define SDMA0_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000200L
+#define SDMA0_STATUS4_REG__REG_POLLING_MASK 0x00000400L
+#define SDMA0_STATUS4_REG__MEM_POLLING_MASK 0x00000800L
+#define SDMA0_STATUS4_REG__RESERVED_13_12_MASK 0x00003000L
+#define SDMA0_STATUS4_REG__RESERVED_15_14_MASK 0x0000C000L
+#define SDMA0_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA0_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00100000L
+#define SDMA0_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00200000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_FAULT_MASK 0x00400000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_NULL_MASK 0x00800000L
+#define SDMA0_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT_MASK 0x01000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_FAULT_MASK 0x02000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_NULL_MASK 0x04000000L
+#define SDMA0_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT_MASK 0x08000000L
+//SDMA0_SCRATCH_RAM_DATA
+#define SDMA0_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA0_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA0_SCRATCH_RAM_ADDR
+#define SDMA0_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA0_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA0_TIMESTAMP_CNTL
+#define SDMA0_TIMESTAMP_CNTL__CAPTURE__SHIFT 0x0
+#define SDMA0_TIMESTAMP_CNTL__CAPTURE_MASK 0x00000001L
+//SDMA0_STATUS5_REG
+#define SDMA0_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS__SHIFT 0x0
+#define SDMA0_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS__SHIFT 0x1
+#define SDMA0_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS__SHIFT 0x2
+#define SDMA0_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS__SHIFT 0x3
+#define SDMA0_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS__SHIFT 0x4
+#define SDMA0_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS__SHIFT 0x5
+#define SDMA0_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS__SHIFT 0x6
+#define SDMA0_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS__SHIFT 0x7
+#define SDMA0_STATUS5_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA0_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x14
+#define SDMA0_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x15
+#define SDMA0_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x16
+#define SDMA0_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x17
+#define SDMA0_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x18
+#define SDMA0_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x19
+#define SDMA0_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1a
+#define SDMA0_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1b
+#define SDMA0_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS_MASK 0x00000001L
+#define SDMA0_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS_MASK 0x00000002L
+#define SDMA0_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS_MASK 0x00000004L
+#define SDMA0_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS_MASK 0x00000008L
+#define SDMA0_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS_MASK 0x00000010L
+#define SDMA0_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS_MASK 0x00000020L
+#define SDMA0_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS_MASK 0x00000040L
+#define SDMA0_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS_MASK 0x00000080L
+#define SDMA0_STATUS5_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA0_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00100000L
+#define SDMA0_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00200000L
+#define SDMA0_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00400000L
+#define SDMA0_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00800000L
+#define SDMA0_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION_MASK 0x01000000L
+#define SDMA0_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION_MASK 0x02000000L
+#define SDMA0_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION_MASK 0x04000000L
+#define SDMA0_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION_MASK 0x08000000L
+//SDMA0_QUEUE_RESET_REQ
+#define SDMA0_QUEUE_RESET_REQ__QUEUE0_RESET__SHIFT 0x0
+#define SDMA0_QUEUE_RESET_REQ__QUEUE1_RESET__SHIFT 0x1
+#define SDMA0_QUEUE_RESET_REQ__QUEUE2_RESET__SHIFT 0x2
+#define SDMA0_QUEUE_RESET_REQ__QUEUE3_RESET__SHIFT 0x3
+#define SDMA0_QUEUE_RESET_REQ__QUEUE4_RESET__SHIFT 0x4
+#define SDMA0_QUEUE_RESET_REQ__QUEUE5_RESET__SHIFT 0x5
+#define SDMA0_QUEUE_RESET_REQ__QUEUE6_RESET__SHIFT 0x6
+#define SDMA0_QUEUE_RESET_REQ__QUEUE7_RESET__SHIFT 0x7
+#define SDMA0_QUEUE_RESET_REQ__RESERVED__SHIFT 0x8
+#define SDMA0_QUEUE_RESET_REQ__QUEUE0_RESET_MASK 0x00000001L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE1_RESET_MASK 0x00000002L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE2_RESET_MASK 0x00000004L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE3_RESET_MASK 0x00000008L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE4_RESET_MASK 0x00000010L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE5_RESET_MASK 0x00000020L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE6_RESET_MASK 0x00000040L
+#define SDMA0_QUEUE_RESET_REQ__QUEUE7_RESET_MASK 0x00000080L
+#define SDMA0_QUEUE_RESET_REQ__RESERVED_MASK 0xFFFFFF00L
+//SDMA0_STATUS6_REG
+#define SDMA0_STATUS6_REG__ID__SHIFT 0x0
+#define SDMA0_STATUS6_REG__TH1F32_INSTR_PTR__SHIFT 0x2
+#define SDMA0_STATUS6_REG__TH1_EXCEPTION__SHIFT 0x10
+#define SDMA0_STATUS6_REG__ID_MASK 0x00000003L
+#define SDMA0_STATUS6_REG__TH1F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA0_STATUS6_REG__TH1_EXCEPTION_MASK 0xFFFF0000L
+//SDMA0_UCODE1_CHECKSUM
+#define SDMA0_UCODE1_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA0_UCODE1_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA0_CE_CTRL
+#define SDMA0_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA0_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA0_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA0_CE_CTRL__CE_DCC_READ_128B_ENABLE__SHIFT 0x8
+#define SDMA0_CE_CTRL__RESERVED__SHIFT 0x9
+#define SDMA0_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA0_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA0_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA0_CE_CTRL__CE_DCC_READ_128B_ENABLE_MASK 0x00000100L
+#define SDMA0_CE_CTRL__RESERVED_MASK 0xFFFFFE00L
+//SDMA0_FED_STATUS
+#define SDMA0_FED_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA0_FED_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA0_FED_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA0_FED_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA0_FED_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA0_FED_STATUS__COPY_METADATA_ECC__SHIFT 0x5
+#define SDMA0_FED_STATUS__SELFLOAD_UCODE_ECC__SHIFT 0x6
+#define SDMA0_FED_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA0_FED_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA0_FED_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA0_FED_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA0_FED_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA0_FED_STATUS__COPY_METADATA_ECC_MASK 0x00000020L
+#define SDMA0_FED_STATUS__SELFLOAD_UCODE_ECC_MASK 0x00000040L
+//SDMA0_QUEUE0_RB_CNTL
+#define SDMA0_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE0_RB_BASE
+#define SDMA0_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_BASE_HI
+#define SDMA0_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE0_RB_RPTR
+#define SDMA0_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_HI
+#define SDMA0_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR
+#define SDMA0_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_HI
+#define SDMA0_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_IB_CNTL
+#define SDMA0_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE0_IB_RPTR
+#define SDMA0_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE0_IB_OFFSET
+#define SDMA0_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE0_IB_BASE_LO
+#define SDMA0_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE0_IB_BASE_HI
+#define SDMA0_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_IB_SIZE
+#define SDMA0_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE0_SKIP_CNTL
+#define SDMA0_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE0_CONTEXT_STATUS
+#define SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE0_CONTEXT_STATUS__USE_IB__SHIFT 0x1
+#define SDMA0_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__USE_IB_MASK 0x00000002L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE0_DOORBELL
+#define SDMA0_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE0_DOORBELL_LOG
+#define SDMA0_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_DOORBELL_OFFSET
+#define SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE0_CSA_ADDR_LO
+#define SDMA0_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_CSA_ADDR_HI
+#define SDMA0_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_SCHEDULE_CNTL
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE0_IB_SUB_REMAIN
+#define SDMA0_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE0_PREEMPT
+#define SDMA0_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE0_DUMMY_REG
+#define SDMA0_QUEUE0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE0_RB_AQL_CNTL
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE0_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE0_RB_PREEMPT
+#define SDMA0_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE0_MIDCMD_DATA0
+#define SDMA0_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA1
+#define SDMA0_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA2
+#define SDMA0_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA3
+#define SDMA0_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA4
+#define SDMA0_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA5
+#define SDMA0_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA6
+#define SDMA0_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA7
+#define SDMA0_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA8
+#define SDMA0_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA9
+#define SDMA0_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_DATA10
+#define SDMA0_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE0_MIDCMD_CNTL
+#define SDMA0_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE1_RB_CNTL
+#define SDMA0_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE1_RB_BASE
+#define SDMA0_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_BASE_HI
+#define SDMA0_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE1_RB_RPTR
+#define SDMA0_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_HI
+#define SDMA0_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR
+#define SDMA0_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_HI
+#define SDMA0_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_IB_CNTL
+#define SDMA0_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE1_IB_RPTR
+#define SDMA0_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE1_IB_OFFSET
+#define SDMA0_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE1_IB_BASE_LO
+#define SDMA0_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE1_IB_BASE_HI
+#define SDMA0_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_IB_SIZE
+#define SDMA0_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE1_SKIP_CNTL
+#define SDMA0_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE1_CONTEXT_STATUS
+#define SDMA0_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE1_DOORBELL
+#define SDMA0_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE1_DOORBELL_LOG
+#define SDMA0_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_DOORBELL_OFFSET
+#define SDMA0_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE1_CSA_ADDR_LO
+#define SDMA0_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_CSA_ADDR_HI
+#define SDMA0_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_SCHEDULE_CNTL
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE1_IB_SUB_REMAIN
+#define SDMA0_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE1_PREEMPT
+#define SDMA0_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE1_DUMMY_REG
+#define SDMA0_QUEUE1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE1_RB_AQL_CNTL
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE1_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE1_RB_PREEMPT
+#define SDMA0_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE1_MIDCMD_DATA0
+#define SDMA0_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA1
+#define SDMA0_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA2
+#define SDMA0_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA3
+#define SDMA0_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA4
+#define SDMA0_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA5
+#define SDMA0_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA6
+#define SDMA0_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA7
+#define SDMA0_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA8
+#define SDMA0_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA9
+#define SDMA0_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_DATA10
+#define SDMA0_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE1_MIDCMD_CNTL
+#define SDMA0_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE2_RB_CNTL
+#define SDMA0_QUEUE2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE2_RB_BASE
+#define SDMA0_QUEUE2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_BASE_HI
+#define SDMA0_QUEUE2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE2_RB_RPTR
+#define SDMA0_QUEUE2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_HI
+#define SDMA0_QUEUE2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR
+#define SDMA0_QUEUE2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_HI
+#define SDMA0_QUEUE2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_IB_CNTL
+#define SDMA0_QUEUE2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE2_IB_RPTR
+#define SDMA0_QUEUE2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE2_IB_OFFSET
+#define SDMA0_QUEUE2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE2_IB_BASE_LO
+#define SDMA0_QUEUE2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE2_IB_BASE_HI
+#define SDMA0_QUEUE2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_IB_SIZE
+#define SDMA0_QUEUE2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE2_SKIP_CNTL
+#define SDMA0_QUEUE2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE2_CONTEXT_STATUS
+#define SDMA0_QUEUE2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE2_DOORBELL
+#define SDMA0_QUEUE2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE2_DOORBELL_LOG
+#define SDMA0_QUEUE2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_DOORBELL_OFFSET
+#define SDMA0_QUEUE2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE2_CSA_ADDR_LO
+#define SDMA0_QUEUE2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_CSA_ADDR_HI
+#define SDMA0_QUEUE2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_SCHEDULE_CNTL
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE2_IB_SUB_REMAIN
+#define SDMA0_QUEUE2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE2_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE2_PREEMPT
+#define SDMA0_QUEUE2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE2_DUMMY_REG
+#define SDMA0_QUEUE2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE2_RB_AQL_CNTL
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE2_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE2_RB_PREEMPT
+#define SDMA0_QUEUE2_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE2_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE2_MIDCMD_DATA0
+#define SDMA0_QUEUE2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA1
+#define SDMA0_QUEUE2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA2
+#define SDMA0_QUEUE2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA3
+#define SDMA0_QUEUE2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA4
+#define SDMA0_QUEUE2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA5
+#define SDMA0_QUEUE2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA6
+#define SDMA0_QUEUE2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA7
+#define SDMA0_QUEUE2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA8
+#define SDMA0_QUEUE2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA9
+#define SDMA0_QUEUE2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_DATA10
+#define SDMA0_QUEUE2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE2_MIDCMD_CNTL
+#define SDMA0_QUEUE2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE3_RB_CNTL
+#define SDMA0_QUEUE3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE3_RB_BASE
+#define SDMA0_QUEUE3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_BASE_HI
+#define SDMA0_QUEUE3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE3_RB_RPTR
+#define SDMA0_QUEUE3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_HI
+#define SDMA0_QUEUE3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR
+#define SDMA0_QUEUE3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_HI
+#define SDMA0_QUEUE3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_IB_CNTL
+#define SDMA0_QUEUE3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE3_IB_RPTR
+#define SDMA0_QUEUE3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE3_IB_OFFSET
+#define SDMA0_QUEUE3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE3_IB_BASE_LO
+#define SDMA0_QUEUE3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE3_IB_BASE_HI
+#define SDMA0_QUEUE3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_IB_SIZE
+#define SDMA0_QUEUE3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE3_SKIP_CNTL
+#define SDMA0_QUEUE3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE3_CONTEXT_STATUS
+#define SDMA0_QUEUE3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE3_DOORBELL
+#define SDMA0_QUEUE3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE3_DOORBELL_LOG
+#define SDMA0_QUEUE3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_DOORBELL_OFFSET
+#define SDMA0_QUEUE3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE3_CSA_ADDR_LO
+#define SDMA0_QUEUE3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_CSA_ADDR_HI
+#define SDMA0_QUEUE3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_SCHEDULE_CNTL
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE3_IB_SUB_REMAIN
+#define SDMA0_QUEUE3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE3_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE3_PREEMPT
+#define SDMA0_QUEUE3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE3_DUMMY_REG
+#define SDMA0_QUEUE3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE3_RB_AQL_CNTL
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE3_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE3_RB_PREEMPT
+#define SDMA0_QUEUE3_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE3_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE3_MIDCMD_DATA0
+#define SDMA0_QUEUE3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA1
+#define SDMA0_QUEUE3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA2
+#define SDMA0_QUEUE3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA3
+#define SDMA0_QUEUE3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA4
+#define SDMA0_QUEUE3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA5
+#define SDMA0_QUEUE3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA6
+#define SDMA0_QUEUE3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA7
+#define SDMA0_QUEUE3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA8
+#define SDMA0_QUEUE3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA9
+#define SDMA0_QUEUE3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_DATA10
+#define SDMA0_QUEUE3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE3_MIDCMD_CNTL
+#define SDMA0_QUEUE3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE4_RB_CNTL
+#define SDMA0_QUEUE4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE4_RB_BASE
+#define SDMA0_QUEUE4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_BASE_HI
+#define SDMA0_QUEUE4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE4_RB_RPTR
+#define SDMA0_QUEUE4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_HI
+#define SDMA0_QUEUE4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR
+#define SDMA0_QUEUE4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_HI
+#define SDMA0_QUEUE4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_IB_CNTL
+#define SDMA0_QUEUE4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE4_IB_RPTR
+#define SDMA0_QUEUE4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE4_IB_OFFSET
+#define SDMA0_QUEUE4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE4_IB_BASE_LO
+#define SDMA0_QUEUE4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE4_IB_BASE_HI
+#define SDMA0_QUEUE4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_IB_SIZE
+#define SDMA0_QUEUE4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE4_SKIP_CNTL
+#define SDMA0_QUEUE4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE4_CONTEXT_STATUS
+#define SDMA0_QUEUE4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE4_DOORBELL
+#define SDMA0_QUEUE4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE4_DOORBELL_LOG
+#define SDMA0_QUEUE4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_DOORBELL_OFFSET
+#define SDMA0_QUEUE4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE4_CSA_ADDR_LO
+#define SDMA0_QUEUE4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_CSA_ADDR_HI
+#define SDMA0_QUEUE4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_SCHEDULE_CNTL
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE4_IB_SUB_REMAIN
+#define SDMA0_QUEUE4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE4_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE4_PREEMPT
+#define SDMA0_QUEUE4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE4_DUMMY_REG
+#define SDMA0_QUEUE4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE4_RB_AQL_CNTL
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE4_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE4_RB_PREEMPT
+#define SDMA0_QUEUE4_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE4_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE4_MIDCMD_DATA0
+#define SDMA0_QUEUE4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA1
+#define SDMA0_QUEUE4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA2
+#define SDMA0_QUEUE4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA3
+#define SDMA0_QUEUE4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA4
+#define SDMA0_QUEUE4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA5
+#define SDMA0_QUEUE4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA6
+#define SDMA0_QUEUE4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA7
+#define SDMA0_QUEUE4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA8
+#define SDMA0_QUEUE4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA9
+#define SDMA0_QUEUE4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_DATA10
+#define SDMA0_QUEUE4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE4_MIDCMD_CNTL
+#define SDMA0_QUEUE4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE5_RB_CNTL
+#define SDMA0_QUEUE5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE5_RB_BASE
+#define SDMA0_QUEUE5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_BASE_HI
+#define SDMA0_QUEUE5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE5_RB_RPTR
+#define SDMA0_QUEUE5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_HI
+#define SDMA0_QUEUE5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR
+#define SDMA0_QUEUE5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_HI
+#define SDMA0_QUEUE5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_IB_CNTL
+#define SDMA0_QUEUE5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE5_IB_RPTR
+#define SDMA0_QUEUE5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE5_IB_OFFSET
+#define SDMA0_QUEUE5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE5_IB_BASE_LO
+#define SDMA0_QUEUE5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE5_IB_BASE_HI
+#define SDMA0_QUEUE5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_IB_SIZE
+#define SDMA0_QUEUE5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE5_SKIP_CNTL
+#define SDMA0_QUEUE5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE5_CONTEXT_STATUS
+#define SDMA0_QUEUE5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE5_DOORBELL
+#define SDMA0_QUEUE5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE5_DOORBELL_LOG
+#define SDMA0_QUEUE5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_DOORBELL_OFFSET
+#define SDMA0_QUEUE5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE5_CSA_ADDR_LO
+#define SDMA0_QUEUE5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_CSA_ADDR_HI
+#define SDMA0_QUEUE5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_SCHEDULE_CNTL
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE5_IB_SUB_REMAIN
+#define SDMA0_QUEUE5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE5_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE5_PREEMPT
+#define SDMA0_QUEUE5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE5_DUMMY_REG
+#define SDMA0_QUEUE5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE5_RB_AQL_CNTL
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE5_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE5_RB_PREEMPT
+#define SDMA0_QUEUE5_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE5_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE5_MIDCMD_DATA0
+#define SDMA0_QUEUE5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA1
+#define SDMA0_QUEUE5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA2
+#define SDMA0_QUEUE5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA3
+#define SDMA0_QUEUE5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA4
+#define SDMA0_QUEUE5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA5
+#define SDMA0_QUEUE5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA6
+#define SDMA0_QUEUE5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA7
+#define SDMA0_QUEUE5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA8
+#define SDMA0_QUEUE5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA9
+#define SDMA0_QUEUE5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_DATA10
+#define SDMA0_QUEUE5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE5_MIDCMD_CNTL
+#define SDMA0_QUEUE5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE6_RB_CNTL
+#define SDMA0_QUEUE6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE6_RB_BASE
+#define SDMA0_QUEUE6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_BASE_HI
+#define SDMA0_QUEUE6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE6_RB_RPTR
+#define SDMA0_QUEUE6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_HI
+#define SDMA0_QUEUE6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR
+#define SDMA0_QUEUE6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_HI
+#define SDMA0_QUEUE6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_IB_CNTL
+#define SDMA0_QUEUE6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE6_IB_RPTR
+#define SDMA0_QUEUE6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE6_IB_OFFSET
+#define SDMA0_QUEUE6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE6_IB_BASE_LO
+#define SDMA0_QUEUE6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE6_IB_BASE_HI
+#define SDMA0_QUEUE6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_IB_SIZE
+#define SDMA0_QUEUE6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE6_SKIP_CNTL
+#define SDMA0_QUEUE6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE6_CONTEXT_STATUS
+#define SDMA0_QUEUE6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE6_DOORBELL
+#define SDMA0_QUEUE6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE6_DOORBELL_LOG
+#define SDMA0_QUEUE6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_DOORBELL_OFFSET
+#define SDMA0_QUEUE6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE6_CSA_ADDR_LO
+#define SDMA0_QUEUE6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_CSA_ADDR_HI
+#define SDMA0_QUEUE6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_SCHEDULE_CNTL
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE6_IB_SUB_REMAIN
+#define SDMA0_QUEUE6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE6_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE6_PREEMPT
+#define SDMA0_QUEUE6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE6_DUMMY_REG
+#define SDMA0_QUEUE6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE6_RB_AQL_CNTL
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE6_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE6_RB_PREEMPT
+#define SDMA0_QUEUE6_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE6_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE6_MIDCMD_DATA0
+#define SDMA0_QUEUE6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA1
+#define SDMA0_QUEUE6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA2
+#define SDMA0_QUEUE6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA3
+#define SDMA0_QUEUE6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA4
+#define SDMA0_QUEUE6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA5
+#define SDMA0_QUEUE6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA6
+#define SDMA0_QUEUE6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA7
+#define SDMA0_QUEUE6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA8
+#define SDMA0_QUEUE6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA9
+#define SDMA0_QUEUE6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_DATA10
+#define SDMA0_QUEUE6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE6_MIDCMD_CNTL
+#define SDMA0_QUEUE6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA0_QUEUE7_RB_CNTL
+#define SDMA0_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA0_QUEUE7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA0_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA0_QUEUE7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA0_QUEUE7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA0_QUEUE7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA0_QUEUE7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA0_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA0_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA0_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA0_QUEUE7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA0_QUEUE7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA0_QUEUE7_RB_BASE
+#define SDMA0_QUEUE7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_BASE_HI
+#define SDMA0_QUEUE7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA0_QUEUE7_RB_RPTR
+#define SDMA0_QUEUE7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_HI
+#define SDMA0_QUEUE7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR
+#define SDMA0_QUEUE7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_HI
+#define SDMA0_QUEUE7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_ADDR_HI
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_RPTR_ADDR_LO
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_IB_CNTL
+#define SDMA0_QUEUE7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA0_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA0_QUEUE7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA0_QUEUE7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA0_QUEUE7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA0_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA0_QUEUE7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA0_QUEUE7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA0_QUEUE7_IB_RPTR
+#define SDMA0_QUEUE7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE7_IB_OFFSET
+#define SDMA0_QUEUE7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA0_QUEUE7_IB_BASE_LO
+#define SDMA0_QUEUE7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA0_QUEUE7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA0_QUEUE7_IB_BASE_HI
+#define SDMA0_QUEUE7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_IB_SIZE
+#define SDMA0_QUEUE7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA0_QUEUE7_SKIP_CNTL
+#define SDMA0_QUEUE7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA0_QUEUE7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA0_QUEUE7_CONTEXT_STATUS
+#define SDMA0_QUEUE7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA0_QUEUE7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA0_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA0_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA0_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA0_QUEUE7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA0_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA0_QUEUE7_DOORBELL
+#define SDMA0_QUEUE7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA0_QUEUE7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA0_QUEUE7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA0_QUEUE7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA0_QUEUE7_DOORBELL_LOG
+#define SDMA0_QUEUE7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA0_QUEUE7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA0_QUEUE7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA0_QUEUE7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_DOORBELL_OFFSET
+#define SDMA0_QUEUE7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA0_QUEUE7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA0_QUEUE7_CSA_ADDR_LO
+#define SDMA0_QUEUE7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_CSA_ADDR_HI
+#define SDMA0_QUEUE7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_SCHEDULE_CNTL
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA0_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA0_QUEUE7_IB_SUB_REMAIN
+#define SDMA0_QUEUE7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA0_QUEUE7_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA0_QUEUE7_PREEMPT
+#define SDMA0_QUEUE7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA0_QUEUE7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA0_QUEUE7_DUMMY_REG
+#define SDMA0_QUEUE7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA0_QUEUE7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA0_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_QUEUE7_RB_AQL_CNTL
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA0_QUEUE7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA0_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA0_QUEUE7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA0_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA0_QUEUE7_MINOR_PTR_UPDATE
+#define SDMA0_QUEUE7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA0_QUEUE7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA0_QUEUE7_RB_PREEMPT
+#define SDMA0_QUEUE7_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA0_QUEUE7_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA0_QUEUE7_MIDCMD_DATA0
+#define SDMA0_QUEUE7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA1
+#define SDMA0_QUEUE7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA2
+#define SDMA0_QUEUE7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA3
+#define SDMA0_QUEUE7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA4
+#define SDMA0_QUEUE7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA5
+#define SDMA0_QUEUE7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA6
+#define SDMA0_QUEUE7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA7
+#define SDMA0_QUEUE7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA8
+#define SDMA0_QUEUE7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA9
+#define SDMA0_QUEUE7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_DATA10
+#define SDMA0_QUEUE7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA0_QUEUE7_MIDCMD_CNTL
+#define SDMA0_QUEUE7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_QUEUE7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_QUEUE7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_QUEUE7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+
+// addressBlock: gc_sdma0_sdma1dec
+//SDMA1_DEC_START
+#define SDMA1_DEC_START__START__SHIFT 0x0
+#define SDMA1_DEC_START__START_MASK 0xFFFFFFFFL
+//SDMA1_F32_MISC_CNTL
+#define SDMA1_F32_MISC_CNTL__F32_WAKEUP__SHIFT 0x0
+#define SDMA1_F32_MISC_CNTL__F32_WAKEUP_MASK 0x00000001L
+//SDMA1_GLOBAL_TIMESTAMP_LO
+#define SDMA1_GLOBAL_TIMESTAMP_LO__DATA__SHIFT 0x0
+#define SDMA1_GLOBAL_TIMESTAMP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_GLOBAL_TIMESTAMP_HI
+#define SDMA1_GLOBAL_TIMESTAMP_HI__DATA__SHIFT 0x0
+#define SDMA1_GLOBAL_TIMESTAMP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_POWER_CNTL
+#define SDMA1_POWER_CNTL__LS_ENABLE__SHIFT 0x8
+#define SDMA1_POWER_CNTL__LS_ENABLE_MASK 0x00000100L
+//SDMA1_CNTL
+#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2
+#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5
+#define SDMA1_CNTL__PIO_DONE_ACK_ENABLE__SHIFT 0x6
+#define SDMA1_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE__SHIFT 0x8
+#define SDMA1_CNTL__MIDCMD_EXPIRE_ENABLE__SHIFT 0x9
+#define SDMA1_CNTL__CP_MES_INT_ENABLE__SHIFT 0xa
+#define SDMA1_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE__SHIFT 0xb
+#define SDMA1_CNTL__PAGE_NULL_INT_ENABLE__SHIFT 0xc
+#define SDMA1_CNTL__PAGE_FAULT_INT_ENABLE__SHIFT 0xd
+#define SDMA1_CNTL__CH_PERFCNT_ENABLE__SHIFT 0x10
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11
+#define SDMA1_CNTL__DRM_RESTORE_ENABLE__SHIFT 0x13
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c
+#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e
+#define SDMA1_CNTL__RB_PREEMPT_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L
+#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L
+#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L
+#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L
+#define SDMA1_CNTL__PIO_DONE_ACK_ENABLE_MASK 0x00000040L
+#define SDMA1_CNTL__TMZ_MIDCMD_PREEMPT_ENABLE_MASK 0x00000100L
+#define SDMA1_CNTL__MIDCMD_EXPIRE_ENABLE_MASK 0x00000200L
+#define SDMA1_CNTL__CP_MES_INT_ENABLE_MASK 0x00000400L
+#define SDMA1_CNTL__PAGE_RETRY_TIMEOUT_INT_ENABLE_MASK 0x00000800L
+#define SDMA1_CNTL__PAGE_NULL_INT_ENABLE_MASK 0x00001000L
+#define SDMA1_CNTL__PAGE_FAULT_INT_ENABLE_MASK 0x00002000L
+#define SDMA1_CNTL__CH_PERFCNT_ENABLE_MASK 0x00010000L
+#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L
+#define SDMA1_CNTL__DRM_RESTORE_ENABLE_MASK 0x00080000L
+#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L
+#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L
+#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L
+#define SDMA1_CNTL__RB_PREEMPT_INT_ENABLE_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2
+#define SDMA1_CHICKEN_BITS__BACK_COMPAT_ENABLE__SHIFT 0x3
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x5
+#define SDMA1_CHICKEN_BITS__RD_BURST__SHIFT 0x6
+#define SDMA1_CHICKEN_BITS__WR_BURST__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE__SHIFT 0xa
+#define SDMA1_CHICKEN_BITS__WR_COMBINE_256B_ENABLE__SHIFT 0xe
+#define SDMA1_CHICKEN_BITS__RD_COMBINE_256B_ENABLE__SHIFT 0xf
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11
+#define SDMA1_CHICKEN_BITS__T2L_256B_ENABLE__SHIFT 0x12
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG__SHIFT 0x13
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG__SHIFT 0x15
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG__SHIFT 0x16
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x18
+#define SDMA1_CHICKEN_BITS__SW_FREEZE_ENABLE__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL__SHIFT 0x1a
+#define SDMA1_CHICKEN_BITS__RESERVED__SHIFT 0x1b
+#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L
+#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L
+#define SDMA1_CHICKEN_BITS__BACK_COMPAT_ENABLE_MASK 0x00000008L
+#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00000020L
+#define SDMA1_CHICKEN_BITS__RD_BURST_MASK 0x000000C0L
+#define SDMA1_CHICKEN_BITS__WR_BURST_MASK 0x00000300L
+#define SDMA1_CHICKEN_BITS__COMBINE_256B_WAIT_CYCLE_MASK 0x00003C00L
+#define SDMA1_CHICKEN_BITS__WR_COMBINE_256B_ENABLE_MASK 0x00004000L
+#define SDMA1_CHICKEN_BITS__RD_COMBINE_256B_ENABLE_MASK 0x00008000L
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L
+#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L
+#define SDMA1_CHICKEN_BITS__T2L_256B_ENABLE_MASK 0x00040000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GCR_FGCG_MASK 0x00080000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_GRBM_FGCG_MASK 0x00100000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_CH_FGCG_MASK 0x00200000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL2_INVREQ_FGCG_MASK 0x00400000L
+#define SDMA1_CHICKEN_BITS__SOFT_OVERRIDE_UTCL1_FGCG_MASK 0x00800000L
+#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x01000000L
+#define SDMA1_CHICKEN_BITS__SW_FREEZE_ENABLE_MASK 0x02000000L
+#define SDMA1_CHICKEN_BITS__DRAM_ECC_COPY_MODE_CNTL_MASK 0x04000000L
+#define SDMA1_CHICKEN_BITS__RESERVED_MASK 0xF8000000L
+//SDMA1_GB_ADDR_CONFIG
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA1_GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA1_GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA1_GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA1_GB_ADDR_CONFIG_READ
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define SDMA1_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define SDMA1_GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define SDMA1_GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//SDMA1_RB_RPTR_FETCH
+#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL
+//SDMA1_RB_RPTR_FETCH_HI
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0
+#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0
+#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL
+//SDMA1_IB_OFFSET_FETCH
+#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2
+#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL
+//SDMA1_PROGRAM
+#define SDMA1_PROGRAM__STREAM__SHIFT 0x0
+#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL
+//SDMA1_STATUS_REG
+#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1
+#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2
+#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3
+#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4
+#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5
+#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6
+#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7
+#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8
+#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9
+#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa
+#define SDMA1_STATUS_REG__CGCG_FENCE__SHIFT 0xb
+#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc
+#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd
+#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12
+#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16
+#define SDMA1_STATUS_REG__DRM_IDLE__SHIFT 0x17
+#define SDMA1_STATUS_REG__DRM_MASK_FULL__SHIFT 0x18
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19
+#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a
+#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b
+#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c
+#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e
+#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f
+#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L
+#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L
+#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L
+#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L
+#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L
+#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L
+#define SDMA1_STATUS_REG__CGCG_FENCE_MASK 0x00000800L
+#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L
+#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L
+#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L
+#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L
+#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L
+#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L
+#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L
+#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L
+#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS_REG__DRM_IDLE_MASK 0x00800000L
+#define SDMA1_STATUS_REG__DRM_MASK_FULL_MASK 0x01000000L
+#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L
+#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L
+#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L
+#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L
+#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L
+#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L
+//SDMA1_STATUS1_REG
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0
+#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4
+#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5
+#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6
+#define SDMA1_STATUS1_REG__CE_DRM_IDLE__SHIFT 0x7
+#define SDMA1_STATUS1_REG__CE_DRM1_IDLE__SHIFT 0x8
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa
+#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xb
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xc
+#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xd
+#define SDMA1_STATUS1_REG__DRM_CTX_RESTORE__SHIFT 0xe
+#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0xf
+#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x10
+#define SDMA1_STATUS1_REG__SEC_INTR_STATUS__SHIFT 0x11
+#define SDMA1_STATUS1_REG__WPTR_POLL_IDLE__SHIFT 0x12
+#define SDMA1_STATUS1_REG__SDMA_IDLE__SHIFT 0x13
+#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L
+#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L
+#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L
+#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L
+#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L
+#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L
+#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L
+#define SDMA1_STATUS1_REG__CE_DRM_IDLE_MASK 0x00000080L
+#define SDMA1_STATUS1_REG__CE_DRM1_IDLE_MASK 0x00000100L
+#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L
+#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L
+#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00000800L
+#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00001000L
+#define SDMA1_STATUS1_REG__EX_START_MASK 0x00002000L
+#define SDMA1_STATUS1_REG__DRM_CTX_RESTORE_MASK 0x00004000L
+#define SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00008000L
+#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00010000L
+#define SDMA1_STATUS1_REG__SEC_INTR_STATUS_MASK 0x00020000L
+#define SDMA1_STATUS1_REG__WPTR_POLL_IDLE_MASK 0x00040000L
+#define SDMA1_STATUS1_REG__SDMA_IDLE_MASK 0x00080000L
+//SDMA1_CNTL1
+#define SDMA1_CNTL1__WPTR_POLL_FREQUENCY__SHIFT 0x2
+#define SDMA1_CNTL1__WPTR_POLL_FREQUENCY_MASK 0x0000FFFCL
+//SDMA1_HBM_PAGE_CONFIG
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0
+#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L
+//SDMA1_UCODE_CHECKSUM
+#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_FREEZE
+#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0
+#define SDMA1_FREEZE__FREEZE__SHIFT 0x4
+#define SDMA1_FREEZE__FROZEN__SHIFT 0x5
+#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6
+#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L
+#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L
+#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L
+#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L
+//SDMA1_PROCESS_QUANTUM0
+#define SDMA1_PROCESS_QUANTUM0__PROCESS0_QUANTUM__SHIFT 0x0
+#define SDMA1_PROCESS_QUANTUM0__PROCESS1_QUANTUM__SHIFT 0x8
+#define SDMA1_PROCESS_QUANTUM0__PROCESS2_QUANTUM__SHIFT 0x10
+#define SDMA1_PROCESS_QUANTUM0__PROCESS3_QUANTUM__SHIFT 0x18
+#define SDMA1_PROCESS_QUANTUM0__PROCESS0_QUANTUM_MASK 0x000000FFL
+#define SDMA1_PROCESS_QUANTUM0__PROCESS1_QUANTUM_MASK 0x0000FF00L
+#define SDMA1_PROCESS_QUANTUM0__PROCESS2_QUANTUM_MASK 0x00FF0000L
+#define SDMA1_PROCESS_QUANTUM0__PROCESS3_QUANTUM_MASK 0xFF000000L
+//SDMA1_PROCESS_QUANTUM1
+#define SDMA1_PROCESS_QUANTUM1__PROCESS4_QUANTUM__SHIFT 0x0
+#define SDMA1_PROCESS_QUANTUM1__PROCESS5_QUANTUM__SHIFT 0x8
+#define SDMA1_PROCESS_QUANTUM1__PROCESS6_QUANTUM__SHIFT 0x10
+#define SDMA1_PROCESS_QUANTUM1__PROCESS7_QUANTUM__SHIFT 0x18
+#define SDMA1_PROCESS_QUANTUM1__PROCESS4_QUANTUM_MASK 0x000000FFL
+#define SDMA1_PROCESS_QUANTUM1__PROCESS5_QUANTUM_MASK 0x0000FF00L
+#define SDMA1_PROCESS_QUANTUM1__PROCESS6_QUANTUM_MASK 0x00FF0000L
+#define SDMA1_PROCESS_QUANTUM1__PROCESS7_QUANTUM_MASK 0xFF000000L
+//SDMA1_WATCHDOG_CNTL
+#define SDMA1_WATCHDOG_CNTL__QUEUE_HANG_COUNT__SHIFT 0x0
+#define SDMA1_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT__SHIFT 0x8
+#define SDMA1_WATCHDOG_CNTL__QUEUE_HANG_COUNT_MASK 0x000000FFL
+#define SDMA1_WATCHDOG_CNTL__CMD_TIMEOUT_COUNT_MASK 0x0000FF00L
+//SDMA1_QUEUE_STATUS0
+#define SDMA1_QUEUE_STATUS0__QUEUE0_STATUS__SHIFT 0x0
+#define SDMA1_QUEUE_STATUS0__QUEUE1_STATUS__SHIFT 0x4
+#define SDMA1_QUEUE_STATUS0__QUEUE2_STATUS__SHIFT 0x8
+#define SDMA1_QUEUE_STATUS0__QUEUE3_STATUS__SHIFT 0xc
+#define SDMA1_QUEUE_STATUS0__QUEUE4_STATUS__SHIFT 0x10
+#define SDMA1_QUEUE_STATUS0__QUEUE5_STATUS__SHIFT 0x14
+#define SDMA1_QUEUE_STATUS0__QUEUE6_STATUS__SHIFT 0x18
+#define SDMA1_QUEUE_STATUS0__QUEUE7_STATUS__SHIFT 0x1c
+#define SDMA1_QUEUE_STATUS0__QUEUE0_STATUS_MASK 0x0000000FL
+#define SDMA1_QUEUE_STATUS0__QUEUE1_STATUS_MASK 0x000000F0L
+#define SDMA1_QUEUE_STATUS0__QUEUE2_STATUS_MASK 0x00000F00L
+#define SDMA1_QUEUE_STATUS0__QUEUE3_STATUS_MASK 0x0000F000L
+#define SDMA1_QUEUE_STATUS0__QUEUE4_STATUS_MASK 0x000F0000L
+#define SDMA1_QUEUE_STATUS0__QUEUE5_STATUS_MASK 0x00F00000L
+#define SDMA1_QUEUE_STATUS0__QUEUE6_STATUS_MASK 0x0F000000L
+#define SDMA1_QUEUE_STATUS0__QUEUE7_STATUS_MASK 0xF0000000L
+//SDMA1_EDC_CONFIG
+#define SDMA1_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2
+#define SDMA1_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L
+//SDMA1_BA_THRESHOLD
+#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0
+#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10
+#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL
+#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L
+//SDMA1_ID
+#define SDMA1_ID__DEVICE_ID__SHIFT 0x0
+#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL
+//SDMA1_VERSION
+#define SDMA1_VERSION__MINVER__SHIFT 0x0
+#define SDMA1_VERSION__MAJVER__SHIFT 0x8
+#define SDMA1_VERSION__REV__SHIFT 0x10
+#define SDMA1_VERSION__MINVER_MASK 0x0000007FL
+#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L
+#define SDMA1_VERSION__REV_MASK 0x003F0000L
+//SDMA1_EDC_COUNTER
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED__SHIFT 0x0
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC__SHIFT 0x1
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0xf
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x10
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_DED_MASK 0x00000001L
+#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SEC_MASK 0x00000002L
+#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L
+#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L
+#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L
+#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L
+#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L
+#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00008000L
+#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x00010000L
+//SDMA1_EDC_COUNTER_CLEAR
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0
+#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L
+//SDMA1_STATUS2_REG
+#define SDMA1_STATUS2_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS2_REG__TH0F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10
+#define SDMA1_STATUS2_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS2_REG__TH0F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L
+//SDMA1_ATOMIC_CNTL
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f
+#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL
+#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L
+//SDMA1_ATOMIC_PREOP_LO
+#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL
+//SDMA1_ATOMIC_PREOP_HI
+#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0
+#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_CNTL
+#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x0
+#define SDMA1_UTCL1_CNTL__PAGE_WAIT_DELAY__SHIFT 0x5
+#define SDMA1_UTCL1_CNTL__RESP_MODE__SHIFT 0x9
+#define SDMA1_UTCL1_CNTL__FORCE_INVALIDATION__SHIFT 0xe
+#define SDMA1_UTCL1_CNTL__FORCE_INVREQ_HEAVY__SHIFT 0xf
+#define SDMA1_UTCL1_CNTL__WR_EXE_PERMS_CTRL__SHIFT 0x10
+#define SDMA1_UTCL1_CNTL__RD_EXE_PERMS_CTRL__SHIFT 0x11
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0x12
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18
+#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x0000001FL
+#define SDMA1_UTCL1_CNTL__PAGE_WAIT_DELAY_MASK 0x000001E0L
+#define SDMA1_UTCL1_CNTL__RESP_MODE_MASK 0x00000600L
+#define SDMA1_UTCL1_CNTL__FORCE_INVALIDATION_MASK 0x00004000L
+#define SDMA1_UTCL1_CNTL__FORCE_INVREQ_HEAVY_MASK 0x00008000L
+#define SDMA1_UTCL1_CNTL__WR_EXE_PERMS_CTRL_MASK 0x00010000L
+#define SDMA1_UTCL1_CNTL__RD_EXE_PERMS_CTRL_MASK 0x00020000L
+#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x003C0000L
+#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x3F000000L
+//SDMA1_UTCL1_WATERMK
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK__SHIFT 0x0
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP__SHIFT 0x4
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK__SHIFT 0x6
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP__SHIFT 0xa
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK__SHIFT 0xc
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP__SHIFT 0x10
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK__SHIFT 0x12
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP__SHIFT 0x16
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_WATERMK_MASK 0x0000000FL
+#define SDMA1_UTCL1_WATERMK__WR_REQ_FIFO_DEPTH_STEP_MASK 0x00000030L
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_WATERMK_MASK 0x000003C0L
+#define SDMA1_UTCL1_WATERMK__RD_REQ_FIFO_DEPTH_STEP_MASK 0x00000C00L
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_WATERMK_MASK 0x0000F000L
+#define SDMA1_UTCL1_WATERMK__WR_PAGE_FIFO_DEPTH_STEP_MASK 0x00030000L
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_WATERMK_MASK 0x003C0000L
+#define SDMA1_UTCL1_WATERMK__RD_PAGE_FIFO_DEPTH_STEP_MASK 0x00C00000L
+//SDMA1_UTCL1_TIMEOUT
+#define SDMA1_UTCL1_TIMEOUT__XNACK_LIMIT__SHIFT 0x0
+#define SDMA1_UTCL1_TIMEOUT__XNACK_LIMIT_MASK 0x0000FFFFL
+//SDMA1_UTCL1_PAGE
+#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0
+#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1
+#define SDMA1_UTCL1_PAGE__TMZ_ENABLE__SHIFT 0x5
+#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0xa
+#define SDMA1_UTCL1_PAGE__USE_IO__SHIFT 0xb
+#define SDMA1_UTCL1_PAGE__RD_L2_POLICY__SHIFT 0xc
+#define SDMA1_UTCL1_PAGE__WR_L2_POLICY__SHIFT 0xe
+#define SDMA1_UTCL1_PAGE__DMA_PAGE_SIZE__SHIFT 0x10
+#define SDMA1_UTCL1_PAGE__USE_BC__SHIFT 0x16
+#define SDMA1_UTCL1_PAGE__ADDR_IS_PA__SHIFT 0x17
+#define SDMA1_UTCL1_PAGE__LLC_NOALLOC__SHIFT 0x18
+#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L
+#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL
+#define SDMA1_UTCL1_PAGE__TMZ_ENABLE_MASK 0x00000020L
+#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000003C0L
+#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000400L
+#define SDMA1_UTCL1_PAGE__USE_IO_MASK 0x00000800L
+#define SDMA1_UTCL1_PAGE__RD_L2_POLICY_MASK 0x00003000L
+#define SDMA1_UTCL1_PAGE__WR_L2_POLICY_MASK 0x0000C000L
+#define SDMA1_UTCL1_PAGE__DMA_PAGE_SIZE_MASK 0x003F0000L
+#define SDMA1_UTCL1_PAGE__USE_BC_MASK 0x00400000L
+#define SDMA1_UTCL1_PAGE__ADDR_IS_PA_MASK 0x00800000L
+#define SDMA1_UTCL1_PAGE__LLC_NOALLOC_MASK 0x01000000L
+//SDMA1_UTCL1_RD_STATUS
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_RD_STATUS__RESERVED0__SHIFT 0x5
+#define SDMA1_UTCL1_RD_STATUS__RESERVED1__SHIFT 0x6
+#define SDMA1_UTCL1_RD_STATUS__META_Q_EMPTY__SHIFT 0x7
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_RD_STATUS__RESERVED2__SHIFT 0xd
+#define SDMA1_UTCL1_RD_STATUS__RESERVED3__SHIFT 0xe
+#define SDMA1_UTCL1_RD_STATUS__META_Q_FULL__SHIFT 0xf
+#define SDMA1_UTCL1_RD_STATUS__RD_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA1_UTCL1_RD_STATUS__RD_REQRET_IDLE__SHIFT 0x11
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_IDLE__SHIFT 0x12
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_TYPE__SHIFT 0x13
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REG_READY__SHIFT 0x17
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA1_UTCL1_RD_STATUS__RESERVED4__SHIFT 0x1a
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_IN_RTR__SHIFT 0x1c
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_RD_STATUS__INV_BUSY__SHIFT 0x1e
+#define SDMA1_UTCL1_RD_STATUS__DBIT_REQ_IDLE__SHIFT 0x1f
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED0_MASK 0x00000020L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED1_MASK 0x00000040L
+#define SDMA1_UTCL1_RD_STATUS__META_Q_EMPTY_MASK 0x00000080L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA1_UTCL1_RD_STATUS__RD_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_RD_STATUS__RD_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_RD_STATUS__RD_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED2_MASK 0x00002000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED3_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_STATUS__META_Q_FULL_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_STATUS__RD_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQRET_IDLE_MASK 0x00020000L
+#define SDMA1_UTCL1_RD_STATUS__RD_REQ_IDLE_MASK 0x00040000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_TYPE_MASK 0x00180000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA1_UTCL1_RD_STATUS__RESERVED4_MASK 0x04000000L
+#define SDMA1_UTCL1_RD_STATUS__RD_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_IN_RTR_MASK 0x10000000L
+#define SDMA1_UTCL1_RD_STATUS__RDREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_RD_STATUS__INV_BUSY_MASK 0x40000000L
+#define SDMA1_UTCL1_RD_STATUS__DBIT_REQ_IDLE_MASK 0x80000000L
+//SDMA1_UTCL1_WR_STATUS
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY__SHIFT 0x0
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY__SHIFT 0x1
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY__SHIFT 0x2
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY__SHIFT 0x3
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY__SHIFT 0x4
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_EMPTY__SHIFT 0x5
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_EMPTY__SHIFT 0x6
+#define SDMA1_UTCL1_WR_STATUS__RESERVED0__SHIFT 0x7
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_FULL__SHIFT 0x8
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL__SHIFT 0x9
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL__SHIFT 0xa
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL__SHIFT 0xb
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL__SHIFT 0xc
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_FULL__SHIFT 0xd
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_FULL__SHIFT 0xe
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0xf
+#define SDMA1_UTCL1_WR_STATUS__WR_L2_INTF_IDLE__SHIFT 0x10
+#define SDMA1_UTCL1_WR_STATUS__WR_REQRET_IDLE__SHIFT 0x11
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_IDLE__SHIFT 0x12
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_TYPE__SHIFT 0x13
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY__SHIFT 0x15
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY__SHIFT 0x16
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REG_READY__SHIFT 0x17
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY__SHIFT 0x18
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY__SHIFT 0x19
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL__SHIFT 0x1a
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR__SHIFT 0x1b
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_RTR__SHIFT 0x1c
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_OUT_RTR__SHIFT 0x1d
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR__SHIFT 0x1e
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR__SHIFT 0x1f
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_EMPTY_MASK 0x00000001L
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_EMPTY_MASK 0x00000002L
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_EMPTY_MASK 0x00000004L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_EMPTY_MASK 0x00000008L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_EMPTY_MASK 0x00000010L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_EMPTY_MASK 0x00000020L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_EMPTY_MASK 0x00000040L
+#define SDMA1_UTCL1_WR_STATUS__RESERVED0_MASK 0x00000080L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_FIFO_FULL_MASK 0x00000100L
+#define SDMA1_UTCL1_WR_STATUS__WR_REG_ENTRY_FULL_MASK 0x00000200L
+#define SDMA1_UTCL1_WR_STATUS__WR_PAGE_FIFO_FULL_MASK 0x00000400L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_FIFO_FULL_MASK 0x00000800L
+#define SDMA1_UTCL1_WR_STATUS__WR_VA_REQ_FIFO_FULL_MASK 0x00001000L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA2_FULL_MASK 0x00002000L
+#define SDMA1_UTCL1_WR_STATUS__WR_DATA1_FULL_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_STATUS__WR_L2_INTF_IDLE_MASK 0x00010000L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQRET_IDLE_MASK 0x00020000L
+#define SDMA1_UTCL1_WR_STATUS__WR_REQ_IDLE_MASK 0x00040000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_TYPE_MASK 0x00180000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_PA_READY_MASK 0x00200000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_META_PA_READY_MASK 0x00400000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REG_READY_MASK 0x00800000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_PAGE_FIFO_READY_MASK 0x01000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_REQ_FIFO_READY_MASK 0x02000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_DATA_SEL_MASK 0x04000000L
+#define SDMA1_UTCL1_WR_STATUS__WR_MERGE_OUT_RTR_MASK 0x08000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_RTR_MASK 0x10000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_OUT_RTR_MASK 0x20000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA1_RTR_MASK 0x40000000L
+#define SDMA1_UTCL1_WR_STATUS__WRREQ_IN_DATA2_RTR_MASK 0x80000000L
+//SDMA1_UTCL1_INV0
+#define SDMA1_UTCL1_INV0__INV_PROC_BUSY__SHIFT 0x0
+#define SDMA1_UTCL1_INV0__GPUVM_FRAG_SIZE__SHIFT 0x1
+#define SDMA1_UTCL1_INV0__GPUVM_VMID__SHIFT 0x7
+#define SDMA1_UTCL1_INV0__GPUVM_MODE__SHIFT 0xb
+#define SDMA1_UTCL1_INV0__GPUVM_HIGH__SHIFT 0xd
+#define SDMA1_UTCL1_INV0__GPUVM_TAG__SHIFT 0xe
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_HIGH__SHIFT 0x12
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_LOW__SHIFT 0x16
+#define SDMA1_UTCL1_INV0__INV_TYPE__SHIFT 0x1a
+#define SDMA1_UTCL1_INV0__INV_PROC_BUSY_MASK 0x00000001L
+#define SDMA1_UTCL1_INV0__GPUVM_FRAG_SIZE_MASK 0x0000007EL
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_MASK 0x00000780L
+#define SDMA1_UTCL1_INV0__GPUVM_MODE_MASK 0x00001800L
+#define SDMA1_UTCL1_INV0__GPUVM_HIGH_MASK 0x00002000L
+#define SDMA1_UTCL1_INV0__GPUVM_TAG_MASK 0x0003C000L
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_HIGH_MASK 0x003C0000L
+#define SDMA1_UTCL1_INV0__GPUVM_VMID_LOW_MASK 0x03C00000L
+#define SDMA1_UTCL1_INV0__INV_TYPE_MASK 0x0C000000L
+//SDMA1_UTCL1_INV1
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_INV2
+#define SDMA1_UTCL1_INV2__CPF_VMID__SHIFT 0x0
+#define SDMA1_UTCL1_INV2__CPF_FLUSH_TYPE__SHIFT 0x10
+#define SDMA1_UTCL1_INV2__CPF_FRAG_SIZE__SHIFT 0x11
+#define SDMA1_UTCL1_INV2__CPF_VMID_MASK 0x0000FFFFL
+#define SDMA1_UTCL1_INV2__CPF_FLUSH_TYPE_MASK 0x00010000L
+#define SDMA1_UTCL1_INV2__CPF_FRAG_SIZE_MASK 0x007E0000L
+//SDMA1_UTCL1_RD_XNACK0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_RD_XNACK1
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA1_UTCL1_RD_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA1_UTCL1_WR_XNACK0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK0__XNACK_FAULT_ADDR_LO_MASK 0xFFFFFFFFL
+//SDMA1_UTCL1_WR_XNACK1
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI__SHIFT 0x0
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VMID__SHIFT 0x4
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR__SHIFT 0x8
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR__SHIFT 0xa
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR__SHIFT 0xc
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG__SHIFT 0xe
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_FLAG__SHIFT 0xf
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG__SHIFT 0x10
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_ADDR_HI_MASK 0x0000000FL
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VMID_MASK 0x000000F0L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_VECTOR_MASK 0x00000300L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_VECTOR_MASK 0x00000C00L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_VECTOR_MASK 0x00003000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_FAULT_FLAG_MASK 0x00004000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_NULL_FLAG_MASK 0x00008000L
+#define SDMA1_UTCL1_WR_XNACK1__XNACK_TIMEOUT_FLAG_MASK 0x00010000L
+//SDMA1_RELAX_ORDERING_LUT
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0
+#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1
+#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4
+#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L
+#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L
+#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L
+#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L
+#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L
+#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L
+#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L
+#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L
+#define SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L
+#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L
+#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L
+#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L
+#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L
+#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L
+#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L
+#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L
+//SDMA1_CHICKEN_BITS_2
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0
+#define SDMA1_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN__SHIFT 0x4
+#define SDMA1_CHICKEN_BITS_2__UCODE_BUF_DS_EN__SHIFT 0x6
+#define SDMA1_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP__SHIFT 0x7
+#define SDMA1_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING__SHIFT 0x8
+#define SDMA1_CHICKEN_BITS_2__RESERVED_14_12__SHIFT 0xc
+#define SDMA1_CHICKEN_BITS_2__RESERVED_15__SHIFT 0xf
+#define SDMA1_CHICKEN_BITS_2__RB_FIFO_WATERMARK__SHIFT 0x10
+#define SDMA1_CHICKEN_BITS_2__IB_FIFO_WATERMARK__SHIFT 0x12
+#define SDMA1_CHICKEN_BITS_2__RESERVED_22_20__SHIFT 0x14
+#define SDMA1_CHICKEN_BITS_2__CH_RD_WATERMARK__SHIFT 0x17
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK__SHIFT 0x19
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB__SHIFT 0x1e
+#define SDMA1_CHICKEN_BITS_2__PIO_VFID_SOURCE__SHIFT 0x1f
+#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL
+#define SDMA1_CHICKEN_BITS_2__F32_SEND_POSTCODE_EN_MASK 0x00000010L
+#define SDMA1_CHICKEN_BITS_2__UCODE_BUF_DS_EN_MASK 0x00000040L
+#define SDMA1_CHICKEN_BITS_2__UCODE_SELFLOAD_THREAD_OVERLAP_MASK 0x00000080L
+#define SDMA1_CHICKEN_BITS_2__WPTR_POLL_OUTSTANDING_MASK 0x00000F00L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_14_12_MASK 0x00007000L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_15_MASK 0x00008000L
+#define SDMA1_CHICKEN_BITS_2__RB_FIFO_WATERMARK_MASK 0x00030000L
+#define SDMA1_CHICKEN_BITS_2__IB_FIFO_WATERMARK_MASK 0x000C0000L
+#define SDMA1_CHICKEN_BITS_2__RESERVED_22_20_MASK 0x00700000L
+#define SDMA1_CHICKEN_BITS_2__CH_RD_WATERMARK_MASK 0x01800000L
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_MASK 0x3E000000L
+#define SDMA1_CHICKEN_BITS_2__CH_WR_WATERMARK_LSB_MASK 0x40000000L
+#define SDMA1_CHICKEN_BITS_2__PIO_VFID_SOURCE_MASK 0x80000000L
+//SDMA1_STATUS3_REG
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0
+#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14
+#define SDMA1_STATUS3_REG__AQL_PREV_CMD_IDLE__SHIFT 0x15
+#define SDMA1_STATUS3_REG__TLBI_IDLE__SHIFT 0x16
+#define SDMA1_STATUS3_REG__GCR_IDLE__SHIFT 0x17
+#define SDMA1_STATUS3_REG__INVREQ_IDLE__SHIFT 0x18
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x19
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x1a
+#define SDMA1_STATUS3_REG__TMZ_MTYPE_STATUS__SHIFT 0x1e
+#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL
+#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L
+#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L
+#define SDMA1_STATUS3_REG__AQL_PREV_CMD_IDLE_MASK 0x00200000L
+#define SDMA1_STATUS3_REG__TLBI_IDLE_MASK 0x00400000L
+#define SDMA1_STATUS3_REG__GCR_IDLE_MASK 0x00800000L
+#define SDMA1_STATUS3_REG__INVREQ_IDLE_MASK 0x01000000L
+#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x02000000L
+#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x3C000000L
+#define SDMA1_STATUS3_REG__TMZ_MTYPE_STATUS_MASK 0xC0000000L
+//SDMA1_PHYSICAL_ADDR_LO
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc
+#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L
+#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L
+#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L
+#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//SDMA1_PHYSICAL_ADDR_HI
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL
+//SDMA1_GLOBAL_QUANTUM
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM__SHIFT 0x0
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM__SHIFT 0x8
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_FOCUS_QUANTUM_MASK 0x000000FFL
+#define SDMA1_GLOBAL_QUANTUM__GLOBAL_NORMAL_QUANTUM_MASK 0x0000FF00L
+//SDMA1_ERROR_LOG
+#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0
+#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10
+#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL
+#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L
+//SDMA1_PUB_DUMMY_REG0
+#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG1
+#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG2
+#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_PUB_DUMMY_REG3
+#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0
+#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_F32_COUNTER
+#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0
+#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_CRD_CNTL
+#define SDMA1_CRD_CNTL__DRM_CREDIT__SHIFT 0x0
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd
+#define SDMA1_CRD_CNTL__CH_WRREQ_CREDIT__SHIFT 0x13
+#define SDMA1_CRD_CNTL__CH_RDREQ_CREDIT__SHIFT 0x19
+#define SDMA1_CRD_CNTL__DRM_CREDIT_MASK 0x0000007FL
+#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L
+#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L
+#define SDMA1_CRD_CNTL__CH_WRREQ_CREDIT_MASK 0x01F80000L
+#define SDMA1_CRD_CNTL__CH_RDREQ_CREDIT_MASK 0x7E000000L
+//SDMA1_RLC_CGCG_CTRL
+#define SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE__SHIFT 0x1
+#define SDMA1_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS__SHIFT 0x10
+#define SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK 0x00000002L
+#define SDMA1_RLC_CGCG_CTRL__CGCG_IDLE_HYSTERESIS_MASK 0xFFFF0000L
+//SDMA1_GPU_IOV_VIOLATION_LOG
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL
+#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L
+#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L
+//SDMA1_AQL_STATUS
+#define SDMA1_AQL_STATUS__COMPLETE_SIGNAL_EMPTY__SHIFT 0x0
+#define SDMA1_AQL_STATUS__INVALID_CMD_EMPTY__SHIFT 0x1
+#define SDMA1_AQL_STATUS__COMPLETE_SIGNAL_EMPTY_MASK 0x00000001L
+#define SDMA1_AQL_STATUS__INVALID_CMD_EMPTY_MASK 0x00000002L
+//SDMA1_EA_DBIT_ADDR_DATA
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_EA_DBIT_ADDR_INDEX
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0
+#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L
+//SDMA1_TLBI_GCR_CNTL
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CMD_DW__SHIFT 0x0
+#define SDMA1_TLBI_GCR_CNTL__GCR_CMD_DW__SHIFT 0x4
+#define SDMA1_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE__SHIFT 0x8
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CREDIT__SHIFT 0x10
+#define SDMA1_TLBI_GCR_CNTL__GCR_CREDIT__SHIFT 0x18
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CMD_DW_MASK 0x0000000FL
+#define SDMA1_TLBI_GCR_CNTL__GCR_CMD_DW_MASK 0x000000F0L
+#define SDMA1_TLBI_GCR_CNTL__GCR_CLKEN_CYCLE_MASK 0x00000F00L
+#define SDMA1_TLBI_GCR_CNTL__TLBI_CREDIT_MASK 0x00FF0000L
+#define SDMA1_TLBI_GCR_CNTL__GCR_CREDIT_MASK 0xFF000000L
+//SDMA1_TILING_CONFIG
+#define SDMA1_TILING_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define SDMA1_TILING_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L
+//SDMA1_HASH
+#define SDMA1_HASH__CHANNEL_BITS__SHIFT 0x0
+#define SDMA1_HASH__BANK_BITS__SHIFT 0x4
+#define SDMA1_HASH__CHANNEL_XOR_COUNT__SHIFT 0x8
+#define SDMA1_HASH__BANK_XOR_COUNT__SHIFT 0xc
+#define SDMA1_HASH__CHANNEL_BITS_MASK 0x00000007L
+#define SDMA1_HASH__BANK_BITS_MASK 0x00000070L
+#define SDMA1_HASH__CHANNEL_XOR_COUNT_MASK 0x00000700L
+#define SDMA1_HASH__BANK_XOR_COUNT_MASK 0x00007000L
+//SDMA1_INT_STATUS
+#define SDMA1_INT_STATUS__DATA__SHIFT 0x0
+#define SDMA1_INT_STATUS__DATA_MASK 0xFFFFFFFFL
+//SDMA1_GPU_IOV_VIOLATION_LOG2
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0
+#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000003FFL
+//SDMA1_HOLE_ADDR_LO
+#define SDMA1_HOLE_ADDR_LO__VALUE__SHIFT 0x0
+#define SDMA1_HOLE_ADDR_LO__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_HOLE_ADDR_HI
+#define SDMA1_HOLE_ADDR_HI__VALUE__SHIFT 0x0
+#define SDMA1_HOLE_ADDR_HI__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_CLOCK_GATING_STATUS
+#define SDMA1_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS__SHIFT 0x0
+#define SDMA1_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS__SHIFT 0x2
+#define SDMA1_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS__SHIFT 0x3
+#define SDMA1_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS__SHIFT 0x4
+#define SDMA1_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS__SHIFT 0x5
+#define SDMA1_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS__SHIFT 0x6
+#define SDMA1_CLOCK_GATING_STATUS__DYN_CLK_GATE_STATUS_MASK 0x00000001L
+#define SDMA1_CLOCK_GATING_STATUS__CE_CLK_GATE_STATUS_MASK 0x00000004L
+#define SDMA1_CLOCK_GATING_STATUS__CE_BC_CLK_GATE_STATUS_MASK 0x00000008L
+#define SDMA1_CLOCK_GATING_STATUS__CE_NBC_CLK_GATE_STATUS_MASK 0x00000010L
+#define SDMA1_CLOCK_GATING_STATUS__REG_CLK_GATE_STATUS_MASK 0x00000020L
+#define SDMA1_CLOCK_GATING_STATUS__F32_CLK_GATE_STATUS_MASK 0x00000040L
+//SDMA1_STATUS4_REG
+#define SDMA1_STATUS4_REG__IDLE__SHIFT 0x0
+#define SDMA1_STATUS4_REG__IH_OUTSTANDING__SHIFT 0x2
+#define SDMA1_STATUS4_REG__SEM_OUTSTANDING__SHIFT 0x3
+#define SDMA1_STATUS4_REG__CH_RD_OUTSTANDING__SHIFT 0x4
+#define SDMA1_STATUS4_REG__CH_WR_OUTSTANDING__SHIFT 0x5
+#define SDMA1_STATUS4_REG__GCR_OUTSTANDING__SHIFT 0x6
+#define SDMA1_STATUS4_REG__TLBI_OUTSTANDING__SHIFT 0x7
+#define SDMA1_STATUS4_REG__UTCL2_RD_OUTSTANDING__SHIFT 0x8
+#define SDMA1_STATUS4_REG__UTCL2_WR_OUTSTANDING__SHIFT 0x9
+#define SDMA1_STATUS4_REG__REG_POLLING__SHIFT 0xa
+#define SDMA1_STATUS4_REG__MEM_POLLING__SHIFT 0xb
+#define SDMA1_STATUS4_REG__RESERVED_13_12__SHIFT 0xc
+#define SDMA1_STATUS4_REG__RESERVED_15_14__SHIFT 0xe
+#define SDMA1_STATUS4_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA1_STATUS4_REG__SRIOV_WATING_RLCV_CMD__SHIFT 0x14
+#define SDMA1_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD__SHIFT 0x15
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_FAULT__SHIFT 0x16
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_NULL__SHIFT 0x17
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT__SHIFT 0x18
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_FAULT__SHIFT 0x19
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_NULL__SHIFT 0x1a
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT__SHIFT 0x1b
+#define SDMA1_STATUS4_REG__IDLE_MASK 0x00000001L
+#define SDMA1_STATUS4_REG__IH_OUTSTANDING_MASK 0x00000004L
+#define SDMA1_STATUS4_REG__SEM_OUTSTANDING_MASK 0x00000008L
+#define SDMA1_STATUS4_REG__CH_RD_OUTSTANDING_MASK 0x00000010L
+#define SDMA1_STATUS4_REG__CH_WR_OUTSTANDING_MASK 0x00000020L
+#define SDMA1_STATUS4_REG__GCR_OUTSTANDING_MASK 0x00000040L
+#define SDMA1_STATUS4_REG__TLBI_OUTSTANDING_MASK 0x00000080L
+#define SDMA1_STATUS4_REG__UTCL2_RD_OUTSTANDING_MASK 0x00000100L
+#define SDMA1_STATUS4_REG__UTCL2_WR_OUTSTANDING_MASK 0x00000200L
+#define SDMA1_STATUS4_REG__REG_POLLING_MASK 0x00000400L
+#define SDMA1_STATUS4_REG__MEM_POLLING_MASK 0x00000800L
+#define SDMA1_STATUS4_REG__RESERVED_13_12_MASK 0x00003000L
+#define SDMA1_STATUS4_REG__RESERVED_15_14_MASK 0x0000C000L
+#define SDMA1_STATUS4_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA1_STATUS4_REG__SRIOV_WATING_RLCV_CMD_MASK 0x00100000L
+#define SDMA1_STATUS4_REG__SRIOV_SDMA_EXECUTING_CMD_MASK 0x00200000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_FAULT_MASK 0x00400000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_NULL_MASK 0x00800000L
+#define SDMA1_STATUS4_REG__UTCL2_RD_XNACK_TIMEOUT_MASK 0x01000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_FAULT_MASK 0x02000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_NULL_MASK 0x04000000L
+#define SDMA1_STATUS4_REG__UTCL2_WR_XNACK_TIMEOUT_MASK 0x08000000L
+//SDMA1_SCRATCH_RAM_DATA
+#define SDMA1_SCRATCH_RAM_DATA__DATA__SHIFT 0x0
+#define SDMA1_SCRATCH_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//SDMA1_SCRATCH_RAM_ADDR
+#define SDMA1_SCRATCH_RAM_ADDR__ADDR__SHIFT 0x0
+#define SDMA1_SCRATCH_RAM_ADDR__ADDR_MASK 0x0000007FL
+//SDMA1_TIMESTAMP_CNTL
+#define SDMA1_TIMESTAMP_CNTL__CAPTURE__SHIFT 0x0
+#define SDMA1_TIMESTAMP_CNTL__CAPTURE_MASK 0x00000001L
+//SDMA1_STATUS5_REG
+#define SDMA1_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS__SHIFT 0x0
+#define SDMA1_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS__SHIFT 0x1
+#define SDMA1_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS__SHIFT 0x2
+#define SDMA1_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS__SHIFT 0x3
+#define SDMA1_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS__SHIFT 0x4
+#define SDMA1_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS__SHIFT 0x5
+#define SDMA1_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS__SHIFT 0x6
+#define SDMA1_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS__SHIFT 0x7
+#define SDMA1_STATUS5_REG__ACTIVE_QUEUE_ID__SHIFT 0x10
+#define SDMA1_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x14
+#define SDMA1_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x15
+#define SDMA1_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x16
+#define SDMA1_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x17
+#define SDMA1_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x18
+#define SDMA1_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x19
+#define SDMA1_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1a
+#define SDMA1_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION__SHIFT 0x1b
+#define SDMA1_STATUS5_REG__QUEUE0_RB_ENABLE_STATUS_MASK 0x00000001L
+#define SDMA1_STATUS5_REG__QUEUE1_RB_ENABLE_STATUS_MASK 0x00000002L
+#define SDMA1_STATUS5_REG__QUEUE2_RB_ENABLE_STATUS_MASK 0x00000004L
+#define SDMA1_STATUS5_REG__QUEUE3_RB_ENABLE_STATUS_MASK 0x00000008L
+#define SDMA1_STATUS5_REG__QUEUE4_RB_ENABLE_STATUS_MASK 0x00000010L
+#define SDMA1_STATUS5_REG__QUEUE5_RB_ENABLE_STATUS_MASK 0x00000020L
+#define SDMA1_STATUS5_REG__QUEUE6_RB_ENABLE_STATUS_MASK 0x00000040L
+#define SDMA1_STATUS5_REG__QUEUE7_RB_ENABLE_STATUS_MASK 0x00000080L
+#define SDMA1_STATUS5_REG__ACTIVE_QUEUE_ID_MASK 0x000F0000L
+#define SDMA1_STATUS5_REG__QUEUE0_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00100000L
+#define SDMA1_STATUS5_REG__QUEUE1_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00200000L
+#define SDMA1_STATUS5_REG__QUEUE2_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00400000L
+#define SDMA1_STATUS5_REG__QUEUE3_WPTR_POLL_PAGE_EXCEPTION_MASK 0x00800000L
+#define SDMA1_STATUS5_REG__QUEUE4_WPTR_POLL_PAGE_EXCEPTION_MASK 0x01000000L
+#define SDMA1_STATUS5_REG__QUEUE5_WPTR_POLL_PAGE_EXCEPTION_MASK 0x02000000L
+#define SDMA1_STATUS5_REG__QUEUE6_WPTR_POLL_PAGE_EXCEPTION_MASK 0x04000000L
+#define SDMA1_STATUS5_REG__QUEUE7_WPTR_POLL_PAGE_EXCEPTION_MASK 0x08000000L
+//SDMA1_QUEUE_RESET_REQ
+#define SDMA1_QUEUE_RESET_REQ__QUEUE0_RESET__SHIFT 0x0
+#define SDMA1_QUEUE_RESET_REQ__QUEUE1_RESET__SHIFT 0x1
+#define SDMA1_QUEUE_RESET_REQ__QUEUE2_RESET__SHIFT 0x2
+#define SDMA1_QUEUE_RESET_REQ__QUEUE3_RESET__SHIFT 0x3
+#define SDMA1_QUEUE_RESET_REQ__QUEUE4_RESET__SHIFT 0x4
+#define SDMA1_QUEUE_RESET_REQ__QUEUE5_RESET__SHIFT 0x5
+#define SDMA1_QUEUE_RESET_REQ__QUEUE6_RESET__SHIFT 0x6
+#define SDMA1_QUEUE_RESET_REQ__QUEUE7_RESET__SHIFT 0x7
+#define SDMA1_QUEUE_RESET_REQ__RESERVED__SHIFT 0x8
+#define SDMA1_QUEUE_RESET_REQ__QUEUE0_RESET_MASK 0x00000001L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE1_RESET_MASK 0x00000002L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE2_RESET_MASK 0x00000004L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE3_RESET_MASK 0x00000008L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE4_RESET_MASK 0x00000010L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE5_RESET_MASK 0x00000020L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE6_RESET_MASK 0x00000040L
+#define SDMA1_QUEUE_RESET_REQ__QUEUE7_RESET_MASK 0x00000080L
+#define SDMA1_QUEUE_RESET_REQ__RESERVED_MASK 0xFFFFFF00L
+//SDMA1_STATUS6_REG
+#define SDMA1_STATUS6_REG__ID__SHIFT 0x0
+#define SDMA1_STATUS6_REG__TH1F32_INSTR_PTR__SHIFT 0x2
+#define SDMA1_STATUS6_REG__TH1_EXCEPTION__SHIFT 0x10
+#define SDMA1_STATUS6_REG__ID_MASK 0x00000003L
+#define SDMA1_STATUS6_REG__TH1F32_INSTR_PTR_MASK 0x0000FFFCL
+#define SDMA1_STATUS6_REG__TH1_EXCEPTION_MASK 0xFFFF0000L
+//SDMA1_UCODE1_CHECKSUM
+#define SDMA1_UCODE1_CHECKSUM__DATA__SHIFT 0x0
+#define SDMA1_UCODE1_CHECKSUM__DATA_MASK 0xFFFFFFFFL
+//SDMA1_CE_CTRL
+#define SDMA1_CE_CTRL__RD_LUT_WATERMARK__SHIFT 0x0
+#define SDMA1_CE_CTRL__RD_LUT_DEPTH__SHIFT 0x3
+#define SDMA1_CE_CTRL__WR_AFIFO_WATERMARK__SHIFT 0x5
+#define SDMA1_CE_CTRL__CE_DCC_READ_128B_ENABLE__SHIFT 0x8
+#define SDMA1_CE_CTRL__RESERVED__SHIFT 0x9
+#define SDMA1_CE_CTRL__RD_LUT_WATERMARK_MASK 0x00000007L
+#define SDMA1_CE_CTRL__RD_LUT_DEPTH_MASK 0x00000018L
+#define SDMA1_CE_CTRL__WR_AFIFO_WATERMARK_MASK 0x000000E0L
+#define SDMA1_CE_CTRL__CE_DCC_READ_128B_ENABLE_MASK 0x00000100L
+#define SDMA1_CE_CTRL__RESERVED_MASK 0xFFFFFE00L
+//SDMA1_FED_STATUS
+#define SDMA1_FED_STATUS__RB_FETCH_ECC__SHIFT 0x0
+#define SDMA1_FED_STATUS__IB_FETCH_ECC__SHIFT 0x1
+#define SDMA1_FED_STATUS__F32_DATA_ECC__SHIFT 0x2
+#define SDMA1_FED_STATUS__WPTR_ATOMIC_ECC__SHIFT 0x3
+#define SDMA1_FED_STATUS__COPY_DATA_ECC__SHIFT 0x4
+#define SDMA1_FED_STATUS__COPY_METADATA_ECC__SHIFT 0x5
+#define SDMA1_FED_STATUS__SELFLOAD_UCODE_ECC__SHIFT 0x6
+#define SDMA1_FED_STATUS__RB_FETCH_ECC_MASK 0x00000001L
+#define SDMA1_FED_STATUS__IB_FETCH_ECC_MASK 0x00000002L
+#define SDMA1_FED_STATUS__F32_DATA_ECC_MASK 0x00000004L
+#define SDMA1_FED_STATUS__WPTR_ATOMIC_ECC_MASK 0x00000008L
+#define SDMA1_FED_STATUS__COPY_DATA_ECC_MASK 0x00000010L
+#define SDMA1_FED_STATUS__COPY_METADATA_ECC_MASK 0x00000020L
+#define SDMA1_FED_STATUS__SELFLOAD_UCODE_ECC_MASK 0x00000040L
+//SDMA1_QUEUE0_RB_CNTL
+#define SDMA1_QUEUE0_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE0_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE0_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE0_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE0_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE0_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE0_RB_CNTL__RB_VMID_MASK 0x0F000000L
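Each __SHIFT/_MASK pair above is meant to be used together: the mask isolates a field inside the 32-bit register word and the shift moves it down to bit 0, which also lets a field be rewritten without disturbing its neighbours. A minimal, self-contained sketch of that pattern using the RB_SIZE field of SDMA1_QUEUE0_RB_CNTL defined just above (the helper names and the standalone program are illustrative only, not part of this header or of the amdgpu driver; the macro values are copied from the definitions above):

/* Illustrative field access built on a __SHIFT/_MASK pair. */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Values copied from the SDMA1_QUEUE0_RB_CNTL definitions above. */
#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE__SHIFT 0x1
#define SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK   0x0000003EL

/* Extract the RB_SIZE field from a raw RB_CNTL register value. */
static uint32_t rb_cntl_get_rb_size(uint32_t reg)
{
	return (reg & SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK) >>
	       SDMA1_QUEUE0_RB_CNTL__RB_SIZE__SHIFT;
}

/* Write a new RB_SIZE value without touching the other RB_CNTL fields. */
static uint32_t rb_cntl_set_rb_size(uint32_t reg, uint32_t size)
{
	reg &= ~(uint32_t)SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK;
	reg |= (size << SDMA1_QUEUE0_RB_CNTL__RB_SIZE__SHIFT) &
	       SDMA1_QUEUE0_RB_CNTL__RB_SIZE_MASK;
	return reg;
}

int main(void)
{
	uint32_t reg = rb_cntl_set_rb_size(0, 10);

	printf("RB_SIZE = %" PRIu32 "\n", rb_cntl_get_rb_size(reg));
	return 0;
}

The same read-modify-write shape applies to every register in this file; only the register and field names change.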
+//SDMA1_QUEUE0_RB_BASE
+#define SDMA1_QUEUE0_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_BASE_HI
+#define SDMA1_QUEUE0_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE0_RB_RPTR
+#define SDMA1_QUEUE0_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_HI
+#define SDMA1_QUEUE0_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR
+#define SDMA1_QUEUE0_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_HI
+#define SDMA1_QUEUE0_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_IB_CNTL
+#define SDMA1_QUEUE0_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE0_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE0_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE0_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE0_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE0_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE0_IB_RPTR
+#define SDMA1_QUEUE0_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE0_IB_OFFSET
+#define SDMA1_QUEUE0_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE0_IB_BASE_LO
+#define SDMA1_QUEUE0_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE0_IB_BASE_HI
+#define SDMA1_QUEUE0_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_IB_SIZE
+#define SDMA1_QUEUE0_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE0_SKIP_CNTL
+#define SDMA1_QUEUE0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE0_CONTEXT_STATUS
+#define SDMA1_QUEUE0_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE0_CONTEXT_STATUS__USE_IB__SHIFT 0x1
+#define SDMA1_QUEUE0_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__USE_IB_MASK 0x00000002L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE0_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE0_DOORBELL
+#define SDMA1_QUEUE0_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE0_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE0_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE0_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE0_DOORBELL_LOG
+#define SDMA1_QUEUE0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE0_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_DOORBELL_OFFSET
+#define SDMA1_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE0_CSA_ADDR_LO
+#define SDMA1_QUEUE0_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_CSA_ADDR_HI
+#define SDMA1_QUEUE0_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_SCHEDULE_CNTL
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE0_IB_SUB_REMAIN
+#define SDMA1_QUEUE0_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE0_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE0_PREEMPT
+#define SDMA1_QUEUE0_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE0_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE0_DUMMY_REG
+#define SDMA1_QUEUE0_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE0_RB_AQL_CNTL
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE0_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE0_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE0_RB_PREEMPT
+#define SDMA1_QUEUE0_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE0_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE0_MIDCMD_DATA0
+#define SDMA1_QUEUE0_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA1
+#define SDMA1_QUEUE0_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA2
+#define SDMA1_QUEUE0_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA3
+#define SDMA1_QUEUE0_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA4
+#define SDMA1_QUEUE0_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA5
+#define SDMA1_QUEUE0_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA6
+#define SDMA1_QUEUE0_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA7
+#define SDMA1_QUEUE0_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA8
+#define SDMA1_QUEUE0_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA9
+#define SDMA1_QUEUE0_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_DATA10
+#define SDMA1_QUEUE0_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE0_MIDCMD_CNTL
+#define SDMA1_QUEUE0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE1_RB_CNTL
+#define SDMA1_QUEUE1_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE1_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE1_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE1_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE1_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE1_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE1_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE1_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE1_RB_BASE
+#define SDMA1_QUEUE1_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_BASE_HI
+#define SDMA1_QUEUE1_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE1_RB_RPTR
+#define SDMA1_QUEUE1_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_HI
+#define SDMA1_QUEUE1_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR
+#define SDMA1_QUEUE1_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_HI
+#define SDMA1_QUEUE1_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_IB_CNTL
+#define SDMA1_QUEUE1_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE1_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE1_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE1_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE1_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE1_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE1_IB_RPTR
+#define SDMA1_QUEUE1_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE1_IB_OFFSET
+#define SDMA1_QUEUE1_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE1_IB_BASE_LO
+#define SDMA1_QUEUE1_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE1_IB_BASE_HI
+#define SDMA1_QUEUE1_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_IB_SIZE
+#define SDMA1_QUEUE1_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE1_SKIP_CNTL
+#define SDMA1_QUEUE1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE1_CONTEXT_STATUS
+#define SDMA1_QUEUE1_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE1_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE1_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE1_DOORBELL
+#define SDMA1_QUEUE1_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE1_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE1_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE1_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE1_DOORBELL_LOG
+#define SDMA1_QUEUE1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE1_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_DOORBELL_OFFSET
+#define SDMA1_QUEUE1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE1_CSA_ADDR_LO
+#define SDMA1_QUEUE1_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_CSA_ADDR_HI
+#define SDMA1_QUEUE1_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_SCHEDULE_CNTL
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE1_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE1_IB_SUB_REMAIN
+#define SDMA1_QUEUE1_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE1_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE1_PREEMPT
+#define SDMA1_QUEUE1_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE1_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE1_DUMMY_REG
+#define SDMA1_QUEUE1_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE1_RB_AQL_CNTL
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE1_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE1_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE1_RB_PREEMPT
+#define SDMA1_QUEUE1_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE1_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE1_MIDCMD_DATA0
+#define SDMA1_QUEUE1_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA1
+#define SDMA1_QUEUE1_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA2
+#define SDMA1_QUEUE1_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA3
+#define SDMA1_QUEUE1_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA4
+#define SDMA1_QUEUE1_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA5
+#define SDMA1_QUEUE1_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA6
+#define SDMA1_QUEUE1_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA7
+#define SDMA1_QUEUE1_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA8
+#define SDMA1_QUEUE1_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA9
+#define SDMA1_QUEUE1_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_DATA10
+#define SDMA1_QUEUE1_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE1_MIDCMD_CNTL
+#define SDMA1_QUEUE1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE2_RB_CNTL
+#define SDMA1_QUEUE2_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE2_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE2_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE2_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE2_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE2_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE2_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE2_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE2_RB_BASE
+#define SDMA1_QUEUE2_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_BASE_HI
+#define SDMA1_QUEUE2_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE2_RB_RPTR
+#define SDMA1_QUEUE2_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_HI
+#define SDMA1_QUEUE2_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR
+#define SDMA1_QUEUE2_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_HI
+#define SDMA1_QUEUE2_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_IB_CNTL
+#define SDMA1_QUEUE2_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE2_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE2_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE2_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE2_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE2_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE2_IB_RPTR
+#define SDMA1_QUEUE2_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE2_IB_OFFSET
+#define SDMA1_QUEUE2_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE2_IB_BASE_LO
+#define SDMA1_QUEUE2_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE2_IB_BASE_HI
+#define SDMA1_QUEUE2_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_IB_SIZE
+#define SDMA1_QUEUE2_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE2_SKIP_CNTL
+#define SDMA1_QUEUE2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE2_CONTEXT_STATUS
+#define SDMA1_QUEUE2_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE2_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE2_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE2_DOORBELL
+#define SDMA1_QUEUE2_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE2_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE2_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE2_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE2_DOORBELL_LOG
+#define SDMA1_QUEUE2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE2_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_DOORBELL_OFFSET
+#define SDMA1_QUEUE2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE2_CSA_ADDR_LO
+#define SDMA1_QUEUE2_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_CSA_ADDR_HI
+#define SDMA1_QUEUE2_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_SCHEDULE_CNTL
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE2_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE2_IB_SUB_REMAIN
+#define SDMA1_QUEUE2_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE2_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE2_PREEMPT
+#define SDMA1_QUEUE2_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE2_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE2_DUMMY_REG
+#define SDMA1_QUEUE2_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE2_RB_AQL_CNTL
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE2_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE2_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE2_RB_PREEMPT
+#define SDMA1_QUEUE2_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE2_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE2_MIDCMD_DATA0
+#define SDMA1_QUEUE2_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA1
+#define SDMA1_QUEUE2_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA2
+#define SDMA1_QUEUE2_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA3
+#define SDMA1_QUEUE2_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA4
+#define SDMA1_QUEUE2_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA5
+#define SDMA1_QUEUE2_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA6
+#define SDMA1_QUEUE2_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA7
+#define SDMA1_QUEUE2_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA8
+#define SDMA1_QUEUE2_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA9
+#define SDMA1_QUEUE2_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_DATA10
+#define SDMA1_QUEUE2_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE2_MIDCMD_CNTL
+#define SDMA1_QUEUE2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE3_RB_CNTL
+#define SDMA1_QUEUE3_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE3_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE3_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE3_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE3_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE3_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE3_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE3_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE3_RB_BASE
+#define SDMA1_QUEUE3_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_BASE_HI
+#define SDMA1_QUEUE3_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE3_RB_RPTR
+#define SDMA1_QUEUE3_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_HI
+#define SDMA1_QUEUE3_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR
+#define SDMA1_QUEUE3_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_HI
+#define SDMA1_QUEUE3_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_IB_CNTL
+#define SDMA1_QUEUE3_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE3_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE3_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE3_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE3_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE3_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE3_IB_RPTR
+#define SDMA1_QUEUE3_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE3_IB_OFFSET
+#define SDMA1_QUEUE3_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE3_IB_BASE_LO
+#define SDMA1_QUEUE3_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE3_IB_BASE_HI
+#define SDMA1_QUEUE3_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_IB_SIZE
+#define SDMA1_QUEUE3_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE3_SKIP_CNTL
+#define SDMA1_QUEUE3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE3_CONTEXT_STATUS
+#define SDMA1_QUEUE3_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE3_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE3_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE3_DOORBELL
+#define SDMA1_QUEUE3_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE3_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE3_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE3_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE3_DOORBELL_LOG
+#define SDMA1_QUEUE3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE3_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_DOORBELL_OFFSET
+#define SDMA1_QUEUE3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE3_CSA_ADDR_LO
+#define SDMA1_QUEUE3_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_CSA_ADDR_HI
+#define SDMA1_QUEUE3_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_SCHEDULE_CNTL
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE3_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE3_IB_SUB_REMAIN
+#define SDMA1_QUEUE3_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE3_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE3_PREEMPT
+#define SDMA1_QUEUE3_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE3_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE3_DUMMY_REG
+#define SDMA1_QUEUE3_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE3_RB_AQL_CNTL
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE3_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE3_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE3_RB_PREEMPT
+#define SDMA1_QUEUE3_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE3_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE3_MIDCMD_DATA0
+#define SDMA1_QUEUE3_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA1
+#define SDMA1_QUEUE3_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA2
+#define SDMA1_QUEUE3_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA3
+#define SDMA1_QUEUE3_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA4
+#define SDMA1_QUEUE3_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA5
+#define SDMA1_QUEUE3_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA6
+#define SDMA1_QUEUE3_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA7
+#define SDMA1_QUEUE3_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA8
+#define SDMA1_QUEUE3_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA9
+#define SDMA1_QUEUE3_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_DATA10
+#define SDMA1_QUEUE3_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE3_MIDCMD_CNTL
+#define SDMA1_QUEUE3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE4_RB_CNTL
+#define SDMA1_QUEUE4_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE4_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE4_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE4_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE4_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE4_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE4_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE4_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE4_RB_BASE
+#define SDMA1_QUEUE4_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_BASE_HI
+#define SDMA1_QUEUE4_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE4_RB_RPTR
+#define SDMA1_QUEUE4_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_HI
+#define SDMA1_QUEUE4_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR
+#define SDMA1_QUEUE4_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_HI
+#define SDMA1_QUEUE4_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_IB_CNTL
+#define SDMA1_QUEUE4_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE4_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE4_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE4_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE4_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE4_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE4_IB_RPTR
+#define SDMA1_QUEUE4_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE4_IB_OFFSET
+#define SDMA1_QUEUE4_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE4_IB_BASE_LO
+#define SDMA1_QUEUE4_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE4_IB_BASE_HI
+#define SDMA1_QUEUE4_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_IB_SIZE
+#define SDMA1_QUEUE4_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE4_SKIP_CNTL
+#define SDMA1_QUEUE4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE4_CONTEXT_STATUS
+#define SDMA1_QUEUE4_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE4_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE4_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE4_DOORBELL
+#define SDMA1_QUEUE4_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE4_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE4_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE4_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE4_DOORBELL_LOG
+#define SDMA1_QUEUE4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE4_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_DOORBELL_OFFSET
+#define SDMA1_QUEUE4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE4_CSA_ADDR_LO
+#define SDMA1_QUEUE4_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_CSA_ADDR_HI
+#define SDMA1_QUEUE4_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_SCHEDULE_CNTL
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE4_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE4_IB_SUB_REMAIN
+#define SDMA1_QUEUE4_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE4_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE4_PREEMPT
+#define SDMA1_QUEUE4_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE4_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE4_DUMMY_REG
+#define SDMA1_QUEUE4_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE4_RB_AQL_CNTL
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE4_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE4_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE4_RB_PREEMPT
+#define SDMA1_QUEUE4_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE4_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE4_MIDCMD_DATA0
+#define SDMA1_QUEUE4_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA1
+#define SDMA1_QUEUE4_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA2
+#define SDMA1_QUEUE4_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA3
+#define SDMA1_QUEUE4_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA4
+#define SDMA1_QUEUE4_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA5
+#define SDMA1_QUEUE4_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA6
+#define SDMA1_QUEUE4_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA7
+#define SDMA1_QUEUE4_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA8
+#define SDMA1_QUEUE4_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA9
+#define SDMA1_QUEUE4_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_DATA10
+#define SDMA1_QUEUE4_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE4_MIDCMD_CNTL
+#define SDMA1_QUEUE4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE5_RB_CNTL
+#define SDMA1_QUEUE5_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE5_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE5_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE5_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE5_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE5_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE5_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE5_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE5_RB_BASE
+#define SDMA1_QUEUE5_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_BASE_HI
+#define SDMA1_QUEUE5_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE5_RB_RPTR
+#define SDMA1_QUEUE5_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_HI
+#define SDMA1_QUEUE5_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR
+#define SDMA1_QUEUE5_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_HI
+#define SDMA1_QUEUE5_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_IB_CNTL
+#define SDMA1_QUEUE5_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE5_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE5_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE5_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE5_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE5_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE5_IB_RPTR
+#define SDMA1_QUEUE5_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE5_IB_OFFSET
+#define SDMA1_QUEUE5_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE5_IB_BASE_LO
+#define SDMA1_QUEUE5_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE5_IB_BASE_HI
+#define SDMA1_QUEUE5_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_IB_SIZE
+#define SDMA1_QUEUE5_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE5_SKIP_CNTL
+#define SDMA1_QUEUE5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE5_CONTEXT_STATUS
+#define SDMA1_QUEUE5_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE5_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE5_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE5_DOORBELL
+#define SDMA1_QUEUE5_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE5_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE5_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE5_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE5_DOORBELL_LOG
+#define SDMA1_QUEUE5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE5_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_DOORBELL_OFFSET
+#define SDMA1_QUEUE5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE5_CSA_ADDR_LO
+#define SDMA1_QUEUE5_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_CSA_ADDR_HI
+#define SDMA1_QUEUE5_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_SCHEDULE_CNTL
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE5_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE5_IB_SUB_REMAIN
+#define SDMA1_QUEUE5_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE5_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE5_PREEMPT
+#define SDMA1_QUEUE5_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE5_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE5_DUMMY_REG
+#define SDMA1_QUEUE5_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE5_RB_AQL_CNTL
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE5_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE5_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE5_RB_PREEMPT
+#define SDMA1_QUEUE5_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE5_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE5_MIDCMD_DATA0
+#define SDMA1_QUEUE5_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA1
+#define SDMA1_QUEUE5_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA2
+#define SDMA1_QUEUE5_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA3
+#define SDMA1_QUEUE5_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA4
+#define SDMA1_QUEUE5_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA5
+#define SDMA1_QUEUE5_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA6
+#define SDMA1_QUEUE5_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA7
+#define SDMA1_QUEUE5_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA8
+#define SDMA1_QUEUE5_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA9
+#define SDMA1_QUEUE5_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_DATA10
+#define SDMA1_QUEUE5_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE5_MIDCMD_CNTL
+#define SDMA1_QUEUE5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE6_RB_CNTL
+#define SDMA1_QUEUE6_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE6_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE6_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE6_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE6_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE6_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE6_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE6_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE6_RB_BASE
+#define SDMA1_QUEUE6_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_BASE_HI
+#define SDMA1_QUEUE6_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE6_RB_RPTR
+#define SDMA1_QUEUE6_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_HI
+#define SDMA1_QUEUE6_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR
+#define SDMA1_QUEUE6_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_HI
+#define SDMA1_QUEUE6_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_IB_CNTL
+#define SDMA1_QUEUE6_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE6_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE6_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE6_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE6_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE6_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE6_IB_RPTR
+#define SDMA1_QUEUE6_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE6_IB_OFFSET
+#define SDMA1_QUEUE6_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE6_IB_BASE_LO
+#define SDMA1_QUEUE6_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE6_IB_BASE_HI
+#define SDMA1_QUEUE6_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_IB_SIZE
+#define SDMA1_QUEUE6_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE6_SKIP_CNTL
+#define SDMA1_QUEUE6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE6_CONTEXT_STATUS
+#define SDMA1_QUEUE6_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE6_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE6_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE6_DOORBELL
+#define SDMA1_QUEUE6_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE6_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE6_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE6_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE6_DOORBELL_LOG
+#define SDMA1_QUEUE6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE6_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_DOORBELL_OFFSET
+#define SDMA1_QUEUE6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE6_CSA_ADDR_LO
+#define SDMA1_QUEUE6_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_CSA_ADDR_HI
+#define SDMA1_QUEUE6_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_SCHEDULE_CNTL
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE6_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE6_IB_SUB_REMAIN
+#define SDMA1_QUEUE6_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE6_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE6_PREEMPT
+#define SDMA1_QUEUE6_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE6_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE6_DUMMY_REG
+#define SDMA1_QUEUE6_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE6_RB_AQL_CNTL
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE6_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE6_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE6_RB_PREEMPT
+#define SDMA1_QUEUE6_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE6_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE6_MIDCMD_DATA0
+#define SDMA1_QUEUE6_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA1
+#define SDMA1_QUEUE6_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA2
+#define SDMA1_QUEUE6_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA3
+#define SDMA1_QUEUE6_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA4
+#define SDMA1_QUEUE6_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA5
+#define SDMA1_QUEUE6_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA6
+#define SDMA1_QUEUE6_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA7
+#define SDMA1_QUEUE6_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA8
+#define SDMA1_QUEUE6_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA9
+#define SDMA1_QUEUE6_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_DATA10
+#define SDMA1_QUEUE6_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE6_MIDCMD_CNTL
+#define SDMA1_QUEUE6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+//SDMA1_QUEUE7_RB_CNTL
+#define SDMA1_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_CNTL__RB_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE__SHIFT 0x8
+#define SDMA1_QUEUE7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE__SHIFT 0xa
+#define SDMA1_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT 0xb
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10
+#define SDMA1_QUEUE7_RB_CNTL__RB_PRIV__SHIFT 0x17
+#define SDMA1_QUEUE7_RB_CNTL__RB_VMID__SHIFT 0x18
+#define SDMA1_QUEUE7_RB_CNTL__RB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_RB_CNTL__RB_SIZE_MASK 0x0000003EL
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_ENABLE_MASK 0x00000100L
+#define SDMA1_QUEUE7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L
+#define SDMA1_QUEUE7_RB_CNTL__WPTR_POLL_SWAP_ENABLE_MASK 0x00000400L
+#define SDMA1_QUEUE7_RB_CNTL__F32_WPTR_POLL_ENABLE_MASK 0x00000800L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L
+#define SDMA1_QUEUE7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L
+#define SDMA1_QUEUE7_RB_CNTL__RB_PRIV_MASK 0x00800000L
+#define SDMA1_QUEUE7_RB_CNTL__RB_VMID_MASK 0x0F000000L
+//SDMA1_QUEUE7_RB_BASE
+#define SDMA1_QUEUE7_RB_BASE__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_BASE__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_BASE_HI
+#define SDMA1_QUEUE7_RB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL
+//SDMA1_QUEUE7_RB_RPTR
+#define SDMA1_QUEUE7_RB_RPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_HI
+#define SDMA1_QUEUE7_RB_RPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR
+#define SDMA1_QUEUE7_RB_WPTR__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_HI
+#define SDMA1_QUEUE7_RB_WPTR_HI__OFFSET__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_ADDR_HI
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_RPTR_ADDR_LO
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_IB_CNTL
+#define SDMA1_QUEUE7_IB_CNTL__IB_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4
+#define SDMA1_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8
+#define SDMA1_QUEUE7_IB_CNTL__CMD_VMID__SHIFT 0x10
+#define SDMA1_QUEUE7_IB_CNTL__IB_PRIV__SHIFT 0x1f
+#define SDMA1_QUEUE7_IB_CNTL__IB_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L
+#define SDMA1_QUEUE7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L
+#define SDMA1_QUEUE7_IB_CNTL__CMD_VMID_MASK 0x000F0000L
+#define SDMA1_QUEUE7_IB_CNTL__IB_PRIV_MASK 0x80000000L
+//SDMA1_QUEUE7_IB_RPTR
+#define SDMA1_QUEUE7_IB_RPTR__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_IB_RPTR__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE7_IB_OFFSET
+#define SDMA1_QUEUE7_IB_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL
+//SDMA1_QUEUE7_IB_BASE_LO
+#define SDMA1_QUEUE7_IB_BASE_LO__ADDR__SHIFT 0x5
+#define SDMA1_QUEUE7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L
+//SDMA1_QUEUE7_IB_BASE_HI
+#define SDMA1_QUEUE7_IB_BASE_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_IB_SIZE
+#define SDMA1_QUEUE7_IB_SIZE__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_SIZE__SIZE_MASK 0x000FFFFFL
+//SDMA1_QUEUE7_SKIP_CNTL
+#define SDMA1_QUEUE7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0
+#define SDMA1_QUEUE7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL
+//SDMA1_QUEUE7_CONTEXT_STATUS
+#define SDMA1_QUEUE7_CONTEXT_STATUS__SELECTED__SHIFT 0x0
+#define SDMA1_QUEUE7_CONTEXT_STATUS__IDLE__SHIFT 0x2
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4
+#define SDMA1_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7
+#define SDMA1_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa
+#define SDMA1_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE__SHIFT 0xb
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING__SHIFT 0xc
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x10
+#define SDMA1_QUEUE7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__IDLE_MASK 0x00000004L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__RPTR_WB_IDLE_MASK 0x00000800L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_PENDING_MASK 0x00001000L
+#define SDMA1_QUEUE7_CONTEXT_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x00FF0000L
+//SDMA1_QUEUE7_DOORBELL
+#define SDMA1_QUEUE7_DOORBELL__ENABLE__SHIFT 0x1c
+#define SDMA1_QUEUE7_DOORBELL__CAPTURED__SHIFT 0x1e
+#define SDMA1_QUEUE7_DOORBELL__ENABLE_MASK 0x10000000L
+#define SDMA1_QUEUE7_DOORBELL__CAPTURED_MASK 0x40000000L
+//SDMA1_QUEUE7_DOORBELL_LOG
+#define SDMA1_QUEUE7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0
+#define SDMA1_QUEUE7_DOORBELL_LOG__DATA__SHIFT 0x2
+#define SDMA1_QUEUE7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L
+#define SDMA1_QUEUE7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_DOORBELL_OFFSET
+#define SDMA1_QUEUE7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2
+#define SDMA1_QUEUE7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL
+//SDMA1_QUEUE7_CSA_ADDR_LO
+#define SDMA1_QUEUE7_CSA_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_CSA_ADDR_HI
+#define SDMA1_QUEUE7_CSA_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_SCHEDULE_CNTL
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID__SHIFT 0x0
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__PROCESS_ID__SHIFT 0x2
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__LOCAL_ID__SHIFT 0x6
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT 0x8
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__GLOBAL_ID_MASK 0x00000003L
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__PROCESS_ID_MASK 0x0000001CL
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__LOCAL_ID_MASK 0x000000C0L
+#define SDMA1_QUEUE7_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK 0x0000FF00L
+//SDMA1_QUEUE7_IB_SUB_REMAIN
+#define SDMA1_QUEUE7_IB_SUB_REMAIN__SIZE__SHIFT 0x0
+#define SDMA1_QUEUE7_IB_SUB_REMAIN__SIZE_MASK 0x00003FFFL
+//SDMA1_QUEUE7_PREEMPT
+#define SDMA1_QUEUE7_PREEMPT__IB_PREEMPT__SHIFT 0x0
+#define SDMA1_QUEUE7_PREEMPT__IB_PREEMPT_MASK 0x00000001L
+//SDMA1_QUEUE7_DUMMY_REG
+#define SDMA1_QUEUE7_DUMMY_REG__DUMMY__SHIFT 0x0
+#define SDMA1_QUEUE7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2
+#define SDMA1_QUEUE7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_QUEUE7_RB_AQL_CNTL
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1
+#define SDMA1_QUEUE7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x10
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE__SHIFT 0x11
+#define SDMA1_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE__SHIFT 0x12
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL
+#define SDMA1_QUEUE7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00010000L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__MIDCMD_PREEMPT_DATA_RESTORE_MASK 0x00020000L
+#define SDMA1_QUEUE7_RB_AQL_CNTL__OVERLAP_ENABLE_MASK 0x00040000L
+//SDMA1_QUEUE7_MINOR_PTR_UPDATE
+#define SDMA1_QUEUE7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0
+#define SDMA1_QUEUE7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L
+//SDMA1_QUEUE7_RB_PREEMPT
+#define SDMA1_QUEUE7_RB_PREEMPT__PREEMPT_REQ__SHIFT 0x0
+#define SDMA1_QUEUE7_RB_PREEMPT__PREEMPT_REQ_MASK 0x00000001L
+//SDMA1_QUEUE7_MIDCMD_DATA0
+#define SDMA1_QUEUE7_MIDCMD_DATA0__DATA0__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA1
+#define SDMA1_QUEUE7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA2
+#define SDMA1_QUEUE7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA3
+#define SDMA1_QUEUE7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA4
+#define SDMA1_QUEUE7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA5
+#define SDMA1_QUEUE7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA6
+#define SDMA1_QUEUE7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA7
+#define SDMA1_QUEUE7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA8
+#define SDMA1_QUEUE7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA9
+#define SDMA1_QUEUE7_MIDCMD_DATA9__DATA9__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA9__DATA9_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_DATA10
+#define SDMA1_QUEUE7_MIDCMD_DATA10__DATA10__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_DATA10__DATA10_MASK 0xFFFFFFFFL
+//SDMA1_QUEUE7_MIDCMD_CNTL
+#define SDMA1_QUEUE7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA1_QUEUE7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA1_QUEUE7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA1_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA1_QUEUE7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA1_QUEUE7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
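The per-queue registers above all follow the same auto-generated convention: each field gets a `__SHIFT` constant and a `_MASK` constant, and driver code composes register values by shifting a field value into place and masking it. A minimal illustrative sketch (not part of this patch, and not the driver's actual helper): build a QUEUE7 RB_CNTL value from the macros defined above. The `u32` type comes from <linux/types.h>; the field semantics in the comments (e.g. RB_SIZE commonly encoding a log2 ring size) are assumptions for illustration only.

#include <linux/types.h>

static inline u32 sdma1_queue7_rb_cntl_build(u32 rb_size_log2, u32 vmid)
{
	u32 v = 0;

	/* enable the ring buffer (single-bit field at bit 0) */
	v |= 1 << SDMA1_QUEUE7_RB_CNTL__RB_ENABLE__SHIFT;
	/* 5-bit RB_SIZE field, commonly a log2-encoded ring size */
	v |= (rb_size_log2 << SDMA1_QUEUE7_RB_CNTL__RB_SIZE__SHIFT) &
	     SDMA1_QUEUE7_RB_CNTL__RB_SIZE_MASK;
	/* 4-bit VMID owning the queue */
	v |= (vmid << SDMA1_QUEUE7_RB_CNTL__RB_VMID__SHIFT) &
	     SDMA1_QUEUE7_RB_CNTL__RB_VMID_MASK;

	return v;
}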
+
+
+// addressBlock: gc_sdma0_sdma0hypdec
+//SDMA0_UCODE_ADDR
+#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA0_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA0_UCODE_DATA
+#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_UCODE_SELFLOAD_CONTROL
+#define SDMA0_UCODE_SELFLOAD_CONTROL__GPA__SHIFT 0x0
+#define SDMA0_UCODE_SELFLOAD_CONTROL__SYS__SHIFT 0x1
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CID__SHIFT 0x4
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CACHE_POLICY__SHIFT 0x8
+#define SDMA0_UCODE_SELFLOAD_CONTROL__GPA_MASK 0x00000001L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__SYS_MASK 0x00000002L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CID_MASK 0x000000F0L
+#define SDMA0_UCODE_SELFLOAD_CONTROL__CACHE_POLICY_MASK 0x00000300L
+//SDMA0_BROADCAST_UCODE_ADDR
+#define SDMA0_BROADCAST_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA0_BROADCAST_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA0_BROADCAST_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA0_BROADCAST_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA0_BROADCAST_UCODE_DATA
+#define SDMA0_BROADCAST_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA0_BROADCAST_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA0_VM_CTX_LO
+#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA0_VM_CTX_HI
+#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA0_ACTIVE_FCN_ID
+#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA0_VM_CTX_CNTL
+#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA0_VM_CTX_CNTL__MEM_PHY__SHIFT 0x8
+#define SDMA0_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE__SHIFT 0x10
+#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+#define SDMA0_VM_CTX_CNTL__MEM_PHY_MASK 0x00000300L
+#define SDMA0_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE_MASK 0x00010000L
+//SDMA0_VIRT_RESET_REQ
+#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA0_CONTEXT_REG_TYPE0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_CNTL__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_HI__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_HI__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_HI__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_CNTL__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_RPTR__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_OFFSET__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_LO__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_HI__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_SIZE__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_SKIP_CNTL__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_DOORBELL__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED31_19__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_CNTL_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_BASE_HI_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_CNTL_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_RPTR_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_OFFSET_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_LO_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_BASE_HI_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_IB_SIZE_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_SKIP_CNTL_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_QUEUE0_DOORBELL_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE0__RESERVED31_19_MASK 0xFFF80000L
+//SDMA0_CONTEXT_REG_TYPE1
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED8_0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_LOG__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_SCHEDULE_CNTL__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_PREEMPT__SHIFT 0x10
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DUMMY_REG__SHIFT 0x11
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_PREEMPT__SHIFT 0x16
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x17
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED8_0_MASK 0x000001FFL
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_SCHEDULE_CNTL_MASK 0x00004000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_PREEMPT_MASK 0x00010000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_DUMMY_REG_MASK 0x00020000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_QUEUE0_RB_PREEMPT_MASK 0x00400000L
+#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFF800000L
+//SDMA0_CONTEXT_REG_TYPE2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_QUEUE0_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA0_PUB_REG_TYPE0
+#define SDMA0_PUB_REG_TYPE0__SDMA0_DEC_START__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE0__RESERVED_10_1__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE0__SDMA0_F32_MISC_CNTL__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_LO__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_HI__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE0__RESERVED22__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE0__RESERVED23__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE0__RESERVED24__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE0__RESERVED25__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE0__RESERVED27__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE0__SDMA0_DEC_START_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE0__RESERVED_10_1_MASK 0x000007FEL
+#define SDMA0_PUB_REG_TYPE0__SDMA0_F32_MISC_CNTL_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_LO_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GLOBAL_TIMESTAMP_HI_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED22_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED23_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED24_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED25_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE0__RESERVED27_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE1__SDMA0_CNTL1__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM0__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM1__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE1__SDMA0_WATCHDOG_CNTL__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE1__RESERVED15__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE1__RESERVED16__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE1__RESERVED17__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_TIMEOUT__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_PAGE__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_CNTL1_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM0_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_PROCESS_QUANTUM1_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_WATCHDOG_CNTL_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED15_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED16_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE1__RESERVED17_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_TIMEOUT_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_PAGE_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_STATUS__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GLOBAL_QUANTUM__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE2__RESERVE_22_22__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE2__RESERVED23__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE2__RESERVED24__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE2__RESERVED25__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE2__RESERVED26__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RLC_CGCG_CTRL__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE2__SDMA0_AQL_STATUS__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_STATUS_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_STATUS_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GLOBAL_QUANTUM_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE2__RESERVE_22_22_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED23_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED24_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED25_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE2__RESERVED26_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_RLC_CGCG_CTRL_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE2__SDMA0_AQL_STATUS_MASK 0x80000000L
+//SDMA0_PUB_REG_TYPE3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TLBI_GCR_CNTL__SHIFT 0x2
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TILING_CONFIG__SHIFT 0x3
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HASH__SHIFT 0x4
+#define SDMA0_PUB_REG_TYPE3__RESERVED5__SHIFT 0x5
+#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x6
+#define SDMA0_PUB_REG_TYPE3__RESERVED7__SHIFT 0x7
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CE_CTRL__SHIFT 0x8
+#define SDMA0_PUB_REG_TYPE3__SDMA0_FED_STATUS__SHIFT 0x9
+#define SDMA0_PUB_REG_TYPE3__RESERVED10__SHIFT 0xa
+#define SDMA0_PUB_REG_TYPE3__RESERVED11__SHIFT 0xb
+#define SDMA0_PUB_REG_TYPE3__RESERVED12__SHIFT 0xc
+#define SDMA0_PUB_REG_TYPE3__RESERVED13__SHIFT 0xd
+#define SDMA0_PUB_REG_TYPE3__RESERVED14__SHIFT 0xe
+#define SDMA0_PUB_REG_TYPE3__RESERVED15__SHIFT 0xf
+#define SDMA0_PUB_REG_TYPE3__SDMA0_INT_STATUS__SHIFT 0x10
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2__SHIFT 0x11
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_LO__SHIFT 0x12
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_HI__SHIFT 0x13
+#define SDMA0_PUB_REG_TYPE3__RESERVED20__SHIFT 0x14
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CLOCK_GATING_STATUS__SHIFT 0x15
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS4_REG__SHIFT 0x16
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_DATA__SHIFT 0x17
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_ADDR__SHIFT 0x18
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TIMESTAMP_CNTL__SHIFT 0x19
+#define SDMA0_PUB_REG_TYPE3__RESERVED26__SHIFT 0x1a
+#define SDMA0_PUB_REG_TYPE3__RESERVED27__SHIFT 0x1b
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS5_REG__SHIFT 0x1c
+#define SDMA0_PUB_REG_TYPE3__SDMA0_QUEUE_RESET_REQ__SHIFT 0x1d
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS6_REG__SHIFT 0x1e
+#define SDMA0_PUB_REG_TYPE3__SDMA0_UCODE1_CHECKSUM__SHIFT 0x1f
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TLBI_GCR_CNTL_MASK 0x00000004L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TILING_CONFIG_MASK 0x00000008L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HASH_MASK 0x00000010L
+#define SDMA0_PUB_REG_TYPE3__RESERVED5_MASK 0x00000020L
+#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0x00000040L
+#define SDMA0_PUB_REG_TYPE3__RESERVED7_MASK 0x00000080L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CE_CTRL_MASK 0x00000100L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_FED_STATUS_MASK 0x00000200L
+#define SDMA0_PUB_REG_TYPE3__RESERVED10_MASK 0x00000400L
+#define SDMA0_PUB_REG_TYPE3__RESERVED11_MASK 0x00000800L
+#define SDMA0_PUB_REG_TYPE3__RESERVED12_MASK 0x00001000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED13_MASK 0x00002000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED14_MASK 0x00004000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED15_MASK 0x00008000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_INT_STATUS_MASK 0x00010000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2_MASK 0x00020000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_LO_MASK 0x00040000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_HOLE_ADDR_HI_MASK 0x00080000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED20_MASK 0x00100000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_CLOCK_GATING_STATUS_MASK 0x00200000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS4_REG_MASK 0x00400000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_DATA_MASK 0x00800000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_SCRATCH_RAM_ADDR_MASK 0x01000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_TIMESTAMP_CNTL_MASK 0x02000000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED26_MASK 0x04000000L
+#define SDMA0_PUB_REG_TYPE3__RESERVED27_MASK 0x08000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS5_REG_MASK 0x10000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_QUEUE_RESET_REQ_MASK 0x20000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_STATUS6_REG_MASK 0x40000000L
+#define SDMA0_PUB_REG_TYPE3__SDMA0_UCODE1_CHECKSUM_MASK 0x80000000L
+//SDMA0_VM_CNTL
+#define SDMA0_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA0_F32_CNTL
+#define SDMA0_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA0_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA0_F32_CNTL__TH0_CHECKSUM_CLR__SHIFT 0x8
+#define SDMA0_F32_CNTL__TH0_RESET__SHIFT 0x9
+#define SDMA0_F32_CNTL__TH0_ENABLE__SHIFT 0xa
+#define SDMA0_F32_CNTL__TH1_CHECKSUM_CLR__SHIFT 0xc
+#define SDMA0_F32_CNTL__TH1_RESET__SHIFT 0xd
+#define SDMA0_F32_CNTL__TH1_ENABLE__SHIFT 0xe
+#define SDMA0_F32_CNTL__TH0_PRIORITY__SHIFT 0x10
+#define SDMA0_F32_CNTL__TH1_PRIORITY__SHIFT 0x18
+#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA0_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA0_F32_CNTL__TH0_CHECKSUM_CLR_MASK 0x00000100L
+#define SDMA0_F32_CNTL__TH0_RESET_MASK 0x00000200L
+#define SDMA0_F32_CNTL__TH0_ENABLE_MASK 0x00000400L
+#define SDMA0_F32_CNTL__TH1_CHECKSUM_CLR_MASK 0x00001000L
+#define SDMA0_F32_CNTL__TH1_RESET_MASK 0x00002000L
+#define SDMA0_F32_CNTL__TH1_ENABLE_MASK 0x00004000L
+#define SDMA0_F32_CNTL__TH0_PRIORITY_MASK 0x00FF0000L
+#define SDMA0_F32_CNTL__TH1_PRIORITY_MASK 0xFF000000L
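A second minimal sketch (again not part of this patch): a read-modify-write of the SDMA0_F32_CNTL HALT bit using the SHIFT/MASK pair above. The sdma_rreg()/sdma_wreg() accessors and the mmSDMA0_F32_CNTL offset are hypothetical stand-ins; the real driver goes through its own register accessors and the offsets from the companion _offset.h header.

#include <linux/types.h>

static void sdma0_f32_set_halt(bool halt)
{
	/* sdma_rreg()/mmSDMA0_F32_CNTL are assumed for this example */
	u32 v = sdma_rreg(mmSDMA0_F32_CNTL);

	/* clear the HALT field, then set it to the requested value */
	v &= ~SDMA0_F32_CNTL__HALT_MASK;
	v |= (halt ? 1u : 0u) << SDMA0_F32_CNTL__HALT__SHIFT;

	sdma_wreg(mmSDMA0_F32_CNTL, v);	/* assumed accessor */
}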
+
+
+// addressBlock: gc_sdma0_sdma1hypdec
+//SDMA1_UCODE_ADDR
+#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA1_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA1_UCODE_DATA
+#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_UCODE_SELFLOAD_CONTROL
+#define SDMA1_UCODE_SELFLOAD_CONTROL__GPA__SHIFT 0x0
+#define SDMA1_UCODE_SELFLOAD_CONTROL__SYS__SHIFT 0x1
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CID__SHIFT 0x4
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CACHE_POLICY__SHIFT 0x8
+#define SDMA1_UCODE_SELFLOAD_CONTROL__GPA_MASK 0x00000001L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__SYS_MASK 0x00000002L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CID_MASK 0x000000F0L
+#define SDMA1_UCODE_SELFLOAD_CONTROL__CACHE_POLICY_MASK 0x00000300L
+//SDMA1_BROADCAST_UCODE_ADDR
+#define SDMA1_BROADCAST_UCODE_ADDR__VALUE__SHIFT 0x0
+#define SDMA1_BROADCAST_UCODE_ADDR__THID__SHIFT 0xf
+#define SDMA1_BROADCAST_UCODE_ADDR__VALUE_MASK 0x00001FFFL
+#define SDMA1_BROADCAST_UCODE_ADDR__THID_MASK 0x00008000L
+//SDMA1_BROADCAST_UCODE_DATA
+#define SDMA1_BROADCAST_UCODE_DATA__VALUE__SHIFT 0x0
+#define SDMA1_BROADCAST_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL
+//SDMA1_VM_CTX_LO
+#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2
+#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL
+//SDMA1_VM_CTX_HI
+#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0
+#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL
+//SDMA1_ACTIVE_FCN_ID
+#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4
+#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f
+#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L
+#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L
+//SDMA1_VM_CTX_CNTL
+#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0
+#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4
+#define SDMA1_VM_CTX_CNTL__MEM_PHY__SHIFT 0x8
+#define SDMA1_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE__SHIFT 0x10
+#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L
+#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L
+#define SDMA1_VM_CTX_CNTL__MEM_PHY_MASK 0x00000300L
+#define SDMA1_VM_CTX_CNTL__BUSY_STATUS_REPORT_ENABLE_MASK 0x00010000L
+//SDMA1_VIRT_RESET_REQ
+#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//SDMA1_CONTEXT_REG_TYPE0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_CNTL__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_HI__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_HI__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_HI__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_HI__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_LO__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_CNTL__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_RPTR__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_OFFSET__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_LO__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_HI__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_SIZE__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_SKIP_CNTL__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_CONTEXT_STATUS__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_DOORBELL__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED31_19__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_CNTL_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_BASE_HI_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_HI_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_WPTR_HI_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_HI_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_RB_RPTR_ADDR_LO_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_CNTL_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_RPTR_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_OFFSET_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_LO_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_BASE_HI_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_IB_SIZE_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_SKIP_CNTL_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_CONTEXT_STATUS_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_QUEUE0_DOORBELL_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE0__RESERVED31_19_MASK 0xFFF80000L
+//SDMA1_CONTEXT_REG_TYPE1
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED8_0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_LOG__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_OFFSET__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_LO__SHIFT 0xc
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_HI__SHIFT 0xd
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_SCHEDULE_CNTL__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_IB_SUB_REMAIN__SHIFT 0xf
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_PREEMPT__SHIFT 0x10
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DUMMY_REG__SHIFT 0x11
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_AQL_CNTL__SHIFT 0x14
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_MINOR_PTR_UPDATE__SHIFT 0x15
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_PREEMPT__SHIFT 0x16
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x17
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED8_0_MASK 0x000001FFL
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_LOG_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DOORBELL_OFFSET_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_LO_MASK 0x00001000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_CSA_ADDR_HI_MASK 0x00002000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_SCHEDULE_CNTL_MASK 0x00004000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_IB_SUB_REMAIN_MASK 0x00008000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_PREEMPT_MASK 0x00010000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_DUMMY_REG_MASK 0x00020000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_AQL_CNTL_MASK 0x00100000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_MINOR_PTR_UPDATE_MASK 0x00200000L
+#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_QUEUE0_RB_PREEMPT_MASK 0x00400000L
+#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFF800000L
+//SDMA1_CONTEXT_REG_TYPE2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA0__SHIFT 0x0
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA1__SHIFT 0x1
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA2__SHIFT 0x2
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA3__SHIFT 0x3
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA4__SHIFT 0x4
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA5__SHIFT 0x5
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA6__SHIFT 0x6
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA7__SHIFT 0x7
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA8__SHIFT 0x8
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA9__SHIFT 0x9
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA10__SHIFT 0xa
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_CNTL__SHIFT 0xb
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xe
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA0_MASK 0x00000001L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA1_MASK 0x00000002L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA2_MASK 0x00000004L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA3_MASK 0x00000008L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA4_MASK 0x00000010L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA5_MASK 0x00000020L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA6_MASK 0x00000040L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA7_MASK 0x00000080L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA8_MASK 0x00000100L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA9_MASK 0x00000200L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_DATA10_MASK 0x00000400L
+#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_QUEUE0_MIDCMD_CNTL_MASK 0x00000800L
+#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFC000L
+//SDMA1_PUB_REG_TYPE0
+#define SDMA1_PUB_REG_TYPE0__SDMA1_DEC_START__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE0__RESERVED_10_1__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE0__SDMA1_F32_MISC_CNTL__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_LO__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_HI__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE0__RESERVED22__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE0__RESERVED23__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE0__RESERVED24__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE0__RESERVED25__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE0__RESERVED27__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE0__SDMA1_DEC_START_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE0__RESERVED_10_1_MASK 0x000007FEL
+#define SDMA1_PUB_REG_TYPE0__SDMA1_F32_MISC_CNTL_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_LO_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GLOBAL_TIMESTAMP_HI_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED22_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED23_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED24_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED25_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE0__RESERVED27_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE1__SDMA1_CNTL1__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE1__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM0__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM1__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE1__SDMA1_WATCHDOG_CNTL__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE1__RESERVED15__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE1__RESERVED16__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE1__RESERVED17__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_TIMEOUT__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_PAGE__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_CNTL1_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE1__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM0_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_PROCESS_QUANTUM1_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_WATCHDOG_CNTL_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED15_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED16_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE1__RESERVED17_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_TIMEOUT_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_PAGE_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_STATUS__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GLOBAL_QUANTUM__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE2__RESERVE_22_22__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE2__RESERVED23__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE2__RESERVED24__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE2__RESERVED25__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE2__RESERVED26__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RLC_CGCG_CTRL__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE2__SDMA1_AQL_STATUS__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_STATUS_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_STATUS_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GLOBAL_QUANTUM_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE2__RESERVE_22_22_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED23_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED24_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED25_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE2__RESERVED26_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_RLC_CGCG_CTRL_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE2__SDMA1_AQL_STATUS_MASK 0x80000000L
+//SDMA1_PUB_REG_TYPE3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TLBI_GCR_CNTL__SHIFT 0x2
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TILING_CONFIG__SHIFT 0x3
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HASH__SHIFT 0x4
+#define SDMA1_PUB_REG_TYPE3__RESERVED5__SHIFT 0x5
+#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x6
+#define SDMA1_PUB_REG_TYPE3__RESERVED7__SHIFT 0x7
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CE_CTRL__SHIFT 0x8
+#define SDMA1_PUB_REG_TYPE3__SDMA1_FED_STATUS__SHIFT 0x9
+#define SDMA1_PUB_REG_TYPE3__RESERVED10__SHIFT 0xa
+#define SDMA1_PUB_REG_TYPE3__RESERVED11__SHIFT 0xb
+#define SDMA1_PUB_REG_TYPE3__RESERVED12__SHIFT 0xc
+#define SDMA1_PUB_REG_TYPE3__RESERVED13__SHIFT 0xd
+#define SDMA1_PUB_REG_TYPE3__RESERVED14__SHIFT 0xe
+#define SDMA1_PUB_REG_TYPE3__RESERVED15__SHIFT 0xf
+#define SDMA1_PUB_REG_TYPE3__SDMA1_INT_STATUS__SHIFT 0x10
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2__SHIFT 0x11
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_LO__SHIFT 0x12
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_HI__SHIFT 0x13
+#define SDMA1_PUB_REG_TYPE3__RESERVED20__SHIFT 0x14
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CLOCK_GATING_STATUS__SHIFT 0x15
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS4_REG__SHIFT 0x16
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_DATA__SHIFT 0x17
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_ADDR__SHIFT 0x18
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TIMESTAMP_CNTL__SHIFT 0x19
+#define SDMA1_PUB_REG_TYPE3__RESERVED26__SHIFT 0x1a
+#define SDMA1_PUB_REG_TYPE3__RESERVED27__SHIFT 0x1b
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS5_REG__SHIFT 0x1c
+#define SDMA1_PUB_REG_TYPE3__SDMA1_QUEUE_RESET_REQ__SHIFT 0x1d
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS6_REG__SHIFT 0x1e
+#define SDMA1_PUB_REG_TYPE3__SDMA1_UCODE1_CHECKSUM__SHIFT 0x1f
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TLBI_GCR_CNTL_MASK 0x00000004L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TILING_CONFIG_MASK 0x00000008L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HASH_MASK 0x00000010L
+#define SDMA1_PUB_REG_TYPE3__RESERVED5_MASK 0x00000020L
+#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0x00000040L
+#define SDMA1_PUB_REG_TYPE3__RESERVED7_MASK 0x00000080L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CE_CTRL_MASK 0x00000100L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_FED_STATUS_MASK 0x00000200L
+#define SDMA1_PUB_REG_TYPE3__RESERVED10_MASK 0x00000400L
+#define SDMA1_PUB_REG_TYPE3__RESERVED11_MASK 0x00000800L
+#define SDMA1_PUB_REG_TYPE3__RESERVED12_MASK 0x00001000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED13_MASK 0x00002000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED14_MASK 0x00004000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED15_MASK 0x00008000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_INT_STATUS_MASK 0x00010000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2_MASK 0x00020000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_LO_MASK 0x00040000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_HOLE_ADDR_HI_MASK 0x00080000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED20_MASK 0x00100000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_CLOCK_GATING_STATUS_MASK 0x00200000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS4_REG_MASK 0x00400000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_DATA_MASK 0x00800000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_SCRATCH_RAM_ADDR_MASK 0x01000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_TIMESTAMP_CNTL_MASK 0x02000000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED26_MASK 0x04000000L
+#define SDMA1_PUB_REG_TYPE3__RESERVED27_MASK 0x08000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS5_REG_MASK 0x10000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_QUEUE_RESET_REQ_MASK 0x20000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_STATUS6_REG_MASK 0x40000000L
+#define SDMA1_PUB_REG_TYPE3__SDMA1_UCODE1_CHECKSUM_MASK 0x80000000L
+//SDMA1_VM_CNTL
+#define SDMA1_VM_CNTL__CMD__SHIFT 0x0
+#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL
+//SDMA1_F32_CNTL
+#define SDMA1_F32_CNTL__HALT__SHIFT 0x0
+#define SDMA1_F32_CNTL__DBG_SELECT_BITS__SHIFT 0x2
+#define SDMA1_F32_CNTL__TH0_CHECKSUM_CLR__SHIFT 0x8
+#define SDMA1_F32_CNTL__TH0_RESET__SHIFT 0x9
+#define SDMA1_F32_CNTL__TH0_ENABLE__SHIFT 0xa
+#define SDMA1_F32_CNTL__TH1_CHECKSUM_CLR__SHIFT 0xc
+#define SDMA1_F32_CNTL__TH1_RESET__SHIFT 0xd
+#define SDMA1_F32_CNTL__TH1_ENABLE__SHIFT 0xe
+#define SDMA1_F32_CNTL__TH0_PRIORITY__SHIFT 0x10
+#define SDMA1_F32_CNTL__TH1_PRIORITY__SHIFT 0x18
+#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L
+#define SDMA1_F32_CNTL__DBG_SELECT_BITS_MASK 0x000000FCL
+#define SDMA1_F32_CNTL__TH0_CHECKSUM_CLR_MASK 0x00000100L
+#define SDMA1_F32_CNTL__TH0_RESET_MASK 0x00000200L
+#define SDMA1_F32_CNTL__TH0_ENABLE_MASK 0x00000400L
+#define SDMA1_F32_CNTL__TH1_CHECKSUM_CLR_MASK 0x00001000L
+#define SDMA1_F32_CNTL__TH1_RESET_MASK 0x00002000L
+#define SDMA1_F32_CNTL__TH1_ENABLE_MASK 0x00004000L
+#define SDMA1_F32_CNTL__TH0_PRIORITY_MASK 0x00FF0000L
+#define SDMA1_F32_CNTL__TH1_PRIORITY_MASK 0xFF000000L
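The SHIFT/MASK pairs above are consumed with the usual read-modify-write idiom. The sketch below is illustrative only and is not part of the generated header or the patch; the rreg()/wreg() helpers and their backing variable are placeholder assumptions standing in for the driver's real MMIO accessors.

#include <stdint.h>

/* Placeholder MMIO accessors for illustration only; not amdgpu APIs. */
static uint32_t sdma1_f32_cntl_shadow;
static uint32_t rreg(void) { return sdma1_f32_cntl_shadow; }
static void wreg(uint32_t v) { sdma1_f32_cntl_shadow = v; }

/* Set or clear SDMA1_F32_CNTL.HALT using the definitions above. */
static void sdma1_f32_set_halt(unsigned int halt)
{
	uint32_t v = rreg();

	v &= ~SDMA1_F32_CNTL__HALT_MASK;                         /* clear the field   */
	v |= (halt << SDMA1_F32_CNTL__HALT__SHIFT) &
	     SDMA1_F32_CNTL__HALT_MASK;                          /* insert new value  */
	wreg(v);
}

/* Read back the TH0_PRIORITY field (bits 23:16). */
static unsigned int sdma1_f32_get_th0_priority(void)
{
	return (rreg() & SDMA1_F32_CNTL__TH0_PRIORITY_MASK) >>
	       SDMA1_F32_CNTL__TH0_PRIORITY__SHIFT;              /* extract the field */
}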
+
+
+// addressBlock: gc_sdma0_sdma0perfsdec
+//SDMA0_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA0_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA0_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA0_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA0_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA0_PERFCNT_MISC_CNTL
+#define SDMA0_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA0_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+//SDMA0_PERFCOUNTER0_SELECT
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA0_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER0_SELECT1
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER1_SELECT
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA0_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA0_PERFCOUNTER1_SELECT1
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA0_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+
+
+// addressBlock: gc_sdma0_sdma1perfsdec
+//SDMA1_PERFCNT_PERFCOUNTER0_CFG
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define SDMA1_PERFCNT_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//SDMA1_PERFCNT_PERFCOUNTER1_CFG
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define SDMA1_PERFCNT_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define SDMA1_PERFCNT_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//SDMA1_PERFCNT_MISC_CNTL
+#define SDMA1_PERFCNT_MISC_CNTL__CMD_OP__SHIFT 0x0
+#define SDMA1_PERFCNT_MISC_CNTL__CMD_OP_MASK 0x0000FFFFL
+//SDMA1_PERFCOUNTER0_SELECT
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA1_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER0_SELECT1
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER1_SELECT
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SDMA1_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SDMA1_PERFCOUNTER1_SELECT1
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SDMA1_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+
+
+// addressBlock: gc_sdma0_sdma0perfddec
+//SDMA0_PERFCNT_PERFCOUNTER_LO
+#define SDMA0_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCNT_PERFCOUNTER_HI
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA0_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA0_PERFCOUNTER0_LO
+#define SDMA0_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER0_HI
+#define SDMA0_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_LO
+#define SDMA0_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA0_PERFCOUNTER1_HI
+#define SDMA0_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA0_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_sdma0_sdma1perfddec
+//SDMA1_PERFCNT_PERFCOUNTER_LO
+#define SDMA1_PERFCNT_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCNT_PERFCOUNTER_HI
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//SDMA1_PERFCOUNTER0_LO
+#define SDMA1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER0_HI
+#define SDMA1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_LO
+#define SDMA1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SDMA1_PERFCOUNTER1_HI
+#define SDMA1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SDMA1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
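In these *_LO/*_HI pairs the PERFCNT counter's HI register carries COUNTER_HI in its low 16 bits and COMPARE_VALUE in the upper 16. Below is a hedged sketch (not part of the header) of combining a sampled pair into one 48-bit count; the "low 32 bits in LO, upper 16 bits in COUNTER_HI" layout is inferred from the field widths, not stated by the generated header.

#include <stdint.h>

/* Combine a sampled SDMA1_PERFCNT_PERFCOUNTER_{LO,HI} pair into a single
 * 48-bit value, using only the field definitions above. The split between
 * LO and COUNTER_HI is an assumption for illustration.
 */
static uint64_t sdma1_perfcnt_value(uint32_t lo, uint32_t hi)
{
	uint64_t upper = (hi & SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI_MASK) >>
			 SDMA1_PERFCNT_PERFCOUNTER_HI__COUNTER_HI__SHIFT;

	return (upper << 32) | lo;
}

/* The remaining HI bits hold the programmed compare value. */
static uint32_t sdma1_perfcnt_compare(uint32_t hi)
{
	return (hi & SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE_MASK) >>
	       SDMA1_PERFCNT_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT;
}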
+
+
+// addressBlock: gc_grbmdec
+//GRBM_CNTL
+#define GRBM_CNTL__READ_TIMEOUT__SHIFT 0x0
+#define GRBM_CNTL__REPORT_LAST_RDERR__SHIFT 0x1f
+#define GRBM_CNTL__READ_TIMEOUT_MASK 0x000000FFL
+#define GRBM_CNTL__REPORT_LAST_RDERR_MASK 0x80000000L
+//GRBM_SKEW_CNTL
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD__SHIFT 0x0
+#define GRBM_SKEW_CNTL__SKEW_COUNT__SHIFT 0x6
+#define GRBM_SKEW_CNTL__SKEW_TOP_THRESHOLD_MASK 0x0000003FL
+#define GRBM_SKEW_CNTL__SKEW_COUNT_MASK 0x00000FC0L
+//GRBM_STATUS2
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING__SHIFT 0x4
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS2__RLC_RQ_PENDING__SHIFT 0xe
+#define GRBM_STATUS2__UTCL2_BUSY__SHIFT 0xf
+#define GRBM_STATUS2__EA_BUSY__SHIFT 0x10
+#define GRBM_STATUS2__RMI_BUSY__SHIFT 0x11
+#define GRBM_STATUS2__UTCL2_RQ_PENDING__SHIFT 0x12
+#define GRBM_STATUS2__SDMA_SCH_RQ_PENDING__SHIFT 0x13
+#define GRBM_STATUS2__EA_LINK_BUSY__SHIFT 0x14
+#define GRBM_STATUS2__SDMA_BUSY__SHIFT 0x15
+#define GRBM_STATUS2__SDMA0_RQ_PENDING__SHIFT 0x16
+#define GRBM_STATUS2__SDMA1_RQ_PENDING__SHIFT 0x17
+#define GRBM_STATUS2__RLC_BUSY__SHIFT 0x1a
+#define GRBM_STATUS2__TCP_BUSY__SHIFT 0x1b
+#define GRBM_STATUS2__CPF_BUSY__SHIFT 0x1c
+#define GRBM_STATUS2__CPC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS2__CPG_BUSY__SHIFT 0x1e
+#define GRBM_STATUS2__CPAXI_BUSY__SHIFT 0x1f
+#define GRBM_STATUS2__ME0PIPE1_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS2__ME0PIPE1_CF_RQ_PENDING_MASK 0x00000010L
+#define GRBM_STATUS2__ME0PIPE1_PF_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS2__ME1PIPE0_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS2__ME1PIPE1_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS2__ME1PIPE2_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS2__ME1PIPE3_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS2__RLC_RQ_PENDING_MASK 0x00004000L
+#define GRBM_STATUS2__UTCL2_BUSY_MASK 0x00008000L
+#define GRBM_STATUS2__EA_BUSY_MASK 0x00010000L
+#define GRBM_STATUS2__RMI_BUSY_MASK 0x00020000L
+#define GRBM_STATUS2__UTCL2_RQ_PENDING_MASK 0x00040000L
+#define GRBM_STATUS2__SDMA_SCH_RQ_PENDING_MASK 0x00080000L
+#define GRBM_STATUS2__EA_LINK_BUSY_MASK 0x00100000L
+#define GRBM_STATUS2__SDMA_BUSY_MASK 0x00200000L
+#define GRBM_STATUS2__SDMA0_RQ_PENDING_MASK 0x00400000L
+#define GRBM_STATUS2__SDMA1_RQ_PENDING_MASK 0x00800000L
+#define GRBM_STATUS2__RLC_BUSY_MASK 0x04000000L
+#define GRBM_STATUS2__TCP_BUSY_MASK 0x08000000L
+#define GRBM_STATUS2__CPF_BUSY_MASK 0x10000000L
+#define GRBM_STATUS2__CPC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS2__CPG_BUSY_MASK 0x40000000L
+#define GRBM_STATUS2__CPAXI_BUSY_MASK 0x80000000L
+//GRBM_PWR_CNTL
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE__SHIFT 0x0
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE__SHIFT 0x2
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE__SHIFT 0x4
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE__SHIFT 0x6
+#define GRBM_PWR_CNTL__GFX_REQ_EN__SHIFT 0xe
+#define GRBM_PWR_CNTL__ALL_REQ_EN__SHIFT 0xf
+#define GRBM_PWR_CNTL__ALL_REQ_TYPE_MASK 0x00000003L
+#define GRBM_PWR_CNTL__GFX_REQ_TYPE_MASK 0x0000000CL
+#define GRBM_PWR_CNTL__ALL_RSP_TYPE_MASK 0x00000030L
+#define GRBM_PWR_CNTL__GFX_RSP_TYPE_MASK 0x000000C0L
+#define GRBM_PWR_CNTL__GFX_REQ_EN_MASK 0x00004000L
+#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
+//GRBM_STATUS
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
+#define GRBM_STATUS__SDMA_RQ_PENDING__SHIFT 0x6
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS__DB_CLEAN__SHIFT 0xc
+#define GRBM_STATUS__CB_CLEAN__SHIFT 0xd
+#define GRBM_STATUS__TA_BUSY__SHIFT 0xe
+#define GRBM_STATUS__GDS_BUSY__SHIFT 0xf
+#define GRBM_STATUS__GE_BUSY_NO_DMA__SHIFT 0x10
+#define GRBM_STATUS__SX_BUSY__SHIFT 0x14
+#define GRBM_STATUS__GE_BUSY__SHIFT 0x15
+#define GRBM_STATUS__SPI_BUSY__SHIFT 0x16
+#define GRBM_STATUS__BCI_BUSY__SHIFT 0x17
+#define GRBM_STATUS__SC_BUSY__SHIFT 0x18
+#define GRBM_STATUS__PA_BUSY__SHIFT 0x19
+#define GRBM_STATUS__DB_BUSY__SHIFT 0x1a
+#define GRBM_STATUS__ANY_ACTIVE__SHIFT 0x1b
+#define GRBM_STATUS__CP_COHERENCY_BUSY__SHIFT 0x1c
+#define GRBM_STATUS__CP_BUSY__SHIFT 0x1d
+#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
+#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
+#define GRBM_STATUS__SDMA_RQ_PENDING_MASK 0x00000040L
+#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
+#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS__DB_CLEAN_MASK 0x00001000L
+#define GRBM_STATUS__CB_CLEAN_MASK 0x00002000L
+#define GRBM_STATUS__TA_BUSY_MASK 0x00004000L
+#define GRBM_STATUS__GDS_BUSY_MASK 0x00008000L
+#define GRBM_STATUS__GE_BUSY_NO_DMA_MASK 0x00010000L
+#define GRBM_STATUS__SX_BUSY_MASK 0x00100000L
+#define GRBM_STATUS__GE_BUSY_MASK 0x00200000L
+#define GRBM_STATUS__SPI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS__BCI_BUSY_MASK 0x00800000L
+#define GRBM_STATUS__SC_BUSY_MASK 0x01000000L
+#define GRBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS__DB_BUSY_MASK 0x04000000L
+#define GRBM_STATUS__ANY_ACTIVE_MASK 0x08000000L
+#define GRBM_STATUS__CP_COHERENCY_BUSY_MASK 0x10000000L
+#define GRBM_STATUS__CP_BUSY_MASK 0x20000000L
+#define GRBM_STATUS__CB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+//GRBM_STATUS_SE0
+#define GRBM_STATUS_SE0__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE0__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE0__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE0__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE0__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE0__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE0__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE0__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE0__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE0__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE0__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE0__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE0__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE0__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE0__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE0__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE0__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE0__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE0__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE0__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE0__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE0__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE0__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE0__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE0__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE0__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE0__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE0__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE0__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE0__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE0__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE0__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE0__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE0__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS_SE1
+#define GRBM_STATUS_SE1__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE1__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE1__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE1__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE1__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE1__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE1__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE1__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE1__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE1__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE1__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE1__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE1__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE1__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE1__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE1__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE1__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE1__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE1__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE1__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE1__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE1__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE1__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE1__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE1__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE1__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE1__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE1__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE1__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE1__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE1__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE1__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE1__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE1__CB_BUSY_MASK 0x80000000L
+//GRBM_STATUS3
+#define GRBM_STATUS3__GRBM_RLC_INTR_CREDIT_PENDING__SHIFT 0x5
+#define GRBM_STATUS3__GRBM_CPF_INTR_CREDIT_PENDING__SHIFT 0x7
+#define GRBM_STATUS3__MESPIPE0_RQ_PENDING__SHIFT 0x8
+#define GRBM_STATUS3__MESPIPE1_RQ_PENDING__SHIFT 0x9
+#define GRBM_STATUS3__PH_BUSY__SHIFT 0xd
+#define GRBM_STATUS3__CH_BUSY__SHIFT 0xe
+#define GRBM_STATUS3__GL2CC_BUSY__SHIFT 0xf
+#define GRBM_STATUS3__GL1CC_BUSY__SHIFT 0x10
+#define GRBM_STATUS3__SEDC_BUSY__SHIFT 0x19
+#define GRBM_STATUS3__PC_BUSY__SHIFT 0x1a
+#define GRBM_STATUS3__GL1H_BUSY__SHIFT 0x1b
+#define GRBM_STATUS3__GUS_LINK_BUSY__SHIFT 0x1c
+#define GRBM_STATUS3__GUS_BUSY__SHIFT 0x1d
+#define GRBM_STATUS3__UTCL1_BUSY__SHIFT 0x1e
+#define GRBM_STATUS3__PMM_BUSY__SHIFT 0x1f
+#define GRBM_STATUS3__GRBM_RLC_INTR_CREDIT_PENDING_MASK 0x00000020L
+#define GRBM_STATUS3__GRBM_CPF_INTR_CREDIT_PENDING_MASK 0x00000080L
+#define GRBM_STATUS3__MESPIPE0_RQ_PENDING_MASK 0x00000100L
+#define GRBM_STATUS3__MESPIPE1_RQ_PENDING_MASK 0x00000200L
+#define GRBM_STATUS3__PH_BUSY_MASK 0x00002000L
+#define GRBM_STATUS3__CH_BUSY_MASK 0x00004000L
+#define GRBM_STATUS3__GL2CC_BUSY_MASK 0x00008000L
+#define GRBM_STATUS3__GL1CC_BUSY_MASK 0x00010000L
+#define GRBM_STATUS3__SEDC_BUSY_MASK 0x02000000L
+#define GRBM_STATUS3__PC_BUSY_MASK 0x04000000L
+#define GRBM_STATUS3__GL1H_BUSY_MASK 0x08000000L
+#define GRBM_STATUS3__GUS_LINK_BUSY_MASK 0x10000000L
+#define GRBM_STATUS3__GUS_BUSY_MASK 0x20000000L
+#define GRBM_STATUS3__UTCL1_BUSY_MASK 0x40000000L
+#define GRBM_STATUS3__PMM_BUSY_MASK 0x80000000L
+//GRBM_SOFT_RESET
+#define GRBM_SOFT_RESET__SOFT_RESET_CP__SHIFT 0x0
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC__SHIFT 0x2
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2__SHIFT 0xf
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX__SHIFT 0x10
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF__SHIFT 0x11
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC__SHIFT 0x12
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG__SHIFT 0x13
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC__SHIFT 0x14
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI__SHIFT 0x15
+#define GRBM_SOFT_RESET__SOFT_RESET_EA__SHIFT 0x16
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT 0x17
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA1__SHIFT 0x18
+#define GRBM_SOFT_RESET__SOFT_RESET_CP_MASK 0x00000001L
+#define GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK 0x00000004L
+#define GRBM_SOFT_RESET__SOFT_RESET_UTCL2_MASK 0x00008000L
+#define GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK 0x00010000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPF_MASK 0x00020000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPC_MASK 0x00040000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPG_MASK 0x00080000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CAC_MASK 0x00100000L
+#define GRBM_SOFT_RESET__SOFT_RESET_CPAXI_MASK 0x00200000L
+#define GRBM_SOFT_RESET__SOFT_RESET_EA_MASK 0x00400000L
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK 0x00800000L
+#define GRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK 0x01000000L
+//GRBM_GFX_CLKEN_CNTL
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT__SHIFT 0x0
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT__SHIFT 0x8
+#define GRBM_GFX_CLKEN_CNTL__PREFIX_DELAY_CNT_MASK 0x0000000FL
+#define GRBM_GFX_CLKEN_CNTL__POST_DELAY_CNT_MASK 0x00001F00L
+//GRBM_WAIT_IDLE_CLOCKS
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS__SHIFT 0x0
+#define GRBM_WAIT_IDLE_CLOCKS__WAIT_IDLE_CLOCKS_MASK 0x000000FFL
+//GRBM_STATUS_SE2
+#define GRBM_STATUS_SE2__DB_CLEAN__SHIFT 0x1
+#define GRBM_STATUS_SE2__CB_CLEAN__SHIFT 0x2
+#define GRBM_STATUS_SE2__UTCL1_BUSY__SHIFT 0x3
+#define GRBM_STATUS_SE2__TCP_BUSY__SHIFT 0x4
+#define GRBM_STATUS_SE2__GL1CC_BUSY__SHIFT 0x5
+#define GRBM_STATUS_SE2__GL1H_BUSY__SHIFT 0x6
+#define GRBM_STATUS_SE2__PC_BUSY__SHIFT 0x7
+#define GRBM_STATUS_SE2__SEDC_BUSY__SHIFT 0x8
+#define GRBM_STATUS_SE2__RMI_BUSY__SHIFT 0x15
+#define GRBM_STATUS_SE2__BCI_BUSY__SHIFT 0x16
+#define GRBM_STATUS_SE2__PA_BUSY__SHIFT 0x18
+#define GRBM_STATUS_SE2__TA_BUSY__SHIFT 0x19
+#define GRBM_STATUS_SE2__SX_BUSY__SHIFT 0x1a
+#define GRBM_STATUS_SE2__SPI_BUSY__SHIFT 0x1b
+#define GRBM_STATUS_SE2__SC_BUSY__SHIFT 0x1d
+#define GRBM_STATUS_SE2__DB_BUSY__SHIFT 0x1e
+#define GRBM_STATUS_SE2__CB_BUSY__SHIFT 0x1f
+#define GRBM_STATUS_SE2__DB_CLEAN_MASK 0x00000002L
+#define GRBM_STATUS_SE2__CB_CLEAN_MASK 0x00000004L
+#define GRBM_STATUS_SE2__UTCL1_BUSY_MASK 0x00000008L
+#define GRBM_STATUS_SE2__TCP_BUSY_MASK 0x00000010L
+#define GRBM_STATUS_SE2__GL1CC_BUSY_MASK 0x00000020L
+#define GRBM_STATUS_SE2__GL1H_BUSY_MASK 0x00000040L
+#define GRBM_STATUS_SE2__PC_BUSY_MASK 0x00000080L
+#define GRBM_STATUS_SE2__SEDC_BUSY_MASK 0x00000100L
+#define GRBM_STATUS_SE2__RMI_BUSY_MASK 0x00200000L
+#define GRBM_STATUS_SE2__BCI_BUSY_MASK 0x00400000L
+#define GRBM_STATUS_SE2__PA_BUSY_MASK 0x01000000L
+#define GRBM_STATUS_SE2__TA_BUSY_MASK 0x02000000L
+#define GRBM_STATUS_SE2__SX_BUSY_MASK 0x04000000L
+#define GRBM_STATUS_SE2__SPI_BUSY_MASK 0x08000000L
+#define GRBM_STATUS_SE2__SC_BUSY_MASK 0x20000000L
+#define GRBM_STATUS_SE2__DB_BUSY_MASK 0x40000000L
+#define GRBM_STATUS_SE2__CB_BUSY_MASK 0x80000000L
+//GRBM_READ_ERROR
+#define GRBM_READ_ERROR__READ_ADDRESS__SHIFT 0x2
+#define GRBM_READ_ERROR__READ_PIPEID__SHIFT 0x14
+#define GRBM_READ_ERROR__READ_MEID__SHIFT 0x16
+#define GRBM_READ_ERROR__READ_ERROR__SHIFT 0x1f
+#define GRBM_READ_ERROR__READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_READ_ERROR__READ_PIPEID_MASK 0x00300000L
+#define GRBM_READ_ERROR__READ_MEID_MASK 0x00C00000L
+#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
+//GRBM_READ_ERROR2
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE0__SHIFT 0x9
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE1__SHIFT 0xa
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE2__SHIFT 0xb
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE3__SHIFT 0xc
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA0__SHIFT 0xd
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA1__SHIFT 0xe
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF__SHIFT 0x15
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF__SHIFT 0x16
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF__SHIFT 0x17
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0__SHIFT 0x18
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1__SHIFT 0x19
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2__SHIFT 0x1a
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3__SHIFT 0x1b
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0__SHIFT 0x1c
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1__SHIFT 0x1d
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE0_MASK 0x00000200L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE1_MASK 0x00000400L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE2_MASK 0x00000800L
+#define GRBM_READ_ERROR2__READ_REQUESTER_MESPIPE3_MASK 0x00001000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA0_MASK 0x00002000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_SDMA1_MASK 0x00004000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_PF_MASK 0x00200000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_CF_MASK 0x00400000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE1_PF_MASK 0x00800000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE0_MASK 0x01000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE1_MASK 0x02000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE2_MASK 0x04000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME1PIPE3_MASK 0x08000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE0_MASK 0x10000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE1_MASK 0x20000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2_MASK 0x40000000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3_MASK 0x80000000L
+//GRBM_INT_CNTL
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE__SHIFT 0x0
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE__SHIFT 0x13
+#define GRBM_INT_CNTL__RDERR_INT_ENABLE_MASK 0x00000001L
+#define GRBM_INT_CNTL__GUI_IDLE_INT_ENABLE_MASK 0x00080000L
+//GRBM_TRAP_OP
+#define GRBM_TRAP_OP__RW__SHIFT 0x0
+#define GRBM_TRAP_OP__RW_MASK 0x00000001L
+//GRBM_TRAP_ADDR
+#define GRBM_TRAP_ADDR__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_ADDR_MSK
+#define GRBM_TRAP_ADDR_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_ADDR_MSK__DATA_MASK 0x0003FFFFL
+//GRBM_TRAP_WD
+#define GRBM_TRAP_WD__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD__DATA_MASK 0xFFFFFFFFL
+//GRBM_TRAP_WD_MSK
+#define GRBM_TRAP_WD_MSK__DATA__SHIFT 0x0
+#define GRBM_TRAP_WD_MSK__DATA_MASK 0xFFFFFFFFL
+//GRBM_DSM_BYPASS
+#define GRBM_DSM_BYPASS__BYPASS_BITS__SHIFT 0x0
+#define GRBM_DSM_BYPASS__BYPASS_EN__SHIFT 0x2
+#define GRBM_DSM_BYPASS__BYPASS_BITS_MASK 0x00000003L
+#define GRBM_DSM_BYPASS__BYPASS_EN_MASK 0x00000004L
+//GRBM_WRITE_ERROR
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC__SHIFT 0x0
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU__SHIFT 0x1
+#define GRBM_WRITE_ERROR__WRITE_SSRCID__SHIFT 0x2
+#define GRBM_WRITE_ERROR__WRITE_VFID__SHIFT 0x8
+#define GRBM_WRITE_ERROR__WRITE_VF__SHIFT 0xc
+#define GRBM_WRITE_ERROR__WRITE_VMID__SHIFT 0xd
+#define GRBM_WRITE_ERROR__TMZ__SHIFT 0x11
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL__SHIFT 0x12
+#define GRBM_WRITE_ERROR__WRITE_PIPEID__SHIFT 0x14
+#define GRBM_WRITE_ERROR__WRITE_MEID__SHIFT 0x16
+#define GRBM_WRITE_ERROR__WRITE_ERROR__SHIFT 0x1f
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RLC_MASK 0x00000001L
+#define GRBM_WRITE_ERROR__WRITE_REQUESTER_RSMU_MASK 0x00000002L
+#define GRBM_WRITE_ERROR__WRITE_SSRCID_MASK 0x0000003CL
+#define GRBM_WRITE_ERROR__WRITE_VFID_MASK 0x00000F00L
+#define GRBM_WRITE_ERROR__WRITE_VF_MASK 0x00001000L
+#define GRBM_WRITE_ERROR__WRITE_VMID_MASK 0x0001E000L
+#define GRBM_WRITE_ERROR__TMZ_MASK 0x00020000L
+#define GRBM_WRITE_ERROR__CP_SECURE_WR_ILLEGAL_MASK 0x00040000L
+#define GRBM_WRITE_ERROR__WRITE_PIPEID_MASK 0x00300000L
+#define GRBM_WRITE_ERROR__WRITE_MEID_MASK 0x00C00000L
+#define GRBM_WRITE_ERROR__WRITE_ERROR_MASK 0x80000000L
+//GRBM_CHIP_REVISION
+#define GRBM_CHIP_REVISION__CHIP_REVISION__SHIFT 0x0
+#define GRBM_CHIP_REVISION__CHIP_REVISION_MASK 0x000000FFL
+//GRBM_RSMU_CFG
+#define GRBM_RSMU_CFG__APERTURE_ID__SHIFT 0x0
+#define GRBM_RSMU_CFG__QOS__SHIFT 0xc
+#define GRBM_RSMU_CFG__POSTED_WR__SHIFT 0x10
+#define GRBM_RSMU_CFG__DEBUG_MASK__SHIFT 0x11
+#define GRBM_RSMU_CFG__APERTURE_ID_MASK 0x00000FFFL
+#define GRBM_RSMU_CFG__QOS_MASK 0x0000F000L
+#define GRBM_RSMU_CFG__POSTED_WR_MASK 0x00010000L
+#define GRBM_RSMU_CFG__DEBUG_MASK_MASK 0x00020000L
+//GRBM_IH_CREDIT
+#define GRBM_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define GRBM_IH_CREDIT__IH_CLIENT_ID__SHIFT 0x10
+#define GRBM_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define GRBM_IH_CREDIT__IH_CLIENT_ID_MASK 0x00FF0000L
+//GRBM_PWR_CNTL2
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT__SHIFT 0x10
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT__SHIFT 0x14
+#define GRBM_PWR_CNTL2__PWR_REQUEST_HALT_MASK 0x00010000L
+#define GRBM_PWR_CNTL2__PWR_GFX3D_REQUEST_HALT_MASK 0x00100000L
+//GRBM_UTCL2_INVAL_RANGE_START
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_START__DATA_MASK 0x0003FFFFL
+//GRBM_UTCL2_INVAL_RANGE_END
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA__SHIFT 0x0
+#define GRBM_UTCL2_INVAL_RANGE_END__DATA_MASK 0x0003FFFFL
+//GRBM_RSMU_READ_ERROR
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS__SHIFT 0x2
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF__SHIFT 0x14
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID__SHIFT 0x15
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE__SHIFT 0x1b
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR__SHIFT 0x1f
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ADDRESS_MASK 0x000FFFFCL
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VF_MASK 0x00100000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_VFID_MASK 0x07E00000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_TYPE_MASK 0x08000000L
+#define GRBM_RSMU_READ_ERROR__RSMU_READ_ERROR_MASK 0x80000000L
+//GRBM_INVALID_PIPE
+#define GRBM_INVALID_PIPE__ADDR__SHIFT 0x2
+#define GRBM_INVALID_PIPE__PIPEID__SHIFT 0x14
+#define GRBM_INVALID_PIPE__MEID__SHIFT 0x16
+#define GRBM_INVALID_PIPE__QUEUEID__SHIFT 0x18
+#define GRBM_INVALID_PIPE__SSRCID__SHIFT 0x1b
+#define GRBM_INVALID_PIPE__INVALID_PIPE__SHIFT 0x1f
+#define GRBM_INVALID_PIPE__ADDR_MASK 0x000FFFFCL
+#define GRBM_INVALID_PIPE__PIPEID_MASK 0x00300000L
+#define GRBM_INVALID_PIPE__MEID_MASK 0x00C00000L
+#define GRBM_INVALID_PIPE__QUEUEID_MASK 0x07000000L
+#define GRBM_INVALID_PIPE__SSRCID_MASK 0x78000000L
+#define GRBM_INVALID_PIPE__INVALID_PIPE_MASK 0x80000000L
+//GRBM_FENCE_RANGE0
+#define GRBM_FENCE_RANGE0__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE0__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE0__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE0__END_MASK 0xFFFF0000L
+//GRBM_FENCE_RANGE1
+#define GRBM_FENCE_RANGE1__START__SHIFT 0x0
+#define GRBM_FENCE_RANGE1__END__SHIFT 0x10
+#define GRBM_FENCE_RANGE1__START_MASK 0x0000FFFFL
+#define GRBM_FENCE_RANGE1__END_MASK 0xFFFF0000L
+//GRBM_SCRATCH_REG0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define GRBM_SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG1
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define GRBM_SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG2
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define GRBM_SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG3
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define GRBM_SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG4
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define GRBM_SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG5
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define GRBM_SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG6
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define GRBM_SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//GRBM_SCRATCH_REG7
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define GRBM_SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//VIOLATION_DATA_ASYNC_VF_PROG
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID__SHIFT 0x0
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID__SHIFT 0x4
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR__SHIFT 0x1f
+#define VIOLATION_DATA_ASYNC_VF_PROG__SSRCID_MASK 0x0000000FL
+#define VIOLATION_DATA_ASYNC_VF_PROG__VFID_MASK 0x000003F0L
+#define VIOLATION_DATA_ASYNC_VF_PROG__VIOLATION_ERROR_MASK 0x80000000L
+
+
+// addressBlock: gc_cpdec
+//CP_CPC_DEBUG_CNTL
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPC_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_CPF_DEBUG_CNTL
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_CPF_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_CPC_STATUS
+#define CP_CPC_STATUS__MEC1_BUSY__SHIFT 0x0
+#define CP_CPC_STATUS__MEC2_BUSY__SHIFT 0x1
+#define CP_CPC_STATUS__DC0_BUSY__SHIFT 0x2
+#define CP_CPC_STATUS__DC1_BUSY__SHIFT 0x3
+#define CP_CPC_STATUS__RCIU1_BUSY__SHIFT 0x4
+#define CP_CPC_STATUS__RCIU2_BUSY__SHIFT 0x5
+#define CP_CPC_STATUS__ROQ1_BUSY__SHIFT 0x6
+#define CP_CPC_STATUS__ROQ2_BUSY__SHIFT 0x7
+#define CP_CPC_STATUS__TCIU_BUSY__SHIFT 0xa
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY__SHIFT 0xb
+#define CP_CPC_STATUS__QU_BUSY__SHIFT 0xc
+#define CP_CPC_STATUS__UTCL2IU_BUSY__SHIFT 0xd
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY__SHIFT 0xe
+#define CP_CPC_STATUS__GCRIU_BUSY__SHIFT 0xf
+#define CP_CPC_STATUS__MES_BUSY__SHIFT 0x10
+#define CP_CPC_STATUS__MES_SCRATCH_RAM_BUSY__SHIFT 0x11
+#define CP_CPC_STATUS__RCIU3_BUSY__SHIFT 0x12
+#define CP_CPC_STATUS__MES_INSTRUCTION_CACHE_BUSY__SHIFT 0x13
+#define CP_CPC_STATUS__MES_DATA_CACHE_BUSY__SHIFT 0x14
+#define CP_CPC_STATUS__MEC_DATA_CACHE_BUSY__SHIFT 0x15
+#define CP_CPC_STATUS__CPG_CPC_BUSY__SHIFT 0x1d
+#define CP_CPC_STATUS__CPF_CPC_BUSY__SHIFT 0x1e
+#define CP_CPC_STATUS__CPC_BUSY__SHIFT 0x1f
+#define CP_CPC_STATUS__MEC1_BUSY_MASK 0x00000001L
+#define CP_CPC_STATUS__MEC2_BUSY_MASK 0x00000002L
+#define CP_CPC_STATUS__DC0_BUSY_MASK 0x00000004L
+#define CP_CPC_STATUS__DC1_BUSY_MASK 0x00000008L
+#define CP_CPC_STATUS__RCIU1_BUSY_MASK 0x00000010L
+#define CP_CPC_STATUS__RCIU2_BUSY_MASK 0x00000020L
+#define CP_CPC_STATUS__ROQ1_BUSY_MASK 0x00000040L
+#define CP_CPC_STATUS__ROQ2_BUSY_MASK 0x00000080L
+#define CP_CPC_STATUS__TCIU_BUSY_MASK 0x00000400L
+#define CP_CPC_STATUS__SCRATCH_RAM_BUSY_MASK 0x00000800L
+#define CP_CPC_STATUS__QU_BUSY_MASK 0x00001000L
+#define CP_CPC_STATUS__UTCL2IU_BUSY_MASK 0x00002000L
+#define CP_CPC_STATUS__SAVE_RESTORE_BUSY_MASK 0x00004000L
+#define CP_CPC_STATUS__GCRIU_BUSY_MASK 0x00008000L
+#define CP_CPC_STATUS__MES_BUSY_MASK 0x00010000L
+#define CP_CPC_STATUS__MES_SCRATCH_RAM_BUSY_MASK 0x00020000L
+#define CP_CPC_STATUS__RCIU3_BUSY_MASK 0x00040000L
+#define CP_CPC_STATUS__MES_INSTRUCTION_CACHE_BUSY_MASK 0x00080000L
+#define CP_CPC_STATUS__MES_DATA_CACHE_BUSY_MASK 0x00100000L
+#define CP_CPC_STATUS__MEC_DATA_CACHE_BUSY_MASK 0x00200000L
+#define CP_CPC_STATUS__CPG_CPC_BUSY_MASK 0x20000000L
+#define CP_CPC_STATUS__CPF_CPC_BUSY_MASK 0x40000000L
+#define CP_CPC_STATUS__CPC_BUSY_MASK 0x80000000L
+//CP_CPC_BUSY_STAT
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPHORE_BUSY__SHIFT 0x1
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY__SHIFT 0x4
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY__SHIFT 0x5
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY__SHIFT 0x6
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY__SHIFT 0x9
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY__SHIFT 0x10
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPHORE_BUSY__SHIFT 0x11
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY__SHIFT 0x12
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY__SHIFT 0x13
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY__SHIFT 0x14
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY__SHIFT 0x15
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY__SHIFT 0x16
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY__SHIFT 0x17
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY__SHIFT 0x18
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY__SHIFT 0x19
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY__SHIFT 0x1a
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY__SHIFT 0x1b
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY__SHIFT 0x1c
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY__SHIFT 0x1d
+#define CP_CPC_BUSY_STAT__MEC1_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT__MEC1_SEMAPHORE_BUSY_MASK 0x00000002L
+#define CP_CPC_BUSY_STAT__MEC1_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT__MEC1_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT__MEC1_EOP_QUEUE_BUSY_MASK 0x00000010L
+#define CP_CPC_BUSY_STAT__MEC1_IQ_QUEUE_BUSY_MASK 0x00000020L
+#define CP_CPC_BUSY_STAT__MEC1_IB_QUEUE_BUSY_MASK 0x00000040L
+#define CP_CPC_BUSY_STAT__MEC1_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT__MEC1_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT__MEC1_PARTIAL_FLUSH_BUSY_MASK 0x00000200L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT__MEC1_PIPE3_BUSY_MASK 0x00002000L
+#define CP_CPC_BUSY_STAT__MEC2_LOAD_BUSY_MASK 0x00010000L
+#define CP_CPC_BUSY_STAT__MEC2_SEMAPHORE_BUSY_MASK 0x00020000L
+#define CP_CPC_BUSY_STAT__MEC2_MUTEX_BUSY_MASK 0x00040000L
+#define CP_CPC_BUSY_STAT__MEC2_MESSAGE_BUSY_MASK 0x00080000L
+#define CP_CPC_BUSY_STAT__MEC2_EOP_QUEUE_BUSY_MASK 0x00100000L
+#define CP_CPC_BUSY_STAT__MEC2_IQ_QUEUE_BUSY_MASK 0x00200000L
+#define CP_CPC_BUSY_STAT__MEC2_IB_QUEUE_BUSY_MASK 0x00400000L
+#define CP_CPC_BUSY_STAT__MEC2_TC_BUSY_MASK 0x00800000L
+#define CP_CPC_BUSY_STAT__MEC2_DMA_BUSY_MASK 0x01000000L
+#define CP_CPC_BUSY_STAT__MEC2_PARTIAL_FLUSH_BUSY_MASK 0x02000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE0_BUSY_MASK 0x04000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE1_BUSY_MASK 0x08000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE2_BUSY_MASK 0x10000000L
+#define CP_CPC_BUSY_STAT__MEC2_PIPE3_BUSY_MASK 0x20000000L
+//CP_CPC_STALLED_STAT1
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL__SHIFT 0x3
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION__SHIFT 0x4
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL__SHIFT 0x6
+#define CP_CPC_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x7
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET__SHIFT 0x8
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU__SHIFT 0x9
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ__SHIFT 0xa
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA__SHIFT 0xd
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET__SHIFT 0x10
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU__SHIFT 0x11
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ__SHIFT 0x12
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA__SHIFT 0x15
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x16
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x17
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS__SHIFT 0x18
+#define CP_CPC_STALLED_STAT1__GCRIU_WAITING_ON_FREE__SHIFT 0x19
+#define CP_CPC_STALLED_STAT1__RCIU_TX_FREE_STALL_MASK 0x00000008L
+#define CP_CPC_STALLED_STAT1__RCIU_PRIV_VIOLATION_MASK 0x00000010L
+#define CP_CPC_STALLED_STAT1__TCIU_TX_FREE_STALL_MASK 0x00000040L
+#define CP_CPC_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000080L
+#define CP_CPC_STALLED_STAT1__MEC1_DECODING_PACKET_MASK 0x00000100L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_MASK 0x00000200L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_RCIU_READ_MASK 0x00000400L
+#define CP_CPC_STALLED_STAT1__MEC1_WAIT_ON_ROQ_DATA_MASK 0x00002000L
+#define CP_CPC_STALLED_STAT1__MEC2_DECODING_PACKET_MASK 0x00010000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_MASK 0x00020000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_RCIU_READ_MASK 0x00040000L
+#define CP_CPC_STALLED_STAT1__MEC2_WAIT_ON_ROQ_DATA_MASK 0x00200000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00400000L
+#define CP_CPC_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00800000L
+#define CP_CPC_STALLED_STAT1__UTCL1_WAITING_ON_TRANS_MASK 0x01000000L
+#define CP_CPC_STALLED_STAT1__GCRIU_WAITING_ON_FREE_MASK 0x02000000L
+//CP_CPF_STATUS
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY__SHIFT 0x0
+#define CP_CPF_STATUS__CSF_BUSY__SHIFT 0x1
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY__SHIFT 0x4
+#define CP_CPF_STATUS__ROQ_RING_BUSY__SHIFT 0x5
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY__SHIFT 0x6
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY__SHIFT 0x7
+#define CP_CPF_STATUS__ROQ_STATE_BUSY__SHIFT 0x8
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY__SHIFT 0x9
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_CPF_STATUS__SEMAPHORE_BUSY__SHIFT 0xc
+#define CP_CPF_STATUS__INTERRUPT_BUSY__SHIFT 0xd
+#define CP_CPF_STATUS__TCIU_BUSY__SHIFT 0xe
+#define CP_CPF_STATUS__HQD_BUSY__SHIFT 0xf
+#define CP_CPF_STATUS__PRT_BUSY__SHIFT 0x10
+#define CP_CPF_STATUS__UTCL2IU_BUSY__SHIFT 0x11
+#define CP_CPF_STATUS__RCIU_BUSY__SHIFT 0x12
+#define CP_CPF_STATUS__RCIU_GFX_BUSY__SHIFT 0x13
+#define CP_CPF_STATUS__RCIU_CMP_BUSY__SHIFT 0x14
+#define CP_CPF_STATUS__ROQ_DATA_BUSY__SHIFT 0x15
+#define CP_CPF_STATUS__ROQ_CE_DATA_BUSY__SHIFT 0x16
+#define CP_CPF_STATUS__GCRIU_BUSY__SHIFT 0x17
+#define CP_CPF_STATUS__MES_HQD_BUSY__SHIFT 0x18
+#define CP_CPF_STATUS__CPF_GFX_BUSY__SHIFT 0x1a
+#define CP_CPF_STATUS__CPF_CMP_BUSY__SHIFT 0x1b
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY__SHIFT 0x1c
+#define CP_CPF_STATUS__CPC_CPF_BUSY__SHIFT 0x1e
+#define CP_CPF_STATUS__CPF_BUSY__SHIFT 0x1f
+#define CP_CPF_STATUS__POST_WPTR_GFX_BUSY_MASK 0x00000001L
+#define CP_CPF_STATUS__CSF_BUSY_MASK 0x00000002L
+#define CP_CPF_STATUS__ROQ_ALIGN_BUSY_MASK 0x00000010L
+#define CP_CPF_STATUS__ROQ_RING_BUSY_MASK 0x00000020L
+#define CP_CPF_STATUS__ROQ_INDIRECT1_BUSY_MASK 0x00000040L
+#define CP_CPF_STATUS__ROQ_INDIRECT2_BUSY_MASK 0x00000080L
+#define CP_CPF_STATUS__ROQ_STATE_BUSY_MASK 0x00000100L
+#define CP_CPF_STATUS__ROQ_CE_RING_BUSY_MASK 0x00000200L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_CPF_STATUS__ROQ_CE_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_CPF_STATUS__SEMAPHORE_BUSY_MASK 0x00001000L
+#define CP_CPF_STATUS__INTERRUPT_BUSY_MASK 0x00002000L
+#define CP_CPF_STATUS__TCIU_BUSY_MASK 0x00004000L
+#define CP_CPF_STATUS__HQD_BUSY_MASK 0x00008000L
+#define CP_CPF_STATUS__PRT_BUSY_MASK 0x00010000L
+#define CP_CPF_STATUS__UTCL2IU_BUSY_MASK 0x00020000L
+#define CP_CPF_STATUS__RCIU_BUSY_MASK 0x00040000L
+#define CP_CPF_STATUS__RCIU_GFX_BUSY_MASK 0x00080000L
+#define CP_CPF_STATUS__RCIU_CMP_BUSY_MASK 0x00100000L
+#define CP_CPF_STATUS__ROQ_DATA_BUSY_MASK 0x00200000L
+#define CP_CPF_STATUS__ROQ_CE_DATA_BUSY_MASK 0x00400000L
+#define CP_CPF_STATUS__GCRIU_BUSY_MASK 0x00800000L
+#define CP_CPF_STATUS__MES_HQD_BUSY_MASK 0x01000000L
+#define CP_CPF_STATUS__CPF_GFX_BUSY_MASK 0x04000000L
+#define CP_CPF_STATUS__CPF_CMP_BUSY_MASK 0x08000000L
+#define CP_CPF_STATUS__GRBM_CPF_STAT_BUSY_MASK 0x30000000L
+#define CP_CPF_STATUS__CPC_CPF_BUSY_MASK 0x40000000L
+#define CP_CPF_STATUS__CPF_BUSY_MASK 0x80000000L
+//CP_CPF_BUSY_STAT
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY__SHIFT 0x2
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY__SHIFT 0x3
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY__SHIFT 0x4
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY__SHIFT 0x5
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY__SHIFT 0x6
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY__SHIFT 0x7
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_STAT__CSF_DATA_BUSY__SHIFT 0x9
+#define CP_CPF_BUSY_STAT__CSF_CE_DATA_BUSY__SHIFT 0xa
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY__SHIFT 0xb
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY__SHIFT 0xd
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY__SHIFT 0xf
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY__SHIFT 0x13
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY__SHIFT 0x14
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY__SHIFT 0x15
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY__SHIFT 0x19
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY__SHIFT 0x1a
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY__SHIFT 0x1c
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY__SHIFT 0x1d
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY__SHIFT 0x1f
+#define CP_CPF_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT__CSF_RING_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT1_BUSY_MASK 0x00000004L
+#define CP_CPF_BUSY_STAT__CSF_INDIRECT2_BUSY_MASK 0x00000008L
+#define CP_CPF_BUSY_STAT__CSF_STATE_BUSY_MASK 0x00000010L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR1_BUSY_MASK 0x00000020L
+#define CP_CPF_BUSY_STAT__CSF_CE_INDR2_BUSY_MASK 0x00000040L
+#define CP_CPF_BUSY_STAT__CSF_ARBITER_BUSY_MASK 0x00000080L
+#define CP_CPF_BUSY_STAT__CSF_INPUT_BUSY_MASK 0x00000100L
+#define CP_CPF_BUSY_STAT__CSF_DATA_BUSY_MASK 0x00000200L
+#define CP_CPF_BUSY_STAT__CSF_CE_DATA_BUSY_MASK 0x00000400L
+#define CP_CPF_BUSY_STAT__HPD_PROCESSING_EOP_BUSY_MASK 0x00000800L
+#define CP_CPF_BUSY_STAT__HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_TIMER_BUSY_MASK 0x00002000L
+#define CP_CPF_BUSY_STAT__HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT__HQD_WAIT_SEMAPHORE_BUSY_MASK 0x00008000L
+#define CP_CPF_BUSY_STAT__HQD_SIGNAL_SEMAPHORE_BUSY_MASK 0x00010000L
+#define CP_CPF_BUSY_STAT__HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT__HQD_IB_FETCHER_BUSY_MASK 0x00080000L
+#define CP_CPF_BUSY_STAT__HQD_IQ_FETCHER_BUSY_MASK 0x00100000L
+#define CP_CPF_BUSY_STAT__HQD_EOP_FETCHER_BUSY_MASK 0x00200000L
+#define CP_CPF_BUSY_STAT__HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT__HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_EOP_BUSY_MASK 0x02000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IQ_BUSY_MASK 0x04000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT__HQD_ROQ_IB_BUSY_MASK 0x10000000L
+#define CP_CPF_BUSY_STAT__HQD_WPTR_POLL_BUSY_MASK 0x20000000L
+#define CP_CPF_BUSY_STAT__HQD_PQ_BUSY_MASK 0x40000000L
+#define CP_CPF_BUSY_STAT__HQD_IB_BUSY_MASK 0x80000000L
+//CP_CPF_STALLED_STAT1
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA__SHIFT 0x0
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA__SHIFT 0x1
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA__SHIFT 0x2
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA__SHIFT 0x3
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE__SHIFT 0x5
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS__SHIFT 0x6
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE__SHIFT 0x7
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x8
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS__SHIFT 0x9
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS__SHIFT 0xa
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE__SHIFT 0xb
+#define CP_CPF_STALLED_STAT1__DATA_FETCHING_DATA__SHIFT 0xc
+#define CP_CPF_STALLED_STAT1__GCRIU_WAIT_ON_FREE__SHIFT 0xd
+#define CP_CPF_STALLED_STAT1__RING_FETCHING_DATA_MASK 0x00000001L
+#define CP_CPF_STALLED_STAT1__INDR1_FETCHING_DATA_MASK 0x00000002L
+#define CP_CPF_STALLED_STAT1__INDR2_FETCHING_DATA_MASK 0x00000004L
+#define CP_CPF_STALLED_STAT1__STATE_FETCHING_DATA_MASK 0x00000008L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_FREE_MASK 0x00000020L
+#define CP_CPF_STALLED_STAT1__TCIU_WAITING_ON_TAGS_MASK 0x00000040L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_FREE_MASK 0x00000080L
+#define CP_CPF_STALLED_STAT1__UTCL2IU_WAITING_ON_TAGS_MASK 0x00000100L
+#define CP_CPF_STALLED_STAT1__GFX_UTCL1_WAITING_ON_TRANS_MASK 0x00000200L
+#define CP_CPF_STALLED_STAT1__CMP_UTCL1_WAITING_ON_TRANS_MASK 0x00000400L
+#define CP_CPF_STALLED_STAT1__RCIU_WAITING_ON_FREE_MASK 0x00000800L
+#define CP_CPF_STALLED_STAT1__DATA_FETCHING_DATA_MASK 0x00001000L
+#define CP_CPF_STALLED_STAT1__GCRIU_WAIT_ON_FREE_MASK 0x00002000L
+//CP_CPC_BUSY_STAT2
+#define CP_CPC_BUSY_STAT2__MES_LOAD_BUSY__SHIFT 0x0
+#define CP_CPC_BUSY_STAT2__MES_MUTEX_BUSY__SHIFT 0x2
+#define CP_CPC_BUSY_STAT2__MES_MESSAGE_BUSY__SHIFT 0x3
+#define CP_CPC_BUSY_STAT2__MES_TC_BUSY__SHIFT 0x7
+#define CP_CPC_BUSY_STAT2__MES_DMA_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_STAT2__MES_PIPE0_BUSY__SHIFT 0xa
+#define CP_CPC_BUSY_STAT2__MES_PIPE1_BUSY__SHIFT 0xb
+#define CP_CPC_BUSY_STAT2__MES_PIPE2_BUSY__SHIFT 0xc
+#define CP_CPC_BUSY_STAT2__MES_PIPE3_BUSY__SHIFT 0xd
+#define CP_CPC_BUSY_STAT2__MES_LOAD_BUSY_MASK 0x00000001L
+#define CP_CPC_BUSY_STAT2__MES_MUTEX_BUSY_MASK 0x00000004L
+#define CP_CPC_BUSY_STAT2__MES_MESSAGE_BUSY_MASK 0x00000008L
+#define CP_CPC_BUSY_STAT2__MES_TC_BUSY_MASK 0x00000080L
+#define CP_CPC_BUSY_STAT2__MES_DMA_BUSY_MASK 0x00000100L
+#define CP_CPC_BUSY_STAT2__MES_PIPE0_BUSY_MASK 0x00000400L
+#define CP_CPC_BUSY_STAT2__MES_PIPE1_BUSY_MASK 0x00000800L
+#define CP_CPC_BUSY_STAT2__MES_PIPE2_BUSY_MASK 0x00001000L
+#define CP_CPC_BUSY_STAT2__MES_PIPE3_BUSY_MASK 0x00002000L
+//CP_CPC_GRBM_FREE_COUNT
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPC_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+//CP_CPC_PRIV_VIOLATION_ADDR
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_CPC_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0003FFFFL
+//CP_MEC_ME1_HEADER_DUMP
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME1_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_HEADER_DUMP
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MEC_ME2_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_CPC_SCRATCH_INDEX
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_CPC_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_CPC_SCRATCH_DATA
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_CPC_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_CPF_GRBM_FREE_COUNT
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_CPF_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x00000007L
+//CP_CPF_BUSY_STAT2
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPG_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPC_BUSY__SHIFT 0x1
+#define CP_CPF_BUSY_STAT2__MES_HQD_DISPATCH_BUSY__SHIFT 0xc
+#define CP_CPF_BUSY_STAT2__MES_HQD_DMA_OFFLOAD_BUSY__SHIFT 0xe
+#define CP_CPF_BUSY_STAT2__MES_HQD_MESSAGE_BUSY__SHIFT 0x11
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_FETCHER_BUSY__SHIFT 0x12
+#define CP_CPF_BUSY_STAT2__MES_HQD_CONSUMED_RPTR_BUSY__SHIFT 0x16
+#define CP_CPF_BUSY_STAT2__MES_HQD_FETCHER_ARB_BUSY__SHIFT 0x17
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_ALIGN_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_PQ_BUSY__SHIFT 0x1b
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_BUSY__SHIFT 0x1e
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPG_BUSY_MASK 0x00000001L
+#define CP_CPF_BUSY_STAT2__CP_SDMA_CPC_BUSY_MASK 0x00000002L
+#define CP_CPF_BUSY_STAT2__MES_HQD_DISPATCH_BUSY_MASK 0x00001000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_DMA_OFFLOAD_BUSY_MASK 0x00004000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_MESSAGE_BUSY_MASK 0x00020000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_FETCHER_BUSY_MASK 0x00040000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_CONSUMED_RPTR_BUSY_MASK 0x00400000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_FETCHER_ARB_BUSY_MASK 0x00800000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_ALIGN_BUSY_MASK 0x01000000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_ROQ_PQ_BUSY_MASK 0x08000000L
+#define CP_CPF_BUSY_STAT2__MES_HQD_PQ_BUSY_MASK 0x40000000L
+//CP_CPC_HALT_HYST_COUNT
+#define CP_CPC_HALT_HYST_COUNT__COUNT__SHIFT 0x0
+#define CP_CPC_HALT_HYST_COUNT__COUNT_MASK 0x0000000FL
+//CP_STALLED_STAT3
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER__SHIFT 0x2
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY__SHIFT 0x3
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY__SHIFT 0x4
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY__SHIFT 0x5
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV__SHIFT 0x7
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA__SHIFT 0xa
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER__SHIFT 0xc
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW__SHIFT 0xd
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE__SHIFT 0xe
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS__SHIFT 0xf
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x10
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x11
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE__SHIFT 0x12
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS__SHIFT 0x13
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS__SHIFT 0x14
+#define CP_STALLED_STAT3__GCRIU_WAITING_ON_FREE__SHIFT 0x15
+#define CP_STALLED_STAT3__CE_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_FETCHER_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DATA_FROM_RAM_INIT_FETCHER_MASK 0x00000004L
+#define CP_STALLED_STAT3__CE_TO_RAM_INIT_NOT_RDY_MASK 0x00000008L
+#define CP_STALLED_STAT3__CE_TO_RAM_DUMP_NOT_RDY_MASK 0x00000010L
+#define CP_STALLED_STAT3__CE_TO_RAM_WRITE_NOT_RDY_MASK 0x00000020L
+#define CP_STALLED_STAT3__CE_TO_INC_FIFO_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT3__CE_TO_WR_FIFO_NOT_RDY_TO_RCV_MASK 0x00000080L
+#define CP_STALLED_STAT3__CE_WAITING_ON_BUFFER_DATA_MASK 0x00000400L
+#define CP_STALLED_STAT3__CE_WAITING_ON_CE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_MASK 0x00001000L
+#define CP_STALLED_STAT3__CE_WAITING_ON_DE_COUNTER_UNDERFLOW_MASK 0x00002000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_FREE_MASK 0x00004000L
+#define CP_STALLED_STAT3__TCIU_WAITING_ON_TAGS_MASK 0x00008000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_TC_WR_CONFIRM_MASK 0x00010000L
+#define CP_STALLED_STAT3__CE_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00020000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_FREE_MASK 0x00040000L
+#define CP_STALLED_STAT3__UTCL2IU_WAITING_ON_TAGS_MASK 0x00080000L
+#define CP_STALLED_STAT3__UTCL1_WAITING_ON_TRANS_MASK 0x00100000L
+#define CP_STALLED_STAT3__GCRIU_WAITING_ON_FREE_MASK 0x00200000L
+//CP_STALLED_STAT1
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R0__SHIFT 0x2
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R1__SHIFT 0x3
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R0__SHIFT 0x4
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R1__SHIFT 0x5
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG__SHIFT 0xa
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG__SHIFT 0xb
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM__SHIFT 0xc
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0xd
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA__SHIFT 0xe
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA__SHIFT 0xf
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE__SHIFT 0x17
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE__SHIFT 0x18
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE__SHIFT 0x19
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ__SHIFT 0x1a
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ__SHIFT 0x1b
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ__SHIFT 0x1c
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION__SHIFT 0x1d
+#define CP_STALLED_STAT1__RBIU_TO_DMA_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R0_MASK 0x00000004L
+#define CP_STALLED_STAT1__RBIU_TO_SEM_NOT_RDY_TO_RCV_R1_MASK 0x00000008L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R0_MASK 0x00000010L
+#define CP_STALLED_STAT1__RBIU_TO_MEMWR_NOT_RDY_TO_RCV_R1_MASK 0x00000020L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_CE_BUFFER_FLAG_MASK 0x00000400L
+#define CP_STALLED_STAT1__ME_HAS_ACTIVE_DE_BUFFER_FLAG_MASK 0x00000800L
+#define CP_STALLED_STAT1__ME_STALLED_ON_TC_WR_CONFIRM_MASK 0x00001000L
+#define CP_STALLED_STAT1__ME_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00002000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_TC_READ_DATA_MASK 0x00004000L
+#define CP_STALLED_STAT1__ME_WAITING_ON_REG_READ_DATA_MASK 0x00008000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GDS_FREE_MASK 0x00800000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_GRBM_FREE_MASK 0x01000000L
+#define CP_STALLED_STAT1__RCIU_WAITING_ON_VGT_FREE_MASK 0x02000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_ME_READ_MASK 0x04000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_DMA_READ_MASK 0x08000000L
+#define CP_STALLED_STAT1__RCIU_STALLED_ON_APPEND_READ_MASK 0x10000000L
+#define CP_STALLED_STAT1__RCIU_HALTED_BY_REG_VIOLATION_MASK 0x20000000L
+//CP_STALLED_STAT2
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV__SHIFT 0x0
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV__SHIFT 0x1
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV__SHIFT 0x2
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING__SHIFT 0x4
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING__SHIFT 0x5
+#define CP_STALLED_STAT2__PFP_TO_MEQ_DDID_NOT_RDY_TO_RCV__SHIFT 0x6
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA__SHIFT 0x8
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER__SHIFT 0x9
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER__SHIFT 0xa
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME__SHIFT 0xb
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV__SHIFT 0xc
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV__SHIFT 0xd
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP__SHIFT 0xe
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH__SHIFT 0xf
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x10
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV__SHIFT 0x11
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ__SHIFT 0x12
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM__SHIFT 0x13
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA__SHIFT 0x14
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_PULSE__SHIFT 0x15
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_WR_CONFIRM__SHIFT 0x16
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING__SHIFT 0x17
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING__SHIFT 0x18
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE__SHIFT 0x19
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE__SHIFT 0x1a
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM__SHIFT 0x1b
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION__SHIFT 0x1c
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE__SHIFT 0x1d
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS__SHIFT 0x1e
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN__SHIFT 0x1f
+#define CP_STALLED_STAT2__PFP_TO_CSF_NOT_RDY_TO_RCV_MASK 0x00000001L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_NOT_RDY_TO_RCV_MASK 0x00000002L
+#define CP_STALLED_STAT2__PFP_TO_RCIU_NOT_RDY_TO_RCV_MASK 0x00000004L
+#define CP_STALLED_STAT2__PFP_TO_VGT_WRITES_PENDING_MASK 0x00000010L
+#define CP_STALLED_STAT2__PFP_RCIU_READ_PENDING_MASK 0x00000020L
+#define CP_STALLED_STAT2__PFP_TO_MEQ_DDID_NOT_RDY_TO_RCV_MASK 0x00000040L
+#define CP_STALLED_STAT2__PFP_WAITING_ON_BUFFER_DATA_MASK 0x00000100L
+#define CP_STALLED_STAT2__ME_WAIT_ON_CE_COUNTER_MASK 0x00000200L
+#define CP_STALLED_STAT2__ME_WAIT_ON_AVAIL_BUFFER_MASK 0x00000400L
+#define CP_STALLED_STAT2__GFX_CNTX_NOT_AVAIL_TO_ME_MASK 0x00000800L
+#define CP_STALLED_STAT2__ME_RCIU_NOT_RDY_TO_RCV_MASK 0x00001000L
+#define CP_STALLED_STAT2__ME_TO_CONST_NOT_RDY_TO_RCV_MASK 0x00002000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_PFP_MASK 0x00004000L
+#define CP_STALLED_STAT2__ME_WAITING_ON_PARTIAL_FLUSH_MASK 0x00008000L
+#define CP_STALLED_STAT2__MEQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00010000L
+#define CP_STALLED_STAT2__STQ_TO_ME_NOT_RDY_TO_RCV_MASK 0x00020000L
+#define CP_STALLED_STAT2__ME_WAITING_DATA_FROM_STQ_MASK 0x00040000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_TC_WR_CONFIRM_MASK 0x00080000L
+#define CP_STALLED_STAT2__PFP_STALLED_ON_ATOMIC_RTN_DATA_MASK 0x00100000L
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_PULSE_MASK 0x00200000L
+#define CP_STALLED_STAT2__QU_STALLED_ON_EOP_DONE_WR_CONFIRM_MASK 0x00400000L
+#define CP_STALLED_STAT2__STRMO_WR_OF_PRIM_DATA_PENDING_MASK 0x00800000L
+#define CP_STALLED_STAT2__PIPE_STATS_WR_DATA_PENDING_MASK 0x01000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_CS_DONE_MASK 0x02000000L
+#define CP_STALLED_STAT2__APPEND_RDY_WAIT_ON_PS_DONE_MASK 0x04000000L
+#define CP_STALLED_STAT2__APPEND_WAIT_ON_WR_CONFIRM_MASK 0x08000000L
+#define CP_STALLED_STAT2__APPEND_ACTIVE_PARTITION_MASK 0x10000000L
+#define CP_STALLED_STAT2__APPEND_WAITING_TO_SEND_MEMWRITE_MASK 0x20000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_IDLE_CNTXS_MASK 0x40000000L
+#define CP_STALLED_STAT2__SURF_SYNC_NEEDS_ALL_CLEAN_MASK 0x80000000L
+//CP_BUSY_STAT
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY__SHIFT 0x0
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO__SHIFT 0x6
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS__SHIFT 0x7
+#define CP_BUSY_STAT__ME_PARSING_PACKETS__SHIFT 0x8
+#define CP_BUSY_STAT__RCIU_PFP_BUSY__SHIFT 0x9
+#define CP_BUSY_STAT__RCIU_ME_BUSY__SHIFT 0xa
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY__SHIFT 0xc
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING__SHIFT 0xd
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS__SHIFT 0xe
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY__SHIFT 0xf
+#define CP_BUSY_STAT__ME_PARSER_BUSY__SHIFT 0x11
+#define CP_BUSY_STAT__EOP_DONE_BUSY__SHIFT 0x12
+#define CP_BUSY_STAT__STRM_OUT_BUSY__SHIFT 0x13
+#define CP_BUSY_STAT__PIPE_STATS_BUSY__SHIFT 0x14
+#define CP_BUSY_STAT__RCIU_CE_BUSY__SHIFT 0x15
+#define CP_BUSY_STAT__CE_PARSING_PACKETS__SHIFT 0x16
+#define CP_BUSY_STAT__REG_BUS_FIFO_BUSY_MASK 0x00000001L
+#define CP_BUSY_STAT__COHER_CNT_NEQ_ZERO_MASK 0x00000040L
+#define CP_BUSY_STAT__PFP_PARSING_PACKETS_MASK 0x00000080L
+#define CP_BUSY_STAT__ME_PARSING_PACKETS_MASK 0x00000100L
+#define CP_BUSY_STAT__RCIU_PFP_BUSY_MASK 0x00000200L
+#define CP_BUSY_STAT__RCIU_ME_BUSY_MASK 0x00000400L
+#define CP_BUSY_STAT__SEM_CMDFIFO_NOT_EMPTY_MASK 0x00001000L
+#define CP_BUSY_STAT__SEM_FAILED_AND_HOLDING_MASK 0x00002000L
+#define CP_BUSY_STAT__SEM_POLLING_FOR_PASS_MASK 0x00004000L
+#define CP_BUSY_STAT__GFX_CONTEXT_BUSY_MASK 0x00008000L
+#define CP_BUSY_STAT__ME_PARSER_BUSY_MASK 0x00020000L
+#define CP_BUSY_STAT__EOP_DONE_BUSY_MASK 0x00040000L
+#define CP_BUSY_STAT__STRM_OUT_BUSY_MASK 0x00080000L
+#define CP_BUSY_STAT__PIPE_STATS_BUSY_MASK 0x00100000L
+#define CP_BUSY_STAT__RCIU_CE_BUSY_MASK 0x00200000L
+#define CP_BUSY_STAT__CE_PARSING_PACKETS_MASK 0x00400000L
+//CP_STAT
+#define CP_STAT__ROQ_DB_BUSY__SHIFT 0x5
+#define CP_STAT__ROQ_CE_DB_BUSY__SHIFT 0x6
+#define CP_STAT__ROQ_RING_BUSY__SHIFT 0x9
+#define CP_STAT__ROQ_INDIRECT1_BUSY__SHIFT 0xa
+#define CP_STAT__ROQ_INDIRECT2_BUSY__SHIFT 0xb
+#define CP_STAT__ROQ_STATE_BUSY__SHIFT 0xc
+#define CP_STAT__DC_BUSY__SHIFT 0xd
+#define CP_STAT__UTCL2IU_BUSY__SHIFT 0xe
+#define CP_STAT__PFP_BUSY__SHIFT 0xf
+#define CP_STAT__MEQ_BUSY__SHIFT 0x10
+#define CP_STAT__ME_BUSY__SHIFT 0x11
+#define CP_STAT__QUERY_BUSY__SHIFT 0x12
+#define CP_STAT__SEMAPHORE_BUSY__SHIFT 0x13
+#define CP_STAT__INTERRUPT_BUSY__SHIFT 0x14
+#define CP_STAT__SURFACE_SYNC_BUSY__SHIFT 0x15
+#define CP_STAT__DMA_BUSY__SHIFT 0x16
+#define CP_STAT__RCIU_BUSY__SHIFT 0x17
+#define CP_STAT__SCRATCH_RAM_BUSY__SHIFT 0x18
+#define CP_STAT__GCRIU_BUSY__SHIFT 0x19
+#define CP_STAT__CE_BUSY__SHIFT 0x1a
+#define CP_STAT__TCIU_BUSY__SHIFT 0x1b
+#define CP_STAT__ROQ_CE_RING_BUSY__SHIFT 0x1c
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY__SHIFT 0x1d
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY__SHIFT 0x1e
+#define CP_STAT__CP_BUSY__SHIFT 0x1f
+#define CP_STAT__ROQ_DB_BUSY_MASK 0x00000020L
+#define CP_STAT__ROQ_CE_DB_BUSY_MASK 0x00000040L
+#define CP_STAT__ROQ_RING_BUSY_MASK 0x00000200L
+#define CP_STAT__ROQ_INDIRECT1_BUSY_MASK 0x00000400L
+#define CP_STAT__ROQ_INDIRECT2_BUSY_MASK 0x00000800L
+#define CP_STAT__ROQ_STATE_BUSY_MASK 0x00001000L
+#define CP_STAT__DC_BUSY_MASK 0x00002000L
+#define CP_STAT__UTCL2IU_BUSY_MASK 0x00004000L
+#define CP_STAT__PFP_BUSY_MASK 0x00008000L
+#define CP_STAT__MEQ_BUSY_MASK 0x00010000L
+#define CP_STAT__ME_BUSY_MASK 0x00020000L
+#define CP_STAT__QUERY_BUSY_MASK 0x00040000L
+#define CP_STAT__SEMAPHORE_BUSY_MASK 0x00080000L
+#define CP_STAT__INTERRUPT_BUSY_MASK 0x00100000L
+#define CP_STAT__SURFACE_SYNC_BUSY_MASK 0x00200000L
+#define CP_STAT__DMA_BUSY_MASK 0x00400000L
+#define CP_STAT__RCIU_BUSY_MASK 0x00800000L
+#define CP_STAT__SCRATCH_RAM_BUSY_MASK 0x01000000L
+#define CP_STAT__GCRIU_BUSY_MASK 0x02000000L
+#define CP_STAT__CE_BUSY_MASK 0x04000000L
+#define CP_STAT__TCIU_BUSY_MASK 0x08000000L
+#define CP_STAT__ROQ_CE_RING_BUSY_MASK 0x10000000L
+#define CP_STAT__ROQ_CE_INDIRECT1_BUSY_MASK 0x20000000L
+#define CP_STAT__ROQ_CE_INDIRECT2_BUSY_MASK 0x40000000L
+#define CP_STAT__CP_BUSY_MASK 0x80000000L
+//CP_ME_HEADER_DUMP
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP__SHIFT 0x0
+#define CP_ME_HEADER_DUMP__ME_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_PFP_HEADER_DUMP
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP__SHIFT 0x0
+#define CP_PFP_HEADER_DUMP__PFP_HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_GRBM_FREE_COUNT
+#define CP_GRBM_FREE_COUNT__FREE_COUNT__SHIFT 0x0
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS__SHIFT 0x8
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP__SHIFT 0x10
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_MASK 0x0000003FL
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_GDS_MASK 0x00003F00L
+#define CP_GRBM_FREE_COUNT__FREE_COUNT_PFP_MASK 0x003F0000L
+//CP_PFP_INSTR_PNTR
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_PFP_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_ME_INSTR_PNTR
+#define CP_ME_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_ME_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC1_INSTR_PNTR
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC1_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_MEC2_INSTR_PNTR
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC2_INSTR_PNTR__INSTR_PNTR_MASK 0x0000FFFFL
+//CP_CSF_STAT
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT__SHIFT 0x8
+#define CP_CSF_STAT__BUFFER_REQUEST_COUNT_MASK 0x0001FF00L
+//CP_CNTX_STAT
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS__SHIFT 0x0
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT__SHIFT 0x8
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS__SHIFT 0x14
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT__SHIFT 0x1c
+#define CP_CNTX_STAT__ACTIVE_HP3D_CONTEXTS_MASK 0x000000FFL
+#define CP_CNTX_STAT__CURRENT_HP3D_CONTEXT_MASK 0x00000700L
+#define CP_CNTX_STAT__ACTIVE_GFX_CONTEXTS_MASK 0x0FF00000L
+#define CP_CNTX_STAT__CURRENT_GFX_CONTEXT_MASK 0x70000000L
+//CP_ME_PREEMPTION
+#define CP_ME_PREEMPTION__OBSOLETE__SHIFT 0x0
+#define CP_ME_PREEMPTION__OBSOLETE_MASK 0x00000001L
+//CP_RB1_RPTR
+#define CP_RB1_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB1_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB0_RPTR
+#define CP_RB0_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB0_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_RPTR
+#define CP_RB_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_RB_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_RB_WPTR_DELAY
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER__SHIFT 0x0
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT__SHIFT 0x1c
+#define CP_RB_WPTR_DELAY__PRE_WRITE_TIMER_MASK 0x0FFFFFFFL
+#define CP_RB_WPTR_DELAY__PRE_WRITE_LIMIT_MASK 0xF0000000L
+//CP_RB_WPTR_POLL_CNTL
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT 0x0
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10
+#define CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK 0x0000FFFFL
+#define CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L
+//CP_ROQ1_THRESHOLDS
+#define CP_ROQ1_THRESHOLDS__RB1_START__SHIFT 0x0
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START__SHIFT 0xa
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START__SHIFT 0x14
+#define CP_ROQ1_THRESHOLDS__RB1_START_MASK 0x000003FFL
+#define CP_ROQ1_THRESHOLDS__R0_IB1_START_MASK 0x000FFC00L
+#define CP_ROQ1_THRESHOLDS__R1_IB1_START_MASK 0x3FF00000L
+//CP_ROQ2_THRESHOLDS
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START__SHIFT 0x0
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START__SHIFT 0xa
+#define CP_ROQ2_THRESHOLDS__R0_IB2_START_MASK 0x000003FFL
+#define CP_ROQ2_THRESHOLDS__R1_IB2_START_MASK 0x000FFC00L
+//CP_STQ_THRESHOLDS
+#define CP_STQ_THRESHOLDS__STQ0_START__SHIFT 0x0
+#define CP_STQ_THRESHOLDS__STQ1_START__SHIFT 0x8
+#define CP_STQ_THRESHOLDS__STQ2_START__SHIFT 0x10
+#define CP_STQ_THRESHOLDS__STQ0_START_MASK 0x000000FFL
+#define CP_STQ_THRESHOLDS__STQ1_START_MASK 0x0000FF00L
+#define CP_STQ_THRESHOLDS__STQ2_START_MASK 0x00FF0000L
+//CP_MEQ_THRESHOLDS
+#define CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT 0x0
+#define CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT 0x8
+#define CP_MEQ_THRESHOLDS__MEQ1_START_MASK 0x000000FFL
+#define CP_MEQ_THRESHOLDS__MEQ2_START_MASK 0x0000FF00L
+//CP_ROQ_AVAIL
+#define CP_ROQ_AVAIL__ROQ_CNT_RING__SHIFT 0x0
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1__SHIFT 0x10
+#define CP_ROQ_AVAIL__ROQ_CNT_RING_MASK 0x00000FFFL
+#define CP_ROQ_AVAIL__ROQ_CNT_IB1_MASK 0x0FFF0000L
+//CP_STQ_AVAIL
+#define CP_STQ_AVAIL__STQ_CNT__SHIFT 0x0
+#define CP_STQ_AVAIL__STQ_CNT_MASK 0x000001FFL
+//CP_ROQ2_AVAIL
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2__SHIFT 0x0
+#define CP_ROQ2_AVAIL__ROQ_CNT_DB__SHIFT 0x10
+#define CP_ROQ2_AVAIL__ROQ_CNT_IB2_MASK 0x00000FFFL
+#define CP_ROQ2_AVAIL__ROQ_CNT_DB_MASK 0x0FFF0000L
+//CP_MEQ_AVAIL
+#define CP_MEQ_AVAIL__MEQ_CNT__SHIFT 0x0
+#define CP_MEQ_AVAIL__MEQ_CNT_MASK 0x000003FFL
+//CP_CMD_INDEX
+#define CP_CMD_INDEX__CMD_INDEX__SHIFT 0x0
+#define CP_CMD_INDEX__CMD_ME_SEL__SHIFT 0xc
+#define CP_CMD_INDEX__CMD_QUEUE_SEL__SHIFT 0x10
+#define CP_CMD_INDEX__CMD_INDEX_MASK 0x000007FFL
+#define CP_CMD_INDEX__CMD_ME_SEL_MASK 0x00003000L
+#define CP_CMD_INDEX__CMD_QUEUE_SEL_MASK 0x00070000L
+//CP_CMD_DATA
+#define CP_CMD_DATA__CMD_DATA__SHIFT 0x0
+#define CP_CMD_DATA__CMD_DATA_MASK 0xFFFFFFFFL
+//CP_ROQ_RB_STAT
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY__SHIFT 0x0
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY__SHIFT 0x10
+#define CP_ROQ_RB_STAT__ROQ_RPTR_PRIMARY_MASK 0x00000FFFL
+#define CP_ROQ_RB_STAT__ROQ_WPTR_PRIMARY_MASK 0x0FFF0000L
+//CP_ROQ_IB1_STAT
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1__SHIFT 0x0
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1__SHIFT 0x10
+#define CP_ROQ_IB1_STAT__ROQ_RPTR_INDIRECT1_MASK 0x00000FFFL
+#define CP_ROQ_IB1_STAT__ROQ_WPTR_INDIRECT1_MASK 0x0FFF0000L
+//CP_ROQ_IB2_STAT
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2__SHIFT 0x0
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2__SHIFT 0x10
+#define CP_ROQ_IB2_STAT__ROQ_RPTR_INDIRECT2_MASK 0x00000FFFL
+#define CP_ROQ_IB2_STAT__ROQ_WPTR_INDIRECT2_MASK 0x0FFF0000L
+//CP_STQ_STAT
+#define CP_STQ_STAT__STQ_RPTR__SHIFT 0x0
+#define CP_STQ_STAT__STQ_RPTR_MASK 0x000003FFL
+//CP_STQ_WR_STAT
+#define CP_STQ_WR_STAT__STQ_WPTR__SHIFT 0x0
+#define CP_STQ_WR_STAT__STQ_WPTR_MASK 0x000003FFL
+//CP_MEQ_STAT
+#define CP_MEQ_STAT__MEQ_RPTR__SHIFT 0x0
+#define CP_MEQ_STAT__MEQ_WPTR__SHIFT 0x10
+#define CP_MEQ_STAT__MEQ_RPTR_MASK 0x000003FFL
+#define CP_MEQ_STAT__MEQ_WPTR_MASK 0x03FF0000L
+//CP_ROQ3_THRESHOLDS
+#define CP_ROQ3_THRESHOLDS__R0_DB_START__SHIFT 0x0
+#define CP_ROQ3_THRESHOLDS__R1_DB_START__SHIFT 0xa
+#define CP_ROQ3_THRESHOLDS__R0_DB_START_MASK 0x000003FFL
+#define CP_ROQ3_THRESHOLDS__R1_DB_START_MASK 0x000FFC00L
+//CP_ROQ_DB_STAT
+#define CP_ROQ_DB_STAT__ROQ_RPTR_DB__SHIFT 0x0
+#define CP_ROQ_DB_STAT__ROQ_WPTR_DB__SHIFT 0x10
+#define CP_ROQ_DB_STAT__ROQ_RPTR_DB_MASK 0x00000FFFL
+#define CP_ROQ_DB_STAT__ROQ_WPTR_DB_MASK 0x0FFF0000L
+//CP_INT_STAT_DEBUG
+#define CP_INT_STAT_DEBUG__RESUME_INT_ASSERTED__SHIFT 0x8
+#define CP_INT_STAT_DEBUG__SUSPEND_INT_ASSERTED__SHIFT 0x9
+#define CP_INT_STAT_DEBUG__DMA_WATCH_INT_ASSERTED__SHIFT 0xa
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED__SHIFT 0xb
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_INT_STAT_DEBUG__FUE_INT_STATUS_DEBUG__SHIFT 0xf
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED__SHIFT 0x12
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED__SHIFT 0x13
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED__SHIFT 0x14
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED__SHIFT 0x15
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED__SHIFT 0x16
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_INT_STAT_DEBUG__RESUME_INT_ASSERTED_MASK 0x00000100L
+#define CP_INT_STAT_DEBUG__SUSPEND_INT_ASSERTED_MASK 0x00000200L
+#define CP_INT_STAT_DEBUG__DMA_WATCH_INT_ASSERTED_MASK 0x00000400L
+#define CP_INT_STAT_DEBUG__CP_VM_DOORBELL_WR_INT_ASSERTED_MASK 0x00000800L
+#define CP_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_INT_STAT_DEBUG__FUE_INT_STATUS_DEBUG_MASK 0x00008000L
+#define CP_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_INT_STAT_DEBUG__CMP_BUSY_INT_ASSERTED_MASK 0x00040000L
+#define CP_INT_STAT_DEBUG__CNTX_BUSY_INT_ASSERTED_MASK 0x00080000L
+#define CP_INT_STAT_DEBUG__CNTX_EMPTY_INT_ASSERTED_MASK 0x00100000L
+#define CP_INT_STAT_DEBUG__GFX_IDLE_INT_ASSERTED_MASK 0x00200000L
+#define CP_INT_STAT_DEBUG__PRIV_INSTR_INT_ASSERTED_MASK 0x00400000L
+#define CP_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_DEBUG_CNTL
+#define CP_DEBUG_CNTL__DEBUG_INDX__SHIFT 0x0
+#define CP_DEBUG_CNTL__DEBUG_INDX_MASK 0x0000007FL
+//CP_PRIV_VIOLATION_ADDR
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR__SHIFT 0x0
+#define CP_PRIV_VIOLATION_ADDR__PRIV_VIOLATION_ADDR_MASK 0x0003FFFFL
+
+
+// addressBlock: gc_padec
+//VGT_DMA_DATA_FIFO_DEPTH
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_DATA_FIFO_DEPTH__DMA_DATA_FIFO_DEPTH_MASK 0x000003FFL
+//VGT_DMA_REQ_FIFO_DEPTH
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DMA_REQ_FIFO_DEPTH__DMA_REQ_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_DRAW_INIT_FIFO_DEPTH
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH__SHIFT 0x0
+#define VGT_DRAW_INIT_FIFO_DEPTH__DRAW_INIT_FIFO_DEPTH_MASK 0x0000003FL
+//VGT_MC_LAT_CNTL
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES__SHIFT 0x0
+#define VGT_MC_LAT_CNTL__MC_TIME_STAMP_RES_MASK 0x0000000FL
+//IA_UTCL1_STATUS_2
+#define IA_UTCL1_STATUS_2__IA_BUSY__SHIFT 0x0
+#define IA_UTCL1_STATUS_2__IA_DMA_BUSY__SHIFT 0x1
+#define IA_UTCL1_STATUS_2__IA_DMA_REQ_BUSY__SHIFT 0x2
+#define IA_UTCL1_STATUS_2__IA_GRP_BUSY__SHIFT 0x3
+#define IA_UTCL1_STATUS_2__IA_ADC_BUSY__SHIFT 0x4
+#define IA_UTCL1_STATUS_2__FAULT_DETECTED__SHIFT 0x5
+#define IA_UTCL1_STATUS_2__RETRY_DETECTED__SHIFT 0x6
+#define IA_UTCL1_STATUS_2__PRT_DETECTED__SHIFT 0x7
+#define IA_UTCL1_STATUS_2__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS_2__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS_2__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS_2__IA_BUSY_MASK 0x00000001L
+#define IA_UTCL1_STATUS_2__IA_DMA_BUSY_MASK 0x00000002L
+#define IA_UTCL1_STATUS_2__IA_DMA_REQ_BUSY_MASK 0x00000004L
+#define IA_UTCL1_STATUS_2__IA_GRP_BUSY_MASK 0x00000008L
+#define IA_UTCL1_STATUS_2__IA_ADC_BUSY_MASK 0x00000010L
+#define IA_UTCL1_STATUS_2__FAULT_DETECTED_MASK 0x00000020L
+#define IA_UTCL1_STATUS_2__RETRY_DETECTED_MASK 0x00000040L
+#define IA_UTCL1_STATUS_2__PRT_DETECTED_MASK 0x00000080L
+#define IA_UTCL1_STATUS_2__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS_2__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS_2__PRT_UTCL1ID_MASK 0x3F000000L
+//WD_CNTL_STATUS
+#define WD_CNTL_STATUS__DIST_BUSY__SHIFT 0x0
+#define WD_CNTL_STATUS__DIST_BE_BUSY__SHIFT 0x1
+#define WD_CNTL_STATUS__GE_UTCL1_BUSY__SHIFT 0x2
+#define WD_CNTL_STATUS__WD_TE11_BUSY__SHIFT 0x3
+#define WD_CNTL_STATUS__PC_MANAGER_BUSY__SHIFT 0x4
+#define WD_CNTL_STATUS__WLC_BUSY__SHIFT 0x5
+#define WD_CNTL_STATUS__DIST_BUSY_MASK 0x00000001L
+#define WD_CNTL_STATUS__DIST_BE_BUSY_MASK 0x00000002L
+#define WD_CNTL_STATUS__GE_UTCL1_BUSY_MASK 0x00000004L
+#define WD_CNTL_STATUS__WD_TE11_BUSY_MASK 0x00000008L
+#define WD_CNTL_STATUS__PC_MANAGER_BUSY_MASK 0x00000010L
+#define WD_CNTL_STATUS__WLC_BUSY_MASK 0x00000020L
+//CC_GC_PRIM_CONFIG
+#define CC_GC_PRIM_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_PRIM_CONFIG__INACTIVE_PA__SHIFT 0x4
+#define CC_GC_PRIM_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_PRIM_CONFIG__INACTIVE_PA_MASK 0x000FFFF0L
+//WD_QOS
+#define WD_QOS__DRAW_STALL__SHIFT 0x0
+#define WD_QOS__DRAW_STALL_MASK 0x00000001L
+//WD_UTCL1_CNTL
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define WD_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define WD_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define WD_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define WD_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define WD_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define WD_UTCL1_CNTL__MTYPE_OVERRIDE__SHIFT 0x1d
+#define WD_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE__SHIFT 0x1e
+#define WD_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define WD_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define WD_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define WD_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define WD_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define WD_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define WD_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define WD_UTCL1_CNTL__MTYPE_OVERRIDE_MASK 0x20000000L
+#define WD_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE_MASK 0x40000000L
+//WD_UTCL1_STATUS
+#define WD_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define WD_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define WD_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define WD_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define WD_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define WD_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define WD_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define WD_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define WD_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define WD_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//IA_UTCL1_CNTL
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define IA_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define IA_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define IA_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define IA_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define IA_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define IA_UTCL1_CNTL__MTYPE_OVERRIDE__SHIFT 0x1d
+#define IA_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE__SHIFT 0x1e
+#define IA_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define IA_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define IA_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define IA_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define IA_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define IA_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define IA_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define IA_UTCL1_CNTL__MTYPE_OVERRIDE_MASK 0x20000000L
+#define IA_UTCL1_CNTL__LLC_NOALLOC_OVERRIDE_MASK 0x40000000L
+//IA_UTCL1_STATUS
+#define IA_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define IA_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define IA_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define IA_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define IA_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define IA_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define IA_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define IA_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define IA_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define IA_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CC_GC_SA_UNIT_DISABLE
+#define CC_GC_SA_UNIT_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define CC_GC_SA_UNIT_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x00FFFF00L
+//GE_RATE_CNTL_1
+#define GE_RATE_CNTL_1__ADD_X_CLKS_LS_VERT__SHIFT 0x0
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_LS_VERT__SHIFT 0x4
+#define GE_RATE_CNTL_1__ADD_X_CLKS_HS_VERT__SHIFT 0x8
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_HS_VERT__SHIFT 0xc
+#define GE_RATE_CNTL_1__ADD_X_CLKS_ES_VERT__SHIFT 0x10
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_ES_VERT__SHIFT 0x14
+#define GE_RATE_CNTL_1__ADD_X_CLKS_GS_PRIM__SHIFT 0x18
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_GS_PRIM__SHIFT 0x1c
+#define GE_RATE_CNTL_1__ADD_X_CLKS_LS_VERT_MASK 0x0000000FL
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_LS_VERT_MASK 0x000000F0L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_HS_VERT_MASK 0x00000F00L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_HS_VERT_MASK 0x0000F000L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_ES_VERT_MASK 0x000F0000L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_ES_VERT_MASK 0x00F00000L
+#define GE_RATE_CNTL_1__ADD_X_CLKS_GS_PRIM_MASK 0x0F000000L
+#define GE_RATE_CNTL_1__AFTER_Y_TRANS_GS_PRIM_MASK 0xF0000000L
+//GE_RATE_CNTL_2
+#define GE_RATE_CNTL_2__ADD_X_CLKS_VS_VERT__SHIFT 0x0
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_VS_VERT__SHIFT 0x4
+#define GE_RATE_CNTL_2__ADD_X_CLKS_PA_PRIM__SHIFT 0x8
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_PA_PRIM__SHIFT 0xc
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_HS_GS__SHIFT 0x10
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_LS_ES__SHIFT 0x14
+#define GE_RATE_CNTL_2__MERGED_HS_GS_MODE__SHIFT 0x18
+#define GE_RATE_CNTL_2__MERGED_LS_ES_MODE__SHIFT 0x19
+#define GE_RATE_CNTL_2__ENABLE_RATE_CNTL__SHIFT 0x1a
+#define GE_RATE_CNTL_2__SWAP_PRIORITY__SHIFT 0x1b
+#define GE_RATE_CNTL_2__ADD_X_CLKS_VS_VERT_MASK 0x0000000FL
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_VS_VERT_MASK 0x000000F0L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_PA_PRIM_MASK 0x00000F00L
+#define GE_RATE_CNTL_2__AFTER_Y_TRANS_PA_PRIM_MASK 0x0000F000L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_HS_GS_MASK 0x000F0000L
+#define GE_RATE_CNTL_2__ADD_X_CLKS_MERGED_LS_ES_MASK 0x00F00000L
+#define GE_RATE_CNTL_2__MERGED_HS_GS_MODE_MASK 0x01000000L
+#define GE_RATE_CNTL_2__MERGED_LS_ES_MODE_MASK 0x02000000L
+#define GE_RATE_CNTL_2__ENABLE_RATE_CNTL_MASK 0x04000000L
+#define GE_RATE_CNTL_2__SWAP_PRIORITY_MASK 0x08000000L
+//VGT_SYS_CONFIG
+#define VGT_SYS_CONFIG__DUAL_CORE_EN__SHIFT 0x0
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP__SHIFT 0x1
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE__SHIFT 0x7
+#define VGT_SYS_CONFIG__NUM_SUBGROUPS_IN_FLIGHT__SHIFT 0x8
+#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x00000001L
+#define VGT_SYS_CONFIG__MAX_LS_HS_THDGRP_MASK 0x0000007EL
+#define VGT_SYS_CONFIG__ADC_EVENT_FILTER_DISABLE_MASK 0x00000080L
+#define VGT_SYS_CONFIG__NUM_SUBGROUPS_IN_FLIGHT_MASK 0x0007FF00L
+//GE_PRIV_CONTROL
+#define GE_PRIV_CONTROL__RESERVED__SHIFT 0x0
+#define GE_PRIV_CONTROL__CLAMP_PRIMGRP_SIZE__SHIFT 0x1
+#define GE_PRIV_CONTROL__RESET_ON_PIPELINE_CHANGE__SHIFT 0xa
+#define GE_PRIV_CONTROL__FGCG_OVERRIDE__SHIFT 0xf
+#define GE_PRIV_CONTROL__CLAMP_HS_OFFCHIP_PER_SE_OVERRIDE__SHIFT 0x10
+#define GE_PRIV_CONTROL__DISABLE_ACCUM_AGM__SHIFT 0x11
+#define GE_PRIV_CONTROL__RESERVED_MASK 0x00000001L
+#define GE_PRIV_CONTROL__CLAMP_PRIMGRP_SIZE_MASK 0x000003FEL
+#define GE_PRIV_CONTROL__RESET_ON_PIPELINE_CHANGE_MASK 0x00000400L
+#define GE_PRIV_CONTROL__FGCG_OVERRIDE_MASK 0x00008000L
+#define GE_PRIV_CONTROL__CLAMP_HS_OFFCHIP_PER_SE_OVERRIDE_MASK 0x00010000L
+#define GE_PRIV_CONTROL__DISABLE_ACCUM_AGM_MASK 0x00020000L
+//GE_STATUS
+#define GE_STATUS__PERFCOUNTER_STATUS__SHIFT 0x0
+#define GE_STATUS__THREAD_TRACE_STATUS__SHIFT 0x1
+#define GE_STATUS__PERFCOUNTER_STATUS_MASK 0x00000001L
+#define GE_STATUS__THREAD_TRACE_STATUS_MASK 0x00000002L
+//VGT_GS_MAX_WAVE_ID
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define VGT_GS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+//GFX_PIPE_CONTROL
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT__SHIFT 0x0
+#define GFX_PIPE_CONTROL__RESERVED__SHIFT 0xd
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN__SHIFT 0x10
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_STALL_EN__SHIFT 0x11
+#define GFX_PIPE_CONTROL__HYSTERESIS_CNT_MASK 0x00001FFFL
+#define GFX_PIPE_CONTROL__RESERVED_MASK 0x0000E000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_EN_MASK 0x00010000L
+#define GFX_PIPE_CONTROL__CONTEXT_SUSPEND_STALL_EN_MASK 0x00020000L
+//CC_GC_SHADER_ARRAY_CONFIG
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT 0x10
+#define CC_GC_SHADER_ARRAY_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK 0xFFFF0000L
+//GE2_SE_CNTL_STATUS
+#define GE2_SE_CNTL_STATUS__TE_BUSY__SHIFT 0x0
+#define GE2_SE_CNTL_STATUS__NGG_BUSY__SHIFT 0x1
+#define GE2_SE_CNTL_STATUS__HS_BUSY__SHIFT 0x2
+#define GE2_SE_CNTL_STATUS__TE_BUSY_MASK 0x00000001L
+#define GE2_SE_CNTL_STATUS__NGG_BUSY_MASK 0x00000002L
+#define GE2_SE_CNTL_STATUS__HS_BUSY_MASK 0x00000004L
+//VGT_RESET_DEBUG
+#define VGT_RESET_DEBUG__GS_DISABLE__SHIFT 0x0
+#define VGT_RESET_DEBUG__TESS_DISABLE__SHIFT 0x1
+#define VGT_RESET_DEBUG__WD_DISABLE__SHIFT 0x2
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE0__SHIFT 0x3
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE1__SHIFT 0x4
+#define VGT_RESET_DEBUG__ENABLE_VMID_RESET_UTCL1__SHIFT 0x5
+#define VGT_RESET_DEBUG__DISABLE_PREFETCH__SHIFT 0x6
+#define VGT_RESET_DEBUG__DISABLE_SWITCH_MODE_STALL_FIX__SHIFT 0x7
+#define VGT_RESET_DEBUG__DISABLE_SENDING_MULTIPLE_SE_IN_PD__SHIFT 0x8
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_ON_OFF__SHIFT 0x9
+#define VGT_RESET_DEBUG__DISABLE_PATCH_OPTIMIZATION__SHIFT 0xa
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_OFF_ON__SHIFT 0xb
+#define VGT_RESET_DEBUG__DISABLE_MERGE_GRP_PERF_FIX__SHIFT 0xc
+#define VGT_RESET_DEBUG__DISABLE_MESH_SHADER_ATTR_PACKING__SHIFT 0xd
+#define VGT_RESET_DEBUG__ENABLE_SMALL_INST_PACK_ADJ_GS_OFF__SHIFT 0xe
+#define VGT_RESET_DEBUG__DISABLE_PATCH_DIST_LAST_DONUT_SE_SWITCH_LOGIC__SHIFT 0xf
+#define VGT_RESET_DEBUG__SPARE__SHIFT 0x10
+#define VGT_RESET_DEBUG__GS_DISABLE_MASK 0x00000001L
+#define VGT_RESET_DEBUG__TESS_DISABLE_MASK 0x00000002L
+#define VGT_RESET_DEBUG__WD_DISABLE_MASK 0x00000004L
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE0_MASK 0x00000008L
+#define VGT_RESET_DEBUG__DISABLE_TE11_DIST_PIPE1_MASK 0x00000010L
+#define VGT_RESET_DEBUG__ENABLE_VMID_RESET_UTCL1_MASK 0x00000020L
+#define VGT_RESET_DEBUG__DISABLE_PREFETCH_MASK 0x00000040L
+#define VGT_RESET_DEBUG__DISABLE_SWITCH_MODE_STALL_FIX_MASK 0x00000080L
+#define VGT_RESET_DEBUG__DISABLE_SENDING_MULTIPLE_SE_IN_PD_MASK 0x00000100L
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_ON_OFF_MASK 0x00000200L
+#define VGT_RESET_DEBUG__DISABLE_PATCH_OPTIMIZATION_MASK 0x00000400L
+#define VGT_RESET_DEBUG__ENABLE_DIST_STALL_TESS_OFF_ON_MASK 0x00000800L
+#define VGT_RESET_DEBUG__DISABLE_MERGE_GRP_PERF_FIX_MASK 0x00001000L
+#define VGT_RESET_DEBUG__DISABLE_MESH_SHADER_ATTR_PACKING_MASK 0x00002000L
+#define VGT_RESET_DEBUG__ENABLE_SMALL_INST_PACK_ADJ_GS_OFF_MASK 0x00004000L
+#define VGT_RESET_DEBUG__DISABLE_PATCH_DIST_LAST_DONUT_SE_SWITCH_LOGIC_MASK 0x00008000L
+#define VGT_RESET_DEBUG__SPARE_MASK 0xFFFF0000L
+//GE_SPI_IF_SAFE_REG
+#define GE_SPI_IF_SAFE_REG__GE_SPI_LS_ES_DATA__SHIFT 0x0
+#define GE_SPI_IF_SAFE_REG__GE_SPI_HS_GS_DATA__SHIFT 0x6
+#define GE_SPI_IF_SAFE_REG__GE_SPI_GRP__SHIFT 0xc
+#define GE_SPI_IF_SAFE_REG__GE_SPI_LS_ES_DATA_MASK 0x0000003FL
+#define GE_SPI_IF_SAFE_REG__GE_SPI_HS_GS_DATA_MASK 0x00000FC0L
+#define GE_SPI_IF_SAFE_REG__GE_SPI_GRP_MASK 0x0003F000L
+//GE_PA_IF_SAFE_REG
+#define GE_PA_IF_SAFE_REG__GE_PA_CSB__SHIFT 0x0
+#define GE_PA_IF_SAFE_REG__GE_PA_PAYLOAD__SHIFT 0xa
+#define GE_PA_IF_SAFE_REG__GE_PA_CSB_MASK 0x000003FFL
+#define GE_PA_IF_SAFE_REG__GE_PA_PAYLOAD_MASK 0x000FFC00L
+//PA_CL_CNTL_STATUS
+#define PA_CL_CNTL_STATUS__CL_BUSY__SHIFT 0x1f
+#define PA_CL_CNTL_STATUS__CL_BUSY_MASK 0x80000000L
+//PA_CL_ENHANCE
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA__SHIFT 0x0
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT 0x1
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL__SHIFT 0x3
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE__SHIFT 0x4
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL__SHIFT 0x5
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET__SHIFT 0x6
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS__SHIFT 0x7
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC__SHIFT 0x8
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION__SHIFT 0x9
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER__SHIFT 0xb
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH__SHIFT 0xc
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH__SHIFT 0xe
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE__SHIFT 0x11
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE__SHIFT 0x12
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE__SHIFT 0x13
+#define PA_CL_ENHANCE__DISABLE_PA_PH_INTF_FINE_CLOCK_GATE__SHIFT 0x14
+#define PA_CL_ENHANCE__DISABLE_PA_SX_REQ_INTF_FINE_CLOCK_GATE__SHIFT 0x15
+#define PA_CL_ENHANCE__ENABLE_PA_RATE_CNTL__SHIFT 0x16
+#define PA_CL_ENHANCE__CLAMP_NEGATIVE_BB_TO_ZERO__SHIFT 0x17
+#define PA_CL_ENHANCE__ECO_SPARE3__SHIFT 0x1c
+#define PA_CL_ENHANCE__ECO_SPARE2__SHIFT 0x1d
+#define PA_CL_ENHANCE__ECO_SPARE1__SHIFT 0x1e
+#define PA_CL_ENHANCE__ECO_SPARE0__SHIFT 0x1f
+#define PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK 0x00000001L
+#define PA_CL_ENHANCE__NUM_CLIP_SEQ_MASK 0x00000006L
+#define PA_CL_ENHANCE__CLIPPED_PRIM_SEQ_STALL_MASK 0x00000008L
+#define PA_CL_ENHANCE__VE_NAN_PROC_DISABLE_MASK 0x00000010L
+#define PA_CL_ENHANCE__XTRA_DEBUG_REG_SEL_MASK 0x00000020L
+#define PA_CL_ENHANCE__IGNORE_PIPELINE_RESET_MASK 0x00000040L
+#define PA_CL_ENHANCE__KILL_INNER_EDGE_FLAGS_MASK 0x00000080L
+#define PA_CL_ENHANCE__NGG_PA_TO_ALL_SC_MASK 0x00000100L
+#define PA_CL_ENHANCE__TC_LATENCY_TIME_STAMP_RESOLUTION_MASK 0x00000600L
+#define PA_CL_ENHANCE__NGG_BYPASS_PRIM_FILTER_MASK 0x00000800L
+#define PA_CL_ENHANCE__NGG_SIDEBAND_MEMORY_DEPTH_MASK 0x00003000L
+#define PA_CL_ENHANCE__NGG_PRIM_INDICES_FIFO_DEPTH_MASK 0x0001C000L
+#define PA_CL_ENHANCE__PROG_NEAR_CLIP_PLANE_ENABLE_MASK 0x00020000L
+#define PA_CL_ENHANCE__POLY_INNER_EDGE_FLAG_DISABLE_MASK 0x00040000L
+#define PA_CL_ENHANCE__TC_REQUEST_PERF_CNTR_ENABLE_MASK 0x00080000L
+#define PA_CL_ENHANCE__DISABLE_PA_PH_INTF_FINE_CLOCK_GATE_MASK 0x00100000L
+#define PA_CL_ENHANCE__DISABLE_PA_SX_REQ_INTF_FINE_CLOCK_GATE_MASK 0x00200000L
+#define PA_CL_ENHANCE__ENABLE_PA_RATE_CNTL_MASK 0x00400000L
+#define PA_CL_ENHANCE__CLAMP_NEGATIVE_BB_TO_ZERO_MASK 0x00800000L
+#define PA_CL_ENHANCE__ECO_SPARE3_MASK 0x10000000L
+#define PA_CL_ENHANCE__ECO_SPARE2_MASK 0x20000000L
+#define PA_CL_ENHANCE__ECO_SPARE1_MASK 0x40000000L
+#define PA_CL_ENHANCE__ECO_SPARE0_MASK 0x80000000L
+//PA_CL_RESET_DEBUG
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE__SHIFT 0x0
+#define PA_CL_RESET_DEBUG__CL_TRIV_DISC_DISABLE_MASK 0x00000001L
+//PA_SU_CNTL_STATUS
+#define PA_SU_CNTL_STATUS__SU_BUSY__SHIFT 0x1f
+#define PA_SU_CNTL_STATUS__SU_BUSY_MASK 0x80000000L
+//PA_SC_FIFO_DEPTH_CNTL
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH__SHIFT 0x0
+#define PA_SC_FIFO_DEPTH_CNTL__DEPTH_MASK 0x000003FFL
+
+
+// addressBlock: gc_sqdec
+//SQ_CONFIG
+#define SQ_CONFIG__ECO_SPARE__SHIFT 0x0
+#define SQ_CONFIG__NEW_TRANS_ARB_SCHEME__SHIFT 0x8
+#define SQ_CONFIG__DISABLE_VMEM_EXEC_ZERO_SKIP__SHIFT 0x9
+#define SQ_CONFIG__DISABLE_SGPR_RD_KILL__SHIFT 0xa
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_GS__SHIFT 0x12
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_GS__SHIFT 0x13
+#define SQ_CONFIG__WCLK_HYSTERESIS_CNT__SHIFT 0x15
+#define SQ_CONFIG__DISABLE_END_CLAUSE_TX__SHIFT 0x1b
+#define SQ_CONFIG__ECO_SPARE_MASK 0x000000FFL
+#define SQ_CONFIG__NEW_TRANS_ARB_SCHEME_MASK 0x00000100L
+#define SQ_CONFIG__DISABLE_VMEM_EXEC_ZERO_SKIP_MASK 0x00000200L
+#define SQ_CONFIG__DISABLE_SGPR_RD_KILL_MASK 0x00000400L
+#define SQ_CONFIG__ENABLE_HIPRIO_ON_EXP_RDY_GS_MASK 0x00040000L
+#define SQ_CONFIG__PRIO_VAL_ON_EXP_RDY_GS_MASK 0x00180000L
+#define SQ_CONFIG__WCLK_HYSTERESIS_CNT_MASK 0x00600000L
+#define SQ_CONFIG__DISABLE_END_CLAUSE_TX_MASK 0x08000000L
+//SQC_CONFIG
+#define SQC_CONFIG__INST_CACHE_SIZE__SHIFT 0x0
+#define SQC_CONFIG__DATA_CACHE_SIZE__SHIFT 0x2
+#define SQC_CONFIG__MISS_FIFO_DEPTH__SHIFT 0x4
+#define SQC_CONFIG__HIT_FIFO_DEPTH__SHIFT 0x6
+#define SQC_CONFIG__FORCE_ALWAYS_MISS__SHIFT 0x7
+#define SQC_CONFIG__FORCE_IN_ORDER__SHIFT 0x8
+#define SQC_CONFIG__PER_VMID_INV_DISABLE__SHIFT 0x9
+#define SQC_CONFIG__EVICT_LRU__SHIFT 0xa
+#define SQC_CONFIG__FORCE_2_BANK__SHIFT 0xc
+#define SQC_CONFIG__FORCE_1_BANK__SHIFT 0xd
+#define SQC_CONFIG__LS_DISABLE_CLOCKS__SHIFT 0xe
+#define SQC_CONFIG__CACHE_CTRL_GCR_FIX_DISABLE__SHIFT 0x16
+#define SQC_CONFIG__CACHE_CTRL_ALMOST_MAX_INFLIGHT_CONFIG__SHIFT 0x17
+#define SQC_CONFIG__SPARE__SHIFT 0x1a
+#define SQC_CONFIG__INST_CACHE_SIZE_MASK 0x00000003L
+#define SQC_CONFIG__DATA_CACHE_SIZE_MASK 0x0000000CL
+#define SQC_CONFIG__MISS_FIFO_DEPTH_MASK 0x00000030L
+#define SQC_CONFIG__HIT_FIFO_DEPTH_MASK 0x00000040L
+#define SQC_CONFIG__FORCE_ALWAYS_MISS_MASK 0x00000080L
+#define SQC_CONFIG__FORCE_IN_ORDER_MASK 0x00000100L
+#define SQC_CONFIG__PER_VMID_INV_DISABLE_MASK 0x00000200L
+#define SQC_CONFIG__EVICT_LRU_MASK 0x00000C00L
+#define SQC_CONFIG__FORCE_2_BANK_MASK 0x00001000L
+#define SQC_CONFIG__FORCE_1_BANK_MASK 0x00002000L
+#define SQC_CONFIG__LS_DISABLE_CLOCKS_MASK 0x003FC000L
+#define SQC_CONFIG__CACHE_CTRL_GCR_FIX_DISABLE_MASK 0x00400000L
+#define SQC_CONFIG__CACHE_CTRL_ALMOST_MAX_INFLIGHT_CONFIG_MASK 0x03800000L
+#define SQC_CONFIG__SPARE_MASK 0xFC000000L
+//LDS_CONFIG
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING__SHIFT 0x0
+#define LDS_CONFIG__CONF_BIT_1__SHIFT 0x1
+#define LDS_CONFIG__WAVE32_INTERP_DUAL_ISSUE_DISABLE__SHIFT 0x2
+#define LDS_CONFIG__SP_TDDATA_FGCG_OVERRIDE__SHIFT 0x3
+#define LDS_CONFIG__SQC_PERF_FGCG_OVERRIDE__SHIFT 0x4
+#define LDS_CONFIG__CONF_BIT_5__SHIFT 0x5
+#define LDS_CONFIG__CONF_BIT_6__SHIFT 0x6
+#define LDS_CONFIG__CONF_BIT_7__SHIFT 0x7
+#define LDS_CONFIG__CONF_BIT_8__SHIFT 0x8
+#define LDS_CONFIG__ADDR_OUT_OF_RANGE_REPORTING_MASK 0x00000001L
+#define LDS_CONFIG__CONF_BIT_1_MASK 0x00000002L
+#define LDS_CONFIG__WAVE32_INTERP_DUAL_ISSUE_DISABLE_MASK 0x00000004L
+#define LDS_CONFIG__SP_TDDATA_FGCG_OVERRIDE_MASK 0x00000008L
+#define LDS_CONFIG__SQC_PERF_FGCG_OVERRIDE_MASK 0x00000010L
+#define LDS_CONFIG__CONF_BIT_5_MASK 0x00000020L
+#define LDS_CONFIG__CONF_BIT_6_MASK 0x00000040L
+#define LDS_CONFIG__CONF_BIT_7_MASK 0x00000080L
+#define LDS_CONFIG__CONF_BIT_8_MASK 0x00000100L
+//SQ_RANDOM_WAVE_PRI
+#define SQ_RANDOM_WAVE_PRI__RET__SHIFT 0x0
+#define SQ_RANDOM_WAVE_PRI__RUI__SHIFT 0x7
+#define SQ_RANDOM_WAVE_PRI__RNG__SHIFT 0xa
+#define SQ_RANDOM_WAVE_PRI__FORCE_IB_ARB_PRIO_MSK_VALID__SHIFT 0x1f
+#define SQ_RANDOM_WAVE_PRI__RET_MASK 0x0000007FL
+#define SQ_RANDOM_WAVE_PRI__RUI_MASK 0x00000380L
+#define SQ_RANDOM_WAVE_PRI__RNG_MASK 0x00FFFC00L
+#define SQ_RANDOM_WAVE_PRI__FORCE_IB_ARB_PRIO_MSK_VALID_MASK 0x80000000L
+//SQG_STATUS
+#define SQG_STATUS__REG_BUSY__SHIFT 0x0
+#define SQG_STATUS__REG_BUSY_MASK 0x00000001L
+//SQ_FIFO_SIZES
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE__SHIFT 0x0
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE__SHIFT 0x8
+#define SQ_FIFO_SIZES__EXPORT_BUF_GS_RESERVED__SHIFT 0xc
+#define SQ_FIFO_SIZES__EXPORT_BUF_PS_RESERVED__SHIFT 0xe
+#define SQ_FIFO_SIZES__EXPORT_BUF_REDUCE__SHIFT 0x10
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE__SHIFT 0x12
+#define SQ_FIFO_SIZES__EXPORT_BUF_PRIMPOS_LIMIT__SHIFT 0x14
+#define SQ_FIFO_SIZES__INTERRUPT_FIFO_SIZE_MASK 0x0000000FL
+#define SQ_FIFO_SIZES__TTRACE_FIFO_SIZE_MASK 0x00000300L
+#define SQ_FIFO_SIZES__EXPORT_BUF_GS_RESERVED_MASK 0x00003000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_PS_RESERVED_MASK 0x0000C000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_REDUCE_MASK 0x00030000L
+#define SQ_FIFO_SIZES__VMEM_DATA_FIFO_SIZE_MASK 0x000C0000L
+#define SQ_FIFO_SIZES__EXPORT_BUF_PRIMPOS_LIMIT_MASK 0x00300000L
+//SQ_DSM_CNTL
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0__SHIFT 0x0
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1__SHIFT 0x1
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0__SHIFT 0x2
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1__SHIFT 0x3
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0__SHIFT 0x8
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1__SHIFT 0x9
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE__SHIFT 0xa
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0__SHIFT 0x10
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1__SHIFT 0x11
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01__SHIFT 0x12
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2__SHIFT 0x13
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3__SHIFT 0x14
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23__SHIFT 0x15
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0__SHIFT 0x18
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1__SHIFT 0x19
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_0_MASK 0x00000001L
+#define SQ_DSM_CNTL__WAVEFRONT_STALL_1_MASK 0x00000002L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_0_MASK 0x00000004L
+#define SQ_DSM_CNTL__SPI_BACKPRESSURE_1_MASK 0x00000008L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA0_MASK 0x00000100L
+#define SQ_DSM_CNTL__SEL_DSM_SGPR_IRRITATOR_DATA1_MASK 0x00000200L
+#define SQ_DSM_CNTL__SGPR_ENABLE_SINGLE_WRITE_MASK 0x00000400L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA0_MASK 0x00010000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA1_MASK 0x00020000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE01_MASK 0x00040000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA2_MASK 0x00080000L
+#define SQ_DSM_CNTL__SEL_DSM_LDS_IRRITATOR_DATA3_MASK 0x00100000L
+#define SQ_DSM_CNTL__LDS_ENABLE_SINGLE_WRITE23_MASK 0x00200000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA0_MASK 0x01000000L
+#define SQ_DSM_CNTL__SEL_DSM_SP_IRRITATOR_DATA1_MASK 0x02000000L
+#define SQ_DSM_CNTL__SP_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//SQ_DSM_CNTL2
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY__SHIFT 0x5
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY__SHIFT 0x8
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY__SHIFT 0xb
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY__SHIFT 0xe
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY__SHIFT 0x14
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY__SHIFT 0x1a
+#define SQ_DSM_CNTL2__SGPR_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SQ_DSM_CNTL2__SGPR_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SQ_DSM_CNTL2__LDS_D_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define SQ_DSM_CNTL2__LDS_D_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define SQ_DSM_CNTL2__LDS_I_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define SQ_DSM_CNTL2__LDS_I_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define SQ_DSM_CNTL2__SP_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define SQ_DSM_CNTL2__SP_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define SQ_DSM_CNTL2__LDS_INJECT_DELAY_MASK 0x000FC000L
+#define SQ_DSM_CNTL2__SP_INJECT_DELAY_MASK 0x03F00000L
+#define SQ_DSM_CNTL2__SQ_INJECT_DELAY_MASK 0xFC000000L
+//SP_CONFIG
+#define SP_CONFIG__DEST_CACHE_EVICT_COUNTER__SHIFT 0x0
+#define SP_CONFIG__ALU_BUSY_MGCG_OVERRIDE__SHIFT 0x2
+#define SP_CONFIG__DISABLE_TRANS_COEXEC__SHIFT 0x3
+#define SP_CONFIG__CAC_COUNTER_OVERRIDE__SHIFT 0x4
+#define SP_CONFIG__SP_SX_EXPVDATA_FGCG_OVERRIDE__SHIFT 0x5
+#define SP_CONFIG__DEST_CACHE_EVICT_COUNTER_MASK 0x00000003L
+#define SP_CONFIG__ALU_BUSY_MGCG_OVERRIDE_MASK 0x00000004L
+#define SP_CONFIG__DISABLE_TRANS_COEXEC_MASK 0x00000008L
+#define SP_CONFIG__CAC_COUNTER_OVERRIDE_MASK 0x00000010L
+#define SP_CONFIG__SP_SX_EXPVDATA_FGCG_OVERRIDE_MASK 0x00000020L
+//SQ_ARB_CONFIG
+#define SQ_ARB_CONFIG__WG_RR_INTERVAL__SHIFT 0x0
+#define SQ_ARB_CONFIG__FWD_PROG_INTERVAL__SHIFT 0x4
+#define SQ_ARB_CONFIG__WG_RR_INTERVAL_MASK 0x00000003L
+#define SQ_ARB_CONFIG__FWD_PROG_INTERVAL_MASK 0x00000030L
+//SQ_DEBUG_HOST_TRAP_STATUS
+#define SQ_DEBUG_HOST_TRAP_STATUS__PENDING_COUNT__SHIFT 0x0
+#define SQ_DEBUG_HOST_TRAP_STATUS__PENDING_COUNT_MASK 0x0000007FL
+//SQG_GL1H_STATUS
+#define SQG_GL1H_STATUS__R0_ACK_ERR_DETECTED__SHIFT 0x0
+#define SQG_GL1H_STATUS__R0_XNACK_ERR_DETECTED__SHIFT 0x1
+#define SQG_GL1H_STATUS__R1_ACK_ERR_DETECTED__SHIFT 0x2
+#define SQG_GL1H_STATUS__R1_XNACK_ERR_DETECTED__SHIFT 0x3
+#define SQG_GL1H_STATUS__R0_ACK_ERR_DETECTED_MASK 0x00000001L
+#define SQG_GL1H_STATUS__R0_XNACK_ERR_DETECTED_MASK 0x00000002L
+#define SQG_GL1H_STATUS__R1_ACK_ERR_DETECTED_MASK 0x00000004L
+#define SQG_GL1H_STATUS__R1_XNACK_ERR_DETECTED_MASK 0x00000008L
+//SQG_CONFIG
+#define SQG_CONFIG__GL1H_PREFETCH_PAGE__SHIFT 0x0
+#define SQG_CONFIG__SQG_ICPFT_EN__SHIFT 0xd
+#define SQG_CONFIG__SQG_ICPFT_CLR__SHIFT 0xe
+#define SQG_CONFIG__XNACK_INTR_MASK__SHIFT 0x10
+#define SQG_CONFIG__GL1H_PREFETCH_PAGE_MASK 0x0000000FL
+#define SQG_CONFIG__SQG_ICPFT_EN_MASK 0x00002000L
+#define SQG_CONFIG__SQG_ICPFT_CLR_MASK 0x00004000L
+#define SQG_CONFIG__XNACK_INTR_MASK_MASK 0xFFFF0000L
+//SQ_PERF_SNAPSHOT_CTRL
+#define SQ_PERF_SNAPSHOT_CTRL__TIMER_ON_OFF__SHIFT 0x0
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK__SHIFT 0x1
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL__SHIFT 0x11
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTERVAL__SHIFT 0x12
+#define SQ_PERF_SNAPSHOT_CTRL__TIMER_ON_OFF_MASK 0x00000001L
+#define SQ_PERF_SNAPSHOT_CTRL__VMID_MASK_MASK 0x0001FFFEL
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_SEL_MASK 0x00020000L
+#define SQ_PERF_SNAPSHOT_CTRL__COUNT_INTERVAL_MASK 0x003C0000L
+//CC_GC_SHADER_RATE_CONFIG
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define CC_GC_SHADER_RATE_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+//SQ_INTERRUPT_AUTO_MASK
+#define SQ_INTERRUPT_AUTO_MASK__MASK__SHIFT 0x0
+#define SQ_INTERRUPT_AUTO_MASK__MASK_MASK 0x00FFFFFFL
+//SQ_INTERRUPT_MSG_CTRL
+#define SQ_INTERRUPT_MSG_CTRL__STALL__SHIFT 0x0
+#define SQ_INTERRUPT_MSG_CTRL__STALL_MASK 0x00000001L
+//SQ_WATCH0_ADDR_H
+#define SQ_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH0_ADDR_L
+#define SQ_WATCH0_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH0_CNTL
+#define SQ_WATCH0_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH0_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH0_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH0_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH1_ADDR_H
+#define SQ_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH1_ADDR_L
+#define SQ_WATCH1_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH1_CNTL
+#define SQ_WATCH1_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH1_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH1_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH1_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH2_ADDR_H
+#define SQ_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH2_ADDR_L
+#define SQ_WATCH2_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH2_CNTL
+#define SQ_WATCH2_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH2_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH2_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH2_CNTL__VALID_MASK 0x80000000L
+//SQ_WATCH3_ADDR_H
+#define SQ_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define SQ_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//SQ_WATCH3_ADDR_L
+#define SQ_WATCH3_ADDR_L__ADDR__SHIFT 0x6
+#define SQ_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFFC0L
+//SQ_WATCH3_CNTL
+#define SQ_WATCH3_CNTL__MASK__SHIFT 0x0
+#define SQ_WATCH3_CNTL__VMID__SHIFT 0x18
+#define SQ_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define SQ_WATCH3_CNTL__MASK_MASK 0x00FFFFFFL
+#define SQ_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define SQ_WATCH3_CNTL__VALID_MASK 0x80000000L
+//SQ_IND_INDEX
+#define SQ_IND_INDEX__WAVE_ID__SHIFT 0x0
+#define SQ_IND_INDEX__WORKITEM_ID__SHIFT 0x5
+#define SQ_IND_INDEX__AUTO_INCR__SHIFT 0xb
+#define SQ_IND_INDEX__INDEX__SHIFT 0x10
+#define SQ_IND_INDEX__WAVE_ID_MASK 0x0000001FL
+#define SQ_IND_INDEX__WORKITEM_ID_MASK 0x000007E0L
+#define SQ_IND_INDEX__AUTO_INCR_MASK 0x00000800L
+#define SQ_IND_INDEX__INDEX_MASK 0xFFFF0000L
+//SQ_IND_DATA
+#define SQ_IND_DATA__DATA__SHIFT 0x0
+#define SQ_IND_DATA__DATA_MASK 0xFFFFFFFFL
+//SQ_CMD
+#define SQ_CMD__CMD__SHIFT 0x0
+#define SQ_CMD__MODE__SHIFT 0x4
+#define SQ_CMD__CHECK_VMID__SHIFT 0x7
+#define SQ_CMD__DATA__SHIFT 0x8
+#define SQ_CMD__WAVE_ID__SHIFT 0x10
+#define SQ_CMD__QUEUE_ID__SHIFT 0x18
+#define SQ_CMD__VM_ID__SHIFT 0x1c
+#define SQ_CMD__CMD_MASK 0x0000000FL
+#define SQ_CMD__MODE_MASK 0x00000070L
+#define SQ_CMD__CHECK_VMID_MASK 0x00000080L
+#define SQ_CMD__DATA_MASK 0x00000F00L
+#define SQ_CMD__WAVE_ID_MASK 0x001F0000L
+#define SQ_CMD__QUEUE_ID_MASK 0x07000000L
+#define SQ_CMD__VM_ID_MASK 0xF0000000L
+//SQC_MISC_CONFIG
+#define SQC_MISC_CONFIG__UNUSED__SHIFT 0x0
+#define SQC_MISC_CONFIG__SQC_SPI_TTRACE_FGCG_OVERRIDE__SHIFT 0x5
+#define SQC_MISC_CONFIG__SQ_SPI_MSG_FGCG_OVERRIDE__SHIFT 0x6
+#define SQC_MISC_CONFIG__SPI_SQ_EXPALLOC_FGCG_OVERRIDE__SHIFT 0x7
+#define SQC_MISC_CONFIG__SQC_SQ_DATA_RET_FGCG_OVERRIDE__SHIFT 0x8
+#define SQC_MISC_CONFIG__SQC_SQ_INST_RET_FGCG_OVERRIDE__SHIFT 0x9
+#define SQC_MISC_CONFIG__SQC_GCR_RSP_FGCG_OVERRIDE__SHIFT 0xa
+#define SQC_MISC_CONFIG__ICLK_MGCG_DISABLE__SHIFT 0xb
+#define SQC_MISC_CONFIG__ICLK_BANK_MGCG_DISABLE__SHIFT 0xc
+#define SQC_MISC_CONFIG__DCLK_MGCG_DISABLE__SHIFT 0xd
+#define SQC_MISC_CONFIG__GCLK_MGCG_DISABLE__SHIFT 0xe
+#define SQC_MISC_CONFIG__MCLK_MGCG_DISABLE__SHIFT 0xf
+#define SQC_MISC_CONFIG__PCLK_MGCG_DISABLE__SHIFT 0x10
+#define SQC_MISC_CONFIG__BCLK_MGCG_DISABLE__SHIFT 0x11
+#define SQC_MISC_CONFIG__SQC_TA_RESET_FGCG_OVERRIDE__SHIFT 0x12
+#define SQC_MISC_CONFIG__SQC_LDS_CONFIG_FGCG_OVERRIDE__SHIFT 0x13
+#define SQC_MISC_CONFIG__DCLK_BANK_MGCG_DISABLE__SHIFT 0x14
+#define SQC_MISC_CONFIG__SQC_SQ_BARRIER_DONE_FGCG_OVERRIDE__SHIFT 0x15
+#define SQC_MISC_CONFIG__SQC_SQ_MSGDONE_FGCG_OVERRIDE__SHIFT 0x16
+#define SQC_MISC_CONFIG__CMCLK_MGCG_DISABLE__SHIFT 0x17
+#define SQC_MISC_CONFIG__SQC_GL1_CLKEN_OVERRIDE__SHIFT 0x18
+#define SQC_MISC_CONFIG__SQC_CORE_OVERRIDE__SHIFT 0x19
+#define SQC_MISC_CONFIG__ICLK_HMF_BS_MGCG_DISABLE__SHIFT 0x1a
+#define SQC_MISC_CONFIG__ICLK_CC_MGCG_DISABLE__SHIFT 0x1b
+#define SQC_MISC_CONFIG__DCLK_HMF_BS_MGCG_DISABLE__SHIFT 0x1c
+#define SQC_MISC_CONFIG__DCLK_CC_MGCG_DISABLE__SHIFT 0x1d
+#define SQC_MISC_CONFIG__UNUSED_MASK 0x0000001FL
+#define SQC_MISC_CONFIG__SQC_SPI_TTRACE_FGCG_OVERRIDE_MASK 0x00000020L
+#define SQC_MISC_CONFIG__SQ_SPI_MSG_FGCG_OVERRIDE_MASK 0x00000040L
+#define SQC_MISC_CONFIG__SPI_SQ_EXPALLOC_FGCG_OVERRIDE_MASK 0x00000080L
+#define SQC_MISC_CONFIG__SQC_SQ_DATA_RET_FGCG_OVERRIDE_MASK 0x00000100L
+#define SQC_MISC_CONFIG__SQC_SQ_INST_RET_FGCG_OVERRIDE_MASK 0x00000200L
+#define SQC_MISC_CONFIG__SQC_GCR_RSP_FGCG_OVERRIDE_MASK 0x00000400L
+#define SQC_MISC_CONFIG__ICLK_MGCG_DISABLE_MASK 0x00000800L
+#define SQC_MISC_CONFIG__ICLK_BANK_MGCG_DISABLE_MASK 0x00001000L
+#define SQC_MISC_CONFIG__DCLK_MGCG_DISABLE_MASK 0x00002000L
+#define SQC_MISC_CONFIG__GCLK_MGCG_DISABLE_MASK 0x00004000L
+#define SQC_MISC_CONFIG__MCLK_MGCG_DISABLE_MASK 0x00008000L
+#define SQC_MISC_CONFIG__PCLK_MGCG_DISABLE_MASK 0x00010000L
+#define SQC_MISC_CONFIG__BCLK_MGCG_DISABLE_MASK 0x00020000L
+#define SQC_MISC_CONFIG__SQC_TA_RESET_FGCG_OVERRIDE_MASK 0x00040000L
+#define SQC_MISC_CONFIG__SQC_LDS_CONFIG_FGCG_OVERRIDE_MASK 0x00080000L
+#define SQC_MISC_CONFIG__DCLK_BANK_MGCG_DISABLE_MASK 0x00100000L
+#define SQC_MISC_CONFIG__SQC_SQ_BARRIER_DONE_FGCG_OVERRIDE_MASK 0x00200000L
+#define SQC_MISC_CONFIG__SQC_SQ_MSGDONE_FGCG_OVERRIDE_MASK 0x00400000L
+#define SQC_MISC_CONFIG__CMCLK_MGCG_DISABLE_MASK 0x00800000L
+#define SQC_MISC_CONFIG__SQC_GL1_CLKEN_OVERRIDE_MASK 0x01000000L
+#define SQC_MISC_CONFIG__SQC_CORE_OVERRIDE_MASK 0x02000000L
+#define SQC_MISC_CONFIG__ICLK_HMF_BS_MGCG_DISABLE_MASK 0x04000000L
+#define SQC_MISC_CONFIG__ICLK_CC_MGCG_DISABLE_MASK 0x08000000L
+#define SQC_MISC_CONFIG__DCLK_HMF_BS_MGCG_DISABLE_MASK 0x10000000L
+#define SQC_MISC_CONFIG__DCLK_CC_MGCG_DISABLE_MASK 0x20000000L
+
+
+// addressBlock: gc_shsdec
+//SX_DEBUG_BUSY
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ3__SHIFT 0x0
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ2__SHIFT 0x1
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ1__SHIFT 0x2
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ3__SHIFT 0x3
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ2__SHIFT 0x4
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ1__SHIFT 0x5
+#define SX_DEBUG_BUSY__PCCMD_VALID__SHIFT 0x6
+#define SX_DEBUG_BUSY__VDATA1_VALID__SHIFT 0x7
+#define SX_DEBUG_BUSY__VDATA0_VALID__SHIFT 0x8
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL__SHIFT 0x9
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL__SHIFT 0xa
+#define SX_DEBUG_BUSY__SX_SX_IN_VALID__SHIFT 0xb
+#define SX_DEBUG_BUSY__SX_SX_OUT_VALID__SHIFT 0xc
+#define SX_DEBUG_BUSY__RESERVED__SHIFT 0xd
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ3_MASK 0x00000001L
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ2_MASK 0x00000002L
+#define SX_DEBUG_BUSY__COL_WRCTRL1_VALIDQ1_MASK 0x00000004L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ3_MASK 0x00000008L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ2_MASK 0x00000010L
+#define SX_DEBUG_BUSY__COL_WRCTRL0_VALIDQ1_MASK 0x00000020L
+#define SX_DEBUG_BUSY__PCCMD_VALID_MASK 0x00000040L
+#define SX_DEBUG_BUSY__VDATA1_VALID_MASK 0x00000080L
+#define SX_DEBUG_BUSY__VDATA0_VALID_MASK 0x00000100L
+#define SX_DEBUG_BUSY__CMD_BUSYORVAL_MASK 0x00000200L
+#define SX_DEBUG_BUSY__ADDR_BUSYORVAL_MASK 0x00000400L
+#define SX_DEBUG_BUSY__SX_SX_IN_VALID_MASK 0x00000800L
+#define SX_DEBUG_BUSY__SX_SX_OUT_VALID_MASK 0x00001000L
+#define SX_DEBUG_BUSY__RESERVED_MASK 0xFFFFE000L
+//SX_DEBUG_BUSY_2
+#define SX_DEBUG_BUSY_2__COL_SCBD0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0__SHIFT 0x1
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE__SHIFT 0x2
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0__SHIFT 0x4
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE__SHIFT 0x5
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0__SHIFT 0x7
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE__SHIFT 0x8
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0__SHIFT 0xa
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE__SHIFT 0xb
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_2__COL_DBIF3_QUAD_FREE__SHIFT 0xf
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_2__COL_DBIF2_QUAD_FREE__SHIFT 0x12
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_2__COL_DBIF1_QUAD_FREE__SHIFT 0x15
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_2__COL_DBIF0_QUAD_FREE__SHIFT 0x18
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_2__COL_SCBD0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_2__COL_REQ3_FREECNT_NE0_MASK 0x00000002L
+#define SX_DEBUG_BUSY_2__COL_REQ3_IDLE_MASK 0x00000004L
+#define SX_DEBUG_BUSY_2__COL_REQ3_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_2__COL_REQ2_FREECNT_NE0_MASK 0x00000010L
+#define SX_DEBUG_BUSY_2__COL_REQ2_IDLE_MASK 0x00000020L
+#define SX_DEBUG_BUSY_2__COL_REQ2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_2__COL_REQ1_FREECNT_NE0_MASK 0x00000080L
+#define SX_DEBUG_BUSY_2__COL_REQ1_IDLE_MASK 0x00000100L
+#define SX_DEBUG_BUSY_2__COL_REQ1_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_2__COL_REQ0_FREECNT_NE0_MASK 0x00000400L
+#define SX_DEBUG_BUSY_2__COL_REQ0_IDLE_MASK 0x00000800L
+#define SX_DEBUG_BUSY_2__COL_REQ0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_SENDFREE_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_FIFO_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_2__COL_DBIF3_QUAD_FREE_MASK 0x00008000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_SENDFREE_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_FIFO_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_2__COL_DBIF2_QUAD_FREE_MASK 0x00040000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_SENDFREE_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_FIFO_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_2__COL_DBIF1_QUAD_FREE_MASK 0x00200000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_SENDFREE_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_FIFO_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_2__COL_DBIF0_QUAD_FREE_MASK 0x01000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_2__COL_BUFF3_BANK2_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_3
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_3__COL_BUFF3_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_3__COL_BUFF2_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK3_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_3__COL_BUFF1_BANK2_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_4
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK2_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK1_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_4__COL_BUFF1_BANK0_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK3_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK2_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK1_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_4__COL_BUFF0_BANK0_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK7_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_4__COL_BUFF3_BANK6_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_1
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT__SHIFT 0x0
+#define SX_DEBUG_1__ENABLE_FIFO_DEBUG_WRITE__SHIFT 0x7
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x8
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x9
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0xa
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT__SHIFT 0xb
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT__SHIFT 0xc
+#define SX_DEBUG_1__DISABLE_REP_FGCG__SHIFT 0xd
+#define SX_DEBUG_1__ENABLE_SAME_PC_GDS_CGTS__SHIFT 0xe
+#define SX_DEBUG_1__DISABLE_RAM_FGCG__SHIFT 0xf
+#define SX_DEBUG_1__PC_DISABLE_SAME_ADDR_OPT__SHIFT 0x10
+#define SX_DEBUG_1__DISABLE_COL_VAL_READ_OPT__SHIFT 0x11
+#define SX_DEBUG_1__DISABLE_BC_RB_PLUS__SHIFT 0x12
+#define SX_DEBUG_1__DISABLE_NATIVE_DOWNCVT_FMT_MAPPING__SHIFT 0x13
+#define SX_DEBUG_1__DISABLE_SCBD_READ_PWR_OPT__SHIFT 0x14
+#define SX_DEBUG_1__DISABLE_GDS_CGTS_OPT__SHIFT 0x15
+#define SX_DEBUG_1__DISABLE_DOWNCVT_PWR_OPT__SHIFT 0x16
+#define SX_DEBUG_1__DISABLE_POS_BUFF_REUSE_OPT__SHIFT 0x17
+#define SX_DEBUG_1__SX_DB_QUAD_CREDIT_MASK 0x0000007FL
+#define SX_DEBUG_1__ENABLE_FIFO_DEBUG_WRITE_MASK 0x00000080L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x00000100L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_BYPASS_MASK 0x00000200L
+#define SX_DEBUG_1__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x00000400L
+#define SX_DEBUG_1__DISABLE_QUAD_PAIR_OPT_MASK 0x00000800L
+#define SX_DEBUG_1__DISABLE_PIX_EN_ZERO_OPT_MASK 0x00001000L
+#define SX_DEBUG_1__DISABLE_REP_FGCG_MASK 0x00002000L
+#define SX_DEBUG_1__ENABLE_SAME_PC_GDS_CGTS_MASK 0x00004000L
+#define SX_DEBUG_1__DISABLE_RAM_FGCG_MASK 0x00008000L
+#define SX_DEBUG_1__PC_DISABLE_SAME_ADDR_OPT_MASK 0x00010000L
+#define SX_DEBUG_1__DISABLE_COL_VAL_READ_OPT_MASK 0x00020000L
+#define SX_DEBUG_1__DISABLE_BC_RB_PLUS_MASK 0x00040000L
+#define SX_DEBUG_1__DISABLE_NATIVE_DOWNCVT_FMT_MAPPING_MASK 0x00080000L
+#define SX_DEBUG_1__DISABLE_SCBD_READ_PWR_OPT_MASK 0x00100000L
+#define SX_DEBUG_1__DISABLE_GDS_CGTS_OPT_MASK 0x00200000L
+#define SX_DEBUG_1__DISABLE_DOWNCVT_PWR_OPT_MASK 0x00400000L
+#define SX_DEBUG_1__DISABLE_POS_BUFF_REUSE_OPT_MASK 0x00800000L
+//SX_DEBUG_BUSY_5
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK6_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL3_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL2_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL1_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL0_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL3_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL2_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL1_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK6_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK5_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_5__COL_BUFF3_BANK4_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK7_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK6_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK5_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_5__COL_BUFF2_BANK4_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL3_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL2_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL1_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK7_VAL0_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL3_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL2_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_5__COL_BUFF1_BANK6_VAL1_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_6
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK6_VAL0_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL3_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL2_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL1_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL0_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL3_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL2_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL1_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL0_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL3_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL2_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL1_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL0_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL3_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL2_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL1_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL0_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL3_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL2_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL1_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL0_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL3_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL2_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL1_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL0_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_6__COL_REQ3_CREDIT_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_6__COL_REQ3_FLOP_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_6__COL_REQ2_CREDIT_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_6__COL_REQ2_FLOP_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_6__COL_REQ1_CREDIT_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_6__COL_REQ1_FLOP_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_6__COL_REQ0_CREDIT_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK6_VAL0_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL3_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL2_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL1_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK5_VAL0_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL3_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL2_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL1_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_6__COL_BUFF1_BANK4_VAL0_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL3_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL2_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL1_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK7_VAL0_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL3_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL2_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL1_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK6_VAL0_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL3_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL2_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL1_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK5_VAL0_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL3_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL2_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL1_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_6__COL_BUFF0_BANK4_VAL0_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_6__COL_REQ3_CREDIT_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_6__COL_REQ3_FLOP_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_6__COL_REQ2_CREDIT_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_6__COL_REQ2_FLOP_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_6__COL_REQ1_CREDIT_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_6__COL_REQ1_FLOP_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_6__COL_REQ0_CREDIT_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_7
+#define SX_DEBUG_BUSY_7__COL_REQ0_FLOP_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_7__COL_SCBD1_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1__SHIFT 0x2
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_ADJ__SHIFT 0x3
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ2__SHIFT 0x4
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ3__SHIFT 0x5
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ4__SHIFT 0x6
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ5__SHIFT 0x7
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALID_OUT__SHIFT 0x8
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1__SHIFT 0x9
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_ADJ__SHIFT 0xa
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ2__SHIFT 0xb
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ3__SHIFT 0xc
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ4__SHIFT 0xd
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ5__SHIFT 0xe
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALID_OUT__SHIFT 0xf
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1__SHIFT 0x10
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_ADJ__SHIFT 0x11
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ2__SHIFT 0x12
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ3__SHIFT 0x13
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ4__SHIFT 0x14
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ5__SHIFT 0x15
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALID_OUT__SHIFT 0x16
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1__SHIFT 0x17
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_ADJ__SHIFT 0x18
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ2__SHIFT 0x19
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ3__SHIFT 0x1a
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ4__SHIFT 0x1b
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ5__SHIFT 0x1c
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALID_OUT__SHIFT 0x1d
+#define SX_DEBUG_BUSY_7__RESERVED__SHIFT 0x1e
+#define SX_DEBUG_BUSY_7__COL_REQ0_FLOP_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_7__COL_SCBD1_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_MASK 0x00000004L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ1_ADJ_MASK 0x00000008L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ2_MASK 0x00000010L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ3_MASK 0x00000020L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ4_MASK 0x00000040L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALIDQ5_MASK 0x00000080L
+#define SX_DEBUG_BUSY_7__COL_BLEND3_DATA_VALID_OUT_MASK 0x00000100L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_MASK 0x00000200L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ1_ADJ_MASK 0x00000400L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ2_MASK 0x00000800L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ3_MASK 0x00001000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ4_MASK 0x00002000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALIDQ5_MASK 0x00004000L
+#define SX_DEBUG_BUSY_7__COL_BLEND2_DATA_VALID_OUT_MASK 0x00008000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_MASK 0x00010000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ1_ADJ_MASK 0x00020000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ2_MASK 0x00040000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ3_MASK 0x00080000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ4_MASK 0x00100000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALIDQ5_MASK 0x00200000L
+#define SX_DEBUG_BUSY_7__COL_BLEND1_DATA_VALID_OUT_MASK 0x00400000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_MASK 0x00800000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ1_ADJ_MASK 0x01000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ2_MASK 0x02000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ3_MASK 0x04000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ4_MASK 0x08000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALIDQ5_MASK 0x10000000L
+#define SX_DEBUG_BUSY_7__COL_BLEND0_DATA_VALID_OUT_MASK 0x20000000L
+#define SX_DEBUG_BUSY_7__RESERVED_MASK 0xC0000000L
+//SX_DEBUG_BUSY_8
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL3_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL2_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL1_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL0_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL3_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL2_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL1_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL0_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL3_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL2_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL1_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL0_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL3_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL2_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL1_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL0_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL3_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL2_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL1_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL0_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL3_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL2_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL1_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL0_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL3_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL2_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL1_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL0_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL3_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL2_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL1_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL0_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL3_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL2_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL1_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_8__POS_BANK7VAL0_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_8__POS_BANK6VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_8__POS_BANK5VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_8__POS_BANK4VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_8__POS_BANK3VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL3_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL2_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL1_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_8__POS_BANK2VAL0_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL3_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL2_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL1_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_8__POS_BANK1VAL0_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL3_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL2_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL1_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_8__POS_BANK0VAL0_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_9
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL3_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL2_BUSY__SHIFT 0x1
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL1_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL0_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL3_BUSY__SHIFT 0x4
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL2_BUSY__SHIFT 0x5
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL1_BUSY__SHIFT 0x6
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL0_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL3_BUSY__SHIFT 0x8
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL2_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL1_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL0_BUSY__SHIFT 0xb
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL3_BUSY__SHIFT 0xc
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL2_BUSY__SHIFT 0xd
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL1_BUSY__SHIFT 0xe
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL0_BUSY__SHIFT 0xf
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL3_BUSY__SHIFT 0x10
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL2_BUSY__SHIFT 0x11
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL1_BUSY__SHIFT 0x12
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL0_BUSY__SHIFT 0x13
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL3_BUSY__SHIFT 0x14
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL2_BUSY__SHIFT 0x15
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL1_BUSY__SHIFT 0x16
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL0_BUSY__SHIFT 0x17
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL3_BUSY__SHIFT 0x18
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL2_BUSY__SHIFT 0x19
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL1_BUSY__SHIFT 0x1a
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL0_BUSY__SHIFT 0x1b
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL3_BUSY__SHIFT 0x1c
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL2_BUSY__SHIFT 0x1d
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL1_BUSY__SHIFT 0x1e
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL0_BUSY__SHIFT 0x1f
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL3_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL2_BUSY_MASK 0x00000002L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL1_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_9__IDX_BANK7VAL0_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL3_BUSY_MASK 0x00000010L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL2_BUSY_MASK 0x00000020L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL1_BUSY_MASK 0x00000040L
+#define SX_DEBUG_BUSY_9__IDX_BANK6VAL0_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL3_BUSY_MASK 0x00000100L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL2_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL1_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_9__IDX_BANK5VAL0_BUSY_MASK 0x00000800L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL3_BUSY_MASK 0x00001000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL2_BUSY_MASK 0x00002000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL1_BUSY_MASK 0x00004000L
+#define SX_DEBUG_BUSY_9__IDX_BANK4VAL0_BUSY_MASK 0x00008000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL3_BUSY_MASK 0x00010000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL2_BUSY_MASK 0x00020000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL1_BUSY_MASK 0x00040000L
+#define SX_DEBUG_BUSY_9__IDX_BANK3VAL0_BUSY_MASK 0x00080000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL3_BUSY_MASK 0x00100000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL2_BUSY_MASK 0x00200000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL1_BUSY_MASK 0x00400000L
+#define SX_DEBUG_BUSY_9__IDX_BANK2VAL0_BUSY_MASK 0x00800000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL3_BUSY_MASK 0x01000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL2_BUSY_MASK 0x02000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL1_BUSY_MASK 0x04000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK1VAL0_BUSY_MASK 0x08000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL3_BUSY_MASK 0x10000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL2_BUSY_MASK 0x20000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL1_BUSY_MASK 0x40000000L
+#define SX_DEBUG_BUSY_9__IDX_BANK0VAL0_BUSY_MASK 0x80000000L
+//SX_DEBUG_BUSY_10
+#define SX_DEBUG_BUSY_10__POS_SCBD_BUSY__SHIFT 0x0
+#define SX_DEBUG_BUSY_10__POS_FREE_OR_VALIDS__SHIFT 0x1
+#define SX_DEBUG_BUSY_10__POS_REQUESTER_BUSY__SHIFT 0x2
+#define SX_DEBUG_BUSY_10__PA_SX_BUSY__SHIFT 0x3
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ3__SHIFT 0x4
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ2__SHIFT 0x5
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ1__SHIFT 0x6
+#define SX_DEBUG_BUSY_10__IDX_SCBD_BUSY__SHIFT 0x7
+#define SX_DEBUG_BUSY_10__IDX_FREE_OR_VALIDS__SHIFT 0x8
+#define SX_DEBUG_BUSY_10__IDX_REQUESTER_BUSY__SHIFT 0x9
+#define SX_DEBUG_BUSY_10__PA_SX_IDX_BUSY__SHIFT 0xa
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ3__SHIFT 0xb
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ2__SHIFT 0xc
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ1__SHIFT 0xd
+#define SX_DEBUG_BUSY_10__RESERVED__SHIFT 0xe
+#define SX_DEBUG_BUSY_10__POS_SCBD_BUSY_MASK 0x00000001L
+#define SX_DEBUG_BUSY_10__POS_FREE_OR_VALIDS_MASK 0x00000002L
+#define SX_DEBUG_BUSY_10__POS_REQUESTER_BUSY_MASK 0x00000004L
+#define SX_DEBUG_BUSY_10__PA_SX_BUSY_MASK 0x00000008L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ3_MASK 0x00000010L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ2_MASK 0x00000020L
+#define SX_DEBUG_BUSY_10__POS_WRCTRL1_VALIDQ1_MASK 0x00000040L
+#define SX_DEBUG_BUSY_10__IDX_SCBD_BUSY_MASK 0x00000080L
+#define SX_DEBUG_BUSY_10__IDX_FREE_OR_VALIDS_MASK 0x00000100L
+#define SX_DEBUG_BUSY_10__IDX_REQUESTER_BUSY_MASK 0x00000200L
+#define SX_DEBUG_BUSY_10__PA_SX_IDX_BUSY_MASK 0x00000400L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ3_MASK 0x00000800L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ2_MASK 0x00001000L
+#define SX_DEBUG_BUSY_10__IDX_WRCTRL1_VALIDQ1_MASK 0x00002000L
+#define SX_DEBUG_BUSY_10__RESERVED_MASK 0xFFFFC000L
+//SPI_PS_MAX_WAVE_ID
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID__SHIFT 0x10
+#define SPI_PS_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define SPI_PS_MAX_WAVE_ID__MAX_COLLISION_WAVE_ID_MASK 0x03FF0000L
+//SPI_GFX_CNTL
+#define SPI_GFX_CNTL__RESET_COUNTS__SHIFT 0x0
+#define SPI_GFX_CNTL__RESET_COUNTS_MASK 0x00000001L
+//SPI_DEBUG_READ
+#define SPI_DEBUG_READ__DATA__SHIFT 0x0
+#define SPI_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//SPI_DSM_CNTL
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define SPI_DSM_CNTL__SPI_SR_MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define SPI_DSM_CNTL__SPI_SR_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+//SPI_DSM_CNTL2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY__SHIFT 0x3
+#define SPI_DSM_CNTL2__SPI_SR_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define SPI_DSM_CNTL2__SPI_SR_MEM_INJECT_DELAY_MASK 0x000001F8L
+//SPI_EDC_CNT
+#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT__SHIFT 0x0
+#define SPI_EDC_CNT__SPI_SR_MEM_SED_COUNT_MASK 0x00000003L
+//SPI_DEBUG_BUSY
+#define SPI_DEBUG_BUSY__HS_BUSY__SHIFT 0x0
+#define SPI_DEBUG_BUSY__GS_BUSY__SHIFT 0x1
+#define SPI_DEBUG_BUSY__PS0_BUSY__SHIFT 0x2
+#define SPI_DEBUG_BUSY__PS1_BUSY__SHIFT 0x3
+#define SPI_DEBUG_BUSY__PS2_BUSY__SHIFT 0x4
+#define SPI_DEBUG_BUSY__PS3_BUSY__SHIFT 0x5
+#define SPI_DEBUG_BUSY__CSG0_BUSY__SHIFT 0x6
+#define SPI_DEBUG_BUSY__CSG1_BUSY__SHIFT 0x7
+#define SPI_DEBUG_BUSY__CS0_BUSY__SHIFT 0x8
+#define SPI_DEBUG_BUSY__CS1_BUSY__SHIFT 0x9
+#define SPI_DEBUG_BUSY__CS2_BUSY__SHIFT 0xa
+#define SPI_DEBUG_BUSY__CS3_BUSY__SHIFT 0xb
+#define SPI_DEBUG_BUSY__CS4_BUSY__SHIFT 0xc
+#define SPI_DEBUG_BUSY__CS5_BUSY__SHIFT 0xd
+#define SPI_DEBUG_BUSY__CS6_BUSY__SHIFT 0xe
+#define SPI_DEBUG_BUSY__CS7_BUSY__SHIFT 0xf
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY__SHIFT 0x10
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY__SHIFT 0x11
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY__SHIFT 0x12
+#define SPI_DEBUG_BUSY__OFC_LDS_BUSY__SHIFT 0x13
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY__SHIFT 0x14
+#define SPI_DEBUG_BUSY__GRBM_BUSY__SHIFT 0x15
+#define SPI_DEBUG_BUSY__SPIS_BUSY__SHIFT 0x16
+#define SPI_DEBUG_BUSY__RSRC_ALLOC_BUSY__SHIFT 0x17
+#define SPI_DEBUG_BUSY__PWS_BUSY__SHIFT 0x18
+#define SPI_DEBUG_BUSY__HS_BUSY_MASK 0x00000001L
+#define SPI_DEBUG_BUSY__GS_BUSY_MASK 0x00000002L
+#define SPI_DEBUG_BUSY__PS0_BUSY_MASK 0x00000004L
+#define SPI_DEBUG_BUSY__PS1_BUSY_MASK 0x00000008L
+#define SPI_DEBUG_BUSY__PS2_BUSY_MASK 0x00000010L
+#define SPI_DEBUG_BUSY__PS3_BUSY_MASK 0x00000020L
+#define SPI_DEBUG_BUSY__CSG0_BUSY_MASK 0x00000040L
+#define SPI_DEBUG_BUSY__CSG1_BUSY_MASK 0x00000080L
+#define SPI_DEBUG_BUSY__CS0_BUSY_MASK 0x00000100L
+#define SPI_DEBUG_BUSY__CS1_BUSY_MASK 0x00000200L
+#define SPI_DEBUG_BUSY__CS2_BUSY_MASK 0x00000400L
+#define SPI_DEBUG_BUSY__CS3_BUSY_MASK 0x00000800L
+#define SPI_DEBUG_BUSY__CS4_BUSY_MASK 0x00001000L
+#define SPI_DEBUG_BUSY__CS5_BUSY_MASK 0x00002000L
+#define SPI_DEBUG_BUSY__CS6_BUSY_MASK 0x00004000L
+#define SPI_DEBUG_BUSY__CS7_BUSY_MASK 0x00008000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL0_BUSY_MASK 0x00010000L
+#define SPI_DEBUG_BUSY__LDS_WR_CTL1_BUSY_MASK 0x00020000L
+#define SPI_DEBUG_BUSY__PC_DEALLOC_BUSY_MASK 0x00040000L
+#define SPI_DEBUG_BUSY__OFC_LDS_BUSY_MASK 0x00080000L
+#define SPI_DEBUG_BUSY__EVENT_CLCTR_BUSY_MASK 0x00100000L
+#define SPI_DEBUG_BUSY__GRBM_BUSY_MASK 0x00200000L
+#define SPI_DEBUG_BUSY__SPIS_BUSY_MASK 0x00400000L
+#define SPI_DEBUG_BUSY__RSRC_ALLOC_BUSY_MASK 0x00800000L
+#define SPI_DEBUG_BUSY__PWS_BUSY_MASK 0x01000000L
+//SPI_CONFIG_PS_CU_EN
+#define SPI_CONFIG_PS_CU_EN__PKR_OFFSET__SHIFT 0x0
+#define SPI_CONFIG_PS_CU_EN__PKR2_OFFSET__SHIFT 0x4
+#define SPI_CONFIG_PS_CU_EN__PKR3_OFFSET__SHIFT 0x8
+#define SPI_CONFIG_PS_CU_EN__PKR_OFFSET_MASK 0x0000000FL
+#define SPI_CONFIG_PS_CU_EN__PKR2_OFFSET_MASK 0x000000F0L
+#define SPI_CONFIG_PS_CU_EN__PKR3_OFFSET_MASK 0x00000F00L
+//SPI_WF_LIFETIME_CNTL
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD__SHIFT 0x0
+#define SPI_WF_LIFETIME_CNTL__EN__SHIFT 0x4
+#define SPI_WF_LIFETIME_CNTL__SAMPLE_PERIOD_MASK 0x0000000FL
+#define SPI_WF_LIFETIME_CNTL__EN_MASK 0x00000010L
+//SPI_WF_LIFETIME_LIMIT_0
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_0__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_1
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_1__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_1__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_2
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_2__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_3
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_3__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_3__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_4
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_4__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_LIMIT_5
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_LIMIT_5__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_LIMIT_5__EN_WARN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_0
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_0__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_0__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_2
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_2__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_2__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_4
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_4__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_4__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_6
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_6__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_6__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_7
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_7__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_7__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_9
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_9__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_9__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_11
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_11__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_11__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_13
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_13__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_13__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_14
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_14__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_14__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_15
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_15__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_15__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_16
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_16__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_16__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_17
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_17__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_17__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_18
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_18__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_18__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_19
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_19__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_19__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_20
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_20__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_20__INT_SENT_MASK 0x80000000L
+//SPI_WF_LIFETIME_DEBUG
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE__SHIFT 0x0
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN__SHIFT 0x1f
+#define SPI_WF_LIFETIME_DEBUG__START_VALUE_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_DEBUG__OVERRIDE_EN_MASK 0x80000000L
+//SPI_WF_LIFETIME_STATUS_21
+#define SPI_WF_LIFETIME_STATUS_21__MAX_CNT__SHIFT 0x0
+#define SPI_WF_LIFETIME_STATUS_21__INT_SENT__SHIFT 0x1f
+#define SPI_WF_LIFETIME_STATUS_21__MAX_CNT_MASK 0x7FFFFFFFL
+#define SPI_WF_LIFETIME_STATUS_21__INT_SENT_MASK 0x80000000L
+//SPI_LB_CTR_CTRL
+#define SPI_LB_CTR_CTRL__LOAD__SHIFT 0x0
+#define SPI_LB_CTR_CTRL__WAVES_SELECT__SHIFT 0x1
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ__SHIFT 0x3
+#define SPI_LB_CTR_CTRL__RESET_COUNTS__SHIFT 0x4
+#define SPI_LB_CTR_CTRL__LOAD_MASK 0x00000001L
+#define SPI_LB_CTR_CTRL__WAVES_SELECT_MASK 0x00000006L
+#define SPI_LB_CTR_CTRL__CLEAR_ON_READ_MASK 0x00000008L
+#define SPI_LB_CTR_CTRL__RESET_COUNTS_MASK 0x00000010L
+//SPI_LB_WGP_MASK
+#define SPI_LB_WGP_MASK__WGP_MASK__SHIFT 0x0
+#define SPI_LB_WGP_MASK__WGP_MASK_MASK 0xFFFFL
+//SPI_LB_DATA_REG
+#define SPI_LB_DATA_REG__CNT_DATA__SHIFT 0x0
+#define SPI_LB_DATA_REG__CNT_DATA_MASK 0xFFFFFFFFL
+//SPI_PG_ENABLE_STATIC_WGP_MASK
+#define SPI_PG_ENABLE_STATIC_WGP_MASK__WGP_MASK__SHIFT 0x0
+#define SPI_PG_ENABLE_STATIC_WGP_MASK__WGP_MASK_MASK 0xFFFFL
+//SPI_GDS_CREDITS
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS__SHIFT 0x0
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS__SHIFT 0x8
+#define SPI_GDS_CREDITS__DS_DATA_CREDITS_MASK 0x000000FFL
+#define SPI_GDS_CREDITS__DS_CMD_CREDITS_MASK 0x0000FF00L
+//SPI_SX_EXPORT_BUFFER_SIZES
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE__SHIFT 0x0
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE__SHIFT 0x10
+#define SPI_SX_EXPORT_BUFFER_SIZES__COLOR_BUFFER_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_EXPORT_BUFFER_SIZES__POSITION_BUFFER_SIZE_MASK 0xFFFF0000L
+//SPI_SX_SCOREBOARD_BUFFER_SIZES
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE__SHIFT 0x0
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE__SHIFT 0x10
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__COLOR_SCOREBOARD_SIZE_MASK 0x0000FFFFL
+#define SPI_SX_SCOREBOARD_BUFFER_SIZES__POSITION_SCOREBOARD_SIZE_MASK 0xFFFF0000L
+//SPI_CSQ_WF_ACTIVE_STATUS
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_STATUS__ACTIVE_MASK 0xFFFFFFFFL
+//SPI_CSQ_WF_ACTIVE_COUNT_0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_0__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_1
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_1__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_2
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_2__EVENTS_MASK 0x07FF0000L
+//SPI_CSQ_WF_ACTIVE_COUNT_3
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT__SHIFT 0x0
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS__SHIFT 0x10
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__COUNT_MASK 0x000007FFL
+#define SPI_CSQ_WF_ACTIVE_COUNT_3__EVENTS_MASK 0x07FF0000L
+//SPI_LB_DATA_WAVES
+#define SPI_LB_DATA_WAVES__COUNT0__SHIFT 0x0
+#define SPI_LB_DATA_WAVES__COUNT1__SHIFT 0x10
+#define SPI_LB_DATA_WAVES__COUNT0_MASK 0x0000FFFFL
+#define SPI_LB_DATA_WAVES__COUNT1_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERWGP_WAVE_HSGS
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_HS__SHIFT 0x0
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_GS__SHIFT 0x10
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_HS_MASK 0x0000FFFFL
+#define SPI_LB_DATA_PERWGP_WAVE_HSGS__WGP_USED_GS_MASK 0xFFFF0000L
+//SPI_LB_DATA_PERWGP_WAVE_CS
+#define SPI_LB_DATA_PERWGP_WAVE_CS__ACTIVE__SHIFT 0x0
+#define SPI_LB_DATA_PERWGP_WAVE_CS__ACTIVE_MASK 0xFFFFL
+//SPIS_DEBUG_READ
+#define SPIS_DEBUG_READ__DATA__SHIFT 0x0
+#define SPIS_DEBUG_READ__DATA_MASK 0xFFFFFFFFL
+//BCI_DEBUG_READ
+#define BCI_DEBUG_READ__DATA__SHIFT 0x0
+#define BCI_DEBUG_READ__DATA_MASK 0xFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_LO
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSBA_HI
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_PSMA_LO
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P0_TRAP_SCREEN_PSMA_HI
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P0_TRAP_SCREEN_GPR_MIN
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P0_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+//SPI_P1_TRAP_SCREEN_PSBA_LO
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSBA_HI
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSBA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_PSMA_LO
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_LO__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_P1_TRAP_SCREEN_PSMA_HI
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_PSMA_HI__MEM_BASE_MASK 0xFFL
+//SPI_P1_TRAP_SCREEN_GPR_MIN
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN__SHIFT 0x0
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN__SHIFT 0x6
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__VGPR_MIN_MASK 0x003FL
+#define SPI_P1_TRAP_SCREEN_GPR_MIN__SGPR_MIN_MASK 0x03C0L
+
+
+// addressBlock: gc_tpdec
+//TD_CNTL
+#define TD_CNTL__DISABLE_MEDIAN_CALC_FOR_CUBECORNER_PHANTOM_TEXELS__SHIFT 0x0
+#define TD_CNTL__FORCE_RESIDENCY_MAP_TO_BE_MAX_FILTER__SHIFT 0x2
+#define TD_CNTL__FORCE_RESIDENCY_MAP_CC_MAX_OF_ALL_SAMPLES__SHIFT 0x7
+#define TD_CNTL__PRESERVE_VGPR_ON_UTC_ERROR__SHIFT 0xd
+#define TD_CNTL__GATHER4_FLOAT_MODE__SHIFT 0x10
+#define TD_CNTL__FORCE_RT_BVH4_ARBITER_TO_PING_PONG__SHIFT 0x11
+#define TD_CNTL__GATHER4_DX9_MODE__SHIFT 0x13
+#define TD_CNTL__DISABLE_POWER_THROTTLE__SHIFT 0x14
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO__SHIFT 0x15
+#define TD_CNTL__DISABLE_ROUND_TO_ZERO_FOR_LARGE_FLOAT_TO_SMALL_FLOAT__SHIFT 0x16
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT__SHIFT 0x17
+#define TD_CNTL__ARBITER_ROUND_ROBIN__SHIFT 0x18
+#define TD_CNTL__ARBITER_OLDEST_PRIORITY__SHIFT 0x19
+#define TD_CNTL__DONE_SCOREBOARD_DEPTH__SHIFT 0x1a
+#define TD_CNTL__DISABLE_MEDIAN_CALC_FOR_CUBECORNER_PHANTOM_TEXELS_MASK 0x00000001L
+#define TD_CNTL__FORCE_RESIDENCY_MAP_TO_BE_MAX_FILTER_MASK 0x00000004L
+#define TD_CNTL__FORCE_RESIDENCY_MAP_CC_MAX_OF_ALL_SAMPLES_MASK 0x00000080L
+#define TD_CNTL__PRESERVE_VGPR_ON_UTC_ERROR_MASK 0x00002000L
+#define TD_CNTL__GATHER4_FLOAT_MODE_MASK 0x00010000L
+#define TD_CNTL__FORCE_RT_BVH4_ARBITER_TO_PING_PONG_MASK 0x00020000L
+#define TD_CNTL__GATHER4_DX9_MODE_MASK 0x00080000L
+#define TD_CNTL__DISABLE_POWER_THROTTLE_MASK 0x00100000L
+#define TD_CNTL__ENABLE_ROUND_TO_ZERO_MASK 0x00200000L
+#define TD_CNTL__DISABLE_ROUND_TO_ZERO_FOR_LARGE_FLOAT_TO_SMALL_FLOAT_MASK 0x00400000L
+#define TD_CNTL__DISABLE_2BIT_SIGNED_FORMAT_MASK 0x00800000L
+#define TD_CNTL__ARBITER_ROUND_ROBIN_MASK 0x01000000L
+#define TD_CNTL__ARBITER_OLDEST_PRIORITY_MASK 0x02000000L
+#define TD_CNTL__DONE_SCOREBOARD_DEPTH_MASK 0xFC000000L
+//TD_STATUS
+#define TD_STATUS__BUSY__SHIFT 0x1f
+#define TD_STATUS__BUSY_MASK 0x80000000L
+//TD_POWER_CNTL
+#define TD_POWER_CNTL__DISABLE_NOFILTER_FORMATTER_POWER_OPT__SHIFT 0x6
+#define TD_POWER_CNTL__FORCE_NOFILTER_D16_FORMATTERS_ON__SHIFT 0x7
+#define TD_POWER_CNTL__ENABLE_DEBUG_REG__SHIFT 0x8
+#define TD_POWER_CNTL__DISABLE_NOFILTER_FORMATTER_POWER_OPT_MASK 0x00000040L
+#define TD_POWER_CNTL__FORCE_NOFILTER_D16_FORMATTERS_ON_MASK 0x00000080L
+#define TD_POWER_CNTL__ENABLE_DEBUG_REG_MASK 0x00000100L
+//TD_CNTL2
+#define TD_CNTL2__LDS_RETURN_FIFO_CREDIT__SHIFT 0x0
+#define TD_CNTL2__MULTI_CYCLE_16FP__SHIFT 0x3
+#define TD_CNTL2__LDS_RETURN_FIFO_CREDIT_MASK 0x00000007L
+#define TD_CNTL2__MULTI_CYCLE_16FP_MASK 0x00000008L
+//TD_DSM_CNTL
+//TD_DSM_CNTL2
+//TD_SCRATCH
+#define TD_SCRATCH__SCRATCH__SHIFT 0x0
+#define TD_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+//TA_CNTL
+#define TA_CNTL__TA_SQ_XNACK_FGCG_DISABLE__SHIFT 0x0
+#define TA_CNTL__ALIGNER_CREDIT__SHIFT 0x10
+#define TA_CNTL__TD_FIFO_CREDIT__SHIFT 0x16
+#define TA_CNTL__TA_SQ_XNACK_FGCG_DISABLE_MASK 0x00000001L
+#define TA_CNTL__ALIGNER_CREDIT_MASK 0x001F0000L
+#define TA_CNTL__TD_FIFO_CREDIT_MASK 0xFFC00000L
+//TA_CNTL_AUX
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N__SHIFT 0x0
+#define TA_CNTL_AUX__DEPTH_AS_PITCH_DIS__SHIFT 0x1
+#define TA_CNTL_AUX__CORNER_SAMPLES_MIN_DIM__SHIFT 0x2
+#define TA_CNTL_AUX__OVERRIDE_QUAD_MODE_DIS__SHIFT 0x3
+#define TA_CNTL_AUX__DERIV_ADJUST_DIS__SHIFT 0x4
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE__SHIFT 0x5
+#define TA_CNTL_AUX__GATHERH_DST_SEL__SHIFT 0x6
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE__SHIFT 0x7
+#define TA_CNTL_AUX__ANISO_MAG_STEP_CLAMP__SHIFT 0x8
+#define TA_CNTL_AUX__AUTO_ALIGN_FORMAT__SHIFT 0x9
+#define TA_CNTL_AUX__ANISO_HALF_THRESH__SHIFT 0xa
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS__SHIFT 0xc
+#define TA_CNTL_AUX__ANISO_STEP_ORDER__SHIFT 0xd
+#define TA_CNTL_AUX__ANISO_STEP__SHIFT 0xe
+#define TA_CNTL_AUX__MINMAG_UNNORM__SHIFT 0xf
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE__SHIFT 0x10
+#define TA_CNTL_AUX__ANISO_RATIO_LUT__SHIFT 0x11
+#define TA_CNTL_AUX__ANISO_TAP__SHIFT 0x12
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE__SHIFT 0x14
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE__SHIFT 0x15
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE__SHIFT 0x16
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE__SHIFT 0x17
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE__SHIFT 0x18
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE__SHIFT 0x19
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE__SHIFT 0x1a
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP__SHIFT 0x1c
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG__SHIFT 0x1d
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE__SHIFT 0x1e
+#define TA_CNTL_AUX__SCOAL_DSWIZZLE_N_MASK 0x00000001L
+#define TA_CNTL_AUX__DEPTH_AS_PITCH_DIS_MASK 0x00000002L
+#define TA_CNTL_AUX__CORNER_SAMPLES_MIN_DIM_MASK 0x00000004L
+#define TA_CNTL_AUX__OVERRIDE_QUAD_MODE_DIS_MASK 0x00000008L
+#define TA_CNTL_AUX__DERIV_ADJUST_DIS_MASK 0x00000010L
+#define TA_CNTL_AUX__TFAULT_EN_OVERRIDE_MASK 0x00000020L
+#define TA_CNTL_AUX__GATHERH_DST_SEL_MASK 0x00000040L
+#define TA_CNTL_AUX__DISABLE_GATHER4_BC_SWIZZLE_MASK 0x00000080L
+#define TA_CNTL_AUX__ANISO_MAG_STEP_CLAMP_MASK 0x00000100L
+#define TA_CNTL_AUX__AUTO_ALIGN_FORMAT_MASK 0x00000200L
+#define TA_CNTL_AUX__ANISO_HALF_THRESH_MASK 0x00000C00L
+#define TA_CNTL_AUX__ANISO_ERROR_FP_VBIAS_MASK 0x00001000L
+#define TA_CNTL_AUX__ANISO_STEP_ORDER_MASK 0x00002000L
+#define TA_CNTL_AUX__ANISO_STEP_MASK 0x00004000L
+#define TA_CNTL_AUX__MINMAG_UNNORM_MASK 0x00008000L
+#define TA_CNTL_AUX__ANISO_WEIGHT_MODE_MASK 0x00010000L
+#define TA_CNTL_AUX__ANISO_RATIO_LUT_MASK 0x00020000L
+#define TA_CNTL_AUX__ANISO_TAP_MASK 0x00040000L
+#define TA_CNTL_AUX__DETERMINISM_RESERVED_DISABLE_MASK 0x00100000L
+#define TA_CNTL_AUX__DETERMINISM_OPCODE_STRICT_DISABLE_MASK 0x00200000L
+#define TA_CNTL_AUX__DETERMINISM_MISC_DISABLE_MASK 0x00400000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLE_C_DFMT_DISABLE_MASK 0x00800000L
+#define TA_CNTL_AUX__DETERMINISM_SAMPLER_MSAA_DISABLE_MASK 0x01000000L
+#define TA_CNTL_AUX__DETERMINISM_WRITEOP_READFMT_DISABLE_MASK 0x02000000L
+#define TA_CNTL_AUX__DETERMINISM_DFMT_NFMT_DISABLE_MASK 0x04000000L
+#define TA_CNTL_AUX__CUBEMAP_SLICE_CLAMP_MASK 0x10000000L
+#define TA_CNTL_AUX__TRUNC_SMALL_NEG_MASK 0x20000000L
+#define TA_CNTL_AUX__ARRAY_ROUND_MODE_MASK 0xC0000000L
+//TA_CNTL2
+#define TA_CNTL2__POINT_SAMPLE_ACCEL_DIS__SHIFT 0x10
+#define TA_CNTL2__ELEMSIZE_HASH_DIS__SHIFT 0x11
+#define TA_CNTL2__TRUNCATE_COORD_MODE__SHIFT 0x12
+#define TA_CNTL2__ELIMINATE_UNLIT_QUAD_DIS__SHIFT 0x13
+#define TA_CNTL2__POINT_SAMPLE_ACCEL_DIS_MASK 0x00010000L
+#define TA_CNTL2__ELEMSIZE_HASH_DIS_MASK 0x00020000L
+#define TA_CNTL2__TRUNCATE_COORD_MODE_MASK 0x00040000L
+#define TA_CNTL2__ELIMINATE_UNLIT_QUAD_DIS_MASK 0x00080000L
+//TA_STATUS
+#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
+#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
+#define TA_STATUS__FG_SFIFO_EMPTYB__SHIFT 0xe
+#define TA_STATUS__FL_PFIFO_EMPTYB__SHIFT 0x10
+#define TA_STATUS__FL_LFIFO_EMPTYB__SHIFT 0x11
+#define TA_STATUS__FL_SFIFO_EMPTYB__SHIFT 0x12
+#define TA_STATUS__FA_PFIFO_EMPTYB__SHIFT 0x14
+#define TA_STATUS__FA_LFIFO_EMPTYB__SHIFT 0x15
+#define TA_STATUS__FA_SFIFO_EMPTYB__SHIFT 0x16
+#define TA_STATUS__IN_BUSY__SHIFT 0x18
+#define TA_STATUS__FG_BUSY__SHIFT 0x19
+#define TA_STATUS__LA_BUSY__SHIFT 0x1a
+#define TA_STATUS__FL_BUSY__SHIFT 0x1b
+#define TA_STATUS__TA_BUSY__SHIFT 0x1c
+#define TA_STATUS__FA_BUSY__SHIFT 0x1d
+#define TA_STATUS__AL_BUSY__SHIFT 0x1e
+#define TA_STATUS__BUSY__SHIFT 0x1f
+#define TA_STATUS__FG_PFIFO_EMPTYB_MASK 0x00001000L
+#define TA_STATUS__FG_LFIFO_EMPTYB_MASK 0x00002000L
+#define TA_STATUS__FG_SFIFO_EMPTYB_MASK 0x00004000L
+#define TA_STATUS__FL_PFIFO_EMPTYB_MASK 0x00010000L
+#define TA_STATUS__FL_LFIFO_EMPTYB_MASK 0x00020000L
+#define TA_STATUS__FL_SFIFO_EMPTYB_MASK 0x00040000L
+#define TA_STATUS__FA_PFIFO_EMPTYB_MASK 0x00100000L
+#define TA_STATUS__FA_LFIFO_EMPTYB_MASK 0x00200000L
+#define TA_STATUS__FA_SFIFO_EMPTYB_MASK 0x00400000L
+#define TA_STATUS__IN_BUSY_MASK 0x01000000L
+#define TA_STATUS__FG_BUSY_MASK 0x02000000L
+#define TA_STATUS__LA_BUSY_MASK 0x04000000L
+#define TA_STATUS__FL_BUSY_MASK 0x08000000L
+#define TA_STATUS__TA_BUSY_MASK 0x10000000L
+#define TA_STATUS__FA_BUSY_MASK 0x20000000L
+#define TA_STATUS__AL_BUSY_MASK 0x40000000L
+#define TA_STATUS__BUSY_MASK 0x80000000L
+//TA_SCRATCH
+#define TA_SCRATCH__SCRATCH__SHIFT 0x0
+#define TA_SCRATCH__SCRATCH_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gdsdec
+//GDS_CONFIG
+#define GDS_CONFIG__WRITE_DIS__SHIFT 0x0
+#define GDS_CONFIG__UNUSED__SHIFT 0x1
+#define GDS_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define GDS_CONFIG__UNUSED_MASK 0xFFFFFFFEL
+//GDS_CNTL_STATUS
+#define GDS_CNTL_STATUS__GDS_BUSY__SHIFT 0x0
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY__SHIFT 0x1
+#define GDS_CNTL_STATUS__ORD_APP_BUSY__SHIFT 0x2
+#define GDS_CNTL_STATUS__DS_WR_CLAMP__SHIFT 0x3
+#define GDS_CNTL_STATUS__DS_RD_CLAMP__SHIFT 0x4
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY__SHIFT 0x5
+#define GDS_CNTL_STATUS__DS_BUSY__SHIFT 0x6
+#define GDS_CNTL_STATUS__GWS_BUSY__SHIFT 0x7
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY__SHIFT 0x8
+#define GDS_CNTL_STATUS__CREDIT_BUSY0__SHIFT 0x9
+#define GDS_CNTL_STATUS__CREDIT_BUSY1__SHIFT 0xa
+#define GDS_CNTL_STATUS__CREDIT_BUSY2__SHIFT 0xb
+#define GDS_CNTL_STATUS__CREDIT_BUSY3__SHIFT 0xc
+#define GDS_CNTL_STATUS__CREDIT_BUSY4__SHIFT 0xd
+#define GDS_CNTL_STATUS__CREDIT_BUSY5__SHIFT 0xe
+#define GDS_CNTL_STATUS__CREDIT_BUSY6__SHIFT 0xf
+#define GDS_CNTL_STATUS__CREDIT_BUSY7__SHIFT 0x10
+#define GDS_CNTL_STATUS__UNUSED__SHIFT 0x11
+#define GDS_CNTL_STATUS__GDS_BUSY_MASK 0x00000001L
+#define GDS_CNTL_STATUS__GRBM_WBUF_BUSY_MASK 0x00000002L
+#define GDS_CNTL_STATUS__ORD_APP_BUSY_MASK 0x00000004L
+#define GDS_CNTL_STATUS__DS_WR_CLAMP_MASK 0x00000008L
+#define GDS_CNTL_STATUS__DS_RD_CLAMP_MASK 0x00000010L
+#define GDS_CNTL_STATUS__GRBM_RBUF_BUSY_MASK 0x00000020L
+#define GDS_CNTL_STATUS__DS_BUSY_MASK 0x00000040L
+#define GDS_CNTL_STATUS__GWS_BUSY_MASK 0x00000080L
+#define GDS_CNTL_STATUS__ORD_FIFO_BUSY_MASK 0x00000100L
+#define GDS_CNTL_STATUS__CREDIT_BUSY0_MASK 0x00000200L
+#define GDS_CNTL_STATUS__CREDIT_BUSY1_MASK 0x00000400L
+#define GDS_CNTL_STATUS__CREDIT_BUSY2_MASK 0x00000800L
+#define GDS_CNTL_STATUS__CREDIT_BUSY3_MASK 0x00001000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY4_MASK 0x00002000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY5_MASK 0x00004000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY6_MASK 0x00008000L
+#define GDS_CNTL_STATUS__CREDIT_BUSY7_MASK 0x00010000L
+#define GDS_CNTL_STATUS__UNUSED_MASK 0xFFFE0000L
+//GDS_ENHANCE
+#define GDS_ENHANCE__MISC__SHIFT 0x0
+#define GDS_ENHANCE__AUTO_INC_INDEX__SHIFT 0x10
+#define GDS_ENHANCE__CGPG_RESTORE__SHIFT 0x11
+#define GDS_ENHANCE__UNUSED__SHIFT 0x12
+#define GDS_ENHANCE__MISC_MASK 0x0000FFFFL
+#define GDS_ENHANCE__AUTO_INC_INDEX_MASK 0x00010000L
+#define GDS_ENHANCE__CGPG_RESTORE_MASK 0x00020000L
+#define GDS_ENHANCE__UNUSED_MASK 0xFFFC0000L
+//GDS_PROTECTION_FAULT
+#define GDS_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_PROTECTION_FAULT__GRBM__SHIFT 0x2
+#define GDS_PROTECTION_FAULT__SE_ID__SHIFT 0x3
+#define GDS_PROTECTION_FAULT__SA_ID__SHIFT 0x6
+#define GDS_PROTECTION_FAULT__WGP_ID__SHIFT 0x7
+#define GDS_PROTECTION_FAULT__SIMD_ID__SHIFT 0xb
+#define GDS_PROTECTION_FAULT__WAVE_ID__SHIFT 0xd
+#define GDS_PROTECTION_FAULT__ADDRESS__SHIFT 0x12
+#define GDS_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_PROTECTION_FAULT__GRBM_MASK 0x00000004L
+#define GDS_PROTECTION_FAULT__SE_ID_MASK 0x00000038L
+#define GDS_PROTECTION_FAULT__SA_ID_MASK 0x00000040L
+#define GDS_PROTECTION_FAULT__WGP_ID_MASK 0x00000780L
+#define GDS_PROTECTION_FAULT__SIMD_ID_MASK 0x00001800L
+#define GDS_PROTECTION_FAULT__WAVE_ID_MASK 0x0003E000L
+#define GDS_PROTECTION_FAULT__ADDRESS_MASK 0xFFFC0000L
+//GDS_VM_PROTECTION_FAULT
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS__SHIFT 0x0
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED__SHIFT 0x1
+#define GDS_VM_PROTECTION_FAULT__GWS__SHIFT 0x2
+#define GDS_VM_PROTECTION_FAULT__OA__SHIFT 0x3
+#define GDS_VM_PROTECTION_FAULT__GRBM__SHIFT 0x4
+#define GDS_VM_PROTECTION_FAULT__TMZ__SHIFT 0x5
+#define GDS_VM_PROTECTION_FAULT__UNUSED1__SHIFT 0x6
+#define GDS_VM_PROTECTION_FAULT__VMID__SHIFT 0x8
+#define GDS_VM_PROTECTION_FAULT__UNUSED2__SHIFT 0xc
+#define GDS_VM_PROTECTION_FAULT__ADDRESS__SHIFT 0x10
+#define GDS_VM_PROTECTION_FAULT__WRITE_DIS_MASK 0x00000001L
+#define GDS_VM_PROTECTION_FAULT__FAULT_DETECTED_MASK 0x00000002L
+#define GDS_VM_PROTECTION_FAULT__GWS_MASK 0x00000004L
+#define GDS_VM_PROTECTION_FAULT__OA_MASK 0x00000008L
+#define GDS_VM_PROTECTION_FAULT__GRBM_MASK 0x00000010L
+#define GDS_VM_PROTECTION_FAULT__TMZ_MASK 0x00000020L
+#define GDS_VM_PROTECTION_FAULT__UNUSED1_MASK 0x000000C0L
+#define GDS_VM_PROTECTION_FAULT__VMID_MASK 0x00000F00L
+#define GDS_VM_PROTECTION_FAULT__UNUSED2_MASK 0x0000F000L
+#define GDS_VM_PROTECTION_FAULT__ADDRESS_MASK 0xFFFF0000L
+//GDS_EDC_CNT
+#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0
+#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED__SHIFT 0x2
+#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_CNT__UNUSED__SHIFT 0x6
+#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L
+#define GDS_EDC_CNT__GDS_INPUT_QUEUE_SED_MASK 0x0000000CL
+#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L
+//GDS_EDC_GRBM_CNT
+#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0
+#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2
+#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4
+#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L
+#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL
+#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L
+//GDS_EDC_OA_DED
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1
+#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb
+#define GDS_EDC_OA_DED__ME0_PIPE1_CS_DED__SHIFT 0xc
+#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xd
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L
+#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L
+#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L
+#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L
+#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L
+#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L
+#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L
+#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L
+#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L
+#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L
+#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L
+#define GDS_EDC_OA_DED__ME0_PIPE1_CS_DED_MASK 0x00001000L
+#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFE000L
+//GDS_DSM_CNTL
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0__SHIFT 0x0
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1__SHIFT 0x1
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0__SHIFT 0x3
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1__SHIFT 0x4
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0__SHIFT 0x6
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1__SHIFT 0x7
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0__SHIFT 0x9
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1__SHIFT 0xa
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0__SHIFT 0xc
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1__SHIFT 0xd
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GDS_DSM_CNTL__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_0_MASK 0x00000001L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_MEM_IRRITATOR_DATA_1_MASK 0x00000002L
+#define GDS_DSM_CNTL__GDS_MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_0_MASK 0x00000008L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_INPUT_QUEUE_IRRITATOR_DATA_1_MASK 0x00000010L
+#define GDS_DSM_CNTL__GDS_INPUT_QUEUE_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_0_MASK 0x00000040L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_CMD_RAM_IRRITATOR_DATA_1_MASK 0x00000080L
+#define GDS_DSM_CNTL__GDS_PHY_CMD_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_0_MASK 0x00000200L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PHY_DATA_RAM_IRRITATOR_DATA_1_MASK 0x00000400L
+#define GDS_DSM_CNTL__GDS_PHY_DATA_RAM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_0_MASK 0x00001000L
+#define GDS_DSM_CNTL__SEL_DSM_GDS_PIPE_MEM_IRRITATOR_DATA_1_MASK 0x00002000L
+#define GDS_DSM_CNTL__GDS_PIPE_MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GDS_DSM_CNTL__UNUSED_MASK 0xFFFF8000L
+//GDS_EDC_OA_PHY_CNT
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED__SHIFT 0x8
+#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xa
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SED_MASK 0x00000300L
+#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFFC00L
+//GDS_EDC_OA_PIPE_CNT
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe
+#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L
+#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L
+#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_DSM_CNTL2
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GDS_DSM_CNTL2__UNUSED__SHIFT 0xf
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY__SHIFT 0x1a
+#define GDS_DSM_CNTL2__GDS_MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GDS_DSM_CNTL2__GDS_MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GDS_DSM_CNTL2__GDS_INPUT_QUEUE_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GDS_DSM_CNTL2__GDS_PHY_CMD_RAM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GDS_DSM_CNTL2__GDS_PHY_DATA_RAM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GDS_DSM_CNTL2__GDS_PIPE_MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GDS_DSM_CNTL2__UNUSED_MASK 0x03FF8000L
+#define GDS_DSM_CNTL2__GDS_INJECT_DELAY_MASK 0xFC000000L
+
+
+// addressBlock: gc_rbdec
+//DB_DEBUG
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE__SHIFT 0x0
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE__SHIFT 0x1
+#define DB_DEBUG__FETCH_FULL_Z_TILE__SHIFT 0x2
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE__SHIFT 0x3
+#define DB_DEBUG__FORCE_Z_MODE__SHIFT 0x4
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ__SHIFT 0x6
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ__SHIFT 0x7
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE__SHIFT 0x8
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0__SHIFT 0xa
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1__SHIFT 0xc
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE__SHIFT 0xe
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE__SHIFT 0xf
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE__SHIFT 0x10
+#define DB_DEBUG__DISABLE_SUMM_SQUADS__SHIFT 0x11
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS__SHIFT 0x12
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE__SHIFT 0x13
+#define DB_DEBUG__NEVER_FREE_Z_ONLY__SHIFT 0x15
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS__SHIFT 0x16
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION__SHIFT 0x17
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES__SHIFT 0x18
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT__SHIFT 0x1c
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT__SHIFT 0x1d
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC__SHIFT 0x1e
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC__SHIFT 0x1f
+#define DB_DEBUG__DEBUG_STENCIL_COMPRESS_DISABLE_MASK 0x00000001L
+#define DB_DEBUG__DEBUG_DEPTH_COMPRESS_DISABLE_MASK 0x00000002L
+#define DB_DEBUG__FETCH_FULL_Z_TILE_MASK 0x00000004L
+#define DB_DEBUG__FETCH_FULL_STENCIL_TILE_MASK 0x00000008L
+#define DB_DEBUG__FORCE_Z_MODE_MASK 0x00000030L
+#define DB_DEBUG__DEBUG_FORCE_DEPTH_READ_MASK 0x00000040L
+#define DB_DEBUG__DEBUG_FORCE_STENCIL_READ_MASK 0x00000080L
+#define DB_DEBUG__DEBUG_FORCE_HIZ_ENABLE_MASK 0x00000300L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE0_MASK 0x00000C00L
+#define DB_DEBUG__DEBUG_FORCE_HIS_ENABLE1_MASK 0x00003000L
+#define DB_DEBUG__DEBUG_FAST_Z_DISABLE_MASK 0x00004000L
+#define DB_DEBUG__DEBUG_FAST_STENCIL_DISABLE_MASK 0x00008000L
+#define DB_DEBUG__DEBUG_NOOP_CULL_DISABLE_MASK 0x00010000L
+#define DB_DEBUG__DISABLE_SUMM_SQUADS_MASK 0x00020000L
+#define DB_DEBUG__DEPTH_CACHE_FORCE_MISS_MASK 0x00040000L
+#define DB_DEBUG__DEBUG_FORCE_FULL_Z_RANGE_MASK 0x00180000L
+#define DB_DEBUG__NEVER_FREE_Z_ONLY_MASK 0x00200000L
+#define DB_DEBUG__ZPASS_COUNTS_LOOK_AT_PIPE_STAT_EVENTS_MASK 0x00400000L
+#define DB_DEBUG__DISABLE_VPORT_ZPLANE_OPTIMIZATION_MASK 0x00800000L
+#define DB_DEBUG__DECOMPRESS_AFTER_N_ZPLANES_MASK 0x0F000000L
+#define DB_DEBUG__ONE_FREE_IN_FLIGHT_MASK 0x10000000L
+#define DB_DEBUG__FORCE_MISS_IF_NOT_INFLIGHT_MASK 0x20000000L
+#define DB_DEBUG__DISABLE_DEPTH_SURFACE_SYNC_MASK 0x40000000L
+#define DB_DEBUG__DISABLE_HTILE_SURFACE_SYNC_MASK 0x80000000L
+//DB_DEBUG2
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING__SHIFT 0x0
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE__SHIFT 0x1
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE__SHIFT 0x2
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB__SHIFT 0x3
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM__SHIFT 0x4
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL__SHIFT 0x5
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ__SHIFT 0x6
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL__SHIFT 0x7
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE__SHIFT 0x8
+#define DB_DEBUG2__CLK_OFF_DELAY__SHIFT 0x9
+#define DB_DEBUG2__FORCE_PERF_COUNTERS_ON__SHIFT 0xe
+#define DB_DEBUG2__FULL_TILE_CACHE_EVICT_ON_HALF_FULL__SHIFT 0xf
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES__SHIFT 0x10
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING__SHIFT 0x11
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING__SHIFT 0x12
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL__SHIFT 0x13
+#define DB_DEBUG2__DISABLE_FULL_TILE_WAVE_BREAK__SHIFT 0x14
+#define DB_DEBUG2__ENABLE_FULL_TILE_WAVE_BREAK_FOR_ALL_TILES__SHIFT 0x15
+#define DB_DEBUG2__FORCE_ITERATE_256__SHIFT 0x18
+#define DB_DEBUG2__RESERVED1__SHIFT 0x1a
+#define DB_DEBUG2__DEBUG_BUS_FLOP_EN__SHIFT 0x1b
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM__SHIFT 0x1c
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL__SHIFT 0x1d
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM__SHIFT 0x1e
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT__SHIFT 0x1f
+#define DB_DEBUG2__ALLOW_COMPZ_BYTE_MASKING_MASK 0x00000001L
+#define DB_DEBUG2__DISABLE_TC_ZRANGE_L0_CACHE_MASK 0x00000002L
+#define DB_DEBUG2__DISABLE_TC_MASK_L0_CACHE_MASK 0x00000004L
+#define DB_DEBUG2__DTR_ROUND_ROBIN_ARB_MASK 0x00000008L
+#define DB_DEBUG2__DTR_PREZ_STALLS_FOR_ETF_ROOM_MASK 0x00000010L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_MASK 0x00000020L
+#define DB_DEBUG2__DISABLE_PREZL_FIFO_STALL_REZ_MASK 0x00000040L
+#define DB_DEBUG2__ENABLE_VIEWPORT_STALL_ON_ALL_MASK 0x00000080L
+#define DB_DEBUG2__OPTIMIZE_HIZ_MATCHES_FB_DISABLE_MASK 0x00000100L
+#define DB_DEBUG2__CLK_OFF_DELAY_MASK 0x00003E00L
+#define DB_DEBUG2__FORCE_PERF_COUNTERS_ON_MASK 0x00004000L
+#define DB_DEBUG2__FULL_TILE_CACHE_EVICT_ON_HALF_FULL_MASK 0x00008000L
+#define DB_DEBUG2__DISABLE_HTILE_PAIRED_PIPES_MASK 0x00010000L
+#define DB_DEBUG2__DISABLE_NULL_EOT_FORWARDING_MASK 0x00020000L
+#define DB_DEBUG2__DISABLE_DTT_DATA_FORWARDING_MASK 0x00040000L
+#define DB_DEBUG2__DISABLE_QUAD_COHERENCY_STALL_MASK 0x00080000L
+#define DB_DEBUG2__DISABLE_FULL_TILE_WAVE_BREAK_MASK 0x00100000L
+#define DB_DEBUG2__ENABLE_FULL_TILE_WAVE_BREAK_FOR_ALL_TILES_MASK 0x00200000L
+#define DB_DEBUG2__FORCE_ITERATE_256_MASK 0x03000000L
+#define DB_DEBUG2__RESERVED1_MASK 0x04000000L
+#define DB_DEBUG2__DEBUG_BUS_FLOP_EN_MASK 0x08000000L
+#define DB_DEBUG2__ENABLE_PREZ_OF_REZ_SUMM_MASK 0x10000000L
+#define DB_DEBUG2__DISABLE_PREZL_VIEWPORT_STALL_MASK 0x20000000L
+#define DB_DEBUG2__DISABLE_SINGLE_STENCIL_QUAD_SUMM_MASK 0x40000000L
+#define DB_DEBUG2__DISABLE_WRITE_STALL_ON_RDWR_CONFLICT_MASK 0x80000000L
+//DB_DEBUG3
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION__SHIFT 0x0
+#define DB_DEBUG3__DISABLE_RELOAD_CONTEXT_DRAW_DATA__SHIFT 0x1
+#define DB_DEBUG3__FORCE_DB_IS_GOOD__SHIFT 0x2
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION__SHIFT 0x3
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP__SHIFT 0x4
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z__SHIFT 0x5
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z__SHIFT 0x6
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION__SHIFT 0x8
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP__SHIFT 0xa
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS__SHIFT 0xb
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS__SHIFT 0xd
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE__SHIFT 0xe
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK__SHIFT 0xf
+#define DB_DEBUG3__DISABLE_SLOCS_PER_CTXT_MATCH__SHIFT 0x10
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE__SHIFT 0x11
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE__SHIFT 0x13
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE__SHIFT 0x14
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT__SHIFT 0x15
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB__SHIFT 0x16
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD__SHIFT 0x17
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT__SHIFT 0x18
+#define DB_DEBUG3__DISABLE_DI_DT_STALL__SHIFT 0x19
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET__SHIFT 0x1a
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX__SHIFT 0x1b
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND__SHIFT 0x1c
+#define DB_DEBUG3__DELETE_CONTEXT_SUSPEND__SHIFT 0x1d
+#define DB_DEBUG3__DISABLE_TS_WRITE_L0__SHIFT 0x1e
+#define DB_DEBUG3__DISABLE_MULTIDTAG_FL_PANIC_REQUIREMENT__SHIFT 0x1f
+#define DB_DEBUG3__DISABLE_CLEAR_ZRANGE_CORRECTION_MASK 0x00000001L
+#define DB_DEBUG3__DISABLE_RELOAD_CONTEXT_DRAW_DATA_MASK 0x00000002L
+#define DB_DEBUG3__FORCE_DB_IS_GOOD_MASK 0x00000004L
+#define DB_DEBUG3__DISABLE_TL_SSO_NULL_SUPPRESSION_MASK 0x00000008L
+#define DB_DEBUG3__DISABLE_HIZ_ON_VPORT_CLAMP_MASK 0x00000010L
+#define DB_DEBUG3__EQAA_INTERPOLATE_COMP_Z_MASK 0x00000020L
+#define DB_DEBUG3__EQAA_INTERPOLATE_SRC_Z_MASK 0x00000040L
+#define DB_DEBUG3__DISABLE_ZCMP_DIRTY_SUPPRESSION_MASK 0x00000100L
+#define DB_DEBUG3__DISABLE_RECOMP_TO_1ZPLANE_WITHOUT_FASTOP_MASK 0x00000400L
+#define DB_DEBUG3__ENABLE_INCOHERENT_EQAA_READS_MASK 0x00000800L
+#define DB_DEBUG3__DISABLE_OP_DF_BYPASS_MASK 0x00002000L
+#define DB_DEBUG3__DISABLE_OP_DF_WRITE_COMBINE_MASK 0x00004000L
+#define DB_DEBUG3__DISABLE_OP_DF_DIRECT_FEEDBACK_MASK 0x00008000L
+#define DB_DEBUG3__DISABLE_SLOCS_PER_CTXT_MATCH_MASK 0x00010000L
+#define DB_DEBUG3__SLOW_PREZ_TO_A2M_OMASK_RATE_MASK 0x00020000L
+#define DB_DEBUG3__DISABLE_TC_UPDATE_WRITE_COMBINE_MASK 0x00080000L
+#define DB_DEBUG3__DISABLE_HZ_TC_WRITE_COMBINE_MASK 0x00100000L
+#define DB_DEBUG3__ENABLE_RECOMP_ZDIRTY_SUPPRESSION_OPT_MASK 0x00200000L
+#define DB_DEBUG3__ENABLE_TC_MA_ROUND_ROBIN_ARB_MASK 0x00400000L
+#define DB_DEBUG3__DISABLE_RAM_READ_SUPPRESION_ON_FWD_MASK 0x00800000L
+#define DB_DEBUG3__DISABLE_EQAA_A2M_PERF_OPT_MASK 0x01000000L
+#define DB_DEBUG3__DISABLE_DI_DT_STALL_MASK 0x02000000L
+#define DB_DEBUG3__ENABLE_DB_PROCESS_RESET_MASK 0x04000000L
+#define DB_DEBUG3__DISABLE_OVERRASTERIZATION_FIX_MASK 0x08000000L
+#define DB_DEBUG3__DONT_INSERT_CONTEXT_SUSPEND_MASK 0x10000000L
+#define DB_DEBUG3__DELETE_CONTEXT_SUSPEND_MASK 0x20000000L
+#define DB_DEBUG3__DISABLE_TS_WRITE_L0_MASK 0x40000000L
+#define DB_DEBUG3__DISABLE_MULTIDTAG_FL_PANIC_REQUIREMENT_MASK 0x80000000L
+//DB_DEBUG4
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION__SHIFT 0x0
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION__SHIFT 0x1
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL__SHIFT 0x2
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL__SHIFT 0x3
+#define DB_DEBUG4__DISABLE_SEPARATE_OP_PIPE_CLK__SHIFT 0x4
+#define DB_DEBUG4__DISABLE_SEPARATE_SX_CLK__SHIFT 0x5
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN__SHIFT 0x6
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE__SHIFT 0x7
+#define DB_DEBUG4__DISABLE_SEPARATE_DBG_CLK__SHIFT 0x8
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR__SHIFT 0x9
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR__SHIFT 0xa
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR__SHIFT 0xb
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION__SHIFT 0xc
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP__SHIFT 0xd
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION__SHIFT 0xe
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE__SHIFT 0xf
+#define DB_DEBUG4__DISABLE_HIZ_TS_COLLISION_DETECT__SHIFT 0x10
+#define DB_DEBUG4__DISABLE_LAST_OF_BURST_ON_FLUSH_CHUNK0_ALL_DONE__SHIFT 0x12
+#define DB_DEBUG4__ENABLE_CZ_OVERFLOW_TESTMODE__SHIFT 0x13
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO__SHIFT 0x15
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_CONFLICT__SHIFT 0x16
+#define DB_DEBUG4__WR_MEM_BURST_CTL__SHIFT 0x18
+#define DB_DEBUG4__DISABLE_WR_MEM_BURST_POOLING__SHIFT 0x1b
+#define DB_DEBUG4__DISABLE_RD_MEM_BURST__SHIFT 0x1c
+#define DB_DEBUG4__LATE_ACK_SCOREBOARD_MULTIPLE_SLOT__SHIFT 0x1e
+#define DB_DEBUG4__LATE_ACK_PSD_EOP_OLD_METHOD__SHIFT 0x1f
+#define DB_DEBUG4__DISABLE_QC_Z_MASK_SUMMATION_MASK 0x00000001L
+#define DB_DEBUG4__DISABLE_QC_STENCIL_MASK_SUMMATION_MASK 0x00000002L
+#define DB_DEBUG4__DISABLE_RESUMM_TO_SINGLE_STENCIL_MASK 0x00000004L
+#define DB_DEBUG4__DISABLE_PREZ_POSTZ_DTILE_CONFLICT_STALL_MASK 0x00000008L
+#define DB_DEBUG4__DISABLE_SEPARATE_OP_PIPE_CLK_MASK 0x00000010L
+#define DB_DEBUG4__DISABLE_SEPARATE_SX_CLK_MASK 0x00000020L
+#define DB_DEBUG4__ALWAYS_ON_RMI_CLK_EN_MASK 0x00000040L
+#define DB_DEBUG4__ENABLE_DBCB_SLOW_FORMAT_COLLAPSE_MASK 0x00000080L
+#define DB_DEBUG4__DISABLE_SEPARATE_DBG_CLK_MASK 0x00000100L
+#define DB_DEBUG4__DISABLE_UNMAPPED_Z_INDICATOR_MASK 0x00000200L
+#define DB_DEBUG4__DISABLE_UNMAPPED_S_INDICATOR_MASK 0x00000400L
+#define DB_DEBUG4__DISABLE_UNMAPPED_H_INDICATOR_MASK 0x00000800L
+#define DB_DEBUG4__ENABLE_A2M_DQUAD_OPTIMIZATION_MASK 0x00001000L
+#define DB_DEBUG4__DISABLE_DTT_FAST_HTILENACK_LOOKUP_MASK 0x00002000L
+#define DB_DEBUG4__DISABLE_RESCHECK_MEMCOHER_OPTIMIZATION_MASK 0x00004000L
+#define DB_DEBUG4__DISABLE_DYNAMIC_RAM_LIGHT_SLEEP_MODE_MASK 0x00008000L
+#define DB_DEBUG4__DISABLE_HIZ_TS_COLLISION_DETECT_MASK 0x00010000L
+#define DB_DEBUG4__DISABLE_LAST_OF_BURST_ON_FLUSH_CHUNK0_ALL_DONE_MASK 0x00040000L
+#define DB_DEBUG4__ENABLE_CZ_OVERFLOW_TESTMODE_MASK 0x00080000L
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_MASK 0x00200000L
+#define DB_DEBUG4__DISABLE_MCC_BURST_FIFO_CONFLICT_MASK 0x00400000L
+#define DB_DEBUG4__WR_MEM_BURST_CTL_MASK 0x07000000L
+#define DB_DEBUG4__DISABLE_WR_MEM_BURST_POOLING_MASK 0x08000000L
+#define DB_DEBUG4__DISABLE_RD_MEM_BURST_MASK 0x10000000L
+#define DB_DEBUG4__LATE_ACK_SCOREBOARD_MULTIPLE_SLOT_MASK 0x40000000L
+#define DB_DEBUG4__LATE_ACK_PSD_EOP_OLD_METHOD_MASK 0x80000000L
+//DB_ETILE_STUTTER_CONTROL
+#define DB_ETILE_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_ETILE_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_ETILE_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_ETILE_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_LTILE_STUTTER_CONTROL
+#define DB_LTILE_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_LTILE_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_LTILE_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_LTILE_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_EQUAD_STUTTER_CONTROL
+#define DB_EQUAD_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_EQUAD_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_EQUAD_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_EQUAD_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_LQUAD_STUTTER_CONTROL
+#define DB_LQUAD_STUTTER_CONTROL__THRESHOLD__SHIFT 0x0
+#define DB_LQUAD_STUTTER_CONTROL__TIMEOUT__SHIFT 0x10
+#define DB_LQUAD_STUTTER_CONTROL__THRESHOLD_MASK 0x000000FFL
+#define DB_LQUAD_STUTTER_CONTROL__TIMEOUT_MASK 0x00FF0000L
+//DB_CREDIT_LIMIT
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS__SHIFT 0x0
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS__SHIFT 0x5
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS__SHIFT 0xa
+#define DB_CREDIT_LIMIT__DB_SC_WAVE_CREDITS__SHIFT 0xd
+#define DB_CREDIT_LIMIT__DB_SC_FREE_WAVE_CREDITS__SHIFT 0x12
+#define DB_CREDIT_LIMIT__DB_SC_TILE_CREDITS_MASK 0x0000001FL
+#define DB_CREDIT_LIMIT__DB_SC_QUAD_CREDITS_MASK 0x000003E0L
+#define DB_CREDIT_LIMIT__DB_CB_LQUAD_CREDITS_MASK 0x00001C00L
+#define DB_CREDIT_LIMIT__DB_SC_WAVE_CREDITS_MASK 0x0003E000L
+#define DB_CREDIT_LIMIT__DB_SC_FREE_WAVE_CREDITS_MASK 0x007C0000L
+//DB_WATERMARKS
+#define DB_WATERMARKS__DEPTH_FREE__SHIFT 0x0
+#define DB_WATERMARKS__DEPTH_FLUSH__SHIFT 0x8
+#define DB_WATERMARKS__DEPTH_PENDING_FREE__SHIFT 0x10
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE__SHIFT 0x18
+#define DB_WATERMARKS__DEPTH_FREE_MASK 0x000000FFL
+#define DB_WATERMARKS__DEPTH_FLUSH_MASK 0x0000FF00L
+#define DB_WATERMARKS__DEPTH_PENDING_FREE_MASK 0x00FF0000L
+#define DB_WATERMARKS__DEPTH_CACHELINE_FREE_MASK 0xFF000000L
+//DB_SUBTILE_CONTROL
+#define DB_SUBTILE_CONTROL__MSAA1_X__SHIFT 0x0
+#define DB_SUBTILE_CONTROL__MSAA1_Y__SHIFT 0x2
+#define DB_SUBTILE_CONTROL__MSAA2_X__SHIFT 0x4
+#define DB_SUBTILE_CONTROL__MSAA2_Y__SHIFT 0x6
+#define DB_SUBTILE_CONTROL__MSAA4_X__SHIFT 0x8
+#define DB_SUBTILE_CONTROL__MSAA4_Y__SHIFT 0xa
+#define DB_SUBTILE_CONTROL__MSAA8_X__SHIFT 0xc
+#define DB_SUBTILE_CONTROL__MSAA8_Y__SHIFT 0xe
+#define DB_SUBTILE_CONTROL__MSAA16_X__SHIFT 0x10
+#define DB_SUBTILE_CONTROL__MSAA16_Y__SHIFT 0x12
+#define DB_SUBTILE_CONTROL__MSAA1_X_MASK 0x00000003L
+#define DB_SUBTILE_CONTROL__MSAA1_Y_MASK 0x0000000CL
+#define DB_SUBTILE_CONTROL__MSAA2_X_MASK 0x00000030L
+#define DB_SUBTILE_CONTROL__MSAA2_Y_MASK 0x000000C0L
+#define DB_SUBTILE_CONTROL__MSAA4_X_MASK 0x00000300L
+#define DB_SUBTILE_CONTROL__MSAA4_Y_MASK 0x00000C00L
+#define DB_SUBTILE_CONTROL__MSAA8_X_MASK 0x00003000L
+#define DB_SUBTILE_CONTROL__MSAA8_Y_MASK 0x0000C000L
+#define DB_SUBTILE_CONTROL__MSAA16_X_MASK 0x00030000L
+#define DB_SUBTILE_CONTROL__MSAA16_Y_MASK 0x000C0000L
+//DB_FREE_CACHELINES
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH__SHIFT 0x0
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH__SHIFT 0x8
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH__SHIFT 0x10
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH__SHIFT 0x18
+#define DB_FREE_CACHELINES__FREE_DTILE_DEPTH_MASK 0x000000FFL
+#define DB_FREE_CACHELINES__FREE_PLANE_DEPTH_MASK 0x0000FF00L
+#define DB_FREE_CACHELINES__FREE_Z_DEPTH_MASK 0x00FF0000L
+#define DB_FREE_CACHELINES__FREE_HTILE_DEPTH_MASK 0xFF000000L
+//DB_FIFO_DEPTH1
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH1__MCC_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH1__QC_DEPTH__SHIFT 0x18
+#define DB_FIFO_DEPTH1__MI_RDREQ_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH1__MI_WRREQ_FIFO_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH1__MCC_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH1__QC_DEPTH_MASK 0xFF000000L
+//DB_FIFO_DEPTH2
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH__SHIFT 0x19
+#define DB_FIFO_DEPTH2__EQUAD_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH2__ETILE_OP_FIFO_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH2__LQUAD_FIFO_DEPTH_MASK 0x01FF0000L
+#define DB_FIFO_DEPTH2__LTILE_OP_FIFO_DEPTH_MASK 0xFE000000L
+//DB_LAST_OF_BURST_CONFIG
+#define DB_LAST_OF_BURST_CONFIG__MAXBURST__SHIFT 0x0
+#define DB_LAST_OF_BURST_CONFIG__TIMEOUT__SHIFT 0x8
+#define DB_LAST_OF_BURST_CONFIG__DBCB_LOB_SWITCH_TIMEOUT__SHIFT 0xb
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_FG_DEFAULT_TIMEOUT__SHIFT 0x11
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_COUNT_RESET_ON_LOB__SHIFT 0x12
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_FLQ_LOB_EVERY_256B__SHIFT 0x13
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_ZCACHE_FL_OP_EVEN_ARB__SHIFT 0x14
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_FORCE_FLUSH_BEFORE_FIFO__SHIFT 0x15
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_DKG_LOB_GEN__SHIFT 0x16
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_LPF_LOB_GEN__SHIFT 0x17
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FL_BURST__SHIFT 0x19
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FG_LOB_FWDR__SHIFT 0x1a
+#define DB_LAST_OF_BURST_CONFIG__BYPASS_SORT_RD_BA__SHIFT 0x1c
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_256B_COALESCE__SHIFT 0x1d
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_RD_BURST__SHIFT 0x1e
+#define DB_LAST_OF_BURST_CONFIG__LEGACY_LOB_INSERT_EN__SHIFT 0x1f
+#define DB_LAST_OF_BURST_CONFIG__MAXBURST_MASK 0x000000FFL
+#define DB_LAST_OF_BURST_CONFIG__TIMEOUT_MASK 0x00000700L
+#define DB_LAST_OF_BURST_CONFIG__DBCB_LOB_SWITCH_TIMEOUT_MASK 0x0000F800L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_FG_DEFAULT_TIMEOUT_MASK 0x00020000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_COUNT_RESET_ON_LOB_MASK 0x00040000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_FLQ_LOB_EVERY_256B_MASK 0x00080000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_ZCACHE_FL_OP_EVEN_ARB_MASK 0x00100000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_MCC_BURST_FORCE_FLUSH_BEFORE_FIFO_MASK 0x00200000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_DKG_LOB_GEN_MASK 0x00400000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_LPF_LOB_GEN_MASK 0x00800000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FL_BURST_MASK 0x02000000L
+#define DB_LAST_OF_BURST_CONFIG__ENABLE_TIMEOUT_FG_LOB_FWDR_MASK 0x04000000L
+#define DB_LAST_OF_BURST_CONFIG__BYPASS_SORT_RD_BA_MASK 0x10000000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_256B_COALESCE_MASK 0x20000000L
+#define DB_LAST_OF_BURST_CONFIG__DISABLE_RD_BURST_MASK 0x40000000L
+#define DB_LAST_OF_BURST_CONFIG__LEGACY_LOB_INSERT_EN_MASK 0x80000000L
+//DB_RING_CONTROL
+#define DB_RING_CONTROL__COUNTER_CONTROL__SHIFT 0x0
+#define DB_RING_CONTROL__COUNTER_CONTROL_MASK 0x00000003L
+//DB_MEM_ARB_WATERMARKS
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK__SHIFT 0x0
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK__SHIFT 0x8
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK__SHIFT 0x10
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK__SHIFT 0x18
+#define DB_MEM_ARB_WATERMARKS__CLIENT0_WATERMARK_MASK 0x00000007L
+#define DB_MEM_ARB_WATERMARKS__CLIENT1_WATERMARK_MASK 0x00000700L
+#define DB_MEM_ARB_WATERMARKS__CLIENT2_WATERMARK_MASK 0x00070000L
+#define DB_MEM_ARB_WATERMARKS__CLIENT3_WATERMARK_MASK 0x07000000L
+//DB_FIFO_DEPTH3
+#define DB_FIFO_DEPTH3__LTILE_PROBE_FIFO_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH3__OSB_WAVE_TABLE_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH3__OREO_WAVE_HIDE_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH3__QUAD_READ_REQS__SHIFT 0x18
+#define DB_FIFO_DEPTH3__LTILE_PROBE_FIFO_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH3__OSB_WAVE_TABLE_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH3__OREO_WAVE_HIDE_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH3__QUAD_READ_REQS_MASK 0xFF000000L
+//DB_DEBUG6
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_CONFLICT__SHIFT 0x0
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_HARD_CONFLICT__SHIFT 0x1
+#define DB_DEBUG6__FORCE_DB_SC_QUAD_CONFLICT__SHIFT 0x2
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ALL__SHIFT 0x3
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ID__SHIFT 0x4
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_EN__SHIFT 0xa
+#define DB_DEBUG6__DISABLE_PWS_PLUS_TCP_CM_LIVENESS_STALL__SHIFT 0xb
+#define DB_DEBUG6__DISABLE_PWS_PLUS_DTT_TAG_LIVENESS_STALL__SHIFT 0xc
+#define DB_DEBUG6__SET_DB_PERFMON_PWS_PIPE_ID__SHIFT 0xd
+#define DB_DEBUG6__FTWB_MAX_TIMEOUT_VAL__SHIFT 0x10
+#define DB_DEBUG6__DISABLE_LQO_SMT_RAM_OPT__SHIFT 0x18
+#define DB_DEBUG6__FORCE_MAX_TILES_IN_WAVE_CHECK__SHIFT 0x19
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_FIX__SHIFT 0x1a
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_WAIT_PANIC__SHIFT 0x1b
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_CONFLICT_MASK 0x00000001L
+#define DB_DEBUG6__FORCE_DB_SC_WAVE_HARD_CONFLICT_MASK 0x00000002L
+#define DB_DEBUG6__FORCE_DB_SC_QUAD_CONFLICT_MASK 0x00000004L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ALL_MASK 0x00000008L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_ID_MASK 0x000003F0L
+#define DB_DEBUG6__OREO_TRANSITION_EVENT_EN_MASK 0x00000400L
+#define DB_DEBUG6__DISABLE_PWS_PLUS_TCP_CM_LIVENESS_STALL_MASK 0x00000800L
+#define DB_DEBUG6__DISABLE_PWS_PLUS_DTT_TAG_LIVENESS_STALL_MASK 0x00001000L
+#define DB_DEBUG6__SET_DB_PERFMON_PWS_PIPE_ID_MASK 0x00006000L
+#define DB_DEBUG6__FTWB_MAX_TIMEOUT_VAL_MASK 0x00FF0000L
+#define DB_DEBUG6__DISABLE_LQO_SMT_RAM_OPT_MASK 0x01000000L
+#define DB_DEBUG6__FORCE_MAX_TILES_IN_WAVE_CHECK_MASK 0x02000000L
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_FIX_MASK 0x04000000L
+#define DB_DEBUG6__DISABLE_OSB_DEADLOCK_WAIT_PANIC_MASK 0x08000000L
+//DB_EXCEPTION_CONTROL
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE__SHIFT 0x0
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE__SHIFT 0x1
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE__SHIFT 0x2
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT 0x3
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT 0x4
+#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT 0x8
+#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT 0x18
+#define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L
+#define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L
+#define DB_EXCEPTION_CONTROL__RE_Z_PANIC_DISABLE_MASK 0x00000004L
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK 0x00000008L
+#define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK 0x00000010L
+#define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK 0x00000F00L
+#define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK 0x7F000000L
+//DB_DEBUG7
+#define DB_DEBUG7__SPARE_BITS__SHIFT 0x0
+#define DB_DEBUG7__SPARE_BITS_MASK 0xFFFFFFFFL
+//DB_DEBUG5
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PRELOAD__SHIFT 0x0
+#define DB_DEBUG5__ENABLE_SECONDARY_MIPS_TAILS_COMPRESSION__SHIFT 0x1
+#define DB_DEBUG5__DISABLE_CLEAR_VALUE_UPDATE_ON_TILE_CACHE_HIT__SHIFT 0x2
+#define DB_DEBUG5__DISABLE_2SRC_VRS_HARD_CONFLICT__SHIFT 0x3
+#define DB_DEBUG5__DISABLE_FLQ_MCC_DTILEID_CHECK__SHIFT 0x4
+#define DB_DEBUG5__DISABLE_NOZ_POWER_SAVINGS__SHIFT 0x5
+#define DB_DEBUG5__DISABLE_TILE_INFLIGHT_DEC_POSTZ_FIX__SHIFT 0x6
+#define DB_DEBUG5__DISABLE_MGCG_GATING_ON_SHADER_WAIT__SHIFT 0x7
+#define DB_DEBUG5__DISABLE_VRS_1X2_2XAA__SHIFT 0x8
+#define DB_DEBUG5__ENABLE_FULL_TILE_WAVE_BREAK_ON_COARSE__SHIFT 0x9
+#define DB_DEBUG5__DISABLE_HTILE_HARVESTING__SHIFT 0xa
+#define DB_DEBUG5__DISABLE_SEPARATE_TILE_CLK__SHIFT 0xb
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PREFETCH__SHIFT 0xc
+#define DB_DEBUG5__DISABLE_PSL_AUTO_MODE_FIX__SHIFT 0xd
+#define DB_DEBUG5__DISABLE_FORCE_ZMASK_EXPANDED__SHIFT 0xe
+#define DB_DEBUG5__DISABLE_SEPARATE_LQO_CLK__SHIFT 0xf
+#define DB_DEBUG5__DISABLE_Z_WITHOUT_PLANES_FLQ__SHIFT 0x10
+#define DB_DEBUG5__PRESERVE_QMASK_FOR_POSTZ_OP_PIPE__SHIFT 0x11
+#define DB_DEBUG5__Z_NACK_BEHAVIOR_ONLY_WHEN_Z_IS_PRT__SHIFT 0x12
+#define DB_DEBUG5__S_NACK_BEHAVIOR_ONLY_WHEN_S_IS_PRT__SHIFT 0x13
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_Z__SHIFT 0x14
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_STENCIL__SHIFT 0x15
+#define DB_DEBUG5__DISABLE_LQO_FTCQ_DUAL_QUAD_REGION_CHECK__SHIFT 0x16
+#define DB_DEBUG5__DISABLE_EVENT_INSERTION_AFTER_ZPC_BEFORE_CONTEXT_DONE__SHIFT 0x17
+#define DB_DEBUG5__SPARE_BITS__SHIFT 0x18
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PRELOAD_MASK 0x00000001L
+#define DB_DEBUG5__ENABLE_SECONDARY_MIPS_TAILS_COMPRESSION_MASK 0x00000002L
+#define DB_DEBUG5__DISABLE_CLEAR_VALUE_UPDATE_ON_TILE_CACHE_HIT_MASK 0x00000004L
+#define DB_DEBUG5__DISABLE_2SRC_VRS_HARD_CONFLICT_MASK 0x00000008L
+#define DB_DEBUG5__DISABLE_FLQ_MCC_DTILEID_CHECK_MASK 0x00000010L
+#define DB_DEBUG5__DISABLE_NOZ_POWER_SAVINGS_MASK 0x00000020L
+#define DB_DEBUG5__DISABLE_TILE_INFLIGHT_DEC_POSTZ_FIX_MASK 0x00000040L
+#define DB_DEBUG5__DISABLE_MGCG_GATING_ON_SHADER_WAIT_MASK 0x00000080L
+#define DB_DEBUG5__DISABLE_VRS_1X2_2XAA_MASK 0x00000100L
+#define DB_DEBUG5__ENABLE_FULL_TILE_WAVE_BREAK_ON_COARSE_MASK 0x00000200L
+#define DB_DEBUG5__DISABLE_HTILE_HARVESTING_MASK 0x00000400L
+#define DB_DEBUG5__DISABLE_SEPARATE_TILE_CLK_MASK 0x00000800L
+#define DB_DEBUG5__DISABLE_TILE_CACHE_PREFETCH_MASK 0x00001000L
+#define DB_DEBUG5__DISABLE_PSL_AUTO_MODE_FIX_MASK 0x00002000L
+#define DB_DEBUG5__DISABLE_FORCE_ZMASK_EXPANDED_MASK 0x00004000L
+#define DB_DEBUG5__DISABLE_SEPARATE_LQO_CLK_MASK 0x00008000L
+#define DB_DEBUG5__DISABLE_Z_WITHOUT_PLANES_FLQ_MASK 0x00010000L
+#define DB_DEBUG5__PRESERVE_QMASK_FOR_POSTZ_OP_PIPE_MASK 0x00020000L
+#define DB_DEBUG5__Z_NACK_BEHAVIOR_ONLY_WHEN_Z_IS_PRT_MASK 0x00040000L
+#define DB_DEBUG5__S_NACK_BEHAVIOR_ONLY_WHEN_S_IS_PRT_MASK 0x00080000L
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_Z_MASK 0x00100000L
+#define DB_DEBUG5__DISABLE_RESIDENCY_CHECK_STENCIL_MASK 0x00200000L
+#define DB_DEBUG5__DISABLE_LQO_FTCQ_DUAL_QUAD_REGION_CHECK_MASK 0x00400000L
+#define DB_DEBUG5__DISABLE_EVENT_INSERTION_AFTER_ZPC_BEFORE_CONTEXT_DONE_MASK 0x00800000L
+#define DB_DEBUG5__SPARE_BITS_MASK 0xFF000000L
+//DB_FGCG_SRAMS_CLK_CTRL
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE0__SHIFT 0x0
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE1__SHIFT 0x1
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE2__SHIFT 0x2
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE3__SHIFT 0x3
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE4__SHIFT 0x4
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE5__SHIFT 0x5
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE6__SHIFT 0x6
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE7__SHIFT 0x7
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE8__SHIFT 0x8
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE9__SHIFT 0x9
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE10__SHIFT 0xa
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE11__SHIFT 0xb
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE12__SHIFT 0xc
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE13__SHIFT 0xd
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE14__SHIFT 0xe
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE15__SHIFT 0xf
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE16__SHIFT 0x10
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE17__SHIFT 0x11
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE18__SHIFT 0x12
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE19__SHIFT 0x13
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE20__SHIFT 0x14
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE21__SHIFT 0x15
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE22__SHIFT 0x16
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE23__SHIFT 0x17
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE24__SHIFT 0x18
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE25__SHIFT 0x19
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE26__SHIFT 0x1a
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE27__SHIFT 0x1b
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE28__SHIFT 0x1c
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE29__SHIFT 0x1d
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE30__SHIFT 0x1e
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE31__SHIFT 0x1f
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE0_MASK 0x00000001L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE1_MASK 0x00000002L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE2_MASK 0x00000004L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE3_MASK 0x00000008L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE4_MASK 0x00000010L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE5_MASK 0x00000020L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE6_MASK 0x00000040L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE7_MASK 0x00000080L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE8_MASK 0x00000100L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE9_MASK 0x00000200L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE10_MASK 0x00000400L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE11_MASK 0x00000800L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE12_MASK 0x00001000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE13_MASK 0x00002000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE14_MASK 0x00004000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE15_MASK 0x00008000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE16_MASK 0x00010000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE17_MASK 0x00020000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE18_MASK 0x00040000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE19_MASK 0x00080000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE20_MASK 0x00100000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE21_MASK 0x00200000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE22_MASK 0x00400000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE23_MASK 0x00800000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE24_MASK 0x01000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE25_MASK 0x02000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE26_MASK 0x04000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE27_MASK 0x08000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE28_MASK 0x10000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE29_MASK 0x20000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE30_MASK 0x40000000L
+#define DB_FGCG_SRAMS_CLK_CTRL__OVERRIDE31_MASK 0x80000000L
+//DB_FGCG_INTERFACES_CLK_CTRL
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_QUAD_OVERRIDE__SHIFT 0x0
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_EXPORT_OVERRIDE__SHIFT 0x2
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_RDREQ_OVERRIDE__SHIFT 0x3
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_WRREQ_OVERRIDE__SHIFT 0x4
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_TILE_OVERRIDE__SHIFT 0x5
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_RMIRET_OVERRIDE__SHIFT 0x6
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_WAVE_OVERRIDE__SHIFT 0x7
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_FREE_WAVE_OVERRIDE__SHIFT 0x8
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_QUAD_OVERRIDE_MASK 0x00000001L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_EXPORT_OVERRIDE_MASK 0x00000004L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_RDREQ_OVERRIDE_MASK 0x00000008L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_RMI_WRREQ_OVERRIDE_MASK 0x00000010L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_TILE_OVERRIDE_MASK 0x00000020L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_CB_RMIRET_OVERRIDE_MASK 0x00000040L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_WAVE_OVERRIDE_MASK 0x00000080L
+#define DB_FGCG_INTERFACES_CLK_CTRL__DB_SC_FREE_WAVE_OVERRIDE_MASK 0x00000100L
+//DB_FIFO_DEPTH4
+#define DB_FIFO_DEPTH4__OSB_SQUAD_TABLE_DEPTH__SHIFT 0x0
+#define DB_FIFO_DEPTH4__OSB_TILE_TABLE_DEPTH__SHIFT 0x8
+#define DB_FIFO_DEPTH4__OSB_SCORE_BOARD_DEPTH__SHIFT 0x10
+#define DB_FIFO_DEPTH4__OSB_EVENT_FIFO_DEPTH__SHIFT 0x18
+#define DB_FIFO_DEPTH4__OSB_SQUAD_TABLE_DEPTH_MASK 0x000000FFL
+#define DB_FIFO_DEPTH4__OSB_TILE_TABLE_DEPTH_MASK 0x0000FF00L
+#define DB_FIFO_DEPTH4__OSB_SCORE_BOARD_DEPTH_MASK 0x00FF0000L
+#define DB_FIFO_DEPTH4__OSB_EVENT_FIFO_DEPTH_MASK 0xFF000000L
+//CC_RB_REDUNDANCY
+#define CC_RB_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define CC_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define CC_RB_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define CC_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define CC_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//CC_RB_BACKEND_DISABLE
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CC_RB_BACKEND_DISABLE__RESERVED__SHIFT 0x2
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x4
+#define CC_RB_BACKEND_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CC_RB_BACKEND_DISABLE__RESERVED_MASK 0x0000000CL
+#define CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xFFFFFFF0L
+//GB_ADDR_CONFIG
+#define GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L
+#define GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x0C000000L
+//GB_BACKEND_MAP
+#define GB_BACKEND_MAP__BACKEND_MAP__SHIFT 0x0
+#define GB_BACKEND_MAP__BACKEND_MAP_MASK 0xFFFFFFFFL
+//GB_GPU_ID
+#define GB_GPU_ID__GPU_ID__SHIFT 0x0
+#define GB_GPU_ID__GPU_ID_MASK 0x0000000FL
+//CC_RB_DAISY_CHAIN
+#define CC_RB_DAISY_CHAIN__RB_0__SHIFT 0x0
+#define CC_RB_DAISY_CHAIN__RB_1__SHIFT 0x4
+#define CC_RB_DAISY_CHAIN__RB_2__SHIFT 0x8
+#define CC_RB_DAISY_CHAIN__RB_3__SHIFT 0xc
+#define CC_RB_DAISY_CHAIN__RB_4__SHIFT 0x10
+#define CC_RB_DAISY_CHAIN__RB_5__SHIFT 0x14
+#define CC_RB_DAISY_CHAIN__RB_6__SHIFT 0x18
+#define CC_RB_DAISY_CHAIN__RB_7__SHIFT 0x1c
+#define CC_RB_DAISY_CHAIN__RB_0_MASK 0x0000000FL
+#define CC_RB_DAISY_CHAIN__RB_1_MASK 0x000000F0L
+#define CC_RB_DAISY_CHAIN__RB_2_MASK 0x00000F00L
+#define CC_RB_DAISY_CHAIN__RB_3_MASK 0x0000F000L
+#define CC_RB_DAISY_CHAIN__RB_4_MASK 0x000F0000L
+#define CC_RB_DAISY_CHAIN__RB_5_MASK 0x00F00000L
+#define CC_RB_DAISY_CHAIN__RB_6_MASK 0x0F000000L
+#define CC_RB_DAISY_CHAIN__RB_7_MASK 0xF0000000L
+//GB_ADDR_CONFIG_READ
+#define GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS__SHIFT 0x6
+#define GB_ADDR_CONFIG_READ__NUM_PKRS__SHIFT 0x8
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE__SHIFT 0x1a
+#define GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L
+#define GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L
+#define GB_ADDR_CONFIG_READ__MAX_COMPRESSED_FRAGS_MASK 0x000000C0L
+#define GB_ADDR_CONFIG_READ__NUM_PKRS_MASK 0x00000700L
+#define GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L
+#define GB_ADDR_CONFIG_READ__NUM_RB_PER_SE_MASK 0x0C000000L
+//CB_HW_CONTROL_4
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_NUM_QB_LOG2__SHIFT 0x0
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_ALGORITHM__SHIFT 0x3
+#define CB_HW_CONTROL_4__DISABLE_USE_OF_SMT_SCORE__SHIFT 0x5
+#define CB_HW_CONTROL_4__SPARE_10__SHIFT 0x6
+#define CB_HW_CONTROL_4__SPARE_11__SHIFT 0x7
+#define CB_HW_CONTROL_4__SPARE_12__SHIFT 0x8
+#define CB_HW_CONTROL_4__DISABLE_MA_WAIT_FOR_LAST__SHIFT 0x9
+#define CB_HW_CONTROL_4__SMT_TIMEOUT_THRESHOLD__SHIFT 0xa
+#define CB_HW_CONTROL_4__SMT_QPFIFO_THRESHOLD__SHIFT 0xd
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_RAW_HAZARD__SHIFT 0x10
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_COARSE_RAW_HAZARD__SHIFT 0x11
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_DS_RAW_HAZARD__SHIFT 0x12
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_NUM_QB_LOG2_MASK 0x00000007L
+#define CB_HW_CONTROL_4__COLOR_CACHE_FETCH_ALGORITHM_MASK 0x00000018L
+#define CB_HW_CONTROL_4__DISABLE_USE_OF_SMT_SCORE_MASK 0x00000020L
+#define CB_HW_CONTROL_4__SPARE_10_MASK 0x00000040L
+#define CB_HW_CONTROL_4__SPARE_11_MASK 0x00000080L
+#define CB_HW_CONTROL_4__SPARE_12_MASK 0x00000100L
+#define CB_HW_CONTROL_4__DISABLE_MA_WAIT_FOR_LAST_MASK 0x00000200L
+#define CB_HW_CONTROL_4__SMT_TIMEOUT_THRESHOLD_MASK 0x00001C00L
+#define CB_HW_CONTROL_4__SMT_QPFIFO_THRESHOLD_MASK 0x0000E000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_RAW_HAZARD_MASK 0x00010000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_COARSE_RAW_HAZARD_MASK 0x00020000L
+#define CB_HW_CONTROL_4__ENABLE_FRAGOP_STALLING_ON_DS_RAW_HAZARD_MASK 0x00040000L
+//CB_HW_CONTROL_3
+#define CB_HW_CONTROL_3__SPARE_5__SHIFT 0x0
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED__SHIFT 0x1
+#define CB_HW_CONTROL_3__SPARE_6__SHIFT 0x2
+#define CB_HW_CONTROL_3__SPARE_7__SHIFT 0x3
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM__SHIFT 0x4
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING__SHIFT 0x5
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS__SHIFT 0x6
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS__SHIFT 0x7
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH__SHIFT 0xb
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH__SHIFT 0xc
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC__SHIFT 0xd
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC__SHIFT 0xe
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC__SHIFT 0xf
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC__SHIFT 0x10
+#define CB_HW_CONTROL_3__SPARE_8__SHIFT 0x11
+#define CB_HW_CONTROL_3__SPARE_9__SHIFT 0x12
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT 0x14
+#define CB_HW_CONTROL_3__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x15
+#define CB_HW_CONTROL_3__SPARE_5_MASK 0x00000001L
+#define CB_HW_CONTROL_3__RAM_ADDRESS_CONFLICTS_DISALLOWED_MASK 0x00000002L
+#define CB_HW_CONTROL_3__SPARE_6_MASK 0x00000004L
+#define CB_HW_CONTROL_3__SPARE_7_MASK 0x00000008L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_OVWR_STATUS_ACCUM_MASK 0x00000010L
+#define CB_HW_CONTROL_3__DISABLE_CC_CACHE_PANIC_GATING_MASK 0x00000020L
+#define CB_HW_CONTROL_3__SPLIT_ALL_FAST_MODE_TRANSFERS_MASK 0x00000040L
+#define CB_HW_CONTROL_3__DISABLE_SHADER_BLEND_OPTS_MASK 0x00000080L
+#define CB_HW_CONTROL_3__FORCE_RMI_LAST_HIGH_MASK 0x00000800L
+#define CB_HW_CONTROL_3__FORCE_RMI_CLKEN_HIGH_MASK 0x00001000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_CC_MASK 0x00002000L
+#define CB_HW_CONTROL_3__DISABLE_EARLY_WRACKS_DC_MASK 0x00004000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CC_MASK 0x00008000L
+#define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_DC_MASK 0x00010000L
+#define CB_HW_CONTROL_3__SPARE_8_MASK 0x00020000L
+#define CB_HW_CONTROL_3__SPARE_9_MASK 0x00040000L
+#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK 0x00100000L
+#define CB_HW_CONTROL_3__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00200000L
+//CB_HW_CONTROL
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x0
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT 0x1
+#define CB_HW_CONTROL__DISABLE_SMT_WHEN_NO_FDCC_FIX__SHIFT 0x2
+#define CB_HW_CONTROL__RMI_CREDITS__SHIFT 0x6
+#define CB_HW_CONTROL__NUM_CCC_SKID_FIFO_ENTRIES__SHIFT 0xc
+#define CB_HW_CONTROL__FORCE_FEA_HIGH__SHIFT 0xf
+#define CB_HW_CONTROL__FORCE_EVICT_ALL_VALID__SHIFT 0x10
+#define CB_HW_CONTROL__DISABLE_DCC_CACHE_BYTEMASKING__SHIFT 0x11
+#define CB_HW_CONTROL__FORCE_NEEDS_DST__SHIFT 0x13
+#define CB_HW_CONTROL__DISABLE_USE_OF_SET_HASH__SHIFT 0x14
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST__SHIFT 0x15
+#define CB_HW_CONTROL__SPARE_2__SHIFT 0x16
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST__SHIFT 0x18
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS__SHIFT 0x19
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL__SHIFT 0x1a
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED__SHIFT 0x1b
+#define CB_HW_CONTROL__SPARE_3__SHIFT 0x1d
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f
+#define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00000001L
+#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK 0x00000002L
+#define CB_HW_CONTROL__DISABLE_SMT_WHEN_NO_FDCC_FIX_MASK 0x00000004L
+#define CB_HW_CONTROL__RMI_CREDITS_MASK 0x00000FC0L
+#define CB_HW_CONTROL__NUM_CCC_SKID_FIFO_ENTRIES_MASK 0x00007000L
+#define CB_HW_CONTROL__FORCE_FEA_HIGH_MASK 0x00008000L
+#define CB_HW_CONTROL__FORCE_EVICT_ALL_VALID_MASK 0x00010000L
+#define CB_HW_CONTROL__DISABLE_DCC_CACHE_BYTEMASKING_MASK 0x00020000L
+#define CB_HW_CONTROL__FORCE_NEEDS_DST_MASK 0x00080000L
+#define CB_HW_CONTROL__DISABLE_USE_OF_SET_HASH_MASK 0x00100000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_RESULT_EQ_DEST_MASK 0x00200000L
+#define CB_HW_CONTROL__SPARE_2_MASK 0x00400000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DONT_RD_DST_MASK 0x01000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_BYPASS_MASK 0x02000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_DISCARD_PIXEL_MASK 0x04000000L
+#define CB_HW_CONTROL__DISABLE_BLEND_OPT_WHEN_DISABLED_SRCALPHA_IS_USED_MASK 0x08000000L
+#define CB_HW_CONTROL__SPARE_3_MASK 0x20000000L
+#define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT_MASK 0x40000000L
+#define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE_MASK 0x80000000L
+//CB_HW_CONTROL_1
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS__SHIFT 0x0
+#define CB_HW_CONTROL_1__CC_CACHE_NUM_TAGS_MASK 0x0000003FL
+//CB_HW_CONTROL_2
+#define CB_HW_CONTROL_2__SPARE_4__SHIFT 0x0
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8__SHIFT 0x8
+#define CB_HW_CONTROL_2__SPARE__SHIFT 0xe
+#define CB_HW_CONTROL_2__SPARE_4_MASK 0x000000FFL
+#define CB_HW_CONTROL_2__DRR_ASSUMED_FIFO_DEPTH_DIV8_MASK 0x00003F00L
+#define CB_HW_CONTROL_2__SPARE_MASK 0xFFFFC000L
+//CB_DCC_CONFIG
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DEPTH__SHIFT 0x0
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x5
+#define CB_DCC_CONFIG__SPARE_13__SHIFT 0x6
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE__SHIFT 0x7
+#define CB_DCC_CONFIG__SPARE_14__SHIFT 0x8
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH__SHIFT 0x10
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS__SHIFT 0x19
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DEPTH_MASK 0x0000001FL
+#define CB_DCC_CONFIG__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000020L
+#define CB_DCC_CONFIG__SPARE_13_MASK 0x00000040L
+#define CB_DCC_CONFIG__DISABLE_CONSTANT_ENCODE_MASK 0x00000080L
+#define CB_DCC_CONFIG__SPARE_14_MASK 0x0000FF00L
+#define CB_DCC_CONFIG__READ_RETURN_SKID_FIFO_DEPTH_MASK 0x01FF0000L
+#define CB_DCC_CONFIG__DCC_CACHE_NUM_TAGS_MASK 0xFE000000L
+//CB_HW_MEM_ARBITER_RD
+#define CB_HW_MEM_ARBITER_RD__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE__SHIFT 0x13
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x19
+#define CB_HW_MEM_ARBITER_RD__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_RD__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_RD__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_REQS_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_DECAY_NOREQS_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_RD__WEIGHT_IGNORE_NUM_TIDS_MASK 0x00040000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_AGE_MASK 0x00380000L
+#define CB_HW_MEM_ARBITER_RD__SCALE_WEIGHT_MASK 0x01C00000L
+#define CB_HW_MEM_ARBITER_RD__SEND_LASTS_WITHIN_GROUPS_MASK 0x02000000L
+//CB_HW_MEM_ARBITER_WR
+#define CB_HW_MEM_ARBITER_WR__MODE__SHIFT 0x0
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE__SHIFT 0x2
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE__SHIFT 0x6
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC__SHIFT 0xa
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC__SHIFT 0xc
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS__SHIFT 0xe
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS__SHIFT 0x10
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK__SHIFT 0x12
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE__SHIFT 0x13
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT__SHIFT 0x16
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS__SHIFT 0x19
+#define CB_HW_MEM_ARBITER_WR__MODE_MASK 0x00000003L
+#define CB_HW_MEM_ARBITER_WR__IGNORE_URGENT_AGE_MASK 0x0000003CL
+#define CB_HW_MEM_ARBITER_WR__BREAK_GROUP_AGE_MASK 0x000003C0L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_CC_MASK 0x00000C00L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DC_MASK 0x00003000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_REQS_MASK 0x0000C000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_DECAY_NOREQS_MASK 0x00030000L
+#define CB_HW_MEM_ARBITER_WR__WEIGHT_IGNORE_BYTE_MASK_MASK 0x00040000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_AGE_MASK 0x00380000L
+#define CB_HW_MEM_ARBITER_WR__SCALE_WEIGHT_MASK 0x01C00000L
+#define CB_HW_MEM_ARBITER_WR__SEND_LASTS_WITHIN_GROUPS_MASK 0x02000000L
+//CB_FGCG_SRAM_OVERRIDE
+#define CB_FGCG_SRAM_OVERRIDE__DISABLE_FGCG__SHIFT 0x0
+#define CB_FGCG_SRAM_OVERRIDE__DISABLE_FGCG_MASK 0x000FFFFFL
+//CB_DCC_CONFIG2
+#define CB_DCC_CONFIG2__INVALID_KEY_ERROR_CODE__SHIFT 0x0
+#define CB_DCC_CONFIG2__CLEAR_FRAG2DCC_KEY_ERROR_CODE__SHIFT 0x8
+#define CB_DCC_CONFIG2__ENABLE_COMP_KEY_ERROR_DETECTION__SHIFT 0x9
+#define CB_DCC_CONFIG2__INVALID_KEY_ERROR_CODE_MASK 0x000000FFL
+#define CB_DCC_CONFIG2__CLEAR_FRAG2DCC_KEY_ERROR_CODE_MASK 0x00000100L
+#define CB_DCC_CONFIG2__ENABLE_COMP_KEY_ERROR_DETECTION_MASK 0x00000200L
+//CHICKEN_BITS
+#define CHICKEN_BITS__SPARE__SHIFT 0x0
+#define CHICKEN_BITS__SPARE_MASK 0xFFFFFFFFL
+//CB_CACHE_EVICT_POINTS
+#define CB_CACHE_EVICT_POINTS__CC_COLOR_EVICT_POINT__SHIFT 0x0
+#define CB_CACHE_EVICT_POINTS__CC_FMASK_EVICT_POINT__SHIFT 0x8
+#define CB_CACHE_EVICT_POINTS__DCC_CACHE_EVICT_POINT__SHIFT 0x10
+#define CB_CACHE_EVICT_POINTS__CC_CACHE_EVICT_POINT__SHIFT 0x18
+#define CB_CACHE_EVICT_POINTS__CC_COLOR_EVICT_POINT_MASK 0x000000FFL
+#define CB_CACHE_EVICT_POINTS__CC_FMASK_EVICT_POINT_MASK 0x0000FF00L
+#define CB_CACHE_EVICT_POINTS__DCC_CACHE_EVICT_POINT_MASK 0x00FF0000L
+#define CB_CACHE_EVICT_POINTS__CC_CACHE_EVICT_POINT_MASK 0xFF000000L
+
+
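Illustrative note (not part of the patch): the paired __SHIFT/_MASK macros above are conventionally consumed by a read-modify-write pattern when programming a single register field. A minimal sketch, using only the DB_WATERMARKS definitions from this block and the kernel's u32 type from <linux/types.h>; the helper name is hypothetical and stands in for whatever field-update helper a driver actually uses:

/* Sketch only: update the DEPTH_FREE field of a DB_WATERMARKS value.
 * The mask selects the field's bits; the shift positions the new value. */
static inline u32 db_watermarks_set_depth_free(u32 reg_val, u32 depth_free)
{
	reg_val &= ~DB_WATERMARKS__DEPTH_FREE_MASK;
	reg_val |= (depth_free << DB_WATERMARKS__DEPTH_FREE__SHIFT) &
		   DB_WATERMARKS__DEPTH_FREE_MASK;
	return reg_val;
}

Extraction is the inverse: (reg_val & DB_WATERMARKS__DEPTH_FREE_MASK) >> DB_WATERMARKS__DEPTH_FREE__SHIFT.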
+// addressBlock: gc_gceadec
+//GCEA_DRAM_RD_CLI2GRP_MAP0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_CLI2GRP_MAP1
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_WR_CLI2GRP_MAP1
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_DRAM_RD_GRP2VC_MAP
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_WR_GRP2VC_MAP
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L
+#define GCEA_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L
+//GCEA_DRAM_RD_LAZY
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_WR_LAZY
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b
+#define GCEA_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L
+#define GCEA_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L
+#define GCEA_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L
+#define GCEA_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L
+#define GCEA_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L
+//GCEA_DRAM_RD_CAM_CNTL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_WR_CAM_CNTL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L
+#define GCEA_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L
+#define GCEA_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L
+#define GCEA_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L
+//GCEA_DRAM_PAGE_BURST
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_AGE
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_WR_PRI_AGE
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_DRAM_RD_PRI_QUEUING
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_QUEUING
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_FIXED
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_WR_PRI_FIXED
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_DRAM_RD_PRI_URGENCY
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_WR_PRI_URGENCY
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI1
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI2
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_RD_PRI_QUANT_PRI3
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI1
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI2
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_DRAM_WR_PRI_QUANT_PRI3
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_CLI2GRP_MAP0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_CLI2GRP_MAP1
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L
+//GCEA_IO_WR_CLI2GRP_MAP1
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L
+#define GCEA_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L
+//GCEA_IO_RD_COMBINE_FLUSH
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+//GCEA_IO_WR_COMBINE_FLUSH
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x10
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING__SHIFT 0x12
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GCEA_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GCEA_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x00030000L
+#define GCEA_IO_WR_COMBINE_FLUSH__DISABLE_MAM_CHAINING_MASK 0x00040000L
+//GCEA_IO_GROUP_BURST
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GCEA_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GCEA_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_AGE
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_WR_PRI_AGE
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L
+#define GCEA_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L
+#define GCEA_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L
+#define GCEA_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L
+//GCEA_IO_RD_PRI_QUEUING
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_QUEUING
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_FIXED
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_WR_PRI_FIXED
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+//GCEA_IO_RD_PRI_URGENCY
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_WR_PRI_URGENCY
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L
+//GCEA_IO_RD_PRI_URGENCY_MASKING
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_WR_PRI_URGENCY_MASKING
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L
+#define GCEA_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L
+//GCEA_IO_RD_PRI_QUANT_PRI1
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI2
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_RD_PRI_QUANT_PRI3
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI1
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI2
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_IO_WR_PRI_QUANT_PRI3
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GCEA_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GCEA_SDP_ARB_DRAM
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING__SHIFT 0x16
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL
+#define GCEA_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L
+#define GCEA_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L
+#define GCEA_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L
+#define GCEA_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L
+#define GCEA_SDP_ARB_DRAM__ALLOW_CHAIN_BREAKING_MASK 0x00400000L
+//GCEA_SDP_ARB_FINAL
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b
+#define GCEA_SDP_ARB_FINAL__DRAM_RD_THROTTLE__SHIFT 0x1c
+#define GCEA_SDP_ARB_FINAL__DRAM_WR_THROTTLE__SHIFT 0x1d
+#define GCEA_SDP_ARB_FINAL__GMI_RD_THROTTLE__SHIFT 0x1e
+#define GCEA_SDP_ARB_FINAL__GMI_WR_THROTTLE__SHIFT 0x1f
+#define GCEA_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L
+#define GCEA_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GCEA_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L
+#define GCEA_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L
+#define GCEA_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L
+#define GCEA_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L
+#define GCEA_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_RD_THROTTLE_MASK 0x10000000L
+#define GCEA_SDP_ARB_FINAL__DRAM_WR_THROTTLE_MASK 0x20000000L
+#define GCEA_SDP_ARB_FINAL__GMI_RD_THROTTLE_MASK 0x40000000L
+#define GCEA_SDP_ARB_FINAL__GMI_WR_THROTTLE_MASK 0x80000000L
+//GCEA_SDP_DRAM_PRIORITY
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_IO_PRIORITY
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L
+#define GCEA_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L
+#define GCEA_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L
+//GCEA_SDP_CREDITS
+#define GCEA_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS__SHIFT 0x18
+#define GCEA_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GCEA_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GCEA_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+#define GCEA_SDP_CREDITS__PRB_REQ_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_TAG_RESERVE0
+#define GCEA_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GCEA_SDP_TAG_RESERVE1
+#define GCEA_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GCEA_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GCEA_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GCEA_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GCEA_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GCEA_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GCEA_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GCEA_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GCEA_SDP_VCC_RESERVE0
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GCEA_SDP_VCC_RESERVE1
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_VCD_RESERVE0
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GCEA_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GCEA_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+
+
+// addressBlock: gc_gceadec2
+//GCEA_SDP_VCD_RESERVE1
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GCEA_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GCEA_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GCEA_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GCEA_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GCEA_SDP_REQ_CNTL
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ__SHIFT 0x6
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE__SHIFT 0x8
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC__SHIFT 0xa
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GCEA_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GCEA_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L
+#define GCEA_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_READ_MASK 0x000000C0L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_WRITE_MASK 0x00000300L
+#define GCEA_SDP_REQ_CNTL__REQ_BLOCK_LEVEL_ATOMIC_MASK 0x00000C00L
+//GCEA_MISC
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd
+#define GCEA_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L
+#define GCEA_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L
+#define GCEA_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L
+#define GCEA_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L
+#define GCEA_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L
+#define GCEA_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L
+#define GCEA_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L
+#define GCEA_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L
+#define GCEA_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L
+#define GCEA_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L
+#define GCEA_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L
+#define GCEA_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L
+#define GCEA_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L
+//GCEA_LATENCY_SAMPLING
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L
+#define GCEA_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L
+//GCEA_MAM_CTRL2
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_DISABLE__SHIFT 0x0
+#define GCEA_MAM_CTRL2__DBIT_PF_CLR_ONLY__SHIFT 0x1
+#define GCEA_MAM_CTRL2__DBIT_PF_RD_ONLY__SHIFT 0x2
+#define GCEA_MAM_CTRL2__DBIT_TRACK_SEGMENT__SHIFT 0x3
+#define GCEA_MAM_CTRL2__ARAM_TRACK_SEGMENT__SHIFT 0x6
+#define GCEA_MAM_CTRL2__ARAM_FB_TRACK_SIZE__SHIFT 0x9
+#define GCEA_MAM_CTRL2__ARAM_RB_ENTRY_SIZE__SHIFT 0xf
+#define GCEA_MAM_CTRL2__ARAM_OVERRIDE_EA_STRAP__SHIFT 0x12
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_ENABLE__SHIFT 0x13
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_VALUE__SHIFT 0x14
+#define GCEA_MAM_CTRL2__ARAM_REMOVE_TRACKER__SHIFT 0x15
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_ENABLE__SHIFT 0x16
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_VALUE__SHIFT 0x17
+#define GCEA_MAM_CTRL2__RESERVED_FIELD__SHIFT 0x18
+#define GCEA_MAM_CTRL2__ARAM_FLUSH_DISABLE_MASK 0x00000001L
+#define GCEA_MAM_CTRL2__DBIT_PF_CLR_ONLY_MASK 0x00000002L
+#define GCEA_MAM_CTRL2__DBIT_PF_RD_ONLY_MASK 0x00000004L
+#define GCEA_MAM_CTRL2__DBIT_TRACK_SEGMENT_MASK 0x00000038L
+#define GCEA_MAM_CTRL2__ARAM_TRACK_SEGMENT_MASK 0x000001C0L
+#define GCEA_MAM_CTRL2__ARAM_FB_TRACK_SIZE_MASK 0x00007E00L
+#define GCEA_MAM_CTRL2__ARAM_RB_ENTRY_SIZE_MASK 0x00038000L
+#define GCEA_MAM_CTRL2__ARAM_OVERRIDE_EA_STRAP_MASK 0x00040000L
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_ENABLE_MASK 0x00080000L
+#define GCEA_MAM_CTRL2__ABIT_FLUSH_SPACE_OVERRIDE_VALUE_MASK 0x00100000L
+#define GCEA_MAM_CTRL2__ARAM_REMOVE_TRACKER_MASK 0x00200000L
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_ENABLE_MASK 0x00400000L
+#define GCEA_MAM_CTRL2__FORCE_DBIT_QUERY_DIRTY_VALUE_MASK 0x00800000L
+#define GCEA_MAM_CTRL2__RESERVED_FIELD_MASK 0xFF000000L
+//GCEA_MAM_CTRL
+#define GCEA_MAM_CTRL__MAM_DISABLE__SHIFT 0x0
+#define GCEA_MAM_CTRL__DBIT_COALESCE_DISABLE__SHIFT 0x1
+#define GCEA_MAM_CTRL__ARAM_COALESCE_DISABLE__SHIFT 0x2
+#define GCEA_MAM_CTRL__ARAM_FLUSH_SNOOP_EN__SHIFT 0x3
+#define GCEA_MAM_CTRL__SDMA_UPDT_ARAM__SHIFT 0x4
+#define GCEA_MAM_CTRL__ARAM_FLUSH_NOALLOC__SHIFT 0x5
+#define GCEA_MAM_CTRL__FLUSH_TRACKER__SHIFT 0x6
+#define GCEA_MAM_CTRL__CLEAR_TRACKER__SHIFT 0x7
+#define GCEA_MAM_CTRL__SDP_PRIORITY__SHIFT 0x8
+#define GCEA_MAM_CTRL__FORCE_FLUSH_UPDT_TRACKER__SHIFT 0xc
+#define GCEA_MAM_CTRL__FORCE_FLUSH_GEN_INTERRUPT__SHIFT 0xd
+#define GCEA_MAM_CTRL__TIMER_FLUSH_UPDT_TRACKER__SHIFT 0xe
+#define GCEA_MAM_CTRL__TIMER_FLUSH_GEN_INTERRUPT__SHIFT 0xf
+#define GCEA_MAM_CTRL__RESERVED_FIELD__SHIFT 0x10
+#define GCEA_MAM_CTRL__ARAM_NUM_RB_ENTRIES__SHIFT 0x17
+#define GCEA_MAM_CTRL__ARAM_RB_ADDR_HI__SHIFT 0x1c
+#define GCEA_MAM_CTRL__MAM_DISABLE_MASK 0x00000001L
+#define GCEA_MAM_CTRL__DBIT_COALESCE_DISABLE_MASK 0x00000002L
+#define GCEA_MAM_CTRL__ARAM_COALESCE_DISABLE_MASK 0x00000004L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_SNOOP_EN_MASK 0x00000008L
+#define GCEA_MAM_CTRL__SDMA_UPDT_ARAM_MASK 0x00000010L
+#define GCEA_MAM_CTRL__ARAM_FLUSH_NOALLOC_MASK 0x00000020L
+#define GCEA_MAM_CTRL__FLUSH_TRACKER_MASK 0x00000040L
+#define GCEA_MAM_CTRL__CLEAR_TRACKER_MASK 0x00000080L
+#define GCEA_MAM_CTRL__SDP_PRIORITY_MASK 0x00000F00L
+#define GCEA_MAM_CTRL__FORCE_FLUSH_UPDT_TRACKER_MASK 0x00001000L
+#define GCEA_MAM_CTRL__FORCE_FLUSH_GEN_INTERRUPT_MASK 0x00002000L
+#define GCEA_MAM_CTRL__TIMER_FLUSH_UPDT_TRACKER_MASK 0x00004000L
+#define GCEA_MAM_CTRL__TIMER_FLUSH_GEN_INTERRUPT_MASK 0x00008000L
+#define GCEA_MAM_CTRL__RESERVED_FIELD_MASK 0x007F0000L
+#define GCEA_MAM_CTRL__ARAM_NUM_RB_ENTRIES_MASK 0x0F800000L
+#define GCEA_MAM_CTRL__ARAM_RB_ADDR_HI_MASK 0xF0000000L
+//GCEA_EDC_CNT
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT__IOWR_DATAMEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT__IOWR_DATAMEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0xC0000000L
+//GCEA_EDC_CNT2
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_DSM_CNTL
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+//GCEA_DSM_CNTLA
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+//GCEA_DSM_CNTLB
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA__SHIFT 0x0
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA__SHIFT 0x3
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x5
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA__SHIFT 0x6
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x8
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA__SHIFT 0x9
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE__SHIFT 0xb
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA__SHIFT 0xc
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE__SHIFT 0xe
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA__SHIFT 0xf
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE__SHIFT 0x11
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA__SHIFT 0x12
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE__SHIFT 0x14
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA__SHIFT 0x15
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE__SHIFT 0x17
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA__SHIFT 0x18
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE__SHIFT 0x1a
+#define GCEA_DSM_CNTLB__MAM_D0MEM_DSM_IRRITATOR_DATA_MASK 0x00000003L
+#define GCEA_DSM_CNTLB__MAM_D0MEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_DSM_IRRITATOR_DATA_MASK 0x00000018L
+#define GCEA_DSM_CNTLB__MAM_D1MEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L
+#define GCEA_DSM_CNTLB__MAM_D2MEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_DSM_IRRITATOR_DATA_MASK 0x00000600L
+#define GCEA_DSM_CNTLB__MAM_D3MEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_DSM_IRRITATOR_DATA_MASK 0x00003000L
+#define GCEA_DSM_CNTLB__MAM_A0MEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_DSM_IRRITATOR_DATA_MASK 0x00018000L
+#define GCEA_DSM_CNTLB__MAM_A1MEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L
+#define GCEA_DSM_CNTLB__MAM_A2MEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_DSM_IRRITATOR_DATA_MASK 0x00600000L
+#define GCEA_DSM_CNTLB__MAM_A3MEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_DSM_IRRITATOR_DATA_MASK 0x03000000L
+#define GCEA_DSM_CNTLB__MAM_AFMEM_ENABLE_SINGLE_WRITE_MASK 0x04000000L
+//GCEA_DSM_CNTL2
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L
+//GCEA_DSM_CNTL2A
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+//GCEA_DSM_CNTL2B
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT__SHIFT 0x0
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY__SHIFT 0x2
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT__SHIFT 0x3
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY__SHIFT 0x5
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT__SHIFT 0x6
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY__SHIFT 0x8
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT__SHIFT 0x9
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY__SHIFT 0xb
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT__SHIFT 0xc
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY__SHIFT 0xe
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT__SHIFT 0xf
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY__SHIFT 0x11
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT__SHIFT 0x12
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY__SHIFT 0x14
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT__SHIFT 0x15
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY__SHIFT 0x17
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT__SHIFT 0x18
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY__SHIFT 0x1a
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_ENABLE_ERROR_INJECT_MASK 0x00000003L
+#define GCEA_DSM_CNTL2B__MAM_D0MEM_SELECT_INJECT_DELAY_MASK 0x00000004L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_ENABLE_ERROR_INJECT_MASK 0x00000018L
+#define GCEA_DSM_CNTL2B__MAM_D1MEM_SELECT_INJECT_DELAY_MASK 0x00000020L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L
+#define GCEA_DSM_CNTL2B__MAM_D2MEM_SELECT_INJECT_DELAY_MASK 0x00000100L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_ENABLE_ERROR_INJECT_MASK 0x00000600L
+#define GCEA_DSM_CNTL2B__MAM_D3MEM_SELECT_INJECT_DELAY_MASK 0x00000800L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_ENABLE_ERROR_INJECT_MASK 0x00003000L
+#define GCEA_DSM_CNTL2B__MAM_A0MEM_SELECT_INJECT_DELAY_MASK 0x00004000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_ENABLE_ERROR_INJECT_MASK 0x00018000L
+#define GCEA_DSM_CNTL2B__MAM_A1MEM_SELECT_INJECT_DELAY_MASK 0x00020000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L
+#define GCEA_DSM_CNTL2B__MAM_A2MEM_SELECT_INJECT_DELAY_MASK 0x00100000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_ENABLE_ERROR_INJECT_MASK 0x00600000L
+#define GCEA_DSM_CNTL2B__MAM_A3MEM_SELECT_INJECT_DELAY_MASK 0x00800000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_ENABLE_ERROR_INJECT_MASK 0x03000000L
+#define GCEA_DSM_CNTL2B__MAM_AFMEM_SELECT_INJECT_DELAY_MASK 0x04000000L
+//GCEA_GL2C_XBR_CREDITS
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_LIMIT__SHIFT 0x0
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_RESERVE__SHIFT 0x6
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_LIMIT__SHIFT 0x8
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_RESERVE__SHIFT 0xe
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_LIMIT__SHIFT 0x10
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_RESERVE__SHIFT 0x16
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_LIMIT__SHIFT 0x18
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_RESERVE__SHIFT 0x1e
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_LIMIT_MASK 0x0000003FL
+#define GCEA_GL2C_XBR_CREDITS__DRAM_RD_RESERVE_MASK 0x000000C0L
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_LIMIT_MASK 0x00003F00L
+#define GCEA_GL2C_XBR_CREDITS__IO_RD_RESERVE_MASK 0x0000C000L
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_LIMIT_MASK 0x003F0000L
+#define GCEA_GL2C_XBR_CREDITS__DRAM_WR_RESERVE_MASK 0x00C00000L
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_LIMIT_MASK 0x3F000000L
+#define GCEA_GL2C_XBR_CREDITS__IO_WR_RESERVE_MASK 0xC0000000L
+//GCEA_GL2C_XBR_MAXBURST
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD__SHIFT 0x0
+#define GCEA_GL2C_XBR_MAXBURST__IO_RD__SHIFT 0x4
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR__SHIFT 0x8
+#define GCEA_GL2C_XBR_MAXBURST__IO_WR__SHIFT 0xc
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_FLUSH_TIMER__SHIFT 0x10
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_SAME64B_ONLY__SHIFT 0x13
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_FLUSH_TIMER__SHIFT 0x14
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_SAME64B_ONLY__SHIFT 0x17
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_MASK 0x0000000FL
+#define GCEA_GL2C_XBR_MAXBURST__IO_RD_MASK 0x000000F0L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_MASK 0x00000F00L
+#define GCEA_GL2C_XBR_MAXBURST__IO_WR_MASK 0x0000F000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_FLUSH_TIMER_MASK 0x00070000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_RD_COMB_SAME64B_ONLY_MASK 0x00080000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_FLUSH_TIMER_MASK 0x00700000L
+#define GCEA_GL2C_XBR_MAXBURST__DRAM_WR_COMB_SAME64B_ONLY_MASK 0x00800000L
+//GCEA_PROBE_CNTL
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY__SHIFT 0x0
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE__SHIFT 0x5
+#define GCEA_PROBE_CNTL__REQ2RSP_DELAY_MASK 0x0000001FL
+#define GCEA_PROBE_CNTL__PRB_FILTER_DISABLE_MASK 0x00000020L
+//GCEA_PROBE_MAP
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTGL2C__SHIFT 0x0
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTGL2C__SHIFT 0x1
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTGL2C__SHIFT 0x2
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTGL2C__SHIFT 0x3
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTGL2C__SHIFT 0x4
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTGL2C__SHIFT 0x5
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTGL2C__SHIFT 0x6
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTGL2C__SHIFT 0x7
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTGL2C__SHIFT 0x8
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTGL2C__SHIFT 0x9
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTGL2C__SHIFT 0xa
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTGL2C__SHIFT 0xb
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTGL2C__SHIFT 0xc
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTGL2C__SHIFT 0xd
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTGL2C__SHIFT 0xe
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTGL2C__SHIFT 0xf
+#define GCEA_PROBE_MAP__INTLV_SIZE__SHIFT 0x10
+#define GCEA_PROBE_MAP__CHADDR0_TO_RIGHTGL2C_MASK 0x00000001L
+#define GCEA_PROBE_MAP__CHADDR1_TO_RIGHTGL2C_MASK 0x00000002L
+#define GCEA_PROBE_MAP__CHADDR2_TO_RIGHTGL2C_MASK 0x00000004L
+#define GCEA_PROBE_MAP__CHADDR3_TO_RIGHTGL2C_MASK 0x00000008L
+#define GCEA_PROBE_MAP__CHADDR4_TO_RIGHTGL2C_MASK 0x00000010L
+#define GCEA_PROBE_MAP__CHADDR5_TO_RIGHTGL2C_MASK 0x00000020L
+#define GCEA_PROBE_MAP__CHADDR6_TO_RIGHTGL2C_MASK 0x00000040L
+#define GCEA_PROBE_MAP__CHADDR7_TO_RIGHTGL2C_MASK 0x00000080L
+#define GCEA_PROBE_MAP__CHADDR8_TO_RIGHTGL2C_MASK 0x00000100L
+#define GCEA_PROBE_MAP__CHADDR9_TO_RIGHTGL2C_MASK 0x00000200L
+#define GCEA_PROBE_MAP__CHADDR10_TO_RIGHTGL2C_MASK 0x00000400L
+#define GCEA_PROBE_MAP__CHADDR11_TO_RIGHTGL2C_MASK 0x00000800L
+#define GCEA_PROBE_MAP__CHADDR12_TO_RIGHTGL2C_MASK 0x00001000L
+#define GCEA_PROBE_MAP__CHADDR13_TO_RIGHTGL2C_MASK 0x00002000L
+#define GCEA_PROBE_MAP__CHADDR14_TO_RIGHTGL2C_MASK 0x00004000L
+#define GCEA_PROBE_MAP__CHADDR15_TO_RIGHTGL2C_MASK 0x00008000L
+#define GCEA_PROBE_MAP__INTLV_SIZE_MASK 0x00030000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED__SHIFT 0xe
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL__SHIFT 0xf
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL__SHIFT 0x10
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT__SHIFT 0x11
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+#define GCEA_ERR_STATUS__IGNORE_RDRSP_FED_MASK 0x00004000L
+#define GCEA_ERR_STATUS__INTERRUPT_ON_FATAL_MASK 0x00008000L
+#define GCEA_ERR_STATUS__INTERRUPT_IGNORE_CLI_FATAL_MASK 0x00010000L
+#define GCEA_ERR_STATUS__LEVEL_INTERRUPT_MASK 0x00020000L
+//GCEA_MISC2
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc
+#define GCEA_MISC2__BLOCK_REQUESTS__SHIFT 0xd
+#define GCEA_MISC2__REQUESTS_BLOCKED__SHIFT 0xe
+#define GCEA_MISC2__FGCLKEN_OVERRIDE__SHIFT 0xf
+#define GCEA_MISC2__LINKMGR_CRBUSY_MASK__SHIFT 0x10
+#define GCEA_MISC2__RDRET_FED_MASK__SHIFT 0x11
+#define GCEA_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L
+#define GCEA_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL
+#define GCEA_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L
+#define GCEA_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L
+#define GCEA_MISC2__BLOCK_REQUESTS_MASK 0x00002000L
+#define GCEA_MISC2__REQUESTS_BLOCKED_MASK 0x00004000L
+#define GCEA_MISC2__FGCLKEN_OVERRIDE_MASK 0x00008000L
+#define GCEA_MISC2__LINKMGR_CRBUSY_MASK_MASK 0x00010000L
+#define GCEA_MISC2__RDRET_FED_MASK_MASK 0x00020000L
+
+
+// addressBlock: gc_gceadec3
+//GCEA_SDP_BACKDOOR_CMDCREDITS0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_CMDCREDITS1
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_CMDCREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED__SHIFT 0x7
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED__SHIFT 0xe
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED__SHIFT 0x15
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED__SHIFT 0x1c
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC0_CREDITS_RECEIVED_MASK 0x0000007FL
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC1_CREDITS_RECEIVED_MASK 0x00003F80L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC2_CREDITS_RECEIVED_MASK 0x001FC000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC3_CREDITS_RECEIVED_MASK 0x0FE00000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS0__VC4_CREDITS_RECEIVED_MASK 0xF0000000L
+//GCEA_SDP_BACKDOOR_DATACREDITS1
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED__SHIFT 0x3
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED__SHIFT 0xa
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED__SHIFT 0x11
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED__SHIFT 0x18
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC4_CREDITS_RECEIVED_MASK 0x00000007L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC5_CREDITS_RECEIVED_MASK 0x000003F8L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC6_CREDITS_RECEIVED_MASK 0x0001FC00L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__VC7_CREDITS_RECEIVED_MASK 0x00FE0000L
+#define GCEA_SDP_BACKDOOR_DATACREDITS1__POOL_CREDITS_RECEIVED_MASK 0x7F000000L
+//GCEA_SDP_BACKDOOR_MISCCREDITS
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED__SHIFT 0x0
+#define GCEA_SDP_BACKDOOR_MISCCREDITS__PRB_RSP_CREDITS_RECEIVED_MASK 0x0000007FL
+//GCEA_RRET_MEM_RESERVE
+#define GCEA_RRET_MEM_RESERVE__VC0__SHIFT 0x0
+#define GCEA_RRET_MEM_RESERVE__VC1__SHIFT 0x4
+#define GCEA_RRET_MEM_RESERVE__VC2__SHIFT 0x8
+#define GCEA_RRET_MEM_RESERVE__VC3__SHIFT 0xc
+#define GCEA_RRET_MEM_RESERVE__VC4__SHIFT 0x10
+#define GCEA_RRET_MEM_RESERVE__VC5__SHIFT 0x14
+#define GCEA_RRET_MEM_RESERVE__VC6__SHIFT 0x18
+#define GCEA_RRET_MEM_RESERVE__VC7__SHIFT 0x1c
+#define GCEA_RRET_MEM_RESERVE__VC0_MASK 0x0000000FL
+#define GCEA_RRET_MEM_RESERVE__VC1_MASK 0x000000F0L
+#define GCEA_RRET_MEM_RESERVE__VC2_MASK 0x00000F00L
+#define GCEA_RRET_MEM_RESERVE__VC3_MASK 0x0000F000L
+#define GCEA_RRET_MEM_RESERVE__VC4_MASK 0x000F0000L
+#define GCEA_RRET_MEM_RESERVE__VC5_MASK 0x00F00000L
+#define GCEA_RRET_MEM_RESERVE__VC6_MASK 0x0F000000L
+#define GCEA_RRET_MEM_RESERVE__VC7_MASK 0xF0000000L
+//GCEA_EDC_CNT3
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0x8
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xa
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0xc
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0xe
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x10
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x12
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x14
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x16
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x18
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1a
+#define GCEA_EDC_CNT3__MAM_AFMEM_SEC_COUNT__SHIFT 0x1c
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0x1e
+#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L
+#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL
+#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L
+#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L
+#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000300L
+#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00000C00L
+#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00003000L
+#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x0000C000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00030000L
+#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x000C0000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x00300000L
+#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x00C00000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x03000000L
+#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0x0C000000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_SEC_COUNT_MASK 0x30000000L
+#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_SDP_ENABLE
+#define GCEA_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GCEA_SDP_ENABLE__EARLY_CREDIT_REQUEST__SHIFT 0x1
+#define GCEA_SDP_ENABLE__ENABLE_MASK 0x00000001L
+#define GCEA_SDP_ENABLE__EARLY_CREDIT_REQUEST_MASK 0x00000002L
+
+
+// addressBlock: gc_spipdec2
+//SPI_PQEV_CTRL
+#define SPI_PQEV_CTRL__SCAN_PERIOD__SHIFT 0x0
+#define SPI_PQEV_CTRL__QUEUE_DURATION__SHIFT 0xa
+#define SPI_PQEV_CTRL__COMPUTE_PIPE_EN__SHIFT 0x10
+#define SPI_PQEV_CTRL__SCAN_PERIOD_MASK 0x000003FFL
+#define SPI_PQEV_CTRL__QUEUE_DURATION_MASK 0x0000FC00L
+#define SPI_PQEV_CTRL__COMPUTE_PIPE_EN_MASK 0x00FF0000L
+//SPI_EXP_THROTTLE_CTRL
+#define SPI_EXP_THROTTLE_CTRL__ENABLE__SHIFT 0x0
+#define SPI_EXP_THROTTLE_CTRL__PERIOD__SHIFT 0x1
+#define SPI_EXP_THROTTLE_CTRL__UPSTEP__SHIFT 0x5
+#define SPI_EXP_THROTTLE_CTRL__DOWNSTEP__SHIFT 0x9
+#define SPI_EXP_THROTTLE_CTRL__LOW_STALL_MON_HIST_COUNT__SHIFT 0xd
+#define SPI_EXP_THROTTLE_CTRL__HIGH_STALL_MON_HIST_COUNT__SHIFT 0x10
+#define SPI_EXP_THROTTLE_CTRL__EXP_STALL_THRESHOLD__SHIFT 0x13
+#define SPI_EXP_THROTTLE_CTRL__SKEW_COUNT__SHIFT 0x1a
+#define SPI_EXP_THROTTLE_CTRL__THROTTLE_RESET__SHIFT 0x1d
+#define SPI_EXP_THROTTLE_CTRL__ENABLE_MASK 0x00000001L
+#define SPI_EXP_THROTTLE_CTRL__PERIOD_MASK 0x0000001EL
+#define SPI_EXP_THROTTLE_CTRL__UPSTEP_MASK 0x000001E0L
+#define SPI_EXP_THROTTLE_CTRL__DOWNSTEP_MASK 0x00001E00L
+#define SPI_EXP_THROTTLE_CTRL__LOW_STALL_MON_HIST_COUNT_MASK 0x0000E000L
+#define SPI_EXP_THROTTLE_CTRL__HIGH_STALL_MON_HIST_COUNT_MASK 0x00070000L
+#define SPI_EXP_THROTTLE_CTRL__EXP_STALL_THRESHOLD_MASK 0x03F80000L
+#define SPI_EXP_THROTTLE_CTRL__SKEW_COUNT_MASK 0x1C000000L
+#define SPI_EXP_THROTTLE_CTRL__THROTTLE_RESET_MASK 0x20000000L
+
+
+// addressBlock: gc_rmi_rmidec
+//RMI_GENERAL_CNTL
+#define RMI_GENERAL_CNTL__BURST_DISABLE__SHIFT 0x0
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE__SHIFT 0x1
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN__SHIFT 0x13
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE__SHIFT 0x15
+#define RMI_GENERAL_CNTL__BURST_DISABLE_MASK 0x00000001L
+#define RMI_GENERAL_CNTL__VMID_BYPASS_ENABLE_MASK 0x0001FFFEL
+#define RMI_GENERAL_CNTL__RB0_HARVEST_EN_MASK 0x00080000L
+#define RMI_GENERAL_CNTL__LOOPBACK_DIS_BY_REQ_TYPE_MASK 0x01E00000L
+//RMI_GENERAL_CNTL1
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE__SHIFT 0x0
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE__SHIFT 0x4
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE__SHIFT 0x6
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK__SHIFT 0x8
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE__SHIFT 0x9
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE__SHIFT 0xb
+#define RMI_GENERAL_CNTL1__ARBITER_ADDRESS_CHANGE_ENABLE__SHIFT 0xe
+#define RMI_GENERAL_CNTL1__LAST_OF_BURST_INSERTION_DISABLE__SHIFT 0xf
+#define RMI_GENERAL_CNTL1__TCIW0_PRODUCER_CREDITS__SHIFT 0x10
+#define RMI_GENERAL_CNTL1__TCIW1_PRODUCER_CREDITS__SHIFT 0x16
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_ENABLE_PER_MTYPE_MASK 0x0000000FL
+#define RMI_GENERAL_CNTL1__TCIW0_64B_RD_STALL_MODE_MASK 0x00000030L
+#define RMI_GENERAL_CNTL1__TCIW1_64B_RD_STALL_MODE_MASK 0x000000C0L
+#define RMI_GENERAL_CNTL1__EARLY_WRACK_DISABLE_FOR_LOOPBACK_MASK 0x00000100L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_VALUE_MASK 0x00000600L
+#define RMI_GENERAL_CNTL1__POLICY_OVERRIDE_MASK 0x00000800L
+#define RMI_GENERAL_CNTL1__ARBITER_ADDRESS_CHANGE_ENABLE_MASK 0x00004000L
+#define RMI_GENERAL_CNTL1__LAST_OF_BURST_INSERTION_DISABLE_MASK 0x00008000L
+#define RMI_GENERAL_CNTL1__TCIW0_PRODUCER_CREDITS_MASK 0x003F0000L
+#define RMI_GENERAL_CNTL1__TCIW1_PRODUCER_CREDITS_MASK 0x0FC00000L
+//RMI_GENERAL_STATUS
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED__SHIFT 0x0
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR__SHIFT 0x1
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR__SHIFT 0x2
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR__SHIFT 0x3
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR__SHIFT 0x4
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY__SHIFT 0x5
+#define RMI_GENERAL_STATUS__RESERVED_BIT_6__SHIFT 0x6
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY__SHIFT 0x7
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY__SHIFT 0x8
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY__SHIFT 0x9
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY__SHIFT 0xa
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xb
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY__SHIFT 0xc
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY__SHIFT 0xd
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY__SHIFT 0xe
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY__SHIFT 0xf
+#define RMI_GENERAL_STATUS__RESERVED_BIT_18__SHIFT 0x12
+#define RMI_GENERAL_STATUS__RESERVED_BIT_19__SHIFT 0x13
+#define RMI_GENERAL_STATUS__RESERVED_BIT_20__SHIFT 0x14
+#define RMI_GENERAL_STATUS__RESERVED_BITS_28_21__SHIFT 0x15
+#define RMI_GENERAL_STATUS__RESERVED_BIT_29__SHIFT 0x1d
+#define RMI_GENERAL_STATUS__RESERVED_BIT_30__SHIFT 0x1e
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR__SHIFT 0x1f
+#define RMI_GENERAL_STATUS__GENERAL_RMI_ERRORS_COMBINED_MASK 0x00000001L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_OVERFLOW_ERROR_MASK 0x00000002L
+#define RMI_GENERAL_STATUS__SKID_FIFO_0_UNDERFLOW_ERROR_MASK 0x00000004L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_OVERFLOW_ERROR_MASK 0x00000008L
+#define RMI_GENERAL_STATUS__SKID_FIFO_1_UNDERFLOW_ERROR_MASK 0x00000010L
+#define RMI_GENERAL_STATUS__RMI_XBAR_BUSY_MASK 0x00000020L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_6_MASK 0x00000040L
+#define RMI_GENERAL_STATUS__RMI_SCOREBOARD_BUSY_MASK 0x00000080L
+#define RMI_GENERAL_STATUS__TCIW0_PRT_FIFO_BUSY_MASK 0x00000100L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR0_BUSY_MASK 0x00000200L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR0_BUSY_MASK 0x00000400L
+#define RMI_GENERAL_STATUS__WRREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00000800L
+#define RMI_GENERAL_STATUS__RDREQ_CONSUMER_FIFO_0_BUSY_MASK 0x00001000L
+#define RMI_GENERAL_STATUS__TCIW1_PRT_FIFO_BUSY_MASK 0x00002000L
+#define RMI_GENERAL_STATUS__TCIW_FRMTR1_BUSY_MASK 0x00004000L
+#define RMI_GENERAL_STATUS__TCIW_RTN_FRMTR1_BUSY_MASK 0x00008000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_18_MASK 0x00040000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_19_MASK 0x00080000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_20_MASK 0x00100000L
+#define RMI_GENERAL_STATUS__RESERVED_BITS_28_21_MASK 0x1FE00000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_29_MASK 0x20000000L
+#define RMI_GENERAL_STATUS__RESERVED_BIT_30_MASK 0x40000000L
+#define RMI_GENERAL_STATUS__SKID_FIFO_FREESPACE_IS_ZERO_ERROR_MASK 0x80000000L
+//RMI_SUBBLOCK_STATUS0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0__SHIFT 0x7
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0__SHIFT 0x8
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1__SHIFT 0x10
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1__SHIFT 0x11
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT__SHIFT 0x12
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE0_MASK 0x0000007FL
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE0_MASK 0x00000080L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE0_MASK 0x00000100L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_NUM_USED_PROBE1_MASK 0x0000FE00L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_FULL_PROBE1_MASK 0x00010000L
+#define RMI_SUBBLOCK_STATUS0__UTC_EXT_LAT_HID_FIFO_EMPTY_PROBE1_MASK 0x00020000L
+#define RMI_SUBBLOCK_STATUS0__TCIW0_INFLIGHT_CNT_MASK 0x0FFC0000L
+//RMI_SUBBLOCK_STATUS1
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT__SHIFT 0x14
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_0_FREE_SPACE_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS1__SKID_FIFO_1_FREE_SPACE_MASK 0x000FFC00L
+#define RMI_SUBBLOCK_STATUS1__TCIW1_INFLIGHT_CNT_MASK 0x3FF00000L
+//RMI_SUBBLOCK_STATUS2
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED__SHIFT 0x9
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_0_NUM_USED_MASK 0x000001FFL
+#define RMI_SUBBLOCK_STATUS2__PRT_FIFO_1_NUM_USED_MASK 0x0003FE00L
+//RMI_SUBBLOCK_STATUS3
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL__SHIFT 0x0
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL__SHIFT 0xa
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_0_FREE_SPACE_TOTAL_MASK 0x000003FFL
+#define RMI_SUBBLOCK_STATUS3__SKID_FIFO_1_FREE_SPACE_TOTAL_MASK 0x000FFC00L
+//RMI_XBAR_CONFIG
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE__SHIFT 0x0
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE__SHIFT 0x2
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_CONFIG__ARBITER_DIS__SHIFT 0x7
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ__SHIFT 0x8
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE__SHIFT 0xc
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0__SHIFT 0xd
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_OVERRIDE_MASK 0x00000003L
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_REQ_TYPE_OVERRIDE_MASK 0x0000003CL
+#define RMI_XBAR_CONFIG__XBAR_MUX_CONFIG_CB_DB_OVERRIDE_MASK 0x00000040L
+#define RMI_XBAR_CONFIG__ARBITER_DIS_MASK 0x00000080L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_MASK 0x00000F00L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_REQ_OVERRIDE_MASK 0x00001000L
+#define RMI_XBAR_CONFIG__XBAR_EN_IN_RB0_MASK 0x00002000L
+//RMI_PROBE_POP_LOGIC_CNTL
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH__SHIFT 0x0
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS__SHIFT 0x7
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2__SHIFT 0x8
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH__SHIFT 0xa
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS__SHIFT 0x11
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_0_MAX_DEPTH_MASK 0x0000007FL
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE0_DIS_MASK 0x00000080L
+#define RMI_PROBE_POP_LOGIC_CNTL__REDUCE_MAX_XLAT_CHAIN_SIZE_BY_2_MASK 0x00000300L
+#define RMI_PROBE_POP_LOGIC_CNTL__EXT_LAT_FIFO_1_MAX_DEPTH_MASK 0x0001FC00L
+#define RMI_PROBE_POP_LOGIC_CNTL__XLAT_COMBINE1_DIS_MASK 0x00020000L
+//RMI_UTC_XNACK_N_MISC_CNTL
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC__SHIFT 0x0
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE__SHIFT 0xc
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE__SHIFT 0xd
+#define RMI_UTC_XNACK_N_MISC_CNTL__MASTER_XNACK_TIMER_INC_MASK 0x000000FFL
+#define RMI_UTC_XNACK_N_MISC_CNTL__IND_XNACK_TIMER_START_VALUE_MASK 0x00000F00L
+#define RMI_UTC_XNACK_N_MISC_CNTL__UTCL1_PERM_MODE_MASK 0x00001000L
+#define RMI_UTC_XNACK_N_MISC_CNTL__CP_VMID_RESET_REQUEST_DISABLE_MASK 0x00002000L
+//RMI_DEMUX_CNTL
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_OVERRIDE_EN__SHIFT 0x2
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x6
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE__SHIFT 0xe
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_OVERRIDE_EN__SHIFT 0x12
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x16
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE__SHIFT 0x1e
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_OVERRIDE_EN_MASK 0x00000004L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_STALL_TIMER_START_VALUE_MASK 0x00003FC0L
+#define RMI_DEMUX_CNTL__DEMUX_ARB0_MODE_MASK 0x0000C000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_OVERRIDE_EN_MASK 0x00040000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_STALL_TIMER_START_VALUE_MASK 0x3FC00000L
+#define RMI_DEMUX_CNTL__DEMUX_ARB1_MODE_MASK 0xC0000000L
+//RMI_UTCL1_CNTL1
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP__SHIFT 0x0
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF__SHIFT 0x1
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE__SHIFT 0x2
+#define RMI_UTCL1_CNTL1__RESP_MODE__SHIFT 0x3
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE__SHIFT 0x5
+#define RMI_UTCL1_CNTL1__CLIENTID__SHIFT 0x7
+#define RMI_UTCL1_CNTL1__USERVM_DIS__SHIFT 0x10
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO__SHIFT 0x11
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB__SHIFT 0x12
+#define RMI_UTCL1_CNTL1__REG_INV_VMID__SHIFT 0x13
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID__SHIFT 0x17
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE__SHIFT 0x18
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID__SHIFT 0x19
+#define RMI_UTCL1_CNTL1__FORCE_MISS__SHIFT 0x1a
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER__SHIFT 0x1b
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2__SHIFT 0x1c
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2__SHIFT 0x1e
+#define RMI_UTCL1_CNTL1__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define RMI_UTCL1_CNTL1__GPUVM_64K_DEF_MASK 0x00000002L
+#define RMI_UTCL1_CNTL1__GPUVM_PERM_MODE_MASK 0x00000004L
+#define RMI_UTCL1_CNTL1__RESP_MODE_MASK 0x00000018L
+#define RMI_UTCL1_CNTL1__RESP_FAULT_MODE_MASK 0x00000060L
+#define RMI_UTCL1_CNTL1__CLIENTID_MASK 0x0000FF80L
+#define RMI_UTCL1_CNTL1__USERVM_DIS_MASK 0x00010000L
+#define RMI_UTCL1_CNTL1__ENABLE_PUSH_LFIFO_MASK 0x00020000L
+#define RMI_UTCL1_CNTL1__ENABLE_LFIFO_PRI_ARB_MASK 0x00040000L
+#define RMI_UTCL1_CNTL1__REG_INV_VMID_MASK 0x00780000L
+#define RMI_UTCL1_CNTL1__REG_INV_ALL_VMID_MASK 0x00800000L
+#define RMI_UTCL1_CNTL1__REG_INV_TOGGLE_MASK 0x01000000L
+#define RMI_UTCL1_CNTL1__CLIENT_INVALIDATE_ALL_VMID_MASK 0x02000000L
+#define RMI_UTCL1_CNTL1__FORCE_MISS_MASK 0x04000000L
+#define RMI_UTCL1_CNTL1__FORCE_IN_ORDER_MASK 0x08000000L
+#define RMI_UTCL1_CNTL1__REDUCE_FIFO_DEPTH_BY_2_MASK 0x30000000L
+#define RMI_UTCL1_CNTL1__REDUCE_CACHE_SIZE_BY_2_MASK 0xC0000000L
+//RMI_UTCL1_CNTL2
+#define RMI_UTCL1_CNTL2__UTC_SPARE__SHIFT 0x0
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define RMI_UTCL1_CNTL2__LINE_VALID__SHIFT 0xa
+#define RMI_UTCL1_CNTL2__DIS_EDC__SHIFT 0xb
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE__SHIFT 0xc
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT__SHIFT 0xd
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK__SHIFT 0xf
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE__SHIFT 0x10
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR__SHIFT 0x12
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR__SHIFT 0x13
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID__SHIFT 0x14
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID__SHIFT 0x15
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ__SHIFT 0x19
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define RMI_UTCL1_CNTL2__PERM_MODE_OVRD__SHIFT 0x1b
+#define RMI_UTCL1_CNTL2__LINE_INVALIDATE_OPT__SHIFT 0x1c
+#define RMI_UTCL1_CNTL2__GPUVM_16K_DEFAULT__SHIFT 0x1d
+#define RMI_UTCL1_CNTL2__FGCG_DISABLE__SHIFT 0x1e
+#define RMI_UTCL1_CNTL2__RESERVED__SHIFT 0x1f
+#define RMI_UTCL1_CNTL2__UTC_SPARE_MASK 0x000000FFL
+#define RMI_UTCL1_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define RMI_UTCL1_CNTL2__LINE_VALID_MASK 0x00000400L
+#define RMI_UTCL1_CNTL2__DIS_EDC_MASK 0x00000800L
+#define RMI_UTCL1_CNTL2__GPUVM_INV_MODE_MASK 0x00001000L
+#define RMI_UTCL1_CNTL2__SHOOTDOWN_OPT_MASK 0x00002000L
+#define RMI_UTCL1_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define RMI_UTCL1_CNTL2__FORCE_GPUVM_INV_ACK_MASK 0x00008000L
+#define RMI_UTCL1_CNTL2__UTCL1_ARB_BURST_MODE_MASK 0x00030000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_RD_WR_MASK 0x00040000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_RD_WR_MASK 0x00080000L
+#define RMI_UTCL1_CNTL2__UTCL1_ENABLE_PERF_EVENT_VMID_MASK 0x00100000L
+#define RMI_UTCL1_CNTL2__UTCL1_PERF_EVENT_VMID_MASK 0x01E00000L
+#define RMI_UTCL1_CNTL2__UTCL1_DIS_DUAL_L2_REQ_MASK 0x02000000L
+#define RMI_UTCL1_CNTL2__UTCL1_FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define RMI_UTCL1_CNTL2__PERM_MODE_OVRD_MASK 0x08000000L
+#define RMI_UTCL1_CNTL2__LINE_INVALIDATE_OPT_MASK 0x10000000L
+#define RMI_UTCL1_CNTL2__GPUVM_16K_DEFAULT_MASK 0x20000000L
+#define RMI_UTCL1_CNTL2__FGCG_DISABLE_MASK 0x40000000L
+#define RMI_UTCL1_CNTL2__RESERVED_MASK 0x80000000L
+//RMI_UTC_UNIT_CONFIG
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN__SHIFT 0x0
+#define RMI_UTC_UNIT_CONFIG__TMZ_REQ_EN_MASK 0x0000FFFFL
+//RMI_TCIW_FORMATTER0_CNTL
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER0_CNTL__TCIW0_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER0_CNTL__RMI_IN0_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER0_CNTL__ALL_FAULT_RET0_DATA_MASK 0x80000000L
+//RMI_TCIW_FORMATTER1_CNTL
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE__SHIFT 0x0
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW__SHIFT 0x1
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ__SHIFT 0x9
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS__SHIFT 0x1d
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST__SHIFT 0x1e
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA__SHIFT 0x1f
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_OVERRIDE_MASK 0x00000001L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_TIME_OUT_WINDOW_MASK 0x000001FEL
+#define RMI_TCIW_FORMATTER1_CNTL__TCIW1_MAX_ALLOWED_INFLIGHT_REQ_MASK 0x0007FE00L
+#define RMI_TCIW_FORMATTER1_CNTL__RMI_IN1_REORDER_DIS_MASK 0x20000000L
+#define RMI_TCIW_FORMATTER1_CNTL__WR_COMBINE1_DIS_AT_LAST_OF_BURST_MASK 0x40000000L
+#define RMI_TCIW_FORMATTER1_CNTL__ALL_FAULT_RET1_DATA_MASK 0x80000000L
+//RMI_SCOREBOARD_CNTL
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH__SHIFT 0x0
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0__SHIFT 0x1
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH__SHIFT 0x2
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1__SHIFT 0x3
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE__SHIFT 0x6
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE__SHIFT 0x9
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB0_FLUSH_MASK 0x00000001L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB0_MASK 0x00000002L
+#define RMI_SCOREBOARD_CNTL__COMPLETE_RB1_FLUSH_MASK 0x00000004L
+#define RMI_SCOREBOARD_CNTL__REQ_IN_RE_EN_AFTER_FLUSH_RB1_MASK 0x00000008L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_SCOREBOARD_CNTL__VMID_INVAL_FLUSH_TYPE_OVERRIDE_VALUE_MASK 0x00000040L
+#define RMI_SCOREBOARD_CNTL__FORCE_VMID_INVAL_DONE_TIMER_START_VALUE_MASK 0x001FFE00L
+//RMI_SCOREBOARD_STATUS0
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG__SHIFT 0x1
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID__SHIFT 0x2
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE__SHIFT 0x12
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE__SHIFT 0x13
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE__SHIFT 0x14
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE__SHIFT 0x15
+#define RMI_SCOREBOARD_STATUS0__COUNTER_SELECT__SHIFT 0x16
+#define RMI_SCOREBOARD_STATUS0__CURRENT_SESSION_ID_MASK 0x00000001L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_IN_PROG_MASK 0x00000002L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_REQ_VMID_MASK 0x0003FFFCL
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_UTC_DONE_MASK 0x00040000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_DONE_MASK 0x00080000L
+#define RMI_SCOREBOARD_STATUS0__CP_VMID_INV_FLUSH_TYPE_MASK 0x00100000L
+#define RMI_SCOREBOARD_STATUS0__FORCE_VMID_INV_DONE_MASK 0x00200000L
+#define RMI_SCOREBOARD_STATUS0__COUNTER_SELECT_MASK 0x07C00000L
+//RMI_SCOREBOARD_STATUS1
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED__SHIFT 0xe
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1__SHIFT 0xf
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB0_MASK 0x00002000L
+#define RMI_SCOREBOARD_STATUS1__MULTI_VMID_INVAL_FROM_CP_DETECTED_MASK 0x00004000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_RB1_MASK 0x07FF8000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_UNDERFLOW_RB1_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS1__RUNNING_CNT_OVERFLOW_RB1_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS1__COM_FLUSH_IN_PROG_RB0_MASK 0x40000000L
+//RMI_SCOREBOARD_STATUS2
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0__SHIFT 0x0
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0__SHIFT 0xc
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1__SHIFT 0xd
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1__SHIFT 0x19
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1__SHIFT 0x1a
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0__SHIFT 0x1b
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0__SHIFT 0x1c
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1__SHIFT 0x1d
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0__SHIFT 0x1e
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1__SHIFT 0x1f
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB0_MASK 0x00000FFFL
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB0_MASK 0x00001000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_RB1_MASK 0x01FFE000L
+#define RMI_SCOREBOARD_STATUS2__SNAPSHOT_CNT_UNDERFLOW_RB1_MASK 0x02000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB1_MASK 0x04000000L
+#define RMI_SCOREBOARD_STATUS2__COM_FLUSH_DONE_RB0_MASK 0x08000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB0_MASK 0x10000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_IN_PROG_RB1_MASK 0x20000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB0_MASK 0x40000000L
+#define RMI_SCOREBOARD_STATUS2__TIME_STAMP_FLUSH_DONE_RB1_MASK 0x80000000L
+//RMI_XBAR_ARBITER_CONFIG
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x2
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL__SHIFT 0x3
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN__SHIFT 0x4
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_OVERRIDE_EN__SHIFT 0x5
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE__SHIFT 0x6
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE__SHIFT 0x10
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR__SHIFT 0x12
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL__SHIFT 0x13
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN__SHIFT 0x14
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_OVERRIDE_EN__SHIFT 0x15
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE__SHIFT 0x16
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE__SHIFT 0x18
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_MASK 0x00000003L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00000004L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_MASK 0x00000008L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_BREAK_LOB_ON_IDLEIN_MASK 0x00000010L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_MODE_OVERRIDE_EN_MASK 0x00000020L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_OVERRIDE_MASK 0x000000C0L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB0_STALL_TIMER_START_VALUE_MASK 0x0000FF00L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_MASK 0x00030000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_WEIGHTEDRR_MASK 0x00040000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_MASK 0x00080000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_BREAK_LOB_ON_IDLEIN_MASK 0x00100000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_MODE_OVERRIDE_EN_MASK 0x00200000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_OVERRIDE_MASK 0x00C00000L
+#define RMI_XBAR_ARBITER_CONFIG__XBAR_ARB1_STALL_TIMER_START_VALUE_MASK 0xFF000000L
+//RMI_XBAR_ARBITER_CONFIG_1
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD__SHIFT 0x0
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR__SHIFT 0x8
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_RD_MASK 0x000000FFL
+#define RMI_XBAR_ARBITER_CONFIG_1__XBAR_ARB_ROUND_ROBIN_WEIGHT_RB0_WR_MASK 0x0000FF00L
+//RMI_CLOCK_CNTRL
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK__SHIFT 0x0
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK__SHIFT 0x5
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK__SHIFT 0xa
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK__SHIFT 0xf
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_BUSY_MASK_MASK 0x0000001FL
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_BUSY_MASK_MASK 0x000003E0L
+#define RMI_CLOCK_CNTRL__DYN_CLK_RB0_WAKEUP_MASK_MASK 0x00007C00L
+#define RMI_CLOCK_CNTRL__DYN_CLK_CMN_WAKEUP_MASK_MASK 0x000F8000L
+//RMI_UTCL1_STATUS
+#define RMI_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RMI_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RMI_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RMI_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RMI_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RMI_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+//RMI_RB_GLX_CID_MAP
+#define RMI_RB_GLX_CID_MAP__CB_COLOR_MAP__SHIFT 0x0
+#define RMI_RB_GLX_CID_MAP__CB_FMASK_MAP__SHIFT 0x4
+#define RMI_RB_GLX_CID_MAP__CB_CMASK_MAP__SHIFT 0x8
+#define RMI_RB_GLX_CID_MAP__CB_DCC_MAP__SHIFT 0xc
+#define RMI_RB_GLX_CID_MAP__DB_Z_MAP__SHIFT 0x10
+#define RMI_RB_GLX_CID_MAP__DB_S_MAP__SHIFT 0x14
+#define RMI_RB_GLX_CID_MAP__DB_TILE_MAP__SHIFT 0x18
+#define RMI_RB_GLX_CID_MAP__DB_ZPCPSD_MAP__SHIFT 0x1c
+#define RMI_RB_GLX_CID_MAP__CB_COLOR_MAP_MASK 0x0000000FL
+#define RMI_RB_GLX_CID_MAP__CB_FMASK_MAP_MASK 0x000000F0L
+#define RMI_RB_GLX_CID_MAP__CB_CMASK_MAP_MASK 0x00000F00L
+#define RMI_RB_GLX_CID_MAP__CB_DCC_MAP_MASK 0x0000F000L
+#define RMI_RB_GLX_CID_MAP__DB_Z_MAP_MASK 0x000F0000L
+#define RMI_RB_GLX_CID_MAP__DB_S_MAP_MASK 0x00F00000L
+#define RMI_RB_GLX_CID_MAP__DB_TILE_MAP_MASK 0x0F000000L
+#define RMI_RB_GLX_CID_MAP__DB_ZPCPSD_MAP_MASK 0xF0000000L
+//RMI_XNACK_DEBUG
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID__SHIFT 0x0
+#define RMI_XNACK_DEBUG__XNACK_PER_VMID_MASK 0x0000FFFFL
+//RMI_SPARE
+#define RMI_SPARE__RMI_2_GL1_128B_READ_DISABLE__SHIFT 0x1
+#define RMI_SPARE__RMI_2_GL1_REPEATER_FGCG_DISABLE__SHIFT 0x2
+#define RMI_SPARE__RMI_2_RB_REPEATER_FGCG_DISABLE__SHIFT 0x3
+#define RMI_SPARE__EARLY_WRITE_ACK_ENABLE_C_RW_NOA_RESOLVE_DIS__SHIFT 0x4
+#define RMI_SPARE__RMI_REORDER_BYPASS_CHANNEL_DIS__SHIFT 0x5
+#define RMI_SPARE__XNACK_RETURN_DATA_OVERRIDE__SHIFT 0x6
+#define RMI_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define RMI_SPARE__NOFILL_RMI_CID_CC__SHIFT 0x8
+#define RMI_SPARE__NOFILL_RMI_CID_FC__SHIFT 0x9
+#define RMI_SPARE__NOFILL_RMI_CID_CM__SHIFT 0xa
+#define RMI_SPARE__NOFILL_RMI_CID_DC__SHIFT 0xb
+#define RMI_SPARE__NOFILL_RMI_CID_Z__SHIFT 0xc
+#define RMI_SPARE__NOFILL_RMI_CID_S__SHIFT 0xd
+#define RMI_SPARE__NOFILL_RMI_CID_TILE__SHIFT 0xe
+#define RMI_SPARE__SPARE_BIT_15_0__SHIFT 0xf
+#define RMI_SPARE__ARBITER_ADDRESS_MASK__SHIFT 0x10
+#define RMI_SPARE__RMI_2_GL1_128B_READ_DISABLE_MASK 0x00000002L
+#define RMI_SPARE__RMI_2_GL1_REPEATER_FGCG_DISABLE_MASK 0x00000004L
+#define RMI_SPARE__RMI_2_RB_REPEATER_FGCG_DISABLE_MASK 0x00000008L
+#define RMI_SPARE__EARLY_WRITE_ACK_ENABLE_C_RW_NOA_RESOLVE_DIS_MASK 0x00000010L
+#define RMI_SPARE__RMI_REORDER_BYPASS_CHANNEL_DIS_MASK 0x00000020L
+#define RMI_SPARE__XNACK_RETURN_DATA_OVERRIDE_MASK 0x00000040L
+#define RMI_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define RMI_SPARE__NOFILL_RMI_CID_CC_MASK 0x00000100L
+#define RMI_SPARE__NOFILL_RMI_CID_FC_MASK 0x00000200L
+#define RMI_SPARE__NOFILL_RMI_CID_CM_MASK 0x00000400L
+#define RMI_SPARE__NOFILL_RMI_CID_DC_MASK 0x00000800L
+#define RMI_SPARE__NOFILL_RMI_CID_Z_MASK 0x00001000L
+#define RMI_SPARE__NOFILL_RMI_CID_S_MASK 0x00002000L
+#define RMI_SPARE__NOFILL_RMI_CID_TILE_MASK 0x00004000L
+#define RMI_SPARE__SPARE_BIT_15_0_MASK 0x00008000L
+#define RMI_SPARE__ARBITER_ADDRESS_MASK_MASK 0xFFFF0000L
+//RMI_SPARE_1
+#define RMI_SPARE_1__EARLY_WRACK_FIFO_DISABLE__SHIFT 0x0
+#define RMI_SPARE_1__SPARE_BIT_9__SHIFT 0x1
+#define RMI_SPARE_1__SPARE_BIT_10__SHIFT 0x2
+#define RMI_SPARE_1__SPARE_BIT_11__SHIFT 0x3
+#define RMI_SPARE_1__SPARE_BIT_12__SHIFT 0x4
+#define RMI_SPARE_1__SPARE_BIT_13__SHIFT 0x5
+#define RMI_SPARE_1__SPARE_BIT_14__SHIFT 0x6
+#define RMI_SPARE_1__SPARE_BIT_15__SHIFT 0x7
+#define RMI_SPARE_1__RMI_REORDER_DIS_BY_CID__SHIFT 0x8
+#define RMI_SPARE_1__SPARE_BIT_16_1__SHIFT 0x10
+#define RMI_SPARE_1__EARLY_WRACK_FIFO_DISABLE_MASK 0x00000001L
+#define RMI_SPARE_1__SPARE_BIT_9_MASK 0x00000002L
+#define RMI_SPARE_1__SPARE_BIT_10_MASK 0x00000004L
+#define RMI_SPARE_1__SPARE_BIT_11_MASK 0x00000008L
+#define RMI_SPARE_1__SPARE_BIT_12_MASK 0x00000010L
+#define RMI_SPARE_1__SPARE_BIT_13_MASK 0x00000020L
+#define RMI_SPARE_1__SPARE_BIT_14_MASK 0x00000040L
+#define RMI_SPARE_1__SPARE_BIT_15_MASK 0x00000080L
+#define RMI_SPARE_1__RMI_REORDER_DIS_BY_CID_MASK 0x0000FF00L
+#define RMI_SPARE_1__SPARE_BIT_16_1_MASK 0xFFFF0000L
+//RMI_SPARE_2
+#define RMI_SPARE_2__ERROR_ZERO_BYTE_MASK_CID__SHIFT 0x0
+#define RMI_SPARE_2__SPARE_BIT_8_2__SHIFT 0x10
+#define RMI_SPARE_2__SPARE_BIT_8_3__SHIFT 0x18
+#define RMI_SPARE_2__ERROR_ZERO_BYTE_MASK_CID_MASK 0x0000FFFFL
+#define RMI_SPARE_2__SPARE_BIT_8_2_MASK 0x00FF0000L
+#define RMI_SPARE_2__SPARE_BIT_8_3_MASK 0xFF000000L
+//CC_RMI_REDUNDANCY
+#define CC_RMI_REDUNDANCY__WRITE_DIS__SHIFT 0x0
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_0__SHIFT 0x1
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_1__SHIFT 0x2
+#define CC_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE__SHIFT 0x3
+#define CC_RMI_REDUNDANCY__REPAIR_ID_SWAP__SHIFT 0x4
+#define CC_RMI_REDUNDANCY__WRITE_DIS_MASK 0x00000001L
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_0_MASK 0x00000002L
+#define CC_RMI_REDUNDANCY__REPAIR_EN_IN_1_MASK 0x00000004L
+#define CC_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE_MASK 0x00000008L
+#define CC_RMI_REDUNDANCY__REPAIR_ID_SWAP_MASK 0x00000010L
+
+
+// addressBlock: gc_pmmdec
+//GCR_PIO_CNTL
+#define GCR_PIO_CNTL__GCR_DATA_INDEX__SHIFT 0x0
+#define GCR_PIO_CNTL__GCR_REG_DONE__SHIFT 0x2
+#define GCR_PIO_CNTL__GCR_REG_RESET__SHIFT 0x3
+#define GCR_PIO_CNTL__GCR_PIO_RSP_TAG__SHIFT 0x10
+#define GCR_PIO_CNTL__GCR_PIO_RSP_DONE__SHIFT 0x1e
+#define GCR_PIO_CNTL__GCR_READY__SHIFT 0x1f
+#define GCR_PIO_CNTL__GCR_DATA_INDEX_MASK 0x00000003L
+#define GCR_PIO_CNTL__GCR_REG_DONE_MASK 0x00000004L
+#define GCR_PIO_CNTL__GCR_REG_RESET_MASK 0x00000008L
+#define GCR_PIO_CNTL__GCR_PIO_RSP_TAG_MASK 0x00FF0000L
+#define GCR_PIO_CNTL__GCR_PIO_RSP_DONE_MASK 0x40000000L
+#define GCR_PIO_CNTL__GCR_READY_MASK 0x80000000L
+//GCR_PIO_DATA
+#define GCR_PIO_DATA__GCR_DATA__SHIFT 0x0
+#define GCR_PIO_DATA__GCR_DATA_MASK 0xFFFFFFFFL
+//PMM_CNTL
+#define PMM_CNTL__PMM_DISABLE__SHIFT 0x0
+#define PMM_CNTL__ABIT_FORCE_FLUSH__SHIFT 0x1
+#define PMM_CNTL__ABIT_TIMER_THRESHOLD__SHIFT 0x2
+#define PMM_CNTL__ABIT_TIMER_DISABLE__SHIFT 0x6
+#define PMM_CNTL__ABIT_TIMER_RESET__SHIFT 0x7
+#define PMM_CNTL__INTERRUPT_PRIORITY__SHIFT 0x8
+#define PMM_CNTL__PMM_INTERRUPTS_DISABLE__SHIFT 0xa
+#define PMM_CNTL__RESERVED__SHIFT 0xb
+#define PMM_CNTL__PMM_DISABLE_MASK 0x00000001L
+#define PMM_CNTL__ABIT_FORCE_FLUSH_MASK 0x00000002L
+#define PMM_CNTL__ABIT_TIMER_THRESHOLD_MASK 0x0000003CL
+#define PMM_CNTL__ABIT_TIMER_DISABLE_MASK 0x00000040L
+#define PMM_CNTL__ABIT_TIMER_RESET_MASK 0x00000080L
+#define PMM_CNTL__INTERRUPT_PRIORITY_MASK 0x00000300L
+#define PMM_CNTL__PMM_INTERRUPTS_DISABLE_MASK 0x00000400L
+#define PMM_CNTL__RESERVED_MASK 0xFFFFF800L
+//PMM_STATUS
+#define PMM_STATUS__PMM_IDLE__SHIFT 0x0
+#define PMM_STATUS__ABIT_FORCE_FLUSH_IN_PROGRESS__SHIFT 0x1
+#define PMM_STATUS__ABIT_FORCE_FLUSH_DONE__SHIFT 0x2
+#define PMM_STATUS__ABIT_TIMER_FLUSH_IN_PROGRESS__SHIFT 0x3
+#define PMM_STATUS__ABIT_TIMER_FLUSH_DONE__SHIFT 0x4
+#define PMM_STATUS__ABIT_TIMER_RUNNING__SHIFT 0x5
+#define PMM_STATUS__PMM_INTERRUPTS_PENDING__SHIFT 0x6
+#define PMM_STATUS__ABIT_FLUSH_ERROR__SHIFT 0x7
+#define PMM_STATUS__ABIT_TIMER_RESET_CDC_IN_PROGRESS__SHIFT 0x8
+#define PMM_STATUS__ABIT_TIMER_ENABLE_CDC_IN_PROGRESS__SHIFT 0x9
+#define PMM_STATUS__ABIT_TIMER_THRESHOLD_CDC_IN_PROGRESS__SHIFT 0xa
+#define PMM_STATUS__RESERVED__SHIFT 0xb
+#define PMM_STATUS__PMM_IDLE_MASK 0x00000001L
+#define PMM_STATUS__ABIT_FORCE_FLUSH_IN_PROGRESS_MASK 0x00000002L
+#define PMM_STATUS__ABIT_FORCE_FLUSH_DONE_MASK 0x00000004L
+#define PMM_STATUS__ABIT_TIMER_FLUSH_IN_PROGRESS_MASK 0x00000008L
+#define PMM_STATUS__ABIT_TIMER_FLUSH_DONE_MASK 0x00000010L
+#define PMM_STATUS__ABIT_TIMER_RUNNING_MASK 0x00000020L
+#define PMM_STATUS__PMM_INTERRUPTS_PENDING_MASK 0x00000040L
+#define PMM_STATUS__ABIT_FLUSH_ERROR_MASK 0x00000080L
+#define PMM_STATUS__ABIT_TIMER_RESET_CDC_IN_PROGRESS_MASK 0x00000100L
+#define PMM_STATUS__ABIT_TIMER_ENABLE_CDC_IN_PROGRESS_MASK 0x00000200L
+#define PMM_STATUS__ABIT_TIMER_THRESHOLD_CDC_IN_PROGRESS_MASK 0x00000400L
+#define PMM_STATUS__RESERVED_MASK 0xFFFFF800L
+
+
+// addressBlock: gc_utcl1dec
+//UTCL1_CTRL_1
+#define UTCL1_CTRL_1__UTCL1_CACHE_CORE_BYPASS__SHIFT 0x0
+#define UTCL1_CTRL_1__UTCL1_TCP_BYPASS__SHIFT 0x1
+#define UTCL1_CTRL_1__UTCL1_SQCI_BYPASS__SHIFT 0x2
+#define UTCL1_CTRL_1__UTCL1_SQCD_BYPASS__SHIFT 0x3
+#define UTCL1_CTRL_1__UTCL1_RMI_BYPASS__SHIFT 0x4
+#define UTCL1_CTRL_1__UTCL1_SQG_BYPASS__SHIFT 0x5
+#define UTCL1_CTRL_1__UTCL1_FORCE_RANGE_INV_TO_VMID__SHIFT 0x6
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL__SHIFT 0x7
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_DONE__SHIFT 0x8
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_1__SHIFT 0x9
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_2__SHIFT 0xb
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_3__SHIFT 0xd
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_4__SHIFT 0xf
+#define UTCL1_CTRL_1__RESERVED__SHIFT 0x11
+#define UTCL1_CTRL_1__UTCL1_CACHE_CORE_BYPASS_MASK 0x00000001L
+#define UTCL1_CTRL_1__UTCL1_TCP_BYPASS_MASK 0x00000002L
+#define UTCL1_CTRL_1__UTCL1_SQCI_BYPASS_MASK 0x00000004L
+#define UTCL1_CTRL_1__UTCL1_SQCD_BYPASS_MASK 0x00000008L
+#define UTCL1_CTRL_1__UTCL1_RMI_BYPASS_MASK 0x00000010L
+#define UTCL1_CTRL_1__UTCL1_SQG_BYPASS_MASK 0x00000020L
+#define UTCL1_CTRL_1__UTCL1_FORCE_RANGE_INV_TO_VMID_MASK 0x00000040L
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_MASK 0x00000080L
+#define UTCL1_CTRL_1__UTCL1_FORCE_INV_ALL_DONE_MASK 0x00000100L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_1_MASK 0x00000600L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_2_MASK 0x00001800L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_3_MASK 0x00006000L
+#define UTCL1_CTRL_1__UTCL1_PAGE_SIZE_4_MASK 0x00018000L
+#define UTCL1_CTRL_1__RESERVED_MASK 0xFFFE0000L
+//UTCL1_ALOG
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_THRESHOLD__SHIFT 0x0
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER2_BYPASS__SHIFT 0x3
+#define UTCL1_ALOG__UTCL1_ALOG_ACTIVE__SHIFT 0x4
+#define UTCL1_ALOG__UTCL1_ALOG_MODE__SHIFT 0x5
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_LOCK_WINDOW__SHIFT 0x6
+#define UTCL1_ALOG__UTCL1_ALOG_ONLY_MISS__SHIFT 0x9
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_INTR_THRESHOLD__SHIFT 0xa
+#define UTCL1_ALOG__UTCL1_ALOG_SPACE_EN__SHIFT 0xc
+#define UTCL1_ALOG__UTCL1_ALOG_CLEAN__SHIFT 0xf
+#define UTCL1_ALOG__UTCL1_ALOG_IDLE__SHIFT 0x10
+#define UTCL1_ALOG__UTCL1_ALOG_TRACK_SEGMENT_SIZE__SHIFT 0x11
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_BYPASS__SHIFT 0x17
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_INTR_ON_ALLOC__SHIFT 0x18
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_THRESHOLD_MASK 0x00000007L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER2_BYPASS_MASK 0x00000008L
+#define UTCL1_ALOG__UTCL1_ALOG_ACTIVE_MASK 0x00000010L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE_MASK 0x00000020L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_LOCK_WINDOW_MASK 0x000001C0L
+#define UTCL1_ALOG__UTCL1_ALOG_ONLY_MISS_MASK 0x00000200L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE2_INTR_THRESHOLD_MASK 0x00000C00L
+#define UTCL1_ALOG__UTCL1_ALOG_SPACE_EN_MASK 0x00007000L
+#define UTCL1_ALOG__UTCL1_ALOG_CLEAN_MASK 0x00008000L
+#define UTCL1_ALOG__UTCL1_ALOG_IDLE_MASK 0x00010000L
+#define UTCL1_ALOG__UTCL1_ALOG_TRACK_SEGMENT_SIZE_MASK 0x007E0000L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_FILTER1_BYPASS_MASK 0x00800000L
+#define UTCL1_ALOG__UTCL1_ALOG_MODE1_INTR_ON_ALLOC_MASK 0x01000000L
+//UTCL1_STATUS
+#define UTCL1_STATUS__UTCL1_HIT_PATH_BUSY__SHIFT 0x0
+#define UTCL1_STATUS__UTCL1_MH_BUSY__SHIFT 0x1
+#define UTCL1_STATUS__UTCL1_INV_BUSY__SHIFT 0x2
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_REQ__SHIFT 0x3
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_RET__SHIFT 0x4
+#define UTCL1_STATUS__UTCL1_LAST_UTCL2_RET_XNACK__SHIFT 0x5
+#define UTCL1_STATUS__UTCL1_RANGE_INV_IN_PROGRESS__SHIFT 0x7
+#define UTCL1_STATUS__RESERVED__SHIFT 0x8
+#define UTCL1_STATUS__UTCL1_HIT_PATH_BUSY_MASK 0x00000001L
+#define UTCL1_STATUS__UTCL1_MH_BUSY_MASK 0x00000002L
+#define UTCL1_STATUS__UTCL1_INV_BUSY_MASK 0x00000004L
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_REQ_MASK 0x00000008L
+#define UTCL1_STATUS__UTCL1_PENDING_UTCL2_RET_MASK 0x00000010L
+#define UTCL1_STATUS__UTCL1_LAST_UTCL2_RET_XNACK_MASK 0x00000060L
+#define UTCL1_STATUS__UTCL1_RANGE_INV_IN_PROGRESS_MASK 0x00000080L
+#define UTCL1_STATUS__RESERVED_MASK 0x00000100L
+
+
+// addressBlock: gc_gcvmsharedpfdec
+//GCMC_VM_NB_MMIOBASE
+#define GCMC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0
+#define GCMC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL
+//GCMC_VM_NB_MMIOLIMIT
+#define GCMC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0
+#define GCMC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL
+//GCMC_VM_NB_PCI_CTRL
+#define GCMC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17
+#define GCMC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L
+//GCMC_VM_NB_PCI_ARB
+#define GCMC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3
+#define GCMC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L
+//GCMC_VM_NB_TOP_OF_DRAM_SLOT1
+#define GCMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17
+#define GCMC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L
+//GCMC_VM_NB_LOWER_TOP_OF_DRAM2
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L
+#define GCMC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L
+//GCMC_VM_NB_UPPER_TOP_OF_DRAM2
+#define GCMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0
+#define GCMC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL
+//GCMC_VM_FB_OFFSET
+#define GCMC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0
+#define GCMC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL
+//GCMC_VM_STEERING
+#define GCMC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0
+#define GCMC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L
+//GCMC_SHARED_VIRT_RESET_REQ
+#define GCMC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0
+#define GCMC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f
+#define GCMC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL
+#define GCMC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L
+//GCMC_MEM_POWER_LS
+#define GCMC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define GCMC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define GCMC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define GCMC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//GCMC_VM_CACHEABLE_DRAM_ADDRESS_START
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_CACHEABLE_DRAM_ADDRESS_END
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_SYSMEM_ADDRESS_START
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_SYSMEM_ADDRESS_END
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_SYSMEM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_APT_CNTL
+#define GCMC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0
+#define GCMC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1
+#define GCMC_VM_APT_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x2
+#define GCMC_VM_APT_CNTL__CHECK_IS_LOCAL__SHIFT 0x4
+#define GCMC_VM_APT_CNTL__CAP_FRAG_SIZE_2M__SHIFT 0x5
+#define GCMC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL__SHIFT 0x6
+#define GCMC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L
+#define GCMC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L
+#define GCMC_VM_APT_CNTL__FRAG_APT_INTXN_MODE_MASK 0x0000000CL
+#define GCMC_VM_APT_CNTL__CHECK_IS_LOCAL_MASK 0x00000010L
+#define GCMC_VM_APT_CNTL__CAP_FRAG_SIZE_2M_MASK 0x00000020L
+#define GCMC_VM_APT_CNTL__LOCAL_SYSMEM_APERTURE_CNTL_MASK 0x000000C0L
+//GCMC_VM_LOCAL_FB_ADDRESS_START
+#define GCMC_VM_LOCAL_FB_ADDRESS_START__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_FB_ADDRESS_END
+#define GCMC_VM_LOCAL_FB_ADDRESS_END__ADDRESS__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL
+//GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL
+#define GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0
+#define GCMC_VM_LOCAL_FB_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L
+//GCUTCL2_ICG_CTRL
+#define GCUTCL2_ICG_CTRL__OFF_HYSTERESIS__SHIFT 0x0
+#define GCUTCL2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE__SHIFT 0x4
+#define GCUTCL2_ICG_CTRL__STATIC_CLOCK_OVERRIDE__SHIFT 0x5
+#define GCUTCL2_ICG_CTRL__AON_CLOCK_OVERRIDE__SHIFT 0x6
+#define GCUTCL2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE__SHIFT 0x7
+#define GCUTCL2_ICG_CTRL__OFF_HYSTERESIS_MASK 0x0000000FL
+#define GCUTCL2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE_MASK 0x00000010L
+#define GCUTCL2_ICG_CTRL__STATIC_CLOCK_OVERRIDE_MASK 0x00000020L
+#define GCUTCL2_ICG_CTRL__AON_CLOCK_OVERRIDE_MASK 0x00000040L
+#define GCUTCL2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE_MASK 0x00000080L
+//GCMC_SHARED_ACTIVE_FCN_ID
+#define GCMC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0
+#define GCMC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1e
+#define GCMC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL
+#define GCMC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x40000000L
+//GCUTCL2_CGTT_BUSY_CTRL
+#define GCUTCL2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define GCUTCL2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x5
+#define GCUTCL2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000001FL
+#define GCUTCL2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000020L
+//GCMC_VM_FB_NOALLOC_CNTL
+#define GCMC_VM_FB_NOALLOC_CNTL__LOCAL_FB_NOALLOC_NOPTE__SHIFT 0x0
+#define GCMC_VM_FB_NOALLOC_CNTL__REMOTE_FB_NOALLOC_NOPTE__SHIFT 0x1
+#define GCMC_VM_FB_NOALLOC_CNTL__FB_NOALLOC_WALKER_FETCH__SHIFT 0x2
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_ATCL2_NOALLOC__SHIFT 0x3
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE2_NOALLOC__SHIFT 0x4
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE3_NOALLOC__SHIFT 0x5
+#define GCMC_VM_FB_NOALLOC_CNTL__LOCAL_FB_NOALLOC_NOPTE_MASK 0x00000001L
+#define GCMC_VM_FB_NOALLOC_CNTL__REMOTE_FB_NOALLOC_NOPTE_MASK 0x00000002L
+#define GCMC_VM_FB_NOALLOC_CNTL__FB_NOALLOC_WALKER_FETCH_MASK 0x00000004L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_ATCL2_NOALLOC_MASK 0x00000008L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE2_NOALLOC_MASK 0x00000010L
+#define GCMC_VM_FB_NOALLOC_CNTL__ROUTER_GPA_MODE3_NOALLOC_MASK 0x00000020L
+//GCUTCL2_HARVEST_BYPASS_GROUPS
+#define GCUTCL2_HARVEST_BYPASS_GROUPS__BYPASS_GROUPS__SHIFT 0x0
+#define GCUTCL2_HARVEST_BYPASS_GROUPS__BYPASS_GROUPS_MASK 0xFFFFFFFFL
+//GCUTCL2_GROUP_RET_FAULT_STATUS
+#define GCUTCL2_GROUP_RET_FAULT_STATUS__FAULT_GROUPS__SHIFT 0x0
+#define GCUTCL2_GROUP_RET_FAULT_STATUS__FAULT_GROUPS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcvml2pfdec
+//GCVM_L2_CNTL
+#define GCVM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0
+#define GCVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1
+#define GCVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2
+#define GCVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8
+#define GCVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9
+#define GCVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa
+#define GCVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc
+#define GCVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf
+#define GCVM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12
+#define GCVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13
+#define GCVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15
+#define GCVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a
+#define GCVM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L
+#define GCVM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L
+#define GCVM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL
+#define GCVM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L
+#define GCVM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L
+#define GCVM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L
+#define GCVM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L
+#define GCVM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L
+#define GCVM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L
+#define GCVM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L
+#define GCVM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L
+#define GCVM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L
+#define GCVM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L
+//GCVM_L2_CNTL2
+#define GCVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0
+#define GCVM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1
+#define GCVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15
+#define GCVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16
+#define GCVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17
+#define GCVM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a
+#define GCVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c
+#define GCVM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L
+#define GCVM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L
+#define GCVM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L
+#define GCVM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L
+#define GCVM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L
+#define GCVM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L
+#define GCVM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L
+//GCVM_L2_CNTL3
+#define GCVM_L2_CNTL3__BANK_SELECT__SHIFT 0x0
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14
+#define GCVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18
+#define GCVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d
+#define GCVM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e
+#define GCVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f
+#define GCVM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L
+#define GCVM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L
+#define GCVM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L
+#define GCVM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L
+#define GCVM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L
+//GCVM_L2_STATUS
+#define GCVM_L2_STATUS__L2_BUSY__SHIFT 0x0
+#define GCVM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1
+#define GCVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11
+#define GCVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12
+#define GCVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13
+#define GCVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14
+#define GCVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15
+#define GCVM_L2_STATUS__L2_BUSY_MASK 0x00000001L
+#define GCVM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL
+#define GCVM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L
+#define GCVM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L
+#define GCVM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L
+#define GCVM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L
+#define GCVM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L
+//GCVM_DUMMY_PAGE_FAULT_CNTL
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L
+#define GCVM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL
+//GCVM_DUMMY_PAGE_FAULT_ADDR_LO32
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_DUMMY_PAGE_FAULT_ADDR_HI32
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_INVALIDATE_CNTL
+#define GCVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING__SHIFT 0x0
+#define GCVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING__SHIFT 0x8
+#define GCVM_INVALIDATE_CNTL__PRI_REG_ALTERNATING_MASK 0x000000FFL
+#define GCVM_INVALIDATE_CNTL__MAX_REG_OUTSTANDING_MASK 0x0000FF00L
+//GCVM_L2_PROTECTION_FAULT_CNTL
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1
+#define GCVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5
+#define GCVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6
+#define GCVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7
+#define GCVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8
+#define GCVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9
+#define GCVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb
+#define GCVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd
+#define GCVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L
+//GCVM_L2_PROTECTION_FAULT_CNTL2
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L
+#define GCVM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L
+//GCVM_L2_PROTECTION_FAULT_MM_CNTL3
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_MM_CNTL4
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_STATUS
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8
+#define GCVM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9
+#define GCVM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12
+#define GCVM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PRT__SHIFT 0x1d
+#define GCVM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__PRT_MASK 0x20000000L
+#define GCVM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L
+//GCVM_L2_PROTECTION_FAULT_ADDR_LO32
+#define GCVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_ADDR_HI32
+#define GCVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0
+#define GCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL
+//GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0
+#define GCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL
+//GCVM_L2_CNTL4
+#define GCVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0
+#define GCVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6
+#define GCVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7
+#define GCVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8
+#define GCVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12
+#define GCVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c
+#define GCVM_L2_CNTL4__GC_CH_FGCG_OFF__SHIFT 0x1d
+#define GCVM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE__SHIFT 0x1e
+#define GCVM_L2_CNTL4__VFIFO_VISIBLE_BANK_SILOS__SHIFT 0x1f
+#define GCVM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL
+#define GCVM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L
+#define GCVM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L
+#define GCVM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L
+#define GCVM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L
+#define GCVM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L
+#define GCVM_L2_CNTL4__GC_CH_FGCG_OFF_MASK 0x20000000L
+#define GCVM_L2_CNTL4__VFIFO_HEAD_OF_QUEUE_MASK 0x40000000L
+#define GCVM_L2_CNTL4__VFIFO_VISIBLE_BANK_SILOS_MASK 0x80000000L
+//GCVM_L2_MM_GROUP_RT_CLASSES
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L
+#define GCVM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L
+//GCVM_L2_BANK_SELECT_RESERVED_CID
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
+//GCVM_L2_BANK_SELECT_RESERVED_CID2
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE__SHIFT 0x1a
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L
+#define GCVM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_FRAGMENT_SIZE_MASK 0x7C000000L
+//GCVM_L2_CACHE_PARITY_CNTL
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L
+#define GCVM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L
+#define GCVM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L
+//GCVM_L2_ICG_CTRL
+#define GCVM_L2_ICG_CTRL__OFF_HYSTERESIS__SHIFT 0x0
+#define GCVM_L2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE__SHIFT 0x4
+#define GCVM_L2_ICG_CTRL__STATIC_CLOCK_OVERRIDE__SHIFT 0x5
+#define GCVM_L2_ICG_CTRL__AON_CLOCK_OVERRIDE__SHIFT 0x6
+#define GCVM_L2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE__SHIFT 0x7
+#define GCVM_L2_ICG_CTRL__OFF_HYSTERESIS_MASK 0x0000000FL
+#define GCVM_L2_ICG_CTRL__DYNAMIC_CLOCK_OVERRIDE_MASK 0x00000010L
+#define GCVM_L2_ICG_CTRL__STATIC_CLOCK_OVERRIDE_MASK 0x00000020L
+#define GCVM_L2_ICG_CTRL__AON_CLOCK_OVERRIDE_MASK 0x00000040L
+#define GCVM_L2_ICG_CTRL__PERFMON_CLOCK_OVERRIDE_MASK 0x00000080L
+//GCVM_L2_CNTL5
+#define GCVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID__SHIFT 0x5
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE__SHIFT 0xe
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE__SHIFT 0xf
+#define GCVM_L2_CNTL5__UTCL2_ATC_REQ_FGCG_OFF__SHIFT 0x10
+#define GCVM_L2_CNTL5__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CNTL5__WALKER_PRIORITY_CLIENT_ID_MASK 0x00003FE0L
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_NOALLOC_ENABLE_MASK 0x00004000L
+#define GCVM_L2_CNTL5__WALKER_FETCH_PDE_MTYPE_ENABLE_MASK 0x00008000L
+#define GCVM_L2_CNTL5__UTCL2_ATC_REQ_FGCG_OFF_MASK 0x00010000L
+//GCVM_L2_GCR_CNTL
+#define GCVM_L2_GCR_CNTL__GCR_ENABLE__SHIFT 0x0
+#define GCVM_L2_GCR_CNTL__GCR_CLIENT_ID__SHIFT 0x1
+#define GCVM_L2_GCR_CNTL__GCR_ENABLE_MASK 0x00000001L
+#define GCVM_L2_GCR_CNTL__GCR_CLIENT_ID_MASK 0x000003FEL
+//GCVML2_WALKER_MACRO_THROTTLE_TIME
+#define GCVML2_WALKER_MACRO_THROTTLE_TIME__TIME__SHIFT 0x0
+#define GCVML2_WALKER_MACRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
+//GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT
+#define GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
+#define GCVML2_WALKER_MACRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
+//GCVML2_WALKER_MICRO_THROTTLE_TIME
+#define GCVML2_WALKER_MICRO_THROTTLE_TIME__TIME__SHIFT 0x0
+#define GCVML2_WALKER_MICRO_THROTTLE_TIME__TIME_MASK 0x00FFFFFFL
+//GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT
+#define GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT__SHIFT 0x1
+#define GCVML2_WALKER_MICRO_THROTTLE_FETCH_LIMIT__LIMIT_MASK 0x0000FFFEL
+//GCVM_L2_CGTT_BUSY_CTRL
+#define GCVM_L2_CGTT_BUSY_CTRL__READ_DELAY__SHIFT 0x0
+#define GCVM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY__SHIFT 0x5
+#define GCVM_L2_CGTT_BUSY_CTRL__READ_DELAY_MASK 0x0000001FL
+#define GCVM_L2_CGTT_BUSY_CTRL__ALWAYS_BUSY_MASK 0x00000020L
+//GCVM_L2_PTE_CACHE_DUMP_CNTL
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ENABLE__SHIFT 0x0
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__READY__SHIFT 0x1
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__BANK__SHIFT 0x4
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__CACHE__SHIFT 0x8
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ASSOC__SHIFT 0xc
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__INDEX__SHIFT 0x10
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ENABLE_MASK 0x00000001L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__READY_MASK 0x00000002L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__BANK_MASK 0x000000F0L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__CACHE_MASK 0x00000F00L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__ASSOC_MASK 0x0000F000L
+#define GCVM_L2_PTE_CACHE_DUMP_CNTL__INDEX_MASK 0xFFFF0000L
+//GCVM_L2_PTE_CACHE_DUMP_READ
+#define GCVM_L2_PTE_CACHE_DUMP_READ__DATA__SHIFT 0x0
+#define GCVM_L2_PTE_CACHE_DUMP_READ__DATA_MASK 0xFFFFFFFFL
+//GCVM_L2_BANK_SELECT_MASKS
+#define GCVM_L2_BANK_SELECT_MASKS__MASK0__SHIFT 0x0
+#define GCVM_L2_BANK_SELECT_MASKS__MASK1__SHIFT 0x4
+#define GCVM_L2_BANK_SELECT_MASKS__MASK2__SHIFT 0x8
+#define GCVM_L2_BANK_SELECT_MASKS__MASK3__SHIFT 0xc
+#define GCVM_L2_BANK_SELECT_MASKS__MASK0_MASK 0x0000000FL
+#define GCVM_L2_BANK_SELECT_MASKS__MASK1_MASK 0x000000F0L
+#define GCVM_L2_BANK_SELECT_MASKS__MASK2_MASK 0x00000F00L
+#define GCVM_L2_BANK_SELECT_MASKS__MASK3_MASK 0x0000F000L
+//GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_RET_CDC__UPDATE_MASK 0x00000400L
+//GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_CDC__UPDATE_MASK 0x00000400L
+//GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__CREDITS__SHIFT 0x0
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__UPDATE__SHIFT 0xa
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__CREDITS_MASK 0x000003FFL
+#define GCUTCL2_CREDIT_SAFETY_GROUP_CLIENTS_INVREQ_NOCDC__UPDATE_MASK 0x00000400L
+//GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__CREDITS__SHIFT 0x0
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__UPDATE__SHIFT 0xa
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__CREDITS_MASK 0x000003FFL
+#define GCVML2_CREDIT_SAFETY_IH_FAULT_INTERRUPT__UPDATE_MASK 0x00000400L
+//GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__CREDITS__SHIFT 0x0
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__UPDATE__SHIFT 0xa
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__CREDITS_MASK 0x000003FFL
+#define GCVML2_WALKER_CREDIT_SAFETY_FETCH_RDREQ__UPDATE_MASK 0x00000400L
+
+
+// addressBlock: gc_gcatcl2dec
+//GC_ATC_L2_CNTL
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf
+#define GC_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10
+#define GC_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13
+#define GC_ATC_L2_CNTL__FRAG_APT_INTXN_MODE__SHIFT 0x14
+#define GC_ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE__SHIFT 0x16
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L
+#define GC_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L
+#define GC_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L
+#define GC_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L
+#define GC_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L
+#define GC_ATC_L2_CNTL__FRAG_APT_INTXN_MODE_MASK 0x00300000L
+#define GC_ATC_L2_CNTL__CLI_GPA_REQ_FRAG_SIZE_MASK 0x0FC00000L
+//GC_ATC_L2_CNTL2
+#define GC_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0
+#define GC_ATC_L2_CNTL2__NUM_BANKS_LOG2__SHIFT 0x6
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x9
+#define GC_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xb
+#define GC_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0xc
+#define GC_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xf
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x12
+#define GC_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL
+#define GC_ATC_L2_CNTL2__NUM_BANKS_LOG2_MASK 0x000001C0L
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x00000600L
+#define GC_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000800L
+#define GC_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00007000L
+#define GC_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00038000L
+#define GC_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00FC0000L
+//GC_ATC_L2_CACHE_DATA0
+#define GC_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1
+#define GC_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2
+#define GC_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x18
+#define GC_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L
+#define GC_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L
+#define GC_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x00FFFFFCL
+#define GC_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x0F000000L
+//GC_ATC_L2_CACHE_DATA1
+#define GC_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//GC_ATC_L2_CACHE_DATA2
+#define GC_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0
+#define GC_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL
+//GC_ATC_L2_CNTL3
+#define GC_ATC_L2_CNTL3__L2_SMALLK_CACHE_FRAGMENT_SIZE__SHIFT 0x0
+#define GC_ATC_L2_CNTL3__L2_MIDK_CACHE_FRAGMENT_SIZE__SHIFT 0x6
+#define GC_ATC_L2_CNTL3__L2_BIGK_CACHE_FRAGMENT_SIZE__SHIFT 0xc
+#define GC_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x12
+#define GC_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x15
+#define GC_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x1b
+#define GC_ATC_L2_CNTL3__REPEATER_FGCG_OFF__SHIFT 0x1e
+#define GC_ATC_L2_CNTL3__L2_SMALLK_CACHE_FRAGMENT_SIZE_MASK 0x0000003FL
+#define GC_ATC_L2_CNTL3__L2_MIDK_CACHE_FRAGMENT_SIZE_MASK 0x00000FC0L
+#define GC_ATC_L2_CNTL3__L2_BIGK_CACHE_FRAGMENT_SIZE_MASK 0x0003F000L
+#define GC_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x001C0000L
+#define GC_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x07E00000L
+#define GC_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x38000000L
+#define GC_ATC_L2_CNTL3__REPEATER_FGCG_OFF_MASK 0x40000000L
+//GC_ATC_L2_STATUS
+#define GC_ATC_L2_STATUS__BUSY__SHIFT 0x0
+#define GC_ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS__SHIFT 0x1
+#define GC_ATC_L2_STATUS__BUSY_MASK 0x00000001L
+#define GC_ATC_L2_STATUS__NO_OUTSTANDING_AT_REQUESTS_MASK 0x00000002L
+//GC_ATC_L2_STATUS2
+#define GC_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0
+#define GC_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8
+#define GC_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL
+#define GC_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L
+//GC_ATC_L2_MISC_CG
+#define GC_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6
+#define GC_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12
+#define GC_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13
+#define GC_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L
+#define GC_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L
+#define GC_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L
+//GC_ATC_L2_MEM_POWER_LS
+#define GC_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0
+#define GC_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6
+#define GC_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL
+#define GC_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L
+//GC_ATC_L2_SDPPORT_CTRL
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN__SHIFT 0x0
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV__SHIFT 0x1
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN__SHIFT 0x2
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x3
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN__SHIFT 0x4
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV__SHIFT 0x5
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN__SHIFT 0x6
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV__SHIFT 0x7
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN__SHIFT 0x8
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV__SHIFT 0x9
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKEN_MASK 0x00000001L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPCKENRCV_MASK 0x00000002L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKEN_MASK 0x00000004L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_RDRSPDATACKENRCV_MASK 0x00000008L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKEN_MASK 0x00000010L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_WRRSPCKENRCV_MASK 0x00000020L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKEN_MASK 0x00000040L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_REQCKENRCV_MASK 0x00000080L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKEN_MASK 0x00000100L
+#define GC_ATC_L2_SDPPORT_CTRL__SDPVDCI_ORIGDATACKENRCV_MASK 0x00000200L
+
+
+// addressBlock: gc_gcl2tlbpfdec
+//GCL2TLB_TLB0_STATUS
+#define GCL2TLB_TLB0_STATUS__BUSY__SHIFT 0x0
+#define GCL2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1
+#define GCL2TLB_TLB0_STATUS__FOUND_APERTURE_FAULTS__SHIFT 0x2
+#define GCL2TLB_TLB0_STATUS__BUSY_MASK 0x00000001L
+#define GCL2TLB_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L
+#define GCL2TLB_TLB0_STATUS__FOUND_APERTURE_FAULTS_MASK 0x00000004L
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_LO__ADDR_MASK 0xFFFFFFFFL
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID__SHIFT 0x4
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID__SHIFT 0x8
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF__SHIFT 0xc
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA__SHIFT 0xd
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM__SHIFT 0xf
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM__SHIFT 0x10
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM__SHIFT 0x11
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID__SHIFT 0x12
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ__SHIFT 0x1e
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__ADDR_MASK 0x0000000FL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VMID_MASK 0x000000F0L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VFID_MASK 0x00000F00L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__VF_MASK 0x00001000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__GPA_MASK 0x00006000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__RD_PERM_MASK 0x00008000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__WR_PERM_MASK 0x00010000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__EX_PERM_MASK 0x00020000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__CLIENT_ID_MASK 0x07FC0000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_REQUEST_HI__REQ_MASK 0x40000000L
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_LO__ADDR_MASK 0xFFFFFFFFL
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS__SHIFT 0x4
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE__SHIFT 0x7
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP__SHIFT 0xd
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA__SHIFT 0xe
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO__SHIFT 0xf
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ__SHIFT 0x10
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE__SHIFT 0x11
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE__SHIFT 0x12
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG__SHIFT 0x15
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK__SHIFT 0x16
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC__SHIFT 0x18
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK__SHIFT 0x1f
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ADDR_MASK 0x0000000FL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PERMS_MASK 0x00000070L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__FRAGMENT_SIZE_MASK 0x00001F80L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SNOOP_MASK 0x00002000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__SPA_MASK 0x00004000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__IO_MASK 0x00008000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__PTE_TMZ_MASK 0x00010000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NO_PTE_MASK 0x00020000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MTYPE_MASK 0x001C0000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__MEMLOG_MASK 0x00200000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__NACK_MASK 0x00C00000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__LLC_NOALLOC_MASK 0x01000000L
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_RESPONSE_HI__ACK_MASK 0x80000000L
+
+
+// addressBlock: gc_gcvmsharedvcdec
+//GCMC_VM_FB_LOCATION_BASE
+#define GCMC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0
+#define GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL
+//GCMC_VM_FB_LOCATION_TOP
+#define GCMC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0
+#define GCMC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_TOP
+#define GCMC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0
+#define GCMC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_BOT
+#define GCMC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0
+#define GCMC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL
+//GCMC_VM_AGP_BASE
+#define GCMC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0
+#define GCMC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_LOW_ADDR
+#define GCMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR
+#define GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0
+#define GCMC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL
+//GCMC_VM_MX_L1_TLB_CNTL
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6
+#define GCMC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7
+#define GCMC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L
+#define GCMC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L
+#define GCMC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L
+#define GCMC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L
+#define GCMC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00003800L
+
+
+// addressBlock: gc_gcvml2vcdec
+//GCVM_CONTEXT0_CNTL
+#define GCVM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT0_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT1_CNTL
+#define GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT1_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT2_CNTL
+#define GCVM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT2_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT3_CNTL
+#define GCVM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT3_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT4_CNTL
+#define GCVM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT4_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT5_CNTL
+#define GCVM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT5_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT6_CNTL
+#define GCVM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT6_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT7_CNTL
+#define GCVM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT7_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT8_CNTL
+#define GCVM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT8_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT9_CNTL
+#define GCVM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT9_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT10_CNTL
+#define GCVM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT10_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT11_CNTL
+#define GCVM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT11_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT12_CNTL
+#define GCVM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT12_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT13_CNTL
+#define GCVM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT13_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT14_CNTL
+#define GCVM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT14_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
+//GCVM_CONTEXT15_CNTL
+#define GCVM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3
+#define GCVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7
+#define GCVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x17
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x18
+#define GCVM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L
+#define GCVM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L
+#define GCVM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L
+#define GCVM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L
+#define GCVM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L
+#define GCVM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L
+#define GCVM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L
+#define GCVM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L
+#define GCVM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L
+#define GCVM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L
+#define GCVM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00800000L
+#define GCVM_CONTEXT15_CNTL__SECURE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x01000000L
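The GCVM_CONTEXT*_CNTL registers above expose every field as a matching __SHIFT/_MASK pair. As a hedged, minimal sketch (not part of this patch; the function name and the chosen field values are invented for illustration), such a pair is conventionally used by shifting the field value into position and clamping it with the mask:

/*
 * Illustrative only: assumes the header carrying the defines above is
 * included. Composes a GCVM_CONTEXT1_CNTL value field by field.
 */
static unsigned int gcvm_context1_cntl_example(void)
{
	unsigned int cntl = 0;

	/* bit 0: enable the context */
	cntl |= (1u << GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT) &
		GCVM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK;

	/* bits 2:1: page table depth (the value 2 is chosen arbitrarily here) */
	cntl |= (2u << GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) &
		GCVM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK;

	/* bit 9: raise an interrupt on range protection faults */
	cntl |= (1u << GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT) &
		GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	return cntl;
}

The same pattern applies unchanged to contexts 2 through 15, whose field layouts are identical.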
+//GCVM_CONTEXTS_DISABLE
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L
+#define GCVM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L
+//GCVM_INVALIDATE_ENG0_SEM
+#define GCVM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG1_SEM
+#define GCVM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG2_SEM
+#define GCVM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG3_SEM
+#define GCVM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG4_SEM
+#define GCVM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG5_SEM
+#define GCVM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG6_SEM
+#define GCVM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG7_SEM
+#define GCVM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG8_SEM
+#define GCVM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG9_SEM
+#define GCVM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG10_SEM
+#define GCVM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG11_SEM
+#define GCVM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG12_SEM
+#define GCVM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG13_SEM
+#define GCVM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG14_SEM
+#define GCVM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG15_SEM
+#define GCVM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG16_SEM
+#define GCVM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG17_SEM
+#define GCVM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L
+//GCVM_INVALIDATE_ENG0_REQ
+#define GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG0_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG0_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
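Each GCVM_INVALIDATE_ENG*_REQ register packs a 16-bit per-VMID request mask alongside the flush control bits defined above. A hedged sketch (again not from the patch; the function name and the selected VMID are illustrative) of building one request word:

/*
 * Illustrative only: request an invalidation for VMID 3 on engine 0 and
 * also drop the cached L2 PTEs. Values are examples, not driver policy.
 */
static unsigned int gcvm_invalidate_eng0_req_example(void)
{
	unsigned int req = 0;

	/* bits 15:0: one request bit per VMID; select VMID 3 */
	req |= ((1u << 3) << GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT) &
		GCVM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK;

	/* bit 19: also invalidate the L2-cached PTEs */
	req |= (1u << GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT) &
		GCVM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK;

	return req;
}

The remaining engines (ENG1 through ENG17) repeat the same field layout, so the identical composition works for any of them.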
+//GCVM_INVALIDATE_ENG1_REQ
+#define GCVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG1_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG1_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG1_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG2_REQ
+#define GCVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG2_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG2_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG2_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG3_REQ
+#define GCVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG3_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG3_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG3_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG4_REQ
+#define GCVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG4_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG4_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG4_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG5_REQ
+#define GCVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG5_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG5_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG5_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG6_REQ
+#define GCVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG6_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG6_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG6_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG7_REQ
+#define GCVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG7_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG7_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG7_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG8_REQ
+#define GCVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG8_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG8_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG8_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG9_REQ
+#define GCVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG9_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG9_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG9_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG10_REQ
+#define GCVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG10_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG10_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG10_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG11_REQ
+#define GCVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG11_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG11_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG11_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG12_REQ
+#define GCVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG12_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG12_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG12_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG13_REQ
+#define GCVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG13_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG13_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG13_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG14_REQ
+#define GCVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG14_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG14_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG14_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG15_REQ
+#define GCVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG15_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG15_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG15_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG16_REQ
+#define GCVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG16_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG16_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG16_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG17_REQ
+#define GCVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x13
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x14
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x15
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x16
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x17
+#define GCVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x18
+#define GCVM_INVALIDATE_ENG17_REQ__LOG_REQUEST__SHIFT 0x19
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY__SHIFT 0x1a
+#define GCVM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00070000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00080000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00100000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00200000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00400000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00800000L
+#define GCVM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x01000000L
+#define GCVM_INVALIDATE_ENG17_REQ__LOG_REQUEST_MASK 0x02000000L
+#define GCVM_INVALIDATE_ENG17_REQ__INVALIDATE_4K_PAGES_ONLY_MASK 0x04000000L
+//GCVM_INVALIDATE_ENG0_ACK
+#define GCVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG1_ACK
+#define GCVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG2_ACK
+#define GCVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG3_ACK
+#define GCVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG4_ACK
+#define GCVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG5_ACK
+#define GCVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG6_ACK
+#define GCVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG7_ACK
+#define GCVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG8_ACK
+#define GCVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG9_ACK
+#define GCVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG10_ACK
+#define GCVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG11_ACK
+#define GCVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG12_ACK
+#define GCVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG13_ACK
+#define GCVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG14_ACK
+#define GCVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG15_ACK
+#define GCVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG16_ACK
+#define GCVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG17_ACK
+#define GCVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10
+#define GCVM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL
+#define GCVM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L
+//GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL
+//GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0
+#define GCVM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL
+//GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL
+//GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0
+#define GCVM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL
+//GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT0_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT1_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT2_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT3_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT4_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT5_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT6_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT7_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT8_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT9_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT10_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT11_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT12_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT13_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT14_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+//GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE__SHIFT 0x0
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0x5
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT__SHIFT 0xa
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_SMALLK_FRAGMENT_SIZE_MASK 0x0000001FL
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000003E0L
+#define GCVM_L2_CONTEXT15_PER_PFVF_PTE_CACHE_FRAGMENT_SIZES__BANK_SELECT_MASK 0x0000FC00L
+
+
+// addressBlock: gc_gcvml2perfddec
+//GCVML2_PERFCOUNTER2_0_LO
+#define GCVML2_PERFCOUNTER2_0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_1_LO
+#define GCVML2_PERFCOUNTER2_1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_0_HI
+#define GCVML2_PERFCOUNTER2_0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCVML2_PERFCOUNTER2_1_HI
+#define GCVML2_PERFCOUNTER2_1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcvml2prdec
+//GCMC_VM_L2_PERFCOUNTER_LO
+#define GCMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCMC_VM_L2_PERFCOUNTER_HI
+#define GCMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCMC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCMC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GCUTCL2_PERFCOUNTER_LO
+#define GCUTCL2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCUTCL2_PERFCOUNTER_HI
+#define GCUTCL2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCUTCL2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCUTCL2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcatcl2perfddec
+//GC_ATC_L2_PERFCOUNTER2_LO
+#define GC_ATC_L2_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GC_ATC_L2_PERFCOUNTER2_HI
+#define GC_ATC_L2_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gcatcl2pfcntrdec
+//GC_ATC_L2_PERFCOUNTER_LO
+#define GC_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GC_ATC_L2_PERFCOUNTER_HI
+#define GC_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GC_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcl2tlbprdec
+//GCL2TLB_PERFCOUNTER_LO
+#define GCL2TLB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCL2TLB_PERFCOUNTER_HI
+#define GCL2TLB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCL2TLB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCL2TLB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_gcvml2perfsdec
+//GCVML2_PERFCOUNTER2_0_SELECT
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_1_SELECT
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_0_SELECT1
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_1_SELECT1
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCVML2_PERFCOUNTER2_1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCVML2_PERFCOUNTER2_0_MODE
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCVML2_PERFCOUNTER2_0_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GCVML2_PERFCOUNTER2_1_MODE
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCVML2_PERFCOUNTER2_1_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+
+
+// addressBlock: gc_gcvml2pldec
+//GCMC_VM_L2_PERFCOUNTER0_CFG
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER1_CFG
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER2_CFG
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER3_CFG
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER4_CFG
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER5_CFG
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER6_CFG
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER7_CFG
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L
+#define GCMC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L
+//GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCMC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+//GCUTCL2_PERFCOUNTER0_CFG
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER1_CFG
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER2_CFG
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER3_CFG
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCUTCL2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCUTCL2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCUTCL2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCUTCL2_PERFCOUNTER_RSLT_CNTL
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCUTCL2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcatcl2perfsdec
+//GC_ATC_L2_PERFCOUNTER2_SELECT
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GC_ATC_L2_PERFCOUNTER2_SELECT1
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GC_ATC_L2_PERFCOUNTER2_MODE
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GC_ATC_L2_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+
+
+// addressBlock: gc_gcatcl2pfcntldec
+//GC_ATC_L2_PERFCOUNTER0_CFG
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GC_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GC_ATC_L2_PERFCOUNTER1_CFG
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GC_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GC_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GC_ATC_L2_PERFCOUNTER_RSLT_CNTL
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GC_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcl2tlbpldec
+//GCL2TLB_PERFCOUNTER0_CFG
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER1_CFG
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER2_CFG
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER3_CFG
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c
+#define GCL2TLB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCL2TLB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L
+#define GCL2TLB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L
+//GCL2TLB_PERFCOUNTER_RSLT_CNTL
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCL2TLB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
+
+
+// addressBlock: gc_gcvml2pspdec
+//GCUTCL2_TRANSLATION_BYPASS_BY_VMID
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__TRANS_BYPASS_VMIDS__SHIFT 0x0
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__GPA_MODE_VMIDS__SHIFT 0x10
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__TRANS_BYPASS_VMIDS_MASK 0x0000FFFFL
+#define GCUTCL2_TRANSLATION_BYPASS_BY_VMID__GPA_MODE_VMIDS_MASK 0xFFFF0000L
+//GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE
+#define GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE__GPU_HOST_TRANSLATION_ENABLE__SHIFT 0x0
+#define GCVM_IOMMU_GPU_HOST_TRANSLATION_ENABLE__GPU_HOST_TRANSLATION_ENABLE_MASK 0x00000001L
+//GCVM_IOMMU_CONTROL_REGISTER
+#define GCVM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0
+#define GCVM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L
+//GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER
+#define GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd
+#define GCVM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L
+//GCVM_IOMMU_MMIO_CNTRL_1
+#define GCVM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8
+#define GCVM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L
+//GCMC_VM_MARC_BASE_LO_0
+#define GCMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_1
+#define GCMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_2
+#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_3
+#define GCMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_4
+#define GCMC_VM_MARC_BASE_LO_4__MARC_BASE_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_4__MARC_BASE_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_5
+#define GCMC_VM_MARC_BASE_LO_5__MARC_BASE_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_5__MARC_BASE_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_6
+#define GCMC_VM_MARC_BASE_LO_6__MARC_BASE_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_6__MARC_BASE_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_7
+#define GCMC_VM_MARC_BASE_LO_7__MARC_BASE_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_7__MARC_BASE_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_8
+#define GCMC_VM_MARC_BASE_LO_8__MARC_BASE_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_8__MARC_BASE_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_9
+#define GCMC_VM_MARC_BASE_LO_9__MARC_BASE_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_9__MARC_BASE_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_10
+#define GCMC_VM_MARC_BASE_LO_10__MARC_BASE_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_10__MARC_BASE_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_11
+#define GCMC_VM_MARC_BASE_LO_11__MARC_BASE_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_11__MARC_BASE_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_12
+#define GCMC_VM_MARC_BASE_LO_12__MARC_BASE_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_12__MARC_BASE_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_13
+#define GCMC_VM_MARC_BASE_LO_13__MARC_BASE_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_13__MARC_BASE_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_14
+#define GCMC_VM_MARC_BASE_LO_14__MARC_BASE_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_14__MARC_BASE_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_LO_15
+#define GCMC_VM_MARC_BASE_LO_15__MARC_BASE_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_BASE_LO_15__MARC_BASE_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_BASE_HI_0
+#define GCMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_1
+#define GCMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_2
+#define GCMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_3
+#define GCMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_4
+#define GCMC_VM_MARC_BASE_HI_4__MARC_BASE_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_4__MARC_BASE_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_5
+#define GCMC_VM_MARC_BASE_HI_5__MARC_BASE_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_5__MARC_BASE_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_6
+#define GCMC_VM_MARC_BASE_HI_6__MARC_BASE_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_6__MARC_BASE_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_7
+#define GCMC_VM_MARC_BASE_HI_7__MARC_BASE_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_7__MARC_BASE_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_8
+#define GCMC_VM_MARC_BASE_HI_8__MARC_BASE_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_8__MARC_BASE_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_9
+#define GCMC_VM_MARC_BASE_HI_9__MARC_BASE_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_9__MARC_BASE_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_10
+#define GCMC_VM_MARC_BASE_HI_10__MARC_BASE_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_10__MARC_BASE_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_11
+#define GCMC_VM_MARC_BASE_HI_11__MARC_BASE_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_11__MARC_BASE_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_12
+#define GCMC_VM_MARC_BASE_HI_12__MARC_BASE_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_12__MARC_BASE_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_13
+#define GCMC_VM_MARC_BASE_HI_13__MARC_BASE_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_13__MARC_BASE_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_14
+#define GCMC_VM_MARC_BASE_HI_14__MARC_BASE_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_14__MARC_BASE_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_BASE_HI_15
+#define GCMC_VM_MARC_BASE_HI_15__MARC_BASE_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_BASE_HI_15__MARC_BASE_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_LO_0
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_1
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_2
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_3
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_4
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_ENABLE_4__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_READONLY_4__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_RELOC_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_ENABLE_4_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_READONLY_4_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_4__MARC_RELOC_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_5
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_ENABLE_5__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_READONLY_5__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_RELOC_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_ENABLE_5_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_READONLY_5_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_5__MARC_RELOC_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_6
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_ENABLE_6__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_READONLY_6__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_RELOC_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_ENABLE_6_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_READONLY_6_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_6__MARC_RELOC_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_7
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_ENABLE_7__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_READONLY_7__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_RELOC_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_ENABLE_7_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_READONLY_7_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_7__MARC_RELOC_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_8
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_ENABLE_8__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_READONLY_8__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_RELOC_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_ENABLE_8_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_READONLY_8_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_8__MARC_RELOC_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_9
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_ENABLE_9__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_READONLY_9__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_RELOC_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_ENABLE_9_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_READONLY_9_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_9__MARC_RELOC_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_10
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_ENABLE_10__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_READONLY_10__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_RELOC_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_ENABLE_10_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_READONLY_10_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_10__MARC_RELOC_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_11
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_ENABLE_11__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_READONLY_11__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_RELOC_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_ENABLE_11_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_READONLY_11_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_11__MARC_RELOC_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_12
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_ENABLE_12__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_READONLY_12__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_RELOC_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_ENABLE_12_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_READONLY_12_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_12__MARC_RELOC_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_13
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_ENABLE_13__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_READONLY_13__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_RELOC_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_ENABLE_13_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_READONLY_13_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_13__MARC_RELOC_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_14
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_ENABLE_14__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_READONLY_14__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_RELOC_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_ENABLE_14_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_READONLY_14_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_14__MARC_RELOC_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_LO_15
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_ENABLE_15__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_READONLY_15__SHIFT 0x1
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_RELOC_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_ENABLE_15_MASK 0x00000001L
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_READONLY_15_MASK 0x00000002L
+#define GCMC_VM_MARC_RELOC_LO_15__MARC_RELOC_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_RELOC_HI_0
+#define GCMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_1
+#define GCMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_2
+#define GCMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_3
+#define GCMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_4
+#define GCMC_VM_MARC_RELOC_HI_4__MARC_RELOC_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_4__MARC_RELOC_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_5
+#define GCMC_VM_MARC_RELOC_HI_5__MARC_RELOC_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_5__MARC_RELOC_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_6
+#define GCMC_VM_MARC_RELOC_HI_6__MARC_RELOC_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_6__MARC_RELOC_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_7
+#define GCMC_VM_MARC_RELOC_HI_7__MARC_RELOC_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_7__MARC_RELOC_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_8
+#define GCMC_VM_MARC_RELOC_HI_8__MARC_RELOC_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_8__MARC_RELOC_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_9
+#define GCMC_VM_MARC_RELOC_HI_9__MARC_RELOC_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_9__MARC_RELOC_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_10
+#define GCMC_VM_MARC_RELOC_HI_10__MARC_RELOC_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_10__MARC_RELOC_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_11
+#define GCMC_VM_MARC_RELOC_HI_11__MARC_RELOC_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_11__MARC_RELOC_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_12
+#define GCMC_VM_MARC_RELOC_HI_12__MARC_RELOC_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_12__MARC_RELOC_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_13
+#define GCMC_VM_MARC_RELOC_HI_13__MARC_RELOC_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_13__MARC_RELOC_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_14
+#define GCMC_VM_MARC_RELOC_HI_14__MARC_RELOC_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_14__MARC_RELOC_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_RELOC_HI_15
+#define GCMC_VM_MARC_RELOC_HI_15__MARC_RELOC_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_RELOC_HI_15__MARC_RELOC_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_LO_0
+#define GCMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_1
+#define GCMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_2
+#define GCMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_3
+#define GCMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_4
+#define GCMC_VM_MARC_LEN_LO_4__MARC_LEN_LO_4__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_4__MARC_LEN_LO_4_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_5
+#define GCMC_VM_MARC_LEN_LO_5__MARC_LEN_LO_5__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_5__MARC_LEN_LO_5_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_6
+#define GCMC_VM_MARC_LEN_LO_6__MARC_LEN_LO_6__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_6__MARC_LEN_LO_6_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_7
+#define GCMC_VM_MARC_LEN_LO_7__MARC_LEN_LO_7__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_7__MARC_LEN_LO_7_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_8
+#define GCMC_VM_MARC_LEN_LO_8__MARC_LEN_LO_8__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_8__MARC_LEN_LO_8_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_9
+#define GCMC_VM_MARC_LEN_LO_9__MARC_LEN_LO_9__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_9__MARC_LEN_LO_9_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_10
+#define GCMC_VM_MARC_LEN_LO_10__MARC_LEN_LO_10__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_10__MARC_LEN_LO_10_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_11
+#define GCMC_VM_MARC_LEN_LO_11__MARC_LEN_LO_11__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_11__MARC_LEN_LO_11_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_12
+#define GCMC_VM_MARC_LEN_LO_12__MARC_LEN_LO_12__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_12__MARC_LEN_LO_12_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_13
+#define GCMC_VM_MARC_LEN_LO_13__MARC_LEN_LO_13__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_13__MARC_LEN_LO_13_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_14
+#define GCMC_VM_MARC_LEN_LO_14__MARC_LEN_LO_14__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_14__MARC_LEN_LO_14_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_LO_15
+#define GCMC_VM_MARC_LEN_LO_15__MARC_LEN_LO_15__SHIFT 0xc
+#define GCMC_VM_MARC_LEN_LO_15__MARC_LEN_LO_15_MASK 0xFFFFF000L
+//GCMC_VM_MARC_LEN_HI_0
+#define GCMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_1
+#define GCMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_2
+#define GCMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_3
+#define GCMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_4
+#define GCMC_VM_MARC_LEN_HI_4__MARC_LEN_HI_4__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_4__MARC_LEN_HI_4_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_5
+#define GCMC_VM_MARC_LEN_HI_5__MARC_LEN_HI_5__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_5__MARC_LEN_HI_5_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_6
+#define GCMC_VM_MARC_LEN_HI_6__MARC_LEN_HI_6__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_6__MARC_LEN_HI_6_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_7
+#define GCMC_VM_MARC_LEN_HI_7__MARC_LEN_HI_7__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_7__MARC_LEN_HI_7_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_8
+#define GCMC_VM_MARC_LEN_HI_8__MARC_LEN_HI_8__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_8__MARC_LEN_HI_8_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_9
+#define GCMC_VM_MARC_LEN_HI_9__MARC_LEN_HI_9__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_9__MARC_LEN_HI_9_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_10
+#define GCMC_VM_MARC_LEN_HI_10__MARC_LEN_HI_10__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_10__MARC_LEN_HI_10_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_11
+#define GCMC_VM_MARC_LEN_HI_11__MARC_LEN_HI_11__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_11__MARC_LEN_HI_11_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_12
+#define GCMC_VM_MARC_LEN_HI_12__MARC_LEN_HI_12__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_12__MARC_LEN_HI_12_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_13
+#define GCMC_VM_MARC_LEN_HI_13__MARC_LEN_HI_13__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_13__MARC_LEN_HI_13_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_14
+#define GCMC_VM_MARC_LEN_HI_14__MARC_LEN_HI_14__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_14__MARC_LEN_HI_14_MASK 0x000FFFFFL
+//GCMC_VM_MARC_LEN_HI_15
+#define GCMC_VM_MARC_LEN_HI_15__MARC_LEN_HI_15__SHIFT 0x0
+#define GCMC_VM_MARC_LEN_HI_15__MARC_LEN_HI_15_MASK 0x000FFFFFL
+//GCMC_VM_MARC_PFVF_MAPPING_0
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_0__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_1
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_1__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_2
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_2__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_3
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_3__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_4
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_4__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_5
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_5__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_6
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_6__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_7
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_7__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_8
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_8__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_9
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_9__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_10
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_10__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_11
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_11__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_12
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_12__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_13
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_13__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_14
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_14__ENABLE_PF_MASK 0x00010000L
+//GCMC_VM_MARC_PFVF_MAPPING_15
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_VFS__SHIFT 0x0
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_PF__SHIFT 0x10
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_VFS_MASK 0x0000FFFFL
+#define GCMC_VM_MARC_PFVF_MAPPING_15__ENABLE_PF_MASK 0x00010000L
+//GCUTC_TRANSLATION_FAULT_CNTL0
+#define GCUTC_TRANSLATION_FAULT_CNTL0__DEFAULT_PHYSICAL_PAGE_ADDRESS_LSB__SHIFT 0x0
+#define GCUTC_TRANSLATION_FAULT_CNTL0__DEFAULT_PHYSICAL_PAGE_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//GCUTC_TRANSLATION_FAULT_CNTL1
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_PHYSICAL_PAGE_ADDRESS_MSB__SHIFT 0x0
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_IO__SHIFT 0x4
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SPA__SHIFT 0x5
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SNOOP__SHIFT 0x6
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_PHYSICAL_PAGE_ADDRESS_MSB_MASK 0x0000000FL
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_IO_MASK 0x00000010L
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SPA_MASK 0x00000020L
+#define GCUTC_TRANSLATION_FAULT_CNTL1__DEFAULT_SNOOP_MASK 0x00000040L
+
+
+// addressBlock: gc_gcl2tlbpspdec
+//GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL__ENABLE__SHIFT 0x0
+#define GCUTC_GPUVA_VMID_TRANSLATION_ASSIST_CNTL__ENABLE_MASK 0x00000001L
+
+
+// addressBlock: gc_shdec
+//SPI_SHADER_PGM_RSRC4_PS
+#define SPI_SHADER_PGM_RSRC4_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_PS__INST_PREF_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_PS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC4_PS__INST_PREF_SIZE_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_PS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_PS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_PGM_CHKSUM_PS
+#define SPI_SHADER_PGM_CHKSUM_PS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_PS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC3_PS
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_PS__LDS_GROUP_SIZE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_PS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_PS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_PS__LDS_GROUP_SIZE_MASK 0x00C00000L
+//SPI_SHADER_PGM_LO_PS
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_PS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_PS
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_PS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC1_PS
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_PS__MEM_ORDERED__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_PS__FWD_PROGRESS__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_PS__LOAD_PROVOKING_VTX__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_PS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_PS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_PS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_PS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_PS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_PS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_PS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_PS__CU_GROUP_DISABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_PS__MEM_ORDERED_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FWD_PROGRESS_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_PS__LOAD_PROVOKING_VTX_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_PS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_PS__FP16_OVFL_MASK 0x20000000L
+//SPI_SHADER_PGM_RSRC2_PS
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_PS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_PS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_PS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_PS__WAVE_CNT_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_PS__EXTRA_LDS_SIZE_MASK 0x0000FF00L
+#define SPI_SHADER_PGM_RSRC2_PS__EXCP_EN_MASK 0x01FF0000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_COLLISION_WAVEID_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC2_PS__LOAD_INTRAWAVE_COLLISION_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC2_PS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_PS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_PS_0
+#define SPI_SHADER_USER_DATA_PS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_1
+#define SPI_SHADER_USER_DATA_PS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_2
+#define SPI_SHADER_USER_DATA_PS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_3
+#define SPI_SHADER_USER_DATA_PS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_4
+#define SPI_SHADER_USER_DATA_PS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_5
+#define SPI_SHADER_USER_DATA_PS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_6
+#define SPI_SHADER_USER_DATA_PS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_7
+#define SPI_SHADER_USER_DATA_PS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_8
+#define SPI_SHADER_USER_DATA_PS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_9
+#define SPI_SHADER_USER_DATA_PS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_10
+#define SPI_SHADER_USER_DATA_PS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_11
+#define SPI_SHADER_USER_DATA_PS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_12
+#define SPI_SHADER_USER_DATA_PS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_13
+#define SPI_SHADER_USER_DATA_PS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_14
+#define SPI_SHADER_USER_DATA_PS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_15
+#define SPI_SHADER_USER_DATA_PS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_16
+#define SPI_SHADER_USER_DATA_PS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_17
+#define SPI_SHADER_USER_DATA_PS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_18
+#define SPI_SHADER_USER_DATA_PS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_19
+#define SPI_SHADER_USER_DATA_PS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_20
+#define SPI_SHADER_USER_DATA_PS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_21
+#define SPI_SHADER_USER_DATA_PS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_22
+#define SPI_SHADER_USER_DATA_PS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_23
+#define SPI_SHADER_USER_DATA_PS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_24
+#define SPI_SHADER_USER_DATA_PS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_25
+#define SPI_SHADER_USER_DATA_PS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_26
+#define SPI_SHADER_USER_DATA_PS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_27
+#define SPI_SHADER_USER_DATA_PS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_28
+#define SPI_SHADER_USER_DATA_PS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_29
+#define SPI_SHADER_USER_DATA_PS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_30
+#define SPI_SHADER_USER_DATA_PS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_PS_31
+#define SPI_SHADER_USER_DATA_PS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_PS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_REQ_CTRL_PS
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_PS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_PS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_PS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_PS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_PS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_PS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_PS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_PS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_PS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_PS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_PS_0
+#define SPI_SHADER_USER_ACCUM_PS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_1
+#define SPI_SHADER_USER_ACCUM_PS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_2
+#define SPI_SHADER_USER_ACCUM_PS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_PS_3
+#define SPI_SHADER_USER_ACCUM_PS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_PS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_CHKSUM_GS
+#define SPI_SHADER_PGM_CHKSUM_GS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_GS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_GS
+#define SPI_SHADER_PGM_RSRC4_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_GS__RESERVED__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC4_GS__PH_THROTTLE_EN__SHIFT 0xe
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_THROTTLE_EN__SHIFT 0xf
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_GS__INST_PREF_SIZE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_GS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_GS__CU_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC4_GS__RESERVED_MASK 0x00003FFEL
+#define SPI_SHADER_PGM_RSRC4_GS__PH_THROTTLE_EN_MASK 0x00004000L
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_THROTTLE_EN_MASK 0x00008000L
+#define SPI_SHADER_PGM_RSRC4_GS__SPI_SHADER_LATE_ALLOC_GS_MASK 0x007F0000L
+#define SPI_SHADER_PGM_RSRC4_GS__INST_PREF_SIZE_MASK 0x1F800000L
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_GS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_GS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_USER_DATA_ADDR_LO_GS
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_GS
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_ES_GS
+#define SPI_SHADER_PGM_LO_ES_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES_GS
+#define SPI_SHADER_PGM_HI_ES_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES_GS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_GS
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC3_GS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC3_GS__WAVE_LIMIT_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC3_GS__LOCK_LOW_THRESHOLD_MASK 0x03C00000L
+#define SPI_SHADER_PGM_RSRC3_GS__GROUP_FIFO_DEPTH_MASK 0xFC000000L
+//SPI_SHADER_PGM_LO_GS
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_GS
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_GS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC1_GS
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_GS__MEM_ORDERED__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_GS__FWD_PROGRESS__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_GS__WGP_MODE__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC1_GS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_GS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_GS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_GS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_GS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_GS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_GS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_GS__CU_GROUP_ENABLE_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_GS__MEM_ORDERED_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FWD_PROGRESS_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_GS__WGP_MODE_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_GS__CDBG_USER_MASK 0x10000000L
+#define SPI_SHADER_PGM_RSRC1_GS__GS_VGPR_COMP_CNT_MASK 0x60000000L
+#define SPI_SHADER_PGM_RSRC1_GS__FP16_OVFL_MASK 0x80000000L
+//SPI_SHADER_PGM_RSRC2_GS
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE__SHIFT 0x13
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_GS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_GS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_GS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_GS__EXCP_EN_MASK 0x0000FF80L
+#define SPI_SHADER_PGM_RSRC2_GS__ES_VGPR_COMP_CNT_MASK 0x00030000L
+#define SPI_SHADER_PGM_RSRC2_GS__OC_LDS_EN_MASK 0x00040000L
+#define SPI_SHADER_PGM_RSRC2_GS__LDS_SIZE_MASK 0x07F80000L
+#define SPI_SHADER_PGM_RSRC2_GS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_GS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_GS_0
+#define SPI_SHADER_USER_DATA_GS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_1
+#define SPI_SHADER_USER_DATA_GS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_2
+#define SPI_SHADER_USER_DATA_GS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_3
+#define SPI_SHADER_USER_DATA_GS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_4
+#define SPI_SHADER_USER_DATA_GS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_5
+#define SPI_SHADER_USER_DATA_GS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_6
+#define SPI_SHADER_USER_DATA_GS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_7
+#define SPI_SHADER_USER_DATA_GS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_8
+#define SPI_SHADER_USER_DATA_GS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_9
+#define SPI_SHADER_USER_DATA_GS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_10
+#define SPI_SHADER_USER_DATA_GS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_11
+#define SPI_SHADER_USER_DATA_GS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_12
+#define SPI_SHADER_USER_DATA_GS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_13
+#define SPI_SHADER_USER_DATA_GS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_14
+#define SPI_SHADER_USER_DATA_GS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_15
+#define SPI_SHADER_USER_DATA_GS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_16
+#define SPI_SHADER_USER_DATA_GS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_17
+#define SPI_SHADER_USER_DATA_GS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_18
+#define SPI_SHADER_USER_DATA_GS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_19
+#define SPI_SHADER_USER_DATA_GS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_20
+#define SPI_SHADER_USER_DATA_GS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_21
+#define SPI_SHADER_USER_DATA_GS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_22
+#define SPI_SHADER_USER_DATA_GS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_23
+#define SPI_SHADER_USER_DATA_GS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_24
+#define SPI_SHADER_USER_DATA_GS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_25
+#define SPI_SHADER_USER_DATA_GS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_26
+#define SPI_SHADER_USER_DATA_GS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_27
+#define SPI_SHADER_USER_DATA_GS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_28
+#define SPI_SHADER_USER_DATA_GS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_29
+#define SPI_SHADER_USER_DATA_GS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_30
+#define SPI_SHADER_USER_DATA_GS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_GS_31
+#define SPI_SHADER_USER_DATA_GS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_GS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_GS_MESHLET_DIM
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_X__SHIFT 0x0
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Y__SHIFT 0x8
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Z__SHIFT 0x10
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_THREADGROUP_SIZE__SHIFT 0x18
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_X_MASK 0x000000FFL
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Y_MASK 0x0000FF00L
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_NUM_THREAD_Z_MASK 0x00FF0000L
+#define SPI_SHADER_GS_MESHLET_DIM__MESHLET_THREADGROUP_SIZE_MASK 0xFF000000L
+//SPI_SHADER_GS_MESHLET_EXP_ALLOC
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_VERTS__SHIFT 0x0
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_PRIMS__SHIFT 0x9
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_VERTS_MASK 0x000001FFL
+#define SPI_SHADER_GS_MESHLET_EXP_ALLOC__MAX_EXP_PRIMS_MASK 0x0003FE00L
+//SPI_SHADER_REQ_CTRL_ESGS
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_ESGS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_ESGS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_ESGS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_ESGS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_ESGS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_ESGS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_ESGS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_ESGS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_ESGS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_ESGS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_ESGS_0
+#define SPI_SHADER_USER_ACCUM_ESGS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_1
+#define SPI_SHADER_USER_ACCUM_ESGS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_2
+#define SPI_SHADER_USER_ACCUM_ESGS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_ESGS_3
+#define SPI_SHADER_USER_ACCUM_ESGS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_ESGS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_LO_ES
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_ES__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_ES
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_ES__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_CHKSUM_HS
+#define SPI_SHADER_PGM_CHKSUM_HS__CHECKSUM__SHIFT 0x0
+#define SPI_SHADER_PGM_CHKSUM_HS__CHECKSUM_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC4_HS
+#define SPI_SHADER_PGM_RSRC4_HS__CU_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC4_HS__INST_PREF_SIZE__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_START__SHIFT 0x1d
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_END__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC4_HS__IMAGE_OP__SHIFT 0x1f
+#define SPI_SHADER_PGM_RSRC4_HS__CU_EN_MASK 0x0000FFFFL
+#define SPI_SHADER_PGM_RSRC4_HS__INST_PREF_SIZE_MASK 0x003F0000L
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_START_MASK 0x20000000L
+#define SPI_SHADER_PGM_RSRC4_HS__TRAP_ON_END_MASK 0x40000000L
+#define SPI_SHADER_PGM_RSRC4_HS__IMAGE_OP_MASK 0x80000000L
+//SPI_SHADER_USER_DATA_ADDR_LO_HS
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_ADDR_HI_HS
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_ADDR_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_LO_LS_HS
+#define SPI_SHADER_PGM_LO_LS_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS_HS
+#define SPI_SHADER_PGM_HI_LS_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS_HS__MEM_BASE_MASK 0xFFL
+//SPI_SHADER_PGM_RSRC3_HS
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN__SHIFT 0x10
+#define SPI_SHADER_PGM_RSRC3_HS__WAVE_LIMIT_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC3_HS__LOCK_LOW_THRESHOLD_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC3_HS__GROUP_FIFO_DEPTH_MASK 0x0000FC00L
+#define SPI_SHADER_PGM_RSRC3_HS__CU_EN_MASK 0xFFFF0000L
+//SPI_SHADER_PGM_LO_HS
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_HS
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_HS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_RSRC1_HS
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY__SHIFT 0xa
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE__SHIFT 0xc
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV__SHIFT 0x14
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP__SHIFT 0x15
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE__SHIFT 0x16
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE__SHIFT 0x17
+#define SPI_SHADER_PGM_RSRC1_HS__MEM_ORDERED__SHIFT 0x18
+#define SPI_SHADER_PGM_RSRC1_HS__FWD_PROGRESS__SHIFT 0x19
+#define SPI_SHADER_PGM_RSRC1_HS__WGP_MODE__SHIFT 0x1a
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL__SHIFT 0x1e
+#define SPI_SHADER_PGM_RSRC1_HS__VGPRS_MASK 0x0000003FL
+#define SPI_SHADER_PGM_RSRC1_HS__SGPRS_MASK 0x000003C0L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIORITY_MASK 0x00000C00L
+#define SPI_SHADER_PGM_RSRC1_HS__FLOAT_MODE_MASK 0x000FF000L
+#define SPI_SHADER_PGM_RSRC1_HS__PRIV_MASK 0x00100000L
+#define SPI_SHADER_PGM_RSRC1_HS__DX10_CLAMP_MASK 0x00200000L
+#define SPI_SHADER_PGM_RSRC1_HS__DEBUG_MODE_MASK 0x00400000L
+#define SPI_SHADER_PGM_RSRC1_HS__IEEE_MODE_MASK 0x00800000L
+#define SPI_SHADER_PGM_RSRC1_HS__MEM_ORDERED_MASK 0x01000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FWD_PROGRESS_MASK 0x02000000L
+#define SPI_SHADER_PGM_RSRC1_HS__WGP_MODE_MASK 0x04000000L
+#define SPI_SHADER_PGM_RSRC1_HS__CDBG_USER_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC1_HS__LS_VGPR_COMP_CNT_MASK 0x30000000L
+#define SPI_SHADER_PGM_RSRC1_HS__FP16_OVFL_MASK 0x40000000L
+//SPI_SHADER_PGM_RSRC2_HS
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN__SHIFT 0x0
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR__SHIFT 0x1
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT__SHIFT 0x6
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN__SHIFT 0x7
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN__SHIFT 0x8
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN__SHIFT 0x9
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE__SHIFT 0x12
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB__SHIFT 0x1b
+#define SPI_SHADER_PGM_RSRC2_HS__SHARED_VGPR_CNT__SHIFT 0x1c
+#define SPI_SHADER_PGM_RSRC2_HS__SCRATCH_EN_MASK 0x00000001L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MASK 0x0000003EL
+#define SPI_SHADER_PGM_RSRC2_HS__TRAP_PRESENT_MASK 0x00000040L
+#define SPI_SHADER_PGM_RSRC2_HS__OC_LDS_EN_MASK 0x00000080L
+#define SPI_SHADER_PGM_RSRC2_HS__TG_SIZE_EN_MASK 0x00000100L
+#define SPI_SHADER_PGM_RSRC2_HS__EXCP_EN_MASK 0x0003FE00L
+#define SPI_SHADER_PGM_RSRC2_HS__LDS_SIZE_MASK 0x07FC0000L
+#define SPI_SHADER_PGM_RSRC2_HS__USER_SGPR_MSB_MASK 0x08000000L
+#define SPI_SHADER_PGM_RSRC2_HS__SHARED_VGPR_CNT_MASK 0xF0000000L
+//SPI_SHADER_USER_DATA_HS_0
+#define SPI_SHADER_USER_DATA_HS_0__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_0__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_1
+#define SPI_SHADER_USER_DATA_HS_1__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_1__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_2
+#define SPI_SHADER_USER_DATA_HS_2__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_2__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_3
+#define SPI_SHADER_USER_DATA_HS_3__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_3__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_4
+#define SPI_SHADER_USER_DATA_HS_4__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_4__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_5
+#define SPI_SHADER_USER_DATA_HS_5__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_5__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_6
+#define SPI_SHADER_USER_DATA_HS_6__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_6__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_7
+#define SPI_SHADER_USER_DATA_HS_7__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_7__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_8
+#define SPI_SHADER_USER_DATA_HS_8__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_8__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_9
+#define SPI_SHADER_USER_DATA_HS_9__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_9__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_10
+#define SPI_SHADER_USER_DATA_HS_10__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_10__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_11
+#define SPI_SHADER_USER_DATA_HS_11__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_11__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_12
+#define SPI_SHADER_USER_DATA_HS_12__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_12__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_13
+#define SPI_SHADER_USER_DATA_HS_13__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_13__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_14
+#define SPI_SHADER_USER_DATA_HS_14__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_14__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_15
+#define SPI_SHADER_USER_DATA_HS_15__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_15__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_16
+#define SPI_SHADER_USER_DATA_HS_16__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_16__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_17
+#define SPI_SHADER_USER_DATA_HS_17__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_17__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_18
+#define SPI_SHADER_USER_DATA_HS_18__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_18__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_19
+#define SPI_SHADER_USER_DATA_HS_19__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_19__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_20
+#define SPI_SHADER_USER_DATA_HS_20__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_20__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_21
+#define SPI_SHADER_USER_DATA_HS_21__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_21__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_22
+#define SPI_SHADER_USER_DATA_HS_22__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_22__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_23
+#define SPI_SHADER_USER_DATA_HS_23__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_23__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_24
+#define SPI_SHADER_USER_DATA_HS_24__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_24__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_25
+#define SPI_SHADER_USER_DATA_HS_25__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_25__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_26
+#define SPI_SHADER_USER_DATA_HS_26__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_26__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_27
+#define SPI_SHADER_USER_DATA_HS_27__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_27__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_28
+#define SPI_SHADER_USER_DATA_HS_28__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_28__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_29
+#define SPI_SHADER_USER_DATA_HS_29__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_29__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_30
+#define SPI_SHADER_USER_DATA_HS_30__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_30__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_USER_DATA_HS_31
+#define SPI_SHADER_USER_DATA_HS_31__DATA__SHIFT 0x0
+#define SPI_SHADER_USER_DATA_HS_31__DATA_MASK 0xFFFFFFFFL
+//SPI_SHADER_REQ_CTRL_LSHS
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_EN__SHIFT 0x0
+#define SPI_SHADER_REQ_CTRL_LSHS__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define SPI_SHADER_REQ_CTRL_LSHS__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define SPI_SHADER_REQ_CTRL_LSHS__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define SPI_SHADER_REQ_CTRL_LSHS__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_EN_MASK 0x00000001L
+#define SPI_SHADER_REQ_CTRL_LSHS__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define SPI_SHADER_REQ_CTRL_LSHS__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define SPI_SHADER_REQ_CTRL_LSHS__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define SPI_SHADER_REQ_CTRL_LSHS__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define SPI_SHADER_REQ_CTRL_LSHS__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define SPI_SHADER_REQ_CTRL_LSHS__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+//SPI_SHADER_USER_ACCUM_LSHS_0
+#define SPI_SHADER_USER_ACCUM_LSHS_0__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_0__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_1
+#define SPI_SHADER_USER_ACCUM_LSHS_1__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_1__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_2
+#define SPI_SHADER_USER_ACCUM_LSHS_2__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_2__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_USER_ACCUM_LSHS_3
+#define SPI_SHADER_USER_ACCUM_LSHS_3__CONTRIBUTION__SHIFT 0x0
+#define SPI_SHADER_USER_ACCUM_LSHS_3__CONTRIBUTION_MASK 0x0000007FL
+//SPI_SHADER_PGM_LO_LS
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_LO_LS__MEM_BASE_MASK 0xFFFFFFFFL
+//SPI_SHADER_PGM_HI_LS
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE__SHIFT 0x0
+#define SPI_SHADER_PGM_HI_LS__MEM_BASE_MASK 0xFFL
+//COMPUTE_DISPATCH_INITIATOR
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN__SHIFT 0x0
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN__SHIFT 0x1
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000__SHIFT 0x2
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL__SHIFT 0x3
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE__SHIFT 0x4
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS__SHIFT 0x5
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE__SHIFT 0x6
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL__SHIFT 0xa
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL__SHIFT 0xb
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED__SHIFT 0xc
+#define COMPUTE_DISPATCH_INITIATOR__TUNNEL_ENABLE__SHIFT 0xd
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE__SHIFT 0xe
+#define COMPUTE_DISPATCH_INITIATOR__CS_W32_EN__SHIFT 0xf
+#define COMPUTE_DISPATCH_INITIATOR__AMP_SHADER_EN__SHIFT 0x10
+#define COMPUTE_DISPATCH_INITIATOR__DISABLE_DISP_PREMPT_EN__SHIFT 0x11
+#define COMPUTE_DISPATCH_INITIATOR__COMPUTE_SHADER_EN_MASK 0x00000001L
+#define COMPUTE_DISPATCH_INITIATOR__PARTIAL_TG_EN_MASK 0x00000002L
+#define COMPUTE_DISPATCH_INITIATOR__FORCE_START_AT_000_MASK 0x00000004L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_ENBL_MASK 0x00000008L
+#define COMPUTE_DISPATCH_INITIATOR__ORDERED_APPEND_MODE_MASK 0x00000010L
+#define COMPUTE_DISPATCH_INITIATOR__USE_THREAD_DIMENSIONS_MASK 0x00000020L
+#define COMPUTE_DISPATCH_INITIATOR__ORDER_MODE_MASK 0x00000040L
+#define COMPUTE_DISPATCH_INITIATOR__SCALAR_L1_INV_VOL_MASK 0x00000400L
+#define COMPUTE_DISPATCH_INITIATOR__VECTOR_L1_INV_VOL_MASK 0x00000800L
+#define COMPUTE_DISPATCH_INITIATOR__RESERVED_MASK 0x00001000L
+#define COMPUTE_DISPATCH_INITIATOR__TUNNEL_ENABLE_MASK 0x00002000L
+#define COMPUTE_DISPATCH_INITIATOR__RESTORE_MASK 0x00004000L
+#define COMPUTE_DISPATCH_INITIATOR__CS_W32_EN_MASK 0x00008000L
+#define COMPUTE_DISPATCH_INITIATOR__AMP_SHADER_EN_MASK 0x00010000L
+#define COMPUTE_DISPATCH_INITIATOR__DISABLE_DISP_PREMPT_EN_MASK 0x00020000L
+//COMPUTE_DIM_X
+#define COMPUTE_DIM_X__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_X__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Y
+#define COMPUTE_DIM_Y__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Y__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_DIM_Z
+#define COMPUTE_DIM_Z__SIZE__SHIFT 0x0
+#define COMPUTE_DIM_Z__SIZE_MASK 0xFFFFFFFFL
+//COMPUTE_START_X
+#define COMPUTE_START_X__START__SHIFT 0x0
+#define COMPUTE_START_X__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Y
+#define COMPUTE_START_Y__START__SHIFT 0x0
+#define COMPUTE_START_Y__START_MASK 0xFFFFFFFFL
+//COMPUTE_START_Z
+#define COMPUTE_START_Z__START__SHIFT 0x0
+#define COMPUTE_START_Z__START_MASK 0xFFFFFFFFL
+//COMPUTE_NUM_THREAD_X
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_X__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Y
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Y__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_NUM_THREAD_Z
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL__SHIFT 0x0
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL__SHIFT 0x10
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_FULL_MASK 0x0000FFFFL
+#define COMPUTE_NUM_THREAD_Z__NUM_THREAD_PARTIAL_MASK 0xFFFF0000L
+//COMPUTE_PIPELINESTAT_ENABLE
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE__SHIFT 0x0
+#define COMPUTE_PIPELINESTAT_ENABLE__PIPELINESTAT_ENABLE_MASK 0x00000001L
+//COMPUTE_PERFCOUNT_ENABLE
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE__SHIFT 0x0
+#define COMPUTE_PERFCOUNT_ENABLE__PERFCOUNT_ENABLE_MASK 0x00000001L
+//COMPUTE_PGM_LO
+#define COMPUTE_PGM_LO__DATA__SHIFT 0x0
+#define COMPUTE_PGM_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_PGM_HI
+#define COMPUTE_PGM_HI__DATA__SHIFT 0x0
+#define COMPUTE_PGM_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_PKT_ADDR_LO
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_PKT_ADDR_HI
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_PKT_ADDR_HI__DATA_MASK 0x000000FFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_LO
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_SCRATCH_BASE_HI
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//COMPUTE_PGM_RSRC1
+#define COMPUTE_PGM_RSRC1__VGPRS__SHIFT 0x0
+#define COMPUTE_PGM_RSRC1__SGPRS__SHIFT 0x6
+#define COMPUTE_PGM_RSRC1__PRIORITY__SHIFT 0xa
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE__SHIFT 0xc
+#define COMPUTE_PGM_RSRC1__PRIV__SHIFT 0x14
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP__SHIFT 0x15
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE__SHIFT 0x16
+#define COMPUTE_PGM_RSRC1__IEEE_MODE__SHIFT 0x17
+#define COMPUTE_PGM_RSRC1__BULKY__SHIFT 0x18
+#define COMPUTE_PGM_RSRC1__CDBG_USER__SHIFT 0x19
+#define COMPUTE_PGM_RSRC1__FP16_OVFL__SHIFT 0x1a
+#define COMPUTE_PGM_RSRC1__WGP_MODE__SHIFT 0x1d
+#define COMPUTE_PGM_RSRC1__MEM_ORDERED__SHIFT 0x1e
+#define COMPUTE_PGM_RSRC1__FWD_PROGRESS__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC1__VGPRS_MASK 0x0000003FL
+#define COMPUTE_PGM_RSRC1__SGPRS_MASK 0x000003C0L
+#define COMPUTE_PGM_RSRC1__PRIORITY_MASK 0x00000C00L
+#define COMPUTE_PGM_RSRC1__FLOAT_MODE_MASK 0x000FF000L
+#define COMPUTE_PGM_RSRC1__PRIV_MASK 0x00100000L
+#define COMPUTE_PGM_RSRC1__DX10_CLAMP_MASK 0x00200000L
+#define COMPUTE_PGM_RSRC1__DEBUG_MODE_MASK 0x00400000L
+#define COMPUTE_PGM_RSRC1__IEEE_MODE_MASK 0x00800000L
+#define COMPUTE_PGM_RSRC1__BULKY_MASK 0x01000000L
+#define COMPUTE_PGM_RSRC1__CDBG_USER_MASK 0x02000000L
+#define COMPUTE_PGM_RSRC1__FP16_OVFL_MASK 0x04000000L
+#define COMPUTE_PGM_RSRC1__WGP_MODE_MASK 0x20000000L
+#define COMPUTE_PGM_RSRC1__MEM_ORDERED_MASK 0x40000000L
+#define COMPUTE_PGM_RSRC1__FWD_PROGRESS_MASK 0x80000000L
+//COMPUTE_PGM_RSRC2
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN__SHIFT 0x0
+#define COMPUTE_PGM_RSRC2__USER_SGPR__SHIFT 0x1
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT 0x6
+#define COMPUTE_PGM_RSRC2__TGID_X_EN__SHIFT 0x7
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN__SHIFT 0x8
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN__SHIFT 0x9
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN__SHIFT 0xa
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT__SHIFT 0xb
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB__SHIFT 0xd
+#define COMPUTE_PGM_RSRC2__LDS_SIZE__SHIFT 0xf
+#define COMPUTE_PGM_RSRC2__EXCP_EN__SHIFT 0x18
+#define COMPUTE_PGM_RSRC2__SCRATCH_EN_MASK 0x00000001L
+#define COMPUTE_PGM_RSRC2__USER_SGPR_MASK 0x0000003EL
+#define COMPUTE_PGM_RSRC2__TRAP_PRESENT_MASK 0x00000040L
+#define COMPUTE_PGM_RSRC2__TGID_X_EN_MASK 0x00000080L
+#define COMPUTE_PGM_RSRC2__TGID_Y_EN_MASK 0x00000100L
+#define COMPUTE_PGM_RSRC2__TGID_Z_EN_MASK 0x00000200L
+#define COMPUTE_PGM_RSRC2__TG_SIZE_EN_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC2__TIDIG_COMP_CNT_MASK 0x00001800L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MSB_MASK 0x00006000L
+#define COMPUTE_PGM_RSRC2__LDS_SIZE_MASK 0x00FF8000L
+#define COMPUTE_PGM_RSRC2__EXCP_EN_MASK 0x7F000000L
+//COMPUTE_VMID
+#define COMPUTE_VMID__DATA__SHIFT 0x0
+#define COMPUTE_VMID__DATA_MASK 0x0000000FL
+//COMPUTE_RESOURCE_LIMITS
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH__SHIFT 0x0
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU__SHIFT 0xc
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD__SHIFT 0x10
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL__SHIFT 0x16
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST__SHIFT 0x17
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT__SHIFT 0x18
+#define COMPUTE_RESOURCE_LIMITS__WAVES_PER_SH_MASK 0x000003FFL
+#define COMPUTE_RESOURCE_LIMITS__TG_PER_CU_MASK 0x0000F000L
+#define COMPUTE_RESOURCE_LIMITS__LOCK_THRESHOLD_MASK 0x003F0000L
+#define COMPUTE_RESOURCE_LIMITS__SIMD_DEST_CNTL_MASK 0x00400000L
+#define COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK 0x00800000L
+#define COMPUTE_RESOURCE_LIMITS__CU_GROUP_COUNT_MASK 0x07000000L
+//COMPUTE_DESTINATION_EN_SE0
+#define COMPUTE_DESTINATION_EN_SE0__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE0__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE0__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DESTINATION_EN_SE1
+#define COMPUTE_DESTINATION_EN_SE1__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE1__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE1
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE1__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_TMPRING_SIZE
+#define COMPUTE_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define COMPUTE_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define COMPUTE_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define COMPUTE_TMPRING_SIZE__WAVESIZE_MASK 0x07FFF000L
+//COMPUTE_DESTINATION_EN_SE2
+#define COMPUTE_DESTINATION_EN_SE2__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE2__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE2
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE2__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DESTINATION_EN_SE3
+#define COMPUTE_DESTINATION_EN_SE3__CU_EN__SHIFT 0x0
+#define COMPUTE_DESTINATION_EN_SE3__CU_EN_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE3
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE3__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_RESTART_X
+#define COMPUTE_RESTART_X__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_X__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Y
+#define COMPUTE_RESTART_Y__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Y__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_RESTART_Z
+#define COMPUTE_RESTART_Z__RESTART__SHIFT 0x0
+#define COMPUTE_RESTART_Z__RESTART_MASK 0xFFFFFFFFL
+//COMPUTE_THREAD_TRACE_ENABLE
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE__SHIFT 0x0
+#define COMPUTE_THREAD_TRACE_ENABLE__THREAD_TRACE_ENABLE_MASK 0x00000001L
+//COMPUTE_MISC_RESERVED
+#define COMPUTE_MISC_RESERVED__SEND_SEID__SHIFT 0x0
+#define COMPUTE_MISC_RESERVED__RESERVED3__SHIFT 0x3
+#define COMPUTE_MISC_RESERVED__RESERVED4__SHIFT 0x4
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE__SHIFT 0x5
+#define COMPUTE_MISC_RESERVED__SEND_SEID_MASK 0x00000007L
+#define COMPUTE_MISC_RESERVED__RESERVED3_MASK 0x00000008L
+#define COMPUTE_MISC_RESERVED__RESERVED4_MASK 0x00000010L
+#define COMPUTE_MISC_RESERVED__WAVE_ID_BASE_MASK 0x0001FFE0L
+//COMPUTE_DISPATCH_ID
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID__SHIFT 0x0
+#define COMPUTE_DISPATCH_ID__DISPATCH_ID_MASK 0xFFFFFFFFL
+//COMPUTE_THREADGROUP_ID
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID__SHIFT 0x0
+#define COMPUTE_THREADGROUP_ID__THREADGROUP_ID_MASK 0xFFFFFFFFL
+//COMPUTE_REQ_CTRL
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_EN__SHIFT 0x0
+#define COMPUTE_REQ_CTRL__NUMBER_OF_REQUESTS_PER_CU__SHIFT 0x1
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_ALLOCATION_TIMEOUT__SHIFT 0x5
+#define COMPUTE_REQ_CTRL__HARD_LOCK_HYSTERESIS__SHIFT 0x9
+#define COMPUTE_REQ_CTRL__HARD_LOCK_LOW_THRESHOLD__SHIFT 0xa
+#define COMPUTE_REQ_CTRL__PRODUCER_REQUEST_LOCKOUT__SHIFT 0xf
+#define COMPUTE_REQ_CTRL__GLOBAL_SCANNING_EN__SHIFT 0x10
+#define COMPUTE_REQ_CTRL__ALLOCATION_RATE_THROTTLING_THRESHOLD__SHIFT 0x11
+#define COMPUTE_REQ_CTRL__DEDICATED_PREALLOCATION_BUFFER_LIMIT__SHIFT 0x14
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_EN_MASK 0x00000001L
+#define COMPUTE_REQ_CTRL__NUMBER_OF_REQUESTS_PER_CU_MASK 0x0000001EL
+#define COMPUTE_REQ_CTRL__SOFT_GROUPING_ALLOCATION_TIMEOUT_MASK 0x000001E0L
+#define COMPUTE_REQ_CTRL__HARD_LOCK_HYSTERESIS_MASK 0x00000200L
+#define COMPUTE_REQ_CTRL__HARD_LOCK_LOW_THRESHOLD_MASK 0x00007C00L
+#define COMPUTE_REQ_CTRL__PRODUCER_REQUEST_LOCKOUT_MASK 0x00008000L
+#define COMPUTE_REQ_CTRL__GLOBAL_SCANNING_EN_MASK 0x00010000L
+#define COMPUTE_REQ_CTRL__ALLOCATION_RATE_THROTTLING_THRESHOLD_MASK 0x000E0000L
+#define COMPUTE_REQ_CTRL__DEDICATED_PREALLOCATION_BUFFER_LIMIT_MASK 0x07F00000L
+//COMPUTE_USER_ACCUM_0
+#define COMPUTE_USER_ACCUM_0__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_0__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_1
+#define COMPUTE_USER_ACCUM_1__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_1__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_2
+#define COMPUTE_USER_ACCUM_2__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_2__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_USER_ACCUM_3
+#define COMPUTE_USER_ACCUM_3__CONTRIBUTION__SHIFT 0x0
+#define COMPUTE_USER_ACCUM_3__CONTRIBUTION_MASK 0x0000007FL
+//COMPUTE_PGM_RSRC3
+#define COMPUTE_PGM_RSRC3__SHARED_VGPR_CNT__SHIFT 0x0
+#define COMPUTE_PGM_RSRC3__INST_PREF_SIZE__SHIFT 0x4
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START__SHIFT 0xa
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END__SHIFT 0xb
+#define COMPUTE_PGM_RSRC3__IMAGE_OP__SHIFT 0x1f
+#define COMPUTE_PGM_RSRC3__SHARED_VGPR_CNT_MASK 0x0000000FL
+#define COMPUTE_PGM_RSRC3__INST_PREF_SIZE_MASK 0x000003F0L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_START_MASK 0x00000400L
+#define COMPUTE_PGM_RSRC3__TRAP_ON_END_MASK 0x00000800L
+#define COMPUTE_PGM_RSRC3__IMAGE_OP_MASK 0x80000000L
+//COMPUTE_DDID_INDEX
+#define COMPUTE_DDID_INDEX__INDEX__SHIFT 0x0
+#define COMPUTE_DDID_INDEX__INDEX_MASK 0x000007FFL
+//COMPUTE_SHADER_CHKSUM
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM__SHIFT 0x0
+#define COMPUTE_SHADER_CHKSUM__CHECKSUM_MASK 0xFFFFFFFFL
+//COMPUTE_STATIC_THREAD_MGMT_SE4
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE4__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE5
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE5__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE6
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE6__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_STATIC_THREAD_MGMT_SE7
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA0_CU_EN__SHIFT 0x0
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA1_CU_EN__SHIFT 0x10
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA0_CU_EN_MASK 0x0000FFFFL
+#define COMPUTE_STATIC_THREAD_MGMT_SE7__SA1_CU_EN_MASK 0xFFFF0000L
+//COMPUTE_DISPATCH_INTERLEAVE
+#define COMPUTE_DISPATCH_INTERLEAVE__INTERLEAVE__SHIFT 0x0
+#define COMPUTE_DISPATCH_INTERLEAVE__INTERLEAVE_MASK 0x000003FFL
+//COMPUTE_RELAUNCH
+#define COMPUTE_RELAUNCH__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH__IS_STATE_MASK 0x80000000L
+//COMPUTE_WAVE_RESTORE_ADDR_LO
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_LO__ADDR_MASK 0xFFFFFFFFL
+//COMPUTE_WAVE_RESTORE_ADDR_HI
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR__SHIFT 0x0
+#define COMPUTE_WAVE_RESTORE_ADDR_HI__ADDR_MASK 0xFFFFL
+//COMPUTE_RELAUNCH2
+#define COMPUTE_RELAUNCH2__PAYLOAD__SHIFT 0x0
+#define COMPUTE_RELAUNCH2__IS_EVENT__SHIFT 0x1e
+#define COMPUTE_RELAUNCH2__IS_STATE__SHIFT 0x1f
+#define COMPUTE_RELAUNCH2__PAYLOAD_MASK 0x3FFFFFFFL
+#define COMPUTE_RELAUNCH2__IS_EVENT_MASK 0x40000000L
+#define COMPUTE_RELAUNCH2__IS_STATE_MASK 0x80000000L
+//COMPUTE_USER_DATA_0
+#define COMPUTE_USER_DATA_0__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_0__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_1
+#define COMPUTE_USER_DATA_1__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_1__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_2
+#define COMPUTE_USER_DATA_2__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_2__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_3
+#define COMPUTE_USER_DATA_3__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_3__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_4
+#define COMPUTE_USER_DATA_4__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_4__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_5
+#define COMPUTE_USER_DATA_5__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_5__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_6
+#define COMPUTE_USER_DATA_6__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_6__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_7
+#define COMPUTE_USER_DATA_7__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_7__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_8
+#define COMPUTE_USER_DATA_8__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_8__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_9
+#define COMPUTE_USER_DATA_9__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_9__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_10
+#define COMPUTE_USER_DATA_10__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_10__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_11
+#define COMPUTE_USER_DATA_11__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_11__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_12
+#define COMPUTE_USER_DATA_12__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_12__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_13
+#define COMPUTE_USER_DATA_13__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_13__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_14
+#define COMPUTE_USER_DATA_14__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_14__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_USER_DATA_15
+#define COMPUTE_USER_DATA_15__DATA__SHIFT 0x0
+#define COMPUTE_USER_DATA_15__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_DISPATCH_TUNNEL
+#define COMPUTE_DISPATCH_TUNNEL__OFF_DELAY__SHIFT 0x0
+#define COMPUTE_DISPATCH_TUNNEL__IMMEDIATE__SHIFT 0xa
+#define COMPUTE_DISPATCH_TUNNEL__OFF_DELAY_MASK 0x000003FFL
+#define COMPUTE_DISPATCH_TUNNEL__IMMEDIATE_MASK 0x00000400L
+//COMPUTE_DISPATCH_END
+#define COMPUTE_DISPATCH_END__DATA__SHIFT 0x0
+#define COMPUTE_DISPATCH_END__DATA_MASK 0xFFFFFFFFL
+//COMPUTE_NOWHERE
+#define COMPUTE_NOWHERE__DATA__SHIFT 0x0
+#define COMPUTE_NOWHERE__DATA_MASK 0xFFFFFFFFL
+//SH_RESERVED_REG0
+#define SH_RESERVED_REG0__DATA__SHIFT 0x0
+#define SH_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//SH_RESERVED_REG1
+#define SH_RESERVED_REG1__DATA__SHIFT 0x0
+#define SH_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cppdec
+//CP_CU_MASK_ADDR_LO
+#define CP_CU_MASK_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_CU_MASK_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_CU_MASK_ADDR_HI
+#define CP_CU_MASK_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_CU_MASK_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_CU_MASK_CNTL
+#define CP_CU_MASK_CNTL__POLICY__SHIFT 0x0
+#define CP_CU_MASK_CNTL__POLICY_MASK 0x00000001L
+//CP_EOPQ_WAIT_TIME
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME__SHIFT 0x0
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT__SHIFT 0xa
+#define CP_EOPQ_WAIT_TIME__WAIT_TIME_MASK 0x000003FFL
+#define CP_EOPQ_WAIT_TIME__SCALE_COUNT_MASK 0x0003FC00L
+//CP_CPC_MGCG_SYNC_CNTL
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD__SHIFT 0x0
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD__SHIFT 0x8
+#define CP_CPC_MGCG_SYNC_CNTL__COOLDOWN_PERIOD_MASK 0x000000FFL
+#define CP_CPC_MGCG_SYNC_CNTL__WARMUP_PERIOD_MASK 0x0000FF00L
+//CPC_INT_INFO
+#define CPC_INT_INFO__ADDR_HI__SHIFT 0x0
+#define CPC_INT_INFO__TYPE__SHIFT 0x10
+#define CPC_INT_INFO__VMID__SHIFT 0x14
+#define CPC_INT_INFO__QUEUE_ID__SHIFT 0x1c
+#define CPC_INT_INFO__ADDR_HI_MASK 0x0000FFFFL
+#define CPC_INT_INFO__TYPE_MASK 0x00010000L
+#define CPC_INT_INFO__VMID_MASK 0x00F00000L
+#define CPC_INT_INFO__QUEUE_ID_MASK 0x70000000L
+//CP_VIRT_STATUS
+#define CP_VIRT_STATUS__VIRT_STATUS__SHIFT 0x0
+#define CP_VIRT_STATUS__VIRT_STATUS_MASK 0xFFFFFFFFL
+//CPC_INT_ADDR
+#define CPC_INT_ADDR__ADDR__SHIFT 0x0
+#define CPC_INT_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CPC_INT_PASID
+#define CPC_INT_PASID__PASID__SHIFT 0x0
+#define CPC_INT_PASID__BYPASS_PASID__SHIFT 0x10
+#define CPC_INT_PASID__PASID_MASK 0x0000FFFFL
+#define CPC_INT_PASID__BYPASS_PASID_MASK 0x00010000L
+//CP_GFX_ERROR
+#define CP_GFX_ERROR__ME_INSTR_CACHE_UTCL1_ERROR__SHIFT 0x0
+#define CP_GFX_ERROR__PFP_INSTR_CACHE_UTCL1_ERROR__SHIFT 0x1
+#define CP_GFX_ERROR__DDID_DRAW_UTCL1_ERROR__SHIFT 0x2
+#define CP_GFX_ERROR__DDID_DISPATCH_UTCL1_ERROR__SHIFT 0x3
+#define CP_GFX_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_GFX_ERROR__DATA_FETCHER_UTCL1_ERROR__SHIFT 0x6
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR__SHIFT 0x7
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR__SHIFT 0x9
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR__SHIFT 0xa
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR__SHIFT 0xb
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR__SHIFT 0xc
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR__SHIFT 0xd
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR__SHIFT 0xe
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR__SHIFT 0xf
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0x12
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x13
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR__SHIFT 0x14
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR__SHIFT 0x15
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR__SHIFT 0x17
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR__SHIFT 0x18
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR__SHIFT 0x19
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR__SHIFT 0x1a
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR__SHIFT 0x1b
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR__SHIFT 0x1e
+#define CP_GFX_ERROR__RESERVED__SHIFT 0x1f
+#define CP_GFX_ERROR__ME_INSTR_CACHE_UTCL1_ERROR_MASK 0x00000001L
+#define CP_GFX_ERROR__PFP_INSTR_CACHE_UTCL1_ERROR_MASK 0x00000002L
+#define CP_GFX_ERROR__DDID_DRAW_UTCL1_ERROR_MASK 0x00000004L
+#define CP_GFX_ERROR__DDID_DISPATCH_UTCL1_ERROR_MASK 0x00000008L
+#define CP_GFX_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_GFX_ERROR__DATA_FETCHER_UTCL1_ERROR_MASK 0x00000040L
+#define CP_GFX_ERROR__SEM_UTCL1_ERROR_MASK 0x00000080L
+#define CP_GFX_ERROR__QU_EOP_UTCL1_ERROR_MASK 0x00000200L
+#define CP_GFX_ERROR__QU_PIPE_UTCL1_ERROR_MASK 0x00000400L
+#define CP_GFX_ERROR__QU_READ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_GFX_ERROR__SYNC_MEMRD_UTCL1_ERROR_MASK 0x00001000L
+#define CP_GFX_ERROR__SYNC_MEMWR_UTCL1_ERROR_MASK 0x00002000L
+#define CP_GFX_ERROR__SHADOW_UTCL1_ERROR_MASK 0x00004000L
+#define CP_GFX_ERROR__APPEND_UTCL1_ERROR_MASK 0x00008000L
+#define CP_GFX_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00040000L
+#define CP_GFX_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00080000L
+#define CP_GFX_ERROR__PFP_TC_UTCL1_ERROR_MASK 0x00100000L
+#define CP_GFX_ERROR__ME_TC_UTCL1_ERROR_MASK 0x00200000L
+#define CP_GFX_ERROR__PRT_LOD_UTCL1_ERROR_MASK 0x00800000L
+#define CP_GFX_ERROR__RDPTR_RPT_UTCL1_ERROR_MASK 0x01000000L
+#define CP_GFX_ERROR__RB_FETCHER_UTCL1_ERROR_MASK 0x02000000L
+#define CP_GFX_ERROR__I1_FETCHER_UTCL1_ERROR_MASK 0x04000000L
+#define CP_GFX_ERROR__I2_FETCHER_UTCL1_ERROR_MASK 0x08000000L
+#define CP_GFX_ERROR__ST_FETCHER_UTCL1_ERROR_MASK 0x40000000L
+#define CP_GFX_ERROR__RESERVED_MASK 0x80000000L
+//CPG_UTCL1_CNTL
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPG_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPG_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPG_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPG_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPG_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPG_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPG_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPG_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPG_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPG_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPG_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPC_UTCL1_CNTL
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPC_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPC_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPC_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPC_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPC_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPC_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPC_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPC_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPC_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPC_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+//CPF_UTCL1_CNTL
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE__SHIFT 0x17
+#define CPF_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define CPF_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define CPF_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x1d
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x1e
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE__SHIFT 0x1f
+#define CPF_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define CPF_UTCL1_CNTL__VMID_RESET_MODE_MASK 0x00800000L
+#define CPF_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define CPF_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define CPF_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define CPF_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define CPF_UTCL1_CNTL__IGNORE_PTE_PERMISSION_MASK 0x20000000L
+#define CPF_UTCL1_CNTL__MTYPE_NO_PTE_MODE_MASK 0x40000000L
+#define CPF_UTCL1_CNTL__FORCE_NO_EXE_MASK 0x80000000L
+//CP_AQL_SMM_STATUS
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM__SHIFT 0x0
+#define CP_AQL_SMM_STATUS__AQL_QUEUE_SMM_MASK 0xFFFFFFFFL
+//CP_RB0_BASE
+#define CP_RB0_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB0_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB_BASE
+#define CP_RB_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB0_CNTL
+#define CP_RB0_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB0_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB0_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB0_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB0_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB0_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB0_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB0_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB0_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB0_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB0_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB0_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB0_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB0_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB0_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB0_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB0_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB0_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB0_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB0_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB0_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB0_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB0_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_CNTL
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB_RPTR_WR
+#define CP_RB_RPTR_WR__RB_RPTR_WR__SHIFT 0x0
+#define CP_RB_RPTR_WR__RB_RPTR_WR_MASK 0x000FFFFFL
+//CP_RB0_RPTR_ADDR
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB0_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB_RPTR_ADDR
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB0_RPTR_ADDR_HI
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB0_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_RPTR_ADDR_HI
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB0_BUFSZ_MASK
+#define CP_RB0_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB0_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_RB_BUFSZ_MASK
+#define CP_RB_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//GC_PRIV_MODE
+#define GC_PRIV_MODE__MC_PRIV_MODE__SHIFT 0x0
+#define GC_PRIV_MODE__MC_PRIV_MODE_MASK 0x00000001L
+//CP_INT_CNTL
+#define CP_INT_CNTL__RESUME_INT_ENABLE__SHIFT 0x8
+#define CP_INT_CNTL__SUSPEND_INT_ENABLE__SHIFT 0x9
+#define CP_INT_CNTL__DMA_WATCH_INT_ENABLE__SHIFT 0xa
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL__RESUME_INT_ENABLE_MASK 0x00000100L
+#define CP_INT_CNTL__SUSPEND_INT_ENABLE_MASK 0x00000200L
+#define CP_INT_CNTL__DMA_WATCH_INT_ENABLE_MASK 0x00000400L
+#define CP_INT_CNTL__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS
+#define CP_INT_STATUS__RESUME_INT_STAT__SHIFT 0x8
+#define CP_INT_STATUS__SUSPEND_INT_STAT__SHIFT 0x9
+#define CP_INT_STATUS__DMA_WATCH_INT_STAT__SHIFT 0xa
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS__RESUME_INT_STAT_MASK 0x00000100L
+#define CP_INT_STATUS__SUSPEND_INT_STAT_MASK 0x00000200L
+#define CP_INT_STATUS__DMA_WATCH_INT_STAT_MASK 0x00000400L
+#define CP_INT_STATUS__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS__CNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_DEVICE_ID
+#define CP_DEVICE_ID__DEVICE_ID__SHIFT 0x0
+#define CP_DEVICE_ID__DEVICE_ID_MASK 0x000000FFL
+//CP_ME0_PIPE_PRIORITY_CNTS
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME0_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_RING_PRIORITY_CNTS
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_RING_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_RING_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_RING_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_RING_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME0_PIPE0_PRIORITY
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING0_PRIORITY
+#define CP_RING0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME0_PIPE1_PRIORITY
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME0_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_RING1_PRIORITY
+#define CP_RING1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_RING1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_FATAL_ERROR
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR__SHIFT 0x0
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR__SHIFT 0x1
+#define CP_FATAL_ERROR__GFX_HALT_PROC__SHIFT 0x2
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR__SHIFT 0x3
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN__SHIFT 0x4
+#define CP_FATAL_ERROR__CPF_FATAL_ERROR_MASK 0x00000001L
+#define CP_FATAL_ERROR__CPG_FATAL_ERROR_MASK 0x00000002L
+#define CP_FATAL_ERROR__GFX_HALT_PROC_MASK 0x00000004L
+#define CP_FATAL_ERROR__DIS_CPG_FATAL_ERROR_MASK 0x00000008L
+#define CP_FATAL_ERROR__CPG_TAG_FATAL_ERROR_EN_MASK 0x00000010L
+//CP_RB_VMID
+#define CP_RB_VMID__RB0_VMID__SHIFT 0x0
+#define CP_RB_VMID__RB1_VMID__SHIFT 0x8
+#define CP_RB_VMID__RB2_VMID__SHIFT 0x10
+#define CP_RB_VMID__RB0_VMID_MASK 0x0000000FL
+#define CP_RB_VMID__RB1_VMID_MASK 0x00000F00L
+#define CP_RB_VMID__RB2_VMID_MASK 0x000F0000L
+//CP_ME0_PIPE0_VMID
+#define CP_ME0_PIPE0_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE0_VMID__VMID_MASK 0x0000000FL
+//CP_ME0_PIPE1_VMID
+#define CP_ME0_PIPE1_VMID__VMID__SHIFT 0x0
+#define CP_ME0_PIPE1_VMID__VMID_MASK 0x0000000FL
+//CP_RB0_WPTR
+#define CP_RB0_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR
+#define CP_RB_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB0_WPTR_HI
+#define CP_RB0_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB0_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB_WPTR_HI
+#define CP_RB_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR
+#define CP_RB1_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_RB1_WPTR_HI
+#define CP_RB1_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_RB1_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_PROCESS_QUANTUM
+#define CP_PROCESS_QUANTUM__QUANTUM_DURATION__SHIFT 0x0
+#define CP_PROCESS_QUANTUM__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_PROCESS_QUANTUM__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_PROCESS_QUANTUM__QUANTUM_EN__SHIFT 0x1f
+#define CP_PROCESS_QUANTUM__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_PROCESS_QUANTUM__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_PROCESS_QUANTUM__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_PROCESS_QUANTUM__QUANTUM_EN_MASK 0x80000000L
+//CP_RB_DOORBELL_RANGE_LOWER
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x00000FFCL
+//CP_RB_DOORBELL_RANGE_UPPER
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x00000FFCL
+//CP_MEC_DOORBELL_RANGE_LOWER
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_LOWER__DOORBELL_RANGE_LOWER_MASK 0x00000FFCL
+//CP_MEC_DOORBELL_RANGE_UPPER
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER__SHIFT 0x2
+#define CP_MEC_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK 0x00000FFCL
+//CPG_UTCL1_ERROR
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPG_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CPC_UTCL1_ERROR
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT__SHIFT 0x0
+#define CPC_UTCL1_ERROR__ERROR_DETECTED_HALT_MASK 0x00000001L
+//CP_RB1_BASE
+#define CP_RB1_BASE__RB_BASE__SHIFT 0x0
+#define CP_RB1_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_RB1_CNTL
+#define CP_RB1_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_RB1_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_RB1_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_RB1_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_RB1_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_RB1_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_RB1_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_RB1_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_RB1_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_RB1_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_RB1_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_RB1_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_RB1_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_RB1_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_RB1_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_RB1_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_RB1_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_RB1_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_RB1_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_RB1_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_RB1_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_RB1_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_RB1_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_RB1_RPTR_ADDR
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_RB1_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_RB1_RPTR_ADDR_HI
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB1_BUFSZ_MASK
+#define CP_RB1_BUFSZ_MASK__DATA__SHIFT 0x0
+#define CP_RB1_BUFSZ_MASK__DATA_MASK 0x000FFFFFL
+//CP_INT_CNTL_RING0
+#define CP_INT_CNTL_RING0__RESUME_INT_ENABLE__SHIFT 0x8
+#define CP_INT_CNTL_RING0__SUSPEND_INT_ENABLE__SHIFT 0x9
+#define CP_INT_CNTL_RING0__DMA_WATCH_INT_ENABLE__SHIFT 0xa
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE__SHIFT 0xb
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE__SHIFT 0x12
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE__SHIFT 0x13
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE__SHIFT 0x14
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE__SHIFT 0x15
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING0__RESUME_INT_ENABLE_MASK 0x00000100L
+#define CP_INT_CNTL_RING0__SUSPEND_INT_ENABLE_MASK 0x00000200L
+#define CP_INT_CNTL_RING0__DMA_WATCH_INT_ENABLE_MASK 0x00000400L
+#define CP_INT_CNTL_RING0__CP_VM_DOORBELL_WR_INT_ENABLE_MASK 0x00000800L
+#define CP_INT_CNTL_RING0__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING0__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING0__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING0__CMP_BUSY_INT_ENABLE_MASK 0x00040000L
+#define CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK 0x00080000L
+#define CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK 0x00100000L
+#define CP_INT_CNTL_RING0__GFX_IDLE_INT_ENABLE_MASK 0x00200000L
+#define CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING0__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING0__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING0__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING0__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_CNTL_RING1
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE__SHIFT 0x16
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_INT_CNTL_RING1__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_INT_CNTL_RING1__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_INT_CNTL_RING1__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_INT_CNTL_RING1__PRIV_INSTR_INT_ENABLE_MASK 0x00400000L
+#define CP_INT_CNTL_RING1__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_INT_CNTL_RING1__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_INT_CNTL_RING1__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_INT_CNTL_RING1__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_INT_CNTL_RING1__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_INT_CNTL_RING1__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_INT_STATUS_RING0
+#define CP_INT_STATUS_RING0__RESUME_INT_STAT__SHIFT 0x8
+#define CP_INT_STATUS_RING0__SUSPEND_INT_STAT__SHIFT 0x9
+#define CP_INT_STATUS_RING0__DMA_WATCH_INT_STAT__SHIFT 0xa
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT__SHIFT 0xb
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING0__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT__SHIFT 0x12
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT__SHIFT 0x13
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT__SHIFT 0x14
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT__SHIFT 0x15
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING0__RESUME_INT_STAT_MASK 0x00000100L
+#define CP_INT_STATUS_RING0__SUSPEND_INT_STAT_MASK 0x00000200L
+#define CP_INT_STATUS_RING0__DMA_WATCH_INT_STAT_MASK 0x00000400L
+#define CP_INT_STATUS_RING0__CP_VM_DOORBELL_WR_INT_STAT_MASK 0x00000800L
+#define CP_INT_STATUS_RING0__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING0__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING0__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING0__CMP_BUSY_INT_STAT_MASK 0x00040000L
+#define CP_INT_STATUS_RING0__GCNTX_BUSY_INT_STAT_MASK 0x00080000L
+#define CP_INT_STATUS_RING0__CNTX_EMPTY_INT_STAT_MASK 0x00100000L
+#define CP_INT_STATUS_RING0__GFX_IDLE_INT_STAT_MASK 0x00200000L
+#define CP_INT_STATUS_RING0__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING0__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING0__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING0__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING0__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING0__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING0__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING0__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_INT_STATUS_RING1
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT__SHIFT 0xe
+#define CP_INT_STATUS_RING1__GPF_INT_STAT__SHIFT 0x10
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT__SHIFT 0x11
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT__SHIFT 0x16
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT__SHIFT 0x17
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT__SHIFT 0x18
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT__SHIFT 0x1a
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT__SHIFT 0x1b
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT__SHIFT 0x1d
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT__SHIFT 0x1e
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT__SHIFT 0x1f
+#define CP_INT_STATUS_RING1__CP_ECC_ERROR_INT_STAT_MASK 0x00004000L
+#define CP_INT_STATUS_RING1__GPF_INT_STAT_MASK 0x00010000L
+#define CP_INT_STATUS_RING1__WRM_POLL_TIMEOUT_INT_STAT_MASK 0x00020000L
+#define CP_INT_STATUS_RING1__PRIV_INSTR_INT_STAT_MASK 0x00400000L
+#define CP_INT_STATUS_RING1__PRIV_REG_INT_STAT_MASK 0x00800000L
+#define CP_INT_STATUS_RING1__OPCODE_ERROR_INT_STAT_MASK 0x01000000L
+#define CP_INT_STATUS_RING1__TIME_STAMP_INT_STAT_MASK 0x04000000L
+#define CP_INT_STATUS_RING1__RESERVED_BIT_ERROR_INT_STAT_MASK 0x08000000L
+#define CP_INT_STATUS_RING1__GENERIC2_INT_STAT_MASK 0x20000000L
+#define CP_INT_STATUS_RING1__GENERIC1_INT_STAT_MASK 0x40000000L
+#define CP_INT_STATUS_RING1__GENERIC0_INT_STAT_MASK 0x80000000L
+//CP_ME_F32_INTERRUPT
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT__SHIFT 0x1
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2__SHIFT 0x2
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3__SHIFT 0x3
+#define CP_ME_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_ME_F32_INTERRUPT__TIME_STAMP_INT_MASK 0x00000002L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_2_MASK 0x00000004L
+#define CP_ME_F32_INTERRUPT__ME_F32_INT_3_MASK 0x00000008L
+//CP_PFP_F32_INTERRUPT
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT__SHIFT 0x0
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3__SHIFT 0x3
+#define CP_PFP_F32_INTERRUPT__ECC_ERROR_INT_MASK 0x00000001L
+#define CP_PFP_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_PFP_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_PFP_F32_INTERRUPT__PFP_F32_INT_3_MASK 0x00000008L
+//CP_MEC1_F32_INTERRUPT
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INTERRUPT
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INTERRUPT__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INTERRUPT__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INTERRUPT__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INTERRUPT__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INTERRUPT__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INTERRUPT__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INTERRUPT__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INTERRUPT__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INTERRUPT__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INTERRUPT__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INTERRUPT__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INTERRUPT__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INTERRUPT__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INTERRUPT__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_PWR_CNTL
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0__SHIFT 0x0
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1__SHIFT 0x1
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0__SHIFT 0x8
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1__SHIFT 0x9
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2__SHIFT 0xa
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3__SHIFT 0xb
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0__SHIFT 0x10
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1__SHIFT 0x11
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2__SHIFT 0x12
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3__SHIFT 0x13
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE0__SHIFT 0x14
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE1__SHIFT 0x15
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE2__SHIFT 0x16
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE3__SHIFT 0x17
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE0_MASK 0x00000001L
+#define CP_PWR_CNTL__GFX_CLK_HALT_ME0_PIPE1_MASK 0x00000002L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE0_MASK 0x00000100L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE1_MASK 0x00000200L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE2_MASK 0x00000400L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME1_PIPE3_MASK 0x00000800L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE0_MASK 0x00010000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE1_MASK 0x00020000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE2_MASK 0x00040000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME2_PIPE3_MASK 0x00080000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE0_MASK 0x00100000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE1_MASK 0x00200000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE2_MASK 0x00400000L
+#define CP_PWR_CNTL__CMP_CLK_HALT_ME3_PIPE3_MASK 0x00800000L
+//CP_ECC_FIRSTOCCURRENCE
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT__SHIFT 0x4
+#define CP_ECC_FIRSTOCCURRENCE__ME__SHIFT 0x8
+#define CP_ECC_FIRSTOCCURRENCE__PIPE__SHIFT 0xa
+#define CP_ECC_FIRSTOCCURRENCE__VMID__SHIFT 0x10
+#define CP_ECC_FIRSTOCCURRENCE__INTERFACE_MASK 0x00000003L
+#define CP_ECC_FIRSTOCCURRENCE__CLIENT_MASK 0x000000F0L
+#define CP_ECC_FIRSTOCCURRENCE__ME_MASK 0x00000300L
+#define CP_ECC_FIRSTOCCURRENCE__PIPE_MASK 0x00000C00L
+#define CP_ECC_FIRSTOCCURRENCE__VMID_MASK 0x000F0000L
+//CP_ECC_FIRSTOCCURRENCE_RING0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING0__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_ECC_FIRSTOCCURRENCE_RING1
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE__SHIFT 0x0
+#define CP_ECC_FIRSTOCCURRENCE_RING1__OBSOLETE_MASK 0xFFFFFFFFL
+//GB_EDC_MODE
+#define GB_EDC_MODE__FORCE_SEC_ON_DED__SHIFT 0xf
+#define GB_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10
+#define GB_EDC_MODE__GATE_FUE__SHIFT 0x11
+#define GB_EDC_MODE__DED_MODE__SHIFT 0x14
+#define GB_EDC_MODE__PROP_FED__SHIFT 0x1d
+#define GB_EDC_MODE__BYPASS__SHIFT 0x1f
+#define GB_EDC_MODE__FORCE_SEC_ON_DED_MASK 0x00008000L
+#define GB_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L
+#define GB_EDC_MODE__GATE_FUE_MASK 0x00020000L
+#define GB_EDC_MODE__DED_MODE_MASK 0x00300000L
+#define GB_EDC_MODE__PROP_FED_MASK 0x20000000L
+#define GB_EDC_MODE__BYPASS_MASK 0x80000000L
+//CP_DEBUG
+#define CP_DEBUG__PERFMON_RING_SEL__SHIFT 0x0
+#define CP_DEBUG__DEBUG_BUS_SELECT_BITS__SHIFT 0x2
+#define CP_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0x8
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CP_DEBUG__PACKET_FILTER_DISABLE__SHIFT 0xa
+#define CP_DEBUG__NOT_EOP_PREEMPT_DISABLE__SHIFT 0xb
+#define CP_DEBUG__CPG_CHIU_RO_DISABLE__SHIFT 0xc
+#define CP_DEBUG__CPG_GCR_CNTL_BYPASS__SHIFT 0xd
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE__SHIFT 0xe
+#define CP_DEBUG__CPG_UTCL1_ERROR_HALT_DISABLE__SHIFT 0xf
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR__SHIFT 0x10
+#define CP_DEBUG__CPG_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_DEBUG__PRIV_VIOLATION_WRITE_DISABLE__SHIFT 0x14
+#define CP_DEBUG__CPG_CHIU_GUS_DISABLE__SHIFT 0x15
+#define CP_DEBUG__INTERRUPT_DISABLE__SHIFT 0x16
+#define CP_DEBUG__PREDICATE_DISABLE__SHIFT 0x17
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_DEBUG__CPG_CHIU_MTYPE_OVERRIDE__SHIFT 0x1b
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE__SHIFT 0x1e
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE__SHIFT 0x1f
+#define CP_DEBUG__PERFMON_RING_SEL_MASK 0x00000003L
+#define CP_DEBUG__DEBUG_BUS_SELECT_BITS_MASK 0x000000FCL
+#define CP_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00000100L
+#define CP_DEBUG__CPG_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define CP_DEBUG__PACKET_FILTER_DISABLE_MASK 0x00000400L
+#define CP_DEBUG__NOT_EOP_PREEMPT_DISABLE_MASK 0x00000800L
+#define CP_DEBUG__CPG_CHIU_RO_DISABLE_MASK 0x00001000L
+#define CP_DEBUG__CPG_GCR_CNTL_BYPASS_MASK 0x00002000L
+#define CP_DEBUG__CPG_RAM_CLK_GATING_DISABLE_MASK 0x00004000L
+#define CP_DEBUG__CPG_UTCL1_ERROR_HALT_DISABLE_MASK 0x00008000L
+#define CP_DEBUG__SURFSYNC_CNTX_RDADDR_MASK 0x00070000L
+#define CP_DEBUG__CPG_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_DEBUG__PRIV_VIOLATION_WRITE_DISABLE_MASK 0x00100000L
+#define CP_DEBUG__CPG_CHIU_GUS_DISABLE_MASK 0x00200000L
+#define CP_DEBUG__INTERRUPT_DISABLE_MASK 0x00400000L
+#define CP_DEBUG__PREDICATE_DISABLE_MASK 0x00800000L
+#define CP_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_DEBUG__CPG_CHIU_MTYPE_OVERRIDE_MASK 0x08000000L
+#define CP_DEBUG__CPG_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_DEBUG__CS_PIPELINE_RESET_DISABLE_MASK 0x40000000L
+#define CP_DEBUG__IB_PACKET_INJECTOR_DISABLE_MASK 0x80000000L
+//CP_CPF_DEBUG
+#define CP_CPF_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0xe
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE__SHIFT 0x10
+#define CP_CPF_DEBUG__CPF_GCR_CNTL_BYPASS__SHIFT 0x11
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPF_DEBUG__CPF_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_DELAY_OVERRIDE__SHIFT 0x16
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_OVERRIDE__SHIFT 0x17
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPF_DEBUG__CPF_CHIU_NOALLOC_OVERRIDE__SHIFT 0x1a
+#define CP_CPF_DEBUG__CE_FETCHER_DISABLE__SHIFT 0x1b
+#define CP_CPF_DEBUG__CPF_CHIU_GUS_DISABLE__SHIFT 0x1c
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS__SHIFT 0x1d
+#define CP_CPF_DEBUG__CPF_CHIU_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPF_DEBUG__DBGU_TRIGGER__SHIFT 0x1f
+#define CP_CPF_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00004000L
+#define CP_CPF_DEBUG__CPF_REPEATER_FGCG_OVERRIDE_MASK 0x00010000L
+#define CP_CPF_DEBUG__CPF_GCR_CNTL_BYPASS_MASK 0x00020000L
+#define CP_CPF_DEBUG__CPF_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPF_DEBUG__CPF_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_DELAY_OVERRIDE_MASK 0x00400000L
+#define CP_CPF_DEBUG__CLOCK_ACTIVE_OVERRIDE_MASK 0x00800000L
+#define CP_CPF_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPF_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPF_DEBUG__CPF_CHIU_NOALLOC_OVERRIDE_MASK 0x04000000L
+#define CP_CPF_DEBUG__CE_FETCHER_DISABLE_MASK 0x08000000L
+#define CP_CPF_DEBUG__CPF_CHIU_GUS_DISABLE_MASK 0x10000000L
+#define CP_CPF_DEBUG__CPF_PRIORITY_YIELD_ACTIVE_DIS_MASK 0x20000000L
+#define CP_CPF_DEBUG__CPF_CHIU_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPF_DEBUG__DBGU_TRIGGER_MASK 0x80000000L
+//CP_CPC_DEBUG
+#define CP_CPC_DEBUG__PIPE_SELECT__SHIFT 0x0
+#define CP_CPC_DEBUG__ME_SELECT__SHIFT 0x2
+#define CP_CPC_DEBUG__ADC_INTERLEAVE_DISABLE__SHIFT 0x4
+#define CP_CPC_DEBUG__DEBUG_BUS_FLOP_EN__SHIFT 0xe
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE__SHIFT 0xf
+#define CP_CPC_DEBUG__CPC_CHIU_NOALLOC_OVERRIDE__SHIFT 0x10
+#define CP_CPC_DEBUG__CPC_GCR_CNTL_BYPASS__SHIFT 0x11
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE__SHIFT 0x12
+#define CP_CPC_DEBUG__CPC_DATA_POISONING_INT_DISABLE__SHIFT 0x13
+#define CP_CPC_DEBUG__PRIV_VIOLATION_WRITE_DISABLE__SHIFT 0x14
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE__SHIFT 0x15
+#define CP_CPC_DEBUG__INTERRUPT_DISABLE__SHIFT 0x16
+#define CP_CPC_DEBUG__CPC_CHIU_RO_DISABLE__SHIFT 0x17
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE__SHIFT 0x18
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE__SHIFT 0x19
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE__SHIFT 0x1a
+#define CP_CPC_DEBUG__CPC_CHIU_GUS_DISABLE__SHIFT 0x1b
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE__SHIFT 0x1c
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE__SHIFT 0x1d
+#define CP_CPC_DEBUG__CPC_CHIU_MTYPE_OVERRIDE__SHIFT 0x1e
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE__SHIFT 0x1f
+#define CP_CPC_DEBUG__PIPE_SELECT_MASK 0x00000003L
+#define CP_CPC_DEBUG__ME_SELECT_MASK 0x00000004L
+#define CP_CPC_DEBUG__ADC_INTERLEAVE_DISABLE_MASK 0x00000010L
+#define CP_CPC_DEBUG__DEBUG_BUS_FLOP_EN_MASK 0x00004000L
+#define CP_CPC_DEBUG__CPC_REPEATER_FGCG_OVERRIDE_MASK 0x00008000L
+#define CP_CPC_DEBUG__CPC_CHIU_NOALLOC_OVERRIDE_MASK 0x00010000L
+#define CP_CPC_DEBUG__CPC_GCR_CNTL_BYPASS_MASK 0x00020000L
+#define CP_CPC_DEBUG__CPC_RAM_CLK_GATING_DISABLE_MASK 0x00040000L
+#define CP_CPC_DEBUG__CPC_DATA_POISONING_INT_DISABLE_MASK 0x00080000L
+#define CP_CPC_DEBUG__PRIV_VIOLATION_WRITE_DISABLE_MASK 0x00100000L
+#define CP_CPC_DEBUG__UCODE_ECC_ERROR_DISABLE_MASK 0x00200000L
+#define CP_CPC_DEBUG__INTERRUPT_DISABLE_MASK 0x00400000L
+#define CP_CPC_DEBUG__CPC_CHIU_RO_DISABLE_MASK 0x00800000L
+#define CP_CPC_DEBUG__UNDERFLOW_BUSY_DISABLE_MASK 0x01000000L
+#define CP_CPC_DEBUG__OVERFLOW_BUSY_DISABLE_MASK 0x02000000L
+#define CP_CPC_DEBUG__EVENT_FILT_DISABLE_MASK 0x04000000L
+#define CP_CPC_DEBUG__CPC_CHIU_GUS_DISABLE_MASK 0x08000000L
+#define CP_CPC_DEBUG__CPC_TC_ONE_CYCLE_WRITE_DISABLE_MASK 0x10000000L
+#define CP_CPC_DEBUG__CS_STATE_FILT_DISABLE_MASK 0x20000000L
+#define CP_CPC_DEBUG__CPC_CHIU_MTYPE_OVERRIDE_MASK 0x40000000L
+#define CP_CPC_DEBUG__ME2_UCODE_RAM_ENABLE_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT__SHIFT 0x1d
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE__SHIFT 0x1e
+#define CP_PQ_WPTR_POLL_CNTL__EN__SHIFT 0x1f
+#define CP_PQ_WPTR_POLL_CNTL__PERIOD_MASK 0x000000FFL
+#define CP_PQ_WPTR_POLL_CNTL__DISABLE_PEND_REQ_ONE_SHOT_MASK 0x20000000L
+#define CP_PQ_WPTR_POLL_CNTL__POLL_ACTIVE_MASK 0x40000000L
+#define CP_PQ_WPTR_POLL_CNTL__EN_MASK 0x80000000L
+//CP_PQ_WPTR_POLL_CNTL1
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK__SHIFT 0x0
+#define CP_PQ_WPTR_POLL_CNTL1__QUEUE_MASK_MASK 0xFFFFFFFFL
+//CP_ME1_PIPE0_INT_CNTL
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_CNTL
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_CNTL
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_CNTL
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_CNTL
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_CNTL
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_CNTL
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_CNTL
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CP_ME1_PIPE0_INT_STATUS
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE1_INT_STATUS
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE2_INT_STATUS
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_PIPE3_INT_STATUS
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME1_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME1_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME1_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME1_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME1_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME1_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME1_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME1_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME1_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE0_INT_STATUS
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE0_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE0_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE0_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE0_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE0_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE0_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE0_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE0_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE0_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE1_INT_STATUS
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE1_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE1_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE1_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE1_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE1_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE1_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE1_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE1_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE1_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE1_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE1_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE2_INT_STATUS
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE2_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE2_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE2_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE2_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE2_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE2_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE2_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE2_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE2_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE2_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE2_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME2_PIPE3_INT_STATUS
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CP_ME2_PIPE3_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CP_ME2_PIPE3_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CP_ME2_PIPE3_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CP_ME2_PIPE3_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_PIPE3_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CP_ME2_PIPE3_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CP_ME2_PIPE3_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CP_ME2_PIPE3_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CP_ME2_PIPE3_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CP_ME2_PIPE3_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CP_ME2_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT 0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT 0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT 0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT 0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT 0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT 0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT 0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT 0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT 0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT 0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT 0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK 0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK 0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK 0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK 0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK 0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK 0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK 0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK 0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK 0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK 0x80000000L
+//CP_GFX_QUEUE_INDEX
+#define CP_GFX_QUEUE_INDEX__QUEUE_ACCESS__SHIFT 0x0
+#define CP_GFX_QUEUE_INDEX__PIPE_ID__SHIFT 0x4
+#define CP_GFX_QUEUE_INDEX__QUEUE_ID__SHIFT 0x8
+#define CP_GFX_QUEUE_INDEX__QUEUE_ACCESS_MASK 0x00000001L
+#define CP_GFX_QUEUE_INDEX__PIPE_ID_MASK 0x00000030L
+#define CP_GFX_QUEUE_INDEX__QUEUE_ID_MASK 0x00000700L
+//CC_GC_EDC_CONFIG
+#define CC_GC_EDC_CONFIG__WRITE_DIS__SHIFT 0x0
+#define CC_GC_EDC_CONFIG__DIS_EDC__SHIFT 0x1
+#define CC_GC_EDC_CONFIG__WRITE_DIS_MASK 0x00000001L
+#define CC_GC_EDC_CONFIG__DIS_EDC_MASK 0x00000002L
+//CP_ME1_PIPE_PRIORITY_CNTS
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME1_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME1_PIPE0_PRIORITY
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE1_PRIORITY
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE2_PRIORITY
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME1_PIPE3_PRIORITY
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME1_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE_PRIORITY_CNTS
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_ME2_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_ME2_PIPE0_PRIORITY
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE1_PRIORITY
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE2_PRIORITY
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_ME2_PIPE3_PRIORITY
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_ME2_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_PFP_PRGRM_CNTR_START
+#define CP_PFP_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_ME_PRGRM_CNTR_START
+#define CP_ME_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MEC1_PRGRM_CNTR_START
+#define CP_MEC1_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC1_PRGRM_CNTR_START__IP_START_MASK 0x000FFFFFL
+//CP_MEC2_PRGRM_CNTR_START
+#define CP_MEC2_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC2_PRGRM_CNTR_START__IP_START_MASK 0x000FFFFFL
+//CP_PFP_INTR_ROUTINE_START
+#define CP_PFP_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_ME_INTR_ROUTINE_START
+#define CP_ME_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_MEC1_INTR_ROUTINE_START
+#define CP_MEC1_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC1_INTR_ROUTINE_START__IR_START_MASK 0x000FFFFFL
+//CP_MEC2_INTR_ROUTINE_START
+#define CP_MEC2_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MEC2_INTR_ROUTINE_START__IR_START_MASK 0x000FFFFFL
+//CP_CONTEXT_CNTL
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_GE_CNTX__SHIFT 0x0
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX__SHIFT 0x4
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_GE_CNTX__SHIFT 0x10
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX__SHIFT 0x14
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_GE_CNTX_MASK 0x00000007L
+#define CP_CONTEXT_CNTL__ME0PIPE0_MAX_PIPE_CNTX_MASK 0x00000070L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_GE_CNTX_MASK 0x00070000L
+#define CP_CONTEXT_CNTL__ME0PIPE1_MAX_PIPE_CNTX_MASK 0x00700000L
+//CP_MAX_CONTEXT
+#define CP_MAX_CONTEXT__MAX_CONTEXT__SHIFT 0x0
+#define CP_MAX_CONTEXT__MAX_CONTEXT_MASK 0x00000007L
+//CP_IQ_WAIT_TIME1
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD__SHIFT 0x0
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD__SHIFT 0x8
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD__SHIFT 0x10
+#define CP_IQ_WAIT_TIME1__GWS__SHIFT 0x18
+#define CP_IQ_WAIT_TIME1__IB_OFFLOAD_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME1__ATOMIC_OFFLOAD_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME1__WRM_OFFLOAD_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME1__GWS_MASK 0xFF000000L
+//CP_IQ_WAIT_TIME2
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP__SHIFT 0x0
+#define CP_IQ_WAIT_TIME2__SCH_WAVE__SHIFT 0x8
+#define CP_IQ_WAIT_TIME2__SEM_REARM__SHIFT 0x10
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY__SHIFT 0x18
+#define CP_IQ_WAIT_TIME2__QUE_SLEEP_MASK 0x000000FFL
+#define CP_IQ_WAIT_TIME2__SCH_WAVE_MASK 0x0000FF00L
+#define CP_IQ_WAIT_TIME2__SEM_REARM_MASK 0x00FF0000L
+#define CP_IQ_WAIT_TIME2__DEQ_RETRY_MASK 0xFF000000L
+//CP_RB0_BASE_HI
+#define CP_RB0_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB0_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_RB1_BASE_HI
+#define CP_RB1_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_RB1_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_VMID_RESET
+#define CP_VMID_RESET__RESET_REQUEST__SHIFT 0x0
+#define CP_VMID_RESET__PIPE0_QUEUES__SHIFT 0x10
+#define CP_VMID_RESET__PIPE1_QUEUES__SHIFT 0x18
+#define CP_VMID_RESET__RESET_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_RESET__PIPE0_QUEUES_MASK 0x00FF0000L
+#define CP_VMID_RESET__PIPE1_QUEUES_MASK 0xFF000000L
+//CPC_INT_CNTL
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE__SHIFT 0xc
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE__SHIFT 0xd
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE__SHIFT 0xe
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE__SHIFT 0xf
+#define CPC_INT_CNTL__GPF_INT_ENABLE__SHIFT 0x10
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE__SHIFT 0x11
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE__SHIFT 0x17
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE__SHIFT 0x18
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE__SHIFT 0x1a
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE__SHIFT 0x1b
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE__SHIFT 0x1d
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE__SHIFT 0x1e
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE__SHIFT 0x1f
+#define CPC_INT_CNTL__CMP_QUERY_STATUS_INT_ENABLE_MASK 0x00001000L
+#define CPC_INT_CNTL__DEQUEUE_REQUEST_INT_ENABLE_MASK 0x00002000L
+#define CPC_INT_CNTL__CP_ECC_ERROR_INT_ENABLE_MASK 0x00004000L
+#define CPC_INT_CNTL__SUA_VIOLATION_INT_ENABLE_MASK 0x00008000L
+#define CPC_INT_CNTL__GPF_INT_ENABLE_MASK 0x00010000L
+#define CPC_INT_CNTL__WRM_POLL_TIMEOUT_INT_ENABLE_MASK 0x00020000L
+#define CPC_INT_CNTL__PRIV_REG_INT_ENABLE_MASK 0x00800000L
+#define CPC_INT_CNTL__OPCODE_ERROR_INT_ENABLE_MASK 0x01000000L
+#define CPC_INT_CNTL__TIME_STAMP_INT_ENABLE_MASK 0x04000000L
+#define CPC_INT_CNTL__RESERVED_BIT_ERROR_INT_ENABLE_MASK 0x08000000L
+#define CPC_INT_CNTL__GENERIC2_INT_ENABLE_MASK 0x20000000L
+#define CPC_INT_CNTL__GENERIC1_INT_ENABLE_MASK 0x40000000L
+#define CPC_INT_CNTL__GENERIC0_INT_ENABLE_MASK 0x80000000L
+//CPC_INT_STATUS
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT 0xc
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT 0xd
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS__SHIFT 0xe
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS__SHIFT 0xf
+#define CPC_INT_STATUS__GPF_INT_STATUS__SHIFT 0x10
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS__SHIFT 0x11
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS__SHIFT 0x17
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS__SHIFT 0x18
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS__SHIFT 0x1a
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS__SHIFT 0x1b
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS__SHIFT 0x1d
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS__SHIFT 0x1e
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS__SHIFT 0x1f
+#define CPC_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS_MASK 0x00001000L
+#define CPC_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS_MASK 0x00002000L
+#define CPC_INT_STATUS__CP_ECC_ERROR_INT_STATUS_MASK 0x00004000L
+#define CPC_INT_STATUS__SUA_VIOLATION_INT_STATUS_MASK 0x00008000L
+#define CPC_INT_STATUS__GPF_INT_STATUS_MASK 0x00010000L
+#define CPC_INT_STATUS__WRM_POLL_TIMEOUT_INT_STATUS_MASK 0x00020000L
+#define CPC_INT_STATUS__PRIV_REG_INT_STATUS_MASK 0x00800000L
+#define CPC_INT_STATUS__OPCODE_ERROR_INT_STATUS_MASK 0x01000000L
+#define CPC_INT_STATUS__TIME_STAMP_INT_STATUS_MASK 0x04000000L
+#define CPC_INT_STATUS__RESERVED_BIT_ERROR_INT_STATUS_MASK 0x08000000L
+#define CPC_INT_STATUS__GENERIC2_INT_STATUS_MASK 0x20000000L
+#define CPC_INT_STATUS__GENERIC1_INT_STATUS_MASK 0x40000000L
+#define CPC_INT_STATUS__GENERIC0_INT_STATUS_MASK 0x80000000L
+//CP_VMID_PREEMPT
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST__SHIFT 0x0
+#define CP_VMID_PREEMPT__VIRT_COMMAND__SHIFT 0x10
+#define CP_VMID_PREEMPT__PREEMPT_REQUEST_MASK 0x0000FFFFL
+#define CP_VMID_PREEMPT__VIRT_COMMAND_MASK 0x000F0000L
+//CPC_INT_CNTX_ID
+#define CPC_INT_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CPC_INT_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_PQ_STATUS
+#define CP_PQ_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_PQ_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN__SHIFT 0x2
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MODE__SHIFT 0x3
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_PQ_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_EN_MASK 0x00000004L
+#define CP_PQ_STATUS__DOORBELL_UPDATED_MODE_MASK 0x00000008L
+//CP_PFP_PRGRM_CNTR_START_HI
+#define CP_PFP_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_PFP_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MAX_DRAW_COUNT
+#define CP_MAX_DRAW_COUNT__MAX_DRAW_COUNT__SHIFT 0x0
+#define CP_MAX_DRAW_COUNT__MAX_DRAW_COUNT_MASK 0xFFFFFFFFL
+//CP_MEC1_F32_INT_DIS
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC1_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC1_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC1_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC1_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC1_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC1_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC1_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC1_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC1_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC1_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC1_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC1_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC1_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC1_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_MEC2_F32_INT_DIS
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT__SHIFT 0x0
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT__SHIFT 0x1
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT__SHIFT 0x2
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT__SHIFT 0x3
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT__SHIFT 0x4
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT__SHIFT 0x5
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT__SHIFT 0x6
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT__SHIFT 0x7
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT__SHIFT 0x8
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT__SHIFT 0x9
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF__SHIFT 0xa
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA__SHIFT 0xb
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC__SHIFT 0xc
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT__SHIFT 0xd
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT__SHIFT 0xe
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT__SHIFT 0xf
+#define CP_MEC2_F32_INT_DIS__EDC_ROQ_FED_INT_MASK 0x00000001L
+#define CP_MEC2_F32_INT_DIS__PRIV_REG_INT_MASK 0x00000002L
+#define CP_MEC2_F32_INT_DIS__RESERVED_BIT_ERR_INT_MASK 0x00000004L
+#define CP_MEC2_F32_INT_DIS__EDC_TC_FED_INT_MASK 0x00000008L
+#define CP_MEC2_F32_INT_DIS__EDC_GDS_FED_INT_MASK 0x00000010L
+#define CP_MEC2_F32_INT_DIS__EDC_SCRATCH_FED_INT_MASK 0x00000020L
+#define CP_MEC2_F32_INT_DIS__WAVE_RESTORE_INT_MASK 0x00000040L
+#define CP_MEC2_F32_INT_DIS__SUA_VIOLATION_INT_MASK 0x00000080L
+#define CP_MEC2_F32_INT_DIS__EDC_DMA_FED_INT_MASK 0x00000100L
+#define CP_MEC2_F32_INT_DIS__IQ_TIMER_INT_MASK 0x00000200L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPF_MASK 0x00000400L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_DMA_MASK 0x00000800L
+#define CP_MEC2_F32_INT_DIS__GPF_INT_CPC_MASK 0x00001000L
+#define CP_MEC2_F32_INT_DIS__EDC_SR_MEM_FED_INT_MASK 0x00002000L
+#define CP_MEC2_F32_INT_DIS__QUEUE_MESSAGE_INT_MASK 0x00004000L
+#define CP_MEC2_F32_INT_DIS__FATAL_EDC_ERROR_INT_MASK 0x00008000L
+//CP_VMID_STATUS
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS__SHIFT 0x0
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS__SHIFT 0x10
+#define CP_VMID_STATUS__PREEMPT_DE_STATUS_MASK 0x0000FFFFL
+#define CP_VMID_STATUS__PREEMPT_CE_STATUS_MASK 0xFFFF0000L
+//CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CPC_SUSPEND_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CPC_SUSPEND_CTX_SAVE_CONTROL
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__POLICY_MASK 0x00000018L
+#define CPC_SUSPEND_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CPC_SUSPEND_CNTL_STACK_OFFSET
+#define CPC_SUSPEND_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CPC_SUSPEND_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CPC_SUSPEND_CNTL_STACK_SIZE
+#define CPC_SUSPEND_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CPC_SUSPEND_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CPC_SUSPEND_WG_STATE_OFFSET
+#define CPC_SUSPEND_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CPC_SUSPEND_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CPC_SUSPEND_CTX_SAVE_SIZE
+#define CPC_SUSPEND_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CPC_SUSPEND_CTX_SAVE_SIZE__SIZE_MASK 0x03FFF000L
+//CPC_OS_PIPES
+#define CPC_OS_PIPES__OS_PIPES__SHIFT 0x0
+#define CPC_OS_PIPES__OS_PIPES_MASK 0x000000FFL
+//CP_SUSPEND_RESUME_REQ
+#define CP_SUSPEND_RESUME_REQ__SUSPEND_REQ__SHIFT 0x0
+#define CP_SUSPEND_RESUME_REQ__RESUME_REQ__SHIFT 0x1
+#define CP_SUSPEND_RESUME_REQ__SUSPEND_REQ_MASK 0x00000001L
+#define CP_SUSPEND_RESUME_REQ__RESUME_REQ_MASK 0x00000002L
+//CP_SUSPEND_CNTL
+#define CP_SUSPEND_CNTL__SUSPEND_MODE__SHIFT 0x0
+#define CP_SUSPEND_CNTL__SUSPEND_ENABLE__SHIFT 0x1
+#define CP_SUSPEND_CNTL__RESUME_LOCK__SHIFT 0x2
+#define CP_SUSPEND_CNTL__ACE_SUSPEND_ACTIVE__SHIFT 0x3
+#define CP_SUSPEND_CNTL__SUSPEND_MODE_MASK 0x00000001L
+#define CP_SUSPEND_CNTL__SUSPEND_ENABLE_MASK 0x00000002L
+#define CP_SUSPEND_CNTL__RESUME_LOCK_MASK 0x00000004L
+#define CP_SUSPEND_CNTL__ACE_SUSPEND_ACTIVE_MASK 0x00000008L
+//CP_IQ_WAIT_TIME3
+#define CP_IQ_WAIT_TIME3__SUSPEND_QUE__SHIFT 0x0
+#define CP_IQ_WAIT_TIME3__SUSPEND_QUE_MASK 0x000000FFL
+//CPC_DDID_BASE_ADDR_LO
+#define CPC_DDID_BASE_ADDR_LO__BASE_ADDR_LO__SHIFT 0x6
+#define CPC_DDID_BASE_ADDR_LO__BASE_ADDR_LO_MASK 0xFFFFFFC0L
+//CP_DDID_BASE_ADDR_LO
+#define CP_DDID_BASE_ADDR_LO__BASE_ADDR_LO__SHIFT 0x6
+#define CP_DDID_BASE_ADDR_LO__BASE_ADDR_LO_MASK 0xFFFFFFC0L
+//CPC_DDID_BASE_ADDR_HI
+#define CPC_DDID_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CPC_DDID_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_DDID_BASE_ADDR_HI
+#define CP_DDID_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_DDID_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CPC_DDID_CNTL
+#define CPC_DDID_CNTL__THRESHOLD__SHIFT 0x0
+#define CPC_DDID_CNTL__SIZE__SHIFT 0x10
+#define CPC_DDID_CNTL__NO_RING_MEMORY__SHIFT 0x13
+#define CPC_DDID_CNTL__POLICY__SHIFT 0x1c
+#define CPC_DDID_CNTL__MODE__SHIFT 0x1e
+#define CPC_DDID_CNTL__ENABLE__SHIFT 0x1f
+#define CPC_DDID_CNTL__THRESHOLD_MASK 0x000000FFL
+#define CPC_DDID_CNTL__SIZE_MASK 0x00010000L
+#define CPC_DDID_CNTL__NO_RING_MEMORY_MASK 0x00080000L
+#define CPC_DDID_CNTL__POLICY_MASK 0x30000000L
+#define CPC_DDID_CNTL__MODE_MASK 0x40000000L
+#define CPC_DDID_CNTL__ENABLE_MASK 0x80000000L
+//CP_DDID_CNTL
+#define CP_DDID_CNTL__THRESHOLD__SHIFT 0x0
+#define CP_DDID_CNTL__SIZE__SHIFT 0x10
+#define CP_DDID_CNTL__NO_RING_MEMORY__SHIFT 0x13
+#define CP_DDID_CNTL__VMID__SHIFT 0x14
+#define CP_DDID_CNTL__VMID_SEL__SHIFT 0x18
+#define CP_DDID_CNTL__POLICY__SHIFT 0x1c
+#define CP_DDID_CNTL__MODE__SHIFT 0x1e
+#define CP_DDID_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DDID_CNTL__THRESHOLD_MASK 0x000000FFL
+#define CP_DDID_CNTL__SIZE_MASK 0x00010000L
+#define CP_DDID_CNTL__NO_RING_MEMORY_MASK 0x00080000L
+#define CP_DDID_CNTL__VMID_MASK 0x00F00000L
+#define CP_DDID_CNTL__VMID_SEL_MASK 0x01000000L
+#define CP_DDID_CNTL__POLICY_MASK 0x30000000L
+#define CP_DDID_CNTL__MODE_MASK 0x40000000L
+#define CP_DDID_CNTL__ENABLE_MASK 0x80000000L
+//CP_GFX_DDID_INFLIGHT_COUNT
+#define CP_GFX_DDID_INFLIGHT_COUNT__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_INFLIGHT_COUNT__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_WPTR
+#define CP_GFX_DDID_WPTR__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_WPTR__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_RPTR
+#define CP_GFX_DDID_RPTR__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_RPTR__COUNT_MASK 0x0000FFFFL
+//CP_GFX_DDID_DELTA_RPT_COUNT
+#define CP_GFX_DDID_DELTA_RPT_COUNT__COUNT__SHIFT 0x0
+#define CP_GFX_DDID_DELTA_RPT_COUNT__COUNT_MASK 0x000000FFL
+//CP_GFX_HPD_STATUS0
+#define CP_GFX_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_GFX_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_GFX_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_GFX_HPD_STATUS0__FORCE_MAPPED_QUEUE__SHIFT 0x10
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_GFX_HPD_STATUS0__SUSPEND_REQ__SHIFT 0x1c
+#define CP_GFX_HPD_STATUS0__ENABLE_OVERIDE_QUEUEID__SHIFT 0x1d
+#define CP_GFX_HPD_STATUS0__OVERIDE_QUEUEID__SHIFT 0x1e
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_GFX_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_GFX_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_GFX_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_GFX_HPD_STATUS0__FORCE_MAPPED_QUEUE_MASK 0x00070000L
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_GFX_HPD_STATUS0__SUSPEND_REQ_MASK 0x10000000L
+#define CP_GFX_HPD_STATUS0__ENABLE_OVERIDE_QUEUEID_MASK 0x20000000L
+#define CP_GFX_HPD_STATUS0__OVERIDE_QUEUEID_MASK 0x40000000L
+#define CP_GFX_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+//CP_GFX_HPD_CONTROL0
+#define CP_GFX_HPD_CONTROL0__SUSPEND_ENABLE__SHIFT 0x0
+#define CP_GFX_HPD_CONTROL0__PIPE_HOLDING__SHIFT 0x4
+#define CP_GFX_HPD_CONTROL0__RB_CE_ROQ_CNTL__SHIFT 0x8
+#define CP_GFX_HPD_CONTROL0__SUSPEND_ENABLE_MASK 0x00000001L
+#define CP_GFX_HPD_CONTROL0__PIPE_HOLDING_MASK 0x00000010L
+#define CP_GFX_HPD_CONTROL0__RB_CE_ROQ_CNTL_MASK 0x00000100L
+//CP_GFX_HPD_OSPRE_FENCE_ADDR_LO
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_GFX_HPD_OSPRE_FENCE_ADDR_HI
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_GFX_HPD_OSPRE_FENCE_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_GFX_HPD_OSPRE_FENCE_DATA_LO
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_GFX_HPD_OSPRE_FENCE_DATA_HI
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_GFX_HPD_OSPRE_FENCE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_GFX_INDEX_MUTEX
+#define CP_GFX_INDEX_MUTEX__REQUEST__SHIFT 0x0
+#define CP_GFX_INDEX_MUTEX__CLIENTID__SHIFT 0x1
+#define CP_GFX_INDEX_MUTEX__REQUEST_MASK 0x00000001L
+#define CP_GFX_INDEX_MUTEX__CLIENTID_MASK 0x0000000EL
+//CP_ME_PRGRM_CNTR_START_HI
+#define CP_ME_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_ME_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_PFP_INTR_ROUTINE_START_HI
+#define CP_PFP_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_PFP_INTR_ROUTINE_START_HI__IR_START_MASK 0x3FFFFFFFL
+//CP_ME_INTR_ROUTINE_START_HI
+#define CP_ME_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_ME_INTR_ROUTINE_START_HI__IR_START_MASK 0x3FFFFFFFL
+//CP_GFX_MQD_BASE_ADDR
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_GFX_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_MQD_BASE_ADDR_HI
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_GFX_MQD_BASE_ADDR_HI__APP_VMID__SHIFT 0x1c
+#define CP_GFX_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+#define CP_GFX_MQD_BASE_ADDR_HI__APP_VMID_MASK 0xF0000000L
+//CP_GFX_HQD_ACTIVE
+#define CP_GFX_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_GFX_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_GFX_HQD_VMID
+#define CP_GFX_HQD_VMID__VMID__SHIFT 0x0
+#define CP_GFX_HQD_VMID__VMID_MASK 0x0000000FL
+//CP_GFX_HQD_QUEUE_PRIORITY
+#define CP_GFX_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_GFX_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_GFX_HQD_QUANTUM
+#define CP_GFX_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_GFX_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x3
+#define CP_GFX_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_GFX_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_GFX_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000018L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x0000FF00L
+#define CP_GFX_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_GFX_HQD_BASE
+#define CP_GFX_HQD_BASE__RB_BASE__SHIFT 0x0
+#define CP_GFX_HQD_BASE__RB_BASE_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_BASE_HI
+#define CP_GFX_HQD_BASE_HI__RB_BASE_HI__SHIFT 0x0
+#define CP_GFX_HQD_BASE_HI__RB_BASE_HI_MASK 0x000000FFL
+//CP_GFX_HQD_RPTR
+#define CP_GFX_HQD_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_GFX_HQD_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_GFX_HQD_RPTR_ADDR
+#define CP_GFX_HQD_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x2
+#define CP_GFX_HQD_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFCL
+//CP_GFX_HQD_RPTR_ADDR_HI
+#define CP_GFX_HQD_RPTR_ADDR_HI__RB_RPTR_ADDR_HI__SHIFT 0x0
+#define CP_GFX_HQD_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_WPTR_POLL_ADDR_LO
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO__SHIFT 0x2
+#define CP_RB_WPTR_POLL_ADDR_LO__RB_WPTR_POLL_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_RB_WPTR_POLL_ADDR_HI
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI__SHIFT 0x0
+#define CP_RB_WPTR_POLL_ADDR_HI__RB_WPTR_POLL_ADDR_HI_MASK 0x0000FFFFL
+//CP_RB_DOORBELL_CONTROL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_RB_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_GFX_HQD_OFFSET
+#define CP_GFX_HQD_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_GFX_HQD_OFFSET__DISABLE_RB_OFFSET__SHIFT 0x1f
+#define CP_GFX_HQD_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+#define CP_GFX_HQD_OFFSET__DISABLE_RB_OFFSET_MASK 0x80000000L
+//CP_GFX_HQD_CNTL
+#define CP_GFX_HQD_CNTL__RB_BUFSZ__SHIFT 0x0
+#define CP_GFX_HQD_CNTL__TMZ_STATE__SHIFT 0x6
+#define CP_GFX_HQD_CNTL__TMZ_MATCH__SHIFT 0x7
+#define CP_GFX_HQD_CNTL__RB_BLKSZ__SHIFT 0x8
+#define CP_GFX_HQD_CNTL__RB_NON_PRIV__SHIFT 0xf
+#define CP_GFX_HQD_CNTL__BUF_SWAP__SHIFT 0x10
+#define CP_GFX_HQD_CNTL__MIN_AVAILSZ__SHIFT 0x14
+#define CP_GFX_HQD_CNTL__MIN_IB_AVAILSZ__SHIFT 0x16
+#define CP_GFX_HQD_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_HQD_CNTL__RB_VOLATILE__SHIFT 0x1a
+#define CP_GFX_HQD_CNTL__RB_NO_UPDATE__SHIFT 0x1b
+#define CP_GFX_HQD_CNTL__RB_EXE__SHIFT 0x1c
+#define CP_GFX_HQD_CNTL__KMD_QUEUE__SHIFT 0x1d
+#define CP_GFX_HQD_CNTL__RB_RPTR_WR_ENA__SHIFT 0x1f
+#define CP_GFX_HQD_CNTL__RB_BUFSZ_MASK 0x0000003FL
+#define CP_GFX_HQD_CNTL__TMZ_STATE_MASK 0x00000040L
+#define CP_GFX_HQD_CNTL__TMZ_MATCH_MASK 0x00000080L
+#define CP_GFX_HQD_CNTL__RB_BLKSZ_MASK 0x00003F00L
+#define CP_GFX_HQD_CNTL__RB_NON_PRIV_MASK 0x00008000L
+#define CP_GFX_HQD_CNTL__BUF_SWAP_MASK 0x00030000L
+#define CP_GFX_HQD_CNTL__MIN_AVAILSZ_MASK 0x00300000L
+#define CP_GFX_HQD_CNTL__MIN_IB_AVAILSZ_MASK 0x00C00000L
+#define CP_GFX_HQD_CNTL__CACHE_POLICY_MASK 0x03000000L
+#define CP_GFX_HQD_CNTL__RB_VOLATILE_MASK 0x04000000L
+#define CP_GFX_HQD_CNTL__RB_NO_UPDATE_MASK 0x08000000L
+#define CP_GFX_HQD_CNTL__RB_EXE_MASK 0x10000000L
+#define CP_GFX_HQD_CNTL__KMD_QUEUE_MASK 0x20000000L
+#define CP_GFX_HQD_CNTL__RB_RPTR_WR_ENA_MASK 0x80000000L
+//CP_GFX_HQD_CSMD_RPTR
+#define CP_GFX_HQD_CSMD_RPTR__RB_RPTR__SHIFT 0x0
+#define CP_GFX_HQD_CSMD_RPTR__RB_RPTR_MASK 0x000FFFFFL
+//CP_GFX_HQD_WPTR
+#define CP_GFX_HQD_WPTR__RB_WPTR__SHIFT 0x0
+#define CP_GFX_HQD_WPTR__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_WPTR_HI
+#define CP_GFX_HQD_WPTR_HI__RB_WPTR__SHIFT 0x0
+#define CP_GFX_HQD_WPTR_HI__RB_WPTR_MASK 0xFFFFFFFFL
+//CP_GFX_HQD_DEQUEUE_REQUEST
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x00000001L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_GFX_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_GFX_HQD_MAPPED
+#define CP_GFX_HQD_MAPPED__MAPPED__SHIFT 0x0
+#define CP_GFX_HQD_MAPPED__MAPPED_MASK 0x00000001L
+//CP_GFX_HQD_QUE_MGR_CONTROL
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_IDLE_QUEUE_DISCONNECT__SHIFT 0x0
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_CONNECT_HANDSHAKE__SHIFT 0x4
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_FETCHER_DISCONNECT__SHIFT 0x5
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_ACTIVE_EN__SHIFT 0x6
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_ALLOW_DB_UPDATE_EN__SHIFT 0x7
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE__SHIFT 0x8
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_OFFSET_UPDATE__SHIFT 0xb
+#define CP_GFX_HQD_QUE_MGR_CONTROL__PRIORITY_PREEMPT_DISABLE__SHIFT 0xd
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_QUEUE_MGR__SHIFT 0xf
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_IDLE_MESSAGE__SHIFT 0x10
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_SWITCH_MESSAGE_IDLE__SHIFT 0x11
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_SWITCH_MSG_PREEMPT__SHIFT 0x12
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_MAPPED_QUEUE_IDLE_MSG__SHIFT 0x17
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_IDLE_QUEUE_DISCONNECT_MASK 0x00000001L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_CONNECT_HANDSHAKE_MASK 0x00000010L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_FETCHER_DISCONNECT_MASK 0x00000020L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_ACTIVE_EN_MASK 0x00000040L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_ALLOW_DB_UPDATE_EN_MASK 0x00000080L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__FORCE_QUEUE_MASK 0x00000700L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_OFFSET_UPDATE_MASK 0x00000800L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__PRIORITY_PREEMPT_DISABLE_MASK 0x00002000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_QUEUE_MGR_MASK 0x00008000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_IDLE_MESSAGE_MASK 0x00010000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_SWITCH_MESSAGE_IDLE_MASK 0x00020000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__ENABLE_SWITCH_MSG_PREEMPT_MASK 0x00040000L
+#define CP_GFX_HQD_QUE_MGR_CONTROL__DISABLE_MAPPED_QUEUE_IDLE_MSG_MASK 0x00800000L
+//CP_GFX_HQD_IQ_TIMER
+#define CP_GFX_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_GFX_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_GFX_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_GFX_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_GFX_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_GFX_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_GFX_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x1b
+#define CP_GFX_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_GFX_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_GFX_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_GFX_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_GFX_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_GFX_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_GFX_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_GFX_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_GFX_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x08000000L
+#define CP_GFX_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_GFX_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_GFX_HQD_HQ_STATUS0
+#define CP_GFX_HQD_HQ_STATUS0__DEQUEUE_STATUS__SHIFT 0x0
+#define CP_GFX_HQD_HQ_STATUS0__OS_PREEMPT_STATUS__SHIFT 0x4
+#define CP_GFX_HQD_HQ_STATUS0__PREEMPT_ACK__SHIFT 0x6
+#define CP_GFX_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_GFX_HQD_HQ_STATUS0__DEQUEUE_STATUS_MASK 0x00000001L
+#define CP_GFX_HQD_HQ_STATUS0__OS_PREEMPT_STATUS_MASK 0x00000030L
+#define CP_GFX_HQD_HQ_STATUS0__PREEMPT_ACK_MASK 0x00000040L
+#define CP_GFX_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+//CP_GFX_HQD_HQ_CONTROL0
+#define CP_GFX_HQD_HQ_CONTROL0__COMMAND__SHIFT 0x0
+#define CP_GFX_HQD_HQ_CONTROL0__SPARES__SHIFT 0x4
+#define CP_GFX_HQD_HQ_CONTROL0__COMMAND_MASK 0x0000000FL
+#define CP_GFX_HQD_HQ_CONTROL0__SPARES_MASK 0x000000F0L
+//CP_GFX_MQD_CONTROL
+#define CP_GFX_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_GFX_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_GFX_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_GFX_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_GFX_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_GFX_MQD_CONTROL__CACHE_POLICY_MASK 0x03000000L
+//CP_HQD_GFX_CONTROL
+#define CP_HQD_GFX_CONTROL__MESSAGE__SHIFT 0x0
+#define CP_HQD_GFX_CONTROL__MISC__SHIFT 0x4
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT 0xf
+#define CP_HQD_GFX_CONTROL__MESSAGE_MASK 0x0000000FL
+#define CP_HQD_GFX_CONTROL__MISC_MASK 0x00007FF0L
+#define CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN_MASK 0x00008000L
+//CP_HQD_GFX_STATUS
+#define CP_HQD_GFX_STATUS__STATUS__SHIFT 0x0
+#define CP_HQD_GFX_STATUS__STATUS_MASK 0x0000FFFFL
+//CP_DMA_WATCH0_ADDR_LO
+#define CP_DMA_WATCH0_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH0_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH0_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH0_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH0_ADDR_HI
+#define CP_DMA_WATCH0_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH0_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH0_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH0_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH0_MASK
+#define CP_DMA_WATCH0_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH0_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH0_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH0_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH0_CNTL
+#define CP_DMA_WATCH0_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH0_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH0_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH0_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH0_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH0_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH0_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH0_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH0_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH0_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH0_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH0_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH1_ADDR_LO
+#define CP_DMA_WATCH1_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH1_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH1_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH1_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH1_ADDR_HI
+#define CP_DMA_WATCH1_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH1_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH1_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH1_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH1_MASK
+#define CP_DMA_WATCH1_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH1_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH1_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH1_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH1_CNTL
+#define CP_DMA_WATCH1_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH1_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH1_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH1_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH1_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH1_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH1_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH1_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH1_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH1_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH1_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH1_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH2_ADDR_LO
+#define CP_DMA_WATCH2_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH2_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH2_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH2_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH2_ADDR_HI
+#define CP_DMA_WATCH2_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH2_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH2_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH2_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH2_MASK
+#define CP_DMA_WATCH2_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH2_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH2_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH2_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH2_CNTL
+#define CP_DMA_WATCH2_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH2_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH2_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH2_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH2_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH2_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH2_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH2_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH2_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH2_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH2_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH2_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH3_ADDR_LO
+#define CP_DMA_WATCH3_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH3_ADDR_LO__ADDR_LO__SHIFT 0x7
+#define CP_DMA_WATCH3_ADDR_LO__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH3_ADDR_LO__ADDR_LO_MASK 0xFFFFFF80L
+//CP_DMA_WATCH3_ADDR_HI
+#define CP_DMA_WATCH3_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH3_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_WATCH3_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_WATCH3_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_WATCH3_MASK
+#define CP_DMA_WATCH3_MASK__RSVD__SHIFT 0x0
+#define CP_DMA_WATCH3_MASK__MASK__SHIFT 0x7
+#define CP_DMA_WATCH3_MASK__RSVD_MASK 0x0000007FL
+#define CP_DMA_WATCH3_MASK__MASK_MASK 0xFFFFFF80L
+//CP_DMA_WATCH3_CNTL
+#define CP_DMA_WATCH3_CNTL__VMID__SHIFT 0x0
+#define CP_DMA_WATCH3_CNTL__RSVD1__SHIFT 0x4
+#define CP_DMA_WATCH3_CNTL__WATCH_READS__SHIFT 0x8
+#define CP_DMA_WATCH3_CNTL__WATCH_WRITES__SHIFT 0x9
+#define CP_DMA_WATCH3_CNTL__ANY_VMID__SHIFT 0xa
+#define CP_DMA_WATCH3_CNTL__RSVD2__SHIFT 0xb
+#define CP_DMA_WATCH3_CNTL__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH3_CNTL__RSVD1_MASK 0x000000F0L
+#define CP_DMA_WATCH3_CNTL__WATCH_READS_MASK 0x00000100L
+#define CP_DMA_WATCH3_CNTL__WATCH_WRITES_MASK 0x00000200L
+#define CP_DMA_WATCH3_CNTL__ANY_VMID_MASK 0x00000400L
+#define CP_DMA_WATCH3_CNTL__RSVD2_MASK 0xFFFFF800L
+//CP_DMA_WATCH_STAT_ADDR_LO
+#define CP_DMA_WATCH_STAT_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_WATCH_STAT_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_WATCH_STAT_ADDR_HI
+#define CP_DMA_WATCH_STAT_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_WATCH_STAT_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_WATCH_STAT
+#define CP_DMA_WATCH_STAT__VMID__SHIFT 0x0
+#define CP_DMA_WATCH_STAT__QUEUE_ID__SHIFT 0x4
+#define CP_DMA_WATCH_STAT__CLIENT_ID__SHIFT 0x8
+#define CP_DMA_WATCH_STAT__PIPE__SHIFT 0xc
+#define CP_DMA_WATCH_STAT__WATCH_ID__SHIFT 0x10
+#define CP_DMA_WATCH_STAT__RD_WR__SHIFT 0x14
+#define CP_DMA_WATCH_STAT__TRAP_FLAG__SHIFT 0x1f
+#define CP_DMA_WATCH_STAT__VMID_MASK 0x0000000FL
+#define CP_DMA_WATCH_STAT__QUEUE_ID_MASK 0x00000070L
+#define CP_DMA_WATCH_STAT__CLIENT_ID_MASK 0x00000700L
+#define CP_DMA_WATCH_STAT__PIPE_MASK 0x00003000L
+#define CP_DMA_WATCH_STAT__WATCH_ID_MASK 0x00030000L
+#define CP_DMA_WATCH_STAT__RD_WR_MASK 0x00100000L
+#define CP_DMA_WATCH_STAT__TRAP_FLAG_MASK 0x80000000L
+//CP_PFP_JT_STAT
+#define CP_PFP_JT_STAT__JT_LOADED__SHIFT 0x0
+#define CP_PFP_JT_STAT__WR_MASK__SHIFT 0x10
+#define CP_PFP_JT_STAT__JT_LOADED_MASK 0x00000003L
+#define CP_PFP_JT_STAT__WR_MASK_MASK 0x00030000L
+//CP_MEC_JT_STAT
+#define CP_MEC_JT_STAT__JT_LOADED__SHIFT 0x0
+#define CP_MEC_JT_STAT__WR_MASK__SHIFT 0x10
+#define CP_MEC_JT_STAT__JT_LOADED_MASK 0x000000FFL
+#define CP_MEC_JT_STAT__WR_MASK_MASK 0x00FF0000L
+//CP_CPC_BUSY_HYSTERESIS
+#define CP_CPC_BUSY_HYSTERESIS__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPC_BUSY_HYSTERESIS__CPC_BUSY__SHIFT 0x8
+#define CP_CPC_BUSY_HYSTERESIS__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPC_BUSY_HYSTERESIS__CPC_BUSY_MASK 0x0000FF00L
+//CP_CPF_BUSY_HYSTERESIS1
+#define CP_CPF_BUSY_HYSTERESIS1__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPF_BUSY_HYSTERESIS1__CPF_BUSY__SHIFT 0x8
+#define CP_CPF_BUSY_HYSTERESIS1__CORE_BUSY__SHIFT 0x10
+#define CP_CPF_BUSY_HYSTERESIS1__GFX_BUSY__SHIFT 0x18
+#define CP_CPF_BUSY_HYSTERESIS1__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPF_BUSY_HYSTERESIS1__CPF_BUSY_MASK 0x0000FF00L
+#define CP_CPF_BUSY_HYSTERESIS1__CORE_BUSY_MASK 0x00FF0000L
+#define CP_CPF_BUSY_HYSTERESIS1__GFX_BUSY_MASK 0xFF000000L
+//CP_CPF_BUSY_HYSTERESIS2
+#define CP_CPF_BUSY_HYSTERESIS2__CMP_BUSY__SHIFT 0x0
+#define CP_CPF_BUSY_HYSTERESIS2__CMP_BUSY_MASK 0x000000FFL
+//CP_CPG_BUSY_HYSTERESIS1
+#define CP_CPG_BUSY_HYSTERESIS1__CAC_ACTIVE__SHIFT 0x0
+#define CP_CPG_BUSY_HYSTERESIS1__CP_BUSY__SHIFT 0x8
+#define CP_CPG_BUSY_HYSTERESIS1__DMA_BUSY__SHIFT 0x10
+#define CP_CPG_BUSY_HYSTERESIS1__GFX_BUSY__SHIFT 0x18
+#define CP_CPG_BUSY_HYSTERESIS1__CAC_ACTIVE_MASK 0x000000FFL
+#define CP_CPG_BUSY_HYSTERESIS1__CP_BUSY_MASK 0x0000FF00L
+#define CP_CPG_BUSY_HYSTERESIS1__DMA_BUSY_MASK 0x00FF0000L
+#define CP_CPG_BUSY_HYSTERESIS1__GFX_BUSY_MASK 0xFF000000L
+//CP_CPG_BUSY_HYSTERESIS2
+#define CP_CPG_BUSY_HYSTERESIS2__CMP_BUSY__SHIFT 0x0
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_0__SHIFT 0x8
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_1__SHIFT 0x10
+#define CP_CPG_BUSY_HYSTERESIS2__CMP_BUSY_MASK 0x000000FFL
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_0_MASK 0x0000FF00L
+#define CP_CPG_BUSY_HYSTERESIS2__SPI_CLOCK_1_MASK 0x00FF0000L
+//CP_RB_DOORBELL_CLEAR
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE__SHIFT 0x0
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR__SHIFT 0x8
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR__SHIFT 0x9
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR__SHIFT 0xa
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR__SHIFT 0xb
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR__SHIFT 0xc
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR__SHIFT 0xd
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUEUE_MASK 0x00000007L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_EN_CLEAR_MASK 0x00000100L
+#define CP_RB_DOORBELL_CLEAR__MAPPED_QUE_DOORBELL_HIT_CLEAR_MASK 0x00000200L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_EN_CLEAR_MASK 0x00000400L
+#define CP_RB_DOORBELL_CLEAR__MASTER_DOORBELL_HIT_CLEAR_MASK 0x00000800L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_EN_CLEAR_MASK 0x00001000L
+#define CP_RB_DOORBELL_CLEAR__QUEUES_DOORBELL_HIT_CLEAR_MASK 0x00002000L
+//CP_RB0_ACTIVE
+#define CP_RB0_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB0_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_ACTIVE
+#define CP_RB_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB1_ACTIVE
+#define CP_RB1_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_RB1_ACTIVE__ACTIVE_MASK 0x00000001L
+//CP_RB_STATUS
+#define CP_RB_STATUS__DOORBELL_UPDATED__SHIFT 0x0
+#define CP_RB_STATUS__DOORBELL_ENABLE__SHIFT 0x1
+#define CP_RB_STATUS__DOORBELL_UPDATED_MASK 0x00000001L
+#define CP_RB_STATUS__DOORBELL_ENABLE_MASK 0x00000002L
+//CPG_RCIU_CAM_INDEX
+#define CPG_RCIU_CAM_INDEX__INDEX__SHIFT 0x0
+#define CPG_RCIU_CAM_INDEX__INDEX_MASK 0x0000001FL
+//CPG_RCIU_CAM_DATA
+#define CPG_RCIU_CAM_DATA__DATA__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_RCIU_CAM_DATA_PHASE0
+#define CPG_RCIU_CAM_DATA_PHASE0__ADDR__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE0_EN__SHIFT 0x18
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE1_EN__SHIFT 0x19
+#define CPG_RCIU_CAM_DATA_PHASE0__SKIP_WR__SHIFT 0x1f
+#define CPG_RCIU_CAM_DATA_PHASE0__ADDR_MASK 0x0003FFFFL
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE0_EN_MASK 0x01000000L
+#define CPG_RCIU_CAM_DATA_PHASE0__PIPE1_EN_MASK 0x02000000L
+#define CPG_RCIU_CAM_DATA_PHASE0__SKIP_WR_MASK 0x80000000L
+//CPG_RCIU_CAM_DATA_PHASE1
+#define CPG_RCIU_CAM_DATA_PHASE1__MASK__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE1__MASK_MASK 0xFFFFFFFFL
+//CPG_RCIU_CAM_DATA_PHASE2
+#define CPG_RCIU_CAM_DATA_PHASE2__VALUE__SHIFT 0x0
+#define CPG_RCIU_CAM_DATA_PHASE2__VALUE_MASK 0xFFFFFFFFL
+//CP_GPU_TIMESTAMP_OFFSET_LO
+#define CP_GPU_TIMESTAMP_OFFSET_LO__OFFSET_LO__SHIFT 0x0
+#define CP_GPU_TIMESTAMP_OFFSET_LO__OFFSET_LO_MASK 0xFFFFFFFFL
+//CP_GPU_TIMESTAMP_OFFSET_HI
+#define CP_GPU_TIMESTAMP_OFFSET_HI__OFFSET_HI__SHIFT 0x0
+#define CP_GPU_TIMESTAMP_OFFSET_HI__OFFSET_HI_MASK 0xFFFFFFFFL
+//CP_SDMA_DMA_DONE
+#define CP_SDMA_DMA_DONE__SDMA_ID__SHIFT 0x0
+#define CP_SDMA_DMA_DONE__SDMA_ID_MASK 0x0000000FL
+//CP_PFP_SDMA_CS
+#define CP_PFP_SDMA_CS__REQUEST_GRANT__SHIFT 0x0
+#define CP_PFP_SDMA_CS__SDMA_ID__SHIFT 0x4
+#define CP_PFP_SDMA_CS__REQUEST_POSITION__SHIFT 0x8
+#define CP_PFP_SDMA_CS__SDMA_COUNT__SHIFT 0xc
+#define CP_PFP_SDMA_CS__REQUEST_GRANT_MASK 0x00000001L
+#define CP_PFP_SDMA_CS__SDMA_ID_MASK 0x000000F0L
+#define CP_PFP_SDMA_CS__REQUEST_POSITION_MASK 0x00000F00L
+#define CP_PFP_SDMA_CS__SDMA_COUNT_MASK 0x00003000L
+//CP_ME_SDMA_CS
+#define CP_ME_SDMA_CS__REQUEST_GRANT__SHIFT 0x0
+#define CP_ME_SDMA_CS__SDMA_ID__SHIFT 0x4
+#define CP_ME_SDMA_CS__REQUEST_POSITION__SHIFT 0x8
+#define CP_ME_SDMA_CS__SDMA_COUNT__SHIFT 0xc
+#define CP_ME_SDMA_CS__REQUEST_GRANT_MASK 0x00000001L
+#define CP_ME_SDMA_CS__SDMA_ID_MASK 0x000000F0L
+#define CP_ME_SDMA_CS__REQUEST_POSITION_MASK 0x00000F00L
+#define CP_ME_SDMA_CS__SDMA_COUNT_MASK 0x00003000L
+//CPF_GCR_CNTL
+#define CPF_GCR_CNTL__GCR_GL_CMD__SHIFT 0x0
+#define CPF_GCR_CNTL__GCR_GL_CMD_MASK 0x0007FFFFL
+//CPG_UTCL1_STATUS
+#define CPG_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPG_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPG_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPG_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPG_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPG_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPG_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPG_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPG_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPC_UTCL1_STATUS
+#define CPC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CPF_UTCL1_STATUS
+#define CPF_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define CPF_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define CPF_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define CPF_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define CPF_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define CPF_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define CPF_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define CPF_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define CPF_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+//CP_SD_CNTL
+#define CP_SD_CNTL__CPF_EN__SHIFT 0x0
+#define CP_SD_CNTL__CPG_EN__SHIFT 0x1
+#define CP_SD_CNTL__CPC_EN__SHIFT 0x2
+#define CP_SD_CNTL__RLC_EN__SHIFT 0x3
+#define CP_SD_CNTL__GE_EN__SHIFT 0x5
+#define CP_SD_CNTL__UTCL1_EN__SHIFT 0x6
+#define CP_SD_CNTL__EA_EN__SHIFT 0x9
+#define CP_SD_CNTL__SDMA_EN__SHIFT 0xa
+#define CP_SD_CNTL__SD_VMIDVEC_OVERRIDE__SHIFT 0x1f
+#define CP_SD_CNTL__CPF_EN_MASK 0x00000001L
+#define CP_SD_CNTL__CPG_EN_MASK 0x00000002L
+#define CP_SD_CNTL__CPC_EN_MASK 0x00000004L
+#define CP_SD_CNTL__RLC_EN_MASK 0x00000008L
+#define CP_SD_CNTL__GE_EN_MASK 0x00000020L
+#define CP_SD_CNTL__UTCL1_EN_MASK 0x00000040L
+#define CP_SD_CNTL__EA_EN_MASK 0x00000200L
+#define CP_SD_CNTL__SDMA_EN_MASK 0x00000400L
+#define CP_SD_CNTL__SD_VMIDVEC_OVERRIDE_MASK 0x80000000L
+//CP_SOFT_RESET_CNTL
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET__SHIFT 0x0
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET__SHIFT 0x1
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET__SHIFT 0x2
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET__SHIFT 0x3
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET__SHIFT 0x4
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET__SHIFT 0x5
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET__SHIFT 0x6
+#define CP_SOFT_RESET_CNTL__GFX_HQD_REG_RESET__SHIFT 0x7
+#define CP_SOFT_RESET_CNTL__CMP_ONLY_SOFT_RESET_MASK 0x00000001L
+#define CP_SOFT_RESET_CNTL__GFX_ONLY_SOFT_RESET_MASK 0x00000002L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_REG_RESET_MASK 0x00000004L
+#define CP_SOFT_RESET_CNTL__CMP_INTR_REG_RESET_MASK 0x00000008L
+#define CP_SOFT_RESET_CNTL__CMP_HQD_QUEUE_DOORBELL_RESET_MASK 0x00000010L
+#define CP_SOFT_RESET_CNTL__GFX_RB_DOORBELL_RESET_MASK 0x00000020L
+#define CP_SOFT_RESET_CNTL__GFX_INTR_REG_RESET_MASK 0x00000040L
+#define CP_SOFT_RESET_CNTL__GFX_HQD_REG_RESET_MASK 0x00000080L
+//CP_CPC_GFX_CNTL
+#define CP_CPC_GFX_CNTL__QUEUEID__SHIFT 0x0
+#define CP_CPC_GFX_CNTL__PIPEID__SHIFT 0x3
+#define CP_CPC_GFX_CNTL__MEID__SHIFT 0x5
+#define CP_CPC_GFX_CNTL__VALID__SHIFT 0x7
+#define CP_CPC_GFX_CNTL__QUEUEID_MASK 0x00000007L
+#define CP_CPC_GFX_CNTL__PIPEID_MASK 0x00000018L
+#define CP_CPC_GFX_CNTL__MEID_MASK 0x00000060L
+#define CP_CPC_GFX_CNTL__VALID_MASK 0x00000080L
+
+
+// addressBlock: gc_spipdec
+//SPI_ARB_PRIORITY
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0__SHIFT 0x0
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1__SHIFT 0x3
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2__SHIFT 0x6
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3__SHIFT 0x9
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT__SHIFT 0xc
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT__SHIFT 0xe
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT__SHIFT 0x10
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT__SHIFT 0x12
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS0_MASK 0x00000007L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS1_MASK 0x00000038L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS2_MASK 0x000001C0L
+#define SPI_ARB_PRIORITY__PIPE_ORDER_TS3_MASK 0x00000E00L
+#define SPI_ARB_PRIORITY__TS0_DUR_MULT_MASK 0x00003000L
+#define SPI_ARB_PRIORITY__TS1_DUR_MULT_MASK 0x0000C000L
+#define SPI_ARB_PRIORITY__TS2_DUR_MULT_MASK 0x00030000L
+#define SPI_ARB_PRIORITY__TS3_DUR_MULT_MASK 0x000C0000L
+//SPI_ARB_CYCLES_0
+#define SPI_ARB_CYCLES_0__TS0_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_0__TS1_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_0__TS0_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_0__TS1_DURATION_MASK 0xFFFF0000L
+//SPI_ARB_CYCLES_1
+#define SPI_ARB_CYCLES_1__TS2_DURATION__SHIFT 0x0
+#define SPI_ARB_CYCLES_1__TS3_DURATION__SHIFT 0x10
+#define SPI_ARB_CYCLES_1__TS2_DURATION_MASK 0x0000FFFFL
+#define SPI_ARB_CYCLES_1__TS3_DURATION_MASK 0xFFFF0000L
+//SPI_WCL_PIPE_PERCENT_GFX
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_GFX__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_GFX__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_HP3D
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE__SHIFT 0xc
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE__SHIFT 0x16
+#define SPI_WCL_PIPE_PERCENT_HP3D__VALUE_MASK 0x0000007FL
+#define SPI_WCL_PIPE_PERCENT_HP3D__HS_GRP_VALUE_MASK 0x0001F000L
+#define SPI_WCL_PIPE_PERCENT_HP3D__GS_GRP_VALUE_MASK 0x07C00000L
+//SPI_WCL_PIPE_PERCENT_CS0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS0__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS1
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS1__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS2
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS2__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS3
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS3__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS4
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS4__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS5
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS5__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS6
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS6__VALUE_MASK 0x7FL
+//SPI_WCL_PIPE_PERCENT_CS7
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE__SHIFT 0x0
+#define SPI_WCL_PIPE_PERCENT_CS7__VALUE_MASK 0x7FL
+//SPI_USER_ACCUM_VMID_CNTL
+#define SPI_USER_ACCUM_VMID_CNTL__EN_USER_ACCUM__SHIFT 0x0
+#define SPI_USER_ACCUM_VMID_CNTL__EN_USER_ACCUM_MASK 0x0000000FL
+//SPI_GDBG_PER_VMID_CNTL
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID__SHIFT 0x0
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE__SHIFT 0x1
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN__SHIFT 0x3
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN__SHIFT 0x4
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE__SHIFT 0xd
+#define SPI_GDBG_PER_VMID_CNTL__STALL_VMID_MASK 0x00000001L
+#define SPI_GDBG_PER_VMID_CNTL__LAUNCH_MODE_MASK 0x00000006L
+#define SPI_GDBG_PER_VMID_CNTL__TRAP_EN_MASK 0x00000008L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_EN_MASK 0x00001FF0L
+#define SPI_GDBG_PER_VMID_CNTL__EXCP_REPLACE_MASK 0x00002000L
+//SPI_COMPUTE_QUEUE_RESET
+#define SPI_COMPUTE_QUEUE_RESET__RESET__SHIFT 0x0
+#define SPI_COMPUTE_QUEUE_RESET__RESET_MASK 0x01L
+//SPI_COMPUTE_WF_CTX_SAVE
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE__INITIATE_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_INTERRUPT_EN_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE__DONE_INTERRUPT_EN_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE__GDS_REQ_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE__SAVE_BUSY_MASK 0x80000000L
+
+
+// addressBlock: gc_cpphqddec
+//CP_HPD_UTCL1_CNTL
+#define CP_HPD_UTCL1_CNTL__SELECT__SHIFT 0x0
+#define CP_HPD_UTCL1_CNTL__DISABLE_ERROR_REPORT__SHIFT 0xa
+#define CP_HPD_UTCL1_CNTL__SELECT_MASK 0x0000000FL
+#define CP_HPD_UTCL1_CNTL__DISABLE_ERROR_REPORT_MASK 0x00000400L
+//CP_HPD_UTCL1_ERROR
+#define CP_HPD_UTCL1_ERROR__ADDR_HI__SHIFT 0x0
+#define CP_HPD_UTCL1_ERROR__TYPE__SHIFT 0x10
+#define CP_HPD_UTCL1_ERROR__VMID__SHIFT 0x14
+#define CP_HPD_UTCL1_ERROR__ADDR_HI_MASK 0x0000FFFFL
+#define CP_HPD_UTCL1_ERROR__TYPE_MASK 0x00010000L
+#define CP_HPD_UTCL1_ERROR__VMID_MASK 0x00F00000L
+//CP_HPD_UTCL1_ERROR_ADDR
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR__SHIFT 0xc
+#define CP_HPD_UTCL1_ERROR_ADDR__ADDR_MASK 0xFFFFF000L
+//CP_MQD_BASE_ADDR
+#define CP_MQD_BASE_ADDR__BASE_ADDR__SHIFT 0x2
+#define CP_MQD_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_MQD_BASE_ADDR_HI
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_MQD_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_ACTIVE
+#define CP_HQD_ACTIVE__ACTIVE__SHIFT 0x0
+#define CP_HQD_ACTIVE__BUSY_GATE__SHIFT 0x1
+#define CP_HQD_ACTIVE__ACTIVE_MASK 0x00000001L
+#define CP_HQD_ACTIVE__BUSY_GATE_MASK 0x00000002L
+//CP_HQD_VMID
+#define CP_HQD_VMID__VMID__SHIFT 0x0
+#define CP_HQD_VMID__IB_VMID__SHIFT 0x8
+#define CP_HQD_VMID__VQID__SHIFT 0x10
+#define CP_HQD_VMID__VMID_MASK 0x0000000FL
+#define CP_HQD_VMID__IB_VMID_MASK 0x00000F00L
+#define CP_HQD_VMID__VQID_MASK 0x03FF0000L
+//CP_HQD_PERSISTENT_STATE
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ__SHIFT 0x0
+#define CP_HQD_PERSISTENT_STATE__TMZ_CONNECT_OVERRIDE__SHIFT 0x1
+#define CP_HQD_PERSISTENT_STATE__SUSPEND_STATUS__SHIFT 0x7
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT 0x8
+#define CP_HQD_PERSISTENT_STATE__TMZ_SWITCH_EXEMPT__SHIFT 0x12
+#define CP_HQD_PERSISTENT_STATE__TMZ_MATCH_DIS__SHIFT 0x13
+#define CP_HQD_PERSISTENT_STATE__WPP_CLAMP_EN__SHIFT 0x14
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN__SHIFT 0x15
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN__SHIFT 0x16
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN__SHIFT 0x17
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN__SHIFT 0x18
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN__SHIFT 0x19
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN__SHIFT 0x1a
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN__SHIFT 0x1b
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE__SHIFT 0x1c
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES__SHIFT 0x1d
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT 0x1e
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE__SHIFT 0x1f
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK 0x00000001L
+#define CP_HQD_PERSISTENT_STATE__TMZ_CONNECT_OVERRIDE_MASK 0x00000002L
+#define CP_HQD_PERSISTENT_STATE__SUSPEND_STATUS_MASK 0x00000080L
+#define CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE_MASK 0x0003FF00L
+#define CP_HQD_PERSISTENT_STATE__TMZ_SWITCH_EXEMPT_MASK 0x00040000L
+#define CP_HQD_PERSISTENT_STATE__TMZ_MATCH_DIS_MASK 0x00080000L
+#define CP_HQD_PERSISTENT_STATE__WPP_CLAMP_EN_MASK 0x00100000L
+#define CP_HQD_PERSISTENT_STATE__WPP_SWITCH_QOS_EN_MASK 0x00200000L
+#define CP_HQD_PERSISTENT_STATE__IQ_SWITCH_QOS_EN_MASK 0x00400000L
+#define CP_HQD_PERSISTENT_STATE__IB_SWITCH_QOS_EN_MASK 0x00800000L
+#define CP_HQD_PERSISTENT_STATE__EOP_SWITCH_QOS_EN_MASK 0x01000000L
+#define CP_HQD_PERSISTENT_STATE__PQ_SWITCH_QOS_EN_MASK 0x02000000L
+#define CP_HQD_PERSISTENT_STATE__TC_OFFLOAD_QOS_EN_MASK 0x04000000L
+#define CP_HQD_PERSISTENT_STATE__CACHE_FULL_PACKET_EN_MASK 0x08000000L
+#define CP_HQD_PERSISTENT_STATE__RESTORE_ACTIVE_MASK 0x10000000L
+#define CP_HQD_PERSISTENT_STATE__RELAUNCH_WAVES_MASK 0x20000000L
+#define CP_HQD_PERSISTENT_STATE__QSWITCH_MODE_MASK 0x40000000L
+#define CP_HQD_PERSISTENT_STATE__DISP_ACTIVE_MASK 0x80000000L
+//CP_HQD_PIPE_PRIORITY
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY__SHIFT 0x0
+#define CP_HQD_PIPE_PRIORITY__PIPE_PRIORITY_MASK 0x00000003L
+//CP_HQD_QUEUE_PRIORITY
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL__SHIFT 0x0
+#define CP_HQD_QUEUE_PRIORITY__PRIORITY_LEVEL_MASK 0x0000000FL
+//CP_HQD_QUANTUM
+#define CP_HQD_QUANTUM__QUANTUM_EN__SHIFT 0x0
+#define CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT 0x4
+#define CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT 0x8
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE__SHIFT 0x1f
+#define CP_HQD_QUANTUM__QUANTUM_EN_MASK 0x00000001L
+#define CP_HQD_QUANTUM__QUANTUM_SCALE_MASK 0x00000010L
+#define CP_HQD_QUANTUM__QUANTUM_DURATION_MASK 0x00003F00L
+#define CP_HQD_QUANTUM__QUANTUM_ACTIVE_MASK 0x80000000L
+//CP_HQD_PQ_BASE
+#define CP_HQD_PQ_BASE__ADDR__SHIFT 0x0
+#define CP_HQD_PQ_BASE__ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_BASE_HI
+#define CP_HQD_PQ_BASE_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_BASE_HI__ADDR_HI_MASK 0x000000FFL
+//CP_HQD_PQ_RPTR
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_RPTR__CONSUMED_OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_RPTR_REPORT_ADDR
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR__SHIFT 0x2
+#define CP_HQD_PQ_RPTR_REPORT_ADDR__RPTR_REPORT_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_PQ_RPTR_REPORT_ADDR_HI
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI__RPTR_REPORT_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_WPTR_POLL_ADDR
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR__SHIFT 0x3
+#define CP_HQD_PQ_WPTR_POLL_ADDR__WPTR_ADDR_MASK 0xFFFFFFF8L
+//CP_HQD_PQ_WPTR_POLL_ADDR_HI
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_POLL_ADDR_HI__WPTR_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_PQ_DOORBELL_CONTROL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT 0x0
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT 0x1
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE__SHIFT 0x1c
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT__SHIFT 0x1d
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT 0x1e
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT__SHIFT 0x1f
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE_MASK 0x00000001L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP_MASK 0x00000002L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK 0x10000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SCHD_HIT_MASK 0x20000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK 0x40000000L
+#define CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK 0x80000000L
+//CP_HQD_PQ_CONTROL
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE__SHIFT 0x0
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY__SHIFT 0x6
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY__SHIFT 0x7
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT 0x8
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT 0xe
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY__SHIFT 0xf
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT 0x12
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_PQ_CONTROL__TMZ__SHIFT 0x16
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_PQ_CONTROL__PQ_VOLATILE__SHIFT 0x1a
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR__SHIFT 0x1b
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH__SHIFT 0x1c
+#define CP_HQD_PQ_CONTROL__TUNNEL_DISPATCH__SHIFT 0x1d
+#define CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT 0x1f
+#define CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK 0x0000003FL
+#define CP_HQD_PQ_CONTROL__WPTR_CARRY_MASK 0x00000040L
+#define CP_HQD_PQ_CONTROL__RPTR_CARRY_MASK 0x00000080L
+#define CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK 0x00003F00L
+#define CP_HQD_PQ_CONTROL__QUEUE_FULL_EN_MASK 0x00004000L
+#define CP_HQD_PQ_CONTROL__PQ_EMPTY_MASK 0x00008000L
+#define CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR_MASK 0x000C0000L
+#define CP_HQD_PQ_CONTROL__MIN_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_PQ_CONTROL__TMZ_MASK 0x00400000L
+#define CP_HQD_PQ_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_PQ_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK 0x04000000L
+#define CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK 0x08000000L
+#define CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK 0x10000000L
+#define CP_HQD_PQ_CONTROL__TUNNEL_DISPATCH_MASK 0x20000000L
+#define CP_HQD_PQ_CONTROL__PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK 0x80000000L
+//CP_HQD_IB_BASE_ADDR
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR__SHIFT 0x2
+#define CP_HQD_IB_BASE_ADDR__IB_BASE_ADDR_MASK 0xFFFFFFFCL
+//CP_HQD_IB_BASE_ADDR_HI
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_IB_BASE_ADDR_HI__IB_BASE_ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_IB_RPTR
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET__SHIFT 0x0
+#define CP_HQD_IB_RPTR__CONSUMED_OFFSET_MASK 0x000FFFFFL
+//CP_HQD_IB_CONTROL
+#define CP_HQD_IB_CONTROL__IB_SIZE__SHIFT 0x0
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT 0x14
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IB_CONTROL__IB_VOLATILE__SHIFT 0x1a
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE__SHIFT 0x1e
+#define CP_HQD_IB_CONTROL__PROCESSING_IB__SHIFT 0x1f
+#define CP_HQD_IB_CONTROL__IB_SIZE_MASK 0x000FFFFFL
+#define CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE_MASK 0x00300000L
+#define CP_HQD_IB_CONTROL__IB_EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IB_CONTROL__IB_CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_IB_CONTROL__IB_VOLATILE_MASK 0x04000000L
+#define CP_HQD_IB_CONTROL__IB_PRIV_STATE_MASK 0x40000000L
+#define CP_HQD_IB_CONTROL__PROCESSING_IB_MASK 0x80000000L
+//CP_HQD_IQ_TIMER
+#define CP_HQD_IQ_TIMER__WAIT_TIME__SHIFT 0x0
+#define CP_HQD_IQ_TIMER__RETRY_TYPE__SHIFT 0x8
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE__SHIFT 0xb
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE__SHIFT 0xc
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT__SHIFT 0xe
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE__SHIFT 0x10
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER__SHIFT 0x16
+#define CP_HQD_IQ_TIMER__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_IQ_TIMER__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_IQ_TIMER__IQ_VOLATILE__SHIFT 0x1a
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE__SHIFT 0x1b
+#define CP_HQD_IQ_TIMER__REARM_TIMER__SHIFT 0x1c
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN__SHIFT 0x1d
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ__SHIFT 0x1e
+#define CP_HQD_IQ_TIMER__ACTIVE__SHIFT 0x1f
+#define CP_HQD_IQ_TIMER__WAIT_TIME_MASK 0x000000FFL
+#define CP_HQD_IQ_TIMER__RETRY_TYPE_MASK 0x00000700L
+#define CP_HQD_IQ_TIMER__IMMEDIATE_EXPIRE_MASK 0x00000800L
+#define CP_HQD_IQ_TIMER__INTERRUPT_TYPE_MASK 0x00003000L
+#define CP_HQD_IQ_TIMER__CLOCK_COUNT_MASK 0x0000C000L
+#define CP_HQD_IQ_TIMER__INTERRUPT_SIZE_MASK 0x003F0000L
+#define CP_HQD_IQ_TIMER__QUANTUM_TIMER_MASK 0x00400000L
+#define CP_HQD_IQ_TIMER__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_IQ_TIMER__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_IQ_TIMER__IQ_VOLATILE_MASK 0x04000000L
+#define CP_HQD_IQ_TIMER__QUEUE_TYPE_MASK 0x08000000L
+#define CP_HQD_IQ_TIMER__REARM_TIMER_MASK 0x10000000L
+#define CP_HQD_IQ_TIMER__PROCESS_IQ_EN_MASK 0x20000000L
+#define CP_HQD_IQ_TIMER__PROCESSING_IQ_MASK 0x40000000L
+#define CP_HQD_IQ_TIMER__ACTIVE_MASK 0x80000000L
+//CP_HQD_IQ_RPTR
+#define CP_HQD_IQ_RPTR__OFFSET__SHIFT 0x0
+#define CP_HQD_IQ_RPTR__OFFSET_MASK 0x0000003FL
+//CP_HQD_DEQUEUE_REQUEST
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ__SHIFT 0x0
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT__SHIFT 0x8
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_MASK 0x0000000FL
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_INT_MASK 0x00000100L
+#define CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_REQUEST__DEQUEUE_REQ_EN_MASK 0x00000400L
+//CP_HQD_DMA_OFFLOAD
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_DMA_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_DMA_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_DMA_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_OFFLOAD
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD__SHIFT 0x0
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN__SHIFT 0x1
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD__SHIFT 0x2
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN__SHIFT 0x3
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD__SHIFT 0x4
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN__SHIFT 0x5
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_MASK 0x00000001L
+#define CP_HQD_OFFLOAD__DMA_OFFLOAD_EN_MASK 0x00000002L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_MASK 0x00000004L
+#define CP_HQD_OFFLOAD__AQL_OFFLOAD_EN_MASK 0x00000008L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_MASK 0x00000010L
+#define CP_HQD_OFFLOAD__EOP_OFFLOAD_EN_MASK 0x00000020L
+//CP_HQD_SEMA_CMD
+#define CP_HQD_SEMA_CMD__RETRY__SHIFT 0x0
+#define CP_HQD_SEMA_CMD__RESULT__SHIFT 0x1
+#define CP_HQD_SEMA_CMD__POLLING_DIS__SHIFT 0x8
+#define CP_HQD_SEMA_CMD__MESSAGE_EN__SHIFT 0x9
+#define CP_HQD_SEMA_CMD__RETRY_MASK 0x00000001L
+#define CP_HQD_SEMA_CMD__RESULT_MASK 0x00000006L
+#define CP_HQD_SEMA_CMD__POLLING_DIS_MASK 0x00000100L
+#define CP_HQD_SEMA_CMD__MESSAGE_EN_MASK 0x00000200L
+//CP_HQD_MSG_TYPE
+#define CP_HQD_MSG_TYPE__ACTION__SHIFT 0x0
+#define CP_HQD_MSG_TYPE__SAVE_STATE__SHIFT 0x4
+#define CP_HQD_MSG_TYPE__ACTION_MASK 0x00000007L
+#define CP_HQD_MSG_TYPE__SAVE_STATE_MASK 0x00000070L
+//CP_HQD_ATOMIC0_PREOP_LO
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_LO__ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC0_PREOP_HI
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC0_PREOP_HI__ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_LO
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_LO__ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_HQD_ATOMIC1_PREOP_HI
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_HQD_ATOMIC1_PREOP_HI__ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER0
+#define CP_HQD_HQ_SCHEDULER0__CWSR__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER0__SAVE_STATUS__SHIFT 0x1
+#define CP_HQD_HQ_SCHEDULER0__RSRV__SHIFT 0x2
+#define CP_HQD_HQ_SCHEDULER0__STATIC_QUEUE__SHIFT 0x3
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_RUN_ONCE__SHIFT 0x6
+#define CP_HQD_HQ_SCHEDULER0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_SCHEDULER0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_SCHEDULER0__C_INHERIT_VMID__SHIFT 0x9
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SCHEDULER_TYPE__SHIFT 0xa
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_USE_GWS__SHIFT 0xd
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_DEBUG_EN__SHIFT 0xe
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SLOT_CONNECTED__SHIFT 0xf
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_ENABLED__SHIFT 0x14
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_PIPE__SHIFT 0x15
+#define CP_HQD_HQ_SCHEDULER0__CONCURRENT_PROCESS_COUNT__SHIFT 0x18
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_SCHEDULER0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_SCHEDULER0__CWSR_MASK 0x00000001L
+#define CP_HQD_HQ_SCHEDULER0__SAVE_STATUS_MASK 0x00000002L
+#define CP_HQD_HQ_SCHEDULER0__RSRV_MASK 0x00000004L
+#define CP_HQD_HQ_SCHEDULER0__STATIC_QUEUE_MASK 0x00000038L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_RUN_ONCE_MASK 0x00000040L
+#define CP_HQD_HQ_SCHEDULER0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_SCHEDULER0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_SCHEDULER0__C_INHERIT_VMID_MASK 0x00000200L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SCHEDULER_TYPE_MASK 0x00001C00L
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_USE_GWS_MASK 0x00002000L
+#define CP_HQD_HQ_SCHEDULER0__C_QUEUE_DEBUG_EN_MASK 0x00004000L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_SLOT_CONNECTED_MASK 0x00008000L
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_ENABLED_MASK 0x00100000L
+#define CP_HQD_HQ_SCHEDULER0__MES_INTERRUPT_PIPE_MASK 0x00600000L
+#define CP_HQD_HQ_SCHEDULER0__CONCURRENT_PROCESS_COUNT_MASK 0x0F000000L
+#define CP_HQD_HQ_SCHEDULER0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_SCHEDULER0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_STATUS0
+#define CP_HQD_HQ_STATUS0__CWSR__SHIFT 0x0
+#define CP_HQD_HQ_STATUS0__SAVE_STATUS__SHIFT 0x1
+#define CP_HQD_HQ_STATUS0__RSRV__SHIFT 0x2
+#define CP_HQD_HQ_STATUS0__STATIC_QUEUE__SHIFT 0x3
+#define CP_HQD_HQ_STATUS0__QUEUE_RUN_ONCE__SHIFT 0x6
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT__SHIFT 0x7
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY__SHIFT 0x8
+#define CP_HQD_HQ_STATUS0__C_INHERIT_VMID__SHIFT 0x9
+#define CP_HQD_HQ_STATUS0__QUEUE_SCHEDULER_TYPE__SHIFT 0xa
+#define CP_HQD_HQ_STATUS0__C_QUEUE_USE_GWS__SHIFT 0xd
+#define CP_HQD_HQ_STATUS0__C_QUEUE_DEBUG_EN__SHIFT 0xe
+#define CP_HQD_HQ_STATUS0__QUEUE_SLOT_CONNECTED__SHIFT 0xf
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_ENABLED__SHIFT 0x14
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_PIPE__SHIFT 0x15
+#define CP_HQD_HQ_STATUS0__CONCURRENT_PROCESS_COUNT__SHIFT 0x18
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE__SHIFT 0x1e
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN__SHIFT 0x1f
+#define CP_HQD_HQ_STATUS0__CWSR_MASK 0x00000001L
+#define CP_HQD_HQ_STATUS0__SAVE_STATUS_MASK 0x00000002L
+#define CP_HQD_HQ_STATUS0__RSRV_MASK 0x00000004L
+#define CP_HQD_HQ_STATUS0__STATIC_QUEUE_MASK 0x00000038L
+#define CP_HQD_HQ_STATUS0__QUEUE_RUN_ONCE_MASK 0x00000040L
+#define CP_HQD_HQ_STATUS0__SCRATCH_RAM_INIT_MASK 0x00000080L
+#define CP_HQD_HQ_STATUS0__TCL2_DIRTY_MASK 0x00000100L
+#define CP_HQD_HQ_STATUS0__C_INHERIT_VMID_MASK 0x00000200L
+#define CP_HQD_HQ_STATUS0__QUEUE_SCHEDULER_TYPE_MASK 0x00001C00L
+#define CP_HQD_HQ_STATUS0__C_QUEUE_USE_GWS_MASK 0x00002000L
+#define CP_HQD_HQ_STATUS0__C_QUEUE_DEBUG_EN_MASK 0x00004000L
+#define CP_HQD_HQ_STATUS0__QUEUE_SLOT_CONNECTED_MASK 0x00008000L
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_ENABLED_MASK 0x00100000L
+#define CP_HQD_HQ_STATUS0__MES_INTERRUPT_PIPE_MASK 0x00600000L
+#define CP_HQD_HQ_STATUS0__CONCURRENT_PROCESS_COUNT_MASK 0x0F000000L
+#define CP_HQD_HQ_STATUS0__QUEUE_IDLE_MASK 0x40000000L
+#define CP_HQD_HQ_STATUS0__DB_UPDATED_MSG_EN_MASK 0x80000000L
+//CP_HQD_HQ_CONTROL0
+#define CP_HQD_HQ_CONTROL0__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL0__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_SCHEDULER1
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER__SHIFT 0x0
+#define CP_HQD_HQ_SCHEDULER1__SCHEDULER_MASK 0xFFFFFFFFL
+//CP_MQD_CONTROL
+#define CP_MQD_CONTROL__VMID__SHIFT 0x0
+#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
+#define CP_MQD_CONTROL__PROCESSING_MQD__SHIFT 0xc
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN__SHIFT 0xd
+#define CP_MQD_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_MQD_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_MQD_CONTROL__MQD_VOLATILE__SHIFT 0x1a
+#define CP_MQD_CONTROL__VMID_MASK 0x0000000FL
+#define CP_MQD_CONTROL__PRIV_STATE_MASK 0x00000100L
+#define CP_MQD_CONTROL__PROCESSING_MQD_MASK 0x00001000L
+#define CP_MQD_CONTROL__PROCESSING_MQD_EN_MASK 0x00002000L
+#define CP_MQD_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MQD_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_MQD_CONTROL__MQD_VOLATILE_MASK 0x04000000L
+//CP_HQD_HQ_STATUS1
+#define CP_HQD_HQ_STATUS1__STATUS__SHIFT 0x0
+#define CP_HQD_HQ_STATUS1__STATUS_MASK 0xFFFFFFFFL
+//CP_HQD_HQ_CONTROL1
+#define CP_HQD_HQ_CONTROL1__CONTROL__SHIFT 0x0
+#define CP_HQD_HQ_CONTROL1__CONTROL_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR__BASE_ADDR_MASK 0xFFFFFFFFL
+//CP_HQD_EOP_BASE_ADDR_HI
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI__SHIFT 0x0
+#define CP_HQD_EOP_BASE_ADDR_HI__BASE_ADDR_HI_MASK 0x000000FFL
+//CP_HQD_EOP_CONTROL
+#define CP_HQD_EOP_CONTROL__EOP_SIZE__SHIFT 0x0
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP__SHIFT 0x8
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN__SHIFT 0xc
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB__SHIFT 0xd
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN__SHIFT 0xe
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER__SHIFT 0x15
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN__SHIFT 0x16
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY__SHIFT 0x18
+#define CP_HQD_EOP_CONTROL__EOP_VOLATILE__SHIFT 0x1a
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT__SHIFT 0x1d
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM__SHIFT 0x1f
+#define CP_HQD_EOP_CONTROL__EOP_SIZE_MASK 0x0000003FL
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOP_MASK 0x00000100L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOP_EN_MASK 0x00001000L
+#define CP_HQD_EOP_CONTROL__PROCESSING_EOPIB_MASK 0x00002000L
+#define CP_HQD_EOP_CONTROL__PROCESS_EOPIB_EN_MASK 0x00004000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_MASK 0x00200000L
+#define CP_HQD_EOP_CONTROL__HALT_FETCHER_EN_MASK 0x00400000L
+#define CP_HQD_EOP_CONTROL__EXE_DISABLE_MASK 0x00800000L
+#define CP_HQD_EOP_CONTROL__CACHE_POLICY_MASK 0x03000000L
+#define CP_HQD_EOP_CONTROL__EOP_VOLATILE_MASK 0x04000000L
+#define CP_HQD_EOP_CONTROL__SIG_SEM_RESULT_MASK 0x60000000L
+#define CP_HQD_EOP_CONTROL__PEND_SIG_SEM_MASK 0x80000000L
+//CP_HQD_EOP_RPTR
+#define CP_HQD_EOP_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_EOP_RPTR__RESET_FETCHER__SHIFT 0x1c
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND__SHIFT 0x1d
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR__SHIFT 0x1e
+#define CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT 0x1f
+#define CP_HQD_EOP_RPTR__RPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_RPTR__RESET_FETCHER_MASK 0x10000000L
+#define CP_HQD_EOP_RPTR__DEQUEUE_PEND_MASK 0x20000000L
+#define CP_HQD_EOP_RPTR__RPTR_EQ_CSMD_WPTR_MASK 0x40000000L
+#define CP_HQD_EOP_RPTR__INIT_FETCHER_MASK 0x80000000L
+//CP_HQD_EOP_WPTR
+#define CP_HQD_EOP_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR__EOP_EMPTY__SHIFT 0xf
+#define CP_HQD_EOP_WPTR__EOP_AVAIL__SHIFT 0x10
+#define CP_HQD_EOP_WPTR__WPTR_MASK 0x00001FFFL
+#define CP_HQD_EOP_WPTR__EOP_EMPTY_MASK 0x00008000L
+#define CP_HQD_EOP_WPTR__EOP_AVAIL_MASK 0x1FFF0000L
+//CP_HQD_EOP_EVENTS
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT__SHIFT 0x0
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND__SHIFT 0x10
+#define CP_HQD_EOP_EVENTS__EVENT_COUNT_MASK 0x00000FFFL
+#define CP_HQD_EOP_EVENTS__CS_PARTIAL_FLUSH_PEND_MASK 0x00010000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_LO
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_BASE_ADDR_LO__ADDR_MASK 0xFFFFF000L
+//CP_HQD_CTX_SAVE_BASE_ADDR_HI
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_HQD_CTX_SAVE_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_HQD_CTX_SAVE_CONTROL
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY__SHIFT 0x3
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE__SHIFT 0x17
+#define CP_HQD_CTX_SAVE_CONTROL__POLICY_MASK 0x00000018L
+#define CP_HQD_CTX_SAVE_CONTROL__EXE_DISABLE_MASK 0x00800000L
+//CP_HQD_CNTL_STACK_OFFSET
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_CNTL_STACK_SIZE
+#define CP_HQD_CNTL_STACK_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CNTL_STACK_SIZE__SIZE_MASK 0x0000F000L
+//CP_HQD_WG_STATE_OFFSET
+#define CP_HQD_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CP_HQD_CTX_SAVE_SIZE
+#define CP_HQD_CTX_SAVE_SIZE__SIZE__SHIFT 0xc
+#define CP_HQD_CTX_SAVE_SIZE__SIZE_MASK 0x03FFF000L
+//CP_HQD_GDS_RESOURCE_STATE
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED__SHIFT 0x0
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED__SHIFT 0x1
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE__SHIFT 0x4
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR__SHIFT 0xc
+#define CP_HQD_GDS_RESOURCE_STATE__OA_REQUIRED_MASK 0x00000001L
+#define CP_HQD_GDS_RESOURCE_STATE__OA_ACQUIRED_MASK 0x00000002L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_SIZE_MASK 0x000003F0L
+#define CP_HQD_GDS_RESOURCE_STATE__GWS_PNTR_MASK 0x0003F000L
+//CP_HQD_ERROR
+#define CP_HQD_ERROR__EDC_ERROR_ID__SHIFT 0x0
+#define CP_HQD_ERROR__SUA_ERROR__SHIFT 0x4
+#define CP_HQD_ERROR__AQL_ERROR__SHIFT 0x5
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR__SHIFT 0x8
+#define CP_HQD_ERROR__IB_UTCL1_ERROR__SHIFT 0x9
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR__SHIFT 0xa
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR__SHIFT 0xb
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR__SHIFT 0xc
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR__SHIFT 0xd
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR__SHIFT 0xe
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR__SHIFT 0xf
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR__SHIFT 0x10
+#define CP_HQD_ERROR__SR_UTCL1_ERROR__SHIFT 0x11
+#define CP_HQD_ERROR__QU_UTCL1_ERROR__SHIFT 0x12
+#define CP_HQD_ERROR__TC_UTCL1_ERROR__SHIFT 0x13
+#define CP_HQD_ERROR__EDC_ERROR_ID_MASK 0x0000000FL
+#define CP_HQD_ERROR__SUA_ERROR_MASK 0x00000010L
+#define CP_HQD_ERROR__AQL_ERROR_MASK 0x00000020L
+#define CP_HQD_ERROR__PQ_UTCL1_ERROR_MASK 0x00000100L
+#define CP_HQD_ERROR__IB_UTCL1_ERROR_MASK 0x00000200L
+#define CP_HQD_ERROR__EOP_UTCL1_ERROR_MASK 0x00000400L
+#define CP_HQD_ERROR__IQ_UTCL1_ERROR_MASK 0x00000800L
+#define CP_HQD_ERROR__RRPT_UTCL1_ERROR_MASK 0x00001000L
+#define CP_HQD_ERROR__WPP_UTCL1_ERROR_MASK 0x00002000L
+#define CP_HQD_ERROR__SEM_UTCL1_ERROR_MASK 0x00004000L
+#define CP_HQD_ERROR__DMA_SRC_UTCL1_ERROR_MASK 0x00008000L
+#define CP_HQD_ERROR__DMA_DST_UTCL1_ERROR_MASK 0x00010000L
+#define CP_HQD_ERROR__SR_UTCL1_ERROR_MASK 0x00020000L
+#define CP_HQD_ERROR__QU_UTCL1_ERROR_MASK 0x00040000L
+#define CP_HQD_ERROR__TC_UTCL1_ERROR_MASK 0x00080000L
+//CP_HQD_EOP_WPTR_MEM
+#define CP_HQD_EOP_WPTR_MEM__WPTR__SHIFT 0x0
+#define CP_HQD_EOP_WPTR_MEM__WPTR_MASK 0x00001FFFL
+//CP_HQD_AQL_CONTROL
+#define CP_HQD_AQL_CONTROL__CONTROL0__SHIFT 0x0
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN__SHIFT 0xf
+#define CP_HQD_AQL_CONTROL__CONTROL1__SHIFT 0x10
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN__SHIFT 0x1f
+#define CP_HQD_AQL_CONTROL__CONTROL0_MASK 0x00007FFFL
+#define CP_HQD_AQL_CONTROL__CONTROL0_EN_MASK 0x00008000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_MASK 0x7FFF0000L
+#define CP_HQD_AQL_CONTROL__CONTROL1_EN_MASK 0x80000000L
+//CP_HQD_PQ_WPTR_LO
+#define CP_HQD_PQ_WPTR_LO__OFFSET__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_LO__OFFSET_MASK 0xFFFFFFFFL
+//CP_HQD_PQ_WPTR_HI
+#define CP_HQD_PQ_WPTR_HI__DATA__SHIFT 0x0
+#define CP_HQD_PQ_WPTR_HI__DATA_MASK 0xFFFFFFFFL
+//CP_HQD_SUSPEND_CNTL_STACK_OFFSET
+#define CP_HQD_SUSPEND_CNTL_STACK_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_SUSPEND_CNTL_STACK_OFFSET__OFFSET_MASK 0x0000FFFCL
+//CP_HQD_SUSPEND_CNTL_STACK_DW_CNT
+#define CP_HQD_SUSPEND_CNTL_STACK_DW_CNT__CNT__SHIFT 0x0
+#define CP_HQD_SUSPEND_CNTL_STACK_DW_CNT__CNT_MASK 0x00003FFFL
+//CP_HQD_SUSPEND_WG_STATE_OFFSET
+#define CP_HQD_SUSPEND_WG_STATE_OFFSET__OFFSET__SHIFT 0x2
+#define CP_HQD_SUSPEND_WG_STATE_OFFSET__OFFSET_MASK 0x03FFFFFCL
+//CP_HQD_DDID_RPTR
+#define CP_HQD_DDID_RPTR__RPTR__SHIFT 0x0
+#define CP_HQD_DDID_RPTR__RPTR_MASK 0x000007FFL
+//CP_HQD_DDID_WPTR
+#define CP_HQD_DDID_WPTR__WPTR__SHIFT 0x0
+#define CP_HQD_DDID_WPTR__WPTR_MASK 0x000007FFL
+//CP_HQD_DDID_INFLIGHT_COUNT
+#define CP_HQD_DDID_INFLIGHT_COUNT__COUNT__SHIFT 0x0
+#define CP_HQD_DDID_INFLIGHT_COUNT__COUNT_MASK 0x0000FFFFL
+//CP_HQD_DDID_DELTA_RPT_COUNT
+#define CP_HQD_DDID_DELTA_RPT_COUNT__COUNT__SHIFT 0x0
+#define CP_HQD_DDID_DELTA_RPT_COUNT__COUNT_MASK 0x000000FFL
+//CP_HQD_DEQUEUE_STATUS
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT__SHIFT 0x0
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND__SHIFT 0x4
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_EN__SHIFT 0x9
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_EN__SHIFT 0xa
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_MASK 0x0000000FL
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_MASK 0x00000010L
+#define CP_HQD_DEQUEUE_STATUS__SUSPEND_REQ_PEND_EN_MASK 0x00000200L
+#define CP_HQD_DEQUEUE_STATUS__DEQUEUE_STAT_EN_MASK 0x00000400L
+
+
+// addressBlock: gc_tcpdec
+//TCP_WATCH0_ADDR_H
+#define TCP_WATCH0_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH0_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH0_ADDR_L
+#define TCP_WATCH0_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH0_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH0_CNTL
+#define TCP_WATCH0_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH0_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH0_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH0_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH0_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH0_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH0_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH0_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH1_ADDR_H
+#define TCP_WATCH1_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH1_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH1_ADDR_L
+#define TCP_WATCH1_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH1_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH1_CNTL
+#define TCP_WATCH1_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH1_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH1_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH1_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH1_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH1_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH1_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH1_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH2_ADDR_H
+#define TCP_WATCH2_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH2_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH2_ADDR_L
+#define TCP_WATCH2_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH2_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH2_CNTL
+#define TCP_WATCH2_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH2_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH2_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH2_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH2_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH2_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH2_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH2_CNTL__VALID_MASK 0x80000000L
+//TCP_WATCH3_ADDR_H
+#define TCP_WATCH3_ADDR_H__ADDR__SHIFT 0x0
+#define TCP_WATCH3_ADDR_H__ADDR_MASK 0x0000FFFFL
+//TCP_WATCH3_ADDR_L
+#define TCP_WATCH3_ADDR_L__ADDR__SHIFT 0x7
+#define TCP_WATCH3_ADDR_L__ADDR_MASK 0xFFFFFF80L
+//TCP_WATCH3_CNTL
+#define TCP_WATCH3_CNTL__MASK__SHIFT 0x0
+#define TCP_WATCH3_CNTL__VMID__SHIFT 0x18
+#define TCP_WATCH3_CNTL__MODE__SHIFT 0x1d
+#define TCP_WATCH3_CNTL__VALID__SHIFT 0x1f
+#define TCP_WATCH3_CNTL__MASK_MASK 0x007FFFFFL
+#define TCP_WATCH3_CNTL__VMID_MASK 0x0F000000L
+#define TCP_WATCH3_CNTL__MODE_MASK 0x60000000L
+#define TCP_WATCH3_CNTL__VALID_MASK 0x80000000L
+
+
+// addressBlock: gc_gdspdec
+//GDS_VMID0_BASE
+#define GDS_VMID0_BASE__BASE__SHIFT 0x0
+#define GDS_VMID0_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID0_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID0_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID0_SIZE
+#define GDS_VMID0_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID0_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID0_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID0_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID1_BASE
+#define GDS_VMID1_BASE__BASE__SHIFT 0x0
+#define GDS_VMID1_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID1_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID1_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID1_SIZE
+#define GDS_VMID1_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID1_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID1_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID1_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID2_BASE
+#define GDS_VMID2_BASE__BASE__SHIFT 0x0
+#define GDS_VMID2_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID2_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID2_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID2_SIZE
+#define GDS_VMID2_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID2_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID2_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID2_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID3_BASE
+#define GDS_VMID3_BASE__BASE__SHIFT 0x0
+#define GDS_VMID3_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID3_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID3_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID3_SIZE
+#define GDS_VMID3_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID3_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID3_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID3_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID4_BASE
+#define GDS_VMID4_BASE__BASE__SHIFT 0x0
+#define GDS_VMID4_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID4_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID4_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID4_SIZE
+#define GDS_VMID4_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID4_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID4_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID4_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID5_BASE
+#define GDS_VMID5_BASE__BASE__SHIFT 0x0
+#define GDS_VMID5_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID5_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID5_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID5_SIZE
+#define GDS_VMID5_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID5_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID5_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID5_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID6_BASE
+#define GDS_VMID6_BASE__BASE__SHIFT 0x0
+#define GDS_VMID6_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID6_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID6_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID6_SIZE
+#define GDS_VMID6_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID6_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID6_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID6_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID7_BASE
+#define GDS_VMID7_BASE__BASE__SHIFT 0x0
+#define GDS_VMID7_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID7_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID7_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID7_SIZE
+#define GDS_VMID7_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID7_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID7_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID7_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID8_BASE
+#define GDS_VMID8_BASE__BASE__SHIFT 0x0
+#define GDS_VMID8_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID8_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID8_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID8_SIZE
+#define GDS_VMID8_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID8_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID8_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID8_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID9_BASE
+#define GDS_VMID9_BASE__BASE__SHIFT 0x0
+#define GDS_VMID9_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID9_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID9_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID9_SIZE
+#define GDS_VMID9_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID9_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID9_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID9_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID10_BASE
+#define GDS_VMID10_BASE__BASE__SHIFT 0x0
+#define GDS_VMID10_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID10_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID10_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID10_SIZE
+#define GDS_VMID10_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID10_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID10_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID10_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID11_BASE
+#define GDS_VMID11_BASE__BASE__SHIFT 0x0
+#define GDS_VMID11_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID11_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID11_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID11_SIZE
+#define GDS_VMID11_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID11_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID11_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID11_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID12_BASE
+#define GDS_VMID12_BASE__BASE__SHIFT 0x0
+#define GDS_VMID12_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID12_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID12_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID12_SIZE
+#define GDS_VMID12_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID12_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID12_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID12_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID13_BASE
+#define GDS_VMID13_BASE__BASE__SHIFT 0x0
+#define GDS_VMID13_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID13_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID13_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID13_SIZE
+#define GDS_VMID13_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID13_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID13_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID13_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID14_BASE
+#define GDS_VMID14_BASE__BASE__SHIFT 0x0
+#define GDS_VMID14_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID14_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID14_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID14_SIZE
+#define GDS_VMID14_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID14_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID14_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID14_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_VMID15_BASE
+#define GDS_VMID15_BASE__BASE__SHIFT 0x0
+#define GDS_VMID15_BASE__UNUSED__SHIFT 0x10
+#define GDS_VMID15_BASE__BASE_MASK 0x0000FFFFL
+#define GDS_VMID15_BASE__UNUSED_MASK 0xFFFF0000L
+//GDS_VMID15_SIZE
+#define GDS_VMID15_SIZE__SIZE__SHIFT 0x0
+#define GDS_VMID15_SIZE__UNUSED__SHIFT 0x11
+#define GDS_VMID15_SIZE__SIZE_MASK 0x0001FFFFL
+#define GDS_VMID15_SIZE__UNUSED_MASK 0xFFFE0000L
+//GDS_GWS_VMID0
+#define GDS_GWS_VMID0__BASE__SHIFT 0x0
+#define GDS_GWS_VMID0__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID0__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID0__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID0__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID0__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID0__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID0__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID1
+#define GDS_GWS_VMID1__BASE__SHIFT 0x0
+#define GDS_GWS_VMID1__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID1__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID1__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID1__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID1__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID1__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID1__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID2
+#define GDS_GWS_VMID2__BASE__SHIFT 0x0
+#define GDS_GWS_VMID2__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID2__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID2__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID2__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID2__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID2__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID2__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID3
+#define GDS_GWS_VMID3__BASE__SHIFT 0x0
+#define GDS_GWS_VMID3__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID3__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID3__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID3__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID3__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID3__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID3__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID4
+#define GDS_GWS_VMID4__BASE__SHIFT 0x0
+#define GDS_GWS_VMID4__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID4__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID4__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID4__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID4__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID4__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID4__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID5
+#define GDS_GWS_VMID5__BASE__SHIFT 0x0
+#define GDS_GWS_VMID5__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID5__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID5__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID5__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID5__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID5__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID5__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID6
+#define GDS_GWS_VMID6__BASE__SHIFT 0x0
+#define GDS_GWS_VMID6__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID6__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID6__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID6__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID6__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID6__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID6__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID7
+#define GDS_GWS_VMID7__BASE__SHIFT 0x0
+#define GDS_GWS_VMID7__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID7__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID7__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID7__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID7__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID7__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID7__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID8
+#define GDS_GWS_VMID8__BASE__SHIFT 0x0
+#define GDS_GWS_VMID8__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID8__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID8__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID8__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID8__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID8__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID8__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID9
+#define GDS_GWS_VMID9__BASE__SHIFT 0x0
+#define GDS_GWS_VMID9__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID9__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID9__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID9__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID9__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID9__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID9__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID10
+#define GDS_GWS_VMID10__BASE__SHIFT 0x0
+#define GDS_GWS_VMID10__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID10__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID10__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID10__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID10__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID10__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID10__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID11
+#define GDS_GWS_VMID11__BASE__SHIFT 0x0
+#define GDS_GWS_VMID11__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID11__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID11__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID11__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID11__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID11__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID11__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID12
+#define GDS_GWS_VMID12__BASE__SHIFT 0x0
+#define GDS_GWS_VMID12__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID12__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID12__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID12__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID12__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID12__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID12__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID13
+#define GDS_GWS_VMID13__BASE__SHIFT 0x0
+#define GDS_GWS_VMID13__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID13__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID13__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID13__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID13__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID13__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID13__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID14
+#define GDS_GWS_VMID14__BASE__SHIFT 0x0
+#define GDS_GWS_VMID14__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID14__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID14__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID14__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID14__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID14__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID14__UNUSED2_MASK 0xFF800000L
+//GDS_GWS_VMID15
+#define GDS_GWS_VMID15__BASE__SHIFT 0x0
+#define GDS_GWS_VMID15__UNUSED1__SHIFT 0x6
+#define GDS_GWS_VMID15__SIZE__SHIFT 0x10
+#define GDS_GWS_VMID15__UNUSED2__SHIFT 0x17
+#define GDS_GWS_VMID15__BASE_MASK 0x0000003FL
+#define GDS_GWS_VMID15__UNUSED1_MASK 0x0000FFC0L
+#define GDS_GWS_VMID15__SIZE_MASK 0x007F0000L
+#define GDS_GWS_VMID15__UNUSED2_MASK 0xFF800000L
+//GDS_OA_VMID0
+#define GDS_OA_VMID0__MASK__SHIFT 0x0
+#define GDS_OA_VMID0__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID0__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID0__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID1
+#define GDS_OA_VMID1__MASK__SHIFT 0x0
+#define GDS_OA_VMID1__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID1__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID1__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID2
+#define GDS_OA_VMID2__MASK__SHIFT 0x0
+#define GDS_OA_VMID2__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID2__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID2__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID3
+#define GDS_OA_VMID3__MASK__SHIFT 0x0
+#define GDS_OA_VMID3__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID3__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID3__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID4
+#define GDS_OA_VMID4__MASK__SHIFT 0x0
+#define GDS_OA_VMID4__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID4__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID4__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID5
+#define GDS_OA_VMID5__MASK__SHIFT 0x0
+#define GDS_OA_VMID5__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID5__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID5__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID6
+#define GDS_OA_VMID6__MASK__SHIFT 0x0
+#define GDS_OA_VMID6__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID6__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID6__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID7
+#define GDS_OA_VMID7__MASK__SHIFT 0x0
+#define GDS_OA_VMID7__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID7__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID7__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID8
+#define GDS_OA_VMID8__MASK__SHIFT 0x0
+#define GDS_OA_VMID8__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID8__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID8__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID9
+#define GDS_OA_VMID9__MASK__SHIFT 0x0
+#define GDS_OA_VMID9__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID9__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID9__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID10
+#define GDS_OA_VMID10__MASK__SHIFT 0x0
+#define GDS_OA_VMID10__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID10__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID10__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID11
+#define GDS_OA_VMID11__MASK__SHIFT 0x0
+#define GDS_OA_VMID11__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID11__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID11__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID12
+#define GDS_OA_VMID12__MASK__SHIFT 0x0
+#define GDS_OA_VMID12__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID12__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID12__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID13
+#define GDS_OA_VMID13__MASK__SHIFT 0x0
+#define GDS_OA_VMID13__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID13__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID13__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID14
+#define GDS_OA_VMID14__MASK__SHIFT 0x0
+#define GDS_OA_VMID14__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID14__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID14__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_VMID15
+#define GDS_OA_VMID15__MASK__SHIFT 0x0
+#define GDS_OA_VMID15__UNUSED__SHIFT 0x10
+#define GDS_OA_VMID15__MASK_MASK 0x0000FFFFL
+#define GDS_OA_VMID15__UNUSED_MASK 0xFFFF0000L
+//GDS_GWS_RESET0
+#define GDS_GWS_RESET0__RESOURCE0_RESET__SHIFT 0x0
+#define GDS_GWS_RESET0__RESOURCE1_RESET__SHIFT 0x1
+#define GDS_GWS_RESET0__RESOURCE2_RESET__SHIFT 0x2
+#define GDS_GWS_RESET0__RESOURCE3_RESET__SHIFT 0x3
+#define GDS_GWS_RESET0__RESOURCE4_RESET__SHIFT 0x4
+#define GDS_GWS_RESET0__RESOURCE5_RESET__SHIFT 0x5
+#define GDS_GWS_RESET0__RESOURCE6_RESET__SHIFT 0x6
+#define GDS_GWS_RESET0__RESOURCE7_RESET__SHIFT 0x7
+#define GDS_GWS_RESET0__RESOURCE8_RESET__SHIFT 0x8
+#define GDS_GWS_RESET0__RESOURCE9_RESET__SHIFT 0x9
+#define GDS_GWS_RESET0__RESOURCE10_RESET__SHIFT 0xa
+#define GDS_GWS_RESET0__RESOURCE11_RESET__SHIFT 0xb
+#define GDS_GWS_RESET0__RESOURCE12_RESET__SHIFT 0xc
+#define GDS_GWS_RESET0__RESOURCE13_RESET__SHIFT 0xd
+#define GDS_GWS_RESET0__RESOURCE14_RESET__SHIFT 0xe
+#define GDS_GWS_RESET0__RESOURCE15_RESET__SHIFT 0xf
+#define GDS_GWS_RESET0__RESOURCE16_RESET__SHIFT 0x10
+#define GDS_GWS_RESET0__RESOURCE17_RESET__SHIFT 0x11
+#define GDS_GWS_RESET0__RESOURCE18_RESET__SHIFT 0x12
+#define GDS_GWS_RESET0__RESOURCE19_RESET__SHIFT 0x13
+#define GDS_GWS_RESET0__RESOURCE20_RESET__SHIFT 0x14
+#define GDS_GWS_RESET0__RESOURCE21_RESET__SHIFT 0x15
+#define GDS_GWS_RESET0__RESOURCE22_RESET__SHIFT 0x16
+#define GDS_GWS_RESET0__RESOURCE23_RESET__SHIFT 0x17
+#define GDS_GWS_RESET0__RESOURCE24_RESET__SHIFT 0x18
+#define GDS_GWS_RESET0__RESOURCE25_RESET__SHIFT 0x19
+#define GDS_GWS_RESET0__RESOURCE26_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET0__RESOURCE27_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET0__RESOURCE28_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET0__RESOURCE29_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET0__RESOURCE30_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET0__RESOURCE31_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET0__RESOURCE0_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET0__RESOURCE1_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET0__RESOURCE2_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET0__RESOURCE3_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET0__RESOURCE4_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET0__RESOURCE5_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET0__RESOURCE6_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET0__RESOURCE7_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET0__RESOURCE8_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET0__RESOURCE9_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET0__RESOURCE10_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET0__RESOURCE11_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET0__RESOURCE12_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET0__RESOURCE13_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET0__RESOURCE14_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET0__RESOURCE15_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET0__RESOURCE16_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET0__RESOURCE17_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET0__RESOURCE18_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET0__RESOURCE19_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET0__RESOURCE20_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET0__RESOURCE21_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET0__RESOURCE22_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET0__RESOURCE23_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET0__RESOURCE24_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET0__RESOURCE25_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET0__RESOURCE26_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET0__RESOURCE27_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET0__RESOURCE28_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET0__RESOURCE29_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET0__RESOURCE30_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET0__RESOURCE31_RESET_MASK 0x80000000L
+//GDS_GWS_RESET1
+#define GDS_GWS_RESET1__RESOURCE32_RESET__SHIFT 0x0
+#define GDS_GWS_RESET1__RESOURCE33_RESET__SHIFT 0x1
+#define GDS_GWS_RESET1__RESOURCE34_RESET__SHIFT 0x2
+#define GDS_GWS_RESET1__RESOURCE35_RESET__SHIFT 0x3
+#define GDS_GWS_RESET1__RESOURCE36_RESET__SHIFT 0x4
+#define GDS_GWS_RESET1__RESOURCE37_RESET__SHIFT 0x5
+#define GDS_GWS_RESET1__RESOURCE38_RESET__SHIFT 0x6
+#define GDS_GWS_RESET1__RESOURCE39_RESET__SHIFT 0x7
+#define GDS_GWS_RESET1__RESOURCE40_RESET__SHIFT 0x8
+#define GDS_GWS_RESET1__RESOURCE41_RESET__SHIFT 0x9
+#define GDS_GWS_RESET1__RESOURCE42_RESET__SHIFT 0xa
+#define GDS_GWS_RESET1__RESOURCE43_RESET__SHIFT 0xb
+#define GDS_GWS_RESET1__RESOURCE44_RESET__SHIFT 0xc
+#define GDS_GWS_RESET1__RESOURCE45_RESET__SHIFT 0xd
+#define GDS_GWS_RESET1__RESOURCE46_RESET__SHIFT 0xe
+#define GDS_GWS_RESET1__RESOURCE47_RESET__SHIFT 0xf
+#define GDS_GWS_RESET1__RESOURCE48_RESET__SHIFT 0x10
+#define GDS_GWS_RESET1__RESOURCE49_RESET__SHIFT 0x11
+#define GDS_GWS_RESET1__RESOURCE50_RESET__SHIFT 0x12
+#define GDS_GWS_RESET1__RESOURCE51_RESET__SHIFT 0x13
+#define GDS_GWS_RESET1__RESOURCE52_RESET__SHIFT 0x14
+#define GDS_GWS_RESET1__RESOURCE53_RESET__SHIFT 0x15
+#define GDS_GWS_RESET1__RESOURCE54_RESET__SHIFT 0x16
+#define GDS_GWS_RESET1__RESOURCE55_RESET__SHIFT 0x17
+#define GDS_GWS_RESET1__RESOURCE56_RESET__SHIFT 0x18
+#define GDS_GWS_RESET1__RESOURCE57_RESET__SHIFT 0x19
+#define GDS_GWS_RESET1__RESOURCE58_RESET__SHIFT 0x1a
+#define GDS_GWS_RESET1__RESOURCE59_RESET__SHIFT 0x1b
+#define GDS_GWS_RESET1__RESOURCE60_RESET__SHIFT 0x1c
+#define GDS_GWS_RESET1__RESOURCE61_RESET__SHIFT 0x1d
+#define GDS_GWS_RESET1__RESOURCE62_RESET__SHIFT 0x1e
+#define GDS_GWS_RESET1__RESOURCE63_RESET__SHIFT 0x1f
+#define GDS_GWS_RESET1__RESOURCE32_RESET_MASK 0x00000001L
+#define GDS_GWS_RESET1__RESOURCE33_RESET_MASK 0x00000002L
+#define GDS_GWS_RESET1__RESOURCE34_RESET_MASK 0x00000004L
+#define GDS_GWS_RESET1__RESOURCE35_RESET_MASK 0x00000008L
+#define GDS_GWS_RESET1__RESOURCE36_RESET_MASK 0x00000010L
+#define GDS_GWS_RESET1__RESOURCE37_RESET_MASK 0x00000020L
+#define GDS_GWS_RESET1__RESOURCE38_RESET_MASK 0x00000040L
+#define GDS_GWS_RESET1__RESOURCE39_RESET_MASK 0x00000080L
+#define GDS_GWS_RESET1__RESOURCE40_RESET_MASK 0x00000100L
+#define GDS_GWS_RESET1__RESOURCE41_RESET_MASK 0x00000200L
+#define GDS_GWS_RESET1__RESOURCE42_RESET_MASK 0x00000400L
+#define GDS_GWS_RESET1__RESOURCE43_RESET_MASK 0x00000800L
+#define GDS_GWS_RESET1__RESOURCE44_RESET_MASK 0x00001000L
+#define GDS_GWS_RESET1__RESOURCE45_RESET_MASK 0x00002000L
+#define GDS_GWS_RESET1__RESOURCE46_RESET_MASK 0x00004000L
+#define GDS_GWS_RESET1__RESOURCE47_RESET_MASK 0x00008000L
+#define GDS_GWS_RESET1__RESOURCE48_RESET_MASK 0x00010000L
+#define GDS_GWS_RESET1__RESOURCE49_RESET_MASK 0x00020000L
+#define GDS_GWS_RESET1__RESOURCE50_RESET_MASK 0x00040000L
+#define GDS_GWS_RESET1__RESOURCE51_RESET_MASK 0x00080000L
+#define GDS_GWS_RESET1__RESOURCE52_RESET_MASK 0x00100000L
+#define GDS_GWS_RESET1__RESOURCE53_RESET_MASK 0x00200000L
+#define GDS_GWS_RESET1__RESOURCE54_RESET_MASK 0x00400000L
+#define GDS_GWS_RESET1__RESOURCE55_RESET_MASK 0x00800000L
+#define GDS_GWS_RESET1__RESOURCE56_RESET_MASK 0x01000000L
+#define GDS_GWS_RESET1__RESOURCE57_RESET_MASK 0x02000000L
+#define GDS_GWS_RESET1__RESOURCE58_RESET_MASK 0x04000000L
+#define GDS_GWS_RESET1__RESOURCE59_RESET_MASK 0x08000000L
+#define GDS_GWS_RESET1__RESOURCE60_RESET_MASK 0x10000000L
+#define GDS_GWS_RESET1__RESOURCE61_RESET_MASK 0x20000000L
+#define GDS_GWS_RESET1__RESOURCE62_RESET_MASK 0x40000000L
+#define GDS_GWS_RESET1__RESOURCE63_RESET_MASK 0x80000000L
+//GDS_GWS_RESOURCE_RESET
+#define GDS_GWS_RESOURCE_RESET__RESET__SHIFT 0x0
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID__SHIFT 0x8
+#define GDS_GWS_RESOURCE_RESET__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_RESET__RESET_MASK 0x00000001L
+#define GDS_GWS_RESOURCE_RESET__RESOURCE_ID_MASK 0x0000FF00L
+#define GDS_GWS_RESOURCE_RESET__UNUSED_MASK 0xFFFF0000L
+//GDS_COMPUTE_MAX_WAVE_ID
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID__SHIFT 0x0
+#define GDS_COMPUTE_MAX_WAVE_ID__UNUSED__SHIFT 0xc
+#define GDS_COMPUTE_MAX_WAVE_ID__MAX_WAVE_ID_MASK 0x00000FFFL
+#define GDS_COMPUTE_MAX_WAVE_ID__UNUSED_MASK 0xFFFFF000L
+//GDS_OA_RESET_MASK
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET__SHIFT 0x0
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET__SHIFT 0x1
+#define GDS_OA_RESET_MASK__ME0_CS_RESET__SHIFT 0x2
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET__SHIFT 0x3
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET__SHIFT 0x4
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET__SHIFT 0x5
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET__SHIFT 0x6
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET__SHIFT 0x7
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET__SHIFT 0x8
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET__SHIFT 0x9
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET__SHIFT 0xa
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET__SHIFT 0xb
+#define GDS_OA_RESET_MASK__ME0_PIPE1_CS_RESET__SHIFT 0xc
+#define GDS_OA_RESET_MASK__UNUSED1__SHIFT 0xd
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_PIX_RESET_MASK 0x00000001L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_VTX_RESET_MASK 0x00000002L
+#define GDS_OA_RESET_MASK__ME0_CS_RESET_MASK 0x00000004L
+#define GDS_OA_RESET_MASK__ME0_GFXHP3D_GS_RESET_MASK 0x00000008L
+#define GDS_OA_RESET_MASK__ME1_PIPE0_RESET_MASK 0x00000010L
+#define GDS_OA_RESET_MASK__ME1_PIPE1_RESET_MASK 0x00000020L
+#define GDS_OA_RESET_MASK__ME1_PIPE2_RESET_MASK 0x00000040L
+#define GDS_OA_RESET_MASK__ME1_PIPE3_RESET_MASK 0x00000080L
+#define GDS_OA_RESET_MASK__ME2_PIPE0_RESET_MASK 0x00000100L
+#define GDS_OA_RESET_MASK__ME2_PIPE1_RESET_MASK 0x00000200L
+#define GDS_OA_RESET_MASK__ME2_PIPE2_RESET_MASK 0x00000400L
+#define GDS_OA_RESET_MASK__ME2_PIPE3_RESET_MASK 0x00000800L
+#define GDS_OA_RESET_MASK__ME0_PIPE1_CS_RESET_MASK 0x00001000L
+#define GDS_OA_RESET_MASK__UNUSED1_MASK 0xFFFFE000L
+//GDS_OA_RESET
+#define GDS_OA_RESET__RESET__SHIFT 0x0
+#define GDS_OA_RESET__PIPE_ID__SHIFT 0x8
+#define GDS_OA_RESET__UNUSED__SHIFT 0x10
+#define GDS_OA_RESET__RESET_MASK 0x00000001L
+#define GDS_OA_RESET__PIPE_ID_MASK 0x0000FF00L
+#define GDS_OA_RESET__UNUSED_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_STATUS
+#define GDS_CS_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_CS_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_CS_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_CS_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_CS_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_CS_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_CS_CTXSW_CNT0
+#define GDS_CS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT1
+#define GDS_CS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT2
+#define GDS_CS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_CS_CTXSW_CNT3
+#define GDS_CS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_CS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_CS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_CS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_GFX_CTXSW_STATUS
+#define GDS_GFX_CTXSW_STATUS__R__SHIFT 0x0
+#define GDS_GFX_CTXSW_STATUS__W__SHIFT 0x1
+#define GDS_GFX_CTXSW_STATUS__UNUSED__SHIFT 0x2
+#define GDS_GFX_CTXSW_STATUS__R_MASK 0x00000001L
+#define GDS_GFX_CTXSW_STATUS__W_MASK 0x00000002L
+#define GDS_GFX_CTXSW_STATUS__UNUSED_MASK 0xFFFFFFFCL
+//GDS_PS_CTXSW_CNT0
+#define GDS_PS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT1
+#define GDS_PS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT2
+#define GDS_PS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_CNT3
+#define GDS_PS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_PS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_PS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_PS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_PS_CTXSW_IDX
+#define GDS_PS_CTXSW_IDX__PACKER_ID__SHIFT 0x0
+#define GDS_PS_CTXSW_IDX__UNUSED__SHIFT 0x6
+#define GDS_PS_CTXSW_IDX__PACKER_ID_MASK 0x0000003FL
+#define GDS_PS_CTXSW_IDX__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GS_CTXSW_CNT0
+#define GDS_GS_CTXSW_CNT0__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT0__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT0__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT0__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT1
+#define GDS_GS_CTXSW_CNT1__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT1__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT1__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT1__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT2
+#define GDS_GS_CTXSW_CNT2__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT2__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT2__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT2__PTR_MASK 0xFFFF0000L
+//GDS_GS_CTXSW_CNT3
+#define GDS_GS_CTXSW_CNT3__UPDN__SHIFT 0x0
+#define GDS_GS_CTXSW_CNT3__PTR__SHIFT 0x10
+#define GDS_GS_CTXSW_CNT3__UPDN_MASK 0x0000FFFFL
+#define GDS_GS_CTXSW_CNT3__PTR_MASK 0xFFFF0000L
+//GDS_MEMORY_CLEAN
+#define GDS_MEMORY_CLEAN__START__SHIFT 0x0
+#define GDS_MEMORY_CLEAN__FINISH__SHIFT 0x1
+#define GDS_MEMORY_CLEAN__UNUSED__SHIFT 0x2
+#define GDS_MEMORY_CLEAN__START_MASK 0x00000001L
+#define GDS_MEMORY_CLEAN__FINISH_MASK 0x00000002L
+#define GDS_MEMORY_CLEAN__UNUSED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_rasdec
+//RAS_SIGNATURE_CONTROL
+#define RAS_SIGNATURE_CONTROL__ENABLE__SHIFT 0x0
+#define RAS_SIGNATURE_CONTROL__ENABLE_MASK 0x00000001L
+//RAS_SIGNATURE_MASK
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK__SHIFT 0x0
+#define RAS_SIGNATURE_MASK__INPUT_BUS_MASK_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE0
+#define RAS_SX_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE1
+#define RAS_SX_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE2
+#define RAS_SX_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SX_SIGNATURE3
+#define RAS_SX_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SX_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_DB_SIGNATURE0
+#define RAS_DB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_DB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_PA_SIGNATURE0
+#define RAS_PA_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_PA_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE0
+#define RAS_SC_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE1
+#define RAS_SC_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE2
+#define RAS_SC_SIGNATURE2__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE2__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE3
+#define RAS_SC_SIGNATURE3__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE3__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE4
+#define RAS_SC_SIGNATURE4__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE4__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE5
+#define RAS_SC_SIGNATURE5__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE5__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE6
+#define RAS_SC_SIGNATURE6__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE6__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SC_SIGNATURE7
+#define RAS_SC_SIGNATURE7__SIGNATURE__SHIFT 0x0
+#define RAS_SC_SIGNATURE7__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE0
+#define RAS_SPI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_SPI_SIGNATURE1
+#define RAS_SPI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_SPI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_CB_SIGNATURE0
+#define RAS_CB_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_CB_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE0
+#define RAS_BCI_SIGNATURE0__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE0__SIGNATURE_MASK 0xFFFFFFFFL
+//RAS_BCI_SIGNATURE1
+#define RAS_BCI_SIGNATURE1__SIGNATURE__SHIFT 0x0
+#define RAS_BCI_SIGNATURE1__SIGNATURE_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gusdec
+//GUS_IO_RD_COMBINE_FLUSH
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_IO_RD_COMBINE_FLUSH__COMB_MODE__SHIFT 0x18
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_IO_RD_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+#define GUS_IO_RD_COMBINE_FLUSH__COMB_MODE_MASK 0x03000000L
+//GUS_IO_WR_COMBINE_FLUSH
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_IO_WR_COMBINE_FLUSH__COMB_MODE__SHIFT 0x18
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_IO_WR_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+#define GUS_IO_WR_COMBINE_FLUSH__COMB_MODE_MASK 0x03000000L
+//GUS_IO_RD_PRI_AGE_RATE
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_IO_RD_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_IO_WR_PRI_AGE_RATE
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_IO_WR_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_IO_RD_PRI_AGE_COEFF
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_AGE_COEFF
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_QUEUING
+#define GUS_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_QUEUING
+#define GUS_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_FIXED
+#define GUS_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_FIXED
+#define GUS_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_URGENCY_COEFF
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_RD_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_WR_PRI_URGENCY_COEFF
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_IO_WR_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_IO_RD_PRI_URGENCY_MODE
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_IO_RD_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_IO_WR_PRI_URGENCY_MODE
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_IO_WR_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_IO_RD_PRI_QUANT_PRI1
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI2
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI3
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT_PRI4
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_RD_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI1
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI2
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI3
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_WR_PRI_QUANT_PRI4
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_IO_WR_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_IO_RD_PRI_QUANT1_PRI1
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI2
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI3
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_RD_PRI_QUANT1_PRI4
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_RD_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI1
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI2
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI3
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_WR_PRI_QUANT1_PRI4
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_IO_WR_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_COMBINE_FLUSH
+#define GUS_DRAM_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0
+#define GUS_DRAM_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4
+#define GUS_DRAM_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8
+#define GUS_DRAM_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc
+#define GUS_DRAM_COMBINE_FLUSH__GROUP4_TIMER__SHIFT 0x10
+#define GUS_DRAM_COMBINE_FLUSH__GROUP5_TIMER__SHIFT 0x14
+#define GUS_DRAM_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL
+#define GUS_DRAM_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP4_TIMER_MASK 0x000F0000L
+#define GUS_DRAM_COMBINE_FLUSH__GROUP5_TIMER_MASK 0x00F00000L
+//GUS_DRAM_COMBINE_RD_WR_EN
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP0_TIMER__SHIFT 0x0
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP1_TIMER__SHIFT 0x2
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP2_TIMER__SHIFT 0x4
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP3_TIMER__SHIFT 0x6
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP4_TIMER__SHIFT 0x8
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP5_TIMER__SHIFT 0xa
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP0_TIMER_MASK 0x00000003L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP1_TIMER_MASK 0x0000000CL
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP2_TIMER_MASK 0x00000030L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP3_TIMER_MASK 0x000000C0L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP4_TIMER_MASK 0x00000300L
+#define GUS_DRAM_COMBINE_RD_WR_EN__GROUP5_TIMER_MASK 0x00000C00L
+//GUS_DRAM_PRI_AGE_RATE
+#define GUS_DRAM_PRI_AGE_RATE__GROUP0_AGING_RATE__SHIFT 0x0
+#define GUS_DRAM_PRI_AGE_RATE__GROUP1_AGING_RATE__SHIFT 0x3
+#define GUS_DRAM_PRI_AGE_RATE__GROUP2_AGING_RATE__SHIFT 0x6
+#define GUS_DRAM_PRI_AGE_RATE__GROUP3_AGING_RATE__SHIFT 0x9
+#define GUS_DRAM_PRI_AGE_RATE__GROUP4_AGING_RATE__SHIFT 0xc
+#define GUS_DRAM_PRI_AGE_RATE__GROUP5_AGING_RATE__SHIFT 0xf
+#define GUS_DRAM_PRI_AGE_RATE__GROUP0_AGING_RATE_MASK 0x00000007L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP1_AGING_RATE_MASK 0x00000038L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP2_AGING_RATE_MASK 0x000001C0L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP3_AGING_RATE_MASK 0x00000E00L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP4_AGING_RATE_MASK 0x00007000L
+#define GUS_DRAM_PRI_AGE_RATE__GROUP5_AGING_RATE_MASK 0x00038000L
+//GUS_DRAM_PRI_AGE_COEFF
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP0_AGE_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP1_AGE_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP2_AGE_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP3_AGE_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP4_AGE_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_AGE_COEFF__GROUP5_AGE_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_QUEUING
+#define GUS_DRAM_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_QUEUING__GROUP4_QUEUING_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_QUEUING__GROUP5_QUEUING_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_FIXED
+#define GUS_DRAM_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_FIXED__GROUP4_FIXED_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_FIXED__GROUP5_FIXED_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_FIXED__GROUP4_FIXED_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_FIXED__GROUP5_FIXED_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_URGENCY_COEFF
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT__SHIFT 0xc
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT__SHIFT 0xf
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP4_URGENCY_COEFFICIENT_MASK 0x00007000L
+#define GUS_DRAM_PRI_URGENCY_COEFF__GROUP5_URGENCY_COEFFICIENT_MASK 0x00038000L
+//GUS_DRAM_PRI_URGENCY_MODE
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE__SHIFT 0x0
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE__SHIFT 0x1
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE__SHIFT 0x2
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE__SHIFT 0x3
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE__SHIFT 0x4
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE__SHIFT 0x5
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP0_URGENCY_MODE_MASK 0x00000001L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP1_URGENCY_MODE_MASK 0x00000002L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP2_URGENCY_MODE_MASK 0x00000004L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP3_URGENCY_MODE_MASK 0x00000008L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP4_URGENCY_MODE_MASK 0x00000010L
+#define GUS_DRAM_PRI_URGENCY_MODE__GROUP5_URGENCY_MODE_MASK 0x00000020L
+//GUS_DRAM_PRI_QUANT_PRI1
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI2
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI3
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI4
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI4__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT_PRI5
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP0_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP1_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP2_THRESHOLD__SHIFT 0x10
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP3_THRESHOLD__SHIFT 0x18
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP0_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP1_THRESHOLD_MASK 0x0000FF00L
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP2_THRESHOLD_MASK 0x00FF0000L
+#define GUS_DRAM_PRI_QUANT_PRI5__GROUP3_THRESHOLD_MASK 0xFF000000L
+//GUS_DRAM_PRI_QUANT1_PRI1
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI1__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI2
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI2__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI3
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI3__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI4
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI4__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_DRAM_PRI_QUANT1_PRI5
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP4_THRESHOLD__SHIFT 0x0
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP5_THRESHOLD__SHIFT 0x8
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP4_THRESHOLD_MASK 0x000000FFL
+#define GUS_DRAM_PRI_QUANT1_PRI5__GROUP5_THRESHOLD_MASK 0x0000FF00L
+//GUS_IO_GROUP_BURST
+#define GUS_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0
+#define GUS_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8
+#define GUS_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10
+#define GUS_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18
+#define GUS_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL
+#define GUS_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L
+#define GUS_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L
+#define GUS_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L
+//GUS_DRAM_GROUP_BURST
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_LO__SHIFT 0x0
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_HI__SHIFT 0x8
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_LO_MASK 0x000000FFL
+#define GUS_DRAM_GROUP_BURST__DRAM_LIMIT_HI_MASK 0x0000FF00L
+//GUS_SDP_ARB_FINAL
+#define GUS_SDP_ARB_FINAL__HI_DRAM_BURST_LIMIT__SHIFT 0x0
+#define GUS_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x5
+#define GUS_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa
+#define GUS_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf
+#define GUS_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x11
+#define GUS_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x12
+#define GUS_SDP_ARB_FINAL__HI_DRAM_BURST_LIMIT_MASK 0x0000001FL
+#define GUS_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x000003E0L
+#define GUS_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L
+#define GUS_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L
+#define GUS_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x00020000L
+#define GUS_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x00040000L
+//GUS_SDP_QOS_VC_PRIORITY
+#define GUS_SDP_QOS_VC_PRIORITY__VC2_IORD__SHIFT 0x0
+#define GUS_SDP_QOS_VC_PRIORITY__VC3_IOWR__SHIFT 0x4
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_DRAM__SHIFT 0x8
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_HI_DRAM__SHIFT 0xc
+#define GUS_SDP_QOS_VC_PRIORITY__VC2_IORD_MASK 0x0000000FL
+#define GUS_SDP_QOS_VC_PRIORITY__VC3_IOWR_MASK 0x000000F0L
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_DRAM_MASK 0x00000F00L
+#define GUS_SDP_QOS_VC_PRIORITY__VC4_HI_DRAM_MASK 0x0000F000L
+//GUS_SDP_CREDITS
+#define GUS_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0
+#define GUS_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8
+#define GUS_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10
+#define GUS_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL
+#define GUS_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L
+#define GUS_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L
+//GUS_SDP_TAG_RESERVE0
+#define GUS_SDP_TAG_RESERVE0__VC0__SHIFT 0x0
+#define GUS_SDP_TAG_RESERVE0__VC1__SHIFT 0x8
+#define GUS_SDP_TAG_RESERVE0__VC2__SHIFT 0x10
+#define GUS_SDP_TAG_RESERVE0__VC3__SHIFT 0x18
+#define GUS_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL
+#define GUS_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L
+#define GUS_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L
+#define GUS_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L
+//GUS_SDP_TAG_RESERVE1
+#define GUS_SDP_TAG_RESERVE1__VC4__SHIFT 0x0
+#define GUS_SDP_TAG_RESERVE1__VC5__SHIFT 0x8
+#define GUS_SDP_TAG_RESERVE1__VC6__SHIFT 0x10
+#define GUS_SDP_TAG_RESERVE1__VC7__SHIFT 0x18
+#define GUS_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL
+#define GUS_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L
+#define GUS_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L
+#define GUS_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L
+//GUS_SDP_VCC_RESERVE0
+#define GUS_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GUS_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GUS_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GUS_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GUS_SDP_VCC_RESERVE1
+#define GUS_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GUS_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GUS_SDP_VCD_RESERVE0
+#define GUS_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12
+#define GUS_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18
+#define GUS_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L
+#define GUS_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L
+//GUS_SDP_VCD_RESERVE1
+#define GUS_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0
+#define GUS_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6
+#define GUS_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc
+#define GUS_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f
+#define GUS_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL
+#define GUS_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L
+#define GUS_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L
+#define GUS_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L
+//GUS_SDP_REQ_CNTL
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2
+#define GUS_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3
+#define GUS_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x4
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L
+#define GUS_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L
+#define GUS_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L
+#define GUS_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000010L
+//GUS_MISC
+#define GUS_MISC__RELATIVE_PRI_IN_DRAM_ARB__SHIFT 0x0
+#define GUS_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x1
+#define GUS_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x2
+#define GUS_MISC__EARLY_SDP_ORIGDATA__SHIFT 0x3
+#define GUS_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0x4
+#define GUS_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x6
+#define GUS_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x8
+#define GUS_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0xa
+#define GUS_MISC__SEND0_IOWR_ONLY__SHIFT 0xf
+#define GUS_MISC__RELATIVE_PRI_IN_DRAM_ARB_MASK 0x00000001L
+#define GUS_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000002L
+#define GUS_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000004L
+#define GUS_MISC__EARLY_SDP_ORIGDATA_MASK 0x00000008L
+#define GUS_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00000030L
+#define GUS_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x000000C0L
+#define GUS_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00000300L
+#define GUS_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x00007C00L
+#define GUS_MISC__SEND0_IOWR_ONLY_MASK 0x00008000L
+//GUS_LATENCY_SAMPLING
+#define GUS_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0
+#define GUS_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1
+#define GUS_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x2
+#define GUS_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x3
+#define GUS_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x4
+#define GUS_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x5
+#define GUS_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x6
+#define GUS_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x7
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0x8
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0x9
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xa
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xb
+#define GUS_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xc
+#define GUS_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x14
+#define GUS_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000004L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000008L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000010L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000020L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000040L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000080L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000100L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000200L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00000400L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00000800L
+#define GUS_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x000FF000L
+#define GUS_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x0FF00000L
+//GUS_ERR_STATUS
+#define GUS_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GUS_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GUS_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GUS_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GUS_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GUS_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GUS_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GUS_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GUS_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GUS_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GUS_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GUS_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GUS_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GUS_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+//GUS_MISC2
+#define GUS_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0x0
+#define GUS_MISC2__CH_L1_RO_MASK__SHIFT 0x1
+#define GUS_MISC2__SA0_L1_RO_MASK__SHIFT 0x2
+#define GUS_MISC2__SA1_L1_RO_MASK__SHIFT 0x3
+#define GUS_MISC2__SA2_L1_RO_MASK__SHIFT 0x4
+#define GUS_MISC2__SA3_L1_RO_MASK__SHIFT 0x5
+#define GUS_MISC2__CH_L1_PERF_MASK__SHIFT 0x6
+#define GUS_MISC2__SA0_L1_PERF_MASK__SHIFT 0x7
+#define GUS_MISC2__SA1_L1_PERF_MASK__SHIFT 0x8
+#define GUS_MISC2__SA2_L1_PERF_MASK__SHIFT 0x9
+#define GUS_MISC2__SA3_L1_PERF_MASK__SHIFT 0xa
+#define GUS_MISC2__FP_ATOMICS_ENABLE__SHIFT 0xb
+#define GUS_MISC2__L1_RET_CLKEN__SHIFT 0xc
+#define GUS_MISC2__FGCLKEN_HIGH__SHIFT 0xd
+#define GUS_MISC2__BLOCK_REQUESTS__SHIFT 0xe
+#define GUS_MISC2__REQUESTS_BLOCKED__SHIFT 0xf
+#define GUS_MISC2__RIO_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x10
+#define GUS_MISC2__WIO_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x11
+#define GUS_MISC2__DRAM_ICG_L1_ROUTER_BUSY_MASK__SHIFT 0x12
+#define GUS_MISC2__RDRET_FED_MASK__SHIFT 0x13
+#define GUS_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00000001L
+#define GUS_MISC2__CH_L1_RO_MASK_MASK 0x00000002L
+#define GUS_MISC2__SA0_L1_RO_MASK_MASK 0x00000004L
+#define GUS_MISC2__SA1_L1_RO_MASK_MASK 0x00000008L
+#define GUS_MISC2__SA2_L1_RO_MASK_MASK 0x00000010L
+#define GUS_MISC2__SA3_L1_RO_MASK_MASK 0x00000020L
+#define GUS_MISC2__CH_L1_PERF_MASK_MASK 0x00000040L
+#define GUS_MISC2__SA0_L1_PERF_MASK_MASK 0x00000080L
+#define GUS_MISC2__SA1_L1_PERF_MASK_MASK 0x00000100L
+#define GUS_MISC2__SA2_L1_PERF_MASK_MASK 0x00000200L
+#define GUS_MISC2__SA3_L1_PERF_MASK_MASK 0x00000400L
+#define GUS_MISC2__FP_ATOMICS_ENABLE_MASK 0x00000800L
+#define GUS_MISC2__L1_RET_CLKEN_MASK 0x00001000L
+#define GUS_MISC2__FGCLKEN_HIGH_MASK 0x00002000L
+#define GUS_MISC2__BLOCK_REQUESTS_MASK 0x00004000L
+#define GUS_MISC2__REQUESTS_BLOCKED_MASK 0x00008000L
+#define GUS_MISC2__RIO_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00010000L
+#define GUS_MISC2__WIO_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00020000L
+#define GUS_MISC2__DRAM_ICG_L1_ROUTER_BUSY_MASK_MASK 0x00040000L
+#define GUS_MISC2__RDRET_FED_MASK_MASK 0x00080000L
+//GUS_SDP_BACKDOOR_CMDCREDITS0
+#define GUS_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_CMDCREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
+//GUS_SDP_BACKDOOR_CMDCREDITS1
+#define GUS_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_CMDCREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
+//GUS_SDP_BACKDOOR_DATACREDITS0
+#define GUS_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_DATACREDITS0__CREDITS_RECEIVED_MASK 0xFFFFFFFFL
+//GUS_SDP_BACKDOOR_DATACREDITS1
+#define GUS_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_DATACREDITS1__CREDITS_RECEIVED_MASK 0x7FFFFFFFL
+//GUS_SDP_BACKDOOR_MISCCREDITS
+#define GUS_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED__SHIFT 0x0
+#define GUS_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED__SHIFT 0x8
+#define GUS_SDP_BACKDOOR_MISCCREDITS__RDRSP_CREDITS_RELEASED_MASK 0x000000FFL
+#define GUS_SDP_BACKDOOR_MISCCREDITS__WRRSP_CREDITS_RELEASED_MASK 0x0000FF00L
+//GUS_SDP_ENABLE
+#define GUS_SDP_ENABLE__ENABLE__SHIFT 0x0
+#define GUS_SDP_ENABLE__ENABLE_MASK 0x00000001L
+//GUS_L1_CH0_CMD_IN
+#define GUS_L1_CH0_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_CMD_OUT
+#define GUS_L1_CH0_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_IN
+#define GUS_L1_CH0_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_OUT
+#define GUS_L1_CH0_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_U_IN
+#define GUS_L1_CH0_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH0_DATA_U_OUT
+#define GUS_L1_CH0_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH0_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_CMD_IN
+#define GUS_L1_CH1_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_CMD_OUT
+#define GUS_L1_CH1_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_IN
+#define GUS_L1_CH1_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_OUT
+#define GUS_L1_CH1_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_U_IN
+#define GUS_L1_CH1_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_CH1_DATA_U_OUT
+#define GUS_L1_CH1_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_CH1_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_CMD_IN
+#define GUS_L1_SA0_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_CMD_OUT
+#define GUS_L1_SA0_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_IN
+#define GUS_L1_SA0_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_OUT
+#define GUS_L1_SA0_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_U_IN
+#define GUS_L1_SA0_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA0_DATA_U_OUT
+#define GUS_L1_SA0_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA0_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_CMD_IN
+#define GUS_L1_SA1_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_CMD_OUT
+#define GUS_L1_SA1_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_IN
+#define GUS_L1_SA1_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_OUT
+#define GUS_L1_SA1_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_U_IN
+#define GUS_L1_SA1_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA1_DATA_U_OUT
+#define GUS_L1_SA1_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA1_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_CMD_IN
+#define GUS_L1_SA2_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_CMD_OUT
+#define GUS_L1_SA2_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_IN
+#define GUS_L1_SA2_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_OUT
+#define GUS_L1_SA2_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_U_IN
+#define GUS_L1_SA2_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA2_DATA_U_OUT
+#define GUS_L1_SA2_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA2_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_CMD_IN
+#define GUS_L1_SA3_CMD_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_CMD_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_CMD_OUT
+#define GUS_L1_SA3_CMD_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_CMD_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_IN
+#define GUS_L1_SA3_DATA_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_OUT
+#define GUS_L1_SA3_DATA_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_U_IN
+#define GUS_L1_SA3_DATA_U_IN__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_U_IN__COUNT_MASK 0xFFFFFFFFL
+//GUS_L1_SA3_DATA_U_OUT
+#define GUS_L1_SA3_DATA_U_OUT__COUNT__SHIFT 0x0
+#define GUS_L1_SA3_DATA_U_OUT__COUNT_MASK 0xFFFFFFFFL
+//GUS_MISC3
+#define GUS_MISC3__FP_ATOMICS_LOG__SHIFT 0x0
+#define GUS_MISC3__CLEAR_LOG__SHIFT 0x1
+#define GUS_MISC3__FP_ATOMICS_LOG_MASK 0x00000001L
+#define GUS_MISC3__CLEAR_LOG_MASK 0x00000002L
+//GUS_WRRSP_FIFO_CNTL
+#define GUS_WRRSP_FIFO_CNTL__THRESHOLD__SHIFT 0x0
+#define GUS_WRRSP_FIFO_CNTL__THRESHOLD_MASK 0x0000003FL
+
+
+// addressBlock: gc_gfxdec0
+//DB_RENDER_CONTROL
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE__SHIFT 0x0
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE__SHIFT 0x1
+#define DB_RENDER_CONTROL__DEPTH_COPY__SHIFT 0x2
+#define DB_RENDER_CONTROL__STENCIL_COPY__SHIFT 0x3
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE__SHIFT 0x4
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE__SHIFT 0x5
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE__SHIFT 0x6
+#define DB_RENDER_CONTROL__COPY_CENTROID__SHIFT 0x7
+#define DB_RENDER_CONTROL__COPY_SAMPLE__SHIFT 0x8
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE__SHIFT 0xc
+#define DB_RENDER_CONTROL__PS_INVOKE_DISABLE__SHIFT 0xe
+#define DB_RENDER_CONTROL__OREO_MODE__SHIFT 0x10
+#define DB_RENDER_CONTROL__FORCE_OREO_MODE__SHIFT 0x12
+#define DB_RENDER_CONTROL__FORCE_EXPORT_ORDER__SHIFT 0x13
+#define DB_RENDER_CONTROL__MAX_ALLOWED_TILES_IN_WAVE__SHIFT 0x14
+#define DB_RENDER_CONTROL__DEPTH_CLEAR_ENABLE_MASK 0x00000001L
+#define DB_RENDER_CONTROL__STENCIL_CLEAR_ENABLE_MASK 0x00000002L
+#define DB_RENDER_CONTROL__DEPTH_COPY_MASK 0x00000004L
+#define DB_RENDER_CONTROL__STENCIL_COPY_MASK 0x00000008L
+#define DB_RENDER_CONTROL__RESUMMARIZE_ENABLE_MASK 0x00000010L
+#define DB_RENDER_CONTROL__STENCIL_COMPRESS_DISABLE_MASK 0x00000020L
+#define DB_RENDER_CONTROL__DEPTH_COMPRESS_DISABLE_MASK 0x00000040L
+#define DB_RENDER_CONTROL__COPY_CENTROID_MASK 0x00000080L
+#define DB_RENDER_CONTROL__COPY_SAMPLE_MASK 0x00000F00L
+#define DB_RENDER_CONTROL__DECOMPRESS_ENABLE_MASK 0x00001000L
+#define DB_RENDER_CONTROL__PS_INVOKE_DISABLE_MASK 0x00004000L
+#define DB_RENDER_CONTROL__OREO_MODE_MASK 0x00030000L
+#define DB_RENDER_CONTROL__FORCE_OREO_MODE_MASK 0x00040000L
+#define DB_RENDER_CONTROL__FORCE_EXPORT_ORDER_MASK 0x00080000L
+#define DB_RENDER_CONTROL__MAX_ALLOWED_TILES_IN_WAVE_MASK 0x00F00000L
+//DB_COUNT_CONTROL
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS__SHIFT 0x1
+#define DB_COUNT_CONTROL__DISABLE_CONSERVATIVE_ZPASS_COUNTS__SHIFT 0x2
+#define DB_COUNT_CONTROL__ENHANCED_CONSERVATIVE_ZPASS_COUNTS__SHIFT 0x3
+#define DB_COUNT_CONTROL__SAMPLE_RATE__SHIFT 0x4
+#define DB_COUNT_CONTROL__ZPASS_ENABLE__SHIFT 0x8
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE__SHIFT 0xc
+#define DB_COUNT_CONTROL__SFAIL_ENABLE__SHIFT 0x10
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE__SHIFT 0x14
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x18
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x1c
+#define DB_COUNT_CONTROL__PERFECT_ZPASS_COUNTS_MASK 0x00000002L
+#define DB_COUNT_CONTROL__DISABLE_CONSERVATIVE_ZPASS_COUNTS_MASK 0x00000004L
+#define DB_COUNT_CONTROL__ENHANCED_CONSERVATIVE_ZPASS_COUNTS_MASK 0x00000008L
+#define DB_COUNT_CONTROL__SAMPLE_RATE_MASK 0x00000070L
+#define DB_COUNT_CONTROL__ZPASS_ENABLE_MASK 0x00000F00L
+#define DB_COUNT_CONTROL__ZFAIL_ENABLE_MASK 0x0000F000L
+#define DB_COUNT_CONTROL__SFAIL_ENABLE_MASK 0x000F0000L
+#define DB_COUNT_CONTROL__DBFAIL_ENABLE_MASK 0x00F00000L
+#define DB_COUNT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x0F000000L
+#define DB_COUNT_CONTROL__SLICE_ODD_ENABLE_MASK 0xF0000000L
+//DB_DEPTH_VIEW
+#define DB_DEPTH_VIEW__SLICE_START__SHIFT 0x0
+#define DB_DEPTH_VIEW__SLICE_START_HI__SHIFT 0xb
+#define DB_DEPTH_VIEW__SLICE_MAX__SHIFT 0xd
+#define DB_DEPTH_VIEW__Z_READ_ONLY__SHIFT 0x18
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY__SHIFT 0x19
+#define DB_DEPTH_VIEW__MIPID__SHIFT 0x1a
+#define DB_DEPTH_VIEW__SLICE_MAX_HI__SHIFT 0x1e
+#define DB_DEPTH_VIEW__SLICE_START_MASK 0x000007FFL
+#define DB_DEPTH_VIEW__SLICE_START_HI_MASK 0x00001800L
+#define DB_DEPTH_VIEW__SLICE_MAX_MASK 0x00FFE000L
+#define DB_DEPTH_VIEW__Z_READ_ONLY_MASK 0x01000000L
+#define DB_DEPTH_VIEW__STENCIL_READ_ONLY_MASK 0x02000000L
+#define DB_DEPTH_VIEW__MIPID_MASK 0x3C000000L
+#define DB_DEPTH_VIEW__SLICE_MAX_HI_MASK 0xC0000000L
+//DB_RENDER_OVERRIDE
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE__SHIFT 0x0
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0__SHIFT 0x2
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1__SHIFT 0x4
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER__SHIFT 0x6
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE__SHIFT 0x7
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE__SHIFT 0x8
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE__SHIFT 0x9
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL__SHIFT 0xa
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ__SHIFT 0xb
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ__SHIFT 0xc
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE__SHIFT 0xd
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP__SHIFT 0x10
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE__SHIFT 0x11
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED__SHIFT 0x12
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM__SHIFT 0x13
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT__SHIFT 0x15
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES__SHIFT 0x1a
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY__SHIFT 0x1c
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID__SHIFT 0x1e
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION__SHIFT 0x1f
+#define DB_RENDER_OVERRIDE__FORCE_HIZ_ENABLE_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE0_MASK 0x0000000CL
+#define DB_RENDER_OVERRIDE__FORCE_HIS_ENABLE1_MASK 0x00000030L
+#define DB_RENDER_OVERRIDE__FORCE_SHADER_Z_ORDER_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE__FAST_Z_DISABLE_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE__FAST_STENCIL_DISABLE_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE__NOOP_CULL_DISABLE_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE__FORCE_COLOR_KILL_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE__FORCE_Z_READ_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_READ_MASK 0x00001000L
+#define DB_RENDER_OVERRIDE__FORCE_FULL_Z_RANGE_MASK 0x00006000L
+#define DB_RENDER_OVERRIDE__DISABLE_VIEWPORT_CLAMP_MASK 0x00010000L
+#define DB_RENDER_OVERRIDE__IGNORE_SC_ZRANGE_MASK 0x00020000L
+#define DB_RENDER_OVERRIDE__DISABLE_FULLY_COVERED_MASK 0x00040000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_LIMIT_SUMM_MASK 0x00180000L
+#define DB_RENDER_OVERRIDE__MAX_TILES_IN_DTT_MASK 0x03E00000L
+#define DB_RENDER_OVERRIDE__DISABLE_TILE_RATE_TILES_MASK 0x04000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_DIRTY_MASK 0x08000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_DIRTY_MASK 0x10000000L
+#define DB_RENDER_OVERRIDE__FORCE_Z_VALID_MASK 0x20000000L
+#define DB_RENDER_OVERRIDE__FORCE_STENCIL_VALID_MASK 0x40000000L
+#define DB_RENDER_OVERRIDE__PRESERVE_COMPRESSION_MASK 0x80000000L
+//DB_RENDER_OVERRIDE2
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL__SHIFT 0x0
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN__SHIFT 0x2
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION__SHIFT 0x5
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION__SHIFT 0x6
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION__SHIFT 0x7
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH__SHIFT 0x8
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP__SHIFT 0x9
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE__SHIFT 0xa
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE__SHIFT 0xb
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC__SHIFT 0xc
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF__SHIFT 0xf
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF__SHIFT 0x12
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE__SHIFT 0x15
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19
+#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT 0x1b
+#define DB_RENDER_OVERRIDE2__DISABLE_NOZ__SHIFT 0x1d
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L
+#define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL
+#define DB_RENDER_OVERRIDE2__DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION_MASK 0x00000020L
+#define DB_RENDER_OVERRIDE2__DISABLE_SMEM_EXPCLEAR_OPTIMIZATION_MASK 0x00000040L
+#define DB_RENDER_OVERRIDE2__DISABLE_COLOR_ON_VALIDATION_MASK 0x00000080L
+#define DB_RENDER_OVERRIDE2__DECOMPRESS_Z_ON_FLUSH_MASK 0x00000100L
+#define DB_RENDER_OVERRIDE2__DISABLE_REG_SNOOP_MASK 0x00000200L
+#define DB_RENDER_OVERRIDE2__DEPTH_BOUNDS_HIER_DEPTH_DISABLE_MASK 0x00000400L
+#define DB_RENDER_OVERRIDE2__SEPARATE_HIZS_FUNC_ENABLE_MASK 0x00000800L
+#define DB_RENDER_OVERRIDE2__HIZ_ZFUNC_MASK 0x00007000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_FF_MASK 0x00038000L
+#define DB_RENDER_OVERRIDE2__HIS_SFUNC_BF_MASK 0x001C0000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_ZRANGE_MASK 0x00200000L
+#define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L
+#define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L
+#define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L
+#define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK 0x18000000L
+#define DB_RENDER_OVERRIDE2__DISABLE_NOZ_MASK 0x20000000L
+//DB_HTILE_DATA_BASE
+#define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0
+#define DB_HTILE_DATA_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_DEPTH_SIZE_XY
+#define DB_DEPTH_SIZE_XY__X_MAX__SHIFT 0x0
+#define DB_DEPTH_SIZE_XY__Y_MAX__SHIFT 0x10
+#define DB_DEPTH_SIZE_XY__X_MAX_MASK 0x00003FFFL
+#define DB_DEPTH_SIZE_XY__Y_MAX_MASK 0x3FFF0000L
+//DB_DEPTH_BOUNDS_MIN
+#define DB_DEPTH_BOUNDS_MIN__MIN__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MIN__MIN_MASK 0xFFFFFFFFL
+//DB_DEPTH_BOUNDS_MAX
+#define DB_DEPTH_BOUNDS_MAX__MAX__SHIFT 0x0
+#define DB_DEPTH_BOUNDS_MAX__MAX_MASK 0xFFFFFFFFL
+//DB_STENCIL_CLEAR
+#define DB_STENCIL_CLEAR__CLEAR__SHIFT 0x0
+#define DB_STENCIL_CLEAR__CLEAR_MASK 0x000000FFL
+//DB_DEPTH_CLEAR
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR__SHIFT 0x0
+#define DB_DEPTH_CLEAR__DEPTH_CLEAR_MASK 0xFFFFFFFFL
+//PA_SC_SCREEN_SCISSOR_TL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_TL__TL_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_TL__TL_Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_SCISSOR_BR
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_SCREEN_SCISSOR_BR__BR_X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_SCISSOR_BR__BR_Y_MASK 0xFFFF0000L
+//DB_RESERVED_REG_2
+#define DB_RESERVED_REG_2__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_2__FIELD_2__SHIFT 0x4
+#define DB_RESERVED_REG_2__FIELD_3__SHIFT 0x8
+#define DB_RESERVED_REG_2__FIELD_4__SHIFT 0xd
+#define DB_RESERVED_REG_2__FIELD_5__SHIFT 0xf
+#define DB_RESERVED_REG_2__FIELD_6__SHIFT 0x11
+#define DB_RESERVED_REG_2__FIELD_7__SHIFT 0x13
+#define DB_RESERVED_REG_2__FIELD_8__SHIFT 0x1c
+#define DB_RESERVED_REG_2__FIELD_1_MASK 0x0000000FL
+#define DB_RESERVED_REG_2__FIELD_2_MASK 0x000000F0L
+#define DB_RESERVED_REG_2__FIELD_3_MASK 0x00001F00L
+#define DB_RESERVED_REG_2__FIELD_4_MASK 0x00006000L
+#define DB_RESERVED_REG_2__FIELD_5_MASK 0x00018000L
+#define DB_RESERVED_REG_2__FIELD_6_MASK 0x00060000L
+#define DB_RESERVED_REG_2__FIELD_7_MASK 0x00180000L
+#define DB_RESERVED_REG_2__FIELD_8_MASK 0xF0000000L
+//DB_Z_INFO
+#define DB_Z_INFO__FORMAT__SHIFT 0x0
+#define DB_Z_INFO__NUM_SAMPLES__SHIFT 0x2
+#define DB_Z_INFO__SW_MODE__SHIFT 0x4
+#define DB_Z_INFO__FAULT_BEHAVIOR__SHIFT 0x9
+#define DB_Z_INFO__ITERATE_FLUSH__SHIFT 0xb
+#define DB_Z_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_Z_INFO__RESERVED_FIELD_1__SHIFT 0xd
+#define DB_Z_INFO__MAXMIP__SHIFT 0x10
+#define DB_Z_INFO__ITERATE_256__SHIFT 0x14
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES__SHIFT 0x17
+#define DB_Z_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_Z_INFO__READ_SIZE__SHIFT 0x1c
+#define DB_Z_INFO__TILE_SURFACE_ENABLE__SHIFT 0x1d
+#define DB_Z_INFO__ZRANGE_PRECISION__SHIFT 0x1f
+#define DB_Z_INFO__FORMAT_MASK 0x00000003L
+#define DB_Z_INFO__NUM_SAMPLES_MASK 0x0000000CL
+#define DB_Z_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_Z_INFO__FAULT_BEHAVIOR_MASK 0x00000600L
+#define DB_Z_INFO__ITERATE_FLUSH_MASK 0x00000800L
+#define DB_Z_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_Z_INFO__RESERVED_FIELD_1_MASK 0x0000E000L
+#define DB_Z_INFO__MAXMIP_MASK 0x000F0000L
+#define DB_Z_INFO__ITERATE_256_MASK 0x00100000L
+#define DB_Z_INFO__DECOMPRESS_ON_N_ZPLANES_MASK 0x07800000L
+#define DB_Z_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_Z_INFO__READ_SIZE_MASK 0x10000000L
+#define DB_Z_INFO__TILE_SURFACE_ENABLE_MASK 0x20000000L
+#define DB_Z_INFO__ZRANGE_PRECISION_MASK 0x80000000L
+//DB_STENCIL_INFO
+#define DB_STENCIL_INFO__FORMAT__SHIFT 0x0
+#define DB_STENCIL_INFO__SW_MODE__SHIFT 0x4
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR__SHIFT 0x9
+#define DB_STENCIL_INFO__ITERATE_FLUSH__SHIFT 0xb
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT__SHIFT 0xc
+#define DB_STENCIL_INFO__RESERVED_FIELD_1__SHIFT 0xd
+#define DB_STENCIL_INFO__ITERATE_256__SHIFT 0x14
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR__SHIFT 0x1b
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE__SHIFT 0x1d
+#define DB_STENCIL_INFO__FORMAT_MASK 0x00000001L
+#define DB_STENCIL_INFO__SW_MODE_MASK 0x000001F0L
+#define DB_STENCIL_INFO__FAULT_BEHAVIOR_MASK 0x00000600L
+#define DB_STENCIL_INFO__ITERATE_FLUSH_MASK 0x00000800L
+#define DB_STENCIL_INFO__PARTIALLY_RESIDENT_MASK 0x00001000L
+#define DB_STENCIL_INFO__RESERVED_FIELD_1_MASK 0x0000E000L
+#define DB_STENCIL_INFO__ITERATE_256_MASK 0x00100000L
+#define DB_STENCIL_INFO__ALLOW_EXPCLEAR_MASK 0x08000000L
+#define DB_STENCIL_INFO__TILE_STENCIL_DISABLE_MASK 0x20000000L
+//DB_Z_READ_BASE
+#define DB_Z_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_READ_BASE
+#define DB_STENCIL_READ_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_READ_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_Z_WRITE_BASE
+#define DB_Z_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_Z_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_STENCIL_WRITE_BASE
+#define DB_STENCIL_WRITE_BASE__BASE_256B__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//DB_RESERVED_REG_1
+#define DB_RESERVED_REG_1__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_1__FIELD_2__SHIFT 0xb
+#define DB_RESERVED_REG_1__FIELD_1_MASK 0x000007FFL
+#define DB_RESERVED_REG_1__FIELD_2_MASK 0x003FF800L
+//DB_RESERVED_REG_3
+#define DB_RESERVED_REG_3__FIELD_1__SHIFT 0x0
+#define DB_RESERVED_REG_3__FIELD_1_MASK 0x003FFFFFL
+//DB_Z_READ_BASE_HI
+#define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_READ_BASE_HI
+#define DB_STENCIL_READ_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_READ_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_Z_WRITE_BASE_HI
+#define DB_Z_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_Z_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_STENCIL_WRITE_BASE_HI
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_STENCIL_WRITE_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_HTILE_DATA_BASE_HI
+#define DB_HTILE_DATA_BASE_HI__BASE_HI__SHIFT 0x0
+#define DB_HTILE_DATA_BASE_HI__BASE_HI_MASK 0x000000FFL
+//DB_RMI_L2_CACHE_CONTROL
+#define DB_RMI_L2_CACHE_CONTROL__Z_WR_POLICY__SHIFT 0x0
+#define DB_RMI_L2_CACHE_CONTROL__S_WR_POLICY__SHIFT 0x2
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_WR_POLICY__SHIFT 0x4
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_WR_POLICY__SHIFT 0x6
+#define DB_RMI_L2_CACHE_CONTROL__Z_RD_POLICY__SHIFT 0x10
+#define DB_RMI_L2_CACHE_CONTROL__S_RD_POLICY__SHIFT 0x12
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_RD_POLICY__SHIFT 0x14
+#define DB_RMI_L2_CACHE_CONTROL__Z_BIG_PAGE__SHIFT 0x18
+#define DB_RMI_L2_CACHE_CONTROL__S_BIG_PAGE__SHIFT 0x19
+#define DB_RMI_L2_CACHE_CONTROL__Z_NOALLOC__SHIFT 0x1a
+#define DB_RMI_L2_CACHE_CONTROL__S_NOALLOC__SHIFT 0x1b
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_NOALLOC__SHIFT 0x1c
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_NOALLOC__SHIFT 0x1d
+#define DB_RMI_L2_CACHE_CONTROL__Z_WR_POLICY_MASK 0x00000003L
+#define DB_RMI_L2_CACHE_CONTROL__S_WR_POLICY_MASK 0x0000000CL
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_WR_POLICY_MASK 0x00000030L
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_WR_POLICY_MASK 0x000000C0L
+#define DB_RMI_L2_CACHE_CONTROL__Z_RD_POLICY_MASK 0x00030000L
+#define DB_RMI_L2_CACHE_CONTROL__S_RD_POLICY_MASK 0x000C0000L
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_RD_POLICY_MASK 0x00300000L
+#define DB_RMI_L2_CACHE_CONTROL__Z_BIG_PAGE_MASK 0x01000000L
+#define DB_RMI_L2_CACHE_CONTROL__S_BIG_PAGE_MASK 0x02000000L
+#define DB_RMI_L2_CACHE_CONTROL__Z_NOALLOC_MASK 0x04000000L
+#define DB_RMI_L2_CACHE_CONTROL__S_NOALLOC_MASK 0x08000000L
+#define DB_RMI_L2_CACHE_CONTROL__HTILE_NOALLOC_MASK 0x10000000L
+#define DB_RMI_L2_CACHE_CONTROL__ZPCPSD_NOALLOC_MASK 0x20000000L
+//TA_BC_BASE_ADDR
+#define TA_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_BC_BASE_ADDR_HI
+#define TA_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_0__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_1
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_1__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_2
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_2__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_HI_3
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B__SHIFT 0x0
+#define COHER_DEST_BASE_HI_3__DEST_BASE_HI_256B_MASK 0x000000FFL
+//COHER_DEST_BASE_2
+#define COHER_DEST_BASE_2__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_2__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_3
+#define COHER_DEST_BASE_3__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_3__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_WINDOW_OFFSET
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET__SHIFT 0x0
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET__SHIFT 0x10
+#define PA_SC_WINDOW_OFFSET__WINDOW_X_OFFSET_MASK 0x0000FFFFL
+#define PA_SC_WINDOW_OFFSET__WINDOW_Y_OFFSET_MASK 0xFFFF0000L
+//PA_SC_WINDOW_SCISSOR_TL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_WINDOW_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_WINDOW_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_WINDOW_SCISSOR_BR
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_WINDOW_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_WINDOW_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_RULE
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE__SHIFT 0x0
+#define PA_SC_CLIPRECT_RULE__CLIP_RULE_MASK 0x0000FFFFL
+//PA_SC_CLIPRECT_0_TL
+#define PA_SC_CLIPRECT_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_0_BR
+#define PA_SC_CLIPRECT_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_TL
+#define PA_SC_CLIPRECT_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_1_BR
+#define PA_SC_CLIPRECT_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_TL
+#define PA_SC_CLIPRECT_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_2_BR
+#define PA_SC_CLIPRECT_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_TL
+#define PA_SC_CLIPRECT_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_TL__TL_Y_MASK 0x7FFF0000L
+//PA_SC_CLIPRECT_3_BR
+#define PA_SC_CLIPRECT_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_CLIPRECT_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_CLIPRECT_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_CLIPRECT_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_EDGERULE
+#define PA_SC_EDGERULE__ER_TRI__SHIFT 0x0
+#define PA_SC_EDGERULE__ER_POINT__SHIFT 0x4
+#define PA_SC_EDGERULE__ER_RECT__SHIFT 0x8
+#define PA_SC_EDGERULE__ER_LINE_LR__SHIFT 0xc
+#define PA_SC_EDGERULE__ER_LINE_RL__SHIFT 0x12
+#define PA_SC_EDGERULE__ER_LINE_TB__SHIFT 0x18
+#define PA_SC_EDGERULE__ER_LINE_BT__SHIFT 0x1c
+#define PA_SC_EDGERULE__ER_TRI_MASK 0x0000000FL
+#define PA_SC_EDGERULE__ER_POINT_MASK 0x000000F0L
+#define PA_SC_EDGERULE__ER_RECT_MASK 0x00000F00L
+#define PA_SC_EDGERULE__ER_LINE_LR_MASK 0x0003F000L
+#define PA_SC_EDGERULE__ER_LINE_RL_MASK 0x00FC0000L
+#define PA_SC_EDGERULE__ER_LINE_TB_MASK 0x0F000000L
+#define PA_SC_EDGERULE__ER_LINE_BT_MASK 0xF0000000L
+//PA_SU_HARDWARE_SCREEN_OFFSET
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X__SHIFT 0x0
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y__SHIFT 0x10
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_X_MASK 0x000001FFL
+#define PA_SU_HARDWARE_SCREEN_OFFSET__HW_SCREEN_OFFSET_Y_MASK 0x01FF0000L
+//CB_TARGET_MASK
+#define CB_TARGET_MASK__TARGET0_ENABLE__SHIFT 0x0
+#define CB_TARGET_MASK__TARGET1_ENABLE__SHIFT 0x4
+#define CB_TARGET_MASK__TARGET2_ENABLE__SHIFT 0x8
+#define CB_TARGET_MASK__TARGET3_ENABLE__SHIFT 0xc
+#define CB_TARGET_MASK__TARGET4_ENABLE__SHIFT 0x10
+#define CB_TARGET_MASK__TARGET5_ENABLE__SHIFT 0x14
+#define CB_TARGET_MASK__TARGET6_ENABLE__SHIFT 0x18
+#define CB_TARGET_MASK__TARGET7_ENABLE__SHIFT 0x1c
+#define CB_TARGET_MASK__TARGET0_ENABLE_MASK 0x0000000FL
+#define CB_TARGET_MASK__TARGET1_ENABLE_MASK 0x000000F0L
+#define CB_TARGET_MASK__TARGET2_ENABLE_MASK 0x00000F00L
+#define CB_TARGET_MASK__TARGET3_ENABLE_MASK 0x0000F000L
+#define CB_TARGET_MASK__TARGET4_ENABLE_MASK 0x000F0000L
+#define CB_TARGET_MASK__TARGET5_ENABLE_MASK 0x00F00000L
+#define CB_TARGET_MASK__TARGET6_ENABLE_MASK 0x0F000000L
+#define CB_TARGET_MASK__TARGET7_ENABLE_MASK 0xF0000000L
+//CB_SHADER_MASK
+#define CB_SHADER_MASK__OUTPUT0_ENABLE__SHIFT 0x0
+#define CB_SHADER_MASK__OUTPUT1_ENABLE__SHIFT 0x4
+#define CB_SHADER_MASK__OUTPUT2_ENABLE__SHIFT 0x8
+#define CB_SHADER_MASK__OUTPUT3_ENABLE__SHIFT 0xc
+#define CB_SHADER_MASK__OUTPUT4_ENABLE__SHIFT 0x10
+#define CB_SHADER_MASK__OUTPUT5_ENABLE__SHIFT 0x14
+#define CB_SHADER_MASK__OUTPUT6_ENABLE__SHIFT 0x18
+#define CB_SHADER_MASK__OUTPUT7_ENABLE__SHIFT 0x1c
+#define CB_SHADER_MASK__OUTPUT0_ENABLE_MASK 0x0000000FL
+#define CB_SHADER_MASK__OUTPUT1_ENABLE_MASK 0x000000F0L
+#define CB_SHADER_MASK__OUTPUT2_ENABLE_MASK 0x00000F00L
+#define CB_SHADER_MASK__OUTPUT3_ENABLE_MASK 0x0000F000L
+#define CB_SHADER_MASK__OUTPUT4_ENABLE_MASK 0x000F0000L
+#define CB_SHADER_MASK__OUTPUT5_ENABLE_MASK 0x00F00000L
+#define CB_SHADER_MASK__OUTPUT6_ENABLE_MASK 0x0F000000L
+#define CB_SHADER_MASK__OUTPUT7_ENABLE_MASK 0xF0000000L
+//PA_SC_GENERIC_SCISSOR_TL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_GENERIC_SCISSOR_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_GENERIC_SCISSOR_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_GENERIC_SCISSOR_BR
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X__SHIFT 0x0
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y__SHIFT 0x10
+#define PA_SC_GENERIC_SCISSOR_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_GENERIC_SCISSOR_BR__BR_Y_MASK 0x7FFF0000L
+//COHER_DEST_BASE_0
+#define COHER_DEST_BASE_0__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_0__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//COHER_DEST_BASE_1
+#define COHER_DEST_BASE_1__DEST_BASE_256B__SHIFT 0x0
+#define COHER_DEST_BASE_1__DEST_BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_SCISSOR_0_TL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_0_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_0_BR
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_0_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_1_TL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_1_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_1_BR
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_1_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_2_TL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_2_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_2_BR
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_2_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_3_TL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_3_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_3_BR
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_3_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_4_TL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_4_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_4_BR
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_4_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_5_TL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_5_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_5_BR
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_5_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_6_TL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_6_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_6_BR
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_6_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_7_TL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_7_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_7_BR
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_7_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_8_TL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_8_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_8_BR
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_8_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_9_TL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_9_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_9_BR
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_9_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_10_TL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_10_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_10_BR
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_10_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_11_TL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_11_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_11_BR
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_11_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_12_TL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_12_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_12_BR
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_12_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_13_TL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_13_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_13_BR
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_13_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_14_TL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_14_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_14_BR
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_14_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_SCISSOR_15_TL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE__SHIFT 0x1f
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_TL__TL_Y_MASK 0x7FFF0000L
+#define PA_SC_VPORT_SCISSOR_15_TL__WINDOW_OFFSET_DISABLE_MASK 0x80000000L
+//PA_SC_VPORT_SCISSOR_15_BR
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X__SHIFT 0x0
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y__SHIFT 0x10
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_X_MASK 0x00007FFFL
+#define PA_SC_VPORT_SCISSOR_15_BR__BR_Y_MASK 0x7FFF0000L
+//PA_SC_VPORT_ZMIN_0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_0__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_0__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_1
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_1__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_1
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_1__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_2
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_2__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_2
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_2__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_3
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_3__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_3
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_3__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_4
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_4__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_4
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_4__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_5
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_5__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_5
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_5__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_6
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_6__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_6
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_6__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_7
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_7__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_7
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_7__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_8
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_8__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_8
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_8__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_9
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_9__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_9
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_9__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_10
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_10__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_10
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_10__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_11
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_11__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_11
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_11__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_12
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_12__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_12
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_12__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_13
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_13__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_13
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_13__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_14
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_14__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_14
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_14__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMIN_15
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN__SHIFT 0x0
+#define PA_SC_VPORT_ZMIN_15__VPORT_ZMIN_MASK 0xFFFFFFFFL
+//PA_SC_VPORT_ZMAX_15
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX__SHIFT 0x0
+#define PA_SC_VPORT_ZMAX_15__VPORT_ZMAX_MASK 0xFFFFFFFFL
+//PA_SC_RASTER_CONFIG
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT 0x6
+#define PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT 0x7
+#define PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT 0x8
+#define PA_SC_RASTER_CONFIG__PKR_XSEL__SHIFT 0xa
+#define PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT 0xc
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2__SHIFT 0xe
+#define PA_SC_RASTER_CONFIG__SC_MAP__SHIFT 0x10
+#define PA_SC_RASTER_CONFIG__SC_XSEL__SHIFT 0x12
+#define PA_SC_RASTER_CONFIG__SC_YSEL__SHIFT 0x14
+#define PA_SC_RASTER_CONFIG__SE_MAP__SHIFT 0x18
+#define PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT 0x1a
+#define PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT 0x1c
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG__RB_XSEL2_MASK 0x00000030L
+#define PA_SC_RASTER_CONFIG__RB_XSEL_MASK 0x00000040L
+#define PA_SC_RASTER_CONFIG__RB_YSEL_MASK 0x00000080L
+#define PA_SC_RASTER_CONFIG__PKR_MAP_MASK 0x00000300L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL_MASK 0x00000C00L
+#define PA_SC_RASTER_CONFIG__PKR_YSEL_MASK 0x00003000L
+#define PA_SC_RASTER_CONFIG__PKR_XSEL2_MASK 0x0000C000L
+#define PA_SC_RASTER_CONFIG__SC_MAP_MASK 0x00030000L
+#define PA_SC_RASTER_CONFIG__SC_XSEL_MASK 0x000C0000L
+#define PA_SC_RASTER_CONFIG__SC_YSEL_MASK 0x00300000L
+#define PA_SC_RASTER_CONFIG__SE_MAP_MASK 0x03000000L
+#define PA_SC_RASTER_CONFIG__SE_XSEL_MASK 0x0C000000L
+#define PA_SC_RASTER_CONFIG__SE_YSEL_MASK 0x30000000L
+//PA_SC_RASTER_CONFIG_1
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP__SHIFT 0x0
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL__SHIFT 0x2
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL__SHIFT 0x4
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_MAP_MASK 0x00000003L
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_XSEL_MASK 0x0000000CL
+#define PA_SC_RASTER_CONFIG_1__SE_PAIR_YSEL_MASK 0x00000030L
+//PA_SC_SCREEN_EXTENT_CONTROL
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE__SHIFT 0x2
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_EVEN_ENABLE_MASK 0x00000003L
+#define PA_SC_SCREEN_EXTENT_CONTROL__SLICE_ODD_ENABLE_MASK 0x0000000CL
+//PA_SC_TILE_STEERING_OVERRIDE
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT 0xc
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT 0x10
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT 0x14
+#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK 0x00003000L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK 0x00030000L
+#define PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK 0x00300000L
+//CP_PERFMON_CNTX_CNTL
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
+#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
+//CP_PIPEID
+#define CP_PIPEID__PIPE_ID__SHIFT 0x0
+#define CP_PIPEID__PIPE_ID_MASK 0x00000003L
+//CP_RINGID
+#define CP_RINGID__RINGID__SHIFT 0x0
+#define CP_RINGID__RINGID_MASK 0x00000003L
+//CP_VMID
+#define CP_VMID__VMID__SHIFT 0x0
+#define CP_VMID__VMID_MASK 0x0000000FL
+//CONTEXT_RESERVED_REG0
+#define CONTEXT_RESERVED_REG0__DATA__SHIFT 0x0
+#define CONTEXT_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//CONTEXT_RESERVED_REG1
+#define CONTEXT_RESERVED_REG1__DATA__SHIFT 0x0
+#define CONTEXT_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//PA_SC_VRS_OVERRIDE_CNTL
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_RATE__SHIFT 0x4
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_SURFACE_ENABLE__SHIFT 0xc
+#define PA_SC_VRS_OVERRIDE_CNTL__RATE_HINT_WRITE_BACK_ENABLE__SHIFT 0xd
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE__SHIFT 0xe
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_RATE_MASK 0x000000F0L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_SURFACE_ENABLE_MASK 0x00001000L
+#define PA_SC_VRS_OVERRIDE_CNTL__RATE_HINT_WRITE_BACK_ENABLE_MASK 0x00002000L
+#define PA_SC_VRS_OVERRIDE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE_MASK 0x00004000L
+//PA_SC_VRS_RATE_FEEDBACK_BASE
+#define PA_SC_VRS_RATE_FEEDBACK_BASE__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VRS_RATE_FEEDBACK_BASE_EXT
+#define PA_SC_VRS_RATE_FEEDBACK_BASE_EXT__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//PA_SC_VRS_RATE_FEEDBACK_SIZE_XY
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__X_MAX__SHIFT 0x0
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__Y_MAX__SHIFT 0x10
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__X_MAX_MASK 0x000007FFL
+#define PA_SC_VRS_RATE_FEEDBACK_SIZE_XY__Y_MAX_MASK 0x07FF0000L
+//PA_SC_VRS_RATE_CACHE_CNTL
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_RD__SHIFT 0x0
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_WR__SHIFT 0x1
+#define PA_SC_VRS_RATE_CACHE_CNTL__L1_RD_POLICY__SHIFT 0x2
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_RD_POLICY__SHIFT 0x4
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_WR_POLICY__SHIFT 0x6
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_RD_NOALLOC__SHIFT 0x8
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_WR_NOALLOC__SHIFT 0x9
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_RD__SHIFT 0xa
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_WR__SHIFT 0xb
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_RD__SHIFT 0xc
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_WR__SHIFT 0xd
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_RD_MASK 0x00000001L
+#define PA_SC_VRS_RATE_CACHE_CNTL__BIG_PAGE_WR_MASK 0x00000002L
+#define PA_SC_VRS_RATE_CACHE_CNTL__L1_RD_POLICY_MASK 0x0000000CL
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_RD_POLICY_MASK 0x00000030L
+#define PA_SC_VRS_RATE_CACHE_CNTL__L2_WR_POLICY_MASK 0x000000C0L
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_RD_NOALLOC_MASK 0x00000100L
+#define PA_SC_VRS_RATE_CACHE_CNTL__LLC_WR_NOALLOC_MASK 0x00000200L
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_RD_MASK 0x00000400L
+#define PA_SC_VRS_RATE_CACHE_CNTL__NOFILL_WR_MASK 0x00000800L
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_RD_MASK 0x00001000L
+#define PA_SC_VRS_RATE_CACHE_CNTL__PERF_CNTR_EN_WR_MASK 0x00002000L
+//PA_SC_VRS_RATE_BASE
+#define PA_SC_VRS_RATE_BASE__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//PA_SC_VRS_RATE_BASE_EXT
+#define PA_SC_VRS_RATE_BASE_EXT__BASE_256B__SHIFT 0x0
+#define PA_SC_VRS_RATE_BASE_EXT__TB_SYNC_SIM_ID__SHIFT 0x1c
+#define PA_SC_VRS_RATE_BASE_EXT__BASE_256B_MASK 0x000000FFL
+#define PA_SC_VRS_RATE_BASE_EXT__TB_SYNC_SIM_ID_MASK 0xF0000000L
+//PA_SC_VRS_RATE_SIZE_XY
+#define PA_SC_VRS_RATE_SIZE_XY__X_MAX__SHIFT 0x0
+#define PA_SC_VRS_RATE_SIZE_XY__Y_MAX__SHIFT 0x10
+#define PA_SC_VRS_RATE_SIZE_XY__X_MAX_MASK 0x000007FFL
+#define PA_SC_VRS_RATE_SIZE_XY__Y_MAX_MASK 0x07FF0000L
+//VGT_MULTI_PRIM_IB_RESET_INDX
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
+#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
+//CB_RMI_GL2_CACHE_CONTROL
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_WR_POLICY__SHIFT 0x0
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_WR_POLICY__SHIFT 0x2
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_RD_POLICY__SHIFT 0x14
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_RD_POLICY__SHIFT 0x16
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_L3_BYPASS__SHIFT 0x1a
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_L3_BYPASS__SHIFT 0x1b
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_BIG_PAGE__SHIFT 0x1f
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_WR_POLICY_MASK 0x00000003L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_WR_POLICY_MASK 0x0000000CL
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_RD_POLICY_MASK 0x00300000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_RD_POLICY_MASK 0x00C00000L
+#define CB_RMI_GL2_CACHE_CONTROL__DCC_L3_BYPASS_MASK 0x04000000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_L3_BYPASS_MASK 0x08000000L
+#define CB_RMI_GL2_CACHE_CONTROL__COLOR_BIG_PAGE_MASK 0x80000000L
+//CB_BLEND_RED
+#define CB_BLEND_RED__BLEND_RED__SHIFT 0x0
+#define CB_BLEND_RED__BLEND_RED_MASK 0xFFFFFFFFL
+//CB_BLEND_GREEN
+#define CB_BLEND_GREEN__BLEND_GREEN__SHIFT 0x0
+#define CB_BLEND_GREEN__BLEND_GREEN_MASK 0xFFFFFFFFL
+//CB_BLEND_BLUE
+#define CB_BLEND_BLUE__BLEND_BLUE__SHIFT 0x0
+#define CB_BLEND_BLUE__BLEND_BLUE_MASK 0xFFFFFFFFL
+//CB_BLEND_ALPHA
+#define CB_BLEND_ALPHA__BLEND_ALPHA__SHIFT 0x0
+#define CB_BLEND_ALPHA__BLEND_ALPHA_MASK 0xFFFFFFFFL
+//CB_FDCC_CONTROL
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_WATERMARK__SHIFT 0x2
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01__SHIFT 0x8
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE__SHIFT 0x9
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0xa
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01__SHIFT 0xc
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE__SHIFT 0xd
+#define CB_FDCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG__SHIFT 0xe
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_FDCC_CONTROL__SAMPLE_MASK_TRACKER_WATERMARK_MASK 0x0000007CL
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_AC01_MASK 0x00000100L
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_SINGLE_MASK 0x00000200L
+#define CB_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00000400L
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_AC01_MASK 0x00001000L
+#define CB_FDCC_CONTROL__DISABLE_ELIMFC_SKIP_OF_SINGLE_MASK 0x00002000L
+#define CB_FDCC_CONTROL__ENABLE_ELIMFC_SKIP_OF_REG_MASK 0x00004000L
+//CB_COVERAGE_OUT_CONTROL
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_ENABLE__SHIFT 0x0
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_MRT__SHIFT 0x1
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_CHANNEL__SHIFT 0x4
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_SAMPLES__SHIFT 0x8
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_ENABLE_MASK 0x00000001L
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_MRT_MASK 0x0000000EL
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_CHANNEL_MASK 0x00000030L
+#define CB_COVERAGE_OUT_CONTROL__COVERAGE_OUT_SAMPLES_MASK 0x00000F00L
+//DB_STENCIL_CONTROL
+#define DB_STENCIL_CONTROL__STENCILFAIL__SHIFT 0x0
+#define DB_STENCIL_CONTROL__STENCILZPASS__SHIFT 0x4
+#define DB_STENCIL_CONTROL__STENCILZFAIL__SHIFT 0x8
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF__SHIFT 0xc
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF__SHIFT 0x10
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF__SHIFT 0x14
+#define DB_STENCIL_CONTROL__STENCILFAIL_MASK 0x0000000FL
+#define DB_STENCIL_CONTROL__STENCILZPASS_MASK 0x000000F0L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_MASK 0x00000F00L
+#define DB_STENCIL_CONTROL__STENCILFAIL_BF_MASK 0x0000F000L
+#define DB_STENCIL_CONTROL__STENCILZPASS_BF_MASK 0x000F0000L
+#define DB_STENCIL_CONTROL__STENCILZFAIL_BF_MASK 0x00F00000L
+//DB_STENCILREFMASK
+#define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT 0x0
+#define DB_STENCILREFMASK__STENCILMASK__SHIFT 0x8
+#define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
+#define DB_STENCILREFMASK__STENCILOPVAL__SHIFT 0x18
+#define DB_STENCILREFMASK__STENCILTESTVAL_MASK 0x000000FFL
+#define DB_STENCILREFMASK__STENCILMASK_MASK 0x0000FF00L
+#define DB_STENCILREFMASK__STENCILWRITEMASK_MASK 0x00FF0000L
+#define DB_STENCILREFMASK__STENCILOPVAL_MASK 0xFF000000L
+//DB_STENCILREFMASK_BF
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF__SHIFT 0x0
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF__SHIFT 0x8
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF__SHIFT 0x10
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF__SHIFT 0x18
+#define DB_STENCILREFMASK_BF__STENCILTESTVAL_BF_MASK 0x000000FFL
+#define DB_STENCILREFMASK_BF__STENCILMASK_BF_MASK 0x0000FF00L
+#define DB_STENCILREFMASK_BF__STENCILWRITEMASK_BF_MASK 0x00FF0000L
+#define DB_STENCILREFMASK_BF__STENCILOPVAL_BF_MASK 0xFF000000L
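+
+// Note (not part of the generated header): each __SHIFT/_MASK pair above describes one
+// packed field inside a 32-bit register. As a minimal, self-contained sketch of how such
+// a pair is typically used, the C snippet below packs and unpacks DB_STENCILREFMASK fields.
+// The SET_FIELD/GET_FIELD helpers and the duplicated defines are illustrative only and are
+// not amdgpu driver API; they simply restate the values listed above.
+//
+//   #include <stdint.h>
+//   #include <stdio.h>
+//
+//   #define DB_STENCILREFMASK__STENCILTESTVAL__SHIFT   0x0
+//   #define DB_STENCILREFMASK__STENCILMASK__SHIFT      0x8
+//   #define DB_STENCILREFMASK__STENCILWRITEMASK__SHIFT 0x10
+//   #define DB_STENCILREFMASK__STENCILTESTVAL_MASK     0x000000FFL
+//   #define DB_STENCILREFMASK__STENCILMASK_MASK        0x0000FF00L
+//   #define DB_STENCILREFMASK__STENCILWRITEMASK_MASK   0x00FF0000L
+//
+//   /* Pack a field: shift the value into place, then clip it to the field's mask. */
+//   #define SET_FIELD(reg, field, val) \
+//           (((reg) & ~field##_MASK) | (((uint32_t)(val) << field##__SHIFT) & field##_MASK))
+//
+//   /* Unpack a field: isolate its bits, then shift them back down to bit 0. */
+//   #define GET_FIELD(reg, field) \
+//           (((reg) & field##_MASK) >> field##__SHIFT)
+//
+//   int main(void)
+//   {
+//           uint32_t reg = 0;
+//
+//           reg = SET_FIELD(reg, DB_STENCILREFMASK__STENCILTESTVAL, 0x7f);
+//           reg = SET_FIELD(reg, DB_STENCILREFMASK__STENCILMASK, 0xff);
+//           reg = SET_FIELD(reg, DB_STENCILREFMASK__STENCILWRITEMASK, 0xff);
+//
+//           /* Prints: DB_STENCILREFMASK = 0x00ffff7f (ref=0x7f) */
+//           printf("DB_STENCILREFMASK = 0x%08x (ref=0x%02x)\n",
+//                  reg, (unsigned)GET_FIELD(reg, DB_STENCILREFMASK__STENCILTESTVAL));
+//           return 0;
+//   }
+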
+//PA_CL_VPORT_XSCALE
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_1
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_1__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_1
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_1__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_1
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_1__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_1
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_1__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_1
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_1__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_1
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_1__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_2
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_2__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_2
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_2__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_2
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_2__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_2
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_2__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_2
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_2__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_2
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_2__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_3
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_3__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_3
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_3__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_3
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_3__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_3
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_3__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_3
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_3__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_3
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_3__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_4
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_4__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_4
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_4__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_4
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_4__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_4
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_4__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_4
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_4__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_4
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_4__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_5
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_5__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_5
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_5__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_5
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_5__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_5
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_5__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_5
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_5__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_5
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_5__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_6
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_6__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_6
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_6__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_6
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_6__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_6
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_6__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_6
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_6__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_6
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_6__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_7
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_7__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_7
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_7__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_7
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_7__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_7
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_7__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_7
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_7__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_7
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_7__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_8
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_8__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_8
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_8__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_8
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_8__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_8
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_8__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_8
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_8__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_8
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_8__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_9
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_9__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_9
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_9__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_9
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_9__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_9
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_9__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_9
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_9__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_9
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_9__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_10
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_10__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_10
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_10__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_10
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_10__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_10
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_10__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_10
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_10__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_10
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_10__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_11
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_11__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_11
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_11__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_11
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_11__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_11
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_11__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_11
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_11__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_11
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_11__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_12
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_12__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_12
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_12__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_12
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_12__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_12
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_12__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_12
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_12__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_12
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_12__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_13
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_13__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_13
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_13__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_13
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_13__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_13
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_13__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_13
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_13__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_13
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_13__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_14
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_14__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_14
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_14__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_14
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_14__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_14
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_14__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_14
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_14__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_14
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_14__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XSCALE_15
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE__SHIFT 0x0
+#define PA_CL_VPORT_XSCALE_15__VPORT_XSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_XOFFSET_15
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_XOFFSET_15__VPORT_XOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YSCALE_15
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE__SHIFT 0x0
+#define PA_CL_VPORT_YSCALE_15__VPORT_YSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_YOFFSET_15
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_YOFFSET_15__VPORT_YOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZSCALE_15
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE__SHIFT 0x0
+#define PA_CL_VPORT_ZSCALE_15__VPORT_ZSCALE_MASK 0xFFFFFFFFL
+//PA_CL_VPORT_ZOFFSET_15
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET__SHIFT 0x0
+#define PA_CL_VPORT_ZOFFSET_15__VPORT_ZOFFSET_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_X
+#define PA_CL_UCP_0_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Y
+#define PA_CL_UCP_0_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_Z
+#define PA_CL_UCP_0_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_0_W
+#define PA_CL_UCP_0_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_0_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_X
+#define PA_CL_UCP_1_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Y
+#define PA_CL_UCP_1_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_Z
+#define PA_CL_UCP_1_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_1_W
+#define PA_CL_UCP_1_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_1_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_X
+#define PA_CL_UCP_2_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Y
+#define PA_CL_UCP_2_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_Z
+#define PA_CL_UCP_2_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_2_W
+#define PA_CL_UCP_2_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_2_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_X
+#define PA_CL_UCP_3_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Y
+#define PA_CL_UCP_3_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_Z
+#define PA_CL_UCP_3_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_3_W
+#define PA_CL_UCP_3_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_3_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_X
+#define PA_CL_UCP_4_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Y
+#define PA_CL_UCP_4_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_Z
+#define PA_CL_UCP_4_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_4_W
+#define PA_CL_UCP_4_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_4_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_X
+#define PA_CL_UCP_5_X__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_X__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Y
+#define PA_CL_UCP_5_Y__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Y__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_Z
+#define PA_CL_UCP_5_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_UCP_5_W
+#define PA_CL_UCP_5_W__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_UCP_5_W__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_PROG_NEAR_CLIP_Z
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_PROG_NEAR_CLIP_Z__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_RATE_CNTL
+#define PA_RATE_CNTL__VERTEX_RATE__SHIFT 0x0
+#define PA_RATE_CNTL__PRIM_RATE__SHIFT 0x4
+#define PA_RATE_CNTL__VERTEX_RATE_MASK 0x0000000FL
+#define PA_RATE_CNTL__PRIM_RATE_MASK 0x000000F0L
+//SPI_PS_INPUT_CNTL_0
+#define SPI_PS_INPUT_CNTL_0__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_0__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_0__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_0__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_0__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_0__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_0__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_0__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_0__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_0__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_0__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_0__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_0__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_0__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_0__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_1
+#define SPI_PS_INPUT_CNTL_1__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_1__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_1__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_1__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_1__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_1__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_1__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_1__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_1__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_1__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_1__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_1__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_1__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_1__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_1__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_2
+#define SPI_PS_INPUT_CNTL_2__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_2__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_2__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_2__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_2__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_2__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_2__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_2__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_2__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_2__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_2__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_2__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_2__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_2__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_2__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_3
+#define SPI_PS_INPUT_CNTL_3__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_3__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_3__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_3__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_3__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_3__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_3__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_3__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_3__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_3__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_3__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_3__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_3__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_3__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_3__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_4
+#define SPI_PS_INPUT_CNTL_4__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_4__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_4__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_4__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_4__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_4__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_4__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_4__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_4__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_4__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_4__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_4__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_4__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_4__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_4__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_5
+#define SPI_PS_INPUT_CNTL_5__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_5__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_5__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_5__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_5__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_5__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_5__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_5__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_5__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_5__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_5__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_5__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_5__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_5__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_5__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_6
+#define SPI_PS_INPUT_CNTL_6__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_6__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_6__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_6__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_6__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_6__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_6__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_6__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_6__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_6__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_6__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_6__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_6__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_6__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_6__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_7
+#define SPI_PS_INPUT_CNTL_7__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_7__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_7__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_7__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_7__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_7__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_7__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_7__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_7__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_7__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_7__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_7__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_7__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_7__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_7__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_8
+#define SPI_PS_INPUT_CNTL_8__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_8__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_8__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_8__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_8__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_8__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_8__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_8__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_8__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_8__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_8__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_8__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_8__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_8__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_8__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_9
+#define SPI_PS_INPUT_CNTL_9__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_9__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_9__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_9__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_9__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_9__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_9__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_9__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_9__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_9__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_9__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_9__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_9__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_9__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_9__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_10
+#define SPI_PS_INPUT_CNTL_10__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_10__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_10__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_10__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_10__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_10__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_10__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_10__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_10__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_10__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_10__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_10__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_10__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_10__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_10__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_11
+#define SPI_PS_INPUT_CNTL_11__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_11__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_11__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_11__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_11__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_11__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_11__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_11__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_11__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_11__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_11__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_11__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_11__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_11__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_11__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_12
+#define SPI_PS_INPUT_CNTL_12__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_12__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_12__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_12__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_12__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_12__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_12__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_12__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_12__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_12__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_12__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_12__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_12__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_12__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_12__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_13
+#define SPI_PS_INPUT_CNTL_13__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_13__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_13__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_13__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_13__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_13__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_13__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_13__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_13__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_13__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_13__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_13__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_13__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_13__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_13__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_14
+#define SPI_PS_INPUT_CNTL_14__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_14__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_14__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_14__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_14__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_14__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_14__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_14__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_14__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_14__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_14__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_14__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_14__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_14__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_14__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_15
+#define SPI_PS_INPUT_CNTL_15__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_15__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_15__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_15__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_15__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_15__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_15__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_15__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_15__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_15__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_15__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_15__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_15__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_15__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_15__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_16
+#define SPI_PS_INPUT_CNTL_16__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_16__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_16__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_16__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_16__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_16__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_16__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_16__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_16__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_16__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_16__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_16__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_16__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_16__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_16__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_17
+#define SPI_PS_INPUT_CNTL_17__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_17__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_17__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_17__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_17__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_17__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_17__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_17__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_17__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_17__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_17__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_17__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_17__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_17__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_17__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_18
+#define SPI_PS_INPUT_CNTL_18__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_18__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_18__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_18__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_18__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_18__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_18__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_18__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_18__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_18__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_18__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_18__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_18__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_18__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_18__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_19
+#define SPI_PS_INPUT_CNTL_19__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_19__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_19__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX__SHIFT 0x11
+#define SPI_PS_INPUT_CNTL_19__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1__SHIFT 0x17
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_19__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_19__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_19__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_19__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_MASK 0x00020000L
+#define SPI_PS_INPUT_CNTL_19__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_19__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_19__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_19__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_19__PT_SPRITE_TEX_ATTR1_MASK 0x00800000L
+#define SPI_PS_INPUT_CNTL_19__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_19__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_20
+#define SPI_PS_INPUT_CNTL_20__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_20__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_20__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_20__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_20__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_20__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_20__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_20__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_20__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_20__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_20__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_20__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_20__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_20__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_21
+#define SPI_PS_INPUT_CNTL_21__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_21__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_21__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_21__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_21__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_21__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_21__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_21__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_21__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_21__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_21__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_21__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_21__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_21__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_22
+#define SPI_PS_INPUT_CNTL_22__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_22__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_22__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_22__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_22__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_22__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_22__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_22__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_22__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_22__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_22__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_22__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_22__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_22__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_23
+#define SPI_PS_INPUT_CNTL_23__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_23__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_23__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_23__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_23__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_23__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_23__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_23__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_23__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_23__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_23__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_23__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_23__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_23__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_24
+#define SPI_PS_INPUT_CNTL_24__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_24__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_24__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_24__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_24__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_24__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_24__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_24__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_24__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_24__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_24__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_24__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_24__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_24__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_25
+#define SPI_PS_INPUT_CNTL_25__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_25__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_25__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_25__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_25__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_25__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_25__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_25__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_25__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_25__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_25__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_25__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_25__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_25__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_26
+#define SPI_PS_INPUT_CNTL_26__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_26__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_26__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_26__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_26__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_26__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_26__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_26__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_26__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_26__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_26__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_26__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_26__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_26__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_27
+#define SPI_PS_INPUT_CNTL_27__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_27__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_27__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_27__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_27__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_27__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_27__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_27__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_27__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_27__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_27__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_27__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_27__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_27__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_28
+#define SPI_PS_INPUT_CNTL_28__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_28__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_28__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_28__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_28__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_28__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_28__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_28__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_28__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_28__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_28__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_28__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_28__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_28__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_29
+#define SPI_PS_INPUT_CNTL_29__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_29__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_29__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_29__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_29__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_29__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_29__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_29__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_29__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_29__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_29__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_29__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_29__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_29__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_30
+#define SPI_PS_INPUT_CNTL_30__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_30__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_30__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_30__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_30__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_30__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_30__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_30__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_30__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_30__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_30__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_30__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_30__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_30__ATTR1_VALID_MASK 0x02000000L
+//SPI_PS_INPUT_CNTL_31
+#define SPI_PS_INPUT_CNTL_31__OFFSET__SHIFT 0x0
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL__SHIFT 0x8
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE__SHIFT 0xa
+#define SPI_PS_INPUT_CNTL_31__ROTATE_PC_PTR__SHIFT 0xb
+#define SPI_PS_INPUT_CNTL_31__PRIM_ATTR__SHIFT 0xc
+#define SPI_PS_INPUT_CNTL_31__DUP__SHIFT 0x12
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE__SHIFT 0x13
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1__SHIFT 0x14
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1__SHIFT 0x15
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID__SHIFT 0x18
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID__SHIFT 0x19
+#define SPI_PS_INPUT_CNTL_31__OFFSET_MASK 0x0000003FL
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_MASK 0x00000300L
+#define SPI_PS_INPUT_CNTL_31__FLAT_SHADE_MASK 0x00000400L
+#define SPI_PS_INPUT_CNTL_31__ROTATE_PC_PTR_MASK 0x00000800L
+#define SPI_PS_INPUT_CNTL_31__PRIM_ATTR_MASK 0x00001000L
+#define SPI_PS_INPUT_CNTL_31__DUP_MASK 0x00040000L
+#define SPI_PS_INPUT_CNTL_31__FP16_INTERP_MODE_MASK 0x00080000L
+#define SPI_PS_INPUT_CNTL_31__USE_DEFAULT_ATTR1_MASK 0x00100000L
+#define SPI_PS_INPUT_CNTL_31__DEFAULT_VAL_ATTR1_MASK 0x00600000L
+#define SPI_PS_INPUT_CNTL_31__ATTR0_VALID_MASK 0x01000000L
+#define SPI_PS_INPUT_CNTL_31__ATTR1_VALID_MASK 0x02000000L
+//SPI_VS_OUT_CONFIG
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT__SHIFT 0x1
+#define SPI_VS_OUT_CONFIG__NO_PC_EXPORT__SHIFT 0x7
+#define SPI_VS_OUT_CONFIG__PRIM_EXPORT_COUNT__SHIFT 0x8
+#define SPI_VS_OUT_CONFIG__VS_EXPORT_COUNT_MASK 0x0000003EL
+#define SPI_VS_OUT_CONFIG__NO_PC_EXPORT_MASK 0x00000080L
+#define SPI_VS_OUT_CONFIG__PRIM_EXPORT_COUNT_MASK 0x00001F00L
+//SPI_PS_INPUT_ENA
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ENA__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ENA__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ENA__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ENA__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ENA__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ENA__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ENA__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ENA__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ENA__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ENA__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ENA__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ENA__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ENA__POS_FIXED_PT_ENA_MASK 0x00008000L
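(Editorial note, not part of the patch: the *__SHIFT/*_MASK pairs added here follow the usual convention for these generated headers, where a field value is written as (val << FIELD__SHIFT) & FIELD_MASK and read back as (reg & FIELD_MASK) >> FIELD__SHIFT. A minimal illustrative sketch in C, assuming the header that carries these definitions is already included; the helper name is made up for illustration only.)

/* Illustrative only: compose an SPI_PS_INPUT_ENA value from the
 * shift/mask macros above, enabling perspective-center interpolation
 * and the front-face input.
 */
static unsigned int example_spi_ps_input_ena(void)
{
	unsigned int v = 0;

	/* request perspective interpolation at pixel center */
	v |= (1u << SPI_PS_INPUT_ENA__PERSP_CENTER_ENA__SHIFT) &
	     SPI_PS_INPUT_ENA__PERSP_CENTER_ENA_MASK;
	/* request the front-face input for the pixel shader */
	v |= (1u << SPI_PS_INPUT_ENA__FRONT_FACE_ENA__SHIFT) &
	     SPI_PS_INPUT_ENA__FRONT_FACE_ENA_MASK;

	return v;
}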
+//SPI_PS_INPUT_ADDR
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA__SHIFT 0x0
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA__SHIFT 0x1
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA__SHIFT 0x2
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA__SHIFT 0x3
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA__SHIFT 0x4
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA__SHIFT 0x5
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA__SHIFT 0x6
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA__SHIFT 0x7
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA__SHIFT 0x8
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA__SHIFT 0x9
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA__SHIFT 0xa
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA__SHIFT 0xb
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA__SHIFT 0xc
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA__SHIFT 0xd
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA__SHIFT 0xe
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA__SHIFT 0xf
+#define SPI_PS_INPUT_ADDR__PERSP_SAMPLE_ENA_MASK 0x00000001L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTER_ENA_MASK 0x00000002L
+#define SPI_PS_INPUT_ADDR__PERSP_CENTROID_ENA_MASK 0x00000004L
+#define SPI_PS_INPUT_ADDR__PERSP_PULL_MODEL_ENA_MASK 0x00000008L
+#define SPI_PS_INPUT_ADDR__LINEAR_SAMPLE_ENA_MASK 0x00000010L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTER_ENA_MASK 0x00000020L
+#define SPI_PS_INPUT_ADDR__LINEAR_CENTROID_ENA_MASK 0x00000040L
+#define SPI_PS_INPUT_ADDR__LINE_STIPPLE_TEX_ENA_MASK 0x00000080L
+#define SPI_PS_INPUT_ADDR__POS_X_FLOAT_ENA_MASK 0x00000100L
+#define SPI_PS_INPUT_ADDR__POS_Y_FLOAT_ENA_MASK 0x00000200L
+#define SPI_PS_INPUT_ADDR__POS_Z_FLOAT_ENA_MASK 0x00000400L
+#define SPI_PS_INPUT_ADDR__POS_W_FLOAT_ENA_MASK 0x00000800L
+#define SPI_PS_INPUT_ADDR__FRONT_FACE_ENA_MASK 0x00001000L
+#define SPI_PS_INPUT_ADDR__ANCILLARY_ENA_MASK 0x00002000L
+#define SPI_PS_INPUT_ADDR__SAMPLE_COVERAGE_ENA_MASK 0x00004000L
+#define SPI_PS_INPUT_ADDR__POS_FIXED_PT_ENA_MASK 0x00008000L
+//SPI_INTERP_CONTROL_0
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA__SHIFT 0x0
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA__SHIFT 0x1
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X__SHIFT 0x2
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y__SHIFT 0x5
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z__SHIFT 0x8
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W__SHIFT 0xb
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1__SHIFT 0xe
+#define SPI_INTERP_CONTROL_0__FLAT_SHADE_ENA_MASK 0x00000001L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_ENA_MASK 0x00000002L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_X_MASK 0x0000001CL
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Y_MASK 0x000000E0L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_Z_MASK 0x00000700L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_OVRD_W_MASK 0x00003800L
+#define SPI_INTERP_CONTROL_0__PNT_SPRITE_TOP_1_MASK 0x00004000L
+//SPI_PS_IN_CONTROL
+#define SPI_PS_IN_CONTROL__NUM_INTERP__SHIFT 0x0
+#define SPI_PS_IN_CONTROL__PARAM_GEN__SHIFT 0x6
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN__SHIFT 0x7
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC__SHIFT 0x8
+#define SPI_PS_IN_CONTROL__NUM_PRIM_INTERP__SHIFT 0x9
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE__SHIFT 0xe
+#define SPI_PS_IN_CONTROL__PS_W32_EN__SHIFT 0xf
+#define SPI_PS_IN_CONTROL__NUM_INTERP_MASK 0x0000003FL
+#define SPI_PS_IN_CONTROL__PARAM_GEN_MASK 0x00000040L
+#define SPI_PS_IN_CONTROL__OFFCHIP_PARAM_EN_MASK 0x00000080L
+#define SPI_PS_IN_CONTROL__LATE_PC_DEALLOC_MASK 0x00000100L
+#define SPI_PS_IN_CONTROL__NUM_PRIM_INTERP_MASK 0x00003E00L
+#define SPI_PS_IN_CONTROL__BC_OPTIMIZE_DISABLE_MASK 0x00004000L
+#define SPI_PS_IN_CONTROL__PS_W32_EN_MASK 0x00008000L
+//SPI_BARYC_CNTL
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL__SHIFT 0x0
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL__SHIFT 0x4
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL__SHIFT 0x8
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL__SHIFT 0xc
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION__SHIFT 0x10
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC__SHIFT 0x14
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS__SHIFT 0x18
+#define SPI_BARYC_CNTL__PERSP_CENTER_CNTL_MASK 0x00000001L
+#define SPI_BARYC_CNTL__PERSP_CENTROID_CNTL_MASK 0x00000010L
+#define SPI_BARYC_CNTL__LINEAR_CENTER_CNTL_MASK 0x00000100L
+#define SPI_BARYC_CNTL__LINEAR_CENTROID_CNTL_MASK 0x00001000L
+#define SPI_BARYC_CNTL__POS_FLOAT_LOCATION_MASK 0x00030000L
+#define SPI_BARYC_CNTL__POS_FLOAT_ULC_MASK 0x00100000L
+#define SPI_BARYC_CNTL__FRONT_FACE_ALL_BITS_MASK 0x01000000L
+//SPI_TMPRING_SIZE
+#define SPI_TMPRING_SIZE__WAVES__SHIFT 0x0
+#define SPI_TMPRING_SIZE__WAVESIZE__SHIFT 0xc
+#define SPI_TMPRING_SIZE__WAVES_MASK 0x00000FFFL
+#define SPI_TMPRING_SIZE__WAVESIZE_MASK 0x07FFF000L
+//SPI_GFX_SCRATCH_BASE_LO
+#define SPI_GFX_SCRATCH_BASE_LO__DATA__SHIFT 0x0
+#define SPI_GFX_SCRATCH_BASE_LO__DATA_MASK 0xFFFFFFFFL
+//SPI_GFX_SCRATCH_BASE_HI
+#define SPI_GFX_SCRATCH_BASE_HI__DATA__SHIFT 0x0
+#define SPI_GFX_SCRATCH_BASE_HI__DATA_MASK 0x000000FFL
+//SPI_SHADER_IDX_FORMAT
+#define SPI_SHADER_IDX_FORMAT__IDX0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_IDX_FORMAT__IDX0_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_POS_FORMAT
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_POS_FORMAT__POS4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_POS_FORMAT__POS0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_POS_FORMAT__POS1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_POS_FORMAT__POS2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_POS_FORMAT__POS3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_POS_FORMAT__POS4_EXPORT_FORMAT_MASK 0x000F0000L
+//SPI_SHADER_Z_FORMAT
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_Z_FORMAT__Z_EXPORT_FORMAT_MASK 0x0000000FL
+//SPI_SHADER_COL_FORMAT
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT__SHIFT 0x0
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT__SHIFT 0x4
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT__SHIFT 0x8
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT__SHIFT 0xc
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT__SHIFT 0x10
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT__SHIFT 0x14
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT__SHIFT 0x18
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT__SHIFT 0x1c
+#define SPI_SHADER_COL_FORMAT__COL0_EXPORT_FORMAT_MASK 0x0000000FL
+#define SPI_SHADER_COL_FORMAT__COL1_EXPORT_FORMAT_MASK 0x000000F0L
+#define SPI_SHADER_COL_FORMAT__COL2_EXPORT_FORMAT_MASK 0x00000F00L
+#define SPI_SHADER_COL_FORMAT__COL3_EXPORT_FORMAT_MASK 0x0000F000L
+#define SPI_SHADER_COL_FORMAT__COL4_EXPORT_FORMAT_MASK 0x000F0000L
+#define SPI_SHADER_COL_FORMAT__COL5_EXPORT_FORMAT_MASK 0x00F00000L
+#define SPI_SHADER_COL_FORMAT__COL6_EXPORT_FORMAT_MASK 0x0F000000L
+#define SPI_SHADER_COL_FORMAT__COL7_EXPORT_FORMAT_MASK 0xF0000000L
+//SX_PS_DOWNCONVERT_CONTROL
+#define SX_PS_DOWNCONVERT_CONTROL__MRT0_FMT_MAPPING_DISABLE__SHIFT 0x0
+#define SX_PS_DOWNCONVERT_CONTROL__MRT1_FMT_MAPPING_DISABLE__SHIFT 0x1
+#define SX_PS_DOWNCONVERT_CONTROL__MRT2_FMT_MAPPING_DISABLE__SHIFT 0x2
+#define SX_PS_DOWNCONVERT_CONTROL__MRT3_FMT_MAPPING_DISABLE__SHIFT 0x3
+#define SX_PS_DOWNCONVERT_CONTROL__MRT4_FMT_MAPPING_DISABLE__SHIFT 0x4
+#define SX_PS_DOWNCONVERT_CONTROL__MRT5_FMT_MAPPING_DISABLE__SHIFT 0x5
+#define SX_PS_DOWNCONVERT_CONTROL__MRT6_FMT_MAPPING_DISABLE__SHIFT 0x6
+#define SX_PS_DOWNCONVERT_CONTROL__MRT7_FMT_MAPPING_DISABLE__SHIFT 0x7
+#define SX_PS_DOWNCONVERT_CONTROL__MRT0_FMT_MAPPING_DISABLE_MASK 0x00000001L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT1_FMT_MAPPING_DISABLE_MASK 0x00000002L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT2_FMT_MAPPING_DISABLE_MASK 0x00000004L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT3_FMT_MAPPING_DISABLE_MASK 0x00000008L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT4_FMT_MAPPING_DISABLE_MASK 0x00000010L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT5_FMT_MAPPING_DISABLE_MASK 0x00000020L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT6_FMT_MAPPING_DISABLE_MASK 0x00000040L
+#define SX_PS_DOWNCONVERT_CONTROL__MRT7_FMT_MAPPING_DISABLE_MASK 0x00000080L
+//SX_PS_DOWNCONVERT
+#define SX_PS_DOWNCONVERT__MRT0__SHIFT 0x0
+#define SX_PS_DOWNCONVERT__MRT1__SHIFT 0x4
+#define SX_PS_DOWNCONVERT__MRT2__SHIFT 0x8
+#define SX_PS_DOWNCONVERT__MRT3__SHIFT 0xc
+#define SX_PS_DOWNCONVERT__MRT4__SHIFT 0x10
+#define SX_PS_DOWNCONVERT__MRT5__SHIFT 0x14
+#define SX_PS_DOWNCONVERT__MRT6__SHIFT 0x18
+#define SX_PS_DOWNCONVERT__MRT7__SHIFT 0x1c
+#define SX_PS_DOWNCONVERT__MRT0_MASK 0x0000000FL
+#define SX_PS_DOWNCONVERT__MRT1_MASK 0x000000F0L
+#define SX_PS_DOWNCONVERT__MRT2_MASK 0x00000F00L
+#define SX_PS_DOWNCONVERT__MRT3_MASK 0x0000F000L
+#define SX_PS_DOWNCONVERT__MRT4_MASK 0x000F0000L
+#define SX_PS_DOWNCONVERT__MRT5_MASK 0x00F00000L
+#define SX_PS_DOWNCONVERT__MRT6_MASK 0x0F000000L
+#define SX_PS_DOWNCONVERT__MRT7_MASK 0xF0000000L
+//SX_BLEND_OPT_EPSILON
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON__SHIFT 0x0
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON__SHIFT 0x4
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON__SHIFT 0x8
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON__SHIFT 0xc
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON__SHIFT 0x10
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON__SHIFT 0x14
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON__SHIFT 0x18
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON__SHIFT 0x1c
+#define SX_BLEND_OPT_EPSILON__MRT0_EPSILON_MASK 0x0000000FL
+#define SX_BLEND_OPT_EPSILON__MRT1_EPSILON_MASK 0x000000F0L
+#define SX_BLEND_OPT_EPSILON__MRT2_EPSILON_MASK 0x00000F00L
+#define SX_BLEND_OPT_EPSILON__MRT3_EPSILON_MASK 0x0000F000L
+#define SX_BLEND_OPT_EPSILON__MRT4_EPSILON_MASK 0x000F0000L
+#define SX_BLEND_OPT_EPSILON__MRT5_EPSILON_MASK 0x00F00000L
+#define SX_BLEND_OPT_EPSILON__MRT6_EPSILON_MASK 0x0F000000L
+#define SX_BLEND_OPT_EPSILON__MRT7_EPSILON_MASK 0xF0000000L
+//SX_BLEND_OPT_CONTROL
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE__SHIFT 0x0
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE__SHIFT 0x1
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE__SHIFT 0x4
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE__SHIFT 0x5
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE__SHIFT 0x8
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE__SHIFT 0x9
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE__SHIFT 0xc
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE__SHIFT 0xd
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE__SHIFT 0x10
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE__SHIFT 0x11
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE__SHIFT 0x14
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE__SHIFT 0x15
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE__SHIFT 0x18
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE__SHIFT 0x19
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE__SHIFT 0x1c
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE__SHIFT 0x1d
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE__SHIFT 0x1f
+#define SX_BLEND_OPT_CONTROL__MRT0_COLOR_OPT_DISABLE_MASK 0x00000001L
+#define SX_BLEND_OPT_CONTROL__MRT0_ALPHA_OPT_DISABLE_MASK 0x00000002L
+#define SX_BLEND_OPT_CONTROL__MRT1_COLOR_OPT_DISABLE_MASK 0x00000010L
+#define SX_BLEND_OPT_CONTROL__MRT1_ALPHA_OPT_DISABLE_MASK 0x00000020L
+#define SX_BLEND_OPT_CONTROL__MRT2_COLOR_OPT_DISABLE_MASK 0x00000100L
+#define SX_BLEND_OPT_CONTROL__MRT2_ALPHA_OPT_DISABLE_MASK 0x00000200L
+#define SX_BLEND_OPT_CONTROL__MRT3_COLOR_OPT_DISABLE_MASK 0x00001000L
+#define SX_BLEND_OPT_CONTROL__MRT3_ALPHA_OPT_DISABLE_MASK 0x00002000L
+#define SX_BLEND_OPT_CONTROL__MRT4_COLOR_OPT_DISABLE_MASK 0x00010000L
+#define SX_BLEND_OPT_CONTROL__MRT4_ALPHA_OPT_DISABLE_MASK 0x00020000L
+#define SX_BLEND_OPT_CONTROL__MRT5_COLOR_OPT_DISABLE_MASK 0x00100000L
+#define SX_BLEND_OPT_CONTROL__MRT5_ALPHA_OPT_DISABLE_MASK 0x00200000L
+#define SX_BLEND_OPT_CONTROL__MRT6_COLOR_OPT_DISABLE_MASK 0x01000000L
+#define SX_BLEND_OPT_CONTROL__MRT6_ALPHA_OPT_DISABLE_MASK 0x02000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_COLOR_OPT_DISABLE_MASK 0x10000000L
+#define SX_BLEND_OPT_CONTROL__MRT7_ALPHA_OPT_DISABLE_MASK 0x20000000L
+#define SX_BLEND_OPT_CONTROL__PIXEN_ZERO_OPT_DISABLE_MASK 0x80000000L
+//SX_MRT0_BLEND_OPT
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT0_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT0_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT0_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT0_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT0_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT0_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT1_BLEND_OPT
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT1_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT1_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT1_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT1_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT1_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT1_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT2_BLEND_OPT
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT2_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT2_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT2_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT2_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT2_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT2_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT3_BLEND_OPT
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT3_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT3_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT3_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT3_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT3_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT3_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT4_BLEND_OPT
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT4_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT4_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT4_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT4_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT4_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT4_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT5_BLEND_OPT
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT5_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT5_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT5_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT5_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT5_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT5_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT6_BLEND_OPT
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT6_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT6_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT6_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT6_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT6_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT6_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//SX_MRT7_BLEND_OPT
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT__SHIFT 0x0
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT__SHIFT 0x4
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN__SHIFT 0x8
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT__SHIFT 0x10
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT__SHIFT 0x14
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN__SHIFT 0x18
+#define SX_MRT7_BLEND_OPT__COLOR_SRC_OPT_MASK 0x00000007L
+#define SX_MRT7_BLEND_OPT__COLOR_DST_OPT_MASK 0x00000070L
+#define SX_MRT7_BLEND_OPT__COLOR_COMB_FCN_MASK 0x00000700L
+#define SX_MRT7_BLEND_OPT__ALPHA_SRC_OPT_MASK 0x00070000L
+#define SX_MRT7_BLEND_OPT__ALPHA_DST_OPT_MASK 0x00700000L
+#define SX_MRT7_BLEND_OPT__ALPHA_COMB_FCN_MASK 0x07000000L
+//CB_BLEND0_CONTROL
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND0_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND0_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND0_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND0_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND0_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND0_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND0_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND0_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND0_CONTROL__DISABLE_ROP3_MASK 0x80000000L
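(Editorial note, not part of the patch: the CB_BLENDn_CONTROL registers mix multi-bit fields such as COLOR_SRCBLEND with single-bit flags such as ENABLE. A hedged sketch of decoding a raw CB_BLEND0_CONTROL value with the masks above, assuming the generated header is included; the function name is hypothetical.)

/* Illustrative only: extract a few CB_BLEND0_CONTROL fields from a
 * raw register value using the mask/shift pairs above.
 */
static void example_decode_cb_blend0_control(unsigned int reg)
{
	unsigned int color_src = (reg & CB_BLEND0_CONTROL__COLOR_SRCBLEND_MASK) >>
				 CB_BLEND0_CONTROL__COLOR_SRCBLEND__SHIFT;
	unsigned int color_dst = (reg & CB_BLEND0_CONTROL__COLOR_DESTBLEND_MASK) >>
				 CB_BLEND0_CONTROL__COLOR_DESTBLEND__SHIFT;
	unsigned int enabled   = (reg & CB_BLEND0_CONTROL__ENABLE_MASK) >>
				 CB_BLEND0_CONTROL__ENABLE__SHIFT;

	/* values would normally feed blend-state programming; ignored here */
	(void)color_src;
	(void)color_dst;
	(void)enabled;
}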
+//CB_BLEND1_CONTROL
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND1_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND1_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND1_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND1_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND1_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND1_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND1_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND1_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND1_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND1_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND1_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND2_CONTROL
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND2_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND2_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND2_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND2_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND2_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND2_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND2_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND2_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND2_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND2_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND2_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND3_CONTROL
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND3_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND3_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND3_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND3_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND3_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND3_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND3_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND3_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND3_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND3_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND3_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND4_CONTROL
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND4_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND4_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND4_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND4_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND4_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND4_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND4_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND4_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND4_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND4_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND4_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND5_CONTROL
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND5_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND5_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND5_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND5_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND5_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND5_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND5_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND5_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND5_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND5_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND5_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND6_CONTROL
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND6_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND6_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND6_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND6_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND6_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND6_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND6_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND6_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND6_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND6_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND6_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//CB_BLEND7_CONTROL
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND__SHIFT 0x0
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN__SHIFT 0x5
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND__SHIFT 0x8
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND__SHIFT 0x10
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN__SHIFT 0x15
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND__SHIFT 0x18
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND__SHIFT 0x1d
+#define CB_BLEND7_CONTROL__ENABLE__SHIFT 0x1e
+#define CB_BLEND7_CONTROL__DISABLE_ROP3__SHIFT 0x1f
+#define CB_BLEND7_CONTROL__COLOR_SRCBLEND_MASK 0x0000001FL
+#define CB_BLEND7_CONTROL__COLOR_COMB_FCN_MASK 0x000000E0L
+#define CB_BLEND7_CONTROL__COLOR_DESTBLEND_MASK 0x00001F00L
+#define CB_BLEND7_CONTROL__ALPHA_SRCBLEND_MASK 0x001F0000L
+#define CB_BLEND7_CONTROL__ALPHA_COMB_FCN_MASK 0x00E00000L
+#define CB_BLEND7_CONTROL__ALPHA_DESTBLEND_MASK 0x1F000000L
+#define CB_BLEND7_CONTROL__SEPARATE_ALPHA_BLEND_MASK 0x20000000L
+#define CB_BLEND7_CONTROL__ENABLE_MASK 0x40000000L
+#define CB_BLEND7_CONTROL__DISABLE_ROP3_MASK 0x80000000L
+//GFX_COPY_STATE
+#define GFX_COPY_STATE__SRC_STATE_ID__SHIFT 0x0
+#define GFX_COPY_STATE__SRC_STATE_ID_MASK 0x00000007L
+//PA_CL_POINT_X_RAD
+#define PA_CL_POINT_X_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_X_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_Y_RAD
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_Y_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_SIZE
+#define PA_CL_POINT_SIZE__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_SIZE__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_POINT_CULL_RAD
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_POINT_CULL_RAD__DATA_REGISTER_MASK 0xFFFFFFFFL
+//VGT_DMA_BASE_HI
+#define VGT_DMA_BASE_HI__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE_HI__BASE_ADDR_MASK 0x0000FFFFL
+//VGT_DMA_BASE
+#define VGT_DMA_BASE__BASE_ADDR__SHIFT 0x0
+#define VGT_DMA_BASE__BASE_ADDR_MASK 0xFFFFFFFFL
+//VGT_DRAW_INITIATOR
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT__SHIFT 0x0
+#define VGT_DRAW_INITIATOR__MAJOR_MODE__SHIFT 0x2
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX__SHIFT 0x4
+#define VGT_DRAW_INITIATOR__NOT_EOP__SHIFT 0x5
+#define VGT_DRAW_INITIATOR__USE_OPAQUE__SHIFT 0x6
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX__SHIFT 0x1d
+#define VGT_DRAW_INITIATOR__SOURCE_SELECT_MASK 0x00000003L
+#define VGT_DRAW_INITIATOR__MAJOR_MODE_MASK 0x0000000CL
+#define VGT_DRAW_INITIATOR__SPRITE_EN_R6XX_MASK 0x00000010L
+#define VGT_DRAW_INITIATOR__NOT_EOP_MASK 0x00000020L
+#define VGT_DRAW_INITIATOR__USE_OPAQUE_MASK 0x00000040L
+#define VGT_DRAW_INITIATOR__REG_RT_INDEX_MASK 0xE0000000L
+//VGT_EVENT_ADDRESS_REG
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW__SHIFT 0x0
+#define VGT_EVENT_ADDRESS_REG__ADDRESS_LOW_MASK 0x0FFFFFFFL
+//GE_MAX_OUTPUT_PER_SUBGROUP
+#define GE_MAX_OUTPUT_PER_SUBGROUP__MAX_VERTS_PER_SUBGROUP__SHIFT 0x0
+#define GE_MAX_OUTPUT_PER_SUBGROUP__MAX_VERTS_PER_SUBGROUP_MASK 0x000003FFL
+//DB_DEPTH_CONTROL
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE__SHIFT 0x0
+#define DB_DEPTH_CONTROL__Z_ENABLE__SHIFT 0x1
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE__SHIFT 0x2
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE__SHIFT 0x3
+#define DB_DEPTH_CONTROL__ZFUNC__SHIFT 0x4
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE__SHIFT 0x7
+#define DB_DEPTH_CONTROL__STENCILFUNC__SHIFT 0x8
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF__SHIFT 0x14
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL__SHIFT 0x1e
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS__SHIFT 0x1f
+#define DB_DEPTH_CONTROL__STENCIL_ENABLE_MASK 0x00000001L
+#define DB_DEPTH_CONTROL__Z_ENABLE_MASK 0x00000002L
+#define DB_DEPTH_CONTROL__Z_WRITE_ENABLE_MASK 0x00000004L
+#define DB_DEPTH_CONTROL__DEPTH_BOUNDS_ENABLE_MASK 0x00000008L
+#define DB_DEPTH_CONTROL__ZFUNC_MASK 0x00000070L
+#define DB_DEPTH_CONTROL__BACKFACE_ENABLE_MASK 0x00000080L
+#define DB_DEPTH_CONTROL__STENCILFUNC_MASK 0x00000700L
+#define DB_DEPTH_CONTROL__STENCILFUNC_BF_MASK 0x00700000L
+#define DB_DEPTH_CONTROL__ENABLE_COLOR_WRITES_ON_DEPTH_FAIL_MASK 0x40000000L
+#define DB_DEPTH_CONTROL__DISABLE_COLOR_WRITES_ON_DEPTH_PASS_MASK 0x80000000L
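(Editorial note, not part of the patch: DB_DEPTH_CONTROL contains multi-bit comparison fields such as the 3-bit ZFUNC alongside single-bit enables. A minimal sketch of a read-modify-write on a cached register value, assuming the generated header is included; the helper is hypothetical.)

/* Illustrative only: replace the ZFUNC field of a cached DB_DEPTH_CONTROL
 * value without disturbing the remaining bits.
 */
static unsigned int example_set_zfunc(unsigned int reg, unsigned int zfunc)
{
	reg &= ~DB_DEPTH_CONTROL__ZFUNC_MASK;
	reg |= (zfunc << DB_DEPTH_CONTROL__ZFUNC__SHIFT) &
	       DB_DEPTH_CONTROL__ZFUNC_MASK;
	return reg;
}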
+//DB_EQAA
+#define DB_EQAA__MAX_ANCHOR_SAMPLES__SHIFT 0x0
+#define DB_EQAA__PS_ITER_SAMPLES__SHIFT 0x4
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES__SHIFT 0x8
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES__SHIFT 0xc
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS__SHIFT 0x10
+#define DB_EQAA__INCOHERENT_EQAA_READS__SHIFT 0x11
+#define DB_EQAA__INTERPOLATE_COMP_Z__SHIFT 0x12
+#define DB_EQAA__INTERPOLATE_SRC_Z__SHIFT 0x13
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS__SHIFT 0x14
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE__SHIFT 0x15
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT__SHIFT 0x18
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION__SHIFT 0x1b
+#define DB_EQAA__MAX_ANCHOR_SAMPLES_MASK 0x00000007L
+#define DB_EQAA__PS_ITER_SAMPLES_MASK 0x00000070L
+#define DB_EQAA__MASK_EXPORT_NUM_SAMPLES_MASK 0x00000700L
+#define DB_EQAA__ALPHA_TO_MASK_NUM_SAMPLES_MASK 0x00007000L
+#define DB_EQAA__HIGH_QUALITY_INTERSECTIONS_MASK 0x00010000L
+#define DB_EQAA__INCOHERENT_EQAA_READS_MASK 0x00020000L
+#define DB_EQAA__INTERPOLATE_COMP_Z_MASK 0x00040000L
+#define DB_EQAA__INTERPOLATE_SRC_Z_MASK 0x00080000L
+#define DB_EQAA__STATIC_ANCHOR_ASSOCIATIONS_MASK 0x00100000L
+#define DB_EQAA__ALPHA_TO_MASK_EQAA_DISABLE_MASK 0x00200000L
+#define DB_EQAA__OVERRASTERIZATION_AMOUNT_MASK 0x07000000L
+#define DB_EQAA__ENABLE_POSTZ_OVERRASTERIZATION_MASK 0x08000000L
+//CB_COLOR_CONTROL
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD__SHIFT 0x0
+#define CB_COLOR_CONTROL__ENABLE_1FRAG_PS_INVOKE__SHIFT 0x1
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE__SHIFT 0x3
+#define CB_COLOR_CONTROL__MODE__SHIFT 0x4
+#define CB_COLOR_CONTROL__ROP3__SHIFT 0x10
+#define CB_COLOR_CONTROL__DISABLE_DUAL_QUAD_MASK 0x00000001L
+#define CB_COLOR_CONTROL__ENABLE_1FRAG_PS_INVOKE_MASK 0x00000002L
+#define CB_COLOR_CONTROL__DEGAMMA_ENABLE_MASK 0x00000008L
+#define CB_COLOR_CONTROL__MODE_MASK 0x00000070L
+#define CB_COLOR_CONTROL__ROP3_MASK 0x00FF0000L
+//DB_SHADER_CONTROL
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE__SHIFT 0x0
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE__SHIFT 0x1
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE__SHIFT 0x2
+#define DB_SHADER_CONTROL__Z_ORDER__SHIFT 0x4
+#define DB_SHADER_CONTROL__KILL_ENABLE__SHIFT 0x6
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE__SHIFT 0x7
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE__SHIFT 0x8
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL__SHIFT 0x9
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP__SHIFT 0xa
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE__SHIFT 0xb
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER__SHIFT 0xc
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT__SHIFT 0xd
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE__SHIFT 0xf
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER__SHIFT 0x10
+#define DB_SHADER_CONTROL__PRE_SHADER_DEPTH_COVERAGE_ENABLE__SHIFT 0x17
+#define DB_SHADER_CONTROL__OREO_BLEND_ENABLE__SHIFT 0x18
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_ENABLE__SHIFT 0x19
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE__SHIFT 0x1a
+#define DB_SHADER_CONTROL__Z_EXPORT_ENABLE_MASK 0x00000001L
+#define DB_SHADER_CONTROL__STENCIL_TEST_VAL_EXPORT_ENABLE_MASK 0x00000002L
+#define DB_SHADER_CONTROL__STENCIL_OP_VAL_EXPORT_ENABLE_MASK 0x00000004L
+#define DB_SHADER_CONTROL__Z_ORDER_MASK 0x00000030L
+#define DB_SHADER_CONTROL__KILL_ENABLE_MASK 0x00000040L
+#define DB_SHADER_CONTROL__COVERAGE_TO_MASK_ENABLE_MASK 0x00000080L
+#define DB_SHADER_CONTROL__MASK_EXPORT_ENABLE_MASK 0x00000100L
+#define DB_SHADER_CONTROL__EXEC_ON_HIER_FAIL_MASK 0x00000200L
+#define DB_SHADER_CONTROL__EXEC_ON_NOOP_MASK 0x00000400L
+#define DB_SHADER_CONTROL__ALPHA_TO_MASK_DISABLE_MASK 0x00000800L
+#define DB_SHADER_CONTROL__DEPTH_BEFORE_SHADER_MASK 0x00001000L
+#define DB_SHADER_CONTROL__CONSERVATIVE_Z_EXPORT_MASK 0x00006000L
+#define DB_SHADER_CONTROL__DUAL_QUAD_DISABLE_MASK 0x00008000L
+#define DB_SHADER_CONTROL__PRIMITIVE_ORDERED_PIXEL_SHADER_MASK 0x00010000L
+#define DB_SHADER_CONTROL__PRE_SHADER_DEPTH_COVERAGE_ENABLE_MASK 0x00800000L
+#define DB_SHADER_CONTROL__OREO_BLEND_ENABLE_MASK 0x01000000L
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_ENABLE_MASK 0x02000000L
+#define DB_SHADER_CONTROL__OVERRIDE_INTRINSIC_RATE_MASK 0x1C000000L
+//PA_CL_CLIP_CNTL
+#define PA_CL_CLIP_CNTL__UCP_ENA_0__SHIFT 0x0
+#define PA_CL_CLIP_CNTL__UCP_ENA_1__SHIFT 0x1
+#define PA_CL_CLIP_CNTL__UCP_ENA_2__SHIFT 0x2
+#define PA_CL_CLIP_CNTL__UCP_ENA_3__SHIFT 0x3
+#define PA_CL_CLIP_CNTL__UCP_ENA_4__SHIFT 0x4
+#define PA_CL_CLIP_CNTL__UCP_ENA_5__SHIFT 0x5
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG__SHIFT 0xd
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE__SHIFT 0xe
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE__SHIFT 0x10
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA__SHIFT 0x11
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA__SHIFT 0x12
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF__SHIFT 0x13
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT__SHIFT 0x14
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR__SHIFT 0x15
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL__SHIFT 0x16
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA__SHIFT 0x18
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE__SHIFT 0x19
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE__SHIFT 0x1a
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE__SHIFT 0x1b
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA__SHIFT 0x1c
+#define PA_CL_CLIP_CNTL__UCP_ENA_0_MASK 0x00000001L
+#define PA_CL_CLIP_CNTL__UCP_ENA_1_MASK 0x00000002L
+#define PA_CL_CLIP_CNTL__UCP_ENA_2_MASK 0x00000004L
+#define PA_CL_CLIP_CNTL__UCP_ENA_3_MASK 0x00000008L
+#define PA_CL_CLIP_CNTL__UCP_ENA_4_MASK 0x00000010L
+#define PA_CL_CLIP_CNTL__UCP_ENA_5_MASK 0x00000020L
+#define PA_CL_CLIP_CNTL__PS_UCP_Y_SCALE_NEG_MASK 0x00002000L
+#define PA_CL_CLIP_CNTL__PS_UCP_MODE_MASK 0x0000C000L
+#define PA_CL_CLIP_CNTL__CLIP_DISABLE_MASK 0x00010000L
+#define PA_CL_CLIP_CNTL__UCP_CULL_ONLY_ENA_MASK 0x00020000L
+#define PA_CL_CLIP_CNTL__BOUNDARY_EDGE_FLAG_ENA_MASK 0x00040000L
+#define PA_CL_CLIP_CNTL__DX_CLIP_SPACE_DEF_MASK 0x00080000L
+#define PA_CL_CLIP_CNTL__DIS_CLIP_ERR_DETECT_MASK 0x00100000L
+#define PA_CL_CLIP_CNTL__VTX_KILL_OR_MASK 0x00200000L
+#define PA_CL_CLIP_CNTL__DX_RASTERIZATION_KILL_MASK 0x00400000L
+#define PA_CL_CLIP_CNTL__DX_LINEAR_ATTR_CLIP_ENA_MASK 0x01000000L
+#define PA_CL_CLIP_CNTL__VTE_VPORT_PROVOKE_DISABLE_MASK 0x02000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_NEAR_DISABLE_MASK 0x04000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_FAR_DISABLE_MASK 0x08000000L
+#define PA_CL_CLIP_CNTL__ZCLIP_PROG_NEAR_ENA_MASK 0x10000000L
+//PA_SU_SC_MODE_CNTL
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT__SHIFT 0x0
+#define PA_SU_SC_MODE_CNTL__CULL_BACK__SHIFT 0x1
+#define PA_SU_SC_MODE_CNTL__FACE__SHIFT 0x2
+#define PA_SU_SC_MODE_CNTL__POLY_MODE__SHIFT 0x3
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE__SHIFT 0x5
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE__SHIFT 0x8
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE__SHIFT 0xb
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE__SHIFT 0xc
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE__SHIFT 0xd
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE__SHIFT 0x10
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST__SHIFT 0x13
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS__SHIFT 0x14
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA__SHIFT 0x15
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF__SHIFT 0x16
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION__SHIFT 0x17
+#define PA_SU_SC_MODE_CNTL__KEEP_TOGETHER_ENABLE__SHIFT 0x18
+#define PA_SU_SC_MODE_CNTL__CULL_FRONT_MASK 0x00000001L
+#define PA_SU_SC_MODE_CNTL__CULL_BACK_MASK 0x00000002L
+#define PA_SU_SC_MODE_CNTL__FACE_MASK 0x00000004L
+#define PA_SU_SC_MODE_CNTL__POLY_MODE_MASK 0x00000018L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_FRONT_PTYPE_MASK 0x000000E0L
+#define PA_SU_SC_MODE_CNTL__POLYMODE_BACK_PTYPE_MASK 0x00000700L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_FRONT_ENABLE_MASK 0x00000800L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_BACK_ENABLE_MASK 0x00001000L
+#define PA_SU_SC_MODE_CNTL__POLY_OFFSET_PARA_ENABLE_MASK 0x00002000L
+#define PA_SU_SC_MODE_CNTL__VTX_WINDOW_OFFSET_ENABLE_MASK 0x00010000L
+#define PA_SU_SC_MODE_CNTL__PROVOKING_VTX_LAST_MASK 0x00080000L
+#define PA_SU_SC_MODE_CNTL__PERSP_CORR_DIS_MASK 0x00100000L
+#define PA_SU_SC_MODE_CNTL__MULTI_PRIM_IB_ENA_MASK 0x00200000L
+#define PA_SU_SC_MODE_CNTL__RIGHT_TRIANGLE_ALTERNATE_GRADIENT_REF_MASK 0x00400000L
+#define PA_SU_SC_MODE_CNTL__NEW_QUAD_DECOMPOSITION_MASK 0x00800000L
+#define PA_SU_SC_MODE_CNTL__KEEP_TOGETHER_ENABLE_MASK 0x01000000L
+//PA_CL_VTE_CNTL
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA__SHIFT 0x0
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA__SHIFT 0x1
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA__SHIFT 0x2
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA__SHIFT 0x3
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA__SHIFT 0x4
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA__SHIFT 0x5
+#define PA_CL_VTE_CNTL__VTX_XY_FMT__SHIFT 0x8
+#define PA_CL_VTE_CNTL__VTX_Z_FMT__SHIFT 0x9
+#define PA_CL_VTE_CNTL__VTX_W0_FMT__SHIFT 0xa
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF__SHIFT 0xb
+#define PA_CL_VTE_CNTL__VPORT_X_SCALE_ENA_MASK 0x00000001L
+#define PA_CL_VTE_CNTL__VPORT_X_OFFSET_ENA_MASK 0x00000002L
+#define PA_CL_VTE_CNTL__VPORT_Y_SCALE_ENA_MASK 0x00000004L
+#define PA_CL_VTE_CNTL__VPORT_Y_OFFSET_ENA_MASK 0x00000008L
+#define PA_CL_VTE_CNTL__VPORT_Z_SCALE_ENA_MASK 0x00000010L
+#define PA_CL_VTE_CNTL__VPORT_Z_OFFSET_ENA_MASK 0x00000020L
+#define PA_CL_VTE_CNTL__VTX_XY_FMT_MASK 0x00000100L
+#define PA_CL_VTE_CNTL__VTX_Z_FMT_MASK 0x00000200L
+#define PA_CL_VTE_CNTL__VTX_W0_FMT_MASK 0x00000400L
+#define PA_CL_VTE_CNTL__PERFCOUNTER_REF_MASK 0x00000800L
+//PA_CL_VS_OUT_CNTL
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0__SHIFT 0x0
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1__SHIFT 0x1
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2__SHIFT 0x2
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3__SHIFT 0x3
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4__SHIFT 0x4
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5__SHIFT 0x5
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6__SHIFT 0x6
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7__SHIFT 0x7
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0__SHIFT 0x8
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1__SHIFT 0x9
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2__SHIFT 0xa
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3__SHIFT 0xb
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4__SHIFT 0xc
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5__SHIFT 0xd
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6__SHIFT 0xe
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7__SHIFT 0xf
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE__SHIFT 0x10
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG__SHIFT 0x11
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX__SHIFT 0x12
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX__SHIFT 0x13
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG__SHIFT 0x14
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA__SHIFT 0x15
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA__SHIFT 0x16
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA__SHIFT 0x17
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1b
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT 0x1c
+#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT 0x1d
+#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT 0x1e
+#define PA_CL_VS_OUT_CNTL__USE_VTX_FSR_SELECT__SHIFT 0x1f
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_1_MASK 0x00000002L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_2_MASK 0x00000004L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_3_MASK 0x00000008L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_4_MASK 0x00000010L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_5_MASK 0x00000020L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_6_MASK 0x00000040L
+#define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_7_MASK 0x00000080L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_0_MASK 0x00000100L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_1_MASK 0x00000200L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_2_MASK 0x00000400L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_3_MASK 0x00000800L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_4_MASK 0x00001000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_5_MASK 0x00002000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_6_MASK 0x00004000L
+#define PA_CL_VS_OUT_CNTL__CULL_DIST_ENA_7_MASK 0x00008000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_POINT_SIZE_MASK 0x00010000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_EDGE_FLAG_MASK 0x00020000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_RENDER_TARGET_INDX_MASK 0x00040000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VIEWPORT_INDX_MASK 0x00080000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_KILL_FLAG_MASK 0x00100000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_VEC_ENA_MASK 0x00200000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST0_VEC_ENA_MASK 0x00400000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_CCDIST1_VEC_ENA_MASK 0x00800000L
+#define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x08000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK 0x10000000L
+#define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK 0x20000000L
+#define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK 0x40000000L
+#define PA_CL_VS_OUT_CNTL__USE_VTX_FSR_SELECT_MASK 0x80000000L
+//PA_CL_NANINF_CNTL
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD__SHIFT 0x0
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD__SHIFT 0x1
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD__SHIFT 0x2
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0__SHIFT 0x3
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN__SHIFT 0x4
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN__SHIFT 0x5
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN__SHIFT 0x6
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0__SHIFT 0x7
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF__SHIFT 0x8
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN__SHIFT 0x9
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF__SHIFT 0xa
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN__SHIFT 0xb
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF__SHIFT 0xc
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN__SHIFT 0xd
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD__SHIFT 0xe
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0__SHIFT 0x14
+#define PA_CL_NANINF_CNTL__VTE_XY_INF_DISCARD_MASK 0x00000001L
+#define PA_CL_NANINF_CNTL__VTE_Z_INF_DISCARD_MASK 0x00000002L
+#define PA_CL_NANINF_CNTL__VTE_W_INF_DISCARD_MASK 0x00000004L
+#define PA_CL_NANINF_CNTL__VTE_0XNANINF_IS_0_MASK 0x00000008L
+#define PA_CL_NANINF_CNTL__VTE_XY_NAN_RETAIN_MASK 0x00000010L
+#define PA_CL_NANINF_CNTL__VTE_Z_NAN_RETAIN_MASK 0x00000020L
+#define PA_CL_NANINF_CNTL__VTE_W_NAN_RETAIN_MASK 0x00000040L
+#define PA_CL_NANINF_CNTL__VTE_W_RECIP_NAN_IS_0_MASK 0x00000080L
+#define PA_CL_NANINF_CNTL__VS_XY_NAN_TO_INF_MASK 0x00000100L
+#define PA_CL_NANINF_CNTL__VS_XY_INF_RETAIN_MASK 0x00000200L
+#define PA_CL_NANINF_CNTL__VS_Z_NAN_TO_INF_MASK 0x00000400L
+#define PA_CL_NANINF_CNTL__VS_Z_INF_RETAIN_MASK 0x00000800L
+#define PA_CL_NANINF_CNTL__VS_W_NAN_TO_INF_MASK 0x00001000L
+#define PA_CL_NANINF_CNTL__VS_W_INF_RETAIN_MASK 0x00002000L
+#define PA_CL_NANINF_CNTL__VS_CLIP_DIST_INF_DISCARD_MASK 0x00004000L
+#define PA_CL_NANINF_CNTL__VTE_NO_OUTPUT_NEG_0_MASK 0x00100000L
+//PA_SU_LINE_STIPPLE_CNTL
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH__SHIFT 0x2
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM__SHIFT 0x3
+#define PA_SU_LINE_STIPPLE_CNTL__LINE_STIPPLE_RESET_MASK 0x00000003L
+#define PA_SU_LINE_STIPPLE_CNTL__EXPAND_FULL_LENGTH_MASK 0x00000004L
+#define PA_SU_LINE_STIPPLE_CNTL__FRACTIONAL_ACCUM_MASK 0x00000008L
+//PA_SU_LINE_STIPPLE_SCALE
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_SCALE__LINE_STIPPLE_SCALE_MASK 0xFFFFFFFFL
+//PA_SU_PRIM_FILTER_CNTL
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x0
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA__SHIFT 0x4
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA__SHIFT 0x5
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA__SHIFT 0x6
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA__SHIFT 0x7
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT__SHIFT 0x8
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION__SHIFT 0x1e
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION__SHIFT 0x1f
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000001L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_PRIM_FILTER_CNTL__TRIANGLE_EXPAND_ENA_MASK 0x00000010L
+#define PA_SU_PRIM_FILTER_CNTL__LINE_EXPAND_ENA_MASK 0x00000020L
+#define PA_SU_PRIM_FILTER_CNTL__POINT_EXPAND_ENA_MASK 0x00000040L
+#define PA_SU_PRIM_FILTER_CNTL__RECTANGLE_EXPAND_ENA_MASK 0x00000080L
+#define PA_SU_PRIM_FILTER_CNTL__PRIM_EXPAND_CONSTANT_MASK 0x0000FF00L
+#define PA_SU_PRIM_FILTER_CNTL__XMAX_RIGHT_EXCLUSION_MASK 0x40000000L
+#define PA_SU_PRIM_FILTER_CNTL__YMAX_BOTTOM_EXCLUSION_MASK 0x80000000L
+//PA_SU_SMALL_PRIM_FILTER_CNTL
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE__SHIFT 0x0
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE__SHIFT 0x1
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE__SHIFT 0x6
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
+#define PA_SU_SMALL_PRIM_FILTER_CNTL__SC_1XMSAA_COMPATIBLE_DISABLE_MASK 0x00000040L
+//PA_CL_NGG_CNTL
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF__SHIFT 0x0
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA__SHIFT 0x1
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_DEPTH__SHIFT 0x2
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_OFF_MASK 0x00000001L
+#define PA_CL_NGG_CNTL__INDEX_BUF_EDGE_FLAG_ENA_MASK 0x00000002L
+#define PA_CL_NGG_CNTL__VERTEX_REUSE_DEPTH_MASK 0x000003FCL
+//PA_SU_OVER_RASTERIZATION_CNTL
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES__SHIFT 0x0
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES__SHIFT 0x1
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS__SHIFT 0x2
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES__SHIFT 0x3
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW__SHIFT 0x4
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_TRIANGLES_MASK 0x00000001L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_LINES_MASK 0x00000002L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_POINTS_MASK 0x00000004L
+#define PA_SU_OVER_RASTERIZATION_CNTL__DISCARD_0_AREA_RECTANGLES_MASK 0x00000008L
+#define PA_SU_OVER_RASTERIZATION_CNTL__USE_PROVOKING_ZW_MASK 0x00000010L
+//PA_STEREO_CNTL
+#define PA_STEREO_CNTL__STEREO_MODE__SHIFT 0x1
+#define PA_STEREO_CNTL__RT_SLICE_MODE__SHIFT 0x5
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET__SHIFT 0x8
+#define PA_STEREO_CNTL__VP_ID_MODE__SHIFT 0x10
+#define PA_STEREO_CNTL__VP_ID_OFFSET__SHIFT 0x13
+#define PA_STEREO_CNTL__FSR_MODE__SHIFT 0x18
+#define PA_STEREO_CNTL__FSR_OFFSET__SHIFT 0x1a
+#define PA_STEREO_CNTL__STEREO_MODE_MASK 0x0000001EL
+#define PA_STEREO_CNTL__RT_SLICE_MODE_MASK 0x000000E0L
+#define PA_STEREO_CNTL__RT_SLICE_OFFSET_MASK 0x00000F00L
+#define PA_STEREO_CNTL__VP_ID_MODE_MASK 0x00070000L
+#define PA_STEREO_CNTL__VP_ID_OFFSET_MASK 0x00780000L
+#define PA_STEREO_CNTL__FSR_MODE_MASK 0x03000000L
+#define PA_STEREO_CNTL__FSR_OFFSET_MASK 0x0C000000L
+//PA_STATE_STEREO_X
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0
+#define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL
+//PA_CL_VRS_CNTL
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT 0x0
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT 0x3
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT 0x6
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT 0x9
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT 0xd
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT 0xe
+#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK 0x00000007L
+#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK 0x00000038L
+#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK 0x000001C0L
+#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK 0x00000E00L
+#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK 0x00002000L
+#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK 0x00004000L
+//PA_SU_POINT_SIZE
+#define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0
+#define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10
+#define PA_SU_POINT_SIZE__HEIGHT_MASK 0x0000FFFFL
+#define PA_SU_POINT_SIZE__WIDTH_MASK 0xFFFF0000L
+//PA_SU_POINT_MINMAX
+#define PA_SU_POINT_MINMAX__MIN_SIZE__SHIFT 0x0
+#define PA_SU_POINT_MINMAX__MAX_SIZE__SHIFT 0x10
+#define PA_SU_POINT_MINMAX__MIN_SIZE_MASK 0x0000FFFFL
+#define PA_SU_POINT_MINMAX__MAX_SIZE_MASK 0xFFFF0000L
+//PA_SU_LINE_CNTL
+#define PA_SU_LINE_CNTL__WIDTH__SHIFT 0x0
+#define PA_SU_LINE_CNTL__WIDTH_MASK 0x0000FFFFL
+//PA_SC_LINE_STIPPLE
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT__SHIFT 0x10
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER__SHIFT 0x1c
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL__SHIFT 0x1d
+#define PA_SC_LINE_STIPPLE__LINE_PATTERN_MASK 0x0000FFFFL
+#define PA_SC_LINE_STIPPLE__REPEAT_COUNT_MASK 0x00FF0000L
+#define PA_SC_LINE_STIPPLE__PATTERN_BIT_ORDER_MASK 0x10000000L
+#define PA_SC_LINE_STIPPLE__AUTO_RESET_CNTL_MASK 0x60000000L
+//VGT_HOS_MAX_TESS_LEVEL
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS__SHIFT 0x0
+#define VGT_HOS_MAX_TESS_LEVEL__MAX_TESS_MASK 0xFFFFFFFFL
+//VGT_HOS_MIN_TESS_LEVEL
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS__SHIFT 0x0
+#define VGT_HOS_MIN_TESS_LEVEL__MIN_TESS_MASK 0xFFFFFFFFL
+//PA_SC_MODE_CNTL_0
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE__SHIFT 0x1
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE__SHIFT 0x2
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR__SHIFT 0x3
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE__SHIFT 0x5
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB__SHIFT 0x6
+#define PA_SC_MODE_CNTL_0__MSAA_ENABLE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_0__VPORT_SCISSOR_ENABLE_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_0__LINE_STIPPLE_ENABLE_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_0__SEND_UNLIT_STILES_TO_PKR_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_0__ALTERNATE_RBS_PER_TILE_MASK 0x00000020L
+#define PA_SC_MODE_CNTL_0__COARSE_TILE_STARTS_ON_EVEN_RB_MASK 0x00000040L
+//PA_SC_MODE_CNTL_1
+#define PA_SC_MODE_CNTL_1__WALK_SIZE__SHIFT 0x0
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT__SHIFT 0x1
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST__SHIFT 0x2
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE__SHIFT 0x3
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE__SHIFT 0x4
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE__SHIFT 0x7
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE__SHIFT 0x8
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE__SHIFT 0x9
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR__SHIFT 0xa
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT__SHIFT 0xb
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET__SHIFT 0xc
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT__SHIFT 0xd
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z__SHIFT 0xe
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK__SHIFT 0xf
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE__SHIFT 0x10
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE__SHIFT 0x11
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE__SHIFT 0x12
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE__SHIFT 0x13
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE__SHIFT 0x14
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE__SHIFT 0x18
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE__SHIFT 0x19
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE__SHIFT 0x1a
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE__SHIFT 0x1b
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK__SHIFT 0x1c
+#define PA_SC_MODE_CNTL_1__WALK_SIZE_MASK 0x00000001L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGNMENT_MASK 0x00000002L
+#define PA_SC_MODE_CNTL_1__WALK_ALIGN8_PRIM_FITS_ST_MASK 0x00000004L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_ENABLE_MASK 0x00000008L
+#define PA_SC_MODE_CNTL_1__WALK_FENCE_SIZE_MASK 0x00000070L
+#define PA_SC_MODE_CNTL_1__SUPERTILE_WALK_ORDER_ENABLE_MASK 0x00000080L
+#define PA_SC_MODE_CNTL_1__TILE_WALK_ORDER_ENABLE_MASK 0x00000100L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_DISABLE_MASK 0x00000200L
+#define PA_SC_MODE_CNTL_1__TILE_COVER_NO_SCISSOR_MASK 0x00000400L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_EXTENT_MASK 0x00000800L
+#define PA_SC_MODE_CNTL_1__ZMM_LINE_OFFSET_MASK 0x00001000L
+#define PA_SC_MODE_CNTL_1__ZMM_RECT_EXTENT_MASK 0x00002000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_HI_Z_MASK 0x00004000L
+#define PA_SC_MODE_CNTL_1__KILL_PIX_POST_DETAIL_MASK_MASK 0x00008000L
+#define PA_SC_MODE_CNTL_1__PS_ITER_SAMPLE_MASK 0x00010000L
+#define PA_SC_MODE_CNTL_1__MULTI_SHADER_ENGINE_PRIM_DISCARD_ENABLE_MASK 0x00020000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_SUPERTILE_ENABLE_MASK 0x00040000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_ENABLE_MASK 0x00080000L
+#define PA_SC_MODE_CNTL_1__GPU_ID_OVERRIDE_MASK 0x00F00000L
+#define PA_SC_MODE_CNTL_1__MULTI_GPU_PRIM_DISCARD_ENABLE_MASK 0x01000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_CNTDWN_ENABLE_MASK 0x02000000L
+#define PA_SC_MODE_CNTL_1__FORCE_EOV_REZ_ENABLE_MASK 0x04000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_PRIMITIVE_ENABLE_MASK 0x08000000L
+#define PA_SC_MODE_CNTL_1__OUT_OF_ORDER_WATER_MARK_MASK 0x70000000L
+//VGT_ENHANCE
+#define VGT_ENHANCE__MISC__SHIFT 0x0
+#define VGT_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//IA_ENHANCE
+#define IA_ENHANCE__MISC__SHIFT 0x0
+#define IA_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_DMA_SIZE
+#define VGT_DMA_SIZE__NUM_INDICES__SHIFT 0x0
+#define VGT_DMA_SIZE__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_DMA_MAX_SIZE
+#define VGT_DMA_MAX_SIZE__MAX_SIZE__SHIFT 0x0
+#define VGT_DMA_MAX_SIZE__MAX_SIZE_MASK 0xFFFFFFFFL
+//VGT_DMA_INDEX_TYPE
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE__SHIFT 0x2
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE__SHIFT 0x4
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY__SHIFT 0x6
+#define VGT_DMA_INDEX_TYPE__ATC__SHIFT 0x8
+#define VGT_DMA_INDEX_TYPE__NOT_EOP__SHIFT 0x9
+#define VGT_DMA_INDEX_TYPE__REQ_PATH__SHIFT 0xa
+#define VGT_DMA_INDEX_TYPE__MTYPE__SHIFT 0xb
+#define VGT_DMA_INDEX_TYPE__DISABLE_INSTANCE_PACKING__SHIFT 0xe
+#define VGT_DMA_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_DMA_INDEX_TYPE__SWAP_MODE_MASK 0x0000000CL
+#define VGT_DMA_INDEX_TYPE__BUF_TYPE_MASK 0x00000030L
+#define VGT_DMA_INDEX_TYPE__RDREQ_POLICY_MASK 0x000000C0L
+#define VGT_DMA_INDEX_TYPE__ATC_MASK 0x00000100L
+#define VGT_DMA_INDEX_TYPE__NOT_EOP_MASK 0x00000200L
+#define VGT_DMA_INDEX_TYPE__REQ_PATH_MASK 0x00000400L
+#define VGT_DMA_INDEX_TYPE__MTYPE_MASK 0x00003800L
+#define VGT_DMA_INDEX_TYPE__DISABLE_INSTANCE_PACKING_MASK 0x00004000L
+//WD_ENHANCE
+#define WD_ENHANCE__MISC__SHIFT 0x0
+#define WD_ENHANCE__MISC_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_EN
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN__SHIFT 0x0
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI__SHIFT 0x1
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE__SHIFT 0x2
+#define VGT_PRIMITIVEID_EN__PRIMITIVEID_EN_MASK 0x00000001L
+#define VGT_PRIMITIVEID_EN__DISABLE_RESET_ON_EOI_MASK 0x00000002L
+#define VGT_PRIMITIVEID_EN__NGG_DISABLE_PROVOK_REUSE_MASK 0x00000004L
+//VGT_DMA_NUM_INSTANCES
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_DMA_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_PRIMITIVEID_RESET
+#define VGT_PRIMITIVEID_RESET__VALUE__SHIFT 0x0
+#define VGT_PRIMITIVEID_RESET__VALUE_MASK 0xFFFFFFFFL
+//VGT_EVENT_INITIATOR
+#define VGT_EVENT_INITIATOR__EVENT_TYPE__SHIFT 0x0
+#define VGT_EVENT_INITIATOR__ADDRESS_HI__SHIFT 0xa
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT__SHIFT 0x1b
+#define VGT_EVENT_INITIATOR__EVENT_TYPE_MASK 0x0000003FL
+#define VGT_EVENT_INITIATOR__ADDRESS_HI_MASK 0x07FFFC00L
+#define VGT_EVENT_INITIATOR__EXTENDED_EVENT_MASK 0x08000000L
+//VGT_DRAW_PAYLOAD_CNTL
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX__SHIFT 0x1
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PRIM_PAYLOAD__SHIFT 0x3
+#define VGT_DRAW_PAYLOAD_CNTL__EN_DRAW_VP__SHIFT 0x4
+#define VGT_DRAW_PAYLOAD_CNTL__EN_FSR__SHIFT 0x5
+#define VGT_DRAW_PAYLOAD_CNTL__EN_VRS_RATE__SHIFT 0x6
+#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_PRIM_PAYLOAD_MASK 0x00000008L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_DRAW_VP_MASK 0x00000010L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_FSR_MASK 0x00000020L
+#define VGT_DRAW_PAYLOAD_CNTL__EN_VRS_RATE_MASK 0x00000040L
+//VGT_ESGS_RING_ITEMSIZE
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE__SHIFT 0x0
+#define VGT_ESGS_RING_ITEMSIZE__ITEMSIZE_MASK 0x00007FFFL
+//VGT_REUSE_OFF
+#define VGT_REUSE_OFF__REUSE_OFF__SHIFT 0x0
+#define VGT_REUSE_OFF__REUSE_OFF_MASK 0x00000001L
+//DB_HTILE_SURFACE
+#define DB_HTILE_SURFACE__RESERVED_FIELD_1__SHIFT 0x0
+#define DB_HTILE_SURFACE__FULL_CACHE__SHIFT 0x1
+#define DB_HTILE_SURFACE__RESERVED_FIELD_2__SHIFT 0x2
+#define DB_HTILE_SURFACE__RESERVED_FIELD_3__SHIFT 0x3
+#define DB_HTILE_SURFACE__RESERVED_FIELD_4__SHIFT 0x4
+#define DB_HTILE_SURFACE__RESERVED_FIELD_5__SHIFT 0xa
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10
+#define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT 0x11
+#define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12
+#define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK 0x00000001L
+#define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK 0x00000004L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_3_MASK 0x00000008L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_4_MASK 0x000003F0L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_5_MASK 0x0000FC00L
+#define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L
+#define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK 0x00020000L
+#define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L
+//DB_SRESULTS_COMPARE_STATE0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE0__COMPAREMASK0_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE0__ENABLE0_MASK 0x01000000L
+//DB_SRESULTS_COMPARE_STATE1
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1__SHIFT 0x0
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1__SHIFT 0x4
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1__SHIFT 0xc
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1__SHIFT 0x18
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREFUNC1_MASK 0x00000007L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREVALUE1_MASK 0x00000FF0L
+#define DB_SRESULTS_COMPARE_STATE1__COMPAREMASK1_MASK 0x000FF000L
+#define DB_SRESULTS_COMPARE_STATE1__ENABLE1_MASK 0x01000000L
+//DB_PRELOAD_CONTROL
+#define DB_PRELOAD_CONTROL__START_X__SHIFT 0x0
+#define DB_PRELOAD_CONTROL__START_Y__SHIFT 0x8
+#define DB_PRELOAD_CONTROL__MAX_X__SHIFT 0x10
+#define DB_PRELOAD_CONTROL__MAX_Y__SHIFT 0x18
+#define DB_PRELOAD_CONTROL__START_X_MASK 0x000000FFL
+#define DB_PRELOAD_CONTROL__START_Y_MASK 0x0000FF00L
+#define DB_PRELOAD_CONTROL__MAX_X_MASK 0x00FF0000L
+#define DB_PRELOAD_CONTROL__MAX_Y_MASK 0xFF000000L
+//VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE__SIZE_MASK 0xFFFFFFFFL
+//VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE__SHIFT 0x0
+#define VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE__VERTEX_STRIDE_MASK 0x000001FFL
+//VGT_GS_MAX_VERT_OUT
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT__SHIFT 0x0
+#define VGT_GS_MAX_VERT_OUT__MAX_VERT_OUT_MASK 0x000007FFL
+//GE_NGG_SUBGRP_CNTL
+#define GE_NGG_SUBGRP_CNTL__PRIM_AMP_FACTOR__SHIFT 0x0
+#define GE_NGG_SUBGRP_CNTL__THDS_PER_SUBGRP__SHIFT 0x9
+#define GE_NGG_SUBGRP_CNTL__PRIM_AMP_FACTOR_MASK 0x000001FFL
+#define GE_NGG_SUBGRP_CNTL__THDS_PER_SUBGRP_MASK 0x0003FE00L
+//VGT_TESS_DISTRIBUTION
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE__SHIFT 0x0
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT__SHIFT 0x1d
+#define VGT_TESS_DISTRIBUTION__ACCUM_ISOLINE_MASK 0x000000FFL
+#define VGT_TESS_DISTRIBUTION__ACCUM_TRI_MASK 0x0000FF00L
+#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0x00FF0000L
+#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0x1F000000L
+#define VGT_TESS_DISTRIBUTION__TRAP_SPLIT_MASK 0xE0000000L
+//VGT_SHADER_STAGES_EN
+#define VGT_SHADER_STAGES_EN__LS_EN__SHIFT 0x0
+#define VGT_SHADER_STAGES_EN__HS_EN__SHIFT 0x2
+#define VGT_SHADER_STAGES_EN__ES_EN__SHIFT 0x3
+#define VGT_SHADER_STAGES_EN__GS_EN__SHIFT 0x5
+#define VGT_SHADER_STAGES_EN__VS_EN__SHIFT 0x6
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS__SHIFT 0x8
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN__SHIFT 0xc
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN__SHIFT 0xd
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE__SHIFT 0xe
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE__SHIFT 0xf
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH__SHIFT 0x13
+#define VGT_SHADER_STAGES_EN__HS_W32_EN__SHIFT 0x15
+#define VGT_SHADER_STAGES_EN__GS_W32_EN__SHIFT 0x16
+#define VGT_SHADER_STAGES_EN__VS_W32_EN__SHIFT 0x17
+#define VGT_SHADER_STAGES_EN__NGG_WAVE_ID_EN__SHIFT 0x18
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_EN__SHIFT 0x19
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_NO_MSG__SHIFT 0x1a
+#define VGT_SHADER_STAGES_EN__LS_EN_MASK 0x00000003L
+#define VGT_SHADER_STAGES_EN__HS_EN_MASK 0x00000004L
+#define VGT_SHADER_STAGES_EN__ES_EN_MASK 0x00000018L
+#define VGT_SHADER_STAGES_EN__GS_EN_MASK 0x00000020L
+#define VGT_SHADER_STAGES_EN__VS_EN_MASK 0x000000C0L
+#define VGT_SHADER_STAGES_EN__DYNAMIC_HS_MASK 0x00000100L
+#define VGT_SHADER_STAGES_EN__VS_WAVE_ID_EN_MASK 0x00001000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_EN_MASK 0x00002000L
+#define VGT_SHADER_STAGES_EN__ORDERED_ID_MODE_MASK 0x00004000L
+#define VGT_SHADER_STAGES_EN__MAX_PRIMGRP_IN_WAVE_MASK 0x00078000L
+#define VGT_SHADER_STAGES_EN__GS_FAST_LAUNCH_MASK 0x00180000L
+#define VGT_SHADER_STAGES_EN__HS_W32_EN_MASK 0x00200000L
+#define VGT_SHADER_STAGES_EN__GS_W32_EN_MASK 0x00400000L
+#define VGT_SHADER_STAGES_EN__VS_W32_EN_MASK 0x00800000L
+#define VGT_SHADER_STAGES_EN__NGG_WAVE_ID_EN_MASK 0x01000000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_EN_MASK 0x02000000L
+#define VGT_SHADER_STAGES_EN__PRIMGEN_PASSTHRU_NO_MSG_MASK 0x04000000L
+//VGT_LS_HS_CONFIG
+#define VGT_LS_HS_CONFIG__NUM_PATCHES__SHIFT 0x0
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP__SHIFT 0x8
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP__SHIFT 0xe
+#define VGT_LS_HS_CONFIG__NUM_PATCHES_MASK 0x000000FFL
+#define VGT_LS_HS_CONFIG__HS_NUM_INPUT_CP_MASK 0x00003F00L
+#define VGT_LS_HS_CONFIG__HS_NUM_OUTPUT_CP_MASK 0x000FC000L
+//VGT_TF_PARAM
+#define VGT_TF_PARAM__TYPE__SHIFT 0x0
+#define VGT_TF_PARAM__PARTITIONING__SHIFT 0x2
+#define VGT_TF_PARAM__TOPOLOGY__SHIFT 0x5
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS__SHIFT 0x8
+#define VGT_TF_PARAM__NOT_USED__SHIFT 0x9
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD__SHIFT 0xa
+#define VGT_TF_PARAM__DISABLE_DONUTS__SHIFT 0xe
+#define VGT_TF_PARAM__RDREQ_POLICY__SHIFT 0xf
+#define VGT_TF_PARAM__DISTRIBUTION_MODE__SHIFT 0x11
+#define VGT_TF_PARAM__DETECT_ONE__SHIFT 0x13
+#define VGT_TF_PARAM__DETECT_ZERO__SHIFT 0x14
+#define VGT_TF_PARAM__MTYPE__SHIFT 0x17
+#define VGT_TF_PARAM__TYPE_MASK 0x00000003L
+#define VGT_TF_PARAM__PARTITIONING_MASK 0x0000001CL
+#define VGT_TF_PARAM__TOPOLOGY_MASK 0x000000E0L
+#define VGT_TF_PARAM__RESERVED_REDUC_AXIS_MASK 0x00000100L
+#define VGT_TF_PARAM__NOT_USED_MASK 0x00000200L
+#define VGT_TF_PARAM__NUM_DS_WAVES_PER_SIMD_MASK 0x00003C00L
+#define VGT_TF_PARAM__DISABLE_DONUTS_MASK 0x00004000L
+#define VGT_TF_PARAM__RDREQ_POLICY_MASK 0x00018000L
+#define VGT_TF_PARAM__DISTRIBUTION_MODE_MASK 0x00060000L
+#define VGT_TF_PARAM__DETECT_ONE_MASK 0x00080000L
+#define VGT_TF_PARAM__DETECT_ZERO_MASK 0x00100000L
+#define VGT_TF_PARAM__MTYPE_MASK 0x03800000L
+//DB_ALPHA_TO_MASK
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE__SHIFT 0x0
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0__SHIFT 0x8
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1__SHIFT 0xa
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2__SHIFT 0xc
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3__SHIFT 0xe
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND__SHIFT 0x10
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_ENABLE_MASK 0x00000001L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET0_MASK 0x00000300L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET1_MASK 0x00000C00L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET2_MASK 0x00003000L
+#define DB_ALPHA_TO_MASK__ALPHA_TO_MASK_OFFSET3_MASK 0x0000C000L
+#define DB_ALPHA_TO_MASK__OFFSET_ROUND_MASK 0x00010000L
+//PA_SU_POLY_OFFSET_DB_FMT_CNTL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT__SHIFT 0x8
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_NEG_NUM_DB_BITS_MASK 0x000000FFL
+#define PA_SU_POLY_OFFSET_DB_FMT_CNTL__POLY_OFFSET_DB_IS_FLOAT_FMT_MASK 0x00000100L
+//PA_SU_POLY_OFFSET_CLAMP
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_CLAMP__CLAMP_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_SCALE
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_FRONT_OFFSET
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_FRONT_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_SCALE
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_SCALE__SCALE_MASK 0xFFFFFFFFL
+//PA_SU_POLY_OFFSET_BACK_OFFSET
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET__SHIFT 0x0
+#define PA_SU_POLY_OFFSET_BACK_OFFSET__OFFSET_MASK 0xFFFFFFFFL
+//VGT_GS_INSTANCE_CNT
+#define VGT_GS_INSTANCE_CNT__ENABLE__SHIFT 0x0
+#define VGT_GS_INSTANCE_CNT__CNT__SHIFT 0x2
+#define VGT_GS_INSTANCE_CNT__EN_MAX_VERT_OUT_PER_GS_INSTANCE__SHIFT 0x1f
+#define VGT_GS_INSTANCE_CNT__ENABLE_MASK 0x00000001L
+#define VGT_GS_INSTANCE_CNT__CNT_MASK 0x000001FCL
+#define VGT_GS_INSTANCE_CNT__EN_MAX_VERT_OUT_PER_GS_INSTANCE_MASK 0x80000000L
+//PA_SC_CENTROID_PRIORITY_0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_0_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_1_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_2_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_3_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_4_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_5_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_6_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_0__DISTANCE_7_MASK 0xF0000000L
+//PA_SC_CENTROID_PRIORITY_1
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8__SHIFT 0x0
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9__SHIFT 0x4
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10__SHIFT 0x8
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11__SHIFT 0xc
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12__SHIFT 0x10
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13__SHIFT 0x14
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14__SHIFT 0x18
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15__SHIFT 0x1c
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_8_MASK 0x0000000FL
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_9_MASK 0x000000F0L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_10_MASK 0x00000F00L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_11_MASK 0x0000F000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_12_MASK 0x000F0000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_13_MASK 0x00F00000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_14_MASK 0x0F000000L
+#define PA_SC_CENTROID_PRIORITY_1__DISTANCE_15_MASK 0xF0000000L
+//PA_SC_LINE_CNTL
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH__SHIFT 0x9
+#define PA_SC_LINE_CNTL__LAST_PIXEL__SHIFT 0xa
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA__SHIFT 0xb
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA__SHIFT 0xc
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION__SHIFT 0xd
+#define PA_SC_LINE_CNTL__EXPAND_LINE_WIDTH_MASK 0x00000200L
+#define PA_SC_LINE_CNTL__LAST_PIXEL_MASK 0x00000400L
+#define PA_SC_LINE_CNTL__PERPENDICULAR_ENDCAP_ENA_MASK 0x00000800L
+#define PA_SC_LINE_CNTL__DX10_DIAMOND_TEST_ENA_MASK 0x00001000L
+#define PA_SC_LINE_CNTL__EXTRA_DX_DY_PRECISION_MASK 0x00002000L
+//PA_SC_AA_CONFIG
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES__SHIFT 0x0
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN__SHIFT 0x4
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST__SHIFT 0xd
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES__SHIFT 0x14
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE__SHIFT 0x18
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT__SHIFT 0x1a
+#define PA_SC_AA_CONFIG__SAMPLE_COVERAGE_ENCODING__SHIFT 0x1c
+#define PA_SC_AA_CONFIG__COVERED_CENTROID_IS_CENTER__SHIFT 0x1d
+#define PA_SC_AA_CONFIG__MSAA_NUM_SAMPLES_MASK 0x00000007L
+#define PA_SC_AA_CONFIG__AA_MASK_CENTROID_DTMN_MASK 0x00000010L
+#define PA_SC_AA_CONFIG__MAX_SAMPLE_DIST_MASK 0x0001E000L
+#define PA_SC_AA_CONFIG__MSAA_EXPOSED_SAMPLES_MASK 0x00700000L
+#define PA_SC_AA_CONFIG__DETAIL_TO_EXPOSED_MODE_MASK 0x03000000L
+#define PA_SC_AA_CONFIG__COVERAGE_TO_SHADER_SELECT_MASK 0x0C000000L
+#define PA_SC_AA_CONFIG__SAMPLE_COVERAGE_ENCODING_MASK 0x10000000L
+#define PA_SC_AA_CONFIG__COVERED_CENTROID_IS_CENTER_MASK 0x20000000L
+//PA_SU_VTX_CNTL
+#define PA_SU_VTX_CNTL__PIX_CENTER__SHIFT 0x0
+#define PA_SU_VTX_CNTL__ROUND_MODE__SHIFT 0x1
+#define PA_SU_VTX_CNTL__QUANT_MODE__SHIFT 0x3
+#define PA_SU_VTX_CNTL__PIX_CENTER_MASK 0x00000001L
+#define PA_SU_VTX_CNTL__ROUND_MODE_MASK 0x00000006L
+#define PA_SU_VTX_CNTL__QUANT_MODE_MASK 0x00000038L
+//PA_CL_GB_VERT_CLIP_ADJ
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_VERT_DISC_ADJ
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_VERT_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_CLIP_ADJ
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_CLIP_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_CL_GB_HORZ_DISC_ADJ
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER__SHIFT 0x0
+#define PA_CL_GB_HORZ_DISC_ADJ__DATA_REGISTER_MASK 0xFFFFFFFFL
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S0_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S1_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S2_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0__S3_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S4_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S5_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S6_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1__S7_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S8_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S9_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S10_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2__S11_Y_MASK 0xF0000000L
+//PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X__SHIFT 0x0
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y__SHIFT 0x4
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X__SHIFT 0x8
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y__SHIFT 0xc
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X__SHIFT 0x10
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y__SHIFT 0x14
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X__SHIFT 0x18
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y__SHIFT 0x1c
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_X_MASK 0x0000000FL
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S12_Y_MASK 0x000000F0L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_X_MASK 0x00000F00L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S13_Y_MASK 0x0000F000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_X_MASK 0x000F0000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S14_Y_MASK 0x00F00000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_X_MASK 0x0F000000L
+#define PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3__S15_Y_MASK 0xF0000000L
+//PA_SC_AA_MASK_X0Y0_X1Y0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X0Y0_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y0_X1Y0__AA_MASK_X1Y0_MASK 0xFFFF0000L
+//PA_SC_AA_MASK_X0Y1_X1Y1
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1__SHIFT 0x0
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1__SHIFT 0x10
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X0Y1_MASK 0x0000FFFFL
+#define PA_SC_AA_MASK_X0Y1_X1Y1__AA_MASK_X1Y1_MASK 0xFFFF0000L
+//PA_SC_SHADER_CONTROL
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES__SHIFT 0x0
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID__SHIFT 0x2
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION__SHIFT 0x3
+#define PA_SC_SHADER_CONTROL__WAVE_BREAK_REGION_SIZE__SHIFT 0x5
+#define PA_SC_SHADER_CONTROL__DISABLE_OREO_CONFLICT_QUAD__SHIFT 0x7
+#define PA_SC_SHADER_CONTROL__REALIGN_DQUADS_AFTER_N_WAVES_MASK 0x00000003L
+#define PA_SC_SHADER_CONTROL__LOAD_COLLISION_WAVEID_MASK 0x00000004L
+#define PA_SC_SHADER_CONTROL__LOAD_INTRAWAVE_COLLISION_MASK 0x00000008L
+#define PA_SC_SHADER_CONTROL__WAVE_BREAK_REGION_SIZE_MASK 0x00000060L
+#define PA_SC_SHADER_CONTROL__DISABLE_OREO_CONFLICT_QUAD_MASK 0x00000080L
+//PA_SC_BINNER_CNTL_0
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM__SHIFT 0x12
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_0__BIN_MAPPING_MODE__SHIFT 0x1d
+#define PA_SC_BINNER_CNTL_0__FSR_EXPANSION_ENABLE__SHIFT 0x1f
+#define PA_SC_BINNER_CNTL_0__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_X_EXTEND_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_0__BIN_SIZE_Y_EXTEND_MASK 0x00000380L
+#define PA_SC_BINNER_CNTL_0__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_0__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_0__DISABLE_START_OF_PRIM_MASK 0x00040000L
+#define PA_SC_BINNER_CNTL_0__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_0__OPTIMAL_BIN_SELECTION_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_0__FLUSH_ON_BINNING_TRANSITION_MASK 0x10000000L
+#define PA_SC_BINNER_CNTL_0__BIN_MAPPING_MODE_MASK 0x60000000L
+#define PA_SC_BINNER_CNTL_0__FSR_EXPANSION_ENABLE_MASK 0x80000000L
+//PA_SC_BINNER_CNTL_1
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH__SHIFT 0x10
+#define PA_SC_BINNER_CNTL_1__MAX_ALLOC_COUNT_MASK 0x0000FFFFL
+#define PA_SC_BINNER_CNTL_1__MAX_PRIM_PER_BATCH_MASK 0xFFFF0000L
+//PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE__SHIFT 0x0
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT__SHIFT 0x1
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE__SHIFT 0x5
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT__SHIFT 0x6
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE__SHIFT 0xa
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT__SHIFT 0xb
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET__SHIFT 0xc
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL__SHIFT 0xd
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL__SHIFT 0xe
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE__SHIFT 0xf
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE__SHIFT 0x10
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x12
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE__SHIFT 0x13
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE__SHIFT 0x14
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE__SHIFT 0x15
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE__SHIFT 0x16
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE__SHIFT 0x17
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE__SHIFT 0x18
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MULT__SHIFT 0x19
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_PBB_MULT__SHIFT 0x1b
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_ENABLE_MASK 0x00000001L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVER_RAST_SAMPLE_SELECT_MASK 0x0000001EL
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_ENABLE_MASK 0x00000020L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNDER_RAST_SAMPLE_SELECT_MASK 0x000003C0L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PBB_UNCERTAINTY_REGION_ENABLE_MASK 0x00000400L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_EXTENT_MASK 0x00000800L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__ZMM_TRI_OFFSET_MASK 0x00001000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_OVER_RAST_INNER_TO_NORMAL_MASK 0x00002000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OVERRIDE_UNDER_RAST_INNER_TO_NORMAL_MASK 0x00004000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__DEGENERATE_OVERRIDE_INNER_TO_NORMAL_DISABLE_MASK 0x00008000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MODE_MASK 0x00030000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__OUTER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00040000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__INNER_UNCERTAINTY_EDGERULE_OVERRIDE_MASK 0x00080000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__NULL_SQUAD_AA_MASK_ENABLE_MASK 0x00100000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__COVERAGE_AA_MASK_ENABLE_MASK 0x00200000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__PREZ_AA_MASK_ENABLE_MASK 0x00400000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__POSTZ_AA_MASK_ENABLE_MASK 0x00800000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__CENTROID_SAMPLE_OVERRIDE_MASK 0x01000000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_MULT_MASK 0x06000000L
+#define PA_SC_CONSERVATIVE_RASTERIZATION_CNTL__UNCERTAINTY_REGION_PBB_MULT_MASK 0x18000000L
+//PA_SC_NGG_MODE_CNTL
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE__SHIFT 0x0
+#define PA_SC_NGG_MODE_CNTL__DISABLE_FPOG_AND_DEALLOC_CONFLICT__SHIFT 0xc
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_DEALLOC__SHIFT 0xd
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_ATTRIBUTES__SHIFT 0xe
+#define PA_SC_NGG_MODE_CNTL__MAX_FPOVS_IN_WAVE__SHIFT 0x10
+#define PA_SC_NGG_MODE_CNTL__MAX_ATTRIBUTES_IN_WAVE__SHIFT 0x18
+#define PA_SC_NGG_MODE_CNTL__MAX_DEALLOCS_IN_WAVE_MASK 0x000007FFL
+#define PA_SC_NGG_MODE_CNTL__DISABLE_FPOG_AND_DEALLOC_CONFLICT_MASK 0x00001000L
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_DEALLOC_MASK 0x00002000L
+#define PA_SC_NGG_MODE_CNTL__DISABLE_MAX_ATTRIBUTES_MASK 0x00004000L
+#define PA_SC_NGG_MODE_CNTL__MAX_FPOVS_IN_WAVE_MASK 0x00FF0000L
+#define PA_SC_NGG_MODE_CNTL__MAX_ATTRIBUTES_IN_WAVE_MASK 0xFF000000L
+//PA_SC_BINNER_CNTL_2
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_X_MULT_BY_1P5X__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_Y_MULT_BY_1P5X__SHIFT 0x1
+#define PA_SC_BINNER_CNTL_2__ENABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION__SHIFT 0x2
+#define PA_SC_BINNER_CNTL_2__DUAL_LIGHT_SHAFT_IN_DRAW__SHIFT 0x3
+#define PA_SC_BINNER_CNTL_2__LIGHT_SHAFT_DRAW_CALL_LIMIT__SHIFT 0x4
+#define PA_SC_BINNER_CNTL_2__CONTEXT_DONE_EVENTS_PER_BIN__SHIFT 0x7
+#define PA_SC_BINNER_CNTL_2__ZPP_ENABLED__SHIFT 0xb
+#define PA_SC_BINNER_CNTL_2__ZPP_OPTIMIZATION_ENABLED__SHIFT 0xc
+#define PA_SC_BINNER_CNTL_2__ZPP_AREA_THRESHOLD__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_2__DISABLE_NOPCEXPORT_BREAKBATCH_CONDITION__SHIFT 0x15
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_X_MULT_BY_1P5X_MASK 0x00000001L
+#define PA_SC_BINNER_CNTL_2__BIN_SIZE_Y_MULT_BY_1P5X_MASK 0x00000002L
+#define PA_SC_BINNER_CNTL_2__ENABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION_MASK 0x00000004L
+#define PA_SC_BINNER_CNTL_2__DUAL_LIGHT_SHAFT_IN_DRAW_MASK 0x00000008L
+#define PA_SC_BINNER_CNTL_2__LIGHT_SHAFT_DRAW_CALL_LIMIT_MASK 0x00000070L
+#define PA_SC_BINNER_CNTL_2__CONTEXT_DONE_EVENTS_PER_BIN_MASK 0x00000780L
+#define PA_SC_BINNER_CNTL_2__ZPP_ENABLED_MASK 0x00000800L
+#define PA_SC_BINNER_CNTL_2__ZPP_OPTIMIZATION_ENABLED_MASK 0x00001000L
+#define PA_SC_BINNER_CNTL_2__ZPP_AREA_THRESHOLD_MASK 0x001FE000L
+#define PA_SC_BINNER_CNTL_2__DISABLE_NOPCEXPORT_BREAKBATCH_CONDITION_MASK 0x00200000L
+//CB_COLOR0_BASE
+#define CB_COLOR0_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_VIEW
+#define CB_COLOR0_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR0_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR0_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR0_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR0_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR0_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR0_INFO
+#define CB_COLOR0_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR0_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR0_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR0_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR0_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR0_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR0_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR0_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR0_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR0_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR0_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR0_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR0_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR0_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR0_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR0_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR0_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR0_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR0_ATTRIB
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR0_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR0_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR0_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR0_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR0_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR0_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR0_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR0_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR0_FDCC_CONTROL
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR0_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR0_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR0_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR0_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR0_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR0_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR0_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR0_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR0_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR0_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR0_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR0_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR0_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR0_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR0_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR0_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR0_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR0_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR0_DCC_BASE
+#define CB_COLOR0_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_BASE
+#define CB_COLOR1_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR1_VIEW
+#define CB_COLOR1_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR1_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR1_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR1_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR1_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR1_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR1_INFO
+#define CB_COLOR1_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR1_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR1_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR1_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR1_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR1_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR1_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR1_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR1_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR1_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR1_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR1_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR1_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR1_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR1_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR1_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR1_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR1_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR1_ATTRIB
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR1_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR1_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR1_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR1_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR1_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR1_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR1_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR1_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR1_FDCC_CONTROL
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR1_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR1_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR1_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR1_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR1_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR1_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR1_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR1_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR1_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR1_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR1_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR1_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR1_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR1_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR1_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR1_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR1_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR1_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR1_DCC_BASE
+#define CB_COLOR1_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_BASE
+#define CB_COLOR2_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR2_VIEW
+#define CB_COLOR2_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR2_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR2_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR2_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR2_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR2_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR2_INFO
+#define CB_COLOR2_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR2_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR2_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR2_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR2_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR2_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR2_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR2_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR2_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR2_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR2_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR2_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR2_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR2_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR2_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR2_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR2_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR2_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR2_ATTRIB
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR2_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR2_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR2_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR2_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR2_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR2_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR2_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR2_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR2_FDCC_CONTROL
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR2_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR2_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR2_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR2_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR2_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR2_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR2_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR2_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR2_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR2_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR2_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR2_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR2_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR2_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR2_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR2_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR2_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR2_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR2_DCC_BASE
+#define CB_COLOR2_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_BASE
+#define CB_COLOR3_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR3_VIEW
+#define CB_COLOR3_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR3_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR3_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR3_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR3_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR3_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR3_INFO
+#define CB_COLOR3_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR3_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR3_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR3_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR3_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR3_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR3_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR3_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR3_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR3_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR3_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR3_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR3_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR3_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR3_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR3_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR3_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR3_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR3_ATTRIB
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR3_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR3_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR3_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR3_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR3_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR3_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR3_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR3_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR3_FDCC_CONTROL
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR3_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR3_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR3_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR3_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR3_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR3_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR3_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR3_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR3_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR3_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR3_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR3_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR3_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR3_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR3_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR3_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR3_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR3_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR3_DCC_BASE
+#define CB_COLOR3_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_BASE
+#define CB_COLOR4_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR4_VIEW
+#define CB_COLOR4_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR4_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR4_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR4_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR4_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR4_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR4_INFO
+#define CB_COLOR4_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR4_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR4_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR4_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR4_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR4_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR4_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR4_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR4_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR4_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR4_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR4_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR4_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR4_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR4_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR4_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR4_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR4_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR4_ATTRIB
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR4_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR4_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR4_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR4_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR4_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR4_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR4_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR4_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR4_FDCC_CONTROL
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR4_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR4_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR4_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR4_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR4_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR4_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR4_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR4_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR4_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR4_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR4_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR4_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR4_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR4_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR4_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR4_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR4_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR4_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR4_DCC_BASE
+#define CB_COLOR4_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_BASE
+#define CB_COLOR5_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR5_VIEW
+#define CB_COLOR5_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR5_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR5_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR5_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR5_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR5_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR5_INFO
+#define CB_COLOR5_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR5_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR5_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR5_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR5_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR5_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR5_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR5_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR5_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR5_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR5_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR5_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR5_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR5_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR5_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR5_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR5_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR5_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR5_ATTRIB
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR5_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR5_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR5_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR5_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR5_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR5_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR5_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR5_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR5_FDCC_CONTROL
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR5_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR5_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR5_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR5_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR5_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR5_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR5_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR5_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR5_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR5_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR5_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR5_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR5_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR5_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR5_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR5_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR5_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR5_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR5_DCC_BASE
+#define CB_COLOR5_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_BASE
+#define CB_COLOR6_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR6_VIEW
+#define CB_COLOR6_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR6_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR6_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR6_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR6_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR6_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR6_INFO
+#define CB_COLOR6_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR6_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR6_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR6_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR6_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR6_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR6_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR6_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR6_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR6_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR6_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR6_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR6_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR6_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR6_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR6_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR6_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR6_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR6_ATTRIB
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR6_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR6_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR6_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR6_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR6_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR6_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR6_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR6_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR6_FDCC_CONTROL
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR6_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR6_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR6_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR6_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR6_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR6_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR6_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR6_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR6_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR6_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR6_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR6_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR6_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR6_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR6_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR6_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR6_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR6_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR6_DCC_BASE
+#define CB_COLOR6_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_BASE
+#define CB_COLOR7_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR7_VIEW
+#define CB_COLOR7_VIEW__SLICE_START__SHIFT 0x0
+#define CB_COLOR7_VIEW__SLICE_MAX__SHIFT 0xd
+#define CB_COLOR7_VIEW__MIP_LEVEL__SHIFT 0x1a
+#define CB_COLOR7_VIEW__SLICE_START_MASK 0x00001FFFL
+#define CB_COLOR7_VIEW__SLICE_MAX_MASK 0x03FFE000L
+#define CB_COLOR7_VIEW__MIP_LEVEL_MASK 0x3C000000L
+//CB_COLOR7_INFO
+#define CB_COLOR7_INFO__FORMAT__SHIFT 0x0
+#define CB_COLOR7_INFO__LINEAR_GENERAL__SHIFT 0x7
+#define CB_COLOR7_INFO__NUMBER_TYPE__SHIFT 0x8
+#define CB_COLOR7_INFO__COMP_SWAP__SHIFT 0xb
+#define CB_COLOR7_INFO__BLEND_CLAMP__SHIFT 0xf
+#define CB_COLOR7_INFO__BLEND_BYPASS__SHIFT 0x10
+#define CB_COLOR7_INFO__SIMPLE_FLOAT__SHIFT 0x11
+#define CB_COLOR7_INFO__ROUND_MODE__SHIFT 0x12
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST__SHIFT 0x14
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL__SHIFT 0x17
+#define CB_COLOR7_INFO__FORMAT_MASK 0x0000001FL
+#define CB_COLOR7_INFO__LINEAR_GENERAL_MASK 0x00000080L
+#define CB_COLOR7_INFO__NUMBER_TYPE_MASK 0x00000700L
+#define CB_COLOR7_INFO__COMP_SWAP_MASK 0x00001800L
+#define CB_COLOR7_INFO__BLEND_CLAMP_MASK 0x00008000L
+#define CB_COLOR7_INFO__BLEND_BYPASS_MASK 0x00010000L
+#define CB_COLOR7_INFO__SIMPLE_FLOAT_MASK 0x00020000L
+#define CB_COLOR7_INFO__ROUND_MODE_MASK 0x00040000L
+#define CB_COLOR7_INFO__BLEND_OPT_DONT_RD_DST_MASK 0x00700000L
+#define CB_COLOR7_INFO__BLEND_OPT_DISCARD_PIXEL_MASK 0x03800000L
+//CB_COLOR7_ATTRIB
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS__SHIFT 0x0
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1__SHIFT 0x2
+#define CB_COLOR7_ATTRIB__DISABLE_FMASK_NOALLOC_OPT__SHIFT 0x3
+#define CB_COLOR7_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX__SHIFT 0x4
+#define CB_COLOR7_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX__SHIFT 0x5
+#define CB_COLOR7_ATTRIB__NUM_FRAGMENTS_MASK 0x00000003L
+#define CB_COLOR7_ATTRIB__FORCE_DST_ALPHA_1_MASK 0x00000004L
+#define CB_COLOR7_ATTRIB__DISABLE_FMASK_NOALLOC_OPT_MASK 0x00000008L
+#define CB_COLOR7_ATTRIB__LIMIT_COLOR_FETCH_TO_256B_MAX_MASK 0x00000010L
+#define CB_COLOR7_ATTRIB__FORCE_LIMIT_COLOR_SECTOR_TO_256B_MAX_MASK 0x00000020L
+//CB_COLOR7_FDCC_CONTROL
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE__SHIFT 0x0
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE__SHIFT 0x1
+#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE__SHIFT 0x2
+#define CB_COLOR7_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE__SHIFT 0x4
+#define CB_COLOR7_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE__SHIFT 0x5
+#define CB_COLOR7_FDCC_CONTROL__COLOR_TRANSFORM__SHIFT 0x7
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS__SHIFT 0x9
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS__SHIFT 0xa
+#define CB_COLOR7_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG__SHIFT 0x12
+#define CB_COLOR7_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE__SHIFT 0x13
+#define CB_COLOR7_FDCC_CONTROL__SKIP_LOW_COMP_RATIO__SHIFT 0x15
+#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE__SHIFT 0x16
+#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE__SHIFT 0x17
+#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE__SHIFT 0x18
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_DISABLE_MASK 0x00000001L
+#define CB_COLOR7_FDCC_CONTROL__SAMPLE_MASK_TRACKER_FEA_FORCE_MASK 0x00000002L
+#define CB_COLOR7_FDCC_CONTROL__MAX_UNCOMPRESSED_BLOCK_SIZE_MASK 0x0000000CL
+#define CB_COLOR7_FDCC_CONTROL__MIN_COMPRESSED_BLOCK_SIZE_MASK 0x00000010L
+#define CB_COLOR7_FDCC_CONTROL__MAX_COMPRESSED_BLOCK_SIZE_MASK 0x00000060L
+#define CB_COLOR7_FDCC_CONTROL__COLOR_TRANSFORM_MASK 0x00000180L
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_64B_BLOCKS_MASK 0x00000200L
+#define CB_COLOR7_FDCC_CONTROL__INDEPENDENT_128B_BLOCKS_MASK 0x00000400L
+#define CB_COLOR7_FDCC_CONTROL__DISABLE_CONSTANT_ENCODE_REG_MASK 0x00040000L
+#define CB_COLOR7_FDCC_CONTROL__ENABLE_CONSTANT_ENCODE_REG_WRITE_MASK 0x00080000L
+#define CB_COLOR7_FDCC_CONTROL__SKIP_LOW_COMP_RATIO_MASK 0x00200000L
+#define CB_COLOR7_FDCC_CONTROL__FDCC_ENABLE_MASK 0x00400000L
+#define CB_COLOR7_FDCC_CONTROL__DCC_COMPRESS_DISABLE_MASK 0x00800000L
+#define CB_COLOR7_FDCC_CONTROL__FRAGMENT_COMPRESS_DISABLE_MASK 0x01000000L
+//CB_COLOR7_DCC_BASE
+#define CB_COLOR7_DCC_BASE__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE__BASE_256B_MASK 0xFFFFFFFFL
+//CB_COLOR0_BASE_EXT
+#define CB_COLOR0_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_BASE_EXT
+#define CB_COLOR1_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_BASE_EXT
+#define CB_COLOR2_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_BASE_EXT
+#define CB_COLOR3_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_BASE_EXT
+#define CB_COLOR4_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_BASE_EXT
+#define CB_COLOR5_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_BASE_EXT
+#define CB_COLOR6_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_BASE_EXT
+#define CB_COLOR7_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_DCC_BASE_EXT
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR0_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR1_DCC_BASE_EXT
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR1_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR2_DCC_BASE_EXT
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR2_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR3_DCC_BASE_EXT
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR3_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR4_DCC_BASE_EXT
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR4_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR5_DCC_BASE_EXT
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR5_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR6_DCC_BASE_EXT
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR6_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR7_DCC_BASE_EXT
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B__SHIFT 0x0
+#define CB_COLOR7_DCC_BASE_EXT__BASE_256B_MASK 0x000000FFL
+//CB_COLOR0_ATTRIB2
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR0_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR0_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR0_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR0_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR1_ATTRIB2
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR1_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR1_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR1_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR1_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR2_ATTRIB2
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR2_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR2_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR2_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR2_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR3_ATTRIB2
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR3_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR3_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR3_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR3_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR4_ATTRIB2
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR4_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR4_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR4_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR4_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR5_ATTRIB2
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR5_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR5_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR5_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR5_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR6_ATTRIB2
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR6_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR6_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR6_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR6_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR7_ATTRIB2
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT__SHIFT 0x0
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH__SHIFT 0xe
+#define CB_COLOR7_ATTRIB2__MAX_MIP__SHIFT 0x1c
+#define CB_COLOR7_ATTRIB2__MIP0_HEIGHT_MASK 0x00003FFFL
+#define CB_COLOR7_ATTRIB2__MIP0_WIDTH_MASK 0x0FFFC000L
+#define CB_COLOR7_ATTRIB2__MAX_MIP_MASK 0xF0000000L
+//CB_COLOR0_ATTRIB3
+#define CB_COLOR0_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR0_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR0_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR0_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR0_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR1_ATTRIB3
+#define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR1_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR1_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR1_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR2_ATTRIB3
+#define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR2_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR2_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR2_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR3_ATTRIB3
+#define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR3_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR3_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR3_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR4_ATTRIB3
+#define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR4_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR4_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR4_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR5_ATTRIB3
+#define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR5_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR5_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR5_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR6_ATTRIB3
+#define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR6_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR6_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR6_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+//CB_COLOR7_ATTRIB3
+#define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT 0x0
+#define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT 0xd
+#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE__SHIFT 0xe
+#define CB_COLOR7_ATTRIB3__RESOURCE_TYPE__SHIFT 0x18
+#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e
+#define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL
+#define CB_COLOR7_ATTRIB3__META_LINEAR_MASK 0x00002000L
+#define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L
+#define CB_COLOR7_ATTRIB3__RESOURCE_TYPE_MASK 0x03000000L
+#define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L
+
+
+// addressBlock: gc_pfvf_cpdec
+//CONFIG_RESERVED_REG0
+#define CONFIG_RESERVED_REG0__DATA__SHIFT 0x0
+#define CONFIG_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//CONFIG_RESERVED_REG1
+#define CONFIG_RESERVED_REG1__DATA__SHIFT 0x0
+#define CONFIG_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_CNTL
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET__SHIFT 0x14
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET__SHIFT 0x15
+#define CP_MEC_CNTL__MEC_ME2_PIPE2_RESET__SHIFT 0x16
+#define CP_MEC_CNTL__MEC_ME2_PIPE3_RESET__SHIFT 0x17
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x1b
+#define CP_MEC_CNTL__MEC_ME2_HALT__SHIFT 0x1c
+#define CP_MEC_CNTL__MEC_ME2_STEP__SHIFT 0x1d
+#define CP_MEC_CNTL__MEC_ME1_HALT__SHIFT 0x1e
+#define CP_MEC_CNTL__MEC_ME1_STEP__SHIFT 0x1f
+#define CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK 0x00100000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK 0x00200000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE2_RESET_MASK 0x00400000L
+#define CP_MEC_CNTL__MEC_ME2_PIPE3_RESET_MASK 0x00800000L
+#define CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x08000000L
+#define CP_MEC_CNTL__MEC_ME2_HALT_MASK 0x10000000L
+#define CP_MEC_CNTL__MEC_ME2_STEP_MASK 0x20000000L
+#define CP_MEC_CNTL__MEC_ME1_HALT_MASK 0x40000000L
+#define CP_MEC_CNTL__MEC_ME1_STEP_MASK 0x80000000L
+//CP_ME_CNTL
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE__SHIFT 0x6
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE__SHIFT 0x8
+#define CP_ME_CNTL__PFP_PIPE0_DISABLE__SHIFT 0xc
+#define CP_ME_CNTL__PFP_PIPE1_DISABLE__SHIFT 0xd
+#define CP_ME_CNTL__ME_PIPE0_DISABLE__SHIFT 0xe
+#define CP_ME_CNTL__ME_PIPE1_DISABLE__SHIFT 0xf
+#define CP_ME_CNTL__CE_PIPE0_RESET__SHIFT 0x10
+#define CP_ME_CNTL__CE_PIPE1_RESET__SHIFT 0x11
+#define CP_ME_CNTL__PFP_PIPE0_RESET__SHIFT 0x12
+#define CP_ME_CNTL__PFP_PIPE1_RESET__SHIFT 0x13
+#define CP_ME_CNTL__ME_PIPE0_RESET__SHIFT 0x14
+#define CP_ME_CNTL__ME_PIPE1_RESET__SHIFT 0x15
+#define CP_ME_CNTL__CE_HALT__SHIFT 0x18
+#define CP_ME_CNTL__CE_STEP__SHIFT 0x19
+#define CP_ME_CNTL__PFP_HALT__SHIFT 0x1a
+#define CP_ME_CNTL__PFP_STEP__SHIFT 0x1b
+#define CP_ME_CNTL__ME_HALT__SHIFT 0x1c
+#define CP_ME_CNTL__ME_STEP__SHIFT 0x1d
+#define CP_ME_CNTL__CE_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_ME_CNTL__PFP_INVALIDATE_ICACHE_MASK 0x00000040L
+#define CP_ME_CNTL__ME_INVALIDATE_ICACHE_MASK 0x00000100L
+#define CP_ME_CNTL__PFP_PIPE0_DISABLE_MASK 0x00001000L
+#define CP_ME_CNTL__PFP_PIPE1_DISABLE_MASK 0x00002000L
+#define CP_ME_CNTL__ME_PIPE0_DISABLE_MASK 0x00004000L
+#define CP_ME_CNTL__ME_PIPE1_DISABLE_MASK 0x00008000L
+#define CP_ME_CNTL__CE_PIPE0_RESET_MASK 0x00010000L
+#define CP_ME_CNTL__CE_PIPE1_RESET_MASK 0x00020000L
+#define CP_ME_CNTL__PFP_PIPE0_RESET_MASK 0x00040000L
+#define CP_ME_CNTL__PFP_PIPE1_RESET_MASK 0x00080000L
+#define CP_ME_CNTL__ME_PIPE0_RESET_MASK 0x00100000L
+#define CP_ME_CNTL__ME_PIPE1_RESET_MASK 0x00200000L
+#define CP_ME_CNTL__CE_HALT_MASK 0x01000000L
+#define CP_ME_CNTL__CE_STEP_MASK 0x02000000L
+#define CP_ME_CNTL__PFP_HALT_MASK 0x04000000L
+#define CP_ME_CNTL__PFP_STEP_MASK 0x08000000L
+#define CP_ME_CNTL__ME_HALT_MASK 0x10000000L
+#define CP_ME_CNTL__ME_STEP_MASK 0x20000000L
+
+
+// addressBlock: gc_pfvf_grbmdec
+//GRBM_GFX_CNTL
+#define GRBM_GFX_CNTL__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL__CTXID__SHIFT 0xb
+#define GRBM_GFX_CNTL__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL__QUEUEID_MASK 0x00000700L
+#define GRBM_GFX_CNTL__CTXID_MASK 0x00003800L
+//GRBM_NOWHERE
+#define GRBM_NOWHERE__DATA__SHIFT 0x0
+#define GRBM_NOWHERE__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfvf_padec
+//PA_SC_VRS_SURFACE_CNTL
+#define PA_SC_VRS_SURFACE_CNTL__VRC_CONTEXT_DONE_SYNC_DISABLE__SHIFT 0x6
+#define PA_SC_VRS_SURFACE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE__SHIFT 0x7
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_EVENT_MASK_DISABLE__SHIFT 0x8
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PREFETCH_DISABLE__SHIFT 0xd
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_NO_INV_DISABLE__SHIFT 0xe
+#define PA_SC_VRS_SURFACE_CNTL__VRC_NONSTALLING_FLUSH_DISABLE__SHIFT 0xf
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PARTIAL_FLUSH_DISABLE__SHIFT 0x10
+#define PA_SC_VRS_SURFACE_CNTL__VRC_AUTO_FLUSH__SHIFT 0x11
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EOP_SYNC_DISABLE__SHIFT 0x12
+#define PA_SC_VRS_SURFACE_CNTL__VRC_MAX_TAGS__SHIFT 0x13
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EVICT_POINT__SHIFT 0x1a
+#define PA_SC_VRS_SURFACE_CNTL__VRC_CONTEXT_DONE_SYNC_DISABLE_MASK 0x00000040L
+#define PA_SC_VRS_SURFACE_CNTL__VRS_FEEDBACK_RATE_OVERRIDE_MASK 0x00000080L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_EVENT_MASK_DISABLE_MASK 0x00001F00L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PREFETCH_DISABLE_MASK 0x00002000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_FLUSH_NO_INV_DISABLE_MASK 0x00004000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_NONSTALLING_FLUSH_DISABLE_MASK 0x00008000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_PARTIAL_FLUSH_DISABLE_MASK 0x00010000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_AUTO_FLUSH_MASK 0x00020000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EOP_SYNC_DISABLE_MASK 0x00040000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_MAX_TAGS_MASK 0x03F80000L
+#define PA_SC_VRS_SURFACE_CNTL__VRC_EVICT_POINT_MASK 0xFC000000L
+//PA_SC_ENHANCE
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER__SHIFT 0x0
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX__SHIFT 0x1
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX__SHIFT 0x2
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS__SHIFT 0x3
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID__SHIFT 0x4
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX__SHIFT 0x5
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER__SHIFT 0x6
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION__SHIFT 0x7
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM__SHIFT 0x8
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE__SHIFT 0x9
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE__SHIFT 0xa
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE__SHIFT 0xb
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS__SHIFT 0xc
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE__SHIFT 0xd
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE__SHIFT 0xe
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE__SHIFT 0xf
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST__SHIFT 0x10
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING__SHIFT 0x11
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY__SHIFT 0x12
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING__SHIFT 0x13
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING__SHIFT 0x14
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS__SHIFT 0x15
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID__SHIFT 0x16
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO__SHIFT 0x17
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING__SHIFT 0x19
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET__SHIFT 0x1a
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET__SHIFT 0x1b
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE__SHIFT 0x1c
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING__SHIFT 0x1d
+#define PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK 0x00000001L
+#define PA_SC_ENHANCE__DISABLE_SC_DB_TILE_FIX_MASK 0x00000002L
+#define PA_SC_ENHANCE__DISABLE_AA_MASK_FULL_FIX_MASK 0x00000004L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOCATIONS_MASK 0x00000008L
+#define PA_SC_ENHANCE__ENABLE_1XMSAA_SAMPLE_LOC_CENTROID_MASK 0x00000010L
+#define PA_SC_ENHANCE__DISABLE_SCISSOR_FIX_MASK 0x00000020L
+#define PA_SC_ENHANCE__SEND_UNLIT_STILES_TO_PACKER_MASK 0x00000040L
+#define PA_SC_ENHANCE__DISABLE_DUALGRAD_PERF_OPTIMIZATION_MASK 0x00000080L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_PRIM_MASK 0x00000100L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_SUPERTILE_MASK 0x00000200L
+#define PA_SC_ENHANCE__DISABLE_SC_PROCESS_RESET_TILE_MASK 0x00000400L
+#define PA_SC_ENHANCE__DISABLE_PA_SC_GUIDANCE_MASK 0x00000800L
+#define PA_SC_ENHANCE__DISABLE_EOV_ALL_CTRL_ONLY_COMBINATIONS_MASK 0x00001000L
+#define PA_SC_ENHANCE__ENABLE_MULTICYCLE_BUBBLE_FREEZE_MASK 0x00002000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_PA_SC_GUIDANCE_MASK 0x00004000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_POLY_MODE_MASK 0x00008000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EOP_SYNC_NULL_PRIMS_LAST_MASK 0x00010000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_THRESHOLD_SWITCHING_MASK 0x00020000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_THRESHOLD_SWITCH_AT_EOPG_ONLY_MASK 0x00040000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_DESIRED_FIFO_EMPTY_SWITCHING_MASK 0x00080000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_SELECTED_FIFO_EMPTY_SWITCHING_MASK 0x00100000L
+#define PA_SC_ENHANCE__DISABLE_OUT_OF_ORDER_EMPTY_SWITCHING_HYSTERYSIS_MASK 0x00200000L
+#define PA_SC_ENHANCE__ENABLE_OUT_OF_ORDER_DESIRED_FIFO_IS_NEXT_FEID_MASK 0x00400000L
+#define PA_SC_ENHANCE__DISABLE_OOO_NO_EOPG_SKEW_DESIRED_FIFO_IS_CURRENT_FIFO_MASK 0x00800000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE__OOO_DISABLE_EOPG_SKEW_THRESHOLD_SWITCHING_MASK 0x02000000L
+#define PA_SC_ENHANCE__DISABLE_EOP_LINE_STIPPLE_RESET_MASK 0x04000000L
+#define PA_SC_ENHANCE__DISABLE_VPZ_EOP_LINE_STIPPLE_RESET_MASK 0x08000000L
+#define PA_SC_ENHANCE__IOO_DISABLE_SCAN_UNSELECTED_FIFOS_FOR_DUAL_GFX_RING_CHANGE_MASK 0x10000000L
+#define PA_SC_ENHANCE__OOO_USE_ABSOLUTE_FIFO_COUNT_IN_THRESHOLD_SWITCHING_MASK 0x20000000L
+//PA_SC_ENHANCE_1
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE__SHIFT 0x0
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE__SHIFT 0x1
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING__SHIFT 0x3
+#define PA_SC_ENHANCE_1__BYPASS_PBB__SHIFT 0x4
+#define PA_SC_ENHANCE_1__DISABLE_NONBINNED_LIVE_PRIM_DG1_LS0_CL0_EOPKT_POKE__SHIFT 0x5
+#define PA_SC_ENHANCE_1__ECO_SPARE1__SHIFT 0x6
+#define PA_SC_ENHANCE_1__ECO_SPARE2__SHIFT 0x7
+#define PA_SC_ENHANCE_1__ECO_SPARE3__SHIFT 0x8
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB__SHIFT 0x9
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT__SHIFT 0xa
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM__SHIFT 0xb
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_COUNT_PIXELS__SHIFT 0xd
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE__SHIFT 0xe
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE__SHIFT 0x10
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION__SHIFT 0x12
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS__SHIFT 0x13
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION__SHIFT 0x14
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION__SHIFT 0x15
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION__SHIFT 0x16
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG__SHIFT 0x17
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT__SHIFT 0x18
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER__SHIFT 0x19
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION__SHIFT 0x1a
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE__SHIFT 0x1b
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX__SHIFT 0x1c
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1__SHIFT 0x1d
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI__SHIFT 0x1e
+#define PA_SC_ENHANCE_1__DISABLE_FSR_NEAR_AXIS_LINE_VERT_ORDER_SORT_FIX__SHIFT 0x1f
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_ENABLE_MASK 0x00000001L
+#define PA_SC_ENHANCE_1__REALIGN_DQUADS_OVERRIDE_MASK 0x00000006L
+#define PA_SC_ENHANCE_1__DISABLE_SC_BINNING_MASK 0x00000008L
+#define PA_SC_ENHANCE_1__BYPASS_PBB_MASK 0x00000010L
+#define PA_SC_ENHANCE_1__DISABLE_NONBINNED_LIVE_PRIM_DG1_LS0_CL0_EOPKT_POKE_MASK 0x00000020L
+#define PA_SC_ENHANCE_1__ECO_SPARE1_MASK 0x00000040L
+#define PA_SC_ENHANCE_1__ECO_SPARE2_MASK 0x00000080L
+#define PA_SC_ENHANCE_1__ECO_SPARE3_MASK 0x00000100L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PROCESS_RESET_PBB_MASK 0x00000200L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_OPT_MASK 0x00000400L
+#define PA_SC_ENHANCE_1__ENABLE_DFSM_FLUSH_EVENT_TO_FLUSH_POPS_CAM_MASK 0x00000800L
+#define PA_SC_ENHANCE_1__DEBUG_PIXEL_PICKER_COUNT_PIXELS_MASK 0x00002000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_DB_TILE_INTF_FINE_CLOCK_GATE_MASK 0x00004000L
+#define PA_SC_ENHANCE_1__DISABLE_PACKER_ODC_ENHANCE_MASK 0x00010000L
+#define PA_SC_ENHANCE_1__OPTIMAL_BIN_SELECTION_MASK 0x00040000L
+#define PA_SC_ENHANCE_1__DISABLE_FORCE_SOP_ALL_EVENTS_MASK 0x00080000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_CLK_OPTIMIZATION_MASK 0x00100000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_SCISSOR_CLK_OPTIMIZATION_MASK 0x00200000L
+#define PA_SC_ENHANCE_1__DISABLE_PBB_BINNING_CLK_OPTIMIZATION_MASK 0x00400000L
+#define PA_SC_ENHANCE_1__DISABLE_INTF_CG_MASK 0x00800000L
+#define PA_SC_ENHANCE_1__IOO_DISABLE_EOP_ON_FIRST_LIVE_PRIM_HIT_MASK 0x01000000L
+#define PA_SC_ENHANCE_1__DISABLE_SHADER_PROFILING_FOR_POWER_MASK 0x02000000L
+#define PA_SC_ENHANCE_1__FLUSH_ON_BINNING_TRANSITION_MASK 0x04000000L
+#define PA_SC_ENHANCE_1__DISABLE_QUAD_PROC_FDCE_ENHANCE_MASK 0x08000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_MASK 0x10000000L
+#define PA_SC_ENHANCE_1__DISABLE_SC_PS_PA_ARBITER_FIX_1_MASK 0x20000000L
+#define PA_SC_ENHANCE_1__PASS_VPZ_EVENT_TO_SPI_MASK 0x40000000L
+#define PA_SC_ENHANCE_1__DISABLE_FSR_NEAR_AXIS_LINE_VERT_ORDER_SORT_FIX_MASK 0x80000000L
+//PA_SC_ENHANCE_2
+#define PA_SC_ENHANCE_2__DISABLE_SC_MEM_MACRO_FINE_CLOCK_GATE__SHIFT 0x0
+#define PA_SC_ENHANCE_2__DISABLE_SC_DB_QUAD_INTF_FINE_CLOCK_GATE__SHIFT 0x1
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_QUAD_INTF_FINE_CLOCK_GATE__SHIFT 0x2
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_PRIM_INTF_FINE_CLOCK_GATE__SHIFT 0x3
+#define PA_SC_ENHANCE_2__ENABLE_LPOV_WAVE_BREAK__SHIFT 0x4
+#define PA_SC_ENHANCE_2__ENABLE_FPOV_WAVE_BREAK__SHIFT 0x5
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PRIM_PAYLOAD__SHIFT 0x7
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPE_SWITCH__SHIFT 0x8
+#define PA_SC_ENHANCE_2__DISABLE_FULL_TILE_WAVE_BREAK__SHIFT 0x9
+#define PA_SC_ENHANCE_2__ENABLE_VPZ_INJECTION_BEFORE_NULL_PRIMS__SHIFT 0xa
+#define PA_SC_ENHANCE_2__PBB_TIMEOUT_THRESHOLD_MODE__SHIFT 0xb
+#define PA_SC_ENHANCE_2__DISABLE_PACKER_GRAD_FDCE_ENHANCE__SHIFT 0xc
+#define PA_SC_ENHANCE_2__DISABLE_SC_SPI_INTF_EARLY_WAKEUP__SHIFT 0xd
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_INTF_EARLY_WAKEUP__SHIFT 0xe
+#define PA_SC_ENHANCE_2__DISABLE_EXPOSED_GT_DETAIL_RATE_TILE_COV_ADJ__SHIFT 0xf
+#define PA_SC_ENHANCE_2__PBB_WARP_CLK_MAIN_CLK_WAKEUP__SHIFT 0x10
+#define PA_SC_ENHANCE_2__PBB_MAIN_CLK_REG_BUSY_WAKEUP__SHIFT 0x11
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPELINE_RESET__SHIFT 0x12
+#define PA_SC_ENHANCE_2__DISABLE_SC_DBR_DATAPATH_FGCG__SHIFT 0x15
+#define PA_SC_ENHANCE_2__FSR_BB_OPTIMIZATION_DISABLE_OVERRIDE__SHIFT 0x16
+#define PA_SC_ENHANCE_2__PROCESS_RESET_FORCE_STILE_MASK_TO_ZERO__SHIFT 0x17
+#define PA_SC_ENHANCE_2__BREAK_WHEN_ONE_NULL_PRIM_BATCH__SHIFT 0x1a
+#define PA_SC_ENHANCE_2__NULL_PRIM_BREAK_BATCH_LIMIT__SHIFT 0x1b
+#define PA_SC_ENHANCE_2__DISABLE_MAX_DEALLOC_FORCE_EOV_RESET_N_WAVES_COUNT__SHIFT 0x1e
+#define PA_SC_ENHANCE_2__RSVD__SHIFT 0x1f
+#define PA_SC_ENHANCE_2__DISABLE_SC_MEM_MACRO_FINE_CLOCK_GATE_MASK 0x00000001L
+#define PA_SC_ENHANCE_2__DISABLE_SC_DB_QUAD_INTF_FINE_CLOCK_GATE_MASK 0x00000002L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_QUAD_INTF_FINE_CLOCK_GATE_MASK 0x00000004L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_PRIM_INTF_FINE_CLOCK_GATE_MASK 0x00000008L
+#define PA_SC_ENHANCE_2__ENABLE_LPOV_WAVE_BREAK_MASK 0x00000010L
+#define PA_SC_ENHANCE_2__ENABLE_FPOV_WAVE_BREAK_MASK 0x00000020L
+#define PA_SC_ENHANCE_2__ENABLE_SC_SEND_DB_VPZ_FOR_EN_PRIM_PAYLOAD_MASK 0x00000080L
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPE_SWITCH_MASK 0x00000100L
+#define PA_SC_ENHANCE_2__DISABLE_FULL_TILE_WAVE_BREAK_MASK 0x00000200L
+#define PA_SC_ENHANCE_2__ENABLE_VPZ_INJECTION_BEFORE_NULL_PRIMS_MASK 0x00000400L
+#define PA_SC_ENHANCE_2__PBB_TIMEOUT_THRESHOLD_MODE_MASK 0x00000800L
+#define PA_SC_ENHANCE_2__DISABLE_PACKER_GRAD_FDCE_ENHANCE_MASK 0x00001000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_SPI_INTF_EARLY_WAKEUP_MASK 0x00002000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_BCI_INTF_EARLY_WAKEUP_MASK 0x00004000L
+#define PA_SC_ENHANCE_2__DISABLE_EXPOSED_GT_DETAIL_RATE_TILE_COV_ADJ_MASK 0x00008000L
+#define PA_SC_ENHANCE_2__PBB_WARP_CLK_MAIN_CLK_WAKEUP_MASK 0x00010000L
+#define PA_SC_ENHANCE_2__PBB_MAIN_CLK_REG_BUSY_WAKEUP_MASK 0x00020000L
+#define PA_SC_ENHANCE_2__DISABLE_BREAK_BATCH_ON_GFX_PIPELINE_RESET_MASK 0x00040000L
+#define PA_SC_ENHANCE_2__DISABLE_SC_DBR_DATAPATH_FGCG_MASK 0x00200000L
+#define PA_SC_ENHANCE_2__FSR_BB_OPTIMIZATION_DISABLE_OVERRIDE_MASK 0x00400000L
+#define PA_SC_ENHANCE_2__PROCESS_RESET_FORCE_STILE_MASK_TO_ZERO_MASK 0x00800000L
+#define PA_SC_ENHANCE_2__BREAK_WHEN_ONE_NULL_PRIM_BATCH_MASK 0x04000000L
+#define PA_SC_ENHANCE_2__NULL_PRIM_BREAK_BATCH_LIMIT_MASK 0x38000000L
+#define PA_SC_ENHANCE_2__DISABLE_MAX_DEALLOC_FORCE_EOV_RESET_N_WAVES_COUNT_MASK 0x40000000L
+#define PA_SC_ENHANCE_2__RSVD_MASK 0x80000000L
+//PA_SC_ENHANCE_3
+#define PA_SC_ENHANCE_3__FORCE_USE_OF_SC_CENTROID_DATA__SHIFT 0x0
+#define PA_SC_ENHANCE_3__DISABLE_RB_MASK_COPY_FOR_NONP2_SA_PAIR_HARVEST__SHIFT 0x2
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
+#define PA_SC_ENHANCE_3__DISABLE_PKR_BCI_QUAD_NEW_PRIM_DATA_LOAD_OPTIMIZATION__SHIFT 0x4
+#define PA_SC_ENHANCE_3__DISABLE_CP_CONTEXT_DONE_PERFCOUNT_SAMPLE_EN__SHIFT 0x5
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_FIRST_PHASE_FILTER__SHIFT 0x6
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER__SHIFT 0x7
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_FOR_PBB_BINNED_PRIMS__SHIFT 0x8
+#define PA_SC_ENHANCE_3__DISABLE_SET_VPZ_DIRTY_EOPKT_LAST_PHASE_ONLY__SHIFT 0x9
+#define PA_SC_ENHANCE_3__DISABLE_PBB_EOP_OPTIMIZATION_WITH_SAME_CONTEXT_BATCHES__SHIFT 0xa
+#define PA_SC_ENHANCE_3__DISABLE_FAST_NULL_PRIM_OPTIMIZATION__SHIFT 0xb
+#define PA_SC_ENHANCE_3__USE_PBB_PRIM_STORAGE_WHEN_STALLED__SHIFT 0xc
+#define PA_SC_ENHANCE_3__DISABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION__SHIFT 0xd
+#define PA_SC_ENHANCE_3__DISABLE_ZPRE_PASS_OPTIMIZATION__SHIFT 0xe
+#define PA_SC_ENHANCE_3__DISABLE_EVENT_INCLUSION_IN_CONTEXT_STATES_PER_BIN__SHIFT 0xf
+#define PA_SC_ENHANCE_3__DISABLE_PIXEL_WAIT_SYNC_COUNTERS__SHIFT 0x10
+#define PA_SC_ENHANCE_3__DISABLE_SC_CPG_PSINVOC_SEDC_ISOLATION_ACCUM__SHIFT 0x11
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_FB_FINE_CLOCK_GATE__SHIFT 0x12
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_CACHE_RD_FINE_CLOCK_GATE__SHIFT 0x13
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_REZ_CNT_FOR_SPI_BACKPRESSURE_ONLY__SHIFT 0x14
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_CLK_CNT_FOR_SPI_BACKPRESSURE_ONLY__SHIFT 0x15
+#define PA_SC_ENHANCE_3__DO_NOT_INCLUDE_OREO_WAVEID_IN_FORCE_EOV_MAX_CNT_DISABLE__SHIFT 0x16
+#define PA_SC_ENHANCE_3__DISABLE_PWS_PRE_DEPTH_WAIT_SYNC_VPZ_INSERTION__SHIFT 0x17
+#define PA_SC_ENHANCE_3__PKR_CNT_FORCE_EOV_AT_QS_EMPTY_ONLY__SHIFT 0x18
+#define PA_SC_ENHANCE_3__PKR_S0_FORCE_EOV_STALL__SHIFT 0x19
+#define PA_SC_ENHANCE_3__PKR_S1_FORCE_EOV_STALL__SHIFT 0x1a
+#define PA_SC_ENHANCE_3__PKR_S2_FORCE_EOV_STALL__SHIFT 0x1b
+#define PA_SC_ENHANCE_3__ECO_SPARE0__SHIFT 0x1c
+#define PA_SC_ENHANCE_3__ECO_SPARE1__SHIFT 0x1d
+#define PA_SC_ENHANCE_3__ECO_SPARE2__SHIFT 0x1e
+#define PA_SC_ENHANCE_3__ECO_SPARE3__SHIFT 0x1f
+#define PA_SC_ENHANCE_3__FORCE_USE_OF_SC_CENTROID_DATA_MASK 0x00000001L
+#define PA_SC_ENHANCE_3__DISABLE_RB_MASK_COPY_FOR_NONP2_SA_PAIR_HARVEST_MASK 0x00000004L
+#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_BCI_QUAD_NEW_PRIM_DATA_LOAD_OPTIMIZATION_MASK 0x00000010L
+#define PA_SC_ENHANCE_3__DISABLE_CP_CONTEXT_DONE_PERFCOUNT_SAMPLE_EN_MASK 0x00000020L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_FIRST_PHASE_FILTER_MASK 0x00000040L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_MASK 0x00000080L
+#define PA_SC_ENHANCE_3__ENABLE_SINGLE_PA_EOPKT_LAST_PHASE_FILTER_FOR_PBB_BINNED_PRIMS_MASK 0x00000100L
+#define PA_SC_ENHANCE_3__DISABLE_SET_VPZ_DIRTY_EOPKT_LAST_PHASE_ONLY_MASK 0x00000200L
+#define PA_SC_ENHANCE_3__DISABLE_PBB_EOP_OPTIMIZATION_WITH_SAME_CONTEXT_BATCHES_MASK 0x00000400L
+#define PA_SC_ENHANCE_3__DISABLE_FAST_NULL_PRIM_OPTIMIZATION_MASK 0x00000800L
+#define PA_SC_ENHANCE_3__USE_PBB_PRIM_STORAGE_WHEN_STALLED_MASK 0x00001000L
+#define PA_SC_ENHANCE_3__DISABLE_LIGHT_VOLUME_RENDERING_OPTIMIZATION_MASK 0x00002000L
+#define PA_SC_ENHANCE_3__DISABLE_ZPRE_PASS_OPTIMIZATION_MASK 0x00004000L
+#define PA_SC_ENHANCE_3__DISABLE_EVENT_INCLUSION_IN_CONTEXT_STATES_PER_BIN_MASK 0x00008000L
+#define PA_SC_ENHANCE_3__DISABLE_PIXEL_WAIT_SYNC_COUNTERS_MASK 0x00010000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_CPG_PSINVOC_SEDC_ISOLATION_ACCUM_MASK 0x00020000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_FB_FINE_CLOCK_GATE_MASK 0x00040000L
+#define PA_SC_ENHANCE_3__DISABLE_SC_QP_VRS_RATE_CACHE_RD_FINE_CLOCK_GATE_MASK 0x00080000L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_REZ_CNT_FOR_SPI_BACKPRESSURE_ONLY_MASK 0x00100000L
+#define PA_SC_ENHANCE_3__DISABLE_PKR_FORCE_EOV_MAX_CLK_CNT_FOR_SPI_BACKPRESSURE_ONLY_MASK 0x00200000L
+#define PA_SC_ENHANCE_3__DO_NOT_INCLUDE_OREO_WAVEID_IN_FORCE_EOV_MAX_CNT_DISABLE_MASK 0x00400000L
+#define PA_SC_ENHANCE_3__DISABLE_PWS_PRE_DEPTH_WAIT_SYNC_VPZ_INSERTION_MASK 0x00800000L
+#define PA_SC_ENHANCE_3__PKR_CNT_FORCE_EOV_AT_QS_EMPTY_ONLY_MASK 0x01000000L
+#define PA_SC_ENHANCE_3__PKR_S0_FORCE_EOV_STALL_MASK 0x02000000L
+#define PA_SC_ENHANCE_3__PKR_S1_FORCE_EOV_STALL_MASK 0x04000000L
+#define PA_SC_ENHANCE_3__PKR_S2_FORCE_EOV_STALL_MASK 0x08000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE0_MASK 0x10000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE1_MASK 0x20000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE2_MASK 0x40000000L
+#define PA_SC_ENHANCE_3__ECO_SPARE3_MASK 0x80000000L
+//PA_SC_BINNER_CNTL_OVERRIDE
+#define PA_SC_BINNER_CNTL_OVERRIDE__BINNING_MODE__SHIFT 0x0
+#define PA_SC_BINNER_CNTL_OVERRIDE__CONTEXT_STATES_PER_BIN__SHIFT 0xa
+#define PA_SC_BINNER_CNTL_OVERRIDE__PERSISTENT_STATES_PER_BIN__SHIFT 0xd
+#define PA_SC_BINNER_CNTL_OVERRIDE__FPOVS_PER_BATCH__SHIFT 0x13
+#define PA_SC_BINNER_CNTL_OVERRIDE__DIRECT_OVERRIDE_MODE__SHIFT 0x1b
+#define PA_SC_BINNER_CNTL_OVERRIDE__OVERRIDE__SHIFT 0x1c
+#define PA_SC_BINNER_CNTL_OVERRIDE__BINNING_MODE_MASK 0x00000003L
+#define PA_SC_BINNER_CNTL_OVERRIDE__CONTEXT_STATES_PER_BIN_MASK 0x00001C00L
+#define PA_SC_BINNER_CNTL_OVERRIDE__PERSISTENT_STATES_PER_BIN_MASK 0x0003E000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__FPOVS_PER_BATCH_MASK 0x07F80000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__DIRECT_OVERRIDE_MODE_MASK 0x08000000L
+#define PA_SC_BINNER_CNTL_OVERRIDE__OVERRIDE_MASK 0xF0000000L
+//PA_SC_PBB_OVERRIDE_FLAG
+#define PA_SC_PBB_OVERRIDE_FLAG__OVERRIDE__SHIFT 0x0
+#define PA_SC_PBB_OVERRIDE_FLAG__PIPE_ID__SHIFT 0x1
+#define PA_SC_PBB_OVERRIDE_FLAG__OVERRIDE_MASK 0x00000001L
+#define PA_SC_PBB_OVERRIDE_FLAG__PIPE_ID_MASK 0x00000002L
+//PA_SC_DSM_CNTL
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0__SHIFT 0x0
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1__SHIFT 0x1
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_0_MASK 0x00000001L
+#define PA_SC_DSM_CNTL__FORCE_EOV_REZ_1_MASK 0x00000002L
+//PA_SC_TILE_STEERING_CREST_OVERRIDE
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE__SHIFT 0x0
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT__SHIFT 0x1
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT__SHIFT 0x5
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SA_SELECT__SHIFT 0x8
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__FORCE_TILE_STEERING_OVERRIDE_USE__SHIFT 0x1f
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__ONE_RB_MODE_ENABLE_MASK 0x00000001L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SE_SELECT_MASK 0x00000006L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__RB_SELECT_MASK 0x00000060L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__SA_SELECT_MASK 0x00000700L
+#define PA_SC_TILE_STEERING_CREST_OVERRIDE__FORCE_TILE_STEERING_OVERRIDE_USE_MASK 0x80000000L
+//PA_SC_FIFO_SIZE
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT 0xf
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT 0x15
+#define PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE_MASK 0x00007FC0L
+#define PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE_MASK 0x001F8000L
+#define PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE_MASK 0xFFE00000L
+//PA_SC_IF_FIFO_SIZE
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE__SHIFT 0x6
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE__SHIFT 0xc
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE__SHIFT 0x12
+#define PA_SC_IF_FIFO_SIZE__SC_DB_TILE_IF_FIFO_SIZE_MASK 0x0000003FL
+#define PA_SC_IF_FIFO_SIZE__SC_DB_QUAD_IF_FIFO_SIZE_MASK 0x00000FC0L
+#define PA_SC_IF_FIFO_SIZE__SC_SPI_IF_FIFO_SIZE_MASK 0x0003F000L
+#define PA_SC_IF_FIFO_SIZE__SC_BCI_IF_FIFO_SIZE_MASK 0x00FC0000L
+//PA_SC_PACKER_WAVE_ID_CNTL
+#define PA_SC_PACKER_WAVE_ID_CNTL__WAVE_TABLE_SIZE__SHIFT 0x0
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_DB_WAVE_IF_FIFO_SIZE__SHIFT 0xa
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_DB_WAVE_IF_FGCG_EN__SHIFT 0x10
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_SPI_WAVE_IF_FIFO_SIZE__SHIFT 0x11
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_SPI_WAVE_IF_FGCG_EN__SHIFT 0x17
+#define PA_SC_PACKER_WAVE_ID_CNTL__DEBUG_CONFLICT_QUAD__SHIFT 0x18
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_OREO_CONFLICT_QUAD__SHIFT 0x1f
+#define PA_SC_PACKER_WAVE_ID_CNTL__WAVE_TABLE_SIZE_MASK 0x000003FFL
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_DB_WAVE_IF_FIFO_SIZE_MASK 0x0000FC00L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_DB_WAVE_IF_FGCG_EN_MASK 0x00010000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__SC_SPI_WAVE_IF_FIFO_SIZE_MASK 0x007E0000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_SC_SPI_WAVE_IF_FGCG_EN_MASK 0x00800000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DEBUG_CONFLICT_QUAD_MASK 0x0F000000L
+#define PA_SC_PACKER_WAVE_ID_CNTL__DISABLE_OREO_CONFLICT_QUAD_MASK 0x80000000L
+//PA_SC_ATM_CNTL
+#define PA_SC_ATM_CNTL__SC_PC_IF_SIZE__SHIFT 0x0
+#define PA_SC_ATM_CNTL__DISABLE_SC_PC_IF_FGCG_EN__SHIFT 0x7
+#define PA_SC_ATM_CNTL__MAX_ATTRIBUTES_IN_WAVE__SHIFT 0x8
+#define PA_SC_ATM_CNTL__DISABLE_MAX_ATTRIBUTES__SHIFT 0x10
+#define PA_SC_ATM_CNTL__SELECT_MAX_ATTRIBUTES__SHIFT 0x11
+#define PA_SC_ATM_CNTL__SC_PC_IF_SIZE_MASK 0x0000003FL
+#define PA_SC_ATM_CNTL__DISABLE_SC_PC_IF_FGCG_EN_MASK 0x00000080L
+#define PA_SC_ATM_CNTL__MAX_ATTRIBUTES_IN_WAVE_MASK 0x0000FF00L
+#define PA_SC_ATM_CNTL__DISABLE_MAX_ATTRIBUTES_MASK 0x00010000L
+#define PA_SC_ATM_CNTL__SELECT_MAX_ATTRIBUTES_MASK 0x00020000L
+//PA_SC_PKR_WAVE_TABLE_CNTL
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE__SHIFT 0x0
+#define PA_SC_PKR_WAVE_TABLE_CNTL__SIZE_MASK 0x0000003FL
+//PA_SC_FORCE_EOV_MAX_CNTS
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT 0x0
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT 0x10
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT_MASK 0x0000FFFFL
+#define PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT_MASK 0xFFFF0000L
+//PA_SC_BINNER_EVENT_CNTL_0
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_0_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS1_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS2_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_0__SAMPLE_STREAMOUTSTATS3_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_TS_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_0__CONTEXT_DONE_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_0__CACHE_FLUSH_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_0__CS_PARTIAL_FLUSH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_SYNC_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RESERVED_9_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VGT_STREAMOUT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_INCR_DE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_0__END_OF_PIPE_IB_END_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__RST_PIX_CNT_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__BREAK_BATCH_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_0__VS_PARTIAL_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_1
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_1__WAIT_SYNC__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_1__BIN_CONF_OVERRIDE_CHECK__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_1__PS_PARTIAL_FLUSH_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_HS_OUTPUT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_DFSM_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_1__RESET_TO_LOWEST_VGT_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_TS_EVENT_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_1__WAIT_SYNC_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_1__CACHE_FLUSH_AND_INV_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_START_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_STOP_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_START_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PIPELINESTAT_STOP_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_1__PERFCOUNTER_SAMPLE_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_1__FLUSH_ES_OUTPUT_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__BIN_CONF_OVERRIDE_CHECK_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SAMPLE_PIPELINESTAT_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_1__SO_VGTSTREAMOUT_FLUSH_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_2
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_35__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_41__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_2__SAMPLE_STREAMOUTSTATS_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESET_VTX_CNT_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_2__BLOCK_CONTEXT_DONE_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_35_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_2__VGT_FLUSH_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_2__TGID_ROLLOVER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_2__SQ_NON_EVENT_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_2__SC_SEND_DB_VPZ_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_2__BOTTOM_OF_PIPE_TS_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_2__RESERVED_41_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_2__DB_CACHE_FLUSH_AND_INV_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_DATA_TS_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_DB_META_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_DATA_TS_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__FLUSH_AND_INV_CB_META_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_2__CS_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_EVENT_CNTL_3
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE__SHIFT 0x0
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA__SHIFT 0x2
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_50__SHIFT 0x4
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START__SHIFT 0x6
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP__SHIFT 0x8
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER__SHIFT 0xa
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_DRAW__SHIFT 0xc
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH__SHIFT 0xe
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL__SHIFT 0x10
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP__SHIFT 0x12
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET__SHIFT 0x14
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND__SHIFT 0x16
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC__SHIFT 0x18
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE__SHIFT 0x1a
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_PIPELINE_NOT_USED__SHIFT 0x1c
+#define PA_SC_BINNER_EVENT_CNTL_3__DRAW_DONE__SHIFT 0x1e
+#define PA_SC_BINNER_EVENT_CNTL_3__PS_DONE_MASK 0x00000003L
+#define PA_SC_BINNER_EVENT_CNTL_3__FLUSH_AND_INV_CB_PIXEL_DATA_MASK 0x0000000CL
+#define PA_SC_BINNER_EVENT_CNTL_3__RESERVED_50_MASK 0x00000030L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_START_MASK 0x000000C0L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_STOP_MASK 0x00000300L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_MARKER_MASK 0x00000C00L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_DRAW_MASK 0x00003000L
+#define PA_SC_BINNER_EVENT_CNTL_3__THREAD_TRACE_FINISH_MASK 0x0000C000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_CONTROL_MASK 0x00030000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_DUMP_MASK 0x000C0000L
+#define PA_SC_BINNER_EVENT_CNTL_3__PIXEL_PIPE_STAT_RESET_MASK 0x00300000L
+#define PA_SC_BINNER_EVENT_CNTL_3__CONTEXT_SUSPEND_MASK 0x00C00000L
+#define PA_SC_BINNER_EVENT_CNTL_3__OFFCHIP_HS_DEALLOC_MASK 0x03000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_NGG_PIPELINE_MASK 0x0C000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__ENABLE_PIPELINE_NOT_USED_MASK 0x30000000L
+#define PA_SC_BINNER_EVENT_CNTL_3__DRAW_DONE_MASK 0xC0000000L
+//PA_SC_BINNER_TIMEOUT_COUNTER
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_TIMEOUT_COUNTER__THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_BINNER_PERF_CNTL_0
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x14
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD__SHIFT 0x17
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000003FFL
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_PRIMS_THRESHOLD_MASK 0x000FFC00L
+#define PA_SC_BINNER_PERF_CNTL_0__BIN_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x00700000L
+#define PA_SC_BINNER_PERF_CNTL_0__BATCH_HIST_NUM_CONTEXT_THRESHOLD_MASK 0x03800000L
+//PA_SC_BINNER_PERF_CNTL_1
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD__SHIFT 0x5
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD__SHIFT 0xa
+#define PA_SC_BINNER_PERF_CNTL_1__BIN_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x0000001FL
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_PERSISTENT_STATE_THRESHOLD_MASK 0x000003E0L
+#define PA_SC_BINNER_PERF_CNTL_1__BATCH_HIST_NUM_TRIV_REJECTED_PRIMS_THRESHOLD_MASK 0x03FFFC00L
+//PA_SC_BINNER_PERF_CNTL_2
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD__SHIFT 0xb
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_ROWS_PER_PRIM_THRESHOLD_MASK 0x000007FFL
+#define PA_SC_BINNER_PERF_CNTL_2__BATCH_HIST_NUM_COLUMNS_PER_ROW_THRESHOLD_MASK 0x003FF800L
+//PA_SC_BINNER_PERF_CNTL_3
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD__SHIFT 0x0
+#define PA_SC_BINNER_PERF_CNTL_3__BATCH_HIST_NUM_PS_WAVE_BREAKS_THRESHOLD_MASK 0xFFFFFFFFL
+//PA_SC_P3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_HP3D_TRAP_SCREEN_HV_LOCK
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_SC_TRAP_SCREEN_HV_LOCK
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_LOCK__DISABLE_NON_PRIV_WRITES_MASK 0x00000001L
+//PA_PH_INTERFACE_FIFO_SIZE
+#define PA_PH_INTERFACE_FIFO_SIZE__PA_PH_IF_FIFO_SIZE__SHIFT 0x0
+#define PA_PH_INTERFACE_FIFO_SIZE__PH_SC_IF_FIFO_SIZE__SHIFT 0x10
+#define PA_PH_INTERFACE_FIFO_SIZE__PA_PH_IF_FIFO_SIZE_MASK 0x000003FFL
+#define PA_PH_INTERFACE_FIFO_SIZE__PH_SC_IF_FIFO_SIZE_MASK 0x003F0000L
+//PA_PH_ENHANCE
+#define PA_PH_ENHANCE__ECO_SPARE0__SHIFT 0x0
+#define PA_PH_ENHANCE__ECO_SPARE1__SHIFT 0x1
+#define PA_PH_ENHANCE__ECO_SPARE2__SHIFT 0x2
+#define PA_PH_ENHANCE__ECO_SPARE3__SHIFT 0x3
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_FINE_CLOCK_GATE__SHIFT 0x4
+#define PA_PH_ENHANCE__DISABLE_FOPKT__SHIFT 0x5
+#define PA_PH_ENHANCE__DISABLE_FOPKT_SCAN_POST_RESET__SHIFT 0x6
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_CLKEN_CLOCK_GATE__SHIFT 0x7
+#define PA_PH_ENHANCE__DISABLE_PH_DEBUG_REG_FGCG__SHIFT 0x8
+#define PA_PH_ENHANCE__DISABLE_PH_PERF_REG_FGCG__SHIFT 0x9
+#define PA_PH_ENHANCE__ENABLE_PH_INTF_CLKEN_STRETCH__SHIFT 0xa
+#define PA_PH_ENHANCE__DISABLE_USE_LAST_PH_ARBITER_PERFCOUNTER_SAMPLE_EVENT__SHIFT 0xd
+#define PA_PH_ENHANCE__USE_PERFCOUNTER_START_STOP_EVENTS__SHIFT 0xe
+#define PA_PH_ENHANCE__FORCE_PH_PERFCOUNTER_SAMPLE_ENABLE_ON__SHIFT 0xf
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE__SHIFT 0x10
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_DISABLE__SHIFT 0x11
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_PERFCOUNTER_COUNT_MODE__SHIFT 0x12
+#define PA_PH_ENHANCE__ECO_SPARE0_MASK 0x00000001L
+#define PA_PH_ENHANCE__ECO_SPARE1_MASK 0x00000002L
+#define PA_PH_ENHANCE__ECO_SPARE2_MASK 0x00000004L
+#define PA_PH_ENHANCE__ECO_SPARE3_MASK 0x00000008L
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_FINE_CLOCK_GATE_MASK 0x00000010L
+#define PA_PH_ENHANCE__DISABLE_FOPKT_MASK 0x00000020L
+#define PA_PH_ENHANCE__DISABLE_FOPKT_SCAN_POST_RESET_MASK 0x00000040L
+#define PA_PH_ENHANCE__DISABLE_PH_SC_INTF_CLKEN_CLOCK_GATE_MASK 0x00000080L
+#define PA_PH_ENHANCE__DISABLE_PH_DEBUG_REG_FGCG_MASK 0x00000100L
+#define PA_PH_ENHANCE__DISABLE_PH_PERF_REG_FGCG_MASK 0x00000200L
+#define PA_PH_ENHANCE__ENABLE_PH_INTF_CLKEN_STRETCH_MASK 0x00001C00L
+#define PA_PH_ENHANCE__DISABLE_USE_LAST_PH_ARBITER_PERFCOUNTER_SAMPLE_EVENT_MASK 0x00002000L
+#define PA_PH_ENHANCE__USE_PERFCOUNTER_START_STOP_EVENTS_MASK 0x00004000L
+#define PA_PH_ENHANCE__FORCE_PH_PERFCOUNTER_SAMPLE_ENABLE_ON_MASK 0x00008000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_MASK 0x00010000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_MODE_DISABLE_MASK 0x00020000L
+#define PA_PH_ENHANCE__PH_SPI_GE_THROTTLE_PERFCOUNTER_COUNT_MODE_MASK 0x00040000L
+//PA_SC_VRS_SURFACE_CNTL_1
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE__SHIFT 0x0
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_SHADER_KILL_ENABLE__SHIFT 0x1
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK_OPS_ENABLE__SHIFT 0x2
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_RATE_16XAA__SHIFT 0x3
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_Z_OR_STENCIL__SHIFT 0x4
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_PRE_SHADER_DEPTH_COVERAGE_ENABLED__SHIFT 0x5
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POST_DEPTH_IMPORT__SHIFT 0x6
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POPS__SHIFT 0x7
+#define PA_SC_VRS_SURFACE_CNTL_1__USE_ONLY_VRS_RATE_FINE_CFG__SHIFT 0x8
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_VRS_RATE_NORMALIZATION__SHIFT 0xc
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_PS_ITER_RATE_COMBINER_PASSTHRU_OVERRIDE__SHIFT 0xf
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_CMASK_RATE_HINT_FORCE_ZERO_OVERRIDE__SHIFT 0x13
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_DETAIL_TO_EXPOSED_RATE_CLAMPING__SHIFT 0x14
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_0__SHIFT 0x15
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_1__SHIFT 0x16
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_2__SHIFT 0x17
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_3__SHIFT 0x18
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_4__SHIFT 0x19
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_5__SHIFT 0x1a
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_6__SHIFT 0x1b
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_7__SHIFT 0x1c
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_8__SHIFT 0x1d
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_9__SHIFT 0x1e
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_10__SHIFT 0x1f
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK 0x00000001L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_SHADER_KILL_ENABLE_MASK 0x00000002L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_MASK_OPS_ENABLE_MASK 0x00000004L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_RATE_16XAA_MASK 0x00000008L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_Z_OR_STENCIL_MASK 0x00000010L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_PRE_SHADER_DEPTH_COVERAGE_ENABLED_MASK 0x00000020L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POST_DEPTH_IMPORT_MASK 0x00000040L
+#define PA_SC_VRS_SURFACE_CNTL_1__FORCE_SC_VRS_RATE_FINE_POPS_MASK 0x00000080L
+#define PA_SC_VRS_SURFACE_CNTL_1__USE_ONLY_VRS_RATE_FINE_CFG_MASK 0x00000100L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_VRS_RATE_NORMALIZATION_MASK 0x00001000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_PS_ITER_RATE_COMBINER_PASSTHRU_OVERRIDE_MASK 0x00008000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_CMASK_RATE_HINT_FORCE_ZERO_OVERRIDE_MASK 0x00080000L
+#define PA_SC_VRS_SURFACE_CNTL_1__DISABLE_SSAA_DETAIL_TO_EXPOSED_RATE_CLAMPING_MASK 0x00100000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_0_MASK 0x00200000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_1_MASK 0x00400000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_2_MASK 0x00800000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_3_MASK 0x01000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_4_MASK 0x02000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_5_MASK 0x04000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_6_MASK 0x08000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_7_MASK 0x10000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_8_MASK 0x20000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_9_MASK 0x40000000L
+#define PA_SC_VRS_SURFACE_CNTL_1__VRS_ECO_SPARE_10_MASK 0x80000000L
+
+
+// addressBlock: gc_pfvf_sqdec
+//SQ_RUNTIME_CONFIG
+#define SQ_RUNTIME_CONFIG__UNUSED_REGISTER__SHIFT 0x0
+#define SQ_RUNTIME_CONFIG__UNUSED_REGISTER_MASK 0x00000001L
+//SQ_DEBUG_STS_GLOBAL
+#define SQ_DEBUG_STS_GLOBAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_BUSY__SHIFT 0x1
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA0__SHIFT 0x4
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA1__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_GLOBAL__INTERRUPT_BUSY_MASK 0x00000002L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA0_MASK 0x0000FFF0L
+#define SQ_DEBUG_STS_GLOBAL__WAVE_LEVEL_SA1_MASK 0x0FFF0000L
+//SQ_DEBUG_STS_GLOBAL2
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX0__SHIFT 0x0
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX1__SHIFT 0x8
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_COMPUTE__SHIFT 0x10
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX0_MASK 0x000000FFL
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_GFX1_MASK 0x0000FF00L
+#define SQ_DEBUG_STS_GLOBAL2__REG_FIFO_LEVEL_COMPUTE_MASK 0x00FF0000L
+//SH_MEM_BASES
+#define SH_MEM_BASES__PRIVATE_BASE__SHIFT 0x0
+#define SH_MEM_BASES__SHARED_BASE__SHIFT 0x10
+#define SH_MEM_BASES__PRIVATE_BASE_MASK 0x0000FFFFL
+#define SH_MEM_BASES__SHARED_BASE_MASK 0xFFFF0000L
+//SH_MEM_CONFIG
+#define SH_MEM_CONFIG__ADDRESS_MODE__SHIFT 0x0
+#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT 0x2
+#define SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT 0xe
+#define SH_MEM_CONFIG__ICACHE_USE_GL1__SHIFT 0x12
+#define SH_MEM_CONFIG__ADDRESS_MODE_MASK 0x00000001L
+#define SH_MEM_CONFIG__ALIGNMENT_MODE_MASK 0x0000000CL
+#define SH_MEM_CONFIG__INITIAL_INST_PREFETCH_MASK 0x0000C000L
+#define SH_MEM_CONFIG__ICACHE_USE_GL1_MASK 0x00040000L
+//SQ_DEBUG
+#define SQ_DEBUG__SINGLE_MEMOP__SHIFT 0x0
+#define SQ_DEBUG__SINGLE_ALU_OP__SHIFT 0x1
+#define SQ_DEBUG__WAIT_DEP_CTR_ZERO__SHIFT 0x2
+#define SQ_DEBUG__SINGLE_MEMOP_MASK 0x00000001L
+#define SQ_DEBUG__SINGLE_ALU_OP_MASK 0x00000002L
+#define SQ_DEBUG__WAIT_DEP_CTR_ZERO_MASK 0x00000004L
+//SQ_SHADER_TBA_LO
+#define SQ_SHADER_TBA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TBA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TBA_HI
+#define SQ_SHADER_TBA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TBA_HI__TRAP_EN__SHIFT 0x1f
+#define SQ_SHADER_TBA_HI__ADDR_HI_MASK 0x000000FFL
+#define SQ_SHADER_TBA_HI__TRAP_EN_MASK 0x80000000L
+//SQ_SHADER_TMA_LO
+#define SQ_SHADER_TMA_LO__ADDR_LO__SHIFT 0x0
+#define SQ_SHADER_TMA_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//SQ_SHADER_TMA_HI
+#define SQ_SHADER_TMA_HI__ADDR_HI__SHIFT 0x0
+#define SQ_SHADER_TMA_HI__ADDR_HI_MASK 0x000000FFL
+
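The __SHIFT/_MASK pairs above are meant to be consumed by field pack/unpack helpers (the amdgpu driver's REG_SET_FIELD/REG_GET_FIELD macros follow this pattern). A minimal self-contained sketch built on the SH_MEM_BASES fields defined earlier in this block; the FIELD_SET/FIELD_GET helpers and the sample values are illustrative stand-ins, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Local illustration of the usual pack/unpack idiom; not the kernel's macros. */
#define FIELD_SET(val, reg, field, fv)                                  \
	(((val) & ~(uint32_t)reg##__##field##_MASK) |                   \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) &                 \
	  (uint32_t)reg##__##field##_MASK))
#define FIELD_GET(val, reg, field)                                      \
	(((val) & (uint32_t)reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
	uint32_t sh_mem_bases = 0;

	/* PRIVATE_BASE occupies bits [15:0], SHARED_BASE bits [31:16]. */
	sh_mem_bases = FIELD_SET(sh_mem_bases, SH_MEM_BASES, PRIVATE_BASE, 0x1234);
	sh_mem_bases = FIELD_SET(sh_mem_bases, SH_MEM_BASES, SHARED_BASE, 0xABCD);

	printf("SH_MEM_BASES = 0x%08X, SHARED_BASE = 0x%X\n",
	       (unsigned int)sh_mem_bases,
	       (unsigned int)FIELD_GET(sh_mem_bases, SH_MEM_BASES, SHARED_BASE));
	return 0;
}
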
+
+// addressBlock: gc_pfonly_cpdec
+//CP_DEBUG_2
+#define CP_DEBUG_2__CHIU_NOALLOC_OVERRIDE__SHIFT 0xc
+#define CP_DEBUG_2__RCIU_SECURE_CHECK_DISABLE__SHIFT 0xd
+#define CP_DEBUG_2__RB_PACKET_INJECTOR_DISABLE__SHIFT 0xe
+#define CP_DEBUG_2__CNTX_DONE_COPY_STATE_DISABLE__SHIFT 0xf
+#define CP_DEBUG_2__NOP_DISCARD_DISABLE__SHIFT 0x10
+#define CP_DEBUG_2__DC_INTERLEAVE_DISABLE__SHIFT 0x11
+#define CP_DEBUG_2__BC_LOOKUP_CB_DB_FLUSH_DISABLE__SHIFT 0x1b
+#define CP_DEBUG_2__DC_FORCE_CLK_EN__SHIFT 0x1c
+#define CP_DEBUG_2__DC_DISABLE_BROADCAST__SHIFT 0x1d
+#define CP_DEBUG_2__NOT_EOP_HW_DETECT_DISABLE__SHIFT 0x1e
+#define CP_DEBUG_2__PFP_DDID_HW_DETECT_DISABLE__SHIFT 0x1f
+#define CP_DEBUG_2__CHIU_NOALLOC_OVERRIDE_MASK 0x00001000L
+#define CP_DEBUG_2__RCIU_SECURE_CHECK_DISABLE_MASK 0x00002000L
+#define CP_DEBUG_2__RB_PACKET_INJECTOR_DISABLE_MASK 0x00004000L
+#define CP_DEBUG_2__CNTX_DONE_COPY_STATE_DISABLE_MASK 0x00008000L
+#define CP_DEBUG_2__NOP_DISCARD_DISABLE_MASK 0x00010000L
+#define CP_DEBUG_2__DC_INTERLEAVE_DISABLE_MASK 0x00020000L
+#define CP_DEBUG_2__BC_LOOKUP_CB_DB_FLUSH_DISABLE_MASK 0x08000000L
+#define CP_DEBUG_2__DC_FORCE_CLK_EN_MASK 0x10000000L
+#define CP_DEBUG_2__DC_DISABLE_BROADCAST_MASK 0x20000000L
+#define CP_DEBUG_2__NOT_EOP_HW_DETECT_DISABLE_MASK 0x40000000L
+#define CP_DEBUG_2__PFP_DDID_HW_DETECT_DISABLE_MASK 0x80000000L
+//CP_FETCHER_SOURCE
+#define CP_FETCHER_SOURCE__ME_SRC__SHIFT 0x0
+#define CP_FETCHER_SOURCE__ME_SRC_MASK 0x00000001L
+//CP_DFY_CNTL
+#define CP_DFY_CNTL__POLICY__SHIFT 0x8
+#define CP_DFY_CNTL__VOL__SHIFT 0xa
+#define CP_DFY_CNTL__MTYPE__SHIFT 0xc
+#define CP_DFY_CNTL__REPEATER_FGCG_DISABLE__SHIFT 0x19
+#define CP_DFY_CNTL__TPI_SDP_SEL__SHIFT 0x1a
+#define CP_DFY_CNTL__WRITE_DIS__SHIFT 0x1b
+#define CP_DFY_CNTL__LFSR_RESET__SHIFT 0x1c
+#define CP_DFY_CNTL__MODE__SHIFT 0x1d
+#define CP_DFY_CNTL__ENABLE__SHIFT 0x1f
+#define CP_DFY_CNTL__POLICY_MASK 0x00000300L
+#define CP_DFY_CNTL__VOL_MASK 0x00000400L
+#define CP_DFY_CNTL__MTYPE_MASK 0x00007000L
+#define CP_DFY_CNTL__REPEATER_FGCG_DISABLE_MASK 0x02000000L
+#define CP_DFY_CNTL__TPI_SDP_SEL_MASK 0x04000000L
+#define CP_DFY_CNTL__WRITE_DIS_MASK 0x08000000L
+#define CP_DFY_CNTL__LFSR_RESET_MASK 0x10000000L
+#define CP_DFY_CNTL__MODE_MASK 0x60000000L
+#define CP_DFY_CNTL__ENABLE_MASK 0x80000000L
+//CP_DFY_STAT
+#define CP_DFY_STAT__BURST_COUNT__SHIFT 0x0
+#define CP_DFY_STAT__TAGS_PENDING__SHIFT 0x10
+#define CP_DFY_STAT__BUSY__SHIFT 0x1f
+#define CP_DFY_STAT__BURST_COUNT_MASK 0x0000FFFFL
+#define CP_DFY_STAT__TAGS_PENDING_MASK 0x07FF0000L
+#define CP_DFY_STAT__BUSY_MASK 0x80000000L
+//CP_DFY_ADDR_HI
+#define CP_DFY_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DFY_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DFY_ADDR_LO
+#define CP_DFY_ADDR_LO__ADDR_LO__SHIFT 0x5
+#define CP_DFY_ADDR_LO__ADDR_LO_MASK 0xFFFFFFE0L
+//CP_DFY_DATA_0
+#define CP_DFY_DATA_0__DATA__SHIFT 0x0
+#define CP_DFY_DATA_0__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_1
+#define CP_DFY_DATA_1__DATA__SHIFT 0x0
+#define CP_DFY_DATA_1__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_2
+#define CP_DFY_DATA_2__DATA__SHIFT 0x0
+#define CP_DFY_DATA_2__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_3
+#define CP_DFY_DATA_3__DATA__SHIFT 0x0
+#define CP_DFY_DATA_3__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_4
+#define CP_DFY_DATA_4__DATA__SHIFT 0x0
+#define CP_DFY_DATA_4__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_5
+#define CP_DFY_DATA_5__DATA__SHIFT 0x0
+#define CP_DFY_DATA_5__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_6
+#define CP_DFY_DATA_6__DATA__SHIFT 0x0
+#define CP_DFY_DATA_6__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_7
+#define CP_DFY_DATA_7__DATA__SHIFT 0x0
+#define CP_DFY_DATA_7__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_8
+#define CP_DFY_DATA_8__DATA__SHIFT 0x0
+#define CP_DFY_DATA_8__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_9
+#define CP_DFY_DATA_9__DATA__SHIFT 0x0
+#define CP_DFY_DATA_9__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_10
+#define CP_DFY_DATA_10__DATA__SHIFT 0x0
+#define CP_DFY_DATA_10__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_11
+#define CP_DFY_DATA_11__DATA__SHIFT 0x0
+#define CP_DFY_DATA_11__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_12
+#define CP_DFY_DATA_12__DATA__SHIFT 0x0
+#define CP_DFY_DATA_12__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_13
+#define CP_DFY_DATA_13__DATA__SHIFT 0x0
+#define CP_DFY_DATA_13__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_14
+#define CP_DFY_DATA_14__DATA__SHIFT 0x0
+#define CP_DFY_DATA_14__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_DATA_15
+#define CP_DFY_DATA_15__DATA__SHIFT 0x0
+#define CP_DFY_DATA_15__DATA_MASK 0xFFFFFFFFL
+//CP_DFY_CMD
+#define CP_DFY_CMD__SIZE__SHIFT 0x10
+#define CP_DFY_CMD__SIZE_MASK 0xFFFF0000L
+
+
+// addressBlock: gc_pfonly_cpphqddec
+//CP_HPD_MES_ROQ_OFFSETS
+#define CP_HPD_MES_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_MES_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_MES_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_MES_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_MES_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_MES_ROQ_OFFSETS__IB_OFFSET_MASK 0x007F0000L
+//CP_HPD_ROQ_OFFSETS
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET__SHIFT 0x0
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET__SHIFT 0x8
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET__SHIFT 0x10
+#define CP_HPD_ROQ_OFFSETS__IQ_OFFSET_MASK 0x00000007L
+#define CP_HPD_ROQ_OFFSETS__PQ_OFFSET_MASK 0x00003F00L
+#define CP_HPD_ROQ_OFFSETS__IB_OFFSET_MASK 0x007F0000L
+//CP_HPD_STATUS0
+#define CP_HPD_STATUS0__QUEUE_STATE__SHIFT 0x0
+#define CP_HPD_STATUS0__MAPPED_QUEUE__SHIFT 0x5
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE__SHIFT 0x8
+#define CP_HPD_STATUS0__FETCHING_MQD__SHIFT 0x10
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB__SHIFT 0x11
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ__SHIFT 0x12
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE__SHIFT 0x14
+#define CP_HPD_STATUS0__MASTER_QUEUE_IDLE_DIS__SHIFT 0x1b
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK__SHIFT 0x1c
+#define CP_HPD_STATUS0__FREEZE_QUEUE_STATE__SHIFT 0x1e
+#define CP_HPD_STATUS0__FORCE_QUEUE__SHIFT 0x1f
+#define CP_HPD_STATUS0__QUEUE_STATE_MASK 0x0000001FL
+#define CP_HPD_STATUS0__MAPPED_QUEUE_MASK 0x000000E0L
+#define CP_HPD_STATUS0__QUEUE_AVAILABLE_MASK 0x0000FF00L
+#define CP_HPD_STATUS0__FETCHING_MQD_MASK 0x00010000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_PQIB_MASK 0x00020000L
+#define CP_HPD_STATUS0__PEND_TXFER_SIZE_IQ_MASK 0x00040000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_STATE_MASK 0x01F00000L
+#define CP_HPD_STATUS0__MASTER_QUEUE_IDLE_DIS_MASK 0x08000000L
+#define CP_HPD_STATUS0__ENABLE_OFFLOAD_CHECK_MASK 0x30000000L
+#define CP_HPD_STATUS0__FREEZE_QUEUE_STATE_MASK 0x40000000L
+#define CP_HPD_STATUS0__FORCE_QUEUE_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_didtdec
+//DIDT_INDEX_AUTO_INCR_EN
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN__SHIFT 0x0
+#define DIDT_INDEX_AUTO_INCR_EN__DIDT_INDEX_AUTO_INCR_EN_MASK 0x00000001L
+//DIDT_EDC_CTRL
+#define DIDT_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define DIDT_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define DIDT_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define DIDT_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define DIDT_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT 0xa
+#define DIDT_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0xe
+#define DIDT_EDC_CTRL__EDC_ALGORITHM_MODE__SHIFT 0xf
+#define DIDT_EDC_CTRL__EDC_AVGDIV__SHIFT 0x10
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_SEL__SHIFT 0x14
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_BIT_NUMS__SHIFT 0x15
+#define DIDT_EDC_CTRL__RLC_FORCE_STALL_EN__SHIFT 0x18
+#define DIDT_EDC_CTRL__RLC_STALL_LEVEL_SEL__SHIFT 0x19
+#define DIDT_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define DIDT_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define DIDT_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define DIDT_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000003F0L
+#define DIDT_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK 0x00003C00L
+#define DIDT_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00004000L
+#define DIDT_EDC_CTRL__EDC_ALGORITHM_MODE_MASK 0x00008000L
+#define DIDT_EDC_CTRL__EDC_AVGDIV_MASK 0x000F0000L
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_SEL_MASK 0x00100000L
+#define DIDT_EDC_CTRL__EDC_THRESHOLD_RSHIFT_BIT_NUMS_MASK 0x00E00000L
+#define DIDT_EDC_CTRL__RLC_FORCE_STALL_EN_MASK 0x01000000L
+#define DIDT_EDC_CTRL__RLC_STALL_LEVEL_SEL_MASK 0x02000000L
+//DIDT_EDC_THROTTLE_CTRL
+#define DIDT_EDC_THROTTLE_CTRL__SQ_STALL_EN__SHIFT 0x0
+#define DIDT_EDC_THROTTLE_CTRL__DB_STALL_EN__SHIFT 0x1
+#define DIDT_EDC_THROTTLE_CTRL__TCP_STALL_EN__SHIFT 0x2
+#define DIDT_EDC_THROTTLE_CTRL__TD_STALL_EN__SHIFT 0x3
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_EN__SHIFT 0x4
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_MODE__SHIFT 0x5
+#define DIDT_EDC_THROTTLE_CTRL__SQ_STALL_EN_MASK 0x00000001L
+#define DIDT_EDC_THROTTLE_CTRL__DB_STALL_EN_MASK 0x00000002L
+#define DIDT_EDC_THROTTLE_CTRL__TCP_STALL_EN_MASK 0x00000004L
+#define DIDT_EDC_THROTTLE_CTRL__TD_STALL_EN_MASK 0x00000008L
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_EN_MASK 0x00000010L
+#define DIDT_EDC_THROTTLE_CTRL__PATTERN_EXTEND_MODE_MASK 0x000000E0L
+//DIDT_EDC_THRESHOLD
+#define DIDT_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define DIDT_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//DIDT_EDC_STALL_PATTERN_1_2
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_3_4
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_5_6
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_EDC_STALL_PATTERN_7
+#define DIDT_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_EDC_STATUS
+#define DIDT_EDC_STATUS__EDC_FSM_STATE__SHIFT 0x0
+#define DIDT_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x1
+#define DIDT_EDC_STATUS__EDC_FSM_STATE_MASK 0x00000001L
+#define DIDT_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x0000000EL
+//DIDT_EDC_DYNAMIC_THRESHOLD_RO
+#define DIDT_EDC_DYNAMIC_THRESHOLD_RO__EDC_DYNAMIC_THRESHOLD_RO__SHIFT 0x0
+#define DIDT_EDC_DYNAMIC_THRESHOLD_RO__EDC_DYNAMIC_THRESHOLD_RO_MASK 0x00000001L
+//DIDT_EDC_OVERFLOW
+#define DIDT_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
+#define DIDT_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
+#define DIDT_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
+#define DIDT_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
+//DIDT_EDC_ROLLING_POWER_DELTA
+#define DIDT_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
+#define DIDT_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
+//DIDT_IND_INDEX
+#define DIDT_IND_INDEX__DIDT_IND_INDEX__SHIFT 0x0
+#define DIDT_IND_INDEX__DIDT_IND_INDEX_MASK 0xFFFFFFFFL
+//DIDT_IND_DATA
+#define DIDT_IND_DATA__DIDT_IND_DATA__SHIFT 0x0
+#define DIDT_IND_DATA__DIDT_IND_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_pfonly_spidec
+//SPI_CDBG_SYS_GFX
+#define SPI_CDBG_SYS_GFX__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_GFX__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_GFX__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_GFX__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_GFX__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_GFX__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_GFX__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_GFX__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_HP3D
+#define SPI_CDBG_SYS_HP3D__PS_EN__SHIFT 0x0
+#define SPI_CDBG_SYS_HP3D__GS_EN__SHIFT 0x2
+#define SPI_CDBG_SYS_HP3D__HS_EN__SHIFT 0x4
+#define SPI_CDBG_SYS_HP3D__CS_EN__SHIFT 0x6
+#define SPI_CDBG_SYS_HP3D__PS_EN_MASK 0x0001L
+#define SPI_CDBG_SYS_HP3D__GS_EN_MASK 0x0004L
+#define SPI_CDBG_SYS_HP3D__HS_EN_MASK 0x0010L
+#define SPI_CDBG_SYS_HP3D__CS_EN_MASK 0x0040L
+//SPI_CDBG_SYS_CS0
+#define SPI_CDBG_SYS_CS0__PIPE0__SHIFT 0x0
+#define SPI_CDBG_SYS_CS0__PIPE1__SHIFT 0x8
+#define SPI_CDBG_SYS_CS0__PIPE2__SHIFT 0x10
+#define SPI_CDBG_SYS_CS0__PIPE3__SHIFT 0x18
+#define SPI_CDBG_SYS_CS0__PIPE0_MASK 0x000000FFL
+#define SPI_CDBG_SYS_CS0__PIPE1_MASK 0x0000FF00L
+#define SPI_CDBG_SYS_CS0__PIPE2_MASK 0x00FF0000L
+#define SPI_CDBG_SYS_CS0__PIPE3_MASK 0xFF000000L
+//SPI_GDBG_WAVE_CNTL
+#define SPI_GDBG_WAVE_CNTL__STALL_RA__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL__STALL_LAUNCH__SHIFT 0x1
+#define SPI_GDBG_WAVE_CNTL__STALL_RA_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL__STALL_LAUNCH_MASK 0x00000002L
+//SPI_GDBG_TRAP_CONFIG
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN__SHIFT 0x0
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN__SHIFT 0x8
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN__SHIFT 0x10
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN__SHIFT 0x18
+#define SPI_GDBG_TRAP_CONFIG__PIPE0_EN_MASK 0x000000FFL
+#define SPI_GDBG_TRAP_CONFIG__PIPE1_EN_MASK 0x0000FF00L
+#define SPI_GDBG_TRAP_CONFIG__PIPE2_EN_MASK 0x00FF0000L
+#define SPI_GDBG_TRAP_CONFIG__PIPE3_EN_MASK 0xFF000000L
+//SPI_GDBG_WAVE_CNTL3
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS__SHIFT 0x0
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS__SHIFT 0x2
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS__SHIFT 0x3
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG__SHIFT 0x4
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0__SHIFT 0x5
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1__SHIFT 0x6
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2__SHIFT 0x7
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3__SHIFT 0x8
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4__SHIFT 0x9
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5__SHIFT 0xa
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6__SHIFT 0xb
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7__SHIFT 0xc
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION__SHIFT 0xd
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT__SHIFT 0x1c
+#define SPI_GDBG_WAVE_CNTL3__STALL_PS_MASK 0x00000001L
+#define SPI_GDBG_WAVE_CNTL3__STALL_GS_MASK 0x00000004L
+#define SPI_GDBG_WAVE_CNTL3__STALL_HS_MASK 0x00000008L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CSG_MASK 0x00000010L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS0_MASK 0x00000020L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS1_MASK 0x00000040L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS2_MASK 0x00000080L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS3_MASK 0x00000100L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS4_MASK 0x00000200L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS5_MASK 0x00000400L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS6_MASK 0x00000800L
+#define SPI_GDBG_WAVE_CNTL3__STALL_CS7_MASK 0x00001000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_DURATION_MASK 0x0FFFE000L
+#define SPI_GDBG_WAVE_CNTL3__STALL_MULT_MASK 0x10000000L
+//SPI_RESET_DEBUG
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET__SHIFT 0x0
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID__SHIFT 0x1
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID__SHIFT 0x2
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE__SHIFT 0x3
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY__SHIFT 0x4
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_MASK 0x01L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PER_VMID_MASK 0x02L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_ALL_VMID_MASK 0x04L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_RESOURCE_MASK 0x08L
+#define SPI_RESET_DEBUG__DISABLE_GFX_RESET_PRIORITY_MASK 0x10L
+//SPI_ARB_CNTL_0
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT__SHIFT 0x0
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT__SHIFT 0x4
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT__SHIFT 0x8
+#define SPI_ARB_CNTL_0__EXP_ARB_COL_WT_MASK 0x0000000FL
+#define SPI_ARB_CNTL_0__EXP_ARB_POS_WT_MASK 0x000000F0L
+#define SPI_ARB_CNTL_0__EXP_ARB_GDS_WT_MASK 0x00000F00L
+//SPI_FEATURE_CTRL
+#define SPI_FEATURE_CTRL__TUNNELING_WAVE_LIMIT__SHIFT 0x0
+#define SPI_FEATURE_CTRL__RA_PROBE_IGNORE__SHIFT 0x4
+#define SPI_FEATURE_CTRL__PS_THROTTLE_MAX_WAVE_LIMIT__SHIFT 0x5
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_WIF_CTRL__SHIFT 0xb
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_OOO_CTRL__SHIFT 0xd
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_DISABLE__SHIFT 0xe
+#define SPI_FEATURE_CTRL__TUNNELING_WAVE_LIMIT_MASK 0x0000000FL
+#define SPI_FEATURE_CTRL__RA_PROBE_IGNORE_MASK 0x00000010L
+#define SPI_FEATURE_CTRL__PS_THROTTLE_MAX_WAVE_LIMIT_MASK 0x000007E0L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_WIF_CTRL_MASK 0x00001800L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_OOO_CTRL_MASK 0x00002000L
+#define SPI_FEATURE_CTRL__RA_PROBE_SKEW_DISABLE_MASK 0x00004000L
+//SPI_SHADER_RSRC_LIMIT_CTRL
+#define SPI_SHADER_RSRC_LIMIT_CTRL__WAVES_PER_SIMD32__SHIFT 0x0
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_PER_SIMD32__SHIFT 0x5
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_WRAP_DISABLE__SHIFT 0xc
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT__SHIFT 0xd
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_HIERARCHY_LEVEL__SHIFT 0x13
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT__SHIFT 0x14
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_HIERARCHY_LEVEL__SHIFT 0x1c
+#define SPI_SHADER_RSRC_LIMIT_CTRL__PERFORMANCE_LIMIT_ENABLE__SHIFT 0x1f
+#define SPI_SHADER_RSRC_LIMIT_CTRL__WAVES_PER_SIMD32_MASK 0x0000001FL
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_PER_SIMD32_MASK 0x00000FE0L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__VGPR_WRAP_DISABLE_MASK 0x00001000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_MASK 0x0007E000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__BARRIER_LIMIT_HIERARCHY_LEVEL_MASK 0x00080000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_MASK 0x0FF00000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__LDS_LIMIT_HIERARCHY_LEVEL_MASK 0x10000000L
+#define SPI_SHADER_RSRC_LIMIT_CTRL__PERFORMANCE_LIMIT_ENABLE_MASK 0x80000000L
+//SPI_COMPUTE_WF_CTX_SAVE_STATUS
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE0_SAVE_BUSY__SHIFT 0x0
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE1_SAVE_BUSY__SHIFT 0x1
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE2_SAVE_BUSY__SHIFT 0x2
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE3_SAVE_BUSY__SHIFT 0x3
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE4_SAVE_BUSY__SHIFT 0x4
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE5_SAVE_BUSY__SHIFT 0x5
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE6_SAVE_BUSY__SHIFT 0x6
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE7_SAVE_BUSY__SHIFT 0x7
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE0_SAVE_BUSY__SHIFT 0x8
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE1_SAVE_BUSY__SHIFT 0x9
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE2_SAVE_BUSY__SHIFT 0xa
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE3_SAVE_BUSY__SHIFT 0xb
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE4_SAVE_BUSY__SHIFT 0xc
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE5_SAVE_BUSY__SHIFT 0xd
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE6_SAVE_BUSY__SHIFT 0xe
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE7_SAVE_BUSY__SHIFT 0xf
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE0_SAVE_BUSY__SHIFT 0x10
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE1_SAVE_BUSY__SHIFT 0x11
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE2_SAVE_BUSY__SHIFT 0x12
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE3_SAVE_BUSY__SHIFT 0x13
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE4_SAVE_BUSY__SHIFT 0x14
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE5_SAVE_BUSY__SHIFT 0x15
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE6_SAVE_BUSY__SHIFT 0x16
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE7_SAVE_BUSY__SHIFT 0x17
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE0_SAVE_BUSY__SHIFT 0x18
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE1_SAVE_BUSY__SHIFT 0x19
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE2_SAVE_BUSY__SHIFT 0x1a
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE3_SAVE_BUSY__SHIFT 0x1b
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE4_SAVE_BUSY__SHIFT 0x1c
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE5_SAVE_BUSY__SHIFT 0x1d
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE6_SAVE_BUSY__SHIFT 0x1e
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE7_SAVE_BUSY__SHIFT 0x1f
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE0_SAVE_BUSY_MASK 0x00000001L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE1_SAVE_BUSY_MASK 0x00000002L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE2_SAVE_BUSY_MASK 0x00000004L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE3_SAVE_BUSY_MASK 0x00000008L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE4_SAVE_BUSY_MASK 0x00000010L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE5_SAVE_BUSY_MASK 0x00000020L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE6_SAVE_BUSY_MASK 0x00000040L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE0_QUEUE7_SAVE_BUSY_MASK 0x00000080L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE0_SAVE_BUSY_MASK 0x00000100L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE1_SAVE_BUSY_MASK 0x00000200L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE2_SAVE_BUSY_MASK 0x00000400L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE3_SAVE_BUSY_MASK 0x00000800L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE4_SAVE_BUSY_MASK 0x00001000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE5_SAVE_BUSY_MASK 0x00002000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE6_SAVE_BUSY_MASK 0x00004000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE1_QUEUE7_SAVE_BUSY_MASK 0x00008000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE0_SAVE_BUSY_MASK 0x00010000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE1_SAVE_BUSY_MASK 0x00020000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE2_SAVE_BUSY_MASK 0x00040000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE3_SAVE_BUSY_MASK 0x00080000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE4_SAVE_BUSY_MASK 0x00100000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE5_SAVE_BUSY_MASK 0x00200000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE6_SAVE_BUSY_MASK 0x00400000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE2_QUEUE7_SAVE_BUSY_MASK 0x00800000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE0_SAVE_BUSY_MASK 0x01000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE1_SAVE_BUSY_MASK 0x02000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE2_SAVE_BUSY_MASK 0x04000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE3_SAVE_BUSY_MASK 0x08000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE4_SAVE_BUSY_MASK 0x10000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE5_SAVE_BUSY_MASK 0x20000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE6_SAVE_BUSY_MASK 0x40000000L
+#define SPI_COMPUTE_WF_CTX_SAVE_STATUS__PIPE3_QUEUE7_SAVE_BUSY_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_tcpdec
+//TCP_INVALIDATE
+#define TCP_INVALIDATE__START__SHIFT 0x0
+#define TCP_INVALIDATE__START_MASK 0x00000001L
+//TCP_STATUS
+#define TCP_STATUS__TCP_BUSY__SHIFT 0x0
+#define TCP_STATUS__INPUT_BUSY__SHIFT 0x1
+#define TCP_STATUS__ADRS_BUSY__SHIFT 0x2
+#define TCP_STATUS__TAGRAMS_BUSY__SHIFT 0x3
+#define TCP_STATUS__CNTRL_BUSY__SHIFT 0x4
+#define TCP_STATUS__LFIFO_BUSY__SHIFT 0x5
+#define TCP_STATUS__READ_BUSY__SHIFT 0x6
+#define TCP_STATUS__FORMAT_BUSY__SHIFT 0x7
+#define TCP_STATUS__VM_BUSY__SHIFT 0x8
+#define TCP_STATUS__MEMIF_BUSY__SHIFT 0x9
+#define TCP_STATUS__GCR_BUSY__SHIFT 0xa
+#define TCP_STATUS__OFIFO_BUSY__SHIFT 0xb
+#define TCP_STATUS__OFIFO_QUEUE_BUSY__SHIFT 0xc
+#define TCP_STATUS__XNACK_PRT__SHIFT 0xf
+#define TCP_STATUS__TCP_BUSY_MASK 0x00000001L
+#define TCP_STATUS__INPUT_BUSY_MASK 0x00000002L
+#define TCP_STATUS__ADRS_BUSY_MASK 0x00000004L
+#define TCP_STATUS__TAGRAMS_BUSY_MASK 0x00000008L
+#define TCP_STATUS__CNTRL_BUSY_MASK 0x00000010L
+#define TCP_STATUS__LFIFO_BUSY_MASK 0x00000020L
+#define TCP_STATUS__READ_BUSY_MASK 0x00000040L
+#define TCP_STATUS__FORMAT_BUSY_MASK 0x00000080L
+#define TCP_STATUS__VM_BUSY_MASK 0x00000100L
+#define TCP_STATUS__MEMIF_BUSY_MASK 0x00000200L
+#define TCP_STATUS__GCR_BUSY_MASK 0x00000400L
+#define TCP_STATUS__OFIFO_BUSY_MASK 0x00000800L
+#define TCP_STATUS__OFIFO_QUEUE_BUSY_MASK 0x00003000L
+#define TCP_STATUS__XNACK_PRT_MASK 0x00008000L
+//TCP_CNTL
+#define TCP_CNTL__FORCE_HIT__SHIFT 0x0
+#define TCP_CNTL__FORCE_MISS__SHIFT 0x1
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE__SHIFT 0x5
+#define TCP_CNTL__TD_DATA_EN_OVERRIDE__SHIFT 0x6
+#define TCP_CNTL__ENABLE_128B_DCC_COMP_READ_FOR_INDEP64__SHIFT 0x7
+#define TCP_CNTL__DISABLE_WRITE_COMBINING__SHIFT 0x9
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT__SHIFT 0xf
+#define TCP_CNTL__FORCE_EOW_SET_CNT__SHIFT 0x16
+#define TCP_CNTL__DISABLE_Z_MAP__SHIFT 0x1c
+#define TCP_CNTL__FORCE_ORDER_BETWEEN_READ_WRITE_TO_SAME_ADDRESS__SHIFT 0x1d
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT__SHIFT 0x1f
+#define TCP_CNTL__FORCE_HIT_MASK 0x00000001L
+#define TCP_CNTL__FORCE_MISS_MASK 0x00000002L
+#define TCP_CNTL__FLAT_BUF_CACHE_SWIZZLE_MASK 0x00000020L
+#define TCP_CNTL__TD_DATA_EN_OVERRIDE_MASK 0x00000040L
+#define TCP_CNTL__ENABLE_128B_DCC_COMP_READ_FOR_INDEP64_MASK 0x00000080L
+#define TCP_CNTL__DISABLE_WRITE_COMBINING_MASK 0x00000200L
+#define TCP_CNTL__FORCE_EOW_TOTAL_CNT_MASK 0x001F8000L
+#define TCP_CNTL__FORCE_EOW_SET_CNT_MASK 0x07C00000L
+#define TCP_CNTL__DISABLE_Z_MAP_MASK 0x10000000L
+#define TCP_CNTL__FORCE_ORDER_BETWEEN_READ_WRITE_TO_SAME_ADDRESS_MASK 0x20000000L
+#define TCP_CNTL__ASTC_VE_MSB_TOLERANT_MASK 0x80000000L
+//TCP_CNTL2
+#define TCP_CNTL2__LS_DISABLE_CLOCKS__SHIFT 0x0
+#define TCP_CNTL2__TCP_FMT_MGCG_DISABLE__SHIFT 0x8
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE__SHIFT 0x9
+#define TCP_CNTL2__TCP_WRITE_DATA_MGCG_DISABLE__SHIFT 0xa
+#define TCP_CNTL2__TCP_INNER_BLOCK_MGCG_DISABLE__SHIFT 0xb
+#define TCP_CNTL2__TCP_ADRS_IMG_CALC_MGCG_DISABLE__SHIFT 0xc
+#define TCP_CNTL2__V64_COMBINE_ENABLE__SHIFT 0xd
+#define TCP_CNTL2__TAGRAM_ADDR_SWIZZLE_DISABLE__SHIFT 0xe
+#define TCP_CNTL2__RETURN_ORDER_OVERRIDE__SHIFT 0xf
+#define TCP_CNTL2__POWER_OPT_DISABLE__SHIFT 0x10
+#define TCP_CNTL2__GCR_RSP_FGCG_DISABLE__SHIFT 0x11
+#define TCP_CNTL2__PERF_EN_OVERRIDE__SHIFT 0x12
+#define TCP_CNTL2__TC_TD_RAM_CLKEN_DISABLE__SHIFT 0x14
+#define TCP_CNTL2__TC_TD_DATA_CLKEN_DISABLE__SHIFT 0x15
+#define TCP_CNTL2__TCP_GL1_REQ_CLKEN_DISABLE__SHIFT 0x16
+#define TCP_CNTL2__TCP_GL1R_SRC_CLKEN_DISABLE__SHIFT 0x17
+#define TCP_CNTL2__SPARE_BIT__SHIFT 0x1a
+#define TCP_CNTL2__TAGRAM_XY_BIAS_OVERRIDE__SHIFT 0x1b
+#define TCP_CNTL2__TCP_REQ_MGCG_DISABLE__SHIFT 0x1d
+#define TCP_CNTL2__TCP_MISS_MGCG_DISABLE__SHIFT 0x1e
+#define TCP_CNTL2__DISABLE_MIPMAP_PARAM_CALC_SELF_GATING__SHIFT 0x1f
+#define TCP_CNTL2__LS_DISABLE_CLOCKS_MASK 0x000000FFL
+#define TCP_CNTL2__TCP_FMT_MGCG_DISABLE_MASK 0x00000100L
+#define TCP_CNTL2__TCPF_LATENCY_BYPASS_DISABLE_MASK 0x00000200L
+#define TCP_CNTL2__TCP_WRITE_DATA_MGCG_DISABLE_MASK 0x00000400L
+#define TCP_CNTL2__TCP_INNER_BLOCK_MGCG_DISABLE_MASK 0x00000800L
+#define TCP_CNTL2__TCP_ADRS_IMG_CALC_MGCG_DISABLE_MASK 0x00001000L
+#define TCP_CNTL2__V64_COMBINE_ENABLE_MASK 0x00002000L
+#define TCP_CNTL2__TAGRAM_ADDR_SWIZZLE_DISABLE_MASK 0x00004000L
+#define TCP_CNTL2__RETURN_ORDER_OVERRIDE_MASK 0x00008000L
+#define TCP_CNTL2__POWER_OPT_DISABLE_MASK 0x00010000L
+#define TCP_CNTL2__GCR_RSP_FGCG_DISABLE_MASK 0x00020000L
+#define TCP_CNTL2__PERF_EN_OVERRIDE_MASK 0x000C0000L
+#define TCP_CNTL2__TC_TD_RAM_CLKEN_DISABLE_MASK 0x00100000L
+#define TCP_CNTL2__TC_TD_DATA_CLKEN_DISABLE_MASK 0x00200000L
+#define TCP_CNTL2__TCP_GL1_REQ_CLKEN_DISABLE_MASK 0x00400000L
+#define TCP_CNTL2__TCP_GL1R_SRC_CLKEN_DISABLE_MASK 0x00800000L
+#define TCP_CNTL2__SPARE_BIT_MASK 0x04000000L
+#define TCP_CNTL2__TAGRAM_XY_BIAS_OVERRIDE_MASK 0x18000000L
+#define TCP_CNTL2__TCP_REQ_MGCG_DISABLE_MASK 0x20000000L
+#define TCP_CNTL2__TCP_MISS_MGCG_DISABLE_MASK 0x40000000L
+#define TCP_CNTL2__DISABLE_MIPMAP_PARAM_CALC_SELF_GATING_MASK 0x80000000L
+//TCP_CREDIT
+#define TCP_CREDIT__LFIFO_RAM_DEPTH__SHIFT 0x0
+#define TCP_CREDIT__GL1_REQ_CREDIT__SHIFT 0xa
+#define TCP_CREDIT__REQ_FIFO_CREDIT__SHIFT 0x10
+#define TCP_CREDIT__TD_RAM_CREDIT__SHIFT 0x17
+#define TCP_CREDIT__TD_DATA_CREDIT__SHIFT 0x1d
+#define TCP_CREDIT__LFIFO_RAM_DEPTH_MASK 0x000003FFL
+#define TCP_CREDIT__GL1_REQ_CREDIT_MASK 0x0000FC00L
+#define TCP_CREDIT__REQ_FIFO_CREDIT_MASK 0x007F0000L
+#define TCP_CREDIT__TD_RAM_CREDIT_MASK 0x0F800000L
+#define TCP_CREDIT__TD_DATA_CREDIT_MASK 0xE0000000L
+
+
+// addressBlock: gc_pfonly_gdsdec
+//GDS_ENHANCE2
+#define GDS_ENHANCE2__DISABLE_MEMORY_VIOLATION_REPORT__SHIFT 0x0
+#define GDS_ENHANCE2__GDS_INTERFACES_FGCG_OVERRIDE__SHIFT 0x1
+#define GDS_ENHANCE2__DISABLE_PIPE_MEMORY_RD_OPT__SHIFT 0x2
+#define GDS_ENHANCE2__UNUSED__SHIFT 0x3
+#define GDS_ENHANCE2__DISABLE_MEMORY_VIOLATION_REPORT_MASK 0x00000001L
+#define GDS_ENHANCE2__GDS_INTERFACES_FGCG_OVERRIDE_MASK 0x00000002L
+#define GDS_ENHANCE2__DISABLE_PIPE_MEMORY_RD_OPT_MASK 0x00000004L
+#define GDS_ENHANCE2__UNUSED_MASK 0xFFFFFFF8L
+//GDS_OA_CGPG_RESTORE
+#define GDS_OA_CGPG_RESTORE__VMID__SHIFT 0x0
+#define GDS_OA_CGPG_RESTORE__MEID__SHIFT 0x8
+#define GDS_OA_CGPG_RESTORE__PIPEID__SHIFT 0xc
+#define GDS_OA_CGPG_RESTORE__QUEUEID__SHIFT 0x10
+#define GDS_OA_CGPG_RESTORE__UNUSED__SHIFT 0x14
+#define GDS_OA_CGPG_RESTORE__VMID_MASK 0x000000FFL
+#define GDS_OA_CGPG_RESTORE__MEID_MASK 0x00000F00L
+#define GDS_OA_CGPG_RESTORE__PIPEID_MASK 0x0000F000L
+#define GDS_OA_CGPG_RESTORE__QUEUEID_MASK 0x000F0000L
+#define GDS_OA_CGPG_RESTORE__UNUSED_MASK 0xFFF00000L
+
+
+// addressBlock: gc_pfonly_utcl1dec
+//UTCL1_CTRL_0
+#define UTCL1_CTRL_0__UTCL1_L0_REQ_VFIFO_DISABLE__SHIFT 0x0
+#define UTCL1_CTRL_0__UTCL1_UTCL2_INVACK_CDC_FIFO_DISABLE__SHIFT 0x1
+#define UTCL1_CTRL_0__RESERVED_0__SHIFT 0x2
+#define UTCL1_CTRL_0__UTCL1_UTCL2_REQ_CREDITS__SHIFT 0x3
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_CREDITS__SHIFT 0x9
+#define UTCL1_CTRL_0__UTCL1_LIMIT_INV_TO_ONE__SHIFT 0xd
+#define UTCL1_CTRL_0__UTCL1_LIMIT_XLAT_TO_ONE__SHIFT 0xe
+#define UTCL1_CTRL_0__UTCL1_UTCL2_FGCG_REPEATERS_OVERRIDE__SHIFT 0xf
+#define UTCL1_CTRL_0__UTCL1_INV_FILTER_VMID__SHIFT 0x10
+#define UTCL1_CTRL_0__UTCL1_RANGE_INV_FORCE_CHK_ALL__SHIFT 0x11
+#define UTCL1_CTRL_0__UTCL1_UTCL0_RET_FGCG_REPEATERS_OVERRIDE__SHIFT 0x12
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_FGCG_REPEATERS_OVERRIDE__SHIFT 0x13
+#define UTCL1_CTRL_0__GCRD_FGCG_DISABLE__SHIFT 0x14
+#define UTCL1_CTRL_0__UTCL1_MH_RANGE_INV_TO_VMID_OVERRIDE__SHIFT 0x15
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_DUPLICATES__SHIFT 0x16
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_REQUEST_SQUASHING__SHIFT 0x17
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_RECENT_BUFFER__SHIFT 0x18
+#define UTCL1_CTRL_0__UTCL1_XLAT_FAULT_LOCK_CTRL__SHIFT 0x19
+#define UTCL1_CTRL_0__UTCL1_REDUCE_CC_SIZE__SHIFT 0x1b
+#define UTCL1_CTRL_0__RESERVED_1__SHIFT 0x1d
+#define UTCL1_CTRL_0__MH_SPARE0__SHIFT 0x1e
+#define UTCL1_CTRL_0__RESERVED_2__SHIFT 0x1f
+#define UTCL1_CTRL_0__UTCL1_L0_REQ_VFIFO_DISABLE_MASK 0x00000001L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_INVACK_CDC_FIFO_DISABLE_MASK 0x00000002L
+#define UTCL1_CTRL_0__RESERVED_0_MASK 0x00000004L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_REQ_CREDITS_MASK 0x000001F8L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_CREDITS_MASK 0x00001E00L
+#define UTCL1_CTRL_0__UTCL1_LIMIT_INV_TO_ONE_MASK 0x00002000L
+#define UTCL1_CTRL_0__UTCL1_LIMIT_XLAT_TO_ONE_MASK 0x00004000L
+#define UTCL1_CTRL_0__UTCL1_UTCL2_FGCG_REPEATERS_OVERRIDE_MASK 0x00008000L
+#define UTCL1_CTRL_0__UTCL1_INV_FILTER_VMID_MASK 0x00010000L
+#define UTCL1_CTRL_0__UTCL1_RANGE_INV_FORCE_CHK_ALL_MASK 0x00020000L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_RET_FGCG_REPEATERS_OVERRIDE_MASK 0x00040000L
+#define UTCL1_CTRL_0__UTCL1_UTCL0_INVREQ_FGCG_REPEATERS_OVERRIDE_MASK 0x00080000L
+#define UTCL1_CTRL_0__GCRD_FGCG_DISABLE_MASK 0x00100000L
+#define UTCL1_CTRL_0__UTCL1_MH_RANGE_INV_TO_VMID_OVERRIDE_MASK 0x00200000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_DUPLICATES_MASK 0x00400000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_REQUEST_SQUASHING_MASK 0x00800000L
+#define UTCL1_CTRL_0__UTCL1_MH_DISABLE_RECENT_BUFFER_MASK 0x01000000L
+#define UTCL1_CTRL_0__UTCL1_XLAT_FAULT_LOCK_CTRL_MASK 0x06000000L
+#define UTCL1_CTRL_0__UTCL1_REDUCE_CC_SIZE_MASK 0x18000000L
+#define UTCL1_CTRL_0__RESERVED_1_MASK 0x20000000L
+#define UTCL1_CTRL_0__MH_SPARE0_MASK 0x40000000L
+#define UTCL1_CTRL_0__RESERVED_2_MASK 0x80000000L
+//UTCL1_UTCL0_INVREQ_DISABLE
+#define UTCL1_UTCL0_INVREQ_DISABLE__UTCL1_UTCL0_INVREQ_DISABLE__SHIFT 0x0
+#define UTCL1_UTCL0_INVREQ_DISABLE__UTCL1_UTCL0_INVREQ_DISABLE_MASK 0xFFFFFFFFL
+//UTCL1_CTRL_2
+#define UTCL1_CTRL_2__UTCL1_RNG_TO_VMID_INV_OVRD__SHIFT 0x0
+#define UTCL1_CTRL_2__UTCL1_PMM_INTERRUPT_CREDITS_OVERRIDE__SHIFT 0x4
+#define UTCL1_CTRL_2__UTCL1_CACHE_WRITE_PERM__SHIFT 0xa
+#define UTCL1_CTRL_2__UTCL1_PAGE_OVRD_DISABLE__SHIFT 0xb
+#define UTCL1_CTRL_2__UTCL1_SPARE0__SHIFT 0xc
+#define UTCL1_CTRL_2__UTCL1_SPARE1__SHIFT 0xd
+#define UTCL1_CTRL_2__RESERVED__SHIFT 0xe
+#define UTCL1_CTRL_2__UTCL1_RNG_TO_VMID_INV_OVRD_MASK 0x0000000FL
+#define UTCL1_CTRL_2__UTCL1_PMM_INTERRUPT_CREDITS_OVERRIDE_MASK 0x000003F0L
+#define UTCL1_CTRL_2__UTCL1_CACHE_WRITE_PERM_MASK 0x00000400L
+#define UTCL1_CTRL_2__UTCL1_PAGE_OVRD_DISABLE_MASK 0x00000800L
+#define UTCL1_CTRL_2__UTCL1_SPARE0_MASK 0x00001000L
+#define UTCL1_CTRL_2__UTCL1_SPARE1_MASK 0x00002000L
+#define UTCL1_CTRL_2__RESERVED_MASK 0xFFFFC000L
+//UTCL1_FIFO_SIZING
+#define UTCL1_FIFO_SIZING__UTCL1_UTCL2_INVACK_CDC_FIFO_THRESH__SHIFT 0x0
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_LOW__SHIFT 0x3
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_HIGH__SHIFT 0x10
+#define UTCL1_FIFO_SIZING__UTCL1_UTCL2_INVACK_CDC_FIFO_THRESH_MASK 0x00000007L
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_LOW_MASK 0x0000FFF8L
+#define UTCL1_FIFO_SIZING__UTCL1_GENERAL_SIZING_CTRL_HIGH_MASK 0xFFFF0000L
+//GCRD_SA0_TARGETS_DISABLE
+#define GCRD_SA0_TARGETS_DISABLE__GCRD_SA0_TARGETS_DISABLE__SHIFT 0x0
+#define GCRD_SA0_TARGETS_DISABLE__GCRD_SA0_TARGETS_DISABLE_MASK 0x0007FFFFL
+//GCRD_SA1_TARGETS_DISABLE
+#define GCRD_SA1_TARGETS_DISABLE__GCRD_SA1_TARGETS_DISABLE__SHIFT 0x0
+#define GCRD_SA1_TARGETS_DISABLE__GCRD_SA1_TARGETS_DISABLE_MASK 0x0007FFFFL
+//GCRD_CREDIT_SAFE
+#define GCRD_CREDIT_SAFE__GCRD_CHAIN_CREDIT_SAFE_REG__SHIFT 0x0
+#define GCRD_CREDIT_SAFE__GCRD_TARGET_CREDIT_SAFE_REG__SHIFT 0x4
+#define GCRD_CREDIT_SAFE__GCRD_CHAIN_CREDIT_SAFE_REG_MASK 0x00000007L
+#define GCRD_CREDIT_SAFE__GCRD_TARGET_CREDIT_SAFE_REG_MASK 0x00000070L
+
+
+// addressBlock: gc_pfonly_pmmdec
+//GCR_GENERAL_CNTL
+#define GCR_GENERAL_CNTL__FORCE_4K_L2_RESP__SHIFT 0x0
+#define GCR_GENERAL_CNTL__REDUCE_HALF_MAIN_WQ__SHIFT 0x1
+#define GCR_GENERAL_CNTL__REDUCE_HALF_PHY_WQ__SHIFT 0x2
+#define GCR_GENERAL_CNTL__FORCE_INV_ALL__SHIFT 0x3
+#define GCR_GENERAL_CNTL__HI_PRIORITY_CNTL__SHIFT 0x4
+#define GCR_GENERAL_CNTL__HI_PRIORITY_DISABLE__SHIFT 0x6
+#define GCR_GENERAL_CNTL__BIG_PAGE_FILTER_DISABLE__SHIFT 0x7
+#define GCR_GENERAL_CNTL__PERF_CNTR_ENABLE__SHIFT 0x8
+#define GCR_GENERAL_CNTL__FORCE_SINGLE_WQ__SHIFT 0x9
+#define GCR_GENERAL_CNTL__UTCL2_REQ_PERM__SHIFT 0xa
+#define GCR_GENERAL_CNTL__TARGET_MGCG_CLKEN_DIS__SHIFT 0xd
+#define GCR_GENERAL_CNTL__MIXED_RANGE_MODE_DIS__SHIFT 0xe
+#define GCR_GENERAL_CNTL__ENABLE_16K_UTCL2_REQ__SHIFT 0xf
+#define GCR_GENERAL_CNTL__DISABLE_FGCG__SHIFT 0x10
+#define GCR_GENERAL_CNTL__CLIENT_ID__SHIFT 0x14
+#define GCR_GENERAL_CNTL__FORCE_4K_L2_RESP_MASK 0x00000001L
+#define GCR_GENERAL_CNTL__REDUCE_HALF_MAIN_WQ_MASK 0x00000002L
+#define GCR_GENERAL_CNTL__REDUCE_HALF_PHY_WQ_MASK 0x00000004L
+#define GCR_GENERAL_CNTL__FORCE_INV_ALL_MASK 0x00000008L
+#define GCR_GENERAL_CNTL__HI_PRIORITY_CNTL_MASK 0x00000030L
+#define GCR_GENERAL_CNTL__HI_PRIORITY_DISABLE_MASK 0x00000040L
+#define GCR_GENERAL_CNTL__BIG_PAGE_FILTER_DISABLE_MASK 0x00000080L
+#define GCR_GENERAL_CNTL__PERF_CNTR_ENABLE_MASK 0x00000100L
+#define GCR_GENERAL_CNTL__FORCE_SINGLE_WQ_MASK 0x00000200L
+#define GCR_GENERAL_CNTL__UTCL2_REQ_PERM_MASK 0x00001C00L
+#define GCR_GENERAL_CNTL__TARGET_MGCG_CLKEN_DIS_MASK 0x00002000L
+#define GCR_GENERAL_CNTL__MIXED_RANGE_MODE_DIS_MASK 0x00004000L
+#define GCR_GENERAL_CNTL__ENABLE_16K_UTCL2_REQ_MASK 0x00008000L
+#define GCR_GENERAL_CNTL__DISABLE_FGCG_MASK 0x00010000L
+#define GCR_GENERAL_CNTL__CLIENT_ID_MASK 0x1FF00000L
+//GCR_TARGET_DISABLE
+#define GCR_TARGET_DISABLE__DISABLE_SE0_PHY__SHIFT 0x0
+#define GCR_TARGET_DISABLE__DISABLE_SE0_VIRT__SHIFT 0x1
+#define GCR_TARGET_DISABLE__DISABLE_SE1_PHY__SHIFT 0x2
+#define GCR_TARGET_DISABLE__DISABLE_SE1_VIRT__SHIFT 0x3
+#define GCR_TARGET_DISABLE__DISABLE_SE2_PHY__SHIFT 0x4
+#define GCR_TARGET_DISABLE__DISABLE_SE2_VIRT__SHIFT 0x5
+#define GCR_TARGET_DISABLE__DISABLE_GL2A0_PHY__SHIFT 0x6
+#define GCR_TARGET_DISABLE__DISABLE_GL2A1_PHY__SHIFT 0x7
+#define GCR_TARGET_DISABLE__DISABLE_GL2A2_PHY__SHIFT 0x8
+#define GCR_TARGET_DISABLE__DISABLE_GL2A3_PHY__SHIFT 0x9
+#define GCR_TARGET_DISABLE__DISABLE_SE3_PHY__SHIFT 0xa
+#define GCR_TARGET_DISABLE__DISABLE_SE3_VIRT__SHIFT 0xb
+#define GCR_TARGET_DISABLE__DISABLE_SE4_PHY__SHIFT 0xc
+#define GCR_TARGET_DISABLE__DISABLE_SE4_VIRT__SHIFT 0xd
+#define GCR_TARGET_DISABLE__DISABLE_SE5_PHY__SHIFT 0xe
+#define GCR_TARGET_DISABLE__DISABLE_SE5_VIRT__SHIFT 0xf
+#define GCR_TARGET_DISABLE__SE0_INACTIVE_STATUS__SHIFT 0x10
+#define GCR_TARGET_DISABLE__SE1_INACTIVE_STATUS__SHIFT 0x11
+#define GCR_TARGET_DISABLE__SE2_INACTIVE_STATUS__SHIFT 0x12
+#define GCR_TARGET_DISABLE__SE3_INACTIVE_STATUS__SHIFT 0x13
+#define GCR_TARGET_DISABLE__SE4_INACTIVE_STATUS__SHIFT 0x14
+#define GCR_TARGET_DISABLE__SE5_INACTIVE_STATUS__SHIFT 0x15
+#define GCR_TARGET_DISABLE__DISABLE_SE0_PHY_MASK 0x00000001L
+#define GCR_TARGET_DISABLE__DISABLE_SE0_VIRT_MASK 0x00000002L
+#define GCR_TARGET_DISABLE__DISABLE_SE1_PHY_MASK 0x00000004L
+#define GCR_TARGET_DISABLE__DISABLE_SE1_VIRT_MASK 0x00000008L
+#define GCR_TARGET_DISABLE__DISABLE_SE2_PHY_MASK 0x00000010L
+#define GCR_TARGET_DISABLE__DISABLE_SE2_VIRT_MASK 0x00000020L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A0_PHY_MASK 0x00000040L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A1_PHY_MASK 0x00000080L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A2_PHY_MASK 0x00000100L
+#define GCR_TARGET_DISABLE__DISABLE_GL2A3_PHY_MASK 0x00000200L
+#define GCR_TARGET_DISABLE__DISABLE_SE3_PHY_MASK 0x00000400L
+#define GCR_TARGET_DISABLE__DISABLE_SE3_VIRT_MASK 0x00000800L
+#define GCR_TARGET_DISABLE__DISABLE_SE4_PHY_MASK 0x00001000L
+#define GCR_TARGET_DISABLE__DISABLE_SE4_VIRT_MASK 0x00002000L
+#define GCR_TARGET_DISABLE__DISABLE_SE5_PHY_MASK 0x00004000L
+#define GCR_TARGET_DISABLE__DISABLE_SE5_VIRT_MASK 0x00008000L
+#define GCR_TARGET_DISABLE__SE0_INACTIVE_STATUS_MASK 0x00010000L
+#define GCR_TARGET_DISABLE__SE1_INACTIVE_STATUS_MASK 0x00020000L
+#define GCR_TARGET_DISABLE__SE2_INACTIVE_STATUS_MASK 0x00040000L
+#define GCR_TARGET_DISABLE__SE3_INACTIVE_STATUS_MASK 0x00080000L
+#define GCR_TARGET_DISABLE__SE4_INACTIVE_STATUS_MASK 0x00100000L
+#define GCR_TARGET_DISABLE__SE5_INACTIVE_STATUS_MASK 0x00200000L
+//GCR_CMD_STATUS
+#define GCR_CMD_STATUS__GCR_CONTROL__SHIFT 0x0
+#define GCR_CMD_STATUS__GCR_SRC__SHIFT 0x13
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN__SHIFT 0x17
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_VMID__SHIFT 0x18
+#define GCR_CMD_STATUS__UTCL2_NACK_STATUS__SHIFT 0x1c
+#define GCR_CMD_STATUS__GCR_SEQ_OP_ERROR__SHIFT 0x1e
+#define GCR_CMD_STATUS__UTCL2_NACK_ERROR__SHIFT 0x1f
+#define GCR_CMD_STATUS__GCR_CONTROL_MASK 0x0007FFFFL
+#define GCR_CMD_STATUS__GCR_SRC_MASK 0x00380000L
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_MASK 0x00800000L
+#define GCR_CMD_STATUS__GCR_TLB_SHOOTDOWN_VMID_MASK 0x0F000000L
+#define GCR_CMD_STATUS__UTCL2_NACK_STATUS_MASK 0x30000000L
+#define GCR_CMD_STATUS__GCR_SEQ_OP_ERROR_MASK 0x40000000L
+#define GCR_CMD_STATUS__UTCL2_NACK_ERROR_MASK 0x80000000L
+//GCR_SPARE
+#define GCR_SPARE__SPARE_BIT_1__SHIFT 0x1
+#define GCR_SPARE__SPARE_BIT_2__SHIFT 0x2
+#define GCR_SPARE__SPARE_BIT_3__SHIFT 0x3
+#define GCR_SPARE__SPARE_BIT_4__SHIFT 0x4
+#define GCR_SPARE__SPARE_BIT_5__SHIFT 0x5
+#define GCR_SPARE__SPARE_BIT_6__SHIFT 0x6
+#define GCR_SPARE__SPARE_BIT_7__SHIFT 0x7
+#define GCR_SPARE__UTCL2_REQ_CREDIT__SHIFT 0x8
+#define GCR_SPARE__GCRD_GL2A_REQ_CREDIT__SHIFT 0x10
+#define GCR_SPARE__GCRD_SE_REQ_CREDIT__SHIFT 0x14
+#define GCR_SPARE__SPARE_BIT_31_24__SHIFT 0x18
+#define GCR_SPARE__SPARE_BIT_1_MASK 0x00000002L
+#define GCR_SPARE__SPARE_BIT_2_MASK 0x00000004L
+#define GCR_SPARE__SPARE_BIT_3_MASK 0x00000008L
+#define GCR_SPARE__SPARE_BIT_4_MASK 0x00000010L
+#define GCR_SPARE__SPARE_BIT_5_MASK 0x00000020L
+#define GCR_SPARE__SPARE_BIT_6_MASK 0x00000040L
+#define GCR_SPARE__SPARE_BIT_7_MASK 0x00000080L
+#define GCR_SPARE__UTCL2_REQ_CREDIT_MASK 0x0000FF00L
+#define GCR_SPARE__GCRD_GL2A_REQ_CREDIT_MASK 0x000F0000L
+#define GCR_SPARE__GCRD_SE_REQ_CREDIT_MASK 0x00F00000L
+#define GCR_SPARE__SPARE_BIT_31_24_MASK 0xFF000000L
+//PMM_CNTL2
+#define PMM_CNTL2__GCEA_MAM_DISABLE__SHIFT 0x0
+#define PMM_CNTL2__ABIT_FORCE_FLUSH_OVERRIDE__SHIFT 0x18
+#define PMM_CNTL2__ABIT_TIMER_FLUSH_OVERRIDE__SHIFT 0x19
+#define PMM_CNTL2__PMM_IH_INTERRUPT_CREDITS_OVERRIDE__SHIFT 0x1a
+#define PMM_CNTL2__ABIT_INTR_ON_FLUSH_DONE__SHIFT 0x1e
+#define PMM_CNTL2__RESERVED__SHIFT 0x1f
+#define PMM_CNTL2__GCEA_MAM_DISABLE_MASK 0x00FFFFFFL
+#define PMM_CNTL2__ABIT_FORCE_FLUSH_OVERRIDE_MASK 0x01000000L
+#define PMM_CNTL2__ABIT_TIMER_FLUSH_OVERRIDE_MASK 0x02000000L
+#define PMM_CNTL2__PMM_IH_INTERRUPT_CREDITS_OVERRIDE_MASK 0x3C000000L
+#define PMM_CNTL2__ABIT_INTR_ON_FLUSH_DONE_MASK 0x40000000L
+#define PMM_CNTL2__RESERVED_MASK 0x80000000L
+
+
+// addressBlock: gc_pfonly_gccacdec
+//GC_CAC_CTRL_1
+#define GC_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define GC_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
+#define GC_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
+#define GC_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L
+//GC_CAC_CTRL_2
+#define GC_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE__SHIFT 0x1
+#define GC_CAC_CTRL_2__GC_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x2
+#define GC_CAC_CTRL_2__TOGGLE_EN__SHIFT 0x3
+#define GC_CAC_CTRL_2__INTR_EN__SHIFT 0x4
+#define GC_CAC_CTRL_2__CAC_COUNTER_SNAP_SEL__SHIFT 0x5
+#define GC_CAC_CTRL_2__SE_AGGR_ACC_EN__SHIFT 0x6
+#define GC_CAC_CTRL_2__GC_AGGR_ACC_EN__SHIFT 0xe
+#define GC_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define GC_CAC_CTRL_2__GC_LCAC_ENABLE_MASK 0x00000002L
+#define GC_CAC_CTRL_2__GC_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000004L
+#define GC_CAC_CTRL_2__TOGGLE_EN_MASK 0x00000008L
+#define GC_CAC_CTRL_2__INTR_EN_MASK 0x00000010L
+#define GC_CAC_CTRL_2__CAC_COUNTER_SNAP_SEL_MASK 0x00000020L
+#define GC_CAC_CTRL_2__SE_AGGR_ACC_EN_MASK 0x00003FC0L
+#define GC_CAC_CTRL_2__GC_AGGR_ACC_EN_MASK 0x00004000L
+//GC_CAC_AGGR_LOWER
+#define GC_CAC_AGGR_LOWER__GC_AGGR_31_0__SHIFT 0x0
+#define GC_CAC_AGGR_LOWER__GC_AGGR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_UPPER
+#define GC_CAC_AGGR_UPPER__GC_AGGR_63_32__SHIFT 0x0
+#define GC_CAC_AGGR_UPPER__GC_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_LOWER
+#define SE0_CAC_AGGR_LOWER__SE0_AGGR_31_0__SHIFT 0x0
+#define SE0_CAC_AGGR_LOWER__SE0_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_UPPER
+#define SE0_CAC_AGGR_UPPER__SE0_AGGR_63_32__SHIFT 0x0
+#define SE0_CAC_AGGR_UPPER__SE0_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_LOWER
+#define SE1_CAC_AGGR_LOWER__SE1_AGGR_31_0__SHIFT 0x0
+#define SE1_CAC_AGGR_LOWER__SE1_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_UPPER
+#define SE1_CAC_AGGR_UPPER__SE1_AGGR_63_32__SHIFT 0x0
+#define SE1_CAC_AGGR_UPPER__SE1_AGGR_63_32_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_LOWER
+#define SE2_CAC_AGGR_LOWER__SE2_AGGR_31_0__SHIFT 0x0
+#define SE2_CAC_AGGR_LOWER__SE2_AGGR_31_0_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_UPPER
+#define SE2_CAC_AGGR_UPPER__SE2_AGGR_63_32__SHIFT 0x0
+#define SE2_CAC_AGGR_UPPER__SE2_AGGR_63_32_MASK 0xFFFFFFFFL
+//GC_CAC_AGGR_GFXCLK_CYCLE
+#define GC_CAC_AGGR_GFXCLK_CYCLE__GC_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define GC_CAC_AGGR_GFXCLK_CYCLE__GC_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE0_CAC_AGGR_GFXCLK_CYCLE
+#define SE0_CAC_AGGR_GFXCLK_CYCLE__SE0_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE0_CAC_AGGR_GFXCLK_CYCLE__SE0_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE1_CAC_AGGR_GFXCLK_CYCLE
+#define SE1_CAC_AGGR_GFXCLK_CYCLE__SE1_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE1_CAC_AGGR_GFXCLK_CYCLE__SE1_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//SE2_CAC_AGGR_GFXCLK_CYCLE
+#define SE2_CAC_AGGR_GFXCLK_CYCLE__SE2_AGGR_GFXCLK_CYCLE__SHIFT 0x0
+#define SE2_CAC_AGGR_GFXCLK_CYCLE__SE2_AGGR_GFXCLK_CYCLE_MASK 0xFFFFFFFFL
+//GC_EDC_CTRL
+#define GC_EDC_CTRL__EDC_EN__SHIFT 0x0
+#define GC_EDC_CTRL__EDC_SW_RST__SHIFT 0x1
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT 0x2
+#define GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT 0x3
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT 0x4
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT 0xa
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0xb
+#define GC_EDC_CTRL__EDC_LEVEL_SEL__SHIFT 0xf
+#define GC_EDC_CTRL__EDC_ALGORITHM_MODE__SHIFT 0x10
+#define GC_EDC_CTRL__EDC_AVGDIV__SHIFT 0x11
+#define GC_EDC_CTRL__PSM_THROTTLE_SRC_SEL__SHIFT 0x15
+#define GC_EDC_CTRL__THROTTLE_SRC0_MASK__SHIFT 0x18
+#define GC_EDC_CTRL__THROTTLE_SRC1_MASK__SHIFT 0x19
+#define GC_EDC_CTRL__THROTTLE_SRC2_MASK__SHIFT 0x1a
+#define GC_EDC_CTRL__THROTTLE_SRC3_MASK__SHIFT 0x1b
+#define GC_EDC_CTRL__EDC_CREDIT_SHIFT_BIT_NUMS__SHIFT 0x1c
+#define GC_EDC_CTRL__EDC_EN_MASK 0x00000001L
+#define GC_EDC_CTRL__EDC_SW_RST_MASK 0x00000002L
+#define GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define GC_EDC_CTRL__EDC_FORCE_STALL_MASK 0x00000008L
+#define GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK 0x000003F0L
+#define GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK 0x00000400L
+#define GC_EDC_CTRL__EDC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00007800L
+#define GC_EDC_CTRL__EDC_LEVEL_SEL_MASK 0x00008000L
+#define GC_EDC_CTRL__EDC_ALGORITHM_MODE_MASK 0x00010000L
+#define GC_EDC_CTRL__EDC_AVGDIV_MASK 0x001E0000L
+#define GC_EDC_CTRL__PSM_THROTTLE_SRC_SEL_MASK 0x00E00000L
+#define GC_EDC_CTRL__THROTTLE_SRC0_MASK_MASK 0x01000000L
+#define GC_EDC_CTRL__THROTTLE_SRC1_MASK_MASK 0x02000000L
+#define GC_EDC_CTRL__THROTTLE_SRC2_MASK_MASK 0x04000000L
+#define GC_EDC_CTRL__THROTTLE_SRC3_MASK_MASK 0x08000000L
+#define GC_EDC_CTRL__EDC_CREDIT_SHIFT_BIT_NUMS_MASK 0xF0000000L
+//GC_EDC_THRESHOLD
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT 0x0
+#define GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK 0xFFFFFFFFL
+//GC_EDC_STRETCH_CTRL
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_EN__SHIFT 0x0
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_DELAY__SHIFT 0x1
+#define GC_EDC_STRETCH_CTRL__EDC_UNSTRETCH_DELAY__SHIFT 0xa
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_EN_MASK 0x00000001L
+#define GC_EDC_STRETCH_CTRL__EDC_STRETCH_DELAY_MASK 0x000003FEL
+#define GC_EDC_STRETCH_CTRL__EDC_UNSTRETCH_DELAY_MASK 0x0007FC00L
+//GC_EDC_STRETCH_THRESHOLD
+#define GC_EDC_STRETCH_THRESHOLD__EDC_STRETCH_THRESHOLD__SHIFT 0x0
+#define GC_EDC_STRETCH_THRESHOLD__EDC_STRETCH_THRESHOLD_MASK 0xFFFFFFFFL
+//EDC_HYSTERESIS_CNTL
+#define EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_TIMER__SHIFT 0x8
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_EN__SHIFT 0x10
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_MODE__SHIFT 0x11
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_MODE__SHIFT 0x14
+#define EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_TIMER_MASK 0x0000FF00L
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_EN_MASK 0x00010000L
+#define EDC_HYSTERESIS_CNTL__PATTERN_EXTEND_MODE_MASK 0x000E0000L
+#define EDC_HYSTERESIS_CNTL__EDC_AGGR_MODE_MASK 0x00100000L
+//GC_THROTTLE_CTRL
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST__SHIFT 0x0
+#define GC_THROTTLE_CTRL__GC_EDC_STALL_EN__SHIFT 0x1
+#define GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT 0x2
+#define GC_THROTTLE_CTRL__PWRBRK_POLARITY_CNTL__SHIFT 0x3
+#define GC_THROTTLE_CTRL__PCC_STALL_EN__SHIFT 0x4
+#define GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT 0x5
+#define GC_THROTTLE_CTRL__GC_EDC_ONLY_MODE__SHIFT 0x6
+#define GC_THROTTLE_CTRL__GC_EDC_OVERRIDE__SHIFT 0x7
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE__SHIFT 0x8
+#define GC_THROTTLE_CTRL__PWRBRK_OVERRIDE__SHIFT 0x9
+#define GC_THROTTLE_CTRL__GC_EDC_PERF_COUNTER_EN__SHIFT 0xa
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN__SHIFT 0xb
+#define GC_THROTTLE_CTRL__PWRBRK_PERF_COUNTER_EN__SHIFT 0xc
+#define GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT 0xd
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_PERF_COUNTER_EN__SHIFT 0x17
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_LOG_INDEX__SHIFT 0x18
+#define GC_THROTTLE_CTRL__LUT_HW_UPDATE__SHIFT 0x1d
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_CLK_EN_OVERRIDE__SHIFT 0x1e
+#define GC_THROTTLE_CTRL__PCC_POLARITY_CNTL__SHIFT 0x1f
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_SW_RST_MASK 0x00000001L
+#define GC_THROTTLE_CTRL__GC_EDC_STALL_EN_MASK 0x00000002L
+#define GC_THROTTLE_CTRL__PWRBRK_STALL_EN_MASK 0x00000004L
+#define GC_THROTTLE_CTRL__PWRBRK_POLARITY_CNTL_MASK 0x00000008L
+#define GC_THROTTLE_CTRL__PCC_STALL_EN_MASK 0x00000010L
+#define GC_THROTTLE_CTRL__PATTERN_MODE_MASK 0x00000020L
+#define GC_THROTTLE_CTRL__GC_EDC_ONLY_MODE_MASK 0x00000040L
+#define GC_THROTTLE_CTRL__GC_EDC_OVERRIDE_MASK 0x00000080L
+#define GC_THROTTLE_CTRL__PCC_OVERRIDE_MASK 0x00000100L
+#define GC_THROTTLE_CTRL__PWRBRK_OVERRIDE_MASK 0x00000200L
+#define GC_THROTTLE_CTRL__GC_EDC_PERF_COUNTER_EN_MASK 0x00000400L
+#define GC_THROTTLE_CTRL__PCC_PERF_COUNTER_EN_MASK 0x00000800L
+#define GC_THROTTLE_CTRL__PWRBRK_PERF_COUNTER_EN_MASK 0x00001000L
+#define GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL_MASK 0x007FE000L
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_PERF_COUNTER_EN_MASK 0x00800000L
+#define GC_THROTTLE_CTRL__FIXED_PATTERN_LOG_INDEX_MASK 0x1F000000L
+#define GC_THROTTLE_CTRL__LUT_HW_UPDATE_MASK 0x20000000L
+#define GC_THROTTLE_CTRL__THROTTLE_CTRL_CLK_EN_OVERRIDE_MASK 0x40000000L
+#define GC_THROTTLE_CTRL__PCC_POLARITY_CNTL_MASK 0x80000000L
+//GC_THROTTLE_CTRL1
+#define GC_THROTTLE_CTRL1__PCC_FP_PROGRAM_STEP_EN__SHIFT 0x0
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MIN_STEP__SHIFT 0x1
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MAX_STEP__SHIFT 0x5
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_UPWARDS_STEP_SIZE__SHIFT 0xa
+#define GC_THROTTLE_CTRL1__PWRBRK_FP_PROGRAM_STEP_EN__SHIFT 0xd
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MIN_STEP__SHIFT 0xe
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MAX_STEP__SHIFT 0x12
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_UPWARDS_STEP_SIZE__SHIFT 0x17
+#define GC_THROTTLE_CTRL1__FIXED_PATTERN_SELECT__SHIFT 0x1a
+#define GC_THROTTLE_CTRL1__GC_EDC_STRETCH_PERF_COUNTER_EN__SHIFT 0x1e
+#define GC_THROTTLE_CTRL1__GC_EDC_UNSTRETCH_PERF_COUNTER_EN__SHIFT 0x1f
+#define GC_THROTTLE_CTRL1__PCC_FP_PROGRAM_STEP_EN_MASK 0x00000001L
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MIN_STEP_MASK 0x0000001EL
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_MAX_STEP_MASK 0x000003E0L
+#define GC_THROTTLE_CTRL1__PCC_PROGRAM_UPWARDS_STEP_SIZE_MASK 0x00001C00L
+#define GC_THROTTLE_CTRL1__PWRBRK_FP_PROGRAM_STEP_EN_MASK 0x00002000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MIN_STEP_MASK 0x0003C000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_MAX_STEP_MASK 0x007C0000L
+#define GC_THROTTLE_CTRL1__PWRBRK_PROGRAM_UPWARDS_STEP_SIZE_MASK 0x03800000L
+#define GC_THROTTLE_CTRL1__FIXED_PATTERN_SELECT_MASK 0x0C000000L
+#define GC_THROTTLE_CTRL1__GC_EDC_STRETCH_PERF_COUNTER_EN_MASK 0x40000000L
+#define GC_THROTTLE_CTRL1__GC_EDC_UNSTRETCH_PERF_COUNTER_EN_MASK 0x80000000L
+//PCC_STALL_PATTERN_CTRL
+#define PCC_STALL_PATTERN_CTRL__PCC_STEP_INTERVAL__SHIFT 0x0
+#define PCC_STALL_PATTERN_CTRL__PCC_BEGIN_STEP__SHIFT 0xa
+#define PCC_STALL_PATTERN_CTRL__PCC_END_STEP__SHIFT 0xf
+#define PCC_STALL_PATTERN_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x14
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_INCR__SHIFT 0x18
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_DECR__SHIFT 0x19
+#define PCC_STALL_PATTERN_CTRL__PCC_DITHER_MODE__SHIFT 0x1a
+#define PCC_STALL_PATTERN_CTRL__PCC_STEP_INTERVAL_MASK 0x000003FFL
+#define PCC_STALL_PATTERN_CTRL__PCC_BEGIN_STEP_MASK 0x00007C00L
+#define PCC_STALL_PATTERN_CTRL__PCC_END_STEP_MASK 0x000F8000L
+#define PCC_STALL_PATTERN_CTRL__PCC_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00F00000L
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_INCR_MASK 0x01000000L
+#define PCC_STALL_PATTERN_CTRL__PCC_INST_THROT_DECR_MASK 0x02000000L
+#define PCC_STALL_PATTERN_CTRL__PCC_DITHER_MODE_MASK 0x04000000L
+//PWRBRK_STALL_PATTERN_CTRL
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT 0xa
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT 0xf
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT 0x14
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL_MASK 0x000003FFL
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP_MASK 0x00007C00L
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP_MASK 0x000F8000L
+#define PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS_MASK 0x00F00000L
+//PCC_STALL_PATTERN_1_2
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1__SHIFT 0x0
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2__SHIFT 0x10
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_1_2__PCC_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_3_4
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3__SHIFT 0x0
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4__SHIFT 0x10
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_3_4__PCC_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_5_6
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5__SHIFT 0x0
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6__SHIFT 0x10
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PCC_STALL_PATTERN_5_6__PCC_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PCC_STALL_PATTERN_7
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7__SHIFT 0x0
+#define PCC_STALL_PATTERN_7__PCC_STALL_PATTERN_7_MASK 0x00007FFFL
+//PWRBRK_STALL_PATTERN_1_2
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_1__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_2__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_1_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_1_2__PWRBRK_STALL_PATTERN_2_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_3_4
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_3__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_4__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_3_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_3_4__PWRBRK_STALL_PATTERN_4_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_5_6
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_5__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_6__SHIFT 0x10
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_5_MASK 0x00007FFFL
+#define PWRBRK_STALL_PATTERN_5_6__PWRBRK_STALL_PATTERN_6_MASK 0x7FFF0000L
+//PWRBRK_STALL_PATTERN_7
+#define PWRBRK_STALL_PATTERN_7__PWRBRK_STALL_PATTERN_7__SHIFT 0x0
+#define PWRBRK_STALL_PATTERN_7__PWRBRK_STALL_PATTERN_7_MASK 0x00007FFFL
+//DIDT_STALL_PATTERN_CTRL
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CTRL_EN__SHIFT 0x0
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_SW_RST__SHIFT 0x1
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CLK_EN_OVERRIDE__SHIFT 0x2
+#define DIDT_STALL_PATTERN_CTRL__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT 0x3
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_EN__SHIFT 0x7
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_MODE__SHIFT 0x8
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CTRL_EN_MASK 0x00000001L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_SW_RST_MASK 0x00000002L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_DROOP_CLK_EN_OVERRIDE_MASK 0x00000004L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_STALL_PATTERN_BIT_NUMS_MASK 0x00000078L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_EN_MASK 0x00000080L
+#define DIDT_STALL_PATTERN_CTRL__DIDT_PATTERN_EXTEND_MODE_MASK 0x00000700L
+//DIDT_STALL_PATTERN_1_2
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT 0x0
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT 0x10
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_3_4
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT 0x0
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT 0x10
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_5_6
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT 0x0
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT 0x10
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK 0x00007FFFL
+#define DIDT_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK 0x7FFF0000L
+//DIDT_STALL_PATTERN_7
+#define DIDT_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT 0x0
+#define DIDT_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK 0x00007FFFL
+//PCC_PWRBRK_HYSTERESIS_CTRL
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PCC_MAX_HYSTERESIS__SHIFT 0x0
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PWRBRK_MAX_HYSTERESIS__SHIFT 0x8
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PCC_MAX_HYSTERESIS_MASK 0x000000FFL
+#define PCC_PWRBRK_HYSTERESIS_CTRL__PWRBRK_MAX_HYSTERESIS_MASK 0x0000FF00L
+//EDC_STRETCH_PERF_COUNTER
+#define EDC_STRETCH_PERF_COUNTER__STRETCH_PERF_COUNTER__SHIFT 0x0
+#define EDC_STRETCH_PERF_COUNTER__STRETCH_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_UNSTRETCH_PERF_COUNTER
+#define EDC_UNSTRETCH_PERF_COUNTER__UNSTRETCH_PERF_COUNTER__SHIFT 0x0
+#define EDC_UNSTRETCH_PERF_COUNTER__UNSTRETCH_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_STRETCH_NUM_PERF_COUNTER
+#define EDC_STRETCH_NUM_PERF_COUNTER__STRETCH_NUM_PERF_COUNTER__SHIFT 0x0
+#define EDC_STRETCH_NUM_PERF_COUNTER__STRETCH_NUM_PERF_COUNTER_MASK 0xFFFFFFFFL
+//GC_EDC_STATUS
+#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL__SHIFT 0x0
+#define GC_EDC_STATUS__GPIO_IN_0__SHIFT 0x3
+#define GC_EDC_STATUS__GPIO_IN_1__SHIFT 0x4
+#define GC_EDC_STATUS__EDC_THROTTLE_LEVEL_MASK 0x00000007L
+#define GC_EDC_STATUS__GPIO_IN_0_MASK 0x00000008L
+#define GC_EDC_STATUS__GPIO_IN_1_MASK 0x00000010L
+//GC_EDC_OVERFLOW
+#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW__SHIFT 0x0
+#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER__SHIFT 0x1
+#define GC_EDC_OVERFLOW__EDC_ROLLING_POWER_DELTA_OVERFLOW_MASK 0x00000001L
+#define GC_EDC_OVERFLOW__EDC_THROTTLE_LEVEL_OVERFLOW_COUNTER_MASK 0x0001FFFEL
+//GC_EDC_ROLLING_POWER_DELTA
+#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA__SHIFT 0x0
+#define GC_EDC_ROLLING_POWER_DELTA__EDC_ROLLING_POWER_DELTA_MASK 0xFFFFFFFFL
+//GC_THROTTLE_STATUS
+#define GC_THROTTLE_STATUS__FSM_STATE__SHIFT 0x0
+#define GC_THROTTLE_STATUS__PATTERN_INDEX__SHIFT 0x4
+#define GC_THROTTLE_STATUS__FSM_STATE_MASK 0x0000000FL
+#define GC_THROTTLE_STATUS__PATTERN_INDEX_MASK 0x000001F0L
+//EDC_PERF_COUNTER
+#define EDC_PERF_COUNTER__EDC_PERF_COUNTER__SHIFT 0x0
+#define EDC_PERF_COUNTER__EDC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//PCC_PERF_COUNTER
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER__SHIFT 0x0
+#define PCC_PERF_COUNTER__PCC_PERF_COUNTER_MASK 0xFFFFFFFFL
+//PWRBRK_PERF_COUNTER
+#define PWRBRK_PERF_COUNTER__PWRBRK_PERF_COUNTER__SHIFT 0x0
+#define PWRBRK_PERF_COUNTER__PWRBRK_PERF_COUNTER_MASK 0xFFFFFFFFL
+//EDC_HYSTERESIS_STAT
+#define EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define EDC_HYSTERESIS_STAT__EDC_STATUS__SHIFT 0x8
+#define EDC_HYSTERESIS_STAT__EDC_CREDIT_INCR_OVERFLOW__SHIFT 0x9
+#define EDC_HYSTERESIS_STAT__EDC_THRESHOLD_SEL__SHIFT 0xa
+#define EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define EDC_HYSTERESIS_STAT__EDC_STATUS_MASK 0x00000100L
+#define EDC_HYSTERESIS_STAT__EDC_CREDIT_INCR_OVERFLOW_MASK 0x00000200L
+#define EDC_HYSTERESIS_STAT__EDC_THRESHOLD_SEL_MASK 0x00000400L
+//GC_CAC_WEIGHT_CP_0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CP_0__WEIGHT_CP_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CP_1
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CP_1__WEIGHT_CP_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_EA_0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_0__WEIGHT_EA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_1
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_1__WEIGHT_EA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_EA_2
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_EA_2__WEIGHT_EA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_0__WEIGHT_UTCL2_ROUTER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_1
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_1__WEIGHT_UTCL2_ROUTER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_2
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_2__WEIGHT_UTCL2_ROUTER_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_3
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_3__WEIGHT_UTCL2_ROUTER_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_ROUTER_4
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_ROUTER_4__WEIGHT_UTCL2_ROUTER_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_0__WEIGHT_UTCL2_VML2_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_1
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_VML2_1__WEIGHT_UTCL2_VML2_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_VML2_2
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_VML2_2__WEIGHT_UTCL2_VML2_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_UTCL2_WALKER_0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_0__WEIGHT_UTCL2_WALKER_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_1
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_UTCL2_WALKER_1__WEIGHT_UTCL2_WALKER_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_UTCL2_WALKER_2
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_UTCL2_WALKER_2__WEIGHT_UTCL2_WALKER_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GDS_0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_0__WEIGHT_GDS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_1
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GDS_1__WEIGHT_GDS_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GDS_2
+#define GC_CAC_WEIGHT_GDS_2__WEIGHT_GDS_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GDS_2__WEIGHT_GDS_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GE_0
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_0__WEIGHT_GE_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_1
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_1__WEIGHT_GE_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_2
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GE_2__WEIGHT_GE_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GE_3
+#define GC_CAC_WEIGHT_GE_3__WEIGHT_GE_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_GE_3__WEIGHT_GE_SIG6_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_PMM_0
+#define GC_CAC_WEIGHT_PMM_0__WEIGHT_PMM_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PMM_0__WEIGHT_PMM_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GL2C_0
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GL2C_0__WEIGHT_GL2C_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GL2C_1
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GL2C_1__WEIGHT_GL2C_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GL2C_2
+#define GC_CAC_WEIGHT_GL2C_2__WEIGHT_GL2C_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_GL2C_2__WEIGHT_GL2C_SIG4_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_PH_0
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_0__WEIGHT_PH_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_1
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_1__WEIGHT_PH_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_2
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_2__WEIGHT_PH_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_PH_3
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_PH_3__WEIGHT_PH_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_0
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_0__WEIGHT_SDMA_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_1
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG3__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG2_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_1__WEIGHT_SDMA_SIG3_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_2
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG4__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG5__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG4_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_2__WEIGHT_SDMA_SIG5_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_3
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG6__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG7__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG6_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_3__WEIGHT_SDMA_SIG7_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_4
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG8__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG9__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG8_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_4__WEIGHT_SDMA_SIG9_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_SDMA_5
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG10__SHIFT 0x0
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG11__SHIFT 0x10
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG10_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_SDMA_5__WEIGHT_SDMA_SIG11_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CHC_0
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_CHC_0__WEIGHT_CHC_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_CHC_1
+#define GC_CAC_WEIGHT_CHC_1__WEIGHT_CHC_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_CHC_1__WEIGHT_CHC_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GUS_0
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GUS_0__WEIGHT_GUS_SIG1_MASK 0xFFFF0000L
+//GC_CAC_WEIGHT_GUS_1
+#define GC_CAC_WEIGHT_GUS_1__WEIGHT_GUS_SIG2__SHIFT 0x0
+#define GC_CAC_WEIGHT_GUS_1__WEIGHT_GUS_SIG2_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_RLC_0
+#define GC_CAC_WEIGHT_RLC_0__WEIGHT_RLC_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_RLC_0__WEIGHT_RLC_SIG0_MASK 0x0000FFFFL
+//GC_CAC_WEIGHT_GRBM_0
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG0__SHIFT 0x0
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG1__SHIFT 0x10
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG0_MASK 0x0000FFFFL
+#define GC_CAC_WEIGHT_GRBM_0__WEIGHT_GRBM_SIG1_MASK 0xFFFF0000L
+//GC_EDC_CLK_MONITOR_CTRL
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN__SHIFT 0x0
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL__SHIFT 0x1
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD__SHIFT 0x5
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN_MASK 0x00000001L
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL_MASK 0x0000001EL
+#define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD_MASK 0x0001FFE0L
+//GC_CAC_IND_INDEX
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR__SHIFT 0x0
+#define GC_CAC_IND_INDEX__GC_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//GC_CAC_IND_DATA
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA__SHIFT 0x0
+#define GC_CAC_IND_DATA__GC_CAC_IND_DATA_MASK 0xFFFFFFFFL
+//SE_CAC_CTRL_1
+#define SE_CAC_CTRL_1__CAC_WINDOW__SHIFT 0x0
+#define SE_CAC_CTRL_1__TDP_WINDOW__SHIFT 0x8
+#define SE_CAC_CTRL_1__CAC_WINDOW_MASK 0x000000FFL
+#define SE_CAC_CTRL_1__TDP_WINDOW_MASK 0xFFFFFF00L
+//SE_CAC_CTRL_2
+#define SE_CAC_CTRL_2__CAC_ENABLE__SHIFT 0x0
+#define SE_CAC_CTRL_2__SE_LCAC_ENABLE__SHIFT 0x1
+#define SE_CAC_CTRL_2__WGP_CAC_CLK_OVERRIDE__SHIFT 0x2
+#define SE_CAC_CTRL_2__SE_CAC_INDEX_AUTO_INCR_EN__SHIFT 0x3
+#define SE_CAC_CTRL_2__CAC_ENABLE_MASK 0x00000001L
+#define SE_CAC_CTRL_2__SE_LCAC_ENABLE_MASK 0x00000002L
+#define SE_CAC_CTRL_2__WGP_CAC_CLK_OVERRIDE_MASK 0x00000004L
+#define SE_CAC_CTRL_2__SE_CAC_INDEX_AUTO_INCR_EN_MASK 0x00000008L
+//SE_CAC_WEIGHT_TA_0
+#define SE_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TA_0__WEIGHT_TA_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_TD_0
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_0__WEIGHT_TD_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_1
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_1__WEIGHT_TD_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_2
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_2__WEIGHT_TD_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_3
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_3__WEIGHT_TD_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_4
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TD_4__WEIGHT_TD_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TD_5
+#define SE_CAC_WEIGHT_TD_5__WEIGHT_TD_SIG10__SHIFT 0x0
+#define SE_CAC_WEIGHT_TD_5__WEIGHT_TD_SIG10_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_TCP_0
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_0__WEIGHT_TCP_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_1
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_1__WEIGHT_TCP_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_2
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_2__WEIGHT_TCP_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_TCP_3
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_TCP_3__WEIGHT_TCP_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_0
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQ_0__WEIGHT_SQ_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_1
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQ_1__WEIGHT_SQ_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQ_2
+#define SE_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQ_2__WEIGHT_SQ_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SP_0
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SP_0__WEIGHT_SP_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SP_1
+#define SE_CAC_WEIGHT_SP_1__WEIGHT_SP_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SP_1__WEIGHT_SP_SIG2_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_LDS_0
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_0__WEIGHT_LDS_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_1
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_1__WEIGHT_LDS_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_2
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_2__WEIGHT_LDS_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_LDS_3
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_LDS_3__WEIGHT_LDS_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQC_0
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SQC_0__WEIGHT_SQC_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SQC_1
+#define SE_CAC_WEIGHT_SQC_1__WEIGHT_SQC_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SQC_1__WEIGHT_SQC_SIG2_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_CU_0
+#define SE_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_CU_0__WEIGHT_CU_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_BCI_0
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_BCI_0__WEIGHT_BCI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_0
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_0__WEIGHT_CB_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_1
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_1__WEIGHT_CB_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_2
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_2__WEIGHT_CB_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_3
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_3__WEIGHT_CB_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_4
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_4__WEIGHT_CB_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_5
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG10__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG11__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG10_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_5__WEIGHT_CB_SIG11_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_6
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG12__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG13__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG12_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_6__WEIGHT_CB_SIG13_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_7
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG14__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG15__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG14_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_7__WEIGHT_CB_SIG15_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_8
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG16__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG17__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG16_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_8__WEIGHT_CB_SIG17_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_9
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG18__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG19__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG18_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_9__WEIGHT_CB_SIG19_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_10
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG20__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG21__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG20_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_10__WEIGHT_CB_SIG21_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_CB_11
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG22__SHIFT 0x0
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG23__SHIFT 0x10
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG22_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_CB_11__WEIGHT_CB_SIG23_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_0
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_0__WEIGHT_DB_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_1
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_1__WEIGHT_DB_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_2
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_2__WEIGHT_DB_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_3
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_3__WEIGHT_DB_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_DB_4
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG8__SHIFT 0x0
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG9__SHIFT 0x10
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG8_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_DB_4__WEIGHT_DB_SIG9_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_RMI_0
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_RMI_0__WEIGHT_RMI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_RMI_1
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_RMI_1__WEIGHT_RMI_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SX_0
+#define SE_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SX_0__WEIGHT_SX_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SXRB_0
+#define SE_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SXRB_0__WEIGHT_SXRB_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_UTCL1_0
+#define SE_CAC_WEIGHT_UTCL1_0__WEIGHT_UTCL1_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_UTCL1_0__WEIGHT_UTCL1_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_GL1C_0
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_GL1C_0__WEIGHT_GL1C_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_GL1C_1
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_GL1C_1__WEIGHT_GL1C_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_GL1C_2
+#define SE_CAC_WEIGHT_GL1C_2__WEIGHT_GL1C_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_GL1C_2__WEIGHT_GL1C_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_SPI_0
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SPI_0__WEIGHT_SPI_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SPI_1
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SPI_1__WEIGHT_SPI_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SPI_2
+#define SE_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SPI_2__WEIGHT_SPI_SIG4_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_PC_0
+#define SE_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_PC_0__WEIGHT_PC_SIG0_MASK 0x0000FFFFL
+//SE_CAC_WEIGHT_PA_0
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_0__WEIGHT_PA_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_1
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_1__WEIGHT_PA_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_2
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_2__WEIGHT_PA_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_PA_3
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_PA_3__WEIGHT_PA_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_0
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG1__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG0_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_0__WEIGHT_SC_SIG1_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_1
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG2__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG3__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG2_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_1__WEIGHT_SC_SIG3_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_2
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG4__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG5__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG4_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_2__WEIGHT_SC_SIG5_MASK 0xFFFF0000L
+//SE_CAC_WEIGHT_SC_3
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG6__SHIFT 0x0
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG7__SHIFT 0x10
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG6_MASK 0x0000FFFFL
+#define SE_CAC_WEIGHT_SC_3__WEIGHT_SC_SIG7_MASK 0xFFFF0000L
+//SE_CAC_WINDOW_AGGR_VALUE
+#define SE_CAC_WINDOW_AGGR_VALUE__SE_CAC_WINDOW_AGGR_VALUE__SHIFT 0x0
+#define SE_CAC_WINDOW_AGGR_VALUE__SE_CAC_WINDOW_AGGR_VALUE_MASK 0xFFFFFFFFL
+//SE_CAC_WINDOW_GFXCLK_CYCLE
+#define SE_CAC_WINDOW_GFXCLK_CYCLE__SE_CAC_WINDOW_GFXCLK_CYCLE__SHIFT 0x0
+#define SE_CAC_WINDOW_GFXCLK_CYCLE__SE_CAC_WINDOW_GFXCLK_CYCLE_MASK 0x000003FFL
+//SE_CAC_IND_INDEX
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR__SHIFT 0x0
+#define SE_CAC_IND_INDEX__SE_CAC_IND_ADDR_MASK 0xFFFFFFFFL
+//SE_CAC_IND_DATA
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA__SHIFT 0x0
+#define SE_CAC_IND_DATA__SE_CAC_IND_DATA_MASK 0xFFFFFFFFL
+
+
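(Illustrative aside, not part of the patch: the __SHIFT/_MASK pairs above follow the usual amdgpu register-header convention of reg##__##field##__SHIFT and reg##__##field##_MASK. A minimal, self-contained sketch of how such pairs are typically consumed when packing and unpacking a register value is shown below; the GC_EDC_CLK_MONITOR_CTRL values are copied verbatim from the definitions in this header, while the SET_FIELD/GET_FIELD helpers are hypothetical stand-ins for the driver's own field-access macros, not names taken from this patch.)

    /* sketch: packing/unpacking fields via the SHIFT/MASK convention */
    #include <stdint.h>
    #include <stdio.h>

    #define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN__SHIFT        0x0
    #define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL__SHIFT  0x1
    #define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD__SHIFT 0x5
    #define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_EN_MASK        0x00000001L
    #define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_INTERVAL_MASK  0x0000001EL
    #define GC_EDC_CLK_MONITOR_CTRL__EDC_CLK_MONITOR_THRESHOLD_MASK 0x0001FFE0L

    /* Generic helpers built on the reg##__##field##__SHIFT / _MASK naming. */
    #define SET_FIELD(val, reg, field, fval)                                    \
            (((val) & ~reg##__##field##_MASK) |                                 \
             (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
    #define GET_FIELD(val, reg, field)                                          \
            (((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

    int main(void)
    {
            uint32_t ctrl = 0;

            /* Enable the monitor and program a 12-bit threshold. */
            ctrl = SET_FIELD(ctrl, GC_EDC_CLK_MONITOR_CTRL, EDC_CLK_MONITOR_EN, 1);
            ctrl = SET_FIELD(ctrl, GC_EDC_CLK_MONITOR_CTRL, EDC_CLK_MONITOR_THRESHOLD, 0x3FF);

            printf("ctrl = 0x%08X, threshold = 0x%X\n", ctrl,
                   GET_FIELD(ctrl, GC_EDC_CLK_MONITOR_CTRL, EDC_CLK_MONITOR_THRESHOLD));
            return 0;
    }
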
+// addressBlock: gc_pfonly2_spidec
+//SPI_RESOURCE_RESERVE_CU_0
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_0__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_0__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_0__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_0__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_0__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_0__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_1
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_1__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_1__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_1__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_1__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_1__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_1__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_2
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_2__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_2__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_2__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_2__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_2__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_2__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_3
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_3__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_3__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_3__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_3__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_3__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_3__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_4
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_4__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_4__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_4__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_4__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_4__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_4__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_5
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_5__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_5__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_5__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_5__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_5__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_5__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_6
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_6__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_6__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_6__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_6__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_6__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_6__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_7
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_7__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_7__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_7__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_7__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_7__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_7__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_8
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_8__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_8__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_8__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_8__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_8__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_8__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_9
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_9__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_9__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_9__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_9__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_9__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_9__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_10
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_10__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_10__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_10__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_10__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_10__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_10__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_11
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_11__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_11__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_11__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_11__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_11__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_11__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_12
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_12__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_12__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_12__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_12__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_12__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_12__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_13
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_13__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_13__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_13__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_13__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_13__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_13__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_14
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_14__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_14__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_14__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_14__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_14__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_14__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_CU_15
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR__SHIFT 0x4
+#define SPI_RESOURCE_RESERVE_CU_15__LDS__SHIFT 0x8
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES__SHIFT 0xc
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS__SHIFT 0xf
+#define SPI_RESOURCE_RESERVE_CU_15__VGPR_MASK 0x0000000FL
+#define SPI_RESOURCE_RESERVE_CU_15__SGPR_MASK 0x000000F0L
+#define SPI_RESOURCE_RESERVE_CU_15__LDS_MASK 0x00000F00L
+#define SPI_RESOURCE_RESERVE_CU_15__WAVES_MASK 0x00007000L
+#define SPI_RESOURCE_RESERVE_CU_15__BARRIERS_MASK 0x00078000L
+//SPI_RESOURCE_RESERVE_EN_CU_0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_0__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_0__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_0__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_1__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_1__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_1__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_2
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_2__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_2__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_2__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_3
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_3__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_3__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_3__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_4
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_4__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_4__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_4__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_5
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_5__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_5__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_5__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_6
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_6__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_6__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_6__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_7
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_7__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_7__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_7__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_8
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_8__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_8__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_8__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_9
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_9__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_9__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_9__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_10__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_10__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_10__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_11
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_11__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_11__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_11__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_12
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_12__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_12__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_12__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_13
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_13__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_13__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_13__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_14
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_14__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_14__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_14__QUEUE_MASK_MASK 0x00FF0000L
+//SPI_RESOURCE_RESERVE_EN_CU_15
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN__SHIFT 0x0
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK__SHIFT 0x1
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK__SHIFT 0x10
+#define SPI_RESOURCE_RESERVE_EN_CU_15__EN_MASK 0x00000001L
+#define SPI_RESOURCE_RESERVE_EN_CU_15__TYPE_MASK_MASK 0x0000FFFEL
+#define SPI_RESOURCE_RESERVE_EN_CU_15__QUEUE_MASK_MASK 0x00FF0000L
+
+
+// addressBlock: gc_gfxudec
+//CP_EOP_DONE_ADDR_LO
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_EOP_DONE_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_EOP_DONE_ADDR_HI
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_EOP_DONE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_EOP_DONE_DATA_LO
+#define CP_EOP_DONE_DATA_LO__DATA_LO__SHIFT 0x0
+#define CP_EOP_DONE_DATA_LO__DATA_LO_MASK 0xFFFFFFFFL
+//CP_EOP_DONE_DATA_HI
+#define CP_EOP_DONE_DATA_HI__DATA_HI__SHIFT 0x0
+#define CP_EOP_DONE_DATA_HI__DATA_HI_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_LO
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_LO__LAST_FENCE_LO_MASK 0xFFFFFFFFL
+//CP_EOP_LAST_FENCE_HI
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI__SHIFT 0x0
+#define CP_EOP_LAST_FENCE_HI__LAST_FENCE_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_ADDR_LO
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO__SHIFT 0x2
+#define CP_PIPE_STATS_ADDR_LO__PIPE_STATS_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_PIPE_STATS_ADDR_HI
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI__SHIFT 0x0
+#define CP_PIPE_STATS_ADDR_HI__PIPE_STATS_ADDR_HI_MASK 0x0000FFFFL
+//CP_VGT_IAVERT_COUNT_LO
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_LO__IAVERT_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAVERT_COUNT_HI
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAVERT_COUNT_HI__IAVERT_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_LO
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_LO__IAPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_IAPRIM_COUNT_HI
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_IAPRIM_COUNT_HI__IAPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_LO
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_LO__GSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSPRIM_COUNT_HI
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSPRIM_COUNT_HI__GSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_LO
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_LO__VSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_VSINVOC_COUNT_HI
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_VSINVOC_COUNT_HI__VSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_LO
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_LO__GSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_GSINVOC_COUNT_HI
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_GSINVOC_COUNT_HI__GSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_LO
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_LO__HSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_HSINVOC_COUNT_HI
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_HSINVOC_COUNT_HI__HSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_LO
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_LO__DSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_DSINVOC_COUNT_HI
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_DSINVOC_COUNT_HI__DSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_LO
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_LO__CINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CINVOC_COUNT_HI
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI__SHIFT 0x0
+#define CP_PA_CINVOC_COUNT_HI__CINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_LO
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_LO__CPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_CPRIM_COUNT_HI
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_CPRIM_COUNT_HI__CPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_LO
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_LO__PSINVOC_COUNT0_LO_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT0_HI
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT0_HI__PSINVOC_COUNT0_HI_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_LO
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_LO__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_SC_PSINVOC_COUNT1_HI
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE__SHIFT 0x0
+#define CP_SC_PSINVOC_COUNT1_HI__OBSOLETE_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_LO
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_LO__CSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_CSINVOC_COUNT_HI
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_CSINVOC_COUNT_HI__CSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_VGT_ASINVOC_COUNT_LO
+#define CP_VGT_ASINVOC_COUNT_LO__ASINVOC_COUNT_LO__SHIFT 0x0
+#define CP_VGT_ASINVOC_COUNT_LO__ASINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_VGT_ASINVOC_COUNT_HI
+#define CP_VGT_ASINVOC_COUNT_HI__ASINVOC_COUNT_HI__SHIFT 0x0
+#define CP_VGT_ASINVOC_COUNT_HI__ASINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_PIPE_STATS_CONTROL
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY__SHIFT 0x19
+#define CP_PIPE_STATS_CONTROL__CACHE_POLICY_MASK 0x06000000L
+//SCRATCH_REG0
+#define SCRATCH_REG0__SCRATCH_REG0__SHIFT 0x0
+#define SCRATCH_REG0__SCRATCH_REG0_MASK 0xFFFFFFFFL
+//SCRATCH_REG1
+#define SCRATCH_REG1__SCRATCH_REG1__SHIFT 0x0
+#define SCRATCH_REG1__SCRATCH_REG1_MASK 0xFFFFFFFFL
+//SCRATCH_REG2
+#define SCRATCH_REG2__SCRATCH_REG2__SHIFT 0x0
+#define SCRATCH_REG2__SCRATCH_REG2_MASK 0xFFFFFFFFL
+//SCRATCH_REG3
+#define SCRATCH_REG3__SCRATCH_REG3__SHIFT 0x0
+#define SCRATCH_REG3__SCRATCH_REG3_MASK 0xFFFFFFFFL
+//SCRATCH_REG4
+#define SCRATCH_REG4__SCRATCH_REG4__SHIFT 0x0
+#define SCRATCH_REG4__SCRATCH_REG4_MASK 0xFFFFFFFFL
+//SCRATCH_REG5
+#define SCRATCH_REG5__SCRATCH_REG5__SHIFT 0x0
+#define SCRATCH_REG5__SCRATCH_REG5_MASK 0xFFFFFFFFL
+//SCRATCH_REG6
+#define SCRATCH_REG6__SCRATCH_REG6__SHIFT 0x0
+#define SCRATCH_REG6__SCRATCH_REG6_MASK 0xFFFFFFFFL
+//SCRATCH_REG7
+#define SCRATCH_REG7__SCRATCH_REG7__SHIFT 0x0
+#define SCRATCH_REG7__SCRATCH_REG7_MASK 0xFFFFFFFFL
+//SCRATCH_REG_ATOMIC
+#define SCRATCH_REG_ATOMIC__IMMED__SHIFT 0x0
+#define SCRATCH_REG_ATOMIC__ID__SHIFT 0x18
+#define SCRATCH_REG_ATOMIC__reserved27__SHIFT 0x1b
+#define SCRATCH_REG_ATOMIC__OP__SHIFT 0x1c
+#define SCRATCH_REG_ATOMIC__reserved31__SHIFT 0x1f
+#define SCRATCH_REG_ATOMIC__IMMED_MASK 0x00FFFFFFL
+#define SCRATCH_REG_ATOMIC__ID_MASK 0x07000000L
+#define SCRATCH_REG_ATOMIC__reserved27_MASK 0x08000000L
+#define SCRATCH_REG_ATOMIC__OP_MASK 0x70000000L
+#define SCRATCH_REG_ATOMIC__reserved31_MASK 0x80000000L
+//SCRATCH_REG_CMPSWAP_ATOMIC
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_COMPARE__SHIFT 0x0
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_REPLACE__SHIFT 0xc
+#define SCRATCH_REG_CMPSWAP_ATOMIC__ID__SHIFT 0x18
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved27__SHIFT 0x1b
+#define SCRATCH_REG_CMPSWAP_ATOMIC__OP__SHIFT 0x1c
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved31__SHIFT 0x1f
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_COMPARE_MASK 0x00000FFFL
+#define SCRATCH_REG_CMPSWAP_ATOMIC__IMMED_REPLACE_MASK 0x00FFF000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__ID_MASK 0x07000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved27_MASK 0x08000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__OP_MASK 0x70000000L
+#define SCRATCH_REG_CMPSWAP_ATOMIC__reserved31_MASK 0x80000000L
+//CP_APPEND_DDID_CNT
+#define CP_APPEND_DDID_CNT__DATA__SHIFT 0x0
+#define CP_APPEND_DDID_CNT__DATA_MASK 0x000000FFL
+//CP_APPEND_DATA_HI
+#define CP_APPEND_DATA_HI__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_HI
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_HI
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_HI__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_LO
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_ATOMIC_PREOP_HI
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_PFP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_LO
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC0_PREOP_HI
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_LO
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_PFP_GDS_ATOMIC1_PREOP_HI
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_PFP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_APPEND_ADDR_LO
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO__SHIFT 0x2
+#define CP_APPEND_ADDR_LO__MEM_ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_ADDR_HI
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI__SHIFT 0x0
+#define CP_APPEND_ADDR_HI__CS_PS_SEL__SHIFT 0x10
+#define CP_APPEND_ADDR_HI__FENCE_SIZE__SHIFT 0x12
+#define CP_APPEND_ADDR_HI__PWS_ENABLE__SHIFT 0x13
+#define CP_APPEND_ADDR_HI__CACHE_POLICY__SHIFT 0x19
+#define CP_APPEND_ADDR_HI__COMMAND__SHIFT 0x1d
+#define CP_APPEND_ADDR_HI__MEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_ADDR_HI__CS_PS_SEL_MASK 0x00030000L
+#define CP_APPEND_ADDR_HI__FENCE_SIZE_MASK 0x00040000L
+#define CP_APPEND_ADDR_HI__PWS_ENABLE_MASK 0x00080000L
+#define CP_APPEND_ADDR_HI__CACHE_POLICY_MASK 0x06000000L
+#define CP_APPEND_ADDR_HI__COMMAND_MASK 0xE0000000L
+//CP_APPEND_DATA
+#define CP_APPEND_DATA__DATA__SHIFT 0x0
+#define CP_APPEND_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_DATA_LO
+#define CP_APPEND_DATA_LO__DATA__SHIFT 0x0
+#define CP_APPEND_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_CS_FENCE_LO
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_CS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_APPEND_LAST_PS_FENCE_LO
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE__SHIFT 0x0
+#define CP_APPEND_LAST_PS_FENCE_LO__LAST_FENCE_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_LO
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_LO
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_LO__ATOMIC_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ATOMIC_PREOP_HI
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_ATOMIC_PREOP_HI
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI__SHIFT 0x0
+#define CP_ME_ATOMIC_PREOP_HI__ATOMIC_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_LO
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_LO
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_LO__GDS_ATOMIC0_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC0_PREOP_HI
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC0_PREOP_HI
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC0_PREOP_HI__GDS_ATOMIC0_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_LO
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_LO
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_LO__GDS_ATOMIC1_PREOP_LO_MASK 0xFFFFFFFFL
+//CP_GDS_ATOMIC1_PREOP_HI
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_GDS_ATOMIC1_PREOP_HI
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI__SHIFT 0x0
+#define CP_ME_GDS_ATOMIC1_PREOP_HI__GDS_ATOMIC1_PREOP_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_WADDR_LO
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO__SHIFT 0x2
+#define CP_ME_MC_WADDR_LO__ME_MC_WADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_WADDR_HI
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI__SHIFT 0x0
+#define CP_ME_MC_WADDR_HI__WRITE_CONFIRM__SHIFT 0x11
+#define CP_ME_MC_WADDR_HI__WRITE64__SHIFT 0x12
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_WADDR_HI__VMID__SHIFT 0x18
+#define CP_ME_MC_WADDR_HI__RINGID__SHIFT 0x1c
+#define CP_ME_MC_WADDR_HI__PRIVILEGE__SHIFT 0x1f
+#define CP_ME_MC_WADDR_HI__ME_MC_WADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_WADDR_HI__WRITE_CONFIRM_MASK 0x00020000L
+#define CP_ME_MC_WADDR_HI__WRITE64_MASK 0x00040000L
+#define CP_ME_MC_WADDR_HI__CACHE_POLICY_MASK 0x00C00000L
+#define CP_ME_MC_WADDR_HI__VMID_MASK 0x0F000000L
+#define CP_ME_MC_WADDR_HI__RINGID_MASK 0x30000000L
+#define CP_ME_MC_WADDR_HI__PRIVILEGE_MASK 0x80000000L
+//CP_ME_MC_WDATA_LO
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO__SHIFT 0x0
+#define CP_ME_MC_WDATA_LO__ME_MC_WDATA_LO_MASK 0xFFFFFFFFL
+//CP_ME_MC_WDATA_HI
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI__SHIFT 0x0
+#define CP_ME_MC_WDATA_HI__ME_MC_WDATA_HI_MASK 0xFFFFFFFFL
+//CP_ME_MC_RADDR_LO
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO__SHIFT 0x2
+#define CP_ME_MC_RADDR_LO__ME_MC_RADDR_LO_MASK 0xFFFFFFFCL
+//CP_ME_MC_RADDR_HI
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI__SHIFT 0x0
+#define CP_ME_MC_RADDR_HI__SIZE__SHIFT 0x10
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY__SHIFT 0x16
+#define CP_ME_MC_RADDR_HI__VMID__SHIFT 0x18
+#define CP_ME_MC_RADDR_HI__PRIVILEGE__SHIFT 0x1f
+#define CP_ME_MC_RADDR_HI__ME_MC_RADDR_HI_MASK 0x0000FFFFL
+#define CP_ME_MC_RADDR_HI__SIZE_MASK 0x000F0000L
+#define CP_ME_MC_RADDR_HI__CACHE_POLICY_MASK 0x00C00000L
+#define CP_ME_MC_RADDR_HI__VMID_MASK 0x0F000000L
+#define CP_ME_MC_RADDR_HI__PRIVILEGE_MASK 0x80000000L
+//CP_SEM_WAIT_TIMER
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER__SHIFT 0x0
+#define CP_SEM_WAIT_TIMER__SEM_WAIT_TIMER_MASK 0xFFFFFFFFL
+//CP_SIG_SEM_ADDR_LO
+#define CP_SIG_SEM_ADDR_LO__SEM_PRIV__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_SIG_SEM_ADDR_LO__SEM_PRIV_MASK 0x00000001L
+#define CP_SIG_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_SIG_SEM_ADDR_HI
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_SIG_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_SIG_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_SIG_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_SIG_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_WAIT_REG_MEM_TIMEOUT
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT__SHIFT 0x0
+#define CP_WAIT_REG_MEM_TIMEOUT__WAIT_REG_MEM_TIMEOUT_MASK 0xFFFFFFFFL
+//CP_WAIT_SEM_ADDR_LO
+#define CP_WAIT_SEM_ADDR_LO__SEM_PRIV__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO__SHIFT 0x3
+#define CP_WAIT_SEM_ADDR_LO__SEM_PRIV_MASK 0x00000001L
+#define CP_WAIT_SEM_ADDR_LO__SEM_ADDR_LO_MASK 0xFFFFFFF8L
+//CP_WAIT_SEM_ADDR_HI
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI__SHIFT 0x0
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX__SHIFT 0x10
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE__SHIFT 0x14
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE__SHIFT 0x18
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT__SHIFT 0x1d
+#define CP_WAIT_SEM_ADDR_HI__SEM_ADDR_HI_MASK 0x0000FFFFL
+#define CP_WAIT_SEM_ADDR_HI__SEM_USE_MAILBOX_MASK 0x00010000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SIGNAL_TYPE_MASK 0x00100000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_CLIENT_CODE_MASK 0x03000000L
+#define CP_WAIT_SEM_ADDR_HI__SEM_SELECT_MASK 0xE0000000L
+//CP_DMA_PFP_CONTROL
+#define CP_DMA_PFP_CONTROL__VMID__SHIFT 0x0
+#define CP_DMA_PFP_CONTROL__TMZ__SHIFT 0x4
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_PFP_CONTROL__SRC_VOLATLE__SHIFT 0xf
+#define CP_DMA_PFP_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_PFP_CONTROL__DST_VOLATLE__SHIFT 0x1b
+#define CP_DMA_PFP_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_PFP_CONTROL__VMID_MASK 0x0000000FL
+#define CP_DMA_PFP_CONTROL__TMZ_MASK 0x00000010L
+#define CP_DMA_PFP_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_PFP_CONTROL__SRC_CACHE_POLICY_MASK 0x00006000L
+#define CP_DMA_PFP_CONTROL__SRC_VOLATLE_MASK 0x00008000L
+#define CP_DMA_PFP_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_PFP_CONTROL__DST_CACHE_POLICY_MASK 0x06000000L
+#define CP_DMA_PFP_CONTROL__DST_VOLATLE_MASK 0x08000000L
+#define CP_DMA_PFP_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_CONTROL
+#define CP_DMA_ME_CONTROL__VMID__SHIFT 0x0
+#define CP_DMA_ME_CONTROL__TMZ__SHIFT 0x4
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR__SHIFT 0xa
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY__SHIFT 0xd
+#define CP_DMA_ME_CONTROL__SRC_VOLATLE__SHIFT 0xf
+#define CP_DMA_ME_CONTROL__DST_SELECT__SHIFT 0x14
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY__SHIFT 0x19
+#define CP_DMA_ME_CONTROL__DST_VOLATLE__SHIFT 0x1b
+#define CP_DMA_ME_CONTROL__SRC_SELECT__SHIFT 0x1d
+#define CP_DMA_ME_CONTROL__VMID_MASK 0x0000000FL
+#define CP_DMA_ME_CONTROL__TMZ_MASK 0x00000010L
+#define CP_DMA_ME_CONTROL__MEMLOG_CLEAR_MASK 0x00000400L
+#define CP_DMA_ME_CONTROL__SRC_CACHE_POLICY_MASK 0x00006000L
+#define CP_DMA_ME_CONTROL__SRC_VOLATLE_MASK 0x00008000L
+#define CP_DMA_ME_CONTROL__DST_SELECT_MASK 0x00300000L
+#define CP_DMA_ME_CONTROL__DST_CACHE_POLICY_MASK 0x06000000L
+#define CP_DMA_ME_CONTROL__DST_VOLATLE_MASK 0x08000000L
+#define CP_DMA_ME_CONTROL__SRC_SELECT_MASK 0x60000000L
+//CP_DMA_ME_SRC_ADDR
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_SRC_ADDR_HI
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_DST_ADDR
+#define CP_DMA_ME_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_ME_DST_ADDR_HI
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_ME_COMMAND
+#define CP_DMA_ME_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_ME_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_ME_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_ME_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_ME_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_ME_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_ME_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_ME_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_ME_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_ME_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_ME_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_ME_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_ME_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_ME_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_PFP_SRC_ADDR
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR__SRC_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_SRC_ADDR_HI
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_DST_ADDR
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR__DST_ADDR_MASK 0xFFFFFFFFL
+//CP_DMA_PFP_DST_ADDR_HI
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_DST_ADDR_HI__DST_ADDR_HI_MASK 0x0000FFFFL
+//CP_DMA_PFP_COMMAND
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT__SHIFT 0x0
+#define CP_DMA_PFP_COMMAND__SAS__SHIFT 0x1a
+#define CP_DMA_PFP_COMMAND__DAS__SHIFT 0x1b
+#define CP_DMA_PFP_COMMAND__SAIC__SHIFT 0x1c
+#define CP_DMA_PFP_COMMAND__DAIC__SHIFT 0x1d
+#define CP_DMA_PFP_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define CP_DMA_PFP_COMMAND__DIS_WC__SHIFT 0x1f
+#define CP_DMA_PFP_COMMAND__BYTE_COUNT_MASK 0x03FFFFFFL
+#define CP_DMA_PFP_COMMAND__SAS_MASK 0x04000000L
+#define CP_DMA_PFP_COMMAND__DAS_MASK 0x08000000L
+#define CP_DMA_PFP_COMMAND__SAIC_MASK 0x10000000L
+#define CP_DMA_PFP_COMMAND__DAIC_MASK 0x20000000L
+#define CP_DMA_PFP_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define CP_DMA_PFP_COMMAND__DIS_WC_MASK 0x80000000L
+//CP_DMA_CNTL
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL__SHIFT 0x0
+#define CP_DMA_CNTL__WATCH_CONTROL__SHIFT 0x1
+#define CP_DMA_CNTL__MIN_AVAILSZ__SHIFT 0x4
+#define CP_DMA_CNTL__BUFFER_DEPTH__SHIFT 0x10
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define CP_DMA_CNTL__PIO_FIFO_FULL__SHIFT 0x1d
+#define CP_DMA_CNTL__PIO_COUNT__SHIFT 0x1e
+#define CP_DMA_CNTL__UTCL1_FAULT_CONTROL_MASK 0x00000001L
+#define CP_DMA_CNTL__WATCH_CONTROL_MASK 0x00000002L
+#define CP_DMA_CNTL__MIN_AVAILSZ_MASK 0x00000030L
+#define CP_DMA_CNTL__BUFFER_DEPTH_MASK 0x01FF0000L
+#define CP_DMA_CNTL__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define CP_DMA_CNTL__PIO_FIFO_FULL_MASK 0x20000000L
+#define CP_DMA_CNTL__PIO_COUNT_MASK 0xC0000000L
+//CP_DMA_READ_TAGS
+#define CP_DMA_READ_TAGS__DMA_READ_TAG__SHIFT 0x0
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID__SHIFT 0x1c
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_MASK 0x03FFFFFFL
+#define CP_DMA_READ_TAGS__DMA_READ_TAG_VALID_MASK 0x10000000L
+//CP_PFP_IB_CONTROL
+#define CP_PFP_IB_CONTROL__IB_EN__SHIFT 0x0
+#define CP_PFP_IB_CONTROL__IB_EN_MASK 0x000000FFL
+//CP_PFP_LOAD_CONTROL
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN__SHIFT 0x0
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN__SHIFT 0x1
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN__SHIFT 0xf
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN__SHIFT 0x10
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN__SHIFT 0x18
+#define CP_PFP_LOAD_CONTROL__LOAD_ORDINAL__SHIFT 0x1f
+#define CP_PFP_LOAD_CONTROL__CONFIG_REG_EN_MASK 0x00000001L
+#define CP_PFP_LOAD_CONTROL__CNTX_REG_EN_MASK 0x00000002L
+#define CP_PFP_LOAD_CONTROL__UCONFIG_REG_EN_MASK 0x00008000L
+#define CP_PFP_LOAD_CONTROL__SH_GFX_REG_EN_MASK 0x00010000L
+#define CP_PFP_LOAD_CONTROL__SH_CS_REG_EN_MASK 0x01000000L
+#define CP_PFP_LOAD_CONTROL__LOAD_ORDINAL_MASK 0x80000000L
+//CP_SCRATCH_INDEX
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_SCRATCH_DATA
+#define CP_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_RB_OFFSET
+#define CP_RB_OFFSET__RB_OFFSET__SHIFT 0x0
+#define CP_RB_OFFSET__RB_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_OFFSET
+#define CP_IB1_OFFSET__IB1_OFFSET__SHIFT 0x0
+#define CP_IB1_OFFSET__IB1_OFFSET_MASK 0x000FFFFFL
+//CP_IB2_OFFSET
+#define CP_IB2_OFFSET__IB2_OFFSET__SHIFT 0x0
+#define CP_IB2_OFFSET__IB2_OFFSET_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_BEGIN
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB1_PREAMBLE_BEGIN__IB1_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB1_PREAMBLE_END
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END__SHIFT 0x0
+#define CP_IB1_PREAMBLE_END__IB1_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_BEGIN
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN__SHIFT 0x0
+#define CP_IB2_PREAMBLE_BEGIN__IB2_PREAMBLE_BEGIN_MASK 0x000FFFFFL
+//CP_IB2_PREAMBLE_END
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END__SHIFT 0x0
+#define CP_IB2_PREAMBLE_END__IB2_PREAMBLE_END_MASK 0x000FFFFFL
+//CP_DMA_ME_CMD_ADDR_LO
+#define CP_DMA_ME_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_ME_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_ME_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_DMA_ME_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_ME_CMD_ADDR_HI
+#define CP_DMA_ME_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_ME_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_ME_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_ME_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_DMA_PFP_CMD_ADDR_LO
+#define CP_DMA_PFP_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_DMA_PFP_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_DMA_PFP_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_DMA_PFP_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_DMA_PFP_CMD_ADDR_HI
+#define CP_DMA_PFP_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DMA_PFP_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_DMA_PFP_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_DMA_PFP_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//CP_APPEND_CMD_ADDR_LO
+#define CP_APPEND_CMD_ADDR_LO__RSVD__SHIFT 0x0
+#define CP_APPEND_CMD_ADDR_LO__ADDR_LO__SHIFT 0x2
+#define CP_APPEND_CMD_ADDR_LO__RSVD_MASK 0x00000003L
+#define CP_APPEND_CMD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFCL
+//CP_APPEND_CMD_ADDR_HI
+#define CP_APPEND_CMD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_APPEND_CMD_ADDR_HI__RSVD__SHIFT 0x10
+#define CP_APPEND_CMD_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+#define CP_APPEND_CMD_ADDR_HI__RSVD_MASK 0xFFFF0000L
+//UCONFIG_RESERVED_REG0
+#define UCONFIG_RESERVED_REG0__DATA__SHIFT 0x0
+#define UCONFIG_RESERVED_REG0__DATA_MASK 0xFFFFFFFFL
+//UCONFIG_RESERVED_REG1
+#define UCONFIG_RESERVED_REG1__DATA__SHIFT 0x0
+#define UCONFIG_RESERVED_REG1__DATA_MASK 0xFFFFFFFFL
+//CP_PA_MSPRIM_COUNT_LO
+#define CP_PA_MSPRIM_COUNT_LO__MSPRIM_COUNT_LO__SHIFT 0x0
+#define CP_PA_MSPRIM_COUNT_LO__MSPRIM_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_PA_MSPRIM_COUNT_HI
+#define CP_PA_MSPRIM_COUNT_HI__MSPRIM_COUNT_HI__SHIFT 0x0
+#define CP_PA_MSPRIM_COUNT_HI__MSPRIM_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_GE_MSINVOC_COUNT_LO
+#define CP_GE_MSINVOC_COUNT_LO__MSINVOC_COUNT_LO__SHIFT 0x0
+#define CP_GE_MSINVOC_COUNT_LO__MSINVOC_COUNT_LO_MASK 0xFFFFFFFFL
+//CP_GE_MSINVOC_COUNT_HI
+#define CP_GE_MSINVOC_COUNT_HI__MSINVOC_COUNT_HI__SHIFT 0x0
+#define CP_GE_MSINVOC_COUNT_HI__MSINVOC_COUNT_HI_MASK 0xFFFFFFFFL
+//CP_IB1_CMD_BUFSZ
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ__SHIFT 0x0
+#define CP_IB1_CMD_BUFSZ__IB1_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB2_CMD_BUFSZ
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ__SHIFT 0x0
+#define CP_IB2_CMD_BUFSZ__IB2_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_ST_CMD_BUFSZ
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ__SHIFT 0x0
+#define CP_ST_CMD_BUFSZ__ST_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
+//CP_IB2_BASE_LO
+#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
+#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB2_BASE_HI
+#define CP_IB2_BASE_HI__IB2_BASE_HI__SHIFT 0x0
+#define CP_IB2_BASE_HI__IB2_BASE_HI_MASK 0x0000FFFFL
+//CP_IB2_BUFSZ
+#define CP_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
+#define CP_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_ST_BASE_LO
+#define CP_ST_BASE_LO__ST_BASE_LO__SHIFT 0x2
+#define CP_ST_BASE_LO__ST_BASE_LO_MASK 0xFFFFFFFCL
+//CP_ST_BASE_HI
+#define CP_ST_BASE_HI__ST_BASE_HI__SHIFT 0x0
+#define CP_ST_BASE_HI__ST_BASE_HI_MASK 0x0000FFFFL
+//CP_ST_BUFSZ
+#define CP_ST_BUFSZ__ST_BUFSZ__SHIFT 0x0
+#define CP_ST_BUFSZ__ST_BUFSZ_MASK 0x000FFFFFL
+//CP_EOP_DONE_EVENT_CNTL
+#define CP_EOP_DONE_EVENT_CNTL__GCR_CNTL__SHIFT 0xc
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY__SHIFT 0x19
+#define CP_EOP_DONE_EVENT_CNTL__EOP_VOLATILE__SHIFT 0x1b
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE__SHIFT 0x1c
+#define CP_EOP_DONE_EVENT_CNTL__GLK_INV__SHIFT 0x1e
+#define CP_EOP_DONE_EVENT_CNTL__PWS_ENABLE__SHIFT 0x1f
+#define CP_EOP_DONE_EVENT_CNTL__GCR_CNTL_MASK 0x01FFF000L
+#define CP_EOP_DONE_EVENT_CNTL__CACHE_POLICY_MASK 0x06000000L
+#define CP_EOP_DONE_EVENT_CNTL__EOP_VOLATILE_MASK 0x08000000L
+#define CP_EOP_DONE_EVENT_CNTL__EXECUTE_MASK 0x10000000L
+#define CP_EOP_DONE_EVENT_CNTL__GLK_INV_MASK 0x40000000L
+#define CP_EOP_DONE_EVENT_CNTL__PWS_ENABLE_MASK 0x80000000L
+//CP_EOP_DONE_DATA_CNTL
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL__SHIFT 0x10
+#define CP_EOP_DONE_DATA_CNTL__SEMAPHORE_SIGNAL_TYPE__SHIFT 0x13
+#define CP_EOP_DONE_DATA_CNTL__ACTION_PIPE_ID__SHIFT 0x14
+#define CP_EOP_DONE_DATA_CNTL__ACTION_ID__SHIFT 0x16
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT 0x18
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT 0x1d
+#define CP_EOP_DONE_DATA_CNTL__DST_SEL_MASK 0x00030000L
+#define CP_EOP_DONE_DATA_CNTL__SEMAPHORE_SIGNAL_TYPE_MASK 0x00080000L
+#define CP_EOP_DONE_DATA_CNTL__ACTION_PIPE_ID_MASK 0x00300000L
+#define CP_EOP_DONE_DATA_CNTL__ACTION_ID_MASK 0x00C00000L
+#define CP_EOP_DONE_DATA_CNTL__INT_SEL_MASK 0x07000000L
+#define CP_EOP_DONE_DATA_CNTL__DATA_SEL_MASK 0xE0000000L
+//CP_EOP_DONE_CNTX_ID
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID__SHIFT 0x0
+#define CP_EOP_DONE_CNTX_ID__CNTX_ID_MASK 0xFFFFFFFFL
+//CP_DB_BASE_LO
+#define CP_DB_BASE_LO__DB_BASE_LO__SHIFT 0x2
+#define CP_DB_BASE_LO__DB_BASE_LO_MASK 0xFFFFFFFCL
+//CP_DB_BASE_HI
+#define CP_DB_BASE_HI__DB_BASE_HI__SHIFT 0x0
+#define CP_DB_BASE_HI__DB_BASE_HI_MASK 0x0000FFFFL
+//CP_DB_BUFSZ
+#define CP_DB_BUFSZ__DB_BUFSZ__SHIFT 0x0
+#define CP_DB_BUFSZ__DB_BUFSZ_MASK 0x000FFFFFL
+//CP_DB_CMD_BUFSZ
+#define CP_DB_CMD_BUFSZ__DB_CMD_REQSZ__SHIFT 0x0
+#define CP_DB_CMD_BUFSZ__DB_CMD_REQSZ_MASK 0x000FFFFFL
+//CP_PFP_COMPLETION_STATUS
+#define CP_PFP_COMPLETION_STATUS__STATUS__SHIFT 0x0
+#define CP_PFP_COMPLETION_STATUS__STATUS_MASK 0x00000003L
+//CP_PRED_NOT_VISIBLE
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE__SHIFT 0x0
+#define CP_PRED_NOT_VISIBLE__NOT_VISIBLE_MASK 0x00000001L
+//CP_PFP_METADATA_BASE_ADDR
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_PFP_METADATA_BASE_ADDR_HI
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_PFP_METADATA_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DRAW_INDX_INDR_ADDR
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DRAW_INDX_INDR_ADDR_HI
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DRAW_INDX_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_DISPATCH_INDR_ADDR
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_DISPATCH_INDR_ADDR_HI
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_DISPATCH_INDR_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_BASE_ADDR
+#define CP_INDEX_BASE_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_INDEX_BASE_ADDR_HI
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_INDEX_BASE_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_INDEX_TYPE
+#define CP_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define CP_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+//CP_GDS_BKUP_ADDR
+#define CP_GDS_BKUP_ADDR__ADDR_LO__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_GDS_BKUP_ADDR_HI
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_GDS_BKUP_ADDR_HI__ADDR_HI_MASK 0x0000FFFFL
+//CP_SAMPLE_STATUS
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE__SHIFT 0x0
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE__SHIFT 0x1
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE__SHIFT 0x2
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE__SHIFT 0x3
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE__SHIFT 0x4
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE__SHIFT 0x5
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE__SHIFT 0x6
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE__SHIFT 0x7
+#define CP_SAMPLE_STATUS__Z_PASS_ACITVE_MASK 0x00000001L
+#define CP_SAMPLE_STATUS__STREAMOUT_ACTIVE_MASK 0x00000002L
+#define CP_SAMPLE_STATUS__PIPELINE_ACTIVE_MASK 0x00000004L
+#define CP_SAMPLE_STATUS__STIPPLE_ACTIVE_MASK 0x00000008L
+#define CP_SAMPLE_STATUS__VGT_BUFFERS_ACTIVE_MASK 0x00000010L
+#define CP_SAMPLE_STATUS__SCREEN_EXT_ACTIVE_MASK 0x00000020L
+#define CP_SAMPLE_STATUS__DRAW_INDIRECT_ACTIVE_MASK 0x00000040L
+#define CP_SAMPLE_STATUS__DISP_INDIRECT_ACTIVE_MASK 0x00000080L
+//CP_ME_COHER_CNTL
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA__SHIFT 0x0
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA__SHIFT 0x1
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA__SHIFT 0x6
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA__SHIFT 0x7
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA__SHIFT 0x8
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA__SHIFT 0x9
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA__SHIFT 0xa
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA__SHIFT 0xb
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA__SHIFT 0xc
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA__SHIFT 0xd
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA__SHIFT 0xe
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA__SHIFT 0x13
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA__SHIFT 0x15
+#define CP_ME_COHER_CNTL__DEST_BASE_0_ENA_MASK 0x00000001L
+#define CP_ME_COHER_CNTL__DEST_BASE_1_ENA_MASK 0x00000002L
+#define CP_ME_COHER_CNTL__CB0_DEST_BASE_ENA_MASK 0x00000040L
+#define CP_ME_COHER_CNTL__CB1_DEST_BASE_ENA_MASK 0x00000080L
+#define CP_ME_COHER_CNTL__CB2_DEST_BASE_ENA_MASK 0x00000100L
+#define CP_ME_COHER_CNTL__CB3_DEST_BASE_ENA_MASK 0x00000200L
+#define CP_ME_COHER_CNTL__CB4_DEST_BASE_ENA_MASK 0x00000400L
+#define CP_ME_COHER_CNTL__CB5_DEST_BASE_ENA_MASK 0x00000800L
+#define CP_ME_COHER_CNTL__CB6_DEST_BASE_ENA_MASK 0x00001000L
+#define CP_ME_COHER_CNTL__CB7_DEST_BASE_ENA_MASK 0x00002000L
+#define CP_ME_COHER_CNTL__DB_DEST_BASE_ENA_MASK 0x00004000L
+#define CP_ME_COHER_CNTL__DEST_BASE_2_ENA_MASK 0x00080000L
+#define CP_ME_COHER_CNTL__DEST_BASE_3_ENA_MASK 0x00200000L
+//CP_ME_COHER_SIZE
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE__COHER_SIZE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_SIZE_HI
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_SIZE_HI__COHER_SIZE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_BASE
+#define CP_ME_COHER_BASE__COHER_BASE_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE__COHER_BASE_256B_MASK 0xFFFFFFFFL
+//CP_ME_COHER_BASE_HI
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B__SHIFT 0x0
+#define CP_ME_COHER_BASE_HI__COHER_BASE_HI_256B_MASK 0x000000FFL
+//CP_ME_COHER_STATUS
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX__SHIFT 0x0
+#define CP_ME_COHER_STATUS__STATUS__SHIFT 0x1f
+#define CP_ME_COHER_STATUS__MATCHING_GFX_CNTX_MASK 0x000000FFL
+#define CP_ME_COHER_STATUS__STATUS_MASK 0x80000000L
+//RLC_GPM_PERF_COUNT_0
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_0__SA_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_0__WGP_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_0__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_0__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_0__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_0__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_0__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_0__SA_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_0__WGP_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_0__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_0__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_0__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_0__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_PERF_COUNT_1
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL__SHIFT 0x0
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX__SHIFT 0x4
+#define RLC_GPM_PERF_COUNT_1__SA_INDEX__SHIFT 0x8
+#define RLC_GPM_PERF_COUNT_1__WGP_INDEX__SHIFT 0xc
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL__SHIFT 0x10
+#define RLC_GPM_PERF_COUNT_1__UNUSED__SHIFT 0x12
+#define RLC_GPM_PERF_COUNT_1__ENABLE__SHIFT 0x14
+#define RLC_GPM_PERF_COUNT_1__RESERVED__SHIFT 0x15
+#define RLC_GPM_PERF_COUNT_1__FEATURE_SEL_MASK 0x0000000FL
+#define RLC_GPM_PERF_COUNT_1__SE_INDEX_MASK 0x000000F0L
+#define RLC_GPM_PERF_COUNT_1__SA_INDEX_MASK 0x00000F00L
+#define RLC_GPM_PERF_COUNT_1__WGP_INDEX_MASK 0x0000F000L
+#define RLC_GPM_PERF_COUNT_1__EVENT_SEL_MASK 0x00030000L
+#define RLC_GPM_PERF_COUNT_1__UNUSED_MASK 0x000C0000L
+#define RLC_GPM_PERF_COUNT_1__ENABLE_MASK 0x00100000L
+#define RLC_GPM_PERF_COUNT_1__RESERVED_MASK 0xFFE00000L
+//GRBM_GFX_INDEX
+#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX__SA_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX__SA_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX__SA_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L
+//VGT_PRIMITIVE_TYPE
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE__SHIFT 0x0
+#define VGT_PRIMITIVE_TYPE__PRIM_TYPE_MASK 0x0000003FL
+//VGT_INDEX_TYPE
+#define VGT_INDEX_TYPE__INDEX_TYPE__SHIFT 0x0
+#define VGT_INDEX_TYPE__DISABLE_INSTANCE_PACKING__SHIFT 0xe
+#define VGT_INDEX_TYPE__INDEX_TYPE_MASK 0x00000003L
+#define VGT_INDEX_TYPE__DISABLE_INSTANCE_PACKING_MASK 0x00004000L
+//GE_MIN_VTX_INDX
+#define GE_MIN_VTX_INDX__MIN_INDX__SHIFT 0x0
+#define GE_MIN_VTX_INDX__MIN_INDX_MASK 0xFFFFFFFFL
+//GE_INDX_OFFSET
+#define GE_INDX_OFFSET__INDX_OFFSET__SHIFT 0x0
+#define GE_INDX_OFFSET__INDX_OFFSET_MASK 0xFFFFFFFFL
+//GE_MULTI_PRIM_IB_RESET_EN
+#define GE_MULTI_PRIM_IB_RESET_EN__RESET_EN__SHIFT 0x0
+#define GE_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS__SHIFT 0x1
+#define GE_MULTI_PRIM_IB_RESET_EN__DISABLE_FOR_AUTO_INDEX__SHIFT 0x2
+#define GE_MULTI_PRIM_IB_RESET_EN__RESET_EN_MASK 0x00000001L
+#define GE_MULTI_PRIM_IB_RESET_EN__MATCH_ALL_BITS_MASK 0x00000002L
+#define GE_MULTI_PRIM_IB_RESET_EN__DISABLE_FOR_AUTO_INDEX_MASK 0x00000004L
+//VGT_NUM_INDICES
+#define VGT_NUM_INDICES__NUM_INDICES__SHIFT 0x0
+#define VGT_NUM_INDICES__NUM_INDICES_MASK 0xFFFFFFFFL
+//VGT_NUM_INSTANCES
+#define VGT_NUM_INSTANCES__NUM_INSTANCES__SHIFT 0x0
+#define VGT_NUM_INSTANCES__NUM_INSTANCES_MASK 0xFFFFFFFFL
+//VGT_TF_RING_SIZE
+#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
+#define VGT_TF_RING_SIZE__SIZE_MASK 0x0001FFFFL
+//VGT_HS_OFFCHIP_PARAM
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING__SHIFT 0x0
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY__SHIFT 0xa
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_BUFFERING_MASK 0x000003FFL
+#define VGT_HS_OFFCHIP_PARAM__OFFCHIP_GRANULARITY_MASK 0x00000C00L
+//VGT_TF_MEMORY_BASE
+#define VGT_TF_MEMORY_BASE__BASE__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE__BASE_MASK 0xFFFFFFFFL
+//GE_MAX_VTX_INDX
+#define GE_MAX_VTX_INDX__MAX_INDX__SHIFT 0x0
+#define GE_MAX_VTX_INDX__MAX_INDX_MASK 0xFFFFFFFFL
+//VGT_INSTANCE_BASE_ID
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
+#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
+//GE_CNTL
+#define GE_CNTL__PRIMS_PER_SUBGRP__SHIFT 0x0
+#define GE_CNTL__VERTS_PER_SUBGRP__SHIFT 0x9
+#define GE_CNTL__BREAK_SUBGRP_AT_EOI__SHIFT 0x12
+#define GE_CNTL__PACKET_TO_ONE_PA__SHIFT 0x13
+#define GE_CNTL__BREAK_PRIMGRP_AT_EOI__SHIFT 0x14
+#define GE_CNTL__PRIM_GRP_SIZE__SHIFT 0x15
+#define GE_CNTL__GCR_DISABLE__SHIFT 0x1e
+#define GE_CNTL__DIS_PG_SIZE_ADJUST_FOR_STRIP__SHIFT 0x1f
+#define GE_CNTL__PRIMS_PER_SUBGRP_MASK 0x000001FFL
+#define GE_CNTL__VERTS_PER_SUBGRP_MASK 0x0003FE00L
+#define GE_CNTL__BREAK_SUBGRP_AT_EOI_MASK 0x00040000L
+#define GE_CNTL__PACKET_TO_ONE_PA_MASK 0x00080000L
+#define GE_CNTL__BREAK_PRIMGRP_AT_EOI_MASK 0x00100000L
+#define GE_CNTL__PRIM_GRP_SIZE_MASK 0x3FE00000L
+#define GE_CNTL__GCR_DISABLE_MASK 0x40000000L
+#define GE_CNTL__DIS_PG_SIZE_ADJUST_FOR_STRIP_MASK 0x80000000L
+//GE_USER_VGPR1
+#define GE_USER_VGPR1__DATA__SHIFT 0x0
+#define GE_USER_VGPR1__DATA_MASK 0xFFFFFFFFL
+//GE_USER_VGPR2
+#define GE_USER_VGPR2__DATA__SHIFT 0x0
+#define GE_USER_VGPR2__DATA_MASK 0xFFFFFFFFL
+//GE_USER_VGPR3
+#define GE_USER_VGPR3__DATA__SHIFT 0x0
+#define GE_USER_VGPR3__DATA_MASK 0xFFFFFFFFL
+//GE_STEREO_CNTL
+#define GE_STEREO_CNTL__RT_SLICE__SHIFT 0x0
+#define GE_STEREO_CNTL__VIEWPORT__SHIFT 0x3
+#define GE_STEREO_CNTL__FSR_SELECT__SHIFT 0x7
+#define GE_STEREO_CNTL__EN_STEREO__SHIFT 0x8
+#define GE_STEREO_CNTL__RT_SLICE_MASK 0x00000007L
+#define GE_STEREO_CNTL__VIEWPORT_MASK 0x00000078L
+#define GE_STEREO_CNTL__FSR_SELECT_MASK 0x00000080L
+#define GE_STEREO_CNTL__EN_STEREO_MASK 0x00000100L
+//GE_PC_ALLOC
+#define GE_PC_ALLOC__OVERSUB_EN__SHIFT 0x0
+#define GE_PC_ALLOC__NUM_PC_LINES__SHIFT 0x1
+#define GE_PC_ALLOC__OVERSUB_EN_MASK 0x00000001L
+#define GE_PC_ALLOC__NUM_PC_LINES_MASK 0x000007FEL
+//VGT_TF_MEMORY_BASE_HI
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI__SHIFT 0x0
+#define VGT_TF_MEMORY_BASE_HI__BASE_HI_MASK 0x000000FFL
+//GE_USER_VGPR_EN
+#define GE_USER_VGPR_EN__EN_USER_VGPR1__SHIFT 0x0
+#define GE_USER_VGPR_EN__EN_USER_VGPR2__SHIFT 0x1
+#define GE_USER_VGPR_EN__EN_USER_VGPR3__SHIFT 0x2
+#define GE_USER_VGPR_EN__EN_USER_VGPR1_MASK 0x00000001L
+#define GE_USER_VGPR_EN__EN_USER_VGPR2_MASK 0x00000002L
+#define GE_USER_VGPR_EN__EN_USER_VGPR3_MASK 0x00000004L
+//GE_GS_FAST_LAUNCH_WG_DIM
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_X__SHIFT 0x0
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_Y__SHIFT 0x10
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_X_MASK 0x0000FFFFL
+#define GE_GS_FAST_LAUNCH_WG_DIM__GS_FL_DIM_Y_MASK 0xFFFF0000L
+//GE_GS_FAST_LAUNCH_WG_DIM_1
+#define GE_GS_FAST_LAUNCH_WG_DIM_1__GS_FL_DIM_Z__SHIFT 0x0
+#define GE_GS_FAST_LAUNCH_WG_DIM_1__GS_FL_DIM_Z_MASK 0x0000FFFFL
+//VGT_GS_OUT_PRIM_TYPE
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE__SHIFT 0x0
+#define VGT_GS_OUT_PRIM_TYPE__OUTPRIM_TYPE_MASK 0x0000003FL
+//PA_SU_LINE_STIPPLE_VALUE
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE__SHIFT 0x0
+#define PA_SU_LINE_STIPPLE_VALUE__LINE_STIPPLE_VALUE_MASK 0x00FFFFFFL
+//PA_SC_LINE_STIPPLE_STATE
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR__SHIFT 0x0
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT__SHIFT 0x8
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_PTR_MASK 0x0000000FL
+#define PA_SC_LINE_STIPPLE_STATE__CURRENT_COUNT_MASK 0x0000FF00L
+//PA_SC_SCREEN_EXTENT_MIN_0
+#define PA_SC_SCREEN_EXTENT_MIN_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_0
+#define PA_SC_SCREEN_EXTENT_MAX_0__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_0__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_0__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MIN_1
+#define PA_SC_SCREEN_EXTENT_MIN_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MIN_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MIN_1__Y_MASK 0xFFFF0000L
+//PA_SC_SCREEN_EXTENT_MAX_1
+#define PA_SC_SCREEN_EXTENT_MAX_1__X__SHIFT 0x0
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y__SHIFT 0x10
+#define PA_SC_SCREEN_EXTENT_MAX_1__X_MASK 0x0000FFFFL
+#define PA_SC_SCREEN_EXTENT_MAX_1__Y_MASK 0xFFFF0000L
+//PA_SC_P3D_TRAP_SCREEN_HV_EN
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_P3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_P3D_TRAP_SCREEN_H
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_V
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_P3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_P3D_TRAP_SCREEN_COUNT
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_P3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_HV_EN
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_HP3D_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_HP3D_TRAP_SCREEN_H
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_V
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_HP3D_TRAP_SCREEN_COUNT
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_HP3D_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_HV_EN
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS__SHIFT 0x1
+#define PA_SC_TRAP_SCREEN_HV_EN__ENABLE_HV_PRE_SHADER_MASK 0x00000001L
+#define PA_SC_TRAP_SCREEN_HV_EN__FORCE_PRE_SHADER_ALL_PIXELS_MASK 0x00000002L
+//PA_SC_TRAP_SCREEN_H
+#define PA_SC_TRAP_SCREEN_H__X_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_H__X_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_V
+#define PA_SC_TRAP_SCREEN_V__Y_COORD__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_V__Y_COORD_MASK 0x00003FFFL
+//PA_SC_TRAP_SCREEN_OCCURRENCE
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_OCCURRENCE__COUNT_MASK 0x0000FFFFL
+//PA_SC_TRAP_SCREEN_COUNT
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT__SHIFT 0x0
+#define PA_SC_TRAP_SCREEN_COUNT__COUNT_MASK 0x0000FFFFL
+//SQ_THREAD_TRACE_USERDATA_0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_0__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_1
+#define SQ_THREAD_TRACE_USERDATA_1__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_1__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_2
+#define SQ_THREAD_TRACE_USERDATA_2__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_2__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_3
+#define SQ_THREAD_TRACE_USERDATA_3__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_3__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_4
+#define SQ_THREAD_TRACE_USERDATA_4__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_4__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_5
+#define SQ_THREAD_TRACE_USERDATA_5__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_5__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_6
+#define SQ_THREAD_TRACE_USERDATA_6__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_6__DATA_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_USERDATA_7
+#define SQ_THREAD_TRACE_USERDATA_7__DATA__SHIFT 0x0
+#define SQ_THREAD_TRACE_USERDATA_7__DATA_MASK 0xFFFFFFFFL
+//SQC_CACHES
+#define SQC_CACHES__TARGET_INST__SHIFT 0x0
+#define SQC_CACHES__TARGET_DATA__SHIFT 0x1
+#define SQC_CACHES__INVALIDATE__SHIFT 0x2
+#define SQC_CACHES__COMPLETE__SHIFT 0x10
+#define SQC_CACHES__TARGET_INST_MASK 0x00000001L
+#define SQC_CACHES__TARGET_DATA_MASK 0x00000002L
+#define SQC_CACHES__INVALIDATE_MASK 0x00000004L
+#define SQC_CACHES__COMPLETE_MASK 0x00010000L
+//TA_CS_BC_BASE_ADDR
+#define TA_CS_BC_BASE_ADDR__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR__ADDRESS_MASK 0xFFFFFFFFL
+//TA_CS_BC_BASE_ADDR_HI
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
+#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
+//DB_OCCLUSION_COUNT0_LOW
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT0_HI
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT0_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT1_LOW
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT1_HI
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT1_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT2_LOW
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT2_HI
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT2_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//DB_OCCLUSION_COUNT3_LOW
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
+//DB_OCCLUSION_COUNT3_HI
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI__SHIFT 0x0
+#define DB_OCCLUSION_COUNT3_HI__COUNT_HI_MASK 0x7FFFFFFFL
+//GDS_RD_ADDR
+#define GDS_RD_ADDR__READ_ADDR__SHIFT 0x0
+#define GDS_RD_ADDR__READ_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_DATA
+#define GDS_RD_DATA__READ_DATA__SHIFT 0x0
+#define GDS_RD_DATA__READ_DATA_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_ADDR
+#define GDS_RD_BURST_ADDR__BURST_ADDR__SHIFT 0x0
+#define GDS_RD_BURST_ADDR__BURST_ADDR_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_COUNT
+#define GDS_RD_BURST_COUNT__BURST_COUNT__SHIFT 0x0
+#define GDS_RD_BURST_COUNT__BURST_COUNT_MASK 0xFFFFFFFFL
+//GDS_RD_BURST_DATA
+#define GDS_RD_BURST_DATA__BURST_DATA__SHIFT 0x0
+#define GDS_RD_BURST_DATA__BURST_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_ADDR
+#define GDS_WR_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_DATA
+#define GDS_WR_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_ADDR
+#define GDS_WR_BURST_ADDR__WRITE_ADDR__SHIFT 0x0
+#define GDS_WR_BURST_ADDR__WRITE_ADDR_MASK 0xFFFFFFFFL
+//GDS_WR_BURST_DATA
+#define GDS_WR_BURST_DATA__WRITE_DATA__SHIFT 0x0
+#define GDS_WR_BURST_DATA__WRITE_DATA_MASK 0xFFFFFFFFL
+//GDS_WRITE_COMPLETE
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE__SHIFT 0x0
+#define GDS_WRITE_COMPLETE__WRITE_COMPLETE_MASK 0xFFFFFFFFL
+//GDS_ATOM_CNTL
+#define GDS_ATOM_CNTL__AINC__SHIFT 0x0
+#define GDS_ATOM_CNTL__UNUSED1__SHIFT 0x6
+#define GDS_ATOM_CNTL__DMODE__SHIFT 0x8
+#define GDS_ATOM_CNTL__UNUSED2__SHIFT 0xa
+#define GDS_ATOM_CNTL__AINC_MASK 0x0000003FL
+#define GDS_ATOM_CNTL__UNUSED1_MASK 0x000000C0L
+#define GDS_ATOM_CNTL__DMODE_MASK 0x00000300L
+#define GDS_ATOM_CNTL__UNUSED2_MASK 0xFFFFFC00L
+//GDS_ATOM_COMPLETE
+#define GDS_ATOM_COMPLETE__COMPLETE__SHIFT 0x0
+#define GDS_ATOM_COMPLETE__UNUSED__SHIFT 0x1
+#define GDS_ATOM_COMPLETE__COMPLETE_MASK 0x00000001L
+#define GDS_ATOM_COMPLETE__UNUSED_MASK 0xFFFFFFFEL
+//GDS_ATOM_BASE
+#define GDS_ATOM_BASE__BASE__SHIFT 0x0
+#define GDS_ATOM_BASE__UNUSED__SHIFT 0xc
+#define GDS_ATOM_BASE__BASE_MASK 0x00000FFFL
+#define GDS_ATOM_BASE__UNUSED_MASK 0xFFFFF000L
+//GDS_ATOM_SIZE
+#define GDS_ATOM_SIZE__SIZE__SHIFT 0x0
+#define GDS_ATOM_SIZE__UNUSED__SHIFT 0xd
+#define GDS_ATOM_SIZE__SIZE_MASK 0x00001FFFL
+#define GDS_ATOM_SIZE__UNUSED_MASK 0xFFFFE000L
+//GDS_ATOM_OFFSET0
+#define GDS_ATOM_OFFSET0__OFFSET0__SHIFT 0x0
+#define GDS_ATOM_OFFSET0__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET0__OFFSET0_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET0__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_OFFSET1
+#define GDS_ATOM_OFFSET1__OFFSET1__SHIFT 0x0
+#define GDS_ATOM_OFFSET1__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OFFSET1__OFFSET1_MASK 0x000000FFL
+#define GDS_ATOM_OFFSET1__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_DST
+#define GDS_ATOM_DST__DST__SHIFT 0x0
+#define GDS_ATOM_DST__DST_MASK 0xFFFFFFFFL
+//GDS_ATOM_OP
+#define GDS_ATOM_OP__OP__SHIFT 0x0
+#define GDS_ATOM_OP__UNUSED__SHIFT 0x8
+#define GDS_ATOM_OP__OP_MASK 0x000000FFL
+#define GDS_ATOM_OP__UNUSED_MASK 0xFFFFFF00L
+//GDS_ATOM_SRC0
+#define GDS_ATOM_SRC0__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC0_U
+#define GDS_ATOM_SRC0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1
+#define GDS_ATOM_SRC1__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_SRC1_U
+#define GDS_ATOM_SRC1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_SRC1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0
+#define GDS_ATOM_READ0__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ0_U
+#define GDS_ATOM_READ0_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ0_U__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1
+#define GDS_ATOM_READ1__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1__DATA_MASK 0xFFFFFFFFL
+//GDS_ATOM_READ1_U
+#define GDS_ATOM_READ1_U__DATA__SHIFT 0x0
+#define GDS_ATOM_READ1_U__DATA_MASK 0xFFFFFFFFL
+//GDS_GWS_RESOURCE_CNTL
+#define GDS_GWS_RESOURCE_CNTL__INDEX__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNTL__UNUSED__SHIFT 0x6
+#define GDS_GWS_RESOURCE_CNTL__INDEX_MASK 0x0000003FL
+#define GDS_GWS_RESOURCE_CNTL__UNUSED_MASK 0xFFFFFFC0L
+//GDS_GWS_RESOURCE
+#define GDS_GWS_RESOURCE__FLAG__SHIFT 0x0
+#define GDS_GWS_RESOURCE__COUNTER__SHIFT 0x1
+#define GDS_GWS_RESOURCE__TYPE__SHIFT 0xd
+#define GDS_GWS_RESOURCE__DED__SHIFT 0xe
+#define GDS_GWS_RESOURCE__RELEASE_ALL__SHIFT 0xf
+#define GDS_GWS_RESOURCE__HEAD_QUEUE__SHIFT 0x10
+#define GDS_GWS_RESOURCE__HEAD_VALID__SHIFT 0x1d
+#define GDS_GWS_RESOURCE__HEAD_FLAG__SHIFT 0x1e
+#define GDS_GWS_RESOURCE__HALTED__SHIFT 0x1f
+#define GDS_GWS_RESOURCE__FLAG_MASK 0x00000001L
+#define GDS_GWS_RESOURCE__COUNTER_MASK 0x00001FFEL
+#define GDS_GWS_RESOURCE__TYPE_MASK 0x00002000L
+#define GDS_GWS_RESOURCE__DED_MASK 0x00004000L
+#define GDS_GWS_RESOURCE__RELEASE_ALL_MASK 0x00008000L
+#define GDS_GWS_RESOURCE__HEAD_QUEUE_MASK 0x1FFF0000L
+#define GDS_GWS_RESOURCE__HEAD_VALID_MASK 0x20000000L
+#define GDS_GWS_RESOURCE__HEAD_FLAG_MASK 0x40000000L
+#define GDS_GWS_RESOURCE__HALTED_MASK 0x80000000L
+//GDS_GWS_RESOURCE_CNT
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT__SHIFT 0x0
+#define GDS_GWS_RESOURCE_CNT__UNUSED__SHIFT 0x10
+#define GDS_GWS_RESOURCE_CNT__RESOURCE_CNT_MASK 0x0000FFFFL
+#define GDS_GWS_RESOURCE_CNT__UNUSED_MASK 0xFFFF0000L
+//GDS_OA_CNTL
+#define GDS_OA_CNTL__INDEX__SHIFT 0x0
+#define GDS_OA_CNTL__UNUSED__SHIFT 0x4
+#define GDS_OA_CNTL__INDEX_MASK 0x0000000FL
+#define GDS_OA_CNTL__UNUSED_MASK 0xFFFFFFF0L
+//GDS_OA_COUNTER
+#define GDS_OA_COUNTER__SPACE_AVAILABLE__SHIFT 0x0
+#define GDS_OA_COUNTER__SPACE_AVAILABLE_MASK 0xFFFFFFFFL
+//GDS_OA_ADDRESS
+#define GDS_OA_ADDRESS__DS_ADDRESS__SHIFT 0x0
+#define GDS_OA_ADDRESS__CRAWLER_TYPE__SHIFT 0x10
+#define GDS_OA_ADDRESS__CRAWLER__SHIFT 0x14
+#define GDS_OA_ADDRESS__UNUSED__SHIFT 0x18
+#define GDS_OA_ADDRESS__NO_ALLOC__SHIFT 0x1e
+#define GDS_OA_ADDRESS__ENABLE__SHIFT 0x1f
+#define GDS_OA_ADDRESS__DS_ADDRESS_MASK 0x0000FFFFL
+#define GDS_OA_ADDRESS__CRAWLER_TYPE_MASK 0x000F0000L
+#define GDS_OA_ADDRESS__CRAWLER_MASK 0x00F00000L
+#define GDS_OA_ADDRESS__UNUSED_MASK 0x3F000000L
+#define GDS_OA_ADDRESS__NO_ALLOC_MASK 0x40000000L
+#define GDS_OA_ADDRESS__ENABLE_MASK 0x80000000L
+//GDS_OA_INCDEC
+#define GDS_OA_INCDEC__VALUE__SHIFT 0x0
+#define GDS_OA_INCDEC__INCDEC__SHIFT 0x1f
+#define GDS_OA_INCDEC__VALUE_MASK 0x7FFFFFFFL
+#define GDS_OA_INCDEC__INCDEC_MASK 0x80000000L
+//GDS_OA_RING_SIZE
+#define GDS_OA_RING_SIZE__RING_SIZE__SHIFT 0x0
+#define GDS_OA_RING_SIZE__RING_SIZE_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_0
+#define GDS_STRMOUT_DWORDS_WRITTEN_0__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_0__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_1
+#define GDS_STRMOUT_DWORDS_WRITTEN_1__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_1__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_2
+#define GDS_STRMOUT_DWORDS_WRITTEN_2__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_2__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_DWORDS_WRITTEN_3
+#define GDS_STRMOUT_DWORDS_WRITTEN_3__DATA__SHIFT 0x0
+#define GDS_STRMOUT_DWORDS_WRITTEN_3__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_0
+#define GDS_GS_0__DATA__SHIFT 0x0
+#define GDS_GS_0__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_1
+#define GDS_GS_1__DATA__SHIFT 0x0
+#define GDS_GS_1__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_2
+#define GDS_GS_2__DATA__SHIFT 0x0
+#define GDS_GS_2__DATA_MASK 0xFFFFFFFFL
+//GDS_GS_3
+#define GDS_GS_3__DATA__SHIFT 0x0
+#define GDS_GS_3__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_0_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_0_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_0_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_0_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_0_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_0_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_0_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_0_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_0_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_1_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_1_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_1_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_1_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_1_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_1_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_1_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_1_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_1_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_2_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_2_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_2_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_2_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_2_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_2_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_2_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_2_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_2_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_3_LO
+#define GDS_STRMOUT_PRIMS_NEEDED_3_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_3_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_NEEDED_3_HI
+#define GDS_STRMOUT_PRIMS_NEEDED_3_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_NEEDED_3_HI__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_3_LO
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_LO__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_LO__DATA_MASK 0xFFFFFFFFL
+//GDS_STRMOUT_PRIMS_WRITTEN_3_HI
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_HI__DATA__SHIFT 0x0
+#define GDS_STRMOUT_PRIMS_WRITTEN_3_HI__DATA_MASK 0xFFFFFFFFL
+//SPI_CONFIG_CNTL
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY__SHIFT 0x0
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER__SHIFT 0x15
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS__SHIFT 0x18
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS__SHIFT 0x19
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA__SHIFT 0x1c
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA__SHIFT 0x1d
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL__SHIFT 0x1e
+#define SPI_CONFIG_CNTL__GPR_WRITE_PRIORITY_MASK 0x001FFFFFL
+#define SPI_CONFIG_CNTL__EXP_PRIORITY_ORDER_MASK 0x00E00000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_TOP_EVENTS_MASK 0x01000000L
+#define SPI_CONFIG_CNTL__ENABLE_SQG_BOP_EVENTS_MASK 0x02000000L
+#define SPI_CONFIG_CNTL__ALLOC_ARB_LRU_ENA_MASK 0x10000000L
+#define SPI_CONFIG_CNTL__EXP_ARB_LRU_ENA_MASK 0x20000000L
+#define SPI_CONFIG_CNTL__PS_PKR_PRIORITY_CNTL_MASK 0xC0000000L
+//SPI_CONFIG_CNTL_1
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT 0x0
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW__SHIFT 0x4
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE__SHIFT 0x5
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT__SHIFT 0x7
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MODE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_1__OREO_EXPALLOC_STALL__SHIFT 0x9
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT__SHIFT 0xa
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE__SHIFT 0xe
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE__SHIFT 0xf
+#define SPI_CONFIG_CNTL_1__MAX_VTX_SYNC_CNT__SHIFT 0x10
+#define SPI_CONFIG_CNTL_1__EN_USER_ACCUM__SHIFT 0x15
+#define SPI_CONFIG_CNTL_1__SA_SCREEN_MAP__SHIFT 0x16
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT__SHIFT 0x17
+#define SPI_CONFIG_CNTL_1__VTX_DONE_DELAY_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_1__INTERP_ONE_PRIM_PER_ROW_MASK 0x00000010L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_ENABLE_MASK 0x00000060L
+#define SPI_CONFIG_CNTL_1__PC_LIMIT_STRICT_MASK 0x00000080L
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MODE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_1__OREO_EXPALLOC_STALL_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_1__LBPW_CU_CHK_CNT_MASK 0x00003C00L
+#define SPI_CONFIG_CNTL_1__CSC_PWR_SAVE_DISABLE_MASK 0x00004000L
+#define SPI_CONFIG_CNTL_1__CSG_PWR_SAVE_DISABLE_MASK 0x00008000L
+#define SPI_CONFIG_CNTL_1__MAX_VTX_SYNC_CNT_MASK 0x001F0000L
+#define SPI_CONFIG_CNTL_1__EN_USER_ACCUM_MASK 0x00200000L
+#define SPI_CONFIG_CNTL_1__SA_SCREEN_MAP_MASK 0x00400000L
+#define SPI_CONFIG_CNTL_1__PS_GROUP_TIMEOUT_MASK 0xFF800000L
+//SPI_CONFIG_CNTL_2
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD__SHIFT 0x0
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD__SHIFT 0x4
+#define SPI_CONFIG_CNTL_2__PWS_CSG_WAIT_DISABLE__SHIFT 0x8
+#define SPI_CONFIG_CNTL_2__PWS_HS_WAIT_DISABLE__SHIFT 0x9
+#define SPI_CONFIG_CNTL_2__PWS_GS_WAIT_DISABLE__SHIFT 0xa
+#define SPI_CONFIG_CNTL_2__PWS_PS_WAIT_DISABLE__SHIFT 0xb
+#define SPI_CONFIG_CNTL_2__CSC_HALT_ACK_DELAY__SHIFT 0xc
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_REQUEST_CYCLE_OVHD_MASK 0x0000000FL
+#define SPI_CONFIG_CNTL_2__CONTEXT_SAVE_WAIT_GDS_GRANT_CYCLE_OVHD_MASK 0x000000F0L
+#define SPI_CONFIG_CNTL_2__PWS_CSG_WAIT_DISABLE_MASK 0x00000100L
+#define SPI_CONFIG_CNTL_2__PWS_HS_WAIT_DISABLE_MASK 0x00000200L
+#define SPI_CONFIG_CNTL_2__PWS_GS_WAIT_DISABLE_MASK 0x00000400L
+#define SPI_CONFIG_CNTL_2__PWS_PS_WAIT_DISABLE_MASK 0x00000800L
+#define SPI_CONFIG_CNTL_2__CSC_HALT_ACK_DELAY_MASK 0x0001F000L
+//SPI_WAVE_LIMIT_CNTL
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN__SHIFT 0x0
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN__SHIFT 0x4
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN__SHIFT 0x6
+#define SPI_WAVE_LIMIT_CNTL__PS_WAVE_GRAN_MASK 0x00000003L
+#define SPI_WAVE_LIMIT_CNTL__GS_WAVE_GRAN_MASK 0x00000030L
+#define SPI_WAVE_LIMIT_CNTL__HS_WAVE_GRAN_MASK 0x000000C0L
+//SPI_GS_THROTTLE_CNTL1
+#define SPI_GS_THROTTLE_CNTL1__PH_POLL_INTERVAL__SHIFT 0x0
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_BASE__SHIFT 0x4
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_STEP_SIZE__SHIFT 0x8
+#define SPI_GS_THROTTLE_CNTL1__SPI_VGPR_THRESHOLD__SHIFT 0xc
+#define SPI_GS_THROTTLE_CNTL1__SPI_LDS_THRESHOLD__SHIFT 0x10
+#define SPI_GS_THROTTLE_CNTL1__SPI_POLL_INTERVAL__SHIFT 0x14
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_BASE__SHIFT 0x18
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_STEP_SIZE__SHIFT 0x1c
+#define SPI_GS_THROTTLE_CNTL1__PH_POLL_INTERVAL_MASK 0x0000000FL
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_BASE_MASK 0x000000F0L
+#define SPI_GS_THROTTLE_CNTL1__PH_THROTTLE_STEP_SIZE_MASK 0x00000F00L
+#define SPI_GS_THROTTLE_CNTL1__SPI_VGPR_THRESHOLD_MASK 0x0000F000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_LDS_THRESHOLD_MASK 0x000F0000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_POLL_INTERVAL_MASK 0x00F00000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_BASE_MASK 0x0F000000L
+#define SPI_GS_THROTTLE_CNTL1__SPI_THROTTLE_STEP_SIZE_MASK 0xF0000000L
+//SPI_GS_THROTTLE_CNTL2
+#define SPI_GS_THROTTLE_CNTL2__SPI_THROTTLE_MODE__SHIFT 0x0
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD__SHIFT 0x2
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_FACTOR__SHIFT 0x6
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY1__SHIFT 0x8
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY2__SHIFT 0xb
+#define SPI_GS_THROTTLE_CNTL2__PS_STALL_THRESHOLD__SHIFT 0xe
+#define SPI_GS_THROTTLE_CNTL2__PH_MODE__SHIFT 0x10
+#define SPI_GS_THROTTLE_CNTL2__RESERVED__SHIFT 0x11
+#define SPI_GS_THROTTLE_CNTL2__SPI_THROTTLE_MODE_MASK 0x00000003L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_MASK 0x0000003CL
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_THRESHOLD_FACTOR_MASK 0x000000C0L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY1_MASK 0x00000700L
+#define SPI_GS_THROTTLE_CNTL2__GRP_LIFETIME_PENALTY2_MASK 0x00003800L
+#define SPI_GS_THROTTLE_CNTL2__PS_STALL_THRESHOLD_MASK 0x0000C000L
+#define SPI_GS_THROTTLE_CNTL2__PH_MODE_MASK 0x00010000L
+#define SPI_GS_THROTTLE_CNTL2__RESERVED_MASK 0xFFFE0000L
+//SPI_ATTRIBUTE_RING_BASE
+#define SPI_ATTRIBUTE_RING_BASE__BASE__SHIFT 0x0
+#define SPI_ATTRIBUTE_RING_BASE__BASE_MASK 0xFFFFFFFFL
+//SPI_ATTRIBUTE_RING_SIZE
+#define SPI_ATTRIBUTE_RING_SIZE__MEM_SIZE__SHIFT 0x0
+#define SPI_ATTRIBUTE_RING_SIZE__BIG_PAGE__SHIFT 0x10
+#define SPI_ATTRIBUTE_RING_SIZE__L1_POLICY__SHIFT 0x11
+#define SPI_ATTRIBUTE_RING_SIZE__L2_POLICY__SHIFT 0x13
+#define SPI_ATTRIBUTE_RING_SIZE__LLC_NOALLOC__SHIFT 0x15
+#define SPI_ATTRIBUTE_RING_SIZE__GL1_PERF_COUNTER_DISABLE__SHIFT 0x16
+#define SPI_ATTRIBUTE_RING_SIZE__MEM_SIZE_MASK 0x000000FFL
+#define SPI_ATTRIBUTE_RING_SIZE__BIG_PAGE_MASK 0x00010000L
+#define SPI_ATTRIBUTE_RING_SIZE__L1_POLICY_MASK 0x00060000L
+#define SPI_ATTRIBUTE_RING_SIZE__L2_POLICY_MASK 0x00180000L
+#define SPI_ATTRIBUTE_RING_SIZE__LLC_NOALLOC_MASK 0x00200000L
+#define SPI_ATTRIBUTE_RING_SIZE__GL1_PERF_COUNTER_DISABLE_MASK 0x00400000L
+
+
+// addressBlock: gc_cprs64dec
+//CP_MES_PRGRM_CNTR_START
+#define CP_MES_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MES_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MES_INTR_ROUTINE_START
+#define CP_MES_INTR_ROUTINE_START__IR_START__SHIFT 0x0
+#define CP_MES_INTR_ROUTINE_START__IR_START_MASK 0xFFFFFFFFL
+//CP_MES_MTVEC_LO
+#define CP_MES_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define CP_MES_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_INTR_ROUTINE_START_HI
+#define CP_MES_INTR_ROUTINE_START_HI__IR_START__SHIFT 0x0
+#define CP_MES_INTR_ROUTINE_START_HI__IR_START_MASK 0xFFFFFFFFL
+//CP_MES_MTVEC_HI
+#define CP_MES_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define CP_MES_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_CNTL
+#define CP_MES_CNTL__MES_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MES_CNTL__MES_PIPE0_RESET__SHIFT 0x10
+#define CP_MES_CNTL__MES_PIPE1_RESET__SHIFT 0x11
+#define CP_MES_CNTL__MES_PIPE2_RESET__SHIFT 0x12
+#define CP_MES_CNTL__MES_PIPE3_RESET__SHIFT 0x13
+#define CP_MES_CNTL__MES_PIPE0_ACTIVE__SHIFT 0x1a
+#define CP_MES_CNTL__MES_PIPE1_ACTIVE__SHIFT 0x1b
+#define CP_MES_CNTL__MES_PIPE2_ACTIVE__SHIFT 0x1c
+#define CP_MES_CNTL__MES_PIPE3_ACTIVE__SHIFT 0x1d
+#define CP_MES_CNTL__MES_HALT__SHIFT 0x1e
+#define CP_MES_CNTL__MES_STEP__SHIFT 0x1f
+#define CP_MES_CNTL__MES_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MES_CNTL__MES_PIPE0_RESET_MASK 0x00010000L
+#define CP_MES_CNTL__MES_PIPE1_RESET_MASK 0x00020000L
+#define CP_MES_CNTL__MES_PIPE2_RESET_MASK 0x00040000L
+#define CP_MES_CNTL__MES_PIPE3_RESET_MASK 0x00080000L
+#define CP_MES_CNTL__MES_PIPE0_ACTIVE_MASK 0x04000000L
+#define CP_MES_CNTL__MES_PIPE1_ACTIVE_MASK 0x08000000L
+#define CP_MES_CNTL__MES_PIPE2_ACTIVE_MASK 0x10000000L
+#define CP_MES_CNTL__MES_PIPE3_ACTIVE_MASK 0x20000000L
+#define CP_MES_CNTL__MES_HALT_MASK 0x40000000L
+#define CP_MES_CNTL__MES_STEP_MASK 0x80000000L
+//CP_MES_PIPE_PRIORITY_CNTS
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT__SHIFT 0x0
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT__SHIFT 0x8
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT__SHIFT 0x10
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT__SHIFT 0x18
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY1_CNT_MASK 0x000000FFL
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2A_CNT_MASK 0x0000FF00L
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY2B_CNT_MASK 0x00FF0000L
+#define CP_MES_PIPE_PRIORITY_CNTS__PRIORITY3_CNT_MASK 0xFF000000L
+//CP_MES_PIPE0_PRIORITY
+#define CP_MES_PIPE0_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE0_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE1_PRIORITY
+#define CP_MES_PIPE1_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE1_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE2_PRIORITY
+#define CP_MES_PIPE2_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE2_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_PIPE3_PRIORITY
+#define CP_MES_PIPE3_PRIORITY__PRIORITY__SHIFT 0x0
+#define CP_MES_PIPE3_PRIORITY__PRIORITY_MASK 0x00000003L
+//CP_MES_HEADER_DUMP
+#define CP_MES_HEADER_DUMP__HEADER_DUMP__SHIFT 0x0
+#define CP_MES_HEADER_DUMP__HEADER_DUMP_MASK 0xFFFFFFFFL
+//CP_MES_MIE_LO
+#define CP_MES_MIE_LO__MES_INT__SHIFT 0x0
+#define CP_MES_MIE_LO__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_MIE_HI
+#define CP_MES_MIE_HI__MES_INT__SHIFT 0x0
+#define CP_MES_MIE_HI__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT
+#define CP_MES_INTERRUPT__MES_INT__SHIFT 0x0
+#define CP_MES_INTERRUPT__MES_INT_MASK 0xFFFFFFFFL
+//CP_MES_SCRATCH_INDEX
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX__SHIFT 0x0
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE__SHIFT 0x1f
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_MASK 0x000001FFL
+#define CP_MES_SCRATCH_INDEX__SCRATCH_INDEX_64BIT_MODE_MASK 0x80000000L
+//CP_MES_SCRATCH_DATA
+#define CP_MES_SCRATCH_DATA__SCRATCH_DATA__SHIFT 0x0
+#define CP_MES_SCRATCH_DATA__SCRATCH_DATA_MASK 0xFFFFFFFFL
+//CP_MES_INSTR_PNTR
+#define CP_MES_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MES_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_MES_MSCRATCH_HI
+#define CP_MES_MSCRATCH_HI__DATA__SHIFT 0x0
+#define CP_MES_MSCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_MSCRATCH_LO
+#define CP_MES_MSCRATCH_LO__DATA__SHIFT 0x0
+#define CP_MES_MSCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_MSTATUS_LO
+#define CP_MES_MSTATUS_LO__STATUS_LO__SHIFT 0x0
+#define CP_MES_MSTATUS_LO__STATUS_LO_MASK 0xFFFFFFFFL
+//CP_MES_MSTATUS_HI
+#define CP_MES_MSTATUS_HI__STATUS_HI__SHIFT 0x0
+#define CP_MES_MSTATUS_HI__STATUS_HI_MASK 0xFFFFFFFFL
+//CP_MES_MEPC_LO
+#define CP_MES_MEPC_LO__MEPC_LO__SHIFT 0x0
+#define CP_MES_MEPC_LO__MEPC_LO_MASK 0xFFFFFFFFL
+//CP_MES_MEPC_HI
+#define CP_MES_MEPC_HI__MEPC_HI__SHIFT 0x0
+#define CP_MES_MEPC_HI__MEPC_HI_MASK 0xFFFFFFFFL
+//CP_MES_MCAUSE_LO
+#define CP_MES_MCAUSE_LO__CAUSE_LO__SHIFT 0x0
+#define CP_MES_MCAUSE_LO__CAUSE_LO_MASK 0xFFFFFFFFL
+//CP_MES_MCAUSE_HI
+#define CP_MES_MCAUSE_HI__CAUSE_HI__SHIFT 0x0
+#define CP_MES_MCAUSE_HI__CAUSE_HI_MASK 0xFFFFFFFFL
+//CP_MES_MBADADDR_LO
+#define CP_MES_MBADADDR_LO__ADDR_LO__SHIFT 0x0
+#define CP_MES_MBADADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MES_MBADADDR_HI
+#define CP_MES_MBADADDR_HI__ADDR_HI__SHIFT 0x0
+#define CP_MES_MBADADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//CP_MES_MIP_LO
+#define CP_MES_MIP_LO__MIP_LO__SHIFT 0x0
+#define CP_MES_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIP_HI
+#define CP_MES_MIP_HI__MIP_HI__SHIFT 0x0
+#define CP_MES_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//CP_MES_IC_OP_CNTL
+#define CP_MES_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_MES_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_MES_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_MES_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_MES_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_MES_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_MES_MCYCLE_LO
+#define CP_MES_MCYCLE_LO__CYCLE_LO__SHIFT 0x0
+#define CP_MES_MCYCLE_LO__CYCLE_LO_MASK 0xFFFFFFFFL
+//CP_MES_MCYCLE_HI
+#define CP_MES_MCYCLE_HI__CYCLE_HI__SHIFT 0x0
+#define CP_MES_MCYCLE_HI__CYCLE_HI_MASK 0xFFFFFFFFL
+//CP_MES_MTIME_LO
+#define CP_MES_MTIME_LO__TIME_LO__SHIFT 0x0
+#define CP_MES_MTIME_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MES_MTIME_HI
+#define CP_MES_MTIME_HI__TIME_HI__SHIFT 0x0
+#define CP_MES_MTIME_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MES_MINSTRET_LO
+#define CP_MES_MINSTRET_LO__INSTRET_LO__SHIFT 0x0
+#define CP_MES_MINSTRET_LO__INSTRET_LO_MASK 0xFFFFFFFFL
+//CP_MES_MINSTRET_HI
+#define CP_MES_MINSTRET_HI__INSTRET_HI__SHIFT 0x0
+#define CP_MES_MINSTRET_HI__INSTRET_HI_MASK 0xFFFFFFFFL
+//CP_MES_MISA_LO
+#define CP_MES_MISA_LO__MISA_LO__SHIFT 0x0
+#define CP_MES_MISA_LO__MISA_LO_MASK 0xFFFFFFFFL
+//CP_MES_MISA_HI
+#define CP_MES_MISA_HI__MISA_HI__SHIFT 0x0
+#define CP_MES_MISA_HI__MISA_HI_MASK 0xFFFFFFFFL
+//CP_MES_MVENDORID_LO
+#define CP_MES_MVENDORID_LO__MVENDORID_LO__SHIFT 0x0
+#define CP_MES_MVENDORID_LO__MVENDORID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MVENDORID_HI
+#define CP_MES_MVENDORID_HI__MVENDORID_HI__SHIFT 0x0
+#define CP_MES_MVENDORID_HI__MVENDORID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MARCHID_LO
+#define CP_MES_MARCHID_LO__MARCHID_LO__SHIFT 0x0
+#define CP_MES_MARCHID_LO__MARCHID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MARCHID_HI
+#define CP_MES_MARCHID_HI__MARCHID_HI__SHIFT 0x0
+#define CP_MES_MARCHID_HI__MARCHID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MIMPID_LO
+#define CP_MES_MIMPID_LO__MIMPID_LO__SHIFT 0x0
+#define CP_MES_MIMPID_LO__MIMPID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIMPID_HI
+#define CP_MES_MIMPID_HI__MIMPID_HI__SHIFT 0x0
+#define CP_MES_MIMPID_HI__MIMPID_HI_MASK 0xFFFFFFFFL
+//CP_MES_MHARTID_LO
+#define CP_MES_MHARTID_LO__MHARTID_LO__SHIFT 0x0
+#define CP_MES_MHARTID_LO__MHARTID_LO_MASK 0xFFFFFFFFL
+//CP_MES_MHARTID_HI
+#define CP_MES_MHARTID_HI__MHARTID_HI__SHIFT 0x0
+#define CP_MES_MHARTID_HI__MHARTID_HI_MASK 0xFFFFFFFFL
+//CP_MES_DC_BASE_CNTL
+#define CP_MES_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MES_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_DC_OP_CNTL
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_MES_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_MES_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_MES_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+//CP_MES_MTIMECMP_LO
+#define CP_MES_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define CP_MES_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MES_MTIMECMP_HI
+#define CP_MES_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define CP_MES_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MES_PROCESS_QUANTUM_PIPE0
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_DURATION__SHIFT 0x0
+#define CP_MES_PROCESS_QUANTUM_PIPE0__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_EN__SHIFT 0x1f
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_MES_PROCESS_QUANTUM_PIPE0__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE0__QUANTUM_EN_MASK 0x80000000L
+//CP_MES_PROCESS_QUANTUM_PIPE1
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_DURATION__SHIFT 0x0
+#define CP_MES_PROCESS_QUANTUM_PIPE1__TIMER_EXPIRED__SHIFT 0x1c
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_SCALE__SHIFT 0x1d
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_EN__SHIFT 0x1f
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_DURATION_MASK 0x0FFFFFFFL
+#define CP_MES_PROCESS_QUANTUM_PIPE1__TIMER_EXPIRED_MASK 0x10000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_SCALE_MASK 0x60000000L
+#define CP_MES_PROCESS_QUANTUM_PIPE1__QUANTUM_EN_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL1
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL2
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL3
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL4
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL5
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_DOORBELL_CONTROL6
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_OFFSET__SHIFT 0x2
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_EN__SHIFT 0x1e
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_HIT__SHIFT 0x1f
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_OFFSET_MASK 0x0FFFFFFCL
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_EN_MASK 0x40000000L
+#define CP_MES_DOORBELL_CONTROL6__DOORBELL_HIT_MASK 0x80000000L
+//CP_MES_GP0_LO
+#define CP_MES_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MES_GP0_LO__DATA__SHIFT 0x1
+#define CP_MES_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MES_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MES_GP0_HI
+#define CP_MES_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MES_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MES_GP1_LO
+#define CP_MES_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MES_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP1_HI
+#define CP_MES_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MES_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP2_LO
+#define CP_MES_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MES_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP2_HI
+#define CP_MES_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MES_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP3_LO
+#define CP_MES_GP3_LO__DATA__SHIFT 0x0
+#define CP_MES_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP3_HI
+#define CP_MES_GP3_HI__DATA__SHIFT 0x0
+#define CP_MES_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP4_LO
+#define CP_MES_GP4_LO__DATA__SHIFT 0x0
+#define CP_MES_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP4_HI
+#define CP_MES_GP4_HI__DATA__SHIFT 0x0
+#define CP_MES_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP5_LO
+#define CP_MES_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MES_GP5_LO__DATA__SHIFT 0x1
+#define CP_MES_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MES_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MES_GP5_HI
+#define CP_MES_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MES_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MES_GP6_LO
+#define CP_MES_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MES_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP6_HI
+#define CP_MES_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MES_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP7_LO
+#define CP_MES_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MES_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MES_GP7_HI
+#define CP_MES_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MES_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MES_GP8_LO
+#define CP_MES_GP8_LO__DATA__SHIFT 0x0
+#define CP_MES_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP8_HI
+#define CP_MES_GP8_HI__DATA__SHIFT 0x0
+#define CP_MES_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP9_LO
+#define CP_MES_GP9_LO__DATA__SHIFT 0x0
+#define CP_MES_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MES_GP9_HI
+#define CP_MES_GP9_HI__DATA__SHIFT 0x0
+#define CP_MES_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MES_LOCAL_BASE0_LO
+#define CP_MES_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_MES_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_BASE0_HI
+#define CP_MES_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_MES_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_MASK0_LO
+#define CP_MES_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_MES_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_MASK0_HI
+#define CP_MES_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_MES_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_APERTURE
+#define CP_MES_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_INSTR_BASE_LO
+#define CP_MES_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_INSTR_BASE_HI
+#define CP_MES_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_INSTR_MASK_LO
+#define CP_MES_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_MES_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_INSTR_MASK_HI
+#define CP_MES_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_MES_LOCAL_INSTR_APERTURE
+#define CP_MES_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_SCRATCH_APERTURE
+#define CP_MES_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MES_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MES_LOCAL_SCRATCH_BASE_LO
+#define CP_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_LOCAL_SCRATCH_BASE_HI
+#define CP_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_PERFCOUNT_CNTL
+#define CP_MES_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define CP_MES_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//CP_MES_PENDING_INTERRUPT
+#define CP_MES_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_MES_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_MES_PRGRM_CNTR_START_HI
+#define CP_MES_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_MES_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MES_INTERRUPT_DATA_16
+#define CP_MES_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_17
+#define CP_MES_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_18
+#define CP_MES_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_19
+#define CP_MES_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_20
+#define CP_MES_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_21
+#define CP_MES_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_22
+#define CP_MES_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_23
+#define CP_MES_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_24
+#define CP_MES_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_25
+#define CP_MES_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_26
+#define CP_MES_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_27
+#define CP_MES_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_28
+#define CP_MES_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_29
+#define CP_MES_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_30
+#define CP_MES_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//CP_MES_INTERRUPT_DATA_31
+#define CP_MES_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define CP_MES_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_BASE
+#define CP_MES_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_MASK
+#define CP_MES_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE0_CNTL
+#define CP_MES_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE1_BASE
+#define CP_MES_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE1_MASK
+#define CP_MES_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE1_CNTL
+#define CP_MES_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE2_BASE
+#define CP_MES_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE2_MASK
+#define CP_MES_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE2_CNTL
+#define CP_MES_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE3_BASE
+#define CP_MES_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE3_MASK
+#define CP_MES_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE3_CNTL
+#define CP_MES_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE4_BASE
+#define CP_MES_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE4_MASK
+#define CP_MES_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE4_CNTL
+#define CP_MES_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE5_BASE
+#define CP_MES_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE5_MASK
+#define CP_MES_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE5_CNTL
+#define CP_MES_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE6_BASE
+#define CP_MES_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE6_MASK
+#define CP_MES_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE6_CNTL
+#define CP_MES_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE7_BASE
+#define CP_MES_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE7_MASK
+#define CP_MES_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE7_CNTL
+#define CP_MES_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE8_BASE
+#define CP_MES_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE8_MASK
+#define CP_MES_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE8_CNTL
+#define CP_MES_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE9_BASE
+#define CP_MES_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE9_MASK
+#define CP_MES_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE9_CNTL
+#define CP_MES_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE10_BASE
+#define CP_MES_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE10_MASK
+#define CP_MES_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE10_CNTL
+#define CP_MES_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE11_BASE
+#define CP_MES_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE11_MASK
+#define CP_MES_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE11_CNTL
+#define CP_MES_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE12_BASE
+#define CP_MES_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE12_MASK
+#define CP_MES_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE12_CNTL
+#define CP_MES_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE13_BASE
+#define CP_MES_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE13_MASK
+#define CP_MES_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE13_CNTL
+#define CP_MES_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE14_BASE
+#define CP_MES_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE14_MASK
+#define CP_MES_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE14_CNTL
+#define CP_MES_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MES_DC_APERTURE15_BASE
+#define CP_MES_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE15_MASK
+#define CP_MES_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MES_DC_APERTURE15_CNTL
+#define CP_MES_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define CP_MES_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MES_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_RS64_PRGRM_CNTR_START
+#define CP_MEC_RS64_PRGRM_CNTR_START__IP_START__SHIFT 0x0
+#define CP_MEC_RS64_PRGRM_CNTR_START__IP_START_MASK 0xFFFFFFFFL
+//CP_MEC_MTVEC_LO
+#define CP_MEC_MTVEC_LO__ADDR_LO__SHIFT 0x0
+#define CP_MEC_MTVEC_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MTVEC_HI
+#define CP_MEC_MTVEC_HI__ADDR_LO__SHIFT 0x0
+#define CP_MEC_MTVEC_HI__ADDR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_ISA_CNTL
+#define CP_MEC_ISA_CNTL__ISA_MODE__SHIFT 0x0
+#define CP_MEC_ISA_CNTL__ISA_MODE_MASK 0x00000001L
+//CP_MEC_RS64_CNTL
+#define CP_MEC_RS64_CNTL__MEC_INVALIDATE_ICACHE__SHIFT 0x4
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_RESET__SHIFT 0x10
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_RESET__SHIFT 0x11
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_RESET__SHIFT 0x12
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_RESET__SHIFT 0x13
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_ACTIVE__SHIFT 0x1a
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_ACTIVE__SHIFT 0x1b
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_ACTIVE__SHIFT 0x1c
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_ACTIVE__SHIFT 0x1d
+#define CP_MEC_RS64_CNTL__MEC_HALT__SHIFT 0x1e
+#define CP_MEC_RS64_CNTL__MEC_STEP__SHIFT 0x1f
+#define CP_MEC_RS64_CNTL__MEC_INVALIDATE_ICACHE_MASK 0x00000010L
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_RESET_MASK 0x00010000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_RESET_MASK 0x00020000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_RESET_MASK 0x00040000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_RESET_MASK 0x00080000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE0_ACTIVE_MASK 0x04000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE1_ACTIVE_MASK 0x08000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE2_ACTIVE_MASK 0x10000000L
+#define CP_MEC_RS64_CNTL__MEC_PIPE3_ACTIVE_MASK 0x20000000L
+#define CP_MEC_RS64_CNTL__MEC_HALT_MASK 0x40000000L
+#define CP_MEC_RS64_CNTL__MEC_STEP_MASK 0x80000000L
+//CP_MEC_MIE_LO
+#define CP_MEC_MIE_LO__MEC_INT__SHIFT 0x0
+#define CP_MEC_MIE_LO__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_MIE_HI
+#define CP_MEC_MIE_HI__MEC_INT__SHIFT 0x0
+#define CP_MEC_MIE_HI__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT
+#define CP_MEC_RS64_INTERRUPT__MEC_INT__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT__MEC_INT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INSTR_PNTR
+#define CP_MEC_RS64_INSTR_PNTR__INSTR_PNTR__SHIFT 0x0
+#define CP_MEC_RS64_INSTR_PNTR__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_MEC_MIP_LO
+#define CP_MEC_MIP_LO__MIP_LO__SHIFT 0x0
+#define CP_MEC_MIP_LO__MIP_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MIP_HI
+#define CP_MEC_MIP_HI__MIP_HI__SHIFT 0x0
+#define CP_MEC_MIP_HI__MIP_HI_MASK 0xFFFFFFFFL
+//CP_MEC_DC_BASE_CNTL
+#define CP_MEC_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MEC_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MEC_DC_OP_CNTL
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_MEC_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_MEC_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_MEC_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+//CP_MEC_MTIMECMP_LO
+#define CP_MEC_MTIMECMP_LO__TIME_LO__SHIFT 0x0
+#define CP_MEC_MTIMECMP_LO__TIME_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MTIMECMP_HI
+#define CP_MEC_MTIMECMP_HI__TIME_HI__SHIFT 0x0
+#define CP_MEC_MTIMECMP_HI__TIME_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP0_LO
+#define CP_MEC_GP0_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MEC_GP0_LO__DATA__SHIFT 0x1
+#define CP_MEC_GP0_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MEC_GP0_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MEC_GP0_HI
+#define CP_MEC_GP0_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MEC_GP0_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_GP1_LO
+#define CP_MEC_GP1_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MEC_GP1_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP1_HI
+#define CP_MEC_GP1_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MEC_GP1_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP2_LO
+#define CP_MEC_GP2_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MEC_GP2_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP2_HI
+#define CP_MEC_GP2_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MEC_GP2_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP3_LO
+#define CP_MEC_GP3_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP3_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP3_HI
+#define CP_MEC_GP3_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP3_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP4_LO
+#define CP_MEC_GP4_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP4_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP4_HI
+#define CP_MEC_GP4_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP4_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP5_LO
+#define CP_MEC_GP5_LO__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_MEC_GP5_LO__DATA__SHIFT 0x1
+#define CP_MEC_GP5_LO__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_MEC_GP5_LO__DATA_MASK 0xFFFFFFFEL
+//CP_MEC_GP5_HI
+#define CP_MEC_GP5_HI__M_RET_ADDR__SHIFT 0x0
+#define CP_MEC_GP5_HI__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_GP6_LO
+#define CP_MEC_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_MEC_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP6_HI
+#define CP_MEC_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_MEC_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP7_LO
+#define CP_MEC_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_MEC_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_MEC_GP7_HI
+#define CP_MEC_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_MEC_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_MEC_GP8_LO
+#define CP_MEC_GP8_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP8_HI
+#define CP_MEC_GP8_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP9_LO
+#define CP_MEC_GP9_LO__DATA__SHIFT 0x0
+#define CP_MEC_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_GP9_HI
+#define CP_MEC_GP9_HI__DATA__SHIFT 0x0
+#define CP_MEC_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_LOCAL_BASE0_LO
+#define CP_MEC_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_BASE0_HI
+#define CP_MEC_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_MASK0_LO
+#define CP_MEC_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_MASK0_HI
+#define CP_MEC_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_APERTURE
+#define CP_MEC_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_INSTR_BASE_LO
+#define CP_MEC_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_INSTR_BASE_HI
+#define CP_MEC_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_INSTR_MASK_LO
+#define CP_MEC_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_INSTR_MASK_HI
+#define CP_MEC_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_MEC_LOCAL_INSTR_APERTURE
+#define CP_MEC_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_SCRATCH_APERTURE
+#define CP_MEC_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_MEC_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_MEC_LOCAL_SCRATCH_BASE_LO
+#define CP_MEC_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_LOCAL_SCRATCH_BASE_HI
+#define CP_MEC_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_RS64_PERFCOUNT_CNTL
+#define CP_MEC_RS64_PERFCOUNT_CNTL__EVENT_SEL__SHIFT 0x0
+#define CP_MEC_RS64_PERFCOUNT_CNTL__EVENT_SEL_MASK 0x0000001FL
+//CP_MEC_RS64_PENDING_INTERRUPT
+#define CP_MEC_RS64_PENDING_INTERRUPT__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_MEC_RS64_PENDING_INTERRUPT__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_PRGRM_CNTR_START_HI
+#define CP_MEC_RS64_PRGRM_CNTR_START_HI__IP_START__SHIFT 0x0
+#define CP_MEC_RS64_PRGRM_CNTR_START_HI__IP_START_MASK 0x3FFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_16
+#define CP_MEC_RS64_INTERRUPT_DATA_16__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_16__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_17
+#define CP_MEC_RS64_INTERRUPT_DATA_17__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_17__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_18
+#define CP_MEC_RS64_INTERRUPT_DATA_18__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_18__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_19
+#define CP_MEC_RS64_INTERRUPT_DATA_19__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_19__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_20
+#define CP_MEC_RS64_INTERRUPT_DATA_20__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_20__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_21
+#define CP_MEC_RS64_INTERRUPT_DATA_21__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_21__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_22
+#define CP_MEC_RS64_INTERRUPT_DATA_22__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_22__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_23
+#define CP_MEC_RS64_INTERRUPT_DATA_23__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_23__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_24
+#define CP_MEC_RS64_INTERRUPT_DATA_24__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_24__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_25
+#define CP_MEC_RS64_INTERRUPT_DATA_25__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_25__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_26
+#define CP_MEC_RS64_INTERRUPT_DATA_26__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_26__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_27
+#define CP_MEC_RS64_INTERRUPT_DATA_27__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_27__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_28
+#define CP_MEC_RS64_INTERRUPT_DATA_28__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_28__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_29
+#define CP_MEC_RS64_INTERRUPT_DATA_29__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_29__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_30
+#define CP_MEC_RS64_INTERRUPT_DATA_30__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_30__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_RS64_INTERRUPT_DATA_31
+#define CP_MEC_RS64_INTERRUPT_DATA_31__DATA__SHIFT 0x0
+#define CP_MEC_RS64_INTERRUPT_DATA_31__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_BASE
+#define CP_MEC_DC_APERTURE0_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_MASK
+#define CP_MEC_DC_APERTURE0_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE0_CNTL
+#define CP_MEC_DC_APERTURE0_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE0_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE0_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE0_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE1_BASE
+#define CP_MEC_DC_APERTURE1_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE1_MASK
+#define CP_MEC_DC_APERTURE1_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE1_CNTL
+#define CP_MEC_DC_APERTURE1_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE1_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE1_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE1_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE2_BASE
+#define CP_MEC_DC_APERTURE2_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE2_MASK
+#define CP_MEC_DC_APERTURE2_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE2_CNTL
+#define CP_MEC_DC_APERTURE2_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE2_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE2_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE2_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE3_BASE
+#define CP_MEC_DC_APERTURE3_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE3_MASK
+#define CP_MEC_DC_APERTURE3_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE3_CNTL
+#define CP_MEC_DC_APERTURE3_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE3_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE3_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE3_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE4_BASE
+#define CP_MEC_DC_APERTURE4_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE4_MASK
+#define CP_MEC_DC_APERTURE4_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE4_CNTL
+#define CP_MEC_DC_APERTURE4_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE4_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE4_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE4_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE5_BASE
+#define CP_MEC_DC_APERTURE5_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE5_MASK
+#define CP_MEC_DC_APERTURE5_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE5_CNTL
+#define CP_MEC_DC_APERTURE5_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE5_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE5_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE5_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE6_BASE
+#define CP_MEC_DC_APERTURE6_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE6_MASK
+#define CP_MEC_DC_APERTURE6_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE6_CNTL
+#define CP_MEC_DC_APERTURE6_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE6_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE6_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE6_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE7_BASE
+#define CP_MEC_DC_APERTURE7_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE7_MASK
+#define CP_MEC_DC_APERTURE7_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE7_CNTL
+#define CP_MEC_DC_APERTURE7_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE7_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE7_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE7_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE8_BASE
+#define CP_MEC_DC_APERTURE8_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE8_MASK
+#define CP_MEC_DC_APERTURE8_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE8_CNTL
+#define CP_MEC_DC_APERTURE8_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE8_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE8_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE8_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE9_BASE
+#define CP_MEC_DC_APERTURE9_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE9_MASK
+#define CP_MEC_DC_APERTURE9_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE9_CNTL
+#define CP_MEC_DC_APERTURE9_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE9_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE9_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE9_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE10_BASE
+#define CP_MEC_DC_APERTURE10_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE10_MASK
+#define CP_MEC_DC_APERTURE10_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE10_CNTL
+#define CP_MEC_DC_APERTURE10_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE10_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE10_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE10_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE11_BASE
+#define CP_MEC_DC_APERTURE11_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE11_MASK
+#define CP_MEC_DC_APERTURE11_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE11_CNTL
+#define CP_MEC_DC_APERTURE11_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE11_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE11_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE11_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE12_BASE
+#define CP_MEC_DC_APERTURE12_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE12_MASK
+#define CP_MEC_DC_APERTURE12_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE12_CNTL
+#define CP_MEC_DC_APERTURE12_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE12_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE12_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE12_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE13_BASE
+#define CP_MEC_DC_APERTURE13_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE13_MASK
+#define CP_MEC_DC_APERTURE13_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE13_CNTL
+#define CP_MEC_DC_APERTURE13_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE13_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE13_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE13_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE14_BASE
+#define CP_MEC_DC_APERTURE14_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE14_MASK
+#define CP_MEC_DC_APERTURE14_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE14_CNTL
+#define CP_MEC_DC_APERTURE14_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE14_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE14_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE14_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_MEC_DC_APERTURE15_BASE
+#define CP_MEC_DC_APERTURE15_BASE__BASE__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_BASE__BASE_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE15_MASK
+#define CP_MEC_DC_APERTURE15_MASK__MASK__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_MASK__MASK_MASK 0xFFFFFFFFL
+//CP_MEC_DC_APERTURE15_CNTL
+#define CP_MEC_DC_APERTURE15_CNTL__VMID__SHIFT 0x0
+#define CP_MEC_DC_APERTURE15_CNTL__BYPASS_MODE__SHIFT 0x4
+#define CP_MEC_DC_APERTURE15_CNTL__VMID_MASK 0x0000000FL
+#define CP_MEC_DC_APERTURE15_CNTL__BYPASS_MODE_MASK 0x00000010L
+//CP_CPC_IC_OP_CNTL
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_CPC_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_CPC_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_CPC_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_GFX_CNTL
+#define CP_GFX_CNTL__ENGINE_SEL__SHIFT 0x0
+#define CP_GFX_CNTL__CONFIG__SHIFT 0x1
+#define CP_GFX_CNTL__ENGINE_SEL_MASK 0x00000001L
+#define CP_GFX_CNTL__CONFIG_MASK 0x00000006L
+//CP_GFX_RS64_INTERRUPT0
+#define CP_GFX_RS64_INTERRUPT0__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTERRUPT0__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INTR_EN0
+#define CP_GFX_RS64_INTR_EN0__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTR_EN0__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INTR_EN1
+#define CP_GFX_RS64_INTR_EN1__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTR_EN1__ME_INT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_BASE_CNTL
+#define CP_GFX_RS64_DC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_GFX_RS64_DC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_GFX_RS64_DC_OP_CNTL
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE__SHIFT 0x0
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE__SHIFT 0x1
+#define CP_GFX_RS64_DC_OP_CNTL__BYPASS_ALL__SHIFT 0x2
+#define CP_GFX_RS64_DC_OP_CNTL__RESERVED__SHIFT 0x3
+#define CP_GFX_RS64_DC_OP_CNTL__PRIME_DCACHE__SHIFT 0x4
+#define CP_GFX_RS64_DC_OP_CNTL__DCACHE_PRIMED__SHIFT 0x5
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_MASK 0x00000001L
+#define CP_GFX_RS64_DC_OP_CNTL__INVALIDATE_DCACHE_COMPLETE_MASK 0x00000002L
+#define CP_GFX_RS64_DC_OP_CNTL__BYPASS_ALL_MASK 0x00000004L
+#define CP_GFX_RS64_DC_OP_CNTL__RESERVED_MASK 0x00000008L
+#define CP_GFX_RS64_DC_OP_CNTL__PRIME_DCACHE_MASK 0x00000010L
+#define CP_GFX_RS64_DC_OP_CNTL__DCACHE_PRIMED_MASK 0x00000020L
+//CP_GFX_RS64_LOCAL_BASE0_LO
+#define CP_GFX_RS64_LOCAL_BASE0_LO__BASE0_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_BASE0_LO__BASE0_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_BASE0_HI
+#define CP_GFX_RS64_LOCAL_BASE0_HI__BASE0_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_BASE0_HI__BASE0_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_MASK0_LO
+#define CP_GFX_RS64_LOCAL_MASK0_LO__MASK0_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_MASK0_LO__MASK0_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_MASK0_HI
+#define CP_GFX_RS64_LOCAL_MASK0_HI__MASK0_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_MASK0_HI__MASK0_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_APERTURE
+#define CP_GFX_RS64_LOCAL_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_INSTR_BASE_LO
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_INSTR_BASE_HI
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_INSTR_MASK_LO
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_LO__MASK_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_LO__MASK_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_INSTR_MASK_HI
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_HI__MASK_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_MASK_HI__MASK_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_LOCAL_INSTR_APERTURE
+#define CP_GFX_RS64_LOCAL_INSTR_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_INSTR_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_SCRATCH_APERTURE
+#define CP_GFX_RS64_LOCAL_SCRATCH_APERTURE__APERTURE__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_SCRATCH_APERTURE__APERTURE_MASK 0x00000007L
+//CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO__BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI__BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_LOCAL_SCRATCH_BASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_PERFCOUNT_CNTL0
+#define CP_GFX_RS64_PERFCOUNT_CNTL0__EVENT_SEL__SHIFT 0x0
+#define CP_GFX_RS64_PERFCOUNT_CNTL0__EVENT_SEL_MASK 0x0000001FL
+//CP_GFX_RS64_PERFCOUNT_CNTL1
+#define CP_GFX_RS64_PERFCOUNT_CNTL1__EVENT_SEL__SHIFT 0x0
+#define CP_GFX_RS64_PERFCOUNT_CNTL1__EVENT_SEL_MASK 0x0000001FL
+//CP_GFX_RS64_MIP_LO0
+#define CP_GFX_RS64_MIP_LO0__MIP_LO__SHIFT 0x0
+#define CP_GFX_RS64_MIP_LO0__MIP_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_LO1
+#define CP_GFX_RS64_MIP_LO1__MIP_LO__SHIFT 0x0
+#define CP_GFX_RS64_MIP_LO1__MIP_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_HI0
+#define CP_GFX_RS64_MIP_HI0__MIP_HI__SHIFT 0x0
+#define CP_GFX_RS64_MIP_HI0__MIP_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIP_HI1
+#define CP_GFX_RS64_MIP_HI1__MIP_HI__SHIFT 0x0
+#define CP_GFX_RS64_MIP_HI1__MIP_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_LO0
+#define CP_GFX_RS64_MTIMECMP_LO0__TIME_LO__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_LO0__TIME_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_LO1
+#define CP_GFX_RS64_MTIMECMP_LO1__TIME_LO__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_LO1__TIME_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_HI0
+#define CP_GFX_RS64_MTIMECMP_HI0__TIME_HI__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_HI0__TIME_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MTIMECMP_HI1
+#define CP_GFX_RS64_MTIMECMP_HI1__TIME_HI__SHIFT 0x0
+#define CP_GFX_RS64_MTIMECMP_HI1__TIME_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP0_LO0
+#define CP_GFX_RS64_GP0_LO0__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP0_LO0__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP0_LO0__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP0_LO0__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP0_LO1
+#define CP_GFX_RS64_GP0_LO1__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP0_LO1__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP0_LO1__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP0_LO1__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP0_HI0
+#define CP_GFX_RS64_GP0_HI0__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP0_HI0__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP0_HI1
+#define CP_GFX_RS64_GP0_HI1__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP0_HI1__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_LO0
+#define CP_GFX_RS64_GP1_LO0__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP1_LO0__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_LO1
+#define CP_GFX_RS64_GP1_LO1__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP1_LO1__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_HI0
+#define CP_GFX_RS64_GP1_HI0__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP1_HI0__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP1_HI1
+#define CP_GFX_RS64_GP1_HI1__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP1_HI1__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_LO0
+#define CP_GFX_RS64_GP2_LO0__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP2_LO0__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_LO1
+#define CP_GFX_RS64_GP2_LO1__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP2_LO1__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_HI0
+#define CP_GFX_RS64_GP2_HI0__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP2_HI0__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP2_HI1
+#define CP_GFX_RS64_GP2_HI1__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP2_HI1__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_LO0
+#define CP_GFX_RS64_GP3_LO0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_LO0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_LO1
+#define CP_GFX_RS64_GP3_LO1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_LO1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_HI0
+#define CP_GFX_RS64_GP3_HI0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_HI0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP3_HI1
+#define CP_GFX_RS64_GP3_HI1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP3_HI1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_LO0
+#define CP_GFX_RS64_GP4_LO0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_LO0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_LO1
+#define CP_GFX_RS64_GP4_LO1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_LO1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_HI0
+#define CP_GFX_RS64_GP4_HI0__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_HI0__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP4_HI1
+#define CP_GFX_RS64_GP4_HI1__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP4_HI1__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP5_LO0
+#define CP_GFX_RS64_GP5_LO0__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP5_LO0__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP5_LO0__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP5_LO0__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP5_LO1
+#define CP_GFX_RS64_GP5_LO1__PG_VIRT_HALTED__SHIFT 0x0
+#define CP_GFX_RS64_GP5_LO1__DATA__SHIFT 0x1
+#define CP_GFX_RS64_GP5_LO1__PG_VIRT_HALTED_MASK 0x00000001L
+#define CP_GFX_RS64_GP5_LO1__DATA_MASK 0xFFFFFFFEL
+//CP_GFX_RS64_GP5_HI0
+#define CP_GFX_RS64_GP5_HI0__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP5_HI0__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP5_HI1
+#define CP_GFX_RS64_GP5_HI1__M_RET_ADDR__SHIFT 0x0
+#define CP_GFX_RS64_GP5_HI1__M_RET_ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP6_LO
+#define CP_GFX_RS64_GP6_LO__RD_WR_SELECT_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP6_LO__RD_WR_SELECT_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP6_HI
+#define CP_GFX_RS64_GP6_HI__RD_WR_SELECT_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP6_HI__RD_WR_SELECT_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP7_LO
+#define CP_GFX_RS64_GP7_LO__STACK_PNTR_LO__SHIFT 0x0
+#define CP_GFX_RS64_GP7_LO__STACK_PNTR_LO_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP7_HI
+#define CP_GFX_RS64_GP7_HI__STACK_PNTR_HI__SHIFT 0x0
+#define CP_GFX_RS64_GP7_HI__STACK_PNTR_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP8_LO
+#define CP_GFX_RS64_GP8_LO__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP8_LO__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP8_HI
+#define CP_GFX_RS64_GP8_HI__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP8_HI__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP9_LO
+#define CP_GFX_RS64_GP9_LO__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP9_LO__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_GP9_HI
+#define CP_GFX_RS64_GP9_HI__DATA__SHIFT 0x0
+#define CP_GFX_RS64_GP9_HI__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_INSTR_PNTR0
+#define CP_GFX_RS64_INSTR_PNTR0__INSTR_PNTR__SHIFT 0x0
+#define CP_GFX_RS64_INSTR_PNTR0__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_GFX_RS64_INSTR_PNTR1
+#define CP_GFX_RS64_INSTR_PNTR1__INSTR_PNTR__SHIFT 0x0
+#define CP_GFX_RS64_INSTR_PNTR1__INSTR_PNTR_MASK 0x000FFFFFL
+//CP_GFX_RS64_PENDING_INTERRUPT0
+#define CP_GFX_RS64_PENDING_INTERRUPT0__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_GFX_RS64_PENDING_INTERRUPT0__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_PENDING_INTERRUPT1
+#define CP_GFX_RS64_PENDING_INTERRUPT1__PENDING_INTERRUPT__SHIFT 0x0
+#define CP_GFX_RS64_PENDING_INTERRUPT1__PENDING_INTERRUPT_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_BASE0
+#define CP_GFX_RS64_DC_APERTURE0_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_MASK0
+#define CP_GFX_RS64_DC_APERTURE0_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_CNTL0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE0_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE1_BASE0
+#define CP_GFX_RS64_DC_APERTURE1_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_MASK0
+#define CP_GFX_RS64_DC_APERTURE1_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_CNTL0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE1_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE2_BASE0
+#define CP_GFX_RS64_DC_APERTURE2_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_MASK0
+#define CP_GFX_RS64_DC_APERTURE2_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_CNTL0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE2_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE3_BASE0
+#define CP_GFX_RS64_DC_APERTURE3_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_MASK0
+#define CP_GFX_RS64_DC_APERTURE3_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_CNTL0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE3_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE4_BASE0
+#define CP_GFX_RS64_DC_APERTURE4_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_MASK0
+#define CP_GFX_RS64_DC_APERTURE4_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_CNTL0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE4_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE5_BASE0
+#define CP_GFX_RS64_DC_APERTURE5_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_MASK0
+#define CP_GFX_RS64_DC_APERTURE5_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_CNTL0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE5_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE6_BASE0
+#define CP_GFX_RS64_DC_APERTURE6_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_MASK0
+#define CP_GFX_RS64_DC_APERTURE6_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_CNTL0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE6_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE7_BASE0
+#define CP_GFX_RS64_DC_APERTURE7_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_MASK0
+#define CP_GFX_RS64_DC_APERTURE7_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_CNTL0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE7_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE8_BASE0
+#define CP_GFX_RS64_DC_APERTURE8_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_MASK0
+#define CP_GFX_RS64_DC_APERTURE8_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_CNTL0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE8_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE9_BASE0
+#define CP_GFX_RS64_DC_APERTURE9_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_MASK0
+#define CP_GFX_RS64_DC_APERTURE9_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_CNTL0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE9_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE10_BASE0
+#define CP_GFX_RS64_DC_APERTURE10_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_MASK0
+#define CP_GFX_RS64_DC_APERTURE10_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_CNTL0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE10_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE11_BASE0
+#define CP_GFX_RS64_DC_APERTURE11_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_MASK0
+#define CP_GFX_RS64_DC_APERTURE11_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_CNTL0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE11_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE12_BASE0
+#define CP_GFX_RS64_DC_APERTURE12_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_MASK0
+#define CP_GFX_RS64_DC_APERTURE12_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_CNTL0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE12_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE13_BASE0
+#define CP_GFX_RS64_DC_APERTURE13_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_MASK0
+#define CP_GFX_RS64_DC_APERTURE13_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_CNTL0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE13_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE14_BASE0
+#define CP_GFX_RS64_DC_APERTURE14_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_MASK0
+#define CP_GFX_RS64_DC_APERTURE14_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_CNTL0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE14_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE15_BASE0
+#define CP_GFX_RS64_DC_APERTURE15_BASE0__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_BASE0__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_MASK0
+#define CP_GFX_RS64_DC_APERTURE15_MASK0__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_MASK0__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_CNTL0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE15_CNTL0__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE0_BASE1
+#define CP_GFX_RS64_DC_APERTURE0_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_MASK1
+#define CP_GFX_RS64_DC_APERTURE0_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE0_CNTL1
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE0_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE1_BASE1
+#define CP_GFX_RS64_DC_APERTURE1_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_MASK1
+#define CP_GFX_RS64_DC_APERTURE1_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE1_CNTL1
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE1_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE2_BASE1
+#define CP_GFX_RS64_DC_APERTURE2_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_MASK1
+#define CP_GFX_RS64_DC_APERTURE2_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE2_CNTL1
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE2_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE3_BASE1
+#define CP_GFX_RS64_DC_APERTURE3_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_MASK1
+#define CP_GFX_RS64_DC_APERTURE3_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE3_CNTL1
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE3_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE4_BASE1
+#define CP_GFX_RS64_DC_APERTURE4_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_MASK1
+#define CP_GFX_RS64_DC_APERTURE4_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE4_CNTL1
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE4_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE5_BASE1
+#define CP_GFX_RS64_DC_APERTURE5_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_MASK1
+#define CP_GFX_RS64_DC_APERTURE5_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE5_CNTL1
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE5_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE6_BASE1
+#define CP_GFX_RS64_DC_APERTURE6_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_MASK1
+#define CP_GFX_RS64_DC_APERTURE6_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE6_CNTL1
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE6_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE7_BASE1
+#define CP_GFX_RS64_DC_APERTURE7_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_MASK1
+#define CP_GFX_RS64_DC_APERTURE7_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE7_CNTL1
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE7_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE8_BASE1
+#define CP_GFX_RS64_DC_APERTURE8_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_MASK1
+#define CP_GFX_RS64_DC_APERTURE8_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE8_CNTL1
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE8_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE9_BASE1
+#define CP_GFX_RS64_DC_APERTURE9_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_MASK1
+#define CP_GFX_RS64_DC_APERTURE9_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE9_CNTL1
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE9_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE10_BASE1
+#define CP_GFX_RS64_DC_APERTURE10_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_MASK1
+#define CP_GFX_RS64_DC_APERTURE10_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE10_CNTL1
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE10_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE11_BASE1
+#define CP_GFX_RS64_DC_APERTURE11_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_MASK1
+#define CP_GFX_RS64_DC_APERTURE11_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE11_CNTL1
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE11_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE12_BASE1
+#define CP_GFX_RS64_DC_APERTURE12_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_MASK1
+#define CP_GFX_RS64_DC_APERTURE12_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE12_CNTL1
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE12_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE13_BASE1
+#define CP_GFX_RS64_DC_APERTURE13_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_MASK1
+#define CP_GFX_RS64_DC_APERTURE13_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE13_CNTL1
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE13_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE14_BASE1
+#define CP_GFX_RS64_DC_APERTURE14_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_MASK1
+#define CP_GFX_RS64_DC_APERTURE14_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE14_CNTL1
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE14_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_DC_APERTURE15_BASE1
+#define CP_GFX_RS64_DC_APERTURE15_BASE1__BASE__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_BASE1__BASE_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_MASK1
+#define CP_GFX_RS64_DC_APERTURE15_MASK1__MASK__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_MASK1__MASK_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_APERTURE15_CNTL1
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__VMID__SHIFT 0x0
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__BYPASS_MODE__SHIFT 0x4
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__VMID_MASK 0x0000000FL
+#define CP_GFX_RS64_DC_APERTURE15_CNTL1__BYPASS_MODE_MASK 0x00000010L
+//CP_GFX_RS64_INTERRUPT1
+#define CP_GFX_RS64_INTERRUPT1__ME_INT__SHIFT 0x0
+#define CP_GFX_RS64_INTERRUPT1__ME_INT_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_gl1dec
+//GL1_ARB_CTRL
+#define GL1_ARB_CTRL__NUM_MEM_PIPES__SHIFT 0x0
+#define GL1_ARB_CTRL__FGCG_DISABLE__SHIFT 0x2
+#define GL1_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x3
+#define GL1_ARB_CTRL__CHICKEN_BITS__SHIFT 0x4
+#define GL1_ARB_CTRL__NUM_MEM_PIPES_MASK 0x00000003L
+#define GL1_ARB_CTRL__FGCG_DISABLE_MASK 0x00000004L
+#define GL1_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000008L
+#define GL1_ARB_CTRL__CHICKEN_BITS_MASK 0x00000FF0L
+//GL1_DRAM_BURST_MASK
+#define GL1_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK__SHIFT 0x0
+#define GL1_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK_MASK 0x000000FFL
+//GL1_ARB_STATUS
+#define GL1_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define GL1_ARB_STATUS__RET_ARB_BUSY__SHIFT 0x1
+#define GL1_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define GL1_ARB_STATUS__RET_ARB_BUSY_MASK 0x00000002L
+//GL1_DRAM_BURST_CTRL
+#define GL1_DRAM_BURST_CTRL__MAX_DRAM_BURST__SHIFT 0x0
+#define GL1_DRAM_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define GL1_DRAM_BURST_CTRL__GATHER_64B_BURST_DISABLE__SHIFT 0x4
+#define GL1_DRAM_BURST_CTRL__GATHER_32B_BURST_DISABLE__SHIFT 0x5
+#define GL1_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE__SHIFT 0x8
+#define GL1_DRAM_BURST_CTRL__MAX_DRAM_BURST_MASK 0x00000007L
+#define GL1_DRAM_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define GL1_DRAM_BURST_CTRL__GATHER_64B_BURST_DISABLE_MASK 0x00000010L
+#define GL1_DRAM_BURST_CTRL__GATHER_32B_BURST_DISABLE_MASK 0x00000020L
+#define GL1_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE_MASK 0x00000100L
+//GL1I_GL1R_REP_FGCG_OVERRIDE
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IR_REP_FGCG_OVERRIDE__SHIFT 0x0
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IW_REP_FGCG_OVERRIDE__SHIFT 0x1
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_SRC_REP_FGCG_OVERRIDE__SHIFT 0x2
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_RET_REP_FGCG_OVERRIDE__SHIFT 0x3
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IR_REP_FGCG_OVERRIDE_MASK 0x00000001L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1IW_REP_FGCG_OVERRIDE_MASK 0x00000002L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_SRC_REP_FGCG_OVERRIDE_MASK 0x00000004L
+#define GL1I_GL1R_REP_FGCG_OVERRIDE__GL1A_GL1R_RET_REP_FGCG_OVERRIDE_MASK 0x00000008L
+//GL1C_CTRL
+#define GL1C_CTRL__FORCE_MISS__SHIFT 0x0
+#define GL1C_CTRL__FORCE_HIT__SHIFT 0x1
+#define GL1C_CTRL__NOFILL_32B__SHIFT 0x2
+#define GL1C_CTRL__NOFILL_64B__SHIFT 0x3
+#define GL1C_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x4
+#define GL1C_CTRL__ACK_QUEUE_DISABLE__SHIFT 0x8
+#define GL1C_CTRL__RMI_META_READ_MISS_QUEUE_DISABLE__SHIFT 0x9
+#define GL1C_CTRL__HIT_QUEUE_DISABLE__SHIFT 0xa
+#define GL1C_CTRL__GL2_REQ_CREDITS__SHIFT 0xb
+#define GL1C_CTRL__GL2_DATA_CREDITS__SHIFT 0x12
+#define GL1C_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x19
+#define GL1C_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x1a
+#define GL1C_CTRL__GCR_RSP_FGCG_DISABLE__SHIFT 0x1b
+#define GL1C_CTRL__DISABLE_HASH_TO_UPPER_16_SETS__SHIFT 0x1c
+#define GL1C_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT__SHIFT 0x1d
+#define GL1C_CTRL__DISABLE_PERF_SPLIT_EVICT_WRITE__SHIFT 0x1e
+#define GL1C_CTRL__FORCE_MISS_MASK 0x00000001L
+#define GL1C_CTRL__FORCE_HIT_MASK 0x00000002L
+#define GL1C_CTRL__NOFILL_32B_MASK 0x00000004L
+#define GL1C_CTRL__NOFILL_64B_MASK 0x00000008L
+#define GL1C_CTRL__LATENCY_FIFO_SIZE_MASK 0x000000F0L
+#define GL1C_CTRL__ACK_QUEUE_DISABLE_MASK 0x00000100L
+#define GL1C_CTRL__RMI_META_READ_MISS_QUEUE_DISABLE_MASK 0x00000200L
+#define GL1C_CTRL__HIT_QUEUE_DISABLE_MASK 0x00000400L
+#define GL1C_CTRL__GL2_REQ_CREDITS_MASK 0x0003F800L
+#define GL1C_CTRL__GL2_DATA_CREDITS_MASK 0x01FC0000L
+#define GL1C_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x02000000L
+#define GL1C_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x04000000L
+#define GL1C_CTRL__GCR_RSP_FGCG_DISABLE_MASK 0x08000000L
+#define GL1C_CTRL__DISABLE_HASH_TO_UPPER_16_SETS_MASK 0x10000000L
+#define GL1C_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT_MASK 0x20000000L
+#define GL1C_CTRL__DISABLE_PERF_SPLIT_EVICT_WRITE_MASK 0x40000000L
+//GL1C_STATUS
+#define GL1C_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define GL1C_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define GL1C_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define GL1C_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define GL1C_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define GL1C_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define GL1C_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define GL1C_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define GL1C_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define GL1C_STATUS__LATENCY_FIFO_FULL_STALL__SHIFT 0x14
+#define GL1C_STATUS__TAG_STALL__SHIFT 0x15
+#define GL1C_STATUS__TAG_BUSY__SHIFT 0x16
+#define GL1C_STATUS__TAG_ACK_STALL__SHIFT 0x17
+#define GL1C_STATUS__TAG_GCR_INV_STALL__SHIFT 0x18
+#define GL1C_STATUS__TAG_NO_AVAILABLE_LINE_TO_EVICT_STALL__SHIFT 0x19
+#define GL1C_STATUS__TAG_EVICT__SHIFT 0x1a
+#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION__SHIFT 0x1b
+#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET__SHIFT 0x1f
+#define GL1C_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define GL1C_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define GL1C_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define GL1C_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define GL1C_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define GL1C_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define GL1C_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define GL1C_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define GL1C_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define GL1C_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define GL1C_STATUS__LATENCY_FIFO_FULL_STALL_MASK 0x00100000L
+#define GL1C_STATUS__TAG_STALL_MASK 0x00200000L
+#define GL1C_STATUS__TAG_BUSY_MASK 0x00400000L
+#define GL1C_STATUS__TAG_ACK_STALL_MASK 0x00800000L
+#define GL1C_STATUS__TAG_GCR_INV_STALL_MASK 0x01000000L
+#define GL1C_STATUS__TAG_NO_AVAILABLE_LINE_TO_EVICT_STALL_MASK 0x02000000L
+#define GL1C_STATUS__TAG_EVICT_MASK 0x04000000L
+#define GL1C_STATUS__TAG_REQUEST_STATE_OPERATION_MASK 0x78000000L
+#define GL1C_STATUS__TRACKER_LAST_SET_MATCHES_CURRENT_SET_MASK 0x80000000L
+//GL1C_UTCL0_CNTL2
+#define GL1C_UTCL0_CNTL2__SPARE__SHIFT 0x0
+#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE__SHIFT 0x8
+#define GL1C_UTCL0_CNTL2__MTYPE_OVRD_DIS__SHIFT 0x9
+#define GL1C_UTCL0_CNTL2__ANY_LINE_VALID__SHIFT 0xa
+#define GL1C_UTCL0_CNTL2__FORCE_SNOOP__SHIFT 0xe
+#define GL1C_UTCL0_CNTL2__DISABLE_BURST__SHIFT 0x11
+#define GL1C_UTCL0_CNTL2__FORCE_FRAG_2M_TO_64K__SHIFT 0x1a
+#define GL1C_UTCL0_CNTL2__FGCG_DISABLE__SHIFT 0x1e
+#define GL1C_UTCL0_CNTL2__BIG_PAGE_DISABLE__SHIFT 0x1f
+#define GL1C_UTCL0_CNTL2__SPARE_MASK 0x000000FFL
+#define GL1C_UTCL0_CNTL2__COMP_SYNC_DISABLE_MASK 0x00000100L
+#define GL1C_UTCL0_CNTL2__MTYPE_OVRD_DIS_MASK 0x00000200L
+#define GL1C_UTCL0_CNTL2__ANY_LINE_VALID_MASK 0x00000400L
+#define GL1C_UTCL0_CNTL2__FORCE_SNOOP_MASK 0x00004000L
+#define GL1C_UTCL0_CNTL2__DISABLE_BURST_MASK 0x00020000L
+#define GL1C_UTCL0_CNTL2__FORCE_FRAG_2M_TO_64K_MASK 0x04000000L
+#define GL1C_UTCL0_CNTL2__FGCG_DISABLE_MASK 0x40000000L
+#define GL1C_UTCL0_CNTL2__BIG_PAGE_DISABLE_MASK 0x80000000L
+//GL1C_UTCL0_STATUS
+#define GL1C_UTCL0_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define GL1C_UTCL0_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define GL1C_UTCL0_STATUS__PRT_DETECTED__SHIFT 0x2
+#define GL1C_UTCL0_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define GL1C_UTCL0_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define GL1C_UTCL0_STATUS__PRT_DETECTED_MASK 0x00000004L
+//GL1C_UTCL0_RETRY
+#define GL1C_UTCL0_RETRY__INCR__SHIFT 0x0
+#define GL1C_UTCL0_RETRY__COUNT__SHIFT 0x8
+#define GL1C_UTCL0_RETRY__INCR_MASK 0x000000FFL
+#define GL1C_UTCL0_RETRY__COUNT_MASK 0x00000F00L
+//GL1C_CTRL2
+#define GL1C_CTRL2__UTCL0_INFLIGHT_MAX__SHIFT 0x0
+#define GL1C_CTRL2__UTCL0_SD_SIDEBAND_IF_DISABLE__SHIFT 0x8
+#define GL1C_CTRL2__REDUCE_REQ_PROTECTION_LINE_LEVEL__SHIFT 0x9
+#define GL1C_CTRL2__UTCL0_INFLIGHT_MAX_MASK 0x000000FFL
+#define GL1C_CTRL2__UTCL0_SD_SIDEBAND_IF_DISABLE_MASK 0x00000100L
+#define GL1C_CTRL2__REDUCE_REQ_PROTECTION_LINE_LEVEL_MASK 0x00003E00L
+
+
+// addressBlock: gc_chdec
+//CH_ARB_CTRL
+#define CH_ARB_CTRL__NUM_MEM_PIPES__SHIFT 0x0
+#define CH_ARB_CTRL__UC_IO_WR_PATH__SHIFT 0x2
+#define CH_ARB_CTRL__FGCG_DISABLE__SHIFT 0x3
+#define CH_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x4
+#define CH_ARB_CTRL__CHICKEN_BITS__SHIFT 0x5
+#define CH_ARB_CTRL__NUM_MEM_PIPES_MASK 0x00000003L
+#define CH_ARB_CTRL__UC_IO_WR_PATH_MASK 0x00000004L
+#define CH_ARB_CTRL__FGCG_DISABLE_MASK 0x00000008L
+#define CH_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000010L
+#define CH_ARB_CTRL__CHICKEN_BITS_MASK 0x00001FE0L
+//CH_DRAM_BURST_MASK
+#define CH_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK__SHIFT 0x0
+#define CH_DRAM_BURST_MASK__DRAM_BURST_ADDR_MASK_MASK 0x000000FFL
+//CH_ARB_STATUS
+#define CH_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define CH_ARB_STATUS__RET_ARB_BUSY__SHIFT 0x1
+#define CH_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define CH_ARB_STATUS__RET_ARB_BUSY_MASK 0x00000002L
+//CH_DRAM_BURST_CTRL
+#define CH_DRAM_BURST_CTRL__MAX_DRAM_BURST__SHIFT 0x0
+#define CH_DRAM_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define CH_DRAM_BURST_CTRL__GATHER_64B_MEMORY_BURST_DISABLE__SHIFT 0x4
+#define CH_DRAM_BURST_CTRL__GATHER_64B_IO_BURST_DISABLE__SHIFT 0x5
+#define CH_DRAM_BURST_CTRL__GATHER_32B_MEMORY_BURST_DISABLE__SHIFT 0x6
+#define CH_DRAM_BURST_CTRL__GATHER_32B_IO_BURST_DISABLE__SHIFT 0x7
+#define CH_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE__SHIFT 0x8
+#define CH_DRAM_BURST_CTRL__MAX_DRAM_BURST_MASK 0x00000007L
+#define CH_DRAM_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define CH_DRAM_BURST_CTRL__GATHER_64B_MEMORY_BURST_DISABLE_MASK 0x00000010L
+#define CH_DRAM_BURST_CTRL__GATHER_64B_IO_BURST_DISABLE_MASK 0x00000020L
+#define CH_DRAM_BURST_CTRL__GATHER_32B_MEMORY_BURST_DISABLE_MASK 0x00000040L
+#define CH_DRAM_BURST_CTRL__GATHER_32B_IO_BURST_DISABLE_MASK 0x00000080L
+#define CH_DRAM_BURST_CTRL__WRITE_BURSTABLE_STALL_DISABLE_MASK 0x00000100L
+//CHA_CHC_CREDITS
+#define CHA_CHC_CREDITS__CHC_REQ_CREDITS__SHIFT 0x0
+#define CHA_CHC_CREDITS__CHCG_REQ_CREDITS__SHIFT 0x8
+#define CHA_CHC_CREDITS__CHC_REQ_CREDITS_MASK 0x000000FFL
+#define CHA_CHC_CREDITS__CHCG_REQ_CREDITS_MASK 0x0000FF00L
+//CHA_CLIENT_FREE_DELAY
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_0_FREE_DELAY__SHIFT 0x0
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_1_FREE_DELAY__SHIFT 0x3
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_2_FREE_DELAY__SHIFT 0x6
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_3_FREE_DELAY__SHIFT 0x9
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_4_FREE_DELAY__SHIFT 0xc
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_0_FREE_DELAY_MASK 0x00000007L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_1_FREE_DELAY_MASK 0x00000038L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_2_FREE_DELAY_MASK 0x000001C0L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_3_FREE_DELAY_MASK 0x00000E00L
+#define CHA_CLIENT_FREE_DELAY__CLIENT_TYPE_4_FREE_DELAY_MASK 0x00007000L
+//CHI_CHR_REP_FGCG_OVERRIDE
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIW_REP_FGCG_OVERRIDE__SHIFT 0x0
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIR_REP_FGCG_OVERRIDE__SHIFT 0x1
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_SRC_REP_FGCG_OVERRIDE__SHIFT 0x2
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_RET_REP_FGCG_OVERRIDE__SHIFT 0x3
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIW_REP_FGCG_OVERRIDE_MASK 0x00000001L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHIR_REP_FGCG_OVERRIDE_MASK 0x00000002L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_SRC_REP_FGCG_OVERRIDE_MASK 0x00000004L
+#define CHI_CHR_REP_FGCG_OVERRIDE__CHA_CHR_RET_REP_FGCG_OVERRIDE_MASK 0x00000008L
+//CH_VC5_ENABLE
+#define CH_VC5_ENABLE__UTCL2_VC5_ENABLE__SHIFT 0x1
+#define CH_VC5_ENABLE__UTCL2_VC5_ENABLE_MASK 0x00000002L
+//CHC_CTRL
+#define CHC_CTRL__BUFFER_DEPTH_MAX__SHIFT 0x0
+#define CHC_CTRL__GL2_REQ_CREDITS__SHIFT 0x4
+#define CHC_CTRL__GL2_DATA_CREDITS__SHIFT 0xb
+#define CHC_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x12
+#define CHC_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x13
+#define CHC_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT__SHIFT 0x1d
+#define CHC_CTRL__BUFFER_DEPTH_MAX_MASK 0x0000000FL
+#define CHC_CTRL__GL2_REQ_CREDITS_MASK 0x000007F0L
+#define CHC_CTRL__GL2_DATA_CREDITS_MASK 0x0003F800L
+#define CHC_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x00040000L
+#define CHC_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x00080000L
+#define CHC_CTRL__DISABLE_PERF_WR_DATA_ALLOC_COUNT_MASK 0x20000000L
+//CHC_STATUS
+#define CHC_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define CHC_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define CHC_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define CHC_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define CHC_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define CHC_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define CHC_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define CHC_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define CHC_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define CHC_STATUS__VIRTUAL_FIFO_FULL_STALL__SHIFT 0x14
+#define CHC_STATUS__REQUEST_TRACKER_BUFFER_STALL__SHIFT 0x15
+#define CHC_STATUS__REQUEST_TRACKER_BUSY__SHIFT 0x16
+#define CHC_STATUS__BUFFER_FULL__SHIFT 0x17
+#define CHC_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define CHC_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define CHC_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define CHC_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define CHC_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define CHC_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define CHC_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define CHC_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define CHC_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define CHC_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define CHC_STATUS__VIRTUAL_FIFO_FULL_STALL_MASK 0x00100000L
+#define CHC_STATUS__REQUEST_TRACKER_BUFFER_STALL_MASK 0x00200000L
+#define CHC_STATUS__REQUEST_TRACKER_BUSY_MASK 0x00400000L
+#define CHC_STATUS__BUFFER_FULL_MASK 0x00800000L
+//CHCG_CTRL
+#define CHCG_CTRL__BUFFER_DEPTH_MAX__SHIFT 0x0
+#define CHCG_CTRL__VC0_BUFFER_DEPTH_MAX__SHIFT 0x4
+#define CHCG_CTRL__GL2_REQ_CREDITS__SHIFT 0x8
+#define CHCG_CTRL__GL2_DATA_CREDITS__SHIFT 0xf
+#define CHCG_CTRL__TO_L1_REPEATER_FGCG_DISABLE__SHIFT 0x16
+#define CHCG_CTRL__TO_L2_REPEATER_FGCG_DISABLE__SHIFT 0x17
+#define CHCG_CTRL__BUFFER_DEPTH_MAX_MASK 0x0000000FL
+#define CHCG_CTRL__VC0_BUFFER_DEPTH_MAX_MASK 0x000000F0L
+#define CHCG_CTRL__GL2_REQ_CREDITS_MASK 0x00007F00L
+#define CHCG_CTRL__GL2_DATA_CREDITS_MASK 0x003F8000L
+#define CHCG_CTRL__TO_L1_REPEATER_FGCG_DISABLE_MASK 0x00400000L
+#define CHCG_CTRL__TO_L2_REPEATER_FGCG_DISABLE_MASK 0x00800000L
+//CHCG_STATUS
+#define CHCG_STATUS__INPUT_BUFFER_VC0_FIFO_FULL__SHIFT 0x0
+#define CHCG_STATUS__OUTPUT_FIFOS_BUSY__SHIFT 0x1
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_FULL__SHIFT 0x2
+#define CHCG_STATUS__GL2_REQ_VC0_STALL__SHIFT 0x3
+#define CHCG_STATUS__GL2_DATA_VC0_STALL__SHIFT 0x4
+#define CHCG_STATUS__GL2_REQ_VC1_STALL__SHIFT 0x5
+#define CHCG_STATUS__GL2_DATA_VC1_STALL__SHIFT 0x6
+#define CHCG_STATUS__INPUT_BUFFER_VC0_BUSY__SHIFT 0x7
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_BUSY__SHIFT 0x8
+#define CHCG_STATUS__GL2_RH_BUSY__SHIFT 0x9
+#define CHCG_STATUS__NUM_REQ_PENDING_FROM_L2__SHIFT 0xa
+#define CHCG_STATUS__VIRTUAL_FIFO_FULL_STALL__SHIFT 0x14
+#define CHCG_STATUS__REQUEST_TRACKER_BUFFER_STALL__SHIFT 0x15
+#define CHCG_STATUS__REQUEST_TRACKER_BUSY__SHIFT 0x16
+#define CHCG_STATUS__BUFFER_FULL__SHIFT 0x17
+#define CHCG_STATUS__INPUT_BUFFER_VC1_BUSY__SHIFT 0x18
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_BUSY__SHIFT 0x19
+#define CHCG_STATUS__INPUT_BUFFER_VC1_FIFO_FULL__SHIFT 0x1a
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_FULL__SHIFT 0x1b
+#define CHCG_STATUS__INPUT_BUFFER_VC0_FIFO_FULL_MASK 0x00000001L
+#define CHCG_STATUS__OUTPUT_FIFOS_BUSY_MASK 0x00000002L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_FULL_MASK 0x00000004L
+#define CHCG_STATUS__GL2_REQ_VC0_STALL_MASK 0x00000008L
+#define CHCG_STATUS__GL2_DATA_VC0_STALL_MASK 0x00000010L
+#define CHCG_STATUS__GL2_REQ_VC1_STALL_MASK 0x00000020L
+#define CHCG_STATUS__GL2_DATA_VC1_STALL_MASK 0x00000040L
+#define CHCG_STATUS__INPUT_BUFFER_VC0_BUSY_MASK 0x00000080L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC0_BUSY_MASK 0x00000100L
+#define CHCG_STATUS__GL2_RH_BUSY_MASK 0x00000200L
+#define CHCG_STATUS__NUM_REQ_PENDING_FROM_L2_MASK 0x000FFC00L
+#define CHCG_STATUS__VIRTUAL_FIFO_FULL_STALL_MASK 0x00100000L
+#define CHCG_STATUS__REQUEST_TRACKER_BUFFER_STALL_MASK 0x00200000L
+#define CHCG_STATUS__REQUEST_TRACKER_BUSY_MASK 0x00400000L
+#define CHCG_STATUS__BUFFER_FULL_MASK 0x00800000L
+#define CHCG_STATUS__INPUT_BUFFER_VC1_BUSY_MASK 0x01000000L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_BUSY_MASK 0x02000000L
+#define CHCG_STATUS__INPUT_BUFFER_VC1_FIFO_FULL_MASK 0x04000000L
+#define CHCG_STATUS__SRC_DATA_FIFO_VC1_FULL_MASK 0x08000000L
+
+
+// addressBlock: gc_gl2dec
+//GL2C_CTRL
+#define GL2C_CTRL__CACHE_SIZE__SHIFT 0x0
+#define GL2C_CTRL__RATE__SHIFT 0x2
+#define GL2C_CTRL__WRITEBACK_MARGIN__SHIFT 0x4
+#define GL2C_CTRL__METADATA_LATENCY_FIFO_SIZE__SHIFT 0x8
+#define GL2C_CTRL__SRC_FIFO_SIZE__SHIFT 0xc
+#define GL2C_CTRL__LATENCY_FIFO_SIZE__SHIFT 0x10
+#define GL2C_CTRL__METADATA_TO_HI_PRIORITY__SHIFT 0x14
+#define GL2C_CTRL__LINEAR_SET_HASH__SHIFT 0x15
+#define GL2C_CTRL__FORCE_HIT_QUEUE_POP__SHIFT 0x16
+#define GL2C_CTRL__MDC_SIZE__SHIFT 0x18
+#define GL2C_CTRL__METADATA_TO_HIT_QUEUE__SHIFT 0x1a
+#define GL2C_CTRL__IGNORE_FULLY_WRITTEN__SHIFT 0x1b
+#define GL2C_CTRL__MDC_SIDEBAND_FIFO_SIZE__SHIFT 0x1c
+#define GL2C_CTRL__CACHE_SIZE_MASK 0x00000003L
+#define GL2C_CTRL__RATE_MASK 0x0000000CL
+#define GL2C_CTRL__WRITEBACK_MARGIN_MASK 0x000000F0L
+#define GL2C_CTRL__METADATA_LATENCY_FIFO_SIZE_MASK 0x00000F00L
+#define GL2C_CTRL__SRC_FIFO_SIZE_MASK 0x0000F000L
+#define GL2C_CTRL__LATENCY_FIFO_SIZE_MASK 0x000F0000L
+#define GL2C_CTRL__METADATA_TO_HI_PRIORITY_MASK 0x00100000L
+#define GL2C_CTRL__LINEAR_SET_HASH_MASK 0x00200000L
+#define GL2C_CTRL__FORCE_HIT_QUEUE_POP_MASK 0x00C00000L
+#define GL2C_CTRL__MDC_SIZE_MASK 0x03000000L
+#define GL2C_CTRL__METADATA_TO_HIT_QUEUE_MASK 0x04000000L
+#define GL2C_CTRL__IGNORE_FULLY_WRITTEN_MASK 0x08000000L
+#define GL2C_CTRL__MDC_SIDEBAND_FIFO_SIZE_MASK 0xF0000000L
+//GL2C_CTRL2
+#define GL2C_CTRL2__PROBE_FIFO_SIZE__SHIFT 0x0
+#define GL2C_CTRL2__ADDR_MATCH_DISABLE__SHIFT 0x4
+#define GL2C_CTRL2__FILL_SIZE_32__SHIFT 0x5
+#define GL2C_CTRL2__RB_TO_HI_PRIORITY__SHIFT 0x6
+#define GL2C_CTRL2__HIT_UNDER_MISS_DISABLE__SHIFT 0x7
+#define GL2C_CTRL2__RO_DISABLE__SHIFT 0x8
+#define GL2C_CTRL2__FORCE_MDC_INV__SHIFT 0x9
+#define GL2C_CTRL2__GCR_ARB_CTRL__SHIFT 0xa
+#define GL2C_CTRL2__GCR_ALL_SET__SHIFT 0xd
+#define GL2C_CTRL2__FILL_SIZE_64__SHIFT 0x11
+#define GL2C_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK__SHIFT 0x12
+#define GL2C_CTRL2__WRITEBACK_ALL_WAIT_FOR_ALL_EA_WRITE_COMPLETE__SHIFT 0x13
+#define GL2C_CTRL2__METADATA_VOLATILE_EN__SHIFT 0x14
+#define GL2C_CTRL2__RB_VOLATILE_EN__SHIFT 0x15
+#define GL2C_CTRL2__PROBE_UNSHARED_EN__SHIFT 0x16
+#define GL2C_CTRL2__MAX_MIN_CTRL__SHIFT 0x17
+#define GL2C_CTRL2__MDC_UC_TO_C_RO_EN__SHIFT 0x1a
+#define GL2C_CTRL2__PROBE_FIFO_SIZE_MASK 0x0000000FL
+#define GL2C_CTRL2__ADDR_MATCH_DISABLE_MASK 0x00000010L
+#define GL2C_CTRL2__FILL_SIZE_32_MASK 0x00000020L
+#define GL2C_CTRL2__RB_TO_HI_PRIORITY_MASK 0x00000040L
+#define GL2C_CTRL2__HIT_UNDER_MISS_DISABLE_MASK 0x00000080L
+#define GL2C_CTRL2__RO_DISABLE_MASK 0x00000100L
+#define GL2C_CTRL2__FORCE_MDC_INV_MASK 0x00000200L
+#define GL2C_CTRL2__GCR_ARB_CTRL_MASK 0x00001C00L
+#define GL2C_CTRL2__GCR_ALL_SET_MASK 0x00002000L
+#define GL2C_CTRL2__FILL_SIZE_64_MASK 0x00020000L
+#define GL2C_CTRL2__USE_EA_EARLYWRRET_ON_WRITEBACK_MASK 0x00040000L
+#define GL2C_CTRL2__WRITEBACK_ALL_WAIT_FOR_ALL_EA_WRITE_COMPLETE_MASK 0x00080000L
+#define GL2C_CTRL2__METADATA_VOLATILE_EN_MASK 0x00100000L
+#define GL2C_CTRL2__RB_VOLATILE_EN_MASK 0x00200000L
+#define GL2C_CTRL2__PROBE_UNSHARED_EN_MASK 0x00400000L
+#define GL2C_CTRL2__MAX_MIN_CTRL_MASK 0x01800000L
+#define GL2C_CTRL2__MDC_UC_TO_C_RO_EN_MASK 0x04000000L
+//GL2C_STATUS
+#define GL2C_STATUS__NONCACHEABLE_FLOAT_ATOMIC__SHIFT 0x0
+#define GL2C_STATUS__NONCACHEABLE_U8_ATOMIC__SHIFT 0x4
+#define GL2C_STATUS__NONCACHEABLE_CLAMP_SUB_ATOMIC__SHIFT 0x5
+#define GL2C_STATUS__WRRET_NACK_FAULT__SHIFT 0x6
+#define GL2C_STATUS__RDRET_NACK_FAULT__SHIFT 0x7
+#define GL2C_STATUS__METADATA_FED__SHIFT 0x8
+#define GL2C_STATUS__FED_FSM_STATE__SHIFT 0x9
+#define GL2C_STATUS__SAFE_MODE_FED__SHIFT 0xb
+#define GL2C_STATUS__DCC_OUT_INVALID_KEY_ERROR_CODE__SHIFT 0x12
+#define GL2C_STATUS__NONCACHEABLE_FLOAT_ATOMIC_MASK 0x00000001L
+#define GL2C_STATUS__NONCACHEABLE_U8_ATOMIC_MASK 0x00000010L
+#define GL2C_STATUS__NONCACHEABLE_CLAMP_SUB_ATOMIC_MASK 0x00000020L
+#define GL2C_STATUS__WRRET_NACK_FAULT_MASK 0x00000040L
+#define GL2C_STATUS__RDRET_NACK_FAULT_MASK 0x00000080L
+#define GL2C_STATUS__METADATA_FED_MASK 0x00000100L
+#define GL2C_STATUS__FED_FSM_STATE_MASK 0x00000600L
+#define GL2C_STATUS__SAFE_MODE_FED_MASK 0x00000800L
+#define GL2C_STATUS__DCC_OUT_INVALID_KEY_ERROR_CODE_MASK 0x007C0000L
+//GL2C_ADDR_MATCH_MASK
+#define GL2C_ADDR_MATCH_MASK__ADDR_MASK__SHIFT 0x0
+#define GL2C_ADDR_MATCH_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//GL2C_ADDR_MATCH_SIZE
+#define GL2C_ADDR_MATCH_SIZE__MAX_COUNT__SHIFT 0x0
+#define GL2C_ADDR_MATCH_SIZE__MAX_COUNT_MASK 0x00000007L
+//GL2C_WBINVL2
+#define GL2C_WBINVL2__DONE__SHIFT 0x4
+#define GL2C_WBINVL2__DONE_MASK 0x00000010L
+//GL2C_SOFT_RESET
+#define GL2C_SOFT_RESET__HALT_FOR_RESET__SHIFT 0x0
+#define GL2C_SOFT_RESET__HALT_FOR_RESET_MASK 0x00000001L
+//GL2C_CM_CTRL0
+#define GL2C_CM_CTRL0__HASH_MASK__SHIFT 0x0
+#define GL2C_CM_CTRL0__HASH_MASK_MASK 0xFFFFFFFFL
+//GL2C_CM_CTRL1
+#define GL2C_CM_CTRL1__HASH_MASK__SHIFT 0x0
+#define GL2C_CM_CTRL1__BURST_TIMER__SHIFT 0x8
+#define GL2C_CM_CTRL1__RVF_SIZE__SHIFT 0x10
+#define GL2C_CM_CTRL1__WRITE_COH_MODE__SHIFT 0x17
+#define GL2C_CM_CTRL1__MDC_ARB_MODE__SHIFT 0x19
+#define GL2C_CM_CTRL1__READ_REQ_ONLY__SHIFT 0x1a
+#define GL2C_CM_CTRL1__COMP_TO_CONSTANT_EN__SHIFT 0x1b
+#define GL2C_CM_CTRL1__COMP_TO_SINGLE_EN__SHIFT 0x1c
+#define GL2C_CM_CTRL1__BURST_MODE__SHIFT 0x1d
+#define GL2C_CM_CTRL1__UNCOMP_READBACK_FILTER__SHIFT 0x1e
+#define GL2C_CM_CTRL1__WAIT_ATOMIC_RECOMP_WRITE__SHIFT 0x1f
+#define GL2C_CM_CTRL1__HASH_MASK_MASK 0x0000000FL
+#define GL2C_CM_CTRL1__BURST_TIMER_MASK 0x0000FF00L
+#define GL2C_CM_CTRL1__RVF_SIZE_MASK 0x000F0000L
+#define GL2C_CM_CTRL1__WRITE_COH_MODE_MASK 0x01800000L
+#define GL2C_CM_CTRL1__MDC_ARB_MODE_MASK 0x02000000L
+#define GL2C_CM_CTRL1__READ_REQ_ONLY_MASK 0x04000000L
+#define GL2C_CM_CTRL1__COMP_TO_CONSTANT_EN_MASK 0x08000000L
+#define GL2C_CM_CTRL1__COMP_TO_SINGLE_EN_MASK 0x10000000L
+#define GL2C_CM_CTRL1__BURST_MODE_MASK 0x20000000L
+#define GL2C_CM_CTRL1__UNCOMP_READBACK_FILTER_MASK 0x40000000L
+#define GL2C_CM_CTRL1__WAIT_ATOMIC_RECOMP_WRITE_MASK 0x80000000L
+//GL2C_CM_STALL
+#define GL2C_CM_STALL__QUEUE__SHIFT 0x0
+#define GL2C_CM_STALL__QUEUE_MASK 0xFFFFFFFFL
+//GL2C_CM_CTRL2
+#define GL2C_CM_CTRL2__READ_BURST_TIMER__SHIFT 0x0
+#define GL2C_CM_CTRL2__VRS_DISABLE__SHIFT 0x8
+#define GL2C_CM_CTRL2__SKIP_LOW_COMP_RATIO__SHIFT 0x9
+#define GL2C_CM_CTRL2__CM_NBC_IND64_DISABLE__SHIFT 0xa
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MODE__SHIFT 0xb
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_METADATA_WR_MODE__SHIFT 0xc
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MAX_UNCOMP_BLK_SZ_MODE__SHIFT 0xd
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_SECTOR_READBACK_MODE__SHIFT 0xf
+#define GL2C_CM_CTRL2__RECOMP_DISABLE__SHIFT 0x10
+#define GL2C_CM_CTRL2__DCC_COMP_KEY_ERROR_DETECTION_EN__SHIFT 0x11
+#define GL2C_CM_CTRL2__DCC_CLEAR_FRAG2DCC_KEY_ERROR_CODE__SHIFT 0x12
+#define GL2C_CM_CTRL2__READ_BURST_TIMER_MASK 0x000000FFL
+#define GL2C_CM_CTRL2__VRS_DISABLE_MASK 0x00000100L
+#define GL2C_CM_CTRL2__SKIP_LOW_COMP_RATIO_MASK 0x00000200L
+#define GL2C_CM_CTRL2__CM_NBC_IND64_DISABLE_MASK 0x00000400L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MODE_MASK 0x00000800L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_METADATA_WR_MODE_MASK 0x00001000L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_MAX_UNCOMP_BLK_SZ_MODE_MASK 0x00006000L
+#define GL2C_CM_CTRL2__PARTIAL_WR_OPT_SECTOR_READBACK_MODE_MASK 0x00008000L
+#define GL2C_CM_CTRL2__RECOMP_DISABLE_MASK 0x00010000L
+#define GL2C_CM_CTRL2__DCC_COMP_KEY_ERROR_DETECTION_EN_MASK 0x00020000L
+#define GL2C_CM_CTRL2__DCC_CLEAR_FRAG2DCC_KEY_ERROR_CODE_MASK 0x00040000L
+//GL2C_CTRL3
+#define GL2C_CTRL3__METADATA_MTYPE_COHERENCY__SHIFT 0x0
+#define GL2C_CTRL3__METADATA_NOFILL__SHIFT 0x3
+#define GL2C_CTRL3__METADATA_NEXT_CL_PREFETCH__SHIFT 0x4
+#define GL2C_CTRL3__BANK_LINEAR_HASH_MODE__SHIFT 0x5
+#define GL2C_CTRL3__HTILE_TO_HI_PRIORITY__SHIFT 0x6
+#define GL2C_CTRL3__UNCACHED_WRITE_ATOMIC_TO_UC_WRITE__SHIFT 0x7
+#define GL2C_CTRL3__IO_CHANNEL_ENABLE__SHIFT 0x8
+#define GL2C_CTRL3__FMASK_TO_HI_PRIORITY__SHIFT 0x9
+#define GL2C_CTRL3__DCC_CMASK_TO_HI_PRIORITY__SHIFT 0xa
+#define GL2C_CTRL3__BANK_LINEAR_HASH_ENABLE__SHIFT 0xb
+#define GL2C_CTRL3__HASH_256B_ENABLE__SHIFT 0xc
+#define GL2C_CTRL3__DECOMP_NBC_IND64_DISABLE__SHIFT 0xd
+#define GL2C_CTRL3__FORCE_READ_ON_WRITE_OP__SHIFT 0xe
+#define GL2C_CTRL3__FGCG_OVERRIDE__SHIFT 0xf
+#define GL2C_CTRL3__FORCE_MTYPE_UC__SHIFT 0x10
+#define GL2C_CTRL3__DGPU_SHARED_MODE__SHIFT 0x11
+#define GL2C_CTRL3__WRITE_SET_SECTOR_FULLY_WRITTEN__SHIFT 0x12
+#define GL2C_CTRL3__EA_READ_SIZE_LIMIT__SHIFT 0x13
+#define GL2C_CTRL3__READ_BYPASS_AS_UC__SHIFT 0x14
+#define GL2C_CTRL3__WB_OPT_ENABLE__SHIFT 0x15
+#define GL2C_CTRL3__WB_OPT_BURST_MAX_COUNT__SHIFT 0x16
+#define GL2C_CTRL3__SET_GROUP_LINEAR_HASH_ENABLE__SHIFT 0x18
+#define GL2C_CTRL3__EA_GMI_DISABLE__SHIFT 0x19
+#define GL2C_CTRL3__SQC_TO_HI_PRIORITY__SHIFT 0x1a
+#define GL2C_CTRL3__INF_NAN_CLAMP__SHIFT 0x1b
+#define GL2C_CTRL3__SCRATCH__SHIFT 0x1c
+#define GL2C_CTRL3__METADATA_MTYPE_COHERENCY_MASK 0x00000003L
+#define GL2C_CTRL3__METADATA_NOFILL_MASK 0x00000008L
+#define GL2C_CTRL3__METADATA_NEXT_CL_PREFETCH_MASK 0x00000010L
+#define GL2C_CTRL3__BANK_LINEAR_HASH_MODE_MASK 0x00000020L
+#define GL2C_CTRL3__HTILE_TO_HI_PRIORITY_MASK 0x00000040L
+#define GL2C_CTRL3__UNCACHED_WRITE_ATOMIC_TO_UC_WRITE_MASK 0x00000080L
+#define GL2C_CTRL3__IO_CHANNEL_ENABLE_MASK 0x00000100L
+#define GL2C_CTRL3__FMASK_TO_HI_PRIORITY_MASK 0x00000200L
+#define GL2C_CTRL3__DCC_CMASK_TO_HI_PRIORITY_MASK 0x00000400L
+#define GL2C_CTRL3__BANK_LINEAR_HASH_ENABLE_MASK 0x00000800L
+#define GL2C_CTRL3__HASH_256B_ENABLE_MASK 0x00001000L
+#define GL2C_CTRL3__DECOMP_NBC_IND64_DISABLE_MASK 0x00002000L
+#define GL2C_CTRL3__FORCE_READ_ON_WRITE_OP_MASK 0x00004000L
+#define GL2C_CTRL3__FGCG_OVERRIDE_MASK 0x00008000L
+#define GL2C_CTRL3__FORCE_MTYPE_UC_MASK 0x00010000L
+#define GL2C_CTRL3__DGPU_SHARED_MODE_MASK 0x00020000L
+#define GL2C_CTRL3__WRITE_SET_SECTOR_FULLY_WRITTEN_MASK 0x00040000L
+#define GL2C_CTRL3__EA_READ_SIZE_LIMIT_MASK 0x00080000L
+#define GL2C_CTRL3__READ_BYPASS_AS_UC_MASK 0x00100000L
+#define GL2C_CTRL3__WB_OPT_ENABLE_MASK 0x00200000L
+#define GL2C_CTRL3__WB_OPT_BURST_MAX_COUNT_MASK 0x00C00000L
+#define GL2C_CTRL3__SET_GROUP_LINEAR_HASH_ENABLE_MASK 0x01000000L
+#define GL2C_CTRL3__EA_GMI_DISABLE_MASK 0x02000000L
+#define GL2C_CTRL3__SQC_TO_HI_PRIORITY_MASK 0x04000000L
+#define GL2C_CTRL3__INF_NAN_CLAMP_MASK 0x08000000L
+#define GL2C_CTRL3__SCRATCH_MASK 0xF0000000L
+//GL2C_LB_CTR_CTRL
+#define GL2C_LB_CTR_CTRL__START__SHIFT 0x0
+#define GL2C_LB_CTR_CTRL__LOAD__SHIFT 0x1
+#define GL2C_LB_CTR_CTRL__CLEAR__SHIFT 0x2
+#define GL2C_LB_CTR_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0x1f
+#define GL2C_LB_CTR_CTRL__START_MASK 0x00000001L
+#define GL2C_LB_CTR_CTRL__LOAD_MASK 0x00000002L
+#define GL2C_LB_CTR_CTRL__CLEAR_MASK 0x00000004L
+#define GL2C_LB_CTR_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x80000000L
+//GL2C_LB_DATA0
+#define GL2C_LB_DATA0__DATA__SHIFT 0x0
+#define GL2C_LB_DATA0__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA1
+#define GL2C_LB_DATA1__DATA__SHIFT 0x0
+#define GL2C_LB_DATA1__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA2
+#define GL2C_LB_DATA2__DATA__SHIFT 0x0
+#define GL2C_LB_DATA2__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_DATA3
+#define GL2C_LB_DATA3__DATA__SHIFT 0x0
+#define GL2C_LB_DATA3__DATA_MASK 0xFFFFFFFFL
+//GL2C_LB_CTR_SEL0
+#define GL2C_LB_CTR_SEL0__SEL0__SHIFT 0x0
+#define GL2C_LB_CTR_SEL0__DIV0__SHIFT 0xf
+#define GL2C_LB_CTR_SEL0__SEL1__SHIFT 0x10
+#define GL2C_LB_CTR_SEL0__DIV1__SHIFT 0x1f
+#define GL2C_LB_CTR_SEL0__SEL0_MASK 0x000000FFL
+#define GL2C_LB_CTR_SEL0__DIV0_MASK 0x00008000L
+#define GL2C_LB_CTR_SEL0__SEL1_MASK 0x00FF0000L
+#define GL2C_LB_CTR_SEL0__DIV1_MASK 0x80000000L
+//GL2C_LB_CTR_SEL1
+#define GL2C_LB_CTR_SEL1__SEL2__SHIFT 0x0
+#define GL2C_LB_CTR_SEL1__DIV2__SHIFT 0xf
+#define GL2C_LB_CTR_SEL1__SEL3__SHIFT 0x10
+#define GL2C_LB_CTR_SEL1__DIV3__SHIFT 0x1f
+#define GL2C_LB_CTR_SEL1__SEL2_MASK 0x000000FFL
+#define GL2C_LB_CTR_SEL1__DIV2_MASK 0x00008000L
+#define GL2C_LB_CTR_SEL1__SEL3_MASK 0x00FF0000L
+#define GL2C_LB_CTR_SEL1__DIV3_MASK 0x80000000L
+//GL2C_CTRL4
+#define GL2C_CTRL4__METADATA_WR_OP_CID__SHIFT 0x0
+#define GL2C_CTRL4__SPA_CHANNEL_ENABLE__SHIFT 0x1
+#define GL2C_CTRL4__SRC_FIFO_MDC_LOW_PRIORITY__SHIFT 0x2
+#define GL2C_CTRL4__WRITEBACK_FIFO_STALL_ENABLE__SHIFT 0x3
+#define GL2C_CTRL4__CM_MGCG_MODE__SHIFT 0x4
+#define GL2C_CTRL4__MDC_MGCG_MODE__SHIFT 0x5
+#define GL2C_CTRL4__TAG_MGCG_MODE__SHIFT 0x6
+#define GL2C_CTRL4__CORE_MGCG_MODE__SHIFT 0x7
+#define GL2C_CTRL4__EXECUTE_MGCG_MODE__SHIFT 0x8
+#define GL2C_CTRL4__EA_NACK_DISABLE__SHIFT 0x9
+#define GL2C_CTRL4__FED_SAFE_MODE__SHIFT 0xa
+#define GL2C_CTRL4__FLUSH_SET_COUNTER_MASK_DISABLE__SHIFT 0xb
+#define GL2C_CTRL4__NO_WRITE_ACK_TO_HIT_QUEUE__SHIFT 0x1a
+#define GL2C_CTRL4__METADATA_WR_OP_CID_MASK 0x00000001L
+#define GL2C_CTRL4__SPA_CHANNEL_ENABLE_MASK 0x00000002L
+#define GL2C_CTRL4__SRC_FIFO_MDC_LOW_PRIORITY_MASK 0x00000004L
+#define GL2C_CTRL4__WRITEBACK_FIFO_STALL_ENABLE_MASK 0x00000008L
+#define GL2C_CTRL4__CM_MGCG_MODE_MASK 0x00000010L
+#define GL2C_CTRL4__MDC_MGCG_MODE_MASK 0x00000020L
+#define GL2C_CTRL4__TAG_MGCG_MODE_MASK 0x00000040L
+#define GL2C_CTRL4__CORE_MGCG_MODE_MASK 0x00000080L
+#define GL2C_CTRL4__EXECUTE_MGCG_MODE_MASK 0x00000100L
+#define GL2C_CTRL4__EA_NACK_DISABLE_MASK 0x00000200L
+#define GL2C_CTRL4__FED_SAFE_MODE_MASK 0x00000400L
+#define GL2C_CTRL4__FLUSH_SET_COUNTER_MASK_DISABLE_MASK 0x00000800L
+#define GL2C_CTRL4__NO_WRITE_ACK_TO_HIT_QUEUE_MASK 0x04000000L
+//GL2C_DISCARD_STALL_CTRL
+#define GL2C_DISCARD_STALL_CTRL__LIMIT__SHIFT 0x0
+#define GL2C_DISCARD_STALL_CTRL__WINDOW__SHIFT 0xf
+#define GL2C_DISCARD_STALL_CTRL__DROP_NEXT__SHIFT 0x1e
+#define GL2C_DISCARD_STALL_CTRL__ENABLE__SHIFT 0x1f
+#define GL2C_DISCARD_STALL_CTRL__LIMIT_MASK 0x00007FFFL
+#define GL2C_DISCARD_STALL_CTRL__WINDOW_MASK 0x3FFF8000L
+#define GL2C_DISCARD_STALL_CTRL__DROP_NEXT_MASK 0x40000000L
+#define GL2C_DISCARD_STALL_CTRL__ENABLE_MASK 0x80000000L
+//GL2A_ADDR_MATCH_CTRL
+#define GL2A_ADDR_MATCH_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_ADDR_MATCH_CTRL__DISABLE_MASK 0xFFFFFFFFL
+//GL2A_ADDR_MATCH_MASK
+#define GL2A_ADDR_MATCH_MASK__ADDR_MASK__SHIFT 0x0
+#define GL2A_ADDR_MATCH_MASK__ADDR_MASK_MASK 0xFFFFFFFFL
+//GL2A_ADDR_MATCH_SIZE
+#define GL2A_ADDR_MATCH_SIZE__MAX_COUNT__SHIFT 0x0
+#define GL2A_ADDR_MATCH_SIZE__MAX_COUNT_MASK 0x00000007L
+//GL2A_PRIORITY_CTRL
+#define GL2A_PRIORITY_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_PRIORITY_CTRL__DISABLE_MASK 0xFFFFFFFFL
+//GL2A_CTRL
+#define GL2A_CTRL__RTN_ARB_TIMER_RESET_VALUE__SHIFT 0x0
+#define GL2A_CTRL__STAY_ON_BURST__SHIFT 0x1
+#define GL2A_CTRL__FGCG_OVERRIDE__SHIFT 0x2
+#define GL2A_CTRL__CLIENT_ARB_PRIO_STAY__SHIFT 0x3
+#define GL2A_CTRL__GCRD_CREDIT_SAFE_REG__SHIFT 0x4
+#define GL2A_CTRL__REQ_CREDIT_SAFE_REG__SHIFT 0x8
+#define GL2A_CTRL__WRITE_COMBINE_TIMEOUT_COUNT__SHIFT 0xc
+#define GL2A_CTRL__INTERNAL_RETURN_BYPASS_ENABLE__SHIFT 0x11
+#define GL2A_CTRL__ADDR_REMOVE_COLBITS__SHIFT 0x12
+#define GL2A_CTRL__RTN_ARB_TIMER_RESET_VALUE_MASK 0x00000001L
+#define GL2A_CTRL__STAY_ON_BURST_MASK 0x00000002L
+#define GL2A_CTRL__FGCG_OVERRIDE_MASK 0x00000004L
+#define GL2A_CTRL__CLIENT_ARB_PRIO_STAY_MASK 0x00000008L
+#define GL2A_CTRL__GCRD_CREDIT_SAFE_REG_MASK 0x000000F0L
+#define GL2A_CTRL__REQ_CREDIT_SAFE_REG_MASK 0x00000F00L
+#define GL2A_CTRL__WRITE_COMBINE_TIMEOUT_COUNT_MASK 0x0001F000L
+#define GL2A_CTRL__INTERNAL_RETURN_BYPASS_ENABLE_MASK 0x00020000L
+#define GL2A_CTRL__ADDR_REMOVE_COLBITS_MASK 0x00040000L
+//GL2A_RESP_THROTTLE_CTRL
+#define GL2A_RESP_THROTTLE_CTRL__DISABLE__SHIFT 0x0
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_GL1__SHIFT 0x10
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_CH__SHIFT 0x18
+#define GL2A_RESP_THROTTLE_CTRL__DISABLE_MASK 0x0000FFFFL
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_GL1_MASK 0x00FF0000L
+#define GL2A_RESP_THROTTLE_CTRL__CREDIT_CH_MASK 0xFF000000L
+
+
+// addressBlock: gc_gl1hdec
+//GL1H_ARB_CTRL
+#define GL1H_ARB_CTRL__REQ_FGCG_DISABLE__SHIFT 0x0
+#define GL1H_ARB_CTRL__SRC_FGCG_DISABLE__SHIFT 0x1
+#define GL1H_ARB_CTRL__RET_FGCG_DISABLE__SHIFT 0x2
+#define GL1H_ARB_CTRL__CHICKEN_BITS__SHIFT 0x3
+#define GL1H_ARB_CTRL__PERF_CNTR_EN_OVERRIDE__SHIFT 0xb
+#define GL1H_ARB_CTRL__REQ_FGCG_DISABLE_MASK 0x00000001L
+#define GL1H_ARB_CTRL__SRC_FGCG_DISABLE_MASK 0x00000002L
+#define GL1H_ARB_CTRL__RET_FGCG_DISABLE_MASK 0x00000004L
+#define GL1H_ARB_CTRL__CHICKEN_BITS_MASK 0x000007F8L
+#define GL1H_ARB_CTRL__PERF_CNTR_EN_OVERRIDE_MASK 0x00000800L
+//GL1H_GL1_CREDITS
+#define GL1H_GL1_CREDITS__GL1_REQ_CREDITS__SHIFT 0x0
+#define GL1H_GL1_CREDITS__GL1_REQ_CREDITS_MASK 0x000000FFL
+//GL1H_BURST_MASK
+#define GL1H_BURST_MASK__BURST_ADDR_MASK__SHIFT 0x0
+#define GL1H_BURST_MASK__BURST_ADDR_MASK_MASK 0x000000FFL
+//GL1H_BURST_CTRL
+#define GL1H_BURST_CTRL__MAX_BURST_SIZE__SHIFT 0x0
+#define GL1H_BURST_CTRL__BURST_DISABLE__SHIFT 0x3
+#define GL1H_BURST_CTRL__SPARE_BURST_CTRL_BITS__SHIFT 0x4
+#define GL1H_BURST_CTRL__MAX_BURST_SIZE_MASK 0x00000007L
+#define GL1H_BURST_CTRL__BURST_DISABLE_MASK 0x00000008L
+#define GL1H_BURST_CTRL__SPARE_BURST_CTRL_BITS_MASK 0x00000030L
+//GL1H_ARB_STATUS
+#define GL1H_ARB_STATUS__REQ_ARB_BUSY__SHIFT 0x0
+#define GL1H_ARB_STATUS__CLIENT1_ILLEGAL_REQ__SHIFT 0x1
+#define GL1H_ARB_STATUS__REQ_ARB_BUSY_MASK 0x00000001L
+#define GL1H_ARB_STATUS__CLIENT1_ILLEGAL_REQ_MASK 0x00000002L
+
+
+// addressBlock: gc_perfddec
+//CPG_PERFCOUNTER1_LO
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER1_HI
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_LO
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPG_PERFCOUNTER0_HI
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_LO
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER1_HI
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_LO
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPC_PERFCOUNTER0_HI
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_LO
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER1_HI
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_LO
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CPF_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CPF_PERFCOUNTER0_HI
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CPF_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CPF_LATENCY_STATS_DATA
+#define CPF_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPF_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_LATENCY_STATS_DATA
+#define CPG_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPG_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//CPC_LATENCY_STATS_DATA
+#define CPC_LATENCY_STATS_DATA__DATA__SHIFT 0x0
+#define CPC_LATENCY_STATS_DATA__DATA_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_LO
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER0_HI
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_LO
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_PERFCOUNTER1_HI
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_LO
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE0_PERFCOUNTER_HI
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_LO
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE1_PERFCOUNTER_HI
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_LO
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE2_PERFCOUNTER_HI
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_LO
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GRBM_SE3_PERFCOUNTER_HI
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER0_LO
+#define GE1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER0_HI
+#define GE1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER1_LO
+#define GE1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER1_HI
+#define GE1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER2_LO
+#define GE1_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER2_HI
+#define GE1_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER3_LO
+#define GE1_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE1_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE1_PERFCOUNTER3_HI
+#define GE1_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE1_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER0_LO
+#define GE2_DIST_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER0_HI
+#define GE2_DIST_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER1_LO
+#define GE2_DIST_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER1_HI
+#define GE2_DIST_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER2_LO
+#define GE2_DIST_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER2_HI
+#define GE2_DIST_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER3_LO
+#define GE2_DIST_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_DIST_PERFCOUNTER3_HI
+#define GE2_DIST_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER0_LO
+#define GE2_SE_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER0_HI
+#define GE2_SE_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER1_LO
+#define GE2_SE_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER1_HI
+#define GE2_SE_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER2_LO
+#define GE2_SE_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER2_HI
+#define GE2_SE_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER3_LO
+#define GE2_SE_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GE2_SE_PERFCOUNTER3_HI
+#define GE2_SE_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_LO
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER0_HI
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_LO
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER1_HI
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_LO
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER2_HI
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_LO
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SU_PERFCOUNTER3_HI
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_LO
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER0_HI
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_LO
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER1_HI
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_LO
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER2_HI
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_LO
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER3_HI
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_LO
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER4_HI
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_LO
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER5_HI
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_LO
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER6_HI
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_LO
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_SC_PERFCOUNTER7_HI
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_HI
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER0_LO
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_HI
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER1_LO
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_HI
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER2_LO
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_HI
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER3_LO
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_HI
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER4_LO
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_HI
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SPI_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SPI_PERFCOUNTER5_LO
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SPI_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER0_HI
+#define PC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER0_LO
+#define PC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER1_HI
+#define PC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER1_LO
+#define PC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER2_HI
+#define PC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER2_LO
+#define PC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER3_HI
+#define PC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PC_PERFCOUNTER3_LO
+#define PC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER0_LO
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER1_LO
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER2_LO
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER3_LO
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER4_LO
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER5_LO
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER6_LO
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQ_PERFCOUNTER7_LO
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQ_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER0_LO
+#define SQG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER0_HI
+#define SQG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER1_LO
+#define SQG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER1_HI
+#define SQG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER2_LO
+#define SQG_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER2_HI
+#define SQG_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER3_LO
+#define SQG_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER3_HI
+#define SQG_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER4_LO
+#define SQG_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER4_HI
+#define SQG_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER5_LO
+#define SQG_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER5_HI
+#define SQG_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER6_LO
+#define SQG_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER6_HI
+#define SQG_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER7_LO
+#define SQG_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SQG_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SQG_PERFCOUNTER7_HI
+#define SQG_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SQG_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_LO
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER0_HI
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_LO
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER1_HI
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_LO
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER2_HI
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_LO
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define SX_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//SX_PERFCOUNTER3_HI
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define SX_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_LO
+#define GCEA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER2_HI
+#define GCEA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_LO
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GCEA_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GCEA_PERFCOUNTER_HI
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GCEA_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
+//GDS_PERFCOUNTER0_LO
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER0_HI
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_LO
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER1_HI
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_LO
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER2_HI
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_LO
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GDS_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GDS_PERFCOUNTER3_HI
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GDS_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_LO
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER0_HI
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_LO
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TA_PERFCOUNTER1_HI
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_LO
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER0_HI
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_LO
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TD_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TD_PERFCOUNTER1_HI
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TD_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_LO
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER0_HI
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_LO
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER1_HI
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_LO
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER2_HI
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_LO
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define TCP_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER3_HI
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define TCP_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//TCP_PERFCOUNTER_FILTER
+#define TCP_PERFCOUNTER_FILTER__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT__SHIFT 0xd
+#define TCP_PERFCOUNTER_FILTER__SW_MODE__SHIFT 0x11
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES__SHIFT 0x16
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE__SHIFT 0x18
+#define TCP_PERFCOUNTER_FILTER__SLC__SHIFT 0x1b
+#define TCP_PERFCOUNTER_FILTER__DLC__SHIFT 0x1c
+#define TCP_PERFCOUNTER_FILTER__GLC__SHIFT 0x1d
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE__SHIFT 0x1e
+#define TCP_PERFCOUNTER_FILTER__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER__DIM_MASK 0x0000001CL
+#define TCP_PERFCOUNTER_FILTER__DATA_FORMAT_MASK 0x00000FE0L
+#define TCP_PERFCOUNTER_FILTER__NUM_FORMAT_MASK 0x0001E000L
+#define TCP_PERFCOUNTER_FILTER__SW_MODE_MASK 0x003E0000L
+#define TCP_PERFCOUNTER_FILTER__NUM_SAMPLES_MASK 0x00C00000L
+#define TCP_PERFCOUNTER_FILTER__OPCODE_TYPE_MASK 0x07000000L
+#define TCP_PERFCOUNTER_FILTER__SLC_MASK 0x08000000L
+#define TCP_PERFCOUNTER_FILTER__DLC_MASK 0x10000000L
+#define TCP_PERFCOUNTER_FILTER__GLC_MASK 0x20000000L
+#define TCP_PERFCOUNTER_FILTER__COMPRESSION_ENABLE_MASK 0x40000000L
+//TCP_PERFCOUNTER_FILTER2
+#define TCP_PERFCOUNTER_FILTER2__REQ_MODE__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER2__REQ_MODE_MASK 0x00000007L
+//TCP_PERFCOUNTER_FILTER_EN
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER__SHIFT 0x0
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT__SHIFT 0x1
+#define TCP_PERFCOUNTER_FILTER_EN__DIM__SHIFT 0x2
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT__SHIFT 0x3
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT__SHIFT 0x4
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE__SHIFT 0x5
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES__SHIFT 0x6
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE__SHIFT 0x7
+#define TCP_PERFCOUNTER_FILTER_EN__SLC__SHIFT 0x8
+#define TCP_PERFCOUNTER_FILTER_EN__DLC__SHIFT 0x9
+#define TCP_PERFCOUNTER_FILTER_EN__GLC__SHIFT 0xa
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE__SHIFT 0xb
+#define TCP_PERFCOUNTER_FILTER_EN__REQ_MODE__SHIFT 0xc
+#define TCP_PERFCOUNTER_FILTER_EN__BUFFER_MASK 0x00000001L
+#define TCP_PERFCOUNTER_FILTER_EN__FLAT_MASK 0x00000002L
+#define TCP_PERFCOUNTER_FILTER_EN__DIM_MASK 0x00000004L
+#define TCP_PERFCOUNTER_FILTER_EN__DATA_FORMAT_MASK 0x00000008L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_FORMAT_MASK 0x00000010L
+#define TCP_PERFCOUNTER_FILTER_EN__SW_MODE_MASK 0x00000020L
+#define TCP_PERFCOUNTER_FILTER_EN__NUM_SAMPLES_MASK 0x00000040L
+#define TCP_PERFCOUNTER_FILTER_EN__OPCODE_TYPE_MASK 0x00000080L
+#define TCP_PERFCOUNTER_FILTER_EN__SLC_MASK 0x00000100L
+#define TCP_PERFCOUNTER_FILTER_EN__DLC_MASK 0x00000200L
+#define TCP_PERFCOUNTER_FILTER_EN__GLC_MASK 0x00000400L
+#define TCP_PERFCOUNTER_FILTER_EN__COMPRESSION_ENABLE_MASK 0x00000800L
+#define TCP_PERFCOUNTER_FILTER_EN__REQ_MODE_MASK 0x00001000L
+//GL2C_PERFCOUNTER0_LO
+#define GL2C_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER0_HI
+#define GL2C_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER1_LO
+#define GL2C_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER1_HI
+#define GL2C_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER2_LO
+#define GL2C_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER2_HI
+#define GL2C_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER3_LO
+#define GL2C_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2C_PERFCOUNTER3_HI
+#define GL2C_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER0_LO
+#define GL2A_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER0_HI
+#define GL2A_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER1_LO
+#define GL2A_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER1_HI
+#define GL2A_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER2_LO
+#define GL2A_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER2_HI
+#define GL2A_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER3_LO
+#define GL2A_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL2A_PERFCOUNTER3_HI
+#define GL2A_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER0_LO
+#define GL1C_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER0_HI
+#define GL1C_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER1_LO
+#define GL1C_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER1_HI
+#define GL1C_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER2_LO
+#define GL1C_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER2_HI
+#define GL1C_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER3_LO
+#define GL1C_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1C_PERFCOUNTER3_HI
+#define GL1C_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER0_LO
+#define CHC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER0_HI
+#define CHC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER1_LO
+#define CHC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER1_HI
+#define CHC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER2_LO
+#define CHC_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER2_HI
+#define CHC_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER3_LO
+#define CHC_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHC_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHC_PERFCOUNTER3_HI
+#define CHC_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHC_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER0_LO
+#define CHCG_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER0_HI
+#define CHCG_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER1_LO
+#define CHCG_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER1_HI
+#define CHCG_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER2_LO
+#define CHCG_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER2_HI
+#define CHCG_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER3_LO
+#define CHCG_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHCG_PERFCOUNTER3_HI
+#define CHCG_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_LO
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER0_HI
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_LO
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER1_HI
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_LO
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER2_HI
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_LO
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CB_PERFCOUNTER3_HI
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_LO
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER0_HI
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_LO
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER1_HI
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_LO
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER2_HI
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_LO
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define DB_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//DB_PERFCOUNTER3_HI
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define DB_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_LO
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER0_HI
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_LO
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RLC_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RLC_PERFCOUNTER1_HI
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RLC_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_LO
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_HI
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_LO
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER1_HI
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_LO
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER2_HI
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_LO
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define RMI_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER3_HI
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define RMI_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER0_LO
+#define GCR_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCR_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER0_HI
+#define GCR_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCR_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER1_LO
+#define GCR_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GCR_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GCR_PERFCOUNTER1_HI
+#define GCR_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GCR_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER0_LO
+#define PA_PH_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER0_HI
+#define PA_PH_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER1_LO
+#define PA_PH_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER1_HI
+#define PA_PH_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER2_LO
+#define PA_PH_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER2_HI
+#define PA_PH_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER3_LO
+#define PA_PH_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER3_HI
+#define PA_PH_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER4_LO
+#define PA_PH_PERFCOUNTER4_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER4_HI
+#define PA_PH_PERFCOUNTER4_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER5_LO
+#define PA_PH_PERFCOUNTER5_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER5_HI
+#define PA_PH_PERFCOUNTER5_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER6_LO
+#define PA_PH_PERFCOUNTER6_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER6_HI
+#define PA_PH_PERFCOUNTER6_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER7_LO
+#define PA_PH_PERFCOUNTER7_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//PA_PH_PERFCOUNTER7_HI
+#define PA_PH_PERFCOUNTER7_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER0_LO
+#define UTCL1_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER0_HI
+#define UTCL1_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER1_LO
+#define UTCL1_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER1_HI
+#define UTCL1_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER2_LO
+#define UTCL1_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER2_HI
+#define UTCL1_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER3_LO
+#define UTCL1_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//UTCL1_PERFCOUNTER3_HI
+#define UTCL1_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER0_LO
+#define GL1A_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER0_HI
+#define GL1A_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER1_LO
+#define GL1A_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER1_HI
+#define GL1A_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER2_LO
+#define GL1A_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER2_HI
+#define GL1A_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER3_LO
+#define GL1A_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1A_PERFCOUNTER3_HI
+#define GL1A_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER0_LO
+#define GL1H_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER0_HI
+#define GL1H_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER1_LO
+#define GL1H_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER1_HI
+#define GL1H_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER2_LO
+#define GL1H_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER2_HI
+#define GL1H_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER3_LO
+#define GL1H_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GL1H_PERFCOUNTER3_HI
+#define GL1H_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER0_LO
+#define CHA_PERFCOUNTER0_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER0_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER0_HI
+#define CHA_PERFCOUNTER0_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER0_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER1_LO
+#define CHA_PERFCOUNTER1_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER1_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER1_HI
+#define CHA_PERFCOUNTER1_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER1_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER2_LO
+#define CHA_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER2_HI
+#define CHA_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER3_LO
+#define CHA_PERFCOUNTER3_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define CHA_PERFCOUNTER3_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//CHA_PERFCOUNTER3_HI
+#define CHA_PERFCOUNTER3_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define CHA_PERFCOUNTER3_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER2_LO
+#define GUS_PERFCOUNTER2_LO__PERFCOUNTER_LO__SHIFT 0x0
+#define GUS_PERFCOUNTER2_LO__PERFCOUNTER_LO_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER2_HI
+#define GUS_PERFCOUNTER2_HI__PERFCOUNTER_HI__SHIFT 0x0
+#define GUS_PERFCOUNTER2_HI__PERFCOUNTER_HI_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER_LO
+#define GUS_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0
+#define GUS_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL
+//GUS_PERFCOUNTER_HI
+#define GUS_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0
+#define GUS_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10
+#define GUS_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL
+#define GUS_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L
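+
+/*
+ * Usage sketch (illustrative comment only, not part of the generated register
+ * spec): every field above is described by a __SHIFT/_MASK pair, so a field
+ * such as the 16-bit COUNTER_HI of GCEA_PERFCOUNTER_HI would typically be
+ * read back as:
+ *
+ *   hi = (val & GCEA_PERFCOUNTER_HI__COUNTER_HI_MASK) >>
+ *        GCEA_PERFCOUNTER_HI__COUNTER_HI__SHIFT;
+ *
+ * and written by masking and shifting in the opposite direction. Drivers
+ * usually wrap this pattern in REG_GET_FIELD()/REG_SET_FIELD()-style helpers
+ * rather than open-coding the shifts.
+ */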
+
+
+// addressBlock: gc_perfsdec
+//CPG_PERFCOUNTER1_SELECT
+#define CPG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT1
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPG_PERFCOUNTER0_SELECT
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPG_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPC_PERFCOUNTER1_SELECT
+#define CPC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPC_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPC_PERFCOUNTER0_SELECT1
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER1_SELECT
+#define CPF_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x1c
+#define CPF_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPF_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT1
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE3_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT1__CNTR_MODE2_MASK 0xF0000000L
+//CPF_PERFCOUNTER0_SELECT
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPF_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPF_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPF_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CP_PERFMON_CNTL
+#define CP_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE__SHIFT 0x4
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE__SHIFT 0x8
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define CP_PERFMON_CNTL__PERFMON_STATE_MASK 0x0000000FL
+#define CP_PERFMON_CNTL__SPM_PERFMON_STATE_MASK 0x000000F0L
+#define CP_PERFMON_CNTL__PERFMON_ENABLE_MODE_MASK 0x00000300L
+#define CP_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//CPC_PERFCOUNTER0_SELECT
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1__SHIFT 0x18
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0__SHIFT 0x1c
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CPC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CPC_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE1_MASK 0x0F000000L
+#define CPC_PERFCOUNTER0_SELECT__CNTR_MODE0_MASK 0xF0000000L
+//CPF_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x00000007L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPF_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPG_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPG_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CPF_LATENCY_STATS_SELECT
+#define CPF_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPF_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPF_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPF_LATENCY_STATS_SELECT__INDEX_MASK 0x0000000FL
+#define CPF_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPF_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPG_LATENCY_STATS_SELECT
+#define CPG_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPG_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPG_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPG_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPG_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPG_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_LATENCY_STATS_SELECT
+#define CPC_LATENCY_STATS_SELECT__INDEX__SHIFT 0x0
+#define CPC_LATENCY_STATS_SELECT__CLEAR__SHIFT 0x1e
+#define CPC_LATENCY_STATS_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_LATENCY_STATS_SELECT__INDEX_MASK 0x0000001FL
+#define CPC_LATENCY_STATS_SELECT__CLEAR_MASK 0x40000000L
+#define CPC_LATENCY_STATS_SELECT__ENABLE_MASK 0x80000000L
+//CPC_TC_PERF_COUNTER_WINDOW_SELECT
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__INDEX__SHIFT 0x0
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS__SHIFT 0x1e
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE__SHIFT 0x1f
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__INDEX_MASK 0x0000001FL
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ALWAYS_MASK 0x40000000L
+#define CPC_TC_PERF_COUNTER_WINDOW_SELECT__ENABLE_MASK 0x80000000L
+//CP_DRAW_OBJECT
+#define CP_DRAW_OBJECT__OBJECT__SHIFT 0x0
+#define CP_DRAW_OBJECT__OBJECT_MASK 0xFFFFFFFFL
+//CP_DRAW_OBJECT_COUNTER
+#define CP_DRAW_OBJECT_COUNTER__COUNT__SHIFT 0x0
+#define CP_DRAW_OBJECT_COUNTER__COUNT_MASK 0x0000FFFFL
+//CP_DRAW_WINDOW_MASK_HI
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_MASK_HI__WINDOW_MASK_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_HI
+#define CP_DRAW_WINDOW_HI__WINDOW_HI__SHIFT 0x0
+#define CP_DRAW_WINDOW_HI__WINDOW_HI_MASK 0xFFFFFFFFL
+//CP_DRAW_WINDOW_LO
+#define CP_DRAW_WINDOW_LO__MIN__SHIFT 0x0
+#define CP_DRAW_WINDOW_LO__MAX__SHIFT 0x10
+#define CP_DRAW_WINDOW_LO__MIN_MASK 0x0000FFFFL
+#define CP_DRAW_WINDOW_LO__MAX_MASK 0xFFFF0000L
+//CP_DRAW_WINDOW_CNTL
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX__SHIFT 0x0
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN__SHIFT 0x1
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI__SHIFT 0x2
+#define CP_DRAW_WINDOW_CNTL__MODE__SHIFT 0x8
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MAX_MASK 0x00000001L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_LO_MIN_MASK 0x00000002L
+#define CP_DRAW_WINDOW_CNTL__DISABLE_DRAW_WINDOW_HI_MASK 0x00000004L
+#define CP_DRAW_WINDOW_CNTL__MODE_MASK 0x00000100L
+//GRBM_PERFCOUNTER0_SELECT
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER0_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER0_SELECT__GE_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER0_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER0_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER0_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER0_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER0_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER0_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER0_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER0_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER0_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER0_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER0_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER0_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER0_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER0_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER0_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER0_SELECT__GE_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER0_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER0_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER0_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_PERFCOUNTER1_SELECT
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xe
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK__SHIFT 0x13
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_PERFCOUNTER1_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_PERFCOUNTER1_SELECT__GE_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK__SHIFT 0x1d
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK__SHIFT 0x1e
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x1f
+#define GRBM_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_PERFCOUNTER1_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_PERFCOUNTER1_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_PERFCOUNTER1_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_PERFCOUNTER1_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00004000L
+#define GRBM_PERFCOUNTER1_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_PERFCOUNTER1_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_PERFCOUNTER1_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_PERFCOUNTER1_SELECT__GRBM_BUSY_USER_DEFINED_MASK_MASK 0x00080000L
+#define GRBM_PERFCOUNTER1_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_PERFCOUNTER1_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_PERFCOUNTER1_SELECT__CP_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_PERFCOUNTER1_SELECT__GDS_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_PERFCOUNTER1_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_PERFCOUNTER1_SELECT__RLC_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_PERFCOUNTER1_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_PERFCOUNTER1_SELECT__GE_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+#define GRBM_PERFCOUNTER1_SELECT__UTCL2_BUSY_USER_DEFINED_MASK_MASK 0x20000000L
+#define GRBM_PERFCOUNTER1_SELECT__EA_BUSY_USER_DEFINED_MASK_MASK 0x40000000L
+#define GRBM_PERFCOUNTER1_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x80000000L
+//GRBM_SE0_PERFCOUNTER_SELECT
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE0_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE0_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE0_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE0_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE0_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE0_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE1_PERFCOUNTER_SELECT
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE1_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE1_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE1_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE1_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE1_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE1_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE2_PERFCOUNTER_SELECT
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE2_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE2_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE2_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE2_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE2_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE2_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_SE3_PERFCOUNTER_SELECT
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL__SHIFT 0x0
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK__SHIFT 0xa
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK__SHIFT 0xb
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK__SHIFT 0xc
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK__SHIFT 0xd
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK__SHIFT 0xf
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK__SHIFT 0x10
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK__SHIFT 0x11
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK__SHIFT 0x12
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK__SHIFT 0x14
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK__SHIFT 0x15
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK__SHIFT 0x16
+#define GRBM_SE3_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x17
+#define GRBM_SE3_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK__SHIFT 0x18
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x19
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x1a
+#define GRBM_SE3_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK__SHIFT 0x1b
+#define GRBM_SE3_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK__SHIFT 0x1c
+#define GRBM_SE3_PERFCOUNTER_SELECT__PERF_SEL_MASK 0x0000003FL
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_CLEAN_USER_DEFINED_MASK_MASK 0x00000400L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_CLEAN_USER_DEFINED_MASK_MASK 0x00000800L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TA_BUSY_USER_DEFINED_MASK_MASK 0x00001000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SX_BUSY_USER_DEFINED_MASK_MASK 0x00002000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SPI_BUSY_USER_DEFINED_MASK_MASK 0x00008000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SC_BUSY_USER_DEFINED_MASK_MASK 0x00010000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__DB_BUSY_USER_DEFINED_MASK_MASK 0x00020000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__CB_BUSY_USER_DEFINED_MASK_MASK 0x00040000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PA_BUSY_USER_DEFINED_MASK_MASK 0x00100000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__BCI_BUSY_USER_DEFINED_MASK_MASK 0x00200000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__RMI_BUSY_USER_DEFINED_MASK_MASK 0x00400000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00800000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__TCP_BUSY_USER_DEFINED_MASK_MASK 0x01000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x02000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x04000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__PC_BUSY_USER_DEFINED_MASK_MASK 0x08000000L
+#define GRBM_SE3_PERFCOUNTER_SELECT__SEDC_BUSY_USER_DEFINED_MASK_MASK 0x10000000L
+//GRBM_PERFCOUNTER0_SELECT_HI
+#define GRBM_PERFCOUNTER0_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x1
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK__SHIFT 0x2
+#define GRBM_PERFCOUNTER0_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK__SHIFT 0x3
+#define GRBM_PERFCOUNTER0_SELECT_HI__CH_BUSY_USER_DEFINED_MASK__SHIFT 0x4
+#define GRBM_PERFCOUNTER0_SELECT_HI__PH_BUSY_USER_DEFINED_MASK__SHIFT 0x5
+#define GRBM_PERFCOUNTER0_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK__SHIFT 0x6
+#define GRBM_PERFCOUNTER0_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK__SHIFT 0x7
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x8
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x9
+#define GRBM_PERFCOUNTER0_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00000002L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK_MASK 0x00000004L
+#define GRBM_PERFCOUNTER0_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK_MASK 0x00000008L
+#define GRBM_PERFCOUNTER0_SELECT_HI__CH_BUSY_USER_DEFINED_MASK_MASK 0x00000010L
+#define GRBM_PERFCOUNTER0_SELECT_HI__PH_BUSY_USER_DEFINED_MASK_MASK 0x00000020L
+#define GRBM_PERFCOUNTER0_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK_MASK 0x00000040L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK_MASK 0x00000080L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x00000100L
+#define GRBM_PERFCOUNTER0_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x00000200L
+//GRBM_PERFCOUNTER1_SELECT_HI
+#define GRBM_PERFCOUNTER1_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK__SHIFT 0x1
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK__SHIFT 0x2
+#define GRBM_PERFCOUNTER1_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK__SHIFT 0x3
+#define GRBM_PERFCOUNTER1_SELECT_HI__CH_BUSY_USER_DEFINED_MASK__SHIFT 0x4
+#define GRBM_PERFCOUNTER1_SELECT_HI__PH_BUSY_USER_DEFINED_MASK__SHIFT 0x5
+#define GRBM_PERFCOUNTER1_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK__SHIFT 0x6
+#define GRBM_PERFCOUNTER1_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK__SHIFT 0x7
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK__SHIFT 0x8
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK__SHIFT 0x9
+#define GRBM_PERFCOUNTER1_SELECT_HI__UTCL1_BUSY_USER_DEFINED_MASK_MASK 0x00000002L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL2CC_BUSY_USER_DEFINED_MASK_MASK 0x00000004L
+#define GRBM_PERFCOUNTER1_SELECT_HI__SDMA_BUSY_USER_DEFINED_MASK_MASK 0x00000008L
+#define GRBM_PERFCOUNTER1_SELECT_HI__CH_BUSY_USER_DEFINED_MASK_MASK 0x00000010L
+#define GRBM_PERFCOUNTER1_SELECT_HI__PH_BUSY_USER_DEFINED_MASK_MASK 0x00000020L
+#define GRBM_PERFCOUNTER1_SELECT_HI__PMM_BUSY_USER_DEFINED_MASK_MASK 0x00000040L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GUS_BUSY_USER_DEFINED_MASK_MASK 0x00000080L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1CC_BUSY_USER_DEFINED_MASK_MASK 0x00000100L
+#define GRBM_PERFCOUNTER1_SELECT_HI__GL1H_BUSY_USER_DEFINED_MASK_MASK 0x00000200L
+//GE1_PERFCOUNTER0_SELECT
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER0_SELECT1
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER1_SELECT
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER1_SELECT1
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER2_SELECT
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER2_SELECT1
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE1_PERFCOUNTER3_SELECT
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE1_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE1_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE1_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE1_PERFCOUNTER3_SELECT1
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE1_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE1_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER0_SELECT
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER0_SELECT1
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER1_SELECT
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER1_SELECT1
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER2_SELECT
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER2_SELECT1
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER3_SELECT
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_DIST_PERFCOUNTER3_SELECT1
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_DIST_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER0_SELECT
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER0_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER0_SELECT1
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER1_SELECT
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER1_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER1_SELECT1
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER2_SELECT
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER2_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER2_SELECT1
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER3_SELECT
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL0__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE0__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL0_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER3_SELECT__PERF_MODE0_MASK 0xF0000000L
+//GE2_SE_PERFCOUNTER3_SELECT1
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GE2_SE_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER0_SELECT1
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER1_SELECT1
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER2_SELECT1
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SU_PERFCOUNTER3_SELECT1
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SU_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER0_SELECT1
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_SC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_SC_PERFCOUNTER1_SELECT
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER2_SELECT
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER3_SELECT
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER4_SELECT
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER5_SELECT
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER6_SELECT
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_SC_PERFCOUNTER7_SELECT
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_SC_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER0_SELECT
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SPI_PERFCOUNTER0_SELECT1
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER1_SELECT1
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER2_SELECT1
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER3_SELECT1
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SPI_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SPI_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SPI_PERFCOUNTER4_SELECT
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER5_SELECT
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SPI_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//SPI_PERFCOUNTER_BINS
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN__SHIFT 0x0
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX__SHIFT 0x4
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN__SHIFT 0x8
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX__SHIFT 0xc
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN__SHIFT 0x10
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX__SHIFT 0x14
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN__SHIFT 0x18
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX__SHIFT 0x1c
+#define SPI_PERFCOUNTER_BINS__BIN0_MIN_MASK 0x0000000FL
+#define SPI_PERFCOUNTER_BINS__BIN0_MAX_MASK 0x000000F0L
+#define SPI_PERFCOUNTER_BINS__BIN1_MIN_MASK 0x00000F00L
+#define SPI_PERFCOUNTER_BINS__BIN1_MAX_MASK 0x0000F000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MIN_MASK 0x000F0000L
+#define SPI_PERFCOUNTER_BINS__BIN2_MAX_MASK 0x00F00000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MIN_MASK 0x0F000000L
+#define SPI_PERFCOUNTER_BINS__BIN3_MAX_MASK 0xF0000000L
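/*
 * Illustrative sketch, not part of the generated header or the patch: how the
 * PERFCOUNTERn_SELECT / _SELECT1 shift/mask pairs defined above are typically
 * combined when programming a four-input perf counter. The wreg32() callback
 * and the register offsets passed in are hypothetical placeholders; real
 * offsets live in the matching *_offset.h header and the driver supplies its
 * own register accessors.
 */
static inline void spi_perfcounter0_program(void (*wreg32)(unsigned int reg, unsigned int val),
					    unsigned int select_reg, unsigned int select1_reg,
					    unsigned int sel0, unsigned int sel1,
					    unsigned int sel2, unsigned int sel3)
{
	unsigned int select = 0, select1 = 0;

	/* SELECT holds event selects 0 and 1 (CNTR_MODE/PERF_MODE* left at zero here). */
	select |= (sel0 << SPI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT) &
		  SPI_PERFCOUNTER0_SELECT__PERF_SEL_MASK;
	select |= (sel1 << SPI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT) &
		  SPI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK;

	/* SELECT1 holds event selects 2 and 3 for the same counter. */
	select1 |= (sel2 << SPI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT) &
		   SPI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK;
	select1 |= (sel3 << SPI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT) &
		   SPI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK;

	wreg32(select_reg, select);
	wreg32(select1_reg, select1);
}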
+//PC_PERFCOUNTER0_SELECT
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER1_SELECT
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER2_SELECT
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER3_SELECT
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PC_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PC_PERFCOUNTER0_SELECT1
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER1_SELECT1
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER2_SELECT1
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PC_PERFCOUNTER3_SELECT1
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PC_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PC_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SQ_PERFCOUNTER0_SELECT
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER1_SELECT
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER2_SELECT
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER3_SELECT
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER4_SELECT
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER5_SELECT
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER6_SELECT
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER7_SELECT
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER8_SELECT
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER8_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER8_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER8_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER9_SELECT
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER9_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER9_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER9_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER10_SELECT
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER10_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER10_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER10_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER11_SELECT
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER11_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER11_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER11_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER12_SELECT
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER12_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER12_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER12_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER13_SELECT
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER13_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER13_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER13_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER14_SELECT
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER14_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER14_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER14_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQ_PERFCOUNTER15_SELECT
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL__SHIFT 0x0
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE__SHIFT 0x14
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQ_PERFCOUNTER15_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQ_PERFCOUNTER15_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQ_PERFCOUNTER15_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER0_SELECT
+#define SQG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER0_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER0_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER1_SELECT
+#define SQG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER1_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER1_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER2_SELECT
+#define SQG_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER2_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER2_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER3_SELECT
+#define SQG_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER3_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER3_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER4_SELECT
+#define SQG_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER4_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER4_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER4_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER4_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER5_SELECT
+#define SQG_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER5_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER5_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER5_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER5_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER6_SELECT
+#define SQG_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER6_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER6_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER6_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER6_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER7_SELECT
+#define SQG_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define SQG_PERFCOUNTER7_SELECT__SPM_MODE__SHIFT 0x14
+#define SQG_PERFCOUNTER7_SELECT__PERF_MODE__SHIFT 0x1c
+#define SQG_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000001FFL
+#define SQG_PERFCOUNTER7_SELECT__SPM_MODE_MASK 0x00F00000L
+#define SQG_PERFCOUNTER7_SELECT__PERF_MODE_MASK 0xF0000000L
+//SQG_PERFCOUNTER_CTRL
+#define SQG_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQG_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQG_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQG_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF__SHIFT 0xe
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF__SHIFT 0xf
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF__SHIFT 0x10
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF__SHIFT 0x11
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF__SHIFT 0x12
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF__SHIFT 0x13
+#define SQG_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQG_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQG_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQG_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF_MASK 0x00004000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF_MASK 0x00008000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF_MASK 0x00010000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF_MASK 0x00020000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF_MASK 0x00040000L
+#define SQG_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF_MASK 0x00080000L
+//SQG_PERFCOUNTER_CTRL2
+#define SQG_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQG_PERFCOUNTER_CTRL2__VMID_EN__SHIFT 0x1
+#define SQG_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQG_PERFCOUNTER_CTRL2__VMID_EN_MASK 0x0001FFFEL
+//SQG_PERF_SAMPLE_FINISH
+#define SQG_PERF_SAMPLE_FINISH__STATUS__SHIFT 0x0
+#define SQG_PERF_SAMPLE_FINISH__STATUS_MASK 0x0000007FL
+//SQ_PERFCOUNTER_CTRL
+#define SQ_PERFCOUNTER_CTRL__PS_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL__GS_EN__SHIFT 0x2
+#define SQ_PERFCOUNTER_CTRL__HS_EN__SHIFT 0x4
+#define SQ_PERFCOUNTER_CTRL__CS_EN__SHIFT 0x6
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF__SHIFT 0xe
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF__SHIFT 0xf
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF__SHIFT 0x10
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF__SHIFT 0x11
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF__SHIFT 0x12
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF__SHIFT 0x13
+#define SQ_PERFCOUNTER_CTRL__PS_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL__GS_EN_MASK 0x00000004L
+#define SQ_PERFCOUNTER_CTRL__HS_EN_MASK 0x00000010L
+#define SQ_PERFCOUNTER_CTRL__CS_EN_MASK 0x00000040L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE0_PERF_MASK 0x00004000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME0PIPE1_PERF_MASK 0x00008000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE0_PERF_MASK 0x00010000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE1_PERF_MASK 0x00020000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE2_PERF_MASK 0x00040000L
+#define SQ_PERFCOUNTER_CTRL__DISABLE_ME1PIPE3_PERF_MASK 0x00080000L
+//SQ_PERFCOUNTER_CTRL2
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN__SHIFT 0x0
+#define SQ_PERFCOUNTER_CTRL2__VMID_EN__SHIFT 0x1
+#define SQ_PERFCOUNTER_CTRL2__FORCE_EN_MASK 0x00000001L
+#define SQ_PERFCOUNTER_CTRL2__VMID_EN_MASK 0x0001FFFEL
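/*
 * Illustrative sketch, not part of the generated header or the patch: building
 * an SQ_PERFCOUNTER_CTRL value that counts only the requested shader stages.
 * The boolean arguments are hypothetical; only the stage-enable masks defined
 * above are taken from the header.
 */
static inline unsigned int sq_perfcounter_ctrl_value(int ps, int gs, int hs, int cs)
{
	unsigned int ctrl = 0;

	if (ps)
		ctrl |= SQ_PERFCOUNTER_CTRL__PS_EN_MASK;
	if (gs)
		ctrl |= SQ_PERFCOUNTER_CTRL__GS_EN_MASK;
	if (hs)
		ctrl |= SQ_PERFCOUNTER_CTRL__HS_EN_MASK;
	if (cs)
		ctrl |= SQ_PERFCOUNTER_CTRL__CS_EN_MASK;

	return ctrl;
}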
+//SQ_THREAD_TRACE_BUF0_BASE
+#define SQ_THREAD_TRACE_BUF0_BASE__BASE_LO__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF0_BASE__BASE_LO_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BUF0_SIZE
+#define SQ_THREAD_TRACE_BUF0_SIZE__BASE_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF0_SIZE__SIZE__SHIFT 0x8
+#define SQ_THREAD_TRACE_BUF0_SIZE__BASE_HI_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_BUF0_SIZE__SIZE_MASK 0x3FFFFF00L
+//SQ_THREAD_TRACE_BUF1_BASE
+#define SQ_THREAD_TRACE_BUF1_BASE__BASE_LO__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF1_BASE__BASE_LO_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_BUF1_SIZE
+#define SQ_THREAD_TRACE_BUF1_SIZE__BASE_HI__SHIFT 0x0
+#define SQ_THREAD_TRACE_BUF1_SIZE__SIZE__SHIFT 0x8
+#define SQ_THREAD_TRACE_BUF1_SIZE__BASE_HI_MASK 0x0000000FL
+#define SQ_THREAD_TRACE_BUF1_SIZE__SIZE_MASK 0x3FFFFF00L
+//SQ_THREAD_TRACE_CTRL
+#define SQ_THREAD_TRACE_CTRL__MODE__SHIFT 0x0
+#define SQ_THREAD_TRACE_CTRL__ALL_VMID__SHIFT 0x2
+#define SQ_THREAD_TRACE_CTRL__GL1_PERF_EN__SHIFT 0x3
+#define SQ_THREAD_TRACE_CTRL__INTERRUPT_EN__SHIFT 0x4
+#define SQ_THREAD_TRACE_CTRL__DOUBLE_BUFFER__SHIFT 0x5
+#define SQ_THREAD_TRACE_CTRL__HIWATER__SHIFT 0x6
+#define SQ_THREAD_TRACE_CTRL__REG_AT_HWM__SHIFT 0x9
+#define SQ_THREAD_TRACE_CTRL__SPI_STALL_EN__SHIFT 0xb
+#define SQ_THREAD_TRACE_CTRL__SQ_STALL_EN__SHIFT 0xc
+#define SQ_THREAD_TRACE_CTRL__UTIL_TIMER__SHIFT 0xd
+#define SQ_THREAD_TRACE_CTRL__WAVESTART_MODE__SHIFT 0xe
+#define SQ_THREAD_TRACE_CTRL__RT_FREQ__SHIFT 0x10
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_MARKERS__SHIFT 0x12
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_DRAWS__SHIFT 0x13
+#define SQ_THREAD_TRACE_CTRL__LOWATER_OFFSET__SHIFT 0x14
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_PADDING_DIS__SHIFT 0x1c
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_MODE__SHIFT 0x1d
+#define SQ_THREAD_TRACE_CTRL__DRAW_EVENT_EN__SHIFT 0x1f
+#define SQ_THREAD_TRACE_CTRL__MODE_MASK 0x00000003L
+#define SQ_THREAD_TRACE_CTRL__ALL_VMID_MASK 0x00000004L
+#define SQ_THREAD_TRACE_CTRL__GL1_PERF_EN_MASK 0x00000008L
+#define SQ_THREAD_TRACE_CTRL__INTERRUPT_EN_MASK 0x00000010L
+#define SQ_THREAD_TRACE_CTRL__DOUBLE_BUFFER_MASK 0x00000020L
+#define SQ_THREAD_TRACE_CTRL__HIWATER_MASK 0x000001C0L
+#define SQ_THREAD_TRACE_CTRL__REG_AT_HWM_MASK 0x00000600L
+#define SQ_THREAD_TRACE_CTRL__SPI_STALL_EN_MASK 0x00000800L
+#define SQ_THREAD_TRACE_CTRL__SQ_STALL_EN_MASK 0x00001000L
+#define SQ_THREAD_TRACE_CTRL__UTIL_TIMER_MASK 0x00002000L
+#define SQ_THREAD_TRACE_CTRL__WAVESTART_MODE_MASK 0x0000C000L
+#define SQ_THREAD_TRACE_CTRL__RT_FREQ_MASK 0x00030000L
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_MARKERS_MASK 0x00040000L
+#define SQ_THREAD_TRACE_CTRL__SYNC_COUNT_DRAWS_MASK 0x00080000L
+#define SQ_THREAD_TRACE_CTRL__LOWATER_OFFSET_MASK 0x00700000L
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_PADDING_DIS_MASK 0x10000000L
+#define SQ_THREAD_TRACE_CTRL__AUTO_FLUSH_MODE_MASK 0x20000000L
+#define SQ_THREAD_TRACE_CTRL__DRAW_EVENT_EN_MASK 0x80000000L
+//SQ_THREAD_TRACE_MASK
+#define SQ_THREAD_TRACE_MASK__SIMD_SEL__SHIFT 0x0
+#define SQ_THREAD_TRACE_MASK__WGP_SEL__SHIFT 0x4
+#define SQ_THREAD_TRACE_MASK__SA_SEL__SHIFT 0x9
+#define SQ_THREAD_TRACE_MASK__WTYPE_INCLUDE__SHIFT 0xa
+#define SQ_THREAD_TRACE_MASK__EXCLUDE_NONDETAIL_SHADERDATA__SHIFT 0x11
+#define SQ_THREAD_TRACE_MASK__SIMD_SEL_MASK 0x00000003L
+#define SQ_THREAD_TRACE_MASK__WGP_SEL_MASK 0x000000F0L
+#define SQ_THREAD_TRACE_MASK__SA_SEL_MASK 0x00000200L
+#define SQ_THREAD_TRACE_MASK__WTYPE_INCLUDE_MASK 0x0001FC00L
+#define SQ_THREAD_TRACE_MASK__EXCLUDE_NONDETAIL_SHADERDATA_MASK 0x00020000L
+//SQ_THREAD_TRACE_TOKEN_MASK
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_EXCLUDE__SHIFT 0x0
+#define SQ_THREAD_TRACE_TOKEN_MASK__TTRACE_EXEC__SHIFT 0xb
+#define SQ_THREAD_TRACE_TOKEN_MASK__BOP_EVENTS_TOKEN_INCLUDE__SHIFT 0xc
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_INCLUDE__SHIFT 0x10
+#define SQ_THREAD_TRACE_TOKEN_MASK__INST_EXCLUDE__SHIFT 0x18
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_EXCLUDE__SHIFT 0x1a
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DETAIL_ALL__SHIFT 0x1f
+#define SQ_THREAD_TRACE_TOKEN_MASK__TOKEN_EXCLUDE_MASK 0x000007FFL
+#define SQ_THREAD_TRACE_TOKEN_MASK__TTRACE_EXEC_MASK 0x00000800L
+#define SQ_THREAD_TRACE_TOKEN_MASK__BOP_EVENTS_TOKEN_INCLUDE_MASK 0x00001000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_INCLUDE_MASK 0x00FF0000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__INST_EXCLUDE_MASK 0x03000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_EXCLUDE_MASK 0x1C000000L
+#define SQ_THREAD_TRACE_TOKEN_MASK__REG_DETAIL_ALL_MASK 0x80000000L
+//SQ_THREAD_TRACE_WPTR
+#define SQ_THREAD_TRACE_WPTR__OFFSET__SHIFT 0x0
+#define SQ_THREAD_TRACE_WPTR__BUFFER_ID__SHIFT 0x1f
+#define SQ_THREAD_TRACE_WPTR__OFFSET_MASK 0x1FFFFFFFL
+#define SQ_THREAD_TRACE_WPTR__BUFFER_ID_MASK 0x80000000L
+//SQ_THREAD_TRACE_STATUS
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE__SHIFT 0xc
+#define SQ_THREAD_TRACE_STATUS__WRITE_ERROR__SHIFT 0x18
+#define SQ_THREAD_TRACE_STATUS__BUSY__SHIFT 0x19
+#define SQ_THREAD_TRACE_STATUS__OWNER_VMID__SHIFT 0x1c
+#define SQ_THREAD_TRACE_STATUS__FINISH_PENDING_MASK 0x00000FFFL
+#define SQ_THREAD_TRACE_STATUS__FINISH_DONE_MASK 0x00FFF000L
+#define SQ_THREAD_TRACE_STATUS__WRITE_ERROR_MASK 0x01000000L
+#define SQ_THREAD_TRACE_STATUS__BUSY_MASK 0x02000000L
+#define SQ_THREAD_TRACE_STATUS__OWNER_VMID_MASK 0xF0000000L
+//SQ_THREAD_TRACE_STATUS2
+#define SQ_THREAD_TRACE_STATUS2__BUF0_FULL__SHIFT 0x0
+#define SQ_THREAD_TRACE_STATUS2__BUF1_FULL__SHIFT 0x1
+#define SQ_THREAD_TRACE_STATUS2__PACKET_LOST_BUF_NO_LOCKDOWN__SHIFT 0x4
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_STATUS__SHIFT 0x8
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE__SHIFT 0xd
+#define SQ_THREAD_TRACE_STATUS2__WRITE_BUF_FULL__SHIFT 0xe
+#define SQ_THREAD_TRACE_STATUS2__BUF0_FULL_MASK 0x00000001L
+#define SQ_THREAD_TRACE_STATUS2__BUF1_FULL_MASK 0x00000002L
+#define SQ_THREAD_TRACE_STATUS2__PACKET_LOST_BUF_NO_LOCKDOWN_MASK 0x00000010L
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_STATUS_MASK 0x00001F00L
+#define SQ_THREAD_TRACE_STATUS2__BUF_ISSUE_MASK 0x00002000L
+#define SQ_THREAD_TRACE_STATUS2__WRITE_BUF_FULL_MASK 0x00004000L
+//SQ_THREAD_TRACE_GFX_DRAW_CNTR
+#define SQ_THREAD_TRACE_GFX_DRAW_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_GFX_DRAW_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_GFX_MARKER_CNTR
+#define SQ_THREAD_TRACE_GFX_MARKER_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_GFX_MARKER_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_HP3D_DRAW_CNTR
+#define SQ_THREAD_TRACE_HP3D_DRAW_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_HP3D_DRAW_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_HP3D_MARKER_CNTR
+#define SQ_THREAD_TRACE_HP3D_MARKER_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_HP3D_MARKER_CNTR__CNTR_MASK 0xFFFFFFFFL
+//SQ_THREAD_TRACE_DROPPED_CNTR
+#define SQ_THREAD_TRACE_DROPPED_CNTR__CNTR__SHIFT 0x0
+#define SQ_THREAD_TRACE_DROPPED_CNTR__CNTR_MASK 0xFFFFFFFFL
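/*
 * Illustrative sketch, not part of the generated header or the patch:
 * extracting the thread-trace write pointer and status fields from raw
 * register readbacks using the masks above. The raw values are assumed to
 * come from whatever register-read helper the driver uses; nothing here
 * implies a particular accessor or buffer layout.
 */
struct sq_thread_trace_state {
	unsigned int wptr_offset;   /* write offset within the trace buffer   */
	unsigned int buffer_id;     /* which of the two buffers is being used */
	int busy;                   /* non-zero while the trace unit is busy  */
	unsigned int owner_vmid;    /* VMID that owns the current trace       */
};

static inline void sq_thread_trace_decode(unsigned int wptr_raw, unsigned int status_raw,
					  struct sq_thread_trace_state *st)
{
	st->wptr_offset = (wptr_raw & SQ_THREAD_TRACE_WPTR__OFFSET_MASK) >>
			  SQ_THREAD_TRACE_WPTR__OFFSET__SHIFT;
	st->buffer_id   = (wptr_raw & SQ_THREAD_TRACE_WPTR__BUFFER_ID_MASK) >>
			  SQ_THREAD_TRACE_WPTR__BUFFER_ID__SHIFT;
	st->busy        = !!(status_raw & SQ_THREAD_TRACE_STATUS__BUSY_MASK);
	st->owner_vmid  = (status_raw & SQ_THREAD_TRACE_STATUS__OWNER_VMID_MASK) >>
			  SQ_THREAD_TRACE_STATUS__OWNER_VMID__SHIFT;
}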
+//GCEA_PERFCOUNTER2_SELECT
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCEA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCEA_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCEA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCEA_PERFCOUNTER2_SELECT1
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCEA_PERFCOUNTER2_MODE
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GCEA_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GCEA_PERFCOUNTER0_CFG
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER1_CFG
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GCEA_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GCEA_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GCEA_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GCEA_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GCEA_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GCEA_PERFCOUNTER_RSLT_CNTL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GCEA_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GCEA_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
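/*
 * Illustrative sketch, not part of the generated header or the patch: the GCEA
 * counters use a CFG + RSLT_CNTL pair instead of SELECT/SELECT1. The helpers
 * below only compose register values from the masks above; the register
 * offsets and write helper are left to the driver and are not implied here.
 */
static inline unsigned int gcea_perfcounter0_cfg_value(unsigned int perf_sel)
{
	/* Select the event, enable the counter and request a clear. */
	return ((perf_sel << GCEA_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
		GCEA_PERFCOUNTER0_CFG__PERF_SEL_MASK) |
	       GCEA_PERFCOUNTER0_CFG__ENABLE_MASK |
	       GCEA_PERFCOUNTER0_CFG__CLEAR_MASK;
}

static inline unsigned int gcea_perfcounter_rslt_cntl_value(unsigned int counter)
{
	/* Route the chosen counter to the result readback and enable counting. */
	return ((counter << GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT) &
		GCEA_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK) |
	       GCEA_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK;
}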
+//SX_PERFCOUNTER0_SELECT
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER2_SELECT
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER3_SELECT
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define SX_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define SX_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define SX_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//SX_PERFCOUNTER0_SELECT1
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//SX_PERFCOUNTER1_SELECT1
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define SX_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define SX_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GDS_PERFCOUNTER0_SELECT1
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER1_SELECT1
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER2_SELECT1
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GDS_PERFCOUNTER3_SELECT1
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GDS_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GDS_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TA_PERFCOUNTER0_SELECT1
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TA_PERFCOUNTER1_SELECT
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TD_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TD_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TD_PERFCOUNTER0_SELECT1
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TD_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TD_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TD_PERFCOUNTER1_SELECT
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TD_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TD_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TD_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER0_SELECT1
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER1_SELECT1
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define TCP_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define TCP_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//TCP_PERFCOUNTER2_SELECT
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//TCP_PERFCOUNTER3_SELECT
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define TCP_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define TCP_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define TCP_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER0_SELECT
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2C_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER0_SELECT1
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2C_PERFCOUNTER1_SELECT
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2C_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER1_SELECT1
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2C_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2C_PERFCOUNTER2_SELECT
+#define GL2C_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2C_PERFCOUNTER3_SELECT
+#define GL2C_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2C_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2C_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2C_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2C_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2C_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER0_SELECT
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2A_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER0_SELECT1
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2A_PERFCOUNTER1_SELECT
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL2A_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER1_SELECT1
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL2A_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL2A_PERFCOUNTER2_SELECT
+#define GL2A_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL2A_PERFCOUNTER3_SELECT
+#define GL2A_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL2A_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL2A_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL2A_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL2A_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL2A_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER0_SELECT
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1C_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1C_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1C_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER0_SELECT1
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1C_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1C_PERFCOUNTER1_SELECT
+#define GL1C_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER2_SELECT
+#define GL1C_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1C_PERFCOUNTER3_SELECT
+#define GL1C_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1C_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1C_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1C_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1C_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1C_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER0_SELECT
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHC_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHC_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHC_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER0_SELECT1
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHC_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHC_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHC_PERFCOUNTER1_SELECT
+#define CHC_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER2_SELECT
+#define CHC_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHC_PERFCOUNTER3_SELECT
+#define CHC_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHC_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHC_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHC_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHC_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHC_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER0_SELECT
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHCG_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHCG_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHCG_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER0_SELECT1
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHCG_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHCG_PERFCOUNTER1_SELECT
+#define CHCG_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER2_SELECT
+#define CHCG_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHCG_PERFCOUNTER3_SELECT
+#define CHCG_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHCG_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHCG_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHCG_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHCG_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHCG_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER_FILTER
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE__SHIFT 0x0
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL__SHIFT 0x1
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE__SHIFT 0x4
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL__SHIFT 0x5
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE__SHIFT 0xa
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL__SHIFT 0xb
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE__SHIFT 0xc
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL__SHIFT 0xd
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE__SHIFT 0x11
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL__SHIFT 0x12
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE__SHIFT 0x15
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL__SHIFT 0x16
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_ENABLE_MASK 0x00000001L
+#define CB_PERFCOUNTER_FILTER__OP_FILTER_SEL_MASK 0x0000000EL
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_ENABLE_MASK 0x00000010L
+#define CB_PERFCOUNTER_FILTER__FORMAT_FILTER_SEL_MASK 0x000003E0L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_ENABLE_MASK 0x00000400L
+#define CB_PERFCOUNTER_FILTER__CLEAR_FILTER_SEL_MASK 0x00000800L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_ENABLE_MASK 0x00001000L
+#define CB_PERFCOUNTER_FILTER__MRT_FILTER_SEL_MASK 0x0000E000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_ENABLE_MASK 0x00020000L
+#define CB_PERFCOUNTER_FILTER__NUM_SAMPLES_FILTER_SEL_MASK 0x001C0000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_ENABLE_MASK 0x00200000L
+#define CB_PERFCOUNTER_FILTER__NUM_FRAGMENTS_FILTER_SEL_MASK 0x00C00000L
+//CB_PERFCOUNTER0_SELECT
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER0_SELECT1
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define CB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//CB_PERFCOUNTER1_SELECT
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER2_SELECT
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CB_PERFCOUNTER3_SELECT
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER0_SELECT1
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER1_SELECT1
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define DB_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define DB_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//DB_PERFCOUNTER2_SELECT
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//DB_PERFCOUNTER3_SELECT
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define DB_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define DB_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define DB_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RLC_SPM_PERFMON_CNTL
+#define RLC_SPM_PERFMON_CNTL__RESERVED1__SHIFT 0x0
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE__SHIFT 0xc
+#define RLC_SPM_PERFMON_CNTL__DISABLE_GFXCLOCK_COUNT__SHIFT 0xe
+#define RLC_SPM_PERFMON_CNTL__RESERVED__SHIFT 0xf
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL__SHIFT 0x10
+#define RLC_SPM_PERFMON_CNTL__RESERVED1_MASK 0x00000FFFL
+#define RLC_SPM_PERFMON_CNTL__PERFMON_RING_MODE_MASK 0x00003000L
+#define RLC_SPM_PERFMON_CNTL__DISABLE_GFXCLOCK_COUNT_MASK 0x00004000L
+#define RLC_SPM_PERFMON_CNTL__RESERVED_MASK 0x00008000L
+#define RLC_SPM_PERFMON_CNTL__PERFMON_SAMPLE_INTERVAL_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_BASE_LO
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_LO__RING_BASE_LO_MASK 0xFFFFFFFFL
+//RLC_SPM_PERFMON_RING_BASE_HI
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED__SHIFT 0x10
+#define RLC_SPM_PERFMON_RING_BASE_HI__RING_BASE_HI_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_RING_BASE_HI__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PERFMON_RING_SIZE
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE__SHIFT 0x0
+#define RLC_SPM_PERFMON_RING_SIZE__RING_BASE_SIZE_MASK 0xFFFFFFFFL
+//RLC_SPM_RING_WRPTR
+#define RLC_SPM_RING_WRPTR__RESERVED__SHIFT 0x0
+#define RLC_SPM_RING_WRPTR__PERFMON_RING_WRPTR__SHIFT 0x5
+#define RLC_SPM_RING_WRPTR__RESERVED_MASK 0x0000001FL
+#define RLC_SPM_RING_WRPTR__PERFMON_RING_WRPTR_MASK 0xFFFFFFE0L
+//RLC_SPM_RING_RDPTR
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR__SHIFT 0x0
+#define RLC_SPM_RING_RDPTR__PERFMON_RING_RDPTR_MASK 0xFFFFFFFFL
+//RLC_SPM_SEGMENT_THRESHOLD
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD__SHIFT 0x0
+#define RLC_SPM_SEGMENT_THRESHOLD__RESERVED__SHIFT 0x8
+#define RLC_SPM_SEGMENT_THRESHOLD__NUM_SEGMENT_THRESHOLD_MASK 0x000000FFL
+#define RLC_SPM_SEGMENT_THRESHOLD__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_PERFMON_SEGMENT_SIZE
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__TOTAL_NUM_SEGMENT__SHIFT 0x0
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_SEGMENT__SHIFT 0x10
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE_NUM_SEGMENT__SHIFT 0x18
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__TOTAL_NUM_SEGMENT_MASK 0x0000FFFFL
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__GLOBAL_NUM_SEGMENT_MASK 0x00FF0000L
+#define RLC_SPM_PERFMON_SEGMENT_SIZE__SE_NUM_SEGMENT_MASK 0xFF000000L
+//RLC_SPM_GLOBAL_MUXSEL_ADDR
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_GLOBAL_MUXSEL_DATA
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL0__SHIFT 0x0
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL1__SHIFT 0x10
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL0_MASK 0x0000FFFFL
+#define RLC_SPM_GLOBAL_MUXSEL_DATA__SEL1_MASK 0xFFFF0000L
+//RLC_SPM_SE_MUXSEL_ADDR
+#define RLC_SPM_SE_MUXSEL_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_SE_MUXSEL_DATA
+#define RLC_SPM_SE_MUXSEL_DATA__SEL0__SHIFT 0x0
+#define RLC_SPM_SE_MUXSEL_DATA__SEL1__SHIFT 0x10
+#define RLC_SPM_SE_MUXSEL_DATA__SEL0_MASK 0x0000FFFFL
+#define RLC_SPM_SE_MUXSEL_DATA__SEL1_MASK 0xFFFF0000L
+//RLC_SPM_ACCUM_DATARAM_ADDR
+#define RLC_SPM_ACCUM_DATARAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_ACCUM_DATARAM_ADDR__addr_MASK 0x0000007FL
+#define RLC_SPM_ACCUM_DATARAM_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_ACCUM_DATARAM_DATA
+#define RLC_SPM_ACCUM_DATARAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_DATA__data_MASK 0xFFFFFFFFL
+//RLC_SPM_ACCUM_SWA_DATARAM_ADDR
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__RESERVED__SHIFT 0x7
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__addr_MASK 0x0000007FL
+#define RLC_SPM_ACCUM_SWA_DATARAM_ADDR__RESERVED_MASK 0xFFFFFF80L
+//RLC_SPM_ACCUM_SWA_DATARAM_DATA
+#define RLC_SPM_ACCUM_SWA_DATARAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_SWA_DATARAM_DATA__data_MASK 0xFFFFFFFFL
+//RLC_SPM_ACCUM_CTRLRAM_ADDR
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__addr__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__RESERVED__SHIFT 0xb
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__addr_MASK 0x000007FFL
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR__RESERVED_MASK 0xFFFFF800L
+//RLC_SPM_ACCUM_CTRLRAM_DATA
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__data__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__RESERVED__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__data_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_CTRLRAM_DATA__RESERVED_MASK 0xFFFFFF00L
+//RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__global_offset__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_se_offset__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_global_offset__SHIFT 0x10
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__RESERVED__SHIFT 0x18
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__global_offset_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_se_offset_MASK 0x0000FF00L
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__spmwithaccum_global_offset_MASK 0x00FF0000L
+#define RLC_SPM_ACCUM_CTRLRAM_ADDR_OFFSET__RESERVED_MASK 0xFF000000L
+//RLC_SPM_ACCUM_STATUS
+#define RLC_SPM_ACCUM_STATUS__NumbSamplesCompleted__SHIFT 0x0
+#define RLC_SPM_ACCUM_STATUS__AccumDone__SHIFT 0x8
+#define RLC_SPM_ACCUM_STATUS__SpmDone__SHIFT 0x9
+#define RLC_SPM_ACCUM_STATUS__AccumOverflow__SHIFT 0xa
+#define RLC_SPM_ACCUM_STATUS__AccumArmed__SHIFT 0xb
+#define RLC_SPM_ACCUM_STATUS__SequenceInProgress__SHIFT 0xc
+#define RLC_SPM_ACCUM_STATUS__FinalSequenceInProgress__SHIFT 0xd
+#define RLC_SPM_ACCUM_STATUS__AllFifosEmpty__SHIFT 0xe
+#define RLC_SPM_ACCUM_STATUS__FSMIsIdle__SHIFT 0xf
+#define RLC_SPM_ACCUM_STATUS__SwaAccumDone__SHIFT 0x10
+#define RLC_SPM_ACCUM_STATUS__SwaSpmDone__SHIFT 0x11
+#define RLC_SPM_ACCUM_STATUS__SwaAccumOverflow__SHIFT 0x12
+#define RLC_SPM_ACCUM_STATUS__SwaAccumArmed__SHIFT 0x13
+#define RLC_SPM_ACCUM_STATUS__AllSegsDone__SHIFT 0x14
+#define RLC_SPM_ACCUM_STATUS__RearmSwaPending__SHIFT 0x15
+#define RLC_SPM_ACCUM_STATUS__RearmSppPending__SHIFT 0x16
+#define RLC_SPM_ACCUM_STATUS__MultiSampleAborted__SHIFT 0x17
+#define RLC_SPM_ACCUM_STATUS__RESERVED__SHIFT 0x18
+#define RLC_SPM_ACCUM_STATUS__NumbSamplesCompleted_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_STATUS__AccumDone_MASK 0x00000100L
+#define RLC_SPM_ACCUM_STATUS__SpmDone_MASK 0x00000200L
+#define RLC_SPM_ACCUM_STATUS__AccumOverflow_MASK 0x00000400L
+#define RLC_SPM_ACCUM_STATUS__AccumArmed_MASK 0x00000800L
+#define RLC_SPM_ACCUM_STATUS__SequenceInProgress_MASK 0x00001000L
+#define RLC_SPM_ACCUM_STATUS__FinalSequenceInProgress_MASK 0x00002000L
+#define RLC_SPM_ACCUM_STATUS__AllFifosEmpty_MASK 0x00004000L
+#define RLC_SPM_ACCUM_STATUS__FSMIsIdle_MASK 0x00008000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumDone_MASK 0x00010000L
+#define RLC_SPM_ACCUM_STATUS__SwaSpmDone_MASK 0x00020000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumOverflow_MASK 0x00040000L
+#define RLC_SPM_ACCUM_STATUS__SwaAccumArmed_MASK 0x00080000L
+#define RLC_SPM_ACCUM_STATUS__AllSegsDone_MASK 0x00100000L
+#define RLC_SPM_ACCUM_STATUS__RearmSwaPending_MASK 0x00200000L
+#define RLC_SPM_ACCUM_STATUS__RearmSppPending_MASK 0x00400000L
+#define RLC_SPM_ACCUM_STATUS__MultiSampleAborted_MASK 0x00800000L
+#define RLC_SPM_ACCUM_STATUS__RESERVED_MASK 0xFF000000L
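The `*__SHIFT`/`*_MASK` pairs above are plain bitfield descriptors, so a value read back from RLC_SPM_ACCUM_STATUS is decoded by masking and shifting. A minimal, self-contained sketch follows; the local GET_FIELD() helper is only an illustrative stand-in for the driver's usual REG_GET_FIELD()-style macro, and the register read itself is assumed rather than shown.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the driver's field-extract helper. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Decode an already-read RLC_SPM_ACCUM_STATUS value: report how many
 * samples completed and whether both the accumulator and SPM are done. */
static inline bool spm_accum_sample_done(uint32_t status, uint8_t *completed)
{
	*completed = GET_FIELD(status, RLC_SPM_ACCUM_STATUS, NumbSamplesCompleted);

	return GET_FIELD(status, RLC_SPM_ACCUM_STATUS, AccumDone) &&
	       GET_FIELD(status, RLC_SPM_ACCUM_STATUS, SpmDone);
}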
+//RLC_SPM_ACCUM_CTRL
+#define RLC_SPM_ACCUM_CTRL__StrobeResetPerfMonitors__SHIFT 0x0
+#define RLC_SPM_ACCUM_CTRL__StrobeStartAccumulation__SHIFT 0x1
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmAccum__SHIFT 0x2
+#define RLC_SPM_ACCUM_CTRL__StrobeResetSpmBlock__SHIFT 0x3
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSpm__SHIFT 0x4
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmSwaAccum__SHIFT 0x8
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSwa__SHIFT 0x9
+#define RLC_SPM_ACCUM_CTRL__StrobePerfmonSampleWires__SHIFT 0xa
+#define RLC_SPM_ACCUM_CTRL__RESERVED__SHIFT 0xb
+#define RLC_SPM_ACCUM_CTRL__StrobeResetPerfMonitors_MASK 0x00000001L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartAccumulation_MASK 0x00000002L
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmAccum_MASK 0x00000004L
+#define RLC_SPM_ACCUM_CTRL__StrobeResetSpmBlock_MASK 0x00000008L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSpm_MASK 0x000000F0L
+#define RLC_SPM_ACCUM_CTRL__StrobeRearmSwaAccum_MASK 0x00000100L
+#define RLC_SPM_ACCUM_CTRL__StrobeStartSwa_MASK 0x00000200L
+#define RLC_SPM_ACCUM_CTRL__StrobePerfmonSampleWires_MASK 0x00000400L
+#define RLC_SPM_ACCUM_CTRL__RESERVED_MASK 0xFFFFF800L
+//RLC_SPM_ACCUM_MODE
+#define RLC_SPM_ACCUM_MODE__EnableAccum__SHIFT 0x0
+#define RLC_SPM_ACCUM_MODE__EnableSpmWithAccumMode__SHIFT 0x1
+#define RLC_SPM_ACCUM_MODE__EnableSPPMode__SHIFT 0x2
+#define RLC_SPM_ACCUM_MODE__AutoResetPerfmonDisable__SHIFT 0x3
+#define RLC_SPM_ACCUM_MODE__AutoAccumEn__SHIFT 0x5
+#define RLC_SPM_ACCUM_MODE__SwaAutoAccumEn__SHIFT 0x6
+#define RLC_SPM_ACCUM_MODE__AutoSpmEn__SHIFT 0x7
+#define RLC_SPM_ACCUM_MODE__SwaAutoSpmEn__SHIFT 0x8
+#define RLC_SPM_ACCUM_MODE__Globals_LoadOverride__SHIFT 0x9
+#define RLC_SPM_ACCUM_MODE__Globals_SwaLoadOverride__SHIFT 0xa
+#define RLC_SPM_ACCUM_MODE__SE0_LoadOverride__SHIFT 0xb
+#define RLC_SPM_ACCUM_MODE__SE0_SwaLoadOverride__SHIFT 0xc
+#define RLC_SPM_ACCUM_MODE__SE1_LoadOverride__SHIFT 0xd
+#define RLC_SPM_ACCUM_MODE__SE1_SwaLoadOverride__SHIFT 0xe
+#define RLC_SPM_ACCUM_MODE__SE2_LoadOverride__SHIFT 0xf
+#define RLC_SPM_ACCUM_MODE__SE2_SwaLoadOverride__SHIFT 0x10
+#define RLC_SPM_ACCUM_MODE__EnableAccum_MASK 0x00000001L
+#define RLC_SPM_ACCUM_MODE__EnableSpmWithAccumMode_MASK 0x00000002L
+#define RLC_SPM_ACCUM_MODE__EnableSPPMode_MASK 0x00000004L
+#define RLC_SPM_ACCUM_MODE__AutoResetPerfmonDisable_MASK 0x00000008L
+#define RLC_SPM_ACCUM_MODE__AutoAccumEn_MASK 0x00000020L
+#define RLC_SPM_ACCUM_MODE__SwaAutoAccumEn_MASK 0x00000040L
+#define RLC_SPM_ACCUM_MODE__AutoSpmEn_MASK 0x00000080L
+#define RLC_SPM_ACCUM_MODE__SwaAutoSpmEn_MASK 0x00000100L
+#define RLC_SPM_ACCUM_MODE__Globals_LoadOverride_MASK 0x00000200L
+#define RLC_SPM_ACCUM_MODE__Globals_SwaLoadOverride_MASK 0x00000400L
+#define RLC_SPM_ACCUM_MODE__SE0_LoadOverride_MASK 0x00000800L
+#define RLC_SPM_ACCUM_MODE__SE0_SwaLoadOverride_MASK 0x00001000L
+#define RLC_SPM_ACCUM_MODE__SE1_LoadOverride_MASK 0x00002000L
+#define RLC_SPM_ACCUM_MODE__SE1_SwaLoadOverride_MASK 0x00004000L
+#define RLC_SPM_ACCUM_MODE__SE2_LoadOverride_MASK 0x00008000L
+#define RLC_SPM_ACCUM_MODE__SE2_SwaLoadOverride_MASK 0x00010000L
+//RLC_SPM_ACCUM_THRESHOLD
+#define RLC_SPM_ACCUM_THRESHOLD__Threshold__SHIFT 0x0
+#define RLC_SPM_ACCUM_THRESHOLD__Threshold_MASK 0x0000FFFFL
+//RLC_SPM_ACCUM_SAMPLES_REQUESTED
+#define RLC_SPM_ACCUM_SAMPLES_REQUESTED__SamplesRequested__SHIFT 0x0
+#define RLC_SPM_ACCUM_SAMPLES_REQUESTED__SamplesRequested_MASK 0x000000FFL
+//RLC_SPM_ACCUM_DATARAM_WRCOUNT
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__DataRamWrCount__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__RESERVED__SHIFT 0x13
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__DataRamWrCount_MASK 0x0007FFFFL
+#define RLC_SPM_ACCUM_DATARAM_WRCOUNT__RESERVED_MASK 0xFFF80000L
+//RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__spp_addr_region__SHIFT 0x0
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__swa_addr_region__SHIFT 0x8
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__RESERVED__SHIFT 0x10
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__spp_addr_region_MASK 0x000000FFL
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__swa_addr_region_MASK 0x0000FF00L
+#define RLC_SPM_ACCUM_DATARAM_32BITCNTRS_REGIONS__RESERVED_MASK 0xFFFF0000L
+//RLC_SPM_PAUSE
+#define RLC_SPM_PAUSE__PAUSE__SHIFT 0x0
+#define RLC_SPM_PAUSE__PAUSED__SHIFT 0x1
+#define RLC_SPM_PAUSE__PAUSE_MASK 0x00000001L
+#define RLC_SPM_PAUSE__PAUSED_MASK 0x00000002L
+//RLC_SPM_STATUS
+#define RLC_SPM_STATUS__CTL_BUSY__SHIFT 0x0
+#define RLC_SPM_STATUS__RSPM_REG_BUSY__SHIFT 0x1
+#define RLC_SPM_STATUS__SPM_RSPM_BUSY__SHIFT 0x2
+#define RLC_SPM_STATUS__SPM_RSPM_IO_BUSY__SHIFT 0x3
+#define RLC_SPM_STATUS__SE_RSPM_IO_BUSY__SHIFT 0x4
+#define RLC_SPM_STATUS__ACCUM_BUSY__SHIFT 0xf
+#define RLC_SPM_STATUS__FSM_MASTER_STATE__SHIFT 0x10
+#define RLC_SPM_STATUS__FSM_MEMORY_STATE__SHIFT 0x14
+#define RLC_SPM_STATUS__CTL_REQ_STATE__SHIFT 0x18
+#define RLC_SPM_STATUS__CTL_RET_STATE__SHIFT 0x1a
+#define RLC_SPM_STATUS__CTL_BUSY_MASK 0x00000001L
+#define RLC_SPM_STATUS__RSPM_REG_BUSY_MASK 0x00000002L
+#define RLC_SPM_STATUS__SPM_RSPM_BUSY_MASK 0x00000004L
+#define RLC_SPM_STATUS__SPM_RSPM_IO_BUSY_MASK 0x00000008L
+#define RLC_SPM_STATUS__SE_RSPM_IO_BUSY_MASK 0x00000FF0L
+#define RLC_SPM_STATUS__ACCUM_BUSY_MASK 0x00008000L
+#define RLC_SPM_STATUS__FSM_MASTER_STATE_MASK 0x000F0000L
+#define RLC_SPM_STATUS__FSM_MEMORY_STATE_MASK 0x00F00000L
+#define RLC_SPM_STATUS__CTL_REQ_STATE_MASK 0x03000000L
+#define RLC_SPM_STATUS__CTL_RET_STATE_MASK 0x04000000L
+//RLC_SPM_GFXCLOCK_LOWCOUNT
+#define RLC_SPM_GFXCLOCK_LOWCOUNT__GFXCLOCK_LOWCOUNT__SHIFT 0x0
+#define RLC_SPM_GFXCLOCK_LOWCOUNT__GFXCLOCK_LOWCOUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_GFXCLOCK_HIGHCOUNT
+#define RLC_SPM_GFXCLOCK_HIGHCOUNT__GFXCLOCK_HIGHCOUNT__SHIFT 0x0
+#define RLC_SPM_GFXCLOCK_HIGHCOUNT__GFXCLOCK_HIGHCOUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_MODE
+#define RLC_SPM_MODE__MODE__SHIFT 0x0
+#define RLC_SPM_MODE__MODE_MASK 0x00000001L
+//RLC_SPM_RSPM_REQ_DATA_LO
+#define RLC_SPM_RSPM_REQ_DATA_LO__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RSPM_REQ_DATA_HI
+#define RLC_SPM_RSPM_REQ_DATA_HI__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_DATA_HI__DATA_MASK 0x00000FFFL
+//RLC_SPM_RSPM_REQ_OP
+#define RLC_SPM_RSPM_REQ_OP__OP__SHIFT 0x0
+#define RLC_SPM_RSPM_REQ_OP__OP_MASK 0x0000000FL
+//RLC_SPM_RSPM_RET_DATA
+#define RLC_SPM_RSPM_RET_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_RSPM_RET_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_RSPM_RET_OP
+#define RLC_SPM_RSPM_RET_OP__OP__SHIFT 0x0
+#define RLC_SPM_RSPM_RET_OP__VALID__SHIFT 0x8
+#define RLC_SPM_RSPM_RET_OP__OP_MASK 0x0000000FL
+#define RLC_SPM_RSPM_RET_OP__VALID_MASK 0x00000100L
+//RLC_SPM_SE_RSPM_REQ_DATA_LO
+#define RLC_SPM_SE_RSPM_REQ_DATA_LO__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_RSPM_REQ_DATA_HI
+#define RLC_SPM_SE_RSPM_REQ_DATA_HI__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_DATA_HI__DATA_MASK 0x00000FFFL
+//RLC_SPM_SE_RSPM_REQ_OP
+#define RLC_SPM_SE_RSPM_REQ_OP__OP__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_REQ_OP__OP_MASK 0x0000000FL
+//RLC_SPM_SE_RSPM_RET_DATA
+#define RLC_SPM_SE_RSPM_RET_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_RET_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPM_SE_RSPM_RET_OP
+#define RLC_SPM_SE_RSPM_RET_OP__OP__SHIFT 0x0
+#define RLC_SPM_SE_RSPM_RET_OP__VALID__SHIFT 0x8
+#define RLC_SPM_SE_RSPM_RET_OP__OP_MASK 0x0000000FL
+#define RLC_SPM_SE_RSPM_RET_OP__VALID_MASK 0x00000100L
+//RLC_SPM_RSPM_CMD
+#define RLC_SPM_RSPM_CMD__CMD__SHIFT 0x0
+#define RLC_SPM_RSPM_CMD__CMD_MASK 0x0000000FL
+//RLC_SPM_RSPM_CMD_ACK
+#define RLC_SPM_RSPM_CMD_ACK__SE0_ACK__SHIFT 0x0
+#define RLC_SPM_RSPM_CMD_ACK__SE1_ACK__SHIFT 0x1
+#define RLC_SPM_RSPM_CMD_ACK__SE2_ACK__SHIFT 0x2
+#define RLC_SPM_RSPM_CMD_ACK__SE3_ACK__SHIFT 0x3
+#define RLC_SPM_RSPM_CMD_ACK__SE4_ACK__SHIFT 0x4
+#define RLC_SPM_RSPM_CMD_ACK__SE5_ACK__SHIFT 0x5
+#define RLC_SPM_RSPM_CMD_ACK__SE6_ACK__SHIFT 0x6
+#define RLC_SPM_RSPM_CMD_ACK__SE7_ACK__SHIFT 0x7
+#define RLC_SPM_RSPM_CMD_ACK__SPM_ACK__SHIFT 0x8
+#define RLC_SPM_RSPM_CMD_ACK__SE0_ACK_MASK 0x00000001L
+#define RLC_SPM_RSPM_CMD_ACK__SE1_ACK_MASK 0x00000002L
+#define RLC_SPM_RSPM_CMD_ACK__SE2_ACK_MASK 0x00000004L
+#define RLC_SPM_RSPM_CMD_ACK__SE3_ACK_MASK 0x00000008L
+#define RLC_SPM_RSPM_CMD_ACK__SE4_ACK_MASK 0x00000010L
+#define RLC_SPM_RSPM_CMD_ACK__SE5_ACK_MASK 0x00000020L
+#define RLC_SPM_RSPM_CMD_ACK__SE6_ACK_MASK 0x00000040L
+#define RLC_SPM_RSPM_CMD_ACK__SE7_ACK_MASK 0x00000080L
+#define RLC_SPM_RSPM_CMD_ACK__SPM_ACK_MASK 0x00000100L
+//RLC_SPM_SPARE
+#define RLC_SPM_SPARE__SPARE__SHIFT 0x0
+#define RLC_SPM_SPARE__SPARE_MASK 0xFFFFFFFFL
+//RLC_PERFMON_CNTL
+#define RLC_PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE__SHIFT 0xa
+#define RLC_PERFMON_CNTL__PERFMON_STATE_MASK 0x00000007L
+#define RLC_PERFMON_CNTL__PERFMON_SAMPLE_ENABLE_MASK 0x00000400L
+//RLC_PERFCOUNTER0_SELECT
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER0_SELECT__PERFCOUNTER_SELECT_MASK 0x000000FFL
+//RLC_PERFCOUNTER1_SELECT
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT__SHIFT 0x0
+#define RLC_PERFCOUNTER1_SELECT__PERFCOUNTER_SELECT_MASK 0x000000FFL
+//RLC_GPU_IOV_PERF_CNT_CNTL
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT__SHIFT 0x1
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET__SHIFT 0x2
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED__SHIFT 0x3
+#define RLC_GPU_IOV_PERF_CNT_CNTL__ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__MODE_SELECT_MASK 0x00000002L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESET_MASK 0x00000004L
+#define RLC_GPU_IOV_PERF_CNT_CNTL__RESERVED_MASK 0xFFFFFFF8L
+//RLC_GPU_IOV_PERF_CNT_WR_ADDR
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_WR_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_WR_DATA
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_WR_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_PERF_CNT_RD_ADDR
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID__SHIFT 0x4
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__VFID_MASK 0x0000000FL
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__CNT_ID_MASK 0x00000030L
+#define RLC_GPU_IOV_PERF_CNT_RD_ADDR__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_IOV_PERF_CNT_RD_DATA
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_PERF_CNT_RD_DATA__DATA_MASK 0xFFFFFFFFL
+//RMI_PERFCOUNTER0_SELECT
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER0_SELECT1
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define RMI_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER1_SELECT
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERFCOUNTER2_SELECT1
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define RMI_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define RMI_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//RMI_PERFCOUNTER3_SELECT
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define RMI_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define RMI_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//RMI_PERF_COUNTER_CNTL
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL__SHIFT 0x0
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL__SHIFT 0x2
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL__SHIFT 0x4
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0__SHIFT 0x6
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1__SHIFT 0x8
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID__SHIFT 0xa
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID__SHIFT 0xe
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD__SHIFT 0x13
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET__SHIFT 0x19
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL__SHIFT 0x1a
+#define RMI_PERF_COUNTER_CNTL__TRANS_BASED_PERF_EN_SEL_MASK 0x00000003L
+#define RMI_PERF_COUNTER_CNTL__EVENT_BASED_PERF_EN_SEL_MASK 0x0000000CL
+#define RMI_PERF_COUNTER_CNTL__TC_PERF_EN_SEL_MASK 0x00000030L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK0_MASK 0x000000C0L
+#define RMI_PERF_COUNTER_CNTL__PERF_EVENT_WINDOW_MASK1_MASK 0x00000300L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_CID_MASK 0x00003C00L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_VMID_MASK 0x0007C000L
+#define RMI_PERF_COUNTER_CNTL__PERF_COUNTER_BURST_LENGTH_THRESHOLD_MASK 0x01F80000L
+#define RMI_PERF_COUNTER_CNTL__PERF_SOFT_RESET_MASK 0x02000000L
+#define RMI_PERF_COUNTER_CNTL__PERF_CNTR_SPM_SEL_MASK 0x04000000L
+//GCR_PERFCOUNTER0_SELECT
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GCR_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCR_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GCR_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GCR_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GCR_PERFCOUNTER0_SELECT1
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GCR_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GCR_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GCR_PERFCOUNTER1_SELECT
+#define GCR_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GCR_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GCR_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GCR_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GCR_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GCR_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER0_SELECT
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER0_SELECT1
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER1_SELECT
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER2_SELECT
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER3_SELECT
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL1__SHIFT 0xa
+#define PA_PH_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE1__SHIFT 0x18
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER4_SELECT
+#define PA_PH_PERFCOUNTER4_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER4_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER5_SELECT
+#define PA_PH_PERFCOUNTER5_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER5_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER6_SELECT
+#define PA_PH_PERFCOUNTER6_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER6_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER7_SELECT
+#define PA_PH_PERFCOUNTER7_SELECT__PERF_SEL__SHIFT 0x0
+#define PA_PH_PERFCOUNTER7_SELECT__PERF_SEL_MASK 0x000003FFL
+//PA_PH_PERFCOUNTER1_SELECT1
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER1_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER2_SELECT1
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//PA_PH_PERFCOUNTER3_SELECT1
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL2__SHIFT 0x0
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL3__SHIFT 0xa
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE3__SHIFT 0x18
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define PA_PH_PERFCOUNTER3_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER0_SELECT
+#define UTCL1_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER0_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER0_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER1_SELECT
+#define UTCL1_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER1_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER1_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER2_SELECT
+#define UTCL1_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER2_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER2_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//UTCL1_PERFCOUNTER3_SELECT
+#define UTCL1_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define UTCL1_PERFCOUNTER3_SELECT__COUNTER_MODE__SHIFT 0x1c
+#define UTCL1_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define UTCL1_PERFCOUNTER3_SELECT__COUNTER_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER0_SELECT
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1A_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1A_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1A_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER0_SELECT1
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1A_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1A_PERFCOUNTER1_SELECT
+#define GL1A_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER2_SELECT
+#define GL1A_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1A_PERFCOUNTER3_SELECT
+#define GL1A_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1A_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1A_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1A_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1A_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1A_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER0_SELECT
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define GL1H_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GL1H_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GL1H_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER0_SELECT1
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define GL1H_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//GL1H_PERFCOUNTER1_SELECT
+#define GL1H_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER2_SELECT
+#define GL1H_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GL1H_PERFCOUNTER3_SELECT
+#define GL1H_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define GL1H_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define GL1H_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define GL1H_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GL1H_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GL1H_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER0_SELECT
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL1__SHIFT 0xa
+#define CHA_PERFCOUNTER0_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE1__SHIFT 0x18
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER0_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define CHA_PERFCOUNTER0_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define CHA_PERFCOUNTER0_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER0_SELECT1
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL2__SHIFT 0x0
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL3__SHIFT 0xa
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE2__SHIFT 0x18
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE3__SHIFT 0x1c
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define CHA_PERFCOUNTER0_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE2_MASK 0x0F000000L
+#define CHA_PERFCOUNTER0_SELECT1__PERF_MODE3_MASK 0xF0000000L
+//CHA_PERFCOUNTER1_SELECT
+#define CHA_PERFCOUNTER1_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER1_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER1_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER1_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER1_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER1_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER2_SELECT
+#define CHA_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//CHA_PERFCOUNTER3_SELECT
+#define CHA_PERFCOUNTER3_SELECT__PERF_SEL__SHIFT 0x0
+#define CHA_PERFCOUNTER3_SELECT__CNTR_MODE__SHIFT 0x14
+#define CHA_PERFCOUNTER3_SELECT__PERF_MODE__SHIFT 0x1c
+#define CHA_PERFCOUNTER3_SELECT__PERF_SEL_MASK 0x000003FFL
+#define CHA_PERFCOUNTER3_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define CHA_PERFCOUNTER3_SELECT__PERF_MODE_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_SELECT
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL1__SHIFT 0xa
+#define GUS_PERFCOUNTER2_SELECT__CNTR_MODE__SHIFT 0x14
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE1__SHIFT 0x18
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE__SHIFT 0x1c
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL_MASK 0x000003FFL
+#define GUS_PERFCOUNTER2_SELECT__PERF_SEL1_MASK 0x000FFC00L
+#define GUS_PERFCOUNTER2_SELECT__CNTR_MODE_MASK 0x00F00000L
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE1_MASK 0x0F000000L
+#define GUS_PERFCOUNTER2_SELECT__PERF_MODE_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_SELECT1
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL2__SHIFT 0x0
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL3__SHIFT 0xa
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE3__SHIFT 0x18
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE2__SHIFT 0x1c
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL2_MASK 0x000003FFL
+#define GUS_PERFCOUNTER2_SELECT1__PERF_SEL3_MASK 0x000FFC00L
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE3_MASK 0x0F000000L
+#define GUS_PERFCOUNTER2_SELECT1__PERF_MODE2_MASK 0xF0000000L
+//GUS_PERFCOUNTER2_MODE
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE0__SHIFT 0x0
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE1__SHIFT 0x2
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE2__SHIFT 0x4
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE3__SHIFT 0x6
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE0__SHIFT 0x8
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE1__SHIFT 0xc
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE2__SHIFT 0x10
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE3__SHIFT 0x14
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE0_MASK 0x00000003L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE1_MASK 0x0000000CL
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE2_MASK 0x00000030L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_MODE3_MASK 0x000000C0L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE0_MASK 0x00000F00L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE1_MASK 0x0000F000L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE2_MASK 0x000F0000L
+#define GUS_PERFCOUNTER2_MODE__COMPARE_VALUE3_MASK 0x00F00000L
+//GUS_PERFCOUNTER0_CFG
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8
+#define GUS_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18
+#define GUS_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c
+#define GUS_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL
+#define GUS_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L
+#define GUS_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L
+#define GUS_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L
+//GUS_PERFCOUNTER1_CFG
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8
+#define GUS_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18
+#define GUS_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c
+#define GUS_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL
+#define GUS_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L
+#define GUS_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L
+#define GUS_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L
+//GUS_PERFCOUNTER_RSLT_CNTL
+#define GUS_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0
+#define GUS_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10
+#define GUS_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18
+#define GUS_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a
+#define GUS_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL
+#define GUS_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L
+#define GUS_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L
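/*
 * Minimal usage sketch (a hypothetical helper, not taken from the driver or
 * from this patch): every field in this header comes as a __SHIFT/_MASK pair,
 * so a register word is packed by shifting each field value into place and
 * clamping it with the mask. The sketch below assumes only the
 * GUS_PERFCOUNTER0_CFG definitions listed earlier in this address block.
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint32_t gus_perfcounter0_cfg_pack(uint32_t perf_sel, bool enable)
{
	uint32_t v = 0;

	/* Event select: 8-bit PERF_SEL field at bits 7:0. */
	v |= (perf_sel << GUS_PERFCOUNTER0_CFG__PERF_SEL__SHIFT) &
	     GUS_PERFCOUNTER0_CFG__PERF_SEL_MASK;

	/* Single-bit ENABLE field at bit 28 turns the counter on. */
	if (enable)
		v |= GUS_PERFCOUNTER0_CFG__ENABLE_MASK;

	return v;
}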
+
+
+// addressBlock: gc_gdfll_gdfll_dec
+//GDFLL_EDC_HYSTERESIS_CNTL
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_EDC_HYSTERESIS_STAT
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: gc_gdfll_se_gdfll_dec
+//GDFLL_SE_EDC_HYSTERESIS_CNTL
+#define GDFLL_SE_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS__SHIFT 0x0
+#define GDFLL_SE_EDC_HYSTERESIS_CNTL__MAX_HYSTERESIS_MASK 0x000000FFL
+//GDFLL_SE_EDC_HYSTERESIS_STAT
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT__SHIFT 0x0
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__EDC__SHIFT 0x8
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__HYSTERESIS_CNT_MASK 0x000000FFL
+#define GDFLL_SE_EDC_HYSTERESIS_STAT__EDC_MASK 0x00000100L
+
+
+// addressBlock: gc_grtavfs_grtavfs_dec
+//GRTAVFS_RTAVFS_REG_ADDR
+#define GRTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//GRTAVFS_RTAVFS_WR_DATA
+#define GRTAVFS_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_GENERAL_0
+#define GRTAVFS_GENERAL_0__DATA__SHIFT 0x0
+#define GRTAVFS_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//GRTAVFS_RTAVFS_RD_DATA
+#define GRTAVFS_RTAVFS_RD_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_RTAVFS_RD_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_RTAVFS_REG_CTRL
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_WR_EN__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_RD_EN__SHIFT 0x1
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_WR_EN_MASK 0x00000001L
+#define GRTAVFS_RTAVFS_REG_CTRL__SET_RD_EN_MASK 0x00000002L
+//GRTAVFS_RTAVFS_REG_STATUS
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_WR_ACK__SHIFT 0x0
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID__SHIFT 0x1
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_WR_ACK_MASK 0x00000001L
+#define GRTAVFS_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID_MASK 0x00000002L
+//GRTAVFS_TARG_FREQ
+#define GRTAVFS_TARG_FREQ__TARGET_FREQUENCY__SHIFT 0x0
+#define GRTAVFS_TARG_FREQ__REQUEST__SHIFT 0x10
+#define GRTAVFS_TARG_FREQ__RESERVED__SHIFT 0x11
+#define GRTAVFS_TARG_FREQ__TARGET_FREQUENCY_MASK 0x0000FFFFL
+#define GRTAVFS_TARG_FREQ__REQUEST_MASK 0x00010000L
+#define GRTAVFS_TARG_FREQ__RESERVED_MASK 0xFFFE0000L
+//GRTAVFS_TARG_VOLT
+#define GRTAVFS_TARG_VOLT__TARGET_VOLTAGE__SHIFT 0x0
+#define GRTAVFS_TARG_VOLT__VALID__SHIFT 0xa
+#define GRTAVFS_TARG_VOLT__RESERVED__SHIFT 0xb
+#define GRTAVFS_TARG_VOLT__TARGET_VOLTAGE_MASK 0x000003FFL
+#define GRTAVFS_TARG_VOLT__VALID_MASK 0x00000400L
+#define GRTAVFS_TARG_VOLT__RESERVED_MASK 0xFFFFF800L
+//GRTAVFS_SOFT_RESET
+#define GRTAVFS_SOFT_RESET__RESETN_OVERRIDE__SHIFT 0x0
+#define GRTAVFS_SOFT_RESET__RESERVED__SHIFT 0x1
+#define GRTAVFS_SOFT_RESET__RESETN_OVERRIDE_MASK 0x00000001L
+#define GRTAVFS_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//GRTAVFS_PSM_CNTL
+#define GRTAVFS_PSM_CNTL__PSM_COUNT__SHIFT 0x0
+#define GRTAVFS_PSM_CNTL__PSM_SAMPLE_EN__SHIFT 0xe
+#define GRTAVFS_PSM_CNTL__RESERVED__SHIFT 0xf
+#define GRTAVFS_PSM_CNTL__PSM_COUNT_MASK 0x00003FFFL
+#define GRTAVFS_PSM_CNTL__PSM_SAMPLE_EN_MASK 0x00004000L
+#define GRTAVFS_PSM_CNTL__RESERVED_MASK 0xFFFF8000L
+//GRTAVFS_CLK_CNTL
+#define GRTAVFS_CLK_CNTL__GRTAVFS_MUX_CLK_SEL__SHIFT 0x0
+#define GRTAVFS_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL__SHIFT 0x1
+#define GRTAVFS_CLK_CNTL__RESERVED__SHIFT 0x2
+#define GRTAVFS_CLK_CNTL__GRTAVFS_MUX_CLK_SEL_MASK 0x00000001L
+#define GRTAVFS_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL_MASK 0x00000002L
+#define GRTAVFS_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_grtavfs_se_grtavfs_dec
+//GRTAVFS_SE_RTAVFS_REG_ADDR
+#define GRTAVFS_SE_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//GRTAVFS_SE_RTAVFS_WR_DATA
+#define GRTAVFS_SE_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_GENERAL_0
+#define GRTAVFS_SE_GENERAL_0__DATA__SHIFT 0x0
+#define GRTAVFS_SE_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_RTAVFS_RD_DATA
+#define GRTAVFS_SE_RTAVFS_RD_DATA__RTAVFSDATA__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_RD_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+//GRTAVFS_SE_RTAVFS_REG_CTRL
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_WR_EN__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_RD_EN__SHIFT 0x1
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_WR_EN_MASK 0x00000001L
+#define GRTAVFS_SE_RTAVFS_REG_CTRL__SET_RD_EN_MASK 0x00000002L
+//GRTAVFS_SE_RTAVFS_REG_STATUS
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_WR_ACK__SHIFT 0x0
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID__SHIFT 0x1
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_WR_ACK_MASK 0x00000001L
+#define GRTAVFS_SE_RTAVFS_REG_STATUS__RTAVFS_RD_DATA_VALID_MASK 0x00000002L
+//GRTAVFS_SE_TARG_FREQ
+#define GRTAVFS_SE_TARG_FREQ__TARGET_FREQUENCY__SHIFT 0x0
+#define GRTAVFS_SE_TARG_FREQ__REQUEST__SHIFT 0x10
+#define GRTAVFS_SE_TARG_FREQ__RESERVED__SHIFT 0x11
+#define GRTAVFS_SE_TARG_FREQ__TARGET_FREQUENCY_MASK 0x0000FFFFL
+#define GRTAVFS_SE_TARG_FREQ__REQUEST_MASK 0x00010000L
+#define GRTAVFS_SE_TARG_FREQ__RESERVED_MASK 0xFFFE0000L
+//GRTAVFS_SE_TARG_VOLT
+#define GRTAVFS_SE_TARG_VOLT__TARGET_VOLTAGE__SHIFT 0x0
+#define GRTAVFS_SE_TARG_VOLT__VALID__SHIFT 0xa
+#define GRTAVFS_SE_TARG_VOLT__RESERVED__SHIFT 0xb
+#define GRTAVFS_SE_TARG_VOLT__TARGET_VOLTAGE_MASK 0x000003FFL
+#define GRTAVFS_SE_TARG_VOLT__VALID_MASK 0x00000400L
+#define GRTAVFS_SE_TARG_VOLT__RESERVED_MASK 0xFFFFF800L
+//GRTAVFS_SE_SOFT_RESET
+#define GRTAVFS_SE_SOFT_RESET__RESETN_OVERRIDE__SHIFT 0x0
+#define GRTAVFS_SE_SOFT_RESET__RESERVED__SHIFT 0x1
+#define GRTAVFS_SE_SOFT_RESET__RESETN_OVERRIDE_MASK 0x00000001L
+#define GRTAVFS_SE_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//GRTAVFS_SE_PSM_CNTL
+#define GRTAVFS_SE_PSM_CNTL__PSM_COUNT__SHIFT 0x0
+#define GRTAVFS_SE_PSM_CNTL__PSM_SAMPLE_EN__SHIFT 0xe
+#define GRTAVFS_SE_PSM_CNTL__RESERVED__SHIFT 0xf
+#define GRTAVFS_SE_PSM_CNTL__PSM_COUNT_MASK 0x00003FFFL
+#define GRTAVFS_SE_PSM_CNTL__PSM_SAMPLE_EN_MASK 0x00004000L
+#define GRTAVFS_SE_PSM_CNTL__RESERVED_MASK 0xFFFF8000L
+//GRTAVFS_SE_CLK_CNTL
+#define GRTAVFS_SE_CLK_CNTL__GRTAVFS_MUX_CLK_SEL__SHIFT 0x0
+#define GRTAVFS_SE_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL__SHIFT 0x1
+#define GRTAVFS_SE_CLK_CNTL__RESERVED__SHIFT 0x2
+#define GRTAVFS_SE_CLK_CNTL__GRTAVFS_MUX_CLK_SEL_MASK 0x00000001L
+#define GRTAVFS_SE_CLK_CNTL__FORCE_GRTAVFS_CLK_SEL_MASK 0x00000002L
+#define GRTAVFS_SE_CLK_CNTL__RESERVED_MASK 0xFFFFFFFCL
+
+
+// addressBlock: gc_grtavfsdec
+//RTAVFS_RTAVFS_REG_ADDR
+#define RTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR__SHIFT 0x0
+#define RTAVFS_RTAVFS_REG_ADDR__RTAVFSADDR_MASK 0x000003FFL
+//RTAVFS_RTAVFS_WR_DATA
+#define RTAVFS_RTAVFS_WR_DATA__RTAVFSDATA__SHIFT 0x0
+#define RTAVFS_RTAVFS_WR_DATA__RTAVFSDATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_hypdec
+//GFX_PIPE_PRIORITY
+#define GFX_PIPE_PRIORITY__HP_PIPE_SELECT__SHIFT 0x0
+#define GFX_PIPE_PRIORITY__HP_PIPE_SELECT_MASK 0x00000001L
+//RLC_GPU_IOV_VF_ENABLE
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED__SHIFT 0x1
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM__SHIFT 0x10
+#define RLC_GPU_IOV_VF_ENABLE__VF_ENABLE_MASK 0x00000001L
+#define RLC_GPU_IOV_VF_ENABLE__RESERVED_MASK 0x0000FFFEL
+#define RLC_GPU_IOV_VF_ENABLE__VF_NUM_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG6
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION__SHIFT 0x7
+#define RLC_GPU_IOV_CFG_REG6__RESERVED__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET__SHIFT 0xa
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_SIZE_MASK 0x0000007FL
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_LOCATION_MASK 0x00000080L
+#define RLC_GPU_IOV_CFG_REG6__RESERVED_MASK 0x00000300L
+#define RLC_GPU_IOV_CFG_REG6__CNTXT_OFFSET_MASK 0xFFFFFC00L
+//RLC_SDMA0_STATUS
+#define RLC_SDMA0_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA0_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA1_STATUS
+#define RLC_SDMA1_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA1_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA2_STATUS
+#define RLC_SDMA2_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA2_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA3_STATUS
+#define RLC_SDMA3_STATUS__STATUS__SHIFT 0x0
+#define RLC_SDMA3_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA0_BUSY_STATUS
+#define RLC_SDMA0_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA0_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA1_BUSY_STATUS
+#define RLC_SDMA1_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA1_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA2_BUSY_STATUS
+#define RLC_SDMA2_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA2_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_SDMA3_BUSY_STATUS
+#define RLC_SDMA3_BUSY_STATUS__BUSY_STATUS__SHIFT 0x0
+#define RLC_SDMA3_BUSY_STATUS__BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_CFG_REG8
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG8__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_0
+#define RLC_RLCV_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_INT_1
+#define RLC_RLCV_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_RLCV_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_RLCV_TIMER_CTRL
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x2
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x3
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x4
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x5
+#define RLC_RLCV_TIMER_CTRL__RESERVED__SHIFT 0x6
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000004L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000008L
+#define RLC_RLCV_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00000010L
+#define RLC_RLCV_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00000020L
+#define RLC_RLCV_TIMER_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_RLCV_TIMER_STAT
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_RLCV_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_RLCV_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0xa
+#define RLC_RLCV_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0xb
+#define RLC_RLCV_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_RLCV_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_RLCV_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_RLCV_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00000400L
+#define RLC_RLCV_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00000800L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__VF_DOORBELL_STATUS_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS__PF_DOORBELL_STATUS_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_SET
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__VF_DOORBELL_STATUS_SET_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_SET__PF_DOORBELL_STATUS_SET_MASK 0x80000000L
+//RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR__SHIFT 0x0
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__VF_DOORBELL_STATUS_CLR_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VF_DOORBELL_STATUS_CLR__PF_DOORBELL_STATUS_CLR_MASK 0x80000000L
+//RLC_GPU_IOV_VF_MASK
+#define RLC_GPU_IOV_VF_MASK__VF_MASK__SHIFT 0x0
+#define RLC_GPU_IOV_VF_MASK__VF_MASK_MASK 0x7FFFFFFFL
+//RLC_HYP_SEMAPHORE_0
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_1
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_BUSY_CLK_CNTL
+#define RLC_BUSY_CLK_CNTL__BUSY_OFF_LATENCY__SHIFT 0x0
+#define RLC_BUSY_CLK_CNTL__GRBM_BUSY_OFF_LATENCY__SHIFT 0x8
+#define RLC_BUSY_CLK_CNTL__BUSY_OFF_LATENCY_MASK 0x0000003FL
+#define RLC_BUSY_CLK_CNTL__GRBM_BUSY_OFF_LATENCY_MASK 0x00003F00L
+//RLC_CLK_CNTL
+#define RLC_CLK_CNTL__RLC_SRM_ICG_OVERRIDE__SHIFT 0x0
+#define RLC_CLK_CNTL__RLC_IMU_ICG_OVERRIDE__SHIFT 0x1
+#define RLC_CLK_CNTL__RLC_SPM_ICG_OVERRIDE__SHIFT 0x2
+#define RLC_CLK_CNTL__RLC_SPM_RSPM_ICG_OVERRIDE__SHIFT 0x3
+#define RLC_CLK_CNTL__RLC_GPM_ICG_OVERRIDE__SHIFT 0x4
+#define RLC_CLK_CNTL__RLC_CMN_ICG_OVERRIDE__SHIFT 0x5
+#define RLC_CLK_CNTL__RLC_TC_ICG_OVERRIDE__SHIFT 0x6
+#define RLC_CLK_CNTL__RLC_REG_ICG_OVERRIDE__SHIFT 0x7
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE__SHIFT 0x8
+#define RLC_CLK_CNTL__RESERVED_9__SHIFT 0x9
+#define RLC_CLK_CNTL__RLC_SPP_ICG_OVERRIDE__SHIFT 0xa
+#define RLC_CLK_CNTL__RESERVED_11__SHIFT 0xb
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE__SHIFT 0xc
+#define RLC_CLK_CNTL__RLC_DFLL_ICG_OVERRIDE__SHIFT 0xd
+#define RLC_CLK_CNTL__RESERVED_15__SHIFT 0xf
+#define RLC_CLK_CNTL__RLC_LX6_CORE_ICG_OVERRIDE__SHIFT 0x10
+#define RLC_CLK_CNTL__RLC_LX6_ICG_OVERRIDE__SHIFT 0x11
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE__SHIFT 0x12
+#define RLC_CLK_CNTL__RLC_IH_GASKET_ICG_OVERRIDE__SHIFT 0x13
+#define RLC_CLK_CNTL__RESERVED__SHIFT 0x14
+#define RLC_CLK_CNTL__RLC_SRM_ICG_OVERRIDE_MASK 0x00000001L
+#define RLC_CLK_CNTL__RLC_IMU_ICG_OVERRIDE_MASK 0x00000002L
+#define RLC_CLK_CNTL__RLC_SPM_ICG_OVERRIDE_MASK 0x00000004L
+#define RLC_CLK_CNTL__RLC_SPM_RSPM_ICG_OVERRIDE_MASK 0x00000008L
+#define RLC_CLK_CNTL__RLC_GPM_ICG_OVERRIDE_MASK 0x00000010L
+#define RLC_CLK_CNTL__RLC_CMN_ICG_OVERRIDE_MASK 0x00000020L
+#define RLC_CLK_CNTL__RLC_TC_ICG_OVERRIDE_MASK 0x00000040L
+#define RLC_CLK_CNTL__RLC_REG_ICG_OVERRIDE_MASK 0x00000080L
+#define RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK 0x00000100L
+#define RLC_CLK_CNTL__RESERVED_9_MASK 0x00000200L
+#define RLC_CLK_CNTL__RLC_SPP_ICG_OVERRIDE_MASK 0x00000400L
+#define RLC_CLK_CNTL__RESERVED_11_MASK 0x00000800L
+#define RLC_CLK_CNTL__RLC_TC_FGCG_REP_OVERRIDE_MASK 0x00001000L
+#define RLC_CLK_CNTL__RLC_DFLL_ICG_OVERRIDE_MASK 0x00002000L
+#define RLC_CLK_CNTL__RESERVED_15_MASK 0x00008000L
+#define RLC_CLK_CNTL__RLC_LX6_CORE_ICG_OVERRIDE_MASK 0x00010000L
+#define RLC_CLK_CNTL__RLC_LX6_ICG_OVERRIDE_MASK 0x00020000L
+#define RLC_CLK_CNTL__RLC_UTCL2_FGCG_OVERRIDE_MASK 0x00040000L
+#define RLC_CLK_CNTL__RLC_IH_GASKET_ICG_OVERRIDE_MASK 0x00080000L
+#define RLC_CLK_CNTL__RESERVED_MASK 0xFFF00000L
+//RLC_PACE_TIMER_STAT
+#define RLC_PACE_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_PACE_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_PACE_TIMER_STAT__RESERVED__SHIFT 0x2
+#define RLC_PACE_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_PACE_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_PACE_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0xa
+#define RLC_PACE_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0xb
+#define RLC_PACE_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_PACE_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_PACE_TIMER_STAT__RESERVED_MASK 0x000000FCL
+#define RLC_PACE_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_PACE_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_PACE_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00000400L
+#define RLC_PACE_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00000800L
+//RLC_GPU_IOV_SCH_BLOCK
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver__SHIFT 0x4
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size__SHIFT 0x8
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED__SHIFT 0x10
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Ver_MASK 0x000000F0L
+#define RLC_GPU_IOV_SCH_BLOCK__Sch_Block_Size_MASK 0x0000FF00L
+#define RLC_GPU_IOV_SCH_BLOCK__RESERVED_MASK 0xFFFF0000L
+//RLC_GPU_IOV_CFG_REG1
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define RLC_GPU_IOV_CFG_REG1__RESERVED__SHIFT 0x6
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID__SHIFT 0x8
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID__SHIFT 0x10
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1__SHIFT 0x18
+#define RLC_GPU_IOV_CFG_REG1__CMD_TYPE_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_MASK 0x00000010L
+#define RLC_GPU_IOV_CFG_REG1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED_MASK 0x000000C0L
+#define RLC_GPU_IOV_CFG_REG1__FCN_ID_MASK 0x0000FF00L
+#define RLC_GPU_IOV_CFG_REG1__NEXT_FCN_ID_MASK 0x00FF0000L
+#define RLC_GPU_IOV_CFG_REG1__RESERVED1_MASK 0xFF000000L
+//RLC_GPU_IOV_CFG_REG2
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_CFG_REG2__RESERVED__SHIFT 0x4
+#define RLC_GPU_IOV_CFG_REG2__CMD_STATUS_MASK 0x0000000FL
+#define RLC_GPU_IOV_CFG_REG2__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPU_IOV_VM_BUSY_STATUS
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_VM_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_0__ACTIVE_FUNCTIONS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_ACTIVE_FCN_ID
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID__SHIFT 0x0
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_7_4__SHIFT 0x4
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__ACTIVE_FCN_ID_STATUS__SHIFT 0x8
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_30_12__SHIFT 0xc
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF__SHIFT 0x1f
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__VF_ID_MASK 0x0000000FL
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_7_4_MASK 0x000000F0L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__ACTIVE_FCN_ID_STATUS_MASK 0x00000F00L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__RESERVED_30_12_MASK 0x7FFFF000L
+#define RLC_GPU_IOV_ACTIVE_FCN_ID__PF_VF_MASK 0x80000000L
+//RLC_GPU_IOV_SCH_3
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_3__Time_Quanta_Def_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_1
+#define RLC_GPU_IOV_SCH_1__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCH_2
+#define RLC_GPU_IOV_SCH_2__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCH_2__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_FORCE
+#define RLC_PACE_INT_FORCE__FORCE_INT__SHIFT 0x0
+#define RLC_PACE_INT_FORCE__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_CLEAR
+#define RLC_PACE_INT_CLEAR__SMU_STRETCH_PCC_CLEAR__SHIFT 0x0
+#define RLC_PACE_INT_CLEAR__SMU_PCC_CLEAR__SHIFT 0x1
+#define RLC_PACE_INT_CLEAR__SMU_STRETCH_PCC_CLEAR_MASK 0x00000001L
+#define RLC_PACE_INT_CLEAR__SMU_PCC_CLEAR_MASK 0x00000002L
+//RLC_GPU_IOV_INT_STAT
+#define RLC_GPU_IOV_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_IH_COOKIE
+#define RLC_IH_COOKIE__DATA__SHIFT 0x0
+#define RLC_IH_COOKIE__DATA_MASK 0xFFFFFFFFL
+//RLC_IH_COOKIE_CNTL
+#define RLC_IH_COOKIE_CNTL__CREDIT__SHIFT 0x0
+#define RLC_IH_COOKIE_CNTL__RESET_COUNTER__SHIFT 0x2
+#define RLC_IH_COOKIE_CNTL__CREDIT_MASK 0x00000003L
+#define RLC_IH_COOKIE_CNTL__RESET_COUNTER_MASK 0x00000004L
+//RLC_HYP_RLCG_UCODE_CHKSUM
+#define RLC_HYP_RLCG_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCG_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_HYP_RLCP_UCODE_CHKSUM
+#define RLC_HYP_RLCP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_HYP_RLCV_UCODE_CHKSUM
+#define RLC_HYP_RLCV_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define RLC_HYP_RLCV_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_CNTL
+#define RLC_GPU_IOV_F32_CNTL__ENABLE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_CNTL__ENABLE_MASK 0x00000001L
+//RLC_GPU_IOV_F32_RESET
+#define RLC_GPU_IOV_F32_RESET__RESET__SHIFT 0x0
+#define RLC_GPU_IOV_F32_RESET__RESET_MASK 0x00000001L
+//RLC_GPU_IOV_UCODE_ADDR
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_GPU_IOV_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_GPU_IOV_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_GPU_IOV_UCODE_DATA
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPU_IOV_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SMU_RESPONSE
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_SMU_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_F32_INVALIDATE_CACHE
+#define RLC_GPU_IOV_F32_INVALIDATE_CACHE__INVALIDATE_CACHE__SHIFT 0x0
+#define RLC_GPU_IOV_F32_INVALIDATE_CACHE__INVALIDATE_CACHE_MASK 0x00000001L
+//RLC_GPU_IOV_VIRT_RESET_REQ
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR__SHIFT 0x1f
+#define RLC_GPU_IOV_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL
+#define RLC_GPU_IOV_VIRT_RESET_REQ__SOFT_PF_FLR_MASK 0x80000000L
+//RLC_GPU_IOV_RLC_RESPONSE
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP__SHIFT 0x0
+#define RLC_GPU_IOV_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_DISABLE
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_INT__SHIFT 0x0
+#define RLC_GPU_IOV_INT_DISABLE__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_INT_FORCE
+#define RLC_GPU_IOV_INT_FORCE__FORCE_INT__SHIFT 0x0
+#define RLC_GPU_IOV_INT_FORCE__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SCRATCH_ADDR
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_GPU_IOV_SCRATCH_DATA
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPU_IOV_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_HYP_SEMAPHORE_2
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_HYP_SEMAPHORE_3
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_HYP_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_HYP_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_HYP_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_LX6_SCRATCH_ADDR
+#define RLC_LX6_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_SCRATCH_ADDR__ADDR_MASK 0x000000FFL
+//RLC_LX6_CORE1_SCRATCH_ADDR
+#define RLC_LX6_CORE1_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_CORE1_SCRATCH_ADDR__ADDR_MASK 0x000000FFL
+//RLC_GPM_UCODE_ADDR
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_GPM_UCODE_ADDR__RESERVED__SHIFT 0xe
+#define RLC_GPM_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFFL
+#define RLC_GPM_UCODE_ADDR__RESERVED_MASK 0xFFFFC000L
+//RLC_GPM_UCODE_DATA
+#define RLC_GPM_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_GPM_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_IRAM_ADDR
+#define RLC_GPM_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_GPM_IRAM_DATA
+#define RLC_GPM_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_IRAM_ADDR
+#define RLC_RLCP_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_RLCP_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_RLCP_IRAM_DATA
+#define RLC_RLCP_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_RLCP_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_IRAM_ADDR
+#define RLC_RLCV_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_RLCV_IRAM_ADDR__ADDR_MASK 0xFFFFFFFFL
+//RLC_RLCV_IRAM_DATA
+#define RLC_RLCV_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_RLCV_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_LX6_DRAM_ADDR
+#define RLC_LX6_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_DRAM_ADDR__ADDR_MASK 0x000007FFL
+//RLC_LX6_DRAM_DATA
+#define RLC_LX6_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_LX6_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_LX6_IRAM_ADDR
+#define RLC_LX6_IRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_LX6_IRAM_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_LX6_IRAM_DATA
+#define RLC_LX6_IRAM_DATA__DATA__SHIFT 0x0
+#define RLC_LX6_IRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_UCODE_ADDR
+#define RLC_PACE_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define RLC_PACE_UCODE_ADDR__RESERVED__SHIFT 0xc
+#define RLC_PACE_UCODE_ADDR__UCODE_ADDR_MASK 0x00000FFFL
+#define RLC_PACE_UCODE_ADDR__RESERVED_MASK 0xFFFFF000L
+//RLC_PACE_UCODE_DATA
+#define RLC_PACE_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define RLC_PACE_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_SCRATCH_ADDR
+#define RLC_GPM_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_GPM_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_GPM_SCRATCH_DATA
+#define RLC_GPM_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_GPM_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_DRAM_ADDR
+#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xd
+#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x00001FFFL
+#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xFFFFE000L
+//RLC_SRM_DRAM_DATA
+#define RLC_SRM_DRAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_DRAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_ARAM_ADDR
+#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xd
+#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x00001FFFL
+#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xFFFFE000L
+//RLC_SRM_ARAM_DATA
+#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
+#define RLC_SRM_ARAM_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_PACE_SCRATCH_ADDR
+#define RLC_PACE_SCRATCH_ADDR__ADDR__SHIFT 0x0
+#define RLC_PACE_SCRATCH_ADDR__ADDR_MASK 0x0000FFFFL
+//RLC_PACE_SCRATCH_DATA
+#define RLC_PACE_SCRATCH_DATA__DATA__SHIFT 0x0
+#define RLC_PACE_SCRATCH_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_GTS_OFFSET_LSB
+#define RLC_GTS_OFFSET_LSB__DATA__SHIFT 0x0
+#define RLC_GTS_OFFSET_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_GTS_OFFSET_MSB
+#define RLC_GTS_OFFSET_MSB__DATA__SHIFT 0x0
+#define RLC_GTS_OFFSET_MSB__DATA_MASK 0xFFFFFFFFL
+//GL2_PIPE_STEER_0
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q0__SHIFT 0x0
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q0__SHIFT 0x4
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q0__SHIFT 0x8
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q0__SHIFT 0xc
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q1__SHIFT 0x10
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q1__SHIFT 0x14
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q1__SHIFT 0x18
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q1__SHIFT 0x1c
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q0_MASK 0x00000007L
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q0_MASK 0x00000070L
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q0_MASK 0x00000700L
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q0_MASK 0x00007000L
+#define GL2_PIPE_STEER_0__PIPE_0_TO_CHAN_IN_Q1_MASK 0x00070000L
+#define GL2_PIPE_STEER_0__PIPE_1_TO_CHAN_IN_Q1_MASK 0x00700000L
+#define GL2_PIPE_STEER_0__PIPE_2_TO_CHAN_IN_Q1_MASK 0x07000000L
+#define GL2_PIPE_STEER_0__PIPE_3_TO_CHAN_IN_Q1_MASK 0x70000000L
+//GL2_PIPE_STEER_1
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q2__SHIFT 0x0
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q2__SHIFT 0x4
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q2__SHIFT 0x8
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q2__SHIFT 0xc
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q3__SHIFT 0x10
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q3__SHIFT 0x14
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q3__SHIFT 0x18
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q3__SHIFT 0x1c
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q2_MASK 0x00000007L
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q2_MASK 0x00000070L
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q2_MASK 0x00000700L
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q2_MASK 0x00007000L
+#define GL2_PIPE_STEER_1__PIPE_0_TO_CHAN_IN_Q3_MASK 0x00070000L
+#define GL2_PIPE_STEER_1__PIPE_1_TO_CHAN_IN_Q3_MASK 0x00700000L
+#define GL2_PIPE_STEER_1__PIPE_2_TO_CHAN_IN_Q3_MASK 0x07000000L
+#define GL2_PIPE_STEER_1__PIPE_3_TO_CHAN_IN_Q3_MASK 0x70000000L
+//GL2_PIPE_STEER_2
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q0__SHIFT 0x0
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q0__SHIFT 0x4
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q0__SHIFT 0x8
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q0__SHIFT 0xc
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q1__SHIFT 0x10
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q1__SHIFT 0x14
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q1__SHIFT 0x18
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q1__SHIFT 0x1c
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q0_MASK 0x00000007L
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q0_MASK 0x00000070L
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q0_MASK 0x00000700L
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q0_MASK 0x00007000L
+#define GL2_PIPE_STEER_2__PIPE_4_TO_CHAN_IN_Q1_MASK 0x00070000L
+#define GL2_PIPE_STEER_2__PIPE_5_TO_CHAN_IN_Q1_MASK 0x00700000L
+#define GL2_PIPE_STEER_2__PIPE_6_TO_CHAN_IN_Q1_MASK 0x07000000L
+#define GL2_PIPE_STEER_2__PIPE_7_TO_CHAN_IN_Q1_MASK 0x70000000L
+//GL2_PIPE_STEER_3
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q2__SHIFT 0x0
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q2__SHIFT 0x4
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q2__SHIFT 0x8
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q2__SHIFT 0xc
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q3__SHIFT 0x10
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q3__SHIFT 0x14
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q3__SHIFT 0x18
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q3__SHIFT 0x1c
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q2_MASK 0x00000007L
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q2_MASK 0x00000070L
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q2_MASK 0x00000700L
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q2_MASK 0x00007000L
+#define GL2_PIPE_STEER_3__PIPE_4_TO_CHAN_IN_Q3_MASK 0x00070000L
+#define GL2_PIPE_STEER_3__PIPE_5_TO_CHAN_IN_Q3_MASK 0x00700000L
+#define GL2_PIPE_STEER_3__PIPE_6_TO_CHAN_IN_Q3_MASK 0x07000000L
+#define GL2_PIPE_STEER_3__PIPE_7_TO_CHAN_IN_Q3_MASK 0x70000000L
+//GL1_PIPE_STEER
+#define GL1_PIPE_STEER__PIPE0__SHIFT 0x0
+#define GL1_PIPE_STEER__PIPE1__SHIFT 0x2
+#define GL1_PIPE_STEER__PIPE2__SHIFT 0x4
+#define GL1_PIPE_STEER__PIPE3__SHIFT 0x6
+#define GL1_PIPE_STEER__PIPE0_MASK 0x00000003L
+#define GL1_PIPE_STEER__PIPE1_MASK 0x0000000CL
+#define GL1_PIPE_STEER__PIPE2_MASK 0x00000030L
+#define GL1_PIPE_STEER__PIPE3_MASK 0x000000C0L
+//CH_PIPE_STEER
+#define CH_PIPE_STEER__PIPE0__SHIFT 0x0
+#define CH_PIPE_STEER__PIPE1__SHIFT 0x2
+#define CH_PIPE_STEER__PIPE2__SHIFT 0x4
+#define CH_PIPE_STEER__PIPE3__SHIFT 0x6
+#define CH_PIPE_STEER__PIPE0_MASK 0x00000003L
+#define CH_PIPE_STEER__PIPE1_MASK 0x0000000CL
+#define CH_PIPE_STEER__PIPE2_MASK 0x00000030L
+#define CH_PIPE_STEER__PIPE3_MASK 0x000000C0L
+//GC_USER_SHADER_ARRAY_CONFIG
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT 0x10
+#define GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK 0xFFFF0000L
+//GC_USER_PRIM_CONFIG
+#define GC_USER_PRIM_CONFIG__INACTIVE_PA__SHIFT 0x4
+#define GC_USER_PRIM_CONFIG__INACTIVE_PA_MASK 0x000FFFF0L
+//GC_USER_SA_UNIT_DISABLE
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT 0x8
+#define GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK 0x00FFFF00L
+//GC_USER_RB_REDUNDANCY
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0__SHIFT 0x8
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0__SHIFT 0xc
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1__SHIFT 0x10
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1__SHIFT 0x14
+#define GC_USER_RB_REDUNDANCY__FAILED_RB0_MASK 0x00000F00L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY0_MASK 0x00001000L
+#define GC_USER_RB_REDUNDANCY__FAILED_RB1_MASK 0x000F0000L
+#define GC_USER_RB_REDUNDANCY__EN_REDUNDANCY1_MASK 0x00100000L
+//GC_USER_RB_BACKEND_DISABLE
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT 0x4
+#define GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK 0xFFFFFFF0L
+//GC_USER_RMI_REDUNDANCY
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_0__SHIFT 0x1
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_1__SHIFT 0x2
+#define GC_USER_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE__SHIFT 0x3
+#define GC_USER_RMI_REDUNDANCY__REPAIR_ID_SWAP__SHIFT 0x4
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_0_MASK 0x00000002L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_EN_IN_1_MASK 0x00000004L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_RMI_OVERRIDE_MASK 0x00000008L
+#define GC_USER_RMI_REDUNDANCY__REPAIR_ID_SWAP_MASK 0x00000010L
+//CGTS_USER_TCC_DISABLE
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//GC_USER_SHADER_RATE_CONFIG
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE__SHIFT 0x1
+#define GC_USER_SHADER_RATE_CONFIG__DPFP_RATE_MASK 0x00000006L
+//RLC_GPU_IOV_SDMA0_STATUS
+#define RLC_GPU_IOV_SDMA0_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_STATUS
+#define RLC_GPU_IOV_SDMA1_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA2_STATUS
+#define RLC_GPU_IOV_SDMA2_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_STATUS
+#define RLC_GPU_IOV_SDMA3_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_STATUS
+#define RLC_GPU_IOV_SDMA4_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_STATUS
+#define RLC_GPU_IOV_SDMA5_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_STATUS
+#define RLC_GPU_IOV_SDMA6_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_STATUS
+#define RLC_GPU_IOV_SDMA7_STATUS__STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_STATUS__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA0_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA0_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA1_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA1_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA2_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA2_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA3_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA3_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA4_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA4_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA5_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA5_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA6_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA6_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+//RLC_GPU_IOV_SDMA7_BUSY_STATUS
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS__SHIFT 0x0
+#define RLC_GPU_IOV_SDMA7_BUSY_STATUS__VM_BUSY_STATUS_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_cphypdec
+//CP_HYP_PFP_UCODE_ADDR
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_PFP_UCODE_ADDR
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_PFP_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_PFP_UCODE_DATA
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_PFP_UCODE_DATA
+#define CP_PFP_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_PFP_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_ADDR
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_ME_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_ME_RAM_RADDR
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR__SHIFT 0x0
+#define CP_ME_RAM_RADDR__ME_RAM_RADDR_MASK 0x000FFFFFL
+//CP_ME_RAM_WADDR
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR__SHIFT 0x0
+#define CP_ME_RAM_WADDR__ME_RAM_WADDR_MASK 0x001FFFFFL
+//CP_HYP_ME_UCODE_DATA
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_ME_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_ME_RAM_DATA
+#define CP_ME_RAM_DATA__ME_RAM_DATA__SHIFT 0x0
+#define CP_ME_RAM_DATA__ME_RAM_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC1_UCODE_ADDR
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_MEC_ME1_UCODE_ADDR
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_MEC1_UCODE_DATA
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME1_UCODE_DATA
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME1_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_MEC2_UCODE_ADDR
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_MEC_ME2_UCODE_ADDR
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_ADDR__UCODE_ADDR_MASK 0x000FFFFFL
+//CP_HYP_MEC2_UCODE_DATA
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_HYP_MEC2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_MEC_ME2_UCODE_DATA
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA__SHIFT 0x0
+#define CP_MEC_ME2_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL
+//CP_HYP_PFP_UCODE_CHKSUM
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_PFP_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_ME_UCODE_CHKSUM
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_ME_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME1_UCODE_CHKSUM
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME1_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_HYP_MEC_ME2_UCODE_CHKSUM
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM__SHIFT 0x0
+#define CP_HYP_MEC_ME2_UCODE_CHKSUM__UCODE_CHKSUM_MASK 0xFFFFFFFFL
+//CP_PFP_IC_BASE_LO
+#define CP_PFP_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_PFP_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_PFP_IC_BASE_HI
+#define CP_PFP_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_PFP_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_PFP_IC_BASE_CNTL
+#define CP_PFP_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_PFP_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_PFP_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_PFP_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_PFP_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_PFP_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_PFP_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_PFP_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_PFP_IC_OP_CNTL
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_PFP_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_PFP_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_PFP_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_PFP_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_PFP_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_ME_IC_BASE_LO
+#define CP_ME_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_ME_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_ME_IC_BASE_HI
+#define CP_ME_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_ME_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_ME_IC_BASE_CNTL
+#define CP_ME_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_ME_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_ME_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_ME_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_ME_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_ME_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_ME_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_ME_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_ME_IC_OP_CNTL
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE__SHIFT 0x0
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE__SHIFT 0x1
+#define CP_ME_IC_OP_CNTL__PRIME_ICACHE__SHIFT 0x4
+#define CP_ME_IC_OP_CNTL__ICACHE_PRIMED__SHIFT 0x5
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_MASK 0x00000001L
+#define CP_ME_IC_OP_CNTL__INVALIDATE_CACHE_COMPLETE_MASK 0x00000002L
+#define CP_ME_IC_OP_CNTL__PRIME_ICACHE_MASK 0x00000010L
+#define CP_ME_IC_OP_CNTL__ICACHE_PRIMED_MASK 0x00000020L
+//CP_CPC_IC_BASE_LO
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_CPC_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_CPC_IC_BASE_HI
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_CPC_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_CPC_IC_BASE_CNTL
+#define CP_CPC_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_CPC_IC_BASE_CNTL__ADDRESS_CLAMP__SHIFT 0x4
+#define CP_CPC_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_CPC_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_CPC_IC_BASE_CNTL__ADDRESS_CLAMP_MASK 0x00000010L
+#define CP_CPC_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_CPC_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_IC_BASE_LO
+#define CP_MES_IC_BASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_MES_IC_BASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_MES_MIBASE_LO
+#define CP_MES_MIBASE_LO__IC_BASE_LO__SHIFT 0xc
+#define CP_MES_MIBASE_LO__IC_BASE_LO_MASK 0xFFFFF000L
+//CP_MES_IC_BASE_HI
+#define CP_MES_IC_BASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_MES_IC_BASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MIBASE_HI
+#define CP_MES_MIBASE_HI__IC_BASE_HI__SHIFT 0x0
+#define CP_MES_MIBASE_HI__IC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_IC_BASE_CNTL
+#define CP_MES_IC_BASE_CNTL__VMID__SHIFT 0x0
+#define CP_MES_IC_BASE_CNTL__EXE_DISABLE__SHIFT 0x17
+#define CP_MES_IC_BASE_CNTL__CACHE_POLICY__SHIFT 0x18
+#define CP_MES_IC_BASE_CNTL__VMID_MASK 0x0000000FL
+#define CP_MES_IC_BASE_CNTL__EXE_DISABLE_MASK 0x00800000L
+#define CP_MES_IC_BASE_CNTL__CACHE_POLICY_MASK 0x03000000L
+//CP_MES_DC_BASE_LO
+#define CP_MES_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_MES_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_MES_MDBASE_LO
+#define CP_MES_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MES_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MES_DC_BASE_HI
+#define CP_MES_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_MES_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MDBASE_HI
+#define CP_MES_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MES_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MES_MIBOUND_LO
+#define CP_MES_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MES_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MES_MIBOUND_HI
+#define CP_MES_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MES_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_MES_MDBOUND_LO
+#define CP_MES_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MES_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MES_MDBOUND_HI
+#define CP_MES_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MES_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DC_BASE0_LO
+#define CP_GFX_RS64_DC_BASE0_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_DC_BASE0_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_DC_BASE1_LO
+#define CP_GFX_RS64_DC_BASE1_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_GFX_RS64_DC_BASE1_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_GFX_RS64_DC_BASE0_HI
+#define CP_GFX_RS64_DC_BASE0_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE0_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_DC_BASE1_HI
+#define CP_GFX_RS64_DC_BASE1_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_GFX_RS64_DC_BASE1_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_GFX_RS64_MIBOUND_LO
+#define CP_GFX_RS64_MIBOUND_LO__BOUND__SHIFT 0x0
+#define CP_GFX_RS64_MIBOUND_LO__BOUND_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_MIBOUND_HI
+#define CP_GFX_RS64_MIBOUND_HI__BOUND__SHIFT 0x0
+#define CP_GFX_RS64_MIBOUND_HI__BOUND_MASK 0xFFFFFFFFL
+//CP_MEC_DC_BASE_LO
+#define CP_MEC_DC_BASE_LO__DC_BASE_LO__SHIFT 0x10
+#define CP_MEC_DC_BASE_LO__DC_BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_MDBASE_LO
+#define CP_MEC_MDBASE_LO__BASE_LO__SHIFT 0x10
+#define CP_MEC_MDBASE_LO__BASE_LO_MASK 0xFFFF0000L
+//CP_MEC_DC_BASE_HI
+#define CP_MEC_DC_BASE_HI__DC_BASE_HI__SHIFT 0x0
+#define CP_MEC_DC_BASE_HI__DC_BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_MDBASE_HI
+#define CP_MEC_MDBASE_HI__BASE_HI__SHIFT 0x0
+#define CP_MEC_MDBASE_HI__BASE_HI_MASK 0x0000FFFFL
+//CP_MEC_MIBOUND_LO
+#define CP_MEC_MIBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MEC_MIBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MIBOUND_HI
+#define CP_MEC_MIBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MEC_MIBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+//CP_MEC_MDBOUND_LO
+#define CP_MEC_MDBOUND_LO__BOUND_LO__SHIFT 0x0
+#define CP_MEC_MDBOUND_LO__BOUND_LO_MASK 0xFFFFFFFFL
+//CP_MEC_MDBOUND_HI
+#define CP_MEC_MDBOUND_HI__BOUND_HI__SHIFT 0x0
+#define CP_MEC_MDBOUND_HI__BOUND_HI_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gc_grbm_hypdec
+//GRBM_GFX_INDEX_SR_SELECT
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_INDEX_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_INDEX_SR_DATA
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX__SHIFT 0x0
+#define GRBM_GFX_INDEX_SR_DATA__SA_INDEX__SHIFT 0x8
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX__SHIFT 0x10
+#define GRBM_GFX_INDEX_SR_DATA__SA_BROADCAST_WRITES__SHIFT 0x1d
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES__SHIFT 0x1f
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_INDEX_MASK 0x000000FFL
+#define GRBM_GFX_INDEX_SR_DATA__SA_INDEX_MASK 0x0000FF00L
+#define GRBM_GFX_INDEX_SR_DATA__SE_INDEX_MASK 0x00FF0000L
+#define GRBM_GFX_INDEX_SR_DATA__SA_BROADCAST_WRITES_MASK 0x20000000L
+#define GRBM_GFX_INDEX_SR_DATA__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L
+#define GRBM_GFX_INDEX_SR_DATA__SE_BROADCAST_WRITES_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_SELECT
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF__SHIFT 0x1f
+#define GRBM_GFX_CNTL_SR_SELECT__INDEX_MASK 0x00000007L
+#define GRBM_GFX_CNTL_SR_SELECT__VF_PF_MASK 0x80000000L
+//GRBM_GFX_CNTL_SR_DATA
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID__SHIFT 0x0
+#define GRBM_GFX_CNTL_SR_DATA__MEID__SHIFT 0x2
+#define GRBM_GFX_CNTL_SR_DATA__VMID__SHIFT 0x4
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID__SHIFT 0x8
+#define GRBM_GFX_CNTL_SR_DATA__PIPEID_MASK 0x00000003L
+#define GRBM_GFX_CNTL_SR_DATA__MEID_MASK 0x0000000CL
+#define GRBM_GFX_CNTL_SR_DATA__VMID_MASK 0x000000F0L
+#define GRBM_GFX_CNTL_SR_DATA__QUEUEID_MASK 0x00000700L
+//GC_IH_COOKIE_0_PTR
+#define GC_IH_COOKIE_0_PTR__ADDR__SHIFT 0x0
+#define GC_IH_COOKIE_0_PTR__ADDR_MASK 0x000FFFFFL
+//GRBM_SE_REMAP_CNTL
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_EN__SHIFT 0x0
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP__SHIFT 0x1
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_EN__SHIFT 0x4
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP__SHIFT 0x5
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_EN__SHIFT 0x8
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP__SHIFT 0x9
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_EN__SHIFT 0xc
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP__SHIFT 0xd
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_EN__SHIFT 0x10
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP__SHIFT 0x11
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_EN__SHIFT 0x14
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP__SHIFT 0x15
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_EN__SHIFT 0x18
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP__SHIFT 0x19
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_EN__SHIFT 0x1c
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP__SHIFT 0x1d
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_EN_MASK 0x00000001L
+#define GRBM_SE_REMAP_CNTL__SE0_REMAP_MASK 0x0000000EL
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_EN_MASK 0x00000010L
+#define GRBM_SE_REMAP_CNTL__SE1_REMAP_MASK 0x000000E0L
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_EN_MASK 0x00000100L
+#define GRBM_SE_REMAP_CNTL__SE2_REMAP_MASK 0x00000E00L
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_EN_MASK 0x00001000L
+#define GRBM_SE_REMAP_CNTL__SE3_REMAP_MASK 0x0000E000L
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_EN_MASK 0x00010000L
+#define GRBM_SE_REMAP_CNTL__SE4_REMAP_MASK 0x000E0000L
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_EN_MASK 0x00100000L
+#define GRBM_SE_REMAP_CNTL__SE5_REMAP_MASK 0x00E00000L
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_EN_MASK 0x01000000L
+#define GRBM_SE_REMAP_CNTL__SE6_REMAP_MASK 0x0E000000L
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_EN_MASK 0x10000000L
+#define GRBM_SE_REMAP_CNTL__SE7_REMAP_MASK 0xE0000000L
+
+
+// addressBlock: gc_gcvmsharedhvdec
+//GCMC_VM_FB_SIZE_OFFSET_VF0
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF1
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF2
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF3
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF4
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF5
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF6
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF7
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF8
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF9
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF10
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF11
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF12
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF13
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF14
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L
+//GCMC_VM_FB_SIZE_OFFSET_VF15
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL
+#define GCMC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L
+
+
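The field definitions in these address blocks come in __SHIFT/_MASK pairs and are consumed by shifting and masking: a read extracts a field as (value & MASK) >> SHIFT, and a write inserts one with the inverse operation. Below is a minimal, illustrative sketch of that pattern against the GCMC_VM_FB_SIZE_OFFSET_VF0 fields defined above; the helper names (gc_field_get, gc_field_set, example_decode_vf0) and the raw register value are assumptions for illustration and are not part of this patch, and the amdgpu driver typically expresses the same arithmetic through its REG_GET_FIELD()/REG_SET_FIELD() wrappers.

/*
 * Illustrative sketch only, not part of this patch: how the generated
 * __SHIFT/_MASK pairs above are combined to unpack and pack fields.
 */
#include <stdint.h>

static inline uint32_t gc_field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field's bits, then move them down to bit 0. */
	return (reg & mask) >> shift;
}

static inline uint32_t gc_field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				    uint32_t val)
{
	/* Clear the field, then merge in the new value, clipped to the mask. */
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Example: split a GCMC_VM_FB_SIZE_OFFSET_VF0 value into its two 16-bit fields. */
static void example_decode_vf0(uint32_t reg_val)
{
	uint32_t fb_size   = gc_field_get(reg_val,
					  GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK,
					  GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT);
	uint32_t fb_offset = gc_field_get(reg_val,
					  GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK,
					  GCMC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT);

	(void)fb_size;
	(void)fb_offset;
}
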
+// addressBlock: gc_rlcdec
+//RLC_CNTL
+#define RLC_CNTL__RLC_ENABLE_F32__SHIFT 0x0
+#define RLC_CNTL__FORCE_RETRY__SHIFT 0x1
+#define RLC_CNTL__READ_CACHE_DISABLE__SHIFT 0x2
+#define RLC_CNTL__RLC_STEP_F32__SHIFT 0x3
+#define RLC_CNTL__RESERVED__SHIFT 0x4
+#define RLC_CNTL__RLC_ENABLE_F32_MASK 0x00000001L
+#define RLC_CNTL__FORCE_RETRY_MASK 0x00000002L
+#define RLC_CNTL__READ_CACHE_DISABLE_MASK 0x00000004L
+#define RLC_CNTL__RLC_STEP_F32_MASK 0x00000008L
+#define RLC_CNTL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_F32_UCODE_VERSION
+#define RLC_F32_UCODE_VERSION__THREAD0_VERSION__SHIFT 0x0
+#define RLC_F32_UCODE_VERSION__THREAD1_VERSION__SHIFT 0xa
+#define RLC_F32_UCODE_VERSION__THREAD2_VERSION__SHIFT 0x14
+#define RLC_F32_UCODE_VERSION__THREAD0_VERSION_MASK 0x000003FFL
+#define RLC_F32_UCODE_VERSION__THREAD1_VERSION_MASK 0x000FFC00L
+#define RLC_F32_UCODE_VERSION__THREAD2_VERSION_MASK 0x3FF00000L
+//RLC_STAT
+#define RLC_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_STAT__RLC_SRM_BUSY__SHIFT 0x1
+#define RLC_STAT__RLC_GPM_BUSY__SHIFT 0x2
+#define RLC_STAT__RLC_SPM_BUSY__SHIFT 0x3
+#define RLC_STAT__MC_BUSY__SHIFT 0x4
+#define RLC_STAT__RLC_THREAD_0_BUSY__SHIFT 0x5
+#define RLC_STAT__RLC_THREAD_1_BUSY__SHIFT 0x6
+#define RLC_STAT__RLC_THREAD_2_BUSY__SHIFT 0x7
+#define RLC_STAT__RESERVED__SHIFT 0x8
+#define RLC_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_STAT__RLC_SRM_BUSY_MASK 0x00000002L
+#define RLC_STAT__RLC_GPM_BUSY_MASK 0x00000004L
+#define RLC_STAT__RLC_SPM_BUSY_MASK 0x00000008L
+#define RLC_STAT__MC_BUSY_MASK 0x00000010L
+#define RLC_STAT__RLC_THREAD_0_BUSY_MASK 0x00000020L
+#define RLC_STAT__RLC_THREAD_1_BUSY_MASK 0x00000040L
+#define RLC_STAT__RLC_THREAD_2_BUSY_MASK 0x00000080L
+#define RLC_STAT__RESERVED_MASK 0xFFFFFF00L
+//RLC_REFCLOCK_TIMESTAMP_LSB
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_LSB__TIMESTAMP_LSB_MASK 0xFFFFFFFFL
+//RLC_REFCLOCK_TIMESTAMP_MSB
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB__SHIFT 0x0
+#define RLC_REFCLOCK_TIMESTAMP_MSB__TIMESTAMP_MSB_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_0
+#define RLC_GPM_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_1
+#define RLC_GPM_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_2
+#define RLC_GPM_TIMER_INT_2__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_2__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_3
+#define RLC_GPM_TIMER_INT_3__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_3__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_INT_4
+#define RLC_GPM_TIMER_INT_4__TIMER__SHIFT 0x0
+#define RLC_GPM_TIMER_INT_4__TIMER_MASK 0xFFFFFFFFL
+//RLC_GPM_TIMER_CTRL
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN__SHIFT 0x2
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN__SHIFT 0x3
+#define RLC_GPM_TIMER_CTRL__TIMER_4_EN__SHIFT 0x4
+#define RLC_GPM_TIMER_CTRL__RESERVED_1__SHIFT 0x5
+#define RLC_GPM_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x8
+#define RLC_GPM_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x9
+#define RLC_GPM_TIMER_CTRL__TIMER_2_AUTO_REARM__SHIFT 0xa
+#define RLC_GPM_TIMER_CTRL__TIMER_3_AUTO_REARM__SHIFT 0xb
+#define RLC_GPM_TIMER_CTRL__TIMER_4_AUTO_REARM__SHIFT 0xc
+#define RLC_GPM_TIMER_CTRL__RESERVED_2__SHIFT 0xd
+#define RLC_GPM_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x10
+#define RLC_GPM_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x11
+#define RLC_GPM_TIMER_CTRL__TIMER_2_INT_CLEAR__SHIFT 0x12
+#define RLC_GPM_TIMER_CTRL__TIMER_3_INT_CLEAR__SHIFT 0x13
+#define RLC_GPM_TIMER_CTRL__TIMER_4_INT_CLEAR__SHIFT 0x14
+#define RLC_GPM_TIMER_CTRL__RESERVED__SHIFT 0x15
+#define RLC_GPM_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_EN_MASK 0x00000004L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_EN_MASK 0x00000008L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_EN_MASK 0x00000010L
+#define RLC_GPM_TIMER_CTRL__RESERVED_1_MASK 0x000000E0L
+#define RLC_GPM_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000100L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000200L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_AUTO_REARM_MASK 0x00000400L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_AUTO_REARM_MASK 0x00000800L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_AUTO_REARM_MASK 0x00001000L
+#define RLC_GPM_TIMER_CTRL__RESERVED_2_MASK 0x0000E000L
+#define RLC_GPM_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00010000L
+#define RLC_GPM_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00020000L
+#define RLC_GPM_TIMER_CTRL__TIMER_2_INT_CLEAR_MASK 0x00040000L
+#define RLC_GPM_TIMER_CTRL__TIMER_3_INT_CLEAR_MASK 0x00080000L
+#define RLC_GPM_TIMER_CTRL__TIMER_4_INT_CLEAR_MASK 0x00100000L
+#define RLC_GPM_TIMER_CTRL__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_TIMER_STAT
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT__SHIFT 0x0
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT__SHIFT 0x1
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT__SHIFT 0x2
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT__SHIFT 0x3
+#define RLC_GPM_TIMER_STAT__TIMER_4_STAT__SHIFT 0x4
+#define RLC_GPM_TIMER_STAT__RESERVED_1__SHIFT 0x5
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC__SHIFT 0x8
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC__SHIFT 0x9
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC__SHIFT 0xa
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC__SHIFT 0xb
+#define RLC_GPM_TIMER_STAT__TIMER_4_ENABLE_SYNC__SHIFT 0xc
+#define RLC_GPM_TIMER_STAT__RESERVED_2__SHIFT 0xd
+#define RLC_GPM_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC__SHIFT 0x10
+#define RLC_GPM_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC__SHIFT 0x11
+#define RLC_GPM_TIMER_STAT__TIMER_2_AUTO_REARM_SYNC__SHIFT 0x12
+#define RLC_GPM_TIMER_STAT__TIMER_3_AUTO_REARM_SYNC__SHIFT 0x13
+#define RLC_GPM_TIMER_STAT__TIMER_4_AUTO_REARM_SYNC__SHIFT 0x14
+#define RLC_GPM_TIMER_STAT__RESERVED__SHIFT 0x15
+#define RLC_GPM_TIMER_STAT__TIMER_0_STAT_MASK 0x00000001L
+#define RLC_GPM_TIMER_STAT__TIMER_1_STAT_MASK 0x00000002L
+#define RLC_GPM_TIMER_STAT__TIMER_2_STAT_MASK 0x00000004L
+#define RLC_GPM_TIMER_STAT__TIMER_3_STAT_MASK 0x00000008L
+#define RLC_GPM_TIMER_STAT__TIMER_4_STAT_MASK 0x00000010L
+#define RLC_GPM_TIMER_STAT__RESERVED_1_MASK 0x000000E0L
+#define RLC_GPM_TIMER_STAT__TIMER_0_ENABLE_SYNC_MASK 0x00000100L
+#define RLC_GPM_TIMER_STAT__TIMER_1_ENABLE_SYNC_MASK 0x00000200L
+#define RLC_GPM_TIMER_STAT__TIMER_2_ENABLE_SYNC_MASK 0x00000400L
+#define RLC_GPM_TIMER_STAT__TIMER_3_ENABLE_SYNC_MASK 0x00000800L
+#define RLC_GPM_TIMER_STAT__TIMER_4_ENABLE_SYNC_MASK 0x00001000L
+#define RLC_GPM_TIMER_STAT__RESERVED_2_MASK 0x0000E000L
+#define RLC_GPM_TIMER_STAT__TIMER_0_AUTO_REARM_SYNC_MASK 0x00010000L
+#define RLC_GPM_TIMER_STAT__TIMER_1_AUTO_REARM_SYNC_MASK 0x00020000L
+#define RLC_GPM_TIMER_STAT__TIMER_2_AUTO_REARM_SYNC_MASK 0x00040000L
+#define RLC_GPM_TIMER_STAT__TIMER_3_AUTO_REARM_SYNC_MASK 0x00080000L
+#define RLC_GPM_TIMER_STAT__TIMER_4_AUTO_REARM_SYNC_MASK 0x00100000L
+#define RLC_GPM_TIMER_STAT__RESERVED_MASK 0xFFE00000L
+//RLC_GPM_LEGACY_INT_STAT
+#define RLC_GPM_LEGACY_INT_STAT__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_STAT__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_STAT__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_STAT__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_STAT__STORE_LOAD_TIMER3_EXPIRED_T0__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_STAT__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_STAT__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_STAT__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_STAT__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_STAT__STORE_LOAD_TIMER3_EXPIRED_T0_MASK 0x00000010L
+//RLC_GPM_LEGACY_INT_CLEAR
+#define RLC_GPM_LEGACY_INT_CLEAR__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_CLEAR__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_CLEAR__RESERVED_4__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_CLEAR__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_CLEAR__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_CLEAR__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_CLEAR__RESERVED_4_MASK 0x00000010L
+//RLC_INT_STAT
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID__SHIFT 0x0
+#define RLC_INT_STAT__CP_RLC_INT_PENDING__SHIFT 0x8
+#define RLC_INT_STAT__RESERVED__SHIFT 0x9
+#define RLC_INT_STAT__LAST_CP_RLC_INT_ID_MASK 0x000000FFL
+#define RLC_INT_STAT__CP_RLC_INT_PENDING_MASK 0x00000100L
+#define RLC_INT_STAT__RESERVED_MASK 0xFFFFFE00L
+//RLC_MGCG_CTRL
+#define RLC_MGCG_CTRL__MGCG_EN__SHIFT 0x0
+#define RLC_MGCG_CTRL__SILICON_EN__SHIFT 0x1
+#define RLC_MGCG_CTRL__SIMULATION_EN__SHIFT 0x2
+#define RLC_MGCG_CTRL__ON_DELAY__SHIFT 0x3
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS__SHIFT 0x7
+#define RLC_MGCG_CTRL__SPARE__SHIFT 0xf
+#define RLC_MGCG_CTRL__MGCG_EN_MASK 0x00000001L
+#define RLC_MGCG_CTRL__SILICON_EN_MASK 0x00000002L
+#define RLC_MGCG_CTRL__SIMULATION_EN_MASK 0x00000004L
+#define RLC_MGCG_CTRL__ON_DELAY_MASK 0x00000078L
+#define RLC_MGCG_CTRL__OFF_HYSTERESIS_MASK 0x00007F80L
+#define RLC_MGCG_CTRL__SPARE_MASK 0xFFFF8000L
+//RLC_JUMP_TABLE_RESTORE
+#define RLC_JUMP_TABLE_RESTORE__ADDR__SHIFT 0x0
+#define RLC_JUMP_TABLE_RESTORE__ADDR_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_2
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE__SHIFT 0x0
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY_2__PERWGP_TIMEOUT_VALUE__SHIFT 0x10
+#define RLC_PG_DELAY_2__SERDES_TIMEOUT_VALUE_MASK 0x000000FFL
+#define RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY_2__PERWGP_TIMEOUT_VALUE_MASK 0xFFFF0000L
+//RLC_GPU_CLOCK_COUNT_LSB
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_UCODE_CNTL
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS__SHIFT 0x0
+#define RLC_UCODE_CNTL__RLC_UCODE_FLAGS_MASK 0xFFFFFFFFL
+//RLC_GPM_THREAD_RESET
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET__SHIFT 0x0
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET__SHIFT 0x1
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET__SHIFT 0x2
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET__SHIFT 0x3
+#define RLC_GPM_THREAD_RESET__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_RESET__THREAD0_RESET_MASK 0x00000001L
+#define RLC_GPM_THREAD_RESET__THREAD1_RESET_MASK 0x00000002L
+#define RLC_GPM_THREAD_RESET__THREAD2_RESET_MASK 0x00000004L
+#define RLC_GPM_THREAD_RESET__THREAD3_RESET_MASK 0x00000008L
+#define RLC_GPM_THREAD_RESET__RESERVED_MASK 0xFFFFFFF0L
+//RLC_GPM_CP_DMA_COMPLETE_T0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T0__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T0__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_CP_DMA_COMPLETE_T1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA__SHIFT 0x0
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED__SHIFT 0x1
+#define RLC_GPM_CP_DMA_COMPLETE_T1__DATA_MASK 0x00000001L
+#define RLC_GPM_CP_DMA_COMPLETE_T1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPM_THREAD_INVALIDATE_CACHE
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD0_INVALIDATE_CACHE__SHIFT 0x0
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD1_INVALIDATE_CACHE__SHIFT 0x1
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD2_INVALIDATE_CACHE__SHIFT 0x2
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD3_INVALIDATE_CACHE__SHIFT 0x3
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD0_INVALIDATE_CACHE_MASK 0x00000001L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD1_INVALIDATE_CACHE_MASK 0x00000002L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD2_INVALIDATE_CACHE_MASK 0x00000004L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__THREAD3_INVALIDATE_CACHE_MASK 0x00000008L
+#define RLC_GPM_THREAD_INVALIDATE_CACHE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_CLK_COUNT_GFXCLK_LSB
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_GFXCLK_MSB
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_GFXCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_LSB
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_LSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_REFCLK_MSB
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER__SHIFT 0x0
+#define RLC_CLK_COUNT_REFCLK_MSB__COUNTER_MASK 0xFFFFFFFFL
+//RLC_CLK_COUNT_CTRL
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN__SHIFT 0x0
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET__SHIFT 0x1
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE__SHIFT 0x2
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN__SHIFT 0x3
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET__SHIFT 0x4
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE__SHIFT 0x5
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RUN_MASK 0x00000001L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_RESET_MASK 0x00000002L
+#define RLC_CLK_COUNT_CTRL__GFXCLK_SAMPLE_MASK 0x00000004L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RUN_MASK 0x00000008L
+#define RLC_CLK_COUNT_CTRL__REFCLK_RESET_MASK 0x00000010L
+#define RLC_CLK_COUNT_CTRL__REFCLK_SAMPLE_MASK 0x00000020L
+//RLC_CLK_COUNT_STAT
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID__SHIFT 0x0
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID__SHIFT 0x1
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC__SHIFT 0x2
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC__SHIFT 0x3
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC__SHIFT 0x4
+#define RLC_CLK_COUNT_STAT__RESERVED__SHIFT 0x5
+#define RLC_CLK_COUNT_STAT__GFXCLK_VALID_MASK 0x00000001L
+#define RLC_CLK_COUNT_STAT__REFCLK_VALID_MASK 0x00000002L
+#define RLC_CLK_COUNT_STAT__REFCLK_RUN_RESYNC_MASK 0x00000004L
+#define RLC_CLK_COUNT_STAT__REFCLK_RESET_RESYNC_MASK 0x00000008L
+#define RLC_CLK_COUNT_STAT__REFCLK_SAMPLE_RESYNC_MASK 0x00000010L
+#define RLC_CLK_COUNT_STAT__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCG_DOORBELL_CNTL
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCG_DOORBELL_CNTL__RESERVED__SHIFT 0x16
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCG_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+#define RLC_RLCG_DOORBELL_CNTL__RESERVED_MASK 0xFFC00000L
+//RLC_RLCG_DOORBELL_STAT
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCG_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCG_DOORBELL_0_DATA_LO
+#define RLC_RLCG_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_0_DATA_HI
+#define RLC_RLCG_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_1_DATA_LO
+#define RLC_RLCG_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_1_DATA_HI
+#define RLC_RLCG_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_2_DATA_LO
+#define RLC_RLCG_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_2_DATA_HI
+#define RLC_RLCG_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_3_DATA_LO
+#define RLC_RLCG_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCG_DOORBELL_3_DATA_HI
+#define RLC_RLCG_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_32_RES_SEL
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL__SHIFT 0x0
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED__SHIFT 0x6
+#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x0000003FL
+#define RLC_GPU_CLOCK_32_RES_SEL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_GPU_CLOCK_32
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32__SHIFT 0x0
+#define RLC_GPU_CLOCK_32__GPU_CLOCK_32_MASK 0xFFFFFFFFL
+//RLC_PG_CNTL
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE__SHIFT 0x0
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC__SHIFT 0x1
+#define RLC_PG_CNTL__DYN_PER_WGP_PG_ENABLE__SHIFT 0x2
+#define RLC_PG_CNTL__STATIC_PER_WGP_PG_ENABLE__SHIFT 0x3
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE__SHIFT 0x4
+#define RLC_PG_CNTL__RESERVED__SHIFT 0x5
+#define RLC_PG_CNTL__MEM_DS_DISABLE__SHIFT 0xd
+#define RLC_PG_CNTL__PG_OVERRIDE__SHIFT 0xe
+#define RLC_PG_CNTL__CP_PG_DISABLE__SHIFT 0xf
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE__SHIFT 0x10
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE__SHIFT 0x11
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x13
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable__SHIFT 0x15
+#define RLC_PG_CNTL__RESERVED2__SHIFT 0x16
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE__SHIFT 0x17
+#define RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK 0x00000001L
+#define RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK 0x00000002L
+#define RLC_PG_CNTL__DYN_PER_WGP_PG_ENABLE_MASK 0x00000004L
+#define RLC_PG_CNTL__STATIC_PER_WGP_PG_ENABLE_MASK 0x00000008L
+#define RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK 0x00000010L
+#define RLC_PG_CNTL__RESERVED_MASK 0x00001FE0L
+#define RLC_PG_CNTL__MEM_DS_DISABLE_MASK 0x00002000L
+#define RLC_PG_CNTL__PG_OVERRIDE_MASK 0x00004000L
+#define RLC_PG_CNTL__CP_PG_DISABLE_MASK 0x00008000L
+#define RLC_PG_CNTL__CHUB_HANDSHAKE_ENABLE_MASK 0x00010000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK 0x00020000L
+#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK 0x00040000L
+#define RLC_PG_CNTL__RESERVED1_MASK 0x00180000L
+#define RLC_PG_CNTL__Ultra_Low_Voltage_Enable_MASK 0x00200000L
+#define RLC_PG_CNTL__RESERVED2_MASK 0x00400000L
+#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK 0x00800000L
+//RLC_GPM_THREAD_PRIORITY
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY__SHIFT 0x8
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY__SHIFT 0x10
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY__SHIFT 0x18
+#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0x000000FFL
+#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0x0000FF00L
+#define RLC_GPM_THREAD_PRIORITY__THREAD2_PRIORITY_MASK 0x00FF0000L
+#define RLC_GPM_THREAD_PRIORITY__THREAD3_PRIORITY_MASK 0xFF000000L
+//RLC_GPM_THREAD_ENABLE
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE__SHIFT 0x0
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE__SHIFT 0x1
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE__SHIFT 0x2
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE__SHIFT 0x3
+#define RLC_GPM_THREAD_ENABLE__RESERVED__SHIFT 0x4
+#define RLC_GPM_THREAD_ENABLE__THREAD0_ENABLE_MASK 0x00000001L
+#define RLC_GPM_THREAD_ENABLE__THREAD1_ENABLE_MASK 0x00000002L
+#define RLC_GPM_THREAD_ENABLE__THREAD2_ENABLE_MASK 0x00000004L
+#define RLC_GPM_THREAD_ENABLE__THREAD3_ENABLE_MASK 0x00000008L
+#define RLC_GPM_THREAD_ENABLE__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCG_DOORBELL_RANGE
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_CGTT_MGCG_OVERRIDE
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE__SHIFT 0x2
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE__SHIFT 0x3
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE__SHIFT 0x4
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE__SHIFT 0x5
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE__SHIFT 0x6
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE__SHIFT 0x7
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE__SHIFT 0x8
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE__SHIFT 0xa
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_16_11__SHIFT 0xb
+#define RLC_CGTT_MGCG_OVERRIDE__GC_CAC_MGCG_CLK_CNTL__SHIFT 0x11
+#define RLC_CGTT_MGCG_OVERRIDE__SE_CAC_MGCG_CLK_CNTL__SHIFT 0x12
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_19__SHIFT 0x13
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE_MASK 0x00000001L
+#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK 0x00000002L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK 0x00000004L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK 0x00000008L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK 0x00000010L
+#define RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK 0x00000020L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK 0x00000040L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK 0x00000080L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK 0x00000100L
+#define RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+#define RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK 0x00000400L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_16_11_MASK 0x0001F800L
+#define RLC_CGTT_MGCG_OVERRIDE__GC_CAC_MGCG_CLK_CNTL_MASK 0x00020000L
+#define RLC_CGTT_MGCG_OVERRIDE__SE_CAC_MGCG_CLK_CNTL_MASK 0x00040000L
+#define RLC_CGTT_MGCG_OVERRIDE__RESERVED_31_19_MASK 0xFFF80000L
+//RLC_CGCG_CGLS_CTRL
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_DYN_PG_STATUS
+#define RLC_DYN_PG_STATUS__PG_STATUS_WGP_MASK__SHIFT 0x0
+#define RLC_DYN_PG_STATUS__PG_STATUS_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_DYN_PG_REQUEST
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_WGP_MASK__SHIFT 0x0
+#define RLC_DYN_PG_REQUEST__PG_REQUEST_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY
+#define RLC_PG_DELAY__POWER_UP_DELAY__SHIFT 0x0
+#define RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT 0x8
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT 0x10
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT 0x18
+#define RLC_PG_DELAY__POWER_UP_DELAY_MASK 0x000000FFL
+#define RLC_PG_DELAY__POWER_DOWN_DELAY_MASK 0x0000FF00L
+#define RLC_PG_DELAY__CMD_PROPAGATE_DELAY_MASK 0x00FF0000L
+#define RLC_PG_DELAY__MEM_SLEEP_DELAY_MASK 0xFF000000L
+//RLC_WGP_STATUS
+#define RLC_WGP_STATUS__WORK_PENDING__SHIFT 0x0
+#define RLC_WGP_STATUS__WORK_PENDING_MASK 0xFFFFFFFFL
+//RLC_PG_ALWAYS_ON_WGP_MASK
+#define RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK__SHIFT 0x0
+#define RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_MAX_PG_WGP
+#define RLC_MAX_PG_WGP__MAX_POWERED_UP_WGP__SHIFT 0x0
+#define RLC_MAX_PG_WGP__SPARE__SHIFT 0x8
+#define RLC_MAX_PG_WGP__MAX_POWERED_UP_WGP_MASK 0x000000FFL
+#define RLC_MAX_PG_WGP__SPARE_MASK 0xFFFFFF00L
+//RLC_AUTO_PG_CTRL
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN__SHIFT 0x0
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN__SHIFT 0x1
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN__SHIFT 0x2
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT 0x3
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD__SHIFT 0x13
+#define RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK 0x00000001L
+#define RLC_AUTO_PG_CTRL__AUTO_GRBM_REG_SAVE_ON_IDLE_EN_MASK 0x00000002L
+#define RLC_AUTO_PG_CTRL__AUTO_WAKE_UP_EN_MASK 0x00000004L
+#define RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK 0x0007FFF8L
+#define RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK 0xFFF80000L
+//RLC_SERDES_RD_INDEX
+#define RLC_SERDES_RD_INDEX__DATA_REG_ID__SHIFT 0x0
+#define RLC_SERDES_RD_INDEX__SPARE__SHIFT 0x2
+#define RLC_SERDES_RD_INDEX__DATA_REG_ID_MASK 0x00000003L
+#define RLC_SERDES_RD_INDEX__SPARE_MASK 0xFFFFFFFCL
+//RLC_SERDES_RD_DATA_0
+#define RLC_SERDES_RD_DATA_0__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_1
+#define RLC_SERDES_RD_DATA_1__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_2
+#define RLC_SERDES_RD_DATA_2__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_RD_DATA_3
+#define RLC_SERDES_RD_DATA_3__DATA__SHIFT 0x0
+#define RLC_SERDES_RD_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_MASK
+#define RLC_SERDES_MASK__GC_CENTER_HUB_0__SHIFT 0x0
+#define RLC_SERDES_MASK__GC_CENTER_HUB_1__SHIFT 0x1
+#define RLC_SERDES_MASK__RESERVED__SHIFT 0x2
+#define RLC_SERDES_MASK__GC_SE_0__SHIFT 0x10
+#define RLC_SERDES_MASK__GC_SE_1__SHIFT 0x11
+#define RLC_SERDES_MASK__GC_SE_2__SHIFT 0x12
+#define RLC_SERDES_MASK__GC_SE_3__SHIFT 0x13
+#define RLC_SERDES_MASK__GC_SE_4__SHIFT 0x14
+#define RLC_SERDES_MASK__GC_SE_5__SHIFT 0x15
+#define RLC_SERDES_MASK__GC_SE_6__SHIFT 0x16
+#define RLC_SERDES_MASK__GC_SE_7__SHIFT 0x17
+#define RLC_SERDES_MASK__RESERVED_31_24__SHIFT 0x18
+#define RLC_SERDES_MASK__GC_CENTER_HUB_0_MASK 0x00000001L
+#define RLC_SERDES_MASK__GC_CENTER_HUB_1_MASK 0x00000002L
+#define RLC_SERDES_MASK__RESERVED_MASK 0x0000FFFCL
+#define RLC_SERDES_MASK__GC_SE_0_MASK 0x00010000L
+#define RLC_SERDES_MASK__GC_SE_1_MASK 0x00020000L
+#define RLC_SERDES_MASK__GC_SE_2_MASK 0x00040000L
+#define RLC_SERDES_MASK__GC_SE_3_MASK 0x00080000L
+#define RLC_SERDES_MASK__GC_SE_4_MASK 0x00100000L
+#define RLC_SERDES_MASK__GC_SE_5_MASK 0x00200000L
+#define RLC_SERDES_MASK__GC_SE_6_MASK 0x00400000L
+#define RLC_SERDES_MASK__GC_SE_7_MASK 0x00800000L
+#define RLC_SERDES_MASK__RESERVED_31_24_MASK 0xFF000000L
+//RLC_SERDES_CTRL
+#define RLC_SERDES_CTRL__BPM_BROADCAST__SHIFT 0x0
+#define RLC_SERDES_CTRL__BPM_REG_WRITE__SHIFT 0x1
+#define RLC_SERDES_CTRL__BPM_LONG_CMD__SHIFT 0x2
+#define RLC_SERDES_CTRL__BPM_ADDR__SHIFT 0x3
+#define RLC_SERDES_CTRL__REG_ADDR__SHIFT 0x10
+#define RLC_SERDES_CTRL__BPM_BROADCAST_MASK 0x00000001L
+#define RLC_SERDES_CTRL__BPM_REG_WRITE_MASK 0x00000002L
+#define RLC_SERDES_CTRL__BPM_LONG_CMD_MASK 0x00000004L
+#define RLC_SERDES_CTRL__BPM_ADDR_MASK 0x0000FFF8L
+#define RLC_SERDES_CTRL__REG_ADDR_MASK 0x00FF0000L
+//RLC_SERDES_DATA
+#define RLC_SERDES_DATA__DATA__SHIFT 0x0
+#define RLC_SERDES_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SERDES_BUSY
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_0__SHIFT 0x0
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_1__SHIFT 0x1
+#define RLC_SERDES_BUSY__RESERVED__SHIFT 0x2
+#define RLC_SERDES_BUSY__GC_SE_0__SHIFT 0x10
+#define RLC_SERDES_BUSY__GC_SE_1__SHIFT 0x11
+#define RLC_SERDES_BUSY__GC_SE_2__SHIFT 0x12
+#define RLC_SERDES_BUSY__GC_SE_3__SHIFT 0x13
+#define RLC_SERDES_BUSY__GC_SE_4__SHIFT 0x14
+#define RLC_SERDES_BUSY__GC_SE_5__SHIFT 0x15
+#define RLC_SERDES_BUSY__GC_SE_6__SHIFT 0x16
+#define RLC_SERDES_BUSY__GC_SE_7__SHIFT 0x17
+#define RLC_SERDES_BUSY__RESERVED_29_24__SHIFT 0x18
+#define RLC_SERDES_BUSY__RD_FIFO_NOT_EMPTY__SHIFT 0x1e
+#define RLC_SERDES_BUSY__RD_PENDING__SHIFT 0x1f
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_0_MASK 0x00000001L
+#define RLC_SERDES_BUSY__GC_CENTER_HUB_1_MASK 0x00000002L
+#define RLC_SERDES_BUSY__RESERVED_MASK 0x0000FFFCL
+#define RLC_SERDES_BUSY__GC_SE_0_MASK 0x00010000L
+#define RLC_SERDES_BUSY__GC_SE_1_MASK 0x00020000L
+#define RLC_SERDES_BUSY__GC_SE_2_MASK 0x00040000L
+#define RLC_SERDES_BUSY__GC_SE_3_MASK 0x00080000L
+#define RLC_SERDES_BUSY__GC_SE_4_MASK 0x00100000L
+#define RLC_SERDES_BUSY__GC_SE_5_MASK 0x00200000L
+#define RLC_SERDES_BUSY__GC_SE_6_MASK 0x00400000L
+#define RLC_SERDES_BUSY__GC_SE_7_MASK 0x00800000L
+#define RLC_SERDES_BUSY__RESERVED_29_24_MASK 0x3F000000L
+#define RLC_SERDES_BUSY__RD_FIFO_NOT_EMPTY_MASK 0x40000000L
+#define RLC_SERDES_BUSY__RD_PENDING_MASK 0x80000000L
+//RLC_GPM_GENERAL_0
+#define RLC_GPM_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_1
+#define RLC_GPM_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_2
+#define RLC_GPM_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_3
+#define RLC_GPM_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_4
+#define RLC_GPM_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_5
+#define RLC_GPM_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_6
+#define RLC_GPM_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_7
+#define RLC_GPM_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_STATIC_PG_STATUS
+#define RLC_STATIC_PG_STATUS__PG_STATUS_WGP_MASK__SHIFT 0x0
+#define RLC_STATIC_PG_STATUS__PG_STATUS_WGP_MASK_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_16
+#define RLC_GPM_GENERAL_16__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_16__DATA_MASK 0xFFFFFFFFL
+//RLC_PG_DELAY_3
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
+#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
+#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0x000000FFL
+#define RLC_PG_DELAY_3__RESERVED_MASK 0xFFFFFF00L
+//RLC_GPR_REG1
+#define RLC_GPR_REG1__DATA__SHIFT 0x0
+#define RLC_GPR_REG1__DATA_MASK 0xFFFFFFFFL
+//RLC_GPR_REG2
+#define RLC_GPR_REG2__DATA__SHIFT 0x0
+#define RLC_GPR_REG2__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_DISABLE_TH0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_INT__SHIFT 0x0
+#define RLC_GPM_INT_DISABLE_TH0__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_GPM_LEGACY_INT_DISABLE
+#define RLC_GPM_LEGACY_INT_DISABLE__SPP_PVT_INT_CHANGED__SHIFT 0x0
+#define RLC_GPM_LEGACY_INT_DISABLE__CP_RLC_STAT_INVAL_PEND_CHANGED__SHIFT 0x1
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_EOF_INT_CHANGED__SHIFT 0x2
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_PG_CNTL_CHANGED__SHIFT 0x3
+#define RLC_GPM_LEGACY_INT_DISABLE__STORE_LOAD_TIMER3_EXPIRED_T0__SHIFT 0x4
+#define RLC_GPM_LEGACY_INT_DISABLE__SPP_PVT_INT_CHANGED_MASK 0x00000001L
+#define RLC_GPM_LEGACY_INT_DISABLE__CP_RLC_STAT_INVAL_PEND_CHANGED_MASK 0x00000002L
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_EOF_INT_CHANGED_MASK 0x00000004L
+#define RLC_GPM_LEGACY_INT_DISABLE__RLC_PG_CNTL_CHANGED_MASK 0x00000008L
+#define RLC_GPM_LEGACY_INT_DISABLE__STORE_LOAD_TIMER3_EXPIRED_T0_MASK 0x00000010L
+//RLC_GPM_INT_FORCE_TH0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_INT__SHIFT 0x0
+#define RLC_GPM_INT_FORCE_TH0__FORCE_INT_MASK 0xFFFFFFFFL
+//RLC_SRM_CNTL
+#define RLC_SRM_CNTL__SRM_ENABLE__SHIFT 0x0
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR__SHIFT 0x1
+#define RLC_SRM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_SRM_CNTL__SRM_ENABLE_MASK 0x00000001L
+#define RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK 0x00000002L
+#define RLC_SRM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_GPM_COMMAND_STATUS
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_EMPTY_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND_STATUS__FIFO_FULL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND_STATUS__RESERVED_MASK 0xFFFFFFFCL
+//RLC_SRM_INDEX_CNTL_ADDR_0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_0__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_1
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_1__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_2
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_2__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_3
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_3__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_4
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_4__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_5
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_5__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_6
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_6__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_ADDR_7
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_ADDR_7__ADDRESS_MASK 0x0003FFFFL
+//RLC_SRM_INDEX_CNTL_DATA_0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_1
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_2
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_2__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_3
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_3__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_4
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_4__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_5
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_5__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_6
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_6__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_INDEX_CNTL_DATA_7
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA__SHIFT 0x0
+#define RLC_SRM_INDEX_CNTL_DATA_7__DATA_MASK 0xFFFFFFFFL
+//RLC_SRM_STAT
+#define RLC_SRM_STAT__SRM_BUSY__SHIFT 0x0
+#define RLC_SRM_STAT__SRM_BUSY_DELAY__SHIFT 0x1
+#define RLC_SRM_STAT__RESERVED__SHIFT 0x2
+#define RLC_SRM_STAT__SRM_BUSY_MASK 0x00000001L
+#define RLC_SRM_STAT__SRM_BUSY_DELAY_MASK 0x00000002L
+#define RLC_SRM_STAT__RESERVED_MASK 0xFFFFFFFCL
+//RLC_GPM_GENERAL_8
+#define RLC_GPM_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_9
+#define RLC_GPM_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_10
+#define RLC_GPM_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_11
+#define RLC_GPM_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_12
+#define RLC_GPM_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_CNTL_0
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_0__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_0__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_0__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_0__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_0__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_0__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_0__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_1
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_1__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_1__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_1__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_1__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_1__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_1__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_1__RESERVED_MASK 0xC0000000L
+//RLC_GPM_UTCL1_CNTL_2
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE__SHIFT 0x18
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS__SHIFT 0x19
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE__SHIFT 0x1a
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED__SHIFT 0x1e
+#define RLC_GPM_UTCL1_CNTL_2__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_GPM_UTCL1_CNTL_2__DROP_MODE_MASK 0x01000000L
+#define RLC_GPM_UTCL1_CNTL_2__BYPASS_MASK 0x02000000L
+#define RLC_GPM_UTCL1_CNTL_2__INVALIDATE_MASK 0x04000000L
+#define RLC_GPM_UTCL1_CNTL_2__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_GPM_UTCL1_CNTL_2__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_GPM_UTCL1_CNTL_2__RESERVED_MASK 0xC0000000L
+//RLC_SPM_UTCL1_CNTL
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT__SHIFT 0x0
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE__SHIFT 0x18
+#define RLC_SPM_UTCL1_CNTL__BYPASS__SHIFT 0x19
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE__SHIFT 0x1a
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE__SHIFT 0x1b
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP__SHIFT 0x1c
+#define RLC_SPM_UTCL1_CNTL__RESERVED__SHIFT 0x1e
+#define RLC_SPM_UTCL1_CNTL__XNACK_REDO_TIMER_CNT_MASK 0x000FFFFFL
+#define RLC_SPM_UTCL1_CNTL__DROP_MODE_MASK 0x01000000L
+#define RLC_SPM_UTCL1_CNTL__BYPASS_MASK 0x02000000L
+#define RLC_SPM_UTCL1_CNTL__INVALIDATE_MASK 0x04000000L
+#define RLC_SPM_UTCL1_CNTL__FRAG_LIMIT_MODE_MASK 0x08000000L
+#define RLC_SPM_UTCL1_CNTL__FORCE_SNOOP_MASK 0x10000000L
+#define RLC_SPM_UTCL1_CNTL__RESERVED_MASK 0xC0000000L
+//RLC_UTCL1_STATUS_2
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY__SHIFT 0x0
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY__SHIFT 0x1
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY__SHIFT 0x2
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY__SHIFT 0x3
+#define RLC_UTCL1_STATUS_2__RESERVED_1__SHIFT 0x4
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans__SHIFT 0x5
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans__SHIFT 0x6
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans__SHIFT 0x7
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans__SHIFT 0x8
+#define RLC_UTCL1_STATUS_2__RESERVED__SHIFT 0x9
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_BUSY_MASK 0x00000001L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_BUSY_MASK 0x00000002L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_BUSY_MASK 0x00000004L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_BUSY_MASK 0x00000008L
+#define RLC_UTCL1_STATUS_2__RESERVED_1_MASK 0x00000010L
+#define RLC_UTCL1_STATUS_2__GPM_TH0_UTCL1_StallOnTrans_MASK 0x00000020L
+#define RLC_UTCL1_STATUS_2__GPM_TH1_UTCL1_StallOnTrans_MASK 0x00000040L
+#define RLC_UTCL1_STATUS_2__GPM_TH2_UTCL1_StallOnTrans_MASK 0x00000080L
+#define RLC_UTCL1_STATUS_2__SPM_UTCL1_StallOnTrans_MASK 0x00000100L
+#define RLC_UTCL1_STATUS_2__RESERVED_MASK 0xFFFFFE00L
+//RLC_SPM_UTCL1_ERROR_1
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_SPM_UTCL1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_SPM_UTCL1_ERROR_2
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_SPM_UTCL1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH0_ERROR_1
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH0_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH0_ERROR_2
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH0_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH1_ERROR_1
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH1_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH1_ERROR_2
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH1_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_GPM_UTCL1_TH2_ERROR_1
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid__SHIFT 0x2
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB__SHIFT 0x6
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqError_MASK 0x00000003L
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorVmid_MASK 0x0000003CL
+#define RLC_GPM_UTCL1_TH2_ERROR_1__Translated_ReqErrorAddr_MSB_MASK 0x000003C0L
+//RLC_GPM_UTCL1_TH2_ERROR_2
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB__SHIFT 0x0
+#define RLC_GPM_UTCL1_TH2_ERROR_2__Translated_ReqErrorAddr_LSB_MASK 0xFFFFFFFFL
+//RLC_CGCG_CGLS_CTRL_3D
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN__SHIFT 0x0
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN__SHIFT 0x1
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT 0x2
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT 0x8
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER__SHIFT 0x1b
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL__SHIFT 0x1c
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE__SHIFT 0x1d
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN__SHIFT 0x1f
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK 0x00000001L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK 0x00000002L
+#define RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK 0x000000FCL
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK 0x07FFFF00L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_CONTROLLER_MASK 0x08000000L
+#define RLC_CGCG_CGLS_CTRL_3D__CGCG_REG_CTRL_MASK 0x10000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SLEEP_MODE_MASK 0x60000000L
+#define RLC_CGCG_CGLS_CTRL_3D__SIM_SILICON_EN_MASK 0x80000000L
+//RLC_CGCG_RAMP_CTRL_3D
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT__SHIFT 0x0
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT__SHIFT 0x4
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT__SHIFT 0x8
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT__SHIFT 0xc
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT__SHIFT 0x10
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT__SHIFT 0x1c
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_START_UNIT_MASK 0x0000000FL
+#define RLC_CGCG_RAMP_CTRL_3D__DOWN_DIV_STEP_UNIT_MASK 0x000000F0L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_START_UNIT_MASK 0x00000F00L
+#define RLC_CGCG_RAMP_CTRL_3D__UP_DIV_STEP_UNIT_MASK 0x0000F000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_CNT_MASK 0x0FFF0000L
+#define RLC_CGCG_RAMP_CTRL_3D__STEP_DELAY_UNIT_MASK 0xF0000000L
+//RLC_SEMAPHORE_0
+#define RLC_SEMAPHORE_0__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_0__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_0__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_0__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_1
+#define RLC_SEMAPHORE_1__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_1__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_1__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_1__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_2
+#define RLC_SEMAPHORE_2__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_2__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_2__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_SEMAPHORE_3
+#define RLC_SEMAPHORE_3__CLIENT_ID__SHIFT 0x0
+#define RLC_SEMAPHORE_3__RESERVED__SHIFT 0x5
+#define RLC_SEMAPHORE_3__CLIENT_ID_MASK 0x0000001FL
+#define RLC_SEMAPHORE_3__RESERVED_MASK 0xFFFFFFE0L
+//RLC_PACE_INT_STAT
+#define RLC_PACE_INT_STAT__STATUS__SHIFT 0x0
+#define RLC_PACE_INT_STAT__STATUS_MASK 0xFFFFFFFFL
+//RLC_UTCL1_STATUS
+#define RLC_UTCL1_STATUS__FAULT_DETECTED__SHIFT 0x0
+#define RLC_UTCL1_STATUS__RETRY_DETECTED__SHIFT 0x1
+#define RLC_UTCL1_STATUS__PRT_DETECTED__SHIFT 0x2
+#define RLC_UTCL1_STATUS__RESERVED__SHIFT 0x3
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID__SHIFT 0x8
+#define RLC_UTCL1_STATUS__RESERVED_1__SHIFT 0xe
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID__SHIFT 0x10
+#define RLC_UTCL1_STATUS__RESERVED_2__SHIFT 0x16
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID__SHIFT 0x18
+#define RLC_UTCL1_STATUS__RESERVED_3__SHIFT 0x1e
+#define RLC_UTCL1_STATUS__FAULT_DETECTED_MASK 0x00000001L
+#define RLC_UTCL1_STATUS__RETRY_DETECTED_MASK 0x00000002L
+#define RLC_UTCL1_STATUS__PRT_DETECTED_MASK 0x00000004L
+#define RLC_UTCL1_STATUS__RESERVED_MASK 0x000000F8L
+#define RLC_UTCL1_STATUS__FAULT_UTCL1ID_MASK 0x00003F00L
+#define RLC_UTCL1_STATUS__RESERVED_1_MASK 0x0000C000L
+#define RLC_UTCL1_STATUS__RETRY_UTCL1ID_MASK 0x003F0000L
+#define RLC_UTCL1_STATUS__RESERVED_2_MASK 0x00C00000L
+#define RLC_UTCL1_STATUS__PRT_UTCL1ID_MASK 0x3F000000L
+#define RLC_UTCL1_STATUS__RESERVED_3_MASK 0xC0000000L
+//RLC_R2I_CNTL_0
+#define RLC_R2I_CNTL_0__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_0__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_1
+#define RLC_R2I_CNTL_1__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_1__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_2
+#define RLC_R2I_CNTL_2__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_2__Data_MASK 0xFFFFFFFFL
+//RLC_R2I_CNTL_3
+#define RLC_R2I_CNTL_3__Data__SHIFT 0x0
+#define RLC_R2I_CNTL_3__Data_MASK 0xFFFFFFFFL
+//RLC_GPM_INT_STAT_TH0
+#define RLC_GPM_INT_STAT_TH0__STATUS__SHIFT 0x0
+#define RLC_GPM_INT_STAT_TH0__STATUS_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_13
+#define RLC_GPM_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_14
+#define RLC_GPM_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_GPM_GENERAL_15
+#define RLC_GPM_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_GPM_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_GPU_CLOCK_COUNT_LSB_2
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_2__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_2
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_2__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_PACE_INT_DISABLE
+#define RLC_PACE_INT_DISABLE__DISABLE_INT__SHIFT 0x0
+#define RLC_PACE_INT_DISABLE__DISABLE_INT_MASK 0xFFFFFFFFL
+//RLC_CAPTURE_GPU_CLOCK_COUNT_2
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE__SHIFT 0x0
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED__SHIFT 0x1
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__CAPTURE_MASK 0x00000001L
+#define RLC_CAPTURE_GPU_CLOCK_COUNT_2__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_DOORBELL_RANGE
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCV_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCV_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCV_DOORBELL_CNTL
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCV_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_RLCV_DOORBELL_STAT
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCV_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCV_DOORBELL_0_DATA_LO
+#define RLC_RLCV_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_0_DATA_HI
+#define RLC_RLCV_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_1_DATA_LO
+#define RLC_RLCV_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_1_DATA_HI
+#define RLC_RLCV_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_2_DATA_LO
+#define RLC_RLCV_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_2_DATA_HI
+#define RLC_RLCV_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_3_DATA_LO
+#define RLC_RLCV_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCV_DOORBELL_3_DATA_HI
+#define RLC_RLCV_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCV_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_LSB_1
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_LSB_1__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_MSB_1
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_MSB_1__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_RLCV_SPARE_INT
+#define RLC_RLCV_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_FIREWALL_VIOLATION
+#define RLC_FIREWALL_VIOLATION__ADDR__SHIFT 0x0
+#define RLC_FIREWALL_VIOLATION__ADDR_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_INT_0
+#define RLC_PACE_TIMER_INT_0__TIMER__SHIFT 0x0
+#define RLC_PACE_TIMER_INT_0__TIMER_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_INT_1
+#define RLC_PACE_TIMER_INT_1__TIMER__SHIFT 0x0
+#define RLC_PACE_TIMER_INT_1__TIMER_MASK 0xFFFFFFFFL
+//RLC_PACE_TIMER_CTRL
+#define RLC_PACE_TIMER_CTRL__TIMER_0_EN__SHIFT 0x0
+#define RLC_PACE_TIMER_CTRL__TIMER_1_EN__SHIFT 0x1
+#define RLC_PACE_TIMER_CTRL__TIMER_0_AUTO_REARM__SHIFT 0x2
+#define RLC_PACE_TIMER_CTRL__TIMER_1_AUTO_REARM__SHIFT 0x3
+#define RLC_PACE_TIMER_CTRL__TIMER_0_INT_CLEAR__SHIFT 0x4
+#define RLC_PACE_TIMER_CTRL__TIMER_1_INT_CLEAR__SHIFT 0x5
+#define RLC_PACE_TIMER_CTRL__RESERVED__SHIFT 0x6
+#define RLC_PACE_TIMER_CTRL__TIMER_0_EN_MASK 0x00000001L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_EN_MASK 0x00000002L
+#define RLC_PACE_TIMER_CTRL__TIMER_0_AUTO_REARM_MASK 0x00000004L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_AUTO_REARM_MASK 0x00000008L
+#define RLC_PACE_TIMER_CTRL__TIMER_0_INT_CLEAR_MASK 0x00000010L
+#define RLC_PACE_TIMER_CTRL__TIMER_1_INT_CLEAR_MASK 0x00000020L
+#define RLC_PACE_TIMER_CTRL__RESERVED_MASK 0xFFFFFFC0L
+//RLC_SMU_CLK_REQ
+#define RLC_SMU_CLK_REQ__VALID__SHIFT 0x0
+#define RLC_SMU_CLK_REQ__VALID_MASK 0x00000001L
+//RLC_CP_STAT_INVAL_STAT
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND__SHIFT 0x0
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND__SHIFT 0x1
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND__SHIFT 0x2
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_CHANGED__SHIFT 0x3
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_CHANGED__SHIFT 0x4
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_CHANGED__SHIFT 0x5
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_MASK 0x00000001L
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_MASK 0x00000002L
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_MASK 0x00000004L
+#define RLC_CP_STAT_INVAL_STAT__CPG_STAT_INVAL_PEND_CHANGED_MASK 0x00000008L
+#define RLC_CP_STAT_INVAL_STAT__CPC_STAT_INVAL_PEND_CHANGED_MASK 0x00000010L
+#define RLC_CP_STAT_INVAL_STAT__CPF_STAT_INVAL_PEND_CHANGED_MASK 0x00000020L
+//RLC_CP_STAT_INVAL_CTRL
+#define RLC_CP_STAT_INVAL_CTRL__CPG_STAT_INVAL_PEND_EN__SHIFT 0x0
+#define RLC_CP_STAT_INVAL_CTRL__CPC_STAT_INVAL_PEND_EN__SHIFT 0x1
+#define RLC_CP_STAT_INVAL_CTRL__CPF_STAT_INVAL_PEND_EN__SHIFT 0x2
+#define RLC_CP_STAT_INVAL_CTRL__CPG_STAT_INVAL_PEND_EN_MASK 0x00000001L
+#define RLC_CP_STAT_INVAL_CTRL__CPC_STAT_INVAL_PEND_EN_MASK 0x00000002L
+#define RLC_CP_STAT_INVAL_CTRL__CPF_STAT_INVAL_PEND_EN_MASK 0x00000004L
+//RLC_SPARE
+#define RLC_SPARE__SPARE__SHIFT 0x0
+#define RLC_SPARE__SPARE_MASK 0xFFFFFFFFL
+//RLC_SPP_CTRL
+#define RLC_SPP_CTRL__ENABLE__SHIFT 0x0
+#define RLC_SPP_CTRL__ENABLE_PPROF__SHIFT 0x1
+#define RLC_SPP_CTRL__ENABLE_PWR_OPT__SHIFT 0x2
+#define RLC_SPP_CTRL__PAUSE__SHIFT 0x3
+#define RLC_SPP_CTRL__ENABLE_MASK 0x00000001L
+#define RLC_SPP_CTRL__ENABLE_PPROF_MASK 0x00000002L
+#define RLC_SPP_CTRL__ENABLE_PWR_OPT_MASK 0x00000004L
+#define RLC_SPP_CTRL__PAUSE_MASK 0x00000008L
+//RLC_SPP_SHADER_PROFILE_EN
+#define RLC_SPP_SHADER_PROFILE_EN__PS_ENABLE__SHIFT 0x0
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_1__SHIFT 0x1
+#define RLC_SPP_SHADER_PROFILE_EN__GS_ENABLE__SHIFT 0x2
+#define RLC_SPP_SHADER_PROFILE_EN__HS_ENABLE__SHIFT 0x3
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_ENABLE__SHIFT 0x4
+#define RLC_SPP_SHADER_PROFILE_EN__CS_ENABLE__SHIFT 0x5
+#define RLC_SPP_SHADER_PROFILE_EN__PS_STOP_CONDITION__SHIFT 0x6
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_7__SHIFT 0x7
+#define RLC_SPP_SHADER_PROFILE_EN__GS_STOP_CONDITION__SHIFT 0x8
+#define RLC_SPP_SHADER_PROFILE_EN__HS_STOP_CONDITION__SHIFT 0x9
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_STOP_CONDITION__SHIFT 0xa
+#define RLC_SPP_SHADER_PROFILE_EN__CS_STOP_CONDITION__SHIFT 0xb
+#define RLC_SPP_SHADER_PROFILE_EN__PS_START_CONDITION__SHIFT 0xc
+#define RLC_SPP_SHADER_PROFILE_EN__CS_START_CONDITION__SHIFT 0xd
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_MISS__SHIFT 0xe
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_UNLOCKED__SHIFT 0xf
+#define RLC_SPP_SHADER_PROFILE_EN__ENABLE_PROF_INFO_LOCK__SHIFT 0x10
+#define RLC_SPP_SHADER_PROFILE_EN__PS_ENABLE_MASK 0x00000001L
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_1_MASK 0x00000002L
+#define RLC_SPP_SHADER_PROFILE_EN__GS_ENABLE_MASK 0x00000004L
+#define RLC_SPP_SHADER_PROFILE_EN__HS_ENABLE_MASK 0x00000008L
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_ENABLE_MASK 0x00000010L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_ENABLE_MASK 0x00000020L
+#define RLC_SPP_SHADER_PROFILE_EN__PS_STOP_CONDITION_MASK 0x00000040L
+#define RLC_SPP_SHADER_PROFILE_EN__RESERVED_7_MASK 0x00000080L
+#define RLC_SPP_SHADER_PROFILE_EN__GS_STOP_CONDITION_MASK 0x00000100L
+#define RLC_SPP_SHADER_PROFILE_EN__HS_STOP_CONDITION_MASK 0x00000200L
+#define RLC_SPP_SHADER_PROFILE_EN__CSG_STOP_CONDITION_MASK 0x00000400L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_STOP_CONDITION_MASK 0x00000800L
+#define RLC_SPP_SHADER_PROFILE_EN__PS_START_CONDITION_MASK 0x00001000L
+#define RLC_SPP_SHADER_PROFILE_EN__CS_START_CONDITION_MASK 0x00002000L
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_MISS_MASK 0x00004000L
+#define RLC_SPP_SHADER_PROFILE_EN__FORCE_UNLOCKED_MASK 0x00008000L
+#define RLC_SPP_SHADER_PROFILE_EN__ENABLE_PROF_INFO_LOCK_MASK 0x00010000L
+//RLC_SPP_SSF_CAPTURE_EN
+#define RLC_SPP_SSF_CAPTURE_EN__PS_ENABLE__SHIFT 0x0
+#define RLC_SPP_SSF_CAPTURE_EN__RESERVED_1__SHIFT 0x1
+#define RLC_SPP_SSF_CAPTURE_EN__GS_ENABLE__SHIFT 0x2
+#define RLC_SPP_SSF_CAPTURE_EN__HS_ENABLE__SHIFT 0x3
+#define RLC_SPP_SSF_CAPTURE_EN__CSG_ENABLE__SHIFT 0x4
+#define RLC_SPP_SSF_CAPTURE_EN__CS_ENABLE__SHIFT 0x5
+#define RLC_SPP_SSF_CAPTURE_EN__PS_ENABLE_MASK 0x00000001L
+#define RLC_SPP_SSF_CAPTURE_EN__RESERVED_1_MASK 0x00000002L
+#define RLC_SPP_SSF_CAPTURE_EN__GS_ENABLE_MASK 0x00000004L
+#define RLC_SPP_SSF_CAPTURE_EN__HS_ENABLE_MASK 0x00000008L
+#define RLC_SPP_SSF_CAPTURE_EN__CSG_ENABLE_MASK 0x00000010L
+#define RLC_SPP_SSF_CAPTURE_EN__CS_ENABLE_MASK 0x00000020L
+//RLC_SPP_SSF_THRESHOLD_0
+#define RLC_SPP_SSF_THRESHOLD_0__PS_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_0__RESERVED__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_0__PS_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_0__RESERVED_MASK 0xFFFF0000L
+//RLC_SPP_SSF_THRESHOLD_1
+#define RLC_SPP_SSF_THRESHOLD_1__GS_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_1__HS_THRESHOLD__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_1__GS_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_1__HS_THRESHOLD_MASK 0xFFFF0000L
+//RLC_SPP_SSF_THRESHOLD_2
+#define RLC_SPP_SSF_THRESHOLD_2__CSG_THRESHOLD__SHIFT 0x0
+#define RLC_SPP_SSF_THRESHOLD_2__CS_THRESHOLD__SHIFT 0x10
+#define RLC_SPP_SSF_THRESHOLD_2__CSG_THRESHOLD_MASK 0x0000FFFFL
+#define RLC_SPP_SSF_THRESHOLD_2__CS_THRESHOLD_MASK 0xFFFF0000L
+//RLC_SPP_INFLIGHT_RD_ADDR
+#define RLC_SPP_INFLIGHT_RD_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_INFLIGHT_RD_ADDR__ADDR_MASK 0x0000001FL
+//RLC_SPP_INFLIGHT_RD_DATA
+#define RLC_SPP_INFLIGHT_RD_DATA__DATA__SHIFT 0x0
+#define RLC_SPP_INFLIGHT_RD_DATA__DATA_MASK 0xFFFFFFFFL
+//RLC_SPP_PROF_INFO_1
+#define RLC_SPP_PROF_INFO_1__SH_ID__SHIFT 0x0
+#define RLC_SPP_PROF_INFO_1__SH_ID_MASK 0xFFFFFFFFL
+//RLC_SPP_PROF_INFO_2
+#define RLC_SPP_PROF_INFO_2__SH_TYPE__SHIFT 0x0
+#define RLC_SPP_PROF_INFO_2__CAM_HIT__SHIFT 0x4
+#define RLC_SPP_PROF_INFO_2__CAM_LOCK__SHIFT 0x5
+#define RLC_SPP_PROF_INFO_2__CAM_CONFLICT__SHIFT 0x6
+#define RLC_SPP_PROF_INFO_2__SH_TYPE_MASK 0x0000000FL
+#define RLC_SPP_PROF_INFO_2__CAM_HIT_MASK 0x00000010L
+#define RLC_SPP_PROF_INFO_2__CAM_LOCK_MASK 0x00000020L
+#define RLC_SPP_PROF_INFO_2__CAM_CONFLICT_MASK 0x00000040L
+//RLC_SPP_GLOBAL_SH_ID
+#define RLC_SPP_GLOBAL_SH_ID__SH_ID__SHIFT 0x0
+#define RLC_SPP_GLOBAL_SH_ID__SH_ID_MASK 0xFFFFFFFFL
+//RLC_SPP_GLOBAL_SH_ID_VALID
+#define RLC_SPP_GLOBAL_SH_ID_VALID__VALID__SHIFT 0x0
+#define RLC_SPP_GLOBAL_SH_ID_VALID__VALID_MASK 0x00000001L
+//RLC_SPP_STATUS
+#define RLC_SPP_STATUS__RESERVED_0__SHIFT 0x0
+#define RLC_SPP_STATUS__SSF_BUSY__SHIFT 0x1
+#define RLC_SPP_STATUS__EVENT_ARB_BUSY__SHIFT 0x2
+#define RLC_SPP_STATUS__SPP_BUSY__SHIFT 0x1f
+#define RLC_SPP_STATUS__RESERVED_0_MASK 0x00000001L
+#define RLC_SPP_STATUS__SSF_BUSY_MASK 0x00000002L
+#define RLC_SPP_STATUS__EVENT_ARB_BUSY_MASK 0x00000004L
+#define RLC_SPP_STATUS__SPP_BUSY_MASK 0x80000000L
+//RLC_SPP_PVT_STAT_0
+#define RLC_SPP_PVT_STAT_0__LEVEL_0_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_0__LEVEL_1_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_0__LEVEL_2_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_0__LEVEL_3_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_0__LEVEL_0_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_0__LEVEL_1_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_0__LEVEL_2_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_0__LEVEL_3_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_1
+#define RLC_SPP_PVT_STAT_1__LEVEL_4_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_1__LEVEL_5_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_1__LEVEL_6_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_1__LEVEL_7_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_1__LEVEL_4_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_1__LEVEL_5_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_1__LEVEL_6_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_1__LEVEL_7_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_2
+#define RLC_SPP_PVT_STAT_2__LEVEL_8_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_2__LEVEL_9_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_2__LEVEL_10_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_2__LEVEL_11_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_2__LEVEL_8_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_2__LEVEL_9_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_2__LEVEL_10_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_2__LEVEL_11_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_STAT_3
+#define RLC_SPP_PVT_STAT_3__LEVEL_12_COUNTER__SHIFT 0x0
+#define RLC_SPP_PVT_STAT_3__LEVEL_13_COUNTER__SHIFT 0x8
+#define RLC_SPP_PVT_STAT_3__LEVEL_14_COUNTER__SHIFT 0x10
+#define RLC_SPP_PVT_STAT_3__LEVEL_15_COUNTER__SHIFT 0x18
+#define RLC_SPP_PVT_STAT_3__LEVEL_12_COUNTER_MASK 0x000000FFL
+#define RLC_SPP_PVT_STAT_3__LEVEL_13_COUNTER_MASK 0x0000FF00L
+#define RLC_SPP_PVT_STAT_3__LEVEL_14_COUNTER_MASK 0x00FF0000L
+#define RLC_SPP_PVT_STAT_3__LEVEL_15_COUNTER_MASK 0xFF000000L
+//RLC_SPP_PVT_LEVEL_MAX
+#define RLC_SPP_PVT_LEVEL_MAX__LEVEL__SHIFT 0x0
+#define RLC_SPP_PVT_LEVEL_MAX__LEVEL_MASK 0x0000000FL
+//RLC_SPP_STALL_STATE_UPDATE
+#define RLC_SPP_STALL_STATE_UPDATE__STALL__SHIFT 0x0
+#define RLC_SPP_STALL_STATE_UPDATE__ENABLE__SHIFT 0x1
+#define RLC_SPP_STALL_STATE_UPDATE__STALL_MASK 0x00000001L
+#define RLC_SPP_STALL_STATE_UPDATE__ENABLE_MASK 0x00000002L
+//RLC_SPP_PBB_INFO
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE__SHIFT 0x0
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_VALID__SHIFT 0x1
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE__SHIFT 0x2
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_VALID__SHIFT 0x3
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_MASK 0x00000001L
+#define RLC_SPP_PBB_INFO__PIPE0_OVERRIDE_VALID_MASK 0x00000002L
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_MASK 0x00000004L
+#define RLC_SPP_PBB_INFO__PIPE1_OVERRIDE_VALID_MASK 0x00000008L
+//RLC_SPP_RESET
+#define RLC_SPP_RESET__SSF_RESET__SHIFT 0x0
+#define RLC_SPP_RESET__EVENT_ARB_RESET__SHIFT 0x1
+#define RLC_SPP_RESET__CAM_RESET__SHIFT 0x2
+#define RLC_SPP_RESET__PVT_RESET__SHIFT 0x3
+#define RLC_SPP_RESET__SSF_RESET_MASK 0x00000001L
+#define RLC_SPP_RESET__EVENT_ARB_RESET_MASK 0x00000002L
+#define RLC_SPP_RESET__CAM_RESET_MASK 0x00000004L
+#define RLC_SPP_RESET__PVT_RESET_MASK 0x00000008L
+//RLC_RLCP_DOORBELL_RANGE
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_RLCP_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_RLCP_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCP_DOORBELL_CNTL
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_RLCP_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_RLCP_DOORBELL_STAT
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_RLCP_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_RLCP_DOORBELL_0_DATA_LO
+#define RLC_RLCP_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_0_DATA_HI
+#define RLC_RLCP_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_1_DATA_LO
+#define RLC_RLCP_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_1_DATA_HI
+#define RLC_RLCP_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_2_DATA_LO
+#define RLC_RLCP_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_2_DATA_HI
+#define RLC_RLCP_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_3_DATA_LO
+#define RLC_RLCP_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCP_DOORBELL_3_DATA_HI
+#define RLC_RLCP_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_RLCP_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_CAC_MASK_CNTL
+#define RLC_CAC_MASK_CNTL__RLC_CAC_MASK__SHIFT 0x0
+#define RLC_CAC_MASK_CNTL__RLC_CAC_MASK_MASK 0xFFFFFFFFL
+//RLC_POWER_RESIDENCY_CNTR_CTRL
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_POWER_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_CLK_RESIDENCY_CNTR_CTRL
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_CLK_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_DS_RESIDENCY_CNTR_CTRL
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_DS_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_DS_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_ULV_RESIDENCY_CNTR_CTRL
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_ULV_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_PCC_RESIDENCY_CNTR_CTRL
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__EVENT_SEL__SHIFT 0x5
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x9
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__EVENT_SEL_MASK 0x000001E0L
+#define RLC_PCC_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFE00L
+//RLC_GENERAL_RESIDENCY_CNTR_CTRL
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE__SHIFT 0x1
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_ACK__SHIFT 0x2
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_ACK__SHIFT 0x3
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW__SHIFT 0x4
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESERVED__SHIFT 0x5
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_MASK 0x00000001L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_MASK 0x00000002L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESET_ACK_MASK 0x00000004L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__ENABLE_ACK_MASK 0x00000008L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__COUNTER_OVERFLOW_MASK 0x00000010L
+#define RLC_GENERAL_RESIDENCY_CNTR_CTRL__RESERVED_MASK 0xFFFFFFE0L
+//RLC_POWER_RESIDENCY_EVENT_CNTR
+#define RLC_POWER_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_CLK_RESIDENCY_EVENT_CNTR
+#define RLC_CLK_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_DS_RESIDENCY_EVENT_CNTR
+#define RLC_DS_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_DS_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_ULV_RESIDENCY_EVENT_CNTR
+#define RLC_ULV_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_PCC_RESIDENCY_EVENT_CNTR
+#define RLC_PCC_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GENERAL_RESIDENCY_EVENT_CNTR
+#define RLC_GENERAL_RESIDENCY_EVENT_CNTR__DATA__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_EVENT_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_POWER_RESIDENCY_REF_CNTR
+#define RLC_POWER_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_POWER_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_CLK_RESIDENCY_REF_CNTR
+#define RLC_CLK_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_CLK_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_DS_RESIDENCY_REF_CNTR
+#define RLC_DS_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_DS_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_ULV_RESIDENCY_REF_CNTR
+#define RLC_ULV_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_ULV_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_PCC_RESIDENCY_REF_CNTR
+#define RLC_PCC_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_PCC_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GENERAL_RESIDENCY_REF_CNTR
+#define RLC_GENERAL_RESIDENCY_REF_CNTR__DATA__SHIFT 0x0
+#define RLC_GENERAL_RESIDENCY_REF_CNTR__DATA_MASK 0xFFFFFFFFL
+//RLC_GFX_IH_CLIENT_CTRL
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_MASK__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_MASK__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_MASK__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_MASK__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_MASK__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_15__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_ERROR_CLEAR__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_ERROR_CLEAR__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_ERROR_CLEAR__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_ERROR_CLEAR__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_ERROR_CLEAR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_31__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_MASK_MASK 0x000000FFL
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_MASK_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_MASK_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_MASK_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_MASK_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_15_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_CTRL__SE_INTERRUPT_ERROR_CLEAR_MASK 0x00FF0000L
+#define RLC_GFX_IH_CLIENT_CTRL__SDMA_INTERRUPT_ERROR_CLEAR_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_CTRL__UTCL2_INTERRUPT_ERROR_CLEAR_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_CTRL__PMM_INTERRUPT_ERROR_CLEAR_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_CTRL__FED_INTERRUPT_ERROR_CLEAR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_CTRL__RESERVED_31_MASK 0x80000000L
+//RLC_GFX_IH_ARBITER_STAT
+#define RLC_GFX_IH_ARBITER_STAT__CLIENT_GRANTED__SHIFT 0x0
+#define RLC_GFX_IH_ARBITER_STAT__RESERVED__SHIFT 0x10
+#define RLC_GFX_IH_ARBITER_STAT__LAST_CLIENT_GRANTED__SHIFT 0x1c
+#define RLC_GFX_IH_ARBITER_STAT__CLIENT_GRANTED_MASK 0x0000FFFFL
+#define RLC_GFX_IH_ARBITER_STAT__RESERVED_MASK 0x0FFF0000L
+#define RLC_GFX_IH_ARBITER_STAT__LAST_CLIENT_GRANTED_MASK 0xF0000000L
+//RLC_GFX_IH_CLIENT_SE_STAT_L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE0_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE1_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE2_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_L__SE3_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_SE_STAT_H
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE4_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE5_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE6_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SE_STAT_H__SE7_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_SDMA_STAT
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LEVEL__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LOADING__SHIFT 0x1c
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_OVERFLOW__SHIFT 0x1d
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_PROTOCOL_ERROR__SHIFT 0x1e
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_RESERVED__SHIFT 0x1f
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA0_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA1_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA2_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LEVEL_MASK 0x0F000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_LOADING_MASK 0x10000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_BUFFER_OVERFLOW_MASK 0x20000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_PROTOCOL_ERROR_MASK 0x40000000L
+#define RLC_GFX_IH_CLIENT_SDMA_STAT__SDMA3_RESERVED_MASK 0x80000000L
+//RLC_GFX_IH_CLIENT_OTHER_STAT
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LEVEL__SHIFT 0x0
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LOADING__SHIFT 0x4
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_OVERFLOW__SHIFT 0x5
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_PROTOCOL_ERROR__SHIFT 0x6
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_RESERVED__SHIFT 0x7
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LEVEL__SHIFT 0x8
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LOADING__SHIFT 0xc
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_OVERFLOW__SHIFT 0xd
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_PROTOCOL_ERROR__SHIFT 0xe
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_RESERVED__SHIFT 0xf
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LEVEL__SHIFT 0x10
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LOADING__SHIFT 0x14
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_OVERFLOW__SHIFT 0x15
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_PROTOCOL_ERROR__SHIFT 0x16
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_RESERVED__SHIFT 0x17
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__RESERVED_31_24__SHIFT 0x18
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LEVEL_MASK 0x0000000FL
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_LOADING_MASK 0x00000010L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_BUFFER_OVERFLOW_MASK 0x00000020L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_PROTOCOL_ERROR_MASK 0x00000040L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__UTCL2_RESERVED_MASK 0x00000080L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LEVEL_MASK 0x00000F00L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_LOADING_MASK 0x00001000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_BUFFER_OVERFLOW_MASK 0x00002000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_PROTOCOL_ERROR_MASK 0x00004000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__PMM_RESERVED_MASK 0x00008000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LEVEL_MASK 0x000F0000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_LOADING_MASK 0x00100000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_BUFFER_OVERFLOW_MASK 0x00200000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_PROTOCOL_ERROR_MASK 0x00400000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__FED_RESERVED_MASK 0x00800000L
+#define RLC_GFX_IH_CLIENT_OTHER_STAT__RESERVED_31_24_MASK 0xFF000000L
+//RLC_SPM_GLOBAL_DELAY_IND_ADDR
+#define RLC_SPM_GLOBAL_DELAY_IND_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_GLOBAL_DELAY_IND_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_GLOBAL_DELAY_IND_DATA
+#define RLC_SPM_GLOBAL_DELAY_IND_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_GLOBAL_DELAY_IND_DATA__DATA_MASK 0x0000003FL
+//RLC_SPM_SE_DELAY_IND_ADDR
+#define RLC_SPM_SE_DELAY_IND_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPM_SE_DELAY_IND_ADDR__ADDR_MASK 0x00000FFFL
+//RLC_SPM_SE_DELAY_IND_DATA
+#define RLC_SPM_SE_DELAY_IND_DATA__DATA__SHIFT 0x0
+#define RLC_SPM_SE_DELAY_IND_DATA__DATA_MASK 0x0000003FL
+//RLC_LX6_CNTL
+#define RLC_LX6_CNTL__BRESET__SHIFT 0x0
+#define RLC_LX6_CNTL__RUNSTALL__SHIFT 0x1
+#define RLC_LX6_CNTL__PDEBUG_ENABLE__SHIFT 0x2
+#define RLC_LX6_CNTL__STAT_VECTOR_SEL__SHIFT 0x3
+#define RLC_LX6_CNTL__BRESET_MASK 0x00000001L
+#define RLC_LX6_CNTL__RUNSTALL_MASK 0x00000002L
+#define RLC_LX6_CNTL__PDEBUG_ENABLE_MASK 0x00000004L
+#define RLC_LX6_CNTL__STAT_VECTOR_SEL_MASK 0x00000008L
+//RLC_XT_CORE_STATUS
+#define RLC_XT_CORE_STATUS__P_WAIT_MODE__SHIFT 0x0
+#define RLC_XT_CORE_STATUS__P_FATAL_ERROR__SHIFT 0x1
+#define RLC_XT_CORE_STATUS__DOUBLE_EXCEPTION_ERROR__SHIFT 0x2
+#define RLC_XT_CORE_STATUS__P_WAIT_MODE_MASK 0x00000001L
+#define RLC_XT_CORE_STATUS__P_FATAL_ERROR_MASK 0x00000002L
+#define RLC_XT_CORE_STATUS__DOUBLE_EXCEPTION_ERROR_MASK 0x00000004L
+//RLC_XT_CORE_INTERRUPT
+#define RLC_XT_CORE_INTERRUPT__EXTINT1__SHIFT 0x0
+#define RLC_XT_CORE_INTERRUPT__EXTINT2__SHIFT 0x1a
+#define RLC_XT_CORE_INTERRUPT__NMI__SHIFT 0x1b
+#define RLC_XT_CORE_INTERRUPT__EXTINT1_MASK 0x03FFFFFFL
+#define RLC_XT_CORE_INTERRUPT__EXTINT2_MASK 0x04000000L
+#define RLC_XT_CORE_INTERRUPT__NMI_MASK 0x08000000L
+//RLC_XT_CORE_FAULT_INFO
+#define RLC_XT_CORE_FAULT_INFO__FAULT_INFO__SHIFT 0x0
+#define RLC_XT_CORE_FAULT_INFO__FAULT_INFO_MASK 0xFFFFFFFFL
+//RLC_XT_CORE_ALT_RESET_VEC
+#define RLC_XT_CORE_ALT_RESET_VEC__ALT_RESET_VEC__SHIFT 0x0
+#define RLC_XT_CORE_ALT_RESET_VEC__ALT_RESET_VEC_MASK 0xFFFFFFFFL
+//RLC_XT_CORE_RESERVED
+#define RLC_XT_CORE_RESERVED__RESERVED__SHIFT 0x0
+#define RLC_XT_CORE_RESERVED__RESERVED_MASK 0xFFFFFFFFL
+//RLC_XT_INT_VEC_FORCE
+#define RLC_XT_INT_VEC_FORCE__NUM_0__SHIFT 0x0
+#define RLC_XT_INT_VEC_FORCE__NUM_1__SHIFT 0x1
+#define RLC_XT_INT_VEC_FORCE__NUM_2__SHIFT 0x2
+#define RLC_XT_INT_VEC_FORCE__NUM_3__SHIFT 0x3
+#define RLC_XT_INT_VEC_FORCE__NUM_4__SHIFT 0x4
+#define RLC_XT_INT_VEC_FORCE__NUM_5__SHIFT 0x5
+#define RLC_XT_INT_VEC_FORCE__NUM_6__SHIFT 0x6
+#define RLC_XT_INT_VEC_FORCE__NUM_7__SHIFT 0x7
+#define RLC_XT_INT_VEC_FORCE__NUM_8__SHIFT 0x8
+#define RLC_XT_INT_VEC_FORCE__NUM_9__SHIFT 0x9
+#define RLC_XT_INT_VEC_FORCE__NUM_10__SHIFT 0xa
+#define RLC_XT_INT_VEC_FORCE__NUM_11__SHIFT 0xb
+#define RLC_XT_INT_VEC_FORCE__NUM_12__SHIFT 0xc
+#define RLC_XT_INT_VEC_FORCE__NUM_13__SHIFT 0xd
+#define RLC_XT_INT_VEC_FORCE__NUM_14__SHIFT 0xe
+#define RLC_XT_INT_VEC_FORCE__NUM_15__SHIFT 0xf
+#define RLC_XT_INT_VEC_FORCE__NUM_16__SHIFT 0x10
+#define RLC_XT_INT_VEC_FORCE__NUM_17__SHIFT 0x11
+#define RLC_XT_INT_VEC_FORCE__NUM_18__SHIFT 0x12
+#define RLC_XT_INT_VEC_FORCE__NUM_19__SHIFT 0x13
+#define RLC_XT_INT_VEC_FORCE__NUM_20__SHIFT 0x14
+#define RLC_XT_INT_VEC_FORCE__NUM_21__SHIFT 0x15
+#define RLC_XT_INT_VEC_FORCE__NUM_22__SHIFT 0x16
+#define RLC_XT_INT_VEC_FORCE__NUM_23__SHIFT 0x17
+#define RLC_XT_INT_VEC_FORCE__NUM_24__SHIFT 0x18
+#define RLC_XT_INT_VEC_FORCE__NUM_25__SHIFT 0x19
+#define RLC_XT_INT_VEC_FORCE__NUM_0_MASK 0x00000001L
+#define RLC_XT_INT_VEC_FORCE__NUM_1_MASK 0x00000002L
+#define RLC_XT_INT_VEC_FORCE__NUM_2_MASK 0x00000004L
+#define RLC_XT_INT_VEC_FORCE__NUM_3_MASK 0x00000008L
+#define RLC_XT_INT_VEC_FORCE__NUM_4_MASK 0x00000010L
+#define RLC_XT_INT_VEC_FORCE__NUM_5_MASK 0x00000020L
+#define RLC_XT_INT_VEC_FORCE__NUM_6_MASK 0x00000040L
+#define RLC_XT_INT_VEC_FORCE__NUM_7_MASK 0x00000080L
+#define RLC_XT_INT_VEC_FORCE__NUM_8_MASK 0x00000100L
+#define RLC_XT_INT_VEC_FORCE__NUM_9_MASK 0x00000200L
+#define RLC_XT_INT_VEC_FORCE__NUM_10_MASK 0x00000400L
+#define RLC_XT_INT_VEC_FORCE__NUM_11_MASK 0x00000800L
+#define RLC_XT_INT_VEC_FORCE__NUM_12_MASK 0x00001000L
+#define RLC_XT_INT_VEC_FORCE__NUM_13_MASK 0x00002000L
+#define RLC_XT_INT_VEC_FORCE__NUM_14_MASK 0x00004000L
+#define RLC_XT_INT_VEC_FORCE__NUM_15_MASK 0x00008000L
+#define RLC_XT_INT_VEC_FORCE__NUM_16_MASK 0x00010000L
+#define RLC_XT_INT_VEC_FORCE__NUM_17_MASK 0x00020000L
+#define RLC_XT_INT_VEC_FORCE__NUM_18_MASK 0x00040000L
+#define RLC_XT_INT_VEC_FORCE__NUM_19_MASK 0x00080000L
+#define RLC_XT_INT_VEC_FORCE__NUM_20_MASK 0x00100000L
+#define RLC_XT_INT_VEC_FORCE__NUM_21_MASK 0x00200000L
+#define RLC_XT_INT_VEC_FORCE__NUM_22_MASK 0x00400000L
+#define RLC_XT_INT_VEC_FORCE__NUM_23_MASK 0x00800000L
+#define RLC_XT_INT_VEC_FORCE__NUM_24_MASK 0x01000000L
+#define RLC_XT_INT_VEC_FORCE__NUM_25_MASK 0x02000000L
+//RLC_XT_INT_VEC_CLEAR
+#define RLC_XT_INT_VEC_CLEAR__NUM_0__SHIFT 0x0
+#define RLC_XT_INT_VEC_CLEAR__NUM_1__SHIFT 0x1
+#define RLC_XT_INT_VEC_CLEAR__NUM_2__SHIFT 0x2
+#define RLC_XT_INT_VEC_CLEAR__NUM_3__SHIFT 0x3
+#define RLC_XT_INT_VEC_CLEAR__NUM_4__SHIFT 0x4
+#define RLC_XT_INT_VEC_CLEAR__NUM_5__SHIFT 0x5
+#define RLC_XT_INT_VEC_CLEAR__NUM_6__SHIFT 0x6
+#define RLC_XT_INT_VEC_CLEAR__NUM_7__SHIFT 0x7
+#define RLC_XT_INT_VEC_CLEAR__NUM_8__SHIFT 0x8
+#define RLC_XT_INT_VEC_CLEAR__NUM_9__SHIFT 0x9
+#define RLC_XT_INT_VEC_CLEAR__NUM_10__SHIFT 0xa
+#define RLC_XT_INT_VEC_CLEAR__NUM_11__SHIFT 0xb
+#define RLC_XT_INT_VEC_CLEAR__NUM_12__SHIFT 0xc
+#define RLC_XT_INT_VEC_CLEAR__NUM_13__SHIFT 0xd
+#define RLC_XT_INT_VEC_CLEAR__NUM_14__SHIFT 0xe
+#define RLC_XT_INT_VEC_CLEAR__NUM_15__SHIFT 0xf
+#define RLC_XT_INT_VEC_CLEAR__NUM_16__SHIFT 0x10
+#define RLC_XT_INT_VEC_CLEAR__NUM_17__SHIFT 0x11
+#define RLC_XT_INT_VEC_CLEAR__NUM_18__SHIFT 0x12
+#define RLC_XT_INT_VEC_CLEAR__NUM_19__SHIFT 0x13
+#define RLC_XT_INT_VEC_CLEAR__NUM_20__SHIFT 0x14
+#define RLC_XT_INT_VEC_CLEAR__NUM_21__SHIFT 0x15
+#define RLC_XT_INT_VEC_CLEAR__NUM_22__SHIFT 0x16
+#define RLC_XT_INT_VEC_CLEAR__NUM_23__SHIFT 0x17
+#define RLC_XT_INT_VEC_CLEAR__NUM_24__SHIFT 0x18
+#define RLC_XT_INT_VEC_CLEAR__NUM_25__SHIFT 0x19
+#define RLC_XT_INT_VEC_CLEAR__NUM_0_MASK 0x00000001L
+#define RLC_XT_INT_VEC_CLEAR__NUM_1_MASK 0x00000002L
+#define RLC_XT_INT_VEC_CLEAR__NUM_2_MASK 0x00000004L
+#define RLC_XT_INT_VEC_CLEAR__NUM_3_MASK 0x00000008L
+#define RLC_XT_INT_VEC_CLEAR__NUM_4_MASK 0x00000010L
+#define RLC_XT_INT_VEC_CLEAR__NUM_5_MASK 0x00000020L
+#define RLC_XT_INT_VEC_CLEAR__NUM_6_MASK 0x00000040L
+#define RLC_XT_INT_VEC_CLEAR__NUM_7_MASK 0x00000080L
+#define RLC_XT_INT_VEC_CLEAR__NUM_8_MASK 0x00000100L
+#define RLC_XT_INT_VEC_CLEAR__NUM_9_MASK 0x00000200L
+#define RLC_XT_INT_VEC_CLEAR__NUM_10_MASK 0x00000400L
+#define RLC_XT_INT_VEC_CLEAR__NUM_11_MASK 0x00000800L
+#define RLC_XT_INT_VEC_CLEAR__NUM_12_MASK 0x00001000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_13_MASK 0x00002000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_14_MASK 0x00004000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_15_MASK 0x00008000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_16_MASK 0x00010000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_17_MASK 0x00020000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_18_MASK 0x00040000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_19_MASK 0x00080000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_20_MASK 0x00100000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_21_MASK 0x00200000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_22_MASK 0x00400000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_23_MASK 0x00800000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_24_MASK 0x01000000L
+#define RLC_XT_INT_VEC_CLEAR__NUM_25_MASK 0x02000000L
+//RLC_XT_INT_VEC_MUX_SEL
+#define RLC_XT_INT_VEC_MUX_SEL__MUX_SEL__SHIFT 0x0
+#define RLC_XT_INT_VEC_MUX_SEL__MUX_SEL_MASK 0x0000001FL
+//RLC_XT_INT_VEC_MUX_INT_SEL
+#define RLC_XT_INT_VEC_MUX_INT_SEL__INT_SEL__SHIFT 0x0
+#define RLC_XT_INT_VEC_MUX_INT_SEL__INT_SEL_MASK 0x0000003FL
+//RLC_GPU_CLOCK_COUNT_SPM_LSB
+#define RLC_GPU_CLOCK_COUNT_SPM_LSB__GPU_CLOCKS_LSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_SPM_LSB__GPU_CLOCKS_LSB_MASK 0xFFFFFFFFL
+//RLC_GPU_CLOCK_COUNT_SPM_MSB
+#define RLC_GPU_CLOCK_COUNT_SPM_MSB__GPU_CLOCKS_MSB__SHIFT 0x0
+#define RLC_GPU_CLOCK_COUNT_SPM_MSB__GPU_CLOCKS_MSB_MASK 0xFFFFFFFFL
+//RLC_SPM_THREAD_TRACE_CTRL
+#define RLC_SPM_THREAD_TRACE_CTRL__THREAD_TRACE_INT_EN__SHIFT 0x0
+#define RLC_SPM_THREAD_TRACE_CTRL__THREAD_TRACE_INT_EN_MASK 0x00000001L
+//RLC_SPP_CAM_ADDR
+#define RLC_SPP_CAM_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_CAM_ADDR__ADDR_MASK 0x000000FFL
+//RLC_SPP_CAM_DATA
+#define RLC_SPP_CAM_DATA__DATA__SHIFT 0x0
+#define RLC_SPP_CAM_DATA__TAG__SHIFT 0x8
+#define RLC_SPP_CAM_DATA__DATA_MASK 0x000000FFL
+#define RLC_SPP_CAM_DATA__TAG_MASK 0xFFFFFF00L
+//RLC_SPP_CAM_EXT_ADDR
+#define RLC_SPP_CAM_EXT_ADDR__ADDR__SHIFT 0x0
+#define RLC_SPP_CAM_EXT_ADDR__ADDR_MASK 0x000000FFL
+//RLC_SPP_CAM_EXT_DATA
+#define RLC_SPP_CAM_EXT_DATA__VALID__SHIFT 0x0
+#define RLC_SPP_CAM_EXT_DATA__LOCK__SHIFT 0x1
+#define RLC_SPP_CAM_EXT_DATA__VALID_MASK 0x00000001L
+#define RLC_SPP_CAM_EXT_DATA__LOCK_MASK 0x00000002L
+//RLC_CPAXI_DOORBELL_MON_CTRL
+#define RLC_CPAXI_DOORBELL_MON_CTRL__EN__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_CTRL__ID__SHIFT 0x1
+#define RLC_CPAXI_DOORBELL_MON_CTRL__EN_MASK 0x00000001L
+#define RLC_CPAXI_DOORBELL_MON_CTRL__ID_MASK 0x0000003EL
+//RLC_CPAXI_DOORBELL_MON_STAT
+#define RLC_CPAXI_DOORBELL_MON_STAT__ID_MATCH__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_STAT__MATCH_CLEAR__SHIFT 0x1
+#define RLC_CPAXI_DOORBELL_MON_STAT__ADDR__SHIFT 0x2
+#define RLC_CPAXI_DOORBELL_MON_STAT__ID_MATCH_MASK 0x00000001L
+#define RLC_CPAXI_DOORBELL_MON_STAT__MATCH_CLEAR_MASK 0x00000002L
+#define RLC_CPAXI_DOORBELL_MON_STAT__ADDR_MASK 0x0FFFFFFCL
+//RLC_CPAXI_DOORBELL_MON_DATA_LSB
+#define RLC_CPAXI_DOORBELL_MON_DATA_LSB__DATA__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_DATA_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_CPAXI_DOORBELL_MON_DATA_MSB
+#define RLC_CPAXI_DOORBELL_MON_DATA_MSB__DATA__SHIFT 0x0
+#define RLC_CPAXI_DOORBELL_MON_DATA_MSB__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_RANGE
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_RESERVED__SHIFT 0x0
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR__SHIFT 0x2
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_RESERVED__SHIFT 0x10
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR__SHIFT 0x12
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_RESERVED_MASK 0x00000003L
+#define RLC_XT_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
+#define RLC_XT_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_XT_DOORBELL_CNTL
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_0_MODE__SHIFT 0x0
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_1_MODE__SHIFT 0x2
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_2_MODE__SHIFT 0x4
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_3_MODE__SHIFT 0x6
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID__SHIFT 0x10
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_EN__SHIFT 0x15
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_0_MODE_MASK 0x00000003L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_1_MODE_MASK 0x0000000CL
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_2_MODE_MASK 0x00000030L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_3_MODE_MASK 0x000000C0L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_MASK 0x001F0000L
+#define RLC_XT_DOORBELL_CNTL__DOORBELL_ID_EN_MASK 0x00200000L
+//RLC_XT_DOORBELL_STAT
+#define RLC_XT_DOORBELL_STAT__DOORBELL_0_VALID__SHIFT 0x0
+#define RLC_XT_DOORBELL_STAT__DOORBELL_1_VALID__SHIFT 0x1
+#define RLC_XT_DOORBELL_STAT__DOORBELL_2_VALID__SHIFT 0x2
+#define RLC_XT_DOORBELL_STAT__DOORBELL_3_VALID__SHIFT 0x3
+#define RLC_XT_DOORBELL_STAT__DOORBELL_0_VALID_MASK 0x00000001L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_1_VALID_MASK 0x00000002L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_2_VALID_MASK 0x00000004L
+#define RLC_XT_DOORBELL_STAT__DOORBELL_3_VALID_MASK 0x00000008L
+//RLC_XT_DOORBELL_0_DATA_LO
+#define RLC_XT_DOORBELL_0_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_0_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_0_DATA_HI
+#define RLC_XT_DOORBELL_0_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_0_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_1_DATA_LO
+#define RLC_XT_DOORBELL_1_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_1_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_1_DATA_HI
+#define RLC_XT_DOORBELL_1_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_1_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_2_DATA_LO
+#define RLC_XT_DOORBELL_2_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_2_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_2_DATA_HI
+#define RLC_XT_DOORBELL_2_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_2_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_3_DATA_LO
+#define RLC_XT_DOORBELL_3_DATA_LO__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_3_DATA_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_XT_DOORBELL_3_DATA_HI
+#define RLC_XT_DOORBELL_3_DATA_HI__DATA__SHIFT 0x0
+#define RLC_XT_DOORBELL_3_DATA_HI__DATA_MASK 0xFFFFFFFFL
+//RLC_MEM_SLP_CNTL
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN__SHIFT 0x0
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN__SHIFT 0x1
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_LS_OVERRIDE__SHIFT 0x2
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_DS_OVERRIDE__SHIFT 0x3
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_LS_OVERRIDE__SHIFT 0x4
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_DS_OVERRIDE__SHIFT 0x5
+#define RLC_MEM_SLP_CNTL__RESERVED__SHIFT 0x6
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE__SHIFT 0x7
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY__SHIFT 0x8
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY__SHIFT 0x10
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_LS_OVERRIDE__SHIFT 0x18
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_DS_OVERRIDE__SHIFT 0x19
+#define RLC_MEM_SLP_CNTL__RESERVED1__SHIFT 0x1a
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK 0x00000001L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_DS_EN_MASK 0x00000002L
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_LS_OVERRIDE_MASK 0x00000004L
+#define RLC_MEM_SLP_CNTL__RLC_SRM_MEM_DS_OVERRIDE_MASK 0x00000008L
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_LS_OVERRIDE_MASK 0x00000010L
+#define RLC_MEM_SLP_CNTL__RLC_SPM_MEM_DS_OVERRIDE_MASK 0x00000020L
+#define RLC_MEM_SLP_CNTL__RESERVED_MASK 0x00000040L
+#define RLC_MEM_SLP_CNTL__RLC_LS_DS_BUSY_OVERRIDE_MASK 0x00000080L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_ON_DELAY_MASK 0x0000FF00L
+#define RLC_MEM_SLP_CNTL__RLC_MEM_LS_OFF_DELAY_MASK 0x00FF0000L
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_LS_OVERRIDE_MASK 0x01000000L
+#define RLC_MEM_SLP_CNTL__RLC_SPP_MEM_DS_OVERRIDE_MASK 0x02000000L
+#define RLC_MEM_SLP_CNTL__RESERVED1_MASK 0xFC000000L
+//SMU_RLC_RESPONSE
+#define SMU_RLC_RESPONSE__RESP__SHIFT 0x0
+#define SMU_RLC_RESPONSE__RESP_MASK 0xFFFFFFFFL
+//RLC_RLCV_SAFE_MODE
+#define RLC_RLCV_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_RLCV_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_RLCV_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_RLCV_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_RLCV_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_RLCV_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_RLCV_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_RLCV_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_RLCV_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_RLCV_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_SMU_SAFE_MODE
+#define RLC_SMU_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SMU_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SMU_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SMU_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SMU_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SMU_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SMU_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SMU_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SMU_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SMU_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCV_COMMAND
+#define RLC_RLCV_COMMAND__CMD__SHIFT 0x0
+#define RLC_RLCV_COMMAND__RESERVED__SHIFT 0x4
+#define RLC_RLCV_COMMAND__CMD_MASK 0x0000000FL
+#define RLC_RLCV_COMMAND__RESERVED_MASK 0xFFFFFFF0L
+//RLC_SMU_MESSAGE
+#define RLC_SMU_MESSAGE__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_MESSAGE_1
+#define RLC_SMU_MESSAGE_1__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE_1__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_MESSAGE_2
+#define RLC_SMU_MESSAGE_2__CMD__SHIFT 0x0
+#define RLC_SMU_MESSAGE_2__CMD_MASK 0xFFFFFFFFL
+//RLC_SRM_GPM_COMMAND
+#define RLC_SRM_GPM_COMMAND__OP__SHIFT 0x0
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL__SHIFT 0x1
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM__SHIFT 0x2
+#define RLC_SRM_GPM_COMMAND__SIZE__SHIFT 0x5
+#define RLC_SRM_GPM_COMMAND__START_OFFSET__SHIFT 0x12
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY__SHIFT 0x1f
+#define RLC_SRM_GPM_COMMAND__OP_MASK 0x00000001L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_MASK 0x00000002L
+#define RLC_SRM_GPM_COMMAND__INDEX_CNTL_NUM_MASK 0x0000001CL
+#define RLC_SRM_GPM_COMMAND__SIZE_MASK 0x0003FFE0L
+#define RLC_SRM_GPM_COMMAND__START_OFFSET_MASK 0x7FFC0000L
+#define RLC_SRM_GPM_COMMAND__DEST_MEMORY_MASK 0x80000000L
+//RLC_SRM_GPM_ABORT
+#define RLC_SRM_GPM_ABORT__ABORT__SHIFT 0x0
+#define RLC_SRM_GPM_ABORT__RESERVED__SHIFT 0x1
+#define RLC_SRM_GPM_ABORT__ABORT_MASK 0x00000001L
+#define RLC_SRM_GPM_ABORT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SMU_COMMAND
+#define RLC_SMU_COMMAND__CMD__SHIFT 0x0
+#define RLC_SMU_COMMAND__CMD_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_1
+#define RLC_SMU_ARGUMENT_1__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_1__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_2
+#define RLC_SMU_ARGUMENT_2__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_2__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_3
+#define RLC_SMU_ARGUMENT_3__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_3__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_4
+#define RLC_SMU_ARGUMENT_4__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_4__ARG_MASK 0xFFFFFFFFL
+//RLC_SMU_ARGUMENT_5
+#define RLC_SMU_ARGUMENT_5__ARG__SHIFT 0x0
+#define RLC_SMU_ARGUMENT_5__ARG_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_ADDR_HI
+#define RLC_IMU_BOOTLOAD_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_ADDR_LO
+#define RLC_IMU_BOOTLOAD_ADDR_LO__ADDR_LO__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//RLC_IMU_BOOTLOAD_SIZE
+#define RLC_IMU_BOOTLOAD_SIZE__SIZE__SHIFT 0x0
+#define RLC_IMU_BOOTLOAD_SIZE__RESERVED__SHIFT 0x1a
+#define RLC_IMU_BOOTLOAD_SIZE__SIZE_MASK 0x03FFFFFFL
+#define RLC_IMU_BOOTLOAD_SIZE__RESERVED_MASK 0xFC000000L
+//RLC_IMU_MISC
+#define RLC_IMU_MISC__THROTTLE_GFX__SHIFT 0x0
+#define RLC_IMU_MISC__EARLY_MGCG__SHIFT 0x1
+#define RLC_IMU_MISC__RESERVED__SHIFT 0x2
+#define RLC_IMU_MISC__THROTTLE_GFX_MASK 0x00000001L
+#define RLC_IMU_MISC__EARLY_MGCG_MASK 0x00000002L
+#define RLC_IMU_MISC__RESERVED_MASK 0xFFFFFFFCL
+//RLC_IMU_RESET_VECTOR
+#define RLC_IMU_RESET_VECTOR__COLD_BOOT_EXIT__SHIFT 0x0
+#define RLC_IMU_RESET_VECTOR__VDDGFX_EXIT__SHIFT 0x1
+#define RLC_IMU_RESET_VECTOR__VECTOR__SHIFT 0x2
+#define RLC_IMU_RESET_VECTOR__RESERVED__SHIFT 0x8
+#define RLC_IMU_RESET_VECTOR__COLD_BOOT_EXIT_MASK 0x00000001L
+#define RLC_IMU_RESET_VECTOR__VDDGFX_EXIT_MASK 0x00000002L
+#define RLC_IMU_RESET_VECTOR__VECTOR_MASK 0x000000FCL
+#define RLC_IMU_RESET_VECTOR__RESERVED_MASK 0xFFFFFF00L
+
+
+// addressBlock: gc_rlcsdec
+//RLC_RLCS_DEC_START
+//RLC_RLCS_DEC_DUMP_ADDR
+//RLC_RLCS_EXCEPTION_REG_1
+#define RLC_RLCS_EXCEPTION_REG_1__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_1__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_2
+#define RLC_RLCS_EXCEPTION_REG_2__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_2__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_2__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_2__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_3
+#define RLC_RLCS_EXCEPTION_REG_3__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_3__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_3__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_3__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_EXCEPTION_REG_4
+#define RLC_RLCS_EXCEPTION_REG_4__ADDR__SHIFT 0x0
+#define RLC_RLCS_EXCEPTION_REG_4__RESERVED__SHIFT 0x12
+#define RLC_RLCS_EXCEPTION_REG_4__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_EXCEPTION_REG_4__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_CGCG_REQUEST
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST__SHIFT 0x0
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_3D__SHIFT 0x1
+#define RLC_RLCS_CGCG_REQUEST__RESERVED__SHIFT 0x2
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_MASK 0x00000001L
+#define RLC_RLCS_CGCG_REQUEST__CGCG_REQUEST_3D_MASK 0x00000002L
+#define RLC_RLCS_CGCG_REQUEST__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_CGCG_STATUS
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS__SHIFT 0x0
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS__SHIFT 0x2
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_3D__SHIFT 0x3
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_3D__SHIFT 0x5
+#define RLC_RLCS_CGCG_STATUS__RESERVED__SHIFT 0x6
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_MASK 0x00000003L
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_MASK 0x00000004L
+#define RLC_RLCS_CGCG_STATUS__CGCG_RAMP_STATUS_3D_MASK 0x00000018L
+#define RLC_RLCS_CGCG_STATUS__GFX_CLK_STATUS_3D_MASK 0x00000020L
+#define RLC_RLCS_CGCG_STATUS__RESERVED_MASK 0xFFFFFFC0L
+//RLC_RLCS_SOC_DS_CNTL
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_ALLOW__SHIFT 0x0
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK__SHIFT 0x1
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK__SHIFT 0x2
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_GFX_PWR_STALLED_MASK__SHIFT 0x6
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_NON3D_PWR_STALLED_MASK__SHIFT 0x7
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_0_BUSY_MASK__SHIFT 0x10
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_1_BUSY_MASK__SHIFT 0x11
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_2_BUSY_MASK__SHIFT 0x12
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_3_BUSY_MASK__SHIFT 0x13
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_4_BUSY_MASK__SHIFT 0x14
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_5_BUSY_MASK__SHIFT 0x15
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_6_BUSY_MASK__SHIFT 0x16
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_7_BUSY_MASK__SHIFT 0x17
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_ALLOW_MASK 0x00000001L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_RLC_BUSY_MASK_MASK 0x00000002L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_CP_BUSY_MASK_MASK 0x00000004L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_GFX_PWR_STALLED_MASK_MASK 0x00000040L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_NON3D_PWR_STALLED_MASK_MASK 0x00000080L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_0_BUSY_MASK_MASK 0x00010000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_1_BUSY_MASK_MASK 0x00020000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_2_BUSY_MASK_MASK 0x00040000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_3_BUSY_MASK_MASK 0x00080000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_4_BUSY_MASK_MASK 0x00100000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_5_BUSY_MASK_MASK 0x00200000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_6_BUSY_MASK_MASK 0x00400000L
+#define RLC_RLCS_SOC_DS_CNTL__SOC_CLK_DS_SDMA_7_BUSY_MASK_MASK 0x00800000L
+//RLC_RLCS_GFX_DS_CNTL
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_ALLOW__SHIFT 0x0
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK__SHIFT 0x1
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK__SHIFT 0x2
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_GFX_PWR_STALLED_MASK__SHIFT 0x6
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_NON3D_PWR_STALLED_MASK__SHIFT 0x7
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_IMU_DISABLE_MASK__SHIFT 0x8
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_0_BUSY_MASK__SHIFT 0x10
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_1_BUSY_MASK__SHIFT 0x11
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_2_BUSY_MASK__SHIFT 0x12
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_3_BUSY_MASK__SHIFT 0x13
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_4_BUSY_MASK__SHIFT 0x14
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_5_BUSY_MASK__SHIFT 0x15
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_6_BUSY_MASK__SHIFT 0x16
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_7_BUSY_MASK__SHIFT 0x17
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_ALLOW_MASK 0x00000001L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_RLC_BUSY_MASK_MASK 0x00000002L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_CP_BUSY_MASK_MASK 0x00000004L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_GFX_PWR_STALLED_MASK_MASK 0x00000040L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_NON3D_PWR_STALLED_MASK_MASK 0x00000080L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_IMU_DISABLE_MASK_MASK 0x00000100L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_0_BUSY_MASK_MASK 0x00010000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_1_BUSY_MASK_MASK 0x00020000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_2_BUSY_MASK_MASK 0x00040000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_3_BUSY_MASK_MASK 0x00080000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_4_BUSY_MASK_MASK 0x00100000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_5_BUSY_MASK_MASK 0x00200000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_6_BUSY_MASK_MASK 0x00400000L
+#define RLC_RLCS_GFX_DS_CNTL__GFX_CLK_DS_SDMA_7_BUSY_MASK_MASK 0x00800000L
+//RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL__SHIFT 0x0
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE0__SHIFT 0x1
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE1__SHIFT 0x2
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE2__SHIFT 0x3
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_MASK 0x00000001L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE0_MASK 0x00000002L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE1_MASK 0x00000004L
+#define RLC_RLCS_GFX_DS_ALLOW_MASK_CNTL__GFX_CLK_DS_ALLOW_MASK_GDFLL_SE2_MASK 0x00000008L
+//RLC_GPM_STAT
+#define RLC_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_UP__SHIFT 0xd
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_DOWN__SHIFT 0xe
+#define RLC_GPM_STAT__DYN_WGP_POWERING_UP__SHIFT 0xf
+#define RLC_GPM_STAT__DYN_WGP_POWERING_DOWN__SHIFT 0x10
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_GPM_STAT__CMP_power_status__SHIFT 0x12
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_UP_MASK 0x00002000L
+#define RLC_GPM_STAT__STATIC_WGP_POWERING_DOWN_MASK 0x00004000L
+#define RLC_GPM_STAT__DYN_WGP_POWERING_UP_MASK 0x00008000L
+#define RLC_GPM_STAT__DYN_WGP_POWERING_DOWN_MASK 0x00010000L
+#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_GPM_STAT__CMP_power_status_MASK 0x00040000L
+#define RLC_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_RLCS_GPM_STAT
+#define RLC_RLCS_GPM_STAT__RLC_BUSY__SHIFT 0x0
+#define RLC_RLCS_GPM_STAT__GFX_POWER_STATUS__SHIFT 0x1
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS__SHIFT 0x2
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS__SHIFT 0x3
+#define RLC_RLCS_GPM_STAT__GFX_PIPELINE_POWER_STATUS__SHIFT 0x4
+#define RLC_RLCS_GPM_STAT__CNTX_IDLE_BEING_PROCESSED__SHIFT 0x5
+#define RLC_RLCS_GPM_STAT__CNTX_BUSY_BEING_PROCESSED__SHIFT 0x6
+#define RLC_RLCS_GPM_STAT__GFX_IDLE_BEING_PROCESSED__SHIFT 0x7
+#define RLC_RLCS_GPM_STAT__CMP_BUSY_BEING_PROCESSED__SHIFT 0x8
+#define RLC_RLCS_GPM_STAT__SAVING_REGISTERS__SHIFT 0x9
+#define RLC_RLCS_GPM_STAT__RESTORING_REGISTERS__SHIFT 0xa
+#define RLC_RLCS_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xb
+#define RLC_RLCS_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE__SHIFT 0xc
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_UP__SHIFT 0xd
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_DOWN__SHIFT 0xe
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_UP__SHIFT 0xf
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_DOWN__SHIFT 0x10
+#define RLC_RLCS_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
+#define RLC_RLCS_GPM_STAT__CMP_POWER_STATUS__SHIFT 0x12
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_3D__SHIFT 0x13
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_3D__SHIFT 0x14
+#define RLC_RLCS_GPM_STAT__MGCG_OVERRIDE_STATUS__SHIFT 0x15
+#define RLC_RLCS_GPM_STAT__RLC_EXEC_ROM_CODE__SHIFT 0x16
+#define RLC_RLCS_GPM_STAT__FGCG_OVERRIDE_STATUS__SHIFT 0x17
+#define RLC_RLCS_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
+#define RLC_RLCS_GPM_STAT__RLC_BUSY_MASK 0x00000001L
+#define RLC_RLCS_GPM_STAT__GFX_POWER_STATUS_MASK 0x00000002L
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_MASK 0x00000004L
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_MASK 0x00000008L
+#define RLC_RLCS_GPM_STAT__GFX_PIPELINE_POWER_STATUS_MASK 0x00000010L
+#define RLC_RLCS_GPM_STAT__CNTX_IDLE_BEING_PROCESSED_MASK 0x00000020L
+#define RLC_RLCS_GPM_STAT__CNTX_BUSY_BEING_PROCESSED_MASK 0x00000040L
+#define RLC_RLCS_GPM_STAT__GFX_IDLE_BEING_PROCESSED_MASK 0x00000080L
+#define RLC_RLCS_GPM_STAT__CMP_BUSY_BEING_PROCESSED_MASK 0x00000100L
+#define RLC_RLCS_GPM_STAT__SAVING_REGISTERS_MASK 0x00000200L
+#define RLC_RLCS_GPM_STAT__RESTORING_REGISTERS_MASK 0x00000400L
+#define RLC_RLCS_GPM_STAT__GFX3D_BLOCKS_CHANGING_POWER_STATE_MASK 0x00000800L
+#define RLC_RLCS_GPM_STAT__CMP_BLOCKS_CHANGING_POWER_STATE_MASK 0x00001000L
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_UP_MASK 0x00002000L
+#define RLC_RLCS_GPM_STAT__STATIC_WGP_POWERING_DOWN_MASK 0x00004000L
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_UP_MASK 0x00008000L
+#define RLC_RLCS_GPM_STAT__DYN_WGP_POWERING_DOWN_MASK 0x00010000L
+#define RLC_RLCS_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x00020000L
+#define RLC_RLCS_GPM_STAT__CMP_POWER_STATUS_MASK 0x00040000L
+#define RLC_RLCS_GPM_STAT__GFX_LS_STATUS_3D_MASK 0x00080000L
+#define RLC_RLCS_GPM_STAT__GFX_CLOCK_STATUS_3D_MASK 0x00100000L
+#define RLC_RLCS_GPM_STAT__MGCG_OVERRIDE_STATUS_MASK 0x00200000L
+#define RLC_RLCS_GPM_STAT__RLC_EXEC_ROM_CODE_MASK 0x00400000L
+#define RLC_RLCS_GPM_STAT__FGCG_OVERRIDE_STATUS_MASK 0x00800000L
+#define RLC_RLCS_GPM_STAT__PG_ERROR_STATUS_MASK 0xFF000000L
+//RLC_RLCS_ABORTED_PD_SEQUENCE
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__APS__SHIFT 0x0
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__RESERVED__SHIFT 0x10
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__APS_MASK 0x0000FFFFL
+#define RLC_RLCS_ABORTED_PD_SEQUENCE__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_DIDT_FORCE_STALL
+#define RLC_RLCS_DIDT_FORCE_STALL__DFS__SHIFT 0x0
+#define RLC_RLCS_DIDT_FORCE_STALL__VALID__SHIFT 0x3
+#define RLC_RLCS_DIDT_FORCE_STALL__RESERVED__SHIFT 0x4
+#define RLC_RLCS_DIDT_FORCE_STALL__DFS_MASK 0x00000007L
+#define RLC_RLCS_DIDT_FORCE_STALL__VALID_MASK 0x00000008L
+#define RLC_RLCS_DIDT_FORCE_STALL__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_IOV_CMD_STATUS
+#define RLC_RLCS_IOV_CMD_STATUS__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_CMD_STATUS__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IOV_CNTX_LOC_SIZE
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__RESERVED__SHIFT 0x8
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__DATA_MASK 0x000000FFL
+#define RLC_RLCS_IOV_CNTX_LOC_SIZE__RESERVED_MASK 0xFFFFFF00L
+//RLC_RLCS_IOV_SCH_BLOCK
+#define RLC_RLCS_IOV_SCH_BLOCK__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_SCH_BLOCK__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IOV_VM_BUSY_STATUS
+#define RLC_RLCS_IOV_VM_BUSY_STATUS__DATA__SHIFT 0x0
+#define RLC_RLCS_IOV_VM_BUSY_STATUS__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GPM_STAT_2
+#define RLC_RLCS_GPM_STAT_2__TC_TRANS_ERROR__SHIFT 0x0
+#define RLC_RLCS_GPM_STAT_2__RLC_PWR_NON3D_STALLED__SHIFT 0x1
+#define RLC_RLCS_GPM_STAT_2__GFX_PWR_STALLED_STATUS__SHIFT 0x2
+#define RLC_RLCS_GPM_STAT_2__GFX_ULV_STATUS__SHIFT 0x3
+#define RLC_RLCS_GPM_STAT_2__GFX_GENERAL_STATUS__SHIFT 0x4
+#define RLC_RLCS_GPM_STAT_2__RESERVED__SHIFT 0x5
+#define RLC_RLCS_GPM_STAT_2__TC_TRANS_ERROR_MASK 0x00000001L
+#define RLC_RLCS_GPM_STAT_2__RLC_PWR_NON3D_STALLED_MASK 0x00000002L
+#define RLC_RLCS_GPM_STAT_2__GFX_PWR_STALLED_STATUS_MASK 0x00000004L
+#define RLC_RLCS_GPM_STAT_2__GFX_ULV_STATUS_MASK 0x00000008L
+#define RLC_RLCS_GPM_STAT_2__GFX_GENERAL_STATUS_MASK 0x00000010L
+#define RLC_RLCS_GPM_STAT_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_GRBM_SOFT_RESET
+#define RLC_RLCS_GRBM_SOFT_RESET__RESET__SHIFT 0x0
+#define RLC_RLCS_GRBM_SOFT_RESET__RESERVED__SHIFT 0x1
+#define RLC_RLCS_GRBM_SOFT_RESET__RESET_MASK 0x00000001L
+#define RLC_RLCS_GRBM_SOFT_RESET__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_PG_CHANGE_STATUS
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_CNTL_CHANGED__SHIFT 0x0
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_REG_CHANGED__SHIFT 0x1
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_REQ_CHANGED__SHIFT 0x3
+#define RLC_RLCS_PG_CHANGE_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_CNTL_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_PG_CHANGE_STATUS__PG_REG_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_PG_CHANGE_STATUS__DYN_PG_REQ_CHANGED_MASK 0x00000008L
+#define RLC_RLCS_PG_CHANGE_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_PG_CHANGE_READ
+#define RLC_RLCS_PG_CHANGE_READ__RESERVED__SHIFT 0x0
+#define RLC_RLCS_PG_CHANGE_READ__PG_REG_CHANGED__SHIFT 0x1
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_REQ_CHANGED__SHIFT 0x3
+#define RLC_RLCS_PG_CHANGE_READ__RESERVED_MASK 0x00000001L
+#define RLC_RLCS_PG_CHANGE_READ__PG_REG_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_PG_CHANGE_READ__DYN_PG_REQ_CHANGED_MASK 0x00000008L
+//RLC_RLCS_IH_SEMAPHORE
+#define RLC_RLCS_IH_SEMAPHORE__CLIENT_ID__SHIFT 0x0
+#define RLC_RLCS_IH_SEMAPHORE__RESERVED__SHIFT 0x5
+#define RLC_RLCS_IH_SEMAPHORE__CLIENT_ID_MASK 0x0000001FL
+#define RLC_RLCS_IH_SEMAPHORE__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_IH_COOKIE_SEMAPHORE
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__CLIENT_ID__SHIFT 0x0
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__RESERVED__SHIFT 0x5
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__CLIENT_ID_MASK 0x0000001FL
+#define RLC_RLCS_IH_COOKIE_SEMAPHORE__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_WGP_STATUS
+#define RLC_RLCS_WGP_STATUS__CS_WORK_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_WGP_STATUS__STATIC_WGP_STATUS_CHANGED__SHIFT 0x1
+#define RLC_RLCS_WGP_STATUS__DYMANIC_WGP_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_WGP_STATUS__STATIC_PERWGP_PD_INCOMPLETE__SHIFT 0x3
+#define RLC_RLCS_WGP_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_WGP_STATUS__CS_WORK_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_WGP_STATUS__STATIC_WGP_STATUS_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_WGP_STATUS__DYMANIC_WGP_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_WGP_STATUS__STATIC_PERWGP_PD_INCOMPLETE_MASK 0x00000008L
+#define RLC_RLCS_WGP_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_WGP_READ
+#define RLC_RLCS_WGP_READ__CS_WORK_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_WGP_READ__STATIC_WGP_STATUS_CHANGED__SHIFT 0x1
+#define RLC_RLCS_WGP_READ__DYMANIC_WGP_STATUS_CHANGED__SHIFT 0x2
+#define RLC_RLCS_WGP_READ__RESERVED__SHIFT 0x3
+#define RLC_RLCS_WGP_READ__CS_WORK_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_WGP_READ__STATIC_WGP_STATUS_CHANGED_MASK 0x00000002L
+#define RLC_RLCS_WGP_READ__DYMANIC_WGP_STATUS_CHANGED_MASK 0x00000004L
+#define RLC_RLCS_WGP_READ__RESERVED_MASK 0xFFFFFFF8L
+//RLC_RLCS_CP_INT_CTRL_1
+#define RLC_RLCS_CP_INT_CTRL_1__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_CP_INT_CTRL_1__RESERVED__SHIFT 0x1
+#define RLC_RLCS_CP_INT_CTRL_1__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_CP_INT_CTRL_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_CP_INT_CTRL_2
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_EN__SHIFT 0x0
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_EN__SHIFT 0x1
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_ACTIVE__SHIFT 0x2
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_ACTIVE__SHIFT 0x3
+#define RLC_RLCS_CP_INT_CTRL_2__INTERRUPT_PENDING__SHIFT 0x4
+#define RLC_RLCS_CP_INT_CTRL_2__RESERVED__SHIFT 0x5
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_EN_MASK 0x00000001L
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_EN_MASK 0x00000002L
+#define RLC_RLCS_CP_INT_CTRL_2__IDLE_AUTO_ACK_ACTIVE_MASK 0x00000004L
+#define RLC_RLCS_CP_INT_CTRL_2__BUSY_AUTO_ACK_ACTIVE_MASK 0x00000008L
+#define RLC_RLCS_CP_INT_CTRL_2__INTERRUPT_PENDING_MASK 0x00000010L
+#define RLC_RLCS_CP_INT_CTRL_2__RESERVED_MASK 0xFFFFFFE0L
+//RLC_RLCS_CP_INT_INFO_1
+#define RLC_RLCS_CP_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_RLCS_CP_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_CP_INT_INFO_2
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_CP_INT_INFO_2__RESERVED__SHIFT 0x19
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_RLCS_CP_INT_INFO_2__INTERRUPT_ID_MASK 0x01FF0000L
+#define RLC_RLCS_CP_INT_INFO_2__RESERVED_MASK 0xFE000000L
+//RLC_RLCS_SPM_INT_CTRL
+#define RLC_RLCS_SPM_INT_CTRL__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_CTRL__RESERVED__SHIFT 0x1
+#define RLC_RLCS_SPM_INT_CTRL__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_SPM_INT_CTRL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_SPM_INT_INFO_1
+#define RLC_RLCS_SPM_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_SPM_INT_INFO_2
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_SPM_INT_INFO_2__RESERVED__SHIFT 0x19
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_RLCS_SPM_INT_INFO_2__INTERRUPT_ID_MASK 0x01FF0000L
+#define RLC_RLCS_SPM_INT_INFO_2__RESERVED_MASK 0xFE000000L
+//RLC_RLCS_DSM_TRIG
+#define RLC_RLCS_DSM_TRIG__START__SHIFT 0x0
+#define RLC_RLCS_DSM_TRIG__RESERVED__SHIFT 0x1
+#define RLC_RLCS_DSM_TRIG__START_MASK 0x00000001L
+#define RLC_RLCS_DSM_TRIG__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_BOOTLOAD_STATUS
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_INIT_DONE__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_DONE__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_DONE__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_STATUS__RESERVED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_STATUS__BOOTLOAD_COMPLETE__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_INIT_DONE_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_STATUS__GFX_SECURITY_POLICY_DONE_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_STATUS__RLC_GPM_IRAM_DONE_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_STATUS__RESERVED_MASK 0x7FFFFFE0L
+#define RLC_RLCS_BOOTLOAD_STATUS__BOOTLOAD_COMPLETE_MASK 0x80000000L
+//RLC_RLCS_POWER_BRAKE_CNTL
+#define RLC_RLCS_POWER_BRAKE_CNTL__POWER_BRAKE__SHIFT 0x0
+#define RLC_RLCS_POWER_BRAKE_CNTL__INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_POWER_BRAKE_CNTL__MAX_HYSTERESIS__SHIFT 0x2
+#define RLC_RLCS_POWER_BRAKE_CNTL__HYSTERESIS_CNT__SHIFT 0xa
+#define RLC_RLCS_POWER_BRAKE_CNTL__RESERVED__SHIFT 0x12
+#define RLC_RLCS_POWER_BRAKE_CNTL__POWER_BRAKE_MASK 0x00000001L
+#define RLC_RLCS_POWER_BRAKE_CNTL__INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_POWER_BRAKE_CNTL__MAX_HYSTERESIS_MASK 0x000003FCL
+#define RLC_RLCS_POWER_BRAKE_CNTL__HYSTERESIS_CNT_MASK 0x0003FC00L
+#define RLC_RLCS_POWER_BRAKE_CNTL__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_POWER_BRAKE_CNTL_TH1
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__POWER_BRAKE__SHIFT 0x0
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__MAX_HYSTERESIS__SHIFT 0x2
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__HYSTERESIS_CNT__SHIFT 0xa
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__POWER_BRAKE_MASK 0x00000001L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__MAX_HYSTERESIS_MASK 0x000003FCL
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__HYSTERESIS_CNT_MASK 0x0003FC00L
+#define RLC_RLCS_POWER_BRAKE_CNTL_TH1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_GRBM_IDLE_BUSY_STAT
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__GRBM_RLC_GC_STAT_IDLE__SHIFT 0x0
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY__SHIFT 0x10
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY__SHIFT 0x11
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY__SHIFT 0x12
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY__SHIFT 0x13
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY__SHIFT 0x14
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY__SHIFT 0x15
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY__SHIFT 0x16
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY__SHIFT 0x17
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_CHANGED__SHIFT 0x18
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_CHANGED__SHIFT 0x19
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_CHANGED__SHIFT 0x1a
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_CHANGED__SHIFT 0x1b
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_CHANGED__SHIFT 0x1c
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_CHANGED__SHIFT 0x1d
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_CHANGED__SHIFT 0x1e
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_CHANGED__SHIFT 0x1f
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__GRBM_RLC_GC_STAT_IDLE_MASK 0x00000003L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_MASK 0x00010000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_MASK 0x00020000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_MASK 0x00040000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_MASK 0x00080000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_MASK 0x00100000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_MASK 0x00200000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_MASK 0x00400000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_MASK 0x00800000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_0_BUSY_CHANGED_MASK 0x01000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_1_BUSY_CHANGED_MASK 0x02000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_2_BUSY_CHANGED_MASK 0x04000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_3_BUSY_CHANGED_MASK 0x08000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_4_BUSY_CHANGED_MASK 0x10000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_5_BUSY_CHANGED_MASK 0x20000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_6_BUSY_CHANGED_MASK 0x40000000L
+#define RLC_RLCS_GRBM_IDLE_BUSY_STAT__SDMA_7_BUSY_CHANGED_MASK 0x80000000L
+//RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA0_BUSY_INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA1_BUSY_INT_CLEAR__SHIFT 0x1
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA2_BUSY_INT_CLEAR__SHIFT 0x2
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA3_BUSY_INT_CLEAR__SHIFT 0x3
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA4_BUSY_INT_CLEAR__SHIFT 0x4
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA5_BUSY_INT_CLEAR__SHIFT 0x5
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA6_BUSY_INT_CLEAR__SHIFT 0x6
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA7_BUSY_INT_CLEAR__SHIFT 0x7
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA0_BUSY_INT_CLEAR_MASK 0x00000001L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA1_BUSY_INT_CLEAR_MASK 0x00000002L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA2_BUSY_INT_CLEAR_MASK 0x00000004L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA3_BUSY_INT_CLEAR_MASK 0x00000008L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA4_BUSY_INT_CLEAR_MASK 0x00000010L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA5_BUSY_INT_CLEAR_MASK 0x00000020L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA6_BUSY_INT_CLEAR_MASK 0x00000040L
+#define RLC_RLCS_GRBM_IDLE_BUSY_INT_CNTL__SDMA7_BUSY_INT_CLEAR_MASK 0x00000080L
+//RLC_RLCS_CMP_IDLE_CNTL
+#define RLC_RLCS_CMP_IDLE_CNTL__INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_HYST__SHIFT 0x1
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE__SHIFT 0x2
+#define RLC_RLCS_CMP_IDLE_CNTL__MAX_HYSTERESIS__SHIFT 0x3
+#define RLC_RLCS_CMP_IDLE_CNTL__HYSTERESIS_CNT__SHIFT 0xb
+#define RLC_RLCS_CMP_IDLE_CNTL__RESERVED__SHIFT 0x13
+#define RLC_RLCS_CMP_IDLE_CNTL__INT_CLEAR_MASK 0x00000001L
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_HYST_MASK 0x00000002L
+#define RLC_RLCS_CMP_IDLE_CNTL__CMP_IDLE_MASK 0x00000004L
+#define RLC_RLCS_CMP_IDLE_CNTL__MAX_HYSTERESIS_MASK 0x000007F8L
+#define RLC_RLCS_CMP_IDLE_CNTL__HYSTERESIS_CNT_MASK 0x0007F800L
+#define RLC_RLCS_CMP_IDLE_CNTL__RESERVED_MASK 0xFFF80000L
+//RLC_RLCS_GENERAL_0
+#define RLC_RLCS_GENERAL_0__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_1
+#define RLC_RLCS_GENERAL_1__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_2
+#define RLC_RLCS_GENERAL_2__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_3
+#define RLC_RLCS_GENERAL_3__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_3__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_4
+#define RLC_RLCS_GENERAL_4__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_4__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_5
+#define RLC_RLCS_GENERAL_5__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_5__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_6
+#define RLC_RLCS_GENERAL_6__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_6__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_7
+#define RLC_RLCS_GENERAL_7__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_7__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_8
+#define RLC_RLCS_GENERAL_8__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_8__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_9
+#define RLC_RLCS_GENERAL_9__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_9__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_10
+#define RLC_RLCS_GENERAL_10__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_10__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_11
+#define RLC_RLCS_GENERAL_11__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_11__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_12
+#define RLC_RLCS_GENERAL_12__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_12__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_13
+#define RLC_RLCS_GENERAL_13__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_13__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_14
+#define RLC_RLCS_GENERAL_14__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_14__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_15
+#define RLC_RLCS_GENERAL_15__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_15__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GENERAL_16
+#define RLC_RLCS_GENERAL_16__DATA__SHIFT 0x0
+#define RLC_RLCS_GENERAL_16__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_AUXILIARY_REG_1
+#define RLC_RLCS_AUXILIARY_REG_1__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_1__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_1__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_1__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_2
+#define RLC_RLCS_AUXILIARY_REG_2__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_2__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_2__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_2__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_3
+#define RLC_RLCS_AUXILIARY_REG_3__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_3__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_3__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_3__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_AUXILIARY_REG_4
+#define RLC_RLCS_AUXILIARY_REG_4__ADDR__SHIFT 0x0
+#define RLC_RLCS_AUXILIARY_REG_4__RESERVED__SHIFT 0x12
+#define RLC_RLCS_AUXILIARY_REG_4__ADDR_MASK 0x0003FFFFL
+#define RLC_RLCS_AUXILIARY_REG_4__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_SPM_SQTT_MODE
+#define RLC_RLCS_SPM_SQTT_MODE__MODE__SHIFT 0x0
+#define RLC_RLCS_SPM_SQTT_MODE__MODE_MASK 0x00000001L
+//RLC_RLCS_CP_DMA_SRCID_OVER
+#define RLC_RLCS_CP_DMA_SRCID_OVER__SRCID_OVERRIDE__SHIFT 0x0
+#define RLC_RLCS_CP_DMA_SRCID_OVER__SRCID_OVERRIDE_MASK 0x00000001L
+//RLC_RLCS_BOOTLOAD_ID_STATUS1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_0_LOADED__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_1_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_2_LOADED__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_3_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_4_LOADED__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_5_LOADED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_6_LOADED__SHIFT 0x6
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_7_LOADED__SHIFT 0x7
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_8_LOADED__SHIFT 0x8
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_9_LOADED__SHIFT 0x9
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_10_LOADED__SHIFT 0xa
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_11_LOADED__SHIFT 0xb
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_12_LOADED__SHIFT 0xc
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_13_LOADED__SHIFT 0xd
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_14_LOADED__SHIFT 0xe
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_15_LOADED__SHIFT 0xf
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_16_LOADED__SHIFT 0x10
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_17_LOADED__SHIFT 0x11
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_18_LOADED__SHIFT 0x12
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_19_LOADED__SHIFT 0x13
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_20_LOADED__SHIFT 0x14
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_21_LOADED__SHIFT 0x15
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_22_LOADED__SHIFT 0x16
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_23_LOADED__SHIFT 0x17
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_24_LOADED__SHIFT 0x18
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_25_LOADED__SHIFT 0x19
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_26_LOADED__SHIFT 0x1a
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_27_LOADED__SHIFT 0x1b
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_28_LOADED__SHIFT 0x1c
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_29_LOADED__SHIFT 0x1d
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_30_LOADED__SHIFT 0x1e
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_31_LOADED__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_0_LOADED_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_1_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_2_LOADED_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_3_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_4_LOADED_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_5_LOADED_MASK 0x00000020L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_6_LOADED_MASK 0x00000040L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_7_LOADED_MASK 0x00000080L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_8_LOADED_MASK 0x00000100L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_9_LOADED_MASK 0x00000200L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_10_LOADED_MASK 0x00000400L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_11_LOADED_MASK 0x00000800L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_12_LOADED_MASK 0x00001000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_13_LOADED_MASK 0x00002000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_14_LOADED_MASK 0x00004000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_15_LOADED_MASK 0x00008000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_16_LOADED_MASK 0x00010000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_17_LOADED_MASK 0x00020000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_18_LOADED_MASK 0x00040000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_19_LOADED_MASK 0x00080000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_20_LOADED_MASK 0x00100000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_21_LOADED_MASK 0x00200000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_22_LOADED_MASK 0x00400000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_23_LOADED_MASK 0x00800000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_24_LOADED_MASK 0x01000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_25_LOADED_MASK 0x02000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_26_LOADED_MASK 0x04000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_27_LOADED_MASK 0x08000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_28_LOADED_MASK 0x10000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_29_LOADED_MASK 0x20000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_30_LOADED_MASK 0x40000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS1__ID_31_LOADED_MASK 0x80000000L
+//RLC_RLCS_BOOTLOAD_ID_STATUS2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_32_LOADED__SHIFT 0x0
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_33_LOADED__SHIFT 0x1
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_34_LOADED__SHIFT 0x2
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_35_LOADED__SHIFT 0x3
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_36_LOADED__SHIFT 0x4
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_37_LOADED__SHIFT 0x5
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_38_LOADED__SHIFT 0x6
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_39_LOADED__SHIFT 0x7
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_40_LOADED__SHIFT 0x8
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_41_LOADED__SHIFT 0x9
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_42_LOADED__SHIFT 0xa
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_43_LOADED__SHIFT 0xb
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_44_LOADED__SHIFT 0xc
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_45_LOADED__SHIFT 0xd
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_46_LOADED__SHIFT 0xe
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_47_LOADED__SHIFT 0xf
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_48_LOADED__SHIFT 0x10
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_49_LOADED__SHIFT 0x11
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_50_LOADED__SHIFT 0x12
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_51_LOADED__SHIFT 0x13
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_52_LOADED__SHIFT 0x14
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_53_LOADED__SHIFT 0x15
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_54_LOADED__SHIFT 0x16
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_55_LOADED__SHIFT 0x17
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_56_LOADED__SHIFT 0x18
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_57_LOADED__SHIFT 0x19
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_58_LOADED__SHIFT 0x1a
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_59_LOADED__SHIFT 0x1b
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_60_LOADED__SHIFT 0x1c
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_61_LOADED__SHIFT 0x1d
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_62_LOADED__SHIFT 0x1e
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_63_LOADED__SHIFT 0x1f
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_32_LOADED_MASK 0x00000001L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_33_LOADED_MASK 0x00000002L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_34_LOADED_MASK 0x00000004L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_35_LOADED_MASK 0x00000008L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_36_LOADED_MASK 0x00000010L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_37_LOADED_MASK 0x00000020L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_38_LOADED_MASK 0x00000040L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_39_LOADED_MASK 0x00000080L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_40_LOADED_MASK 0x00000100L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_41_LOADED_MASK 0x00000200L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_42_LOADED_MASK 0x00000400L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_43_LOADED_MASK 0x00000800L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_44_LOADED_MASK 0x00001000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_45_LOADED_MASK 0x00002000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_46_LOADED_MASK 0x00004000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_47_LOADED_MASK 0x00008000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_48_LOADED_MASK 0x00010000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_49_LOADED_MASK 0x00020000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_50_LOADED_MASK 0x00040000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_51_LOADED_MASK 0x00080000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_52_LOADED_MASK 0x00100000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_53_LOADED_MASK 0x00200000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_54_LOADED_MASK 0x00400000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_55_LOADED_MASK 0x00800000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_56_LOADED_MASK 0x01000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_57_LOADED_MASK 0x02000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_58_LOADED_MASK 0x04000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_59_LOADED_MASK 0x08000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_60_LOADED_MASK 0x10000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_61_LOADED_MASK 0x20000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_62_LOADED_MASK 0x40000000L
+#define RLC_RLCS_BOOTLOAD_ID_STATUS2__ID_63_LOADED_MASK 0x80000000L
+//RLC_RLCS_IMU_VIDCHG_CNTL
+#define RLC_RLCS_IMU_VIDCHG_CNTL__REQ__SHIFT 0x0
+#define RLC_RLCS_IMU_VIDCHG_CNTL__DATA__SHIFT 0x1
+#define RLC_RLCS_IMU_VIDCHG_CNTL__PSIEN__SHIFT 0xa
+#define RLC_RLCS_IMU_VIDCHG_CNTL__ACK__SHIFT 0xb
+#define RLC_RLCS_IMU_VIDCHG_CNTL__RESERVED__SHIFT 0xc
+#define RLC_RLCS_IMU_VIDCHG_CNTL__REQ_MASK 0x00000001L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__DATA_MASK 0x000003FEL
+#define RLC_RLCS_IMU_VIDCHG_CNTL__PSIEN_MASK 0x00000400L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__ACK_MASK 0x00000800L
+#define RLC_RLCS_IMU_VIDCHG_CNTL__RESERVED_MASK 0xFFFFF000L
+//RLC_RLCS_EDC_INT_CNTL
+#define RLC_RLCS_EDC_INT_CNTL__EDC_EVENT_INT_CLEAR__SHIFT 0x0
+#define RLC_RLCS_EDC_INT_CNTL__EDC_EVENT_INT_CLEAR_MASK 0x00000001L
+//RLC_RLCS_KMD_LOG_CNTL1
+#define RLC_RLCS_KMD_LOG_CNTL1__DATA__SHIFT 0x0
+#define RLC_RLCS_KMD_LOG_CNTL1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_KMD_LOG_CNTL2
+#define RLC_RLCS_KMD_LOG_CNTL2__DATA__SHIFT 0x0
+#define RLC_RLCS_KMD_LOG_CNTL2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GPM_LEGACY_INT_STAT
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GC_CAC_EDC_EVENT_CHANGED__SHIFT 0x0
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GFX_POWER_BRAKE_CHANGED__SHIFT 0x1
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GC_CAC_EDC_EVENT_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_GPM_LEGACY_INT_STAT__GFX_POWER_BRAKE_CHANGED_MASK 0x00000002L
+//RLC_RLCS_GPM_LEGACY_INT_DISABLE
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GC_CAC_EDC_EVENT_CHANGED__SHIFT 0x0
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GFX_POWER_BRAKE_CHANGED__SHIFT 0x1
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GC_CAC_EDC_EVENT_CHANGED_MASK 0x00000001L
+#define RLC_RLCS_GPM_LEGACY_INT_DISABLE__GFX_POWER_BRAKE_CHANGED_MASK 0x00000002L
+//RLC_RLCS_SRM_SRCID_CNTL
+#define RLC_RLCS_SRM_SRCID_CNTL__SRCID__SHIFT 0x0
+#define RLC_RLCS_SRM_SRCID_CNTL__SRCID_MASK 0x00000007L
+//RLC_RLCS_GCR_DATA_0
+#define RLC_RLCS_GCR_DATA_0__PHASE_0__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_0__PHASE_1__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_0__PHASE_0_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_0__PHASE_1_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_1
+#define RLC_RLCS_GCR_DATA_1__PHASE_2__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_1__PHASE_3__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_1__PHASE_2_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_1__PHASE_3_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_2
+#define RLC_RLCS_GCR_DATA_2__PHASE_4__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_2__PHASE_5__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_2__PHASE_4_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_2__PHASE_5_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_DATA_3
+#define RLC_RLCS_GCR_DATA_3__PHASE_6__SHIFT 0x0
+#define RLC_RLCS_GCR_DATA_3__PHASE_7__SHIFT 0x10
+#define RLC_RLCS_GCR_DATA_3__PHASE_6_MASK 0x0000FFFFL
+#define RLC_RLCS_GCR_DATA_3__PHASE_7_MASK 0xFFFF0000L
+//RLC_RLCS_GCR_STATUS
+#define RLC_RLCS_GCR_STATUS__GCR_BUSY__SHIFT 0x0
+#define RLC_RLCS_GCR_STATUS__GCR_OUT_COUNT__SHIFT 0x1
+#define RLC_RLCS_GCR_STATUS__RESERVED_2__SHIFT 0x5
+#define RLC_RLCS_GCR_STATUS__GCRIU_CLI_RSP_TAG__SHIFT 0x8
+#define RLC_RLCS_GCR_STATUS__RESERVED__SHIFT 0x10
+#define RLC_RLCS_GCR_STATUS__GCR_BUSY_MASK 0x00000001L
+#define RLC_RLCS_GCR_STATUS__GCR_OUT_COUNT_MASK 0x0000001EL
+#define RLC_RLCS_GCR_STATUS__RESERVED_2_MASK 0x000000E0L
+#define RLC_RLCS_GCR_STATUS__GCRIU_CLI_RSP_TAG_MASK 0x0000FF00L
+#define RLC_RLCS_GCR_STATUS__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_PERFMON_CLK_CNTL_UCODE
+#define RLC_RLCS_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE__SHIFT 0x0
+#define RLC_RLCS_PERFMON_CLK_CNTL_UCODE__PERFMON_CLOCK_STATE_MASK 0x00000001L
+//RLC_RLCS_UTCL2_CNTL
+#define RLC_RLCS_UTCL2_CNTL__MTYPE_NO_PTE_MODE__SHIFT 0x0
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE__SHIFT 0x1
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE__SHIFT 0x2
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_VALUE__SHIFT 0x3
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_VALUE__SHIFT 0x5
+#define RLC_RLCS_UTCL2_CNTL__IGNORE_PTE_PERMISSION__SHIFT 0x6
+#define RLC_RLCS_UTCL2_CNTL__RESERVED__SHIFT 0x7
+#define RLC_RLCS_UTCL2_CNTL__MTYPE_NO_PTE_MODE_MASK 0x00000001L
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_MASK 0x00000002L
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_MASK 0x00000004L
+#define RLC_RLCS_UTCL2_CNTL__GPA_OVERRIDE_VALUE_MASK 0x00000018L
+#define RLC_RLCS_UTCL2_CNTL__VF_OVERRIDE_VALUE_MASK 0x00000020L
+#define RLC_RLCS_UTCL2_CNTL__IGNORE_PTE_PERMISSION_MASK 0x00000040L
+#define RLC_RLCS_UTCL2_CNTL__RESERVED_MASK 0xFFFFFF80L
+//RLC_RLCS_IMU_RLC_MSG_DATA0
+#define RLC_RLCS_IMU_RLC_MSG_DATA0__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA1
+#define RLC_RLCS_IMU_RLC_MSG_DATA1__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA2
+#define RLC_RLCS_IMU_RLC_MSG_DATA2__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA2__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA3
+#define RLC_RLCS_IMU_RLC_MSG_DATA3__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA3__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_DATA4
+#define RLC_RLCS_IMU_RLC_MSG_DATA4__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_DATA4__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_CONTROL
+#define RLC_RLCS_IMU_RLC_MSG_CONTROL__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_CONTROL__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RLC_MSG_CNTL
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__DONETOG__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__CHGTOG__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__DONETOG_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__CHGTOG_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_MSG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_RLC_IMU_MSG_DATA0
+#define RLC_RLCS_RLC_IMU_MSG_DATA0__DATA__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_DATA0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_RLC_IMU_MSG_CONTROL
+#define RLC_RLCS_RLC_IMU_MSG_CONTROL__DATA__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_CONTROL__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_RLC_IMU_MSG_CNTL
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__CHGTOG__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__DONETOG__SHIFT 0x1
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__CHGTOG_MASK 0x00000001L
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__DONETOG_MASK 0x00000002L
+#define RLC_RLCS_RLC_IMU_MSG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__CURRENT__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__VOLTAGE__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__CURRENT_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_0__VOLTAGE_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__TEMPERATURE1__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__TEMPERATURE1_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RLC_TELEMETRY_DATA_1__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RLC_MUTEX_CNTL
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__REQ__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__ACQUIRE__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__REQ_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__ACQUIRE_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_MUTEX_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_RLC_STATUS
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_GFXOFF__SHIFT 0x0
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_FA_DCS__SHIFT 0x1
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_14_2__SHIFT 0x2
+#define RLC_RLCS_IMU_RLC_STATUS__DISABLE_GFXCLK_DS__SHIFT 0xf
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_GFXOFF_MASK 0x00000001L
+#define RLC_RLCS_IMU_RLC_STATUS__ALLOW_FA_DCS_MASK 0x00000002L
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_14_2_MASK 0x00007FFCL
+#define RLC_RLCS_IMU_RLC_STATUS__DISABLE_GFXCLK_DS_MASK 0x00008000L
+#define RLC_RLCS_IMU_RLC_STATUS__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_RLC_IMU_STATUS
+#define RLC_RLCS_RLC_IMU_STATUS__PWR_DOWN_ACTIVE__SHIFT 0x0
+#define RLC_RLCS_RLC_IMU_STATUS__RLC_ALIVE__SHIFT 0x1
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_3_2__SHIFT 0x2
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED__SHIFT 0x4
+#define RLC_RLCS_RLC_IMU_STATUS__PWR_DOWN_ACTIVE_MASK 0x00000001L
+#define RLC_RLCS_RLC_IMU_STATUS__RLC_ALIVE_MASK 0x00000002L
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_3_2_MASK 0x0000000CL
+#define RLC_RLCS_RLC_IMU_STATUS__RESERVED_MASK 0xFFFFFFF0L
+//RLC_RLCS_IMU_RAM_DATA_1
+#define RLC_RLCS_IMU_RAM_DATA_1__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_DATA_1__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_1_LSB
+#define RLC_RLCS_IMU_RAM_ADDR_1_LSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_1_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_1_MSB
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__DATA_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RAM_ADDR_1_MSB__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RAM_DATA_0
+#define RLC_RLCS_IMU_RAM_DATA_0__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_0_LSB
+#define RLC_RLCS_IMU_RAM_ADDR_0_LSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_0_LSB__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_IMU_RAM_ADDR_0_MSB
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__DATA__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__RESERVED__SHIFT 0x10
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__DATA_MASK 0x0000FFFFL
+#define RLC_RLCS_IMU_RAM_ADDR_0_MSB__RESERVED_MASK 0xFFFF0000L
+//RLC_RLCS_IMU_RAM_CNTL
+#define RLC_RLCS_IMU_RAM_CNTL__REQTOG__SHIFT 0x0
+#define RLC_RLCS_IMU_RAM_CNTL__ACKTOG__SHIFT 0x1
+#define RLC_RLCS_IMU_RAM_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_RAM_CNTL__REQTOG_MASK 0x00000001L
+#define RLC_RLCS_IMU_RAM_CNTL__ACKTOG_MASK 0x00000002L
+#define RLC_RLCS_IMU_RAM_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_IMU_GFX_DOORBELL_FENCE
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ENABLE__SHIFT 0x0
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ACK__SHIFT 0x1
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__RESERVED__SHIFT 0x2
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ENABLE_MASK 0x00000001L
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__ACK_MASK 0x00000002L
+#define RLC_RLCS_IMU_GFX_DOORBELL_FENCE__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_CNTL_1
+#define RLC_RLCS_SDMA_INT_CNTL_1__INTERRUPT_ACK__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESP_ID__SHIFT 0x1
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESERVED__SHIFT 0x2
+#define RLC_RLCS_SDMA_INT_CNTL_1__INTERRUPT_ACK_MASK 0x00000001L
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESP_ID_MASK 0x00000002L
+#define RLC_RLCS_SDMA_INT_CNTL_1__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_CNTL_2
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_EN__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_ACTIVE__SHIFT 0x1
+#define RLC_RLCS_SDMA_INT_CNTL_2__RESERVED__SHIFT 0x2
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_EN_MASK 0x00000001L
+#define RLC_RLCS_SDMA_INT_CNTL_2__AUTO_ACK_ACTIVE_MASK 0x00000002L
+#define RLC_RLCS_SDMA_INT_CNTL_2__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_SDMA_INT_STAT
+#define RLC_RLCS_SDMA_INT_STAT__REQ_IDLE_HIST__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_STAT__REQ_BUSY_HIST__SHIFT 0x8
+#define RLC_RLCS_SDMA_INT_STAT__LAST_SDMA_RLC_INT_ID__SHIFT 0x10
+#define RLC_RLCS_SDMA_INT_STAT__SDMA_RLC_INT_PENDING__SHIFT 0x11
+#define RLC_RLCS_SDMA_INT_STAT__RESERVED__SHIFT 0x12
+#define RLC_RLCS_SDMA_INT_STAT__REQ_IDLE_HIST_MASK 0x000000FFL
+#define RLC_RLCS_SDMA_INT_STAT__REQ_BUSY_HIST_MASK 0x0000FF00L
+#define RLC_RLCS_SDMA_INT_STAT__LAST_SDMA_RLC_INT_ID_MASK 0x00010000L
+#define RLC_RLCS_SDMA_INT_STAT__SDMA_RLC_INT_PENDING_MASK 0x00020000L
+#define RLC_RLCS_SDMA_INT_STAT__RESERVED_MASK 0xFFFC0000L
+//RLC_RLCS_SDMA_INT_INFO
+#define RLC_RLCS_SDMA_INT_INFO__REQ_IDLE_TO_FW__SHIFT 0x0
+#define RLC_RLCS_SDMA_INT_INFO__REQ_BUSY_TO_FW__SHIFT 0x8
+#define RLC_RLCS_SDMA_INT_INFO__INTERRUPT_ID__SHIFT 0x10
+#define RLC_RLCS_SDMA_INT_INFO__RESERVED__SHIFT 0x11
+#define RLC_RLCS_SDMA_INT_INFO__REQ_IDLE_TO_FW_MASK 0x000000FFL
+#define RLC_RLCS_SDMA_INT_INFO__REQ_BUSY_TO_FW_MASK 0x0000FF00L
+#define RLC_RLCS_SDMA_INT_INFO__INTERRUPT_ID_MASK 0x00010000L
+#define RLC_RLCS_SDMA_INT_INFO__RESERVED_MASK 0xFFFE0000L
+//RLC_RLCS_PMM_CGCG_CNTL
+#define RLC_RLCS_PMM_CGCG_CNTL__VALID__SHIFT 0x0
+#define RLC_RLCS_PMM_CGCG_CNTL__CLEAN__SHIFT 0x1
+#define RLC_RLCS_PMM_CGCG_CNTL__RESERVED__SHIFT 0x2
+#define RLC_RLCS_PMM_CGCG_CNTL__VALID_MASK 0x00000001L
+#define RLC_RLCS_PMM_CGCG_CNTL__CLEAN_MASK 0x00000002L
+#define RLC_RLCS_PMM_CGCG_CNTL__RESERVED_MASK 0xFFFFFFFCL
+//RLC_RLCS_GFX_MEM_POWER_CTRL_LO
+#define RLC_RLCS_GFX_MEM_POWER_CTRL_LO__DATA__SHIFT 0x0
+#define RLC_RLCS_GFX_MEM_POWER_CTRL_LO__DATA_MASK 0xFFFFFFFFL
+//RLC_RLCS_GFX_RM_CNTL
+#define RLC_RLCS_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
+#define RLC_RLCS_GFX_RM_CNTL__RESERVED__SHIFT 0x1
+#define RLC_RLCS_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
+#define RLC_RLCS_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCS_IH_CTRL_1
+#define RLC_RLCS_IH_CTRL_1__IH_CONTEXT_ID_1__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_1__IH_CONTEXT_ID_1_MASK 0xFFFFFFFFL
+//RLC_RLCS_IH_CTRL_2
+#define RLC_RLCS_IH_CTRL_2__IH_CONTEXT_ID_2__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_2__IH_RING_ID__SHIFT 0x8
+#define RLC_RLCS_IH_CTRL_2__IH_VM_ID__SHIFT 0x10
+#define RLC_RLCS_IH_CTRL_2__RESERVED__SHIFT 0x14
+#define RLC_RLCS_IH_CTRL_2__IH_CONTEXT_ID_2_MASK 0x000000FFL
+#define RLC_RLCS_IH_CTRL_2__IH_RING_ID_MASK 0x0000FF00L
+#define RLC_RLCS_IH_CTRL_2__IH_VM_ID_MASK 0x000F0000L
+#define RLC_RLCS_IH_CTRL_2__RESERVED_MASK 0xFFF00000L
+//RLC_RLCS_IH_CTRL_3
+#define RLC_RLCS_IH_CTRL_3__IH_SOURCE_ID__SHIFT 0x0
+#define RLC_RLCS_IH_CTRL_3__IH_VF_ID__SHIFT 0x8
+#define RLC_RLCS_IH_CTRL_3__IH_VF__SHIFT 0xd
+#define RLC_RLCS_IH_CTRL_3__RESERVED__SHIFT 0xe
+#define RLC_RLCS_IH_CTRL_3__IH_SOURCE_ID_MASK 0x000000FFL
+#define RLC_RLCS_IH_CTRL_3__IH_VF_ID_MASK 0x00001F00L
+#define RLC_RLCS_IH_CTRL_3__IH_VF_MASK 0x00002000L
+#define RLC_RLCS_IH_CTRL_3__RESERVED_MASK 0xFFFFC000L
+//RLC_RLCS_IH_STATUS
+#define RLC_RLCS_IH_STATUS__IH_CREDIT_COUNT__SHIFT 0x0
+#define RLC_RLCS_IH_STATUS__IH_BUSY__SHIFT 0x6
+#define RLC_RLCS_IH_STATUS__IH_WRITE_DONE__SHIFT 0x7
+#define RLC_RLCS_IH_STATUS__RESERVED__SHIFT 0x8
+#define RLC_RLCS_IH_STATUS__IH_CREDIT_COUNT_MASK 0x0000003FL
+#define RLC_RLCS_IH_STATUS__IH_BUSY_MASK 0x00000040L
+#define RLC_RLCS_IH_STATUS__IH_WRITE_DONE_MASK 0x00000080L
+#define RLC_RLCS_IH_STATUS__RESERVED_MASK 0xFFFFFF00L
+//RLC_RLCS_DEC_END
+
+
+// addressBlock: gc_pfvfdec_rlc
+//RLC_SAFE_MODE
+#define RLC_SAFE_MODE__CMD__SHIFT 0x0
+#define RLC_SAFE_MODE__MESSAGE__SHIFT 0x1
+#define RLC_SAFE_MODE__RESERVED1__SHIFT 0x5
+#define RLC_SAFE_MODE__RESPONSE__SHIFT 0x8
+#define RLC_SAFE_MODE__RESERVED__SHIFT 0xc
+#define RLC_SAFE_MODE__CMD_MASK 0x00000001L
+#define RLC_SAFE_MODE__MESSAGE_MASK 0x0000001EL
+#define RLC_SAFE_MODE__RESERVED1_MASK 0x000000E0L
+#define RLC_SAFE_MODE__RESPONSE_MASK 0x00000F00L
+#define RLC_SAFE_MODE__RESERVED_MASK 0xFFFFF000L
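(Editor's aside, not part of the patch: every register in this generated header follows the same pattern — a field is read by AND-ing the register value with <REG>__<FIELD>_MASK and shifting right by <REG>__<FIELD>__SHIFT, and written by clearing the masked bits first. A minimal sketch using the RLC_SAFE_MODE definitions just above; the helper names are hypothetical and the kernel's u32 type is assumed.)

/*
 * Illustrative only -- not part of the generated header or this patch.
 * Only the RLC_SAFE_MODE__* macros defined above are real; the helpers
 * below merely show how such __SHIFT/_MASK pairs are typically consumed.
 */
static inline u32 rlc_safe_mode_get_response(u32 val)
{
	/* RESPONSE occupies bits 8..11 (mask 0x00000F00). */
	return (val & RLC_SAFE_MODE__RESPONSE_MASK) >> RLC_SAFE_MODE__RESPONSE__SHIFT;
}

static inline u32 rlc_safe_mode_set_message(u32 val, u32 msg)
{
	/* MESSAGE occupies bits 1..4 (mask 0x0000001E): clear, then insert. */
	val &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	val |= (msg << RLC_SAFE_MODE__MESSAGE__SHIFT) & RLC_SAFE_MODE__MESSAGE_MASK;
	return val;
}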
+//RLC_SPM_SAMPLE_CNT
+#define RLC_SPM_SAMPLE_CNT__COUNT__SHIFT 0x0
+#define RLC_SPM_SAMPLE_CNT__COUNT_MASK 0xFFFFFFFFL
+//RLC_SPM_MC_CNTL
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT 0x0
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY__SHIFT 0x4
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR__SHIFT 0x6
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED__SHIFT 0x7
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER__SHIFT 0x8
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE__SHIFT 0x9
+#define RLC_SPM_MC_CNTL__RLC_SPM_BC__SHIFT 0xc
+#define RLC_SPM_MC_CNTL__RLC_SPM_RO__SHIFT 0xd
+#define RLC_SPM_MC_CNTL__RLC_SPM_VOL__SHIFT 0xe
+#define RLC_SPM_MC_CNTL__RLC_SPM_NOFILL__SHIFT 0xf
+#define RLC_SPM_MC_CNTL__RESERVED_3__SHIFT 0x10
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC__SHIFT 0x12
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_OVER__SHIFT 0x13
+#define RLC_SPM_MC_CNTL__RESERVED__SHIFT 0x14
+#define RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK 0x0000000FL
+#define RLC_SPM_MC_CNTL__RLC_SPM_POLICY_MASK 0x00000030L
+#define RLC_SPM_MC_CNTL__RLC_SPM_PERF_CNTR_MASK 0x00000040L
+#define RLC_SPM_MC_CNTL__RLC_SPM_FED_MASK 0x00000080L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_OVER_MASK 0x00000100L
+#define RLC_SPM_MC_CNTL__RLC_SPM_MTYPE_MASK 0x00000E00L
+#define RLC_SPM_MC_CNTL__RLC_SPM_BC_MASK 0x00001000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_RO_MASK 0x00002000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_VOL_MASK 0x00004000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_NOFILL_MASK 0x00008000L
+#define RLC_SPM_MC_CNTL__RESERVED_3_MASK 0x00030000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_MASK 0x00040000L
+#define RLC_SPM_MC_CNTL__RLC_SPM_LLC_NOALLOC_OVER_MASK 0x00080000L
+#define RLC_SPM_MC_CNTL__RESERVED_MASK 0xFFF00000L
+//RLC_SPM_INT_CNTL
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL__SHIFT 0x0
+#define RLC_SPM_INT_CNTL__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_CNTL__RLC_SPM_INT_CNTL_MASK 0x00000001L
+#define RLC_SPM_INT_CNTL__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_STATUS
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS__SHIFT 0x0
+#define RLC_SPM_INT_STATUS__RESERVED__SHIFT 0x1
+#define RLC_SPM_INT_STATUS__RLC_SPM_INT_STATUS_MASK 0x00000001L
+#define RLC_SPM_INT_STATUS__RESERVED_MASK 0xFFFFFFFEL
+//RLC_SPM_INT_INFO_1
+#define RLC_SPM_INT_INFO_1__INTERRUPT_INFO_1__SHIFT 0x0
+#define RLC_SPM_INT_INFO_1__INTERRUPT_INFO_1_MASK 0xFFFFFFFFL
+//RLC_SPM_INT_INFO_2
+#define RLC_SPM_INT_INFO_2__INTERRUPT_INFO_2__SHIFT 0x0
+#define RLC_SPM_INT_INFO_2__INTERRUPT_ID__SHIFT 0x10
+#define RLC_SPM_INT_INFO_2__RESERVED__SHIFT 0x18
+#define RLC_SPM_INT_INFO_2__INTERRUPT_INFO_2_MASK 0x0000FFFFL
+#define RLC_SPM_INT_INFO_2__INTERRUPT_ID_MASK 0x00FF0000L
+#define RLC_SPM_INT_INFO_2__RESERVED_MASK 0xFF000000L
+//RLC_CSIB_ADDR_LO
+#define RLC_CSIB_ADDR_LO__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_LO__ADDRESS_MASK 0xFFFFFFFFL
+//RLC_CSIB_ADDR_HI
+#define RLC_CSIB_ADDR_HI__ADDRESS__SHIFT 0x0
+#define RLC_CSIB_ADDR_HI__ADDRESS_MASK 0x0000FFFFL
+//RLC_CSIB_LENGTH
+#define RLC_CSIB_LENGTH__LENGTH__SHIFT 0x0
+#define RLC_CSIB_LENGTH__LENGTH_MASK 0xFFFFFFFFL
+//RLC_CP_SCHEDULERS
+#define RLC_CP_SCHEDULERS__scheduler0__SHIFT 0x0
+#define RLC_CP_SCHEDULERS__scheduler1__SHIFT 0x8
+#define RLC_CP_SCHEDULERS__scheduler0_MASK 0x000000FFL
+#define RLC_CP_SCHEDULERS__scheduler1_MASK 0x0000FF00L
+//RLC_CP_EOF_INT
+#define RLC_CP_EOF_INT__INTERRUPT__SHIFT 0x0
+#define RLC_CP_EOF_INT__RESERVED__SHIFT 0x1
+#define RLC_CP_EOF_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_CP_EOF_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_CP_EOF_INT_CNT
+#define RLC_CP_EOF_INT_CNT__CNT__SHIFT 0x0
+#define RLC_CP_EOF_INT_CNT__CNT_MASK 0xFFFFFFFFL
+//RLC_SPARE_INT_0
+#define RLC_SPARE_INT_0__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_0__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_0__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_0__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_0__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_0__COMPLETE_MASK 0x80000000L
+//RLC_SPARE_INT_1
+#define RLC_SPARE_INT_1__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_1__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_1__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_1__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_1__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_1__COMPLETE_MASK 0x80000000L
+//RLC_SPARE_INT_2
+#define RLC_SPARE_INT_2__DATA__SHIFT 0x0
+#define RLC_SPARE_INT_2__PROCESSING__SHIFT 0x1e
+#define RLC_SPARE_INT_2__COMPLETE__SHIFT 0x1f
+#define RLC_SPARE_INT_2__DATA_MASK 0x3FFFFFFFL
+#define RLC_SPARE_INT_2__PROCESSING_MASK 0x40000000L
+#define RLC_SPARE_INT_2__COMPLETE_MASK 0x80000000L
+//RLC_PACE_SPARE_INT
+#define RLC_PACE_SPARE_INT__INTERRUPT__SHIFT 0x0
+#define RLC_PACE_SPARE_INT__RESERVED__SHIFT 0x1
+#define RLC_PACE_SPARE_INT__INTERRUPT_MASK 0x00000001L
+#define RLC_PACE_SPARE_INT__RESERVED_MASK 0xFFFFFFFEL
+//RLC_PACE_SPARE_INT_1
+#define RLC_PACE_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_PACE_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_PACE_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_PACE_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+//RLC_RLCV_SPARE_INT_1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT__SHIFT 0x0
+#define RLC_RLCV_SPARE_INT_1__RESERVED__SHIFT 0x1
+#define RLC_RLCV_SPARE_INT_1__INTERRUPT_MASK 0x00000001L
+#define RLC_RLCV_SPARE_INT_1__RESERVED_MASK 0xFFFFFFFEL
+
+
+// addressBlock: gc_pwrdec
+//CGTS_TCC_DISABLE
+#define CGTS_TCC_DISABLE__WRITE_DIS__SHIFT 0x0
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
+#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
+#define CGTS_TCC_DISABLE__WRITE_DIS_MASK 0x00000001L
+#define CGTS_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
+#define CGTS_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
+//CGTX_SPI_DEBUG_CLK_CTRL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST__SHIFT 0x0
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE__SHIFT 0x6
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE__SHIFT 0x7
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL__SHIFT 0x8
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE__SHIFT 0x9
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OFF_HYST_MASK 0x0000003FL
+#define CGTX_SPI_DEBUG_CLK_CTRL__GRP5_CG_OVERRIDE_MASK 0x00000040L
+#define CGTX_SPI_DEBUG_CLK_CTRL__ALL_CLK_ON_OVERRIDE_MASK 0x00000080L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_SH_CLK_CONTROL_MASK 0x00000100L
+#define CGTX_SPI_DEBUG_CLK_CTRL__SPI_REPEATER_FGCG_OVERRIDE_MASK 0x00000200L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__PI1_OVERRIDE__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__PI0_OVERRIDE__SHIFT 0x18
+#define CGTT_VGT_CLK_CTRL__HS_OVERRIDE__SHIFT 0x19
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__PI1_OVERRIDE_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__PI0_OVERRIDE_MASK 0x01000000L
+#define CGTT_VGT_CLK_CTRL__HS_OVERRIDE_MASK 0x02000000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__DIST_OVERRIDE__SHIFT 0x1a
+#define CGTT_IA_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_IA_CLK_CTRL__PCM_OVERRIDE__SHIFT 0x1c
+#define CGTT_IA_CLK_CTRL__TESS_DIST_OVERRIDE__SHIFT 0x1d
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_IA_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__DIST_OVERRIDE_MASK 0x04000000L
+#define CGTT_IA_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_IA_CLK_CTRL__PCM_OVERRIDE_MASK 0x10000000L
+#define CGTT_IA_CLK_CTRL__TESS_DIST_OVERRIDE_MASK 0x20000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__FE_OUT_OVERRIDE__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__ASSEMBLER_OVERRIDE__SHIFT 0x18
+#define CGTT_WD_CLK_CTRL__DMA_PROC0_OVERRIDE__SHIFT 0x19
+#define CGTT_WD_CLK_CTRL__DMA_PROC1_OVERRIDE__SHIFT 0x1a
+#define CGTT_WD_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_WD_CLK_CTRL__DMA_OVERRIDE__SHIFT 0x1c
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__FE_OUT_OVERRIDE_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__ASSEMBLER_OVERRIDE_MASK 0x01000000L
+#define CGTT_WD_CLK_CTRL__DMA_PROC0_OVERRIDE_MASK 0x02000000L
+#define CGTT_WD_CLK_CTRL__DMA_PROC1_OVERRIDE_MASK 0x04000000L
+#define CGTT_WD_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_WD_CLK_CTRL__DMA_OVERRIDE_MASK 0x10000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PERF_OVERRIDE__SHIFT 0x1b
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_OVERRIDE_MASK 0x08000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__CLIP_SU_PRIM_FIFO_CLK_OVERRIDE__SHIFT 0xc
+#define CGTT_PA_CLK_CTRL__SXIFCCG_CLK_OVERRIDE__SHIFT 0xd
+#define CGTT_PA_CLK_CTRL__AG_CLK_OVERRIDE__SHIFT 0xe
+#define CGTT_PA_CLK_CTRL__VE_VTE_REC_CLK_OVERRIDE__SHIFT 0xf
+#define CGTT_PA_CLK_CTRL__ENGG_CLK_OVERRIDE__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__CL_VTE_CLK_OVERRIDE__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__AG_REG_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__CL_VTE_REG_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PA_CLK_CTRL__VTE_REG_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__NGG_INDEX_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__NGG_CSB_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__SU_CL_REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__CLIP_SU_PRIM_FIFO_CLK_OVERRIDE_MASK 0x00001000L
+#define CGTT_PA_CLK_CTRL__SXIFCCG_CLK_OVERRIDE_MASK 0x00002000L
+#define CGTT_PA_CLK_CTRL__AG_CLK_OVERRIDE_MASK 0x00004000L
+#define CGTT_PA_CLK_CTRL__VE_VTE_REC_CLK_OVERRIDE_MASK 0x00008000L
+#define CGTT_PA_CLK_CTRL__ENGG_CLK_OVERRIDE_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__CL_VTE_CLK_OVERRIDE_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__AG_REG_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__CL_VTE_REG_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PA_CLK_CTRL__VTE_REG_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__NGG_INDEX_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__NGG_CSB_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__SU_CL_REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON__SHIFT 0xf
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_VRS_INTF_CLK_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL2__SC_DB_COURSE_MGCG_BUSY_ENABLE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL2__SC_DB_STAGE_IN_TP_PFFB_WR_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_Z_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_PROC_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_ACCUM_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL2__SC_DB_PFFB_RP_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL2__SC_DB_PKR_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_FREE_WAVE_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_WAVE_2_SC_SPI_WAVE_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON_MASK 0x00008000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_VRS_INTF_CLK_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_COURSE_MGCG_BUSY_ENABLE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_STAGE_IN_TP_PFFB_WR_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUADMASK_Z_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_PROC_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_QUAD_ACCUM_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_PFFB_RP_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_PKR_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_FREE_WAVE_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_SC_WAVE_2_SC_SPI_WAVE_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__FORCE_GL1H_CLKEN__SHIFT 0x17
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPALLOC_FGCG__SHIFT 0x18
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPGRANT_FGCG__SHIFT 0x19
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPREQ_FGCG__SHIFT 0x1a
+#define CGTT_SQG_CLK_CTRL__FORCE_CMD_FGCG__SHIFT 0x1b
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__FORCE_GL1H_CLKEN_MASK 0x00800000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPALLOC_FGCG_MASK 0x01000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPGRANT_FGCG_MASK 0x02000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_EXPREQ_FGCG_MASK 0x04000000L
+#define CGTT_SQG_CLK_CTRL__FORCE_CMD_FGCG_MASK 0x08000000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//SQ_ALU_CLK_CTRL
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//SQ_TEX_CLK_CTRL
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_TEX_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//SQ_LDS_CLK_CTRL
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
+#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//ICG_SP_CLK_CTRL
+#define ICG_SP_CLK_CTRL__CLK_OVERRIDE__SHIFT 0x0
+#define ICG_SP_CLK_CTRL__CLK_OVERRIDE_MASK 0xFFFFFFFFL
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x2
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x3
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x5
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x6
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x7
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE8__SHIFT 0x8
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0x9
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x00000001L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x00000002L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x00000004L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x00000008L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x00000010L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x00000020L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x00000040L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x00000080L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE8_MASK 0x00000100L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0xFFFFFE00L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GFX_ICG_GL2A_CTRL
+#define GFX_ICG_GL2A_CTRL__REG_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2A_CTRL__PERFMON_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2A_CTRL__CROSSBAR_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2A_CTRL__RTN_ARB_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2A_CTRL__GCRD_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2A_CTRL__CLIENT0_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2A_CTRL__CLIENT1_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2A_CTRL__CLIENT2_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2A_CTRL__CLIENT3_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2A_CTRL__CLIENT4_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2A_CTRL__CLIENT5_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2A_CTRL__CLIENT6_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2A_CTRL__CLIENT7_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2A_CTRL__CLIENT8_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2A_CTRL__CLIENT9_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2A_CTRL__CLIENT10_OVERRIDE__SHIFT 0x12
+#define GFX_ICG_GL2A_CTRL__CLIENT11_OVERRIDE__SHIFT 0x13
+#define GFX_ICG_GL2A_CTRL__CLIENT12_OVERRIDE__SHIFT 0x14
+#define GFX_ICG_GL2A_CTRL__CLIENT13_OVERRIDE__SHIFT 0x15
+#define GFX_ICG_GL2A_CTRL__CLIENT14_OVERRIDE__SHIFT 0x16
+#define GFX_ICG_GL2A_CTRL__CLIENT15_OVERRIDE__SHIFT 0x17
+#define GFX_ICG_GL2A_CTRL__REG_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2A_CTRL__PERFMON_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2A_CTRL__CROSSBAR_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2A_CTRL__RTN_ARB_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2A_CTRL__GCRD_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2A_CTRL__CLIENT0_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2A_CTRL__CLIENT1_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2A_CTRL__CLIENT2_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2A_CTRL__CLIENT3_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2A_CTRL__CLIENT4_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2A_CTRL__CLIENT5_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2A_CTRL__CLIENT6_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2A_CTRL__CLIENT7_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2A_CTRL__CLIENT8_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2A_CTRL__CLIENT9_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2A_CTRL__CLIENT10_OVERRIDE_MASK 0x00040000L
+#define GFX_ICG_GL2A_CTRL__CLIENT11_OVERRIDE_MASK 0x00080000L
+#define GFX_ICG_GL2A_CTRL__CLIENT12_OVERRIDE_MASK 0x00100000L
+#define GFX_ICG_GL2A_CTRL__CLIENT13_OVERRIDE_MASK 0x00200000L
+#define GFX_ICG_GL2A_CTRL__CLIENT14_OVERRIDE_MASK 0x00400000L
+#define GFX_ICG_GL2A_CTRL__CLIENT15_OVERRIDE_MASK 0x00800000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1a
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT__SHIFT 0x1b
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP__SHIFT 0x1c
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x04000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT_MASK 0x08000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP_MASK 0x10000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__RESERVED__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__RESERVED_MASK 0xFFFFFFFFL
+//CGTT_SC_CLK_CTRL3
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_STALL_OVERRIDE__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_STALL_OVERRIDE__SHIFT 0x1
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_STALL_OVERRIDE__SHIFT 0x2
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_STALL_OVERRIDE__SHIFT 0x3
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_STALL_OVERRIDE__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_STALL_OVERRIDE__SHIFT 0x5
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_STALL_OVERRIDE__SHIFT 0x6
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_STALL_OVERRIDE__SHIFT 0x7
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_STALL_OVERRIDE__SHIFT 0x8
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_STALL_OVERRIDE__SHIFT 0x9
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_STALL_OVERRIDE__SHIFT 0xa
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_STALL_OVERRIDE__SHIFT 0xb
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_STALL_OVERRIDE__SHIFT 0xc
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_STALL_OVERRIDE__SHIFT 0xd
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_STALL_OVERRIDE_MASK 0x00000001L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_STALL_OVERRIDE_MASK 0x00000002L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_STALL_OVERRIDE_MASK 0x00000004L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_STALL_OVERRIDE_MASK 0x00000008L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_STALL_OVERRIDE_MASK 0x00000010L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_STALL_OVERRIDE_MASK 0x00000020L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_STALL_OVERRIDE_MASK 0x00000040L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_STALL_OVERRIDE_MASK 0x00000080L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_STALL_OVERRIDE_MASK 0x00000100L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_STALL_OVERRIDE_MASK 0x00000200L
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_STALL_OVERRIDE_MASK 0x00000400L
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_STALL_OVERRIDE_MASK 0x00000800L
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_STALL_OVERRIDE_MASK 0x00001000L
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_STALL_OVERRIDE_MASK 0x00002000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINROWWARP_CLK_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPBINWARP_CLK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPFBWBINWARP_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPHSPANUNWARP_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL3__PBB_WARPSCISSORUNWARP_CLK_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACK_CLK_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWBACKREPEATER_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONT_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWFRONTREPEATER_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FBWSCALER_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL3__PBB_FRONT_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL3__PBB_BATCHIN_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL3__PBB_VRASTER_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL3__PBB_VGATHER_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL4
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_STALL_OVERRIDE__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_STALL_OVERRIDE__SHIFT 0x1
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_STALL_OVERRIDE__SHIFT 0x2
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_STALL_OVERRIDE__SHIFT 0x3
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_STALL_OVERRIDE__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_STALL_OVERRIDE__SHIFT 0x5
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_STALL_OVERRIDE__SHIFT 0x6
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_STALL_OVERRIDE__SHIFT 0x7
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_STALL_OVERRIDE__SHIFT 0x8
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_STALL_OVERRIDE__SHIFT 0x9
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_STALL_OVERRIDE__SHIFT 0xa
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_STALL_OVERRIDE__SHIFT 0xb
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_STALL_OVERRIDE__SHIFT 0xc
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_STALL_OVERRIDE_MASK 0x00000001L
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_STALL_OVERRIDE_MASK 0x00000002L
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_STALL_OVERRIDE_MASK 0x00000004L
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_STALL_OVERRIDE_MASK 0x00000008L
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_STALL_OVERRIDE_MASK 0x00000010L
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_STALL_OVERRIDE_MASK 0x00000020L
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_STALL_OVERRIDE_MASK 0x00000040L
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_STALL_OVERRIDE_MASK 0x00000080L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_STALL_OVERRIDE_MASK 0x00000100L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_STALL_OVERRIDE_MASK 0x00000200L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_STALL_OVERRIDE_MASK 0x00000400L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_STALL_OVERRIDE_MASK 0x00000800L
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_STALL_OVERRIDE_MASK 0x00001000L
+#define CGTT_SC_CLK_CTRL4__PBB_VCOARSE_CLK_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL4__PBB_VDETAIL_CLK_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL4__PBB_HRASTER_CLK_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL4__PBB_HCONFIG_CLK_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL4__PBB_HGATHER_CLK_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL4__PBB_HCOARSE_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL4__PBB_HDETAIL_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL4__PBB_HREPEAT_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHOUT_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTPUT_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL4__PBB_OUTMUX_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL4__PBB_BATCHINFO_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL4__PBB_EVENTINFO_CLK_OVERRIDE_MASK 0x80000000L
+//GCEA_ICG_CTRL
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x0
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x2
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x3
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x4
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_MAM__SHIFT 0x5
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x00000001L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000002L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000004L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000008L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x00000010L
+#define GCEA_ICG_CTRL__SOFT_OVERRIDE_MAM_MASK 0x00000020L
+//GL1I_GL1R_MGCG_OVERRIDE
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_SCLK_OVERRIDE__SHIFT 0x0
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x1
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SCLK_OVERRIDE__SHIFT 0x2
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x3
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SRC_DCLK_OVERRIDE__SHIFT 0x4
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_SRC_MGCG_SCLK_OVERRIDE__SHIFT 0x5
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_RET_MGCG_SCLK_OVERRIDE__SHIFT 0x6
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_SCLK_OVERRIDE_MASK 0x00000001L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IR_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000002L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SCLK_OVERRIDE_MASK 0x00000004L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000008L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1IW_MGCG_SRC_DCLK_OVERRIDE_MASK 0x00000010L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_SRC_MGCG_SCLK_OVERRIDE_MASK 0x00000020L
+#define GL1I_GL1R_MGCG_OVERRIDE__GL1A_GL1R_RET_MGCG_SCLK_OVERRIDE_MASK 0x00000040L
+//GL1H_ICG_CTRL
+#define GL1H_ICG_CTRL__REG_DCLK_OVERRIDE__SHIFT 0x0
+#define GL1H_ICG_CTRL__REQ_ARB_DCLK_OVERRIDE__SHIFT 0x1
+#define GL1H_ICG_CTRL__PERFMON_DCLK_OVERRIDE__SHIFT 0x2
+#define GL1H_ICG_CTRL__REQ_ARB_CLI0_DCLK_OVERRIDE__SHIFT 0x3
+#define GL1H_ICG_CTRL__REQ_ARB_CLI1_DCLK_OVERRIDE__SHIFT 0x4
+#define GL1H_ICG_CTRL__REQ_ARB_CLI2_DCLK_OVERRIDE__SHIFT 0x5
+#define GL1H_ICG_CTRL__REQ_ARB_CLI3_DCLK_OVERRIDE__SHIFT 0x6
+#define GL1H_ICG_CTRL__SRC_DCLK_OVERRIDE__SHIFT 0x7
+#define GL1H_ICG_CTRL__RET_DCLK_OVERRIDE__SHIFT 0x8
+#define GL1H_ICG_CTRL__REG_DCLK_OVERRIDE_MASK 0x00000001L
+#define GL1H_ICG_CTRL__REQ_ARB_DCLK_OVERRIDE_MASK 0x00000002L
+#define GL1H_ICG_CTRL__PERFMON_DCLK_OVERRIDE_MASK 0x00000004L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI0_DCLK_OVERRIDE_MASK 0x00000008L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI1_DCLK_OVERRIDE_MASK 0x00000010L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI2_DCLK_OVERRIDE_MASK 0x00000020L
+#define GL1H_ICG_CTRL__REQ_ARB_CLI3_DCLK_OVERRIDE_MASK 0x00000040L
+#define GL1H_ICG_CTRL__SRC_DCLK_OVERRIDE_MASK 0x00000080L
+#define GL1H_ICG_CTRL__RET_DCLK_OVERRIDE_MASK 0x00000100L
+//CHI_CHR_MGCG_OVERRIDE
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_SCLK_OVERRIDE__SHIFT 0x0
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x1
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SCLK_OVERRIDE__SHIFT 0x2
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_RET_DCLK_OVERRIDE__SHIFT 0x3
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SRC_DCLK_OVERRIDE__SHIFT 0x4
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_RET_MGCG_SCLK_OVERRIDE__SHIFT 0x5
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_SRC_MGCG_SCLK_OVERRIDE__SHIFT 0x6
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_SCLK_OVERRIDE_MASK 0x00000001L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIR_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000002L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SCLK_OVERRIDE_MASK 0x00000004L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_RET_DCLK_OVERRIDE_MASK 0x00000008L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHIW_MGCG_SRC_DCLK_OVERRIDE_MASK 0x00000010L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_RET_MGCG_SCLK_OVERRIDE_MASK 0x00000020L
+#define CHI_CHR_MGCG_OVERRIDE__CHA_CHR_SRC_MGCG_SCLK_OVERRIDE_MASK 0x00000040L
+//ICG_GL1C_CLK_CTRL
+#define ICG_GL1C_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_GL1C_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_GL1C_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_GL1C_CLK_CTRL__VM_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_GL1C_CLK_CTRL__TAG_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_GL1C_CLK_CTRL__GCR_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_GL1C_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_GL1C_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x7
+#define ICG_GL1C_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x8
+#define ICG_GL1C_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x9
+#define ICG_GL1C_CLK_CTRL__LATENCY_FIFO_CLK_OVERRIDE__SHIFT 0xa
+#define ICG_GL1C_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_GL1C_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_GL1C_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_GL1C_CLK_CTRL__VM_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_GL1C_CLK_CTRL__TAG_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_GL1C_CLK_CTRL__GCR_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_GL1C_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000040L
+#define ICG_GL1C_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000080L
+#define ICG_GL1C_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000100L
+#define ICG_GL1C_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000200L
+#define ICG_GL1C_CLK_CTRL__LATENCY_FIFO_CLK_OVERRIDE_MASK 0x00000400L
+//ICG_GL1A_CTRL
+#define ICG_GL1A_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_GL1A_CTRL__REQ_CLI_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_GL1A_CTRL__REQ_ARB_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_GL1A_CTRL__RET_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_GL1A_CTRL__REQ_CREDIT_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_GL1A_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_GL1A_CTRL__REG_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_GL1A_CTRL__REQ_CLI_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_GL1A_CTRL__REQ_ARB_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_GL1A_CTRL__RET_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_GL1A_CTRL__REQ_CREDIT_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_GL1A_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x00000020L
+//ICG_CHA_CTRL
+#define ICG_CHA_CTRL__REG_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHA_CTRL__REQ_CLI_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHA_CTRL__REQ_ARB_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHA_CTRL__RET_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHA_CTRL__REQ_CREDIT_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHA_CTRL__PERFMON_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHA_CTRL__REG_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHA_CTRL__REQ_CLI_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHA_CTRL__REQ_ARB_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHA_CTRL__RET_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHA_CTRL__REQ_CREDIT_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHA_CTRL__PERFMON_CLK_OVERRIDE_MASK 0x00000020L
+//GUS_ICG_CTRL
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_DRAM__SHIFT 0x0
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x2
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_DEMUX__SHIFT 0x3
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_WRITE__SHIFT 0x4
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_READ__SHIFT 0x5
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x6
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x7
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_STATIC__SHIFT 0x8
+#define GUS_ICG_CTRL__SPARE1__SHIFT 0x9
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_DRAM_MASK 0x00000001L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x00000002L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_READ_MASK 0x00000004L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_DEMUX_MASK 0x00000008L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_WRITE_MASK 0x00000010L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_RETURN_READ_MASK 0x00000020L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x00000040L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x00000080L
+#define GUS_ICG_CTRL__SOFT_OVERRIDE_STATIC_MASK 0x00000100L
+#define GUS_ICG_CTRL__SPARE1_MASK 0x0003FE00L
+//CGTT_PH_CLK_CTRL0
+#define CGTT_PH_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL0__DEBUG_BUS_EN__SHIFT 0x17
+#define CGTT_PH_CLK_CTRL0__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PH_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL0__DEBUG_BUS_EN_MASK 0x00800000L
+#define CGTT_PH_CLK_CTRL0__DISABLE_DEBUG_BUS_FLOP_EN_ON_PERFMON_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL1
+#define CGTT_PH_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL2
+#define CGTT_PH_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL3
+#define CGTT_PH_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+//GFX_ICG_GL2C_CTRL
+#define GFX_ICG_GL2C_CTRL__REG_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2C_CTRL__PERFMON_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2C_CTRL__IB_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2C_CTRL__TAG_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2C_CTRL__CM_CORE_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2C_CTRL__CORE_OVERRIDE__SHIFT 0x5
+#define GFX_ICG_GL2C_CTRL__CACHE_RAM_OVERRIDE__SHIFT 0x6
+#define GFX_ICG_GL2C_CTRL__GCR_OVERRIDE__SHIFT 0x7
+#define GFX_ICG_GL2C_CTRL__EXECUTE_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2C_CTRL__RETURN_BUFFER_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2C_CTRL__LATENCY_FIFO_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2C_CTRL__OUTPUT_FIFOS_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2C_CTRL__MC_WRITE_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2C_CTRL__EXECUTE_DECOMP_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2C_CTRL__EXECUTE_WRITE_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP0_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP1_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP2_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP3_OVERRIDE__SHIFT 0x12
+#define GFX_ICG_GL2C_CTRL__CM_RVF_OVERRIDE__SHIFT 0x14
+#define GFX_ICG_GL2C_CTRL__CM_SDR_OVERRIDE__SHIFT 0x15
+#define GFX_ICG_GL2C_CTRL__CM_RPF_OVERRIDE__SHIFT 0x16
+#define GFX_ICG_GL2C_CTRL__CM_STS_OVERRIDE__SHIFT 0x17
+#define GFX_ICG_GL2C_CTRL__CM_READ_OVERRIDE__SHIFT 0x18
+#define GFX_ICG_GL2C_CTRL__CM_MERGE_OVERRIDE__SHIFT 0x19
+#define GFX_ICG_GL2C_CTRL__CM_COMP_OVERRIDE__SHIFT 0x1a
+#define GFX_ICG_GL2C_CTRL__CM_DCC_OVERRIDE__SHIFT 0x1b
+#define GFX_ICG_GL2C_CTRL__CM_WRITE_OVERRIDE__SHIFT 0x1c
+#define GFX_ICG_GL2C_CTRL__CM_NOOP_OVERRIDE__SHIFT 0x1d
+#define GFX_ICG_GL2C_CTRL__MDC_TAG_OVERRIDE__SHIFT 0x1e
+#define GFX_ICG_GL2C_CTRL__MDC_DATA_OVERRIDE__SHIFT 0x1f
+#define GFX_ICG_GL2C_CTRL__REG_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2C_CTRL__PERFMON_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2C_CTRL__IB_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2C_CTRL__TAG_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2C_CTRL__CM_CORE_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2C_CTRL__CORE_OVERRIDE_MASK 0x00000020L
+#define GFX_ICG_GL2C_CTRL__CACHE_RAM_OVERRIDE_MASK 0x00000040L
+#define GFX_ICG_GL2C_CTRL__GCR_OVERRIDE_MASK 0x00000080L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2C_CTRL__RETURN_BUFFER_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2C_CTRL__LATENCY_FIFO_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2C_CTRL__OUTPUT_FIFOS_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2C_CTRL__MC_WRITE_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_DECOMP_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2C_CTRL__EXECUTE_WRITE_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP0_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP1_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP2_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2C_CTRL__TAG_FLOPSET_GROUP3_OVERRIDE_MASK 0x00040000L
+#define GFX_ICG_GL2C_CTRL__CM_RVF_OVERRIDE_MASK 0x00100000L
+#define GFX_ICG_GL2C_CTRL__CM_SDR_OVERRIDE_MASK 0x00200000L
+#define GFX_ICG_GL2C_CTRL__CM_RPF_OVERRIDE_MASK 0x00400000L
+#define GFX_ICG_GL2C_CTRL__CM_STS_OVERRIDE_MASK 0x00800000L
+#define GFX_ICG_GL2C_CTRL__CM_READ_OVERRIDE_MASK 0x01000000L
+#define GFX_ICG_GL2C_CTRL__CM_MERGE_OVERRIDE_MASK 0x02000000L
+#define GFX_ICG_GL2C_CTRL__CM_COMP_OVERRIDE_MASK 0x04000000L
+#define GFX_ICG_GL2C_CTRL__CM_DCC_OVERRIDE_MASK 0x08000000L
+#define GFX_ICG_GL2C_CTRL__CM_WRITE_OVERRIDE_MASK 0x10000000L
+#define GFX_ICG_GL2C_CTRL__CM_NOOP_OVERRIDE_MASK 0x20000000L
+#define GFX_ICG_GL2C_CTRL__MDC_TAG_OVERRIDE_MASK 0x40000000L
+#define GFX_ICG_GL2C_CTRL__MDC_DATA_OVERRIDE_MASK 0x80000000L
+//GFX_ICG_GL2C_CTRL1
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT0_OVERRIDE__SHIFT 0x0
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT1_OVERRIDE__SHIFT 0x1
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT2_OVERRIDE__SHIFT 0x2
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT3_OVERRIDE__SHIFT 0x3
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT4_OVERRIDE__SHIFT 0x4
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT5_OVERRIDE__SHIFT 0x5
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT6_OVERRIDE__SHIFT 0x6
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT7_OVERRIDE__SHIFT 0x7
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT8_OVERRIDE__SHIFT 0x8
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT9_OVERRIDE__SHIFT 0x9
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT10_OVERRIDE__SHIFT 0xa
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT11_OVERRIDE__SHIFT 0xb
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT12_OVERRIDE__SHIFT 0xc
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT13_OVERRIDE__SHIFT 0xd
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT14_OVERRIDE__SHIFT 0xe
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT15_OVERRIDE__SHIFT 0xf
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT16_OVERRIDE__SHIFT 0x10
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT17_OVERRIDE__SHIFT 0x11
+#define GFX_ICG_GL2C_CTRL1__TAG_PROBE_OVERRIDE__SHIFT 0x18
+#define GFX_ICG_GL2C_CTRL1__DCC_UPPER_OVERRIDE__SHIFT 0x19
+#define GFX_ICG_GL2C_CTRL1__DCC_LOWER_OVERRIDE__SHIFT 0x1a
+#define GFX_ICG_GL2C_CTRL1__ZD_UPPER_OVERRIDE__SHIFT 0x1b
+#define GFX_ICG_GL2C_CTRL1__ZD_LOWER_OVERRIDE__SHIFT 0x1c
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT0_OVERRIDE_MASK 0x00000001L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT1_OVERRIDE_MASK 0x00000002L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT2_OVERRIDE_MASK 0x00000004L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT3_OVERRIDE_MASK 0x00000008L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT4_OVERRIDE_MASK 0x00000010L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT5_OVERRIDE_MASK 0x00000020L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT6_OVERRIDE_MASK 0x00000040L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT7_OVERRIDE_MASK 0x00000080L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT8_OVERRIDE_MASK 0x00000100L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT9_OVERRIDE_MASK 0x00000200L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT10_OVERRIDE_MASK 0x00000400L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT11_OVERRIDE_MASK 0x00000800L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT12_OVERRIDE_MASK 0x00001000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT13_OVERRIDE_MASK 0x00002000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT14_OVERRIDE_MASK 0x00004000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT15_OVERRIDE_MASK 0x00008000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT16_OVERRIDE_MASK 0x00010000L
+#define GFX_ICG_GL2C_CTRL1__OUTPUT_FIFOS_INTERNAL_CLIENT17_OVERRIDE_MASK 0x00020000L
+#define GFX_ICG_GL2C_CTRL1__TAG_PROBE_OVERRIDE_MASK 0x01000000L
+#define GFX_ICG_GL2C_CTRL1__DCC_UPPER_OVERRIDE_MASK 0x02000000L
+#define GFX_ICG_GL2C_CTRL1__DCC_LOWER_OVERRIDE_MASK 0x04000000L
+#define GFX_ICG_GL2C_CTRL1__ZD_UPPER_OVERRIDE_MASK 0x08000000L
+#define GFX_ICG_GL2C_CTRL1__ZD_LOWER_OVERRIDE_MASK 0x10000000L
+//ICG_LDS_CLK_CTRL
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD0_OVERRIDE__SHIFT 0x0
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD1_OVERRIDE__SHIFT 0x1
+#define ICG_LDS_CLK_CTRL__LDS_WGP_ARB_OVERRIDE__SHIFT 0x2
+#define ICG_LDS_CLK_CTRL__LDS_TD_OVERRIDE__SHIFT 0x3
+#define ICG_LDS_CLK_CTRL__LDS_ATTR_WR_OVERRIDE__SHIFT 0x4
+#define ICG_LDS_CLK_CTRL__LDS_CONFIG_REG_OVERRIDE__SHIFT 0x5
+#define ICG_LDS_CLK_CTRL__LDS_IDX_PIPE_OVERRIDE__SHIFT 0x6
+#define ICG_LDS_CLK_CTRL__LDS_IDX_DIR_OVERRIDE__SHIFT 0x7
+#define ICG_LDS_CLK_CTRL__LDS_IDX_WR_OVERRIDE__SHIFT 0x8
+#define ICG_LDS_CLK_CTRL__LDS_IDX_INPUT_QUEUE_OVERRIDE__SHIFT 0x9
+#define ICG_LDS_CLK_CTRL__LDS_MEM_OVERRIDE__SHIFT 0xa
+#define ICG_LDS_CLK_CTRL__LDS_IDX_OUTPUT_ALIGNER_OVERRIDE__SHIFT 0xb
+#define ICG_LDS_CLK_CTRL__LDS_DIR_OUTPUT_ALIGNER_OVERRIDE__SHIFT 0xc
+#define ICG_LDS_CLK_CTRL__LDS_IDX_BANK_CONFLICT_OVERRIDE__SHIFT 0xd
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_INPUT_OVERRIDE__SHIFT 0xe
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_OUTPUT_OVERRIDE__SHIFT 0xf
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_PIPE_OVERRIDE__SHIFT 0x10
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHEDULER_OVERRIDE__SHIFT 0x11
+#define ICG_LDS_CLK_CTRL__LDS_IDX_RDRTN_OVERRIDE__SHIFT 0x12
+#define ICG_LDS_CLK_CTRL__LDS_SP_DONE_OVERRIDE__SHIFT 0x13
+#define ICG_LDS_CLK_CTRL__LDS_SQC_PERF_OVERRIDE__SHIFT 0x14
+#define ICG_LDS_CLK_CTRL__LDS_SP_READ_OVERRIDE__SHIFT 0x15
+#define ICG_LDS_CLK_CTRL__SQ_LDS_VMEMCMD_OVERRIDE__SHIFT 0x16
+#define ICG_LDS_CLK_CTRL__SP_LDS_VMEMREQ_OVERRIDE__SHIFT 0x17
+#define ICG_LDS_CLK_CTRL__SPI_LDS_STALL_OVERRIDE__SHIFT 0x18
+#define ICG_LDS_CLK_CTRL__MEM_WR_OVERRIDE__SHIFT 0x19
+#define ICG_LDS_CLK_CTRL__LDS_CLK_OVERRIDE_UNUSED__SHIFT 0x1a
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD0_OVERRIDE_MASK 0x00000001L
+#define ICG_LDS_CLK_CTRL__LDS_DLOAD1_OVERRIDE_MASK 0x00000002L
+#define ICG_LDS_CLK_CTRL__LDS_WGP_ARB_OVERRIDE_MASK 0x00000004L
+#define ICG_LDS_CLK_CTRL__LDS_TD_OVERRIDE_MASK 0x00000008L
+#define ICG_LDS_CLK_CTRL__LDS_ATTR_WR_OVERRIDE_MASK 0x00000010L
+#define ICG_LDS_CLK_CTRL__LDS_CONFIG_REG_OVERRIDE_MASK 0x00000020L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_PIPE_OVERRIDE_MASK 0x00000040L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_DIR_OVERRIDE_MASK 0x00000080L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_WR_OVERRIDE_MASK 0x00000100L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_INPUT_QUEUE_OVERRIDE_MASK 0x00000200L
+#define ICG_LDS_CLK_CTRL__LDS_MEM_OVERRIDE_MASK 0x00000400L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_OUTPUT_ALIGNER_OVERRIDE_MASK 0x00000800L
+#define ICG_LDS_CLK_CTRL__LDS_DIR_OUTPUT_ALIGNER_OVERRIDE_MASK 0x00001000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_BANK_CONFLICT_OVERRIDE_MASK 0x00002000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_INPUT_OVERRIDE_MASK 0x00004000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_OUTPUT_OVERRIDE_MASK 0x00008000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHED_PIPE_OVERRIDE_MASK 0x00010000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_SCHEDULER_OVERRIDE_MASK 0x00020000L
+#define ICG_LDS_CLK_CTRL__LDS_IDX_RDRTN_OVERRIDE_MASK 0x00040000L
+#define ICG_LDS_CLK_CTRL__LDS_SP_DONE_OVERRIDE_MASK 0x00080000L
+#define ICG_LDS_CLK_CTRL__LDS_SQC_PERF_OVERRIDE_MASK 0x00100000L
+#define ICG_LDS_CLK_CTRL__LDS_SP_READ_OVERRIDE_MASK 0x00200000L
+#define ICG_LDS_CLK_CTRL__SQ_LDS_VMEMCMD_OVERRIDE_MASK 0x00400000L
+#define ICG_LDS_CLK_CTRL__SP_LDS_VMEMREQ_OVERRIDE_MASK 0x00800000L
+#define ICG_LDS_CLK_CTRL__SPI_LDS_STALL_OVERRIDE_MASK 0x01000000L
+#define ICG_LDS_CLK_CTRL__MEM_WR_OVERRIDE_MASK 0x02000000L
+#define ICG_LDS_CLK_CTRL__LDS_CLK_OVERRIDE_UNUSED_MASK 0xFC000000L
+//GFX_ICG_UTCL1_CTRL
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE0__SHIFT 0x0
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE1__SHIFT 0x1
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE2__SHIFT 0x2
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE3__SHIFT 0x3
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE4__SHIFT 0x4
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE5__SHIFT 0x5
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE6__SHIFT 0x6
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE7__SHIFT 0x7
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE8__SHIFT 0x8
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE9__SHIFT 0x9
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE10__SHIFT 0xa
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE11__SHIFT 0xb
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE12__SHIFT 0xc
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE13__SHIFT 0xd
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE14__SHIFT 0xe
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE15_31__SHIFT 0xf
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE0_MASK 0x00000001L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE1_MASK 0x00000002L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE2_MASK 0x00000004L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE3_MASK 0x00000008L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE4_MASK 0x00000010L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE5_MASK 0x00000020L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE6_MASK 0x00000040L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE7_MASK 0x00000080L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE8_MASK 0x00000100L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE9_MASK 0x00000200L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE10_MASK 0x00000400L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE11_MASK 0x00000800L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE12_MASK 0x00001000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE13_MASK 0x00002000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE14_MASK 0x00004000L
+#define GFX_ICG_UTCL1_CTRL__SOFT_OVERRIDE15_31_MASK 0xFFFF8000L
+//ICG_CHC_CLK_CTRL
+#define ICG_CHC_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHC_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHC_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHC_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHC_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHC_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHC_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_CHC_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHC_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHC_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHC_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHC_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHC_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_CHC_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000040L
+//ICG_CHCG_CLK_CTRL
+#define ICG_CHCG_CLK_CTRL__GLOBAL_CLK_OVERRIDE__SHIFT 0x0
+#define ICG_CHCG_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE__SHIFT 0x1
+#define ICG_CHCG_CLK_CTRL__REQUEST_CLK_OVERRIDE__SHIFT 0x2
+#define ICG_CHCG_CLK_CTRL__SRC_DATA_CLK_OVERRIDE__SHIFT 0x3
+#define ICG_CHCG_CLK_CTRL__RETURN_CLK_OVERRIDE__SHIFT 0x4
+#define ICG_CHCG_CLK_CTRL__GRBM_CLK_OVERRIDE__SHIFT 0x5
+#define ICG_CHCG_CLK_CTRL__PERF_CLK_OVERRIDE__SHIFT 0x6
+#define ICG_CHCG_CLK_CTRL__GLOBAL_CLK_OVERRIDE_MASK 0x00000001L
+#define ICG_CHCG_CLK_CTRL__GLOBAL_NONHARVESTABLE_CLK_OVERRIDE_MASK 0x00000002L
+#define ICG_CHCG_CLK_CTRL__REQUEST_CLK_OVERRIDE_MASK 0x00000004L
+#define ICG_CHCG_CLK_CTRL__SRC_DATA_CLK_OVERRIDE_MASK 0x00000008L
+#define ICG_CHCG_CLK_CTRL__RETURN_CLK_OVERRIDE_MASK 0x00000010L
+#define ICG_CHCG_CLK_CTRL__GRBM_CLK_OVERRIDE_MASK 0x00000020L
+#define ICG_CHCG_CLK_CTRL__PERF_CLK_OVERRIDE_MASK 0x00000040L
+
+
+// addressBlock: gc_pspdec
+//CP_MES_DM_INDEX_ADDR
+#define CP_MES_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_MES_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_MES_DM_INDEX_DATA
+#define CP_MES_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_MES_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_MEC_DM_INDEX_ADDR
+#define CP_MEC_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_MEC_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_MEC_DM_INDEX_DATA
+#define CP_MEC_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_MEC_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DM_INDEX_ADDR
+#define CP_GFX_RS64_DM_INDEX_ADDR__ADDR__SHIFT 0x0
+#define CP_GFX_RS64_DM_INDEX_ADDR__ADDR_MASK 0xFFFFFFFFL
+//CP_GFX_RS64_DM_INDEX_DATA
+#define CP_GFX_RS64_DM_INDEX_DATA__DATA__SHIFT 0x0
+#define CP_GFX_RS64_DM_INDEX_DATA__DATA_MASK 0xFFFFFFFFL
+//CPG_PSP_DEBUG
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL__SHIFT 0x2
+#define CPG_PSP_DEBUG__GPA_OVERRIDE__SHIFT 0x3
+#define CPG_PSP_DEBUG__UCODE_VF_OVERRIDE__SHIFT 0x4
+#define CPG_PSP_DEBUG__MTYPE_TMZ_OVERRIDE__SHIFT 0x5
+#define CPG_PSP_DEBUG__SECURE_REG_OVERRIDE__SHIFT 0x6
+#define CPG_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPG_PSP_DEBUG__VMID_VIOLATION_CNTL_MASK 0x00000004L
+#define CPG_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L
+#define CPG_PSP_DEBUG__UCODE_VF_OVERRIDE_MASK 0x00000010L
+#define CPG_PSP_DEBUG__MTYPE_TMZ_OVERRIDE_MASK 0x00000020L
+#define CPG_PSP_DEBUG__SECURE_REG_OVERRIDE_MASK 0x00000040L
+//CPC_PSP_DEBUG
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL__SHIFT 0x0
+#define CPC_PSP_DEBUG__GPA_OVERRIDE__SHIFT 0x3
+#define CPC_PSP_DEBUG__UCODE_VF_OVERRIDE__SHIFT 0x4
+#define CPC_PSP_DEBUG__MTYPE_TMZ_OVERRIDE__SHIFT 0x5
+#define CPC_PSP_DEBUG__SECURE_REG_OVERRIDE__SHIFT 0x6
+#define CPC_PSP_DEBUG__PRIV_VIOLATION_CNTL_MASK 0x00000003L
+#define CPC_PSP_DEBUG__GPA_OVERRIDE_MASK 0x00000008L
+#define CPC_PSP_DEBUG__UCODE_VF_OVERRIDE_MASK 0x00000010L
+#define CPC_PSP_DEBUG__MTYPE_TMZ_OVERRIDE_MASK 0x00000020L
+#define CPC_PSP_DEBUG__SECURE_REG_OVERRIDE_MASK 0x00000040L
+//GRBM_IOV_ERROR_FIFO
+#define GRBM_IOV_ERROR_FIFO__IOV_ADDR__SHIFT 0x0
+#define GRBM_IOV_ERROR_FIFO__IOV_VFID__SHIFT 0x12
+#define GRBM_IOV_ERROR_FIFO__IOV_SSRCID__SHIFT 0x18
+#define GRBM_IOV_ERROR_FIFO__IOV_OP__SHIFT 0x1c
+#define GRBM_IOV_ERROR_FIFO__IOV_VF__SHIFT 0x1d
+#define GRBM_IOV_ERROR_FIFO__FIFO_OVERFLOW__SHIFT 0x1e
+#define GRBM_IOV_ERROR_FIFO__READ_VALID__SHIFT 0x1f
+#define GRBM_IOV_ERROR_FIFO__IOV_ADDR_MASK 0x0003FFFFL
+#define GRBM_IOV_ERROR_FIFO__IOV_VFID_MASK 0x00FC0000L
+#define GRBM_IOV_ERROR_FIFO__IOV_SSRCID_MASK 0x0F000000L
+#define GRBM_IOV_ERROR_FIFO__IOV_OP_MASK 0x10000000L
+#define GRBM_IOV_ERROR_FIFO__IOV_VF_MASK 0x20000000L
+#define GRBM_IOV_ERROR_FIFO__FIFO_OVERFLOW_MASK 0x40000000L
+#define GRBM_IOV_ERROR_FIFO__READ_VALID_MASK 0x80000000L
+//GRBM_SEC_CNTL
+#define GRBM_SEC_CNTL__DEBUG_ENABLE__SHIFT 0x0
+#define GRBM_SEC_CNTL__DEBUG_ENABLE_MASK 0x00000001L
+//GRBM_CAM_INDEX
+#define GRBM_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_CAM_INDEX__CAM_INDEX_MASK 0x0000000FL
+//GRBM_HYP_CAM_INDEX
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX__SHIFT 0x0
+#define GRBM_HYP_CAM_INDEX__CAM_INDEX_MASK 0x0000000FL
+//GRBM_CAM_DATA
+#define GRBM_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_HYP_CAM_DATA
+#define GRBM_HYP_CAM_DATA__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA__CAM_ADDR_MASK 0x0000FFFFL
+#define GRBM_HYP_CAM_DATA__CAM_REMAPADDR_MASK 0xFFFF0000L
+//GRBM_CAM_DATA_UPPER
+#define GRBM_CAM_DATA_UPPER__CAM_ADDR__SHIFT 0x0
+#define GRBM_CAM_DATA_UPPER__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_CAM_DATA_UPPER__CAM_ADDR_MASK 0x00000003L
+#define GRBM_CAM_DATA_UPPER__CAM_REMAPADDR_MASK 0x00030000L
+//GRBM_HYP_CAM_DATA_UPPER
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_ADDR__SHIFT 0x0
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_REMAPADDR__SHIFT 0x10
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_ADDR_MASK 0x00000003L
+#define GRBM_HYP_CAM_DATA_UPPER__CAM_REMAPADDR_MASK 0x00030000L
+//RLC_FWL_FIRST_VIOL_ADDR
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR__SHIFT 0x0
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID__SHIFT 0x12
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP__SHIFT 0x1e
+#define RLC_FWL_FIRST_VIOL_ADDR__RESERVED__SHIFT 0x1f
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_ADDR_MASK 0x0003FFFFL
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_APERTURE_ID_MASK 0x3FFC0000L
+#define RLC_FWL_FIRST_VIOL_ADDR__VIOL_OP_MASK 0x40000000L
+#define RLC_FWL_FIRST_VIOL_ADDR__RESERVED_MASK 0x80000000L
+
+
+// addressBlock: gc_gfx_imu_gfx_imudec
+//GFX_IMU_C2PMSG_0
+#define GFX_IMU_C2PMSG_0__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_1
+#define GFX_IMU_C2PMSG_1__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_2
+#define GFX_IMU_C2PMSG_2__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_3
+#define GFX_IMU_C2PMSG_3__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_4
+#define GFX_IMU_C2PMSG_4__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_5
+#define GFX_IMU_C2PMSG_5__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_5__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_6
+#define GFX_IMU_C2PMSG_6__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_6__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_7
+#define GFX_IMU_C2PMSG_7__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_7__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_8
+#define GFX_IMU_C2PMSG_8__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_8__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_9
+#define GFX_IMU_C2PMSG_9__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_9__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_10
+#define GFX_IMU_C2PMSG_10__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_10__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_11
+#define GFX_IMU_C2PMSG_11__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_11__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_12
+#define GFX_IMU_C2PMSG_12__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_12__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_13
+#define GFX_IMU_C2PMSG_13__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_13__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_14
+#define GFX_IMU_C2PMSG_14__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_14__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_15
+#define GFX_IMU_C2PMSG_15__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_15__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_16
+#define GFX_IMU_C2PMSG_16__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_16__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_17
+#define GFX_IMU_C2PMSG_17__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_17__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_18
+#define GFX_IMU_C2PMSG_18__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_18__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_19
+#define GFX_IMU_C2PMSG_19__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_19__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_20
+#define GFX_IMU_C2PMSG_20__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_20__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_21
+#define GFX_IMU_C2PMSG_21__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_21__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_22
+#define GFX_IMU_C2PMSG_22__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_22__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_23
+#define GFX_IMU_C2PMSG_23__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_23__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_24
+#define GFX_IMU_C2PMSG_24__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_24__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_25
+#define GFX_IMU_C2PMSG_25__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_25__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_26
+#define GFX_IMU_C2PMSG_26__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_26__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_27
+#define GFX_IMU_C2PMSG_27__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_27__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_28
+#define GFX_IMU_C2PMSG_28__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_28__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_29
+#define GFX_IMU_C2PMSG_29__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_29__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_30
+#define GFX_IMU_C2PMSG_30__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_30__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_31
+#define GFX_IMU_C2PMSG_31__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_31__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_32
+#define GFX_IMU_C2PMSG_32__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_32__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_33
+#define GFX_IMU_C2PMSG_33__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_33__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_34
+#define GFX_IMU_C2PMSG_34__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_34__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_35
+#define GFX_IMU_C2PMSG_35__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_35__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_36
+#define GFX_IMU_C2PMSG_36__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_36__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_37
+#define GFX_IMU_C2PMSG_37__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_37__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_38
+#define GFX_IMU_C2PMSG_38__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_38__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_39
+#define GFX_IMU_C2PMSG_39__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_39__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_40
+#define GFX_IMU_C2PMSG_40__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_40__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_41
+#define GFX_IMU_C2PMSG_41__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_41__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_42
+#define GFX_IMU_C2PMSG_42__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_42__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_43
+#define GFX_IMU_C2PMSG_43__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_43__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_44
+#define GFX_IMU_C2PMSG_44__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_44__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_45
+#define GFX_IMU_C2PMSG_45__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_45__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_46
+#define GFX_IMU_C2PMSG_46__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_46__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_47
+#define GFX_IMU_C2PMSG_47__DATA__SHIFT 0x0
+#define GFX_IMU_C2PMSG_47__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_MSG_FLAGS
+#define GFX_IMU_MSG_FLAGS__STATUS__SHIFT 0x0
+#define GFX_IMU_MSG_FLAGS__STATUS_MASK 0xFFFFFFFFL
+//GFX_IMU_C2PMSG_ACCESS_CTRL0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC0__SHIFT 0x0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC1__SHIFT 0x3
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC2__SHIFT 0x6
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC3__SHIFT 0x9
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC4__SHIFT 0xc
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC5__SHIFT 0xf
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC6__SHIFT 0x12
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC7__SHIFT 0x15
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC0_MASK 0x00000007L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC1_MASK 0x00000038L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC2_MASK 0x000001C0L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC3_MASK 0x00000E00L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC4_MASK 0x00007000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC5_MASK 0x00038000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC6_MASK 0x001C0000L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL0__ACC7_MASK 0x00E00000L
+//GFX_IMU_C2PMSG_ACCESS_CTRL1
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC8_15__SHIFT 0x0
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC16_23__SHIFT 0x3
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC24_31__SHIFT 0x6
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC32_39__SHIFT 0x9
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC40_47__SHIFT 0xc
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC8_15_MASK 0x00000007L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC16_23_MASK 0x00000038L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC24_31_MASK 0x000001C0L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC32_39_MASK 0x00000E00L
+#define GFX_IMU_C2PMSG_ACCESS_CTRL1__ACC40_47_MASK 0x00007000L
+//GFX_IMU_PWRMGT_IRQ_CTRL
+#define GFX_IMU_PWRMGT_IRQ_CTRL__REQ__SHIFT 0x0
+#define GFX_IMU_PWRMGT_IRQ_CTRL__REQ_MASK 0x00000001L
+//GFX_IMU_MP1_MUTEX
+#define GFX_IMU_MP1_MUTEX__MUTEX__SHIFT 0x0
+#define GFX_IMU_MP1_MUTEX__MUTEX_MASK 0x00000003L
+//GFX_IMU_RLC_DATA_4
+#define GFX_IMU_RLC_DATA_4__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_3
+#define GFX_IMU_RLC_DATA_3__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_2
+#define GFX_IMU_RLC_DATA_2__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_1
+#define GFX_IMU_RLC_DATA_1__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_DATA_0
+#define GFX_IMU_RLC_DATA_0__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_DATA_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_CMD
+#define GFX_IMU_RLC_CMD__CMD__SHIFT 0x0
+#define GFX_IMU_RLC_CMD__CMD_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_MUTEX
+#define GFX_IMU_RLC_MUTEX__MUTEX__SHIFT 0x0
+#define GFX_IMU_RLC_MUTEX__MUTEX_MASK 0x00000003L
+//GFX_IMU_RLC_MSG_STATUS
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_BUSY__SHIFT 0x0
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_MSG_ERROR__SHIFT 0x1
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_MSGDONE__SHIFT 0x10
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_CHGTOG__SHIFT 0x1e
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_DONETOG__SHIFT 0x1f
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_BUSY_MASK 0x00000001L
+#define GFX_IMU_RLC_MSG_STATUS__IMU2RLC_MSG_ERROR_MASK 0x00000002L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_MSGDONE_MASK 0x00010000L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_CHGTOG_MASK 0x40000000L
+#define GFX_IMU_RLC_MSG_STATUS__RLC2IMU_DONETOG_MASK 0x80000000L
+//RLC_GFX_IMU_DATA_0
+#define RLC_GFX_IMU_DATA_0__DATA__SHIFT 0x0
+#define RLC_GFX_IMU_DATA_0__DATA_MASK 0xFFFFFFFFL
+//RLC_GFX_IMU_CMD
+#define RLC_GFX_IMU_CMD__CMD__SHIFT 0x0
+#define RLC_GFX_IMU_CMD__CMD_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_STATUS
+#define GFX_IMU_RLC_STATUS__PD_ACTIVE__SHIFT 0x0
+#define GFX_IMU_RLC_STATUS__RLC_ALIVE__SHIFT 0x1
+#define GFX_IMU_RLC_STATUS__TBD2__SHIFT 0x2
+#define GFX_IMU_RLC_STATUS__TBD3__SHIFT 0x3
+#define GFX_IMU_RLC_STATUS__PD_ACTIVE_MASK 0x00000001L
+#define GFX_IMU_RLC_STATUS__RLC_ALIVE_MASK 0x00000002L
+#define GFX_IMU_RLC_STATUS__TBD2_MASK 0x00000004L
+#define GFX_IMU_RLC_STATUS__TBD3_MASK 0x00000008L
+//GFX_IMU_STATUS
+#define GFX_IMU_STATUS__ALLOW_GFXOFF__SHIFT 0x0
+#define GFX_IMU_STATUS__ALLOW_FA_DCS__SHIFT 0x1
+#define GFX_IMU_STATUS__TBD2__SHIFT 0x2
+#define GFX_IMU_STATUS__TBD3__SHIFT 0x3
+#define GFX_IMU_STATUS__TBD4__SHIFT 0x4
+#define GFX_IMU_STATUS__TBD5__SHIFT 0x5
+#define GFX_IMU_STATUS__TBD6__SHIFT 0x6
+#define GFX_IMU_STATUS__TBD7__SHIFT 0x7
+#define GFX_IMU_STATUS__TBD8__SHIFT 0x8
+#define GFX_IMU_STATUS__TBD9__SHIFT 0x9
+#define GFX_IMU_STATUS__TBD10__SHIFT 0xa
+#define GFX_IMU_STATUS__TBD11__SHIFT 0xb
+#define GFX_IMU_STATUS__TBD12__SHIFT 0xc
+#define GFX_IMU_STATUS__TBD13__SHIFT 0xd
+#define GFX_IMU_STATUS__TBD14__SHIFT 0xe
+#define GFX_IMU_STATUS__DISABLE_GFXCLK_DS__SHIFT 0xf
+#define GFX_IMU_STATUS__ALLOW_GFXOFF_MASK 0x00000001L
+#define GFX_IMU_STATUS__ALLOW_FA_DCS_MASK 0x00000002L
+#define GFX_IMU_STATUS__TBD2_MASK 0x00000004L
+#define GFX_IMU_STATUS__TBD3_MASK 0x00000008L
+#define GFX_IMU_STATUS__TBD4_MASK 0x00000010L
+#define GFX_IMU_STATUS__TBD5_MASK 0x00000020L
+#define GFX_IMU_STATUS__TBD6_MASK 0x00000040L
+#define GFX_IMU_STATUS__TBD7_MASK 0x00000080L
+#define GFX_IMU_STATUS__TBD8_MASK 0x00000100L
+#define GFX_IMU_STATUS__TBD9_MASK 0x00000200L
+#define GFX_IMU_STATUS__TBD10_MASK 0x00000400L
+#define GFX_IMU_STATUS__TBD11_MASK 0x00000800L
+#define GFX_IMU_STATUS__TBD12_MASK 0x00001000L
+#define GFX_IMU_STATUS__TBD13_MASK 0x00002000L
+#define GFX_IMU_STATUS__TBD14_MASK 0x00004000L
+#define GFX_IMU_STATUS__DISABLE_GFXCLK_DS_MASK 0x00008000L
+//GFX_IMU_SOC_DATA
+#define GFX_IMU_SOC_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_SOC_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SOC_ADDR
+#define GFX_IMU_SOC_ADDR__ADDR__SHIFT 0x0
+#define GFX_IMU_SOC_ADDR__ADDR_MASK 0xFFFFFFFFL
+//GFX_IMU_SOC_REQ
+#define GFX_IMU_SOC_REQ__REQ_BUSY__SHIFT 0x0
+#define GFX_IMU_SOC_REQ__R_W__SHIFT 0x1
+#define GFX_IMU_SOC_REQ__ERR__SHIFT 0x1f
+#define GFX_IMU_SOC_REQ__REQ_BUSY_MASK 0x00000001L
+#define GFX_IMU_SOC_REQ__R_W_MASK 0x00000002L
+#define GFX_IMU_SOC_REQ__ERR_MASK 0x80000000L
+//GFX_IMU_VF_CTRL
+#define GFX_IMU_VF_CTRL__VF__SHIFT 0x0
+#define GFX_IMU_VF_CTRL__VFID__SHIFT 0x1
+#define GFX_IMU_VF_CTRL__QOS__SHIFT 0x7
+#define GFX_IMU_VF_CTRL__VF_MASK 0x00000001L
+#define GFX_IMU_VF_CTRL__VFID_MASK 0x0000007EL
+#define GFX_IMU_VF_CTRL__QOS_MASK 0x00000780L
+//GFX_IMU_TELEMETRY
+#define GFX_IMU_TELEMETRY__TELEMETRY_ENTRIES__SHIFT 0x0
+#define GFX_IMU_TELEMETRY__TELEMETRY_DATA_SAMPLE_SIZE__SHIFT 0x5
+#define GFX_IMU_TELEMETRY__FIFO_OVERFLOW__SHIFT 0x6
+#define GFX_IMU_TELEMETRY__FIFO_UNDERFLOW__SHIFT 0x7
+#define GFX_IMU_TELEMETRY__FSM_STATE__SHIFT 0x8
+#define GFX_IMU_TELEMETRY__SVI_TYPE__SHIFT 0xc
+#define GFX_IMU_TELEMETRY__ENABLE_FIFO__SHIFT 0x1e
+#define GFX_IMU_TELEMETRY__ENABLE_IMU_RLC_TELEMETRY__SHIFT 0x1f
+#define GFX_IMU_TELEMETRY__TELEMETRY_ENTRIES_MASK 0x0000001FL
+#define GFX_IMU_TELEMETRY__TELEMETRY_DATA_SAMPLE_SIZE_MASK 0x00000020L
+#define GFX_IMU_TELEMETRY__FIFO_OVERFLOW_MASK 0x00000040L
+#define GFX_IMU_TELEMETRY__FIFO_UNDERFLOW_MASK 0x00000080L
+#define GFX_IMU_TELEMETRY__FSM_STATE_MASK 0x00000700L
+#define GFX_IMU_TELEMETRY__SVI_TYPE_MASK 0x00003000L
+#define GFX_IMU_TELEMETRY__ENABLE_FIFO_MASK 0x40000000L
+#define GFX_IMU_TELEMETRY__ENABLE_IMU_RLC_TELEMETRY_MASK 0x80000000L
+//GFX_IMU_TELEMETRY_DATA
+#define GFX_IMU_TELEMETRY_DATA__CURRENT__SHIFT 0x0
+#define GFX_IMU_TELEMETRY_DATA__VOLTAGE__SHIFT 0x10
+#define GFX_IMU_TELEMETRY_DATA__CURRENT_MASK 0x0000FFFFL
+#define GFX_IMU_TELEMETRY_DATA__VOLTAGE_MASK 0xFFFF0000L
+//GFX_IMU_TELEMETRY_TEMPERATURE
+#define GFX_IMU_TELEMETRY_TEMPERATURE__TEMPERATURE__SHIFT 0x0
+#define GFX_IMU_TELEMETRY_TEMPERATURE__TEMPERATURE_MASK 0x0000FFFFL
+//GFX_IMU_SCRATCH_0
+#define GFX_IMU_SCRATCH_0__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_0__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_1
+#define GFX_IMU_SCRATCH_1__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_1__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_2
+#define GFX_IMU_SCRATCH_2__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_2__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_3
+#define GFX_IMU_SCRATCH_3__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_3__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_4
+#define GFX_IMU_SCRATCH_4__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_4__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_5
+#define GFX_IMU_SCRATCH_5__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_5__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_6
+#define GFX_IMU_SCRATCH_6__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_6__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_7
+#define GFX_IMU_SCRATCH_7__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_7__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_8
+#define GFX_IMU_SCRATCH_8__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_8__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_9
+#define GFX_IMU_SCRATCH_9__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_9__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_10
+#define GFX_IMU_SCRATCH_10__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_10__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_11
+#define GFX_IMU_SCRATCH_11__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_11__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_12
+#define GFX_IMU_SCRATCH_12__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_12__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_13
+#define GFX_IMU_SCRATCH_13__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_13__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_14
+#define GFX_IMU_SCRATCH_14__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_14__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_SCRATCH_15
+#define GFX_IMU_SCRATCH_15__DATA__SHIFT 0x0
+#define GFX_IMU_SCRATCH_15__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_FW_GTS_LO
+#define GFX_IMU_FW_GTS_LO__TSTAMP_LO__SHIFT 0x0
+#define GFX_IMU_FW_GTS_LO__TSTAMP_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_FW_GTS_HI
+#define GFX_IMU_FW_GTS_HI__TSTAMP_HI__SHIFT 0x0
+#define GFX_IMU_FW_GTS_HI__TSTAMP_HI_MASK 0x00FFFFFFL
+//GFX_IMU_GTS_OFFSET_LO
+#define GFX_IMU_GTS_OFFSET_LO__GTS_OFFSET_LO__SHIFT 0x0
+#define GFX_IMU_GTS_OFFSET_LO__GTS_OFFSET_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_GTS_OFFSET_HI
+#define GFX_IMU_GTS_OFFSET_HI__GTS_OFFSET_HI__SHIFT 0x0
+#define GFX_IMU_GTS_OFFSET_HI__GTS_OFFSET_HI_MASK 0x00FFFFFFL
+//GFX_IMU_RLC_GTS_OFFSET_LO
+#define GFX_IMU_RLC_GTS_OFFSET_LO__GTS_OFFSET_LO__SHIFT 0x0
+#define GFX_IMU_RLC_GTS_OFFSET_LO__GTS_OFFSET_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_GTS_OFFSET_HI
+#define GFX_IMU_RLC_GTS_OFFSET_HI__GTS_OFFSET_HI__SHIFT 0x0
+#define GFX_IMU_RLC_GTS_OFFSET_HI__GTS_OFFSET_HI_MASK 0x00FFFFFFL
+//GFX_IMU_CORE_INT_STATUS
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_24__SHIFT 0x18
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_25__SHIFT 0x19
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_29__SHIFT 0x1d
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_24_MASK 0x01000000L
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_25_MASK 0x02000000L
+#define GFX_IMU_CORE_INT_STATUS__INTERRUPT_29_MASK 0x20000000L
+//GFX_IMU_PIC_INT_MASK
+#define GFX_IMU_PIC_INT_MASK__MASK_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_MASK__MASK_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_MASK__MASK_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_MASK__MASK_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_MASK__MASK_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_MASK__MASK_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_MASK__MASK_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_MASK__MASK_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_MASK__MASK_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_MASK__MASK_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_MASK__MASK_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_MASK__MASK_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_MASK__MASK_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_MASK__MASK_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_MASK__MASK_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_MASK__MASK_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_MASK__MASK_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_MASK__MASK_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_MASK__MASK_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_MASK__MASK_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_MASK__MASK_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_MASK__MASK_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_MASK__MASK_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_MASK__MASK_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_MASK__MASK_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_MASK__MASK_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_MASK__MASK_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_MASK__MASK_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_MASK__MASK_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_MASK__MASK_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_MASK__MASK_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_MASK__MASK_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_MASK__MASK_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_MASK__MASK_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_MASK__MASK_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_MASK__MASK_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_MASK__MASK_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_MASK__MASK_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_MASK__MASK_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_MASK__MASK_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_MASK__MASK_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_MASK__MASK_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_MASK__MASK_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_MASK__MASK_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_MASK__MASK_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_MASK__MASK_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_MASK__MASK_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_MASK__MASK_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_MASK__MASK_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_MASK__MASK_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_MASK__MASK_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_MASK__MASK_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_MASK__MASK_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_MASK__MASK_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_MASK__MASK_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_MASK__MASK_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_MASK__MASK_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_MASK__MASK_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_LVL
+#define GFX_IMU_PIC_INT_LVL__LVL_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_LVL__LVL_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_LVL__LVL_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_LVL__LVL_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_LVL__LVL_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_LVL__LVL_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_LVL__LVL_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_LVL__LVL_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_LVL__LVL_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_LVL__LVL_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_LVL__LVL_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_LVL__LVL_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_LVL__LVL_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_LVL__LVL_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_LVL__LVL_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_LVL__LVL_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_LVL__LVL_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_LVL__LVL_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_LVL__LVL_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_LVL__LVL_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_LVL__LVL_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_LVL__LVL_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_LVL__LVL_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_LVL__LVL_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_LVL__LVL_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_LVL__LVL_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_LVL__LVL_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_LVL__LVL_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_LVL__LVL_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_LVL__LVL_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_LVL__LVL_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_LVL__LVL_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_LVL__LVL_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_LVL__LVL_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_LVL__LVL_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_LVL__LVL_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_LVL__LVL_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_LVL__LVL_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_LVL__LVL_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_LVL__LVL_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_LVL__LVL_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_LVL__LVL_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_LVL__LVL_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_LVL__LVL_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_LVL__LVL_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_LVL__LVL_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_LVL__LVL_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_LVL__LVL_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_LVL__LVL_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_LVL__LVL_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_LVL__LVL_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_LVL__LVL_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_LVL__LVL_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_LVL__LVL_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_LVL__LVL_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_LVL__LVL_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_LVL__LVL_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_LVL__LVL_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_EDGE
+#define GFX_IMU_PIC_INT_EDGE__EDGE_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_EDGE__EDGE_1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_EDGE__EDGE_2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_EDGE__EDGE_3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_EDGE__EDGE_4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_EDGE__EDGE_5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_EDGE__EDGE_6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_EDGE__EDGE_7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_EDGE__EDGE_8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_EDGE__EDGE_9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_EDGE__EDGE_10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_EDGE__EDGE_11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_EDGE__EDGE_12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_EDGE__EDGE_13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_EDGE__EDGE_14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_EDGE__EDGE_15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_EDGE__EDGE_16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_EDGE__EDGE_17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_EDGE__EDGE_18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_EDGE__EDGE_19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_EDGE__EDGE_20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_EDGE__EDGE_21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_EDGE__EDGE_22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_EDGE__EDGE_23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_EDGE__EDGE_24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_EDGE__EDGE_25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_EDGE__EDGE_26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_EDGE__EDGE_27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_EDGE__EDGE_28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_EDGE__EDGE_29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_EDGE__EDGE_30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_EDGE__EDGE_31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_EDGE__EDGE_0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_EDGE__EDGE_31_MASK 0x80000000L
+//GFX_IMU_PIC_INT_PRI_0
+#define GFX_IMU_PIC_INT_PRI_0__PRI_0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_0__PRI_1__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_0__PRI_2__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_0__PRI_3__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_0__PRI_0_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_0__PRI_1_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_0__PRI_2_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_0__PRI_3_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_1
+#define GFX_IMU_PIC_INT_PRI_1__PRI_4__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_1__PRI_5__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_1__PRI_6__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_1__PRI_7__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_1__PRI_4_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_1__PRI_5_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_1__PRI_6_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_1__PRI_7_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_2
+#define GFX_IMU_PIC_INT_PRI_2__PRI_8__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_2__PRI_9__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_2__PRI_10__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_2__PRI_11__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_2__PRI_8_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_2__PRI_9_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_2__PRI_10_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_2__PRI_11_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_3
+#define GFX_IMU_PIC_INT_PRI_3__PRI_12__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_3__PRI_13__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_3__PRI_14__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_3__PRI_15__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_3__PRI_12_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_3__PRI_13_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_3__PRI_14_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_3__PRI_15_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_4
+#define GFX_IMU_PIC_INT_PRI_4__PRI_16__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_4__PRI_17__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_4__PRI_18__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_4__PRI_19__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_4__PRI_16_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_4__PRI_17_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_4__PRI_18_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_4__PRI_19_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_5
+#define GFX_IMU_PIC_INT_PRI_5__PRI_20__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_5__PRI_21__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_5__PRI_22__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_5__PRI_23__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_5__PRI_20_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_5__PRI_21_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_5__PRI_22_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_5__PRI_23_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_6
+#define GFX_IMU_PIC_INT_PRI_6__PRI_24__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_6__PRI_25__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_6__PRI_26__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_6__PRI_27__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_6__PRI_24_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_6__PRI_25_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_6__PRI_26_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_6__PRI_27_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_PRI_7
+#define GFX_IMU_PIC_INT_PRI_7__PRI_28__SHIFT 0x0
+#define GFX_IMU_PIC_INT_PRI_7__PRI_29__SHIFT 0x8
+#define GFX_IMU_PIC_INT_PRI_7__PRI_30__SHIFT 0x10
+#define GFX_IMU_PIC_INT_PRI_7__PRI_31__SHIFT 0x18
+#define GFX_IMU_PIC_INT_PRI_7__PRI_28_MASK 0x000000FFL
+#define GFX_IMU_PIC_INT_PRI_7__PRI_29_MASK 0x0000FF00L
+#define GFX_IMU_PIC_INT_PRI_7__PRI_30_MASK 0x00FF0000L
+#define GFX_IMU_PIC_INT_PRI_7__PRI_31_MASK 0xFF000000L
+//GFX_IMU_PIC_INT_STATUS
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS0__SHIFT 0x0
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS1__SHIFT 0x1
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS2__SHIFT 0x2
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS3__SHIFT 0x3
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS4__SHIFT 0x4
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS5__SHIFT 0x5
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS6__SHIFT 0x6
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS7__SHIFT 0x7
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS8__SHIFT 0x8
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS9__SHIFT 0x9
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS10__SHIFT 0xa
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS11__SHIFT 0xb
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS12__SHIFT 0xc
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS13__SHIFT 0xd
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS14__SHIFT 0xe
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS15__SHIFT 0xf
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS16__SHIFT 0x10
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS17__SHIFT 0x11
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS18__SHIFT 0x12
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS19__SHIFT 0x13
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS20__SHIFT 0x14
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS21__SHIFT 0x15
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS22__SHIFT 0x16
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS23__SHIFT 0x17
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS24__SHIFT 0x18
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS25__SHIFT 0x19
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS26__SHIFT 0x1a
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS27__SHIFT 0x1b
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS28__SHIFT 0x1c
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS29__SHIFT 0x1d
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS30__SHIFT 0x1e
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS31__SHIFT 0x1f
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS0_MASK 0x00000001L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS1_MASK 0x00000002L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS2_MASK 0x00000004L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS3_MASK 0x00000008L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS4_MASK 0x00000010L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS5_MASK 0x00000020L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS6_MASK 0x00000040L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS7_MASK 0x00000080L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS8_MASK 0x00000100L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS9_MASK 0x00000200L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS10_MASK 0x00000400L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS11_MASK 0x00000800L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS12_MASK 0x00001000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS13_MASK 0x00002000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS14_MASK 0x00004000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS15_MASK 0x00008000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS16_MASK 0x00010000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS17_MASK 0x00020000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS18_MASK 0x00040000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS19_MASK 0x00080000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS20_MASK 0x00100000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS21_MASK 0x00200000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS22_MASK 0x00400000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS23_MASK 0x00800000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS24_MASK 0x01000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS25_MASK 0x02000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS26_MASK 0x04000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS27_MASK 0x08000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS28_MASK 0x10000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS29_MASK 0x20000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS30_MASK 0x40000000L
+#define GFX_IMU_PIC_INT_STATUS__INT_STATUS31_MASK 0x80000000L
+//GFX_IMU_PIC_INTR
+#define GFX_IMU_PIC_INTR__INTR_n__SHIFT 0x0
+#define GFX_IMU_PIC_INTR__INTR_n_MASK 0x00000001L
+//GFX_IMU_PIC_INTR_ID
+#define GFX_IMU_PIC_INTR_ID__INTR_n__SHIFT 0x0
+#define GFX_IMU_PIC_INTR_ID__INTR_n_MASK 0x000000FFL
+//GFX_IMU_IH_CTRL_1
+#define GFX_IMU_IH_CTRL_1__CONTEXT_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_1__CONTEXT_ID_MASK 0xFFFFFFFFL
+//GFX_IMU_IH_CTRL_2
+#define GFX_IMU_IH_CTRL_2__CONTEXT_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_2__RING_ID__SHIFT 0x8
+#define GFX_IMU_IH_CTRL_2__VM_ID__SHIFT 0x10
+#define GFX_IMU_IH_CTRL_2__SRSTB__SHIFT 0x1f
+#define GFX_IMU_IH_CTRL_2__CONTEXT_ID_MASK 0x000000FFL
+#define GFX_IMU_IH_CTRL_2__RING_ID_MASK 0x0000FF00L
+#define GFX_IMU_IH_CTRL_2__VM_ID_MASK 0x000F0000L
+#define GFX_IMU_IH_CTRL_2__SRSTB_MASK 0x80000000L
+//GFX_IMU_IH_CTRL_3
+#define GFX_IMU_IH_CTRL_3__SOURCE_ID__SHIFT 0x0
+#define GFX_IMU_IH_CTRL_3__VF_ID__SHIFT 0x8
+#define GFX_IMU_IH_CTRL_3__VF__SHIFT 0xd
+#define GFX_IMU_IH_CTRL_3__SOURCE_ID_MASK 0x000000FFL
+#define GFX_IMU_IH_CTRL_3__VF_ID_MASK 0x00001F00L
+#define GFX_IMU_IH_CTRL_3__VF_MASK 0x00002000L
+//GFX_IMU_IH_STATUS
+#define GFX_IMU_IH_STATUS__IH_BUSY__SHIFT 0x0
+#define GFX_IMU_IH_STATUS__IH_BUSY_MASK 0x00000001L
+//GFX_IMU_FUSESTRAP
+#define GFX_IMU_FUSESTRAP__BOOT_VID__SHIFT 0x0
+#define GFX_IMU_FUSESTRAP__BOOT_VID_MASK 0x000001FFL
+//GFX_IMU_SMUIO_VIDCHG_CTRL
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__REQ__SHIFT 0x0
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__DATA__SHIFT 0x1
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__PSIEN__SHIFT 0xa
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__ACK__SHIFT 0xb
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__SRC_SEL__SHIFT 0x1f
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__REQ_MASK 0x00000001L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__DATA_MASK 0x000003FEL
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__PSIEN_MASK 0x00000400L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__ACK_MASK 0x00000800L
+#define GFX_IMU_SMUIO_VIDCHG_CTRL__SRC_SEL_MASK 0x80000000L
+//GFX_IMU_GFXCLK_BYPASS_CTRL
+#define GFX_IMU_GFXCLK_BYPASS_CTRL__BYPASS_SEL__SHIFT 0x0
+#define GFX_IMU_GFXCLK_BYPASS_CTRL__BYPASS_SEL_MASK 0x00000001L
+//GFX_IMU_CLK_CTRL
+#define GFX_IMU_CLK_CTRL__CG_OVR__SHIFT 0x0
+#define GFX_IMU_CLK_CTRL__CG_OVR_CORE__SHIFT 0x1
+#define GFX_IMU_CLK_CTRL__CLKDIV__SHIFT 0x4
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_CHGTOG__SHIFT 0x8
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DONETOG__SHIFT 0x9
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DIV__SHIFT 0x10
+#define GFX_IMU_CLK_CTRL__COOLDOWN_PERIOD__SHIFT 0x1c
+#define GFX_IMU_CLK_CTRL__CG_OVR_MASK 0x00000001L
+#define GFX_IMU_CLK_CTRL__CG_OVR_CORE_MASK 0x00000002L
+#define GFX_IMU_CLK_CTRL__CLKDIV_MASK 0x00000010L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_CHGTOG_MASK 0x00000100L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DONETOG_MASK 0x00000200L
+#define GFX_IMU_CLK_CTRL__GFXBYPASSCLK_DIV_MASK 0x007F0000L
+#define GFX_IMU_CLK_CTRL__COOLDOWN_PERIOD_MASK 0xF0000000L
+//GFX_IMU_DOORBELL_CONTROL
+#define GFX_IMU_DOORBELL_CONTROL__OVR_EN__SHIFT 0x0
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_OVR__SHIFT 0x1
+#define GFX_IMU_DOORBELL_CONTROL__CP_DB_RESP_PEND_COUNT__SHIFT 0x18
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_STATUS__SHIFT 0x1f
+#define GFX_IMU_DOORBELL_CONTROL__OVR_EN_MASK 0x00000001L
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_OVR_MASK 0x00000002L
+#define GFX_IMU_DOORBELL_CONTROL__CP_DB_RESP_PEND_COUNT_MASK 0x7F000000L
+#define GFX_IMU_DOORBELL_CONTROL__FENCE_EN_STATUS_MASK 0x80000000L
+//GFX_IMU_RLC_CG_CTRL
+#define GFX_IMU_RLC_CG_CTRL__FORCE_CGCG__SHIFT 0x0
+#define GFX_IMU_RLC_CG_CTRL__MGCG_EARLY_EN__SHIFT 0x1
+#define GFX_IMU_RLC_CG_CTRL__FORCE_CGCG_MASK 0x00000001L
+#define GFX_IMU_RLC_CG_CTRL__MGCG_EARLY_EN_MASK 0x00000002L
+//GFX_IMU_RLC_THROTTLE_GFX
+#define GFX_IMU_RLC_THROTTLE_GFX__THROTTLE_EN__SHIFT 0x0
+#define GFX_IMU_RLC_THROTTLE_GFX__THROTTLE_EN_MASK 0x00000001L
+//GFX_IMU_RLC_RESET_VECTOR
+#define GFX_IMU_RLC_RESET_VECTOR__COLD_VS_GFXOFF__SHIFT 0x0
+#define GFX_IMU_RLC_RESET_VECTOR__WARM_RESET_EXIT__SHIFT 0x2
+#define GFX_IMU_RLC_RESET_VECTOR__VF_FLR_EXIT__SHIFT 0x3
+#define GFX_IMU_RLC_RESET_VECTOR__VECTOR__SHIFT 0x4
+#define GFX_IMU_RLC_RESET_VECTOR__COLD_VS_GFXOFF_MASK 0x00000001L
+#define GFX_IMU_RLC_RESET_VECTOR__WARM_RESET_EXIT_MASK 0x00000004L
+#define GFX_IMU_RLC_RESET_VECTOR__VF_FLR_EXIT_MASK 0x00000008L
+#define GFX_IMU_RLC_RESET_VECTOR__VECTOR_MASK 0x000000F0L
+//GFX_IMU_RLC_OVERRIDE
+#define GFX_IMU_RLC_OVERRIDE__DS_ALLOW__SHIFT 0x0
+#define GFX_IMU_RLC_OVERRIDE__DS_ALLOW_MASK 0x00000001L
+//GFX_IMU_DPM_CONTROL
+#define GFX_IMU_DPM_CONTROL__ACC_RESET__SHIFT 0x0
+#define GFX_IMU_DPM_CONTROL__ACC_START__SHIFT 0x1
+#define GFX_IMU_DPM_CONTROL__BUSY_MASK__SHIFT 0x2
+#define GFX_IMU_DPM_CONTROL__ACC_RESET_MASK 0x00000001L
+#define GFX_IMU_DPM_CONTROL__ACC_START_MASK 0x00000002L
+#define GFX_IMU_DPM_CONTROL__BUSY_MASK_MASK 0x0003FFFCL
+//GFX_IMU_DPM_ACC
+#define GFX_IMU_DPM_ACC__COUNT__SHIFT 0x0
+#define GFX_IMU_DPM_ACC__COUNT_MASK 0x00FFFFFFL
+//GFX_IMU_DPM_REF_COUNTER
+#define GFX_IMU_DPM_REF_COUNTER__COUNT__SHIFT 0x0
+#define GFX_IMU_DPM_REF_COUNTER__COUNT_MASK 0x00FFFFFFL
+//GFX_IMU_RLC_RAM_INDEX
+#define GFX_IMU_RLC_RAM_INDEX__INDEX__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_INDEX__RLC_INDEX__SHIFT 0x10
+#define GFX_IMU_RLC_RAM_INDEX__RAM_VALID__SHIFT 0x1f
+#define GFX_IMU_RLC_RAM_INDEX__INDEX_MASK 0x000000FFL
+#define GFX_IMU_RLC_RAM_INDEX__RLC_INDEX_MASK 0x00FF0000L
+#define GFX_IMU_RLC_RAM_INDEX__RAM_VALID_MASK 0x80000000L
+//GFX_IMU_RLC_RAM_ADDR_HIGH
+#define GFX_IMU_RLC_RAM_ADDR_HIGH__ADDR_MSB__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_ADDR_HIGH__ADDR_MSB_MASK 0x0000FFFFL
+//GFX_IMU_RLC_RAM_ADDR_LOW
+#define GFX_IMU_RLC_RAM_ADDR_LOW__ADDR_LSB__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_ADDR_LOW__ADDR_LSB_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_RAM_DATA
+#define GFX_IMU_RLC_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_RLC_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_FENCE_CTRL
+#define GFX_IMU_FENCE_CTRL__ENABLED__SHIFT 0x0
+#define GFX_IMU_FENCE_CTRL__ARM_LOG__SHIFT 0x1
+#define GFX_IMU_FENCE_CTRL__GRBM_RSMU_FENCE_ENABLE__SHIFT 0x2
+#define GFX_IMU_FENCE_CTRL__FLUSH_ARBITER_CREDITS__SHIFT 0x3
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_EN__SHIFT 0x8
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR__SHIFT 0x9
+#define GFX_IMU_FENCE_CTRL__ENABLED_MASK 0x00000001L
+#define GFX_IMU_FENCE_CTRL__ARM_LOG_MASK 0x00000002L
+#define GFX_IMU_FENCE_CTRL__GRBM_RSMU_FENCE_ENABLE_MASK 0x00000004L
+#define GFX_IMU_FENCE_CTRL__FLUSH_ARBITER_CREDITS_MASK 0x00000008L
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_EN_MASK 0x00000100L
+#define GFX_IMU_FENCE_CTRL__GFX_REG_FENCE_OVR_MASK 0x00000200L
+//GFX_IMU_FENCE_LOG_INIT
+#define GFX_IMU_FENCE_LOG_INIT__UNIT_ID__SHIFT 0x0
+#define GFX_IMU_FENCE_LOG_INIT__INITIATOR_ID__SHIFT 0x7
+#define GFX_IMU_FENCE_LOG_INIT__UNIT_ID_MASK 0x0000007FL
+#define GFX_IMU_FENCE_LOG_INIT__INITIATOR_ID_MASK 0x0001FF80L
+//GFX_IMU_FENCE_LOG_ADDR
+#define GFX_IMU_FENCE_LOG_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_FENCE_LOG_ADDR__ADDR_MASK 0x000FFFFCL
+//GFX_IMU_PROGRAM_CTR
+#define GFX_IMU_PROGRAM_CTR__PC__SHIFT 0x0
+#define GFX_IMU_PROGRAM_CTR__PC_MASK 0xFFFFFFFFL
+//GFX_IMU_CORE_CTRL
+#define GFX_IMU_CORE_CTRL__CRESET__SHIFT 0x0
+#define GFX_IMU_CORE_CTRL__CSTALL__SHIFT 0x1
+#define GFX_IMU_CORE_CTRL__CDBGENABLE__SHIFT 0x2
+#define GFX_IMU_CORE_CTRL__DRESET__SHIFT 0x3
+#define GFX_IMU_CORE_CTRL__HALT_ON_RESET__SHIFT 0x4
+#define GFX_IMU_CORE_CTRL__BREAK_IN__SHIFT 0x8
+#define GFX_IMU_CORE_CTRL__BREAK_OUT_ACK__SHIFT 0x9
+#define GFX_IMU_CORE_CTRL__CRESET_MASK 0x00000001L
+#define GFX_IMU_CORE_CTRL__CSTALL_MASK 0x00000002L
+#define GFX_IMU_CORE_CTRL__CDBGENABLE_MASK 0x00000004L
+#define GFX_IMU_CORE_CTRL__DRESET_MASK 0x00000008L
+#define GFX_IMU_CORE_CTRL__HALT_ON_RESET_MASK 0x00000010L
+#define GFX_IMU_CORE_CTRL__BREAK_IN_MASK 0x00000100L
+#define GFX_IMU_CORE_CTRL__BREAK_OUT_ACK_MASK 0x00000200L
+//GFX_IMU_CORE_STATUS
+#define GFX_IMU_CORE_STATUS__CBUSY__SHIFT 0x0
+#define GFX_IMU_CORE_STATUS__PWAIT_MODE__SHIFT 0x1
+#define GFX_IMU_CORE_STATUS__PSP_ACC_ERR__SHIFT 0x2
+#define GFX_IMU_CORE_STATUS__CINTLEVEL__SHIFT 0x4
+#define GFX_IMU_CORE_STATUS__BREAK_IN_ACK__SHIFT 0x8
+#define GFX_IMU_CORE_STATUS__BREAK_OUT__SHIFT 0x9
+#define GFX_IMU_CORE_STATUS__DEBUG_MODE__SHIFT 0xa
+#define GFX_IMU_CORE_STATUS__P_FATAL_ERROR__SHIFT 0xb
+#define GFX_IMU_CORE_STATUS__FAULT_SEVERITY_LEVEL__SHIFT 0x18
+#define GFX_IMU_CORE_STATUS__FAULT_TYPE__SHIFT 0x1c
+#define GFX_IMU_CORE_STATUS__CBUSY_MASK 0x00000001L
+#define GFX_IMU_CORE_STATUS__PWAIT_MODE_MASK 0x00000002L
+#define GFX_IMU_CORE_STATUS__PSP_ACC_ERR_MASK 0x00000004L
+#define GFX_IMU_CORE_STATUS__CINTLEVEL_MASK 0x000000F0L
+#define GFX_IMU_CORE_STATUS__BREAK_IN_ACK_MASK 0x00000100L
+#define GFX_IMU_CORE_STATUS__BREAK_OUT_MASK 0x00000200L
+#define GFX_IMU_CORE_STATUS__DEBUG_MODE_MASK 0x00000400L
+#define GFX_IMU_CORE_STATUS__P_FATAL_ERROR_MASK 0x00000800L
+#define GFX_IMU_CORE_STATUS__FAULT_SEVERITY_LEVEL_MASK 0x0F000000L
+#define GFX_IMU_CORE_STATUS__FAULT_TYPE_MASK 0xF0000000L
+//GFX_IMU_PWROKRAW
+#define GFX_IMU_PWROKRAW__PWROKRAW__SHIFT 0x0
+#define GFX_IMU_PWROKRAW__PWROKRAW_MASK 0x00000001L
+//GFX_IMU_PWROK
+#define GFX_IMU_PWROK__PWROK__SHIFT 0x0
+#define GFX_IMU_PWROK__PWROK_MASK 0x00000001L
+//GFX_IMU_GAP_PWROK
+#define GFX_IMU_GAP_PWROK__GAP_PWROK__SHIFT 0x0
+#define GFX_IMU_GAP_PWROK__GAP_PWROK_MASK 0x00000001L
+//GFX_IMU_RESETn
+#define GFX_IMU_RESETn__Cpl_RESETn__SHIFT 0x0
+#define GFX_IMU_RESETn__Cpl_RESETn_MASK 0x00000001L
+//GFX_IMU_GFX_RESET_CTRL
+#define GFX_IMU_GFX_RESET_CTRL__HARD_RESETB__SHIFT 0x0
+#define GFX_IMU_GFX_RESET_CTRL__EA_RESETB__SHIFT 0x1
+#define GFX_IMU_GFX_RESET_CTRL__UTCL2_RESETB__SHIFT 0x2
+#define GFX_IMU_GFX_RESET_CTRL__SDMA_RESETB__SHIFT 0x3
+#define GFX_IMU_GFX_RESET_CTRL__GRBM_RESETB__SHIFT 0x4
+#define GFX_IMU_GFX_RESET_CTRL__HARD_RESETB_MASK 0x00000001L
+#define GFX_IMU_GFX_RESET_CTRL__EA_RESETB_MASK 0x00000002L
+#define GFX_IMU_GFX_RESET_CTRL__UTCL2_RESETB_MASK 0x00000004L
+#define GFX_IMU_GFX_RESET_CTRL__SDMA_RESETB_MASK 0x00000008L
+#define GFX_IMU_GFX_RESET_CTRL__GRBM_RESETB_MASK 0x00000010L
+//GFX_IMU_AEB_OVERRIDE
+#define GFX_IMU_AEB_OVERRIDE__AEB_OVERRIDE_CTRL__SHIFT 0x0
+#define GFX_IMU_AEB_OVERRIDE__AEB_RESET_VALUE__SHIFT 0x1
+#define GFX_IMU_AEB_OVERRIDE__AEB_VALID_VALUE__SHIFT 0x2
+#define GFX_IMU_AEB_OVERRIDE__AEB_OVERRIDE_CTRL_MASK 0x00000001L
+#define GFX_IMU_AEB_OVERRIDE__AEB_RESET_VALUE_MASK 0x00000002L
+#define GFX_IMU_AEB_OVERRIDE__AEB_VALID_VALUE_MASK 0x00000004L
+//GFX_IMU_VDCI_RESET_CTRL
+#define GFX_IMU_VDCI_RESET_CTRL__SOC2GFX_VDCI_RESETn__SHIFT 0x0
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_EA_SDF_VDCI_RESET__SHIFT 0x1
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_UTCL2_ATHUB_VDCI_RESET__SHIFT 0x2
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_IMUAXI_SYSHUB_VDCI_RESET__SHIFT 0x3
+#define GFX_IMU_VDCI_RESET_CTRL__IMU2GFX_VDCI_RESETn__SHIFT 0x4
+#define GFX_IMU_VDCI_RESET_CTRL__SOC2GFX_VDCI_RESETn_MASK 0x00000001L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_EA_SDF_VDCI_RESET_MASK 0x00000002L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_UTCL2_ATHUB_VDCI_RESET_MASK 0x00000004L
+#define GFX_IMU_VDCI_RESET_CTRL__SOC_IMUAXI_SYSHUB_VDCI_RESET_MASK 0x00000008L
+#define GFX_IMU_VDCI_RESET_CTRL__IMU2GFX_VDCI_RESETn_MASK 0x00000010L
+//GFX_IMU_GFX_ISO_CTRL
+#define GFX_IMU_GFX_ISO_CTRL__GFX2IMU_ISOn__SHIFT 0x0
+#define GFX_IMU_GFX_ISO_CTRL__SOC_EA_SDF_VDCI_ISOn_EN__SHIFT 0x1
+#define GFX_IMU_GFX_ISO_CTRL__SOC_UTCL2_ATHUB_VDCI_ISOn_EN__SHIFT 0x2
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_ISOn__SHIFT 0x3
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_CLK_ISOn__SHIFT 0x4
+#define GFX_IMU_GFX_ISO_CTRL__GFX2IMU_ISOn_MASK 0x00000001L
+#define GFX_IMU_GFX_ISO_CTRL__SOC_EA_SDF_VDCI_ISOn_EN_MASK 0x00000002L
+#define GFX_IMU_GFX_ISO_CTRL__SOC_UTCL2_ATHUB_VDCI_ISOn_EN_MASK 0x00000004L
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_ISOn_MASK 0x00000008L
+#define GFX_IMU_GFX_ISO_CTRL__GFX2SOC_CLK_ISOn_MASK 0x00000010L
+//GFX_IMU_TIMER0_CTRL0
+#define GFX_IMU_TIMER0_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER0_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER0_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER0_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER0_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER0_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER0_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER0_CTRL1
+#define GFX_IMU_TIMER0_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER0_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER0_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER0_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER0_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER0_CMP_AUTOINC
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER0_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER0_CMP_INTEN
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER0_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER0_CMP0
+#define GFX_IMU_TIMER0_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_CMP1
+#define GFX_IMU_TIMER0_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_CMP3
+#define GFX_IMU_TIMER0_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER0_VALUE
+#define GFX_IMU_TIMER0_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER0_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CTRL0
+#define GFX_IMU_TIMER1_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER1_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER1_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER1_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER1_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER1_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER1_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER1_CTRL1
+#define GFX_IMU_TIMER1_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER1_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER1_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER1_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER1_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER1_CMP_AUTOINC
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER1_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER1_CMP_INTEN
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER1_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER1_CMP0
+#define GFX_IMU_TIMER1_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CMP1
+#define GFX_IMU_TIMER1_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_CMP3
+#define GFX_IMU_TIMER1_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER1_VALUE
+#define GFX_IMU_TIMER1_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER1_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CTRL0
+#define GFX_IMU_TIMER2_CTRL0__START_STOP__SHIFT 0x0
+#define GFX_IMU_TIMER2_CTRL0__CLEAR__SHIFT 0x8
+#define GFX_IMU_TIMER2_CTRL0__UP_DOWN__SHIFT 0x10
+#define GFX_IMU_TIMER2_CTRL0__PULSE_EN__SHIFT 0x18
+#define GFX_IMU_TIMER2_CTRL0__START_STOP_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CTRL0__CLEAR_MASK 0x00000100L
+#define GFX_IMU_TIMER2_CTRL0__UP_DOWN_MASK 0x00010000L
+#define GFX_IMU_TIMER2_CTRL0__PULSE_EN_MASK 0x01000000L
+//GFX_IMU_TIMER2_CTRL1
+#define GFX_IMU_TIMER2_CTRL1__PWM_EN__SHIFT 0x0
+#define GFX_IMU_TIMER2_CTRL1__TS_MODE__SHIFT 0x8
+#define GFX_IMU_TIMER2_CTRL1__SAT_EN__SHIFT 0x10
+#define GFX_IMU_TIMER2_CTRL1__PWM_EN_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CTRL1__TS_MODE_MASK 0x00000100L
+#define GFX_IMU_TIMER2_CTRL1__SAT_EN_MASK 0x00010000L
+//GFX_IMU_TIMER2_CMP_AUTOINC
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER2_CMP_AUTOINC__AUTOINC_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER2_CMP_INTEN
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN0__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN1__SHIFT 0x1
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN2__SHIFT 0x2
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN3__SHIFT 0x3
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN0_MASK 0x00000001L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN1_MASK 0x00000002L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN2_MASK 0x00000004L
+#define GFX_IMU_TIMER2_CMP_INTEN__INT_EN3_MASK 0x00000008L
+//GFX_IMU_TIMER2_CMP0
+#define GFX_IMU_TIMER2_CMP0__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP0__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CMP1
+#define GFX_IMU_TIMER2_CMP1__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP1__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_CMP3
+#define GFX_IMU_TIMER2_CMP3__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_CMP3__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_TIMER2_VALUE
+#define GFX_IMU_TIMER2_VALUE__VALUE__SHIFT 0x0
+#define GFX_IMU_TIMER2_VALUE__VALUE_MASK 0xFFFFFFFFL
+//GFX_IMU_FUSE_CTRL
+#define GFX_IMU_FUSE_CTRL__DIV_OVR__SHIFT 0x0
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_EN__SHIFT 0x5
+#define GFX_IMU_FUSE_CTRL__FORCE_DONE__SHIFT 0x6
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_MASK 0x0000001FL
+#define GFX_IMU_FUSE_CTRL__DIV_OVR_EN_MASK 0x00000020L
+#define GFX_IMU_FUSE_CTRL__FORCE_DONE_MASK 0x00000040L
+//GFX_IMU_D_RAM_ADDR
+#define GFX_IMU_D_RAM_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_D_RAM_ADDR__ADDR_MASK 0x0000FFFCL
+//GFX_IMU_D_RAM_DATA
+#define GFX_IMU_D_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_D_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+//GFX_IMU_GFX_IH_GASKET_CTRL
+#define GFX_IMU_GFX_IH_GASKET_CTRL__SRSTB__SHIFT 0x0
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_LEVEL__SHIFT 0x10
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_OVERFLOW__SHIFT 0x14
+#define GFX_IMU_GFX_IH_GASKET_CTRL__SRSTB_MASK 0x00000001L
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_LEVEL_MASK 0x000F0000L
+#define GFX_IMU_GFX_IH_GASKET_CTRL__BUFFER_OVERFLOW_MASK 0x00100000L
+
+
+// addressBlock: gc_gfx_imu_gfx_imu_pspdec
+//GFX_IMU_RLC_BOOTLOADER_ADDR_HI
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_HI__ADDR_HI__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_HI__ADDR_HI_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_BOOTLOADER_ADDR_LO
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_LO__ADDR_LO__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_ADDR_LO__ADDR_LO_MASK 0xFFFFFFFFL
+//GFX_IMU_RLC_BOOTLOADER_SIZE
+#define GFX_IMU_RLC_BOOTLOADER_SIZE__SIZE__SHIFT 0x0
+#define GFX_IMU_RLC_BOOTLOADER_SIZE__SIZE_MASK 0x03FFFFFFL
+//GFX_IMU_I_RAM_ADDR
+#define GFX_IMU_I_RAM_ADDR__ADDR__SHIFT 0x2
+#define GFX_IMU_I_RAM_ADDR__ADDR_MASK 0x0000FFFCL
+//GFX_IMU_I_RAM_DATA
+#define GFX_IMU_I_RAM_DATA__DATA__SHIFT 0x0
+#define GFX_IMU_I_RAM_DATA__DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: gccacind
+//GC_CAC_ID
+#define GC_CAC_ID__CAC_BLOCK_ID__SHIFT 0x0
+#define GC_CAC_ID__CAC_SIGNAL_ID__SHIFT 0x6
+#define GC_CAC_ID__CAC_BLOCK_ID_MASK 0x0000003FL
+#define GC_CAC_ID__CAC_SIGNAL_ID_MASK 0x00003FC0L
+//GC_CAC_CNTL
+#define GC_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x0
+#define GC_CAC_CNTL__CAC_THRESHOLD_MASK 0x0000FFFFL
+//GC_CAC_ACC_CP0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP1
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CP2
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CP2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA1
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA2
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA3
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA4
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_EA5
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_EA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER1
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER2
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER3
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER4
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER5
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER6
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER7
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER8
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ROUTER9
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ROUTER9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML20
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML21
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML22
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML23
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_VML24
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_VML24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER1
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER2
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER3
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_WALKER4
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_WALKER4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS1
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS2
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS3
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GDS4
+#define GC_CAC_ACC_GDS4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GDS4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE0
+#define GC_CAC_ACC_GE0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE1
+#define GC_CAC_ACC_GE1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE2
+#define GC_CAC_ACC_GE2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE3
+#define GC_CAC_ACC_GE3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE4
+#define GC_CAC_ACC_GE4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE5
+#define GC_CAC_ACC_GE5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE6
+#define GC_CAC_ACC_GE6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE7
+#define GC_CAC_ACC_GE7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE8
+#define GC_CAC_ACC_GE8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE9
+#define GC_CAC_ACC_GE9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE10
+#define GC_CAC_ACC_GE10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE11
+#define GC_CAC_ACC_GE11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE11__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE12
+#define GC_CAC_ACC_GE12__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE12__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE13
+#define GC_CAC_ACC_GE13__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE13__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE14
+#define GC_CAC_ACC_GE14__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE14__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE15
+#define GC_CAC_ACC_GE15__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE15__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE16
+#define GC_CAC_ACC_GE16__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE16__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE17
+#define GC_CAC_ACC_GE17__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE17__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE18
+#define GC_CAC_ACC_GE18__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE18__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE19
+#define GC_CAC_ACC_GE19__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE19__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GE20
+#define GC_CAC_ACC_GE20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GE20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PMM0
+#define GC_CAC_ACC_PMM0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PMM0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C0
+#define GC_CAC_ACC_GL2C0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C1
+#define GC_CAC_ACC_GL2C1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C2
+#define GC_CAC_ACC_GL2C2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C3
+#define GC_CAC_ACC_GL2C3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GL2C4
+#define GC_CAC_ACC_GL2C4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GL2C4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH0
+#define GC_CAC_ACC_PH0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH1
+#define GC_CAC_ACC_PH1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH2
+#define GC_CAC_ACC_PH2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH3
+#define GC_CAC_ACC_PH3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH4
+#define GC_CAC_ACC_PH4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH5
+#define GC_CAC_ACC_PH5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH6
+#define GC_CAC_ACC_PH6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_PH7
+#define GC_CAC_ACC_PH7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_PH7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA0
+#define GC_CAC_ACC_SDMA0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA1
+#define GC_CAC_ACC_SDMA1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA2
+#define GC_CAC_ACC_SDMA2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA3
+#define GC_CAC_ACC_SDMA3__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA3__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA4
+#define GC_CAC_ACC_SDMA4__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA4__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA5
+#define GC_CAC_ACC_SDMA5__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA5__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA6
+#define GC_CAC_ACC_SDMA6__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA6__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA7
+#define GC_CAC_ACC_SDMA7__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA7__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA8
+#define GC_CAC_ACC_SDMA8__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA8__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA9
+#define GC_CAC_ACC_SDMA9__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA9__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA10
+#define GC_CAC_ACC_SDMA10__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA10__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_SDMA11
+#define GC_CAC_ACC_SDMA11__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_SDMA11__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC0
+#define GC_CAC_ACC_CHC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC1
+#define GC_CAC_ACC_CHC1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_CHC2
+#define GC_CAC_ACC_CHC2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_CHC2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS0
+#define GC_CAC_ACC_GUS0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS1
+#define GC_CAC_ACC_GUS1__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS1__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_GUS2
+#define GC_CAC_ACC_GUS2__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_GUS2__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_RLC0
+#define GC_CAC_ACC_RLC0__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_RLC0__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL20
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL20__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL21
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL21__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL22
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL22__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL23
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL23__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//GC_CAC_ACC_UTCL2_ATCL24
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0__SHIFT 0x0
+#define GC_CAC_ACC_UTCL2_ATCL24__ACCUMULATOR_31_0_MASK 0xFFFFFFFFL
+//RELEASE_TO_STALL_LUT_1_8
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//RELEASE_TO_STALL_LUT_9_16
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//RELEASE_TO_STALL_LUT_17_20
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//STALL_TO_RELEASE_LUT_1_4
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//STALL_TO_RELEASE_LUT_5_7
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+//STALL_TO_PWRBRK_LUT_1_4
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_1_MASK 0x00000007L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_2_MASK 0x00000700L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_3_MASK 0x00070000L
+#define STALL_TO_PWRBRK_LUT_1_4__FIRST_PATTERN_4_MASK 0x07000000L
+//STALL_TO_PWRBRK_LUT_5_7
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_5_MASK 0x00000007L
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_6_MASK 0x00000700L
+#define STALL_TO_PWRBRK_LUT_5_7__FIRST_PATTERN_7_MASK 0x00070000L
+//PWRBRK_STALL_TO_RELEASE_LUT_1_4
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1__SHIFT 0x0
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2__SHIFT 0x8
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3__SHIFT 0x10
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4__SHIFT 0x18
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_1_MASK 0x0000001FL
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_2_MASK 0x00001F00L
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_3_MASK 0x001F0000L
+#define PWRBRK_STALL_TO_RELEASE_LUT_1_4__FIRST_PATTERN_4_MASK 0x1F000000L
+//PWRBRK_STALL_TO_RELEASE_LUT_5_7
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5__SHIFT 0x0
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6__SHIFT 0x8
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7__SHIFT 0x10
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_5_MASK 0x0000001FL
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_6_MASK 0x00001F00L
+#define PWRBRK_STALL_TO_RELEASE_LUT_5_7__FIRST_PATTERN_7_MASK 0x001F0000L
+//PWRBRK_RELEASE_TO_STALL_LUT_1_8
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5__SHIFT 0x10
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6__SHIFT 0x14
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7__SHIFT 0x18
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8__SHIFT 0x1c
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_1_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_2_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_3_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_4_MASK 0x00007000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_5_MASK 0x00070000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_6_MASK 0x00700000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_7_MASK 0x07000000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_1_8__FIRST_PATTERN_8_MASK 0x70000000L
+//PWRBRK_RELEASE_TO_STALL_LUT_9_16
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13__SHIFT 0x10
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14__SHIFT 0x14
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15__SHIFT 0x18
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16__SHIFT 0x1c
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_9_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_10_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_11_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_12_MASK 0x00007000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_13_MASK 0x00070000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_14_MASK 0x00700000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_15_MASK 0x07000000L
+#define PWRBRK_RELEASE_TO_STALL_LUT_9_16__FIRST_PATTERN_16_MASK 0x70000000L
+//PWRBRK_RELEASE_TO_STALL_LUT_17_20
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17__SHIFT 0x0
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18__SHIFT 0x4
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19__SHIFT 0x8
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20__SHIFT 0xc
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_17_MASK 0x00000007L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_18_MASK 0x00000070L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_19_MASK 0x00000700L
+#define PWRBRK_RELEASE_TO_STALL_LUT_17_20__FIRST_PATTERN_20_MASK 0x00007000L
+//FIXED_PATTERN_PERF_COUNTER_1
+#define FIXED_PATTERN_PERF_COUNTER_1__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_1__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_2
+#define FIXED_PATTERN_PERF_COUNTER_2__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_2__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_3
+#define FIXED_PATTERN_PERF_COUNTER_3__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_3__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_4
+#define FIXED_PATTERN_PERF_COUNTER_4__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_4__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_5
+#define FIXED_PATTERN_PERF_COUNTER_5__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_5__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_6
+#define FIXED_PATTERN_PERF_COUNTER_6__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_6__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_7
+#define FIXED_PATTERN_PERF_COUNTER_7__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_7__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_8
+#define FIXED_PATTERN_PERF_COUNTER_8__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_8__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_9
+#define FIXED_PATTERN_PERF_COUNTER_9__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_9__PERF_COUNTER_MASK 0x0001FFFFL
+//FIXED_PATTERN_PERF_COUNTER_10
+#define FIXED_PATTERN_PERF_COUNTER_10__PERF_COUNTER__SHIFT 0x0
+#define FIXED_PATTERN_PERF_COUNTER_10__PERF_COUNTER_MASK 0x0001FFFFL
+//HW_LUT_UPDATE_STATUS
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_DONE__SHIFT 0x0
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR__SHIFT 0x1
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_STEP__SHIFT 0x2
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_DONE__SHIFT 0x5
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR__SHIFT 0x6
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_STEP__SHIFT 0x7
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_DONE__SHIFT 0xa
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR__SHIFT 0xb
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_STEP__SHIFT 0xc
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_DONE__SHIFT 0x11
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR__SHIFT 0x12
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_STEP__SHIFT 0x13
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_DONE__SHIFT 0x16
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR__SHIFT 0x17
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_STEP__SHIFT 0x18
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_DONE_MASK 0x00000001L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_MASK 0x00000002L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_1_ERROR_STEP_MASK 0x0000001CL
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_DONE_MASK 0x00000020L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_MASK 0x00000040L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_2_ERROR_STEP_MASK 0x00000380L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_DONE_MASK 0x00000400L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_MASK 0x00000800L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_3_ERROR_STEP_MASK 0x0001F000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_DONE_MASK 0x00020000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_MASK 0x00040000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_4_ERROR_STEP_MASK 0x00380000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_DONE_MASK 0x00400000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_MASK 0x00800000L
+#define HW_LUT_UPDATE_STATUS__UPDATE_TABLE_5_ERROR_STEP_MASK 0x1F000000L
+
+
+// addressBlock: secacind
+//SE_CAC_ID
+#define SE_CAC_ID__CAC_BLOCK_ID__SHIFT 0x0
+#define SE_CAC_ID__CAC_SIGNAL_ID__SHIFT 0x6
+#define SE_CAC_ID__CAC_BLOCK_ID_MASK 0x0000003FL
+#define SE_CAC_ID__CAC_SIGNAL_ID_MASK 0x00003FC0L
+//SE_CAC_CNTL
+#define SE_CAC_CNTL__CAC_THRESHOLD__SHIFT 0x0
+#define SE_CAC_CNTL__CAC_THRESHOLD_MASK 0x0000FFFFL
+
+
+// addressBlock: grtavfsind
+//RTAVFS_REG0
+#define RTAVFS_REG0__RTAVFSZONE0STARTCNT__SHIFT 0x0
+#define RTAVFS_REG0__RTAVFSZONE0STOPCNT__SHIFT 0x10
+#define RTAVFS_REG0__RTAVFSZONE0STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG0__RTAVFSZONE0STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG1
+#define RTAVFS_REG1__RTAVFSZONE1STARTCNT__SHIFT 0x0
+#define RTAVFS_REG1__RTAVFSZONE1STOPCNT__SHIFT 0x10
+#define RTAVFS_REG1__RTAVFSZONE1STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG1__RTAVFSZONE1STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG2
+#define RTAVFS_REG2__RTAVFSZONE2STARTCNT__SHIFT 0x0
+#define RTAVFS_REG2__RTAVFSZONE2STOPCNT__SHIFT 0x10
+#define RTAVFS_REG2__RTAVFSZONE2STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG2__RTAVFSZONE2STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG3
+#define RTAVFS_REG3__RTAVFSZONE3STARTCNT__SHIFT 0x0
+#define RTAVFS_REG3__RTAVFSZONE3STOPCNT__SHIFT 0x10
+#define RTAVFS_REG3__RTAVFSZONE3STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG3__RTAVFSZONE3STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG4
+#define RTAVFS_REG4__RTAVFSZONE4STARTCNT__SHIFT 0x0
+#define RTAVFS_REG4__RTAVFSZONE4STOPCNT__SHIFT 0x10
+#define RTAVFS_REG4__RTAVFSZONE4STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG4__RTAVFSZONE4STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG5
+#define RTAVFS_REG5__RTAVFSZONE0EN0__SHIFT 0x0
+#define RTAVFS_REG5__RTAVFSZONE0EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG6
+#define RTAVFS_REG6__RTAVFSZONE0EN1__SHIFT 0x0
+#define RTAVFS_REG6__RTAVFSZONE0EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG7
+#define RTAVFS_REG7__RTAVFSZONE1EN0__SHIFT 0x0
+#define RTAVFS_REG7__RTAVFSZONE1EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG8
+#define RTAVFS_REG8__RTAVFSZONE1EN1__SHIFT 0x0
+#define RTAVFS_REG8__RTAVFSZONE1EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG9
+#define RTAVFS_REG9__RTAVFSZONE2EN0__SHIFT 0x0
+#define RTAVFS_REG9__RTAVFSZONE2EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG10
+#define RTAVFS_REG10__RTAVFSZONE2EN1__SHIFT 0x0
+#define RTAVFS_REG10__RTAVFSZONE2EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG11
+#define RTAVFS_REG11__RTAVFSZONE3EN0__SHIFT 0x0
+#define RTAVFS_REG11__RTAVFSZONE3EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG12
+#define RTAVFS_REG12__RTAVFSZONE3EN1__SHIFT 0x0
+#define RTAVFS_REG12__RTAVFSZONE3EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG13
+#define RTAVFS_REG13__RTAVFSZONE4EN0__SHIFT 0x0
+#define RTAVFS_REG13__RTAVFSZONE4EN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG14
+#define RTAVFS_REG14__RTAVFSZONE4EN1__SHIFT 0x0
+#define RTAVFS_REG14__RTAVFSZONE4EN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG15
+#define RTAVFS_REG15__RTAVFSVF0FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG15__RTAVFSVF0VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG15__RTAVFSVF0FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG15__RTAVFSVF0VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG16
+#define RTAVFS_REG16__RTAVFSVF1FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG16__RTAVFSVF1VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG16__RTAVFSVF1FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG16__RTAVFSVF1VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG17
+#define RTAVFS_REG17__RTAVFSVF2FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG17__RTAVFSVF2VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG17__RTAVFSVF2FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG17__RTAVFSVF2VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG18
+#define RTAVFS_REG18__RTAVFSVF3FREQCOUNT__SHIFT 0x0
+#define RTAVFS_REG18__RTAVFSVF3VOLTCODE__SHIFT 0x10
+#define RTAVFS_REG18__RTAVFSVF3FREQCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG18__RTAVFSVF3VOLTCODE_MASK 0xFFFF0000L
+//RTAVFS_REG19
+#define RTAVFS_REG19__RTAVFSGB_ZONE0__SHIFT 0x0
+#define RTAVFS_REG19__RTAVFSGB_ZONE1__SHIFT 0x6
+#define RTAVFS_REG19__RTAVFSGB_ZONE2__SHIFT 0xc
+#define RTAVFS_REG19__RTAVFSGB_ZONE3__SHIFT 0x12
+#define RTAVFS_REG19__RTAVFSGB_ZONE4__SHIFT 0x19
+#define RTAVFS_REG19__RTAVFSGB_ZONE0_MASK 0x0000003FL
+#define RTAVFS_REG19__RTAVFSGB_ZONE1_MASK 0x00000FC0L
+#define RTAVFS_REG19__RTAVFSGB_ZONE2_MASK 0x0003F000L
+#define RTAVFS_REG19__RTAVFSGB_ZONE3_MASK 0x01FC0000L
+#define RTAVFS_REG19__RTAVFSGB_ZONE4_MASK 0xFE000000L
+//RTAVFS_REG20
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG20__RTAVFSZONE0RESERVED__SHIFT 0x12
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG20__RTAVFSZONE0CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG20__RTAVFSZONE0RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG21
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG21__RTAVFSZONE1RESERVED__SHIFT 0x12
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG21__RTAVFSZONE1CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG21__RTAVFSZONE1RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG22
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG22__RTAVFSZONE2RESERVED__SHIFT 0x12
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG22__RTAVFSZONE2CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG22__RTAVFSZONE2RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG23
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG23__RTAVFSZONE3RESERVED__SHIFT 0x12
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG23__RTAVFSZONE3CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG23__RTAVFSZONE3RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG24
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG24__RTAVFSZONE4RESERVED__SHIFT 0x12
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG24__RTAVFSZONE4CPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG24__RTAVFSZONE4RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG25
+#define RTAVFS_REG25__RTAVFSRESERVED0__SHIFT 0x0
+#define RTAVFS_REG25__RTAVFSRESERVED0_MASK 0xFFFFFFFFL
+//RTAVFS_REG26
+#define RTAVFS_REG26__RTAVFSRESERVED1__SHIFT 0x0
+#define RTAVFS_REG26__RTAVFSRESERVED1_MASK 0xFFFFFFFFL
+//RTAVFS_REG27
+#define RTAVFS_REG27__RTAVFSRESERVED2__SHIFT 0x0
+#define RTAVFS_REG27__RTAVFSRESERVED2_MASK 0xFFFFFFFFL
+//RTAVFS_REG28
+#define RTAVFS_REG28__RTAVFSZONE0INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG28__RTAVFSZONE1INTERCEPT__SHIFT 0x10
+#define RTAVFS_REG28__RTAVFSZONE0INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG28__RTAVFSZONE1INTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG29
+#define RTAVFS_REG29__RTAVFSZONE2INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG29__RTAVFSZONE3INTERCEPT__SHIFT 0x10
+#define RTAVFS_REG29__RTAVFSZONE2INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG29__RTAVFSZONE3INTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG30
+#define RTAVFS_REG30__RTAVFSZONE4INTERCEPT__SHIFT 0x0
+#define RTAVFS_REG30__RTAVFSRESERVEDINTERCEPT__SHIFT 0x10
+#define RTAVFS_REG30__RTAVFSZONE4INTERCEPT_MASK 0x0000FFFFL
+#define RTAVFS_REG30__RTAVFSRESERVEDINTERCEPT_MASK 0xFFFF0000L
+//RTAVFS_REG31
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV0__SHIFT 0x0
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV1__SHIFT 0x2
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV2__SHIFT 0x4
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV3__SHIFT 0x6
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV4__SHIFT 0x8
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV5__SHIFT 0xa
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV6__SHIFT 0xc
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV7__SHIFT 0xe
+#define RTAVFS_REG31__RESERVED__SHIFT 0x10
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV0_MASK 0x00000003L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV1_MASK 0x0000000CL
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV2_MASK 0x00000030L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV3_MASK 0x000000C0L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV4_MASK 0x00000300L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV5_MASK 0x00000C00L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV6_MASK 0x00003000L
+#define RTAVFS_REG31__RTAVFSCPOCLKDIV7_MASK 0x0000C000L
+#define RTAVFS_REG31__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG32
+#define RTAVFS_REG32__RTAVFSFSMSTARTUPCNT__SHIFT 0x0
+#define RTAVFS_REG32__RESERVED__SHIFT 0x10
+#define RTAVFS_REG32__RTAVFSFSMSTARTUPCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG32__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG33
+#define RTAVFS_REG33__RTAVFSFSMIDLECNT__SHIFT 0x0
+#define RTAVFS_REG33__RESERVED__SHIFT 0x10
+#define RTAVFS_REG33__RTAVFSFSMIDLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG33__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG34
+#define RTAVFS_REG34__RTAVFSFSMRESETCPORIPPLECOUNTERSCNT__SHIFT 0x0
+#define RTAVFS_REG34__RESERVED__SHIFT 0x10
+#define RTAVFS_REG34__RTAVFSFSMRESETCPORIPPLECOUNTERSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG34__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG35
+#define RTAVFS_REG35__RTAVFSFSMSTARTCPOSCNT__SHIFT 0x0
+#define RTAVFS_REG35__RESERVED__SHIFT 0x10
+#define RTAVFS_REG35__RTAVFSFSMSTARTCPOSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG35__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG36
+#define RTAVFS_REG36__RTAVFSFSMSTARTRIPPLECOUNTERSCNT__SHIFT 0x0
+#define RTAVFS_REG36__RESERVED__SHIFT 0x10
+#define RTAVFS_REG36__RTAVFSFSMSTARTRIPPLECOUNTERSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG36__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG37
+#define RTAVFS_REG37__RTAVFSFSMRIPPLECOUNTERSDONECNT__SHIFT 0x0
+#define RTAVFS_REG37__RESERVED__SHIFT 0x10
+#define RTAVFS_REG37__RTAVFSFSMRIPPLECOUNTERSDONECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG37__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG38
+#define RTAVFS_REG38__RTAVFSFSMCPOFINALRESULTREADYCNT__SHIFT 0x0
+#define RTAVFS_REG38__RESERVED__SHIFT 0x10
+#define RTAVFS_REG38__RTAVFSFSMCPOFINALRESULTREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG38__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG39
+#define RTAVFS_REG39__RTAVFSFSMVOLTCODEREADYCNT__SHIFT 0x0
+#define RTAVFS_REG39__RESERVED__SHIFT 0x10
+#define RTAVFS_REG39__RTAVFSFSMVOLTCODEREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG39__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG40
+#define RTAVFS_REG40__RTAVFSFSMTARGETVOLTAGEREADYCNT__SHIFT 0x0
+#define RTAVFS_REG40__RESERVED__SHIFT 0x10
+#define RTAVFS_REG40__RTAVFSFSMTARGETVOLTAGEREADYCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG40__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG41
+#define RTAVFS_REG41__RTAVFSFSMSTOPCPOSCNT__SHIFT 0x0
+#define RTAVFS_REG41__RESERVED__SHIFT 0x10
+#define RTAVFS_REG41__RTAVFSFSMSTOPCPOSCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG41__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG42
+#define RTAVFS_REG42__RTAVFSFSMWAITFORACKCNT__SHIFT 0x0
+#define RTAVFS_REG42__RESERVED__SHIFT 0x10
+#define RTAVFS_REG42__RTAVFSFSMWAITFORACKCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG42__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG43
+#define RTAVFS_REG43__RTAVFSKP0__SHIFT 0x0
+#define RTAVFS_REG43__RTAVFSKP1__SHIFT 0x4
+#define RTAVFS_REG43__RTAVFSKP2__SHIFT 0x8
+#define RTAVFS_REG43__RTAVFSKP3__SHIFT 0xc
+#define RTAVFS_REG43__RTAVFSKI0__SHIFT 0x10
+#define RTAVFS_REG43__RTAVFSKI1__SHIFT 0x14
+#define RTAVFS_REG43__RTAVFSKI2__SHIFT 0x18
+#define RTAVFS_REG43__RTAVFSKI3__SHIFT 0x1c
+#define RTAVFS_REG43__RTAVFSKP0_MASK 0x0000000FL
+#define RTAVFS_REG43__RTAVFSKP1_MASK 0x000000F0L
+#define RTAVFS_REG43__RTAVFSKP2_MASK 0x00000F00L
+#define RTAVFS_REG43__RTAVFSKP3_MASK 0x0000F000L
+#define RTAVFS_REG43__RTAVFSKI0_MASK 0x000F0000L
+#define RTAVFS_REG43__RTAVFSKI1_MASK 0x00F00000L
+#define RTAVFS_REG43__RTAVFSKI2_MASK 0x0F000000L
+#define RTAVFS_REG43__RTAVFSKI3_MASK 0xF0000000L
+//RTAVFS_REG44
+#define RTAVFS_REG44__RTAVFSV1__SHIFT 0x0
+#define RTAVFS_REG44__RTAVFSV2__SHIFT 0xa
+#define RTAVFS_REG44__RTAVFSV3__SHIFT 0x14
+#define RTAVFS_REG44__RTAVFSUSEBINARYSEARCH__SHIFT 0x1e
+#define RTAVFS_REG44__RTAVFSVOLTCODEHWCAL__SHIFT 0x1f
+#define RTAVFS_REG44__RTAVFSV1_MASK 0x000003FFL
+#define RTAVFS_REG44__RTAVFSV2_MASK 0x000FFC00L
+#define RTAVFS_REG44__RTAVFSV3_MASK 0x3FF00000L
+#define RTAVFS_REG44__RTAVFSUSEBINARYSEARCH_MASK 0x40000000L
+#define RTAVFS_REG44__RTAVFSVOLTCODEHWCAL_MASK 0x80000000L
+//RTAVFS_REG45
+#define RTAVFS_REG45__RTAVFSVRBLEEDCNTRL__SHIFT 0x0
+#define RTAVFS_REG45__RTAVFSVRENABLE__SHIFT 0x1
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDE__SHIFT 0x2
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDESEL__SHIFT 0xc
+#define RTAVFS_REG45__RTAVFSLOWPWREN__SHIFT 0xd
+#define RTAVFS_REG45__RTAVFSUREGENABLE__SHIFT 0xe
+#define RTAVFS_REG45__RTAVFSBGENABLE__SHIFT 0xf
+#define RTAVFS_REG45__RTAVFSENABLEVDDRETSENSING__SHIFT 0x10
+#define RTAVFS_REG45__RESERVED__SHIFT 0x11
+#define RTAVFS_REG45__RTAVFSVRBLEEDCNTRL_MASK 0x00000001L
+#define RTAVFS_REG45__RTAVFSVRENABLE_MASK 0x00000002L
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDE_MASK 0x00000FFCL
+#define RTAVFS_REG45__RTAVFSVOLTCODEOVERRIDESEL_MASK 0x00001000L
+#define RTAVFS_REG45__RTAVFSLOWPWREN_MASK 0x00002000L
+#define RTAVFS_REG45__RTAVFSUREGENABLE_MASK 0x00004000L
+#define RTAVFS_REG45__RTAVFSBGENABLE_MASK 0x00008000L
+#define RTAVFS_REG45__RTAVFSENABLEVDDRETSENSING_MASK 0x00010000L
+#define RTAVFS_REG45__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG46
+#define RTAVFS_REG46__RTAVFSKP__SHIFT 0x0
+#define RTAVFS_REG46__RTAVFSKI__SHIFT 0x4
+#define RTAVFS_REG46__RTAVFSPIENABLEANTIWINDUP__SHIFT 0x8
+#define RTAVFS_REG46__RTAVFSPISHIFT__SHIFT 0x9
+#define RTAVFS_REG46__RTAVFSPIERREN__SHIFT 0xd
+#define RTAVFS_REG46__RTAVFSPISHIFTOUT__SHIFT 0xe
+#define RTAVFS_REG46__RTAVFSUSELUTKPKI__SHIFT 0x12
+#define RTAVFS_REG46__RESERVED__SHIFT 0x13
+#define RTAVFS_REG46__RTAVFSKP_MASK 0x0000000FL
+#define RTAVFS_REG46__RTAVFSKI_MASK 0x000000F0L
+#define RTAVFS_REG46__RTAVFSPIENABLEANTIWINDUP_MASK 0x00000100L
+#define RTAVFS_REG46__RTAVFSPISHIFT_MASK 0x00001E00L
+#define RTAVFS_REG46__RTAVFSPIERREN_MASK 0x00002000L
+#define RTAVFS_REG46__RTAVFSPISHIFTOUT_MASK 0x0003C000L
+#define RTAVFS_REG46__RTAVFSUSELUTKPKI_MASK 0x00040000L
+#define RTAVFS_REG46__RESERVED_MASK 0xFFF80000L
+//RTAVFS_REG47
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMIN__SHIFT 0x0
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMAX__SHIFT 0xa
+#define RTAVFS_REG47__RTAVFSPIERRMASK__SHIFT 0x14
+#define RTAVFS_REG47__RTAVFSFORCEDISABLEPI__SHIFT 0x1b
+#define RTAVFS_REG47__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMIN_MASK 0x000003FFL
+#define RTAVFS_REG47__RTAVFSVOLTCODEPIMAX_MASK 0x000FFC00L
+#define RTAVFS_REG47__RTAVFSPIERRMASK_MASK 0x07F00000L
+#define RTAVFS_REG47__RTAVFSFORCEDISABLEPI_MASK 0x08000000L
+#define RTAVFS_REG47__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG48
+#define RTAVFS_REG48__RTAVFSPILOOPNITERATIONS__SHIFT 0x0
+#define RTAVFS_REG48__RTAVFSPIERRTHRESHOLD__SHIFT 0x10
+#define RTAVFS_REG48__RTAVFSPILOOPNITERATIONS_MASK 0x0000FFFFL
+#define RTAVFS_REG48__RTAVFSPIERRTHRESHOLD_MASK 0xFFFF0000L
+//RTAVFS_REG49
+#define RTAVFS_REG49__RTAVFSPSMRSTAVGVDD__SHIFT 0x0
+#define RTAVFS_REG49__RTAVFSPSMMEASMAXVDD__SHIFT 0x1
+#define RTAVFS_REG49__RTAVFSPSMCLKDIVVDD__SHIFT 0x2
+#define RTAVFS_REG49__RTAVFSPSMAVGDIVVDD__SHIFT 0x4
+#define RTAVFS_REG49__RTAVFSPSMOSCENVDD__SHIFT 0xa
+#define RTAVFS_REG49__RTAVFSPSMAVGENVDD__SHIFT 0xb
+#define RTAVFS_REG49__RTAVFSPSMRSTMINMAXVDD__SHIFT 0xc
+#define RTAVFS_REG49__RESERVED__SHIFT 0xd
+#define RTAVFS_REG49__RTAVFSPSMRSTAVGVDD_MASK 0x00000001L
+#define RTAVFS_REG49__RTAVFSPSMMEASMAXVDD_MASK 0x00000002L
+#define RTAVFS_REG49__RTAVFSPSMCLKDIVVDD_MASK 0x0000000CL
+#define RTAVFS_REG49__RTAVFSPSMAVGDIVVDD_MASK 0x000003F0L
+#define RTAVFS_REG49__RTAVFSPSMOSCENVDD_MASK 0x00000400L
+#define RTAVFS_REG49__RTAVFSPSMAVGENVDD_MASK 0x00000800L
+#define RTAVFS_REG49__RTAVFSPSMRSTMINMAXVDD_MASK 0x00001000L
+#define RTAVFS_REG49__RESERVED_MASK 0xFFFFE000L
+//RTAVFS_REG50
+#define RTAVFS_REG50__RTAVFSPSMRSTAVGVREG__SHIFT 0x0
+#define RTAVFS_REG50__RTAVFSPSMMEASMAXVREG__SHIFT 0x1
+#define RTAVFS_REG50__RTAVFSPSMCLKDIVVREG__SHIFT 0x2
+#define RTAVFS_REG50__RTAVFSPSMAVGDIVVREG__SHIFT 0x4
+#define RTAVFS_REG50__RTAVFSPSMOSCENVREG__SHIFT 0xa
+#define RTAVFS_REG50__RTAVFSPSMAVGENVREG__SHIFT 0xb
+#define RTAVFS_REG50__RTAVFSPSMRSTMINMAXVREG__SHIFT 0xc
+#define RTAVFS_REG50__RESERVED__SHIFT 0xd
+#define RTAVFS_REG50__RTAVFSPSMRSTAVGVREG_MASK 0x00000001L
+#define RTAVFS_REG50__RTAVFSPSMMEASMAXVREG_MASK 0x00000002L
+#define RTAVFS_REG50__RTAVFSPSMCLKDIVVREG_MASK 0x0000000CL
+#define RTAVFS_REG50__RTAVFSPSMAVGDIVVREG_MASK 0x000003F0L
+#define RTAVFS_REG50__RTAVFSPSMOSCENVREG_MASK 0x00000400L
+#define RTAVFS_REG50__RTAVFSPSMAVGENVREG_MASK 0x00000800L
+#define RTAVFS_REG50__RTAVFSPSMRSTMINMAXVREG_MASK 0x00001000L
+#define RTAVFS_REG50__RESERVED_MASK 0xFFFFE000L
+//RTAVFS_REG51
+#define RTAVFS_REG51__RTAVFSAVFSENABLE__SHIFT 0x0
+#define RTAVFS_REG51__RTAVFSCPOTURNONDELAY__SHIFT 0x1
+#define RTAVFS_REG51__RTAVFSSELECTMINMAX__SHIFT 0x5
+#define RTAVFS_REG51__RTAVFSSELECTPERPATHSCALING__SHIFT 0x6
+#define RTAVFS_REG51__RTAVFSADDVOLTCODEGUARDBAND__SHIFT 0x7
+#define RTAVFS_REG51__RTAVFSSENDAVGPSMTOPSMOUT__SHIFT 0x8
+#define RTAVFS_REG51__RTAVFSUPDATEANCHORVOLTAGES__SHIFT 0x9
+#define RTAVFS_REG51__RTAVFSSENDVDDTOPSMOUT__SHIFT 0xa
+#define RTAVFS_REG51__RESERVED__SHIFT 0xb
+#define RTAVFS_REG51__RTAVFSAVFSENABLE_MASK 0x00000001L
+#define RTAVFS_REG51__RTAVFSCPOTURNONDELAY_MASK 0x0000001EL
+#define RTAVFS_REG51__RTAVFSSELECTMINMAX_MASK 0x00000020L
+#define RTAVFS_REG51__RTAVFSSELECTPERPATHSCALING_MASK 0x00000040L
+#define RTAVFS_REG51__RTAVFSADDVOLTCODEGUARDBAND_MASK 0x00000080L
+#define RTAVFS_REG51__RTAVFSSENDAVGPSMTOPSMOUT_MASK 0x00000100L
+#define RTAVFS_REG51__RTAVFSUPDATEANCHORVOLTAGES_MASK 0x00000200L
+#define RTAVFS_REG51__RTAVFSSENDVDDTOPSMOUT_MASK 0x00000400L
+#define RTAVFS_REG51__RESERVED_MASK 0xFFFFF800L
+//RTAVFS_REG52
+#define RTAVFS_REG52__RTAVFSMINMAXPSMVDD__SHIFT 0x0
+#define RTAVFS_REG52__RTAVFSAVGPSMVDD__SHIFT 0xe
+#define RTAVFS_REG52__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG52__RTAVFSMINMAXPSMVDD_MASK 0x00003FFFL
+#define RTAVFS_REG52__RTAVFSAVGPSMVDD_MASK 0x0FFFC000L
+#define RTAVFS_REG52__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG53
+#define RTAVFS_REG53__RTAVFSMINMAXPSMVREG__SHIFT 0x0
+#define RTAVFS_REG53__RTAVFSAVGPSMVREG__SHIFT 0xe
+#define RTAVFS_REG53__RESERVED__SHIFT 0x1c
+#define RTAVFS_REG53__RTAVFSMINMAXPSMVREG_MASK 0x00003FFFL
+#define RTAVFS_REG53__RTAVFSAVGPSMVREG_MASK 0x0FFFC000L
+#define RTAVFS_REG53__RESERVED_MASK 0xF0000000L
+//RTAVFS_REG54
+#define RTAVFS_REG54__RTAVFSCPO0_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG54__RTAVFSCPO0_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG54__RTAVFSCPO0_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG54__RTAVFSCPO0_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG55
+#define RTAVFS_REG55__RTAVFSCPO1_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG55__RTAVFSCPO1_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG55__RTAVFSCPO1_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG55__RTAVFSCPO1_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG56
+#define RTAVFS_REG56__RTAVFSCPO2_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG56__RTAVFSCPO2_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG56__RTAVFSCPO2_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG56__RTAVFSCPO2_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG57
+#define RTAVFS_REG57__RTAVFSCPO3_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG57__RTAVFSCPO3_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG57__RTAVFSCPO3_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG57__RTAVFSCPO3_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG58
+#define RTAVFS_REG58__RTAVFSCPO4_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG58__RTAVFSCPO4_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG58__RTAVFSCPO4_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG58__RTAVFSCPO4_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG59
+#define RTAVFS_REG59__RTAVFSCPO5_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG59__RTAVFSCPO5_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG59__RTAVFSCPO5_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG59__RTAVFSCPO5_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG60
+#define RTAVFS_REG60__RTAVFSCPO6_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG60__RTAVFSCPO6_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG60__RTAVFSCPO6_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG60__RTAVFSCPO6_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG61
+#define RTAVFS_REG61__RTAVFSCPO7_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG61__RTAVFSCPO7_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG61__RTAVFSCPO7_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG61__RTAVFSCPO7_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG62
+#define RTAVFS_REG62__RTAVFSCPO8_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG62__RTAVFSCPO8_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG62__RTAVFSCPO8_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG62__RTAVFSCPO8_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG63
+#define RTAVFS_REG63__RTAVFSCPO9_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG63__RTAVFSCPO9_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG63__RTAVFSCPO9_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG63__RTAVFSCPO9_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG64
+#define RTAVFS_REG64__RTAVFSCPO10_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG64__RTAVFSCPO10_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG64__RTAVFSCPO10_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG64__RTAVFSCPO10_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG65
+#define RTAVFS_REG65__RTAVFSCPO11_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG65__RTAVFSCPO11_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG65__RTAVFSCPO11_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG65__RTAVFSCPO11_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG66
+#define RTAVFS_REG66__RTAVFSCPO12_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG66__RTAVFSCPO12_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG66__RTAVFSCPO12_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG66__RTAVFSCPO12_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG67
+#define RTAVFS_REG67__RTAVFSCPO13_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG67__RTAVFSCPO13_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG67__RTAVFSCPO13_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG67__RTAVFSCPO13_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG68
+#define RTAVFS_REG68__RTAVFSCPO14_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG68__RTAVFSCPO14_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG68__RTAVFSCPO14_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG68__RTAVFSCPO14_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG69
+#define RTAVFS_REG69__RTAVFSCPO15_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG69__RTAVFSCPO15_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG69__RTAVFSCPO15_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG69__RTAVFSCPO15_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG70
+#define RTAVFS_REG70__RTAVFSCPO16_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG70__RTAVFSCPO16_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG70__RTAVFSCPO16_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG70__RTAVFSCPO16_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG71
+#define RTAVFS_REG71__RTAVFSCPO17_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG71__RTAVFSCPO17_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG71__RTAVFSCPO17_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG71__RTAVFSCPO17_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG72
+#define RTAVFS_REG72__RTAVFSCPO18_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG72__RTAVFSCPO18_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG72__RTAVFSCPO18_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG72__RTAVFSCPO18_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG73
+#define RTAVFS_REG73__RTAVFSCPO19_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG73__RTAVFSCPO19_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG73__RTAVFSCPO19_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG73__RTAVFSCPO19_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG74
+#define RTAVFS_REG74__RTAVFSCPO20_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG74__RTAVFSCPO20_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG74__RTAVFSCPO20_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG74__RTAVFSCPO20_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG75
+#define RTAVFS_REG75__RTAVFSCPO21_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG75__RTAVFSCPO21_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG75__RTAVFSCPO21_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG75__RTAVFSCPO21_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG76
+#define RTAVFS_REG76__RTAVFSCPO22_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG76__RTAVFSCPO22_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG76__RTAVFSCPO22_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG76__RTAVFSCPO22_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG77
+#define RTAVFS_REG77__RTAVFSCPO23_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG77__RTAVFSCPO23_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG77__RTAVFSCPO23_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG77__RTAVFSCPO23_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG78
+#define RTAVFS_REG78__RTAVFSCPO24_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG78__RTAVFSCPO24_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG78__RTAVFSCPO24_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG78__RTAVFSCPO24_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG79
+#define RTAVFS_REG79__RTAVFSCPO25_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG79__RTAVFSCPO25_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG79__RTAVFSCPO25_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG79__RTAVFSCPO25_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG80
+#define RTAVFS_REG80__RTAVFSCPO26_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG80__RTAVFSCPO26_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG80__RTAVFSCPO26_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG80__RTAVFSCPO26_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG81
+#define RTAVFS_REG81__RTAVFSCPO27_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG81__RTAVFSCPO27_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG81__RTAVFSCPO27_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG81__RTAVFSCPO27_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG82
+#define RTAVFS_REG82__RTAVFSCPO28_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG82__RTAVFSCPO28_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG82__RTAVFSCPO28_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG82__RTAVFSCPO28_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG83
+#define RTAVFS_REG83__RTAVFSCPO29_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG83__RTAVFSCPO29_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG83__RTAVFSCPO29_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG83__RTAVFSCPO29_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG84
+#define RTAVFS_REG84__RTAVFSCPO30_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG84__RTAVFSCPO30_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG84__RTAVFSCPO30_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG84__RTAVFSCPO30_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG85
+#define RTAVFS_REG85__RTAVFSCPO31_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG85__RTAVFSCPO31_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG85__RTAVFSCPO31_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG85__RTAVFSCPO31_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG86
+#define RTAVFS_REG86__RTAVFSCPO32_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG86__RTAVFSCPO32_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG86__RTAVFSCPO32_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG86__RTAVFSCPO32_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG87
+#define RTAVFS_REG87__RTAVFSCPO33_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG87__RTAVFSCPO33_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG87__RTAVFSCPO33_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG87__RTAVFSCPO33_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG88
+#define RTAVFS_REG88__RTAVFSCPO34_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG88__RTAVFSCPO34_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG88__RTAVFSCPO34_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG88__RTAVFSCPO34_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG89
+#define RTAVFS_REG89__RTAVFSCPO35_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG89__RTAVFSCPO35_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG89__RTAVFSCPO35_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG89__RTAVFSCPO35_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG90
+#define RTAVFS_REG90__RTAVFSCPO36_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG90__RTAVFSCPO36_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG90__RTAVFSCPO36_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG90__RTAVFSCPO36_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG91
+#define RTAVFS_REG91__RTAVFSCPO37_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG91__RTAVFSCPO37_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG91__RTAVFSCPO37_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG91__RTAVFSCPO37_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG92
+#define RTAVFS_REG92__RTAVFSCPO38_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG92__RTAVFSCPO38_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG92__RTAVFSCPO38_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG92__RTAVFSCPO38_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG93
+#define RTAVFS_REG93__RTAVFSCPO39_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG93__RTAVFSCPO39_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG93__RTAVFSCPO39_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG93__RTAVFSCPO39_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG94
+#define RTAVFS_REG94__RTAVFSCPO40_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG94__RTAVFSCPO40_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG94__RTAVFSCPO40_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG94__RTAVFSCPO40_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG95
+#define RTAVFS_REG95__RTAVFSCPO41_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG95__RTAVFSCPO41_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG95__RTAVFSCPO41_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG95__RTAVFSCPO41_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG96
+#define RTAVFS_REG96__RTAVFSCPO42_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG96__RTAVFSCPO42_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG96__RTAVFSCPO42_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG96__RTAVFSCPO42_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG97
+#define RTAVFS_REG97__RTAVFSCPO43_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG97__RTAVFSCPO43_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG97__RTAVFSCPO43_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG97__RTAVFSCPO43_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG98
+#define RTAVFS_REG98__RTAVFSCPO44_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG98__RTAVFSCPO44_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG98__RTAVFSCPO44_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG98__RTAVFSCPO44_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG99
+#define RTAVFS_REG99__RTAVFSCPO45_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG99__RTAVFSCPO45_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG99__RTAVFSCPO45_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG99__RTAVFSCPO45_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG100
+#define RTAVFS_REG100__RTAVFSCPO46_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG100__RTAVFSCPO46_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG100__RTAVFSCPO46_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG100__RTAVFSCPO46_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG101
+#define RTAVFS_REG101__RTAVFSCPO47_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG101__RTAVFSCPO47_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG101__RTAVFSCPO47_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG101__RTAVFSCPO47_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG102
+#define RTAVFS_REG102__RTAVFSCPO48_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG102__RTAVFSCPO48_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG102__RTAVFSCPO48_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG102__RTAVFSCPO48_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG103
+#define RTAVFS_REG103__RTAVFSCPO49_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG103__RTAVFSCPO49_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG103__RTAVFSCPO49_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG103__RTAVFSCPO49_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG104
+#define RTAVFS_REG104__RTAVFSCPO50_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG104__RTAVFSCPO50_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG104__RTAVFSCPO50_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG104__RTAVFSCPO50_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG105
+#define RTAVFS_REG105__RTAVFSCPO51_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG105__RTAVFSCPO51_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG105__RTAVFSCPO51_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG105__RTAVFSCPO51_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG106
+#define RTAVFS_REG106__RTAVFSCPO52_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG106__RTAVFSCPO52_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG106__RTAVFSCPO52_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG106__RTAVFSCPO52_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG107
+#define RTAVFS_REG107__RTAVFSCPO53_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG107__RTAVFSCPO53_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG107__RTAVFSCPO53_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG107__RTAVFSCPO53_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG108
+#define RTAVFS_REG108__RTAVFSCPO54_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG108__RTAVFSCPO54_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG108__RTAVFSCPO54_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG108__RTAVFSCPO54_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG109
+#define RTAVFS_REG109__RTAVFSCPO55_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG109__RTAVFSCPO55_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG109__RTAVFSCPO55_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG109__RTAVFSCPO55_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG110
+#define RTAVFS_REG110__RTAVFSCPO56_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG110__RTAVFSCPO56_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG110__RTAVFSCPO56_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG110__RTAVFSCPO56_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG111
+#define RTAVFS_REG111__RTAVFSCPO57_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG111__RTAVFSCPO57_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG111__RTAVFSCPO57_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG111__RTAVFSCPO57_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG112
+#define RTAVFS_REG112__RTAVFSCPO58_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG112__RTAVFSCPO58_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG112__RTAVFSCPO58_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG112__RTAVFSCPO58_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG113
+#define RTAVFS_REG113__RTAVFSCPO59_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG113__RTAVFSCPO59_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG113__RTAVFSCPO59_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG113__RTAVFSCPO59_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG114
+#define RTAVFS_REG114__RTAVFSCPO60_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG114__RTAVFSCPO60_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG114__RTAVFSCPO60_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG114__RTAVFSCPO60_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG115
+#define RTAVFS_REG115__RTAVFSCPO61_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG115__RTAVFSCPO61_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG115__RTAVFSCPO61_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG115__RTAVFSCPO61_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG116
+#define RTAVFS_REG116__RTAVFSCPO62_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG116__RTAVFSCPO62_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG116__RTAVFSCPO62_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG116__RTAVFSCPO62_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG117
+#define RTAVFS_REG117__RTAVFSCPO63_STARTCNT__SHIFT 0x0
+#define RTAVFS_REG117__RTAVFSCPO63_STOPCNT__SHIFT 0x10
+#define RTAVFS_REG117__RTAVFSCPO63_STARTCNT_MASK 0x0000FFFFL
+#define RTAVFS_REG117__RTAVFSCPO63_STOPCNT_MASK 0xFFFF0000L
+//RTAVFS_REG118
+#define RTAVFS_REG118__RTAVFSCPOEN0__SHIFT 0x0
+#define RTAVFS_REG118__RTAVFSCPOEN0_MASK 0xFFFFFFFFL
+//RTAVFS_REG119
+#define RTAVFS_REG119__RTAVFSCPOEN1__SHIFT 0x0
+#define RTAVFS_REG119__RTAVFSCPOEN1_MASK 0xFFFFFFFFL
+//RTAVFS_REG120
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV0__SHIFT 0x0
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV1__SHIFT 0x2
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV2__SHIFT 0x4
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV3__SHIFT 0x6
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV4__SHIFT 0x8
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV5__SHIFT 0xa
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV6__SHIFT 0xc
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV7__SHIFT 0xe
+#define RTAVFS_REG120__RTAVFSCPOAVGDIVFINAL__SHIFT 0x10
+#define RTAVFS_REG120__RESERVED__SHIFT 0x12
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV0_MASK 0x00000003L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV1_MASK 0x0000000CL
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV2_MASK 0x00000030L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV3_MASK 0x000000C0L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV4_MASK 0x00000300L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV5_MASK 0x00000C00L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV6_MASK 0x00003000L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIV7_MASK 0x0000C000L
+#define RTAVFS_REG120__RTAVFSCPOAVGDIVFINAL_MASK 0x00030000L
+#define RTAVFS_REG120__RESERVED_MASK 0xFFFC0000L
+//RTAVFS_REG121
+#define RTAVFS_REG121__RTAVFSZONE0INUSE__SHIFT 0x0
+#define RTAVFS_REG121__RTAVFSZONE1INUSE__SHIFT 0x1
+#define RTAVFS_REG121__RTAVFSZONE2INUSE__SHIFT 0x2
+#define RTAVFS_REG121__RTAVFSZONE3INUSE__SHIFT 0x3
+#define RTAVFS_REG121__RTAVFSZONE4INUSE__SHIFT 0x4
+#define RTAVFS_REG121__RTAVFSRESERVED__SHIFT 0x5
+#define RTAVFS_REG121__RTAVFSERRORCODE__SHIFT 0x1c
+#define RTAVFS_REG121__RTAVFSZONE0INUSE_MASK 0x00000001L
+#define RTAVFS_REG121__RTAVFSZONE1INUSE_MASK 0x00000002L
+#define RTAVFS_REG121__RTAVFSZONE2INUSE_MASK 0x00000004L
+#define RTAVFS_REG121__RTAVFSZONE3INUSE_MASK 0x00000008L
+#define RTAVFS_REG121__RTAVFSZONE4INUSE_MASK 0x00000010L
+#define RTAVFS_REG121__RTAVFSRESERVED_MASK 0x0FFFFFE0L
+#define RTAVFS_REG121__RTAVFSERRORCODE_MASK 0xF0000000L
+//RTAVFS_REG122
+#define RTAVFS_REG122__RTAVFSCPO0_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG122__RESERVED__SHIFT 0x10
+#define RTAVFS_REG122__RTAVFSCPO0_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG122__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG123
+#define RTAVFS_REG123__RTAVFSCPO1_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG123__RESERVED__SHIFT 0x10
+#define RTAVFS_REG123__RTAVFSCPO1_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG123__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG124
+#define RTAVFS_REG124__RTAVFSCPO2_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG124__RESERVED__SHIFT 0x10
+#define RTAVFS_REG124__RTAVFSCPO2_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG124__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG125
+#define RTAVFS_REG125__RTAVFSCPO3_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG125__RESERVED__SHIFT 0x10
+#define RTAVFS_REG125__RTAVFSCPO3_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG125__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG126
+#define RTAVFS_REG126__RTAVFSCPO4_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG126__RESERVED__SHIFT 0x10
+#define RTAVFS_REG126__RTAVFSCPO4_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG126__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG127
+#define RTAVFS_REG127__RTAVFSCPO5_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG127__RESERVED__SHIFT 0x10
+#define RTAVFS_REG127__RTAVFSCPO5_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG127__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG128
+#define RTAVFS_REG128__RTAVFSCPO6_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG128__RESERVED__SHIFT 0x10
+#define RTAVFS_REG128__RTAVFSCPO6_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG128__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG129
+#define RTAVFS_REG129__RTAVFSCPO7_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG129__RESERVED__SHIFT 0x10
+#define RTAVFS_REG129__RTAVFSCPO7_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG129__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG130
+#define RTAVFS_REG130__RTAVFSCPO8_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG130__RESERVED__SHIFT 0x10
+#define RTAVFS_REG130__RTAVFSCPO8_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG130__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG131
+#define RTAVFS_REG131__RTAVFSCPO9_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG131__RESERVED__SHIFT 0x10
+#define RTAVFS_REG131__RTAVFSCPO9_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG131__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG132
+#define RTAVFS_REG132__RTAVFSCPO10_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG132__RESERVED__SHIFT 0x10
+#define RTAVFS_REG132__RTAVFSCPO10_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG132__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG133
+#define RTAVFS_REG133__RTAVFSCPO11_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG133__RESERVED__SHIFT 0x10
+#define RTAVFS_REG133__RTAVFSCPO11_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG133__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG134
+#define RTAVFS_REG134__RTAVFSCPO12_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG134__RESERVED__SHIFT 0x10
+#define RTAVFS_REG134__RTAVFSCPO12_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG134__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG135
+#define RTAVFS_REG135__RTAVFSCPO13_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG135__RESERVED__SHIFT 0x10
+#define RTAVFS_REG135__RTAVFSCPO13_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG135__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG136
+#define RTAVFS_REG136__RTAVFSCPO14_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG136__RESERVED__SHIFT 0x10
+#define RTAVFS_REG136__RTAVFSCPO14_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG136__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG137
+#define RTAVFS_REG137__RTAVFSCPO15_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG137__RESERVED__SHIFT 0x10
+#define RTAVFS_REG137__RTAVFSCPO15_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG137__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG138
+#define RTAVFS_REG138__RTAVFSCPO16_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG138__RESERVED__SHIFT 0x10
+#define RTAVFS_REG138__RTAVFSCPO16_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG138__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG139
+#define RTAVFS_REG139__RTAVFSCPO17_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG139__RESERVED__SHIFT 0x10
+#define RTAVFS_REG139__RTAVFSCPO17_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG139__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG140
+#define RTAVFS_REG140__RTAVFSCPO18_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG140__RESERVED__SHIFT 0x10
+#define RTAVFS_REG140__RTAVFSCPO18_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG140__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG141
+#define RTAVFS_REG141__RTAVFSCPO19_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG141__RESERVED__SHIFT 0x10
+#define RTAVFS_REG141__RTAVFSCPO19_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG141__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG142
+#define RTAVFS_REG142__RTAVFSCPO20_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG142__RESERVED__SHIFT 0x10
+#define RTAVFS_REG142__RTAVFSCPO20_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG142__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG143
+#define RTAVFS_REG143__RTAVFSCPO21_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG143__RESERVED__SHIFT 0x10
+#define RTAVFS_REG143__RTAVFSCPO21_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG143__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG144
+#define RTAVFS_REG144__RTAVFSCPO22_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG144__RESERVED__SHIFT 0x10
+#define RTAVFS_REG144__RTAVFSCPO22_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG144__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG145
+#define RTAVFS_REG145__RTAVFSCPO23_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG145__RESERVED__SHIFT 0x10
+#define RTAVFS_REG145__RTAVFSCPO23_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG145__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG146
+#define RTAVFS_REG146__RTAVFSCPO24_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG146__RESERVED__SHIFT 0x10
+#define RTAVFS_REG146__RTAVFSCPO24_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG146__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG147
+#define RTAVFS_REG147__RTAVFSCPO25_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG147__RESERVED__SHIFT 0x10
+#define RTAVFS_REG147__RTAVFSCPO25_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG147__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG148
+#define RTAVFS_REG148__RTAVFSCPO26_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG148__RESERVED__SHIFT 0x10
+#define RTAVFS_REG148__RTAVFSCPO26_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG148__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG149
+#define RTAVFS_REG149__RTAVFSCPO27_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG149__RESERVED__SHIFT 0x10
+#define RTAVFS_REG149__RTAVFSCPO27_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG149__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG150
+#define RTAVFS_REG150__RTAVFSCPO28_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG150__RESERVED__SHIFT 0x10
+#define RTAVFS_REG150__RTAVFSCPO28_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG150__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG151
+#define RTAVFS_REG151__RTAVFSCPO29_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG151__RESERVED__SHIFT 0x10
+#define RTAVFS_REG151__RTAVFSCPO29_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG151__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG152
+#define RTAVFS_REG152__RTAVFSCPO30_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG152__RESERVED__SHIFT 0x10
+#define RTAVFS_REG152__RTAVFSCPO30_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG152__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG153
+#define RTAVFS_REG153__RTAVFSCPO31_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG153__RESERVED__SHIFT 0x10
+#define RTAVFS_REG153__RTAVFSCPO31_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG153__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG154
+#define RTAVFS_REG154__RTAVFSCPO32_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG154__RESERVED__SHIFT 0x10
+#define RTAVFS_REG154__RTAVFSCPO32_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG154__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG155
+#define RTAVFS_REG155__RTAVFSCPO33_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG155__RESERVED__SHIFT 0x10
+#define RTAVFS_REG155__RTAVFSCPO33_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG155__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG156
+#define RTAVFS_REG156__RTAVFSCPO34_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG156__RESERVED__SHIFT 0x10
+#define RTAVFS_REG156__RTAVFSCPO34_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG156__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG157
+#define RTAVFS_REG157__RTAVFSCPO35_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG157__RESERVED__SHIFT 0x10
+#define RTAVFS_REG157__RTAVFSCPO35_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG157__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG158
+#define RTAVFS_REG158__RTAVFSCPO36_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG158__RESERVED__SHIFT 0x10
+#define RTAVFS_REG158__RTAVFSCPO36_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG158__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG159
+#define RTAVFS_REG159__RTAVFSCPO37_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG159__RESERVED__SHIFT 0x10
+#define RTAVFS_REG159__RTAVFSCPO37_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG159__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG160
+#define RTAVFS_REG160__RTAVFSCPO38_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG160__RESERVED__SHIFT 0x10
+#define RTAVFS_REG160__RTAVFSCPO38_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG160__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG161
+#define RTAVFS_REG161__RTAVFSCPO39_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG161__RESERVED__SHIFT 0x10
+#define RTAVFS_REG161__RTAVFSCPO39_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG161__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG162
+#define RTAVFS_REG162__RTAVFSCPO40_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG162__RESERVED__SHIFT 0x10
+#define RTAVFS_REG162__RTAVFSCPO40_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG162__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG163
+#define RTAVFS_REG163__RTAVFSCPO41_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG163__RESERVED__SHIFT 0x10
+#define RTAVFS_REG163__RTAVFSCPO41_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG163__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG164
+#define RTAVFS_REG164__RTAVFSCPO42_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG164__RESERVED__SHIFT 0x10
+#define RTAVFS_REG164__RTAVFSCPO42_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG164__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG165
+#define RTAVFS_REG165__RTAVFSCPO43_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG165__RESERVED__SHIFT 0x10
+#define RTAVFS_REG165__RTAVFSCPO43_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG165__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG166
+#define RTAVFS_REG166__RTAVFSCPO44_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG166__RESERVED__SHIFT 0x10
+#define RTAVFS_REG166__RTAVFSCPO44_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG166__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG167
+#define RTAVFS_REG167__RTAVFSCPO45_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG167__RESERVED__SHIFT 0x10
+#define RTAVFS_REG167__RTAVFSCPO45_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG167__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG168
+#define RTAVFS_REG168__RTAVFSCPO46_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG168__RESERVED__SHIFT 0x10
+#define RTAVFS_REG168__RTAVFSCPO46_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG168__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG169
+#define RTAVFS_REG169__RTAVFSCPO47_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG169__RESERVED__SHIFT 0x10
+#define RTAVFS_REG169__RTAVFSCPO47_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG169__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG170
+#define RTAVFS_REG170__RTAVFSCPO48_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG170__RESERVED__SHIFT 0x10
+#define RTAVFS_REG170__RTAVFSCPO48_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG170__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG171
+#define RTAVFS_REG171__RTAVFSCPO49_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG171__RESERVED__SHIFT 0x10
+#define RTAVFS_REG171__RTAVFSCPO49_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG171__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG172
+#define RTAVFS_REG172__RTAVFSCPO50_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG172__RESERVED__SHIFT 0x10
+#define RTAVFS_REG172__RTAVFSCPO50_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG172__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG173
+#define RTAVFS_REG173__RTAVFSCPO51_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG173__RESERVED__SHIFT 0x10
+#define RTAVFS_REG173__RTAVFSCPO51_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG173__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG174
+#define RTAVFS_REG174__RTAVFSCPO52_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG174__RESERVED__SHIFT 0x10
+#define RTAVFS_REG174__RTAVFSCPO52_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG174__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG175
+#define RTAVFS_REG175__RTAVFSCPO53_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG175__RESERVED__SHIFT 0x10
+#define RTAVFS_REG175__RTAVFSCPO53_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG175__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG176
+#define RTAVFS_REG176__RTAVFSCPO54_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG176__RESERVED__SHIFT 0x10
+#define RTAVFS_REG176__RTAVFSCPO54_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG176__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG177
+#define RTAVFS_REG177__RTAVFSCPO55_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG177__RESERVED__SHIFT 0x10
+#define RTAVFS_REG177__RTAVFSCPO55_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG177__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG178
+#define RTAVFS_REG178__RTAVFSCPO56_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG178__RESERVED__SHIFT 0x10
+#define RTAVFS_REG178__RTAVFSCPO56_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG178__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG179
+#define RTAVFS_REG179__RTAVFSCPO57_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG179__RESERVED__SHIFT 0x10
+#define RTAVFS_REG179__RTAVFSCPO57_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG179__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG180
+#define RTAVFS_REG180__RTAVFSCPO58_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG180__RESERVED__SHIFT 0x10
+#define RTAVFS_REG180__RTAVFSCPO58_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG180__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG181
+#define RTAVFS_REG181__RTAVFSCPO59_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG181__RESERVED__SHIFT 0x10
+#define RTAVFS_REG181__RTAVFSCPO59_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG181__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG182
+#define RTAVFS_REG182__RTAVFSCPO60_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG182__RESERVED__SHIFT 0x10
+#define RTAVFS_REG182__RTAVFSCPO60_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG182__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG183
+#define RTAVFS_REG183__RTAVFSCPO61_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG183__RESERVED__SHIFT 0x10
+#define RTAVFS_REG183__RTAVFSCPO61_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG183__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG184
+#define RTAVFS_REG184__RTAVFSCPO62_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG184__RESERVED__SHIFT 0x10
+#define RTAVFS_REG184__RTAVFSCPO62_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG184__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG185
+#define RTAVFS_REG185__RTAVFSCPO63_RIPPLECNT__SHIFT 0x0
+#define RTAVFS_REG185__RESERVED__SHIFT 0x10
+#define RTAVFS_REG185__RTAVFSCPO63_RIPPLECNT_MASK 0x0000FFFFL
+#define RTAVFS_REG185__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG186
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDE__SHIFT 0x0
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDESEL__SHIFT 0x10
+#define RTAVFS_REG186__RESERVED__SHIFT 0x11
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDE_MASK 0x0000FFFFL
+#define RTAVFS_REG186__RTAVFSTARGETFREQCNTOVERRIDESEL_MASK 0x00010000L
+#define RTAVFS_REG186__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG187
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDE__SHIFT 0x0
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDESEL__SHIFT 0x10
+#define RTAVFS_REG187__RESERVED__SHIFT 0x11
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDE_MASK 0x0000FFFFL
+#define RTAVFS_REG187__RTAVFSCURRENTFREQCNTOVERRIDESEL_MASK 0x00010000L
+#define RTAVFS_REG187__RESERVED_MASK 0xFFFE0000L
+//RTAVFS_REG189
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMPI__SHIFT 0x0
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMBINARYSEARCH__SHIFT 0xa
+#define RTAVFS_REG189__RTAVFSVDDREGON__SHIFT 0x14
+#define RTAVFS_REG189__RTAVFSVDDABOVEVDDRET__SHIFT 0x15
+#define RTAVFS_REG189__RESERVED__SHIFT 0x16
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMPI_MASK 0x000003FFL
+#define RTAVFS_REG189__RTAVFSVOLTCODEFROMBINARYSEARCH_MASK 0x000FFC00L
+#define RTAVFS_REG189__RTAVFSVDDREGON_MASK 0x00100000L
+#define RTAVFS_REG189__RTAVFSVDDABOVEVDDRET_MASK 0x00200000L
+#define RTAVFS_REG189__RESERVED_MASK 0xFFC00000L
+//RTAVFS_REG190
+#define RTAVFS_REG190__RTAVFSIGNORERLCREQ__SHIFT 0x0
+#define RTAVFS_REG190__RTAVFSRIPPLECOUNTEROUTSEL__SHIFT 0x1
+#define RTAVFS_REG190__RTAVFSRUNLOOP__SHIFT 0x6
+#define RTAVFS_REG190__RTAVFSSAVECPOWEIGHTS__SHIFT 0x7
+#define RTAVFS_REG190__RTAVFSRESTORECPOWEIGHTS__SHIFT 0x8
+#define RTAVFS_REG190__RTAVFSRESETRETENTIONREGS__SHIFT 0x9
+#define RTAVFS_REG190__RESERVED__SHIFT 0xa
+#define RTAVFS_REG190__RTAVFSIGNORERLCREQ_MASK 0x00000001L
+#define RTAVFS_REG190__RTAVFSRIPPLECOUNTEROUTSEL_MASK 0x0000003EL
+#define RTAVFS_REG190__RTAVFSRUNLOOP_MASK 0x00000040L
+#define RTAVFS_REG190__RTAVFSSAVECPOWEIGHTS_MASK 0x00000080L
+#define RTAVFS_REG190__RTAVFSRESTORECPOWEIGHTS_MASK 0x00000100L
+#define RTAVFS_REG190__RTAVFSRESETRETENTIONREGS_MASK 0x00000200L
+#define RTAVFS_REG190__RESERVED_MASK 0xFFFFFC00L
+//RTAVFS_REG191
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTUP__SHIFT 0x0
+#define RTAVFS_REG191__RTAVFSSTOPATIDLE__SHIFT 0x1
+#define RTAVFS_REG191__RTAVFSSTOPATRESETCPORIPPLECOUNTERS__SHIFT 0x2
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTCPOS__SHIFT 0x3
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTRIPPLECOUNTERS__SHIFT 0x4
+#define RTAVFS_REG191__RTAVFSSTOPATRIPPLECOUNTERSDONE__SHIFT 0x5
+#define RTAVFS_REG191__RTAVFSSTOPATCPOFINALRESULTREADY__SHIFT 0x6
+#define RTAVFS_REG191__RTAVFSSTOPATVOLTCODEREADY__SHIFT 0x7
+#define RTAVFS_REG191__RTAVFSSTOPATTARGETVOLATGEREADY__SHIFT 0x8
+#define RTAVFS_REG191__RTAVFSSTOPATSTOPCPOS__SHIFT 0x9
+#define RTAVFS_REG191__RTAVFSSTOPATWAITFORACK__SHIFT 0xa
+#define RTAVFS_REG191__RESERVED__SHIFT 0xb
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTUP_MASK 0x00000001L
+#define RTAVFS_REG191__RTAVFSSTOPATIDLE_MASK 0x00000002L
+#define RTAVFS_REG191__RTAVFSSTOPATRESETCPORIPPLECOUNTERS_MASK 0x00000004L
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTCPOS_MASK 0x00000008L
+#define RTAVFS_REG191__RTAVFSSTOPATSTARTRIPPLECOUNTERS_MASK 0x00000010L
+#define RTAVFS_REG191__RTAVFSSTOPATRIPPLECOUNTERSDONE_MASK 0x00000020L
+#define RTAVFS_REG191__RTAVFSSTOPATCPOFINALRESULTREADY_MASK 0x00000040L
+#define RTAVFS_REG191__RTAVFSSTOPATVOLTCODEREADY_MASK 0x00000080L
+#define RTAVFS_REG191__RTAVFSSTOPATTARGETVOLATGEREADY_MASK 0x00000100L
+#define RTAVFS_REG191__RTAVFSSTOPATSTOPCPOS_MASK 0x00000200L
+#define RTAVFS_REG191__RTAVFSSTOPATWAITFORACK_MASK 0x00000400L
+#define RTAVFS_REG191__RESERVED_MASK 0xFFFFF800L
+//RTAVFS_REG192
+#define RTAVFS_REG192__RTAVFSAVFSSCALEDCPOCOUNT__SHIFT 0x0
+#define RTAVFS_REG192__RTAVFSAVFSFINALMINCPOCOUNT__SHIFT 0x10
+#define RTAVFS_REG192__RTAVFSAVFSSCALEDCPOCOUNT_MASK 0x0000FFFFL
+#define RTAVFS_REG192__RTAVFSAVFSFINALMINCPOCOUNT_MASK 0xFFFF0000L
+//RTAVFS_REG193
+#define RTAVFS_REG193__RTAVFSFSMSTATE__SHIFT 0x0
+#define RTAVFS_REG193__RESERVED__SHIFT 0x10
+#define RTAVFS_REG193__RTAVFSFSMSTATE_MASK 0x0000FFFFL
+#define RTAVFS_REG193__RESERVED_MASK 0xFFFF0000L
+//RTAVFS_REG194
+#define RTAVFS_REG194__RTAVFSRIPPLECNTREAD__SHIFT 0x0
+#define RTAVFS_REG194__RTAVFSRIPPLECNTREAD_MASK 0xFFFFFFFFL
+
+
+// addressBlock: sqind
+//SQ_DEBUG_STS_LOCAL
+#define SQ_DEBUG_STS_LOCAL__BUSY__SHIFT 0x0
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL__SHIFT 0x4
+#define SQ_DEBUG_STS_LOCAL__SQ_BUSY__SHIFT 0xc
+#define SQ_DEBUG_STS_LOCAL__IS_BUSY__SHIFT 0xd
+#define SQ_DEBUG_STS_LOCAL__IB_BUSY__SHIFT 0xe
+#define SQ_DEBUG_STS_LOCAL__ARB_BUSY__SHIFT 0xf
+#define SQ_DEBUG_STS_LOCAL__EXP_BUSY__SHIFT 0x10
+#define SQ_DEBUG_STS_LOCAL__BRMSG_BUSY__SHIFT 0x11
+#define SQ_DEBUG_STS_LOCAL__VM_BUSY__SHIFT 0x12
+#define SQ_DEBUG_STS_LOCAL__BUSY_MASK 0x00000001L
+#define SQ_DEBUG_STS_LOCAL__WAVE_LEVEL_MASK 0x000003F0L
+#define SQ_DEBUG_STS_LOCAL__SQ_BUSY_MASK 0x00001000L
+#define SQ_DEBUG_STS_LOCAL__IS_BUSY_MASK 0x00002000L
+#define SQ_DEBUG_STS_LOCAL__IB_BUSY_MASK 0x00004000L
+#define SQ_DEBUG_STS_LOCAL__ARB_BUSY_MASK 0x00008000L
+#define SQ_DEBUG_STS_LOCAL__EXP_BUSY_MASK 0x00010000L
+#define SQ_DEBUG_STS_LOCAL__BRMSG_BUSY_MASK 0x00020000L
+#define SQ_DEBUG_STS_LOCAL__VM_BUSY_MASK 0x00040000L
+//SQ_DEBUG_CTRL_LOCAL
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED__SHIFT 0x0
+#define SQ_DEBUG_CTRL_LOCAL__UNUSED_MASK 0x000000FFL
+//SQ_WAVE_ACTIVE
+#define SQ_WAVE_ACTIVE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_ACTIVE__WAVE_SLOT_MASK 0x000FFFFFL
+//SQ_WAVE_VALID_AND_IDLE
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT__SHIFT 0x0
+#define SQ_WAVE_VALID_AND_IDLE__WAVE_SLOT_MASK 0x000FFFFFL
+//SQ_WAVE_MODE
+#define SQ_WAVE_MODE__FP_ROUND__SHIFT 0x0
+#define SQ_WAVE_MODE__FP_DENORM__SHIFT 0x4
+#define SQ_WAVE_MODE__DX10_CLAMP__SHIFT 0x8
+#define SQ_WAVE_MODE__IEEE__SHIFT 0x9
+#define SQ_WAVE_MODE__LOD_CLAMPED__SHIFT 0xa
+#define SQ_WAVE_MODE__TRAP_AFTER_INST_EN__SHIFT 0xb
+#define SQ_WAVE_MODE__EXCP_EN__SHIFT 0xc
+#define SQ_WAVE_MODE__WAVE_END__SHIFT 0x15
+#define SQ_WAVE_MODE__FP16_OVFL__SHIFT 0x17
+#define SQ_WAVE_MODE__DISABLE_PERF__SHIFT 0x1b
+#define SQ_WAVE_MODE__FP_ROUND_MASK 0x0000000FL
+#define SQ_WAVE_MODE__FP_DENORM_MASK 0x000000F0L
+#define SQ_WAVE_MODE__DX10_CLAMP_MASK 0x00000100L
+#define SQ_WAVE_MODE__IEEE_MASK 0x00000200L
+#define SQ_WAVE_MODE__LOD_CLAMPED_MASK 0x00000400L
+#define SQ_WAVE_MODE__TRAP_AFTER_INST_EN_MASK 0x00000800L
+#define SQ_WAVE_MODE__EXCP_EN_MASK 0x001FF000L
+#define SQ_WAVE_MODE__WAVE_END_MASK 0x00200000L
+#define SQ_WAVE_MODE__FP16_OVFL_MASK 0x00800000L
+#define SQ_WAVE_MODE__DISABLE_PERF_MASK 0x08000000L
+//SQ_WAVE_STATUS
+#define SQ_WAVE_STATUS__SCC__SHIFT 0x0
+#define SQ_WAVE_STATUS__SPI_PRIO__SHIFT 0x1
+#define SQ_WAVE_STATUS__USER_PRIO__SHIFT 0x3
+#define SQ_WAVE_STATUS__PRIV__SHIFT 0x5
+#define SQ_WAVE_STATUS__TRAP_EN__SHIFT 0x6
+#define SQ_WAVE_STATUS__TTRACE_EN__SHIFT 0x7
+#define SQ_WAVE_STATUS__EXPORT_RDY__SHIFT 0x8
+#define SQ_WAVE_STATUS__EXECZ__SHIFT 0x9
+#define SQ_WAVE_STATUS__VCCZ__SHIFT 0xa
+#define SQ_WAVE_STATUS__IN_TG__SHIFT 0xb
+#define SQ_WAVE_STATUS__IN_BARRIER__SHIFT 0xc
+#define SQ_WAVE_STATUS__HALT__SHIFT 0xd
+#define SQ_WAVE_STATUS__TRAP__SHIFT 0xe
+#define SQ_WAVE_STATUS__TTRACE_SIMD_EN__SHIFT 0xf
+#define SQ_WAVE_STATUS__VALID__SHIFT 0x10
+#define SQ_WAVE_STATUS__ECC_ERR__SHIFT 0x11
+#define SQ_WAVE_STATUS__SKIP_EXPORT__SHIFT 0x12
+#define SQ_WAVE_STATUS__PERF_EN__SHIFT 0x13
+#define SQ_WAVE_STATUS__COND_DBG_USER__SHIFT 0x14
+#define SQ_WAVE_STATUS__COND_DBG_SYS__SHIFT 0x15
+#define SQ_WAVE_STATUS__OREO_CONFLICT__SHIFT 0x16
+#define SQ_WAVE_STATUS__FATAL_HALT__SHIFT 0x17
+#define SQ_WAVE_STATUS__NO_VGPRS__SHIFT 0x18
+#define SQ_WAVE_STATUS__LDS_PARAM_READY__SHIFT 0x19
+#define SQ_WAVE_STATUS__MUST_GS_ALLOC__SHIFT 0x1a
+#define SQ_WAVE_STATUS__MUST_EXPORT__SHIFT 0x1b
+#define SQ_WAVE_STATUS__IDLE__SHIFT 0x1c
+#define SQ_WAVE_STATUS__SCRATCH_EN__SHIFT 0x1d
+#define SQ_WAVE_STATUS__SCC_MASK 0x00000001L
+#define SQ_WAVE_STATUS__SPI_PRIO_MASK 0x00000006L
+#define SQ_WAVE_STATUS__USER_PRIO_MASK 0x00000018L
+#define SQ_WAVE_STATUS__PRIV_MASK 0x00000020L
+#define SQ_WAVE_STATUS__TRAP_EN_MASK 0x00000040L
+#define SQ_WAVE_STATUS__TTRACE_EN_MASK 0x00000080L
+#define SQ_WAVE_STATUS__EXPORT_RDY_MASK 0x00000100L
+#define SQ_WAVE_STATUS__EXECZ_MASK 0x00000200L
+#define SQ_WAVE_STATUS__VCCZ_MASK 0x00000400L
+#define SQ_WAVE_STATUS__IN_TG_MASK 0x00000800L
+#define SQ_WAVE_STATUS__IN_BARRIER_MASK 0x00001000L
+#define SQ_WAVE_STATUS__HALT_MASK 0x00002000L
+#define SQ_WAVE_STATUS__TRAP_MASK 0x00004000L
+#define SQ_WAVE_STATUS__TTRACE_SIMD_EN_MASK 0x00008000L
+#define SQ_WAVE_STATUS__VALID_MASK 0x00010000L
+#define SQ_WAVE_STATUS__ECC_ERR_MASK 0x00020000L
+#define SQ_WAVE_STATUS__SKIP_EXPORT_MASK 0x00040000L
+#define SQ_WAVE_STATUS__PERF_EN_MASK 0x00080000L
+#define SQ_WAVE_STATUS__COND_DBG_USER_MASK 0x00100000L
+#define SQ_WAVE_STATUS__COND_DBG_SYS_MASK 0x00200000L
+#define SQ_WAVE_STATUS__OREO_CONFLICT_MASK 0x00400000L
+#define SQ_WAVE_STATUS__FATAL_HALT_MASK 0x00800000L
+#define SQ_WAVE_STATUS__NO_VGPRS_MASK 0x01000000L
+#define SQ_WAVE_STATUS__LDS_PARAM_READY_MASK 0x02000000L
+#define SQ_WAVE_STATUS__MUST_GS_ALLOC_MASK 0x04000000L
+#define SQ_WAVE_STATUS__MUST_EXPORT_MASK 0x08000000L
+#define SQ_WAVE_STATUS__IDLE_MASK 0x10000000L
+#define SQ_WAVE_STATUS__SCRATCH_EN_MASK 0x20000000L
+//SQ_WAVE_TRAPSTS
+#define SQ_WAVE_TRAPSTS__EXCP__SHIFT 0x0
+#define SQ_WAVE_TRAPSTS__SAVECTX__SHIFT 0xa
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST__SHIFT 0xb
+#define SQ_WAVE_TRAPSTS__EXCP_HI__SHIFT 0xc
+#define SQ_WAVE_TRAPSTS__BUFFER_OOB__SHIFT 0xf
+#define SQ_WAVE_TRAPSTS__HOST_TRAP__SHIFT 0x10
+#define SQ_WAVE_TRAPSTS__WAVESTART__SHIFT 0x11
+#define SQ_WAVE_TRAPSTS__WAVE_END__SHIFT 0x12
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT__SHIFT 0x13
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST__SHIFT 0x14
+#define SQ_WAVE_TRAPSTS__UTC_ERROR__SHIFT 0x1c
+#define SQ_WAVE_TRAPSTS__EXCP_MASK 0x000001FFL
+#define SQ_WAVE_TRAPSTS__SAVECTX_MASK 0x00000400L
+#define SQ_WAVE_TRAPSTS__ILLEGAL_INST_MASK 0x00000800L
+#define SQ_WAVE_TRAPSTS__EXCP_HI_MASK 0x00007000L
+#define SQ_WAVE_TRAPSTS__BUFFER_OOB_MASK 0x00008000L
+#define SQ_WAVE_TRAPSTS__HOST_TRAP_MASK 0x00010000L
+#define SQ_WAVE_TRAPSTS__WAVESTART_MASK 0x00020000L
+#define SQ_WAVE_TRAPSTS__WAVE_END_MASK 0x00040000L
+#define SQ_WAVE_TRAPSTS__PERF_SNAPSHOT_MASK 0x00080000L
+#define SQ_WAVE_TRAPSTS__TRAP_AFTER_INST_MASK 0x00100000L
+#define SQ_WAVE_TRAPSTS__UTC_ERROR_MASK 0x10000000L
+//SQ_WAVE_GPR_ALLOC
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE__SHIFT 0x0
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE__SHIFT 0xc
+#define SQ_WAVE_GPR_ALLOC__VGPR_BASE_MASK 0x000001FFL
+#define SQ_WAVE_GPR_ALLOC__VGPR_SIZE_MASK 0x000FF000L
+//SQ_WAVE_LDS_ALLOC
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE__SHIFT 0x0
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE__SHIFT 0xc
+#define SQ_WAVE_LDS_ALLOC__VGPR_SHARED_SIZE__SHIFT 0x18
+#define SQ_WAVE_LDS_ALLOC__LDS_BASE_MASK 0x000001FFL
+#define SQ_WAVE_LDS_ALLOC__LDS_SIZE_MASK 0x001FF000L
+#define SQ_WAVE_LDS_ALLOC__VGPR_SHARED_SIZE_MASK 0x0F000000L
+//SQ_WAVE_IB_STS
+#define SQ_WAVE_IB_STS__EXP_CNT__SHIFT 0x0
+#define SQ_WAVE_IB_STS__LGKM_CNT__SHIFT 0x4
+#define SQ_WAVE_IB_STS__VM_CNT__SHIFT 0xa
+#define SQ_WAVE_IB_STS__VS_CNT__SHIFT 0x1a
+#define SQ_WAVE_IB_STS__EXP_CNT_MASK 0x00000007L
+#define SQ_WAVE_IB_STS__LGKM_CNT_MASK 0x000003F0L
+#define SQ_WAVE_IB_STS__VM_CNT_MASK 0x0000FC00L
+#define SQ_WAVE_IB_STS__VS_CNT_MASK 0xFC000000L
+//SQ_WAVE_PC_LO
+#define SQ_WAVE_PC_LO__PC_LO__SHIFT 0x0
+#define SQ_WAVE_PC_LO__PC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_PC_HI
+#define SQ_WAVE_PC_HI__PC_HI__SHIFT 0x0
+#define SQ_WAVE_PC_HI__PC_HI_MASK 0x0000FFFFL
+//SQ_WAVE_IB_DBG1
+#define SQ_WAVE_IB_DBG1__WAVE_IDLE__SHIFT 0x18
+#define SQ_WAVE_IB_DBG1__MISC_CNT__SHIFT 0x19
+#define SQ_WAVE_IB_DBG1__WAVE_IDLE_MASK 0x01000000L
+#define SQ_WAVE_IB_DBG1__MISC_CNT_MASK 0xFE000000L
+//SQ_WAVE_FLUSH_IB
+#define SQ_WAVE_FLUSH_IB__UNUSED__SHIFT 0x0
+#define SQ_WAVE_FLUSH_IB__UNUSED_MASK 0xFFFFFFFFL
+//SQ_WAVE_FLAT_SCRATCH_LO
+#define SQ_WAVE_FLAT_SCRATCH_LO__DATA__SHIFT 0x0
+#define SQ_WAVE_FLAT_SCRATCH_LO__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_FLAT_SCRATCH_HI
+#define SQ_WAVE_FLAT_SCRATCH_HI__DATA__SHIFT 0x0
+#define SQ_WAVE_FLAT_SCRATCH_HI__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_HW_ID1
+#define SQ_WAVE_HW_ID1__WAVE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID1__SIMD_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID1__WGP_ID__SHIFT 0xa
+#define SQ_WAVE_HW_ID1__SA_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID1__SE_ID__SHIFT 0x12
+#define SQ_WAVE_HW_ID1__DP_RATE__SHIFT 0x1d
+#define SQ_WAVE_HW_ID1__WAVE_ID_MASK 0x0000001FL
+#define SQ_WAVE_HW_ID1__SIMD_ID_MASK 0x00000300L
+#define SQ_WAVE_HW_ID1__WGP_ID_MASK 0x00003C00L
+#define SQ_WAVE_HW_ID1__SA_ID_MASK 0x00010000L
+#define SQ_WAVE_HW_ID1__SE_ID_MASK 0x001C0000L
+#define SQ_WAVE_HW_ID1__DP_RATE_MASK 0xE0000000L
+//SQ_WAVE_HW_ID2
+#define SQ_WAVE_HW_ID2__QUEUE_ID__SHIFT 0x0
+#define SQ_WAVE_HW_ID2__PIPE_ID__SHIFT 0x4
+#define SQ_WAVE_HW_ID2__ME_ID__SHIFT 0x8
+#define SQ_WAVE_HW_ID2__STATE_ID__SHIFT 0xc
+#define SQ_WAVE_HW_ID2__WG_ID__SHIFT 0x10
+#define SQ_WAVE_HW_ID2__VM_ID__SHIFT 0x18
+#define SQ_WAVE_HW_ID2__QUEUE_ID_MASK 0x0000000FL
+#define SQ_WAVE_HW_ID2__PIPE_ID_MASK 0x00000030L
+#define SQ_WAVE_HW_ID2__ME_ID_MASK 0x00000300L
+#define SQ_WAVE_HW_ID2__STATE_ID_MASK 0x00007000L
+#define SQ_WAVE_HW_ID2__WG_ID_MASK 0x001F0000L
+#define SQ_WAVE_HW_ID2__VM_ID_MASK 0x0F000000L
+//SQ_WAVE_POPS_PACKER
+#define SQ_WAVE_POPS_PACKER__POPS_EN__SHIFT 0x0
+#define SQ_WAVE_POPS_PACKER__POPS_PACKER_ID__SHIFT 0x1
+#define SQ_WAVE_POPS_PACKER__POPS_EN_MASK 0x00000001L
+#define SQ_WAVE_POPS_PACKER__POPS_PACKER_ID_MASK 0x00000006L
+//SQ_WAVE_SCHED_MODE
+#define SQ_WAVE_SCHED_MODE__DEP_MODE__SHIFT 0x0
+#define SQ_WAVE_SCHED_MODE__DEP_MODE_MASK 0x00000003L
+//SQ_WAVE_IB_STS2
+#define SQ_WAVE_IB_STS2__INST_PREFETCH__SHIFT 0x0
+#define SQ_WAVE_IB_STS2__MEM_ORDER__SHIFT 0x8
+#define SQ_WAVE_IB_STS2__FWD_PROGRESS__SHIFT 0xa
+#define SQ_WAVE_IB_STS2__WAVE64__SHIFT 0xb
+#define SQ_WAVE_IB_STS2__INST_PREFETCH_MASK 0x00000003L
+#define SQ_WAVE_IB_STS2__MEM_ORDER_MASK 0x00000300L
+#define SQ_WAVE_IB_STS2__FWD_PROGRESS_MASK 0x00000400L
+#define SQ_WAVE_IB_STS2__WAVE64_MASK 0x00000800L
+//SQ_WAVE_SHADER_CYCLES
+#define SQ_WAVE_SHADER_CYCLES__CYCLES__SHIFT 0x0
+#define SQ_WAVE_SHADER_CYCLES__CYCLES_MASK 0x000FFFFFL
+//SQ_WAVE_TTMP0
+#define SQ_WAVE_TTMP0__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP0__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP1
+#define SQ_WAVE_TTMP1__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP1__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP2
+#define SQ_WAVE_TTMP2__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP2__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP3
+#define SQ_WAVE_TTMP3__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP3__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP4
+#define SQ_WAVE_TTMP4__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP4__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP5
+#define SQ_WAVE_TTMP5__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP5__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP6
+#define SQ_WAVE_TTMP6__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP6__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP7
+#define SQ_WAVE_TTMP7__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP7__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP8
+#define SQ_WAVE_TTMP8__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP8__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP9
+#define SQ_WAVE_TTMP9__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP9__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP10
+#define SQ_WAVE_TTMP10__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP10__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP11
+#define SQ_WAVE_TTMP11__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP11__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP12
+#define SQ_WAVE_TTMP12__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP12__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP13
+#define SQ_WAVE_TTMP13__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP13__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP14
+#define SQ_WAVE_TTMP14__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP14__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_TTMP15
+#define SQ_WAVE_TTMP15__DATA__SHIFT 0x0
+#define SQ_WAVE_TTMP15__DATA_MASK 0xFFFFFFFFL
+//SQ_WAVE_M0
+#define SQ_WAVE_M0__M0__SHIFT 0x0
+#define SQ_WAVE_M0__M0_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_LO
+#define SQ_WAVE_EXEC_LO__EXEC_LO__SHIFT 0x0
+#define SQ_WAVE_EXEC_LO__EXEC_LO_MASK 0xFFFFFFFFL
+//SQ_WAVE_EXEC_HI
+#define SQ_WAVE_EXEC_HI__EXEC_HI__SHIFT 0x0
+#define SQ_WAVE_EXEC_HI__EXEC_HI_MASK 0xFFFFFFFFL
+
+
+
+#endif
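
The SQ_WAVE_* pairs above follow the usual shift/mask convention: AND with the field mask, then shift right by the field's shift. A minimal sketch of decoding a few SQ_WAVE_HW_ID1 fields under that convention (the hw_id1 value itself would come from the wave-debug read path, which is not part of this header):

	#include <stdint.h>
	#include <stdio.h>

	/* Copied from the header above so the example stands alone. */
	#define SQ_WAVE_HW_ID1__WAVE_ID__SHIFT 0x0
	#define SQ_WAVE_HW_ID1__SIMD_ID__SHIFT 0x8
	#define SQ_WAVE_HW_ID1__SE_ID__SHIFT   0x12
	#define SQ_WAVE_HW_ID1__WAVE_ID_MASK   0x0000001FL
	#define SQ_WAVE_HW_ID1__SIMD_ID_MASK   0x00000300L
	#define SQ_WAVE_HW_ID1__SE_ID_MASK     0x001C0000L

	static void decode_hw_id1(uint32_t hw_id1)
	{
		/* Mask out each field, then shift it down to bit 0. */
		uint32_t wave_id = (hw_id1 & SQ_WAVE_HW_ID1__WAVE_ID_MASK) >> SQ_WAVE_HW_ID1__WAVE_ID__SHIFT;
		uint32_t simd_id = (hw_id1 & SQ_WAVE_HW_ID1__SIMD_ID_MASK) >> SQ_WAVE_HW_ID1__SIMD_ID__SHIFT;
		uint32_t se_id   = (hw_id1 & SQ_WAVE_HW_ID1__SE_ID_MASK)   >> SQ_WAVE_HW_ID1__SE_ID__SHIFT;

		printf("wave %u on SIMD %u, SE %u\n",
		       (unsigned)wave_id, (unsigned)simd_id, (unsigned)se_id);
	}
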
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index d8632ccf3494..c488d4a50cf4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -4409,6 +4409,10 @@
#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL 0x0af9
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
// addressBlock: mmhub_utcl2_vmsharedvcdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 111a71b434e2..2969fbf282b7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -26728,6 +26728,14 @@
//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
// addressBlock: mmhub_utcl2_vmsharedvcdec
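
The new MC_VM_XGMI_LFB_* defines give the memory-controller code enough to tell which slice of the XGMI linear frame buffer belongs to this function and how many slices are populated. A hedged sketch of the field extraction only (mmhub_read() is a hypothetical stand-in for the driver's real MMIO accessors, and any unit scaling of PF_LFB_SIZE is deliberately left out because it is not defined in these headers):

	/* Hypothetical raw MMIO read; the driver would use its SOC15 register helpers. */
	extern uint32_t mmhub_read(uint32_t reg);

	static void xgmi_lfb_info(uint32_t *region, uint32_t *max_region, uint32_t *size_field)
	{
		uint32_t cntl = mmhub_read(mmMC_VM_XGMI_LFB_CNTL);
		uint32_t size = mmhub_read(mmMC_VM_XGMI_LFB_SIZE);

		/* Which LFB region this function owns, and the highest region index present. */
		*region     = (cntl & MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK) >> MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT;
		*max_region = (cntl & MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK) >> MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT;

		/* Raw size field; how it maps to bytes is defined elsewhere. */
		*size_field = (size & MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK) >> MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT;
	}
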
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 7e3231c2191c..a40ead44778a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -824,4 +824,62 @@ struct gpu_metrics_v2_2 {
uint64_t indep_throttle_status;
};
+struct gpu_metrics_v2_3 {
+ struct metrics_table_header common_header;
+
+ /* Temperature */
+ uint16_t temperature_gfx; // gfx temperature on APUs
+ uint16_t temperature_soc; // soc temperature on APUs
+ uint16_t temperature_core[8]; // CPU core temperature on APUs
+ uint16_t temperature_l3[2];
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Power/Energy */
+ uint16_t average_socket_power; // dGPU + APU power on A + A platform
+ uint16_t average_cpu_power;
+ uint16_t average_soc_power;
+ uint16_t average_gfx_power;
+ uint16_t average_core_power[8]; // CPU core power on APUs
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_fclk_frequency;
+ uint16_t average_vclk_frequency;
+ uint16_t average_dclk_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_fclk;
+ uint16_t current_vclk;
+ uint16_t current_dclk;
+ uint16_t current_coreclk[8]; // CPU core clocks
+ uint16_t current_l3clk[2];
+
+ /* Throttle status (ASIC dependent) */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t fan_pwm;
+
+ uint16_t padding[3];
+
+ /* Throttle status (ASIC independent) */
+ uint64_t indep_throttle_status;
+
+ /* Average Temperature */
+ uint16_t average_temperature_gfx; // average gfx temperature on APUs
+ uint16_t average_temperature_soc; // average soc temperature on APUs
+ uint16_t average_temperature_core[8]; // average CPU core temperature on APUs
+ uint16_t average_temperature_l3[2];
+};
#endif
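
gpu_metrics_v2_3 keeps the v2_2 layout and only appends the average-temperature fields at the end, so a consumer can key off the header revisions before touching the new entries. A rough userspace sketch under stated assumptions (the sysfs path is the usual amdgpu gpu_metrics attribute, and the local struct mirrors metrics_table_header):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	struct metrics_header {		/* mirrors metrics_table_header */
		uint16_t structure_size;
		uint8_t  format_revision;
		uint8_t  content_revision;
	};

	int main(void)
	{
		uint8_t buf[4096];
		struct metrics_header hdr;
		FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
		size_t n;

		if (!f)
			return 1;
		n = fread(buf, 1, sizeof(buf), f);
		fclose(f);
		if (n < sizeof(hdr))
			return 1;

		memcpy(&hdr, buf, sizeof(hdr));
		/* v2.3 and later carry the average temperature fields appended above. */
		if (hdr.format_revision == 2 && hdr.content_revision >= 3)
			printf("v2.%u metrics: average temperatures available\n", hdr.content_revision);
		return 0;
	}
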
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 80dab1146439..7e85cdc5bd34 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -268,7 +268,9 @@ union MESAPI__ADD_QUEUE {
uint32_t is_tmz_queue : 1;
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
- uint32_t reserved : 22;
+ uint32_t trap_en : 1;
+ uint32_t is_aql_queue : 1;
+ uint32_t reserved : 20;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
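
Carving trap_en and is_aql_queue out of the reserved bits lets an ADD_QUEUE request tell the MES firmware to enable the trap handler and to treat the queue as AQL. A hedged sketch of setting those flags (only the bitfields visible in this hunk are referenced; the rest of the packet setup is elided, and direct access assumes the bitfields sit in an anonymous struct inside the union):

	union MESAPI__ADD_QUEUE mes_add_queue_pkt;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
	/* ... header, gang context, doorbell, MQD address, etc. ... */
	mes_add_queue_pkt.is_kfd_process = 1;	/* existing flag, shown above */
	mes_add_queue_pkt.trap_en        = 1;	/* new: enable the trap handler for this queue */
	mes_add_queue_pkt.is_aql_queue   = 1;	/* new: queue uses the AQL packet format */
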
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 956b6ce81c84..1b300c569faf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -668,6 +668,51 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_set_residency_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_residency_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_entrycount_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
struct smu_context *smu = adev->powerplay.pp_handle;
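
All three new wrappers follow the same shape: return -EOPNOTSUPP unless the SW SMU path is active, then take adev->pm.mutex around the smu_*() call. A sketch of how a caller might combine them to sample GFXOFF residency over a window (the window length and overall flow are illustrative, not taken from this patch):

	static int gfxoff_residency_sample(struct amdgpu_device *adev, u32 *residency)
	{
		int r;

		/* Start logging, give the workload a window, then stop and read back. */
		r = amdgpu_dpm_set_residency_gfxoff(adev, true);
		if (r)
			return r;

		msleep(1000);	/* arbitrary sampling window */

		r = amdgpu_dpm_set_residency_gfxoff(adev, false);
		if (r)
			return r;

		return amdgpu_dpm_get_residency_gfxoff(adev, residency);
	}
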
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5e318b3f6c0f..948cc75376f8 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3405,9 +3405,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
- if (adev->pm.dpm_enabled == 0)
- return;
-
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 65624d091ed2..cb5b9df78b4d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -435,6 +435,9 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev);
int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
uint64_t event_arg);
+int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value);
+int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value);
+int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value);
int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev);
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 1eb4e613b27a..ec055858eb95 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1485,6 +1485,7 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
+ int err;
if (!addr || !size)
return -EINVAL;
@@ -1492,7 +1493,9 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
if (adev->pm.smu_prv_buffer) {
- amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ if (err)
+ return err;
*size = adev->pm.smu_prv_buffer_size;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index dad3e3741a4e..190af79f3236 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- uint32_t current_rpm;
- uint32_t percent = 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
- if (vega10_get_current_rpm(hwmgr, &current_rpm))
- return -1;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
- if (hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM != 0)
- percent = current_rpm * 255 /
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM;
+ if (!duty100)
+ return -EINVAL;
- *speed = MIN(percent, 255);
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
index 1e79baab753e..bd54fbd393b9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
@@ -195,7 +195,6 @@ static int init_powerplay_table_information(
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
uint32_t disable_power_control = 0;
- int result;
hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
@@ -257,9 +256,7 @@ static int init_powerplay_table_information(
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
- result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
-
- return result;
+ return append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
}
static int vega12_pp_tables_initialize(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index 6e0be6027705..01a7d66864f2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
@@ -401,8 +401,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
-extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
-extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
extern int phm_set_power_state(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pcurrent_state,
const struct pp_hw_power_state *pnew_power_state);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index 45214a364baa..e7ed2a7adf8f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2567,15 +2567,13 @@ static uint8_t polaris10_get_memory_modile_index(struct pp_hwmgr *hwmgr)
static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- int result;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *mc_reg_table = &smu_data->mc_reg_table;
uint8_t module_index = polaris10_get_memory_modile_index(hwmgr);
memset(mc_reg_table, 0, sizeof(pp_atomctrl_mc_reg_table));
- result = atomctrl_initialize_mc_reg_table_v2_2(hwmgr, module_index, mc_reg_table);
- return result;
+ return atomctrl_initialize_mc_reg_table_v2_2(hwmgr, module_index, mc_reg_table);
}
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 7510d470b864..13c5c7f1ecb9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -90,6 +90,30 @@ static int smu_sys_set_pp_feature_mask(void *handle,
return smu_set_pp_feature_mask(smu, new_mask);
}
+int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
+{
+ if (!smu->ppt_funcs->set_gfx_off_residency)
+ return -EINVAL;
+
+ return smu_set_gfx_off_residency(smu, value);
+}
+
+int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
+{
+ if (!smu->ppt_funcs->get_gfx_off_residency)
+ return -EINVAL;
+
+ return smu_get_gfx_off_residency(smu, value);
+}
+
+int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
+{
+ if (!smu->ppt_funcs->get_gfx_off_entrycount)
+ return -EINVAL;
+
+ return smu_get_gfx_off_entrycount(smu, value);
+}
+
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
if (!smu->ppt_funcs->get_gfx_off_status)
@@ -581,6 +605,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
smu->od_enabled = true;
break;
case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 10):
smu_v13_0_0_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 7):
@@ -1576,6 +1601,7 @@ static int smu_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
+ uint64_t count;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1593,6 +1619,14 @@ static int smu_suspend(void *handle)
smu_set_gfx_cgpg(smu, false);
+ /*
+	 * PMFW resets the entry count when the device is suspended, so save
+	 * the last value here and reuse it on resume to keep the count consistent
+ */
+ ret = smu_get_entrycount_gfxoff(smu, &count);
+ if (!ret)
+ adev->gfx.gfx_off_entrycount = count;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b81c657c7386..e2fa3b066b96 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1112,6 +1112,22 @@ struct pptable_funcs {
uint32_t (*get_gfx_off_status)(struct smu_context *smu);
/**
+	 * @get_gfx_off_entrycount: query the total GFXOFF entry count since
+	 * system power-up
+ */
+ u32 (*get_gfx_off_entrycount)(struct smu_context *smu, uint64_t *entrycount);
+
+ /**
+	 * @set_gfx_off_residency: pass true to start GFXOFF residency logging, false to stop it
+ */
+ u32 (*set_gfx_off_residency)(struct smu_context *smu, bool start);
+
+ /**
+	 * @get_gfx_off_residency: query the average GFXOFF residency (%) over the logging interval
+ */
+ u32 (*get_gfx_off_residency)(struct smu_context *smu, uint32_t *residency);
+
+ /**
 	 * @register_irq_handler: Register interrupt request handlers.
*/
int (*register_irq_handler)(struct smu_context *smu);
@@ -1454,6 +1470,12 @@ int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
+int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value);
+
+int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value);
+
+int smu_set_residency_gfxoff(struct smu_context *smu, bool value);
+
int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index f745cd8f1ab7..063f4a737605 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -25,7 +25,7 @@
#define SMU13_DRIVER_IF_V13_0_0_H
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x22
+#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
index d2e10a724560..82cf9e563065 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
@@ -137,7 +137,7 @@
#define PPSMC_MSG_DisallowGpo 0x56
#define PPSMC_MSG_Enable2ndUSB20Port 0x57
-
-#define PPSMC_Message_Count 0x58
+#define PPSMC_MSG_DriverMode2Reset 0x5D
+#define PPSMC_Message_Count 0x5E
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
index fe130a497d6c..7471e2df2828 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
@@ -108,7 +108,10 @@
#define PPSMC_MSG_SetSlowPPTLimit 0x4A
#define PPSMC_MSG_GetFastPPTLimit 0x4B
#define PPSMC_MSG_GetSlowPPTLimit 0x4C
-#define PPSMC_Message_Count 0x4D
+#define PPSMC_MSG_GetGfxOffStatus 0x50
+#define PPSMC_MSG_GetGfxOffEntryCount 0x51
+#define PPSMC_MSG_LogGfxOffResidency 0x52
+#define PPSMC_Message_Count 0x53
//Argument for PPSMC_MSG_GfxDeviceDriverReset
enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 19084a4fcb2b..58098b82df66 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -235,7 +235,11 @@
__SMU_DUMMY_MAP(UnforceGfxVid), \
__SMU_DUMMY_MAP(HeavySBR), \
__SMU_DUMMY_MAP(SetBadHBMPagesRetiredFlagsPerChannel), \
- __SMU_DUMMY_MAP(EnableGfxImu),
+ __SMU_DUMMY_MAP(EnableGfxImu), \
+ __SMU_DUMMY_MAP(DriverMode2Reset), \
+ __SMU_DUMMY_MAP(GetGfxOffStatus), \
+ __SMU_DUMMY_MAP(GetGfxOffEntryCount), \
+ __SMU_DUMMY_MAP(LogGfxOffResidency),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ac308e72241a..9d62ea2af132 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -30,8 +30,9 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -291,5 +292,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 6db67f082d91..74996a8fb671 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -154,6 +154,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0),
MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0),
MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0),
+ MSG_MAP(DriverMode2Reset, PPSMC_MSG_DriverMode2Reset, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -368,6 +369,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
false;
+
+ /*
+	 * Disable BACO entry/exit completely on the SKUs below to
+	 * avoid intermittent hardware failures.
+ */
+ if (((adev->pdev->device == 0x73A1) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73BF) &&
+ (adev->pdev->revision == 0xCF)))
+ smu_baco->platform_support = false;
+
}
}
@@ -4254,6 +4266,57 @@ static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
return 0;
}
+static bool sienna_cichlid_is_mode2_reset_supported(struct smu_context *smu)
+{
+ return true;
+}
+
+static int sienna_cichlid_mode2_reset(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 100;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_DriverMode2Reset);
+
+ mutex_lock(&smu->message_lock);
+
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
+ SMU_RESET_MODE_2);
+
+ ret = smu_cmn_wait_for_response(smu);
+ while (ret != 0 && timeout) {
+ ret = smu_cmn_wait_for_response(smu);
+		/* Wait a bit longer for the ACK */
+ if (ret != 0) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ if (!timeout) {
+ dev_err(adev->dev,
+ "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+
+ dev_info(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -4349,6 +4412,8 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
.get_unique_id = sienna_cichlid_get_unique_id,
+ .mode2_reset_is_support = sienna_cichlid_is_mode2_reset_supported,
+ .mode2_reset = sienna_cichlid_mode2_reset,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 89504ff8e9ed..cb10c7e31264 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -138,6 +138,9 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
+ MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0),
+ MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0),
+ MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0),
};
static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
@@ -220,14 +223,13 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- struct amdgpu_device *adev = smu->adev;
uint32_t if_version;
+ uint32_t smu_version;
uint32_t ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
- goto err0_out;
+ return ret;
}
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
@@ -252,7 +254,10 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ if (smu_version >= 0x043F3E00)
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
+ else
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -1645,6 +1650,63 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
return 0;
}
+static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_3 *gpu_metrics =
+ (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
+ SmuMetrics_legacy_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);
+
+ gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Power[0];
+ gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_3);
+}
+
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1702,6 +1764,77 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_2);
}
+static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_3 *gpu_metrics =
+ (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
+ gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
+ memcpy(&gpu_metrics->average_temperature_core[0],
+ &metrics.Average.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_3);
+}
+
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1769,20 +1902,26 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
void **table)
{
- struct amdgpu_device *adev = smu->adev;
uint32_t if_version;
+ uint32_t smu_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
return ret;
}
- if (if_version < 0x3)
- ret = vangogh_get_legacy_gpu_metrics(smu, table);
- else
- ret = vangogh_get_gpu_metrics(smu, table);
+ if (smu_version >= 0x043F3E00) {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+ } else {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+ }
return ret;
}
@@ -2200,6 +2339,76 @@ static int vangogh_set_power_limit(struct smu_context *smu,
return ret;
}
+/**
+ * vangogh_set_gfxoff_residency - start or stop GFXOFF residency logging
+ *
+ * @smu: smu_context pointer
+ * @start: true to start residency logging, false to stop it
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
+{
+ int ret = 0;
+ u32 residency;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
+ start, &residency);
+
+ if (!start)
+ adev->gfx.gfx_off_residency = residency;
+
+ return ret;
+}
+
+/**
+ * vangogh_get_gfxoff_residency - get the logged GFXOFF residency
+ *
+ * @smu: smu_context pointer
+ * @residency: placeholder for the average GFXOFF residency (%)
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *residency = adev->gfx.gfx_off_residency;
+
+ return 0;
+}
+
+/**
+ * vangogh_get_gfxoff_entrycount - get the GFXOFF entry count
+ *
+ * @smu: smu_context pointer
+ * @entrycount: placeholder for the total GFXOFF entry count since power-up
+ *
+ * Returns standard response codes.
+ */
+static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
+{
+ int ret = 0, value = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
+ *entrycount = value + adev->gfx.gfx_off_entrycount;
+
+ return ret;
+}
+
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -2237,6 +2446,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.mode2_reset = vangogh_mode2_reset,
.gfx_off_control = smu_v11_0_gfx_off_control,
.get_gfx_off_status = vangogh_get_gfxoff_status,
+ .get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
+ .get_gfx_off_residency = vangogh_get_gfxoff_residency,
+ .set_gfx_off_residency = vangogh_set_gfxoff_residency,
.get_ppt_limit = vangogh_get_ppt_limit,
.get_power_limit = vangogh_get_power_limit,
.set_power_limit = vangogh_set_power_limit,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 18ee3b5e64c5..93fffdbab4f0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -59,6 +59,7 @@
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
#define mmMP1_SMN_C2PMSG_66 0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
@@ -84,9 +85,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id);
-
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -212,7 +210,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
return 0;
/* override pptable_id from driver parameter */
@@ -221,31 +220,6 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- /*
- * Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use 36831 signed pptable when pp_table_id is 3683
- * - use 37151 signed pptable when pp_table_id is 3715
- * - use 36641 signed pptable when pp_table_id is 3664 or 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- case 3664:
- pptable_id = 36641;
- break;
- case 3683:
- pptable_id = 36831;
- break;
- case 3715:
- pptable_id = 37151;
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -330,6 +304,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
case IP_VERSION(13, 0, 5):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
break;
+ case IP_VERSION(13, 0, 10):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -425,8 +402,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
return 0;
}
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
{
const struct smc_firmware_header_v1_0 *hdr;
struct amdgpu_device *adev = smu->adev;
@@ -476,26 +455,8 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664, 3683 or 3715 on request
- * - use 3664 when pptable_id is 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- pptable_id = 3664;
- break;
- case 3664:
- case 3683:
- case 3715:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ pptable_id = 6666;
}
/* force using vbios pptable in sriov mode */
@@ -1107,6 +1068,9 @@ int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
+ if (!smu->irq_source.num_types)
+ return 0;
+
ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
if (ret)
return ret;
@@ -1116,6 +1080,9 @@ int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
{
+ if (!smu->irq_source.num_types)
+ return 0;
+
return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}
@@ -1487,6 +1454,9 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
struct amdgpu_irq_src *irq_src = &smu->irq_source;
int ret = 0;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
irq_src->num_types = 1;
irq_src->funcs = &smu_v13_0_irq_funcs;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index df4a47acd724..1d454485e0d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
if (num > 2)
return -EINVAL;
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
- if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
- (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
-#if 0
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
-
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+ /* PMFW 78.58 contains a critical fix for gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
-
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
- if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
return 0;
}
@@ -388,30 +353,35 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
{
struct smu_table_context *smu_table = &smu->smu_table;
void *combo_pptable = smu_table->combo_pptable;
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * With SCPM enabled, the pptable used will be signed. It cannot
- * be used directly by driver. To get the raw pptable, we need to
- * rely on the combo pptable(and its revelant SMU message).
- */
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_0_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_0_store_powerplay_table(smu);
if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 1016d1c216d8..c422bf8a09b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -400,11 +401,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_7_powerplay_table);
+
+ return 0;
+}
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -413,18 +430,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
* be used directly by driver. To get the raw pptable, we need to
 	 * rely on the combo pptable (and its relevant SMU message).
*/
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_7_store_powerplay_table(smu);
if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 15e4298c7cc8..e4f8f90ac5aa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -969,6 +969,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 2):
structure_size = sizeof(struct gpu_metrics_v2_2);
break;
+ case METRICS_VERSION(2, 3):
+ structure_size = sizeof(struct gpu_metrics_v2_3);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 7469bbfce1fb..ceb13c838067 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -47,6 +47,9 @@
#define smu_notify_memory_pool_location(smu) smu_ppt_funcs(notify_memory_pool_location, 0, smu)
#define smu_gfx_off_control(smu, enable) smu_ppt_funcs(gfx_off_control, 0, smu, enable)
#define smu_get_gfx_off_status(smu) smu_ppt_funcs(get_gfx_off_status, 0, smu)
+#define smu_get_gfx_off_entrycount(smu, value) smu_ppt_funcs(get_gfx_off_entrycount, 0, smu, value)
+#define smu_get_gfx_off_residency(smu, value) smu_ppt_funcs(get_gfx_off_residency, 0, smu, value)
+#define smu_set_gfx_off_residency(smu, value) smu_ppt_funcs(set_gfx_off_residency, 0, smu, value)
#define smu_set_last_dcef_min_deep_sleep_clk(smu) smu_ppt_funcs(set_last_dcef_min_deep_sleep_clk, 0, smu)
#define smu_system_features_control(smu, en) smu_ppt_funcs(system_features_control, 0, smu, en)
#define smu_init_max_sustainable_clocks(smu) smu_ppt_funcs(init_max_sustainable_clocks, 0, smu)
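
The smu_ppt_funcs() wrappers are what keep the new smu_get_residency_gfxoff()/smu_get_entrycount_gfxoff() helpers in amdgpu_smu.c down to a NULL check plus a call: the macro dispatches to the per-ASIC hook when it is populated and otherwise falls back to the default given as its second argument. Roughly, the gfxoff-residency case behaves like this hand-expanded sketch (a sketch of the dispatch shape, not the literal macro output):

	static int smu_get_gfx_off_residency_expanded(struct smu_context *smu, u32 *value)
	{
		/* Dispatch to the per-ASIC hook (e.g. vangogh_get_gfxoff_residency)
		 * when present; otherwise return the macro's default of 0. */
		if (smu->ppt_funcs && smu->ppt_funcs->get_gfx_off_residency)
			return smu->ppt_funcs->get_gfx_off_residency(smu, value);

		return 0;
	}
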
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 6e3f1d600541..c1b89274d2a4 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,7 +6,7 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
@@ -27,7 +27,7 @@ config DRM_MALI_DISPLAY
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you want to compile the ARM Mali Display
diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig
index e91598b60781..4acc4285a4eb 100644
--- a/drivers/gpu/drm/arm/display/Kconfig
+++ b/drivers/gpu/drm/arm/display/Kconfig
@@ -4,7 +4,7 @@ config DRM_KOMEDA
depends on DRM && OF
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you want to compile the ARM Komeda display
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index daa1faccd3e7..6c56f5662bc7 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -310,8 +310,7 @@ static int d71_reset(struct d71_dev *d71)
u32 __iomem *gcu = d71->gcu_addr;
int ret;
- malidp_write32_mask(gcu, BLK_CONTROL,
- GCU_CONTROL_SRST, GCU_CONTROL_SRST);
+ malidp_write32(gcu, BLK_CONTROL, GCU_CONTROL_SRST);
ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
100, 1000, 10000);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 59172acb9738..4cc07d6bb9d8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -235,7 +234,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
} else {
- DRM_WARN("CRTC[%d]: FLIP happen but no pending commit.\n",
+ DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
drm_crtc_index(&kcrtc->base));
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -286,7 +285,7 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc,
komeda_crtc_do_flush(crtc, old);
}
-static void
+void
komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
struct completion *input_flip_done)
{
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index ba16895690f1..9fce4239d4ad 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include "komeda_dev.h"
@@ -72,6 +73,7 @@ static int komeda_bind(struct device *dev)
}
dev_set_drvdata(dev, mdrv);
+ drm_fbdev_generic_setup(&mdrv->kms->base, 32);
return 0;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3c372d2deb0a..df5da5a44755 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -5,9 +5,9 @@
*
*/
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "komeda_framebuffer.h"
@@ -137,7 +137,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
}
min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i)
- - to_drm_gem_cma_obj(obj)->paddr;
+ - to_drm_gem_dma_obj(obj)->dma_addr;
if (obj->size < min_size) {
DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n",
i, obj->size, min_size);
@@ -239,7 +239,7 @@ dma_addr_t
komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
{
struct drm_framebuffer *fb = &kfb->base;
- const struct drm_gem_cma_object *obj;
+ const struct drm_gem_dma_object *obj;
u32 offset, plane_x, plane_y, block_w, block_sz;
if (plane >= fb->format->num_planes) {
@@ -247,7 +247,7 @@ komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
return -EINVAL;
}
- obj = drm_fb_cma_get_gem_obj(fb, plane);
+ obj = drm_fb_dma_get_gem_obj(fb, plane);
offset = fb->offsets[plane];
if (!fb->modifier) {
@@ -260,7 +260,7 @@ komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
+ plane_y * fb->pitches[plane];
}
- return obj->paddr + offset;
+ return obj->dma_addr + offset;
}
/* if the fb can be supported by a specific layer */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 93b7f09b96ca..451746ebbe71 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -11,7 +11,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -21,9 +21,9 @@
#include "komeda_framebuffer.h"
#include "komeda_kms.h"
-DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);
+DEFINE_DRM_GEM_DMA_FOPS(komeda_cma_fops);
-static int komeda_gem_cma_dumb_create(struct drm_file *file,
+static int komeda_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -32,7 +32,7 @@ static int komeda_gem_cma_dumb_create(struct drm_file *file,
args->pitch = ALIGN(pitch, mdev->chip.bus_width);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
@@ -60,7 +60,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
static const struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
@@ -69,6 +69,25 @@ static const struct drm_driver komeda_kms_driver = {
.minor = 1,
};
+static void komeda_kms_atomic_commit_hw_done(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct komeda_kms_dev *kms = to_kdev(dev);
+ int i;
+
+ for (i = 0; i < kms->n_crtcs; i++) {
+ struct komeda_crtc *kcrtc = &kms->crtcs[i];
+
+ if (kcrtc->base.state->active) {
+ struct completion *flip_done = NULL;
+ if (kcrtc->base.state->event)
+ flip_done = kcrtc->base.state->event->base.completion;
+ komeda_crtc_flush_and_wait_for_flip_done(kcrtc, flip_done);
+ }
+ }
+ drm_atomic_helper_commit_hw_done(state);
+}
+
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
@@ -81,7 +100,7 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_commit_hw_done(old_state);
+ komeda_kms_atomic_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(dev, old_state);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 7889e380ab23..7339339ef6b8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -183,6 +183,8 @@ void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms);
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
struct komeda_events *evts);
+void komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
+ struct completion *input_flip_done);
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index e672b9cffee3..3276a3e82c62 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -1271,7 +1271,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
return 0;
}
-/* Since standalong disabled components must be disabled separately and in the
+/* Since standalone disabled components must be disabled separately and in the
* last, a complete disable operation may need to call pipeline_disable
* twice (two-phase disabling).
* Phase 1: disable the common components, flush it.
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index dff22dec54b5..c20ff72f0ae5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index afc9cd856501..7030339fa232 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -18,12 +18,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -252,8 +251,8 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
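A minimal sketch of the renamed no-scaling constants in a plane atomic check, mirroring the hunk above; example_plane_atomic_check() is hypothetical.

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_plane.h>

    static int example_plane_atomic_check(struct drm_plane_state *new_state,
                                          struct drm_crtc_state *crtc_state)
    {
            /* DRM_PLANE_NO_SCALING replaces DRM_PLANE_HELPER_NO_SCALING */
            return drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                       DRM_PLANE_NO_SCALING,
                                                       DRM_PLANE_NO_SCALING,
                                                       false, true);
    }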
@@ -274,7 +273,7 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
return;
dest_h = drm_rect_height(&new_plane_state->dst);
- scanout_start = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
+ scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index e89ae0ec60eb..a032003c340c 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -21,13 +21,13 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -40,8 +40,7 @@
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
- struct drm_device *drm = arg;
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ struct hdlcd_drm_private *hdlcd = arg;
unsigned long irq_status;
irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
@@ -69,61 +68,32 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static void hdlcd_irq_preinstall(struct drm_device *drm)
+static int hdlcd_irq_install(struct hdlcd_drm_private *hdlcd)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
+ int ret;
+
/* Ensure interrupts are disabled */
hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
-}
-
-static void hdlcd_irq_postinstall(struct drm_device *drm)
-{
-#ifdef CONFIG_DEBUG_FS
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
- unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
-
- /* enable debug interrupts */
- irq_mask |= HDLCD_DEBUG_INT_MASK;
-
- hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
-#endif
-}
-
-static int hdlcd_irq_install(struct drm_device *drm, int irq)
-{
- int ret;
-
- if (irq == IRQ_NOTCONNECTED)
- return -ENOTCONN;
-
- hdlcd_irq_preinstall(drm);
- ret = request_irq(irq, hdlcd_irq, 0, drm->driver->name, drm);
+ ret = request_irq(hdlcd->irq, hdlcd_irq, 0, "hdlcd", hdlcd);
if (ret)
return ret;
- hdlcd_irq_postinstall(drm);
+#ifdef CONFIG_DEBUG_FS
+ /* enable debug interrupts */
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, HDLCD_DEBUG_INT_MASK);
+#endif
return 0;
}
-static void hdlcd_irq_uninstall(struct drm_device *drm)
+static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd)
{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
/* disable all the interrupts that we might have enabled */
- unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
-
-#ifdef CONFIG_DEBUG_FS
- /* disable debug interrupts */
- irq_mask &= ~HDLCD_DEBUG_INT_MASK;
-#endif
-
- /* disable vsync interrupts */
- irq_mask &= ~HDLCD_INTERRUPT_VSYNC;
- hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+ hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
- free_irq(hdlcd->irq, drm);
+ free_irq(hdlcd->irq, hdlcd);
}
static int hdlcd_load(struct drm_device *drm, unsigned long flags)
@@ -183,7 +153,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
goto irq_fail;
hdlcd->irq = ret;
- ret = hdlcd_irq_install(drm, hdlcd->irq);
+ ret = hdlcd_irq_install(hdlcd);
if (ret < 0) {
DRM_ERROR("failed to install IRQ handler\n");
goto irq_fail;
@@ -255,11 +225,11 @@ static void hdlcd_debugfs_init(struct drm_minor *minor)
}
#endif
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = hdlcd_debugfs_init,
#endif
@@ -314,6 +284,15 @@ static int hdlcd_drm_bind(struct device *dev)
goto err_vblank;
}
+ /*
+ * If EFI left us running, take over from simple framebuffer
+ * drivers. Read HDLCD_REG_COMMAND to see if we are enabled.
+ */
+ if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) {
+ hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+ drm_aperture_remove_framebuffers(false, &hdlcd_driver);
+ }
+
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
@@ -335,7 +314,7 @@ err_pm_active:
err_unload:
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- hdlcd_irq_uninstall(drm);
+ hdlcd_irq_uninstall(hdlcd);
of_reserved_mem_device_release(drm->dev);
err_free:
drm_mode_config_cleanup(drm);
@@ -357,7 +336,7 @@ static void hdlcd_drm_unbind(struct device *dev)
hdlcd->crtc.port = NULL;
pm_runtime_get_sync(dev);
drm_atomic_helper_shutdown(drm);
- hdlcd_irq_uninstall(drm);
+ hdlcd_irq_uninstall(hdlcd);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index d5aef21426cf..1d0b0c54ccc7 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -19,10 +19,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
@@ -457,7 +456,7 @@ static int malidp_irq_init(struct platform_device *pdev)
return 0;
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static int malidp_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
@@ -469,7 +468,7 @@ static int malidp_dumb_create(struct drm_file *file_priv,
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
#ifdef CONFIG_DEBUG_FS
@@ -566,7 +565,7 @@ static void malidp_debugfs_init(struct drm_minor *minor)
static const struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index b66ca5b33a7f..ef76d0e6ee2f 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -10,10 +10,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
@@ -160,7 +160,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
n_planes = fb->format->num_planes;
for (i = 0; i < n_planes; i++) {
- struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, i);
/* memory write buffers are never rotated */
u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
@@ -170,7 +170,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
mw_state->pitches[i] = fb->pitches[i];
- mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+ mw_state->addrs[i] = obj->dma_addr + fb->offsets[i];
}
mw_state->n_planes = n_planes;
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 8a9562642d16..45f5e35e7f24 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -13,12 +13,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "malidp_hw.h"
@@ -334,15 +333,15 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
for (i = 0; i < ms->n_planes; i++) {
struct drm_gem_object *obj;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sg_table *sgt;
struct scatterlist *sgl;
obj = drm_gem_fb_get_obj(ms->base.fb, i);
- cma_obj = to_drm_gem_cma_obj(obj);
+ dma_obj = to_drm_gem_dma_obj(obj);
- if (cma_obj->sgt)
- sgt = cma_obj->sgt;
+ if (dma_obj->sgt)
+ sgt = dma_obj->sgt;
else
sgt = obj->funcs->get_sg_table(obj);
@@ -353,14 +352,14 @@ static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
while (sgl) {
if (sgl->length < pgsize) {
- if (!cma_obj->sgt)
+ if (!dma_obj->sgt)
kfree(sgt);
return false;
}
sgl = sg_next(sgl);
}
- if (!cma_obj->sgt)
+ if (!dma_obj->sgt)
kfree(sgt);
}
@@ -715,7 +714,7 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
struct malidp_plane *mp,
int plane_index)
{
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
u16 ptr;
struct drm_plane *plane = &mp->base;
bool afbc = fb->modifier ? true : false;
@@ -723,27 +722,27 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
ptr = mp->layer->ptr + (plane_index << 4);
/*
- * drm_fb_cma_get_gem_addr() alters the physical base address of the
+ * drm_fb_dma_get_gem_addr() alters the physical base address of the
* framebuffer as per the plane's src_x, src_y co-ordinates (ie to
* take care of source cropping).
* For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
* and _AD_CROP_V registers.
*/
if (!afbc) {
- paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
- plane_index);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, plane->state,
+ plane_index);
} else {
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
- obj = drm_fb_cma_get_gem_obj(fb, plane_index);
+ obj = drm_fb_dma_get_gem_obj(fb, plane_index);
if (WARN_ON(!obj))
return;
- paddr = obj->paddr;
+ dma_addr = obj->dma_addr;
}
- malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
- malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+ malidp_hw_write(mp->hwdev, lower_32_bits(dma_addr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(dma_addr), ptr + 4);
}
static void malidp_de_set_plane_afbc(struct drm_plane *plane)
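A hypothetical helper contrasting the two address paths described in the comment above: drm_fb_dma_get_gem_addr() folds the src_x/src_y cropping into the returned address for linear buffers, while AFBC buffers use the raw object address and program the cropping registers instead.

    #include <drm/drm_fb_dma_helper.h>
    #include <drm/drm_framebuffer.h>
    #include <drm/drm_gem_dma_helper.h>

    static dma_addr_t example_plane_addr(struct drm_framebuffer *fb,
                                         struct drm_plane_state *state,
                                         int plane_index)
    {
            if (!fb->modifier)
                    /* linear: cropping is folded into the address */
                    return drm_fb_dma_get_gem_addr(fb, state, plane_index);

            /* AFBC: cropping is programmed via _AD_CROP_H/_AD_CROP_V */
            return drm_fb_dma_get_gem_obj(fb, plane_index)->dma_addr;
    }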
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 514c50dcb74d..3bc16db70ddb 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -145,7 +145,7 @@
#define MALIDP_SE_COEFFTAB_DATA_MASK 0x3fff
#define MALIDP_SE_SET_COEFFTAB_DATA(x) \
((x) & MALIDP_SE_COEFFTAB_DATA_MASK)
-/* Enhance coeffents reigster offset */
+/* Enhance coefficients register offset */
#define MALIDP_SE_IMAGE_ENH 0x3C
/* ENH_LIMITS offset 0x0 */
#define MALIDP_SE_ENH_LOW_LEVEL 24
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index b7bb90ae787f..15dd667aa2e7 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -12,7 +12,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 147abf1a3968..5430265ad458 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -107,11 +107,11 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
}
/*
- * We could grab something from CMA if it's enabled, but that
+ * We could grab something from DMA if it's enabled, but that
* involves building in a problem:
*
- * CMA's interface uses dma_alloc_coherent(), which provides us
- * with an CPU virtual address and a device address.
+ * GEM DMA helper interface uses dma_alloc_coherent(), which provides
+ * us with a CPU virtual address and a device address.
*
* The CPU virtual address may be either an address in the kernel
* direct mapped region (for example, as it would be on x86) or
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 424250535fed..f21eb8fb76d8 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -298,12 +298,6 @@ fail:
return ret;
}
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(plane);
-}
-
static void armada_overlay_reset(struct drm_plane *plane)
{
struct armada_overlay_state *state;
@@ -468,7 +462,7 @@ static int armada_overlay_get_property(struct drm_plane *plane,
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
.update_plane = armada_overlay_plane_update,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = armada_ovl_plane_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = armada_overlay_reset,
.atomic_duplicate_state = armada_overlay_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index 959d7f0a5108..cc47c032dbc1 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -288,7 +288,7 @@ struct drm_plane_state *armada_plane_duplicate_state(struct drm_plane *plane)
static const struct drm_plane_funcs armada_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = armada_plane_reset,
.atomic_duplicate_state = armada_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 024ccab14f88..8137c39b057b 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -5,7 +5,7 @@ config DRM_ASPEED_GFX
depends on (COMPILE_TEST || ARCH_ASPEED)
depends on MMU
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
select CMA if HAVE_DMA_CONTIGUOUS
select MFD_SYSCON
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index f3788d7d82d6..55a3444a51d8 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -7,11 +7,11 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -168,7 +168,7 @@ static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_framebuffer *fb = pipe->plane.state->fb;
struct drm_pending_vblank_event *event;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
spin_lock_irq(&crtc->dev->event_lock);
event = crtc->state->event;
@@ -185,10 +185,10 @@ static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
if (!fb)
return;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
if (!gem)
return;
- writel(gem->paddr, priv->base + CRT_ADDR);
+ writel(gem->dma_addr, priv->base + CRT_ADDR);
}
static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 7780b72de9e8..a94f1a9e8f40 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -16,9 +16,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -246,11 +245,11 @@ static void aspeed_gfx_unload(struct drm_device *drm)
drm_kms_helper_poll_fini(drm);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 760b27971557..b9392f31e629 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -39,7 +39,7 @@
#include "ast_drv.h"
-int ast_modeset = -1;
+static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 214b10178454..1bc0220e6783 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -42,7 +42,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -114,6 +113,9 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
case 1024:
vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
break;
+ case 1152:
+ vbios_mode->enh_table = &res_1152x864[refresh_rate_index];
+ break;
case 1280:
if (mode->crtc_vdisplay == 800)
vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
@@ -311,7 +313,7 @@ static void ast_set_crtc_reg(struct ast_private *ast,
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
u16 temp, precache = 0;
- if ((ast->chip == AST2500) &&
+ if ((ast->chip == AST2500 || ast->chip == AST2600) &&
(vbios_mode->enh_table->flags & AST2500PreCatchCRT))
precache = 40;
@@ -352,6 +354,12 @@ static void ast_set_crtc_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+ // Workaround for HSync time with a non-octave (not a multiple of 8) pixel count (1920x1080@60Hz: HSync 44 pixels)
+ if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080))
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02);
+ else
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00);
+
/* vert timings */
temp = (mode->crtc_vtotal) - 2;
if (temp & 0x100)
@@ -429,7 +437,7 @@ static void ast_set_dclk_reg(struct ast_private *ast,
{
const struct ast_vbios_dclk_info *clk_info;
- if (ast->chip == AST2500)
+ if ((ast->chip == AST2500) || (ast->chip == AST2600))
clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index];
else
clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
@@ -555,8 +563,8 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (ret)
return ret;
@@ -779,8 +787,8 @@ static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
@@ -1058,6 +1066,8 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
return MODE_OK;
if ((mode->hdisplay == 1600) && (mode->vdisplay == 900))
return MODE_OK;
+ if ((mode->hdisplay == 1152) && (mode->vdisplay == 864))
+ return MODE_OK;
if ((ast->chip == AST2100) || (ast->chip == AST2200) ||
(ast->chip == AST2300) || (ast->chip == AST2400) ||
@@ -1090,6 +1100,10 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
if (mode->vdisplay == 768)
status = MODE_OK;
break;
+ case 1152:
+ if (mode->vdisplay == 864)
+ status = MODE_OK;
+ break;
case 1280:
if (mode->vdisplay == 1024)
status = MODE_OK;
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index dbe1cc620f6e..0378c9bc079b 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -272,6 +272,13 @@ static const struct ast_vbios_enhtable res_1600x1200[] = {
(SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
};
+static const struct ast_vbios_enhtable res_1152x864[] = {
+ {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */
+ (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B },
+ {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */
+ (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B },
+};
+
/* 16:9 */
static const struct ast_vbios_enhtable res_1360x768[] = {
{1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 8ae679f1a518..3bdbab3a6333 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -2,7 +2,7 @@
config DRM_ATMEL_HLCDC
tristate "DRM Support for ATMEL HLCDC Display Controller"
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 651e3c109360..f7e7f4e919c7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -730,11 +730,11 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
clk_disable_unprepare(dc->hlcdc->periph_clk);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 2306ceb3e999..daa508504f47 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -12,11 +12,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -449,9 +448,9 @@ static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
for (i = 0; i < state->nplanes; i++) {
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
- state->dscrs[i]->addr = gem->paddr + state->offsets[i];
+ state->dscrs[i]->addr = gem->dma_addr + state->offsets[i];
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index a031a0cd1f18..94de73cbeb2d 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -394,10 +394,7 @@ void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
- unsigned int offset = adv7511->type == ADV7533 ?
- ADV7533_REG_CEC_OFFSET : 0;
-
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 0b266f28f150..99964f5a5457 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -359,7 +359,7 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
goto err_cec_alloc;
}
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
regmap_write(adv7511->regmap_cec,
ADV7511_REG_CEC_SOFT_RESET + offset, 0x01);
@@ -386,7 +386,7 @@ err_cec_alloc:
dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
ret);
err_cec_parse_dt:
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
return ret == -EPROBE_DEFER ? ret : 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 38bf28720f3a..f887200e8abc 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1336,13 +1336,10 @@ uninit_regulators:
return ret;
}
-static int adv7511_remove(struct i2c_client *i2c)
+static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- i2c_unregister_device(adv7511->i2c_cec);
- clk_disable_unprepare(adv7511->cec_clk);
-
adv7511_uninit_regulators(adv7511);
drm_bridge_remove(&adv7511->bridge);
@@ -1350,11 +1347,11 @@ static int adv7511_remove(struct i2c_client *i2c)
adv7511_audio_exit(adv7511);
cec_unregister_adapter(adv7511->cec_adap);
+ i2c_unregister_device(adv7511->i2c_cec);
+ clk_disable_unprepare(adv7511->cec_clk);
i2c_unregister_device(adv7511->i2c_packet);
i2c_unregister_device(adv7511->i2c_edid);
-
- return 0;
}
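This and the following bridge conversions track the I2C core change that made the remove() callback return void; a minimal, hypothetical sketch of the new prototype (example_ctx and example_remove are not part of this patch):

    #include <linux/i2c.h>
    #include <drm/drm_bridge.h>

    struct example_ctx {
            struct drm_bridge bridge;
    };

    static void example_remove(struct i2c_client *client)
    {
            struct example_ctx *ctx = i2c_get_clientdata(client);

            drm_bridge_remove(&ctx->bridge);
            /* no return value: remove() can no longer report errors */
    }

    static struct i2c_driver example_i2c_driver = {
            .driver = { .name = "example" },
            .remove = example_remove,
    };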
static const struct i2c_device_id adv7511_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index ae3d6e9a606c..660a54857929 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -787,7 +787,7 @@ err_unregister_i2c:
return err;
}
-static int anx6345_i2c_remove(struct i2c_client *client)
+static void anx6345_i2c_remove(struct i2c_client *client)
{
struct anx6345 *anx6345 = i2c_get_clientdata(client);
@@ -798,8 +798,6 @@ static int anx6345_i2c_remove(struct i2c_client *client)
kfree(anx6345->edid);
mutex_destroy(&anx6345->lock);
-
- return 0;
}
static const struct i2c_device_id anx6345_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index d2fc8676fab6..5997049fde5b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1357,7 +1357,7 @@ err_unregister_i2c:
return err;
}
-static int anx78xx_i2c_remove(struct i2c_client *client)
+static void anx78xx_i2c_remove(struct i2c_client *client)
{
struct anx78xx *anx78xx = i2c_get_clientdata(client);
@@ -1366,8 +1366,6 @@ static int anx78xx_i2c_remove(struct i2c_client *client)
unregister_i2c_dummy_clients(anx78xx);
kfree(anx78xx->edid);
-
- return 0;
}
static const struct i2c_device_id anx78xx_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 8aadcc0aa90b..df9370e0ff23 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index d1f1d525aeb6..b0ff1ecb80a5 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1440,6 +1440,20 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
+ int ret;
+
+ /* Set irq detect window to 2ms */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT8_15,
+ (HPD_TIME >> 8) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT16_23,
+ (HPD_TIME >> 16) & 0xFF);
+ if (ret < 0)
+ return ret;
+
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
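Worked example of the 2 ms detect window written above, assuming the 27 MHz reference stated in anx7625.h: 27,000,000 Hz x 0.002 s = 54,000 cycles = 0x00D2F0, split across the three byte-wide timer registers.

    #include <linux/types.h>

    static void example_hpd_window_split(u8 regs[3])
    {
            const u32 hpd_time = 54000;             /* 27 MHz * 2 ms */

            regs[0] = hpd_time & 0xff;              /* 0xf0 -> HPD_DET_TIMER_BIT0_7   */
            regs[1] = (hpd_time >> 8) & 0xff;       /* 0xd2 -> HPD_DET_TIMER_BIT8_15  */
            regs[2] = (hpd_time >> 16) & 0xff;      /* 0x00 -> HPD_DET_TIMER_BIT16_23 */
    }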
@@ -1642,6 +1656,7 @@ static int anx7625_parse_dt(struct device *dev,
anx7625_get_swing_setting(dev, pdata);
pdata->is_dpi = 0; /* default dsi mode */
+ of_node_put(pdata->mipi_host_node);
pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
if (!pdata->mipi_host_node) {
DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
@@ -1796,8 +1811,13 @@ static int anx7625_audio_hw_params(struct device *dev, void *data,
int wl, ch, rate;
int ret = 0;
- if (fmt->fmt != HDMI_DSP_A) {
- DRM_DEV_ERROR(dev, "only supports DSP_A\n");
+ if (anx7625_sink_detect(ctx) == connector_status_disconnected) {
+ DRM_DEV_DEBUG_DRIVER(dev, "DP not connected\n");
+ return 0;
+ }
+
+ if (fmt->fmt != HDMI_DSP_A && fmt->fmt != HDMI_I2S) {
+ DRM_DEV_ERROR(dev, "only supports DSP_A & I2S\n");
return -EINVAL;
}
@@ -1805,10 +1825,16 @@ static int anx7625_audio_hw_params(struct device *dev, void *data,
params->sample_rate, params->sample_width,
params->cea.channels);
- ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
- AUDIO_CHANNEL_STATUS_6,
- ~I2S_SLAVE_MODE,
- TDM_SLAVE_MODE);
+ if (fmt->fmt == HDMI_DSP_A)
+ ret = anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6,
+ ~I2S_SLAVE_MODE,
+ TDM_SLAVE_MODE);
+ else
+ ret = anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6,
+ ~TDM_SLAVE_MODE,
+ I2S_SLAVE_MODE);
/* Word length */
switch (params->sample_width) {
@@ -2689,7 +2715,7 @@ free_hdcp_wq:
return ret;
}
-static int anx7625_i2c_remove(struct i2c_client *client)
+static void anx7625_i2c_remove(struct i2c_client *client)
{
struct anx7625_data *platform = i2c_get_clientdata(client);
@@ -2709,8 +2735,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.audio_en)
anx7625_unregister_audio(platform);
-
- return 0;
}
static const struct i2c_device_id anx7625_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index e257a84db962..14f33d6be289 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -132,6 +132,12 @@
#define I2S_SLAVE_MODE 0x08
#define AUDIO_LAYOUT 0x01
+#define HPD_DET_TIMER_BIT0_7 0xea
+#define HPD_DET_TIMER_BIT8_15 0xeb
+#define HPD_DET_TIMER_BIT16_23 0xec
+/* HPD debounce time: 2 ms at the 27 MHz reference clock */
+#define HPD_TIME 54000
+
#define AUDIO_CONTROL_REGISTER 0xe6
#define TDM_TIMING_MODE 0x08
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index ab63e7b11944..31442a922502 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2605,7 +2605,8 @@ static int cdns_mhdp_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
cancel_work_sync(&mhdp->modeset_retry_work);
- flush_scheduled_work();
+ flush_work(&mhdp->hpd_work);
+ /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
clk_disable_unprepare(mhdp->clk);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 481c86b2406e..bf920c3503aa 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -151,6 +152,8 @@ struct chipone {
struct regulator *vdd1;
struct regulator *vdd2;
struct regulator *vdd3;
+ struct clk *refclk;
+ unsigned long refclk_rate;
bool interface_i2c;
};
@@ -259,7 +262,7 @@ static void chipone_configure_pll(struct chipone *icn,
/*
* DSI byte clock frequency (input into PLL) is calculated as:
- * DSI_CLK = mode clock * bpp / dsi_data_lanes / 8
+ * DSI_CLK = HS clock / 4
*
* DPI pixel clock frequency (output from PLL) is mode clock.
*
@@ -273,8 +276,10 @@ static void chipone_configure_pll(struct chipone *icn,
* It seems the PLL input clock after applying the P pre-divider has
* to be lower than 20 MHz.
*/
- fin = mode_clock * mipi_dsi_pixel_format_to_bpp(icn->dsi->format) /
- icn->dsi->lanes / 8; /* in Hz */
+ if (icn->refclk)
+ fin = icn->refclk_rate;
+ else
+ fin = icn->dsi->hs_rate / 4; /* in Hz */
/* Minimum value of P predivider for PLL input in 5..20 MHz */
p_min = clamp(DIV_ROUND_UP(fin, 20000000), 1U, 31U);
@@ -319,16 +324,18 @@ static void chipone_configure_pll(struct chipone *icn,
best_p_pot = !(best_p & 1);
dev_dbg(icn->dev,
- "PLL: P[3:0]=%d P[4]=2*%d M=%d S[7:5]=2^%d delta=%d => DSI f_in=%d Hz ; DPI f_out=%d Hz\n",
+ "PLL: P[3:0]=%d P[4]=2*%d M=%d S[7:5]=2^%d delta=%d => DSI f_in(%s)=%d Hz ; DPI f_out=%d Hz\n",
best_p >> best_p_pot, best_p_pot, best_m, best_s + 1,
- min_delta, fin, (fin * best_m) / (best_p << (best_s + 1)));
+ min_delta, icn->refclk ? "EXT" : "DSI", fin,
+ (fin * best_m) / (best_p << (best_s + 1)));
ref_div = PLL_REF_DIV_P(best_p >> best_p_pot) | PLL_REF_DIV_S(best_s);
if (best_p_pot) /* Prefer /2 pre-divider */
ref_div |= PLL_REF_DIV_Pe;
- /* Clock source selection fixed to MIPI DSI clock lane */
- chipone_writeb(icn, PLL_CTRL(6), PLL_CTRL_6_MIPI_CLK);
+ /* Clock source selection: either the external clock or the MIPI DSI clock lane */
+ chipone_writeb(icn, PLL_CTRL(6),
+ icn->refclk ? PLL_CTRL_6_EXTERNAL : PLL_CTRL_6_MIPI_CLK);
chipone_writeb(icn, PLL_REF_DIV, ref_div);
chipone_writeb(icn, PLL_INT(0), best_m);
}
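A hypothetical numeric check of the pre-divider bound described in the comment above: with the 500 MHz hs_rate set later in chipone_dsi_attach(), f_in = 500 MHz / 4 = 125 MHz, so the smallest P keeping the PLL input below 20 MHz is DIV_ROUND_UP(125 MHz, 20 MHz) = 7.

    #include <linux/kernel.h>

    static unsigned long example_min_prediv(unsigned long refclk_rate,
                                            unsigned long hs_rate)
    {
            unsigned long fin = refclk_rate ? refclk_rate : hs_rate / 4;

            /* PLL input after the P pre-divider must stay in 5..20 MHz */
            return clamp(DIV_ROUND_UP(fin, 20000000UL), 1UL, 31UL);
    }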
@@ -464,6 +471,11 @@ static void chipone_atomic_pre_enable(struct drm_bridge *bridge,
"failed to enable VDD3 regulator: %d\n", ret);
}
+ ret = clk_prepare_enable(icn->refclk);
+ if (ret)
+ DRM_DEV_ERROR(icn->dev,
+ "failed to enable REFCLK clock: %d\n", ret);
+
gpiod_set_value(icn->enable_gpio, 1);
usleep_range(10000, 11000);
@@ -474,6 +486,8 @@ static void chipone_atomic_post_disable(struct drm_bridge *bridge,
{
struct chipone *icn = bridge_to_chipone(bridge);
+ clk_disable_unprepare(icn->refclk);
+
if (icn->vdd1)
regulator_disable(icn->vdd1);
@@ -515,6 +529,8 @@ static int chipone_dsi_attach(struct chipone *icn)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
+ dsi->hs_rate = 500000000;
+ dsi->lp_rate = 16000000;
ret = mipi_dsi_attach(dsi);
if (ret < 0)
@@ -617,6 +633,20 @@ static int chipone_parse_dt(struct chipone *icn)
struct device *dev = icn->dev;
int ret;
+ icn->refclk = devm_clk_get_optional(dev, "refclk");
+ if (IS_ERR(icn->refclk)) {
+ ret = PTR_ERR(icn->refclk);
+ DRM_DEV_ERROR(dev, "failed to get REFCLK clock: %d\n", ret);
+ return ret;
+ } else if (icn->refclk) {
+ icn->refclk_rate = clk_get_rate(icn->refclk);
+ if (icn->refclk_rate < 10000000 || icn->refclk_rate > 154000000) {
+ DRM_DEV_ERROR(dev, "REFCLK out of range: %ld Hz\n",
+ icn->refclk_rate);
+ return -EINVAL;
+ }
+ }
+
icn->vdd1 = devm_regulator_get_optional(dev, "vdd1");
if (IS_ERR(icn->vdd1)) {
ret = PTR_ERR(icn->vdd1);
@@ -735,14 +765,12 @@ static int chipone_i2c_probe(struct i2c_client *client,
return chipone_dsi_host_attach(icn);
}
-static int chipone_dsi_remove(struct mipi_dsi_device *dsi)
+static void chipone_dsi_remove(struct mipi_dsi_device *dsi)
{
struct chipone *icn = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&icn->bridge);
-
- return 0;
}
static const struct of_device_id chipone_of_match[] = {
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index ba060277c3fd..b94f39a86846 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -583,14 +583,12 @@ static int ch7033_probe(struct i2c_client *client,
return 0;
}
-static int ch7033_remove(struct i2c_client *client)
+static void ch7033_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv = dev_get_drvdata(dev);
drm_bridge_remove(&priv->bridge);
-
- return 0;
}
static const struct of_device_id ch7033_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index 0f6d907432e3..fa91bdeddef0 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -159,13 +159,11 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
return 0;
}
-static int cros_ec_anx7688_bridge_remove(struct i2c_client *client)
+static void cros_ec_anx7688_bridge_remove(struct i2c_client *client)
{
struct cros_ec_anx7688 *anx7688 = i2c_get_clientdata(client);
drm_bridge_remove(&anx7688->bridge);
-
- return 0;
}
static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 4b673c4792d7..dfe4351c9bdd 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -506,6 +506,9 @@ static int it6505_read(struct it6505 *it6505, unsigned int reg_addr)
int err;
struct device *dev = &it6505->client->dev;
+ if (!it6505->powered)
+ return -ENODEV;
+
err = regmap_read(it6505->regmap, reg_addr, &value);
if (err < 0) {
dev_err(dev, "read failed reg[0x%x] err: %d", reg_addr, err);
@@ -521,6 +524,9 @@ static int it6505_write(struct it6505 *it6505, unsigned int reg_addr,
int err;
struct device *dev = &it6505->client->dev;
+ if (!it6505->powered)
+ return -ENODEV;
+
err = regmap_write(it6505->regmap, reg_addr, reg_val);
if (err < 0) {
@@ -538,6 +544,9 @@ static int it6505_set_bits(struct it6505 *it6505, unsigned int reg,
int err;
struct device *dev = &it6505->client->dev;
+ if (!it6505->powered)
+ return -ENODEV;
+
err = regmap_update_bits(it6505->regmap, reg, mask, value);
if (err < 0) {
dev_err(dev, "write reg[0x%x] = 0x%x mask = 0x%x failed err %d",
@@ -554,7 +563,7 @@ static void it6505_debug_print(struct it6505 *it6505, unsigned int reg,
struct device *dev = &it6505->client->dev;
int val;
- if (likely(!(__drm_debug & DRM_UT_DRIVER)))
+ if (!drm_debug_enabled(DRM_UT_DRIVER))
return;
val = it6505_read(it6505, reg);
@@ -682,7 +691,7 @@ static void it6505_calc_video_info(struct it6505 *it6505)
DRM_DEV_DEBUG_DRIVER(dev, "hactive_start:%d, vactive_start:%d",
hdes, vdes);
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 3; i++) {
it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER,
ENABLE_PCLK_COUNTER);
usleep_range(10000, 15000);
@@ -699,7 +708,7 @@ static void it6505_calc_video_info(struct it6505 *it6505)
return;
}
- sum /= 10;
+ sum /= 3;
pclk = 13500 * 2048 / sum;
it6505->video_info.clock = pclk;
it6505->video_info.hdisplay = hdew;
@@ -2341,8 +2350,6 @@ static void it6505_irq_hpd(struct it6505 *it6505)
if (!it6505_get_video_status(it6505))
it6505_video_reset(it6505);
-
- it6505_calc_video_info(it6505);
} else {
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
@@ -2559,13 +2566,12 @@ static int it6505_poweron(struct it6505 *it6505)
usleep_range(10000, 20000);
}
+ it6505->powered = true;
it6505_reset_logic(it6505);
it6505_int_mask_enable(it6505);
it6505_init(it6505);
it6505_lane_off(it6505);
- it6505->powered = true;
-
return 0;
}
@@ -2954,6 +2960,9 @@ static void it6505_bridge_atomic_enable(struct drm_bridge *bridge,
it6505_int_mask_enable(it6505);
it6505_video_reset(it6505);
+
+ it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
+ DP_SET_POWER_D0);
}
static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -2965,9 +2974,9 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
DRM_DEV_DEBUG_DRIVER(dev, "start");
if (it6505->powered) {
- it6505_video_disable(it6505);
it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link,
DP_SET_POWER_D3);
+ it6505_video_disable(it6505);
}
}
@@ -3044,7 +3053,7 @@ static int it6505_init_pdata(struct it6505 *it6505)
return PTR_ERR(pdata->ovdd);
}
- pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(pdata->gpiod_reset)) {
dev_err(dev, "gpiod_reset gpio not found");
return PTR_ERR(pdata->gpiod_reset);
@@ -3316,7 +3325,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
return 0;
}
-static int it6505_i2c_remove(struct i2c_client *client)
+static void it6505_i2c_remove(struct i2c_client *client)
{
struct it6505 *it6505 = i2c_get_clientdata(client);
@@ -3324,8 +3333,6 @@ static int it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
-
- return 0;
}
static const struct i2c_device_id it6505_id[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 44278d54d35d..4f6f1deba28c 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1623,15 +1623,13 @@ static int it66121_probe(struct i2c_client *client,
return 0;
}
-static int it66121_remove(struct i2c_client *client)
+static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
-
- return 0;
}
static const struct of_device_id it66121_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 28bad30dc4e5..a98efef0ba0e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -165,30 +165,38 @@ static int lt8912_write_rxlogicres_config(struct lt8912 *lt)
return ret;
};
+/* Enable the LVDS output with a hardcoded configuration; this is not required for the HDMI output */
static int lt8912_write_lvds_config(struct lt8912 *lt)
{
const struct reg_sequence seq[] = {
+ // lvds power up
{0x44, 0x30},
{0x51, 0x05},
- {0x50, 0x24},
- {0x51, 0x2d},
- {0x52, 0x04},
- {0x69, 0x0e},
+
+ // core pll bypass
+ {0x50, 0x24}, // cp=50uA
+ {0x51, 0x2d}, // Pix_clk as reference, second order passive LPF PLL
+ {0x52, 0x04}, // loopdiv=0, use second-order PLL
+ {0x69, 0x0e}, // CP_PRESET_DIV_RATIO
{0x69, 0x8e},
{0x6a, 0x00},
- {0x6c, 0xb8},
+ {0x6c, 0xb8}, // RGD_CP_SOFT_K_EN,RGD_CP_SOFT_K[13:8]
{0x6b, 0x51},
- {0x04, 0xfb},
+
+ {0x04, 0xfb}, // core pll reset
{0x04, 0xff},
- {0x7f, 0x00},
- {0xa8, 0x13},
- {0x02, 0xf7},
+
+ // scaler bypass
+ {0x7f, 0x00}, // disable scaler
+ {0xa8, 0x13}, // 0x13: JEIDA, 0x33: VESA
+
+ {0x02, 0xf7}, // lvds pll reset
{0x02, 0xff},
{0x03, 0xcf},
{0x03, 0xff},
};
- return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq));
+ return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq));
};
static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b)
@@ -268,7 +276,7 @@ static int lt8912_video_setup(struct lt8912 *lt)
u32 hactive, h_total, hpw, hfp, hbp;
u32 vactive, v_total, vpw, vfp, vbp;
u8 settle = 0x08;
- int ret;
+ int ret, hsync_activehigh, vsync_activehigh;
if (!lt)
return -EINVAL;
@@ -278,12 +286,14 @@ static int lt8912_video_setup(struct lt8912 *lt)
hpw = lt->mode.hsync_len;
hbp = lt->mode.hback_porch;
h_total = hactive + hfp + hpw + hbp;
+ hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH;
vactive = lt->mode.vactive;
vfp = lt->mode.vfront_porch;
vpw = lt->mode.vsync_len;
vbp = lt->mode.vback_porch;
v_total = vactive + vfp + vpw + vbp;
+ vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH;
if (vactive <= 600)
settle = 0x04;
@@ -317,6 +327,13 @@ static int lt8912_video_setup(struct lt8912 *lt)
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff);
ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0),
+ vsync_activehigh ? BIT(0) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1),
+ hsync_activehigh ? BIT(1) : 0);
+ ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0),
+ lt->connector.display_info.is_hdmi ? BIT(0) : 0);
+
return ret;
}
@@ -714,7 +731,7 @@ err_dt_parse:
return ret;
}
-static int lt8912_remove(struct i2c_client *client)
+static void lt8912_remove(struct i2c_client *client)
{
struct lt8912 *lt = i2c_get_clientdata(client);
@@ -722,7 +739,6 @@ static int lt8912_remove(struct i2c_client *client)
drm_bridge_remove(&lt->bridge);
lt8912_free_i2c(lt);
lt8912_put_dt(lt);
- return 0;
}
static const struct of_device_id lt8912_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9a3e90427d12..933ca028d612 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -766,13 +766,11 @@ static int lt9211_probe(struct i2c_client *client,
return ret;
}
-static int lt9211_remove(struct i2c_client *client)
+static void lt9211_remove(struct i2c_client *client)
{
struct lt9211 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id lt9211_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 8a60e83482a0..7c0a99173b39 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -813,13 +813,14 @@ static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt961
drm_connector_helper_add(&lt9611->connector,
&lt9611_bridge_connector_helper_funcs);
- drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
+ drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
+
return 0;
}
@@ -1216,7 +1217,7 @@ err_of_put:
return ret;
}
-static int lt9611_remove(struct i2c_client *client)
+static void lt9611_remove(struct i2c_client *client)
{
struct lt9611 *lt9611 = i2c_get_clientdata(client);
@@ -1228,8 +1229,6 @@ static int lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi1_node);
of_node_put(lt9611->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fdf12d4c6416..fa1ee6264d92 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -978,7 +978,7 @@ err_of_put:
return ret;
}
-static int lt9611uxc_remove(struct i2c_client *client)
+static void lt9611uxc_remove(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
@@ -993,8 +993,6 @@ static int lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi1_node);
of_node_put(lt9611uxc->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611uxc_id[] = {
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index cce98bf2a4e7..97359f807bfc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -296,7 +296,9 @@ static void ge_b850v3_lvds_remove(void)
* This check is to avoid both the drivers
* removing the bridge in their remove() function
*/
- if (!ge_b850v3_lvds_ptr)
+ if (!ge_b850v3_lvds_ptr ||
+ !ge_b850v3_lvds_ptr->stdp2690_i2c ||
+ !ge_b850v3_lvds_ptr->stdp4028_i2c)
goto out;
drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge);
@@ -355,11 +357,9 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
return ge_b850v3_register();
}
-static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
+static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
@@ -405,11 +405,9 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
return ge_b850v3_register();
}
-static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
+static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1ab91f4e057b..0851101a8c72 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -315,13 +315,11 @@ static int ptn3460_probe(struct i2c_client *client,
return 0;
}
-static int ptn3460_remove(struct i2c_client *client)
+static void ptn3460_remove(struct i2c_client *client)
{
struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);
drm_bridge_remove(&ptn_bridge->bridge);
-
- return 0;
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 4277bf4f032b..216af76d0042 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -8,6 +8,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -367,6 +368,44 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
}
EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
+static void drmm_drm_panel_bridge_release(struct drm_device *drm, void *ptr)
+{
+ struct drm_bridge *bridge = ptr;
+
+ drm_panel_bridge_remove(bridge);
+}
+
+/**
+ * drmm_panel_bridge_add - Creates a DRM-managed &drm_bridge and
+ * &drm_connector that just calls the
+ * appropriate functions from &drm_panel.
+ *
+ * @drm: DRM device to tie the bridge lifetime to
+ * @panel: The drm_panel being wrapped. Must be non-NULL.
+ *
+ * This is the DRM-managed version of drm_panel_bridge_add() which
+ * automatically calls drm_panel_bridge_remove() when @dev is cleaned
+ * up.
+ */
+struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
+ struct drm_panel *panel)
+{
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = drm_panel_bridge_add_typed(panel, panel->connector_type);
+ if (IS_ERR(bridge))
+ return bridge;
+
+ ret = drmm_add_action_or_reset(drm, drmm_drm_panel_bridge_release,
+ bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drmm_panel_bridge_add);
+
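A hypothetical usage sketch of the new DRM-managed helper from a driver's bind path; example_attach_panel() is illustrative only.

    #include <linux/err.h>
    #include <drm/drm_bridge.h>
    #include <drm/drm_panel.h>

    static int example_attach_panel(struct drm_device *drm,
                                    struct drm_encoder *encoder,
                                    struct drm_panel *panel)
    {
            struct drm_bridge *bridge;

            bridge = drmm_panel_bridge_add(drm, panel);
            if (IS_ERR(bridge))
                    return PTR_ERR(bridge);

            /* no explicit drm_panel_bridge_remove(): teardown is drm-managed */
            return drm_bridge_attach(encoder, bridge, NULL, 0);
    }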
/**
* drm_panel_bridge_connector - return the connector for the panel bridge
* @bridge: The drm_bridge.
@@ -420,4 +459,39 @@ struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
return bridge;
}
EXPORT_SYMBOL(devm_drm_of_get_bridge);
+
+/**
+ * drmm_of_get_bridge - Return next bridge in the chain
+ * @drm: device to tie the bridge lifetime to
+ * @np: device tree node containing encoder output ports
+ * @port: port in the device tree node
+ * @endpoint: endpoint in the device tree node
+ *
+ * Given a DT node's port and endpoint number, finds the connected node
+ * and returns the associated bridge if any, or creates and returns a
+ * drm panel bridge instance if a panel is connected.
+ *
+ * Returns a drmm managed pointer to the bridge if successful, or an error
+ * pointer otherwise.
+ */
+struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
+ struct device_node *np,
+ u32 port, u32 endpoint)
+{
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(np, port, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (panel)
+ bridge = drmm_panel_bridge_add(drm, panel);
+
+ return bridge;
+}
+EXPORT_SYMBOL(drmm_of_get_bridge);
+
#endif
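And a hypothetical sketch of the DT-based variant, which resolves either a bridge or a panel behind the given port/endpoint and wraps a panel in a managed panel bridge automatically; the port/endpoint numbers below are example values.

    #include <linux/err.h>
    #include <drm/drm_bridge.h>

    static int example_get_next_bridge(struct drm_device *drm,
                                       struct drm_encoder *encoder,
                                       struct device_node *np)
    {
            struct drm_bridge *bridge = drmm_of_get_bridge(drm, np, 1, 0);

            if (IS_ERR(bridge))
                    return PTR_ERR(bridge);

            return drm_bridge_attach(encoder, bridge, NULL, 0);
    }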
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index b5750e5f71d7..309de802863d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -520,14 +520,12 @@ static int ps8622_probe(struct i2c_client *client,
return 0;
}
-static int ps8622_remove(struct i2c_client *client)
+static void ps8622_remove(struct i2c_client *client)
{
struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
backlight_device_unregister(ps8622->bl);
drm_bridge_remove(&ps8622->bridge);
-
- return 0;
}
static const struct i2c_device_id ps8622_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 31e88cb39f8a..d7483c13c569 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -375,6 +375,11 @@ static int __maybe_unused ps8640_resume(struct device *dev)
gpiod_set_value(ps_bridge->gpio_reset, 1);
usleep_range(2000, 2500);
gpiod_set_value(ps_bridge->gpio_reset, 0);
+ /* Double reset for T4 and T5 */
+ msleep(50);
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ msleep(50);
+ gpiod_set_value(ps_bridge->gpio_reset, 0);
/*
* Mystery 200 ms delay for the "MCU to be ready". It's unclear if
@@ -631,8 +636,8 @@ static int ps8640_probe(struct i2c_client *client)
if (!ps_bridge)
return -ENOMEM;
- ps_bridge->supplies[0].supply = "vdd33";
- ps_bridge->supplies[1].supply = "vdd12";
+ ps_bridge->supplies[0].supply = "vdd12";
+ ps_bridge->supplies[1].supply = "vdd33";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
ps_bridge->supplies);
if (ret)
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 7ab38d734ad6..878fb7d3732b 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1145,7 +1145,7 @@ static int sii902x_probe(struct i2c_client *client,
return ret;
}
-static int sii902x_remove(struct i2c_client *client)
+static void sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
@@ -1154,8 +1154,6 @@ static int sii902x_remove(struct i2c_client *client)
drm_bridge_remove(&sii902x->bridge);
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
-
- return 0;
}
static const struct of_device_id sii902x_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 15c98a7bd81c..5b3061d4b5c3 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -936,14 +936,12 @@ static int sii9234_probe(struct i2c_client *client,
return 0;
}
-static int sii9234_remove(struct i2c_client *client)
+static void sii9234_remove(struct i2c_client *client)
{
struct sii9234 *ctx = i2c_get_clientdata(client);
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii9234_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index ab0bce4a988c..511982a1cedb 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2346,7 +2346,7 @@ static int sii8620_probe(struct i2c_client *client,
return 0;
}
-static int sii8620_remove(struct i2c_client *client)
+static void sii8620_remove(struct i2c_client *client)
{
struct sii8620 *ctx = i2c_get_clientdata(client);
@@ -2360,8 +2360,6 @@ static int sii8620_remove(struct i2c_client *client)
sii8620_cable_out(ctx);
}
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii8620_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 7d2ed0ed2fe2..4efb62bcdb63 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -542,8 +542,8 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
- strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
+ strscpy(card->driver, DRIVER_NAME, sizeof(card->driver));
+ strscpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
"%s rev 0x%02x, irq %d", card->shortname, revision,
data->irq);
@@ -561,7 +561,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
dw->pcm = pcm;
pcm->private_data = dw;
- strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
+ strscpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);
/*
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 25a60eb4d67c..40d8ca37f5bc 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -3096,6 +3096,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
{
struct dw_hdmi *hdmi = dev_id;
u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat;
+ enum drm_connector_status status = connector_status_unknown;
intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0);
phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0);
@@ -3134,13 +3135,15 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
cec_notifier_phys_addr_invalidate(hdmi->cec_notifier);
mutex_unlock(&hdmi->cec_notifier_mutex);
}
- }
- if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
- enum drm_connector_status status = phy_int_pol & HDMI_PHY_HPD
- ? connector_status_connected
- : connector_status_disconnected;
+ if (phy_stat & HDMI_PHY_HPD)
+ status = connector_status_connected;
+
+ if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE)))
+ status = connector_status_disconnected;
+ }
+ if (status != connector_status_unknown) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
status == connector_status_connected ?
"plugin" : "plugout");
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 40439da4db49..7f4fce1aa998 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -241,14 +241,12 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int tc358762_remove(struct mipi_dsi_device *dsi)
+static void tc358762_remove(struct mipi_dsi_device *dsi)
{
struct tc358762 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id tc358762_of_match[] = {
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index fdfb14aca926..53259c12d777 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -381,14 +381,12 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int tc358764_remove(struct mipi_dsi_device *dsi)
+static void tc358764_remove(struct mipi_dsi_device *dsi)
{
struct tc358764 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id tc358764_of_match[] = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 02bd757a8987..2a58eb271f70 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -889,6 +889,7 @@ static int tc_set_edp_video_mode(struct tc_data *tc,
u32 dp0_syncval;
u32 bits_per_pixel = 24;
u32 in_bw, out_bw;
+ u32 dpipxlfmt;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
@@ -938,10 +939,15 @@ static int tc_set_edp_video_mode(struct tc_data *tc,
if (ret)
return ret;
- ret = regmap_write(tc->regmap, DPIPXLFMT,
- VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
- DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 |
- DPI_BPP_RGB888);
+ dpipxlfmt = DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dpipxlfmt |= VS_POL_ACTIVE_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dpipxlfmt |= HS_POL_ACTIVE_LOW;
+
+ ret = regmap_write(tc->regmap, DPIPXLFMT, dpipxlfmt);
if (ret)
return ret;
@@ -1244,7 +1250,13 @@ static int tc_main_link_disable(struct tc_data *tc)
if (ret)
return ret;
- return regmap_write(tc->regmap, DP0CTL, 0);
+ ret = regmap_write(tc->regmap, DP0CTL, 0);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(tc->regmap, DP_PHY_CTRL,
+ PHY_M0_RST | PHY_M1_RST | PHY_M0_EN,
+ PHY_M0_RST | PHY_M1_RST);
}
static int tc_dsi_rx_enable(struct tc_data *tc)
@@ -1252,10 +1264,10 @@ static int tc_dsi_rx_enable(struct tc_data *tc)
u32 value;
int ret;
- regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
- regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
+ regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 5);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
@@ -1496,41 +1508,16 @@ tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
dev_err(tc->dev, "main link disable error: %d\n", ret);
}
-static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adj)
-{
- /* Fixup sync polarities, both hsync and vsync are active low */
- adj->flags = mode->flags;
- adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
- adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
-
- return true;
-}
-
-static int tc_common_atomic_check(struct drm_bridge *bridge,
- struct drm_bridge_state *bridge_state,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- const unsigned int max_khz)
-{
- tc_bridge_mode_fixup(bridge, &crtc_state->mode,
- &crtc_state->adjusted_mode);
-
- if (crtc_state->adjusted_mode.clock > max_khz)
- return -EINVAL;
-
- return 0;
-}
-
static int tc_dpi_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
/* DSI->DPI interface clock limitation: upto 100 MHz */
- return tc_common_atomic_check(bridge, bridge_state, crtc_state,
- conn_state, 100000);
+ if (crtc_state->adjusted_mode.clock > 100000)
+ return -EINVAL;
+
+ return 0;
}
static int tc_edp_atomic_check(struct drm_bridge *bridge,
@@ -1539,8 +1526,10 @@ static int tc_edp_atomic_check(struct drm_bridge *bridge,
struct drm_connector_state *conn_state)
{
/* DPI->(e)DP interface clock limitation: upto 154 MHz */
- return tc_common_atomic_check(bridge, bridge_state, crtc_state,
- conn_state, 154000);
+ if (crtc_state->adjusted_mode.clock > 154000)
+ return -EINVAL;
+
+ return 0;
}
static enum drm_mode_status
@@ -1783,7 +1772,6 @@ static const struct drm_bridge_funcs tc_edp_bridge_funcs = {
.atomic_check = tc_edp_atomic_check,
.atomic_enable = tc_edp_bridge_atomic_enable,
.atomic_disable = tc_edp_bridge_atomic_disable,
- .mode_fixup = tc_bridge_mode_fixup,
.detect = tc_bridge_detect,
.get_edid = tc_get_edid,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
@@ -1925,22 +1913,23 @@ static int tc_mipi_dsi_host_attach(struct tc_data *tc)
static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
{
struct device *dev = tc->dev;
+ struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
/* port@1 is the DPI input/output port */
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
if (ret && ret != -ENODEV)
return ret;
if (panel) {
- struct drm_bridge *panel_bridge;
-
- panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
- tc->panel_bridge = panel_bridge;
+ if (bridge) {
+ tc->panel_bridge = bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
tc->bridge.funcs = &tc_dpi_bridge_funcs;
@@ -2010,9 +1999,10 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc)
for_each_endpoint_of_node(dev->of_node, node) {
of_graph_parse_endpoint(node, &endpoint);
- if (endpoint.port > 2)
+ if (endpoint.port > 2) {
+ of_node_put(node);
return -EINVAL;
-
+ }
mode |= BIT(endpoint.port);
}
@@ -2194,13 +2184,11 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index fd585bf925fe..4c4b77ce8aba 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1072,13 +1072,11 @@ static int tc358768_i2c_probe(struct i2c_client *client,
return mipi_dsi_host_register(&priv->dsi_host);
}
-static int tc358768_i2c_remove(struct i2c_client *client)
+static void tc358768_i2c_remove(struct i2c_client *client)
{
struct tc358768_priv *priv = i2c_get_clientdata(client);
mipi_dsi_host_unregister(&priv->dsi_host);
-
- return 0;
}
static struct i2c_driver tc358768_driver = {
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index f1c6e62b0e1d..02dc12b8151e 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -704,13 +704,11 @@ err_bridge_remove:
return ret;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358775_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index cef454862b67..186a9e2ff24d 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -379,14 +379,12 @@ err_remove_bridge:
return ret;
}
-static int dlpc3433_remove(struct i2c_client *client)
+static void dlpc3433_remove(struct i2c_client *client)
{
struct dlpc *dlpc = i2c_get_clientdata(client);
drm_bridge_remove(&dlpc->bridge);
of_node_put(dlpc->host_node);
-
- return 0;
}
static const struct i2c_device_id dlpc3433_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 14e7aa77e758..7ba9467fff12 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -708,13 +708,11 @@ err_remove_bridge:
return ret;
}
-static int sn65dsi83_remove(struct i2c_client *client)
+static void sn65dsi83_remove(struct i2c_client *client)
{
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id sn65dsi83_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index d6dd4d99a229..3c3561942eb6 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
+#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -68,6 +69,7 @@
#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
+#define HPD_DEBOUNCED_STATE BIT(4)
#define SN_GPIO_IO_REG 0x5E
#define SN_GPIO_INPUT_SHIFT 4
#define SN_GPIO_OUTPUT_SHIFT 0
@@ -92,6 +94,8 @@
#define SN_DATARATE_CONFIG_REG 0x94
#define DP_DATARATE_MASK GENMASK(7, 5)
#define DP_DATARATE(x) ((x) << 5)
+#define SN_TRAINING_SETTING_REG 0x95
+#define SCRAMBLE_DISABLE BIT(4)
#define SN_ML_TX_MODE_REG 0x96
#define ML_TX_MAIN_LINK_OFF 0
#define ML_TX_NORMAL_MODE BIT(0)
@@ -698,11 +702,6 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
- DRM_ERROR("Fix bridge driver to make connector optional!");
- return -EINVAL;
- }
-
pdata->aux.drm_dev = bridge->dev;
ret = drm_dp_aux_register(&pdata->aux);
if (ret < 0) {
@@ -710,15 +709,18 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
return ret;
}
- /* We never want the next bridge to *also* create a connector: */
- flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR;
-
- /* Attach the next bridge */
+ /*
+ * Attach the next bridge.
+ * We never want the next bridge to *also* create a connector.
+ */
ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
- &pdata->bridge, flags);
+ &pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0)
goto err_initted_aux;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
pdata->connector = drm_bridge_connector_init(pdata->bridge.dev,
pdata->bridge.encoder);
if (IS_ERR(pdata->connector)) {
@@ -749,6 +751,29 @@ ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
+ /*
+ * The front and back porch registers are 8 bits, and pulse width
+ * registers are 15 bits, so reject any modes with larger periods.
+ */
+
+ if ((mode->hsync_start - mode->hdisplay) > 0xff)
+ return MODE_HBLANK_WIDE;
+
+ if ((mode->vsync_start - mode->vdisplay) > 0xff)
+ return MODE_VBLANK_WIDE;
+
+ if ((mode->hsync_end - mode->hsync_start) > 0x7fff)
+ return MODE_HSYNC_WIDE;
+
+ if ((mode->vsync_end - mode->vsync_start) > 0x7fff)
+ return MODE_VSYNC_WIDE;
+
+ if ((mode->htotal - mode->hsync_end) > 0xff)
+ return MODE_HBLANK_WIDE;
+
+ if ((mode->vtotal - mode->vsync_end) > 0xff)
+ return MODE_VBLANK_WIDE;
+
return MODE_OK;
}
@@ -779,9 +804,9 @@ static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
}
-static unsigned int ti_sn_bridge_get_bpp(struct ti_sn65dsi86 *pdata)
+static unsigned int ti_sn_bridge_get_bpp(struct drm_connector *connector)
{
- if (pdata->connector->display_info.bpc <= 6)
+ if (connector->display_info.bpc <= 6)
return 18;
else
return 24;
@@ -796,7 +821,7 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
@@ -804,7 +829,7 @@ static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata)
&pdata->bridge.encoder->crtc->state->adjusted_mode;
/* Calculate minimum bit rate based on our pixel clock. */
- bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
+ bit_rate_khz = mode->clock * bpp;
/* Calculate minimum DP data rate, taking 80% as per DP spec */
dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
@@ -1016,12 +1041,21 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ struct drm_connector *connector;
const char *last_err_str = "No supported DP rate";
unsigned int valid_rates;
int dp_rate_idx;
unsigned int val;
int ret = -EINVAL;
int max_dp_lanes;
+ unsigned int bpp;
+
+ connector = drm_atomic_get_new_connector_for_encoder(old_bridge_state->base.state,
+ bridge->encoder);
+ if (!connector) {
+ dev_err_ratelimited(pdata->dev, "Could not get the connector\n");
+ return;
+ }
max_dp_lanes = ti_sn_get_max_lanes(pdata);
pdata->dp_lanes = min(pdata->dp_lanes, max_dp_lanes);
@@ -1040,15 +1074,27 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
/*
* The SN65DSI86 only supports ASSR Display Authentication method and
- * this method is enabled by default. An eDP panel must support this
+ * this method is enabled for eDP panels. An eDP panel must support this
* authentication method. We need to enable this method in the eDP panel
* at DisplayPort address 0x0010A prior to link training.
+ *
+ * As only ASSR is supported by the SN65DSI86, the scrambler needs to be
+ * disabled for full DisplayPort displays.
*/
- drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
- DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_eDP) {
+ drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+ regmap_update_bits(pdata->regmap, SN_TRAINING_SETTING_REG,
+ SCRAMBLE_DISABLE, 0);
+ } else {
+ regmap_update_bits(pdata->regmap, SN_TRAINING_SETTING_REG,
+ SCRAMBLE_DISABLE, SCRAMBLE_DISABLE);
+ }
+
+ bpp = ti_sn_bridge_get_bpp(connector);
/* Set the DP output format (18 bpp or 24 bpp) */
- val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+ val = bpp == 18 ? BPP_18_RGB : 0;
regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
/* DP lane config */
@@ -1059,7 +1105,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
valid_rates = ti_sn_bridge_read_valid_rates(pdata);
/* Train until we run out of rates */
- for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
dp_rate_idx++) {
if (!(valid_rates & BIT(dp_rate_idx)))
@@ -1114,10 +1160,33 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
pm_runtime_put_sync(pdata->dev);
}
+static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ int val = 0;
+
+ pm_runtime_get_sync(pdata->dev);
+ regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
+ pm_runtime_put_autosuspend(pdata->dev);
+
+ return val & HPD_DEBOUNCED_STATE ? connector_status_connected
+ : connector_status_disconnected;
+}
+
+static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+
+ return drm_get_edid(connector, &pdata->aux.ddc);
+}
+
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
.mode_valid = ti_sn_bridge_mode_valid,
+ .get_edid = ti_sn_bridge_get_edid,
+ .detect = ti_sn_bridge_detect,
.atomic_pre_enable = ti_sn_bridge_atomic_pre_enable,
.atomic_enable = ti_sn_bridge_atomic_enable,
.atomic_disable = ti_sn_bridge_atomic_disable,
@@ -1198,10 +1267,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
int ret;
pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
- if (IS_ERR(pdata->next_bridge)) {
- DRM_ERROR("failed to create panel bridge\n");
- return PTR_ERR(pdata->next_bridge);
- }
+ if (IS_ERR(pdata->next_bridge))
+ return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+ "failed to create panel bridge\n");
ti_sn_bridge_parse_lanes(pdata, np);
@@ -1211,6 +1279,11 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
pdata->bridge.funcs = &ti_sn_bridge_funcs;
pdata->bridge.of_node = np;
+ pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
+ ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
+
+ if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
+ pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
drm_bridge_add(&pdata->bridge);
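
With the connector made optional and the new detect/EDID ops in place, a display controller driver downstream of this bridge could build the connector itself. A rough sketch under assumed driver names follows; it is not taken from this diff.

static int my_attach_sn65dsi86(struct drm_device *drm,
			       struct drm_encoder *encoder,
			       struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	/* Ask the bridge chain to skip connector creation... */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	/* ...and build it here; hotplug and EDID go through the bridge ops. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}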
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 401fe61217c7..b9635abbad16 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -394,11 +394,9 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return tfp410_init(&client->dev, true);
}
-static int tfp410_i2c_remove(struct i2c_client *client)
+static void tfp410_i2c_remove(struct i2c_client *client)
{
tfp410_fini(&client->dev);
-
- return 0;
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index e5bab236b3ae..16565a0a5da6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
+#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -40,6 +41,18 @@
#include "drm_dp_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct dp_aux_backlight {
struct backlight_device *base;
struct drm_dp_aux *aux;
@@ -390,6 +403,38 @@ void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+/**
+ * drm_dp_phy_name() - Get the name of the given DP PHY
+ * @dp_phy: The DP PHY identifier
+ *
+ * Given the @dp_phy, get a user-friendly name of the DP PHY, either "DPRX" or
+ * "LTTPR <N>", or "<INVALID DP PHY>" on errors. The returned string is always
+ * non-NULL and valid.
+ *
+ * Returns: Name of the DP PHY.
+ */
+const char *drm_dp_phy_name(enum drm_dp_phy dp_phy)
+{
+ static const char * const phy_names[] = {
+ [DP_PHY_DPRX] = "DPRX",
+ [DP_PHY_LTTPR1] = "LTTPR 1",
+ [DP_PHY_LTTPR2] = "LTTPR 2",
+ [DP_PHY_LTTPR3] = "LTTPR 3",
+ [DP_PHY_LTTPR4] = "LTTPR 4",
+ [DP_PHY_LTTPR5] = "LTTPR 5",
+ [DP_PHY_LTTPR6] = "LTTPR 6",
+ [DP_PHY_LTTPR7] = "LTTPR 7",
+ [DP_PHY_LTTPR8] = "LTTPR 8",
+ };
+
+ if (dp_phy < 0 || dp_phy >= ARRAY_SIZE(phy_names) ||
+ WARN_ON(!phy_names[dp_phy]))
+ return "<INVALID DP PHY>";
+
+ return phy_names[dp_phy];
+}
+EXPORT_SYMBOL(drm_dp_phy_name);
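
A brief, illustrative use of the new helper (not from this diff), keeping log messages uniform across the DPRX and any LTTPRs:

static void my_log_phy_done(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy)
{
	/* drm_dp_phy_name() never returns NULL, so it is safe in format args. */
	drm_dbg_kms(aux->drm_dev, "%s: link training done on %s\n",
		    aux->name, drm_dp_phy_name(dp_phy));
}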
+
void drm_dp_lttpr_link_train_clock_recovery_delay(void)
{
usleep_range(100, 200);
@@ -1597,7 +1642,7 @@ static int drm_dp_aux_reply_duration(const struct drm_dp_aux_msg *msg)
/*
* Calculate the length of the i2c transfer in usec, assuming
- * the i2c bus speed is as specified. Gives the the "worst"
+ * the i2c bus speed is as specified. Gives the "worst"
* case estimate, ie. successful while as long as possible.
* Doesn't account the "MOT" bit, and instead assumes each
* message includes a START, ADDRESS and STOP. Neither does it
@@ -2638,17 +2683,8 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
struct drm_dp_phy_test_params *data, u8 dp_rev)
{
int err, i;
- u8 link_config[2];
u8 test_pattern;
- link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate);
- link_config[1] = data->num_lanes;
- if (data->enhanced_frame_cap)
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2);
- if (err < 0)
- return err;
-
test_pattern = data->phy_pattern;
if (dp_rev < 0x12) {
test_pattern = (test_pattern << 2) &
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 57e65423e50d..ecd22c038c8c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -68,8 +68,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id,
- struct drm_dp_payload *payload);
+ int id, u8 start_slot, u8 num_slots);
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
@@ -1235,57 +1234,6 @@ build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
return 0;
}
-static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_vcpi *vcpi)
-{
- int ret, vcpi_ret;
-
- mutex_lock(&mgr->payload_lock);
- ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
- if (ret > mgr->max_payloads) {
- ret = -EINVAL;
- drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
- goto out_unlock;
- }
-
- vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
- if (vcpi_ret > mgr->max_payloads) {
- ret = -EINVAL;
- drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
- goto out_unlock;
- }
-
- set_bit(ret, &mgr->payload_mask);
- set_bit(vcpi_ret, &mgr->vcpi_mask);
- vcpi->vcpi = vcpi_ret + 1;
- mgr->proposed_vcpis[ret - 1] = vcpi;
-out_unlock:
- mutex_unlock(&mgr->payload_lock);
- return ret;
-}
-
-static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
- int vcpi)
-{
- int i;
-
- if (vcpi == 0)
- return;
-
- mutex_lock(&mgr->payload_lock);
- drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
- clear_bit(vcpi - 1, &mgr->vcpi_mask);
-
- for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i] &&
- mgr->proposed_vcpis[i]->vcpi == vcpi) {
- mgr->proposed_vcpis[i] = NULL;
- clear_bit(i + 1, &mgr->payload_mask);
- }
- }
- mutex_unlock(&mgr->payload_lock);
-}
-
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
@@ -1738,6 +1686,20 @@ drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_port_topology_ref(port, type)
#endif
+struct drm_dp_mst_atomic_payload *
+drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_atomic_payload *payload;
+
+ list_for_each_entry(payload, &state->payloads, next)
+ if (payload->port == port)
+ return payload;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
struct drm_dp_mst_branch *mstb =
@@ -2496,7 +2458,7 @@ fail_put:
return ret;
}
-static void
+static int
drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
struct drm_dp_connection_status_notify *conn_stat)
{
@@ -2509,7 +2471,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
port = drm_dp_get_port(mstb, conn_stat->port_number);
if (!port)
- return;
+ return 0;
if (port->connector) {
if (!port->input && conn_stat->input_port) {
@@ -2562,8 +2524,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
out:
drm_dp_mst_topology_put_port(port);
- if (dowork)
- queue_work(system_long_wq, &mstb->mgr->work);
+ return dowork;
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
@@ -3240,6 +3201,8 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status)
{
+ struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_sideband_msg_tx *txmsg;
u8 nonce[7];
int ret;
@@ -3256,6 +3219,10 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
get_random_bytes(nonce, sizeof(nonce));
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ state = to_drm_dp_mst_topology_state(mgr->base.state);
+ payload = drm_atomic_get_mst_payload_state(state, port);
+
/*
* "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
* transaction at the MST Branch device directly connected to the
@@ -3263,7 +3230,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
*/
txmsg->dst = mgr->mst_primary;
- build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
+ build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3280,6 +3247,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
out:
+ drm_modeset_unlock(&mgr->base.lock);
drm_dp_mst_topology_put_port(port);
out_get_port:
kfree(txmsg);
@@ -3288,238 +3256,162 @@ out_get_port:
EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
- int id,
- struct drm_dp_payload *payload)
+ struct drm_dp_mst_atomic_payload *payload)
{
- int ret;
-
- ret = drm_dp_dpcd_write_payload(mgr, id, payload);
- if (ret < 0) {
- payload->payload_state = 0;
- return ret;
- }
- payload->payload_state = DP_PAYLOAD_LOCAL;
- return 0;
+ return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ payload->time_slots);
}
static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- int id,
- struct drm_dp_payload *payload)
+ struct drm_dp_mst_atomic_payload *payload)
{
int ret;
+ struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
- ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
- if (ret < 0)
- return ret;
- payload->payload_state = DP_PAYLOAD_REMOTE;
+ if (!port)
+ return -EIO;
+
+ ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- int id,
- struct drm_dp_payload *payload)
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload)
{
drm_dbg_kms(mgr->dev, "\n");
+
/* it's okay for these to fail */
- if (port) {
- drm_dp_payload_send_msg(mgr, port, id, 0);
- }
+ drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
+ drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
- drm_dp_dpcd_write_payload(mgr, id, payload);
- payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
- return 0;
-}
-
-static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
- int id,
- struct drm_dp_payload *payload)
-{
- payload->payload_state = 0;
return 0;
}
/**
- * drm_dp_update_payload_part1() - Execute payload update part 1
- * @mgr: manager to use.
- * @start_slot: this is the cur slot
- *
- * NOTE: start_slot is a temporary workaround for non-atomic drivers,
- * this will be removed when non-atomic mst helpers are moved out of the helper
+ * drm_dp_add_payload_part1() - Execute payload update part 1
+ * @mgr: Manager to use.
+ * @mst_state: The MST atomic state
+ * @payload: The payload to write
*
- * This iterates over all proposed virtual channels, and tries to
- * allocate space in the link for them. For 0->slots transitions,
- * this step just writes the VCPI to the MST device. For slots->0
- * transitions, this writes the updated VCPIs and removes the
- * remote VC payloads.
+ * Determines the starting time slot for the given payload, and programs the VCPI for this payload
+ * into hardware. After calling this, the driver should generate ACT and payload packets.
*
- * after calling this the driver should generate ACT and payload
- * packets.
+ * Returns: 0 on success, error code on failure. In the event that this fails,
+ * @payload.vc_start_slot will also be set to -1.
*/
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
+int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload)
{
- struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
- int i, j;
- int cur_slots = start_slot;
- bool skip;
+ int ret;
- mutex_lock(&mgr->payload_lock);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
- struct drm_dp_payload *payload = &mgr->payloads[i];
- bool put_port = false;
+ port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
+ if (!port)
+ return 0;
- /* solve the current payloads - compare to the hw ones
- - update the hw view */
- req_payload.start_slot = cur_slots;
- if (vcpi) {
- port = container_of(vcpi, struct drm_dp_mst_port,
- vcpi);
+ if (mgr->payload_count == 0)
+ mgr->next_start_slot = mst_state->start_slot;
- mutex_lock(&mgr->lock);
- skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
- mutex_unlock(&mgr->lock);
+ payload->vc_start_slot = mgr->next_start_slot;
- if (skip) {
- drm_dbg_kms(mgr->dev,
- "Virtual channel %d is not in current topology\n",
- i);
- continue;
- }
- /* Validated ports don't matter if we're releasing
- * VCPI
- */
- if (vcpi->num_slots) {
- port = drm_dp_mst_topology_get_port_validated(
- mgr, port);
- if (!port) {
- if (vcpi->num_slots == payload->num_slots) {
- cur_slots += vcpi->num_slots;
- payload->start_slot = req_payload.start_slot;
- continue;
- } else {
- drm_dbg_kms(mgr->dev,
- "Fail:set payload to invalid sink");
- mutex_unlock(&mgr->payload_lock);
- return -EINVAL;
- }
- }
- put_port = true;
- }
+ ret = drm_dp_create_payload_step1(mgr, payload);
+ drm_dp_mst_topology_put_port(port);
+ if (ret < 0) {
+ drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
+ payload->port, ret);
+ payload->vc_start_slot = -1;
+ return ret;
+ }
- req_payload.num_slots = vcpi->num_slots;
- req_payload.vcpi = vcpi->vcpi;
- } else {
- port = NULL;
- req_payload.num_slots = 0;
- }
+ mgr->payload_count++;
+ mgr->next_start_slot += payload->time_slots;
- payload->start_slot = req_payload.start_slot;
- /* work out what is required to happen with this payload */
- if (payload->num_slots != req_payload.num_slots) {
-
- /* need to push an update for this payload */
- if (req_payload.num_slots) {
- drm_dp_create_payload_step1(mgr, vcpi->vcpi,
- &req_payload);
- payload->num_slots = req_payload.num_slots;
- payload->vcpi = req_payload.vcpi;
-
- } else if (payload->num_slots) {
- payload->num_slots = 0;
- drm_dp_destroy_payload_step1(mgr, port,
- payload->vcpi,
- payload);
- req_payload.payload_state =
- payload->payload_state;
- payload->start_slot = 0;
- }
- payload->payload_state = req_payload.payload_state;
- }
- cur_slots += req_payload.num_slots;
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_add_payload_part1);
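
A driver-side sketch of this first programming step (assumed code, not part of this diff), called from an encoder enable path once the topology state and payload have been looked up:

static void my_mst_enable_step1(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_topology_state *mst_state,
				struct drm_dp_mst_atomic_payload *payload)
{
	/* Picks the start slot and writes the VCPI to the DPCD payload
	 * table; on failure payload->vc_start_slot is set to -1. */
	if (drm_dp_add_payload_part1(mgr, mst_state, payload))
		return;

	/* The driver then triggers ACT in hardware before part 2. */
}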
- if (put_port)
- drm_dp_mst_topology_put_port(port);
- }
+/**
+ * drm_dp_remove_payload() - Remove an MST payload
+ * @mgr: Manager to use.
+ * @mst_state: The MST atomic state
+ * @payload: The payload to write
+ *
+ * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
+ * the starting time slots of all other payloads which would have been shifted towards the start of
+ * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
+ */
+void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload)
+{
+ struct drm_dp_mst_atomic_payload *pos;
+ bool send_remove = false;
- for (i = 0; i < mgr->max_payloads; /* do nothing */) {
- if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
- i++;
- continue;
- }
+ /* We failed to make the payload, so nothing to do */
+ if (payload->vc_start_slot == -1)
+ return;
- drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
- for (j = i; j < mgr->max_payloads - 1; j++) {
- mgr->payloads[j] = mgr->payloads[j + 1];
- mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
+ mutex_lock(&mgr->lock);
+ send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
+ mutex_unlock(&mgr->lock);
- if (mgr->proposed_vcpis[j] &&
- mgr->proposed_vcpis[j]->num_slots) {
- set_bit(j + 1, &mgr->payload_mask);
- } else {
- clear_bit(j + 1, &mgr->payload_mask);
- }
- }
+ if (send_remove)
+ drm_dp_destroy_payload_step1(mgr, mst_state, payload);
+ else
+ drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
+ payload->vcpi);
- memset(&mgr->payloads[mgr->max_payloads - 1], 0,
- sizeof(struct drm_dp_payload));
- mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
- clear_bit(mgr->max_payloads, &mgr->payload_mask);
+ list_for_each_entry(pos, &mst_state->payloads, next) {
+ if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
+ pos->vc_start_slot -= payload->time_slots;
}
- mutex_unlock(&mgr->payload_lock);
+ payload->vc_start_slot = -1;
- return 0;
+ mgr->payload_count--;
+ mgr->next_start_slot -= payload->time_slots;
}
-EXPORT_SYMBOL(drm_dp_update_payload_part1);
+EXPORT_SYMBOL(drm_dp_remove_payload);
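
And the matching teardown sketch (again assumed driver code) for an encoder disable path; the helper also shifts the start slots of the remaining payloads, so no extra bookkeeping is needed:

static void my_mst_disable(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_topology_state *mst_state,
			   struct drm_dp_mst_atomic_payload *payload)
{
	drm_dp_remove_payload(mgr, mst_state, payload);
}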
/**
- * drm_dp_update_payload_part2() - Execute payload update part 2
- * @mgr: manager to use.
+ * drm_dp_add_payload_part2() - Execute payload update part 2
+ * @mgr: Manager to use.
+ * @state: The global atomic state
+ * @payload: The payload to update
+ *
+ * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
+ * function will send the sideband messages to finish allocating this payload.
*
- * This iterates over all proposed virtual channels, and tries to
- * allocate space in the link for them. For 0->slots transitions,
- * this step writes the remote VC payload commands. For slots->0
- * this just resets some internal state.
+ * Returns: 0 on success, negative error code on failure.
*/
-int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_atomic_state *state,
+ struct drm_dp_mst_atomic_payload *payload)
{
- struct drm_dp_mst_port *port;
- int i;
int ret = 0;
- bool skip;
- mutex_lock(&mgr->payload_lock);
- for (i = 0; i < mgr->max_payloads; i++) {
-
- if (!mgr->proposed_vcpis[i])
- continue;
-
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
-
- mutex_lock(&mgr->lock);
- skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
- mutex_unlock(&mgr->lock);
-
- if (skip)
- continue;
+ /* Skip failed payloads */
+ if (payload->vc_start_slot == -1) {
+ drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ payload->port->connector->name);
+ return -EIO;
+ }
- drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
- if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
- ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
- } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
- ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
- }
- if (ret) {
- mutex_unlock(&mgr->payload_lock);
- return ret;
- }
+ ret = drm_dp_create_payload_step2(mgr, payload);
+ if (ret < 0) {
+ if (!payload->delete)
+ drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
+ payload->port, ret);
+ else
+ drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
+ payload->port, ret);
}
- mutex_unlock(&mgr->payload_lock);
- return 0;
+
+ return ret;
}
-EXPORT_SYMBOL(drm_dp_update_payload_part2);
+EXPORT_SYMBOL(drm_dp_add_payload_part2);
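
The second step pairs with ACT completion; a sketch under the same assumptions:

static void my_mst_enable_step2(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_atomic_state *state,
				struct drm_dp_mst_atomic_payload *payload)
{
	/* Wait for the payload table update to take effect... */
	drm_dp_check_act_status(mgr);

	/* ...then send ALLOCATE_PAYLOAD down the sideband channel. */
	drm_dp_add_payload_part2(mgr, state, payload);
}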
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
@@ -3699,7 +3591,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int ret = 0;
struct drm_dp_mst_branch *mstb = NULL;
- mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -3707,10 +3598,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
- struct drm_dp_payload reset_pay;
- int lane_count;
- int link_rate;
-
WARN_ON(mgr->mst_primary);
/* get dpcd info */
@@ -3721,16 +3608,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
- link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate);
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
- link_rate,
- lane_count);
- if (mgr->pbn_div == 0) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
/* add initial branch device at LCT 1 */
mstb = drm_dp_add_mst_branch_device(1, NULL);
if (mstb == NULL) {
@@ -3750,9 +3627,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
if (ret < 0)
goto out_unlock;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ /* Write reset payload */
+ drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
queue_work(system_long_wq, &mgr->work);
@@ -3764,19 +3640,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- memset(mgr->payloads, 0,
- mgr->max_payloads * sizeof(mgr->payloads[0]));
- memset(mgr->proposed_vcpis, 0,
- mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
- mgr->payload_mask = 0;
- set_bit(0, &mgr->payload_mask);
- mgr->vcpi_mask = 0;
mgr->payload_id_table_cleared = false;
}
out_unlock:
mutex_unlock(&mgr->lock);
- mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
@@ -4047,7 +3915,7 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb = NULL;
struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
- bool hotplug = false;
+ bool hotplug = false, dowork = false;
if (hdr->broadcast) {
const u8 *guid = NULL;
@@ -4070,11 +3938,14 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
hotplug = true;
}
drm_dp_mst_topology_put_mstb(mstb);
+
+ if (dowork)
+ queue_work(system_long_wq, &mgr->work);
return hotplug;
}
@@ -4293,341 +4164,352 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
EXPORT_SYMBOL(drm_dp_mst_get_edid);
/**
- * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
- * @mgr: manager to use
- * @pbn: payload bandwidth to convert into slots.
- *
- * Calculate the number of VCPI slots that will be required for the given PBN
- * value. This function is deprecated, and should not be used in atomic
- * drivers.
- *
- * RETURNS:
- * The total slots required for this port, or error.
- */
-int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
- int pbn)
-{
- int num_slots;
-
- num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-
- /* max. time slots - one slot for MTP header */
- if (num_slots > 63)
- return -ENOSPC;
- return num_slots;
-}
-EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
-
-static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_vcpi *vcpi, int pbn, int slots)
-{
- int ret;
-
- vcpi->pbn = pbn;
- vcpi->aligned_pbn = slots * mgr->pbn_div;
- vcpi->num_slots = slots;
-
- ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-/**
- * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
+ * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
* @state: global atomic state
* @mgr: MST topology manager for the port
- * @port: port to find vcpi slots for
+ * @port: port to find time slots for
* @pbn: bandwidth required for the mode in PBN
- * @pbn_div: divider for DSC mode that takes FEC into account
*
- * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
- * may have had. Any atomic drivers which support MST must call this function
- * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
- * current VCPI allocation for the new state, but only when
- * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
- * to ensure compatibility with userspace applications that still use the
- * legacy modesetting UAPI.
+ * Allocates time slots to @port, replacing any previous time slot allocations it may
+ * have had. Any atomic drivers which support MST must call this function in
+ * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
+ * change the current time slot allocation for the new state, and ensure the MST
+ * atomic state is added whenever the state of payloads in the topology changes.
*
* Allocations set by this function are not checked against the bandwidth
* restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
*
* Additionally, it is OK to call this function multiple times on the same
* @port as needed. It is not OK however, to call this function and
- * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ * drm_dp_atomic_release_time_slots() in the same atomic check phase.
*
* See also:
- * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_atomic_release_time_slots()
* drm_dp_mst_atomic_check()
*
* Returns:
* Total slots in the atomic state assigned for this port, or a negative error
* code if the port no longer exists
*/
-int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn,
- int pbn_div)
+ struct drm_dp_mst_port *port, int pbn)
{
struct drm_dp_mst_topology_state *topology_state;
- struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
- int prev_slots, prev_bw, req_slots;
+ struct drm_dp_mst_atomic_payload *payload = NULL;
+ struct drm_connector_state *conn_state;
+ int prev_slots = 0, prev_bw = 0, req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- /* Find the current allocation for this port, if any */
- list_for_each_entry(pos, &topology_state->vcpis, next) {
- if (pos->port == port) {
- vcpi = pos;
- prev_slots = vcpi->vcpi;
- prev_bw = vcpi->pbn;
+ conn_state = drm_atomic_get_new_connector_state(state, port->connector);
+ topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
- /*
- * This should never happen, unless the driver tries
- * releasing and allocating the same VCPI allocation,
- * which is an error
- */
- if (WARN_ON(!prev_slots)) {
- drm_err(mgr->dev,
- "cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
- port);
- return -EINVAL;
- }
+ /* Find the current allocation for this port, if any */
+ payload = drm_atomic_get_mst_payload_state(topology_state, port);
+ if (payload) {
+ prev_slots = payload->time_slots;
+ prev_bw = payload->pbn;
- break;
+ /*
+ * This should never happen, unless the driver tries
+ * releasing and allocating the same timeslot allocation,
+ * which is an error
+ */
+ if (drm_WARN_ON(mgr->dev, payload->delete)) {
+ drm_err(mgr->dev,
+ "cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
+ port);
+ return -EINVAL;
}
}
- if (!vcpi) {
- prev_slots = 0;
- prev_bw = 0;
- }
-
- if (pbn_div <= 0)
- pbn_div = mgr->pbn_div;
- req_slots = DIV_ROUND_UP(pbn, pbn_div);
+ req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
- drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+ drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_slots, req_slots);
drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
port->connector->base.id, port->connector->name,
port, prev_bw, pbn);
- /* Add the new allocation to the state */
- if (!vcpi) {
- vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
- if (!vcpi)
+ /* Add the new allocation to the state, note the VCPI isn't assigned until the end */
+ if (!payload) {
+ payload = kzalloc(sizeof(*payload), GFP_KERNEL);
+ if (!payload)
return -ENOMEM;
drm_dp_mst_get_port_malloc(port);
- vcpi->port = port;
- list_add(&vcpi->next, &topology_state->vcpis);
+ payload->port = port;
+ payload->vc_start_slot = -1;
+ list_add(&payload->next, &topology_state->payloads);
}
- vcpi->vcpi = req_slots;
- vcpi->pbn = pbn;
+ payload->time_slots = req_slots;
+ payload->pbn = pbn;
return req_slots;
}
-EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
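
On the atomic_check side, a minimal sketch of how a driver would use the renamed helper; the PBN is computed with drm_dp_calc_pbn_mode() purely as an example, and the function name and 24 bpp assumption are illustrative:

static int my_check_time_slots(struct drm_atomic_state *state,
			       struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port,
			       const struct drm_display_mode *adjusted_mode)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, 24, false);

	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/* Bandwidth itself is validated later by drm_dp_mst_atomic_check(). */
	return 0;
}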
/**
- * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
+ * drm_dp_atomic_release_time_slots() - Release allocated time slots
* @state: global atomic state
* @mgr: MST topology manager for the port
- * @port: The port to release the VCPI slots from
+ * @port: The port to release the time slots from
*
- * Releases any VCPI slots that have been allocated to a port in the atomic
- * state. Any atomic drivers which support MST must call this function in
- * their &drm_connector_helper_funcs.atomic_check() callback when the
- * connector will no longer have VCPI allocated (e.g. because its CRTC was
- * removed) when it had VCPI allocated in the previous atomic state.
+ * Releases any time slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function
+ * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
+ * This helper will check whether time slots would be released by the new state and
+ * respond accordingly, along with ensuring the MST state is always added to the
+ * atomic state whenever a new state would modify the state of payloads on the
+ * topology.
*
* It is OK to call this even if @port has been removed from the system.
* Additionally, it is OK to call this function multiple times on the same
* @port as needed. It is not OK however, to call this function and
- * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
* phase.
*
* See also:
- * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_atomic_find_time_slots()
* drm_dp_mst_atomic_check()
*
* Returns:
- * 0 if all slots for this port were added back to
- * &drm_dp_mst_topology_state.avail_slots or negative error code
+ * 0 on success, negative error code otherwise
*/
-int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port)
{
struct drm_dp_mst_topology_state *topology_state;
- struct drm_dp_vcpi_allocation *pos;
- bool found = false;
+ struct drm_dp_mst_atomic_payload *payload;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ bool update_payload = true;
+
+ old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
+ if (!old_conn_state->crtc)
+ return 0;
+
+ /* If the CRTC isn't disabled by this state, don't release its payload */
+ new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
+ if (new_conn_state->crtc) {
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+
+ /* No modeset means no payload changes, so it's safe to not pull in the MST state */
+ if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ update_payload = false;
+ }
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- list_for_each_entry(pos, &topology_state->vcpis, next) {
- if (pos->port == port) {
- found = true;
- break;
- }
- }
- if (WARN_ON(!found)) {
- drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
+ topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
+ if (!update_payload)
+ return 0;
+
+ payload = drm_atomic_get_mst_payload_state(topology_state, port);
+ if (WARN_ON(!payload)) {
+ drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
port, &topology_state->base);
return -EINVAL;
}
- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
- if (pos->vcpi) {
+ if (new_conn_state->crtc)
+ return 0;
+
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
+ if (!payload->delete) {
drm_dp_mst_put_port_malloc(port);
- pos->vcpi = 0;
- pos->pbn = 0;
+ payload->pbn = 0;
+ payload->delete = true;
+ topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
}
return 0;
}
-EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
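
The release side is now meant to be called unconditionally from the connector's atomic_check; the helper decides internally whether anything needs releasing. A sketch with a hypothetical connector wrapper:

struct my_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;
};

static int my_connector_atomic_check(struct drm_connector *connector,
				     struct drm_atomic_state *state)
{
	struct my_mst_connector *c =
		container_of(connector, struct my_mst_connector, base);

	return drm_dp_atomic_release_time_slots(state, c->mgr, c->port);
}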
/**
- * drm_dp_mst_update_slots() - updates the slot info depending on the DP ecoding format
- * @mst_state: mst_state to update
- * @link_encoding_cap: the ecoding format on the link
- */
-void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
-{
- if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
- mst_state->total_avail_slots = 64;
- mst_state->start_slot = 0;
- } else {
- mst_state->total_avail_slots = 63;
- mst_state->start_slot = 1;
- }
-
- DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
- (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
- mst_state);
-}
-EXPORT_SYMBOL(drm_dp_mst_update_slots);
-
-/**
- * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
- * @mgr: manager for this port
- * @port: port to allocate a virtual channel for.
- * @pbn: payload bandwidth number to request
- * @slots: returned number of slots for this PBN.
+ * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
+ * @state: global atomic state
+ *
+ * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
+ * currently assigned to an MST topology. Drivers must call this hook from their
+ * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
+ *
+ * Returns:
+ * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
*/
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn, int slots)
+int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
{
- int ret;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, j, commit_idx, num_commit_deps;
- if (slots < 0)
- return false;
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ if (!mst_state->pending_crtc_mask)
+ continue;
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return false;
+ num_commit_deps = hweight32(mst_state->pending_crtc_mask);
+ mst_state->commit_deps = kmalloc_array(num_commit_deps,
+ sizeof(*mst_state->commit_deps), GFP_KERNEL);
+ if (!mst_state->commit_deps)
+ return -ENOMEM;
+ mst_state->num_commit_deps = num_commit_deps;
- if (port->vcpi.vcpi > 0) {
- drm_dbg_kms(mgr->dev,
- "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
- port->vcpi.vcpi, port->vcpi.pbn, pbn);
- if (pbn == port->vcpi.pbn) {
- drm_dp_mst_topology_put_port(port);
- return true;
+ commit_idx = 0;
+ for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
+ if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
+ mst_state->commit_deps[commit_idx++] =
+ drm_crtc_commit_get(crtc_state->commit);
+ }
}
}
- ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
- if (ret) {
- drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
- DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
- drm_dp_mst_topology_put_port(port);
- goto out;
- }
- drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
-
- /* Keep port allocated until its payload has been removed */
- drm_dp_mst_get_port_malloc(port);
- drm_dp_mst_topology_put_port(port);
- return true;
-out:
- return false;
+ return 0;
}
-EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
+EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
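As a usage sketch (illustrative names, not from this patch), hooking the helper up only requires pointing the mode-config helper vtable at it:

static const struct drm_mode_config_helper_funcs my_mode_config_helpers = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
	.atomic_commit_tail = my_atomic_commit_tail,
};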
-int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
+ * prepare new MST state for commit
+ * @state: global atomic state
+ *
+ * Goes through any MST topologies in this atomic state, and waits for any pending commits which
+ * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
+ * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
+ * with each other by forcing them to be executed sequentially in situations where the only resources
+ * the modeset objects in these commits share are an MST topology.
+ *
+ * This function also prepares the new MST state for commit by performing some state preparation
+ * which can't be done until this point, such as reading back the final VC start slots (which are
+ * determined at commit-time) from the previous state.
+ *
+ * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
+ * or whatever their equivalent of that is.
+ */
+void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
{
- int slots = 0;
+ struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
+ int i, j, ret;
+
+ for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
+ for (j = 0; j < old_mst_state->num_commit_deps; j++) {
+ ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
+ if (ret < 0)
+ drm_err(state->dev, "Failed to wait for %s: %d\n",
+ old_mst_state->commit_deps[j]->crtc->name, ret);
+ }
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return slots;
+ /* Now that previous state is committed, it's safe to copy over the start slot
+ * assignments
+ */
+ list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
+ if (old_payload->delete)
+ continue;
- slots = port->vcpi.num_slots;
- drm_dp_mst_topology_put_port(port);
- return slots;
+ new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
+ old_payload->port);
+ new_payload->vc_start_slot = old_payload->vc_start_slot;
+ }
+ }
}
-EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
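A minimal commit-tail ordering sketch (driver-specific hardware programming omitted, my_atomic_commit_tail is an illustrative name) showing where the MST wait fits relative to the generic dependency wait:

static void my_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_wait_for_dependencies(state);
	drm_dp_mst_atomic_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}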
/**
- * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
+ * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
+ * in SST mode
+ * @new_conn_state: The new connector state of the &drm_connector
+ * @mgr: The MST topology manager for the &drm_connector
+ *
+ * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
+ * serialize non-blocking commits happening on the real DP connector of an MST topology switching
+ * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
+ * MST topology will never share the same &drm_encoder.
+ *
+ * This function takes care of this serialization issue by checking a root MST connector's atomic
+ * state to determine if it is about to have a modeset - and then pulling in the MST topology state
+ * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
*
- * This just resets the number of slots for the ports VCPI for later programming.
+ * Drivers implementing MST must call this function from the
+ * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
+ * driving MST sinks.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise
*/
-void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr)
{
- /*
- * A port with VCPI will remain allocated until its VCPI is
- * released, no verified ref needed
- */
+ struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, new_conn_state->connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_dp_mst_topology_state *mst_state = NULL;
+
+ if (new_conn_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
+ }
+ }
+
+ if (old_conn_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
+ if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (!mst_state) {
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+ }
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
+ }
+ }
- port->vcpi.num_slots = 0;
+ return 0;
}
-EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
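A sketch of the expected call site from the physical DP connector's atomic_check helper (connector_to_my_dp() and the mst_mgr field are placeholders):

static int my_dp_connector_atomic_check(struct drm_connector *connector,
					struct drm_atomic_state *state)
{
	struct my_dp *dp = connector_to_my_dp(connector);
	struct drm_connector_state *new_cs =
		drm_atomic_get_new_connector_state(state, connector);

	return drm_dp_mst_root_conn_atomic_check(new_cs, &dp->mst_mgr);
}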
/**
- * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
- * @mgr: manager for this port
- * @port: port to deallocate vcpi for
- *
- * This can be called unconditionally, regardless of whether
- * drm_dp_mst_allocate_vcpi() succeeded or not.
+ * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
+ * @mst_state: mst_state to update
+ * @link_encoding_cap: the encoding format on the link
*/
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port)
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
{
- bool skip;
-
- if (!port->vcpi.vcpi)
- return;
-
- mutex_lock(&mgr->lock);
- skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
- mutex_unlock(&mgr->lock);
-
- if (skip)
- return;
+ if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
+ mst_state->total_avail_slots = 64;
+ mst_state->start_slot = 0;
+ } else {
+ mst_state->total_avail_slots = 63;
+ mst_state->start_slot = 1;
+ }
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- port->vcpi.num_slots = 0;
- port->vcpi.pbn = 0;
- port->vcpi.aligned_pbn = 0;
- port->vcpi.vcpi = 0;
- drm_dp_mst_put_port_malloc(port);
+ DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
+ (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
+ mst_state);
}
-EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
+EXPORT_SYMBOL(drm_dp_mst_update_slots);
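A hedged usage sketch from a driver's atomic check path; how the driver determines that the link runs 128b/132b (uses_128b132b below) is driver-specific:

	mst_state = drm_atomic_get_mst_topology_state(state, &dp->mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	drm_dp_mst_update_slots(mst_state, uses_128b132b ?
				DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B);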
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, struct drm_dp_payload *payload)
+ int id, u8 start_slot, u8 num_slots)
{
u8 payload_alloc[3], status;
int ret;
@@ -4637,8 +4519,8 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
DP_PAYLOAD_TABLE_UPDATED);
payload_alloc[0] = id;
- payload_alloc[1] = payload->start_slot;
- payload_alloc[2] = payload->num_slots;
+ payload_alloc[1] = start_slot;
+ payload_alloc[2] = num_slots;
ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
if (ret != 3) {
@@ -4853,8 +4735,9 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr)
{
- int i;
- struct drm_dp_mst_port *port;
+ struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_atomic_payload *payload;
+ int i, ret;
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
@@ -4863,36 +4746,35 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
/* dump VCPIs */
mutex_unlock(&mgr->lock);
- mutex_lock(&mgr->payload_lock);
- seq_printf(m, "\n*** VCPI Info ***\n");
- seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads);
+ ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
+ if (ret < 0)
+ return;
+
+ state = to_drm_dp_mst_topology_state(mgr->base.state);
+ seq_printf(m, "\n*** Atomic state info ***\n");
+ seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
+ state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
- seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n");
+ seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->proposed_vcpis[i]) {
+ list_for_each_entry(payload, &state->payloads, next) {
char name[14];
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
- fetch_monitor_name(mgr, port, name, sizeof(name));
- seq_printf(m, "%10d%10d%10d%10d%20s\n",
+ if (payload->vcpi != i || payload->delete)
+ continue;
+
+ fetch_monitor_name(mgr, payload->port, name, sizeof(name));
+ seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
i,
- port->port_num,
- port->vcpi.vcpi,
- port->vcpi.num_slots,
+ payload->port->port_num,
+ payload->vcpi,
+ payload->vc_start_slot,
+ payload->vc_start_slot + payload->time_slots - 1,
+ payload->pbn,
+ payload->dsc_enabled ? "Y" : "N",
(*name != 0) ? name : "Unknown");
- } else
- seq_printf(m, "%6d - Unused\n", i);
- }
- seq_printf(m, "\n*** Payload Info ***\n");
- seq_printf(m, "| idx | state | start slot | # slots |\n");
- for (i = 0; i < mgr->max_payloads; i++) {
- seq_printf(m, "%10d%10d%15d%10d\n",
- i,
- mgr->payloads[i].payload_state,
- mgr->payloads[i].start_slot,
- mgr->payloads[i].num_slots);
+ }
}
- mutex_unlock(&mgr->payload_lock);
seq_printf(m, "\n*** DPCD Info ***\n");
mutex_lock(&mgr->lock);
@@ -4907,14 +4789,14 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- if (ret) {
+ if (ret != 2) {
seq_printf(m, "faux/mst read failed\n");
goto out;
}
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- if (ret) {
+ if (ret != 1) {
seq_printf(m, "mst ctrl read failed\n");
goto out;
}
@@ -4922,7 +4804,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- if (ret) {
+ if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
seq_printf(m, "branch oui read failed\n");
goto out;
}
@@ -4938,7 +4820,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
out:
mutex_unlock(&mgr->lock);
-
+ drm_modeset_unlock(&mgr->base.lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
@@ -5060,7 +4942,7 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
struct drm_dp_mst_topology_state *state, *old_state =
to_dp_mst_topology_state(obj->state);
- struct drm_dp_vcpi_allocation *pos, *vcpi;
+ struct drm_dp_mst_atomic_payload *pos, *payload;
state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
if (!state)
@@ -5068,25 +4950,28 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- INIT_LIST_HEAD(&state->vcpis);
+ INIT_LIST_HEAD(&state->payloads);
+ state->commit_deps = NULL;
+ state->num_commit_deps = 0;
+ state->pending_crtc_mask = 0;
- list_for_each_entry(pos, &old_state->vcpis, next) {
- /* Prune leftover freed VCPI allocations */
- if (!pos->vcpi)
+ list_for_each_entry(pos, &old_state->payloads, next) {
+ /* Prune leftover freed timeslot allocations */
+ if (pos->delete)
continue;
- vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
- if (!vcpi)
+ payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
+ if (!payload)
goto fail;
- drm_dp_mst_get_port_malloc(vcpi->port);
- list_add(&vcpi->next, &state->vcpis);
+ drm_dp_mst_get_port_malloc(payload->port);
+ list_add(&payload->next, &state->payloads);
}
return &state->base;
fail:
- list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
+ list_for_each_entry_safe(pos, payload, &state->payloads, next) {
drm_dp_mst_put_port_malloc(pos->port);
kfree(pos);
}
@@ -5100,15 +4985,20 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
- struct drm_dp_vcpi_allocation *pos, *tmp;
+ struct drm_dp_mst_atomic_payload *pos, *tmp;
+ int i;
- list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
- /* We only keep references to ports with non-zero VCPIs */
- if (pos->vcpi)
+ list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
+ /* We only keep references to ports with active payloads */
+ if (!pos->delete)
drm_dp_mst_put_port_malloc(pos->port);
kfree(pos);
}
+ for (i = 0; i < mst_state->num_commit_deps; i++)
+ drm_crtc_commit_put(mst_state->commit_deps[i]);
+
+ kfree(mst_state->commit_deps);
kfree(mst_state);
}
@@ -5135,7 +5025,7 @@ static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_topology_state *state)
{
- struct drm_dp_vcpi_allocation *vcpi;
+ struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_port *port;
int pbn_used = 0, ret;
bool found = false;
@@ -5143,9 +5033,9 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
/* Check that we have at least one port in our state that's downstream
* of this branch, otherwise we can skip this branch
*/
- list_for_each_entry(vcpi, &state->vcpis, next) {
- if (!vcpi->pbn ||
- !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+ list_for_each_entry(payload, &state->payloads, next) {
+ if (!payload->pbn ||
+ !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
continue;
found = true;
@@ -5176,25 +5066,15 @@ static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
struct drm_dp_mst_topology_state *state)
{
- struct drm_dp_vcpi_allocation *vcpi;
+ struct drm_dp_mst_atomic_payload *payload;
int pbn_used = 0;
if (port->pdt == DP_PEER_DEVICE_NONE)
return 0;
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
- bool found = false;
-
- list_for_each_entry(vcpi, &state->vcpis, next) {
- if (vcpi->port != port)
- continue;
- if (!vcpi->pbn)
- return 0;
-
- found = true;
- break;
- }
- if (!found)
+ payload = drm_atomic_get_mst_payload_state(state, port);
+ if (!payload)
return 0;
/*
@@ -5208,7 +5088,7 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
return -EINVAL;
}
- pbn_used = vcpi->pbn;
+ pbn_used = payload->pbn;
} else {
pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
state);
@@ -5230,28 +5110,28 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
}
static inline int
-drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state)
+drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
{
- struct drm_dp_vcpi_allocation *vcpi;
+ struct drm_dp_mst_atomic_payload *payload;
int avail_slots = mst_state->total_avail_slots, payload_count = 0;
- list_for_each_entry(vcpi, &mst_state->vcpis, next) {
- /* Releasing VCPI is always OK-even if the port is gone */
- if (!vcpi->vcpi) {
- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n",
- vcpi->port);
+ list_for_each_entry(payload, &mst_state->payloads, next) {
+ /* Releasing payloads is always OK, even if the port is gone */
+ if (payload->delete) {
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
+ payload->port);
continue;
}
- drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n",
- vcpi->port, vcpi->vcpi);
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
+ payload->port, payload->time_slots);
- avail_slots -= vcpi->vcpi;
+ avail_slots -= payload->time_slots;
if (avail_slots < 0) {
drm_dbg_atomic(mgr->dev,
- "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
- vcpi->port, mst_state, avail_slots + vcpi->vcpi);
+ "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
+ payload->port, mst_state, avail_slots + payload->time_slots);
return -ENOSPC;
}
@@ -5261,9 +5141,22 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
mgr, mst_state, mgr->max_payloads);
return -EINVAL;
}
+
+ /* Assign a VCPI */
+ if (!payload->vcpi) {
+ payload->vcpi = ffz(mst_state->payload_mask) + 1;
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
+ payload->port, payload->vcpi);
+ mst_state->payload_mask |= BIT(payload->vcpi - 1);
+ }
}
- drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
- mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
+
+ if (!payload_count)
+ mst_state->pbn_div = 0;
+
+ drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
+ mgr, mst_state, mst_state->pbn_div, avail_slots,
+ mst_state->total_avail_slots - avail_slots);
return 0;
}
@@ -5284,7 +5177,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_vcpi_allocation *pos;
+ struct drm_dp_mst_atomic_payload *pos;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_crtc *crtc;
@@ -5295,7 +5188,7 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (IS_ERR(mst_state))
return -EINVAL;
- list_for_each_entry(pos, &mst_state->vcpis, next) {
+ list_for_each_entry(pos, &mst_state->payloads, next) {
connector = pos->port->connector;
@@ -5334,7 +5227,6 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
* @state: Pointer to the new drm_atomic_state
* @port: Pointer to the affected MST Port
* @pbn: Newly recalculated bw required for link with DSC enabled
- * @pbn_div: Divider to calculate correct number of pbn per slot
* @enable: Boolean flag to enable or disable DSC on the port
*
* This function enables DSC on the given Port
@@ -5345,54 +5237,46 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
*/
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
struct drm_dp_mst_port *port,
- int pbn, int pbn_div,
- bool enable)
+ int pbn, bool enable)
{
struct drm_dp_mst_topology_state *mst_state;
- struct drm_dp_vcpi_allocation *pos;
- bool found = false;
- int vcpi = 0;
+ struct drm_dp_mst_atomic_payload *payload;
+ int time_slots = 0;
mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
-
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- list_for_each_entry(pos, &mst_state->vcpis, next) {
- if (pos->port == port) {
- found = true;
- break;
- }
- }
-
- if (!found) {
+ payload = drm_atomic_get_mst_payload_state(mst_state, port);
+ if (!payload) {
drm_dbg_atomic(state->dev,
- "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+ "[MST PORT:%p] Couldn't find payload in mst state %p\n",
port, mst_state);
return -EINVAL;
}
- if (pos->dsc_enabled == enable) {
+ if (payload->dsc_enabled == enable) {
drm_dbg_atomic(state->dev,
- "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
- port, enable, pos->vcpi);
- vcpi = pos->vcpi;
+ "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
+ port, enable, payload->time_slots);
+ time_slots = payload->time_slots;
}
if (enable) {
- vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+ time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
drm_dbg_atomic(state->dev,
- "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
- port, vcpi);
- if (vcpi < 0)
+ "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
+ port, time_slots);
+ if (time_slots < 0)
return -EINVAL;
}
- pos->dsc_enabled = enable;
+ payload->dsc_enabled = enable;
- return vcpi;
+ return time_slots;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
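Illustrative call site (dsc_pbn is a placeholder for the bandwidth recomputed with DSC enabled); the return value is the new time-slot count or a negative error:

	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
	if (slots < 0)
		return slots;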
+
/**
* drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
* atomic update is valid
@@ -5400,15 +5284,15 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
*
* Checks the given topology state for an atomic update to ensure that it's
* valid. This includes checking whether there's enough bandwidth to support
- * the new VCPI allocations in the atomic update.
+ * the new timeslot allocations in the atomic update.
*
* Any atomic drivers supporting DP MST must make sure to call this after
* checking the rest of their state in their
* &drm_mode_config_funcs.atomic_check() callback.
*
* See also:
- * drm_dp_atomic_find_vcpi_slots()
- * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_atomic_find_time_slots()
+ * drm_dp_atomic_release_time_slots()
*
* Returns:
*
@@ -5424,7 +5308,7 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
if (!mgr->mst_state)
continue;
- ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+ ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
if (ret)
break;
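For reference (not from this patch), the expected call order in a driver's top-level atomic check is roughly:

static int my_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}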
@@ -5450,7 +5334,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
/**
* drm_atomic_get_mst_topology_state: get MST topology state
- *
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
@@ -5470,14 +5353,37 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
+ * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
+ * state vtable so that the private object state returned is that of a MST
+ * topology object.
+ *
+ * Returns:
+ *
+ * The MST topology state, or NULL if there's no topology state for this MST mgr
+ * in the global atomic state
+ */
+struct drm_dp_mst_topology_state *
+drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_private_state *priv_state =
+ drm_atomic_get_new_private_obj_state(state, &mgr->base);
+
+ return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
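Sketch of the intended use from commit code, where a NULL return simply means the commit did not touch this topology (dp->mst_mgr is a placeholder):

	mst_state = drm_atomic_get_new_mst_topology_state(state, &dp->mst_mgr);
	if (!mst_state)
		return;		/* no payload changes in this commit */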
+
+/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
* @dev: device providing this structure - for i2c addition.
* @aux: DP helper aux channel to talk to this device
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
- * @max_lane_count: maximum number of lanes this GPU supports
- * @max_link_rate: maximum link rate per lane this GPU supports in kHz
* @conn_base_id: the connector object ID the MST device is connected to.
*
* Return 0 for success, or negative error code on failure
@@ -5485,14 +5391,12 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes, int max_payloads,
- int max_lane_count, int max_link_rate,
int conn_base_id)
{
struct drm_dp_mst_topology_state *mst_state;
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
- mutex_init(&mgr->payload_lock);
mutex_init(&mgr->delayed_destroy_lock);
mutex_init(&mgr->up_req_lock);
mutex_init(&mgr->probe_lock);
@@ -5522,19 +5426,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mgr->aux = aux;
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
- mgr->max_lane_count = max_lane_count;
- mgr->max_link_rate = max_link_rate;
mgr->conn_base_id = conn_base_id;
- if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
- max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
- return -EINVAL;
- mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
- if (!mgr->payloads)
- return -ENOMEM;
- mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
- if (!mgr->proposed_vcpis)
- return -ENOMEM;
- set_bit(0, &mgr->payload_mask);
mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
if (mst_state == NULL)
@@ -5544,7 +5436,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mst_state->start_slot = 1;
mst_state->mgr = mgr;
- INIT_LIST_HEAD(&mst_state->vcpis);
+ INIT_LIST_HEAD(&mst_state->payloads);
drm_atomic_private_obj_init(dev, &mgr->base,
&mst_state->base,
@@ -5567,19 +5459,12 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
destroy_workqueue(mgr->delayed_destroy_wq);
mgr->delayed_destroy_wq = NULL;
}
- mutex_lock(&mgr->payload_lock);
- kfree(mgr->payloads);
- mgr->payloads = NULL;
- kfree(mgr->proposed_vcpis);
- mgr->proposed_vcpis = NULL;
- mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
mutex_destroy(&mgr->delayed_destroy_lock);
- mutex_destroy(&mgr->payload_lock);
mutex_destroy(&mgr->qlock);
mutex_destroy(&mgr->lock);
mutex_destroy(&mgr->up_req_lock);
@@ -5908,8 +5793,10 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
/* Enpoint decompression with DP-to-DP peer device */
if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
(endpoint_fec & DP_FEC_CAPABLE) &&
- (upstream_dsc & 0x2) /* DSC passthrough */)
+ (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
+ port->passthrough_aux = &immediate_upstream_port->aux;
return &port->aux;
+ }
/* Virtual DPCD decompression with DP-to-DP peer device */
return &immediate_upstream_port->aux;
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 81881e81ceae..c3ad4ab2b456 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -35,6 +35,19 @@
* HDMI 2.0 specification. It is a point-to-point protocol that allows the
* HDMI source and HDMI sink to exchange data. The same I2C interface that
* is used to access EDID serves as the transport mechanism for SCDC.
+ *
+ * Note: The SCDC status is going to be lost when the display is
+ * disconnected. This can happen physically when the user disconnects
+ * the cable, but also when a display is switched on (such as waking up
+ * a TV).
+ *
+ * This is further complicated by the fact that, upon a disconnection /
+ * reconnection, KMS won't change the mode on its own. This means that
+ * one can't just rely on setting the SCDC status on enable, but also
+ * has to track the connector status changes using interrupts and
+ * restore the SCDC status. The typical solution for this is to trigger an
+ * empty modeset in drm_connector_helper_funcs.detect_ctx(), like what vc4 does
+ * in vc4_hdmi_reset_link().
*/
#define SCDC_I2C_SLAVE_ADDRESS 0x54
diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
index fdb7d5c17ba1..3b8fdeeafd53 100644
--- a/drivers/gpu/drm/drm_aperture.c
+++ b/drivers/gpu/drm/drm_aperture.c
@@ -74,7 +74,7 @@
* given framebuffer memory. Ownership of the framebuffer memory is achieved
* by calling devm_aperture_acquire_from_firmware(). On success, the driver
* is the owner of the framebuffer range. The function fails if the
- * framebuffer is already by another driver. See below for an example.
+ * framebuffer is already owned by another driver. See below for an example.
*
* .. code-block:: c
*
@@ -112,7 +112,7 @@
*
* The generic driver is now subject to forced removal by other drivers. This
* only works for platform drivers that support hot unplug.
- * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al
+ * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al.
* for the registered framebuffer range, the aperture helpers call
* platform_device_unregister() and the generic driver unloads itself. It
* may not access the device's registers, framebuffer memory, ROM, etc
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
* @primary: also kick vga16fb if present
* @req_driver: requesting DRM driver
*
- * This function removes graphics device drivers which use memory range described by
+ * This function removes graphics device drivers which use the memory range described by
* @base and @size.
*
* Returns:
@@ -182,8 +182,8 @@ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
* @pdev: PCI device
* @req_driver: requesting DRM driver
*
- * This function removes graphics device drivers using memory range configured
- * for any of @pdev's memory bars. The function assumes that PCI device with
+ * This function removes graphics device drivers using the memory range configured
+ * for any of @pdev's memory bars. The function assumes that a PCI device with
* shadowed ROM drives a primary display and so kicks out vga16fb.
*
* Returns:
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8bf41aa24068..98cc3137c062 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -38,7 +38,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -703,8 +702,12 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (funcs->atomic_check)
ret = funcs->atomic_check(connector, state);
- if (ret)
+ if (ret) {
+ drm_dbg_atomic(dev,
+ "[CONNECTOR:%d:%s] driver check failed\n",
+ connector->base.id, connector->name);
return ret;
+ }
connectors_mask |= BIT(i);
}
@@ -746,8 +749,12 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (funcs->atomic_check)
ret = funcs->atomic_check(connector, state);
- if (ret)
+ if (ret) {
+ drm_dbg_atomic(dev,
+ "[CONNECTOR:%d:%s] driver check failed\n",
+ connector->base.id, connector->name);
return ret;
+ }
}
/*
@@ -779,6 +786,45 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
+ * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state
+ * @encoder: encoder state to check
+ * @conn_state: connector state to check
+ *
+ * Checks if the writeback connector state is valid, and returns an error if it
+ * isn't.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int
+drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_writeback_job *wb_job = conn_state->writeback_job;
+ struct drm_property_blob *pixel_format_blob;
+ struct drm_framebuffer *fb;
+ size_t i, nformats;
+ u32 *formats;
+
+ if (!wb_job || !wb_job->fb)
+ return 0;
+
+ pixel_format_blob = wb_job->connector->pixel_formats_blob_ptr;
+ nformats = pixel_format_blob->length / sizeof(u32);
+ formats = pixel_format_blob->data;
+ fb = wb_job->fb;
+
+ for (i = 0; i < nformats; i++)
+ if (fb->format->format == formats[i])
+ return 0;
+
+ drm_dbg_kms(encoder->dev, "Invalid pixel format %p4cc\n", &fb->format->format);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_wb_encoder_state);
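A minimal sketch of calling the new helper from a writeback encoder's atomic_check (names are illustrative):

static int my_wb_encoder_atomic_check(struct drm_encoder *encoder,
				      struct drm_crtc_state *crtc_state,
				      struct drm_connector_state *conn_state)
{
	/* Reject framebuffers whose format the writeback connector can't take */
	return drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
}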
+
+/**
* drm_atomic_helper_check_plane_state() - Check plane state for validity
* @plane_state: plane state to check
* @crtc_state: CRTC state to check
@@ -1789,7 +1835,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
struct drm_plane_state *old_plane_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
- int i, n_planes = 0;
+ int i, ret, n_planes = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
@@ -1800,19 +1846,34 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
n_planes++;
/* FIXME: we support only single plane updates for now */
- if (n_planes != 1)
+ if (n_planes != 1) {
+ drm_dbg_atomic(dev,
+ "only single plane async updates are supported\n");
return -EINVAL;
+ }
if (!new_plane_state->crtc ||
- old_plane_state->crtc != new_plane_state->crtc)
+ old_plane_state->crtc != new_plane_state->crtc) {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] async update cannot change CRTC\n",
+ plane->base.id, plane->name);
return -EINVAL;
+ }
funcs = plane->helper_private;
- if (!funcs->atomic_async_update)
+ if (!funcs->atomic_async_update) {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] driver does not support async updates\n",
+ plane->base.id, plane->name);
return -EINVAL;
+ }
- if (new_plane_state->fence)
+ if (new_plane_state->fence) {
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] missing fence for async update\n",
+ plane->base.id, plane->name);
return -EINVAL;
+ }
/*
* Don't do an async update if there is an outstanding commit modifying
@@ -1827,7 +1888,12 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
- return funcs->atomic_async_check(plane, state);
+ ret = funcs->atomic_async_check(plane, state);
+ if (ret != 0)
+ drm_dbg_atomic(dev,
+ "[PLANE:%d:%s] driver async check failed\n",
+ plane->base.id, plane->name);
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 6e433d465f41..cf92a9ae8034 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -140,14 +140,14 @@ struct drm_master *drm_master_create(struct drm_device *dev)
kref_init(&master->refcount);
drm_master_legacy_init(master);
- idr_init(&master->magic_map);
+ idr_init_base(&master->magic_map, 1);
master->dev = dev;
/* initialize the tree of output resource lessees */
INIT_LIST_HEAD(&master->lessees);
INIT_LIST_HEAD(&master->lessee_list);
idr_init(&master->leases);
- idr_init(&master->lessee_idr);
+ idr_init_base(&master->lessee_idr, 1);
return master;
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 6abf7a2407e9..1545c50fd1c8 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -847,8 +847,8 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
struct drm_connector_state *conn_state,
u32 out_bus_fmt)
{
+ unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- unsigned int num_in_bus_fmts, i;
struct drm_bridge *prev_bridge;
u32 *in_bus_fmts;
int ret;
@@ -969,7 +969,7 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
struct drm_connector *conn = conn_state->connector;
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
- unsigned int i, num_out_bus_fmts;
+ unsigned int i, num_out_bus_fmts = 0;
struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index af3b7395bf69..2b230b4d6942 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -264,7 +264,7 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
dumb_args.width = width;
dumb_args.height = height;
- dumb_args.bpp = info->cpp[0] * 8;
+ dumb_args.bpp = drm_format_info_bpp(info, 0);
ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
if (ret)
goto err_delete;
@@ -373,7 +373,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
int ret;
info = drm_format_info(format);
- fb_req.bpp = info->cpp[0] * 8;
+ fb_req.bpp = drm_format_info_bpp(info, 0);
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 17c6c3eefcd6..d021497841b8 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -575,7 +575,7 @@ int drm_plane_create_color_properties(struct drm_plane *plane,
len++;
}
- prop = drm_property_create_enum(dev, 0, "COLOR_RANGE",
+ prop = drm_property_create_enum(dev, 0, "COLOR_RANGE",
enum_list, len);
if (!prop)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 1ab083b35e3b..e3142c8142b3 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -22,15 +22,16 @@
#include <drm/drm_auth.h>
#include <drm/drm_connector.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_panel.h>
-#include <drm/drm_utils.h>
#include <drm/drm_print.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_sysfs.h>
+#include <drm/drm_utils.h>
#include <linux/fb.h>
#include <linux/uaccess.h>
@@ -214,23 +215,11 @@ void drm_connector_free_work_fn(struct work_struct *work)
}
}
-/**
- * drm_connector_init - Init a preallocated connector
- * @dev: DRM device
- * @connector: the connector to init
- * @funcs: callbacks for this connector
- * @connector_type: user visible type of the connector
- *
- * Initialises a preallocated connector. Connectors should be
- * subclassed as part of driver connector objects.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_init(struct drm_device *dev,
- struct drm_connector *connector,
- const struct drm_connector_funcs *funcs,
- int connector_type)
+static int __drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@@ -278,6 +267,9 @@ int drm_connector_init(struct drm_device *dev,
goto out_put_type_id;
}
+ /* provide ddc symlink in sysfs */
+ connector->ddc = ddc;
+
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
@@ -334,6 +326,38 @@ out_put:
return ret;
}
+
+/**
+ * drm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * At driver unload time the driver's &drm_connector_funcs.destroy hook
+ * should call drm_connector_cleanup() and free the connector structure.
+ * The connector structure should not be allocated with devm_kzalloc().
+ *
+ * Note: consider using drmm_connector_init() instead of
+ * drm_connector_init() to let the DRM managed resource infrastructure
+ * take care of cleanup and deallocation.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type)
+{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+}
EXPORT_SYMBOL(drm_connector_init);
/**
@@ -347,8 +371,16 @@ EXPORT_SYMBOL(drm_connector_init);
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
+ * At driver unload time the driver's &drm_connector_funcs.destroy hook
+ * should call drm_connector_cleanup() and free the connector structure.
+ * The connector structure should not be allocated with devm_kzalloc().
+ *
* Ensures that the ddc field of the connector is correctly set.
*
+ * Note: consider using drmm_connector_init() instead of
+ * drm_connector_init_with_ddc() to let the DRM managed resource
+ * infrastructure take care of cleanup and deallocation.
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -358,18 +390,63 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
int connector_type,
struct i2c_adapter *ddc)
{
+ if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
+ return -EINVAL;
+
+ return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
+}
+EXPORT_SYMBOL(drm_connector_init_with_ddc);
+
+static void drm_connector_cleanup_action(struct drm_device *dev,
+ void *ptr)
+{
+ struct drm_connector *connector = ptr;
+
+ drm_connector_cleanup(connector);
+}
+
+/**
+ * drmm_connector_init - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: optional pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Cleanup is automatically handled with a call to
+ * drm_connector_cleanup() in a DRM-managed action.
+ *
+ * The connector structure should be allocated with drmm_kzalloc().
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
int ret;
- ret = drm_connector_init(dev, connector, funcs, connector_type);
+ if (drm_WARN_ON(dev, funcs && funcs->destroy))
+ return -EINVAL;
+
+ ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
if (ret)
return ret;
- /* provide ddc symlink in sysfs */
- connector->ddc = ddc;
+ ret = drmm_add_action_or_reset(dev, drm_connector_cleanup_action,
+ connector);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(drm_connector_init_with_ddc);
+EXPORT_SYMBOL(drmm_connector_init);
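Typical usage, sketched with placeholder names; note that my_connector_funcs must leave .destroy NULL and the embedding structure comes from drmm_kzalloc():

	struct my_output *out;
	int ret;

	out = drmm_kzalloc(dev, sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = drmm_connector_init(dev, &out->connector, &my_connector_funcs,
				  DRM_MODE_CONNECTOR_DisplayPort, ddc);
	if (ret)
		return ret;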
/**
* drm_connector_attach_edid_property - attach edid property.
@@ -517,6 +594,9 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* e.g. DP MST connectors. All other connectors will be registered automatically
* when calling drm_dev_register().
*
+ * When the connector is no longer available, callers must call
+ * drm_connector_unregister().
+ *
* Returns:
* Zero on success, error code on failure.
*/
@@ -573,9 +653,8 @@ EXPORT_SYMBOL(drm_connector_register);
* @connector: the connector to unregister
*
* Unregister userspace interfaces for a connector. Only call this for
- * connectors which have registered explicitly by calling drm_dev_register(),
- * since connectors are unregistered automatically when drm_dev_unregister() is
- * called.
+ * connectors which have been registered explicitly by calling
+ * drm_connector_register().
*/
void drm_connector_unregister(struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index cad2a7e5166f..df9bf3c9206e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -343,9 +343,10 @@ static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *
* The @primary and @cursor planes are only relevant for legacy uAPI, see
* &drm_crtc.primary and &drm_crtc.cursor.
*
- * Note: consider using drmm_crtc_alloc_with_planes() instead of
- * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure
- * take care of cleanup and deallocation.
+ * Note: consider using drmm_crtc_alloc_with_planes() or
+ * drmm_crtc_init_with_planes() instead of drm_crtc_init_with_planes()
+ * to let the DRM managed resource infrastructure take care of cleanup
+ * and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -370,14 +371,88 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
-static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev,
- void *ptr)
+static void drmm_crtc_init_with_planes_cleanup(struct drm_device *dev,
+ void *ptr)
{
struct drm_crtc *crtc = ptr;
drm_crtc_cleanup(crtc);
}
+__printf(6, 0)
+static int __drmm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name,
+ va_list args)
+{
+ int ret;
+
+ drm_WARN_ON(dev, funcs && funcs->destroy);
+
+ ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, args);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, drmm_crtc_init_with_planes_cleanup,
+ crtc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * drmm_crtc_init_with_planes - Initialise a new CRTC object with
+ * specified primary and cursor planes.
+ * @dev: DRM device
+ * @crtc: CRTC object to init
+ * @primary: Primary plane for CRTC
+ * @cursor: Cursor plane for CRTC
+ * @funcs: callbacks for the new CRTC
+ * @name: printf style format string for the CRTC name, or NULL for default name
+ *
+ * Inits a new object created as base part of a driver crtc object. Drivers
+ * should use this function instead of drm_crtc_init(), which is only provided
+ * for backwards compatibility with drivers which do not yet support universal
+ * planes. For really simple hardware which has only 1 plane, look at
+ * drm_simple_display_pipe_init() instead.
+ *
+ * Cleanup is automatically handled through registering
+ * drm_crtc_cleanup() with drmm_add_action(). The crtc structure should
+ * be allocated with drmm_kzalloc().
+ *
+ * The @drm_crtc_funcs.destroy hook must be NULL.
+ *
+ * The @primary and @cursor planes are only relevant for legacy uAPI, see
+ * &drm_crtc.primary and &drm_crtc.cursor.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, name);
+ ret = __drmm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
+ va_end(ap);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_crtc_init_with_planes);
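Usage sketch with placeholder names; the CRTC and planes live in a drmm_kzalloc()'d structure and my_crtc_funcs has no .destroy hook:

	ret = drmm_crtc_init_with_planes(dev, &pipe->crtc,
					 &pipe->primary, &pipe->cursor,
					 &my_crtc_funcs, NULL);
	if (ret)
		return ret;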
+
void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
size_t size, size_t offset,
struct drm_plane *primary,
@@ -400,17 +475,12 @@ void *__drmm_crtc_alloc_with_planes(struct drm_device *dev,
crtc = container + offset;
va_start(ap, name);
- ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
- name, ap);
+ ret = __drmm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+ name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
- ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
- crtc);
- if (ret)
- return ERR_PTR(ret);
-
return container;
}
EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 8a6d54515f92..7d86020b5244 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -45,12 +46,23 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "drm_crtc_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
/**
* DOC: overview
*
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 937b699ac2a8..d8b2955e88fd 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -224,6 +224,7 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
const struct drm_plane_state *old_state,
const struct drm_plane_state *state)
{
+ struct drm_rect src;
memset(iter, 0, sizeof(*iter));
if (!state || !state->crtc || !state->fb || !state->visible)
@@ -233,10 +234,12 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
iter->num_clips = drm_plane_get_damage_clips_count(state);
/* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
- iter->plane_src.x1 = state->src.x1 >> 16;
- iter->plane_src.y1 = state->src.y1 >> 16;
- iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
- iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+ src = drm_plane_state_src(state);
+
+ iter->plane_src.x1 = src.x1 >> 16;
+ iter->plane_src.y1 = src.y1 >> 16;
+ iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);
if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
iter->clips = NULL;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 493922069c90..01ee3febb813 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -377,8 +377,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
- seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+ seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index bbc25e3b7220..4005dab6147d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5165,6 +5165,51 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode)
mode->clock = clock;
}
+static void drm_calculate_luminance_range(struct drm_connector *connector)
+{
+ struct hdr_static_metadata *hdr_metadata = &connector->hdr_sink_metadata.hdmi_type1;
+ struct drm_luminance_range_info *luminance_range =
+ &connector->display_info.luminance_range;
+ static const u8 pre_computed_values[] = {
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98
+ };
+ u32 max_avg, min_cll, max, min, q, r;
+
+ if (!(hdr_metadata->metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1)))
+ return;
+
+ max_avg = hdr_metadata->max_fall;
+ min_cll = hdr_metadata->min_cll;
+
+ /*
+ * From the specification (CTA-861-G), for calculating the maximum
+ * luminance we need to use:
+ * Luminance = 50*2**(CV/32)
+ * Where CV is a one-byte value.
+ * For calculating this expression we may need float point precision;
+ * to avoid this complexity level, we take advantage that CV is divided
+ * by a constant. From the Euclids division algorithm, we know that CV
+ * can be written as: CV = 32*q + r. Next, we replace CV in the
+ * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
+ * need to pre-compute the value of r/32. For pre-computing the values
+ * We just used the following Ruby line:
+ * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+ * The results of the above expressions can be verified at
+ * pre_computed_values.
+ */
+ q = max_avg >> 5;
+ r = max_avg % 32;
+ max = (1 << q) * pre_computed_values[r];
+
+ /* min luminance: maxLum * (CV/255)^2 / 100 */
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+ luminance_range->min_luminance = min;
+ luminance_range->max_luminance = max;
+}
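As a quick sanity check of the integer math above (a worked example, not part of the patch):

	/* max_fall = 70:
	 *   q   = 70 >> 5                        = 2
	 *   r   = 70 % 32                        = 6
	 *   max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228 cd/m^2
	 * which matches 50 * 2^(70/32) ~= 227.8 after rounding.
	 */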
+
static uint8_t eotf_supported(const u8 *edid_ext)
{
return edid_ext[2] &
@@ -5196,8 +5241,12 @@ drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
connector->hdr_sink_metadata.hdmi_type1.max_cll = db[4];
if (len >= 5)
connector->hdr_sink_metadata.hdmi_type1.max_fall = db[5];
- if (len >= 6)
+ if (len >= 6) {
connector->hdr_sink_metadata.hdmi_type1.min_cll = db[6];
+
+ /* Calculate only when all values are available */
+ drm_calculate_luminance_range(connector);
+ }
}
static void
@@ -5971,12 +6020,14 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static
-void get_monitor_range(const struct detailed_timing *timing,
- void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
{
- struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_info *info = &closure->connector->display_info;
+ struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
+ const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
@@ -5992,18 +6043,28 @@ void get_monitor_range(const struct detailed_timing *timing,
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ monitor_range->min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ monitor_range->max_vfreq += 255;
+ }
}
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
+ struct detailed_mode_closure closure = {
+ .connector = connector,
+ .drm_edid = drm_edid,
+ };
if (!version_greater(drm_edid, 1, 1))
return;
- drm_for_each_detailed_block(drm_edid, get_monitor_range,
- &info->monitor_range);
+ drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
info->monitor_range.min_vfreq,
@@ -6101,6 +6162,7 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
+ memset(&info->luminance_range, 0, sizeof(info->luminance_range));
info->mso_stream_count = 0;
info->mso_pixel_overlap = 0;
diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c
index a940024c8087..1143bc7f3252 100644
--- a/drivers/gpu/drm/drm_encoder.c
+++ b/drivers/gpu/drm/drm_encoder.c
@@ -27,6 +27,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -148,9 +149,9 @@ out_put:
* the encoder structure. The encoder structure should not be allocated with
* devm_kzalloc().
*
- * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to
- * let the DRM managed resource infrastructure take care of cleanup and
- * deallocation.
+ * Note: consider using drmm_encoder_alloc() or drmm_encoder_init()
+ * instead of drm_encoder_init() to let the DRM managed resource
+ * infrastructure take care of cleanup and deallocation.
*
* Returns:
* Zero on success, error code on failure.
@@ -212,6 +213,30 @@ static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr)
drm_encoder_cleanup(encoder);
}
+__printf(5, 0)
+static int __drmm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type,
+ const char *name,
+ va_list args)
+{
+ int ret;
+
+ if (drm_WARN_ON(dev, funcs && funcs->destroy))
+ return -EINVAL;
+
+ ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, args);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...)
@@ -221,9 +246,6 @@ void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
va_list ap;
int ret;
- if (WARN_ON(funcs && funcs->destroy))
- return ERR_PTR(-EINVAL);
-
container = drmm_kzalloc(dev, size, GFP_KERNEL);
if (!container)
return ERR_PTR(-ENOMEM);
@@ -231,19 +253,50 @@ void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset,
encoder = container + offset;
va_start(ap, name);
- ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
va_end(ap);
if (ret)
return ERR_PTR(ret);
- ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
- if (ret)
- return ERR_PTR(ret);
-
return container;
}
EXPORT_SYMBOL(__drmm_encoder_alloc);
+/**
+ * drmm_encoder_init - Initialize a preallocated encoder
+ * @dev: drm device
+ * @encoder: the encoder to init
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * Initializes a preallocated encoder. Encoder should be subclassed as
+ * part of driver encoder objects. Cleanup is automatically handled
+ * through registering drm_encoder_cleanup() with drmm_add_action(). The
+ * encoder structure should be allocated with drmm_kzalloc().
+ *
+ * The @drm_encoder_funcs.destroy hook must be NULL.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, name);
+ ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+ va_end(ap);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(drmm_encoder_init);
+
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
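A minimal usage sketch for the new drmm_encoder_init() above (the driver structure, function name and encoder type are illustrative; the call signature and the NULL-destroy requirement come from the kernel-doc in this hunk):

#include <drm/drm_encoder.h>
#include <drm/drm_managed.h>

struct my_output {                      /* hypothetical driver structure */
        struct drm_encoder encoder;
};

static int my_output_create(struct drm_device *dev)
{
        struct my_output *out;

        /* Tie the allocation's lifetime to the DRM device. */
        out = drmm_kzalloc(dev, sizeof(*out), GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        /* funcs is NULL: no .destroy hook, cleanup is DRM-managed. */
        return drmm_encoder_init(dev, &out->encoder, NULL,
                                 DRM_MODE_ENCODER_NONE, NULL);
}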
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 69c57273b184..3b535ad1b07c 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * drm kms/fb cma (contiguous memory allocator) helper functions
+ * drm kms/fb dma helper functions
*
* Copyright (C) 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
@@ -10,35 +10,40 @@
*/
#include <drm/drm_damage_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
/**
- * DOC: framebuffer cma helper functions
+ * DOC: framebuffer dma helper functions
*
- * Provides helper functions for creating a cma (contiguous memory allocator)
- * backed framebuffer.
+ * Provides helper functions for creating a DMA-contiguous framebuffer.
+ *
+ * Depending on the platform, the buffers may be physically non-contiguous and
+ * mapped through an IOMMU or a similar mechanism, or allocated from
+ * physically-contiguous memory (using, for instance, CMA or a pool of memory
+ * reserved at early boot). This is handled behind the scenes by the DMA mapping
+ * API.
*
* drm_gem_fb_create() is used in the &drm_mode_config_funcs.fb_create
- * callback function to create a cma backed framebuffer.
+ * callback function to create a DMA-contiguous framebuffer.
*/
/**
- * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
+ * drm_fb_dma_get_gem_obj() - Get DMA GEM object for framebuffer
* @fb: The framebuffer
* @plane: Which plane
*
- * Return the CMA GEM object for given framebuffer.
+ * Return the DMA GEM object for the given framebuffer.
*
* This function will usually be called from the CRTC callback functions.
*/
-struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
struct drm_gem_object *gem;
@@ -47,27 +52,27 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
if (!gem)
return NULL;
- return to_drm_gem_cma_obj(gem);
+ return to_drm_gem_dma_obj(gem);
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
+EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_obj);
/**
- * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer, for pixel
+ * drm_fb_dma_get_gem_addr() - Get DMA (bus) address for framebuffer, for pixel
* formats where values are grouped in blocks this will get you the beginning of
* the block
* @fb: The framebuffer
* @state: Which state of drm plane
* @plane: Which plane
- * Return the CMA GEM address for given framebuffer.
+ * Return the DMA GEM address for the given framebuffer.
*
* This function will usually be called from the PLANE callback functions.
*/
-dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
+dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane)
{
- struct drm_gem_cma_object *obj;
- dma_addr_t paddr;
+ struct drm_gem_dma_object *obj;
+ dma_addr_t dma_addr;
u8 h_div = 1, v_div = 1;
u32 block_w = drm_format_info_block_width(fb->format, plane);
u32 block_h = drm_format_info_block_height(fb->format, plane);
@@ -77,11 +82,11 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
u32 block_start_y;
u32 num_hblocks;
- obj = drm_fb_cma_get_gem_obj(fb, plane);
+ obj = drm_fb_dma_get_gem_obj(fb, plane);
if (!obj)
return 0;
- paddr = obj->paddr + fb->offsets[plane];
+ dma_addr = obj->dma_addr + fb->offsets[plane];
if (plane > 0) {
h_div = fb->format->hsub;
@@ -93,43 +98,43 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
block_start_y = (sample_y / block_h) * block_h;
num_hblocks = sample_x / block_w;
- paddr += fb->pitches[plane] * block_start_y;
- paddr += block_size * num_hblocks;
+ dma_addr += fb->pitches[plane] * block_start_y;
+ dma_addr += block_size * num_hblocks;
- return paddr;
+ return dma_addr;
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
+EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_addr);
/**
- * drm_fb_cma_sync_non_coherent - Sync GEM object to non-coherent backing
+ * drm_fb_dma_sync_non_coherent - Sync GEM object to non-coherent backing
* memory
* @drm: DRM device
* @old_state: Old plane state
* @state: New plane state
*
* This function can be used by drivers that use damage clips and have
- * CMA GEM objects backed by non-coherent memory. Calling this function
+ * DMA GEM objects backed by non-coherent memory. Calling this function
* in a plane's .atomic_update ensures that all the data in the backing
* memory have been written to RAM.
*/
-void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
+void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
struct drm_plane_state *old_state,
struct drm_plane_state *state)
{
const struct drm_format_info *finfo = state->fb->format;
struct drm_atomic_helper_damage_iter iter;
- const struct drm_gem_cma_object *cma_obj;
+ const struct drm_gem_dma_object *dma_obj;
unsigned int offset, i;
struct drm_rect clip;
dma_addr_t daddr;
size_t nb_bytes;
for (i = 0; i < finfo->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(state->fb, i);
- if (!cma_obj->map_noncoherent)
+ dma_obj = drm_fb_dma_get_gem_obj(state->fb, i);
+ if (!dma_obj->map_noncoherent)
continue;
- daddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
+ daddr = drm_fb_dma_get_gem_addr(state->fb, state, i);
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip) {
@@ -142,4 +147,4 @@ void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
}
}
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_sync_non_coherent);
+EXPORT_SYMBOL_GPL(drm_fb_dma_sync_non_coherent);
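A minimal sketch of how a scanout driver might use the renamed helpers in its plane update path (the callback and the register write are hypothetical; the helper names and signatures are the ones exported above):

#include <drm/drm_atomic.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_gem_dma_helper.h>

void my_hw_set_scanout_address(struct drm_plane *plane, dma_addr_t addr); /* hypothetical */

static void my_plane_atomic_update(struct drm_plane *plane,
                                   struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);
        dma_addr_t dma_addr;

        /* DMA (bus) address of the plane's source area, color plane 0 */
        dma_addr = drm_fb_dma_get_gem_addr(new_state->fb, new_state, 0);

        /* Flush damaged lines of non-coherent buffers before scanout. */
        drm_fb_dma_sync_non_coherent(plane->dev,
                                     drm_atomic_get_old_plane_state(state, plane),
                                     new_state);

        my_hw_set_scanout_address(plane, dma_addr);
}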
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2d4cee6a10ff..71edb80fe0fb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -377,12 +377,31 @@ static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
struct iosys_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
- unsigned int cpp = fb->format->cpp[0];
- size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
- void *src = fb_helper->fbdev->screen_buffer + offset;
- size_t len = (clip->x2 - clip->x1) * cpp;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
unsigned int y;
+ void *src;
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->fbdev->screen_buffer + offset;
iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
for (y = clip->y1; y < clip->y2; y++) {
@@ -1274,19 +1293,23 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
}
static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
- u8 depth)
+ const struct drm_format_info *format)
{
- switch (depth) {
- case 8:
+ u8 depth = format->depth;
+
+ if (format->is_color_indexed) {
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
- var->red.length = 8; /* 8bit DAC */
- var->green.length = 8;
- var->blue.length = 8;
+ var->red.length = depth;
+ var->green.length = depth;
+ var->blue.length = depth;
var->transp.offset = 0;
var->transp.length = 0;
- break;
+ return;
+ }
+
+ switch (depth) {
case 15:
var->red.offset = 10;
var->green.offset = 5;
@@ -1341,7 +1364,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
+ const struct drm_format_info *format = fb->format;
struct drm_device *dev = fb_helper->dev;
+ unsigned int bpp;
if (in_dbg_master())
return -EINVAL;
@@ -1351,22 +1376,33 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
var->pixclock = 0;
}
- if ((drm_format_info_block_width(fb->format, 0) > 1) ||
- (drm_format_info_block_height(fb->format, 0) > 1))
- return -EINVAL;
+ switch (format->format) {
+ case DRM_FORMAT_C1:
+ case DRM_FORMAT_C2:
+ case DRM_FORMAT_C4:
+ /* supported format with sub-byte pixels */
+ break;
+
+ default:
+ if ((drm_format_info_block_width(format, 0) > 1) ||
+ (drm_format_info_block_height(format, 0) > 1))
+ return -EINVAL;
+ break;
+ }
/*
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
- if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
+ bpp = drm_format_info_bpp(format, 0);
+ if (var->bits_per_pixel > bpp ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
- fb->width, fb->height, fb->format->cpp[0] * 8);
+ fb->width, fb->height, bpp);
return -EINVAL;
}
@@ -1381,13 +1417,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
!var->blue.length && !var->transp.length &&
!var->red.msb_right && !var->green.msb_right &&
!var->blue.msb_right && !var->transp.msb_right) {
- drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+ drm_fb_helper_fill_pixel_fmt(var, format);
}
/*
* Likewise, bits_per_pixel should be rounded up to a supported value.
*/
- var->bits_per_pixel = fb->format->cpp[0] * 8;
+ var->bits_per_pixel = bpp;
/*
* drm fbdev emulation doesn't support changing the pixel format at all,
@@ -1723,11 +1759,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
+ bool is_color_indexed)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
- FB_VISUAL_TRUECOLOR;
+ info->fix.visual = is_color_indexed ? FB_VISUAL_PSEUDOCOLOR
+ : FB_VISUAL_TRUECOLOR;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->fix.type_aux = 0;
@@ -1744,19 +1780,31 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
+ const struct drm_format_info *format = fb->format;
+
+ switch (format->format) {
+ case DRM_FORMAT_C1:
+ case DRM_FORMAT_C2:
+ case DRM_FORMAT_C4:
+ /* supported format with sub-byte pixels */
+ break;
+
+ default:
+ WARN_ON((drm_format_info_block_width(format, 0) > 1) ||
+ (drm_format_info_block_height(format, 0) > 1));
+ break;
+ }
- WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) ||
- (drm_format_info_block_height(fb->format, 0) > 1));
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
- info->var.bits_per_pixel = fb->format->cpp[0] * 8;
+ info->var.bits_per_pixel = drm_format_info_bpp(format, 0);
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
- drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
+ drm_fb_helper_fill_pixel_fmt(&info->var, format);
info->var.xres = fb_width;
info->var.yres = fb_height;
@@ -1781,7 +1829,8 @@ void drm_fb_helper_fill_info(struct fb_info *info,
{
struct drm_framebuffer *fb = fb_helper->fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0],
+ fb->format->is_color_indexed);
drm_fb_helper_fill_var(info, fb_helper,
sizes->fb_width, sizes->fb_height);
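To make the sub-byte clip arithmetic above explicit, a minimal sketch (the helper is hypothetical and mirrors the switch added to drm_fb_helper_damage_blit_real(); only bpp values of 1, 2 and 4 are meaningful here):

#include <linux/math.h>
#include <linux/types.h>

/* Byte offset and byte length of one clip line for sub-byte bits-per-pixel. */
static void clip_line_bounds(unsigned int bpp, unsigned int x1, unsigned int x2,
                             size_t *offset, size_t *len)
{
        unsigned int ppb = 8 / bpp;     /* pixels per byte: 8, 4 or 2 */

        *offset = x1 / ppb;
        *len = DIV_ROUND_UP((x2 - x1) + x1 % ppb, ppb);
}

/* e.g. C4 (bpp = 4), clip x1 = 3, x2 = 11: offset = 1 byte, len = 5 bytes */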
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index dc7d2e5b16c8..a8b4d918e9a3 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -48,11 +48,6 @@
#include "drm_internal.h"
#include "drm_legacy.h"
-#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-#include <uapi/asm/mman.h>
-#include <drm/drm_vma_manager.h>
-#endif
-
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -131,7 +126,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev)
* };
*
* For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
- * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
+ * DMA based drivers there is the DEFINE_DRM_GEM_DMA_FOPS() macro to make this
* simpler.
*
* The driver's &file_operations must be stored in &drm_driver.fops.
@@ -912,139 +907,3 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
-
-#ifdef CONFIG_MMU
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * drm_addr_inflate() attempts to construct an aligned area by inflating
- * the area size and skipping the unaligned start of the area.
- * adapted from shmem_get_unmapped_area()
- */
-static unsigned long drm_addr_inflate(unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags,
- unsigned long huge_size)
-{
- unsigned long offset, inflated_len;
- unsigned long inflated_addr;
- unsigned long inflated_offset;
-
- offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
- if (offset && offset + len < 2 * huge_size)
- return addr;
- if ((addr & (huge_size - 1)) == offset)
- return addr;
-
- inflated_len = len + huge_size - PAGE_SIZE;
- if (inflated_len > TASK_SIZE)
- return addr;
- if (inflated_len < len)
- return addr;
-
- inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
- 0, flags);
- if (IS_ERR_VALUE(inflated_addr))
- return addr;
- if (inflated_addr & ~PAGE_MASK)
- return addr;
-
- inflated_offset = inflated_addr & (huge_size - 1);
- inflated_addr += offset - inflated_offset;
- if (inflated_offset > offset)
- inflated_addr += huge_size;
-
- if (inflated_addr > TASK_SIZE - len)
- return addr;
-
- return inflated_addr;
-}
-
-/**
- * drm_get_unmapped_area() - Get an unused user-space virtual memory area
- * suitable for huge page table entries.
- * @file: The struct file representing the address space being mmap()'d.
- * @uaddr: Start address suggested by user-space.
- * @len: Length of the area.
- * @pgoff: The page offset into the address space.
- * @flags: mmap flags
- * @mgr: The address space manager used by the drm driver. This argument can
- * probably be removed at some point when all drivers use the same
- * address space manager.
- *
- * This function attempts to find an unused user-space virtual memory area
- * that can accommodate the size we want to map, and that is properly
- * aligned to facilitate huge page table entries matching actual
- * huge pages or huge page aligned memory in buffer objects. Buffer objects
- * are assumed to start at huge page boundary pfns (io memory) or be
- * populated by huge pages aligned to the start of the buffer object
- * (system- or coherent memory). Adapted from shmem_get_unmapped_area.
- *
- * Return: aligned user-space address.
- */
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr)
-{
- unsigned long addr;
- unsigned long inflated_addr;
- struct drm_vma_offset_node *node;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /*
- * @pgoff is the file page-offset the huge page boundaries of
- * which typically aligns to physical address huge page boundaries.
- * That's not true for DRM, however, where physical address huge
- * page boundaries instead are aligned with the offset from
- * buffer object start. So adjust @pgoff to be the offset from
- * buffer object start.
- */
- drm_vma_offset_lock_lookup(mgr);
- node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
- if (node)
- pgoff -= node->vm_node.start;
- drm_vma_offset_unlock_lookup(mgr);
-
- addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
- if (IS_ERR_VALUE(addr))
- return addr;
- if (addr & ~PAGE_MASK)
- return addr;
- if (addr > TASK_SIZE - len)
- return addr;
-
- if (len < HPAGE_PMD_SIZE)
- return addr;
- if (flags & MAP_FIXED)
- return addr;
- /*
- * Our priority is to support MAP_SHARED mapped hugely;
- * and support MAP_PRIVATE mapped hugely too, until it is COWed.
- * But if caller specified an address hint, respect that as before.
- */
- if (uaddr)
- return addr;
-
- inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
- HPAGE_PMD_SIZE);
-
- if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
- len >= HPAGE_PUD_SIZE)
- inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
- flags, HPAGE_PUD_SIZE);
- return inflated_addr;
-}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr)
-{
- return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
-#endif /* CONFIG_MMU */
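For the DEFINE_DRM_GEM_DMA_FOPS() reference above, a minimal sketch of a DMA-based driver wiring it up (driver name, description and feature flags are illustrative; the macro and DRM_GEM_DMA_DRIVER_OPS come from <drm/drm_gem_dma_helper.h> after the rename):

#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>

DEFINE_DRM_GEM_DMA_FOPS(my_driver_fops);        /* hypothetical name */

static const struct drm_driver my_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops = &my_driver_fops,
        DRM_GEM_DMA_DRIVER_OPS,                 /* GEM helpers for DMA-backed objects */
        .name = "my-driver",
        .desc = "hypothetical DMA-based driver",
        .date = "20220101",
        .major = 1,
        .minor = 0,
};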
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index a3ccd8bc966f..e2f76621453c 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,9 +8,10 @@
* (at your option) any later version.
*/
+#include <linux/io.h>
+#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/io.h>
#include <drm/drm_device.h>
#include <drm/drm_format_helper.h>
@@ -40,11 +41,11 @@ unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info
}
EXPORT_SYMBOL(drm_fb_clip_offset);
-/* TODO: Make this functon work with multi-plane formats. */
-static int drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool vaddr_cached_hint,
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
+/* TODO: Make this function work with multi-plane formats. */
+static int __drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool vaddr_cached_hint,
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
unsigned long lines = drm_rect_height(clip);
@@ -54,7 +55,7 @@ static int drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pix
const void *sbuf;
/*
- * Some source buffers, such as CMA memory, use write-combine
+ * Some source buffers, such as DMA memory, use write-combine
* caching, so reads are uncached. Speed up access by fetching
* one line at a time.
*/
@@ -83,11 +84,11 @@ static int drm_fb_xfrm(void *dst, unsigned long dst_pitch, unsigned long dst_pix
return 0;
}
-/* TODO: Make this functon work with multi-plane formats. */
-static int drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool vaddr_cached_hint,
- void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
+/* TODO: Make this function work with multi-plane formats. */
+static int __drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned long dst_pixsize,
+ const void *vaddr, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool vaddr_cached_hint,
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
unsigned long linepixels = drm_rect_width(clip);
unsigned long lines = drm_rect_height(clip);
@@ -128,65 +129,82 @@ static int drm_fb_xfrm_toio(void __iomem *dst, unsigned long dst_pitch, unsigned
return 0;
}
-/**
- * drm_fb_memcpy - Copy clip buffer
- * @dst: Destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: Source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- *
- * This function does not apply clipping on dst, i.e. the destination
- * is at the top-left corner.
- */
-void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+/* TODO: Make this function work with multi-plane formats. */
+static int drm_fb_xfrm(struct iosys_map *dst,
+ const unsigned int *dst_pitch, const u8 *dst_pixsize,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool vaddr_cached_hint,
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels))
{
- unsigned int cpp = fb->format->cpp[0];
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y, lines = clip->y2 - clip->y1;
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
if (!dst_pitch)
- dst_pitch = len;
+ dst_pitch = default_dst_pitch;
- vaddr += clip_offset(clip, fb->pitches[0], cpp);
- for (y = 0; y < lines; y++) {
- memcpy(dst, vaddr, len);
- vaddr += fb->pitches[0];
- dst += dst_pitch;
- }
+ /* TODO: handle src in I/O memory here */
+ if (dst[0].is_iomem)
+ return __drm_fb_xfrm_toio(dst[0].vaddr_iomem, dst_pitch[0], dst_pixsize[0],
+ src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
+ else
+ return __drm_fb_xfrm(dst[0].vaddr, dst_pitch[0], dst_pixsize[0],
+ src[0].vaddr, fb, clip, vaddr_cached_hint, xfrm_line);
}
-EXPORT_SYMBOL(drm_fb_memcpy);
/**
- * drm_fb_memcpy_toio - Copy clip buffer
- * @dst: Destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: Source buffer
+ * drm_fb_memcpy - Copy clip buffer
+ * @dst: Array of destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * This function does not apply clipping on dst, i.e. the destination
- * is at the top-left corner.
+ * This function copies parts of a framebuffer to display memory. Destination and
+ * framebuffer formats must match. No conversion takes place. The parameters @dst,
+ * @dst_pitch and @src refer to arrays. Each array must have at least as many entries
+ * as there are planes in @fb's format. Each entry stores the value for the format's
+ * respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*/
-void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- unsigned int cpp = fb->format->cpp[0];
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y, lines = clip->y2 - clip->y1;
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
- if (!dst_pitch)
- dst_pitch = len;
+ const struct drm_format_info *format = fb->format;
+ unsigned int i, y, lines = drm_rect_height(clip);
- vaddr += clip_offset(clip, fb->pitches[0], cpp);
- for (y = 0; y < lines; y++) {
- memcpy_toio(dst, vaddr, len);
- vaddr += fb->pitches[0];
- dst += dst_pitch;
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ unsigned int bpp_i = drm_format_info_bpp(format, i);
+ unsigned int cpp_i = DIV_ROUND_UP(bpp_i, 8);
+ size_t len_i = DIV_ROUND_UP(drm_rect_width(clip) * bpp_i, 8);
+ unsigned int dst_pitch_i = dst_pitch[i];
+ struct iosys_map dst_i = dst[i];
+ struct iosys_map src_i = src[i];
+
+ if (!dst_pitch_i)
+ dst_pitch_i = len_i;
+
+ iosys_map_incr(&src_i, clip_offset(clip, fb->pitches[i], cpp_i));
+ for (y = 0; y < lines; y++) {
+ /* TODO: handle src_i in I/O memory here */
+ iosys_map_memcpy_to(&dst_i, 0, src_i.vaddr, len_i);
+ iosys_map_incr(&src_i, fb->pitches[i]);
+ iosys_map_incr(&dst_i, dst_pitch_i);
+ }
}
}
-EXPORT_SYMBOL(drm_fb_memcpy_toio);
+EXPORT_SYMBOL(drm_fb_memcpy);
static void drm_fb_swab16_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
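A minimal sketch of the new array-based calling convention for drm_fb_memcpy() (buffer names and the wrapper are hypothetical; IOSYS_MAP_INIT_VADDR() is the initializer from <linux/iosys-map.h>):

#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

/* Copy a damage rectangle of a single-plane framebuffer to a shadow buffer. */
static void copy_damage(void *shadow, const void *vmap,
                        const struct drm_framebuffer *fb,
                        const struct drm_rect *damage)
{
        struct iosys_map dst[DRM_FORMAT_MAX_PLANES] = {
                IOSYS_MAP_INIT_VADDR(shadow),
        };
        struct iosys_map src[DRM_FORMAT_MAX_PLANES] = {
                IOSYS_MAP_INIT_VADDR((void *)vmap),
        };

        /* NULL dst_pitch: scanlines in the destination follow each other. */
        drm_fb_memcpy(dst, NULL, src, fb, damage);
}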
@@ -210,37 +228,47 @@ static void drm_fb_swab32_line(void *dbuf, const void *sbuf, unsigned int pixels
/**
* drm_fb_swab - Swap bytes into clip buffer
- * @dst: Destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @src: Source buffer
+ * @dst: Array of destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @cached: Source buffer is mapped cached (eg. not write-combined)
*
- * If @cached is false a temporary buffer is used to cache one pixel line at a
- * time to speed up slow uncached reads.
+ * This function copies parts of a framebuffer to display memory and swaps per-pixel
+ * bytes during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index. If @cached is
+ * false a temporary buffer is used to cache one pixel line at a time to speed up
+ * slow uncached reads.
*
- * This function does not apply clipping on dst, i.e. the destination
- * is at the top-left corner.
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*/
-void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip,
- bool cached)
+void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool cached)
{
- u8 cpp = fb->format->cpp[0];
+ const struct drm_format_info *format = fb->format;
+ u8 cpp = DIV_ROUND_UP(drm_format_info_bpp(format, 0), 8);
+ void (*swab_line)(void *dbuf, const void *sbuf, unsigned int npixels);
switch (cpp) {
case 4:
- drm_fb_xfrm(dst, dst_pitch, cpp, src, fb, clip, cached, drm_fb_swab32_line);
+ swab_line = drm_fb_swab32_line;
break;
case 2:
- drm_fb_xfrm(dst, dst_pitch, cpp, src, fb, clip, cached, drm_fb_swab16_line);
+ swab_line = drm_fb_swab16_line;
break;
default:
drm_warn_once(fb->dev, "Format %p4cc has unsupported pixel size.\n",
- &fb->format->format);
- break;
+ &format->format);
+ return;
}
+
+ drm_fb_xfrm(dst, dst_pitch, &cpp, src, fb, clip, cached, swab_line);
}
EXPORT_SYMBOL(drm_fb_swab);
@@ -261,32 +289,50 @@ static void drm_fb_xrgb8888_to_rgb332_line(void *dbuf, const void *sbuf, unsigne
/**
* drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
- * @dst: RGB332 destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @src: XRGB8888 source buffer
+ * @dst: Array of RGB332 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drivers can use this function for RGB332 devices that don't natively support XRGB8888.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB332 devices that don't support XRGB8888 natively.
*/
-void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm(dst, dst_pitch, 1, src, fb, clip, false, drm_fb_xrgb8888_to_rgb332_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 1,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgb332_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u16 *dbuf16 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- val16 = ((sbuf32[x] & 0x00F80000) >> 8) |
- ((sbuf32[x] & 0x0000FC00) >> 5) |
- ((sbuf32[x] & 0x000000F8) >> 3);
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
dbuf16[x] = val16;
}
}
@@ -295,146 +341,143 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
u16 *dbuf16 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- val16 = ((sbuf32[x] & 0x00F80000) >> 8) |
- ((sbuf32[x] & 0x0000FC00) >> 5) |
- ((sbuf32[x] & 0x000000F8) >> 3);
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00F80000) >> 8) |
+ ((pix & 0x0000FC00) >> 5) |
+ ((pix & 0x000000F8) >> 3);
dbuf16[x] = swab16(val16);
}
}
/**
* drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * @dst: Array of RGB565 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
* @swab: Swap bytes
*
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
- */
-void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip,
- bool swab)
-{
- if (swab)
- drm_fb_xfrm(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_swab_line);
- else
- drm_fb_xfrm(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_line);
-}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
-
-/**
- * drm_fb_xrgb8888_to_rgb565_toio - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- * @swab: Swap bytes
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
*
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGB565 devices that don't support XRGB8888 natively.
*/
-void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab)
+void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool swab)
{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels);
+
if (swab)
- drm_fb_xfrm_toio(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_swab_line);
+ xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line;
else
- drm_fb_xfrm_toio(dst, dst_pitch, 2, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb565_line);
+ xfrm_line = drm_fb_xrgb8888_to_rgb565_line;
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, xfrm_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_toio);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- *dbuf8++ = (sbuf32[x] & 0x000000FF) >> 0;
- *dbuf8++ = (sbuf32[x] & 0x0000FF00) >> 8;
- *dbuf8++ = (sbuf32[x] & 0x00FF0000) >> 16;
+ pix = le32_to_cpu(sbuf32[x]);
+ *dbuf8++ = (pix & 0x000000FF) >> 0;
+ *dbuf8++ = (pix & 0x0000FF00) >> 8;
+ *dbuf8++ = (pix & 0x00FF0000) >> 16;
}
}
/**
* drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
- * @dst: RGB888 destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @src: XRGB8888 source buffer
+ * @dst: Array of RGB888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drivers can use this function for RGB888 devices that don't natively
- * support XRGB8888.
- */
-void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
-{
- drm_fb_xfrm(dst, dst_pitch, 3, src, fb, clip, false, drm_fb_xrgb8888_to_rgb888_line);
-}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
-
-/**
- * drm_fb_xrgb8888_to_rgb888_toio - Convert XRGB8888 to RGB888 clip buffer
- * @dst: RGB565 destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*
* Drivers can use this function for RGB888 devices that don't natively
* support XRGB8888.
*/
-void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 3, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_rgb888_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 3,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgb888_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_toio);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
static void drm_fb_rgb565_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u32 *dbuf32 = dbuf;
- const u16 *sbuf16 = sbuf;
+ __le32 *dbuf32 = dbuf;
+ const __le16 *sbuf16 = sbuf;
unsigned int x;
- for (x = 0; x < pixels; x++, ++sbuf16, ++dbuf32) {
- u32 val32 = ((*sbuf16 & 0xf800) << 8) |
- ((*sbuf16 & 0x07e0) << 5) |
- ((*sbuf16 & 0x001f) << 3);
- *dbuf32 = 0xff000000 | val32 |
- ((val32 >> 3) & 0x00070007) |
- ((val32 >> 2) & 0x00000300);
+ for (x = 0; x < pixels; x++) {
+ u16 val16 = le16_to_cpu(sbuf16[x]);
+ u32 val32 = ((val16 & 0xf800) << 8) |
+ ((val16 & 0x07e0) << 5) |
+ ((val16 & 0x001f) << 3);
+ val32 = 0xff000000 | val32 |
+ ((val32 >> 3) & 0x00070007) |
+ ((val32 >> 2) & 0x00000300);
+ dbuf32[x] = cpu_to_le32(val32);
}
}
-static void drm_fb_rgb565_to_xrgb8888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+static void drm_fb_rgb565_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 4, vaddr, fb, clip, false,
- drm_fb_rgb565_to_xrgb8888_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_rgb565_to_xrgb8888_line);
}
static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u32 *dbuf32 = dbuf;
+ __le32 *dbuf32 = dbuf;
const u8 *sbuf8 = sbuf;
unsigned int x;
@@ -442,117 +485,159 @@ static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigne
u8 r = *sbuf8++;
u8 g = *sbuf8++;
u8 b = *sbuf8++;
- *dbuf32++ = 0xff000000 | (r << 16) | (g << 8) | b;
+ u32 pix = 0xff000000 | (r << 16) | (g << 8) | b;
+ dbuf32[x] = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb888_to_xrgb8888_toio(void __iomem *dst, unsigned int dst_pitch,
- const void *vaddr, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+static void drm_fb_rgb888_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 4, vaddr, fb, clip, false,
- drm_fb_rgb888_to_xrgb8888_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_rgb888_to_xrgb8888_line);
}
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u32 *dbuf32 = dbuf;
- const u32 *sbuf32 = sbuf;
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
u32 val32;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- val32 = ((sbuf32[x] & 0x000000FF) << 2) |
- ((sbuf32[x] & 0x0000FF00) << 4) |
- ((sbuf32[x] & 0x00FF0000) << 6);
- *dbuf32++ = val32 | ((val32 >> 8) & 0x00300C03);
+ pix = le32_to_cpu(sbuf32[x]);
+ val32 = ((pix & 0x000000FF) << 2) |
+ ((pix & 0x0000FF00) << 4) |
+ ((pix & 0x00FF0000) << 6);
+ pix = val32 | ((val32 >> 8) & 0x00300C03);
+ *dbuf32++ = cpu_to_le32(pix);
}
}
/**
- * drm_fb_xrgb8888_to_xrgb2101010_toio - Convert XRGB8888 to XRGB2101010 clip
- * buffer
- * @dst: XRGB2101010 destination buffer (iomem)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * drm_fb_xrgb8888_to_xrgb2101010 - Convert XRGB8888 to XRGB2101010 clip buffer
+ * @dst: Array of XRGB2101010 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drivers can use this function for XRGB2101010 devices that don't natively
- * support XRGB8888.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XRGB2101010 devices that don't support XRGB8888
+ * natively.
*/
-void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst,
- unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm_toio(dst, dst_pitch, 4, vaddr, fb, clip, false,
- drm_fb_xrgb8888_to_xrgb2101010_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xrgb2101010_line);
}
-EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio);
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
- const u32 *sbuf32 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
for (x = 0; x < pixels; x++) {
- u8 r = (*sbuf32 & 0x00ff0000) >> 16;
- u8 g = (*sbuf32 & 0x0000ff00) >> 8;
- u8 b = *sbuf32 & 0x000000ff;
+ u32 pix = le32_to_cpu(sbuf32[x]);
+ u8 r = (pix & 0x00ff0000) >> 16;
+ u8 g = (pix & 0x0000ff00) >> 8;
+ u8 b = pix & 0x000000ff;
/* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
*dbuf8++ = (3 * r + 6 * g + b) / 10;
- sbuf32++;
}
}
/**
* drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
- * @dst: 8-bit grayscale destination buffer
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * @dst: Array of 8-bit grayscale destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * Drm doesn't have native monochrome or grayscale support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
*
- * Monochrome drivers will use the most significant bit,
- * where 1 means foreground color and 0 background color.
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*
- * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
+ * DRM doesn't have native monochrome or grayscale support. Drivers can use this
+ * function for grayscale devices that don't support XRGB8888 natively. Such
+ * drivers can announce the commonly supported XR24 format to userspace and use
+ * this function to convert to the native format. Monochrome drivers will use the
+ * most significant bit, where 1 means foreground color and 0 background color.
+ * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
*/
-void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
- drm_fb_xfrm(dst, dst_pitch, 1, vaddr, fb, clip, false, drm_fb_xrgb8888_to_gray8_line);
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 1,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_gray8_line);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
/**
- * drm_fb_blit_toio - Copy parts of a framebuffer to display memory
- * @dst: The display memory to copy to
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * drm_fb_blit - Copy parts of a framebuffer to display memory
+ * @dst: Array of display-memory addresses to copy to
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
* @dst_format: FOURCC code of the display's color format
- * @vmap: The framebuffer memory to copy from
+ * @src: The framebuffer memory to copy from
* @fb: The framebuffer to copy from
* @clip: Clip rectangle area to copy
*
* This function copies parts of a framebuffer to display memory. If the
* formats of the display and the framebuffer mismatch, the blit function
- * will attempt to convert between them.
+ * will attempt to convert between them during the process. The parameters @dst,
+ * @dst_pitch and @src refer to arrays. Each array must have at least as many
+ * entries as there are planes in @dst_format's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
*
* Returns:
* 0 on success, or
* -EINVAL if the color-format conversion failed, or
* a negative error code otherwise.
*/
-int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format,
- const void *vmap, const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
uint32_t fb_format = fb->format->format;
@@ -567,30 +652,30 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
dst_format = DRM_FORMAT_XRGB2101010;
if (dst_format == fb_format) {
- drm_fb_memcpy_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
} else if (dst_format == DRM_FORMAT_RGB565) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb565_toio(dst, dst_pitch, vmap, fb, clip, false);
+ drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
}
} else if (dst_format == DRM_FORMAT_RGB888) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb888_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
return 0;
}
} else if (dst_format == DRM_FORMAT_XRGB8888) {
if (fb_format == DRM_FORMAT_RGB888) {
- drm_fb_rgb888_to_xrgb8888_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_rgb888_to_xrgb8888(dst, dst_pitch, src, fb, clip);
return 0;
} else if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_rgb565_to_xrgb8888_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_rgb565_to_xrgb8888(dst, dst_pitch, src, fb, clip);
return 0;
}
} else if (dst_format == DRM_FORMAT_XRGB2101010) {
if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_xrgb2101010_toio(dst, dst_pitch, vmap, fb, clip);
+ drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
}
}
@@ -600,8 +685,7 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
return -EINVAL;
}
-EXPORT_SYMBOL(drm_fb_blit_toio);
-
+EXPORT_SYMBOL(drm_fb_blit);
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
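Similarly, a minimal sketch of drm_fb_blit() choosing the conversion for an RGB565-only scanout buffer (names are hypothetical; only the source formats handled above can be converted):

#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>

static int my_flush(struct iosys_map *screen_base,     /* e.g. I/O-memory mapping */
                    const struct iosys_map *vmap,
                    const struct drm_framebuffer *fb,
                    const struct drm_rect *damage)
{
        /* Copies directly if @fb is RGB565, converts if it is XRGB8888. */
        return drm_fb_blit(screen_base, NULL, DRM_FORMAT_RGB565, vmap, fb, damage);
}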
@@ -622,49 +706,67 @@ static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int
/**
* drm_fb_xrgb8888_to_mono - Convert XRGB8888 to monochrome
- * @dst: monochrome destination buffer (0=black, 1=white)
- * @dst_pitch: Number of bytes between two consecutive scanlines within dst
- * @vaddr: XRGB8888 source buffer
+ * @dst: Array of monochrome destination buffers (0=black, 1=white)
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
- * DRM doesn't have native monochrome support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner). The first pixel (upper left corner of the clip rectangle) will
+ * be converted and copied to the first bit (LSB) in the first byte of the monochrome
+ * destination buffer. If the caller requires that the first pixel in a byte must
+ * be located at an x-coordinate that is a multiple of 8, then the caller must take
+ * care itself of supplying a suitable clip rectangle.
+ *
+ * DRM doesn't have native monochrome support. Drivers can use this function for
+ * monochrome devices that don't support XRGB8888 natively. Such drivers can
+ * announce the commonly supported XR24 format to userspace and use this function
+ * to convert to the native format.
*
* This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and
* then the result is converted from grayscale to monochrome.
- *
- * The first pixel (upper left corner of the clip rectangle) will be converted
- * and copied to the first bit (LSB) in the first byte of the monochrome
- * destination buffer.
- * If the caller requires that the first pixel in a byte must be located at an
- * x-coordinate that is a multiple of 8, then the caller must take care itself
- * of supplying a suitable clip rectangle.
*/
-void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *vaddr,
- const struct drm_framebuffer *fb, const struct drm_rect *clip)
+void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
unsigned int linepixels = drm_rect_width(clip);
unsigned int lines = drm_rect_height(clip);
unsigned int cpp = fb->format->cpp[0];
unsigned int len_src32 = linepixels * cpp;
struct drm_device *dev = fb->dev;
+ void *vaddr = src[0].vaddr;
+ unsigned int dst_pitch_0;
unsigned int y;
- u8 *mono = dst, *gray8;
+ u8 *mono = dst[0].vaddr, *gray8;
u32 *src32;
if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
return;
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+ dst_pitch_0 = dst_pitch[0];
+
/*
* The mono destination buffer contains 1 bit per pixel
*/
- if (!dst_pitch)
- dst_pitch = DIV_ROUND_UP(linepixels, 8);
+ if (!dst_pitch_0)
+ dst_pitch_0 = DIV_ROUND_UP(linepixels, 8);
/*
- * The cma memory is write-combined so reads are uncached.
+ * The DMA memory is write-combined so reads are uncached.
* Speed up by fetching one line at a time.
*
* Also, format conversion from XR24 to monochrome are done
@@ -686,9 +788,117 @@ void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *vadd
drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
drm_fb_gray8_to_mono_line(mono, gray8, linepixels);
vaddr += fb->pitches[0];
- mono += dst_pitch;
+ mono += dst_pitch_0;
}
kfree(src32);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+
+static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
+{
+ const uint32_t *fourccs_end = fourccs + nfourccs;
+
+ while (fourccs < fourccs_end) {
+ if (*fourccs == fourcc)
+ return true;
+ ++fourccs;
+ }
+ return false;
+}
+
+/**
+ * drm_fb_build_fourcc_list - Filters a list of supported color formats against
+ * the device's native formats
+ * @dev: DRM device
+ * @native_fourccs: 4CC codes of natively supported color formats
+ * @native_nfourccs: The number of entries in @native_fourccs
+ * @driver_fourccs: 4CC codes of all driver-supported color formats
+ * @driver_nfourccs: The number of entries in @driver_fourccs
+ * @fourccs_out: Returns 4CC codes of supported color formats
+ * @nfourccs_out: The number of available entries in @fourccs_out
+ *
+ * This function creates a list of supported color formats from the natively
+ * supported formats and the emulated formats.
+ * At a minimum, most userspace programs expect support for
+ * XRGB8888 on the primary plane. Devices that have to emulate the
+ * format, and possibly others, can use drm_fb_build_fourcc_list() to
+ * create a list of supported color formats. The returned list can
+ * be handed over to drm_universal_plane_init() et al. Native formats
+ * will go before emulated formats. Other heuristics might be applied
+ * to optimize the order. Formats near the beginning of the list are
+ * usually preferred over formats near the end of the list.
+ *
+ * Returns:
+ * The number of color-format 4CC codes returned in @fourccs_out.
+ */
+size_t drm_fb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ const u32 *driver_fourccs, size_t driver_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out)
+{
+ u32 *fourccs = fourccs_out;
+ const u32 *fourccs_end = fourccs_out + nfourccs_out;
+ bool found_native = false;
+ size_t i;
+
+ /*
+ * The device's native formats go first.
+ */
+
+ for (i = 0; i < native_nfourccs; ++i) {
+ u32 fourcc = native_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
+
+ if (!found_native)
+ found_native = is_listed_fourcc(driver_fourccs, driver_nfourccs, fourcc);
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ /*
+ * The plane's atomic_update helper converts the framebuffer's color format
+ * to a native format when copying to device memory.
+ *
+ * If there is not a single format supported by both device and
+ * driver, the native formats are likely not supported by the conversion
+ * helpers. Therefore *only* support the native formats and add a
+ * conversion helper ASAP.
+ */
+ if (!found_native) {
+ drm_warn(dev, "Format conversion helpers required to add extra formats.\n");
+ goto out;
+ }
+
+ /*
+ * The extra formats, emulated by the driver, go second.
+ */
+
+ for (i = 0; (i < driver_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = driver_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate and native entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+out:
+ return fourccs - fourccs_out;
+}
+EXPORT_SYMBOL(drm_fb_build_fourcc_list);
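A sketch of how a driver with one native format might feed the result into plane initialization; the mydrv_* identifiers are hypothetical and mydrv_plane_funcs is assumed to be defined elsewhere:

#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

static const u32 mydrv_native_formats[] = {
	DRM_FORMAT_RGB565,		/* what the hardware actually scans out */
};

static const u32 mydrv_driver_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,		/* emulated through the conversion helpers */
};

static int mydrv_init_primary_plane(struct drm_device *dev, struct drm_plane *plane)
{
	u32 formats[ARRAY_SIZE(mydrv_driver_formats)];
	size_t nformats;

	/* native formats end up first, emulated ones follow */
	nformats = drm_fb_build_fourcc_list(dev,
					    mydrv_native_formats,
					    ARRAY_SIZE(mydrv_native_formats),
					    mydrv_driver_formats,
					    ARRAY_SIZE(mydrv_driver_formats),
					    formats, ARRAY_SIZE(formats));

	return drm_universal_plane_init(dev, plane, 1, &mydrv_plane_funcs,
					formats, nformats, NULL,
					DRM_PLANE_TYPE_PRIMARY, NULL);
}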
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 07741b678798..e09331bb3bc7 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -43,6 +43,21 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
uint32_t fmt = DRM_FORMAT_INVALID;
switch (bpp) {
+ case 1:
+ if (depth == 1)
+ fmt = DRM_FORMAT_C1;
+ break;
+
+ case 2:
+ if (depth == 2)
+ fmt = DRM_FORMAT_C2;
+ break;
+
+ case 4:
+ if (depth == 4)
+ fmt = DRM_FORMAT_C4;
+ break;
+
case 8:
if (depth == 8)
fmt = DRM_FORMAT_C8;
@@ -132,7 +147,26 @@ EXPORT_SYMBOL(drm_driver_legacy_fb_format);
const struct drm_format_info *__drm_format_info(u32 format)
{
static const struct drm_format_info formats[] = {
- { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_C1, .depth = 1, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_C2, .depth = 2, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_C4, .depth = 4, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1, .is_color_indexed = true },
+ { .format = DRM_FORMAT_D1, .depth = 1, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_D2, .depth = 2, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_D4, .depth = 4, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_D8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R1, .depth = 1, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 8, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R2, .depth = 2, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 4, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R4, .depth = 4, .num_planes = 1,
+ .char_per_block = { 1, }, .block_w = { 2, }, .block_h = { 1, }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
@@ -371,6 +405,25 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
EXPORT_SYMBOL(drm_format_info_block_height);
/**
+ * drm_format_info_bpp - number of bits per pixel
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The actual number of bits per pixel, depending on the plane index.
+ */
+unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ return info->char_per_block[plane] * 8 /
+ (drm_format_info_block_width(info, plane) *
+ drm_format_info_block_height(info, plane));
+}
+EXPORT_SYMBOL(drm_format_info_bpp);
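For the sub-byte formats added above the cpp field stays zero, so the value has to come from the block metadata; a short illustration of what the helper returns:

/* DRM_FORMAT_C1: char_per_block = 1 byte covering an 8x1-pixel block
 *	bpp = 1 * 8 / (8 * 1) = 1
 * DRM_FORMAT_C8: cpp/char_per_block = 1, block size defaults to 1x1
 *	bpp = 1 * 8 / (1 * 1) = 8
 */
const struct drm_format_info *info = drm_format_info(DRM_FORMAT_C1);
unsigned int bpp = drm_format_info_bpp(info, 0);	/* 1 */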
+
+/**
* drm_format_info_min_pitch - computes the minimum required pitch in bytes
* @info: pixel format info
* @plane: plane index
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 4562a8b86579..2dd97473ca10 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -87,13 +87,13 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
src_x > fb_width - src_w ||
src_h > fb_height ||
src_y > fb_height - src_h) {
- DRM_DEBUG_KMS("Invalid source coordinates "
- "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
- src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
- src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
- src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
- src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
- fb->width, fb->height);
+ drm_dbg_kms(fb->dev, "Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
@@ -125,7 +125,7 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth);
if (r.pixel_format == DRM_FORMAT_INVALID) {
- DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
+ drm_dbg_kms(dev, "bad {bpp:%d, depth:%d}\n", or->bpp, or->depth);
return -EINVAL;
}
@@ -177,18 +177,18 @@ static int framebuffer_check(struct drm_device *dev,
/* check if the format is supported at all */
if (!__drm_format_info(r->pixel_format)) {
- DRM_DEBUG_KMS("bad framebuffer format %p4cc\n",
- &r->pixel_format);
+ drm_dbg_kms(dev, "bad framebuffer format %p4cc\n",
+ &r->pixel_format);
return -EINVAL;
}
if (r->width == 0) {
- DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
+ drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width);
return -EINVAL;
}
if (r->height == 0) {
- DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
+ drm_dbg_kms(dev, "bad framebuffer height %u\n", r->height);
return -EINVAL;
}
@@ -202,12 +202,12 @@ static int framebuffer_check(struct drm_device *dev,
u64 min_pitch = drm_format_info_min_pitch(info, i, width);
if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
- DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i);
+ drm_dbg_kms(dev, "Format requires non-linear modifier for plane %d\n", i);
return -EINVAL;
}
if (!r->handles[i]) {
- DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
+ drm_dbg_kms(dev, "no buffer object handle for plane %d\n", i);
return -EINVAL;
}
@@ -218,20 +218,20 @@ static int framebuffer_check(struct drm_device *dev,
return -ERANGE;
if (block_size && r->pitches[i] < min_pitch) {
- DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
+ drm_dbg_kms(dev, "bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
+ drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
return -EINVAL;
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
r->modifier[i] != r->modifier[0]) {
- DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n",
- r->modifier[i], i);
+ drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n",
+ r->modifier[i], i);
return -EINVAL;
}
@@ -244,7 +244,7 @@ static int framebuffer_check(struct drm_device *dev,
if (r->pixel_format != DRM_FORMAT_NV12 ||
width % 128 || height % 32 ||
r->pitches[i] % 128) {
- DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
+ drm_dbg_kms(dev, "bad modifier data for plane %d\n", i);
return -EINVAL;
}
break;
@@ -256,7 +256,7 @@ static int framebuffer_check(struct drm_device *dev,
for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
- DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero modifier for unused plane %d\n", i);
return -EINVAL;
}
@@ -265,17 +265,17 @@ static int framebuffer_check(struct drm_device *dev,
continue;
if (r->handles[i]) {
- DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i);
+ drm_dbg_kms(dev, "buffer object handle for unused plane %d\n", i);
return -EINVAL;
}
if (r->pitches[i]) {
- DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero pitch for unused plane %d\n", i);
return -EINVAL;
}
if (r->offsets[i]) {
- DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i);
+ drm_dbg_kms(dev, "non-zero offset for unused plane %d\n", i);
return -EINVAL;
}
}
@@ -293,24 +293,24 @@ drm_internal_framebuffer_create(struct drm_device *dev,
int ret;
if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
- DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+ drm_dbg_kms(dev, "bad framebuffer flags 0x%08x\n", r->flags);
return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
- r->width, config->min_width, config->max_width);
+ drm_dbg_kms(dev, "bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
- r->height, config->min_height, config->max_height);
+ drm_dbg_kms(dev, "bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return ERR_PTR(-EINVAL);
}
if (r->flags & DRM_MODE_FB_MODIFIERS &&
dev->mode_config.fb_modifiers_not_supported) {
- DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+ drm_dbg_kms(dev, "driver does not support fb modifiers\n");
return ERR_PTR(-EINVAL);
}
@@ -320,7 +320,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("could not create framebuffer\n");
+ drm_dbg_kms(dev, "could not create framebuffer\n");
return fb;
}
@@ -356,7 +356,7 @@ int drm_mode_addfb2(struct drm_device *dev,
if (IS_ERR(fb))
return PTR_ERR(fb);
- DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+ drm_dbg_kms(dev, "[FB:%d]\n", fb->base.id);
r->fb_id = fb->base.id;
/* Transfer ownership to the filp for reaping on close */
@@ -384,7 +384,7 @@ int drm_mode_addfb2_ioctl(struct drm_device *dev,
* then. So block it to make userspace fallback to
* ADDFB.
*/
- DRM_DEBUG_KMS("addfb2 broken on bigendian");
+ drm_dbg_kms(dev, "addfb2 broken on bigendian");
return -EOPNOTSUPP;
}
#endif
@@ -530,7 +530,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->height = fb->height;
r->width = fb->width;
r->depth = fb->format->depth;
- r->bpp = fb->format->cpp[0] * 8;
+ r->bpp = drm_format_info_bpp(fb->format, 0);
r->pitch = fb->pitches[0];
/* GET_FB() is an unprivileged ioctl so we must not return a
@@ -935,7 +935,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
* the id and get back -EINVAL. Obviously no concern at driver unload time.
*
* Also, the framebuffer will not be removed from the lookup idr - for
- * user-created framebuffers this will happen in in the rmfb ioctl. For
+ * user-created framebuffers this will happen in the rmfb ioctl. For
* driver-private objects (e.g. for fbdev) drivers need to explicitly call
* drm_framebuffer_unregister_private.
*/
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ad068865ba20..8b68a3c1e6ab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -165,6 +165,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
obj->resv = &obj->_resv;
drm_vma_node_reset(&obj->vma_node);
+ INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -936,6 +937,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
+ drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1259,3 +1261,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
+
+/**
+ * drm_gem_lru_init - initialize a LRU
+ *
+ * @lru: The LRU to initialize
+ * @lock: The lock protecting the LRU
+ */
+void
+drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
+{
+ lru->lock = lock;
+ lru->count = 0;
+ INIT_LIST_HEAD(&lru->list);
+}
+EXPORT_SYMBOL(drm_gem_lru_init);
+
+static void
+drm_gem_lru_remove_locked(struct drm_gem_object *obj)
+{
+ obj->lru->count -= obj->size >> PAGE_SHIFT;
+ WARN_ON(obj->lru->count < 0);
+ list_del(&obj->lru_node);
+ obj->lru = NULL;
+}
+
+/**
+ * drm_gem_lru_remove - remove object from whatever LRU it is in
+ *
+ * If the object is currently in any LRU, remove it.
+ *
+ * @obj: The GEM object to remove from current LRU
+ */
+void
+drm_gem_lru_remove(struct drm_gem_object *obj)
+{
+ struct drm_gem_lru *lru = obj->lru;
+
+ if (!lru)
+ return;
+
+ mutex_lock(lru->lock);
+ drm_gem_lru_remove_locked(obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_remove);
+
+static void
+drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ lockdep_assert_held_once(lru->lock);
+
+ if (obj->lru)
+ drm_gem_lru_remove_locked(obj);
+
+ lru->count += obj->size >> PAGE_SHIFT;
+ list_add_tail(&obj->lru_node, &lru->list);
+ obj->lru = lru;
+}
+
+/**
+ * drm_gem_lru_move_tail - move the object to the tail of the LRU
+ *
+ * If the object is already in this LRU it will be moved to the
+ * tail. Otherwise it will be removed from whichever other LRU
+ * it is in (if any) and moved into this LRU.
+ *
+ * @lru: The LRU to move the object into.
+ * @obj: The GEM object to move into this LRU
+ */
+void
+drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ mutex_lock(lru->lock);
+ drm_gem_lru_move_tail_locked(lru, obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_move_tail);
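A sketch of typical driver bookkeeping built on these helpers: two LRUs sharing one lock, with objects moved between them as they are pinned and unpinned. The mydrv_* structure and function names are hypothetical:

/* One lock shared by both LRUs, as required by drm_gem_lru_init(). */
struct mydrv_device {
	struct mutex lru_lock;
	struct drm_gem_lru unpinned;	/* reclaimable by the shrinker */
	struct drm_gem_lru pinned;	/* currently in use, not reclaimable */
};

static void mydrv_lru_setup(struct mydrv_device *mdev)
{
	mutex_init(&mdev->lru_lock);
	drm_gem_lru_init(&mdev->unpinned, &mdev->lru_lock);
	drm_gem_lru_init(&mdev->pinned, &mdev->lru_lock);
}

static void mydrv_gem_mark_pinned(struct mydrv_device *mdev, struct drm_gem_object *obj)
{
	drm_gem_lru_move_tail(&mdev->pinned, obj);
}

static void mydrv_gem_mark_unpinned(struct mydrv_device *mdev, struct drm_gem_object *obj)
{
	drm_gem_lru_move_tail(&mdev->unpinned, obj);
}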
+
+/**
+ * drm_gem_lru_scan - helper to implement shrinker.scan_objects
+ *
+ * If the shrink callback succeeds, it is expected that the driver
+ * move the object out of this LRU.
+ *
+ * If the LRU possibly contains active buffers, it is the responsibility
+ * of the shrink callback to check for this (ie. dma_resv_test_signaled())
+ * or if necessary block until the buffer becomes idle.
+ *
+ * @lru: The LRU to scan
+ * @nr_to_scan: The number of pages to try to reclaim
+ * @shrink: Callback to try to shrink/reclaim the object.
+ */
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ bool (*shrink)(struct drm_gem_object *obj))
+{
+ struct drm_gem_lru still_in_lru;
+ struct drm_gem_object *obj;
+ unsigned freed = 0;
+
+ drm_gem_lru_init(&still_in_lru, lru->lock);
+
+ mutex_lock(lru->lock);
+
+ while (freed < nr_to_scan) {
+ obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
+
+ if (!obj)
+ break;
+
+ drm_gem_lru_move_tail_locked(&still_in_lru, obj);
+
+ /*
+ * If it's in the process of being freed, gem_object->free()
+ * may be blocked on lock waiting to remove it. So just
+ * skip it.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ continue;
+
+ /*
+ * Now that we own a reference, we can drop the lock for the
+ * rest of the loop body, to reduce contention with other
+ * code paths that need the LRU lock
+ */
+ mutex_unlock(lru->lock);
+
+ /*
+ * Note that this still needs to be a trylock, since we can
+ * hit the shrinker in response to trying to get backing pages
+ * for this obj (i.e. while its lock is already held)
+ */
+ if (!dma_resv_trylock(obj->resv))
+ goto tail;
+
+ if (shrink(obj)) {
+ freed += obj->size >> PAGE_SHIFT;
+
+ /*
+ * If we succeeded in releasing the object's backing
+ * pages, we expect the driver to have moved the object
+ * out of this LRU
+ */
+ WARN_ON(obj->lru == &still_in_lru);
+ WARN_ON(obj->lru == lru);
+ }
+
+ dma_resv_unlock(obj->resv);
+
+tail:
+ drm_gem_object_put(obj);
+ mutex_lock(lru->lock);
+ }
+
+ /*
+ * Move objects we've skipped over out of the temporary still_in_lru
+ * back into this LRU
+ */
+ list_for_each_entry (obj, &still_in_lru.list, lru_node)
+ obj->lru = lru;
+ list_splice_tail(&still_in_lru.list, &lru->list);
+ lru->count += still_in_lru.count;
+
+ mutex_unlock(lru->lock);
+
+ return freed;
+}
+EXPORT_SYMBOL(drm_gem_lru_scan);
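A sketch of a shrinker scan callback on top of drm_gem_lru_scan(), continuing the hypothetical mydrv_device from above and assuming it also embeds a struct shrinker. mydrv_gem_purge() stands in for the driver-specific code that drops backing pages and moves the object out of the LRU; the DMA_RESV_USAGE_BOOKKEEP usage level is one reasonable choice for the idle check:

#include <linux/dma-resv.h>
#include <linux/shrinker.h>

/* Called with obj->resv held by drm_gem_lru_scan(); must not block. */
static bool mydrv_gem_shrink(struct drm_gem_object *obj)
{
	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
		return false;			/* still busy, skip it */

	mydrv_gem_purge(obj);			/* driver-specific reclaim */
	return true;
}

static unsigned long mydrv_shrinker_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	struct mydrv_device *mdev =
		container_of(shrinker, struct mydrv_device, shrinker);

	return drm_gem_lru_scan(&mdev->unpinned, sc->nr_to_scan,
				mydrv_gem_shrink);
}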
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 42abee9a0f4f..f6901ff97bbb 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * drm gem CMA (contiguous memory allocator) helper functions
+ * drm gem DMA helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
*
@@ -20,20 +20,17 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vma_manager.h>
/**
- * DOC: cma helpers
+ * DOC: dma helpers
*
- * The DRM GEM/CMA helpers are a means to provide buffer objects that are
+ * The DRM GEM/DMA helpers are a means to provide buffer objects that are
* presented to the device as a contiguous chunk of memory. This is useful
* for devices that do not support scatter-gather DMA (either directly or
* by using an intimately attached IOMMU).
*
- * Despite the name, the DRM GEM/CMA helpers are not hardwired to use the
- * Contiguous Memory Allocator (CMA).
- *
* For devices that access the memory bus through an (external) IOMMU then
* the buffer objects are allocated using a traditional page-based
* allocator and may be scattered through physical memory. However they
@@ -44,36 +41,36 @@
* objects that are physically contiguous in memory.
*
* For GEM callback helpers in struct &drm_gem_object functions, see likewise
- * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps
- * drm_gem_cma_vmap()). These helpers perform the necessary type conversion.
+ * named functions with an _object_ infix (e.g., drm_gem_dma_object_vmap() wraps
+ * drm_gem_dma_vmap()). These helpers perform the necessary type conversion.
*/
-static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
- .free = drm_gem_cma_object_free,
- .print_info = drm_gem_cma_object_print_info,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
- .mmap = drm_gem_cma_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+static const struct drm_gem_object_funcs drm_gem_dma_default_funcs = {
+ .free = drm_gem_dma_object_free,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = drm_gem_dma_object_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
/**
- * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
+ * __drm_gem_dma_create - Create a GEM DMA object without allocating memory
* @drm: DRM device
* @size: size of the object to allocate
* @private: true if used for internal purposes
*
- * This function creates and initializes a GEM CMA object of the given size,
+ * This function creates and initializes a GEM DMA object of the given size,
* but doesn't allocate any memory to back the object.
*
* Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
-static struct drm_gem_cma_object *
-__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
+static struct drm_gem_dma_object *
+__drm_gem_dma_create(struct drm_device *drm, size_t size, bool private)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret = 0;
@@ -81,22 +78,22 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
gem_obj = drm->driver->gem_create_object(drm, size);
if (IS_ERR(gem_obj))
return ERR_CAST(gem_obj);
- cma_obj = to_drm_gem_cma_obj(gem_obj);
+ dma_obj = to_drm_gem_dma_obj(gem_obj);
} else {
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
return ERR_PTR(-ENOMEM);
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
}
if (!gem_obj->funcs)
- gem_obj->funcs = &drm_gem_cma_default_funcs;
+ gem_obj->funcs = &drm_gem_dma_default_funcs;
if (private) {
drm_gem_private_object_init(drm, gem_obj, size);
/* Always use writecombine for dma-buf mappings */
- cma_obj->map_noncoherent = false;
+ dma_obj->map_noncoherent = false;
} else {
ret = drm_gem_object_init(drm, gem_obj, size);
}
@@ -109,19 +106,19 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
goto error;
}
- return cma_obj;
+ return dma_obj;
error:
- kfree(cma_obj);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
/**
- * drm_gem_cma_create - allocate an object with the given size
+ * drm_gem_dma_create - allocate an object with the given size
* @drm: DRM device
* @size: size of the object to allocate
*
- * This function creates a CMA GEM object and allocates memory as backing store.
+ * This function creates a DMA GEM object and allocates memory as backing store.
* The allocated memory will occupy a contiguous chunk of bus address space.
*
* For devices that are directly connected to the memory bus then the allocated
@@ -131,78 +128,79 @@ error:
* requirements.
*
* Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
size_t size)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
int ret;
size = round_up(size, PAGE_SIZE);
- cma_obj = __drm_gem_cma_create(drm, size, false);
- if (IS_ERR(cma_obj))
- return cma_obj;
+ dma_obj = __drm_gem_dma_create(drm, size, false);
+ if (IS_ERR(dma_obj))
+ return dma_obj;
- if (cma_obj->map_noncoherent) {
- cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
- &cma_obj->paddr,
+ if (dma_obj->map_noncoherent) {
+ dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
+ &dma_obj->dma_addr,
DMA_TO_DEVICE,
GFP_KERNEL | __GFP_NOWARN);
} else {
- cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+ dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
+ &dma_obj->dma_addr,
GFP_KERNEL | __GFP_NOWARN);
}
- if (!cma_obj->vaddr) {
+ if (!dma_obj->vaddr) {
drm_dbg(drm, "failed to allocate buffer with size %zu\n",
size);
ret = -ENOMEM;
goto error;
}
- return cma_obj;
+ return dma_obj;
error:
- drm_gem_object_put(&cma_obj->base);
+ drm_gem_object_put(&dma_obj->base);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+EXPORT_SYMBOL_GPL(drm_gem_dma_create);
/**
- * drm_gem_cma_create_with_handle - allocate an object with the given size and
+ * drm_gem_dma_create_with_handle - allocate an object with the given size and
* return a GEM handle to it
* @file_priv: DRM file-private structure to register the handle for
* @drm: DRM device
* @size: size of the object to allocate
* @handle: return location for the GEM handle
*
- * This function creates a CMA GEM object, allocating a chunk of memory as
+ * This function creates a DMA GEM object, allocating a chunk of memory as
* backing store. The GEM object is then added to the list of object associated
* with the given file and a handle to it is returned.
*
* The allocated memory will occupy a contiguous chunk of bus address space.
- * See drm_gem_cma_create() for more details.
+ * See drm_gem_dma_create() for more details.
*
* Returns:
- * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
+ * A struct drm_gem_dma_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
-static struct drm_gem_cma_object *
-drm_gem_cma_create_with_handle(struct drm_file *file_priv,
+static struct drm_gem_dma_object *
+drm_gem_dma_create_with_handle(struct drm_file *file_priv,
struct drm_device *drm, size_t size,
uint32_t *handle)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
- cma_obj = drm_gem_cma_create(drm, size);
- if (IS_ERR(cma_obj))
- return cma_obj;
+ dma_obj = drm_gem_dma_create(drm, size);
+ if (IS_ERR(dma_obj))
+ return dma_obj;
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
/*
* allocate a id of idr table where the obj is registered
@@ -214,44 +212,44 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
if (ret)
return ERR_PTR(ret);
- return cma_obj;
+ return dma_obj;
}
/**
- * drm_gem_cma_free - free resources associated with a CMA GEM object
- * @cma_obj: CMA GEM object to free
+ * drm_gem_dma_free - free resources associated with a DMA GEM object
+ * @dma_obj: DMA GEM object to free
*
- * This function frees the backing memory of the CMA GEM object, cleans up the
+ * This function frees the backing memory of the DMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
*/
-void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj)
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
{
- struct drm_gem_object *gem_obj = &cma_obj->base;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(cma_obj->vaddr);
+ struct drm_gem_object *gem_obj = &dma_obj->base;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(dma_obj->vaddr);
if (gem_obj->import_attach) {
- if (cma_obj->vaddr)
+ if (dma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
- drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
- } else if (cma_obj->vaddr) {
- if (cma_obj->map_noncoherent)
- dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr,
+ drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
+ } else if (dma_obj->vaddr) {
+ if (dma_obj->map_noncoherent)
+ dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->dma_addr,
DMA_TO_DEVICE);
else
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
+ dma_obj->vaddr, dma_obj->dma_addr);
}
drm_gem_object_release(gem_obj);
- kfree(cma_obj);
+ kfree(dma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_free);
+EXPORT_SYMBOL_GPL(drm_gem_dma_free);
/**
- * drm_gem_cma_dumb_create_internal - create a dumb buffer object
+ * drm_gem_dma_dumb_create_internal - create a dumb buffer object
* @file_priv: DRM file-private structure to create the dumb buffer for
* @drm: DRM device
* @args: IOCTL data
@@ -264,12 +262,12 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_free);
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
if (args->pitch < min_pitch)
args->pitch = min_pitch;
@@ -277,14 +275,14 @@ int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
- return PTR_ERR_OR_ZERO(cma_obj);
+ return PTR_ERR_OR_ZERO(dma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
+EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create_internal);
/**
- * drm_gem_cma_dumb_create - create a dumb buffer object
+ * drm_gem_dma_dumb_create - create a dumb buffer object
* @file_priv: DRM file-private structure to create the dumb buffer for
* @drm: DRM device
* @args: IOCTL data
@@ -296,35 +294,35 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
*
* For hardware with additional restrictions, drivers can adjust the fields
* set up by userspace and pass the IOCTL data along to the
- * drm_gem_cma_dumb_create_internal() function.
+ * drm_gem_dma_dumb_create_internal() function.
*
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->size = args->pitch * args->height;
- cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
+ dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
- return PTR_ERR_OR_ZERO(cma_obj);
+ return PTR_ERR_OR_ZERO(dma_obj);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
+EXPORT_SYMBOL_GPL(drm_gem_dma_dumb_create);
-const struct vm_operations_struct drm_gem_cma_vm_ops = {
+const struct vm_operations_struct drm_gem_dma_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
-EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
+EXPORT_SYMBOL_GPL(drm_gem_dma_vm_ops);
#ifndef CONFIG_MMU
/**
- * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
+ * drm_gem_dma_get_unmapped_area - propose address for mapping in noMMU cases
* @filp: file object
* @addr: memory address
* @len: buffer size
@@ -339,13 +337,13 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
* Returns:
* mapping address on success or a negative error code on failure.
*/
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
unsigned long flags)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj = NULL;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
@@ -384,35 +382,35 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
return -EACCES;
}
- cma_obj = to_drm_gem_cma_obj(obj);
+ dma_obj = to_drm_gem_dma_obj(obj);
drm_gem_object_put(obj);
- return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
+ return dma_obj->vaddr ? (unsigned long)dma_obj->vaddr : -EINVAL;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
+EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
#endif
/**
- * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
- * @cma_obj: CMA GEM object
+ * drm_gem_dma_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @dma_obj: DMA GEM object
* @p: DRM printer
* @indent: Tab indentation level
*
- * This function prints paddr and vaddr for use in e.g. debugfs output.
+ * This function prints dma_addr and vaddr for use in e.g. debugfs output.
*/
-void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj,
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
struct drm_printer *p, unsigned int indent)
{
- drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
- drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
+ drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
+ drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
}
-EXPORT_SYMBOL(drm_gem_cma_print_info);
+EXPORT_SYMBOL(drm_gem_dma_print_info);
/**
- * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
- * pages for a CMA GEM object
- * @cma_obj: CMA GEM object
+ * drm_gem_dma_get_sg_table - provide a scatter/gather table of pinned
+ * pages for a DMA GEM object
+ * @dma_obj: DMA GEM object
*
* This function exports a scatter/gather table by calling the standard
* DMA mapping API.
@@ -420,9 +418,9 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
*/
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
{
- struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_gem_object *obj = &dma_obj->base;
struct sg_table *sgt;
int ret;
@@ -430,8 +428,8 @@ struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj)
if (!sgt)
return ERR_PTR(-ENOMEM);
- ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
- cma_obj->paddr, obj->size);
+ ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
+ dma_obj->dma_addr, obj->size);
if (ret < 0)
goto out;
@@ -441,10 +439,10 @@ out:
kfree(sgt);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_dma_get_sg_table);
/**
- * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
+ * drm_gem_dma_prime_import_sg_table - produce a DMA GEM object from another
* driver's scatter/gather table of pinned pages
* @dev: device to import into
* @attach: DMA-BUF attachment
@@ -453,7 +451,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
* This function imports a scatter/gather table exported via DMA-BUF by
* another driver. Imported buffers must be physically contiguous in memory
* (i.e. the scatter/gather table must contain a single entry). Drivers that
- * use the CMA helpers should set this as their
+ * use the DMA helpers should set this as their
* &drm_driver.gem_prime_import_sg_table callback.
*
* Returns:
@@ -461,56 +459,57 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);
* error code on failure.
*/
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
/* check if the entries in the sg_table are contiguous */
if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
return ERR_PTR(-EINVAL);
- /* Create a CMA GEM buffer. */
- cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
- if (IS_ERR(cma_obj))
- return ERR_CAST(cma_obj);
+ /* Create a DMA GEM buffer. */
+ dma_obj = __drm_gem_dma_create(dev, attach->dmabuf->size, true);
+ if (IS_ERR(dma_obj))
+ return ERR_CAST(dma_obj);
- cma_obj->paddr = sg_dma_address(sgt->sgl);
- cma_obj->sgt = sgt;
+ dma_obj->dma_addr = sg_dma_address(sgt->sgl);
+ dma_obj->sgt = sgt;
- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
+ attach->dmabuf->size);
- return &cma_obj->base;
+ return &dma_obj->base;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
+EXPORT_SYMBOL_GPL(drm_gem_dma_prime_import_sg_table);
/**
- * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
+ * drm_gem_dma_vmap - map a DMA GEM object into the kernel's virtual
* address space
- * @cma_obj: CMA GEM object
- * @map: Returns the kernel virtual address of the CMA GEM object's backing
+ * @dma_obj: DMA GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing
* store.
*
* This function maps a buffer into the kernel's virtual address space.
- * Since the CMA buffers are already mapped into the kernel virtual address
+ * Since the DMA buffers are already mapped into the kernel virtual address
* space this simply returns the cached virtual address.
*
* Returns:
* 0 on success, or a negative error code otherwise.
*/
-int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj,
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
struct iosys_map *map)
{
- iosys_map_set_vaddr(map, cma_obj->vaddr);
+ iosys_map_set_vaddr(map, dma_obj->vaddr);
return 0;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
+EXPORT_SYMBOL_GPL(drm_gem_dma_vmap);
/**
- * drm_gem_cma_mmap - memory-map an exported CMA GEM object
- * @cma_obj: CMA GEM object
+ * drm_gem_dma_mmap - memory-map an exported DMA GEM object
+ * @dma_obj: DMA GEM object
* @vma: VMA for the area to be mapped
*
* This function maps a buffer into a userspace process's address space.
@@ -520,9 +519,9 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma)
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma)
{
- struct drm_gem_object *obj = &cma_obj->base;
+ struct drm_gem_object *obj = &dma_obj->base;
int ret;
/*
@@ -534,37 +533,38 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_DONTEXPAND;
- if (cma_obj->map_noncoherent) {
+ if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- ret = dma_mmap_pages(cma_obj->base.dev->dev,
+ ret = dma_mmap_pages(dma_obj->base.dev->dev,
vma, vma->vm_end - vma->vm_start,
- virt_to_page(cma_obj->vaddr));
+ virt_to_page(dma_obj->vaddr));
} else {
- ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
- cma_obj->paddr, vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
+ dma_obj->dma_addr,
+ vma->vm_end - vma->vm_start);
}
if (ret)
drm_gem_vm_close(vma);
return ret;
}
-EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+EXPORT_SYMBOL_GPL(drm_gem_dma_mmap);
/**
- * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
+ * drm_gem_dma_prime_import_sg_table_vmap - PRIME import another driver's
* scatter/gather table and get the virtual address of the buffer
* @dev: DRM device
* @attach: DMA-BUF attachment
* @sgt: Scatter/gather table of pinned pages
*
* This function imports a scatter/gather table using
- * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
- * virtual address. This ensures that a CMA GEM object always has its virtual
+ * drm_gem_dma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a DMA GEM object always has its virtual
* address set. This address is released when the object is freed.
*
* This function can be used as the &drm_driver.gem_prime_import_sg_table
- * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
+ * callback. The &DRM_GEM_DMA_DRIVER_OPS_VMAP macro provides a shortcut to set
* the necessary DRM driver operations.
*
* Returns:
@@ -572,11 +572,11 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
* error code on failure.
*/
struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *obj;
struct iosys_map map;
int ret;
@@ -587,19 +587,19 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
return ERR_PTR(ret);
}
- obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
dma_buf_vunmap(attach->dmabuf, &map);
return obj;
}
- cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = map.vaddr;
+ dma_obj = to_drm_gem_dma_obj(obj);
+ dma_obj->vaddr = map.vaddr;
return obj;
}
-EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
+EXPORT_SYMBOL(drm_gem_dma_prime_import_sg_table_vmap);
-MODULE_DESCRIPTION("DRM CMA memory-management helpers");
+MODULE_DESCRIPTION("DRM DMA memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL");
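For drivers, the rename is mostly mechanical: include drm_gem_dma_helper.h instead of the CMA header and switch to the drm_gem_dma_*/DRM_GEM_DMA_* names. A sketch of the usual wiring, assuming the DRM_GEM_DMA_DRIVER_OPS convenience macro from the renamed header (the mydrv_* values are placeholders):

#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>

static const struct drm_driver mydrv_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* hooks up dumb_create, PRIME import, etc. to the DMA helpers */
	DRM_GEM_DMA_DRIVER_OPS,
	.name			= "mydrv",
	.desc			= "Example DMA-helper based driver",
	.date			= "20220101",
	.major			= 1,
};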
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 61339a9cd010..880a4975507f 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -490,6 +490,8 @@ void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_directi
}
EXPORT_SYMBOL(drm_gem_fb_end_cpu_access);
+// TODO Drop this function and replace it with drm_format_info_bpp() once all
+// DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c
static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
@@ -497,11 +499,6 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
info = drm_get_format_info(dev, mode_cmd);
- /* use whatever a driver has set */
- if (info->cpp[0])
- return info->cpp[0] * 8;
-
- /* guess otherwise */
switch (info->format) {
case DRM_FORMAT_YUV420_8BIT:
return 12;
@@ -510,11 +507,8 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
case DRM_FORMAT_VUY101010:
return 30;
default:
- break;
+ return drm_format_info_bpp(info, 0);
}
-
- /* all attempts failed */
- return 0;
}
static int drm_gem_afbc_min_size(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 904fc893c905..35138f8a375c 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -663,7 +663,7 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info);
* drm_gem_shmem_get_pages_sgt() instead.
*
* Returns:
- * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
*/
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index d607043716d3..125160b534be 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -226,9 +226,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
* A failing ttm_bo_init will call ttm_buffer_object_destroy
* to release gbo->bo.base and kfree gbo.
*/
- ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, false, NULL, NULL,
- ttm_buffer_object_destroy);
+ ret = ttm_bo_init_validate(bdev, &gbo->bo, ttm_bo_type_device,
+ &gbo->placement, pg_align, false, NULL, NULL,
+ ttm_buffer_object_destroy);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8faad23dc1d8..ca2a6e6101dc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -472,7 +472,13 @@ EXPORT_SYMBOL(drm_invalid_op);
*/
static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
{
- int len;
+ size_t len;
+
+ /* don't attempt to copy a NULL pointer */
+ if (WARN_ONCE(!value, "BUG: the value to copy was not set!")) {
+ *buf_len = 0;
+ return 0;
+ }
/* don't overflow userbuf */
len = strlen(value);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 2f61f53d472f..a6ac56580876 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -205,7 +205,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
- void *src;
+ struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
@@ -215,17 +215,16 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
goto out_drm_gem_fb_end_cpu_access;
- src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(dst, 0, src, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, data, fb, clip, !gem->import_attach);
else
- drm_fb_memcpy(dst, 0, src, fb, clip);
+ drm_fb_memcpy(&dst_map, NULL, data, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(dst, 0, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, data, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -311,6 +310,24 @@ err_drm_dev_exit:
}
/**
+ * mipi_dbi_pipe_mode_valid - MIPI DBI mode-valid helper
+ * @pipe: Simple display pipe
+ * @mode: The mode to test
+ *
+ * This function validates a given display mode against the fixed display
+ * mode of the MIPI DBI hardware. Drivers can use this as their
+ * &drm_simple_display_pipe_funcs->mode_valid callback.
+ */
+enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+
+ return drm_crtc_helper_mode_valid_fixed(&pipe->crtc, mode, &dbidev->mode);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_mode_valid);
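A sketch of plugging the new helper into a MIPI DBI driver's simple-display-pipe function table; mydrv_pipe_enable() is a placeholder for the driver's own enable hook:

#include <drm/drm_mipi_dbi.h>
#include <drm/drm_simple_kms_helper.h>

static const struct drm_simple_display_pipe_funcs mydrv_pipe_funcs = {
	.mode_valid	= mipi_dbi_pipe_mode_valid,
	.enable		= mydrv_pipe_enable,		/* driver-specific */
	.disable	= mipi_dbi_pipe_disable,
	.update		= mipi_dbi_pipe_update,
};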
+
+/**
* mipi_dbi_pipe_update - Display pipe update helper
* @pipe: Simple display pipe
* @old_state: Old plane state
@@ -416,26 +433,8 @@ EXPORT_SYMBOL(mipi_dbi_pipe_disable);
static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &dbidev->mode);
- if (!mode) {
- DRM_ERROR("Failed to duplicate mode\n");
- return 0;
- }
-
- if (mode->name[0] == '\0')
- drm_mode_set_name(mode);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- if (mode->width_mm) {
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- }
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &dbidev->mode);
}
static const struct drm_connector_helper_funcs mipi_dbi_connector_hfuncs = {
@@ -1136,7 +1135,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
/*
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in the GEM code
- * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical
+ * (e.g., drm_gem_dma_create()). The dma_addr returned will be a physical
* address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index c40bde96cfdf..3ec02748d56f 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -346,6 +346,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@@ -1236,7 +1237,9 @@ static int mipi_dsi_drv_remove(struct device *dev)
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- return drv->remove(dsi);
+ drv->remove(dsi);
+
+ return 0;
}
static void mipi_dsi_drv_shutdown(struct device *dev)
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 59b34f07cfce..939d621c9ad4 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -151,6 +151,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->registration_state != DRM_CONNECTOR_REGISTERED)
+ continue;
+
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&
(connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
@@ -412,8 +415,8 @@ int drmm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
- idr_init(&dev->mode_config.object_idr);
- idr_init(&dev->mode_config.tile_idr);
+ idr_init_base(&dev->mode_config.object_idr, 1);
+ idr_init_base(&dev->mode_config.tile_idr, 1);
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 0f08319453b2..f858dfedf2cf 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -100,45 +100,16 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
* formats than this and drivers may specify a more accurate list when
- * creating the primary plane. However drivers that still call
- * drm_plane_init() will use this minimal format list as the default.
+ * creating the primary plane.
*/
static const uint32_t safe_modeset_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
-static struct drm_plane *create_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- /*
- * Remove the format_default field from drm_plane when dropping
- * this helper.
- */
- primary->format_default = true;
-
- /* possible_crtc's will be filled in later by crtc_init */
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- safe_modeset_formats,
- ARRAY_SIZE(safe_modeset_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
+static const struct drm_plane_funcs primary_plane_funcs = {
+ DRM_PLANE_NON_ATOMIC_FUNCS,
+};
/**
* drm_crtc_init - Legacy CRTC initialization function
@@ -171,10 +142,33 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
struct drm_plane *primary;
+ int ret;
+
+ /* possible_crtc's will be filled in later by crtc_init */
+ primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0,
+ &primary_plane_funcs,
+ safe_modeset_formats,
+ ARRAY_SIZE(safe_modeset_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
- primary = create_primary_plane(dev);
- return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs,
- NULL);
+ /*
+ * Remove the format_default field from drm_plane when dropping
+ * this helper.
+ */
+ primary->format_default = true;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, NULL);
+ if (ret)
+ goto err_drm_plane_cleanup;
+
+ return 0;
+
+err_drm_plane_cleanup:
+ drm_plane_cleanup(primary);
+ kfree(primary);
+ return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index fc1728d46ac2..8a0c0e0bb5bd 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -103,6 +103,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
+ .width = 1080,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -128,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Anbernic Win600 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"),
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -152,6 +164,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO AIR */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AIR"),
+ },
+ .driver_data = (void *)&lcd1080x1920_leftside_up,
}, { /* AYA NEO NEXT */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 726f2f163c26..33357629a7f5 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -448,6 +448,44 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size,
}
EXPORT_SYMBOL(__drmm_universal_plane_alloc);
+void *__drm_universal_plane_alloc(struct drm_device *dev, size_t size,
+ size_t offset, uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type type,
+ const char *name, ...)
+{
+ void *container;
+ struct drm_plane *plane;
+ va_list ap;
+ int ret;
+
+ if (drm_WARN_ON(dev, !funcs))
+ return ERR_PTR(-EINVAL);
+
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ plane = container + offset;
+
+ va_start(ap, name);
+ ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
+ formats, format_count, format_modifiers,
+ type, name, ap);
+ va_end(ap);
+ if (ret)
+ goto err_kfree;
+
+ return container;
+
+err_kfree:
+ kfree(container);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(__drm_universal_plane_alloc);
+
int drm_plane_register_all(struct drm_device *dev)
{
unsigned int num_planes = 0;
@@ -483,38 +521,6 @@ void drm_plane_unregister_all(struct drm_device *dev)
}
/**
- * drm_plane_init - Initialize a legacy plane
- * @dev: DRM device
- * @plane: plane object to init
- * @possible_crtcs: bitmask of possible CRTCs
- * @funcs: callbacks for the new plane
- * @formats: array of supported formats (DRM_FORMAT\_\*)
- * @format_count: number of elements in @formats
- * @is_primary: plane type (primary vs overlay)
- *
- * Legacy API to initialize a DRM plane.
- *
- * New drivers should call drm_universal_plane_init() instead.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary)
-{
- enum drm_plane_type type;
-
- type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
- return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
- formats, format_count,
- NULL, type, NULL);
-}
-EXPORT_SYMBOL(drm_plane_init);
-
-/**
* drm_plane_cleanup - Clean up the core plane usage
* @plane: plane to cleanup
*
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 838b32b70bce..865bd999b187 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -30,8 +30,10 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#define SUBPIXEL_MASK 0xffff
@@ -145,13 +147,36 @@ static int drm_plane_helper_check_update(struct drm_plane *plane,
return 0;
}
-static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
+/**
+ * drm_plane_helper_update_primary - Helper for updating primary planes
+ * @plane: plane to update
+ * @crtc: the plane's new CRTC
+ * @fb: the plane's new framebuffer
+ * @crtc_x: x coordinate within CRTC
+ * @crtc_y: y coordinate within CRTC
+ * @crtc_w: width coordinate within CRTC
+ * @crtc_h: height coordinate within CRTC
+ * @src_x: x coordinate within source
+ * @src_y: y coordinate within source
+ * @src_w: width coordinate within source
+ * @src_h: height coordinate within source
+ * @ctx: modeset locking context
+ *
+ * This helper validates the given parameters and updates the primary plane.
+ *
+ * This function is only useful for non-atomic modesetting. Don't use
+ * it in new drivers.
+ *
+ * Returns:
+ * Zero on success, or an errno code otherwise.
+ */
+int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_set set = {
.crtc = crtc,
@@ -172,15 +197,19 @@ static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *c
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
+ struct drm_device *dev = plane->dev;
struct drm_connector **connector_list;
int num_connectors, ret;
bool visible;
+ if (drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev)))
+ return -EINVAL;
+
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest,
DRM_MODE_ROTATE_0,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false, &visible);
if (ret)
return ret;
@@ -218,31 +247,74 @@ static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *c
kfree(connector_list);
return ret;
}
+EXPORT_SYMBOL(drm_plane_helper_update_primary);
-static int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+/**
+ * drm_plane_helper_disable_primary - Helper for disabling primary planes
+ * @plane: plane to disable
+ * @ctx: modeset locking context
+ *
+ * This helper returns an error when trying to disable the primary
+ * plane.
+ *
+ * This function is only useful for non-atomic modesetting. Don't use
+ * it in new drivers.
+ *
+ * Returns:
+ * An errno code.
+ */
+int drm_plane_helper_disable_primary(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *dev = plane->dev;
+
+ drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev));
+
return -EINVAL;
}
+EXPORT_SYMBOL(drm_plane_helper_disable_primary);
/**
- * drm_primary_helper_destroy() - Helper for primary plane destruction
+ * drm_plane_helper_destroy() - Helper for primary plane destruction
* @plane: plane to destroy
*
* Provides a default plane destroy handler for primary planes. This handler
* is called during CRTC destruction. We disable the primary plane, remove
* it from the DRM plane list, and deallocate the plane structure.
*/
-void drm_primary_helper_destroy(struct drm_plane *plane)
+void drm_plane_helper_destroy(struct drm_plane *plane)
{
drm_plane_cleanup(plane);
kfree(plane);
}
-EXPORT_SYMBOL(drm_primary_helper_destroy);
-
-const struct drm_plane_funcs drm_primary_helper_funcs = {
- .update_plane = drm_primary_helper_update,
- .disable_plane = drm_primary_helper_disable,
- .destroy = drm_primary_helper_destroy,
-};
-EXPORT_SYMBOL(drm_primary_helper_funcs);
+EXPORT_SYMBOL(drm_plane_helper_destroy);
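Taken together, the three exports above can replace the removed drm_primary_helper_funcs table in a legacy, non-atomic driver; a minimal sketch of the equivalent per-driver vtable:

static const struct drm_plane_funcs my_legacy_primary_funcs = {
        .update_plane   = drm_plane_helper_update_primary,
        .disable_plane  = drm_plane_helper_disable_primary,
        .destroy        = drm_plane_helper_destroy,
};

This is the same mapping the old shared table provided, but each driver now owns its own definition.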
+
+/**
+ * drm_plane_helper_atomic_check() - Helper to check plane atomic-state
+ * @plane: plane to check
+ * @state: atomic state object
+ *
+ * Provides a default plane-state check handler for planes whose atomic-state
+ * scale and positioning are not expected to change since the plane is always
+ * a fullscreen scanout buffer.
+ *
+ * This is often the case for the primary plane of simple framebuffers.
+ *
+ * Returns:
+ * Zero on success, or an errno code otherwise.
+ */
+int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+}
+EXPORT_SYMBOL(drm_plane_helper_atomic_check);
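For atomic drivers whose plane is always an unscaled, full-screen scanout buffer, the new check helper can be wired straight into the plane helper vtable; a minimal sketch, with my_plane_atomic_update() as a hypothetical driver hook:

#include <drm/drm_modeset_helper_vtables.h>

static void my_plane_atomic_update(struct drm_plane *plane,
                                   struct drm_atomic_state *state); /* hypothetical */

static const struct drm_plane_helper_funcs my_fullscreen_plane_helper_funcs = {
        .atomic_check   = drm_plane_helper_atomic_check,
        .atomic_update  = my_plane_atomic_update,
};

static void my_plane_init_helpers(struct drm_plane *plane)
{
        drm_plane_helper_add(plane, &my_fullscreen_plane_helper_funcs);
}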
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index f783d4963d4b..5b93c11895bb 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,14 +23,13 @@
* Rob Clark <robdclark@gmail.com>
*/
-#define DEBUG /* for pr_debug() */
-
#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
@@ -40,7 +39,7 @@
* __drm_debug: Enable debug output.
* Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
-unsigned int __drm_debug;
+unsigned long __drm_debug;
EXPORT_SYMBOL(__drm_debug);
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
@@ -52,7 +51,30 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, __drm_debug, int, 0600);
+
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+module_param_named(debug, __drm_debug, ulong, 0600);
+#else
+/* classnames must match vals of enum drm_debug_category */
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
+static struct ddebug_class_param drm_debug_bitmap = {
+ .bits = &__drm_debug,
+ .flags = "p",
+ .map = &drm_debug_classes,
+};
+module_param_cb(debug, &param_ops_dyndbg_classes, &drm_debug_bitmap, 0600);
+#endif
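Either way the parameter is wired up, the bitmask gates the per-category drm_dbg_*() macros at run time; a small sketch of a hypothetical driver emitting categorized output:

#include <drm/drm_print.h>

static void my_log_pipe_enable(struct drm_device *drm, unsigned int pipe)
{
        /* Printed only when the KMS bit (0x04) is set in drm.debug ... */
        drm_dbg_kms(drm, "enabling pipe %u\n", pipe);

        /* ... and this one only when the ATOMIC bit (0x10) is set. */
        drm_dbg_atomic(drm, "commit queued for pipe %u\n", pipe);
}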
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
@@ -162,7 +184,8 @@ EXPORT_SYMBOL(__drm_printfn_info);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
{
- pr_debug("%s %pV", p->prefix, vaf);
+ /* pr_debug callsite decorations are unhelpful here */
+ printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
}
EXPORT_SYMBOL(__drm_printfn_debug);
@@ -256,15 +279,16 @@ void drm_dev_printk(const struct device *dev, const char *level,
}
EXPORT_SYMBOL(drm_dev_printk);
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
+ /* we know we are printing for either syslog, tracefs, or both */
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
@@ -278,14 +302,14 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
va_end(args);
}
-EXPORT_SYMBOL(drm_dev_dbg);
+EXPORT_SYMBOL(__drm_dev_dbg);
-void __drm_dbg(enum drm_debug_category category, const char *format, ...)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
va_start(args, format);
@@ -297,7 +321,7 @@ void __drm_dbg(enum drm_debug_category category, const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(__drm_dbg);
+EXPORT_SYMBOL(___drm_dbg);
void __drm_err(const char *format, ...)
{
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bb427c5a4f1f..69b0b2b9cc1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -1015,6 +1015,30 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
/**
+ * drm_crtc_helper_mode_valid_fixed - Validates a display mode
+ * @crtc: the crtc
+ * @mode: the mode to validate
+ * @fixed_mode: the display hardware's mode
+ *
+ * Returns:
+ * MODE_OK on success, or another mode-status code otherwise.
+ */
+enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *fixed_mode)
+{
+ if (mode->hdisplay != fixed_mode->hdisplay && mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_ONE_SIZE;
+ else if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_ONE_WIDTH;
+ else if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_ONE_HEIGHT;
+
+ return MODE_OK;
+}
+EXPORT_SYMBOL(drm_crtc_helper_mode_valid_fixed);
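Drivers with a single fixed display mode would typically call this from their &drm_crtc_helper_funcs.mode_valid hook; a minimal sketch, assuming a hypothetical my_display container that stores the fixed mode:

static enum drm_mode_status my_crtc_mode_valid(struct drm_crtc *crtc,
                                               const struct drm_display_mode *mode)
{
        struct my_display *md = to_my_display(crtc->dev); /* hypothetical */

        return drm_crtc_helper_mode_valid_fixed(crtc, mode, &md->fixed_mode);
}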
+
+/**
* drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID
* property from the connector's
* DDC channel
@@ -1051,6 +1075,46 @@ int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector)
EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc);
/**
+ * drm_connector_helper_get_modes_fixed - Duplicates a display mode for a connector
+ * @connector: the connector
+ * @fixed_mode: the display hardware's mode
+ *
+ * This function duplicates a display mode for a connector. Drivers for hardware
+ * that only supports a single fixed mode can use this function in their connector's
+ * get_modes helper.
+ *
+ * Returns:
+ * The number of created modes.
+ */
+int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
+ const struct drm_display_mode *fixed_mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(dev, fixed_mode);
+ if (!mode) {
+ drm_err(dev, "Failed to duplicate mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(fixed_mode));
+ return 0;
+ }
+
+ if (mode->name[0] == '\0')
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ if (mode->width_mm)
+ connector->display_info.width_mm = mode->width_mm;
+ if (mode->height_mm)
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+EXPORT_SYMBOL(drm_connector_helper_get_modes_fixed);
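The counterpart on the connector side is to report that same fixed mode from &drm_connector_helper_funcs.get_modes; a minimal sketch with the same hypothetical my_display container:

static int my_connector_get_modes(struct drm_connector *connector)
{
        struct my_display *md = to_my_display(connector->dev); /* hypothetical */

        return drm_connector_helper_get_modes_fixed(connector, &md->fixed_mode);
}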
+
+/**
* drm_connector_helper_get_modes - Read EDID and update connector.
* @connector: The connector
*
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 36633590ebf3..e9f782119d3d 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -12,7 +12,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -223,8 +222,8 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
&pipe->crtc);
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index bf33c3084cb4..a971590b8132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -4,7 +4,6 @@
// Author: Inki Dae <inki.dae@samsung.com>
// Author: Andrzej Hajda <a.hajda@samsung.com>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 66e5f1e34044..7c3aa77186d3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 10b0036f8a2e..b7c11bdce2c8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -893,7 +893,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
if (!edid)
return -ENODEV;
- hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
+ hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
@@ -922,8 +922,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
return -EINVAL;
}
-static int hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 65260a658684..8d333db813b7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1045,7 +1045,7 @@ static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
-static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
+static enum drm_mode_status mixer_mode_valid(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct mixer_context *ctx = crtc->ctx;
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index e95e96c565ba..5ca71ef87325 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -3,7 +3,7 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 7a503bf08d0f..b4acc3422ba4 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -20,9 +20,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -150,13 +149,13 @@ static void fsl_dcu_unload(struct drm_device *dev)
dev->dev_private = NULL;
}
-DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(fsl_dcu_drm_fops);
static const struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index d763f53f480c..5b47000738e4 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -6,7 +6,6 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 0cd527f0c146..794a87d16f88 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -10,10 +10,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -84,7 +84,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
int index;
@@ -95,7 +95,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
if (index < 0)
return;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
@@ -136,7 +136,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
DCU_LAYER_POSY(new_state->crtc_y) |
DCU_LAYER_POSX(new_state->crtc_x));
regmap_write(fsl_dev->regmap,
- DCU_CTRLDESCLN(index, 3), gem->paddr);
+ DCU_CTRLDESCLN(index, 3), gem->dma_addr);
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
DCU_LAYER_EN |
DCU_LAYER_TRANS(0xff) |
@@ -171,16 +171,10 @@ static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = {
.atomic_update = fsl_dcu_drm_plane_atomic_update,
};
-static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(plane);
-}
-
static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = {
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
- .destroy = fsl_dcu_drm_plane_destroy,
+ .destroy = drm_plane_helper_destroy,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
.update_plane = drm_atomic_helper_update_plane,
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 0cff20265f97..807b989e3c77 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -7,6 +7,8 @@ config DRM_GMA500
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
+ select X86_PLATFORM_DEVICES if ACPI
+ select ACPI_WMI if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
Intel GMA500 (Poulsbo), Intel GMA600 (Moorestown/Oak Trail) and
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 46b9c0f13d6d..577a4987b193 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -7,75 +7,109 @@
* Authors: Eric Knopp
*/
+#include <acpi/video.h>
+
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "intel_bios.h"
#include "power.h"
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-static void do_gma_backlight_set(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- backlight_update_status(dev_priv->backlight_device);
-}
-#endif
-
void gma_backlight_enable(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
dev_priv->backlight_enabled = true;
- if (dev_priv->backlight_device) {
- dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
- do_gma_backlight_set(dev);
- }
-#endif
+ dev_priv->ops->backlight_set(dev, dev_priv->backlight_level);
}
void gma_backlight_disable(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
dev_priv->backlight_enabled = false;
- if (dev_priv->backlight_device) {
- dev_priv->backlight_device->props.brightness = 0;
- do_gma_backlight_set(dev);
- }
-#endif
+ dev_priv->ops->backlight_set(dev, 0);
}
void gma_backlight_set(struct drm_device *dev, int v)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
dev_priv->backlight_level = v;
- if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
- dev_priv->backlight_device->props.brightness = v;
- do_gma_backlight_set(dev);
- }
-#endif
+ if (dev_priv->backlight_enabled)
+ dev_priv->ops->backlight_set(dev, v);
+}
+
+static int gma_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
+ if (dev_priv->ops->backlight_get)
+ return dev_priv->ops->backlight_get(dev);
+
+ return dev_priv->backlight_level;
}
+static int gma_backlight_update_status(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ int level = backlight_get_brightness(bd);
+
+ /* Percentage 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ gma_backlight_set(dev, level);
+ return 0;
+}
+
+static const struct backlight_ops gma_backlight_ops __maybe_unused = {
+ .get_brightness = gma_backlight_get_brightness,
+ .update_status = gma_backlight_update_status,
+};
+
int gma_backlight_init(struct drm_device *dev)
{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct backlight_properties props __maybe_unused = {};
+ int ret;
+
dev_priv->backlight_enabled = true;
- return dev_priv->ops->backlight_init(dev);
-#else
- return 0;
+ dev_priv->backlight_level = 100;
+
+ ret = dev_priv->ops->backlight_init(dev);
+ if (ret)
+ return ret;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping %s backlight registration\n",
+ dev_priv->ops->backlight_name);
+ return 0;
+ }
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ props.brightness = dev_priv->backlight_level;
+ props.max_brightness = PSB_MAX_BRIGHTNESS;
+ props.type = BACKLIGHT_RAW;
+
+ dev_priv->backlight_device =
+ backlight_device_register(dev_priv->ops->backlight_name,
+ dev->dev, dev,
+ &gma_backlight_ops, &props);
+ if (IS_ERR(dev_priv->backlight_device))
+ return PTR_ERR(dev_priv->backlight_device);
#endif
+
+ return 0;
}
void gma_backlight_exit(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- if (dev_priv->backlight_device) {
- dev_priv->backlight_device->props.brightness = 0;
- backlight_update_status(dev_priv->backlight_device);
+
+ if (dev_priv->backlight_device)
backlight_device_unregister(dev_priv->backlight_device);
- }
#endif
}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index dd32b484dd82..3065596257e9 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -5,7 +5,6 @@
*
**************************************************************************/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <drm/drm.h>
@@ -62,14 +61,10 @@ static int cdv_output_init(struct drm_device *dev)
return 0;
}
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
/*
 * Cedartrail Backlight Interfaces
*/
-static struct backlight_device *cdv_backlight_device;
-
static int cdv_backlight_combination_mode(struct drm_device *dev)
{
return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
@@ -92,9 +87,8 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
return max;
}
-static int cdv_get_brightness(struct backlight_device *bd)
+static int cdv_get_brightness(struct drm_device *dev)
{
- struct drm_device *dev = bl_get_data(bd);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
@@ -106,20 +100,13 @@ static int cdv_get_brightness(struct backlight_device *bd)
val *= lbpc;
}
return (val * 100)/cdv_get_max_backlight(dev);
-
}
-static int cdv_set_brightness(struct backlight_device *bd)
+static void cdv_set_brightness(struct drm_device *dev, int level)
{
- struct drm_device *dev = bl_get_data(bd);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int level = bd->props.brightness;
u32 blc_pwm_ctl;
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
level *= cdv_get_max_backlight(dev);
level /= 100;
@@ -136,38 +123,18 @@ static int cdv_set_brightness(struct backlight_device *bd)
blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl |
(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
- return 0;
}
-static const struct backlight_ops cdv_ops = {
- .get_brightness = cdv_get_brightness,
- .update_status = cdv_set_brightness,
-};
-
static int cdv_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- cdv_backlight_device = backlight_device_register("psb-bl",
- NULL, (void *)dev, &cdv_ops, &props);
- if (IS_ERR(cdv_backlight_device))
- return PTR_ERR(cdv_backlight_device);
-
- cdv_backlight_device->props.brightness =
- cdv_get_brightness(cdv_backlight_device);
- backlight_update_status(cdv_backlight_device);
- dev_priv->backlight_device = cdv_backlight_device;
- dev_priv->backlight_enabled = true;
+
+ dev_priv->backlight_level = cdv_get_brightness(dev);
+ cdv_set_brightness(dev, dev_priv->backlight_level);
+
return 0;
}
-#endif
-
/*
* Provide the Cedarview specific chip logic and low level methods
* for power management
@@ -581,11 +548,9 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = true;
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
psb_intel_opregion_init(dev);
@@ -615,9 +580,10 @@ const struct psb_ops cdv_chip_ops = {
.hotplug = cdv_hotplug_event,
.hotplug_enable = cdv_hotplug_enable,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = cdv_backlight_init,
-#endif
+ .backlight_get = cdv_get_brightness,
+ .backlight_set = cdv_set_brightness,
+ .backlight_name = "psb-bl",
.init_pm = cdv_init_pm,
.save_regs = cdv_save_display_registers,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index bb2e9d64018a..53b967282d6a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -115,7 +115,7 @@ i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
/*
* Write a single byte to the current I2C address, the
- * the I2C link must be running or this returns -EIO
+ * I2C link must be running or this returns -EIO
*/
static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index dffe37490206..4b7627a72637 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct psb_gem_object *pobj = to_psb_gem_object(obj);
- drm_gem_object_release(obj);
-
/* Undo the mmap pin if we are destroying the object */
if (pobj->mmapping)
psb_gem_unpin(pobj);
+ drm_gem_object_release(obj);
+
WARN_ON(pobj->in_gart && !pobj->stolen);
release_resource(&pobj->resource);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index bd40c040a2c9..fe7b8436f87a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
@@ -552,28 +555,11 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
return ret;
}
-int gma_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set, ctx);
-
- pm_runtime_forbid(dev->dev);
- ret = drm_crtc_helper_set_config(set, ctx);
- pm_runtime_allow(dev->dev);
-
- return ret;
-}
-
const struct drm_crtc_funcs gma_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
- .set_config = gma_crtc_set_config,
+ .set_config = drm_crtc_helper_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
.enable_vblank = gma_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 113cf048105e..c8b611a2f6c6 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -69,8 +69,6 @@ extern int gma_crtc_page_flip(struct drm_crtc *crtc,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
struct drm_modeset_acquire_ctx *ctx);
-extern int gma_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
extern void gma_crtc_save(struct drm_crtc *crtc);
extern void gma_crtc_restore(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 6004390d647a..64761f46b434 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -310,7 +310,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
temp & ~PIPEACONF_ENABLE, i);
REG_READ_WITH_AUX(map->conf, i);
}
- /* Wait for for the pipe disable to take effect. */
+ /* Wait for the pipe disable to take effect. */
gma_wait_for_vblank(dev);
temp = REG_READ_WITH_AUX(map->dpll, i);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 5923a9c89312..2531959d3d77 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -5,7 +5,6 @@
*
**************************************************************************/
-#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/module.h>
@@ -37,29 +36,18 @@ static int oaktrail_output_init(struct drm_device *dev)
* Provide the low level interfaces for the Moorestown backlight
*/
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
#define BLC_PWM_FREQ_CALC_CONSTANT 32
#define MHz 1000000
#define BLC_ADJUSTMENT_MAX 100
-static struct backlight_device *oaktrail_backlight_device;
-static int oaktrail_brightness;
-
-static int oaktrail_set_brightness(struct backlight_device *bd)
+static void oaktrail_set_brightness(struct drm_device *dev, int level)
{
- struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int level = bd->props.brightness;
u32 blc_pwm_ctl;
u32 max_pwm_blc;
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
if (gma_power_begin(dev, 0)) {
/* Calculate and set the brightness value */
max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
@@ -82,19 +70,9 @@ static int oaktrail_set_brightness(struct backlight_device *bd)
REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
gma_power_end(dev);
}
- oaktrail_brightness = level;
- return 0;
-}
-
-static int oaktrail_get_brightness(struct backlight_device *bd)
-{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return oaktrail_brightness;
}
-static int device_backlight_init(struct drm_device *dev)
+static int oaktrail_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
@@ -123,44 +101,11 @@ static int device_backlight_init(struct drm_device *dev)
REG_WRITE(BLC_PWM_CTL, value | (value << 16));
gma_power_end(dev);
}
- return 0;
-}
-
-static const struct backlight_ops oaktrail_ops = {
- .get_brightness = oaktrail_get_brightness,
- .update_status = oaktrail_set_brightness,
-};
-static int oaktrail_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
- NULL, (void *)dev, &oaktrail_ops, &props);
-
- if (IS_ERR(oaktrail_backlight_device))
- return PTR_ERR(oaktrail_backlight_device);
-
- ret = device_backlight_init(dev);
- if (ret < 0) {
- backlight_device_unregister(oaktrail_backlight_device);
- return ret;
- }
- oaktrail_backlight_device->props.brightness = 100;
- oaktrail_backlight_device->props.max_brightness = 100;
- backlight_update_status(oaktrail_backlight_device);
- dev_priv->backlight_device = oaktrail_backlight_device;
+ oaktrail_set_brightness(dev, PSB_MAX_BRIGHTNESS);
return 0;
}
-#endif
-
/*
* Provide the Moorestown specific chip logic and low level methods
* for power management
@@ -501,12 +446,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
-
+ dev_priv->use_msi = true;
dev_priv->regmap = oaktrail_regmap;
ret = mid_chip_setup(dev);
@@ -548,9 +490,9 @@ const struct psb_ops oaktrail_chip_ops = {
.output_init = oaktrail_output_init,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
.backlight_init = oaktrail_backlight_init,
-#endif
+ .backlight_set = oaktrail_set_brightness,
+ .backlight_name = "oaktrail-bl",
.save_regs = oaktrail_save_display_registers,
.restore_regs = oaktrail_restore_display_registers,
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 4d98df189e10..75b4eb1c8884 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -61,7 +61,6 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
pp_status = REG_READ(PP_STATUS);
} while (pp_status & PP_ON);
dev_priv->is_lvds_on = false;
- pm_request_idle(dev->dev);
}
gma_power_end(dev);
}
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index dc494df71a48..0c271072af63 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -150,21 +150,17 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
- struct backlight_device *bd = dev_priv->backlight_device;
DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;
- if (bd == NULL)
- return ASLE_BACKLIGHT_FAILED;
-
bclp &= ASLE_BCLP_MSK;
if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
- gma_backlight_set(dev, bclp * bd->props.max_brightness / 255);
+ gma_backlight_set(dev, bclp * PSB_MAX_BRIGHTNESS / 255);
asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index b91de6d36e41..186af29bea6f 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -37,9 +37,6 @@
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-static struct mutex power_mutex; /* Serialize power ops */
-static DEFINE_SPINLOCK(power_ctrl_lock); /* Serialize power claim */
-
/**
* gma_power_init - initialise power manager
* @dev: our device
@@ -54,13 +51,23 @@ void gma_power_init(struct drm_device *dev)
dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
dev_priv->ospm_base &= 0xffff;
- dev_priv->display_power = true; /* We start active */
- dev_priv->display_count = 0; /* Currently no users */
- dev_priv->suspended = false; /* And not suspended */
- mutex_init(&power_mutex);
-
if (dev_priv->ops->init_pm)
dev_priv->ops->init_pm(dev);
+
+ /*
+ * Runtime pm support is broken atm. So for now unconditionally
+ * call pm_runtime_get() here and put it again in psb_driver_unload()
+ *
+ * To fix this we need to call pm_runtime_get() once for each active
+ * pipe at boot and then put() / get() for each pipe disable / enable
+ * so that the device gets runtime suspended when no pipes are active.
+ * Once this is in place the pm_runtime_get() below should be replaced
+ * by a pm_runtime_allow() call to undo the pm_runtime_forbid() from
+ * pci_pm_init().
+ */
+ pm_runtime_get(dev->dev);
+
+ dev_priv->pm_initialized = true;
}
/**
@@ -71,8 +78,12 @@ void gma_power_init(struct drm_device *dev)
*/
void gma_power_uninit(struct drm_device *dev)
{
- pm_runtime_disable(dev->dev);
- pm_runtime_set_suspended(dev->dev);
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+
+ if (!dev_priv->pm_initialized)
+ return;
+
+ pm_runtime_put_noidle(dev->dev);
}
/**
@@ -85,11 +96,8 @@ static void gma_suspend_display(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- if (dev_priv->suspended)
- return;
dev_priv->ops->save_regs(dev);
dev_priv->ops->power_down(dev);
- dev_priv->display_power = false;
}
/**
@@ -106,8 +114,6 @@ static void gma_resume_display(struct pci_dev *pdev)
/* turn on the display power island */
dev_priv->ops->power_up(dev);
- dev_priv->suspended = false;
- dev_priv->display_power = true;
PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
pci_write_config_word(pdev, PSB_GMCH_CTRL,
@@ -131,21 +137,14 @@ static void gma_suspend_pci(struct pci_dev *pdev)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int bsm, vbt;
- if (dev_priv->suspended)
- return;
-
pci_save_state(pdev);
pci_read_config_dword(pdev, 0x5C, &bsm);
dev_priv->regs.saveBSM = bsm;
pci_read_config_dword(pdev, 0xFC, &vbt);
dev_priv->regs.saveVBT = vbt;
- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
-
- dev_priv->suspended = true;
}
/**
@@ -155,29 +154,17 @@ static void gma_suspend_pci(struct pci_dev *pdev)
* Perform the resume processing on our PCI device state - rewrite
* register state and re-enable the PCI device
*/
-static bool gma_resume_pci(struct pci_dev *pdev)
+static int gma_resume_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
-
- if (!dev_priv->suspended)
- return true;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
- /* restoring MSI address and data in PCIx space */
- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
- ret = pci_enable_device(pdev);
- if (ret != 0)
- dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
- else
- dev_priv->suspended = false;
- return !dev_priv->suspended;
+ return pci_enable_device(pdev);
}
/**
@@ -192,20 +179,10 @@ int gma_power_suspend(struct device *_dev)
{
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- mutex_lock(&power_mutex);
- if (!dev_priv->suspended) {
- if (dev_priv->display_count) {
- mutex_unlock(&power_mutex);
- dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
- return -EBUSY;
- }
- gma_irq_uninstall(dev);
- gma_suspend_display(dev);
- gma_suspend_pci(pdev);
- }
- mutex_unlock(&power_mutex);
+ gma_irq_uninstall(dev);
+ gma_suspend_display(dev);
+ gma_suspend_pci(pdev);
return 0;
}
@@ -220,28 +197,13 @@ int gma_power_resume(struct device *_dev)
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- mutex_lock(&power_mutex);
gma_resume_pci(pdev);
gma_resume_display(pdev);
- gma_irq_preinstall(dev);
- gma_irq_postinstall(dev);
- mutex_unlock(&power_mutex);
+ gma_irq_install(dev);
return 0;
}
/**
- * gma_power_is_on - returne true if power is on
- * @dev: our DRM device
- *
- * Returns true if the display island power is on at this moment
- */
-bool gma_power_is_on(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- return dev_priv->display_power;
-}
-
-/**
* gma_power_begin - begin requiring power
* @dev: our DRM device
* @force_on: true to force power on
@@ -251,35 +213,10 @@ bool gma_power_is_on(struct drm_device *dev)
*/
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&power_ctrl_lock, flags);
- /* Power already on ? */
- if (dev_priv->display_power) {
- dev_priv->display_count++;
- pm_runtime_get(dev->dev);
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
- return true;
- }
- if (force_on == false)
- goto out_false;
-
- /* Ok power up needed */
- ret = gma_resume_pci(pdev);
- if (ret == 0) {
- gma_irq_preinstall(dev);
- gma_irq_postinstall(dev);
- pm_runtime_get(dev->dev);
- dev_priv->display_count++;
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
- return true;
- }
-out_false:
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
- return false;
+ if (force_on)
+ return pm_runtime_resume_and_get(dev->dev) == 0;
+ else
+ return pm_runtime_get_if_in_use(dev->dev) == 1;
}
/**
@@ -291,46 +228,5 @@ out_false:
*/
void gma_power_end(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- unsigned long flags;
- spin_lock_irqsave(&power_ctrl_lock, flags);
- dev_priv->display_count--;
- WARN_ON(dev_priv->display_count < 0);
- spin_unlock_irqrestore(&power_ctrl_lock, flags);
pm_runtime_put(dev->dev);
}
-
-int psb_runtime_suspend(struct device *dev)
-{
- return gma_power_suspend(dev);
-}
-
-int psb_runtime_resume(struct device *dev)
-{
- return gma_power_resume(dev);
-}
-
-int psb_runtime_idle(struct device *dev)
-{
- struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
- struct drm_psb_private *dev_priv = to_drm_psb_private(drmdev);
- if (dev_priv->display_count)
- return 0;
- else
- return 1;
-}
-
-int gma_power_thaw(struct device *_dev)
-{
- return gma_power_resume(_dev);
-}
-
-int gma_power_freeze(struct device *_dev)
-{
- return gma_power_suspend(_dev);
-}
-
-int gma_power_restore(struct device *_dev)
-{
- return gma_power_resume(_dev);
-}
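After this rework gma_power_begin()/gma_power_end() are thin wrappers around runtime PM, but callers still bracket register access the same way; a minimal sketch (REG_READ() and PIPEACONF come from the gma500 headers, and the local must be named dev for the macro to expand):

static u32 my_read_pipeconf(struct drm_device *dev)
{
        u32 val = 0;

        /* Touch display registers only while the power island is up. */
        if (gma_power_begin(dev, false)) {
                val = REG_READ(PIPEACONF);
                gma_power_end(dev);
        }

        return val;
}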
diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 0c89c4d6ec20..063328d66652 100644
--- a/drivers/gpu/drm/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
@@ -43,9 +43,6 @@ void gma_power_uninit(struct drm_device *dev);
*/
int gma_power_suspend(struct device *dev);
int gma_power_resume(struct device *dev);
-int gma_power_thaw(struct device *dev);
-int gma_power_freeze(struct device *dev);
-int gma_power_restore(struct device *_dev);
/*
* These are the functions the driver should use to wrap all hw access
@@ -54,19 +51,4 @@ int gma_power_restore(struct device *_dev);
bool gma_power_begin(struct drm_device *dev, bool force);
void gma_power_end(struct drm_device *dev);
-/*
- * Use this function to do an instantaneous check for if the hw is on.
- * Only use this in cases where you know the mutex is already held such
- * as in irq install/uninstall and you need to
- * prevent a deadlock situation. Otherwise use gma_power_begin().
- */
-bool gma_power_is_on(struct drm_device *dev);
-
-/*
- * GFX-Runtime PM callbacks
- */
-int psb_runtime_suspend(struct device *dev);
-int psb_runtime_resume(struct device *dev);
-int psb_runtime_idle(struct device *dev);
-
#endif /*_PSB_POWERMGMT_H_*/
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 71534f4ca834..3c294c38bdb4 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -5,8 +5,6 @@
*
**************************************************************************/
-#include <linux/backlight.h>
-
#include <drm/drm.h>
#include "gma_device.h"
@@ -24,8 +22,6 @@ static int psb_output_init(struct drm_device *dev)
return 0;
}
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
/*
* Poulsbo Backlight Interfaces
*/
@@ -41,18 +37,6 @@ static int psb_output_init(struct drm_device *dev)
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
-static int psb_brightness;
-static struct backlight_device *psb_backlight_device;
-
-static int psb_get_brightness(struct backlight_device *bd)
-{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return psb_brightness;
-}
-
-
static int psb_backlight_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@@ -86,62 +70,13 @@ static int psb_backlight_setup(struct drm_device *dev)
REG_WRITE(BLC_PWM_CTL,
(value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
}
- return 0;
-}
-
-static int psb_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(psb_backlight_device);
- int level = bd->props.brightness;
-
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
- psb_intel_lvds_set_brightness(dev, level);
- psb_brightness = level;
- return 0;
-}
-
-static const struct backlight_ops psb_ops = {
- .get_brightness = psb_get_brightness,
- .update_status = psb_set_brightness,
-};
-
-static int psb_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- int ret;
- struct backlight_properties props;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- psb_backlight_device = backlight_device_register("psb-bl",
- NULL, (void *)dev, &psb_ops, &props);
- if (IS_ERR(psb_backlight_device))
- return PTR_ERR(psb_backlight_device);
-
- ret = psb_backlight_setup(dev);
- if (ret < 0) {
- backlight_device_unregister(psb_backlight_device);
- psb_backlight_device = NULL;
- return ret;
- }
- psb_backlight_device->props.brightness = 100;
- psb_backlight_device->props.max_brightness = 100;
- backlight_update_status(psb_backlight_device);
- dev_priv->backlight_device = psb_backlight_device;
+ psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
/* This must occur after the backlight is properly initialised */
psb_lid_timer_init(dev_priv);
-
return 0;
}
-#endif
-
/*
* Provide the Poulsbo specific chip logic and low level methods
* for power management
@@ -345,9 +280,9 @@ const struct psb_ops psb_chip_ops = {
.output_init = psb_output_init,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = psb_backlight_init,
-#endif
+ .backlight_init = psb_backlight_setup,
+ .backlight_set = psb_intel_lvds_set_brightness,
+ .backlight_name = "psb-bl",
.init_pm = psb_init_pm,
.save_regs = psb_save_display_registers,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1d8744f3e702..cd9c73f5a64a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -169,8 +169,7 @@ static void psb_driver_unload(struct drm_device *dev)
/* TODO: Kill vblank etc here */
- if (dev_priv->backlight_device)
- gma_backlight_exit(dev);
+ gma_backlight_exit(dev);
psb_modeset_cleanup(dev);
gma_irq_uninstall(dev);
@@ -383,7 +382,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- gma_irq_install(dev, pdev->irq);
+ gma_irq_install(dev);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@@ -399,6 +398,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (gma_encoder->type == INTEL_OUTPUT_LVDS ||
gma_encoder->type == INTEL_OUTPUT_MIPI) {
ret = gma_backlight_init(dev);
+ if (ret == 0)
+ acpi_video_register_backlight();
break;
}
}
@@ -407,11 +408,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
return ret;
psb_intel_opregion_enable_asle(dev);
-#if 0
- /* Enable runtime pm at last */
- pm_runtime_enable(dev->dev);
- pm_runtime_set_active(dev->dev);
-#endif
return devm_add_action_or_reset(dev->dev, psb_device_release, dev);
@@ -420,33 +416,6 @@ out_err:
return ret;
}
-static inline void get_brightness(struct backlight_device *bd)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- if (bd) {
- bd->props.brightness = bd->ops->get_brightness(bd);
- backlight_update_status(bd);
- }
-#endif
-}
-
-static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- static unsigned int runtime_allowed;
-
- if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
- runtime_allowed++;
- pm_runtime_allow(dev->dev);
- dev_priv->rpm_enabled = 1;
- }
- return drm_ioctl(filp, cmd, arg);
- /* FIXME: do we need to wrap the other side of this */
-}
-
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_psb_private *dev_priv;
@@ -493,22 +462,13 @@ static void psb_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
}
-static const struct dev_pm_ops psb_pm_ops = {
- .resume = gma_power_resume,
- .suspend = gma_power_suspend,
- .thaw = gma_power_thaw,
- .freeze = gma_power_freeze,
- .restore = gma_power_restore,
- .runtime_suspend = psb_runtime_suspend,
- .runtime_resume = psb_runtime_resume,
- .runtime_idle = psb_runtime_idle,
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(psb_pm_ops, gma_power_suspend, gma_power_resume, NULL);
static const struct file_operations psb_gem_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = psb_unlocked_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0ea3d23575f3..ae544b69fc47 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -172,6 +172,8 @@
#define PSB_WATCHDOG_DELAY (HZ * 2)
#define PSB_LID_DELAY (HZ / 10)
+#define PSB_MAX_BRIGHTNESS 100
+
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -426,9 +428,7 @@ struct drm_psb_private {
spinlock_t irqmask_lock;
/* Power */
- bool suspended;
- bool display_power;
- int display_count;
+ bool pm_initialized;
/* Modesetting */
struct psb_intel_mode_device mode_dev;
@@ -486,10 +486,8 @@ struct drm_psb_private {
unsigned int core_freq;
uint32_t iLVDS_enable;
- /* Runtime PM state */
- int rpm_enabled;
-
/* MID specific */
+ bool use_msi;
bool has_gct;
struct oaktrail_gct_data gct_data;
@@ -499,10 +497,6 @@ struct drm_psb_private {
/* Register state */
struct psb_save_area regs;
- /* MSI reg save */
- uint32_t msi_addr;
- uint32_t msi_data;
-
/* Hotplug handling */
struct work_struct hotplug_work;
@@ -530,10 +524,6 @@ struct drm_psb_private {
struct drm_fb_helper *fb_helper;
- /* Panel brightness */
- int brightness;
- int brightness_adjusted;
-
bool dsr_enable;
u32 dsr_fb_update;
bool dpi_panel_on[3];
@@ -602,10 +592,13 @@ struct psb_ops {
void (*disable_sr)(struct drm_device *dev);
void (*lvds_bl_power)(struct drm_device *dev, bool on);
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
/* Backlight */
int (*backlight_init)(struct drm_device *dev);
-#endif
+ void (*backlight_set)(struct drm_device *dev, int level);
+ int (*backlight_get)(struct drm_device *dev);
+ const char *backlight_name;
+
int i2c_bus; /* I2C bus identifier for Moorestown */
};
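With the #ifdef gone, every chip unconditionally provides its backlight hooks through struct psb_ops; a minimal sketch for a hypothetical chip (other hooks elided):

static const struct psb_ops my_chip_ops = {
        /* ... display and power hooks elided ... */
        .backlight_init = my_backlight_init,    /* int (*)(struct drm_device *) */
        .backlight_set  = my_backlight_set,     /* void (*)(struct drm_device *, int) */
        .backlight_get  = my_backlight_get,     /* int (*)(struct drm_device *) */
        .backlight_name = "my-bl",
};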
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 9a5ea06a1a8e..531c1781a8fb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -9,8 +9,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <drm/drm_plane_helper.h>
-
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 8ccba116821b..8a1111fe714b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -197,8 +197,6 @@ extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
extern void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
-extern void oaktrail_dsi_init(struct drm_device *dev,
- struct psb_intel_mode_device *mode_dev);
struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev);
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
@@ -219,9 +217,6 @@ extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
int pipe);
extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
int sdvoB);
-extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
-extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
- int enable);
extern int intelfb_probe(struct drm_device *dev);
extern int intelfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index a85aace25548..bdced46dd333 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -400,26 +400,38 @@ static const struct _sdvo_cmd_name {
#define IS_SDVOB(reg) (reg == SDVOB)
#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
-static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo,
+ u8 cmd, const void *args, int args_len)
{
- int i;
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ int i, pos = 0;
+ char buffer[73];
+
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
+
+ for (i = 0; i < args_len; i++) {
+ BUF_PRINT("%02X ", ((u8 *)args)[i]);
+ }
+
+ for (; i < 8; i++) {
+ BUF_PRINT(" ");
+ }
- DRM_DEBUG_KMS("%s: W: %02X ",
- SDVO_NAME(psb_intel_sdvo), cmd);
- for (i = 0; i < args_len; i++)
- DRM_DEBUG_KMS("%02X ", ((u8 *)args)[i]);
- for (; i < 8; i++)
- DRM_DEBUG_KMS(" ");
for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- DRM_DEBUG_KMS("(%s)", sdvo_cmd_names[i].name);
+ BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
break;
}
}
+
if (i == ARRAY_SIZE(sdvo_cmd_names))
- DRM_DEBUG_KMS("(%02X)", cmd);
- DRM_DEBUG_KMS("\n");
+ BUF_PRINT("(%02X)", cmd);
+
+ drm_WARN_ON(dev, pos >= sizeof(buffer) - 1);
+#undef BUF_PRINT
+
+ DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(psb_intel_sdvo), cmd, buffer);
}
static const char *cmd_status_names[] = {
@@ -490,13 +502,13 @@ static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 c
}
static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
- void *response, int response_len)
+ void *response, int response_len)
{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ char buffer[73];
+ int i, pos = 0;
u8 retry = 5;
u8 status;
- int i;
-
- DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
/*
* The documentation states that all commands will be
@@ -520,10 +532,13 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
goto log_fail;
}
+#define BUF_PRINT(args...) \
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
+
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- DRM_DEBUG_KMS("(%s)", cmd_status_names[status]);
+ BUF_PRINT("(%s)", cmd_status_names[status]);
else
- DRM_DEBUG_KMS("(??? %d)", status);
+ BUF_PRINT("(??? %d)", status);
if (status != SDVO_CMD_STATUS_SUCCESS)
goto log_fail;
@@ -534,13 +549,18 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
SDVO_I2C_RETURN_0 + i,
&((u8 *)response)[i]))
goto log_fail;
- DRM_DEBUG_KMS(" %02X", ((u8 *)response)[i]);
+ BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- DRM_DEBUG_KMS("\n");
+
+ drm_WARN_ON(dev, pos >= sizeof(buffer) - 1);
+#undef BUF_PRINT
+
+ DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(psb_intel_sdvo), buffer);
return true;
log_fail:
- DRM_DEBUG_KMS("... failed\n");
+ DRM_DEBUG_KMS("%s: R: ... failed %s\n",
+ SDVO_NAME(psb_intel_sdvo), buffer);
return false;
}
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6e6d61bbeab..d421031462df 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -228,7 +228,7 @@ static irqreturn_t gma_irq_handler(int irq, void *arg)
vdc_stat &= dev_priv->vdc_irq_mask;
spin_unlock(&dev_priv->irqmask_lock);
- if (dsp_int && gma_power_is_on(dev)) {
+ if (dsp_int) {
gma_vdc_interrupt(dev, vdc_stat);
handled = 1;
}
@@ -264,13 +264,12 @@ void gma_irq_preinstall(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
- if (gma_power_is_on(dev)) {
- PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
- PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
- PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
- PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
- }
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
+ PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
+
if (dev->vblank[0].enabled)
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
if (dev->vblank[1].enabled)
@@ -316,17 +315,24 @@ void gma_irq_postinstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-int gma_irq_install(struct drm_device *dev, unsigned int irq)
+int gma_irq_install(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (irq == IRQ_NOTCONNECTED)
+ if (dev_priv->use_msi && pci_enable_msi(pdev)) {
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = false;
+ }
+
+ if (pdev->irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
gma_irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
if (ret)
return ret;
@@ -369,6 +375,8 @@ void gma_irq_uninstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
free_irq(pdev->irq, dev);
+ if (dev_priv->use_msi)
+ pci_disable_msi(pdev);
}
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index b51e395194ff..7648f69824a5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -17,7 +17,7 @@ struct drm_device;
void gma_irq_preinstall(struct drm_device *dev);
void gma_irq_postinstall(struct drm_device *dev);
-int gma_irq_install(struct drm_device *dev, unsigned int irq);
+int gma_irq_install(struct drm_device *dev);
void gma_irq_uninstall(struct drm_device *dev);
int gma_crtc_enable_vblank(struct drm_crtc *crtc);
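
gma_irq_install() now owns MSI setup: it tries pci_enable_msi(), falls back to the legacy line interrupt with a warning if that fails, and gma_irq_uninstall() disables MSI again. A hedged sketch of the same pattern for a hypothetical PCI DRM driver (struct my_dev and the my_* functions are placeholders):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev {
	struct pci_dev *pdev;
	bool use_msi;
};

static irqreturn_t my_irq_handler(int irq, void *arg)
{
	return IRQ_HANDLED;
}

static int my_irq_install(struct my_dev *dev)
{
	/* Prefer MSI, but keep going with the legacy line IRQ if it fails. */
	if (dev->use_msi && pci_enable_msi(dev->pdev)) {
		dev_warn(&dev->pdev->dev, "Enabling MSI failed!\n");
		dev->use_msi = false;
	}

	if (dev->pdev->irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* PCI devices require shared interrupts. */
	return request_irq(dev->pdev->irq, my_irq_handler, IRQF_SHARED,
			   "my-drm", dev);
}

static void my_irq_uninstall(struct my_dev *dev)
{
	free_irq(dev->pdev->irq, dev);
	if (dev->use_msi)
		pci_disable_msi(dev->pdev);
}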
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 4873f9799f41..7c6dc2bcd14a 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -59,6 +59,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
unsigned int bits_per_pixel = 8 / block_width;
unsigned int x, y, width, height;
u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
+ struct iosys_map dst_map, vmap;
size_t len;
void *buf;
@@ -74,7 +75,9 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
if (!buf)
return 0;
- drm_fb_xrgb8888_to_gray8(buf, 0, src, fb, rect);
+ iosys_map_set_vaddr(&dst_map, buf);
+ iosys_map_set_vaddr(&vmap, src);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
pix8 = buf;
for (y = 0; y < height; y++) {
@@ -105,7 +108,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
unsigned int bits_per_pixel = 8 / block_width;
u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
unsigned int x, y, width;
- u32 *pix32;
+ __le32 *sbuf32;
+ u32 pix32;
size_t len;
/* Start on a byte boundary */
@@ -114,8 +118,8 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);
for (y = rect->y1; y < rect->y2; y++) {
- pix32 = src + (y * fb->pitches[0]);
- pix32 += rect->x1;
+ sbuf32 = src + (y * fb->pitches[0]);
+ sbuf32 += rect->x1;
for (x = 0; x < width; x++) {
unsigned int pixpos = x % block_width; /* within byte from the left */
@@ -126,9 +130,10 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
*block = 0;
}
- r = *pix32 >> 16;
- g = *pix32 >> 8;
- b = *pix32++;
+ pix32 = le32_to_cpu(*sbuf32++);
+ r = pix32 >> 16;
+ g = pix32 >> 8;
+ b = pix32;
switch (format->format) {
case GUD_DRM_FORMAT_XRGB1111:
@@ -154,6 +159,7 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
u8 compression = gdrm->compression;
struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
+ struct iosys_map dst;
void *vaddr, *buf;
size_t pitch, len;
int ret = 0;
@@ -177,6 +183,7 @@ retry:
buf = gdrm->compress_buf;
else
buf = gdrm->bulk_buf;
+ iosys_map_set_vaddr(&dst, buf);
/*
* Imported buffers are assumed to be write-combined and thus uncached
@@ -190,23 +197,24 @@ retry:
goto end_cpu_access;
}
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(buf, 0, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(buf, 0, vaddr, fb, rect, gud_is_big_endian());
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+ gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(buf, 0, vaddr, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(buf, 0, vaddr, fb, rect, !import_attach);
+ drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
} else if (compression && !import_attach && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
} else {
- drm_fb_memcpy(buf, 0, vaddr, fb, rect);
+ drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
}
memset(req, 0, sizeof(*req));
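
gud_xrgb8888_to_color() now reads the source as __le32 and converts once with le32_to_cpu() before splitting the channels, instead of shifting a host-endian u32 in place. A standalone, endian-safe sketch of the same split (plain C; the sample pixel is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Decode one little-endian XRGB8888 pixel into its channels. */
static void xrgb8888_split(const uint8_t *p, uint8_t *r, uint8_t *g, uint8_t *b)
{
	/* Assemble the 32-bit value byte by byte; works on any host endianness. */
	uint32_t pix32 = (uint32_t)p[0] | (uint32_t)p[1] << 8 |
			 (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;

	*r = pix32 >> 16;
	*g = pix32 >> 8;
	*b = pix32;
}

int main(void)
{
	const uint8_t pixel[4] = { 0x33, 0x22, 0x11, 0x00 }; /* B=0x33 G=0x22 R=0x11 X=0x00 */
	uint8_t r, g, b;

	xrgb8888_split(pixel, &r, &g, &b);
	printf("r=0x%02X g=0x%02X b=0x%02X\n", r, g, b);
	return 0;
}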
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 073adfe438dd..4e41c144a290 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index b770f7662830..c5265675bf0c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -3,7 +3,7 @@ config DRM_HISI_KIRIN
tristate "DRM Support for Hisilicon Kirin series SoCs Platform"
depends on DRM && OF && ARM64
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have a Hisilicon Kirin chipset (hi6220).
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 61c29c2834e6..871f79a6b17e 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -24,11 +24,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -549,13 +548,13 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
- struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
- u32 addr = (u32)obj->paddr + y * stride;
+ u32 addr = (u32) obj->dma_addr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
- ch + 1, y, in_h, stride, (u32)obj->paddr);
+ ch + 1, y, in_h, stride, (u32) obj->dma_addr);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n",
addr, fb->width, fb->height, fmt,
&fb->format->format);
@@ -920,12 +919,12 @@ static const struct drm_mode_config_funcs ade_mode_config_funcs = {
};
-DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ade_fops);
static const struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
.date = "20150718",
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 2af51df6dca7..73ee7f25f734 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -19,9 +19,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 6d11e7938c83..ca127ff797f7 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -23,9 +23,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
}
ret = hyperv_setup_vram(hv, hdev);
-
if (ret)
goto err_vmbus_close;
@@ -146,22 +142,22 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
if (ret)
drm_warn(dev, "Failed to update vram location.\n");
- hv->dirt_needed = true;
-
ret = hyperv_mode_config_init(hv);
if (ret)
- goto err_vmbus_close;
+ goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
- goto err_vmbus_close;
+ goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
+err_free_mmio:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index b8e64dd8d3a6..28e732f94bf2 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -21,19 +21,18 @@
#include "hyperv_drm.h"
static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
- const struct iosys_map *map,
+ const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct hyperv_drm_device *hv = to_hv(fb->dev);
- void __iomem *dst = hv->vram;
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
int idx;
if (!drm_dev_enter(&hv->dev, &idx))
return -ENODEV;
- dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect);
- drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect);
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
+ drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
drm_dev_exit(idx);
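
The blit path above wraps the I/O-memory destination in a struct iosys_map and lets drm_fb_memcpy() handle the copy uniformly. A kernel-style sketch of that usage, following the calls visible in the hunk and assuming the caller supplies the __iomem VRAM pointer, framebuffer, source map and damage rect (blit_rect_to_vram is a made-up helper name):

#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static void blit_rect_to_vram(void __iomem *vram,
			      struct drm_framebuffer *fb,
			      const struct iosys_map *vmap,
			      struct drm_rect *rect)
{
	/* Describe the I/O-memory destination with the mapping abstraction. */
	struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(vram);

	/* Advance to the damaged rectangle, then copy it line by line. */
	iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
	drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
}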
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index 76a182a9a765..013a7829182d 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -208,7 +208,7 @@ static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg
VM_PKT_DATA_INBAND, 0);
if (ret)
- drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+ drm_err_ratelimited(&hv->dev, "Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b91e48d2190d..578b738859b9 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -417,11 +417,9 @@ fail:
return -ENODEV;
}
-static int ch7006_remove(struct i2c_client *client)
+static void ch7006_remove(struct i2c_client *client)
{
ch7006_dbg(client, "\n");
-
- return 0;
}
static int ch7006_resume(struct device *dev)
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 741886b54419..1bc0b5de4499 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -370,12 +370,6 @@ sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int
-sil164_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static struct i2c_client *
sil164_detect_slave(struct i2c_client *client)
{
@@ -427,7 +421,6 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
- .remove = sil164_remove,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5b03fdd1eaa4..9ed54e7ccff2 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -478,14 +478,12 @@ static int tda9950_probe(struct i2c_client *client,
return 0;
}
-static int tda9950_remove(struct i2c_client *client)
+static void tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
-
- return 0;
}
static struct i2c_device_id tda9950_ids[] = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f8eb6f69be05..d444e7fffb54 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2076,11 +2076,10 @@ tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
}
-static int tda998x_remove(struct i2c_client *client)
+static void tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
tda998x_destroy(&client->dev);
- return 0;
}
#ifdef CONFIG_OF
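
The I2C encoder drivers above drop their return values because the i2c_driver .remove callback now returns void. A minimal sketch of the updated callback shape for a hypothetical encoder driver (all my_encoder_* names are placeholders):

#include <linux/i2c.h>
#include <linux/module.h>

static int my_encoder_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	return 0;
}

static void my_encoder_remove(struct i2c_client *client)
{
	/* Release resources here; nothing is returned anymore. */
}

static const struct i2c_device_id my_encoder_ids[] = {
	{ "my-encoder", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, my_encoder_ids);

static struct i2c_driver my_encoder_driver = {
	.driver = {
		.name = "my-encoder",
	},
	.probe = my_encoder_probe,
	.remove = my_encoder_remove,
	.id_table = my_encoder_ids,
};
module_i2c_driver(my_encoder_driver);
MODULE_LICENSE("GPL");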
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 7ae3b7d67fcf..3efce05d7b57 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,8 @@ config DRM_I915
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
+ select X86_PLATFORM_DEVICES if ACPI
+ select ACPI_WMI if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 522ef9b4aff3..a26edcdadc21 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -123,6 +123,7 @@ gt-y += \
gt/intel_ring.o \
gt/intel_ring_submission.o \
gt/intel_rps.o \
+ gt/intel_sa_media.o \
gt/intel_sseu.o \
gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
@@ -257,7 +258,8 @@ i915-y += \
display/intel_vga.o \
display/i9xx_plane.o \
display/skl_scaler.o \
- display/skl_universal_plane.o
+ display/skl_universal_plane.o \
+ display/skl_watermark.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 82ad8fe7440c..e3e3d27ffb53 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -1169,7 +1169,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
@@ -1223,7 +1223,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, DEISR) & bit;
}
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 861dcd2eb890..a5be4af792cb 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -202,7 +202,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
* Should measure whether using a lower cdclk w/o IPS
*/
if (IS_BROADWELL(i915) &&
- crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100)
+ crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100)
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 592e5adfed8b..5afbe3e98ee8 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
@@ -126,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
- return dev_priv->fbc[INTEL_FBC_A];
+ return dev_priv->display.fbc[INTEL_FBC_A];
else
return NULL;
}
@@ -326,8 +325,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
return ret;
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
i9xx_plane_has_windowing(plane));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 5dcfa7feffa9..ed4d93942dbd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -33,6 +33,7 @@
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
@@ -641,13 +642,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -657,13 +658,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
@@ -693,7 +694,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -709,7 +710,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static void
@@ -1629,6 +1630,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@@ -2070,8 +2073,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index e78430001f07..9df78e7caa2b 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <acpi/video.h>
#include "i915_drv.h"
#include "intel_acpi.h"
@@ -331,3 +332,29 @@ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
*/
fwnode_handle_put(fwnode);
}
+
+void intel_acpi_video_register(struct drm_i915_private *i915)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ acpi_video_register();
+
+ /*
+ * If i915 is driving an internal panel without registering its native
+ * backlight handler try to register the acpi_video backlight.
+ * For panels not driven by i915 another GPU driver may still register
+ * a native backlight later and acpi_video_register_backlight() should
+ * only be called after any native backlights have been registered.
+ */
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_panel *panel = &to_intel_connector(connector)->panel;
+
+ if (panel->backlight.funcs && !panel->backlight.device) {
+ acpi_video_register_backlight();
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 4a760a2baed9..6a0007452f95 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -14,6 +14,7 @@ void intel_unregister_dsm_handler(void);
void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
void intel_acpi_device_id_update(struct drm_i915_private *i915);
void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
+void intel_acpi_video_register(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
@@ -23,6 +24,8 @@ static inline
void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
static inline
void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_video_register(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 40da7910f845..18f0a5ae3bac 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -32,7 +32,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -63,9 +62,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property)
+ if (property == dev_priv->display.properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->broadcast_rgb_property)
+ else if (property == dev_priv->display.properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
drm_dbg_atomic(&dev_priv->drm,
@@ -96,12 +95,12 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property) {
+ if (property == dev_priv->display.properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->broadcast_rgb_property) {
+ if (property == dev_priv->display.properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index efe8591619e3..aaa6708256d5 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "gt/intel_rps.h"
@@ -43,9 +42,9 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
-#include "intel_pm.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
+#include "skl_watermark.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 6c9ee905f132..aacbc6da84ef 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -393,7 +393,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
@@ -441,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -496,7 +496,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* Disable timestamps */
tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
@@ -514,7 +514,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
@@ -532,7 +532,7 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bpp = crtc_state->dsc.compressed_bpp;
- cdclk = i915->cdclk.hw.cdclk;
+ cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
@@ -639,7 +639,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
@@ -677,7 +677,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
@@ -814,7 +814,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
@@ -838,17 +838,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->audio.funcs)
- dev_priv->audio.funcs->audio_codec_enable(encoder,
- crtc_state,
- conn_state);
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
encoder->audio_connector = connector;
/* referred in audio callbacks */
- dev_priv->audio.encoder_map[pipe] = encoder;
- mutex_unlock(&dev_priv->audio.mutex);
+ dev_priv->display.audio.encoder_map[pipe] = encoder;
+ mutex_unlock(&dev_priv->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -878,7 +878,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_connector *connector = old_conn_state->connector;
enum port port = encoder->port;
@@ -891,15 +891,15 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name, pipe_name(pipe));
- if (dev_priv->audio.funcs)
- dev_priv->audio.funcs->audio_codec_disable(encoder,
- old_crtc_state,
- old_conn_state);
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
encoder->audio_connector = NULL;
- dev_priv->audio.encoder_map[pipe] = NULL;
- mutex_unlock(&dev_priv->audio.mutex);
+ dev_priv->display.audio.encoder_map[pipe] = NULL;
+ mutex_unlock(&dev_priv->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -935,13 +935,13 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
- dev_priv->audio.funcs = &g4x_audio_funcs;
+ dev_priv->display.funcs.audio = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->audio.funcs = &ilk_audio_funcs;
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->audio.funcs = &hsw_audio_funcs;
+ dev_priv->display.funcs.audio = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->audio.funcs = &ilk_audio_funcs;
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
}
}
@@ -971,7 +971,7 @@ void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
struct aud_ts_cdclk_m_n aud_ts;
if (DISPLAY_VER(i915) >= 13) {
- get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts);
+ get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
@@ -1046,13 +1046,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
- if (dev_priv->audio.power_refcount++ == 0) {
+ if (dev_priv->display.audio.power_refcount++ == 0) {
if (DISPLAY_VER(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
- dev_priv->audio.freq_cntrl);
+ dev_priv->display.audio.freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio.freq_cntrl);
+ dev_priv->display.audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -1073,7 +1073,7 @@ static void i915_audio_component_put_power(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--dev_priv->audio.power_refcount == 0)
+ if (--dev_priv->display.audio.power_refcount == 0)
if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
@@ -1119,7 +1119,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
return -ENODEV;
- return dev_priv->cdclk.hw.cdclk;
+ return dev_priv->display.cdclk.hw.cdclk;
}
/*
@@ -1140,10 +1140,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
if (drm_WARN_ON(&dev_priv->drm,
- pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map)))
+ pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map)))
return NULL;
- encoder = dev_priv->audio.encoder_map[pipe];
+ encoder = dev_priv->display.audio.encoder_map[pipe];
/*
* when bootup, audio driver may not know it is
* MST or not. So it will poll all the port & pipe
@@ -1159,7 +1159,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
return NULL;
for_each_pipe(dev_priv, pipe) {
- encoder = dev_priv->audio.encoder_map[pipe];
+ encoder = dev_priv->display.audio.encoder_map[pipe];
if (encoder == NULL)
continue;
@@ -1177,7 +1177,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1187,7 +1187,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
return 0;
cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
@@ -1206,7 +1206,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -1220,13 +1220,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
const u8 *eld;
int ret = -EINVAL;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
port_name(port));
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
return ret;
}
@@ -1238,7 +1238,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
return ret;
}
@@ -1273,7 +1273,7 @@ static int i915_audio_component_bind(struct device *i915_kdev,
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- dev_priv->audio.component = acomp;
+ dev_priv->display.audio.component = acomp;
drm_modeset_unlock_all(&dev_priv->drm);
return 0;
@@ -1288,14 +1288,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- dev_priv->audio.component = NULL;
+ dev_priv->display.audio.component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
- if (dev_priv->audio.power_refcount)
+ if (dev_priv->display.audio.power_refcount)
drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
- dev_priv->audio.power_refcount);
+ dev_priv->display.audio.power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1359,13 +1359,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- dev_priv->audio.freq_cntrl = aud_freq;
+ dev_priv->display.audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
intel_audio_cdclk_change_post(dev_priv);
- dev_priv->audio.component_registered = true;
+ dev_priv->display.audio.component_registered = true;
}
/**
@@ -1377,11 +1377,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
*/
static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->audio.component_registered)
+ if (!dev_priv->display.audio.component_registered)
return;
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
- dev_priv->audio.component_registered = false;
+ dev_priv->display.audio.component_registered = false;
}
/**
@@ -1403,7 +1403,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_audio_deinit(struct drm_i915_private *dev_priv)
{
- if ((dev_priv)->audio.lpe.platdev != NULL)
+ if (dev_priv->display.audio.lpe.platdev != NULL)
intel_lpe_audio_teardown(dev_priv);
else
i915_audio_component_cleanup(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 110fc98ec280..beba39a38c87 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -8,7 +8,10 @@
#include <linux/pwm.h>
#include <linux/string_helpers.h>
+#include <acpi/video.h>
+
#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -16,6 +19,8 @@
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#include "intel_pci_config.h"
+#include "intel_pps.h"
+#include "intel_quirks.h"
/**
* scale - scale values from one range to another
@@ -86,7 +91,7 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
return val;
if (dev_priv->params.invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -126,7 +131,7 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -303,7 +308,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
@@ -319,7 +324,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -463,14 +468,14 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
return;
}
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -813,11 +818,11 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -827,12 +832,12 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
@@ -860,7 +865,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
@@ -870,7 +875,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static int intel_backlight_device_update_status(struct backlight_device *bd)
@@ -950,6 +955,11 @@ int intel_backlight_device_register(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(&i915->drm, "Skipping intel_backlight registration\n");
+ return 0;
+ }
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
@@ -971,26 +981,24 @@ int intel_backlight_device_register(struct intel_connector *connector)
if (!name)
return -ENOMEM;
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ bd = backlight_device_get_by_name(name);
+ if (bd) {
+ put_device(&bd->dev);
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
}
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
drm_err(&i915->drm,
@@ -1113,7 +1121,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_PINEVIEW(dev_priv))
clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1131,7 +1139,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_G4X(dev_priv))
clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1591,11 +1599,11 @@ void intel_backlight_update(struct intel_atomic_state *state,
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
@@ -1605,7 +1613,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) {
drm_dbg_kms(&dev_priv->drm,
"no backlight present per VBT, but present per quirk\n");
} else {
@@ -1620,9 +1628,9 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
@@ -1773,9 +1781,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ connector->panel.backlight.power = intel_pps_backlight_power;
+ }
/* We're using a standard PWM backlight interface */
panel->backlight.funcs = &pwm_bl_funcs;
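
The registration rework above probes for a name collision with backlight_device_get_by_name() and only then builds a card-unique fallback name before registering once. A hedged sketch of that lookup-then-register flow, with the connector and properties handling elided (register_panel_backlight is a made-up helper):

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct backlight_device *
register_panel_backlight(struct device *parent, int card_index,
			 const char *connector_name, void *drvdata,
			 const struct backlight_ops *ops,
			 const struct backlight_properties *props)
{
	struct backlight_device *bd;
	char *name;

	name = kstrdup("intel_backlight", GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	/* Keep the legacy name unless another device already claimed it. */
	bd = backlight_device_get_by_name(name);
	if (bd) {
		put_device(&bd->dev);
		kfree(name);
		name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
				 card_index, connector_name);
		if (!name)
			return ERR_PTR(-ENOMEM);
	}

	bd = backlight_device_register(name, parent, drvdata, ops, props);
	kfree(name);
	return bd;
}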
diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
new file mode 100644
index 000000000000..50c1210f6d5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_REGS_H__
+#define __INTEL_BACKLIGHT_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+ _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+ _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+ _VLV_BLC_HIST_CTL_B)
+
+/* Backlight control */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
+#define BLM_TRANSCODER_B BLM_PIPE_B
+#define BLM_TRANSCODER_C BLM_PIPE_C
+#define BLM_TRANSCODER_EDP (3 << 29)
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
+
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLM_HISTOGRAM_ENABLE (1 << 31)
+
+/* New registers for PCH-split platforms. Safe where new bits show up, the
+ * register layout matches gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
+#define BLC_PWM_CPU_CTL _MMIO(0x48254)
+
+#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
+
+/* BXT backlight register definition. */
+#define _BXT_BLC_PWM_CTL1 0xC8250
+#define BXT_BLC_PWM_ENABLE (1 << 31)
+#define BXT_BLC_PWM_POLARITY (1 << 29)
+#define _BXT_BLC_PWM_FREQ1 0xC8254
+#define _BXT_BLC_PWM_DUTY1 0xC8258
+
+#define _BXT_BLC_PWM_CTL2 0xC8350
+#define _BXT_BLC_PWM_FREQ2 0xC8354
+#define _BXT_BLC_PWM_DUTY2 0xC8358
+
+#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
+#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
+#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
+
+/* Utility pin */
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
+
+#endif /* __INTEL_BACKLIGHT_REGS_H__ */
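
Per the comment above, BLC_PWM_CTL keeps the modulation period in bits 31:17 and the stored field is half the real cycle count. A small standalone sketch of encoding and decoding that field with the same shift and mask (the cycle count is illustrative):

#include <stdint.h>
#include <stdio.h>

#define BACKLIGHT_MODULATION_FREQ_SHIFT 17
#define BACKLIGHT_MODULATION_FREQ_MASK  (0x7fffu << 17)

/* The register stores half the number of PWM cycles per modulation period. */
static uint32_t encode_mod_freq(uint32_t cycles)
{
	return ((cycles / 2) << BACKLIGHT_MODULATION_FREQ_SHIFT) &
	       BACKLIGHT_MODULATION_FREQ_MASK;
}

static uint32_t decode_mod_freq(uint32_t ctl)
{
	return ((ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
}

int main(void)
{
	uint32_t ctl = encode_mod_freq(9362); /* illustrative cycle count */

	printf("ctl=0x%08X cycles=%u\n", ctl, decode_mod_freq(ctl));
	return 0;
}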
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 51dde5bfd956..28bdb936cd1f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -135,18 +135,6 @@ static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id)
return block - bdb;
}
-/* size of the block excluding the header */
-static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id)
-{
- const void *block;
-
- block = find_raw_section(bdb, section_id);
- if (!block)
- return 0;
-
- return get_blocksize(block);
-}
-
struct bdb_block_entry {
struct list_head node;
enum bdb_block_id section_id;
@@ -159,7 +147,7 @@ find_section(struct drm_i915_private *i915,
{
struct bdb_block_entry *entry;
- list_for_each_entry(entry, &i915->vbt.bdb_blocks, node) {
+ list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) {
if (entry->section_id == section_id)
return entry->data + 3;
}
@@ -231,9 +219,14 @@ static bool validate_lfp_data_ptrs(const void *bdb,
{
int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size;
int data_block_size, lfp_data_size;
+ const void *data_block;
int i;
- data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA);
+ data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!data_block)
+ return false;
+
+ data_block_size = get_blocksize(data_block);
if (data_block_size == 0)
return false;
@@ -261,21 +254,6 @@ static bool validate_lfp_data_ptrs(const void *bdb,
if (16 * lfp_data_size > data_block_size)
return false;
- /*
- * Except for vlv/chv machines all real VBTs seem to have 6
- * unaccounted bytes in the fp_timing table. And it doesn't
- * appear to be a really intentional hole as the fp_timing
- * 0xffff terminator is always within those 6 missing bytes.
- */
- if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size &&
- fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
- return false;
-
- if (ptrs->ptr[0].fp_timing.offset + fp_timing_size > ptrs->ptr[0].dvo_timing.offset ||
- ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
- ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
- return false;
-
/* make sure the table entries have uniform size */
for (i = 1; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size ||
@@ -289,6 +267,23 @@ static bool validate_lfp_data_ptrs(const void *bdb,
return false;
}
+ /*
+ * Except for vlv/chv machines all real VBTs seem to have 6
+ * unaccounted bytes in the fp_timing table. And it doesn't
+ * appear to be a really intentional hole as the fp_timing
+ * 0xffff terminator is always within those 6 missing bytes.
+ */
+ if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size)
+ fp_timing_size += 6;
+
+ if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset ||
+ ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
+ ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
+ return false;
+
/* make sure the tables fit inside the data block */
for (i = 0; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size ||
@@ -300,6 +295,15 @@ static bool validate_lfp_data_ptrs(const void *bdb,
if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size)
return false;
+ /* make sure fp_timing terminators are present at expected locations */
+ for (i = 0; i < 16; i++) {
+ const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset +
+ fp_timing_size - 2;
+
+ if (*t != 0xffff)
+ return false;
+ }
+
return true;
}
@@ -333,18 +337,6 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
return validate_lfp_data_ptrs(bdb, ptrs);
}
-static const void *find_fp_timing_terminator(const u8 *data, int size)
-{
- int i;
-
- for (i = 0; i < size - 1; i++) {
- if (data[i] == 0xff && data[i+1] == 0xff)
- return &data[i];
- }
-
- return NULL;
-}
-
static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
int table_size, int total_size)
{
@@ -368,11 +360,22 @@ static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
const void *bdb)
{
- int i, size, table_size, block_size, offset;
- const void *t0, *t1, *block;
+ int i, size, table_size, block_size, offset, fp_timing_size;
struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const void *block;
void *ptrs_block;
+ /*
+ * The hardcoded fp_timing_size is only valid for
+ * modernish VBTs. All older VBTs definitely should
+ * include block 41 and thus we don't need to
+ * generate one.
+ */
+ if (i915->display.vbt.version < 155)
+ return NULL;
+
+ fp_timing_size = 38;
+
block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
if (!block)
return NULL;
@@ -381,17 +384,8 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
block_size = get_blocksize(block);
- size = block_size;
- t0 = find_fp_timing_terminator(block, size);
- if (!t0)
- return NULL;
-
- size -= t0 - block - 2;
- t1 = find_fp_timing_terminator(t0 + 2, size);
- if (!t1)
- return NULL;
-
- size = t1 - t0;
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
if (size * 16 > block_size)
return NULL;
@@ -409,7 +403,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
table_size = sizeof(struct lvds_dvo_timing);
size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
- table_size = t0 - block + 2;
+ table_size = fp_timing_size;
size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
if (ptrs->ptr[0].fp_timing.table_size)
@@ -424,14 +418,14 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
return NULL;
}
- size = t1 - t0;
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
for (i = 1; i < 16; i++) {
next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
}
- size = t1 - t0;
table_size = sizeof(struct lvds_lfp_panel_name);
if (16 * (size + table_size) <= block_size) {
@@ -479,6 +473,13 @@ init_bdb_block(struct drm_i915_private *i915,
block_size = get_blocksize(block);
+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequence block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
if (!entry) {
@@ -501,7 +502,7 @@ init_bdb_block(struct drm_i915_private *i915,
return;
}
- list_add_tail(&entry->node, &i915->vbt.bdb_blocks);
+ list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks);
}
static void init_bdb_blocks(struct drm_i915_private *i915,
@@ -604,6 +605,19 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
+static void dump_pnp_id(struct drm_i915_private *i915,
+ const struct lvds_pnp_id *pnp_id,
+ const char *name)
+{
+ u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
+ char vend[4];
+
+ drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
+ name, drm_edid_decode_mfg_id(mfg_name, vend),
+ pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
+ pnp_id->mfg_week, pnp_id->mfg_year + 1990);
+}
+
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
@@ -655,6 +669,8 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
edid_id_nodate.mfg_week = 0;
edid_id_nodate.mfg_year = 0;
+ dump_pnp_id(i915, edid_id, "EDID");
+
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
@@ -861,6 +877,7 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
@@ -874,11 +891,18 @@ parse_lfp_data(struct drm_i915_private *i915,
if (!panel->vbt.lfp_lvds_vbt_mode)
parse_lfp_panel_dtd(i915, panel, data, ptrs);
+ pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
+ dump_pnp_id(i915, pnp_id, "Panel");
+
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
- if (i915->vbt.version >= 188) {
+ drm_dbg_kms(&i915->drm, "Panel name: %.*s\n",
+ (int)sizeof(tail->panel_name[0].name),
+ tail->panel_name[panel_type].name);
+
+ if (i915->display.vbt.version >= 188) {
panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
drm_dbg_kms(&i915->drm,
@@ -904,7 +928,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
* first on VBT >= 229, but still fall back to trying the old LFP
* block if that fails.
*/
- if (i915->vbt.version < 229)
+ if (i915->display.vbt.version < 229)
return;
generic_dtd = find_section(i915, BDB_GENERIC_DTD);
@@ -1008,12 +1032,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
- if (i915->vbt.version >= 191) {
+ if (i915->display.vbt.version >= 191) {
size_t exp_size;
- if (i915->vbt.version >= 236)
+ if (i915->display.vbt.version >= 236)
exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (i915->vbt.version >= 234)
+ else if (i915->display.vbt.version >= 234)
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
else
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
@@ -1030,14 +1054,14 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
- if (i915->vbt.version >= 234) {
+ if (i915->display.vbt.version >= 234) {
u16 min_level;
bool scale;
level = backlight_data->brightness_level[panel_type].level;
min_level = backlight_data->brightness_min_level[panel_type].level;
- if (i915->vbt.version >= 236)
+ if (i915->display.vbt.version >= 236)
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
else
scale = level > 255;
@@ -1134,37 +1158,37 @@ parse_general_features(struct drm_i915_private *i915)
if (!general)
return;
- i915->vbt.int_tv_support = general->int_tv_support;
+ i915->display.vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
- if (i915->vbt.version >= 155 &&
+ if (i915->display.vbt.version >= 155 &&
(HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
- i915->vbt.int_crt_support = general->int_crt_support;
- i915->vbt.lvds_use_ssc = general->enable_ssc;
- i915->vbt.lvds_ssc_freq =
+ i915->display.vbt.int_crt_support = general->int_crt_support;
+ i915->display.vbt.lvds_use_ssc = general->enable_ssc;
+ i915->display.vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(i915, general->ssc_freq);
- i915->vbt.display_clock_mode = general->display_clock_mode;
- i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
- if (i915->vbt.version >= 181) {
- i915->vbt.orientation = general->rotate_180 ?
+ i915->display.vbt.display_clock_mode = general->display_clock_mode;
+ i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (i915->display.vbt.version >= 181) {
+ i915->display.vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
- i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- if (i915->vbt.version >= 249 && general->afc_startup_config) {
- i915->vbt.override_afc_startup = true;
- i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ if (i915->display.vbt.version >= 249 && general->afc_startup_config) {
+ i915->display.vbt.override_afc_startup = true;
+ i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
}
drm_dbg_kms(&i915->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- i915->vbt.int_tv_support,
- i915->vbt.int_crt_support,
- i915->vbt.lvds_use_ssc,
- i915->vbt.lvds_ssc_freq,
- i915->vbt.display_clock_mode,
- i915->vbt.fdi_rx_polarity_inverted);
+ i915->display.vbt.int_tv_support,
+ i915->display.vbt.int_crt_support,
+ i915->display.vbt.lvds_use_ssc,
+ i915->display.vbt.lvds_ssc_freq,
+ i915->display.vbt.display_clock_mode,
+ i915->display.vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -1190,7 +1214,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
return;
}
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
@@ -1214,7 +1238,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1];
+ mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
mapping->slave_addr = child->slave_addr;
@@ -1265,7 +1289,7 @@ parse_driver_features(struct drm_i915_private *i915)
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
- i915->vbt.int_lvds_support = 0;
+ i915->display.vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
@@ -1278,10 +1302,10 @@ parse_driver_features(struct drm_i915_private *i915)
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
- if (i915->vbt.version >= 134 &&
+ if (i915->display.vbt.version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
- i915->vbt.int_lvds_support = 0;
+ i915->display.vbt.int_lvds_support = 0;
}
}
@@ -1295,7 +1319,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
if (!driver)
return;
- if (i915->vbt.version < 228) {
+ if (i915->display.vbt.version < 228) {
drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
@@ -1328,7 +1352,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.vrr = true; /* matches Windows behaviour */
- if (i915->vbt.version < 228)
+ if (i915->display.vbt.version < 228)
return;
power = find_section(i915, BDB_LFP_POWER);
@@ -1354,10 +1378,10 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.drrs_type = DRRS_TYPE_NONE;
}
- if (i915->vbt.version >= 232)
+ if (i915->display.vbt.version >= 232)
panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
- if (i915->vbt.version >= 233)
+ if (i915->display.vbt.version >= 233)
panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
panel_type);
}
@@ -1393,7 +1417,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.pps = *edp_pps;
- if (i915->vbt.version >= 224) {
+ if (i915->display.vbt.version >= 224) {
panel->vbt.edp.rate =
edp->edp_fast_link_training_rate[panel_type] * 20;
} else {
@@ -1472,7 +1496,7 @@ parse_edp(struct drm_i915_private *i915,
break;
}
- if (i915->vbt.version >= 173) {
+ if (i915->display.vbt.version >= 173) {
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
@@ -1488,7 +1512,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.drrs_msa_timing_delay =
panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
- if (i915->vbt.version >= 244)
+ if (i915->display.vbt.version >= 244)
panel->vbt.edp.max_link_rate =
edp->edp_max_port_link_rate[panel_type] * 20;
}
@@ -1520,7 +1544,7 @@ parse_psr(struct drm_i915_private *i915,
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
- if (i915->vbt.version >= 205 &&
+ if (i915->display.vbt.version >= 205 &&
(DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
@@ -1566,7 +1590,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
- if (i915->vbt.version >= 226) {
+ if (i915->display.vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = panel_bits(wakeup_time, panel_type, 2);
@@ -1596,7 +1620,9 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
struct intel_panel *panel,
enum port port)
{
- if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
+ if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
panel->vbt.dsi.cabc_ports = BIT(port);
@@ -1609,11 +1635,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
break;
}
@@ -1625,12 +1651,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
panel->vbt.dsi.cabc_ports =
- BIT(PORT_A) | BIT(PORT_C);
+ BIT(PORT_A) | BIT(port_bc);
break;
}
}
@@ -2051,7 +2077,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
u16 block_size;
int index;
- if (i915->vbt.version < 198)
+ if (i915->display.vbt.version < 198)
return;
params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
@@ -2071,7 +2097,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!child->compression_enable)
@@ -2205,7 +2231,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->vbt.ports[port];
+ devdata = i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2254,7 +2280,7 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- child = &i915->vbt.ports[p]->child;
+ child = &i915->display.vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
@@ -2271,7 +2297,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->vbt.ports[port];
+ devdata = i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2306,7 +2332,7 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- child = &i915->vbt.ports[p]->child;
+ child = &i915->display.vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
child->aux_channel = 0;
@@ -2418,7 +2444,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
[PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
};
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
ARRAY_SIZE(xelpd_port_mapping[0]),
xelpd_port_mapping,
@@ -2480,15 +2506,23 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 216)
+ if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
- if (devdata->i915->vbt.version >= 230)
+ if (devdata->i915->display.vbt.version >= 230)
return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
else
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
+static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 244)
+ return 0;
+
+ return devdata->child.dp_max_lane_count + 1;
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2544,7 +2578,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 158)
+ if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
return devdata->child.hdmi_level_shifter_value;
@@ -2552,7 +2586,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 204)
+ if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
switch (devdata->child.hdmi_max_data_rate) {
@@ -2661,7 +2695,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
return;
}
- if (i915->vbt.ports[port]) {
+ if (i915->display.vbt.ports[port]) {
drm_dbg_kms(&i915->drm,
"More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -2676,7 +2710,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
if (intel_bios_encoder_supports_dp(devdata))
sanitize_aux_ch(devdata, port);
- i915->vbt.ports[port] = devdata;
+ i915->display.vbt.ports[port] = devdata;
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -2692,12 +2726,12 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
if (!has_ddi_port_info(i915))
return;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node)
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
parse_ddi_port(devdata);
for_each_port(port) {
- if (i915->vbt.ports[port])
- print_ddi_port(i915->vbt.ports[port], port);
+ if (i915->display.vbt.ports[port])
+ print_ddi_port(i915->display.vbt.ports[port], port);
}
}
@@ -2730,33 +2764,33 @@ parse_general_definitions(struct drm_i915_private *i915)
bus_pin = defs->crt_ddc_gmbus_pin;
drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(i915, bus_pin))
- i915->vbt.crt_ddc_pin = bus_pin;
+ i915->display.vbt.crt_ddc_pin = bus_pin;
- if (i915->vbt.version < 106) {
+ if (i915->display.vbt.version < 106) {
expected_size = 22;
- } else if (i915->vbt.version < 111) {
+ } else if (i915->display.vbt.version < 111) {
expected_size = 27;
- } else if (i915->vbt.version < 195) {
+ } else if (i915->display.vbt.version < 195) {
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (i915->vbt.version == 195) {
+ } else if (i915->display.vbt.version == 195) {
expected_size = 37;
- } else if (i915->vbt.version <= 215) {
+ } else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 237) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
drm_dbg(&i915->drm,
"Expected child device config size for VBT version %u not known; assuming %u\n",
- i915->vbt.version, expected_size);
+ i915->display.vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
drm_err(&i915->drm,
"Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, i915->vbt.version);
+ defs->child_dev_size, expected_size, i915->display.vbt.version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
@@ -2792,10 +2826,10 @@ parse_general_definitions(struct drm_i915_private *i915)
memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- list_add_tail(&devdata->node, &i915->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
}
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
drm_dbg_kms(&i915->drm,
"no child dev is parsed from VBT\n");
}
@@ -2804,25 +2838,25 @@ parse_general_definitions(struct drm_i915_private *i915)
static void
init_vbt_defaults(struct drm_i915_private *i915)
{
- i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+ i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
- i915->vbt.int_tv_support = 1;
- i915->vbt.int_crt_support = 1;
+ i915->display.vbt.int_tv_support = 1;
+ i915->display.vbt.int_crt_support = 1;
/* driver features */
- i915->vbt.int_lvds_support = 1;
+ i915->display.vbt.int_lvds_support = 1;
/* Default to using SSC */
- i915->vbt.lvds_use_ssc = 1;
+ i915->display.vbt.lvds_use_ssc = 1;
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
- i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
- !HAS_PCH_SPLIT(i915));
+ i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
+ !HAS_PCH_SPLIT(i915));
drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
- i915->vbt.lvds_ssc_freq);
+ i915->display.vbt.lvds_ssc_freq);
}
/* Common defaults which may be overridden by VBT. */
@@ -2883,7 +2917,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (port == PORT_A)
child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
- list_add_tail(&devdata->node, &i915->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
drm_dbg_kms(&i915->drm,
"Generating default VBT child device with type 0x04%x on port %c\n",
@@ -2891,7 +2925,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
}
/* Bypass some minimum baseline VBT version checks */
- i915->vbt.version = 155;
+ i915->display.vbt.version = 155;
}
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
@@ -3078,12 +3112,12 @@ err_unmap_oprom:
*/
void intel_bios_init(struct drm_i915_private *i915)
{
- const struct vbt_header *vbt = i915->opregion.vbt;
+ const struct vbt_header *vbt = i915->display.opregion.vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- INIT_LIST_HEAD(&i915->vbt.display_devices);
- INIT_LIST_HEAD(&i915->vbt.bdb_blocks);
+ INIT_LIST_HEAD(&i915->display.vbt.display_devices);
+ INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks);
if (!HAS_DISPLAY(i915)) {
drm_dbg_kms(&i915->drm,
@@ -3111,11 +3145,11 @@ void intel_bios_init(struct drm_i915_private *i915)
goto out;
bdb = get_bdb_header(vbt);
- i915->vbt.version = bdb->version;
+ i915->display.vbt.version = bdb->version;
drm_dbg_kms(&i915->drm,
"VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, i915->vbt.version);
+ (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version);
init_bdb_blocks(i915, bdb);
@@ -3172,13 +3206,13 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
struct intel_bios_encoder_data *devdata, *nd;
struct bdb_block_entry *entry, *ne;
- list_for_each_entry_safe(devdata, nd, &i915->vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
- list_for_each_entry_safe(entry, ne, &i915->vbt.bdb_blocks, node) {
+ list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) {
list_del(&entry->node);
kfree(entry);
}
@@ -3212,13 +3246,13 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (!i915->vbt.int_tv_support)
+ if (!i915->display.vbt.int_tv_support)
return false;
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
/*
@@ -3255,10 +3289,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
/* If the device type is not LFP, continue.
@@ -3285,7 +3319,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (i915->opregion.vbt)
+ if (i915->display.opregion.vbt)
return true;
}
@@ -3304,7 +3338,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->vbt.ports[port];
+ return i915->display.vbt.ports[port];
}
/**
@@ -3364,7 +3398,7 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
const struct child_device_config *child;
u8 dvo_port;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -3463,7 +3497,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -3494,7 +3528,7 @@ bool
intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
if (drm_WARN_ON_ONCE(&i915->drm,
!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
@@ -3514,7 +3548,7 @@ bool
intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
}
@@ -3530,7 +3564,7 @@ bool
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
return devdata && devdata->child.lane_reversal;
}
@@ -3538,7 +3572,7 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
if (!devdata || !devdata->child.aux_channel) {
@@ -3576,7 +3610,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_D_XELPD;
else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC3;
@@ -3586,7 +3620,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_E_XELPD;
else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC4;
@@ -3594,25 +3628,25 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC1;
else
aux_ch = AUX_CH_F;
break;
case DP_AUX_G:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC2;
else
aux_ch = AUX_CH_G;
break;
case DP_AUX_H:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC3;
else
aux_ch = AUX_CH_H;
break;
case DP_AUX_I:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC4;
else
aux_ch = AUX_CH_I;
@@ -3632,7 +3666,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_max_tmds_clock(devdata);
}
@@ -3641,14 +3675,14 @@ int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_hdmi_level_shift(devdata);
}
int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.dp_iboost_level);
@@ -3656,7 +3690,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.hdmi_iboost_level);
@@ -3665,15 +3699,23 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_dp_max_link_rate(devdata);
}
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_dp_max_lane_count(devdata);
+}
+
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
if (!devdata || !devdata->child.ddc_pin)
return 0;
@@ -3683,16 +3725,16 @@ int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->vbt.version >= 195 && devdata->child.dp_usb_type_c;
+ return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c;
}
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->vbt.version >= 209 && devdata->child.tbt;
+ return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->vbt.ports[port];
+ return i915->display.vbt.ports[port];
}
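
Most of the churn in intel_bios.c above is mechanical: the VBT state moves from i915->vbt.* to i915->display.vbt.* (and the cached OpRegion VBT pointer to i915->display.opregion.vbt) as part of gathering display state under one sub-struct. The shape of that relocation is roughly as sketched below; this is an abbreviated illustration with made-up type names, not the actual drm_i915_private definitions.

    /* Abbreviated illustration of the new nesting -- not the real structs. */
    struct example_list_head {                   /* minimal stand-in for the kernel's struct list_head */
            struct example_list_head *next, *prev;
    };

    struct example_display {
            struct {
                    unsigned int version;        /* BDB version */
                    struct example_list_head display_devices;
                    struct example_list_head bdb_blocks;
                    /* int_tv_support, lvds_use_ssc, crt_ddc_pin, ports[], ... */
            } vbt;
            /* opregion, cdclk, bw, sagv, funcs, ... */
    };

    struct example_i915 {
            /* ... */
            struct example_display display;      /* i915->vbt.x becomes i915->display.vbt.x */
    };
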
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e47582b0de0a..e375405a7828 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -258,6 +258,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
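
The new intel_bios_dp_max_lane_count() follows the same convention as intel_bios_dp_max_link_rate(): it returns 0 when the VBT is too old (version < 244) or has no per-port data, so a caller has to treat 0 as "no VBT-imposed limit". A hypothetical caller might look like the sketch below; the function name and surrounding logic are made up for illustration only.

    /* Hypothetical caller -- illustration only, not the actual DP code. */
    static int example_dp_max_lane_count(struct intel_encoder *encoder,
                                         int source_max, int sink_max)
    {
            int vbt_max = intel_bios_dp_max_lane_count(encoder);

            /* 0 means the VBT does not constrain the lane count. */
            if (vbt_max)
                    source_max = min(source_max, vbt_max);

            return min(source_max, sink_max);
    }
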
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 79269d2c476b..4ace026b29bd 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,15 +5,17 @@
#include <drm/drm_atomic_state_helper.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "skl_watermark.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -137,6 +139,42 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return 0;
}
+static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp, int point)
+{
+ u32 val, val2;
+ u16 dclk;
+
+ val = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
+ val2 = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
+ dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
+ sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
+ sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
+
+ sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
+ sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
+static int
+intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return mtl_read_qgv_point_info(dev_priv, sp, point);
+ else if (IS_DG1(dev_priv))
+ return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ else
+ return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+}
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi,
bool is_y_tile)
@@ -147,7 +185,32 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = 4;
+ qi->max_numchannels = 2;
+ qi->channel_width = 64;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_LPDDR4:
+ case INTEL_DRAM_LPDDR5:
+ qi->t_bl = 16;
+ qi->max_numchannels = 8;
+ qi->channel_width = 16;
+ qi->deinterleave = 4;
+ break;
+ default:
+ MISSING_CASE(dram_info->type);
+ return -EINVAL;
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -181,7 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
@@ -193,11 +256,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- if (IS_DG1(dev_priv))
- ret = dg1_mchbar_read_qgv_point_info(dev_priv, sp, i);
- else
- ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
-
+ ret = intel_read_qgv_point_info(dev_priv, sp, i);
if (ret)
return ret;
@@ -284,6 +343,13 @@ static const struct intel_sa_info adlp_sa_info = {
.derating = 20,
};
+static const struct intel_sa_info mtl_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
@@ -292,7 +358,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
@@ -308,7 +374,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
int clpchgroup;
int j;
@@ -346,9 +412,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->sagv_status = I915_SAGV_ENABLED;
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
return 0;
}
@@ -363,7 +429,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
@@ -399,20 +465,22 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
- if (i < num_groups - 1)
- bi_next = &dev_priv->max_bw[i + 1];
-
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
- if (i < num_groups - 1 && clpchgroup < clperchgroup)
- bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
- else
- bi_next->num_planes = 0;
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->display.bw.max[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) /
+ clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
@@ -456,9 +524,9 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->sagv_status = I915_SAGV_ENABLED;
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
return 0;
}
@@ -466,7 +534,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
static void dg2_get_bw_info(struct drm_i915_private *i915)
{
unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->max_bw);
+ int num_groups = ARRAY_SIZE(i915->display.bw.max);
int i;
/*
@@ -477,7 +545,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->max_bw[i];
+ struct intel_bw_info *bi = &i915->display.bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -485,7 +553,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
}
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
@@ -498,9 +566,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->max_bw[i];
+ &dev_priv->display.bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -526,9 +594,9 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->max_bw[i];
+ &dev_priv->display.bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -541,14 +609,14 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
return bi->deratedbw[qgv_point];
}
- return dev_priv->max_bw[0].deratedbw[qgv_point];
+ return dev_priv->display.bw.max[0].deratedbw[qgv_point];
}
static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->max_bw[0];
+ &dev_priv->display.bw.max[0];
return bi->psf_bw[psf_gv_point];
}
@@ -558,7 +626,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_DG2(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 14)
+ tgl_get_bw_info(dev_priv, &mtl_sa_info);
+ else if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
else if (IS_ALDERLAKE_P(dev_priv))
tgl_get_bw_info(dev_priv, &adlp_sa_info);
@@ -667,7 +737,7 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -678,7 +748,7 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -689,7 +759,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
@@ -896,8 +966,8 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
- unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points;
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -970,8 +1040,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
int i, ret;
u16 qgv_points = 0, psf_points = 0;
unsigned int max_bw_point = 0, max_bw = 0;
- unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
- unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+ unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
bool changed = false;
/* FIXME earlier gens need some checks too */
@@ -1126,7 +1196,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
&state->base, &intel_bw_funcs);
return 0;
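
One detail worth noting in the new mtl_read_qgv_point_info(): the raw DCLK field read from MTL_MEM_SS_INFO_QGV_POINT_LOW appears to be in 16.667 MHz units, so DIV_ROUND_UP(16667 * dclk, 1000) scales it with integer math (a raw value of 1 becomes 17, a raw value of 180 becomes 3001). Restated as a standalone helper, under that unit assumption:

    /* Standalone restatement of the conversion above; the unit is an assumption. */
    static inline unsigned int example_decode_mtl_dclk(unsigned int raw)
    {
            /* Equivalent to DIV_ROUND_UP(16667 * raw, 1000). */
            return (16667u * raw + 999u) / 1000u;
    }
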
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 6e80162632dd..ed05070b7307 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -79,26 +79,26 @@ struct intel_cdclk_funcs {
void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
+ dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config);
}
static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
+ dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe);
}
static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_config)
{
- return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
+ return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config);
}
static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
int cdclk)
{
- return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
+ return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk);
}
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
@@ -548,7 +548,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
else
default_credits = PFI_CREDIT(8);
- if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_63;
@@ -1026,7 +1026,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(&dev_priv->drm, "DPLL0 not locked\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
/* We'll want to keep using the current vco from now on. */
skl_set_preferred_cdclk_vco(dev_priv, vco);
@@ -1040,7 +1040,7 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
@@ -1049,7 +1049,7 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
switch (cdclk) {
default:
drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
+ cdclk != dev_priv->display.cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
fallthrough;
case 308571:
@@ -1098,13 +1098,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco) {
+ if (dev_priv->display.cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
@@ -1116,7 +1116,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
intel_de_posting_read(dev_priv, CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
/* Wa Display #1183: skl,kbl,cfl */
@@ -1151,11 +1151,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
goto sanitize;
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1166,7 +1166,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
*/
cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+ skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk);
if (cdctl == expected)
/* All well; nothing to sanitize */
return;
@@ -1175,9 +1175,9 @@ sanitize:
drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = -1;
}
static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1186,19 +1186,19 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
skl_sanitize_cdclk(dev_priv);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0) {
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0) {
/*
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
if (dev_priv->skl_preferred_vco_freq == 0)
skl_set_preferred_cdclk_vco(dev_priv,
- dev_priv->cdclk.hw.vco);
+ dev_priv->display.cdclk.hw.vco);
return;
}
- cdclk_config = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
@@ -1211,7 +1211,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
@@ -1352,35 +1352,35 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = {
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
drm_WARN(&dev_priv->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ min_cdclk, dev_priv->display.cdclk.hw.ref);
return 0;
}
static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk == cdclk)
- return dev_priv->cdclk.hw.ref * table[i].ratio;
+ return dev_priv->display.cdclk.hw.ref * table[i].ratio;
drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ cdclk, dev_priv->display.cdclk.hw.ref);
return 0;
}
@@ -1554,12 +1554,12 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));
@@ -1571,7 +1571,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
@@ -1583,12 +1583,12 @@ static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
u32 val;
val = ICL_CDCLK_PLL_RATIO(ratio);
@@ -1601,12 +1601,12 @@ static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
u32 val;
/* Write PLL ratio without disabling */
@@ -1625,7 +1625,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
val &= ~BXT_DE_PLL_FREQ_REQ;
intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1655,7 +1655,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
+ cdclk != dev_priv->display.cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
fallthrough;
case 2:
@@ -1672,19 +1672,19 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk == cdclk)
return table[i].waveform;
drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ cdclk, dev_priv->display.cdclk.hw.ref);
return 0xffff;
}
@@ -1721,22 +1721,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
- if (dev_priv->cdclk.hw.vco != vco)
+ if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
+ if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
icl_cdclk_pll_disable(dev_priv);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
icl_cdclk_pll_enable(dev_priv, vco);
} else {
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
bxt_de_pll_disable(dev_priv);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
bxt_de_pll_enable(dev_priv, vco);
}
@@ -1803,7 +1803,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
+ dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1812,10 +1812,10 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
int cdclk, clock, vco;
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1833,32 +1833,32 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
/* Make sure this is a legal cdclk value for the platform */
- cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
- if (cdclk != dev_priv->cdclk.hw.cdclk)
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
+ if (cdclk != dev_priv->display.cdclk.hw.cdclk)
goto sanitize;
/* Make sure the VCO is correct for the cdclk */
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- if (vco != dev_priv->cdclk.hw.vco)
+ if (vco != dev_priv->display.cdclk.hw.vco)
goto sanitize;
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
if (has_cdclk_squasher(dev_priv))
- clock = dev_priv->cdclk.hw.vco / 2;
+ clock = dev_priv->display.cdclk.hw.vco / 2;
else
- clock = dev_priv->cdclk.hw.cdclk;
+ clock = dev_priv->display.cdclk.hw.cdclk;
expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
- dev_priv->cdclk.hw.vco);
+ dev_priv->display.cdclk.hw.vco);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->cdclk.hw.cdclk >= 500000)
+ dev_priv->display.cdclk.hw.cdclk >= 500000)
expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (cdctl == expected)
@@ -1869,10 +1869,10 @@ sanitize:
drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = -1;
}
static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1881,11 +1881,11 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
bxt_sanitize_cdclk(dev_priv);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0)
return;
- cdclk_config = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->display.cdclk.hw;
/*
* FIXME:
@@ -1902,7 +1902,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
@@ -1916,7 +1916,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
* intel_cdclk_init_hw - Initialize CDCLK hardware
* @i915: i915 device
*
- * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and
* sanitizing the state of the hardware if needed. This is generally done only
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
@@ -2077,10 +2077,10 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
+ if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
return;
intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
@@ -2098,12 +2098,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
* functions use cdclk. Not all platforms/ports do,
* but we'll lock them all for simplicity.
*/
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&dev_priv->display.gmbus.mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
- &dev_priv->gmbus_mutex);
+ &dev_priv->display.gmbus.mutex);
}
intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
@@ -2113,7 +2113,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
mutex_unlock(&intel_dp->aux.hw_mutex);
}
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&dev_priv->display.gmbus.mutex);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2124,9 +2124,9 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_audio_cdclk_change_post(dev_priv);
if (drm_WARN(&dev_priv->drm,
- intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]");
intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]");
}
}
@@ -2300,7 +2300,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
/*
- * HACK. Currently for TGL platforms we calculate
+ * HACK. Currently for TGL/DG2 platforms we calculate
* min_cdclk initially based on pixel_rate divided
* by 2, accounting for also plane requirements,
* however in some cases the lowest possible CDCLK
@@ -2308,14 +2308,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* Explicitly stating here that this seems to be currently
* rather a Hack, than final solution.
*/
- if (IS_TIGERLAKE(dev_priv)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
/*
* Clamp to max_cdclk_freq in case pixel rate is higher,
* in order not to break an 8K, but still leave W/A at place.
*/
min_cdclk = max_t(int, min_cdclk,
min_t(int, crtc_state->pixel_rate,
- dev_priv->max_cdclk_freq));
+ dev_priv->display.cdclk.max_cdclk_freq));
}
return min_cdclk;
@@ -2368,10 +2368,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
for_each_pipe(dev_priv, pipe)
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
- if (min_cdclk > dev_priv->max_cdclk_freq) {
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
drm_dbg_kms(&dev_priv->drm,
"required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
+ min_cdclk, dev_priv->display.cdclk.max_cdclk_freq);
return -EINVAL;
}
@@ -2643,7 +2643,7 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *cdclk_state;
- cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj);
if (IS_ERR(cdclk_state))
return ERR_CAST(cdclk_state);
@@ -2693,7 +2693,7 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv)
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
@@ -2799,7 +2799,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
- int max_cdclk_freq = dev_priv->max_cdclk_freq;
+ int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq;
if (DISPLAY_VER(dev_priv) >= 10)
return 2 * max_cdclk_freq;
@@ -2825,19 +2825,19 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
if (IS_JSL_EHL(dev_priv)) {
- if (dev_priv->cdclk.hw.ref == 24000)
- dev_priv->max_cdclk_freq = 552000;
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 552000;
else
- dev_priv->max_cdclk_freq = 556800;
+ dev_priv->display.cdclk.max_cdclk_freq = 556800;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->cdclk.hw.ref == 24000)
- dev_priv->max_cdclk_freq = 648000;
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 648000;
else
- dev_priv->max_cdclk_freq = 652800;
+ dev_priv->display.cdclk.max_cdclk_freq = 652800;
} else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->max_cdclk_freq = 316800;
+ dev_priv->display.cdclk.max_cdclk_freq = 316800;
} else if (IS_BROXTON(dev_priv)) {
- dev_priv->max_cdclk_freq = 624000;
+ dev_priv->display.cdclk.max_cdclk_freq = 624000;
} else if (DISPLAY_VER(dev_priv) == 9) {
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -2859,7 +2859,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
else
max_cdclk = 308571;
- dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
@@ -2868,26 +2868,26 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* available? PCI ID, VTB, something else?
*/
if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
- dev_priv->max_cdclk_freq = 450000;
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev_priv))
- dev_priv->max_cdclk_freq = 450000;
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
else if (IS_BDW_ULT(dev_priv))
- dev_priv->max_cdclk_freq = 540000;
+ dev_priv->display.cdclk.max_cdclk_freq = 540000;
else
- dev_priv->max_cdclk_freq = 675000;
+ dev_priv->display.cdclk.max_cdclk_freq = 675000;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 320000;
+ dev_priv->display.cdclk.max_cdclk_freq = 320000;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 400000;
+ dev_priv->display.cdclk.max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
- dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
+ dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
}
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
+ dev_priv->display.cdclk.max_cdclk_freq);
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
dev_priv->max_dotclk_freq);
@@ -2901,7 +2901,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+ intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -2911,7 +2911,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_de_write(dev_priv, GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000));
}
static int dg1_rawclk(struct drm_i915_private *dev_priv)
@@ -3036,6 +3036,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
freq = dg1_rawclk(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ /*
+ * MTL always uses a 38.4 MHz rawclk. The bspec tells us
+ * "RAWCLK_FREQ defaults to the values for 38.4 and does
+ * not need to be programmed."
+ */
+ freq = 38400;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -3187,78 +3194,78 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = dg2_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- dev_priv->cdclk.table = adlp_a_step_cdclk_table;
+ dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
else
- dev_priv->cdclk.table = adlp_cdclk_table;
+ dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = rkl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (IS_JSL_EHL(dev_priv)) {
- dev_priv->cdclk_funcs = &ehl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->cdclk_funcs = &icl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &icl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- dev_priv->cdclk_funcs = &bxt_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs;
if (IS_GEMINILAKE(dev_priv))
- dev_priv->cdclk.table = glk_cdclk_table;
+ dev_priv->display.cdclk.table = glk_cdclk_table;
else
- dev_priv->cdclk.table = bxt_cdclk_table;
+ dev_priv->display.cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->cdclk_funcs = &skl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &skl_cdclk_funcs;
} else if (IS_BROADWELL(dev_priv)) {
- dev_priv->cdclk_funcs = &bdw_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->cdclk_funcs = &hsw_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &chv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &chv_cdclk_funcs;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &vlv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_IRONLAKE(dev_priv)) {
- dev_priv->cdclk_funcs = &ilk_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs;
} else if (IS_GM45(dev_priv)) {
- dev_priv->cdclk_funcs = &gm45_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs;
} else if (IS_G45(dev_priv)) {
- dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I965GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i965gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs;
} else if (IS_I965G(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &pnv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs;
} else if (IS_G33(dev_priv)) {
- dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I945GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i945gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs;
} else if (IS_I945G(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_I915GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i915gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs;
} else if (IS_I915G(dev_priv)) {
- dev_priv->cdclk_funcs = &i915g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs;
} else if (IS_I865G(dev_priv)) {
- dev_priv->cdclk_funcs = &i865g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs;
} else if (IS_I85X(dev_priv)) {
- dev_priv->cdclk_funcs = &i85x_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs;
} else if (IS_I845G(dev_priv)) {
- dev_priv->cdclk_funcs = &i845g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs;
} else if (IS_I830(dev_priv)) {
- dev_priv->cdclk_funcs = &i830_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
}
- if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs,
+ if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk,
"Unknown platform. Assuming i830\n"))
- dev_priv->cdclk_funcs = &i830_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
}
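The intel_cdclk.c hunks above are a mechanical move of the CDCLK state and the per-platform vtable from the top level of drm_i915_private into its display substructure. A simplified sketch of the resulting nesting, using stand-in types rather than the real i915 definitions, looks roughly like this:

/*
 * Illustration only: member types are placeholders, not the real i915
 * structures, but the field names match the accesses in the hunks above.
 */
struct example_cdclk_config {
	int cdclk, vco, ref, bypass, voltage_level;
};

struct example_display {
	struct {
		struct example_cdclk_config hw;	/* current hardware state */
		const void *table;		/* per-platform cdclk table */
		int max_cdclk_freq;
	} cdclk;
	struct {
		const void *cdclk;		/* per-platform cdclk vtable */
	} funcs;
};

/*
 * i.e. dev_priv->cdclk.hw.vco becomes dev_priv->display.cdclk.hw.vco, and
 * dev_priv->cdclk_funcs becomes dev_priv->display.funcs.cdclk.
 */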
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index b535cf6a7d9e..c674879a84a5 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -77,9 +77,9 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
#define intel_atomic_get_old_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
int intel_cdclk_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 9583d17e858d..6bda4274eae9 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -26,6 +26,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
+#include "intel_dsb.h"
#include "vlv_dsi_pll.h"
struct intel_color_funcs {
@@ -1167,22 +1168,22 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->load_luts(crtc_state);
+ dev_priv->display.funcs.color->load_luts(crtc_state);
}
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->color_funcs->color_commit_noarm)
- dev_priv->color_funcs->color_commit_noarm(crtc_state);
+ if (dev_priv->display.funcs.color->color_commit_noarm)
+ dev_priv->display.funcs.color->color_commit_noarm(crtc_state);
}
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->color_commit_arm(crtc_state);
+ dev_priv->display.funcs.color->color_commit_arm(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -1238,15 +1239,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- return dev_priv->color_funcs->color_check(crtc_state);
+ return dev_priv->display.funcs.color->color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->color_funcs->read_luts)
- dev_priv->color_funcs->read_luts(crtc_state);
+ if (dev_priv->display.funcs.color->read_luts)
+ dev_priv->display.funcs.color->read_luts(crtc_state);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -2225,28 +2226,28 @@ void intel_color_init(struct intel_crtc *crtc)
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->color_funcs = &chv_color_funcs;
+ dev_priv->display.funcs.color = &chv_color_funcs;
} else if (DISPLAY_VER(dev_priv) >= 4) {
- dev_priv->color_funcs = &i965_color_funcs;
+ dev_priv->display.funcs.color = &i965_color_funcs;
} else {
- dev_priv->color_funcs = &i9xx_color_funcs;
+ dev_priv->display.funcs.color = &i9xx_color_funcs;
}
} else {
if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->color_funcs = &icl_color_funcs;
+ dev_priv->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(dev_priv) == 10)
- dev_priv->color_funcs = &glk_color_funcs;
+ dev_priv->display.funcs.color = &glk_color_funcs;
else if (DISPLAY_VER(dev_priv) == 9)
- dev_priv->color_funcs = &skl_color_funcs;
+ dev_priv->display.funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(dev_priv) == 8)
- dev_priv->color_funcs = &bdw_color_funcs;
+ dev_priv->display.funcs.color = &bdw_color_funcs;
else if (DISPLAY_VER(dev_priv) == 7) {
if (IS_HASWELL(dev_priv))
- dev_priv->color_funcs = &hsw_color_funcs;
+ dev_priv->display.funcs.color = &hsw_color_funcs;
else
- dev_priv->color_funcs = &ivb_color_funcs;
+ dev_priv->display.funcs.color = &ivb_color_funcs;
} else
- dev_priv->color_funcs = &ilk_color_funcs;
+ dev_priv->display.funcs.color = &ilk_color_funcs;
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1dcc268927a2..6d5cbeb8df4d 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -229,7 +229,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
- prop = dev_priv->force_audio_property;
+ prop = dev_priv->display.properties.force_audio;
if (prop == NULL) {
prop = drm_property_create_enum(dev, 0,
"audio",
@@ -238,7 +238,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
if (prop == NULL)
return;
- dev_priv->force_audio_property = prop;
+ dev_priv->display.properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -256,7 +256,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
- prop = dev_priv->broadcast_rgb_property;
+ prop = dev_priv->display.properties.broadcast_rgb;
if (prop == NULL) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
@@ -265,7 +265,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
if (prop == NULL)
return;
- dev_priv->broadcast_rgb_property = prop;
+ dev_priv->display.properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
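The intel_connector.c hunks only relocate where the cached property pointers live (under display.properties); the create-once-and-cache pattern itself is unchanged. A self-contained sketch of that pattern, with a made-up property name and enum values, might look like:

#include <linux/kernel.h>
#include <drm/drm_connector.h>
#include <drm/drm_property.h>

/* Example values only -- not the i915 "audio"/"Broadcast RGB" enums. */
static const struct drm_prop_enum_list example_values[] = {
	{ 0, "auto" },
	{ 1, "on" },
	{ 2, "off" },
};

static void example_attach_cached_property(struct drm_connector *connector,
					   struct drm_property **cache)
{
	struct drm_property *prop = *cache;

	if (!prop) {
		prop = drm_property_create_enum(connector->dev, 0, "example",
						example_values,
						ARRAY_SIZE(example_values));
		if (!prop)
			return;
		*cache = prop;	/* shared by every connector on the device */
	}

	drm_object_attach_property(&connector->base, prop, 0);
}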
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 6a3893c8ff22..4a8ff2f97608 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -46,6 +46,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -444,6 +445,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
/* FDI must always be 2.7 GHz */
pipe_config->port_clock = 135000 * 2;
+ adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+
return 0;
}
@@ -643,9 +646,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct i2c_adapter *i2c;
bool ret = false;
- BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
-
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
@@ -931,7 +932,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev_priv))
goto out;
@@ -1110,8 +1111,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = intel_de_read(dev_priv,
- FDI_RX_CTL(PIPE_A)) & fdi_config;
+ dev_priv->display.fdi.rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
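The hsw_crt_compute_config() change above rounds adjusted_mode->crtc_clock to whatever the iCLKIP logic will actually generate, which is what allows the clock checks later in this diff to compare exactly rather than fuzzily. A loose sketch of the underlying idea, not the real lpt_iclkip() math:

#include <linux/kernel.h>

/*
 * Rounding a requested clock to the nearest value a fixed reference and an
 * integer divider can produce, so software state and hardware readout agree.
 */
static int example_round_to_divider(int refclk_khz, int requested_khz)
{
	int div = max(DIV_ROUND_CLOSEST(refclk_khz, requested_khz), 1);

	return refclk_khz / div;
}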
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 4442aa355f86..6792a9056f46 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank_work.h>
#include "i915_irq.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 4ca6e9493ff2..e9212f69c360 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -134,8 +134,8 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
drm_dbg_kms(&i915->drm,
"\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
@@ -262,10 +262,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (DISPLAY_VER(i915) >= 9)
drm_dbg_kms(&i915->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
if (HAS_GMCH(i915))
drm_dbg_kms(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index c2797ad2d313..87899e89b3a7 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
#include "intel_atomic.h"
@@ -20,9 +19,9 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_watermark.h"
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
@@ -144,8 +143,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
}
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2330604b0bcc..da8472cdc135 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -57,6 +57,7 @@
#include "intel_lspcon.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_quirks.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
@@ -323,28 +324,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
-{
- int dotclock;
-
- if (intel_crtc_has_dp_encoder(pipe_config))
- dotclock = intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
- dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
- else
- dotclock = pipe_config->port_clock;
-
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- !intel_crtc_has_dp_encoder(pipe_config))
- dotclock *= 2;
-
- if (pipe_config->pixel_multiplier)
- dotclock /= pipe_config->pixel_multiplier;
-
- return dotclock;
-}
-
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
/* CRT dotclock is determined via other means */
@@ -631,7 +610,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
- if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
drm_dbg_kms(&dev_priv->drm,
"Quirk Increase DDI disabled time\n");
@@ -1425,7 +1404,7 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
@@ -1435,17 +1414,17 @@ static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
*/
intel_de_rmw(i915, reg, clk_off, 0);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, reg, 0, clk_off);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
@@ -1720,12 +1699,12 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
intel_de_write(i915, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
@@ -1734,12 +1713,12 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
enum port port = encoder->port;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
@@ -1824,7 +1803,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
@@ -1832,7 +1811,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder,
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
@@ -1840,12 +1819,12 @@ static void skl_ddi_disable_clock(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
@@ -2691,10 +2670,14 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
@@ -2862,6 +2845,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ u32 buf_ctl;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
@@ -2919,8 +2904,12 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* On ADL_P the PHY link rate and lane count must be programmed but
* these are both 0 for HDMI.
*/
- intel_de_write(dev_priv, DDI_BUF_CTL(port),
- dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+ buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -3611,10 +3600,22 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(crtc_state))
- return intel_dp_initial_fastset_check(encoder, crtc_state);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool fastset = true;
- return true;
+ if (intel_phy_is_tc(i915, phy)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ !intel_dp_initial_fastset_check(encoder, crtc_state))
+ fastset = false;
+
+ return fastset;
}
static enum intel_output_type
@@ -4028,7 +4029,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
@@ -4036,7 +4037,7 @@ static bool lpt_digital_port_connected(struct intel_encoder *encoder)
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, DEISR) & bit;
}
@@ -4044,7 +4045,7 @@ static bool hsw_digital_port_connected(struct intel_encoder *encoder)
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index fc5d94862ef3..dd008ba8afe3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -92,6 +91,7 @@
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
+#include "intel_dsb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
@@ -118,6 +118,7 @@
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
@@ -164,16 +165,16 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
*/
void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
- if (dev_priv->wm_disp->update_wm)
- dev_priv->wm_disp->update_wm(dev_priv);
+ if (dev_priv->display.funcs.wm->update_wm)
+ dev_priv->display.funcs.wm->update_wm(dev_priv);
}
static int intel_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->compute_pipe_wm)
- return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
+ if (dev_priv->display.funcs.wm->compute_pipe_wm)
+ return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
return 0;
}
@@ -181,20 +182,20 @@ static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->wm_disp->compute_intermediate_wm)
+ if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
return 0;
if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->wm_disp->compute_pipe_wm))
+ !dev_priv->display.funcs.wm->compute_pipe_wm))
return 0;
- return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
+ return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
}
static bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->initial_watermarks) {
- dev_priv->wm_disp->initial_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->initial_watermarks) {
+ dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
return true;
}
return false;
@@ -204,23 +205,23 @@ static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->atomic_update_watermarks)
- dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->atomic_update_watermarks)
+ dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
}
static void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->optimize_watermarks)
- dev_priv->wm_disp->optimize_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->optimize_watermarks)
+ dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
}
static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->compute_global_watermarks)
- return dev_priv->wm_disp->compute_global_watermarks(state);
+ if (dev_priv->display.funcs.wm->compute_global_watermarks)
+ return dev_priv->display.funcs.wm->compute_global_watermarks(state);
return 0;
}
@@ -619,7 +620,10 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
+ FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
+ else if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
@@ -671,7 +675,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
return DISPLAY_VER(dev_priv) < 4 ||
(plane->fbc &&
- plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
+ plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL);
}
/*
@@ -1487,7 +1491,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
* Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
* TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/
- if (i915->dpll.mgr) {
+ if (i915->display.dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -1839,7 +1843,9 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+ i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
+ CHICKEN_TRANS(transcoder);
u32 val;
val = intel_de_read(dev_priv, reg);
@@ -2081,22 +2087,20 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
- else if (IS_DG2(dev_priv))
- /*
- * DG2 outputs labelled as "combo PHY" in the bspec use
- * SNPS PHYs with completely different programming,
- * hence we always return false here.
- */
- return false;
else if (IS_ALDERLAKE_S(dev_priv))
return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
else if (IS_JSL_EHL(dev_priv))
return phy <= PHY_C;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
return phy <= PHY_B;
else
+ /*
+ * DG2 outputs labelled as "combo PHY" in the bspec use
+ * SNPS PHYs with completely different programming,
+ * hence we always return false here.
+ */
return false;
}
@@ -2402,7 +2406,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (!dev_priv->wm_disp->initial_watermarks)
+ if (!dev_priv->display.funcs.wm->initial_watermarks)
intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
@@ -2661,7 +2665,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
if (DISPLAY_VER(i915) < 4) {
- clock_limit = i915->max_cdclk_freq * 9 / 10;
+ clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2693,6 +2697,10 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_crtc_compute_pipe_src(crtc_state);
if (ret)
return ret;
@@ -2719,19 +2727,11 @@ intel_reduce_m_n_ratio(u32 *num, u32 *den)
}
}
-static void compute_m_n(unsigned int m, unsigned int n,
- u32 *ret_m, u32 *ret_n,
- bool constant_n)
+static void compute_m_n(u32 *ret_m, u32 *ret_n,
+ u32 m, u32 n, u32 constant_n)
{
- /*
- * Several DP dongles in particular seem to be fussy about
- * too large link M/N values. Give N value as 0x8000 that
- * should be acceptable by specific devices. 0x8000 is the
- * specified fixed N value for asynchronous clock mode,
- * which the devices expect also in synchronous clock mode.
- */
if (constant_n)
- *ret_n = DP_LINK_CONSTANT_N_VALUE;
+ *ret_n = constant_n;
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -2743,22 +2743,28 @@ void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n, bool fec_enable)
+ bool fec_enable)
{
u32 data_clock = bits_per_pixel * pixel_clock;
if (fec_enable)
data_clock = intel_dp_mode_to_fec_clock(data_clock);
+ /*
+ * Windows/BIOS uses fixed M/N values always. Follow suit.
+ *
+ * Also several DP dongles in particular seem to be fussy
+ * about too large link M/N values. Presumably the 20bit
+ * value used by Windows/BIOS is acceptable to everyone.
+ */
m_n->tu = 64;
- compute_m_n(data_clock,
- link_clock * nlanes * 8,
- &m_n->data_m, &m_n->data_n,
- constant_n);
+ compute_m_n(&m_n->data_m, &m_n->data_n,
+ data_clock, link_clock * nlanes * 8,
+ 0x8000000);
- compute_m_n(pixel_clock, link_clock,
- &m_n->link_m, &m_n->link_n,
- constant_n);
+ compute_m_n(&m_n->link_m, &m_n->link_n,
+ pixel_clock, link_clock,
+ 0x80000);
}
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
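The intel_link_compute_m_n() rework above pins N to fixed values (0x8000000 for the data M/N, 0x80000 for the link M/N) so the programmed ratios match what BIOS/Windows would program. Reduced to its core, and ignoring the ratio reduction the real helper also performs, the computation is:

#include <linux/math64.h>
#include <linux/types.h>

/*
 * Illustration only: M computed against a pinned N.  compute_m_n() above
 * additionally reduces the resulting M/N ratio so both values fit their
 * register fields.
 */
static void example_compute_m_n(u32 *ret_m, u32 *ret_n,
				u32 m, u32 n, u32 fixed_n)
{
	*ret_n = fixed_n;
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
}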
@@ -2774,12 +2780,12 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
drm_dbg_kms(&dev_priv->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
str_enabled_disabled(bios_lvds_use_ssc),
- str_enabled_disabled(dev_priv->vbt.lvds_use_ssc));
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
+ dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
}
@@ -4127,7 +4133,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
+ MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
+ CHICKEN_TRANS(pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -4146,7 +4154,7 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display->get_pipe_config(crtc, crtc_state))
+ if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -4375,7 +4383,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->vbt.lvds_ssc_freq;
+ return dev_priv->display.vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
else if (DISPLAY_VER(dev_priv) != 2)
@@ -4493,7 +4501,31 @@ int intel_dotclock_calculate(int link_freq,
if (!m_n->link_n)
return 0;
- return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
+ m_n->link_n);
+}
+
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
+{
+ int dotclock;
+
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
+ pipe_config->pipe_bpp);
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ !intel_crtc_has_dp_encoder(pipe_config))
+ dotclock *= 2;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ return dotclock;
}
/* Returns the currently programmed mode of the given encoder. */
@@ -4754,7 +4786,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->ipc_enabled)
+ skl_watermark_ipc_enabled(dev_priv))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -4800,10 +4832,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed) {
- ret = intel_dpll_crtc_compute_clock(state, crtc);
- if (ret)
- return ret;
-
ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
if (ret)
return ret;
@@ -5368,46 +5396,14 @@ bool intel_fuzzy_clock_check(int clock1, int clock2)
}
static bool
-intel_compare_m_n(unsigned int m, unsigned int n,
- unsigned int m2, unsigned int n2,
- bool exact)
-{
- if (m == m2 && n == n2)
- return true;
-
- if (exact || !m || !n || !m2 || !n2)
- return false;
-
- BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
-
- if (n > n2) {
- while (n > n2) {
- m2 <<= 1;
- n2 <<= 1;
- }
- } else if (n < n2) {
- while (n < n2) {
- m <<= 1;
- n <<= 1;
- }
- }
-
- if (n != n2)
- return false;
-
- return intel_fuzzy_clock_check(m, m2);
-}
-
-static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
- const struct intel_link_m_n *m2_n2,
- bool exact)
+ const struct intel_link_m_n *m2_n2)
{
return m_n->tu == m2_n2->tu &&
- intel_compare_m_n(m_n->data_m, m_n->data_n,
- m2_n2->data_m, m2_n2->data_n, exact) &&
- intel_compare_m_n(m_n->link_m, m_n->link_n,
- m2_n2->link_m, m2_n2->link_n, exact);
+ m_n->data_m == m2_n2->data_m &&
+ m_n->data_n == m2_n2->data_n &&
+ m_n->link_m == m2_n2->link_m &&
+ m_n->link_n == m2_n2->link_n;
}
static bool
@@ -5601,8 +5597,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name,\
- !fastset)) { \
+ &pipe_config->name)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"found tu %i, data %i/%i link %i/%i)", \
@@ -5649,9 +5644,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
*/
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name, !fastset) && \
+ &pipe_config->name) && \
!intel_compare_link_m_n(&current_config->alt_name, \
- &pipe_config->name, !fastset)) { \
+ &pipe_config->name)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"or tu %i data %i/%i link %i/%i, " \
@@ -5686,16 +5681,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
- if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "(expected %i, found %i)", \
- current_config->name, \
- pipe_config->name); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
@@ -5751,8 +5736,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) {
- PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (!fastset || !pipe_config->seamless_m_n)
+ PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
} else {
PIPE_CONF_CHECK_M_N(dp_m_n);
PIPE_CONF_CHECK_M_N(dp_m2_n2);
@@ -5814,7 +5800,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_RECT(pch_pfit.dst);
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
- PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+ PIPE_CONF_CHECK_I(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
if (IS_CHERRYVIEW(dev_priv))
@@ -5841,7 +5827,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->dpll.mgr) {
+ if (dev_priv->display.dpll.mgr) {
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
@@ -5884,9 +5870,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ if (!fastset || !pipe_config->seamless_m_n) {
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+ }
+ PIPE_CONF_CHECK_I(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
@@ -5928,7 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
-#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
@@ -6050,20 +6037,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
}
}
-static void intel_modeset_clear_plls(struct intel_atomic_state *state)
-{
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state))
- continue;
-
- intel_release_shared_dplls(state, crtc);
- }
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -6164,23 +6137,6 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->update_pipe = true;
}
-static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- /*
- * If we're not doing the full modeset we want to
- * keep the current M/N values as they may be
- * sufficiently different to the computed values
- * to cause problems.
- *
- * FIXME: should really copy more fuzzy state here
- */
- new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
- new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
- new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
- new_crtc_state->has_drrs = old_crtc_state->has_drrs;
-}
-
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
u8 plane_ids_mask)
@@ -6837,9 +6793,11 @@ static int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- ret = intel_modeset_pipe_config_late(state, crtc);
- if (ret)
- goto fail;
+ if (new_crtc_state->hw.enable) {
+ ret = intel_modeset_pipe_config_late(state, crtc);
+ if (ret)
+ goto fail;
+ }
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -6890,15 +6848,12 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (intel_crtc_needs_modeset(new_crtc_state)) {
- any_ms = true;
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- }
- if (!new_crtc_state->update_pipe)
- continue;
+ any_ms = true;
- intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
+ intel_release_shared_dplls(state, crtc);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -6939,8 +6894,6 @@ static int intel_atomic_check(struct drm_device *dev,
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
-
- intel_modeset_clear_plls(state);
}
ret = intel_atomic_check_crtcs(state);
@@ -7059,6 +7012,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
+
+ if (new_crtc_state->seamless_m_n)
+ intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+ &new_crtc_state->dp_m_n);
}
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@ -7121,7 +7078,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display->crtc_enable(state, crtc);
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
return;
@@ -7200,7 +7157,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display->crtc_disable(state, crtc);
+ dev_priv->display.funcs.display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
@@ -7411,7 +7368,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
struct intel_atomic_state *state, *next;
struct llist_node *freed;
- freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
llist_for_each_entry_safe(state, next, freed, freed)
drm_atomic_state_put(&state->base);
}
@@ -7419,7 +7376,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+ container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
intel_atomic_helper_free_state(dev_priv);
}
@@ -7532,6 +7489,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_atomic_commit_fence_wait(state);
drm_atomic_helper_wait_for_dependencies(&state->base);
+ drm_dp_mst_atomic_wait_for_dependencies(&state->base);
if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
@@ -7588,7 +7546,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display->commit_modeset_enables(state);
+ dev_priv->display.funcs.display->commit_modeset_enables(state);
intel_encoders_update_complete(state);
@@ -7711,7 +7669,7 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
case FENCE_FREE:
{
struct intel_atomic_helper *helper =
- &to_i915(state->base.dev)->atomic_helper;
+ &to_i915(state->base.dev)->display.atomic_helper;
if (llist_add(&state->freed, &helper->free_list))
schedule_work(&helper->free_work);
@@ -7814,12 +7772,12 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&state->commit_ready);
if (nonblock && state->modeset) {
- queue_work(dev_priv->modeset_wq, &state->base.commit_work);
+ queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
} else if (nonblock) {
- queue_work(dev_priv->flip_wq, &state->base.commit_work);
+ queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
} else {
if (state->modeset)
- flush_workqueue(dev_priv->modeset_wq);
+ flush_workqueue(dev_priv->display.wq.modeset);
intel_atomic_commit_tail(state);
}
@@ -7925,7 +7883,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
- if (!dev_priv->vbt.int_crt_support)
+ if (!dev_priv->display.vbt.int_crt_support)
return false;
return true;
@@ -8060,7 +8018,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
intel_crt_init(dev_priv);
/*
@@ -8319,7 +8277,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_free = intel_atomic_state_free,
};
-static const struct drm_i915_display_funcs skl_display_funcs = {
+static const struct intel_display_funcs skl_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
.crtc_disable = hsw_crtc_disable,
@@ -8327,7 +8285,7 @@ static const struct drm_i915_display_funcs skl_display_funcs = {
.get_initial_plane_config = skl_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs ddi_display_funcs = {
+static const struct intel_display_funcs ddi_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
.crtc_disable = hsw_crtc_disable,
@@ -8335,7 +8293,7 @@ static const struct drm_i915_display_funcs ddi_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs pch_split_display_funcs = {
+static const struct intel_display_funcs pch_split_display_funcs = {
.get_pipe_config = ilk_get_pipe_config,
.crtc_enable = ilk_crtc_enable,
.crtc_disable = ilk_crtc_disable,
@@ -8343,7 +8301,7 @@ static const struct drm_i915_display_funcs pch_split_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs vlv_display_funcs = {
+static const struct intel_display_funcs vlv_display_funcs = {
.get_pipe_config = i9xx_get_pipe_config,
.crtc_enable = valleyview_crtc_enable,
.crtc_disable = i9xx_crtc_disable,
@@ -8351,7 +8309,7 @@ static const struct drm_i915_display_funcs vlv_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs i9xx_display_funcs = {
+static const struct intel_display_funcs i9xx_display_funcs = {
.get_pipe_config = i9xx_get_pipe_config,
.crtc_enable = i9xx_crtc_enable,
.crtc_disable = i9xx_crtc_disable,
@@ -8374,16 +8332,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_dpll_init_clock_hook(dev_priv);
if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display = &skl_display_funcs;
+ dev_priv->display.funcs.display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display = &ddi_display_funcs;
+ dev_priv->display.funcs.display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display = &pch_split_display_funcs;
+ dev_priv->display.funcs.display = &pch_split_display_funcs;
} else if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display = &vlv_display_funcs;
+ dev_priv->display.funcs.display = &vlv_display_funcs;
} else {
- dev_priv->display = &i9xx_display_funcs;
+ dev_priv->display.funcs.display = &i9xx_display_funcs;
}
intel_fdi_init_hook(dev_priv);
@@ -8396,11 +8354,11 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
+ cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
intel_update_cdclk(i915);
- intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+ intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -8456,7 +8414,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->wm_disp->optimize_watermarks)
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -8600,6 +8558,10 @@ out:
return ret;
}
+static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
static void intel_mode_config_init(struct drm_i915_private *i915)
{
struct drm_mode_config *mode_config = &i915->drm.mode_config;
@@ -8614,6 +8576,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->prefer_shadow = 1;
mode_config->funcs = &intel_mode_funcs;
+ mode_config->helper_private = &intel_mode_config_funcs;
mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
@@ -8683,11 +8646,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
intel_dmc_ucode_init(i915);
- i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
- i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
- WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
-
- i915->window2_delay = 0; /* No DSB so no window2 delay */
+ i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
intel_mode_config_init(i915);
@@ -8703,8 +8664,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- init_llist_head(&i915->atomic_helper.free_list);
- INIT_WORK(&i915->atomic_helper.free_work,
+ init_llist_head(&i915->display.atomic_helper.free_list);
+ INIT_WORK(&i915->display.atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
intel_init_quirks(i915);
@@ -8764,7 +8725,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_hdcp_component_init(i915);
- if (i915->max_cdclk_freq == 0)
+ if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
/*
@@ -8828,7 +8789,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_hpd_init(i915);
intel_hpd_poll_disable(i915);
- intel_init_ipc(i915);
+ skl_watermark_ipc_init(i915);
return 0;
}
@@ -8959,7 +8920,7 @@ void intel_display_resume(struct drm_device *dev)
if (!ret)
ret = __intel_display_resume(i915, state, &ctx);
- intel_enable_ipc(i915);
+ skl_watermark_ipc_update(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -8994,11 +8955,18 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- flush_workqueue(i915->flip_wq);
- flush_workqueue(i915->modeset_wq);
+ flush_workqueue(i915->display.wq.flip);
+ flush_workqueue(i915->display.wq.modeset);
+
+ flush_work(&i915->display.atomic_helper.free_work);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
- flush_work(&i915->atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
}
/* part #2: call after irq uninstall */
@@ -9013,13 +8981,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
- /*
- * MST topology needs to be suspended so we don't have any calls to
- * fbdev after it's finalized. MST will be destroyed later as part of
- * drm_mode_config_cleanup()
- */
- intel_dp_mst_suspend(i915);
-
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(i915);
@@ -9036,8 +8997,8 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
intel_gmbus_teardown(i915);
- destroy_workqueue(i915->flip_wq);
- destroy_workqueue(i915->modeset_wq);
+ destroy_workqueue(i915->display.wq.flip);
+ destroy_workqueue(i915->display.wq.modeset);
intel_fbc_cleanup(i915);
}
@@ -9084,7 +9045,7 @@ void intel_display_driver_register(struct drm_i915_private *i915)
/* Must be done after probing outputs */
intel_opregion_register(i915);
- acpi_video_register();
+ intel_acpi_video_register(i915);
intel_audio_init(i915);
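The intel_display.c hunks above consistently replace flat dev_priv fields (modeset_wq, flip_wq, atomic_helper, vbt, cdclk) with the new dev_priv->display grouping and route the crtc hooks through display.funcs.display. A minimal sketch of the resulting call pattern, assuming the usual i915_drv.h context; the helper name intel_crtc_toggle_example is hypothetical, only the member accesses mirror the diff:

	/* Illustrative sketch only: the new display.funcs.display indirection. */
	static void intel_crtc_toggle_example(struct intel_atomic_state *state,
					      struct intel_crtc *crtc, bool enable)
	{
		struct drm_i915_private *i915 = to_i915(state->base.dev);

		if (enable)
			i915->display.funcs.display->crtc_enable(state, crtc);
		else
			i915->display.funcs.display->crtc_disable(state, crtc);
	}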
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index fa5371036239..884e8e67b17c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -45,7 +45,7 @@ struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_address_space;
-struct i915_ggtt_view;
+struct i915_gtt_view;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@@ -375,7 +375,7 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p))
+ for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for_each_pipe(__dev_priv, __p) \
@@ -383,7 +383,7 @@ enum hpd_pin {
#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t))
+ for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
for_each_cpu_transcoder(__dev_priv, __t) \
@@ -547,7 +547,7 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n, bool fec_enable);
+ bool fec_enable);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
new file mode 100644
index 000000000000..96cf994b0ad1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_CORE_H__
+#define __INTEL_DISPLAY_CORE_H__
+
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_connector.h>
+
+#include "intel_cdclk.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_dmc.h"
+#include "intel_dpll_mgr.h"
+#include "intel_fbc.h"
+#include "intel_global_state.h"
+#include "intel_gmbus.h"
+#include "intel_opregion.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct drm_property;
+struct i915_audio_component;
+struct i915_hdcp_comp_master;
+struct intel_atomic_state;
+struct intel_audio_funcs;
+struct intel_bios_encoder_data;
+struct intel_cdclk_funcs;
+struct intel_cdclk_vals;
+struct intel_color_funcs;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dpll_funcs;
+struct intel_dpll_mgr;
+struct intel_fbdev;
+struct intel_fdi_funcs;
+struct intel_hotplug_funcs;
+struct intel_initial_plane_config;
+struct intel_overlay;
+
+/* Number of SAGV/QGV points; BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
+/* Number of PSF GV points; BSpec precisely defines this */
+#define I915_NUM_PSF_GV_POINTS 3
+
+struct intel_display_funcs {
+ /*
+ * Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state.
+ */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+};
+
+/* functions used for watermark calcs for display. */
+struct intel_wm_funcs {
+ /* update_wm is for legacy wm management */
+ void (*update_wm)(struct drm_i915_private *dev_priv);
+ int (*compute_pipe_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_intermediate_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_global_watermarks)(struct intel_atomic_state *state);
+};
+
+struct intel_audio {
+ /* hda/i915 audio component */
+ struct i915_audio_component *component;
+ bool component_registered;
+ /* mutex for audio/video sync */
+ struct mutex mutex;
+ int power_refcount;
+ u32 freq_cntrl;
+
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *encoder_map[I915_MAX_PIPES];
+
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe;
+};
+
+/*
+ * dpll and cdclk state is protected by connection_mutex; dpll.lock serializes
+ * intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
+ * dpll, because on some platforms plls share registers.
+ */
+struct intel_dpll {
+ struct mutex lock;
+
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
+
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+};
+
+struct intel_frontbuffer_tracking {
+ spinlock_t lock;
+
+ /*
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
+struct intel_hotplug {
+ struct delayed_work hotplug_work;
+
+ const u32 *hpd, *pch_hpd;
+
+ struct {
+ unsigned long last_jiffies;
+ int count;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } state;
+ } stats[HPD_NUM_PINS];
+ u32 event_bits;
+ u32 retry_bits;
+ struct delayed_work reenable_work;
+
+ u32 long_port_mask;
+ u32 short_port_mask;
+ struct work_struct dig_port_work;
+
+ struct work_struct poll_init_work;
+ bool poll_enabled;
+
+ unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
+
+ /*
+ * If we get an HPD irq from DP and an HPD irq from non-DP, the
+ * non-DP HPD could block the workqueue on acquiring a mode config
+ * mutex that userspace may have taken. However, userspace is
+ * waiting on the DP workqueue to run, which is blocked behind the
+ * non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+};
+
+struct intel_vbt_data {
+ /* bdb version */
+ u16 version;
+
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ enum drm_panel_orientation orientation;
+
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
+ int crt_ddc_pin;
+
+ struct list_head display_devices;
+ struct list_head bdb_blocks;
+
+ struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
+ struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+ } sdvo_mappings[2];
+};
+
+struct intel_wm {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ u16 pri_latency[5];
+ /* sprite */
+ u16 spr_latency[5];
+ /* cursor */
+ u16 cur_latency[5];
+ /*
+ * Raw watermark memory latency values
+ * for SKL for all 8 levels
+ * in 1us units.
+ */
+ u16 skl_latency[8];
+
+ /* current hardware state */
+ union {
+ struct ilk_wm_values hw;
+ struct vlv_wm_values vlv;
+ struct g4x_wm_values g4x;
+ };
+
+ u8 max_level;
+
+ /*
+ * Should be held around atomic WM register writing; also
+ * protects intel_crtc->wm.active and
+ * crtc_state->wm.need_postvbl_update.
+ */
+ struct mutex wm_mutex;
+
+ bool ipc_enabled;
+};
+
+struct intel_display {
+ /* Display functions */
+ struct {
+ /* Top level crtc-ish functions */
+ const struct intel_display_funcs *display;
+
+ /* Display CDCLK functions */
+ const struct intel_cdclk_funcs *cdclk;
+
+ /* Display pll funcs */
+ const struct intel_dpll_funcs *dpll;
+
+ /* irq display functions */
+ const struct intel_hotplug_funcs *hotplug;
+
+ /* pm display functions */
+ const struct intel_wm_funcs *wm;
+
+ /* fdi display functions */
+ const struct intel_fdi_funcs *fdi;
+
+ /* Display internal color functions */
+ const struct intel_color_funcs *color;
+
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *audio;
+ } funcs;
+
+ /* Grouping using anonymous structs. Keep sorted. */
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
+ struct {
+ /* backlight registers and fields in struct intel_panel */
+ struct mutex lock;
+ } backlight;
+
+ struct {
+ struct intel_global_obj obj;
+
+ struct intel_bw_info {
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
+ /* for each PSF GV point */
+ unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+ u8 num_planes;
+ } max[6];
+ } bw;
+
+ struct {
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
+
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
+ struct intel_global_obj obj;
+
+ unsigned int max_cdclk_freq;
+ } cdclk;
+
+ struct {
+ /* The current hardware dbuf configuration */
+ u8 enabled_slices;
+
+ struct intel_global_obj obj;
+ } dbuf;
+
+ struct {
+ /* VLV/CHV/BXT/GLK DSI MMIO register base address */
+ u32 mmio_base;
+ } dsi;
+
+ struct {
+ /* fbdev registered on this device */
+ struct intel_fbdev *fbdev;
+ struct work_struct suspend_work;
+ } fbdev;
+
+ struct {
+ unsigned int pll_freq;
+ u32 rx_config;
+ } fdi;
+
+ struct {
+ /*
+ * Base address of where the gmbus and gpio blocks are located
+ * (either on PCH or on SoC for platforms without PCH).
+ */
+ u32 mmio_base;
+
+ /*
+ * gmbus.mutex protects against concurrent usage of the single
+ * hw gmbus controller on different i2c buses.
+ */
+ struct mutex mutex;
+
+ struct intel_gmbus *bus[GMBUS_NUM_PINS];
+
+ wait_queue_head_t wait_queue;
+ } gmbus;
+
+ struct {
+ struct i915_hdcp_comp_master *master;
+ bool comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex comp_mutex;
+ } hdcp;
+
+ struct {
+ struct i915_power_domains domains;
+
+ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
+ u32 chv_phy_control;
+
+ /* perform PHY state sanity checks? */
+ bool chv_phy_assert[2];
+ } power;
+
+ struct {
+ u32 mmio_base;
+
+ /* protects panel power sequencer state */
+ struct mutex mutex;
+ } pps;
+
+ struct {
+ struct drm_property *broadcast_rgb;
+ struct drm_property *force_audio;
+ } properties;
+
+ struct {
+ unsigned long mask;
+ } quirks;
+
+ struct {
+ enum {
+ I915_SAGV_UNKNOWN = 0,
+ I915_SAGV_DISABLED,
+ I915_SAGV_ENABLED,
+ I915_SAGV_NOT_CONTROLLED
+ } status;
+
+ u32 block_time_us;
+ } sagv;
+
+ struct {
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset;
+
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip;
+ } wq;
+
+ /* Grouping using named structs. Keep sorted. */
+ struct intel_audio audio;
+ struct intel_dmc dmc;
+ struct intel_dpll dpll;
+ struct intel_fbc *fbc[I915_MAX_FBCS];
+ struct intel_frontbuffer_tracking fb_tracking;
+ struct intel_hotplug hotplug;
+ struct intel_opregion opregion;
+ struct intel_overlay *overlay;
+ struct intel_vbt_data vbt;
+ struct intel_wm wm;
+};
+
+#endif /* __INTEL_DISPLAY_CORE_H__ */
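With struct intel_display embedded in drm_i915_private as the display member, the formerly flat fields are now reached one level down. A hedged sketch of typical accesses; the example_* helpers are hypothetical, while the member paths match the hunks elsewhere in this patch:

	/* Illustrative sketch only: accessing the grouped display state. */
	static bool example_has_int_crt(struct drm_i915_private *i915)
	{
		return i915->display.vbt.int_crt_support;
	}

	static void example_flush_modesets(struct drm_i915_private *i915)
	{
		flush_workqueue(i915->display.wq.modeset);
	}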
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 6c3954479047..7c7253a2541c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -26,6 +26,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_watermark.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -37,10 +38,10 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->fb_tracking.busy_bits);
+ dev_priv->display.fb_tracking.busy_bits);
seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->fb_tracking.flip_bits);
+ dev_priv->display.fb_tracking.flip_bits);
return 0;
}
@@ -103,7 +104,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
@@ -113,7 +115,8 @@ static int i915_opregion(struct seq_file *m, void *unused)
static int i915_vbt(struct seq_file *m, void *unused)
{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->vbt)
seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -129,7 +132,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_framebuffer *drm_fb;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
+ fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev);
if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
@@ -722,10 +725,11 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
/* Not all platforms have a scaler */
if (num_scalers) {
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d",
num_scalers,
crtc_state->scaler_state.scaler_users,
- crtc_state->scaler_state.scaler_id);
+ crtc_state->scaler_state.scaler_id,
+ crtc_state->hw.scaling_filter);
for (i = 0; i < num_scalers; i++) {
const struct intel_scaler *sc =
@@ -932,11 +936,11 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
drm_modeset_lock_all(dev);
seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
- dev_priv->dpll.ref_clks.nssc,
- dev_priv->dpll.ref_clks.ssc);
+ dev_priv->display.dpll.ref_clks.nssc,
+ dev_priv->display.dpll.ref_clks.ssc);
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
@@ -979,58 +983,6 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(dev_priv->ipc_enabled));
- return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (!HAS_IPC(dev_priv))
- return -ENODEV;
-
- return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- intel_wakeref_t wakeref;
- bool enable;
- int ret;
-
- ret = kstrtobool_from_user(ubuf, len, &enable);
- if (ret < 0)
- return ret;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (!dev_priv->ipc_enabled && enable)
- drm_info(&dev_priv->drm,
- "Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- }
-
- return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
- .owner = THIS_MODULE,
- .open = i915_ipc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_ipc_status_write
-};
-
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1427,9 +1379,9 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.pri_latency;
+ latencies = dev_priv->display.wm.pri_latency;
wm_latency_show(m, latencies);
@@ -1442,9 +1394,9 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.spr_latency;
+ latencies = dev_priv->display.wm.spr_latency;
wm_latency_show(m, latencies);
@@ -1457,9 +1409,9 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.cur_latency;
+ latencies = dev_priv->display.wm.cur_latency;
wm_latency_show(m, latencies);
@@ -1550,9 +1502,9 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.pri_latency;
+ latencies = dev_priv->display.wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1565,9 +1517,9 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.spr_latency;
+ latencies = dev_priv->display.wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1580,9 +1532,9 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.cur_latency;
+ latencies = dev_priv->display.wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1617,14 +1569,14 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+ flush_work(&dev_priv->display.hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1639,7 +1591,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1678,7 +1630,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
return len;
}
@@ -1702,7 +1654,7 @@ static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled));
+ str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1720,7 +1672,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
char *newline;
char tmp[16];
int i;
@@ -1756,7 +1708,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
return len;
}
@@ -1907,7 +1859,6 @@ static const struct {
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
@@ -1931,6 +1882,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
+ skl_watermark_ipc_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -2137,7 +2089,7 @@ static const struct file_operations i915_dsc_fec_support_fops = {
.write = i915_dsc_fec_support_write
};
-static int i915_dsc_bpp_show(struct seq_file *m, void *data)
+static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_device *dev = connector->dev;
@@ -2160,14 +2112,14 @@ static int i915_dsc_bpp_show(struct seq_file *m, void *data)
}
crtc_state = to_intel_crtc_state(crtc->state);
- seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
+ seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
-static ssize_t i915_dsc_bpp_write(struct file *file,
+static ssize_t i915_dsc_bpc_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
@@ -2175,33 +2127,32 @@ static ssize_t i915_dsc_bpp_write(struct file *file,
((struct seq_file *)file->private_data)->private;
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- int dsc_bpp = 0;
+ int dsc_bpc = 0;
int ret;
- ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
+ ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc);
if (ret < 0)
return ret;
- intel_dp->force_dsc_bpp = dsc_bpp;
+ intel_dp->force_dsc_bpc = dsc_bpc;
*offp += len;
return len;
}
-static int i915_dsc_bpp_open(struct inode *inode,
+static int i915_dsc_bpc_open(struct inode *inode,
struct file *file)
{
- return single_open(file, i915_dsc_bpp_show,
- inode->i_private);
+ return single_open(file, i915_dsc_bpc_show, inode->i_private);
}
-static const struct file_operations i915_dsc_bpp_fops = {
+static const struct file_operations i915_dsc_bpc_fops = {
.owner = THIS_MODULE,
- .open = i915_dsc_bpp_open,
+ .open = i915_dsc_bpc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .write = i915_dsc_bpp_write
+ .write = i915_dsc_bpc_write
};
/*
@@ -2271,8 +2222,8 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
- debugfs_create_file("i915_dsc_bpp", 0644, root,
- connector, &i915_dsc_bpp_fops);
+ debugfs_create_file("i915_dsc_bpc", 0644, root,
+ connector, &i915_dsc_bpc_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
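The watermark latency show/write handlers above all repeat the same table selection against the relocated display.wm state. A hedged helper sketch of that pattern; example_pri_latencies is a hypothetical name, the member paths are taken from the hunks above:

	/* Illustrative sketch only: the table selection repeated in the handlers above. */
	static const u16 *example_pri_latencies(struct drm_i915_private *i915)
	{
		if (DISPLAY_VER(i915) >= 9)
			return i915->display.wm.skl_latency;

		return i915->display.wm.pri_latency;
	}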
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 589af257edeb..1e608b9e5055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
@@ -18,8 +19,8 @@
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_snps_phy.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
@@ -243,7 +244,7 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
struct i915_power_domains *power_domains;
bool ret;
- power_domains = &dev_priv->power_domains;
+ power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
ret = __intel_display_power_is_enabled(dev_priv, domain);
@@ -268,7 +269,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
+ if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -291,7 +292,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
{
struct i915_power_well *power_well;
bool dc_off_enabled;
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
@@ -301,7 +302,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->dmc.target_dc_state)
+ if (state == dev_priv->display.dmc.target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->dmc.target_dc_state = state;
+ dev_priv->display.dmc.target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -339,7 +340,7 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
return !drm_WARN_ON(&i915->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
@@ -352,7 +353,7 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
@@ -375,7 +376,7 @@ static void print_power_domains(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
enum intel_display_power_domain domain;
drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
@@ -390,7 +391,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
drm_dbg(&i915->drm, "async_put_wakeref %u\n",
power_domains->async_put_wakeref);
@@ -445,7 +446,7 @@ static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -474,7 +475,7 @@ static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
if (intel_display_power_grab_async_put_ref(dev_priv, domain))
@@ -501,7 +502,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
@@ -527,7 +528,7 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
@@ -563,7 +564,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
- power_domains = &dev_priv->power_domains;
+ power_domains = &dev_priv->display.power.domains;
drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
@@ -583,7 +584,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
__intel_display_power_put_domain(dev_priv, domain);
@@ -596,7 +597,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
@@ -610,7 +611,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
{
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
- power_domains);
+ display.power.domains);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
@@ -637,8 +638,8 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- power_domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ display.power.domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = 0;
@@ -698,7 +699,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -746,7 +747,7 @@ out_verify:
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -779,7 +780,7 @@ out_verify:
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_display_power_flush_work(i915);
cancel_delayed_work_sync(&power_domains->async_put_work);
@@ -908,7 +909,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return 0;
if (IS_DG2(dev_priv))
- max_dc = 0;
+ max_dc = 1;
else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
@@ -976,15 +977,15 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
*/
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->dmc.allowed_dc_mask =
+ dev_priv->display.dmc.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->dmc.target_dc_state =
+ dev_priv->display.dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1003,12 +1004,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
- intel_display_power_map_cleanup(&dev_priv->power_domains);
+ intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
@@ -1037,7 +1038,7 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
enum dbuf_slice slice;
@@ -1060,14 +1061,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for_each_dbuf_slice(dev_priv, slice)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
- dev_priv->dbuf.enabled_slices = req_slices;
+ dev_priv->display.dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- dev_priv->dbuf.enabled_slices =
+ dev_priv->display.dbuf.enabled_slices =
intel_enabled_dbuf_slices_mask(dev_priv);
/*
@@ -1075,7 +1076,7 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
* figure out later which slices we have and what we need.
*/
gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
- dev_priv->dbuf.enabled_slices);
+ dev_priv->display.dbuf.enabled_slices);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -1101,7 +1102,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
u32 mask, val, i;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
@@ -1309,7 +1310,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}
/*
@@ -1381,6 +1382,9 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
+ if (DISPLAY_VER(dev_priv) >= 14)
+ reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
+
val = intel_de_read(dev_priv, reg);
if (enable)
@@ -1394,7 +1398,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1426,13 +1430,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
@@ -1459,7 +1464,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1493,13 +1498,14 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
@@ -1601,7 +1607,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
u32 val;
@@ -1668,13 +1674,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ intel_dmc_disable_program(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -1712,7 +1719,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* power well state and lane status to reconstruct the
* expected initial value.
*/
- dev_priv->chv_phy_control =
+ dev_priv->display.power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -1734,27 +1741,27 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
} else {
- dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
}
if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
@@ -1766,21 +1773,21 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
} else {
- dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
}
drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
@@ -1864,7 +1871,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
power_domains->initializing = true;
@@ -1905,8 +1912,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
/* Disable power support if the user asked so. */
if (!i915->params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
- i915->power_domains.disable_wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_INIT);
+ i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
}
intel_power_domains_sync_hw(i915);
@@ -1927,12 +1934,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.init_wakeref);
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->power_domains.disable_wakeref));
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -1954,7 +1961,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
*/
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
@@ -1988,7 +1995,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.init_wakeref);
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -2003,7 +2010,7 @@ void intel_power_domains_enable(struct drm_i915_private *i915)
*/
void intel_power_domains_disable(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
@@ -2026,7 +2033,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
@@ -2039,7 +2046,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2053,7 +2060,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
*/
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->power_domains.disable_wakeref));
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -2080,7 +2087,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
*/
void intel_power_domains_resume(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
if (power_domains->display_core_suspended) {
intel_power_domains_init_hw(i915, true);
@@ -2098,7 +2105,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
for_each_power_well(i915, power_well) {
@@ -2126,7 +2133,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
*/
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
@@ -2232,10 +2239,10 @@ void intel_display_power_resume(struct drm_i915_private *i915)
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->dmc.allowed_dc_mask &
+ if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->dmc.allowed_dc_mask &
+ else if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
@@ -2243,7 +2250,7 @@ void intel_display_power_resume(struct drm_i915_private *i915)
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
@@ -2252,7 +2259,7 @@ void intel_display_power_resume(struct drm_i915_private *i915)
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
int i;
mutex_lock(&power_domains->lock);
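
The hunks above are mechanical renames driven by a single structural move: the display power-domain and DMC bookkeeping moves from top-level fields of drm_i915_private into a display sub-struct. A minimal sketch of the assumed nesting, showing only the fields touched in this excerpt; the intel_display name and the array size are inferred from the accessors in the diff rather than visible in it:

/* Sketch only; member set and the enclosing struct name are assumptions. */
struct intel_display {
	struct {
		struct i915_power_domains domains;	/* was i915->power_domains */
		u32 chv_phy_control;			/* was i915->chv_phy_control */
		bool chv_phy_assert[2];			/* indexed by dpio_phy; size illustrative */
	} power;
	struct intel_dmc dmc;				/* was i915->dmc */
	/* later hunks also reference display.cdclk, .dbuf, .hotplug, .vbt, .gmbus */
};

struct drm_i915_private {
	/* ... */
	struct intel_display display;
	/* ... */
};
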
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 97b367f39f35..dc04afc6cc8f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1350,6 +1350,117 @@ static const struct i915_power_well_desc_list xelpd_power_wells[] = {
I915_PW_DESCRIPTORS(xelpd_power_wells_main),
};
+/*
+ * MTL is based on XELPD power domains with the exception of power gating for:
+ * - DDI_IO (moved to PLL logic)
+ * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on)
+ */
+#define XELPDP_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_TBT1);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_TBT2);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_TBT3);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpdp_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpdp_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
+ I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+};
+
static void init_power_well_domains(const struct i915_power_well_instance *inst,
struct i915_power_well *power_well)
{
@@ -1388,7 +1499,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
u64 power_well_ids = 0;
const struct i915_power_well_desc_list *desc_list;
const struct i915_power_well_desc *desc;
@@ -1447,7 +1558,7 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
@@ -1457,7 +1568,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 14)
+ return set_power_wells(power_domains, xelpdp_power_wells);
+ else if (DISPLAY_VER(i915) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
else if (IS_DG1(i915))
return set_power_wells(power_domains, dg1_power_wells);
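
The MTL additions above follow the driver's existing pattern: each platform gets a descriptor-list table assembled from shared pieces, and intel_display_power_map_init() selects it by display version. As a purely hypothetical sketch (the names and the version threshold below are invented, not part of this patch), a future platform would slot in the same way:

/* Hypothetical example only, not part of this patch. */
static const struct i915_power_well_desc_list future_power_wells[] = {
	I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
	I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
	I915_PW_DESCRIPTORS(future_power_wells_main),	/* hypothetical platform-specific part */
};

/* ...and in intel_display_power_map_init(), ahead of the >= 14 check: */
if (DISPLAY_VER(i915) >= 15)	/* hypothetical threshold */
	return set_power_wells(power_domains, future_power_wells);
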
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 91cfd5890f46..df7ee4969ef1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
@@ -16,10 +17,10 @@
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_tc.h"
#include "intel_vga.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"
@@ -84,7 +85,7 @@ lookup_power_well(struct drm_i915_private *i915,
drm_WARN(&i915->drm, 1,
"Power well %d not defined for this platform\n",
power_well_id);
- return &i915->power_domains.power_wells[0];
+ return &i915->display.power.domains.power_wells[0];
}
void intel_power_well_enable(struct drm_i915_private *i915,
@@ -710,8 +711,8 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->dmc.dc_state, val);
- dev_priv->dmc.dc_state = val;
+ dev_priv->display.dmc.dc_state, val);
+ dev_priv->display.dmc.dc_state = val;
}
/**
@@ -746,8 +747,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->dmc.allowed_dc_mask))
- state &= dev_priv->dmc.allowed_dc_mask;
+ state & ~dev_priv->display.dmc.allowed_dc_mask))
+ state &= dev_priv->display.dmc.allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -755,16 +756,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->dmc.dc_state)
+ if ((val & mask) != dev_priv->display.dmc.dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->dmc.dc_state, val & mask);
+ dev_priv->display.dmc.dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->dmc.dc_state = val & mask;
+ dev_priv->display.dmc.dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -945,7 +946,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
+ u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
drm_WARN(&dev_priv->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
@@ -958,7 +959,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -971,7 +972,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1000,7 +1001,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->dmc.target_dc_state) {
+ switch (dev_priv->display.dmc.target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1156,10 +1157,10 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1207,7 +1208,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
- if (dev_priv->power_domains.initializing)
+ if (dev_priv->display.power.domains.initializing)
return;
intel_hpd_init(dev_priv);
@@ -1302,7 +1303,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_control = dev_priv->display.power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1313,7 +1314,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
@@ -1321,7 +1322,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
@@ -1397,7 +1398,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
+ phy_status, dev_priv->display.power.chv_phy_control);
}
#undef BITS_SET
@@ -1457,13 +1458,13 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ phy, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
}
@@ -1487,18 +1488,18 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
drm_dbg_kms(&dev_priv->drm,
"Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ phy, dev_priv->display.power.chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
+ dev_priv->display.power.chv_phy_assert[phy] = true;
assert_chv_phy_status(dev_priv);
}
@@ -1516,7 +1517,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
* reset (ie. the power well has been disabled at
* least once).
*/
- if (!dev_priv->chv_phy_assert[phy])
+ if (!dev_priv->display.power.chv_phy_assert[phy])
return;
if (ch == DPIO_CH0)
@@ -1570,27 +1571,27 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
bool was_override;
mutex_lock(&power_domains->lock);
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
if (override == was_override)
goto out;
if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
+ phy, ch, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1604,26 +1605,26 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
+ phy, ch, mask, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1701,7 +1702,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1797,6 +1798,43 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
return intel_power_well_refcount(power_well);
}
+static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
+
+ /*
+ * The power status flag cannot be used to determine whether aux
+ * power wells have finished powering up. Instead we're
+ * expected to just wait a fixed 600us after raising the request
+ * bit.
+ */
+ usleep_range(600, 1200);
+}
+
+static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ 0);
+ usleep_range(10, 30);
+}
+
+static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) &
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
+}
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -1910,3 +1948,10 @@ const struct i915_power_well_ops tgl_tc_cold_off_ops = {
.disable = tgl_tc_cold_off_power_well_disable,
.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
+
+const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = xelpdp_aux_power_well_enable,
+ .disable = xelpdp_aux_power_well_disable,
+ .is_enabled = xelpdp_aux_power_well_enabled,
+};
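
The new AUX well callbacks above lean on intel_de_rmw(); assuming the usual read-modify-write semantics of that helper, the enable path is roughly equivalent to the open-coded sketch below, included only to make the register sequence and the fixed settle time explicit (the function name is illustrative):

/* Sketch, not part of the patch: open-coded form of the enable path,
 * assuming intel_de_rmw(i915, reg, clear, set) writes (read & ~clear) | set. */
static void example_xelpdp_aux_enable(struct drm_i915_private *dev_priv,
				       enum aux_ch aux_ch)
{
	u32 val;

	val = intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch));
	val |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
	intel_de_write(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), val);

	/* No usable power-up status bit here, so wait the fixed 600us. */
	usleep_range(600, 1200);
}
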
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index d0624642dcb6..e13b521e322a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -14,15 +14,15 @@ struct drm_i915_private;
struct i915_power_well;
#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
+ (__dev_priv)->display.power.domains.power_well_count; \
(__power_well)++)
#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
+ (__dev_priv)->display.power.domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
(__power_well)--)
/*
@@ -80,6 +80,9 @@ struct i915_power_well_instance {
*/
u8 idx;
} hsw;
+ struct {
+ u8 aux_ch;
+ } xelpdp;
};
};
@@ -169,5 +172,6 @@ extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
extern const struct i915_power_well_ops icl_aux_power_well_ops;
extern const struct i915_power_well_ops icl_ddi_power_well_ops;
extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+extern const struct i915_power_well_ops xelpdp_aux_power_well_ops;
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 0da9b208d56e..298d00a11f47 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -105,7 +105,7 @@ struct intel_fb_view {
* In the normal view the FB object's backing store sg list is used
* directly and hence the remap information here is not used.
*/
- struct i915_ggtt_view gtt;
+ struct i915_gtt_view gtt;
/*
* The GTT view (gtt.type) specific information for each FB color
@@ -1130,6 +1130,7 @@ struct intel_crtc_state {
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ bool seamless_m_n;
/* PSR is supported but might not be enabled due the lack of enabled planes */
bool has_psr;
@@ -1712,7 +1713,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
- int force_dsc_bpp;
+ int force_dsc_bpc;
bool hobl_failed;
bool hobl_active;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa9ef591b885..e52ecc0738a6 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,8 +52,8 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06)
-#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06)
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 07)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07)
MODULE_FIRMWARE(DG2_DMC_PATH);
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
@@ -250,7 +250,7 @@ struct stepping_info {
static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
{
- return i915->dmc.dmc_info[dmc_id].payload;
+ return i915->display.dmc.dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -277,6 +277,17 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
+static void disable_event_handler(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ intel_de_write(i915, ctl_reg,
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE));
+ intel_de_write(i915, htp_reg, 0);
+}
+
static void
disable_flip_queue_event(struct drm_i915_private *i915,
i915_reg_t ctl_reg, i915_reg_t htp_reg)
@@ -299,12 +310,7 @@ disable_flip_queue_event(struct drm_i915_private *i915,
return;
}
- intel_de_write(i915, ctl_reg,
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE));
- intel_de_write(i915, htp_reg, 0);
+ disable_event_handler(i915, ctl_reg, htp_reg);
}
static bool
@@ -356,6 +362,51 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
}
}
+static void disable_all_event_handlers(struct drm_i915_private *i915)
+{
+ int id;
+
+ /* TODO: disable the event handlers on pre-GEN12 platforms as well */
+ if (DISPLAY_VER(i915) < 12)
+ return;
+
+ for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ int handler;
+
+ if (!has_dmc_id_fw(i915, id))
+ continue;
+
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(i915,
+ DMC_EVT_CTL(i915, id, handler),
+ DMC_EVT_HTP(i915, id, handler));
+ }
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ enum pipe pipe;
+
+ if (DISPLAY_VER(i915) != 13)
+ return;
+
+ /*
+ * Wa_16015201720:adl-p,dg2
+ * The WA requires clock gating to be disabled all the time
+ * for pipes A and B.
+ * For pipes C and D, clock gating needs to be disabled only
+ * while the firmware is being initialized.
+ */
+ if (enable)
+ for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ 0, PIPEDMC_GATING_DIS);
+ else
+ for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ PIPEDMC_GATING_DIS, 0);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
@@ -366,12 +417,16 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
*/
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
u32 id, i;
if (!intel_dmc_has_payload(dev_priv))
return;
+ pipedmc_clock_gating_wa(dev_priv, true);
+
+ disable_all_event_handlers(dev_priv);
+
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
@@ -393,7 +448,7 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
}
}
- dev_priv->dmc.dc_state = 0;
+ dev_priv->display.dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
@@ -403,12 +458,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
* here.
*/
disable_all_flip_queue_events(dev_priv);
+
+ pipedmc_clock_gating_wa(dev_priv, false);
+}
+
+/**
+ * intel_dmc_disable_program() - disable the firmware
+ * @i915: i915 drm device
+ *
+ * Disable all event handlers in the firmware, making sure the firmware is
+ * inactive after the display is uninitialized.
+ */
+void intel_dmc_disable_program(struct drm_i915_private *i915)
+{
+ if (!intel_dmc_has_payload(i915))
+ return;
+
+ pipedmc_clock_gating_wa(i915, true);
+ disable_all_event_handlers(i915);
+ pipedmc_clock_gating_wa(i915, false);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
{
drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -445,7 +519,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
{
unsigned int i, id;
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
for (i = 0; i < num_entries; i++) {
id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
@@ -473,7 +547,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
int header_ver, u8 dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
u32 start_range, end_range;
int i;
@@ -511,7 +585,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -622,7 +696,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -676,7 +750,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -713,7 +787,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
u32 readcount = 0;
@@ -740,7 +814,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->dmc.dmc_info[id].present)
+ if (!dev_priv->display.dmc.dmc_info[id].present)
continue;
offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
@@ -756,15 +830,15 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- dev_priv->dmc.wakeref =
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ dev_priv->display.dmc.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->dmc.wakeref);
+ fetch_and_zero(&dev_priv->display.dmc.wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
@@ -775,10 +849,10 @@ static void dmc_load_work_fn(struct work_struct *work)
struct intel_dmc *dmc;
const struct firmware *fw = NULL;
- dev_priv = container_of(work, typeof(*dev_priv), dmc.work);
- dmc = &dev_priv->dmc;
+ dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
+ dmc = &dev_priv->display.dmc;
- request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev);
+ request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
parse_dmc_fw(dev_priv, fw);
if (intel_dmc_has_payload(dev_priv)) {
@@ -787,7 +861,7 @@ static void dmc_load_work_fn(struct work_struct *work)
drm_info(&dev_priv->drm,
"Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
drm_notice(&dev_priv->drm,
@@ -810,9 +884,9 @@ static void dmc_load_work_fn(struct work_struct *work)
*/
void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
- INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn);
+ INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
if (!HAS_DMC(dev_priv))
return;
@@ -895,7 +969,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
}
drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->dmc.work);
+ schedule_work(&dev_priv->display.dmc.work);
}
/**
@@ -911,7 +985,7 @@ void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
if (!HAS_DMC(dev_priv))
return;
- flush_work(&dev_priv->dmc.work);
+ flush_work(&dev_priv->display.dmc.work);
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(dev_priv))
@@ -953,16 +1027,16 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->dmc.dmc_info[id].payload);
+ kfree(dev_priv->display.dmc.dmc_info[id].payload);
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->dmc;
+ struct intel_dmc *dmc = &i915->display.dmc;
if (!HAS_DMC(i915))
return;
@@ -984,7 +1058,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->dmc;
+ dmc = &i915->display.dmc;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
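
intel_dmc_disable_program() is a new entry point and its caller sits outside this excerpt; under that assumption, the sketch below only illustrates the kind of teardown path it is meant for (the function name here is invented):

/* Hypothetical call site, not part of this patch. */
static void example_display_uninit(struct drm_i915_private *i915)
{
	/* ...planes, pipes and encoders already shut down... */

	/* Leave the DMC with every event handler disabled so the firmware
	 * stays inactive once the display is uninitialized. */
	intel_dmc_disable_program(i915);
}
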
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 41091aee3b47..67e03315ef99 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -47,6 +47,7 @@ struct intel_dmc {
void intel_dmc_ucode_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
+void intel_dmc_disable_program(struct drm_i915_private *i915);
void intel_dmc_ucode_fini(struct drm_i915_private *i915);
void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
void intel_dmc_ucode_resume(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 238620b55966..5e5e41644ddf 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -28,6 +28,8 @@
#define _DMC_REG(i915, dmc_id, reg) \
((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+#define DMC_EVENT_HANDLER_COUNT_GEN12 8
+
#define _DMC_EVT_HTP_0 0x8f004
#define DMC_EVT_HTP(i915, dmc_id, handler) \
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 32292c0be2bd..c9be61d2348e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -286,11 +286,22 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
+static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
+{
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int max_lanes = dig_port->max_lanes;
+
+ if (vbt_max_lanes)
+ max_lanes = min(max_lanes, vbt_max_lanes);
+
+ return max_lanes;
+}
+
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- int source_max = dig_port->max_lanes;
+ int source_max = intel_dp_max_source_lane_count(dig_port);
int sink_max = intel_dp->max_sink_lane_count;
int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
@@ -389,23 +400,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
- u32 voltage;
-
- voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
- return voltage == VOLTAGE_INFO_0_85V;
-}
-
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) &&
- (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -413,23 +414,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
- return 540000;
-
- return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ if (intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -491,7 +476,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = dg1_max_source_rate(intel_dp);
+ max_rate = 810000;
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -720,7 +705,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
if (bigjoiner) {
u32 max_bpp_bigjoiner =
- i915->max_cdclk_freq * 48 /
+ i915->display.cdclk.max_cdclk_freq * 48 /
intel_dp_mode_to_fec_clock(mode_clock);
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
@@ -1312,21 +1297,45 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
}
+static bool has_seamless_m_n(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ /*
+ * Seamless M/N reprogramming only implemented
+ * for BDW+ double buffered M/N registers so far.
+ */
+ return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /* FIXME a bit of a mess wrt clock vs. crtc_clock */
+ if (has_seamless_m_n(connector))
+ return intel_panel_highest_mode(connector, adjusted_mode)->clock;
+ else
+ return adjusted_mode->crtc_clock;
+}
+
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, i, lane_count;
+ int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
int mode_rate, link_rate, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
+ mode_rate = intel_dp_link_required(clock, output_bpp);
for (i = 0; i < intel_dp->num_common_rates; i++) {
link_rate = intel_dp_common_rate(intel_dp, i);
@@ -1377,7 +1386,18 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
return 0;
}
-#define DSC_SUPPORTED_VERSION_MIN 1
+static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+}
+
+static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
+ DP_DSC_MINOR_SHIFT;
+}
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
@@ -1395,6 +1415,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
* DP_DSC_RC_BUF_SIZE for this.
*/
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
/*
* Slice Height of 8 works for all currently available panels. So start
@@ -1416,9 +1437,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
vdsc_cfg->dsc_version_minor =
- min(DSC_SUPPORTED_VERSION_MIN,
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+ min(intel_dp_source_dsc_version_minor(intel_dp),
+ intel_dp_sink_dsc_version_minor(intel_dp));
vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
DP_DSC_RGB;
@@ -1464,6 +1484,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ if (intel_dp->force_dsc_bpc) {
+ pipe_bpp = intel_dp->force_dsc_bpc * 3;
+ drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d", pipe_bpp);
+ }
+
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
drm_dbg_kms(&dev_priv->drm,
@@ -1515,28 +1540,12 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
- /* As of today we support DSC for only RGB */
- if (intel_dp->force_dsc_bpp) {
- if (intel_dp->force_dsc_bpp >= 8 &&
- intel_dp->force_dsc_bpp < pipe_bpp) {
- drm_dbg_kms(&dev_priv->drm,
- "DSC BPP forced to %d",
- intel_dp->force_dsc_bpp);
- pipe_config->dsc.compressed_bpp =
- intel_dp->force_dsc_bpp;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid DSC BPP %d",
- intel_dp->force_dsc_bpp);
- }
- }
-
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
+ if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
pipe_config->bigjoiner_pipes) {
if (pipe_config->dsc.slice_count < 2) {
drm_dbg_kms(&dev_priv->drm,
@@ -1626,7 +1635,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* Optimize for slow and wide for everything, because there are some
* eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
*/
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits);
if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
@@ -1869,8 +1878,7 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
- /* M1/N1 is double buffered */
- if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
@@ -1908,13 +1916,16 @@ static bool can_enable_drrs(struct intel_connector *connector,
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n)
+ int output_bpp)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
+ if (has_seamless_m_n(connector))
+ pipe_config->seamless_m_n = true;
+
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
intel_zero_m_n(&pipe_config->dp_m2_n2);
@@ -1932,7 +1943,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
pipe_config->port_clock, &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
+ pipe_config->fec_enable);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2007,7 +2018,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
struct intel_connector *connector = intel_dp->attached_connector;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
@@ -2086,7 +2096,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- constant_n, pipe_config->fec_enable);
+ pipe_config->fec_enable);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2097,8 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_dp_drrs_compute_config(connector, pipe_config,
- output_bpp, constant_n);
+ intel_dp_drrs_compute_config(connector, pipe_config, output_bpp);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -4992,12 +5001,21 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
{
struct drm_i915_private *dev_priv = to_i915(conn->dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn);
+ struct intel_connector *intel_conn = to_intel_connector(conn);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder);
int ret;
ret = intel_digital_connector_atomic_check(conn, &state->base);
if (ret)
return ret;
+ if (intel_dp_mst_source_support(intel_dp)) {
+ ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr);
+ if (ret)
+ return ret;
+ }
+
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
@@ -5023,9 +5041,9 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->dev);
spin_lock_irq(&i915->irq_lock);
- i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
+ i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
spin_unlock_irq(&i915->irq_lock);
- queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -5183,7 +5201,7 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
return;
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- i915->vbt.orientation,
+ i915->display.vbt.orientation,
fixed_mode->hdisplay,
fixed_mode->vdisplay);
}
@@ -5293,8 +5311,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(intel_connector);
- if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
- intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_backlight_setup(intel_connector, pipe);
intel_edp_add_properties(intel_dp);
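
Two of the intel_dp.c changes above are easier to follow with concrete numbers: with a display-version-14 source and a sink advertising DSC 1.2 in DP_DSC_REV, the negotiated minor version becomes min(2, 2) = 2, where the old DSC_SUPPORTED_VERSION_MIN constant would have clamped it to min(1, 2) = 1; and since the force_dsc_bpc test field now takes bits per component, a forced value of 10 maps to an input pipe_bpp of 10 * 3 = 30 before the usual range checks.
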
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 2bc119374555..48c375c65a41 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -42,7 +42,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ done = wait_event_timeout(i915->display.gmbus.wait_queue, C,
msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
@@ -86,7 +86,7 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
+ freq = dev_priv->display.cdclk.hw.cdclk;
else
freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
@@ -150,6 +150,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 ret;
/*
@@ -170,6 +171,13 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;
+ /*
+ * Power request bit is already set during aux power well enable.
+ * Preserve the bit across aux transactions.
+ */
+ if (DISPLAY_VER(i915) >= 14)
+ ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
+
return ret;
}
@@ -629,6 +637,46 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
}
}
+static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
@@ -644,7 +692,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
} else if (DISPLAY_VER(dev_priv) >= 9) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index c92d5bb2326a..83af95bce98d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -278,6 +278,8 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
+ struct drm_luminance_range_info *luminance_range =
+ &connector->base.display_info.luminance_range;
int ret;
if (panel->backlight.edp.intel.sdr_uses_aux) {
@@ -293,8 +295,17 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi
}
}
- panel->backlight.max = 512;
- panel->backlight.min = 0;
+ if (luminance_range->max_luminance) {
+ panel->backlight.max = luminance_range->max_luminance;
+ panel->backlight.min = luminance_range->min_luminance;
+ } else {
+ panel->backlight.max = 512;
+ panel->backlight.min = 0;
+ }
+
+ drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min,
+ panel->backlight.max);
+
panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe);
panel->backlight.enabled = panel->backlight.level != 0;
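
A quick worked example for the backlight change above: a panel whose reported luminance range is min_luminance = 1 and max_luminance = 400 now gets a backlight range of 1..400 (in the units drm reports for the panel, typically nits), while a panel that reports no range keeps the previous hard-coded 0..512 fallback.
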
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index a7640dbcf00e..88689124c013 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -17,6 +17,7 @@
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9feaf1a589f3..3d3efcf02011 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -37,17 +37,6 @@ static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}
-static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
- char *buf, size_t buf_size)
-{
- if (dp_phy == DP_PHY_DPRX)
- snprintf(buf, buf_size, "DPRX");
- else
- snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
-
- return buf;
-}
-
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
@@ -60,20 +49,19 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
- char phy_name[10];
-
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
- encoder->base.base.id, encoder->base.name, phy_name,
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
@@ -423,14 +411,13 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
int lane;
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
"TX FFE request: " TRAIN_REQ_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_REQ_TX_FFE_ARGS(link_status));
} else {
@@ -438,7 +425,7 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
"vswing request: " TRAIN_REQ_FMT ", "
"pre-emphasis request: " TRAIN_REQ_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_REQ_VSWING_ARGS(link_status),
TRAIN_REQ_PREEMPH_ARGS(link_status));
@@ -503,13 +490,12 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- char phy_name[10];
if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
@@ -546,13 +532,12 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
"TX FFE presets: " TRAIN_SET_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
} else {
@@ -560,7 +545,7 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
"vswing levels: " TRAIN_SET_FMT ", "
"pre-emphasis levels: " TRAIN_SET_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
@@ -671,6 +656,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);
+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this, high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
@@ -732,12 +739,11 @@ intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
}
@@ -757,21 +763,19 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
int voltage_tries, cr_tries, max_cr_tries;
u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
- char phy_name[10];
int delay_us;
delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
-
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -795,14 +799,16 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Clock recovery OK\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return true;
}
@@ -810,7 +816,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -818,7 +825,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -828,7 +836,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to update link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -846,7 +855,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
- encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy), max_cr_tries);
return false;
}
@@ -924,15 +934,12 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
- char phy_name[10];
int delay_us;
delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
-
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
if (training_pattern != DP_TRAINING_PATTERN_4)
@@ -944,7 +951,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
encoder->base.base.id, encoder->base.name,
- phy_name);
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -955,7 +962,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
link_status) < 0) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to get link status\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -966,7 +974,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
"continue channel equalization\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -975,7 +984,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
channel_eq = true;
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -985,7 +995,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to update link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
}
@@ -995,7 +1006,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
}
return channel_eq;
@@ -1070,7 +1082,6 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
{
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- char phy_name[10];
bool ret = false;
if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
@@ -1086,7 +1097,7 @@ out:
"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
ret ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 14d2a64193b2..03604a37931c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -52,30 +52,36 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct drm_dp_mst_topology_state *mst_state;
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
+ mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
+ // TODO: Handle pbn_div changes by adding a new MST helper
+ if (!mst_state->pbn_div) {
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
+ limits->max_rate,
+ limits->max_lane_count);
+ }
+
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
crtc_state->pipe_bpp = bpp;
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
crtc_state->pipe_bpp,
false);
-
- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
- connector->port,
- crtc_state->pbn,
- drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- crtc_state->port_clock,
- crtc_state->lane_count));
+ slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
+ connector->port, crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
if (slots >= 0)
@@ -93,7 +99,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
- constant_n, crtc_state->fec_enable);
+ crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;
return 0;
@@ -308,14 +314,8 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
- struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(&state->base, connector);
- struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(&state->base, connector);
struct intel_connector *intel_connector =
to_intel_connector(connector);
- struct drm_crtc *new_crtc = new_conn_state->crtc;
- struct drm_dp_mst_topology_mgr *mgr;
int ret;
ret = intel_digital_connector_atomic_check(connector, &state->base);
@@ -326,28 +326,9 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- if (!old_conn_state->crtc)
- return 0;
-
- /* We only want to free VCPI if this state disables the CRTC on this
- * connector
- */
- if (new_crtc) {
- struct intel_crtc *crtc = to_intel_crtc(new_crtc);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(&crtc_state->uapi) ||
- crtc_state->uapi.enable)
- return 0;
- }
-
- mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr;
- ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr,
- intel_connector->port);
-
- return ret;
+ return drm_dp_atomic_release_time_slots(&state->base,
+ &intel_connector->mst_port->mst_mgr,
+ intel_connector->port);
}
static void clear_act_sent(struct intel_encoder *encoder,
@@ -383,21 +364,17 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1;
- int ret;
drm_dbg_kms(&i915->drm, "active links %d\n",
intel_dp->active_mst_links);
intel_hdcp_disable(intel_mst->connector);
- drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
-
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
- if (ret) {
- drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
- }
+ drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
@@ -425,8 +402,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
- drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
clear_act_sent(encoder, old_crtc_state);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
@@ -434,8 +409,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, old_crtc_state);
- drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
-
intel_ddi_disable_transcoder_func(old_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9)
@@ -502,7 +475,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
- int start_slot = intel_dp_is_uhbr(pipe_config) ? 0 : 1;
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
int ret;
bool first_mst_stream;
@@ -528,16 +502,13 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
- ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- connector->port,
- pipe_config->pbn,
- pipe_config->dp_m_n.tu);
- if (!ret)
- drm_err(&dev_priv->drm, "failed to allocate vcpi\n");
-
intel_dp->active_mst_links++;
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot);
+ ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ if (ret < 0)
+ drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n",
+ connector->base.name, ret);
/*
* Before Gen 12 this is not done as part of
@@ -560,7 +531,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_digital_port *dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &dig_port->dp;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
@@ -588,9 +562,13 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, pipe_config);
- drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+ drm_atomic_get_mst_payload_state(mst_state, connector->port));
- if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
+ if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
+ FECSTALL_DIS_DPTSTREAM_DPTTG);
+ else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
FECSTALL_DIS_DPTSTREAM_DPTTG);
@@ -972,8 +950,6 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
- int max_source_rate =
- intel_dp->source_rates[intel_dp->num_source_rates - 1];
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
return 0;
@@ -989,10 +965,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
/* create encoders */
intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
- &intel_dp->aux, 16, 3,
- dig_port->max_lanes,
- max_source_rate,
- conn_base_id);
+ &intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index cc6abe761f5e..8732b8722ed7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -484,7 +484,7 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
- lockdep_assert_held(&dev_priv->power_domains.lock);
+ lockdep_assert_held(&dev_priv->display.power.domains.lock);
was_enabled = true;
if (rcomp_phy != -1)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 5262f16b45ac..b15ba78d64d6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -938,12 +938,25 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- return intel_compute_shared_dplls(state, crtc, encoder);
+ ret = intel_compute_shared_dplls(state, crtc, encoder);
+ if (ret)
+ return ret;
+
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /* CRT dotclock is determined via other means */
+ if (!crtc_state->has_pch_encoder)
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
}
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -969,8 +982,15 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_mpllb_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
- return intel_mpllb_calc_state(crtc_state, encoder);
+ return 0;
}
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
@@ -991,7 +1011,7 @@ static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
factor = 21;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev_priv) &&
intel_is_dual_link_lvds(dev_priv)))
factor = 25;
@@ -1096,6 +1116,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 120000;
+ int ret;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
@@ -1105,8 +1126,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_panel_use_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ dev_priv->display.vbt.lvds_ssc_freq);
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
}
if (intel_is_dual_link_lvds(dev_priv)) {
@@ -1132,7 +1153,14 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- return intel_compute_shared_dplls(state, crtc, NULL);
+ ret = intel_compute_shared_dplls(state, crtc, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return ret;
}
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -1198,6 +1226,13 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
chv_compute_dpll(crtc_state);
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1217,6 +1252,13 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
vlv_compute_dpll(crtc_state);
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1231,7 +1273,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1259,6 +1301,11 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1273,7 +1320,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1292,6 +1339,9 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1306,7 +1356,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1325,6 +1375,11 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1339,7 +1394,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1360,6 +1415,9 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1411,16 +1469,13 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
- return 0;
-
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (!crtc_state->hw.enable)
return 0;
- ret = i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
@@ -1439,17 +1494,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
- if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
- return 0;
-
- if (!crtc_state->hw.enable)
+ if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->dpll_funcs->crtc_get_shared_dpll)
+ if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
@@ -1463,23 +1516,23 @@ void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv))
- dev_priv->dpll_funcs = &dg2_dpll_funcs;
+ dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->dpll_funcs = &hsw_dpll_funcs;
+ dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->dpll_funcs = &ilk_dpll_funcs;
+ dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->dpll_funcs = &chv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &chv_dpll_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->dpll_funcs = &vlv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->dpll_funcs = &g4x_dpll_funcs;
+ dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
else if (IS_PINEVIEW(dev_priv))
- dev_priv->dpll_funcs = &pnv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->dpll_funcs = &i9xx_dpll_funcs;
+ dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->dpll_funcs = &i8xx_dpll_funcs;
+ dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
}
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 118598c9a809..e5fb66a5dd02 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -113,8 +113,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -149,7 +149,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->dpll.shared_dplls[id];
+ return &dev_priv->display.dpll.shared_dplls[id];
}
/**
@@ -164,11 +164,11 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- long pll_idx = pll - dev_priv->dpll.shared_dplls;
+ long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
if (drm_WARN_ON(&dev_priv->drm,
pll_idx < 0 ||
- pll_idx >= dev_priv->dpll.num_shared_dpll))
+ pll_idx >= dev_priv->display.dpll.num_shared_dpll))
return -1;
return pll_idx;
@@ -245,7 +245,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
old_mask = pll->active_mask;
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
@@ -271,7 +271,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
/**
@@ -294,7 +294,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
"%s not used by [CRTC:%d:%s]\n", pll->info->name,
crtc->base.base.id, crtc->base.name))
@@ -317,7 +317,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static struct intel_shared_dpll *
@@ -336,7 +336,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->dpll.shared_dplls[i];
+ pll = &dev_priv->display.dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].pipe_mask == 0) {
@@ -436,9 +436,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->dpll.shared_dplls[i];
+ &dev_priv->display.dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -537,7 +537,7 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->dpll.shared_dplls[i];
+ pll = &dev_priv->display.dpll.shared_dplls[i];
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
@@ -905,37 +905,6 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static int
-hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- unsigned int p, n2, r2;
-
- hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
-
- crtc_state->dpll_hw_state.wrpll =
- WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- return 0;
-}
-
-static struct intel_shared_dpll *
-hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- return intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_WRPLL2) |
- BIT(DPLL_ID_WRPLL1));
-}
-
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
@@ -948,7 +917,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
case WRPLL_REF_SPECIAL_HSW:
/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- refclk = dev_priv->dpll.ref_clks.nssc;
+ refclk = dev_priv->display.dpll.ref_clks.nssc;
break;
}
fallthrough;
@@ -958,7 +927,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
* code only cares about 5% accuracy, and spread is a max of
* 0.5% downspread.
*/
- refclk = dev_priv->dpll.ref_clks.ssc;
+ refclk = dev_priv->display.dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -977,6 +946,41 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
}
static int
+hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
+
+ crtc_state->dpll_hw_state.wrpll =
+ WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
+}
+
+static int
hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -1145,12 +1149,12 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
- i915->dpll.ref_clks.ssc = 135000;
+ i915->display.dpll.ref_clks.ssc = 135000;
/* Non-SSC is only used on non-ULT HSW. */
if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- i915->dpll.ref_clks.nssc = 24000;
+ i915->display.dpll.ref_clks.nssc = 24000;
else
- i915->dpll.ref_clks.nssc = 135000;
+ i915->display.dpll.ref_clks.nssc = 135000;
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1618,48 +1622,11 @@ skip_remaining_dividers:
return 0;
}
-static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wrpll_params wrpll_params = {};
- u32 ctrl1, cfgcr1, cfgcr2;
- int ret;
-
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
- i915->dpll.ref_clks.nssc, &wrpll_params);
- if (ret)
- return ret;
-
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
- DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
- wrpll_params.dco_integer;
-
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
- DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
- DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
- DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq;
-
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
- return 0;
-}
-
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
{
- int ref_clock = i915->dpll.ref_clks.nssc;
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
@@ -1726,6 +1693,46 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wrpll_params wrpll_params = {};
+ u32 ctrl1, cfgcr1, cfgcr2;
+ int ret;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1858,7 +1865,7 @@ static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
/* No SSC ref */
- i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2171,7 +2178,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
}
}
- chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
+ chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
clk_div->dot != crtc_state->port_clock);
@@ -2245,6 +2252,23 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
return 0;
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+
+ return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
+}
+
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2258,28 +2282,20 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct dpll clk_div = {};
+ int ret;
bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
-}
-
-static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
- const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
+ ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+ if (ret)
+ return ret;
- clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+ crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
- return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+ return 0;
}
static int bxt_compute_dpll(struct intel_atomic_state *state,
@@ -2324,8 +2340,8 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
- i915->dpll.ref_clks.ssc = 100000;
- i915->dpll.ref_clks.nssc = 100000;
+ i915->display.dpll.ref_clks.ssc = 100000;
+ i915->display.dpll.ref_clks.nssc = 100000;
/* DSI non-SSC ref 19.2MHz */
}
@@ -2468,7 +2484,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
- i915->dpll.ref_clks.nssc == 38400;
+ i915->display.dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -2562,7 +2578,7 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->dpll.ref_clks.nssc == 24000 ?
+ dev_priv->display.dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2585,9 +2601,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (DISPLAY_VER(dev_priv) >= 12) {
- switch (dev_priv->dpll.ref_clks.nssc) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2598,9 +2614,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->dpll.ref_clks.nssc) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2630,7 +2646,7 @@ static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
{
- int ref_clock = i915->dpll.ref_clks.nssc;
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
/*
* For ICL+, the spec states: if reference frequency is 38.4,
@@ -2769,8 +2785,8 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- if (i915->vbt.override_afc_startup)
- pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
+ if (i915->display.vbt.override_afc_startup)
+ pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
@@ -2857,7 +2873,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->dpll.ref_clks.nssc;
+ int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2965,8 +2981,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
- if (dev_priv->vbt.override_afc_startup) {
- u8 val = dev_priv->vbt.override_afc_startup_val;
+ if (dev_priv->display.vbt.override_afc_startup) {
+ u8 val = dev_priv->display.vbt.override_afc_startup_val;
pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
@@ -3063,7 +3079,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- ref_clock = dev_priv->dpll.ref_clks.nssc;
+ ref_clock = dev_priv->display.dpll.ref_clks.nssc;
if (DISPLAY_VER(dev_priv) >= 12) {
m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
@@ -3197,6 +3213,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+
+ crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
return 0;
}
@@ -3282,6 +3304,12 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
if (ret)
return ret;
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+
+ crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
return 0;
}
@@ -3440,7 +3468,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias =
intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->dpll.ref_clks.nssc == 38400) {
+ if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3502,7 +3530,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
val = DKL_PLL_DIV0_MASK;
- if (dev_priv->vbt.override_afc_startup)
+ if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
hw_state->mg_pll_div0 &= val;
@@ -3566,7 +3594,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR1(id));
- if (dev_priv->vbt.override_afc_startup) {
+ if (dev_priv->display.vbt.override_afc_startup) {
hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
}
@@ -3638,9 +3666,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
- drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
+ drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
!i915_mmio_reg_valid(div0_reg));
- if (dev_priv->vbt.override_afc_startup &&
+ if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
hw_state->div0);
@@ -3732,7 +3760,7 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
val = DKL_PLL_DIV0_MASK;
- if (dev_priv->vbt.override_afc_startup)
+ if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
hw_state->mg_pll_div0);
@@ -3967,7 +3995,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
/* No SSC ref */
- i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -4192,22 +4220,24 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->dpll.num_shared_dpll = 0;
+ dev_priv->display.dpll.num_shared_dpll = 0;
return;
}
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
+ break;
+
drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
- dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
+ dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll.mgr = dpll_mgr;
- dev_priv->dpll.num_shared_dpll = i;
- mutex_init(&dev_priv->dpll.lock);
-
- BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
+ dev_priv->display.dpll.mgr = dpll_mgr;
+ dev_priv->display.dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->display.dpll.lock);
}
/**
@@ -4229,7 +4259,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return -EINVAL;
@@ -4262,7 +4292,7 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return -EINVAL;
@@ -4285,7 +4315,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -4314,7 +4344,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
@@ -4385,16 +4415,16 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
- if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
- i915->dpll.mgr->update_ref_clks(i915);
+ if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
+ i915->display.dpll.mgr->update_ref_clks(i915);
}
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
static void sanitize_dpll_state(struct drm_i915_private *i915,
@@ -4420,8 +4450,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
/**
@@ -4434,8 +4464,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll.mgr) {
- dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->display.dpll.mgr) {
+ dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
@@ -4533,7 +4563,7 @@ void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i],
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
NULL, NULL);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index c4affcb216fd..fc9c3e41c333 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -9,6 +9,36 @@
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dsb.h"
+
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+	 * free_pos will point to the first free entry position
+	 * and help in calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+	 * ins_start_offset will help to store the start address of the dsb
+	 * instruction and help in identifying the batch of auto-increment
+	 * registers.
+ */
+ u32 ins_start_offset;
+};
#define DSB_BUF_SIZE (2 * PAGE_SIZE)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 6cb9c580cdca..74dd2b3343bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -11,34 +11,6 @@
#include "i915_reg_defs.h"
struct intel_crtc_state;
-struct i915_vma;
-
-enum dsb_id {
- INVALID_DSB = -1,
- DSB1,
- DSB2,
- DSB3,
- MAX_DSB_PER_PIPE
-};
-
-struct intel_dsb {
- enum dsb_id id;
- u32 *cmd_buf;
- struct i915_vma *vma;
-
- /*
- * free_pos will point the first free entry position
- * and help in calculating tail of command buffer.
- */
- int free_pos;
-
- /*
- * ins_start_offset will help to store start address of the dsb
- * instuction and help in identifying the batch of auto-increment
- * register.
- */
- u32 ins_start_offset;
-};
void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 35e121cd226c..5efdd471ac2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -106,7 +106,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->vbt.orientation;
+ orientation = dev_priv->display.vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index eafef0a87fea..ce80bd8be519 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -89,9 +89,6 @@ struct intel_dsi {
u8 escape_clk_div;
u8 dual_link;
- u16 dcs_backlight_ports;
- u16 dcs_cabc_ports;
-
/* RGB or BGR */
bool bgr_enabled;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 1bc7118c56a2..20e466d843ce 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -53,7 +53,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused
enum port port;
size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
&data, len);
@@ -80,7 +80,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
data[1] = level;
}
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mode_flags = dsi_device->mode_flags;
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
@@ -93,12 +93,13 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
dcs_set_backlight(conn_state, 0);
- for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
u8 cabc = POWER_SAVE_OFF;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -106,7 +107,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state,
&cabc, sizeof(cabc));
}
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -127,10 +128,11 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -146,7 +148,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&ctrl, sizeof(ctrl));
}
- for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
u8 cabc = POWER_SAVE_MEDIUM;
dsi_device = intel_dsi->dsi_hosts[port]->device;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index d96c3cc46e50..50205f064d93 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -75,8 +75,8 @@ struct intel_dvo_dev_ops {
*
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
- int (*mode_valid)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode);
+ enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
/*
* Callback for preparing mode changes on an output
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index b191915ab351..eefa33c555ac 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1395,7 +1395,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
plane_view_height_tiles(fb, color_plane, dims, y));
}
- if (view->gtt.type == I915_GGTT_VIEW_ROTATED) {
+ if (view->gtt.type == I915_GTT_VIEW_ROTATED) {
drm_WARN_ON(&i915->drm, remap_info->linear);
check_array_bounds(i915, view->gtt.rotated.plane, color_plane);
@@ -1420,7 +1420,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
/* rotate the tile dimensions to match the GTT view */
swap(tile_width, tile_height);
} else {
- drm_WARN_ON(&i915->drm, view->gtt.type != I915_GGTT_VIEW_REMAPPED);
+ drm_WARN_ON(&i915->drm, view->gtt.type != I915_GTT_VIEW_REMAPPED);
check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
@@ -1503,12 +1503,12 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
}
static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
- enum i915_ggtt_view_type view_type)
+ enum i915_gtt_view_type view_type)
{
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
- if (view_type == I915_GGTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
+ if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
@@ -1530,16 +1530,16 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
int i, num_planes = fb->base.format->num_planes;
unsigned int tile_size = intel_tile_size(i915);
- intel_fb_view_init(i915, &fb->normal_view, I915_GGTT_VIEW_NORMAL);
+ intel_fb_view_init(i915, &fb->normal_view, I915_GTT_VIEW_NORMAL);
drm_WARN_ON(&i915->drm,
intel_fb_supports_90_270_rotation(fb) &&
intel_fb_needs_pot_stride_remap(fb));
if (intel_fb_supports_90_270_rotation(fb))
- intel_fb_view_init(i915, &fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ intel_fb_view_init(i915, &fb->rotated_view, I915_GTT_VIEW_ROTATED);
if (intel_fb_needs_pot_stride_remap(fb))
- intel_fb_view_init(i915, &fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+ intel_fb_view_init(i915, &fb->remapped_view, I915_GTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -1620,8 +1620,8 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
u32 gtt_offset = 0;
intel_fb_view_init(i915, &plane_state->view,
- drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED :
- I915_GGTT_VIEW_REMAPPED);
+ drm_rotation_90_or_270(rotation) ? I915_GTT_VIEW_ROTATED :
+ I915_GTT_VIEW_REMAPPED);
src_x = plane_state->uapi.src.x1 >> 16;
src_y = plane_state->uapi.src.y1 >> 16;
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index bd6e7c98e751..c86e5d4ee016 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -18,7 +18,7 @@
static struct i915_vma *
intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags,
struct i915_address_space *vm)
@@ -79,7 +79,7 @@ err:
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
index e4fcd0218d9d..de0efaa25905 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -11,12 +11,12 @@
struct drm_framebuffer;
struct i915_vma;
struct intel_plane_state;
-struct i915_ggtt_view;
+struct i915_gtt_view;
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
bool phys_cursor,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
bool uses_fence,
unsigned long *out_flags);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 16537830ccf0..f38175304928 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,11 +55,11 @@
#define for_each_fbc_id(__dev_priv, __fbc_id) \
for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id))
+ for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
for_each_fbc_id((__dev_priv), (__fbc_id)) \
- for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])
+ for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
struct intel_fbc_funcs {
void (*activate)(struct intel_fbc *fbc);
@@ -1098,6 +1098,12 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+ /* Wa_14016291713 */
+ if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) {
+ plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
+ return 0;
+ }
+
if (!pixel_format_is_valid(plane_state)) {
plane_state->no_fbc_reason = "pixel format not supported";
return 0;
@@ -1704,17 +1710,17 @@ void intel_fbc_init(struct drm_i915_private *i915)
enum intel_fbc_id fbc_id;
if (!drm_mm_initialized(&i915->mm.stolen))
- mkwrite_device_info(i915)->display.fbc_mask = 0;
+ RUNTIME_INFO(i915)->fbc_mask = 0;
if (need_fbc_vtd_wa(i915))
- mkwrite_device_info(i915)->display.fbc_mask = 0;
+ RUNTIME_INFO(i915)->fbc_mask = 0;
i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
i915->params.enable_fbc);
for_each_fbc_id(i915, fbc_id)
- i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+ i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
}
/**
@@ -1834,7 +1840,7 @@ void intel_fbc_debugfs_register(struct drm_i915_private *i915)
struct drm_minor *minor = i915->drm.primary;
struct intel_fbc *fbc;
- fbc = i915->fbc[INTEL_FBC_A];
+ fbc = i915->display.fbc[INTEL_FBC_A];
if (fbc)
intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index db60143295ec..4adb98afe6ff 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,6 +19,7 @@ struct intel_plane_state;
enum intel_fbc_id {
INTEL_FBC_A,
+ INTEL_FBC_B,
I915_MAX_FBCS,
};
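Note: with INTEL_FBC_B added to the enum above, the reworked for_each_fbc_id() in intel_fbc.c walks every possible FBC instance and keeps only the IDs whose bit is set in the runtime fbc_mask. A minimal standalone sketch of that mask-filtered iteration (plain user-space C with a made-up mask value, not the kernel macros themselves):

#include <stdio.h>

enum intel_fbc_id { INTEL_FBC_A, INTEL_FBC_B, I915_MAX_FBCS };

int main(void)
{
	/* Hypothetical runtime mask: one bit per supported FBC instance. */
	unsigned int fbc_mask = (1u << INTEL_FBC_A) | (1u << INTEL_FBC_B);
	enum intel_fbc_id id;

	/* Mirrors the shape of for_each_fbc_id(): iterate IDs, skip unset bits. */
	for (id = INTEL_FBC_A; id < I915_MAX_FBCS; id++) {
		if (!(fbc_mask & (1u << id)))
			continue;
		printf("FBC instance %d is available\n", id);
	}
	return 0;
}

Clearing fbc_mask, as intel_fbc_init() does when stolen memory is absent or the VT-d workaround applies, simply makes the loop body never run.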
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 221336178991..112aa0447a0d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -198,8 +198,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
- const struct i915_ggtt_view view = {
- .type = I915_GGTT_VIEW_NORMAL,
+ const struct i915_gtt_view view = {
+ .type = I915_GTT_VIEW_NORMAL,
};
intel_wakeref_t wakeref;
struct fb_info *info;
@@ -210,6 +210,12 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_gem_object *obj;
int ret;
+ mutex_lock(&ifbdev->hpd_lock);
+ ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
+ mutex_unlock(&ifbdev->hpd_lock);
+ if (ret)
+ return ret;
+
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
@@ -500,7 +506,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
{
intel_fbdev_set_suspend(&container_of(work,
struct drm_i915_private,
- fbdev_suspend_work)->drm,
+ display.fbdev.suspend_work)->drm,
FBINFO_STATE_RUNNING,
true);
}
@@ -530,8 +536,8 @@ int intel_fbdev_init(struct drm_device *dev)
return ret;
}
- dev_priv->fbdev = ifbdev;
- INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+ dev_priv->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
return 0;
}
@@ -548,7 +554,7 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -568,12 +574,13 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
- cancel_work_sync(&dev_priv->fbdev_suspend_work);
+ intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
+
if (!current_is_async())
intel_fbdev_sync(ifbdev);
@@ -582,7 +589,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
+ struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
if (!ifbdev)
return;
@@ -596,7 +603,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
*/
static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
- struct intel_fbdev *ifbdev = i915->fbdev;
+ struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
bool send_hpd = false;
mutex_lock(&ifbdev->hpd_lock);
@@ -614,11 +621,11 @@ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
struct fb_info *info;
if (!ifbdev || !ifbdev->vma)
- return;
+ goto set_suspend;
info = ifbdev->helper.fbdev;
@@ -631,7 +638,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* ourselves, so only flush outstanding work upon suspend!
*/
if (state != FBINFO_STATE_RUNNING)
- flush_work(&dev_priv->fbdev_suspend_work);
+ flush_work(&dev_priv->display.fbdev.suspend_work);
console_lock();
} else {
@@ -645,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
- schedule_work(&dev_priv->fbdev_suspend_work);
+ schedule_work(&dev_priv->display.fbdev.suspend_work);
return;
}
}
@@ -661,12 +668,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
+set_suspend:
intel_fbdev_hpd_set_suspend(dev_priv, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
bool send_hpd;
if (!ifbdev)
@@ -685,7 +693,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
void intel_fbdev_restore_mode(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 67d2484afbaa..7f47e5c85c81 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -113,7 +113,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
+ dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}
/* units of 100MHz */
@@ -210,14 +210,14 @@ void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
u32 fdi_pll_clk =
intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
- i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
- i915->fdi_pll_freq = 270000;
+ i915->display.fdi.pll_freq = 270000;
} else {
return;
}
- drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
+ drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}
int intel_fdi_link_freq(struct drm_i915_private *i915,
@@ -226,7 +226,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915,
if (HAS_DDI(i915))
return pipe_config->port_clock; /* SPLL */
else
- return i915->fdi_pll_freq;
+ return i915->display.fdi.pll_freq;
}
int ilk_fdi_compute_config(struct intel_crtc *crtc,
@@ -256,7 +256,7 @@ retry:
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
+ link_bw, &pipe_config->fdi_m_n, false);
ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
if (ret == -EDEADLK)
@@ -789,7 +789,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
@@ -1066,11 +1066,11 @@ void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
if (IS_IRONLAKE(dev_priv)) {
- dev_priv->fdi_funcs = &ilk_funcs;
+ dev_priv->display.funcs.fdi = &ilk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->fdi_funcs = &gen6_funcs;
+ dev_priv->display.funcs.fdi = &gen6_funcs;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->fdi_funcs = &ivb_funcs;
+ dev_priv->display.funcs.fdi = &ivb_funcs;
}
}
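Worked example of the conversion above, using an illustrative register field value rather than anything read from hardware: if FDI_PLL_FB_CLOCK_MASK extracts 25 from FDI_PLL_BIOS_0, then pll_freq = (25 + 2) * 10000 = 270000 kHz (270 MHz), which matches the fixed value the code assigns directly on Sandy Bridge and Ivy Bridge.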
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 791248f812aa..d80e3e8a9b01 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -81,9 +81,9 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
enum fb_op_origin origin)
{
/* Delay flushing when rings are still busy.*/
- spin_lock(&i915->fb_tracking.lock);
- frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -111,11 +111,11 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
- i915->fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
/**
@@ -131,11 +131,11 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->fb_tracking.flip_bits;
- i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
if (frontbuffer_bits)
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
@@ -155,10 +155,10 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
@@ -170,10 +170,10 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->fb_tracking.lock);
- i915->fb_tracking.busy_bits |= frontbuffer_bits;
- i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin);
@@ -191,11 +191,11 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Filter out new bits since rendering started. */
- frontbuffer_bits &= i915->fb_tracking.busy_bits;
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= i915->display.fb_tracking.busy_bits;
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
if (frontbuffer_bits)
@@ -221,7 +221,7 @@ static void frontbuffer_retire(struct i915_active *ref)
}
static void frontbuffer_release(struct kref *ref)
- __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+ __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
@@ -238,7 +238,7 @@ static void frontbuffer_release(struct kref *ref)
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
- spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+ spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock);
i915_active_fini(&front->write);
@@ -268,7 +268,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
frontbuffer_retire,
I915_ACTIVE_RETIRE_SLEEPS);
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
if (rcu_access_pointer(obj->frontbuffer)) {
kfree(front);
front = rcu_dereference_protected(obj->frontbuffer, true);
@@ -277,7 +277,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
i915_gem_object_get(obj);
rcu_assign_pointer(obj->frontbuffer, front);
}
- spin_unlock(&i915->fb_tracking.lock);
+ spin_unlock(&i915->display.fb_tracking.lock);
return front;
}
@@ -286,7 +286,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
kref_put_lock(&front->ref,
frontbuffer_release,
- &to_i915(front->obj->base.dev)->fb_tracking.lock);
+ &to_i915(front->obj->base.dev)->display.fb_tracking.lock);
}
/**
@@ -311,6 +311,8 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
BITS_PER_TYPE(atomic_t));
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32);
+ BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
drm_WARN_ON(old->obj->base.dev,
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index ff0c37b079aa..3c474ed937fb 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -25,6 +25,7 @@
#define __INTEL_FRONTBUFFER_H__
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/kref.h>
#include "gem/i915_gem_object_types.h"
@@ -48,6 +49,23 @@ struct intel_frontbuffer {
struct rcu_head rcu;
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
+#define INTEL_FRONTBUFFER(pipe, plane_id) \
+ BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
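To make the bit layout described in the new comment concrete, here is a standalone sketch (plain C, re-stating the layout with local names rather than the kernel macros) for pipe index 1: plane 2 lands on bit 10, the overlay takes the top bit of that pipe's byte, and the per-pipe mask covers bits 8..15.

#include <stdio.h>

/* Local re-statement of the header's layout: 8 bits per pipe. */
#define BITS_PER_PIPE		8
#define FB_BIT(pipe, plane)	(1u << ((plane) + BITS_PER_PIPE * (pipe)))
#define FB_OVERLAY(pipe)	(1u << (BITS_PER_PIPE - 1 + BITS_PER_PIPE * (pipe)))
#define FB_ALL_MASK(pipe)	(0xffu << (BITS_PER_PIPE * (pipe)))

int main(void)
{
	/* Pipe index 1, plane 2: bit 2 + 8*1 = bit 10. */
	printf("plane bit = 0x%04x\n", FB_BIT(1, 2));     /* 0x0400 */
	printf("overlay   = 0x%04x\n", FB_OVERLAY(1));    /* 0x8000 */
	printf("pipe mask = 0x%04x\n", FB_ALL_MASK(1));   /* 0xff00 */
	return 0;
}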
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a6ba7fb72339..74443f57f62d 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -37,6 +37,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
+#include "intel_gmbus_regs.h"
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -45,7 +46,7 @@ struct intel_gmbus {
u32 reg0;
i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
};
struct gmbus_pin {
@@ -116,6 +117,18 @@ static const struct gmbus_pin gmbus_pins_dg2[] = {
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
};
+static const struct gmbus_pin gmbus_pins_mtp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+ [GMBUS_PIN_5_MTP] = { "dpe", GPIOF },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
+};
+
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
unsigned int pin)
{
@@ -128,6 +141,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
+ pins = gmbus_pins_mtp;
+ size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
@@ -170,55 +186,55 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_gmbus_reset(struct drm_i915_private *dev_priv)
+intel_gmbus_reset(struct drm_i915_private *i915)
{
- intel_de_write(dev_priv, GMBUS0, 0);
- intel_de_write(dev_priv, GMBUS4, 0);
+ intel_de_write(i915, GMBUS0(i915), 0);
+ intel_de_write(i915, GMBUS4(i915), 0);
}
-static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(i915, DSPCLK_GATE_D(i915));
if (!enable)
val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(i915, DSPCLK_GATE_D(i915), val);
}
-static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
if (!enable)
val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
}
-static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
- val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
+ val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
if (!enable)
val |= BXT_GMBUS_GATING_DIS;
else
val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
+ intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *i915 = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
@@ -234,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -249,7 +265,7 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -264,7 +280,7 @@ static int get_data(void *data)
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -283,7 +299,7 @@ static void set_clock(void *data, int state_high)
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -301,12 +317,12 @@ static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- intel_gmbus_reset(dev_priv);
+ intel_gmbus_reset(i915);
- if (IS_PINEVIEW(dev_priv))
- pnv_gmbus_clock_gating(dev_priv, false);
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, false);
set_data(bus, 1);
set_clock(bus, 1);
@@ -318,13 +334,13 @@ static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(dev_priv))
- pnv_gmbus_clock_gating(dev_priv, true);
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, true);
}
static void
@@ -356,7 +372,7 @@ static bool has_gmbus_irq(struct drm_i915_private *i915)
return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
}
-static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
+static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
{
DEFINE_WAIT(wait);
u32 gmbus2;
@@ -366,21 +382,21 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves.
*/
- if (!has_gmbus_irq(dev_priv))
+ if (!has_gmbus_irq(i915))
irq_en = 0;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- intel_de_write_fw(dev_priv, GMBUS4, irq_en);
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
2);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
50);
- intel_de_write_fw(dev_priv, GMBUS4, 0);
- remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
@@ -389,7 +405,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
}
static int
-gmbus_wait_idle(struct drm_i915_private *dev_priv)
+gmbus_wait_idle(struct drm_i915_private *i915)
{
DEFINE_WAIT(wait);
u32 irq_enable;
@@ -397,35 +413,35 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
/* Important: The hw handles only the first bit, so set only one! */
irq_enable = 0;
- if (has_gmbus_irq(dev_priv))
+ if (has_gmbus_irq(i915))
irq_enable = GMBUS_IDLE_EN;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_wait_for_register_fw(&dev_priv->uncore,
- GMBUS2, GMBUS_ACTIVE, 0,
+ ret = intel_wait_for_register_fw(&i915->uncore,
+ GMBUS2(i915), GMBUS_ACTIVE, 0,
10);
- intel_de_write_fw(dev_priv, GMBUS4, 0);
- remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
return ret;
}
-static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915)
{
- return DISPLAY_VER(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
}
static int
-gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_read_chunk(struct drm_i915_private *i915,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus0_reg, u32 gmbus1_index)
{
unsigned int size = len;
- bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+ bool burst_read = len > gmbus_max_xfer_size(i915);
bool extra_byte_added = false;
if (burst_read) {
@@ -438,21 +454,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
len++;
}
size = len % 256 + 256;
- intel_de_write_fw(dev_priv, GMBUS0,
+ intel_de_write_fw(i915, GMBUS0(i915),
gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- intel_de_write_fw(dev_priv, GMBUS1,
+ intel_de_write_fw(i915, GMBUS1(i915),
gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
- val = intel_de_read_fw(dev_priv, GMBUS3);
+ val = intel_de_read_fw(i915, GMBUS3(i915));
do {
if (extra_byte_added && len == 1)
break;
@@ -463,7 +479,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (burst_read && len == size - 4)
/* Reset the override bit */
- intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg);
}
return 0;
@@ -480,7 +496,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -489,12 +505,12 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- if (HAS_GMBUS_BURST_READ(dev_priv))
+ if (HAS_GMBUS_BURST_READ(i915))
len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
else
- len = min(rx_size, gmbus_max_xfer_size(dev_priv));
+ len = min(rx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+ ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len,
gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -507,7 +523,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_write_chunk(struct drm_i915_private *i915,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
@@ -520,8 +536,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- intel_de_write_fw(dev_priv, GMBUS3, val);
- intel_de_write_fw(dev_priv, GMBUS1,
+ intel_de_write_fw(i915, GMBUS3(i915), val);
+ intel_de_write_fw(i915, GMBUS1(i915),
gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -531,9 +547,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- intel_de_write_fw(dev_priv, GMBUS3, val);
+ intel_de_write_fw(i915, GMBUS3(i915), val);
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
}
@@ -542,7 +558,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -551,9 +567,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, gmbus_max_xfer_size(dev_priv));
+ len = min(tx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
+ ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len,
gmbus1_index);
if (ret)
return ret;
@@ -580,7 +596,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
@@ -596,17 +612,17 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
+ intel_de_write_fw(i915, GMBUS5(i915), gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+ ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg,
gmbus1_index);
else
- ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- intel_de_write_fw(dev_priv, GMBUS5, 0);
+ intel_de_write_fw(i915, GMBUS5(i915), 0);
return ret;
}
@@ -616,34 +632,34 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
u32 gmbus0_source)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_gmbus_clock_gating(dev_priv, false);
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
- pch_gmbus_clock_gating(dev_priv, false);
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, false);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, false);
retry:
- intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(dev_priv, &msgs[i],
+ ret = gmbus_index_xfer(i915, &msgs[i],
gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(dev_priv, &msgs[i],
+ ret = gmbus_xfer_read(i915, &msgs[i],
gmbus0_source | bus->reg0, 0);
} else {
- ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
+ ret = gmbus_xfer_write(i915, &msgs[i], 0);
}
if (!ret)
- ret = gmbus_wait(dev_priv,
+ ret = gmbus_wait(i915,
GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
@@ -655,19 +671,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (gmbus_wait_idle(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
}
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
ret = ret ?: i;
goto out;
@@ -686,8 +702,8 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (gmbus_wait_idle(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -697,11 +713,11 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
- intel_de_write_fw(dev_priv, GMBUS1, 0);
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
+ intel_de_write_fw(i915, GMBUS1(i915), 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
- drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
@@ -712,7 +728,7 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] NAK on first message, retry\n",
adapter->name);
goto retry;
@@ -721,10 +737,10 @@ clear_err:
goto out;
timeout:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -734,10 +750,10 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_gmbus_clock_gating(dev_priv, true);
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
- pch_gmbus_clock_gating(dev_priv, true);
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, true);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, true);
return ret;
}
@@ -746,11 +762,11 @@ static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -762,7 +778,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -770,7 +786,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
struct i2c_msg msgs[] = {
@@ -790,8 +806,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- mutex_lock(&dev_priv->gmbus_mutex);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+ mutex_lock(&i915->display.gmbus.mutex);
/*
* In order to output Aksv to the receiver, use an indexed write to
@@ -800,8 +816,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
*/
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
- mutex_unlock(&dev_priv->gmbus_mutex);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ mutex_unlock(&i915->display.gmbus.mutex);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -824,27 +840,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&i915->display.gmbus.mutex);
}
static int gmbus_trylock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- return mutex_trylock(&dev_priv->gmbus_mutex);
+ return mutex_trylock(&i915->display.gmbus.mutex);
}
static void gmbus_unlock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&i915->display.gmbus.mutex);
}
static const struct i2c_lock_operations gmbus_lock_ops = {
@@ -855,31 +871,31 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*/
-int intel_gmbus_setup(struct drm_i915_private *dev_priv)
+int intel_gmbus_setup(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
- else if (!HAS_GMCH(dev_priv))
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE;
+ else if (!HAS_GMCH(i915))
/*
* Broxton uses the same PCH offsets for South Display Engine,
* even though it doesn't have a PCH.
*/
- dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE;
+ i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE;
- mutex_init(&dev_priv->gmbus_mutex);
- init_waitqueue_head(&dev_priv->gmbus_wait_queue);
+ mutex_init(&i915->display.gmbus.mutex);
+ init_waitqueue_head(&i915->display.gmbus.wait_queue);
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
const struct gmbus_pin *gmbus_pin;
struct intel_gmbus *bus;
- gmbus_pin = get_gmbus_pin(dev_priv, pin);
+ gmbus_pin = get_gmbus_pin(i915, pin);
if (!gmbus_pin)
continue;
@@ -896,7 +912,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
"i915 gmbus %s", gmbus_pin->name);
bus->adapter.dev.parent = &pdev->dev;
- bus->dev_priv = dev_priv;
+ bus->i915 = i915;
bus->adapter.algo = &gmbus_algorithm;
bus->adapter.lock_ops = &gmbus_lock_ops;
@@ -911,10 +927,10 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev_priv))
+ if (IS_I830(i915))
bus->force_bit = 1;
- intel_gpio_setup(bus, GPIO(gmbus_pin->gpio));
+ intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio));
ret = i2c_add_adapter(&bus->adapter);
if (ret) {
@@ -922,43 +938,43 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
goto err;
}
- dev_priv->gmbus[pin] = bus;
+ i915->display.gmbus.bus[pin] = bus;
}
- intel_gmbus_reset(dev_priv);
+ intel_gmbus_reset(i915);
return 0;
err:
- intel_gmbus_teardown(dev_priv);
+ intel_gmbus_teardown(i915);
return ret;
}
-struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915,
unsigned int pin)
{
- if (drm_WARN_ON(&dev_priv->drm, pin >= ARRAY_SIZE(dev_priv->gmbus) ||
- !dev_priv->gmbus[pin]))
+ if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) ||
+ !i915->display.gmbus.bus[pin]))
return NULL;
- return &dev_priv->gmbus[pin]->adapter;
+ return &i915->display.gmbus.bus[pin]->adapter;
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&i915->display.gmbus.mutex);
bus->force_bit += force_bit ? 1 : -1;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&i915->display.gmbus.mutex);
}
bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -968,20 +984,20 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
return bus->force_bit;
}
-void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
+void intel_gmbus_teardown(struct drm_i915_private *i915)
{
unsigned int pin;
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
struct intel_gmbus *bus;
- bus = dev_priv->gmbus[pin];
+ bus = i915->display.gmbus.bus[pin];
if (!bus)
continue;
i2c_del_adapter(&bus->adapter);
kfree(bus);
- dev_priv->gmbus[pin] = NULL;
+ i915->display.gmbus.bus[pin] = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index 8edc2e99cf53..20f704bd4e70 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -24,6 +24,7 @@ struct i2c_adapter;
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_5_MTP 5
#define GMBUS_PIN_9_TC1_ICP 9
#define GMBUS_PIN_10_TC2_ICP 10
#define GMBUS_PIN_11_TC3_ICP 11
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
new file mode 100644
index 000000000000..53aacbda983c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_REGS_H__
+#define __INTEL_GMBUS_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base)
+
+#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio))
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+/* clock/port select */
+#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100)
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
+
+/* command/status */
+#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
+
+/* status */
+#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
+
+/* data buffer bytes 3-0 */
+#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c)
+
+/* interrupt mask (Pineview+) */
+#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
+
+/* byte index */
+#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
+
+#endif /* __INTEL_GMBUS_REGS_H__ */
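Every register in this new header takes the device pointer so its offset can be rebased on display.gmbus.mmio_base, which intel_gmbus_setup() points at either the VLV or the PCH display range. A standalone sketch of the same offset arithmetic (user-space C; the base values are placeholders for illustration, not authoritative platform constants):

#include <stdio.h>

/* Toy stand-in for drm_i915_private, carrying only the GMBUS MMIO base. */
struct fake_i915 { unsigned int gmbus_mmio_base; };

/* Same arithmetic as GMBUS0()/GPIO() above, minus the _MMIO() wrapper. */
#define GMBUS0_OFFSET(i915)	((i915)->gmbus_mmio_base + 0x5100)
#define GPIO_OFFSET(i915, n)	((i915)->gmbus_mmio_base + 0x5010 + 4 * (n))

int main(void)
{
	struct fake_i915 gmch = { .gmbus_mmio_base = 0x180000 }; /* placeholder base */
	struct fake_i915 pch  = { .gmbus_mmio_base = 0x0c0000 }; /* placeholder base */

	printf("GMBUS0: gmch=0x%x pch=0x%x\n",
	       GMBUS0_OFFSET(&gmch), GMBUS0_OFFSET(&pch));
	printf("GPIOC (gpio 2): gmch=0x%x\n", GPIO_OFFSET(&gmch, 2));
	return 0;
}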
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 8ea66a2e1b09..6406fd487ee5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
@@ -30,8 +31,30 @@
static int intel_conn_to_vcpi(struct intel_connector *connector)
{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_atomic_payload *payload;
+ struct drm_dp_mst_topology_state *mst_state;
+ int vcpi = 0;
+
/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
- return connector->port ? connector->port->vcpi.vcpi : 0;
+ if (!connector->port)
+ return 0;
+ mgr = connector->port->mgr;
+
+ drm_modeset_lock(&mgr->base.lock, NULL);
+ mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
+ payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
+ if (drm_WARN_ON(mgr->dev, !payload))
+ goto out;
+
+ vcpi = payload->vcpi;
+ if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
+ vcpi = 0;
+ goto out;
+ }
+out:
+ drm_modeset_unlock(&mgr->base.lock);
+ return vcpi;
}
/*
@@ -187,12 +210,12 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return false;
/* MEI interface is solid */
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return false;
}
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
/* Sink's capability for HDCP2.2 */
hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
@@ -1109,8 +1132,8 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
- return INTEL_INFO(dev_priv)->display.has_hdcp &&
- (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
+ return RUNTIME_INFO(dev_priv)->has_hdcp &&
+ (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
}
static int
@@ -1123,11 +1146,11 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1135,7 +1158,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1153,11 +1176,11 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1167,7 +1190,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1181,18 +1204,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1207,11 +1230,11 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1219,7 +1242,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1234,11 +1257,11 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1246,7 +1269,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1261,11 +1284,11 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1273,7 +1296,7 @@ hdcp2_verify_lprime(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1287,11 +1310,11 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1299,7 +1322,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1316,11 +1339,11 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1330,7 +1353,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1345,18 +1368,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1369,11 +1392,11 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1381,7 +1404,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1393,17 +1416,17 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
&dig_port->hdcp_port_data);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -2121,10 +2144,10 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
- dev_priv->hdcp_master->mei_dev = mei_kdev;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
}
@@ -2135,9 +2158,9 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_master = NULL;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = NULL;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
static const struct component_ops i915_hdcp_component_ops = {
@@ -2228,19 +2251,19 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
if (!is_hdcp2_supported(dev_priv))
return;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
- dev_priv->hdcp_comp_added = true;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->display.hdcp.comp_added = true;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
ret);
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_comp_added = false;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return;
}
}
@@ -2453,14 +2476,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- if (!dev_priv->hdcp_comp_added) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return;
}
- dev_priv->hdcp_comp_added = false;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}
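
For orientation: every hunk above follows the same access discipline for the firmware component now stored under display.hdcp. A minimal sketch of that pattern (the helper name and callback are hypothetical, not part of this patch, and the usual i915 headers are assumed):

static int i915_hdcp_comp_call(struct drm_i915_private *dev_priv,
			       int (*fn)(struct i915_hdcp_comp_master *comp))
{
	struct i915_hdcp_comp_master *comp;
	int ret;

	/* comp_mutex guards both the master pointer and its ops lifetime. */
	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
	comp = dev_priv->display.hdcp.master;
	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
		return -EINVAL;
	}

	ret = fn(comp);

	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
	return ret;
}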
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
new file mode 100644
index 000000000000..2a3733e8966c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_REGS_H__
+#define __INTEL_HDCP_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* HDCP Key Registers */
+#define HDCP_KEY_CONF _MMIO(0x66c00)
+#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
+#define HDCP_CLEAR_KEYS_TRIGGER REG_BIT(30)
+#define HDCP_KEY_LOAD_TRIGGER REG_BIT(8)
+#define HDCP_KEY_STATUS _MMIO(0x66c04)
+#define HDCP_FUSE_IN_PROGRESS REG_BIT(7)
+#define HDCP_FUSE_ERROR REG_BIT(6)
+#define HDCP_FUSE_DONE REG_BIT(5)
+#define HDCP_KEY_LOAD_STATUS REG_BIT(1)
+#define HDCP_KEY_LOAD_DONE REG_BIT(0)
+#define HDCP_AKSV_LO _MMIO(0x66c10)
+#define HDCP_AKSV_HI _MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT REG_BIT(31)
+#define HDCP_TRANSB_REP_PRESENT REG_BIT(30)
+#define HDCP_TRANSC_REP_PRESENT REG_BIT(29)
+#define HDCP_TRANSD_REP_PRESENT REG_BIT(28)
+#define HDCP_DDIB_REP_PRESENT REG_BIT(30)
+#define HDCP_DDIA_REP_PRESENT REG_BIT(29)
+#define HDCP_DDIC_REP_PRESENT REG_BIT(28)
+#define HDCP_DDID_REP_PRESENT REG_BIT(27)
+#define HDCP_DDIF_REP_PRESENT REG_BIT(26)
+#define HDCP_DDIE_REP_PRESENT REG_BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
+#define HDCP_DDIB_SHA1_M0 (1 << 20)
+#define HDCP_DDIA_SHA1_M0 (2 << 20)
+#define HDCP_DDIC_SHA1_M0 (3 << 20)
+#define HDCP_DDID_SHA1_M0 (4 << 20)
+#define HDCP_DDIF_SHA1_M0 (5 << 20)
+#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
+#define HDCP_SHA1_BUSY REG_BIT(16)
+#define HDCP_SHA1_READY REG_BIT(17)
+#define HDCP_SHA1_COMPLETE REG_BIT(18)
+#define HDCP_SHA1_V_MATCH REG_BIT(19)
+#define HDCP_SHA1_TEXT_32 (1 << 1)
+#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
+#define HDCP_SHA1_TEXT_24 (4 << 1)
+#define HDCP_SHA1_TEXT_16 (5 << 1)
+#define HDCP_SHA1_TEXT_8 (6 << 1)
+#define HDCP_SHA1_TEXT_0 (7 << 1)
+#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
+#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
+#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
+#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
+#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
+#define HDCP_SHA_TEXT _MMIO(0x66d18)
+
+/* HDCP Auth Registers */
+#define _PORTA_HDCP_AUTHENC 0x66800
+#define _PORTB_HDCP_AUTHENC 0x66500
+#define _PORTC_HDCP_AUTHENC 0x66600
+#define _PORTD_HDCP_AUTHENC 0x66700
+#define _PORTE_HDCP_AUTHENC 0x66A00
+#define _PORTF_HDCP_AUTHENC 0x66900
+#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
+ _PORTA_HDCP_AUTHENC, \
+ _PORTB_HDCP_AUTHENC, \
+ _PORTC_HDCP_AUTHENC, \
+ _PORTD_HDCP_AUTHENC, \
+ _PORTE_HDCP_AUTHENC, \
+ _PORTF_HDCP_AUTHENC) + (x))
+#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
+#define HDCP_CONF_CAPTURE_AN REG_BIT(0)
+#define HDCP_CONF_AUTH_AND_ENC (REG_BIT(1) | REG_BIT(0))
+#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
+#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
+#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
+#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
+#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
+#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
+#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
+#define HDCP_STATUS_STREAM_A_ENC REG_BIT(31)
+#define HDCP_STATUS_STREAM_B_ENC REG_BIT(30)
+#define HDCP_STATUS_STREAM_C_ENC REG_BIT(29)
+#define HDCP_STATUS_STREAM_D_ENC REG_BIT(28)
+#define HDCP_STATUS_AUTH REG_BIT(21)
+#define HDCP_STATUS_ENC REG_BIT(20)
+#define HDCP_STATUS_RI_MATCH REG_BIT(19)
+#define HDCP_STATUS_R0_READY REG_BIT(18)
+#define HDCP_STATUS_AN_READY REG_BIT(17)
+#define HDCP_STATUS_CIPHER REG_BIT(16)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
+#define AUTH_LINK_AUTHENTICATED REG_BIT(31)
+#define AUTH_LINK_TYPE REG_BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
+#define AUTH_CLR_KEYS REG_BIT(18)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
+#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
+#define LINK_TYPE_STATUS REG_BIT(22)
+#define LINK_AUTH_STATUS REG_BIT(21)
+#define LINK_ENCRYPTION_STATUS REG_BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
+
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
+#define STREAM_TYPE_STATUS REG_BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE REG_BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
+#endif /* __INTEL_HDCP_REGS_H__ */
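
For reference, the GRAPHICS_VER() >= 12 selector macros in this header resolve to per-transcoder registers on gen12+ and to the legacy per-port (or per-pipe) registers otherwise. A minimal usage sketch, not code from this series (helper name hypothetical; assumes the usual i915 headers and intel_de_read()):

static bool hdcp_link_encrypted(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Picks TRANS_HDCP_STATUS() on gen12+, PORT_HDCP_STATUS() otherwise. */
	u32 val = intel_de_read(dev_priv,
				HDCP_STATUS(dev_priv, cpu_transcoder, port));

	return val & HDCP_STATUS_ENC;
}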
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ebd91aa69dd2..7816b2a33fee 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -50,6 +50,7 @@
#include "intel_dp.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
@@ -1891,7 +1892,7 @@ int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
* 1.5x for 12bpc
* 1.25x for 10bpc
*/
- return clock * bpc / 8;
+ return DIV_ROUND_CLOSEST(clock * bpc, 8);
}
static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
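
A quick check of the rounding change above: at 10 bpc with a 262750 kHz dotclock, clock * bpc / 8 truncates 2627500 / 8 = 328437.5 down to 328437 kHz, while DIV_ROUND_CLOSEST(2627500, 8) returns 328438 kHz, so the computed TMDS clock is no longer rounded down by up to almost 1 kHz.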
@@ -2001,6 +2002,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock *= 2;
}
+ /*
+ * HDMI 2.1 requires higher resolution modes like 8K60 and 4K120 to be
+ * enumerated only if FRL is supported. Current platforms do not support
+ * FRL, so prune the higher resolution modes that require a dotclock
+ * above 600 MHz.
+ */
+ if (clock > 600000)
+ return MODE_CLOCK_HIGH;
+
ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
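
To put the new 600 MHz cutoff in concrete terms: a 3840x2160@60 RGB mode (594 MHz dotclock) still passes, while 4K120 and 8K modes, whose dotclocks sit well above 600 MHz and which HDMI 2.1 carries over FRL, now return MODE_CLOCK_HIGH from intel_hdmi_mode_valid().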
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 5f8b4f481cff..f7a2f485b177 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -119,13 +119,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
+ * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
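
To make the weighting concrete, assuming an illustrative threshold of 50: roughly five long HPD pulses (at +10 each) or fifty short ones within a single HPD_STORM_DETECT_PERIOD are enough to trip the detector and mark the pin HPD_MARK_DISABLED.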
@@ -140,7 +140,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
enum hpd_pin pin, bool long_hpd)
{
- struct i915_hotplug *hpd = &dev_priv->hotplug;
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -148,7 +148,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -191,7 +191,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
drm_info(&dev_priv->drm,
@@ -199,7 +199,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -209,7 +209,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
- mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -218,7 +218,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
- hotplug.reenable_work.work);
+ display.hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -233,7 +233,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
@@ -245,8 +245,8 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
}
intel_hpd_irq_setup(dev_priv);
@@ -297,16 +297,16 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+ container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
u32 long_port_mask, short_port_mask;
struct intel_encoder *encoder;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->hotplug.long_port_mask;
- dev_priv->hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->hotplug.short_port_mask;
- dev_priv->hotplug.short_port_mask = 0;
+ long_port_mask = dev_priv->display.hotplug.long_port_mask;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ short_port_mask = dev_priv->display.hotplug.short_port_mask;
+ dev_priv->display.hotplug.short_port_mask = 0;
spin_unlock_irq(&dev_priv->irq_lock);
for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -335,9 +335,9 @@ static void i915_digport_work_func(struct work_struct *work)
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.event_bits |= old_bits;
+ dev_priv->display.hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -353,10 +353,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
spin_lock_irq(&i915->irq_lock);
- i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+ i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
spin_unlock_irq(&i915->irq_lock);
- queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+ queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}
/*
@@ -366,7 +366,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- hotplug.hotplug_work.work);
+ display.hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -379,10 +379,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
- hpd_event_bits = dev_priv->hotplug.event_bits;
- dev_priv->hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->hotplug.retry_bits;
- dev_priv->hotplug.retry_bits = 0;
+ hpd_event_bits = dev_priv->display.hotplug.event_bits;
+ dev_priv->display.hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
+ dev_priv->display.hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -435,10 +435,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
retry &= ~changed;
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.retry_bits |= retry;
+ dev_priv->display.hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
}
}
@@ -502,10 +502,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->hotplug.long_port_mask |= BIT(port);
+ dev_priv->display.hotplug.long_port_mask |= BIT(port);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->hotplug.short_port_mask |= BIT(port);
+ dev_priv->display.hotplug.short_port_mask |= BIT(port);
}
}
@@ -516,7 +516,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
@@ -529,7 +529,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
continue;
}
- if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
+ if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -540,13 +540,13 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->hotplug.event_bits |= BIT(pin);
+ dev_priv->display.hotplug.event_bits |= BIT(pin);
long_hpd = true;
queue_hp = true;
}
if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->hotplug.event_bits &= ~BIT(pin);
+ dev_priv->display.hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -567,9 +567,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+ queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
/**
@@ -594,8 +594,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
return;
for_each_hpd_pin(i) {
- dev_priv->hotplug.stats[i].count = 0;
- dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ dev_priv->display.hotplug.stats[i].count = 0;
+ dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
}
/*
@@ -611,7 +611,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- hotplug.poll_init_work);
+ display.hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -619,7 +619,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+ enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
@@ -672,7 +672,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
!INTEL_DISPLAY_ENABLED(dev_priv))
return;
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -680,7 +680,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- schedule_work(&dev_priv->hotplug.poll_init_work);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
/**
@@ -707,17 +707,17 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
- schedule_work(&dev_priv->hotplug.poll_init_work);
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
- INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+ INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
}
@@ -728,17 +728,17 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.long_port_mask = 0;
- dev_priv->hotplug.short_port_mask = 0;
- dev_priv->hotplug.event_bits = 0;
- dev_priv->hotplug.retry_bits = 0;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ dev_priv->display.hotplug.short_port_mask = 0;
+ dev_priv->display.hotplug.event_bits = 0;
+ dev_priv->display.hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- cancel_work_sync(&dev_priv->hotplug.dig_port_work);
- cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
- cancel_work_sync(&dev_priv->hotplug.poll_init_work);
- cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+ cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
+ cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@@ -749,8 +749,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
return false;
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
ret = true;
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -764,6 +764,6 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
return;
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
spin_unlock_irq(&dev_priv->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 4970bf146c4a..dca6003ccac8 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -73,8 +73,9 @@
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"
+#include "intel_pci_config.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL)
+#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL)
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
@@ -96,13 +97,13 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
- rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq;
+ rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
- rsc[1].start = pci_resource_start(pdev, 0) +
+ rsc[1].start = pci_resource_start(pdev, GTTMMADR_BAR) +
I915_HDMI_LPE_AUDIO_BASE;
- rsc[1].end = pci_resource_start(pdev, 0) +
+ rsc[1].end = pci_resource_start(pdev, GTTMMADR_BAR) +
I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
@@ -148,7 +149,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
- platform_device_unregister(dev_priv->audio.lpe.platdev);
+ platform_device_unregister(dev_priv->display.audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -167,7 +168,7 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->audio.lpe.irq;
+ int irq = dev_priv->display.audio.lpe.irq;
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
@@ -204,15 +205,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
{
int ret;
- dev_priv->audio.lpe.irq = irq_alloc_desc(0);
- if (dev_priv->audio.lpe.irq < 0) {
+ dev_priv->display.audio.lpe.irq = irq_alloc_desc(0);
+ if (dev_priv->display.audio.lpe.irq < 0) {
drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
- dev_priv->audio.lpe.irq);
- ret = dev_priv->audio.lpe.irq;
+ dev_priv->display.audio.lpe.irq);
+ ret = dev_priv->display.audio.lpe.irq;
goto err;
}
- drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq);
ret = lpe_audio_irq_init(dev_priv);
@@ -223,10 +224,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
goto err_free_irq;
}
- dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
+ dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
- if (IS_ERR(dev_priv->audio.lpe.platdev)) {
- ret = PTR_ERR(dev_priv->audio.lpe.platdev);
+ if (IS_ERR(dev_priv->display.audio.lpe.platdev)) {
+ ret = PTR_ERR(dev_priv->display.audio.lpe.platdev);
drm_err(&dev_priv->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
@@ -241,10 +242,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
return 0;
err_free_irq:
- irq_free_desc(dev_priv->audio.lpe.irq);
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
err:
- dev_priv->audio.lpe.irq = -1;
- dev_priv->audio.lpe.platdev = NULL;
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
return ret;
}
@@ -262,7 +263,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
if (!HAS_LPE_AUDIO(dev_priv))
return;
- ret = generic_handle_irq(dev_priv->audio.lpe.irq);
+ ret = generic_handle_irq(dev_priv->display.audio.lpe.irq);
if (ret)
drm_err_ratelimited(&dev_priv->drm,
"error handling LPE audio irq: %d\n", ret);
@@ -303,10 +304,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
- irq_free_desc(dev_priv->audio.lpe.irq);
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
- dev_priv->audio.lpe.irq = -1;
- dev_priv->audio.lpe.platdev = NULL;
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
}
/**
@@ -333,7 +334,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (!HAS_LPE_AUDIO(dev_priv))
return;
- pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev);
+ pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
@@ -361,7 +362,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B);
+ pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 730480ac3300..9aa38e8141b5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -837,12 +837,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+ drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->vbt.int_lvds_support) {
+ if (!dev_priv->display.vbt.int_lvds_support) {
drm_dbg_kms(&dev_priv->drm,
"Internal LVDS support disabled by VBT\n");
return;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index f0e04d3904c6..cbfabd58b75a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -23,6 +23,7 @@
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
#include "intel_pm.h"
+#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
@@ -30,11 +31,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct intel_encoder *encoder;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->bw_obj.state);
+ to_intel_bw_state(i915->display.bw.obj.state);
struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -70,7 +71,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
- i915->display->crtc_disable(to_intel_atomic_state(state), crtc);
+ i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
@@ -415,9 +416,9 @@ static void readout_plane_state(struct drm_i915_private *i915)
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -535,7 +536,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->bw_obj.state);
+ to_intel_bw_state(i915->display.bw.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a91586d77cb6..0fdcf2e6d57f 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -15,8 +15,8 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
-#include "intel_pm.h"
#include "intel_snps_phy.h"
+#include "skl_watermark.h"
/*
* Cross check the actual hw state with our own modeset state tracking (and its
@@ -94,10 +94,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
/*
* FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
+ * Yell if the encoder disagrees. Allow for slight
+ * rounding differences.
*/
- drm_WARN(&dev_priv->drm,
- !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
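
In practice the relaxed check means an FDI-derived dotclock that differs from the encoder's by at most 1 kHz (e.g. 148501 vs 148500) no longer triggers the warning, while larger mismatches still do.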
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 1c0c745c142d..caa07ef34f21 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -252,7 +252,7 @@ struct opregion_asle_ext {
static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = i915->opregion.swsci;
+ struct opregion_swsci *swsci = i915->display.opregion.swsci;
u32 main_function, sub_function;
if (!swsci)
@@ -265,11 +265,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((i915->opregion.swsci_sbcb_sub_functions &
+ if ((i915->display.opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((i915->opregion.swsci_gbda_sub_functions &
+ if ((i915->display.opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
@@ -280,7 +280,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
- struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 scic, dslp;
u16 swsci_val;
@@ -462,7 +462,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
struct drm_device *dev = &dev_priv->drm;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@@ -586,8 +586,8 @@ static void asle_work(struct work_struct *work)
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
- container_of(opregion, struct drm_i915_private, opregion);
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ container_of(opregion, struct drm_i915_private, display.opregion);
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -635,8 +635,8 @@ static void asle_work(struct work_struct *work)
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- if (dev_priv->opregion.asle)
- schedule_work(&dev_priv->opregion.asle_work);
+ if (dev_priv->display.opregion.asle)
+ schedule_work(&dev_priv->display.opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -692,7 +692,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -731,7 +731,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -761,7 +761,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -839,7 +839,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
const struct firmware *fw = NULL;
const char *name = dev_priv->params.vbt_firmware;
int ret;
@@ -879,7 +879,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
@@ -1106,7 +1106,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
const void *in_edid;
const struct edid *edid;
struct edid *new_edid;
@@ -1141,7 +1141,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
struct opregion_header *header = opregion->header;
if (!header || header->over.major < 2 ||
@@ -1153,7 +1153,7 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
void intel_opregion_register(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1169,7 +1169,7 @@ void intel_opregion_register(struct drm_i915_private *i915)
void intel_opregion_resume(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1200,7 +1200,7 @@ void intel_opregion_resume(struct drm_i915_private *i915)
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1210,7 +1210,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
- cancel_work_sync(&i915->opregion.asle_work);
+ cancel_work_sync(&i915->display.opregion.asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
@@ -1218,7 +1218,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
void intel_opregion_unregister(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
intel_opregion_suspend(i915, PCI_D1);
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 79ed8bd04a07..c12bdca8da9b 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -211,9 +211,9 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
else
- intel_de_write(dev_priv, DSPCLK_GATE_D,
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
@@ -487,7 +487,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
void intel_overlay_reset(struct drm_i915_private *dev_priv)
{
- struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay *overlay = dev_priv->display.overlay;
if (!overlay)
return;
@@ -1113,7 +1113,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *new_bo;
int ret;
- overlay = dev_priv->overlay;
+ overlay = dev_priv->display.overlay;
if (!overlay) {
drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
@@ -1273,7 +1273,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
int ret;
- overlay = dev_priv->overlay;
+ overlay = dev_priv->display.overlay;
if (!overlay) {
drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
@@ -1416,7 +1416,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
- dev_priv->overlay = overlay;
+ dev_priv->display.overlay = overlay;
drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
@@ -1428,7 +1428,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->overlay);
+ overlay = fetch_and_zero(&dev_priv->display.overlay);
if (!overlay)
return;
@@ -1457,7 +1457,7 @@ struct intel_overlay_error_state {
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay *overlay = dev_priv->display.overlay;
struct intel_overlay_error_state *error;
if (!overlay || !overlay->active)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 237a40623dd7..a3a3f9fe4342 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -37,13 +37,14 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"
+#include "intel_quirks.h"
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
if (i915->params.panel_use_ssc >= 0)
return i915->params.panel_use_ssc != 0;
- return i915->vbt.lvds_use_ssc
- && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
+ return i915->display.vbt.lvds_use_ssc &&
+ !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
}
const struct drm_display_mode *
@@ -81,15 +82,14 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
mode->clock != preferred_mode->clock;
}
-static bool is_alt_vrr_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode)
+static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
{
return drm_mode_match(mode, preferred_mode,
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS) &&
mode->hdisplay == preferred_mode->hdisplay &&
- mode->vdisplay == preferred_mode->vdisplay &&
- mode->clock != preferred_mode->clock;
+ mode->vdisplay == preferred_mode->vdisplay;
}
const struct drm_display_mode *
@@ -114,6 +114,21 @@ intel_panel_downclock_mode(struct intel_connector *connector,
return best_mode;
}
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode;
+
+ /* pick the fixed_mode that has the highest clock */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ if (fixed_mode->clock > best_mode->clock)
+ best_mode = fixed_mode;
+ }
+
+ return best_mode;
+}
+
int intel_panel_get_modes(struct intel_connector *connector)
{
const struct drm_display_mode *fixed_mode;
@@ -172,19 +187,7 @@ int intel_panel_compute_config(struct intel_connector *connector,
return 0;
}
-static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode,
- bool has_vrr)
-{
- /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */
- if (has_vrr)
- return is_alt_vrr_mode(mode, preferred_mode);
- else
- return is_alt_drrs_mode(mode, preferred_mode);
-}
-
-static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector,
- bool has_vrr)
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *preferred_mode =
@@ -192,7 +195,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
- if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr))
+ if (!is_alt_fixed_mode(mode, preferred_mode))
continue;
drm_dbg_kms(&dev_priv->drm,
@@ -255,7 +258,7 @@ void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
{
intel_panel_add_edid_preferred_mode(connector);
if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
- intel_panel_add_edid_alt_fixed_modes(connector, has_vrr);
+ intel_panel_add_edid_alt_fixed_modes(connector);
intel_panel_destroy_probed_modes(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b087c0c3cc6d..eff3ffd3d082 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -31,6 +31,9 @@ intel_panel_fixed_mode(struct intel_connector *connector,
const struct drm_display_mode *
intel_panel_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *adjusted_mode);
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode);
int intel_panel_get_modes(struct intel_connector *connector);
enum drrs_type intel_panel_drrs_type(struct intel_connector *connector);
enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9934c8a9e240..a66097cdc1e0 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -167,6 +167,15 @@ static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
}
}
+int lpt_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct iclkip_params p;
+
+ lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);
+
+ return lpt_iclkip_freq(&p);
+}
+
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
@@ -179,6 +188,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
lpt_disable_iclkip(dev_priv);
lpt_compute_iclkip(&p, clock);
+ drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
@@ -514,7 +524,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
if (HAS_PCH_IBX(dev_priv)) {
- has_ck505 = dev_priv->vbt.display_clock_mode;
+ has_ck505 = dev_priv->display.vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
@@ -522,7 +532,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
@@ -654,7 +664,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- BUG_ON(val != final);
+ drm_WARN_ON(&dev_priv->drm, val != final);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index 12ab2c75a800..9bcf56629f24 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -14,6 +14,7 @@ struct intel_crtc_state;
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+int lpt_iclkip(const struct intel_crtc_state *crtc_state);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index d10f27d0b7b0..76be796df255 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -311,7 +311,7 @@ void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display->get_initial_plane_config(crtc, &plane_config);
+ dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 1b21a341962f..21944f5bf3a8 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -12,6 +12,7 @@
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_pps.h"
+#include "intel_quirks.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -28,7 +29,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
* See intel_pps_reset_all() why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
- mutex_lock(&dev_priv->pps_mutex);
+ mutex_lock(&dev_priv->display.pps.mutex);
return wakeref;
}
@@ -38,7 +39,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- mutex_unlock(&dev_priv->pps_mutex);
+ mutex_unlock(&dev_priv->display.pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
@@ -163,7 +164,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* We should never land here with regular DP ports */
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
@@ -212,7 +213,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
struct intel_connector *connector = intel_dp->attached_connector;
int backlight_controller = connector->panel.vbt.backlight.controller;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* We should never land here with regular DP ports */
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
@@ -282,7 +283,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum port port = dig_port->base.port;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
@@ -407,7 +408,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
@@ -420,7 +421,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
@@ -463,7 +464,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
intel_pps_verify_state(intel_dp);
@@ -556,7 +557,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
@@ -580,7 +581,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return false;
@@ -657,7 +658,7 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
@@ -748,7 +749,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -771,7 +772,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -832,7 +833,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -991,7 +992,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1021,7 +1022,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
@@ -1064,7 +1065,7 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -1176,7 +1177,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
@@ -1202,7 +1203,7 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
drm_dbg_kms(&dev_priv->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
@@ -1223,7 +1224,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -1246,7 +1247,7 @@ static void pps_init_delays(struct intel_dp *intel_dp)
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* already initialized? */
if (pps_delays_valid(final))
@@ -1312,7 +1313,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
intel_pps_get_registers(intel_dp, &regs);
@@ -1487,11 +1488,11 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
void intel_pps_setup(struct drm_i915_private *i915)
{
if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->pps_mmio_base = PCH_PPS_BASE;
+ i915->display.pps.mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->pps_mmio_base = VLV_PPS_BASE;
+ i915->display.pps.mmio_base = VLV_PPS_BASE;
else
- i915->pps_mmio_base = PPS_BASE;
+ i915->display.pps.mmio_base = PPS_BASE;
}
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
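The hunks above are mechanical fallout of a wider i915 reorganisation: per-domain display state such as PPS moves from flat fields on struct drm_i915_private (pps_mutex, pps_mmio_base) into a nested display.pps sub-struct, so every call site switches to dev_priv->display.pps.*. A minimal standalone sketch of that layout change, using stand-in types and a made-up mmio value rather than the real i915 definitions:

/*
 * Sketch only: per-domain state nested under a "display" sub-struct.
 * All names except display.pps.mmio_base are stand-ins.
 */
#include <stdio.h>

struct pps_state {
	int mutex;              /* stand-in for struct mutex */
	unsigned int mmio_base; /* PCH/VLV/default PPS base in the real driver */
};

struct display_state {
	struct pps_state pps;
};

struct device_priv {
	struct display_state display;
};

int main(void)
{
	struct device_priv dev_priv = { 0 };

	/* old shape: dev_priv.pps_mmio_base = ...;  new shape: */
	dev_priv.display.pps.mmio_base = 0x1000; /* made-up base address */
	printf("pps mmio base: %#x\n", dev_priv.display.pps.mmio_base);
	return 0;
}

The same nesting shows up throughout the rest of this diff (display.dmc, display.vbt, display.hotplug and display.quirks below).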
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index e6a870641cd2..9def8d9fade6 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -706,7 +706,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -805,13 +805,14 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
- /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
- req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
+ /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
+ req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
if ((hblank_ns - req_ns) > 100)
return true;
- if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ /* Not supported <13 / Wa_22012279113:adl-p */
+ if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
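The reworked guard can be checked with plain integer arithmetic; only the formula comes from the new comment above, while the link and mode numbers below are hypothetical:

/*
 * Standalone arithmetic check of the hunk above.  lane_count, port_clock,
 * crtc_clock and hblank_total are made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int lane_count = 4;        /* hypothetical eDP link width */
	int port_clock = 540000;   /* kHz, hypothetical link rate */
	int crtc_clock = 356743;   /* kHz, hypothetical pixel clock */
	int hblank_total = 160;    /* hypothetical hblank length in pixels */

	/* ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	int req_ns = ((60 / lane_count) + 11) * 1000 / (port_clock / 1000);
	uint64_t hblank_ns = 1000000ULL * hblank_total / crtc_clock;

	printf("req_ns=%d hblank_ns=%llu margin_ok=%d\n",
	       req_ns, (unsigned long long)hblank_ns,
	       (int)(hblank_ns - req_ns) > 100);
	return 0;
}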
@@ -1721,8 +1722,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
new_plane_state, i) {
struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
.x2 = INT_MAX };
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
@@ -1767,22 +1766,18 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
continue;
}
- drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ src = drm_plane_state_src(&new_plane_state->uapi);
+ drm_rect_fp_to_int(&src, &src);
- drm_atomic_helper_damage_iter_init(&iter,
- &old_plane_state->uapi,
- &new_plane_state->uapi);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (drm_rect_intersect(&clip, &src))
- clip_area_update(&damaged_area, &clip,
- &crtc_state->pipe_src);
- }
-
- if (damaged_area.y1 == -1)
+ if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
+ &new_plane_state->uapi, &damaged_area))
continue;
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
}
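The rewrite above drops the open-coded damage-clip iteration in favour of drm_atomic_helper_damage_merged(), which hands back a single bounding rectangle covering all of a plane's damage clips (and reports when there is none). A standalone sketch of that merging idea, with a stand-in rect type and made-up clip values rather than the real DRM helper:

/* Sketch of "merged damage": the bounding union of several clip rects. */
#include <limits.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static void merge(struct rect *dst, const struct rect *clip)
{
	if (clip->x1 < dst->x1) dst->x1 = clip->x1;
	if (clip->y1 < dst->y1) dst->y1 = clip->y1;
	if (clip->x2 > dst->x2) dst->x2 = clip->x2;
	if (clip->y2 > dst->y2) dst->y2 = clip->y2;
}

int main(void)
{
	const struct rect clips[] = {	/* hypothetical damage clips */
		{  10,  20,  64,  64 },
		{ 100, 200, 300, 240 },
	};
	struct rect merged = { INT_MAX, INT_MAX, INT_MIN, INT_MIN };
	unsigned int i;

	for (i = 0; i < sizeof(clips) / sizeof(clips[0]); i++)
		merge(&merged, &clips[i]);

	printf("merged damage: (%d,%d)-(%d,%d)\n",
	       merged.x1, merged.y1, merged.x2, merged.y2);
	return 0;
}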
@@ -1863,7 +1858,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
@@ -1871,7 +1868,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
- crtc_state->uapi.encoder_mask) {
+ old_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_psr *psr = &intel_dp->psr;
bool needs_to_disable = false;
@@ -1884,10 +1881,10 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
* - All planes will go inactive
* - Changing between PSR versions
*/
- needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
- needs_to_disable |= !crtc_state->has_psr;
- needs_to_disable |= !crtc_state->active_planes;
- needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
+ needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
+ needs_to_disable |= !new_crtc_state->has_psr;
+ needs_to_disable |= !new_crtc_state->active_planes;
+ needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
if (psr->enabled && needs_to_disable)
intel_psr_disable_locked(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index c8488f5ebd04..6e48d3bcdfec 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -9,12 +9,17 @@
#include "intel_display_types.h"
#include "intel_quirks.h"
+static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ i915->display.quirks.mask |= BIT(quirk);
+}
+
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}
@@ -24,14 +29,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
*/
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
drm_info(&i915->drm, "applying backlight present quirk\n");
}
@@ -40,7 +45,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
*/
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+ intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
drm_info(&i915->drm, "Applying T12 delay quirk\n");
}
@@ -50,13 +55,13 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
*/
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK;
+ intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
}
@@ -191,6 +196,9 @@ static struct intel_quirk intel_quirks[] = {
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
};
void intel_init_quirks(struct drm_i915_private *i915)
@@ -213,3 +221,8 @@ void intel_init_quirks(struct drm_i915_private *i915)
intel_dmi_quirks[i].hook(i915);
}
}
+
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ return i915->display.quirks.mask & BIT(quirk);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index b0fcff142a56..10a4d163149f 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -6,8 +6,20 @@
#ifndef __INTEL_QUIRKS_H__
#define __INTEL_QUIRKS_H__
+#include <linux/types.h>
+
struct drm_i915_private;
-void intel_init_quirks(struct drm_i915_private *dev_priv);
+enum intel_quirk_id {
+ QUIRK_BACKLIGHT_PRESENT,
+ QUIRK_INCREASE_DDI_DISABLED_TIME,
+ QUIRK_INCREASE_T12_DELAY,
+ QUIRK_INVERT_BRIGHTNESS,
+ QUIRK_LVDS_SSC_DISABLE,
+ QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+};
+
+void intel_init_quirks(struct drm_i915_private *i915);
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
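With this header change the old QUIRK_* flag macros become an enum whose values index bits in display.quirks.mask through intel_set_quirk()/intel_has_quirk(). A standalone sketch of the same set/test-by-BIT() pattern, with a stand-in struct in place of the i915 one:

/* Sketch of the quirk bitmask accessors; struct name is a stand-in. */
#include <stdio.h>

#define BIT(n) (1u << (n))

enum intel_quirk_id {
	QUIRK_BACKLIGHT_PRESENT,
	QUIRK_INCREASE_DDI_DISABLED_TIME,
	QUIRK_INCREASE_T12_DELAY,
	QUIRK_INVERT_BRIGHTNESS,
	QUIRK_LVDS_SSC_DISABLE,
	QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
};

struct display_quirks { unsigned int mask; };

static void set_quirk(struct display_quirks *q, enum intel_quirk_id quirk)
{
	q->mask |= BIT(quirk);
}

static int has_quirk(const struct display_quirks *q, enum intel_quirk_id quirk)
{
	return !!(q->mask & BIT(quirk));
}

int main(void)
{
	struct display_quirks q = { 0 };

	set_quirk(&q, QUIRK_INCREASE_T12_DELAY);
	printf("t12 quirk: %d, ssc quirk: %d\n",
	       has_quirk(&q, QUIRK_INCREASE_T12_DELAY),
	       has_quirk(&q, QUIRK_LVDS_SSC_DISABLE));
	return 0;
}

Keeping the quirk test behind an accessor is what lets pps_init_delays_vbt() above switch from an open-coded mask check to intel_has_quirk().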
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 19122bc6d2ab..f5b744bef18f 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2016,7 +2016,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
- dev_priv->vbt.crt_ddc_pin));
+ dev_priv->display.vbt.crt_ddc_pin));
}
static enum drm_connector_status
@@ -2581,9 +2581,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->vbt.sdvo_mappings[0];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->vbt.sdvo_mappings[1];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized)
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2599,9 +2599,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
u8 pin;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->vbt.sdvo_mappings[0];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->vbt.sdvo_mappings[1];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@@ -2639,11 +2639,11 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
- my_mapping = &dev_priv->vbt.sdvo_mappings[0];
- other_mapping = &dev_priv->vbt.sdvo_mappings[1];
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
} else {
- my_mapping = &dev_priv->vbt.sdvo_mappings[1];
- other_mapping = &dev_priv->vbt.sdvo_mappings[0];
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 0bdbedc67d7d..937cefd6f78f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -518,6 +518,1086 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
};
/* values in the below table are calculated using the algo */
+static const struct intel_mpllb_state dg2_hdmi_25200 = {
+ .clock = 25200,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_27027 = {
+ .clock = 27027,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_28320 = {
+ .clock = 28320,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_30240 = {
+ .clock = 30240,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_31500 = {
+ .clock = 31500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_36000 = {
+ .clock = 36000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_40000 = {
+ .clock = 40000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_49500 = {
+ .clock = 49500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_50000 = {
+ .clock = 50000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_57284 = {
+ .clock = 57284,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_58000 = {
+ .clock = 58000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_65000 = {
+ .clock = 65000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_71000 = {
+ .clock = 71000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_74176 = {
+ .clock = 74176,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_75000 = {
+ .clock = 75000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_78750 = {
+ .clock = 78750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_85500 = {
+ .clock = 85500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_88750 = {
+ .clock = 88750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_106500 = {
+ .clock = 106500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_108000 = {
+ .clock = 108000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_115500 = {
+ .clock = 115500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_119000 = {
+ .clock = 119000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_135000 = {
+ .clock = 135000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_138500 = {
+ .clock = 138500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_147160 = {
+ .clock = 147160,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_148352 = {
+ .clock = 148352,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_154000 = {
+ .clock = 154000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_162000 = {
+ .clock = 162000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_209800 = {
+ .clock = 209800,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_262750 = {
+ .clock = 262750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ .clock = 268500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_296703 = {
+ .clock = 296703,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_241500 = {
+ .clock = 241500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ .clock = 497750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_592000 = {
+ .clock = 592000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_593407 = {
+ .clock = 593407,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7549),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_297 = {
.clock = 297000,
.ref_control =
@@ -584,6 +1664,42 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_148_5,
&dg2_hdmi_297,
&dg2_hdmi_594,
+ &dg2_hdmi_25200,
+ &dg2_hdmi_27027,
+ &dg2_hdmi_28320,
+ &dg2_hdmi_30240,
+ &dg2_hdmi_31500,
+ &dg2_hdmi_36000,
+ &dg2_hdmi_40000,
+ &dg2_hdmi_49500,
+ &dg2_hdmi_50000,
+ &dg2_hdmi_57284,
+ &dg2_hdmi_58000,
+ &dg2_hdmi_65000,
+ &dg2_hdmi_71000,
+ &dg2_hdmi_74176,
+ &dg2_hdmi_75000,
+ &dg2_hdmi_78750,
+ &dg2_hdmi_85500,
+ &dg2_hdmi_88750,
+ &dg2_hdmi_106500,
+ &dg2_hdmi_108000,
+ &dg2_hdmi_115500,
+ &dg2_hdmi_119000,
+ &dg2_hdmi_135000,
+ &dg2_hdmi_138500,
+ &dg2_hdmi_147160,
+ &dg2_hdmi_148352,
+ &dg2_hdmi_154000,
+ &dg2_hdmi_162000,
+ &dg2_hdmi_209800,
+ &dg2_hdmi_241500,
+ &dg2_hdmi_262750,
+ &dg2_hdmi_268500,
+ &dg2_hdmi_296703,
+ &dg2_hdmi_497750,
+ &dg2_hdmi_592000,
+ &dg2_hdmi_593407,
NULL,
};
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 2713faad0625..7649c50b5445 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -39,7 +39,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include "i915_drv.h"
@@ -1355,8 +1354,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int min_scale = DRM_PLANE_NO_SCALING;
+ int max_scale = DRM_PLANE_NO_SCALING;
int ret;
if (g4x_fb_scalable(plane_state->hw.fb)) {
@@ -1426,8 +1425,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return ret;
ret = intel_atomic_plane_check_clipping(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6773840f6cc7..e5af955b5600 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -246,7 +246,7 @@ static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
- u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
u32 mask = 0;
u32 val;
@@ -279,7 +279,7 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
- u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
struct intel_uncore *uncore = &i915->uncore;
u32 val, mask = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9379f3463344..dcf89d701f0f 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -39,6 +39,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
@@ -982,10 +983,10 @@ intel_tv_mode_vdisplay(const struct tv_mode *tv_mode)
static void
intel_tv_mode_to_mode(struct drm_display_mode *mode,
- const struct tv_mode *tv_mode)
+ const struct tv_mode *tv_mode,
+ int clock)
{
- mode->clock = tv_mode->clock /
- (tv_mode->oversample >> !tv_mode->progressive);
+ mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive);
/*
* tv_mode horizontal timings:
@@ -1143,7 +1144,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
xsize = tmp >> 16;
ysize = tmp & 0xffff;
- intel_tv_mode_to_mode(&mode, &tv_mode);
+ intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock);
drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&mode));
@@ -1184,6 +1185,9 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(pipe_config->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
@@ -1192,6 +1196,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
int hdisplay = adjusted_mode->crtc_hdisplay;
int vdisplay = adjusted_mode->crtc_vdisplay;
+ int ret;
if (!tv_mode)
return -EINVAL;
@@ -1206,7 +1211,13 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = tv_mode->clock;
- intel_tv_mode_to_mode(adjusted_mode, tv_mode);
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ pipe_config->clock_set = true;
+
+ intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock);
drm_mode_set_crtcinfo(adjusted_mode, 0);
if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
@@ -1804,7 +1815,7 @@ intel_tv_get_modes(struct drm_connector *connector)
* about the actual timings of the mode. We
* do ignore the margins though.
*/
- intel_tv_mode_to_mode(mode, tv_mode);
+ intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock);
if (count == 0) {
drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 509b0a419c20..a9f44abfc9fc 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -76,6 +76,20 @@ struct bdb_header {
} __packed;
/*
+ * BDB version number dependencies are documented as:
+ *
+ * <start>+
+ * indicates the field was introduced in version <start>
+ * and is still valid
+ *
+ * <start>-<end>
+ * indicates the field was introduced in version <start>
+ * and obsoleted in version <end>+1.
+ *
+ * ??? indicates the specific version number is unknown
+ */
+
+/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
@@ -144,12 +158,12 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rotate_180:1; /* 181 */
+ u8 rotate_180:1; /* 181+ */
u8 fdi_rx_polarity_inverted:1;
- u8 vbios_extended_mode:1; /* 160 */
- u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
- u8 panel_best_fit_timing:1; /* 160 */
- u8 ignore_strap_state:1; /* 160 */
+ u8 vbios_extended_mode:1; /* 160+ */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160+ */
+ u8 panel_best_fit_timing:1; /* 160+ */
+ u8 ignore_strap_state:1; /* 160+ */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -164,11 +178,11 @@ struct bdb_general_features {
u8 rsvd11:2; /* finish byte */
/* bits 6 */
- u8 tc_hpd_retry_timeout:7; /* 242 */
+ u8 tc_hpd_retry_timeout:7; /* 242+ */
u8 rsvd12:1;
/* bits 7 */
- u8 afc_startup_config:2;/* 249 */
+ u8 afc_startup_config:2; /* 249+ */
u8 rsvd13:6;
} __packed;
@@ -183,6 +197,15 @@ struct bdb_general_features {
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
/* Device handle */
+#define DEVICE_HANDLE_CRT 0x0001
+#define DEVICE_HANDLE_EFP1 0x0004
+#define DEVICE_HANDLE_EFP2 0x0040
+#define DEVICE_HANDLE_EFP3 0x0020
+#define DEVICE_HANDLE_EFP4 0x0010 /* 194+ */
+#define DEVICE_HANDLE_EFP5 0x0002 /* 215+ */
+#define DEVICE_HANDLE_EFP6 0x0001 /* 217+ */
+#define DEVICE_HANDLE_EFP7 0x0100 /* 217+ */
+#define DEVICE_HANDLE_EFP8 0x0200 /* 217+ */
#define DEVICE_HANDLE_LFP1 0x0008
#define DEVICE_HANDLE_LFP2 0x0080
@@ -275,27 +298,27 @@ struct bdb_general_features {
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11 /* 193 */
-#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_DPE 11 /* 193+ */
+#define DVO_PORT_HDMIE 12 /* 193+ */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
-#define DVO_PORT_DPG 15 /* 217 */
-#define DVO_PORT_HDMIG 16 /* 217 */
-#define DVO_PORT_DPH 17 /* 217 */
-#define DVO_PORT_HDMIH 18 /* 217 */
-#define DVO_PORT_DPI 19 /* 217 */
-#define DVO_PORT_HDMII 20 /* 217 */
-#define DVO_PORT_MIPIA 21 /* 171 */
-#define DVO_PORT_MIPIB 22 /* 171 */
-#define DVO_PORT_MIPIC 23 /* 171 */
-#define DVO_PORT_MIPID 24 /* 171 */
-
-#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */
-#define HDMI_MAX_DATA_RATE_297 1 /* 204 */
-#define HDMI_MAX_DATA_RATE_165 2 /* 204 */
-#define HDMI_MAX_DATA_RATE_594 3 /* 249 */
-#define HDMI_MAX_DATA_RATE_340 4 /* 249 */
-#define HDMI_MAX_DATA_RATE_300 5 /* 249 */
+#define DVO_PORT_DPG 15 /* 217+ */
+#define DVO_PORT_HDMIG 16 /* 217+ */
+#define DVO_PORT_DPH 17 /* 217+ */
+#define DVO_PORT_HDMIH 18 /* 217+ */
+#define DVO_PORT_DPI 19 /* 217+ */
+#define DVO_PORT_HDMII 20 /* 217+ */
+#define DVO_PORT_MIPIA 21 /* 171+ */
+#define DVO_PORT_MIPIB 22 /* 171+ */
+#define DVO_PORT_MIPIC 23 /* 171+ */
+#define DVO_PORT_MIPID 24 /* 171+ */
+
+#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204+ */
+#define HDMI_MAX_DATA_RATE_297 1 /* 204+ */
+#define HDMI_MAX_DATA_RATE_165 2 /* 204+ */
+#define HDMI_MAX_DATA_RATE_594 3 /* 249+ */
+#define HDMI_MAX_DATA_RATE_340 4 /* 249+ */
+#define HDMI_MAX_DATA_RATE_300 5 /* 249+ */
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
@@ -362,10 +385,10 @@ enum vbt_gmbus_ddi {
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
- * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
- * space for the full structure below, and initialize the tail not actually
- * present in VBT to zeros. Accessing those fields is fine, as long as the
- * default zero is taken into account, again according to the BDB version.
+ * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * reserve space for the full structure below, and initialize the tail not
+ * actually present in VBT to zeros. Accessing those fields is fine, as long as
+ * the default zero is taken into account, again according to the BDB version.
*
* BDB versions 155 and below are considered legacy, and version 155 seems to be
* a baseline for some of the VBT documentation. When adding new fields, please
@@ -379,20 +402,30 @@ struct child_device_config {
u8 device_id[10]; /* ascii string */
struct {
u8 i2c_speed;
- u8 dp_onboard_redriver; /* 158 */
- u8 dp_ondock_redriver; /* 158 */
- u8 hdmi_level_shifter_value:5; /* 169 */
- u8 hdmi_max_data_rate:3; /* 204 */
- u16 dtd_buf_ptr; /* 161 */
- u8 edidless_efp:1; /* 161 */
- u8 compression_enable:1; /* 198 */
- u8 compression_method_cps:1; /* 198 */
- u8 ganged_edp:1; /* 202 */
- u8 reserved0:4;
- u8 compression_structure_index:4; /* 198 */
- u8 reserved1:4;
- u8 slave_port; /* 202 */
- u8 reserved2;
+ u8 dp_onboard_redriver_preemph:3; /* 158+ */
+ u8 dp_onboard_redriver_vswing:3; /* 158+ */
+ u8 dp_onboard_redriver_present:1; /* 158+ */
+ u8 reserved0:1;
+ u8 dp_ondock_redriver_preemph:3; /* 158+ */
+ u8 dp_ondock_redriver_vswing:3; /* 158+ */
+ u8 dp_ondock_redriver_present:1; /* 158+ */
+ u8 reserved1:1;
+ u8 hdmi_level_shifter_value:5; /* 158+ */
+ u8 hdmi_max_data_rate:3; /* 204+ */
+ u16 dtd_buf_ptr; /* 161+ */
+ u8 edidless_efp:1; /* 161+ */
+ u8 compression_enable:1; /* 198+ */
+ u8 compression_method_cps:1; /* 198+ */
+ u8 ganged_edp:1; /* 202+ */
+ u8 lttpr_non_transparent:1; /* 235+ */
+ u8 disable_compression_for_ext_disp:1; /* 251+ */
+ u8 reserved2:2;
+ u8 compression_structure_index:4; /* 198+ */
+ u8 reserved3:4;
+ u8 hdmi_max_frl_rate:4; /* 237+ */
+ u8 hdmi_max_frl_rate_valid:1; /* 237+ */
+ u8 reserved4:3; /* 237+ */
+ u8 reserved5;
} __packed;
} __packed;
@@ -412,16 +445,16 @@ struct child_device_config {
u8 ddc2_pin;
} __packed;
struct {
- u8 efp_routed:1; /* 158 */
- u8 lane_reversal:1; /* 184 */
- u8 lspcon:1; /* 192 */
- u8 iboost:1; /* 196 */
- u8 hpd_invert:1; /* 196 */
- u8 use_vbt_vswing:1; /* 218 */
- u8 flag_reserved:2;
- u8 hdmi_support:1; /* 158 */
- u8 dp_support:1; /* 158 */
- u8 tmds_support:1; /* 158 */
+ u8 efp_routed:1; /* 158+ */
+ u8 lane_reversal:1; /* 184+ */
+ u8 lspcon:1; /* 192+ */
+ u8 iboost:1; /* 196+ */
+ u8 hpd_invert:1; /* 196+ */
+ u8 use_vbt_vswing:1; /* 218+ */
+ u8 dp_max_lane_count:2; /* 244+ */
+ u8 hdmi_support:1; /* 158+ */
+ u8 dp_support:1; /* 158+ */
+ u8 tmds_support:1; /* 158+ */
u8 support_reserved:5;
u8 aux_channel;
u8 dongle_detect;
@@ -429,7 +462,7 @@ struct child_device_config {
} __packed;
u8 pipe_cap:2;
- u8 sdvo_stall:1; /* 158 */
+ u8 sdvo_stall:1; /* 158+ */
u8 hpd_status:2;
u8 integrated_encoder:1;
u8 capabilities_reserved:2;
@@ -437,21 +470,21 @@ struct child_device_config {
union {
u8 dvo2_wiring;
- u8 mipi_bridge_type; /* 171 */
+ u8 mipi_bridge_type; /* 171+ */
} __packed;
u16 extended_type;
u8 dvo_function;
- u8 dp_usb_type_c:1; /* 195 */
- u8 tbt:1; /* 209 */
- u8 flags2_reserved:2; /* 195 */
- u8 dp_port_trace_length:4; /* 209 */
- u8 dp_gpio_index; /* 195 */
- u16 dp_gpio_pin_num; /* 195 */
- u8 dp_iboost_level:4; /* 196 */
- u8 hdmi_iboost_level:4; /* 196 */
- u8 dp_max_link_rate:3; /* 216/230 GLK+ */
- u8 dp_max_link_rate_reserved:5; /* 216/230 */
+ u8 dp_usb_type_c:1; /* 195+ */
+ u8 tbt:1; /* 209+ */
+ u8 flags2_reserved:2; /* 195+ */
+ u8 dp_port_trace_length:4; /* 209+ */
+ u8 dp_gpio_index; /* 195+ */
+ u16 dp_gpio_pin_num; /* 195+ */
+ u8 dp_iboost_level:4; /* 196+ */
+ u8 hdmi_iboost_level:4; /* 196+ */
+ u8 dp_max_link_rate:3; /* 216+ */
+ u8 dp_max_link_rate_reserved:5; /* 216+ */
} __packed;
struct bdb_general_definitions {
@@ -459,7 +492,7 @@ struct bdb_general_definitions {
u8 crt_ddc_gmbus_pin;
/* DPMS bits */
- u8 dpms_acpi:1;
+ u8 dpms_non_acpi:1;
u8 skip_boot_crt_detect:1;
u8 dpms_aim:1;
u8 rsvd1:5; /* finish byte */
@@ -488,25 +521,25 @@ struct bdb_general_definitions {
struct psr_table {
/* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
+ u8 full_link:1; /* 165+ */
+ u8 require_aux_to_wakeup:1; /* 165+ */
u8 feature_bits_rsvd:6;
/* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
+ u8 idle_frames:4; /* 165+ */
+ u8 lines_to_wait:3; /* 165+ */
u8 wait_times_rsvd:1;
/* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
+ u16 tp1_wakeup_time; /* 165+ */
+ u16 tp2_tp3_wakeup_time; /* 165+ */
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
/* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
+ u32 psr2_tp2_tp3_wakeup_time; /* 226+ */
} __packed;
/*
@@ -519,9 +552,10 @@ struct bdb_psr {
#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
+ /* Driver bits */
u8 boot_dev_algorithm:1;
- u8 block_display_switch:1;
- u8 allow_display_switch:1;
+ u8 allow_display_switch_dvd:1;
+ u8 allow_display_switch_dos:1;
u8 hotplug_dvo:1;
u8 dual_view_zoom:1;
u8 int15h_hook:1;
@@ -533,6 +567,7 @@ struct bdb_driver_features {
u8 boot_mode_bpp;
u8 boot_mode_refresh;
+ /* Extended Driver Bits 1 */
u16 enable_lfp_primary:1;
u16 selective_mode_pruning:1;
u16 dual_frequency:1;
@@ -548,29 +583,40 @@ struct bdb_driver_features {
u16 tv_hotplug:1;
u16 hdmi_config:2;
- u8 static_display:1;
- u8 reserved2:7;
+ /* Driver Flags 1 */
+ u8 static_display:1; /* 163+ */
+ u8 embedded_platform:1; /* 163+ */
+ u8 display_subsystem_enable:1; /* 163+ */
+ u8 reserved0:5;
+
u16 legacy_crt_max_x;
u16 legacy_crt_max_y;
u8 legacy_crt_max_refresh;
- u8 hdmi_termination;
- u8 custom_vbt_version;
- /* Driver features data block */
- u16 rmpm_enabled:1;
- u16 s2ddt_enabled:1;
- u16 dpst_enabled:1;
- u16 bltclt_enabled:1;
- u16 adb_enabled:1;
- u16 drrs_enabled:1;
- u16 grs_enabled:1;
- u16 gpmt_enabled:1;
- u16 tbt_enabled:1;
- u16 psr_enabled:1;
- u16 ips_enabled:1;
- u16 reserved3:1;
- u16 dmrrs_enabled:1;
- u16 reserved4:2;
+ /* Extended Driver Bits 2 */
+ u8 hdmi_termination:1;
+ u8 cea861d_hdmi_support:1;
+ u8 self_refresh_enable:1;
+ u8 reserved1:5;
+
+ u8 custom_vbt_version; /* 155+ */
+
+ /* Driver Feature Flags */
+ u16 rmpm_enabled:1; /* 165+ */
+ u16 s2ddt_enabled:1; /* 165+ */
+ u16 dpst_enabled:1; /* 165-227 */
+ u16 bltclt_enabled:1; /* 165+ */
+ u16 adb_enabled:1; /* 165-227 */
+ u16 drrs_enabled:1; /* 165-227 */
+ u16 grs_enabled:1; /* 165+ */
+ u16 gpmt_enabled:1; /* 165+ */
+ u16 tbt_enabled:1; /* 165+ */
+ u16 psr_enabled:1; /* 165-227 */
+ u16 ips_enabled:1; /* 165+ */
+ u16 dpfs_enabled:1; /* 165+ */
+ u16 dmrrs_enabled:1; /* 174-227 */
+ u16 adt_enabled:1; /* ???-228 */
+ u16 hpd_wake:1; /* 201-240 */
u16 pc_feature_valid:1;
} __packed;
@@ -657,7 +703,7 @@ struct bdb_sdvo_panel_dtds {
struct edp_fast_link_params {
- u8 rate:4;
+ u8 rate:4; /* ???-223 */
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
@@ -690,18 +736,18 @@ struct bdb_edp {
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature; /* 162 */
- u16 edp_t3_optimization; /* 165 */
- u64 edp_vswing_preemph; /* 173 */
- u16 fast_link_training; /* 182 */
- u16 dpcd_600h_write_required; /* 185 */
- struct edp_pwm_delays pwm_delays[16]; /* 186 */
- u16 full_link_params_provided; /* 199 */
- struct edp_full_link_params full_link_params[16]; /* 199 */
- u16 apical_enable; /* 203 */
- struct edp_apical_params apical_params[16]; /* 203 */
- u16 edp_fast_link_training_rate[16]; /* 224 */
- u16 edp_max_port_link_rate[16]; /* 244 */
+ u16 edp_s3d_feature; /* 162+ */
+ u16 edp_t3_optimization; /* 165+ */
+ u64 edp_vswing_preemph; /* 173+ */
+ u16 fast_link_training; /* 182+ */
+ u16 dpcd_600h_write_required; /* 185+ */
+ struct edp_pwm_delays pwm_delays[16]; /* 186+ */
+ u16 full_link_params_provided; /* 199+ */
+ struct edp_full_link_params full_link_params[16]; /* 199+ */
+ u16 apical_enable; /* 203+ */
+ struct edp_apical_params apical_params[16]; /* 203+ */
+ u16 edp_fast_link_training_rate[16]; /* 224+ */
+ u16 edp_max_port_link_rate[16]; /* 244+ */
} __packed;
/*
@@ -710,14 +756,14 @@ struct bdb_edp {
struct bdb_lvds_options {
u8 panel_type;
- u8 panel_type2; /* 212 */
+ u8 panel_type2; /* 212+ */
/* LVDS capabilities, stored in a dword */
u8 pfit_mode:2;
u8 pfit_text_mode_enhanced:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_ratio_auto:1;
u8 pixel_dither:1;
- u8 lvds_edid:1;
+ u8 lvds_edid:1; /* ???-240 */
u8 rsvd2:1;
u8 rsvd4;
/* LVDS Panel channel bits stored here */
@@ -731,11 +777,11 @@ struct bdb_lvds_options {
/* LVDS panel type bits stored here */
u32 dps_panel_type_bits;
/* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
+ u32 blt_control_type_bits; /* ???-240 */
- u16 lcdvcc_s0_enable; /* 200 */
- u32 rotation; /* 228 */
- u32 position; /* 240 */
+ u16 lcdvcc_s0_enable; /* 200+ */
+ u32 rotation; /* 228+ */
+ u32 position; /* 240+ */
} __packed;
/*
@@ -756,7 +802,7 @@ struct lvds_lfp_data_ptr {
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries;
struct lvds_lfp_data_ptr ptr[16];
- struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */
+ struct lvds_lfp_data_ptr_table panel_name; /* (156-163?)+ */
} __packed;
/*
@@ -808,20 +854,20 @@ struct lvds_lfp_panel_name {
} __packed;
struct lvds_lfp_black_border {
- u8 top; /* 227 */
- u8 bottom; /* 227 */
- u8 left; /* 238 */
- u8 right; /* 238 */
+ u8 top; /* 227+ */
+ u8 bottom; /* 227+ */
+ u8 left; /* 238+ */
+ u8 right; /* 238+ */
} __packed;
struct bdb_lvds_lfp_data_tail {
- struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */
- u16 scaling_enable; /* 187 */
- u8 seamless_drrs_min_refresh_rate[16]; /* 188 */
- u8 pixel_overlap_count[16]; /* 208 */
- struct lvds_lfp_black_border black_border[16]; /* 227 */
- u16 dual_lfp_port_sync_enable; /* 231 */
- u16 gpu_dithering_for_banding_artifacts; /* 245 */
+ struct lvds_lfp_panel_name panel_name[16]; /* (156-163?)+ */
+ u16 scaling_enable; /* 187+ */
+ u8 seamless_drrs_min_refresh_rate[16]; /* 188+ */
+ u8 pixel_overlap_count[16]; /* 208+ */
+ struct lvds_lfp_black_border black_border[16]; /* 227+ */
+ u16 dual_lfp_port_sync_enable; /* 231+ */
+ u16 gpu_dithering_for_banding_artifacts; /* 245+ */
} __packed;
/*
@@ -836,7 +882,7 @@ struct lfp_backlight_data_entry {
u8 active_low_pwm:1;
u8 obsolete1:5;
u16 pwm_freq_hz;
- u8 min_brightness; /* Obsolete from 234+ */
+ u8 min_brightness; /* ???-233 */
u8 obsolete2;
u8 obsolete3;
} __packed;
@@ -859,7 +905,7 @@ struct lfp_brightness_level {
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
- u8 level[16]; /* Obsolete from 234+ */
+ u8 level[16]; /* ???-233 */
struct lfp_backlight_control_method backlight_control[16];
struct lfp_brightness_level brightness_level[16]; /* 234+ */
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
@@ -874,8 +920,8 @@ struct lfp_power_features {
u8 reserved1:1;
u8 power_conservation_pref:3;
u8 reserved2:1;
- u8 lace_enabled_status:1;
- u8 lace_support:1;
+ u8 lace_enabled_status:1; /* 210+ */
+ u8 lace_support:1; /* 210+ */
u8 als_enable:1;
} __packed;
@@ -895,24 +941,24 @@ struct aggressiveness_profile2_entry {
} __packed;
struct bdb_lfp_power {
- struct lfp_power_features features;
+ struct lfp_power_features features; /* ???-227 */
struct als_data_entry als[5];
- u8 lace_aggressiveness_profile:3;
+ u8 lace_aggressiveness_profile:3; /* 210-227 */
u8 reserved1:5;
- u16 dpst;
- u16 psr;
- u16 drrs;
- u16 lace_support;
- u16 adt;
- u16 dmrrs;
- u16 adb;
- u16 lace_enabled_status;
- struct aggressiveness_profile_entry aggressiveness[16];
- u16 hobl; /* 232+ */
- u16 vrr_feature_enabled; /* 233+ */
- u16 elp; /* 247+ */
- u16 opst; /* 247+ */
- struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+ u16 dpst; /* 228+ */
+ u16 psr; /* 228+ */
+ u16 drrs; /* 228+ */
+ u16 lace_support; /* 228+ */
+ u16 adt; /* 228+ */
+ u16 dmrrs; /* 228+ */
+ u16 adb; /* 228+ */
+ u16 lace_enabled_status; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ u16 hobl; /* 232+ */
+ u16 vrr_feature_enabled; /* 233+ */
+ u16 elp; /* 247+ */
+ u16 opst; /* 247+ */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
} __packed;
/*
@@ -922,10 +968,10 @@ struct bdb_lfp_power {
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */
- struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */
- u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175+ */
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177+ */
+ struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186+ */
+ u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 43e1bbc1e303..269f9792390d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -344,7 +344,7 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!INTEL_INFO(i915)->display.has_dsc)
+ if (!RUNTIME_INFO(i915)->has_dsc)
return false;
if (DISPLAY_VER(i915) >= 12)
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
@@ -597,6 +596,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_VER_MIN_SHIFT |
vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->dsc_version_minor == 2)
+ pps_val |= DSC_ALT_ICH_SEL;
if (vdsc_cfg->block_pred_enable)
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 04250a0fec3c..5eac99021875 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -142,11 +142,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* For XE_LPD+, we use guardband and pipeline override
* is deprecated.
*/
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 13) {
+ /*
+		 * FIXME: Subtract the Window2 delay from the value below.
+		 *
+		 * Window2 specifies the time required to program the DSB
+		 * (Window2) in number of scan lines. Assume 0 when no DSB is
+		 * used.
+ */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
- i915->window2_delay;
- else
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+ } else {
/*
* FIXME: s/4/framestart_delay/ to get consistent
* earliest/latest points for register latching regardless
@@ -159,6 +164,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.pipeline_full =
min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ }
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index c11e15a93164..7cb713043408 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_atomic_plane.h"
@@ -15,11 +14,11 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbc.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "skl_watermark.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
@@ -1856,8 +1855,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int min_scale = DRM_PLANE_HELPER_NO_SCALING;
- int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int min_scale = DRM_PLANE_NO_SCALING;
+ int max_scale = DRM_PLANE_NO_SCALING;
int ret;
ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1929,7 +1928,7 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum intel_fbc_id fbc_id, enum plane_id plane_id)
{
- if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0)
+ if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0)
return false;
return plane_id == PLANE_PRIMARY;
@@ -1941,7 +1940,7 @@ static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe);
if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id))
- return dev_priv->fbc[fbc_id];
+ return dev_priv->display.fbc[fbc_id];
else
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
new file mode 100644
index 000000000000..01b0932757ed
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -0,0 +1,3562 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_blend.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_bw.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "skl_watermark.h"
+
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+
+static void skl_sagv_disable(struct drm_i915_private *i915);
+
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
+{
+ u8 enabled_slices = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (intel_uncore_read(&i915->uncore,
+ DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ enabled_slices |= BIT(slice);
+ }
+
+ return enabled_slices;
+}
+
+/*
+ * FIXME: We still don't have the proper code to detect whether we need to
+ * apply the WA, so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9;
+}
+
+static bool
+intel_has_sagv(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+}
+
+static u32
+intel_sagv_block_time(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14) {
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
+
+ return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = snb_pcode_read(&i915->uncore,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ return 0;
+ }
+
+ return val;
+ } else if (DISPLAY_VER(i915) == 11) {
+ return 10;
+ } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ return 30;
+ } else {
+ return 0;
+ }
+}
+
+static void intel_sagv_init(struct drm_i915_private *i915)
+{
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+
+ /*
+ * Probe to see if we have working SAGV control.
+ * For icl+ this was already determined by intel_bw_init_hw().
+ */
+ if (DISPLAY_VER(i915) < 11)
+ skl_sagv_disable(i915);
+
+ drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+
+ i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+
+ drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+
+ /* avoid overflow when adding with wm0 latency/etc. */
+ if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ "Excessive SAGV block time %u, ignoring\n",
+ i915->display.sagv.block_time_us))
+ i915->display.sagv.block_time_us = 0;
+
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.block_time_us = 0;
+}
+
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+static void skl_sagv_enable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
+
+ /* We don't need to wait for SAGV when enabling */
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to enable SAGV\n");
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_ENABLED;
+}
+
+static void skl_sagv_disable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_DISABLED;
+}
+
+static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (!intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_disable(i915);
+}
+
+static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_enable(i915);
+}
+
+static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
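/*
 * A minimal worked example of the masking order above, using made-up
 * masks rather than values from any real platform:
 *
 *   old_bw_state->qgv_points_mask = 0x3
 *   new_bw_state->qgv_points_mask = 0x6
 *
 * icl_sagv_pre_plane_update() passes the union 0x3 | 0x6 = 0x7 to
 * icl_pcode_restrict_qgv_points(), so the combined set stays in effect
 * while the old and new configurations may both still be live.
 * icl_sagv_post_plane_update() then passes only the new mask 0x6 once the
 * update is done, matching the "mask before, unmask after" rule spelled
 * out in the comments above.
 */
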
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+	 * This is different from the situation where we have SAGV but just
+	 * can't afford it due to DBuf limitations - if SAGV is completely
+	 * disabled in the BIOS, we are not even allowed to send a PCode
+	 * request, as it will throw an error. So we have to check it here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_pre_plane_update(state);
+ else
+ skl_sagv_pre_plane_update(state);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+	 * This is different from the situation where we have SAGV but just
+	 * can't afford it due to DBuf limitations - if SAGV is completely
+	 * disabled in the BIOS, we are not even allowed to send a PCode
+	 * request, as it will throw an error. So we have to check it here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_post_plane_update(state);
+ else
+ skl_sagv_post_plane_update(state);
+}
+
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ int max_level = INT_MAX;
+
+ if (!intel_has_sagv(i915))
+ return false;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ int level;
+
+ /* Skip this plane if it's not enabled */
+ if (!wm->wm[0].enable)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(i915);
+ !wm->wm[level].enable; --level)
+ { }
+
+ /* Highest common enabled wm level for all planes */
+ max_level = min(level, max_level);
+ }
+
+ /* No enabled planes? */
+ if (max_level == INT_MAX)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * All enabled planes must have enabled a common wm level that
+ * can tolerate memory latencies higher than sagv_block_time_us
+ */
+ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
+ return false;
+ }
+
+ return true;
+}
+
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (wm->wm[0].enable && !wm->sagv.wm0.enable)
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_crtc_can_enable_sagv(crtc_state);
+ else
+ return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(i915) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ int ret;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (intel_can_enable_sagv(i915, new_bw_state) !=
+ intel_can_enable_sagv(i915, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case)
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_can_enable_sagv(i915, new_bw_state);
+ }
+
+ return 0;
+}
+
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+ u16 start, u16 end)
+{
+ entry->start = start;
+ entry->end = end;
+
+ return end;
+}
+
+static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->display.dbuf.size /
+ hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
+}
+
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
+}
+
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+ struct skl_ddb_entry ddb;
+
+ if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+ slice_mask = BIT(DBUF_S1);
+ else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+ slice_mask = BIT(DBUF_S3);
+
+ skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+ return ddb.start;
+}
+
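/*
 * A worked example of the slice -> DDB range mapping above, assuming a
 * hypothetical DBuf of 4096 blocks split over four slices (slice_size =
 * 1024) and DBUF_S1..DBUF_S4 mapping to bit positions 0..3:
 *
 *   slice_mask = BIT(DBUF_S3) | BIT(DBUF_S4)          (binary 1100)
 *   ddb->start = (ffs(0b1100) - 1) * 1024 = 2 * 1024 = 2048
 *   ddb->end   =  fls(0b1100)      * 1024 = 4 * 1024 = 4096
 *
 * mbus_ddb_offset() reduces the same mask to BIT(DBUF_S3) and returns the
 * start of that single-slice range, i.e. 2048.
 */
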
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+ enum dbuf_slice start_slice, end_slice;
+ u8 slice_mask = 0;
+
+ if (!skl_ddb_entry_size(entry))
+ return 0;
+
+ start_slice = entry->start / slice_size;
+ end_slice = (entry->end - 1) / slice_size;
+
+ /*
+	 * In the worst case a per-plane DDB entry can span multiple slices,
+	 * but a single entry is always contiguous.
+ */
+ while (start_slice <= end_slice) {
+ slice_mask |= BIT(start_slice);
+ start_slice++;
+ }
+
+ return slice_mask;
+}
+
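/*
 * Continuing the hypothetical slice_size = 1024 example, a DDB entry of
 * { .start = 512, .end = 1536 } gives:
 *
 *   start_slice = 512 / 1024        = 0
 *   end_slice   = (1536 - 1) / 1024 = 1
 *
 * so skl_ddb_dbuf_slice_mask() returns BIT(0) | BIT(1): the entry
 * straddles the first two slices even though it only covers half of each.
 */
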
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ /*
+	 * The watermark/DDB requirement depends heavily on the width of the
+	 * framebuffer, so instead of allocating DDB equally among pipes,
+	 * distribute it based on the resolution/width of the display.
+ */
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+
+ return hdisplay;
+}
+
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
+
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(i915, pipe) {
+ int weight = dbuf_state->weight[pipe];
+
+ /*
+		 * Do not account for pipes using other slice sets. Luckily, as
+		 * of the current BSpec, slice sets do not partially intersect
+		 * (pipes share either the same single slice or the same slice
+		 * set, i.e. no partial intersection), so it is enough to check
+		 * for equality for now.
+ */
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
+ continue;
+
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
+
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset = 0;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
+
+ if (new_dbuf_state->weight[pipe] == 0) {
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
+ goto out;
+ }
+
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
+
+ skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
+
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+ ddb_slices.start - mbus_offset + start,
+ ddb_slices.start - mbus_offset + end);
+
+out:
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
+ skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
+
+ return 0;
+}
+
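/*
 * A small worked example of the proportional DDB split above, using
 * made-up numbers: two active pipes share the same slice set, pipe A has
 * hdisplay (and therefore dbuf weight) 1920, pipe B has 1280, and
 * ddb_range_size is 1024 blocks.
 *
 *   weight_total = 1920 + 1280 = 3200
 *
 *   pipe A: weight_start = 0,    weight_end = 1920
 *           start = 1024 * 0    / 3200 = 0
 *           end   = 1024 * 1920 / 3200 = 614
 *
 *   pipe B: weight_start = 1920, weight_end = 3200
 *           start = 1024 * 1920 / 3200 = 614
 *           end   = 1024 * 3200 / 3200 = 1024
 *
 * The wider pipe gets the proportionally larger share [0, 614) and the
 * narrower one gets [614, 1024), with no gap or overlap between them.
 */
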
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
+{
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ drm_WARN_ON(&i915->drm, ret);
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ skl_ddb_entry_init(entry,
+ REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+ REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
+ if (entry->end)
+ entry->end++;
+}
+
+static void
+skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+ const enum pipe pipe,
+ const enum plane_id plane_id,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ u32 val;
+
+ /* Cursor doesn't support NV12/planar, so no extra calculation needed */
+ if (plane_id == PLANE_CURSOR) {
+ val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(ddb, val);
+ return;
+ }
+
+ val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb, val);
+
+ if (DISPLAY_VER(i915) >= 11)
+ return;
+
+ val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb_y, val);
+}
+
+static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+ enum plane_id plane_id;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ wakeref = intel_display_power_get_if_enabled(i915, power_domain);
+ if (!wakeref)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(i915, pipe,
+ plane_id,
+ &ddb[plane_id],
+ &ddb_y[plane_id]);
+
+ intel_display_power_put(i915, power_domain, wakeref);
+}
+
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+ bool join_mbus;
+};
+
+/*
+ * Table taken from BSpec 12716.
+ * Pipes have some preferred DBuf slice affinity, plus there are some
+ * hardcoded requirements on how those should be distributed for
+ * multi-pipe scenarios. With more DBuf slices the algorithm would get even
+ * messier and less readable, so we use a table almost as-is from the BSpec
+ * itself - that way it is at least easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from BSpec 49255.
+ * Pipes have some preferred DBuf slice affinity, plus there are some
+ * hardcoded requirements on how those should be distributed for
+ * multi-pipe scenarios. With more DBuf slices the algorithm would get even
+ * messier and less readable, so we use a table almost as-is from the BSpec
+ * itself - that way it is at least easier to compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+	{}
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].join_mbus;
+ }
+ return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+ return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
+
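+/*
+ * Look up which DBuf slices a given pipe should use for a given set of
+ * active pipes. For example, per dg2_allowed_dbufs[], with pipes A and B
+ * active, pipe A maps to DBUF_S1 and pipe B to DBUF_S2.
+ */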
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
+
+/*
+ * This function finds an entry with the same enabled pipe configuration and
+ * returns the corresponding DBuf slice mask as stated in the BSpec for the
+ * particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+	/*
+	 * FIXME: For ICL this is still a bit unclear as a previous BSpec
+	 * revision required calculating a "pipe ratio" in order to determine
+	 * whether one or two slices can be used for single pipe configurations
+	 * as an additional constraint to the existing table.
+	 * However, based on recent info, it should not be "pipe ratio"
+	 * but rather the ratio between pixel_rate and cdclk with additional
+	 * constants, so for now we use only the table until this is
+	 * clarified.
+	 */
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
+}
+
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
+}
+
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_DG2(i915))
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) >= 13)
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 12)
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 11)
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+	/*
+	 * For anything else just return one slice for now.
+	 * Should be extended for other platforms.
+	 */
+ return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
+}
+
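+/*
+ * On display version 13+ an async flip of an async-flip capable plane only
+ * needs its WM0 watermark; skl_compute_plane_wm() rejects the higher levels
+ * for such planes.
+ */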
+static bool
+use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
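+/*
+ * Sum the relative data rates of all non-cursor planes on the crtc; before
+ * display version 11 the separate Y-plane rates are included as well.
+ */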
+static u64
+skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ u64 data_rate = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->rel_data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->rel_data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id,
+ int level)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (level == 0 && pipe_wm->use_sagv_wm)
+ return &wm->sagv.wm0;
+
+ return &wm->wm[level];
+}
+
+static const struct skl_wm_level *
+skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (pipe_wm->use_sagv_wm)
+ return &wm->sagv.trans_wm;
+
+ return &wm->trans_wm;
+}
+
+/*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+static void
+skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
+ memset(wm, 0, sizeof(*wm));
+}
+
+static void
+skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
+ const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ memset(wm, 0, sizeof(*wm));
+ memset(uv_wm, 0, sizeof(*uv_wm));
+ }
+}
+
+static bool icl_need_wm1_wa(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ /*
+ * Wa_1408961008:icl, ehl
+ * Wa_14012656716:tgl, adl
+ * Underruns with WM1+ disabled
+ */
+ return DISPLAY_VER(i915) == 11 ||
+ (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+}
+
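+/* Running state while carving a pipe's DDB allocation up among its planes */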
+struct skl_plane_ddb_iter {
+ u64 data_rate;
+ u16 start, size;
+};
+
+static void
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
+ const struct skl_wm_level *wm,
+ u64 data_rate)
+{
+ u16 size, extra = 0;
+
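+	/*
+	 * Give the plane its proportional share of the remaining DDB space:
+	 * e.g. with iter->size == 100 blocks left and this plane contributing
+	 * a quarter of the remaining data rate, extra comes out to roughly 25
+	 * blocks on top of the plane's minimum allocation.
+	 */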
+ if (data_rate) {
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate,
+ iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+ }
+
+	/*
+	 * Keep the ddb entries of all disabled planes explicitly zeroed
+	 * to avoid skl_ddb_add_affected_planes() adding them to
+	 * the state when other planes change their allocations.
+	 */
+ size = wm->min_ddb_alloc + extra;
+ if (size)
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + size);
+}
+
+static int
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
+ struct skl_plane_ddb_iter iter;
+ enum plane_id plane_id;
+ u16 cursor_size;
+ u32 blocks;
+ int level;
+
+ /* Clear the partitioning for disabled planes. */
+ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ iter.start = alloc->start;
+ iter.size = skl_ddb_entry_size(alloc);
+ if (iter.size == 0)
+ return 0;
+
+ /* Allocate fixed number of blocks for cursor. */
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
+
+ iter.data_rate = skl_total_relative_data_rate(crtc_state);
+
+ /*
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
+ */
+ for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ drm_WARN_ON(&i915->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
+ blocks = U32_MAX;
+ break;
+ }
+ continue;
+ }
+
+ blocks += wm->wm[level].min_ddb_alloc;
+ blocks += wm->uv_wm[level].min_ddb_alloc;
+ }
+
+ if (blocks <= iter.size) {
+ iter.size -= blocks;
+ break;
+ }
+ }
+
+ if (level < 0) {
+ drm_dbg_kms(&i915->drm,
+ "Requested display configuration exceeds system DDB limitations");
+ drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ blocks, iter.size);
+ return -EINVAL;
+ }
+
+ /* avoid the WARN later when we don't allocate any extra DDB */
+ if (iter.data_rate == 0)
+ iter.size = 0;
+
+ /*
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ } else {
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ }
+ }
+ drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id))
+ skl_check_nv12_wm_level(&wm->wm[level],
+ &wm->uv_wm[level],
+ ddb_y, ddb);
+ else
+ skl_check_wm_level(&wm->wm[level], ddb);
+
+ if (icl_need_wm1_wa(i915, plane_id) &&
+ level == 1 && wm->wm[0].enable) {
+ wm->wm[level].blocks = wm->wm[0].blocks;
+ wm->wm[level].lines = wm->wm[0].lines;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
+ }
+ }
+
+ /*
+ * Go back and disable the transition and SAGV watermarks
+ * if it turns out we don't have enough DDB blocks for them.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_check_wm_level(&wm->trans_wm, ddb_y);
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+
+ skl_check_wm_level(&wm->trans_wm, ddb);
+ }
+
+ skl_check_wm_level(&wm->sagv.wm0, ddb);
+ skl_check_wm_level(&wm->sagv.trans_wm, ddb);
+ }
+
+ return 0;
+}
+
+/*
+ * The max latency should be 257 (the max the punit can encode is 255 and we
+ * add 2us for the read latency), and cpp should always be <= 8, so that
+ * should allow a pixel_rate up to ~2 GHz, which seems sufficient since the
+ * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
+static uint_fixed_16_16_t
+skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
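+	/*
+	 * Method 1: blocks needed to cover the memory latency, i.e. roughly
+	 * latency[us] * pixel_rate[kHz] * cpp / (1000 * dbuf_block_size)
+	 * (the pixel rate is assumed to be in kHz here, hence the /1000).
+	 */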
+ if (latency == 0)
+ return FP_16_16_MAX;
+
+ wm_intermediate_val = latency * pixel_rate * cpp;
+ ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ ret = add_fixed16_u32(ret, 1);
+
+ return ret;
+}
+
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
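+	/*
+	 * Method 2: number of lines scanned out during the latency, rounded
+	 * up, times the blocks needed per line:
+	 * ceil(latency * pixel_rate / (htotal * 1000)) * plane_blocks_per_line
+	 */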
+ if (latency == 0)
+ return FP_16_16_MAX;
+
+ wm_intermediate_val = latency * pixel_rate;
+ wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+ pipe_htotal * 1000);
+ ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
+ return ret;
+}
+
+static uint_fixed_16_16_t
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 pixel_rate;
+ u32 crtc_htotal;
+ uint_fixed_16_16_t linetime_us;
+
+ if (!crtc_state->hw.active)
+ return u32_to_fixed16(0);
+
+ pixel_rate = crtc_state->pixel_rate;
+
+ if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ return u32_to_fixed16(0);
+
+ crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
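+	/* pixel_rate is in kHz, so htotal * 1000 / pixel_rate yields microseconds */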
+ linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
+
+ return linetime_us;
+}
+
+static int
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 interm_pbpl;
+
+	/* only planar formats have two planes */
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ drm_dbg_kms(&i915->drm,
+ "Non planar format have single plane\n");
+ return -EINVAL;
+ }
+
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_4_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
+
+ wp->width = width;
+ if (color_plane == 1 && wp->is_planar)
+ wp->width /= 2;
+
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ wp->dbuf_block_size = 256;
+ else
+ wp->dbuf_block_size = 512;
+
+ if (drm_rotation_90_or_270(rotation)) {
+ switch (wp->cpp) {
+ case 1:
+ wp->y_min_scanlines = 16;
+ break;
+ case 2:
+ wp->y_min_scanlines = 8;
+ break;
+ case 4:
+ wp->y_min_scanlines = 4;
+ break;
+ default:
+ MISSING_CASE(wp->cpp);
+ return -EINVAL;
+ }
+ } else {
+ wp->y_min_scanlines = 4;
+ }
+
+ if (skl_needs_memory_bw_wa(i915))
+ wp->y_min_scanlines *= 2;
+
+ wp->plane_bytes_per_line = wp->width * wp->cpp;
+ if (wp->y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+ wp->y_min_scanlines,
+ wp->dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+ wp->y_min_scanlines);
+ } else {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+ wp->dbuf_block_size);
+
+ if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ }
+
+ wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+ wp->plane_blocks_per_line);
+
+ wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+
+ return 0;
+}
+
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int width;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->hw.rotation,
+ intel_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
+static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+{
+ if (DISPLAY_VER(i915) >= 10)
+ return true;
+
+	/* The number of lines is ignored for the level 0 watermark. */
+ return level > 0;
+}
+
+static int skl_wm_max_lines(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 255;
+ else
+ return 31;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t selected_result;
+ u32 blocks, lines, min_ddb_alloc = 0;
+
+ if (latency == 0 ||
+ (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
+ latency += 15;
+
+ method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ wp->cpp, latency, wp->dbuf_block_size);
+ method2 = skl_wm_method2(wp->plane_pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ latency,
+ wp->plane_blocks_per_line);
+
+ if (wp->y_tiled) {
+ selected_result = max_fixed16(method2, wp->y_tile_minimum);
+ } else {
+ if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
+ wp->dbuf_block_size < 1) &&
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (DISPLAY_VER(i915) == 9)
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
+ selected_result = method1;
+ }
+ }
+
+ blocks = fixed16_to_u32_round_up(selected_result) + 1;
+	/*
+	 * Let's have blocks be at minimum equivalent to plane_blocks_per_line,
+	 * as there will be at minimum one line in the lines configuration. This
+	 * is a workaround for FIFO underruns observed with resolutions like
+	 * 4k 60 Hz in single channel DRAM configurations.
+	 *
+	 * As per Bspec 49325, if the ddb allocation can hold at least
+	 * one plane_blocks_per_line, we should have selected method2 in
+	 * the above logic. Assuming that modern versions have enough dbuf
+	 * and method2 guarantees blocks equivalent to at least 1 line,
+	 * select the blocks as plane_blocks_per_line.
+	 *
+	 * TODO: Revisit the logic when we have a better understanding of the
+	 * DRAM channels' impact on the level 0 memory latency and the relevant
+	 * wm calculations.
+	 */
+ if (skl_wm_has_lines(i915, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
+ lines = div_round_up_fixed16(selected_result,
+ wp->plane_blocks_per_line);
+
+ if (DISPLAY_VER(i915) == 9) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ lines += wp->y_min_scanlines;
+ } else {
+ blocks++;
+ }
+
+			/*
+			 * Make sure the result blocks for higher latency levels
+			 * are at least as high as the level below the current
+			 * level. This is an assumption in the DDB algorithm
+			 * optimization for special cases. Also covers
+			 * Display WA #1125 for RC.
+			 */
+ if (result_prev->blocks > blocks)
+ blocks = result_prev->blocks;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 11) {
+ if (wp->y_tiled) {
+ int extra_lines;
+
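+			/*
+			 * Round the line count up to the next multiple of
+			 * y_min_scanlines and add one extra tile row's worth
+			 * of lines before converting back to blocks.
+			 */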
+ if (lines % wp->y_min_scanlines == 0)
+ extra_lines = wp->y_min_scanlines;
+ else
+ extra_lines = wp->y_min_scanlines * 2 -
+ lines % wp->y_min_scanlines;
+
+ min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
+ wp->plane_blocks_per_line);
+ } else {
+ min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
+ }
+ }
+
+ if (!skl_wm_has_lines(i915, level))
+ lines = 0;
+
+ if (lines > skl_wm_max_lines(i915)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * If lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
+ result->blocks = blocks;
+ result->lines = lines;
+ /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+ result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
+ result->enable = true;
+
+ if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
+ result->can_sagv = latency >= i915->display.sagv.block_time_us;
+}
+
+static void
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_wm_level *levels)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level *result_prev = &levels[0];
+
+ for (level = 0; level <= max_level; level++) {
+ struct skl_wm_level *result = &levels[level];
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency,
+ wm_params, result_prev, result);
+
+ result_prev = result;
+ }
+}
+
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_plane_wm *plane_wm)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
+ struct skl_wm_level *levels = plane_wm->wm;
+ unsigned int latency = 0;
+
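+	/*
+	 * The SAGV watermark is WM0 recomputed with the SAGV block time
+	 * added on top of the normal level 0 latency.
+	 */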
+ if (i915->display.sagv.block_time_us)
+ latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+
+ skl_compute_plane_wm(crtc_state, plane, 0, latency,
+ wm_params, &levels[0],
+ sagv_wm);
+}
+
+static void skl_compute_transition_wm(struct drm_i915_private *i915,
+ struct skl_wm_level *trans_wm,
+ const struct skl_wm_level *wm0,
+ const struct skl_wm_params *wp)
+{
+ u16 trans_min, trans_amount, trans_y_tile_min;
+ u16 wm0_blocks, trans_offset, blocks;
+
+	/* Transition WMs don't make any sense if IPC is disabled */
+ if (!skl_watermark_ipc_enabled(i915))
+ return;
+
+	/*
+	 * WaDisableTWM:skl,kbl,cfl,bxt
+	 * Transition WMs are not recommended by the HW team for GEN9
+	 */
+ if (DISPLAY_VER(i915) == 9)
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (DISPLAY_VER(i915) == 10)
+ trans_amount = 0;
+ else
+		trans_amount = 10; /* This is a configurable amount */
+
+ trans_offset = trans_min + trans_amount;
+
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+ * letters. The value wm_l0->blocks is actually Result Blocks, but
+ * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+ * and since we later will have to get the ceiling of the sum in the
+ * transition watermarks calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1 and it should work for the
+ * current platforms.
+ */
+ wm0_blocks = wm0->blocks - 1;
+
+ if (wp->y_tiled) {
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
+ blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
+ } else {
+ blocks = wm0_blocks + trans_offset;
+ }
+ blocks++;
+
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ trans_wm->blocks = blocks;
+ trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
+ trans_wm->enable = true;
+}
+
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane, int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
+
+ skl_compute_transition_wm(i915, &wm->trans_wm,
+ &wm->wm[0], &wm_params);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
+
+ skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ &wm->sagv.wm0, &wm_params);
+ }
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ int ret;
+
+	/* Slave plane watermarks are calculated via the planar master plane */
+ if (plane_state->planar_slave)
+ return 0;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (plane_state->planar_linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ drm_WARN_ON(&i915->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state));
+ drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_state->planar_linked_plane, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int skl_build_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int ret, i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+		/*
+		 * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc
+		 * instead, but we don't populate that correctly for NV12 Y
+		 * planes so for now hack around it.
+		 */
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 11)
+ ret = icl_build_plane_wm(crtc_state, plane_state);
+ else
+ ret = skl_build_plane_wm(crtc_state, plane_state);
+ if (ret)
+ return ret;
+ }
+
+ crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
+
+ return 0;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_ddb_entry *entry)
+{
+ if (entry->end)
+ intel_de_write_fw(i915, reg,
+ PLANE_BUF_END(entry->end - 1) |
+ PLANE_BUF_START(entry->start));
+ else
+ intel_de_write_fw(i915, reg, 0);
+}
+
+static void skl_write_wm_level(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ u32 val = 0;
+
+ if (level->enable)
+ val |= PLANE_WM_EN;
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
+ val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
+
+ intel_de_write_fw(i915, reg, val);
+}
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915,
+ PLANE_BUF_CFG(pipe, plane_id), ddb);
+
+ if (DISPLAY_VER(i915) < 11)
+ skl_ddb_entry_write(i915,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
+}
+
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, CUR_WM(pipe, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
+}
+
+static bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
+{
+ return l1->enable == l2->enable &&
+ l1->ignore_lines == l2->ignore_lines &&
+ l1->lines == l2->lines &&
+ l1->blocks == l2->blocks;
+}
+
+static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
+ skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
+ skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
+}
+
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ return a->start < b->end && b->start < a->end;
+}
+
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static int
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(i915, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
+static int
+skl_compute_ddb(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_MBUS_JOINING(i915))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
+ old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ /* TODO: Implement vblank synchronized MBUS joining changes */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
+ old_dbuf_state->enabled_slices,
+ new_dbuf_state->enabled_slices,
+ INTEL_INFO(i915)->display.dbuf.slice_mask,
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus));
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_crtc_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
+static void
+skl_print_wm_changes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_ddb_entry *old, *new;
+
+ old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+ }
+ }
+}
+
+static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
+ const struct skl_pipe_wm *old_pipe_wm,
+ const struct skl_pipe_wm *new_pipe_wm)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
+ skl_plane_wm_level(new_pipe_wm, plane->id, level)))
+ return false;
+ }
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
+ const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
+
+ if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
+ !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
+ return false;
+ }
+
+ return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
+ skl_plane_trans_wm(new_pipe_wm, plane->id));
+}
+
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
+ skl_plane_selected_wm_equals(plane,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static int
+skl_compute_wm(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_build_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ ret = intel_compute_sagv_mask(state);
+ if (ret)
+ return ret;
+
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ skl_print_wm_changes(state);
+
+ return 0;
+}
+
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+{
+ level->enable = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
+ level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
+ level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+}
+
+static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ int level, max_level;
+ enum plane_id plane_id;
+ u32 val;
+
+ max_level = ilk_wm_max_level(i915);
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm = &out->planes[plane_id];
+
+ for (level = 0; level <= max_level; level++) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
+
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ }
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ wm->sagv.wm0 = wm->wm[0];
+ wm->sagv.trans_wm = wm->trans_wm;
+ }
+ }
+}
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_crtc *crtc;
+
+ if (HAS_MBUS_JOINING(i915))
+ dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset;
+ enum plane_id plane_id;
+ u8 slices;
+
+ memset(&crtc_state->wm.skl.optimal, 0,
+ sizeof(crtc_state->wm.skl.optimal));
+ if (crtc_state->hw.active)
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ plane_id, ddb, ddb_y);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ }
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(i915, slices);
+ crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
+ str_yes_no(dbuf_state->joined_mbus));
+ }
+
+ dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(i915);
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+{
+ return i915->display.wm.ipc_enabled;
+}
+
+void skl_watermark_ipc_update(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+}
+
+static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+{
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(i915))
+ return false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
+ return i915->dram_info.symmetric_memory;
+
+ return true;
+}
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+
+ skl_watermark_ipc_update(i915);
+}
+
+static void
+adjust_wm_latency(struct drm_i915_private *i915,
+ u16 wm[], int max_level, int read_latency)
+{
+ bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ int i, level;
+
+ /*
+ * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
+ * need to be disabled. We make sure to sanitize the values out
+ * of the punit to satisfy this requirement.
+ */
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0) {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ max_level = level - 1;
+ break;
+ }
+ }
+
+ /*
+ * WaWmMemoryReadLatency
+ *
+ * punit doesn't take into account the read latency so we need
+ * to add a proper adjustment to each valid level we retrieve
+ * from the punit when level 0 response data is 0us.
+ */
+ if (wm[0] == 0) {
+ for (level = 0; level <= max_level; level++)
+ wm[level] += read_latency;
+ }
+
+ /*
+ * WA Level-0 adjustment for 16GB DIMMs: SKL+
+ * If we could not get DIMM info, assume 16GB DIMMs to stay on
+ * the safe side, and apply this adjustment to prevent any
+ * underruns.
+ */
+ if (wm_lv_0_adjust_needed)
+ wm[0] += 1;
+}
+
+static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ int max_level = ilk_wm_max_level(i915);
+ u32 val;
+
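+ /*
+ * MTL reports the memory latency values through dedicated MMIO
+ * registers, two levels packed per register, instead of the
+ * pcode mailbox used on earlier platforms.
+ */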
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+ wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+ wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+ wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ adjust_wm_latency(i915, wm, max_level, 6);
+}
+
+static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ int max_level = ilk_wm_max_level(i915);
+ int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
+ int mult = IS_DG2(i915) ? 2 : 1;
+ u32 val;
+ int ret;
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ adjust_wm_latency(i915, wm, max_level, read_latency);
+}
+
+static void skl_setup_wm_latency(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14)
+ mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, i915->display.wm.skl_latency);
+
+ intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
+static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return NULL;
+
+ return &dbuf_state->base;
+}
+
+static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_funcs = {
+ .atomic_duplicate_state = intel_dbuf_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_destroy_state,
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ if (IS_ERR(dbuf_state))
+ return ERR_CAST(dbuf_state);
+
+ return to_intel_dbuf_state(dbuf_state);
+}
+
+int intel_dbuf_init(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ &dbuf_state->base, &intel_dbuf_funcs);
+
+ return 0;
+}
+
+/*
+ * Configure MBUS_CTL and the DBUF_CTL_S register of each slice to the
+ * join_mbus state before updating the requested state of all DBUF slices.
+ */
+static void update_mbus_pre_enable(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ u32 mbus_ctl, dbuf_min_tracker_val;
+ enum dbuf_slice slice;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ /*
+ * TODO: Implement vblank synchronized MBUS joining changes.
+ * Must be properly coordinated with dbuf reprogramming.
+ */
+ if (dbuf_state->joined_mbus) {
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ } else {
+ mbus_ctl = MBUS_HASHING_MODE_2x2 |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ }
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ dbuf_min_tracker_val);
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
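+ /*
+ * Keep the union of the old and new slice sets enabled while the
+ * planes are being reprogrammed, so both DBUF layouts stay valid
+ * during the transition.
+ */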
+ update_mbus_pre_enable(state);
+ gen9_dbuf_slices_update(i915,
+ old_dbuf_state->enabled_slices |
+ new_dbuf_state->enabled_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
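+ /*
+ * With the new plane configuration in place, shut down any slices
+ * that are no longer needed.
+ */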
+ gen9_dbuf_slices_update(i915,
+ new_dbuf_state->enabled_slices);
+}
+
+static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
+{
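+ /*
+ * Pipes A/D and B/C each share a DBUF bank, so a pipe has a bank
+ * to itself only while its partner pipe is inactive.
+ */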
+ switch (pipe) {
+ case PIPE_A:
+ return !(active_pipes & BIT(PIPE_D));
+ case PIPE_D:
+ return !(active_pipes & BIT(PIPE_A));
+ case PIPE_B:
+ return !(active_pipes & BIT(PIPE_C));
+ case PIPE_C:
+ return !(active_pipes & BIT(PIPE_B));
+ default: /* to suppress compiler warning */
+ MISSING_CASE(pipe);
+ break;
+ }
+
+ return false;
+}
+
+void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc *crtc;
+ u32 val = 0;
+ int i;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
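+ /*
+ * Build the credit configuration common to all pipes; the
+ * per-pipe bits are added in the loop below.
+ */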
+ if (DISPLAY_VER(i915) >= 14)
+ val |= MBUS_DBOX_I_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
+ MBUS_DBOX_A_CREDIT(8);
+ else if (IS_ALDERLAKE_P(i915))
+ /* Wa_22010947358:adl-p */
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
+ MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 14) {
+ val |= MBUS_DBOX_B_CREDIT(0xA);
+ } else if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ u32 pipe_val = val;
+
+ if (!new_crtc_state->hw.active)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 14) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
+ new_dbuf_state->active_pipes))
+ pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
+
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
+ }
+}
+
+static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *i915 = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ str_yes_no(skl_watermark_ipc_enabled(i915)));
+ return 0;
+}
+
+static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+
+ return single_open(file, skl_watermark_ipc_status_show, i915);
+}
+
+static ssize_t skl_watermark_ipc_status_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (!skl_watermark_ipc_enabled(i915) && enable)
+ drm_info(&i915->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ i915->display.wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(i915);
+ }
+
+ return len;
+}
+
+static const struct file_operations skl_watermark_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = skl_watermark_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = skl_watermark_ipc_status_write
+};
+
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ if (!HAS_IPC(i915))
+ return;
+
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
new file mode 100644
index 000000000000..7a5a4e67cd73
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SKL_WATERMARK_H__
+#define __SKL_WATERMARK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_global_state.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_bw_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915);
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state);
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry);
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915);
+void skl_wm_sanitize(struct drm_i915_private *i915);
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state);
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915);
+void skl_watermark_ipc_update(struct drm_i915_private *i915);
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+
+void skl_wm_init(struct drm_i915_private *i915);
+
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ bool joined_mbus;
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
+
+#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+
+int intel_dbuf_init(struct drm_i915_private *i915);
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+void intel_mbus_dbox_update(struct intel_atomic_state *state);
+
+#endif /* __SKL_WATERMARK_H__ */
+
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b9b1fed99874..b3f5ca280ef2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -822,9 +822,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
u32 val;
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -998,9 +998,9 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
}
/* Assert reset */
@@ -1277,13 +1277,12 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
- if (intel_dsi->dual_link)
- pclk *= 2;
+ pipe_config->port_clock = pclk;
- if (pclk) {
- pipe_config->hw.adjusted_mode.crtc_clock = pclk;
- pipe_config->port_clock = pclk;
- }
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
}
/* return txclkesc cycles in terms of divider and duration in us */
@@ -1872,9 +1871,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
return;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
+ dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1933,8 +1932,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 5894b0138343..af7402127cd9 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -113,6 +113,61 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
return 0;
}
+static int vlv_dsi_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_clock;
+ u32 pll_ctl, pll_div;
+ u32 m = 0, p = 0, n;
+ int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int i;
+
+ pll_ctl = config->dsi_pll.ctrl;
+ pll_div = config->dsi_pll.div;
+
+ /* mask out other bits and extract the P1 divisor */
+ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+ pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+ /* N1 divisor */
+ n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
+ n = 1 << n; /* register has log2(N1) */
+
+ /* mask out the other bits and extract the M1 divisor */
+ pll_div &= DSI_PLL_M1_DIV_MASK;
+ pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+ while (pll_ctl) {
+ pll_ctl = pll_ctl >> 1;
+ p++;
+ }
+ p--;
+
+ if (!p) {
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+ if (lfsr_converts[i] == pll_div)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lfsr_converts)) {
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ return 0;
+ }
+
+ m = i + 62;
+
+ dsi_clock = (m * refclk) / (p * n);
+
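+ /* Derive the pixel clock from the DSI clock: pclk = dsi_clk * lane_count / bpp */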
+ return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+}
+
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
@@ -122,8 +177,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int ret;
- u32 dsi_clk;
+ int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
@@ -145,6 +199,14 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
+ pclk = vlv_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
return 0;
}
@@ -262,13 +324,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
- u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
- int i;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -280,65 +336,31 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
- /* mask out other bits and extract the P1 divisor */
- pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
- pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
-
- /* N1 divisor */
- n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
- n = 1 << n; /* register has log2(N1) */
-
- /* mask out the other bits and extract the M1 divisor */
- pll_div &= DSI_PLL_M1_DIV_MASK;
- pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
-
- while (pll_ctl) {
- pll_ctl = pll_ctl >> 1;
- p++;
- }
- p--;
-
- if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
- if (lfsr_converts[i] == pll_div)
- break;
- }
-
- if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
- return 0;
- }
-
- m = i + 62;
+ return vlv_dsi_pclk(encoder, config);
+}
- dsi_clock = (m * refclk) / (p * n);
+static int bxt_dsi_pclk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_ratio, dsi_clk;
- pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+ dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+ dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
- return pclk;
+ return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
}
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- u32 pclk;
- u32 dsi_clk;
- u32 dsi_ratio;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 pclk;
config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
-
- dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
-
- pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
+ pclk = bxt_dsi_pclk(encoder, config);
drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
return pclk;
@@ -463,6 +485,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
+ int pclk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
@@ -502,6 +525,14 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
+ pclk = bxt_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
index 356e51515346..e065b8f2ee08 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -11,6 +11,8 @@
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
+#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
@@ -96,8 +98,8 @@
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
+#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
@@ -106,11 +108,11 @@
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
+#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
+#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
@@ -145,8 +147,8 @@
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
@@ -168,76 +170,76 @@
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
+#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
+#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
+#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
+#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
+#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
+#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
@@ -247,27 +249,27 @@
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
+#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
+#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
@@ -276,8 +278,8 @@
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
+#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
#define BXT_DPHY_DEFEATURE_EN (1 << 8)
@@ -290,35 +292,35 @@
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
+#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
+#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
+#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
+#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
+#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
+#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
@@ -330,8 +332,8 @@
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
@@ -348,15 +350,15 @@
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
+#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
+#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
@@ -367,34 +369,34 @@
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
-#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
+#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
+#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -407,8 +409,8 @@
/* MIPI adapter registers */
-#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
+#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
@@ -440,21 +442,21 @@
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)
-#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
+#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
+#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
@@ -462,18 +464,18 @@
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
+#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
+#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dabdfe09f5e5..0bcde53c50c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
@@ -1521,10 +1525,6 @@ static void context_close(struct i915_gem_context *ctx)
ctx->file_priv = ERR_PTR(-EBADF);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
-
client = ctx->client;
if (client) {
spin_lock(&client->ctx_lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 1674b0c5802b..d44a152ce680 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -397,7 +397,7 @@ struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
u32 alignment,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
unsigned int flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -434,7 +434,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
*/
vma = ERR_PTR(-ENOSPC);
if ((flags & PIN_MAPPABLE) == 0 &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL))
+ (!view || view->type == I915_GTT_VIEW_NORMAL))
vma = i915_gem_object_ggtt_pin_ww(obj, ww, view, 0, alignment,
flags | PIN_MAPPABLE |
PIN_NONBLOCK);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 1b88ea13435c..5a7a14e85c3f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -12,8 +12,6 @@ struct drm_i915_private;
struct drm_i915_gem_object;
struct intel_memory_region;
-extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
-
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 0c5c43852e24..73d9eda1d6b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -194,17 +194,17 @@ int i915_gem_mmap_gtt_version(void)
return 4;
}
-static inline struct i915_ggtt_view
+static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
view.partial.size =
min_t(unsigned int, chunk,
@@ -212,7 +212,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
/* If the partial covers the entire object, just create a normal VMA. */
if (chunk >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
+ view.type = I915_GTT_VIEW_NORMAL;
return view;
}
@@ -341,12 +341,12 @@ retry:
PIN_NOEVICT);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
/* Use a partial view if it is bigger than available space */
- struct i915_ggtt_view view =
+ struct i915_gtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
flags = PIN_MAPPABLE | PIN_NOSEARCH;
- if (view.type == I915_GGTT_VIEW_NORMAL)
+ if (view.type == I915_GTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
/*
@@ -357,7 +357,7 @@ retry:
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
flags = PIN_MAPPABLE;
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
@@ -394,7 +394,7 @@ retry:
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+ area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
@@ -413,7 +413,7 @@ retry:
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
+ intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
@@ -550,6 +550,20 @@ out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
+void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_device *bdev = bo->bdev;
+
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+
+ if (obj->userfault_count) {
+ /* The rpm wakeref provides exclusive access */
+ list_del(&obj->userfault_link);
+ obj->userfault_count = 0;
+ }
+}
+
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
@@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_lock(&obj->mmo.lock);
}
spin_unlock(&obj->mmo.lock);
+
+ if (obj->userfault_count) {
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_del(&obj->userfault_link);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ obj->userfault_count = 0;
+ }
}
static struct i915_mmap_offset *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index efee9e0d2508..1fa91b3033b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 389e9f157ca5..7ff9c7877bec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
/* Skip serialisation and waking the device if known to be not used. */
- if (obj->userfault_count)
+ if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
i915_gem_object_release_mmap_gtt(obj);
if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
@@ -723,6 +723,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
bool lmem_placement = false;
int i;
+ if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+ return false;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 6f0a3ce35567..7317d4102955 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -543,7 +543,7 @@ struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
u32 alignment,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
unsigned int flags);
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 9f6b14ec189a..40305e2bcd49 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -298,7 +298,8 @@ struct drm_i915_gem_object {
};
/**
- * Whether the object is currently in the GGTT mmap.
+ * Whether the object is currently in the GGTT mmap or any other
+ * supported fake-offset mmap backed by lmem.
*/
unsigned int userfault_count;
struct list_head userfault_link;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8357dbdcab5c..4df50b049cea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -20,7 +20,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
unsigned int sg_page_sizes)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
bool shrinkable;
int i;
@@ -66,7 +66,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
shrinkable = i915_gem_object_is_shrinkable(obj);
if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
GEM_BUG_ON(!list_empty(&obj->mm.link));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 00359ec9d58b..3428f735e786 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
- intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
+ intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
flush_workqueue(i915->wq);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4eed3dd90ba8..f42ca1179f37 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -75,7 +75,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
if (size > resource_size(&mr->region))
return -ENOMEM;
- if (sg_alloc_table(st, page_count, GFP_KERNEL))
+ if (sg_alloc_table(st, page_count, GFP_KERNEL | __GFP_NOWARN))
return -ENOMEM;
/*
@@ -137,7 +137,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
* trigger the out-of-memory killer and for
* this we want __GFP_RETRY_MAYFAIL.
*/
- gfp |= __GFP_RETRY_MAYFAIL;
+ gfp |= __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
}
} while (1);
@@ -209,7 +209,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
rebuild_st:
- st = kmalloc(sizeof(*st), GFP_KERNEL);
+ st = kmalloc(sizeof(*st), GFP_KERNEL | __GFP_NOWARN);
if (!st)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 166d0a4b9e8c..acc561c0f0aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -18,10 +18,12 @@
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
+#include "intel_pci_config.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -428,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
reserved_base = stolen_top;
reserved_size = 0;
- switch (GRAPHICS_VER(i915)) {
- case 2:
- case 3:
- break;
- case 4:
- if (!IS_G4X(i915))
- break;
- fallthrough;
- case 5:
- g4x_get_stolen_reserved(i915, uncore,
+ if (GRAPHICS_VER(i915) >= 11) {
+ icl_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
- break;
- case 6:
- gen6_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- break;
- case 7:
- if (IS_VALLEYVIEW(i915))
- vlv_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- else
- gen7_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- break;
- case 8:
- case 9:
+ } else if (GRAPHICS_VER(i915) >= 8) {
if (IS_LP(i915))
chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
- break;
- default:
- MISSING_CASE(GRAPHICS_VER(i915));
- fallthrough;
- case 11:
- case 12:
- icl_get_stolen_reserved(i915, uncore,
- &reserved_base,
- &reserved_size);
- break;
+ } else if (GRAPHICS_VER(i915) >= 7) {
+ if (IS_VALLEYVIEW(i915))
+ vlv_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ else
+ gen7_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ gen6_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
+ g4x_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
}
/*
@@ -827,10 +810,13 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
if (WARN_ON_ONCE(instance))
return ERR_PTR(-ENODEV);
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
if (IS_DG1(uncore->i915)) {
- lmem_size = pci_resource_len(pdev, 2);
+ lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
} else {
@@ -842,11 +828,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
}
dsm_size = lmem_size - dsm_base;
- if (pci_resource_len(pdev, 2) < lmem_size) {
+ if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
- io_start = pci_resource_start(pdev, 2) + dsm_base;
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
io_size = dsm_size;
}
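
Illustrative sketch (not part of the patch): the stolen-reserved hunk above trades a per-version switch for a descending if/else ladder, so a future graphics version falls into the newest helper without adding a new case. A minimal standalone C sketch of that dispatch shape; the helper names and version cutoffs here are illustrative, not the i915 functions:

/* Version-ladder dispatch: test the newest range first so unknown
 * future versions default to the newest behaviour. */
#include <stdio.h>

static void reserved_icl(void)  { puts("gen11+ register layout"); }
static void reserved_bdw(void)  { puts("gen8/9 register layout"); }
static void reserved_gen6(void) { puts("gen6 register layout"); }
static void reserved_none(void) { puts("no reserved range"); }

static void get_stolen_reserved(int graphics_ver)
{
	if (graphics_ver >= 11)
		reserved_icl();
	else if (graphics_ver >= 8)
		reserved_bdw();
	else if (graphics_ver >= 6)
		reserved_gen6();
	else
		reserved_none();
}

int main(void)
{
	get_stolen_reserved(12); /* takes the gen11+ branch without a new case */
	get_stolen_reserved(5);
	return 0;
}
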
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 85518b28cd72..fd42b89b7162 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
*/
if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -458,7 +458,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f131dc065f47..e3fc38dd5db0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -297,7 +297,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ if (i915_gem_object_needs_ccs_pages(obj))
ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
NUM_BYTES_PER_CCS_BYTE),
PAGE_SIZE);
@@ -361,7 +361,6 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- struct ttm_resource *res = bo->resource;
if (!obj)
return false;
@@ -378,45 +377,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
if (!i915_gem_object_evictable(obj))
return false;
- switch (res->mem_type) {
- case I915_PL_LMEM0: {
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
- struct i915_ttm_buddy_resource *bman_res =
- to_ttm_buddy_resource(res);
- struct drm_buddy *mm = bman_res->mm;
- struct drm_buddy_block *block;
-
- if (!place->fpfn && !place->lpfn)
- return true;
-
- GEM_BUG_ON(!place->lpfn);
-
- /*
- * If we just want something mappable then we can quickly check
- * if the current victim resource is using any of the CPU
- * visible portion.
- */
- if (!place->fpfn &&
- place->lpfn == i915_ttm_buddy_man_visible_size(man))
- return bman_res->used_visible_size > 0;
-
- /* Real range allocation */
- list_for_each_entry(block, &bman_res->blocks, link) {
- unsigned long fpfn =
- drm_buddy_block_offset(block) >> PAGE_SHIFT;
- unsigned long lpfn = fpfn +
- (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
-
- if (place->fpfn < lpfn && place->lpfn > fpfn)
- return true;
- }
- return false;
- } default:
- break;
- }
-
- return true;
+ return ttm_bo_eviction_valuable(bo, place);
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -548,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = 0;
+
+ if (bo->resource && likely(obj)) {
+ /* ttm_bo_release() already has dma_resv_lock */
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
- if (likely(obj)) {
__i915_gem_object_pages_fini(obj);
+
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
i915_ttm_free_cached_io_rsgt(obj);
}
}
@@ -1020,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref = 0;
vm_fault_t ret;
int idx;
@@ -1041,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1062,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (err) {
drm_dbg(dev, "Unable to make resource CPU accessible\n");
dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ goto out_rpm;
}
}
@@ -1073,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out_rpm;
+
+ /* ttm_bo_vm_reserve() already has dma_resv_lock */
+ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ obj->userfault_count = 1;
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ }
+
+ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
dma_resv_unlock(bo->base.resv);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
return ret;
}
@@ -1242,9 +1235,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* Similarly, in delayed_destroy, we can't call ttm_bo_put()
* until successful initialization.
*/
- ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
- bo_type, &i915_sys_placement,
- page_size >> PAGE_SHIFT,
+ ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+ &i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
if (ret)
return i915_ttm_err_to_gem(ret);
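
A reduced sketch (not part of the patch) of the fault-path change above: vm_fault_ttm() now takes a runtime-PM wakeref only when the fault touches iomem and routes every exit through out_rpm so the reference is always dropped. The pm_get/pm_put calls below are placeholders standing in for the intel_runtime_pm API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for intel_runtime_pm_get()/intel_runtime_pm_put(). */
static int  pm_get(void)        { puts("pm: get"); return 1; }
static void pm_put(int wakeref) { if (wakeref) puts("pm: put"); }

static int handle_fault(bool maps_iomem, bool make_mappable_fails)
{
	int wakeref = 0;
	int ret = 0;

	if (maps_iomem)
		wakeref = pm_get();	/* keep the device awake only when needed */

	if (make_mappable_fails) {
		ret = -1;		/* e.g. VM_FAULT_SIGBUS in the driver */
		goto out_rpm;
	}

	puts("fault serviced");

out_rpm:
	pm_put(wakeref);		/* every exit path releases the wakeref */
	return ret;
}

int main(void)
{
	handle_fault(true, false);
	handle_fault(true, true);
	return 0;
}
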
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 9aad84059d56..07e49f22f2de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -79,7 +79,12 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
goto out_no_populate;
err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
- GEM_WARN_ON(err);
+ if (err) {
+ drm_err(&i915->drm,
+ "Unable to copy from device to system memory, err:%pe\n",
+ ERR_PTR(err));
+ goto out_no_populate;
+ }
ttm_bo_wait_ctx(backup_bo, &ctx);
obj->ttm.backup = backup;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 72ce2c9f42fd..c570cf780079 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -358,7 +358,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err;
@@ -419,7 +419,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+ unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i, j, single;
@@ -438,7 +438,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
combination |= page_sizes[j];
}
- mkwrite_device_info(i915)->page_sizes = combination;
+ RUNTIME_INFO(i915)->page_sizes = combination;
for (single = 0; single <= 1; ++single) {
obj = fake_huge_pages_object(i915, combination, !!single);
@@ -485,7 +485,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
out_put:
i915_gem_object_put(obj);
out_device:
- mkwrite_device_info(i915)->page_sizes = saved_mask;
+ RUNTIME_INFO(i915)->page_sizes = saved_mask;
return err;
}
@@ -495,7 +495,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -573,7 +573,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
int err;
@@ -1390,7 +1390,7 @@ out_put:
static int igt_ppgtt_sanity_check(void *arg)
{
struct drm_i915_private *i915 = arg;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
struct {
igt_create_fn fn;
unsigned int flags;
@@ -1764,8 +1764,8 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- mkwrite_device_info(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3cfc621ef363..9a6a6b5b722b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -711,7 +711,7 @@ static bool bad_swizzling(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
return true;
if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 13b088cc787e..a666d7e610f5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -434,5 +434,5 @@ int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_coherency),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 62c61af77a42..51ed824b020c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -476,5 +476,5 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3ced9948a331..b73c91aa5450 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -93,7 +93,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
{
const unsigned long npages = obj->base.size / PAGE_SIZE;
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
struct i915_vma *vma;
unsigned long page;
u32 __iomem *io;
@@ -210,7 +210,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
}
for_each_prime_number_from(page, 1, npages) {
- struct i915_ggtt_view view =
+ struct i915_gtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
u32 __iomem *io;
struct page *p;
@@ -367,7 +367,7 @@ static int igt_partial_tiling(void *arg)
unsigned int pitch;
struct tile tile;
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
/*
* The swizzling pattern is actually unknown as it
* varies based on physical address of each page.
@@ -464,7 +464,7 @@ static int igt_smoke_tiling(void *arg)
* Remember to look at the st_seed if we see a flip-flop in BAT!
*/
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
return 0;
obj = huge_gem_object(i915,
@@ -1844,5 +1844,5 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_mmap_gpu),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index fe0a890775e2..bdf5bb40ccf1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -95,5 +95,5 @@ int i915_gem_object_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_huge),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 1bb766c79dcb..5aaacc53fa4c 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -247,6 +247,7 @@ err_scratch1:
i915_gem_object_put(vm->scratch[1]);
err_scratch0:
i915_gem_object_put(vm->scratch[0]);
+ vm->scratch[0] = NULL;
return ret;
}
@@ -268,9 +269,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
- mutex_destroy(&ppgtt->flush);
+ if (ppgtt->base.pd)
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+ mutex_destroy(&ppgtt->flush);
}
static void pd_vma_bind(struct i915_address_space *vm,
@@ -449,19 +451,17 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_free;
+ goto err_put;
ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
if (IS_ERR(ppgtt->base.pd)) {
err = PTR_ERR(ppgtt->base.pd);
- goto err_scratch;
+ goto err_put;
}
return &ppgtt->base;
-err_scratch:
- free_scratch(&ppgtt->base.vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->base.vm);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 98645797962f..e49fa6fa6aee 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -165,10 +165,12 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
{
+ u32 gsi_offset = gt->uncore->gsi_offset;
+
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
- *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
@@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_GFX_CCS_AUX_NV);
}
*cs++ = preparser_disable(false);
@@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_VD0_AUX_NV);
else
- cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_VE0_AUX_NV);
}
if (mode & EMIT_INVALIDATE)
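
For illustration only (not part of the patch): gen12_emit_aux_table_inv() now adds the issuing GT's gsi_offset to the register offset so the LRI lands in that GT's register range, which matters for the standalone media GT. A toy sketch of relocating an MMIO register offset by a per-GT base before emitting it; the opcode and offsets below are made up for the example:

#include <stdint.h>
#include <stdio.h>

struct gt {
	uint32_t gsi_offset;	/* 0 for the primary GT, non-zero for a media GT */
};

/* Emit a "write register" command: opcode, relocated register offset, value. */
static uint32_t *emit_reg_write(const struct gt *gt, uint32_t *cs,
				uint32_t reg, uint32_t value)
{
	*cs++ = 0x11000000;		/* placeholder opcode */
	*cs++ = reg + gt->gsi_offset;	/* relocate into this GT's range */
	*cs++ = value;
	return cs;
}

int main(void)
{
	struct gt primary = { .gsi_offset = 0 };
	struct gt media = { .gsi_offset = 0x380000 };	/* illustrative base */
	uint32_t ring[8];
	uint32_t *cs;

	cs = emit_reg_write(&primary, ring, 0x4208, 1);
	cs = emit_reg_write(&media, cs, 0x4208, 1);
	printf("emitted %td dwords\n", cs - ring);
	return 0;
}
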
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index 32e3d2b831bb..e4d24c811dd6 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
+struct intel_gt;
struct i915_request;
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
@@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index c7bd5d71b03e..2128b7a72a25 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -196,7 +196,10 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ if (ppgtt->pd)
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+ gen8_pd_top_count(vm), vm->top);
+
free_scratch(vm);
}
@@ -803,8 +806,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
struct drm_i915_gem_object *obj;
obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto free_scratch;
+ }
ret = map_pt_dma(vm, obj);
if (ret) {
@@ -823,7 +828,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
free_scratch:
while (i--)
i915_gem_object_put(vm->scratch[i]);
- return -ENOMEM;
+ vm->scratch[0] = NULL;
+ return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -901,6 +907,7 @@ err_pd:
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
+ struct i915_page_directory *pd;
struct i915_ppgtt *ppgtt;
int err;
@@ -946,21 +953,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
}
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_free_scratch;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ ppgtt->vm.pte_encode = gen8_pte_encode;
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
@@ -971,22 +964,31 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
ppgtt->vm.foreach = gen8_ppgtt_foreach;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.pte_encode = gen8_pte_encode;
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
+ pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
+ goto err_put;
+ }
+ ppgtt->pd = pd;
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_put;
+ }
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
return ppgtt;
-err_free_pd:
- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
- free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->vm);
return ERR_PTR(err);
}
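
Sketch, not part of the patch: both ppgtt constructors above collapse their hand-rolled error labels into a single err_put that drops the vm reference, and the cleanup callbacks are hardened against partially initialised state (NULL scratch/pd). A generic standalone sketch of that pattern under hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct vm {
	int refcount;
	void *pd;		/* may still be NULL if init failed early */
};

static void vm_cleanup(struct vm *vm)
{
	if (vm->pd)		/* cleanup must tolerate partial init */
		free(vm->pd);
}

static void vm_put(struct vm *vm)
{
	if (--vm->refcount == 0) {
		vm_cleanup(vm);
		free(vm);
	}
}

static struct vm *vm_create(int fail_early)
{
	struct vm *vm = calloc(1, sizeof(*vm));

	if (!vm)
		return NULL;
	vm->refcount = 1;

	if (fail_early)
		goto err_put;	/* single unwind path for every failure */

	vm->pd = malloc(64);
	if (!vm->pd)
		goto err_put;

	return vm;

err_put:
	vm_put(vm);		/* releases whatever was set up so far */
	return NULL;
}

int main(void)
{
	struct vm *vm = vm_create(0);

	if (vm)
		vm_put(vm);
	vm_create(1);
	return 0;
}
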
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 37fa813af766..1f7188129cd1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt,
*/
if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
return false;
- else if (GRAPHICS_VER(i915) == 12)
+ else if (MEDIA_VER(i915) >= 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
- else if (GRAPHICS_VER(i915) == 11)
+ else if (MEDIA_VER(i915) == 11)
return logical_vdbox % 2 == 0;
- MISSING_CASE(GRAPHICS_VER(i915));
return false;
}
+static void engine_mask_apply_media_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ unsigned int logical_vdbox = 0;
+ unsigned int i;
+ u32 media_fuse, fuse1;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+
+ if (MEDIA_VER(gt->i915) < 11)
+ return;
+
+ /*
+ * On newer platforms the fusing register is called 'enable' and has
+ * enable semantics, while on older platforms it is called 'disable'
+ * and bits have disable semantics.
+ */
+ media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ media_fuse = ~media_fuse;
+
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
+ } else {
+ gt->info.sfc_mask = ~0;
+ }
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(gt, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vdbox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VCS(i));
+ drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ continue;
+ }
+
+ if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
+ gt->info.vdbox_sfc_access |= BIT(i);
+ logical_vdbox++;
+ }
+ drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(gt));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(gt, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vebox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VECS(i));
+ drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ }
+ }
+ drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(gt));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+}
+
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -672,7 +739,10 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
unsigned long ccs_mask;
unsigned int i;
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER(i915) < 11)
+ return;
+
+ if (hweight32(CCS_MASK(gt)) <= 1)
return;
ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
@@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
unsigned long meml3_mask;
unsigned long quad;
+ if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
+ return;
+
meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
@@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
*/
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
struct intel_gt_info *info = &gt->info;
- struct intel_uncore *uncore = gt->uncore;
- unsigned int logical_vdbox = 0;
- unsigned int i;
- u32 media_fuse, fuse1;
- u16 vdbox_mask;
- u16 vebox_mask;
-
- info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
-
- if (GRAPHICS_VER(i915) < 11)
- return info->engine_mask;
- /*
- * On newer platforms the fusing register is called 'enable' and has
- * enable semantics, while on older platforms it is called 'disable'
- * and bits have disable semantices.
- */
- media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
- media_fuse = ~media_fuse;
-
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
- gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
- } else {
- gt->info.sfc_mask = ~0;
- }
-
- for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(gt, _VCS(i))) {
- vdbox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vdbox_mask)) {
- info->engine_mask &= ~BIT(_VCS(i));
- drm_dbg(&i915->drm, "vcs%u fused off\n", i);
- continue;
- }
-
- if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
- gt->info.vdbox_sfc_access |= BIT(i);
- logical_vdbox++;
- }
- drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(gt));
- GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
-
- for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(gt, _VECS(i))) {
- vebox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vebox_mask)) {
- info->engine_mask &= ~BIT(_VECS(i));
- drm_dbg(&i915->drm, "vecs%u fused off\n", i);
- }
- }
- drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(gt));
- GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+ GEM_BUG_ON(!info->engine_mask);
+ engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
engine_mask_apply_copy_fuses(gt);
@@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine)
return false;
/* Caller disables interrupts */
- spin_lock(&engine->gt->irq_lock);
+ spin_lock(engine->gt->irq_lock);
engine->irq_enable(engine);
- spin_unlock(&engine->gt->irq_lock);
+ spin_unlock(engine->gt->irq_lock);
return true;
}
@@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->gt->irq_lock);
+ spin_lock(engine->gt->irq_lock);
engine->irq_disable(engine);
- spin_unlock(&engine->gt->irq_lock);
+ spin_unlock(engine->gt->irq_lock);
}
void intel_engines_reset_default_submission(struct intel_gt *gt)
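
Illustrative aside (not part of the patch): engine_mask_apply_media_fuses() above reads one fuse register whose bits have enable semantics on newer media IP and disable semantics on older IP, then clears engines that are fused off. A compact sketch of that normalisation; the mask layout and widths are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define MAX_VCS 8

static uint32_t normalize_fuse(uint32_t raw, int enable_semantics)
{
	/* Older platforms report disabled engines; invert so a set bit
	 * always means "engine present". */
	return enable_semantics ? raw : ~raw;
}

static uint32_t apply_media_fuses(uint32_t engine_mask, uint32_t raw_fuse,
				  int enable_semantics)
{
	uint32_t vdbox_mask = normalize_fuse(raw_fuse, enable_semantics) & 0xff;
	unsigned int i;

	for (i = 0; i < MAX_VCS; i++) {
		if (!(vdbox_mask & (1u << i))) {
			engine_mask &= ~(1u << i);	/* fused off */
			printf("vcs%u fused off\n", i);
		}
	}
	return engine_mask;
}

int main(void)
{
	/* disable-style fuse: bit 1 set means vcs1 is fused off */
	printf("mask: %#x\n", apply_media_fuses(0xff, 0x2, 0));
	return 0;
}
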
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 889f0df3940b..fe1a0d5fd4b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -110,6 +110,7 @@
#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BB_OFFSET(base) _MMIO((base) + 0x158)
#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 633a7e5dba3b..6b5d4ea22b67 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -166,6 +166,21 @@ struct intel_engine_execlists {
struct timer_list preempt;
/**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout maybe chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen within before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
* @ccid: identifier for contexts submitted to this engine
*/
u32 ccid;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4b909cb88cdf..c718e6dc40b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
if (!rq)
return 0;
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+ * If after the preempt-timeout expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
}
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15a915bb4088..30cf5c3369d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -16,7 +16,9 @@
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
+#include "intel_pci_config.h"
#include "i915_drv.h"
+#include "i915_pci.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
@@ -869,8 +871,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
u32 pte_flags;
int ret;
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+ GEM_WARN_ON(pci_resource_len(pdev, GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GTTMMADR_BAR) + gen6_gttadr_offset(i915);
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
@@ -930,7 +932,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
u16 snb_gmch_ctl;
if (!HAS_LMEM(i915)) {
- ggtt->gmadr = pci_resource(pdev, 2);
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
}
@@ -1084,7 +1089,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
unsigned int size;
u16 snb_gmch_ctl;
- ggtt->gmadr = pci_resource(pdev, 2);
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 6ebda3d65086..ea775e601686 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -727,7 +727,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
- i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
@@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
detect_bit_6_swizzle(ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 0e494028b81d..7af6db3194dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -7,6 +7,7 @@
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
@@ -36,10 +37,56 @@ static int gsc_irq_init(int irq)
return irq_set_chip_data(irq, NULL);
}
+static int
+gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_CPU_CLEAR);
+ if (IS_ERR(obj)) {
+ drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
+ return PTR_ERR(obj);
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
+ goto out_put;
+ }
+
+ intf->gem_obj = obj;
+
+ return 0;
+
+out_put:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
+{
+ struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
+
+ if (!obj)
+ return;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+
+ i915_gem_object_put(obj);
+}
+
struct gsc_def {
const char *name;
unsigned long bar;
size_t bar_size;
+ bool use_polling;
+ bool slow_firmware;
+ size_t lmem_size;
};
/* gsc resources and definitions (HECI1 and HECI2) */
@@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
+static const struct gsc_def gsc_def_xehpsdv[] = {
+ {
+ /* HECI1 not enabled on the device. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ .use_polling = true,
+ .slow_firmware = true,
+ }
+};
+
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
.bar = DG2_GSC_HECI1_BASE,
.bar_size = GSC_BAR_LENGTH,
+ .lmem_size = SZ_4M,
},
{
.name = "mei-gscfi",
@@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev)
kfree(adev);
}
-static void gsc_destroy_one(struct intel_gsc_intf *intf)
+static void gsc_destroy_one(struct drm_i915_private *i915,
+ struct intel_gsc *gsc, unsigned int intf_id)
{
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
+
if (intf->adev) {
auxiliary_device_delete(&intf->adev->aux_dev);
auxiliary_device_uninit(&intf->adev->aux_dev);
intf->adev = NULL;
}
+
if (intf->irq >= 0)
irq_free_desc(intf->irq);
intf->irq = -1;
+
+ gsc_ext_om_destroy(intf);
}
-static void gsc_init_one(struct drm_i915_private *i915,
- struct intel_gsc_intf *intf,
+static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
unsigned int intf_id)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct mei_aux_device *adev;
struct auxiliary_device *aux_dev;
const struct gsc_def *def;
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
int ret;
intf->irq = -1;
@@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
+ } else if (IS_XEHPSDV(i915)) {
+ def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
@@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
}
+ /* skip irq initialization */
+ if (def->use_polling)
+ goto add_device;
+
intf->irq = irq_alloc_desc(0);
if (intf->irq < 0) {
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
- return;
+ goto fail;
}
ret = gsc_irq_init(intf->irq);
@@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915,
goto fail;
}
+add_device:
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
goto fail;
+ if (def->lmem_size) {
+ drm_dbg(&i915->drm, "setting up GSC lmem\n");
+
+ if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
+ drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
+ kfree(adev);
+ goto fail;
+ }
+
+ adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
+ adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
+ }
+
adev->irq = intf->irq;
adev->bar.parent = &pdev->resource[0];
adev->bar.start = def->bar + pdev->resource[0].start;
adev->bar.end = adev->bar.start + def->bar_size - 1;
adev->bar.flags = IORESOURCE_MEM;
adev->bar.desc = IORES_DESC_NONE;
+ adev->slow_firmware = def->slow_firmware;
aux_dev = &adev->aux_dev;
aux_dev->name = def->name;
@@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
fail:
- gsc_destroy_one(intf);
+ gsc_destroy_one(i915, gsc, intf->id);
}
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
@@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
return;
}
- if (gt->gsc.intf[intf_id].irq < 0) {
- drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
+ if (gt->gsc.intf[intf_id].irq < 0)
return;
- }
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
@@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
- gsc_init_one(i915, &gsc->intf[i], i);
+ gsc_init_one(i915, gsc, i);
}
void intel_gsc_fini(struct intel_gsc *gsc)
@@ -220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
- gsc_destroy_one(&gsc->intf[i]);
+ gsc_destroy_one(gt->i915, gsc, i);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index 68582f912b21..fcac1775e9c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -20,11 +20,14 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
+ *
+ * @gem_obj: scratch memory for GSC operations
* @intf : gsc interface
*/
struct intel_gsc {
struct intel_gsc_intf {
struct mei_aux_device *adev;
+ struct drm_i915_gem_object *gem_obj;
int irq;
unsigned int id;
} intf[INTEL_GSC_NUM_INTERFACES];
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f435e06125aa..d0b03a928b9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -26,18 +26,22 @@
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
+#include "intel_pci_config.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
+#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-static void __intel_gt_init_early(struct intel_gt *gt)
+void intel_gt_common_init_early(struct intel_gt *gt)
{
- spin_lock_init(&gt->irq_lock);
+ spin_lock_init(gt->irq_lock);
+ INIT_LIST_HEAD(&gt->lmem_userfault_list);
+ mutex_init(&gt->lmem_userfault_lock);
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -57,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt)
}
/* Preliminary initialization of Tile 0 */
-void intel_root_gt_init_early(struct drm_i915_private *i915)
+int intel_root_gt_init_early(struct drm_i915_private *i915)
{
struct intel_gt *gt = to_gt(i915);
gt->i915 = i915;
gt->uncore = &i915->uncore;
+ gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+ if (!gt->irq_lock)
+ return -ENOMEM;
- __intel_gt_init_early(gt);
+ intel_gt_common_init_early(gt);
+
+ return 0;
}
static int intel_gt_probe_lmem(struct intel_gt *gt)
@@ -780,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
int ret;
if (!gt_is_root(gt)) {
- struct intel_uncore_mmio_debug *mmio_debug;
struct intel_uncore *uncore;
+ spinlock_t *irq_lock;
- uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+ uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
if (!uncore)
return -ENOMEM;
- mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
- if (!mmio_debug) {
- kfree(uncore);
+ irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+ if (!irq_lock)
return -ENOMEM;
- }
gt->uncore = uncore;
- gt->uncore->debug = mmio_debug;
+ gt->irq_lock = irq_lock;
- __intel_gt_init_early(gt);
+ intel_gt_common_init_early(gt);
}
intel_uncore_init_early(gt->uncore, gt);
+ intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
if (ret)
@@ -810,27 +818,17 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
return 0;
}
-static void
-intel_gt_tile_cleanup(struct intel_gt *gt)
-{
- intel_uncore_cleanup_mmio(gt->uncore);
-
- if (!gt_is_root(gt)) {
- kfree(gt->uncore->debug);
- kfree(gt->uncore);
- kfree(gt);
- }
-}
-
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_gt *gt = &i915->gt0;
+ const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
+ unsigned int i;
int ret;
- mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
phys_addr = pci_resource_start(pdev, mmio_bar);
/*
@@ -838,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
* and it has been already initialized early during probe
* in i915_driver_probe()
*/
+ gt->i915 = i915;
+ gt->name = "Primary GT";
+ gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
i915->gt[0] = gt;
- /* TODO: add more tiles */
+ if (!HAS_EXTRA_GT_LIST(i915))
+ return 0;
+
+ for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
+ gtdef->name != NULL;
+ i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
+ gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gt->i915 = i915;
+ gt->name = gtdef->name;
+ gt->type = gtdef->type;
+ gt->info.engine_mask = gtdef->engine_mask;
+ gt->info.id = i;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ if (GEM_WARN_ON(range_overflows_t(resource_size_t,
+ gtdef->mapping_base,
+ SZ_16M,
+ pci_resource_len(pdev, mmio_bar)))) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ switch (gtdef->type) {
+ case GT_TILE:
+ ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
+ break;
+
+ case GT_MEDIA:
+ ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
+ gtdef->gsi_offset);
+ break;
+
+ case GT_PRIMARY:
+ /* Primary GT should not appear in extra GT list */
+ default:
+ MISSING_CASE(gtdef->type);
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ goto err;
+
+ i915->gt[i] = gt;
+ }
+
return 0;
+
+err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+ intel_gt_release_all(i915);
+
+ return ret;
}
int intel_gt_tiles_init(struct drm_i915_private *i915)
@@ -868,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915)
struct intel_gt *gt;
unsigned int id;
- for_each_gt(gt, i915, id) {
- intel_gt_tile_cleanup(gt);
+ for_each_gt(gt, i915, id)
i915->gt[id] = NULL;
- }
}
void intel_gt_info_print(const struct intel_gt_info *info,
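
Sketch for illustration (not part of the patch): intel_gt_probe_all() above now walks a platform-supplied extra_gt_list, terminated by an entry with a NULL name, and sets each extra GT up according to its type. A standalone sketch of that sentinel-terminated table walk; struct layout, names and offsets are illustrative only:

#include <stdio.h>

enum gt_type { GT_TILE, GT_MEDIA };

struct gt_def {
	const char *name;	/* NULL name terminates the list */
	enum gt_type type;
	unsigned long mapping_base;
};

static const struct gt_def extra_gt_list[] = {
	{ .name = "Standalone Media GT", .type = GT_MEDIA, .mapping_base = 0x380000 },
	{ .name = NULL },	/* sentinel */
};

static int setup_gt(const struct gt_def *def)
{
	switch (def->type) {
	case GT_TILE:
		printf("tile GT '%s' at %#lx\n", def->name, def->mapping_base);
		return 0;
	case GT_MEDIA:
		printf("media GT '%s' at %#lx\n", def->name, def->mapping_base);
		return 0;
	default:
		return -1;	/* unknown type: abort probing */
	}
}

int main(void)
{
	const struct gt_def *def;
	unsigned int i;

	for (i = 0, def = &extra_gt_list[0]; def->name; def = &extra_gt_list[++i]) {
		if (setup_gt(def))
			return 1;
	}
	return 0;
}
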
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 40b06adf509a..2ee582e287c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
-void intel_root_gt_init_early(struct drm_i915_private *i915);
+void intel_gt_common_init_early(struct intel_gt *gt);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
@@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
-
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index d5d1b04dbcad..3f656d3dba9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore)
return base_freq + frac_freq;
}
-static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock =
- (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 rpm_config_reg)
{
@@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
}
}
-static u32 read_clock_frequency(struct intel_uncore *uncore)
+static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * Note that on gen11+, the clock frequency may be reconfigured.
+ * We do not, and we assume nobody else does.
+ *
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
- if (GRAPHICS_VER(uncore->i915) <= 4) {
- /*
- * PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configuration”
- * (“CLKCFG”) MCHBAR register)
- */
- return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
- } else if (GRAPHICS_VER(uncore->i915) <= 8) {
/*
- * PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours).
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycle).
*/
- return f12_5_mhz;
- } else if (GRAPHICS_VER(uncore->i915) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(uncore);
- } else {
- freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (GRAPHICS_VER(uncore->i915) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+}
+
+static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
/*
- * First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycle).
*/
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(uncore);
- } else {
- u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (GRAPHICS_VER(uncore->i915) >= 11)
- freq = gen11_get_crystal_clock_freq(uncore, c0);
- else
- freq = gen9_get_crystal_clock_freq(uncore, c0);
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
}
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
+ return freq;
}
-void intel_gt_init_clock_frequency(struct intel_gt *gt)
+static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
/*
- * Note that on gen11+, the clock frequency may be reconfigured.
- * We do not, and we assume nobody else does.
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours).
+ */
+ return 12500000;
+}
+
+static u32 gen2_read_clock_frequency(struct intel_uncore *uncore)
+{
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
*/
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ if (GRAPHICS_VER(uncore->i915) >= 11)
+ return gen11_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 9)
+ return gen9_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 5)
+ return gen5_read_clock_frequency(uncore);
+ else
+ return gen2_read_clock_frequency(uncore);
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
gt->clock_frequency = read_clock_frequency(gt->uncore);
- if (gt->clock_frequency)
- gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) == 11)
gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+ else if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 3a72d4fd0214..f26882fdc24c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt,
u32 timeout_ts;
u32 ident;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
@@ -59,11 +59,17 @@ static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
const u16 iir)
{
+ struct intel_gt *media_gt = gt->i915->media_gt;
+
if (instance == OTHER_GUC_INSTANCE)
return guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
+ return guc_irq_handler(&media_gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
+ if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
+ return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(&gt->pxp, iir);
@@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
{
struct intel_engine_cs *engine;
+ /*
+ * Platforms with standalone media have their media engines in another
+ * GT.
+ */
+ if (MEDIA_VER(gt->i915) >= 13 &&
+ (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) {
+ if (!gt->i915->media_gt)
+ goto err;
+
+ gt = gt->i915->media_gt;
+ }
+
if (instance <= MAX_ENGINE_INSTANCE)
engine = gt->engine_class[class][instance];
else
@@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
if (likely(engine))
return intel_engine_cs_irq(engine, iir);
+err:
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
}
@@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
unsigned long intr_dw;
unsigned int bit;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
@@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
unsigned int bank;
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(gt, bank);
}
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
}
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
@@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt,
void __iomem * const regs = gt->uncore->regs;
u32 dw;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
@@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
if (!HAS_L3_DPF(gt->i915))
return;
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
gt->i915->l3_parity.which_slice |= 1 << 1;
@@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index a334787a4939..6c9a46452364 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -55,6 +55,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
+/**
+ * with_intel_gt_pm_if_awake - if the GT is awake, get a reference to keep
+ * it from sleeping, run some code and then asynchronously put the
+ * reference away.
+ *
+ * @gt: pointer to the gt
+ * @wf: pointer to a temporary wakeref.
+ */
#define with_intel_gt_pm_if_awake(gt, wf) \
for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
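Both macros above rely on the "for (acquire; held; release)" idiom so the body runs at most once with the reference held. A standalone sketch of the same pattern, with hypothetical resource helpers standing in for the intel_gt_pm calls:

#include <stdbool.h>
#include <stdio.h>

static bool resource_get_if_awake(void) { return true; }   /* pretend success */
static void resource_put(void)          { printf("put\n"); }

/* Run the loop body once if the reference was taken, then release it. */
#define with_resource_if_awake(wf) \
	for ((wf) = resource_get_if_awake(); (wf); resource_put(), (wf) = 0)

int main(void)
{
	int wf;

	with_resource_if_awake(wf)
		printf("body runs with the reference held\n");

	return 0;
}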
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 40bdd4cb629f..108b9e76c32e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -504,8 +504,8 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_puts(p, "no P-state info available\n");
}
- drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
- drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
+ drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
intel_runtime_pm_put(uncore->rpm, wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index 11060f5a4c89..52f2a28b2058 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
new_val = gt->pm_imr;
new_val &= ~interrupt_mask;
@@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
struct intel_uncore *uncore = gt->uncore;
i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
intel_uncore_write(uncore, reg, reset_mask);
intel_uncore_write(uncore, reg, reset_mask);
@@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt)
void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
gt->pm_ier |= enable_mask;
write_pm_ier(gt);
@@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
gt->pm_ier &= ~disable_mask;
gen6_gt_pm_mask_irq(gt, disable_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 60d6eb5f245b..2275ee47da95 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -259,6 +259,9 @@
#define GEN9_PREEMPT_GPGPU_COMMAND_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(1, 0)
#define GEN9_PREEMPT_GPGPU_LEVEL_MASK GEN9_PREEMPT_GPGPU_LEVEL(1, 1)
+#define DRAW_WATERMARK _MMIO(0x26c0)
+#define VERT_WM_VAL REG_GENMASK(9, 0)
+
#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */
#define RENDER_HWS_PGA_GEN7 _MMIO(0x4080)
@@ -374,6 +377,9 @@
#define CHICKEN_RASTER_1 _MMIO(0x6204)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
+#define CHICKEN_RASTER_2 _MMIO(0x6208)
+#define TBIMR_FAST_CLIP REG_BIT(5)
+
#define VFLSKPD _MMIO(0x62a8)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
@@ -1007,6 +1013,8 @@
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
+#define GUCPMTIMESTAMP _MMIO(0xc3e8)
+
#define __GEN9_RCS0_MOCS0 0xc800
#define GEN9_GFX_MOCS(i) _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
#define __GEN9_VCS0_MOCS0 0xc900
@@ -1078,6 +1086,7 @@
#define GEN10_SAMPLER_MODE _MMIO(0xe18c)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define GEN11_SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
@@ -1101,6 +1110,8 @@
#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4)
+#define THREAD_EX_ARB_MODE REG_GENMASK(3, 2)
+#define THREAD_EX_ARB_MODE_RR_AFTER_DEP REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -1123,6 +1134,8 @@
#define RT_CTRL _MMIO(0xe530)
#define DIS_NULL_QUERY REG_BIT(10)
+#define STACKID_CTRL REG_GENMASK(6, 5)
+#define STACKID_CTRL_512 REG_FIELD_PREP(STACKID_CTRL, 0x2)
#define EU_PERF_CNTL1 _MMIO(0xe558)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
@@ -1541,6 +1554,8 @@
#define OTHER_GTPM_INSTANCE 1
#define OTHER_KCR_INSTANCE 4
#define OTHER_GSC_INSTANCE 6
+#define OTHER_MEDIA_GUC_INSTANCE 16
+#define OTHER_MEDIA_GTPM_INSTANCE 17
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
@@ -1565,4 +1580,12 @@
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+/*
+ * Standalone Media's non-engine GT registers are located at their regular GT
+ * offsets plus 0x380000. This extra offset is stored inside the intel_uncore
+ * structure so that the existing code can be used for both GTs without
+ * modification.
+ */
+#define MTL_MEDIA_GSI_BASE 0x380000
+
#endif /* __INTEL_GT_REGS__ */
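The MTL_MEDIA_GSI_BASE comment above describes the standalone media GT's non-engine registers living at their usual offsets plus a fixed base carried in the uncore structure. A minimal sketch of how such a per-GT offset could be applied before an MMIO access; the struct and function names here are hypothetical, not the i915 implementation:

#include <stdint.h>
#include <stdio.h>

#define MTL_MEDIA_GSI_BASE 0x380000u

struct fake_uncore {
	uint32_t gsi_offset;	/* 0 for the primary GT, MTL_MEDIA_GSI_BASE for media */
};

static uint32_t gsi_reg_offset(const struct fake_uncore *uncore, uint32_t reg)
{
	return reg + uncore->gsi_offset;
}

int main(void)
{
	struct fake_uncore primary = { .gsi_offset = 0 };
	struct fake_uncore media   = { .gsi_offset = MTL_MEDIA_GSI_BASE };
	uint32_t reg = 0xc3e8;	/* GUCPMTIMESTAMP offset, from the hunk above */

	printf("primary: %#x, media: %#x\n",
	       gsi_reg_offset(&primary, reg), gsi_reg_offset(&media, reg));
	return 0;
}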
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 9e4ebf53379b..d651ccd0ab20 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -22,11 +22,6 @@ bool is_object_gt(struct kobject *kobj)
return !strncmp(kobj->name, "gt", 2);
}
-static struct intel_gt *kobj_to_gt(struct kobject *kobj)
-{
- return container_of(kobj, struct intel_gt, sysfs_gt);
-}
-
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
const char *name)
{
@@ -101,6 +96,10 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
+ gt->sysfs_defaults = kobject_create_and_add(".defaults", &gt->sysfs_gt);
+ if (!gt->sysfs_defaults)
+ goto exit_fail;
+
intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
return;
@@ -113,5 +112,6 @@ exit_fail:
void intel_gt_sysfs_unregister(struct intel_gt *gt)
{
+ kobject_put(gt->sysfs_defaults);
kobject_put(&gt->sysfs_gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
index a99aa7e8b01a..6232923a420d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -10,6 +10,7 @@
#include <linux/kobject.h>
#include "i915_gem.h" /* GEM_BUG_ON() */
+#include "intel_gt_types.h"
struct intel_gt;
@@ -22,6 +23,11 @@ intel_gt_create_kobj(struct intel_gt *gt,
struct kobject *dir,
const char *name);
+static inline struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct intel_gt, sysfs_gt);
+}
+
void intel_gt_sysfs_register(struct intel_gt *gt);
void intel_gt_sysfs_unregister(struct intel_gt *gt);
struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 73a8b46e0234..180dd6f3ef57 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
-static const struct attribute *freq_attrs[] = {
- &dev_attr_punit_req_freq_mhz.attr,
+static const struct attribute *throttle_reason_attrs[] = {
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
@@ -727,6 +726,34 @@ static const struct attribute *media_perf_power_attrs[] = {
NULL
};
+static ssize_t
+default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.min_freq);
+}
+
+static struct kobj_attribute default_min_freq_mhz =
+__ATTR(rps_min_freq_mhz, 0444, default_min_freq_mhz_show, NULL);
+
+static ssize_t
+default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_gt *gt = kobj_to_gt(kobj->parent);
+
+ return sysfs_emit(buf, "%u\n", gt->defaults.max_freq);
+}
+
+static struct kobj_attribute default_max_freq_mhz =
+__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
+
+static const struct attribute * const rps_defaults_attrs[] = {
+ &default_min_freq_mhz.attr,
+ &default_max_freq_mhz.attr,
+ NULL
+};
+
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
const struct attribute * const *attrs)
{
@@ -763,12 +790,20 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (!is_object_gt(kobj))
return;
- ret = sysfs_create_files(kobj, freq_attrs);
+ ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
if (ret)
drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
+ "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
gt->info.id, ERR_PTR(ret));
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+ }
+
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
@@ -776,4 +811,10 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
"failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
gt->info.id, ERR_PTR(ret));
}
+
+ ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to add gt%u rps defaults (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
}
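The new read-only attributes are created under a ".defaults" kobject next to the other per-gt files, so they can be read from userspace like any other sysfs attribute. A small sketch of such a reader; the path is an assumption for a typical single-card system, not something the patch guarantees:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/drm/card0/gt/gt0/.defaults/rps_max_freq_mhz";
	unsigned int mhz;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &mhz) != 1) {
		perror(path);
		if (f)
			fclose(f);
		return 1;
	}
	fclose(f);
	printf("default max GT frequency: %u MHz\n", mhz);
	return 0;
}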
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 3804a583382b..f19c2de77ff6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -76,8 +76,22 @@ enum intel_submission_method {
INTEL_SUBMISSION_GUC,
};
+struct gt_defaults {
+ u32 min_freq;
+ u32 max_freq;
+};
+
+enum intel_gt_type {
+ GT_PRIMARY,
+ GT_TILE,
+ GT_MEDIA,
+};
+
struct intel_gt {
struct drm_i915_private *i915;
+ const char *name;
+ enum intel_gt_type type;
+
struct intel_uncore *uncore;
struct i915_ggtt *ggtt;
@@ -127,6 +141,20 @@ struct intel_gt {
struct intel_wakeref wakeref;
atomic_t user_wakeref;
+ /**
+ * Protects access to the lmem userfault list.
+ *
+ * Outside of the runtime suspend path, access to @lmem_userfault_list
+ * always requires first grabbing the runtime pm wakeref, to ensure we
+ * can't race against runtime suspend. Once we have that, we also need to
+ * grab @lmem_userfault_lock, at which point we have exclusive access.
+ *
+ * The runtime suspend path is special since it doesn't really hold any
+ * locks, but instead has exclusive access by virtue of all other accesses
+ * requiring the runtime pm wakeref.
+ */
+ struct mutex lmem_userfault_lock;
+ struct list_head lmem_userfault_list;
+
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
@@ -142,6 +170,9 @@ struct intel_gt {
*/
intel_wakeref_t awake;
+ /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
u32 clock_frequency;
u32 clock_period_ns;
@@ -149,7 +180,7 @@ struct intel_gt {
struct intel_rc6 rc6;
struct intel_rps rps;
- spinlock_t irq_lock;
+ spinlock_t *irq_lock;
u32 gt_imr;
u32 pm_ier;
u32 pm_imr;
@@ -251,6 +282,18 @@ struct intel_gt {
/* gt/gtN sysfs */
struct kobject sysfs_gt;
+
+ /* sysfs defaults per gt */
+ struct gt_defaults defaults;
+ struct kobject *sysfs_defaults;
+};
+
+struct intel_gt_definition {
+ enum intel_gt_type type;
+ char *name;
+ u32 mapping_base;
+ u32 gsi_offset;
+ intel_engine_mask_t engine_mask;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b67831833c9a..2eaeba14319e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -405,6 +405,9 @@ void free_scratch(struct i915_address_space *vm)
{
int i;
+ if (!vm->scratch[0])
+ return;
+
for (i = 0; i <= vm->top; i++)
i915_gem_object_put(vm->scratch[i]);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index e639434e97fd..c0ca53cba9f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -386,9 +386,6 @@ struct i915_ggtt {
*/
struct list_head userfault_list;
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
struct mutex error_mutex;
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 14fe65812e42..1d19c073ba2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -12,6 +12,7 @@
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_rps.h"
struct ia_constants {
unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;
- if (rps->max_freq <= rps->min_freq)
- return false;
-
consts->max_ia_freq = cpu_max_MHz();
consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
- consts->min_gpu_freq = rps->min_freq;
- consts->max_gpu_freq = rps->max_freq;
- if (GRAPHICS_VER(i915) >= 9) {
- /* Convert GT frequency to 50 HZ units */
- consts->min_gpu_freq /= GEN9_FREQ_SCALER;
- consts->max_gpu_freq /= GEN9_FREQ_SCALER;
- }
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
return true;
}
@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
return;
/*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't accidentally get into an infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
+ /*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eec73c66406c..3955292483a6 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -662,6 +662,21 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
return -1;
}
+static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return 0x80;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x70;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return 0x64;
+ else if (GRAPHICS_VER(engine->i915) >= 8 &&
+ engine->class == RENDER_CLASS)
+ return 0xc4;
+ else
+ return -1;
+}
+
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
@@ -768,6 +783,7 @@ static void init_common_regs(u32 * const regs,
bool inhibit)
{
u32 ctl;
+ int loc;
ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -779,6 +795,10 @@ static void init_common_regs(u32 * const regs,
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
+
+ loc = lrc_ring_bb_offset(engine);
+ if (loc != -1)
+ regs[loc + 1] = 0;
}
static void init_wa_bb_regs(u32 * const regs,
@@ -1242,6 +1262,23 @@ dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
return cs;
}
+/*
+ * The bspec's tuning guide asks us to program a vertical watermark value of
+ * 0x3FF. However, this register is not saved/restored properly by the
+ * hardware, so we must apply the desired value via an INDIRECT_CTX
+ * batch buffer to ensure the value takes effect properly. All other bits
+ * in this register should remain at 0 (the hardware default).
+ */
+static u32 *
+dg2_emit_draw_watermark_setting(u32 *cs)
+{
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
+ *cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);
+
+ return cs;
+}
+
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
@@ -1261,7 +1298,12 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915))
- cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_GFX_CCS_AUX_NV);
+
+ /* Wa_16014892111 */
+ if (IS_DG2(ce->engine->i915))
+ cs = dg2_emit_draw_watermark_setting(cs);
return cs;
}
@@ -1283,9 +1325,11 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915)) {
if (ce->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_VD0_AUX_NV);
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_VE0_AUX_NV);
}
return cs;
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2b10b96b17b5..aaaf1906026c 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -511,44 +511,16 @@ static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
return cmd;
}
-static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
-{
- u32 num_cmds, num_blks, total_size;
-
- if (!GET_CCS_BYTES(i915, size))
- return 0;
-
- /*
- * XY_CTRL_SURF_COPY_BLT transfers CCS in 256 byte
- * blocks. one XY_CTRL_SURF_COPY_BLT command can
- * transfer upto 1024 blocks.
- */
- num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
- NUM_CCS_BYTES_PER_BLOCK);
- num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
- total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
-
- /*
- * Adding a flush before and after XY_CTRL_SURF_COPY_BLT
- */
- total_size += 2 * MI_FLUSH_DW_SIZE;
-
- return total_size;
-}
-
static int emit_copy_ccs(struct i915_request *rq,
u32 dst_offset, u8 dst_access,
u32 src_offset, u8 src_access, int size)
{
struct drm_i915_private *i915 = rq->engine->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
- u32 num_ccs_blks, ccs_ring_size;
+ u32 num_ccs_blks;
u32 *cs;
- ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
- WARN_ON(!ccs_ring_size);
-
- cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+ cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -583,8 +555,7 @@ static int emit_copy_ccs(struct i915_request *rq,
FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
- if (ccs_ring_size & 1)
- *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -638,40 +609,38 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
{
- int len = 0;
+ u64 len = 0;
while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg);
sg = sg_next(sg);
- };
+ }
return len;
}
-static void
+static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
- int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
- if (ccs_bytes_to_cpy) {
- if (!src_is_lmem)
- /*
- * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
- * will be taken for the blt. in Flat-ccs supported
- * platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
- * for main memory
- */
- *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
- } else { /* ccs handling is not required */
- *src_sz = CHUNK_SZ;
- }
+ if (ccs_bytes_to_cpy && !src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS capable
+ * platforms an smem object will have more pages than
+ * required for main memory, hence limit it to the
+ * required size for main memory
+ */
+ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+ else
+ return CHUNK_SZ;
}
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
- u32 len;
+ u64 len;
do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,12 +671,12 @@ intel_context_migrate_copy(struct intel_context *ce,
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
- u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
enum i915_cache_level ccs_cache_level;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
- int src_sz, dst_sz;
+ u64 src_sz, dst_sz;
bool ccs_is_src, overwrite_ccs;
int err;
@@ -790,8 +759,8 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- calculate_chunk_sz(i915, src_is_lmem, &src_sz,
- bytes_to_cpy, ccs_bytes_to_cpy);
+ src_sz = calculate_chunk_sz(i915, src_is_lmem,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
src_offset, src_sz);
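The hunks above widen the scatter-list length accounting from int/u32 to u64, since a 32-bit accumulator wraps once the total mapped length crosses 4 GiB (and a signed one misbehaves past 2 GiB), which is easy to hit with large local-memory objects. A standalone sketch of the wraparound, using unsigned types so the behaviour stays well defined in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk = 256ull << 20;	/* 256 MiB per sg entry */
	uint32_t narrow = 0;		/* 32-bit accumulator, wraps at 4 GiB */
	uint64_t wide = 0;

	for (int i = 0; i < 20; i++) {	/* 20 * 256 MiB = 5 GiB total */
		narrow += (uint32_t)chunk;
		wide += chunk;
	}
	printf("u32 total: %u, u64 total: %llu\n",
	       narrow, (unsigned long long)wide);
	return 0;
}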
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 6ee8d1127016..7ecfa672f738 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -312,7 +312,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = i915->drm.dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
dma_resv_init(&ppgtt->vm._resv);
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index aa6aed837194..f3ad93db0b21 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -4,8 +4,10 @@
*/
#include "i915_drv.h"
+#include "i915_pci.h"
#include "i915_reg.h"
#include "intel_memory_region.h"
+#include "intel_pci_config.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
@@ -45,7 +47,6 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
}
-#define LMEM_BAR_NUM 2
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
@@ -56,15 +57,14 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
u32 pci_cmd;
int i;
- current_size = roundup_pow_of_two(pci_resource_len(pdev, LMEM_BAR_NUM));
+ current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
if (i915->params.lmem_bar_size) {
u32 bar_sizes;
rebar_size = i915->params.lmem_bar_size *
(resource_size_t)SZ_1M;
- bar_sizes = pci_rebar_get_possible_sizes(pdev,
- LMEM_BAR_NUM);
+ bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
if (rebar_size == current_size)
return;
@@ -107,7 +107,7 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_write_config_dword(pdev, PCI_COMMAND,
pci_cmd & ~PCI_COMMAND_MEMORY);
- _resize_bar(i915, LMEM_BAR_NUM, rebar_size);
+ _resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
@@ -202,6 +202,9 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (!IS_DGFX(i915))
return ERR_PTR(-ENODEV);
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
if (HAS_FLAT_CCS(i915)) {
resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
@@ -236,8 +239,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
mul_u32_u32(i915->params.lmem_size, SZ_1M));
}
- io_start = pci_resource_start(pdev, 2);
- io_size = min(pci_resource_len(pdev, 2), lmem_size);
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
+ io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size);
if (!io_size)
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index c68d36fb5bbd..b36674356986 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -776,7 +776,7 @@ static void revoke_mmaps(struct intel_gt *gt)
continue;
node = &vma->mmo->vma_node;
- vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1281,9 +1281,6 @@ static void intel_gt_reset_global(struct intel_gt *gt,
intel_wedge_on_timeout(&w, gt, 5 * HZ) {
intel_display_prepare_reset(gt->i915);
- /* Flush everyone using a resource about to be clobbered */
- synchronize_srcu_expedited(&gt->reset.backoff_srcu);
-
intel_gt_reset(gt, engine_mask, reason);
intel_display_finish_reset(gt->i915);
@@ -1392,6 +1389,9 @@ void intel_gt_handle_error(struct intel_gt *gt,
}
}
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
+
intel_gt_reset_global(gt, engine_mask, msg);
if (!intel_uc_uses_guc_submission(&gt->uc)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fb3f57ee450b..6b86250c31ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps)
rps_reset_ei(rps);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
@@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
if (GRAPHICS_VER(gt->i915) >= 11)
gen11_rps_reset_interrupts(rps);
else
gen6_rps_reset_interrupts(rps);
rps->pm_iir = 0;
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void rps_disable_interrupts(struct intel_rps *rps)
@@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
@@ -1107,7 +1107,12 @@ void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *c
caps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ if (GRAPHICS_VER(i915) >= 10)
+ caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
+ intel_uncore_read(to_gt(i915)->uncore,
+ GEN10_FREQ_INFO_REC));
+ else
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
caps->min_freq = (rp_state_cap >> 16) & 0xff;
}
@@ -1546,6 +1551,9 @@ void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ if (!intel_rps_is_enabled(rps))
+ return;
+
intel_rps_clear_enabled(rps);
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);
@@ -1789,10 +1797,10 @@ static void rps_work(struct work_struct *work)
int new_freq, adj, min, max;
u32 pm_iir = 0;
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
if (!pm_iir && !client_boost)
@@ -1865,9 +1873,9 @@ static void rps_work(struct work_struct *work)
mutex_unlock(&rps->lock);
out:
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_unmask_irq(gt, rps->pm_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1875,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
struct intel_gt *gt = rps_to_gt(rps);
const u32 events = rps->pm_events & pm_iir;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
if (unlikely(!events))
return;
@@ -1895,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
events = pm_iir & rps->pm_events;
if (events) {
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
GT_TRACE(gt, "irq events:%x\n", events);
@@ -1903,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
rps->pm_iir |= events;
schedule_work(&rps->work);
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
}
if (GRAPHICS_VER(gt->i915) >= 8)
@@ -1979,7 +1987,9 @@ void intel_rps_init(struct intel_rps *rps)
/* Derive initial user preferences/limits from the hardware limits */
rps->max_freq_softlimit = rps->max_freq;
+ rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
rps->min_freq_softlimit = rps->min_freq;
+ rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
/* After setting max-softlimit, find the overclock max freq */
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
@@ -2126,6 +2136,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in raw units
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in raw, platform-specific units. On newer
+ * platforms one raw unit corresponds to 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2214,6 +2249,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in raw units
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in raw, platform-specific units. On newer
+ * platforms one raw unit corresponds to 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 1e8d56491308..4509dfdc52e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
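Per the kerneldoc above, the new *_raw_freq helpers report frequencies in raw hardware units, which on newer platforms are 50 MHz steps (GT_FREQUENCY_MULTIPLIER). A small sketch of the conversion; the constant is taken from that description and should be treated as illustrative rather than authoritative:

#include <stdint.h>
#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50u	/* MHz per raw unit on newer platforms */

static uint32_t raw_to_mhz(uint32_t raw)
{
	return raw * GT_FREQUENCY_MULTIPLIER;
}

int main(void)
{
	/* e.g. a raw limit of 24 corresponds to 1200 MHz */
	printf("raw 24 -> %u MHz\n", raw_to_mhz(24));
	return 0;
}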
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
new file mode 100644
index 000000000000..e8f3d18c12b8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "i915_drv.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_sa_media.h"
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore;
+
+ uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->gsi_offset = gsi_offset;
+
+ gt->irq_lock = to_gt(i915)->irq_lock;
+ intel_gt_common_init_early(gt);
+ intel_uncore_init_early(uncore, gt);
+
+ /*
+ * Standalone media shares the general MMIO space with the primary
+ * GT. We'll re-use the primary GT's mapping.
+ */
+ uncore->regs = i915->uncore.regs;
+ if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
+ return -EIO;
+
+ gt->uncore = uncore;
+ gt->phys_addr = phys_addr;
+
+ /*
+ * For current platforms we can assume there's only a single
+ * media GT and cache it for quick lookup.
+ */
+ drm_WARN_ON(&i915->drm, i915->media_gt);
+ i915->media_gt = gt;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.h b/drivers/gpu/drm/i915/gt/intel_sa_media.h
new file mode 100644
index 000000000000..3afb310de932
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __INTEL_SA_MEDIA__
+#define __INTEL_SA_MEDIA__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset);
+
+#endif /* __INTEL_SA_MEDIA__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index c6d3050604c8..66f21c735d54 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -382,7 +382,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
static void gen9_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_device_info *info = mkwrite_device_info(i915);
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
u32 fuse2, eu_disable, subslice_mask;
@@ -471,10 +470,10 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
if (IS_GEN9_LP(i915)) {
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
+ RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
sseu->min_eu_in_pool = 0;
- if (info->has_pooled_eu) {
+ if (HAS_POOLED_EU(i915)) {
if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
sseu->min_eu_in_pool = 3;
else if (IS_SS_DISABLED(1))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e8111fce56d0..6d2003d598e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -568,6 +568,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_add(wal,
@@ -2102,13 +2103,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
/* Wa_1509235366:dg2 */
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
-
- /*
- * The following are not actually "workarounds" but rather
- * recommended tuning settings documented in the bspec's
- * performance guide section.
- */
- wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
}
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
@@ -2119,6 +2113,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_1509727124:dg2 */
+ wa_masked_en(wal, GEN10_SAMPLER_MODE,
+ SC_DISABLE_POWER_OPTIMIZATION_EBB);
+ }
+
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
@@ -2195,15 +2196,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) ||
- IS_DG2_G11(i915)) {
- /* Wa_22012654132:dg2 */
- wa_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
- 0 /* write-only, so skip validation */,
- true);
- }
-
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
@@ -2397,7 +2389,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
FF_DOP_CLOCK_GATE_DISABLE);
}
- if (HAS_PERCTX_PREEMPT_CTRL(i915)) {
+ if (IS_GRAPHICS_VER(i915, 9, 12)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -2670,6 +2662,56 @@ ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
}
/*
+ * The bspec performance guide has recommended MMIO tuning settings. These
+ * aren't truly "workarounds" but we want to program them with the same
+ * workaround infrastructure to ensure that they're automatically added to
+ * the GuC save/restore lists, re-applied at the right times, and checked for
+ * any conflicting programming requested by real workarounds.
+ *
+ * Programming settings should be added here only if their registers are not
+ * part of an engine's register state context. If a register is part of a
+ * context, then any tuning settings should be programmed in an appropriate
+ * function invoked by __intel_engine_init_ctx_wa().
+ */
+static void
+add_render_compute_tuning_settings(struct drm_i915_private *i915,
+ struct i915_wa_list *wal)
+{
+ if (IS_PONTEVECCHIO(i915)) {
+ wa_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ }
+
+ if (IS_DG2(i915)) {
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
+
+ /*
+ * This is also listed as Wa_22012654132 for certain DG2
+ * steppings, but the tuning setting programming is a superset
+ * since it applies to all DG2 variants and steppings.
+ *
+ * Note that register 0xE420 is write-only and cannot be read
+ * back for verification on DG2 (due to Wa_14012342262), so
+ * we need to explicitly skip the readback.
+ */
+ wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
+ }
+
+ /*
+ * This tuning setting proves beneficial only on ATS-M designs; the
+ * default "age based" setting is optimal on regular DG2 and other
+ * platforms.
+ */
+ if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
+ wa_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
+ THREAD_EX_ARB_MODE_RR_AFTER_DEP);
+}
+
+/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
* specific engine. Since all render+compute engines get reset
@@ -2683,14 +2725,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_PONTEVECCHIO(i915)) {
- /*
- * The following is not actually a "workaround" but rather
- * a recommended tuning setting documented in the bspec's
- * performance guide section.
- */
- wa_write(wal, XEHPC_L3SCRUB, SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ add_render_compute_tuning_settings(i915, wal);
+ if (IS_PONTEVECCHIO(i915)) {
/* Wa_16016694945 */
wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
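The tuning settings above lean on REG_GENMASK()/REG_FIELD_PREP() to pack a field value into its bit range, e.g. THREAD_EX_ARB_MODE occupies bits 3:2 and the "round-robin after dependency" mode is value 0x2 within that field. A standalone re-implementation of just that arithmetic (the macros here are simplified stand-ins, and __builtin_ctz assumes GCC/Clang):

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	uint32_t mode_mask = GENMASK32(3, 2);			/* 0x0000000c */
	uint32_t rr_after_dep = FIELD_PREP32(mode_mask, 0x2);	/* 0x00000008 */

	printf("mask %#x, value %#x\n", mode_mask, rr_after_dep);
	return 0;
}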
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 09f8cd2d0e2c..1e08b2473b99 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -2077,7 +2077,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
goto out;
}
- intel_context_set_banned(rq->context);
+ intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2136,7 +2136,7 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
if (err)
goto out;
- intel_context_set_banned(rq[1]->context);
+ intel_context_ban(rq[1]->context, rq[1]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2219,7 +2219,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- intel_context_set_banned(rq[2]->context);
+ intel_context_ban(rq[2]->context, rq[2]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -2234,7 +2234,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
goto out;
}
- if (rq[1]->fence.error != 0) {
+ /*
+ * The behavior differs depending on whether semaphores are available.
+ * With semaphores the subsequent request is already on the hardware and
+ * is not cancelled, while without them it is held in the driver and
+ * cancelled.
+ */
+ if (intel_engine_has_semaphores(rq[1]->engine) &&
+ rq[1]->fence.error != 0) {
pr_err("Normal inflight1 request did not complete\n");
err = -EINVAL;
goto out;
@@ -2282,7 +2288,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
goto out;
}
- intel_context_set_banned(rq->context);
+ intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 6493265d5f64..7f3bb1d34dfb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1302,13 +1302,15 @@ static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
+ engine = intel_selftest_find_any_engine(gt);
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
@@ -1432,7 +1434,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
int (*fn)(void *),
unsigned int flags)
{
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
@@ -1444,6 +1446,8 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
return 0;
+ engine = intel_selftest_find_any_engine(gt);
+
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
@@ -1819,12 +1823,14 @@ static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine = gt->engine[RCS0];
+ struct intel_engine_cs *engine;
struct hang h;
struct i915_request *rq;
struct i915_gpu_coredump *error;
int err;
+ engine = intel_selftest_find_any_engine(gt);
+
/* Check that we can issue a global GPU and engine reset */
if (!intel_has_reset_engine(gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 1109088fe8f6..82d3f8058995 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -27,6 +27,9 @@
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+#define LRI_HEADER MI_INSTR(0x22, 0)
+#define LRI_LENGTH_MASK GENMASK(7, 0)
+
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
@@ -202,7 +205,7 @@ static int live_lrc_layout(void *arg)
continue;
}
- if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
err = -EINVAL;
@@ -357,6 +360,11 @@ static int live_lrc_fixed(void *arg)
lrc_ring_cmd_buf_cctl(engine),
"RING_CMD_BUF_CCTL"
},
+ {
+ i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
+ lrc_ring_bb_offset(engine),
+ "RING_BB_OFFSET"
+ },
{ },
}, *t;
u32 *hw;
@@ -987,18 +995,40 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /*
+ * Keep it simple, skip parsing complex commands
+ *
+ * At present, there are no more MI_LOAD_REGISTER_IMM
+ * commands after the first 3D state command. Rather
+ * than include a table (see i915_cmd_parser.c) of all
+ * the possible commands and their instruction lengths
+ * (or mask for variable length instructions), assume
+ * we have gathered the complete list of registers and
+ * bail out.
+ */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ /* Assume all other MI commands match LRI length mask */
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1150,18 +1180,29 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
*cs++ = MI_LOAD_REGISTER_IMM(len);
@@ -1292,18 +1333,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1343,6 +1395,30 @@ err_A0:
return err;
}
+static struct i915_vma *
+create_result_vma(struct i915_address_space *vm, unsigned long sz)
+{
+ struct i915_vma *vma;
+ void *ptr;
+
+ vma = create_user_vma(vm, sz);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Set the results to a known value distinct from the poison */
+ ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ i915_vma_put(vma);
+ return ERR_CAST(ptr);
+ }
+
+ memset(ptr, POISON_INUSE, vma->size);
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return vma;
+}
+
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
@@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
goto err_A;
}
- ref[0] = create_user_vma(A->vm, SZ_64K);
+ ref[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[0])) {
err = PTR_ERR(ref[0]);
goto err_B;
}
- ref[1] = create_user_vma(A->vm, SZ_64K);
+ ref[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[1])) {
err = PTR_ERR(ref[1]);
goto err_ref0;
@@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
i915_request_put(rq);
- result[0] = create_user_vma(A->vm, SZ_64K);
+ result[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[0])) {
err = PTR_ERR(result[0]);
goto err_ref1;
}
- result[1] = create_user_vma(A->vm, SZ_64K);
+ result[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[1])) {
err = PTR_ERR(result[1]);
goto err_result0;
@@ -1408,18 +1484,17 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
err = poison_registers(B, poison, sema);
- if (err) {
- WRITE_ONCE(*sema, -1);
- i915_request_put(rq);
- goto err_result1;
- }
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- i915_request_put(rq);
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("%s(%s): wait for results timed out\n",
+ __func__, engine->name);
err = -ETIME;
- goto err_result1;
}
+
+ /* Always cancel the semaphore wait, just in case the GPU gets stuck */
+ WRITE_ONCE(*sema, -1);
i915_request_put(rq);
+ if (err)
+ goto err_result1;
err = compare_isolation(engine, ref, result, A, poison);
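The context-image walkers modified above treat a dword as an MI_LOAD_REGISTER_IMM header when bits 31:23 match LRI_HEADER, read the payload length from bits 7:0, and then consume (len + 1) / 2 register/value pairs. A standalone sketch of that decode loop over a toy buffer (not the selftest code itself; the toy image is made up):

#include <stdint.h>
#include <stdio.h>

#define LRI_HEADER	(0x22u << 23)	/* MI_INSTR(0x22, 0), bits 31:23 */
#define LRI_HEADER_MASK	(0x1ffu << 23)	/* GENMASK(31, 23) */
#define LRI_LENGTH_MASK	0xffu		/* GENMASK(7, 0) */

static void decode_lri(const uint32_t *hw, unsigned int dwords)
{
	unsigned int dw = 0;

	while (dw < dwords) {
		uint32_t len = hw[dw] & LRI_LENGTH_MASK;

		if (hw[dw] == 0) {		/* padding */
			dw++;
			continue;
		}
		if ((hw[dw] & LRI_HEADER_MASK) != LRI_HEADER) {
			dw += len + 2;		/* skip a non-LRI MI command */
			continue;
		}

		dw++;				/* step past the LRI header */
		len = (len + 1) / 2;		/* register/value pairs */
		while (len--) {
			printf("reg %#x <- %#x\n", hw[dw], hw[dw + 1]);
			dw += 2;
		}
	}
}

int main(void)
{
	/* Toy image: one LRI writing two (offset, value) pairs, then padding */
	const uint32_t img[] = {
		LRI_HEADER | 3, 0x2244, 0x1, 0x2248, 0x2, 0, 0,
	};

	decode_lri(img, sizeof(img) / sizeof(img[0]));
	return 0;
}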
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index ac29691e0b1a..f8a1d27df272 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -166,6 +166,15 @@ static int run_test(struct intel_gt *gt, int test_type)
return -EIO;
}
+ /*
+ * FIXME: With efficient frequency enabled, GuC can request
+ * frequencies higher than the SLPC max. Until this is fixed
+ * in GuC, level-set these tests with RPn as the min.
+ */
+ err = slpc_set_min_freq(slpc, slpc->min_freq);
+ if (err)
+ return err;
+
if (slpc->min_freq == slpc->rp0_freq) {
pr_err("Min/Max are fused to the same value\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index df83c1cc7c7a..28b8387f97b7 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -37,6 +37,7 @@
* | | | - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large) |
* | | | - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message) |
* | | | - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified) |
+ * | | | - _`GUC_CTB_STATUS_UNUSED` = 8 (CTB is not in use) |
* +---+-------+--------------------------------------------------------------+
* |...| | RESERVED = MBZ |
* +---+-------+--------------------------------------------------------------+
@@ -49,9 +50,10 @@ struct guc_ct_buffer_desc {
u32 tail;
u32 status;
#define GUC_CTB_STATUS_NO_ERROR 0
-#define GUC_CTB_STATUS_OVERFLOW (1 << 0)
-#define GUC_CTB_STATUS_UNDERFLOW (1 << 1)
-#define GUC_CTB_STATUS_MISMATCH (1 << 2)
+#define GUC_CTB_STATUS_OVERFLOW BIT(0)
+#define GUC_CTB_STATUS_UNDERFLOW BIT(1)
+#define GUC_CTB_STATUS_MISMATCH BIT(2)
+#define GUC_CTB_STATUS_UNUSED BIT(3)
u32 reserved[13];
} __packed;
static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2706a8c65090..bac06e3d6f2c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen9_enable_guc_interrupts(struct intel_guc *guc)
@@ -93,11 +93,11 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
@@ -106,11 +106,11 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen9_reset_guc_interrupts(guc);
@@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
@@ -130,25 +130,25 @@ static void gen11_enable_guc_interrupts(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
intel_uncore_write(gt->uncore,
GEN11_GUC_SG_INTR_ENABLE, events);
intel_uncore_write(gt->uncore,
GEN11_GUC_SG_INTR_MASK, ~events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen11_reset_guc_interrupts(guc);
@@ -224,53 +224,22 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
- u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
- u32 flags;
-
- #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
- #define LOG_UNIT SZ_1M
- #define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
- #else
- #define LOG_UNIT SZ_4K
- #define LOG_FLAG 0
- #endif
-
- #if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
- #define CAPTURE_UNIT SZ_1M
- #define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
- #else
- #define CAPTURE_UNIT SZ_4K
- #define CAPTURE_FLAG 0
- #endif
-
- BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
- BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
- BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
- BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
-
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
- BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
- (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
+ struct intel_guc_log *log = &guc->log;
+ u32 offset, flags;
+
+ GEM_BUG_ON(!log->sizes_initialised);
+
+ offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
- CAPTURE_FLAG |
- LOG_FLAG |
- ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
- ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) << GUC_LOG_CAPTURE_SHIFT) |
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
+ log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
+ (log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
+ (log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
(offset << GUC_LOG_BUF_ADDR_SHIFT);
- #undef LOG_UNIT
- #undef LOG_FLAG
- #undef CAPTURE_UNIT
- #undef CAPTURE_FLAG
-
return flags;
}
@@ -389,6 +358,23 @@ void intel_guc_write_params(struct intel_guc *guc)
intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ u32 stamp = 0;
+ u64 ktime;
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
+ ktime = ktime_get_boottime_ns();
+
+ drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
+ drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
+ drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
+}
+
int intel_guc_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index a7acffbf15d1..804133df1ac9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -464,4 +464,6 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
void intel_guc_write_barrier(struct intel_guc *guc);
+void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ba7541f3ca61..74cbe8eaf531 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -464,7 +464,11 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
}
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
-#define LRC_SKIP_SIZE (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
+#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+ XEHP_LR_HW_CONTEXT_SIZE : \
+ LR_HW_CONTEXT_SIZE)
+#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
static int guc_prep_golden_context(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -525,7 +529,7 @@ static int guc_prep_golden_context(struct intel_guc *guc)
* on all engines).
*/
ads_blob_write(guc, ads.eng_state_size[guc_class],
- real_size - LRC_SKIP_SIZE);
+ real_size - LRC_SKIP_SIZE(gt->i915));
ads_blob_write(guc, ads.golden_context_lrca[guc_class],
addr_ggtt);
@@ -599,7 +603,7 @@ static void guc_init_golden_context(struct intel_guc *guc)
}
GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
- real_size - LRC_SKIP_SIZE);
+ real_size - LRC_SKIP_SIZE(gt->i915));
GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);
addr_ggtt += alloc_size;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 75257bd20ff0..8f1165146013 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -600,10 +600,8 @@ intel_guc_capture_getnullheader(struct intel_guc *guc,
return 0;
}
-#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
-
-int
-intel_guc_capture_output_min_size_est(struct intel_guc *guc)
+static int
+guc_capture_output_min_size_est(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
@@ -623,13 +621,8 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
* For each engine instance, there would be 1 x guc_state_capture_group_t output
* followed by 3 x guc_state_capture_t lists. The latter is how the register
* dumps are split across different register types (where the '3' are global vs class
- * vs instance). Finally, let's multiply the whole thing by 3x (just so we are
- * not limited to just 1 round of data in a worst case full register dump log)
- *
- * NOTE: intel_guc_log that allocates the log buffer would round this size up to
- * a power of two.
+ * vs instance).
*/
-
for_each_engine(engine, gt, id) {
worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
(3 * sizeof(struct guc_state_capture_header_t));
@@ -649,7 +642,31 @@ intel_guc_capture_output_min_size_est(struct intel_guc *guc)
worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
- return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER);
+ return worst_min_size;
+}
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before the i915 can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int min_size = guc_capture_output_min_size_est(guc);
+ int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+
+ if (min_size < 0)
+ drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
+ min_size);
+ else if (min_size > buffer_size)
+ drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
+ buffer_size, min_size);
+ else if (spare_size > buffer_size)
+ drm_notice(&i915->drm, "GuC error state capture buffer may be too small: %d < %d (min = %d)\n",
+ buffer_size, spare_size, min_size);
}
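To make the three warning tiers above concrete, here is a minimal userspace sketch (my own, not part of the patch) of the same min/spare/buffer comparison, fed with hypothetical numbers: a 1 MiB estimate against the 2 MiB default capture section.

/* Hypothetical standalone sketch of the check_guc_capture_size() thresholds. */
#include <stdio.h>

#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3

static void check_size(int min_size, unsigned int buffer_size)
{
	int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;

	if (min_size < 0)
		printf("error: could not estimate minimum size (%d)\n", min_size);
	else if ((unsigned int)min_size > buffer_size)
		printf("warn: buffer too small: %u < %d\n", buffer_size, min_size);
	else if ((unsigned int)spare_size > buffer_size)
		printf("notice: may be too small: %u < %d (min = %d)\n",
		       buffer_size, spare_size, min_size);
	else
		printf("ok: %u >= %d\n", buffer_size, spare_size);
}

int main(void)
{
	check_size(1 << 20, 2u << 20); /* 1 MiB estimate vs 2 MiB buffer -> notice */
	return 0;
}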
/*
@@ -1278,7 +1295,8 @@ static void __guc_capture_process_output(struct intel_guc *guc)
log_buf_state = guc->log.buf_addr +
(sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
- src_data = guc->log.buf_addr + intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER);
+ src_data = guc->log.buf_addr +
+ intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
/*
* Make a copy of the state structure, inside GuC log buffer
@@ -1286,7 +1304,7 @@ static void __guc_capture_process_output(struct intel_guc *guc)
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
- buffer_size = intel_guc_get_log_buffer_size(GUC_CAPTURE_LOG_BUFFER);
+ buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_count = log_buf_state_local.buffer_full_cnt;
@@ -1365,33 +1383,22 @@ guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
return NULL;
}
-#ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define __out(a, ...) \
- do { \
- drm_warn((&(a)->i915->drm), __VA_ARGS__); \
- i915_error_printf((a), __VA_ARGS__); \
- } while (0)
-#else
-#define __out(a, ...) \
- i915_error_printf(a, __VA_ARGS__)
-#endif
-
#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
do { \
- __out(ebuf, " i915-Eng-Name: %s command stream\n", \
- (eng)->name); \
- __out(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
- __out(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
- __out(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
- (eng)->logical_mask); \
+ i915_error_printf(ebuf, " i915-Eng-Name: %s command stream\n", \
+ (eng)->name); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
+ i915_error_printf(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
+ i915_error_printf(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
+ (eng)->logical_mask); \
} while (0)
#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
do { \
- __out(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
- (node)->eng_inst); \
- __out(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
- __out(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
+ i915_error_printf(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
+ (node)->eng_inst); \
+ i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
+ i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
} while (0)
int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
@@ -1423,57 +1430,57 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
guc = &ee->engine->gt->uc.guc;
- __out(ebuf, "global --- GuC Error Capture on %s command stream:\n",
- ee->engine->name);
+ i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
+ ee->engine->name);
node = ee->guc_capture_node;
if (!node) {
- __out(ebuf, " No matching ee-node\n");
+ i915_error_printf(ebuf, " No matching ee-node\n");
return 0;
}
- __out(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
+ i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
- __out(ebuf, " RegListType: %s\n",
- datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
- __out(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
+ i915_error_printf(ebuf, " RegListType: %s\n",
+ datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
+ i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
switch (i) {
case GUC_CAPTURE_LIST_TYPE_GLOBAL:
default:
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
- __out(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
- __out(ebuf, " i915-Eng-Class: %d\n",
- guc_class_to_engine_class(node->eng_class));
+ i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
+ i915_error_printf(ebuf, " i915-Eng-Class: %d\n",
+ guc_class_to_engine_class(node->eng_class));
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
if (eng)
GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
else
- __out(ebuf, " i915-Eng-Lookup Fail!\n");
+ i915_error_printf(ebuf, " i915-Eng-Lookup Fail!\n");
GCAP_PRINT_GUC_INST_INFO(ebuf, node);
break;
}
numregs = node->reginfo[i].num_regs;
- __out(ebuf, " NumRegs: %d\n", numregs);
+ i915_error_printf(ebuf, " NumRegs: %d\n", numregs);
j = 0;
while (numregs--) {
regs = node->reginfo[i].regs;
str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
node->eng_class, 0, regs[j].offset, &is_ext);
if (!str)
- __out(ebuf, " REG-0x%08x", regs[j].offset);
+ i915_error_printf(ebuf, " REG-0x%08x", regs[j].offset);
else
- __out(ebuf, " %s", str);
+ i915_error_printf(ebuf, " %s", str);
if (is_ext)
- __out(ebuf, "[%ld][%ld]",
- FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
- FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
- __out(ebuf, ": 0x%08x\n", regs[j].value);
+ i915_error_printf(ebuf, "[%ld][%ld]",
+ FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
+ FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
+ i915_error_printf(ebuf, ": 0x%08x\n", regs[j].value);
++j;
}
}
@@ -1580,5 +1587,7 @@ int intel_guc_capture_init(struct intel_guc *guc)
INIT_LIST_HEAD(&guc->capture->outlist);
INIT_LIST_HEAD(&guc->capture->cachelist);
+ check_guc_capture_size(guc);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
index d3d7bd0b6db6..fbd3713c7832 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -21,7 +21,6 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
struct intel_context *ce);
void intel_guc_capture_process(struct intel_guc *guc);
-int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
void **outptr);
int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index f01325cd1b62..2b22065e87bf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -455,6 +455,7 @@ corrupted:
/**
* wait_for_ct_request_update - Wait for CT request state update.
+ * @ct: pointer to CT
* @req: pointer to pending request
* @status: placeholder for status
*
@@ -467,9 +468,10 @@ corrupted:
* * 0 response received (status is valid)
* * -ETIMEDOUT no response within hardcoded timeout
*/
-static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
+static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
{
int err;
+ bool ct_enabled;
/*
* Fast commands should complete in less than 10us, so sample quickly
@@ -481,12 +483,15 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
#define done \
- (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
+ (!(ct_enabled = intel_guc_ct_enabled(ct)) || \
+ FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
GUC_HXG_ORIGIN_GUC)
err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
if (err)
err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
+ if (!ct_enabled)
+ err = -ENODEV;
*status = req->status;
return err;
@@ -703,11 +708,18 @@ retry:
intel_guc_notify(ct_to_guc(ct));
- err = wait_for_ct_request_update(&request, status);
+ err = wait_for_ct_request_update(ct, &request, status);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
if (unlikely(err)) {
- CT_ERROR(ct, "No response for request %#x (fence %u)\n",
- action[0], request.fence);
+ if (err == -ENODEV)
+ /* wait_for_ct_request_update returns -ENODEV when a reset or suspend
+ * is in progress; in that case, log it as debug rather than as an error.
+ */
+ CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
+ action[0], request.fence);
+ else
+ CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+ action[0], request.fence);
goto unlink;
}
@@ -771,8 +783,9 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
- action[0], ERR_PTR(ret), status);
+ if (ret != -ENODEV)
+ CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+ action[0], ERR_PTR(ret), status);
} else if (unlikely(ret)) {
CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
action[0], ret, ret);
@@ -816,8 +829,22 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
if (unlikely(ctb->broken))
return -EPIPE;
- if (unlikely(desc->status))
- goto corrupted;
+ if (unlikely(desc->status)) {
+ u32 status = desc->status;
+
+ if (status & GUC_CTB_STATUS_UNUSED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset, but this should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
+ status &= ~GUC_CTB_STATUS_UNUSED;
+ }
+
+ if (status)
+ goto corrupted;
+ }
GEM_BUG_ON(head > size);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 25b2d7ce6640..55d3ef93e86f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -13,8 +13,163 @@
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
+#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#else
+#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
+#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_2M
+#endif
+
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
+struct guc_log_section {
+ u32 max;
+ u32 flag;
+ u32 default_val;
+ const char *name;
+};
+
+static void _guc_log_init_sizes(struct intel_guc_log *log)
+{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
+ {
+ GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
+ "crash dump"
+ },
+ {
+ GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
+ GUC_LOG_LOG_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
+ "debug",
+ },
+ {
+ GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
+ GUC_LOG_CAPTURE_ALLOC_UNITS,
+ GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
+ "capture",
+ }
+ };
+ int i;
+
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
+ log->sizes[i].bytes = sections[i].default_val;
+
+ /* If debug size >= 1MB then bump default crash size to keep the same units */
+ if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
+ GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
+ log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;
+
+ /* Prepare the GuC API structure fields: */
+ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
+ /* Convert to correct units */
+ if ((log->sizes[i].bytes % SZ_1M) == 0) {
+ log->sizes[i].units = SZ_1M;
+ log->sizes[i].flag = sections[i].flag;
+ } else {
+ log->sizes[i].units = SZ_4K;
+ log->sizes[i].flag = 0;
+ }
+
+ if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
+ drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
+ sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
+ log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
+
+ if (!log->sizes[i].count) {
+ drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
+ } else {
+ /* GuC interprets the programmed count as (value + 1) units */
+ log->sizes[i].count--;
+ }
+
+ /* Clip to field size */
+ if (log->sizes[i].count > sections[i].max) {
+ drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
+ sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
+ log->sizes[i].count = sections[i].max;
+ }
+ }
+
+ if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
+ drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units,
+ log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
+ log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
+ log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
+ }
+
+ log->sizes_initialised = true;
+}
+
+static void guc_log_init_sizes(struct intel_guc_log *log)
+{
+ if (log->sizes_initialised)
+ return;
+
+ _guc_log_init_sizes(log);
+}
+
+static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
+}
+
+static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
+}
+
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
+{
+ guc_log_init_sizes(log);
+
+ return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
+}
+
+static u32 intel_guc_log_size(struct intel_guc_log *log)
+{
+ /*
+ * GuC Log buffer Layout:
+ *
+ * NB: Ordering must follow "enum guc_log_buffer_type".
+ *
+ * +===============================+ 00B
+ * | Debug state header |
+ * +-------------------------------+ 32B
+ * | Crash dump state header |
+ * +-------------------------------+ 64B
+ * | Capture state header |
+ * +-------------------------------+ 96B
+ * | |
+ * +===============================+ PAGE_SIZE (4KB)
+ * | Debug logs |
+ * +===============================+ + DEBUG_SIZE
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
+ * | Capture logs |
+ * +===============================+ + CAPTURE_SIZE
+ */
+ return PAGE_SIZE +
+ intel_guc_log_section_size_crash(log) +
+ intel_guc_log_section_size_debug(log) +
+ intel_guc_log_section_size_capture(log);
+}
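A quick arithmetic sketch (mine, not part of the patch) of what intel_guc_log_size() comes to under the default non-debug Kconfig sizes defined above (8K crash, 64K debug, 2M capture), assuming a 4K PAGE_SIZE as in the layout diagram; the SZ_* macros and main() wrapper are local stand-ins for illustration.

/* Hypothetical worked example of the default non-debug GuC log allocation. */
#include <stdio.h>

#define SZ_4K  (4u * 1024)
#define SZ_8K  (8u * 1024)
#define SZ_64K (64u * 1024)
#define SZ_2M  (2u * 1024 * 1024)

int main(void)
{
	/* one PAGE_SIZE header page + crash + debug + capture sections */
	unsigned int total = SZ_4K + SZ_8K + SZ_64K + SZ_2M;

	printf("GuC log allocation: %u bytes (%u KiB)\n", total, total / 1024);
	/* Prints 2174976 bytes, i.e. 2124 KiB = 2 MiB + 76 KiB */
	return 0;
}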
+
/**
* DOC: GuC firmware log
*
@@ -139,7 +294,8 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);
+ relay_reserve(log->relay.channel, log->vma->obj->base.size -
+ intel_guc_log_section_size_capture(log));
/* Switch to the next sub buffer */
relay_flush(log->relay.channel);
@@ -184,15 +340,16 @@ bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
return overflow;
}
-unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
- return DEBUG_BUFFER_SIZE;
+ return intel_guc_log_section_size_debug(log);
case GUC_CRASH_DUMP_LOG_BUFFER:
- return CRASH_BUFFER_SIZE;
+ return intel_guc_log_section_size_crash(log);
case GUC_CAPTURE_LOG_BUFFER:
- return CAPTURE_BUFFER_SIZE;
+ return intel_guc_log_section_size_capture(log);
default:
MISSING_CASE(type);
}
@@ -200,7 +357,8 @@ unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
return 0;
}
-size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
+ enum guc_log_buffer_type type)
{
enum guc_log_buffer_type i;
size_t offset = PAGE_SIZE;/* for the log_buffer_states */
@@ -208,7 +366,7 @@ size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
if (i == type)
break;
- offset += intel_guc_get_log_buffer_size(i);
+ offset += intel_guc_get_log_buffer_size(log, i);
}
return offset;
@@ -259,7 +417,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
- buffer_size = intel_guc_get_log_buffer_size(type);
+ buffer_size = intel_guc_get_log_buffer_size(log, type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
@@ -374,7 +532,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
* Keep the size of sub buffers same as shared log buffer
* but GuC log-events excludes the error-state-capture logs
*/
- subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;
+ subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -461,32 +619,7 @@ int intel_guc_log_create(struct intel_guc_log *log)
GEM_BUG_ON(log->vma);
- /*
- * GuC Log buffer Layout
- * (this ordering must follow "enum guc_log_buffer_type" definition)
- *
- * +===============================+ 00B
- * | Debug state header |
- * +-------------------------------+ 32B
- * | Crash dump state header |
- * +-------------------------------+ 64B
- * | Capture state header |
- * +-------------------------------+ 96B
- * | |
- * +===============================+ PAGE_SIZE (4KB)
- * | Debug logs |
- * +===============================+ + DEBUG_SIZE
- * | Crash Dump logs |
- * +===============================+ + CRASH_SIZE
- * | Capture logs |
- * +===============================+ + CAPTURE_SIZE
- */
- if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
- DRM_WARN("GuC log buffer for state_capture maybe too small. %d < %d\n",
- CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));
-
- guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
- CAPTURE_BUFFER_SIZE;
+ guc_log_size = intel_guc_log_size(log);
vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
@@ -749,8 +882,9 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
struct intel_guc *guc = log_to_guc(log);
struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
struct drm_i915_gem_object *obj = NULL;
- u32 *map;
- int i = 0;
+ void *map;
+ u32 *page;
+ int i, j;
if (!intel_guc_is_supported(guc))
return -ENODEV;
@@ -763,21 +897,34 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
if (!obj)
return 0;
+ page = (u32 *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ intel_guc_dump_time_info(guc, p);
+
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
DRM_DEBUG("Failed to pin object\n");
drm_puts(p, "(log data unaccessible)\n");
+ free_page((unsigned long)page);
return PTR_ERR(map);
}
- for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
- drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(map + i), *(map + i + 1),
- *(map + i + 2), *(map + i + 3));
+ for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
+ if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
+ memcpy(page, map + i, PAGE_SIZE);
+
+ for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
+ drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ *(page + j + 0), *(page + j + 1),
+ *(page + j + 2), *(page + j + 3));
+ }
drm_puts(p, "\n");
i915_gem_object_unpin_map(obj);
+ free_page((unsigned long)page);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 18007e639be9..02127703be80 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -15,20 +15,6 @@
struct intel_guc;
-#if defined(CONFIG_DRM_I915_DEBUG_GUC)
-#define CRASH_BUFFER_SIZE SZ_2M
-#define DEBUG_BUFFER_SIZE SZ_16M
-#define CAPTURE_BUFFER_SIZE SZ_4M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
-#define CRASH_BUFFER_SIZE SZ_1M
-#define DEBUG_BUFFER_SIZE SZ_2M
-#define CAPTURE_BUFFER_SIZE SZ_1M
-#else
-#define CRASH_BUFFER_SIZE SZ_8K
-#define DEBUG_BUFFER_SIZE SZ_64K
-#define CAPTURE_BUFFER_SIZE SZ_16K
-#endif
-
/*
* While we're using plain log level in i915, GuC controls are much more...
* "elaborate"? We have a couple of bits for verbosity, separate bit for actual
@@ -46,10 +32,30 @@ struct intel_guc;
#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2)
#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
+enum {
+ GUC_LOG_SECTIONS_CRASH,
+ GUC_LOG_SECTIONS_DEBUG,
+ GUC_LOG_SECTIONS_CAPTURE,
+ GUC_LOG_SECTIONS_LIMIT
+};
+
struct intel_guc_log {
u32 level;
+
+ /* Allocation settings */
+ struct {
+ s32 bytes; /* Size in bytes */
+ s32 units; /* GuC API units - 1MB or 4KB */
+ s32 count; /* Number of API units */
+ u32 flag; /* GuC API units flag */
+ } sizes[GUC_LOG_SECTIONS_LIMIT];
+ bool sizes_initialised;
+
+ /* Combined buffer allocation */
struct i915_vma *vma;
void *buf_addr;
+
+ /* RelayFS support */
struct {
bool buf_in_use;
bool started;
@@ -58,6 +64,7 @@ struct intel_guc_log {
struct mutex lock;
u32 full_count;
} relay;
+
/* logging related stats */
struct {
u32 sampled_overflow;
@@ -69,8 +76,9 @@ struct intel_guc_log {
void intel_guc_log_init_early(struct intel_guc_log *log);
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type,
unsigned int full_cnt);
-unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type);
-size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type);
+unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
+ enum guc_log_buffer_type type);
+size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log, enum guc_log_buffer_type type);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
@@ -92,4 +100,6 @@ void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p);
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
bool dump_load_err);
+u32 intel_guc_log_section_size_capture(struct intel_guc_log *log);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 8dc063f087eb..a7092f711e9c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -102,6 +102,10 @@
#define GUC_SEND_TRIGGER (1<<0)
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
+#define GEN12_GUC_SEM_INTR_ENABLES _MMIO(0xc71c)
+#define GUC_SEM_INTR_ROUTE_TO_GUC BIT(31)
+#define GUC_SEM_INTR_ENABLE_ALL (0xff)
+
#define GUC_NUM_DOORBELLS 256
/* format of the HW-monitored doorbell cacheline */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ec9c4ca0f615..fdd895f73f9f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -137,17 +137,6 @@ static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
return ret > 0 ? -EPROTO : ret;
}
-static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
-{
- u32 request[] = {
- GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
- id,
- };
-
- return intel_guc_send(guc, request, ARRAY_SIZE(request));
-}
-
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
@@ -201,16 +190,6 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
return ret;
}
-static int slpc_unset_param(struct intel_guc_slpc *slpc,
- u8 id)
-{
- struct intel_guc *guc = slpc_to_guc(slpc);
-
- GEM_BUG_ON(id >= SLPC_MAX_PARAM);
-
- return guc_action_slpc_unset_param(guc, id);
-}
-
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -488,23 +467,33 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
/* Need a lock now since waitboost can be modifying min as well */
mutex_lock(&slpc->lock);
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-
- ret = slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- val);
-
- /* Return standardized err code for sysfs calls */
- if (ret)
- ret = -EIO;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ /* Ignore efficient freq if lower min freq is requested */
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val < slpc->rp1_freq);
+ if (ret) {
+ i915_probe_error(i915, "Failed to toggle efficient freq (%pe)\n",
+ ERR_PTR(ret));
+ goto out;
}
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+ val);
+
if (!ret)
slpc->min_freq_softlimit = val;
+out:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
+ /* Return standardized err code for sysfs calls */
+ if (ret)
+ ret = -EIO;
+
return ret;
}
@@ -575,45 +564,28 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
* unless they have deviated from defaults, in which case,
* we retain the values and set min/max accordingly.
*/
- if (!slpc->max_freq_softlimit)
+ if (!slpc->max_freq_softlimit) {
slpc->max_freq_softlimit = slpc->rp0_freq;
- else if (slpc->max_freq_softlimit != slpc->rp0_freq)
+ slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
+ } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
ret = intel_guc_slpc_set_max_freq(slpc,
slpc->max_freq_softlimit);
+ }
if (unlikely(ret))
return ret;
- if (!slpc->min_freq_softlimit)
- slpc->min_freq_softlimit = slpc->min_freq;
- else if (slpc->min_freq_softlimit != slpc->min_freq)
+ if (!slpc->min_freq_softlimit) {
+ ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
+ if (unlikely(ret))
+ return ret;
+ slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
+ } else if (slpc->min_freq_softlimit != slpc->min_freq) {
return intel_guc_slpc_set_min_freq(slpc,
slpc->min_freq_softlimit);
-
- return 0;
-}
-
-static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
-{
- int ret = 0;
-
- if (ignore) {
- ret = slpc_set_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
- ignore);
- if (!ret)
- return slpc_set_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
- slpc->min_freq);
- } else {
- ret = slpc_unset_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
- if (!ret)
- return slpc_unset_param(slpc,
- SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
}
- return ret;
+ return 0;
}
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
@@ -675,14 +647,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
slpc_get_rp_values(slpc);
- /* Ignore efficient freq and set min to platform min */
- ret = slpc_ignore_eff_freq(slpc, true);
- if (unlikely(ret)) {
- i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
- ERR_PTR(ret));
- return ret;
- }
-
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 76916aed897a..22ba66e48a9b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
- cancel_delayed_work(&guc->timestamp.work);
+ /*
+ * There is a race with suspend flow where the worker runs after suspend
+ * and causes an unclaimed register access warning. Cancel the worker
+ * synchronously here.
+ */
+ cancel_delayed_work_sync(&guc->timestamp.work);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1532,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
__reset_guc_busyness_stats(guc);
/* Flush IRQ handler */
- spin_lock_irq(&guc_to_gt(guc)->irq_lock);
- spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
+ spin_lock_irq(guc_to_gt(guc)->irq_lock);
+ spin_unlock_irq(guc_to_gt(guc)->irq_lock);
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
@@ -1868,7 +1873,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
if (guc->submission_initialized)
return 0;
- if (guc->fw.major_ver_found < 70) {
+ if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
ret = guc_lrc_desc_pool_create_v69(guc);
if (ret)
return ret;
@@ -2303,7 +2308,7 @@ static int register_context(struct intel_context *ce, bool loop)
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- if (guc->fw.major_ver_found >= 70)
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
ret = register_context_v70(guc, ce, loop);
else
ret = register_context_v69(guc, ce, loop);
@@ -2315,7 +2320,7 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (guc->fw.major_ver_found >= 70)
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
guc_context_policy_init_v70(ce, loop);
}
@@ -2420,7 +2425,6 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
- bool missing = false;
unsigned long flags;
int ret;
@@ -2438,32 +2442,9 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
ret = __guc_context_set_context_policies(guc, &policy, loop);
- missing = ret != 0;
-
- if (!missing && intel_context_is_parent(ce)) {
- struct intel_context *child;
-
- for_each_child(ce, child) {
- __guc_context_policy_start_klv(&policy, child->guc_id.id);
-
- if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
- __guc_context_policy_add_preempt_to_idle(&policy, 1);
-
- child->guc_state.prio = ce->guc_state.prio;
- __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
- __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
- __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-
- ret = __guc_context_set_context_policies(guc, &policy, loop);
- if (ret) {
- missing = true;
- break;
- }
- }
- }
spin_lock_irqsave(&ce->guc_state.lock, flags);
- if (missing)
+ if (ret != 0)
set_context_policy_required(ce);
else
clr_context_policy_required(ce);
@@ -2945,7 +2926,7 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- if (guc->fw.major_ver_found >= 70) {
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, guc_id);
@@ -3210,7 +3191,7 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- if (guc->fw.major_ver_found >= 70) {
+ if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
@@ -4027,6 +4008,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
xa_destroy(&guc->context_lookup);
/*
+ * A reset might have occurred while we had a pending stalled request,
+ * so make sure we clean that up.
+ */
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+
+ /*
* Some contexts might have been pinned before we enabled GuC
* submission, so we need to add them to the GuC bookkeeping.
* Also, after a reset of the GuC we want to make sure that the
@@ -4191,13 +4179,27 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
void intel_guc_submission_enable(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /* Enable and route to GuC */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES,
+ GUC_SEM_INTR_ROUTE_TO_GUC |
+ GUC_SEM_INTR_ENABLE_ALL);
+
guc_init_lrc_mapping(guc);
guc_init_engine_stats(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
/* Note: By the time we're here, GuC may have already been reset */
+
+ /* Disable and route to host */
+ if (GRAPHICS_VER(gt->i915) >= 12)
+ intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, 0x0);
}
static bool __guc_submission_supported(struct intel_guc *guc)
@@ -5163,4 +5165,5 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
+#include "selftest_guc_hangcheck.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index f2e7c82985ef..dbd048b77e19 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
intel_guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
@@ -435,9 +435,11 @@ static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
- drm_info(&i915->drm, "%s firmware %s version %u.%u\n",
- intel_uc_fw_type_repr(fw->type), fw->path,
- fw->major_ver_found, fw->minor_ver_found);
+ drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
+ fw->file_selected.major_ver,
+ fw->file_selected.minor_ver,
+ fw->file_selected.patch_ver);
}
static int __uc_init_hw(struct intel_uc *uc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 56a0d80f88ba..b91ad4aede1f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -41,7 +41,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
"%s firmware -> %s\n",
intel_uc_fw_type_repr(uc_fw->type),
status == INTEL_UC_FIRMWARE_SELECTED ?
- uc_fw->path : intel_uc_fw_status_repr(status));
+ uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
@@ -51,84 +51,153 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
*
* Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
* firmware as TGL.
+ *
+ * Version numbers:
+ * Originally, the driver required an exact match of the major/minor/patch firmware
+ * file and only supported that one version for any given platform. However,
+ * the new direction from upstream is to be backwards compatible with all
+ * prior releases and to be as flexible as possible as to what firmware is
+ * loaded.
+ *
+ * For GuC, the major version number signifies a backwards breaking API change.
+ * So, new format GuC firmware files are labelled by their major version only.
+ * For HuC, there is no KMD interaction, hence no version matching requirement.
+ * So, new format HuC firmware files have no version number at all.
+ *
+ * All of which means that the table below must keep all old format files with
+ * their full three-point version number, while newer files have reduced
+ * requirements. Having said that, the driver still needs to track the minor
+ * version number for GuC at least, as it is useful to report to the user that
+ * they are not running a recent enough version for all KMD-supported features,
+ * security fixes, etc. to be enabled.
+ */
+#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
+ fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
+ fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \
+ fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
+ fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ELKHARTLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
+ fw_def(ICELAKE, 0, guc_mmp(icl, 70, 1, 1)) \
+ fw_def(COMETLAKE, 5, guc_mmp(cml, 70, 1, 1)) \
+ fw_def(COMETLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(COFFEELAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(GEMINILAKE, 0, guc_mmp(glk, 70, 1, 1)) \
+ fw_def(KABYLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
+ fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
+ fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
+
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
+ fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
+ fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
+ fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_raw(dg1)) \
+ fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ELKHARTLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
+ fw_def(ICELAKE, 0, huc_mmp(icl, 9, 0, 0)) \
+ fw_def(COMETLAKE, 5, huc_mmp(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(COFFEELAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(GEMINILAKE, 0, huc_mmp(glk, 4, 0, 0)) \
+ fw_def(KABYLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
+ fw_def(BROXTON, 0, huc_mmp(bxt, 2, 0, 0)) \
+ fw_def(SKYLAKE, 0, huc_mmp(skl, 2, 0, 0))
+
+/*
+ * Set of macros for producing a list of filenames from the above table.
*/
-#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(DG2, 0, guc_def(dg2, 70, 1, 2)) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 70, 1, 1)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 70, 1, 1)) \
- fw_def(ICELAKE, 0, guc_def(icl, 70, 1, 1)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 70, 1, 1)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 70, 1, 1)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 70, 1, 1)) \
- fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
-
-#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3))
-
-#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
- fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(ALDERLAKE_S, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(DG1, 0, huc_def(dg1, 7, 9, 3)) \
- fw_def(ROCKETLAKE, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(TIGERLAKE, 0, huc_def(tgl, 7, 9, 3)) \
- fw_def(JASPERLAKE, 0, huc_def(ehl, 9, 0, 0)) \
- fw_def(ELKHARTLAKE, 0, huc_def(ehl, 9, 0, 0)) \
- fw_def(ICELAKE, 0, huc_def(icl, 9, 0, 0)) \
- fw_def(COMETLAKE, 5, huc_def(cml, 4, 0, 0)) \
- fw_def(COMETLAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(COFFEELAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(GEMINILAKE, 0, huc_def(glk, 4, 0, 0)) \
- fw_def(KABYLAKE, 0, huc_def(kbl, 4, 0, 0)) \
- fw_def(BROXTON, 0, huc_def(bxt, 2, 0, 0)) \
- fw_def(SKYLAKE, 0, huc_def(skl, 2, 0, 0))
-
-#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
+#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
+ "i915/" \
+ __stringify(prefix_) name_ ".bin"
+
+#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) ".bin"
+
+#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) name_ \
__stringify(major_) "." \
__stringify(minor_) "." \
__stringify(patch_) ".bin"
-#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
+/* Minor for internal driver use, not part of file name */
+#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
+ __MAKE_UC_FW_PATH_MAJOR(prefix_, "_guc_", major_)
+
+#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_guc_", major_, minor_, patch_)
-#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
- __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
+#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
-/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
+
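For illustration, a small userspace sketch (mine, not part of the patch) of the firmware path strings the three path macros above expand to for a few representative table entries; PATH_BLANK/PATH_MAJOR/PATH_MMP and main() are local stand-ins for the kernel's __MAKE_UC_FW_PATH_* helpers.

/* Hypothetical replication of the uc firmware path macros in userspace. */
#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

#define PATH_BLANK(prefix_, name_) "i915/" __stringify(prefix_) name_ ".bin"
#define PATH_MAJOR(prefix_, name_, major_) \
	"i915/" __stringify(prefix_) name_ __stringify(major_) ".bin"
#define PATH_MMP(prefix_, name_, major_, minor_, patch_) \
	"i915/" __stringify(prefix_) name_ __stringify(major_) "." \
	__stringify(minor_) "." __stringify(patch_) ".bin"

int main(void)
{
	/* New-style GuC: only the major version appears in the file name */
	puts(PATH_MAJOR(dg2, "_guc_", 70));      /* i915/dg2_guc_70.bin */
	/* Legacy GuC: full major.minor.patch file name */
	puts(PATH_MMP(adlp, "_guc_", 70, 1, 1)); /* i915/adlp_guc_70.1.1.bin */
	/* New-style HuC: no version in the file name at all */
	puts(PATH_BLANK(tgl, "_huc"));           /* i915/tgl_huc.bin */
	return 0;
}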
+/*
+ * All blobs need to be declared via MODULE_FIRMWARE().
+ * This first expansion of the table macros is solely to provide
+ * that declaration.
+ */
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
MODULE_FIRMWARE(uc_);
-INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
-INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
-INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP)
-/* The below structs and macros are used to iterate across the list of blobs */
+/*
+ * The next expansion of the table macros (in __uc_fw_auto_select below) provides
+ * actual data structures with both the filename and the version information.
+ * These structure arrays are then iterated over to build the list of suitable
+ * files for the current platform and to attempt to load those files, in the
+ * order listed, until one is successfully found.
+ */
struct __packed uc_fw_blob {
+ const char *path;
+ bool legacy;
u8 major;
u8 minor;
- const char *path;
+ u8 patch;
};
-#define UC_FW_BLOB(major_, minor_, path_) \
- { .major = major_, .minor = minor_, .path = path_ }
+#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .major = major_, \
+ .minor = minor_, \
+ .patch = patch_, \
+ .path = path_,
+
+#define UC_FW_BLOB_NEW(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = false }
-#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
- UC_FW_BLOB(major_, minor_, \
- MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
+#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
+ { UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
+ .legacy = true }
-#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
- UC_FW_BLOB(major_, minor_, \
- MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
+#define GUC_FW_BLOB(prefix_, major_, minor_) \
+ UC_FW_BLOB_NEW(major_, minor_, 0, \
+ MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_))
+
+#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_) \
+ UC_FW_BLOB_NEW(0, 0, 0, MAKE_HUC_FW_PATH_BLANK(prefix_))
+
+#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB_OLD(major_, minor_, patch_, \
+ MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
@@ -152,23 +221,22 @@ static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
static const struct uc_fw_platform_requirement blobs_guc[] = {
- INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
- };
- static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
- INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+ INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};
static const struct uc_fw_platform_requirement blobs_huc[] = {
- INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP)
};
static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
+ static bool verified;
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
+ bool found;
/*
* The only difference between the ADL GuC FWs is the HWConfig support.
@@ -183,50 +251,102 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
fw_blobs = blobs_all[uc_fw->type].blobs;
fw_count = blobs_all[uc_fw->type].count;
+ found = false;
for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
- if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
- const struct uc_fw_blob *blob = &fw_blobs[i].blob;
- uc_fw->path = blob->path;
- uc_fw->wanted_path = blob->path;
- uc_fw->major_ver_wanted = blob->major;
- uc_fw->minor_ver_wanted = blob->minor;
- break;
- }
- }
+ const struct uc_fw_blob *blob = &fw_blobs[i].blob;
- if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
- const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
- u32 count = ARRAY_SIZE(blobs_guc_fallback);
+ if (p != fw_blobs[i].p)
+ continue;
- for (i = 0; i < count && p <= blobs[i].p; i++) {
- if (p == blobs[i].p && rev >= blobs[i].rev) {
- const struct uc_fw_blob *blob = &blobs[i].blob;
+ if (rev < fw_blobs[i].rev)
+ continue;
- uc_fw->fallback.path = blob->path;
- uc_fw->fallback.major_ver = blob->major;
- uc_fw->fallback.minor_ver = blob->minor;
- break;
- }
+ if (uc_fw->file_selected.path) {
+ if (uc_fw->file_selected.path == blob->path)
+ uc_fw->file_selected.path = NULL;
+
+ continue;
}
+
+ uc_fw->file_selected.path = blob->path;
+ uc_fw->file_wanted.path = blob->path;
+ uc_fw->file_wanted.major_ver = blob->major;
+ uc_fw->file_wanted.minor_ver = blob->minor;
+ found = true;
+ break;
+ }
+
+ if (!found && uc_fw->file_selected.path) {
+ /* Failed to find a match for the last attempt?! */
+ uc_fw->file_selected.path = NULL;
}
/* make sure the list is ordered as expected */
- if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
+ verified = true;
+
for (i = 1; i < fw_count; i++) {
+ /* Next platform is good: */
if (fw_blobs[i].p < fw_blobs[i - 1].p)
continue;
+ /* Next platform revision is good: */
if (fw_blobs[i].p == fw_blobs[i - 1].p &&
fw_blobs[i].rev < fw_blobs[i - 1].rev)
continue;
- pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
- intel_platform_name(fw_blobs[i - 1].p),
- fw_blobs[i - 1].rev,
- intel_platform_name(fw_blobs[i].p),
- fw_blobs[i].rev);
+ /* Platform/revision must be in order: */
+ if (fw_blobs[i].p != fw_blobs[i - 1].p ||
+ fw_blobs[i].rev != fw_blobs[i - 1].rev)
+ goto bad;
+
+ /* Next major version is good: */
+ if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
+ continue;
+
+ /* New must be before legacy: */
+ if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
+ goto bad;
+
+ /* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
+ if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
+ if (!fw_blobs[i - 1].blob.major)
+ continue;
+
+ if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
+ continue;
+ }
+
+ /* Major versions must be in order: */
+ if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
+ goto bad;
+
+ /* Next minor version is good: */
+ if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
+ continue;
+
+ /* Minor versions must be in order: */
+ if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
+ goto bad;
+
+ /* Patch versions must be in order: */
+ if (fw_blobs[i].blob.patch <= fw_blobs[i - 1].blob.patch)
+ continue;
- uc_fw->path = NULL;
+bad:
+ drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
+ fw_blobs[i - 1].blob.legacy ? "L" : "v",
+ fw_blobs[i - 1].blob.major,
+ fw_blobs[i - 1].blob.minor,
+ fw_blobs[i - 1].blob.patch,
+ intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
+ fw_blobs[i].blob.legacy ? "L" : "v",
+ fw_blobs[i].blob.major,
+ fw_blobs[i].blob.minor,
+ fw_blobs[i].blob.patch);
+
+ uc_fw->file_selected.path = NULL;
}
}
}
@@ -259,7 +379,7 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
}
if (unlikely(path)) {
- uc_fw->path = path;
+ uc_fw->file_selected.path = path;
uc_fw->user_overridden = true;
}
}
@@ -283,7 +403,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
*/
BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
GEM_BUG_ON(uc_fw->status);
- GEM_BUG_ON(uc_fw->path);
+ GEM_BUG_ON(uc_fw->file_selected.path);
uc_fw->type = type;
@@ -292,7 +412,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
__uc_fw_user_override(i915, uc_fw);
}
- intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
+ intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
INTEL_UC_FIRMWARE_SELECTED :
INTEL_UC_FIRMWARE_DISABLED :
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
@@ -305,32 +425,32 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
- uc_fw->path = "<invalid>";
+ uc_fw->file_selected.path = "<invalid>";
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
- uc_fw->major_ver_wanted += 1;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver += 1;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
- uc_fw->minor_ver_wanted += 1;
+ uc_fw->file_wanted.minor_ver += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->major_ver_wanted &&
+ } else if (uc_fw->file_wanted.major_ver &&
i915_inject_probe_error(i915, e)) {
/* require prev major version */
- uc_fw->major_ver_wanted -= 1;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver -= 1;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->minor_ver_wanted &&
+ } else if (uc_fw->file_wanted.minor_ver &&
i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
- uc_fw->minor_ver_wanted -= 1;
+ uc_fw->file_wanted.minor_ver -= 1;
uc_fw->user_overridden = user;
} else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
- uc_fw->major_ver_wanted = 0;
- uc_fw->minor_ver_wanted = 0;
+ uc_fw->file_wanted.major_ver = 0;
+ uc_fw->file_wanted.minor_ver = 0;
uc_fw->user_overridden = true;
}
}
@@ -339,10 +459,12 @@ static int check_gsc_manifest(const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
u32 *dw = (u32 *)fw->data;
- u32 version = dw[HUC_GSC_VERSION_DW];
+ u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
+ u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
- uc_fw->major_ver_found = FIELD_GET(HUC_GSC_MAJOR_VER_MASK, version);
- uc_fw->minor_ver_found = FIELD_GET(HUC_GSC_MINOR_VER_MASK, version);
+ uc_fw->file_selected.major_ver = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.minor_ver = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.patch_ver = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
return 0;
}
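
check_gsc_manifest() now reads the HuC version from two consecutive dwords of the GSC-packaged image: major and minor from the HI dword and patch from the LO dword, using the HUC_GSC_*_MASK definitions added to intel_uc_fw_abi.h later in this patch. A standalone sketch of that layout follows; field_get() is a simplified stand-in for the kernel's FIELD_GET() and the dword contents are made up.

/* Sketch of extracting major.minor.patch from the HI/LO version dwords. */
#include <stdio.h>

#define VERSION_HI_DW		44
#define MAJOR_VER_HI_MASK	(0xFFu << 0)
#define MINOR_VER_HI_MASK	(0xFFu << 16)
#define VERSION_LO_DW		45
#define PATCH_VER_LO_MASK	(0xFFu << 0)

/* Like FIELD_GET(): mask out the field, then shift down by the mask's lowest set bit. */
static unsigned int field_get(unsigned int mask, unsigned int val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	unsigned int dw[64] = { 0 };

	/* Fake image: major 7 and minor 10 in the HI dword, patch 3 in the LO dword. */
	dw[VERSION_HI_DW] = 7 | (10u << 16);
	dw[VERSION_LO_DW] = 3;

	printf("%u.%u.%u\n",
	       field_get(MAJOR_VER_HI_MASK, dw[VERSION_HI_DW]),
	       field_get(MINOR_VER_HI_MASK, dw[VERSION_HI_DW]),
	       field_get(PATCH_VER_LO_MASK, dw[VERSION_LO_DW]));
	return 0;
}
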
@@ -357,7 +479,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw->size < sizeof(struct uc_css_header))) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, sizeof(struct uc_css_header));
return -ENODATA;
}
@@ -370,7 +492,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
if (unlikely(size != sizeof(struct uc_css_header))) {
drm_warn(&i915->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, sizeof(struct uc_css_header));
return -EPROTO;
}
@@ -385,7 +507,7 @@ static int check_ccs_header(struct drm_i915_private *i915,
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw->size < size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw->size, size);
return -ENOEXEC;
}
@@ -394,16 +516,18 @@ static int check_ccs_header(struct drm_i915_private *i915,
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= i915->wopcm.size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
size, (size_t)i915->wopcm.size);
return -E2BIG;
}
/* Get version numbers from the CSS header */
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->sw_version);
+ uc_fw->file_selected.major_ver = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
+ css->sw_version);
+ uc_fw->file_selected.minor_ver = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
+ css->sw_version);
+ uc_fw->file_selected.patch_ver = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
+ css->sw_version);
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
uc_fw->private_data_size = css->private_data_size;
@@ -422,9 +546,11 @@ static int check_ccs_header(struct drm_i915_private *i915,
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct intel_uc_fw_file file_ideal;
struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
+ bool old_ver = false;
int err;
GEM_BUG_ON(!i915->wopcm.size);
@@ -437,24 +563,32 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = firmware_request_nowarn(&fw, uc_fw->path, dev);
- if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
- err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
- if (!err) {
- drm_notice(&i915->drm,
- "%s firmware %s is recommended, but only %s was found\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->wanted_path,
- uc_fw->fallback.path);
- drm_info(&i915->drm,
- "Consider updating your linux-firmware pkg or downloading from %s\n",
- INTEL_UC_FIRMWARE_URL);
-
- uc_fw->path = uc_fw->fallback.path;
- uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
- uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
+
+ /* Any error is terminal if overriding. Don't bother searching for older versions */
+ if (err && intel_uc_fw_is_overridden(uc_fw))
+ goto fail;
+
+ while (err == -ENOENT) {
+ old_ver = true;
+
+ __uc_fw_auto_select(i915, uc_fw);
+ if (!uc_fw->file_selected.path) {
+ /*
+ * No more options! But set the path back to something
+ * valid just in case it gets dereferenced.
+ */
+ uc_fw->file_selected.path = file_ideal.path;
+
+ /* Also, preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+ break;
}
+
+ err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
}
+
if (err)
goto fail;
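
The fetch path above replaces the single hard-coded fallback file with a search: whenever the requested blob is missing (-ENOENT) it calls __uc_fw_auto_select() again to pick the next-oldest known firmware and retries, stopping once a file loads or the table is exhausted, in which case the originally wanted path and version are restored before bailing out. A userspace sketch of that retry shape, where try_load() and the path list are stand-ins rather than the driver's API:

/* Keep falling back to older firmware names until one is found. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static const char * const paths[] = {
	"guc_70.5.1.bin", "guc_70.1.1.bin", "guc_69.0.3.bin", NULL
};

/* Pretend only the oldest blob is present on the filesystem. */
static int try_load(const char *path)
{
	return strcmp(path, "guc_69.0.3.bin") == 0 ? 0 : -ENOENT;
}

int main(void)
{
	const char *selected = NULL;
	int i = 0, err = -ENOENT;

	while (err == -ENOENT && paths[i]) {
		selected = paths[i++];
		err = try_load(selected);
	}

	if (err) {
		fprintf(stderr, "no usable firmware found\n");
		return 1;
	}

	if (i > 1)	/* anything past the first entry is an older fallback */
		printf("%s is recommended, but only %s was found\n",
		       paths[0], selected);
	return 0;
}
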
@@ -465,18 +599,39 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (err)
goto fail;
- if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- if (!intel_uc_fw_is_overridden(uc_fw)) {
- err = -ENOEXEC;
- goto fail;
+ if (uc_fw->file_wanted.major_ver) {
+ /* Check the file's major version was as it claimed */
+ if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) {
+ drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver);
+ if (!intel_uc_fw_is_overridden(uc_fw)) {
+ err = -ENOEXEC;
+ goto fail;
+ }
+ } else {
+ if (uc_fw->file_selected.minor_ver < uc_fw->file_wanted.minor_ver)
+ old_ver = true;
}
}
+ if (old_ver) {
+ /* Preserve the version that was really wanted */
+ memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
+
+ drm_notice(&i915->drm,
+ "%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_wanted.path,
+ uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver,
+ uc_fw->file_selected.path,
+ uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver);
+ drm_info(&i915->drm,
+ "Consider updating your linux-firmware pkg or downloading from %s\n",
+ INTEL_UC_FIRMWARE_URL);
+ }
+
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
if (!IS_ERR(obj))
@@ -503,7 +658,7 @@ fail:
INTEL_UC_FIRMWARE_ERROR);
i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, err);
drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
@@ -645,7 +800,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
fail:
i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
err);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
@@ -863,19 +1018,34 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
*/
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
+ u32 ver_sel, ver_want;
+
drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
- if (uc_fw->fallback.path) {
- drm_printf(p, "%s firmware fallback: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
- drm_printf(p, "fallback selected: %s\n",
- str_yes_no(uc_fw->path == uc_fw->fallback.path));
- }
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
+ if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
+ drm_printf(p, "%s firmware wanted: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
- drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ ver_sel = MAKE_UC_VER(uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ ver_want = MAKE_UC_VER(uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver);
+ if (ver_sel < ver_want)
+ drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
+ uc_fw->file_wanted.major_ver,
+ uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.patch_ver,
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
+ else
+ drm_printf(p, "\tversion: found %u.%u.%u\n",
+ uc_fw->file_selected.major_ver,
+ uc_fw->file_selected.minor_ver,
+ uc_fw->file_selected.patch_ver);
drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 7aa2644400b9..cb586f7df270 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -65,6 +65,18 @@ enum intel_uc_fw_type {
#define INTEL_UC_FW_NUM_TYPES 2
/*
+ * The firmware build process will generate a version header file with major and
+ * minor version defined. The versions are built into the CSS header of the firmware.
+ * The i915 kernel driver sets the minimal firmware version required per platform.
+ */
+struct intel_uc_fw_file {
+ const char *path;
+ u16 major_ver;
+ u16 minor_ver;
+ u16 patch_ver;
+};
+
+/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the uC.
*/
@@ -74,11 +86,12 @@ struct intel_uc_fw {
const enum intel_uc_fw_status status;
enum intel_uc_fw_status __status; /* no accidental overwrites */
};
- const char *wanted_path;
- const char *path;
+ struct intel_uc_fw_file file_wanted;
+ struct intel_uc_fw_file file_selected;
bool user_overridden;
size_t size;
struct drm_i915_gem_object *obj;
+
/**
* @dummy: A vma used in binding the uc fw to ggtt. We can't define this
* vma on the stack as it can lead to a stack overflow, so we define it
@@ -89,30 +102,18 @@ struct intel_uc_fw {
struct i915_vma_resource dummy;
struct i915_vma *rsa_data;
- /*
- * The firmware build process will generate a version header file with major and
- * minor version defined. The versions are built into CSS header of firmware.
- * i915 kernel driver set the minimal firmware version required per platform.
- */
- u16 major_ver_wanted;
- u16 minor_ver_wanted;
- u16 major_ver_found;
- u16 minor_ver_found;
-
- struct {
- const char *path;
- u16 major_ver;
- u16 minor_ver;
- } fallback;
-
u32 rsa_size;
u32 ucode_size;
-
u32 private_data_size;
bool loaded_via_gsc;
};
+#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))
+#define GET_UC_VER(uc) (MAKE_UC_VER((uc)->fw.file_selected.major_ver, \
+ (uc)->fw.file_selected.minor_ver, \
+ (uc)->fw.file_selected.patch_ver))
+
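
MAKE_UC_VER() packs major/minor/patch into one u32 with patch in bits 0-7, minor in bits 8-15 and major from bit 16 up, so two packed versions compare correctly with a plain integer comparison as long as minor and patch each stay below 256; intel_uc_fw_dump() above uses exactly that to decide whether the found firmware is older than the wanted one. A small worked example with made-up version numbers:

/* Worked example of the packed-version comparison. */
#include <assert.h>
#include <stdio.h>

#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))

int main(void)
{
	unsigned int found = MAKE_UC_VER(70, 5, 1);	/* 0x00460501 */
	unsigned int wanted = MAKE_UC_VER(70, 1, 1);	/* 0x00460101 */

	assert(found == 0x460501);
	/* Ordered by major, then minor, then patch. */
	assert(found > wanted);
	assert(MAKE_UC_VER(69, 9, 9) < wanted);
	printf("found=0x%06x wanted=0x%06x\n", found, wanted);
	return 0;
}
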
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index b05e0e35b734..7a411178bdbf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -83,8 +83,10 @@ struct uc_css_header {
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
-#define HUC_GSC_VERSION_DW 44
-#define HUC_GSC_MAJOR_VER_MASK (0xFF << 0)
-#define HUC_GSC_MINOR_VER_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_HI_DW 44
+#define HUC_GSC_MAJOR_VER_HI_MASK (0xFF << 0)
+#define HUC_GSC_MINOR_VER_HI_MASK (0xFF << 16)
+#define HUC_GSC_VERSION_LO_DW 45
+#define HUC_GSC_PATCH_VER_LO_MASK (0xFF << 0)
#endif /* _INTEL_UC_FW_ABI_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 1df71d0796ae..e28518fe8b90 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -54,6 +54,9 @@ static int intel_guc_scrub_ctbs(void *arg)
struct intel_engine_cs *engine;
struct intel_context *ce;
+ if (!intel_has_gpu_reset(gt))
+ return 0;
+
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
@@ -62,7 +65,7 @@ static int intel_guc_scrub_ctbs(void *arg)
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- pr_err("Failed to create context, %d: %d\n", i, ret);
+ drm_err(&gt->i915->drm, "Failed to create context, %d: %d\n", i, ret);
goto err;
}
@@ -83,7 +86,7 @@ static int intel_guc_scrub_ctbs(void *arg)
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed to create request, %d: %d\n", i, ret);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n", i, ret);
goto err;
}
@@ -93,7 +96,7 @@ static int intel_guc_scrub_ctbs(void *arg)
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
- pr_err("Last request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
goto err;
}
i915_request_put(last[i]);
@@ -110,7 +113,7 @@ static int intel_guc_scrub_ctbs(void *arg)
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
goto err;
}
@@ -150,7 +153,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
- pr_err("Context array allocation failed\n");
+ drm_err(&gt->i915->drm, "Context array allocation failed\n");
return -ENOMEM;
}
@@ -164,24 +167,24 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
ce[context_index] = NULL;
- pr_err("Failed to create context: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
- pr_err("Failed to create spinner: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
- pr_err("Failed to create spinner request: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
- pr_err("Failed to add Spinner request: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
goto err_spin_rq;
}
@@ -191,7 +194,7 @@ static int intel_guc_steal_guc_ids(void *arg)
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index--]);
ce[context_index] = NULL;
- pr_err("Failed to create context: %d\n", ret);
+ drm_err(&gt->i915->drm, "Failed to create context: %d\n", ret);
goto err_spin_rq;
}
@@ -200,8 +203,8 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = PTR_ERR(rq);
rq = NULL;
if (ret != -EAGAIN) {
- pr_err("Failed to create request, %d: %d\n",
- context_index, ret);
+ drm_err(&gt->i915->drm, "Failed to create request, %d: %d\n",
+ context_index, ret);
goto err_spin_rq;
}
} else {
@@ -215,7 +218,7 @@ static int intel_guc_steal_guc_ids(void *arg)
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
- pr_err("Spin request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Spin request failed to complete: %d\n", ret);
i915_request_put(last);
goto err_spin_rq;
}
@@ -227,7 +230,7 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
- pr_err("Last request failed to complete: %d\n", ret);
+ drm_err(&gt->i915->drm, "Last request failed to complete: %d\n", ret);
goto err_spin_rq;
}
@@ -235,7 +238,7 @@ static int intel_guc_steal_guc_ids(void *arg)
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed to steal guc_id, %d: %d\n", context_index, ret);
+ drm_err(&gt->i915->drm, "Failed to steal guc_id, %d: %d\n", context_index, ret);
goto err_spin_rq;
}
@@ -243,21 +246,20 @@ static int intel_guc_steal_guc_ids(void *arg)
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
- pr_err("Request with stolen guc_id failed to complete: %d\n",
- ret);
+ drm_err(&gt->i915->drm, "Request with stolen guc_id failed to complete: %d\n", ret);
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
- pr_err("No guc_id was stolen");
+ drm_err(&gt->i915->drm, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
new file mode 100644
index 000000000000..01f8cd3c3134
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+#define BEAT_INTERVAL 100
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_hang_guc(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ struct i915_gem_context *ctx;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine;
+ unsigned int reset_count;
+ u32 guc_status;
+ u32 old_beat;
+
+ ctx = kernel_context(gt->i915, NULL);
+ if (IS_ERR(ctx)) {
+ drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
+ return PTR_ERR(ctx);
+ }
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(gt->engine[BCS0]);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err;
+ }
+
+ engine = ce->engine;
+ reset_count = i915_reset_count(global);
+
+ old_beat = engine->props.heartbeat_interval_ms;
+ ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to boost heatbeat interval: %d\n", ret);
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to create spinner: %d\n", ret);
+ goto err;
+ }
+
+ rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to add Spinner request: %d\n", ret);
+ goto err_spin;
+ }
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "Failed to reset GuC, ret = %d\n", ret);
+ goto err_spin;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ if (!(guc_status & GS_MIA_IN_RESET)) {
+ i915_request_put(rq);
+ drm_err(&gt->i915->drm, "GuC failed to reset: status = 0x%08X\n", guc_status);
+ ret = -EIO;
+ goto err_spin;
+ }
+
+ /* Wait for the heartbeat to cause a reset */
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Request failed to complete: %d\n", ret);
+ goto err_spin;
+ }
+
+ if (i915_reset_count(global) == reset_count) {
+ drm_err(&gt->i915->drm, "Failed to record a GPU reset\n");
+ ret = -EINVAL;
+ goto err_spin;
+ }
+
+err_spin:
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ intel_engine_set_heartbeat(engine, old_beat);
+
+ if (ret == 0) {
+ rq = nop_request(engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ i915_request_put(rq);
+ if (ret) {
+ drm_err(&gt->i915->drm, "No-op failed to complete: %d\n", ret);
+ goto err;
+ }
+ }
+
+err:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ kernel_context_close(ctx);
+
+ return ret;
+}
+
+int intel_guc_hang_check(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_hang_guc),
+ };
+ struct intel_gt *gt = to_gt(i915);
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index 812220a43df8..d17982c36d25 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -115,30 +115,30 @@ static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
- pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+ drm_err(&gt->i915->drm, "Failed creating contexts: %ld", PTR_ERR(parent));
return PTR_ERR(parent);
} else if (!parent) {
- pr_debug("Not enough engines in class: %d", class);
+ drm_dbg(&gt->i915->drm, "Not enough engines in class: %d", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
- pr_err("Failed creating requests: %d", ret);
+ drm_err(&gt->i915->drm, "Failed creating requests: %d", ret);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
- pr_err("Failed waiting on request: %d", ret);
+ drm_err(&gt->i915->drm, "Failed waiting on request: %d", ret);
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
- pr_err("GT failed to idle: %d\n", ret);
+ drm_err(&gt->i915->drm, "GT failed to idle: %d\n", ret);
}
out:
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 557f3314291a..3b81a6d35a7b 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -298,7 +298,7 @@ no_enough_resource:
}
/**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
}
/**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index dad3a6054335..eef3bba8a41b 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "gvt.h"
+#include "intel_pci_config.h"
enum {
INTEL_GVT_PCI_BAR_GTTMMIO = 0,
@@ -353,9 +354,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(pdev, 0);
+ pci_resource_len(pdev, GTTMMADR_BAR);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(pdev, 2);
+ pci_resource_len(pdev, GTT_APERTURE_BAR);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index a30ba2d7b7ba..1b509c1a1e33 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -32,9 +32,10 @@
*
*/
+#include "display/intel_gmbus_regs.h"
+#include "gvt.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "gvt.h"
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b4f69364f9a1..ce0eb03709c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
* update the entry in this situation p2m will fail
- * settting the shadow entry to point to a scratch page
+ * setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index beea5895e499..daac2050d77d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -498,7 +498,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
+ refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -529,7 +529,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
+ int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {0};
@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c85bafe7539e..1c6e941c9666 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
}
/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @engine: the engine
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 94e5c29d2ee3..ae987e92251d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
- intel_device_info_print_static(INTEL_INFO(i915), &p);
- intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
@@ -188,47 +187,47 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
stringify_page_sizes(vma->resource->page_sizes_gtt,
NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
- switch (vma->ggtt_view.type) {
- case I915_GGTT_VIEW_NORMAL:
+ switch (vma->gtt_view.type) {
+ case I915_GTT_VIEW_NORMAL:
seq_puts(m, ", normal");
break;
- case I915_GGTT_VIEW_PARTIAL:
+ case I915_GTT_VIEW_PARTIAL:
seq_printf(m, ", partial [%08llx+%x]",
- vma->ggtt_view.partial.offset << PAGE_SHIFT,
- vma->ggtt_view.partial.size << PAGE_SHIFT);
+ vma->gtt_view.partial.offset << PAGE_SHIFT,
+ vma->gtt_view.partial.size << PAGE_SHIFT);
break;
- case I915_GGTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_ROTATED:
seq_printf(m, ", rotated [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
- vma->ggtt_view.rotated.plane[0].width,
- vma->ggtt_view.rotated.plane[0].height,
- vma->ggtt_view.rotated.plane[0].src_stride,
- vma->ggtt_view.rotated.plane[0].dst_stride,
- vma->ggtt_view.rotated.plane[0].offset,
- vma->ggtt_view.rotated.plane[1].width,
- vma->ggtt_view.rotated.plane[1].height,
- vma->ggtt_view.rotated.plane[1].src_stride,
- vma->ggtt_view.rotated.plane[1].dst_stride,
- vma->ggtt_view.rotated.plane[1].offset);
+ vma->gtt_view.rotated.plane[0].width,
+ vma->gtt_view.rotated.plane[0].height,
+ vma->gtt_view.rotated.plane[0].src_stride,
+ vma->gtt_view.rotated.plane[0].dst_stride,
+ vma->gtt_view.rotated.plane[0].offset,
+ vma->gtt_view.rotated.plane[1].width,
+ vma->gtt_view.rotated.plane[1].height,
+ vma->gtt_view.rotated.plane[1].src_stride,
+ vma->gtt_view.rotated.plane[1].dst_stride,
+ vma->gtt_view.rotated.plane[1].offset);
break;
- case I915_GGTT_VIEW_REMAPPED:
+ case I915_GTT_VIEW_REMAPPED:
seq_printf(m, ", remapped [(%ux%u, src_stride=%u, dst_stride=%u, offset=%u), (%ux%u, src_stride=%u, dst_stride=%u, offset=%u)]",
- vma->ggtt_view.remapped.plane[0].width,
- vma->ggtt_view.remapped.plane[0].height,
- vma->ggtt_view.remapped.plane[0].src_stride,
- vma->ggtt_view.remapped.plane[0].dst_stride,
- vma->ggtt_view.remapped.plane[0].offset,
- vma->ggtt_view.remapped.plane[1].width,
- vma->ggtt_view.remapped.plane[1].height,
- vma->ggtt_view.remapped.plane[1].src_stride,
- vma->ggtt_view.remapped.plane[1].dst_stride,
- vma->ggtt_view.remapped.plane[1].offset);
+ vma->gtt_view.remapped.plane[0].width,
+ vma->gtt_view.remapped.plane[0].height,
+ vma->gtt_view.remapped.plane[0].src_stride,
+ vma->gtt_view.remapped.plane[0].dst_stride,
+ vma->gtt_view.remapped.plane[0].offset,
+ vma->gtt_view.remapped.plane[1].width,
+ vma->gtt_view.remapped.plane[1].height,
+ vma->gtt_view.remapped.plane[1].src_stride,
+ vma->gtt_view.remapped.plane[1].dst_stride,
+ vma->gtt_view.remapped.plane[1].offset);
break;
default:
- MISSING_CASE(vma->ggtt_view.type);
+ MISSING_CASE(vma->gtt_view.type);
break;
}
}
@@ -411,7 +410,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
@@ -493,7 +492,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- str_enabled_disabled(!dev_priv->power_domains.init_wakeref));
+ str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index deb8a8b76965..c459eb362c47 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -61,6 +61,7 @@
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
+#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
@@ -105,6 +106,12 @@ static const char irst_name[] = "INT3392";
static const struct drm_driver i915_drm_driver;
+static void i915_release_bridge_dev(struct drm_device *dev,
+ void *bridge)
+{
+ pci_dev_put(bridge);
+}
+
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
@@ -115,7 +122,9 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "bridge device not found\n");
return -EIO;
}
- return 0;
+
+ return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
+ dev_priv->bridge_dev);
}
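
i915_get_bridge_dev() now hands the bridge reference to a DRM-managed release action, so pci_dev_put() runs automatically when the drm_device is released and the explicit puts can be dropped from the MMIO error and release paths later in this patch. A minimal userspace mock of that managed-release idea follows; every type and name in it is illustrative only.

/* Mock: register cleanup actions once, run them all at teardown. */
#include <stdio.h>

#define MAX_ACTIONS 8

struct mock_dev {
	void (*action[MAX_ACTIONS])(void *);
	void *data[MAX_ACTIONS];
	int nr_actions;
};

static int add_release_action(struct mock_dev *dev, void (*fn)(void *), void *data)
{
	if (dev->nr_actions >= MAX_ACTIONS)
		return -1;
	dev->action[dev->nr_actions] = fn;
	dev->data[dev->nr_actions++] = data;
	return 0;
}

static void release_dev(struct mock_dev *dev)
{
	int i;

	/* Run in reverse registration order. */
	for (i = dev->nr_actions - 1; i >= 0; i--)
		dev->action[i](dev->data[i]);
	dev->nr_actions = 0;
}

static void put_bridge(void *bridge)
{
	printf("dropping reference on %s\n", (const char *)bridge);
}

int main(void)
{
	static char bridge_name[] = "bridge_dev";
	struct mock_dev dev = { .nr_actions = 0 };

	add_release_action(&dev, put_bridge, bridge_name);
	release_dev(&dev);	/* cleanup happens here, no manual put needed */
	return 0;
}
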
/* Allocate space for the MCH regs if needed, return nonzero on error */
@@ -252,8 +261,8 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->wq == NULL)
goto out_err;
- dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->hotplug.dp_wq == NULL)
+ dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->display.hotplug.dp_wq == NULL)
goto out_free_wq;
return 0;
@@ -268,7 +277,7 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
- destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -302,8 +311,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(to_gt(i915), ALL_ENGINES);
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i)
+ __intel_gt_reset(gt, ALL_ENGINES);
+ }
}
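
sanitize_gpu() and several suspend/resume paths below switch from acting on to_gt(i915) only to iterating every GT with for_each_gt(), in preparation for devices that expose more than one GT (see the media_gt pointer added to drm_i915_private further down). Roughly, the macro walks the device's GT array and skips empty slots; the sketch below is a simplified userspace rendering of that pattern, with the structs, MAX_GT value and macro body all illustrative rather than the driver's definitions.

/* Simplified stand-in for the multi-GT iteration pattern. */
#include <stdio.h>

#define MAX_GT 4

struct mock_gt { int id; };
struct mock_i915 { struct mock_gt *gt[MAX_GT]; };

#define for_each_mock_gt(gt__, i915__, id__) \
	for ((id__) = 0; (id__) < MAX_GT; (id__)++) \
		if (((gt__) = (i915__)->gt[(id__)]))

int main(void)
{
	struct mock_gt gt0 = { .id = 0 }, gt1 = { .id = 1 };
	struct mock_i915 i915 = { .gt = { &gt0, &gt1 } };	/* remaining slots stay NULL */
	struct mock_gt *gt;
	unsigned int i;

	for_each_mock_gt(gt, &i915, i)
		printf("resetting gt%d\n", gt->id);	/* e.g. __intel_gt_reset(gt, ...) */
	return 0;
}
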
/**
@@ -326,19 +340,19 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
- intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
+ intel_uncore_mmio_debug_init_early(dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->backlight_lock);
+ mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->audio.mutex);
- mutex_init(&dev_priv->wm.wm_mutex);
- mutex_init(&dev_priv->pps_mutex);
- mutex_init(&dev_priv->hdcp_comp_mutex);
+ mutex_init(&dev_priv->display.audio.mutex);
+ mutex_init(&dev_priv->display.wm.wm_mutex);
+ mutex_init(&dev_priv->display.pps.mutex);
+ mutex_init(&dev_priv->display.hdcp.comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -357,7 +371,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- intel_root_gt_init_early(dev_priv);
+ ret = intel_root_gt_init_early(dev_priv);
+ if (ret < 0)
+ goto err_rootgt;
i915_drm_clients_init(&dev_priv->clients, dev_priv);
@@ -382,6 +398,7 @@ err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release_all(dev_priv);
i915_drm_clients_fini(&dev_priv->clients);
+err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -423,7 +440,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
@@ -432,17 +450,27 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_init_mmio(&dev_priv->uncore);
- if (ret)
- return ret;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_uncore_init_mmio(gt->uncore);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(&dev_priv->drm,
+ intel_uncore_fini_mmio,
+ gt->uncore);
+ if (ret)
+ return ret;
+ }
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
intel_device_info_runtime_init(dev_priv);
- ret = intel_gt_init_mmio(to_gt(dev_priv));
- if (ret)
- goto err_uncore;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_gt_init_mmio(gt);
+ if (ret)
+ goto err_uncore;
+ }
/* As early as possible, scrub existing GPU state before clobbering */
sanitize_gpu(dev_priv);
@@ -451,8 +479,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
- intel_uncore_fini_mmio(&dev_priv->uncore);
- pci_dev_put(dev_priv->bridge_dev);
return ret;
}
@@ -464,8 +490,6 @@ err_uncore:
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
- intel_uncore_fini_mmio(&dev_priv->uncore);
- pci_dev_put(dev_priv->bridge_dev);
}
/**
@@ -715,6 +739,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct intel_gt *gt;
+ unsigned int i;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -734,7 +760,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
- intel_gt_driver_register(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_register(gt);
intel_display_driver_register(dev_priv);
@@ -753,6 +780,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt;
+ unsigned int i;
+
i915_switcheroo_unregister(dev_priv);
intel_unregister_dsm_handler();
@@ -762,7 +792,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
- intel_gt_driver_unregister(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_unregister(gt);
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -784,6 +815,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
struct drm_printer p = drm_debug_printer("i915 device info:");
+ struct intel_gt *gt;
+ unsigned int i;
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
@@ -793,10 +826,11 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
INTEL_INFO(dev_priv)->platform),
GRAPHICS_VER(dev_priv));
- intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
- intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_device_info_print(INTEL_INFO(dev_priv),
+ RUNTIME_INFO(dev_priv), &p);
i915_print_iommu_status(dev_priv, &p);
- intel_gt_info_print(&to_gt(dev_priv)->info, &p);
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_info_print(&gt->info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -814,6 +848,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
+ struct intel_runtime_info *runtime;
struct drm_i915_private *i915;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
@@ -829,7 +864,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
- RUNTIME_INFO(i915)->device_id = pdev->device;
+
+ /* Initialize the runtime info from static const data and pdev. */
+ runtime = RUNTIME_INFO(i915);
+ memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
+ runtime->device_id = pdev->device;
return i915;
}
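
i915_driver_create() now seeds the mutable runtime info from the static const __runtime template embedded in the device info before filling in values only known at probe time, such as the PCI device id. A small sketch of that const-template/mutable-copy pattern, with illustrative struct names:

/* Copy a const template into per-device runtime state, then specialise it. */
#include <stdio.h>
#include <string.h>

struct runtime_info { unsigned short device_id; int graphics_ver; };
struct device_info { struct runtime_info runtime_template; };

static const struct device_info match_info = {
	.runtime_template = { .graphics_ver = 12 },
};

struct mock_i915 { struct device_info info; struct runtime_info runtime; };

int main(void)
{
	struct mock_i915 i915 = { .info = match_info };

	memcpy(&i915.runtime, &i915.info.runtime_template, sizeof(i915.runtime));
	i915.runtime.device_id = 0x1234;	/* e.g. taken from the PCI device */

	printf("ver=%d devid=0x%04x\n", i915.runtime.graphics_ver,
	       (unsigned int)i915.runtime.device_id);
	return 0;
}
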
@@ -948,7 +987,9 @@ out_fini:
void i915_driver_remove(struct drm_i915_private *i915)
{
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_driver_unregister(i915);
@@ -972,18 +1013,19 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_hw_remove(i915);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t wakeref;
if (!dev_priv->do_release)
return;
- disable_rpm_wakeref_asserts(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
i915_gem_driver_release(dev_priv);
@@ -994,7 +1036,8 @@ static void i915_driver_release(struct drm_device *dev)
i915_driver_mmio_release(dev_priv);
- enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_put(rpm, wakeref);
+
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
@@ -1206,13 +1249,15 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
disable_rpm_wakeref_asserts(rpm);
i915_gem_suspend_late(dev_priv);
- intel_uncore_suspend(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@@ -1344,7 +1389,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -1398,9 +1444,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
drm_err(&dev_priv->drm,
"Resume prepare failed: %d, continuing anyway\n", ret);
- intel_uncore_resume_early(&dev_priv->uncore);
-
- intel_gt_check_and_clear_faults(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i) {
+ intel_uncore_resume_early(gt->uncore);
+ intel_gt_check_and_clear_faults(gt);
+ }
intel_display_power_resume_early(dev_priv);
@@ -1580,7 +1627,8 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -1595,11 +1643,13 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_gt_runtime_suspend(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_suspend(gt);
intel_runtime_pm_disable_interrupts(dev_priv);
- intel_uncore_suspend(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
intel_display_power_suspend(dev_priv);
@@ -1663,7 +1713,8 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -1683,7 +1734,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
- intel_uncore_runtime_resume(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_runtime_resume(gt->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1691,7 +1743,8 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- intel_gt_runtime_resume(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_resume(gt);
/*
* On VLV/CHV display interrupts are part of the display
@@ -1703,7 +1756,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_hpd_poll_disable(dev_priv);
}
- intel_enable_ipc(dev_priv);
+ skl_watermark_ipc_update(dev_priv);
enable_rpm_wakeref_asserts(rpm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 086bbe8945d6..bdc81db76dbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,20 +34,10 @@
#include <linux/pm_qos.h>
-#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_cdclk.h"
#include "display/intel_display.h"
-#include "display/intel_display_power.h"
-#include "display/intel_dmc.h"
-#include "display/intel_dpll_mgr.h"
-#include "display/intel_dsb.h"
-#include "display/intel_fbc.h"
-#include "display/intel_frontbuffer.h"
-#include "display/intel_global_state.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_opregion.h"
+#include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_lmem.h"
@@ -70,80 +60,24 @@
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
-#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
-struct dpll;
struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
struct drm_i915_private;
-struct intel_atomic_state;
-struct intel_audio_funcs;
-struct intel_cdclk_config;
-struct intel_cdclk_funcs;
-struct intel_cdclk_state;
-struct intel_cdclk_vals;
-struct intel_color_funcs;
struct intel_connector;
-struct intel_crtc;
struct intel_dp;
-struct intel_dpll_funcs;
struct intel_encoder;
-struct intel_fbdev;
-struct intel_fdi_funcs;
-struct intel_gmbus;
-struct intel_hotplug_funcs;
-struct intel_initial_plane_config;
struct intel_limit;
-struct intel_overlay;
struct intel_overlay_error_state;
struct vlv_s0ix_state;
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
-struct i915_hotplug {
- struct delayed_work hotplug_work;
-
- const u32 *hpd, *pch_hpd;
-
- struct {
- unsigned long last_jiffies;
- int count;
- enum {
- HPD_ENABLED = 0,
- HPD_DISABLED = 1,
- HPD_MARK_DISABLED = 2
- } state;
- } stats[HPD_NUM_PINS];
- u32 event_bits;
- u32 retry_bits;
- struct delayed_work reenable_work;
-
- u32 long_port_mask;
- u32 short_port_mask;
- struct work_struct dig_port_work;
-
- struct work_struct poll_init_work;
- bool poll_enabled;
-
- unsigned int hpd_storm_threshold;
- /* Whether or not to count short HPD IRQs in HPD storms */
- u8 hpd_short_storm_enabled;
-
- /*
- * if we get a HPD irq from DP and a HPD irq from non-DP
- * the non-DP HPD could block the workqueue on a mode config
- * mutex getting, that userspace may have taken. However
- * userspace is waiting on the DP workqueue to run which is
- * blocked behind the non-DP one.
- */
- struct workqueue_struct *dp_wq;
-};
-
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -151,55 +85,9 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-struct sdvo_device_mapping {
- u8 initialized;
- u8 dvo_port;
- u8 slave_addr;
- u8 dvo_wiring;
- u8 i2c_pin;
- u8 ddc_pin;
-};
-
-/* functions used for watermark calcs for display. */
-struct drm_i915_wm_disp_funcs {
- /* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
- int (*compute_pipe_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_intermediate_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_global_watermarks)(struct intel_atomic_state *state);
-};
-
-struct drm_i915_display_funcs {
- /* Returns the active state of the crtc, and if the crtc is active,
- * fills out the pipe-config with the hw state. */
- bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_state *);
- void (*get_initial_plane_config)(struct intel_crtc *,
- struct intel_initial_plane_config *);
- void (*crtc_enable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*crtc_disable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*commit_modeset_enables)(struct intel_atomic_state *state);
-};
-
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-#define QUIRK_LVDS_SSC_DISABLE (1<<1)
-#define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_BACKLIGHT_PRESENT (1<<3)
-#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
-#define QUIRK_INCREASE_T12_DELAY (1<<6)
-#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
-#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
struct i915_suspend_saved_registers {
u32 saveDSPARB;
@@ -289,51 +177,8 @@ i915_fence_timeout(const struct drm_i915_private *i915)
return i915_fence_context_timeout(i915, U64_MAX);
}
-/* Amount of SAGV/QGV points, BSpec precisely defines this */
-#define I915_NUM_QGV_POINTS 8
-
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-/* Amount of PSF GV points, BSpec precisely defines this */
-#define I915_NUM_PSF_GV_POINTS 3
-
-struct intel_vbt_data {
- /* bdb version */
- u16 version;
-
- /* Feature bits */
- unsigned int int_tv_support:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int int_lvds_support:1;
- unsigned int display_clock_mode:1;
- unsigned int fdi_rx_polarity_inverted:1;
- int lvds_ssc_freq;
- enum drm_panel_orientation orientation;
-
- bool override_afc_startup;
- u8 override_afc_startup_val;
-
- int crt_ddc_pin;
-
- struct list_head display_devices;
- struct list_head bdb_blocks;
-
- struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
- struct sdvo_device_mapping sdvo_mappings[2];
-};
-
-struct i915_frontbuffer_tracking {
- spinlock_t lock;
-
- /*
- * Tracking bits for delayed frontbuffer flushing du to gpu activity or
- * scheduled flips.
- */
- unsigned busy_bits;
- unsigned flip_bits;
-};
-
struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
@@ -348,32 +193,11 @@ struct i915_selftest_stash {
struct ida mock_region_instances;
};
-/* intel_audio.c private */
-struct intel_audio_private {
- /* Display internal audio functions */
- const struct intel_audio_funcs *funcs;
-
- /* hda/i915 audio component */
- struct i915_audio_component *component;
- bool component_registered;
- /* mutex for audio/video sync */
- struct mutex mutex;
- int power_refcount;
- u32 freq_cntrl;
-
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *encoder_map[I915_MAX_PIPES];
-
- /* necessary resource sharing with HDMI LPE audio driver. */
- struct {
- struct platform_device *platdev;
- int irq;
- } lpe;
-};
-
struct drm_i915_private {
struct drm_device drm;
+ struct intel_display display;
+
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
@@ -417,27 +241,6 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_dmc dmc;
-
- struct intel_gmbus *gmbus[GMBUS_NUM_PINS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of where the gmbus and gpio blocks are located (either
- * on PCH or on SoC for platforms without PCH).
- */
- u32 gpio_mmio_base;
-
- /* MMIO base address for MIPI regs */
- u32 mipi_mmio_base;
-
- u32 pps_mmio_base;
-
- wait_queue_head_t gmbus_wait_queue;
-
struct pci_dev *bridge_dev;
struct rb_root uabi_engines;
@@ -461,48 +264,15 @@ struct drm_i915_private {
};
u32 pipestat_irq_mask[I915_MAX_PIPES];
- struct i915_hotplug hotplug;
- struct intel_fbc *fbc[I915_MAX_FBCS];
- struct intel_opregion opregion;
- struct intel_vbt_data vbt;
-
bool preserve_bios_swizzle;
- /* overlay */
- struct intel_overlay *overlay;
-
- /* backlight registers and fields in struct intel_panel */
- struct mutex backlight_lock;
-
- /* protects panel power sequencer state */
- struct mutex pps_mutex;
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
- unsigned int fdi_pll_freq;
unsigned int czclk_freq;
- struct {
- /* The current hardware cdclk configuration */
- struct intel_cdclk_config hw;
-
- /* cdclk, divider, and ratio table from bspec */
- const struct intel_cdclk_vals *table;
-
- struct intel_global_obj obj;
- } cdclk;
-
- struct {
- /* The current hardware dbuf configuration */
- u8 enabled_slices;
-
- struct intel_global_obj obj;
- } dbuf;
-
/**
* wq - Driver workqueue for GEM.
*
@@ -512,40 +282,14 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
- /* ordered wq for modesets */
- struct workqueue_struct *modeset_wq;
- /* unbound hipri wq for page flips/plane updates */
- struct workqueue_struct *flip_wq;
-
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* pm display functions */
- const struct drm_i915_wm_disp_funcs *wm_disp;
-
- /* irq display functions */
- const struct intel_hotplug_funcs *hotplug_funcs;
-
- /* fdi display functions */
- const struct intel_fdi_funcs *fdi_funcs;
-
- /* display pll funcs */
- const struct intel_dpll_funcs *dpll_funcs;
-
- /* Display functions */
- const struct drm_i915_display_funcs *display;
-
- /* Display internal color functions */
- const struct intel_color_funcs *color_funcs;
-
- /* Display CDCLK functions */
- const struct intel_cdclk_funcs *cdclk_funcs;
-
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
- unsigned long quirks;
+ unsigned long gem_quirks;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
@@ -554,34 +298,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- /**
- * dpll and cdclk state is protected by connection_mutex
- * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms plls
- * share registers.
- */
- struct {
- struct mutex lock;
-
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *mgr;
-
- struct {
- int nssc;
- int ssc;
- } ref_clks;
- } dpll;
-
struct list_head global_obj_list;
- struct i915_frontbuffer_tracking fb_tracking;
-
- struct intel_atomic_helper {
- struct llist_head free_list;
- struct work_struct free_work;
- } atomic_helper;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -600,21 +318,8 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- struct i915_power_domains power_domains;
-
struct i915_gpu_error gpu_error;
- /* list of fbdev register on this device */
- struct intel_fbdev *fbdev;
- struct work_struct fbdev_suspend_work;
-
- struct drm_property *broadcast_rgb_property;
- struct drm_property *force_audio_property;
-
- u32 fdi_rx_config;
-
- /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
- u32 chv_phy_control;
/*
* Shadows for CHV DPLL_MD regs to keep the state
* checker somewhat working in the presence hardware
@@ -627,51 +332,6 @@ struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
- enum {
- I915_SAGV_UNKNOWN = 0,
- I915_SAGV_DISABLED,
- I915_SAGV_ENABLED,
- I915_SAGV_NOT_CONTROLLED
- } sagv_status;
-
- u32 sagv_block_time_us;
-
- struct {
- /*
- * Raw watermark latency values:
- * in 0.1us units for WM0,
- * in 0.5us units for WM1+.
- */
- /* primary */
- u16 pri_latency[5];
- /* sprite */
- u16 spr_latency[5];
- /* cursor */
- u16 cur_latency[5];
- /*
- * Raw watermark memory latency values
- * for SKL for all 8 levels
- * in 1us units.
- */
- u16 skl_latency[8];
-
- /* current hardware state */
- union {
- struct ilk_wm_values hw;
- struct vlv_wm_values vlv;
- struct g4x_wm_values g4x;
- };
-
- u8 max_level;
-
- /*
- * Should be held around atomic WM register writing; also
- * protects * intel_crtc->wm.active and
- * crtc_state->wm.need_postvbl_update.
- */
- struct mutex wm_mutex;
- } wm;
-
struct dram_info {
bool wm_lv_0_adjust_needed;
u8 num_channels;
@@ -689,18 +349,6 @@ struct drm_i915_private {
u8 num_psf_gv_points;
} dram_info;
- struct intel_bw_info {
- /* for each QGV point */
- unsigned int deratedbw[I915_NUM_QGV_POINTS];
- /* for each PSF GV point */
- unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- u8 num_planes;
- } max_bw[6];
-
- struct intel_global_obj bw_obj;
-
struct intel_runtime_pm runtime_pm;
struct i915_perf perf;
@@ -716,6 +364,9 @@ struct drm_i915_private {
struct kobject *sysfs_gt;
+ /* Quick lookup of media GT (current platforms only have one) */
+ struct intel_gt *media_gt;
+
struct {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
@@ -733,9 +384,6 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
- /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
- u8 window2_delay;
-
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -743,31 +391,16 @@ struct drm_i915_private {
bool irq_enabled;
- union {
- /* perform PHY state sanity checks? */
- bool chv_phy_assert[2];
-
- /*
- * DG2: Mask of PHYs that were not calibrated by the firmware
- * and should not be used.
- */
- u8 snps_phy_failed_calibration;
- };
-
- bool ipc_enabled;
-
- struct intel_audio_private audio;
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 snps_phy_failed_calibration;
struct i915_pmu pmu;
struct i915_drm_clients clients;
- struct i915_hdcp_comp_master *hdcp_master;
- bool hdcp_comp_added;
-
- /* Mutex to protect the above hdcp component related values. */
- struct mutex hdcp_comp_mutex;
-
/* The TTM device structure. */
struct ttm_device bdev;
@@ -826,28 +459,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(engine__) && (engine__)->uabi_class == (class__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define I915_GTT_OFFSET_NONE ((u32)-1)
-
-/*
- * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
- * considered to be the frontbuffer for the given plane interface-wise. This
- * doesn't mean that the hw necessarily already scans it out, but that any
- * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
- *
- * We have one bit per pipe and per scanout plane type.
- */
-#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
- BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
- BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
-})
-#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
- BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
- GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
- INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-
#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
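
The frontbuffer tracking macros removed above encode one bit per scanout plane per pipe, eight bits per pipe, with the overlay taking the top bit of each pipe's byte. A minimal userspace sketch of that layout; BIT() and GENMASK() are re-defined here as 32-bit stand-ins so it builds standalone:

#include <stdio.h>

#define BIT(n)          (1U << (n))
#define GENMASK(h, l)   (((~0U) << (l)) & (~0U >> (31 - (h))))

#define FRONTBUFFER_BITS_PER_PIPE 8
#define FRONTBUFFER(pipe, plane)  BIT((plane) + FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define FRONTBUFFER_OVERLAY(pipe) BIT(FRONTBUFFER_BITS_PER_PIPE - 1 + \
                                      FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define FRONTBUFFER_ALL(pipe)     GENMASK(FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
                                          FRONTBUFFER_BITS_PER_PIPE * (pipe))

int main(void)
{
        printf("pipe 1, plane 2 -> 0x%08x\n", FRONTBUFFER(1, 2));      /* bit 10 */
        printf("pipe 1, overlay -> 0x%08x\n", FRONTBUFFER_OVERLAY(1)); /* bit 15 */
        printf("all of pipe 1   -> 0x%08x\n", FRONTBUFFER_ALL(1));     /* bits 8..15 */
        return 0;
}
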
@@ -856,19 +467,19 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver)
-#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \
- INTEL_INFO(i915)->graphics.rel)
+#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver)
+#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
+ RUNTIME_INFO(i915)->graphics.ip.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
-#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
-#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \
- INTEL_INFO(i915)->media.rel)
+#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver)
+#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
+ RUNTIME_INFO(i915)->media.ip.rel)
#define IS_MEDIA_VER(i915, from, until) \
(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
-#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver)
+#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver)
#define IS_DISPLAY_VER(i915, from, until) \
(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
@@ -1210,7 +821,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
+#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
@@ -1218,7 +829,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
- ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
+ ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
})
#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
@@ -1249,13 +860,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
-#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0)
+#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
-#define HAS_DP20(dev_priv) (IS_DG2(dev_priv))
+#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+
+#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
@@ -1264,7 +877,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
-#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1272,7 +885,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
-#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
+#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
#define HAS_HECI_PXP(dev_priv) \
(INTEL_INFO(dev_priv)->has_heci_pxp)
@@ -1302,9 +915,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)
+
/*
* Platform has the dedicated compression control state for each lmem surfaces
* stored in lmem to support the 3D and media compression formats.
@@ -1313,7 +928,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1335,9 +950,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
+#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
@@ -1352,91 +967,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
-#define HAS_PERCTX_PREEMPT_CTRL(i915) \
- ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
-
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
-#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
+#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
-/* i915_gem.c */
-void i915_gem_init_early(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-
-static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
-{
- /*
- * A single pass should suffice to release all the freed objects (along
- * most call paths) , but be a little more paranoid in that freeing
- * the objects does take a little amount of time, during which the rcu
- * callbacks could have added new objects into the freed list, and
- * armed the work again.
- */
- while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
- rcu_barrier();
- }
-}
-
-static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
-{
- /*
- * Similar to objects above (see i915_gem_drain_freed-objects), in
- * general we have workers that are armed by RCU and then rearm
- * themselves in their callbacks. To be paranoid, we need to
- * drain the workqueue a second time after waiting for the RCU
- * grace period so that we catch work queued via RCU from the first
- * pass. As neither drain_workqueue() nor flush_workqueue() report
- * a result, we make an assumption that we only don't require more
- * than 3 passes to catch all _recursive_ RCU delayed work.
- *
- */
- int pass = 3;
- do {
- flush_workqueue(i915->wq);
- rcu_barrier();
- i915_gem_drain_freed_objects(i915);
- } while (--pass);
- drain_workqueue(i915->wq);
-}
-
-struct i915_vma * __must_check
-i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
- struct i915_gem_ww_ctx *ww,
- const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
-struct i915_vma * __must_check
-i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
- unsigned long flags);
-#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
-#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
-#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
-#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
-
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-
-int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-
-int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-void i915_gem_driver_register(struct drm_i915_private *i915);
-void i915_gem_driver_unregister(struct drm_i915_private *i915);
-void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
-void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-
-int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
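
The GRAPHICS_VER_FULL()/MEDIA_VER_FULL() macros above pack a version and release into one comparable integer via IP_VER(). A small standalone sketch of that packing; the version numbers are only illustrative:

#include <stdio.h>

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

int main(void)
{
        unsigned int full = IP_VER(12, 55);

        printf("ver=%u rel=%u packed=0x%04x\n", full >> 8, full & 0xff, full);
        /* Packed values compare naturally: 12.55 sorts after 12.50. */
        printf("IP_VER(12, 55) >= IP_VER(12, 50): %d\n",
               IP_VER(12, 55) >= IP_VER(12, 50));
        return 0;
}
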
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..2bdddb61ebd7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
&to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
+ list_for_each_entry_safe(obj, on,
+ &to_gt(i915)->lmem_userfault_list, userfault_link)
+ i915_gem_object_runtime_pm_release_mmap_offset(obj);
+
/*
* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
@@ -885,7 +889,7 @@ static void discard_ggtt_vma(struct i915_vma *vma)
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -896,7 +900,7 @@ i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
GEM_WARN_ON(!ww);
if (flags & PIN_MAPPABLE &&
- (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
+ (!view || view->type == I915_GTT_VIEW_NORMAL)) {
/*
* If the required space is larger than the available
* aperture, we will not able to find a slot for the
@@ -987,7 +991,7 @@ new_vma:
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view,
+ const struct i915_gtt_view *view,
u64 size, u64 alignment, u64 flags)
{
struct i915_gem_ww_ctx ww;
@@ -1035,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -1085,14 +1089,50 @@ out:
return err;
}
+/*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid: freeing the objects does take a
+ * small amount of time, during which the RCU callbacks could have added new
+ * objects to the freed list and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
+ rcu_barrier();
+ }
+}
+
+/*
+ * Similar to the objects above (see i915_gem_drain_freed_objects()), in general
+ * we have workers that are armed by RCU and then rearm themselves in their
+ * callbacks. To be paranoid, we need to drain the workqueue a second time after
+ * waiting for the RCU grace period so that we catch work queued via RCU from
+ * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
+ * result, we assume that no more than 3 passes are required to catch all
+ * _recursive_ RCU delayed work.
+ */
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ flush_workqueue(i915->wq);
+ rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
+ }
+
+ drain_workqueue(i915->wq);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
- mkwrite_device_info(dev_priv)->page_sizes =
- I915_GTT_PAGE_SIZE_4K;
+ RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
ret = i915_gem_init_userptr(dev_priv);
if (ret)
@@ -1173,7 +1213,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
+ intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(to_gt(dev_priv));
@@ -1191,7 +1231,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
- i915_gem_drain_freed_objects(dev_priv);
+ /* Flush any outstanding work, including i915_gem_context.release_work. */
+ i915_gem_drain_workqueue(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
@@ -1213,7 +1254,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
- spin_lock_init(&dev_priv->fb_tracking.lock);
+ spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
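
The drain helpers moved into i915_gem.c above rely on a fixed number of flush passes because RCU callbacks can re-arm work behind each flush. A simplified userspace sketch of that control flow; flush_once() and the re-arm counter are stand-ins, not driver state:

#include <stdio.h>
#include <stdbool.h>

static int rearmed = 2;                  /* pretend callbacks re-queue work twice */

static bool flush_once(void)
{
        if (rearmed > 0) {
                rearmed--;               /* one flush retires one re-armed item */
                return true;
        }
        return false;
}

int main(void)
{
        for (int pass = 0; pass < 3; pass++)
                printf("pass %d: work pending before flush: %s\n",
                       pass, flush_once() ? "yes" : "no");
        printf("final drain: work pending: %s\n", flush_once() ? "yes" : "no");
        return 0;
}
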
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 68d8d52bd541..a5cdf6662d01 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,12 +26,55 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/types.h>
#include <drm/drm_drv.h>
#include "i915_utils.h"
+struct drm_file;
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct i915_gem_ww_ctx;
+struct i915_gtt_view;
+struct i915_vma;
+
+void i915_gem_init_early(struct drm_i915_private *i915);
+void i915_gem_cleanup_early(struct drm_i915_private *i915);
+
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
+void i915_gem_drain_workqueue(struct drm_i915_private *i915);
+
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_gtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_gtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
+#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
+#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
+
+void i915_gem_runtime_suspend(struct drm_i915_private *i915);
+
+int __must_check i915_gem_init(struct drm_i915_private *i915);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_remove(struct drm_i915_private *i915);
+void i915_gem_driver_release(struct drm_i915_private *i915);
+
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
+
+/* FIXME: All of the below belong somewhere else. */
#ifdef CONFIG_DRM_I915_DEBUG_GEM
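
The I915_GEM_OBJECT_UNBIND_* values declared above are single-bit flags meant to be OR'ed together for the unbind call. A trivial standalone sketch of combining and testing them, with BIT() re-defined locally:

#include <stdio.h>

#define BIT(n)                          (1UL << (n))
#define I915_GEM_OBJECT_UNBIND_ACTIVE   BIT(0)
#define I915_GEM_OBJECT_UNBIND_BARRIER  BIT(1)
#define I915_GEM_OBJECT_UNBIND_TEST     BIT(2)

int main(void)
{
        unsigned long flags = I915_GEM_OBJECT_UNBIND_ACTIVE |
                              I915_GEM_OBJECT_UNBIND_BARRIER;

        printf("flags=0x%lx active=%d test=%d\n", flags,
               !!(flags & I915_GEM_OBJECT_UNBIND_ACTIVE),
               !!(flags & I915_GEM_OBJECT_UNBIND_TEST));
        return 0;
}
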
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 6fd15b39570c..342c8ca6414e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -36,7 +36,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
- value = !!i915->overlay;
+ value = !!i915->display.overlay;
break;
case I915_PARAM_HAS_BSD:
value = !!intel_engine_lookup_user(i915,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 32e92651ef7c..9ea2fe34e7d3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -646,8 +646,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_print_static(&error->device_info, &p);
- intel_device_info_print_runtime(&error->runtime_info, &p);
+ intel_device_info_print(&error->device_info, &error->runtime_info, &p);
intel_driver_caps_print(&error->driver_caps, &p);
}
@@ -671,6 +670,18 @@ static void err_print_pciid(struct drm_i915_error_state_buf *m,
pdev->subsystem_device);
}
+static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
+ const char *name,
+ const struct intel_ctb_coredump *ctb)
+{
+ if (!ctb->size)
+ return;
+
+ err_printf(m, "GuC %s CTB: raw: 0x%08X, 0x%08X/%08X, cached: 0x%08X/%08X, desc = 0x%08X, buf = 0x%08X x 0x%08X\n",
+ name, ctb->raw_status, ctb->raw_head, ctb->raw_tail,
+ ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
+}
+
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct intel_uc_coredump *error_uc)
{
@@ -678,7 +689,12 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
+ err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
+ err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
+ err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
+ err_print_guc_ctb(m, "Recv", error_uc->guc.ctb + 1);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_ctb);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -720,6 +736,8 @@ static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
int i;
err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
+ err_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
+ gt->clock_frequency, gt->clock_period_ns);
err_printf(m, "EIR: 0x%08x\n", gt->eir);
err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
@@ -851,7 +869,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->gt) {
bool print_guc_capture = false;
- if (error->gt->uc && error->gt->uc->is_guc_capture)
+ if (error->gt->uc && error->gt->uc->guc.is_guc_capture)
print_guc_capture = true;
err_print_gt_display(m, error->gt);
@@ -1004,9 +1022,12 @@ static void cleanup_params(struct i915_gpu_coredump *error)
static void cleanup_uc(struct intel_uc_coredump *uc)
{
- kfree(uc->guc_fw.path);
- kfree(uc->huc_fw.path);
- i915_vma_coredump_free(uc->guc_log);
+ kfree(uc->guc_fw.file_selected.path);
+ kfree(uc->huc_fw.file_selected.path);
+ kfree(uc->guc_fw.file_wanted.path);
+ kfree(uc->huc_fw.file_wanted.path);
+ i915_vma_coredump_free(uc->guc.vma_log);
+ i915_vma_coredump_free(uc->guc.vma_ctb);
kfree(uc);
}
@@ -1655,6 +1676,23 @@ gt_record_engines(struct intel_gt_coredump *gt,
}
}
+static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
+ const struct intel_guc_ct_buffer *ctb,
+ const void *blob_ptr, struct intel_guc *guc)
+{
+ if (!ctb || !ctb->desc)
+ return;
+
+ saved->raw_status = ctb->desc->status;
+ saved->raw_head = ctb->desc->head;
+ saved->raw_tail = ctb->desc->tail;
+ saved->head = ctb->head;
+ saved->tail = ctb->tail;
+ saved->size = ctb->size;
+ saved->desc_offset = ((void *)ctb->desc) - blob_ptr;
+ saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
+}
+
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
struct i915_vma_compress *compress)
@@ -1669,14 +1707,26 @@ gt_record_uc(struct intel_gt_coredump *gt,
memcpy(&error_uc->guc_fw, &uc->guc.fw, sizeof(uc->guc.fw));
memcpy(&error_uc->huc_fw, &uc->huc.fw, sizeof(uc->huc.fw));
- /* Non-default firmware paths will be specified by the modparam.
- * As modparams are generally accesible from the userspace make
- * explicit copies of the firmware paths.
+ error_uc->guc_fw.file_selected.path = kstrdup(uc->guc.fw.file_selected.path, ALLOW_FAIL);
+ error_uc->huc_fw.file_selected.path = kstrdup(uc->huc.fw.file_selected.path, ALLOW_FAIL);
+ error_uc->guc_fw.file_wanted.path = kstrdup(uc->guc.fw.file_wanted.path, ALLOW_FAIL);
+ error_uc->huc_fw.file_wanted.path = kstrdup(uc->huc.fw.file_wanted.path, ALLOW_FAIL);
+
+ /*
+ * Save the GuC log and include a timestamp reference for converting the
+ * log times to system times (in conjunction with the error->boottime and
+ * gt->clock_frequency fields saved elsewhere).
*/
- error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
- error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
- error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
- "GuC log buffer", compress);
+ error_uc->guc.timestamp = intel_uncore_read(gt->_gt->uncore, GUCPMTIMESTAMP);
+ error_uc->guc.vma_log = create_vma_coredump(gt->_gt, uc->guc.log.vma,
+ "GuC log buffer", compress);
+ error_uc->guc.vma_ctb = create_vma_coredump(gt->_gt, uc->guc.ct.vma,
+ "GuC CT buffer", compress);
+ error_uc->guc.last_fence = uc->guc.ct.requests.last_fence;
+ gt_record_guc_ctb(error_uc->guc.ctb + 0, &uc->guc.ct.ctbs.send,
+ uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+ gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
+ uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
return error_uc;
}
@@ -1833,6 +1883,8 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
static void gt_record_info(struct intel_gt_coredump *gt)
{
memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info));
+ gt->clock_frequency = gt->_gt->clock_frequency;
+ gt->clock_period_ns = gt->_gt->clock_period_ns;
}
/*
@@ -2027,9 +2079,9 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
error->gt->uc = gt_record_uc(error->gt, compress);
if (error->gt->uc) {
if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
- error->gt->uc->is_guc_capture = true;
+ error->gt->uc->guc.is_guc_capture = true;
else
- GEM_BUG_ON(error->gt->uc->is_guc_capture);
+ GEM_BUG_ON(error->gt->uc->guc.is_guc_capture);
}
}
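
gt_record_guc_ctb() above records where the CT descriptor and command pages live inside the shared blob as byte offsets from the blob base. A standalone sketch of that bookkeeping; the buffer size and offsets are made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t blob[4096];                      /* stand-in for the CT vma */
        const uint8_t *desc = blob;              /* descriptor at the start */
        const uint8_t *cmds = blob + 256;        /* command pages further in */

        uint32_t desc_offset = (uint32_t)(desc - blob);
        uint32_t cmds_offset = (uint32_t)(cmds - blob);

        printf("desc at +0x%x, cmds at +0x%x\n", desc_offset, cmds_offset);
        return 0;
}
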
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 55a143b92d10..efc75cc2ffdb 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -125,6 +125,15 @@ struct intel_engine_coredump {
struct intel_engine_coredump *next;
};
+struct intel_ctb_coredump {
+ u32 raw_head, head;
+ u32 raw_tail, tail;
+ u32 raw_status;
+ u32 desc_offset;
+ u32 cmds_offset;
+ u32 size;
+};
+
struct intel_gt_coredump {
const struct intel_gt *_gt;
bool awake;
@@ -150,6 +159,8 @@ struct intel_gt_coredump {
u32 gtt_cache;
u32 aux_err; /* gen12 */
u32 gam_done; /* gen12 */
+ u32 clock_frequency;
+ u32 clock_period_ns;
/* Display related */
u32 derrmr;
@@ -163,8 +174,14 @@ struct intel_gt_coredump {
struct intel_uc_coredump {
struct intel_uc_fw guc_fw;
struct intel_uc_fw huc_fw;
- struct i915_vma_coredump *guc_log;
- bool is_guc_capture;
+ struct guc_info {
+ struct intel_ctb_coredump ctb[2];
+ struct i915_vma_coredump *vma_ctb;
+ struct i915_vma_coredump *vma_log;
+ u32 timestamp;
+ u16 last_fence;
+ bool is_guc_capture;
+ } guc;
} *uc;
struct intel_gt_coredump *next;
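
A trimmed stand-in for intel_ctb_coredump, showing the "skip empty entries" printing style used by err_print_guc_ctb(); the field names here are simplified:

#include <stdio.h>
#include <stdint.h>

struct ctb_snapshot {
        uint32_t head, tail, size;
};

static void print_ctb(const char *name, const struct ctb_snapshot *ctb)
{
        if (!ctb->size)
                return;                  /* nothing captured for this channel */
        printf("GuC %s CTB: 0x%08x/0x%08x, %u bytes\n",
               name, ctb->head, ctb->tail, ctb->size);
}

int main(void)
{
        struct ctb_snapshot send = { .head = 0x10, .tail = 0x20, .size = 4096 };
        struct ctb_snapshot recv = { 0 };        /* empty: prints nothing */

        print_ctb("Send", &send);
        print_ctb("Recv", &recv);
        return 0;
}
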
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 73cebc6aa650..86a42d9e8041 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -65,7 +65,7 @@
/*
* Interrupt statistic for PMU. Increments the counter only if the
- * interrupt originated from the the GPU so interrupts from a device which
+ * interrupt originated from the GPU so interrupts from a device which
* shares the interrupt line are not accounted.
*/
static inline void pmu_irq_stats(struct drm_i915_private *i915,
@@ -185,7 +185,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
- struct i915_hotplug *hpd = &dev_priv->hotplug;
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
if (HAS_GMCH(dev_priv)) {
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
@@ -595,7 +595,7 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle)
+ if (!dev_priv->display.opregion.asle)
return false;
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
@@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work)
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -1272,7 +1272,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
u32 enabled_irqs = 0;
for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
@@ -1304,12 +1304,12 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
- wake_up_all(&dev_priv->gmbus_wait_queue);
+ wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
- wake_up_all(&dev_priv->gmbus_wait_queue);
+ wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
@@ -1637,7 +1637,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (hotplug_trigger) {
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
i9xx_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1841,7 +1841,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
pch_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1986,7 +1986,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
@@ -1998,7 +1998,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
@@ -2024,7 +2024,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
@@ -2036,7 +2036,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
@@ -2057,7 +2057,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
ilk_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2237,7 +2237,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
bxt_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2257,7 +2257,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
gen11_port_hotplug_long_detect);
}
@@ -2269,7 +2269,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
gen11_port_hotplug_long_detect);
}
@@ -2653,9 +2653,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
static u32
-gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
+gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -2669,10 +2669,10 @@ gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
}
static void
-gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(gt->i915);
+ intel_opregion_asle_intr(i915);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -2736,11 +2736,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ)
gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -2801,11 +2801,11 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ)
gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -3313,8 +3313,8 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3383,8 +3383,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
@@ -3460,8 +3460,8 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
@@ -3538,8 +3538,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3578,8 +3578,8 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
if (DISPLAY_VER(dev_priv) >= 8)
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3636,8 +3636,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -4370,8 +4370,8 @@ HPD_FUNCS(ilk);
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->hotplug_funcs)
- i915->hotplug_funcs->hpd_irq_setup(i915);
+ if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
+ i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
/**
@@ -4413,33 +4413,33 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display_irqs_enabled = false;
- dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
+ dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->hotplug_funcs = &i915_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
} else {
if (HAS_PCH_DG2(dev_priv))
- dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (HAS_PCH_DG1(dev_priv))
- dev_priv->hotplug_funcs = &dg1_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->hotplug_funcs = &gen11_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->hotplug_funcs = &bxt_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->hotplug_funcs = &spt_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
else
- dev_priv->hotplug_funcs = &ilk_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
}
}
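
The hotplug setup above goes through a per-platform ops table chosen once at init and called through a single entry point. A minimal sketch of that dispatch pattern; the platform check and names are simplified stand-ins:

#include <stdio.h>

struct hotplug_funcs {
        void (*hpd_irq_setup)(void);
};

static void icp_setup(void) { printf("ICP-style HPD setup\n"); }
static void ilk_setup(void) { printf("ILK-style HPD setup\n"); }

static const struct hotplug_funcs icp_hpd_funcs = { .hpd_irq_setup = icp_setup };
static const struct hotplug_funcs ilk_hpd_funcs = { .hpd_irq_setup = ilk_setup };

int main(void)
{
        int has_icp_pch = 1;             /* pretend result of a platform probe */
        const struct hotplug_funcs *funcs =
                has_icp_pch ? &icp_hpd_funcs : &ilk_hpd_funcs;

        if (funcs && funcs->hpd_irq_setup)       /* mirrors intel_hpd_irq_setup() */
                funcs->hpd_irq_setup();
        return 0;
}
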
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 6fc475a5db61..d1e4d528cb17 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -29,6 +29,18 @@
#include "i915_params.h"
#include "i915_drv.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
#define i915_param_named(name, T, perm, desc) \
module_param_named(name, i915_modparams.name, T, perm); \
MODULE_PARM_DESC(name, desc)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index aacc10f2e73f..cd4487a1d3be 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,16 +26,22 @@
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_sa_media.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
+#include "intel_pci_config.h"
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
- .graphics.ver = (x), \
- .media.ver = (x), \
- .display.ver = (x)
+ .__runtime.graphics.ip.ver = (x), \
+ .__runtime.media.ip.ver = (x), \
+ .__runtime.display.ip.ver = (x)
+
+#define NO_DISPLAY .__runtime.pipe_mask = 0
#define I845_PIPE_OFFSETS \
.display.pipe_offsets = { \
@@ -159,16 +165,16 @@
/* Keep in gen based order, and chronological order within a gen */
#define GEN_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN_DEFAULT_REGIONS \
- .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
+ .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -177,7 +183,7 @@
.has_3d_pipeline = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -189,8 +195,8 @@
#define I845_FEATURES \
GEN(2), \
- .display.pipe_mask = BIT(PIPE_A), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \
+ .__runtime.pipe_mask = BIT(PIPE_A), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -198,7 +204,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -221,22 +227,22 @@ static const struct intel_device_info i845g_info = {
static const struct intel_device_info i85x_info = {
I830_FEATURES,
PLATFORM(INTEL_I85X),
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
#define GEN3_FEATURES \
GEN(3), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -266,7 +272,7 @@ static const struct intel_device_info i915gm_info = {
.display.has_overlay = 1,
.display.overlay_needs_physical = 1,
.display.supports_tv = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -291,7 +297,7 @@ static const struct intel_device_info i945gm_info = {
.display.has_overlay = 1,
.display.overlay_needs_physical = 1,
.display.supports_tv = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -323,12 +329,12 @@ static const struct intel_device_info pnv_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -351,7 +357,7 @@ static const struct intel_device_info i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
.is_mobile = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.display.has_overlay = 1,
.display.supports_tv = 1,
.hws_needs_physical = 1,
@@ -361,7 +367,7 @@ static const struct intel_device_info i965gm_info = {
static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -369,18 +375,18 @@ static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.display.supports_tv = 1,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
GEN(5), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -403,16 +409,16 @@ static const struct intel_device_info ilk_m_info = {
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
.has_rps = true,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
#define GEN6_FEATURES \
GEN(6), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -420,8 +426,8 @@ static const struct intel_device_info ilk_m_info = {
.has_rc6p = 1, \
.has_rps = true, \
.dma_mask_size = 40, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
- .ppgtt_size = 31, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .__runtime.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
@@ -460,11 +466,11 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -473,8 +479,8 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
- .ppgtt_size = 31, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .__runtime.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
@@ -515,9 +521,8 @@ static const struct intel_device_info ivb_m_gt2_info = {
static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
+ NO_DISPLAY,
.gt = 2,
- .display.pipe_mask = 0, /* legal, last one wins */
- .display.cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -525,8 +530,8 @@ static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_reset_engine = true,
@@ -534,11 +539,11 @@ static const struct intel_device_info vlv_info = {
.display.has_gmch = 1,
.display.has_hotplug = 1,
.dma_mask_size = 40,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
- .ppgtt_size = 31,
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING,
+ .__runtime.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display.mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
@@ -549,8 +554,8 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
@@ -584,8 +589,8 @@ static const struct intel_device_info hsw_gt3_info = {
GEN(8), \
.has_logical_ring_contexts = 1, \
.dma_mask_size = 39, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
- .ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
+ .__runtime.ppgtt_size = 48, \
.has_64bit_reloc = 1
#define BDW_PLATFORM \
@@ -613,18 +618,18 @@ static const struct intel_device_info bdw_rsvd_info = {
static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
@@ -632,8 +637,8 @@ static const struct intel_device_info chv_info = {
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.dma_mask_size = 39,
- .ppgtt_type = INTEL_PPGTT_FULL,
- .ppgtt_size = 32,
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL,
+ .__runtime.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -646,16 +651,16 @@ static const struct intel_device_info chv_info = {
};
#define GEN9_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K
#define GEN9_FEATURES \
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.has_gt_uc = 1, \
- .display.has_hdcp = 1, \
+ .__runtime.has_hdcp = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
@@ -678,7 +683,7 @@ static const struct intel_device_info skl_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .platform_engine_mask = \
+ .__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
@@ -697,29 +702,29 @@ static const struct intel_device_info skl_gt4_info = {
.is_lp = 1, \
.display.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .display.has_hdcp = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.has_hdcp = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.has_rc6 = 1, \
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
- .ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
+ .__runtime.ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
@@ -739,7 +744,7 @@ static const struct intel_device_info bxt_info = {
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
- .display.ver = 10,
+ .__runtime.display.ip.ver = 10,
.display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
GLK_COLORS,
};
@@ -761,7 +766,7 @@ static const struct intel_device_info kbl_gt2_info = {
static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -782,7 +787,7 @@ static const struct intel_device_info cfl_gt2_info = {
static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -801,15 +806,15 @@ static const struct intel_device_info cml_gt2_info = {
};
#define GEN11_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
#define GEN11_FEATURES \
GEN9_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
.display.abox_mask = BIT(0), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.display.pipe_offsets = { \
@@ -832,37 +837,37 @@ static const struct intel_device_info cml_gt2_info = {
ICL_COLORS, \
.display.dbuf.size = 2048, \
.display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
- .display.has_dsc = 1, \
+ .__runtime.has_dsc = 1, \
.has_coherent_ggtt = false, \
.has_logical_ring_elsq = 1
static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
- .ppgtt_size = 36,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .__runtime.ppgtt_size = 36,
};
static const struct intel_device_info jsl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_JASPERLAKE),
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
- .ppgtt_size = 36,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .__runtime.ppgtt_size = 36,
};
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
.display.abox_mask = GENMASK(2, 1), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.display.pipe_offsets = { \
@@ -890,7 +895,7 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.display.has_modular_fia = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
@@ -898,17 +903,17 @@ static const struct intel_device_info rkl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ROCKETLAKE),
.display.abox_mask = BIT(0),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
};
#define DGFX_FEATURES \
- .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
+ .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_llc = 0, \
.has_pxp = 0, \
.has_snoop = 1, \
@@ -918,24 +923,24 @@ static const struct intel_device_info rkl_info = {
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 10,
+ .__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
/* Wa_16011227922 */
- .ppgtt_size = 47,
+ .__runtime.ppgtt_size = 47,
};
static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.dma_mask_size = 39,
};
@@ -951,18 +956,18 @@ static const struct intel_device_info adl_s_info = {
.display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ddi = 1, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.display.has_dp_mst = 1, \
.display.has_dsb = 1, \
- .display.has_dsc = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.has_dsc = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
.display.has_fpga_dbg = 1, \
- .display.has_hdcp = 1, \
+ .__runtime.has_hdcp = 1, \
.display.has_hotplug = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
- .display.ver = 13, \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .__runtime.display.ip.ver = 13, \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -985,28 +990,28 @@ static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
.display.has_cdclk_crawl = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
- .ppgtt_size = 48,
+ .__runtime.ppgtt_size = 48,
.dma_mask_size = 39,
};
#undef GEN
#define XE_HP_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .graphics.ver = 12, \
- .graphics.rel = 50, \
+ .__runtime.graphics.ip.ver = 12, \
+ .__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
.has_3d_pipeline = 1, \
@@ -1022,12 +1027,12 @@ static const struct intel_device_info adl_p_info = {
.has_reset_engine = 1, \
.has_rps = 1, \
.has_runtime_pm = 1, \
- .ppgtt_size = 48, \
- .ppgtt_type = INTEL_PPGTT_FULL
+ .__runtime.ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL
#define XE_HPM_FEATURES \
- .media.ver = 12, \
- .media.rel = 50
+ .__runtime.media.ip.ver = 12, \
+ .__runtime.media.ip.rel = 50
__maybe_unused
static const struct intel_device_info xehpsdv_info = {
@@ -1035,11 +1040,11 @@ static const struct intel_device_info xehpsdv_info = {
XE_HPM_FEATURES,
DGFX_FEATURES,
PLATFORM(INTEL_XEHPSDV),
- .display = { },
+ NO_DISPLAY,
.has_64k_pages = 1,
.needs_compact_pt = 1,
.has_media_ratio_mode = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
@@ -1052,8 +1057,8 @@ static const struct intel_device_info xehpsdv_info = {
XE_HP_FEATURES, \
XE_HPM_FEATURES, \
DGFX_FEATURES, \
- .graphics.rel = 55, \
- .media.rel = 55, \
+ .__runtime.graphics.ip.rel = 55, \
+ .__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
.has_4tile = 1, \
.has_64k_pages = 1, \
@@ -1061,7 +1066,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_heci_pxp = 1, \
.needs_compact_pt = 1, \
.has_media_ratio_mode = 1, \
- .platform_engine_mask = \
+ .__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
BIT(VCS0) | BIT(VCS2) | \
@@ -1070,15 +1075,16 @@ static const struct intel_device_info xehpsdv_info = {
static const struct intel_device_info dg2_info = {
DG2_FEATURES,
XE_LPD_FEATURES,
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
.require_force_probe = 1,
};
static const struct intel_device_info ats_m_info = {
DG2_FEATURES,
- .display = { 0 },
+ NO_DISPLAY,
.require_force_probe = 1,
+ .tuning_thread_rr_after_dep = 1,
};
#define XE_HPC_FEATURES \
@@ -1095,12 +1101,12 @@ static const struct intel_device_info pvc_info = {
XE_HPC_FEATURES,
XE_HPM_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 60,
- .media.rel = 60,
+ .__runtime.graphics.ip.rel = 60,
+ .__runtime.media.ip.rel = 60,
PLATFORM(INTEL_PONTEVECCHIO),
- .display = { 0 },
+ NO_DISPLAY,
.has_flat_ccs = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(BCS0) |
BIT(VCS0) |
BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
@@ -1109,8 +1115,19 @@ static const struct intel_device_info pvc_info = {
#define XE_LPDP_FEATURES \
XE_LPD_FEATURES, \
- .display.ver = 14, \
- .display.has_cdclk_crawl = 1
+ .__runtime.display.ip.ver = 14, \
+ .display.has_cdclk_crawl = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B)
+
+static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ {
+ .type = GT_MEDIA,
+ .name = "Standalone Media GT",
+ .gsi_offset = MTL_MEDIA_GSI_BASE,
+ .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ },
+ {}
+};
__maybe_unused
static const struct intel_device_info mtl_info = {
@@ -1120,15 +1137,16 @@ static const struct intel_device_info mtl_info = {
* Real graphics IP version will be obtained from hardware GMD_ID
* register. Value provided here is just for sanity checking.
*/
- .graphics.ver = 12,
- .graphics.rel = 70,
- .media.ver = 13,
+ .__runtime.graphics.ip.ver = 12,
+ .__runtime.graphics.ip.rel = 70,
+ .__runtime.media.ip.ver = 13,
PLATFORM(INTEL_METEORLAKE),
.display.has_modular_fia = 1,
+ .extra_gt_list = xelpmp_extra_gt,
.has_flat_ccs = 0,
.has_snoop = 1,
- .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
+ .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
.require_force_probe = 1,
};
@@ -1262,6 +1280,27 @@ static bool force_probe(u16 device_id, const char *devices)
return ret;
}
+bool i915_pci_resource_valid(struct pci_dev *pdev, int bar)
+{
+ if (!pci_resource_flags(pdev, bar))
+ return false;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET)
+ return false;
+
+ if (!pci_resource_len(pdev, bar))
+ return false;
+
+ return true;
+}
+
+static bool intel_mmio_bar_valid(struct pci_dev *pdev, struct intel_device_info *intel_info)
+{
+ int gttmmaddr_bar = intel_info->__runtime.graphics.ip.ver == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
+
+ return i915_pci_resource_valid(pdev, gttmmaddr_bar);
+}
+
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
@@ -1287,6 +1326,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
+ if (!intel_mmio_bar_valid(pdev, intel_info))
+ return -ENXIO;
+
/* Detect if we need to wait for other drivers early on */
if (intel_modeset_probe_defer(pdev))
return -EPROBE_DEFER;
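For reference, the probe-time BAR check introduced above reduces to three tests on the GTT/MMIO BAR: resource flags present, address actually assigned (no IORESOURCE_UNSET), and a non-zero length. The stand-alone sketch below restates that predicate with mock types; struct mock_bar and bar_valid() are illustrative and not the kernel's struct pci_dev or pci_resource_*() helpers.

#include <stdbool.h>
#include <stdio.h>

/* Mock of the three properties the driver inspects on a PCI BAR. */
struct mock_bar {
        unsigned long flags;   /* resource flags; 0 means "no resource" */
        bool unset;            /* stand-in for IORESOURCE_UNSET */
        unsigned long len;     /* resource length in bytes */
};

/* Same three-step test as i915_pci_resource_valid() in the hunk above. */
static bool bar_valid(const struct mock_bar *bar)
{
        if (!bar->flags)
                return false;  /* BAR not implemented */
        if (bar->unset)
                return false;  /* BAR never got an address assigned */
        if (!bar->len)
                return false;  /* zero-sized BAR is unusable */
        return true;
}

int main(void)
{
        struct mock_bar good = { .flags = 0x200, .unset = false, .len = 16 << 20 };
        struct mock_bar bad  = { .flags = 0x200, .unset = true,  .len = 16 << 20 };

        printf("good BAR valid: %d\n", bar_valid(&good)); /* 1 */
        printf("bad BAR valid:  %d\n", bar_valid(&bad));  /* 0: would hit the new -ENXIO path */
        return 0;
}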
diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h
index ee048c238174..8dfe19f9a775 100644
--- a/drivers/gpu/drm/i915/i915_pci.h
+++ b/drivers/gpu/drm/i915/i915_pci.h
@@ -6,7 +6,13 @@
#ifndef __I915_PCI_H__
#define __I915_PCI_H__
+#include <linux/types.h>
+
+struct pci_dev;
+
int i915_pci_register_driver(void);
void i915_pci_unregister_driver(void);
+bool i915_pci_resource_valid(struct pci_dev *pdev, int bar);
+
#endif /* __I915_PCI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f3c23fe9ad9c..0defbb43ceea 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1376,7 +1376,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct i915_perf *perf = stream->perf;
- BUG_ON(stream != perf->exclusive_stream);
+ if (WARN_ON(stream != perf->exclusive_stream))
+ return;
/*
* Unset exclusive_stream first, it will be checked while disabling
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3168d7007e10..1a9bd829fc7e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1125,8 +1125,12 @@
#define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */
#define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14)
#define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x)
+#define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2)
+#define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3)
#define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8)
#define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x)
+#define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5)
+#define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x)
#define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0)
#define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x)
@@ -1462,69 +1466,6 @@
#define FBC_REND_CACHE_CLEAN REG_BIT(1)
/*
- * GPIO regs
- */
-#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
- 4 * (gpio))
-
-# define GPIO_CLOCK_DIR_MASK (1 << 0)
-# define GPIO_CLOCK_DIR_IN (0 << 1)
-# define GPIO_CLOCK_DIR_OUT (1 << 1)
-# define GPIO_CLOCK_VAL_MASK (1 << 2)
-# define GPIO_CLOCK_VAL_OUT (1 << 3)
-# define GPIO_CLOCK_VAL_IN (1 << 4)
-# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-# define GPIO_DATA_DIR_MASK (1 << 8)
-# define GPIO_DATA_DIR_IN (0 << 9)
-# define GPIO_DATA_DIR_OUT (1 << 9)
-# define GPIO_DATA_VAL_MASK (1 << 10)
-# define GPIO_DATA_VAL_OUT (1 << 11)
-# define GPIO_DATA_VAL_IN (1 << 12)
-# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-
-#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1 << 11)
-#define GMBUS_RATE_100KHZ (0 << 8)
-#define GMBUS_RATE_50KHZ (1 << 8)
-#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
-#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
-
-#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1 << 31)
-#define GMBUS_SW_RDY (1 << 30)
-#define GMBUS_ENT (1 << 29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0 << 25)
-#define GMBUS_CYCLE_WAIT (1 << 25)
-#define GMBUS_CYCLE_INDEX (2 << 25)
-#define GMBUS_CYCLE_STOP (4 << 25)
-#define GMBUS_BYTE_COUNT_SHIFT 16
-#define GMBUS_BYTE_COUNT_MAX 256U
-#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
-#define GMBUS_SLAVE_INDEX_SHIFT 8
-#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1 << 0)
-#define GMBUS_SLAVE_WRITE (0 << 0)
-#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1 << 15)
-#define GMBUS_HW_WAIT_PHASE (1 << 14)
-#define GMBUS_STALL_TIMEOUT (1 << 13)
-#define GMBUS_INT (1 << 12)
-#define GMBUS_HW_RDY (1 << 11)
-#define GMBUS_SATOER (1 << 10)
-#define GMBUS_ACTIVE (1 << 9)
-#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
-#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
-#define GMBUS_NAK_EN (1 << 3)
-#define GMBUS_IDLE_EN (1 << 2)
-#define GMBUS_HW_WAIT_EN (1 << 1)
-#define GMBUS_HW_RDY_EN (1 << 0)
-#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1 << 31)
-
-/*
* Clock control & power management
*/
#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
@@ -1700,7 +1641,7 @@
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
+#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1857,14 +1798,14 @@
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
-#define PROCHOT_MASK REG_BIT(1)
-#define THERMAL_LIMIT_MASK REG_BIT(2)
-#define RATL_MASK REG_BIT(6)
-#define VR_THERMALERT_MASK REG_BIT(7)
-#define VR_TDC_MASK REG_BIT(8)
-#define POWER_LIMIT_4_MASK REG_BIT(9)
-#define POWER_LIMIT_1_MASK REG_BIT(11)
-#define POWER_LIMIT_2_MASK REG_BIT(12)
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -1916,6 +1857,13 @@
#define CLKGATE_DIS_PSL(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
/*
* Display engine regs
*/
@@ -2822,7 +2770,7 @@
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
#define PCH_PPS_BASE 0xC7200
-#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \
+#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \
PPS_BASE + (reg) + \
(pps_idx) * 0x100)
@@ -2918,118 +2866,6 @@
#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
-#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
- _VLV_BLC_PWM_CTL2_B)
-
-#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
-#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
- _VLV_BLC_PWM_CTL_B)
-
-#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
-#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
- _VLV_BLC_HIST_CTL_B)
-
-/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
-#define BLM_PWM_ENABLE (1 << 31)
-#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
-#define BLM_PIPE_SELECT (1 << 29)
-#define BLM_PIPE_SELECT_IVB (3 << 29)
-#define BLM_PIPE_A (0 << 29)
-#define BLM_PIPE_B (1 << 29)
-#define BLM_PIPE_C (2 << 29) /* ivb + */
-#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
-#define BLM_TRANSCODER_B BLM_PIPE_B
-#define BLM_TRANSCODER_C BLM_PIPE_C
-#define BLM_TRANSCODER_EDP (3 << 29)
-#define BLM_PIPE(pipe) ((pipe) << 29)
-#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
-#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
-#define BLM_PHASE_IN_ENABLE (1 << 25)
-#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
-#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
-#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
-#define BLM_PHASE_IN_COUNT_SHIFT (8)
-#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
-#define BLM_PHASE_IN_INCR_SHIFT (0)
-#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
-/*
- * This is the number of cycles out of the backlight modulation cycle for which
- * the backlight is on.
- *
- * This field must be no greater than the number of cycles in the complete
- * backlight modulation cycle.
- */
-#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
-#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
-#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
-#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-
-#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define BLM_HISTOGRAM_ENABLE (1 << 31)
-
-/* New registers for PCH-split platforms. Safe where new bits show up, the
- * register layout machtes with gen4 BLC_PWM_CTL[12]. */
-#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
-#define BLC_PWM_CPU_CTL _MMIO(0x48254)
-
-#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
-
-/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
- * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
-#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
-#define BLM_PCH_PWM_ENABLE (1 << 31)
-#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
-#define BLM_PCH_POLARITY (1 << 29)
-#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
-
-#define UTIL_PIN_CTL _MMIO(0x48400)
-#define UTIL_PIN_ENABLE (1 << 31)
-#define UTIL_PIN_PIPE_MASK (3 << 29)
-#define UTIL_PIN_PIPE(x) ((x) << 29)
-#define UTIL_PIN_MODE_MASK (0xf << 24)
-#define UTIL_PIN_MODE_DATA (0 << 24)
-#define UTIL_PIN_MODE_PWM (1 << 24)
-#define UTIL_PIN_MODE_VBLANK (4 << 24)
-#define UTIL_PIN_MODE_VSYNC (5 << 24)
-#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
-#define UTIL_PIN_OUTPUT_DATA (1 << 23)
-#define UTIL_PIN_POLARITY (1 << 22)
-#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
-#define UTIL_PIN_INPUT_DATA (1 << 16)
-
-/* BXT backlight register definition. */
-#define _BXT_BLC_PWM_CTL1 0xC8250
-#define BXT_BLC_PWM_ENABLE (1 << 31)
-#define BXT_BLC_PWM_POLARITY (1 << 29)
-#define _BXT_BLC_PWM_FREQ1 0xC8254
-#define _BXT_BLC_PWM_DUTY1 0xC8258
-
-#define _BXT_BLC_PWM_CTL2 0xC8350
-#define _BXT_BLC_PWM_FREQ2 0xC8354
-#define _BXT_BLC_PWM_DUTY2 0xC8358
-
-#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
-#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
-#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
-
#define PCH_GTC_CTL _MMIO(0xe7000)
#define PCH_GTC_ENABLE (1 << 31)
@@ -3619,6 +3455,34 @@
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define _XELPDP_USBC1_AUX_CH_CTL 0x16F210
+#define _XELPDP_USBC2_AUX_CH_CTL 0x16F410
+#define _XELPDP_USBC3_AUX_CH_CTL 0x16F610
+#define _XELPDP_USBC4_AUX_CH_CTL 0x16F810
+
+#define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_CTL, \
+ _DPB_AUX_CH_CTL, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_CTL, \
+ _XELPDP_USBC2_AUX_CH_CTL, \
+ _XELPDP_USBC3_AUX_CH_CTL, \
+ _XELPDP_USBC4_AUX_CH_CTL))
+
+#define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214
+#define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414
+#define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614
+#define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814
+
+#define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_DATA1, \
+ _XELPDP_USBC2_AUX_CH_DATA1, \
+ _XELPDP_USBC3_AUX_CH_DATA1, \
+ _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4)
+
#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
#define DP_AUX_CH_CTL_DONE (1 << 30)
#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
@@ -3631,6 +3495,8 @@
#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19)
+#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18)
#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
@@ -5862,6 +5728,13 @@
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
[TRANSCODER_C] = _CHICKEN_TRANS_C, \
[TRANSCODER_D] = _CHICKEN_TRANS_D))
+
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
@@ -5926,7 +5799,8 @@
_BW_BUDDY1_PAGE_MASK))
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
@@ -6718,10 +6592,10 @@
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
-#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
-#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
-#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
+#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
+#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
+#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
#define SKL_PCODE_CDCLK_CONTROL 0x7
#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
@@ -6937,265 +6811,6 @@ enum skl_power_gate {
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
-/* HDCP Key Registers */
-#define HDCP_KEY_CONF _MMIO(0x66c00)
-#define HDCP_AKSV_SEND_TRIGGER BIT(31)
-#define HDCP_CLEAR_KEYS_TRIGGER BIT(30)
-#define HDCP_KEY_LOAD_TRIGGER BIT(8)
-#define HDCP_KEY_STATUS _MMIO(0x66c04)
-#define HDCP_FUSE_IN_PROGRESS BIT(7)
-#define HDCP_FUSE_ERROR BIT(6)
-#define HDCP_FUSE_DONE BIT(5)
-#define HDCP_KEY_LOAD_STATUS BIT(1)
-#define HDCP_KEY_LOAD_DONE BIT(0)
-#define HDCP_AKSV_LO _MMIO(0x66c10)
-#define HDCP_AKSV_HI _MMIO(0x66c14)
-
-/* HDCP Repeater Registers */
-#define HDCP_REP_CTL _MMIO(0x66d00)
-#define HDCP_TRANSA_REP_PRESENT BIT(31)
-#define HDCP_TRANSB_REP_PRESENT BIT(30)
-#define HDCP_TRANSC_REP_PRESENT BIT(29)
-#define HDCP_TRANSD_REP_PRESENT BIT(28)
-#define HDCP_DDIB_REP_PRESENT BIT(30)
-#define HDCP_DDIA_REP_PRESENT BIT(29)
-#define HDCP_DDIC_REP_PRESENT BIT(28)
-#define HDCP_DDID_REP_PRESENT BIT(27)
-#define HDCP_DDIF_REP_PRESENT BIT(26)
-#define HDCP_DDIE_REP_PRESENT BIT(25)
-#define HDCP_TRANSA_SHA1_M0 (1 << 20)
-#define HDCP_TRANSB_SHA1_M0 (2 << 20)
-#define HDCP_TRANSC_SHA1_M0 (3 << 20)
-#define HDCP_TRANSD_SHA1_M0 (4 << 20)
-#define HDCP_DDIB_SHA1_M0 (1 << 20)
-#define HDCP_DDIA_SHA1_M0 (2 << 20)
-#define HDCP_DDIC_SHA1_M0 (3 << 20)
-#define HDCP_DDID_SHA1_M0 (4 << 20)
-#define HDCP_DDIF_SHA1_M0 (5 << 20)
-#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
-#define HDCP_SHA1_BUSY BIT(16)
-#define HDCP_SHA1_READY BIT(17)
-#define HDCP_SHA1_COMPLETE BIT(18)
-#define HDCP_SHA1_V_MATCH BIT(19)
-#define HDCP_SHA1_TEXT_32 (1 << 1)
-#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
-#define HDCP_SHA1_TEXT_24 (4 << 1)
-#define HDCP_SHA1_TEXT_16 (5 << 1)
-#define HDCP_SHA1_TEXT_8 (6 << 1)
-#define HDCP_SHA1_TEXT_0 (7 << 1)
-#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
-#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
-#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
-#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
-#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
-#define HDCP_SHA_TEXT _MMIO(0x66d18)
-
-/* HDCP Auth Registers */
-#define _PORTA_HDCP_AUTHENC 0x66800
-#define _PORTB_HDCP_AUTHENC 0x66500
-#define _PORTC_HDCP_AUTHENC 0x66600
-#define _PORTD_HDCP_AUTHENC 0x66700
-#define _PORTE_HDCP_AUTHENC 0x66A00
-#define _PORTF_HDCP_AUTHENC 0x66900
-#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
- _PORTA_HDCP_AUTHENC, \
- _PORTB_HDCP_AUTHENC, \
- _PORTC_HDCP_AUTHENC, \
- _PORTD_HDCP_AUTHENC, \
- _PORTE_HDCP_AUTHENC, \
- _PORTF_HDCP_AUTHENC) + (x))
-#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
-#define _TRANSA_HDCP_CONF 0x66400
-#define _TRANSB_HDCP_CONF 0x66500
-#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
- _TRANSB_HDCP_CONF)
-#define HDCP_CONF(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_CONF(trans) : \
- PORT_HDCP_CONF(port))
-
-#define HDCP_CONF_CAPTURE_AN BIT(0)
-#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
-#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
-#define _TRANSA_HDCP_ANINIT 0x66404
-#define _TRANSB_HDCP_ANINIT 0x66504
-#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_ANINIT, \
- _TRANSB_HDCP_ANINIT)
-#define HDCP_ANINIT(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANINIT(trans) : \
- PORT_HDCP_ANINIT(port))
-
-#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
-#define _TRANSA_HDCP_ANLO 0x66408
-#define _TRANSB_HDCP_ANLO 0x66508
-#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
- _TRANSB_HDCP_ANLO)
-#define HDCP_ANLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANLO(trans) : \
- PORT_HDCP_ANLO(port))
-
-#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
-#define _TRANSA_HDCP_ANHI 0x6640C
-#define _TRANSB_HDCP_ANHI 0x6650C
-#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
- _TRANSB_HDCP_ANHI)
-#define HDCP_ANHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANHI(trans) : \
- PORT_HDCP_ANHI(port))
-
-#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
-#define _TRANSA_HDCP_BKSVLO 0x66410
-#define _TRANSB_HDCP_BKSVLO 0x66510
-#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_BKSVLO, \
- _TRANSB_HDCP_BKSVLO)
-#define HDCP_BKSVLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_BKSVLO(trans) : \
- PORT_HDCP_BKSVLO(port))
-
-#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
-#define _TRANSA_HDCP_BKSVHI 0x66414
-#define _TRANSB_HDCP_BKSVHI 0x66514
-#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_BKSVHI, \
- _TRANSB_HDCP_BKSVHI)
-#define HDCP_BKSVHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_BKSVHI(trans) : \
- PORT_HDCP_BKSVHI(port))
-
-#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
-#define _TRANSA_HDCP_RPRIME 0x66418
-#define _TRANSB_HDCP_RPRIME 0x66518
-#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_RPRIME, \
- _TRANSB_HDCP_RPRIME)
-#define HDCP_RPRIME(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_RPRIME(trans) : \
- PORT_HDCP_RPRIME(port))
-
-#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
-#define _TRANSA_HDCP_STATUS 0x6641C
-#define _TRANSB_HDCP_STATUS 0x6651C
-#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_STATUS, \
- _TRANSB_HDCP_STATUS)
-#define HDCP_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_STATUS(trans) : \
- PORT_HDCP_STATUS(port))
-
-#define HDCP_STATUS_STREAM_A_ENC BIT(31)
-#define HDCP_STATUS_STREAM_B_ENC BIT(30)
-#define HDCP_STATUS_STREAM_C_ENC BIT(29)
-#define HDCP_STATUS_STREAM_D_ENC BIT(28)
-#define HDCP_STATUS_AUTH BIT(21)
-#define HDCP_STATUS_ENC BIT(20)
-#define HDCP_STATUS_RI_MATCH BIT(19)
-#define HDCP_STATUS_R0_READY BIT(18)
-#define HDCP_STATUS_AN_READY BIT(17)
-#define HDCP_STATUS_CIPHER BIT(16)
-#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
-
-/* HDCP2.2 Registers */
-#define _PORTA_HDCP2_BASE 0x66800
-#define _PORTB_HDCP2_BASE 0x66500
-#define _PORTC_HDCP2_BASE 0x66600
-#define _PORTD_HDCP2_BASE 0x66700
-#define _PORTE_HDCP2_BASE 0x66A00
-#define _PORTF_HDCP2_BASE 0x66900
-#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
- _PORTA_HDCP2_BASE, \
- _PORTB_HDCP2_BASE, \
- _PORTC_HDCP2_BASE, \
- _PORTD_HDCP2_BASE, \
- _PORTE_HDCP2_BASE, \
- _PORTF_HDCP2_BASE) + (x))
-
-#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
-#define _TRANSA_HDCP2_AUTH 0x66498
-#define _TRANSB_HDCP2_AUTH 0x66598
-#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
- _TRANSB_HDCP2_AUTH)
-#define AUTH_LINK_AUTHENTICATED BIT(31)
-#define AUTH_LINK_TYPE BIT(30)
-#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
-#define AUTH_CLR_KEYS BIT(18)
-#define HDCP2_AUTH(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_AUTH(trans) : \
- PORT_HDCP2_AUTH(port))
-
-#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
-#define _TRANSA_HDCP2_CTL 0x664B0
-#define _TRANSB_HDCP2_CTL 0x665B0
-#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
- _TRANSB_HDCP2_CTL)
-#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-#define HDCP2_CTL(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_CTL(trans) : \
- PORT_HDCP2_CTL(port))
-
-#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define _TRANSA_HDCP2_STATUS 0x664B4
-#define _TRANSB_HDCP2_STATUS 0x665B4
-#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_STATUS, \
- _TRANSB_HDCP2_STATUS)
-#define LINK_TYPE_STATUS BIT(22)
-#define LINK_AUTH_STATUS BIT(21)
-#define LINK_ENCRYPTION_STATUS BIT(20)
-#define HDCP2_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_STATUS(trans) : \
- PORT_HDCP2_STATUS(port))
-
-#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
-#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
-#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
-#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
-#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
- _PIPEA_HDCP2_STREAM_STATUS, \
- _PIPEB_HDCP2_STREAM_STATUS, \
- _PIPEC_HDCP2_STREAM_STATUS, \
- _PIPED_HDCP2_STREAM_STATUS))
-
-#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
-#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
-#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_STREAM_STATUS, \
- _TRANSB_HDCP2_STREAM_STATUS)
-#define STREAM_ENCRYPTION_STATUS BIT(31)
-#define STREAM_TYPE_STATUS BIT(30)
-#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_STREAM_STATUS(trans) : \
- PIPE_HDCP2_STREAM_STATUS(pipe))
-
-#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
-#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
-#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
- _PORTA_HDCP2_AUTH_STREAM, \
- _PORTB_HDCP2_AUTH_STREAM)
-#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
-#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
-#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_AUTH_STREAM, \
- _TRANSB_HDCP2_AUTH_STREAM)
-#define AUTH_STREAM_TYPE BIT(31)
-#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_AUTH_STREAM(trans) : \
- PORT_HDCP2_AUTH_STREAM(port))
-
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -7503,16 +7118,16 @@ enum skl_power_gate {
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK (3 << 26)
-#define CDCLK_FREQ_450_432 (0 << 26)
-#define CDCLK_FREQ_540 (1 << 26)
-#define CDCLK_FREQ_337_308 (2 << 26)
-#define CDCLK_FREQ_675_617 (3 << 26)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
@@ -8367,6 +7982,7 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_ALT_ICH_SEL (1 << 20)
#define DSC_VBR_ENABLE (1 << 19)
#define DSC_422_ENABLE (1 << 18)
#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
@@ -8717,4 +8333,27 @@ enum skl_power_gate {
#define GEN12_CULLBIT2 _MMIO(0x7030)
#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
+#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
+#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784)
+#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788)
+#define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0)
+#define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16)
+
+#define MTL_LATENCY_SAGV _MMIO(0x4578b)
+#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0)
+
+#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
+#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
+#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(0x45710 + (point) * 2)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(0x45714 + (point) * 2)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
#endif /* _I915_REG_H_ */
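Several hunks above convert open-coded shift/mask pairs (GEN9_MEM_LATENCY_*, CDCLK_FREQ_*, BXT_CDCLK_CD2X_*) to the REG_GENMASK()/REG_FIELD_PREP() style. The sketch below shows the pack/unpack idea those macros provide, using simplified user-space stand-ins; MY_GENMASK, MY_FIELD_PREP and MY_FIELD_GET are illustrative, not the kernel macros, which add type and constant checking on top of the same arithmetic.

#include <stdio.h>

/* Simplified stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET(). */
#define MY_GENMASK(h, l)         (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define MY_FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))
#define MY_FIELD_GET(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))

/* Mirrors the reworked CDCLK_FREQ_SEL field: bits 27:26 of CDCLK_CTL. */
#define CDCLK_FREQ_SEL_MASK      MY_GENMASK(27, 26)
#define CDCLK_FREQ_540           MY_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)

int main(void)
{
        unsigned int reg = 0;

        reg |= CDCLK_FREQ_540;                        /* pack the field */
        printf("reg = 0x%08x\n", reg);                /* 0x04000000 */
        printf("sel = %u\n", MY_FIELD_GET(CDCLK_FREQ_SEL_MASK, reg)); /* 1 */
        return 0;
}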
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index ae984c66c48a..6fc0d1b89690 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -241,8 +241,6 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
- BUG_ON(!fn);
-
__init_waitqueue_head(&fence->wait, name, key);
fence->fn = fn;
#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index a7c603bc1b01..619fc5a22f0c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -48,11 +48,15 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
do { \
static struct lock_class_key __key; \
\
+ BUILD_BUG_ON((fn) == NULL); \
__i915_sw_fence_init((fence), (fn), #fence, &__key); \
} while (0)
#else
#define i915_sw_fence_init(fence, fn) \
- __i915_sw_fence_init((fence), (fn), NULL, NULL)
+do { \
+ BUILD_BUG_ON((fn) == NULL); \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL); \
+} while (0)
#endif
void i915_sw_fence_reinit(struct i915_sw_fence *fence);
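The i915_sw_fence change above drops the runtime BUG_ON(!fn) and instead rejects a NULL notifier with BUILD_BUG_ON((fn) == NULL) in the init macros, so the mistake is caught when the caller is compiled. A rough user-space sketch of the same idea follows, using the classic negative-array-size trick as a stand-in for BUILD_BUG_ON(); MY_BUILD_BUG_ON, FENCE_INIT and my_notify are illustrative names only.

#include <stdio.h>

/* Trimmed-down stand-in for BUILD_BUG_ON(): a non-zero compile-time
 * constant condition yields a negative array size and breaks the build. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static int my_notify(int event) { return event; }

/* Mirrors the reworked i915_sw_fence_init(): a literal NULL callback is
 * rejected at build time instead of crashing at runtime. */
#define FENCE_INIT(fn) do {                             \
        MY_BUILD_BUG_ON((fn) == NULL);                  \
        printf("fence armed\n");                        \
} while (0)

int main(void)
{
        FENCE_INIT(my_notify);  /* compiles and runs */
        /* FENCE_INIT(NULL); */ /* would fail to compile: char[-1] */
        return 0;
}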
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 427de1aaab36..e19452f0e100 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -173,6 +173,77 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
kfree(bman_res);
}
+static bool i915_ttm_buddy_man_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct drm_buddy *mm = &bman->mm;
+ struct drm_buddy_block *block;
+
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
+ GEM_BUG_ON(!place->lpfn);
+
+ /*
+ * If we just want something mappable then we can quickly check
+ * if the current victim resource is using any of the CPU
+ * visible portion.
+ */
+ if (!place->fpfn &&
+ place->lpfn == i915_ttm_buddy_man_visible_size(man))
+ return bman_res->used_visible_size > 0;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
+ if (place->fpfn < lpfn && place->lpfn > fpfn)
+ return true;
+ }
+
+ return false;
+}
+
+static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct drm_buddy *mm = &bman->mm;
+ struct drm_buddy_block *block;
+
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
+ GEM_BUG_ON(!place->lpfn);
+
+ if (!place->fpfn &&
+ place->lpfn == i915_ttm_buddy_man_visible_size(man))
+ return bman_res->used_visible_size == res->num_pages;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
+ if (fpfn < place->fpfn || lpfn > place->lpfn)
+ return false;
+ }
+
+ return true;
+}
+
static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
@@ -200,6 +271,8 @@ static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
.alloc = i915_ttm_buddy_man_alloc,
.free = i915_ttm_buddy_man_free,
+ .intersects = i915_ttm_buddy_man_intersects,
+ .compatible = i915_ttm_buddy_man_compatible,
.debug = i915_ttm_buddy_man_debug,
};
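The new intersects()/compatible() hooks above are interval tests over half-open page ranges: intersects() asks whether a buddy block overlaps the requested placement at all, while compatible() asks whether the block lies entirely inside it. A small self-contained sketch of both predicates follows; struct range and the helper names are illustrative, not the driver's types.

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; }; /* [start, end), in pages */

/* Half-open overlap test, as in the new intersects() hook: the block and
 * the placement overlap iff each one starts before the other one ends. */
static bool ranges_intersect(struct range a, struct range b)
{
        return a.start < b.end && b.start < a.end;
}

/* Containment test, as in the new compatible() hook: the block must lie
 * entirely inside the requested placement. */
static bool range_contained(struct range block, struct range place)
{
        return block.start >= place.start && block.end <= place.end;
}

int main(void)
{
        struct range place = { .start = 0,   .end = 256 }; /* e.g. CPU-visible window */
        struct range block = { .start = 200, .end = 300 }; /* straddles the boundary */

        printf("intersects: %d\n", ranges_intersect(block, place)); /* 1 */
        printf("compatible: %d\n", range_contained(block, place));  /* 0 */
        return 0;
}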
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c10d68cdc3ca..6c14d13364bf 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -360,10 +360,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 260371716490..f17c09ead7d7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -109,7 +109,7 @@ static void __i915_vma_retire(struct i915_active *ref)
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
@@ -141,9 +141,9 @@ vma_create(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&vma->obj_link);
RB_CLEAR_NODE(&vma->obj_node);
- if (view && view->type != I915_GGTT_VIEW_NORMAL) {
- vma->ggtt_view = *view;
- if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ vma->gtt_view = *view;
+ if (view->type == I915_GTT_VIEW_PARTIAL) {
GEM_BUG_ON(range_overflows_t(u64,
view->partial.offset,
view->partial.size,
@@ -151,10 +151,10 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
GEM_BUG_ON(vma->size > obj->base.size);
- } else if (view->type == I915_GGTT_VIEW_ROTATED) {
+ } else if (view->type == I915_GTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
- } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
+ } else if (view->type == I915_GTT_VIEW_REMAPPED) {
vma->size = intel_remapped_info_size(&view->remapped);
vma->size <<= PAGE_SHIFT;
}
@@ -248,7 +248,7 @@ err_vma:
static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct rb_node *rb;
@@ -286,7 +286,7 @@ i915_vma_lookup(struct drm_i915_gem_object *obj,
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *vma;
@@ -1203,7 +1203,7 @@ err_st_alloc:
}
static noinline struct sg_table *
-intel_partial_pages(const struct i915_ggtt_view *view,
+intel_partial_pages(const struct i915_gtt_view *view,
struct drm_i915_gem_object *obj)
{
struct sg_table *st;
@@ -1247,33 +1247,33 @@ __i915_vma_get_pages(struct i915_vma *vma)
*/
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
- switch (vma->ggtt_view.type) {
+ switch (vma->gtt_view.type) {
default:
- GEM_BUG_ON(vma->ggtt_view.type);
+ GEM_BUG_ON(vma->gtt_view.type);
fallthrough;
- case I915_GGTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_NORMAL:
pages = vma->obj->mm.pages;
break;
- case I915_GGTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_ROTATED:
pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
break;
- case I915_GGTT_VIEW_REMAPPED:
+ case I915_GTT_VIEW_REMAPPED:
pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
+ intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
break;
- case I915_GGTT_VIEW_PARTIAL:
- pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
+ case I915_GTT_VIEW_PARTIAL:
+ pages = intel_partial_pages(&vma->gtt_view, vma->obj);
break;
}
if (IS_ERR(pages)) {
drm_err(&vma->vm->i915->drm,
"Failed to get pages for VMA view type %u (%ld)!\n",
- vma->ggtt_view.type, PTR_ERR(pages));
+ vma->gtt_view.type, PTR_ERR(pages));
return PTR_ERR(pages);
}
@@ -1806,7 +1806,7 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
GEM_BUG_ON(!vma->obj->userfault_count);
node = &vma->mmo->vma_node;
- vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
@@ -1882,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
enum dma_resv_usage usage;
int idx;
- obj->read_domains = 0;
if (flags & EXEC_OBJECT_WRITE) {
usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
} else {
usage = DMA_RESV_USAGE_READ;
+ obj->write_domain = 0;
}
dma_fence_array_for_each(curr, idx, fence)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 33a58f605d75..aecd9c64486b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -43,7 +43,7 @@
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+ const struct i915_gtt_view *view);
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags);
#define I915_VMA_RELEASE_MAP BIT(0)
@@ -160,7 +160,7 @@ static inline void i915_vma_put(struct i915_vma *vma)
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
ptrdiff_t cmp;
@@ -170,8 +170,8 @@ i915_vma_compare(struct i915_vma *vma,
if (cmp)
return cmp;
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
- cmp = vma->ggtt_view.type;
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL != 0);
+ cmp = vma->gtt_view.type;
if (!view)
return cmp;
@@ -181,7 +181,7 @@ i915_vma_compare(struct i915_vma *vma,
assert_i915_gem_gtt_types();
- /* ggtt_view.type also encodes its size so that we both distinguish
+ /* gtt_view.type also encodes its size so that we both distinguish
* different views using it as a "type" and also use a compact (no
* accessing of uninitialised padding bytes) memcmp without storing
* an extra parameter or adding more code.
@@ -191,14 +191,14 @@ i915_vma_compare(struct i915_vma *vma,
* we assert above that all branches have the same address, and that
* each branch has a unique type/size.
*/
- BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
- BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
- BUILD_BUG_ON(I915_GGTT_VIEW_ROTATED >= I915_GGTT_VIEW_REMAPPED);
+ BUILD_BUG_ON(I915_GTT_VIEW_NORMAL >= I915_GTT_VIEW_PARTIAL);
+ BUILD_BUG_ON(I915_GTT_VIEW_PARTIAL >= I915_GTT_VIEW_ROTATED);
+ BUILD_BUG_ON(I915_GTT_VIEW_ROTATED >= I915_GTT_VIEW_REMAPPED);
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), partial));
BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
offsetof(typeof(*view), remapped));
- return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
+ return memcmp(&vma->gtt_view.partial, &view->partial, view->type);
}
struct i915_vma_work *i915_vma_work(void);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 5a67995ea5fe..de1342dbfa12 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -216,6 +216,10 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
/**
* i915_vma_resource_unbind - Unbind a vma resource
* @vma_res: The vma resource to unbind.
+ * @tlb: pointer to vma->obj->mm.tlb associated with the resource
+ * to be stored at vma_res->tlb. When not-NULL, it will be used
+ * to do TLB cache invalidation before freeing a VMA resource.
+ * Used only for async unbind.
*
* At this point this function does little more than publish a fence that
* signals immediately unless signaling is held back.
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index be6e028c3b57..ec0f6c9f57d0 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -67,30 +67,30 @@ enum i915_cache_level;
* Implementation and usage
*
* GGTT views are implemented using VMAs and are distinguished via enum
- * i915_ggtt_view_type and struct i915_ggtt_view.
+ * i915_gtt_view_type and struct i915_gtt_view.
*
* A new flavour of core GEM functions which work with GGTT bound objects were
* added with the _ggtt_ infix, and sometimes with _view postfix to avoid
- * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * renaming in large amounts of code. They take the struct i915_gtt_view
* parameter encapsulating all metadata required to implement a view.
*
* As a helper for callers which are only interested in the normal view,
- * globally const i915_ggtt_view_normal singleton instance exists. All old core
+ * globally const i915_gtt_view_normal singleton instance exists. All old core
* GEM API functions, the ones not taking the view parameter, are operating on,
* or with the normal GGTT view.
*
* Code wanting to add or use a new GGTT view needs to:
*
* 1. Add a new enum with a suitable name.
- * 2. Extend the metadata in the i915_ggtt_view structure if required.
+ * 2. Extend the metadata in the i915_gtt_view structure if required.
* 3. Add support to i915_get_vma_pages().
*
* New views are required to build a scatter-gather table from within the
- * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
+ * i915_get_vma_pages function. This table is stored in the vma.gtt_view and
* exists for the lifetime of a VMA.
*
* Core API is designed to have copy semantics which means that passed in
- * struct i915_ggtt_view does not need to be persistent (left around after
+ * struct i915_gtt_view does not need to be persistent (left around after
* calling the core API functions).
*
*/
@@ -130,11 +130,11 @@ struct intel_partial_info {
unsigned int size;
} __packed;
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
- I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
+enum i915_gtt_view_type {
+ I915_GTT_VIEW_NORMAL = 0,
+ I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
+ I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
@@ -152,18 +152,18 @@ static inline void assert_i915_gem_gtt_types(void)
/* As we encode the size of each branch inside the union into its type,
* we have to be careful that each branch has a unique size.
*/
- switch ((enum i915_ggtt_view_type)0) {
- case I915_GGTT_VIEW_NORMAL:
- case I915_GGTT_VIEW_PARTIAL:
- case I915_GGTT_VIEW_ROTATED:
- case I915_GGTT_VIEW_REMAPPED:
+ switch ((enum i915_gtt_view_type)0) {
+ case I915_GTT_VIEW_NORMAL:
+ case I915_GTT_VIEW_PARTIAL:
+ case I915_GTT_VIEW_ROTATED:
+ case I915_GTT_VIEW_REMAPPED:
/* gcc complains if these are identical cases */
break;
}
}
-struct i915_ggtt_view {
- enum i915_ggtt_view_type type;
+struct i915_gtt_view {
+ enum i915_gtt_view_type type;
union {
/* Members need to contain no holes/padding */
struct intel_partial_info partial;
@@ -280,11 +280,11 @@ struct i915_vma {
/**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
- * i915_ggtt_view_type is used to distinguish between those entries.
- * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
+ * i915_gtt_view_type is used to distinguish between those entries.
+ * The default one of zero (I915_GTT_VIEW_NORMAL) is default and also
* assumed in GEM functions which take no ggtt view parameter.
*/
- struct i915_ggtt_view ggtt_view;
+ struct i915_gtt_view gtt_view;
/** This object's place on the active/inactive lists */
struct list_head vm_link;
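The renamed enum keeps the existing trick that each view type's value equals the sizeof() of its union payload, which is what lets i915_vma_compare() finish with a single memcmp() of view->type bytes. A compact stand-alone illustration of that pattern follows; the struct and enum names are made up, and the payload structs are deliberately chosen to contain no padding, matching the "members need to contain no holes/padding" rule in the diff.

#include <stdio.h>
#include <string.h>

struct partial_info { unsigned int offset, size; };     /* 8 bytes, no padding */
struct rotated_info { unsigned int plane_stride[4]; };  /* 16 bytes, no padding */

enum view_type {
        VIEW_NORMAL  = 0,                            /* compare zero payload bytes */
        VIEW_PARTIAL = sizeof(struct partial_info),
        VIEW_ROTATED = sizeof(struct rotated_info),
};

struct view {
        enum view_type type;
        union {                                      /* members share one address */
                struct partial_info partial;
                struct rotated_info rotated;
        };
};

static int view_compare(const struct view *a, const struct view *b)
{
        if (a->type != b->type)
                return (int)a->type - (int)b->type;
        /* Compare only the bytes that matter for this view type. */
        return memcmp(&a->partial, &b->partial, a->type);
}

int main(void)
{
        struct view x = { .type = VIEW_PARTIAL, .partial = { 4096, 16 } };
        struct view y = { .type = VIEW_PARTIAL, .partial = { 4096, 32 } };

        printf("same? %d\n", view_compare(&x, &y) == 0); /* 0: partial sizes differ */
        return 0;
}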
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d98fbbd589aa..20575eb77ea7 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -88,46 +88,57 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_print_static(const struct intel_device_info *info,
- struct drm_printer *p)
+void intel_device_info_print(const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
+ struct drm_printer *p)
{
- if (info->graphics.rel)
- drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver,
- info->graphics.rel);
+ if (runtime->graphics.ip.rel)
+ drm_printf(p, "graphics version: %u.%02u\n",
+ runtime->graphics.ip.ver,
+ runtime->graphics.ip.rel);
else
- drm_printf(p, "graphics version: %u\n", info->graphics.ver);
+ drm_printf(p, "graphics version: %u\n",
+ runtime->graphics.ip.ver);
- if (info->media.rel)
- drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel);
+ if (runtime->media.ip.rel)
+ drm_printf(p, "media version: %u.%02u\n",
+ runtime->media.ip.ver,
+ runtime->media.ip.rel);
else
- drm_printf(p, "media version: %u\n", info->media.ver);
+ drm_printf(p, "media version: %u\n",
+ runtime->media.ip.ver);
- if (info->display.rel)
- drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel);
+ if (runtime->display.ip.rel)
+ drm_printf(p, "display version: %u.%02u\n",
+ runtime->display.ip.ver,
+ runtime->display.ip.rel);
else
- drm_printf(p, "display version: %u\n", info->display.ver);
+ drm_printf(p, "display version: %u\n",
+ runtime->display.ip.ver);
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: %x\n", info->memory_regions);
- drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+ drm_printf(p, "memory-regions: %x\n", runtime->memory_regions);
+ drm_printf(p, "page-sizes: %x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
- drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
- drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+ drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
+ drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+ drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-}
-void intel_device_info_print_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p)
-{
- drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
+ drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
+ drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
+ drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+
+ drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
#undef INTEL_VGA_DEVICE
@@ -364,55 +375,55 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
- info->display.pipe_mask = 0;
- info->display.cpu_transcoder_mask = 0;
- info->display.fbc_mask = 0;
+ runtime->pipe_mask = 0;
+ runtime->cpu_transcoder_mask = 0;
+ runtime->fbc_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
- info->display.pipe_mask &= ~BIT(PIPE_C);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ runtime->pipe_mask &= ~BIT(PIPE_C);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_A);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
- info->display.fbc_mask &= ~BIT(INTEL_FBC_A);
+ runtime->pipe_mask &= ~BIT(PIPE_A);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
}
if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_B);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ runtime->pipe_mask &= ~BIT(PIPE_B);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
}
if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_C);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ runtime->pipe_mask &= ~BIT(PIPE_C);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
if (DISPLAY_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
- info->display.pipe_mask &= ~BIT(PIPE_D);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ runtime->pipe_mask &= ~BIT(PIPE_D);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
}
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
- info->display.has_hdcp = 0;
+ runtime->has_hdcp = 0;
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
- info->display.fbc_mask = 0;
+ runtime->fbc_mask = 0;
if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
- info->display.has_dmc = 0;
+ runtime->has_dmc = 0;
if (DISPLAY_VER(dev_priv) >= 10 &&
(dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
- info->display.has_dsc = 0;
+ runtime->has_dsc = 0;
}
if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
- info->ppgtt_type = INTEL_PPGTT_NONE;
+ runtime->ppgtt_type = INTEL_PPGTT_NONE;
}
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
@@ -422,8 +433,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
memset(&info->display, 0, sizeof(info->display));
+
+ runtime->cpu_transcoder_mask = 0;
memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites));
memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers));
+ runtime->fbc_mask = 0;
+ runtime->has_hdcp = false;
+ runtime->has_dmc = false;
+ runtime->has_dsc = false;
}
}
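
The PRINT_FLAG / DEV_INFO_FOR_EACH_FLAG pairs in the hunk above are the usual X-macro pattern: the flag list is written once and expanded with different per-entry macros, one for declaring the bitfields and one for printing them. A minimal standalone sketch of the same pattern, with a toy struct and a cut-down flag list standing in for the driver's types:

#include <stdio.h>
#include <stdbool.h>

/* Single source of truth for the flag names (cut down for illustration). */
#define FOR_EACH_FLAG(func) \
	func(has_rc6); \
	func(has_runtime_pm); \
	func(has_snoop);

struct toy_info {
	/* Expand the list once to declare one bitfield per flag. */
#define DEFINE_FLAG(name) unsigned int name:1
	FOR_EACH_FLAG(DEFINE_FLAG)
#undef DEFINE_FLAG
};

static const char *yes_no(bool v) { return v ? "yes" : "no"; }

static void print_info(const struct toy_info *info)
{
	/* Expand the same list again to print each flag by name. */
#define PRINT_FLAG(name) printf("%s: %s\n", #name, yes_no(info->name))
	FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

int main(void)
{
	struct toy_info info = { .has_rc6 = 1, .has_snoop = 1 };

	print_info(&info);
	return 0;
}
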
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 23bf230aa104..d638235e1d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -37,6 +37,7 @@
struct drm_printer;
struct drm_i915_private;
+struct intel_gt_definition;
/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
@@ -164,7 +165,6 @@ enum intel_ppgtt_type {
func(has_media_ratio_mode); \
func(has_mslice_steering); \
func(has_one_eu_per_fuse_bit); \
- func(has_pooled_eu); \
func(has_pxp); \
func(has_rc6); \
func(has_rc6p); \
@@ -172,6 +172,7 @@ enum intel_ppgtt_type {
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
+ func(tuning_thread_rr_after_dep); \
func(unfenced_needs_alignment); \
func(hws_needs_physical);
@@ -179,14 +180,11 @@ enum intel_ppgtt_type {
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
func(has_cdclk_crawl); \
- func(has_dmc); \
func(has_ddi); \
func(has_dp_mst); \
func(has_dsb); \
- func(has_dsc); \
func(has_fpga_dbg); \
func(has_gmch); \
- func(has_hdcp); \
func(has_hotplug); \
func(has_hti); \
func(has_ipc); \
@@ -202,23 +200,67 @@ struct ip_version {
u8 rel;
};
-struct intel_device_info {
- struct ip_version graphics;
- struct ip_version media;
+struct intel_runtime_info {
+ struct {
+ struct ip_version ip;
+ } graphics;
+ struct {
+ struct ip_version ip;
+ } media;
+ struct {
+ struct ip_version ip;
+ } display;
+
+ /*
+ * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+ * single runtime conditionals, and also to provide groundwork for
+ * future per platform, or per SKU build optimizations.
+ *
+ * Array can be extended when necessary if the corresponding
+ * BUILD_BUG_ON is hit.
+ */
+ u32 platform_mask[2];
+
+ u16 device_id;
intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
- enum intel_platform platform;
+ u32 rawclk_freq;
- unsigned int dma_mask_size; /* available DMA address bits */
+ struct intel_step_info step;
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
- unsigned int page_sizes; /* page sizes supported by the HW */
-
u32 memory_regions; /* regions supported by the HW */
+ bool has_pooled_eu;
+
+ /* display */
+ struct {
+ u8 pipe_mask;
+ u8 cpu_transcoder_mask;
+
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ u8 fbc_mask;
+
+ bool has_hdcp;
+ bool has_dmc;
+ bool has_dsc;
+ };
+};
+
+struct intel_device_info {
+ enum intel_platform platform;
+
+ unsigned int dma_mask_size; /* available DMA address bits */
+
+ const struct intel_gt_definition *extra_gt_list;
+
u8 gt; /* GT number, 0 if undefined */
#define DEFINE_FLAG(name) u8 name:1
@@ -226,12 +268,6 @@ struct intel_device_info {
#undef DEFINE_FLAG
struct {
- u8 ver;
- u8 rel;
-
- u8 pipe_mask;
- u8 cpu_transcoder_mask;
- u8 fbc_mask;
u8 abox_mask;
struct {
@@ -258,27 +294,11 @@ struct intel_device_info {
u32 gamma_lut_tests;
} color;
} display;
-};
-struct intel_runtime_info {
/*
- * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
- * into single runtime conditionals, and also to provide groundwork
- * for future per platform, or per SKU build optimizations.
- *
- * Array can be extended when necessary if the corresponding
- * BUILD_BUG_ON is hit.
+ * Initial runtime info. Do not access outside of i915_driver_create().
*/
- u32 platform_mask[2];
-
- u16 device_id;
-
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- u32 rawclk_freq;
-
- struct intel_step_info step;
+ const struct intel_runtime_info __runtime;
};
struct intel_driver_caps {
@@ -291,10 +311,9 @@ const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_print_static(const struct intel_device_info *info,
- struct drm_printer *p);
-void intel_device_info_print_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p);
+void intel_device_info_print(const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
+ struct drm_printer *p);
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p);
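
The restructured header above separates the const per-device template (struct intel_device_info, which now only seeds the runtime data via __runtime) from the mutable struct intel_runtime_info that runtime init trims as fuse registers are read. A standalone sketch of that "const template, mutable working copy" idea, using made-up toy types rather than the driver's:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Mutable, probed-at-runtime values (cf. intel_runtime_info). */
struct toy_runtime_info {
	uint8_t pipe_mask;
	bool has_hdcp;
};

/* Const per-device template; carries only the *initial* runtime values. */
struct toy_device_info {
	const char *name;
	struct toy_runtime_info initial_runtime;
};

struct toy_device {
	const struct toy_device_info *info;	/* never modified */
	struct toy_runtime_info runtime;	/* adjusted after fuse readout */
};

static const struct toy_device_info example_info = {
	.name = "example",
	.initial_runtime = { .pipe_mask = 0x7, .has_hdcp = true },
};

/* Pretend a fuse register told us pipe C and HDCP are fused off. */
static void toy_runtime_init(struct toy_device *dev)
{
	dev->runtime = dev->info->initial_runtime;	/* start from the template */
	dev->runtime.pipe_mask &= ~(1u << 2);
	dev->runtime.has_hdcp = false;
}

int main(void)
{
	struct toy_device dev = { .info = &example_info };

	toy_runtime_init(&dev);
	printf("pipes: 0x%x, hdcp: %s\n",
	       (unsigned int)dev.runtime.pipe_mask,
	       dev.runtime.has_hdcp ? "yes" : "no");
	return 0;
}
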
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 437447119770..2403ccd52c74 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -466,6 +466,43 @@ static int gen12_get_dram_info(struct drm_i915_private *i915)
return icl_pcode_read_mem_global_info(i915);
}
+static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+{
+ u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
+ struct dram_info *dram_info = &i915->dram_info;
+
+ val = REG_FIELD_GET(MTL_DDR_TYPE_MASK, val);
+ switch (val) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(val);
+ return -EINVAL;
+ }
+
+ dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val);
+ dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
+ /* PSF GV points not supported in D14+ */
+
+ return 0;
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
@@ -480,7 +517,9 @@ void intel_dram_detect(struct drm_i915_private *i915)
*/
dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
- if (GRAPHICS_VER(i915) >= 12)
+ if (DISPLAY_VER(i915) >= 14)
+ ret = xelpdp_get_dram_info(i915);
+ else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915);
else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
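
One thing worth flagging in xelpdp_get_dram_info() as written in the hunk above: `val` is overwritten with the extracted DDR-type field before the channel and QGV-point fields are read, so the later REG_FIELD_GET() calls appear to operate on the already-masked type value rather than on the raw register word. A standalone sketch of the intended decode, keeping the raw readout separate; the bit positions below are invented for illustration, only the 0..5 type encoding is taken from the hunk:

#include <stdio.h>

/* Illustrative field positions -- NOT the real MTL_MEM_SS_INFO_GLOBAL layout. */
#define DDR_TYPE_MASK		0x00fu
#define DDR_TYPE_SHIFT		0
#define POPULATED_CH_MASK	0x0f0u
#define POPULATED_CH_SHIFT	4
#define QGV_POINTS_MASK		0xf00u
#define QGV_POINTS_SHIFT	8

#define FIELD_GET(mask, shift, reg)	(((reg) & (mask)) >> (shift))

static const char *ddr_type_name(unsigned int type)
{
	/* Same 0..5 encoding as the switch in xelpdp_get_dram_info(). */
	static const char *const names[] = {
		"DDR4", "DDR5", "LPDDR5", "LPDDR4", "DDR3", "LPDDR3",
	};

	return type < 6 ? names[type] : "unknown";
}

int main(void)
{
	unsigned int reg = 0x232;	/* pretend MMIO readout */
	unsigned int type, channels, qgv_points;

	/* Every field is decoded from the raw register word, never from `type`. */
	type = FIELD_GET(DDR_TYPE_MASK, DDR_TYPE_SHIFT, reg);
	channels = FIELD_GET(POPULATED_CH_MASK, POPULATED_CH_SHIFT, reg);
	qgv_points = FIELD_GET(QGV_POINTS_MASK, QGV_POINTS_SHIFT, reg);

	printf("type=%s channels=%u qgv_points=%u\n",
	       ddr_type_name(type), channels, qgv_points);
	return 0;
}
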
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 157e166672d7..8279dc580a3e 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -4,6 +4,7 @@
*/
#include "display/intel_audio_regs.h"
+#include "display/intel_backlight_regs.h"
#include "display/intel_dmc_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
@@ -1076,7 +1077,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
index 2aad2f0cc8db..ffc702b79579 100644
--- a/drivers/gpu/drm/i915/intel_mchbar_regs.h
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -196,6 +196,9 @@
#define RP1_CAP_MASK REG_GENMASK(15, 8)
#define RPN_CAP_MASK REG_GENMASK(23, 16)
+#define GEN10_FREQ_INFO_REC _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
+#define RPE_MASK REG_GENMASK(15, 8)
+
/* snb MCH registers for priority tuning */
#define MCH_SSKPD _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define SSKPD_NEW_WM0_MASK_HSW REG_GENMASK64(63, 56)
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 0fec25be146a..ba9843cb1b13 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -138,6 +138,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
+ case INTEL_PCH_MTP_DEVICE_ID_TYPE:
+ case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
+ return PCH_MTP;
default:
return PCH_NONE;
}
@@ -166,7 +171,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ if (IS_METEORLAKE(dev_priv))
+ id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
+ else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 7c8ce9781d1a..32aff5a70d04 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -25,6 +25,7 @@ enum intel_pch {
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
+ PCH_MTP, /* Meteor Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
@@ -57,12 +58,15 @@ enum intel_pch {
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
+#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
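
The new MTP entries follow the existing intel_pch_type() pattern: each south-bridge device ID maps to one enum intel_pch value, and the HAS_PCH_*() macros then compare against that. A small standalone sketch of the mapping, reusing only the two MTP device IDs shown in the hunk (the enum and helper names are illustrative):

#include <stdio.h>

/* IDs taken from the hunk above; the rest is an illustrative stand-in. */
#define PCH_MTP_DEVICE_ID_TYPE	0x7E00
#define PCH_MTP2_DEVICE_ID_TYPE	0xAE00

enum toy_pch { TOY_PCH_NONE, TOY_PCH_MTP };

static enum toy_pch toy_pch_type(unsigned short id)
{
	switch (id) {
	case PCH_MTP_DEVICE_ID_TYPE:
	case PCH_MTP2_DEVICE_ID_TYPE:
		return TOY_PCH_MTP;	/* Meteor Lake PCH */
	default:
		return TOY_PCH_NONE;
	}
}

#define TOY_HAS_PCH_MTP(id)	(toy_pch_type(id) == TOY_PCH_MTP)

int main(void)
{
	printf("0x7E00 -> MTP? %s\n", TOY_HAS_PCH_MTP(0x7E00) ? "yes" : "no");
	printf("0x1234 -> MTP? %s\n", TOY_HAS_PCH_MTP(0x1234) ? "yes" : "no");
	return 0;
}
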
diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h
index 12cd9d4f23de..4977a524ce6f 100644
--- a/drivers/gpu/drm/i915/intel_pci_config.h
+++ b/drivers/gpu/drm/i915/intel_pci_config.h
@@ -6,6 +6,13 @@
#ifndef __INTEL_PCI_CONFIG_H__
#define __INTEL_PCI_CONFIG_H__
+/* PCI BARs */
+#define GTTMMADR_BAR 0
+#define GEN2_GTTMMADR_BAR 1
+#define GFXMEM_BAR 2
+#define GTT_APERTURE_BAR GFXMEM_BAR
+#define GEN12_LMEM_BAR GFXMEM_BAR
+
/* BSM in include/drm/i915_drm.h */
#define MCHBAR_I915 0x44
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f06babdb3a8c..8f86f56e7ca4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,61 +25,22 @@
*
*/
-#include <linux/module.h>
-#include <linux/string_helpers.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
-
-#include "display/intel_atomic.h"
-#include "display/intel_atomic_plane.h"
-#include "display/intel_bw.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
-#include "display/intel_display_types.h"
-#include "display/intel_fb.h"
-#include "display/intel_fbc.h"
-#include "display/intel_sprite.h"
-#include "display/skl_universal_plane.h"
+#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
-#include "gt/intel_llc.h"
#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_irq.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
#include "intel_pm.h"
#include "vlv_sideband.h"
-#include "../../../platform/x86/intel_ips.h"
-
-static void skl_sagv_disable(struct drm_i915_private *dev_priv);
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
@@ -469,13 +430,13 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
ret = _intel_set_memory_cxsr(dev_priv, enable);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->wm.vlv.cxsr = enable;
+ dev_priv->display.wm.vlv.cxsr = enable;
else if (IS_G4X(dev_priv))
- dev_priv->wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ dev_priv->display.wm.g4x.cxsr = enable;
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
return ret;
}
@@ -835,11 +796,11 @@ static bool is_enabling(int old, int new, int threshold)
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
- return dev_priv->wm.max_level + 1;
+ return dev_priv->display.wm.max_level + 1;
}
-static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -1094,11 +1055,11 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
/* all latencies in usec */
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+ dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -1151,7 +1112,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1325,7 +1286,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (level > dev_priv->wm.max_level)
+ if (level > dev_priv->display.wm.max_level)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1584,7 +1545,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
struct g4x_wm_values new_wm = {};
g4x_merge_wm(dev_priv, &new_wm);
@@ -1610,10 +1571,10 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
@@ -1626,10 +1587,10 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/* latency must be in 0.1us units. */
@@ -1651,15 +1612,15 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
/* all latencies in usec */
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
}
}
@@ -1673,7 +1634,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->wm.pri_latency[level] == 0)
+ if (dev_priv->display.wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1694,7 +1655,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->wm.pri_latency[level] * 10);
+ dev_priv->display.wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -2159,7 +2120,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->wm.max_level;
+ wm->level = dev_priv->display.wm.max_level;
wm->cxsr = true;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2198,7 +2159,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
struct vlv_wm_values new_wm = {};
vlv_merge_wm(dev_priv, &new_wm);
@@ -2236,10 +2197,10 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
@@ -2252,10 +2213,10 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void i965_update_wm(struct drm_i915_private *dev_priv)
@@ -2836,9 +2797,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->wm.pri_latency[level];
- u16 spr_latency = dev_priv->wm.spr_latency[level];
- u16 cur_latency = dev_priv->wm.cur_latency[level];
+ u16 pri_latency = dev_priv->display.wm.pri_latency[level];
+ u16 spr_latency = dev_priv->display.wm.spr_latency[level];
+ u16 cur_latency = dev_priv->display.wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2862,119 +2823,43 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[])
+static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (DISPLAY_VER(dev_priv) >= 9) {
- u32 val;
- int ret, i;
- int level, max_level = ilk_wm_max_level(dev_priv);
- int mult = IS_DG2(dev_priv) ? 2 : 1;
+ u64 sskpd;
- /* read the first set of memory latencies[0:3] */
- val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
- &val, NULL);
+ sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
- if (ret) {
- drm_err(&dev_priv->drm,
- "SKL Mailbox read error = %d\n", ret);
- return;
- }
-
- wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
-
- /* read the second set of memory latencies[4:7] */
- val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
- &val, NULL);
- if (ret) {
- drm_err(&dev_priv->drm,
- "SKL Mailbox read error = %d\n", ret);
- return;
- }
-
- wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
+ if (wm[0] == 0)
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
+}
- /*
- * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
- * need to be disabled. We make sure to sanitize the values out
- * of the punit to satisfy this requirement.
- */
- for (level = 1; level <= max_level; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
- wm[i] = 0;
+static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 sskpd;
- max_level = level - 1;
+ sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
- break;
- }
- }
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
+}
- /*
- * WaWmMemoryReadLatency
- *
- * punit doesn't take into account the read latency so we need
- * to add proper adjustement to each valid level we retrieve
- * from the punit when level 0 response data is 0us.
- */
- if (wm[0] == 0) {
- u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
+static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 mltr;
- for (level = 0; level <= max_level; level++)
- wm[level] += adjust;
- }
+ mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
- /*
- * WA Level-0 adjustment for 16GB DIMMs: SKL+
- * If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get Dimm info assume 16GB dimm
- * to avoid any underrun.
- */
- if (dev_priv->dram_info.wm_lv_0_adjust_needed)
- wm[0] += 1;
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
- if (wm[0] == 0)
- wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
- wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
- wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
- wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
- wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
- } else if (DISPLAY_VER(dev_priv) >= 6) {
- u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
- wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
- wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
- wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
- } else if (DISPLAY_VER(dev_priv) >= 5) {
- u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
-
- /* ILK primary LP0 latency is 700 ns */
- wm[0] = 7;
- wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
- wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
- } else {
- MISSING_CASE(INTEL_DEVID(dev_priv));
- }
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
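
The gen9 path removed above also carried a sanitization rule that is easy to lose sight of when reading the split-out per-platform helpers: if any level above 0 reports a 0us latency, that level and every level above it must be treated as disabled. A standalone sketch of just that rule, with an illustrative array size and values:

#include <stdio.h>

#define NUM_WM_LEVELS 8

/* Once a level n > 0 reports 0us latency, zero out every level above it too. */
static void sanitize_wm_latency(unsigned int wm[NUM_WM_LEVELS])
{
	int level, i;

	for (level = 1; level < NUM_WM_LEVELS; level++) {
		if (wm[level] == 0) {
			for (i = level + 1; i < NUM_WM_LEVELS; i++)
				wm[i] = 0;
			break;
		}
	}
}

int main(void)
{
	unsigned int wm[NUM_WM_LEVELS] = { 2, 4, 0, 9, 12, 14, 16, 18 };
	int i;

	sanitize_wm_latency(wm);
	for (i = 0; i < NUM_WM_LEVELS; i++)
		printf("wm[%d] = %u\n", i, wm[i]);	/* levels 2..7 end up 0 */
	return 0;
}
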
@@ -3008,9 +2893,8 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
return 2;
}
-static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name,
- const u16 wm[])
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -3062,18 +2946,18 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
if (!changed)
return;
drm_dbg_kms(&dev_priv->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
@@ -3089,37 +2973,42 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->wm.pri_latency[3] == 0 &&
- dev_priv->wm.spr_latency[3] == 0 &&
- dev_priv->wm.cur_latency[3] == 0)
+ if (dev_priv->display.wm.pri_latency[3] == 0 &&
+ dev_priv->display.wm.spr_latency[3] == 0 &&
+ dev_priv->display.wm.cur_latency[3] == 0)
return;
- dev_priv->wm.pri_latency[3] = 0;
- dev_priv->wm.spr_latency[3] = 0;
- dev_priv->wm.cur_latency[3] = 0;
+ dev_priv->display.wm.pri_latency[3] = 0;
+ dev_priv->display.wm.spr_latency[3] = 0;
+ dev_priv->display.wm.cur_latency[3] = 0;
drm_dbg_kms(&dev_priv->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else if (DISPLAY_VER(dev_priv) >= 6)
+ snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else
+ ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
- sizeof(dev_priv->wm.pri_latency));
- memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
- sizeof(dev_priv->wm.pri_latency));
+ memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
if (DISPLAY_VER(dev_priv) == 6) {
snb_wm_latency_quirk(dev_priv);
@@ -3127,12 +3016,6 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
-}
-
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
@@ -3387,7 +3270,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
- return dev_priv->wm.pri_latency[level];
+ return dev_priv->display.wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
@@ -3539,7 +3422,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
@@ -3573,7 +3456,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
unsigned int dirty;
u32 val;
@@ -3635,7 +3518,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->wm.hw = *results;
+ dev_priv->display.wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
@@ -3643,2765 +3526,6 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
-{
- u8 enabled_slices = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(dev_priv, slice) {
- if (intel_uncore_read(&dev_priv->uncore,
- DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
- enabled_slices |= BIT(slice);
- }
-
- return enabled_slices;
-}
-
-/*
- * FIXME: We still don't have the proper code detect if we need to apply the WA,
- * so assume we'll always need it in order to avoid underruns.
- */
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) == 9;
-}
-
-static bool
-intel_has_sagv(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
- dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
-}
-
-static u32
-intel_sagv_block_time(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 12) {
- u32 val = 0;
- int ret;
-
- ret = snb_pcode_read(&dev_priv->uncore,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n");
- return 0;
- }
-
- return val;
- } else if (DISPLAY_VER(dev_priv) == 11) {
- return 10;
- } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) {
- return 30;
- } else {
- return 0;
- }
-}
-
-static void intel_sagv_init(struct drm_i915_private *i915)
-{
- if (!intel_has_sagv(i915))
- i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
-
- /*
- * Probe to see if we have working SAGV control.
- * For icl+ this was already determined by intel_bw_init_hw().
- */
- if (DISPLAY_VER(i915) < 11)
- skl_sagv_disable(i915);
-
- drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
-
- i915->sagv_block_time_us = intel_sagv_block_time(i915);
-
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
-
- /* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
- "Excessive SAGV block time %u, ignoring\n",
- i915->sagv_block_time_us))
- i915->sagv_block_time_us = 0;
-
- if (!intel_has_sagv(i915))
- i915->sagv_block_time_us = 0;
-}
-
-/*
- * SAGV dynamically adjusts the system agent voltage and clock frequencies
- * depending on power and performance requirements. The display engine access
- * to system memory is blocked during the adjustment time. Because of the
- * blocking time, having this enabled can cause full system hangs and/or pipe
- * underruns if we don't meet all of the following requirements:
- *
- * - <= 1 pipe enabled
- * - All planes can enable watermarks for latencies >= SAGV engine block time
- * - We're not using an interlaced display configuration
- */
-static void skl_sagv_enable(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (!intel_has_sagv(dev_priv))
- return;
-
- if (dev_priv->sagv_status == I915_SAGV_ENABLED)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
-
- /* We don't need to wait for SAGV when enabling */
-
- /*
- * Some skl systems, pre-release machines in particular,
- * don't actually have SAGV.
- */
- if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return;
- } else if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
- return;
- }
-
- dev_priv->sagv_status = I915_SAGV_ENABLED;
-}
-
-static void skl_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (!intel_has_sagv(dev_priv))
- return;
-
- if (dev_priv->sagv_status == I915_SAGV_DISABLED)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
- /* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
- 1);
- /*
- * Some skl systems, pre-release machines in particular,
- * don't actually have SAGV.
- */
- if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return;
- } else if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
- return;
- }
-
- dev_priv->sagv_status = I915_SAGV_DISABLED;
-}
-
-static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
-}
-
-static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
-}
-
-static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask;
- new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Restrict required qgv points before updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
-}
-
-static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- new_mask = new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
-}
-
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from situation when we have SAGV but just can't
- * afford it due to DBuf limitation - in case if SAGV is completely
- * disabled in a BIOS, we are not even allowed to send a PCode request,
- * as it will throw an error. So have to check it here.
- */
- if (!intel_has_sagv(i915))
- return;
-
- if (DISPLAY_VER(i915) >= 11)
- icl_sagv_pre_plane_update(state);
- else
- skl_sagv_pre_plane_update(state);
-}
-
-void intel_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from situation when we have SAGV but just can't
- * afford it due to DBuf limitation - in case if SAGV is completely
- * disabled in a BIOS, we are not even allowed to send a PCode request,
- * as it will throw an error. So have to check it here.
- */
- if (!intel_has_sagv(i915))
- return;
-
- if (DISPLAY_VER(i915) >= 11)
- icl_sagv_post_plane_update(state);
- else
- skl_sagv_post_plane_update(state);
-}
-
-static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane_id plane_id;
- int max_level = INT_MAX;
-
- if (!intel_has_sagv(dev_priv))
- return false;
-
- if (!crtc_state->hw.active)
- return true;
-
- if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
- return false;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
- int level;
-
- /* Skip this plane if it's not enabled */
- if (!wm->wm[0].enable)
- continue;
-
- /* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev_priv);
- !wm->wm[level].enable; --level)
- { }
-
- /* Highest common enabled wm level for all planes */
- max_level = min(level, max_level);
- }
-
- /* No enabled planes? */
- if (max_level == INT_MAX)
- return true;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- /*
- * All enabled planes must have enabled a common wm level that
- * can tolerate memory latencies higher than sagv_block_time_us
- */
- if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
- return false;
- }
-
- return true;
-}
-
-static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum plane_id plane_id;
-
- if (!crtc_state->hw.active)
- return true;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (wm->wm[0].enable && !wm->sagv.wm0.enable)
- return false;
- }
-
- return true;
-}
-
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(dev_priv) >= 12)
- return tgl_crtc_can_enable_sagv(crtc_state);
- else
- return skl_crtc_can_enable_sagv(crtc_state);
-}
-
-bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
- const struct intel_bw_state *bw_state)
-{
- if (DISPLAY_VER(dev_priv) < 11 &&
- bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
- return false;
-
- return bw_state->pipe_sagv_reject == 0;
-}
-
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
- intel_can_enable_sagv(dev_priv, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case)
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
- DISPLAY_VER(dev_priv) >= 12 &&
- intel_can_enable_sagv(dev_priv, new_bw_state);
- }
-
- return 0;
-}
-
-static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
- u16 start, u16 end)
-{
- entry->start = start;
- entry->end = end;
-
- return end;
-}
-
-static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
-{
- return INTEL_INFO(dev_priv)->display.dbuf.size /
- hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
-}
-
-static void
-skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
- struct skl_ddb_entry *ddb)
-{
- int slice_size = intel_dbuf_slice_size(dev_priv);
-
- if (!slice_mask) {
- ddb->start = 0;
- ddb->end = 0;
- return;
- }
-
- ddb->start = (ffs(slice_mask) - 1) * slice_size;
- ddb->end = fls(slice_mask) * slice_size;
-
- WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
-}
-
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
-{
- struct skl_ddb_entry ddb;
-
- if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
- slice_mask = BIT(DBUF_S1);
- else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
- slice_mask = BIT(DBUF_S3);
-
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
-
- return ddb.start;
-}
-
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry *entry)
-{
- int slice_size = intel_dbuf_slice_size(dev_priv);
- enum dbuf_slice start_slice, end_slice;
- u8 slice_mask = 0;
-
- if (!skl_ddb_entry_size(entry))
- return 0;
-
- start_slice = entry->start / slice_size;
- end_slice = (entry->end - 1) / slice_size;
-
- /*
- * Per plane DDB entry can in a really worst case be on multiple slices
- * but single entry is anyway contigious.
- */
- while (start_slice <= end_slice) {
- slice_mask |= BIT(start_slice);
- start_slice++;
- }
-
- return slice_mask;
-}
-
-static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
-{
- const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int hdisplay, vdisplay;
-
- if (!crtc_state->hw.active)
- return 0;
-
- /*
- * Watermark/ddb requirement highly depends upon width of the
- * framebuffer, So instead of allocating DDB equally among pipes
- * distribute DDB based on resolution/width of the display.
- */
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
-
- return hdisplay;
-}
-
-static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
- enum pipe for_pipe,
- unsigned int *weight_start,
- unsigned int *weight_end,
- unsigned int *weight_total)
-{
- struct drm_i915_private *dev_priv =
- to_i915(dbuf_state->base.state->base.dev);
- enum pipe pipe;
-
- *weight_start = 0;
- *weight_end = 0;
- *weight_total = 0;
-
- for_each_pipe(dev_priv, pipe) {
- int weight = dbuf_state->weight[pipe];
-
- /*
- * Do not account pipes using other slice sets
- * luckily as of current BSpec slice sets do not partially
- * intersect(pipes share either same one slice or same slice set
- * i.e no partial intersection), so it is enough to check for
- * equality for now.
- */
- if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
- continue;
-
- *weight_total += weight;
- if (pipe < for_pipe) {
- *weight_start += weight;
- *weight_end += weight;
- } else if (pipe == for_pipe) {
- *weight_end += weight;
- }
- }
-}
-
-static int
-skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- unsigned int weight_total, weight_start, weight_end;
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- struct intel_crtc_state *crtc_state;
- struct skl_ddb_entry ddb_slices;
- enum pipe pipe = crtc->pipe;
- unsigned int mbus_offset = 0;
- u32 ddb_range_size;
- u32 dbuf_slice_mask;
- u32 start, end;
- int ret;
-
- if (new_dbuf_state->weight[pipe] == 0) {
- skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
- goto out;
- }
-
- dbuf_slice_mask = new_dbuf_state->slices[pipe];
-
- skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
- ddb_range_size = skl_ddb_entry_size(&ddb_slices);
-
- intel_crtc_dbuf_weights(new_dbuf_state, pipe,
- &weight_start, &weight_end, &weight_total);
-
- start = ddb_range_size * weight_start / weight_total;
- end = ddb_range_size * weight_end / weight_total;
-
- skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
- ddb_slices.start - mbus_offset + start,
- ddb_slices.start - mbus_offset + end);
-
-out:
- if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
- skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
- &new_dbuf_state->ddb[pipe]))
- return 0;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- /*
- * Used for checking overlaps, so we need absolute
- * offsets instead of MBUS relative offsets.
- */
- crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
- crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
- crtc->base.base.id, crtc->base.name,
- old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
- old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
- new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
- old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
-
- return 0;
-}
-
-static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
- int width, const struct drm_format_info *format,
- u64 modifier, unsigned int rotation,
- u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane);
-
-static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- int level,
- unsigned int latency,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */);
-
-static unsigned int
-skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
- int num_active)
-{
- struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct skl_wm_level wm = {};
- int ret, min_ddb_alloc = 0;
- struct skl_wm_params wp;
-
- ret = skl_compute_wm_params(crtc_state, 256,
- drm_format_info(DRM_FORMAT_ARGB8888),
- DRM_FORMAT_MOD_LINEAR,
- DRM_MODE_ROTATE_0,
- crtc_state->pixel_rate, &wp, 0);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = dev_priv->wm.skl_latency[level];
-
- skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
- if (wm.min_ddb_alloc == U16_MAX)
- break;
-
- min_ddb_alloc = wm.min_ddb_alloc;
- }
-
- return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
-}
-
-static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
-{
- skl_ddb_entry_init(entry,
- REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
- REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
- if (entry->end)
- entry->end++;
-}
-
-static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
- const enum pipe pipe,
- const enum plane_id plane_id,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
-{
- u32 val;
-
- /* Cursor doesn't support NV12/planar, so no extra calculation needed */
- if (plane_id == PLANE_CURSOR) {
- val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(ddb, val);
- return;
- }
-
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb, val);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- return;
-
- val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb_y, val);
-}
-
-static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_display_power_domain power_domain;
- enum pipe pipe = crtc->pipe;
- intel_wakeref_t wakeref;
- enum plane_id plane_id;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id,
- &ddb[plane_id],
- &ddb_y[plane_id]);
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-}
-
-struct dbuf_slice_conf_entry {
- u8 active_pipes;
- u8 dbuf_mask[I915_MAX_PIPES];
- bool join_mbus;
-};
-
-/*
- * Table taken from Bspec 12716
- * Pipes do have some preferred DBuf slice affinity,
- * plus there are some hardcoded requirements on how
- * those should be distributed for multipipe scenarios.
- * For more DBuf slices algorithm can get even more messy
- * and less readable, so decided to use a table almost
- * as is from BSpec itself - that way it is at least easier
- * to compare, change and check.
- */
-static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
-/* Autogenerated with igt/tools/intel_dbuf_map tool: */
-{
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {}
-};
-
-/*
- * Table taken from Bspec 49255
- * Pipes do have some preferred DBuf slice affinity,
- * plus there are some hardcoded requirements on how
- * those should be distributed for multipipe scenarios.
- * For more DBuf slices algorithm can get even more messy
- * and less readable, so decided to use a table almost
- * as is from BSpec itself - that way it is at least easier
- * to compare, change and check.
- */
-static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
-/* Autogenerated with igt/tools/intel_dbuf_map tool: */
-{
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {}
-};
-
-static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {}
-};
-
-static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
- /*
- * Keep the join_mbus cases first so check_mbus_joined()
- * will prefer them over the !join_mbus cases.
- */
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = true,
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = true,
- },
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- .join_mbus = false,
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = false,
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {}
-
-};
-
-static bool check_mbus_joined(u8 active_pipes,
- const struct dbuf_slice_conf_entry *dbuf_slices)
-{
- int i;
-
- for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
- return dbuf_slices[i].join_mbus;
- }
- return false;
-}
-
-static bool adlp_check_mbus_joined(u8 active_pipes)
-{
- return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
-}
-
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
- const struct dbuf_slice_conf_entry *dbuf_slices)
-{
- int i;
-
- for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes &&
- dbuf_slices[i].join_mbus == join_mbus)
- return dbuf_slices[i].dbuf_mask[pipe];
- }
- return 0;
-}
-
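
The tables above plus compute_dbuf_slices() are a plain table-driven lookup: walk the entries until the active-pipe mask (and join_mbus flag) matches, then return that entry's per-pipe slice mask. The standalone user-space sketch below illustrates the same idea; the pipe/slice values and the lookup_slices() helper are hypothetical stand-ins, not the real i915 definitions, and the join_mbus flag is left out for brevity.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the i915 enums, for illustration only. */
enum { PIPE_A, PIPE_B, PIPE_C, MAX_PIPES };
enum { DBUF_S1, DBUF_S2 };
#define BIT(x) (1u << (x))

struct slice_conf {
	uint8_t active_pipes;          /* which pipes are enabled */
	uint8_t dbuf_mask[MAX_PIPES];  /* slice mask per pipe */
};

/* A tiny table in the same spirit as icl_allowed_dbufs[]. */
static const struct slice_conf table[] = {
	{ .active_pipes = BIT(PIPE_A),
	  .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1) } },
	{ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
	  .dbuf_mask = { [PIPE_A] = BIT(DBUF_S1), [PIPE_B] = BIT(DBUF_S2) } },
	{ 0 }	/* terminator: active_pipes == 0 */
};

/* Same lookup shape as compute_dbuf_slices(): first exact match wins. */
static uint8_t lookup_slices(int pipe, uint8_t active_pipes)
{
	for (int i = 0; table[i].active_pipes; i++)
		if (table[i].active_pipes == active_pipes)
			return table[i].dbuf_mask[pipe];
	return 0;	/* unknown configuration */
}

int main(void)
{
	uint8_t active = BIT(PIPE_A) | BIT(PIPE_B);

	printf("pipe A slices: 0x%x\n", lookup_slices(PIPE_A, active)); /* 0x1 */
	printf("pipe B slices: 0x%x\n", lookup_slices(PIPE_B, active)); /* 0x2 */
	return 0;
}
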
-/*
- * This function finds an entry with same enabled pipe configuration and
- * returns correspondent DBuf slice mask as stated in BSpec for particular
- * platform.
- */
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- /*
- * FIXME: For ICL this is still a bit unclear, as a previous BSpec
- * revision required calculating a "pipe ratio" to determine whether
- * one or two slices can be used for single-pipe configurations, as an
- * additional constraint on top of the existing table.
- * However, based on recent info it should not be a "pipe ratio" but
- * rather the ratio between pixel_rate and cdclk with additional
- * constants, so for now we use only the table until this is
- * clarified. This is also the reason why the crtc_state param is
- * still here - we will need it once those additional constraints
- * pop up.
- */
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- icl_allowed_dbufs);
-}
-
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- tgl_allowed_dbufs);
-}
-
-static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- adlp_allowed_dbufs);
-}
-
-static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- dg2_allowed_dbufs);
-}
-
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- /*
- * For anything else just return one slice yet.
- * Should be extended for other platforms.
- */
- return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
-}
-
-static bool
-use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
-static u64
-skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum plane_id plane_id;
- u64 data_rate = 0;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->rel_data_rate[plane_id];
-
- if (DISPLAY_VER(i915) < 11)
- data_rate += crtc_state->rel_data_rate_y[plane_id];
- }
-
- return data_rate;
-}
-
-static const struct skl_wm_level *
-skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- int level)
-{
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- if (level == 0 && pipe_wm->use_sagv_wm)
- return &wm->sagv.wm0;
-
- return &wm->wm[level];
-}
-
-static const struct skl_wm_level *
-skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id)
-{
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- if (pipe_wm->use_sagv_wm)
- return &wm->sagv.trans_wm;
-
- return &wm->trans_wm;
-}
-
-/*
- * We only disable the watermarks for each plane if
- * they exceed the ddb allocation of said plane. This
- * is done so that we don't end up touching cursor
- * watermarks needlessly when some other plane reduces
- * our max possible watermark level.
- *
- * Bspec has this to say about the PLANE_WM enable bit:
- * "All the watermarks at this level for all enabled
- * planes must be enabled before the level will be used."
- * So this is actually safe to do.
- */
-static void
-skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
-{
- if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
- memset(wm, 0, sizeof(*wm));
-}
-
-static void
-skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
- const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
-{
- if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
- uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- memset(wm, 0, sizeof(*wm));
- memset(uv_wm, 0, sizeof(*uv_wm));
- }
-}
-
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
-{
- /*
- * Wa_1408961008:icl, ehl
- * Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
- */
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
-}
-
-struct skl_plane_ddb_iter {
- u64 data_rate;
- u16 start, size;
-};
-
-static void
-skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
- struct skl_ddb_entry *ddb,
- const struct skl_wm_level *wm,
- u64 data_rate)
-{
- u16 size, extra = 0;
-
- if (data_rate) {
- extra = min_t(u16, iter->size,
- DIV64_U64_ROUND_UP(iter->size * data_rate,
- iter->data_rate));
- iter->size -= extra;
- iter->data_rate -= data_rate;
- }
-
- /*
- * Keep the ddb entries of all disabled planes explicitly zeroed
- * to avoid skl_ddb_add_affected_planes() adding them to
- * the state when other planes change their allocations.
- */
- size = wm->min_ddb_alloc + extra;
- if (size)
- iter->start = skl_ddb_entry_init(ddb, iter->start,
- iter->start + size);
-}
-
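
skl_allocate_plane_ddb() above gives each plane its minimum block count plus a share of the leftover DDB space proportional to its relative data rate (rounded up and capped by what is left). A minimal user-space sketch of that proportional split, using made-up block counts and data rates rather than real watermark numbers:

#include <stdio.h>
#include <stdint.h>

struct ddb_iter {
	uint64_t data_rate;   /* total remaining relative data rate */
	uint16_t start, size; /* remaining leftover DDB block range */
};

/* Same arithmetic shape as skl_allocate_plane_ddb(): min blocks + proportional extra. */
static void allocate(struct ddb_iter *it, uint16_t min_blocks,
		     uint64_t data_rate, const char *name)
{
	uint16_t extra = 0;

	if (data_rate) {
		/* round up, but never hand out more than what is left */
		extra = (uint16_t)((it->size * data_rate + it->data_rate - 1) /
				   it->data_rate);
		if (extra > it->size)
			extra = it->size;
		it->size -= extra;
		it->data_rate -= data_rate;
	}

	printf("%s: start=%u size=%u\n", name,
	       (unsigned)it->start, (unsigned)(min_blocks + extra));
	it->start += min_blocks + extra;
}

int main(void)
{
	/* Hypothetical: 100 spare blocks, two planes with a 3:1 data-rate ratio. */
	struct ddb_iter it = { .data_rate = 400, .start = 0, .size = 100 };

	allocate(&it, 20, 300, "plane 1");	/* 20 + 75 blocks */
	allocate(&it, 10, 100, "plane 2");	/* 10 + 25 blocks */
	return 0;
}
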
-static int
-skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
- int num_active = hweight8(dbuf_state->active_pipes);
- struct skl_plane_ddb_iter iter;
- enum plane_id plane_id;
- u16 cursor_size;
- u32 blocks;
- int level;
-
- /* Clear the partitioning for disabled planes. */
- memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
- memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
-
- if (!crtc_state->hw.active)
- return 0;
-
- iter.start = alloc->start;
- iter.size = skl_ddb_entry_size(alloc);
- if (iter.size == 0)
- return 0;
-
- /* Allocate fixed number of blocks for cursor. */
- cursor_size = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= cursor_size;
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - cursor_size, alloc->end);
-
- iter.data_rate = skl_total_relative_data_rate(crtc_state);
-
- /*
- * Find the highest watermark level for which we can satisfy the block
- * requirement of active planes.
- */
- for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
- blocks = 0;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (plane_id == PLANE_CURSOR) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
-
- if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&dev_priv->drm,
- wm->wm[level].min_ddb_alloc != U16_MAX);
- blocks = U32_MAX;
- break;
- }
- continue;
- }
-
- blocks += wm->wm[level].min_ddb_alloc;
- blocks += wm->uv_wm[level].min_ddb_alloc;
- }
-
- if (blocks <= iter.size) {
- iter.size -= blocks;
- break;
- }
- }
-
- if (level < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
- blocks, iter.size);
- return -EINVAL;
- }
-
- /* avoid the WARN later when we don't allocate any extra DDB */
- if (iter.data_rate == 0)
- iter.size = 0;
-
- /*
- * Grant each plane the blocks it requires at the highest achievable
- * watermark level, plus an extra share of the leftover blocks
- * proportional to its relative data rate.
- */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
- crtc_state->rel_data_rate_y[plane_id]);
- skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
- crtc_state->rel_data_rate[plane_id]);
- } else {
- skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
- crtc_state->rel_data_rate[plane_id]);
- }
- }
- drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
-
- /*
- * When we calculated watermark values we didn't know how high
- * of a level we'd actually be able to hit, so we just marked
- * all levels as "enabled." Go back now and disable the ones
- * that aren't actually possible.
- */
- for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id))
- skl_check_nv12_wm_level(&wm->wm[level],
- &wm->uv_wm[level],
- ddb_y, ddb);
- else
- skl_check_wm_level(&wm->wm[level], ddb);
-
- if (icl_need_wm1_wa(dev_priv, plane_id) &&
- level == 1 && wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
- }
- }
- }
-
- /*
- * Go back and disable the transition and SAGV watermarks
- * if it turns out we don't have enough DDB blocks for them.
- */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- skl_check_wm_level(&wm->trans_wm, ddb_y);
- } else {
- WARN_ON(skl_ddb_entry_size(ddb_y));
-
- skl_check_wm_level(&wm->trans_wm, ddb);
- }
-
- skl_check_wm_level(&wm->sagv.wm0, ddb);
- skl_check_wm_level(&wm->sagv.trans_wm, ddb);
- }
-
- return 0;
-}
-
-/*
- * The max latency should be 257 (the max the punit can encode is 255 and we
- * add 2us for the read latency) and cpp should always be <= 8, so this
- * should allow a pixel_rate of up to ~2 GHz, which seems sufficient since the
- * max 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
-*/
-static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
- u8 cpp, u32 latency, u32 dbuf_block_size)
-{
- u32 wm_intermediate_val;
- uint_fixed_16_16_t ret;
-
- if (latency == 0)
- return FP_16_16_MAX;
-
- wm_intermediate_val = latency * pixel_rate * cpp;
- ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- ret = add_fixed16_u32(ret, 1);
-
- return ret;
-}
-
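
The ~2 GHz figure in the comment above falls out of keeping latency * pixel_rate * cpp within a u32; the quick sketch below works through that bound, assuming pixel_rate is expressed in kHz as the surrounding watermark code suggests (an assumption for illustration, not something stated here):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * Illustrative bound only: with a maximum latency of 257 us and
	 * cpp <= 8, the largest pixel_rate (assumed kHz) that keeps
	 * latency * pixel_rate * cpp within a u32 is:
	 */
	uint32_t max_latency = 257;	/* 255 from the punit + 2 us read latency */
	uint32_t max_cpp = 8;
	uint32_t max_pixel_rate_khz = UINT32_MAX / (max_latency * max_cpp);

	printf("max pixel rate ~%u kHz (~%.2f GHz)\n",
	       max_pixel_rate_khz, max_pixel_rate_khz / 1e6);
	return 0;
}
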
-static uint_fixed_16_16_t
-skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
- uint_fixed_16_16_t plane_blocks_per_line)
-{
- u32 wm_intermediate_val;
- uint_fixed_16_16_t ret;
-
- if (latency == 0)
- return FP_16_16_MAX;
-
- wm_intermediate_val = latency * pixel_rate;
- wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
- pipe_htotal * 1000);
- ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
- return ret;
-}
-
-static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 pixel_rate;
- u32 crtc_htotal;
- uint_fixed_16_16_t linetime_us;
-
- if (!crtc_state->hw.active)
- return u32_to_fixed16(0);
-
- pixel_rate = crtc_state->pixel_rate;
-
- if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
- return u32_to_fixed16(0);
-
- crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
- linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
-
- return linetime_us;
-}
-
-static int
-skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
- int width, const struct drm_format_info *format,
- u64 modifier, unsigned int rotation,
- u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 interm_pbpl;
-
- /* only planar formats have two planes */
- if (color_plane == 1 &&
- !intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&dev_priv->drm,
- "Non planar format have single plane\n");
- return -EINVAL;
- }
-
- wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
- modifier == I915_FORMAT_MOD_4_TILED ||
- modifier == I915_FORMAT_MOD_Yf_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
-
- wp->width = width;
- if (color_plane == 1 && wp->is_planar)
- wp->width /= 2;
-
- wp->cpp = format->cpp[color_plane];
- wp->plane_pixel_rate = plane_pixel_rate;
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
- wp->dbuf_block_size = 256;
- else
- wp->dbuf_block_size = 512;
-
- if (drm_rotation_90_or_270(rotation)) {
- switch (wp->cpp) {
- case 1:
- wp->y_min_scanlines = 16;
- break;
- case 2:
- wp->y_min_scanlines = 8;
- break;
- case 4:
- wp->y_min_scanlines = 4;
- break;
- default:
- MISSING_CASE(wp->cpp);
- return -EINVAL;
- }
- } else {
- wp->y_min_scanlines = 4;
- }
-
- if (skl_needs_memory_bw_wa(dev_priv))
- wp->y_min_scanlines *= 2;
-
- wp->plane_bytes_per_line = wp->width * wp->cpp;
- if (wp->y_tiled) {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
- wp->y_min_scanlines,
- wp->dbuf_block_size);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- interm_pbpl++;
-
- wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
- wp->y_min_scanlines);
- } else {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size);
-
- if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
- interm_pbpl++;
-
- wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
- }
-
- wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
- wp->plane_blocks_per_line);
-
- wp->linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(crtc_state));
-
- return 0;
-}
-
-static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct skl_wm_params *wp, int color_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- return skl_compute_wm_params(crtc_state, width,
- fb->format, fb->modifier,
- plane_state->hw.rotation,
- intel_plane_pixel_rate(crtc_state, plane_state),
- wp, color_plane);
-}
-
-static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
-{
- if (DISPLAY_VER(dev_priv) >= 10)
- return true;
-
- /* The number of lines is ignored for the level 0 watermark. */
- return level > 0;
-}
-
-static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 13)
- return 255;
- else
- return 31;
-}
-
-static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- int level,
- unsigned int latency,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- uint_fixed_16_16_t method1, method2;
- uint_fixed_16_16_t selected_result;
- u32 blocks, lines, min_ddb_alloc = 0;
-
- if (latency == 0 ||
- (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
- /* reject it */
- result->min_ddb_alloc = U16_MAX;
- return;
- }
-
- /*
- * WaIncreaseLatencyIPCEnabled: kbl,cfl
- * Display WA #1141: kbl,cfl
- */
- if ((IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
- latency += 4;
-
- if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
- latency += 15;
-
- method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
- wp->cpp, latency, wp->dbuf_block_size);
- method2 = skl_wm_method2(wp->plane_pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- latency,
- wp->plane_blocks_per_line);
-
- if (wp->y_tiled) {
- selected_result = max_fixed16(method2, wp->y_tile_minimum);
- } else {
- if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
- wp->dbuf_block_size < 1) &&
- (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
- selected_result = method2;
- } else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(dev_priv) == 9)
- selected_result = min_fixed16(method1, method2);
- else
- selected_result = method2;
- } else {
- selected_result = method1;
- }
- }
-
- blocks = fixed16_to_u32_round_up(selected_result) + 1;
- /*
- * Let's have blocks be at least equivalent to plane_blocks_per_line,
- * as there will be at least one line in the lines configuration. This
- * is a workaround for FIFO underruns observed with resolutions like
- * 4k 60 Hz in single-channel DRAM configurations.
- *
- * As per the Bspec 49325, if the ddb allocation can hold at least
- * one plane_blocks_per_line, we should have selected method2 in
- * the above logic. Assuming that modern versions have enough dbuf
- * and method2 guarantees blocks equivalent to at least 1 line,
- * select the blocks as plane_blocks_per_line.
- *
- * TODO: Revisit the logic when we have better understanding on DRAM
- * channels' impact on the level 0 memory latency and the relevant
- * wm calculations.
- */
- if (skl_wm_has_lines(dev_priv, level))
- blocks = max(blocks,
- fixed16_to_u32_round_up(wp->plane_blocks_per_line));
- lines = div_round_up_fixed16(selected_result,
- wp->plane_blocks_per_line);
-
- if (DISPLAY_VER(dev_priv) == 9) {
- /* Display WA #1125: skl,bxt,kbl */
- if (level == 0 && wp->rc_surface)
- blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
-
- /* Display WA #1126: skl,bxt,kbl */
- if (level >= 1 && level <= 7) {
- if (wp->y_tiled) {
- blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
- lines += wp->y_min_scanlines;
- } else {
- blocks++;
- }
-
- /*
- * Make sure the result blocks for higher latency levels are
- * at least as high as those of the level below the current one.
- * This is assumed by the DDB algorithm optimization for special
- * cases. Also covers Display WA #1125 for RC.
- */
- if (result_prev->blocks > blocks)
- blocks = result_prev->blocks;
- }
- }
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- if (wp->y_tiled) {
- int extra_lines;
-
- if (lines % wp->y_min_scanlines == 0)
- extra_lines = wp->y_min_scanlines;
- else
- extra_lines = wp->y_min_scanlines * 2 -
- lines % wp->y_min_scanlines;
-
- min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
- wp->plane_blocks_per_line);
- } else {
- min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
- }
- }
-
- if (!skl_wm_has_lines(dev_priv, level))
- lines = 0;
-
- if (lines > skl_wm_max_lines(dev_priv)) {
- /* reject it */
- result->min_ddb_alloc = U16_MAX;
- return;
- }
-
- /*
- * If lines is valid, assume we can use this watermark level
- * for now. We'll come back and disable it after we calculate the
- * DDB allocation if it turns out we don't actually have enough
- * blocks to satisfy it.
- */
- result->blocks = blocks;
- result->lines = lines;
- /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
- result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
- result->enable = true;
-
- if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us)
- result->can_sagv = latency >= dev_priv->sagv_block_time_us;
-}
-
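
In the DISPLAY_VER >= 11 y-tiled branch above, extra_lines effectively rounds the line count up to the next y_min_scanlines boundary and then adds one more full group before converting to blocks. A small user-space sketch of just that rounding, with hypothetical inputs:

#include <stdio.h>
#include <stdint.h>

/* Round 'lines' up to a y_min_scanlines boundary and add one extra group,
 * mirroring the extra_lines computation in skl_compute_plane_wm(). */
static uint32_t padded_lines(uint32_t lines, uint32_t y_min_scanlines)
{
	uint32_t extra;

	if (lines % y_min_scanlines == 0)
		extra = y_min_scanlines;
	else
		extra = 2 * y_min_scanlines - lines % y_min_scanlines;

	return lines + extra;
}

int main(void)
{
	/* Hypothetical values: y_min_scanlines = 4 (cpp == 4, 90/270 rotation). */
	printf("%u\n", padded_lines(10, 4));	/* 16: 10 -> 12, plus one group */
	printf("%u\n", padded_lines(12, 4));	/* 16: already aligned, plus one group */
	return 0;
}
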
-static void
-skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- const struct skl_wm_params *wm_params,
- struct skl_wm_level *levels)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct skl_wm_level *result_prev = &levels[0];
-
- for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = &levels[level];
- unsigned int latency = dev_priv->wm.skl_latency[level];
-
- skl_compute_plane_wm(crtc_state, plane, level, latency,
- wm_params, result_prev, result);
-
- result_prev = result;
- }
-}
-
-static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- const struct skl_wm_params *wm_params,
- struct skl_plane_wm *plane_wm)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
- struct skl_wm_level *levels = plane_wm->wm;
- unsigned int latency = 0;
-
- if (dev_priv->sagv_block_time_us)
- latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0];
-
- skl_compute_plane_wm(crtc_state, plane, 0, latency,
- wm_params, &levels[0],
- sagv_wm);
-}
-
-static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
- struct skl_wm_level *trans_wm,
- const struct skl_wm_level *wm0,
- const struct skl_wm_params *wp)
-{
- u16 trans_min, trans_amount, trans_y_tile_min;
- u16 wm0_blocks, trans_offset, blocks;
-
- /* Transition WMs don't make any sense if IPC is disabled */
- if (!dev_priv->ipc_enabled)
- return;
-
- /*
- * WaDisableTWM:skl,kbl,cfl,bxt
- * Transition WMs are not recommended by the HW team for GEN9
- */
- if (DISPLAY_VER(dev_priv) == 9)
- return;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- trans_min = 4;
- else
- trans_min = 14;
-
- /* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(dev_priv) == 10)
- trans_amount = 0;
- else
- trans_amount = 10; /* This is a configurable amount */
-
- trans_offset = trans_min + trans_amount;
-
- /*
- * The spec asks for Selected Result Blocks for wm0 (the real value),
- * not Result Blocks (the integer value). Pay attention to the capital
- * letters. The value wm_l0->blocks is actually Result Blocks, but
- * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
- * and since we later will have to get the ceiling of the sum in the
- * transition watermarks calculation, we can just pretend Selected
- * Result Blocks is Result Blocks minus 1 and it should work for the
- * current platforms.
- */
- wm0_blocks = wm0->blocks - 1;
-
- if (wp->y_tiled) {
- trans_y_tile_min =
- (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
- blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
- } else {
- blocks = wm0_blocks + trans_offset;
- }
- blocks++;
-
- /*
- * Just assume we can enable the transition watermark. After
- * computing the DDB we'll come back and disable it if that
- * assumption turns out to be false.
- */
- trans_wm->blocks = blocks;
- trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
- trans_wm->enable = true;
-}
-
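
The Selected-vs-Result-Blocks note in skl_compute_transition_wm() reduces to: approximate Selected Result Blocks as wm0->blocks - 1, add the fixed transition offset (trans_min + trans_amount), then add 1 to take the ceiling again. A tiny numeric sketch of the non-y-tiled path, with made-up values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical numbers, not taken from any real platform. */
	unsigned int wm0_result_blocks = 37;	/* Result Blocks = ceil(Selected) + 1 */
	unsigned int trans_min = 4, trans_amount = 10;

	unsigned int selected = wm0_result_blocks - 1;	/* approx. Selected Result Blocks */
	unsigned int trans_offset = trans_min + trans_amount;
	unsigned int trans_blocks = selected + trans_offset + 1;	/* ceiling again */

	printf("transition watermark blocks: %u\n", trans_blocks);	/* 51 */
	return 0;
}
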
-static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct intel_plane *plane, int color_plane)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
- struct skl_wm_params wm_params;
- int ret;
-
- ret = skl_compute_plane_wm_params(crtc_state, plane_state,
- &wm_params, color_plane);
- if (ret)
- return ret;
-
- skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
-
- skl_compute_transition_wm(dev_priv, &wm->trans_wm,
- &wm->wm[0], &wm_params);
-
- if (DISPLAY_VER(dev_priv) >= 12) {
- tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
-
- skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
- &wm->sagv.wm0, &wm_params);
- }
-
- return 0;
-}
-
-static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct intel_plane *plane)
-{
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
- struct skl_wm_params wm_params;
- int ret;
-
- wm->is_planar = true;
-
- /* uv plane watermarks must also be validated for NV12/Planar */
- ret = skl_compute_plane_wm_params(crtc_state, plane_state,
- &wm_params, 1);
- if (ret)
- return ret;
-
- skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
-
- return 0;
-}
-
-static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
-
- memset(wm, 0, sizeof(*wm));
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 0);
- if (ret)
- return ret;
-
- if (fb->format->is_yuv && fb->format->num_planes > 1) {
- ret = skl_build_plane_wm_uv(crtc_state, plane_state,
- plane);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
- int ret;
-
- /* Watermarks calculated in master */
- if (plane_state->planar_slave)
- return 0;
-
- memset(wm, 0, sizeof(*wm));
-
- if (plane_state->planar_linked_plane) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- drm_WARN_ON(&dev_priv->drm,
- !intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
- fb->format->num_planes == 1);
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane_state->planar_linked_plane, 0);
- if (ret)
- return ret;
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 1);
- if (ret)
- return ret;
- } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int skl_build_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- int ret, i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- /*
- * FIXME: we should perhaps check {old,new}_plane_crtc->hw.crtc
- * instead, but we don't populate that correctly for NV12 Y
- * planes, so for now hack around it.
- */
- if (plane->pipe != crtc->pipe)
- continue;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- ret = icl_build_plane_wm(crtc_state, plane_state);
- else
- ret = skl_build_plane_wm(crtc_state, plane_state);
- if (ret)
- return ret;
- }
-
- crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
-
- return 0;
-}
-
-static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const struct skl_ddb_entry *entry)
-{
- if (entry->end)
- intel_de_write_fw(dev_priv, reg,
- PLANE_BUF_END(entry->end - 1) |
- PLANE_BUF_START(entry->start));
- else
- intel_de_write_fw(dev_priv, reg, 0);
-}
-
-static void skl_write_wm_level(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const struct skl_wm_level *level)
-{
- u32 val = 0;
-
- if (level->enable)
- val |= PLANE_WM_EN;
- if (level->ignore_lines)
- val |= PLANE_WM_IGNORE_LINES;
- val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
- val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
-
- intel_de_write_fw(dev_priv, reg, val);
-}
-
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- for (level = 0; level <= max_level; level++)
- skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
- &wm->sagv.wm0);
- skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb);
-
- if (DISPLAY_VER(dev_priv) < 11)
- skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
-}
-
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
-
- for (level = 0; level <= max_level; level++)
- skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
- &wm->sagv.wm0);
- skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
-}
-
-static bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2)
-{
- return l1->enable == l2->enable &&
- l1->ignore_lines == l2->ignore_lines &&
- l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
-}
-
-static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
- const struct skl_plane_wm *wm1,
- const struct skl_plane_wm *wm2)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- /*
- * We don't check uv_wm as the hardware doesn't actually
- * use it. It only gets used for calculating the required
- * ddb allocation.
- */
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
- return false;
- }
-
- return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
- skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
- skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
-}
-
-static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
-{
- return a->start < b->end && b->start < a->end;
-}
-
-static void skl_ddb_entry_union(struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
-{
- if (a->end && b->end) {
- a->start = min(a->start, b->start);
- a->end = max(a->end, b->end);
- } else if (b->end) {
- a->start = b->start;
- a->end = b->end;
- }
-}
-
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry *entries,
- int num_entries, int ignore_idx)
-{
- int i;
-
- for (i = 0; i < num_entries; i++) {
- if (i != ignore_idx &&
- skl_ddb_entries_overlap(ddb, &entries[i]))
- return true;
- }
-
- return false;
-}
-
-static int
-skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
- enum plane_id plane_id = plane->id;
-
- if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
- &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
- skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- new_crtc_state->update_planes |= BIT(plane_id);
- }
-
- return 0;
-}
-
-static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
-{
- struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
- u8 enabled_slices;
- enum pipe pipe;
-
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- enabled_slices = BIT(DBUF_S1);
-
- for_each_pipe(dev_priv, pipe)
- enabled_slices |= dbuf_state->slices[pipe];
-
- return enabled_slices;
-}
-
-static int
-skl_compute_ddb(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *old_dbuf_state;
- struct intel_dbuf_state *new_dbuf_state = NULL;
- const struct intel_crtc_state *old_crtc_state;
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int ret, i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- break;
- }
-
- if (!new_dbuf_state)
- return 0;
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- if (HAS_MBUS_JOINING(dev_priv))
- new_dbuf_state->joined_mbus =
- adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum pipe pipe = crtc->pipe;
-
- new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
- new_dbuf_state->joined_mbus);
-
- if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
- continue;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
- old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes(state);
- if (ret)
- return ret;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
- old_dbuf_state->enabled_slices,
- new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
- str_yes_no(old_dbuf_state->joined_mbus),
- str_yes_no(new_dbuf_state->joined_mbus));
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- enum pipe pipe = crtc->pipe;
-
- new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
-
- if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
- continue;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- ret = skl_crtc_allocate_ddb(state, crtc);
- if (ret)
- return ret;
- }
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_crtc_allocate_plane_ddb(state, crtc);
- if (ret)
- return ret;
-
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static char enast(bool enable)
-{
- return enable ? '*' : ' ';
-}
-
-static void
-skl_print_wm_changes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_crtc_state *old_crtc_state;
- const struct intel_crtc_state *new_crtc_state;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- int i;
-
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
-
- old_pipe_wm = &old_crtc_state->wm.skl.optimal;
- new_pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- enum plane_id plane_id = plane->id;
- const struct skl_ddb_entry *old, *new;
-
- old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
- new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
-
- if (skl_ddb_entry_equal(old, new))
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
- plane->base.base.id, plane->base.name,
- old->start, old->end, new->start, new->end,
- skl_ddb_entry_size(old), skl_ddb_entry_size(new));
- }
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- enum plane_id plane_id = plane->id;
- const struct skl_plane_wm *old_wm, *new_wm;
-
- old_wm = &old_pipe_wm->planes[plane_id];
- new_wm = &new_pipe_wm->planes[plane_id];
-
- if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
- enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
- enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
- enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
- enast(old_wm->trans_wm.enable),
- enast(old_wm->sagv.wm0.enable),
- enast(old_wm->sagv.trans_wm.enable),
- enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
- enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
- enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
- enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
- enast(new_wm->trans_wm.enable),
- enast(new_wm->sagv.wm0.enable),
- enast(new_wm->sagv.trans_wm.enable));
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
- enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
- enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
- enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
- enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].blocks, old_wm->wm[1].blocks,
- old_wm->wm[2].blocks, old_wm->wm[3].blocks,
- old_wm->wm[4].blocks, old_wm->wm[5].blocks,
- old_wm->wm[6].blocks, old_wm->wm[7].blocks,
- old_wm->trans_wm.blocks,
- old_wm->sagv.wm0.blocks,
- old_wm->sagv.trans_wm.blocks,
- new_wm->wm[0].blocks, new_wm->wm[1].blocks,
- new_wm->wm[2].blocks, new_wm->wm[3].blocks,
- new_wm->wm[4].blocks, new_wm->wm[5].blocks,
- new_wm->wm[6].blocks, new_wm->wm[7].blocks,
- new_wm->trans_wm.blocks,
- new_wm->sagv.wm0.blocks,
- new_wm->sagv.trans_wm.blocks);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv.wm0.min_ddb_alloc,
- old_wm->sagv.trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv.wm0.min_ddb_alloc,
- new_wm->sagv.trans_wm.min_ddb_alloc);
- }
- }
-}
-
-static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
- const struct skl_pipe_wm *old_pipe_wm,
- const struct skl_pipe_wm *new_pipe_wm)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
-
- for (level = 0; level <= max_level; level++) {
- /*
- * We don't check uv_wm as the hardware doesn't actually
- * use it. It only gets used for calculating the required
- * ddb allocation.
- */
- if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
- skl_plane_wm_level(new_pipe_wm, plane->id, level)))
- return false;
- }
-
- if (HAS_HW_SAGV_WM(i915)) {
- const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
- const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
-
- if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
- !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
- return false;
- }
-
- return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
- skl_plane_trans_wm(new_pipe_wm, plane->id));
-}
-
-/*
- * To make sure the cursor watermark registers are always consistent
- * with our computed state the following scenario needs special
- * treatment:
- *
- * 1. enable cursor
- * 2. move cursor entirely offscreen
- * 3. disable cursor
- *
- * Step 2. does call .disable_plane() but does not zero the watermarks
- * (since we consider an offscreen cursor still active for the purposes
- * of watermarks). Step 3. would not normally call .disable_plane()
- * because the actual plane visibility isn't changing, and we don't
- * deallocate the cursor ddb until the pipe gets disabled. So we must
- * force step 3. to call .disable_plane() to update the watermark
- * registers properly.
- *
- * Other planes do not suffer from this issue as their watermarks are
- * calculated based on the actual plane visibility. The only time this
- * can trigger for the other planes is during the initial readout as the
- * default value of the watermarks registers is not zero.
- */
-static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
- enum plane_id plane_id = plane->id;
-
- /*
- * Force a full wm update for every plane on modeset.
- * Required because the reset value of the wm registers
- * is non-zero, whereas we want all disabled planes to
- * have zero watermarks. So if we turn off the relevant
- * power well the hardware state will go out of sync
- * with the software state.
- */
- if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
- skl_plane_selected_wm_equals(plane,
- &old_crtc_state->wm.skl.optimal,
- &new_crtc_state->wm.skl.optimal))
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- new_crtc_state->update_planes |= BIT(plane_id);
- }
-
- return 0;
-}
-
-static int
-skl_compute_wm(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- int ret, i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = skl_build_pipe_wm(state, crtc);
- if (ret)
- return ret;
- }
-
- ret = skl_compute_ddb(state);
- if (ret)
- return ret;
-
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
- /*
- * skl_compute_ddb() will have adjusted the final watermarks
- * based on how much ddb is available. Now we can actually
- * check if the final watermarks changed.
- */
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
-
- skl_print_wm_changes(state);
-
- return 0;
-}
-
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
@@ -6459,10 +3583,10 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
@@ -6475,210 +3599,17 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
-{
- level->enable = val & PLANE_WM_EN;
- level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
- level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
- level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
-}
-
-static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int level, max_level;
- enum plane_id plane_id;
- u32 val;
-
- max_level = ilk_wm_max_level(dev_priv);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_plane_wm *wm = &out->planes[plane_id];
-
- for (level = 0; level <= max_level; level++) {
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
- else
- val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
-
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
- }
-
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore,
- PLANE_WM_SAGV(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore,
- CUR_WM_SAGV(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
-
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore,
- PLANE_WM_SAGV_TRANS(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore,
- CUR_WM_SAGV_TRANS(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- wm->sagv.wm0 = wm->wm[0];
- wm->sagv.trans_wm = wm->trans_wm;
- }
- }
-}
-
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- struct intel_crtc *crtc;
-
- if (HAS_MBUS_JOINING(dev_priv))
- dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- enum pipe pipe = crtc->pipe;
- unsigned int mbus_offset;
- enum plane_id plane_id;
- u8 slices;
-
- skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
- crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
-
- memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
- plane_id, ddb, ddb_y);
-
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
- }
-
- dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
-
- /*
- * Used for checking overlaps, so we need absolute
- * offsets instead of MBUS relative offsets.
- */
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(dev_priv, slices);
- crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
- crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
-
- /* The slices actually used by the planes on the pipe */
- dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
- crtc->base.base.id, crtc->base.name,
- dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
- dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
- str_yes_no(dbuf_state->joined_mbus));
- }
-
- dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
- * transition between the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
@@ -6826,7 +3757,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
struct intel_crtc *crtc;
g4x_read_wm_values(dev_priv, wm);
@@ -6920,7 +3851,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
@@ -6968,12 +3899,12 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -7007,7 +3938,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -7076,7 +4007,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
@@ -7117,7 +4048,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/*
@@ -7138,7 +4069,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc *crtc;
ilk_init_lp_watermarks(dev_priv);
@@ -7167,168 +4098,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
-void intel_wm_state_verify(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct intel_plane *plane;
- u8 hw_enabled_slices;
-
- if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->dbuf.enabled_slices)
- drm_err(&dev_priv->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
-}
-
-void intel_enable_ipc(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!HAS_IPC(dev_priv))
- return;
-
- val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
-
- if (dev_priv->ipc_enabled)
- val |= DISP_IPC_ENABLE;
- else
- val &= ~DISP_IPC_ENABLE;
-
- intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
-}
-
-static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
-{
- /* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv))
- return false;
-
- /* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- return dev_priv->dram_info.symmetric_memory;
-
- return true;
-}
-
-void intel_init_ipc(struct drm_i915_private *dev_priv)
-{
- if (!HAS_IPC(dev_priv))
- return;
-
- dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
-
- intel_enable_ipc(dev_priv);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -7436,7 +4205,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (dev_priv->vbt.fdi_rx_polarity_inverted)
+ if (dev_priv->display.vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
@@ -7587,9 +4356,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
- if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
+ /* Wa_1409120013 */
+ if (DISPLAY_VER(dev_priv) == 12)
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
@@ -7966,7 +4734,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7977,7 +4745,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0);
intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
@@ -8169,18 +4937,14 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
}
-static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
-};
-
-static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
+static const struct intel_wm_funcs ilk_wm_funcs = {
.compute_pipe_wm = ilk_compute_pipe_wm,
.compute_intermediate_wm = ilk_compute_intermediate_wm,
.initial_watermarks = ilk_initial_watermarks,
.optimize_watermarks = ilk_optimize_watermarks,
};
-static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
+static const struct intel_wm_funcs vlv_wm_funcs = {
.compute_pipe_wm = vlv_compute_pipe_wm,
.compute_intermediate_wm = vlv_compute_intermediate_wm,
.initial_watermarks = vlv_initial_watermarks,
@@ -8188,67 +4952,67 @@ static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
.atomic_update_watermarks = vlv_atomic_update_fifo,
};
-static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
+static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_pipe_wm = g4x_compute_pipe_wm,
.compute_intermediate_wm = g4x_compute_intermediate_wm,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
};
-static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
+static const struct intel_wm_funcs pnv_wm_funcs = {
.update_wm = pnv_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
+static const struct intel_wm_funcs i965_wm_funcs = {
.update_wm = i965_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
+static const struct intel_wm_funcs i9xx_wm_funcs = {
.update_wm = i9xx_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
+static const struct intel_wm_funcs i845_wm_funcs = {
.update_wm = i845_update_wm,
};
-static const struct drm_i915_wm_disp_funcs nop_funcs = {
+static const struct intel_wm_funcs nop_funcs = {
};
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ skl_wm_init(dev_priv);
+ return;
+ }
+
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
pnv_get_mem_freq(dev_priv);
else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
- intel_sagv_init(dev_priv);
-
/* For FIFO watermark updates */
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &skl_wm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
- dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->wm_disp = &ilk_wm_funcs;
+ if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
+ dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
+ (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
+ dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
} else {
drm_dbg_kms(&dev_priv->drm,
"Failed to read display plane latency. "
"Disable CxSR\n");
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &vlv_wm_funcs;
+ dev_priv->display.funcs.wm = &vlv_wm_funcs;
} else if (IS_G4X(dev_priv)) {
g4x_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &g4x_wm_funcs;
+ dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
@@ -8262,22 +5026,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
} else
- dev_priv->wm_disp = &pnv_wm_funcs;
+ dev_priv->display.funcs.wm = &pnv_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->wm_disp = &i965_wm_funcs;
+ dev_priv->display.funcs.wm = &i965_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->wm_disp = &i9xx_wm_funcs;
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 2) {
if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->wm_disp = &i845_wm_funcs;
+ dev_priv->display.funcs.wm = &i845_wm_funcs;
else
- dev_priv->wm_disp = &i9xx_wm_funcs;
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
} else {
drm_err(&dev_priv->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
}
}
@@ -8286,183 +5050,3 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
-{
- struct intel_dbuf_state *dbuf_state;
-
- dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
- if (!dbuf_state)
- return NULL;
-
- return &dbuf_state->base;
-}
-
-static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
- struct intel_global_state *state)
-{
- kfree(state);
-}
-
-static const struct intel_global_state_funcs intel_dbuf_funcs = {
- .atomic_duplicate_state = intel_dbuf_duplicate_state,
- .atomic_destroy_state = intel_dbuf_destroy_state,
-};
-
-struct intel_dbuf_state *
-intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_global_state *dbuf_state;
-
- dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
- if (IS_ERR(dbuf_state))
- return ERR_CAST(dbuf_state);
-
- return to_intel_dbuf_state(dbuf_state);
-}
-
-int intel_dbuf_init(struct drm_i915_private *dev_priv)
-{
- struct intel_dbuf_state *dbuf_state;
-
- dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
- if (!dbuf_state)
- return -ENOMEM;
-
- intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
- &dbuf_state->base, &intel_dbuf_funcs);
-
- return 0;
-}
-
-/*
- * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before
- * update the request state of all DBUS slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
- enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- if (!HAS_MBUS_JOINING(dev_priv))
- return;
-
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
- } else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
- }
-
- intel_de_rmw(dev_priv, MBUS_CTL,
- MBUS_HASHING_MODE_MASK | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
- for_each_dbuf_slice(dev_priv, slice)
- intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
- && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(dev_priv,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
- && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- gen9_dbuf_slices_update(dev_priv,
- new_dbuf_state->enabled_slices);
-}
-
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
- const struct intel_crtc *crtc;
- u32 val = 0;
- int i;
-
- if (DISPLAY_VER(i915) < 11)
- return;
-
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
- return;
-
- if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
- val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
- val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
- }
-
- /* Wa_22010947358:adl-p */
- if (IS_ALDERLAKE_P(i915))
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
- else
- val |= MBUS_DBOX_A_CREDIT(2);
-
- if (IS_ALDERLAKE_P(i915)) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(12);
- } else {
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.active ||
- !intel_crtc_needs_modeset(new_crtc_state))
- continue;
-
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val);
- }
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 945503ae493e..c09b872d65c8 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,22 +8,9 @@
#include <linux/types.h>
-#include "display/intel_display.h"
-#include "display/intel_global_state.h"
-
-#include "i915_drv.h"
-
-struct drm_device;
struct drm_i915_private;
-struct i915_request;
-struct intel_atomic_state;
-struct intel_bw_state;
-struct intel_crtc;
struct intel_crtc_state;
-struct intel_plane;
-struct skl_ddb_entry;
-struct skl_pipe_wm;
-struct skl_wm_level;
+struct intel_plane_state;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
@@ -34,56 +21,14 @@ void intel_pm_setup(struct drm_i915_private *dev_priv);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void intel_wm_state_verify(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state);
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry *entry);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-void skl_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
- const struct intel_bw_state *bw_state);
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
-void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry *entries,
- int num_entries, int ignore_idx);
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[]);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
-struct intel_dbuf_state {
- struct intel_global_state base;
-
- struct skl_ddb_entry ddb[I915_MAX_PIPES];
- unsigned int weight[I915_MAX_PIPES];
- u8 slices[I915_MAX_PIPES];
- u8 enabled_slices;
- u8 active_pipes;
- bool joined_mbus;
-};
-
-struct intel_dbuf_state *
-intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
-#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
-#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
-
-int intel_dbuf_init(struct drm_i915_private *dev_priv);
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
-
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a852c471d1b3..5cd423c7b646 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
#include "gt/intel_engine_regs.h"
@@ -44,29 +45,47 @@ fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
}
void
-intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{
- spin_lock_init(&mmio_debug->lock);
- mmio_debug->unclaimed_mmio_check = 1;
+ spin_lock_init(&i915->mmio_debug.lock);
+ i915->mmio_debug.unclaimed_mmio_check = 1;
+
+ i915->uncore.debug = &i915->mmio_debug;
}
-static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+static void mmio_debug_suspend(struct intel_uncore *uncore)
{
- lockdep_assert_held(&mmio_debug->lock);
+ if (!uncore->debug)
+ return;
+
+ spin_lock(&uncore->debug->lock);
/* Save and disable mmio debugging for the user bypass */
- if (!mmio_debug->suspend_count++) {
- mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
- mmio_debug->unclaimed_mmio_check = 0;
+ if (!uncore->debug->suspend_count++) {
+ uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
+ uncore->debug->unclaimed_mmio_check = 0;
}
+
+ spin_unlock(&uncore->debug->lock);
}
-static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
+
+static void mmio_debug_resume(struct intel_uncore *uncore)
{
- lockdep_assert_held(&mmio_debug->lock);
+ if (!uncore->debug)
+ return;
+
+ spin_lock(&uncore->debug->lock);
+
+ if (!--uncore->debug->suspend_count)
+ uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
+
+ if (check_for_unclaimed_mmio(uncore))
+ drm_info(&uncore->i915->drm,
+ "Invalid mmio detected during user access\n");
- if (!--mmio_debug->suspend_count)
- mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+ spin_unlock(&uncore->debug->lock);
}
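The suspend/resume pair above boils down to a refcounted save-and-restore: the first suspender stashes unclaimed_mmio_check and zeroes it, and only the last resumer puts the saved value back. A minimal userspace sketch of that pattern, with invented names and the locking left out, might look like this:

#include <stdio.h>

struct example_mmio_debug {
	int unclaimed_check;	/* non-zero = checking enabled */
	int saved_check;
	int suspend_count;
};

static void example_debug_suspend(struct example_mmio_debug *d)
{
	/* first suspender saves the current setting and disables checking */
	if (!d->suspend_count++) {
		d->saved_check = d->unclaimed_check;
		d->unclaimed_check = 0;
	}
}

static void example_debug_resume(struct example_mmio_debug *d)
{
	/* only the last resumer restores the saved setting */
	if (!--d->suspend_count)
		d->unclaimed_check = d->saved_check;
}

int main(void)
{
	struct example_mmio_debug d = { .unclaimed_check = 1 };

	example_debug_suspend(&d);
	example_debug_suspend(&d);		/* nested user bypass */
	example_debug_resume(&d);
	printf("%d ", d.unclaimed_check);	/* still 0: one suspender left */
	example_debug_resume(&d);
	printf("%d\n", d.unclaimed_check);	/* back to 1 */
	return 0;
}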
static const char * const forcewake_domain_names[] = {
@@ -112,8 +131,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
* trying to reset here does exist at this point (engines could be fused
* off in ICL+), so no waiting for acks
*/
- /* WaRsClearFWBitsAtReset:bdw,skl */
- fw_clear(d, 0xffff);
+ /* WaRsClearFWBitsAtReset */
+ if (GRAPHICS_VER(d->uncore->i915) >= 12)
+ fw_clear(d, 0xefff);
+ else
+ fw_clear(d, 0xffff);
}
static inline void
@@ -674,9 +696,7 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
spin_lock_irq(&uncore->lock);
if (!uncore->user_forcewake_count++) {
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
- spin_lock(&uncore->debug->lock);
- mmio_debug_suspend(uncore->debug);
- spin_unlock(&uncore->debug->lock);
+ mmio_debug_suspend(uncore);
}
spin_unlock_irq(&uncore->lock);
}
@@ -692,14 +712,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
if (!--uncore->user_forcewake_count) {
- spin_lock(&uncore->debug->lock);
- mmio_debug_resume(uncore->debug);
-
- if (check_for_unclaimed_mmio(uncore))
- drm_info(&uncore->i915->drm,
- "Invalid mmio detected during user access\n");
- spin_unlock(&uncore->debug->lock);
-
+ mmio_debug_resume(uncore);
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
spin_unlock_irq(&uncore->lock);
@@ -915,6 +928,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
const struct intel_forcewake_range *entry;
+ if (IS_GSI_REG(offset))
+ offset += uncore->gsi_offset;
+
entry = BSEARCH(offset,
uncore->fw_domains_table,
uncore->fw_domains_table_entries,
@@ -1130,6 +1146,9 @@ static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
return false;
+ if (IS_GSI_REG(offset))
+ offset += uncore->gsi_offset;
+
return BSEARCH(offset,
uncore->shadowed_reg_table,
uncore->shadowed_reg_table_entries,
@@ -1701,7 +1720,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (likely(!uncore->i915->params.mmio_debug))
+ if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
return;
/* interrupts are disabled and re-enabled around uncore->lock usage */
@@ -1982,8 +2001,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->uncore = uncore;
d->wake_count = 0;
- d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
- d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
d->id = domain_id;
@@ -2067,7 +2086,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
if (GRAPHICS_VER(i915) >= 11) {
/* we'll prune the domains of missing engines later */
- intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
+ intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask;
int i;
uncore->fw_get_funcs = &uncore_get_fallback;
@@ -2220,6 +2239,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
+{
+ iounmap(regs);
+}
+
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
struct drm_i915_private *i915 = uncore->i915;
@@ -2232,14 +2256,15 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
* clobbering the GTT which we want ioremap_wc instead. Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
- * For dgfx chips register range is expanded to 4MB.
+ * For dgfx chips register range is expanded to 4MB, and this larger
+ * range is also used for integrated gpus beginning with Meteor Lake.
*/
- if (GRAPHICS_VER(i915) < 5)
- mmio_size = 512 * 1024;
- else if (IS_DGFX(i915))
+ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
mmio_size = 4 * 1024 * 1024;
- else
+ else if (GRAPHICS_VER(i915) >= 5)
mmio_size = 2 * 1024 * 1024;
+ else
+ mmio_size = 512 * 1024;
uncore->regs = ioremap(phys_addr, mmio_size);
if (uncore->regs == NULL) {
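The reworked size selection above keys off IS_DGFX() and the IP version. Purely as an illustration (hypothetical helper, plain integers standing in for the driver's version macros), the same three-way decision can be sketched and checked on its own:

#include <stdbool.h>
#include <stdio.h>

static unsigned int pick_mmio_size(bool is_dgfx, unsigned int ver, unsigned int rel)
{
	/* discrete GPUs, and integrated GPUs from IP version 12.70 on, map 4MB */
	if (is_dgfx || ver > 12 || (ver == 12 && rel >= 70))
		return 4 * 1024 * 1024;
	/* Ironlake and newer integrated parts map 2MB */
	if (ver >= 5)
		return 2 * 1024 * 1024;
	/* older generations share the BAR with the GTT, so map only 512KB */
	return 512 * 1024;
}

int main(void)
{
	printf("%u %u %u\n",
	       pick_mmio_size(false, 12, 70),	/* 4194304 */
	       pick_mmio_size(false, 9, 0),	/* 2097152 */
	       pick_mmio_size(false, 4, 0));	/* 524288 */
	return 0;
}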
@@ -2247,12 +2272,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
return -EIO;
}
- return 0;
-}
-
-void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
-{
- iounmap(uncore->regs);
+ return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
}
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2262,7 +2282,6 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
uncore->i915 = gt->i915;
uncore->gt = gt;
uncore->rpm = &gt->i915->runtime_pm;
- uncore->debug = &gt->i915->mmio_debug;
}
static void uncore_raw_init(struct intel_uncore *uncore)
@@ -2442,8 +2461,11 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
}
}
-void intel_uncore_fini_mmio(struct intel_uncore *uncore)
+/* Called via drm-managed action */
+void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
+ struct intel_uncore *uncore = data;
+
if (intel_uncore_has_forcewake(uncore)) {
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
@@ -2573,6 +2595,9 @@ bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret;
+ if (!uncore->debug)
+ return false;
+
spin_lock_irq(&uncore->debug->lock);
ret = check_for_unclaimed_mmio(uncore);
spin_unlock_irq(&uncore->debug->lock);
@@ -2585,6 +2610,9 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
+ if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
+ return false;
+
spin_lock_irq(&uncore->debug->lock);
if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b1fa912a65e7..5022bac80b67 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -33,6 +33,7 @@
#include "i915_reg_defs.h"
+struct drm_device;
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
@@ -135,6 +136,16 @@ struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ /*
+ * Do we need to apply an additional offset to reach the beginning
+ * of the basic non-engine GT registers (referred to as "GSI" on
+ * newer platforms, or "GT block" on older platforms)? If so, we'll
+ * track that here and apply it transparently to registers in the
+ * appropriate range to maintain compatibility with our existing
+ * register definitions and GT code.
+ */
+ u32 gsi_offset;
+
unsigned int flags;
#define UNCORE_HAS_FORCEWAKE BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
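To make the comment above concrete: the remap is applied only to offsets below the GSI boundary, and everything above it is used as-is. A small self-contained sketch of that idea (the helper name and the offset value are made up; the 0x40000 cutoff matches the IS_GSI_REG() check added later in this patch):

#include <stdio.h>

#define EXAMPLE_GSI_BOUNDARY 0x40000u	/* same cutoff as IS_GSI_REG() below */

struct example_uncore {
	unsigned int gsi_offset;	/* stays 0 on platforms without a GSI remap */
};

/* only the non-engine "GSI" range is relocated; engine registers pass through */
static unsigned int example_effective_offset(const struct example_uncore *uncore,
					     unsigned int reg)
{
	if (reg < EXAMPLE_GSI_BOUNDARY)
		reg += uncore->gsi_offset;
	return reg;
}

int main(void)
{
	struct example_uncore uncore = { .gsi_offset = 0x100000 };	/* made-up value */

	printf("0x%x 0x%x\n",
	       example_effective_offset(&uncore, 0x2030),	/* remapped to 0x102030 */
	       example_effective_offset(&uncore, 0x1c0000));	/* unchanged */
	return 0;
}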
@@ -210,8 +221,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-void
-intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
+void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
@@ -221,7 +231,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
-void intel_uncore_fini_mmio(struct intel_uncore *uncore);
+void intel_uncore_fini_mmio(struct drm_device *dev, void *data);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);
@@ -294,19 +304,27 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
2, timeout_ms, NULL);
}
+#define IS_GSI_REG(reg) ((reg) < 0x40000)
+
/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
i915_reg_t reg) \
{ \
- return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+ u32 offset = i915_mmio_reg_offset(reg); \
+ if (IS_GSI_REG(offset)) \
+ offset += uncore->gsi_offset; \
+ return read##s__(uncore->regs + offset); \
}
#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
i915_reg_t reg, u##x__ val) \
{ \
- write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+ u32 offset = i915_mmio_reg_offset(reg); \
+ if (IS_GSI_REG(offset)) \
+ offset += uncore->gsi_offset; \
+ write##s__(val, uncore->regs + offset); \
}
__raw_read(8, b)
__raw_read(16, w)
@@ -447,6 +465,18 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
+/*
+ * The raw_reg_{read,write} macros are intended as a micro-optimization for
+ * interrupt handlers so that the pointer indirection on uncore->regs can
+ * be computed once (and presumably cached in a register) instead of generating
+ * extra load instructions for each MMIO access.
+ *
+ * Given that these macros are only intended for non-GSI interrupt registers
+ * (and the goal is to avoid extra instructions generated by the compiler),
+ * these macros do not account for uncore->gsi_offset. Any caller that needs
+ * to use these macros on a GSI register is responsible for adding the
+ * appropriate GSI offset to the 'base' parameter.
+ */
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
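A compilable sketch of the calling convention described above, with a fake register file and invented names: the base pointer is computed once, and a caller that needs a GSI-range register folds the GSI offset into that base itself, because the raw helpers will not do it.

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the driver's raw_reg_read()/raw_reg_write() macros */
#define example_raw_read(base, off)      (*(volatile uint32_t *)((base) + (off)))
#define example_raw_write(base, off, v)  (*(volatile uint32_t *)((base) + (off)) = (v))

int main(void)
{
	static uint32_t fake_mmio[256];			/* pretend register file */
	uint8_t *regs = (uint8_t *)fake_mmio;		/* cached once, as an IRQ handler would */
	const uint32_t gsi_offset = 0x80;		/* illustrative only */

	/* non-GSI interrupt register: use the cached base directly */
	example_raw_write(regs, 0x10, 0x1234);

	/* GSI-range register: the caller adds the offset to the base itself */
	example_raw_write(regs + gsi_offset, 0x10, 0xabcd);

	printf("0x%x 0x%x\n",
	       example_raw_read(regs, 0x10),
	       example_raw_read(regs + gsi_offset, 0x10));
	return 0;
}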
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 15311eaed848..69cdaaddc4a9 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -169,11 +169,23 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
* We want to get the same effect as if we received a termination
* interrupt, so just pretend that we did.
*/
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST;
queue_work(system_unbound_wq, &pxp->session_work);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
+}
+
+static bool pxp_component_bound(struct intel_pxp *pxp)
+{
+ bool bound = false;
+
+ mutex_lock(&pxp->tee_mutex);
+ if (pxp->pxp_component)
+ bound = true;
+ mutex_unlock(&pxp->tee_mutex);
+
+ return bound;
}
/*
@@ -187,6 +199,9 @@ int intel_pxp_start(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return -ENODEV;
+ if (wait_for(pxp_component_bound(pxp), 250))
+ return -ENXIO;
+
mutex_lock(&pxp->arb_mutex);
if (pxp->arb_is_valid)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index e888b5124a07..4359e8be4101 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -47,9 +47,9 @@ static int pxp_terminate_set(void *data, u64 val)
return -ENODEV;
/* simulate a termination interrupt */
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
if (!wait_for_completion_timeout(&pxp->termination,
msecs_to_jiffies(100)))
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 04745f914407..c28be430718a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
return;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
if (unlikely(!iir))
return;
@@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
static inline void pxp_irq_reset(struct intel_gt *gt)
{
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void intel_pxp_irq_enable(struct intel_pxp *pxp)
{
struct intel_gt *gt = pxp_to_gt(pxp);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
if (!pxp->irq_enabled)
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
@@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp)
__pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
pxp->irq_enabled = true;
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void intel_pxp_irq_disable(struct intel_pxp *pxp)
@@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp)
*/
GEM_WARN_ON(intel_pxp_is_active(pxp));
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
pxp->irq_enabled = false;
__pxp_set_interrupts(gt, 0);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
pxp_irq_reset(gt);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 92b00b4de240..1bb5b5249157 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work)
intel_wakeref_t wakeref;
u32 events = 0;
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
events = fetch_and_zero(&pxp->session_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
if (!events)
return;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ab9f17fc85bc..e050a2de5fd1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1080,7 +1080,7 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL;
- obj = i915_gem_object_create_region(mr, size, 0, 0);
+ obj = i915_gem_object_create_region(mr, size, 0, I915_BO_ALLOC_GPU_ONLY);
if (IS_ERR(obj)) {
/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
if (PTR_ERR(obj) == -ENODEV && is_stolen)
@@ -2324,5 +2324,5 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index bdd290f2bf3c..aaf8a380e5c7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -49,5 +49,6 @@ selftest(perf, i915_perf_live_selftests)
selftest(slpc, intel_slpc_live_selftests)
selftest(guc, intel_guc_live_selftests)
selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
+selftest(guc_hang, intel_guc_hang_check)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c
index 88db2e3d81d0..429c6d73b159 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_perf.c
@@ -431,7 +431,7 @@ int i915_perf_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = i915_subtests(tests, i915);
+ err = i915_live_subtests(tests, i915);
destroy_empty_config(&i915->perf);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index c56a0c2cd2f7..818a4909c1f3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -971,7 +971,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err)
goto err;
- /* Force the wait wait now to avoid including it in the benchmark */
+ /* Force the wait now to avoid including it in the benchmark */
err = i915_vma_sync(vma);
if (err)
goto err_pin;
@@ -1821,7 +1821,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
static int switch_to_kernel_sync(struct intel_context *ce, int err)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 6921ba128015..71b52d5efef4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -51,9 +51,9 @@ static bool assert_vma(struct i915_vma *vma,
ok = false;
}
- if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
pr_err("VMA created with wrong type [%d]\n",
- vma->ggtt_view.type);
+ vma->gtt_view.type);
ok = false;
}
@@ -63,7 +63,7 @@ static bool assert_vma(struct i915_vma *vma,
static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+ const struct i915_gtt_view *view)
{
struct i915_vma *vma;
bool ok = true;
@@ -91,7 +91,7 @@ checked_vma_instance(struct drm_i915_gem_object *obj,
}
if (i915_vma_compare(vma, vma->vm,
- i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
+ i915_vma_is_ggtt(vma) ? &vma->gtt_view : NULL)) {
pr_err("i915_vma_compare failed with itself\n");
return ERR_PTR(-EINVAL);
}
@@ -530,12 +530,12 @@ assert_remapped(struct drm_i915_gem_object *obj,
return sg;
}
-static unsigned int remapped_size(enum i915_ggtt_view_type view_type,
+static unsigned int remapped_size(enum i915_gtt_view_type view_type,
const struct intel_remapped_plane_info *a,
const struct intel_remapped_plane_info *b)
{
- if (view_type == I915_GGTT_VIEW_ROTATED)
+ if (view_type == I915_GTT_VIEW_ROTATED)
return a->dst_stride * a->width + b->dst_stride * b->width;
else
return a->dst_stride * a->height + b->dst_stride * b->height;
@@ -569,9 +569,9 @@ static int igt_vma_rotate_remap(void *arg)
{ }
}, *a, *b;
- enum i915_ggtt_view_type types[] = {
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_REMAPPED,
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
0,
}, *t;
const unsigned int max_pages = 64;
@@ -588,7 +588,7 @@ static int igt_vma_rotate_remap(void *arg)
for (t = types; *t; t++) {
for (a = planes; a->width; a++) {
for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
- struct i915_ggtt_view view = {
+ struct i915_gtt_view view = {
.type = *t,
.remapped.plane[0] = *a,
.remapped.plane[1] = *b,
@@ -602,11 +602,11 @@ static int igt_vma_rotate_remap(void *arg)
max_offset = max_pages - max_offset;
if (!plane_info[0].dst_stride)
- plane_info[0].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
plane_info[0].height :
plane_info[0].width;
if (!plane_info[1].dst_stride)
- plane_info[1].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+ plane_info[1].dst_stride = view.type == I915_GTT_VIEW_ROTATED ?
plane_info[1].height :
plane_info[1].width;
@@ -630,7 +630,7 @@ static int igt_vma_rotate_remap(void *arg)
expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]);
- if (view.type == I915_GGTT_VIEW_ROTATED &&
+ if (view.type == I915_GTT_VIEW_ROTATED &&
vma->size != expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * expected_pages, vma->size);
@@ -638,7 +638,7 @@ static int igt_vma_rotate_remap(void *arg)
goto out_object;
}
- if (view.type == I915_GGTT_VIEW_REMAPPED &&
+ if (view.type == I915_GTT_VIEW_REMAPPED &&
vma->size > expected_pages * PAGE_SIZE) {
pr_err("VMA is wrong size, expected %lu, found %llu\n",
PAGE_SIZE * expected_pages, vma->size);
@@ -668,13 +668,13 @@ static int igt_vma_rotate_remap(void *arg)
sg = vma->pages->sgl;
for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
- if (view.type == I915_GGTT_VIEW_ROTATED)
+ if (view.type == I915_GTT_VIEW_ROTATED)
sg = assert_rotated(obj, &view.rotated, n, sg);
else
sg = assert_remapped(obj, &view.remapped, n, sg);
if (IS_ERR(sg)) {
pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n",
- view.type == I915_GGTT_VIEW_ROTATED ?
+ view.type == I915_GTT_VIEW_ROTATED ?
"rotated" : "remapped", n,
plane_info[0].width,
plane_info[0].height,
@@ -741,7 +741,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj,
}
static bool assert_pin(struct i915_vma *vma,
- struct i915_ggtt_view *view,
+ struct i915_gtt_view *view,
u64 size,
const char *name)
{
@@ -759,8 +759,8 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
- if (view && view->type != I915_GGTT_VIEW_NORMAL) {
- if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
+ if (view && view->type != I915_GTT_VIEW_NORMAL) {
+ if (memcmp(&vma->gtt_view, view, sizeof(*view))) {
pr_err("(%s) VMA mismatch upon creation!\n",
name);
ok = false;
@@ -772,9 +772,9 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
} else {
- if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) {
pr_err("Not the normal ggtt view! Found %d\n",
- vma->ggtt_view.type);
+ vma->gtt_view.type);
ok = false;
}
@@ -818,14 +818,14 @@ static int igt_vma_partial(void *arg)
nvma = 0;
for_each_prime_number_from(sz, 1, npages) {
for_each_prime_number_from(offset, 0, npages - sz) {
- struct i915_ggtt_view view;
+ struct i915_gtt_view view;
- view.type = I915_GGTT_VIEW_PARTIAL;
+ view.type = I915_GTT_VIEW_PARTIAL;
view.partial.offset = offset;
view.partial.size = sz;
if (sz == npages)
- view.type = I915_GGTT_VIEW_NORMAL;
+ view.type = I915_GTT_VIEW_NORMAL;
vma = checked_vma_instance(obj, vm, &view);
if (IS_ERR(vma)) {
@@ -976,9 +976,9 @@ static int igt_vma_remapped_gtt(void *arg)
{ }
}, *p;
- enum i915_ggtt_view_type types[] = {
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_REMAPPED,
+ enum i915_gtt_view_type types[] = {
+ I915_GTT_VIEW_ROTATED,
+ I915_GTT_VIEW_REMAPPED,
0,
}, *t;
struct drm_i915_gem_object *obj;
@@ -996,7 +996,7 @@ static int igt_vma_remapped_gtt(void *arg)
for (t = types; *t; t++) {
for (p = planes; p->width; p++) {
- struct i915_ggtt_view view = {
+ struct i915_gtt_view view = {
.type = *t,
.rotated.plane[0] = *p,
};
@@ -1012,7 +1012,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
if (!plane_info[0].dst_stride)
- plane_info[0].dst_stride = *t == I915_GGTT_VIEW_ROTATED ?
+ plane_info[0].dst_stride = *t == I915_GTT_VIEW_ROTATED ?
p->height : p->width;
vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
@@ -1021,7 +1021,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- GEM_BUG_ON(vma->ggtt_view.type != *t);
+ GEM_BUG_ON(vma->gtt_view.type != *t);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -1035,7 +1035,7 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int offset;
u32 val = y << 16 | x;
- if (*t == I915_GGTT_VIEW_ROTATED)
+ if (*t == I915_GTT_VIEW_ROTATED)
offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
else
offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
@@ -1052,7 +1052,7 @@ static int igt_vma_remapped_gtt(void *arg)
goto out;
}
- GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL);
+ GEM_BUG_ON(vma->gtt_view.type != I915_GTT_VIEW_NORMAL);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -1067,7 +1067,7 @@ static int igt_vma_remapped_gtt(void *arg)
u32 exp = y << 16 | x;
u32 val;
- if (*t == I915_GGTT_VIEW_ROTATED)
+ if (*t == I915_GTT_VIEW_ROTATED)
src_idx = rotated_index(&view.rotated, 0, x, y);
else
src_idx = remapped_index(&view.remapped, 0, x, y);
@@ -1076,7 +1076,7 @@ static int igt_vma_remapped_gtt(void *arg)
val = ioread32(&map[offset / sizeof(*map)]);
if (val != exp) {
pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
- *t == I915_GGTT_VIEW_ROTATED ? "Rotated" : "Remapped",
+ *t == I915_GTT_VIEW_ROTATED ? "Rotated" : "Remapped",
exp, val);
i915_vma_unpin_iomap(vma);
err = -EINVAL;
@@ -1103,5 +1103,5 @@ int i915_vma_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_vma_remapped_gtt),
};
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9c31a16f8380..fff11c90f1fa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -115,6 +115,7 @@ static struct dev_pm_domain pm_domain = {
static void mock_gt_probe(struct drm_i915_private *i915)
{
i915->gt[0] = &i915->gt0;
+ i915->gt[0]->name = "Mock GT";
}
struct drm_i915_private *mock_gem_device(void)
@@ -172,14 +173,14 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- mkwrite_device_info(i915)->graphics.ver = -1;
+ RUNTIME_INFO(i915)->graphics.ip.ver = -1;
- mkwrite_device_info(i915)->page_sizes =
+ RUNTIME_INFO(i915)->page_sizes =
I915_GTT_PAGE_SIZE_4K |
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
intel_memory_regions_hw_probe(i915);
spin_lock_init(&i915->gpu_error.lock);
@@ -209,7 +210,7 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(to_gt(i915));
to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
- mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+ RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index bb9738c7c825..975de4ff7313 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -3,7 +3,7 @@ config DRM_IMX
tristate "DRM Support for Freescale i.MX"
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 5c2b2277afbf..3ffc061d392b 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,7 +2,7 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 8cf3352d8858..b4f82ebca532 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -8,7 +8,7 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -17,7 +17,7 @@
#include "dcss-dev.h"
#include "dcss-kms.h"
-DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
+DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops);
static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
@@ -28,7 +28,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
static const struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index c29f343f33e5..ab6d32bad756 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -6,10 +6,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
@@ -147,7 +147,7 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = new_plane_state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_crtc_state *crtc_state;
int hdisplay, vdisplay;
int min, max;
@@ -156,8 +156,8 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
if (!fb || !new_plane_state->crtc)
return 0;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- WARN_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ WARN_ON(!dma_obj);
crtc_state = drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
@@ -218,26 +218,26 @@ static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
unsigned long p1_ba = 0, p2_ba = 0;
if (!format->is_yuv ||
format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
- p1_ba = cma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
format->char_per_block[0] * (state->src.x1 >> 16);
else if (format->format == DRM_FORMAT_UYVY ||
format->format == DRM_FORMAT_VYUY ||
format->format == DRM_FORMAT_YUYV ||
format->format == DRM_FORMAT_YVYU)
- p1_ba = cma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
2 * format->char_per_block[0] * (state->src.x1 >> 17);
if (format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
- p2_ba = cma_obj->paddr + fb->offsets[1] +
+ p2_ba = dma_obj->dma_addr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
(state->src.x1 >> 17)) << 1);
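The dcss changes above are typical of the tree-wide rename from the "CMA" GEM helpers to the "DMA" GEM helpers: drm_gem_cma_object becomes drm_gem_dma_object, its paddr field becomes dma_addr, drm_fb_cma_get_gem_obj() becomes drm_fb_dma_get_gem_obj(), and the Kconfig select moves from DRM_GEM_CMA_HELPER to DRM_GEM_DMA_HELPER. A condensed sketch (demo_* names are placeholders) of how a plane hook derives a scanout address with the renamed helpers:

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>

/* Sketch: compute the plane 0 scanout address for a plane state. */
static dma_addr_t demo_plane_base_addr(struct drm_plane_state *state)
{
        struct drm_framebuffer *fb = state->fb;
        struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
        int x = state->src.x1 >> 16;
        int y = state->src.y1 >> 16;

        /* Same arithmetic as before the rename, only ->paddr is now ->dma_addr. */
        return dma_obj->dma_addr + fb->offsets[0] +
               fb->pitches[0] * y + fb->format->cpp[0] * x;
}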
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index a57812ec36b1..8dd8b0f912af 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -16,13 +16,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -34,7 +32,7 @@
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
-DEFINE_DRM_GEM_CMA_FOPS(imx_drm_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops);
void imx_drm_connector_destroy(struct drm_connector *connector)
{
@@ -154,7 +152,7 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
args->width = ALIGN(width, 8);
- ret = drm_gem_cma_dumb_create(file_priv, drm, args);
+ ret = drm_gem_dma_dumb_create(file_priv, drm, args);
if (ret)
return ret;
@@ -164,7 +162,7 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
static const struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create),
.ioctls = imx_drm_ioctls,
.num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
.fops = &imx_drm_driver_fops,
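imx-drm keeps its own dumb_create so it can align the buffer width before delegating to the generic helper; after the rename the delegate is drm_gem_dma_dumb_create() and it is wired up via DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(). A minimal sketch of that pattern, simplified from the hunk above (demo_* names and the date string are placeholders):

#include <linux/kernel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_dma_helper.h>

DEFINE_DRM_GEM_DMA_FOPS(demo_fops);

/* Tweak the dumb-buffer geometry, then let the DMA helper do the allocation. */
static int demo_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
                            struct drm_mode_create_dumb *args)
{
        args->width = ALIGN(args->width, 8);

        return drm_gem_dma_dumb_create(file_priv, drm, args);
}

static const struct drm_driver demo_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops = &demo_fops,
        DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(demo_dumb_create),
        .name = "demo",
        .desc = "demo driver",
        .date = "20221001",
        .major = 1,
        .minor = 0,
};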
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index c3e1a3f14d30..e721bebda2bd 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -32,7 +32,7 @@ extern struct platform_driver ipu_drm_driver;
void imx_drm_mode_config_init(struct drm_device *drm);
-struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
+struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index f7863d6dea80..5f26090b0c98 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -18,8 +18,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index ea5f594955df..dba4f7d81d69 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -8,13 +8,12 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <video/imx-ipu-v3.h>
@@ -126,14 +125,14 @@ static inline unsigned long
drm_plane_state_to_eba(struct drm_plane_state *state, int plane)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, plane);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, plane);
+ BUG_ON(!dma_obj);
- return cma_obj->paddr + fb->offsets[plane] + fb->pitches[plane] * y +
+ return dma_obj->dma_addr + fb->offsets[plane] + fb->pitches[plane] * y +
fb->format->cpp[plane] * x;
}
@@ -141,18 +140,18 @@ static inline unsigned long
drm_plane_state_to_ubo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
unsigned long eba = drm_plane_state_to_eba(state, 0);
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 1);
+ BUG_ON(!dma_obj);
x /= fb->format->hsub;
y /= fb->format->vsub;
- return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ return dma_obj->dma_addr + fb->offsets[1] + fb->pitches[1] * y +
fb->format->cpp[1] * x - eba;
}
@@ -160,18 +159,18 @@ static inline unsigned long
drm_plane_state_to_vbo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
unsigned long eba = drm_plane_state_to_eba(state, 0);
int x = state->src.x1 >> 16;
int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
- BUG_ON(!cma_obj);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 2);
+ BUG_ON(!dma_obj);
x /= fb->format->hsub;
y /= fb->format->vsub;
- return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ return dma_obj->dma_addr + fb->offsets[2] + fb->pitches[2] * y +
fb->format->cpp[2] * x - eba;
}
@@ -393,8 +392,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
can_position, true);
if (ret)
return ret;
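The ipu_plane_atomic_check() hunk above reflects another mechanical rename in this series: the no-scaling constants moved out of the plane-helper namespace, so DRM_PLANE_HELPER_NO_SCALING becomes DRM_PLANE_NO_SCALING and several files can drop <drm/drm_plane_helper.h> entirely. A minimal atomic_check sketch using the new names (demo_* is a placeholder):

#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>

static int demo_plane_atomic_check(struct drm_plane *plane,
                                   struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);
        struct drm_crtc_state *crtc_state;

        if (!new_state->crtc)
                return 0;

        crtc_state = drm_atomic_get_crtc_state(state, new_state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);

        /* Reject scaling; allow positioning and updates on a disabled CRTC. */
        return drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                   DRM_PLANE_NO_SCALING,
                                                   DRM_PLANE_NO_SCALING,
                                                   true, true);
}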
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 090830bcbde7..a53f475d33df 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -8,7 +8,7 @@ config DRM_INGENIC
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Ingenic SoCs.
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index eb8208bfe5ab..ab0515d2c420 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -30,8 +30,8 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
@@ -41,7 +41,6 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -482,8 +481,8 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(priv_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
priv->soc_info->has_osd,
true);
if (ret)
@@ -670,12 +669,12 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
if (newstate && newstate->fb) {
if (priv->soc_info->map_noncoherent)
- drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate);
+ drm_fb_dma_sync_non_coherent(&priv->drm, oldstate, newstate);
crtc_state = newstate->crtc->state;
plane_id = !!(priv->soc_info->has_osd && plane != &priv->f0);
- addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
+ addr = drm_fb_dma_get_gem_addr(newstate->fb, newstate, 0);
width = newstate->src_w >> 16;
height = newstate->src_h >> 16;
cpp = newstate->fb->format->cpp[0];
@@ -915,7 +914,7 @@ static struct drm_gem_object *
ingenic_drm_gem_create_object(struct drm_device *drm, size_t size)
{
struct ingenic_drm *priv = drm_device_get_priv(drm);
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
@@ -948,7 +947,7 @@ static void ingenic_drm_destroy_state(struct drm_private_obj *obj,
kfree(priv_state);
}
-DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ingenic_drm_fops);
static const struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -961,7 +960,7 @@ static const struct drm_driver ingenic_drm_driver_data = {
.fops = &ingenic_drm_fops,
.gem_create_object = ingenic_drm_gem_create_object,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -1464,21 +1463,22 @@ static int ingenic_drm_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused ingenic_drm_suspend(struct device *dev)
+static int ingenic_drm_suspend(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(&priv->drm);
}
-static int __maybe_unused ingenic_drm_resume(struct device *dev)
+static int ingenic_drm_resume(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(&priv->drm);
}
-static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops,
+ ingenic_drm_suspend, ingenic_drm_resume);
static const u32 jz4740_formats[] = {
DRM_FORMAT_XRGB1555,
@@ -1541,6 +1541,32 @@ static const struct jz_soc_info jz4725b_soc_info = {
.num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0),
};
+static const struct jz_soc_info jz4760_soc_info = {
+ .needs_dev_clk = false,
+ .has_osd = true,
+ .map_noncoherent = false,
+ .max_width = 1280,
+ .max_height = 720,
+ .max_burst = JZ_LCD_CTRL_BURST_32,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
+};
+
+static const struct jz_soc_info jz4760b_soc_info = {
+ .needs_dev_clk = false,
+ .has_osd = true,
+ .map_noncoherent = false,
+ .max_width = 1280,
+ .max_height = 720,
+ .max_burst = JZ_LCD_CTRL_BURST_64,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
+};
+
static const struct jz_soc_info jz4770_soc_info = {
.needs_dev_clk = false,
.has_osd = true,
@@ -1572,6 +1598,8 @@ static const struct jz_soc_info jz4780_soc_info = {
static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4740-lcd", .data = &jz4740_soc_info },
{ .compatible = "ingenic,jz4725b-lcd", .data = &jz4725b_soc_info },
+ { .compatible = "ingenic,jz4760-lcd", .data = &jz4760_soc_info },
+ { .compatible = "ingenic,jz4760b-lcd", .data = &jz4760b_soc_info },
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ .compatible = "ingenic,jz4780-lcd", .data = &jz4780_soc_info },
{ /* sentinel */ },
@@ -1581,7 +1609,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
.name = "ingenic-drm",
- .pm = pm_ptr(&ingenic_drm_pm_ops),
+ .pm = pm_sleep_ptr(&ingenic_drm_pm_ops),
.of_match_table = of_match_ptr(ingenic_drm_of_match),
},
.probe = ingenic_drm_probe,
@@ -1616,4 +1644,4 @@ module_exit(ingenic_drm_exit);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("DRM driver for the Ingenic SoCs\n");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
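Besides the helper rename, the ingenic-drm hunks above modernize the suspend/resume plumbing: the __maybe_unused annotations and the SIMPLE_DEV_PM_OPS()/pm_ptr() pair are replaced by DEFINE_SIMPLE_DEV_PM_OPS() with pm_sleep_ptr(). The new macro always references the callbacks, and pm_sleep_ptr() turns the pointer into NULL when CONFIG_PM_SLEEP is off, so the unused ops and callbacks are dropped by the compiler rather than hidden behind attributes. A stripped-down sketch with placeholder names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
        /* Quiesce the hardware here. */
        return 0;
}

static int demo_resume(struct device *dev)
{
        /* Restore the hardware here. */
        return 0;
}

/* No __maybe_unused needed: the ops struct references the callbacks even
 * when CONFIG_PM_SLEEP is disabled. */
static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_platform_driver = {
        .driver = {
                .name = "demo",
                .pm = pm_sleep_ptr(&demo_pm_ops),
        },
};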
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 32a50935aa6d..7a43505011a5 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -22,14 +22,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_property.h>
#include <drm/drm_vblank.h>
@@ -363,15 +362,15 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
}
if (ingenic_drm_map_noncoherent(ipu->master))
- drm_fb_cma_sync_non_coherent(ipu->drm, oldstate, newstate);
+ drm_fb_dma_sync_non_coherent(ipu->drm, oldstate, newstate);
/* New addresses will be committed in vblank handler... */
- ipu->addr_y = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
+ ipu->addr_y = drm_fb_dma_get_gem_addr(newstate->fb, newstate, 0);
if (finfo->num_planes > 1)
- ipu->addr_u = drm_fb_cma_get_gem_addr(newstate->fb, newstate,
+ ipu->addr_u = drm_fb_dma_get_gem_addr(newstate->fb, newstate,
1);
if (finfo->num_planes > 2)
- ipu->addr_v = drm_fb_cma_get_gem_addr(newstate->fb, newstate,
+ ipu->addr_v = drm_fb_dma_get_gem_addr(newstate->fb, newstate,
2);
if (!needs_modeset)
@@ -697,10 +696,12 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
struct drm_crtc_state *crtc_state;
+ bool mode_changed;
if (property != ipu->sharpness_prop)
return -EINVAL;
+ mode_changed = val != ipu->sharpness;
ipu->sharpness = val;
if (state->crtc) {
@@ -708,7 +709,7 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- crtc_state->mode_changed = true;
+ crtc_state->mode_changed |= mode_changed;
}
return 0;
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index 5fdd43dad507..fd011367db1d 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -3,7 +3,7 @@ config DRM_KMB_DISPLAY
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
Choose this option if you have Intel's KeemBay SOC which integrates
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 76fef0880504..2382ccb3ee99 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -433,14 +433,14 @@ static void kmb_irq_uninstall(struct drm_device *drm)
free_irq(kmb->irq_lcd, drm);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
/* GEM Operations */
.fops = &fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 89d055a089a6..a42f63f6f957 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -8,13 +8,12 @@
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include "kmb_drv.h"
#include "kmb_plane.h"
@@ -136,8 +135,8 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
can_position, true);
}
@@ -404,7 +403,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
- addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
+ addr[Y_PLANE] = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id),
addr[Y_PLANE] + fb->offsets[0]);
val = get_pixel_format(fb->format->format);
@@ -416,7 +415,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
- addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state,
+ addr[U_PLANE] = drm_fb_dma_get_gem_addr(fb, new_plane_state,
U_PLANE);
/* check if Cb/Cr is swapped*/
if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER))
@@ -437,7 +436,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
LCD_LAYERn_DMA_CR_LINE_WIDTH(plane_id),
((width) * fb->format->cpp[0]));
- addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb,
+ addr[V_PLANE] = drm_fb_dma_get_gem_addr(fb,
new_plane_state,
V_PLANE);
diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
index 300b2be07385..fa7a88368809 100644
--- a/drivers/gpu/drm/logicvc/Kconfig
+++ b/drivers/gpu/drm/logicvc/Kconfig
@@ -3,7 +3,7 @@ config DRM_LOGICVC
depends on DRM
depends on OF || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_KMS_DMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
DRM display driver for the logiCVC programmable logic block from Xylon
diff --git a/drivers/gpu/drm/logicvc/logicvc_crtc.c b/drivers/gpu/drm/logicvc/logicvc_crtc.c
index c94bb9bb456b..43a675d03808 100644
--- a/drivers/gpu/drm/logicvc/logicvc_crtc.c
+++ b/drivers/gpu/drm/logicvc/logicvc_crtc.c
@@ -12,7 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 65a050176c33..cc9a4e965f77 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -18,7 +18,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
@@ -29,9 +29,9 @@
#include "logicvc_of.h"
#include "logicvc_regs.h"
-DEFINE_DRM_GEM_CMA_FOPS(logicvc_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(logicvc_drm_fops);
-static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
+static int logicvc_drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_device *drm_dev,
struct drm_mode_create_dumb *args)
{
@@ -40,7 +40,7 @@ static int logicvc_drm_gem_cma_dumb_create(struct drm_file *file_priv,
/* Stride is always fixed to its configuration value. */
args->pitch = logicvc->config.row_stride * DIV_ROUND_UP(args->bpp, 8);
- return drm_gem_cma_dumb_create_internal(file_priv, drm_dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm_dev, args);
}
static struct drm_driver logicvc_drm_driver = {
@@ -54,7 +54,7 @@ static struct drm_driver logicvc_drm_driver = {
.major = 1,
.minor = 0,
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(logicvc_drm_gem_dma_dumb_create),
};
static struct regmap_config logicvc_drm_regmap_config = {
diff --git a/drivers/gpu/drm/logicvc/logicvc_interface.c b/drivers/gpu/drm/logicvc/logicvc_interface.c
index c73592f6c406..815cebb4c4ca 100644
--- a/drivers/gpu/drm/logicvc/logicvc_interface.c
+++ b/drivers/gpu/drm/logicvc/logicvc_interface.c
@@ -12,7 +12,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c
index 441e3cfce4cf..464000aea765 100644
--- a/drivers/gpu/drm/logicvc/logicvc_layer.c
+++ b/drivers/gpu/drm/logicvc/logicvc_layer.c
@@ -10,11 +10,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include "logicvc_crtc.h"
@@ -117,8 +116,8 @@ static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
}
}
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ min_scale = DRM_PLANE_NO_SCALING;
+ max_scale = DRM_PLANE_NO_SCALING;
can_position = (drm_plane->type == DRM_PLANE_TYPE_OVERLAY &&
layer->index != (logicvc->config.layers_count - 1) &&
@@ -158,7 +157,7 @@ static void logicvc_plane_atomic_update(struct drm_plane *drm_plane,
new_state->crtc_h - 1);
if (logicvc->caps->layer_address) {
- phys_addr_t fb_addr = drm_fb_cma_get_gem_addr(fb, new_state, 0);
+ phys_addr_t fb_addr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
regmap_write(logicvc->regmap, LOGICVC_LAYER_ADDRESS_REG(index),
fb_addr);
@@ -281,7 +280,7 @@ int logicvc_layer_buffer_find_setup(struct logicvc_drm *logicvc,
return -ENOMEM;
}
- fb_addr = drm_fb_cma_get_gem_addr(fb, state, 0);
+ fb_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
if (fb_addr < logicvc->reserved_mem_base) {
drm_err(drm_dev,
"Framebuffer memory below reserved memory base!\n");
diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.c b/drivers/gpu/drm/logicvc/logicvc_mode.c
index 11940704f644..d8207ffda1af 100644
--- a/drivers/gpu/drm/logicvc/logicvc_mode.c
+++ b/drivers/gpu/drm/logicvc/logicvc_mode.c
@@ -10,9 +10,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index d0bf1bc8da3f..4f3d68e11bc1 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -10,7 +10,7 @@ config DRM_MCDE
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the ST-Ericsson MCDE
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 4df477540d07..52043a12a2e8 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -11,11 +11,11 @@
#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
@@ -165,7 +165,7 @@ static int mcde_display_check(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
- u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3) {
@@ -1424,7 +1424,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
* from the DRM core before the display is enabled.
*/
if (fb) {
- mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
+ mcde_set_extsrc(mcde, drm_fb_dma_get_gem_addr(fb, pstate, 0));
dev_info_once(mcde->dev, "first update of display contents\n");
/*
* Usually the flow is already active, unless we are in
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index e601baa87e55..1c4482ad507d 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -37,7 +37,7 @@
* (effectively using channels 0..3) for concurrent use.
*
* In the current DRM/KMS setup, we use one external source, one overlay,
- * one FIFO and one formatter which we connect to the simple CMA framebuffer
+ * one FIFO and one formatter which we connect to the simple DMA framebuffer
* helpers. We then provide a bridge to the DSI port, and on the DSI port
bridge we hang a panel bridge or another bridge. This may be subject
* to change as we exploit more of the hardware capabilities.
@@ -68,10 +68,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
@@ -198,7 +198,7 @@ static int mcde_modeset_init(struct drm_device *drm)
return 0;
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver mcde_drm_driver = {
.driver_features =
@@ -212,7 +212,7 @@ static const struct drm_driver mcde_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static int mcde_drm_bind(struct device *dev)
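The MCDE comment above describes hooking the overlay/FIFO/formatter pipeline up to the simple DMA framebuffer helpers, and the mcde_display.c hunks show the corresponding checks now using drm_fb_dma_get_gem_addr(), including the requirement that the FB base address be dword aligned. A condensed sketch of that check in a simple-display-pipe driver (demo_* is a placeholder):

#include <linux/errno.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_simple_kms_helper.h>

/* Sketch: reject framebuffers whose scanout base is not dword aligned. */
static int demo_pipe_check(struct drm_simple_display_pipe *pipe,
                           struct drm_plane_state *pstate,
                           struct drm_crtc_state *cstate)
{
        struct drm_framebuffer *fb = pstate->fb;

        if (fb) {
                dma_addr_t base = drm_fb_dma_get_gem_addr(fb, pstate, 0);

                if (base & 3)
                        return -EINVAL;
        }

        return 0;
}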
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 2976d21e9a34..369e495d0c3e 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -7,7 +7,7 @@ config DRM_MEDIATEK
depends on HAVE_ARM_SMCCC
depends on OF
depends on MTK_MMSYS
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
@@ -21,6 +21,15 @@ config DRM_MEDIATEK
This driver provides kernel mode setting and
buffer management to userspace.
+config DRM_MEDIATEK_DP
+ tristate "DRM DPTX Support for MediaTek SoCs"
+ depends on DRM_MEDIATEK
+ select PHY_MTK_DP
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ help
+ DRM/KMS DisplayPort driver for MediaTek SoCs.
+
config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 6e604a933ed0..3517d1c65cd7 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -23,3 +23,5 @@ mediatek-drm-hdmi-objs := mtk_cec.o \
mtk_hdmi_ddc.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
+
+obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
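The remainder of this section adds the new MediaTek DisplayPort/eDP bridge driver (mtk_dp.c). Two points worth noting before the bulk of the file: per-lane analog trim values come from an eFuse nvmem cell, with hard-coded defaults used when the cell is absent or malformed (see mtk_dp_get_calibration_data() below), and all register access goes through a regmap so fields can be updated with read-modify-write helpers. A condensed, hypothetical sketch of the nvmem-with-fallback idiom (demo_read_cal() is not part of the driver):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Sketch only: read one calibration word from nvmem, fall back to a default. */
static u32 demo_read_cal(struct device *dev, u32 default_val)
{
        struct nvmem_cell *cell;
        size_t len;
        u32 *buf, val;

        cell = nvmem_cell_get(dev, "dp_calibration_data");
        if (IS_ERR(cell))
                return default_val;

        buf = nvmem_cell_read(cell, &len);
        nvmem_cell_put(cell);
        if (IS_ERR(buf) || len < sizeof(*buf)) {
                if (!IS_ERR(buf))
                        kfree(buf);
                return default_val;
        }

        val = buf[0];
        kfree(buf);
        return val;
}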
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
new file mode 100644
index 000000000000..9d085c05c49c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -0,0 +1,2663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019-2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre
+ */
+
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+#include <sound/hdmi-codec.h>
+#include <video/videomode.h>
+
+#include "mtk_dp_reg.h"
+
+#define MTK_DP_SIP_CONTROL_AARCH32 MTK_SIP_SMC_CMD(0x523)
+#define MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE (BIT(0) | BIT(5))
+#define MTK_DP_SIP_ATF_VIDEO_UNMUTE BIT(5)
+
+#define MTK_DP_THREAD_CABLE_STATE_CHG BIT(0)
+#define MTK_DP_THREAD_HPD_EVENT BIT(1)
+
+#define MTK_DP_4P1T 4
+#define MTK_DP_HDE 2
+#define MTK_DP_PIX_PER_ADDR 2
+#define MTK_DP_AUX_WAIT_REPLY_COUNT 20
+#define MTK_DP_TBC_BUF_READ_START_ADDR 0x8
+#define MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY 5
+#define MTK_DP_TRAIN_DOWNSCALE_RETRY 10
+#define MTK_DP_VERSION 0x11
+#define MTK_DP_SDP_AUI 0x4
+
+enum {
+ MTK_DP_CAL_GLB_BIAS_TRIM = 0,
+ MTK_DP_CAL_CLKTX_IMPSE,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2,
+ MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2,
+ MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3,
+ MTK_DP_CAL_MAX,
+};
+
+struct mtk_dp_train_info {
+ bool sink_ssc;
+ bool cable_plugged_in;
+ /* link_rate is in multiples of 0.27 Gbps */
+ int link_rate;
+ int lane_count;
+ unsigned int channel_eq_pattern;
+};
+
+struct mtk_dp_audio_cfg {
+ bool detect_monitor;
+ int sad_count;
+ int sample_rate;
+ int word_length_bits;
+ int channels;
+};
+
+struct mtk_dp_info {
+ enum dp_pixelformat format;
+ struct videomode vm;
+ struct mtk_dp_audio_cfg audio_cur_cfg;
+};
+
+struct mtk_dp_efuse_fmt {
+ unsigned short idx;
+ unsigned short shift;
+ unsigned short mask;
+ unsigned short min_val;
+ unsigned short max_val;
+ unsigned short default_val;
+};
+
+struct mtk_dp {
+ bool enabled;
+ bool need_debounce;
+ u8 max_lanes;
+ u8 max_linkrate;
+ u8 rx_cap[DP_RECEIVER_CAP_SIZE];
+ u32 cal_data[MTK_DP_CAL_MAX];
+ u32 irq_thread_handle;
+ /* irq_thread_lock is used to protect irq_thread_handle */
+ spinlock_t irq_thread_lock;
+
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector *conn;
+ struct drm_device *drm_dev;
+ struct drm_dp_aux aux;
+
+ const struct mtk_dp_data *data;
+ struct mtk_dp_info info;
+ struct mtk_dp_train_info train_info;
+
+ struct platform_device *phy_dev;
+ struct phy *phy;
+ struct regmap *regs;
+ struct timer_list debounce_timer;
+
+ /* For audio */
+ bool audio_enable;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct platform_device *audio_pdev;
+
+ struct device *codec_dev;
+ /* protect the plugged_cb as it's used in both bridge ops and audio */
+ struct mutex update_plugged_status_lock;
+};
+
+struct mtk_dp_data {
+ int bridge_type;
+ unsigned int smc_cmd;
+ const struct mtk_dp_efuse_fmt *efuse_fmt;
+ bool audio_supported;
+};
+
+static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 3,
+ .shift = 27,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 9,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 2,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 2,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 2,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 2,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 2,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 2,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 2,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 2,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
+static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
+ [MTK_DP_CAL_GLB_BIAS_TRIM] = {
+ .idx = 0,
+ .shift = 27,
+ .mask = 0x1f,
+ .min_val = 1,
+ .max_val = 0x1e,
+ .default_val = 0xf,
+ },
+ [MTK_DP_CAL_CLKTX_IMPSE] = {
+ .idx = 0,
+ .shift = 13,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = {
+ .idx = 1,
+ .shift = 28,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = {
+ .idx = 1,
+ .shift = 20,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = {
+ .idx = 1,
+ .shift = 12,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = {
+ .idx = 1,
+ .shift = 4,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = {
+ .idx = 1,
+ .shift = 24,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = {
+ .idx = 1,
+ .shift = 16,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = {
+ .idx = 1,
+ .shift = 8,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+ [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = {
+ .idx = 1,
+ .shift = 0,
+ .mask = 0xf,
+ .min_val = 1,
+ .max_val = 0xe,
+ .default_val = 0x8,
+ },
+};
+
+static struct regmap_config mtk_dp_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SEC_OFFSET + 0x90,
+ .name = "mtk-dp-registers",
+};
+
+static struct mtk_dp *mtk_dp_from_bridge(struct drm_bridge *b)
+{
+ return container_of(b, struct mtk_dp, bridge);
+}
+
+static u32 mtk_dp_read(struct mtk_dp *mtk_dp, u32 offset)
+{
+ u32 read_val;
+ int ret;
+
+ ret = regmap_read(mtk_dp->regs, offset, &read_val);
+ if (ret) {
+ dev_err(mtk_dp->dev, "Failed to read register 0x%x: %d\n",
+ offset, ret);
+ return 0;
+ }
+
+ return read_val;
+}
+
+static int mtk_dp_write(struct mtk_dp *mtk_dp, u32 offset, u32 val)
+{
+ int ret = regmap_write(mtk_dp->regs, offset, val);
+
+ if (ret)
+ dev_err(mtk_dp->dev,
+ "Failed to write register 0x%x with value 0x%x\n",
+ offset, val);
+ return ret;
+}
+
+static int mtk_dp_update_bits(struct mtk_dp *mtk_dp, u32 offset,
+ u32 val, u32 mask)
+{
+ int ret = regmap_update_bits(mtk_dp->regs, offset, mask, val);
+
+ if (ret)
+ dev_err(mtk_dp->dev,
+ "Failed to update register 0x%x with value 0x%x, mask 0x%x\n",
+ offset, val, mask);
+ return ret;
+}
+
+static void mtk_dp_bulk_16bit_write(struct mtk_dp *mtk_dp, u32 offset, u8 *buf,
+ size_t length)
+{
+ int i;
+
+ /* 2 bytes per register */
+ for (i = 0; i < length; i += 2) {
+ u32 val = buf[i] | (i + 1 < length ? buf[i + 1] << 8 : 0);
+
+ if (mtk_dp_write(mtk_dp, offset + i * 2, val))
+ return;
+ }
+}
+
+static void mtk_dp_msa_bypass_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ u32 mask = HTOTAL_SEL_DP_ENC0_P0 | VTOTAL_SEL_DP_ENC0_P0 |
+ HSTART_SEL_DP_ENC0_P0 | VSTART_SEL_DP_ENC0_P0 |
+ HWIDTH_SEL_DP_ENC0_P0 | VHEIGHT_SEL_DP_ENC0_P0 |
+ HSP_SEL_DP_ENC0_P0 | HSW_SEL_DP_ENC0_P0 |
+ VSP_SEL_DP_ENC0_P0 | VSW_SEL_DP_ENC0_P0;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030, enable ? 0 : mask, mask);
+}
+
+static void mtk_dp_set_msa(struct mtk_dp *mtk_dp)
+{
+ struct drm_display_mode mode;
+ struct videomode *vm = &mtk_dp->info.vm;
+
+ drm_display_mode_from_videomode(vm, &mode);
+
+ /* horizontal */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3010,
+ mode.htotal, HTOTAL_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3018,
+ vm->hsync_len + vm->hback_porch,
+ HSTART_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028,
+ vm->hsync_len, HSW_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3028,
+ 0, HSP_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3020,
+ vm->hactive, HWIDTH_SW_DP_ENC0_P0_MASK);
+
+ /* vertical */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3014,
+ mode.vtotal, VTOTAL_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_301C,
+ vm->vsync_len + vm->vback_porch,
+ VSTART_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C,
+ vm->vsync_len, VSW_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_302C,
+ 0, VSP_SW_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3024,
+ vm->vactive, VHEIGHT_SW_DP_ENC0_P0_MASK);
+
+ /* horizontal */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3064,
+ vm->hactive, HDE_NUM_LAST_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3154,
+ mode.htotal, PGEN_HTOTAL_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3158,
+ vm->hfront_porch,
+ PGEN_HSYNC_RISING_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_315C,
+ vm->hsync_len,
+ PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3160,
+ vm->hback_porch + vm->hsync_len,
+ PGEN_HFDE_START_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3164,
+ vm->hactive,
+ PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK);
+
+ /* vertical */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3168,
+ mode.vtotal,
+ PGEN_VTOTAL_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_316C,
+ vm->vfront_porch,
+ PGEN_VSYNC_RISING_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3170,
+ vm->vsync_len,
+ PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3174,
+ vm->vback_porch + vm->vsync_len,
+ PGEN_VFDE_START_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3178,
+ vm->vactive,
+ PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK);
+}
+
+static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp,
+ enum dp_pixelformat color_format)
+{
+ u32 val;
+
+ /* update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ color_format << DP_TEST_COLOR_FORMAT_SHIFT,
+ DP_TEST_COLOR_FORMAT_MASK);
+
+ switch (color_format) {
+ case DP_PIXELFORMAT_YUV422:
+ val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422;
+ break;
+ case DP_PIXELFORMAT_RGB:
+ val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB;
+ break;
+ default:
+ drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n",
+ color_format);
+ return -EINVAL;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK);
+ return 0;
+}
+
+static void mtk_dp_set_color_depth(struct mtk_dp *mtk_dp)
+{
+ /* Only support 8 bits currently */
+ /* Update MISC0 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034,
+ DP_MSA_MISC_8_BPC, DP_TEST_BIT_DEPTH_MASK);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT,
+ VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_config_mn_mode(struct mtk_dp *mtk_dp)
+{
+ /* 0: hw mode, 1: sw mode */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_set_sram_read_start(struct mtk_dp *mtk_dp, u32 val)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ val, SRAM_START_READ_THRD_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_setup_encoder(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C,
+ VIDEO_MN_GEN_EN_DP_ENC0_P0,
+ VIDEO_MN_GEN_EN_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040,
+ SDP_DOWN_CNT_DP_ENC0_P0_VAL,
+ SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364,
+ SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL,
+ SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3300,
+ VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL << 8,
+ VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364,
+ FIFO_READ_START_POINT_DP_ENC1_P0_VAL << 12,
+ FIFO_READ_START_POINT_DP_ENC1_P0_MASK);
+ mtk_dp_write(mtk_dp, MTK_DP_ENC1_P0_3368, DP_ENC1_P0_3368_VAL);
+}
+
+static void mtk_dp_pg_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3038,
+ enable ? VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK : 0,
+ VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31B0,
+ PGEN_PATTERN_SEL_VAL << 4, PGEN_PATTERN_SEL_MASK);
+}
+
+static void mtk_dp_audio_setup_channels(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ u32 channel_enable_bits;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3324,
+ AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX,
+ AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK);
+
+ /* audio channel count change reset */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4,
+ DP_ENC_DUMMY_RW_1, DP_ENC_DUMMY_RW_1);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3304,
+ AU_PRTY_REGEN_DP_ENC1_P0_MASK |
+ AU_CH_STS_REGEN_DP_ENC1_P0_MASK |
+ AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK,
+ AU_PRTY_REGEN_DP_ENC1_P0_MASK |
+ AU_CH_STS_REGEN_DP_ENC1_P0_MASK |
+ AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK);
+
+ switch (cfg->channels) {
+ case 2:
+ channel_enable_bits = AUDIO_2CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_2CH_EN_DP_ENC0_P0_MASK;
+ break;
+ case 8:
+ default:
+ channel_enable_bits = AUDIO_8CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_8CH_EN_DP_ENC0_P0_MASK;
+ break;
+ }
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088,
+ channel_enable_bits | AU_EN_DP_ENC0_P0,
+ AUDIO_2CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_2CH_EN_DP_ENC0_P0_MASK |
+ AUDIO_8CH_SEL_DP_ENC0_P0_MASK |
+ AUDIO_8CH_EN_DP_ENC0_P0_MASK |
+ AU_EN_DP_ENC0_P0);
+
+ /* audio channel count change reset */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4, 0, DP_ENC_DUMMY_RW_1);
+
+ /* enable audio reset */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_33F4,
+ DP_ENC_DUMMY_RW_1_AUDIO_RST_EN,
+ DP_ENC_DUMMY_RW_1_AUDIO_RST_EN);
+}
+
+static void mtk_dp_audio_channel_status_set(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ struct snd_aes_iec958 iec = { 0 };
+
+ switch (cfg->sample_rate) {
+ case 32000:
+ iec.status[3] = IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ iec.status[3] = IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ iec.status[3] = IEC958_AES3_CON_FS_48000;
+ break;
+ case 88200:
+ iec.status[3] = IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ iec.status[3] = IEC958_AES3_CON_FS_96000;
+ break;
+ case 192000:
+ iec.status[3] = IEC958_AES3_CON_FS_192000;
+ break;
+ default:
+ iec.status[3] = IEC958_AES3_CON_FS_NOTID;
+ break;
+ }
+
+ switch (cfg->word_length_bits) {
+ case 16:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16;
+ break;
+ case 20:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_20_16 |
+ IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ case 24:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_24_20 |
+ IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ default:
+ iec.status[4] = IEC958_AES4_CON_WORDLEN_NOTID;
+ }
+
+ /* IEC 60958 consumer channel status bits */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_308C,
+ 0, CH_STATUS_0_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3090,
+ iec.status[3] << 8, CH_STATUS_1_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3094,
+ iec.status[4], CH_STATUS_2_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_audio_sdp_asp_set_channels(struct mtk_dp *mtk_dp,
+ int channels)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_312C,
+ (min(8, channels) - 1) << 8,
+ ASP_HB2_DP_ENC0_P0_MASK | ASP_HB3_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_audio_set_divider(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30BC,
+ AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
+ AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_sdp_trigger_aui(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280,
+ MTK_DP_SDP_AUI, SDP_PACKET_TYPE_DP_ENC1_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3280,
+ SDP_PACKET_W_DP_ENC1_P0, SDP_PACKET_W_DP_ENC1_P0);
+}
+
+static void mtk_dp_sdp_set_data(struct mtk_dp *mtk_dp, u8 *data_bytes)
+{
+ mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_ENC1_P0_3200,
+ data_bytes, 0x10);
+}
+
+static void mtk_dp_sdp_set_header_aui(struct mtk_dp *mtk_dp,
+ struct dp_sdp_header *header)
+{
+ u32 db_addr = MTK_DP_ENC0_P0_30D8 + (MTK_DP_SDP_AUI - 1) * 8;
+
+ mtk_dp_bulk_16bit_write(mtk_dp, db_addr, (u8 *)header, 4);
+}
+
+static void mtk_dp_disable_sdp_aui(struct mtk_dp *mtk_dp)
+{
+ /* Disable periodic send */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc, 0,
+ 0xff << ((MTK_DP_ENC0_P0_30A8 & 3) * 8));
+}
+
+static void mtk_dp_setup_sdp_aui(struct mtk_dp *mtk_dp,
+ struct dp_sdp *sdp)
+{
+ u32 shift;
+
+ mtk_dp_sdp_set_data(mtk_dp, sdp->db);
+ mtk_dp_sdp_set_header_aui(mtk_dp, &sdp->sdp_header);
+ mtk_dp_disable_sdp_aui(mtk_dp);
+
+ shift = (MTK_DP_ENC0_P0_30A8 & 3) * 8;
+
+ mtk_dp_sdp_trigger_aui(mtk_dp);
+ /* Enable periodic sending */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A8 & 0xfffc,
+ 0x05 << shift, 0xff << shift);
+}
+
+static void mtk_dp_aux_irq_clear(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_write(mtk_dp, MTK_DP_AUX_P0_3640, DP_AUX_P0_3640_VAL);
+}
+
+static void mtk_dp_aux_set_cmd(struct mtk_dp *mtk_dp, u8 cmd, u32 addr)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3644,
+ cmd, MCU_REQUEST_COMMAND_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3648,
+ addr, MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_364C,
+ addr >> 16, MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK);
+}
+
+static void mtk_dp_aux_clear_fifo(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650,
+ MCU_ACK_TRAN_COMPLETE_AUX_TX_P0,
+ MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 |
+ PHY_FIFO_RST_AUX_TX_P0_MASK |
+ MCU_REQ_DATA_NUM_AUX_TX_P0_MASK);
+}
+
+static void mtk_dp_aux_request_ready(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3630,
+ AUX_TX_REQUEST_READY_AUX_TX_P0,
+ AUX_TX_REQUEST_READY_AUX_TX_P0);
+}
+
+static void mtk_dp_aux_fill_write_fifo(struct mtk_dp *mtk_dp, u8 *buf,
+ size_t length)
+{
+ mtk_dp_bulk_16bit_write(mtk_dp, MTK_DP_AUX_P0_3708, buf, length);
+}
+
+static void mtk_dp_aux_read_rx_fifo(struct mtk_dp *mtk_dp, u8 *buf,
+ size_t length, int read_delay)
+{
+ int read_pos;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620,
+ 0, AUX_RD_MODE_AUX_TX_P0_MASK);
+
+ for (read_pos = 0; read_pos < length; read_pos++) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3620,
+ AUX_RX_FIFO_READ_PULSE_TX_P0,
+ AUX_RX_FIFO_READ_PULSE_TX_P0);
+
+ /* Hardware needs time to update the data */
+ usleep_range(read_delay, read_delay * 2);
+ buf[read_pos] = (u8)(mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3620) &
+ AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK);
+ }
+}
+
+static void mtk_dp_aux_set_length(struct mtk_dp *mtk_dp, size_t length)
+{
+ if (length > 0) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3650,
+ (length - 1) << 12,
+ MCU_REQ_DATA_NUM_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
+ 0,
+ AUX_NO_LENGTH_AUX_TX_P0 |
+ AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
+ AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
+ } else {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
+ AUX_NO_LENGTH_AUX_TX_P0,
+ AUX_NO_LENGTH_AUX_TX_P0 |
+ AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
+ AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
+ }
+}
+
+static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read)
+{
+ int wait_reply = MTK_DP_AUX_WAIT_REPLY_COUNT;
+
+ while (--wait_reply) {
+ u32 aux_irq_status;
+
+ if (is_read) {
+ u32 fifo_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3618);
+
+ if (fifo_status &
+ (AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK |
+ AUX_RX_FIFO_FULL_AUX_TX_P0_MASK)) {
+ return 0;
+ }
+ }
+
+ aux_irq_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3640);
+ if (aux_irq_status & AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0)
+ return 0;
+
+ if (aux_irq_status & AUX_400US_TIMEOUT_IRQ_AUX_TX_P0)
+ return -ETIMEDOUT;
+
+ /* Give the hardware a chance to reach completion before retrying */
+ usleep_range(100, 500);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+ u32 addr, u8 *buf, size_t length)
+{
+ int ret;
+ u32 reply_cmd;
+
+ if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES ||
+ (cmd == DP_AUX_NATIVE_READ && !length)))
+ return -EINVAL;
+
+ if (!is_read)
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704,
+ AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0,
+ AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0);
+
+ /* We need to clear fifo and irq before sending commands to the sink device. */
+ mtk_dp_aux_clear_fifo(mtk_dp);
+ mtk_dp_aux_irq_clear(mtk_dp);
+
+ mtk_dp_aux_set_cmd(mtk_dp, cmd, addr);
+ mtk_dp_aux_set_length(mtk_dp, length);
+
+ if (!is_read) {
+ if (length)
+ mtk_dp_aux_fill_write_fifo(mtk_dp, buf, length);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3704,
+ AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK,
+ AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK);
+ }
+
+ mtk_dp_aux_request_ready(mtk_dp);
+
+ /* Wait for feedback from sink device. */
+ ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read);
+
+ reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
+ AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
+
+ if (ret || reply_cmd) {
+ u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
+ AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
+ if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
+ drm_err(mtk_dp->drm_dev,
+ "AUX Rx Aux hang, need SW reset\n");
+ return -EIO;
+ }
+
+ return -ETIMEDOUT;
+ }
+
+ if (!length) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_362C,
+ 0,
+ AUX_NO_LENGTH_AUX_TX_P0 |
+ AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK |
+ AUX_RESERVED_RW_0_AUX_TX_P0_MASK);
+ } else if (is_read) {
+ int read_delay;
+
+ if (cmd == (DP_AUX_I2C_READ | DP_AUX_I2C_MOT) ||
+ cmd == DP_AUX_I2C_READ)
+ read_delay = 500;
+ else
+ read_delay = 100;
+
+ mtk_dp_aux_read_rx_fifo(mtk_dp, buf, length, read_delay);
+ }
+
+ return 0;
+}
+
+static void mtk_dp_set_swing_pre_emphasis(struct mtk_dp *mtk_dp, int lane_num,
+ int swing_val, int preemphasis)
+{
+ u32 lane_shift = lane_num * DP_TX1_VOLT_SWING_SHIFT;
+
+ dev_dbg(mtk_dp->dev,
+ "link training: swing_val = 0x%x, pre-emphasis = 0x%x\n",
+ swing_val, preemphasis);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
+ swing_val << (DP_TX0_VOLT_SWING_SHIFT + lane_shift),
+ DP_TX0_VOLT_SWING_MASK << lane_shift);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
+ preemphasis << (DP_TX0_PRE_EMPH_SHIFT + lane_shift),
+ DP_TX0_PRE_EMPH_MASK << lane_shift);
+}
+
+static void mtk_dp_reset_swing_pre_emphasis(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_SWING_EMP,
+ 0,
+ DP_TX0_VOLT_SWING_MASK |
+ DP_TX1_VOLT_SWING_MASK |
+ DP_TX2_VOLT_SWING_MASK |
+ DP_TX3_VOLT_SWING_MASK |
+ DP_TX0_PRE_EMPH_MASK |
+ DP_TX1_PRE_EMPH_MASK |
+ DP_TX2_PRE_EMPH_MASK |
+ DP_TX3_PRE_EMPH_MASK);
+}
+
+static u32 mtk_dp_swirq_get_clear(struct mtk_dp *mtk_dp)
+{
+ u32 irq_status = mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_35D0) &
+ SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK;
+
+ if (irq_status) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8,
+ irq_status, SW_IRQ_CLR_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35C8,
+ 0, SW_IRQ_CLR_DP_TRANS_P0_MASK);
+ }
+
+ return irq_status;
+}
+
+static u32 mtk_dp_hwirq_get_clear(struct mtk_dp *mtk_dp)
+{
+ u32 irq_status = (mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3418) &
+ IRQ_STATUS_DP_TRANS_P0_MASK) >> 12;
+
+ if (irq_status) {
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
+ irq_status, IRQ_CLR_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
+ 0, IRQ_CLR_DP_TRANS_P0_MASK);
+ }
+
+ return irq_status;
+}
+
+static void mtk_dp_hwirq_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3418,
+ enable ? 0 :
+ IRQ_MASK_DP_TRANS_P0_DISC_IRQ |
+ IRQ_MASK_DP_TRANS_P0_CONN_IRQ |
+ IRQ_MASK_DP_TRANS_P0_INT_IRQ,
+ IRQ_MASK_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_initialize_settings(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_342C,
+ XTAL_FREQ_DP_TRANS_P0_DEFAULT,
+ XTAL_FREQ_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3540,
+ FEC_CLOCK_EN_MODE_DP_TRANS_P0,
+ FEC_CLOCK_EN_MODE_DP_TRANS_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_31EC,
+ AUDIO_CH_SRC_SEL_DP_ENC0_P0,
+ AUDIO_CH_SRC_SEL_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C,
+ 0, SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_IRQ_MASK,
+ IRQ_MASK_AUX_TOP_IRQ, IRQ_MASK_AUX_TOP_IRQ);
+}
+
+static void mtk_dp_initialize_hpd_detect_settings(struct mtk_dp *mtk_dp)
+{
+ u32 val;
+ /* Debounce threshold */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
+ 8, HPD_DEB_THD_DP_TRANS_P0_MASK);
+
+ val = (HPD_INT_THD_DP_TRANS_P0_LOWER_500US |
+ HPD_INT_THD_DP_TRANS_P0_UPPER_1100US) << 4;
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
+ val, HPD_INT_THD_DP_TRANS_P0_MASK);
+
+ /*
+ * Connect threshold 1.5ms + 5 x 0.1ms = 2ms
+ * Disconnect threshold 1.5ms + 5 x 0.1ms = 2ms
+ */
+ val = (5 << 8) | (5 << 12);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3410,
+ val,
+ HPD_DISC_THD_DP_TRANS_P0_MASK |
+ HPD_CONN_THD_DP_TRANS_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3430,
+ HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT,
+ HPD_INT_THD_ECO_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_initialize_aux_settings(struct mtk_dp *mtk_dp)
+{
+ /* modify timeout threshold = 0x1595 */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_360C,
+ AUX_TIMEOUT_THR_AUX_TX_P0_VAL,
+ AUX_TIMEOUT_THR_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3658,
+ 0, AUX_TX_OV_EN_AUX_TX_P0_MASK);
+ /* 25 for 26M */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3634,
+ AUX_TX_OVER_SAMPLE_RATE_FOR_26M << 8,
+ AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK);
+ /* 13 for 26M */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3614,
+ AUX_RX_UI_CNT_THR_AUX_FOR_26M,
+ AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_37C8,
+ MTK_ATOP_EN_AUX_TX_P0,
+ MTK_ATOP_EN_AUX_TX_P0);
+}
+
+static void mtk_dp_initialize_digital_settings(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_304C,
+ 0, VBID_VIDEO_MUTE_DP_ENC0_P0_MASK);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3368,
+ BS2BS_MODE_DP_ENC1_P0_VAL << 12,
+ BS2BS_MODE_DP_ENC1_P0_MASK);
+
+ /* dp tx encoder reset all sw */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0,
+ DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0);
+
+ /* Wait for sw reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3004,
+ 0, DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0);
+}
+
+static void mtk_dp_digital_sw_reset(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C,
+ DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0,
+ DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
+
+ /* Wait for sw reset to complete */
+ usleep_range(1000, 5000);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_340C,
+ 0, DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0);
+}
+
+static void mtk_dp_set_lanes(struct mtk_dp *mtk_dp, int lanes)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_35F0,
+ lanes == 0 ? 0 : DP_TRANS_DUMMY_RW_0,
+ DP_TRANS_DUMMY_RW_0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
+ lanes, LANE_NUM_DP_ENC0_P0_MASK);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_34A4,
+ lanes << 2, LANE_NUM_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_get_calibration_data(struct mtk_dp *mtk_dp)
+{
+ const struct mtk_dp_efuse_fmt *fmt;
+ struct device *dev = mtk_dp->dev;
+ struct nvmem_cell *cell;
+ u32 *cal_data = mtk_dp->cal_data;
+ u32 *buf;
+ int i;
+ size_t len;
+
+ cell = nvmem_cell_get(dev, "dp_calibration_data");
+ if (IS_ERR(cell)) {
+ dev_warn(dev, "Failed to get nvmem cell dp_calibration_data\n");
+ goto use_default_val;
+ }
+
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf) || ((len / sizeof(u32)) != 4)) {
+ dev_warn(dev, "Failed to read nvmem cell dp_calibration_data\n");
+
+ if (!IS_ERR(buf))
+ kfree(buf);
+
+ goto use_default_val;
+ }
+
+ for (i = 0; i < MTK_DP_CAL_MAX; i++) {
+ fmt = &mtk_dp->data->efuse_fmt[i];
+ cal_data[i] = (buf[fmt->idx] >> fmt->shift) & fmt->mask;
+
+ if (cal_data[i] < fmt->min_val || cal_data[i] > fmt->max_val) {
+ dev_warn(mtk_dp->dev, "Invalid efuse data, idx = %d\n", i);
+ kfree(buf);
+ goto use_default_val;
+ }
+ }
+ kfree(buf);
+
+ return;
+
+use_default_val:
+ dev_warn(mtk_dp->dev, "Use default calibration data\n");
+ for (i = 0; i < MTK_DP_CAL_MAX; i++)
+ cal_data[i] = mtk_dp->data->efuse_fmt[i].default_val;
+}
+
+static void mtk_dp_set_calibration_data(struct mtk_dp *mtk_dp)
+{
+ u32 *cal_data = mtk_dp->cal_data;
+
+ mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_DPAUX_TX,
+ cal_data[MTK_DP_CAL_CLKTX_IMPSE] << 20,
+ RG_CKM_PT0_CKTX_IMPSEL);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_GLB_BIAS_GEN_00,
+ cal_data[MTK_DP_CAL_GLB_BIAS_TRIM] << 16,
+ RG_XTP_GLB_BIAS_INTR_CTRL);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] << 12,
+ RG_XTP_LN0_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_0,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] << 16,
+ RG_XTP_LN0_TX_IMPSEL_NMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] << 12,
+ RG_XTP_LN1_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_1,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] << 16,
+ RG_XTP_LN1_TX_IMPSEL_NMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] << 12,
+ RG_XTP_LN2_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_2,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] << 16,
+ RG_XTP_LN2_TX_IMPSEL_NMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] << 12,
+ RG_XTP_LN3_TX_IMPSEL_PMOS);
+ mtk_dp_update_bits(mtk_dp, DP_PHY_LANE_TX_3,
+ cal_data[MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] << 16,
+ RG_XTP_LN3_TX_IMPSEL_NMOS);
+}
+
+static int mtk_dp_phy_configure(struct mtk_dp *mtk_dp,
+ u32 link_rate, int lane_count)
+{
+ int ret;
+ union phy_configure_opts phy_opts = {
+ .dp = {
+ .link_rate = drm_dp_bw_code_to_link_rate(link_rate) / 100,
+ .set_rate = 1,
+ .lanes = lane_count,
+ .set_lanes = 1,
+ .ssc = mtk_dp->train_info.sink_ssc,
+ }
+ };
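+ /*
+ * For reference: drm_dp_bw_code_to_link_rate() returns the rate in DRM's
+ * 10 kb/s units (540000 for HBR2), so dividing by 100 gives the Mb/s
+ * value (5400) expected by the generic PHY DP configuration.
+ */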
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, DP_PWR_STATE_BANDGAP,
+ DP_PWR_STATE_MASK);
+
+ ret = phy_configure(mtk_dp->phy, &phy_opts);
+ if (ret)
+ return ret;
+
+ mtk_dp_set_calibration_data(mtk_dp);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE, DP_PWR_STATE_MASK);
+
+ return 0;
+}
+
+static void mtk_dp_set_idle_pattern(struct mtk_dp *mtk_dp, bool enable)
+{
+ u32 val = POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK |
+ POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK |
+ POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK |
+ POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK;
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3580,
+ enable ? val : 0, val);
+}
+
+static void mtk_dp_train_set_pattern(struct mtk_dp *mtk_dp, int pattern)
+{
+ /* TPS1: stop sending the idle pattern before training starts */
+ if (pattern == 1)
+ mtk_dp_set_idle_pattern(mtk_dp, false);
+
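+ /*
+ * For reference: TPS1..TPS4 select the PATTERN1..4_EN bits (bits 12..15)
+ * of MTK_DP_TRANS_P0_3400, so BIT(pattern - 1) << 12 enables the matching
+ * pattern and pattern == 0 clears them all.
+ */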
+ mtk_dp_update_bits(mtk_dp,
+ MTK_DP_TRANS_P0_3400,
+ pattern ? BIT(pattern - 1) << 12 : 0,
+ PATTERN1_EN_DP_TRANS_P0_MASK |
+ PATTERN2_EN_DP_TRANS_P0_MASK |
+ PATTERN3_EN_DP_TRANS_P0_MASK |
+ PATTERN4_EN_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_set_enhanced_frame_mode(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
+ ENHANCED_FRAME_EN_DP_ENC0_P0,
+ ENHANCED_FRAME_EN_DP_ENC0_P0);
+}
+
+static void mtk_dp_training_set_scramble(struct mtk_dp *mtk_dp, bool enable)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TRANS_P0_3404,
+ enable ? DP_SCR_EN_DP_TRANS_P0_MASK : 0,
+ DP_SCR_EN_DP_TRANS_P0_MASK);
+}
+
+static void mtk_dp_video_mute(struct mtk_dp *mtk_dp, bool enable)
+{
+ struct arm_smccc_res res;
+ u32 val = VIDEO_MUTE_SEL_DP_ENC0_P0 |
+ (enable ? VIDEO_MUTE_SW_DP_ENC0_P0 : 0);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3000,
+ val,
+ VIDEO_MUTE_SEL_DP_ENC0_P0 |
+ VIDEO_MUTE_SW_DP_ENC0_P0);
+
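+ /* Also report the mute state to the secure firmware (TF-A) via a SIP SMC call. */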
+ arm_smccc_smc(MTK_DP_SIP_CONTROL_AARCH32,
+ mtk_dp->data->smc_cmd, enable,
+ 0, 0, 0, 0, 0, &res);
+
+ dev_dbg(mtk_dp->dev, "smc cmd: 0x%x, p1: %s, ret: 0x%lx-0x%lx\n",
+ mtk_dp->data->smc_cmd, enable ? "enable" : "disable", res.a0, res.a1);
+}
+
+static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute)
+{
+ u32 val[3];
+
+ if (mute) {
+ val[0] = VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 |
+ VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0;
+ val[1] = 0;
+ val[2] = 0;
+ } else {
+ val[0] = 0;
+ val[1] = AU_EN_DP_ENC0_P0;
+ /* Send one every two frames */
+ val[2] = 0x0F;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3030,
+ val[0],
+ VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 |
+ VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3088,
+ val[1], AU_EN_DP_ENC0_P0);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_30A4,
+ val[2], AU_TS_CFG_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_power_enable(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
+ 0, SW_RST_B_PHYD);
+
+ /* Wait for power enable */
+ usleep_range(10, 200);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_RESET_AND_PROBE,
+ SW_RST_B_PHYD, SW_RST_B_PHYD);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL, DP_PWR_STATE_MASK);
+ mtk_dp_write(mtk_dp, MTK_DP_1040,
+ RG_DPAUX_RX_VALID_DEGLITCH_EN | RG_XTP_GLB_CKDET_EN |
+ RG_DPAUX_RX_EN);
+ mtk_dp_update_bits(mtk_dp, MTK_DP_0034, 0, DA_CKM_CKTX0_EN_FORCE_EN);
+}
+
+static void mtk_dp_power_disable(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_write(mtk_dp, MTK_DP_TOP_PWR_STATE, 0);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_0034,
+ DA_CKM_CKTX0_EN_FORCE_EN, DA_CKM_CKTX0_EN_FORCE_EN);
+
+ /* Disable RX */
+ mtk_dp_write(mtk_dp, MTK_DP_1040, 0);
+ mtk_dp_write(mtk_dp, MTK_DP_TOP_MEM_PD,
+ 0x550 | FUSE_SEL | MEM_ISO_EN);
+}
+
+static void mtk_dp_initialize_priv_data(struct mtk_dp *mtk_dp)
+{
+ mtk_dp->train_info.link_rate = DP_LINK_BW_5_4;
+ mtk_dp->train_info.lane_count = mtk_dp->max_lanes;
+ mtk_dp->train_info.cable_plugged_in = false;
+
+ mtk_dp->info.format = DP_PIXELFORMAT_RGB;
+ memset(&mtk_dp->info.vm, 0, sizeof(struct videomode));
+ mtk_dp->audio_enable = false;
+}
+
+static void mtk_dp_sdp_set_down_cnt_init(struct mtk_dp *mtk_dp,
+ u32 sram_read_start)
+{
+ u32 sdp_down_cnt_init = 0;
+ struct drm_display_mode mode;
+ struct videomode *vm = &mtk_dp->info.vm;
+
+ drm_display_mode_from_videomode(vm, &mode);
+
+ if (mode.clock > 0)
+ sdp_down_cnt_init = sram_read_start *
+ mtk_dp->train_info.link_rate * 2700 * 8 /
+ (mode.clock * 4);
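+ /*
+ * Illustrative numbers (assumed, not taken from this change): with
+ * sram_read_start = 0x1f, link_rate = DP_LINK_BW_5_4 (0x14 = 20) and a
+ * 148.5 MHz pixel clock, this gives 31 * 20 * 2700 * 8 / (148500 * 4)
+ * = 22, which the switch below then raises to at least the per-lane-count
+ * minimum.
+ */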
+
+ switch (mtk_dp->train_info.lane_count) {
+ case 1:
+ sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x1A);
+ break;
+ case 2:
+ /* Case of low resolution with a high audio sample rate */
+ sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 0x10);
+ sdp_down_cnt_init += mode.vtotal <= 525 ? 4 : 0;
+ break;
+ case 4:
+ default:
+ sdp_down_cnt_init = max_t(u32, sdp_down_cnt_init, 6);
+ break;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3040,
+ sdp_down_cnt_init,
+ SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK);
+}
+
+static void mtk_dp_sdp_set_down_cnt_init_in_hblank(struct mtk_dp *mtk_dp)
+{
+ int pix_clk_mhz;
+ u32 dc_offset;
+ u32 spd_down_cnt_init = 0;
+ struct drm_display_mode mode;
+ struct videomode *vm = &mtk_dp->info.vm;
+
+ drm_display_mode_from_videomode(vm, &mode);
+
+ pix_clk_mhz = mtk_dp->info.format == DP_PIXELFORMAT_YUV420 ?
+ mode.clock / 2000 : mode.clock / 1000;
+
+ switch (mtk_dp->train_info.lane_count) {
+ case 1:
+ spd_down_cnt_init = 0x20;
+ break;
+ case 2:
+ dc_offset = (mode.vtotal <= 525) ? 0x14 : 0x00;
+ spd_down_cnt_init = 0x18 + dc_offset;
+ break;
+ case 4:
+ default:
+ dc_offset = (mode.vtotal <= 525) ? 0x08 : 0x00;
+ if (pix_clk_mhz > mtk_dp->train_info.link_rate * 27)
+ spd_down_cnt_init = 0x8;
+ else
+ spd_down_cnt_init = 0x10 + dc_offset;
+ break;
+ }
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_ENC1_P0_3364, spd_down_cnt_init,
+ SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK);
+}
+
+static void mtk_dp_setup_tu(struct mtk_dp *mtk_dp)
+{
+ u32 sram_read_start = min_t(u32, MTK_DP_TBC_BUF_READ_START_ADDR,
+ mtk_dp->info.vm.hactive /
+ mtk_dp->train_info.lane_count /
+ MTK_DP_4P1T / MTK_DP_HDE /
+ MTK_DP_PIX_PER_ADDR);
+ mtk_dp_set_sram_read_start(mtk_dp, sram_read_start);
+ mtk_dp_setup_encoder(mtk_dp);
+ mtk_dp_sdp_set_down_cnt_init_in_hblank(mtk_dp);
+ mtk_dp_sdp_set_down_cnt_init(mtk_dp, sram_read_start);
+}
+
+static void mtk_dp_set_tx_out(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_setup_tu(mtk_dp);
+}
+
+static void mtk_dp_train_update_swing_pre(struct mtk_dp *mtk_dp, int lanes,
+ u8 dpcd_adjust_req[2])
+{
+ int lane;
+
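+ /*
+ * DP_ADJUST_REQUEST_LANE0_1/LANE2_3 pack two lanes per byte: bits 1:0
+ * carry the requested voltage swing and bits 3:2 the requested
+ * pre-emphasis, with the odd lane shifted up by four bits.
+ */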
+ for (lane = 0; lane < lanes; ++lane) {
+ u8 val;
+ u8 swing;
+ u8 preemphasis;
+ int index = lane / 2;
+ int shift = lane % 2 ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 0;
+
+ swing = (dpcd_adjust_req[index] >> shift) &
+ DP_ADJUST_VOLTAGE_SWING_LANE0_MASK;
+ preemphasis = ((dpcd_adjust_req[index] >> shift) &
+ DP_ADJUST_PRE_EMPHASIS_LANE0_MASK) >>
+ DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT;
+ val = swing << DP_TRAIN_VOLTAGE_SWING_SHIFT |
+ preemphasis << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (swing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
+ val |= DP_TRAIN_MAX_SWING_REACHED;
+ if (preemphasis == 3)
+ val |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ mtk_dp_set_swing_pre_emphasis(mtk_dp, lane, swing, preemphasis);
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_LANE0_SET + lane,
+ val);
+ }
+}
+
+static void mtk_dp_pattern(struct mtk_dp *mtk_dp, bool is_tps1)
+{
+ int pattern;
+ unsigned int aux_offset;
+
+ if (is_tps1) {
+ pattern = 1;
+ aux_offset = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1;
+ } else {
+ aux_offset = mtk_dp->train_info.channel_eq_pattern;
+
+ switch (mtk_dp->train_info.channel_eq_pattern) {
+ case DP_TRAINING_PATTERN_4:
+ pattern = 4;
+ break;
+ case DP_TRAINING_PATTERN_3:
+ pattern = 3;
+ aux_offset |= DP_LINK_SCRAMBLING_DISABLE;
+ break;
+ case DP_TRAINING_PATTERN_2:
+ default:
+ pattern = 2;
+ aux_offset |= DP_LINK_SCRAMBLING_DISABLE;
+ break;
+ }
+ }
+
+ mtk_dp_train_set_pattern(mtk_dp, pattern);
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET, aux_offset);
+}
+
+static int mtk_dp_train_setting(struct mtk_dp *mtk_dp, u8 target_link_rate,
+ u8 target_lane_count)
+{
+ int ret;
+
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LINK_BW_SET, target_link_rate);
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_LANE_COUNT_SET,
+ target_lane_count | DP_LANE_COUNT_ENHANCED_FRAME_EN);
+
+ if (mtk_dp->train_info.sink_ssc)
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+
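+ /*
+ * Note: the lane-count register fields are programmed with
+ * lane_count / 2, i.e. 1, 2 or 4 lanes become 0, 1 or 2.
+ */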
+ mtk_dp_set_lanes(mtk_dp, target_lane_count / 2);
+ ret = mtk_dp_phy_configure(mtk_dp, target_link_rate, target_lane_count);
+ if (ret)
+ return ret;
+
+ dev_dbg(mtk_dp->dev,
+ "Link train target_link_rate = 0x%x, target_lane_count = 0x%x\n",
+ target_link_rate, target_lane_count);
+
+ return 0;
+}
+
+static int mtk_dp_train_cr(struct mtk_dp *mtk_dp, u8 target_lane_count)
+{
+ u8 lane_adjust[2] = {};
+ u8 link_status[DP_LINK_STATUS_SIZE] = {};
+ u8 prev_lane_adjust = 0xff;
+ int train_retries = 0;
+ int voltage_retries = 0;
+
+ mtk_dp_pattern(mtk_dp, true);
+
+ /* In DP spec 1.4, the retry count of CR is defined as 10. */
+ do {
+ train_retries++;
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+ return -ENODEV;
+ }
+
+ drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+ lane_adjust, sizeof(lane_adjust));
+ mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count,
+ lane_adjust);
+
+ drm_dp_link_train_clock_recovery_delay(&mtk_dp->aux,
+ mtk_dp->rx_cap);
+
+ /* check link status from sink device */
+ drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status);
+ if (drm_dp_clock_recovery_ok(link_status,
+ target_lane_count)) {
+ dev_dbg(mtk_dp->dev, "Link train CR pass\n");
+ return 0;
+ }
+
+ /*
+ * Per DP spec 1.4, if the current voltage level is the same
+ * as the previous one, retry up to five times.
+ */
+ if (prev_lane_adjust == link_status[4]) {
+ voltage_retries++;
+ /*
+ * CR fails when either:
+ * 1. CR does not pass after retrying the same voltage
+ * level more than five times, or
+ * 2. CR does not pass while the voltage level is unchanged
+ * and already at the maximum level (3).
+ */
+ if (voltage_retries > MTK_DP_TRAIN_VOLTAGE_LEVEL_RETRY ||
+ (prev_lane_adjust & DP_ADJUST_VOLTAGE_SWING_LANE0_MASK) == 3) {
+ dev_dbg(mtk_dp->dev, "Link train CR fail\n");
+ break;
+ }
+ } else {
+ /*
+ * If the voltage level changed, restart this
+ * retry count.
+ */
+ voltage_retries = 0;
+ }
+ prev_lane_adjust = link_status[4];
+ } while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY);
+
+ /* CR training failed; disable the training pattern. */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+
+ return -ETIMEDOUT;
+}
+
+static int mtk_dp_train_eq(struct mtk_dp *mtk_dp, u8 target_lane_count)
+{
+ u8 lane_adjust[2] = {};
+ u8 link_status[DP_LINK_STATUS_SIZE] = {};
+ int train_retries = 0;
+
+ mtk_dp_pattern(mtk_dp, false);
+
+ do {
+ train_retries++;
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+ return -ENODEV;
+ }
+
+ drm_dp_dpcd_read(&mtk_dp->aux, DP_ADJUST_REQUEST_LANE0_1,
+ lane_adjust, sizeof(lane_adjust));
+ mtk_dp_train_update_swing_pre(mtk_dp, target_lane_count,
+ lane_adjust);
+
+ drm_dp_link_train_channel_eq_delay(&mtk_dp->aux,
+ mtk_dp->rx_cap);
+
+ /* check link status from sink device */
+ drm_dp_dpcd_read_link_status(&mtk_dp->aux, link_status);
+ if (drm_dp_channel_eq_ok(link_status, target_lane_count)) {
+ dev_dbg(mtk_dp->dev, "Link train EQ pass\n");
+
+ /* Training is done; disable the training pattern. */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+ return 0;
+ }
+ dev_dbg(mtk_dp->dev, "Link train EQ fail\n");
+ } while (train_retries < MTK_DP_TRAIN_DOWNSCALE_RETRY);
+
+ /* EQ training failed; disable the training pattern. */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+ mtk_dp_train_set_pattern(mtk_dp, 0);
+
+ return -ETIMEDOUT;
+}
+
+static int mtk_dp_parse_capabilities(struct mtk_dp *mtk_dp)
+{
+ u8 val;
+ ssize_t ret;
+
+ drm_dp_read_dpcd_caps(&mtk_dp->aux, mtk_dp->rx_cap);
+
+ if (drm_dp_tps4_supported(mtk_dp->rx_cap))
+ mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_4;
+ else if (drm_dp_tps3_supported(mtk_dp->rx_cap))
+ mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_3;
+ else
+ mtk_dp->train_info.channel_eq_pattern = DP_TRAINING_PATTERN_2;
+
+ mtk_dp->train_info.sink_ssc = drm_dp_max_downspread(mtk_dp->rx_cap);
+
+ ret = drm_dp_dpcd_readb(&mtk_dp->aux, DP_MSTM_CAP, &val);
+ if (ret < 1) {
+ drm_err(mtk_dp->drm_dev, "Read mstm cap failed\n");
+ return ret == 0 ? -EIO : ret;
+ }
+
+ if (val & DP_MST_CAP) {
+ /* Clear DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
+ ret = drm_dp_dpcd_readb(&mtk_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ &val);
+ if (ret < 1) {
+ drm_err(mtk_dp->drm_dev, "Read irq vector failed\n");
+ return ret == 0 ? -EIO : ret;
+ }
+
+ if (val)
+ drm_dp_dpcd_writeb(&mtk_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
+ val);
+ }
+
+ return 0;
+}
+
+static bool mtk_dp_edid_parse_audio_capabilities(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ if (!mtk_dp->data->audio_supported)
+ return false;
+
+ if (mtk_dp->info.audio_cur_cfg.sad_count <= 0) {
+ drm_info(mtk_dp->drm_dev, "No SADs available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void mtk_dp_train_change_mode(struct mtk_dp *mtk_dp)
+{
+ phy_reset(mtk_dp->phy);
+ mtk_dp_reset_swing_pre_emphasis(mtk_dp);
+}
+
+static int mtk_dp_training(struct mtk_dp *mtk_dp)
+{
+ int ret;
+ u8 lane_count, link_rate, train_limit, max_link_rate;
+
+ link_rate = min_t(u8, mtk_dp->max_linkrate,
+ mtk_dp->rx_cap[DP_MAX_LINK_RATE]);
+ max_link_rate = link_rate;
+ lane_count = min_t(u8, mtk_dp->max_lanes,
+ drm_dp_max_lane_count(mtk_dp->rx_cap));
+
+ /*
+ * The TPS are generated by the hardware pattern generator, which
+ * requires scrambling to be disabled before the TPS pattern
+ * generator is used.
+ */
+ mtk_dp_training_set_scramble(mtk_dp, false);
+
+ for (train_limit = 6; train_limit > 0; train_limit--) {
+ mtk_dp_train_change_mode(mtk_dp);
+
+ ret = mtk_dp_train_setting(mtk_dp, link_rate, lane_count);
+ if (ret)
+ return ret;
+
+ ret = mtk_dp_train_cr(mtk_dp, lane_count);
+ if (ret == -ENODEV) {
+ return ret;
+ } else if (ret) {
+ /* reduce link rate */
+ switch (link_rate) {
+ case DP_LINK_BW_1_62:
+ lane_count = lane_count / 2;
+ link_rate = max_link_rate;
+ if (lane_count == 0)
+ return -EIO;
+ break;
+ case DP_LINK_BW_2_7:
+ link_rate = DP_LINK_BW_1_62;
+ break;
+ case DP_LINK_BW_5_4:
+ link_rate = DP_LINK_BW_2_7;
+ break;
+ case DP_LINK_BW_8_1:
+ link_rate = DP_LINK_BW_5_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+ continue;
+ }
+
+ ret = mtk_dp_train_eq(mtk_dp, lane_count);
+ if (ret == -ENODEV) {
+ return ret;
+ } else if (ret) {
+ /* reduce lane count */
+ if (lane_count == 0)
+ return -EIO;
+ lane_count /= 2;
+ continue;
+ }
+
+ /* If we reach this point, training is done. */
+ break;
+ }
+
+ if (train_limit == 0)
+ return -ETIMEDOUT;
+
+ mtk_dp->train_info.link_rate = link_rate;
+ mtk_dp->train_info.lane_count = lane_count;
+
+ /*
+ * After training is done, output the normal stream instead of TPS,
+ * so scrambling must be re-enabled.
+ */
+ mtk_dp_training_set_scramble(mtk_dp, true);
+ mtk_dp_set_enhanced_frame_mode(mtk_dp);
+
+ return 0;
+}
+
+static void mtk_dp_video_enable(struct mtk_dp *mtk_dp, bool enable)
+{
+ /* The mute sequence differs between enable and disable */
+ if (enable) {
+ mtk_dp_msa_bypass_enable(mtk_dp, false);
+ mtk_dp_pg_enable(mtk_dp, false);
+ mtk_dp_set_tx_out(mtk_dp);
+ mtk_dp_video_mute(mtk_dp, false);
+ } else {
+ mtk_dp_video_mute(mtk_dp, true);
+ mtk_dp_pg_enable(mtk_dp, true);
+ mtk_dp_msa_bypass_enable(mtk_dp, true);
+ }
+}
+
+static void mtk_dp_audio_sdp_setup(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ struct dp_sdp sdp;
+ struct hdmi_audio_infoframe frame;
+
+ hdmi_audio_infoframe_init(&frame);
+ frame.coding_type = HDMI_AUDIO_CODING_TYPE_PCM;
+ frame.channels = cfg->channels;
+ frame.sample_frequency = cfg->sample_rate;
+
+ switch (cfg->word_length_bits) {
+ case 16:
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ break;
+ case 20:
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_20;
+ break;
+ case 24:
+ default:
+ frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_24;
+ break;
+ }
+
+ hdmi_audio_infoframe_pack_for_dp(&frame, &sdp, MTK_DP_VERSION);
+
+ mtk_dp_audio_sdp_asp_set_channels(mtk_dp, cfg->channels);
+ mtk_dp_setup_sdp_aui(mtk_dp, &sdp);
+}
+
+static void mtk_dp_audio_setup(struct mtk_dp *mtk_dp,
+ struct mtk_dp_audio_cfg *cfg)
+{
+ mtk_dp_audio_sdp_setup(mtk_dp, cfg);
+ mtk_dp_audio_channel_status_set(mtk_dp, cfg);
+
+ mtk_dp_audio_setup_channels(mtk_dp, cfg);
+ mtk_dp_audio_set_divider(mtk_dp);
+}
+
+static int mtk_dp_video_config(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_config_mn_mode(mtk_dp);
+ mtk_dp_set_msa(mtk_dp);
+ mtk_dp_set_color_depth(mtk_dp);
+ return mtk_dp_set_color_format(mtk_dp, mtk_dp->info.format);
+}
+
+static void mtk_dp_init_port(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_set_idle_pattern(mtk_dp, true);
+ mtk_dp_initialize_priv_data(mtk_dp);
+
+ mtk_dp_initialize_settings(mtk_dp);
+ mtk_dp_initialize_aux_settings(mtk_dp);
+ mtk_dp_initialize_digital_settings(mtk_dp);
+
+ mtk_dp_update_bits(mtk_dp, MTK_DP_AUX_P0_3690,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0,
+ RX_REPLY_COMPLETE_MODE_AUX_TX_P0);
+ mtk_dp_initialize_hpd_detect_settings(mtk_dp);
+
+ mtk_dp_digital_sw_reset(mtk_dp);
+}
+
+static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev)
+{
+ struct mtk_dp *mtk_dp = dev;
+ unsigned long flags;
+ u32 status;
+
+ if (mtk_dp->need_debounce && mtk_dp->train_info.cable_plugged_in)
+ msleep(100);
+
+ spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags);
+ status = mtk_dp->irq_thread_handle;
+ mtk_dp->irq_thread_handle = 0;
+ spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
+
+ if (status & MTK_DP_THREAD_CABLE_STATE_CHG) {
+ drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
+
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ mtk_dp_disable_sdp_aui(mtk_dp);
+ memset(&mtk_dp->info.audio_cur_cfg, 0,
+ sizeof(mtk_dp->info.audio_cur_cfg));
+
+ mtk_dp->need_debounce = false;
+ mod_timer(&mtk_dp->debounce_timer,
+ jiffies + msecs_to_jiffies(100) - 1);
+ }
+ }
+
+ if (status & MTK_DP_THREAD_HPD_EVENT)
+ dev_dbg(mtk_dp->dev, "Receive IRQ from sink devices\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_dp_hpd_event(int hpd, void *dev)
+{
+ struct mtk_dp *mtk_dp = dev;
+ bool cable_sta_chg = false;
+ unsigned long flags;
+ u32 irq_status = mtk_dp_swirq_get_clear(mtk_dp) |
+ mtk_dp_hwirq_get_clear(mtk_dp);
+
+ if (!irq_status)
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&mtk_dp->irq_thread_lock, flags);
+
+ if (irq_status & MTK_DP_HPD_INTERRUPT)
+ mtk_dp->irq_thread_handle |= MTK_DP_THREAD_HPD_EVENT;
+
+ /* Cable state has changed. */
+ if (irq_status != MTK_DP_HPD_INTERRUPT) {
+ mtk_dp->irq_thread_handle |= MTK_DP_THREAD_CABLE_STATE_CHG;
+ cable_sta_chg = true;
+ }
+
+ spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
+
+ if (cable_sta_chg) {
+ mtk_dp->train_info.cable_plugged_in =
+ !!(mtk_dp_read(mtk_dp, MTK_DP_TRANS_P0_3414) &
+ HPD_DB_DP_TRANS_P0_MASK);
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
+ struct platform_device *pdev)
+{
+ struct device_node *endpoint;
+ struct device *dev = &pdev->dev;
+ int ret;
+ void __iomem *base;
+ u32 linkrate;
+ int len;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mtk_dp->regs = devm_regmap_init_mmio(dev, base, &mtk_dp_regmap_config);
+ if (IS_ERR(mtk_dp->regs))
+ return PTR_ERR(mtk_dp->regs);
+
+ endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
+ len = of_property_count_elems_of_size(endpoint,
+ "data-lanes", sizeof(u32));
+ if (len < 0 || len > 4 || len == 3) {
+ dev_err(dev, "invalid data lane size: %d\n", len);
+ return -EINVAL;
+ }
+
+ mtk_dp->max_lanes = len;
+
+ ret = device_property_read_u32(dev, "max-linkrate-mhz", &linkrate);
+ if (ret) {
+ dev_err(dev, "failed to read max linkrate: %d\n", ret);
+ return ret;
+ }
+
+ mtk_dp->max_linkrate = drm_dp_link_rate_to_bw_code(linkrate * 100);
+
+ return 0;
+}
+
+static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp)
+{
+ mutex_lock(&mtk_dp->update_plugged_status_lock);
+ if (mtk_dp->plugged_cb && mtk_dp->codec_dev)
+ mtk_dp->plugged_cb(mtk_dp->codec_dev,
+ mtk_dp->enabled &
+ mtk_dp->info.audio_cur_cfg.detect_monitor);
+ mutex_unlock(&mtk_dp->update_plugged_status_lock);
+}
+
+static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ enum drm_connector_status ret = connector_status_disconnected;
+ bool enabled = mtk_dp->enabled;
+ u8 sink_count = 0;
+
+ if (!mtk_dp->train_info.cable_plugged_in)
+ return ret;
+
+ if (!enabled) {
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ /* power on panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ }
+ /*
+ * Some dongles still assert HPD even when no sink device is
+ * connected. To handle this, read the sink count to confirm that
+ * a sink is actually present. After this detect function, checking
+ * the HPD connection is enough to know whether a sink device is
+ * connected.
+ */
+ drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count);
+ if (DP_GET_SINK_COUNT(sink_count))
+ ret = connector_status_connected;
+
+ if (!enabled) {
+ /* power off panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+ }
+
+ return ret;
+}
+
+static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ bool enabled = mtk_dp->enabled;
+ struct edid *new_edid = NULL;
+ struct mtk_dp_audio_cfg *audio_caps = &mtk_dp->info.audio_cur_cfg;
+ struct cea_sad *sads;
+
+ if (!enabled) {
+ drm_bridge_chain_pre_enable(bridge);
+
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ /* power on panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ }
+
+ new_edid = drm_get_edid(connector, &mtk_dp->aux.ddc);
+
+ /*
+ * Parse the capabilities here so that atomic_get_input_bus_fmts and
+ * mode_valid can use them to calculate the sink bitrates.
+ */
+ if (mtk_dp_parse_capabilities(mtk_dp)) {
+ drm_err(mtk_dp->drm_dev, "Can't parse capabilities\n");
+ new_edid = NULL;
+ }
+
+ if (new_edid) {
+ audio_caps->sad_count = drm_edid_to_sad(new_edid, &sads);
+ audio_caps->detect_monitor = drm_detect_monitor_audio(new_edid);
+ }
+
+ if (!enabled) {
+ /* power off panel */
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+
+ drm_bridge_chain_post_disable(bridge);
+ }
+
+ return new_edid;
+}
+
+static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct mtk_dp *mtk_dp;
+ bool is_read;
+ u8 request;
+ size_t accessed_bytes = 0;
+ int ret;
+
+ mtk_dp = container_of(mtk_aux, struct mtk_dp, aux);
+
+ if (!mtk_dp->train_info.cable_plugged_in) {
+ ret = -EAGAIN;
+ goto err;
+ }
+
+ switch (msg->request) {
+ case DP_AUX_I2C_MOT:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE | DP_AUX_I2C_MOT:
+ request = msg->request & ~DP_AUX_I2C_WRITE_STATUS_UPDATE;
+ is_read = false;
+ break;
+ case DP_AUX_I2C_READ:
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ | DP_AUX_I2C_MOT:
+ request = msg->request;
+ is_read = true;
+ break;
+ default:
+ drm_err(mtk_aux->drm_dev, "invalid aux cmd = %d\n",
+ msg->request);
+ ret = -EINVAL;
+ goto err;
+ }
+
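+ /*
+ * A single DP AUX transaction carries at most 16 data bytes
+ * (DP_AUX_MAX_PAYLOAD_BYTES), so larger requests are split into chunks.
+ */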
+ do {
+ size_t to_access = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES,
+ msg->size - accessed_bytes);
+
+ ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request,
+ msg->address + accessed_bytes,
+ msg->buffer + accessed_bytes,
+ to_access);
+
+ if (ret) {
+ drm_info(mtk_dp->drm_dev,
+ "Failed to do AUX transfer: %d\n", ret);
+ goto err;
+ }
+ accessed_bytes += to_access;
+ } while (accessed_bytes < msg->size);
+
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK | DP_AUX_I2C_REPLY_ACK;
+ return msg->size;
+err:
+ msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK;
+ return ret;
+}
+
+static int mtk_dp_poweron(struct mtk_dp *mtk_dp)
+{
+ int ret;
+
+ ret = phy_init(mtk_dp->phy);
+ if (ret) {
+ dev_err(mtk_dp->dev, "Failed to initialize phy: %d\n", ret);
+ return ret;
+ }
+
+ mtk_dp_init_port(mtk_dp);
+ mtk_dp_power_enable(mtk_dp);
+
+ return 0;
+}
+
+static void mtk_dp_poweroff(struct mtk_dp *mtk_dp)
+{
+ mtk_dp_power_disable(mtk_dp);
+ phy_exit(mtk_dp->phy);
+}
+
+static int mtk_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ dev_err(mtk_dp->dev, "Driver does not provide a connector!");
+ return -EINVAL;
+ }
+
+ mtk_dp->aux.drm_dev = bridge->dev;
+ ret = drm_dp_aux_register(&mtk_dp->aux);
+ if (ret) {
+ dev_err(mtk_dp->dev,
+ "failed to register DP AUX channel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_dp_poweron(mtk_dp);
+ if (ret)
+ goto err_aux_register;
+
+ if (mtk_dp->next_bridge) {
+ ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge,
+ &mtk_dp->bridge, flags);
+ if (ret) {
+ drm_warn(mtk_dp->drm_dev,
+ "Failed to attach external bridge: %d\n", ret);
+ goto err_bridge_attach;
+ }
+ }
+
+ mtk_dp->drm_dev = bridge->dev;
+
+ mtk_dp_hwirq_enable(mtk_dp, true);
+
+ return 0;
+
+err_bridge_attach:
+ mtk_dp_poweroff(mtk_dp);
+err_aux_register:
+ drm_dp_aux_unregister(&mtk_dp->aux);
+ return ret;
+}
+
+static void mtk_dp_bridge_detach(struct drm_bridge *bridge)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+
+ mtk_dp_hwirq_enable(mtk_dp, false);
+ mtk_dp->drm_dev = NULL;
+ mtk_dp_poweroff(mtk_dp);
+ drm_dp_aux_unregister(&mtk_dp->aux);
+}
+
+static void mtk_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ int ret;
+
+ mtk_dp->conn = drm_atomic_get_new_connector_for_encoder(old_state->base.state,
+ bridge->encoder);
+ if (!mtk_dp->conn) {
+ drm_err(mtk_dp->drm_dev,
+ "Can't enable bridge as connector is missing\n");
+ return;
+ }
+
+ /* power on aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL_LANE,
+ DP_PWR_STATE_MASK);
+
+ if (mtk_dp->train_info.cable_plugged_in) {
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ usleep_range(2000, 5000);
+ }
+
+ /* Training */
+ ret = mtk_dp_training(mtk_dp);
+ if (ret) {
+ drm_err(mtk_dp->drm_dev, "Training failed, %d\n", ret);
+ goto power_off_aux;
+ }
+
+ ret = mtk_dp_video_config(mtk_dp);
+ if (ret)
+ goto power_off_aux;
+
+ mtk_dp_video_enable(mtk_dp, true);
+
+ mtk_dp->audio_enable =
+ mtk_dp_edid_parse_audio_capabilities(mtk_dp,
+ &mtk_dp->info.audio_cur_cfg);
+ if (mtk_dp->audio_enable) {
+ mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg);
+ mtk_dp_audio_mute(mtk_dp, false);
+ } else {
+ memset(&mtk_dp->info.audio_cur_cfg, 0,
+ sizeof(mtk_dp->info.audio_cur_cfg));
+ }
+
+ mtk_dp->enabled = true;
+ mtk_dp_update_plugged_status(mtk_dp);
+
+ return;
+power_off_aux:
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+}
+
+static void mtk_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+
+ mtk_dp->enabled = false;
+ mtk_dp_update_plugged_status(mtk_dp);
+ mtk_dp_video_enable(mtk_dp, false);
+ mtk_dp_audio_mute(mtk_dp, true);
+
+ if (mtk_dp->train_info.cable_plugged_in) {
+ drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+ usleep_range(2000, 3000);
+ }
+
+ /* power off aux */
+ mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
+ DP_PWR_STATE_BANDGAP_TPLL,
+ DP_PWR_STATE_MASK);
+
+ /* Ensure the sink is muted */
+ msleep(20);
+}
+
+static enum drm_mode_status
+mtk_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24;
+ u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
+ drm_dp_max_lane_count(mtk_dp->rx_cap),
+ drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
+ mtk_dp->max_lanes);
+
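+ /*
+ * Both sides of this comparison are effectively in kB/s (each link
+ * symbol carries one data byte). For example, a 594 MHz 4k60 mode at
+ * 24 bpp needs 594000 * 24 / 8 = 1782000, which fits within the
+ * 540000 * 4 = 2160000 available on four HBR2 lanes.
+ */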
+ if (rate < mode->clock * bpp / 8)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static u32 *mtk_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+ *num_output_fmts = 1;
+ output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ return output_fmts;
+}
+
+static const u32 mt8195_input_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+};
+
+static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_display_info *display_info =
+ &conn_state->connector->display_info;
+ u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) *
+ drm_dp_max_lane_count(mtk_dp->rx_cap),
+ drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) *
+ mtk_dp->max_lanes);
+
+ *num_input_fmts = 0;
+
+ /*
+ * If the link rate is lower than the RGB888 data rate but higher than
+ * the YUV422 data rate, and the sink device supports YUV422, output
+ * the YUV422 format. This lets us support higher resolutions than
+ * RGB888 alone would allow.
+ */
+ if ((rate < (mode->clock * 24 / 8)) &&
+ (rate > (mode->clock * 16 / 8)) &&
+ (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) {
+ input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+ *num_input_fmts = 1;
+ input_fmts[0] = MEDIA_BUS_FMT_YUYV8_1X16;
+ } else {
+ input_fmts = kcalloc(ARRAY_SIZE(mt8195_input_fmts),
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ *num_input_fmts = ARRAY_SIZE(mt8195_input_fmts);
+ memcpy(input_fmts, mt8195_input_fmts, sizeof(mt8195_input_fmts));
+ }
+
+ return input_fmts;
+}
+
+static int mtk_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge);
+ struct drm_crtc *crtc = conn_state->crtc;
+ unsigned int input_bus_format;
+
+ input_bus_format = bridge_state->input_bus_cfg.format;
+
+ dev_dbg(mtk_dp->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ if (input_bus_format == MEDIA_BUS_FMT_YUYV8_1X16)
+ mtk_dp->info.format = DP_PIXELFORMAT_YUV422;
+ else
+ mtk_dp->info.format = DP_PIXELFORMAT_RGB;
+
+ if (!crtc) {
+ drm_err(mtk_dp->drm_dev,
+ "Can't enable bridge as connector state doesn't have a crtc\n");
+ return -EINVAL;
+ }
+
+ drm_display_mode_to_videomode(&crtc_state->adjusted_mode, &mtk_dp->info.vm);
+
+ return 0;
+}
+
+static const struct drm_bridge_funcs mtk_dp_bridge_funcs = {
+ .atomic_check = mtk_dp_bridge_atomic_check,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_output_bus_fmts = mtk_dp_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = mtk_dp_bridge_atomic_get_input_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = mtk_dp_bridge_attach,
+ .detach = mtk_dp_bridge_detach,
+ .atomic_enable = mtk_dp_bridge_atomic_enable,
+ .atomic_disable = mtk_dp_bridge_atomic_disable,
+ .mode_valid = mtk_dp_bridge_mode_valid,
+ .get_edid = mtk_dp_get_edid,
+ .detect = mtk_dp_bdg_detect,
+};
+
+static void mtk_dp_debounce_timer(struct timer_list *t)
+{
+ struct mtk_dp *mtk_dp = from_timer(mtk_dp, t, debounce_timer);
+
+ mtk_dp->need_debounce = true;
+}
+
+/*
+ * HDMI audio codec callbacks
+ */
+static int mtk_dp_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ if (!mtk_dp->enabled) {
+ dev_err(mtk_dp->dev, "%s, DP is not ready!\n", __func__);
+ return -ENODEV;
+ }
+
+ mtk_dp->info.audio_cur_cfg.channels = params->cea.channels;
+ mtk_dp->info.audio_cur_cfg.sample_rate = params->sample_rate;
+
+ mtk_dp_audio_setup(mtk_dp, &mtk_dp->info.audio_cur_cfg);
+
+ return 0;
+}
+
+static int mtk_dp_audio_startup(struct device *dev, void *data)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ mtk_dp_audio_mute(mtk_dp, false);
+
+ return 0;
+}
+
+static void mtk_dp_audio_shutdown(struct device *dev, void *data)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ mtk_dp_audio_mute(mtk_dp, true);
+}
+
+static int mtk_dp_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+ size_t len)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ if (mtk_dp->enabled)
+ memcpy(buf, mtk_dp->conn->eld, len);
+ else
+ memset(buf, 0, len);
+
+ return 0;
+}
+
+static int mtk_dp_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_dp *mtk_dp = data;
+
+ mutex_lock(&mtk_dp->update_plugged_status_lock);
+ mtk_dp->plugged_cb = fn;
+ mtk_dp->codec_dev = codec_dev;
+ mutex_unlock(&mtk_dp->update_plugged_status_lock);
+
+ mtk_dp_update_plugged_status(mtk_dp);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops mtk_dp_audio_codec_ops = {
+ .hw_params = mtk_dp_audio_hw_params,
+ .audio_startup = mtk_dp_audio_startup,
+ .audio_shutdown = mtk_dp_audio_shutdown,
+ .get_eld = mtk_dp_audio_get_eld,
+ .hook_plugged_cb = mtk_dp_audio_hook_plugged_cb,
+ .no_capture_mute = 1,
+};
+
+static int mtk_dp_register_audio_driver(struct device *dev)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &mtk_dp_audio_codec_ops,
+ .max_i2s_channels = 8,
+ .i2s = 1,
+ .data = mtk_dp,
+ };
+
+ mtk_dp->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ return PTR_ERR_OR_ZERO(mtk_dp->audio_pdev);
+}
+
+static int mtk_dp_probe(struct platform_device *pdev)
+{
+ struct mtk_dp *mtk_dp;
+ struct device *dev = &pdev->dev;
+ int ret, irq_num;
+
+ mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
+ if (!mtk_dp)
+ return -ENOMEM;
+
+ mtk_dp->dev = dev;
+ mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
+
+ irq_num = platform_get_irq(pdev, 0);
+ if (irq_num < 0)
+ return dev_err_probe(dev, irq_num,
+ "failed to request dp irq resource\n");
+
+ mtk_dp->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+ if (IS_ERR(mtk_dp->next_bridge) &&
+ PTR_ERR(mtk_dp->next_bridge) == -ENODEV)
+ mtk_dp->next_bridge = NULL;
+ else if (IS_ERR(mtk_dp->next_bridge))
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->next_bridge),
+ "Failed to get bridge\n");
+
+ ret = mtk_dp_dt_parse(mtk_dp, pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to parse dt\n");
+
+ drm_dp_aux_init(&mtk_dp->aux);
+ mtk_dp->aux.name = "aux_mtk_dp";
+ mtk_dp->aux.transfer = mtk_dp_aux_transfer;
+
+ spin_lock_init(&mtk_dp->irq_thread_lock);
+
+ ret = devm_request_threaded_irq(dev, irq_num, mtk_dp_hpd_event,
+ mtk_dp_hpd_event_thread,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(dev),
+ mtk_dp);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to request mediatek dptx irq\n");
+
+ mutex_init(&mtk_dp->update_plugged_status_lock);
+
+ platform_set_drvdata(pdev, mtk_dp);
+
+ if (mtk_dp->data->audio_supported) {
+ ret = mtk_dp_register_audio_driver(dev);
+ if (ret) {
+ dev_err(dev, "Failed to register audio driver: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ mtk_dp->phy_dev = platform_device_register_data(dev, "mediatek-dp-phy",
+ PLATFORM_DEVID_AUTO,
+ &mtk_dp->regs,
+ sizeof(struct regmap *));
+ if (IS_ERR(mtk_dp->phy_dev))
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy_dev),
+ "Failed to create device mediatek-dp-phy\n");
+
+ mtk_dp_get_calibration_data(mtk_dp);
+
+ mtk_dp->phy = devm_phy_get(&mtk_dp->phy_dev->dev, "dp");
+
+ if (IS_ERR(mtk_dp->phy)) {
+ platform_device_unregister(mtk_dp->phy_dev);
+ return dev_err_probe(dev, PTR_ERR(mtk_dp->phy),
+ "Failed to get phy\n");
+ }
+
+ mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
+ mtk_dp->bridge.of_node = dev->of_node;
+
+ mtk_dp->bridge.ops =
+ DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ mtk_dp->bridge.type = mtk_dp->data->bridge_type;
+
+ drm_bridge_add(&mtk_dp->bridge);
+
+ mtk_dp->need_debounce = true;
+ timer_setup(&mtk_dp->debounce_timer, mtk_dp_debounce_timer, 0);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ return 0;
+}
+
+static int mtk_dp_remove(struct platform_device *pdev)
+{
+ struct mtk_dp *mtk_dp = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ del_timer_sync(&mtk_dp->debounce_timer);
+ drm_bridge_remove(&mtk_dp->bridge);
+ platform_device_unregister(mtk_dp->phy_dev);
+ if (mtk_dp->audio_pdev)
+ platform_device_unregister(mtk_dp->audio_pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_dp_suspend(struct device *dev)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ mtk_dp_power_disable(mtk_dp);
+ mtk_dp_hwirq_enable(mtk_dp, false);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int mtk_dp_resume(struct device *dev)
+{
+ struct mtk_dp *mtk_dp = dev_get_drvdata(dev);
+
+ pm_runtime_get_sync(dev);
+ mtk_dp_init_port(mtk_dp);
+ mtk_dp_hwirq_enable(mtk_dp, true);
+ mtk_dp_power_enable(mtk_dp);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_dp_pm_ops, mtk_dp_suspend, mtk_dp_resume);
+
+static const struct mtk_dp_data mt8195_edp_data = {
+ .bridge_type = DRM_MODE_CONNECTOR_eDP,
+ .smc_cmd = MTK_DP_SIP_ATF_EDP_VIDEO_UNMUTE,
+ .efuse_fmt = mt8195_edp_efuse_fmt,
+ .audio_supported = false,
+};
+
+static const struct mtk_dp_data mt8195_dp_data = {
+ .bridge_type = DRM_MODE_CONNECTOR_DisplayPort,
+ .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE,
+ .efuse_fmt = mt8195_dp_efuse_fmt,
+ .audio_supported = true,
+};
+
+static const struct of_device_id mtk_dp_of_match[] = {
+ {
+ .compatible = "mediatek,mt8195-edp-tx",
+ .data = &mt8195_edp_data,
+ },
+ {
+ .compatible = "mediatek,mt8195-dp-tx",
+ .data = &mt8195_dp_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_dp_of_match);
+
+static struct platform_driver mtk_dp_driver = {
+ .probe = mtk_dp_probe,
+ .remove = mtk_dp_remove,
+ .driver = {
+ .name = "mediatek-drm-dp",
+ .of_match_table = mtk_dp_of_match,
+ .pm = &mtk_dp_pm_ops,
+ },
+};
+
+module_platform_driver(mtk_dp_driver);
+
+MODULE_AUTHOR("Jitao Shi <jitao.shi@mediatek.com>");
+MODULE_AUTHOR("Markus Schneider-Pargmann <msp@baylibre.com>");
+MODULE_AUTHOR("Bo-Chen Chen <rex-bc.chen@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek DisplayPort Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
new file mode 100644
index 000000000000..84e38cef03c2
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019-2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre
+ */
+#ifndef _MTK_DP_REG_H_
+#define _MTK_DP_REG_H_
+
+#define SEC_OFFSET 0x4000
+
+#define MTK_DP_HPD_DISCONNECT BIT(1)
+#define MTK_DP_HPD_CONNECT BIT(2)
+#define MTK_DP_HPD_INTERRUPT BIT(3)
+
+/* offset: 0x0 */
+#define DP_PHY_GLB_BIAS_GEN_00 0x0
+#define RG_XTP_GLB_BIAS_INTR_CTRL GENMASK(20, 16)
+#define DP_PHY_GLB_DPAUX_TX 0x8
+#define RG_CKM_PT0_CKTX_IMPSEL GENMASK(23, 20)
+#define MTK_DP_0034 0x34
+#define DA_XTP_GLB_CKDET_EN_FORCE_VAL BIT(15)
+#define DA_XTP_GLB_CKDET_EN_FORCE_EN BIT(14)
+#define DA_CKM_INTCKTX_EN_FORCE_VAL BIT(13)
+#define DA_CKM_INTCKTX_EN_FORCE_EN BIT(12)
+#define DA_CKM_CKTX0_EN_FORCE_VAL BIT(11)
+#define DA_CKM_CKTX0_EN_FORCE_EN BIT(10)
+#define DA_CKM_XTAL_CK_FORCE_VAL BIT(9)
+#define DA_CKM_XTAL_CK_FORCE_EN BIT(8)
+#define DA_CKM_BIAS_LPF_EN_FORCE_VAL BIT(7)
+#define DA_CKM_BIAS_LPF_EN_FORCE_EN BIT(6)
+#define DA_CKM_BIAS_EN_FORCE_VAL BIT(5)
+#define DA_CKM_BIAS_EN_FORCE_EN BIT(4)
+#define DA_XTP_GLB_AVD10_ON_FORCE_VAL BIT(3)
+#define DA_XTP_GLB_AVD10_ON_FORCE BIT(2)
+#define DA_XTP_GLB_LDO_EN_FORCE_VAL BIT(1)
+#define DA_XTP_GLB_LDO_EN_FORCE_EN BIT(0)
+#define DP_PHY_LANE_TX_0 0x104
+#define RG_XTP_LN0_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN0_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define DP_PHY_LANE_TX_1 0x204
+#define RG_XTP_LN1_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN1_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define DP_PHY_LANE_TX_2 0x304
+#define RG_XTP_LN2_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN2_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define DP_PHY_LANE_TX_3 0x404
+#define RG_XTP_LN3_TX_IMPSEL_PMOS GENMASK(15, 12)
+#define RG_XTP_LN3_TX_IMPSEL_NMOS GENMASK(19, 16)
+#define MTK_DP_1040 0x1040
+#define RG_DPAUX_RX_VALID_DEGLITCH_EN BIT(2)
+#define RG_XTP_GLB_CKDET_EN BIT(1)
+#define RG_DPAUX_RX_EN BIT(0)
+
+/* offset: TOP_OFFSET (0x2000) */
+#define MTK_DP_TOP_PWR_STATE 0x2000
+#define DP_PWR_STATE_MASK GENMASK(1, 0)
+#define DP_PWR_STATE_BANDGAP BIT(0)
+#define DP_PWR_STATE_BANDGAP_TPLL BIT(1)
+#define DP_PWR_STATE_BANDGAP_TPLL_LANE GENMASK(1, 0)
+#define MTK_DP_TOP_SWING_EMP 0x2004
+#define DP_TX0_VOLT_SWING_MASK GENMASK(1, 0)
+#define DP_TX0_VOLT_SWING_SHIFT 0
+#define DP_TX0_PRE_EMPH_MASK GENMASK(3, 2)
+#define DP_TX0_PRE_EMPH_SHIFT 2
+#define DP_TX1_VOLT_SWING_MASK GENMASK(9, 8)
+#define DP_TX1_VOLT_SWING_SHIFT 8
+#define DP_TX1_PRE_EMPH_MASK GENMASK(11, 10)
+#define DP_TX2_VOLT_SWING_MASK GENMASK(17, 16)
+#define DP_TX2_PRE_EMPH_MASK GENMASK(19, 18)
+#define DP_TX3_VOLT_SWING_MASK GENMASK(25, 24)
+#define DP_TX3_PRE_EMPH_MASK GENMASK(27, 26)
+#define MTK_DP_TOP_RESET_AND_PROBE 0x2020
+#define SW_RST_B_PHYD BIT(4)
+#define MTK_DP_TOP_IRQ_MASK 0x202c
+#define IRQ_MASK_AUX_TOP_IRQ BIT(2)
+#define MTK_DP_TOP_MEM_PD 0x2038
+#define MEM_ISO_EN BIT(0)
+#define FUSE_SEL BIT(2)
+
+/* offset: ENC0_OFFSET (0x3000) */
+#define MTK_DP_ENC0_P0_3000 0x3000
+#define LANE_NUM_DP_ENC0_P0_MASK GENMASK(1, 0)
+#define VIDEO_MUTE_SW_DP_ENC0_P0 BIT(2)
+#define VIDEO_MUTE_SEL_DP_ENC0_P0 BIT(3)
+#define ENHANCED_FRAME_EN_DP_ENC0_P0 BIT(4)
+#define MTK_DP_ENC0_P0_3004 0x3004
+#define VIDEO_M_CODE_SEL_DP_ENC0_P0_MASK BIT(8)
+#define DP_TX_ENCODER_4P_RESET_SW_DP_ENC0_P0 BIT(9)
+#define MTK_DP_ENC0_P0_3010 0x3010
+#define HTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3014 0x3014
+#define VTOTAL_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3018 0x3018
+#define HSTART_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_301C 0x301c
+#define VSTART_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3020 0x3020
+#define HWIDTH_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3024 0x3024
+#define VHEIGHT_SW_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3028 0x3028
+#define HSW_SW_DP_ENC0_P0_MASK GENMASK(14, 0)
+#define HSP_SW_DP_ENC0_P0_MASK BIT(15)
+#define MTK_DP_ENC0_P0_302C 0x302c
+#define VSW_SW_DP_ENC0_P0_MASK GENMASK(14, 0)
+#define VSP_SW_DP_ENC0_P0_MASK BIT(15)
+#define MTK_DP_ENC0_P0_3030 0x3030
+#define HTOTAL_SEL_DP_ENC0_P0 BIT(0)
+#define VTOTAL_SEL_DP_ENC0_P0 BIT(1)
+#define HSTART_SEL_DP_ENC0_P0 BIT(2)
+#define VSTART_SEL_DP_ENC0_P0 BIT(3)
+#define HWIDTH_SEL_DP_ENC0_P0 BIT(4)
+#define VHEIGHT_SEL_DP_ENC0_P0 BIT(5)
+#define HSP_SEL_DP_ENC0_P0 BIT(6)
+#define HSW_SEL_DP_ENC0_P0 BIT(7)
+#define VSP_SEL_DP_ENC0_P0 BIT(8)
+#define VSW_SEL_DP_ENC0_P0 BIT(9)
+#define VBID_AUDIO_MUTE_FLAG_SW_DP_ENC0_P0 BIT(11)
+#define VBID_AUDIO_MUTE_FLAG_SEL_DP_ENC0_P0 BIT(12)
+#define MTK_DP_ENC0_P0_3034 0x3034
+#define MTK_DP_ENC0_P0_3038 0x3038
+#define VIDEO_SOURCE_SEL_DP_ENC0_P0_MASK BIT(11)
+#define MTK_DP_ENC0_P0_303C 0x303c
+#define SRAM_START_READ_THRD_DP_ENC0_P0_MASK GENMASK(5, 0)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_MASK GENMASK(10, 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_16BIT (0 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_12BIT (1 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_10BIT (2 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_8BIT (3 << 8)
+#define VIDEO_COLOR_DEPTH_DP_ENC0_P0_6BIT (4 << 8)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK GENMASK(14, 12)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB (0 << 12)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422 (1 << 12)
+#define PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR420 (2 << 12)
+#define VIDEO_MN_GEN_EN_DP_ENC0_P0 BIT(15)
+#define MTK_DP_ENC0_P0_3040 0x3040
+#define SDP_DOWN_CNT_DP_ENC0_P0_VAL 0x20
+#define SDP_DOWN_CNT_INIT_DP_ENC0_P0_MASK GENMASK(11, 0)
+#define MTK_DP_ENC0_P0_304C 0x304c
+#define VBID_VIDEO_MUTE_DP_ENC0_P0_MASK BIT(2)
+#define SDP_VSYNC_RISING_MASK_DP_ENC0_P0_MASK BIT(8)
+#define MTK_DP_ENC0_P0_3064 0x3064
+#define HDE_NUM_LAST_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3088 0x3088
+#define AU_EN_DP_ENC0_P0 BIT(6)
+#define AUDIO_8CH_EN_DP_ENC0_P0_MASK BIT(7)
+#define AUDIO_8CH_SEL_DP_ENC0_P0_MASK BIT(8)
+#define AUDIO_2CH_EN_DP_ENC0_P0_MASK BIT(14)
+#define AUDIO_2CH_SEL_DP_ENC0_P0_MASK BIT(15)
+#define MTK_DP_ENC0_P0_308C 0x308c
+#define CH_STATUS_0_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3090 0x3090
+#define CH_STATUS_1_DP_ENC0_P0_MASK GENMASK(15, 0)
+#define MTK_DP_ENC0_P0_3094 0x3094
+#define CH_STATUS_2_DP_ENC0_P0_MASK GENMASK(7, 0)
+#define MTK_DP_ENC0_P0_30A4 0x30a4
+#define AU_TS_CFG_DP_ENC0_P0_MASK GENMASK(7, 0)
+#define MTK_DP_ENC0_P0_30A8 0x30a8
+#define MTK_DP_ENC0_P0_30BC 0x30bc
+#define ISRC_CONT_DP_ENC0_P0 BIT(0)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MASK GENMASK(10, 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_2 (1 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_4 (2 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_MUL_8 (3 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2 (5 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_4 (6 << 8)
+#define AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_8 (7 << 8)
+#define MTK_DP_ENC0_P0_30D8 0x30d8
+#define MTK_DP_ENC0_P0_312C 0x312c
+#define ASP_HB2_DP_ENC0_P0_MASK GENMASK(7, 0)
+#define ASP_HB3_DP_ENC0_P0_MASK GENMASK(15, 8)
+#define MTK_DP_ENC0_P0_3154 0x3154
+#define PGEN_HTOTAL_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3158 0x3158
+#define PGEN_HSYNC_RISING_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_315C 0x315c
+#define PGEN_HSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3160 0x3160
+#define PGEN_HFDE_START_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3164 0x3164
+#define PGEN_HFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK GENMASK(13, 0)
+#define MTK_DP_ENC0_P0_3168 0x3168
+#define PGEN_VTOTAL_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_316C 0x316c
+#define PGEN_VSYNC_RISING_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_3170 0x3170
+#define PGEN_VSYNC_PULSE_WIDTH_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_3174 0x3174
+#define PGEN_VFDE_START_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_3178 0x3178
+#define PGEN_VFDE_ACTIVE_WIDTH_DP_ENC0_P0_MASK GENMASK(12, 0)
+#define MTK_DP_ENC0_P0_31B0 0x31b0
+#define PGEN_PATTERN_SEL_VAL 4
+#define PGEN_PATTERN_SEL_MASK GENMASK(6, 4)
+#define MTK_DP_ENC0_P0_31EC 0x31ec
+#define AUDIO_CH_SRC_SEL_DP_ENC0_P0 BIT(4)
+#define ISRC1_HB3_DP_ENC0_P0_MASK GENMASK(15, 8)
+
+/* offset: ENC1_OFFSET (0x3200) */
+#define MTK_DP_ENC1_P0_3200 0x3200
+#define MTK_DP_ENC1_P0_3280 0x3280
+#define SDP_PACKET_TYPE_DP_ENC1_P0_MASK GENMASK(4, 0)
+#define SDP_PACKET_W_DP_ENC1_P0 BIT(5)
+#define SDP_PACKET_W_DP_ENC1_P0_MASK BIT(5)
+#define MTK_DP_ENC1_P0_3300 0x3300
+#define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL 2
+#define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK GENMASK(9, 8)
+#define MTK_DP_ENC1_P0_3304 0x3304
+#define AU_PRTY_REGEN_DP_ENC1_P0_MASK BIT(8)
+#define AU_CH_STS_REGEN_DP_ENC1_P0_MASK BIT(9)
+#define AUDIO_SAMPLE_PRSENT_REGEN_DP_ENC1_P0_MASK BIT(12)
+#define MTK_DP_ENC1_P0_3324 0x3324
+#define AUDIO_SOURCE_MUX_DP_ENC1_P0_MASK GENMASK(9, 8)
+#define AUDIO_SOURCE_MUX_DP_ENC1_P0_DPRX 0
+#define MTK_DP_ENC1_P0_3364 0x3364
+#define SDP_DOWN_CNT_IN_HBLANK_DP_ENC1_P0_VAL 0x20
+#define SDP_DOWN_CNT_INIT_IN_HBLANK_DP_ENC1_P0_MASK GENMASK(11, 0)
+#define FIFO_READ_START_POINT_DP_ENC1_P0_VAL 4
+#define FIFO_READ_START_POINT_DP_ENC1_P0_MASK GENMASK(15, 12)
+#define MTK_DP_ENC1_P0_3368 0x3368
+#define VIDEO_SRAM_FIFO_CNT_RESET_SEL_DP_ENC1_P0 BIT(0)
+#define VIDEO_STABLE_CNT_THRD_DP_ENC1_P0 BIT(4)
+#define SDP_DP13_EN_DP_ENC1_P0 BIT(8)
+#define BS2BS_MODE_DP_ENC1_P0 BIT(12)
+#define BS2BS_MODE_DP_ENC1_P0_MASK GENMASK(13, 12)
+#define BS2BS_MODE_DP_ENC1_P0_VAL 1
+#define DP_ENC1_P0_3368_VAL (VIDEO_SRAM_FIFO_CNT_RESET_SEL_DP_ENC1_P0 | \
+ VIDEO_STABLE_CNT_THRD_DP_ENC1_P0 | \
+ SDP_DP13_EN_DP_ENC1_P0 | \
+ BS2BS_MODE_DP_ENC1_P0)
+#define MTK_DP_ENC1_P0_33F4 0x33f4
+#define DP_ENC_DUMMY_RW_1_AUDIO_RST_EN BIT(0)
+#define DP_ENC_DUMMY_RW_1 BIT(9)
+
+/* offset: TRANS_OFFSET (0x3400) */
+#define MTK_DP_TRANS_P0_3400 0x3400
+#define PATTERN1_EN_DP_TRANS_P0_MASK BIT(12)
+#define PATTERN2_EN_DP_TRANS_P0_MASK BIT(13)
+#define PATTERN3_EN_DP_TRANS_P0_MASK BIT(14)
+#define PATTERN4_EN_DP_TRANS_P0_MASK BIT(15)
+#define MTK_DP_TRANS_P0_3404 0x3404
+#define DP_SCR_EN_DP_TRANS_P0_MASK BIT(0)
+#define MTK_DP_TRANS_P0_340C 0x340c
+#define DP_TX_TRANSMITTER_4P_RESET_SW_DP_TRANS_P0 BIT(13)
+#define MTK_DP_TRANS_P0_3410 0x3410
+#define HPD_DEB_THD_DP_TRANS_P0_MASK GENMASK(3, 0)
+#define HPD_INT_THD_DP_TRANS_P0_MASK GENMASK(7, 4)
+#define HPD_INT_THD_DP_TRANS_P0_LOWER_500US (2 << 4)
+#define HPD_INT_THD_DP_TRANS_P0_UPPER_1100US (2 << 6)
+#define HPD_DISC_THD_DP_TRANS_P0_MASK GENMASK(11, 8)
+#define HPD_CONN_THD_DP_TRANS_P0_MASK GENMASK(15, 12)
+#define MTK_DP_TRANS_P0_3414 0x3414
+#define HPD_DB_DP_TRANS_P0_MASK BIT(2)
+#define MTK_DP_TRANS_P0_3418 0x3418
+#define IRQ_CLR_DP_TRANS_P0_MASK GENMASK(3, 0)
+#define IRQ_MASK_DP_TRANS_P0_MASK GENMASK(7, 4)
+#define IRQ_MASK_DP_TRANS_P0_DISC_IRQ (BIT(1) << 4)
+#define IRQ_MASK_DP_TRANS_P0_CONN_IRQ (BIT(2) << 4)
+#define IRQ_MASK_DP_TRANS_P0_INT_IRQ (BIT(3) << 4)
+#define IRQ_STATUS_DP_TRANS_P0_MASK GENMASK(15, 12)
+#define MTK_DP_TRANS_P0_342C 0x342c
+#define XTAL_FREQ_DP_TRANS_P0_DEFAULT (BIT(0) | BIT(3) | BIT(5) | BIT(6))
+#define XTAL_FREQ_DP_TRANS_P0_MASK GENMASK(7, 0)
+#define MTK_DP_TRANS_P0_3430 0x3430
+#define HPD_INT_THD_ECO_DP_TRANS_P0_MASK GENMASK(1, 0)
+#define HPD_INT_THD_ECO_DP_TRANS_P0_HIGH_BOUND_EXT BIT(1)
+#define MTK_DP_TRANS_P0_34A4 0x34a4
+#define LANE_NUM_DP_TRANS_P0_MASK GENMASK(3, 2)
+#define MTK_DP_TRANS_P0_3540 0x3540
+#define FEC_EN_DP_TRANS_P0_MASK BIT(0)
+#define FEC_CLOCK_EN_MODE_DP_TRANS_P0 BIT(3)
+#define MTK_DP_TRANS_P0_3580 0x3580
+#define POST_MISC_DATA_LANE0_OV_DP_TRANS_P0_MASK BIT(8)
+#define POST_MISC_DATA_LANE1_OV_DP_TRANS_P0_MASK BIT(9)
+#define POST_MISC_DATA_LANE2_OV_DP_TRANS_P0_MASK BIT(10)
+#define POST_MISC_DATA_LANE3_OV_DP_TRANS_P0_MASK BIT(11)
+#define MTK_DP_TRANS_P0_35C8 0x35c8
+#define SW_IRQ_CLR_DP_TRANS_P0_MASK GENMASK(15, 0)
+#define SW_IRQ_STATUS_DP_TRANS_P0_MASK GENMASK(15, 0)
+#define MTK_DP_TRANS_P0_35D0 0x35d0
+#define SW_IRQ_FINAL_STATUS_DP_TRANS_P0_MASK GENMASK(15, 0)
+#define MTK_DP_TRANS_P0_35F0 0x35f0
+#define DP_TRANS_DUMMY_RW_0 BIT(3)
+#define DP_TRANS_DUMMY_RW_0_MASK GENMASK(3, 2)
+
+/* offset: AUX_OFFSET (0x3600) */
+#define MTK_DP_AUX_P0_360C 0x360c
+#define AUX_TIMEOUT_THR_AUX_TX_P0_MASK GENMASK(12, 0)
+#define AUX_TIMEOUT_THR_AUX_TX_P0_VAL 0x1595
+#define MTK_DP_AUX_P0_3614 0x3614
+#define AUX_RX_UI_CNT_THR_AUX_TX_P0_MASK GENMASK(6, 0)
+#define AUX_RX_UI_CNT_THR_AUX_FOR_26M 13
+#define MTK_DP_AUX_P0_3618 0x3618
+#define AUX_RX_FIFO_FULL_AUX_TX_P0_MASK BIT(9)
+#define AUX_RX_FIFO_WRITE_POINTER_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3620 0x3620
+#define AUX_RD_MODE_AUX_TX_P0_MASK BIT(9)
+#define AUX_RX_FIFO_READ_PULSE_TX_P0 BIT(8)
+#define AUX_RX_FIFO_READ_DATA_AUX_TX_P0_MASK GENMASK(7, 0)
+#define MTK_DP_AUX_P0_3624 0x3624
+#define AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3628 0x3628
+#define AUX_RX_PHY_STATE_AUX_TX_P0_MASK GENMASK(9, 0)
+#define AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE BIT(0)
+#define MTK_DP_AUX_P0_362C 0x362c
+#define AUX_NO_LENGTH_AUX_TX_P0 BIT(0)
+#define AUX_TX_AUXTX_OV_EN_AUX_TX_P0_MASK BIT(1)
+#define AUX_RESERVED_RW_0_AUX_TX_P0_MASK GENMASK(15, 2)
+#define MTK_DP_AUX_P0_3630 0x3630
+#define AUX_TX_REQUEST_READY_AUX_TX_P0 BIT(3)
+#define MTK_DP_AUX_P0_3634 0x3634
+#define AUX_TX_OVER_SAMPLE_RATE_AUX_TX_P0_MASK GENMASK(15, 8)
+#define AUX_TX_OVER_SAMPLE_RATE_FOR_26M 25
+#define MTK_DP_AUX_P0_3640 0x3640
+#define AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(6)
+#define AUX_RX_EDID_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(5)
+#define AUX_RX_MCCS_RECV_COMPLETE_IRQ_AUX_TX_P0 BIT(4)
+#define AUX_RX_CMD_RECV_IRQ_AUX_TX_P0 BIT(3)
+#define AUX_RX_ADDR_RECV_IRQ_AUX_TX_P0 BIT(2)
+#define AUX_RX_DATA_RECV_IRQ_AUX_TX_P0 BIT(1)
+#define AUX_400US_TIMEOUT_IRQ_AUX_TX_P0 BIT(0)
+#define DP_AUX_P0_3640_VAL (AUX_400US_TIMEOUT_IRQ_AUX_TX_P0 | \
+ AUX_RX_DATA_RECV_IRQ_AUX_TX_P0 | \
+ AUX_RX_ADDR_RECV_IRQ_AUX_TX_P0 | \
+ AUX_RX_CMD_RECV_IRQ_AUX_TX_P0 | \
+ AUX_RX_MCCS_RECV_COMPLETE_IRQ_AUX_TX_P0 | \
+ AUX_RX_EDID_RECV_COMPLETE_IRQ_AUX_TX_P0 | \
+ AUX_RX_AUX_RECV_COMPLETE_IRQ_AUX_TX_P0)
+#define MTK_DP_AUX_P0_3644 0x3644
+#define MCU_REQUEST_COMMAND_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3648 0x3648
+#define MCU_REQUEST_ADDRESS_LSB_AUX_TX_P0_MASK GENMASK(15, 0)
+#define MTK_DP_AUX_P0_364C 0x364c
+#define MCU_REQUEST_ADDRESS_MSB_AUX_TX_P0_MASK GENMASK(3, 0)
+#define MTK_DP_AUX_P0_3650 0x3650
+#define MCU_REQ_DATA_NUM_AUX_TX_P0_MASK GENMASK(15, 12)
+#define PHY_FIFO_RST_AUX_TX_P0_MASK BIT(9)
+#define MCU_ACK_TRAN_COMPLETE_AUX_TX_P0 BIT(8)
+#define MTK_DP_AUX_P0_3658 0x3658
+#define AUX_TX_OV_EN_AUX_TX_P0_MASK BIT(0)
+#define MTK_DP_AUX_P0_3690 0x3690
+#define RX_REPLY_COMPLETE_MODE_AUX_TX_P0 BIT(8)
+#define MTK_DP_AUX_P0_3704 0x3704
+#define AUX_TX_FIFO_WDATA_NEW_MODE_T_AUX_TX_P0_MASK BIT(1)
+#define AUX_TX_FIFO_NEW_MODE_EN_AUX_TX_P0 BIT(2)
+#define MTK_DP_AUX_P0_3708 0x3708
+#define MTK_DP_AUX_P0_37C8 0x37c8
+#define MTK_ATOP_EN_AUX_TX_P0 BIT(0)
+
+#endif /*_MTK_DP_REG_H_*/
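
The header above describes each DP transmitter/AUX register as an offset plus GENMASK()/BIT() field masks. The snippet below is a minimal host-side sketch of how such a field can be packed and read back; GENMASK, FIELD_PREP and FIELD_GET are redefined locally as simplified stand-ins for the kernel helpers, and the chosen field and value are only illustrative.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK()/FIELD_PREP()/FIELD_GET() */
#define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v)  (((v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, r)   (((r) & (mask)) >> __builtin_ctz(mask))

#define LANE_NUM_DP_TRANS_P0_MASK GENMASK(3, 2)

int main(void)
{
        uint32_t reg = 0;

        /* Program a 2-bit lane-count field, then read it back. */
        reg &= ~LANE_NUM_DP_TRANS_P0_MASK;
        reg |= FIELD_PREP(LANE_NUM_DP_TRANS_P0_MASK, 2);

        printf("reg=0x%08x lanes=%u\n", reg,
               FIELD_GET(LANE_NUM_DP_TRANS_P0_MASK, reg));
        return 0;
}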
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 630a4e301ef6..508a6d994e83 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -11,7 +11,6 @@
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 42cc7052b050..112615817dcb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -15,7 +15,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 2d72cc5ddaba..6b6d5335c834 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0e4c77724b05..91f58db5915f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -20,7 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -833,11 +833,8 @@ static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
- int ret;
-
- ret = drm_mode_config_helper_suspend(drm);
- return ret;
+ return drm_mode_config_helper_suspend(drm);
}
static void mtk_drm_sys_complete(struct device *dev)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 139d7724c6d0..47e96b0289f9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -8,7 +8,7 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
@@ -22,7 +22,7 @@ static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
.vmap = mtk_drm_gem_prime_vmap,
.vunmap = mtk_drm_gem_prime_vunmap,
.mmap = mtk_drm_gem_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 5c0d9ce69931..2f5e007dd380 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -11,7 +11,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -108,8 +107,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
crtc_state = new_plane_state->crtc->state;
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
@@ -202,8 +201,8 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9cc406e1eee1..3b7d13028fb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric: mtk_dsi_stop()
+ * must be called after mtk_drm_crtc_atomic_disable(), which needs the
+ * vblank irq that mtk_dsi_stop() disables, while mtk_dsi_start() needs
+ * to be called in mtk_output_dsi_enable(), after the dsi is fully set
+ * up.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
dsi->enabled = false;
}
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
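
The moved comment explains an ordering constraint: mtk_dsi_stop() disables the irq that mtk_drm_crtc_atomic_disable() still needs for vblank, so it may only run on the final power-off. The toy program below sketches that refcounted ordering; the names and the reference count of two are assumptions made purely for illustration, not the driver's actual call graph.

#include <stdio.h>

/* Toy model of the ordering constraint described above: the
 * irq-disabling stop must happen only on the final power-off,
 * after the vblank-consuming disable path has already run.
 */
static int refcount = 2;        /* hypothetically held by encoder and CRTC */

static void crtc_atomic_disable(void) { printf("crtc disable (needs vblank irq)\n"); }
static void dsi_stop(void)            { printf("dsi stop (irq now off)\n"); }

static void dsi_poweroff(void)
{
        if (--refcount != 0)
                return;
        dsi_stop();             /* runs last, so the vblank irq stayed alive */
}

int main(void)
{
        dsi_poweroff();         /* first user drops its reference */
        crtc_atomic_disable();  /* still has the irq available */
        dsi_poweroff();         /* last user: now it is safe to stop */
        return 0;
}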
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 3196189429bc..4c80b6896dc3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -16,7 +16,6 @@
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 6c70fc3214af..823909da87db 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -4,7 +4,7 @@ config DRM_MESON
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
select REGMAP_MMIO
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index bd4ca11d3ff5..3b24a924b7b9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
@@ -87,16 +87,16 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
args->size = PAGE_ALIGN(args->pitch * args->height);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver meson_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- /* CMA Ops */
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
+ /* DMA Ops */
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
/* Misc */
.fops = &fops,
@@ -388,10 +388,14 @@ static void meson_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
- component_unbind_all(dev, drm);
free_irq(priv->vsync_irq, drm);
drm_dev_put(drm);
+ meson_encoder_hdmi_remove(priv);
+ meson_encoder_cvbs_remove(priv);
+
+ component_unbind_all(dev, drm);
+
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
}
@@ -493,6 +497,13 @@ static int meson_drv_probe(struct platform_device *pdev)
return 0;
};
+static int meson_drv_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &meson_drv_master_ops);
+
+ return 0;
+}
+
static struct meson_drm_match_data meson_drm_gxbb_data = {
.compat = VPU_COMPATIBLE_GXBB,
};
@@ -530,6 +541,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
+ .remove = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 177dac3ca3be..c62ee358456f 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -25,6 +25,12 @@ enum vpu_compatible {
VPU_COMPATIBLE_G12A = 3,
};
+enum {
+ MESON_ENC_CVBS = 0,
+ MESON_ENC_HDMI,
+ MESON_ENC_LAST,
+};
+
struct meson_drm_match_data {
enum vpu_compatible compat;
struct meson_afbcd_ops *afbcd_ops;
@@ -51,6 +57,7 @@ struct meson_drm {
struct drm_crtc *crtc;
struct drm_plane *primary_plane;
struct drm_plane *overlay_plane;
+ void *encoders[MESON_ENC_LAST];
const struct meson_drm_soc_limits *limits;
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 8110a6e39320..5675bc2a92cf 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -281,5 +281,18 @@ int meson_encoder_cvbs_init(struct meson_drm *priv)
}
drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
+ priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
+
return 0;
}
+
+void meson_encoder_cvbs_remove(struct meson_drm *priv)
+{
+ struct meson_encoder_cvbs *meson_encoder_cvbs;
+
+ if (priv->encoders[MESON_ENC_CVBS]) {
+ meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
+ drm_bridge_remove(&meson_encoder_cvbs->bridge);
+ drm_bridge_remove(meson_encoder_cvbs->next_bridge);
+ }
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
index 61d9d183ce7f..09710fec3c66 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h
@@ -25,5 +25,6 @@ struct meson_cvbs_mode {
extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT];
int meson_encoder_cvbs_init(struct meson_drm *priv);
+void meson_encoder_cvbs_remove(struct meson_drm *priv);
#endif /* __MESON_VENC_CVBS_H */
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 2f616c55c271..53231bfdf7e2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -452,6 +452,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv)
meson_encoder_hdmi->cec_notifier = notifier;
}
+ priv->encoders[MESON_ENC_HDMI] = meson_encoder_hdmi;
+
dev_dbg(priv->dev, "HDMI encoder initialized\n");
return 0;
@@ -460,3 +462,14 @@ err_put_node:
of_node_put(remote);
return ret;
}
+
+void meson_encoder_hdmi_remove(struct meson_drm *priv)
+{
+ struct meson_encoder_hdmi *meson_encoder_hdmi;
+
+ if (priv->encoders[MESON_ENC_HDMI]) {
+ meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
+ drm_bridge_remove(&meson_encoder_hdmi->bridge);
+ drm_bridge_remove(meson_encoder_hdmi->next_bridge);
+ }
+}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
index ed19494f0956..a6cd38eb5f71 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h
@@ -8,5 +8,6 @@
#define __MESON_ENCODER_HDMI_H
int meson_encoder_hdmi_init(struct meson_drm *priv);
+void meson_encoder_hdmi_remove(struct meson_drm *priv);
#endif /* __MESON_ENCODER_HDMI_H */
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index b4a0518c1028..7f98de38842b 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -11,12 +11,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "meson_overlay.h"
#include "meson_registers.h"
@@ -477,7 +476,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
plane);
struct drm_framebuffer *fb = new_state->fb;
struct meson_drm *priv = meson_overlay->priv;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned long flags;
bool interlace_mode;
@@ -651,8 +650,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
switch (priv->viu.vd1_planes) {
case 3:
- gem = drm_fb_cma_get_gem_obj(fb, 2);
- priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+ gem = drm_fb_dma_get_gem_obj(fb, 2);
+ priv->viu.vd1_addr2 = gem->dma_addr + fb->offsets[2];
priv->viu.vd1_stride2 = fb->pitches[2];
priv->viu.vd1_height2 =
drm_format_info_plane_height(fb->format,
@@ -663,8 +662,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_height2);
fallthrough;
case 2:
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
+ priv->viu.vd1_addr1 = gem->dma_addr + fb->offsets[1];
priv->viu.vd1_stride1 = fb->pitches[1];
priv->viu.vd1_height1 =
drm_format_info_plane_height(fb->format,
@@ -675,8 +674,8 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
priv->viu.vd1_height1);
fallthrough;
case 1:
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ priv->viu.vd1_addr0 = gem->dma_addr + fb->offsets[0];
priv->viu.vd1_stride0 = fb->pitches[0];
priv->viu.vd1_height0 =
drm_format_info_plane_height(fb->format,
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index b9ac932af8d0..815dfe30492b 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -15,12 +15,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "meson_plane.h"
#include "meson_registers.h"
@@ -95,7 +94,7 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
FRAC_16_16(1, 5),
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -140,7 +139,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
struct drm_rect dest = drm_plane_state_dest(new_state);
struct meson_drm *priv = meson_plane->priv;
struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned long flags;
int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
@@ -170,7 +169,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
@@ -366,9 +365,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
}
/* Update Canvas with buffer address */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
- priv->viu.osd1_addr = gem->paddr;
+ priv->viu.osd1_addr = gem->dma_addr;
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
priv->viu.osd1_width = fb->width;
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index bb7e109534de..d4b907889a21 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 89558549c3af..182e224c460d 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
mgag200-y := \
+ mgag200_bmc.o \
mgag200_drv.o \
mgag200_g200.o \
mgag200_g200eh.o \
@@ -10,7 +11,6 @@ mgag200-y := \
mgag200_g200se.o \
mgag200_g200wb.o \
mgag200_i2c.o \
- mgag200_mode.o \
- mgag200_pll.o
+ mgag200_mode.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_bmc.c b/drivers/gpu/drm/mgag200/mgag200_bmc.c
new file mode 100644
index 000000000000..2ba2e3c5086a
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_bmc.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/delay.h>
+
+#include "mgag200_drv.h"
+
+void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
+{
+ u8 tmp;
+ int iter_max;
+
+ /*
+ * 1 - The first step is to inform the BMC of an upcoming mode
+ * change: configure the misc<0> line as an output.
+ */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* we are putting a 1 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+ /*
+ * 2 - The second step is to mask any further scan request by
+ * asserting the remfreqmsk bit (XSPAREREG<7>).
+ */
+
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+ /*
+ * 3a - The third step is to verify whether there is an active scan:
+ * poll remhsyncsts (XSPAREREG<0>) until the scan is reported idle.
+ */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+ /*
+ * 3b - This step occurs only if the remote is actually
+ * scanning: wait for the end of the frame, as reported on
+ * remvsyncsts (XSPAREREG<1>).
+ */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
+{
+ u8 tmp;
+
+ /* Ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* Assert rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(10);
+
+ /* Deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* Remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* Put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
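
The new BMC helpers repeatedly poll a DAC status register with an iteration cap instead of blocking indefinitely. Below is a small stand-alone sketch of that bounded-poll pattern with the register read stubbed out; the stub's behaviour and the poll count are illustrative only, while the real code reads XSPAREREG through the DAC index/data pair with roughly 1 ms between tries.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for RREG8(DAC_DATA) after selecting MGA1064_SPAREREG; here
 * the "hardware" reports hsync-idle after a few reads, purely for
 * illustration.
 */
static unsigned char read_sparereg(void)
{
        static int calls;
        return (++calls > 3) ? 0x01 : 0x00;     /* bit 0 = remhsyncsts */
}

/* Bounded poll: wait for a bit to reach the wanted level, at most
 * @iter_max tries, then give up rather than hang.
 */
static bool poll_bit(unsigned char mask, bool want_set, int iter_max)
{
        while (iter_max--) {
                unsigned char v = read_sparereg();

                if (!!(v & mask) == want_set)
                        return true;
                /* the driver sleeps about 1 ms here via udelay(1000) */
        }
        return false;
}

int main(void)
{
        if (poll_bit(0x01, true, 300))          /* step 3a: scan went idle */
                puts("remhsyncsts seen, BMC scan finished");
        else
                puts("timed out, continue anyway");
        return 0;
}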
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 251a1bb648cc..ece6cd102dbb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -155,15 +155,16 @@ int mgag200_device_preinit(struct mga_device *mdev)
return 0;
}
-int mgag200_device_init(struct mga_device *mdev, enum mga_type type,
- const struct mgag200_device_info *info)
+int mgag200_device_init(struct mga_device *mdev,
+ const struct mgag200_device_info *info,
+ const struct mgag200_device_funcs *funcs)
{
struct drm_device *dev = &mdev->base;
u8 crtcext3, misc;
int ret;
mdev->info = info;
- mdev->type = type;
+ mdev->funcs = funcs;
ret = drmm_mutex_init(dev, &mdev->rmmio_lock);
if (ret)
@@ -226,29 +227,29 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (type) {
case G200_PCI:
case G200_AGP:
- mdev = mgag200_g200_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200_device_create(pdev, &mgag200_driver);
break;
case G200_SE_A:
case G200_SE_B:
mdev = mgag200_g200se_device_create(pdev, &mgag200_driver, type);
break;
case G200_WB:
- mdev = mgag200_g200wb_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200wb_device_create(pdev, &mgag200_driver);
break;
case G200_EV:
- mdev = mgag200_g200ev_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200ev_device_create(pdev, &mgag200_driver);
break;
case G200_EH:
- mdev = mgag200_g200eh_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200eh_device_create(pdev, &mgag200_driver);
break;
case G200_EH3:
- mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver);
break;
case G200_ER:
- mdev = mgag200_g200er_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200er_device_create(pdev, &mgag200_driver);
break;
case G200_EW3:
- mdev = mgag200_g200ew3_device_create(pdev, &mgag200_driver, type);
+ mdev = mgag200_g200ew3_device_create(pdev, &mgag200_driver);
break;
default:
dev_err(&pdev->dev, "Device type %d is unsupported\n", type);
@@ -262,7 +263,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ /*
+ * FIXME: A 24-bit color depth does not work with 24 bpp on
+ * G200ER. Force 32 bpp.
+ */
+ drm_fbdev_generic_setup(dev, 32);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 301c4ab46539..f0c2349404b4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -15,11 +15,13 @@
#include <video/vga.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_plane.h>
#include "mgag200_reg.h"
@@ -123,11 +125,39 @@
#define MGA_MISC_OUT 0x1fc2
#define MGA_MISC_IN 0x1fcc
+/*
+ * TODO: This is a pretty large set of default values for all kinds of
+ * settings. It should be split and set in the various DRM helpers,
+ * such as the CRTC reset or atomic_enable helpers. The PLL values
+ * probably belong to each model's PLL code.
+ */
+#define MGAG200_DAC_DEFAULT(xvrefctrl, xpixclkctrl, xmiscctrl, xsyspllm, xsysplln, xsyspllp) \
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0, \
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 0x18: */ (xvrefctrl), \
+ /* 0x19: */ 0, \
+ /* 0x1a: */ (xpixclkctrl), \
+ /* 0x1b: */ 0xff, 0xbf, 0x20, \
+ /* 0x1e: */ (xmiscctrl), \
+ /* 0x1f: */ 0x20, \
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, \
+ /* 0x2c: */ (xsyspllm), \
+ /* 0x2d: */ (xsysplln), \
+ /* 0x2e: */ (xsyspllp), \
+ /* 0x2f: */ 0x40, \
+ /* 0x30: */ 0x00, 0xb0, 0x00, 0xc2, 0x34, 0x14, 0x02, 0x83, \
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3a, \
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0, \
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0 \
+
+#define MGAG200_LUT_SIZE 256
+
#define MGAG200_MAX_FB_HEIGHT 4096
#define MGAG200_MAX_FB_WIDTH 4096
struct mga_device;
-struct mgag200_pll;
/*
* Stores parameters for programming the PLLs
@@ -146,20 +176,12 @@ struct mgag200_pll_values {
unsigned int s;
};
-struct mgag200_pll_funcs {
- int (*compute)(struct mgag200_pll *pll, long clock, struct mgag200_pll_values *pllc);
- void (*update)(struct mgag200_pll *pll, const struct mgag200_pll_values *pllc);
-};
-
-struct mgag200_pll {
- struct mga_device *mdev;
-
- const struct mgag200_pll_funcs *funcs;
-};
-
struct mgag200_crtc_state {
struct drm_crtc_state base;
+ /* Primary-plane format; required for modesetting and color mgmt. */
+ const struct drm_format_info *format;
+
struct mgag200_pll_values pixpllc;
};
@@ -188,8 +210,6 @@ enum mga_type {
G200_EW3,
};
-#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
-
struct mgag200_device_info {
u16 max_hdisplay;
u16 max_vdisplay;
@@ -230,10 +250,39 @@ struct mgag200_device_info {
.bug_no_startadd = (_bug_no_startadd), \
}
+struct mgag200_device_funcs {
+ /*
+ * Disables an external reset source (i.e., BMC) before programming
+ * a new display mode.
+ */
+ void (*disable_vidrst)(struct mga_device *mdev);
+
+ /*
+ * Enables an external reset source (i.e., BMC) after programming
+ * a new display mode.
+ */
+ void (*enable_vidrst)(struct mga_device *mdev);
+
+ /*
+ * Validate that the given state can be programmed into PIXPLLC. On
+ * success, the calculated parameters should be stored in the CRTC's
+ * state in struct @mgag200_crtc_state.pixpllc.
+ */
+ int (*pixpllc_atomic_check)(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+
+ /*
+ * Program PIXPLLC from the CRTC state. The parameters should have been
+ * stored in struct @mgag200_crtc_state.pixpllc by the corresponding
+ * implementation of @pixpllc_atomic_check.
+ */
+ void (*pixpllc_atomic_update)(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+};
+
struct mga_device {
struct drm_device base;
const struct mgag200_device_info *info;
+ const struct mgag200_device_funcs *funcs;
struct resource *rmmio_res;
void __iomem *rmmio;
@@ -243,12 +292,11 @@ struct mga_device {
void __iomem *vram;
resource_size_t vram_available;
- enum mga_type type;
-
- struct mgag200_pll pixpll;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct mga_i2c_chan i2c;
struct drm_connector connector;
- struct drm_simple_display_pipe display_pipe;
};
static inline struct mga_device *to_mga_device(struct drm_device *dev)
@@ -287,35 +335,113 @@ int mgag200_init_pci_options(struct pci_dev *pdev, u32 option, u32 option2);
resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size);
resource_size_t mgag200_device_probe_vram(struct mga_device *mdev);
int mgag200_device_preinit(struct mga_device *mdev);
-int mgag200_device_init(struct mga_device *mdev, enum mga_type type,
- const struct mgag200_device_info *info);
+int mgag200_device_init(struct mga_device *mdev,
+ const struct mgag200_device_info *info,
+ const struct mgag200_device_funcs *funcs);
/* mgag200_<device type>.c */
-struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
+struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv);
struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type);
-struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
-struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type);
+void mgag200_g200wb_init_registers(struct mga_device *mdev);
+void mgag200_g200wb_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv);
+struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv);
+void mgag200_g200eh_init_registers(struct mga_device *mdev);
+void mgag200_g200eh_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
+struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
+struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
+struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv);
- /* mgag200_mode.c */
-resource_size_t mgag200_device_probe_vram(struct mga_device *mdev);
-int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_fb_available);
+/*
+ * mgag200_mode.c
+ */
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_display_mode;
+struct drm_plane;
+struct drm_atomic_state;
+
+extern const uint32_t mgag200_primary_plane_formats[];
+extern const size_t mgag200_primary_plane_formats_size;
+extern const uint64_t mgag200_primary_plane_fmtmods[];
+
+int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state);
+void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state);
+void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *old_state);
+#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .atomic_check = mgag200_primary_plane_helper_atomic_check, \
+ .atomic_update = mgag200_primary_plane_helper_atomic_update, \
+ .atomic_disable = mgag200_primary_plane_helper_atomic_disable
+
+#define MGAG200_PRIMARY_PLANE_FUNCS \
+ .update_plane = drm_atomic_helper_update_plane, \
+ .disable_plane = drm_atomic_helper_disable_plane, \
+ .destroy = drm_plane_cleanup, \
+ DRM_GEM_SHADOW_PLANE_FUNCS
+
+enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode);
+int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state);
+void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
+
+#define MGAG200_CRTC_HELPER_FUNCS \
+ .mode_valid = mgag200_crtc_helper_mode_valid, \
+ .atomic_check = mgag200_crtc_helper_atomic_check, \
+ .atomic_flush = mgag200_crtc_helper_atomic_flush, \
+ .atomic_enable = mgag200_crtc_helper_atomic_enable, \
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+
+void mgag200_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
+void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
+
+#define MGAG200_CRTC_FUNCS \
+ .reset = mgag200_crtc_reset, \
+ .destroy = drm_crtc_cleanup, \
+ .set_config = drm_atomic_helper_set_config, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
+ .atomic_destroy_state = mgag200_crtc_atomic_destroy_state
+
+#define MGAG200_DAC_ENCODER_FUNCS \
+ .destroy = drm_encoder_cleanup
+
+int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector);
+
+#define MGAG200_VGA_CONNECTOR_HELPER_FUNCS \
+ .get_modes = mgag200_vga_connector_helper_get_modes
+
+#define MGAG200_VGA_CONNECTOR_FUNCS \
+ .reset = drm_atomic_helper_connector_reset, \
+ .fill_modes = drm_helper_probe_single_connector_modes, \
+ .destroy = drm_connector_cleanup, \
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, \
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state
+
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode);
+void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_enable_display(struct mga_device *mdev);
+void mgag200_init_registers(struct mga_device *mdev);
+int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available);
+
+ /* mgag200_bmc.c */
+void mgag200_bmc_disable_vidrst(struct mga_device *mdev);
+void mgag200_bmc_enable_vidrst(struct mga_device *mdev);
/* mgag200_i2c.c */
int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c);
- /* mgag200_pll.c */
-int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev);
-
#endif /* __MGAG200_DRV_H__ */
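
This header swaps the per-device enum mga_type field for a per-model mgag200_device_funcs table, so callers invoke mdev->funcs->... instead of switching on the type. A minimal sketch of that pattern follows; the struct and model names in it are invented for illustration and do not come from the driver.

#include <stdio.h>

/* Toy version of the refactoring in this header: instead of switching
 * on an enum at every call site, each model supplies a funcs table at
 * init time.
 */
struct toy_device_funcs {
        void (*pixpll_update)(int m, int n, int p);
};

struct toy_device {
        const struct toy_device_funcs *funcs;
};

static void modela_pixpll_update(int m, int n, int p)
{
        printf("model A: M=%d N=%d P=%d\n", m, n, p);
}

static const struct toy_device_funcs modela_funcs = {
        .pixpll_update = modela_pixpll_update,
};

int main(void)
{
        struct toy_device dev = { .funcs = &modela_funcs };

        dev.funcs->pixpll_update(4, 45, 2);     /* no enum/type switch needed */
        return 0;
}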
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index 674385921b7f..bf5d7fe525a3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -3,7 +3,11 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
@@ -30,6 +34,235 @@ static int mgag200_g200_init_pci_options(struct pci_dev *pdev)
return mgag200_init_pci_options(pdev, option, 0x00008000);
}
+static void mgag200_g200_init_registers(struct mgag200_g200_device *g200)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00, 0xc9, 0x1f,
+ 0x04, 0x2d, 0x19)
+ };
+
+ struct mga_device *mdev = &g200->base;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); ++i) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200_pixpllc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
+{
+ static const int post_div_max = 7;
+ static const int in_div_min = 1;
+ static const int in_div_max = 6;
+ static const int feed_div_min = 7;
+ static const int feed_div_max = 127;
+
+ struct drm_device *dev = crtc->dev;
+ struct mgag200_g200_device *g200 = to_mgag200_g200_device(dev);
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ u8 testp, testm, testn;
+ u8 n = 0, m = 0, p, s;
+ long f_vco;
+ long computed;
+ long delta, tmp_delta;
+ long ref_clk = g200->ref_clk;
+ long p_clk_min = g200->pclk_min;
+ long p_clk_max = g200->pclk_max;
+
+ if (clock > p_clk_max) {
+ drm_err(dev, "Pixel Clock %ld too high\n", clock);
+ return -EINVAL;
+ }
+
+ if (clock < p_clk_min >> 3)
+ clock = p_clk_min >> 3;
+
+ f_vco = clock;
+ for (testp = 0;
+ testp <= post_div_max && f_vco < p_clk_min;
+ testp = (testp << 1) + 1, f_vco <<= 1)
+ ;
+ p = testp + 1;
+
+ delta = clock;
+
+ for (testm = in_div_min; testm <= in_div_max; testm++) {
+ for (testn = feed_div_min; testn <= feed_div_max; testn++) {
+ computed = ref_clk * (testn + 1) / (testm + 1);
+ if (computed < f_vco)
+ tmp_delta = f_vco - computed;
+ else
+ tmp_delta = computed - f_vco;
+ if (tmp_delta < delta) {
+ delta = tmp_delta;
+ m = testm + 1;
+ n = testn + 1;
+ }
+ }
+ }
+ f_vco = ref_clk * n / m;
+ if (f_vco < 100000)
+ s = 0;
+ else if (f_vco < 140000)
+ s = 1;
+ else if (f_vco < 180000)
+ s = 2;
+ else
+ s = 3;
+
+ drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
+ clock, f_vco, m, n, p, s);
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM Device
*/
@@ -160,8 +393,12 @@ out:
pci_unmap_rom(pdev, rom);
}
-struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mgag200_g200_device *g200;
struct mga_device *mdev;
@@ -187,15 +424,24 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
mgag200_g200_init_refclk(g200);
- ret = mgag200_device_init(mdev, type, &mgag200_g200_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200_device_info,
+ &mgag200_g200_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200_init_registers(g200);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
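
mgag200_g200_pixpllc_atomic_check() first picks a post divider that lifts the VCO above its minimum and then brute-forces the M/N pair whose output is closest to that VCO. The stand-alone program below reproduces that search on the host; the reference clock and pixel-clock limits are placeholder values, since the driver reads the real ones from the card BIOS (g200->ref_clk, ->pclk_min, ->pclk_max).

#include <stdio.h>

int main(void)
{
        const long ref_clk = 27050, p_clk_min = 50000, p_clk_max = 230000;
        long clock = 65000;             /* kHz, e.g. a 1024x768@60 mode */
        long f_vco, computed, delta, tmp_delta;
        unsigned int testp, testm, testn, n = 0, m = 0, p, s;

        if (clock > p_clk_max)
                clock = p_clk_max;      /* the driver rejects this with -EINVAL */
        if (clock < p_clk_min >> 3)
                clock = p_clk_min >> 3;

        /* pick the post divider that lifts the VCO above its minimum */
        f_vco = clock;
        for (testp = 0; testp <= 7 && f_vco < p_clk_min;
             testp = (testp << 1) + 1, f_vco <<= 1)
                ;
        p = testp + 1;

        /* brute-force M/N for the closest VCO frequency */
        delta = clock;
        for (testm = 1; testm <= 6; testm++) {
                for (testn = 7; testn <= 127; testn++) {
                        computed = ref_clk * (testn + 1) / (testm + 1);
                        tmp_delta = computed < f_vco ? f_vco - computed
                                                     : computed - f_vco;
                        if (tmp_delta < delta) {
                                delta = tmp_delta;
                                m = testm + 1;
                                n = testn + 1;
                        }
                }
        }
        f_vco = ref_clk * n / m;
        s = f_vco < 100000 ? 0 : f_vco < 140000 ? 1 : f_vco < 180000 ? 2 : 3;

        printf("clock=%ld vco=%ld m=%u n=%u p=%u s=%u\n",
               clock, f_vco, m, n, p, s);
        return 0;
}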
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index 1b9a22728744..fad62453a91d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -1,11 +1,267 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+void mgag200_g200eh_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00, 0xc9,
+ MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS,
+ 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)) ||
+ ((i >= 0x44) && (i <= 0x4e)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200eh_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 800000;
+ static const unsigned int vcomin = 400000;
+ static const unsigned int pllreffreq = 33333;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
+ for (testp = 16; testp > 0; testp >>= 1) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 17; testn < 257; testn++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn;
+ m = testm;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+void mgag200_g200eh_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, xpixpllcp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200eh_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200eh_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200eh_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200eh_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200eh_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200eh_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200eh_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200eh_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200eh_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200eh_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200eh_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200eh_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200eh_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200eh_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200eh_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +269,12 @@
static const struct mgag200_device_info mgag200_g200eh_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 37500, false, 1, 0, false);
-struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200eh_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200eh_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -36,15 +296,24 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200eh_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200eh_device_info,
+ &mgag200_g200eh_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200eh_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200eh_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
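
mgag200_g200eh_pixpllc_atomic_update() decides that the pixel PLL has locked by watching MGAREG_VCOUNT: if the vertical counter advances by more than 2 within a bounded number of polls, scanout is running again. The sketch below models only that lock test with a stubbed counter; the stub's behaviour and the poll limit are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for RREG8(MGAREG_VCOUNT); pretends scanout resumes and the
 * counter keeps ticking, purely for illustration.
 */
static int read_vcount(void)
{
        static int ticks;
        return (ticks += 2);
}

/* Lock test used above: the PLL is treated as locked once the vertical
 * counter has advanced by more than 2 within a bounded number of polls.
 */
static bool pll_locked(int max_polls)
{
        int vcount = read_vcount();

        while (max_polls--) {
                int now = read_vcount();

                if (now < vcount)       /* counter wrapped at end of frame */
                        vcount = 0;
                if (now - vcount > 2)
                        return true;
                /* the driver waits 5 us here between polls */
        }
        return false;
}

int main(void)
{
        puts(pll_locked(30) ? "pixel PLL locked" : "retry PLL programming");
        return 0;
}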
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index 438cda1b14c9..0f7d8112cd49 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -2,20 +2,184 @@
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200eh3_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 3000000;
+ static const unsigned int vcomin = 1500000;
+ static const unsigned int pllreffreq = 25000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+ testp = 0;
+
+ for (testm = 150; testm >= 6; testm--) {
+ if (clock * testm > vcomax)
+ continue;
+ if (clock * testm < vcomin)
+ continue;
+ for (testn = 120; testn >= 60; testn--) {
+ computed = (pllreffreq * testn) / testm;
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn + 1;
+ m = testm + 1;
+ p = testp + 1;
+ }
+ if (delta == 0)
+ break;
+ }
+ if (delta == 0)
+ break;
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200eh3_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200eh3_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200eh3_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200eh3_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200eh3_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200eh3_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200eh3_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200eh3_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200eh3_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200eh3_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200eh3_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200eh3_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200eh3_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200eh3_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
* DRM device
*/
static const struct mgag200_device_info mgag200_g200eh3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);
+static const struct mgag200_device_funcs mgag200_g200eh3_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200eh3_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update, // same as G200EH
+};
+
struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum mga_type type)
+ const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -37,15 +201,24 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200eh3_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200eh3_device_info,
+ &mgag200_g200eh3_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200eh_init_registers(mdev); // same as G200EH
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200eh3_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 0790d4e6463d..bce267e0f7de 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -1,11 +1,305 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+static void mgag200_g200er_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00, 0xc9, 0x1f, 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ WREG_DAC(0x90, 0); /* G200ER specific */
+
+ mgag200_init_registers(mdev);
+
+ WREG_ECRT(0x24, 0x5); /* G200ER specific */
+}
+
+static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev)
+{
+ static const uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */
+ u32 memctl;
+
+ memctl = RREG32(MGAREG_MEMCTL);
+
+ memctl |= RESET_FLAG;
+ WREG32(MGAREG_MEMCTL, memctl);
+
+ udelay(1000);
+
+ memctl &= ~RESET_FLAG;
+ WREG32(MGAREG_MEMCTL, memctl);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200er_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 1488000;
+ static const unsigned int vcomin = 1056000;
+ static const unsigned int pllreffreq = 48000;
+ static const unsigned int m_div_val[] = { 1, 2, 4, 8 };
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ int testr, testn, testm, testo;
+ unsigned int p, m, n, s;
+ unsigned int computed, vco;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
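+ /*
+  * Brute-force divider search: VCO = pllreffreq * (N + 1) / (R + 1) must
+  * stay within [vcomin, vcomax], and the candidate pixel clock is
+  * VCO / (m_div_val[M] * (O + 1)). The combination with the smallest
+  * deviation from the requested mode clock (kHz) wins; M and O are packed
+  * together into the stored m value.
+  */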
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ vco = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (vco < vcomin)
+ continue;
+ if (vco > vcomax)
+ continue;
+ computed = vco / (m_div_val[testm] * (testo + 1));
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = (testm | (testo << 3)) + 1;
+ n = testn + 1;
+ p = testr + 1;
+ s = testr;
+ }
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200er_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
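+ /*
+  * Select the MGA clock source, gate the pixel and remote-head clocks
+  * while switching, then program the new N/M/P divider values and give
+  * the PLL time to settle.
+  */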
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, xpixpllcp);
+
+ udelay(50);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200er_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200er_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
+
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
+
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
+
+ mgag200_g200er_reset_tagfifo(mdev);
+
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
+
+static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
+ .mode_valid = mgag200_crtc_helper_mode_valid,
+ .atomic_check = mgag200_crtc_helper_atomic_check,
+ .atomic_flush = mgag200_crtc_helper_atomic_flush,
+ .atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+};
+
+static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200er_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200er_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200er_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200er_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200er_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200er_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200er_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200er_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200er_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200er_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200er_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +307,12 @@
static const struct mgag200_device_info mgag200_g200er_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 1, 0, false);
-struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200er_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200er_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200er_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -32,15 +330,24 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200er_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200er_device_info,
+ &mgag200_g200er_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200er_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200er_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 5353422d0eef..ac957f42abe1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -1,11 +1,306 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+static void mgag200_g200ev_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x00,
+ MGA1064_PIX_CLK_CTL_SEL_PLL,
+ MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS,
+ 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)) ||
+ ((i >= 0x44) && (i <= 0x4e)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev)
+{
+ WREG_ECRT(0x06, 0x00);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200ev_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 550000;
+ static const unsigned int vcomin = 150000;
+ static const unsigned int pllreffreq = 50000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
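+ /*
+  * Exhaustive search over P, N and M: the candidate pixel clock is
+  * pllreffreq * N / (M * P), with the VCO (clock * P) constrained to
+  * [vcomin, vcomax]. The divider set closest to the requested clock wins.
+  */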
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn;
+ m = testm;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200ev_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
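+ /*
+  * Disable the pixel clock and power down the PLL, program the new M/N/P
+  * values, then power the PLL back up and re-select it as pixel-clock
+  * source once it has had time to settle.
+  */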
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG8(DAC_DATA, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, xpixpllcp);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG8(DAC_DATA, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200ev_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200ev_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
+
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
+
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
+
+ mgag200_g200ev_set_hiprilvl(mdev);
+
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
+
+static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
+ .mode_valid = mgag200_crtc_helper_mode_valid,
+ .atomic_check = mgag200_crtc_helper_atomic_check,
+ .atomic_flush = mgag200_crtc_helper_atomic_flush,
+ .atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+};
+
+static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200ev_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200ev_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200ev_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200ev_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200ev_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200ev_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200ev_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200ev_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200ev_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200ev_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200ev_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +308,12 @@
static const struct mgag200_device_info mgag200_g200ev_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 32700, false, 0, 1, false);
-struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200ev_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200ev_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200ev_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -36,15 +335,24 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200ev_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200ev_device_info,
+ &mgag200_g200ev_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200ev_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200ev_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index 3bfc1324cf78..170934414d7d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -2,10 +2,179 @@
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+static void mgag200_g200ew3_init_registers(struct mga_device *mdev)
+{
+ mgag200_g200wb_init_registers(mdev); // same as G200WB
+
+ WREG_ECRT(0x34, 0x5); // G200EW3 specific
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200ew3_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 800000;
+ static const unsigned int vcomin = 400000;
+ static const unsigned int pllreffreq = 25000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn, testp2;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
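+ /*
+  * Like the other G200 variants, but with two post dividers: the VCO is
+  * clock * P * P2 and the candidate pixel clock is
+  * pllreffreq * N / (M * P * P2); P2 ends up in the S field.
+  */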
+ for (testp = 1; testp < 8; testp++) {
+ for (testp2 = 1; testp2 < 8; testp2++) {
+ if (testp < testp2)
+ continue;
+ if ((clock * testp * testp2) > vcomax)
+ continue;
+ if ((clock * testp * testp2) < vcomin)
+ continue;
+ for (testm = 1; testm < 26; testm++) {
+ for (testn = 32; testn < 2048; testn++) {
+ computed = (pllreffreq * testn) / (testm * testp * testp2);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm + 1;
+ n = testn + 1;
+ p = testp + 1;
+ s = testp2;
+ }
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200ew3_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200ew3_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200ew3_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200ew3_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200ew3_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200ew3_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200ew3_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200ew3_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200ew3_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200ew3_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200ew3_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200ew3_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200ew3_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200ew3_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,6 +182,13 @@
static const struct mgag200_device_info mgag200_g200ew3_device_info =
MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);
+static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = {
+ .disable_vidrst = mgag200_bmc_disable_vidrst,
+ .enable_vidrst = mgag200_bmc_enable_vidrst,
+ .pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB
+};
+
static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev)
{
resource_size_t vram_size = resource_size(mdev->vram_res);
@@ -23,8 +199,7 @@ static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev
}
struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum mga_type type)
+ const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -46,15 +221,24 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200ew3_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200ew3_device_info,
+ &mgag200_g200ew3_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200ew3_init_registers(mdev);
+
vram_available = mgag200_g200ew3_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200ew3_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 0a3e66695e22..be389ed91cbd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -1,8 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
@@ -28,6 +33,404 @@ static int mgag200_g200se_init_pci_options(struct pci_dev *pdev)
return mgag200_init_pci_options(pdev, option, 0x00008000);
}
+static void mgag200_g200se_init_registers(struct mgag200_g200se_device *g200se)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x03,
+ MGA1064_PIX_CLK_CTL_SEL_PLL,
+ MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS,
+ 0x00, 0x00, 0x00)
+ };
+
+ struct mga_device *mdev = &g200se->base;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
+ const struct drm_display_mode *mode,
+ const struct drm_format_info *format)
+{
+ struct mgag200_g200se_device *g200se = to_mgag200_g200se_device(&mdev->base);
+ unsigned int hiprilvl;
+ u8 crtcext6;
+
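+ /*
+  * Pick the memory-priority level from the approximate scanout bandwidth:
+  * mode clock (kHz) times bits per pixel, divided by 1000, i.e. roughly
+  * Mbit/s. For example, 1920x1080@60 at 32 bpp gives about
+  * 148500 * 32 / 1000 = 4752, which selects hiprilvl 0 on rev 0x02 and
+  * later.
+  */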
+ if (g200se->unique_rev_id >= 0x04) {
+ hiprilvl = 0;
+ } else if (g200se->unique_rev_id >= 0x02) {
+ unsigned int bpp;
+ unsigned long mb;
+
+ if (format->cpp[0] * 8 > 16)
+ bpp = 32;
+ else if (format->cpp[0] * 8 > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hiprilvl = 0;
+ else if (mb > 2600)
+ hiprilvl = 1;
+ else if (mb > 1900)
+ hiprilvl = 2;
+ else if (mb > 1160)
+ hiprilvl = 3;
+ else if (mb > 440)
+ hiprilvl = 4;
+ else
+ hiprilvl = 5;
+
+ } else if (g200se->unique_rev_id >= 0x01) {
+ hiprilvl = 3;
+ } else {
+ hiprilvl = 4;
+ }
+
+ crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */
+
+ WREG_ECRT(0x06, crtcext6);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200se_00_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 320000;
+ static const unsigned int vcomin = 160000;
+ static const unsigned int pllreffreq = 25000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
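+ /*
+  * Search the power-of-two post divider P plus N/M: the candidate clock is
+  * pllreffreq * N / (M * P), the VCO (clock * P) must stay within
+  * [vcomin, vcomax], and the final result may not deviate from the request
+  * by more than permitteddelta (0.5%).
+  */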
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm;
+ n = testn;
+ p = testp;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ pr_warn("PLL delta too large\n");
+ return -EINVAL;
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200se_00_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+}
+
+static int mgag200_g200se_04_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 1600000;
+ static const unsigned int vcomin = 800000;
+ static const unsigned int pllreffreq = 25000;
+ static const unsigned int pvalues_e4[] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+ unsigned int fvv;
+ unsigned int i;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
+ if (clock < 25000)
+ clock = 25000;
+ clock = clock * 2;
+
+ /* Permitted delta is 0.5%, per the VESA specification */
+ permitteddelta = clock * 5 / 1000;
+
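+ /*
+  * The clock has been doubled (and clamped to at least 25000 kHz) above,
+  * so for a 50000 kHz request permitteddelta ends up as
+  * 100000 * 5 / 1000 = 500. P comes from the fixed pvalues_e4 table rather
+  * than a plain loop.
+  */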
+ for (i = 0; i < ARRAY_SIZE(pvalues_e4); i++) {
+ testp = pvalues_e4[i];
+
+ if ((clock * testp) > vcomax)
+ continue;
+ if ((clock * testp) < vcomin)
+ continue;
+
+ for (testn = 50; testn <= 256; testn++) {
+ for (testm = 1; testm <= 32; testm++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm;
+ n = testn;
+ p = testp;
+ }
+ }
+ }
+ }
+
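+ /*
+  * Encode the VCO frequency (pllreffreq * N / M) into the S value: 50000
+  * steps above 800000, clamped to 15 and shifted left by one; the atomic
+  * update packs S into the upper bits of the P register.
+  */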
+ fvv = pllreffreq * n / m;
+ fvv = (fvv - 800000) / 50000;
+ if (fvv > 15)
+ fvv = 15;
+ s = fvv << 1;
+
+ if (delta > permitteddelta) {
+ pr_warn("PLL delta too large\n");
+ return -EINVAL;
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
+ xpixpllcn = pixpllcn;
+ xpixpllcp = (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
+
+ WREG_DAC(0x1a, 0x09);
+ msleep(20);
+ WREG_DAC(0x1a, 0x01);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200se_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200se_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
+
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
+
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
+
+ mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
+
+ mgag200_enable_display(mdev);
+
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
+
+static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
+ .mode_valid = mgag200_crtc_helper_mode_valid,
+ .atomic_check = mgag200_crtc_helper_atomic_check,
+ .atomic_flush = mgag200_crtc_helper_atomic_flush,
+ .atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
+ .atomic_disable = mgag200_crtc_helper_atomic_disable
+};
+
+static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200se_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200se_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200se_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200se_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200se_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200se_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200se_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200se_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200se_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200se_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200se_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -65,11 +468,22 @@ static int mgag200_g200se_init_unique_rev_id(struct mgag200_g200se_device *g200s
return 0;
}
+static const struct mgag200_device_funcs mgag200_g200se_00_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200se_00_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200se_00_pixpllc_atomic_update,
+};
+
+static const struct mgag200_device_funcs mgag200_g200se_04_device_funcs = {
+ .pixpllc_atomic_check = mgag200_g200se_04_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200se_04_pixpllc_atomic_update,
+};
+
struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
enum mga_type type)
{
struct mgag200_g200se_device *g200se;
const struct mgag200_device_info *info;
+ const struct mgag200_device_funcs *funcs;
struct mga_device *mdev;
struct drm_device *dev;
resource_size_t vram_available;
@@ -116,15 +530,28 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
return ERR_PTR(-EINVAL);
}
- ret = mgag200_device_init(mdev, type, info);
+ if (g200se->unique_rev_id >= 0x04)
+ funcs = &mgag200_g200se_04_device_funcs;
+ else
+ funcs = &mgag200_g200se_00_device_funcs;
+
+ ret = mgag200_device_init(mdev, info, funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200se_init_registers(g200se);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200se_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index c8450ac8eaec..9baa727ac6f9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -1,11 +1,314 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
+void mgag200_g200wb_init_registers(struct mga_device *mdev)
+{
+ static const u8 dacvalue[] = {
+ MGAG200_DAC_DEFAULT(0x07, 0xc9, 0x1f, 0x00, 0x00, 0x00)
+ };
+
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
+ if ((i <= 0x17) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)) ||
+ ((i >= 0x44) && (i <= 0x4e)))
+ continue;
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ mgag200_init_registers(mdev);
+}
+
+/*
+ * PIXPLLC
+ */
+
+static int mgag200_g200wb_pixpllc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ static const unsigned int vcomax = 550000;
+ static const unsigned int vcomin = 150000;
+ static const unsigned int pllreffreq = 48000;
+
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ long clock = new_crtc_state->mode.clock;
+ struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
+ unsigned int delta, tmpdelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n, s;
+ unsigned int computed;
+
+ m = n = p = s = 0;
+ delta = 0xffffffff;
+
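+ /*
+  * Same brute-force approach as the other variants: the candidate clock is
+  * pllreffreq * N / (M * P) with the VCO (clock * P) limited to
+  * [vcomin, vcomax]; S is always 0 here.
+  */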
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) / (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn;
+ m = testm;
+ p = testp;
+ s = 0;
+ }
+ }
+ }
+ }
+
+ pixpllc->m = m;
+ pixpllc->n = n;
+ pixpllc->p = p;
+ pixpllc->s = s;
+
+ return 0;
+}
+
+void mgag200_g200wb_pixpllc_atomic_update(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
+ bool pll_locked = false;
+ unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
+ u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
+ int i, j, tmpcount, vcount;
+
+ pixpllcm = pixpllc->m - 1;
+ pixpllcn = pixpllc->n - 1;
+ pixpllcp = pixpllc->p - 1;
+ pixpllcs = pixpllc->s;
+
+ xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
+ xpixpllcn = pixpllcn;
+ xpixpllcp = ((pixpllcn & GENMASK(10, 9)) >> 3) | (pixpllcs << 3) | pixpllcp;
+
+ WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
+
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG8(DAC_DATA, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG8(DAC_DATA, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG8(DAC_DATA, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG8(DAC_DATA, tmp);
+
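+ /*
+  * Detect PLL lock by watching the vertical counter: if VCOUNT advances by
+  * more than 2 within the polling loop the PLL is considered locked;
+  * otherwise the outer loop retries with a slightly bumped tuning value
+  * (CRTC index 0x1e).
+  */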
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+}
+
+/*
+ * Mode-setting pipeline
+ */
+
+static const struct drm_plane_helper_funcs mgag200_g200wb_primary_plane_helper_funcs = {
+ MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
+};
+
+static const struct drm_plane_funcs mgag200_g200wb_primary_plane_funcs = {
+ MGAG200_PRIMARY_PLANE_FUNCS,
+};
+
+static const struct drm_crtc_helper_funcs mgag200_g200wb_crtc_helper_funcs = {
+ MGAG200_CRTC_HELPER_FUNCS,
+};
+
+static const struct drm_crtc_funcs mgag200_g200wb_crtc_funcs = {
+ MGAG200_CRTC_FUNCS,
+};
+
+static const struct drm_encoder_funcs mgag200_g200wb_dac_encoder_funcs = {
+ MGAG200_DAC_ENCODER_FUNCS,
+};
+
+static const struct drm_connector_helper_funcs mgag200_g200wb_vga_connector_helper_funcs = {
+ MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
+};
+
+static const struct drm_connector_funcs mgag200_g200wb_vga_connector_funcs = {
+ MGAG200_VGA_CONNECTOR_FUNCS,
+};
+
+static int mgag200_g200wb_pipeline_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ struct drm_plane *primary_plane = &mdev->primary_plane;
+ struct drm_crtc *crtc = &mdev->crtc;
+ struct drm_encoder *encoder = &mdev->encoder;
+ struct mga_i2c_chan *i2c = &mdev->i2c;
+ struct drm_connector *connector = &mdev->connector;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0,
+ &mgag200_g200wb_primary_plane_funcs,
+ mgag200_primary_plane_formats,
+ mgag200_primary_plane_formats_size,
+ mgag200_primary_plane_fmtmods,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &mgag200_g200wb_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &mgag200_g200wb_crtc_funcs, NULL);
+ if (ret) {
+ drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
+ return ret;
+ }
+ drm_crtc_helper_add(crtc, &mgag200_g200wb_crtc_helper_funcs);
+
+ /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
+ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
+ drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder, &mgag200_g200wb_dac_encoder_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mgag200_i2c_init(mdev, i2c);
+ if (ret) {
+ drm_err(dev, "failed to add DDC bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mgag200_g200wb_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret) {
+ drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
+ return ret;
+ }
+ drm_connector_helper_add(connector, &mgag200_g200wb_vga_connector_helper_funcs);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* DRM device
*/
@@ -13,8 +316,14 @@
static const struct mgag200_device_info mgag200_g200wb_device_info =
MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);
-struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
- enum mga_type type)
+static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = {
+ .disable_vidrst = mgag200_bmc_disable_vidrst,
+ .enable_vidrst = mgag200_bmc_enable_vidrst,
+ .pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check,
+ .pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update,
+};
+
+struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
struct mga_device *mdev;
struct drm_device *dev;
@@ -36,15 +345,24 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
if (ret)
return ERR_PTR(ret);
- ret = mgag200_device_init(mdev, type, &mgag200_g200wb_device_info);
+ ret = mgag200_device_init(mdev, &mgag200_g200wb_device_info,
+ &mgag200_g200wb_device_funcs);
if (ret)
return ERR_PTR(ret);
+ mgag200_g200wb_init_registers(mdev);
+
vram_available = mgag200_device_probe_vram(mdev);
- ret = mgag200_modeset_init(mdev, vram_available);
+ ret = mgag200_mode_config_init(mdev, vram_available);
if (ret)
return ERR_PTR(ret);
+ ret = mgag200_g200wb_pipeline_init(mdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_mode_config_reset(dev);
+
return mdev;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 225cca2ed60e..bbab2549243a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -11,24 +11,19 @@
#include <linux/delay.h>
#include <linux/iosys-map.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include "mgag200_drv.h"
-#define MGAG200_LUT_SIZE 256
-
/*
* This file contains setup code for the CRTC.
*/
@@ -132,95 +127,6 @@ static inline void mga_wait_busy(struct mga_device *mdev)
} while ((status & 0x01) && time_before(jiffies, timeout));
}
-static void mgag200_g200wb_hold_bmc(struct mga_device *mdev)
-{
- u8 tmp;
- int iter_max;
-
- /* 1- The first step is to warn the BMC of an upcoming mode change.
- * We are putting the misc<0> to output.*/
-
- WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x10;
- WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
-
- /* we are putting a 1 on the misc<0> line */
- WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x10;
- WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
-
- /* 2- Second step to mask and further scan request
- * This will be done by asserting the remfreqmsk bit (XSPAREREG<7>)
- */
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x80;
- WREG_DAC(MGA1064_SPAREREG, tmp);
-
- /* 3a- the third step is to verifu if there is an active scan
- * We are searching for a 0 on remhsyncsts <XSPAREREG<0>)
- */
- iter_max = 300;
- while (!(tmp & 0x1) && iter_max) {
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- udelay(1000);
- iter_max--;
- }
-
- /* 3b- this step occurs only if the remove is actually scanning
- * we are waiting for the end of the frame which is a 1 on
- * remvsyncsts (XSPAREREG<1>)
- */
- if (iter_max) {
- iter_max = 300;
- while ((tmp & 0x2) && iter_max) {
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- udelay(1000);
- iter_max--;
- }
- }
-}
-
-static void mgag200_g200wb_release_bmc(struct mga_device *mdev)
-{
- u8 tmp;
-
- /* 1- The first step is to ensure that the vrsten and hrsten are set */
- WREG8(MGAREG_CRTCEXT_INDEX, 1);
- tmp = RREG8(MGAREG_CRTCEXT_DATA);
- WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
-
- /* 2- second step is to assert the rstlvl2 */
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x8;
- WREG8(DAC_DATA, tmp);
-
- /* wait 10 us */
- udelay(10);
-
- /* 3- deassert rstlvl2 */
- tmp &= ~0x08;
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
- WREG8(DAC_DATA, tmp);
-
- /* 4- remove mask of scan request */
- WREG8(DAC_INDEX, MGA1064_SPAREREG);
- tmp = RREG8(DAC_DATA);
- tmp &= ~0x80;
- WREG8(DAC_DATA, tmp);
-
- /* 5- put back a 0 on the misc<0> line */
- WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
- tmp = RREG8(DAC_DATA);
- tmp &= ~0x10;
- WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
-}
-
/*
* This is how the framebuffer base address is stored in g200 cards:
* * Assume @offset is the gpu_addr variable of the framebuffer object
@@ -267,86 +173,10 @@ static void mgag200_set_startadd(struct mga_device *mdev,
WREG_ECRT(0x00, crtcext0);
}
-static void mgag200_set_dac_regs(struct mga_device *mdev)
-{
- size_t i;
- u8 dacvalue[] = {
- /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
- /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
- /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
- /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
- /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
- /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
- /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
- };
-
- switch (mdev->type) {
- case G200_PCI:
- case G200_AGP:
- dacvalue[MGA1064_SYS_PLL_M] = 0x04;
- dacvalue[MGA1064_SYS_PLL_N] = 0x2D;
- dacvalue[MGA1064_SYS_PLL_P] = 0x19;
- break;
- case G200_SE_A:
- case G200_SE_B:
- dacvalue[MGA1064_VREF_CTL] = 0x03;
- dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
- dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
- MGA1064_MISC_CTL_VGA8 |
- MGA1064_MISC_CTL_DAC_RAM_CS;
- break;
- case G200_WB:
- case G200_EW3:
- dacvalue[MGA1064_VREF_CTL] = 0x07;
- break;
- case G200_EV:
- dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
- dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
- MGA1064_MISC_CTL_DAC_RAM_CS;
- break;
- case G200_EH:
- case G200_EH3:
- dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
- MGA1064_MISC_CTL_DAC_RAM_CS;
- break;
- case G200_ER:
- break;
- }
-
- for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
- if ((i <= 0x17) ||
- (i == 0x1b) ||
- (i == 0x1c) ||
- ((i >= 0x1f) && (i <= 0x29)) ||
- ((i >= 0x30) && (i <= 0x37)))
- continue;
- if (IS_G200_SE(mdev) &&
- ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
- continue;
- if ((mdev->type == G200_EV ||
- mdev->type == G200_WB ||
- mdev->type == G200_EH ||
- mdev->type == G200_EW3 ||
- mdev->type == G200_EH3) &&
- (i >= 0x44) && (i <= 0x4e))
- continue;
-
- WREG_DAC(i, dacvalue[i]);
- }
-
- if (mdev->type == G200_ER)
- WREG_DAC(0x90, 0);
-}
-
-static void mgag200_init_regs(struct mga_device *mdev)
+void mgag200_init_registers(struct mga_device *mdev)
{
u8 crtc11, misc;
- mgag200_set_dac_regs(mdev);
-
WREG_SEQ(2, 0x0f);
WREG_SEQ(3, 0x00);
WREG_SEQ(4, 0x0e);
@@ -364,19 +194,12 @@ static void mgag200_init_regs(struct mga_device *mdev)
MGAREG_CRTC11_VINTCLR);
WREG_CRT(0x11, crtc11);
- if (mdev->type == G200_ER)
- WREG_ECRT(0x24, 0x5);
-
- if (mdev->type == G200_EW3)
- WREG_ECRT(0x34, 0x5);
-
misc = RREG8(MGA_MISC_IN);
misc |= MGAREG_MISC_IOADSEL;
WREG8(MGA_MISC_OUT, misc);
}
-static void mgag200_set_mode_regs(struct mga_device *mdev,
- const struct drm_display_mode *mode)
+void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode)
{
const struct mgag200_device_info *info = mdev->info;
unsigned int hdisplay, hsyncstart, hsyncend, htotal;
@@ -500,11 +323,9 @@ static void mgag200_set_offset(struct mga_device *mdev,
WREG_ECRT(0x00, crtcext0);
}
-static void mgag200_set_format_regs(struct mga_device *mdev,
- const struct drm_framebuffer *fb)
+void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format)
{
struct drm_device *dev = &mdev->base;
- const struct drm_format_info *format = fb->format;
unsigned int bpp, bppshift, scale;
u8 crtcext3, xmulctrl;
@@ -565,76 +386,9 @@ static void mgag200_set_format_regs(struct mga_device *mdev,
WREG_ECRT(3, crtcext3);
}
-static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev)
+void mgag200_enable_display(struct mga_device *mdev)
{
- static uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */
- u32 memctl;
-
- memctl = RREG32(MGAREG_MEMCTL);
-
- memctl |= RESET_FLAG;
- WREG32(MGAREG_MEMCTL, memctl);
-
- udelay(1000);
-
- memctl &= ~RESET_FLAG;
- WREG32(MGAREG_MEMCTL, memctl);
-}
-
-static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
- const struct drm_display_mode *mode,
- const struct drm_framebuffer *fb)
-{
- struct mgag200_g200se_device *g200se = to_mgag200_g200se_device(&mdev->base);
- unsigned int hiprilvl;
- u8 crtcext6;
-
- if (g200se->unique_rev_id >= 0x04) {
- hiprilvl = 0;
- } else if (g200se->unique_rev_id >= 0x02) {
- unsigned int bpp;
- unsigned long mb;
-
- if (fb->format->cpp[0] * 8 > 16)
- bpp = 32;
- else if (fb->format->cpp[0] * 8 > 8)
- bpp = 16;
- else
- bpp = 8;
-
- mb = (mode->clock * bpp) / 1000;
- if (mb > 3100)
- hiprilvl = 0;
- else if (mb > 2600)
- hiprilvl = 1;
- else if (mb > 1900)
- hiprilvl = 2;
- else if (mb > 1160)
- hiprilvl = 3;
- else if (mb > 440)
- hiprilvl = 4;
- else
- hiprilvl = 5;
-
- } else if (g200se->unique_rev_id >= 0x01) {
- hiprilvl = 3;
- } else {
- hiprilvl = 4;
- }
-
- crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */
-
- WREG_ECRT(0x06, crtcext6);
-}
-
-static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev)
-{
- WREG_ECRT(0x06, 0x00);
-}
-
-static void mgag200_enable_display(struct mga_device *mdev)
-{
- u8 seq0, seq1, crtcext1;
+ u8 seq0, crtcext1;
RREG_SEQ(0x00, seq0);
seq0 |= MGAREG_SEQ0_SYNCRST |
@@ -648,12 +402,6 @@ static void mgag200_enable_display(struct mga_device *mdev)
mga_wait_vsync(mdev);
mga_wait_busy(mdev);
- RREG_SEQ(0x01, seq1);
- seq1 &= ~MGAREG_SEQ1_SCROFF;
- WREG_SEQ(0x01, seq1);
-
- msleep(20);
-
RREG_ECRT(0x01, crtcext1);
crtcext1 &= ~MGAREG_CRTCEXT1_VSYNCOFF;
crtcext1 &= ~MGAREG_CRTCEXT1_HSYNCOFF;
@@ -662,7 +410,7 @@ static void mgag200_enable_display(struct mga_device *mdev)
static void mgag200_disable_display(struct mga_device *mdev)
{
- u8 seq0, seq1, crtcext1;
+ u8 seq0, crtcext1;
RREG_SEQ(0x00, seq0);
seq0 &= ~MGAREG_SEQ0_SYNCRST;
@@ -675,59 +423,127 @@ static void mgag200_disable_display(struct mga_device *mdev)
mga_wait_vsync(mdev);
mga_wait_busy(mdev);
- RREG_SEQ(0x01, seq1);
- seq1 |= MGAREG_SEQ1_SCROFF;
- WREG_SEQ(0x01, seq1);
-
- msleep(20);
-
RREG_ECRT(0x01, crtcext1);
crtcext1 |= MGAREG_CRTCEXT1_VSYNCOFF |
MGAREG_CRTCEXT1_HSYNCOFF;
WREG_ECRT(0x01, crtcext1);
}
+static void mgag200_handle_damage(struct mga_device *mdev, const struct iosys_map *vmap,
+ struct drm_framebuffer *fb, struct drm_rect *clip)
+{
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram);
+
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
+ drm_fb_memcpy(&dst, fb->pitches, vmap, fb, clip);
+}
+
/*
- * Connector
+ * Primary plane
*/
-static int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
+const uint32_t mgag200_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+};
+
+const size_t mgag200_primary_plane_formats_size = ARRAY_SIZE(mgag200_primary_plane_formats);
+
+const uint64_t mgag200_primary_plane_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
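
These exported format and modifier tables are meant to be consumed by the per-model plane setup code. As a rough sketch of that consumer (the two funcs tables named below are assumed, illustrative names, not defined in this hunk; one possible shape for them is sketched after the plane helpers further down), they would typically be handed to drm_universal_plane_init():

static int mgag200_example_init_primary_plane(struct mga_device *mdev,
                                              struct drm_plane *primary_plane)
{
        struct drm_device *dev = &mdev->base;
        int ret;

        /* Register the plane with the shared format/modifier tables above. */
        ret = drm_universal_plane_init(dev, primary_plane, 0,
                                       &mgag200_example_plane_funcs,
                                       mgag200_primary_plane_formats,
                                       mgag200_primary_plane_formats_size,
                                       mgag200_primary_plane_fmtmods,
                                       DRM_PLANE_TYPE_PRIMARY, NULL);
        if (ret)
                return ret;

        drm_plane_helper_add(primary_plane, &mgag200_example_plane_helper_funcs);
        drm_plane_enable_fb_damage_clips(primary_plane);

        return 0;
}
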
+
+int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
{
- struct mga_device *mdev = to_mga_device(connector->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ struct mgag200_crtc_state *new_mgag200_crtc_state;
int ret;
- /*
- * Protect access to I/O registers from concurrent modesetting
- * by acquiring the I/O-register lock.
- */
- mutex_lock(&mdev->rmmio_lock);
- ret = drm_connector_helper_get_modes_from_ddc(connector);
- mutex_unlock(&mdev->rmmio_lock);
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_crtc);
- return ret;
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, true);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ if (plane->state)
+ fb = plane->state->fb;
+
+ if (!fb || (fb->format != new_fb->format))
+ new_crtc_state->mode_changed = true; /* update PLL settings */
+
+ new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
+ new_mgag200_crtc_state->format = new_fb->format;
+
+ return 0;
}
-static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
- .get_modes = mgag200_vga_connector_helper_get_modes,
-};
+void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ u8 seq1;
-static const struct drm_connector_funcs mga_vga_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
+ if (!fb)
+ return;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage);
+ }
+
+ /* Always scanout image at VRAM offset 0 */
+ mgag200_set_startadd(mdev, (u32)0);
+ mgag200_set_offset(mdev, fb);
+
+ if (!old_plane_state->crtc && plane_state->crtc) { /* enabling */
+ RREG_SEQ(0x01, seq1);
+ seq1 &= ~MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
+ msleep(20);
+ }
+}
+
+void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 seq1;
+
+ RREG_SEQ(0x01, seq1);
+ seq1 |= MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
+ msleep(20);
+}
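
The three plane helpers above are meant to be tied together by the per-model code in a drm_plane_helper_funcs table, alongside a drm_plane_funcs table for the shadow-plane state. A minimal sketch, assuming the shadow-plane macros from <drm/drm_gem_atomic_helper.h> (the two struct instances are illustrative names, not part of this patch):

static const struct drm_plane_helper_funcs mgag200_example_plane_helper_funcs = {
        /* Map/unmap the shadow framebuffer around atomic commits. */
        DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
        .atomic_check = mgag200_primary_plane_helper_atomic_check,
        .atomic_update = mgag200_primary_plane_helper_atomic_update,
        .atomic_disable = mgag200_primary_plane_helper_atomic_disable,
};

static const struct drm_plane_funcs mgag200_example_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = drm_plane_cleanup,
        /* Reset/duplicate/destroy the shadow-plane state. */
        DRM_GEM_SHADOW_PLANE_FUNCS,
};
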
/*
- * Simple Display Pipe
+ * CRTC
*/
-static enum drm_mode_status
-mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- struct mga_device *mdev = to_mga_device(pipe->crtc.dev);
+ struct mga_device *mdev = to_mga_device(crtc->dev);
const struct mgag200_device_info *info = mdev->info;
/*
@@ -754,167 +570,112 @@ mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
return MODE_OK;
}
-static void
-mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
- struct drm_rect *clip, const struct iosys_map *map)
-{
- void __iomem *dst = mdev->vram;
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
-
- dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip);
- drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip);
-}
-
-static void
-mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
{
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
- struct mgag200_pll *pixpll = &mdev->pixpll;
- struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
- struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
-
- /*
- * Concurrent operations could possibly trigger a call to
- * drm_connector_helper_funcs.get_modes by trying to read the
- * display modes. Protect access to I/O registers by acquiring
- * the I/O-register lock.
- */
- mutex_lock(&mdev->rmmio_lock);
-
- if (mdev->type == G200_WB || mdev->type == G200_EW3)
- mgag200_g200wb_hold_bmc(mdev);
-
- mgag200_set_format_regs(mdev, fb);
- mgag200_set_mode_regs(mdev, adjusted_mode);
-
- pixpll->funcs->update(pixpll, &mgag200_crtc_state->pixpllc);
-
- if (mdev->type == G200_ER)
- mgag200_g200er_reset_tagfifo(mdev);
-
- if (IS_G200_SE(mdev))
- mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, fb);
- else if (mdev->type == G200_EV)
- mgag200_g200ev_set_hiprilvl(mdev);
-
- if (mdev->type == G200_WB || mdev->type == G200_EW3)
- mgag200_g200wb_release_bmc(mdev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ struct drm_property_blob *new_gamma_lut = new_crtc_state->gamma_lut;
+ int ret;
- if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, fb->format, crtc_state->gamma_lut->data);
- else
- mgag200_crtc_set_gamma_linear(mdev, fb->format);
+ ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
+ if (ret)
+ return ret;
- mgag200_enable_display(mdev);
+ if (!new_crtc_state->enable)
+ return 0;
- mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->data[0]);
+ if (new_crtc_state->mode_changed) {
+ if (funcs->pixpllc_atomic_check) {
+ ret = funcs->pixpllc_atomic_check(crtc, new_state);
+ if (ret)
+ return ret;
+ }
+ }
- /* Always scanout image at VRAM offset 0 */
- mgag200_set_startadd(mdev, (u32)0);
- mgag200_set_offset(mdev, fb);
+ if (new_crtc_state->color_mgmt_changed && new_gamma_lut) {
+ if (new_gamma_lut->length != MGAG200_LUT_SIZE * sizeof(struct drm_color_lut)) {
+ drm_dbg(dev, "Wrong size for gamma_lut %zu\n", new_gamma_lut->length);
+ return -EINVAL;
+ }
+ }
- mutex_unlock(&mdev->rmmio_lock);
+ return drm_atomic_add_affected_planes(new_state, crtc);
}
-static void
-mgag200_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
- struct drm_crtc *crtc = &pipe->crtc;
- struct mga_device *mdev = to_mga_device(crtc->dev);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
- mgag200_disable_display(mdev);
+ if (crtc_state->enable && crtc_state->color_mgmt_changed) {
+ const struct drm_format_info *format = mgag200_crtc_state->format;
+
+ if (crtc_state->gamma_lut)
+ mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ else
+ mgag200_crtc_set_gamma_linear(mdev, format);
+ }
}
-static int
-mgag200_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
- struct drm_plane *plane = plane_state->plane;
- struct drm_device *dev = plane->dev;
+ struct drm_device *dev = crtc->dev;
struct mga_device *mdev = to_mga_device(dev);
- struct mgag200_pll *pixpll = &mdev->pixpll;
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
- struct drm_framebuffer *new_fb = plane_state->fb;
- struct drm_framebuffer *fb = NULL;
- int ret;
+ const struct drm_format_info *format = mgag200_crtc_state->format;
- if (!new_fb)
- return 0;
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
- if (plane->state)
- fb = plane->state->fb;
+ mgag200_set_format_regs(mdev, format);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
- if (!fb || (fb->format != new_fb->format))
- crtc_state->mode_changed = true; /* update PLL settings */
+ if (funcs->pixpllc_atomic_update)
+ funcs->pixpllc_atomic_update(crtc, old_state);
- if (crtc_state->mode_changed) {
- ret = pixpll->funcs->compute(pixpll, crtc_state->mode.clock,
- &mgag200_crtc_state->pixpllc);
- if (ret)
- return ret;
- }
+ mgag200_enable_display(mdev);
- if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
- if (crtc_state->gamma_lut->length !=
- MGAG200_LUT_SIZE * sizeof(struct drm_color_lut)) {
- drm_err(dev, "Wrong size for gamma_lut %zu\n",
- crtc_state->gamma_lut->length);
- return -EINVAL;
- }
- }
- return 0;
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
}
-static void
-mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
- struct drm_plane *plane = &pipe->plane;
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_device *dev = plane->dev;
- struct mga_device *mdev = to_mga_device(dev);
- struct drm_plane_state *state = plane->state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_rect damage;
- struct drm_atomic_helper_damage_iter iter;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
+ const struct mgag200_device_funcs *funcs = mdev->funcs;
- if (!fb)
- return;
+ if (funcs->disable_vidrst)
+ funcs->disable_vidrst(mdev);
- mutex_lock(&mdev->rmmio_lock);
+ mgag200_disable_display(mdev);
- if (crtc->state->color_mgmt_changed && crtc->state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, fb->format, crtc->state->gamma_lut->data);
+ if (funcs->enable_vidrst)
+ funcs->enable_vidrst(mdev);
+}
- drm_atomic_helper_damage_iter_init(&iter, old_state, state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->data[0]);
- }
- /* Always scanout image at VRAM offset 0 */
- mgag200_set_startadd(mdev, (u32)0);
- mgag200_set_offset(mdev, fb);
+void mgag200_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mgag200_crtc_state *mgag200_crtc_state;
- mutex_unlock(&mdev->rmmio_lock);
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ mgag200_crtc_state = kzalloc(sizeof(*mgag200_crtc_state), GFP_KERNEL);
+ if (mgag200_crtc_state)
+ __drm_atomic_helper_crtc_reset(crtc, &mgag200_crtc_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
-static struct drm_crtc_state *
-mgag200_simple_display_pipe_duplicate_crtc_state(struct drm_simple_display_pipe *pipe)
+struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_crtc_state *crtc_state = crtc->state;
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
struct mgag200_crtc_state *new_mgag200_crtc_state;
@@ -927,14 +688,14 @@ mgag200_simple_display_pipe_duplicate_crtc_state(struct drm_simple_display_pipe
return NULL;
__drm_atomic_helper_crtc_duplicate_state(crtc, &new_mgag200_crtc_state->base);
+ new_mgag200_crtc_state->format = mgag200_crtc_state->format;
memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc,
sizeof(new_mgag200_crtc_state->pixpllc));
return &new_mgag200_crtc_state->base;
}
-static void mgag200_simple_display_pipe_destroy_crtc_state(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state)
+void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
{
struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
@@ -942,50 +703,49 @@ static void mgag200_simple_display_pipe_destroy_crtc_state(struct drm_simple_dis
kfree(mgag200_crtc_state);
}
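
These reset/duplicate/destroy hooks carry the driver-private mgag200_crtc_state (pixel format and PLL values) across atomic commits. Together with the CRTC helpers exported earlier, they would typically be wired up along these lines (the two struct instances below are assumed names for illustration):

static const struct drm_crtc_helper_funcs mgag200_example_crtc_helper_funcs = {
        .mode_valid = mgag200_crtc_helper_mode_valid,
        .atomic_check = mgag200_crtc_helper_atomic_check,
        .atomic_flush = mgag200_crtc_helper_atomic_flush,
        .atomic_enable = mgag200_crtc_helper_atomic_enable,
        .atomic_disable = mgag200_crtc_helper_atomic_disable,
};

static const struct drm_crtc_funcs mgag200_example_crtc_funcs = {
        .reset = mgag200_crtc_reset,
        .destroy = drm_crtc_cleanup,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state,
        .atomic_destroy_state = mgag200_crtc_atomic_destroy_state,
};
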
-static void mgag200_simple_display_pipe_reset_crtc(struct drm_simple_display_pipe *pipe)
+/*
+ * Connector
+ */
+
+int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
{
- struct drm_crtc *crtc = &pipe->crtc;
- struct mgag200_crtc_state *mgag200_crtc_state;
+ struct mga_device *mdev = to_mga_device(connector->dev);
+ int ret;
- if (crtc->state) {
- mgag200_simple_display_pipe_destroy_crtc_state(pipe, crtc->state);
- crtc->state = NULL; /* must be set to NULL here */
- }
+ /*
+ * Protect access to I/O registers from concurrent modesetting
+ * by acquiring the I/O-register lock.
+ */
+ mutex_lock(&mdev->rmmio_lock);
+ ret = drm_connector_helper_get_modes_from_ddc(connector);
+ mutex_unlock(&mdev->rmmio_lock);
- mgag200_crtc_state = kzalloc(sizeof(*mgag200_crtc_state), GFP_KERNEL);
- if (!mgag200_crtc_state)
- return;
- __drm_atomic_helper_crtc_reset(crtc, &mgag200_crtc_state->base);
+ return ret;
}
-static const struct drm_simple_display_pipe_funcs
-mgag200_simple_display_pipe_funcs = {
- .mode_valid = mgag200_simple_display_pipe_mode_valid,
- .enable = mgag200_simple_display_pipe_enable,
- .disable = mgag200_simple_display_pipe_disable,
- .check = mgag200_simple_display_pipe_check,
- .update = mgag200_simple_display_pipe_update,
- .reset_crtc = mgag200_simple_display_pipe_reset_crtc,
- .duplicate_crtc_state = mgag200_simple_display_pipe_duplicate_crtc_state,
- .destroy_crtc_state = mgag200_simple_display_pipe_destroy_crtc_state,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
-};
-
-static const uint32_t mgag200_simple_display_pipe_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
-};
-
-static const uint64_t mgag200_simple_display_pipe_fmtmods[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
/*
* Mode config
*/
+static void mgag200_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct mga_device *mdev = to_mga_device(state->dev);
+
+ /*
+ * Concurrent operations could possibly trigger a call to
+ * drm_connector_helper_funcs.get_modes by trying to read the
+ * display modes. Protect access to I/O registers by acquiring
+ * the I/O-register lock.
+ */
+ mutex_lock(&mdev->rmmio_lock);
+ drm_atomic_helper_commit_tail(state);
+ mutex_unlock(&mdev->rmmio_lock);
+}
+
+static const struct drm_mode_config_helper_funcs mgag200_mode_config_helper_funcs = {
+ .atomic_commit_tail = mgag200_mode_config_helper_atomic_commit_tail,
+};
+
/* Calculates a mode's required memory bandwidth (in KiB/sec). */
static uint32_t mgag200_calculate_mode_bandwidth(const struct drm_display_mode *mode,
unsigned int bits_per_pixel)
@@ -1048,23 +808,16 @@ static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_available)
+int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available)
{
struct drm_device *dev = &mdev->base;
- struct mga_i2c_chan *i2c = &mdev->i2c;
- struct drm_connector *connector = &mdev->connector;
- struct drm_simple_display_pipe *pipe = &mdev->display_pipe;
- size_t format_count = ARRAY_SIZE(mgag200_simple_display_pipe_formats);
int ret;
- mgag200_init_regs(mdev);
-
mdev->vram_available = vram_available;
ret = drmm_mode_config_init(dev);
if (ret) {
- drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
- ret);
+ drm_err(dev, "drmm_mode_config_init() failed: %d\n", ret);
return ret;
}
@@ -1073,48 +826,7 @@ int mgag200_modeset_init(struct mga_device *mdev, resource_size_t vram_available
dev->mode_config.preferred_depth = 24;
dev->mode_config.fb_base = mdev->vram_res->start;
dev->mode_config.funcs = &mgag200_mode_config_funcs;
-
- ret = mgag200_i2c_init(mdev, i2c);
- if (ret) {
- drm_err(dev, "failed to add DDC bus: %d\n", ret);
- return ret;
- }
-
- ret = drm_connector_init_with_ddc(dev, connector,
- &mga_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &i2c->adapter);
- if (ret) {
- drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
- return ret;
- }
- drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
-
- ret = mgag200_pixpll_init(&mdev->pixpll, mdev);
- if (ret)
- return ret;
-
- ret = drm_simple_display_pipe_init(dev, pipe,
- &mgag200_simple_display_pipe_funcs,
- mgag200_simple_display_pipe_formats,
- format_count,
- mgag200_simple_display_pipe_fmtmods,
- connector);
- if (ret) {
- drm_err(dev,
- "drm_simple_display_pipe_init() failed, error %d\n",
- ret);
- return ret;
- }
-
- drm_plane_enable_fb_damage_clips(&pipe->plane);
-
- /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
- drm_mode_crtc_set_gamma_size(&pipe->crtc, MGAG200_LUT_SIZE);
-
- drm_crtc_enable_color_mgmt(&pipe->crtc, 0, false, MGAG200_LUT_SIZE);
-
- drm_mode_config_reset(dev);
+ dev->mode_config.helper_private = &mgag200_mode_config_helper_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_pll.c b/drivers/gpu/drm/mgag200/mgag200_pll.c
deleted file mode 100644
index 8065ca5d8de9..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_pll.c
+++ /dev/null
@@ -1,997 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/delay.h>
-
-#include "mgag200_drv.h"
-
-/*
- * G200
- */
-
-static int mgag200_pixpll_compute_g200(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- struct mga_device *mdev = pixpll->mdev;
- struct drm_device *dev = &mdev->base;
- struct mgag200_g200_device *g200 = to_mgag200_g200_device(dev);
- const int post_div_max = 7;
- const int in_div_min = 1;
- const int in_div_max = 6;
- const int feed_div_min = 7;
- const int feed_div_max = 127;
- u8 testp, testm, testn;
- u8 n = 0, m = 0, p, s;
- long f_vco;
- long computed;
- long delta, tmp_delta;
- long ref_clk = g200->ref_clk;
- long p_clk_min = g200->pclk_min;
- long p_clk_max = g200->pclk_max;
-
- if (clock > p_clk_max) {
- drm_err(dev, "Pixel Clock %ld too high\n", clock);
- return -EINVAL;
- }
-
- if (clock < p_clk_min >> 3)
- clock = p_clk_min >> 3;
-
- f_vco = clock;
- for (testp = 0;
- testp <= post_div_max && f_vco < p_clk_min;
- testp = (testp << 1) + 1, f_vco <<= 1)
- ;
- p = testp + 1;
-
- delta = clock;
-
- for (testm = in_div_min; testm <= in_div_max; testm++) {
- for (testn = feed_div_min; testn <= feed_div_max; testn++) {
- computed = ref_clk * (testn + 1) / (testm + 1);
- if (computed < f_vco)
- tmp_delta = f_vco - computed;
- else
- tmp_delta = computed - f_vco;
- if (tmp_delta < delta) {
- delta = tmp_delta;
- m = testm + 1;
- n = testn + 1;
- }
- }
- }
- f_vco = ref_clk * n / m;
- if (f_vco < 100000)
- s = 0;
- else if (f_vco < 140000)
- s = 1;
- else if (f_vco < 180000)
- s = 2;
- else
- s = 3;
-
- drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
- clock, f_vco, m, n, p, s);
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- struct mga_device *mdev = pixpll->mdev;
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200 = {
- .compute = mgag200_pixpll_compute_g200,
- .update = mgag200_pixpll_update_g200,
-};
-
-/*
- * G200SE
- */
-
-static int mgag200_pixpll_compute_g200se_00(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 320000;
- static const unsigned int vcomin = 160000;
- static const unsigned int pllreffreq = 25000;
-
- unsigned int delta, tmpdelta, permitteddelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
- permitteddelta = clock * 5 / 1000;
-
- for (testp = 8; testp > 0; testp /= 2) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testn = 17; testn < 256; testn++) {
- for (testm = 1; testm < 32; testm++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm;
- n = testn;
- p = testp;
- }
- }
- }
- }
-
- if (delta > permitteddelta) {
- pr_warn("PLL delta too large\n");
- return -EINVAL;
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void mgag200_pixpll_update_g200se_00(struct mgag200_pll *pixpll,
- const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
-}
-
-static int mgag200_pixpll_compute_g200se_04(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 1600000;
- static const unsigned int vcomin = 800000;
- static const unsigned int pllreffreq = 25000;
- static const unsigned int pvalues_e4[] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
-
- unsigned int delta, tmpdelta, permitteddelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
- unsigned int fvv;
- unsigned int i;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- if (clock < 25000)
- clock = 25000;
- clock = clock * 2;
-
- /* Permitted delta is 0.5% as per the VESA specification */
- permitteddelta = clock * 5 / 1000;
-
- for (i = 0 ; i < ARRAY_SIZE(pvalues_e4); i++) {
- testp = pvalues_e4[i];
-
- if ((clock * testp) > vcomax)
- continue;
- if ((clock * testp) < vcomin)
- continue;
-
- for (testn = 50; testn <= 256; testn++) {
- for (testm = 1; testm <= 32; testm++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
-
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm;
- n = testn;
- p = testp;
- }
- }
- }
- }
-
- fvv = pllreffreq * n / m;
- fvv = (fvv - 800000) / 50000;
- if (fvv > 15)
- fvv = 15;
- s = fvv << 1;
-
- if (delta > permitteddelta) {
- pr_warn("PLL delta too large\n");
- return -EINVAL;
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void mgag200_pixpll_update_g200se_04(struct mgag200_pll *pixpll,
- const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
-
- WREG_DAC(0x1a, 0x09);
- msleep(20);
- WREG_DAC(0x1a, 0x01);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200se_00 = {
- .compute = mgag200_pixpll_compute_g200se_00,
- .update = mgag200_pixpll_update_g200se_00,
-};
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200se_04 = {
- .compute = mgag200_pixpll_compute_g200se_04,
- .update = mgag200_pixpll_update_g200se_04,
-};
-
-/*
- * G200WB
- */
-
-static int mgag200_pixpll_compute_g200wb(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 550000;
- static const unsigned int vcomin = 150000;
- static const unsigned int pllreffreq = 48000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 1; testp < 9; testp++) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testm = 1; testm < 17; testm++) {
- for (testn = 1; testn < 151; testn++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn;
- m = testm;
- p = testp;
- s = 0;
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200wb(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- int i, j, tmpcount, vcount;
- struct mga_device *mdev = pixpll->mdev;
- bool pll_locked = false;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = ((pixpllcn & GENMASK(10, 9)) >> 3) | (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- for (i = 0; i <= 32 && pll_locked == false; i++) {
- if (i > 0) {
- WREG8(MGAREG_CRTC_INDEX, 0x1e);
- tmp = RREG8(MGAREG_CRTC_DATA);
- if (tmp < 0xff)
- WREG8(MGAREG_CRTC_DATA, tmp+1);
- }
-
- /* set pixclkdis to 1 */
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG8(DAC_DATA, tmp);
-
- /* select PLL Set C */
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= 0x3 << 2;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- /* reset the PLL */
- WREG8(DAC_INDEX, MGA1064_VREF_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~0x04;
- WREG8(DAC_DATA, tmp);
-
- udelay(50);
-
- /* program pixel pll register */
- WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);
-
- udelay(50);
-
- /* turn pll on */
- WREG8(DAC_INDEX, MGA1064_VREF_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= 0x04;
- WREG_DAC(MGA1064_VREF_CTL, tmp);
-
- udelay(500);
-
- /* select the pixel pll */
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
- tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
- tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
- WREG8(DAC_DATA, tmp);
-
- /* reset dotclock rate bit */
- WREG8(MGAREG_SEQ_INDEX, 1);
- tmp = RREG8(MGAREG_SEQ_DATA);
- tmp &= ~0x8;
- WREG8(MGAREG_SEQ_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- vcount = RREG8(MGAREG_VCOUNT);
-
- for (j = 0; j < 30 && pll_locked == false; j++) {
- tmpcount = RREG8(MGAREG_VCOUNT);
- if (tmpcount < vcount)
- vcount = 0;
- if ((tmpcount - vcount) > 2)
- pll_locked = true;
- else
- udelay(5);
- }
- }
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
- WREG_DAC(MGA1064_REMHEADCTL, tmp);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200wb = {
- .compute = mgag200_pixpll_compute_g200wb,
- .update = mgag200_pixpll_update_g200wb,
-};
-
-/*
- * G200EV
- */
-
-static int mgag200_pixpll_compute_g200ev(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 550000;
- static const unsigned int vcomin = 150000;
- static const unsigned int pllreffreq = 50000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 16; testp > 0; testp--) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testn = 1; testn < 257; testn++) {
- for (testm = 1; testm < 17; testm++) {
- computed = (pllreffreq * testn) /
- (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn;
- m = testm;
- p = testp;
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200ev(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= 0x3 << 2;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
- tmp = RREG8(DAC_DATA);
- WREG8(DAC_DATA, tmp & ~0x40);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- WREG_DAC(MGA1064_EV_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_EV_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_EV_PIX_PLLC_P, xpixpllcp);
-
- udelay(50);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
- tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
- tmp = RREG8(DAC_DATA);
- WREG8(DAC_DATA, tmp | 0x40);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= (0x3 << 2);
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ev = {
- .compute = mgag200_pixpll_compute_g200ev,
- .update = mgag200_pixpll_update_g200ev,
-};
-
-/*
- * G200EH
- */
-
-static int mgag200_pixpll_compute_g200eh(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 800000;
- static const unsigned int vcomin = 400000;
- static const unsigned int pllreffreq = 33333;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 16; testp > 0; testp >>= 1) {
- if (clock * testp > vcomax)
- continue;
- if (clock * testp < vcomin)
- continue;
-
- for (testm = 1; testm < 33; testm++) {
- for (testn = 17; testn < 257; testn++) {
- computed = (pllreffreq * testn) / (testm * testp);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn;
- m = testm;
- p = testp;
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200eh(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- int i, j, tmpcount, vcount;
- struct mga_device *mdev = pixpll->mdev;
- bool pll_locked = false;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- for (i = 0; i <= 32 && pll_locked == false; i++) {
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= 0x3 << 2;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- WREG_DAC(MGA1064_EH_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_EH_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_EH_PIX_PLLC_P, xpixpllcp);
-
- udelay(500);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
- tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- vcount = RREG8(MGAREG_VCOUNT);
-
- for (j = 0; j < 30 && pll_locked == false; j++) {
- tmpcount = RREG8(MGAREG_VCOUNT);
- if (tmpcount < vcount)
- vcount = 0;
- if ((tmpcount - vcount) > 2)
- pll_locked = true;
- else
- udelay(5);
- }
- }
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200eh = {
- .compute = mgag200_pixpll_compute_g200eh,
- .update = mgag200_pixpll_update_g200eh,
-};
-
-/*
- * G200EH3
- */
-
-static int mgag200_pixpll_compute_g200eh3(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 3000000;
- static const unsigned int vcomin = 1500000;
- static const unsigned int pllreffreq = 25000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
- testp = 0;
-
- for (testm = 150; testm >= 6; testm--) {
- if (clock * testm > vcomax)
- continue;
- if (clock * testm < vcomin)
- continue;
- for (testn = 120; testn >= 60; testn--) {
- computed = (pllreffreq * testn) / testm;
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- n = testn + 1;
- m = testm + 1;
- p = testp + 1;
- }
- if (delta == 0)
- break;
- }
- if (delta == 0)
- break;
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200eh3 = {
- .compute = mgag200_pixpll_compute_g200eh3,
- .update = mgag200_pixpll_update_g200eh, // same as G200EH
-};
-
-/*
- * G200ER
- */
-
-static int mgag200_pixpll_compute_g200er(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 1488000;
- static const unsigned int vcomin = 1056000;
- static const unsigned int pllreffreq = 48000;
- static const unsigned int m_div_val[] = { 1, 2, 4, 8 };
-
- unsigned int delta, tmpdelta;
- int testr, testn, testm, testo;
- unsigned int p, m, n, s;
- unsigned int computed, vco;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testr = 0; testr < 4; testr++) {
- if (delta == 0)
- break;
- for (testn = 5; testn < 129; testn++) {
- if (delta == 0)
- break;
- for (testm = 3; testm >= 0; testm--) {
- if (delta == 0)
- break;
- for (testo = 5; testo < 33; testo++) {
- vco = pllreffreq * (testn + 1) /
- (testr + 1);
- if (vco < vcomin)
- continue;
- if (vco > vcomax)
- continue;
- computed = vco / (m_div_val[testm] * (testo + 1));
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = (testm | (testo << 3)) + 1;
- n = testn + 1;
- p = testr + 1;
- s = testr;
- }
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static void
-mgag200_pixpll_update_g200er(struct mgag200_pll *pixpll, const struct mgag200_pll_values *pixpllc)
-{
- unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
- u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
- struct mga_device *mdev = pixpll->mdev;
-
- pixpllcm = pixpllc->m - 1;
- pixpllcn = pixpllc->n - 1;
- pixpllcp = pixpllc->p - 1;
- pixpllcs = pixpllc->s;
-
- xpixpllcm = pixpllcm;
- xpixpllcn = pixpllcn;
- xpixpllcp = (pixpllcs << 3) | pixpllcp;
-
- WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
- WREG8(DAC_DATA, tmp);
-
- WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
- tmp = RREG8(DAC_DATA);
- tmp |= MGA1064_REMHEADCTL_CLKDIS;
- WREG8(DAC_DATA, tmp);
-
- tmp = RREG8(MGAREG_MEM_MISC_READ);
- tmp |= (0x3<<2) | 0xc0;
- WREG8(MGAREG_MEM_MISC_WRITE, tmp);
-
- WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
- tmp = RREG8(DAC_DATA);
- tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
- tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
- WREG8(DAC_DATA, tmp);
-
- udelay(500);
-
- WREG_DAC(MGA1064_ER_PIX_PLLC_N, xpixpllcn);
- WREG_DAC(MGA1064_ER_PIX_PLLC_M, xpixpllcm);
- WREG_DAC(MGA1064_ER_PIX_PLLC_P, xpixpllcp);
-
- udelay(50);
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200er = {
- .compute = mgag200_pixpll_compute_g200er,
- .update = mgag200_pixpll_update_g200er,
-};
-
-/*
- * G200EW3
- */
-
-static int mgag200_pixpll_compute_g200ew3(struct mgag200_pll *pixpll, long clock,
- struct mgag200_pll_values *pixpllc)
-{
- static const unsigned int vcomax = 800000;
- static const unsigned int vcomin = 400000;
- static const unsigned int pllreffreq = 25000;
-
- unsigned int delta, tmpdelta;
- unsigned int testp, testm, testn, testp2;
- unsigned int p, m, n, s;
- unsigned int computed;
-
- m = n = p = s = 0;
- delta = 0xffffffff;
-
- for (testp = 1; testp < 8; testp++) {
- for (testp2 = 1; testp2 < 8; testp2++) {
- if (testp < testp2)
- continue;
- if ((clock * testp * testp2) > vcomax)
- continue;
- if ((clock * testp * testp2) < vcomin)
- continue;
- for (testm = 1; testm < 26; testm++) {
- for (testn = 32; testn < 2048 ; testn++) {
- computed = (pllreffreq * testn) / (testm * testp * testp2);
- if (computed > clock)
- tmpdelta = computed - clock;
- else
- tmpdelta = clock - computed;
- if (tmpdelta < delta) {
- delta = tmpdelta;
- m = testm + 1;
- n = testn + 1;
- p = testp + 1;
- s = testp2;
- }
- }
- }
- }
- }
-
- pixpllc->m = m;
- pixpllc->n = n;
- pixpllc->p = p;
- pixpllc->s = s;
-
- return 0;
-}
-
-static const struct mgag200_pll_funcs mgag200_pixpll_funcs_g200ew3 = {
- .compute = mgag200_pixpll_compute_g200ew3,
- .update = mgag200_pixpll_update_g200wb, // same as G200WB
-};
-
-/*
- * PLL initialization
- */
-
-int mgag200_pixpll_init(struct mgag200_pll *pixpll, struct mga_device *mdev)
-{
- struct drm_device *dev = &mdev->base;
- struct mgag200_g200se_device *g200se;
-
- pixpll->mdev = mdev;
-
- switch (mdev->type) {
- case G200_PCI:
- case G200_AGP:
- pixpll->funcs = &mgag200_pixpll_funcs_g200;
- break;
- case G200_SE_A:
- case G200_SE_B:
- g200se = to_mgag200_g200se_device(dev);
-
- if (g200se->unique_rev_id >= 0x04)
- pixpll->funcs = &mgag200_pixpll_funcs_g200se_04;
- else
- pixpll->funcs = &mgag200_pixpll_funcs_g200se_00;
- break;
- case G200_WB:
- pixpll->funcs = &mgag200_pixpll_funcs_g200wb;
- break;
- case G200_EV:
- pixpll->funcs = &mgag200_pixpll_funcs_g200ev;
- break;
- case G200_EH:
- pixpll->funcs = &mgag200_pixpll_funcs_g200eh;
- break;
- case G200_EH3:
- pixpll->funcs = &mgag200_pixpll_funcs_g200eh3;
- break;
- case G200_ER:
- pixpll->funcs = &mgag200_pixpll_funcs_g200er;
- break;
- case G200_EW3:
- pixpll->funcs = &mgag200_pixpll_funcs_g200ew3;
- break;
- default:
- drm_err(dev, "unknown device type %d\n", mdev->type);
- return -ENODEV;
- }
-
- return 0;
-}
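
Every compute callback deleted above implements the same idea: brute-force the divider space and keep the (m, n, p) combination whose output comes closest to the requested pixel clock, subject to the VCO limits of the variant. A stripped-down, stand-alone sketch of that search (the reference clock, VCO window, and divider ranges are placeholders, not values from any specific G200 model):

#include <stdio.h>

#define REF_KHZ   25000u   /* placeholder reference clock */
#define VCO_MIN   50000u   /* placeholder VCO window, in kHz */
#define VCO_MAX   250000u
#define N_MIN     8u
#define N_MAX     128u
#define M_MIN     1u
#define M_MAX     16u
#define P_MAX     8u

struct pll_values { unsigned int m, n, p; };

/* Minimize |ref * n / m / p - clock| over all allowed dividers. */
static unsigned int pll_compute(unsigned int clock_khz, struct pll_values *out)
{
        unsigned int best_delta = ~0u;
        unsigned int m, n, p;

        for (p = 1; p <= P_MAX; p++) {
                for (m = M_MIN; m <= M_MAX; m++) {
                        for (n = N_MIN; n <= N_MAX; n++) {
                                unsigned int vco = REF_KHZ * n / m;
                                unsigned int f, delta;

                                if (vco < VCO_MIN || vco > VCO_MAX)
                                        continue;

                                f = vco / p;
                                delta = (f > clock_khz) ? f - clock_khz
                                                        : clock_khz - f;
                                if (delta < best_delta) {
                                        best_delta = delta;
                                        out->m = m;
                                        out->n = n;
                                        out->p = p;
                                }
                        }
                }
        }
        return best_delta;
}

int main(void)
{
        struct pll_values v = { 0 };
        unsigned int delta = pll_compute(65000, &v); /* 65 MHz, e.g. 1024x768@60 */

        printf("m=%u n=%u p=%u (delta %u kHz)\n", v.m, v.n, v.p, delta);
        return 0;
}
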
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index 99a9ab7d9119..1019ffd6c260 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -96,7 +96,7 @@
#define MGAREG_SRCORG 0x2cb4
#define MGAREG_DSTORG 0x2cb8
-/* add or or this to one of the previous "power registers" to start
+/* add or this to one of the previous "power registers" to start
the drawing engine */
#define MGAREG_EXEC 0x0100
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0ab0e1dd8bbb..2c8b9899625b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -68,7 +68,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 0c6b2a6d0b4c..7cb8d9849c07 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -62,7 +62,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index b03e2c413ab1..beea4a7fc1df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1413,6 +1413,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
+
#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 310a317885a1..e033d6a67a20 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -873,9 +873,47 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
(val & 1), 100, 1000);
}
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
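
Each step above is a request/acknowledge handshake: set the halt bit, busy-wait (via msm's spin_until() macro) until the matching ack bit appears, and only then proceed or clear the request. A generic sketch of that pattern with a bounded retry budget instead of an open-ended spin (the helper name, accessors, and timeout are placeholders):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/*
 * Request a halt and wait for the hardware to acknowledge it before the
 * caller issues a reset; gives up after ~1 ms instead of spinning forever.
 */
static int example_halt_and_wait_ack(void __iomem *base, u32 halt_reg,
                                     u32 ack_reg, u32 mask)
{
        int i;

        writel(mask, base + halt_reg);

        for (i = 0; i < 100; i++) {
                if ((readl(base + ack_reg) & mask) == mask)
                        return 0;
                udelay(10);
        }

        return -ETIMEDOUT;
}
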
+
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
/* Flush all the queues */
a6xx_hfi_stop(gmu);
@@ -887,6 +925,15 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
+
+ /* Halt the gmu cm3 core */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
+
+ /* Reset GPU core blocks */
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+ udelay(100);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1014,36 +1061,6 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if (!a6xx_has_gbif(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
- 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
- return;
- }
-
- /* Halt new client requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
- /* Halt all AXI requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
- /* The GBIF halt needs to be explicitly cleared */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
@@ -1069,7 +1086,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
- a6xx_gmu_notify_slumber(gmu);
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 4d501100b9e4..fdc578016e0b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
+#include <linux/reset.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -146,7 +147,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, 0x31);
+ OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
/*
@@ -987,6 +988,10 @@ static int hw_init(struct msm_gpu *gpu)
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ if (a6xx_has_gbif(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
/*
@@ -1261,7 +1266,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- int i;
+ int i, active_submits;
adreno_dump_info(gpu);
@@ -1272,14 +1277,46 @@ static void a6xx_recover(struct msm_gpu *gpu)
if (hang_debug)
a6xx_dump(gpu);
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
/*
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
- gpu->funcs->pm_suspend(gpu);
- gpu->funcs->pm_resume(gpu);
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+ /* active_submits won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ /* Call into gpucc driver to poll for cx gdsc collapse */
+ reset_control_reset(gpu->cx_collapse);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
}
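
The reset_control_reset(gpu->cx_collapse) call above polls the gpucc reset line for CX GDSC collapse; the control itself has to be obtained elsewhere, typically once at probe time. A sketch of that lookup (the con-id string and error handling are assumptions, not shown in this patch):

static int example_get_cx_collapse(struct platform_device *pdev,
                                   struct msm_gpu *gpu)
{
        /*
         * Optional lookup: returns a no-op control (NULL) when a
         * "cx_collapse" reset is not described for this platform.
         */
        gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
                                                                     "cx_collapse");
        return PTR_ERR_OR_ZERO(gpu->cx_collapse);
}
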
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 781dcd3fb283..13ce321283ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -412,7 +412,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
@@ -420,6 +419,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
memset(fetch_active, 0, sizeof(fetch_active));
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum dpu_sspp sspp_idx;
+
state = plane->state;
if (!state)
continue;
@@ -430,14 +431,14 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
pstate = to_dpu_plane_state(state);
fb = state->fb;
- dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
- set_bit(dpu_plane_pipe(plane), fetch_active);
+ sspp_idx = dpu_plane_pipe(plane);
+ set_bit(sspp_idx, fetch_active);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
- dpu_plane_pipe(plane) - SSPP_VIG0,
+ sspp_idx - SSPP_VIG0,
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
@@ -447,13 +448,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
stage_idx = zpos_cnt[pstate->stage]++;
stage_cfg->stage[pstate->stage][stage_idx] =
- dpu_plane_pipe(plane);
+ sspp_idx;
stage_cfg->multirect_index[pstate->stage][stage_idx] =
pstate->multirect_index;
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, pstate, stage_idx,
- dpu_plane_pipe(plane) - SSPP_VIG0,
+ sspp_idx - SSPP_VIG0,
format->base.pixel_format,
fb ? fb->modifier : 0);
@@ -462,7 +463,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
pstate, format);
- mixer[lm_idx].flush_mask |= flush_mask;
+ mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
+ sspp_idx);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@@ -496,7 +498,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
- mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
@@ -513,17 +514,14 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
- mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
- mixer[i].hw_lm->idx);
-
/* stage config flush mask */
- ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+ ctl->ops.update_pending_flush_mixer(ctl,
+ mixer[i].hw_lm->idx);
- DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
- ctl->idx - CTL_0,
- mixer[i].flush_mask);
+ ctl->idx - CTL_0);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
@@ -767,16 +765,9 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
dspp->ops.setup_pcc(dspp, &cfg);
}
- mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
- mixer[i].hw_dspp->idx);
-
/* stage config flush mask */
- ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
-
- DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
- mixer[i].hw_lm->idx - DSPP_0,
- ctl->idx - CTL_0,
- mixer[i].flush_mask);
+ ctl->ops.update_pending_flush_dspp(ctl,
+ mixer[i].hw_dspp->idx);
}
}
@@ -1235,17 +1226,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
for (i = 1; i < SSPP_MAX; i++) {
- if (pipe_staged[i]) {
+ if (pipe_staged[i])
dpu_plane_clear_multirect(pipe_staged[i]);
-
- if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
- DPU_ERROR(
- "r1 only virt plane:%d not supported\n",
- pipe_staged[i]->plane->base.id);
- rc = -EINVAL;
- goto end;
- }
- }
}
z_pos = -1;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 9b67645c2574..539b68b1626a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -97,7 +97,6 @@ struct dpu_crtc_mixer {
struct dpu_hw_ctl *lm_ctl;
struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
- u32 flush_mask;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c682d4e02d1b..9c6817b5a194 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -162,7 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
- * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
+ * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -208,7 +208,7 @@ struct dpu_encoder_virt {
bool wide_bus_en;
/* DSC configuration */
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -1791,12 +1791,12 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
static u32
-dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
u32 enc_ip_width)
{
int ssm_delay, total_pixels, soft_slice_per_enc;
- soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+ soft_slice_per_enc = enc_ip_width / dsc->slice_width;
/*
* minimum number of initial line pixels is a sum of:
@@ -1808,16 +1808,16 @@ dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
* 5. 6 additional pixels as the output of the rate buffer is
* 48 bits wide
*/
- ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
- total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+ ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
if (soft_slice_per_enc > 1)
total_pixels += (ssm_delay * 3);
- return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+ return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
struct dpu_hw_pingpong *hw_pp,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 common_mode,
u32 initial_lines)
{
@@ -1835,7 +1835,7 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
}
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
- struct msm_display_dsc_config *dsc)
+ struct drm_dsc_config *dsc)
{
/* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
@@ -1858,14 +1858,15 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
- pic_width = dsc->drm->pic_width;
+ dsc_common_mode = 0;
+ pic_width = dsc->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
- this_frame_slices = pic_width / dsc->drm->slice_width;
- intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+ this_frame_slices = pic_width / dsc->slice_width;
+ intf_ip_w = this_frame_slices * dsc->slice_width;
/*
* dsc merge case: when using 2 encoders for the same stream,
@@ -1980,7 +1981,6 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_mixer_cfg mixer;
int i, num_lm;
- u32 flush_mask = 0;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_lm[2];
struct dpu_hw_mixer *hw_mixer[2];
@@ -1999,9 +1999,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
- if (phys_enc->hw_ctl->ops.update_pending_flush)
- phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
+ phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
if (phys_enc->hw_ctl->ops.setup_blendstage)
@@ -2061,6 +2060,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
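[Editor's note] A hedged worked example of the initial-line formula in dpu_encoder_dsc_initial_line_calc() above, with purely illustrative numbers (8 bpc, slice_width = 540, initial_xmit_delay = 512, enc_ip_width = 1080):

	/* ssm_delay = 84 because bits_per_component < 10              */
	/* soft_slice_per_enc = 1080 / 540 = 2                         */
	/* total_pixels = 84 * 3 + 512 + 47 = 811                      */
	/* soft_slice_per_enc > 1, so total_pixels += 84 * 3 -> 1063   */
	/* initial_lines = DIV_ROUND_UP(1063, 540) = 2                 */
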
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index d4d1ecd416e3..9e7236ef34e6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -36,7 +36,7 @@ struct msm_display_info {
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_cmd_mode;
bool is_te_using_watchdog_timer;
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0239a811d5ec..27f029fdc682 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1333,7 +1333,7 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
- .name = "vbif_0", .id = VBIF_0,
+ .name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.default_ot_rd_limit = 32,
.default_ot_wr_limit = 32,
@@ -1363,7 +1363,7 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = {
static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
- .name = "vbif_0", .id = VBIF_0,
+ .name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
@@ -1939,11 +1939,6 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
{
int i;
- struct dpu_mdss_cfg *dpu_cfg;
-
- dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
- if (!dpu_cfg)
- return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
if (cfg_handler[i].hw_rev == hw_rev)
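[Editor's note] With the kzalloc() gone, dpu_hw_catalog_init() can simply hand back the matching static const catalog entry. A hedged sketch of the remaining lookup (the .dpu_cfg field name and the -ENODEV error code are assumptions, not taken from this hunk):

	for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
		if (cfg_handler[i].hw_rev == hw_rev)
			return cfg_handler[i].dpu_cfg;	/* static const table */
	}

	return ERR_PTR(-ENODEV);
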
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 71fe4c505f5b..38aa38ab1568 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -76,7 +76,7 @@ enum {
/**
* MDP TOP BLOCK features
- * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
* @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
* @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
* @DPU_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index e12b7fa48a7b..a35ecb6676c8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -150,92 +150,84 @@ static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
-static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
- uint32_t flushbits = 0;
-
switch (sspp) {
case SSPP_VIG0:
- flushbits = BIT(0);
+ ctx->pending_flush_mask |= BIT(0);
break;
case SSPP_VIG1:
- flushbits = BIT(1);
+ ctx->pending_flush_mask |= BIT(1);
break;
case SSPP_VIG2:
- flushbits = BIT(2);
+ ctx->pending_flush_mask |= BIT(2);
break;
case SSPP_VIG3:
- flushbits = BIT(18);
+ ctx->pending_flush_mask |= BIT(18);
break;
case SSPP_RGB0:
- flushbits = BIT(3);
+ ctx->pending_flush_mask |= BIT(3);
break;
case SSPP_RGB1:
- flushbits = BIT(4);
+ ctx->pending_flush_mask |= BIT(4);
break;
case SSPP_RGB2:
- flushbits = BIT(5);
+ ctx->pending_flush_mask |= BIT(5);
break;
case SSPP_RGB3:
- flushbits = BIT(19);
+ ctx->pending_flush_mask |= BIT(19);
break;
case SSPP_DMA0:
- flushbits = BIT(11);
+ ctx->pending_flush_mask |= BIT(11);
break;
case SSPP_DMA1:
- flushbits = BIT(12);
+ ctx->pending_flush_mask |= BIT(12);
break;
case SSPP_DMA2:
- flushbits = BIT(24);
+ ctx->pending_flush_mask |= BIT(24);
break;
case SSPP_DMA3:
- flushbits = BIT(25);
+ ctx->pending_flush_mask |= BIT(25);
break;
case SSPP_CURSOR0:
- flushbits = BIT(22);
+ ctx->pending_flush_mask |= BIT(22);
break;
case SSPP_CURSOR1:
- flushbits = BIT(23);
+ ctx->pending_flush_mask |= BIT(23);
break;
default:
break;
}
-
- return flushbits;
}
-static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
- uint32_t flushbits = 0;
-
switch (lm) {
case LM_0:
- flushbits = BIT(6);
+ ctx->pending_flush_mask |= BIT(6);
break;
case LM_1:
- flushbits = BIT(7);
+ ctx->pending_flush_mask |= BIT(7);
break;
case LM_2:
- flushbits = BIT(8);
+ ctx->pending_flush_mask |= BIT(8);
break;
case LM_3:
- flushbits = BIT(9);
+ ctx->pending_flush_mask |= BIT(9);
break;
case LM_4:
- flushbits = BIT(10);
+ ctx->pending_flush_mask |= BIT(10);
break;
case LM_5:
- flushbits = BIT(20);
+ ctx->pending_flush_mask |= BIT(20);
break;
default:
- return -EINVAL;
+ break;
}
- flushbits |= CTL_FLUSH_MASK_CTL;
-
- return flushbits;
+ ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
@@ -294,29 +286,25 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
-static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp)
{
- uint32_t flushbits = 0;
-
switch (dspp) {
case DSPP_0:
- flushbits = BIT(13);
+ ctx->pending_flush_mask |= BIT(13);
break;
case DSPP_1:
- flushbits = BIT(14);
+ ctx->pending_flush_mask |= BIT(14);
break;
case DSPP_2:
- flushbits = BIT(15);
+ ctx->pending_flush_mask |= BIT(15);
break;
case DSPP_3:
- flushbits = BIT(21);
+ ctx->pending_flush_mask |= BIT(21);
break;
default:
- return 0;
+ break;
}
-
- return flushbits;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
@@ -685,9 +673,9 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
- ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
- ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
- ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
+ ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 7d9ad6a3f9f6..96c012ec8467 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -130,6 +130,32 @@ struct dpu_hw_ctl_ops {
enum dpu_merge_3d blk);
/**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : SSPP block index
+ */
+ void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : LM block index
+ */
+ void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : DSPP block index
+ */
+ void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -171,15 +197,6 @@ struct dpu_hw_ctl_ops {
*/
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
- uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
- enum dpu_sspp blk);
-
- uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
- enum dpu_lm blk);
-
- uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
- enum dpu_dspp blk);
-
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
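[Editor's note] A hedged usage sketch of the per-block callbacks declared above (names from the hunks; locking and error handling omitted): callers OR bits into the cached pending_flush_mask block by block, and a single trigger_flush() later writes the accumulated mask to CTL_FLUSH.

	if (ctl->ops.update_pending_flush_sspp)
		ctl->ops.update_pending_flush_sspp(ctl, sspp_idx);
	if (ctl->ops.update_pending_flush_mixer)
		ctl->ops.update_pending_flush_mixer(ctl, lm_idx);
	if (ctl->ops.update_pending_flush_dspp)
		ctl->ops.update_pending_flush_dspp(ctl, dspp_idx);

	/* one register write flushes everything accumulated above */
	ctl->ops.trigger_flush(ctl);
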
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 411689ae6382..f2ddcfb6f7ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -37,7 +37,7 @@ static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
@@ -52,89 +52,89 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
if (is_cmd_mode)
initial_lines += 1;
- slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
+ slice_last_group_size = 3 - (dsc->slice_width % 3);
data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
/* bpp is 6.4 format, 4 LSBs bits are for fractional part */
- data |= dsc->drm->bits_per_pixel << 12;
- lsb = dsc->drm->bits_per_pixel % 4;
- bpp = dsc->drm->bits_per_pixel / 4;
+ data |= dsc->bits_per_pixel << 12;
+ lsb = dsc->bits_per_pixel % 4;
+ bpp = dsc->bits_per_pixel / 4;
bpp *= 4;
bpp <<= 4;
bpp |= lsb;
data |= bpp << 8;
- data |= (dsc->drm->block_pred_enable << 7);
- data |= (dsc->drm->line_buf_depth << 3);
- data |= (dsc->drm->simple_422 << 2);
- data |= (dsc->drm->convert_rgb << 1);
- data |= dsc->drm->bits_per_component;
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+ data |= dsc->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
- data = dsc->drm->pic_width << 16;
- data |= dsc->drm->pic_height;
+ data = dsc->pic_width << 16;
+ data |= dsc->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
- data = dsc->drm->slice_width << 16;
- data |= dsc->drm->slice_height;
+ data = dsc->slice_width << 16;
+ data |= dsc->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
- data = dsc->drm->slice_chunk_size << 16;
+ data = dsc->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
- data = dsc->drm->initial_dec_delay << 16;
- data |= dsc->drm->initial_xmit_delay;
+ data = dsc->initial_dec_delay << 16;
+ data |= dsc->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
- data = dsc->drm->initial_scale_value;
+ data = dsc->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
- data = dsc->drm->scale_decrement_interval;
+ data = dsc->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
- data = dsc->drm->scale_increment_interval;
+ data = dsc->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
- data = dsc->drm->first_line_bpg_offset;
+ data = dsc->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
- data = dsc->drm->nfl_bpg_offset << 16;
- data |= dsc->drm->slice_bpg_offset;
+ data = dsc->nfl_bpg_offset << 16;
+ data |= dsc->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
- data = dsc->drm->initial_offset << 16;
- data |= dsc->drm->final_offset;
+ data = dsc->initial_offset << 16;
+ data |= dsc->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
- det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
+ det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
data = det_thresh_flatness << 10;
- data |= dsc->drm->flatness_max_qp << 5;
- data |= dsc->drm->flatness_min_qp;
+ data |= dsc->flatness_max_qp << 5;
+ data |= dsc->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
- data = dsc->drm->rc_model_size;
+ data = dsc->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
- data = dsc->drm->rc_tgt_offset_low << 18;
- data |= dsc->drm->rc_tgt_offset_high << 14;
- data |= dsc->drm->rc_quant_incr_limit1 << 9;
- data |= dsc->drm->rc_quant_incr_limit0 << 4;
- data |= dsc->drm->rc_edge_factor;
+ data = dsc->rc_tgt_offset_low << 18;
+ data |= dsc->rc_tgt_offset_high << 14;
+ data |= dsc->rc_quant_incr_limit1 << 9;
+ data |= dsc->rc_quant_incr_limit0 << 4;
+ data |= dsc->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc)
+ struct drm_dsc_config *dsc)
{
- struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
+ struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
- DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
+ DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
off += 4;
}
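[Editor's note] Two small, hedged arithmetic examples for the register packing in dpu_hw_dsc_config() above, using illustrative parameter values:

	/* slice_width = 540: slice_last_group_size = 3 - (540 % 3) = 3,
	 * so the field at bit 18 of DSC_ENC is written as 3 - 1 = 2.
	 *
	 * bits_per_component = 8: det_thresh_flatness = 7 + 2 * (8 - 8) = 7
	 * (it would be 11 for a 10 bpc configuration).
	 */
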
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index 45e4118f1fa2..c0b77fe1a696 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -31,7 +31,7 @@ struct dpu_hw_dsc_ops {
* @initial_lines: amount of initial lines to be used
*/
void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines);
@@ -41,7 +41,7 @@ struct dpu_hw_dsc_ops {
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc);
+ struct drm_dsc_config *dsc);
};
struct dpu_hw_dsc {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 9f402be55fbf..d3b0ed0a9c6c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -273,11 +273,9 @@ enum dpu_wd_timer {
};
enum dpu_vbif {
- VBIF_0,
- VBIF_1,
+ VBIF_RT,
+ VBIF_NRT,
VBIF_MAX,
- VBIF_RT = VBIF_0,
- VBIF_NRT = VBIF_1
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 102c21bb4192..691c471b08c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -780,8 +780,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
}
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, const struct dpu_mdss_cfg *catalog,
- bool is_virtual_pipe)
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog)
{
struct dpu_hw_pipe *hw_pipe;
const struct dpu_sspp_cfg *cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 78b1bc9e004f..0c95b7e64f6c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -377,11 +377,9 @@ struct dpu_kms;
* @idx: Pipe index for which driver object is required
* @addr: Mapped register io address of MDP
* @catalog : Pointer to mdss catalog data
- * @is_virtual_pipe: is this pipe virtual pipe
*/
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, const struct dpu_mdss_cfg *catalog,
- bool is_virtual_pipe);
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 008e1420e6e5..5e6e2626151e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -384,12 +384,9 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
- struct device *mdss_dev = dpu_dev->parent;
- /* Interconnects are a part of MDSS device tree binding, not the
- * MDP/DPU device. */
- path0 = of_icc_get(mdss_dev, "mdp0-mem");
- path1 = of_icc_get(mdss_dev, "mdp1-mem");
+ path0 = msm_icc_get(dpu_dev, "mdp0-mem");
+ path1 = msm_icc_get(dpu_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
@@ -782,7 +779,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
- (1UL << max_crtc_count) - 1, 0);
+ (1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -826,12 +823,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
- for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
-
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
- dpu_kms->hw_vbif[vbif_idx] = NULL;
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
}
}
}
@@ -902,12 +897,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
int i;
struct dpu_kms *dpu_kms;
const struct dpu_mdss_cfg *cat;
- struct dpu_hw_mdp *top;
dpu_kms = to_dpu_kms(kms);
cat = dpu_kms->catalog;
- top = dpu_kms->hw_mdp;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -1113,12 +1106,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
- if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
- if (!dpu_kms->hw_vbif[vbif_idx])
- rc = -EINVAL;
DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
dpu_kms->hw_vbif[vbif_idx] = NULL;
goto power_error;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index a617a3d8b1bc..658005f609f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -91,7 +91,7 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
- * @mplane_list: List of multirect planes of the same pipe
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
*/
@@ -106,8 +106,6 @@ struct dpu_plane {
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
- bool is_virtual;
- struct list_head mplane_list;
const struct dpu_mdss_cfg *catalog;
};
@@ -225,7 +223,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
- struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
u32 fixed_buff_size;
u32 total_fl;
@@ -239,19 +237,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
pstate = to_dpu_plane_state(plane->state);
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
- list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
- u32 tmp_width;
-
- if (!tmp->base.state->visible)
- continue;
- tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
- DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
- pdpu->base.base.id, tmp->base.base.id,
- src_width,
- tmp_width);
- src_width = max_t(u32, src_width,
- tmp_width);
- }
+ /* FIXME: in multirect case account for the src_width of all the planes */
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == DPU_CHROMA_420) {
@@ -854,13 +840,8 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
}
done:
- if (dpu_plane[R0]->is_virtual) {
- pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
- pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
- } else {
- pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
- pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
- }
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -869,18 +850,6 @@ done:
return 0;
}
-/**
- * dpu_plane_get_ctl_flush - get control flush for the given plane
- * @plane: Pointer to drm plane structure
- * @ctl: Pointer to hardware control driver
- * @flush_sspp: Pointer to sspp flush control word
- */
-void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
- u32 *flush_sspp)
-{
- *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
-}
-
static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -1266,19 +1235,13 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
- trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ trace_dpu_plane_disable(DRMID(plane), false,
pstate->multirect_mode);
pstate->pending = true;
-
- if (is_dpu_plane_virtual(plane) &&
- pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
- pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
- DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
}
static void dpu_plane_atomic_update(struct drm_plane *plane,
@@ -1493,22 +1456,16 @@ enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
}
-bool is_dpu_plane_virtual(struct drm_plane *plane)
-{
- return plane ? to_dpu_plane(plane)->is_virtual : false;
-}
-
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs, u32 master_plane_id)
+ unsigned long possible_crtcs)
{
- struct drm_plane *plane = NULL, *master_plane = NULL;
+ struct drm_plane *plane = NULL;
const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
- int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
uint32_t supported_rotations;
int ret = -EINVAL;
@@ -1524,18 +1481,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &pdpu->base;
pdpu->pipe = pipe;
- pdpu->is_virtual = (master_plane_id != 0);
- INIT_LIST_HEAD(&pdpu->mplane_list);
- master_plane = drm_plane_find(dev, NULL, master_plane_id);
- if (master_plane) {
- struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
-
- list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
- }
/* initialize underlying h/w driver */
- pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
- master_plane_id != 0);
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog);
if (IS_ERR(pdpu->pipe_hw)) {
DPU_ERROR("[%u]SSPP init failed\n", pipe);
ret = PTR_ERR(pdpu->pipe_hw);
@@ -1545,14 +1493,8 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (pdpu->is_virtual) {
- format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
- num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
- }
- else {
- format_list = pdpu->pipe_hw->cap->sblk->format_list;
- num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
- }
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
format_list, num_formats,
@@ -1562,14 +1504,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
pdpu->catalog = kms->catalog;
- if (kms->catalog->mixer_count &&
- kms->catalog->mixer[0].sblk->maxblendstages) {
- zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
- if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
- zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
- }
-
- ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
@@ -1594,15 +1529,14 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
mutex_init(&pdpu->lock);
- DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
- pipe, plane->base.id, master_plane_id);
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
return plane;
clean_sspp:
if (pdpu && pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
- list_del(&pdpu->mplane_list);
kfree(pdpu);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index e1463107a6fc..b7b1b05199c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -65,23 +65,6 @@ struct dpu_multirect_plane_states {
enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
/**
- * is_dpu_plane_virtual - check for virtual plane
- * @plane: Pointer to DRM plane object
- * returns: true - if the plane is virtual
- * false - if the plane is primary
- */
-bool is_dpu_plane_virtual(struct drm_plane *plane);
-
-/**
- * dpu_plane_get_ctl_flush - get control flush mask
- * @plane: Pointer to DRM plane object
- * @ctl: Pointer to control hardware
- * @flush_sspp: Pointer to sspp flush control word
- */
-void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
- u32 *flush_sspp);
-
-/**
* dpu_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
*/
@@ -99,14 +82,11 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* @pipe: dpu hardware pipe identifier
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
- * a regular plane initialization. A non-zero primary plane
- * id will be passed for a virtual pipe initialization.
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs, u32 master_plane_id);
+ unsigned long possible_crtcs);
/**
* dpu_plane_validate_multirecti_v2 - validate the multirect planes
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 21d20373eb8b..1305e250b71e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -11,6 +11,26 @@
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
+static const char *dpu_vbif_name(enum dpu_vbif idx)
+{
+ switch (idx) {
+ case VBIF_RT:
+ return "VBIF_RT";
+ case VBIF_NRT:
+ return "VBIF_NRT";
+ default:
+ return "??";
+ }
+}
+
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
@@ -42,12 +62,12 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
if (!status) {
rc = -ETIMEDOUT;
- DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
- vbif->idx - VBIF_0, xin_id);
+ DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
+ dpu_vbif_name(vbif->idx), xin_id);
} else {
rc = 0;
- DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
- vbif->idx - VBIF_0, xin_id);
+ DRM_DEBUG_ATOMIC("%s client %d is halted\n",
+ dpu_vbif_name(vbif->idx), xin_id);
}
return rc;
@@ -87,8 +107,8 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
}
}
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
- vbif->idx - VBIF_0, params->xin_id,
+ DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ dpu_vbif_name(vbif->idx), params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
}
@@ -133,8 +153,8 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
}
exit:
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
- vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
+ dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
return ot_lim;
}
@@ -148,20 +168,15 @@ exit:
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
- int ret, i;
+ int ret;
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
- vbif = dpu_kms->hw_vbif[i];
- }
-
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
@@ -204,7 +219,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
@@ -216,13 +231,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
- vbif = dpu_kms->hw_vbif[i];
- break;
- }
- }
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
@@ -245,8 +254,8 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
- params->vbif_idx, params->xin_id, i,
+ DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
+ dpu_vbif_name(params->vbif_idx), params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
@@ -266,8 +275,8 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
if (vbif && vbif->ops.clear_errors) {
vbif->ops.clear_errors(vbif, &pnd, &src);
if (pnd || src) {
- DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
- vbif->idx - VBIF_0, pnd, src);
+ DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
+ dpu_vbif_name(vbif->idx), pnd, src);
}
}
}
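[Editor's note] dpu_get_vbif() replaces the open-coded scans over hw_vbif[], which works because the array is now indexed directly by enum dpu_vbif. A hedged usage sketch:

	struct dpu_hw_vbif *vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);

	if (!vbif || !vbif->ops.set_qos_remap)
		return;		/* out-of-range index or op not implemented */
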
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index d2a48caf9d27..b0d21838a134 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -902,12 +902,9 @@ fail:
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
- /* Interconnects are a part of MDSS device tree binding, not the
- * MDP5 device. */
- struct device *mdss_dev = pdev->dev.parent;
- struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
- struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
- struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
+ struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
+ struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
+ struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 7257515871a9..676279d0ca8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -431,7 +431,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
if (rate == link_rate_hbr3)
pixel_div = 6;
- else if (rate == 1620000 || rate == 270000)
+ else if (rate == 162000 || rate == 270000)
pixel_div = 2;
else if (rate == link_rate_hbr2)
pixel_div = 4;
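[Editor's note] The one-character fix above matters because link rates in this driver are kept in kHz: RBR is 162000 (1.62 Gb/s per lane), so the old 1620000 comparison could never match. A hedged illustration of the rate-to-pixel_div mapping (the fallback value is an assumption, not taken from the driver):

#include <linux/types.h>

static u32 example_pixel_div(u32 link_rate_khz)
{
	switch (link_rate_khz) {
	case 162000:	/* RBR  */
	case 270000:	/* HBR  */
		return 2;
	case 540000:	/* HBR2 */
		return 4;
	case 810000:	/* HBR3 */
		return 6;
	default:
		return 2;	/* assumed fallback */
	}
}
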
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ab6aa13b1639..3854c9f1f7e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
@@ -1238,8 +1238,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
-
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
@@ -1358,25 +1356,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
- drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
- ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
-
- return ret;
-}
-
-static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
-{
- int ret = 0;
-
- dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
- ctrl->dp_ctrl.pixel_rate * 1000);
-
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
- if (ret)
- DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
-
- drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
- ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+ drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
return ret;
}
@@ -1520,8 +1500,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
-
ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
@@ -1535,38 +1513,6 @@ end:
return ret;
}
-static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
-
-static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
-{
- int ret = 0;
-
- if (!ctrl->link->phy_params.phy_test_pattern_sel) {
- drm_dbg_dp(ctrl->drm_dev,
- "no test pattern selected by sink\n");
- return ret;
- }
-
- /*
- * The global reset will need DP link related clocks to be
- * running. Add the global reset just before disabling the
- * link clocks and core clocks.
- */
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
- if (ret) {
- DRM_ERROR("failed to disable DP controller\n");
- return ret;
- }
-
- ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
- if (!ret)
- ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
- else
- DRM_ERROR("failed to enable DP link controller\n");
-
- return ret;
-}
-
static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
{
bool success = false;
@@ -1619,6 +1565,48 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
return success;
}
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+ int ret;
+ unsigned long pixel_rate;
+
+ if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "no test pattern selected by sink\n");
+ return 0;
+ }
+
+ /*
+ * The global reset will need DP link related clocks to be
+ * running. Add the global reset just before disabling the
+ * link clocks and core clocks.
+ */
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to disable DP controller\n");
+ return ret;
+ }
+
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to enable DP link controller\n");
+ return ret;
+ }
+
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_ctrl_send_phy_test_pattern(ctrl);
+
+ return 0;
+}
+
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
@@ -1689,11 +1677,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
struct dp_ctrl_private *ctrl;
- u32 rate = 0;
+ u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
+ unsigned long pixel_rate;
if (!dp_ctrl)
return -EINVAL;
@@ -1701,25 +1690,24 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
rate = ctrl->panel->link_info.rate;
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
"using phy test link parameters\n");
- if (!ctrl->panel->dp_mode.drm_mode.clock)
- ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
+ if (!pixel_rate)
+ pixel_rate = phy_cts_pixel_clk_khz;
} else {
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
}
- drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
- ctrl->dp_ctrl.pixel_rate);
-
+ pixel_rate);
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
@@ -1816,31 +1804,12 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
-static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
-{
- int ret;
- struct dp_ctrl_private *ctrl;
-
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
-
- ret = dp_ctrl_enable_stream_clocks(ctrl);
- if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
- return ret;
- }
-
- dp_ctrl_send_phy_test_pattern(ctrl);
-
- return 0;
-}
-
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
+ unsigned long pixel_rate;
unsigned long pixel_rate_orig;
if (!dp_ctrl)
@@ -1848,15 +1817,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
- pixel_rate_orig = ctrl->dp_ctrl.pixel_rate;
if (dp_ctrl->wide_bus_en)
- ctrl->dp_ctrl.pixel_rate >>= 1;
+ pixel_rate >>= 1;
- drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate,
- ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+ ctrl->link->link_params.num_lanes, pixel_rate);
if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
@@ -1866,9 +1834,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
}
}
- ret = dp_ctrl_enable_stream_clocks(ctrl);
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
goto end;
}
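[Editor's note] With pixel_rate dropped from struct dp_ctrl, the stream path computes it locally. A hedged sketch of the resulting flow (names taken from the hunks; drm_mode.clock is in kHz, hence the * 1000 when programming the clock rate in Hz):

	unsigned long pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;	/* kHz */

	if (ctrl->dp_ctrl.wide_bus_en)
		pixel_rate >>= 1;	/* wide bus carries two pixels per clock */

	dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
			       pixel_rate * 1000);
	ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
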
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b563e2e3bfe5..9f29734af81c 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -16,7 +16,6 @@
struct dp_ctrl {
bool orientation;
atomic_t aborted;
- u32 pixel_rate;
bool wide_bus_en;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 36f0af02749f..36bb6191d2f0 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -786,7 +786,7 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
link->request.test_lane_count);
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
@@ -965,8 +965,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
if (channel_eq_done && clock_recovery_done)
return -EINVAL;
-
- return 0;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1625328fa430..39bbabb5daf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -6,14 +6,6 @@
#include "dsi.h"
#include "dsi_cfg.h"
-struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
-{
- if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
- return NULL;
-
- return msm_dsi->encoder;
-}
-
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
@@ -21,7 +13,7 @@ bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
-struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
@@ -220,7 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv;
- struct drm_bridge *ext_bridge;
int ret;
if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
@@ -254,26 +245,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- /*
- * check if the dsi encoder output is connected to a panel or an
- * external bridge. We create a connector only if we're connected to a
- * drm_panel device. When we're connected to an external bridge, we
- * assume that the drm_bridge driver will create the connector itself.
- */
- ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
-
- if (ext_bridge)
- msm_dsi->connector =
- msm_dsi_manager_ext_bridge_init(msm_dsi->id);
- else
- msm_dsi->connector =
- msm_dsi_manager_connector_init(msm_dsi->id);
-
- if (IS_ERR(msm_dsi->connector)) {
- ret = PTR_ERR(msm_dsi->connector);
+ ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
- msm_dsi->connector = NULL;
goto fail;
}
@@ -287,12 +262,6 @@ fail:
msm_dsi->bridge = NULL;
}
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
-
- msm_dsi->connector = NULL;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 580a1e6358bf..2a96b4fe7839 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -12,7 +12,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "disp/msm_disp_snapshot.h"
@@ -30,27 +29,12 @@ enum msm_dsi_phy_usecase {
MSM_DSI_PHY_SLAVE,
};
-#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4
-/* Regulators for DSI devices */
-struct dsi_reg_entry {
- char name[32];
- int enable_load;
- int disable_load;
-};
-
-struct dsi_reg_config {
- int num;
- struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX];
-};
-
struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
- /* connector managed by us when we're connected to a drm_panel */
- struct drm_connector *connector;
/* internal dsi bridge attached to MDP interface */
struct drm_bridge *bridge;
@@ -58,10 +42,8 @@ struct msm_dsi {
struct msm_dsi_phy *phy;
/*
- * panel/external_bridge connected to dsi bridge output, only one of the
- * two can be valid at a time
+ * external_bridge connected to dsi bridge output
*/
- struct drm_panel *panel;
struct drm_bridge *external_bridge;
struct device *phy_dev;
@@ -76,8 +58,7 @@ struct msm_dsi {
/* dsi manager */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
-struct drm_connector *msm_dsi_manager_connector_init(u8 id);
-struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
+int msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
@@ -87,11 +68,9 @@ void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
{
- return msm_dsi->panel || msm_dsi->external_bridge;
+ return msm_dsi->external_bridge;
}
-struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
-
/* dsi host */
struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
@@ -116,9 +95,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
-struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
@@ -154,7 +131,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
-struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2c23324a2296..7e97c239ed48 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -9,16 +9,16 @@ static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss", "iface", "bus",
};
+static const struct regulator_bulk_data apq8064_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
- .reg_cfg = {
- .num = 3,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"avdd", 10000, 100}, /* 3.0 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = apq8064_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators),
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
.io_start = { 0x4700000, 0x5800000 },
@@ -29,16 +29,16 @@ static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
+static const struct regulator_bulk_data msm8974_apq8084_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 3,
- .regs = {
- {"vdd", 150000, 100}, /* 3.0 V */
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8974_apq8084_regulators,
+ .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd922800, 0xfd922b00 },
@@ -49,15 +49,15 @@ static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
+static const struct regulator_bulk_data msm8916_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8916_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8916_dsi_regulators),
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
.io_start = { 0x1a98000 },
@@ -68,34 +68,34 @@ static const char * const dsi_8976_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
+static const struct regulator_bulk_data msm8976_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8976_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8976_dsi_regulators),
.bus_clk_names = dsi_8976_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
.io_start = { 0x1a94000, 0x1a96000 },
.num_dsi = 2,
};
+static const struct regulator_bulk_data msm8994_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+ { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "lab_reg", .init_load_uA = -1 },
+ { .supply = "ibb_reg", .init_load_uA = -1 },
+};
+
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 6,
- .regs = {
- {"vdda", 100000, 100}, /* 1.25 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- {"vcca", 10000, 100}, /* 1.0 V */
- {"vdd", 100000, 100}, /* 1.8 V */
- {"lab_reg", -1, -1},
- {"ibb_reg", -1, -1},
- },
- },
+ .regulator_data = msm8994_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd998000, 0xfd9a0000 },
@@ -106,16 +106,16 @@ static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
+static const struct regulator_bulk_data msm8996_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */
+ { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 18160, 1 }, /* 1.25 V */
- {"vcca", 17000, 32 }, /* 0.925 V */
- {"vddio", 100000, 100 },/* 1.8 V */
- },
- },
+ .regulator_data = msm8996_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8996_dsi_regulators),
.bus_clk_names = dsi_8996_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
.io_start = { 0x994000, 0x996000 },
@@ -126,15 +126,15 @@ static const char * const dsi_msm8998_bus_clk_names[] = {
"iface", "bus", "core",
};
+static const struct regulator_bulk_data msm8998_dsi_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */
+ { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config msm8998_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdd", 367000, 16 }, /* 0.9 V */
- {"vdda", 62800, 2 }, /* 1.2 V */
- },
- },
+ .regulator_data = msm8998_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators),
.bus_clk_names = dsi_msm8998_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@@ -145,14 +145,14 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
"iface", "bus", "core", "core_mmss",
};
+static const struct regulator_bulk_data sdm660_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 12560, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sdm660_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators),
.bus_clk_names = dsi_sdm660_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@@ -167,28 +167,28 @@ static const char * const dsi_sc7180_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data sdm845_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sdm845_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm845_dsi_regulators),
.bus_clk_names = dsi_sdm845_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
.io_start = { 0xae94000, 0xae96000 },
.num_dsi = 2,
};
+static const struct regulator_bulk_data sc7180_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sc7180_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sc7180_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7180_dsi_regulators),
.bus_clk_names = dsi_sc7180_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
.io_start = { 0xae94000 },
@@ -199,14 +199,14 @@ static const char * const dsi_sc7280_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sc7280_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 8350, 0 }, /* 1.2 V */
- },
- },
+ .regulator_data = sc7280_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
.bus_clk_names = dsi_sc7280_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
.io_start = { 0xae94000 },
@@ -217,14 +217,14 @@ static const char * const dsi_qcm2290_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config qcm2290_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = qcm2290_dsi_cfg_regulators,
+ .num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators),
.bus_clk_names = dsi_qcm2290_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names),
.io_start = { 0x5e94000 },
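
The per-SoC tables above replace the driver's private dsi_reg_entry lists with standard regulator_bulk_data arrays; the regulator core applies each non-zero .init_load_uA through regulator_set_load() when the supplies are acquired, so the hand-rolled enable/disable load bookkeeping can be dropped from the host and PHY code. A minimal sketch of the pattern, assuming hypothetical supply names and load values:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Hypothetical supply table: names and loads are illustrative only. */
static const struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdda",  .init_load_uA = 21800 },
	{ .supply = "vddio", .init_load_uA = 100000 },
};

static int example_get_supplies(struct device *dev,
				struct regulator_bulk_data **out)
{
	/*
	 * The const variant leaves the table untouched and hands back a
	 * devm-allocated working copy with the consumer pointers filled in.
	 */
	return devm_regulator_bulk_get_const(dev, ARRAY_SIZE(example_supplies),
					     example_supplies, out);
}

That devm-allocated copy is what the host and PHY structures now store as their supplies pointer instead of a fixed-size array.
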
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index fe54a999968b..8f04e685a74e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -32,7 +32,8 @@
struct msm_dsi_config {
u32 io_offset;
- struct dsi_reg_config reg_cfg;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
const char * const *bus_clk_names;
const int num_bus_clks;
const resource_size_t io_start[DSI_MAX];
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a34078497af1..7fbf391c024f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -33,7 +33,7 @@
#define DSI_RESET_TOGGLE_DELAY_MS 20
-static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc);
+static int dsi_populate_dsc_params(struct drm_dsc_config *dsc);
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
@@ -108,7 +108,7 @@ struct msm_dsi_host {
void __iomem *ctrl_base;
phys_addr_t ctrl_size;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+ struct regulator_bulk_data *supplies;
int num_bus_clks;
struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
@@ -144,7 +144,6 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
- struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -161,10 +160,9 @@ struct msm_dsi_host {
struct regmap *sfpb;
struct drm_display_mode *mode;
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
/* connected device info */
- struct device_node *device_node;
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -205,9 +203,6 @@ static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
msm_writel(data, msm_host->ctrl_base + reg);
}
-static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
-static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
-
static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
@@ -258,76 +253,6 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
return container_of(host, struct msm_dsi_host, base);
}
-static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer,
- regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- pr_err("regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- pr_err("regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
-static int dsi_regulator_init(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
- if (ret < 0) {
- pr_err("%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -916,7 +841,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
{
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, intf_width, reg_ctrl, reg_ctrl2;
u32 slice_per_intf, total_bytes_per_intf;
u32 pkt_per_line;
@@ -927,24 +852,24 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
* compress mode registers
*/
intf_width = hdisplay;
- slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width);
+ slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
/* If slice_per_pkt is greater than slice_per_intf
* then default to 1. This can happen during partial
* update.
*/
- if (slice_per_intf > dsc->drm->slice_count)
- dsc->drm->slice_count = 1;
+ if (slice_per_intf > dsc->slice_count)
+ dsc->slice_count = 1;
- slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width);
- bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8);
+ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+ bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8);
- dsc->drm->slice_chunk_size = bytes_in_slice;
+ dsc->slice_chunk_size = bytes_in_slice;
total_bytes_per_intf = bytes_in_slice * slice_per_intf;
eol_byte_num = total_bytes_per_intf % 3;
- pkt_per_line = slice_per_intf / dsc->drm->slice_count;
+ pkt_per_line = slice_per_intf / dsc->slice_count;
if (is_cmd_mode) /* packet data type */
reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
@@ -1009,7 +934,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
}
if (msm_host->dsc) {
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@@ -1018,9 +943,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
return;
}
- dsc->drm->pic_width = mode->hdisplay;
- dsc->drm->pic_height = mode->vdisplay;
- DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height);
+ dsc->pic_width = mode->hdisplay;
+ dsc->pic_height = mode->vdisplay;
+ DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);
/* we do the calculations for dsc parameters here so that
* panel can use these parameters
@@ -1500,14 +1425,6 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
-static void dsi_hpd_worker(struct work_struct *work)
-{
- struct msm_dsi_host *msm_host =
- container_of(work, struct msm_dsi_host, hpd_work);
-
- drm_helper_hpd_irq_event(msm_host->dev);
-}
-
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1686,6 +1603,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
+ if (dsi->dsc)
+ msm_host->dsc = dsi->dsc;
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1697,8 +1616,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
return ret;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
- queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1710,11 +1627,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
dsi_dev_detach(msm_host->pdev);
- msm_host->device_node = NULL;
-
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
- queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1841,7 +1754,7 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = {
2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
-static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
+static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
{
int mux_words_size;
int groups_per_line, groups_total;
@@ -1854,98 +1767,98 @@ static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
int final_value, final_scale;
int i;
- dsc->drm->rc_model_size = 8192;
- dsc->drm->first_line_bpg_offset = 12;
- dsc->drm->rc_edge_factor = 6;
- dsc->drm->rc_tgt_offset_high = 3;
- dsc->drm->rc_tgt_offset_low = 3;
- dsc->drm->simple_422 = 0;
- dsc->drm->convert_rgb = 1;
- dsc->drm->vbr_enable = 0;
+ dsc->rc_model_size = 8192;
+ dsc->first_line_bpg_offset = 12;
+ dsc->rc_edge_factor = 6;
+ dsc->rc_tgt_offset_high = 3;
+ dsc->rc_tgt_offset_low = 3;
+ dsc->simple_422 = 0;
+ dsc->convert_rgb = 1;
+ dsc->vbr_enable = 0;
/* handle only bpp = bpc = 8 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
- dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
+ dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- dsc->drm->rc_range_params[i].range_min_qp = min_qp[i];
- dsc->drm->rc_range_params[i].range_max_qp = max_qp[i];
- dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i];
+ dsc->rc_range_params[i].range_min_qp = min_qp[i];
+ dsc->rc_range_params[i].range_max_qp = max_qp[i];
+ dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i];
}
- dsc->drm->initial_offset = 6144; /* Not bpp 12 */
- if (dsc->drm->bits_per_pixel != 8)
- dsc->drm->initial_offset = 2048; /* bpp = 12 */
+ dsc->initial_offset = 6144; /* Not bpp 12 */
+ if (dsc->bits_per_pixel != 8)
+ dsc->initial_offset = 2048; /* bpp = 12 */
mux_words_size = 48; /* bpc == 8/10 */
- if (dsc->drm->bits_per_component == 12)
+ if (dsc->bits_per_component == 12)
mux_words_size = 64;
- dsc->drm->initial_xmit_delay = 512;
- dsc->drm->initial_scale_value = 32;
- dsc->drm->first_line_bpg_offset = 12;
- dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1;
+ dsc->initial_xmit_delay = 512;
+ dsc->initial_scale_value = 32;
+ dsc->first_line_bpg_offset = 12;
+ dsc->line_buf_depth = dsc->bits_per_component + 1;
/* bpc 8 */
- dsc->drm->flatness_min_qp = 3;
- dsc->drm->flatness_max_qp = 12;
- dsc->drm->rc_quant_incr_limit0 = 11;
- dsc->drm->rc_quant_incr_limit1 = 11;
- dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ dsc->flatness_min_qp = 3;
+ dsc->flatness_max_qp = 12;
+ dsc->rc_quant_incr_limit0 = 11;
+ dsc->rc_quant_incr_limit1 = 11;
+ dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
/* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
* params are calculated
*/
- groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3);
- dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8;
- if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8)
- dsc->drm->slice_chunk_size++;
+ groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+ dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8;
+ if ((dsc->slice_width * dsc->bits_per_pixel) % 8)
+ dsc->slice_chunk_size++;
/* rbs-min */
- min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset +
- dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel +
- groups_per_line * dsc->drm->first_line_bpg_offset;
+ min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset +
+ dsc->initial_xmit_delay * dsc->bits_per_pixel +
+ groups_per_line * dsc->first_line_bpg_offset;
- hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel);
+ hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel);
- dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay;
+ dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
- dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size /
- (dsc->drm->rc_model_size - dsc->drm->initial_offset);
+ dsc->initial_scale_value = 8 * dsc->rc_model_size /
+ (dsc->rc_model_size - dsc->initial_offset);
- slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height;
+ slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height;
- groups_total = groups_per_line * dsc->drm->slice_height;
+ groups_total = groups_per_line * dsc->slice_height;
- data = dsc->drm->first_line_bpg_offset * 2048;
+ data = dsc->first_line_bpg_offset * 2048;
- dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1));
+ dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
- pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2);
+ pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2);
num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
- data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits);
- dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+ data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits);
+ dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
/* bpp * 16 + 0.5 */
- data = dsc->drm->bits_per_pixel * 16;
+ data = dsc->bits_per_pixel * 16;
data *= 2;
data++;
data /= 2;
target_bpp_x16 = data;
- data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16;
- final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits;
- dsc->drm->final_offset = final_value;
+ data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+ final_value = dsc->rc_model_size - data + num_extra_mux_bits;
+ dsc->final_offset = final_value;
- final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value);
+ final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value);
- data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset);
- dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data;
+ data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset);
+ dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
- dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8);
+ dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8);
return 0;
}
@@ -1954,7 +1867,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *endpoint, *device_node;
+ struct device_node *endpoint;
int ret = 0;
/*
@@ -1977,16 +1890,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
goto err;
}
- /* Get panel node from the output port's endpoint data */
- device_node = of_graph_get_remote_node(np, 1, 0);
- if (!device_node) {
- DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
- ret = -ENODEV;
- goto err;
- }
-
- msm_host->device_node = device_node;
-
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
@@ -1997,8 +1900,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
}
}
- of_node_put(device_node);
-
err:
of_node_put(endpoint);
@@ -2028,6 +1929,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
struct platform_device *pdev = msm_dsi->pdev;
+ const struct msm_dsi_config *cfg;
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
@@ -2060,6 +1962,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
pr_err("%s: get config failed\n", __func__);
goto fail;
}
+ cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
@@ -2069,13 +1972,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
/* fixup base address by io offset */
- msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+ msm_host->ctrl_base += cfg->io_offset;
- ret = dsi_regulator_init(msm_host);
- if (ret) {
- pr_err("%s: regulator init failed\n", __func__);
+ ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
+ cfg->regulator_data,
+ &msm_host->supplies);
+ if (ret)
goto fail;
- }
ret = dsi_clk_init(msm_host);
if (ret) {
@@ -2126,7 +2029,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
- INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->id = msm_host->id;
@@ -2159,23 +2061,9 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- struct drm_panel *panel;
int ret;
msm_host->dev = dev;
- panel = msm_dsi_host_get_panel(&msm_host->base);
-
- if (!IS_ERR(panel) && panel->dsc) {
- struct msm_display_dsc_config *dsc = msm_host->dsc;
-
- if (!dsc) {
- dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL);
- if (!dsc)
- return -ENOMEM;
- dsc->drm = panel->dsc;
- msm_host->dsc = dsc;
- }
- }
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
@@ -2556,7 +2444,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
msm_dsi_sfpb_config(msm_host, true);
- ret = dsi_host_regulator_enable(msm_host);
+ ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n",
__func__, ret);
@@ -2596,7 +2485,8 @@ fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
- dsi_host_regulator_disable(msm_host);
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
unlock_ret:
mutex_unlock(&msm_host->dev_mutex);
return ret;
@@ -2623,7 +2513,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
- dsi_host_regulator_disable(msm_host);
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
msm_dsi_sfpb_config(msm_host, false);
@@ -2659,45 +2550,33 @@ enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
int pic_width = mode->hdisplay;
int pic_height = mode->vdisplay;
if (!msm_host->dsc)
return MODE_OK;
- if (pic_width % dsc->drm->slice_width) {
+ if (pic_width % dsc->slice_width) {
pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
- pic_width, dsc->drm->slice_width);
+ pic_width, dsc->slice_width);
return MODE_H_ILLEGAL;
}
- if (pic_height % dsc->drm->slice_height) {
+ if (pic_height % dsc->slice_height) {
pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
- pic_height, dsc->drm->slice_height);
+ pic_height, dsc->slice_height);
return MODE_V_ILLEGAL;
}
return MODE_OK;
}
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
-{
- return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
-}
-
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
return to_msm_dsi_host(host)->mode_flags;
}
-struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
-{
- struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-
- return of_drm_find_bridge(msm_host->device_node);
-}
-
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -2771,7 +2650,7 @@ void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}
-struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
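
With the supplies resolved once at init time via devm_regulator_bulk_get_const(), the power-on and power-off hunks above reduce to plain bulk enable/disable calls and the dedicated dsi_host_regulator_* helpers disappear, while DSC parameters now arrive directly as a drm_dsc_config from the attaching DSI device. A condensed sketch of the regulator symmetry, with hypothetical names standing in for the driver's real objects:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Sketch only: "num" and "supplies" come from a config table as above. */
static int example_power_on(struct device *dev, int num,
			    struct regulator_bulk_data *supplies)
{
	int ret;

	ret = regulator_bulk_enable(num, supplies);
	if (ret)
		dev_err(dev, "regulator enable failed: %d\n", ret);

	return ret;
}

static void example_power_off(int num, struct regulator_bulk_data *supplies)
{
	regulator_bulk_disable(num, supplies);
}
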
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index cb84d185d73a..3a1417397283 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -141,14 +141,11 @@ static int enable_phy(struct msm_dsi *msm_dsi,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
- int ret;
bool is_bonded_dsi = IS_BONDED_DSI();
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
- ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
-
- return ret;
+ return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
}
static int
@@ -214,39 +211,26 @@ static void dsi_mgr_phy_disable(int id)
}
}
-struct dsi_connector {
- struct drm_connector base;
- int id;
-};
-
struct dsi_bridge {
struct drm_bridge base;
int id;
};
-#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
-static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
-{
- struct dsi_connector *dsi_connector = to_dsi_connector(connector);
- return dsi_connector->id;
-}
-
static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
{
struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
return dsi_bridge->id;
}
-static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
+static void msm_dsi_manager_set_split_display(u8 id)
{
- struct msm_drm_private *priv = conn->dev->dev_private;
- struct msm_kms *kms = priv->kms;
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = msm_dsi->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct msm_dsi *master_dsi, *slave_dsi;
- struct drm_panel *panel;
if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
@@ -256,89 +240,18 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
slave_dsi = other_dsi;
}
- /*
- * There is only 1 panel in the global panel list for bonded DSI mode.
- * Therefore slave dsi should get the drm_panel instance from master
- * dsi.
- */
- panel = msm_dsi_host_get_panel(master_dsi->host);
- if (IS_ERR(panel)) {
- DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
- PTR_ERR(panel));
- return PTR_ERR(panel);
- }
-
- if (!panel || !IS_BONDED_DSI())
- goto out;
-
- drm_object_attach_property(&conn->base,
- conn->dev->mode_config.tile_property, 0);
+ if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
+ return;
/*
* Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
- if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
+ if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
msm_dsi_is_cmd_mode(msm_dsi));
}
-
-out:
- msm_dsi->panel = panel;
- return 0;
-}
-
-static enum drm_connector_status dsi_mgr_connector_detect(
- struct drm_connector *connector, bool force)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-
- return msm_dsi->panel ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static void dsi_mgr_connector_destroy(struct drm_connector *connector)
-{
- struct dsi_connector *dsi_connector = to_dsi_connector(connector);
-
- DBG("");
-
- drm_connector_cleanup(connector);
-
- kfree(dsi_connector);
-}
-
-static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- int num;
-
- if (!panel)
- return 0;
-
- /*
- * In bonded DSI mode, we have one connector that can be
- * attached to the drm_panel.
- */
- num = drm_panel_get_modes(panel, connector);
- if (!num)
- return 0;
-
- return num;
-}
-
-static struct drm_encoder *
-dsi_mgr_connector_best_encoder(struct drm_connector *connector)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-
- DBG("");
- return msm_dsi_get_encoder(msm_dsi);
}
static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
@@ -403,7 +316,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
- struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@@ -418,18 +330,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (!dsi_mgr_power_on_early(bridge))
dsi_mgr_bridge_power_on(bridge);
- /* Always call panel functions once, because even for dual panels,
- * there is only one drm_panel instance.
- */
- if (panel) {
- ret = drm_panel_prepare(panel);
- if (ret) {
- pr_err("%s: prepare panel %d failed, %d\n", __func__,
- id, ret);
- goto panel_prep_fail;
- }
- }
-
ret = msm_dsi_host_enable(host);
if (ret) {
pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
@@ -449,9 +349,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
- if (panel)
- drm_panel_unprepare(panel);
-panel_prep_fail:
return;
}
@@ -469,62 +366,12 @@ void msm_dsi_manager_tpg_enable(void)
}
}
-static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
-{
- int id = dsi_mgr_bridge_get_id(bridge);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- bool is_bonded_dsi = IS_BONDED_DSI();
- int ret;
-
- DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
- /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
- return;
-
- if (panel) {
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id,
- ret);
- }
- }
-}
-
-static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
-{
- int id = dsi_mgr_bridge_get_id(bridge);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- bool is_bonded_dsi = IS_BONDED_DSI();
- int ret;
-
- DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
- /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
- return;
-
- if (panel) {
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
- ret);
- }
-}
-
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
- struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@@ -551,13 +398,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
- if (panel) {
- ret = drm_panel_unprepare(panel);
- if (ret)
- pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
- id, ret);
- }
-
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);
@@ -614,76 +454,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
return msm_dsi_host_check_dsc(host, mode);
}
-static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
- .detect = dsi_mgr_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = dsi_mgr_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
- .get_modes = dsi_mgr_connector_get_modes,
- .best_encoder = dsi_mgr_connector_best_encoder,
-};
-
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.pre_enable = dsi_mgr_bridge_pre_enable,
- .enable = dsi_mgr_bridge_enable,
- .disable = dsi_mgr_bridge_disable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
.mode_valid = dsi_mgr_bridge_mode_valid,
};
-/* initialize connector when we're connected to a drm_panel */
-struct drm_connector *msm_dsi_manager_connector_init(u8 id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_connector *connector = NULL;
- struct dsi_connector *dsi_connector;
- int ret;
-
- dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
- if (!dsi_connector)
- return ERR_PTR(-ENOMEM);
-
- dsi_connector->id = id;
-
- connector = &dsi_connector->base;
-
- ret = drm_connector_init(msm_dsi->dev, connector,
- &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
-
- /* Enable HPD to let hpd event is handled
- * when panel is attached to the host.
- */
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- /* Display driver doesn't support interlace now. */
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, msm_dsi->encoder);
-
- ret = msm_dsi_manager_panel_init(connector, id);
- if (ret) {
- DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
- goto fail;
- }
-
- return connector;
-
-fail:
- connector->funcs->destroy(connector);
- return ERR_PTR(ret);
-}
-
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -722,18 +499,21 @@ fail:
return ERR_PTR(ret);
}
-struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+int msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
- struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
int ret;
int_bridge = msm_dsi->bridge;
- ext_bridge = msm_dsi->external_bridge =
- msm_dsi_host_get_bridge(msm_dsi->host);
+ ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
+ msm_dsi->pdev->dev.of_node, 1, 0);
+ if (IS_ERR(ext_bridge))
+ return PTR_ERR(ext_bridge);
+
+ msm_dsi->external_bridge = ext_bridge;
encoder = msm_dsi->encoder;
@@ -745,36 +525,32 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret == -EINVAL) {
- struct drm_connector *connector;
- struct list_head *connector_list;
-
- /* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
-
/*
- * we need the drm_connector created by the external bridge
- * driver (or someone else) to feed it to our driver's
- * priv->connector[] list, mainly for msm_fbdev_init()
+ * link the internal dsi bridge to the external bridge,
+ * connector is created by the next bridge.
*/
- connector_list = &dev->mode_config.connector_list;
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ struct drm_connector *connector;
- list_for_each_entry(connector, connector_list, head) {
- if (drm_connector_has_possible_encoder(connector, encoder))
- return connector;
+ /* We are in charge of the connector, create one now. */
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return PTR_ERR(connector);
}
- return ERR_PTR(-ENODEV);
- }
-
- connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- return ERR_CAST(connector);
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
}
- drm_connector_attach_encoder(connector, encoder);
+ /* The pipeline is ready, ping encoders if necessary */
+ msm_dsi_manager_set_split_display(id);
- return connector;
+ return 0;
}
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
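
The manager now resolves the downstream bridge from the DT graph with devm_drm_of_get_bridge() and only creates a connector itself when the bridge accepts DRM_BRIDGE_ATTACH_NO_CONNECTOR; bridges that return -EINVAL keep creating their own connector via a plain attach. A reduced sketch of that attach-with-fallback flow, with placeholder names for the encoder and bridges:

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>

/* Sketch: "drm", "encoder", "prev" and "next" are placeholders. */
static int example_attach_next_bridge(struct drm_device *drm,
				      struct drm_encoder *encoder,
				      struct drm_bridge *prev,
				      struct drm_bridge *next)
{
	struct drm_connector *connector;
	int ret;

	ret = drm_bridge_attach(encoder, next, prev,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL) {
		/* Legacy bridge: it will create its own connector. */
		return drm_bridge_attach(encoder, next, prev, 0);
	}
	if (ret)
		return ret;

	/* NO_CONNECTOR accepted, so the display driver owns the connector. */
	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}
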
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index a39de3bdc7fa..7fc0975cb869 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
@@ -507,82 +507,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(dev, num, s);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER) {
- DRM_DEV_ERROR(dev,
- "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- }
-
- return ret;
- }
-
- return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- int num = phy->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
@@ -697,12 +621,9 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
- if (phy->id < 0) {
- ret = phy->id;
- DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
- __func__, ret);
- goto fail;
- }
+ if (phy->id < 0)
+ return dev_err_probe(dev, phy->id,
+ "Couldn't identify PHY index\n");
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
@@ -710,86 +631,71 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
- if (IS_ERR(phy->base)) {
- DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->base))
+ return dev_err_probe(dev, PTR_ERR(phy->base),
+ "Failed to map phy base\n");
phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
- if (IS_ERR(phy->pll_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->pll_base))
+ return dev_err_probe(dev, PTR_ERR(phy->pll_base),
+ "Failed to map pll base\n");
if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
- if (IS_ERR(phy->lane_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->lane_base))
+ return dev_err_probe(dev, PTR_ERR(phy->lane_base),
+ "Failed to map phy lane base\n");
}
if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
- if (IS_ERR(phy->reg_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->reg_base))
+ return dev_err_probe(dev, PTR_ERR(phy->reg_base),
+ "Failed to map phy regulator base\n");
}
if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
- goto fail;
+ return ret;
}
- ret = dsi_phy_regulator_init(phy);
+ ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
+ phy->cfg->regulator_data,
+ &phy->supplies);
if (ret)
- goto fail;
+ return ret;
phy->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(phy->ahb_clk)) {
- DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
- ret = PTR_ERR(phy->ahb_clk);
- goto fail;
- }
+ if (IS_ERR(phy->ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ "Unable to get ahb clk\n");
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
ret = dsi_phy_enable_resource(phy);
if (ret)
- goto fail;
+ return ret;
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
- if (ret) {
- DRM_DEV_INFO(dev,
- "%s: pll init failed: %d, need separate pll clk driver\n",
- __func__, ret);
- goto fail;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "PLL init failed; need separate clk driver\n");
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
phy->provided_clocks);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
- goto fail;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
dsi_phy_disable_resource(phy);
platform_set_drvdata(pdev, phy);
return 0;
-
-fail:
- return ret;
}
static struct platform_driver dsi_phy_platform_driver = {
@@ -829,7 +735,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto res_en_fail;
}
- ret = dsi_phy_regulator_enable(phy);
+ ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
if (ret) {
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
@@ -866,7 +772,7 @@ pll_restor_fail:
if (phy->cfg->ops.disable)
phy->cfg->ops.disable(phy);
phy_en_fail:
- dsi_phy_regulator_disable(phy);
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
dsi_phy_disable_resource(phy);
res_en_fail:
@@ -880,7 +786,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
- dsi_phy_regulator_disable(phy);
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
dsi_phy_disable_resource(phy);
}
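
The PHY probe cleanups above lean on dev_err_probe(), which logs the failure (quietly for -EPROBE_DEFER), records the deferral reason for the devices_deferred debugfs entry, and returns the error code in a single statement, replacing the DRM_DEV_ERROR-plus-goto pattern. A small sketch of the idiom with a hypothetical clock lookup:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper showing the dev_err_probe() idiom used above. */
static int example_get_iface_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "iface");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "Unable to get ahb clk\n");

	return 0;
}
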
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index dc91b43d5a38..60a99c6525b2 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -29,7 +29,8 @@ struct msm_dsi_phy_ops {
};
struct msm_dsi_phy_cfg {
- struct dsi_reg_config reg_cfg;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
struct msm_dsi_phy_ops ops;
unsigned long min_pll_rate;
@@ -98,7 +99,7 @@ struct msm_dsi_phy {
int id;
struct clk *ahb_clk;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+ struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 08b015ea1b1e..27b592c776a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -188,19 +188,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
- config->ssc_stepsize & 0xff);
+ config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
- config->ssc_stepsize >> 8);
+ config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
- config->ssc_div_per & 0xff);
+ config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
- config->ssc_div_per >> 8);
+ config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
- config->ssc_adj_per & 0xff);
+ config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
- config->ssc_adj_per >> 8);
+ config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
- SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@@ -215,16 +215,19 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
- 0xba);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ 0xba);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
+ 0x0c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
+ 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
+ 0xfa);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
- 0x4c);
+ 0x4c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
@@ -236,18 +239,18 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *conf
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
- config->decimal_div_start);
+ config->decimal_div_start);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
- config->frac_div_start & 0xff);
+ config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
- (config->frac_div_start & 0xff00) >> 8);
+ (config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
- (config->frac_div_start & 0x30000) >> 16);
+ (config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
- config->pll_clock_inverters);
+ config->pll_clock_inverters);
}
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -306,7 +309,7 @@ static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
- data & ~BIT(5));
+ data & ~BIT(5));
ndelay(250);
}
@@ -315,7 +318,7 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
- data | BIT(5));
+ data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
@@ -326,7 +329,7 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- data & ~BIT(5));
+ data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
@@ -335,7 +338,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- data | BIT(5));
+ data | BIT(5));
}
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
@@ -356,7 +359,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Start PLL */
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
- 0x01);
+ 0x01);
/*
* ensure all PLL configurations are written prior to checking
@@ -378,10 +381,10 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
dsi_pll_enable_global_clk(pll_10nm->slave);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
- 0x01);
+ 0x01);
if (pll_10nm->slave)
dsi_phy_write(pll_10nm->slave->phy->base +
- REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
error:
return rc;
@@ -486,7 +489,7 @@ static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
- REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
@@ -515,7 +518,7 @@ static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- cached->bit_clk_div | (cached->pix_clk_div << 4));
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@@ -571,64 +574,59 @@ static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
- char parent2[32], parent3[32], parent4[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *pclk_mux;
int ret;
DBG("DSI%d", pll_10nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
pll_10nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent, CLK_SET_RATE_PARENT,
- pll_10nm->phy->pll_base +
- REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- CLK_SET_RATE_PARENT,
- pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- 0, 4, CLK_DIVIDER_ONE_BASED,
- &pll_10nm->postdiv_lock);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -636,52 +634,45 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 2);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 4);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2, parent3, parent4
- }), 4, 0, pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- 0, 2, 0, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
+
+ pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ pll_out_div,
+ pll_post_out_div,
+ }), 4, 0, pll_10nm->phy->base +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
+ if (IS_ERR(pclk_mux)) {
+ ret = PTR_ERR(pclk_mux);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- 0, pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- 4, 4, CLK_DIVIDER_ONE_BASED,
- &pll_10nm->postdiv_lock);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
+ 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -1028,14 +1019,14 @@ static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
return 0;
}
+static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
@@ -1052,12 +1043,8 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
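
The PLL registration above switches from string parent names built with snprintf() to passing struct clk_hw pointers directly, so a single clk_name buffer suffices and each clock's parent linkage is explicit at registration time. A reduced sketch of a two-stage chain, with the names, register offset and divider width chosen only for illustration:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/spinlock.h>

/*
 * Sketch: hang a power-of-two divider off an existing VCO clk_hw, then a
 * fixed /8 byte clock fed from that divider. "base" and "lock" are
 * placeholders for the PHY's register block and postdiv spinlock.
 */
static int example_register_chain(struct device *dev, struct clk_hw *vco,
				  void __iomem *base, spinlock_t *lock,
				  struct clk_hw **byte_clk)
{
	struct clk_hw *out_div;

	out_div = devm_clk_hw_register_divider_parent_hw(dev,
			"example_out_div", vco, CLK_SET_RATE_PARENT,
			base, 0, 2, CLK_DIVIDER_POWER_OF_TWO, lock);
	if (IS_ERR(out_div))
		return PTR_ERR(out_div);

	*byte_clk = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			"example_byte_clk", out_div, CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(*byte_clk))
		return PTR_ERR(*byte_clk);

	return 0;
}
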
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 8199c53567f4..0f8f4ca46429 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -711,7 +711,7 @@ static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
+ "restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -764,14 +764,14 @@ static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
const char *name,
- const char *parent_name,
+ const struct clk_hw *parent_hw,
unsigned long flags,
u8 shift)
{
struct dsi_pll_14nm_postdiv *pll_postdiv;
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_init_data postdiv_init = {
- .parent_names = (const char *[]) { parent_name },
+ .parent_hws = (const struct clk_hw *[]) { parent_hw },
.num_parents = 1,
.name = name,
.flags = flags,
@@ -800,72 +800,70 @@ static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_14nm_vco,
};
struct device *dev = &pll_14nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
int ret;
DBG("DSI%d", pll_14nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
pll_14nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
- hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
- CLK_SET_RATE_PARENT, 0);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
+ &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(n1_postdiv))
+ return PTR_ERR(n1_postdiv);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
/* DSI Byte clock = VCO_CLK / N1 / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
/*
* Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider
* on the way. Don't let it set parent.
*/
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, n1_postdiv, 0, 1, 2);
+ if (IS_ERR(n1_postdivby2))
+ return PTR_ERR(n1_postdivby2);
- snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
* This is the output of N2 post-divider, bits 4-7 in
* REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
*/
- hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
+ 0, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
- provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
}
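
pll_14nm_register() now hands struct clk_hw pointers straight to the *_parent_hw() registration helpers instead of rebuilding parent names with snprintf(). A standalone sketch of that chaining, with invented clock names and register offsets:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/spinlock.h>

/*
 * Hypothetical sketch: chain a VCO clk_hw into a 4-bit divider and a /8
 * fixed factor with the *_parent_hw() helpers, as the code above does.
 * The register offset and bitfield are invented.
 */
static int example_register_postdivs(struct device *dev, void __iomem *base,
				     struct clk_hw *vco, spinlock_t *lock)
{
	struct clk_hw *postdiv, *byteclk;

	/* divider at hypothetical offset 0x20, bits [3:0] */
	postdiv = devm_clk_hw_register_divider_parent_hw(dev, "example_postdiv",
			vco, CLK_SET_RATE_PARENT, base + 0x20, 0, 4,
			CLK_DIVIDER_ONE_BASED, lock);
	if (IS_ERR(postdiv))
		return PTR_ERR(postdiv);

	/* byte clock = postdiv / 8 */
	byteclk = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			"example_byteclk", postdiv, CLK_SET_RATE_PARENT, 1, 8);

	return PTR_ERR_OR_ZERO(byteclk);
}
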
@@ -952,7 +950,8 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -1005,7 +1004,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
ret = dsi_14nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
@@ -1024,14 +1023,18 @@ static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 17000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 73400 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 17000, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@@ -1047,12 +1050,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 73400, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_73p4mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@@ -1068,12 +1067,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 17000, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index ee7c418a1c29..c9752b991744 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -129,15 +129,15 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
dsi_20nm_phy_regulator_ctrl(phy, false);
}
+static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+};
+
const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- {"vcca", 10000, 100}, /* 1.0 V */
- },
- },
+ .regulator_data = dsi_phy_20nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 48eab80b548e..4c1bf55c5f38 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -104,7 +104,7 @@ static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
* reset bit off and back on.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
- DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+ DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}
@@ -201,9 +201,9 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
- DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
- DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
/* Add hardware recommended delay for correct PLL configuration */
@@ -316,12 +316,12 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
for (i = 0; i < 2; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
- 0x0c, 100);
+ 0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
/* poll for PLL ready status */
- locked = pll_28nm_poll_for_ready(pll_28nm,
- max_reads, timeout_us);
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
+ timeout_us);
if (locked)
break;
@@ -508,28 +508,28 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- cached_state->postdiv3);
+ cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
- cached_state->postdiv1);
+ cached_state->postdiv1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
- cached_state->byte_mux);
+ cached_state->byte_mux);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent1[32], parent2[32], vco_name[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
};
struct device *dev = &pll_28nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
int ret;
DBG("%d", pll_28nm->phy->id);
@@ -539,55 +539,49 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
else
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
pll_28nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT,
+ snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+ analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
pll_28nm->phy->pll_base +
- REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
0, 4, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent1, 0, pll_28nm->phy->pll_base +
+ if (IS_ERR(analog_postdiv))
+ return PTR_ERR(analog_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+ indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
+ if (IS_ERR(indirect_path_div2))
+ return PTR_ERR(indirect_path_div2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- 0, 8, 0, NULL);
+ 0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent1, parent2
+ snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
+ byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ &pll_28nm->clk_hw,
+ indirect_path_div2,
}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
- REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+ if (IS_ERR(byte_mux))
+ return PTR_ERR(byte_mux);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT, 1, 4);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ byte_mux, CLK_SET_RATE_PARENT, 1, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
@@ -627,31 +621,31 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
- DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
- DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
- DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
- DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
- DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
- DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
- DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
- DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
- DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
- DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
- DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
- DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
@@ -713,7 +707,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -769,14 +764,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100},
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@@ -792,12 +787,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100},
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@@ -813,12 +804,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index fc56cdcc9ad6..26c08047e20c 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -104,29 +104,29 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
- fb_divider & 0xff);
+ fb_divider & 0xff);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
- val);
+ val);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
- val);
+ val);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
- 0xf);
+ 0xf);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
- val);
+ val);
return 0;
}
@@ -206,7 +206,7 @@ static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
/* enable the PLL */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
- DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+ DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
@@ -367,23 +367,23 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
+ "restore vco rate failed. ret=%d\n", ret);
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
- cached_state->postdiv3);
+ cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
- cached_state->postdiv2);
+ cached_state->postdiv2);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
- cached_state->postdiv1);
+ cached_state->postdiv1);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
- char *clk_name, *parent_name, *vco_name;
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
@@ -404,20 +404,8 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
if (!bytediv)
return -ENOMEM;
- vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!vco_name)
- return -ENOMEM;
-
- parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!parent_name)
- return -ENOMEM;
-
- clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!clk_name)
- return -ENOMEM;
-
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- vco_init.name = vco_name;
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ vco_init.name = clk_name;
pll_28nm->clk_hw.init = &vco_init;
@@ -429,13 +417,14 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
bytediv->hw.init = &bytediv_init;
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
- snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
bytediv_init.flags = CLK_SET_RATE_PARENT;
- bytediv_init.parent_names = (const char * const *) &parent_name;
+ bytediv_init.parent_hws = (const struct clk_hw*[]){
+ &pll_28nm->clk_hw,
+ };
bytediv_init.num_parents = 1;
/* DIV2 */
@@ -444,12 +433,12 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent_name, 0, pll_28nm->phy->pll_base +
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
- 0, 8, 0, NULL);
+ 0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
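
For the hand-rolled bytediv clock, the parent is wired through clk_init_data::parent_hws instead of parent_names. A minimal sketch of that pattern for a hypothetical custom clock (the clock name and ops are assumptions, not this driver's):

#include <linux/clk-provider.h>

/*
 * Hypothetical: register a custom clk_hw whose single parent is another
 * clk_hw, via clk_init_data::parent_hws, as the bytediv code above now
 * does. @ops is assumed to be defined elsewhere.
 */
static int example_register_child(struct device *dev, struct clk_hw *child,
				  struct clk_hw *parent,
				  const struct clk_ops *ops)
{
	struct clk_init_data init = {
		.name = "example_child_clk",
		.ops = ops,
		.parent_hws = (const struct clk_hw *[]){ parent },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
	};

	/* the clk core copies parent info at registration time, so the
	 * stack-allocated init data and compound literal are fine here */
	child->init = &init;

	return devm_clk_hw_register(dev, child);
}
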
@@ -489,29 +478,29 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
- DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
- DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
- DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
- DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
- DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
- DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
- DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
- DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
- DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
- DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
- DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+ DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
@@ -523,7 +512,7 @@ static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
- 0x100);
+ 0x100);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
@@ -544,7 +533,7 @@ static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
- 0x3);
+ 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
@@ -577,11 +566,11 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
- 0x00);
+ 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
- 0x01);
+ 0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
- 0x66);
+ 0x66);
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
@@ -602,7 +591,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -648,14 +638,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = dsi_phy_28nm_8960_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 66ed1919a1db..9e7fa7d88ead 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -176,19 +176,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *c
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
- config->ssc_stepsize & 0xff);
+ config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
- config->ssc_stepsize >> 8);
+ config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
- config->ssc_div_per & 0xff);
+ config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
- config->ssc_div_per >> 8);
+ config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
- config->ssc_adj_per & 0xff);
+ config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
- config->ssc_adj_per >> 8);
+ config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
- SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@@ -208,7 +208,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
}
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
- analog_controls_five_1);
+ analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
@@ -245,17 +245,20 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
- config->frac_div_start & 0xff);
+ config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
- (config->frac_div_start & 0xff00) >> 8);
+ (config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
- (config->frac_div_start & 0x30000) >> 16);
+ (config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
+ pll->phy->cphy_mode ? 0x00 : 0x10);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -341,7 +344,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- data | BIT(5) | BIT(4));
+ data | BIT(5) | BIT(4));
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@@ -500,7 +503,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
- REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
@@ -529,7 +532,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- cached->bit_clk_div | (cached->pix_clk_div << 4));
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@@ -585,65 +588,60 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
- char parent2[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
};
struct device *dev = &pll_7nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
int ret;
DBG("DSI%d", pll_7nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
pll_7nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent, CLK_SET_RATE_PARENT,
- pll_7nm->phy->pll_base +
- REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- CLK_SET_RATE_PARENT,
- pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- 0, 4, CLK_DIVIDER_ONE_BASED,
- &pll_7nm->postdiv_lock);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1,
- pll_7nm->phy->cphy_mode ? 7 : 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -651,25 +649,25 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 2);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
if (pll_7nm->phy->cphy_mode)
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 2, 7);
else
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
goto fail;
}
@@ -682,34 +680,32 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
- snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+ phy_pll_out_dsi_parent = pll_post_out_div;
} else {
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2,
- }), 2, 0, pll_7nm->phy->base +
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ }), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 1, 0, NULL);
+ 0, 1, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+ phy_pll_out_dsi_parent = hw;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- 0, pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- 4, 4, CLK_DIVIDER_ONE_BASED,
- &pll_7nm->postdiv_lock);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ phy_pll_out_dsi_parent, 0,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -841,7 +837,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: PHY timing calculation failed\n", __func__);
+ "%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -960,10 +956,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
- timing->shared_timings.clk_pre);
+ timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
- timing->shared_timings.clk_post);
+ timing->shared_timings.clk_post);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
@@ -982,9 +978,9 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
- timing->shared_timings.clk_pre);
+ timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
- timing->shared_timings.clk_post);
+ timing->shared_timings.clk_post);
}
/* DSI lane settings */
@@ -1036,14 +1032,18 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
DBG("DSI%d PHY disabled", phy->id);
}
+static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_37550uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 37550 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@@ -1065,12 +1065,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@@ -1087,12 +1083,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 37550, 0},
- },
- },
+ .regulator_data = dsi_phy_7nm_37550uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37550uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index b06d9d25a189..4dd055416620 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -691,15 +691,13 @@ static const struct clk_ops hdmi_8996_pll_ops = {
.is_enabled = hdmi_8996_pll_is_enabled,
};
-static const char * const hdmi_pll_parents[] = {
- "xo",
-};
-
static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
- .parent_names = hdmi_pll_parents,
- .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
};
@@ -707,8 +705,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8996 *pll;
- struct clk *clk;
- int i;
+ int i, ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
@@ -735,10 +732,16 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
}
pll->clk_hw.init = &pll_init;
- clk = devm_clk_register(dev, &pll->clk_hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
- return -EINVAL;
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
+ return ret;
}
return 0;
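
The HDMI PLL now registers a clk_hw and exposes it as a devicetree clock provider rather than returning a bare struct clk. A stripped-down sketch of that sequence (hypothetical wrapper, not the msm code itself):

#include <linux/clk-provider.h>
#include <linux/platform_device.h>

/*
 * Hypothetical: register @hw (already populated with its clk_init_data)
 * and hand it out to DT consumers via of_clk_hw_simple_get().
 */
static int example_register_provider(struct platform_device *pdev,
				     struct clk_hw *hw)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ret;

	/* single-clock provider: #clock-cells = <0> in the DT node */
	return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
}
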
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7d2dab260f86..95f4374ae21c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -7,6 +7,7 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -326,6 +327,13 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
+ &fail_gem_alloc);
+ fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
+ &fail_gem_iova);
+#endif
}
#endif
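
The debugfs hook above, together with the DECLARE_FAULT_ATTR()/should_fail() additions to msm_drv.c later in this patch, follows the standard fault-injection recipe. A self-contained sketch with hypothetical names (in real code this is normally guarded by CONFIG_FAULT_INJECTION, as the patch does):

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fault-inject.h>
#include <linux/types.h>

/* Hypothetical fault-injection point, mirroring the fail_gem_alloc /
 * fail_gem_iova wiring in this series. */
static DECLARE_FAULT_ATTR(fail_example_alloc);

static void example_debugfs_init(struct dentry *root)
{
	/* exposes the probability/interval/times/... knobs under
	 * <root>/fail_example_alloc/ */
	fault_create_debugfs_attr("fail_example_alloc", root,
				  &fail_example_alloc);
}

static int example_alloc(size_t size)
{
	/* opt this call site into injected -ENOMEM failures */
	if (should_fail(&fail_example_alloc, size))
		return -ENOMEM;

	return 0;
}
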
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1ed4cd09dbf8..28034c21f6bc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -6,6 +6,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -78,6 +79,11 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(fail_gem_alloc);
+DECLARE_FAULT_ATTR(fail_gem_iova);
+#endif
+
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -355,7 +361,7 @@ static int msm_init_vram(struct drm_device *dev)
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
- * Grab the entire CMA chunk carved out in early startup in
+ * Grab the entire DMA chunk carved out in early startup in
* mach-msm:
*/
} else if (!msm_use_mmu(dev)) {
@@ -418,14 +424,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
- INIT_LIST_HEAD(&priv->inactive_willneed);
- INIT_LIST_HEAD(&priv->inactive_dontneed);
- INIT_LIST_HEAD(&priv->inactive_unpinned);
- mutex_init(&priv->mm_lock);
+ /*
+ * Initialize the LRUs:
+ */
+ mutex_init(&priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
- might_lock(&priv->mm_lock);
+ might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
@@ -469,6 +479,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
}
}
+ drm_helper_move_panel_connectors_to_head(ddev);
+
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
@@ -697,6 +709,9 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
flags |= MSM_BO_WC;
}
+ if (should_fail(&fail_gem_alloc, args->size))
+ return -ENOMEM;
+
return msm_gem_new_handle(dev, file, args->size,
args->flags, &args->handle, NULL);
}
@@ -758,6 +773,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
@@ -779,6 +797,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
if (priv->gpu->aspace == ctx->aspace)
return -EOPNOTSUPP;
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
@@ -883,13 +904,13 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
* retired, so if the fence is not found it means there is nothing
* to wait for
*/
- ret = mutex_lock_interruptible(&queue->lock);
+ ret = mutex_lock_interruptible(&queue->idr_lock);
if (ret)
return ret;
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
- mutex_unlock(&queue->lock);
+ mutex_unlock(&queue->idr_lock);
if (!fence)
return 0;
@@ -1242,10 +1263,15 @@ void msm_drv_shutdown(struct platform_device *pdev)
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *drm = priv ? priv->dev : NULL;
- if (!priv || !priv->kms)
- return;
-
- drm_atomic_helper_shutdown(drm);
+ /*
+ * Shut down the hw if we're far enough along that things might be on.
+ * If we run this too early, we'll end up panicking in any variety of
+ * places. Since we don't register the drm device until late in
+ * msm_drm_init, drm_dev->registered is used as an indicator that the
+ * shutdown will be successful.
+ */
+ if (drm && drm->registered)
+ drm_atomic_helper_shutdown(drm);
}
static struct platform_driver msm_platform_driver = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b3689a2d27d7..b2ea262296a4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -27,13 +27,19 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
+#ifdef CONFIG_FAULT_INJECTION
+extern struct fault_attr fail_gem_alloc;
+extern struct fault_attr fail_gem_iova;
+#else
+# define should_fail(attr, size) 0
+#endif
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@@ -96,11 +102,6 @@ struct msm_drm_thread {
struct kthread_worker *worker;
};
-/* DSC config */
-struct msm_display_dsc_config {
- struct drm_dsc_config *drm;
-};
-
struct msm_drm_private {
struct drm_device *dev;
@@ -142,28 +143,60 @@ struct msm_drm_private {
struct mutex obj_lock;
/**
- * LRUs of inactive GEM objects. Every bo is either in one of the
- * inactive lists (depending on whether or not it is shrinkable) or
- * gpu->active_list (for the gpu it is active on[1]), or transiently
- * on a temporary list as the shrinker is running.
+ * lru:
*
- * Note that inactive_willneed also contains pinned and vmap'd bos,
- * but the number of pinned-but-not-active objects is small (scanout
- * buffers, ringbuffer, etc).
+ * The various LRUs that a GEM object is in at various stages of
+ * its lifetime. Objects start out in the unbacked LRU. When
+ * pinned (for scanout or permanently mapped GPU buffers, like
+ * ringbuffer, memptr, fw, etc) it moves to the pinned LRU. When
+ * unpinned, it moves into willneed or dontneed LRU depending on
+ * madvise state. When backing pages are evicted (willneed) or
+ * purged (dontneed) it moves back into the unbacked LRU.
*
- * These lists are protected by mm_lock (which should be acquired
- * before per GEM object lock). One should *not* hold mm_lock in
- * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
- *
- * [1] if someone ever added support for the old 2d cores, there could be
- * more than one gpu object
+ * The dontneed LRU is considered by the shrinker for objects
+ * that are candidate for purging, and the willneed LRU is
+ * considered for objects that could be evicted.
*/
- struct list_head inactive_willneed; /* inactive + potentially unpin/evictable */
- struct list_head inactive_dontneed; /* inactive + shrinkable */
- struct list_head inactive_unpinned; /* inactive + purged or unpinned */
- long shrinkable_count; /* write access under mm_lock */
- long evictable_count; /* write access under mm_lock */
- struct mutex mm_lock;
+ struct {
+ /**
+ * unbacked:
+ *
+ * The LRU for GEM objects without backing pages allocated.
+ * This mostly exists so that objects are always in one
+ * LRU.
+ */
+ struct drm_gem_lru unbacked;
+
+ /**
+ * pinned:
+ *
+ * The LRU for pinned GEM objects
+ */
+ struct drm_gem_lru pinned;
+
+ /**
+ * willneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * WILLNEED state (ie. can be evicted)
+ */
+ struct drm_gem_lru willneed;
+
+ /**
+ * dontneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * DONTNEED state (ie. can be purged)
+ */
+ struct drm_gem_lru dontneed;
+
+ /**
+ * lock:
+ *
+ * Protects manipulation of all of the LRUs.
+ */
+ struct mutex lock;
+ } lru;
struct workqueue_struct *wq;
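
The new lru sub-struct replaces the hand-maintained inactive_* lists with the common drm_gem_lru helpers. A minimal sketch of initializing two LRUs behind a shared lock and moving an object onto one of them; the structure and function names are invented, not the msm code:

#include <linux/mutex.h>
#include <drm/drm_gem.h>

/* Hypothetical: two LRUs sharing one lock, as priv->lru does above. */
struct example_lru_state {
	struct mutex lock;
	struct drm_gem_lru unbacked;
	struct drm_gem_lru pinned;
};

static void example_lru_init(struct example_lru_state *st)
{
	mutex_init(&st->lock);
	drm_gem_lru_init(&st->unbacked, &st->lock);
	drm_gem_lru_init(&st->pinned, &st->lock);
}

/* The helper takes st->lock itself and unlinks the object from
 * whatever LRU it previously sat on before appending it here. */
static void example_mark_pinned(struct example_lru_state *st,
				struct drm_gem_object *obj)
{
	drm_gem_lru_move_tail(&st->pinned, obj);
}
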
@@ -290,7 +323,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
-struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@@ -320,7 +353,7 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
return false;
}
-static inline struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return NULL;
}
@@ -433,6 +466,8 @@ void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
+struct icc_path *msm_icc_get(struct device *dev, const char *name);
+
#define msm_writel(data, addr) writel((data), (addr))
#define msm_readl(addr) readl((addr))
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8ddbd2e001d4..1dee0d18abbb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -19,7 +19,7 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
-static void update_inactive(struct msm_gem_object *msm_obj);
+static void update_lru(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
@@ -132,7 +132,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->pages;
@@ -174,40 +174,45 @@ static void put_pages(struct drm_gem_object *obj)
put_pages_vram(obj);
msm_obj->pages = NULL;
+ update_lru(obj);
}
}
-struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
-
if (!IS_ERR(p)) {
- msm_obj->pin_count++;
- update_inactive(msm_obj);
+ to_msm_bo(obj)->pin_count++;
+ update_lru(obj);
}
- msm_gem_unlock(obj);
return p;
}
-void msm_gem_put_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
msm_gem_lock(obj);
- msm_obj->pin_count--;
- GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ p = msm_gem_pin_pages_locked(obj);
+ msm_gem_unlock(obj);
+
+ return p;
+}
+
+void msm_gem_unpin_pages(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
@@ -273,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -302,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
@@ -321,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
@@ -352,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
@@ -370,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
@@ -383,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
{
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace);
@@ -423,19 +428,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
- pages = get_pages(obj);
+ pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
-
- if (!ret)
- msm_obj->pin_count++;
+ if (ret)
+ msm_gem_unpin_locked(obj);
return ret;
}
@@ -444,12 +448,12 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ update_lru(obj);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@@ -465,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma))
@@ -626,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (obj->import_attach)
return ERR_PTR(-ENODEV);
@@ -658,7 +662,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->vaddr;
@@ -699,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
@@ -729,8 +733,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
- if (msm_obj->active_count == 0)
- update_inactive(msm_obj);
+ update_lru(obj);
msm_gem_unlock(obj);
@@ -742,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
@@ -757,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
- update_inactive(msm_obj);
drm_gem_free_mmap_offset(obj);
@@ -780,10 +782,8 @@ void msm_gem_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
- GEM_WARN_ON(!msm_obj->evictable);
- GEM_WARN_ON(msm_obj->active_count);
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, false);
@@ -791,15 +791,13 @@ void msm_gem_evict(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
-
- update_inactive(msm_obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
@@ -808,66 +806,37 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
+static void update_lru(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
-
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
- GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
- GEM_WARN_ON(msm_obj->dontneed);
-
- if (msm_obj->active_count++ == 0) {
- mutex_lock(&priv->mm_lock);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
- list_move_tail(&msm_obj->mm_list, &gpu->active_list);
- mutex_unlock(&priv->mm_lock);
- }
-}
-
-void msm_gem_active_put(struct drm_gem_object *obj)
-{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(&msm_obj->base);
- if (--msm_obj->active_count == 0) {
- update_inactive(msm_obj);
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+ GEM_WARN_ON(msm_obj->vmap_count);
+
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+ } else if (msm_obj->pin_count || msm_obj->vmap_count) {
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
}
}
-static void update_inactive(struct msm_gem_object *msm_obj)
+bool msm_gem_active(struct drm_gem_object *obj)
{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(obj);
- if (msm_obj->active_count != 0)
- return;
-
- mutex_lock(&priv->mm_lock);
-
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
-
- list_del(&msm_obj->mm_list);
- if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
- mark_evictable(msm_obj);
- } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
- mark_purgeable(msm_obj);
- } else {
- GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- }
+ if (to_msm_bo(obj)->pin_count)
+ return true;
- mutex_unlock(&priv->mm_lock);
+ return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -910,7 +879,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
stats->all.count++;
stats->all.size += obj->size;
- if (is_active(msm_obj)) {
+ if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
@@ -938,7 +907,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
- msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
@@ -1015,15 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- mutex_lock(&priv->mm_lock);
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- list_del(&msm_obj->mm_list);
- mutex_unlock(&priv->mm_lock);
-
- /* object should not be on active list: */
- GEM_WARN_ON(is_active(msm_obj));
-
put_iova_spaces(obj, true);
if (obj->import_attach) {
@@ -1183,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
to_msm_bo(obj)->vram_node = &vma->node;
- /* Call chain get_pages() -> update_inactive() tries to
- * access msm_obj->mm_list, but it is not initialized yet.
- * To avoid NULL pointer dereference error, initialize
- * mm_list to be empty.
- */
- INIT_LIST_HEAD(&msm_obj->mm_list);
-
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
@@ -1212,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
@@ -1270,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
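The msm_gem.c hunks above replace the old mm_list bookkeeping with a single update_lru() that files each object into exactly one of four drm_gem_lru buckets. The following is a minimal userspace sketch of just that bucket-selection order, using stand-in types rather than the driver's structures; fake_bo and classify() are invented for illustration, not the drm_gem_lru API.

```c
#include <stdio.h>
#include <stdbool.h>

enum lru_bucket { LRU_UNBACKED, LRU_PINNED, LRU_WILLNEED, LRU_DONTNEED };

struct fake_bo {
	bool has_pages;
	int pin_count;
	int vmap_count;
	bool willneed;	/* stands in for MSM_MADV_WILLNEED vs. DONTNEED */
};

/* Same priority order as update_lru(): unbacked, then pinned, then madvise state */
static enum lru_bucket classify(const struct fake_bo *bo)
{
	if (!bo->has_pages)
		return LRU_UNBACKED;
	if (bo->pin_count || bo->vmap_count)
		return LRU_PINNED;
	return bo->willneed ? LRU_WILLNEED : LRU_DONTNEED;
}

int main(void)
{
	static const char *names[] = { "unbacked", "pinned", "willneed", "dontneed" };
	struct fake_bo pinned    = { .has_pages = true, .pin_count = 1, .willneed = true };
	struct fake_bo idle      = { .has_pages = true, .willneed = true };
	struct fake_bo purgeable = { .has_pages = true, .willneed = false };

	printf("%s %s %s\n", names[classify(&pinned)],
	       names[classify(&idle)], names[classify(&purgeable)]);
	return 0;
}
```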
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 432032ad4aed..c4844cf3a585 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -94,16 +94,6 @@ struct msm_gem_object {
uint8_t madv;
/**
- * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
- */
- bool dontneed : 1;
-
- /**
- * Is object evictable (ie. counted in priv->evictable_count)?
- */
- bool evictable : 1;
-
- /**
* count of active vmap'ing
*/
uint8_t vmap_count;
@@ -114,17 +104,6 @@ struct msm_gem_object {
*/
struct list_head node;
- /**
- * An object is either:
- * inactive - on priv->inactive_dontneed or priv->inactive_willneed
- * (depending on purgeability status)
- * active - on one one of the gpu's active_list.. well, at
- * least for now we don't have (I don't think) hw sync between
- * 2d and 3d one devices which have both, meaning we need to
- * block on submit if a bo is already on other ring
- */
- struct list_head mm_list;
-
struct page **pages;
struct sg_table *sgt;
void *vaddr;
@@ -138,7 +117,6 @@ struct msm_gem_object {
char name[32]; /* Identifier to print for the debugfs files */
- int active_count;
int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -159,8 +137,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
-struct page **msm_gem_get_pages(struct drm_gem_object *obj);
-void msm_gem_put_pages(struct drm_gem_object *obj);
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
+void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -171,8 +149,7 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
-void msm_gem_active_put(struct drm_gem_object *obj);
+bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -208,12 +185,6 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
-static inline bool __must_check
-msm_gem_trylock(struct drm_gem_object *obj)
-{
- return dma_resv_trylock(obj->resv);
-}
-
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@@ -226,8 +197,8 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-static inline bool
-msm_gem_is_locked(struct drm_gem_object *obj)
+static inline void
+msm_gem_assert_locked(struct drm_gem_object *obj)
{
/*
* Destroying the object is a special case.. msm_gem_free_object()
@@ -241,13 +212,10 @@ msm_gem_is_locked(struct drm_gem_object *obj)
* Unfortunately lockdep is not aware of this detail. So when the
* refcount drops to zero, we pretend it is already locked.
*/
- return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
-}
-
-static inline bool is_active(struct msm_gem_object *msm_obj)
-{
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
- return msm_obj->active_count;
+ lockdep_assert_once(
+ (kref_read(&obj->refcount) == 0) ||
+ (lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)
+ );
}
/* imported/exported objects are not purgeable: */
@@ -264,81 +232,15 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(&msm_obj->base);
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
-static inline void mark_purgeable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unpurgeable(msm_obj))
- return;
-
- if (GEM_WARN_ON(msm_obj->dontneed))
- return;
-
- priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
- msm_obj->dontneed = true;
-}
-
-static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unpurgeable(msm_obj))
- return;
-
- if (GEM_WARN_ON(!msm_obj->dontneed))
- return;
-
- priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
- GEM_WARN_ON(priv->shrinkable_count < 0);
- msm_obj->dontneed = false;
-}
-
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}
-static inline void mark_evictable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unevictable(msm_obj))
- return;
-
- if (WARN_ON(msm_obj->evictable))
- return;
-
- priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
- msm_obj->evictable = true;
-}
-
-static inline void mark_unevictable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unevictable(msm_obj))
- return;
-
- if (WARN_ON(!msm_obj->evictable))
- return;
-
- priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
- WARN_ON(priv->evictable_count < 0);
- msm_obj->evictable = false;
-}
-
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
@@ -390,9 +292,8 @@ struct msm_gem_submit {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_ACTIVE 0x2000 /* active refcnt is held */
-#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
-#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
+#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned and on active list */
+#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */
uint32_t flags;
union {
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dcc8a573bc76..c1d91863df05 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -63,12 +63,12 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
- msm_gem_get_pages(obj);
+ msm_gem_pin_pages(obj);
return 0;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
- msm_gem_put_pages(obj);
+ msm_gem_unpin_pages(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 0317055e3253..1de14e67f96b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -24,103 +24,77 @@ static bool can_swap(void)
return enable_eviction && get_nr_swap_pages() > 0;
}
+static bool can_block(struct shrink_control *sc)
+{
+ if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
+ return false;
+ return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
+}
+
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned count = priv->shrinkable_count;
+ unsigned count = priv->lru.dontneed.count;
if (can_swap())
- count += priv->evictable_count;
+ count += priv->lru.willneed.count;
return count;
}
static bool
-purge(struct msm_gem_object *msm_obj)
+purge(struct drm_gem_object *obj)
{
- if (!is_purgeable(msm_obj))
+ if (!is_purgeable(to_msm_bo(obj)))
return false;
- /*
- * This will move the obj out of still_in_list to
- * the purged list
- */
- msm_gem_purge(&msm_obj->base);
+ if (msm_gem_active(obj))
+ return false;
+
+ msm_gem_purge(obj);
return true;
}
static bool
-evict(struct msm_gem_object *msm_obj)
+evict(struct drm_gem_object *obj)
{
- if (is_unevictable(msm_obj))
+ if (is_unevictable(to_msm_bo(obj)))
+ return false;
+
+ if (msm_gem_active(obj))
return false;
- msm_gem_evict(&msm_obj->base);
+ msm_gem_evict(obj);
return true;
}
-static unsigned long
-scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
- bool (*shrink)(struct msm_gem_object *msm_obj))
+static bool
+wait_for_idle(struct drm_gem_object *obj)
{
- unsigned freed = 0;
- struct list_head still_in_list;
-
- INIT_LIST_HEAD(&still_in_list);
-
- mutex_lock(&priv->mm_lock);
-
- while (freed < nr_to_scan) {
- struct msm_gem_object *msm_obj = list_first_entry_or_null(
- list, typeof(*msm_obj), mm_list);
-
- if (!msm_obj)
- break;
-
- list_move_tail(&msm_obj->mm_list, &still_in_list);
-
- /*
- * If it is in the process of being freed, msm_gem_free_object
- * can be blocked on mm_lock waiting to remove it. So just
- * skip it.
- */
- if (!kref_get_unless_zero(&msm_obj->base.refcount))
- continue;
-
- /*
- * Now that we own a reference, we can drop mm_lock for the
- * rest of the loop body, to reduce contention with the
- * retire_submit path (which could make more objects purgeable)
- */
-
- mutex_unlock(&priv->mm_lock);
-
- /*
- * Note that this still needs to be trylock, since we can
- * hit shrinker in response to trying to get backing pages
- * for this obj (ie. while it's lock is already held)
- */
- if (!msm_gem_trylock(&msm_obj->base))
- goto tail;
-
- if (shrink(msm_obj))
- freed += msm_obj->base.size >> PAGE_SHIFT;
+ enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+}
- msm_gem_unlock(&msm_obj->base);
+static bool
+active_purge(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
-tail:
- drm_gem_object_put(&msm_obj->base);
- mutex_lock(&priv->mm_lock);
- }
+ return purge(obj);
+}
- list_splice_tail(&still_in_list, list);
- mutex_unlock(&priv->mm_lock);
+static bool
+active_evict(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
- return freed;
+ return evict(obj);
}
static unsigned long
@@ -128,21 +102,34 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned long freed;
-
- freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
-
- if (freed > 0)
- trace_msm_gem_purge(freed << PAGE_SHIFT);
-
- if (can_swap() && freed < sc->nr_to_scan) {
- int evicted = scan(priv, sc->nr_to_scan - freed,
- &priv->inactive_willneed, evict);
+ struct {
+ struct drm_gem_lru *lru;
+ bool (*shrink)(struct drm_gem_object *obj);
+ bool cond;
+ unsigned long freed;
+ } stages[] = {
+ /* Stages of progressively more aggressive/expensive reclaim: */
+ { &priv->lru.dontneed, purge, true },
+ { &priv->lru.willneed, evict, can_swap() },
+ { &priv->lru.dontneed, active_purge, can_block(sc) },
+ { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
+ };
+ long nr = sc->nr_to_scan;
+ unsigned long freed = 0;
- if (evicted > 0)
- trace_msm_gem_evict(evicted << PAGE_SHIFT);
+ for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ if (!stages[i].cond)
+ continue;
+ stages[i].freed =
+ drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+ nr -= stages[i].freed;
+ freed += stages[i].freed;
+ }
- freed += evicted;
+ if (freed) {
+ trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
+ stages[1].freed, stages[2].freed,
+ stages[3].freed);
}
return (freed > 0) ? freed : SHRINK_STOP;
@@ -173,12 +160,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct msm_gem_object *msm_obj)
+vmap_shrink(struct drm_gem_object *obj)
{
- if (!is_vunmapable(msm_obj))
+ if (!is_vunmapable(to_msm_bo(obj)))
return false;
- msm_gem_vunmap(&msm_obj->base);
+ msm_gem_vunmap(obj);
return true;
}
@@ -188,17 +175,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
- struct list_head *mm_lists[] = {
- &priv->inactive_dontneed,
- &priv->inactive_willneed,
- priv->gpu ? &priv->gpu->active_list : NULL,
+ struct drm_gem_lru *lrus[] = {
+ &priv->lru.dontneed,
+ &priv->lru.willneed,
+ &priv->lru.pinned,
NULL,
};
unsigned idx, unmapped = 0;
- for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
- unmapped += scan(priv, vmap_shrink_limit - unmapped,
- mm_lists[idx], vmap_shrink);
+ for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ unmapped += drm_gem_lru_scan(lrus[idx],
+ vmap_shrink_limit - unmapped,
+ vmap_shrink);
}
*(unsigned long *)ptr += unmapped;
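The rewritten shrinker above drives reclaim through a table of stages, each gated by a condition and charged against a shared nr_to_scan budget. Below is a small userspace sketch of that control flow only, assuming toy LRUs that track a page count; fake_lru and fake_scan() are invented stand-ins, not the drm_gem_lru_scan() API.

```c
#include <stdio.h>
#include <stdbool.h>

struct fake_lru { const char *name; unsigned long count; };

/* Pretend to reclaim up to nr pages from this LRU */
static unsigned long fake_scan(struct fake_lru *lru, unsigned long nr)
{
	unsigned long freed = lru->count < nr ? lru->count : nr;
	lru->count -= freed;
	return freed;
}

int main(void)
{
	struct fake_lru dontneed = { "dontneed", 100 };
	struct fake_lru willneed = { "willneed", 300 };
	bool can_swap = true, can_block = false;

	struct {
		struct fake_lru *lru;
		bool cond;
	} stages[] = {
		{ &dontneed, true },                   /* cheap: purge */
		{ &willneed, can_swap },               /* evict to swap */
		{ &dontneed, can_block },              /* wait for idle, then purge */
		{ &willneed, can_swap && can_block },  /* wait for idle, then evict */
	};
	long nr = 256;          /* stands in for sc->nr_to_scan */
	unsigned long freed = 0;

	for (unsigned i = 0; nr > 0 && i < sizeof(stages) / sizeof(stages[0]); i++) {
		if (!stages[i].cond)
			continue;
		unsigned long f = fake_scan(stages[i].lru, nr);
		nr -= f;
		freed += f;
		printf("stage %u (%s): freed %lu\n", i, stages[i].lru->name, f);
	}
	printf("total freed: %lu of budget 256\n", freed);
	return 0;
}
```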
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c9e4aeb14f4a..5599d93ec0d2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -26,6 +26,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
uint64_t sz;
int ret;
@@ -36,7 +37,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
- submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kzalloc(sz, GFP_KERNEL);
if (!submit)
return ERR_PTR(-ENOMEM);
@@ -52,9 +53,13 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
+ submit->pid = get_pid(task_pid(current));
submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
+ /* Get a unique identifier for the submission for logging purposes */
+ submit->ident = atomic_inc_return(&ident) - 1;
+
INIT_LIST_HEAD(&submit->node);
return submit;
@@ -67,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
unsigned i;
if (submit->fence_id) {
- mutex_lock(&submit->queue->lock);
+ mutex_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
- mutex_unlock(&submit->queue->lock);
+ mutex_unlock(&submit->queue->idr_lock);
}
dma_fence_put(submit->user_fence);
@@ -238,17 +243,13 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
if (flags & BO_OBJ_PINNED)
msm_gem_unpin_locked(obj);
- if (flags & BO_ACTIVE)
- msm_gem_active_put(obj);
-
if (flags & BO_LOCKED)
dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
- unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
- BO_ACTIVE | BO_LOCKED;
+ unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
@@ -353,18 +354,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
submit->valid = true;
- /*
- * Increment active_count first, so if under memory pressure, we
- * don't inadvertently evict a bo needed by the submit in order
- * to pin an earlier bo in the same submit.
- */
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
-
- msm_gem_active_get(obj, submit->gpu);
- submit->bos[i].flags |= BO_ACTIVE;
- }
-
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
struct msm_gem_vma *vma;
@@ -512,11 +501,11 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
- unsigned cleanup_flags = BO_LOCKED;
+ unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
unsigned i;
if (error)
- cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
+ cleanup_flags |= BO_VMA_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -533,10 +522,6 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
- msm_gem_lock(obj);
- /* Note, VMA already fence-unpinned before submit: */
- submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
- msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
@@ -718,7 +703,6 @@ static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
- static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
@@ -729,10 +713,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
- struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
unsigned i;
- int ret, submitid;
+ int ret;
if (!gpu)
return -ENXIO;
@@ -764,35 +747,26 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
- /* Get a unique identifier for the submission for logging purposes */
- submitid = atomic_inc_return(&ident) - 1;
-
ring = gpu->rb[queue->ring_nr];
- trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
- args->nr_bos, args->nr_cmds);
-
- ret = mutex_lock_interruptible(&queue->lock);
- if (ret)
- goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
- goto out_unlock;
+ return ret;
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos,
- args->nr_cmds);
- if (IS_ERR(submit)) {
- ret = PTR_ERR(submit);
- submit = NULL;
- goto out_unlock;
- }
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ if (IS_ERR(submit))
+ return PTR_ERR(submit);
+
+ trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
+ args->nr_bos, args->nr_cmds);
- submit->pid = pid;
- submit->ident = submitid;
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@@ -887,6 +861,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ mutex_lock(&queue->idr_lock);
+
/*
* If using userspace provided seqno fence, validate that the id
* is available before arming sched job. Since access to fence_idr
@@ -895,6 +871,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
idr_find(&queue->fence_idr, args->fence)) {
+ mutex_unlock(&queue->idr_lock);
ret = -EINVAL;
goto out;
}
@@ -927,6 +904,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->user_fence, 1,
INT_MAX, GFP_KERNEL);
}
+
+ mutex_unlock(&queue->idr_lock);
+
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
@@ -965,9 +945,9 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&queue->lock);
+out_post_unlock:
if (submit)
msm_gem_submit_put(submit);
-out_post_unlock:
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c2bfcf3f1f40..0098ee8438aa 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -16,6 +16,7 @@
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
+#include <linux/reset.h>
#include <linux/sched/task.h>
/*
@@ -394,7 +395,6 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
- pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
@@ -423,9 +423,7 @@ static void recover_worker(struct kthread_work *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
- pm_runtime_put_sync(&gpu->pdev->dev);
/*
* Replay all remaining submits starting with highest priority
@@ -442,6 +440,8 @@ static void recover_worker(struct kthread_work *work)
}
}
+ pm_runtime_put(&gpu->pdev->dev);
+
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
@@ -664,11 +664,12 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
mutex_lock(&gpu->active_lock);
gpu->active_submits--;
WARN_ON(gpu->active_submits < 0);
- if (!gpu->active_submits)
+ if (!gpu->active_submits) {
msm_devfreq_idle(gpu);
- mutex_unlock(&gpu->active_lock);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ }
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
@@ -757,14 +758,17 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Update devfreq on transition from idle->active: */
mutex_lock(&gpu->active_lock);
- if (!gpu->active_submits)
+ if (!gpu->active_submits) {
+ pm_runtime_get(&gpu->pdev->dev);
msm_devfreq_active(gpu);
+ }
gpu->active_submits++;
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
}
@@ -846,7 +850,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
sched_set_fifo_low(gpu->worker->task);
- INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
init_waitqueue_head(&gpu->retire_event);
@@ -901,6 +904,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
+ gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "cx_collapse");
+
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
@@ -974,8 +980,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- WARN_ON(!list_empty(&gpu->active_list));
-
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4d935fedd2ac..ff911e7305ce 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -13,6 +13,7 @@
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -187,12 +188,6 @@ struct msm_gpu {
*/
int cur_ctx_seqno;
- /*
- * List of GEM active objects on this gpu. Protected by
- * msm_drm_private::mm_lock
- */
- struct list_head active_list;
-
/**
* lock:
*
@@ -277,6 +272,9 @@ struct msm_gpu {
bool hw_apriv;
struct thermal_cooling_device *cooling;
+
+ /* To poll for cx gdsc collapse during gpu recovery */
+ struct reset_control *cx_collapse;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
@@ -466,7 +464,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @node: node in the context's list of submitqueues
* @fence_idr: maps fence-id to dma_fence for userspace visible fence
* seqno, protected by submitqueue lock
- * @lock: submitqueue lock
+ * @idr_lock: for serializing access to fence_idr
+ * @lock: submitqueue lock for serializing submits on a queue
* @ref: reference count
* @entity: the submit job-queue
*/
@@ -479,6 +478,7 @@ struct msm_gpu_submitqueue {
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
+ struct mutex idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d1f70426f554..85c443a37e4e 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_qos_remove_request(&df->idle_freq);
+ dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index ca0b08d7875b..ac40d857bc45 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -115,29 +115,27 @@ TRACE_EVENT(msm_gmu_freq_change,
);
-TRACE_EVENT(msm_gem_purge,
- TP_PROTO(u32 bytes),
- TP_ARGS(bytes),
+TRACE_EVENT(msm_gem_shrink,
+ TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
+ u32 active_purged, u32 active_evicted),
+ TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
TP_STRUCT__entry(
- __field(u32, bytes)
+ __field(u32, nr_to_scan)
+ __field(u32, purged)
+ __field(u32, evicted)
+ __field(u32, active_purged)
+ __field(u32, active_evicted)
),
TP_fast_assign(
- __entry->bytes = bytes;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->purged = purged;
+ __entry->evicted = evicted;
+ __entry->active_purged = active_purged;
+ __entry->active_evicted = active_evicted;
),
- TP_printk("Purging %u bytes", __entry->bytes)
-);
-
-
-TRACE_EVENT(msm_gem_evict,
- TP_PROTO(u32 bytes),
- TP_ARGS(bytes),
- TP_STRUCT__entry(
- __field(u32, bytes)
- ),
- TP_fast_assign(
- __entry->bytes = bytes;
- ),
- TP_printk("Evicting %u bytes", __entry->bytes)
+ TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
+ __entry->nr_to_scan, __entry->purged, __entry->evicted,
+ __entry->active_purged, __entry->active_evicted)
);
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index 7b504617833a..d02cd29ce829 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -5,6 +5,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/interconnect.h>
+
#include "msm_drv.h"
/*
@@ -124,3 +126,23 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
work->worker = worker;
kthread_init_work(&work->work, fn);
}
+
+struct icc_path *msm_icc_get(struct device *dev, const char *name)
+{
+ struct device *mdss_dev = dev->parent;
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (path)
+ return path;
+
+ /*
+ * If there are no interconnects attached to the corresponding device
+ * node, of_icc_get() will return NULL.
+ *
+ * If the MDP5/DPU device node doesn't have interconnects, lookup the
+ * path in the parent (MDSS) device.
+ */
+ return of_icc_get(mdss_dev, name);
+
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a54ed354578b..5577cea7c009 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,7 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
};
@@ -29,23 +30,84 @@ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
return container_of(mmu, struct msm_iommu_pagetable, base);
}
+/* based on iommu_pgsize() in iommu.c: */
+static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, size_t *count)
+{
+ unsigned int pgsize_idx, pgsize_idx_next;
+ unsigned long pgsizes;
+ size_t offset, pgsize, pgsize_next;
+ unsigned long addr_merge = paddr | iova;
+
+ /* Page sizes supported by the hardware and small enough for @size */
+ pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
+
+ /* Constrain the page sizes further based on the maximum alignment */
+ if (likely(addr_merge))
+ pgsizes &= GENMASK(__ffs(addr_merge), 0);
+
+ /* Make sure we have at least one suitable page size */
+ BUG_ON(!pgsizes);
+
+ /* Pick the biggest page size remaining */
+ pgsize_idx = __fls(pgsizes);
+ pgsize = BIT(pgsize_idx);
+ if (!count)
+ return pgsize;
+
+ /* Find the next biggest supported page size, if it exists */
+ pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+ if (!pgsizes)
+ goto out_set_count;
+
+ pgsize_idx_next = __ffs(pgsizes);
+ pgsize_next = BIT(pgsize_idx_next);
+
+ /*
+ * There's no point trying a bigger page size unless the virtual
+ * and physical addresses are similarly offset within the larger page.
+ */
+ if ((iova ^ paddr) & (pgsize_next - 1))
+ goto out_set_count;
+
+ /* Calculate the offset to the next page size alignment boundary */
+ offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+ /*
+ * If size is big enough to accommodate the larger page, reduce
+ * the number of smaller pages.
+ */
+ if (offset + pgsize_next <= size)
+ size = offset;
+
+out_set_count:
+ *count = size >> pgsize_idx;
+ return pgsize;
+}
+
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
size_t size)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
- size_t unmapped = 0;
- /* Unmap the block one page at a time */
while (size) {
- unmapped += ops->unmap(ops, iova, 4096, NULL);
- iova += 4096;
- size -= 4096;
+ size_t unmapped, pgsize, count;
+
+ pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
+
+ unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
+ if (!unmapped)
+ break;
+
+ iova += unmapped;
+ size -= unmapped;
}
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (unmapped == size) ? 0 : -EINVAL;
+ return (size == 0) ? 0 : -EINVAL;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
@@ -54,7 +116,6 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
struct scatterlist *sg;
- size_t mapped = 0;
u64 addr = iova;
unsigned int i;
@@ -62,17 +123,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
- /* Map the block one page at a time */
while (size) {
- if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
- msm_iommu_pagetable_unmap(mmu, iova, mapped);
+ size_t pgsize, count, mapped = 0;
+ int ret;
+
+ pgsize = calc_pgsize(pagetable, addr, phys, size, &count);
+
+ ret = ops->map_pages(ops, addr, phys, pgsize, count,
+ prot, GFP_KERNEL, &mapped);
+
+ /* map_pages could fail after mapping some of the pages,
+ * so update the counters before error handling.
+ */
+ phys += mapped;
+ addr += mapped;
+ size -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
return -EINVAL;
}
-
- phys += 4096;
- addr += 4096;
- size -= 4096;
- mapped += 4096;
}
}
@@ -207,6 +277,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/*
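The msm_iommu.c changes above stop mapping and unmapping in fixed 4K steps and instead let calc_pgsize() choose the largest block the pagetable supports on each iteration. The program below is a hedged userspace sketch of that selection logic under made-up inputs; pick_pgsize() is an invented helper, not the kernel function, and it omits the pgcount/next-size handling.

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static unsigned long mask_up_to(unsigned int bit)	/* bits 0..bit set */
{
	return (bit >= 8 * sizeof(long) - 1) ? ~0UL : (2UL << bit) - 1;
}

static unsigned long pick_pgsize(unsigned long pgsize_bitmap,
				 uint64_t iova, uint64_t paddr, size_t size)
{
	unsigned long addr_merge = (unsigned long)(iova | paddr);
	unsigned int size_msb = 8 * sizeof(long) - 1 - __builtin_clzl(size);
	/* Supported sizes no larger than the remaining length */
	unsigned long pgsizes = pgsize_bitmap & mask_up_to(size_msb);

	/* Further constrain by the combined address alignment */
	if (addr_merge)
		pgsizes &= mask_up_to(__builtin_ctzl(addr_merge));

	/* Largest candidate left (assumes at least the 4K size always qualifies) */
	return 1UL << (8 * sizeof(long) - 1 - __builtin_clzl(pgsizes));
}

int main(void)
{
	/* e.g. 4K | 2M | 1G, as an ARM LPAE pagetable might advertise */
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	printf("%#lx\n", pick_pgsize(bitmap, 0x40200000, 0x80200000, 4u << 20)); /* 2M */
	printf("%#lx\n", pick_pgsize(bitmap, 0x40201000, 0x80201000, 4u << 20)); /* 4K */
	return 0;
}
```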
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index a92ffde53f0b..db2f847c8535 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 56eecb4a72dc..cad4c3525f0b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -29,8 +29,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_unlock(obj);
}
- pm_runtime_get_sync(&gpu->pdev->dev);
-
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
@@ -38,8 +36,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&gpu->lock);
- pm_runtime_put(&gpu->pdev->dev);
-
return dma_fence_get(submit->hw_fence);
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index f486a3cd4e55..c6929e205b51 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -200,6 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
*id = queue->id;
idr_init(&queue->fence_idr);
+ mutex_init(&queue->idr_lock);
mutex_init(&queue->lock);
list_add_tail(&queue->node, &ctx->submitqueues);
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 873551b4552f..116f8168bda4 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,7 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
@@ -26,7 +26,7 @@ config DRM_IMX_LCDIF
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
help
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index befad33dcb95..075002ed6fb0 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -8,7 +8,6 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -16,11 +15,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
@@ -45,23 +42,11 @@ static int lcdif_attach_bridge(struct lcdif_drm_private *lcdif)
{
struct drm_device *drm = lcdif->drm;
struct drm_bridge *bridge;
- struct drm_panel *panel;
int ret;
- ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel,
- &bridge);
- if (ret)
- return ret;
-
- if (panel) {
- bridge = devm_drm_panel_bridge_add_typed(drm->dev, panel,
- DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
- }
-
- if (!bridge)
- return -ENODEV;
+ bridge = devm_drm_of_get_bridge(drm->dev, drm->dev->of_node, 0, 0);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
ret = drm_bridge_attach(&lcdif->encoder, bridge, NULL, 0);
if (ret)
@@ -199,11 +184,11 @@ static void lcdif_unload(struct drm_device *drm)
drm->dev_private = NULL;
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver lcdif_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "imx-lcdif",
.desc = "i.MX LCDIF Controller DRM",
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.h b/drivers/gpu/drm/mxsfb/lcdif_drv.h
index cb916341e845..6cdba6e20c02 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.h
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.h
@@ -8,6 +8,7 @@
#ifndef __LCDIF_DRV_H__
#define __LCDIF_DRV_H__
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index 1bec1279c8b5..b1092aab1423 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -17,13 +17,12 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "lcdif_drv.h"
@@ -123,8 +122,8 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
writel(ctrl, lcdif->base + LCDC_V8_CTRL);
- writel(DISP_SIZE_DELTA_Y(m->crtc_vdisplay) |
- DISP_SIZE_DELTA_X(m->crtc_hdisplay),
+ writel(DISP_SIZE_DELTA_Y(m->vdisplay) |
+ DISP_SIZE_DELTA_X(m->hdisplay),
lcdif->base + LCDC_V8_DISP_SIZE);
writel(HSYN_PARA_BP_H(m->htotal - m->hsync_end) |
@@ -139,8 +138,8 @@ static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
VSYN_HSYN_WIDTH_PW_H(m->hsync_end - m->hsync_start),
lcdif->base + LCDC_V8_VSYN_HSYN_WIDTH);
- writel(CTRLDESCL0_1_HEIGHT(m->crtc_vdisplay) |
- CTRLDESCL0_1_WIDTH(m->crtc_hdisplay),
+ writel(CTRLDESCL0_1_HEIGHT(m->vdisplay) |
+ CTRLDESCL0_1_WIDTH(m->hdisplay),
lcdif->base + LCDC_V8_CTRLDESCL0_1);
writel(CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]),
@@ -204,7 +203,7 @@ static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
m->crtc_clock,
(int)(clk_get_rate(lcdif->clk) / 1000));
- DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
+ DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n",
bus_flags);
DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);
@@ -297,7 +296,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
lcdif_crtc_mode_set_nofb(lcdif, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
if (paddr) {
writel(lower_32_bits(paddr),
lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
@@ -393,8 +392,8 @@ static int lcdif_plane_atomic_check(struct drm_plane *plane,
&lcdif->crtc);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -406,7 +405,7 @@ static void lcdif_plane_primary_atomic_update(struct drm_plane *plane,
plane);
dma_addr_t paddr;
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
if (paddr) {
writel(lower_32_bits(paddr),
lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 55aad92e08ba..b29b332ed381 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -22,7 +22,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
@@ -324,11 +324,11 @@ static void mxsfb_unload(struct drm_device *drm)
pm_runtime_disable(drm->dev);
}
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver mxsfb_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index e38ce5737a5f..3bcc9c0f2019 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -20,13 +20,12 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
@@ -353,7 +352,7 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_bridge_state *bridge_state = NULL;
struct drm_device *drm = mxsfb->drm;
u32 bus_format = 0;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
pm_runtime_get_sync(drm->dev);
mxsfb_enable_axi_clk(mxsfb);
@@ -389,10 +388,10 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
mxsfb_crtc_mode_set_nofb(mxsfb, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (paddr) {
- writel(paddr, mxsfb->base + mxsfb->devdata->cur_buf);
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (dma_addr) {
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->cur_buf);
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
}
mxsfb_enable_controller(mxsfb);
@@ -531,8 +530,8 @@ static int mxsfb_plane_atomic_check(struct drm_plane *plane,
&mxsfb->crtc);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -542,11 +541,11 @@ static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
plane);
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (paddr)
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (dma_addr)
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
}
static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
@@ -557,11 +556,11 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
plane);
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
u32 ctrl;
- paddr = drm_fb_cma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (!paddr) {
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (!dma_addr) {
writel(0, mxsfb->base + LCDC_AS_CTRL);
return;
}
@@ -572,16 +571,16 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
* is understood, live with the 16 initial invalid pixels on the first
* line and start 64 bytes within the framebuffer.
*/
- paddr += 64;
+ dma_addr += 64;
- writel(paddr, mxsfb->base + LCDC_AS_NEXT_BUF);
+ writel(dma_addr, mxsfb->base + LCDC_AS_NEXT_BUF);
/*
* If the plane was previously disabled, write LCDC_AS_BUF as well to
* provide the first buffer.
*/
if (!old_pstate->fb)
- writel(paddr, mxsfb->base + LCDC_AS_BUF);
+ writel(dma_addr, mxsfb->base + LCDC_AS_BUF);
ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index f9e962fd94d0..ee92d576d277 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1275,31 +1275,9 @@ static const uint32_t modeset_formats[] = {
DRM_FORMAT_XRGB1555,
};
-static struct drm_plane *
-create_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- /* possible_crtc's will be filled in later by crtc_init */
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- modeset_formats,
- ARRAY_SIZE(modeset_formats), NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
+static const struct drm_plane_funcs nv04_primary_plane_funcs = {
+ DRM_PLANE_NON_ATOMIC_FUNCS,
+};
static int nv04_crtc_vblank_handler(struct nvif_notify *notify)
{
@@ -1315,6 +1293,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_crtc *nv_crtc;
+ struct drm_plane *primary;
int ret;
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
@@ -1329,8 +1308,18 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nv_crtc->save = nv_crtc_save;
nv_crtc->restore = nv_crtc_restore;
- drm_crtc_init_with_planes(dev, &nv_crtc->base,
- create_primary_plane(dev), NULL,
+ primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0,
+ &nv04_primary_plane_funcs,
+ modeset_formats,
+ ARRAY_SIZE(modeset_formats), NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ kfree(nv_crtc);
+ return ret;
+ }
+
+ drm_crtc_init_with_planes(dev, &nv_crtc->base, primary, NULL,
&nv04_crtc_funcs, NULL);
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 37e63e98cd08..33f29736024a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -296,9 +296,10 @@ nv10_overlay_init(struct drm_device *device)
break;
}
- ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */,
- &nv10_plane_funcs,
- formats, num_formats, false);
+ ret = drm_universal_plane_init(device, &plane->base, 3 /* both crtc's */,
+ &nv10_plane_funcs,
+ formats, num_formats, NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
goto err;
@@ -475,9 +476,9 @@ nv04_overlay_init(struct drm_device *device)
if (!plane)
return;
- ret = drm_plane_init(device, &plane->base, 1 /* single crtc */,
- &nv04_plane_funcs,
- formats, 2, false);
+ ret = drm_universal_plane_init(device, &plane->base, 1 /* single crtc */,
+ &nv04_plane_funcs, formats, 2, NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index cad5a646983a..70c62b861276 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "nouveau_bo.h"
@@ -237,8 +236,8 @@ base507c_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index cd2c79e4b7af..78ee32da01c8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -29,7 +29,6 @@
#include <nvhw/class/cl507a.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
bool
curs507a_space(struct nv50_wndw *wndw)
@@ -103,8 +102,8 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
asyh->curs.visible = asyw->state.visible;
if (ret || !asyh->curs.visible)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a53d685a77eb..33c97d510999 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -39,7 +39,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -933,6 +932,7 @@ struct nv50_msto {
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
+ bool enabled;
};
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
@@ -948,57 +948,37 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
return msto->mstc->mstm->outp;
}
-static struct drm_dp_payload *
-nv50_msto_payload(struct nv50_msto *msto)
-{
- struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
- struct nv50_mstc *mstc = msto->mstc;
- struct nv50_mstm *mstm = mstc->mstm;
- int vcpi = mstc->port->vcpi.vcpi, i;
-
- WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
-
- NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
- for (i = 0; i < mstm->mgr.max_payloads; i++) {
- struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
- NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
- mstm->outp->base.base.name, i, payload->vcpi,
- payload->start_slot, payload->num_slots);
- }
-
- for (i = 0; i < mstm->mgr.max_payloads; i++) {
- struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
- if (payload->vcpi == vcpi)
- return payload;
- }
-
- return NULL;
-}
-
static void
-nv50_msto_cleanup(struct nv50_msto *msto)
+nv50_msto_cleanup(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
- struct nv50_mstc *mstc = msto->mstc;
- struct nv50_mstm *mstm = mstc->mstm;
-
- if (!msto->disabled)
- return;
+ struct drm_dp_mst_atomic_payload *payload =
+ drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
- drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
-
- msto->mstc = NULL;
- msto->disabled = false;
+ if (msto->disabled) {
+ msto->mstc = NULL;
+ msto->disabled = false;
+ } else if (msto->enabled) {
+ drm_dp_add_payload_part2(mgr, state, payload);
+ msto->enabled = false;
+ }
}
static void
-nv50_msto_prepare(struct nv50_msto *msto)
+nv50_msto_prepare(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
+ struct drm_dp_mst_atomic_payload *payload;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
@@ -1010,17 +990,21 @@ nv50_msto_prepare(struct nv50_msto *msto)
(0x0100 << msto->head->base.index),
};
- mutex_lock(&mstm->mgr.payload_lock);
-
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
- if (mstc->port->vcpi.vcpi > 0) {
- struct drm_dp_payload *payload = nv50_msto_payload(msto);
- if (payload) {
- args.vcpi.start_slot = payload->start_slot;
- args.vcpi.num_slots = payload->num_slots;
- args.vcpi.pbn = mstc->port->vcpi.pbn;
- args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
- }
+
+ payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
+
+ // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
+ if (msto->disabled) {
+ drm_dp_remove_payload(mgr, mst_state, payload);
+ } else {
+ if (msto->enabled)
+ drm_dp_add_payload_part1(mgr, mst_state, payload);
+
+ args.vcpi.start_slot = payload->vc_start_slot;
+ args.vcpi.num_slots = payload->time_slots;
+ args.vcpi.pbn = payload->pbn;
+ args.vcpi.aligned_pbn = payload->time_slots * mst_state->pbn_div;
}
NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
@@ -1029,7 +1013,6 @@ nv50_msto_prepare(struct nv50_msto *msto)
args.vcpi.pbn, args.vcpi.aligned_pbn);
nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
- mutex_unlock(&mstm->mgr.payload_lock);
}
static int
@@ -1039,6 +1022,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_connector *connector = conn_state->connector;
+ struct drm_dp_mst_topology_state *mst_state;
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nv50_mstm *mstm = mstc->mstm;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
@@ -1050,7 +1034,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (ret)
return ret;
- if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
/*
@@ -1066,8 +1050,18 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
false);
}
- slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
- asyh->dp.pbn, 0);
+ mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ if (!mst_state->pbn_div) {
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
+ outp->dp.link_bw, outp->dp.link_nr);
+ }
+
+ slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
if (slots < 0)
return slots;
@@ -1099,7 +1093,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 proto;
- bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
@@ -1114,10 +1107,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
if (WARN_ON(!mstc))
return;
- r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu);
- if (!r)
- DRM_DEBUG_KMS("Failed to allocate VCPI\n");
-
if (!mstm->links++)
nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
@@ -1130,6 +1119,7 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
nv50_dp_bpc_to_depth(asyh->or.bpc));
msto->mstc = mstc;
+ msto->enabled = true;
mstm->modified = true;
}
@@ -1140,8 +1130,6 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
- drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
-
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
if (!--mstm->links)
@@ -1256,29 +1244,8 @@ nv50_mstc_atomic_check(struct drm_connector *connector,
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
- struct drm_connector_state *new_conn_state =
- drm_atomic_get_new_connector_state(state, connector);
- struct drm_connector_state *old_conn_state =
- drm_atomic_get_old_connector_state(state, connector);
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *new_crtc = new_conn_state->crtc;
-
- if (!old_conn_state->crtc)
- return 0;
-
- /* We only want to free VCPI if this state disables the CRTC on this
- * connector
- */
- if (new_crtc) {
- crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
-
- if (!crtc_state ||
- !drm_atomic_crtc_needs_modeset(crtc_state) ||
- crtc_state->enable)
- return 0;
- }
- return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
+ return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
}
static int
@@ -1382,7 +1349,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
}
static void
-nv50_mstm_cleanup(struct nv50_mstm *mstm)
+nv50_mstm_cleanup(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
@@ -1390,14 +1359,12 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm)
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
drm_dp_check_act_status(&mstm->mgr);
- drm_dp_update_payload_part2(&mstm->mgr);
-
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
- nv50_msto_cleanup(msto);
+ nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
}
}
@@ -1405,20 +1372,34 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm)
}
static void
-nv50_mstm_prepare(struct nv50_mstm *mstm)
+nv50_mstm_prepare(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
- drm_dp_update_payload_part1(&mstm->mgr, 1);
+ /* Disable payloads first */
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
- if (mstc && mstc->mstm == mstm)
- nv50_msto_prepare(msto);
+ if (mstc && mstc->mstm == mstm && msto->disabled)
+ nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
+ }
+ }
+
+ /* Add payloads for new heads, while also updating the start slots of any unmodified (but
+ * active) heads that may have had their VC slots shifted left after the previous step
+ */
+ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+ struct nv50_msto *msto = nv50_msto(encoder);
+ struct nv50_mstc *mstc = msto->mstc;
+ if (mstc && mstc->mstm == mstm && !msto->disabled)
+ nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
}
}
@@ -1615,9 +1596,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
- max_payloads, outp->dcb->dpconf.link_nr,
- drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw),
- conn_base_id);
+ max_payloads, conn_base_id);
if (ret)
return ret;
@@ -1835,7 +1814,7 @@ nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
-static bool nv50_has_mst(struct nouveau_drm *drm)
+bool nv50_has_mst(struct nouveau_drm *drm)
{
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
u32 data;
@@ -2069,20 +2048,20 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
- struct drm_encoder *encoder;
+ int i;
NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
- drm_for_each_encoder(encoder, drm->dev) {
- if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
- mstm = nouveau_encoder(encoder)->dp.mstm;
- if (mstm && mstm->modified)
- nv50_mstm_prepare(mstm);
- }
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ mstm = nv50_mstm(mgr);
+ if (mstm->modified)
+ nv50_mstm_prepare(state, mst_state, mstm);
}
core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
@@ -2091,12 +2070,10 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
disp->core->chan.base.device))
NV_ERROR(drm, "core notifier timeout\n");
- drm_for_each_encoder(encoder, drm->dev) {
- if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
- mstm = nouveau_encoder(encoder)->dp.mstm;
- if (mstm && mstm->modified)
- nv50_mstm_cleanup(mstm);
- }
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ mstm = nv50_mstm(mgr);
+ if (mstm->modified)
+ nv50_mstm_cleanup(state, mst_state, mstm);
}
}
@@ -2137,6 +2114,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_crc_atomic_stop_reporting(state);
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
@@ -2617,6 +2595,11 @@ nv50_disp_func = {
.atomic_state_free = nv50_disp_atomic_state_free,
};
+static const struct drm_mode_config_helper_funcs
+nv50_disp_helper_func = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
/******************************************************************************
* Init
*****************************************************************************/
@@ -2700,6 +2683,7 @@ nv50_display_create(struct drm_device *dev)
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
+ dev->mode_config.helper_private = &nv50_disp_helper_func;
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
dev->mode_config.normalize_zpos = true;
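
The dispnv50 hunks above replace the legacy VCPI helpers (drm_dp_mst_allocate_vcpi(), drm_dp_update_payload_part1/2()) with the atomic MST payload API: time slots are reserved in ->atomic_check(), and payloads are programmed from the per-manager atomic state during the commit (drm_dp_remove_payload()/drm_dp_add_payload_part1() in nv50_msto_prepare(), drm_dp_add_payload_part2() in nv50_msto_cleanup(), with drm_dp_mst_atomic_setup_commit() and drm_dp_mst_atomic_wait_for_dependencies() serializing payload state between commits). A condensed sketch of the check-side flow using only the helpers shown in the diff; the wrapper function and the link parameters passed to it are hypothetical, not nouveau code:

#include <drm/display/drm_dp_mst_helper.h>

/* Sketch: reserve MST time slots for one stream in ->atomic_check() */
static int example_mst_atomic_check(struct drm_atomic_state *state,
                                    struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_port *port,
                                    int pbn, int link_rate, int lane_count)
{
        struct drm_dp_mst_topology_state *mst_state;
        int slots;

        mst_state = drm_atomic_get_mst_topology_state(state, mgr);
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);

        /* pbn_div is a property of the link; fill it in once per topology state */
        if (!mst_state->pbn_div)
                mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate,
                                                              lane_count);

        /* Reserve VC time slots for this stream in the atomic state */
        slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
        if (slots < 0)
                return slots;

        return 0;
}
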
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 38dec11e7dda..9d66c9c726c3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -106,6 +106,8 @@ void nv50_dmac_destroy(struct nv50_dmac *);
*/
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
+bool nv50_has_mst(struct nouveau_drm *drm);
+
u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
index d4af69e903ad..797c1e4e0eaa 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
@@ -24,7 +24,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <nvif/if0014.h>
#include <nvif/push507c.h>
@@ -106,8 +105,8 @@ ovly507e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
int ret;
ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 082a66d59506..b3deea5aca58 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -23,7 +23,6 @@
#include "atom.h"
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <nouveau_bo.h>
#include <nvif/if0014.h>
@@ -297,8 +296,8 @@ wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
return drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 31167c398708..1d214a4b960a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -23,7 +23,6 @@
#include "atom.h"
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <nouveau_bo.h>
#include <nvif/pushc37b.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 6140db756d06..8cf096f841a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -386,3 +386,13 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
}
+
+bool nouveau_acpi_video_backlight_use_native(void)
+{
+ return acpi_video_backlight_use_native();
+}
+
+void nouveau_acpi_video_register_backlight(void)
+{
+ acpi_video_register_backlight();
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 330f9b837066..e39dd8b94b8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -11,6 +11,8 @@ void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
+bool nouveau_acpi_video_backlight_use_native(void);
+void nouveau_acpi_video_register_backlight(void);
#else
static inline bool nouveau_is_optimus(void) { return false; };
static inline bool nouveau_is_v1_dsm(void) { return false; };
@@ -18,6 +20,8 @@ static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
+static inline bool nouveau_acpi_video_backlight_use_native(void) { return true; }
+static inline void nouveau_acpi_video_register_backlight(void) {}
#endif
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index a2141d3d9b1d..a614582779ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -38,6 +38,7 @@
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
+#include "nouveau_acpi.h"
static struct ida bl_ida;
#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0'
@@ -405,6 +406,11 @@ nouveau_backlight_init(struct drm_connector *connector)
goto fail_alloc;
}
+ if (!nouveau_acpi_video_backlight_use_native()) {
+ NV_INFO(drm, "Skipping nv_backlight registration\n");
+ goto fail_alloc;
+ }
+
if (!nouveau_get_backlight_name(backlight_name, bl)) {
NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n");
goto fail_alloc;
@@ -430,6 +436,13 @@ nouveau_backlight_init(struct drm_connector *connector)
fail_alloc:
kfree(bl);
+ /*
+ * If we get here we have an internal panel, but no nv_backlight,
+ * try registering an ACPI video backlight device instead.
+ */
+ if (ret == 0)
+ nouveau_acpi_video_register_backlight();
+
return ret;
}
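
The nouveau_acpi/nouveau_backlight hunks let the ACPI video driver arbitrate backlight ownership: nv_backlight is only registered when acpi_video_backlight_use_native() says the native interface should be used, and otherwise (or when no native device ends up being created for an internal panel) acpi_video_register_backlight() is called so the firmware interface can take over. A minimal sketch of that handoff, assuming a hypothetical register_native_backlight() helper rather than the nouveau code itself:

#include <acpi/video.h>

static int example_backlight_init(struct drm_device *dev)
{
        /* Firmware/heuristics say acpi_video should own brightness control */
        if (!acpi_video_backlight_use_native())
                goto register_acpi_video;

        if (register_native_backlight(dev) == 0)        /* hypothetical */
                return 0;

register_acpi_video:
        /*
         * acpi_video defers creating its backlight device until native GPU
         * drivers have had a chance to opt in; this is the explicit opt-out.
         */
        acpi_video_register_backlight();
        return 0;
}
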
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e29175e4b44c..126b3c6e12f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -281,8 +281,10 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
break;
}
- if (WARN_ON(pi < 0))
+ if (WARN_ON(pi < 0)) {
+ kfree(nvbo);
return ERR_PTR(-EINVAL);
+ }
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
@@ -307,9 +309,9 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
- ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
- &nvbo->placement, align >> PAGE_SHIFT, false, sg,
- robj, nouveau_bo_del_ttm);
+ ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
+ &nvbo->placement, align >> PAGE_SHIFT, false,
+ sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
@@ -1015,7 +1017,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
}
/* Fake bo copy. */
- if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
+ if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM &&
+ !bo->ttm)) {
ttm_bo_move_null(bo, new_reg);
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 43a9d1e1cf71..1991bbb1d05c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -504,7 +504,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed =
nv_encoder->caps.dp_interlace;
else
- connector->interlace_allowed = true;
+ connector->interlace_allowed =
+ drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1105,11 +1106,25 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
+static int
+nouveau_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state)
+{
+ struct nouveau_connector *nv_conn = nouveau_connector(connector);
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+
+ if (!nv_conn->dp_encoder || !nv50_has_mst(nouveau_drm(connector->dev)))
+ return 0;
+
+ return drm_dp_mst_root_conn_atomic_check(conn_state, &nv_conn->dp_encoder->dp.mstm->mgr);
+}
+
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
.mode_valid = nouveau_connector_mode_valid,
.best_encoder = nouveau_connector_best_encoder,
+ .atomic_check = nouveau_connector_atomic_check,
};
static const struct drm_connector_funcs
@@ -1367,7 +1382,7 @@ nouveau_connector_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
drm_dp_aux_init(&nv_connector->aux);
- fallthrough;
+ break;
default:
funcs = &nouveau_connector_funcs;
break;
@@ -1430,6 +1445,8 @@ nouveau_connector_create(struct drm_device *dev,
switch (type) {
case DRM_MODE_CONNECTOR_DisplayPort:
+ nv_connector->dp_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
+ fallthrough;
case DRM_MODE_CONNECTOR_eDP:
drm_dp_cec_register_connector(&nv_connector->aux, connector);
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 4bf0c703eee7..f4e17ff68bf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -128,6 +128,9 @@ struct nouveau_connector {
struct drm_dp_aux aux;
+ /* The fixed DP encoder for this connector, if there is one */
+ struct nouveau_encoder *dp_encoder;
+
int dithering_mode;
int scaling_mode;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 561309d447e0..fd99ec0f4257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
@@ -70,6 +71,18 @@
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 1c3104d20571..a7db7c31064b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -211,75 +211,24 @@ static const struct attribute_group temp1_auto_point_sensor_group = {
#define N_ATTR_GROUPS 3
-static const u32 nouveau_config_chip[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const u32 nouveau_config_in[] = {
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_LABEL,
- 0
-};
-
-static const u32 nouveau_config_temp[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
- HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_EMERGENCY |
- HWMON_T_EMERGENCY_HYST,
- 0
-};
-
-static const u32 nouveau_config_fan[] = {
- HWMON_F_INPUT,
- 0
-};
-
-static const u32 nouveau_config_pwm[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const u32 nouveau_config_power[] = {
- HWMON_P_INPUT | HWMON_P_CAP_MAX | HWMON_P_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nouveau_chip = {
- .type = hwmon_chip,
- .config = nouveau_config_chip,
-};
-
-static const struct hwmon_channel_info nouveau_temp = {
- .type = hwmon_temp,
- .config = nouveau_config_temp,
-};
-
-static const struct hwmon_channel_info nouveau_fan = {
- .type = hwmon_fan,
- .config = nouveau_config_fan,
-};
-
-static const struct hwmon_channel_info nouveau_in = {
- .type = hwmon_in,
- .config = nouveau_config_in,
-};
-
-static const struct hwmon_channel_info nouveau_pwm = {
- .type = hwmon_pwm,
- .config = nouveau_config_pwm,
-};
-
-static const struct hwmon_channel_info nouveau_power = {
- .type = hwmon_power,
- .config = nouveau_config_power,
-};
-
static const struct hwmon_channel_info *nouveau_info[] = {
- &nouveau_chip,
- &nouveau_temp,
- &nouveau_fan,
- &nouveau_in,
- &nouveau_pwm,
- &nouveau_power,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_EMERGENCY | HWMON_T_EMERGENCY_HYST),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT |
+ HWMON_I_MIN | HWMON_I_MAX |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_CAP_MAX | HWMON_P_CRIT),
NULL
};
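
The nouveau_hwmon change folds the hand-written hwmon_channel_info and config arrays into HWMON_CHANNEL_INFO(), which builds the same { type, config[] } pair as a zero-terminated compound literal in place. A minimal sketch of the macro in isolation (example_info is an invented name):

#include <linux/hwmon.h>

static const struct hwmon_channel_info *example_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
        HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
        NULL
};
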
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2e517cdc24c9..76f8edefa637 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -187,3 +187,32 @@ nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
*res = &mem->base;
return 0;
}
+
+bool
+nouveau_mem_intersects(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ u32 num_pages = PFN_UP(size);
+
+ /* Don't evict BOs outside of the requested placement range */
+ if (place->fpfn >= (res->start + num_pages) ||
+ (place->lpfn && place->lpfn <= res->start))
+ return false;
+
+ return true;
+}
+
+bool
+nouveau_mem_compatible(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ u32 num_pages = PFN_UP(size);
+
+ if (res->start < place->fpfn ||
+ (place->lpfn && (res->start + num_pages) > place->lpfn))
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 325551eba5cd..1ee6cdb9ad9b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -25,6 +25,12 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
struct ttm_resource **);
void nouveau_mem_del(struct ttm_resource_manager *man,
struct ttm_resource *);
+bool nouveau_mem_intersects(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool nouveau_mem_compatible(struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 347488685f74..9608121e49b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -71,7 +71,6 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
sg, robj);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
goto unlock;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 85f1f5a0fe5d..9602c30928f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -42,6 +42,24 @@ nouveau_manager_del(struct ttm_resource_manager *man,
nouveau_mem_del(man, reg);
}
+static bool
+nouveau_manager_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return nouveau_mem_intersects(res, place, size);
+}
+
+static bool
+nouveau_manager_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return nouveau_mem_compatible(res, place, size);
+}
+
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
@@ -73,6 +91,8 @@ nouveau_vram_manager_new(struct ttm_resource_manager *man,
const struct ttm_resource_manager_func nouveau_vram_manager = {
.alloc = nouveau_vram_manager_new,
.free = nouveau_manager_del,
+ .intersects = nouveau_manager_intersects,
+ .compatible = nouveau_manager_compatible,
};
static int
@@ -97,6 +117,8 @@ nouveau_gart_manager_new(struct ttm_resource_manager *man,
const struct ttm_resource_manager_func nouveau_gart_manager = {
.alloc = nouveau_gart_manager_new,
.free = nouveau_manager_del,
+ .intersects = nouveau_manager_intersects,
+ .compatible = nouveau_manager_compatible,
};
static int
@@ -130,6 +152,8 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
const struct ttm_resource_manager_func nv04_gart_manager = {
.alloc = nv04_gart_manager_new,
.free = nouveau_manager_del,
+ .intersects = nouveau_manager_intersects,
+ .compatible = nouveau_manager_compatible,
};
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index b4a308f3cf7b..49e2664a734c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -64,12 +64,9 @@ nvbios_pmuEp(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr,
struct nvbios_pmuE *info)
{
u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
- memset(info, 0x00, sizeof(*info));
- switch (!!data * *ver) {
- default:
+ if (data) {
info->type = nvbios_rd08(bios, data + 0x00);
info->data = nvbios_rd32(bios, data + 0x02);
- break;
}
return data;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index a139dafffe06..7c33542f651b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -581,7 +581,7 @@ gm20b_clk_prog(struct nvkm_clk *base)
/*
* Interim step for changing DVFS detection settings: low enough
- * frequency to be safe at at DVFS coeff = 0.
+ * frequency to be safe at DVFS coeff = 0.
*
* 1. If voltage is increasing:
* - safe frequency target matches the lowest - old - frequency
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index c4de142cc85b..0ee344ebcd1c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -2451,7 +2451,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
*decim_x = DIV_ROUND_UP(width, in_width_max);
- *decim_x = *decim_x > decim_x_min ? *decim_x : decim_x_min;
+ *decim_x = max(*decim_x, decim_x_min);
if (*decim_x > *x_predecim)
return -EINVAL;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 0399f3390a0a..c4febb861910 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1176,6 +1176,7 @@ static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
default:
break;
}
+ of_node_put(port);
}
}
@@ -1208,11 +1209,13 @@ static int dss_init_ports(struct dss_device *dss)
default:
break;
}
+ of_node_put(port);
}
return 0;
error:
+ of_node_put(port);
__dss_uninit_ports(dss, i);
return r;
}
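
The dss.c hunks plug device-node reference leaks: every port node obtained inside the init/uninit loops is now released with of_node_put() on both the normal and the error path. The rule they follow, sketched with a generic child-node iterator and a hypothetical setup_port() helper rather than the omapdrm loop itself:

#include <linux/of.h>

static int example_init_ports(struct device_node *parent)
{
        struct device_node *port;
        int r;

        for_each_child_of_node(parent, port) {
                r = setup_port(port);           /* hypothetical */
                if (r) {
                        /* The iterator holds a reference on 'port'; drop it
                         * explicitly when leaving the loop early. */
                        of_node_put(port);
                        return r;
                }
        }
        return 0;
}
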
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 06a719c104f4..63ddc5127f7b 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index ac869acf80ea..61a27dd7392e 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -813,10 +813,8 @@ static int omap_dmm_probe(struct platform_device *dev)
}
omap_dmm->irq = platform_get_irq(dev, 0);
- if (omap_dmm->irq < 0) {
- dev_err(&dev->dev, "failed to get IRQ resource\n");
+ if (omap_dmm->irq < 0)
goto fail;
- }
omap_dmm->dev = &dev->dev;
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c
index b0bc9ad2ef73..fb97c74386f2 100644
--- a/drivers/gpu/drm/omapdrm/omap_overlay.c
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.c
@@ -6,7 +6,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index b6cb537f7689..24a2ded08b45 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a9043eacce97..a582ddd583c2 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -165,8 +165,8 @@ config DRM_PANEL_ILITEK_IL9322
config DRM_PANEL_ILITEK_ILI9341
tristate "Ilitek ILI9341 240x320 QVGA panels"
depends on OF && SPI
- depends on DRM_KMS_HELPER
- depends on DRM_GEM_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_GEM_DMA_HELPER
depends on BACKLIGHT_CLASS_DEVICE
select DRM_MIPI_DBI
help
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index 174ff434bd71..b3235781e6ba 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -321,7 +321,7 @@ static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+static void tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
{
struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -332,8 +332,6 @@ static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
"Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id tm5p5_nt35596_of_match[] = {
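
From here on, the panel patches convert mipi_dsi_driver .remove callbacks from int to void: removal can no longer fail, so detach errors are only logged. The common shape of the converted callback, with a hypothetical example_panel context:

struct example_panel {
        struct drm_panel panel;
};

static void example_panel_remove(struct mipi_dsi_device *dsi)
{
        struct example_panel *ctx = mipi_dsi_get_drvdata(dsi);
        int ret;

        ret = mipi_dsi_detach(dsi);
        if (ret < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);

        drm_panel_remove(&ctx->panel);
        /* void return: nothing useful to propagate from remove */
}
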
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index ef00cd67dc40..ad58840eda41 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -410,7 +410,7 @@ static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
+static void boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
{
struct boe_bf060y8m_aj0 *boe = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -420,8 +420,6 @@ static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&boe->panel);
-
- return 0;
}
static const struct of_device_id boe_bf060y8m_aj0_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index 42854bd37fd5..d879b3b14c48 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -919,7 +919,7 @@ static int panel_probe(struct mipi_dsi_device *dsi)
return err;
}
-static int panel_remove(struct mipi_dsi_device *dsi)
+static void panel_remove(struct mipi_dsi_device *dsi)
{
struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
int err;
@@ -937,8 +937,6 @@ static int panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
drm_panel_remove(&pinfo->base);
-
- return 0;
}
static void panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 07f722f33fc5..857a2f0420d7 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1622,7 +1622,7 @@ static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
drm_panel_unprepare(&boe->base);
}
-static int boe_panel_remove(struct mipi_dsi_device *dsi)
+static void boe_panel_remove(struct mipi_dsi_device *dsi)
{
struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -1635,8 +1635,6 @@ static int boe_panel_remove(struct mipi_dsi_device *dsi)
if (boe->base.dev)
drm_panel_remove(&boe->base);
-
- return 0;
}
static const struct of_device_id boe_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c
index b0213a518f9d..ba17bcc4461c 100644
--- a/drivers/gpu/drm/panel/panel-dsi-cm.c
+++ b/drivers/gpu/drm/panel/panel-dsi-cm.c
@@ -579,7 +579,7 @@ err_bl:
return r;
}
-static int dsicm_remove(struct mipi_dsi_device *dsi)
+static void dsicm_remove(struct mipi_dsi_device *dsi)
{
struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi);
@@ -593,8 +593,6 @@ static int dsicm_remove(struct mipi_dsi_device *dsi)
if (ddata->extbldev)
put_device(&ddata->extbldev->dev);
-
- return 0;
}
static const struct dsic_panel_data taal_data = {
diff --git a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
index 386f8321b930..e85d63a176d0 100644
--- a/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
+++ b/drivers/gpu/drm/panel/panel-ebbg-ft8719.c
@@ -250,7 +250,7 @@ static int ebbg_ft8719_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
+static void ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
{
struct ebbg_ft8719 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -260,8 +260,6 @@ static int ebbg_ft8719_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ebbg_ft8719_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index cdb154c8b866..4b39d1dd9140 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -53,7 +53,7 @@ struct panel_delay {
* before the HPD signal is reliable. Ideally this is 0 but some panels,
* board designs, or bad pulldown configs can cause a glitch here.
*
- * NOTE: on some old panel data this number appers to be much too big.
+ * NOTE: on some old panel data this number appears to be much too big.
* Presumably some old panels simply didn't have HPD hooked up and put
* the hpd_absent here because this field predates the
* hpd_absent. While that works, it's non-ideal.
@@ -403,17 +403,10 @@ static int panel_edp_unprepare(struct drm_panel *panel)
static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
{
- int err;
-
p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
- if (IS_ERR(p->hpd_gpio)) {
- err = PTR_ERR(p->hpd_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(p->hpd_gpio))
+ return dev_err_probe(dev, PTR_ERR(p->hpd_gpio),
+ "failed to get 'hpd' GPIO\n");
return 0;
}
@@ -832,12 +825,9 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio)) {
- err = PTR_ERR(panel->enable_gpio);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
@@ -1295,7 +1285,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
},
.delay = {
.hpd_absent = 200,
- .prepare_to_enable = 80,
+ .enable = 80,
+ .disable = 50,
.unprepare = 500,
},
};
@@ -1854,6 +1845,12 @@ static const struct panel_delay delay_100_500_e200 = {
.enable = 200,
};
+static const struct panel_delay delay_200_500_e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.name = _name, \
@@ -1870,6 +1867,9 @@ static const struct panel_delay delay_100_500_e200 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
@@ -1877,10 +1877,19 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "M133NW4J-R3"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
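
panel-edp now reports GPIO lookup failures through dev_err_probe(), which stays quiet on -EPROBE_DEFER, records the deferral reason, and passes the error code straight back. A sketch of the pattern with a hypothetical wrapper function:

#include <linux/gpio/consumer.h>

static int example_get_enable_gpio(struct device *dev, struct gpio_desc **out)
{
        struct gpio_desc *gpio;

        gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(gpio))
                /* Logs unless the error is -EPROBE_DEFER, then returns it */
                return dev_err_probe(dev, PTR_ERR(gpio),
                                     "failed to request enable GPIO\n");

        *out = gpio;
        return 0;
}
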
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index 01dd555a7f26..eee714cf3f49 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -321,7 +321,7 @@ static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int kd35t133_remove(struct mipi_dsi_device *dsi)
+static void kd35t133_remove(struct mipi_dsi_device *dsi)
{
struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -333,8 +333,6 @@ static int kd35t133_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id kd35t133_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index cb0bb3076099..76572c922983 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -486,14 +486,12 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+static void k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
{
struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id k101_im2ba02_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index ee61d60eceae..df493da50afe 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -233,14 +233,12 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int feiyang_dsi_remove(struct mipi_dsi_device *dsi)
+static void feiyang_dsi_remove(struct mipi_dsi_device *dsi)
{
struct feiyang *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id feiyang_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 6826f4d4826a..39dc40cf681f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -32,7 +32,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -576,6 +576,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = ili9341_dbi_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -586,12 +587,12 @@ static const struct drm_display_mode ili9341_dbi_mode = {
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9341_dbi_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops);
static struct drm_driver ili9341_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_dbi_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 596861269774..cbb68caa36f2 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -923,14 +923,12 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
return mipi_dsi_attach(dsi);
}
-static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+static void ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
{
struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct ili9881c_desc lhr050h41_desc = {
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index f194b62e290c..9992d0d4c0e5 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -506,7 +506,7 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int innolux_panel_remove(struct mipi_dsi_device *dsi)
+static void innolux_panel_remove(struct mipi_dsi_device *dsi)
{
struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
int err;
@@ -524,8 +524,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
innolux_panel_del(innolux);
-
- return 0;
}
static void innolux_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 31eafbc38ec0..d8765b2294fb 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -288,7 +288,7 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
+static void jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
{
struct jdi_fhd_r63452 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -298,8 +298,6 @@ static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id jdi_fhd_r63452_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 3c86ad262d5e..8f4f137a2af6 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -482,7 +482,7 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int jdi_panel_remove(struct mipi_dsi_device *dsi)
+static void jdi_panel_remove(struct mipi_dsi_device *dsi)
{
struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -497,8 +497,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
ret);
jdi_panel_del(jdi);
-
- return 0;
}
static void jdi_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c
index a3ec4cbdbf7a..1ab1ebe30882 100644
--- a/drivers/gpu/drm/panel/panel-khadas-ts050.c
+++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c
@@ -830,7 +830,7 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi)
return err;
}
-static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
+static void khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
{
struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi);
int err;
@@ -842,8 +842,6 @@ static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&khadas_ts050->base);
drm_panel_disable(&khadas_ts050->base);
drm_panel_unprepare(&khadas_ts050->base);
-
- return 0;
}
static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index daccb1fd5fda..17f8d80cf2b3 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -415,7 +415,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
+static void kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
{
struct kingdisplay_panel *kingdisplay = mipi_dsi_get_drvdata(dsi);
int err;
@@ -433,8 +433,6 @@ static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
kingdisplay_panel_del(kingdisplay);
-
- return 0;
}
static void kingdisplay_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index a5a414920430..5619f186d28c 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -628,7 +628,7 @@ static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+static void ltk050h3146w_remove(struct mipi_dsi_device *dsi)
{
struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -640,8 +640,6 @@ static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ltk050h3146w_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 21e48923836d..39e408c9f762 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -477,7 +477,7 @@ static void ltk500hd1829_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
+static void ltk500hd1829_remove(struct mipi_dsi_device *dsi)
{
struct ltk500hd1829 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -489,8 +489,6 @@ static int ltk500hd1829_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id ltk500hd1829_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 31daae1da9c9..772e3b6acece 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -336,7 +336,7 @@ static void mantix_shutdown(struct mipi_dsi_device *dsi)
drm_panel_disable(&ctx->panel);
}
-static int mantix_remove(struct mipi_dsi_device *dsi)
+static void mantix_remove(struct mipi_dsi_device *dsi)
{
struct mantix *ctx = mipi_dsi_get_drvdata(dsi);
@@ -344,8 +344,6 @@ static int mantix_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id mantix_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 40ea41b0a5dd..493c3c23f0d6 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -231,7 +231,7 @@ struct nt35510_config {
* bits 0..2 in the lower nibble controls HCK, the booster clock
* frequency, the values are the same as for PCK in @bt1ctr.
* bits 4..5 in the upper nibble controls BTH, the boosting
- * amplification for the the step-up circuit.
+ * amplification for the step-up circuit.
* 0 = AVDD + VDDB
* 1 = AVDD - AVEE
* 2 = AVDD - AVEE + VDDB
@@ -966,7 +966,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt35510_remove(struct mipi_dsi_device *dsi)
+static void nt35510_remove(struct mipi_dsi_device *dsi)
{
struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -974,9 +974,10 @@ static int nt35510_remove(struct mipi_dsi_device *dsi)
mipi_dsi_detach(dsi);
/* Power off */
ret = nt35510_power_off(nt);
- drm_panel_remove(&nt->panel);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to power off\n");
- return ret;
+ drm_panel_remove(&nt->panel);
}
/*
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 1b6042321ea1..cc7f96d70826 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -523,14 +523,12 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt35560_remove(struct mipi_dsi_device *dsi)
+static void nt35560_remove(struct mipi_dsi_device *dsi)
{
struct nt35560 *nt = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&nt->panel);
-
- return 0;
}
static const struct of_device_id nt35560_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 288c7fa83ecc..3a844917da07 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -620,7 +620,7 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt35950_remove(struct mipi_dsi_device *dsi)
+static void nt35950_remove(struct mipi_dsi_device *dsi)
{
struct nt35950 *nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -639,8 +639,6 @@ static int nt35950_remove(struct mipi_dsi_device *dsi)
}
drm_panel_remove(&nt->panel);
-
- return 0;
}
static const struct nt35950_panel_mode sharp_ls055d1sx04_modes[] = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
index 6d6ce42787e2..73bcffa1e0c1 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -669,7 +669,7 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
+static void nt36672a_panel_remove(struct mipi_dsi_device *dsi)
{
struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
int err;
@@ -687,8 +687,6 @@ static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
drm_panel_remove(&pinfo->base);
-
- return 0;
}
static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index cb5cb27462df..36a46cb7fe1c 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,7 +288,7 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
return 0;
}
-static int lcd_olinuxino_remove(struct i2c_client *client)
+static void lcd_olinuxino_remove(struct i2c_client *client)
{
struct lcd_olinuxino *panel = i2c_get_clientdata(client);
@@ -296,8 +296,6 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
drm_panel_disable(&panel->panel);
drm_panel_unprepare(&panel->panel);
-
- return 0;
}
static const struct of_device_id lcd_olinuxino_of_ids[] = {
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index dfb43b1374e7..b4729a94c34a 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -497,14 +497,12 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int otm8009a_remove(struct mipi_dsi_device *dsi)
+static void otm8009a_remove(struct mipi_dsi_device *dsi)
{
struct otm8009a *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id orisetech_otm8009a_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index 198493a6eb6a..493e0504f6f7 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -206,7 +206,7 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
+static void osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
{
struct osd101t2587_panel *osd101t2587 = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -221,8 +221,6 @@ static int osd101t2587_panel_remove(struct mipi_dsi_device *dsi)
ret = mipi_dsi_detach(dsi);
if (ret < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
-
- return ret;
}
static void osd101t2587_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 3991f5d950af..8ba6d8287938 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -250,7 +250,7 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
+static void wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
{
struct wuxga_nt_panel *wuxga_nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -264,8 +264,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
wuxga_nt_panel_del(wuxga_nt);
-
- return 0;
}
static void wuxga_nt_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index a6dc5ab182fa..79f852465a84 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -446,7 +446,7 @@ error:
return -ENODEV;
}
-static int rpi_touchscreen_remove(struct i2c_client *i2c)
+static void rpi_touchscreen_remove(struct i2c_client *i2c)
{
struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
@@ -455,8 +455,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
-
- return 0;
}
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 4e021a572211..dbb1ed4efbed 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -616,7 +616,7 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int rad_panel_remove(struct mipi_dsi_device *dsi)
+static void rad_panel_remove(struct mipi_dsi_device *dsi)
{
struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
struct device *dev = &dsi->dev;
@@ -627,8 +627,6 @@ static int rad_panel_remove(struct mipi_dsi_device *dsi)
dev_err(dev, "Failed to detach from host (%d)\n", ret);
drm_panel_remove(&rad->panel);
-
- return 0;
}
static void rad_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 412c0dbcb2b6..5f9b340588fb 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -412,14 +412,12 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int rm68200_remove(struct mipi_dsi_device *dsi)
+static void rm68200_remove(struct mipi_dsi_device *dsi)
{
struct rm68200 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id raydium_rm68200_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 1fb579a574d9..a8a98c91b13c 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -208,14 +208,12 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
+static void rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
{
struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id rb070d30_panel_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 70560cac53a9..008e2b0d6652 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -212,14 +212,12 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int s6d16d0_remove(struct mipi_dsi_device *dsi)
+static void s6d16d0_remove(struct mipi_dsi_device *dsi)
{
struct s6d16d0 *s6 = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&s6->panel);
-
- return 0;
}
static const struct of_device_id s6d16d0_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 0ab1b7ec84cd..5c621b15e84c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -747,15 +747,13 @@ remove_panel:
return ret;
}
-static int s6e3ha2_remove(struct mipi_dsi_device *dsi)
+static void s6e3ha2_remove(struct mipi_dsi_device *dsi)
{
struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
backlight_device_unregister(ctx->bl_dev);
-
- return 0;
}
static const struct of_device_id s6e3ha2_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index e38262b67ff7..e06fd35de814 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -488,7 +488,7 @@ remove_panel:
return ret;
}
-static int s6e63j0x03_remove(struct mipi_dsi_device *dsi)
+static void s6e63j0x03_remove(struct mipi_dsi_device *dsi)
{
struct s6e63j0x03 *ctx = mipi_dsi_get_drvdata(dsi);
@@ -496,8 +496,6 @@ static int s6e63j0x03_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
backlight_device_unregister(ctx->bl_dev);
-
- return 0;
}
static const struct of_device_id s6e63j0x03_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
index e0f773678168..ed3895e4ca5e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
@@ -113,11 +113,10 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int s6e63m0_dsi_remove(struct mipi_dsi_device *dsi)
+static void s6e63m0_dsi_remove(struct mipi_dsi_device *dsi)
{
mipi_dsi_detach(dsi);
s6e63m0_remove(&dsi->dev);
- return 0;
}
static const struct of_device_id s6e63m0_dsi_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 29fde3823212..97ff7a18545c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -254,7 +254,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+static void s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
{
struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -264,8 +264,6 @@ static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 9b3599d6d2de..54213beafaf5 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -1028,14 +1028,12 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
+static void s6e8aa0_remove(struct mipi_dsi_device *dsi)
{
struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id s6e8aa0_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 1fb37fda4ba9..1a0d24595faa 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -305,7 +305,7 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sofef00_panel_remove(struct mipi_dsi_device *dsi)
+static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
{
struct sofef00_panel *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -315,8 +315,6 @@ static int sofef00_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id sofef00_panel_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index f8cd2a42ed13..14851408a5e1 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -391,7 +391,7 @@ static int sharp_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sharp_panel_remove(struct mipi_dsi_device *dsi)
+static void sharp_panel_remove(struct mipi_dsi_device *dsi)
{
struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
int err;
@@ -399,7 +399,7 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
/* only detach from host for the DSI-LINK2 interface */
if (!sharp) {
mipi_dsi_detach(dsi);
- return 0;
+ return;
}
err = drm_panel_disable(&sharp->base);
@@ -411,8 +411,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
sharp_panel_del(sharp);
-
- return 0;
}
static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index 25829a0a8e80..d1ec80a3e3c7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -305,7 +305,7 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
+static void sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
{
struct sharp_nt_panel *sharp_nt = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -319,8 +319,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
sharp_nt_panel_del(sharp_nt);
-
- return 0;
}
static void sharp_nt_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index e12570561629..8a4e0c1fe73f 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -298,7 +298,7 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int sharp_ls060_remove(struct mipi_dsi_device *dsi)
+static void sharp_ls060_remove(struct mipi_dsi_device *dsi)
{
struct sharp_ls060 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -308,8 +308,6 @@ static int sharp_ls060_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id sharp_ls060t1sx01_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ff5e1a44c43a..2944228a8e2c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -575,12 +575,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR(panel->enable_gpio)) {
- err = PTR_ERR(panel->enable_gpio);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ if (IS_ERR(panel->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
+ "failed to request GPIO\n");
err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
if (err) {
@@ -696,7 +693,7 @@ free_ddc:
return err;
}
-static int panel_simple_remove(struct device *dev)
+static void panel_simple_remove(struct device *dev)
{
struct panel_simple *panel = dev_get_drvdata(dev);
@@ -708,8 +705,6 @@ static int panel_simple_remove(struct device *dev)
pm_runtime_disable(dev);
if (panel->ddc)
put_device(&panel->ddc->dev);
-
- return 0;
}
static void panel_simple_shutdown(struct device *dev)
@@ -2257,7 +2252,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -2703,6 +2698,36 @@ static const struct panel_desc multi_inno_mi0700s4t_6 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing multi_inno_mi0800ft_9_timing = {
+ .pixelclock = { 32000000, 40000000, 50000000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 6, 26, 45 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 600, 600, 600 },
+ .vfront_porch = { 1, 12, 77 },
+ .vback_porch = { 3, 13, 22 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc multi_inno_mi0800ft_9 = {
+ .timings = &multi_inno_mi0800ft_9_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 162,
+ .height = 122,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct display_timing multi_inno_mi1010ait_1cp_timing = {
.pixelclock = { 68900000, 70000000, 73400000 },
.hactive = { 1280, 1280, 1280 },
@@ -3220,6 +3245,37 @@ static const struct panel_desc rocktech_rk101ii01d_ct = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing samsung_ltl101al01_timing = {
+ .pixelclock = { 66663000, 66663000, 66663000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 18, 18 },
+ .hback_porch = { 36, 36, 36 },
+ .hsync_len = { 16, 16, 16 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 16, 16, 16 },
+ .vsync_len = { 3, 3, 3 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc samsung_ltl101al01 = {
+ .timings = &samsung_ltl101al01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 135,
+ },
+ .delay = {
+ .prepare = 40,
+ .enable = 300,
+ .disable = 200,
+ .unprepare = 600,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -4104,6 +4160,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
}, {
+ .compatible = "multi-inno,mi0800ft-9",
+ .data = &multi_inno_mi0800ft_9,
+ }, {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
@@ -4164,6 +4223,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk101ii01d-ct",
.data = &rocktech_rk101ii01d_ct,
}, {
+ .compatible = "samsung,ltl101al01",
+ .data = &samsung_ltl101al01,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
@@ -4273,7 +4335,9 @@ static int panel_simple_platform_probe(struct platform_device *pdev)
static int panel_simple_platform_remove(struct platform_device *pdev)
{
- return panel_simple_remove(&pdev->dev);
+ panel_simple_remove(&pdev->dev);
+
+ return 0;
}
static void panel_simple_platform_shutdown(struct platform_device *pdev)
@@ -4566,7 +4630,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
return err;
}
-static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+static void panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
{
int err;
@@ -4574,7 +4638,7 @@ static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
if (err < 0)
dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
- return panel_simple_remove(&dsi->dev);
+ panel_simple_remove(&dsi->dev);
}
static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
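In the panel-simple probe hunk above, the open-coded -EPROBE_DEFER special case is replaced by dev_err_probe(), which returns the error code, prints the message only when the error is not a probe deferral, and records the deferral reason for debugfs. A minimal sketch of the pattern, using a hypothetical helper rather than the driver's own code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_request_enable_gpio(struct device *dev)
{
	struct gpio_desc *enable;

	enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(enable))
		/* Logs unless the error is -EPROBE_DEFER, then returns it. */
		return dev_err_probe(dev, PTR_ERR(enable),
				     "failed to request enable GPIO\n");

	return 0;
}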
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 320a2a8fd459..c481daa4bbce 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -8,6 +8,7 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -41,59 +42,101 @@
/*
* Command2 with BK function selection.
*
- * BIT[4, 0]: [CN2, BKXSEL]
- * 10 = CMD2BK0, Command2 BK0
- * 11 = CMD2BK1, Command2 BK1
- * 00 = Command2 disable
+ * BIT[4].....CN2
+ * BIT[1:0]...BKXSEL
+ * 1:00 = CMD2BK0, Command2 BK0
+ * 1:01 = CMD2BK1, Command2 BK1
+ * 1:11 = CMD2BK3, Command2 BK3
+ * 0:00 = Command2 disable
*/
-#define DSI_CMD2BK1_SEL 0x11
#define DSI_CMD2BK0_SEL 0x10
+#define DSI_CMD2BK1_SEL 0x11
+#define DSI_CMD2BK3_SEL 0x13
#define DSI_CMD2BKX_SEL_NONE 0x00
/* Command2, BK0 bytes */
-#define DSI_LINESET_LINE 0x69
-#define DSI_LINESET_LDE_EN BIT(7)
-#define DSI_LINESET_LINEDELTA GENMASK(1, 0)
-#define DSI_CMD2_BK0_LNESET_B1 DSI_LINESET_LINEDELTA
-#define DSI_CMD2_BK0_LNESET_B0 (DSI_LINESET_LDE_EN | DSI_LINESET_LINE)
-#define DSI_INVSEL_DEFAULT GENMASK(5, 4)
-#define DSI_INVSEL_NLINV GENMASK(2, 0)
-#define DSI_INVSEL_RTNI GENMASK(2, 1)
-#define DSI_CMD2_BK0_INVSEL_B1 DSI_INVSEL_RTNI
-#define DSI_CMD2_BK0_INVSEL_B0 (DSI_INVSEL_DEFAULT | DSI_INVSEL_NLINV)
-#define DSI_CMD2_BK0_PORCTRL_B0(m) ((m)->vtotal - (m)->vsync_end)
-#define DSI_CMD2_BK0_PORCTRL_B1(m) ((m)->vsync_start - (m)->vdisplay)
+#define DSI_CMD2_BK0_GAMCTRL_AJ_MASK GENMASK(7, 6)
+#define DSI_CMD2_BK0_GAMCTRL_VC0_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC4_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC8_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC16_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC24_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC52_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC80_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC108_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC147_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC175_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC203_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC231_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC239_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC247_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC251_MASK GENMASK(5, 0)
+#define DSI_CMD2_BK0_GAMCTRL_VC255_MASK GENMASK(4, 0)
+#define DSI_CMD2_BK0_LNESET_LINE_MASK GENMASK(6, 0)
+#define DSI_CMD2_BK0_LNESET_LDE_EN BIT(7)
+#define DSI_CMD2_BK0_LNESET_LINEDELTA GENMASK(1, 0)
+#define DSI_CMD2_BK0_PORCTRL_VBP_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK0_PORCTRL_VFP_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK0_INVSEL_ONES_MASK GENMASK(5, 4)
+#define DSI_CMD2_BK0_INVSEL_NLINV_MASK GENMASK(2, 0)
+#define DSI_CMD2_BK0_INVSEL_RTNI_MASK GENMASK(4, 0)
/* Command2, BK1 bytes */
-#define DSI_CMD2_BK1_VRHA_SET 0x45
-#define DSI_CMD2_BK1_VCOM_SET 0x13
-#define DSI_CMD2_BK1_VGHSS_SET GENMASK(2, 0)
+#define DSI_CMD2_BK1_VRHA_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK1_VCOM_MASK GENMASK(7, 0)
+#define DSI_CMD2_BK1_VGHSS_MASK GENMASK(3, 0)
#define DSI_CMD2_BK1_TESTCMD_VAL BIT(7)
-#define DSI_VGLS_DEFAULT BIT(6)
-#define DSI_VGLS_SEL GENMASK(2, 0)
-#define DSI_CMD2_BK1_VGLS_SET (DSI_VGLS_DEFAULT | DSI_VGLS_SEL)
-#define DSI_PWCTLR1_AP BIT(7) /* Gamma OP bias, max */
-#define DSI_PWCTLR1_APIS BIT(2) /* Source OP input bias, min */
-#define DSI_PWCTLR1_APOS BIT(0) /* Source OP output bias, min */
-#define DSI_CMD2_BK1_PWCTLR1_SET (DSI_PWCTLR1_AP | DSI_PWCTLR1_APIS | \
- DSI_PWCTLR1_APOS)
-#define DSI_PWCTLR2_AVDD BIT(5) /* AVDD 6.6v */
-#define DSI_PWCTLR2_AVCL 0x0 /* AVCL -4.4v */
-#define DSI_CMD2_BK1_PWCTLR2_SET (DSI_PWCTLR2_AVDD | DSI_PWCTLR2_AVCL)
-#define DSI_SPD1_T2D BIT(3)
-#define DSI_CMD2_BK1_SPD1_SET (GENMASK(6, 4) | DSI_SPD1_T2D)
-#define DSI_CMD2_BK1_SPD2_SET DSI_CMD2_BK1_SPD1_SET
-#define DSI_MIPISET1_EOT_EN BIT(3)
-#define DSI_CMD2_BK1_MIPISET1_SET (BIT(7) | DSI_MIPISET1_EOT_EN)
+#define DSI_CMD2_BK1_VGLS_ONES BIT(6)
+#define DSI_CMD2_BK1_VGLS_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK1_PWRCTRL1_AP_MASK GENMASK(7, 6)
+#define DSI_CMD2_BK1_PWRCTRL1_APIS_MASK GENMASK(3, 2)
+#define DSI_CMD2_BK1_PWRCTRL1_APOS_MASK GENMASK(1, 0)
+#define DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK GENMASK(5, 4)
+#define DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK GENMASK(1, 0)
+#define DSI_CMD2_BK1_SPD1_ONES_MASK GENMASK(6, 4)
+#define DSI_CMD2_BK1_SPD1_T2D_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK1_SPD2_ONES_MASK GENMASK(6, 4)
+#define DSI_CMD2_BK1_SPD2_T3D_MASK GENMASK(3, 0)
+#define DSI_CMD2_BK1_MIPISET1_ONES BIT(7)
+#define DSI_CMD2_BK1_MIPISET1_EOT_EN BIT(3)
+
+#define CFIELD_PREP(_mask, _val) \
+ (((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))
+
+enum op_bias {
+ OP_BIAS_OFF = 0,
+ OP_BIAS_MIN,
+ OP_BIAS_MIDDLE,
+ OP_BIAS_MAX
+};
+
+struct st7701;
struct st7701_panel_desc {
const struct drm_display_mode *mode;
unsigned int lanes;
- unsigned long flags;
enum mipi_dsi_pixel_format format;
- const char *const *supply_names;
- unsigned int num_supplies;
unsigned int panel_sleep_delay;
+
+ /* TFT matrix driver configuration, panel specific. */
+ const u8 pv_gamma[16]; /* Positive voltage gamma control */
+ const u8 nv_gamma[16]; /* Negative voltage gamma control */
+ const u8 nlinv; /* Inversion selection */
+ const u32 vop_uv; /* Vop in uV */
+ const u32 vcom_uv; /* Vcom in uV */
+ const u16 vgh_mv; /* Vgh in mV */
+ const s16 vgl_mv; /* Vgl in mV */
+ const u16 avdd_mv; /* Avdd in mV */
+ const s16 avcl_mv; /* Avcl in mV */
+ const enum op_bias gamma_op_bias;
+ const enum op_bias input_op_bias;
+ const enum op_bias output_op_bias;
+ const u16 t2d_ns; /* T2D in ns */
+ const u16 t3d_ns; /* T3D in ns */
+ const bool eot_en;
+
+ /* GIP sequence, fully custom and undocumented. */
+ void (*gip_sequence)(struct st7701 *st7701);
};
struct st7701 {
@@ -101,7 +144,7 @@ struct st7701 {
struct mipi_dsi_device *dsi;
const struct st7701_panel_desc *desc;
- struct regulator_bulk_data *supplies;
+ struct regulator_bulk_data supplies[2];
struct gpio_desc *reset;
unsigned int sleep_delay;
};
@@ -123,9 +166,37 @@ static inline int st7701_dsi_write(struct st7701 *st7701, const void *seq,
st7701_dsi_write(st7701, d, ARRAY_SIZE(d)); \
}
+static u8 st7701_vgls_map(struct st7701 *st7701)
+{
+ const struct st7701_panel_desc *desc = st7701->desc;
+ struct {
+ s32 vgl;
+ u8 val;
+ } map[16] = {
+ { -7060, 0x0 }, { -7470, 0x1 },
+ { -7910, 0x2 }, { -8140, 0x3 },
+ { -8650, 0x4 }, { -8920, 0x5 },
+ { -9210, 0x6 }, { -9510, 0x7 },
+ { -9830, 0x8 }, { -10170, 0x9 },
+ { -10530, 0xa }, { -10910, 0xb },
+ { -11310, 0xc }, { -11730, 0xd },
+ { -12200, 0xe }, { -12690, 0xf }
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (desc->vgl_mv == map[i].vgl)
+ return map[i].val;
+
+ return 0;
+}
+
static void st7701_init_sequence(struct st7701 *st7701)
{
- const struct drm_display_mode *mode = st7701->desc->mode;
+ const struct st7701_panel_desc *desc = st7701->desc;
+ const struct drm_display_mode *mode = desc->mode;
+ const u8 linecount8 = mode->vdisplay / 8;
+ const u8 linecountrem2 = (mode->vdisplay % 8) / 2;
ST7701_DSI(st7701, MIPI_DCS_SOFT_RESET, 0x00);
@@ -139,34 +210,105 @@ static void st7701_init_sequence(struct st7701 *st7701)
/* Command2, BK0 */
ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
0x77, 0x01, 0x00, 0x00, DSI_CMD2BK0_SEL);
- ST7701_DSI(st7701, DSI_CMD2_BK0_PVGAMCTRL, 0x00, 0x0E, 0x15, 0x0F,
- 0x11, 0x08, 0x08, 0x08, 0x08, 0x23, 0x04, 0x13, 0x12,
- 0x2B, 0x34, 0x1F);
- ST7701_DSI(st7701, DSI_CMD2_BK0_NVGAMCTRL, 0x00, 0x0E, 0x95, 0x0F,
- 0x13, 0x07, 0x09, 0x08, 0x08, 0x22, 0x04, 0x10, 0x0E,
- 0x2C, 0x34, 0x1F);
+ mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_PVGAMCTRL,
+ desc->pv_gamma, ARRAY_SIZE(desc->pv_gamma));
+ mipi_dsi_dcs_write(st7701->dsi, DSI_CMD2_BK0_NVGAMCTRL,
+ desc->nv_gamma, ARRAY_SIZE(desc->nv_gamma));
+ /*
+ * Vertical line count configuration:
+ * Line[6:0]: select number of vertical lines of the TFT matrix in
+ * multiples of 8 lines
+ * LDE_EN: enable sub-8-line granularity line count
+ * Line_delta[1:0]: add 0/2/4/6 extra lines to line count selected
+ * using Line[6:0]
+ *
+ * Total number of vertical lines:
+ * LN = ((Line[6:0] + 1) * 8) + (LDE_EN ? Line_delta[1:0] * 2 : 0)
+ */
ST7701_DSI(st7701, DSI_CMD2_BK0_LNESET,
- DSI_CMD2_BK0_LNESET_B0, DSI_CMD2_BK0_LNESET_B1);
+ FIELD_PREP(DSI_CMD2_BK0_LNESET_LINE_MASK, linecount8 - 1) |
+ (linecountrem2 ? DSI_CMD2_BK0_LNESET_LDE_EN : 0),
+ FIELD_PREP(DSI_CMD2_BK0_LNESET_LINEDELTA, linecountrem2));
ST7701_DSI(st7701, DSI_CMD2_BK0_PORCTRL,
- DSI_CMD2_BK0_PORCTRL_B0(mode),
- DSI_CMD2_BK0_PORCTRL_B1(mode));
+ FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VBP_MASK,
+ mode->vtotal - mode->vsync_end),
+ FIELD_PREP(DSI_CMD2_BK0_PORCTRL_VFP_MASK,
+ mode->vsync_start - mode->vdisplay));
+ /*
+ * Horizontal pixel count configuration:
+ * PCLK = 512 + (RTNI[4:0] * 16)
+ * The PCLK is the number of pixel clocks per line, which matches the
+ * mode htotal. The minimum is 512 PCLK.
+ */
ST7701_DSI(st7701, DSI_CMD2_BK0_INVSEL,
- DSI_CMD2_BK0_INVSEL_B0, DSI_CMD2_BK0_INVSEL_B1);
+ DSI_CMD2_BK0_INVSEL_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK0_INVSEL_NLINV_MASK, desc->nlinv),
+ FIELD_PREP(DSI_CMD2_BK0_INVSEL_RTNI_MASK,
+ DIV_ROUND_UP(mode->htotal, 16)));
/* Command2, BK1 */
ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
0x77, 0x01, 0x00, 0x00, DSI_CMD2BK1_SEL);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS, DSI_CMD2_BK1_VRHA_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM, DSI_CMD2_BK1_VCOM_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS, DSI_CMD2_BK1_VGHSS_SET);
+
+ /* Vop = 3.5375V + (VRHA[7:0] * 0.0125V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VRHS,
+ FIELD_PREP(DSI_CMD2_BK1_VRHA_MASK,
+ DIV_ROUND_CLOSEST(desc->vop_uv - 3537500, 12500)));
+
+ /* Vcom = 0.1V + (VCOM[7:0] * 0.0125V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VCOM,
+ FIELD_PREP(DSI_CMD2_BK1_VCOM_MASK,
+ DIV_ROUND_CLOSEST(desc->vcom_uv - 100000, 12500)));
+
+ /* Vgh = 11.5V + (VGHSS[7:0] * 0.5V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VGHSS,
+ FIELD_PREP(DSI_CMD2_BK1_VGHSS_MASK,
+ DIV_ROUND_CLOSEST(clamp(desc->vgh_mv,
+ (u16)11500,
+ (u16)17000) - 11500,
+ 500)));
+
ST7701_DSI(st7701, DSI_CMD2_BK1_TESTCMD, DSI_CMD2_BK1_TESTCMD_VAL);
- ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS, DSI_CMD2_BK1_VGLS_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1, DSI_CMD2_BK1_PWCTLR1_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2, DSI_CMD2_BK1_PWCTLR2_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1, DSI_CMD2_BK1_SPD1_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2, DSI_CMD2_BK1_SPD2_SET);
- ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1, DSI_CMD2_BK1_MIPISET1_SET);
+ /* Vgl is non-linear */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_VGLS,
+ DSI_CMD2_BK1_VGLS_ONES |
+ FIELD_PREP(DSI_CMD2_BK1_VGLS_MASK, st7701_vgls_map(st7701)));
+
+ ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR1,
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_AP_MASK,
+ desc->gamma_op_bias) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APIS_MASK,
+ desc->input_op_bias) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL1_APOS_MASK,
+ desc->output_op_bias));
+
+ /* Avdd = 6.2V + (AVDD[1:0] * 0.2V), Avcl = -4.4V - (AVCL[1:0] * 0.2V) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_PWCTLR2,
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK,
+ DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) |
+ FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK,
+ DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200)));
+
+ /* T2D = 0.2us * T2D[3:0] */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1,
+ DSI_CMD2_BK1_SPD1_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK1_SPD1_T2D_MASK,
+ DIV_ROUND_CLOSEST(desc->t2d_ns, 200)));
+
+ /* T3D = 4us + (0.8us * T3D[3:0]) */
+ ST7701_DSI(st7701, DSI_CMD2_BK1_SPD2,
+ DSI_CMD2_BK1_SPD2_ONES_MASK |
+ FIELD_PREP(DSI_CMD2_BK1_SPD2_T3D_MASK,
+ DIV_ROUND_CLOSEST(desc->t3d_ns - 4000, 800)));
+
+ ST7701_DSI(st7701, DSI_CMD2_BK1_MIPISET1,
+ DSI_CMD2_BK1_MIPISET1_ONES |
+ (desc->eot_en ? DSI_CMD2_BK1_MIPISET1_EOT_EN : 0));
+}
+
+static void ts8550b_gip_sequence(struct st7701 *st7701)
+{
/**
* ST7701_SPEC_V1.2 is unable to provide enough information about this
* specific command sequence, so grab the same from vendor BSP driver.
@@ -188,10 +330,78 @@ static void st7701_init_sequence(struct st7701 *st7701)
ST7701_DSI(st7701, 0xEC, 0x00, 0x00);
ST7701_DSI(st7701, 0xED, 0xFF, 0xF1, 0x04, 0x56, 0x72, 0x3F, 0xFF,
0xFF, 0xFF, 0xFF, 0xF3, 0x27, 0x65, 0x40, 0x1F, 0xFF);
+}
+
+static void dmt028vghmcmi_1a_gip_sequence(struct st7701 *st7701)
+{
+ ST7701_DSI(st7701, 0xEE, 0x42);
+ ST7701_DSI(st7701, 0xE0, 0x00, 0x00, 0x02);
+
+ ST7701_DSI(st7701, 0xE1,
+ 0x04, 0xA0, 0x06, 0xA0,
+ 0x05, 0xA0, 0x07, 0xA0,
+ 0x00, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE2,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xE3,
+ 0x00, 0x00, 0x22, 0x22);
+ ST7701_DSI(st7701, 0xE4, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE5,
+ 0x0C, 0x90, 0xA0, 0xA0,
+ 0x0E, 0x92, 0xA0, 0xA0,
+ 0x08, 0x8C, 0xA0, 0xA0,
+ 0x0A, 0x8E, 0xA0, 0xA0);
+ ST7701_DSI(st7701, 0xE6,
+ 0x00, 0x00, 0x22, 0x22);
+ ST7701_DSI(st7701, 0xE7, 0x44, 0x44);
+ ST7701_DSI(st7701, 0xE8,
+ 0x0D, 0x91, 0xA0, 0xA0,
+ 0x0F, 0x93, 0xA0, 0xA0,
+ 0x09, 0x8D, 0xA0, 0xA0,
+ 0x0B, 0x8F, 0xA0, 0xA0);
+ ST7701_DSI(st7701, 0xEB,
+ 0x00, 0x00, 0xE4, 0xE4,
+ 0x44, 0x00, 0x00);
+ ST7701_DSI(st7701, 0xED,
+ 0xFF, 0xF5, 0x47, 0x6F,
+ 0x0B, 0xA1, 0xAB, 0xFF,
+ 0xFF, 0xBA, 0x1A, 0xB0,
+ 0xF6, 0x74, 0x5F, 0xFF);
+ ST7701_DSI(st7701, 0xEF,
+ 0x08, 0x08, 0x08, 0x40,
+ 0x3F, 0x64);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+ ST7701_DSI(st7701, 0xE6, 0x7C);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x0E);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ ST7701_DSI(st7701, 0x11);
+ msleep(120);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BK3_SEL);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x0C);
+ msleep(10);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
+
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+ ST7701_DSI(st7701, 0x11);
+ msleep(120);
+ ST7701_DSI(st7701, 0xE8, 0x00, 0x00);
- /* disable Command2 */
ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+
+ ST7701_DSI(st7701, 0x3A, 0x70);
}
static int st7701_prepare(struct drm_panel *panel)
@@ -201,7 +411,7 @@ static int st7701_prepare(struct drm_panel *panel)
gpiod_set_value(st7701->reset, 0);
- ret = regulator_bulk_enable(st7701->desc->num_supplies,
+ ret = regulator_bulk_enable(ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
@@ -212,6 +422,13 @@ static int st7701_prepare(struct drm_panel *panel)
st7701_init_sequence(st7701);
+ if (st7701->desc->gip_sequence)
+ st7701->desc->gip_sequence(st7701);
+
+ /* Disable Command2 */
+ ST7701_DSI(st7701, DSI_CMD2BKX_SEL,
+ 0x77, 0x01, 0x00, 0x00, DSI_CMD2BKX_SEL_NONE);
+
return 0;
}
@@ -254,7 +471,7 @@ static int st7701_unprepare(struct drm_panel *panel)
*/
msleep(st7701->sleep_delay);
- regulator_bulk_disable(st7701->desc->num_supplies, st7701->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(st7701->supplies), st7701->supplies);
return 0;
}
@@ -310,46 +527,207 @@ static const struct drm_display_mode ts8550b_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
-static const char * const ts8550b_supply_names[] = {
- "VCC",
- "IOVCC",
-};
-
static const struct st7701_panel_desc ts8550b_desc = {
.mode = &ts8550b_mode,
.lanes = 2,
- .flags = MIPI_DSI_MODE_VIDEO,
.format = MIPI_DSI_FMT_RGB888,
- .supply_names = ts8550b_supply_names,
- .num_supplies = ARRAY_SIZE(ts8550b_supply_names),
.panel_sleep_delay = 80, /* panel need extra 80ms for sleep out cmd */
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x23),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2b),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0x2) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x15),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xf),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x13),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x7),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x9),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x10),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x2c),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x34),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nlinv = 7,
+ .vop_uv = 4400000,
+ .vcom_uv = 337500,
+ .vgh_mv = 15000,
+ .vgl_mv = -9510,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MAX,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = ts8550b_gip_sequence,
+};
+
+static const struct drm_display_mode dmt028vghmcmi_1a_mode = {
+ .clock = 22325,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 4,
+ .htotal = 480 + 40 + 4 + 20,
+
+ .vdisplay = 640,
+ .vsync_start = 640 + 2,
+ .vsync_end = 640 + 2 + 40,
+ .vtotal = 640 + 2 + 40 + 16,
+
+ .width_mm = 56,
+ .height_mm = 78,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc dmt028vghmcmi_1a_desc = {
+ .mode = &dmt028vghmcmi_1a_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 5, /* panel needs extra 5ms for sleep out cmd */
+
+ .pv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0x10),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x17),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xd),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x7),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x1f),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x11),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0xe),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x29),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC0_MASK, 0),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC4_MASK, 0xd),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC8_MASK, 0x14),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC16_MASK, 0xe),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC24_MASK, 0x11),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC52_MASK, 0x6),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC80_MASK, 0x4),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC108_MASK, 0x8),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC147_MASK, 0x8),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC175_MASK, 0x20),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC203_MASK, 0x5),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC231_MASK, 0x13),
+
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC239_MASK, 0x13),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC247_MASK, 0x26),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC251_MASK, 0x30),
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(DSI_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nlinv = 1,
+ .vop_uv = 4800000,
+ .vcom_uv = 1650000,
+ .vgh_mv = 15000,
+ .vgl_mv = -10170,
+ .avdd_mv = 6600,
+ .avcl_mv = -4400,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIN,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = dmt028vghmcmi_1a_gip_sequence,
};
static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
{
const struct st7701_panel_desc *desc;
struct st7701 *st7701;
- int ret, i;
+ int ret;
st7701 = devm_kzalloc(&dsi->dev, sizeof(*st7701), GFP_KERNEL);
if (!st7701)
return -ENOMEM;
desc = of_device_get_match_data(&dsi->dev);
- dsi->mode_flags = desc->flags;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- st7701->supplies = devm_kcalloc(&dsi->dev, desc->num_supplies,
- sizeof(*st7701->supplies),
- GFP_KERNEL);
- if (!st7701->supplies)
- return -ENOMEM;
-
- for (i = 0; i < desc->num_supplies; i++)
- st7701->supplies[i].supply = desc->supply_names[i];
+ st7701->supplies[0].supply = "VCC";
+ st7701->supplies[1].supply = "IOVCC";
- ret = devm_regulator_bulk_get(&dsi->dev, desc->num_supplies,
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(st7701->supplies),
st7701->supplies);
if (ret < 0)
return ret;
@@ -387,17 +765,16 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi)
return mipi_dsi_attach(dsi);
}
-static int st7701_dsi_remove(struct mipi_dsi_device *dsi)
+static void st7701_dsi_remove(struct mipi_dsi_device *dsi)
{
struct st7701 *st7701 = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(dsi);
drm_panel_remove(&st7701->panel);
-
- return 0;
}
static const struct of_device_id st7701_of_match[] = {
+ { .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
{ }
};
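CFIELD_PREP() introduced above is a constant-expression counterpart of FIELD_PREP(), needed because the gamma tables are file-scope initializers where FIELD_PREP()'s compile-time checks cannot be used. A stand-alone user-space sketch of what it computes; GENMASK() and the macro are redeclared here so the snippet builds outside the kernel, and the 0x95 result matches the third byte of the NVGAMCTRL sequence that the removed hard-coded ST7701_DSI() call used to send:

#include <stdio.h>
#include <stdint.h>

/* User-space stand-ins for the kernel helpers used by the driver. */
#define GENMASK(h, l)	(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))
#define CFIELD_PREP(_mask, _val) \
	(((typeof(_mask))(_val) << (__builtin_ffsll(_mask) - 1)) & (_mask))

int main(void)
{
	/* First pv_gamma byte of ts8550b: AJ = 0, VC0 = 0 -> 0x00 */
	uint64_t b0 = CFIELD_PREP(GENMASK(7, 6), 0) |
		      CFIELD_PREP(GENMASK(3, 0), 0);
	/* Third nv_gamma byte: AJ = 0x2 in [7:6], VC8 = 0x15 in [5:0] -> 0x95 */
	uint64_t b2 = CFIELD_PREP(GENMASK(7, 6), 0x2) |
		      CFIELD_PREP(GENMASK(5, 0), 0x15);

	printf("0x%02llx 0x%02llx\n",
	       (unsigned long long)b0, (unsigned long long)b2);
	return 0;
}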
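The BK1 writes in the new init sequence derive each register byte from the electrical parameters using the formulas quoted in the comments. As a stand-alone arithmetic check (a sketch, not driver code), plugging in the ts8550b descriptor values reproduces the bytes the removed DSI_CMD2_BK1_* constants hard-coded (0x45, 0x13, 0x07, 0x78, 0x78); DIV_ROUND_CLOSEST() is redeclared in a simplified form that is valid for the non-negative operands used here:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* Electrical parameters from the ts8550b descriptor above. */
	unsigned int vop_uv = 4400000, vcom_uv = 337500, vgh_mv = 15000;
	unsigned int t2d_ns = 1600, t3d_ns = 10400;

	/* Vop  = 3.5375 V + VRHA  * 0.0125 V */
	unsigned int vrha  = DIV_ROUND_CLOSEST(vop_uv - 3537500, 12500);
	/* Vcom = 0.1 V    + VCOM  * 0.0125 V */
	unsigned int vcom  = DIV_ROUND_CLOSEST(vcom_uv - 100000, 12500);
	/* Vgh  = 11.5 V   + VGHSS * 0.5 V */
	unsigned int vghss = DIV_ROUND_CLOSEST(vgh_mv - 11500, 500);
	/* T2D = 0.2 us * T2D[3:0];  T3D = 4 us + 0.8 us * T3D[3:0] */
	unsigned int spd1  = 0x70 | DIV_ROUND_CLOSEST(t2d_ns, 200);
	unsigned int spd2  = 0x70 | DIV_ROUND_CLOSEST(t3d_ns - 4000, 800);

	printf("VRHA=0x%02x VCOM=0x%02x VGHSS=0x%02x SPD1=0x%02x SPD2=0x%02x\n",
	       vrha, vcom, vghss, spd1, spd2);
	return 0;
}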
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 73f69c929a75..86a472b01360 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -598,7 +598,7 @@ static void st7703_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int st7703_remove(struct mipi_dsi_device *dsi)
+static void st7703_remove(struct mipi_dsi_device *dsi)
{
struct st7703 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -612,8 +612,6 @@ static int st7703_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
st7703_debugfs_remove(ctx);
-
- return 0;
}
static const struct of_device_id st7703_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index 69f07b15fca4..fa9be3c299c0 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -517,7 +517,7 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static int truly_nt35521_remove(struct mipi_dsi_device *dsi)
+static void truly_nt35521_remove(struct mipi_dsi_device *dsi)
{
struct truly_nt35521 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -527,8 +527,6 @@ static int truly_nt35521_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id truly_nt35521_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
index 820731be7147..d8487bc6d611 100644
--- a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
+++ b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
@@ -210,7 +210,7 @@ static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi)
return mipi_dsi_attach(dsi);
}
-static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
+static void tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
{
struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
int err;
@@ -222,8 +222,6 @@ static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&tdo_tl070wsh30->base);
drm_panel_disable(&tdo_tl070wsh30->base);
drm_panel_unprepare(&tdo_tl070wsh30->base);
-
- return 0;
}
static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 9ca5c7ff41d6..b31cffb660a7 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -616,7 +616,7 @@ err_panel_add:
return ret;
}
-static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
+static void truly_nt35597_remove(struct mipi_dsi_device *dsi)
{
struct truly_nt35597 *ctx = mipi_dsi_get_drvdata(dsi);
@@ -628,7 +628,6 @@ static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
}
drm_panel_remove(&ctx->panel);
- return 0;
}
static const struct of_device_id truly_nt35597_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index db2443ac81d3..ec228c269146 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -256,7 +256,7 @@ err_dsi_attach:
return ret;
}
-static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
{
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
@@ -264,7 +264,6 @@ static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
mipi_dsi_device_unregister(ctx->dsi);
drm_panel_remove(&ctx->panel);
- return 0;
}
static const struct of_device_id visionox_rm69299_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 8177f5a360fb..2c54733ee241 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -339,7 +339,7 @@ static void xpp055c272_shutdown(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}
-static int xpp055c272_remove(struct mipi_dsi_device *dsi)
+static void xpp055c272_remove(struct mipi_dsi_device *dsi)
{
struct xpp055c272 *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
@@ -351,8 +351,6 @@ static int xpp055c272_remove(struct mipi_dsi_device *dsi)
dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&ctx->panel);
-
- return 0;
}
static const struct of_device_id xpp055c272_of_match[] = {
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
index 86cdc0ce79e6..079600328be1 100644
--- a/drivers/gpu/drm/panfrost/Kconfig
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -11,6 +11,7 @@ config DRM_PANFROST
select DRM_GEM_SHMEM_HELPER
select PM_DEVFREQ
select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select WANT_DEV_COREDUMP
help
DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
Bifrost (G3x, G5x, G7x) GPUs.
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index b71935862417..7da2b3f02ed9 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -9,6 +9,7 @@ panfrost-y := \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o \
- panfrost_perfcnt.o
+ panfrost_perfcnt.o \
+ panfrost_dump.o
obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5110cd9b2425..fe5f12f16a63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -131,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
+
+ /*
+ * Set the recommended OPP; this will enable and configure the regulator,
+ * if any, and will avoid a switch-off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ return ret;
+ }
+
dev_pm_opp_put(opp);
/*
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
new file mode 100644
index 000000000000..89056a1aac7d
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021 Collabora ltd. */
+
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/devcoredump.h>
+#include <linux/moduleparam.h>
+#include <linux/iosys-map.h>
+#include <drm/panfrost_drm.h>
+#include <drm/drm_device.h>
+
+#include "panfrost_job.h"
+#include "panfrost_gem.h"
+#include "panfrost_regs.h"
+#include "panfrost_dump.h"
+#include "panfrost_device.h"
+
+static bool panfrost_dump_core = true;
+module_param_named(dump_core, panfrost_dump_core, bool, 0600);
+
+struct panfrost_dump_iterator {
+ void *start;
+ struct panfrost_dump_object_header *hdr;
+ void *data;
+};
+
+static const unsigned short panfrost_dump_registers[] = {
+ SHADER_READY_LO,
+ SHADER_READY_HI,
+ TILER_READY_LO,
+ TILER_READY_HI,
+ L2_READY_LO,
+ L2_READY_HI,
+ JOB_INT_MASK,
+ JOB_INT_STAT,
+ JS_HEAD_LO(0),
+ JS_HEAD_HI(0),
+ JS_TAIL_LO(0),
+ JS_TAIL_HI(0),
+ JS_AFFINITY_LO(0),
+ JS_AFFINITY_HI(0),
+ JS_CONFIG(0),
+ JS_STATUS(0),
+ JS_HEAD_NEXT_LO(0),
+ JS_HEAD_NEXT_HI(0),
+ JS_AFFINITY_NEXT_LO(0),
+ JS_AFFINITY_NEXT_HI(0),
+ JS_CONFIG_NEXT(0),
+ MMU_INT_MASK,
+ MMU_INT_STAT,
+ AS_TRANSTAB_LO(0),
+ AS_TRANSTAB_HI(0),
+ AS_MEMATTR_LO(0),
+ AS_MEMATTR_HI(0),
+ AS_FAULTSTATUS(0),
+ AS_FAULTADDRESS_LO(0),
+ AS_FAULTADDRESS_HI(0),
+ AS_STATUS(0),
+};
+
+static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
+ u32 type, void *data_end)
+{
+ struct panfrost_dump_object_header *hdr = iter->hdr;
+
+ hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
+ hdr->type = cpu_to_le32(type);
+ hdr->file_offset = cpu_to_le32(iter->data - iter->start);
+ hdr->file_size = cpu_to_le32(data_end - iter->data);
+
+ iter->hdr++;
+ iter->data += le32_to_cpu(hdr->file_size);
+}
+
+static void
+panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
+ struct panfrost_device *pfdev,
+ u32 as_nr, int slot)
+{
+ struct panfrost_dump_registers *dumpreg = iter->data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(panfrost_dump_registers); i++, dumpreg++) {
+ unsigned int js_as_offset = 0;
+ unsigned int reg;
+
+ if (panfrost_dump_registers[i] >= JS_BASE &&
+ panfrost_dump_registers[i] <= JS_BASE + JS_SLOT_STRIDE)
+ js_as_offset = slot * JS_SLOT_STRIDE;
+ else if (panfrost_dump_registers[i] >= MMU_BASE &&
+ panfrost_dump_registers[i] <= MMU_BASE + MMU_AS_STRIDE)
+ js_as_offset = (as_nr << MMU_AS_SHIFT);
+
+ reg = panfrost_dump_registers[i] + js_as_offset;
+
+ dumpreg->reg = cpu_to_le32(reg);
+ dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+ }
+
+ panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
+}
+
+void panfrost_core_dump(struct panfrost_job *job)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ struct panfrost_dump_iterator iter;
+ struct drm_gem_object *dbo;
+ unsigned int n_obj, n_bomap_pages;
+ __le64 *bomap, *bomap_start;
+ size_t file_size;
+ u32 as_nr;
+ int slot;
+ int ret, i;
+
+ as_nr = job->mmu->as;
+ slot = panfrost_job_get_slot(job);
+
+ /* Only catch the first event, or when manually re-armed */
+ if (!panfrost_dump_core)
+ return;
+ panfrost_dump_core = false;
+
+ /* At least, we dump registers and end marker */
+ n_obj = 2;
+ n_bomap_pages = 0;
+ file_size = ARRAY_SIZE(panfrost_dump_registers) *
+ sizeof(struct panfrost_dump_registers);
+
+ /* Add in the active buffer objects */
+ for (i = 0; i < job->bo_count; i++) {
+ /*
+ * Even though the CPU could be configured to use 16K or 64K pages, this
+ * is a very unusual situation for most kernel setups on SoCs that have
+ * a Panfrost device. Also many places across the driver make the somewhat
+ * arbitrary assumption that Panfrost's MMU page size is the same as the CPU's,
+ * so let's have a sanity check to ensure that's always the case.
+ */
+ dbo = job->bos[i];
+ WARN_ON(!IS_ALIGNED(dbo->size, PAGE_SIZE));
+
+ file_size += dbo->size;
+ n_bomap_pages += dbo->size >> PAGE_SHIFT;
+ n_obj++;
+ }
+
+ /* If we have any buffer objects, add a bomap object */
+ if (n_bomap_pages) {
+ file_size += n_bomap_pages * sizeof(*bomap);
+ n_obj++;
+ }
+
+ /* Add the size of the headers */
+ file_size += sizeof(*iter.hdr) * n_obj;
+
+ /*
+ * Allocate the file in vmalloc memory; it's likely to be big.
+ * The reason behind these GFP flags is that we don't want to trigger the
+ * OOM killer in the event that not enough memory could be found for our
+ * dump file. We also don't want the allocator to do any error reporting,
+ * as the right behaviour is failing gracefully if a big enough buffer
+ * could not be allocated.
+ */
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
+ if (!iter.start) {
+ dev_warn(pfdev->dev, "failed to allocate devcoredump file\n");
+ return;
+ }
+
+ /* Point the data member after the headers */
+ iter.hdr = iter.start;
+ iter.data = &iter.hdr[n_obj];
+
+ memset(iter.hdr, 0, iter.data - iter.start);
+
+ /*
+ * For now, we write the job identifier in the register dump header,
+ * so that we can decode the entire dump later with pandecode
+ */
+ iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
+ iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
+ iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
+ iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
+ iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+
+ panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
+
+ /* Reserve space for the bomap */
+ if (job->bo_count) {
+ bomap_start = bomap = iter.data;
+ memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
+ panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BOMAP,
+ bomap + n_bomap_pages);
+ }
+
+ for (i = 0; i < job->bo_count; i++) {
+ struct iosys_map map;
+ struct panfrost_gem_mapping *mapping;
+ struct panfrost_gem_object *bo;
+ struct sg_page_iter page_iter;
+ void *vaddr;
+
+ bo = to_panfrost_bo(job->bos[i]);
+ mapping = job->mappings[i];
+
+ if (!bo->base.sgt) {
+ dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
+ iter.hdr->bomap.valid = 0;
+ goto dump_header;
+ }
+
+ ret = drm_gem_shmem_vmap(&bo->base, &map);
+ if (ret) {
+ dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
+ iter.hdr->bomap.valid = 0;
+ goto dump_header;
+ }
+
+ WARN_ON(!mapping->active);
+
+ iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+
+ for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
+ struct page *page = sg_page_iter_page(&page_iter);
+
+ if (!IS_ERR(page)) {
+ *bomap++ = cpu_to_le64(page_to_phys(page));
+ } else {
+ dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
+ *bomap++ = ~cpu_to_le64(0);
+ }
+ }
+
+ iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+
+ vaddr = map.vaddr;
+ memcpy(iter.data, vaddr, bo->base.base.size);
+
+ drm_gem_shmem_vunmap(&bo->base, &map);
+
+ iter.hdr->bomap.valid = cpu_to_le32(1);
+
+dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
+ bo->base.base.size);
+ }
+ panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
+
+ dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+}
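panfrost_core_dump() lays the dump out as a contiguous array of panfrost_dump_object_header entries at offset 0, each locating its payload through file_offset/file_size, followed by the register dump, the optional BO map and the BO contents, and terminated by a PANFROSTDUMP_BUF_TRAILER header. A minimal user-space reader sketch that only walks the headers; it assumes the devcoredump has been copied to a file (e.g. from /sys/class/devcoredump/devcd*/data), a little-endian host so the little-endian fields can be read directly, and that the structure and PANFROSTDUMP_* names come from the UAPI header <drm/panfrost_drm.h> included above:

#include <stdio.h>
#include <stdlib.h>
#include <drm/panfrost_drm.h>

int main(int argc, char **argv)
{
	const struct panfrost_dump_object_header *hdr;
	long size;
	void *buf;
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	buf = malloc(size);
	if (!buf || fread(buf, 1, size, f) != (size_t)size)
		return 1;

	/* Headers sit back to back at the start of the file. */
	for (hdr = buf; hdr->magic == PANFROSTDUMP_MAGIC; hdr++) {
		printf("object type %u: %u bytes at offset %u\n",
		       hdr->type, hdr->file_size, hdr->file_offset);
		if (hdr->type == PANFROSTDUMP_BUF_TRAILER)
			break;
	}

	free(buf);
	fclose(f);
	return 0;
}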
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.h b/drivers/gpu/drm/panfrost/panfrost_dump.h
new file mode 100644
index 000000000000..7d9bcefa5346
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Collabora ltd.
+ */
+
+#ifndef PANFROST_DUMP_H
+#define PANFROST_DUMP_H
+
+struct panfrost_job;
+void panfrost_core_dump(struct panfrost_job *job);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 7c4208476fbd..dbc597ab46fb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -20,6 +20,7 @@
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"
+#include "panfrost_dump.h"
#define JOB_TIMEOUT_MS 500
@@ -727,6 +728,8 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
+ panfrost_core_dump(job);
+
atomic_set(&pfdev->reset.pending, 1);
panfrost_reset(pfdev, sched_job);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index b285a8001b1d..e246d914e7f6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -248,11 +248,15 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
mmu_write(pfdev, MMU_INT_MASK, ~0);
}
-static size_t get_pgsize(u64 addr, size_t size)
+static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
- if (addr & (SZ_2M - 1) || size < SZ_2M)
- return SZ_4K;
+ size_t blk_offset = -addr % SZ_2M;
+ if (blk_offset || size < SZ_2M) {
+ *count = min_not_zero(blk_offset, size) / SZ_4K;
+ return SZ_4K;
+ }
+ *count = size / SZ_2M;
return SZ_2M;
}
@@ -287,12 +291,16 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
while (len) {
- size_t pgsize = get_pgsize(iova | paddr, len);
-
- ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
- iova += pgsize;
- paddr += pgsize;
- len -= pgsize;
+ size_t pgcount, mapped = 0;
+ size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
+
+ ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+ GFP_KERNEL, &mapped);
+ /* Don't get stuck if things have gone wrong */
+ mapped = max(mapped, pgsize);
+ iova += mapped;
+ paddr += mapped;
+ len -= mapped;
}
}
@@ -344,15 +352,17 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
mapping->mmu->as, iova, len);
while (unmapped_len < len) {
- size_t unmapped_page;
- size_t pgsize = get_pgsize(iova, len - unmapped_len);
-
- if (ops->iova_to_phys(ops, iova)) {
- unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
- WARN_ON(unmapped_page != pgsize);
+ size_t unmapped_page, pgcount;
+ size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+ if (bo->is_heap)
+ pgcount = 1;
+ if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
+ unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+ WARN_ON(unmapped_page != pgsize * pgcount);
}
- iova += pgsize;
- unmapped_len += pgsize;
+ iova += pgsize * pgcount;
+ unmapped_len += pgsize * pgcount;
}
panfrost_mmu_flush_range(pfdev, mapping->mmu,
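The reworked get_pgsize() now reports both a page size and how many pages of that size can be handled before the next 2 MiB boundary, which lets mmu_map_sg() and panfrost_mmu_unmap() batch their io-pgtable calls through map_pages()/unmap_pages(). A stand-alone sketch of the same arithmetic with two sample inputs; SZ_* and min_not_zero() are redeclared in simplified form so the snippet builds outside the kernel:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define SZ_4K	0x1000UL
#define SZ_2M	0x200000UL
/* Simplified user-space stand-in for the kernel's min_not_zero(). */
#define min_not_zero(a, b) \
	((a) ? ((b) ? ((a) < (b) ? (a) : (b)) : (a)) : (b))

/* Same logic as the reworked helper above. */
static size_t get_pgsize(uint64_t addr, size_t size, size_t *count)
{
	/* Bytes from addr up to the next 2 MiB boundary (0 if aligned). */
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}
	*count = size / SZ_2M;
	return SZ_2M;
}

int main(void)
{
	size_t count, pgsize;

	/* Unaligned start: 4 KiB pages up to the 2 MiB boundary. */
	pgsize = get_pgsize(0x201000, 0x5ff000, &count);
	printf("%zu pages of %zu bytes\n", count, pgsize); /* 511 x 4096 */

	/* Aligned start, large run: whole 2 MiB blocks. */
	pgsize = get_pgsize(0x400000, 0x400000, &count);
	printf("%zu pages of %zu bytes\n", count, pgsize); /* 2 x 2097152 */

	return 0;
}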
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
index accb4fa3adb8..919f44ac853d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_regs.h
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -226,23 +226,25 @@
#define JOB_INT_MASK_DONE(j) BIT(j)
#define JS_BASE 0x1800
-#define JS_HEAD_LO(n) (JS_BASE + ((n) * 0x80) + 0x00)
-#define JS_HEAD_HI(n) (JS_BASE + ((n) * 0x80) + 0x04)
-#define JS_TAIL_LO(n) (JS_BASE + ((n) * 0x80) + 0x08)
-#define JS_TAIL_HI(n) (JS_BASE + ((n) * 0x80) + 0x0c)
-#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * 0x80) + 0x10)
-#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * 0x80) + 0x14)
-#define JS_CONFIG(n) (JS_BASE + ((n) * 0x80) + 0x18)
-#define JS_XAFFINITY(n) (JS_BASE + ((n) * 0x80) + 0x1c)
-#define JS_COMMAND(n) (JS_BASE + ((n) * 0x80) + 0x20)
-#define JS_STATUS(n) (JS_BASE + ((n) * 0x80) + 0x24)
-#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x40)
-#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x44)
-#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x50)
-#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x54)
-#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x58)
-#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x60)
-#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x70)
+#define JS_SLOT_STRIDE 0x80
+
+#define JS_HEAD_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x00)
+#define JS_HEAD_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x04)
+#define JS_TAIL_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x08)
+#define JS_TAIL_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x0c)
+#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x10)
+#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x14)
+#define JS_CONFIG(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x18)
+#define JS_XAFFINITY(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x1c)
+#define JS_COMMAND(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x20)
+#define JS_STATUS(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x24)
+#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x40)
+#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x44)
+#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x50)
+#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x54)
+#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x58)
+#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x60)
+#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x70)
/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
#define JS_CONFIG_START_FLUSH_CLEAN BIT(8)
@@ -281,7 +283,9 @@
#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1s cache then
flush all L2 caches then issue a flush region command to all MMUs */
-#define MMU_AS(as) (0x2400 + ((as) << 6))
+#define MMU_BASE 0x2400
+#define MMU_AS_SHIFT 0x06
+#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x00) /* (RW) Translation Table Base Address for address space n, low word */
#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
@@ -300,6 +304,8 @@
#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
+#define MMU_AS_STRIDE (1 << MMU_AS_SHIFT)
+
/*
* Begin LPAE MMU TRANSTAB register values
*/
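JS_SLOT_STRIDE and the MMU_BASE/MMU_AS_SHIFT/MMU_AS_STRIDE definitions exist so panfrost_core_dump_registers() can keep slot-0/AS-0 register numbers in its table and relocate them by adding a per-slot or per-AS offset. A tiny sketch of that relocation arithmetic, using only macros visible in this hunk:

#include <assert.h>

#define JS_BASE			0x1800
#define JS_SLOT_STRIDE		0x80
#define JS_STATUS(n)		(JS_BASE + ((n) * JS_SLOT_STRIDE) + 0x24)

#define MMU_BASE		0x2400
#define MMU_AS_SHIFT		0x06
#define MMU_AS(as)		(MMU_BASE + ((as) << MMU_AS_SHIFT))
#define AS_TRANSTAB_LO(as)	(MMU_AS(as) + 0x00)

int main(void)
{
	/* Adding the stride to a slot-0/AS-0 register number lands exactly
	 * on the corresponding per-slot/per-AS register. */
	assert(JS_STATUS(0) + 2 * JS_SLOT_STRIDE == JS_STATUS(2));
	assert(AS_TRANSTAB_LO(0) + (3 << MMU_AS_SHIFT) == AS_TRANSTAB_LO(3));
	return 0;
}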
diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig
index 91ee05b01303..ad24cdf1d992 100644
--- a/drivers/gpu/drm/pl111/Kconfig
+++ b/drivers/gpu/drm/pl111/Kconfig
@@ -6,7 +6,7 @@ config DRM_PL111
depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n
depends on COMMON_CLK
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 6263346f24c6..6afdf260a4e2 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -15,11 +15,11 @@
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "pl111_drm.h"
@@ -94,7 +94,7 @@ static int pl111_display_check(struct drm_simple_display_pipe *pipe,
return -EINVAL;
if (fb) {
- u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3)
@@ -398,7 +398,7 @@ static void pl111_display_update(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
- u32 addr = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 addr = drm_fb_dma_get_gem_addr(fb, pstate, 0);
writel(addr, priv->regs + CLCD_UBAS);
}
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 19a4324bd356..eb25eedb5ee0 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -48,10 +48,9 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -208,10 +207,10 @@ pl111_gem_import_sg_table(struct drm_device *dev,
if (priv->use_device_memory)
return ERR_PTR(-EINVAL);
- return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver pl111_drm_driver = {
.driver_features =
@@ -224,7 +223,7 @@ static const struct drm_driver pl111_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = drm_gem_dma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
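
The pl111 changes above are part of the tree-wide rename of the DRM "CMA" GEM helpers to "DMA" helpers (the objects are allocated through the DMA mapping API, which may or may not be backed by CMA), so only the names change: drm_gem_cma_* becomes drm_gem_dma_*, drm_fb_cma_get_gem_addr() becomes drm_fb_dma_get_gem_addr(), and DEFINE_DRM_GEM_CMA_FOPS becomes DEFINE_DRM_GEM_DMA_FOPS. A minimal sketch of a driver skeleton wired up with the renamed helpers; the driver name, description and feature flags are placeholders, not pl111's:

#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>

DEFINE_DRM_GEM_DMA_FOPS(example_fops);

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	/* Renamed dumb-buffer helper, formerly drm_gem_cma_dumb_create. */
	.dumb_create = drm_gem_dma_dumb_create,
	.fops = &example_fops,
	.name = "example",
	.desc = "Example driver using the DMA GEM helpers",
	.major = 1,
	.minor = 0,
};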
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index efb01a554574..1b436b75fd39 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -404,6 +404,7 @@ static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
if (of_device_is_compatible(child, "arm,pl111")) {
has_coretile_clcd = true;
ct_clcd = child;
+ of_node_put(child);
break;
}
if (of_device_is_compatible(child, "arm,hdlcd")) {
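
The one-line pl111_versatile.c fix above plugs an OF node refcount leak: the child-node iterator takes a reference on each node it returns and only drops it when advancing to the next child, so a loop that breaks out early must drop (or deliberately keep) that reference itself. A minimal sketch of the pattern outside pl111; the "panel" node name and the surrounding function are made up:

#include <linux/of.h>

/* Check whether @parent has a child node called "panel" (made-up name). */
static bool example_has_panel_child(struct device_node *parent)
{
	struct device_node *child;
	bool found = false;

	for_each_child_of_node(parent, child) {
		if (of_node_name_eq(child, "panel")) {
			found = true;
			/* The iterator took a reference; drop it before breaking. */
			of_node_put(child);
			break;
		}
	}
	return found;
}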
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 7b00c955cd82..63aa96a69752 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -53,17 +53,11 @@ void qxl_ring_free(struct qxl_ring *ring)
kfree(ring);
}
-void qxl_ring_init_hdr(struct qxl_ring *ring)
-{
- ring->ring->header.notify_on_prod = ring->n_elements;
-}
-
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
- bool set_prod_notify,
wait_queue_head_t *push_event)
{
struct qxl_ring *ring;
@@ -77,8 +71,6 @@ qxl_ring_create(struct qxl_ring_header *header,
ring->n_elements = n_elements;
ring->prod_notify = prod_notify;
ring->push_event = push_event;
- if (set_prod_notify)
- qxl_ring_init_hdr(ring);
spin_lock_init(&ring->lock);
return ring;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 2e8949863d6b..a152a7c6db21 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -902,7 +902,7 @@ static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = {
static const struct drm_plane_funcs qxl_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -924,7 +924,7 @@ static const struct drm_plane_helper_funcs primary_helper_funcs = {
static const struct drm_plane_funcs qxl_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1cb6f0c224bb..3044ca948ce2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -194,7 +194,6 @@ static int qxl_drm_resume(struct drm_device *dev, bool thaw)
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (!thaw) {
qxl_reinit_memslots(qdev);
- qxl_ring_init_hdr(qdev->release_ring);
}
qxl_create_monitors_object(qdev);
@@ -220,6 +219,7 @@ static int qxl_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct qxl_device *qdev = to_qxl(drm_dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -227,6 +227,7 @@ static int qxl_pm_resume(struct device *dev)
return -EIO;
}
+ qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 47c169673088..432758ad39a3 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -277,10 +277,8 @@ struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
int element_size,
int n_elements,
int prod_notify,
- bool set_prod_notify,
wait_queue_head_t *push_event);
void qxl_ring_free(struct qxl_ring *ring);
-void qxl_ring_init_hdr(struct qxl_ring *ring);
int qxl_check_idle(struct qxl_ring *ring);
static inline uint64_t
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 9bf6d4cc98d4..dc3828db1991 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -194,7 +194,6 @@ int qxl_device_init(struct qxl_device *qdev,
sizeof(struct qxl_command),
QXL_COMMAND_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CMD,
- false,
&qdev->display_event);
if (!qdev->command_ring) {
DRM_ERROR("Unable to create command ring\n");
@@ -207,7 +206,6 @@ int qxl_device_init(struct qxl_device *qdev,
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CURSOR,
- false,
&qdev->cursor_event);
if (!qdev->cursor_ring) {
@@ -219,7 +217,7 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
- QXL_RELEASE_RING_SIZE, 0, true,
+ QXL_RELEASE_RING_SIZE, 0,
NULL);
if (!qdev->release_ring) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index b42a657e4c2f..695d9308d1f0 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -141,7 +141,7 @@ int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
qxl_ttm_placement_from_domain(bo, domain);
bo->tbo.priority = priority;
- r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type,
&bo->placement, 0, &ctx, NULL, NULL,
&qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index e3ab3aca1396..bb4e56f2f170 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o \
- radeon_sync.o radeon_audio.o radeon_dp_auxch.o radeon_dp_mst.o
+ radeon_sync.o radeon_audio.o radeon_dp_auxch.o
radeon-$(CONFIG_MMU_NOTIFIER) += radeon_mn.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 69f1bc073902..d28d3acb3ba1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -617,13 +617,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
- if (radeon_encoder->is_mst_encoder) {
- struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- }
-
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_crtc->ss_enabled) {
@@ -972,9 +965,7 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
- if (radeon_encoder->is_mst_encoder) {
- radeon_dp_mst_prepare_pll(crtc, mode);
- } else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c93040e60d04..c841c273222e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -32,6 +32,8 @@
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>
+#include <acpi/video.h>
+
#include "atom.h"
#include "radeon_atombios.h"
#include "radeon.h"
@@ -209,6 +211,11 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping radeon atom DIG backlight registration\n");
+ return;
+ }
+
pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
@@ -667,15 +674,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- struct radeon_encoder_atom_dig *dig_enc;
- if (radeon_encoder_is_digital(encoder)) {
- dig_enc = radeon_encoder->enc_priv;
- if (dig_enc->active_mst_links)
- return ATOM_ENCODER_MODE_DP_MST;
- }
- if (radeon_encoder->is_mst_encoder || radeon_encoder->offset)
- return ATOM_ENCODER_MODE_DP_MST;
/* dp bridges are always DP */
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
@@ -1723,10 +1722,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* don't power off encoders with active MST links */
- if (dig->active_mst_links)
- return;
-
if (ASIC_IS_DCE4(rdev)) {
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1992,53 +1987,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-void
-atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
- uint8_t frev, crev;
- union crtc_source_param args;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- if (frev != 1 && crev != 2)
- DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev);
-
- args.v2.ucCRTC = radeon_crtc->crtc_id;
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST;
-
- switch (fe) {
- case 0:
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- break;
- case 1:
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
- case 2:
- args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
- break;
- case 3:
- args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
- break;
- case 4:
- args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
- break;
- case 5:
- args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
- break;
- case 6:
- args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
- break;
- }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
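
Besides removing the MST paths, the atombios_encoders.c hunks hook radeon into the ACPI video backlight handover: radeon_atom_backlight_init() now bails out when acpi_video_backlight_use_native() reports that the firmware (ACPI video) backlight interface should be used rather than the GPU's native one, the legacy LVDS path further below gains the same guard, and radeon_encoder_add_backlight() falls back to acpi_video_register_backlight() when no native device ends up registered. A minimal sketch of the guard pattern, with an illustrative function name and message rather than radeon's:

#include <acpi/video.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>

static void example_backlight_init(struct drm_device *dev)
{
	/*
	 * Defer to the ACPI video driver when the platform prefers the
	 * firmware backlight interface over native GPU control.
	 */
	if (!acpi_video_backlight_use_native()) {
		drm_info(dev, "Skipping native backlight registration\n");
		return;
	}

	/* ... register the native backlight_device here ... */
}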
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ac006bed4743..8ef25ab305ae 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2056,7 +2056,7 @@ static void ci_clear_vc(struct radeon_device *rdev)
static int ci_upload_firmware(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- int i, ret;
+ int i;
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
@@ -2067,9 +2067,7 @@ static int ci_upload_firmware(struct radeon_device *rdev)
ci_stop_smc_clock(rdev);
ci_reset_smc(rdev);
- ret = ci_load_smc_ucode(rdev, pi->sram_end);
-
- return ret;
+ return ci_load_smc_ucode(rdev, pi->sram_end);
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 8be4799a98ef..638f861af80f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -34,8 +34,6 @@
#include "r600_reg_safe.h"
static int r600_nomm;
-extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
-
struct r600_cs_track {
/* configuration we mirror so that we use same code btw kms/ums */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 08f83bf2c330..166c18d62f6d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -116,7 +116,6 @@ extern int radeon_use_pflipirq;
extern int radeon_bapm;
extern int radeon_backlight;
extern int radeon_auxch;
-extern int radeon_mst;
extern int radeon_uvd;
extern int radeon_vce;
extern int radeon_si_support;
@@ -2950,8 +2949,6 @@ struct radeon_hdmi_acr {
};
-extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
-
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 28c4413f4dc8..204127bad89c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -826,8 +826,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
}
radeon_link_encoder_connector(dev);
-
- radeon_setup_mst_connector(dev);
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 58db79921cd3..f7431d224604 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -37,33 +37,12 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
-static int radeon_dp_handle_hpd(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_dp_mst_check_status(radeon_connector);
- if (ret == -EINVAL)
- return 1;
- return 0;
-}
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- if (radeon_connector->is_mst_connector)
- return;
- if (dig_connector->is_mst) {
- radeon_dp_handle_hpd(connector);
- return;
- }
- }
/* bail if the connector does not have hpd pin, e.g.,
* VGA, TV, etc.
*/
@@ -1664,9 +1643,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int r;
- if (radeon_dig_connector->is_mst)
- return connector_status_disconnected;
-
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
@@ -1729,21 +1705,12 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
radeon_dp_getdpcd(radeon_connector);
- r = radeon_dp_mst_probe(radeon_connector);
- if (r == 1)
- ret = connector_status_disconnected;
- }
} else {
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- if (radeon_dp_getdpcd(radeon_connector)) {
- r = radeon_dp_mst_probe(radeon_connector);
- if (r == 1)
- ret = connector_status_disconnected;
- else
- ret = connector_status_connected;
- }
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
@@ -2561,25 +2528,3 @@ radeon_add_legacy_connector(struct drm_device *dev,
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
-
-void radeon_setup_mst_connector(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
-
- if (!ASIC_IS_DCE5(rdev))
- return;
-
- if (radeon_mst == 0)
- return;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- radeon_connector = to_radeon_connector(connector);
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- radeon_dp_mst_init(radeon_connector);
- }
-}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ee0165687239..a556b6be1137 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1438,7 +1438,6 @@ int radeon_device_init(struct radeon_device *rdev,
goto failed;
radeon_gem_debugfs_init(rdev);
- radeon_mst_debugfs_init(rdev);
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f12675e3d261..ca5598ae8bfc 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -38,7 +38,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
deleted file mode 100644
index 54ced1f4ff67..000000000000
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ /dev/null
@@ -1,778 +0,0 @@
-// SPDX-License-Identifier: MIT
-
-#include <drm/display/drm_dp_mst_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_file.h>
-#include <drm/drm_probe_helper.h>
-
-#include "atom.h"
-#include "ni_reg.h"
-#include "radeon.h"
-
-static struct radeon_encoder *radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector);
-
-static int radeon_atom_set_enc_offset(int id)
-{
- static const int offsets[] = { EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- 0x13830 - 0x7030 };
-
- return offsets[id];
-}
-
-static int radeon_dp_mst_set_be_cntl(struct radeon_encoder *primary,
- struct radeon_encoder_mst *mst_enc,
- enum radeon_hpd_id hpd, bool enable)
-{
- struct drm_device *dev = primary->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- uint32_t reg;
- int retries = 0;
- uint32_t temp;
-
- reg = RREG32(NI_DIG_BE_CNTL + primary->offset);
-
- /* set MST mode */
- reg &= ~NI_DIG_FE_DIG_MODE(7);
- reg |= NI_DIG_FE_DIG_MODE(NI_DIG_MODE_DP_MST);
-
- if (enable)
- reg |= NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
- else
- reg &= ~NI_DIG_FE_SOURCE_SELECT(1 << mst_enc->fe);
-
- reg |= NI_DIG_HPD_SELECT(hpd);
- DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DIG_BE_CNTL + primary->offset, reg);
- WREG32(NI_DIG_BE_CNTL + primary->offset, reg);
-
- if (enable) {
- uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
-
- do {
- temp = RREG32(NI_DIG_FE_CNTL + offset);
- } while ((temp & NI_DIG_SYMCLK_FE_ON) && retries++ < 10000);
- if (retries == 10000)
- DRM_ERROR("timed out waiting for FE %d %d\n", primary->offset, mst_enc->fe);
- }
- return 0;
-}
-
-static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
- int stream_number,
- int fe,
- int slots)
-{
- struct drm_device *dev = primary->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- u32 temp, val;
- int retries = 0;
- int satreg, satidx;
-
- satreg = stream_number >> 1;
- satidx = stream_number & 1;
-
- temp = RREG32(NI_DP_MSE_SAT0 + satreg + primary->offset);
-
- val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe);
-
- val <<= (16 * satidx);
-
- temp &= ~(0xffff << (16 * satidx));
-
- temp |= val;
-
- DRM_DEBUG_KMS("writing 0x%08x 0x%08x\n", NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
- WREG32(NI_DP_MSE_SAT0 + satreg + primary->offset, temp);
-
- WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
-
- do {
- unsigned value1, value2;
- udelay(10);
- temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
-
- value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
- value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
-
- if (!value1 && !value2)
- break;
- } while (retries++ < 50);
-
- if (retries == 10000)
- DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
-
- /* MTP 16 ? */
- return 0;
-}
-
-static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn,
- struct radeon_encoder *primary)
-{
- struct drm_device *dev = mst_conn->base.dev;
- struct stream_attribs new_attribs[6];
- int i;
- int idx = 0;
- struct radeon_connector *radeon_connector;
- struct drm_connector *connector;
-
- memset(new_attribs, 0, sizeof(new_attribs));
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_encoder *subenc;
- struct radeon_encoder_mst *mst_enc;
-
- radeon_connector = to_radeon_connector(connector);
- if (!radeon_connector->is_mst_connector)
- continue;
-
- if (radeon_connector->mst_port != mst_conn)
- continue;
-
- subenc = radeon_connector->mst_encoder;
- mst_enc = subenc->enc_priv;
-
- if (!mst_enc->enc_active)
- continue;
-
- new_attribs[idx].fe = mst_enc->fe;
- new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port);
- idx++;
- }
-
- for (i = 0; i < idx; i++) {
- if (new_attribs[i].fe != mst_conn->cur_stream_attribs[i].fe ||
- new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) {
- radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots);
- mst_conn->cur_stream_attribs[i].fe = new_attribs[i].fe;
- mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots;
- }
- }
-
- for (i = idx; i < mst_conn->enabled_attribs; i++) {
- radeon_dp_mst_set_stream_attrib(primary, i, 0, 0);
- mst_conn->cur_stream_attribs[i].fe = 0;
- mst_conn->cur_stream_attribs[i].slots = 0;
- }
- mst_conn->enabled_attribs = idx;
- return 0;
-}
-
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
-{
- struct drm_device *dev = mst->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder_mst *mst_enc = mst->enc_priv;
- uint32_t val, temp;
- uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
- int retries = 0;
- uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
- uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
-
- val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
-
- WREG32(NI_DP_MSE_RATE_CNTL + offset, val);
-
- do {
- temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
- udelay(10);
- } while ((temp & 0x1) && (retries++ < 10000));
-
- if (retries >= 10000)
- DRM_ERROR("timed out wait for rate cntl %d\n", mst_enc->fe);
- return 0;
-}
-
-static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
- struct edid *edid;
- int ret = 0;
-
- edid = drm_dp_mst_get_edid(connector, &master->mst_mgr, radeon_connector->port);
- radeon_connector->edid = edid;
- DRM_DEBUG_KMS("edid retrieved %p\n", edid);
- if (radeon_connector->edid) {
- drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
- ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- return ret;
- }
- drm_connector_update_edid_property(&radeon_connector->base, NULL);
-
- return ret;
-}
-
-static int radeon_dp_mst_get_modes(struct drm_connector *connector)
-{
- return radeon_dp_mst_get_ddc_modes(connector);
-}
-
-static enum drm_mode_status
-radeon_dp_mst_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TODO - validate mode against available PBN for link */
- if (mode->clock < 10000)
- return MODE_CLOCK_LOW;
-
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- return MODE_H_ILLEGAL;
-
- return MODE_OK;
-}
-
-static struct
-drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- return &radeon_connector->mst_encoder->base;
-}
-
-static int
-radeon_dp_mst_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct radeon_connector *radeon_connector =
- to_radeon_connector(connector);
- struct radeon_connector *master = radeon_connector->mst_port;
-
- if (drm_connector_is_unregistered(connector))
- return connector_status_disconnected;
-
- return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
- radeon_connector->port);
-}
-
-static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = {
- .get_modes = radeon_dp_mst_get_modes,
- .mode_valid = radeon_dp_mst_mode_valid,
- .best_encoder = radeon_mst_best_encoder,
- .detect_ctx = radeon_dp_mst_detect,
-};
-
-static void
-radeon_dp_mst_connector_destroy(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_encoder *radeon_encoder = radeon_connector->mst_encoder;
-
- drm_encoder_cleanup(&radeon_encoder->base);
- kfree(radeon_encoder);
- drm_connector_cleanup(connector);
- kfree(radeon_connector);
-}
-
-static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = radeon_dp_mst_connector_destroy,
-};
-
-static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- const char *pathprop)
-{
- struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct radeon_connector *radeon_connector;
- struct drm_connector *connector;
-
- radeon_connector = kzalloc(sizeof(*radeon_connector), GFP_KERNEL);
- if (!radeon_connector)
- return NULL;
-
- radeon_connector->is_mst_connector = true;
- connector = &radeon_connector->base;
- radeon_connector->port = port;
- radeon_connector->mst_port = master;
- DRM_DEBUG_KMS("\n");
-
- drm_connector_init(dev, connector, &radeon_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
- drm_connector_helper_add(connector, &radeon_dp_mst_connector_helper_funcs);
- radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
-
- drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
- drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_connector_set_path_property(connector, pathprop);
-
- return connector;
-}
-
-static const struct drm_dp_mst_topology_cbs mst_cbs = {
- .add_connector = radeon_dp_add_mst_connector,
-};
-
-static struct
-radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (!connector->encoder)
- continue;
- if (!radeon_connector->is_mst_connector)
- continue;
-
- DRM_DEBUG_KMS("checking %p vs %p\n", connector->encoder, encoder);
- if (connector->encoder == encoder)
- return radeon_connector;
- }
- return NULL;
-}
-
-void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(radeon_crtc->encoder);
- struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
- struct radeon_connector *radeon_connector = radeon_mst_find_connector(&radeon_encoder->base);
- int dp_clock;
- struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
-
- if (radeon_connector) {
- radeon_connector->pixelclock_for_modeset = mode->clock;
- if (radeon_connector->base.display_info.bpc)
- radeon_crtc->bpc = radeon_connector->base.display_info.bpc;
- else
- radeon_crtc->bpc = 8;
- }
-
- DRM_DEBUG_KMS("dp_clock %p %d\n", dig_connector, dig_connector->dp_clock);
- dp_clock = dig_connector->dp_clock;
- radeon_crtc->ss_enabled =
- radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
- ASIC_INTERNAL_SS_ON_DP,
- dp_clock);
-}
-
-static void
-radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder, *primary;
- struct radeon_encoder_mst *mst_enc;
- struct radeon_encoder_atom_dig *dig_enc;
- struct radeon_connector *radeon_connector;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
- int slots;
- s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
- if (!ASIC_IS_DCE5(rdev)) {
- DRM_ERROR("got mst dpms on non-DCE5\n");
- return;
- }
-
- radeon_connector = radeon_mst_find_connector(encoder);
- if (!radeon_connector)
- return;
-
- radeon_encoder = to_radeon_encoder(encoder);
-
- mst_enc = radeon_encoder->enc_priv;
-
- primary = mst_enc->primary;
-
- dig_enc = primary->enc_priv;
-
- crtc = encoder->crtc;
- DRM_DEBUG_KMS("got connector %d\n", dig_enc->active_mst_links);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- dig_enc->active_mst_links++;
-
- radeon_crtc = to_radeon_crtc(crtc);
-
- if (dig_enc->active_mst_links == 1) {
- mst_enc->fe = dig_enc->dig_encoder;
- mst_enc->fe_from_be = true;
- atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
-
- atombios_dig_encoder_setup(&primary->base, ATOM_ENCODER_CMD_SETUP, 0);
- atombios_dig_transmitter_setup2(&primary->base, ATOM_TRANSMITTER_ACTION_ENABLE,
- 0, 0, dig_enc->dig_encoder);
-
- if (radeon_dp_needs_link_train(mst_enc->connector) ||
- dig_enc->active_mst_links == 1) {
- radeon_dp_link_train(&primary->base, &mst_enc->connector->base);
- }
-
- } else {
- mst_enc->fe = radeon_atom_pick_dig_encoder(encoder, radeon_crtc->crtc_id);
- if (mst_enc->fe == -1)
- DRM_ERROR("failed to get frontend for dig encoder\n");
- mst_enc->fe_from_be = false;
- atombios_set_mst_encoder_crtc_source(encoder, mst_enc->fe);
- }
-
- DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder,
- dig_enc->linkb, radeon_crtc->crtc_id);
-
- slots = drm_dp_find_vcpi_slots(&radeon_connector->mst_port->mst_mgr,
- mst_enc->pbn);
- drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
- radeon_connector->port,
- mst_enc->pbn, slots);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
-
- radeon_dp_mst_set_be_cntl(primary, mst_enc,
- radeon_connector->mst_port->hpd.hpd, true);
-
- mst_enc->enc_active = true;
- radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-
- fixed_pbn = drm_int2fixp(mst_enc->pbn);
- fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
- avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
- radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
-
- atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
- mst_enc->fe);
- drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
-
- drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
-
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- DRM_ERROR("DPMS OFF %d\n", dig_enc->active_mst_links);
-
- if (!mst_enc->enc_active)
- return;
-
- drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
-
- drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
- /* and this can also fail */
- drm_dp_update_payload_part2(&radeon_connector->mst_port->mst_mgr);
-
- drm_dp_mst_deallocate_vcpi(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
-
- mst_enc->enc_active = false;
- radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
-
- radeon_dp_mst_set_be_cntl(primary, mst_enc,
- radeon_connector->mst_port->hpd.hpd, false);
- atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0,
- mst_enc->fe);
-
- if (!mst_enc->fe_from_be)
- radeon_atom_release_dig_encoder(rdev, mst_enc->fe);
-
- mst_enc->fe_from_be = false;
- dig_enc->active_mst_links--;
- if (dig_enc->active_mst_links == 0) {
- /* drop link */
- }
-
- break;
- }
-
-}
-
-static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct radeon_encoder_mst *mst_enc;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_connector_atom_dig *dig_connector;
- int bpp = 24;
-
- mst_enc = radeon_encoder->enc_priv;
-
- mst_enc->pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
-
- mst_enc->primary->active_device = mst_enc->primary->devices & mst_enc->connector->devices;
- DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
- mst_enc->primary->active_device, mst_enc->primary->devices,
- mst_enc->connector->devices, mst_enc->primary->base.encoder_type);
-
-
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- dig_connector = mst_enc->connector->con_priv;
- dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
- dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
- DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
- dig_connector->dp_lane_count, dig_connector->dp_clock);
- return true;
-}
-
-static void radeon_mst_encoder_prepare(struct drm_encoder *encoder)
-{
- struct radeon_connector *radeon_connector;
- struct radeon_encoder *radeon_encoder, *primary;
- struct radeon_encoder_mst *mst_enc;
- struct radeon_encoder_atom_dig *dig_enc;
-
- radeon_connector = radeon_mst_find_connector(encoder);
- if (!radeon_connector) {
- DRM_DEBUG_KMS("failed to find connector %p\n", encoder);
- return;
- }
- radeon_encoder = to_radeon_encoder(encoder);
-
- radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- mst_enc = radeon_encoder->enc_priv;
-
- primary = mst_enc->primary;
-
- dig_enc = primary->enc_priv;
-
- mst_enc->port = radeon_connector->port;
-
- if (dig_enc->dig_encoder == -1) {
- dig_enc->dig_encoder = radeon_atom_pick_dig_encoder(&primary->base, -1);
- primary->offset = radeon_atom_set_enc_offset(dig_enc->dig_encoder);
- atombios_set_mst_encoder_crtc_source(encoder, dig_enc->dig_encoder);
-
-
- }
- DRM_DEBUG_KMS("%d %d\n", dig_enc->dig_encoder, primary->offset);
-}
-
-static void
-radeon_mst_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-static void radeon_mst_encoder_commit(struct drm_encoder *encoder)
-{
- radeon_mst_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- DRM_DEBUG_KMS("\n");
-}
-
-static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = {
- .dpms = radeon_mst_encoder_dpms,
- .mode_fixup = radeon_mst_mode_fixup,
- .prepare = radeon_mst_encoder_prepare,
- .mode_set = radeon_mst_encoder_mode_set,
- .commit = radeon_mst_encoder_commit,
-};
-
-static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_funcs radeon_dp_mst_enc_funcs = {
- .destroy = radeon_dp_mst_encoder_destroy,
-};
-
-static struct radeon_encoder *
-radeon_dp_create_fake_mst_encoder(struct radeon_connector *connector)
-{
- struct drm_device *dev = connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder;
- struct radeon_encoder_mst *mst_enc;
- struct drm_encoder *encoder;
- const struct drm_connector_helper_funcs *connector_funcs = connector->base.helper_private;
- struct drm_encoder *enc_master = connector_funcs->best_encoder(&connector->base);
-
- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
- radeon_encoder = kzalloc(sizeof(*radeon_encoder), GFP_KERNEL);
- if (!radeon_encoder)
- return NULL;
-
- radeon_encoder->enc_priv = kzalloc(sizeof(*mst_enc), GFP_KERNEL);
- if (!radeon_encoder->enc_priv) {
- kfree(radeon_encoder);
- return NULL;
- }
- encoder = &radeon_encoder->base;
- switch (rdev->num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- drm_encoder_init(dev, &radeon_encoder->base, &radeon_dp_mst_enc_funcs,
- DRM_MODE_ENCODER_DPMST, NULL);
- drm_encoder_helper_add(encoder, &radeon_mst_helper_funcs);
-
- mst_enc = radeon_encoder->enc_priv;
- mst_enc->connector = connector;
- mst_enc->primary = to_radeon_encoder(enc_master);
- radeon_encoder->is_mst_encoder = true;
- return radeon_encoder;
-}
-
-int
-radeon_dp_mst_init(struct radeon_connector *radeon_connector)
-{
- struct drm_device *dev = radeon_connector->base.dev;
- int max_link_rate;
-
- if (!radeon_connector->ddc_bus->has_aux)
- return 0;
-
- if (radeon_connector_is_dp12_capable(&radeon_connector->base))
- max_link_rate = 0x14;
- else
- max_link_rate = 0x0a;
-
- radeon_connector->mst_mgr.cbs = &mst_cbs;
- return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
- &radeon_connector->ddc_bus->aux, 16, 6,
- 4, drm_dp_bw_code_to_link_rate(max_link_rate),
- radeon_connector->base.base.id);
-}
-
-int
-radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
-{
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret;
- u8 msg[1];
-
- if (!radeon_mst)
- return 0;
-
- if (!ASIC_IS_DCE5(rdev))
- return 0;
-
- if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
- return 0;
-
- ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_MSTM_CAP, msg,
- 1);
- if (ret) {
- if (msg[0] & DP_MST_CAP) {
- DRM_DEBUG_KMS("Sink is MST capable\n");
- dig_connector->is_mst = true;
- } else {
- DRM_DEBUG_KMS("Sink is not MST capable\n");
- dig_connector->is_mst = false;
- }
-
- }
- drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
- dig_connector->is_mst);
- return dig_connector->is_mst;
-}
-
-int
-radeon_dp_mst_check_status(struct radeon_connector *radeon_connector)
-{
- struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
- int retry;
-
- if (dig_connector->is_mst) {
- u8 esi[16] = { 0 };
- int dret;
- int ret = 0;
- bool handled;
-
- dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
- DP_SINK_COUNT_ESI, esi, 8);
-go_again:
- if (dret == 8) {
- DRM_DEBUG_KMS("got esi %3ph\n", esi);
- ret = drm_dp_mst_hpd_irq(&radeon_connector->mst_mgr, esi, &handled);
-
- if (handled) {
- for (retry = 0; retry < 3; retry++) {
- int wret;
- wret = drm_dp_dpcd_write(&radeon_connector->ddc_bus->aux,
- DP_SINK_COUNT_ESI + 1, &esi[1], 3);
- if (wret == 3)
- break;
- }
-
- dret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux,
- DP_SINK_COUNT_ESI, esi, 8);
- if (dret == 8) {
- DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
- goto go_again;
- }
- } else
- ret = 0;
-
- return ret;
- } else {
- DRM_DEBUG_KMS("failed to get ESI - device may have failed %d\n", ret);
- dig_connector->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&radeon_connector->mst_mgr,
- dig_connector->is_mst);
- /* send a hotplug event */
- }
- }
- return -EINVAL;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static int radeon_debugfs_mst_info_show(struct seq_file *m, void *unused)
-{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
- struct drm_device *dev = rdev->ddev;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- int i;
-
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- radeon_connector = to_radeon_connector(connector);
- dig_connector = radeon_connector->con_priv;
- if (radeon_connector->is_mst_connector)
- continue;
- if (!dig_connector->is_mst)
- continue;
- drm_dp_mst_dump_topology(m, &radeon_connector->mst_mgr);
-
- for (i = 0; i < radeon_connector->enabled_attribs; i++)
- seq_printf(m, "attrib %d: %d %d\n", i,
- radeon_connector->cur_stream_attribs[i].fe,
- radeon_connector->cur_stream_attribs[i].slots);
- }
- drm_modeset_unlock_all(dev);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_mst_info);
-#endif
-
-void radeon_mst_debugfs_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *root = rdev->ddev->primary->debugfs_root;
-
- debugfs_create_file("radeon_mst_info", 0444, root, rdev,
- &radeon_debugfs_mst_info_fops);
-
-#endif
-}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 956c72b5aa33..6cbe1ab81aba 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -172,7 +172,6 @@ int radeon_use_pflipirq = 2;
int radeon_bapm = -1;
int radeon_backlight = -1;
int radeon_auxch = -1;
-int radeon_mst = 0;
int radeon_uvd = 1;
int radeon_vce = 1;
@@ -263,9 +262,6 @@ module_param_named(backlight, radeon_backlight, int, 0444);
MODULE_PARM_DESC(auxch, "Use native auxch experimental support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(auxch, radeon_auxch, int, 0444);
-MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
-module_param_named(mst, radeon_mst, int, 0444);
-
MODULE_PARM_DESC(uvd, "uvd enable/disable uvd support (1 = enable, 0 = disable)");
module_param_named(uvd, radeon_uvd, int, 0444);
@@ -516,14 +512,11 @@ long radeon_drm_ioctl(struct file *filp,
static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = radeon_drm_ioctl(filp, cmd, arg);
-
- return ret;
+ return radeon_drm_ioctl(filp, cmd, arg);
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 46549d5179ee..fbc0a2182318 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -30,6 +30,8 @@
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
+#include <acpi/video.h>
+
#include "radeon.h"
#include "radeon_atombios.h"
#include "radeon_legacy_encoders.h"
@@ -167,7 +169,7 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
return;
if (radeon_backlight == 0) {
- return;
+ use_bl = false;
} else if (radeon_backlight == 1) {
use_bl = true;
} else if (radeon_backlight == -1) {
@@ -193,6 +195,13 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
else
radeon_legacy_backlight_init(radeon_encoder, connector);
}
+
+ /*
+ * If there is no native backlight device (which may happen even when
+ * use_bl==true) try registering an ACPI video backlight device instead.
+	 * use_bl==true), try registering an ACPI video backlight device instead.
+ */
+ if (!rdev->mode_info.bl_encoder)
+ acpi_video_register_backlight();
}
void
@@ -244,16 +253,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->is_mst_encoder) {
- struct radeon_encoder_mst *mst_enc;
-
- if (!radeon_connector->is_mst_connector)
- continue;
-
- mst_enc = radeon_encoder->enc_priv;
- if (mst_enc->connector == radeon_connector->mst_port)
- return connector;
- } else if (radeon_encoder->active_device & radeon_connector->devices)
+ if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -399,9 +399,6 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_DisplayPort:
- if (radeon_connector->is_mst_connector)
- return false;
-
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 3907785d0798..da2173435edd 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -100,16 +100,8 @@ static void radeon_hotplug_work_func(struct work_struct *work)
static void radeon_dp_work_func(struct work_struct *work)
{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- dp_work);
- struct drm_device *dev = rdev->ddev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
-
- /* this should take a mutex */
- list_for_each_entry(connector, &mode_config->connector_list, head)
- radeon_connector_hotplug(connector);
}
+
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 1a66fb969ee7..0cd32c65456c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -33,6 +33,8 @@
#include <drm/drm_util.h>
#include <drm/radeon_drm.h>
+#include <acpi/video.h>
+
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_legacy_encoders.h"
@@ -387,6 +389,11 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
return;
#endif
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping radeon legacy LVDS backlight registration\n");
+ return;
+ }
+
pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index b34cffc162e2..9f5be416454f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -31,7 +31,6 @@
#define RADEON_MODE_H
#include <drm/display/drm_dp_helper.h>
-#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -436,24 +435,12 @@ struct radeon_encoder_atom_dig {
int panel_mode;
struct radeon_afmt *afmt;
struct r600_audio_pin *pin;
- int active_mst_links;
};
struct radeon_encoder_atom_dac {
enum radeon_tv_std tv_std;
};
-struct radeon_encoder_mst {
- int crtc;
- struct radeon_encoder *primary;
- struct radeon_connector *connector;
- struct drm_dp_mst_port *port;
- int pbn;
- int fe;
- bool fe_from_be;
- bool enc_active;
-};
-
struct radeon_encoder {
struct drm_encoder base;
uint32_t encoder_enum;
@@ -475,8 +462,6 @@ struct radeon_encoder {
enum radeon_output_csc output_csc;
bool can_mst;
uint32_t offset;
- bool is_mst_encoder;
- /* front end for this mst encoder */
};
struct radeon_connector_atom_dig {
@@ -487,7 +472,6 @@ struct radeon_connector_atom_dig {
int dp_clock;
int dp_lane_count;
bool edp_on;
- bool is_mst;
};
struct radeon_gpio_rec {
@@ -531,11 +515,6 @@ enum radeon_connector_dither {
RADEON_FMT_DITHER_ENABLE = 1,
};
-struct stream_attribs {
- uint16_t fe;
- uint16_t slots;
-};
-
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -558,14 +537,6 @@ struct radeon_connector {
enum radeon_connector_audio audio;
enum radeon_connector_dither dither;
int pixelclock_for_modeset;
- bool is_mst_connector;
- struct radeon_connector *mst_port;
- struct drm_dp_mst_port *port;
- struct drm_dp_mst_topology_mgr mst_mgr;
-
- struct radeon_encoder *mst_encoder;
- struct stream_attribs cur_stream_attribs[6];
- int enabled_attribs;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
@@ -767,8 +738,6 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set, int fe);
-extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder,
- int fe);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
@@ -905,7 +874,6 @@ extern struct radeon_encoder_tv_dac *
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_lvds *
radeon_combios_get_lvds_info(struct radeon_encoder *encoder);
-extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_tv_dac *
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
extern struct radeon_encoder_primary_dac *
@@ -986,15 +954,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
-/* mst */
-int radeon_dp_mst_init(struct radeon_connector *radeon_connector);
-int radeon_dp_mst_probe(struct radeon_connector *radeon_connector);
-int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector);
-void radeon_mst_debugfs_init(struct radeon_device *rdev);
-void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode);
-
-void radeon_setup_mst_connector(struct drm_device *dev);
-
int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx);
void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c4a6802ca96..00c33b24d5d3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -202,9 +202,9 @@ int radeon_bo_create(struct radeon_device *rdev,
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
- r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, sg, resv,
- &radeon_ttm_bo_destroy);
+ r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
+ &bo->placement, page_align, !kernel, sg, resv,
+ &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e765abcb3b01..04c693ca419a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1899,7 +1899,7 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
* to false since we want to wait for vbl to avoid flicker.
*/
if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
- jiffies > rdev->pm.dynpm_action_timeout) {
+ time_after(jiffies, rdev->pm.dynpm_action_timeout)) {
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
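
The radeon_pm.c change above replaces a raw "jiffies > timeout" comparison with time_after(), which stays correct when the jiffies counter wraps around (a plain comparison inverts its result after a wrap). A minimal sketch of the idiom; the 100 ms deadline and the polling loop are illustrative only:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll a condition for up to 100 ms; wrap-safe thanks to time_after(). */
static int example_wait_ready(bool (*ready)(void))
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (!ready()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(100, 200);
	}
	return 0;
}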
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index f6e6a6d5d987..c959e8c6be7d 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -5,7 +5,7 @@ config DRM_RCAR_DU
depends on ARM || ARM64
depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
Choose this option if you have an R-Car chipset.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index e7275b5e7ec8..6f132325c8b7 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -14,10 +14,3 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
-
-# 'remote-endpoint' is fixed up at run-time
-DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 621bbccb95d4..3619e1ddeb62 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -17,9 +17,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "rcar_cmm.h"
@@ -31,6 +29,7 @@
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_lvds.h"
+#include "rcar_mipi_dsi.h"
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
@@ -746,7 +745,19 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- rcar_lvds_clk_enable(bridge, mode->clock * 1000);
+ rcar_lvds_pclk_enable(bridge, mode->clock * 1000);
+ }
+
+ /*
+ * Similarly to LVDS, on V3U the dot clock is provided by the DSI
+ * encoder, and we need to enable the DSI clocks before enabling the CRTC.
+ */
+ if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
+ (rstate->outputs &
+ (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
+ struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
+
+ rcar_mipi_dsi_pclk_enable(bridge, state);
}
rcar_du_crtc_start(rcrtc);
@@ -779,7 +790,20 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- rcar_lvds_clk_disable(bridge);
+ rcar_lvds_pclk_disable(bridge);
+ }
+
+ if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
+ (rstate->outputs &
+ (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
+ struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
+
+ /*
+ * Disable the DSI clock output, see
+ * rcar_du_crtc_atomic_enable().
+ */
+
+ rcar_mipi_dsi_pclk_disable(bridge);
}
spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 70d85610d720..a2776f1d6f2c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -20,15 +20,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
-#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
* Device Information
@@ -508,7 +506,8 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A779A0 has two MIPI DSI outputs. */
@@ -579,7 +578,7 @@ const char *rcar_du_output_name(enum rcar_du_output output)
* DRM operations
*/
-DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
+DEFINE_DRM_GEM_DMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index bfad7775d9a1..5cfa2bb7ad93 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -31,6 +31,7 @@ struct rcar_du_device;
#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */
#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */
#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */
+#define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP or EOR bits */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
@@ -91,6 +92,7 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_VSPS 4
#define RCAR_DU_MAX_LVDS 2
+#define RCAR_DU_MAX_DSI 2
struct rcar_du_device {
struct device *dev;
@@ -107,6 +109,7 @@ struct rcar_du_device {
struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
+ struct drm_bridge *dsi[RCAR_DU_MAX_DSI];
struct {
struct drm_property *colorkey;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 60d6be78323b..b1787be31e92 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,18 +9,13 @@
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/slab.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_managed.h>
-#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_kms.h"
#include "rcar_lvds.h"
/* -----------------------------------------------------------------------------
@@ -84,6 +79,10 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
if (output == RCAR_DU_OUTPUT_LVDS0 ||
output == RCAR_DU_OUTPUT_LVDS1)
rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
+
+ if (output == RCAR_DU_OUTPUT_DSI0 ||
+ output == RCAR_DU_OUTPUT_DSI1)
+ rcdu->dsi[output - RCAR_DU_OUTPUT_DSI0] = bridge;
}
/*
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 761451ee5263..8c2719efda2a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -11,9 +11,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
@@ -328,12 +327,12 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
*/
static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
- .free = drm_gem_cma_object_free,
- .print_info = drm_gem_cma_object_print_info,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
- .mmap = drm_gem_cma_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .free = drm_gem_dma_object_free,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = drm_gem_dma_object_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
@@ -341,33 +340,33 @@ struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_gem_object *gem_obj;
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
- return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ return drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
- /* Create a CMA GEM buffer. */
- cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
- if (!cma_obj)
+ /* Create a DMA GEM buffer. */
+ dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL);
+ if (!dma_obj)
return ERR_PTR(-ENOMEM);
- gem_obj = &cma_obj->base;
+ gem_obj = &dma_obj->base;
gem_obj->funcs = &rcar_du_gem_funcs;
drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size);
- cma_obj->map_noncoherent = false;
+ dma_obj->map_noncoherent = false;
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret) {
drm_gem_object_release(gem_obj);
- kfree(cma_obj);
+ kfree(dma_obj);
return ERR_PTR(ret);
}
- cma_obj->paddr = 0;
- cma_obj->sgt = sgt;
+ dma_obj->dma_addr = 0;
+ dma_obj->sgt = sgt;
return gem_obj;
}
@@ -390,7 +389,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = roundup(min_pitch, align);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
static struct drm_framebuffer *
@@ -406,8 +405,8 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
- dev_dbg(dev->dev, "unsupported pixel format %08x\n",
- mode_cmd->pixel_format);
+ dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
+ &mode_cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 501d79367e3e..d759e0192181 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -12,11 +12,10 @@
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_group.h"
@@ -342,7 +341,7 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
if (state->source == RCAR_DU_PLANE_MEMORY) {
struct drm_framebuffer *fb = state->state.fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int i;
if (state->format->planes == 2)
@@ -351,8 +350,8 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
pitch = fb->pitches[0] * 8 / state->format->bpp;
for (i = 0; i < state->format->planes; ++i) {
- gem = drm_fb_cma_get_gem_obj(fb, i);
- dma[i] = gem->paddr + fb->offsets[i];
+ gem = drm_fb_dma_get_gem_obj(fb, i);
+ dma[i] = gem->dma_addr + fb->offsets[i];
}
} else {
pitch = drm_rect_width(&state->state.src) >> 16;
@@ -507,8 +506,15 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
- rcar_du_plane_write(rgrp, index, PnMR,
- PnMR_SPIM_TP_OFF | state->format->pnmr);
+ struct rcar_du_device *rcdu = rgrp->dev;
+ u32 pnmr = state->format->pnmr | PnMR_SPIM_TP_OFF;
+
+ if (rcdu->info->features & RCAR_DU_FEATURE_NO_BLENDING) {
+ /* No blending. ALP and EOR are not supported. */
+ pnmr &= ~(PnMR_SPIM_ALP | PnMR_SPIM_EOR);
+ }
+
+ rcar_du_plane_write(rgrp, index, PnMR, pnmr);
rcar_du_plane_write(rgrp, index, PnDDCR4,
state->format->edf | PnDDCR4_CODE);
@@ -522,7 +528,6 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
* register to 0 to avoid this.
*/
- /* TODO: Check if alpha-blending should be disabled in PnMR. */
rcar_du_plane_write(rgrp, index, PnALPHAR, 0);
}
@@ -607,8 +612,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret < 0)
return ret;
@@ -620,8 +625,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
*format = rcar_du_format_info(state->fb->format->format);
if (*format == NULL) {
- dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
- state->fb->format->format);
+ dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
+ &state->fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index dbc68cdabcff..e465aef41585 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -11,13 +11,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include <linux/bitops.h>
@@ -153,6 +152,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
.alpha = state->state.alpha >> 8,
.zpos = state->state.zpos,
};
+ u32 fourcc = state->format->fourcc;
unsigned int i;
cfg.src.left = state->state.src.x1 >> 16;
@@ -169,9 +169,27 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- format = rcar_du_format_info(state->format->fourcc);
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+ }
+ }
+
+ format = rcar_du_format_info(fourcc);
cfg.pixelformat = format->v4l2;
+ cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
+
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
@@ -184,7 +202,7 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
- struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
if (gem->sgt) {
@@ -213,7 +231,7 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
}
} else {
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
- gem->paddr, gem->base.size);
+ gem->dma_addr, gem->base.size);
if (ret)
goto fail;
}
@@ -437,6 +455,11 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_create_zpos_property(&plane->plane, i, 0,
num_planes - 1);
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
vsp->num_planes++;
}
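
The blend-mode support added above works by rewriting the framebuffer fourcc before the V4L2 format lookup: when the plane's pixel blend mode is "None" the per-pixel alpha has to be ignored, so each ARGB format is swapped for its XRGB sibling. A minimal standalone sketch of that mapping follows; the helper name is invented for illustration and is not part of the patch.

    #include <linux/types.h>
    #include <drm/drm_blend.h>
    #include <drm/drm_fourcc.h>

    /*
     * Illustration only: pick the fourcc actually programmed into the VSP,
     * mirroring rcar_du_vsp_plane_setup() above. With the "None" blend mode
     * the per-pixel alpha must be ignored, hence the X (ignore-alpha)
     * variants.
     */
    static u32 example_effective_fourcc(u32 fourcc, u16 pixel_blend_mode)
    {
    	if (pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE)
    		return fourcc;

    	switch (fourcc) {
    	case DRM_FORMAT_ARGB1555:
    		return DRM_FORMAT_XRGB1555;
    	case DRM_FORMAT_ARGB4444:
    		return DRM_FORMAT_XRGB4444;
    	case DRM_FORMAT_ARGB8888:
    		return DRM_FORMAT_XRGB8888;
    	default:
    		return fourcc;
    	}
    }
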
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 25f50a297c11..8cd37d7b8ae2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -166,8 +166,8 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
wb_state->format = rcar_du_format_info(fb->format->format);
if (wb_state->format == NULL) {
- dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
- fb->format->format);
+ dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
+ &fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index d85aa4bc7f84..81a060c2fe3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -306,7 +306,7 @@ static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
* Clock - D3/E3 only
*/
-int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
int ret;
@@ -324,9 +324,9 @@ int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
return 0;
}
-EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable);
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
-void rcar_lvds_clk_disable(struct drm_bridge *bridge)
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -339,7 +339,7 @@ void rcar_lvds_clk_disable(struct drm_bridge *bridge)
clk_disable_unprepare(lvds->clocks.mod);
}
-EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
/* -----------------------------------------------------------------------------
* Bridge
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index 3097bf749bec..bee7033b60d6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -13,17 +13,17 @@
struct drm_bridge;
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
-int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
-void rcar_lvds_clk_disable(struct drm_bridge *bridge);
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq);
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge);
bool rcar_lvds_dual_link(struct drm_bridge *bridge);
bool rcar_lvds_is_connected(struct drm_bridge *bridge);
#else
-static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
- unsigned long freq)
+static inline int rcar_lvds_pclk_enable(struct drm_bridge *bridge,
+ unsigned long freq)
{
return -ENOSYS;
}
-static inline void rcar_lvds_clk_disable(struct drm_bridge *bridge) { }
+static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { }
static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
return false;
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
index 62f7eb84ab01..a7f2b7f66a17 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -25,6 +25,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include "rcar_mipi_dsi.h"
#include "rcar_mipi_dsi_regs.h"
struct rcar_mipi_dsi {
@@ -414,7 +415,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
/* Enable DOT clock */
vclkset = VCLKSET_CKEN;
- rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+ rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
if (dsi_format == 24)
vclkset |= VCLKSET_BPP_24;
@@ -429,7 +430,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div)
| VCLKSET_LANE(dsi->lanes - 1);
- rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+ rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
/* After setting VCLKSET register, enable VCLKEN */
rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN);
@@ -441,9 +442,21 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi)
{
+ /* Disable VCLKEN */
+ rcar_mipi_dsi_write(dsi, VCLKEN, 0);
+
+ /* Disable DOT clock */
+ rcar_mipi_dsi_write(dsi, VCLKSET, 0);
+
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+ /* CFGCLK disable */
+ rcar_mipi_dsi_clr(dsi, CFGCLKSET, CFGCLKSET_CKEN);
+
+ /* LPCLK disable */
+ rcar_mipi_dsi_clr(dsi, LPCLKSET, LPCLKSET_CKEN);
+
dev_dbg(dsi->dev, "DSI device is shutdown\n");
}
@@ -542,6 +555,34 @@ static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi)
return 0;
}
+static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
+{
+ u32 status;
+ int ret;
+
+ /* Disable transmission in video mode. */
+ rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_EN_VIDEO);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & TXVMSR_ACT),
+ 2000, 100000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to disable video transmission\n");
+ return;
+ }
+
+ /* Assert video FIFO clear. */
+ rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_VFCLR);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & TXVMSR_VFRDY),
+ 2000, 100000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to assert video FIFO clear\n");
+ return;
+ }
+}
+
/* -----------------------------------------------------------------------------
* Bridge
*/
@@ -558,7 +599,22 @@ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_start_video(dsi);
+}
+
+static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_stop_video(dsi);
+}
+
+void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
@@ -586,8 +642,6 @@ static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
if (ret < 0)
goto err_dsi_start_hs;
- rcar_mipi_dsi_start_video(dsi);
-
return;
err_dsi_start_hs:
@@ -595,15 +649,16 @@ err_dsi_start_hs:
err_dsi_startup:
rcar_mipi_dsi_clk_disable(dsi);
}
+EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_enable);
-static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
rcar_mipi_dsi_shutdown(dsi);
rcar_mipi_dsi_clk_disable(dsi);
}
+EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_disable);
static enum drm_mode_status
rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
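
The new stop-video path polls TXVMSR with read_poll_timeout() from <linux/iopoll.h>. Its arguments are the accessor to call, the variable that receives each read, the exit condition, the sleep interval and total timeout in microseconds, whether to sleep before the first read, and finally the accessor's own arguments. A minimal sketch with a hypothetical flat MMIO register (not one of the DSI registers above):

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    /*
     * Illustration only: wait up to 100 ms for a (hypothetical) BUSY bit to
     * clear, re-reading every 2 ms -- the same interval and timeout used for
     * the TXVMSR polls above.
     */
    static int example_wait_not_busy(void __iomem *reg)
    {
    	u32 val;

    	return read_poll_timeout(readl, val, !(val & BIT(0)),
    				 2000, 100000, false, reg);
    }
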
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h
new file mode 100644
index 000000000000..528a196e6edd
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car DSI Encoder
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ *
+ * Contact: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#ifndef __RCAR_MIPI_DSI_H__
+#define __RCAR_MIPI_DSI_H__
+
+struct drm_atomic_state;
+struct drm_bridge;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_MIPI_DSI)
+void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge);
+#else
+static inline void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+}
+
+static inline void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
+{
+}
+#endif /* CONFIG_DRM_RCAR_MIPI_DSI */
+
+#endif /* __RCAR_MIPI_DSI_H__ */
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 53c2d9980d48..1bf3e2829cd0 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,7 +2,7 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c204e9b95c1f..518ee13b1d6f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 110e83aad9bb..bf6948125b84 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -179,6 +179,23 @@
#define RK3399_TXRX_SRC_SEL_ISP0 BIT(4)
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0)
+#define RK3568_GRF_VO_CON2 0x0368
+#define RK3568_DSI0_SKEWCALHS (0x1f << 11)
+#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI0_TURNDISABLE BIT(2)
+#define RK3568_DSI0_FORCERXMODE BIT(0)
+
+/*
+ * Note that these registers do not appear in the datasheet. They are,
+ * however, present in the BSP driver, which is where these values
+ * come from. The name GRF_VO_CON3 is assumed.
+ */
+#define RK3568_GRF_VO_CON3 0x36c
+#define RK3568_DSI1_SKEWCALHS (0x1f << 11)
+#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI1_TURNDISABLE BIT(2)
+#define RK3568_DSI1_FORCERXMODE BIT(0)
+
#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
enum {
@@ -735,8 +752,9 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi)
static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
int mux)
{
- regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
- mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+ if (dsi->cdata->lcdsel_grf_reg >= 0)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
}
static int
@@ -963,6 +981,8 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev,
DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
goto out_pll_clk;
}
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dsi->encoder,
+ dev->of_node, 0, 0);
ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder.encoder);
if (ret) {
@@ -1612,6 +1632,30 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{ /* sentinel */ }
};
+static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
+ {
+ .reg = 0xfe060000,
+ .lcdsel_grf_reg = -1,
+ .lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
+ RK3568_DSI0_FORCETXSTOPMODE |
+ RK3568_DSI0_TURNDISABLE |
+ RK3568_DSI0_FORCERXMODE),
+ .max_data_lanes = 4,
+ },
+ {
+ .reg = 0xfe070000,
+ .lcdsel_grf_reg = -1,
+ .lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
+ RK3568_DSI1_FORCETXSTOPMODE |
+ RK3568_DSI1_TURNDISABLE |
+ RK3568_DSI1_FORCERXMODE),
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
{
.compatible = "rockchip,px30-mipi-dsi",
@@ -1622,6 +1666,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
}, {
.compatible = "rockchip,rk3399-mipi-dsi",
.data = &rk3399_chip_data,
+ }, {
+ .compatible = "rockchip,rk3568-mipi-dsi",
+ .data = &rk3568_chip_data,
},
{ /* sentinel */ }
};
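
The RK3568 lane configuration values above rely on HIWORD_UPDATE(), which encodes how Rockchip GRF registers are written: the upper 16 bits act as a per-bit write-enable mask, so only the bits whose mask bit is set in the high half-word are modified by the write. A small worked example using the defines from this patch (the variable name is invented for illustration):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define HIWORD_UPDATE(val, mask)	(val | (mask) << 16)

    /* From the RK3568 defines above. */
    #define RK3568_DSI0_TURNDISABLE		BIT(2)
    #define RK3568_DSI0_FORCERXMODE		BIT(0)

    /*
     * 0x00050000: the low half-word clears TURNDISABLE and FORCERXMODE, and
     * only those two bits are touched because only their write-enable bits
     * are set in the high half-word; all other GRF bits keep their value.
     */
    static const u32 example_lanecfg =
    	HIWORD_UPDATE(0, RK3568_DSI0_TURNDISABLE | RK3568_DSI0_FORCERXMODE);
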
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 13ed33e74457..813f9f8c8698 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 985584147da1..614e97aaac80 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -10,7 +10,7 @@
#include <drm/drm.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
@@ -279,7 +279,7 @@ static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
.vmap = rockchip_gem_prime_vmap,
.vunmap = rockchip_gem_prime_vunmap,
.mmap = rockchip_drm_gem_object_mmap,
- .vm_ops = &drm_gem_cma_vm_ops,
+ .vm_ops = &drm_gem_dma_vm_ops,
};
static struct rockchip_gem_object *
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ad3958b6f8bf..c356de5dd220 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -27,7 +28,6 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -68,6 +68,9 @@
#define VOP_REG_SET(vop, group, name, v) \
vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)
+#define VOP_HAS_REG(vop, group, name) \
+ (!!(vop->data->group->name.mask))
+
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
do { \
int i, reg = 0, mask = 0; \
@@ -185,12 +188,6 @@ struct vop {
struct vop_win win[];
};
-static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
-{
- writel(v, vop->regs + offset);
- vop->regsbak[offset >> 2] = v;
-}
-
static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
{
return readl(vop->regs + offset);
@@ -809,9 +806,9 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
const struct vop_win_data *win = vop_win->data;
int ret;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
if (!crtc || WARN_ON(!fb))
return 0;
@@ -1060,9 +1057,9 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
- DRM_PLANE_HELPER_NO_SCALING;
+ DRM_PLANE_NO_SCALING;
struct drm_crtc_state *crtc_state;
if (plane != new_plane_state->crtc->cursor)
@@ -1189,7 +1186,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
*
* Key points:
*
- * - DRM works in in kHz.
+ * - DRM works in kHz.
* - Clock framework works in Hz.
* - Rockchip's clock driver picks the clock rate that is the
* same _OR LOWER_ than the one requested.
@@ -1224,17 +1221,22 @@ static bool vop_dsp_lut_is_enabled(struct vop *vop)
return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en);
}
+static u32 vop_lut_buffer_index(struct vop *vop)
+{
+ return vop_read_reg(vop, 0, &vop->data->common->lut_buffer_index);
+}
+
static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc)
{
struct drm_color_lut *lut = crtc->state->gamma_lut->data;
- unsigned int i;
+ unsigned int i, bpc = ilog2(vop->data->lut_size);
for (i = 0; i < crtc->gamma_size; i++) {
u32 word;
- word = (drm_color_lut_extract(lut[i].red, 10) << 20) |
- (drm_color_lut_extract(lut[i].green, 10) << 10) |
- drm_color_lut_extract(lut[i].blue, 10);
+ word = (drm_color_lut_extract(lut[i].red, bpc) << (2 * bpc)) |
+ (drm_color_lut_extract(lut[i].green, bpc) << bpc) |
+ drm_color_lut_extract(lut[i].blue, bpc);
writel(word, vop->lut_regs + i * 4);
}
}
@@ -1244,38 +1246,66 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
{
struct drm_crtc_state *state = crtc->state;
unsigned int idle;
+ u32 lut_idx, old_idx;
int ret;
if (!vop->lut_regs)
return;
- /*
- * To disable gamma (gamma_lut is null) or to write
- * an update to the LUT, clear dsp_lut_en.
- */
- spin_lock(&vop->reg_lock);
- VOP_REG_SET(vop, common, dsp_lut_en, 0);
- vop_cfg_done(vop);
- spin_unlock(&vop->reg_lock);
- /*
- * In order to write the LUT to the internal memory,
- * we need to first make sure the dsp_lut_en bit is cleared.
- */
- ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
- idle, !idle, 5, 30 * 1000);
- if (ret) {
- DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
- return;
- }
+ if (!state->gamma_lut || !VOP_HAS_REG(vop, common, update_gamma_lut)) {
+ /*
+ * To disable gamma (gamma_lut is null) or to write
+ * an update to the LUT, clear dsp_lut_en.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, dsp_lut_en, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
- if (!state->gamma_lut)
- return;
+ /*
+ * In order to write the LUT to the internal memory,
+ * we need to first make sure the dsp_lut_en bit is cleared.
+ */
+ ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop,
+ idle, !idle, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n");
+ return;
+ }
+
+ if (!state->gamma_lut)
+ return;
+ } else {
+ /*
+ * On RK3399 the gamma LUT can be updated without clearing dsp_lut_en,
+ * by setting update_gamma_lut and then waiting for lut_buffer_index to change.
+ */
+ old_idx = vop_lut_buffer_index(vop);
+ }
spin_lock(&vop->reg_lock);
vop_crtc_write_gamma_lut(vop, crtc);
VOP_REG_SET(vop, common, dsp_lut_en, 1);
+ VOP_REG_SET(vop, common, update_gamma_lut, 1);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
+
+ if (VOP_HAS_REG(vop, common, update_gamma_lut)) {
+ ret = readx_poll_timeout(vop_lut_buffer_index, vop,
+ lut_idx, lut_idx != old_idx, 5, 30 * 1000);
+ if (ret) {
+ DRM_DEV_ERROR(vop->dev, "gamma LUT update timeout!\n");
+ return;
+ }
+
+ /*
+ * update_gamma_lut is auto cleared by HW, but write 0 to clear the bit
+ * in our backup of the regs.
+ */
+ spin_lock(&vop->reg_lock);
+ VOP_REG_SET(vop, common, update_gamma_lut, 0);
+ spin_unlock(&vop->reg_lock);
+ }
}
static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -1325,14 +1355,6 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
return;
}
- /*
- * If we have a GAMMA LUT in the state, then let's make sure
- * it's updated. We might be coming out of suspend,
- * which means the LUT internal memory needs to be re-written.
- */
- if (crtc->state->gamma_lut)
- vop_crtc_gamma_set(vop, crtc, old_state);
-
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
@@ -1423,6 +1445,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
VOP_REG_SET(vop, common, standby, 0);
mutex_unlock(&vop->vop_lock);
+
+ /*
+ * If we have a GAMMA LUT in the state, then let's make sure
+ * it's updated. We might be coming out of suspend,
+ * which means the LUT internal memory needs to be re-written.
+ */
+ if (crtc->state->gamma_lut)
+ vop_crtc_gamma_set(vop, crtc, old_state);
}
static bool vop_fs_irq_is_pending(struct vop *vop)
@@ -2148,8 +2178,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
- if (!vop_data->lut_size) {
- DRM_DEV_ERROR(dev, "no gamma LUT size defined\n");
+ if (vop_data->lut_size != 1024 && vop_data->lut_size != 256) {
+ DRM_DEV_ERROR(dev, "unsupported gamma LUT size %d\n", vop_data->lut_size);
return -EINVAL;
}
vop->lut_regs = devm_ioremap_resource(dev, res);
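
The gamma path above now derives the per-component width from the LUT size: ilog2(1024) = 10 bits on the big VOP and ilog2(256) = 8 bits on the little one, so the packed word uses shifts of 2*bpc and bpc instead of the previously hard-coded 20/10/0. A minimal sketch of the packing, mirroring vop_crtc_write_gamma_lut() (helper name invented for illustration):

    #include <linux/log2.h>
    #include <drm/drm_color_mgmt.h>
    #include <drm/drm_crtc.h>

    /*
     * Pack one gamma LUT entry: R | G | B, each reduced to bpc bits with
     * drm_color_lut_extract(), where bpc = ilog2(lut_size) -- 10 for a
     * 1024-entry LUT, 8 for a 256-entry LUT.
     */
    static u32 example_pack_lut_word(const struct drm_color_lut *e,
    				 unsigned int lut_size)
    {
    	unsigned int bpc = ilog2(lut_size);

    	return (drm_color_lut_extract(e->red, bpc) << (2 * bpc)) |
    	       (drm_color_lut_extract(e->green, bpc) << bpc) |
    	       drm_color_lut_extract(e->blue, bpc);
    }
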
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index ba88addc1a75..8502849833d9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -113,6 +113,8 @@ struct vop_common {
struct vop_reg dither_down_en;
struct vop_reg dither_up;
struct vop_reg dsp_lut_en;
+ struct vop_reg update_gamma_lut;
+ struct vop_reg lut_buffer_index;
struct vop_reg gate_en;
struct vop_reg mmu_en;
struct vop_reg out_mode;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index e4631f515ba4..aac20be5ac08 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -29,7 +29,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -1439,11 +1438,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_EDP0:
die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_EDP |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_MIPI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d03dd0402923..014f99e8928e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -875,6 +875,24 @@ static const struct vop_output rk3399_output = {
.mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
};
+static const struct vop_common rk3399_common = {
+ .standby = VOP_REG_SYNC(RK3399_SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
+ .mmu_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 20),
+ .dither_down_sel = VOP_REG(RK3399_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3399_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 2),
+ .pre_dither_down = VOP_REG(RK3399_DSP_CTRL1, 0x1, 1),
+ .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
+ .dsp_lut_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 0),
+ .update_gamma_lut = VOP_REG(RK3399_DSP_CTRL1, 0x1, 7),
+ .lut_buffer_index = VOP_REG(RK3399_DBG_POST_REG1, 0x1, 1),
+ .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
+ .dsp_blank = VOP_REG(RK3399_DSP_CTRL0, 0x3, 18),
+ .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG_SYNC(RK3399_REG_CFG_DONE, 0x1, 0),
+};
+
static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = {
.y2r_coefficients = {
VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0),
@@ -957,7 +975,7 @@ static const struct vop_data rk3399_vop_big = {
.version = VOP_VERSION(3, 5),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3366_vop_intr,
- .common = &rk3288_common,
+ .common = &rk3399_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.afbc = &rk3399_vop_afbc,
@@ -965,6 +983,7 @@ static const struct vop_data rk3399_vop_big = {
.win = rk3399_vop_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
+ .lut_size = 1024,
};
static const struct vop_win_data rk3399_vop_lit_win_data[] = {
@@ -983,13 +1002,14 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = {
static const struct vop_data rk3399_vop_lit = {
.version = VOP_VERSION(3, 6),
.intr = &rk3366_vop_intr,
- .common = &rk3288_common,
+ .common = &rk3399_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
.misc = &rk3368_misc,
.win = rk3399_vop_lit_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
.win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data,
+ .lut_size = 256,
};
static const struct vop_win_data rk3228_vop_win_data[] = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index 0b3cd65ba5c1..406e981c75bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -628,6 +628,7 @@
#define RK3399_YUV2YUV_WIN 0x02c0
#define RK3399_YUV2YUV_POST 0x02c4
#define RK3399_AUTO_GATING_EN 0x02cc
+#define RK3399_DBG_POST_REG1 0x036c
#define RK3399_WIN0_CSC_COE 0x03a0
#define RK3399_WIN1_CSC_COE 0x03c0
#define RK3399_WIN2_CSC_COE 0x03e0
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 68317d3a7a27..e5a4ecde0063 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -198,7 +198,7 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
}
/**
- * drm_sched_dependency_optimized
+ * drm_sched_dependency_optimized - test if the dependency can be optimized
*
* @fence: the dependency fence
* @entity: the entity which depends on the above fence
@@ -592,7 +592,6 @@ int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
- drm_sched_entity_select_rq(entity);
if (!entity->rq)
return -ENOENT;
@@ -628,7 +627,7 @@ void drm_sched_job_arm(struct drm_sched_job *job)
struct drm_sched_entity *entity = job->entity;
BUG_ON(!entity);
-
+ drm_sched_entity_select_rq(entity);
sched = entity->rq->sched;
job->sched = sched;
@@ -994,6 +993,7 @@ static int drm_sched_main(void *param)
* used
* @score: optional score atomic shared with other schedulers
* @name: name used for debugging
+ * @dev: target &struct device
*
* Return 0 on success, otherwise error code.
*/
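
Moving drm_sched_entity_select_rq() out of drm_sched_job_init() and into drm_sched_job_arm() means the run queue (and hence the scheduler) is only picked once a job is actually committed, not when it is merely initialized. The driver-side ordering this relies on looks roughly like the sketch below; it is an assumption-laden illustration of the scheduler API of this kernel generation, with error handling reduced to a bare minimum.

    #include <drm/gpu_scheduler.h>

    /* Rough sketch of the expected submission order after this change. */
    static void example_submit(struct drm_sched_job *job,
    			   struct drm_sched_entity *entity, void *owner)
    {
    	/* May still fail or be abandoned; no run queue is selected here. */
    	if (drm_sched_job_init(job, entity, owner))
    		return;

    	/* Point of no return: selects the run queue and arms the fence. */
    	drm_sched_job_arm(job);

    	/* Hand the armed job over to the chosen scheduler. */
    	drm_sched_entity_push_job(job);
    }
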
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
deleted file mode 100644
index 5ba5f9138c95..000000000000
--- a/drivers/gpu/drm/selftests/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
- test-drm_format.o test-drm_framebuffer.o \
- test-drm_damage_helper.o test-drm_dp_mst_helper.o \
- test-drm_rect.o
-
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o \
- test-drm_buddy.o
diff --git a/drivers/gpu/drm/selftests/drm_buddy_selftests.h b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
deleted file mode 100644
index 455b756c4ae5..000000000000
--- a/drivers/gpu/drm/selftests/drm_buddy_selftests.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_buddy
- */
-selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
-selftest(buddy_alloc_limit, igt_buddy_alloc_limit)
-selftest(buddy_alloc_range, igt_buddy_alloc_range)
-selftest(buddy_alloc_optimistic, igt_buddy_alloc_optimistic)
-selftest(buddy_alloc_pessimistic, igt_buddy_alloc_pessimistic)
-selftest(buddy_alloc_smoke, igt_buddy_alloc_smoke)
-selftest(buddy_alloc_pathological, igt_buddy_alloc_pathological)
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
deleted file mode 100644
index 29e367db6118..000000000000
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_mm
- */
-
-#define cmdline_test(test) selftest(test, test)
-
-cmdline_test(drm_cmdline_test_force_d_only)
-cmdline_test(drm_cmdline_test_force_D_only_dvi)
-cmdline_test(drm_cmdline_test_force_D_only_hdmi)
-cmdline_test(drm_cmdline_test_force_D_only_not_digital)
-cmdline_test(drm_cmdline_test_force_e_only)
-cmdline_test(drm_cmdline_test_margin_only)
-cmdline_test(drm_cmdline_test_interlace_only)
-cmdline_test(drm_cmdline_test_res)
-cmdline_test(drm_cmdline_test_res_missing_x)
-cmdline_test(drm_cmdline_test_res_missing_y)
-cmdline_test(drm_cmdline_test_res_bad_y)
-cmdline_test(drm_cmdline_test_res_missing_y_bpp)
-cmdline_test(drm_cmdline_test_res_vesa)
-cmdline_test(drm_cmdline_test_res_vesa_rblank)
-cmdline_test(drm_cmdline_test_res_rblank)
-cmdline_test(drm_cmdline_test_res_bpp)
-cmdline_test(drm_cmdline_test_res_bad_bpp)
-cmdline_test(drm_cmdline_test_res_refresh)
-cmdline_test(drm_cmdline_test_res_bad_refresh)
-cmdline_test(drm_cmdline_test_res_bpp_refresh)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_interlaced)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_margins)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_off)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on_off)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on_analog)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_force_on_digital)
-cmdline_test(drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on)
-cmdline_test(drm_cmdline_test_res_margins_force_on)
-cmdline_test(drm_cmdline_test_res_vesa_margins)
-cmdline_test(drm_cmdline_test_res_invalid_mode)
-cmdline_test(drm_cmdline_test_res_bpp_wrong_place_mode)
-cmdline_test(drm_cmdline_test_name)
-cmdline_test(drm_cmdline_test_name_bpp)
-cmdline_test(drm_cmdline_test_name_refresh)
-cmdline_test(drm_cmdline_test_name_bpp_refresh)
-cmdline_test(drm_cmdline_test_name_refresh_wrong_mode)
-cmdline_test(drm_cmdline_test_name_refresh_invalid_mode)
-cmdline_test(drm_cmdline_test_name_option)
-cmdline_test(drm_cmdline_test_name_bpp_option)
-cmdline_test(drm_cmdline_test_rotate_0)
-cmdline_test(drm_cmdline_test_rotate_90)
-cmdline_test(drm_cmdline_test_rotate_180)
-cmdline_test(drm_cmdline_test_rotate_270)
-cmdline_test(drm_cmdline_test_rotate_multiple)
-cmdline_test(drm_cmdline_test_rotate_invalid_val)
-cmdline_test(drm_cmdline_test_rotate_truncated)
-cmdline_test(drm_cmdline_test_hmirror)
-cmdline_test(drm_cmdline_test_vmirror)
-cmdline_test(drm_cmdline_test_margin_options)
-cmdline_test(drm_cmdline_test_multiple_options)
-cmdline_test(drm_cmdline_test_invalid_option)
-cmdline_test(drm_cmdline_test_bpp_extra_and_option)
-cmdline_test(drm_cmdline_test_extra_and_option)
-cmdline_test(drm_cmdline_test_freestanding_options)
-cmdline_test(drm_cmdline_test_freestanding_force_e_and_options)
-cmdline_test(drm_cmdline_test_panel_orientation)
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
deleted file mode 100644
index 8c87c964176b..000000000000
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_mm
- */
-selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
-selftest(init, igt_init)
-selftest(debug, igt_debug)
-selftest(reserve, igt_reserve)
-selftest(insert, igt_insert)
-selftest(replace, igt_replace)
-selftest(insert_range, igt_insert_range)
-selftest(align, igt_align)
-selftest(frag, igt_frag)
-selftest(align32, igt_align32)
-selftest(align64, igt_align64)
-selftest(evict, igt_evict)
-selftest(evict_range, igt_evict_range)
-selftest(bottomup, igt_bottomup)
-selftest(lowest, igt_lowest)
-selftest(topdown, igt_topdown)
-selftest(highest, igt_highest)
-selftest(color, igt_color)
-selftest(color_evict, igt_color_evict)
-selftest(color_evict_range, igt_color_evict_range)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
deleted file mode 100644
index 782e285ca383..000000000000
--- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_selftests_helper
- */
-selftest(drm_rect_clip_scaled_div_by_zero, igt_drm_rect_clip_scaled_div_by_zero)
-selftest(drm_rect_clip_scaled_not_clipped, igt_drm_rect_clip_scaled_not_clipped)
-selftest(drm_rect_clip_scaled_clipped, igt_drm_rect_clip_scaled_clipped)
-selftest(drm_rect_clip_scaled_signed_vs_unsigned, igt_drm_rect_clip_scaled_signed_vs_unsigned)
-selftest(check_plane_state, igt_check_plane_state)
-selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
-selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
-selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
-selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
-selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
-selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
-selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
-selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
-selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
-selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
-selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
-selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
-selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
-selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
-selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
-selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
-selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
-selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
-selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
-selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
-selftest(damage_iter_damage, igt_damage_iter_damage)
-selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
-selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
-selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
-selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
-selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode)
-selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode)
diff --git a/drivers/gpu/drm/selftests/drm_selftest.c b/drivers/gpu/drm/selftests/drm_selftest.c
deleted file mode 100644
index e29ed9faef5b..000000000000
--- a/drivers/gpu/drm/selftests/drm_selftest.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/compiler.h>
-
-#define selftest(name, func) __idx_##name,
-enum {
-#include TESTS
-};
-#undef selftest
-
-#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
-static struct drm_selftest {
- bool enabled;
- const char *name;
- int (*func)(void *);
-} selftests[] = {
-#include TESTS
-};
-#undef selftest
-
-/* Embed the line number into the parameter name so that we can order tests */
-#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
-#define selftest_0(n, func, id) \
-module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
-#define selftest(n, func) selftest_0(n, func, param(n))
-#include TESTS
-#undef selftest
-
-static void set_default_test_all(struct drm_selftest *st, unsigned long count)
-{
- unsigned long i;
-
- for (i = 0; i < count; i++)
- if (st[i].enabled)
- return;
-
- for (i = 0; i < count; i++)
- st[i].enabled = true;
-}
-
-static int run_selftests(struct drm_selftest *st,
- unsigned long count,
- void *data)
-{
- int err = 0;
-
- set_default_test_all(st, count);
-
- /* Tests are listed in natural order in drm_*_selftests.h */
- for (; count--; st++) {
- if (!st->enabled)
- continue;
-
- pr_debug("drm: Running %s\n", st->name);
- err = st->func(data);
- if (err)
- break;
- }
-
- if (WARN(err > 0 || err == -ENOTTY,
- "%s returned %d, conflicting with selftest's magic values!\n",
- st->name, err))
- err = -1;
-
- rcu_barrier();
- return err;
-}
-
-static int __maybe_unused
-__drm_subtests(const char *caller,
- const struct drm_subtest *st,
- int count,
- void *data)
-{
- int err;
-
- for (; count--; st++) {
- pr_debug("Running %s/%s\n", caller, st->name);
- err = st->func(data);
- if (err) {
- pr_err("%s: %s failed with error %d\n",
- caller, st->name, err);
- return err;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/selftests/drm_selftest.h b/drivers/gpu/drm/selftests/drm_selftest.h
deleted file mode 100644
index c784ec02ff53..000000000000
--- a/drivers/gpu/drm/selftests/drm_selftest.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __DRM_SELFTEST_H__
-#define __DRM_SELFTEST_H__
-
-struct drm_subtest {
- int (*func)(void *data);
- const char *name;
-};
-
-static int __drm_subtests(const char *caller,
- const struct drm_subtest *st,
- int count,
- void *data);
-#define drm_subtests(T, data) \
- __drm_subtests(__func__, T, ARRAY_SIZE(T), data)
-
-#define SUBTEST(x) { x, #x }
-
-#endif /* __DRM_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/selftests/test-drm_buddy.c b/drivers/gpu/drm/selftests/test-drm_buddy.c
deleted file mode 100644
index aca0c491040f..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_buddy.c
+++ /dev/null
@@ -1,994 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#define pr_fmt(fmt) "drm_buddy: " fmt
-
-#include <linux/module.h>
-#include <linux/prime_numbers.h>
-#include <linux/sched/signal.h>
-
-#include <drm/drm_buddy.h>
-
-#include "../lib/drm_random.h"
-
-#define TESTS "drm_buddy_selftests.h"
-#include "drm_selftest.h"
-
-#define IGT_TIMEOUT(name__) \
- unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
-
-static unsigned int random_seed;
-
-static inline u64 get_size(int order, u64 chunk_size)
-{
- return (1 << order) * chunk_size;
-}
-
-__printf(2, 3)
-static bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
-{
- va_list va;
-
- if (!signal_pending(current)) {
- cond_resched();
- if (time_before(jiffies, timeout))
- return false;
- }
-
- if (fmt) {
- va_start(va, fmt);
- vprintk(fmt, va);
- va_end(va);
- }
-
- return true;
-}
-
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static void __igt_dump_block(struct drm_buddy *mm,
- struct drm_buddy_block *block,
- bool buddy)
-{
- pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
- block->header,
- drm_buddy_block_state(block),
- drm_buddy_block_order(block),
- drm_buddy_block_offset(block),
- drm_buddy_block_size(mm, block),
- yesno(!block->parent),
- yesno(buddy));
-}
-
-static void igt_dump_block(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
-
- __igt_dump_block(mm, block, false);
-
- buddy = drm_get_buddy(block);
- if (buddy)
- __igt_dump_block(mm, buddy, true);
-}
-
-static int igt_check_block(struct drm_buddy *mm,
- struct drm_buddy_block *block)
-{
- struct drm_buddy_block *buddy;
- unsigned int block_state;
- u64 block_size;
- u64 offset;
- int err = 0;
-
- block_state = drm_buddy_block_state(block);
-
- if (block_state != DRM_BUDDY_ALLOCATED &&
- block_state != DRM_BUDDY_FREE &&
- block_state != DRM_BUDDY_SPLIT) {
- pr_err("block state mismatch\n");
- err = -EINVAL;
- }
-
- block_size = drm_buddy_block_size(mm, block);
- offset = drm_buddy_block_offset(block);
-
- if (block_size < mm->chunk_size) {
- pr_err("block size smaller than min size\n");
- err = -EINVAL;
- }
-
- if (!is_power_of_2(block_size)) {
- pr_err("block size not power of two\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(block_size, mm->chunk_size)) {
- pr_err("block size not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, mm->chunk_size)) {
- pr_err("block offset not aligned to min size\n");
- err = -EINVAL;
- }
-
- if (!IS_ALIGNED(offset, block_size)) {
- pr_err("block offset not aligned to block size\n");
- err = -EINVAL;
- }
-
- buddy = drm_get_buddy(block);
-
- if (!buddy && block->parent) {
- pr_err("buddy has gone fishing\n");
- err = -EINVAL;
- }
-
- if (buddy) {
- if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
- pr_err("buddy has wrong offset\n");
- err = -EINVAL;
- }
-
- if (drm_buddy_block_size(mm, buddy) != block_size) {
- pr_err("buddy size mismatch\n");
- err = -EINVAL;
- }
-
- if (drm_buddy_block_state(buddy) == block_state &&
- block_state == DRM_BUDDY_FREE) {
- pr_err("block and its buddy are free\n");
- err = -EINVAL;
- }
- }
-
- return err;
-}
-
-static int igt_check_blocks(struct drm_buddy *mm,
- struct list_head *blocks,
- u64 expected_size,
- bool is_contiguous)
-{
- struct drm_buddy_block *block;
- struct drm_buddy_block *prev;
- u64 total;
- int err = 0;
-
- block = NULL;
- prev = NULL;
- total = 0;
-
- list_for_each_entry(block, blocks, link) {
- err = igt_check_block(mm, block);
-
- if (!drm_buddy_block_is_allocated(block)) {
- pr_err("block not allocated\n"),
- err = -EINVAL;
- }
-
- if (is_contiguous && prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(block);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("block offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- if (err)
- break;
-
- total += drm_buddy_block_size(mm, block);
- prev = block;
- }
-
- if (!err) {
- if (total != expected_size) {
- pr_err("size mismatch, expected=%llx, found=%llx\n",
- expected_size, total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev block, dump:\n");
- igt_dump_block(mm, prev);
- }
-
- pr_err("bad block, dump:\n");
- igt_dump_block(mm, block);
-
- return err;
-}
-
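-/*
- * Check the overall drm_buddy state: n_roots matches hweight64(mm->size),
- * every root is free, ordered by offset and at the head of its order's
- * free list, and the root sizes sum to mm->size.
- */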
-static int igt_check_mm(struct drm_buddy *mm)
-{
- struct drm_buddy_block *root;
- struct drm_buddy_block *prev;
- unsigned int i;
- u64 total;
- int err = 0;
-
- if (!mm->n_roots) {
- pr_err("n_roots is zero\n");
- return -EINVAL;
- }
-
- if (mm->n_roots != hweight64(mm->size)) {
- pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
- mm->n_roots, hweight64(mm->size));
- return -EINVAL;
- }
-
- root = NULL;
- prev = NULL;
- total = 0;
-
- for (i = 0; i < mm->n_roots; ++i) {
- struct drm_buddy_block *block;
- unsigned int order;
-
- root = mm->roots[i];
- if (!root) {
- pr_err("root(%u) is NULL\n", i);
- err = -EINVAL;
- break;
- }
-
- err = igt_check_block(mm, root);
-
- if (!drm_buddy_block_is_free(root)) {
- pr_err("root not free\n");
- err = -EINVAL;
- }
-
- order = drm_buddy_block_order(root);
-
- if (!i) {
- if (order != mm->max_order) {
- pr_err("max order root missing\n");
- err = -EINVAL;
- }
- }
-
- if (prev) {
- u64 prev_block_size;
- u64 prev_offset;
- u64 offset;
-
- prev_offset = drm_buddy_block_offset(prev);
- prev_block_size = drm_buddy_block_size(mm, prev);
- offset = drm_buddy_block_offset(root);
-
- if (offset != (prev_offset + prev_block_size)) {
- pr_err("root offset mismatch\n");
- err = -EINVAL;
- }
- }
-
- block = list_first_entry_or_null(&mm->free_list[order],
- struct drm_buddy_block,
- link);
- if (block != root) {
- pr_err("root mismatch at order=%u\n", order);
- err = -EINVAL;
- }
-
- if (err)
- break;
-
- prev = root;
- total += drm_buddy_block_size(mm, root);
- }
-
- if (!err) {
- if (total != mm->size) {
- pr_err("expected mm size=%llx, found=%llx\n", mm->size,
- total);
- err = -EINVAL;
- }
- return err;
- }
-
- if (prev) {
- pr_err("prev root(%u), dump:\n", i - 1);
- igt_dump_block(mm, prev);
- }
-
- if (root) {
- pr_err("bad root(%u), dump:\n", i);
- igt_dump_block(mm, root);
- }
-
- return err;
-}
-
-static void igt_mm_config(u64 *size, u64 *chunk_size)
-{
- DRM_RND_STATE(prng, random_seed);
- u32 s, ms;
-
- /* Nothing fancy, just try to get an interesting bit pattern */
-
- prandom_seed_state(&prng, random_seed);
-
- /* Let size be a random number of pages up to 8 GB (2M pages) */
- s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
- /* Let the chunk size be a random power of 2 less than size */
- ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
- /* Round size down to the chunk size */
- s &= -ms;
-
- /* Convert from pages to bytes */
- *chunk_size = (u64)ms << 12;
- *size = (u64)s << 12;
-}
-
-static int igt_buddy_alloc_pathological(void *arg)
-{
- u64 mm_size, size, min_page_size, start = 0;
- struct drm_buddy_block *block;
- const int max_order = 3;
- unsigned long flags = 0;
- int order, top, err;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
- LIST_HEAD(holes);
- LIST_HEAD(tmp);
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left. Free the largest block, then whittle down again.
- * Eventually we will have a fully 50% fragmented mm.
- */
-
- mm_size = PAGE_SIZE << max_order;
- err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- BUG_ON(mm.max_order != max_order);
-
- for (top = max_order; top; top--) {
- /* Make room by freeing the largest allocated block */
- block = list_first_entry_or_null(&blocks, typeof(*block), link);
- if (block) {
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
- }
-
- for (order = top; order--; ) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
- min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
- order, top);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- }
-
- /* There should be one final page for this sub-allocation */
- size = min_page_size = get_size(0, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM for hole\n");
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &holes);
-
- size = min_page_size = get_size(top, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
- top, max_order);
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- drm_buddy_free_list(&mm, &holes);
-
- /* Nothing larger than blocks of chunk_size now available */
- for (order = 1; order <= max_order; order++) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- if (err)
- err = 0;
-
-err:
- list_splice_tail(&holes, &blocks);
- drm_buddy_free_list(&mm, &blocks);
- drm_buddy_fini(&mm);
- return err;
-}
-
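-/*
- * Smoke test: repeatedly allocate blocks of (at most) a randomly chosen
- * order until the mm is exhausted, then validate the resulting block list
- * and the mm state after freeing everything.
- */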
-static int igt_buddy_alloc_smoke(void *arg)
-{
- u64 mm_size, min_page_size, chunk_size, start = 0;
- unsigned long flags = 0;
- struct drm_buddy mm;
- int *order;
- int err, i;
-
- DRM_RND_STATE(prng, random_seed);
- IGT_TIMEOUT(end_time);
-
- igt_mm_config(&mm_size, &chunk_size);
-
- err = drm_buddy_init(&mm, mm_size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- order = drm_random_order(mm.max_order + 1, &prng);
- if (!order) {
- err = -ENOMEM;
- goto out_fini;
- }
-
- for (i = 0; i <= mm.max_order; ++i) {
- struct drm_buddy_block *block;
- int max_order = order[i];
- bool timeout = false;
- LIST_HEAD(blocks);
- u64 total, size;
- LIST_HEAD(tmp);
- int order;
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort\n");
- break;
- }
-
- order = max_order;
- total = 0;
-
- do {
-retry:
- size = min_page_size = get_size(order, chunk_size);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size,
- min_page_size, &tmp, flags);
- if (err) {
- if (err == -ENOMEM) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- } else {
- if (order--) {
- err = 0;
- goto retry;
- }
-
- pr_err("buddy_alloc with order=%d failed(%d)\n",
- order, err);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- break;
- }
-
- list_move_tail(&block->link, &blocks);
-
- if (drm_buddy_block_order(block) != order) {
- pr_err("buddy_alloc order mismatch\n");
- err = -EINVAL;
- break;
- }
-
- total += drm_buddy_block_size(&mm, block);
-
- if (__igt_timeout(end_time, NULL)) {
- timeout = true;
- break;
- }
- } while (total < mm.size);
-
- if (!err)
- err = igt_check_blocks(&mm, &blocks, total, false);
-
- drm_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
- if (err || timeout)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- kfree(order);
-out_fini:
- drm_buddy_fini(&mm);
-
- return err;
-}
-
-static int igt_buddy_alloc_pessimistic(void *arg)
-{
- u64 mm_size, size, min_page_size, start = 0;
- struct drm_buddy_block *block, *bn;
- const unsigned int max_order = 16;
- unsigned long flags = 0;
- struct drm_buddy mm;
- unsigned int order;
- LIST_HEAD(blocks);
- LIST_HEAD(tmp);
- int err;
-
- /*
- * Create a pot-sized mm, then allocate one of each possible
- * order within. This should leave the mm with exactly one
- * page left.
- */
-
- mm_size = PAGE_SIZE << max_order;
- err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
- BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order < max_order; order++) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- }
-
- /* And now the last remaining block available */
- size = min_page_size = get_size(0, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
-
- /* Should be completely full! */
- for (order = max_order; order--; ) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
- order);
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- }
- }
-
- block = list_last_entry(&blocks, typeof(*block), link);
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
-
- /* As we free in increasing size, we make available larger blocks */
- order = 1;
- list_for_each_entry_safe(block, bn, &blocks, link) {
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
-
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
- order++;
- }
-
- /* To confirm, now the whole mm should be available */
- size = min_page_size = get_size(max_order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
- max_order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_del(&block->link);
- drm_buddy_free_block(&mm, block);
-
-err:
- drm_buddy_free_list(&mm, &blocks);
- drm_buddy_fini(&mm);
- return err;
-}
-
-static int igt_buddy_alloc_optimistic(void *arg)
-{
- u64 mm_size, size, min_page_size, start = 0;
- struct drm_buddy_block *block;
- unsigned long flags = 0;
- const int max_order = 16;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
- LIST_HEAD(tmp);
- int order, err;
-
- /*
- * Create a mm with one block of each order available, and
- * try to allocate them all.
- */
-
- mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
- err = drm_buddy_init(&mm,
- mm_size,
- PAGE_SIZE);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- BUG_ON(mm.max_order != max_order);
-
- for (order = 0; order <= max_order; order++) {
- size = min_page_size = get_size(order, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (err) {
- pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
- order);
- goto err;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- }
-
- /* Should be completely full! */
- size = min_page_size = get_size(0, PAGE_SIZE);
- err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
- if (!err) {
- pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_blocks has no blocks\n");
- err = -EINVAL;
- goto err;
- }
-
- list_move_tail(&block->link, &blocks);
- err = -EINVAL;
- goto err;
- } else {
- err = 0;
- }
-
-err:
- drm_buddy_free_list(&mm, &blocks);
- drm_buddy_fini(&mm);
- return err;
-}
-
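-/*
- * Walk the whole address space front to back with DRM_BUDDY_RANGE_ALLOCATION,
- * requesting prime-sized chunks, and check each returned allocation starts at
- * the expected offset and is contiguous.
- */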
-static int igt_buddy_alloc_range(void *arg)
-{
- unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
- u64 offset, size, rem, chunk_size, end;
- unsigned long page_num;
- struct drm_buddy mm;
- LIST_HEAD(blocks);
- int err;
-
- igt_mm_config(&size, &chunk_size);
-
- err = drm_buddy_init(&mm, size, chunk_size);
- if (err) {
- pr_err("buddy_init failed(%d)\n", err);
- return err;
- }
-
- err = igt_check_mm(&mm);
- if (err) {
- pr_err("pre-mm check failed, abort, abort, abort!\n");
- goto err_fini;
- }
-
- rem = mm.size;
- offset = 0;
-
- for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
- struct drm_buddy_block *block;
- LIST_HEAD(tmp);
-
- size = min(page_num * mm.chunk_size, rem);
- end = offset + size;
-
- err = drm_buddy_alloc_blocks(&mm, offset, end, size, mm.chunk_size, &tmp, flags);
- if (err) {
- if (err == -ENOMEM) {
- pr_info("alloc_range hit -ENOMEM with size=%llx\n",
- size);
- } else {
- pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
- offset, size, err);
- }
-
- break;
- }
-
- block = list_first_entry_or_null(&tmp,
- struct drm_buddy_block,
- link);
- if (!block) {
- pr_err("alloc_range has no blocks\n");
- err = -EINVAL;
- break;
- }
-
- if (drm_buddy_block_offset(block) != offset) {
- pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
- drm_buddy_block_offset(block), offset);
- err = -EINVAL;
- }
-
- if (!err)
- err = igt_check_blocks(&mm, &tmp, size, true);
-
- list_splice_tail(&tmp, &blocks);
-
- if (err)
- break;
-
- offset += size;
-
- rem -= size;
- if (!rem)
- break;
-
- cond_resched();
- }
-
- if (err == -ENOMEM)
- err = 0;
-
- drm_buddy_free_list(&mm, &blocks);
-
- if (!err) {
- err = igt_check_mm(&mm);
- if (err)
- pr_err("post-mm check failed\n");
- }
-
-err_fini:
- drm_buddy_fini(&mm);
-
- return err;
-}
-
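-/*
- * Initialise a U64_MAX-sized mm and confirm the allocator clamps itself to
- * DRM_BUDDY_MAX_ORDER, then allocate and verify a single max-order block.
- */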
-static int igt_buddy_alloc_limit(void *arg)
-{
- u64 size = U64_MAX, start = 0;
- struct drm_buddy_block *block;
- unsigned long flags = 0;
- LIST_HEAD(allocated);
- struct drm_buddy mm;
- int err;
-
- err = drm_buddy_init(&mm, size, PAGE_SIZE);
- if (err)
- return err;
-
- if (mm.max_order != DRM_BUDDY_MAX_ORDER) {
- pr_err("mm.max_order(%d) != %d\n",
- mm.max_order, DRM_BUDDY_MAX_ORDER);
- err = -EINVAL;
- goto out_fini;
- }
-
- size = mm.chunk_size << mm.max_order;
- err = drm_buddy_alloc_blocks(&mm, start, size, size,
- PAGE_SIZE, &allocated, flags);
-
- if (unlikely(err))
- goto out_free;
-
- block = list_first_entry_or_null(&allocated,
- struct drm_buddy_block,
- link);
-
- if (!block) {
- err = -EINVAL;
- goto out_fini;
- }
-
- if (drm_buddy_block_order(block) != mm.max_order) {
- pr_err("block order(%d) != %d\n",
- drm_buddy_block_order(block), mm.max_order);
- err = -EINVAL;
- goto out_free;
- }
-
- if (drm_buddy_block_size(&mm, block) !=
- BIT_ULL(mm.max_order) * PAGE_SIZE) {
- pr_err("block size(%llu) != %llu\n",
- drm_buddy_block_size(&mm, block),
- BIT_ULL(mm.max_order) * PAGE_SIZE);
- err = -EINVAL;
- goto out_free;
- }
-
-out_free:
- drm_buddy_free_list(&mm, &allocated);
-out_fini:
- drm_buddy_fini(&mm);
- return err;
-}
-
-static int igt_sanitycheck(void *ignored)
-{
- pr_info("%s - ok!\n", __func__);
- return 0;
-}
-
-#include "drm_selftest.c"
-
-static int __init test_drm_buddy_init(void)
-{
- int err;
-
- while (!random_seed)
- random_seed = get_random_int();
-
- pr_info("Testing DRM buddy manager (struct drm_buddy), with random_seed=0x%x\n",
- random_seed);
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-static void __exit test_drm_buddy_exit(void)
-{
-}
-
-module_init(test_drm_buddy_init);
-module_exit(test_drm_buddy_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
deleted file mode 100644
index d96cd890def6..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ /dev/null
@@ -1,1141 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 Bootlin
- */
-
-#define pr_fmt(fmt) "drm_cmdline: " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <drm/drm_connector.h>
-#include <drm/drm_modes.h>
-
-#define TESTS "drm_cmdline_selftests.h"
-#include "drm_selftest.h"
-#include "test-drm_modeset_common.h"
-
-static const struct drm_connector no_connector = {};
-
-static int drm_cmdline_test_force_e_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static const struct drm_connector connector_hdmi = {
- .connector_type = DRM_MODE_CONNECTOR_HDMIB,
-};
-
-static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
- &connector_hdmi,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
-
- return 0;
-}
-
-static const struct drm_connector connector_dvi = {
- .connector_type = DRM_MODE_CONNECTOR_DVII,
-};
-
-static int drm_cmdline_test_force_D_only_dvi(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
- &connector_dvi,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
-
- return 0;
-}
-
-static int drm_cmdline_test_force_d_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_OFF);
-
- return 0;
-}
-
-static int drm_cmdline_test_margin_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("m",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_interlace_only(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("i",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_missing_x(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("x480",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_missing_y(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("1024x",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bad_y(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("1024xtest",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_missing_y_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("1024x-24",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_vesa(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480M",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(!mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_vesa_rblank(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480MR",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(!mode.rb);
- FAIL_ON(!mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_rblank(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480R",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(!mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bad_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480-test",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480@60",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bad_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480@refresh",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_interlaced(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60i",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(!mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_margins(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60m",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_off(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60d",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_OFF);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on_off(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480-24@60de",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60e",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on_analog(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60D",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_force_on_digital(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
- static const struct drm_connector connector = {
- .connector_type = DRM_MODE_CONNECTOR_DVII,
- };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60D",
- &connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24@60ime",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(!mode.refresh_specified);
- FAIL_ON(mode.refresh != 60);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(!mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_margins_force_on(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480me",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_vesa_margins(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480Mm",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(!mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(!mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_res_invalid_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480f",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_res_bpp_wrong_place_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480e-24",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC",
- &no_connector,
- &mode));
- FAIL_ON(strcmp(mode.name, "NTSC"));
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- return 0;
-}
-
-static int drm_cmdline_test_name_bpp(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC-24",
- &no_connector,
- &mode));
- FAIL_ON(strcmp(mode.name, "NTSC"));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- return 0;
-}
-
-static int drm_cmdline_test_name_bpp_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC-24@60",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_refresh(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_refresh_wrong_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60m",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_refresh_invalid_mode(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("NTSC@60f",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_name_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(strcmp(mode.name, "NTSC"));
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- return 0;
-}
-
-static int drm_cmdline_test_name_bpp_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("NTSC-24,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(strcmp(mode.name, "NTSC"));
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_0(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=0",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_0);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_90(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=90",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_90);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_180(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_270(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=270",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_270);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_multiple(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_invalid_val(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=42",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_rotate_truncated(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_hmirror(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,reflect_x",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_vmirror(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,reflect_y",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_margin_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.tv_margins.right != 14);
- FAIL_ON(mode.tv_margins.left != 24);
- FAIL_ON(mode.tv_margins.bottom != 36);
- FAIL_ON(mode.tv_margins.top != 42);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_multiple_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480,rotate=270,reflect_x",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X));
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_invalid_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,test=42",
- &no_connector,
- &mode));
-
- return 0;
-}
-
-static int drm_cmdline_test_bpp_extra_and_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480-24e,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- FAIL_ON(mode.refresh_specified);
-
- FAIL_ON(!mode.bpp_specified);
- FAIL_ON(mode.bpp != 24);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_extra_and_option(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("720x480e,rotate=180",
- &no_connector,
- &mode));
- FAIL_ON(!mode.specified);
- FAIL_ON(mode.xres != 720);
- FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_ROTATE_180);
-
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_freestanding_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.tv_margins.right != 14);
- FAIL_ON(mode.tv_margins.left != 24);
- FAIL_ON(mode.tv_margins.bottom != 36);
- FAIL_ON(mode.tv_margins.top != 42);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-static int drm_cmdline_test_freestanding_force_e_and_options(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.tv_margins.right != 14);
- FAIL_ON(mode.tv_margins.left != 24);
- FAIL_ON(mode.tv_margins.bottom != 36);
- FAIL_ON(mode.tv_margins.top != 42);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_ON);
-
- return 0;
-}
-
-static int drm_cmdline_test_panel_orientation(void *ignored)
-{
- struct drm_cmdline_mode mode = { };
-
- FAIL_ON(!drm_mode_parse_command_line_for_connector("panel_orientation=upside_down",
- &no_connector,
- &mode));
- FAIL_ON(mode.specified);
- FAIL_ON(mode.refresh_specified);
- FAIL_ON(mode.bpp_specified);
-
- FAIL_ON(mode.panel_orientation != DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
-
- FAIL_ON(mode.rb);
- FAIL_ON(mode.cvt);
- FAIL_ON(mode.interlace);
- FAIL_ON(mode.margins);
- FAIL_ON(mode.force != DRM_FORCE_UNSPECIFIED);
-
- return 0;
-}
-
-#include "drm_selftest.c"
-
-static int __init test_drm_cmdline_init(void)
-{
- int err;
-
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-module_init(test_drm_cmdline_init);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
deleted file mode 100644
index 816e1464a98f..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ /dev/null
@@ -1,668 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test case for drm_damage_helper functions
- */
-
-#define pr_fmt(fmt) "drm_damage_helper: " fmt
-
-#include <drm/drm_damage_helper.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane.h>
-#include <drm/drm_drv.h>
-
-#include "test-drm_modeset_common.h"
-
-static struct drm_driver mock_driver;
-static struct drm_device mock_device;
-static struct drm_object_properties mock_obj_props;
-static struct drm_plane mock_plane;
-static struct drm_property mock_prop;
-
-static void mock_setup(struct drm_plane_state *state)
-{
- static bool setup_done = false;
-
- state->plane = &mock_plane;
-
- if (setup_done)
- return;
-
- /* just enough so that drm_plane_enable_fb_damage_clips() works */
- mock_device.driver = &mock_driver;
- mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
- mock_plane.dev = &mock_device;
- mock_obj_props.count = 0;
- mock_plane.base.properties = &mock_obj_props;
- mock_prop.base.id = 1; /* 0 is an invalid id */
- mock_prop.dev = &mock_device;
-
- drm_plane_enable_fb_damage_clips(&mock_plane);
-}
-
-static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
- int y2)
-{
- state->src.x1 = x1;
- state->src.y1 = y1;
- state->src.x2 = x2;
- state->src.y2 = y2;
-}
-
-static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
- int y2)
-{
- r->x1 = x1;
- r->y1 = y1;
- r->x2 = x2;
- r->y2 = y2;
-}
-
-static void set_damage_blob(struct drm_property_blob *damage_blob,
- struct drm_mode_rect *r, uint32_t size)
-{
- damage_blob->length = size;
- damage_blob->data = r;
-}
-
-static void set_plane_damage(struct drm_plane_state *state,
- struct drm_property_blob *damage_blob)
-{
- state->fb_damage_clips = damage_blob;
-}
-
-static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
- int x1, int y1, int x2, int y2)
-{
- /*
- * Round down x1/y1 and round up x2/y2. This is because damage is not in
- * 16.16 fixed point so to catch all pixels.
- */
- int src_x1 = state->src.x1 >> 16;
- int src_y1 = state->src.y1 >> 16;
- int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
- int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
-
- if (x1 >= x2 || y1 >= y2) {
- pr_err("Cannot have damage clip with no dimension.\n");
- return false;
- }
-
- if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
- pr_err("Damage cannot be outside rounded plane src.\n");
- return false;
- }
-
- if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
- pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
- return false;
- }
-
- return true;
-}
-
-static const struct drm_framebuffer fb = {
- .width = 2048,
- .height = 2048
-};
-
-/* common mocked structs many tests need */
-#define MOCK_VARIABLES() \
- struct drm_plane_state old_state; \
- struct drm_plane_state state = { \
- .crtc = ZERO_SIZE_PTR, \
- .fb = (struct drm_framebuffer *) &fb, \
- .visible = true, \
- }; \
- mock_setup(&old_state); \
- mock_setup(&state);
-
-int igt_damage_iter_no_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src same as fb size. */
- set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
- set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- set_plane_src(&state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src moved since old plane state. */
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 10 << 16, 10 << 16,
- (10 + 1024) << 16, (10 + 768) << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part and it moved since old plane state. */
- set_plane_src(&old_state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_not_visible(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- state.visible = false;
-
- mock_setup(&old_state);
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_no_crtc(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- state.crtc = NULL;
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_no_damage_no_fb(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_plane_state old_state;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- struct drm_plane_state state = {
- .crtc = ZERO_SIZE_PTR,
- .fb = 0,
- };
-
- mock_setup(&old_state);
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_simple_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* Damage set to plane src */
- set_damage_clip(&damage, 0, 0, 1024, 768);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- set_damage_clip(&damage, 256, 192, 768, 576);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_intersect_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* Damage intersect with plane src. */
- set_damage_clip(&damage, 256, 192, 1360, 768);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage clipped to src.");
- FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_outside_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* Damage clip outside plane src */
- set_damage_clip(&damage, 1360, 1360, 1380, 1380);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_damage_clip(&damage, 10, 10, 256, 330);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* Damage intersect with plane src. */
- set_damage_clip(&damage, 10, 1, 1360, 330);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src has fractional part. */
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* Damage clip outside plane src */
- set_damage_clip(&damage, 1360, 1360, 1380, 1380);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should have no damage.");
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src moved since old plane state. */
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 10 << 16, 10 << 16,
- (10 + 1024) << 16, (10 + 768) << 16);
- set_damage_clip(&damage, 20, 30, 256, 256);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
-
- return 0;
-}
-
-int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage;
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- /* Plane src with fractional part moved since old plane state. */
- set_plane_src(&old_state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* Damage intersect with plane src. */
- set_damage_clip(&damage, 20, 30, 1360, 256);
- set_damage_blob(&damage_blob, &damage, sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
-
- return 0;
-}
-
-int igt_damage_iter_damage(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* 2 damage clips. */
- set_damage_clip(&damage[0], 20, 30, 200, 180);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (num_hits == 0)
- FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
- if (num_hits == 1)
- FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
- num_hits++;
- }
-
- FAIL(num_hits != 2, "Should return damage when set.");
-
- return 0;
-}
-
-int igt_damage_iter_damage_one_intersect(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- /* 2 damage clips, one intersect plane src. */
- set_damage_clip(&damage[0], 20, 30, 200, 180);
- set_damage_clip(&damage[1], 2, 2, 1360, 1360);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (num_hits == 0)
- FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
- if (num_hits == 1)
- FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
- num_hits++;
- }
-
- FAIL(num_hits != 2, "Should return damage when set.");
-
- return 0;
-}
-
-int igt_damage_iter_damage_one_outside(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
- set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
- /* 2 damage clips, one outside plane src. */
- set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 1, "Should return damage when set.");
- FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
-
- return 0;
-}
-
-int igt_damage_iter_damage_src_moved(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- /* 2 damage clips, one outside plane src. */
- set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
-	FAIL(num_hits != 1, "Should return rounded-off plane src as damage.");
- FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
-
- return 0;
-}
-
-int igt_damage_iter_damage_not_visible(void *ignored)
-{
- struct drm_atomic_helper_damage_iter iter;
- struct drm_property_blob damage_blob;
- struct drm_mode_rect damage[2];
- struct drm_rect clip;
- uint32_t num_hits = 0;
-
- MOCK_VARIABLES();
-
- state.visible = false;
-
- set_plane_src(&old_state, 0x40002, 0x40002,
- 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
- set_plane_src(&state, 0x3fffe, 0x3fffe,
- 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
- /* 2 damage clips, one outside plane src. */
- set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
- set_damage_clip(&damage[1], 240, 200, 280, 250);
- set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
- set_plane_damage(&state, &damage_blob);
- drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
- drm_atomic_for_each_plane_damage(&iter, &clip)
- num_hits++;
-
- FAIL(num_hits != 0, "Should not return any damage.");
-
- return 0;
-}
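
The damage-iterator tests above encode plane source coordinates in 16.16 fixed point (0x40002 is 4 + 2/65536), and the expected clips follow from rounding the fractional source rectangle outward: floor on the top-left corner, ceil on the bottom-right. A minimal standalone sketch of that arithmetic, using hypothetical helper names rather than the kernel's drm_rect API:

/* Standalone sketch: outward rounding of a 16.16 fixed-point rect,
 * reproducing the values the fractional-src tests above expect.
 * fp_floor()/fp_ceil() are hypothetical helpers, not kernel API.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int32_t fp_floor(int64_t v) { return (int32_t)(v >> 16); }
static int32_t fp_ceil(int64_t v)  { return (int32_t)((v + 0xffff) >> 16); }

int main(void)
{
	/* Plane src used by the tests: 0x40002 .. 0x40002 + (1024 << 16). */
	int64_t x1 = 0x40002, x2 = 0x40002 + (1024 << 16);
	int64_t y1 = 0x40002, y2 = 0x40002 + (768 << 16);

	/* Rounded-off src: floor the top-left, ceil the bottom-right. */
	printf("src = (%d, %d)-(%d, %d)\n",
	       fp_floor(x1), fp_floor(y1), fp_ceil(x2), fp_ceil(y2));

	/* Matches the clip expected by the *_fractional_src_moved test. */
	assert(fp_floor(x1) == 4 && fp_ceil(x2) == 1029 && fp_ceil(y2) == 773);
	return 0;
}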
diff --git a/drivers/gpu/drm/selftests/test-drm_format.c b/drivers/gpu/drm/selftests/test-drm_format.c
deleted file mode 100644
index c5e212afa27a..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_format.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test cases for the drm_format functions
- */
-
-#define pr_fmt(fmt) "drm_format: " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include <drm/drm_fourcc.h>
-
-#include "test-drm_modeset_common.h"
-
-int igt_check_drm_format_block_width(void *ignored)
-{
- const struct drm_format_info *info = NULL;
-
- /* Test invalid arguments */
- FAIL_ON(drm_format_info_block_width(info, 0) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
- FAIL_ON(drm_format_info_block_width(info, 1) != 0);
-
- /* Test 1 plane format */
- info = drm_format_info(DRM_FORMAT_XRGB4444);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 1);
- FAIL_ON(drm_format_info_block_width(info, 1) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- /* Test 2 planes format */
- info = drm_format_info(DRM_FORMAT_NV12);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 1);
- FAIL_ON(drm_format_info_block_width(info, 1) != 1);
- FAIL_ON(drm_format_info_block_width(info, 2) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- /* Test 3 planes format */
- info = drm_format_info(DRM_FORMAT_YUV422);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 1);
- FAIL_ON(drm_format_info_block_width(info, 1) != 1);
- FAIL_ON(drm_format_info_block_width(info, 2) != 1);
- FAIL_ON(drm_format_info_block_width(info, 3) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- /* Test a tiled format */
- info = drm_format_info(DRM_FORMAT_X0L0);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_width(info, 0) != 2);
- FAIL_ON(drm_format_info_block_width(info, 1) != 0);
- FAIL_ON(drm_format_info_block_width(info, -1) != 0);
-
- return 0;
-}
-
-int igt_check_drm_format_block_height(void *ignored)
-{
- const struct drm_format_info *info = NULL;
-
- /* Test invalid arguments */
- FAIL_ON(drm_format_info_block_height(info, 0) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
- FAIL_ON(drm_format_info_block_height(info, 1) != 0);
-
- /* Test 1 plane format */
- info = drm_format_info(DRM_FORMAT_XRGB4444);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 1);
- FAIL_ON(drm_format_info_block_height(info, 1) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- /* Test 2 planes format */
- info = drm_format_info(DRM_FORMAT_NV12);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 1);
- FAIL_ON(drm_format_info_block_height(info, 1) != 1);
- FAIL_ON(drm_format_info_block_height(info, 2) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- /* Test 3 planes format */
- info = drm_format_info(DRM_FORMAT_YUV422);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 1);
- FAIL_ON(drm_format_info_block_height(info, 1) != 1);
- FAIL_ON(drm_format_info_block_height(info, 2) != 1);
- FAIL_ON(drm_format_info_block_height(info, 3) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- /* Test a tiled format */
- info = drm_format_info(DRM_FORMAT_X0L0);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_block_height(info, 0) != 2);
- FAIL_ON(drm_format_info_block_height(info, 1) != 0);
- FAIL_ON(drm_format_info_block_height(info, -1) != 0);
-
- return 0;
-}
-
-int igt_check_drm_format_min_pitch(void *ignored)
-{
- const struct drm_format_info *info = NULL;
-
- /* Test invalid arguments */
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- /* Test 1 plane 8 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_RGB332);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
- (uint64_t)(UINT_MAX - 1));
-
- /* Test 1 plane 16 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_XRGB4444);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
- (uint64_t)(UINT_MAX - 1) * 2);
-
- /* Test 1 plane 24 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_RGB888);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 3);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 6);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 3072);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 5760);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 12288);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2013);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 3);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
- (uint64_t)(UINT_MAX - 1) * 3);
-
- /* Test 1 plane 32 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_ABGR8888);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 8);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 2560);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 7680);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 16384);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2684);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
- (uint64_t)(UINT_MAX - 1) * 4);
-
- /* Test 2 planes format */
- info = drm_format_info(DRM_FORMAT_NV12);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 672);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX);
- FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
- (uint64_t)UINT_MAX + 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
- (uint64_t)(UINT_MAX - 1));
- FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1));
-
- /* Test 3 planes 8 bits per pixel format */
- info = drm_format_info(DRM_FORMAT_YUV422);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 3, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 1) != 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 2) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 320);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 320) != 320);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 512);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 512) != 512);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 960);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 960) != 960);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 2048) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 336);
- FAIL_ON(drm_format_info_min_pitch(info, 2, 336) != 336);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX);
- FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
- (uint64_t)UINT_MAX / 2 + 1);
- FAIL_ON(drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1) !=
- (uint64_t)UINT_MAX / 2 + 1);
- FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1) / 2);
- FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1) / 2);
- FAIL_ON(drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2) !=
- (uint64_t)(UINT_MAX - 1) / 2);
-
- /* Test tiled format */
- info = drm_format_info(DRM_FORMAT_X0L2);
- FAIL_ON(!info);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
- FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
-
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
- FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
- (uint64_t)UINT_MAX * 2);
- FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
- (uint64_t)(UINT_MAX - 1) * 2);
-
- return 0;
-}
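
The expected pitches in the test above all reduce to the same formula: bytes per block times the number of horizontal blocks, rounded up. A standalone sketch of that arithmetic; the per-format byte and block counts below are assumptions chosen to reproduce the expectations above, the kernel keeps the real values in its drm_format_info tables:

/* Standalone sketch of the min-pitch arithmetic exercised above:
 * min_pitch = ceil(width * bytes_per_block / pixels_per_block).
 * The format parameters below are assumptions for illustration. */
#include <assert.h>
#include <stdint.h>

static uint64_t min_pitch(uint64_t width, unsigned int char_per_block,
			  unsigned int pixels_per_block)
{
	return (width * char_per_block + pixels_per_block - 1) / pixels_per_block;
}

int main(void)
{
	assert(min_pitch(671, 2, 1) == 1342);	/* XRGB4444, plane 0: 2 bytes/pixel */
	assert(min_pitch(336, 2, 1) == 672);	/* NV12, plane 1: Cb/Cr pairs, 2 bytes */
	assert(min_pitch(671, 3, 1) == 2013);	/* RGB888, plane 0: 3 bytes/pixel */
	assert(min_pitch(671, 8, 4) == 1342);	/* X0L2: assumed 8 bytes per 2x2 block */
	return 0;
}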
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.c b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
deleted file mode 100644
index 2a7f93774006..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common file for modeset selftests.
- */
-
-#include <linux/module.h>
-
-#include "test-drm_modeset_common.h"
-
-#define TESTS "drm_modeset_selftests.h"
-#include "drm_selftest.h"
-
-#include "drm_selftest.c"
-
-static int __init test_drm_modeset_init(void)
-{
- int err;
-
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-static void __exit test_drm_modeset_exit(void)
-{
-}
-
-module_init(test_drm_modeset_init);
-module_exit(test_drm_modeset_exit);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
deleted file mode 100644
index cfb51d8da2bc..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __TEST_DRM_MODESET_COMMON_H__
-#define __TEST_DRM_MODESET_COMMON_H__
-
-#include <linux/errno.h>
-#include <linux/printk.h>
-
-#define FAIL(test, msg, ...) \
- do { \
- if (test) { \
- pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
- return -EINVAL; \
- } \
- } while (0)
-
-#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
-
-int igt_drm_rect_clip_scaled_div_by_zero(void *ignored);
-int igt_drm_rect_clip_scaled_not_clipped(void *ignored);
-int igt_drm_rect_clip_scaled_clipped(void *ignored);
-int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored);
-int igt_check_plane_state(void *ignored);
-int igt_check_drm_format_block_width(void *ignored);
-int igt_check_drm_format_block_height(void *ignored);
-int igt_check_drm_format_min_pitch(void *ignored);
-int igt_check_drm_framebuffer_create(void *ignored);
-int igt_damage_iter_no_damage(void *ignored);
-int igt_damage_iter_no_damage_fractional_src(void *ignored);
-int igt_damage_iter_no_damage_src_moved(void *ignored);
-int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
-int igt_damage_iter_no_damage_not_visible(void *ignored);
-int igt_damage_iter_no_damage_no_crtc(void *ignored);
-int igt_damage_iter_no_damage_no_fb(void *ignored);
-int igt_damage_iter_simple_damage(void *ignored);
-int igt_damage_iter_single_damage(void *ignored);
-int igt_damage_iter_single_damage_intersect_src(void *ignored);
-int igt_damage_iter_single_damage_outside_src(void *ignored);
-int igt_damage_iter_single_damage_fractional_src(void *ignored);
-int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
-int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
-int igt_damage_iter_single_damage_src_moved(void *ignored);
-int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
-int igt_damage_iter_damage(void *ignored);
-int igt_damage_iter_damage_one_intersect(void *ignored);
-int igt_damage_iter_damage_one_outside(void *ignored);
-int igt_damage_iter_damage_src_moved(void *ignored);
-int igt_damage_iter_damage_not_visible(void *ignored);
-int igt_dp_mst_calc_pbn_mode(void *ignored);
-int igt_dp_mst_sideband_msg_req_decode(void *ignored);
-
-#endif
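
Every selftest in this directory reports failures through the FAIL()/FAIL_ON() macros above. A userspace analogue (fprintf in place of pr_err, otherwise the same pattern) shows the stringify trick FAIL_ON relies on:

/* Userspace analogue of the FAIL()/FAIL_ON() pattern above;
 * pr_err is replaced by fprintf, everything else is the same idea. */
#include <errno.h>
#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#define FAIL(test, msg, ...) \
	do { \
		if (test) { \
			fprintf(stderr, "%s/%u: " msg, __func__, __LINE__, ##__VA_ARGS__); \
			return -EINVAL; \
		} \
	} while (0)

#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")

static int sample_test(void)
{
	int two = 1 + 1;

	FAIL(two != 2, "Arithmetic is broken\n");
	FAIL_ON(two == 3);	/* would print "FAIL_ON(two == 3)" if it triggered */
	return 0;
}

int main(void)
{
	return sample_test() ? 1 : 0;
}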
diff --git a/drivers/gpu/drm/selftests/test-drm_rect.c b/drivers/gpu/drm/selftests/test-drm_rect.c
deleted file mode 100644
index 3a5ff38321f4..000000000000
--- a/drivers/gpu/drm/selftests/test-drm_rect.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test cases for the drm_rect functions
- */
-
-#define pr_fmt(fmt) "drm_rect: " fmt
-
-#include <linux/limits.h>
-
-#include <drm/drm_rect.h>
-
-#include "test-drm_modeset_common.h"
-
-int igt_drm_rect_clip_scaled_div_by_zero(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /*
- * Make sure we don't divide by zero when dst
- * width/height is zero and dst and clip do not intersect.
- */
- drm_rect_init(&src, 0, 0, 0, 0);
- drm_rect_init(&dst, 0, 0, 0, 0);
- drm_rect_init(&clip, 1, 1, 1, 1);
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-	FAIL(visible, "Destination should not be visible\n");
- FAIL(drm_rect_visible(&src), "Source should not be visible\n");
-
- drm_rect_init(&src, 0, 0, 0, 0);
- drm_rect_init(&dst, 3, 3, 0, 0);
- drm_rect_init(&clip, 1, 1, 1, 1);
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-	FAIL(visible, "Destination should not be visible\n");
- FAIL(drm_rect_visible(&src), "Source should not be visible\n");
-
- return 0;
-}
-
-int igt_drm_rect_clip_scaled_not_clipped(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /* 1:1 scaling */
- drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
- drm_rect_init(&dst, 0, 0, 1, 1);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 2:1 scaling */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 1, 1);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
- src.y1 != 0 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:2 scaling */
- drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 0, 0, 2, 2);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 2 ||
- dst.y1 != 0 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- return 0;
-}
-
-int igt_drm_rect_clip_scaled_clipped(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /* 1:1 scaling top/left clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:1 scaling bottom/right clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 1, 1, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
- src.y1 != 1 << 16 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 1 || dst.x2 != 2 ||
- dst.y1 != 1 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 2:1 scaling top/left clip */
- drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 0, 0, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 2 << 16 ||
- src.y1 != 0 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 1 ||
- dst.y1 != 0 || dst.y2 != 1,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 2:1 scaling bottom/right clip */
- drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 1, 1, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
- src.y1 != 2 << 16 || src.y2 != 4 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 1 || dst.x2 != 2 ||
- dst.y1 != 1 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:2 scaling top/left clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 4, 4);
- drm_rect_init(&clip, 0, 0, 2, 2);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 0 || src.x2 != 1 << 16 ||
- src.y1 != 0 || src.y2 != 1 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 0 || dst.x2 != 2 ||
- dst.y1 != 0 || dst.y2 != 2,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- /* 1:2 scaling bottom/right clip */
- drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
- drm_rect_init(&dst, 0, 0, 4, 4);
- drm_rect_init(&clip, 2, 2, 2, 2);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
- src.y1 != 1 << 16 || src.y2 != 2 << 16,
- "Source badly clipped\n");
- FAIL(dst.x1 != 2 || dst.x2 != 4 ||
- dst.y1 != 2 || dst.y2 != 4,
- "Destination badly clipped\n");
- FAIL(!visible, "Destination should be visible\n");
- FAIL(!drm_rect_visible(&src), "Source should be visible\n");
-
- return 0;
-}
-
-int igt_drm_rect_clip_scaled_signed_vs_unsigned(void *ignored)
-{
- struct drm_rect src, dst, clip;
- bool visible;
-
- /*
-	 * 'clip.x2 - dst.x1 >= dst width' could result in a negative
-	 * src rectangle width, which is no longer expected by the
-	 * code as it uses unsigned types. This could lead to
-	 * the clipped source rectangle appearing visible when it
- * should have been fully clipped. Make sure both rectangles
- * end up invisible.
- */
- drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
- drm_rect_init(&dst, 0, 0, 2, 2);
- drm_rect_init(&clip, 3, 3, 1, 1);
-
- visible = drm_rect_clip_scaled(&src, &dst, &clip);
-
- FAIL(visible, "Destination should not be visible\n");
- FAIL(drm_rect_visible(&src), "Source should not be visible\n");
-
- return 0;
-}
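
The expectations in the drm_rect tests above all follow one rule: when the destination rectangle is clipped, the source is trimmed by the same fraction, expressed in 16.16 fixed point. A standalone one-axis sketch of that rule, simplified from the cases above (the kernel's drm_rect_clip_scaled additionally guards against zero-sized and inverted rectangles):

/* One-axis sketch of scaled clipping: trim dst to clip, then trim src
 * proportionally. Simplified for illustration; not the kernel helper. */
#include <assert.h>
#include <stdint.h>

struct span { int64_t x1, x2; };	/* src spans are 16.16 fixed point */

static void clip_scaled_1d(struct span *src, struct span *dst,
			   const struct span *clip)
{
	int64_t dst_w = dst->x2 - dst->x1;
	/* 16.16 source units per destination pixel */
	int64_t scale = dst_w ? (src->x2 - src->x1) / dst_w : 0;

	if (clip->x1 > dst->x1) {
		src->x1 += (clip->x1 - dst->x1) * scale;
		dst->x1 = clip->x1;
	}
	if (clip->x2 < dst->x2) {
		src->x2 -= (dst->x2 - clip->x2) * scale;
		dst->x2 = clip->x2;
	}
}

int main(void)
{
	/* "2:1 scaling bottom/right clip" case: src 0..4<<16, dst 0..2, clip 1..2 */
	struct span src = { 0, 4 << 16 }, dst = { 0, 2 }, clip = { 1, 2 };

	clip_scaled_1d(&src, &dst, &clip);
	assert(src.x1 == 2 << 16 && src.x2 == 4 << 16);
	assert(dst.x1 == 1 && dst.x2 == 2);
	return 0;
}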
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 288b838a904a..4ec5dc74a6b0 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -5,7 +5,7 @@ config DRM_SHMOBILE
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
Choose this option if you have an SH Mobile chipset.
If M is selected the module will be called shmob-drm.
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 071a929e9fe3..4624c0aff51f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -12,11 +12,10 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -289,18 +288,18 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
{
struct drm_crtc *crtc = &scrtc->crtc;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- scrtc->dma[0] = gem->paddr + fb->offsets[0]
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (scrtc->format->yuv) {
bpp = scrtc->format->bpp - 8;
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- scrtc->dma[1] = gem->paddr + fb->offsets[1]
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
+ scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
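
The only functional change in this hunk is the rename from the CMA helpers (gem->paddr) to the DMA helpers (gem->dma_addr); the address arithmetic is unchanged. For reference, a standalone sketch of that arithmetic for the primary plane, with plain variables standing in for the drm framebuffer fields:

/* Standalone sketch of the scanout-address math kept by this hunk:
 * base DMA address + plane offset + y * pitch + x * bytes-per-pixel.
 * dma_addr, offset, pitch and bpp are plain inputs here, not drm fields. */
#include <assert.h>
#include <stdint.h>

static uint64_t plane0_addr(uint64_t dma_addr, uint32_t offset,
			    uint32_t pitch, uint32_t bpp_bits,
			    uint32_t x, uint32_t y)
{
	return dma_addr + offset + (uint64_t)y * pitch + x * bpp_bits / 8;
}

int main(void)
{
	/* XRGB8888 (32 bpp), 4096-byte pitch, panning to (8, 2). */
	assert(plane0_addr(0x10000000, 0, 1024 * 4, 32, 8, 2) ==
	       0x10000000 + 2 * 4096 + 32);
	return 0;
}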
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 731cbad7520f..3d511fa38913 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -17,7 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -126,11 +126,11 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
static const struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 68d21be784aa..60a2c8d8a0d9 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -9,9 +9,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
index 6ec2b732bb94..0347b1fd2338 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
-struct drm_gem_cma_object;
+struct drm_gem_dma_object;
struct shmob_drm_device;
struct shmob_drm_format_info {
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 4763ea8e1af0..6c5f0cbe7d95 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -9,10 +9,10 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
@@ -41,18 +41,18 @@ static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
unsigned int bpp;
bpp = splane->format->yuv ? 8 : splane->format->bpp;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- splane->dma[0] = gem->paddr + fb->offsets[0]
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ splane->dma[0] = gem->dma_addr + fb->offsets[0]
+ y * fb->pitches[0] + x * bpp / 8;
if (splane->format->yuv) {
bpp = splane->format->bpp - 8;
- gem = drm_fb_cma_get_gem_obj(fb, 1);
- splane->dma[1] = gem->paddr + fb->offsets[1]
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
+ splane->dma[1] = gem->dma_addr + fb->offsets[1]
+ y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
+ x * (bpp == 16 ? 2 : 1);
}
@@ -252,9 +252,10 @@ int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
splane->index = index;
splane->alpha = 255;
- ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
- &shmob_drm_plane_funcs, formats,
- ARRAY_SIZE(formats), false);
+ ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1,
+ &shmob_drm_plane_funcs,
+ formats, ARRAY_SIZE(formats), NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e35e719cf315..6173020a9bf5 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -50,7 +50,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
- idr_init(&dev_priv->object_idr);
+ idr_init_base(&dev_priv->object_idr, 1);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index 1e0fcec7be47..ddfa0bb5d9c9 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -39,13 +39,11 @@ static int ssd130x_i2c_probe(struct i2c_client *client)
return 0;
}
-static int ssd130x_i2c_remove(struct i2c_client *client)
+static void ssd130x_i2c_remove(struct i2c_client *client)
{
struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
ssd130x_remove(ssd130x);
-
- return 0;
}
static void ssd130x_i2c_shutdown(struct i2c_client *client)
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 07802907e39a..19ab4942cb33 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -18,11 +18,6 @@ struct ssd130x_spi_transport {
struct gpio_desc *dc;
};
-static const struct regmap_config ssd130x_spi_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
/*
* The regmap bus .write handler, it is just a wrapper around spi_write()
* but toggling the Data/Command control pin (D/C#). Since for 4-wire SPI
@@ -56,17 +51,12 @@ static int ssd130x_spi_read(void *context, const void *reg, size_t reg_size,
return -EOPNOTSUPP;
}
-/*
- * A custom bus is needed due to the special write that toggles a D/C# pin;
- * another option could be to just have a .reg_write() callback, but that
- * would prevent doing data writes in bulk.
- *
- * Once the regmap API is extended to support defining a bulk write handler
- * in the struct regmap_config, this can be simplified and the bus dropped.
- */
-static struct regmap_bus regmap_ssd130x_spi_bus = {
+static const struct regmap_config ssd130x_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
.write = ssd130x_spi_write,
.read = ssd130x_spi_read,
+ .can_multi_write = true,
};
static int ssd130x_spi_probe(struct spi_device *spi)
@@ -90,8 +80,7 @@ static int ssd130x_spi_probe(struct spi_device *spi)
t->spi = spi;
t->dc = dc;
- regmap = devm_regmap_init(dev, &regmap_ssd130x_spi_bus, t,
- &ssd130x_spi_regmap_config);
+ regmap = devm_regmap_init(dev, NULL, t, &ssd130x_spi_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 5a3e3b78cd9e..bc41a5ae810a 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -18,10 +18,10 @@
#include <linux/pwm.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
@@ -537,11 +537,11 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x)
kfree(buf);
}
-static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *map,
+static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ struct iosys_map dst;
unsigned int dst_pitch;
int ret = 0;
u8 *buf = NULL;
@@ -555,127 +555,174 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_m
if (!buf)
return -ENOMEM;
- drm_fb_xrgb8888_to_mono(buf, dst_pitch, vmap, fb, rect);
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ iosys_map_set_vaddr(&dst, buf);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
ssd130x_update_rect(ssd130x, buf, rect);
+out_free:
kfree(buf);
return ret;
}
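
ssd130x_fb_blit_rect() now wraps the intermediate buffer in an iosys_map and brackets the conversion with begin/end CPU access on the GEM buffer; the conversion itself is drm_fb_xrgb8888_to_mono(). A standalone sketch of the kind of packing such a conversion performs; the luminance weights, threshold and LSB-first bit order below are assumptions for illustration and may not match the helper exactly:

/* Standalone sketch of an XRGB8888 -> monochrome packing pass, as an
 * illustration of what the drm_fb_xrgb8888_to_mono() call above does.
 * Weights, threshold and bit order are assumptions for this sketch. */
#include <assert.h>
#include <stdint.h>

static void xrgb8888_to_mono(uint8_t *dst, const uint32_t *src, unsigned int npix)
{
	for (unsigned int i = 0; i < npix; i++) {
		uint32_t px = src[i];
		uint32_t r = (px >> 16) & 0xff, g = (px >> 8) & 0xff, b = px & 0xff;
		uint32_t gray = (3 * r + 6 * g + b) / 10;	/* rough luminance */

		if (gray > 127)
			dst[i / 8] |= 1 << (i % 8);	/* LSB-first packing */
	}
}

int main(void)
{
	const uint32_t line[8] = { 0x00ffffff, 0, 0, 0, 0, 0, 0, 0x00ffffff };
	uint8_t out[1] = { 0 };

	xrgb8888_to_mono(out, line, 8);
	assert(out[0] == 0x81);		/* first and last pixel lit */
	return 0;
}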
-static int ssd130x_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_device *drm = plane->dev;
+ struct drm_rect src_clip, dst_clip;
+ int idx;
- if (mode->hdisplay != ssd130x->mode.hdisplay &&
- mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_SIZE;
+ if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
+ return;
- if (mode->hdisplay != ssd130x->mode.hdisplay)
- return MODE_ONE_WIDTH;
+ dst_clip = plane_state->dst;
+ if (!drm_rect_intersect(&dst_clip, &src_clip))
+ return;
- if (mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_HEIGHT;
+ if (!drm_dev_enter(drm, &idx))
+ return;
- return MODE_OK;
+ ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+
+ drm_dev_exit(idx);
}
-static void ssd130x_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_device *drm = &ssd130x->drm;
- int idx, ret;
+ struct drm_device *drm = plane->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ int idx;
- ret = ssd130x_power_on(ssd130x);
- if (ret)
+ if (!drm_dev_enter(drm, &idx))
return;
- ret = ssd130x_init(ssd130x);
- if (ret)
- goto out_power_off;
+ ssd130x_clear_screen(ssd130x);
- if (!drm_dev_enter(drm, &idx))
- goto out_power_off;
+ drm_dev_exit(idx);
+}
- ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &plane_state->dst);
+static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_update = ssd130x_primary_plane_helper_atomic_update,
+ .atomic_disable = ssd130x_primary_plane_helper_atomic_disable,
+};
- ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
+static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
- backlight_enable(ssd130x->bl_dev);
+static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev);
- drm_dev_exit(idx);
+ if (mode->hdisplay != ssd130x->mode.hdisplay &&
+ mode->vdisplay != ssd130x->mode.vdisplay)
+ return MODE_ONE_SIZE;
+ else if (mode->hdisplay != ssd130x->mode.hdisplay)
+ return MODE_ONE_WIDTH;
+ else if (mode->vdisplay != ssd130x->mode.vdisplay)
+ return MODE_ONE_HEIGHT;
- return;
-out_power_off:
- ssd130x_power_off(ssd130x);
+ return MODE_OK;
}
-static void ssd130x_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
- struct drm_device *drm = &ssd130x->drm;
- int idx;
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
- if (!drm_dev_enter(drm, &idx))
- return;
+ ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
+ if (ret)
+ return ret;
- ssd130x_clear_screen(ssd130x);
+ return drm_atomic_add_affected_planes(new_state, crtc);
+}
- backlight_disable(ssd130x->bl_dev);
+/*
+ * The CRTC is always enabled. Screen updates are performed by
+ * the primary plane's atomic_update function. Disabling clears
+ * the screen in the primary plane's atomic_disable function.
+ */
+static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
+ .mode_valid = ssd130x_crtc_helper_mode_valid,
+ .atomic_check = ssd130x_crtc_helper_atomic_check,
+};
- ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF);
+static void ssd130x_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *drm = crtc->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
- ssd130x_power_off(ssd130x);
+ ssd130x_init(ssd130x);
- drm_dev_exit(idx);
+ drm_atomic_helper_crtc_reset(crtc);
}
-static void ssd130x_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_plane_state)
+static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
+ .reset = ssd130x_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct ssd130x_device *ssd130x = drm_to_ssd130x(pipe->crtc.dev);
- struct drm_plane_state *plane_state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *drm = &ssd130x->drm;
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct drm_device *drm = encoder->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ int ret;
- if (!fb)
+ ret = ssd130x_power_on(ssd130x);
+ if (ret)
return;
- if (!pipe->crtc.state->active)
- return;
+ ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
- return;
+ backlight_enable(ssd130x->bl_dev);
+}
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = encoder->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
- if (!drm_dev_enter(drm, &idx))
- return;
+ backlight_disable(ssd130x->bl_dev);
- ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
+ ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_OFF);
- drm_dev_exit(idx);
+ ssd130x_power_off(ssd130x);
}
-static const struct drm_simple_display_pipe_funcs ssd130x_pipe_funcs = {
- .mode_valid = ssd130x_display_pipe_mode_valid,
- .enable = ssd130x_display_pipe_enable,
- .disable = ssd130x_display_pipe_disable,
- .update = ssd130x_display_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static const struct drm_encoder_helper_funcs ssd130x_encoder_helper_funcs = {
+ .atomic_enable = ssd130x_encoder_helper_atomic_enable,
+ .atomic_disable = ssd130x_encoder_helper_atomic_disable,
+};
+
+static const struct drm_encoder_funcs ssd130x_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
};
-static int ssd130x_connector_get_modes(struct drm_connector *connector)
+static int ssd130x_connector_helper_get_modes(struct drm_connector *connector)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
struct drm_display_mode *mode;
@@ -695,7 +742,7 @@ static int ssd130x_connector_get_modes(struct drm_connector *connector)
}
static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
- .get_modes = ssd130x_connector_get_modes,
+ .get_modes = ssd130x_connector_helper_get_modes,
};
static const struct drm_connector_funcs ssd130x_connector_funcs = {
@@ -806,8 +853,16 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
struct device *dev = ssd130x->dev;
struct drm_device *drm = &ssd130x->drm;
unsigned long max_width, max_height;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
int ret;
+ /*
+ * Modesetting
+ */
+
ret = drmm_mode_config_init(drm);
if (ret) {
dev_err(dev, "DRM mode config init failed: %d\n", ret);
@@ -833,25 +888,65 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
drm->mode_config.preferred_depth = 32;
drm->mode_config.funcs = &ssd130x_mode_config_funcs;
- ret = drm_connector_init(drm, &ssd130x->connector, &ssd130x_connector_funcs,
+ /* Primary plane */
+
+ primary_plane = &ssd130x->primary_plane;
+ ret = drm_universal_plane_init(drm, primary_plane, 0, &ssd130x_primary_plane_funcs,
+ ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ dev_err(dev, "DRM primary plane init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_plane_helper_add(primary_plane, &ssd130x_primary_plane_helper_funcs);
+
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &ssd130x->crtc;
+ ret = drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ &ssd130x_crtc_funcs, NULL);
+ if (ret) {
+ dev_err(dev, "DRM crtc init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &ssd130x_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &ssd130x->encoder;
+ ret = drm_encoder_init(drm, encoder, &ssd130x_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret) {
+ dev_err(dev, "DRM encoder init failed: %d\n", ret);
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &ssd130x_encoder_helper_funcs);
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &ssd130x->connector;
+ ret = drm_connector_init(drm, connector, &ssd130x_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
if (ret) {
dev_err(dev, "DRM connector init failed: %d\n", ret);
return ret;
}
- drm_connector_helper_add(&ssd130x->connector, &ssd130x_connector_helper_funcs);
+ drm_connector_helper_add(connector, &ssd130x_connector_helper_funcs);
- ret = drm_simple_display_pipe_init(drm, &ssd130x->pipe, &ssd130x_pipe_funcs,
- ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
- NULL, &ssd130x->connector);
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret) {
- dev_err(dev, "DRM simple display pipeline init failed: %d\n", ret);
+ dev_err(dev, "DRM attach connector to encoder failed: %d\n", ret);
return ret;
}
- drm_plane_enable_fb_damage_clips(&ssd130x->pipe.plane);
-
drm_mode_config_reset(drm);
return 0;
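
The net effect of this conversion is that the driver now registers the four KMS objects itself instead of letting the simple-display-pipe helper do it. Condensed from the hunk above, order of calls only, with arguments and error handling trimmed; this is not a buildable driver on its own, see ssd130x_init_modeset() for the real code:

/* Registration order after the conversion, condensed from the hunk above. */
drm_universal_plane_init(drm, primary_plane, 0, &ssd130x_primary_plane_funcs,
			 ssd130x_formats, ARRAY_SIZE(ssd130x_formats),
			 NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
drm_plane_helper_add(primary_plane, &ssd130x_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);

drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, &ssd130x_crtc_funcs, NULL);
drm_crtc_helper_add(crtc, &ssd130x_crtc_helper_funcs);

drm_encoder_init(drm, encoder, &ssd130x_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL);
drm_encoder_helper_add(encoder, &ssd130x_encoder_helper_funcs);
encoder->possible_crtcs = drm_crtc_mask(crtc);

drm_connector_init(drm, connector, &ssd130x_connector_funcs, DRM_MODE_CONNECTOR_Unknown);
drm_connector_helper_add(connector, &ssd130x_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);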
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index 4c4a84e962e7..03038c1b6476 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -13,8 +13,11 @@
#ifndef __SSD1307X_H__
#define __SSD1307X_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_plane_helper.h>
#include <linux/regmap.h>
@@ -42,8 +45,10 @@ struct ssd130x_deviceinfo {
struct ssd130x_device {
struct drm_device drm;
struct device *dev;
- struct drm_simple_display_pipe pipe;
struct drm_display_mode mode;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
struct i2c_client *client;
diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig
index 9a9c7ebfc716..e22b780fe822 100644
--- a/drivers/gpu/drm/sprd/Kconfig
+++ b/drivers/gpu/drm/sprd/Kconfig
@@ -2,7 +2,7 @@ config DRM_SPRD
tristate "DRM Support for Unisoc SoCs Platform"
depends on ARCH_SPRD || COMPILE_TEST
depends on DRM && OF
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 3664089b6983..88f4259680f1 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -19,11 +19,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include "sprd_drm.h"
#include "sprd_dpu.h"
@@ -324,7 +323,7 @@ static u32 drm_blend_to_dpu(struct drm_plane_state *state)
static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
{
struct dpu_context *ctx = &dpu->ctx;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb = state->fb;
u32 addr, size, offset, pitch, blend, format, rotation;
u32 src_x = state->src_x >> 16;
@@ -341,8 +340,8 @@ static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)
size = (src_w & 0xffff) | (src_h << 16);
for (i = 0; i < fb->format->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(fb, i);
- addr = cma_obj->paddr + fb->offsets[i];
+ dma_obj = drm_fb_dma_get_gem_obj(fb, i);
+ addr = dma_obj->dma_addr + fb->offsets[i];
if (i == 0)
layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index);
@@ -524,8 +523,8 @@ static int sprd_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
}
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index b8fc1c6a0cb8..9d42f17a5734 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -13,7 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -48,14 +48,14 @@ static void sprd_drm_mode_config_init(struct drm_device *drm)
drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
}
-DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sprd_drm_fops);
static struct drm_driver sprd_drm_drv = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &sprd_drm_fops,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 246a94afbe74..f2a880c48485 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -4,7 +4,7 @@ config DRM_STI
depends on OF && DRM && (ARCH_STI || ARCH_MULTIPLATFORM)
select RESET_CONTROLLER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL
select FW_LOADER
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 409795786f03..3c7154f2d5f3 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 1e9bd4241f10..db0a1eb53532 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -11,9 +11,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
@@ -243,8 +243,8 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
}
}
- if (!drm_fb_cma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ if (!drm_fb_dma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
@@ -267,7 +267,7 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
u32 y, x;
u32 val;
@@ -278,10 +278,10 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
dst_x = newstate->crtc_x;
dst_y = newstate->crtc_y;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
/* Convert ARGB8888 to CLUT8 */
- sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
+ sti_cursor_argb8888_to_clut8(cursor, (u32 *)dma_obj->vaddr);
/* AWS and AWE depend on the mode */
y = sti_vtg_get_line_number(*mode, 0);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index d858209cf8de..7abf010a3293 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -14,9 +14,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -128,12 +127,12 @@ static void sti_mode_config_init(struct drm_device *dev)
dev->mode_config.normalize_zpos = true;
}
-DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sti_driver_fops);
static const struct drm_driver sti_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.fops = &sti_driver_fops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
.debugfs_init = sti_drm_dbg_init,
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index af783f599306..43c72c2604a0 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -12,10 +12,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
@@ -658,8 +658,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (!drm_fb_cma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ if (!drm_fb_dma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
@@ -714,7 +714,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
struct sti_gdp_node *top_field, *btm_field;
@@ -778,15 +778,15 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
- (unsigned long)cma_obj->paddr);
+ (unsigned long)dma_obj->dma_addr);
/* pixel memory location */
bpp = fb->format->cpp[0];
- top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
+ top_field->gam_gdp_pml = (u32)dma_obj->dma_addr + fb->offsets[0];
top_field->gam_gdp_pml += src_x * bpp;
top_field->gam_gdp_pml += src_y * fb->pitches[0];
@@ -831,7 +831,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
dev_dbg(gdp->dev, "Current NVN:0x%X\n",
readl(gdp->regs + GAM_GDP_NVN_OFFSET));
dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
- (unsigned long)cma_obj->paddr,
+ (unsigned long)dma_obj->dma_addr,
readl(gdp->regs + GAM_GDP_PML_OFFSET));
if (!curr_list) {
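
The conversion above leaves the scanout address computation structurally unchanged: fetch the drm_gem_dma_object backing a framebuffer plane, then offset its dma_addr by the plane offset and the source window. A minimal standalone sketch of that recurring pattern, with a hypothetical foo_plane_scanout_addr() helper and plane index 0, assuming <drm/drm_fb_dma_helper.h> and <drm/drm_gem_dma_helper.h> are included; it is not taken verbatim from any one driver here:

static dma_addr_t foo_plane_scanout_addr(struct drm_framebuffer *fb,
					 struct drm_plane_state *state)
{
	struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
	dma_addr_t addr;

	/* Base DMA address of the GEM object plus the plane 0 offset */
	addr = dma_obj->dma_addr + fb->offsets[0];

	/* Advance to the top-left source pixel (src coordinates are 16.16 fixed point) */
	addr += (state->src.x1 >> 16) * fb->format->cpp[0];
	addr += (state->src.y1 >> 16) * fb->pitches[0];

	return addr;
}
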
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 271982080437..02b77279f6e4 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -16,10 +16,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
@@ -1055,8 +1055,8 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
return -EINVAL;
}
- if (!drm_fb_cma_get_gem_obj(fb, 0)) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
+ if (!drm_fb_dma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get DMA GEM object for fb\n");
return -EINVAL;
}
@@ -1124,7 +1124,7 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct sti_hqvdp_cmd *cmd;
int scale_h, scale_v;
int cmd_offset;
@@ -1178,15 +1178,15 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->format->format,
- (unsigned long)cma_obj->paddr);
+ (unsigned long)dma_obj->dma_addr);
/* Buffer planes address */
- cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
- cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
+ cmd->top.current_luma = (u32)dma_obj->dma_addr + fb->offsets[0];
+ cmd->top.current_chroma = (u32)dma_obj->dma_addr + fb->offsets[1];
/* Pitches */
cmd->top.luma_processed_pitch = fb->pitches[0];
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index c74b524663ab..29e669ccec5b 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -9,10 +9,9 @@
#include <linux/types.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include "sti_compositor.h"
#include "sti_drv.h"
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index 8e33e629d9b0..2c0156bede9c 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -8,7 +8,6 @@
#define _STI_PLANE_H_
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index e0379488cd0d..ded72f879482 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -3,7 +3,7 @@ config DRM_STM
tristate "DRM Support for STMicroelectronics SoC Series"
depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index c63945dc2260..d7914f5122df 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -18,9 +18,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -37,7 +36,7 @@ static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int stm_gem_cma_dumb_create(struct drm_file *file,
+static int stm_gem_dma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -50,10 +49,10 @@ static int stm_gem_cma_dumb_create(struct drm_file *file,
args->pitch = roundup(min_pitch, 128);
args->height = roundup(args->height, 4);
- return drm_gem_cma_dumb_create_internal(file, dev, args);
+ return drm_gem_dma_dumb_create_internal(file, dev, args);
}
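
As a worked example of the rounding above (assuming the usual min_pitch = DIV_ROUND_UP(width * bpp, 8), which the hunk does not show): a 720x482 dumb buffer at 32 bpp has min_pitch = 2880 bytes, rounded up to 2944 (the next multiple of 128), and its height rounds up from 482 to 484 lines, so drm_gem_dma_dumb_create_internal() allocates 2944 * 484 bytes rather than 2880 * 482.
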
-DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drv_driver_fops);
static const struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -64,7 +63,7 @@ static const struct drm_driver drv_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_cma_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_dma_dumb_create),
};
static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index da7a0a183b27..03c6becda795 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -28,13 +28,12 @@
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -1347,7 +1346,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
}
/* Sets the FB address */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 0);
+ paddr = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 0);
if (newstate->rotation & DRM_MODE_REFLECT_X)
paddr += (fb->format->cpp[0] * (x1 - x0 + 1)) - 1;
@@ -1381,7 +1380,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
/* Configure the auxiliary frame buffer address 0 */
- paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
if (newstate->rotation & DRM_MODE_REFLECT_X)
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
@@ -1393,8 +1392,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
break;
case DRM_FORMAT_YUV420:
/* Configure the auxiliary frame buffer address 0 & 1 */
- paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
- paddr2 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
+ paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
+ paddr2 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 2);
if (newstate->rotation & DRM_MODE_REFLECT_X) {
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
@@ -1411,8 +1410,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
break;
case DRM_FORMAT_YVU420:
/* Configure the auxiliary frame buffer address 0 & 1 */
- paddr1 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 2);
- paddr2 = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 1);
+ paddr1 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 2);
+ paddr2 = (u32)drm_fb_dma_get_gem_addr(fb, newstate, 1);
if (newstate->rotation & DRM_MODE_REFLECT_X) {
paddr1 += ((fb->format->cpp[1] * (x1 - x0 + 1)) >> 1) - 1;
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 3a43c436c74a..4741d9f6544c 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -3,7 +3,7 @@ config DRM_SUN4I
tristate "DRM Support for Allwinner A10 Display Engine"
depends on DRM && COMMON_CLK
depends on ARCH_SUNXI || COMPILE_TEST
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL
select REGMAP_MMIO
@@ -16,23 +16,25 @@ config DRM_SUN4I
if DRM_SUN4I
config DRM_SUN4I_HDMI
- tristate "Allwinner A10 HDMI Controller Support"
+ tristate "Allwinner A10/A10s/A20/A31 HDMI Controller Support"
+ depends on ARM || COMPILE_TEST
default DRM_SUN4I
help
- Choose this option if you have an Allwinner SoC with an HDMI
- controller.
+ Choose this option if you have an Allwinner A10/A10s/A20/A31
+ SoC with an HDMI controller.
config DRM_SUN4I_HDMI_CEC
- bool "Allwinner A10 HDMI CEC Support"
+ bool "Allwinner A10/A10s/A20/A31 HDMI CEC Support"
depends on DRM_SUN4I_HDMI
select CEC_CORE
select CEC_PIN
help
- Choose this option if you have an Allwinner SoC with an HDMI
- controller and want to use CEC.
+ Choose this option if you have an Allwinner A10/A10s/A20/A31
+ SoC with an HDMI controller and want to use CEC.
config DRM_SUN4I_BACKEND
tristate "Support for Allwinner A10 Display Engine Backend"
+ depends on ARM || COMPILE_TEST
default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with the
@@ -41,8 +43,8 @@ config DRM_SUN4I_BACKEND
selected the module will be called sun4i-backend.
config DRM_SUN6I_DSI
- tristate "Allwinner A31 MIPI-DSI Controller Support"
- default MACH_SUN8I
+ tristate "Allwinner A31/A64 MIPI-DSI Controller Support"
+ default DRM_SUN4I
select CRC_CCITT
select DRM_MIPI_DSI
select RESET_CONTROLLER
@@ -55,15 +57,17 @@ config DRM_SUN6I_DSI
config DRM_SUN8I_DW_HDMI
tristate "Support for Allwinner version of DesignWare HDMI"
depends on DRM_SUN4I
+ default DRM_SUN4I
select DRM_DW_HDMI
help
Choose this option if you have an Allwinner SoC with the
- DesignWare HDMI controller with custom HDMI PHY. If M is
+ DesignWare HDMI controller. SoCs that support HDMI and
+ have a Display Engine 2.0 contain this controller. If M is
selected the module will be called sun8i_dw_hdmi.
config DRM_SUN8I_MIXER
tristate "Support for Allwinner Display Engine 2.0 Mixer"
- default MACH_SUN8I
+ default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with the
Allwinner Display Engine 2.0, which has a mixer to do some
@@ -75,6 +79,6 @@ config DRM_SUN8I_TCON_TOP
default DRM_SUN4I if DRM_SUN8I_MIXER!=n
help
TCON TOP is responsible for configuring display pipeline for
- HTMI, TVE and LCD.
+ HDMI, TVE and LCD.
endif
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 287e8c4bbaea..38070fc261f3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -19,11 +19,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_backend.h"
@@ -330,7 +329,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
u32 lo_paddr, hi_paddr;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
@@ -339,21 +338,21 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
fb->pitches[0] * 8);
/* Get the start of the displayed memory */
- paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
if (fb->format->is_yuv)
- return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
+ return sun4i_backend_update_yuv_buffer(backend, fb, dma_addr);
/* Write the 32 lower bits of the address (in bits) */
- lo_paddr = paddr << 3;
+ lo_paddr = dma_addr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
regmap_write(backend->engine.regs,
SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
lo_paddr);
/* And the upper bits */
- hi_paddr = paddr >> 29;
+ hi_paddr = dma_addr >> 29;
DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
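
The shifts above exist because the backend layer registers take the framebuffer address expressed in bits rather than bytes. As a worked example with a made-up address, dma_addr = 0x40001000 corresponds to the bit address 0x200008000: its low 32 bits, 0x00008000, are written to SUN4I_BACKEND_LAYFB_L32ADD_REG via dma_addr << 3, while dma_addr >> 29 yields 0x2, the remaining high bits that go into the SUN4I_BACKEND_LAYFB_H4ADD field.
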
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 6eb1aabdb161..d06ffd99d86e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -17,9 +17,8 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
@@ -38,10 +37,10 @@ static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
/* The hardware only allows even pitches for YUV buffers. */
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
-DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
+DEFINE_DRM_GEM_DMA_FOPS(sun4i_drv_fops);
static const struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -55,7 +54,7 @@ static const struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
};
static int sun4i_drv_bind(struct device *dev)
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 462fae73eae9..799ab7460ae5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -14,10 +14,10 @@
#include <linux/reset.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include "sun4i_drv.h"
@@ -160,7 +160,7 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
struct drm_framebuffer *fb = state->fb;
unsigned int strides[3] = {};
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
bool swap;
if (fb->modifier == DRM_FORMAT_MOD_ALLWINNER_TILED) {
@@ -221,22 +221,24 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
swap = sun4i_frontend_format_chroma_requires_swap(fb->format->format);
/* Set the physical address of the buffer in memory */
- paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
- DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
- regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
+ DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &dma_addr);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, dma_addr);
if (fb->format->num_planes > 1) {
- paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
- DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 2 : 1);
+ DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n",
+ &dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
- paddr);
+ dma_addr);
}
if (fb->format->num_planes > 2) {
- paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
- DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 1 : 2);
+ DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n",
+ &dma_addr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
- paddr);
+ dma_addr);
}
}
EXPORT_SYMBOL(sun4i_frontend_update_buffer);
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 648dd0b5b116..98f3176366c0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "sun4i_backend.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 94883abe0dfd..c65f0a89b6b0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -14,6 +14,7 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -275,13 +276,6 @@ drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
encoder);
}
-static inline struct sun4i_tv *
-drm_connector_to_sun4i_tv(struct drm_connector *connector)
-{
- return container_of(connector, struct sun4i_tv,
- connector);
-}
-
/*
* FIXME: If only the drm_display_mode private field was usable, this
* could go away...
@@ -339,7 +333,8 @@ static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
}
-static void sun4i_tv_disable(struct drm_encoder *encoder)
+static void sun4i_tv_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
@@ -353,27 +348,18 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
sunxi_engine_disable_color_correction(crtc->engine);
}
-static void sun4i_tv_enable(struct drm_encoder *encoder)
+static void sun4i_tv_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, encoder->crtc);
+ struct drm_display_mode *mode = &crtc_state->mode;
+ const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
- sunxi_engine_apply_color_correction(crtc->engine);
-
- regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
- SUN4I_TVE_EN_ENABLE,
- SUN4I_TVE_EN_ENABLE);
-}
-
-static void sun4i_tv_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
-
/* Enable and map the DAC to the output */
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_DAC_MAP_MASK,
@@ -466,12 +452,17 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
SUN4I_TVE_RESYNC_FIELD : 0));
regmap_write(tv->regs, SUN4I_TVE_SLAVE_REG, 0);
+
+ sunxi_engine_apply_color_correction(crtc->engine);
+
+ regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+ SUN4I_TVE_EN_ENABLE,
+ SUN4I_TVE_EN_ENABLE);
}
static const struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
- .disable = sun4i_tv_disable,
- .enable = sun4i_tv_enable,
- .mode_set = sun4i_tv_mode_set,
+ .atomic_disable = sun4i_tv_disable,
+ .atomic_enable = sun4i_tv_enable,
};
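
The replacement of .enable/.disable/.mode_set with atomic callbacks follows the generic pattern: the mode is no longer passed in, so the enable hook looks it up from the new CRTC state. A minimal sketch with hypothetical foo_ names (not the sun4i code itself):

static void foo_encoder_atomic_disable(struct drm_encoder *encoder,
				       struct drm_atomic_state *state)
{
	/* Switch the output off */
}

static void foo_encoder_atomic_enable(struct drm_encoder *encoder,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, encoder->crtc);
	struct drm_display_mode *mode = &crtc_state->mode;

	/* Program the hardware for 'mode', then enable the output; the body
	 * of the former .mode_set callback moves in here. */
}

static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
	.atomic_disable	= foo_encoder_atomic_disable,
	.atomic_enable	= foo_encoder_atomic_enable,
};
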
static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
@@ -497,27 +488,13 @@ static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
return i;
}
-static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TODO */
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
.get_modes = sun4i_tv_comp_get_modes,
- .mode_valid = sun4i_tv_comp_mode_valid,
};
-static void
-sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = sun4i_tv_comp_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -604,7 +581,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
if (ret) {
dev_err(dev,
"Couldn't initialise the Composite connector\n");
- goto err_cleanup_connector;
+ goto err_cleanup_encoder;
}
tv->connector.interlace_allowed = true;
@@ -612,7 +589,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
return 0;
-err_cleanup_connector:
+err_cleanup_encoder:
drm_encoder_cleanup(&tv->encoder);
err_disable_clk:
clk_disable_unprepare(tv->clk);
@@ -629,6 +606,7 @@ static void sun4i_tv_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&tv->connector);
drm_encoder_cleanup(&tv->encoder);
clk_disable_unprepare(tv->clk);
+ reset_control_assert(tv->reset);
}
static const struct component_ops sun4i_tv_ops = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 648b38a73066..bafee05f6b24 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -16,10 +16,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_drv.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 36da962de394..ca75ca0835a6 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -13,12 +13,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_mixer.h"
@@ -193,25 +192,25 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
- dma_addr_t paddr;
+ struct drm_gem_dma_object *gem;
+ dma_addr_t dma_addr;
u32 ch_base;
int bpp;
ch_base = sun8i_channel_base(mixer, channel);
/* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);
/* Compute the start of the displayed memory */
bpp = fb->format->cpp[0];
- paddr = gem->paddr + fb->offsets[0];
+ dma_addr = gem->dma_addr + fb->offsets[0];
/* Fixup framebuffer address for src coordinates */
- paddr += (state->src.x1 >> 16) * bpp;
- paddr += (state->src.y1 >> 16) * fb->pitches[0];
+ dma_addr += (state->src.x1 >> 16) * bpp;
+ dma_addr += (state->src.y1 >> 16) * fb->pitches[0];
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
@@ -219,11 +218,11 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
fb->pitches[0]);
- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
- lower_32_bits(paddr));
+ lower_32_bits(dma_addr));
return 0;
}
@@ -246,8 +245,8 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ min_scale = DRM_PLANE_NO_SCALING;
+ max_scale = DRM_PLANE_NO_SCALING;
if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_UI_SCALER_SCALE_MIN;
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 1fee6499bdd3..f9c0a56d3a14 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -7,11 +7,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_csc.h"
@@ -309,9 +308,9 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 dx, dy, src_x, src_y;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
u32 ch_base;
int i;
@@ -323,12 +322,12 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
for (i = 0; i < format->num_planes; i++) {
/* Get the physical address of the buffer in memory */
- gem = drm_fb_cma_get_gem_obj(fb, i);
+ gem = drm_fb_dma_get_gem_obj(fb, i);
- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);
/* Compute the start of the displayed memory */
- paddr = gem->paddr + fb->offsets[i];
+ dma_addr = gem->dma_addr + fb->offsets[i];
dx = src_x;
dy = src_y;
@@ -339,8 +338,8 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
}
/* Fixup framebuffer address for src coordinates */
- paddr += dx * format->cpp[i];
- paddr += dy * fb->pitches[i];
+ dma_addr += dx * format->cpp[i];
+ dma_addr += dy * fb->pitches[i];
/* Set the line width */
DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
@@ -351,12 +350,12 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
fb->pitches[i]);
DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
- i + 1, &paddr);
+ i + 1, &dma_addr);
regmap_write(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
overlay, i),
- lower_32_bits(paddr));
+ lower_32_bits(dma_addr));
}
return 0;
@@ -380,8 +379,8 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ min_scale = DRM_PLANE_NO_SCALING;
+ max_scale = DRM_PLANE_NO_SCALING;
if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_VI_SCALER_SCALE_MIN;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 747abafb6a5c..bd0f60704467 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -26,7 +26,6 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "dc.h"
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ed828de5ac01..9291209154a7 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2013 Avionic Design GmbH
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
- * Based on the KMS/FB CMA helpers
+ * Based on the KMS/FB DMA helpers
* Copyright (C) 2012 Analog Devices Inc.
*/
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index ca9f03e3675b..10090116895f 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -12,7 +12,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "dc.h"
#include "plane.h"
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 2c8273796d9d..91b70f7d2769 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o
+obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o drm_damage_helper_test.o \
+ drm_cmdline_parser_test.o drm_rect_test.o drm_format_test.o drm_plane_helper_test.o \
+ drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o drm_mm_test.o
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
new file mode 100644
index 000000000000..7a2b2d6bc3fe
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -0,0 +1,756 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <linux/prime_numbers.h>
+#include <linux/sched/signal.h>
+
+#include <drm/drm_buddy.h>
+
+#include "../lib/drm_random.h"
+
+#define TIMEOUT(name__) \
+ unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
+
+static unsigned int random_seed;
+
+static inline u64 get_size(int order, u64 chunk_size)
+{
+ return (1 << order) * chunk_size;
+}
+
+__printf(2, 3)
+static bool __timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
+
+static void __dump_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block, bool buddy)
+{
+ kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
+ block->header, drm_buddy_block_state(block),
+ drm_buddy_block_order(block), drm_buddy_block_offset(block),
+ drm_buddy_block_size(mm, block), !block->parent, buddy);
+}
+
+static void dump_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+
+ __dump_block(test, mm, block, false);
+
+ buddy = drm_get_buddy(block);
+ if (buddy)
+ __dump_block(test, mm, buddy, true);
+}
+
+static int check_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = drm_buddy_block_state(block);
+
+ if (block_state != DRM_BUDDY_ALLOCATED &&
+ block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
+ kunit_err(test, "block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = drm_buddy_block_size(mm, block);
+ offset = drm_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ kunit_err(test, "block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ kunit_err(test, "block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ kunit_err(test, "block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ kunit_err(test, "block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ kunit_err(test, "block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = drm_get_buddy(block);
+
+ if (!buddy && block->parent) {
+ kunit_err(test, "buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ kunit_err(test, "buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_size(mm, buddy) != block_size) {
+ kunit_err(test, "buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_state(buddy) == block_state &&
+ block_state == DRM_BUDDY_FREE) {
+ kunit_err(test, "block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static int check_blocks(struct kunit *test, struct drm_buddy *mm,
+ struct list_head *blocks, u64 expected_size, bool is_contiguous)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = check_block(test, mm, block);
+
+ if (!drm_buddy_block_is_allocated(block)) {
+ kunit_err(test, "block not allocated\n");
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ kunit_err(test, "block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += drm_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ kunit_err(test, "prev block, dump:\n");
+ dump_block(test, mm, prev);
+ }
+
+ kunit_err(test, "bad block, dump:\n");
+ dump_block(test, mm, block);
+
+ return err;
+}
+
+static int check_mm(struct kunit *test, struct drm_buddy *mm)
+{
+ struct drm_buddy_block *root;
+ struct drm_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ kunit_err(test, "n_roots is zero\n");
+ return -EINVAL;
+ }
+
+ if (mm->n_roots != hweight64(mm->size)) {
+ kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct drm_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ kunit_err(test, "root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = check_block(test, mm, root);
+
+ if (!drm_buddy_block_is_free(root)) {
+ kunit_err(test, "root not free\n");
+ err = -EINVAL;
+ }
+
+ order = drm_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ kunit_err(test, "max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ kunit_err(test, "root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct drm_buddy_block, link);
+ if (block != root) {
+ kunit_err(test, "root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += drm_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ kunit_err(test, "expected mm size=%llx, found=%llx\n",
+ mm->size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ kunit_err(test, "prev root(%u), dump:\n", i - 1);
+ dump_block(test, mm, prev);
+ }
+
+ if (root) {
+ kunit_err(test, "bad root(%u), dump:\n", i);
+ dump_block(test, mm, root);
+ }
+
+ return err;
+}
+
+static void mm_config(u64 *size, u64 *chunk_size)
+{
+ DRM_RND_STATE(prng, random_seed);
+ u32 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, random_seed);
+
+ /* Let size be a random number of pages up to 8 GB (2M pages) */
+ s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+ /* Let the chunk size be a random power of 2 less than size */
+ ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
+ /* Round size down to the chunk size */
+ s &= -ms;
+
+ /* Convert from pages to bytes */
+ *chunk_size = (u64)ms << 12;
+ *size = (u64)s << 12;
+}
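
Since ms is a power of two, -ms in two's complement has ones in every bit position at or above log2(ms), so s &= -ms simply rounds s down to a multiple of ms; for example, with ms = 8 pages and s = 27 pages, the mask clears the low three bits and leaves s = 24.
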
+
+static void drm_test_buddy_alloc_pathological(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block;
+ const int max_order = 3;
+ unsigned long flags = 0;
+ int order, top;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ }
+
+ for (order = top; order--;) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
+ mm_size, size, size,
+ &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM for hole\n");
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &holes);
+
+ size = get_size(top, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+ top, max_order);
+ }
+
+ drm_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ }
+
+ list_splice_tail(&holes, &blocks);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_smoke(struct kunit *test)
+{
+ u64 mm_size, chunk_size, start = 0;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ int *order;
+ int i;
+
+ DRM_RND_STATE(prng, random_seed);
+ TIMEOUT(end_time);
+
+ mm_config(&mm_size, &chunk_size);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
+ "buddy_init failed\n");
+
+ order = drm_random_order(mm.max_order + 1, &prng);
+ KUNIT_ASSERT_TRUE(test, order);
+
+ for (i = 0; i <= mm.max_order; ++i) {
+ struct drm_buddy_block *block;
+ int max_order = order[i];
+ bool timeout = false;
+ LIST_HEAD(blocks);
+ u64 total, size;
+ LIST_HEAD(tmp);
+ int order, err;
+
+ KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
+ "pre-mm check failed, abort\n");
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ size = get_size(order, chunk_size);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
+ if (err) {
+ if (err == -ENOMEM) {
+ KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
+ order);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
+ "buddy_alloc order mismatch\n");
+
+ total += drm_buddy_block_size(&mm, block);
+
+ if (__timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
+ } while (total < mm.size);
+
+ if (!err)
+ err = check_blocks(test, &mm, &blocks, total, false);
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
+ "post-mm check failed\n");
+ }
+
+ if (err || timeout)
+ break;
+
+ cond_resched();
+ }
+
+ kfree(order);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block, *bn;
+ const unsigned int max_order = 16;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (order = 0; order < max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM on final alloc\n");
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--;) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded, it should be full!");
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ size = get_size(max_order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_optimistic(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ const int max_order = 16;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+ int order;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
+ */
+
+ mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded, it should be full!");
+
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_range(struct kunit *test)
+{
+ unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
+ u64 offset, size, rem, chunk_size, end;
+ unsigned long page_num;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+
+ mm_config(&size, &chunk_size);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
+ "buddy_init failed");
+
+ KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
+ "pre-mm check failed, abort!");
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct drm_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+ end = offset + size;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
+ size, mm.chunk_size,
+ &tmp, flags),
+ "alloc_range with offset=%llx, size=%llx failed\n", offset, size);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");
+
+ KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
+ "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ drm_buddy_block_offset(block), offset);
+
+ KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));
+
+ list_splice_tail(&tmp, &blocks);
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+
+ cond_resched();
+ }
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");
+
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_limit(struct kunit *test)
+{
+ u64 size = U64_MAX, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ LIST_HEAD(allocated);
+ struct drm_buddy mm;
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+
+ KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
+ "mm.max_order(%d) != %d\n", mm.max_order,
+ DRM_BUDDY_MAX_ORDER);
+
+ size = mm.chunk_size << mm.max_order;
+ KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
+ PAGE_SIZE, &allocated, flags));
+
+ block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
+ KUNIT_EXPECT_TRUE(test, block);
+
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
+ "block order(%d) != %d\n",
+ drm_buddy_block_order(block), mm.max_order);
+
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE,
+ "block size(%llu) != %llu\n",
+ drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
+static int drm_buddy_init_test(struct kunit *test)
+{
+ while (!random_seed)
+ random_seed = get_random_int();
+
+ return 0;
+}
+
+static struct kunit_case drm_buddy_tests[] = {
+ KUNIT_CASE(drm_test_buddy_alloc_limit),
+ KUNIT_CASE(drm_test_buddy_alloc_range),
+ KUNIT_CASE(drm_test_buddy_alloc_optimistic),
+ KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
+ KUNIT_CASE(drm_test_buddy_alloc_smoke),
+ KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ {}
+};
+
+static struct kunit_suite drm_buddy_test_suite = {
+ .name = "drm_buddy",
+ .init = drm_buddy_init_test,
+ .test_cases = drm_buddy_tests,
+};
+
+kunit_test_suite(drm_buddy_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
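
A quick worked check of the size arithmetic these tests depend on: get_size(order, chunk_size) is chunk_size << order. In drm_test_buddy_alloc_pathological (max_order = 3, 4 KiB pages) the mm therefore spans PAGE_SIZE << 3 = 32 KiB; the first pass allocates one block each of order 2, 1 and 0 (16 KiB + 8 KiB + 4 KiB = 28 KiB), which leaves exactly the single page the comment promises, and once that page is taken too the top-order request is expected to fail. Similarly, drm_test_buddy_alloc_optimistic sizes the mm as PAGE_SIZE * ((1 << (max_order + 1)) - 1), the sum of one block of every order from 0 to max_order, so allocating each order once fills the mm completely and the final order-0 request must fail.
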
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
new file mode 100644
index 000000000000..34790e7a3760
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -0,0 +1,991 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Bootlin
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+
+static const struct drm_connector no_connector = {};
+
+static void drm_test_cmdline_force_e_only(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "e";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_force_D_only_not_digital(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static const struct drm_connector connector_hdmi = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static void drm_test_cmdline_force_D_only_hdmi(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector_hdmi, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static const struct drm_connector connector_dvi = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static void drm_test_cmdline_force_D_only_dvi(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector_dvi, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static void drm_test_cmdline_force_d_only(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "d";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
+}
+
+static void drm_test_cmdline_res(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_vesa(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480M";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_vesa_rblank(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480MR";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_rblank(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480R";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_refresh(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480@60";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_interlaced(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60i";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_TRUE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_margins(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60m";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_off(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
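+ /* The "d" suffix forces the connector off (DRM_FORCE_OFF). */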
+ const char *cmdline = "720x480-24@60d";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60e";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on_analog(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
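+ /* On a connector with no digital/analog distinction, "D" behaves like "e" (force on). */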
+ const char *cmdline = "720x480-24@60D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on_digital(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ static const struct drm_connector connector = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+ };
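+ /* On a DVI-I connector, "D" forces the digital output specifically. */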
+ const char *cmdline = "720x480-24@60D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60ime";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_TRUE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_margins_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480me";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_vesa_margins(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480Mm";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_name(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+}
+
+static void drm_test_cmdline_name_bpp(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC-24";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+}
+
+static void drm_test_cmdline_name_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+}
+
+static void drm_test_cmdline_name_bpp_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC-24,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+}
+
+static void drm_test_cmdline_rotate_0(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=0";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_0);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_90(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=90";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_90);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_180(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_270(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=270";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_270);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_hmirror(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,reflect_x";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_vmirror(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,reflect_y";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_margin_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline =
+ "720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_multiple_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=270,reflect_x";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_bpp_extra_and_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24e,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_extra_and_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480e,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_freestanding_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_freestanding_force_e_and_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_panel_orientation(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "panel_orientation=upside_down";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.panel_orientation, DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+struct drm_cmdline_invalid_test {
+ const char *name;
+ const char *cmdline;
+};
+
+static void drm_test_cmdline_invalid(struct kunit *test)
+{
+ const struct drm_cmdline_invalid_test *params = test->param_value;
+ struct drm_cmdline_mode mode = { };
+
+ KUNIT_EXPECT_FALSE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
+ &no_connector,
+ &mode));
+}
+
+static const struct drm_cmdline_invalid_test drm_cmdline_invalid_tests[] = {
+ {
+ .name = "margin_only",
+ .cmdline = "m",
+ },
+ {
+ .name = "interlace_only",
+ .cmdline = "i",
+ },
+ {
+ .name = "res_missing_x",
+ .cmdline = "x480",
+ },
+ {
+ .name = "res_missing_y",
+ .cmdline = "1024x",
+ },
+ {
+ .name = "res_bad_y",
+ .cmdline = "1024xtest",
+ },
+ {
+ .name = "res_missing_y_bpp",
+ .cmdline = "1024x-24",
+ },
+ {
+ .name = "res_bad_bpp",
+ .cmdline = "720x480-test",
+ },
+ {
+ .name = "res_bad_refresh",
+ .cmdline = "720x480@refresh",
+ },
+ {
+ .name = "res_bpp_refresh_force_on_off",
+ .cmdline = "720x480-24@60de",
+ },
+ {
+ .name = "res_invalid_mode",
+ .cmdline = "720x480f",
+ },
+ {
+ .name = "res_bpp_wrong_place_mode",
+ .cmdline = "720x480e-24",
+ },
+ {
+ .name = "name_bpp_refresh",
+ .cmdline = "NTSC-24@60",
+ },
+ {
+ .name = "name_refresh",
+ .cmdline = "NTSC@60",
+ },
+ {
+ .name = "name_refresh_wrong_mode",
+ .cmdline = "NTSC@60m",
+ },
+ {
+ .name = "name_refresh_invalid_mode",
+ .cmdline = "NTSC@60f",
+ },
+ {
+ .name = "rotate_multiple",
+ .cmdline = "720x480,rotate=0,rotate=90",
+ },
+ {
+ .name = "rotate_invalid_val",
+ .cmdline = "720x480,rotate=42",
+ },
+ {
+ .name = "rotate_truncated",
+ .cmdline = "720x480,rotate=",
+ },
+ {
+ .name = "invalid_option",
+ .cmdline = "720x480,test=42",
+ },
+};
+
+static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
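+/* Generates drm_cmdline_invalid_gen_params for KUNIT_CASE_PARAM() below. */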
+KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc);
+
+static struct kunit_case drm_cmdline_parser_tests[] = {
+ KUNIT_CASE(drm_test_cmdline_force_d_only),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_dvi),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_hdmi),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_not_digital),
+ KUNIT_CASE(drm_test_cmdline_force_e_only),
+ KUNIT_CASE(drm_test_cmdline_res),
+ KUNIT_CASE(drm_test_cmdline_res_vesa),
+ KUNIT_CASE(drm_test_cmdline_res_vesa_rblank),
+ KUNIT_CASE(drm_test_cmdline_res_rblank),
+ KUNIT_CASE(drm_test_cmdline_res_bpp),
+ KUNIT_CASE(drm_test_cmdline_res_refresh),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_margins),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_off),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_analog),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_digital),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_margins_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_vesa_margins),
+ KUNIT_CASE(drm_test_cmdline_name),
+ KUNIT_CASE(drm_test_cmdline_name_bpp),
+ KUNIT_CASE(drm_test_cmdline_name_option),
+ KUNIT_CASE(drm_test_cmdline_name_bpp_option),
+ KUNIT_CASE(drm_test_cmdline_rotate_0),
+ KUNIT_CASE(drm_test_cmdline_rotate_90),
+ KUNIT_CASE(drm_test_cmdline_rotate_180),
+ KUNIT_CASE(drm_test_cmdline_rotate_270),
+ KUNIT_CASE(drm_test_cmdline_hmirror),
+ KUNIT_CASE(drm_test_cmdline_vmirror),
+ KUNIT_CASE(drm_test_cmdline_margin_options),
+ KUNIT_CASE(drm_test_cmdline_multiple_options),
+ KUNIT_CASE(drm_test_cmdline_bpp_extra_and_option),
+ KUNIT_CASE(drm_test_cmdline_extra_and_option),
+ KUNIT_CASE(drm_test_cmdline_freestanding_options),
+ KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options),
+ KUNIT_CASE(drm_test_cmdline_panel_orientation),
+ KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_cmdline_parser_test_suite = {
+ .name = "drm_cmdline_parser",
+ .test_cases = drm_cmdline_parser_tests
+};
+
+kunit_test_suite(drm_cmdline_parser_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_damage_helper_test.c b/drivers/gpu/drm/tests/drm_damage_helper_test.c
new file mode 100644
index 000000000000..115034fc3421
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_damage_helper_test.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_drv.h>
+
+struct drm_damage_mock {
+ struct drm_driver driver;
+ struct drm_device device;
+ struct drm_object_properties obj_props;
+ struct drm_plane plane;
+ struct drm_property prop;
+ struct drm_framebuffer fb;
+ struct drm_plane_state state;
+ struct drm_plane_state old_state;
+};
+
+static int drm_damage_helper_init(struct kunit *test)
+{
+ struct drm_damage_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+
+ mock->fb.width = 2048;
+ mock->fb.height = 2048;
+
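+ /* A non-NULL placeholder CRTC so the damage iterator treats the plane as enabled. */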
+ mock->state.crtc = ZERO_SIZE_PTR;
+ mock->state.fb = &mock->fb;
+ mock->state.visible = true;
+
+ mock->old_state.plane = &mock->plane;
+ mock->state.plane = &mock->plane;
+
+ /* just enough so that drm_plane_enable_fb_damage_clips() works */
+ mock->device.driver = &mock->driver;
+ mock->device.mode_config.prop_fb_damage_clips = &mock->prop;
+ mock->plane.dev = &mock->device;
+ mock->obj_props.count = 0;
+ mock->plane.base.properties = &mock->obj_props;
+ mock->prop.base.id = 1; /* 0 is an invalid id */
+ mock->prop.dev = &mock->device;
+
+ drm_plane_enable_fb_damage_clips(&mock->plane);
+
+ test->priv = mock;
+
+ return 0;
+}
+
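+/* Plane source coordinates are 16.16 fixed point, as in struct drm_plane_state. */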
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src_x = x1;
+ state->src_y = y1;
+ state->src_w = x2 - x1;
+ state->src_h = y2 - y1;
+
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
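+/* Wrap the damage rectangles in a blob, as userspace would via the FB_DAMAGE_CLIPS property. */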
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, u32 size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static void check_damage_clip(struct kunit *test, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_plane_state state = mock->state;
+
+ /*
+ * Round down x1/y1 and round up x2/y2: damage is in whole pixels rather
+ * than 16.16 fixed point, so round the plane source outwards to catch
+ * every affected pixel.
+ */
+ int src_x1 = state.src.x1 >> 16;
+ int src_y1 = state.src.y1 >> 16;
+ int src_x2 = (state.src.x2 >> 16) + !!(state.src.x2 & 0xFFFF);
+ int src_y2 = (state.src.y2 >> 16) + !!(state.src.y2 & 0xFFFF);
+
+ if (x1 >= x2 || y1 >= y2)
+ KUNIT_FAIL(test, "Cannot have damage clip with no dimension.");
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2)
+ KUNIT_FAIL(test, "Damage cannot be outside rounded plane src.");
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2)
+ KUNIT_FAIL(test, "Damage = %d %d %d %d, want = %d %d %d %d",
+ r->x1, r->y1, r->x2, r->y2, x1, y1, x2, y2);
+}
+
+static void drm_test_damage_iter_no_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src same as fb size. */
+ set_plane_src(&mock->old_state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
+ set_plane_src(&mock->state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 0, 0, 2048, 2048);
+}
+
+static void drm_test_damage_iter_no_damage_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return rounded off plane src as damage.");
+ check_damage_clip(test, &clip, 3, 3, 1028, 772);
+}
+
+static void drm_test_damage_iter_no_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 10, 10, 1034, 778);
+}
+
+static void drm_test_damage_iter_no_damage_fractional_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+}
+
+static void drm_test_damage_iter_no_damage_not_visible(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.visible = false;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_no_damage_no_crtc(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.crtc = NULL;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_no_damage_no_fb(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.fb = NULL;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_simple_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage set to plane src */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 0, 0, 1024, 768);
+}
+
+static void drm_test_damage_iter_single_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 256, 192, 768, 576);
+}
+
+static void drm_test_damage_iter_single_damage_intersect_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage clipped to src.");
+ check_damage_clip(test, &clip, 256, 192, 1024, 768);
+}
+
+static void drm_test_damage_iter_single_damage_outside_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_single_damage_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 10, 10, 256, 330);
+}
+
+static void drm_test_damage_iter_single_damage_intersect_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return damage clipped to rounded off src.");
+ check_damage_clip(test, &clip, 10, 4, 1029, 330);
+}
+
+static void drm_test_damage_iter_single_damage_outside_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_single_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 10, 10, 1034, 778);
+}
+
+static void drm_test_damage_iter_single_damage_fractional_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return rounded off plane as damage.");
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+}
+
+static void drm_test_damage_iter_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ check_damage_clip(test, &clip, 20, 30, 200, 180);
+ if (num_hits == 1)
+ check_damage_clip(test, &clip, 240, 200, 280, 250);
+ num_hits++;
+ }
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
+}
+
+static void drm_test_damage_iter_damage_one_intersect(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* 2 damage clips, one intersect plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ check_damage_clip(test, &clip, 20, 30, 200, 180);
+ if (num_hits == 1)
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+ num_hits++;
+ }
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
+}
+
+static void drm_test_damage_iter_damage_one_outside(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 240, 200, 280, 250);
+}
+
+static void drm_test_damage_iter_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return round off plane src as damage.");
+ check_damage_clip(test, &clip, 3, 3, 1028, 772);
+}
+
+static void drm_test_damage_iter_damage_not_visible(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.visible = false;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should not return any damage.");
+}
+
+static struct kunit_case drm_damage_helper_tests[] = {
+ KUNIT_CASE(drm_test_damage_iter_no_damage),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_not_visible),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_no_crtc),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_no_fb),
+ KUNIT_CASE(drm_test_damage_iter_simple_damage),
+ KUNIT_CASE(drm_test_damage_iter_single_damage),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_outside_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_outside_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_damage),
+ KUNIT_CASE(drm_test_damage_iter_damage_one_intersect),
+ KUNIT_CASE(drm_test_damage_iter_damage_one_outside),
+ KUNIT_CASE(drm_test_damage_iter_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_damage_not_visible),
+ { }
+};
+
+static struct kunit_suite drm_damage_helper_test_suite = {
+ .name = "drm_damage_helper",
+ .init = drm_damage_helper_init,
+ .test_cases = drm_damage_helper_tests,
+};
+
+kunit_test_suite(drm_damage_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
index 4caa9be900ac..65c9d225b558 100644
--- a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -1,19 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for the DRM DP MST helpers
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
#define PREFIX_STR "[drm_dp_mst_helper]"
+#include <kunit/test.h>
+
#include <linux/random.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "../display/drm_dp_mst_topology_internal.h"
-#include "test-drm_modeset_common.h"
-int igt_dp_mst_calc_pbn_mode(void *ignored)
+static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
int pbn, i;
const struct {
@@ -33,13 +36,11 @@ int igt_dp_mst_calc_pbn_mode(void *ignored)
pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
test_params[i].bpp,
test_params[i].dsc);
- FAIL(pbn != test_params[i].expected,
- "Expected PBN %d for clock %d bpp %d, got %d\n",
+ KUNIT_EXPECT_EQ_MSG(test, pbn, test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
test_params[i].expected, test_params[i].rate,
test_params[i].bpp, pbn);
}
-
- return 0;
}
static bool
@@ -176,66 +177,64 @@ out:
return result;
}
-int igt_dp_mst_sideband_msg_req_decode(void *unused)
+static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
{
struct drm_dp_sideband_msg_req_body in = { 0 };
u8 data[] = { 0xff, 0x0, 0xdd };
int i;
-#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in))
-
in.req_type = DP_ENUM_PATH_RESOURCES;
in.u.port_num.port_number = 5;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_POWER_UP_PHY;
in.u.port_num.port_number = 5;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_POWER_DOWN_PHY;
in.u.port_num.port_number = 5;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_ALLOCATE_PAYLOAD;
in.u.allocate_payload.number_sdp_streams = 3;
for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.vcpi = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.pbn = U16_MAX;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_QUERY_PAYLOAD;
in.u.query_payload.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.query_payload.vcpi = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_DPCD_READ;
in.u.dpcd_read.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_read.dpcd_address = 0xfedcb;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_read.num_bytes = U8_MAX;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_DPCD_WRITE;
in.u.dpcd_write.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_write.dpcd_address = 0xfedcb;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
in.u.dpcd_write.bytes = data;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_I2C_READ;
in.u.i2c_read.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_read.read_i2c_device_id = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_read.num_transactions = 3;
in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
@@ -244,32 +243,44 @@ int igt_dp_mst_sideband_msg_req_decode(void *unused)
in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
}
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_I2C_WRITE;
in.u.i2c_write.port_number = 0xf;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_write.write_i2c_device_id = 0x7f;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
in.u.i2c_write.bytes = data;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_QUERY_STREAM_ENC_STATUS;
in.u.enc_status.stream_id = 1;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
get_random_bytes(in.u.enc_status.client_id,
sizeof(in.u.enc_status.client_id));
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.stream_event = 3;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.valid_stream_event = 0;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.stream_behavior = 3;
- DO_TEST();
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.valid_stream_behavior = 1;
- DO_TEST();
-
-#undef DO_TEST
- return 0;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
}
+
+static struct kunit_case drm_dp_mst_helper_tests[] = {
+ KUNIT_CASE(drm_test_dp_mst_calc_pbn_mode),
+ KUNIT_CASE(drm_test_dp_mst_sideband_msg_req_decode),
+ { }
+};
+
+static struct kunit_suite drm_dp_mst_helper_test_suite = {
+ .name = "drm_dp_mst_helper",
+ .test_cases = drm_dp_mst_helper_tests,
+};
+
+kunit_test_suite(drm_dp_mst_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 98583bf56044..8d86c250c2ec 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -16,34 +16,101 @@
#define TEST_BUF_SIZE 50
-struct xrgb8888_to_rgb332_case {
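+/* Expected output and destination pitch for each conversion target format. */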
+struct convert_to_gray8_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb332_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb565_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
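+ /* Same pixels with the two bytes of each value swapped. */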
+ const u16 expected_swab[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb888_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_xrgb2101010_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
+struct convert_xrgb8888_case {
const char *name;
unsigned int pitch;
- unsigned int dst_pitch;
struct drm_rect clip;
const u32 xrgb8888[TEST_BUF_SIZE];
- const u8 expected[4 * TEST_BUF_SIZE];
+ struct convert_to_gray8_result gray8_result;
+ struct convert_to_rgb332_result rgb332_result;
+ struct convert_to_rgb565_result rgb565_result;
+ struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_xrgb2101010_result xrgb2101010_result;
};
-static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
+static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
{
.name = "single_pixel_source_buffer",
.pitch = 1 * 4,
- .dst_pitch = 0,
.clip = DRM_RECT_INIT(0, 0, 1, 1),
.xrgb8888 = { 0x01FF0000 },
- .expected = { 0xE0 },
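+ /* 0x4C == (3 * 0xFF + 6 * 0x00 + 0x00) / 10: the gray value computed for pure red. */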
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = { 0xE0 },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF800 },
+ .expected_swab = { 0x00F8 },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
},
{
.name = "single_pixel_clip_rectangle",
.pitch = 2 * 4,
- .dst_pitch = 0,
.clip = DRM_RECT_INIT(1, 1, 1, 1),
.xrgb8888 = {
0x00000000, 0x00000000,
0x00000000, 0x10FF0000,
},
- .expected = { 0xE0 },
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = { 0xE0 },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF800 },
+ .expected_swab = { 0x00F8 },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
},
{
/* Well known colors: White, black, red, green, blue, magenta,
@@ -52,7 +119,6 @@ static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
*/
.name = "well_known_colors",
.pitch = 4 * 4,
- .dst_pitch = 0,
.clip = DRM_RECT_INIT(1, 1, 2, 4),
.xrgb8888 = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -61,28 +127,115 @@ static struct xrgb8888_to_rgb332_case xrgb8888_to_rgb332_cases[] = {
0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000,
0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000,
},
- .expected = {
- 0xFF, 0x00,
- 0xE0, 0x1C,
- 0x03, 0xE3,
- 0xFC, 0x1F,
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0x4C, 0x99,
+ 0x19, 0x66,
+ 0xE5, 0xB2,
+ },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0xE0, 0x1C,
+ 0x03, 0xE3,
+ 0xFC, 0x1F,
+ },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x0000,
+ 0xF800, 0x07E0,
+ 0x001F, 0xF81F,
+ 0xFFE0, 0x07FF,
+ },
+ .expected_swab = {
+ 0xFFFF, 0x0000,
+ 0x00F8, 0xE007,
+ 0x1F00, 0x1FF8,
+ 0xE0FF, 0xFF07,
+ },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00,
+ 0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF,
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0x3FFFFFFF, 0x00000000,
+ 0x3FF00000, 0x000FFC00,
+ 0x000003FF, 0x3FF003FF,
+ 0x3FFFFC00, 0x000FFFFF,
+ },
},
},
{
/* Randomly picked colors. Full buffer within the clip area. */
.name = "destination_pitch",
.pitch = 3 * 4,
- .dst_pitch = 5,
.clip = DRM_RECT_INIT(0, 0, 3, 3),
.xrgb8888 = {
0xA10E449C, 0xB1114D05, 0xC1A80303,
0xD16C7073, 0xA20E449C, 0xB2114D05,
0xC2A80303, 0xD26C7073, 0xA30E449C,
},
- .expected = {
- 0x0A, 0x08, 0xA0, 0x00, 0x00,
- 0x6D, 0x0A, 0x08, 0x00, 0x00,
- 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ .gray8_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x3C, 0x33, 0x34, 0x00, 0x00,
+ 0x6F, 0x3C, 0x33, 0x00, 0x00,
+ 0x34, 0x6F, 0x3C, 0x00, 0x00,
+ },
+ },
+ .rgb332_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x0A, 0x08, 0xA0, 0x00, 0x00,
+ 0x6D, 0x0A, 0x08, 0x00, 0x00,
+ 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ },
+ },
+ .rgb565_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0A33, 0x1260, 0xA800, 0x0000, 0x0000,
+ 0x6B8E, 0x0A33, 0x1260, 0x0000, 0x0000,
+ 0xA800, 0x6B8E, 0x0A33, 0x0000, 0x0000,
+ },
+ .expected_swab = {
+ 0x330A, 0x6012, 0x00A8, 0x0000, 0x0000,
+ 0x8E6B, 0x330A, 0x6012, 0x0000, 0x0000,
+ 0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
+ },
+ },
+ .rgb888_result = {
+ .dst_pitch = 15,
+ .expected = {
+ 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000,
+ 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
+ 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
+ },
},
},
};
@@ -111,41 +264,190 @@ static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
return dst_pitch * drm_rect_height(clip);
}
-static void xrgb8888_to_rgb332_case_desc(struct xrgb8888_to_rgb332_case *t,
- char *desc)
+static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
+{
+ u32 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = le32_to_cpu((__force __le32)buf[n]);
+
+ return dst;
+}
+
+static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
+ char *desc)
{
strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
-KUNIT_ARRAY_PARAM(xrgb8888_to_rgb332, xrgb8888_to_rgb332_cases,
- xrgb8888_to_rgb332_case_desc);
+KUNIT_ARRAY_PARAM(convert_xrgb8888, convert_xrgb8888_cases,
+ convert_xrgb8888_case_desc);
-static void xrgb8888_to_rgb332_test(struct kunit *test)
+static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
{
- const struct xrgb8888_to_rgb332_case *params = test->param_value;
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_gray8_result *result = &params->gray8_result;
size_t dst_size;
- __u8 *dst = NULL;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
struct drm_framebuffer fb = {
.format = drm_format_info(DRM_FORMAT_XRGB8888),
.pitches = { params->pitch, 0, 0 },
};
- dst_size = conversion_buf_size(DRM_FORMAT_RGB332, params->dst_pitch,
+ dst_size = conversion_buf_size(DRM_FORMAT_R8, result->dst_pitch,
&params->clip);
KUNIT_ASSERT_GT(test, dst_size, 0);
- dst = kunit_kzalloc(test, dst_size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dst);
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_gray8(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb332_result *result = &params->rgb332_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB332, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb332(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb565_result *result = &params->rgb565_result;
+ size_t dst_size;
+ __u16 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB565, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, false);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected_swab, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb888_result *result = &params->rgb888_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB888, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_xrgb2101010_result *result = &params->xrgb2101010_result;
+ size_t dst_size;
+ __u32 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_XRGB2101010,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
- drm_fb_xrgb8888_to_rgb332(dst, params->dst_pitch, params->xrgb8888,
- &fb, &params->clip);
- KUNIT_EXPECT_EQ(test, memcmp(dst, params->expected, dst_size), 0);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
}
static struct kunit_case drm_format_helper_test_cases[] = {
- KUNIT_CASE_PARAM(xrgb8888_to_rgb332_test,
- xrgb8888_to_rgb332_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
{}
};
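
The convert_xrgb8888_gen_params generator used in the case table above does not appear in the diff itself; KUNIT_ARRAY_PARAM(convert_xrgb8888, ...) defines it. Roughly, the generated function behaves like the following sketch (the _sketch name is illustrative only, written against the case array and description helper added above, and is not the macro's actual output):

    /*
     * Rough sketch of the parameter generator produced by KUNIT_ARRAY_PARAM().
     * KUnit calls it once per parameter: it steps through the case array,
     * fills in a human-readable description and returns NULL when done.
     */
    static const void *convert_xrgb8888_gen_params_sketch(const void *prev, char *desc)
    {
    	const struct convert_xrgb8888_case *cur = prev;

    	cur = cur ? cur + 1 : convert_xrgb8888_cases;
    	if (cur >= convert_xrgb8888_cases + ARRAY_SIZE(convert_xrgb8888_cases))
    		return NULL;

    	convert_xrgb8888_case_desc((struct convert_xrgb8888_case *)cur, desc);
    	return cur;
    }

KUNIT_CASE_PARAM() then runs each drm_test_fb_xrgb8888_to_*() case once per entry returned by the generator, exposing the current entry through test->param_value.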
diff --git a/drivers/gpu/drm/tests/drm_format_test.c b/drivers/gpu/drm/tests/drm_format_test.c
new file mode 100644
index 000000000000..ec6996ce819a
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_format_test.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_format functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+
+static void drm_test_format_block_width_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+}
+
+static void drm_test_format_block_width_one_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_three_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 3), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_height_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+}
+
+static void drm_test_format_block_height_one_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+}
+
+static void drm_test_format_block_height_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_block_height_three_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 3), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_block_height_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_min_pitch_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+}
+
+static void drm_test_format_min_pitch_one_plane_8bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB332);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1));
+}
+
+static void drm_test_format_min_pitch_one_plane_16bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1) * 2);
+}
+
+static void drm_test_format_min_pitch_one_plane_24bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB888);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 3);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 6);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 3072);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 5760);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 12288);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2013);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 3);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 3);
+}
+
+static void drm_test_format_min_pitch_one_plane_32bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ABGR8888);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 8);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 2560);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 7680);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 16384);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2684);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 4);
+}
+
+static void drm_test_format_min_pitch_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 672);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1));
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1));
+}
+
+static void drm_test_format_min_pitch_three_plane_8bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 3, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 320);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 320), 320);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 512);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 512), 512);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 960);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 960), 960);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2048), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 336);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 336), 336);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX / 2 + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX / 2 + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+}
+
+static void drm_test_format_min_pitch_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L2);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 2);
+}
+
+static struct kunit_case drm_format_tests[] = {
+ KUNIT_CASE(drm_test_format_block_width_invalid),
+ KUNIT_CASE(drm_test_format_block_width_one_plane),
+ KUNIT_CASE(drm_test_format_block_width_two_plane),
+ KUNIT_CASE(drm_test_format_block_width_three_plane),
+ KUNIT_CASE(drm_test_format_block_width_tiled),
+ KUNIT_CASE(drm_test_format_block_height_invalid),
+ KUNIT_CASE(drm_test_format_block_height_one_plane),
+ KUNIT_CASE(drm_test_format_block_height_two_plane),
+ KUNIT_CASE(drm_test_format_block_height_three_plane),
+ KUNIT_CASE(drm_test_format_block_height_tiled),
+ KUNIT_CASE(drm_test_format_min_pitch_invalid),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_8bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_16bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_24bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_32bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_two_plane),
+ KUNIT_CASE(drm_test_format_min_pitch_three_plane_8bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_tiled),
+ {}
+};
+
+static struct kunit_suite drm_format_test_suite = {
+ .name = "drm_format",
+ .test_cases = drm_format_tests,
+};
+
+kunit_test_suite(drm_format_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
index f6d66285c5fc..df235b7fdaa5 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_framebuffer functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
-#include <linux/kernel.h>
+#include <kunit/test.h>
#include <drm/drm_device.h>
#include <drm/drm_mode.h>
@@ -12,8 +14,6 @@
#include "../drm_crtc_internal.h"
-#include "test-drm_modeset_common.h"
-
#define MIN_WIDTH 4
#define MAX_WIDTH 4096
#define MIN_HEIGHT 4
@@ -25,7 +25,7 @@ struct drm_framebuffer_test {
const char *name;
};
-static struct drm_framebuffer_test createbuffer_tests[] = {
+static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
.cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
@@ -73,12 +73,14 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
- .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
- .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 },
}
},
{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
@@ -89,11 +91,13 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
- .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
- .flags = DRM_MODE_FB_MODIFIERS, .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
}
},
-{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
+{ .buffer_created = 0,
+ .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
.handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
.pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
@@ -143,7 +147,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
.handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
- .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
.pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
@@ -164,7 +169,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
- .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
}
},
{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
@@ -203,24 +209,29 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
},
{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
- .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH + MAX_WIDTH * MAX_HEIGHT,
- MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
- .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1, DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH +
+ MAX_WIDTH * MAX_HEIGHT, MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
}
},
-{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
-{ .buffer_created = 0, .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
- .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .handles = { 1, 1, 1 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
-{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
.modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
@@ -230,7 +241,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
{ .buffer_created = 1, .name = "YVU420 Valid modifier",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
- .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
@@ -245,8 +257,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
.handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
- .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
- AFBC_FORMAT_MOD_SPARSE },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
.pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
}
},
@@ -276,7 +288,8 @@ static struct drm_framebuffer_test createbuffer_tests[] = {
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
}
},
-{ .buffer_created = 1, .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
+{ .buffer_created = 1,
+ .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
.cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
.handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
.pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
@@ -317,34 +330,53 @@ static struct drm_mode_config_funcs mock_config_funcs = {
.fb_create = fb_create_mock,
};
-static struct drm_device mock_drm_device = {
- .mode_config = {
- .min_width = MIN_WIDTH,
- .max_width = MAX_WIDTH,
- .min_height = MIN_HEIGHT,
- .max_height = MAX_HEIGHT,
- .funcs = &mock_config_funcs,
- },
-};
+static int drm_framebuffer_test_init(struct kunit *test)
+{
+ struct drm_device *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+
+ mock->mode_config.min_width = MIN_WIDTH;
+ mock->mode_config.max_width = MAX_WIDTH;
+ mock->mode_config.min_height = MIN_HEIGHT;
+ mock->mode_config.max_height = MAX_HEIGHT;
+ mock->mode_config.funcs = &mock_config_funcs;
-static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r)
+ test->priv = mock;
+ return 0;
+}
+
+static void drm_test_framebuffer_create(struct kunit *test)
{
+ const struct drm_framebuffer_test *params = test->param_value;
+ struct drm_device *mock = test->priv;
int buffer_created = 0;
- mock_drm_device.dev_private = &buffer_created;
- drm_internal_framebuffer_create(&mock_drm_device, r, NULL);
- return buffer_created;
+ mock->dev_private = &buffer_created;
+ drm_internal_framebuffer_create(mock, &params->cmd, NULL);
+ KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created);
}
-int igt_check_drm_framebuffer_create(void *ignored)
+static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc)
{
- int i = 0;
+ strcpy(desc, t->name);
+}
- for (i = 0; i < ARRAY_SIZE(createbuffer_tests); i++) {
- FAIL(createbuffer_tests[i].buffer_created !=
- execute_drm_mode_fb_cmd2(&createbuffer_tests[i].cmd),
- "Test %d: \"%s\" failed\n", i, createbuffer_tests[i].name);
- }
+KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases,
+ drm_framebuffer_test_to_desc);
- return 0;
-}
+static struct kunit_case drm_framebuffer_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params),
+ { }
+};
+
+static struct kunit_suite drm_framebuffer_test_suite = {
+ .name = "drm_framebuffer",
+ .init = drm_framebuffer_test_init,
+ .test_cases = drm_framebuffer_tests,
+};
+
+kunit_test_suite(drm_framebuffer_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/tests/drm_mm_test.c
index b768b53c4aee..659d1af4dca7 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for the drm_mm range manager
+ *
+ * Copyright (c) 2022 Arthur Grillo <arthur.grillo@usp.br>
*/
-#define pr_fmt(fmt) "drm_mm: " fmt
+#include <kunit/test.h>
-#include <linux/module.h>
#include <linux/prime_numbers.h>
#include <linux/slab.h>
#include <linux/random.h>
@@ -16,9 +17,6 @@
#include "../lib/drm_random.h"
-#define TESTS "drm_mm_selftests.h"
-#include "drm_selftest.h"
-
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;
@@ -45,13 +43,7 @@ static const struct insert_mode {
{}
};
-static int igt_sanitycheck(void *ignored)
-{
- pr_info("%s - ok!\n", __func__);
- return 0;
-}
-
-static bool assert_no_holes(const struct drm_mm *mm)
+static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
{
struct drm_mm_node *hole;
u64 hole_start, __always_unused hole_end;
@@ -61,13 +53,14 @@ static bool assert_no_holes(const struct drm_mm *mm)
drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
count++;
if (count) {
- pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
+ KUNIT_FAIL(test,
+ "Expected to find no holes (after reserve), found %lu instead\n", count);
return false;
}
drm_mm_for_each_node(hole, mm) {
if (drm_mm_hole_follows(hole)) {
- pr_err("Hole follows node, expected none!\n");
+ KUNIT_FAIL(test, "Hole follows node, expected none!\n");
return false;
}
}
@@ -75,7 +68,7 @@ static bool assert_no_holes(const struct drm_mm *mm)
return true;
}
-static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
+static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
{
struct drm_mm_node *hole;
u64 hole_start, hole_end;
@@ -89,62 +82,62 @@ static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (start != hole_start || end != hole_end) {
if (ok)
- pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
- hole_start, hole_end,
- start, end);
+ KUNIT_FAIL(test,
+ "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
+ hole_start, hole_end, start, end);
ok = false;
}
count++;
}
if (count != 1) {
- pr_err("Expected to find one hole, found %lu instead\n", count);
+ KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count);
ok = false;
}
return ok;
}
-static bool assert_continuous(const struct drm_mm *mm, u64 size)
+static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
{
struct drm_mm_node *node, *check, *found;
unsigned long n;
u64 addr;
- if (!assert_no_holes(mm))
+ if (!assert_no_holes(test, mm))
return false;
n = 0;
addr = 0;
drm_mm_for_each_node(node, mm) {
if (node->start != addr) {
- pr_err("node[%ld] list out of order, expected %llx found %llx\n",
- n, addr, node->start);
+ KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
+ n, addr, node->start);
return false;
}
if (node->size != size) {
- pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
- n, size, node->size);
+ KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
+ n, size, node->size);
return false;
}
if (drm_mm_hole_follows(node)) {
- pr_err("node[%ld] is followed by a hole!\n", n);
+ KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
return false;
}
found = NULL;
drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
if (node != check) {
- pr_err("lookup return wrong node, expected start %llx, found %llx\n",
- node->start, check->start);
+ KUNIT_FAIL(test,
+ "lookup return wrong node, expected start %llx, found %llx\n",
+ node->start, check->start);
return false;
}
found = check;
}
if (!found) {
- pr_err("lookup failed for node %llx + %llx\n",
- addr, size);
+ KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
return false;
}
@@ -166,107 +159,96 @@ static u64 misalignment(struct drm_mm_node *node, u64 alignment)
return rem;
}
-static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
+static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
u64 size, u64 alignment, unsigned long color)
{
bool ok = true;
if (!drm_mm_node_allocated(node) || node->mm != mm) {
- pr_err("node not allocated\n");
+ KUNIT_FAIL(test, "node not allocated\n");
ok = false;
}
if (node->size != size) {
- pr_err("node has wrong size, found %llu, expected %llu\n",
- node->size, size);
+ KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n",
+ node->size, size);
ok = false;
}
if (misalignment(node, alignment)) {
- pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
- node->start, misalignment(node, alignment), alignment);
+ KUNIT_FAIL(test,
+ "node is misaligned, start %llx rem %llu, expected alignment %llu\n",
+ node->start, misalignment(node, alignment), alignment);
ok = false;
}
if (node->color != color) {
- pr_err("node has wrong color, found %lu, expected %lu\n",
- node->color, color);
+ KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n",
+ node->color, color);
ok = false;
}
return ok;
}
-#define show_mm(mm) do { \
- struct drm_printer __p = drm_debug_printer(__func__); \
- drm_mm_print((mm), &__p); } while (0)
-
-static int igt_init(void *ignored)
+static void drm_test_mm_init(struct kunit *test)
{
const unsigned int size = 4096;
struct drm_mm mm;
struct drm_mm_node tmp;
- int ret = -EINVAL;
/* Start with some simple checks on initialising the struct drm_mm */
memset(&mm, 0, sizeof(mm));
- if (drm_mm_initialized(&mm)) {
- pr_err("zeroed mm claims to be initialized\n");
- return ret;
- }
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm),
+ "zeroed mm claims to be initialized\n");
memset(&mm, 0xff, sizeof(mm));
drm_mm_init(&mm, 0, size);
if (!drm_mm_initialized(&mm)) {
- pr_err("mm claims not to be initialized\n");
+ KUNIT_FAIL(test, "mm claims not to be initialized\n");
goto out;
}
if (!drm_mm_clean(&mm)) {
- pr_err("mm not empty on creation\n");
+ KUNIT_FAIL(test, "mm not empty on creation\n");
goto out;
}
/* After creation, it should all be one massive hole */
- if (!assert_one_hole(&mm, 0, size)) {
- ret = -EINVAL;
+ if (!assert_one_hole(test, &mm, 0, size)) {
+ KUNIT_FAIL(test, "");
goto out;
}
memset(&tmp, 0, sizeof(tmp));
tmp.start = 0;
tmp.size = size;
- ret = drm_mm_reserve_node(&mm, &tmp);
- if (ret) {
- pr_err("failed to reserve whole drm_mm\n");
+ if (drm_mm_reserve_node(&mm, &tmp)) {
+ KUNIT_FAIL(test, "failed to reserve whole drm_mm\n");
goto out;
}
/* After filling the range entirely, there should be no holes */
- if (!assert_no_holes(&mm)) {
- ret = -EINVAL;
+ if (!assert_no_holes(test, &mm)) {
+ KUNIT_FAIL(test, "");
goto out;
}
/* And then after emptying it again, the massive hole should be back */
drm_mm_remove_node(&tmp);
- if (!assert_one_hole(&mm, 0, size)) {
- ret = -EINVAL;
+ if (!assert_one_hole(test, &mm, 0, size)) {
+ KUNIT_FAIL(test, "");
goto out;
}
out:
- if (ret)
- show_mm(&mm);
drm_mm_takedown(&mm);
- return ret;
}
-static int igt_debug(void *ignored)
+static void drm_test_mm_debug(struct kunit *test)
{
struct drm_mm mm;
struct drm_mm_node nodes[2];
- int ret;
/* Create a small drm_mm with a couple of nodes and a few holes, and
* check that the debug iterator doesn't explode over a trivial drm_mm.
@@ -277,24 +259,15 @@ static int igt_debug(void *ignored)
memset(nodes, 0, sizeof(nodes));
nodes[0].start = 512;
nodes[0].size = 1024;
- ret = drm_mm_reserve_node(&mm, &nodes[0]);
- if (ret) {
- pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
- nodes[0].start, nodes[0].size);
- return ret;
- }
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
+ "failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
nodes[1].size = 1024;
nodes[1].start = 4096 - 512 - nodes[1].size;
- ret = drm_mm_reserve_node(&mm, &nodes[1]);
- if (ret) {
- pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
- nodes[1].start, nodes[1].size);
- return ret;
- }
-
- show_mm(&mm);
- return 0;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
+ "failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
}
static struct drm_mm_node *set_node(struct drm_mm_node *node,
@@ -305,7 +278,7 @@ static struct drm_mm_node *set_node(struct drm_mm_node *node,
return node;
}
-static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
+static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
{
int err;
@@ -314,17 +287,18 @@ static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
return true;
if (!err) {
- pr_err("impossible reserve succeeded, node %llu + %llu\n",
- node->start, node->size);
+ KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
+ node->start, node->size);
drm_mm_remove_node(node);
} else {
- pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
+ KUNIT_FAIL(test,
+ "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
err, -ENOSPC, node->start, node->size);
}
return false;
}
-static bool check_reserve_boundaries(struct drm_mm *mm,
+static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
unsigned int count,
u64 size)
{
@@ -339,29 +313,27 @@ static bool check_reserve_boundaries(struct drm_mm *mm,
B(size * count, 0),
B(-size, size),
B(-size, -size),
- B(-size, 2*size),
+ B(-size, 2 * size),
B(0, -size),
B(size, -size),
- B(count*size, size),
- B(count*size, -size),
- B(count*size, count*size),
- B(count*size, -count*size),
- B(count*size, -(count+1)*size),
- B((count+1)*size, size),
- B((count+1)*size, -size),
- B((count+1)*size, -2*size),
+ B(count * size, size),
+ B(count * size, -size),
+ B(count * size, count * size),
+ B(count * size, -count * size),
+ B(count * size, -(count + 1) * size),
+ B((count + 1) * size, size),
+ B((count + 1) * size, -size),
+ B((count + 1) * size, -2 * size),
#undef B
};
struct drm_mm_node tmp = {};
int n;
for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
- if (!expect_reserve_fail(mm,
- set_node(&tmp,
- boundaries[n].start,
- boundaries[n].size))) {
- pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
- n, boundaries[n].name, count, size);
+ if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
+ boundaries[n].size))) {
+ KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
+ n, boundaries[n].name, count, size);
return false;
}
}
@@ -369,7 +341,7 @@ static bool check_reserve_boundaries(struct drm_mm *mm,
return true;
}
-static int __igt_reserve(unsigned int count, u64 size)
+static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
{
DRM_RND_STATE(prng, random_seed);
struct drm_mm mm;
@@ -392,13 +364,12 @@ static int __igt_reserve(unsigned int count, u64 size)
goto err;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err_order;
+ KUNIT_ASSERT_TRUE(test, nodes);
ret = -EINVAL;
drm_mm_init(&mm, 0, count * size);
- if (!check_reserve_boundaries(&mm, count, size))
+ if (!check_reserve_boundaries(test, &mm, count, size))
goto out;
for (n = 0; n < count; n++) {
@@ -407,57 +378,53 @@ static int __igt_reserve(unsigned int count, u64 size)
err = drm_mm_reserve_node(&mm, &nodes[n]);
if (err) {
- pr_err("reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
+ KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
ret = err;
goto out;
}
if (!drm_mm_node_allocated(&nodes[n])) {
- pr_err("reserved node not allocated! step %d, start %llu\n",
- n, nodes[n].start);
+ KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
+ n, nodes[n].start);
goto out;
}
- if (!expect_reserve_fail(&mm, &nodes[n]))
+ if (!expect_reserve_fail(test, &mm, &nodes[n]))
goto out;
}
/* After random insertion the nodes should be in order */
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
/* Repeated use should then fail */
drm_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(&mm,
- set_node(&tmp, order[n] * size, 1)))
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
goto out;
/* Remove and reinsert should work */
drm_mm_remove_node(&nodes[order[n]]);
err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
if (err) {
- pr_err("reserve failed, step %d, start %llu\n",
- n, nodes[n].start);
+ KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
ret = err;
goto out;
}
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
/* Overlapping use should then fail */
for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
goto out;
}
for (n = 0; n < count; n++) {
- if (!expect_reserve_fail(&mm,
- set_node(&tmp,
- size * n,
- size * (count - n))))
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
goto out;
}
@@ -472,8 +439,8 @@ static int __igt_reserve(unsigned int count, u64 size)
node = &nodes[order[(o + m) % count]];
err = drm_mm_reserve_node(&mm, node);
if (err) {
- pr_err("reserve failed, step %d/%d, start %llu\n",
- m, n, node->start);
+ KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
+ m, n, node->start);
ret = err;
goto out;
}
@@ -481,7 +448,7 @@ static int __igt_reserve(unsigned int count, u64 size)
o += n;
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
}
@@ -491,41 +458,30 @@ out:
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-err_order:
kfree(order);
err:
return ret;
}
-static int igt_reserve(void *ignored)
+static void drm_test_mm_reserve(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
- int n, ret;
+ int n;
for_each_prime_number_from(n, 1, 54) {
u64 size = BIT_ULL(n);
- ret = __igt_reserve(count, size - 1);
- if (ret)
- return ret;
-
- ret = __igt_reserve(count, size);
- if (ret)
- return ret;
-
- ret = __igt_reserve(count, size + 1);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
cond_resched();
}
-
- return 0;
}
-static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
- u64 size, u64 alignment, unsigned long color,
- const struct insert_mode *mode)
+static bool expect_insert(struct kunit *test, struct drm_mm *mm,
+ struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
+ const struct insert_mode *mode)
{
int err;
@@ -533,12 +489,13 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
size, alignment, color,
mode->mode);
if (err) {
- pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
- size, alignment, color, mode->name, err);
+ KUNIT_FAIL(test,
+ "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
+ size, alignment, color, mode->name, err);
return false;
}
- if (!assert_node(node, mm, size, alignment, color)) {
+ if (!assert_node(test, node, mm, size, alignment, color)) {
drm_mm_remove_node(node);
return false;
}
@@ -546,7 +503,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
return true;
}
-static bool expect_insert_fail(struct drm_mm *mm, u64 size)
+static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
{
struct drm_mm_node tmp = {};
int err;
@@ -556,17 +513,18 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
return true;
if (!err) {
- pr_err("impossible insert succeeded, node %llu + %llu\n",
- tmp.start, tmp.size);
+ KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
+ tmp.start, tmp.size);
drm_mm_remove_node(&tmp);
} else {
- pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
- err, -ENOSPC, size);
+ KUNIT_FAIL(test,
+ "impossible insert failed with wrong error %d [expected %d], size %llu\n",
+ err, -ENOSPC, size);
}
return false;
}
-static int __igt_insert(unsigned int count, u64 size, bool replace)
+static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
{
DRM_RND_STATE(prng, random_seed);
const struct insert_mode *mode;
@@ -582,8 +540,7 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
ret = -ENOMEM;
nodes = vmalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(count, &prng);
if (!order)
@@ -598,41 +555,43 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
node = replace ? &tmp : &nodes[n];
memset(node, 0, sizeof(*node));
- if (!expect_insert(&mm, node, size, 0, n, mode)) {
- pr_err("%s insert failed, size %llu step %d\n",
- mode->name, size, n);
+ if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
+ KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
+ mode->name, size, n);
goto out;
}
if (replace) {
drm_mm_replace_node(&tmp, &nodes[n]);
if (drm_mm_node_allocated(&tmp)) {
- pr_err("replaced old-node still allocated! step %d\n",
- n);
+ KUNIT_FAIL(test,
+ "replaced old-node still allocated! step %d\n",
+ n);
goto out;
}
- if (!assert_node(&nodes[n], &mm, size, 0, n)) {
- pr_err("replaced node did not inherit parameters, size %llu step %d\n",
- size, n);
+ if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
+ KUNIT_FAIL(test,
+ "replaced node did not inherit parameters, size %llu step %d\n",
+ size, n);
goto out;
}
if (tmp.start != nodes[n].start) {
- pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
- tmp.start, size,
- nodes[n].start, nodes[n].size);
+ KUNIT_FAIL(test,
+ "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
+ tmp.start, size, nodes[n].start, nodes[n].size);
goto out;
}
}
}
/* After random insertion the nodes should be in order */
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
/* Repeated use should then fail */
- if (!expect_insert_fail(&mm, size))
+ if (!expect_insert_fail(test, &mm, size))
goto out;
/* Remove one and reinsert, as the only hole it should refill itself */
@@ -640,19 +599,20 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
u64 addr = nodes[n].start;
drm_mm_remove_node(&nodes[n]);
- if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
- pr_err("%s reinsert failed, size %llu step %d\n",
- mode->name, size, n);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
+ KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
goto out;
}
if (nodes[n].start != addr) {
- pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
+ KUNIT_FAIL(test,
+ "%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
goto out;
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
}
@@ -665,19 +625,20 @@ static int __igt_insert(unsigned int count, u64 size, bool replace)
for (m = 0; m < n; m++) {
node = &nodes[order[(o + m) % count]];
- if (!expect_insert(&mm, node, size, 0, n, mode)) {
- pr_err("%s multiple reinsert failed, size %llu step %d\n",
- mode->name, size, n);
+ if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s multiple reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
goto out;
}
}
o += n;
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
- if (!expect_insert_fail(&mm, size))
+ if (!expect_insert_fail(test, &mm, size))
goto out;
}
@@ -696,44 +657,31 @@ out:
kfree(order);
err_nodes:
vfree(nodes);
-err:
return ret;
}
-static int igt_insert(void *ignored)
+static void drm_test_mm_insert(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
unsigned int n;
- int ret;
for_each_prime_number_from(n, 1, 54) {
u64 size = BIT_ULL(n);
- ret = __igt_insert(count, size - 1, false);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size, false);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size + 1, false);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
cond_resched();
}
-
- return 0;
}
-static int igt_replace(void *ignored)
+static void drm_test_mm_replace(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
unsigned int n;
- int ret;
- /* Reuse igt_insert to exercise replacement by inserting a dummy node,
+ /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
* then replacing it with the intended node. We want to check that
* the tree is intact and all the information we need is carried
* across to the target node.
@@ -742,28 +690,17 @@ static int igt_replace(void *ignored)
for_each_prime_number_from(n, 1, 54) {
u64 size = BIT_ULL(n);
- ret = __igt_insert(count, size - 1, true);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size, true);
- if (ret)
- return ret;
-
- ret = __igt_insert(count, size + 1, true);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
cond_resched();
}
-
- return 0;
}
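
   The replace test relies on drm_mm_replace_node() carrying a reservation over
   from a placeholder node to its final owner without disturbing the rest of the
   interval tree. A minimal sketch of that handover, assuming nothing beyond the
   public drm_mm API (<drm/drm_mm.h>) and a caller-initialised manager:

	/* Reserve space with a temporary node, then hand it over to the real one. */
	static int replace_example(struct drm_mm *mm, struct drm_mm_node *final)
	{
		struct drm_mm_node tmp = {};
		int err;

		err = drm_mm_insert_node(mm, &tmp, 4096);	/* placeholder allocation */
		if (err)
			return err;

		drm_mm_replace_node(&tmp, final);	/* final inherits start, size and color */
		WARN_ON(drm_mm_node_allocated(&tmp));	/* tmp is no longer part of the mm */

		return 0;
	}
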
-static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
u64 size, u64 alignment, unsigned long color,
- u64 range_start, u64 range_end,
- const struct insert_mode *mode)
+ u64 range_start, u64 range_end, const struct insert_mode *mode)
{
int err;
@@ -772,13 +709,14 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
range_start, range_end,
mode->mode);
if (err) {
- pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
- size, alignment, color, mode->name,
- range_start, range_end, err);
+ KUNIT_FAIL(test,
+			   "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) into range [%llx, %llx] failed with err=%d\n",
+ size, alignment, color, mode->name,
+ range_start, range_end, err);
return false;
}
- if (!assert_node(node, mm, size, alignment, color)) {
+ if (!assert_node(test, node, mm, size, alignment, color)) {
drm_mm_remove_node(node);
return false;
}
@@ -786,67 +724,63 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
return true;
}
-static bool expect_insert_in_range_fail(struct drm_mm *mm,
- u64 size,
- u64 range_start,
- u64 range_end)
+static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
+ u64 size, u64 range_start, u64 range_end)
{
struct drm_mm_node tmp = {};
int err;
- err = drm_mm_insert_node_in_range(mm, &tmp,
- size, 0, 0,
- range_start, range_end,
+ err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
0);
if (likely(err == -ENOSPC))
return true;
if (!err) {
- pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
- tmp.start, tmp.size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
+ tmp.start, tmp.size, range_start, range_end);
drm_mm_remove_node(&tmp);
} else {
- pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
- err, -ENOSPC, size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
+ err, -ENOSPC, size, range_start, range_end);
}
return false;
}
-static bool assert_contiguous_in_range(struct drm_mm *mm,
- u64 size,
- u64 start,
- u64 end)
+static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
+ u64 size, u64 start, u64 end)
{
struct drm_mm_node *node;
unsigned int n;
- if (!expect_insert_in_range_fail(mm, size, start, end))
+ if (!expect_insert_in_range_fail(test, mm, size, start, end))
return false;
n = div64_u64(start + size - 1, size);
drm_mm_for_each_node(node, mm) {
if (node->start < start || node->start + node->size > end) {
- pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
- n, node->start, node->start + node->size, start, end);
+ KUNIT_FAIL(test,
+ "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
+ n, node->start, node->start + node->size, start, end);
return false;
}
if (node->start != n * size) {
- pr_err("node %d out of order, expected start %llx, found %llx\n",
- n, n * size, node->start);
+ KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
+ n, n * size, node->start);
return false;
}
if (node->size != size) {
- pr_err("node %d has wrong size, expected size %llx, found %llx\n",
- n, size, node->size);
+ KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
+ n, size, node->size);
return false;
}
- if (drm_mm_hole_follows(node) &&
- drm_mm_hole_node_end(node) < end) {
- pr_err("node %d is followed by a hole!\n", n);
+ if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
+ KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
return false;
}
@@ -856,8 +790,8 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (start > 0) {
node = __drm_mm_interval_first(mm, 0, start - 1);
if (drm_mm_node_allocated(node)) {
- pr_err("node before start: node=%llx+%llu, start=%llx\n",
- node->start, node->size, start);
+ KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
+ node->start, node->size, start);
return false;
}
}
@@ -865,8 +799,8 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
if (end < U64_MAX) {
node = __drm_mm_interval_first(mm, end, U64_MAX);
if (drm_mm_node_allocated(node)) {
- pr_err("node after end: node=%llx+%llu, end=%llx\n",
- node->start, node->size, end);
+ KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
+ node->start, node->size, end);
return false;
}
}
@@ -874,7 +808,8 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
return true;
}
-static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
+static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
+ u64 start, u64 end)
{
const struct insert_mode *mode;
struct drm_mm mm;
@@ -886,14 +821,13 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
DRM_MM_BUG_ON(!size);
DRM_MM_BUG_ON(end <= start);
- /* Very similar to __igt_insert(), but now instead of populating the
+ /* Very similar to __drm_test_mm_insert(), but now instead of populating the
* full range of the drm_mm, we try to fill a small portion of it.
*/
ret = -ENOMEM;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
ret = -EINVAL;
drm_mm_init(&mm, 0, count * size);
@@ -903,20 +837,19 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
for (mode = insert_modes; mode->name; mode++) {
for (n = start_n; n <= end_n; n++) {
- if (!expect_insert_in_range(&mm, &nodes[n],
- size, size, n,
+ if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
start, end, mode)) {
- pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
- mode->name, size, n,
- start_n, end_n,
- start, end);
+ KUNIT_FAIL(test,
+ "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
+ mode->name, size, n, start_n, end_n, start, end);
goto out;
}
}
- if (!assert_contiguous_in_range(&mm, size, start, end)) {
- pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
- mode->name, start, end, size);
+ if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
+ KUNIT_FAIL(test,
+ "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
+ mode->name, start, end, size);
goto out;
}
@@ -925,23 +858,24 @@ static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
u64 addr = nodes[n].start;
drm_mm_remove_node(&nodes[n]);
- if (!expect_insert_in_range(&mm, &nodes[n],
- size, size, n,
+ if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
start, end, mode)) {
- pr_err("%s reinsert failed, step %d\n", mode->name, n);
+ KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
goto out;
}
if (nodes[n].start != addr) {
- pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
- mode->name, n, addr, nodes[n].start);
+ KUNIT_FAIL(test,
+ "%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
goto out;
}
}
- if (!assert_contiguous_in_range(&mm, size, start, end)) {
- pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
- mode->name, start, end, size);
+ if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
+ KUNIT_FAIL(test,
+ "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
+ mode->name, start, end, size);
goto out;
}
@@ -958,11 +892,10 @@ out:
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-err:
return ret;
}
-static int insert_outside_range(void)
+static int insert_outside_range(struct kunit *test)
{
struct drm_mm mm;
const unsigned int start = 1024;
@@ -971,81 +904,58 @@ static int insert_outside_range(void)
drm_mm_init(&mm, start, size);
- if (!expect_insert_in_range_fail(&mm, 1, 0, start))
+ if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
return -EINVAL;
- if (!expect_insert_in_range_fail(&mm, size,
- start - size/2, start + (size+1)/2))
+ if (!expect_insert_in_range_fail(test, &mm, size,
+ start - size / 2, start + (size + 1) / 2))
return -EINVAL;
- if (!expect_insert_in_range_fail(&mm, size,
- end - (size+1)/2, end + size/2))
+ if (!expect_insert_in_range_fail(test, &mm, size,
+ end - (size + 1) / 2, end + size / 2))
return -EINVAL;
- if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
+ if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
return -EINVAL;
drm_mm_takedown(&mm);
return 0;
}
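
   insert_outside_range() only exercises the failure path; a successful
   range-restricted allocation goes through the same drm_mm_insert_node_in_range()
   call. A small sketch under the same assumptions (caller-initialised mm, bounds
   and size are arbitrary example values):

	static int range_insert_example(struct drm_mm *mm)
	{
		struct drm_mm_node node = {};
		int err;

		/* Ask for 512 units anywhere inside [1024, 2048); no alignment, no color. */
		err = drm_mm_insert_node_in_range(mm, &node, 512, 0, 0,
						  1024, 2048, DRM_MM_INSERT_BEST);
		if (err)
			return err;	/* -ENOSPC if no suitable hole lies inside the range */

		drm_mm_remove_node(&node);
		return 0;
	}
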
-static int igt_insert_range(void *ignored)
+static void drm_test_mm_insert_range(struct kunit *test)
{
const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
unsigned int n;
- int ret;
/* Check that requests outside the bounds of drm_mm are rejected. */
- ret = insert_outside_range();
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
for_each_prime_number_from(n, 1, 50) {
const u64 size = BIT_ULL(n);
const u64 max = count * size;
- ret = __igt_insert_range(count, size, 0, max);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, 1, max);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, 0, max - 1);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, 0, max/2);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, max/2, max);
- if (ret)
- return ret;
-
- ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
- if (ret)
- return ret;
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
+		KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+								     max / 2, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ max / 4 + 1, 3 * max / 4 - 1));
cond_resched();
}
-
- return 0;
}
-static int prepare_igt_frag(struct drm_mm *mm,
- struct drm_mm_node *nodes,
- unsigned int num_insert,
- const struct insert_mode *mode)
+static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
+ unsigned int num_insert, const struct insert_mode *mode)
{
unsigned int size = 4096;
unsigned int i;
for (i = 0; i < num_insert; i++) {
- if (!expect_insert(mm, &nodes[i], size, 0, i,
- mode) != 0) {
- pr_err("%s insert failed\n", mode->name);
+		if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed\n", mode->name);
return -EINVAL;
}
}
@@ -1057,12 +967,10 @@ static int prepare_igt_frag(struct drm_mm *mm,
}
return 0;
-
}
-static u64 get_insert_time(struct drm_mm *mm,
- unsigned int num_insert,
- struct drm_mm_node *nodes,
+static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
+ unsigned int num_insert, struct drm_mm_node *nodes,
const struct insert_mode *mode)
{
unsigned int size = 8192;
@@ -1071,8 +979,8 @@ static u64 get_insert_time(struct drm_mm *mm,
start = ktime_get();
for (i = 0; i < num_insert; i++) {
- if (!expect_insert(mm, &nodes[i], size, 0, i, mode) != 0) {
- pr_err("%s insert failed\n", mode->name);
+		if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed\n", mode->name);
return 0;
}
}
@@ -1080,28 +988,26 @@ static u64 get_insert_time(struct drm_mm *mm,
return ktime_to_ns(ktime_sub(ktime_get(), start));
}
-static int igt_frag(void *ignored)
+static void drm_test_mm_frag(struct kunit *test)
{
struct drm_mm mm;
const struct insert_mode *mode;
struct drm_mm_node *nodes, *node, *next;
unsigned int insert_size = 10000;
unsigned int scale_factor = 4;
- int ret = -EINVAL;
/* We need 4 * insert_size nodes to hold intermediate allocated
* drm_mm nodes.
- * 1 times for prepare_igt_frag()
+ * 1 times for prepare_frag()
* 1 times for get_insert_time()
* 2 times for get_insert_time()
*/
nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
- if (!nodes)
- return -ENOMEM;
+ KUNIT_ASSERT_TRUE(test, nodes);
/* For BOTTOMUP and TOPDOWN, we first fragment the
- * address space using prepare_igt_frag() and then try to verify
- * that that insertions scale quadratically from 10k to 20k insertions
+ * address space using prepare_frag() and then try to verify
+ * that insertions scale quadratically from 10k to 20k insertions
*/
drm_mm_init(&mm, 1, U64_MAX - 2);
for (mode = insert_modes; mode->name; mode++) {
@@ -1111,28 +1017,25 @@ static int igt_frag(void *ignored)
mode->mode != DRM_MM_INSERT_HIGH)
continue;
- ret = prepare_igt_frag(&mm, nodes, insert_size, mode);
- if (ret)
+ if (prepare_frag(test, &mm, nodes, insert_size, mode))
goto err;
- insert_time1 = get_insert_time(&mm, insert_size,
+ insert_time1 = get_insert_time(test, &mm, insert_size,
nodes + insert_size, mode);
if (insert_time1 == 0)
goto err;
- insert_time2 = get_insert_time(&mm, (insert_size * 2),
+ insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
nodes + insert_size * 2, mode);
if (insert_time2 == 0)
goto err;
- pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
- mode->name, insert_size, insert_size * 2,
- insert_time1, insert_time2);
+ kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
+ mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);
if (insert_time2 > (scale_factor * insert_time1)) {
- pr_err("%s fragmented insert took %llu nsecs more\n",
- mode->name,
- insert_time2 - (scale_factor * insert_time1));
+ KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
+ mode->name, insert_time2 - (scale_factor * insert_time1));
goto err;
}
@@ -1140,24 +1043,20 @@ static int igt_frag(void *ignored)
drm_mm_remove_node(node);
}
- ret = 0;
err:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-
- return ret;
}
-static int igt_align(void *ignored)
+static void drm_test_mm_align(struct kunit *test)
{
const struct insert_mode *mode;
const unsigned int max_count = min(8192u, max_prime);
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int prime;
- int ret = -EINVAL;
/* For each of the possible insertion modes, we pick a few
* arbitrary alignments and check that the inserted node
@@ -1165,8 +1064,7 @@ static int igt_align(void *ignored)
*/
nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
drm_mm_init(&mm, 1, U64_MAX - 2);
@@ -1176,11 +1074,9 @@ static int igt_align(void *ignored)
for_each_prime_number_from(prime, 1, max_count) {
u64 size = next_prime_number(prime);
- if (!expect_insert(&mm, &nodes[i],
- size, prime, i,
- mode)) {
- pr_err("%s insert failed with alignment=%d",
- mode->name, prime);
+ if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed with alignment=%d",
+ mode->name, prime);
goto out;
}
@@ -1194,22 +1090,18 @@ static int igt_align(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
vfree(nodes);
-err:
- return ret;
}
-static int igt_align_pot(int max)
+static void drm_test_mm_align_pot(struct kunit *test, int max)
{
struct drm_mm mm;
struct drm_mm_node *node, *next;
int bit;
- int ret = -EINVAL;
/* Check that we can align to the full u64 address space */
@@ -1220,51 +1112,45 @@ static int igt_align_pot(int max)
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
- ret = -ENOMEM;
+ KUNIT_FAIL(test, "failed to allocate node");
goto out;
}
align = BIT_ULL(bit);
- size = BIT_ULL(bit-1) + 1;
- if (!expect_insert(&mm, node,
- size, align, bit,
- &insert_modes[0])) {
- pr_err("insert failed with alignment=%llx [%d]",
- align, bit);
+ size = BIT_ULL(bit - 1) + 1;
+ if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
goto out;
}
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm) {
drm_mm_remove_node(node);
kfree(node);
}
drm_mm_takedown(&mm);
- return ret;
}
-static int igt_align32(void *ignored)
+static void drm_test_mm_align32(struct kunit *test)
{
- return igt_align_pot(32);
+ drm_test_mm_align_pot(test, 32);
}
-static int igt_align64(void *ignored)
+static void drm_test_mm_align64(struct kunit *test)
{
- return igt_align_pot(64);
+ drm_test_mm_align_pot(test, 64);
}
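
   Both alignment tests reduce to the contract that drm_mm_insert_node_generic()
   places a node whose start is a multiple of the requested alignment. A sketch of
   that contract for a power-of-two alignment (the 4 KiB value is only an example):

	static int align_example(struct drm_mm *mm)
	{
		struct drm_mm_node node = {};
		const u64 align = BIT_ULL(12);	/* 4 KiB */
		int err;

		err = drm_mm_insert_node_generic(mm, &node, 4096, align, 0, DRM_MM_INSERT_BEST);
		if (err)
			return err;

		WARN_ON(node.start & (align - 1));	/* start must be a multiple of align */
		drm_mm_remove_node(&node);
		return 0;
	}
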
-static void show_scan(const struct drm_mm_scan *scan)
+static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
{
- pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
- scan->hit_start, scan->hit_end,
- scan->size, scan->alignment, scan->color);
+ kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
+ scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
}
-static void show_holes(const struct drm_mm *mm, int count)
+static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
{
u64 hole_start, hole_end;
struct drm_mm_node *hole;
@@ -1274,19 +1160,15 @@ static void show_holes(const struct drm_mm *mm, int count)
const char *node1 = NULL, *node2 = NULL;
if (drm_mm_node_allocated(hole))
- node1 = kasprintf(GFP_KERNEL,
- "[%llx + %lld, color=%ld], ",
+ node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
hole->start, hole->size, hole->color);
if (drm_mm_node_allocated(next))
- node2 = kasprintf(GFP_KERNEL,
- ", [%llx + %lld, color=%ld]",
+ node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
next->start, next->size, next->color);
- pr_info("%sHole [%llx - %llx, size %lld]%s\n",
- node1,
- hole_start, hole_end, hole_end - hole_start,
- node2);
+ kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
+ hole_start, hole_end, hole_end - hole_start, node2);
kfree(node2);
kfree(node1);
@@ -1301,12 +1183,9 @@ struct evict_node {
struct list_head link;
};
-static bool evict_nodes(struct drm_mm_scan *scan,
- struct evict_node *nodes,
- unsigned int *order,
- unsigned int count,
- bool use_color,
- struct list_head *evict_list)
+static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
+ struct evict_node *nodes, unsigned int *order, unsigned int count,
+ bool use_color, struct list_head *evict_list)
{
struct evict_node *e, *en;
unsigned int i;
@@ -1322,8 +1201,9 @@ static bool evict_nodes(struct drm_mm_scan *scan,
list_del(&e->link);
}
if (list_empty(evict_list)) {
- pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
- scan->size, count, scan->alignment, scan->color);
+ KUNIT_FAIL(test,
+ "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
+ scan->size, count, scan->alignment, scan->color);
return false;
}
@@ -1340,7 +1220,8 @@ static bool evict_nodes(struct drm_mm_scan *scan,
}
} else {
if (drm_mm_scan_color_evict(scan)) {
- pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
+ KUNIT_FAIL(test,
+ "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
return false;
}
}
@@ -1348,9 +1229,8 @@ static bool evict_nodes(struct drm_mm_scan *scan,
return true;
}
-static bool evict_nothing(struct drm_mm *mm,
- unsigned int total_size,
- struct evict_node *nodes)
+static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
+ unsigned int total_size, struct evict_node *nodes)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -1371,7 +1251,7 @@ static bool evict_nothing(struct drm_mm *mm,
e = &nodes[n];
if (!drm_mm_node_allocated(&e->node)) {
- pr_err("node[%d] no longer allocated!\n", n);
+ KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
return false;
}
@@ -1387,17 +1267,16 @@ static bool evict_nothing(struct drm_mm *mm,
e = &nodes[n];
if (!e->link.next) {
- pr_err("node[%d] no longer connected!\n", n);
+ KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
return false;
}
}
- return assert_continuous(mm, nodes[0].node.size);
+ return assert_continuous(test, mm, nodes[0].node.size);
}
-static bool evict_everything(struct drm_mm *mm,
- unsigned int total_size,
- struct evict_node *nodes)
+static bool evict_everything(struct kunit *test, struct drm_mm *mm,
+ unsigned int total_size, struct evict_node *nodes)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -1417,8 +1296,8 @@ static bool evict_everything(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link) {
if (!drm_mm_scan_remove_block(&scan, &e->node)) {
if (!err) {
- pr_err("Node %lld not marked for eviction!\n",
- e->node.start);
+ KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
+ e->node.start);
err = -EINVAL;
}
}
@@ -1429,29 +1308,25 @@ static bool evict_everything(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link)
drm_mm_remove_node(&e->node);
- if (!assert_one_hole(mm, 0, total_size))
+ if (!assert_one_hole(test, mm, 0, total_size))
return false;
list_for_each_entry(e, &evict_list, link) {
err = drm_mm_reserve_node(mm, &e->node);
if (err) {
- pr_err("Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
return false;
}
}
- return assert_continuous(mm, nodes[0].node.size);
+ return assert_continuous(test, mm, nodes[0].node.size);
}
-static int evict_something(struct drm_mm *mm,
- u64 range_start, u64 range_end,
- struct evict_node *nodes,
- unsigned int *order,
- unsigned int count,
- unsigned int size,
- unsigned int alignment,
- const struct insert_mode *mode)
+static int evict_something(struct kunit *test, struct drm_mm *mm,
+ u64 range_start, u64 range_end, struct evict_node *nodes,
+ unsigned int *order, unsigned int count, unsigned int size,
+ unsigned int alignment, const struct insert_mode *mode)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -1459,38 +1334,35 @@ static int evict_something(struct drm_mm *mm,
struct drm_mm_node tmp;
int err;
- drm_mm_scan_init_with_range(&scan, mm,
- size, alignment, 0,
- range_start, range_end,
- mode->mode);
- if (!evict_nodes(&scan,
- nodes, order, count, false,
- &evict_list))
+ drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
+ range_end, mode->mode);
+ if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
return -EINVAL;
memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
DRM_MM_INSERT_EVICT);
if (err) {
- pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
- size, alignment);
- show_scan(&scan);
- show_holes(mm, 3);
+ KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
+ size, alignment);
+ show_scan(test, &scan);
+ show_holes(test, mm, 3);
return err;
}
if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
err = -EINVAL;
}
- if (!assert_node(&tmp, mm, size, alignment, 0) ||
+ if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
drm_mm_hole_follows(&tmp)) {
- pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
- tmp.size, size,
- alignment, misalignment(&tmp, alignment),
- tmp.start, drm_mm_hole_follows(&tmp));
+ KUNIT_FAIL(test,
+ "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
+ tmp.size, size, alignment, misalignment(&tmp, alignment),
+ tmp.start, drm_mm_hole_follows(&tmp));
err = -EINVAL;
}
@@ -1501,21 +1373,21 @@ static int evict_something(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link) {
err = drm_mm_reserve_node(mm, &e->node);
if (err) {
- pr_err("Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
return err;
}
}
- if (!assert_continuous(mm, nodes[0].node.size)) {
- pr_err("range is no longer continuous\n");
+ if (!assert_continuous(test, mm, nodes[0].node.size)) {
+ KUNIT_FAIL(test, "range is no longer continuous\n");
return -EINVAL;
}
return 0;
}
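
   evict_something() follows the standard drm_mm_scan protocol: add candidate
   nodes until the scan reports that it has assembled a large enough hole, walk the
   candidates in reverse insertion order keeping only the ones the scan still
   claims, evict those, and then fill the freed hole with DRM_MM_INSERT_EVICT. A
   condensed sketch of that protocol, reusing the struct evict_node wrapper defined
   in this file and assuming the caller already inserted nodes[0..count) into mm:

	static void scan_and_evict(struct drm_mm *mm, struct evict_node *nodes,
				   unsigned int count, u64 size)
	{
		struct drm_mm_scan scan;
		struct evict_node *e, *en;
		LIST_HEAD(evict_list);
		unsigned int i;

		drm_mm_scan_init_with_range(&scan, mm, size, 0, 0, 0, U64_MAX,
					    DRM_MM_INSERT_BEST);

		/* Feed candidates until the scan has assembled a large enough hole. */
		for (i = 0; i < count; i++) {
			e = &nodes[i];
			list_add(&e->link, &evict_list);	/* prepend: list is reverse insertion order */
			if (drm_mm_scan_add_block(&scan, &e->node))
				break;
		}

		/* Only the nodes the scan still claims actually have to go. */
		list_for_each_entry_safe(e, en, &evict_list, link)
			if (!drm_mm_scan_remove_block(&scan, &e->node))
				list_del(&e->link);

		/* Evict them; the hole can now be filled with DRM_MM_INSERT_EVICT. */
		list_for_each_entry(e, &evict_list, link)
			drm_mm_remove_node(&e->node);
	}
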
-static int igt_evict(void *ignored)
+static void drm_test_mm_evict(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int size = 8192;
@@ -1524,7 +1396,6 @@ static int igt_evict(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
/* Here we populate a full drm_mm and then try and insert a new node
* by evicting other nodes in a random order. The drm_mm_scan should
@@ -1533,61 +1404,49 @@ static int igt_evict(void *ignored)
* sizes to try and stress the hole finder.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
drm_mm_init(&mm, 0, size);
for (n = 0; n < size; n++) {
- err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
- if (err) {
- pr_err("insert failed, step %d\n", n);
- ret = err;
+ if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
/* First check that using the scanner doesn't break the mm */
- if (!evict_nothing(&mm, size, nodes)) {
- pr_err("evict_nothing() failed\n");
+ if (!evict_nothing(test, &mm, size, nodes)) {
+ KUNIT_FAIL(test, "evict_nothing() failed\n");
goto out;
}
- if (!evict_everything(&mm, size, nodes)) {
- pr_err("evict_everything() failed\n");
+ if (!evict_everything(test, &mm, size, nodes)) {
+ KUNIT_FAIL(test, "evict_everything() failed\n");
goto out;
}
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, 0, U64_MAX,
- nodes, order, size,
- n, 1,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u) failed\n",
- mode->name, n);
- ret = err;
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
+ mode)) {
+ KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
+ mode->name, n);
goto out;
}
}
for (n = 1; n < size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, 0, U64_MAX,
- nodes, order, size,
- size/2, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, size/2, n);
- ret = err;
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
+ size / 2, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, size / 2, n);
goto out;
}
}
@@ -1598,14 +1457,11 @@ static int igt_evict(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, 0, U64_MAX,
- nodes, order, size,
- nsize, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
- ret = err;
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
+ nsize, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
goto out;
}
}
@@ -1613,7 +1469,6 @@ static int igt_evict(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1621,11 +1476,9 @@ out:
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int igt_evict_range(void *ignored)
+static void drm_test_mm_evict_range(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int size = 8192;
@@ -1637,28 +1490,22 @@ static int igt_evict_range(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
- /* Like igt_evict() but now we are limiting the search to a
+ /* Like drm_test_mm_evict() but now we are limiting the search to a
* small portion of the full drm_mm.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
drm_mm_init(&mm, 0, size);
for (n = 0; n < size; n++) {
- err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
- if (err) {
- pr_err("insert failed, step %d\n", n);
- ret = err;
+ if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
@@ -1666,26 +1513,22 @@ static int igt_evict_range(void *ignored)
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= range_size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, range_start, range_end,
- nodes, order, size,
- n, 1,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
- mode->name, n, range_start, range_end);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, n, 1, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u) failed with range [%u, %u]\n",
+ mode->name, n, range_start, range_end);
goto out;
}
}
for (n = 1; n <= range_size; n <<= 1) {
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, range_start, range_end,
- nodes, order, size,
- range_size/2, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, range_size/2, n, range_start, range_end);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, range_size / 2, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, range_size / 2, n, range_start, range_end);
goto out;
}
}
@@ -1696,13 +1539,11 @@ static int igt_evict_range(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, size, &prng);
- err = evict_something(&mm, range_start, range_end,
- nodes, order, size,
- nsize, n,
- mode);
- if (err) {
- pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
- mode->name, nsize, n, range_start, range_end);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, nsize, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, nsize, n, range_start, range_end);
goto out;
}
}
@@ -1710,7 +1551,6 @@ static int igt_evict_range(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1718,8 +1558,6 @@ out:
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
static unsigned int node_index(const struct drm_mm_node *node)
@@ -1727,9 +1565,10 @@ static unsigned int node_index(const struct drm_mm_node *node)
return div64_u64(node->start, node->size);
}
-static int igt_topdown(void *ignored)
+static void drm_test_mm_topdown(struct kunit *test)
{
const struct insert_mode *topdown = &insert_modes[TOPDOWN];
+
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
@@ -1737,17 +1576,14 @@ static int igt_topdown(void *ignored)
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
- int ret;
/* When allocating top-down, we expect to be returned a node
* from a suitable hole at the top of the drm_mm. We check that
* the returned node does match the highest available slot.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
@@ -1757,28 +1593,26 @@ static int igt_topdown(void *ignored)
if (!order)
goto err_bitmap;
- ret = -EINVAL;
for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size*count);
+ drm_mm_init(&mm, 0, size * count);
for (n = 0; n < count; n++) {
- if (!expect_insert(&mm, &nodes[n],
- size, 0, n,
- topdown)) {
- pr_err("insert failed, size %u step %d\n", size, n);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
+ KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
goto out;
}
if (drm_mm_hole_follows(&nodes[n])) {
- pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
- n, nodes[n].start, size);
+ KUNIT_FAIL(test,
+				   "hole after topdown insert %d, start=%llx, size=%u\n",
+ n, nodes[n].start, size);
goto out;
}
- if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
+ if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
goto out;
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
drm_random_reorder(order, count, &prng);
@@ -1793,23 +1627,23 @@ static int igt_topdown(void *ignored)
unsigned int last;
node = &nodes[order[(o + m) % count]];
- if (!expect_insert(&mm, node,
- size, 0, 0,
- topdown)) {
- pr_err("insert failed, step %d/%d\n", m, n);
+ if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
+ KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
goto out;
}
if (drm_mm_hole_follows(node)) {
- pr_err("hole after topdown insert %d/%d, start=%llx\n",
- m, n, node->start);
+ KUNIT_FAIL(test,
+ "hole after topdown insert %d/%d, start=%llx\n",
+ m, n, node->start);
goto out;
}
last = find_last_bit(bitmap, count);
if (node_index(node) != last) {
- pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
- m, n, size, last, node_index(node));
+ KUNIT_FAIL(test,
+ "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
+ m, n, size, last, node_index(node));
goto out;
}
@@ -1827,7 +1661,6 @@ static int igt_topdown(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1837,13 +1670,12 @@ err_bitmap:
bitmap_free(bitmap);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int igt_bottomup(void *ignored)
+static void drm_test_mm_bottomup(struct kunit *test)
{
const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
+
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
@@ -1851,16 +1683,13 @@ static int igt_bottomup(void *ignored)
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
- int ret;
- /* Like igt_topdown, but instead of searching for the last hole,
+ /* Like drm_test_mm_topdown, but instead of searching for the last hole,
* we search for the first.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(count, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
@@ -1870,22 +1699,20 @@ static int igt_bottomup(void *ignored)
if (!order)
goto err_bitmap;
- ret = -EINVAL;
for (size = 1; size <= 64; size <<= 1) {
- drm_mm_init(&mm, 0, size*count);
+ drm_mm_init(&mm, 0, size * count);
for (n = 0; n < count; n++) {
- if (!expect_insert(&mm, &nodes[n],
- size, 0, n,
- bottomup)) {
- pr_err("bottomup insert failed, size %u step %d\n", size, n);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
+ KUNIT_FAIL(test,
+ "bottomup insert failed, size %u step %d\n", size, n);
goto out;
}
- if (!assert_one_hole(&mm, size*(n + 1), size*count))
+ if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
goto out;
}
- if (!assert_continuous(&mm, size))
+ if (!assert_continuous(test, &mm, size))
goto out;
drm_random_reorder(order, count, &prng);
@@ -1900,17 +1727,16 @@ static int igt_bottomup(void *ignored)
unsigned int first;
node = &nodes[order[(o + m) % count]];
- if (!expect_insert(&mm, node,
- size, 0, 0,
- bottomup)) {
- pr_err("insert failed, step %d/%d\n", m, n);
+ if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
+ KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
goto out;
}
first = find_first_bit(bitmap, count);
if (node_index(node) != first) {
- pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
- m, n, first, node_index(node));
+ KUNIT_FAIL(test,
+ "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+ m, n, first, node_index(node));
goto out;
}
__clear_bit(first, bitmap);
@@ -1927,7 +1753,6 @@ static int igt_bottomup(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
@@ -1937,47 +1762,39 @@ err_bitmap:
bitmap_free(bitmap);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
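
   The topdown and bottomup tests differ only in the insert mode passed to the same
   call: DRM_MM_INSERT_HIGH packs a node against the top of the highest suitable
   hole, DRM_MM_INSERT_LOW against the bottom of the lowest one. A minimal sketch
   (the 4096-unit size is an arbitrary example):

	static void place_extremes(struct drm_mm *mm)
	{
		struct drm_mm_node hi = {}, lo = {};

		/* Same size, alignment and color; only the placement policy differs. */
		if (drm_mm_insert_node_generic(mm, &hi, 4096, 0, 0, DRM_MM_INSERT_HIGH))
			return;	/* no hole large enough */
		if (drm_mm_insert_node_generic(mm, &lo, 4096, 0, 0, DRM_MM_INSERT_LOW)) {
			drm_mm_remove_node(&hi);
			return;
		}

		/* With one large hole, hi ends flush with its top and lo starts at its bottom. */

		drm_mm_remove_node(&lo);
		drm_mm_remove_node(&hi);
	}
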
-static int __igt_once(unsigned int mode)
+static void drm_test_mm_once(struct kunit *test, unsigned int mode)
{
struct drm_mm mm;
struct drm_mm_node rsvd_lo, rsvd_hi, node;
- int err;
drm_mm_init(&mm, 0, 7);
memset(&rsvd_lo, 0, sizeof(rsvd_lo));
rsvd_lo.start = 1;
rsvd_lo.size = 1;
- err = drm_mm_reserve_node(&mm, &rsvd_lo);
- if (err) {
- pr_err("Could not reserve low node\n");
+ if (drm_mm_reserve_node(&mm, &rsvd_lo)) {
+ KUNIT_FAIL(test, "Could not reserve low node\n");
goto err;
}
memset(&rsvd_hi, 0, sizeof(rsvd_hi));
rsvd_hi.start = 5;
rsvd_hi.size = 1;
- err = drm_mm_reserve_node(&mm, &rsvd_hi);
- if (err) {
- pr_err("Could not reserve low node\n");
+ if (drm_mm_reserve_node(&mm, &rsvd_hi)) {
+		KUNIT_FAIL(test, "Could not reserve high node\n");
goto err_lo;
}
if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
- pr_err("Expected a hole after lo and high nodes!\n");
- err = -EINVAL;
+ KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n");
goto err_hi;
}
memset(&node, 0, sizeof(node));
- err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
- if (err) {
- pr_err("Could not insert the node into the available hole!\n");
- err = -EINVAL;
+ if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
+ KUNIT_FAIL(test, "Could not insert the node into the available hole!\n");
goto err_hi;
}
@@ -1988,23 +1805,20 @@ err_lo:
drm_mm_remove_node(&rsvd_lo);
err:
drm_mm_takedown(&mm);
- return err;
}
-static int igt_lowest(void *ignored)
+static void drm_test_mm_lowest(struct kunit *test)
{
- return __igt_once(DRM_MM_INSERT_LOW);
+ drm_test_mm_once(test, DRM_MM_INSERT_LOW);
}
-static int igt_highest(void *ignored)
+static void drm_test_mm_highest(struct kunit *test)
{
- return __igt_once(DRM_MM_INSERT_HIGH);
+ drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
}
static void separate_adjacent_colors(const struct drm_mm_node *node,
- unsigned long color,
- u64 *start,
- u64 *end)
+ unsigned long color, u64 *start, u64 *end)
{
if (drm_mm_node_allocated(node) && node->color != color)
++*start;
@@ -2014,12 +1828,12 @@ static void separate_adjacent_colors(const struct drm_mm_node *node,
--*end;
}
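
   separate_adjacent_colors() is a drm_mm color_adjust callback: for the nodes
   bordering a candidate hole, drm_mm lets the callback shrink the usable
   [*start, *end) range, which here forces a one-unit gap whenever neighbouring
   colors differ. Installing such a callback is a single assignment on an
   initialised manager; a sketch assuming the callback above (the 1 MiB range is
   an arbitrary example):

	static void init_colored_mm(struct drm_mm *mm)
	{
		drm_mm_init(mm, 0, 1ULL << 20);
		/* Every subsequent insertion and eviction scan honours the colour gap. */
		mm->color_adjust = separate_adjacent_colors;
	}
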
-static bool colors_abutt(const struct drm_mm_node *node)
+static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
{
if (!drm_mm_hole_follows(node) &&
drm_mm_node_allocated(list_next_entry(node, node_list))) {
- pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
- node->color, node->start, node->size,
+ KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
+ node->color, node->start, node->size,
list_next_entry(node, node_list)->color,
list_next_entry(node, node_list)->start,
list_next_entry(node, node_list)->size);
@@ -2029,14 +1843,13 @@ static bool colors_abutt(const struct drm_mm_node *node)
return false;
}
-static int igt_color(void *ignored)
+static void drm_test_mm_color(struct kunit *test)
{
const unsigned int count = min(4096u, max_iterations);
const struct insert_mode *mode;
struct drm_mm mm;
struct drm_mm_node *node, *nn;
unsigned int n;
- int ret = -EINVAL, err;
/* Color adjustment complicates everything. First we just check
* that when we insert a node we apply any color_adjustment callback.
@@ -2049,15 +1862,11 @@ static int igt_color(void *ignored)
for (n = 1; n <= count; n++) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
- if (!expect_insert(&mm, node,
- n, 0, n,
- &insert_modes[0])) {
- pr_err("insert failed, step %d\n", n);
+ if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
kfree(node);
goto out;
}
@@ -2065,8 +1874,8 @@ static int igt_color(void *ignored)
drm_mm_for_each_node_safe(node, nn, &mm) {
if (node->color != node->size) {
- pr_err("invalid color stored: expected %lld, found %ld\n",
- node->size, node->color);
+ KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
+ node->size, node->color);
goto out;
}
@@ -2081,18 +1890,14 @@ static int igt_color(void *ignored)
u64 last;
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
- node->size = 1 + 2*count;
+ node->size = 1 + 2 * count;
node->color = node->size;
- err = drm_mm_reserve_node(&mm, node);
- if (err) {
- pr_err("initial reserve failed!\n");
- ret = err;
+ if (drm_mm_reserve_node(&mm, node)) {
+ KUNIT_FAIL(test, "initial reserve failed!\n");
goto out;
}
@@ -2102,19 +1907,15 @@ static int igt_color(void *ignored)
int rem;
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
node->start = last;
node->size = n + count;
node->color = node->size;
- err = drm_mm_reserve_node(&mm, node);
- if (err != -ENOSPC) {
- pr_err("reserve %d did not report color overlap! err=%d\n",
- n, err);
+ if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
+ KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
goto out;
}
@@ -2122,10 +1923,8 @@ static int igt_color(void *ignored)
rem = misalignment(node, n + count);
node->start += n + count - rem;
- err = drm_mm_reserve_node(&mm, node);
- if (err) {
- pr_err("reserve %d failed, err=%d\n", n, err);
- ret = err;
+ if (drm_mm_reserve_node(&mm, node)) {
+ KUNIT_FAIL(test, "reserve %d failed", n);
goto out;
}
@@ -2134,16 +1933,11 @@ static int igt_color(void *ignored)
for (n = 1; n <= count; n++) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- ret = -ENOMEM;
+ if (!node)
goto out;
- }
- if (!expect_insert(&mm, node,
- n, n, n,
- mode)) {
- pr_err("%s insert failed, step %d\n",
- mode->name, n);
+ if (!expect_insert(test, &mm, node, n, n, n, mode)) {
+ KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
kfree(node);
goto out;
}
@@ -2153,19 +1947,21 @@ static int igt_color(void *ignored)
u64 rem;
if (node->color != node->size) {
- pr_err("%s invalid color stored: expected %lld, found %ld\n",
- mode->name, node->size, node->color);
+ KUNIT_FAIL(test,
+ "%s invalid color stored: expected %lld, found %ld\n",
+ mode->name, node->size, node->color);
goto out;
}
- if (colors_abutt(node))
+ if (colors_abutt(test, node))
goto out;
div64_u64_rem(node->start, node->size, &rem);
if (rem) {
- pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
- mode->name, node->start, node->size, rem);
+ KUNIT_FAIL(test,
+ "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
+ mode->name, node->start, node->size, rem);
goto out;
}
@@ -2176,25 +1972,18 @@ static int igt_color(void *ignored)
cond_resched();
}
- ret = 0;
out:
drm_mm_for_each_node_safe(node, nn, &mm) {
drm_mm_remove_node(node);
kfree(node);
}
drm_mm_takedown(&mm);
- return ret;
}
-static int evict_color(struct drm_mm *mm,
- u64 range_start, u64 range_end,
- struct evict_node *nodes,
- unsigned int *order,
- unsigned int count,
- unsigned int size,
- unsigned int alignment,
- unsigned long color,
- const struct insert_mode *mode)
+static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
+ u64 range_end, struct evict_node *nodes, unsigned int *order,
+ unsigned int count, unsigned int size, unsigned int alignment,
+ unsigned long color, const struct insert_mode *mode)
{
struct drm_mm_scan scan;
LIST_HEAD(evict_list);
@@ -2202,39 +1991,37 @@ static int evict_color(struct drm_mm *mm,
struct drm_mm_node tmp;
int err;
- drm_mm_scan_init_with_range(&scan, mm,
- size, alignment, color,
- range_start, range_end,
- mode->mode);
- if (!evict_nodes(&scan,
- nodes, order, count, true,
- &evict_list))
+ drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
+ range_end, mode->mode);
+ if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
return -EINVAL;
memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
DRM_MM_INSERT_EVICT);
if (err) {
- pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
- size, alignment, color, err);
- show_scan(&scan);
- show_holes(mm, 3);
+ KUNIT_FAIL(test,
+ "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
+ size, alignment, color, err);
+ show_scan(test, &scan);
+ show_holes(test, mm, 3);
return err;
}
if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
- pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
- tmp.start, tmp.size, range_start, range_end);
+ KUNIT_FAIL(test,
+ "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
err = -EINVAL;
}
- if (colors_abutt(&tmp))
+ if (colors_abutt(test, &tmp))
err = -EINVAL;
- if (!assert_node(&tmp, mm, size, alignment, color)) {
- pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
- tmp.size, size,
- alignment, misalignment(&tmp, alignment), tmp.start);
+ if (!assert_node(test, &tmp, mm, size, alignment, color)) {
+ KUNIT_FAIL(test,
+ "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
+ tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
err = -EINVAL;
}
@@ -2245,8 +2032,8 @@ static int evict_color(struct drm_mm *mm,
list_for_each_entry(e, &evict_list, link) {
err = drm_mm_reserve_node(mm, &e->node);
if (err) {
- pr_err("Failed to reinsert node after eviction: start=%llx\n",
- e->node.start);
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
return err;
}
}
@@ -2255,7 +2042,7 @@ static int evict_color(struct drm_mm *mm,
return 0;
}
-static int igt_color_evict(void *ignored)
+static void drm_test_mm_color_evict(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int total_size = min(8192u, max_iterations);
@@ -2265,7 +2052,6 @@ static int igt_color_evict(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
/* Check that the drm_mm_scan also honours color adjustment when
* choosing its victims to create a hole. Our color_adjust does not
@@ -2273,23 +2059,20 @@ static int igt_color_evict(void *ignored)
* enlarging the set of victims that must be evicted.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(total_size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
- drm_mm_init(&mm, 0, 2*total_size - 1);
+ drm_mm_init(&mm, 0, 2 * total_size - 1);
mm.color_adjust = separate_adjacent_colors;
for (n = 0; n < total_size; n++) {
- if (!expect_insert(&mm, &nodes[n].node,
+ if (!expect_insert(test, &mm, &nodes[n].node,
1, 0, color++,
&insert_modes[0])) {
- pr_err("insert failed, step %d\n", n);
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
@@ -2297,26 +2080,19 @@ static int igt_color_evict(void *ignored)
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= total_size; n <<= 1) {
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, 0, U64_MAX,
- nodes, order, total_size,
- n, 1, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u) failed\n",
- mode->name, n);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ n, 1, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
goto out;
}
}
for (n = 1; n < total_size; n <<= 1) {
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, 0, U64_MAX,
- nodes, order, total_size,
- total_size/2, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, total_size/2, n);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ total_size / 2, n, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, total_size / 2, n);
goto out;
}
}
@@ -2327,13 +2103,10 @@ static int igt_color_evict(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, 0, U64_MAX,
- nodes, order, total_size,
- nsize, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
- mode->name, nsize, n);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ nsize, n, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
goto out;
}
}
@@ -2341,21 +2114,16 @@ static int igt_color_evict(void *ignored)
cond_resched();
}
- ret = 0;
out:
- if (ret)
- show_mm(&mm);
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-static int igt_color_evict_range(void *ignored)
+static void drm_test_mm_color_evict_range(struct kunit *test)
{
DRM_RND_STATE(prng, random_seed);
const unsigned int total_size = 8192;
@@ -2368,29 +2136,25 @@ static int igt_color_evict_range(void *ignored)
struct evict_node *nodes;
struct drm_mm_node *node, *next;
unsigned int *order, n;
- int ret, err;
- /* Like igt_color_evict(), but limited to small portion of the full
+ /* Like drm_test_mm_color_evict(), but limited to small portion of the full
* drm_mm range.
*/
- ret = -ENOMEM;
nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
- if (!nodes)
- goto err;
+ KUNIT_ASSERT_TRUE(test, nodes);
order = drm_random_order(total_size, &prng);
if (!order)
goto err_nodes;
- ret = -EINVAL;
- drm_mm_init(&mm, 0, 2*total_size - 1);
+ drm_mm_init(&mm, 0, 2 * total_size - 1);
mm.color_adjust = separate_adjacent_colors;
for (n = 0; n < total_size; n++) {
- if (!expect_insert(&mm, &nodes[n].node,
+ if (!expect_insert(test, &mm, &nodes[n].node,
1, 0, color++,
&insert_modes[0])) {
- pr_err("insert failed, step %d\n", n);
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
goto out;
}
}
@@ -2398,26 +2162,22 @@ static int igt_color_evict_range(void *ignored)
for (mode = evict_modes; mode->name; mode++) {
for (n = 1; n <= range_size; n <<= 1) {
drm_random_reorder(order, range_size, &prng);
- err = evict_color(&mm, range_start, range_end,
- nodes, order, total_size,
- n, 1, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
- mode->name, n, range_start, range_end);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, n, 1, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u) failed for range [%x, %x]\n",
+ mode->name, n, range_start, range_end);
goto out;
}
}
for (n = 1; n < range_size; n <<= 1) {
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, range_start, range_end,
- nodes, order, total_size,
- range_size/2, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, total_size/2, n, range_start, range_end);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, range_size / 2, n, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+					   mode->name, range_size / 2, n, range_start, range_end);
goto out;
}
}
@@ -2428,13 +2188,11 @@ static int igt_color_evict_range(void *ignored)
DRM_MM_BUG_ON(!nsize);
drm_random_reorder(order, total_size, &prng);
- err = evict_color(&mm, range_start, range_end,
- nodes, order, total_size,
- nsize, n, color++,
- mode);
- if (err) {
- pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
- mode->name, nsize, n, range_start, range_end);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, nsize, n, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, nsize, n, range_start, range_end);
goto out;
}
}
@@ -2442,46 +2200,57 @@ static int igt_color_evict_range(void *ignored)
cond_resched();
}
- ret = 0;
out:
- if (ret)
- show_mm(&mm);
drm_mm_for_each_node_safe(node, next, &mm)
drm_mm_remove_node(node);
drm_mm_takedown(&mm);
kfree(order);
err_nodes:
vfree(nodes);
-err:
- return ret;
}
-#include "drm_selftest.c"
-
-static int __init test_drm_mm_init(void)
+static int drm_mm_init_test(struct kunit *test)
{
- int err;
-
while (!random_seed)
random_seed = get_random_int();
- pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
- random_seed, max_iterations, max_prime);
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-static void __exit test_drm_mm_exit(void)
-{
+ return 0;
}
-module_init(test_drm_mm_init);
-module_exit(test_drm_mm_exit);
-
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);
+static struct kunit_case drm_mm_tests[] = {
+ KUNIT_CASE(drm_test_mm_init),
+ KUNIT_CASE(drm_test_mm_debug),
+ KUNIT_CASE(drm_test_mm_reserve),
+ KUNIT_CASE(drm_test_mm_insert),
+ KUNIT_CASE(drm_test_mm_replace),
+ KUNIT_CASE(drm_test_mm_insert_range),
+ KUNIT_CASE(drm_test_mm_frag),
+ KUNIT_CASE(drm_test_mm_align),
+ KUNIT_CASE(drm_test_mm_align32),
+ KUNIT_CASE(drm_test_mm_align64),
+ KUNIT_CASE(drm_test_mm_evict),
+ KUNIT_CASE(drm_test_mm_evict_range),
+ KUNIT_CASE(drm_test_mm_topdown),
+ KUNIT_CASE(drm_test_mm_bottomup),
+ KUNIT_CASE(drm_test_mm_lowest),
+ KUNIT_CASE(drm_test_mm_highest),
+ KUNIT_CASE(drm_test_mm_color),
+ KUNIT_CASE(drm_test_mm_color_evict),
+ KUNIT_CASE(drm_test_mm_color_evict_range),
+ {}
+};
+
+static struct kunit_suite drm_mm_test_suite = {
+ .name = "drm_mm",
+ .init = drm_mm_init_test,
+ .test_cases = drm_mm_tests,
+};
+
+kunit_test_suite(drm_mm_test_suite);
+
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
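
   The conversion pattern used throughout this file is mechanical: each selftest
   becomes a void function taking struct kunit *, pr_err() becomes KUNIT_FAIL(),
   pr_info() becomes kunit_info(), and the cases are registered through
   KUNIT_CASE() and kunit_test_suite(). A self-contained sketch of that shape (all
   names here are illustrative, not part of the patch):

	#include <kunit/test.h>

	static void example_test(struct kunit *test)
	{
		int value = 42;

		kunit_info(test, "checking value=%d\n", value);
		if (value != 42)
			KUNIT_FAIL(test, "unexpected value %d\n", value);
	}

	static struct kunit_case example_cases[] = {
		KUNIT_CASE(example_test),
		{}
	};

	static struct kunit_suite example_suite = {
		.name = "example",
		.test_cases = example_cases,
	};

	kunit_test_suite(example_suite);
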
diff --git a/drivers/gpu/drm/selftests/test-drm_plane_helper.c b/drivers/gpu/drm/tests/drm_plane_helper_test.c
index 64e8938ab194..ec71af791f1f 100644
--- a/drivers/gpu/drm/selftests/test-drm_plane_helper.c
+++ b/drivers/gpu/drm/tests/drm_plane_helper_test.c
@@ -1,20 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for the drm_plane_helper functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
-#define pr_fmt(fmt) "drm_plane_helper: " fmt
+#include <kunit/test.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_modes.h>
-#include "test-drm_modeset_common.h"
-
static void set_src(struct drm_plane_state *plane_state,
- unsigned src_x, unsigned src_y,
- unsigned src_w, unsigned src_h)
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h)
{
plane_state->src_x = src_x;
plane_state->src_y = src_y;
@@ -23,8 +22,8 @@ static void set_src(struct drm_plane_state *plane_state,
}
static bool check_src_eq(struct drm_plane_state *plane_state,
- unsigned src_x, unsigned src_y,
- unsigned src_w, unsigned src_h)
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h)
{
if (plane_state->src.x1 < 0) {
pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
@@ -50,7 +49,7 @@ static bool check_src_eq(struct drm_plane_state *plane_state,
static void set_crtc(struct drm_plane_state *plane_state,
int crtc_x, int crtc_y,
- unsigned crtc_w, unsigned crtc_h)
+ unsigned int crtc_w, unsigned int crtc_h)
{
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
@@ -60,7 +59,7 @@ static void set_crtc(struct drm_plane_state *plane_state,
static bool check_crtc_eq(struct drm_plane_state *plane_state,
int crtc_x, int crtc_y,
- unsigned crtc_w, unsigned crtc_h)
+ unsigned int crtc_w, unsigned int crtc_h)
{
if (plane_state->dst.x1 != crtc_x ||
plane_state->dst.y1 != crtc_y ||
@@ -74,7 +73,7 @@ static bool check_crtc_eq(struct drm_plane_state *plane_state,
return true;
}
-int igt_check_plane_state(void *ignored)
+static void drm_test_check_plane_state(struct kunit *test)
{
int ret;
@@ -83,9 +82,8 @@ int igt_check_plane_state(void *ignored)
.enable = true,
.active = true,
.mode = {
- DRM_MODE("1024x768", 0, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ DRM_MODE("1024x768", 0, 65000, 1024, 1048, 1184, 1344, 0, 768, 771,
+ 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
},
};
static struct drm_plane plane = {
@@ -106,119 +104,134 @@ int igt_check_plane_state(void *ignored)
set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
set_crtc(&plane_state, 0, 0, fb.width, fb.height);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Simple clipping check should pass\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple clipping check should pass\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
/* Rotated clipping + reflection, no scaling. */
plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Rotated clipping check should pass\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Rotated clipping check should pass\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
plane_state.rotation = DRM_MODE_ROTATE_0;
/* Check whether positioning works correctly. */
set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
set_crtc(&plane_state, 0, 0, 1023, 767);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(!ret, "Should not be able to position on the crtc with can_position=false\n");
+ KUNIT_EXPECT_TRUE_MSG(test, ret,
+ "Should not be able to position on the crtc with can_position=false\n");
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, false);
- FAIL(ret < 0, "Simple positioning should work\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1023, 767));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple positioning should work\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1023, 767));
/* Simple scaling tests. */
set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
set_crtc(&plane_state, 0, 0, 1024, 768);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0x8001,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(!ret, "Upscaling out of range should fail.\n");
+ KUNIT_EXPECT_TRUE_MSG(test, ret, "Upscaling out of range should fail.\n");
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0x8000,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Upscaling exactly 2x should work\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Upscaling exactly 2x should work\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x1ffff, false, false);
- FAIL(!ret, "Downscaling out of range should fail.\n");
+ KUNIT_EXPECT_TRUE_MSG(test, ret, "Downscaling out of range should fail.\n");
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x20000, false, false);
- FAIL(ret < 0, "Should succeed with exact scaling limit\n");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed with exact scaling limit\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
/* Testing rounding errors. */
set_src(&plane_state, 0, 0, 0x40001, 0x40001);
set_crtc(&plane_state, 1022, 766, 4, 4);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x10001,
true, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
set_crtc(&plane_state, -2, -2, 1028, 772);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
0x10001,
false, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0x40002, 0x40002, 1024 << 16, 768 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x40002, 0x40002,
+ 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
set_crtc(&plane_state, 1022, 766, 4, 4);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0xffff,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
/* Should not be rounded to 0x20001, which would be upscaling. */
- FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
set_crtc(&plane_state, -2, -2, 1028, 772);
ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
0xffff,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
- FAIL(ret < 0, "Should succeed by clipping to exact multiple");
- FAIL_ON(!plane_state.visible);
- FAIL_ON(!check_src_eq(&plane_state, 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16));
- FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768));
-
- return 0;
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x3fffe, 0x3fffe,
+ 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
}
+
+static struct kunit_case drm_plane_helper_test[] = {
+ KUNIT_CASE(drm_test_check_plane_state),
+ {}
+};
+
+static struct kunit_suite drm_plane_helper_test_suite = {
+ .name = "drm_plane_helper",
+ .test_cases = drm_plane_helper_test,
+};
+
+kunit_test_suite(drm_plane_helper_test_suite);
+
+MODULE_LICENSE("GPL");
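The DRM_PLANE_HELPER_NO_SCALING to DRM_PLANE_NO_SCALING change only tracks the constant's rename; it now lives outside the plane-helper header (hence the dropped include above), and the semantics of drm_atomic_helper_check_plane_state() are untouched. For reference, a driver-side .atomic_check uses the same helper roughly as follows (a sketch with illustrative names; the 2x upscale limit mirrors the 0x8000 value exercised by the test above):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	/*
	 * 0x8000 in 16.16 fixed point allows at most 2x upscaling, while
	 * DRM_PLANE_NO_SCALING as the maximum forbids downscaling entirely.
	 * Positioning on the CRTC is allowed, updates while disabled are not.
	 */
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   0x8000, DRM_PLANE_NO_SCALING,
						   true, false);
}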
diff --git a/drivers/gpu/drm/tests/drm_rect_test.c b/drivers/gpu/drm/tests/drm_rect_test.c
new file mode 100644
index 000000000000..e9809ea32696
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_rect_test.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_rect functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_rect.h>
+
+static void drm_test_rect_clip_scaled_div_by_zero(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * Make sure we don't divide by zero when dst
+ * width/height is zero and dst and clip do not intersect.
+ */
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 0, 0, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 3, 3, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+	KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_not_clipped(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_clipped(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 || dst.y1 != 0 ||
+ dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
+ src.y1 != 2 << 16 || src.y2 != 4 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 || dst.y1 != 0 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 2, 2, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 2 || dst.x2 != 4 || dst.y1 != 2 ||
+ dst.y2 != 4, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_signed_vs_unsigned(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+	 * 'clip.x2 - dst.x1 >= dst width' could result in a negative
+	 * src rectangle width, which is no longer expected by the
+	 * code as it's using unsigned types. This could lead to
+	 * the clipped source rectangle appearing visible when it
+	 * should have been fully clipped. Make sure both rectangles
+ * end up invisible.
+ */
+ drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 3, 3, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+}
+
+static struct kunit_case drm_rect_tests[] = {
+ KUNIT_CASE(drm_test_rect_clip_scaled_div_by_zero),
+ KUNIT_CASE(drm_test_rect_clip_scaled_not_clipped),
+ KUNIT_CASE(drm_test_rect_clip_scaled_clipped),
+ KUNIT_CASE(drm_test_rect_clip_scaled_signed_vs_unsigned),
+ { }
+};
+
+static struct kunit_suite drm_rect_test_suite = {
+ .name = "drm_rect",
+ .test_cases = drm_rect_tests,
+};
+
+kunit_test_suite(drm_rect_test_suite);
+
+MODULE_LICENSE("GPL");
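As background for the cases above: drm_rect_clip_scaled() clips the integer destination rectangle against the clip rectangle and shrinks the 16.16 fixed-point source rectangle by the same proportion, returning whether anything is left visible. A worked call matching the 2:1 top/left case (illustrative, not part of the patch):

#include <drm/drm_rect.h>

static bool example_clip_2to1_topleft(void)
{
	struct drm_rect src, dst, clip;
	bool visible;

	drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);	/* 4x4 source, 16.16 */
	drm_rect_init(&dst, 0, 0, 2, 2);		/* 2x2 destination */
	drm_rect_init(&clip, 0, 0, 1, 1);		/* keep only the top/left pixel */

	visible = drm_rect_clip_scaled(&src, &dst, &clip);

	/* dst is now 1x1 at (0,0); src shrank proportionally to 2x2 in 16.16. */
	return visible && drm_rect_width(&src) == (2 << 16);
}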
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index bc4fa59b6fa9..378600806167 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -3,7 +3,7 @@ config DRM_TIDSS
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
	  Display SubSystem. There are currently three Keystone family
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 2218da3b3ca3..cd3c43a6c806 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -8,9 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
#include "tidss_crtc.h"
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index dd3c6a606ae2..ad93acc9abd2 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -24,9 +24,9 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include "tidss_crtc.h"
@@ -1954,16 +1954,16 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
}
static
-dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+dma_addr_t dispc_plane_state_dma_addr(const struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 x = state->src_x >> 16;
u32 y = state->src_y >> 16;
- gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+ gem = drm_fb_dma_get_gem_obj(state->fb, 0);
- return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ return gem->dma_addr + fb->offsets[0] + x * fb->format->cpp[0] +
y * fb->pitches[0];
}
@@ -1971,16 +1971,16 @@ static
dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
u32 x = state->src_x >> 16;
u32 y = state->src_y >> 16;
if (WARN_ON(state->fb->format->num_planes != 2))
return 0;
- gem = drm_fb_cma_get_gem_obj(fb, 1);
+ gem = drm_fb_dma_get_gem_obj(fb, 1);
- return gem->paddr + fb->offsets[1] +
+ return gem->dma_addr + fb->offsets[1] +
(x * fb->format->cpp[1] / fb->format->hsub) +
(y * fb->pitches[1] / fb->format->vsub);
}
@@ -1993,17 +1993,17 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
u32 fourcc = state->fb->format->format;
u16 cpp = state->fb->format->cpp[0];
u32 fb_width = state->fb->pitches[0] / cpp;
- dma_addr_t paddr = dispc_plane_state_paddr(state);
+ dma_addr_t dma_addr = dispc_plane_state_dma_addr(state);
struct dispc_scaling_params scale;
dispc_vid_calc_scaling(dispc, state, &scale, lite);
dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, dma_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)dma_addr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, dma_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32);
dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
(scale.in_w - 1) | ((scale.in_h - 1) << 16));
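The tidss hunks are part of the tree-wide rename of the GEM CMA helpers to GEM DMA helpers: struct drm_gem_cma_object becomes struct drm_gem_dma_object, drm_fb_cma_get_gem_obj() becomes drm_fb_dma_get_gem_obj(), and the buffer's bus address field is now dma_addr instead of paddr. In any converted driver, computing a plane's scanout address therefore reduces to the following (a sketch that only handles plane 0 of a single-planar format):

#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>

static dma_addr_t example_plane_scanout_addr(const struct drm_plane_state *state)
{
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, 0);
	u32 x = state->src_x >> 16;	/* src_x/src_y are 16.16 fixed point */
	u32 y = state->src_y >> 16;

	return gem->dma_addr + fb->offsets[0] +
	       y * fb->pitches[0] + x * fb->format->cpp[0];
}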
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 04cfff89ee51..15cd9b91b7e2 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -15,7 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@@ -101,13 +101,13 @@ static void tidss_release(struct drm_device *ddev)
drm_kms_helper_poll_fini(ddev);
}
-DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+DEFINE_DRM_GEM_DMA_FOPS(tidss_fops);
static const struct drm_driver tidss_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &tidss_fops,
.release = tidss_release,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "tidss",
.desc = "TI Keystone DSS",
.date = "20180215",
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 666e527a0acf..afb2879980c6 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
@@ -71,7 +70,7 @@ static int tidss_atomic_check(struct drm_device *ddev,
* changes. This is needed for updating the plane positions in
* tidss_crtc_position_planes() which is called from
* crtc_atomic_enable() and crtc_atomic_flush(). We have an
- * extra flag to to mark x,y-position changes and together
+ * extra flag to mark x,y-position changes and together
* with zpos_changed the condition recognizes all the above
* cases.
*/
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 68a85a94ffcb..42d50ec5526d 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -11,7 +11,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include "tidss_crtc.h"
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index e315591eb36b..d3bd2d7a181e 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -3,7 +3,7 @@ config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
depends on DRM && OF && ARM
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 509fbae8c9a6..b5f60b2b2d0e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -12,10 +12,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -64,13 +64,13 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
- start = gem->paddr + fb->offsets[0] +
+ start = gem->dma_addr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
crtc->x * fb->format->cpp[0];
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index eee3c447fbac..f72755b8ea14 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -18,7 +18,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mm.h>
#include <drm/drm_probe_helper.h>
@@ -476,11 +476,11 @@ static void tilcdc_debugfs_init(struct drm_minor *minor)
}
#endif
-DEFINE_DRM_GEM_CMA_FOPS(fops);
+DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver tilcdc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 960136518814..cf77a8ce7398 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -5,7 +5,6 @@
*/
#include <drm/drm_atomic.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
@@ -106,11 +105,10 @@ int tilcdc_plane_init(struct drm_device *dev,
struct tilcdc_drm_private *priv = dev->dev_private;
int ret;
- ret = drm_plane_init(dev, plane, 1,
- &tilcdc_plane_funcs,
- priv->pixelformats,
- priv->num_pixelformats,
- true);
+ ret = drm_universal_plane_init(dev, plane, 1, &tilcdc_plane_funcs,
+ priv->pixelformats,
+ priv->num_pixelformats,
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev->dev, "Failed to initialize plane: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 027cd87c3d0d..565957264875 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -3,7 +3,7 @@
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
@@ -55,7 +55,7 @@ config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
@@ -87,7 +87,7 @@ config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -100,7 +100,7 @@ config TINYDRM_ILI9163
tristate "DRM support for ILI9163 display panels"
depends on DRM && SPI
select BACKLIGHT_CLASS_DEVICE
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DBI
help
@@ -113,7 +113,7 @@ config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
@@ -125,7 +125,7 @@ config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -138,7 +138,7 @@ config TINYDRM_ILI9486
tristate "DRM support for ILI9486 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -152,7 +152,7 @@ config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
@@ -163,7 +163,7 @@ config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
help
DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021)
@@ -177,7 +177,7 @@ config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
@@ -189,7 +189,7 @@ config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
index 7461cb401407..bb302a3fd6b5 100644
--- a/drivers/gpu/drm/tiny/arcpgu.c
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -11,11 +11,11 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
@@ -220,14 +220,14 @@ static void arc_pgu_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *state)
{
struct arcpgu_drm_private *arcpgu;
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
if (!pipe->plane.state->fb)
return;
arcpgu = pipe_to_arcpgu_priv(pipe);
- gem = drm_fb_cma_get_gem_obj(pipe->plane.state->fb, 0);
- arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
+ gem = drm_fb_dma_get_gem_obj(pipe->plane.state->fb, 0);
+ arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->dma_addr);
}
static const struct drm_simple_display_pipe_funcs arc_pgu_pipe_funcs = {
@@ -243,7 +243,7 @@ static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
+DEFINE_DRM_GEM_DMA_FOPS(arcpgu_drm_ops);
static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
{
@@ -370,7 +370,7 @@ static const struct drm_driver arcpgu_drm_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 82364a0a7b18..a51262289aef 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -309,6 +309,8 @@ static void bochs_hw_fini(struct drm_device *dev)
static void bochs_hw_blank(struct bochs_device *bochs, bool blank)
{
DRM_DEBUG_DRIVER("hw_blank %d\n", blank);
+ /* enable color bit (so VGA_IS1_RC access works) */
+ bochs_vga_writeb(bochs, VGA_MIS_W, VGA_MIS_COLOR);
/* discard ar_flip_flop */
(void)bochs_vga_readb(bochs, VGA_IS1_RC);
/* blank or unblank; we need only update index and set 0x20 */
@@ -583,13 +585,17 @@ static int bochs_load(struct drm_device *dev)
ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size);
if (ret)
- return ret;
+ goto err_hw_fini;
ret = bochs_kms_init(bochs);
if (ret)
- return ret;
+ goto err_hw_fini;
return 0;
+
+err_hw_fini:
+ bochs_hw_fini(dev);
+ return ret;
}
DEFINE_DRM_GEM_FOPS(bochs_fops);
@@ -664,11 +670,13 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_free_dev;
+ goto err_hw_fini;
drm_fbdev_generic_setup(dev, 32);
return ret;
+err_hw_fini:
+ bochs_hw_fini(dev);
err_free_dev:
drm_dev_put(dev);
return ret;
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index c4f5beea1f90..354d5e854a6f 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -316,28 +316,29 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
}
static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
- const struct iosys_map *map,
+ const struct iosys_map *vmap,
struct drm_rect *rect)
{
struct cirrus_device *cirrus = to_cirrus(fb->dev);
- void __iomem *dst = cirrus->vram;
- void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ struct iosys_map dst;
int idx;
if (!drm_dev_enter(&cirrus->dev, &idx))
return -ENODEV;
+ iosys_map_set_vaddr_iomem(&dst, cirrus->vram);
+
if (cirrus->cpp == fb->format->cpp[0]) {
- dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect);
- drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect);
+ iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));
+ drm_fb_memcpy(&dst, fb->pitches, vmap, fb, rect);
} else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) {
- dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect);
- drm_fb_xrgb8888_to_rgb565_toio(dst, cirrus->pitch, vmap, fb, rect, false);
+ iosys_map_incr(&dst, drm_fb_clip_offset(cirrus->pitch, fb->format, rect));
+ drm_fb_xrgb8888_to_rgb565(&dst, &cirrus->pitch, vmap, fb, rect, false);
} else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) {
- dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect);
- drm_fb_xrgb8888_to_rgb888_toio(dst, cirrus->pitch, vmap, fb, rect);
+ iosys_map_incr(&dst, drm_fb_clip_offset(cirrus->pitch, fb->format, rect));
+ drm_fb_xrgb8888_to_rgb888(&dst, &cirrus->pitch, vmap, fb, rect);
} else {
WARN_ON_ONCE("cpp mismatch");
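The cirrus blit shows the new iosys_map-based format helpers: instead of raw pointers plus dedicated *_toio() variants for I/O memory, the destination is described by a struct iosys_map, so a single drm_fb_memcpy() or drm_fb_xrgb8888_to_*() call handles both system and I/O memory. Extracted as a stand-alone sketch (names are illustrative):

#include <linux/iosys-map.h>

#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static void example_blit_rect(void __iomem *vram, struct drm_framebuffer *fb,
			      const struct iosys_map *src, struct drm_rect *rect)
{
	struct iosys_map dst;

	/* Wrap the I/O memory; the helper then uses memcpy_toio() internally. */
	iosys_map_set_vaddr_iomem(&dst, vram);
	iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, rect));

	drm_fb_memcpy(&dst, fb->pitches, src, fb, rect);
}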
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index ebb025543f8d..48c24aa8c28a 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -20,7 +20,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -181,6 +181,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -190,12 +191,12 @@ static const struct drm_display_mode yx350hv15_mode = {
DRM_SIMPLE_MODE(320, 480, 60, 75),
};
-DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
+DEFINE_DRM_GEM_DMA_FOPS(hx8357d_fops);
static const struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index fc8ed245b0bc..9a1a5943bee0 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -11,7 +11,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -100,6 +100,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -110,12 +111,12 @@ static const struct drm_display_mode yx240qv29_mode = {
DRM_SIMPLE_MODE(128, 160, 28, 35),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9163_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9163_fops);
static struct drm_driver ili9163_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9163_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 8d686eecd5f4..a79da2b4af64 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -19,12 +19,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -78,7 +78,7 @@ static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
@@ -104,7 +104,7 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (ret)
goto err_msg;
} else {
- tr = cma_obj->vaddr;
+ tr = dma_obj->vaddr;
}
switch (dbidev->rotation) {
@@ -335,12 +335,12 @@ static const struct drm_display_mode ili9225_mode = {
DRM_SIMPLE_MODE(176, 220, 35, 44),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9225_fops);
static const struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 5b8cc770ee7b..69b265e78096 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -19,7 +19,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -137,6 +137,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -146,12 +147,12 @@ static const struct drm_display_mode yx240qv29_mode = {
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9341_fops);
static const struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 6d655e18e0aa..c80028bb1d11 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -18,7 +18,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -150,6 +150,7 @@ static void waveshare_enable(struct drm_simple_display_pipe *pipe,
}
static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = waveshare_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -159,12 +160,12 @@ static const struct drm_display_mode waveshare_mode = {
DRM_SIMPLE_MODE(480, 320, 73, 49),
};
-DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+DEFINE_DRM_GEM_DMA_FOPS(ili9486_fops);
static const struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index 5e060f6910bb..bc522fb3d94d 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -17,7 +17,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -141,6 +141,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = mi0283qt_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -150,12 +151,12 @@ static const struct drm_display_mode mi0283qt_mode = {
DRM_SIMPLE_MODE(320, 240, 58, 43),
};
-DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
+DEFINE_DRM_GEM_DMA_FOPS(mi0283qt_fops);
static const struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index c759ff9c2c87..955a61d628e7 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -18,7 +18,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -212,17 +212,18 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs panel_mipi_dbi_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = panel_mipi_dbi_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
};
-DEFINE_DRM_GEM_CMA_FOPS(panel_mipi_dbi_fops);
+DEFINE_DRM_GEM_DMA_FOPS(panel_mipi_dbi_fops);
static const struct drm_driver panel_mipi_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &panel_mipi_dbi_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 013790c45d0a..e62f4d16b2c6 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -25,12 +25,12 @@
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
@@ -511,8 +511,10 @@ static void repaper_get_temperature(struct repaper_epd *epd)
static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
+ unsigned int dst_pitch = 0;
+ struct iosys_map dst, vmap;
struct drm_rect clip;
int idx, ret = 0;
u8 *buf = NULL;
@@ -541,7 +543,9 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
if (ret)
goto out_free;
- drm_fb_xrgb8888_to_mono(buf, 0, cma_obj->vaddr, fb, &clip);
+ iosys_map_set_vaddr(&dst, buf);
+ iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -617,6 +621,15 @@ static void power_off(struct repaper_epd *epd)
gpiod_set_value_cansleep(epd->discharge, 0);
}
+static enum drm_mode_status repaper_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct repaper_epd *epd = drm_to_epd(crtc->dev);
+
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, epd->mode);
+}
+
static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
@@ -827,6 +840,7 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
+ .mode_valid = repaper_pipe_mode_valid,
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
@@ -835,22 +849,8 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
static int repaper_connector_get_modes(struct drm_connector *connector)
{
struct repaper_epd *epd = drm_to_epd(connector->dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, epd->mode);
- if (!mode) {
- DRM_ERROR("Failed to duplicate mode\n");
- return 0;
- }
-
- drm_mode_set_name(mode);
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, epd->mode);
}
static const struct drm_connector_helper_funcs repaper_connector_hfuncs = {
@@ -903,12 +903,12 @@ static const struct drm_display_mode repaper_e2271cs021_mode = {
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
0xff, 0xfe, 0x00, 0x00 };
-DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
+DEFINE_DRM_GEM_DMA_FOPS(repaper_fops);
static const struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
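repaper's open-coded get_modes drops out in favour of drm_connector_helper_get_modes_fixed(), and the new .mode_valid hook is built on drm_crtc_helper_mode_valid_fixed(); both helpers compare against a single fixed display mode, which is the normal situation for these panels. Wired up in isolation, the pair looks roughly like this (illustrative names; the mode timings are placeholders):

#include <drm/drm_crtc_helper.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>

/* The driver keeps one fixed mode describing the panel. */
static const struct drm_display_mode example_fixed_mode = {
	DRM_SIMPLE_MODE(264, 176, 57, 38),
};

static int example_connector_get_modes(struct drm_connector *connector)
{
	/* Duplicates the fixed mode, marks it preferred, fills width/height_mm. */
	return drm_connector_helper_get_modes_fixed(connector, &example_fixed_mode);
}

static enum drm_mode_status example_crtc_mode_valid(struct drm_crtc *crtc,
						    const struct drm_display_mode *mode)
{
	return drm_crtc_helper_mode_valid_fixed(crtc, mode, &example_fixed_mode);
}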
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 5422363690e7..18489779fb8a 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -8,6 +8,7 @@
#include <linux/regulator/consumer.h>
#include <drm/drm_aperture.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
@@ -20,8 +21,8 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
@@ -30,16 +31,6 @@
#define DRIVER_MINOR 0
/*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
-#define RES_MM(d) \
- (((d) * 254ul) / (96ul * 10ul))
-
-#define SIMPLEDRM_MODE(hd, vd) \
- DRM_SIMPLE_MODE(hd, vd, RES_MM(hd), RES_MM(vd))
-
-/*
* Helpers for simplefb
*/
@@ -198,7 +189,6 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
struct simpledrm_device {
struct drm_device dev;
- struct platform_device *pdev;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
@@ -217,14 +207,15 @@ struct simpledrm_device {
unsigned int pitch;
/* memory management */
- struct resource *mem;
void __iomem *screen_base;
/* modesetting */
uint32_t formats[8];
size_t nformats;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
- struct drm_simple_display_pipe pipe;
};
static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
@@ -272,7 +263,7 @@ static void simpledrm_device_release_clocks(void *res)
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
unsigned int i;
@@ -370,7 +361,7 @@ static void simpledrm_device_release_regulators(void *res)
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
struct regulator *regulator;
@@ -451,120 +442,6 @@ static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
#endif
/*
- * Simplefb settings
- */
-
-static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height)
-{
- struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
-
- mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
- drm_mode_set_name(&mode);
-
- return mode;
-}
-
-static int simpledrm_device_init_fb(struct simpledrm_device *sdev)
-{
- int width, height, stride;
- const struct drm_format_info *format;
- struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
- const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
- struct device_node *of_node = pdev->dev.of_node;
-
- if (pd) {
- width = simplefb_get_width_pd(dev, pd);
- if (width < 0)
- return width;
- height = simplefb_get_height_pd(dev, pd);
- if (height < 0)
- return height;
- stride = simplefb_get_stride_pd(dev, pd);
- if (stride < 0)
- return stride;
- format = simplefb_get_format_pd(dev, pd);
- if (IS_ERR(format))
- return PTR_ERR(format);
- } else if (of_node) {
- width = simplefb_get_width_of(dev, of_node);
- if (width < 0)
- return width;
- height = simplefb_get_height_of(dev, of_node);
- if (height < 0)
- return height;
- stride = simplefb_get_stride_of(dev, of_node);
- if (stride < 0)
- return stride;
- format = simplefb_get_format_of(dev, of_node);
- if (IS_ERR(format))
- return PTR_ERR(format);
- } else {
- drm_err(dev, "no simplefb configuration found\n");
- return -ENODEV;
- }
-
- sdev->mode = simpledrm_mode(width, height);
- sdev->format = format;
- sdev->pitch = stride;
-
- drm_dbg_kms(dev, "display mode={" DRM_MODE_FMT "}\n",
- DRM_MODE_ARG(&sdev->mode));
- drm_dbg_kms(dev,
- "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
- &format->format, width, height, stride);
-
- return 0;
-}
-
-/*
- * Memory management
- */
-
-static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
-{
- struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
- struct resource *res, *mem;
- void __iomem *screen_base;
- int ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
- if (ret) {
- drm_err(dev, "could not acquire memory range %pr: error %d\n",
- res, ret);
- return ret;
- }
-
- mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- sdev->dev.driver->name);
- if (!mem) {
- /*
- * We cannot make this fatal. Sometimes this comes from magic
- * spaces our resource handlers simply don't know about. Use
- * the I/O-memory resource as-is and try to map that instead.
- */
- drm_warn(dev, "could not acquire memory region %pr\n", res);
- mem = res;
- }
-
- screen_base = devm_ioremap_wc(&pdev->dev, mem->start,
- resource_size(mem));
- if (!screen_base)
- return -ENOMEM;
-
- sdev->mem = mem;
- sdev->screen_base = screen_base;
-
- return 0;
-}
-
-/*
* Modesetting
*/
@@ -576,7 +453,7 @@ static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
* TODO: Add blit helpers for remaining formats and uncomment
* constants.
*/
-static const uint32_t simpledrm_default_formats[] = {
+static const uint32_t simpledrm_primary_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGB565,
@@ -587,100 +464,54 @@ static const uint32_t simpledrm_default_formats[] = {
DRM_FORMAT_ARGB2101010,
};
-static const uint64_t simpledrm_format_modifiers[] = {
+static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
-static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &sdev->mode);
- if (!mode)
- return 0;
-
- if (mode->name[0] == '\0')
- drm_mode_set_name(mode);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- if (mode->width_mm)
- connector->display_info.width_mm = mode->width_mm;
- if (mode->height_mm)
- connector->display_info.height_mm = mode->height_mm;
-
- return 1;
-}
-
-static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
- .get_modes = simpledrm_connector_helper_get_modes,
-};
-
-static const struct drm_connector_funcs simpledrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static enum drm_mode_status
-simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
- const struct drm_display_mode *mode)
+static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
-
- if (mode->hdisplay != sdev->mode.hdisplay &&
- mode->vdisplay != sdev->mode.vdisplay)
- return MODE_ONE_SIZE;
- else if (mode->hdisplay != sdev->mode.hdisplay)
- return MODE_ONE_WIDTH;
- else if (mode->vdisplay != sdev->mode.vdisplay)
- return MODE_ONE_HEIGHT;
-
- return MODE_OK;
-}
-
-static void
-simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
-{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */
- struct drm_device *dev = &sdev->dev;
- void __iomem *dst = sdev->screen_base;
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct drm_device *dev = plane->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect damage;
+ int ret, idx;
- if (!fb)
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
return;
- drm_rect_fp_to_int(&src_clip, &plane_state->src);
+ if (!drm_dev_enter(dev, &idx))
+ goto out_drm_gem_fb_end_cpu_access;
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base);
+ struct drm_rect dst_clip = plane_state->dst;
- if (!drm_dev_enter(dev, &idx))
- return;
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
- dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip);
- drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip);
+ iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
+ drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb,
+ &damage);
+ }
drm_dev_exit(idx);
+out_drm_gem_fb_end_cpu_access:
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
-static void
-simpledrm_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
- struct drm_device *dev = &sdev->dev;
+ struct drm_device *dev = plane->dev;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
int idx;
if (!drm_dev_enter(dev, &idx))
@@ -692,46 +523,81 @@ simpledrm_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
drm_dev_exit(idx);
}
-static void
-simpledrm_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_plane_state)
+static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = drm_plane_helper_atomic_check,
+ .atomic_update = simpledrm_primary_plane_helper_atomic_update,
+ .atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
+};
+
+static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
- struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
- struct drm_plane_state *plane_state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
- void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_device *dev = &sdev->dev;
- void __iomem *dst = sdev->screen_base;
- struct drm_rect src_clip, dst_clip;
- int idx;
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
- if (!fb)
- return;
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
+}
- if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
- return;
+static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
+ int ret;
- dst_clip = plane_state->dst;
- if (!drm_rect_intersect(&dst_clip, &src_clip))
- return;
+ ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
+ if (ret)
+ return ret;
- if (!drm_dev_enter(dev, &idx))
- return;
+ return drm_atomic_add_affected_planes(new_state, crtc);
+}
- dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip);
- drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip);
+/*
+ * The CRTC is always enabled. Screen updates are performed by
+ * the primary plane's atomic_update function. Disabling clears
+ * the screen in the primary plane's atomic_disable function.
+ */
+static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
+ .mode_valid = simpledrm_crtc_helper_mode_valid,
+ .atomic_check = simpledrm_crtc_helper_atomic_check,
+};
- drm_dev_exit(idx);
+static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
+
+ return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
}
-static const struct drm_simple_display_pipe_funcs
-simpledrm_simple_display_pipe_funcs = {
- .mode_valid = simpledrm_simple_display_pipe_mode_valid,
- .enable = simpledrm_simple_display_pipe_enable,
- .disable = simpledrm_simple_display_pipe_disable,
- .update = simpledrm_simple_display_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
+ .get_modes = simpledrm_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs simpledrm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
@@ -740,127 +606,207 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const uint32_t *simpledrm_device_formats(struct simpledrm_device *sdev,
- size_t *nformats_out)
-{
- struct drm_device *dev = &sdev->dev;
- size_t i;
-
- if (sdev->nformats)
- goto out; /* don't rebuild list on recurring calls */
-
- /* native format goes first */
- sdev->formats[0] = sdev->format->format;
- sdev->nformats = 1;
-
- /* default formats go second */
- for (i = 0; i < ARRAY_SIZE(simpledrm_default_formats); ++i) {
- if (simpledrm_default_formats[i] == sdev->format->format)
- continue; /* native format already went first */
- sdev->formats[sdev->nformats] = simpledrm_default_formats[i];
- sdev->nformats++;
- }
+/*
+ * Init / Cleanup
+ */
+static struct drm_display_mode simpledrm_mode(unsigned int width,
+ unsigned int height)
+{
/*
- * TODO: The simpledrm driver converts framebuffers to the native
- * format when copying them to device memory. If there are more
- * formats listed than supported by the driver, the native format
- * is not supported by the conversion helpers. Therefore *only*
- * support the native format and add a conversion helper ASAP.
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
*/
- if (drm_WARN_ONCE(dev, i != sdev->nformats,
- "format conversion helpers required for %p4cc",
- &sdev->format->format)) {
- sdev->nformats = 1;
- }
+ const struct drm_display_mode mode = {
+ DRM_MODE_INIT(60, width, height,
+ DRM_MODE_RES_MM(width, 96ul),
+ DRM_MODE_RES_MM(height, 96ul))
+ };
-out:
- *nformats_out = sdev->nformats;
- return sdev->formats;
+ return mode;
}
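The 96-dpi assumption above is what turns the firmware's pixel counts into the physical size reported for this mode. A rough sketch of the arithmetic, assuming DRM_MODE_RES_MM reduces to a plain millimetre conversion at the given dpi (1 inch = 25.4 mm):

	/* Sketch only: pixel extent to millimetres at a fixed dpi. */
	static unsigned long px_to_mm(unsigned long px, unsigned long dpi)
	{
		return (px * 254ul) / (dpi * 10ul);
	}

	/* e.g. a 1024x768 simplefb at 96 dpi reports roughly 270 mm x 203 mm */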
-static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
+static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
+ struct platform_device *pdev)
{
- struct drm_device *dev = &sdev->dev;
- struct drm_display_mode *mode = &sdev->mode;
- struct drm_connector *connector = &sdev->connector;
- struct drm_simple_display_pipe *pipe = &sdev->pipe;
+ const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
+ struct device_node *of_node = pdev->dev.of_node;
+ struct simpledrm_device *sdev;
+ struct drm_device *dev;
+ int width, height, stride;
+ const struct drm_format_info *format;
+ struct resource *res, *mem;
+ void __iomem *screen_base;
+ struct drm_plane *primary_plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
unsigned long max_width, max_height;
- const uint32_t *formats;
size_t nformats;
int ret;
- ret = drmm_mode_config_init(dev);
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
+ if (IS_ERR(sdev))
+ return ERR_CAST(sdev);
+ dev = &sdev->dev;
+ platform_set_drvdata(pdev, sdev);
+
+ /*
+ * Hardware settings
+ */
+
+ ret = simpledrm_device_init_clocks(sdev);
if (ret)
- return ret;
+ return ERR_PTR(ret);
+ ret = simpledrm_device_init_regulators(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (pd) {
+ width = simplefb_get_width_pd(dev, pd);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = simplefb_get_height_pd(dev, pd);
+ if (height < 0)
+ return ERR_PTR(height);
+ stride = simplefb_get_stride_pd(dev, pd);
+ if (stride < 0)
+ return ERR_PTR(stride);
+ format = simplefb_get_format_pd(dev, pd);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ } else if (of_node) {
+ width = simplefb_get_width_of(dev, of_node);
+ if (width < 0)
+ return ERR_PTR(width);
+ height = simplefb_get_height_of(dev, of_node);
+ if (height < 0)
+ return ERR_PTR(height);
+ stride = simplefb_get_stride_of(dev, of_node);
+ if (stride < 0)
+ return ERR_PTR(stride);
+ format = simplefb_get_format_of(dev, of_node);
+ if (IS_ERR(format))
+ return ERR_CAST(format);
+ } else {
+ drm_err(dev, "no simplefb configuration found\n");
+ return ERR_PTR(-ENODEV);
+ }
+ if (!stride) {
+ stride = drm_format_info_min_pitch(format, 0, width);
+ if (drm_WARN_ON(dev, !stride))
+ return ERR_PTR(-EINVAL);
+ }
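When the firmware reports no stride, the minimum pitch for the format stands in for it; a worked example with an illustrative width that is not taken from the patch:

	/* XRGB8888 packs 4 bytes per pixel, so for an 800-pixel-wide
	 * framebuffer the fallback is expected to be
	 *   drm_format_info_min_pitch(format, 0, 800) == 800 * 4 == 3200 bytes,
	 * which then becomes sdev->pitch.
	 */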
- max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH);
- max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT);
+ sdev->mode = simpledrm_mode(width, height);
+ sdev->format = format;
+ sdev->pitch = stride;
- dev->mode_config.min_width = mode->hdisplay;
- dev->mode_config.max_width = max_width;
- dev->mode_config.min_height = mode->vdisplay;
- dev->mode_config.max_height = max_height;
- dev->mode_config.preferred_depth = sdev->format->cpp[0] * 8;
- dev->mode_config.funcs = &simpledrm_mode_config_funcs;
+ drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
+ drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
+ &format->format, width, height, stride);
- ret = drm_connector_init(dev, connector, &simpledrm_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- if (ret)
- return ret;
- drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
- drm_connector_set_panel_orientation_with_quirk(connector,
- DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
- mode->hdisplay, mode->vdisplay);
+ /*
+ * Memory management
+ */
- formats = simpledrm_device_formats(sdev, &nformats);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return ERR_PTR(-EINVAL);
- ret = drm_simple_display_pipe_init(dev, pipe, &simpledrm_simple_display_pipe_funcs,
- formats, nformats, simpledrm_format_modifiers,
- connector);
- if (ret)
- return ret;
+ ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: error %d\n", res, ret);
+ return ERR_PTR(ret);
+ }
- drm_plane_enable_fb_damage_clips(&pipe->plane);
+ mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
- drm_mode_config_reset(dev);
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+ sdev->screen_base = screen_base;
- return 0;
-}
+ /*
+ * Modesetting
+ */
-/*
- * Init / Cleanup
- */
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ERR_PTR(ret);
-static struct simpledrm_device *
-simpledrm_device_create(struct drm_driver *drv, struct platform_device *pdev)
-{
- struct simpledrm_device *sdev;
- int ret;
+ max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
+ max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
- sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device,
- dev);
- if (IS_ERR(sdev))
- return ERR_CAST(sdev);
- sdev->pdev = pdev;
- platform_set_drvdata(pdev, sdev);
+ dev->mode_config.min_width = width;
+ dev->mode_config.max_width = max_width;
+ dev->mode_config.min_height = height;
+ dev->mode_config.max_height = max_height;
+ dev->mode_config.preferred_depth = format->cpp[0] * 8;
+ dev->mode_config.funcs = &simpledrm_mode_config_funcs;
- ret = simpledrm_device_init_clocks(sdev);
+ /* Primary plane */
+
+ nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
+ simpledrm_primary_plane_formats,
+ ARRAY_SIZE(simpledrm_primary_plane_formats),
+ sdev->formats, ARRAY_SIZE(sdev->formats));
+
+ primary_plane = &sdev->primary_plane;
+ ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
+ sdev->formats, nformats,
+ simpledrm_primary_plane_format_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_regulators(sdev);
+ drm_plane_helper_add(primary_plane, &simpledrm_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary_plane);
+
+ /* CRTC */
+
+ crtc = &sdev->crtc;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &simpledrm_crtc_funcs, NULL);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_fb(sdev);
+ drm_crtc_helper_add(crtc, &simpledrm_crtc_helper_funcs);
+
+ /* Encoder */
+
+ encoder = &sdev->encoder;
+ ret = drm_encoder_init(dev, encoder, &simpledrm_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_mm(sdev);
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ /* Connector */
+
+ connector = &sdev->connector;
+ ret = drm_connector_init(dev, connector, &simpledrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
if (ret)
return ERR_PTR(ret);
- ret = simpledrm_device_init_modeset(sdev);
+ drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ width, height);
+
+ ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ERR_PTR(ret);
+ drm_mode_config_reset(dev);
+
return sdev;
}
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 8eddb020c43e..b6f620b902e6 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -15,12 +15,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
@@ -69,12 +69,15 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
unsigned int x, y;
u8 *src, *buf, val;
+ struct iosys_map dst_map, vmap;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return;
- drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, clip);
+ iosys_map_set_vaddr(&dst_map, buf);
+ iosys_map_set_vaddr(&vmap, vaddr);
+ drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -92,8 +95,8 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- void *src = cma_obj->vaddr;
+ struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ void *src = dma_obj->vaddr;
int ret = 0;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
@@ -269,12 +272,12 @@ static const struct drm_display_mode st7586_mode = {
DRM_SIMPLE_MODE(178, 128, 37, 27),
};
-DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
+DEFINE_DRM_GEM_DMA_FOPS(st7586_fops);
static const struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index e0f02d367d88..c36ba08acda1 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -20,7 +20,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
@@ -133,6 +133,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
@@ -151,12 +152,12 @@ static const struct st7735r_cfg rh128128t_cfg = {
.rgb = true,
};
-DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
+DEFINE_DRM_GEM_DMA_FOPS(st7735r_fops);
static const struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
- DRM_GEM_CMA_DRIVER_OPS_VMAP,
+ DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 97184c333526..7c8e8be774f1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -117,12 +117,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man, *new_man;
struct ttm_device *bdev = bo->bdev;
+ bool old_use_tt, new_use_tt;
int ret;
- old_man = ttm_manager_type(bdev, bo->resource->mem_type);
- new_man = ttm_manager_type(bdev, mem->mem_type);
+ old_use_tt = bo->resource &&
+ ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
+ new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
ttm_bo_unmap_virtual(bo);
@@ -130,11 +131,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
* Create and bind a ttm if required.
*/
- if (new_man->use_tt) {
+ if (new_use_tt) {
/* Zero init the new TTM structure if the old location should
* have used one as well.
*/
- ret = ttm_tt_create(bo, old_man->use_tt);
+ ret = ttm_tt_create(bo, old_use_tt);
if (ret)
goto out_err;
@@ -160,8 +161,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
return 0;
out_err:
- new_man = ttm_manager_type(bdev, bo->resource->mem_type);
- if (!new_man->use_tt)
+ if (!old_use_tt)
ttm_bo_tt_destroy(bo);
return ret;
@@ -518,6 +518,9 @@ out:
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
+ struct ttm_resource *res = bo->resource;
+ struct ttm_device *bdev = bo->bdev;
+
dma_resv_assert_held(bo->base.resv);
if (bo->resource->mem_type == TTM_PL_SYSTEM)
return true;
@@ -525,11 +528,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
- (place->lpfn && place->lpfn <= bo->resource->start))
- return false;
-
- return true;
+ return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
@@ -904,7 +903,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- if (!ttm_resource_compat(bo->resource, placement)) {
+ if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -921,36 +920,61 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
-int ttm_bo_init_reserved(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
- struct sg_table *sg,
- struct dma_resv *resv,
+/**
+ * ttm_bo_init_reserved
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @ctx: TTM operation context for memory allocation.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function, enables driver-specific objects
+ * derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
+ * and it is the caller's responsibility to call ttm_bo_unreserve.
+ *
+ * If a failure occurs, the function will call the @destroy function. Thus,
+ * after a failure, dereferencing @bo is illegal and will likely cause memory
+ * corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
- bool locked;
int ret;
- bo->destroy = destroy;
kref_init(&bo->kref);
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
- bo->page_alignment = page_alignment;
+ bo->page_alignment = alignment;
+ bo->destroy = destroy;
bo->pin_count = 0;
bo->sg = sg;
bo->bulk_move = NULL;
- if (resv) {
+ if (resv)
bo->base.resv = resv;
- dma_resv_assert_held(bo->base.resv);
- } else {
+ else
bo->base.resv = &bo->base._resv;
- }
atomic_inc(&ttm_glob.bo_count);
ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
@@ -963,50 +987,84 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg)
+ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
- bo->resource->num_pages);
+ PFN_UP(bo->base.size));
+ if (ret)
+ goto err_put;
+ }
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
*/
- if (!resv) {
- locked = dma_resv_trylock(bo->base.resv);
- WARN_ON(!locked);
- }
+ if (!resv)
+ WARN_ON(!dma_resv_trylock(bo->base.resv));
+ else
+ dma_resv_assert_held(resv);
- if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, ctx);
+ ret = ttm_bo_validate(bo, placement, ctx);
+ if (unlikely(ret))
+ goto err_unlock;
- if (unlikely(ret)) {
- if (!resv)
- ttm_bo_unreserve(bo);
+ return 0;
- ttm_bo_put(bo);
- return ret;
- }
+err_unlock:
+ if (!resv)
+ dma_resv_unlock(bo->base.resv);
+err_put:
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
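With the size parameter gone, the object's extent comes from the embedded GEM object (note the PFN_UP(bo->base.size) above), so callers are expected to have set that up first. A minimal, hypothetical caller sketch — my_bo, my_placement and my_bo_destroy are illustrative names, not part of this patch:

	struct my_bo {
		struct ttm_buffer_object tbo;
	};

	static void my_bo_destroy(struct ttm_buffer_object *tbo)
	{
		kfree(container_of(tbo, struct my_bo, tbo));
	}

	/* bo->tbo.base (the GEM object) is assumed to be initialized already,
	 * e.g. via drm_gem_object_init(), so bo->tbo.base.size is valid.
	 */
	struct ttm_operation_ctx ctx = { .interruptible = true };
	int ret;

	ret = ttm_bo_init_reserved(bdev, &bo->tbo, ttm_bo_type_device,
				   &my_placement, 0, &ctx, NULL, NULL,
				   my_bo_destroy);
	if (ret)
		return ret;		/* my_bo_destroy() has already run */
	ttm_bo_unreserve(&bo->tbo);	/* resv == NULL, so it returned reserved */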
-int ttm_bo_init(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct sg_table *sg,
- struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *))
+/**
+ * ttm_bo_init_validate
+ *
+ * @bdev: Pointer to a ttm_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for buffer object.
+ * @alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @sg: Scatter-gather table.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ *
+ * If a failure occurs, the function will call the @destroy function. Thus,
+ * after a failure, dereferencing @bo is illegal and will likely cause memory
+ * corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy) (struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
- ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
- page_alignment, &ctx, sg, resv, destroy);
+ ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
+ sg, resv, destroy);
if (ret)
return ret;
@@ -1015,7 +1073,7 @@ int ttm_bo_init(struct ttm_device *bdev,
return 0;
}
-EXPORT_SYMBOL(ttm_bo_init);
+EXPORT_SYMBOL(ttm_bo_init_validate);
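The renamed wrapper only builds the operation context from @interruptible and, when @resv is NULL, presumably drops the reservation again before returning (in the elided tail of the function), so the earlier sketch collapses to:

	/* same hypothetical names as in the sketch above */
	ret = ttm_bo_init_validate(bdev, &bo->tbo, ttm_bo_type_device,
				   &my_placement, 0, true /* interruptible */,
				   NULL, NULL, my_bo_destroy);
	if (ret)
		return ret;
	/* no ttm_bo_unreserve() needed here */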
/*
* buffer object vm functions.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1cbfb00c1d65..fa04e62202c1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -137,8 +137,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
ttm_manager_type(bo->bdev, dst_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
struct ttm_resource *src_mem = bo->resource;
- struct ttm_resource_manager *src_man =
- ttm_manager_type(bdev, src_mem->mem_type);
+ struct ttm_resource_manager *src_man;
union {
struct ttm_kmap_iter_tt tt;
struct ttm_kmap_iter_linear_io io;
@@ -147,6 +146,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
+ if (!src_mem)
+ return 0;
+
+ src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
@@ -236,16 +239,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
- if (fbo->base.resource) {
- ttm_resource_set_bo(fbo->base.resource, &fbo->base);
- bo->resource = NULL;
- }
-
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);
@@ -399,6 +405,8 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
struct ttm_resource *mem = bo->resource;
int ret;
+ dma_resv_assert_held(bo->base.resv);
+
ret = ttm_mem_io_reserve(bo->bdev, mem);
if (ret)
return ret;
@@ -457,6 +465,8 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
+ dma_resv_assert_held(bo->base.resv);
+
if (iosys_map_is_null(map))
return;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index d91666721dc6..4cfef2b3514d 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -113,6 +113,37 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
kfree(node);
}
+static bool ttm_range_man_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ u32 num_pages = PFN_UP(size);
+
+ /* Don't evict BOs outside of the requested placement range */
+ if (place->fpfn >= (node->start + num_pages) ||
+ (place->lpfn && place->lpfn <= node->start))
+ return false;
+
+ return true;
+}
+
+static bool ttm_range_man_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
+ u32 num_pages = PFN_UP(size);
+
+ if (node->start < place->fpfn ||
+ (place->lpfn && (node->start + num_pages) > place->lpfn))
+ return false;
+
+ return true;
+}
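A concrete reading of the two pfn-window tests, with illustrative page numbers that are not taken from the patch:

	/* Resource at pages [256, 512), place with fpfn = 0, lpfn = 256:
	 *   intersects: 0 >= 512 is false, but 256 <= 256 holds -> false
	 *               (no overlap, so evicting this BO cannot help).
	 *   compatible: 256 + 256 > 256 -> false (it does not already
	 *               satisfy the placement).
	 *
	 * Resource at pages [128, 384), same place:
	 *   intersects: true  (it overlaps [0, 256), so eviction is useful)
	 *   compatible: false (it spills past lpfn, so it must still move)
	 */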
+
static void ttm_range_man_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
@@ -126,6 +157,8 @@ static void ttm_range_man_debug(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func ttm_range_manager_func = {
.alloc = ttm_range_man_alloc,
.free = ttm_range_man_free,
+ .intersects = ttm_range_man_intersects,
+ .compatible = ttm_range_man_compatible,
.debug = ttm_range_man_debug
};
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 20f9adcc3235..a729c32a1e48 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -253,10 +253,71 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
+/**
+ * ttm_resource_intersects - test for intersection
+ *
+ * @bdev: TTM device structure
+ * @res: The resource to test
+ * @place: The placement to test
+ * @size: How many bytes the new allocation needs.
+ *
+ * Test if @res intersects with @place and @size. Used for testing if evictions
+ * are valuable or not.
+ *
+ * Returns true if the res placement intersects with @place and @size.
+ */
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct ttm_resource_manager *man;
+
+ if (!res)
+ return false;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (!place || !man->func->intersects)
+ return true;
+
+ return man->func->intersects(man, res, place, size);
+}
+
+/**
+ * ttm_resource_compatible - test for compatibility
+ *
+ * @bdev: TTM device structure
+ * @res: The resource to test
+ * @place: The placement to test
+ * @size: How many bytes the new allocation needs.
+ *
+ * Test if @res is compatible with @place and @size.
+ *
+ * Returns true if the res placement is compatible with @place and @size.
+ */
+bool ttm_resource_compatible(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct ttm_resource_manager *man;
+
+ if (!res || !place)
+ return false;
+
+ man = ttm_manager_type(bdev, res->mem_type);
+ if (!man->func->compatible)
+ return true;
+
+ return man->func->compatible(man, res, place, size);
+}
+
static bool ttm_resource_places_compat(struct ttm_resource *res,
const struct ttm_place *places,
unsigned num_placement)
{
+ struct ttm_buffer_object *bo = res->bo;
+ struct ttm_device *bdev = bo->bdev;
unsigned i;
if (res->placement & TTM_PL_FLAG_TEMPORARY)
@@ -265,8 +326,7 @@ static bool ttm_resource_places_compat(struct ttm_resource *res,
for (i = 0; i < num_placement; i++) {
const struct ttm_place *heap = &places[i];
- if (res->start < heap->fpfn || (heap->lpfn &&
- (res->start + res->num_pages) > heap->lpfn))
+ if (!ttm_resource_compatible(bdev, res, heap, bo->base.size))
continue;
if ((res->mem_type == heap->mem_type) &&
diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig
index 47a7dbe6c114..11e865be81c6 100644
--- a/drivers/gpu/drm/tve200/Kconfig
+++ b/drivers/gpu/drm/tve200/Kconfig
@@ -8,7 +8,7 @@ config DRM_TVE200
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
help
Choose this option for DRM support for the Faraday TV Encoder
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 771bad881714..37bdd976ae59 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -15,11 +15,11 @@
#include <linux/of_graph.h>
#include <linux/delay.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>
@@ -90,7 +90,7 @@ static int tve200_display_check(struct drm_simple_display_pipe *pipe,
}
if (fb) {
- u32 offset = drm_fb_cma_get_gem_addr(fb, pstate, 0);
+ u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3) {
@@ -267,14 +267,14 @@ static void tve200_display_update(struct drm_simple_display_pipe *pipe,
if (fb) {
/* For RGB, the Y component is used as base address */
- writel(drm_fb_cma_get_gem_addr(fb, pstate, 0),
+ writel(drm_fb_dma_get_gem_addr(fb, pstate, 0),
priv->regs + TVE200_Y_FRAME_BASE_ADDR);
/* For three plane YUV we need two more addresses */
if (fb->format->format == DRM_FORMAT_YUV420) {
- writel(drm_fb_cma_get_gem_addr(fb, pstate, 1),
+ writel(drm_fb_dma_get_gem_addr(fb, pstate, 1),
priv->regs + TVE200_U_FRAME_BASE_ADDR);
- writel(drm_fb_cma_get_gem_addr(fb, pstate, 2),
+ writel(drm_fb_dma_get_gem_addr(fb, pstate, 2),
priv->regs + TVE200_V_FRAME_BASE_ADDR);
}
}
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 6d9d2921abf4..04db72e3fa9c 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -39,9 +39,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
@@ -65,7 +64,7 @@ static int tve200_modeset_init(struct drm_device *dev)
struct tve200_drm_dev_private *priv = dev->dev_private;
struct drm_panel *panel;
struct drm_bridge *bridge;
- int ret = 0;
+ int ret;
drm_mode_config_init(dev);
mode_config = &dev->mode_config;
@@ -93,6 +92,7 @@ static int tve200_modeset_init(struct drm_device *dev)
* method to get the connector out of the bridge.
*/
dev_err(dev->dev, "the bridge is not a panel\n");
+ ret = -EINVAL;
goto out_bridge;
}
@@ -135,7 +135,7 @@ finish:
return ret;
}
-DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver tve200_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@@ -147,7 +147,7 @@ static const struct drm_driver tve200_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- DRM_GEM_CMA_DRIVER_OPS,
+ DRM_GEM_DMA_DRIVER_OPS,
};
static int tve200_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 5703277c6f52..91effdcefb6d 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -21,8 +21,14 @@ static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
+ int ret;
- return drm_mode_config_helper_suspend(dev);
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
+
+ udl_sync_pending_urbs(dev);
+ return 0;
}
static int udl_usb_resume(struct usb_interface *interface)
@@ -32,6 +38,16 @@ static int udl_usb_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
+static int udl_usb_reset_resume(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct udl_device *udl = to_udl(dev);
+
+ udl_select_std_channel(udl);
+
+ return drm_mode_config_helper_resume(dev);
+}
+
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
@@ -140,6 +156,7 @@ static struct usb_driver udl_driver = {
.disconnect = udl_usb_disconnect,
.suspend = udl_usb_suspend,
.resume = udl_usb_resume,
+ .reset_resume = udl_usb_reset_resume,
.id_table = id_table,
};
module_usb_driver(udl_driver);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc16a13316e4..b4cc7cc568c7 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -34,14 +34,13 @@ struct udl_device;
struct urb_node {
struct list_head entry;
struct udl_device *dev;
- struct delayed_work release_urb_work;
struct urb *urb;
};
struct urb_list {
struct list_head list;
spinlock_t lock;
- struct semaphore limit_sem;
+ wait_queue_head_t sleep;
int available;
int count;
size_t size;
@@ -78,6 +77,7 @@ struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
+void udl_sync_pending_urbs(struct drm_device *dev);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
@@ -87,6 +87,7 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
int udl_drop_usb(struct drm_device *dev);
+int udl_select_std_channel(struct udl_device *udl);
#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 853f147036f6..061cb88c08a2 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -20,11 +20,10 @@
#define NR_USB_REQUEST_CHANNEL 0x12
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
-#define WRITES_IN_FLIGHT (4)
+#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
-#define GET_URB_TIMEOUT HZ
-#define FREE_URB_TIMEOUT (HZ*2)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
@@ -95,7 +94,7 @@ success:
/*
* Need to ensure a channel is selected before submitting URBs
*/
-static int udl_select_std_channel(struct udl_device *udl)
+int udl_select_std_channel(struct udl_device *udl)
{
static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
@@ -119,14 +118,6 @@ static int udl_select_std_channel(struct udl_device *udl)
return ret < 0 ? ret : 0;
}
-static void udl_release_urb_work(struct work_struct *work)
-{
- struct urb_node *unode = container_of(work, struct urb_node,
- release_urb_work.work);
-
- up(&unode->dev->urbs.limit_sem);
-}
-
void udl_urb_completion(struct urb *urb)
{
struct urb_node *unode = urb->context;
@@ -137,6 +128,7 @@ void udl_urb_completion(struct urb *urb)
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
+ urb->status == -EPROTO ||
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
@@ -150,49 +142,34 @@ void udl_urb_completion(struct urb *urb)
udl->urbs.available++;
spin_unlock_irqrestore(&udl->urbs.lock, flags);
-#if 0
- /*
- * When using fb_defio, we deadlock if up() is called
- * while another is waiting. So queue to another process.
- */
- if (fb_defio)
- schedule_delayed_work(&unode->release_urb_work, 0);
- else
-#endif
- up(&udl->urbs.limit_sem);
+ wake_up(&udl->urbs.sleep);
}
static void udl_free_urb_list(struct drm_device *dev)
{
struct udl_device *udl = to_udl(dev);
- int count = udl->urbs.count;
- struct list_head *node;
struct urb_node *unode;
struct urb *urb;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
- while (count--) {
- down(&udl->urbs.limit_sem);
-
+ while (udl->urbs.count) {
spin_lock_irq(&udl->urbs.lock);
-
- node = udl->urbs.list.next; /* have reserved one with sem */
- list_del_init(node);
-
+ urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
+ udl->urbs.count--;
spin_unlock_irq(&udl->urbs.lock);
-
- unode = list_entry(node, struct urb_node, entry);
- urb = unode->urb;
-
+ if (WARN_ON(!urb))
+ break;
+ unode = urb->context;
/* Free each separately allocated piece */
usb_free_coherent(urb->dev, udl->urbs.size,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
- kfree(node);
+ kfree(unode);
}
- udl->urbs.count = 0;
+
+ wake_up_all(&udl->urbs.sleep);
}
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
@@ -205,24 +182,20 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
struct usb_device *udev = udl_to_usb_device(udl);
spin_lock_init(&udl->urbs.lock);
-
-retry:
- udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
-
- sema_init(&udl->urbs.limit_sem, 0);
+ init_waitqueue_head(&udl->urbs.sleep);
udl->urbs.count = 0;
udl->urbs.available = 0;
+retry:
+ udl->urbs.size = size;
+
while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
unode->dev = udl;
- INIT_DELAYED_WORK(&unode->release_urb_work,
- udl_release_urb_work);
-
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
@@ -250,7 +223,6 @@ retry:
list_add_tail(&unode->entry, &udl->urbs.list);
- up(&udl->urbs.limit_sem);
udl->urbs.count++;
udl->urbs.available++;
}
@@ -260,35 +232,41 @@ retry:
return udl->urbs.count;
}
-struct urb *udl_get_urb(struct drm_device *dev)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
- struct udl_device *udl = to_udl(dev);
- int ret = 0;
- struct list_head *entry;
struct urb_node *unode;
- struct urb *urb = NULL;
+
+ assert_spin_locked(&udl->urbs.lock);
/* Wait for an in-flight buffer to complete and get re-queued */
- ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
- if (ret) {
- DRM_INFO("wait for urb interrupted: %x available: %d\n",
- ret, udl->urbs.available);
- goto error;
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ !udl->urbs.count ||
+ !list_empty(&udl->urbs.list),
+ udl->urbs.lock, timeout)) {
+ DRM_INFO("wait for urb interrupted: available: %d\n",
+ udl->urbs.available);
+ return NULL;
}
- spin_lock_irq(&udl->urbs.lock);
+ if (!udl->urbs.count)
+ return NULL;
- BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
- entry = udl->urbs.list.next;
- list_del_init(entry);
+ unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
+ list_del_init(&unode->entry);
udl->urbs.available--;
- spin_unlock_irq(&udl->urbs.lock);
+ return unode ? unode->urb : NULL;
+}
- unode = list_entry(entry, struct urb_node, entry);
- urb = unode->urb;
+#define GET_URB_TIMEOUT HZ
+struct urb *udl_get_urb(struct drm_device *dev)
+{
+ struct udl_device *udl = to_udl(dev);
+ struct urb *urb;
-error:
+ spin_lock_irq(&udl->urbs.lock);
+ urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
+ spin_unlock_irq(&udl->urbs.lock);
return urb;
}
@@ -297,10 +275,13 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
struct udl_device *udl = to_udl(dev);
int ret;
- BUG_ON(len > udl->urbs.size);
-
+ if (WARN_ON(len > udl->urbs.size)) {
+ ret = -EINVAL;
+ goto error;
+ }
urb->transfer_buffer_length = len; /* set to actual payload len */
ret = usb_submit_urb(urb, GFP_ATOMIC);
+ error:
if (ret) {
udl_urb_completion(urb); /* because no one else will */
DRM_ERROR("usb_submit_urb error %x\n", ret);
@@ -308,6 +289,21 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
+/* wait until all pending URBs have been processed */
+void udl_sync_pending_urbs(struct drm_device *dev)
+{
+ struct udl_device *udl = to_udl(dev);
+
+ spin_lock_irq(&udl->urbs.lock);
+ /* 2 seconds as a sane timeout */
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ udl->urbs.available == udl->urbs.count,
+ udl->urbs.lock,
+ msecs_to_jiffies(2000)))
+ drm_err(dev, "Timeout for syncing pending URBs\n");
+ spin_unlock_irq(&udl->urbs.lock);
+}
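The driver's URB pool now pairs a wait queue with urbs.lock instead of the old counting semaphore; wait_event_lock_irq_timeout() releases the spinlock while sleeping, re-takes it before re-checking the condition, and returns 0 on timeout. A stripped-down sketch of that shape, with hypothetical names (pool, item) rather than the driver's own:

	/* needs <linux/wait.h>, <linux/list.h>, <linux/spinlock.h> */
	struct item { struct list_head entry; };
	struct pool {
		spinlock_t lock;
		struct list_head free;
		wait_queue_head_t sleep;
	};

	/* consumer, called with pool->lock held; returns with it still held */
	static struct item *pool_get(struct pool *pool, long timeout)
	{
		struct item *it;

		if (!wait_event_lock_irq_timeout(pool->sleep,
						 !list_empty(&pool->free),
						 pool->lock, timeout))
			return NULL;	/* timed out */

		it = list_first_entry(&pool->free, struct item, entry);
		list_del_init(&it->entry);
		return it;
	}

	/* producer, e.g. from a completion handler */
	static void pool_put(struct pool *pool, struct item *it)
	{
		unsigned long flags;

		spin_lock_irqsave(&pool->lock, flags);
		list_add_tail(&it->entry, &pool->free);
		spin_unlock_irqrestore(&pool->lock, flags);
		wake_up(&pool->sleep);
	}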
+
int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index e67c40a48fb4..ec6876f449f3 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -242,38 +242,15 @@ static long udl_log_cpp(unsigned int cpp)
return __ffs(cpp);
}
-static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
- int width, int height)
-{
- int x1, x2;
-
- if (WARN_ON_ONCE(x < 0) ||
- WARN_ON_ONCE(y < 0) ||
- WARN_ON_ONCE(width < 0) ||
- WARN_ON_ONCE(height < 0))
- return -EINVAL;
-
- x1 = ALIGN_DOWN(x, sizeof(unsigned long));
- x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1;
-
- clip->x1 = x1;
- clip->y1 = y;
- clip->x2 = x2;
- clip->y2 = y + height;
-
- return 0;
-}
-
static int udl_handle_damage(struct drm_framebuffer *fb,
const struct iosys_map *map,
- int x, int y, int width, int height)
+ const struct drm_rect *clip)
{
struct drm_device *dev = fb->dev;
void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret;
char *cmd;
struct urb *urb;
- struct drm_rect clip;
int log_bpp;
ret = udl_log_cpp(fb->format->cpp[0]);
@@ -281,12 +258,6 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
- ret = udl_aligned_damage_clip(&clip, x, y, width, height);
- if (ret)
- return ret;
- else if ((clip.x2 > fb->width) || (clip.y2 > fb->height))
- return -EINVAL;
-
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
@@ -298,11 +269,11 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
}
cmd = urb->transfer_buffer;
- for (i = clip.y1; i < clip.y2; i++) {
+ for (i = clip->y1; i < clip->y2; i++) {
const int line_offset = fb->pitches[0] * i;
- const int byte_offset = line_offset + (clip.x1 << log_bpp);
- const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp;
- const int byte_width = (clip.x2 - clip.x1) << log_bpp;
+ const int byte_offset = line_offset + (clip->x1 << log_bpp);
+ const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp;
+ const int byte_width = drm_rect_width(clip) << log_bpp;
ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr,
&cmd, byte_offset, dev_byte_offset,
byte_width);
@@ -355,6 +326,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
char *buf;
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
@@ -380,10 +352,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
udl->mode_buf_len = wrptr - buf;
- udl_handle_damage(fb, &shadow_plane_state->data[0], 0, 0, fb->width, fb->height);
-
- if (!crtc_state->mode_changed)
- return;
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &clip);
/* enable display */
udl_crtc_write_mode_to_hw(crtc);
@@ -423,8 +392,7 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
- udl_handle_damage(fb, &shadow_plane_state->data[0], rect.x1, rect.y1,
- rect.x2 - rect.x1, rect.y2 - rect.y1);
+ udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
}
static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
@@ -479,6 +447,7 @@ int udl_modeset_init(struct drm_device *dev)
format_count, NULL, connector);
if (ret)
return ret;
+ drm_plane_enable_fb_damage_clips(&udl->display_pipe.plane);
drm_mode_config_reset(dev);
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 971927669d6b..b57844632dbd 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -25,46 +25,6 @@
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-/*
- * Trims identical data from front and back of line
- * Sets new front buffer address and width
- * And returns byte count of identical pixels
- * Assumes CPU natural alignment (unsigned long)
- * for back and front buffer ptrs and width
- */
-#if 0
-static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
-{
- int j, k;
- const unsigned long *back = (const unsigned long *) bback;
- const unsigned long *front = (const unsigned long *) *bfront;
- const int width = *width_bytes / sizeof(unsigned long);
- int identical = width;
- int start = width;
- int end = width;
-
- for (j = 0; j < width; j++) {
- if (back[j] != front[j]) {
- start = j;
- break;
- }
- }
-
- for (k = width - 1; k > j; k--) {
- if (back[k] != front[k]) {
- end = k+1;
- break;
- }
- }
-
- identical = start + (width - end);
- *bfront = (u8 *) &front[start];
- *width_bytes = (end - start) * sizeof(unsigned long);
-
- return identical * sizeof(unsigned long);
-}
-#endif
-
static inline u16 pixel32_to_be16(const uint32_t pixel)
{
return (((pixel >> 3) & 0x001f) |
@@ -220,7 +180,11 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(log_bpp == 1 || log_bpp == 2));
+ if (WARN_ON(!(log_bpp == 1 || log_bpp == 2))) {
+ /* need to finish URB at error from this function */
+ udl_urb_completion(urb);
+ return -EINVAL;
+ }
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 8c7f910daa28..e8c975b81585 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -22,7 +22,6 @@
#include <linux/reset.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 725a252e837b..b8980440d137 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -313,7 +313,7 @@ v3d_lookup_bos(struct drm_device *dev,
}
job->bo = kvmalloc_array(job->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!job->bo) {
DRM_DEBUG("Failed to allocate validated BO pointers\n");
@@ -1092,7 +1092,7 @@ v3d_gem_init(struct drm_device *dev)
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
- "Failed to allocate page tables. Please ensure you have CMA enabled.\n");
+ "Failed to allocate page tables. Please ensure you have DMA enabled.\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index f6a88abccc7d..48aaaa972c49 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -95,7 +95,7 @@ struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id)
void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
{
mutex_init(&v3d_priv->perfmon.lock);
- idr_init(&v3d_priv->perfmon.idr);
+ idr_init_base(&v3d_priv->perfmon.idr, 1);
}
static int v3d_perfmon_idr_del(int id, void *elem, void *data)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index fa0d73ce07bc..341edd982cb3 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -269,8 +269,8 @@ static int vbox_primary_atomic_check(struct drm_plane *plane,
}
return drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
}
@@ -351,8 +351,8 @@ static int vbox_cursor_atomic_check(struct drm_plane *plane,
}
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
@@ -477,7 +477,7 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
@@ -496,7 +496,7 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
static const struct drm_plane_funcs vbox_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
+ .destroy = drm_plane_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index a5de40fe1a76..f60d82504da0 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -43,7 +43,7 @@
* VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
* from the host and issue commands to the host.
*
- * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the the
+ * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the
* following operations with the VBE data register can be performed:
*
* Operation Result
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index b0f3117102ca..246305d17a52 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -12,7 +12,7 @@ config DRM_VC4
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_PANEL_BRIDGE
select SND_PCM
select SND_PCM_ELD
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 0846d56f74f2..231add8b8e12 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -8,10 +8,10 @@
*
* The VC4 GPU architecture (both scanout and rendering) has direct
* access to system memory with no MMU in between. To support it, we
- * use the GEM CMA helper functions to allocate contiguous ranges of
+ * use the GEM DMA helper functions to allocate contiguous ranges of
* physical memory for our BOs.
*
- * Since the CMA allocator is very slow, we keep a cache of recently
+ * Since the DMA allocator is very slow, we keep a cache of recently
* freed BOs around so that the kernel's allocation of objects for 3D
* rendering can return quickly.
*/
@@ -179,7 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- drm_gem_cma_free(&bo->base);
+ drm_gem_dma_free(&bo->base);
}
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
@@ -303,7 +303,7 @@ static void vc4_bo_purge(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
- dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+ dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
bo->base.vaddr = NULL;
bo->madv = __VC4_MADV_PURGED;
}
@@ -387,13 +387,14 @@ out:
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
- * This lets the CMA helpers allocate object structs for us, and keep
+ * This lets the DMA helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return ERR_PTR(-ENODEV);
@@ -404,7 +405,11 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
- mutex_init(&bo->madv_lock);
+
+ ret = drmm_mutex_init(dev, &bo->madv_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
mutex_lock(&vc4->bo_lock);
bo->label = VC4_BO_TYPE_KERNEL;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
@@ -421,7 +426,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -438,39 +443,39 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return bo;
}
- cma_obj = drm_gem_cma_create(dev, size);
- if (IS_ERR(cma_obj)) {
+ dma_obj = drm_gem_dma_create(dev, size);
+ if (IS_ERR(dma_obj)) {
/*
- * If we've run out of CMA memory, kill the cache of
- * CMA allocations we've got laying around and try again.
+ * If we've run out of DMA memory, kill the cache of
+ * DMA allocations we've got lying around and try again.
*/
vc4_bo_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
/*
- * Still not enough CMA memory, purge the userspace BO
+ * Still not enough DMA memory, purge the userspace BO
* cache and retry.
* This is sub-optimal since we purge the whole userspace
* BO cache which forces users who want to re-use the BO to
* restore its initial content.
* Ideally, we should purge entries one by one and retry
- * after each to see if CMA allocation succeeds. Or even
+ * after each to see if DMA allocation succeeds. Or even
* better, try to find an entry with at least the same
* size.
*/
vc4_bo_userspace_cache_purge(dev);
- cma_obj = drm_gem_cma_create(dev, size);
+ dma_obj = drm_gem_dma_create(dev, size);
}
- if (IS_ERR(cma_obj)) {
+ if (IS_ERR(dma_obj)) {
struct drm_printer p = drm_info_printer(vc4->base.dev);
- DRM_ERROR("Failed to allocate from CMA:\n");
+ DRM_ERROR("Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
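The comment above concedes that purging the entire userspace BO cache is heavy-handed and that evicting one entry at a time between retries would be gentler. Continuing the illustrative cache from the earlier sketch (hypothetical helpers, not driver code), that variant would look roughly like:

#include <stdbool.h>

/* Drop a single entry from the illustrative cache; false once it is empty. */
static bool bo_cache_evict_one(void)
{
	struct cached_bo *bo = bo_cache;

	if (!bo)
		return false;
	bo_cache = bo->next;
	free(bo->mem);
	free(bo);
	return true;
}

/* Retry the allocation after each single eviction instead of a full purge. */
static struct cached_bo *bo_create_incremental(size_t size)
{
	struct cached_bo *bo;

	while (!(bo = bo_create(size))) {
		if (!bo_cache_evict_one())
			return NULL;	/* nothing left to evict: real -ENOMEM */
	}
	return bo;
}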
- bo = to_vc4_bo(&cma_obj->base);
+ bo = to_vc4_bo(&dma_obj->base);
/* By default, BOs do not support the MADV ioctl. This will be enabled
* only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
@@ -479,7 +484,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
bo->madv = __VC4_MADV_NOTSUPP;
mutex_lock(&vc4->bo_lock);
- vc4_bo_set_label(&cma_obj->base, type);
+ vc4_bo_set_label(&dma_obj->base, type);
mutex_unlock(&vc4->bo_lock);
return bo;
@@ -564,7 +569,7 @@ static void vc4_free_object(struct drm_gem_object *gem_bo)
goto out;
}
- /* If this object was partially constructed but CMA allocation
+ /* If this object was partially constructed but DMA allocation
* had failed, just free it. Can also happen when the BO has been
* purged.
*/
@@ -742,7 +747,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
return -EINVAL;
}
- return drm_gem_cma_mmap(&bo->base, vma);
+ return drm_gem_dma_mmap(&bo->base, vma);
}
static const struct vm_operations_struct vc4_vm_ops = {
@@ -754,8 +759,8 @@ static const struct vm_operations_struct vc4_vm_ops = {
static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.free = vc4_free_object,
.export = vc4_prime_export,
- .get_sg_table = drm_gem_cma_object_get_sg_table,
- .vmap = drm_gem_cma_object_vmap,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
.mmap = vc4_gem_object_mmap,
.vm_ops = &vc4_vm_ops,
};
@@ -984,10 +989,28 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int vc4_bo_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ ret = vc4_debugfs_add_file(minor, "bo_stats",
+ vc4_bo_stats_debugfs, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
int i;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1007,9 +1030,11 @@ int vc4_bo_cache_init(struct drm_device *dev)
for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
vc4->bo_labels[i].name = bo_type_names[i];
- mutex_init(&vc4->bo_lock);
-
- vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
+ ret = drmm_mutex_init(dev, &vc4->bo_lock);
+ if (ret) {
+ kfree(vc4->bo_labels);
+ return ret;
+ }
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
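The hunks above (and several that follow) swap bare mutex_init() and devm allocations for their DRM-managed counterparts, so teardown is bound to the drm_device rather than to the underlying struct device. A condensed sketch of the idiom with a hypothetical driver struct; only the drmm_* calls themselves are the real API:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/mutex.h>

/* Hypothetical driver state, embedding the drm_device as vc4 does. */
struct foo_dev {
	struct drm_device base;
	struct mutex lock;
	void *scratch;
};

static void foo_quiesce(struct drm_device *drm, void *ptr)
{
	/* Last-chance teardown; runs when the final drm_device reference drops. */
}

static int foo_init(struct foo_dev *foo)
{
	struct drm_device *drm = &foo->base;
	int ret;

	/* mutex_destroy() is queued as a managed action for us. */
	ret = drmm_mutex_init(drm, &foo->lock);
	if (ret)
		return ret;

	/* The allocation is released together with the drm_device. */
	foo->scratch = drmm_kzalloc(drm, 4096, GFP_KERNEL);
	if (!foo->scratch)
		return -ENOMEM;

	/* Arbitrary cleanup can be chained the same way. */
	return drmm_add_action_or_reset(drm, foo_quiesce, foo);
}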
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 029be98660b3..0108613e79d5 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -37,8 +37,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -206,11 +207,6 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
return ret;
}
-void vc4_crtc_destroy(struct drm_crtc *crtc)
-{
- drm_crtc_cleanup(crtc);
-}
-
static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
{
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
@@ -300,10 +296,17 @@ struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
/* The PV needs to be disabled before it can be flushed */
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) & ~PV_CONTROL_EN);
CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR);
+
+ drm_dev_exit(idx);
}
static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder,
@@ -326,6 +329,10 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
bool debug_dump_regs = false;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
@@ -415,6 +422,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
drm_crtc_index(crtc));
drm_print_regset32(&p, &vc4_crtc->regset);
}
+
+ drm_dev_exit(idx);
}
static void require_hvs_enabled(struct drm_device *dev)
@@ -435,7 +444,10 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int ret;
+ int idx, ret;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
@@ -469,6 +481,8 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
if (vc4_encoder && vc4_encoder->post_crtc_powerdown)
vc4_encoder->post_crtc_powerdown(encoder, state);
+ drm_dev_exit(idx);
+
return 0;
}
@@ -544,6 +558,20 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
return 0;
}
+void vc4_crtc_send_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ if (!crtc->state || !crtc->state->event)
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@@ -567,14 +595,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
* Make sure we issue a vblank event after disabling the CRTC if
* someone was waiting on it.
*/
- if (crtc->state->event) {
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+ vc4_crtc_send_vblank(crtc);
}
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -586,10 +607,14 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state);
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+ int idx;
drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)",
crtc->name, crtc->base.id, encoder->name, encoder->base.id);
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
require_hvs_enabled(dev);
/* Enable vblank irq handling before crtc is started otherwise
@@ -617,6 +642,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
if (vc4_encoder->post_crtc_enable)
vc4_encoder->post_crtc_enable(encoder, state);
+
+ drm_dev_exit(idx);
}
static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -709,17 +736,31 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
static int vc4_enable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);
+ drm_dev_exit(idx);
+
return 0;
}
static void vc4_disable_vblank(struct drm_crtc *crtc)
{
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
CRTC_WRITE(PV_INTEN, 0);
+
+ drm_dev_exit(idx);
}
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
@@ -821,9 +862,9 @@ static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb)
struct vc4_bo *bo = NULL;
if (flip_state->old_fb) {
- struct drm_gem_cma_object *cma_bo =
- drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
- bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo =
+ drm_fb_dma_get_gem_obj(flip_state->old_fb, 0);
+ bo = to_vc4_bo(&dma_bo->base);
}
vc4_async_page_flip_complete(flip_state);
@@ -855,19 +896,19 @@ static int vc4_async_set_fence_cb(struct drm_device *dev,
struct vc4_async_flip_state *flip_state)
{
struct drm_framebuffer *fb = flip_state->fb;
- struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct dma_fence *fence;
int ret;
if (!vc4->is_vc5) {
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
vc4_async_page_flip_seqno_complete);
}
- ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
+ ret = dma_resv_get_singleton(dma_bo->base.resv, DMA_RESV_USAGE_READ, &fence);
if (ret)
return ret;
@@ -943,8 +984,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_dma_object *dma_bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -1050,9 +1091,23 @@ void vc4_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
}
+int vc4_crtc_late_register(struct drm_crtc *crtc)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, crtc_data->debugfs_name,
+ &vc4_crtc->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct drm_crtc_funcs vc4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = vc4_crtc_destroy,
.page_flip = vc4_page_flip,
.set_property = NULL,
.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
@@ -1063,6 +1118,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+ .late_register = vc4_crtc_late_register,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1077,10 +1133,10 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
+ .debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
- .debugfs_name = "crtc0_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1091,10 +1147,10 @@ static const struct vc4_pv_data bcm2835_pv0_data = {
static const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
+ .debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
},
- .debugfs_name = "crtc1_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1105,10 +1161,10 @@ static const struct vc4_pv_data bcm2835_pv1_data = {
static const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
+ .debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
- .debugfs_name = "crtc2_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1119,10 +1175,10 @@ static const struct vc4_pv_data bcm2835_pv2_data = {
static const struct vc4_pv_data bcm2711_pv0_data = {
.base = {
+ .debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
},
- .debugfs_name = "crtc0_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1133,10 +1189,10 @@ static const struct vc4_pv_data bcm2711_pv0_data = {
static const struct vc4_pv_data bcm2711_pv1_data = {
.base = {
+ .debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 3,
},
- .debugfs_name = "crtc1_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1147,10 +1203,10 @@ static const struct vc4_pv_data bcm2711_pv1_data = {
static const struct vc4_pv_data bcm2711_pv2_data = {
.base = {
+ .debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 4,
},
- .debugfs_name = "crtc2_regs",
.fifo_depth = 256,
.pixels_per_clock = 2,
.encoder_types = {
@@ -1160,10 +1216,10 @@ static const struct vc4_pv_data bcm2711_pv2_data = {
static const struct vc4_pv_data bcm2711_pv3_data = {
.base = {
+ .debugfs_name = "crtc3_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
},
- .debugfs_name = "crtc3_regs",
.fifo_depth = 64,
.pixels_per_clock = 1,
.encoder_types = {
@@ -1173,10 +1229,10 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
static const struct vc4_pv_data bcm2711_pv4_data = {
.base = {
+ .debugfs_name = "crtc4_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 5,
},
- .debugfs_name = "crtc4_regs",
.fifo_depth = 64,
.pixels_per_clock = 2,
.encoder_types = {
@@ -1230,6 +1286,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_plane *primary_plane;
unsigned int i;
+ int ret;
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
@@ -1237,15 +1294,18 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
* requirement of the plane configuration, and reject ones
* that will take too much.
*/
- primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
+ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(primary_plane)) {
dev_err(drm->dev, "failed to construct primary plane\n");
return PTR_ERR(primary_plane);
}
spin_lock_init(&vc4_crtc->irq_lock);
- drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
- crtc_funcs, NULL);
+ ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
+ crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
drm_crtc_helper_add(crtc, crtc_helper_funcs);
if (!vc4->is_vc5) {
@@ -1275,10 +1335,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
const struct vc4_pv_data *pv_data;
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
- struct drm_plane *destroy_plane, *temp;
int ret;
- vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
+ vc4_crtc = drmm_kzalloc(drm, sizeof(*vc4_crtc), GFP_KERNEL);
if (!vc4_crtc)
return -ENOMEM;
crtc = &vc4_crtc->base;
@@ -1310,23 +1369,11 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
IRQF_SHARED,
"vc4 crtc", vc4_crtc);
if (ret)
- goto err_destroy_planes;
+ return ret;
platform_set_drvdata(pdev, vc4_crtc);
- vc4_debugfs_add_regset32(drm, pv_data->debugfs_name,
- &vc4_crtc->regset);
-
return 0;
-
-err_destroy_planes:
- list_for_each_entry_safe(destroy_plane, temp,
- &drm->mode_config.plane_list, head) {
- if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
- destroy_plane->funcs->destroy(destroy_plane);
- }
-
- return ret;
}
static void vc4_crtc_unbind(struct device *dev, struct device *master,
@@ -1335,8 +1382,6 @@ static void vc4_crtc_unbind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);
- vc4_crtc_destroy(&vc4_crtc->base);
-
CRTC_WRITE(PV_INTEN, 0);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index ba2d8ea562af..19cda4f91a82 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Broadcom
*/
+#include <drm/drm_drv.h>
+
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
@@ -12,11 +14,6 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-struct vc4_debugfs_info_entry {
- struct list_head link;
- struct drm_info_list info;
-};
-
/*
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
@@ -25,62 +22,59 @@ void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
- struct vc4_debugfs_info_entry *entry;
+ struct drm_device *drm = &vc4->base;
- if (!of_device_is_compatible(vc4->hvs->pdev->dev.of_node,
- "brcm,bcm2711-vc5"))
- debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
- minor->debugfs_root, &vc4->load_tracker_enabled);
+ drm_WARN_ON(drm, vc4_hvs_debugfs_init(minor));
- list_for_each_entry(entry, &vc4->debugfs_list, link) {
- drm_debugfs_create_files(&entry->info, 1,
- minor->debugfs_root, minor);
+ if (vc4->v3d) {
+ drm_WARN_ON(drm, vc4_bo_debugfs_init(minor));
+ drm_WARN_ON(drm, vc4_v3d_debugfs_init(minor));
}
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
struct debugfs_regset32 *regset = node->info_ent->data;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
drm_print_regset32(&p, regset);
+ drm_dev_exit(idx);
+
return 0;
}
-/*
- * Registers a debugfs file with a callback function for a vc4 component.
- *
- * This is like drm_debugfs_create_files(), but that can only be
- * called a given DRM minor, while the various VC4 components want to
- * register their debugfs files during the component bind process. We
- * track the request and delay it to be called on each minor during
- * vc4_debugfs_init().
- */
-void vc4_debugfs_add_file(struct drm_device *dev,
- const char *name,
- int (*show)(struct seq_file*, void*),
- void *data)
+int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_device *dev = minor->dev;
+ struct dentry *root = minor->debugfs_root;
+ struct drm_info_list *file;
- struct vc4_debugfs_info_entry *entry =
- devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL);
+ file = drmm_kzalloc(dev, sizeof(*file), GFP_KERNEL);
+ if (!file)
+ return -ENOMEM;
- if (!entry)
- return;
+ file->name = name;
+ file->show = show;
+ file->data = data;
- entry->info.name = name;
- entry->info.show = show;
- entry->info.data = data;
+ drm_debugfs_create_files(file, 1, root, minor);
- list_add(&entry->link, &vc4->debugfs_list);
+ return 0;
}
-void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *name,
- struct debugfs_regset32 *regset)
+int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *name,
+ struct debugfs_regset32 *regset)
{
- vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
+ return vc4_debugfs_add_file(minor, name, vc4_debugfs_regset32, regset);
}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index ef5e3921062c..1f8f44b7b5a5 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -84,9 +85,9 @@
/* General DPI hardware state. */
struct vc4_dpi {
- struct platform_device *pdev;
+ struct vc4_encoder encoder;
- struct drm_encoder *encoder;
+ struct platform_device *pdev;
void __iomem *regs;
@@ -96,21 +97,15 @@ struct vc4_dpi {
struct debugfs_regset32 regset;
};
-#define DPI_READ(offset) readl(dpi->regs + (offset))
-#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
-
-/* VC4 DPI encoder KMS struct */
-struct vc4_dpi_encoder {
- struct vc4_encoder base;
- struct vc4_dpi *dpi;
-};
-
-static inline struct vc4_dpi_encoder *
-to_vc4_dpi_encoder(struct drm_encoder *encoder)
+static inline struct vc4_dpi *
+to_vc4_dpi(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_dpi_encoder, base.base);
+ return container_of(encoder, struct vc4_dpi, encoder.base);
}
+#define DPI_READ(offset) readl(dpi->regs + (offset))
+#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+
static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_C),
VC4_REG32(DPI_ID),
@@ -118,21 +113,27 @@ static const struct debugfs_reg32 dpi_regs[] = {
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
- struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
- struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct drm_device *dev = encoder->dev;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
clk_disable_unprepare(dpi->pixel_clock);
+
+ drm_dev_exit(idx);
}
static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_display_mode *mode = &encoder->crtc->mode;
- struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
- struct vc4_dpi *dpi = vc4_encoder->dpi;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector = NULL, *connector_scan;
u32 dpi_c = DPI_ENABLE;
+ int idx;
int ret;
/* Look up the connector attached to DPI so we can get the
@@ -212,6 +213,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
dpi_c |= DPI_VSYNC_DISABLE;
}
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
DPI_WRITE(DPI_C, dpi_c);
ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
@@ -221,6 +225,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
ret = clk_prepare_enable(dpi->pixel_clock);
if (ret)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+ drm_dev_exit(idx);
}
static enum drm_mode_status vc4_dpi_encoder_mode_valid(struct drm_encoder *encoder,
@@ -238,6 +244,23 @@ static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
.mode_valid = vc4_dpi_encoder_mode_valid,
};
+static int vc4_dpi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_dpi *dpi = to_vc4_dpi(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, "dpi_regs", &dpi->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
+ .late_register = vc4_dpi_late_register,
+};
+
static const struct of_device_id vc4_dpi_dt_match[] = {
{ .compatible = "brcm,bcm2835-dpi", .data = NULL },
{}
@@ -248,10 +271,11 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
*/
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
+ struct drm_device *drm = dpi->encoder.base.dev;
struct device *dev = &dpi->pdev->dev;
struct drm_bridge *bridge;
- bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
@@ -262,30 +286,28 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
return PTR_ERR(bridge);
}
- return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
+ return drm_bridge_attach(&dpi->encoder.base, bridge, NULL, 0);
+}
+
+static void vc4_dpi_disable_clock(void *ptr)
+{
+ struct vc4_dpi *dpi = ptr;
+
+ clk_disable_unprepare(dpi->core_clock);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_dpi *dpi;
- struct vc4_dpi_encoder *vc4_dpi_encoder;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ dpi = drmm_kzalloc(drm, sizeof(*dpi), GFP_KERNEL);
if (!dpi)
return -ENOMEM;
- vc4_dpi_encoder = devm_kzalloc(dev, sizeof(*vc4_dpi_encoder),
- GFP_KERNEL);
- if (!vc4_dpi_encoder)
- return -ENOMEM;
- vc4_dpi_encoder->base.type = VC4_ENCODER_TYPE_DPI;
- vc4_dpi_encoder->dpi = dpi;
- dpi->encoder = &vc4_dpi_encoder->base.base;
-
+ dpi->encoder.type = VC4_ENCODER_TYPE_DPI;
dpi->pdev = pdev;
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
@@ -307,6 +329,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
DRM_ERROR("Failed to get core clock: %d\n", ret);
return ret;
}
+
dpi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clock)) {
ret = PTR_ERR(dpi->pixel_clock);
@@ -316,49 +339,35 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
}
ret = clk_prepare_enable(dpi->core_clock);
- if (ret)
+ if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+ return ret;
+ }
- drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
- drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
+ ret = devm_add_action_or_reset(dev, vc4_dpi_disable_clock, dpi);
+ if (ret)
+ return ret;
- ret = vc4_dpi_init_bridge(dpi);
+ ret = drmm_encoder_init(drm, &dpi->encoder.base,
+ &vc4_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_DPI,
+ NULL);
if (ret)
- goto err_destroy_encoder;
+ return ret;
- dev_set_drvdata(dev, dpi);
+ drm_encoder_helper_add(&dpi->encoder.base, &vc4_dpi_encoder_helper_funcs);
- vc4->dpi = dpi;
+ ret = vc4_dpi_init_bridge(dpi);
+ if (ret)
+ return ret;
- vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
+ dev_set_drvdata(dev, dpi);
return 0;
-
-err_destroy_encoder:
- drm_encoder_cleanup(dpi->encoder);
- clk_disable_unprepare(dpi->core_clock);
- return ret;
-}
-
-static void vc4_dpi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_dpi *dpi = dev_get_drvdata(dev);
-
- drm_of_panel_bridge_remove(dev->of_node, 0, 0);
-
- drm_encoder_cleanup(dpi->encoder);
-
- clk_disable_unprepare(dpi->core_clock);
-
- vc4->dpi = NULL;
}
static const struct component_ops vc4_dpi_ops = {
.bind = vc4_dpi_bind,
- .unbind = vc4_dpi_unbind,
};
static int vc4_dpi_dev_probe(struct platform_device *pdev)
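Rather than unwinding the core clock in an error label and again in unbind, the DPI driver now enables it once and registers vc4_dpi_disable_clock() as a devres action. The idiom works for any enable/disable pair; a minimal sketch with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>

static void foo_disable_clock(void *data)
{
	clk_disable_unprepare(data);
}

/* Enable a clock and let devres disable it on probe failure or unbind. */
static int foo_enable_clock(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* If registration fails the action runs immediately, so no manual unwind. */
	return devm_add_action_or_reset(dev, foo_disable_clock, clk);
}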
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 292d1b6a01b6..ffbbb454c9e8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,7 +33,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_vblank.h>
@@ -86,7 +85,7 @@ static int vc5_dumb_create(struct drm_file *file_priv,
if (ret)
return ret;
- return drm_gem_cma_dumb_create_internal(file_priv, dev, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, dev, args);
}
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
@@ -212,7 +211,7 @@ static const struct drm_driver vc4_drm_driver = {
.gem_create_object = vc4_create_object,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -235,7 +234,7 @@ static const struct drm_driver vc5_drm_driver = {
.debugfs_init = vc4_debugfs_init,
#endif
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),
.fops = &vc4_drm_fops,
@@ -267,6 +266,13 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
+static void vc4_component_unbind_all(void *ptr)
+{
+ struct vc4_dev *vc4 = ptr;
+
+ component_unbind_all(vc4->dev, &vc4->base);
+}
+
static const struct of_device_id vc4_dma_range_matches[] = {
{ .compatible = "brcm,bcm2711-hvs" },
{ .compatible = "brcm,bcm2835-hvs" },
@@ -310,13 +316,16 @@ static int vc4_drm_bind(struct device *dev)
if (IS_ERR(vc4))
return PTR_ERR(vc4);
vc4->is_vc5 = is_vc5;
+ vc4->dev = dev;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
INIT_LIST_HEAD(&vc4->debugfs_list);
if (!is_vc5) {
- mutex_init(&vc4->bin_bo_lock);
+ ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
+ if (ret)
+ return ret;
ret = vc4_bo_cache_init(drm);
if (ret)
@@ -360,6 +369,10 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, vc4_component_unbind_all, vc4);
+ if (ret)
+ return ret;
+
ret = vc4_plane_create_additional_planes(drm);
if (ret)
goto unbind_all;
@@ -380,8 +393,6 @@ static int vc4_drm_bind(struct device *dev)
return 0;
unbind_all:
- component_unbind_all(dev, drm);
-
return ret;
}
@@ -389,8 +400,7 @@ static void vc4_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- drm_dev_unregister(drm);
-
+ drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 1beb96b77b8c..418a8242691f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -14,7 +14,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
@@ -76,6 +76,7 @@ struct vc4_perfmon {
struct vc4_dev {
struct drm_device base;
+ struct device *dev;
bool is_vc5;
@@ -83,9 +84,6 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_v3d *v3d;
- struct vc4_dpi *dpi;
- struct vc4_vec *vec;
- struct vc4_txp *txp;
struct vc4_hang_state *hang_state;
@@ -241,7 +239,7 @@ to_vc4_dev(struct drm_device *dev)
}
struct vc4_bo {
- struct drm_gem_cma_object base;
+ struct drm_gem_dma_object base;
/* seqno of the last job to render using this BO. */
uint64_t seqno;
@@ -290,7 +288,7 @@ struct vc4_bo {
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
- return container_of(to_drm_gem_cma_obj(bo), struct vc4_bo, base);
+ return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
struct vc4_fence {
@@ -458,6 +456,8 @@ to_vc4_encoder(struct drm_encoder *encoder)
}
struct vc4_crtc_data {
+ const char *debugfs_name;
+
/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
unsigned int hvs_available_channels;
@@ -475,8 +475,6 @@ struct vc4_pv_data {
u8 pixels_per_clock;
enum vc4_encoder_type encoder_types[4];
- const char *debugfs_name;
-
};
struct vc4_crtc {
@@ -604,14 +602,14 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_cma_object **bo;
+ struct drm_gem_dma_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
* the binner temporary storage, this is all the BOs written
* by the job.
*/
- struct drm_gem_cma_object *rcl_write_bo[4];
+ struct drm_gem_dma_object *rcl_write_bo[4];
uint32_t rcl_write_bo_count;
/* Pointers for our position in vc4->job_list */
@@ -630,7 +628,7 @@ struct vc4_exec_info {
/* This is the BO where we store the validated command lists, shader
* records, and uniforms.
*/
- struct drm_gem_cma_object *exec_bo;
+ struct drm_gem_dma_object *exec_bo;
/**
* This tracks the per-shader-record state (packet 64) that
@@ -843,6 +841,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
+int vc4_bo_debugfs_init(struct drm_minor *minor);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
@@ -850,7 +849,6 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
const struct drm_crtc_funcs *crtc_funcs,
const struct drm_crtc_helper_funcs *crtc_helper_funcs);
-void vc4_crtc_destroy(struct drm_crtc *crtc);
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -861,6 +859,8 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_send_vblank(struct drm_crtc *crtc);
+int vc4_crtc_late_register(struct drm_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);
@@ -868,25 +868,27 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
-void vc4_debugfs_add_file(struct drm_device *drm,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data);
-void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *filename,
- struct debugfs_regset32 *regset);
+int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data);
+int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *filename,
+ struct debugfs_regset32 *regset);
#else
-static inline void vc4_debugfs_add_file(struct drm_device *drm,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data)
+static inline int vc4_debugfs_add_file(struct drm_minor *minor,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data)
{
+ return 0;
}
-static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
- const char *filename,
- struct debugfs_regset32 *regset)
+static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
+ const char *filename,
+ struct debugfs_regset32 *regset)
{
+ return 0;
}
#endif
@@ -952,13 +954,15 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
void vc4_hvs_dump_state(struct vc4_hvs *hvs);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel);
+int vc4_hvs_debugfs_init(struct drm_minor *minor);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);
/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
- enum drm_plane_type type);
+ enum drm_plane_type type,
+ uint32_t possible_crtcs);
int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
@@ -973,6 +977,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
+int vc4_v3d_debugfs_init(struct drm_minor *minor);
/* vc4_validate.c */
int
@@ -984,19 +989,19 @@ vc4_validate_bin_cl(struct drm_device *dev,
int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);
-struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
+struct drm_gem_dma_object *vc4_use_bo(struct vc4_exec_info *exec,
uint32_t hindex);
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);
bool vc4_check_tex_size(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *fbo,
+ struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj);
/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index b7b2c76770dc..878e05d79e81 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -549,10 +549,13 @@ struct vc4_dsi_variant {
/* General DSI hardware state. */
struct vc4_dsi {
+ struct vc4_encoder encoder;
+ struct mipi_dsi_host dsi_host;
+
+ struct kref kref;
+
struct platform_device *pdev;
- struct mipi_dsi_host dsi_host;
- struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct list_head bridge_chain;
@@ -600,6 +603,12 @@ struct vc4_dsi {
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
+static inline struct vc4_dsi *
+to_vc4_dsi(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_dsi, encoder.base);
+}
+
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
@@ -644,18 +653,6 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val)
#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit)
-/* VC4 DSI encoder KMS struct */
-struct vc4_dsi_encoder {
- struct vc4_encoder base;
- struct vc4_dsi *dsi;
-};
-
-static inline struct vc4_dsi_encoder *
-to_vc4_dsi_encoder(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct vc4_dsi_encoder, base.base);
-}
-
static const struct debugfs_reg32 dsi0_regs[] = {
VC4_REG32(DSI0_CTRL),
VC4_REG32(DSI0_STAT),
@@ -795,8 +792,7 @@ dsi_esc_timing(u32 ns)
static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
{
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct device *dev = &dsi->pdev->dev;
struct drm_bridge *iter;
@@ -839,8 +835,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock);
unsigned long parent_rate = clk_get_rate(phy_parent);
unsigned long pixel_clock_hz = mode->clock * 1000;
@@ -875,8 +870,7 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_dsi_encoder *vc4_encoder = to_vc4_dsi_encoder(encoder);
- struct vc4_dsi *dsi = vc4_encoder->dsi;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
struct device *dev = &dsi->pdev->dev;
bool debug_dump_regs = false;
struct drm_bridge *iter;
@@ -1378,6 +1372,24 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
.mode_fixup = vc4_dsi_encoder_mode_fixup,
};
+static int vc4_dsi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, dsi->variant->debugfs_name,
+ &dsi->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
+ .late_register = vc4_dsi_late_register,
+};
+
static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
.port = 1,
.debugfs_name = "dsi1_regs",
@@ -1564,26 +1576,50 @@ static void vc4_dsi_dma_chan_release(void *ptr)
dsi->reg_dma_chan = NULL;
}
+static void vc4_dsi_release(struct kref *kref)
+{
+ struct vc4_dsi *dsi =
+ container_of(kref, struct vc4_dsi, kref);
+
+ kfree(dsi);
+}
+
+static void vc4_dsi_get(struct vc4_dsi *dsi)
+{
+ kref_get(&dsi->kref);
+}
+
+static void vc4_dsi_put(struct vc4_dsi *dsi)
+{
+ kref_put(&dsi->kref, &vc4_dsi_release);
+}
+
+static void vc4_dsi_release_action(struct drm_device *drm, void *ptr)
+{
+ struct vc4_dsi *dsi = ptr;
+
+ vc4_dsi_put(dsi);
+}
+
static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- struct vc4_dsi_encoder *vc4_dsi_encoder;
+ struct drm_encoder *encoder = &dsi->encoder.base;
int ret;
- dsi->variant = of_device_get_match_data(dev);
+ vc4_dsi_get(dsi);
- vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder),
- GFP_KERNEL);
- if (!vc4_dsi_encoder)
- return -ENOMEM;
+ ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi);
+ if (ret)
+ return ret;
+
+ dsi->variant = of_device_get_match_data(dev);
INIT_LIST_HEAD(&dsi->bridge_chain);
- vc4_dsi_encoder->base.type = dsi->variant->port ?
- VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
- vc4_dsi_encoder->dsi = dsi;
- dsi->encoder = &vc4_dsi_encoder->base.base;
+ dsi->encoder.type = dsi->variant->port ?
+ VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
dsi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dsi->regs))
@@ -1687,7 +1723,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ dsi->bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
@@ -1702,10 +1738,20 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
- drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
+ ret = drmm_encoder_init(drm, encoder,
+ &vc4_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI,
+ NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = drm_bridge_attach(encoder, dsi->bridge, NULL, 0);
if (ret)
return ret;
/* Disable the atomic helper calls into the bridge. We
@@ -1713,11 +1759,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
* from our driver, since we need to sequence them within the
* encoder's enable/disable paths.
*/
- list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain);
-
- vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
-
- pm_runtime_enable(dev);
+ list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
return 0;
}
@@ -1726,15 +1768,13 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
-
- pm_runtime_disable(dev);
+ struct drm_encoder *encoder = &dsi->encoder.base;
/*
* Restore the bridge_chain so the bridge detach procedure can happen
* normally.
*/
- list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
- drm_encoder_cleanup(dsi->encoder);
+ list_splice_init(&dsi->bridge_chain, &encoder->bridge_chain);
}
static const struct component_ops vc4_dsi_ops = {
@@ -1747,11 +1787,12 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
dev_set_drvdata(dev, dsi);
+ kref_init(&dsi->kref);
dsi->pdev = pdev;
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
@@ -1766,6 +1807,8 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev)
struct vc4_dsi *dsi = dev_get_drvdata(dev);
mipi_dsi_host_unregister(&dsi->dsi_host);
+ vc4_dsi_put(dsi);
+
return 0;
}
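The vc4_dsi structure is shared between the MIPI DSI host registered at probe time and the DRM device bound later, so it moves to a plain kzalloc() plus a kref: probe holds one reference until remove, and bind takes a second one that a DRM-managed action drops when the drm_device is released. The same pattern for a generic object, condensed with hypothetical names:

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* ... hardware state ... */
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, kref));
}

static void foo_put_action(struct drm_device *drm, void *ptr)
{
	struct foo *foo = ptr;

	kref_put(&foo->kref, foo_release);
}

/* probe: the platform device holds the initial reference. */
static struct foo *foo_probe(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (foo)
		kref_init(&foo->kref);
	return foo;
}

/* bind: the drm_device takes its own reference and drops it on release. */
static int foo_bind(struct drm_device *drm, struct foo *foo)
{
	kref_get(&foo->kref);
	return drmm_add_action_or_reset(drm, foo_put_action, foo);
}

/* remove: drop the probe-time reference; whichever put runs last frees. */
static void foo_remove(struct foo *foo)
{
	kref_put(&foo->kref, foo_release);
}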
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index fe10d9c3fff8..628d40ff3aa1 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -126,7 +126,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
goto err_delete_handle;
}
bo_state[i].handle = handle;
- bo_state[i].paddr = vc4_bo->base.paddr;
+ bo_state[i].paddr = vc4_bo->base.dma_addr;
bo_state[i].size = vc4_bo->base.base.size;
}
@@ -764,7 +764,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_cma_object *),
+ sizeof(struct drm_gem_dma_object *),
GFP_KERNEL | __GFP_ZERO);
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
@@ -797,7 +797,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
}
drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_cma_object *)bo;
+ exec->bo[i] = (struct drm_gem_dma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
@@ -917,16 +917,16 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
&exec->unref_list);
- exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+ exec->ct0ca = exec->exec_bo->dma_addr + bin_offset;
exec->bin_u = bin;
exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
- exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+ exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset;
exec->shader_rec_size = args->shader_rec_size;
exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
- exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+ exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset;
exec->uniforms_size = args->uniforms_size;
ret = vc4_validate_bin_cl(dev,
@@ -1308,6 +1308,7 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused);
int vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret;
if (WARN_ON_ONCE(vc4->is_vc5))
return -ENODEV;
@@ -1325,10 +1326,15 @@ int vc4_gem_init(struct drm_device *dev)
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
- mutex_init(&vc4->power_lock);
+ ret = drmm_mutex_init(dev, &vc4->power_lock);
+ if (ret)
+ return ret;
INIT_LIST_HEAD(&vc4->purgeable.list);
- mutex_init(&vc4->purgeable.lock);
+
+ ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
+ if (ret)
+ return ret;
return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1e5f68704d7d..64f9feabf43e 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
@@ -41,7 +42,6 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/rational.h>
@@ -124,6 +124,23 @@ static unsigned long long
vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode,
unsigned int bpc, enum vc4_hdmi_output_format fmt);
+static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_display_info *display = &vc4_hdmi->connector.display_info;
+
+ lockdep_assert_held(&vc4_hdmi->mutex);
+
+ if (!display->is_hdmi)
+ return false;
+
+ if (!display->hdmi.scdc.supported ||
+ !display->hdmi.scdc.scrambling.supported)
+ return false;
+
+ return true;
+}
+
static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode,
unsigned int bpc,
enum vc4_hdmi_output_format fmt)
@@ -146,7 +163,12 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct vc4_hdmi *vc4_hdmi = node->info_ent->data;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_printer p = drm_seq_file_printer(m);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
drm_print_regset32(&p, &vc4_hdmi->hdmi_regset);
drm_print_regset32(&p, &vc4_hdmi->hd_regset);
@@ -157,12 +179,23 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
drm_print_regset32(&p, &vc4_hdmi->ram_regset);
drm_print_regset32(&p, &vc4_hdmi->rm_regset);
+ drm_dev_exit(idx);
+
return 0;
}
static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
+
+ /*
+ * We can be called by our bind callback, when the
+ * connector->dev pointer might not be initialised yet.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -179,11 +212,23 @@ static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
+
+ /*
+ * We can be called by our bind callback, when the
+ * connector->dev pointer might not be initialised yet.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
reset_control_reset(vc4_hdmi->reset);
@@ -195,15 +240,31 @@ static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi)
HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
{
- unsigned long cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ unsigned long cec_rate;
unsigned long flags;
u16 clk_cnt;
u32 value;
+ int idx;
+
+ /*
+ * This function is called by our runtime_resume implementation
+ * and thus at bind time, when we haven't registered our
+ * connector yet and thus don't have a pointer to the DRM
+ * device.
+ */
+ if (drm && !drm_dev_enter(drm, &idx))
+ return;
+
+ cec_rate = clk_get_rate(vc4_hdmi->cec_clock);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -219,58 +280,180 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi)
HDMI_WRITE(HDMI_CEC_CNTRL_1, value);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ if (drm)
+ drm_dev_exit(idx);
}
#else
static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {}
#endif
-static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder);
+static int reset_pipe(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ctx;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ crtc_state->connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+out:
+ drm_atomic_state_put(state);
+
+ return ret;
+}
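reset_pipe() commits an otherwise empty atomic state with connectors_changed set on the CRTC, which the atomic helpers treat as a full modeset. It deliberately takes the caller's acquire context, so deadlock backoff is handled one level up (here by the probe helper driving .detect_ctx). If such a helper were driven directly, the usual acquire/backoff loop would look roughly like this (illustrative, not driver code):

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>

/* Drive a ctx-taking helper such as reset_pipe() with the standard
 * acquire/backoff dance: on -EDEADLK drop all locks and retry.
 */
static int foo_reset_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = reset_pipe(crtc, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}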
-static enum drm_connector_status
-vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
+static int vc4_hdmi_reset_link(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
{
+ struct drm_device *drm = connector->dev;
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
- bool connected = false;
+ struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ bool scrambling_needed;
+ u8 config;
+ int ret;
- mutex_lock(&vc4_hdmi->mutex);
+ if (!connector)
+ return 0;
+
+ ret = drm_modeset_lock(&drm->mode_config.connection_mutex, ctx);
+ if (ret)
+ return ret;
+
+ conn_state = connector->state;
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
+
+ crtc_state = crtc->state;
+ if (!crtc_state->active)
+ return 0;
+
+ if (!vc4_hdmi_supports_scrambling(encoder))
+ return 0;
+
+ scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode,
+ vc4_hdmi->output_bpc,
+ vc4_hdmi->output_format);
+ if (!scrambling_needed)
+ return 0;
+
+ if (conn_state->commit &&
+ !try_wait_for_completion(&conn_state->commit->hw_done))
+ return 0;
+
+ ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config);
+ if (ret < 0) {
+ drm_err(drm, "Failed to read TMDS config: %d\n", ret);
+ return 0;
+ }
+
+ if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed)
+ return 0;
+
+ /*
+ * HDMI 2.0 says that one should not send scrambled data
+ * prior to configuring the sink scrambling, and that
+ * TMDS clock/data transmission should be suspended when
+ * changing the TMDS clock rate in the sink. So let's
+ * just do a full modeset here, even though some sinks
+ * would be perfectly happy if we were to just reconfigure
+ * the SCDC settings on the fly.
+ */
+ return reset_pipe(crtc, ctx);
+}
+
+static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ struct drm_modeset_acquire_ctx *ctx,
+ enum drm_connector_status status)
+{
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct edid *edid;
+
+ /*
+ * NOTE: This function should really be called with
+ * vc4_hdmi->mutex held, but doing so results in reentrancy
+ * issues since cec_s_phys_addr_from_edid might call
+ * .adap_enable, which leads to that function being called with
+ * our mutex held.
+ *
+ * A similar situation occurs with
+ * drm_atomic_helper_connector_hdmi_reset_link() that will call
+ * into our KMS hooks if the scrambling was enabled.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
+
+ if (status == connector_status_disconnected) {
+ cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ return;
+ }
+
+ edid = drm_get_edid(connector, vc4_hdmi->ddc);
+ if (!edid)
+ return;
+
+ cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ kfree(edid);
+
+ vc4_hdmi_reset_link(connector, ctx);
+}
+
+static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+ * doing so results in reentrancy issues since
+ * vc4_hdmi_handle_hotplug() can call into other functions that
+ * would take the mutex while it's held here.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
if (vc4_hdmi->hpd_gpio) {
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
- connected = true;
+ status = connector_status_connected;
} else {
if (vc4_hdmi->variant->hp_detect &&
vc4_hdmi->variant->hp_detect(vc4_hdmi))
- connected = true;
+ status = connector_status_connected;
}
- if (connected) {
- if (connector->status != connector_status_connected) {
- struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc);
-
- if (edid) {
- cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- kfree(edid);
- }
- }
-
- vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base);
- pm_runtime_put(&vc4_hdmi->pdev->dev);
- mutex_unlock(&vc4_hdmi->mutex);
- return connector_status_connected;
- }
-
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
+ vc4_hdmi_handle_hotplug(vc4_hdmi, ctx, status);
pm_runtime_put(&vc4_hdmi->pdev->dev);
- mutex_unlock(&vc4_hdmi->mutex);
- return connector_status_disconnected;
-}
-static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
+ return status;
}
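The switch from .detect to .detect_ctx matters here because the hook now receives a drm_modeset_acquire_ctx, so any modeset lock taken during detection (as vc4_hdmi_reset_link() does on the CRTC) takes part in the usual deadlock-backoff handling of the probe helpers. A generic sketch of the pattern, with foo_hw_sense() standing in for the actual hotplug read:

static int foo_connector_detect_ctx(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx,
                                    bool force)
{
        struct drm_crtc *crtc = connector->state->crtc;
        int ret;

        if (crtc) {
                /* -EDEADLK is propagated and retried by the probe helpers. */
                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        return ret;
        }

        return foo_hw_sense(connector) ? connector_status_connected :
                                         connector_status_disconnected;
}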
static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
@@ -279,14 +462,21 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
int ret = 0;
struct edid *edid;
- mutex_lock(&vc4_hdmi->mutex);
+ /*
+ * NOTE: This function should really take vc4_hdmi->mutex, but
+ * doing so results in reentrancy issues since
+ * cec_s_phys_addr_from_edid might call .adap_enable, which
+ * leads to that function being called with our mutex held.
+ *
+ * Concurrency isn't an issue at the moment since we don't share
+ * any state with any of the other frameworks so we can ignore
+ * the lock for now.
+ */
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
- if (!edid) {
- ret = -ENODEV;
- goto out;
- }
+ if (!edid)
+ return -ENODEV;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -294,7 +484,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
if (vc4_hdmi->disable_4kp60) {
struct drm_device *drm = connector->dev;
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
list_for_each_entry(mode, &connector->probed_modes, head) {
if (vc4_hdmi_mode_needs_scrambling(mode, 8, VC4_HDMI_OUTPUT_RGB)) {
@@ -304,9 +494,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
}
}
-out:
- mutex_unlock(&vc4_hdmi->mutex);
-
return ret;
}
@@ -378,15 +565,14 @@ vc4_hdmi_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
- .detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_hdmi_connector_destroy,
.reset = vc4_hdmi_connector_reset,
.atomic_duplicate_state = vc4_hdmi_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
+ .detect_ctx = vc4_hdmi_connector_detect_ctx,
.get_modes = vc4_hdmi_connector_get_modes,
.atomic_check = vc4_hdmi_connector_atomic_check,
};
@@ -398,10 +584,13 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
int ret;
- drm_connector_init_with_ddc(dev, connector,
- &vc4_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- vc4_hdmi->ddc);
+ ret = drmm_connector_init(dev, connector,
+ &vc4_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ vc4_hdmi->ddc);
+ if (ret)
+ return ret;
+
drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);
/*
@@ -444,25 +633,34 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
bool poll)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = type - 0x80;
unsigned long flags;
+ int ret = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG,
HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
- if (!poll)
- return 0;
+ if (poll) {
+ ret = wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
+ BIT(packet_id)), 100);
+ }
- return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) &
- BIT(packet_id)), 100);
+ drm_dev_exit(idx);
+ return ret;
}
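This is the first of many register-access paths in this patch wrapped in drm_dev_enter()/drm_dev_exit(), which turns hardware access into a no-op once the device has been unplugged or unbound. Reduced to its essentials, assuming a made-up foo_device wrapping a drm_device and a FOO_REG offset:

static void foo_hw_poke(struct foo_device *foo)
{
        int idx;

        /* Returns false once the underlying device is gone. */
        if (!drm_dev_enter(&foo->drm, &idx))
                return;

        writel(0, foo->regs + FOO_REG);

        drm_dev_exit(idx);
}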
static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe *frame)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
u32 packet_id = frame->any.type - 0x80;
const struct vc4_hdmi_register *ram_packet_start =
&vc4_hdmi->variant->registers[HDMI_RAM_PACKET_START];
@@ -475,6 +673,10 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
unsigned long flags;
ssize_t len, i;
int ret;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
WARN_ONCE(!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE),
@@ -482,12 +684,12 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
len = hdmi_infoframe_pack(frame, buffer, sizeof(buffer));
if (len < 0)
- return;
+ goto out;
ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true);
if (ret) {
DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret);
- return;
+ goto out;
}
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -523,6 +725,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
BIT(packet_id)), 100);
if (ret)
DRM_ERROR("Failed to wait for infoframe to start: %d\n", ret);
+
+out:
+ drm_dev_exit(idx);
}
static void vc4_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
@@ -649,35 +854,19 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
vc4_hdmi_set_hdr_infoframe(encoder);
}
-static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_info *display = &vc4_hdmi->connector.display_info;
-
- lockdep_assert_held(&vc4_hdmi->mutex);
-
- if (!display->is_hdmi)
- return false;
-
- if (!display->hdmi.scdc.supported ||
- !display->hdmi.scdc.scrambling.supported)
- return false;
-
- return true;
-}
-
#define SCRAMBLING_POLLING_DELAY_MS 1000
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long flags;
+ int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
- if (!vc4_hdmi_supports_scrambling(encoder, mode))
+ if (!vc4_hdmi_supports_scrambling(encoder))
return;
if (!vc4_hdmi_mode_needs_scrambling(mode,
@@ -685,6 +874,9 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
vc4_hdmi->output_format))
return;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true);
drm_scdc_set_scrambling(vc4_hdmi->ddc, true);
@@ -693,6 +885,8 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
VC5_HDMI_SCRAMBLER_CTL_ENABLE);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
vc4_hdmi->scdc_enabled = true;
queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work,
@@ -702,7 +896,9 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
lockdep_assert_held(&vc4_hdmi->mutex);
@@ -714,6 +910,9 @@ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
cancel_delayed_work_sync(&vc4_hdmi->scrambling_work);
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) &
~VC5_HDMI_SCRAMBLER_CTL_ENABLE);
@@ -721,6 +920,8 @@ static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
drm_scdc_set_scrambling(vc4_hdmi->ddc, false);
drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_scrambling_wq(struct work_struct *work)
@@ -743,12 +944,17 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
vc4_hdmi->packet_ram_enabled = false;
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0);
@@ -766,6 +972,9 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
vc4_hdmi_disable_scrambling(encoder);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -773,11 +982,16 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX);
@@ -793,6 +1007,9 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
if (ret < 0)
DRM_ERROR("Failed to release power domain: %d\n", ret);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -800,8 +1017,13 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 csc_ctl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -836,6 +1058,8 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
/*
@@ -920,6 +1144,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
unsigned long flags;
@@ -928,6 +1153,10 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
u32 csc_chan_ctl = 0;
u32 csc_ctl = VC5_MT_CP_CSC_CTL_ENABLE | VC4_SET_FIELD(VC4_HD_CSC_CTL_MODE_CUSTOM,
VC5_MT_CP_CSC_CTL_MODE);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -970,12 +1199,15 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CSC_CTL, csc_ctl);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -995,6 +1227,10 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
u32 reg;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1027,12 +1263,15 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_MISC_CONTROL, reg);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
const struct vc4_hdmi_connector_state *vc4_state =
conn_state_to_vc4_hdmi_conn_state(state);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@@ -1056,6 +1295,10 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
unsigned char gcp;
bool gcp_en;
u32 reg;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1132,13 +1375,20 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
HDMI_WRITE(HDMI_CLOCK_STOP, 0);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 drift;
int ret;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -1167,25 +1417,32 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi)
VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1);
WARN_ONCE(ret, "Timeout waiting for "
"VC4_HDMI_FIFO_CTL_RECENTER_DONE");
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
unsigned long tmds_char_rate = vc4_conn_state->tmds_char_rate;
unsigned long bvb_rate, hsm_rate;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
@@ -1206,13 +1463,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- goto out;
+ goto err_dev_exit;
}
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
- goto out;
+ goto err_dev_exit;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
@@ -1264,6 +1521,8 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
if (vc4_hdmi->variant->set_timings)
vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode);
+ drm_dev_exit(idx);
+
mutex_unlock(&vc4_hdmi->mutex);
return;
@@ -1272,6 +1531,8 @@ err_disable_pixel_clock:
clk_disable_unprepare(vc4_hdmi->pixel_clock);
err_put_runtime_pm:
pm_runtime_put(&vc4_hdmi->pdev->dev);
+err_dev_exit:
+ drm_dev_exit(idx);
out:
mutex_unlock(&vc4_hdmi->mutex);
return;
@@ -1281,14 +1542,19 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_connector *connector = &vc4_hdmi->connector;
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
if (vc4_hdmi->variant->csc_setup)
vc4_hdmi->variant->csc_setup(vc4_hdmi, conn_state, mode);
@@ -1296,6 +1562,9 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1303,15 +1572,20 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
- struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode;
struct drm_display_info *display = &vc4_hdmi->connector.display_info;
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC;
unsigned long flags;
int ret;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_VID_CTL,
@@ -1370,6 +1644,9 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
vc4_hdmi_recenter_fifo(vc4_hdmi);
vc4_hdmi_enable_scrambling(encoder);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1692,6 +1969,26 @@ static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
.mode_valid = vc4_hdmi_encoder_mode_valid,
};
+static int vc4_hdmi_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
+ int ret;
+
+ ret = vc4_debugfs_add_file(drm->primary, variant->debugfs_name,
+ vc4_hdmi_debugfs_regs,
+ vc4_hdmi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
+ .late_register = vc4_hdmi_late_register,
+};
+
static u32 vc4_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
{
int i;
@@ -1718,13 +2015,20 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
{
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
u32 hotplug;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return false;
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
hotplug = HDMI_READ(HDMI_HOTPLUG);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ drm_dev_exit(idx);
+
return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
}
@@ -1732,10 +2036,16 @@ static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
{
- u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
+ u32 hsm_clock;
unsigned long flags;
unsigned long n, m;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+ hsm_clock = clk_get_rate(vc4_hdmi->audio_clock);
rational_best_approximation(hsm_clock, samplerate,
VC4_HD_MAI_SMP_N_MASK >>
VC4_HD_MAI_SMP_N_SHIFT,
@@ -1748,6 +2058,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) |
VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M));
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ drm_dev_exit(idx);
}
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
@@ -1803,13 +2115,21 @@ static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int ret = 0;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- mutex_unlock(&vc4_hdmi->mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_dev_exit;
}
vc4_hdmi->audio.streaming = true;
@@ -1826,9 +2146,12 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data)
if (vc4_hdmi->variant->phy_rng_enable)
vc4_hdmi->variant->phy_rng_enable(vc4_hdmi);
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
mutex_unlock(&vc4_hdmi->mutex);
- return 0;
+ return ret;
}
static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
@@ -1857,10 +2180,15 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi)
static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx))
+ goto out;
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_MAI_CTL,
@@ -1876,6 +2204,9 @@ static void vc4_hdmi_audio_shutdown(struct device *dev, void *data)
vc4_hdmi->audio.streaming = false;
vc4_hdmi_audio_reset(vc4_hdmi);
+ drm_dev_exit(idx);
+
+out:
mutex_unlock(&vc4_hdmi->mutex);
}
@@ -1923,6 +2254,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
struct hdmi_codec_params *params)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base;
unsigned int sample_rate = params->sample_rate;
unsigned int channels = params->channels;
@@ -1931,15 +2263,22 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
u32 channel_map;
u32 mai_audio_format;
u32 mai_sample_rate;
+ int ret = 0;
+ int idx;
dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
sample_rate, params->sample_width, channels);
mutex_lock(&vc4_hdmi->mutex);
+ if (!drm_dev_enter(drm, &idx)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) {
- mutex_unlock(&vc4_hdmi->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_dev_exit;
}
vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate);
@@ -1996,9 +2335,12 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data,
memcpy(&vc4_hdmi->audio.infoframe, &params->cea, sizeof(params->cea));
vc4_hdmi_set_audio_infoframe(encoder);
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
mutex_unlock(&vc4_hdmi->mutex);
- return 0;
+ return ret;
}
static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = {
@@ -2061,6 +2403,14 @@ static struct hdmi_codec_pdata vc4_hdmi_codec_pdata = {
.i2s = 1,
};
+static void vc4_hdmi_audio_codec_release(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+ vc4_hdmi->audio.codec_pdev = NULL;
+}
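This release callback replaces the old vc4_hdmi_audio_exit() and is tied to the struct device with devm_add_action_or_reset() further down in the audio init hunk. That helper runs the action at unbind, and if the registration itself fails it runs the action immediately and returns the error, so callers need no manual error path. A usage sketch with hypothetical names:

static void foo_child_release(void *ptr)
{
        struct foo *foo = ptr;

        platform_device_unregister(foo->child_pdev);
}

static int foo_register_child(struct device *dev, struct foo *foo)
{
        foo->child_pdev = platform_device_register_data(dev, "foo-child",
                                                        PLATFORM_DEVID_AUTO,
                                                        NULL, 0);
        if (IS_ERR(foo->child_pdev))
                return PTR_ERR(foo->child_pdev);

        /* On failure, the release runs right away and the error is returned. */
        return devm_add_action_or_reset(dev, foo_child_release, foo);
}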
+
static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
{
const struct vc4_hdmi_register *mai_data =
@@ -2073,6 +2423,26 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
int index, len;
int ret;
+ /*
+ * ASoC makes it a bit hard to retrieve a pointer to the
+ * vc4_hdmi structure. Registering the card will overwrite our
+ * device drvdata with a pointer to the snd_soc_card structure,
+ * which can then be used to retrieve whatever drvdata we want
+ * to associate.
+ *
+ * However, that doesn't fly in the case where we don't
+ * register an ASoC card (because of an old DT that is missing
+ * the dmas properties, for example): the card isn't
+ * registered and the device drvdata isn't set.
+ *
+ * We can deal with both cases by making sure a snd_soc_card
+ * pointer and a vc4_hdmi structure point to the same
+ * memory address, so we can treat the two interchangeably
+ * without any issue.
+ */
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
+ BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
+
if (!of_find_property(dev->of_node, "dmas", &len) || !len) {
dev_warn(dev,
"'dmas' DT property is missing or empty, no HDMI audio\n");
@@ -2102,6 +2472,30 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
vc4_hdmi->audio.dma_data.maxburst = 2;
+ /*
+ * NOTE: Strictly speaking, we should probably use a DRM-managed
+ * registration here so that the audio components are only
+ * removed once the driver no longer has any user.
+ *
+ * However, the ASoC core uses a number of devm_kzalloc calls
+ * when registering, even when using non-device-managed
+ * functions (such as in snd_soc_register_component()).
+ *
+ * If we call snd_soc_unregister_component() in a DRM-managed
+ * action, the device-managed actions have already been executed
+ * and thus we would access memory that has been freed.
+ *
+ * Using device-managed hooks here probably leaves us open to a
+ * bunch of issues if userspace still has a handle on the ALSA
+ * device when the device is removed. However, this is mitigated
+ * by the use of drm_dev_enter()/drm_dev_exit() in the audio
+ * path, which prevents access to the device resources once
+ * the device is gone.
+ *
+ * Finally, the vc4_hdmi structure is DRM-managed and thus only
+ * freed whenever the last user has closed the DRM device file.
+ * It should thus outlive ALSA in most situations.
+ */
ret = devm_snd_dmaengine_pcm_register(dev, &pcm_conf, 0);
if (ret) {
dev_err(dev, "Could not register PCM component: %d\n", ret);
@@ -2125,6 +2519,10 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
vc4_hdmi->audio.codec_pdev = codec_pdev;
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_audio_codec_release, vc4_hdmi);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
@@ -2163,12 +2561,6 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
-static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
-{
- platform_device_unregister(vc4_hdmi->audio.codec_pdev);
- vc4_hdmi->audio.codec_pdev = NULL;
-}
-
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -2191,21 +2583,19 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
unsigned int hpd_con = platform_get_irq_byname(pdev, "hpd-connected");
unsigned int hpd_rm = platform_get_irq_byname(pdev, "hpd-removed");
- ret = request_threaded_irq(hpd_con,
- NULL,
- vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
- "vc4 hdmi hpd connected", vc4_hdmi);
+ ret = devm_request_threaded_irq(&pdev->dev, hpd_con,
+ NULL,
+ vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
+ "vc4 hdmi hpd connected", vc4_hdmi);
if (ret)
return ret;
- ret = request_threaded_irq(hpd_rm,
- NULL,
- vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
- "vc4 hdmi hpd disconnected", vc4_hdmi);
- if (ret) {
- free_irq(hpd_con, vc4_hdmi);
+ ret = devm_request_threaded_irq(&pdev->dev, hpd_rm,
+ NULL,
+ vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
+ "vc4 hdmi hpd disconnected", vc4_hdmi);
+ if (ret)
return ret;
- }
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -2213,16 +2603,6 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static void vc4_hdmi_hotplug_exit(struct vc4_hdmi *vc4_hdmi)
-{
- struct platform_device *pdev = vc4_hdmi->pdev;
-
- if (vc4_hdmi->variant->external_irq_controller) {
- free_irq(platform_get_irq_byname(pdev, "hpd-connected"), vc4_hdmi);
- free_irq(platform_get_irq_byname(pdev, "hpd-removed"), vc4_hdmi);
- }
-}
-
#ifdef CONFIG_DRM_VC4_HDMI_CEC
static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
{
@@ -2296,6 +2676,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi)
{
u32 cntrl1;
+ /*
+ * We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we
+ * will do is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus
+ * always succeed if we are here.
+ */
+
lockdep_assert_held(&vc4_hdmi->hw_lock);
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
@@ -2324,6 +2715,17 @@ static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi)
lockdep_assert_held(&vc4_hdmi->hw_lock);
+ /*
+ * We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we
+ * will do is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus
+ * always succeed if we are here.
+ */
+
vc4_hdmi->cec_rx_msg.len = 0;
cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1);
vc4_cec_read_msg(vc4_hdmi, cntrl1);
@@ -2355,6 +2757,17 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
irqreturn_t ret;
u32 cntrl5;
+ /*
+ * We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we
+ * will do is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus
+ * always succeed if we are here.
+ */
+
if (!(stat & VC4_HDMI_CPU_CEC))
return IRQ_NONE;
@@ -2375,26 +2788,29 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
unsigned long flags;
u32 val;
int ret;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
- if (ret)
+ if (ret) {
+ drm_dev_exit(idx);
return ret;
+ }
+
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -2430,24 +2846,28 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+ drm_dev_exit(idx);
+
return 0;
}
static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
+
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
@@ -2459,8 +2879,12 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+
pm_runtime_put(&vc4_hdmi->pdev->dev);
+ drm_dev_exit(idx);
+
return 0;
}
@@ -2475,24 +2899,27 @@ static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
{
struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(drm, &idx))
+ /*
+ * We can't return an error code, because the CEC
+ * framework will emit WARN_ON messages at unbind
+ * otherwise.
+ */
+ return 0;
+ mutex_lock(&vc4_hdmi->mutex);
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
HDMI_WRITE(HDMI_CEC_CNTRL_1,
(HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
(log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+
+ drm_dev_exit(idx);
return 0;
}
@@ -2505,23 +2932,19 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
unsigned long flags;
u32 val;
unsigned int i;
+ int idx;
- /*
- * NOTE: This function should really take vc4_hdmi->mutex, but doing so
- * results in a reentrancy since cec_s_phys_addr_from_edid() called in
- * .detect or .get_modes might call .adap_enable, which leads to this
- * function being called with that mutex held.
- *
- * Concurrency is not an issue for the moment since we don't share any
- * state with KMS, so we can ignore the lock for now, but we need to
- * keep it in mind if we were to change that assumption.
- */
+ if (!drm_dev_enter(dev, &idx))
+ return -ENODEV;
if (msg->len > 16) {
drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len);
+ drm_dev_exit(idx);
return -ENOMEM;
}
+ mutex_lock(&vc4_hdmi->mutex);
+
spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
for (i = 0; i < msg->len; i += 4)
@@ -2541,6 +2964,8 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
HDMI_WRITE(HDMI_CEC_CNTRL_1, val);
spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+ mutex_unlock(&vc4_hdmi->mutex);
+ drm_dev_exit(idx);
return 0;
}
@@ -2551,6 +2976,14 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
.adap_transmit = vc4_hdmi_cec_adap_transmit,
};
+static void vc4_hdmi_cec_release(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ cec_unregister_adapter(vc4_hdmi->cec_adap);
+ vc4_hdmi->cec_adap = NULL;
+}
+
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
struct cec_connector_info conn_info;
@@ -2575,73 +3008,82 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
if (vc4_hdmi->variant->external_irq_controller) {
- ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-rx"),
- vc4_cec_irq_handler_rx_bare,
- vc4_cec_irq_handler_rx_thread, 0,
- "vc4 hdmi cec rx", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
+ vc4_cec_irq_handler_rx_bare,
+ vc4_cec_irq_handler_rx_thread, 0,
+ "vc4 hdmi cec rx", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
- ret = request_threaded_irq(platform_get_irq_byname(pdev, "cec-tx"),
- vc4_cec_irq_handler_tx_bare,
- vc4_cec_irq_handler_tx_thread, 0,
- "vc4 hdmi cec tx", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
+ vc4_cec_irq_handler_tx_bare,
+ vc4_cec_irq_handler_tx_thread, 0,
+ "vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_remove_cec_rx_handler;
+ goto err_delete_cec_adap;
} else {
- ret = request_threaded_irq(platform_get_irq(pdev, 0),
- vc4_cec_irq_handler,
- vc4_cec_irq_handler_thread, 0,
- "vc4 hdmi cec", vc4_hdmi);
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4_hdmi);
if (ret)
goto err_delete_cec_adap;
}
ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
if (ret < 0)
- goto err_remove_handlers;
+ goto err_delete_cec_adap;
- return 0;
-
-err_remove_handlers:
- if (vc4_hdmi->variant->external_irq_controller)
- free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
- else
- free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
+ /*
+ * NOTE: Strictly speaking, we should probably use a DRM-managed
+ * registration here so that the CEC adapter is only removed
+ * once the DRM driver no longer has any user.
+ *
+ * However, the CEC framework already cleans up the CEC adapter
+ * only when the last user has closed its file descriptor, so we
+ * don't need to handle it in DRM.
+ *
+ * By the time the device-managed hook is executed, we will give
+ * up our reference to the CEC adapter and therefore don't
+ * really care when it's actually freed.
+ *
+ * There's still a problematic sequence: we unregister our
+ * CEC adapter while userspace keeps a handle on the CEC
+ * adapter but not on the DRM device, for some reason. In such a
+ * case, our vc4_hdmi structure will be freed, but the
+ * cec_adapter structure will have a dangling pointer to what
+ * used to be our HDMI controller. If we get a CEC call at that
+ * moment, we could end up with a use-after-free. Fortunately,
+ * the CEC framework already handles this too, by calling
+ * cec_is_registered() in cec_ioctl() and cec_poll().
+ */
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
+ if (ret)
+ return ret;
-err_remove_cec_rx_handler:
- if (vc4_hdmi->variant->external_irq_controller)
- free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
+ return 0;
err_delete_cec_adap:
cec_delete_adapter(vc4_hdmi->cec_adap);
return ret;
}
-
-static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi)
-{
- struct platform_device *pdev = vc4_hdmi->pdev;
-
- if (vc4_hdmi->variant->external_irq_controller) {
- free_irq(platform_get_irq_byname(pdev, "cec-rx"), vc4_hdmi);
- free_irq(platform_get_irq_byname(pdev, "cec-tx"), vc4_hdmi);
- } else {
- free_irq(platform_get_irq(pdev, 0), vc4_hdmi);
- }
-
- cec_unregister_adapter(vc4_hdmi->cec_adap);
-}
#else
static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
-
-static void vc4_hdmi_cec_exit(struct vc4_hdmi *vc4_hdmi) {};
#endif
-static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
+static void vc4_hdmi_free_regset(struct drm_device *drm, void *ptr)
+{
+ struct debugfs_reg32 *regs = ptr;
+
+ kfree(regs);
+}
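Unlike the devm actions used elsewhere in this patch, a drmm action receives the drm_device in its callback and only runs once the last reference to the DRM device is dropped, not at unbind time. A minimal sketch under those assumptions:

static void foo_free_buffer(struct drm_device *drm, void *ptr)
{
        kfree(ptr);
}

static int foo_alloc_buffer(struct drm_device *drm)
{
        void *buf = kzalloc(SZ_4K, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* Freed when the last drm_dev_put() releases the device. */
        return drmm_add_action_or_reset(drm, foo_free_buffer, buf);
}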
+
+static int vc4_hdmi_build_regset(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi,
struct debugfs_regset32 *regset,
enum vc4_hdmi_regs reg)
{
@@ -2649,6 +3091,7 @@ static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
struct debugfs_reg32 *regs, *new_regs;
unsigned int count = 0;
unsigned int i;
+ int ret;
regs = kcalloc(variant->num_registers, sizeof(*regs),
GFP_KERNEL);
@@ -2674,10 +3117,15 @@ static int vc4_hdmi_build_regset(struct vc4_hdmi *vc4_hdmi,
regset->regs = new_regs;
regset->nregs = count;
+ ret = drmm_add_action_or_reset(drm, vc4_hdmi_free_regset, new_regs);
+ if (ret)
+ return ret;
+
return 0;
}
-static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_init_resources(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
@@ -2691,11 +3139,11 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
if (IS_ERR(vc4_hdmi->hd_regs))
return PTR_ERR(vc4_hdmi->hd_regs);
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
@@ -2718,7 +3166,8 @@ static int vc4_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
-static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
+static int vc5_hdmi_init_resources(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi)
{
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
@@ -2820,35 +3269,35 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return PTR_ERR(vc4_hdmi->reset);
}
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hdmi_regset, VC4_HDMI);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->hd_regset, VC4_HD);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->cec_regset, VC5_CEC);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->csc_regset, VC5_CSC);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->dvp_regset, VC5_DVP);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->phy_regset, VC5_PHY);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->ram_regset, VC5_RAM);
if (ret)
return ret;
- ret = vc4_hdmi_build_regset(vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
+ ret = vc4_hdmi_build_regset(drm, vc4_hdmi, &vc4_hdmi->rm_regset, VC5_RM);
if (ret)
return ret;
@@ -2898,6 +3347,13 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
return 0;
}
+static void vc4_hdmi_put_ddc_device(void *ptr)
+{
+ struct vc4_hdmi *vc4_hdmi = ptr;
+
+ put_device(&vc4_hdmi->ddc->dev);
+}
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@@ -2908,10 +3364,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
struct device_node *ddc_node;
int ret;
- vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL);
+ vc4_hdmi = drmm_kzalloc(drm, sizeof(*vc4_hdmi), GFP_KERNEL);
if (!vc4_hdmi)
return -ENOMEM;
- mutex_init(&vc4_hdmi->mutex);
+
+ ret = drmm_mutex_init(drm, &vc4_hdmi->mutex);
+ if (ret)
+ return ret;
+
spin_lock_init(&vc4_hdmi->hw_lock);
INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq);
@@ -2935,7 +3395,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
vc4_hdmi->scdc_enabled = true;
- ret = variant->init_resources(vc4_hdmi);
+ ret = variant->init_resources(drm, vc4_hdmi);
if (ret)
return ret;
@@ -2952,13 +3412,16 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return -EPROBE_DEFER;
}
+ ret = devm_add_action_or_reset(dev, vc4_hdmi_put_ddc_device, vc4_hdmi);
+ if (ret)
+ return ret;
+
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
vc4_hdmi->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
if (IS_ERR(vc4_hdmi->hpd_gpio)) {
- ret = PTR_ERR(vc4_hdmi->hpd_gpio);
- goto err_put_ddc;
+ return PTR_ERR(vc4_hdmi->hpd_gpio);
}
vc4_hdmi->disable_wifi_frequencies =
@@ -2972,7 +3435,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4_hdmi->disable_4kp60 = true;
}
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* We need to have the device powered up at this point to call
@@ -2980,7 +3445,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
*/
ret = pm_runtime_resume_and_get(dev);
if (ret)
- goto err_disable_runtime_pm;
+ return ret;
if ((of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi0") ||
of_device_is_compatible(dev->of_node, "brcm,bcm2711-hdmi1")) &&
@@ -2990,93 +3455,43 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
}
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
+ ret = drmm_encoder_init(drm, encoder,
+ &vc4_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+ if (ret)
+ goto err_put_runtime_pm;
+
drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs);
ret = vc4_hdmi_connector_init(drm, vc4_hdmi);
if (ret)
- goto err_destroy_encoder;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_hotplug_init(vc4_hdmi);
if (ret)
- goto err_destroy_conn;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_cec_init(vc4_hdmi);
if (ret)
- goto err_free_hotplug;
+ goto err_put_runtime_pm;
ret = vc4_hdmi_audio_init(vc4_hdmi);
if (ret)
- goto err_free_cec;
-
- vc4_debugfs_add_file(drm, variant->debugfs_name,
- vc4_hdmi_debugfs_regs,
- vc4_hdmi);
+ goto err_put_runtime_pm;
pm_runtime_put_sync(dev);
return 0;
-err_free_cec:
- vc4_hdmi_cec_exit(vc4_hdmi);
-err_free_hotplug:
- vc4_hdmi_hotplug_exit(vc4_hdmi);
-err_destroy_conn:
- vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
-err_destroy_encoder:
- drm_encoder_cleanup(encoder);
+err_put_runtime_pm:
pm_runtime_put_sync(dev);
-err_disable_runtime_pm:
- pm_runtime_disable(dev);
-err_put_ddc:
- put_device(&vc4_hdmi->ddc->dev);
return ret;
}
-static void vc4_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct vc4_hdmi *vc4_hdmi;
-
- /*
- * ASoC makes it a bit hard to retrieve a pointer to the
- * vc4_hdmi structure. Registering the card will overwrite our
- * device drvdata with a pointer to the snd_soc_card structure,
- * which can then be used to retrieve whatever drvdata we want
- * to associate.
- *
- * However, that doesn't fly in the case where we wouldn't
- * register an ASoC card (because of an old DT that is missing
- * the dmas properties for example), then the card isn't
- * registered and the device drvdata wouldn't be set.
- *
- * We can deal with both cases by making sure a snd_soc_card
- * pointer and a vc4_hdmi structure are pointing to the same
- * memory address, so we can treat them indistinctly without any
- * issue.
- */
- BUILD_BUG_ON(offsetof(struct vc4_hdmi_audio, card) != 0);
- BUILD_BUG_ON(offsetof(struct vc4_hdmi, audio) != 0);
- vc4_hdmi = dev_get_drvdata(dev);
-
- kfree(vc4_hdmi->hdmi_regset.regs);
- kfree(vc4_hdmi->hd_regset.regs);
-
- vc4_hdmi_audio_exit(vc4_hdmi);
- vc4_hdmi_cec_exit(vc4_hdmi);
- vc4_hdmi_hotplug_exit(vc4_hdmi);
- vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
- drm_encoder_cleanup(&vc4_hdmi->encoder.base);
-
- pm_runtime_disable(dev);
-
- put_device(&vc4_hdmi->ddc->dev);
-}
-
static const struct component_ops vc4_hdmi_ops = {
.bind = vc4_hdmi_bind,
- .unbind = vc4_hdmi_unbind,
};
static int vc4_hdmi_dev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index c3ed2b07df23..db823efb2563 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -58,7 +58,8 @@ struct vc4_hdmi_variant {
/* Callback to get the resources (memory region, interrupts,
* clocks, etc) for that variant.
*/
- int (*init_resources)(struct vc4_hdmi *vc4_hdmi);
+ int (*init_resources)(struct drm_device *drm,
+ struct vc4_hdmi *vc4_hdmi);
/* Callback to reset the HDMI block */
void (*reset)(struct vc4_hdmi *vc4_hdmi);
@@ -71,7 +72,7 @@ struct vc4_hdmi_variant {
/* Callback to configure the video timings in the HDMI block */
void (*set_timings)(struct vc4_hdmi *vc4_hdmi,
struct drm_connector_state *state,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/* Callback to initialize the PHY according to the connector state */
void (*phy_init)(struct vc4_hdmi *vc4_hdmi,
@@ -194,15 +195,7 @@ struct vc4_hdmi {
/**
* @mutex: Mutex protecting the driver access across multiple
- * frameworks (KMS, ALSA).
- *
- * NOTE: While supported, CEC has been left out since
- * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a
- * reentrancy issue between .get_modes (or .detect) and .adap_enable.
- * Since we don't share any state between the CEC hooks and KMS', it's
- * not a big deal. The only trouble might come from updating the CEC
- * clock divider which might be affected by a modeset, but CEC should
- * be resilient to that.
+ * frameworks (KMS, ALSA, CEC).
*/
struct mutex mutex;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index fbaa741dda5f..4ac9f5a2d5f9 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>
#include "vc4_drv.h"
@@ -66,8 +67,12 @@ static const struct debugfs_reg32 hvs_regs[] = {
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
+ struct drm_device *drm = &hvs->vc4->base;
struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
- int i;
+ int idx, i;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
drm_print_regset32(&p, &hvs->regset);
@@ -80,6 +85,8 @@ void vc4_hvs_dump_state(struct vc4_hvs *hvs)
readl((u32 __iomem *)hvs->dlist + i + 2),
readl((u32 __iomem *)hvs->dlist + i + 3));
}
+
+ drm_dev_exit(idx);
}
static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
@@ -175,6 +182,11 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
int ret, i;
u32 __iomem *dst_kernel;
+ /*
+ * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
+ * here since this function is only called from vc4_hvs_bind().
+ */
+
ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
if (ret) {
DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
@@ -199,10 +211,15 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
struct vc4_crtc *vc4_crtc)
{
+ struct drm_device *drm = &hvs->vc4->base;
struct drm_crtc *crtc = &vc4_crtc->base;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ int idx;
u32 i;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
/* The LUT memory is laid out with each HVS channel in order,
* each of which takes 256 writes for R, 256 for G, then 256
* for B.
@@ -217,6 +234,8 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
for (i = 0; i < crtc->gamma_size; i++)
HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
@@ -238,7 +257,12 @@ static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
+ struct drm_device *drm = &hvs->vc4->base;
u8 field = 0;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return 0;
switch (fifo) {
case 0:
@@ -255,6 +279,7 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
break;
}
+ drm_dev_exit(idx);
return field;
}
@@ -267,6 +292,12 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
if (!vc4->is_vc5)
return output;
+ /*
+ * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
+ * here, but this function is only used during the DRM device
+ * initialization, so we should be fine.
+ */
+
switch (output) {
case 0:
return 0;
@@ -315,12 +346,17 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
struct drm_display_mode *mode, bool oneshot)
{
struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
unsigned int chan = vc4_crtc_state->assigned_channel;
bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
u32 dispbkgndx;
u32 dispctrl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return -ENODEV;
HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
@@ -362,14 +398,22 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
*/
vc4_hvs_lut_load(hvs, vc4_crtc);
+ drm_dev_exit(idx);
+
return 0;
}
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
- if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ struct drm_device *drm = &hvs->vc4->base;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
return;
+ if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)
+ goto out;
+
HVS_WRITE(SCALER_DISPCTRLX(chan),
HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
HVS_WRITE(SCALER_DISPCTRLX(chan),
@@ -385,6 +429,9 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
(SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
SCALER_DISPSTATX_EMPTY);
+
+out:
+ drm_dev_exit(idx);
}
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
@@ -426,9 +473,15 @@ static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
vc4_state->mm.start);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
@@ -513,6 +566,12 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx)) {
+ vc4_crtc_send_vblank(crtc);
+ return;
+ }
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
@@ -583,26 +642,44 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
vc4_hvs_dump_state(hvs);
}
+
+ drm_dev_exit(idx);
}
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
- u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+ struct drm_device *drm = &hvs->vc4->base;
+ u32 dispctrl;
+ int idx;
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
+ drm_dev_exit(idx);
}
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
- u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+ struct drm_device *drm = &hvs->vc4->base;
+ u32 dispctrl;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+ dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+
+ drm_dev_exit(idx);
}
static void vc4_hvs_report_underrun(struct drm_device *dev)
@@ -623,6 +700,17 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
u32 control;
u32 status;
+ /*
+ * NOTE: We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So when the device goes away, one of the first things we
+ * will do is unregister the interrupt handler, and only then
+ * unregister the DRM device. drm_dev_enter() would thus
+ * always succeed if we are here.
+ */
+
status = HVS_READ(SCALER_DISPSTAT);
control = HVS_READ(SCALER_DISPCTRL);
@@ -645,6 +733,39 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
return irqret;
}
+int vc4_hvs_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_hvs *hvs = vc4->hvs;
+ int ret;
+
+ if (!vc4->hvs)
+ return -ENODEV;
+
+ if (!vc4->is_vc5)
+ debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ &vc4->load_tracker_enabled);
+
+ ret = vc4_debugfs_add_file(minor, "hvs_dlists",
+ vc4_hvs_debugfs_dlist, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_file(minor, "hvs_underrun",
+ vc4_hvs_debugfs_underrun, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_regset32(minor, "hvs_regs",
+ &hvs->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
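How this gets called is not part of this hunk; one plausible wiring, an assumption rather than something shown in this diff, is the driver's debugfs_init hook in struct drm_driver, which runs when the DRM device is registered:

static void foo_debugfs_init(struct drm_minor *minor)
{
        /* debugfs is best-effort, so errors are only logged. */
        if (vc4_hvs_debugfs_init(minor))
                drm_dbg(minor->dev, "HVS debugfs setup failed\n");
}

static const struct drm_driver foo_drm_driver = {
        /* ... */
        .debugfs_init = foo_debugfs_init,
};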
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -655,10 +776,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
u32 dispctrl;
u32 reg;
- hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
+ hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
return -ENOMEM;
-
hvs->vc4 = vc4;
hvs->pdev = pdev;
@@ -771,12 +891,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
- vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
- NULL);
- vc4_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist,
- NULL);
-
return 0;
}
@@ -786,11 +900,18 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
+ struct drm_mm_node *node, *next;
if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);
+ drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
+ drm_mm_remove_node(node);
+
drm_mm_takedown(&vc4->hvs->dlist_mm);
+
+ drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
+ drm_mm_remove_node(node);
drm_mm_takedown(&vc4->hvs->lbm_mm);
clk_disable_unprepare(hvs->core_clk);
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 2eacfb6773d2..1e6db0121ccd 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -105,7 +105,7 @@ vc4_overflow_mem_work(struct work_struct *work)
}
vc4->bin_alloc_overflow = BIT(bin_bo_slot);
- V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
+ V3D_WRITE(V3D_BPOA, bo->base.dma_addr + bin_bo_slot * vc4->bin_alloc_size);
V3D_WRITE(V3D_BPOS, bo->base.base.size);
V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
@@ -295,7 +295,7 @@ vc4_irq_disable(struct drm_device *dev)
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
/* Finish any interrupt handler still in flight. */
- disable_irq(vc4->irq);
+ synchronize_irq(vc4->irq);
cancel_work_sync(&vc4->overflow_mem_work);
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index b45dcdfd7306..4419e810103d 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -18,7 +18,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 79a74184d732..c4ac2c946238 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -133,6 +133,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
idr_destroy(&vc4file->perfmon.idr);
mutex_unlock(&vc4file->perfmon.lock);
+ mutex_destroy(&vc4file->perfmon.lock);
}
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index f27e87a23df7..8b92a45a3c89 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -19,11 +19,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include "uapi/drm/vc4_drm.h"
@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -360,7 +360,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
return ret;
for (i = 0; i < num_planes; i++)
- vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
+ vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
/*
* We don't support subpixel source positioning for scaling,
@@ -1220,6 +1220,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
int i;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ goto out;
vc4_state->hw_dlist = dlist;
@@ -1227,6 +1231,9 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
for (i = 0; i < vc4_state->dlist_count; i++)
writel(vc4_state->dlist[i], &dlist[i]);
+ drm_dev_exit(idx);
+
+out:
return vc4_state->dlist_count;
}
@@ -1244,14 +1251,18 @@ u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
- struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
uint32_t addr;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
/* We're skipping the address adjustment for negative origin,
* because this is only called on the primary plane.
*/
WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->paddr + fb->offsets[0];
+ addr = bo->dma_addr + fb->offsets[0];
/* Write the new address into the hardware immediately. The
* scanout will start from this address as soon as the FIFO
@@ -1264,6 +1275,8 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
* also use our updated address.
*/
vc4_state->dlist[vc4_state->ptr0_offset] = addr;
+
+ drm_dev_exit(idx);
}
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
@@ -1272,6 +1285,10 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct vc4_plane_state *vc4_state, *new_vc4_state;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
swap(plane->state->fb, new_plane_state->fb);
plane->state->crtc_x = new_plane_state->crtc_x;
@@ -1334,6 +1351,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
&vc4_state->hw_dlist[vc4_state->pos2_offset]);
writel(vc4_state->dlist[vc4_state->ptr0_offset],
&vc4_state->hw_dlist[vc4_state->ptr0_offset]);
+
+ drm_dev_exit(idx);
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
@@ -1388,7 +1407,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
if (!state->fb)
return 0;
- bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
drm_gem_plane_helper_prepare_fb(plane, state);
@@ -1406,7 +1425,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
if (plane->state->fb == state->fb || !state->fb)
return;
- bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
vc4_bo_dec_usecnt(bo);
}
@@ -1483,8 +1502,6 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_funcs vc4_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .set_property = NULL,
.reset = vc4_plane_reset,
.atomic_duplicate_state = vc4_plane_duplicate_state,
.atomic_destroy_state = vc4_plane_destroy_state,
@@ -1492,14 +1509,14 @@ static const struct drm_plane_funcs vc4_plane_funcs = {
};
struct drm_plane *vc4_plane_init(struct drm_device *dev,
- enum drm_plane_type type)
+ enum drm_plane_type type,
+ uint32_t possible_crtcs)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_plane *plane = NULL;
+ struct drm_plane *plane;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
int num_formats = 0;
- int ret = 0;
unsigned i;
static const uint64_t modifiers[] = {
DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
@@ -1510,11 +1527,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_FORMAT_MOD_INVALID
};
- vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
- GFP_KERNEL);
- if (!vc4_plane)
- return ERR_PTR(-ENOMEM);
-
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
formats[num_formats] = hvs_formats[i].drm;
@@ -1522,13 +1534,14 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
}
}
+ vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
+ possible_crtcs,
+ &vc4_plane_funcs,
+ formats, num_formats,
+ modifiers, type, NULL);
+ if (IS_ERR(vc4_plane))
+ return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- ret = drm_universal_plane_init(dev, plane, 0,
- &vc4_plane_funcs,
- formats, num_formats,
- modifiers, type, NULL);
- if (ret)
- return ERR_PTR(ret);
if (vc4->is_vc5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
@@ -1575,13 +1588,11 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
*/
for (i = 0; i < 16; i++) {
struct drm_plane *plane =
- vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
+ vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
+ GENMASK(drm->mode_config.num_crtc - 1, 0));
if (IS_ERR(plane))
continue;
-
- plane->possible_crtcs =
- GENMASK(drm->mode_config.num_crtc - 1, 0);
}
drm_for_each_crtc(crtc, drm) {
@@ -1589,9 +1600,9 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* since we overlay planes on the CRTC in the order they were
* initialized.
*/
- cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+ cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
+ drm_crtc_mask(crtc));
if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
crtc->cursor = cursor_plane;
}
}
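
With drmm_universal_plane_alloc(), the plane structure is allocated and the embedded drm_plane initialised in a single DRM-managed call, and possible_crtcs is supplied up front instead of being patched in after initialisation. A hedged sketch using a hypothetical wrapper structure (the funcs, format and modifier tables are assumed to exist elsewhere):

	struct my_plane {
		struct drm_plane base;
		/* driver-private fields */
	};

	static struct drm_plane *my_plane_create(struct drm_device *dev,
						 enum drm_plane_type type,
						 uint32_t possible_crtcs)
	{
		struct my_plane *plane;

		/* Allocation, drm_universal_plane_init() and managed cleanup
		 * in one step; my_plane_funcs, my_formats and my_modifiers
		 * are assumed to be defined elsewhere.
		 */
		plane = drmm_universal_plane_alloc(dev, struct my_plane, base,
						   possible_crtcs,
						   &my_plane_funcs,
						   my_formats,
						   ARRAY_SIZE(my_formats),
						   my_modifiers, type, NULL);
		if (IS_ERR(plane))
			return ERR_CAST(plane);

		return &plane->base;
	}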
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index f6b7dc3df08c..1bda5010f15a 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -40,14 +40,14 @@
#include "vc4_packet.h"
struct vc4_rcl_setup {
- struct drm_gem_cma_object *color_read;
- struct drm_gem_cma_object *color_write;
- struct drm_gem_cma_object *zs_read;
- struct drm_gem_cma_object *zs_write;
- struct drm_gem_cma_object *msaa_color_write;
- struct drm_gem_cma_object *msaa_zs_write;
-
- struct drm_gem_cma_object *rcl;
+ struct drm_gem_dma_object *color_read;
+ struct drm_gem_dma_object *color_write;
+ struct drm_gem_dma_object *zs_read;
+ struct drm_gem_dma_object *zs_write;
+ struct drm_gem_dma_object *msaa_color_write;
+ struct drm_gem_dma_object *msaa_zs_write;
+
+ struct drm_gem_dma_object *rcl;
u32 next_offset;
u32 next_write_bo_index;
@@ -97,11 +97,11 @@ static void vc4_store_before_load(struct vc4_rcl_setup *setup)
* coordinates packet, and instead just store to the address given.
*/
static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *bo,
+ struct drm_gem_dma_object *bo,
struct drm_vc4_submit_rcl_surface *surf,
uint8_t x, uint8_t y)
{
- return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
+ return bo->dma_addr + surf->offset + VC4_TILE_BUFFER_SIZE *
(DIV_ROUND_UP(exec->args->width, 32) * y + x);
}
@@ -142,7 +142,7 @@ static void emit_tile(struct vc4_exec_info *exec,
} else {
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->color_read.bits);
- rcl_u32(setup, setup->color_read->paddr +
+ rcl_u32(setup, setup->color_read->dma_addr +
args->color_read.offset);
}
}
@@ -164,7 +164,7 @@ static void emit_tile(struct vc4_exec_info *exec,
} else {
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->zs_read.bits);
- rcl_u32(setup, setup->zs_read->paddr +
+ rcl_u32(setup, setup->zs_read->dma_addr +
args->zs_read.offset);
}
}
@@ -232,7 +232,7 @@ static void emit_tile(struct vc4_exec_info *exec,
(last_tile_write ?
0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
rcl_u32(setup,
- (setup->zs_write->paddr + args->zs_write.offset) |
+ (setup->zs_write->dma_addr + args->zs_write.offset) |
((last && last_tile_write) ?
VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
}
@@ -355,7 +355,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
rcl_u32(setup,
- (setup->color_write ? (setup->color_write->paddr +
+ (setup->color_write ? (setup->color_write->dma_addr +
args->color_write.offset) :
0));
rcl_u16(setup, args->width);
@@ -374,14 +374,14 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
BUG_ON(setup->next_offset != size);
- exec->ct1ca = setup->rcl->paddr;
- exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+ exec->ct1ca = setup->rcl->dma_addr;
+ exec->ct1ea = setup->rcl->dma_addr + setup->next_offset;
return 0;
}
static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
- struct drm_gem_cma_object *obj,
+ struct drm_gem_dma_object *obj,
struct drm_vc4_submit_rcl_surface *surf)
{
struct drm_vc4_submit_cl *args = exec->args;
@@ -407,7 +407,7 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
}
static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
@@ -433,7 +433,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
}
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf,
bool is_write)
{
@@ -533,7 +533,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
static int
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
struct vc4_rcl_setup *setup,
- struct drm_gem_cma_object **obj,
+ struct drm_gem_dma_object **obj,
struct drm_vc4_submit_rcl_surface *surf)
{
uint8_t tiling = VC4_GET_FIELD(surf->bits,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index d20b0bc51a18..bd181b5a7b52 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -15,8 +15,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
@@ -155,7 +156,6 @@ struct vc4_txp {
struct drm_writeback_connector connector;
void __iomem *regs;
- struct debugfs_regset32 regset;
};
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
@@ -276,13 +276,15 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
struct drm_atomic_state *state)
{
+ struct drm_device *drm = conn->dev;
struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
- struct drm_gem_cma_object *gem;
+ struct drm_gem_dma_object *gem;
struct drm_display_mode *mode;
struct drm_framebuffer *fb;
u32 ctrl;
+ int idx;
int i;
if (WARN_ON(!conn_state->writeback_job))
@@ -312,8 +314,11 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
*/
ctrl |= TXP_ALPHA_INVERT;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ gem = drm_fb_dma_get_gem_obj(fb, 0);
+ TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
TXP_WRITE(TXP_DIM,
VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
@@ -322,6 +327,8 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
drm_writeback_queue_job(&txp->connector, conn_state);
+
+ drm_dev_exit(idx);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
@@ -337,16 +344,10 @@ vc4_txp_connector_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-static void vc4_txp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static const struct drm_connector_funcs vc4_txp_connector_funcs = {
.detect = vc4_txp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_txp_connector_destroy,
+ .destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -354,7 +355,12 @@ static const struct drm_connector_funcs vc4_txp_connector_funcs = {
static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *drm = encoder->dev;
struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
@@ -369,6 +375,8 @@ static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
}
TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+
+ drm_dev_exit(idx);
}
static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
@@ -384,13 +392,13 @@ static void vc4_txp_disable_vblank(struct drm_crtc *crtc) {}
static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
- .destroy = vc4_crtc_destroy,
.page_flip = vc4_page_flip,
.reset = vc4_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
.enable_vblank = vc4_txp_enable_vblank,
.disable_vblank = vc4_txp_disable_vblank,
+ .late_register = vc4_crtc_late_register,
};
static int vc4_txp_atomic_check(struct drm_crtc *crtc,
@@ -453,6 +461,16 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
struct vc4_txp *txp = data;
struct vc4_crtc *vc4_crtc = &txp->base;
+ /*
+ * We don't need to protect the register access using
+ * drm_dev_enter() here because the interrupt handler lifetime
+ * is tied to the device itself, and not to the DRM device.
+ *
+ * So once the device goes away, one of the first things we do
+ * is unregister the interrupt handler, and only then unregister
+ * the DRM device. drm_dev_enter() would thus always succeed if
+ * we got here.
+ */
TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
vc4_crtc_handle_vblank(vc4_crtc);
drm_writeback_signal_completion(&txp->connector, 0);
@@ -461,6 +479,7 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
}
static const struct vc4_crtc_data vc4_txp_crtc_data = {
+ .debugfs_name = "txp_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
};
@@ -469,7 +488,6 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_crtc *vc4_crtc;
struct vc4_txp *txp;
struct drm_crtc *crtc;
@@ -480,7 +498,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
if (irq < 0)
return irq;
- txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+ txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
if (!txp)
return -ENOMEM;
vc4_crtc = &txp->base;
@@ -495,9 +513,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
- txp->regset.base = txp->regs;
- txp->regset.regs = txp_regs;
- txp->regset.nregs = ARRAY_SIZE(txp_regs);
+ vc4_crtc->regset.base = txp->regs;
+ vc4_crtc->regset.regs = txp_regs;
+ vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
@@ -523,9 +541,6 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
return ret;
dev_set_drvdata(dev, txp);
- vc4->txp = txp;
-
- vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset);
return 0;
}
@@ -533,13 +548,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
static void vc4_txp_unbind(struct device *dev, struct device *master,
void *data)
{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_txp *txp = dev_get_drvdata(dev);
- vc4_txp_connector_destroy(&txp->connector.base);
-
- vc4->txp = NULL;
+ drm_connector_cleanup(&txp->connector.base);
}
static const struct component_ops vc4_txp_ops = {
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index cc714dcfe1f2..56abb0d6bc39 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -231,7 +231,7 @@ try_again:
* if it doesn't fit within the buffer that we allocated up front.
* However, it turns out that 16MB is "enough for anybody", and
* real-world applications run into allocation failures from the
- * overall CMA pool before they make scenes complicated enough to run
+ * overall DMA pool before they make scenes complicated enough to run
* out of bin space.
*/
static int bin_bo_alloc(struct vc4_dev *vc4)
@@ -261,15 +261,15 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
dev_err(&v3d->pdev->dev,
"Failed to allocate memory for tile binning: "
- "%d. You may need to enable CMA or give it "
+ "%d. You may need to enable DMA or give it "
"more memory.",
ret);
break;
}
/* Check if this BO won't trigger the addressing bug. */
- if ((bo->base.paddr & 0xf0000000) ==
- ((bo->base.paddr + bo->base.base.size - 1) & 0xf0000000)) {
+ if ((bo->base.dma_addr & 0xf0000000) ==
+ ((bo->base.dma_addr + bo->base.base.size - 1) & 0xf0000000)) {
vc4->bin_bo = bo;
/* Set up for allocating 512KB chunks of
@@ -393,14 +393,34 @@ static int vc4_v3d_runtime_resume(struct device *dev)
vc4_v3d_init_hw(&vc4->base);
- /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
- enable_irq(vc4->irq);
vc4_irq_enable(&vc4->base);
return 0;
}
#endif
+int vc4_v3d_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *drm = minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_v3d *v3d = vc4->v3d;
+ int ret;
+
+ if (!vc4->v3d)
+ return -ENODEV;
+
+ ret = vc4_debugfs_add_file(minor, "v3d_ident",
+ vc4_v3d_debugfs_ident, NULL);
+ if (ret)
+ return ret;
+
+ ret = vc4_debugfs_add_regset32(minor, "v3d_regs", &v3d->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -443,44 +463,47 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
}
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ vc4->irq = ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_runtime_pm;
}
- ret = clk_prepare_enable(v3d->clk);
- if (ret != 0)
- return ret;
-
/* Reset the binner overflow address/size at setup, to be sure
* we don't reuse an old one.
*/
V3D_WRITE(V3D_BPOA, 0);
V3D_WRITE(V3D_BPOS, 0);
- vc4_v3d_init_hw(drm);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- vc4->irq = ret;
-
ret = vc4_irq_install(drm, vc4->irq);
if (ret) {
DRM_ERROR("Failed to install IRQ handler\n");
- return ret;
+ goto err_put_runtime_pm;
}
- pm_runtime_set_active(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
- pm_runtime_enable(dev);
-
- vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
- vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
return 0;
+
+err_put_runtime_pm:
+ pm_runtime_put(dev);
+
+ return ret;
}
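
The V3D bind path now follows the common runtime-PM probe pattern: devm_pm_runtime_enable() arranges for pm_runtime_disable() on unbind, and pm_runtime_resume_and_get() powers the block up before its registers are touched, with a matching pm_runtime_put() on the error paths. A minimal sketch of the pattern, with a hypothetical my_hw_init() standing in for the register setup:

	#include <linux/pm_runtime.h>

	static int my_bind(struct device *dev)
	{
		int ret;

		/* Managed enable: the matching disable runs automatically on
		 * driver unbind, so the error paths need not undo it.
		 */
		ret = devm_pm_runtime_enable(dev);
		if (ret)
			return ret;

		/* Resume the device (and fail cleanly) before register access. */
		ret = pm_runtime_resume_and_get(dev);
		if (ret)
			return ret;

		ret = my_hw_init(dev);	/* hypothetical setup that may fail */
		if (ret)
			goto err_put;

		return 0;

	err_put:
		pm_runtime_put(dev);
		return ret;
	}

Whether the reference taken by pm_runtime_resume_and_get() is kept or released on success is a per-driver decision and is left out of the sketch.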
static void vc4_v3d_unbind(struct device *dev, struct device *master,
@@ -489,8 +512,6 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
- pm_runtime_disable(dev);
-
vc4_irq_uninstall(drm);
/* Disable the binner's overflow memory address, so the next
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 2feba55bcef7..520231af4df9 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -102,11 +102,11 @@ size_is_lt(uint32_t width, uint32_t height, int cpp)
height <= 4 * utile_height(cpp));
}
-struct drm_gem_cma_object *
+struct drm_gem_dma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
struct vc4_dev *vc4 = exec->dev;
- struct drm_gem_cma_object *obj;
+ struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->is_vc5))
@@ -129,7 +129,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
return obj;
}
-static struct drm_gem_cma_object *
+static struct drm_gem_dma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
@@ -160,7 +160,7 @@ gl_shader_rec_size(uint32_t pointer_bits)
}
bool
-vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
+vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo,
uint32_t offset, uint8_t tiling_format,
uint32_t width, uint32_t height, uint8_t cpp)
{
@@ -263,7 +263,7 @@ validate_increment_semaphore(VALIDATE_ARGS)
static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
- struct drm_gem_cma_object *ib;
+ struct drm_gem_dma_object *ib;
uint32_t length = *(uint32_t *)(untrusted + 1);
uint32_t offset = *(uint32_t *)(untrusted + 5);
uint32_t max_index = *(uint32_t *)(untrusted + 9);
@@ -294,7 +294,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
return -EINVAL;
}
- *(uint32_t *)(validated + 5) = ib->paddr + offset;
+ *(uint32_t *)(validated + 5) = ib->dma_addr + offset;
return 0;
}
@@ -400,7 +400,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
* free when the job completes rendering.
*/
exec->bin_slots |= BIT(bin_slot);
- bin_addr = vc4->bin_bo->base.paddr + bin_slot * vc4->bin_alloc_size;
+ bin_addr = vc4->bin_bo->base.dma_addr + bin_slot * vc4->bin_alloc_size;
/* The tile state data array is 48 bytes per tile, and we put it at
* the start of a BO containing both it and the tile alloc.
@@ -575,7 +575,7 @@ reloc_tex(struct vc4_exec_info *exec,
struct vc4_texture_sample_info *sample,
uint32_t texture_handle_index, bool is_cs)
{
- struct drm_gem_cma_object *tex;
+ struct drm_gem_dma_object *tex;
uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
uint32_t p2 = (sample->p_offset[2] != ~0 ?
@@ -608,7 +608,7 @@ reloc_tex(struct vc4_exec_info *exec,
"outside of UBO\n");
goto fail;
}
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
return true;
}
@@ -736,7 +736,7 @@ reloc_tex(struct vc4_exec_info *exec,
offset -= level_size;
}
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
if (is_cs) {
exec->bin_dep_seqno = max(exec->bin_dep_seqno,
@@ -765,7 +765,7 @@ validate_gl_shader_rec(struct drm_device *dev,
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
+ struct drm_gem_dma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
@@ -840,7 +840,7 @@ validate_gl_shader_rec(struct drm_device *dev,
void *uniform_data_u;
uint32_t tex, uni;
- *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
+ *(uint32_t *)(pkt_v + o) = bo[i]->dma_addr + src_offset;
if (src_offset != 0) {
DRM_DEBUG("Shaders must be at offset 0 of "
@@ -896,7 +896,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
for (i = 0; i < nr_attributes; i++) {
- struct drm_gem_cma_object *vbo =
+ struct drm_gem_dma_object *vbo =
bo[ARRAY_SIZE(shader_reloc_offsets) + i];
uint32_t o = 36 + i * 8;
uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
@@ -928,7 +928,7 @@ validate_gl_shader_rec(struct drm_device *dev,
}
}
- *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
+ *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;
}
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index e315aeb5fef5..9745f8810eca 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -776,7 +776,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
}
struct vc4_validated_shader_info *
-vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+vc4_validate_shader(struct drm_gem_dma_object *shader_obj)
{
struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev);
bool found_shader_end = false;
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 11fc3d6f66b1..0b3333865702 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -14,6 +14,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
@@ -160,48 +161,28 @@ struct vc4_vec_variant {
/* General VEC hardware state. */
struct vc4_vec {
+ struct vc4_encoder encoder;
+ struct drm_connector connector;
+
struct platform_device *pdev;
const struct vc4_vec_variant *variant;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
-
void __iomem *regs;
struct clk *clock;
- const struct vc4_vec_tv_mode *tv_mode;
-
struct debugfs_regset32 regset;
};
#define VEC_READ(offset) readl(vec->regs + (offset))
#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
-/* VC4 VEC encoder KMS struct */
-struct vc4_vec_encoder {
- struct vc4_encoder base;
- struct vc4_vec *vec;
-};
-
-static inline struct vc4_vec_encoder *
-to_vc4_vec_encoder(struct drm_encoder *encoder)
+static inline struct vc4_vec *
+encoder_to_vc4_vec(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_vec_encoder, base.base);
+ return container_of(encoder, struct vc4_vec, encoder.base);
}
-/* VC4 VEC connector KMS struct */
-struct vc4_vec_connector {
- struct drm_connector base;
- struct vc4_vec *vec;
-
- /* Since the connector is attached to just the one encoder,
- * this is the reference to it so we can do the best_encoder()
- * hook.
- */
- struct drm_encoder *encoder;
-};
-
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
@@ -211,7 +192,9 @@ enum vc4_vec_tv_mode_id {
struct vc4_vec_tv_mode {
const struct drm_display_mode *mode;
- void (*mode_set)(struct vc4_vec *vec);
+ u32 config0;
+ u32 config1;
+ u32 custom_freq;
};
static const struct debugfs_reg32 vec_regs[] = {
@@ -241,63 +224,41 @@ static const struct debugfs_reg32 vec_regs[] = {
VC4_REG32(VEC_DAC_MISC),
};
-static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
-static void vc4_vec_ntsc_j_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
static const struct drm_display_mode ntsc_mode = {
DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 3, 480 + 3 + 3, 480 + 3 + 3 + 16, 0,
+ 480, 480 + 7, 480 + 7 + 6, 525, 0,
DRM_MODE_FLAG_INTERLACE)
};
-static void vc4_vec_pal_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
- VEC_WRITE(VEC_CONFIG1, VEC_CONFIG1_C_CVBS_CVBS);
-}
-
-static void vc4_vec_pal_m_mode_set(struct vc4_vec *vec)
-{
- VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_PAL_BDGHI_STD);
- VEC_WRITE(VEC_CONFIG1,
- VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ);
- VEC_WRITE(VEC_FREQ3_2, 0x223b);
- VEC_WRITE(VEC_FREQ1_0, 0x61d1);
-}
-
static const struct drm_display_mode pal_mode = {
DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 2, 576 + 2 + 3, 576 + 2 + 3 + 20, 0,
+ 576, 576 + 4, 576 + 4 + 6, 625, 0,
DRM_MODE_FLAG_INTERLACE)
};
static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
[VC4_VEC_TV_MODE_NTSC] = {
.mode = &ntsc_mode,
- .mode_set = vc4_vec_ntsc_mode_set,
+ .config0 = VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_NTSC_J] = {
.mode = &ntsc_mode,
- .mode_set = vc4_vec_ntsc_j_mode_set,
+ .config0 = VEC_CONFIG0_NTSC_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL] = {
.mode = &pal_mode,
- .mode_set = vc4_vec_pal_mode_set,
+ .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL_M] = {
.mode = &pal_mode,
- .mode_set = vc4_vec_pal_m_mode_set,
+ .config0 = VEC_CONFIG0_PAL_BDGHI_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x223b61d1,
},
};
@@ -307,12 +268,6 @@ vc4_vec_connector_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
}
-static void vc4_vec_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static int vc4_vec_connector_get_modes(struct drm_connector *connector)
{
struct drm_connector_state *state = connector->state;
@@ -333,7 +288,6 @@ static int vc4_vec_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_funcs vc4_vec_connector_funcs = {
.detect = vc4_vec_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = vc4_vec_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -343,42 +297,38 @@ static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs =
.get_modes = vc4_vec_connector_get_modes,
};
-static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
- struct vc4_vec *vec)
+static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
{
- struct drm_connector *connector = NULL;
- struct vc4_vec_connector *vec_connector;
-
- vec_connector = devm_kzalloc(dev->dev, sizeof(*vec_connector),
- GFP_KERNEL);
- if (!vec_connector)
- return ERR_PTR(-ENOMEM);
+ struct drm_connector *connector = &vec->connector;
+ int ret;
- connector = &vec_connector->base;
connector->interlace_allowed = true;
- vec_connector->encoder = vec->encoder;
- vec_connector->vec = vec;
+ ret = drmm_connector_init(dev, connector, &vc4_vec_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite, NULL);
+ if (ret)
+ return ret;
- drm_connector_init(dev, connector, &vc4_vec_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
drm_connector_helper_add(connector, &vc4_vec_connector_helper_funcs);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_mode_property,
VC4_VEC_TV_MODE_NTSC);
- vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
- drm_connector_attach_encoder(connector, vec->encoder);
+ drm_connector_attach_encoder(connector, &vec->encoder.base);
- return connector;
+ return 0;
}
-static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
+static void vc4_vec_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
- int ret;
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ int idx, ret;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
VEC_WRITE(VEC_CFG, 0);
VEC_WRITE(VEC_DAC_MISC,
@@ -392,20 +342,35 @@ static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
ret = pm_runtime_put(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to release power domain: %d\n", ret);
- return;
+ goto err_dev_exit;
}
+
+ drm_dev_exit(idx);
+ return;
+
+err_dev_exit:
+ drm_dev_exit(idx);
}
-static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
+static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
- int ret;
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ struct drm_connector *connector = &vec->connector;
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ const struct vc4_vec_tv_mode *tv_mode =
+ &vc4_vec_tv_modes[conn_state->tv.mode];
+ int idx, ret;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
ret = pm_runtime_get_sync(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
- return;
+ goto err_dev_exit;
}
/*
@@ -418,13 +383,13 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(vec->clock, 108000000);
if (ret) {
DRM_ERROR("Failed to set clock rate: %d\n", ret);
- return;
+ goto err_put_runtime_pm;
}
ret = clk_prepare_enable(vec->clock);
if (ret) {
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- return;
+ goto err_put_runtime_pm;
}
/* Reset the different blocks */
@@ -455,29 +420,27 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder)
/* Mask all interrupts. */
VEC_WRITE(VEC_MASK0, 0);
- vec->tv_mode->mode_set(vec);
+ VEC_WRITE(VEC_CONFIG0, tv_mode->config0);
+ VEC_WRITE(VEC_CONFIG1, tv_mode->config1);
+
+ if (tv_mode->custom_freq) {
+ VEC_WRITE(VEC_FREQ3_2,
+ (tv_mode->custom_freq >> 16) & 0xffff);
+ VEC_WRITE(VEC_FREQ1_0,
+ tv_mode->custom_freq & 0xffff);
+ }
VEC_WRITE(VEC_DAC_MISC,
VEC_DAC_MISC_VID_ACT | VEC_DAC_MISC_DAC_RST_N);
VEC_WRITE(VEC_CFG, VEC_CFG_VEC_EN);
-}
+ drm_dev_exit(idx);
+ return;
-static bool vc4_vec_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static void vc4_vec_encoder_atomic_mode_set(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
- struct vc4_vec *vec = vc4_vec_encoder->vec;
-
- vec->tv_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+err_put_runtime_pm:
+ pm_runtime_put(&vec->pdev->dev);
+err_dev_exit:
+ drm_dev_exit(idx);
}
static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
@@ -496,11 +459,27 @@ static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = {
- .disable = vc4_vec_encoder_disable,
- .enable = vc4_vec_encoder_enable,
- .mode_fixup = vc4_vec_encoder_mode_fixup,
.atomic_check = vc4_vec_encoder_atomic_check,
- .atomic_mode_set = vc4_vec_encoder_atomic_mode_set,
+ .atomic_disable = vc4_vec_encoder_disable,
+ .atomic_enable = vc4_vec_encoder_enable,
+};
+
+static int vc4_vec_late_register(struct drm_encoder *encoder)
+{
+ struct drm_device *drm = encoder->dev;
+ struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
+ int ret;
+
+ ret = vc4_debugfs_add_regset32(drm->primary, "vec_regs",
+ &vec->regset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
+ .late_register = vc4_vec_late_register,
};
static const struct vc4_vec_variant bcm2835_vec_variant = {
@@ -532,9 +511,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_vec *vec;
- struct vc4_vec_encoder *vc4_vec_encoder;
int ret;
ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
@@ -542,18 +519,11 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vec = devm_kzalloc(dev, sizeof(*vec), GFP_KERNEL);
+ vec = drmm_kzalloc(drm, sizeof(*vec), GFP_KERNEL);
if (!vec)
return -ENOMEM;
- vc4_vec_encoder = devm_kzalloc(dev, sizeof(*vc4_vec_encoder),
- GFP_KERNEL);
- if (!vc4_vec_encoder)
- return -ENOMEM;
- vc4_vec_encoder->base.type = VC4_ENCODER_TYPE_VEC;
- vc4_vec_encoder->vec = vec;
- vec->encoder = &vc4_vec_encoder->base.base;
-
+ vec->encoder.type = VC4_ENCODER_TYPE_VEC;
vec->pdev = pdev;
vec->variant = (const struct vc4_vec_variant *)
of_device_get_match_data(dev);
@@ -572,49 +542,30 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pm_runtime_enable(dev);
-
- drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
- vec->connector = vc4_vec_connector_init(drm, vec);
- if (IS_ERR(vec->connector)) {
- ret = PTR_ERR(vec->connector);
- goto err_destroy_encoder;
- }
+ ret = drmm_encoder_init(drm, &vec->encoder.base,
+ &vc4_vec_encoder_funcs,
+ DRM_MODE_ENCODER_TVDAC,
+ NULL);
+ if (ret)
+ return ret;
- dev_set_drvdata(dev, vec);
+ drm_encoder_helper_add(&vec->encoder.base, &vc4_vec_encoder_helper_funcs);
- vc4->vec = vec;
+ ret = vc4_vec_connector_init(drm, vec);
+ if (ret)
+ return ret;
- vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
+ dev_set_drvdata(dev, vec);
return 0;
-
-err_destroy_encoder:
- drm_encoder_cleanup(vec->encoder);
- pm_runtime_disable(dev);
-
- return ret;
-}
-
-static void vc4_vec_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- struct vc4_vec *vec = dev_get_drvdata(dev);
-
- vc4_vec_connector_destroy(vec->connector);
- drm_encoder_cleanup(vec->encoder);
- pm_runtime_disable(dev);
-
- vc4->vec = NULL;
}
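
The VEC bind path now uses the managed drmm_encoder_init() and drmm_connector_init() helpers, which register their cleanup against the drm_device; that is what allows the unbind callback and the explicit destroy hooks to be deleted. A hedged sketch of the pattern with a hypothetical output structure (the encoder and connector funcs are assumed to exist):

	#include <drm/drm_managed.h>

	struct my_output {
		struct drm_encoder encoder;
		struct drm_connector connector;
	};

	static int my_output_init(struct drm_device *drm, struct my_output *out)
	{
		int ret;

		/* Cleanup for both objects is handled by DRM-managed actions,
		 * so no unbind/destroy callbacks are required.
		 */
		ret = drmm_encoder_init(drm, &out->encoder, &my_encoder_funcs,
					DRM_MODE_ENCODER_TVDAC, NULL);
		if (ret)
			return ret;

		ret = drmm_connector_init(drm, &out->connector,
					  &my_connector_funcs,
					  DRM_MODE_CONNECTOR_Composite, NULL);
		if (ret)
			return ret;

		return drm_connector_attach_encoder(&out->connector,
						    &out->encoder);
	}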
static const struct component_ops vc4_vec_ops = {
.bind = vc4_vec_bind,
- .unbind = vc4_vec_unbind,
};
static int vc4_vec_dev_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
index 84db4eee7828..8b978dd51a25 100644
--- a/drivers/gpu/drm/via/Makefile
+++ b/drivers/gpu/drm/via/Makefile
@@ -3,6 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-via-y := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+via-y := via_dri1.o
obj-$(CONFIG_DRM_VIA) +=via.o
diff --git a/drivers/gpu/drm/via/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
index 462375d543b9..eb848508b12b 100644
--- a/drivers/gpu/drm/via/via_3d_reg.h
+++ b/drivers/gpu/drm/via/via_3d_reg.h
@@ -1,25 +1,7 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * Copyright 1998-2011 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2011 S3 Graphics, Inc. All Rights Reserved.
*/
#ifndef VIA_3D_REG_H
@@ -50,6 +32,7 @@
#define HC_ParaType_Palette 0x0003
#define HC_ParaType_PreCR 0x0010
#define HC_ParaType_Auto 0x00fe
+#define INV_ParaType_Dummy 0x00300000
/* Transmission Space
*/
@@ -173,10 +156,10 @@
#define HC_HSPXOS_SHIFT 12
#define HC_HSPYOS_MASK 0x00000fff
-/* Command
+/*
* Command A
*/
-#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
+#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
#define HC_HE3Fire_MASK 0x00100000
#define HC_HPMType_MASK 0x000f0000
#define HC_HEFlag_MASK 0x0000e000
@@ -236,6 +219,8 @@
/* Enable Setting
*/
#define HC_SubA_HEnable 0x0000
+#define HC_HenForce1P_MASK 0x00800000 /* [Force 1 Pipe] */
+#define HC_HenZDCheck_MASK 0x00400000 /* [Z dirty bit settings] */
#define HC_HenTXEnvMap_MASK 0x00200000
#define HC_HenVertexCNT_MASK 0x00100000
#define HC_HenCPUDAZ_MASK 0x00080000
@@ -684,6 +669,12 @@
/* Texture subtype definitions
*/
+#define HC_SubType_Samp0 0x00000020
+#define HC_SubType_Samp1 0x00000021
+
+
+/* Texture subtype definitions
+ */
#define HC_SubType_Tex0 0x00000000
#define HC_SubType_Tex1 0x00000001
#define HC_SubType_TexGeneral 0x000000fe
@@ -762,7 +753,13 @@
#define HC_SubA_HTXnBumpM10 0x0092
#define HC_SubA_HTXnBumpM11 0x0093
#define HC_SubA_HTXnLScale 0x0094
-#define HC_SubA_HTXSMD 0x0000
+
+#define HC_SubA_HTXSMD 0x0000
+#define HC_SubA_HTXYUV2RGB1 0x0001
+#define HC_SubA_HTXYUV2RGB2 0x0002
+#define HC_SubA_HTXYUV2RGB3 0x0003
+#define HTXYUV2RGB4BT601 (1<<23)
+#define HTXYUV2RGB4BT709 (1<<22)
/* HC_SubA_HTXnL012BasH 0x0020
*/
#define HC_HTXnL0BasH_MASK 0x000000ff
@@ -965,6 +962,7 @@
#define HC_HTXnFM_Lum 0x00100000
#define HC_HTXnFM_Alpha 0x00180000
#define HC_HTXnFM_DX 0x00280000
+#define HC_HTXnFM_YUV 0x00300000
#define HC_HTXnFM_ARGB16 0x00880000
#define HC_HTXnFM_ARGB32 0x00980000
#define HC_HTXnFM_ABGR16 0x00a80000
@@ -995,6 +993,12 @@
#define HC_HTXnFM_DX1 (HC_HTXnFM_DX | 0x00010000)
#define HC_HTXnFM_DX23 (HC_HTXnFM_DX | 0x00020000)
#define HC_HTXnFM_DX45 (HC_HTXnFM_DX | 0x00030000)
+/* YUV packed mode */
+#define HC_HTXnFM_YUY2 (HC_HTXnFM_YUV | 0x00000000)
+/* YUV planar mode */
+#define HC_HTXnFM_YV12 (HC_HTXnFM_YUV | 0x00040000)
+/* YUV planar mode */
+#define HC_HTXnFM_IYUV (HC_HTXnFM_YUV | 0x00040000)
#define HC_HTXnFM_RGB555 (HC_HTXnFM_ARGB16 | 0x00000000)
#define HC_HTXnFM_RGB565 (HC_HTXnFM_ARGB16 | 0x00010000)
#define HC_HTXnFM_ARGB1555 (HC_HTXnFM_ARGB16 | 0x00020000)
@@ -1023,6 +1027,13 @@
#define HC_HTXnLoc_Local 0x00000000
#define HC_HTXnLoc_Sys 0x00000002
#define HC_HTXnLoc_AGP 0x00000003
+
+/* Video Texture */
+#define HC_HTXnYUV2RGBMode_RGB 0x00000000
+#define HC_HTXnYUV2RGBMode_SDTV 0x00000001
+#define HC_HTXnYUV2RGBMode_HDTV 0x00000002
+#define HC_HTXnYUV2RGBMode_TABLE 0x00000003
+
/* HC_SubA_HTXnTRAH 0x007f
*/
#define HC_HTXnTRAH_MASK 0x00ff0000
@@ -1330,9 +1341,9 @@
*/
#define HC_HFthRTXA_MASK 0x000000ff
-/******************************************************************************
-** Define the Halcyon Internal register access constants. For simulator only.
-******************************************************************************/
+/****************************************************************************
+ * Define the Halcyon Internal register access constants. For simulator only.
+ ***************************************************************************/
#define HC_SIMA_HAGPBstL 0x0000
#define HC_SIMA_HAGPBendL 0x0001
#define HC_SIMA_HAGPCMNT 0x0002
@@ -1477,80 +1488,80 @@
#define HC_SIMA_TX0TX1_OFF 0x0050
/*---- start of texture 1 setting ----
*/
-#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
#define HC_SIMA_HTX1Lc_11HE (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
/*---- end of texture 1 setting ---- 0xaf
*/
#define HC_SIMA_HTXSMD 0x00b0
@@ -1580,9 +1591,9 @@
#define HC_SIMA_HRErr 0x0445
#define HC_SIMA_FIFOstatus 0x0446
-/******************************************************************************
-** Define the AGP command header.
-******************************************************************************/
+/****************************************************************************
+ * Define the AGP command header.
+ ***************************************************************************/
#define HC_ACMD_MASK 0xfe000000
#define HC_ACMD_SUB_MASK 0x0c000000
#define HC_ACMD_HCmdA 0xee000000
@@ -1605,18 +1616,18 @@
#define HC_ACMD_H4COUNT_MASK 0x01fffe00
#define HC_ACMD_H4COUNT_SHIFT 9
-/********************************************************************************
-** Define Header
-********************************************************************************/
-#define HC_HEADER2 0xF210F110
+/*****************************************************************************
+ * Define Header
+ ****************************************************************************/
+#define HC_HEADER2 0xF210F110
-/********************************************************************************
-** Define Dummy Value
-********************************************************************************/
-#define HC_DUMMY 0xCCCCCCCC
-/********************************************************************************
-** Define for DMA use
-********************************************************************************/
+/*****************************************************************************
+ * Define Dummy Value
+ ****************************************************************************/
+#define HC_DUMMY 0xCCCCCCCC
+/*****************************************************************************
+ * Define for DMA use
+ ****************************************************************************/
#define HALCYON_HEADER2 0XF210F110
#define HALCYON_FIRECMD 0XEE100000
#define HALCYON_FIREMASK 0XFFF00000
@@ -1643,8 +1654,118 @@
#define HC_HAGPBpID_STOP 0x00000002
#define HC_HAGPBpH_MASK 0x00ffffff
+
#define VIA_VIDEO_HEADER5 0xFE040000
#define VIA_VIDEO_HEADER6 0xFE050000
#define VIA_VIDEO_HEADER7 0xFE060000
#define VIA_VIDEOMASK 0xFFFF0000
+
+/*****************************************************************************
+ * Define for H5 DMA use
+ ****************************************************************************/
+#define H5_HC_DUMMY 0xCC000000
+
+/* Command Header Type */
+#define INV_DUMMY_MASK 0xFF000000
+#define INV_AGPHeader0 0xFE000000
+#define INV_AGPHeader1 0xFE010000
+#define INV_AGPHeader2 0xFE020000
+#define INV_AGPHeader3 0xFE030000
+#define INV_AGPHeader4 0xFE040000
+#define INV_AGPHeader5 0xFE050000
+#define INV_AGPHeader6 0xFE060000
+#define INV_AGPHeader7 0xFE070000
+#define INV_AGPHeader9 0xFE090000
+#define INV_AGPHeaderA 0xFE0A0000
+#define INV_AGPHeader40 0xFE400000
+#define INV_AGPHeader41 0xFE410000
+#define INV_AGPHeader43 0xFE430000
+#define INV_AGPHeader45 0xFE450000
+#define INV_AGPHeader47 0xFE470000
+#define INV_AGPHeader4A 0xFE4A0000
+#define INV_AGPHeader82 0xFE820000
+#define INV_AGPHeader83 0xFE830000
+#define INV_AGPHeader_MASK 0xFFFF0000
+#define INV_AGPHeader2A 0xFE2A0000
+#define INV_AGPHeader25 0xFE250000
+#define INV_AGPHeader20 0xFE200000
+#define INV_AGPHeader23 0xFE230000
+#define INV_AGPHeaderE2 0xFEE20000
+#define INV_AGPHeaderE3 0xFEE30000
+
+/* Transmission IO Space */
+#define INV_REG_CR_TRANS 0x041C
+#define INV_REG_CR_BEGIN 0x0420
+#define INV_REG_CR_END 0x0438
+
+#define INV_REG_3D_TRANS 0x043C
+#define INV_REG_3D_BEGIN 0x0440
+#define INV_REG_3D_END 0x06FC
+
+#define INV_ParaType_CmdVdata 0x0000
+
+/* H5 Enable Setting
+ */
+#define INV_HC_SubA_HEnable1 0x00
+
+#define INV_HC_HenAT4ALLRT_MASK 0x00100000
+#define INV_HC_HenATMRT3_MASK 0x00080000
+#define INV_HC_HenATMRT2_MASK 0x00040000
+#define INV_HC_HenATMRT1_MASK 0x00020000
+#define INV_HC_HenATMRT0_MASK 0x00010000
+#define INV_HC_HenSCMRT3_MASK 0x00008000
+#define INV_HC_HenSCMRT2_MASK 0x00004000
+#define INV_HC_HenSCMRT1_MASK 0x00002000
+#define INV_HC_HenSCMRT0_MASK 0x00001000
+#define INV_HC_HenFOGMRT3_MASK 0x00000800
+#define INV_HC_HenFOGMRT2_MASK 0x00000400
+#define INV_HC_HenFOGMRT1_MASK 0x00000200
+#define INV_HC_HenFOGMRT0_MASK 0x00000100
+#define INV_HC_HenABLMRT3_MASK 0x00000080
+#define INV_HC_HenABLMRT2_MASK 0x00000040
+#define INV_HC_HenABLMRT1_MASK 0x00000020
+#define INV_HC_HenABLMRT0_MASK 0x00000010
+#define INV_HC_HenDTMRT3_MASK 0x00000008
+#define INV_HC_HenDTMRT2_MASK 0x00000004
+#define INV_HC_HenDTMRT1_MASK 0x00000002
+#define INV_HC_HenDTMRT0_MASK 0x00000001
+
+#define INV_HC_SubA_HEnable2 0x01
+
+#define INV_HC_HenLUL2DR_MASK 0x00800000
+#define INV_HC_HenLDIAMOND_MASK 0x00400000
+#define INV_HC_HenPSPRITE_MASK 0x00200000
+#define INV_HC_HenC2S_MASK 0x00100000
+#define INV_HC_HenFOGPP_MASK 0x00080000
+#define INV_HC_HenSCPP_MASK 0x00040000
+#define INV_HC_HenCPP_MASK 0x00020000
+#define INV_HC_HenCZ_MASK 0x00002000
+#define INV_HC_HenVC_MASK 0x00001000
+#define INV_HC_HenCL_MASK 0x00000800
+#define INV_HC_HenPS_MASK 0x00000400
+#define INV_HC_HenWCZ_MASK 0x00000200
+#define INV_HC_HenTXCH_MASK 0x00000100
+#define INV_HC_HenBFCULL_MASK 0x00000080
+#define INV_HC_HenCW_MASK 0x00000040
+#define INV_HC_HenAA_MASK 0x00000020
+#define INV_HC_HenST_MASK 0x00000010
+#define INV_HC_HenZT_MASK 0x00000008
+#define INV_HC_HenZW_MASK 0x00000004
+#define INV_HC_HenSP_MASK 0x00000002
+#define INV_HC_HenLP_MASK 0x00000001
+
+/* H5 Miscellaneous Settings
+ */
+#define INV_HC_SubA_HCClipTL 0x0080
+#define INV_HC_SubA_HCClipBL 0x0081
+#define INV_HC_SubA_HSClipTL 0x0082
+#define INV_HC_SubA_HSClipBL 0x0083
+#define INV_HC_SubA_HSolidCL 0x0086
+#define INV_HC_SubA_HSolidCH 0x0087
+#define INV_HC_SubA_HGBClipGL 0x0088
+#define INV_HC_SubA_HGBClipGR 0x0089
+
+
+#define INV_HC_ParaType_Vetex 0x00040000
+
#endif
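/*
 * A minimal illustrative sketch (hypothetical helper, not something the
 * header defines): the HC_SIMA_HTX1* sub-addresses above mirror their
 * HC_SIMA_HTX0* counterparts at a fixed HC_SIMA_TX0TX1_OFF offset, so a
 * per-texture register can be derived generically, assuming that uniform
 * stride holds for every mirrored register.
 */
static inline unsigned int hc_sima_tex_reg(unsigned int tx0_reg, int tex)
{
	/* tex == 0 yields the texture-0 register, tex == 1 its TX1 mirror. */
	return tx0_reg + tex * HC_SIMA_TX0TX1_OFF;
}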
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
deleted file mode 100644
index 177b0499abf1..000000000000
--- a/drivers/gpu/drm/via/via_dma.c
+++ /dev/null
@@ -1,744 +0,0 @@
-/* via_dma.c -- DMA support for the VIA Unichrome/Pro
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
- * All Rights Reserved.
- *
- * Copyright 2004 The Unichrome project.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Tungsten Graphics,
- * Erdi Chen,
- * Thomas Hellstrom.
- */
-
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm.h>
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-#include "via_3d_reg.h"
-
-#define CMDBUF_ALIGNMENT_SIZE (0x100)
-#define CMDBUF_ALIGNMENT_MASK (0x0ff)
-
-/* defines for VIA 3D registers */
-#define VIA_REG_STATUS 0x400
-#define VIA_REG_TRANSET 0x43C
-#define VIA_REG_TRANSPACE 0x440
-
-/* VIA_REG_STATUS(0x400): Engine Status */
-#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
-#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
-#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
-#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
-
-#define SetReg2DAGP(nReg, nData) { \
- *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
- *((uint32_t *)(vb) + 1) = (nData); \
- vb = ((uint32_t *)vb) + 2; \
- dev_priv->dma_low += 8; \
-}
-
-#define via_flush_write_combine() mb()
-
-#define VIA_OUT_RING_QW(w1, w2) do { \
- *vb++ = (w1); \
- *vb++ = (w2); \
- dev_priv->dma_low += 8; \
-} while (0)
-
-static void via_cmdbuf_start(drm_via_private_t *dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
-static int via_wait_idle(drm_via_private_t *dev_priv);
-static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
-
-/*
- * Free space in command buffer.
- */
-
-static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-
- return ((hw_addr <= dev_priv->dma_low) ?
- (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
- (hw_addr - dev_priv->dma_low));
-}
-
-/*
- * How much does the command regulator lag behind?
- */
-
-static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-
- return ((hw_addr <= dev_priv->dma_low) ?
- (dev_priv->dma_low - hw_addr) :
- (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
-}
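/*
 * A standalone worked example of the wrap-around arithmetic used by
 * via_cmdbuf_space() above (illustrative only; the numbers are made up
 * and the helper names are hypothetical).
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ring_space(uint32_t hw, uint32_t low, uint32_t high)
{
	/* Same expression as via_cmdbuf_space(), with bare integers. */
	return (hw <= low) ? (high + hw - low) : (hw - low);
}

static void ring_space_example(void)
{
	/* 64 KiB ring, CPU write pointer at 0x8000, hardware reader at 0x2000. */
	assert(ring_space(0x2000, 0x8000, 0x10000) == 0xA000);
	/* After the reader has passed the write pointer (post-wrap). */
	assert(ring_space(0xC000, 0x8000, 0x10000) == 0x4000);
}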
-
-/*
- * Check that the given size fits in the buffer, otherwise wait.
- */
-
-static inline int
-via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t cur_addr, hw_addr, next_addr;
- volatile uint32_t *hw_addr_ptr;
- uint32_t count;
- hw_addr_ptr = dev_priv->hw_addr_ptr;
- cur_addr = dev_priv->dma_low;
- next_addr = cur_addr + size + 512 * 1024;
- count = 1000000;
- do {
- hw_addr = *hw_addr_ptr - agp_base;
- if (count-- == 0) {
- DRM_ERROR
- ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
- hw_addr, cur_addr, next_addr);
- return -1;
- }
- if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
- msleep(1);
- } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
- return 0;
-}
-
-/*
- * Checks whether the buffer head has reached the end. Rewinds the ring
- * buffer when necessary.
- *
- * Returns virtual pointer to ring buffer.
- */
-
-static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
- unsigned int size)
-{
- if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
- dev_priv->dma_high) {
- via_cmdbuf_rewind(dev_priv);
- }
- if (via_cmdbuf_wait(dev_priv, size) != 0)
- return NULL;
-
- return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
-}
-
-int via_dma_cleanup(struct drm_device *dev)
-{
- if (dev->dev_private) {
- drm_via_private_t *dev_priv =
- (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start) {
- via_cmdbuf_reset(dev_priv);
-
- drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- }
-
- }
-
- return 0;
-}
-
-static int via_initialize(struct drm_device *dev,
- drm_via_private_t *dev_priv,
- drm_via_dma_init_t *init)
-{
- if (!dev_priv || !dev_priv->mmio) {
- DRM_ERROR("via_dma_init called before via_map_init\n");
- return -EFAULT;
- }
-
- if (dev_priv->ring.virtual_start != NULL) {
- DRM_ERROR("called again without calling cleanup\n");
- return -EFAULT;
- }
-
- if (!dev->agp || !dev->agp->base) {
- DRM_ERROR("called with no agp memory available\n");
- return -EFAULT;
- }
-
- if (dev_priv->chipset == VIA_DX9_0) {
- DRM_ERROR("AGP DMA is not supported on this chip\n");
- return -EINVAL;
- }
-
- dev_priv->ring.map.offset = dev->agp->base + init->offset;
- dev_priv->ring.map.size = init->size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_legacy_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- via_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->dma_ptr = dev_priv->ring.virtual_start;
- dev_priv->dma_low = 0;
- dev_priv->dma_high = init->size;
- dev_priv->dma_wrap = init->size;
- dev_priv->dma_offset = init->offset;
- dev_priv->last_pause_ptr = NULL;
- dev_priv->hw_addr_ptr =
- (volatile uint32_t *)((char *)dev_priv->mmio->handle +
- init->reg_pause_addr);
-
- via_cmdbuf_start(dev_priv);
-
- return 0;
-}
-
-static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_dma_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case VIA_INIT_DMA:
- if (!capable(CAP_SYS_ADMIN))
- retcode = -EPERM;
- else
- retcode = via_initialize(dev, dev_priv, init);
- break;
- case VIA_CLEANUP_DMA:
- if (!capable(CAP_SYS_ADMIN))
- retcode = -EPERM;
- else
- retcode = via_dma_cleanup(dev);
- break;
- case VIA_DMA_INITIALIZED:
- retcode = (dev_priv->ring.virtual_start != NULL) ?
- 0 : -EFAULT;
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
-{
- drm_via_private_t *dev_priv;
- uint32_t *vb;
- int ret;
-
- dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("called without initializing AGP ring buffer.\n");
- return -EFAULT;
- }
-
- if (cmd->size > VIA_PCI_BUF_SIZE)
- return -ENOMEM;
-
- if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
- return -EFAULT;
-
- /*
- * Running this function on AGP memory is dead slow. Therefore
- * we run it on a temporary cacheable system memory buffer and
- * copy it to AGP memory when ready.
- */
-
- if ((ret =
- via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
- cmd->size, dev, 1))) {
- return ret;
- }
-
- vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
- if (vb == NULL)
- return -EAGAIN;
-
- memcpy(vb, dev_priv->pci_buf, cmd->size);
-
- dev_priv->dma_low += cmd->size;
-
- /*
-	 * Small submissions somehow stall the CPU (AGP cache effects?),
-	 * so pad them to a greater size.
- */
-
- if (cmd->size < 0x100)
- via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
- via_cmdbuf_pause(dev_priv);
-
- return 0;
-}
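/*
 * Illustrative arithmetic for the padding above (the 0x40 size is made
 * up): submissions smaller than 0x100 bytes are padded with dummy
 * quadwords until the ring advance reaches 0x100 bytes.
 */
#include <assert.h>

static void pad_example(void)
{
	unsigned int size = 0x40;
	unsigned int pad_qwords = (0x100 - size) >> 3;	/* 0x18 quadwords */

	assert(size + pad_qwords * 8 == 0x100);		/* 8 bytes per quadword */
}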
-
-int via_driver_dma_quiescent(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- if (!via_wait_idle(dev_priv))
- return -EBUSY;
- return 0;
-}
-
-static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- return via_driver_dma_quiescent(dev);
-}
-
-static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuffer_t *cmdbuf = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
- ret = via_dispatch_cmdbuffer(dev, cmdbuf);
- return ret;
-}
-
-static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
- drm_via_cmdbuffer_t *cmd)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (cmd->size > VIA_PCI_BUF_SIZE)
- return -ENOMEM;
- if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
- return -EFAULT;
-
- if ((ret =
- via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
- cmd->size, dev, 0))) {
- return ret;
- }
-
- ret =
- via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
- cmd->size);
- return ret;
-}
-
-static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuffer_t *cmdbuf = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
- ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
- return ret;
-}
-
-static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
- uint32_t * vb, int qw_count)
-{
- for (; qw_count > 0; --qw_count)
- VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
- return vb;
-}
-
-/*
- * This function is used internally by ring buffer management code.
- *
- * Returns virtual pointer to ring buffer.
- */
-static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
-{
- return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
-}
-
-/*
- * Hooks a segment of data into the tail of the ring-buffer by
- * modifying the pause address stored in the buffer itself. If
- * the regulator has already paused, restart it.
- */
-static int via_hook_segment(drm_via_private_t *dev_priv,
- uint32_t pause_addr_hi, uint32_t pause_addr_lo,
- int no_pci_fire)
-{
- int paused, count;
- volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
- uint32_t reader, ptr;
- uint32_t diff;
-
- paused = 0;
- via_flush_write_combine();
- (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
-
- *paused_at = pause_addr_lo;
- via_flush_write_combine();
- (void) *paused_at;
-
- reader = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
-
- /*
-	 * There is a possibility that the command reader will
-	 * miss the new pause address and pause on the old one.
- * In that case we need to program the new start address
- * using PCI.
- */
-
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- count = 10000000;
- while (diff == 0 && count--) {
- paused = (via_read(dev_priv, 0x41c) & 0x80000000);
- if (paused)
- break;
- reader = *(dev_priv->hw_addr_ptr);
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- }
-
- paused = via_read(dev_priv, 0x41c) & 0x80000000;
-
- if (paused && !no_pci_fire) {
- reader = *(dev_priv->hw_addr_ptr);
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- diff &= (dev_priv->dma_high - 1);
- if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
- DRM_ERROR("Paused at incorrect address. "
- "0x%08x, 0x%08x 0x%08x\n",
- ptr, reader, dev_priv->dma_diff);
- } else if (diff == 0) {
- /*
- * There is a concern that these writes may stall the PCI bus
- * if the GPU is not idle. However, idling the GPU first
- * doesn't make a difference.
- */
-
- via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
- via_read(dev_priv, VIA_REG_TRANSPACE);
- }
- }
- return paused;
-}
-
-static int via_wait_idle(drm_via_private_t *dev_priv)
-{
- int count = 10000000;
-
- while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
- ;
-
- while (count && (via_read(dev_priv, VIA_REG_STATUS) &
- (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
- VIA_3D_ENG_BUSY)))
- --count;
- return count;
-}
-
-static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
- uint32_t addr, uint32_t *cmd_addr_hi,
- uint32_t *cmd_addr_lo, int skip_wait)
-{
- uint32_t agp_base;
- uint32_t cmd_addr, addr_lo, addr_hi;
- uint32_t *vb;
- uint32_t qw_pad_count;
-
- if (!skip_wait)
- via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
-
- vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
- (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
- agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
- ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
-
- cmd_addr = (addr) ? addr :
- agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
- addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
- (cmd_addr & HC_HAGPBpL_MASK));
- addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
-
- vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
- VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
- return vb;
-}
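/*
 * Standalone arithmetic behind the alignment above (the 0x248 value is
 * made up): with CMDBUF_ALIGNMENT_SIZE = 0x100, a write pointer of
 * 0x248 needs 23 more quadwords (184 bytes) to land exactly on the next
 * 0x100-byte boundary at 0x300.
 */
#include <assert.h>

static void align_qwords_example(void)
{
	unsigned int dma_low = 0x248;
	unsigned int qw_pad_count = (0x100 >> 3) - ((dma_low & 0xff) >> 3);

	assert(qw_pad_count == 23);
	assert(dma_low + (qw_pad_count << 3) == 0x300);
}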
-
-static void via_cmdbuf_start(drm_via_private_t *dev_priv)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
- uint32_t start_addr, start_addr_lo;
- uint32_t end_addr, end_addr_lo;
- uint32_t command;
- uint32_t agp_base;
- uint32_t ptr;
- uint32_t reader;
- int count;
-
- dev_priv->dma_low = 0;
-
- agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- start_addr = agp_base;
- end_addr = agp_base + dev_priv->dma_high;
-
- start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
- end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
- command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
- ((end_addr & 0xff000000) >> 16));
-
- dev_priv->last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
- &pause_addr_hi, &pause_addr_lo, 1) - 1;
-
- via_flush_write_combine();
- (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
-
- via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- via_write(dev_priv, VIA_REG_TRANSPACE, command);
- via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
- via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
-
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
- wmb();
- via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
- via_read(dev_priv, VIA_REG_TRANSPACE);
-
- dev_priv->dma_diff = 0;
-
- count = 10000000;
- while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
-
- reader = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- /*
- * This is the difference between where we tell the
- * command reader to pause and where it actually pauses.
-	 * This differs between hw implementations, so we need to
- * detect it.
- */
-
- dev_priv->dma_diff = ptr - reader;
-}
-
-static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
-{
- uint32_t *vb;
-
- via_cmdbuf_wait(dev_priv, qwords + 2);
- vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
- via_align_buffer(dev_priv, vb, qwords);
-}
-
-static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
-{
- uint32_t *vb = via_get_dma(dev_priv);
- SetReg2DAGP(0x0C, (0 | (0 << 16)));
- SetReg2DAGP(0x10, 0 | (0 << 16));
- SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
-}
-
-static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
- uint32_t jump_addr_lo, jump_addr_hi;
- volatile uint32_t *last_pause_ptr;
- uint32_t dma_low_save1, dma_low_save2;
-
- via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
- &jump_addr_lo, 0);
-
- dev_priv->dma_wrap = dev_priv->dma_low;
-
- /*
- * Wrap command buffer to the beginning.
- */
-
- dev_priv->dma_low = 0;
- if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
- DRM_ERROR("via_cmdbuf_jump failed\n");
-
- via_dummy_bitblt(dev_priv);
- via_dummy_bitblt(dev_priv);
-
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
-
- *last_pause_ptr = pause_addr_lo;
- dma_low_save1 = dev_priv->dma_low;
-
- /*
- * Now, set a trap that will pause the regulator if it tries to rerun the old
-	 * command buffer. (Which may happen if via_hook_segment detects a command regulator pause
- * and reissues the jump command over PCI, while the regulator has already taken the jump
- * and actually paused at the current buffer end).
- * There appears to be no other way to detect this condition, since the hw_addr_pointer
- * does not seem to get updated immediately when a jump occurs.
- */
-
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
- *last_pause_ptr = pause_addr_lo;
-
- dma_low_save2 = dev_priv->dma_low;
- dev_priv->dma_low = dma_low_save1;
- via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
- dev_priv->dma_low = dma_low_save2;
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-
-static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_jump(dev_priv);
-}
-
-static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
-
- via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
-}
-
-static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
- via_wait_idle(dev_priv);
-}
-
-/*
- * User interface to the space and lag functions.
- */
-
-static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuf_size_t *d_siz = data;
- int ret = 0;
- uint32_t tmp_size, count;
- drm_via_private_t *dev_priv;
-
- DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("called without initializing AGP ring buffer.\n");
- return -EFAULT;
- }
-
- count = 1000000;
- tmp_size = d_siz->size;
- switch (d_siz->func) {
- case VIA_CMDBUF_SPACE:
- while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
- && --count) {
- if (!d_siz->wait)
- break;
- }
- if (!count) {
- DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
- ret = -EAGAIN;
- }
- break;
- case VIA_CMDBUF_LAG:
- while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
- && --count) {
- if (!d_siz->wait)
- break;
- }
- if (!count) {
- DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
- ret = -EAGAIN;
- }
- break;
- default:
- ret = -EFAULT;
- }
- d_siz->size = tmp_size;
-
- return ret;
-}
-
-const struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
-};
-
-int via_max_ioctl = ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
deleted file mode 100644
index e016a4d62090..000000000000
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ /dev/null
@@ -1,807 +0,0 @@
-/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
- * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Hellstrom.
- * Partially based on code obtained from Digeo Inc.
- */
-
-
-/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
- */
-
-#include <linux/pagemap.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_device.h>
-#include <drm/via_drm.h>
-
-#include "via_dmablit.h"
-#include "via_drv.h"
-
-#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
-#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
-#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
-
-typedef struct _drm_via_descriptor {
- uint32_t mem_addr;
- uint32_t dev_addr;
- uint32_t size;
- uint32_t next;
-} drm_via_descriptor_t;
-
-
-/*
- * Unmap a DMA mapping.
- */
-
-
-
-static void
-via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
-{
- int num_desc = vsg->num_desc;
- unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
- unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
- descriptor_this_page;
- dma_addr_t next = vsg->chain_start;
-
- while (num_desc--) {
- if (descriptor_this_page-- == 0) {
- cur_descriptor_page--;
- descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
- descriptor_this_page;
- }
- dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
- dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
- next = (dma_addr_t) desc_ptr->next;
- desc_ptr--;
- }
-}
-
-/*
- * If mode = 0, count how many descriptors are needed.
- * If mode = 1, map the DMA pages for the device, and also build and map the descriptors.
- * Descriptors are run in reverse order by the hardware because we are not allowed to update the
- * 'next' field without syncing calls when the descriptor is already mapped.
- */
-
-static void
-via_map_blit_for_device(struct pci_dev *pdev,
- const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
- int mode)
-{
- unsigned cur_descriptor_page = 0;
- unsigned num_descriptors_this_page = 0;
- unsigned char *mem_addr = xfer->mem_addr;
- unsigned char *cur_mem;
- unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
- uint32_t fb_addr = xfer->fb_addr;
- uint32_t cur_fb;
- unsigned long line_len;
- unsigned remaining_len;
- int num_desc = 0;
- int cur_line;
- dma_addr_t next = 0 | VIA_DMA_DPR_EC;
- drm_via_descriptor_t *desc_ptr = NULL;
-
- if (mode == 1)
- desc_ptr = vsg->desc_pages[cur_descriptor_page];
-
- for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
-
- line_len = xfer->line_length;
- cur_fb = fb_addr;
- cur_mem = mem_addr;
-
- while (line_len > 0) {
-
- remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
- line_len -= remaining_len;
-
- if (mode == 1) {
- desc_ptr->mem_addr =
- dma_map_page(&pdev->dev,
- vsg->pages[VIA_PFN(cur_mem) -
- VIA_PFN(first_addr)],
- VIA_PGOFF(cur_mem), remaining_len,
- vsg->direction);
- desc_ptr->dev_addr = cur_fb;
-
- desc_ptr->size = remaining_len;
- desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
- DMA_TO_DEVICE);
- desc_ptr++;
- if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
- num_descriptors_this_page = 0;
- desc_ptr = vsg->desc_pages[++cur_descriptor_page];
- }
- }
-
- num_desc++;
- cur_mem += remaining_len;
- cur_fb += remaining_len;
- }
-
- mem_addr += xfer->mem_stride;
- fb_addr += xfer->fb_stride;
- }
-
- if (mode == 1) {
- vsg->chain_start = next;
- vsg->state = dr_via_device_mapped;
- }
- vsg->num_desc = num_desc;
-}
-
-/*
- * Function that frees up all resources for a blit. It is usable even if the
- * blit info has only been partially built as long as the status enum is consistent
- * with the actual status of the used resources.
- */
-
-
-static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
-{
- int i;
-
- switch (vsg->state) {
- case dr_via_device_mapped:
- via_unmap_blit_from_device(pdev, vsg);
- fallthrough;
- case dr_via_desc_pages_alloc:
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (vsg->desc_pages[i] != NULL)
- free_page((unsigned long)vsg->desc_pages[i]);
- }
- kfree(vsg->desc_pages);
- fallthrough;
- case dr_via_pages_locked:
- unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE));
- fallthrough;
- case dr_via_pages_alloc:
- vfree(vsg->pages);
- fallthrough;
- default:
- vsg->state = dr_via_sg_init;
- }
- vfree(vsg->bounce_buffer);
- vsg->bounce_buffer = NULL;
- vsg->free_on_sequence = 0;
-}
-
-/*
- * Fire a blit engine.
- */
-
-static void
-via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
- VIA_DMA_CSR_DE);
- via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
- via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
- wmb();
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
-}
-
-/*
- * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
- * occur here if the calling user does not have access to the submitted address.
- */
-
-static int
-via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
-{
- int ret;
- unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
- first_pfn + 1;
-
- vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
- if (NULL == vsg->pages)
- return -ENOMEM;
- ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
- vsg->num_pages,
- vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
- vsg->pages);
- if (ret != vsg->num_pages) {
- if (ret < 0)
- return ret;
- vsg->state = dr_via_pages_locked;
- return -EINVAL;
- }
- vsg->state = dr_via_pages_locked;
- DRM_DEBUG("DMA pages locked\n");
- return 0;
-}
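/*
 * A worked example of the page-count computation above (all values are
 * made up, and PAGE_SHIFT is assumed to be 12 here): three lines with a
 * 5000-byte memory stride starting at user address 0x1000 touch bytes
 * 0x1000..0x4a97, i.e. four 4 KiB pages.
 */
#include <assert.h>

#define EX_PAGE_SHIFT	12
#define EX_PFN(x)	((unsigned long)(x) >> EX_PAGE_SHIFT)

static void page_count_example(void)
{
	unsigned long mem_addr = 0x1000, num_lines = 3, mem_stride = 5000;
	unsigned long num_pages =
		EX_PFN(mem_addr + num_lines * mem_stride - 1) -
		EX_PFN(mem_addr) + 1;

	assert(num_pages == 4);
}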
-
-/*
- * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
- * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
- * quite large for some blits, and pages don't need to be contiguous.
- */
-
-static int
-via_alloc_desc_pages(drm_via_sg_info_t *vsg)
-{
- int i;
-
- vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
- vsg->descriptors_per_page;
-
- if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
- return -ENOMEM;
-
- vsg->state = dr_via_desc_pages_alloc;
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
- (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
- }
- DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
- vsg->num_desc);
- return 0;
-}
-
-static void
-via_abort_dmablit(struct drm_device *dev, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
-}
-
-static void
-via_dmablit_engine_off(struct drm_device *dev, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
-}
-
-
-
-/*
- * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
- * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
- * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
- * the workqueue task takes care of processing associated with the old blit.
- */
-
-void
-via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
- int cur;
- int done_transfer;
- unsigned long irqsave = 0;
- uint32_t status = 0;
-
- DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
- engine, from_irq, (unsigned long) blitq);
-
- if (from_irq)
- spin_lock(&blitq->blit_lock);
- else
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- done_transfer = blitq->is_active &&
- ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
-
- cur = blitq->cur;
- if (done_transfer) {
-
- blitq->blits[cur]->aborted = blitq->aborting;
- blitq->done_blit_handle++;
- wake_up(blitq->blit_queue + cur);
-
- cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
- cur = 0;
- blitq->cur = cur;
-
- /*
- * Clear transfer done flag.
- */
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
-
- blitq->is_active = 0;
- blitq->aborting = 0;
- schedule_work(&blitq->wq);
-
- } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
-
- /*
- * Abort transfer after one second.
- */
-
- via_abort_dmablit(dev, engine);
- blitq->aborting = 1;
- blitq->end = jiffies + HZ;
- }
-
- if (!blitq->is_active) {
- if (blitq->num_outstanding) {
- via_fire_dmablit(dev, blitq->blits[cur], engine);
- blitq->is_active = 1;
- blitq->cur = cur;
- blitq->num_outstanding--;
- blitq->end = jiffies + HZ;
- if (!timer_pending(&blitq->poll_timer))
- mod_timer(&blitq->poll_timer, jiffies + 1);
- } else {
- if (timer_pending(&blitq->poll_timer))
- del_timer(&blitq->poll_timer);
- via_dmablit_engine_off(dev, engine);
- }
- }
-
- if (from_irq)
- spin_unlock(&blitq->blit_lock);
- else
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-}
-
-
-
-/*
- * Check whether this blit is still active, performing necessary locking.
- */
-
-static int
-via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
-{
- unsigned long irqsave;
- uint32_t slot;
- int active;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- /*
- * Allow for handle wraparounds.
- */
-
- active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
- ((blitq->cur_blit_handle - handle) <= (1 << 23));
-
- if (queue && active) {
- slot = handle - blitq->done_blit_handle + blitq->cur - 1;
- if (slot >= VIA_NUM_BLIT_SLOTS)
- slot -= VIA_NUM_BLIT_SLOTS;
- *queue = blitq->blit_queue + slot;
- }
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- return active;
-}
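/*
 * A minimal sketch of the handle-wraparound test above, with the driver
 * state stripped away (the helper name is hypothetical): a handle is
 * considered active when it lies in the window (done, cur], and the two
 * (1 << 23) comparisons realise that test with 32-bit unsigned handles
 * as long as outstanding handles never drift more than 1 << 23 apart.
 */
#include <stdint.h>

static int blit_handle_active(uint32_t done, uint32_t cur, uint32_t handle)
{
	return ((done - handle) > (1u << 23)) &&
	       ((cur - handle) <= (1u << 23));
}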
-
-/*
- * Sync. Wait for up to three seconds for the blit to be performed.
- */
-
-static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
- wait_queue_head_t *queue;
- int ret = 0;
-
- if (via_dmablit_active(blitq, engine, handle, &queue)) {
- VIA_WAIT_ON(ret, *queue, 3 * HZ,
- !via_dmablit_active(blitq, engine, handle, NULL));
- }
- DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
- handle, engine, ret);
-
- return ret;
-}
-
-
-/*
- * A timer that regularly polls the blit engine in cases where we don't have interrupts:
- * a) Broken hardware (typically those that don't have any video capture facility).
- * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
- * The timer and hardware IRQs can and do work in parallel. If the hardware has
- * IRQs, they will shorten the latency somewhat.
- */
-
-
-
-static void
-via_dmablit_timer(struct timer_list *t)
-{
- drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
- struct drm_device *dev = blitq->dev;
- int engine = (int)
- (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
- (unsigned long) jiffies);
-
- via_dmablit_handler(dev, engine, 0);
-
- if (!timer_pending(&blitq->poll_timer)) {
- mod_timer(&blitq->poll_timer, jiffies + 1);
-
- /*
- * Rerun handler to delete timer if engines are off, and
- * to shorten abort latency. This is a little nasty.
- */
-
- via_dmablit_handler(dev, engine, 0);
-
- }
-}
-
-
-
-
-/*
- * Workqueue task that frees data and mappings associated with a blit.
- * Also wakes up waiting processes. Each of these tasks handles one
- * blit engine only and may not be called on each interrupt.
- */
-
-
-static void
-via_dmablit_workqueue(struct work_struct *work)
-{
- drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
- struct drm_device *dev = blitq->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- unsigned long irqsave;
- drm_via_sg_info_t *cur_sg;
- int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
- (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- while (blitq->serviced != blitq->cur) {
-
- cur_released = blitq->serviced++;
-
- DRM_DEBUG("Releasing blit slot %d\n", cur_released);
-
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
- blitq->serviced = 0;
-
- cur_sg = blitq->blits[cur_released];
- blitq->num_free++;
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- wake_up(&blitq->busy_queue);
-
- via_free_sg_info(pdev, cur_sg);
- kfree(cur_sg);
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-}
-
-
-/*
- * Init all blit engines. Currently we use two, but some hardware has four.
- */
-
-
-void
-via_init_dmablit(struct drm_device *dev)
-{
- int i, j;
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_via_blitq_t *blitq;
-
- pci_set_master(pdev);
-
- for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
- blitq = dev_priv->blit_queues + i;
- blitq->dev = dev;
- blitq->cur_blit_handle = 0;
- blitq->done_blit_handle = 0;
- blitq->head = 0;
- blitq->cur = 0;
- blitq->serviced = 0;
- blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
- blitq->num_outstanding = 0;
- blitq->is_active = 0;
- blitq->aborting = 0;
- spin_lock_init(&blitq->blit_lock);
- for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
- init_waitqueue_head(blitq->blit_queue + j);
- init_waitqueue_head(&blitq->busy_queue);
- INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
- }
-}
-
-/*
- * Build all info and do all mappings required for a blit.
- */
-
-
-static int
-via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int draw = xfer->to_fb;
- int ret = 0;
-
- vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
- vsg->bounce_buffer = NULL;
-
- vsg->state = dr_via_sg_init;
-
- if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
- DRM_ERROR("Zero size bitblt.\n");
- return -EINVAL;
- }
-
- /*
-	 * The check below is a driver limitation, not a hardware one. We
-	 * don't want to lock unused pages, and don't want to incorporate the
-	 * extra logic needed to avoid them. Make sure there are none.
- * (Not a big limitation anyway.)
- */
-
- if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
- DRM_ERROR("Too large system memory stride. Stride: %d, "
- "Length: %d\n", xfer->mem_stride, xfer->line_length);
- return -EINVAL;
- }
-
- if ((xfer->mem_stride == xfer->line_length) &&
- (xfer->fb_stride == xfer->line_length)) {
- xfer->mem_stride *= xfer->num_lines;
- xfer->line_length = xfer->mem_stride;
- xfer->fb_stride = xfer->mem_stride;
- xfer->num_lines = 1;
- }
-
- /*
-	 * Don't lock an arbitrarily large number of pages, since that causes a
-	 * DoS security hole.
- */
-
- if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
- DRM_ERROR("Too large PCI DMA bitblt.\n");
- return -EINVAL;
- }
-
- /*
-	 * We allow a negative fb stride to allow flipping of images in
- * transfer.
- */
-
- if (xfer->mem_stride < xfer->line_length ||
- abs(xfer->fb_stride) < xfer->line_length) {
- DRM_ERROR("Invalid frame-buffer / memory stride.\n");
- return -EINVAL;
- }
-
- /*
- * A hardware bug seems to be worked around if system memory addresses start on
-	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been
-	 * contacted about this. Meanwhile, impose the following restrictions:
- */
-
-#ifdef VIA_BUGFREE
- if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
- DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return -EINVAL;
- }
-#else
- if ((((unsigned long)xfer->mem_addr & 15) ||
- ((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) &&
- ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
- DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return -EINVAL;
- }
-#endif
-
- if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
- DRM_ERROR("Could not lock DMA pages.\n");
- via_free_sg_info(pdev, vsg);
- return ret;
- }
-
- via_map_blit_for_device(pdev, xfer, vsg, 0);
- if (0 != (ret = via_alloc_desc_pages(vsg))) {
- DRM_ERROR("Could not allocate DMA descriptor pages.\n");
- via_free_sg_info(pdev, vsg);
- return ret;
- }
- via_map_blit_for_device(pdev, xfer, vsg, 1);
-
- return 0;
-}
-
-
-/*
- * Reserve one free slot in the blit queue. Will wait for one second for one
- * to become available. Otherwise -EBUSY is returned.
- */
-
-static int
-via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
-{
- int ret = 0;
- unsigned long irqsave;
-
- DRM_DEBUG("Num free is %d\n", blitq->num_free);
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while (blitq->num_free == 0) {
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
- if (ret)
- return (-EINTR == ret) ? -EAGAIN : ret;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
-
- blitq->num_free--;
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- return 0;
-}
-
-/*
- * Hand back a free slot if we changed our mind.
- */
-
-static void
-via_dmablit_release_slot(drm_via_blitq_t *blitq)
-{
- unsigned long irqsave;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- blitq->num_free++;
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- wake_up(&blitq->busy_queue);
-}
-
-/*
- * Grab a free slot. Build blit info and queue a blit.
- */
-
-
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_sg_info_t *vsg;
- drm_via_blitq_t *blitq;
- int ret;
- int engine;
- unsigned long irqsave;
-
- if (dev_priv == NULL) {
- DRM_ERROR("Called without initialization.\n");
- return -EINVAL;
- }
-
- engine = (xfer->to_fb) ? 0 : 1;
- blitq = dev_priv->blit_queues + engine;
- if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
- return ret;
- if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
- via_dmablit_release_slot(blitq);
- return -ENOMEM;
- }
- if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
- via_dmablit_release_slot(blitq);
- kfree(vsg);
- return ret;
- }
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
- blitq->head = 0;
- blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- xfer->sync.engine = engine;
-
- via_dmablit_handler(dev, engine, 0);
-
- return 0;
-}
-
-/*
- * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
- * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
- * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
- */
-
-int
-via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_blitsync_t *sync = data;
- int err;
-
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
- return -EINVAL;
-
- err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
-
- if (-EINTR == err)
- err = -EAGAIN;
-
- return err;
-}
-
-
-/*
- * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
- * be reissued. See the above IOCTL code.
- */
-
-int
-via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_dmablit_t *xfer = data;
- int err;
-
- err = via_dmablit(dev, xfer);
-
- return err;
-}
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
deleted file mode 100644
index 9b662a327cef..000000000000
--- a/drivers/gpu/drm/via/via_dmablit.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Hellstrom.
- * Register info from Digeo Inc.
- */
-
-#ifndef _VIA_DMABLIT_H
-#define _VIA_DMABLIT_H
-
-#include <linux/dma-mapping.h>
-
-#define VIA_NUM_BLIT_ENGINES 2
-#define VIA_NUM_BLIT_SLOTS 8
-
-struct _drm_via_descriptor;
-
-typedef struct _drm_via_sg_info {
- struct page **pages;
- unsigned long num_pages;
- struct _drm_via_descriptor **desc_pages;
- int num_desc_pages;
- int num_desc;
- enum dma_data_direction direction;
- unsigned char *bounce_buffer;
- dma_addr_t chain_start;
- uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
- int aborted;
- enum {
- dr_via_device_mapped,
- dr_via_desc_pages_alloc,
- dr_via_pages_locked,
- dr_via_pages_alloc,
- dr_via_sg_init
- } state;
-} drm_via_sg_info_t;
-
-typedef struct _drm_via_blitq {
- struct drm_device *dev;
- uint32_t cur_blit_handle;
- uint32_t done_blit_handle;
- unsigned serviced;
- unsigned head;
- unsigned cur;
- unsigned num_free;
- unsigned num_outstanding;
- unsigned long end;
- int aborting;
- int is_active;
- drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
- spinlock_t blit_lock;
- wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
- wait_queue_head_t busy_queue;
- struct work_struct wq;
- struct timer_list poll_timer;
-} drm_via_blitq_t;
-
-
-/*
- * PCI DMA Registers
- * Channels 2 & 3 don't seem to be implemented in hardware.
- */
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
-/* DPR */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
-#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
-
-/* MR */
-#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
-#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
-#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
-
-/* CSR */
-#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
-#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
-#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
-#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
-#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-
-
-
-#endif
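/*
 * A small sketch of how the per-channel layout above is addressed (the
 * helper names are hypothetical; the strides simply restate the
 * engine * 0x10 and engine * 0x04 arithmetic used in via_dmablit.c):
 * the MAR/DAR/BCR/DPR registers are spaced 0x10 apart per channel,
 * while the MR and CSR registers are spaced 0x04 apart.
 */
static inline unsigned int via_dma_mar(int engine)
{
	return VIA_PCI_DMA_MAR0 + engine * 0x10;	/* 0xE40, 0xE50, ... */
}

static inline unsigned int via_dma_csr(int engine)
{
	return VIA_PCI_DMA_CSR0 + engine * 0x04;	/* 0xE90, 0xE94, ... */
}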
diff --git a/drivers/gpu/drm/via/via_dri1.c b/drivers/gpu/drm/via/via_dri1.c
new file mode 100644
index 000000000000..217d1e84b0ea
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dri1.c
@@ -0,0 +1,3630 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2002 Tungsten Graphics, Inc.
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. All Rights Reserved.
+ * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
+ * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A. All Rights Reserved.
+ * Copyright 2004 The Unichrome project. All Rights Reserved.
+ * Copyright 2004 BEAM Ltd.
+ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_legacy.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_pciids.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/via_drm.h>
+
+#include "via_3d_reg.h"
+
+#define DRIVER_AUTHOR "Various"
+
+#define DRIVER_NAME "via"
+#define DRIVER_DESC "VIA Unichrome / Pro"
+#define DRIVER_DATE "20070202"
+
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 11
+#define DRIVER_PATCHLEVEL 1
+
+typedef enum {
+ no_sequence = 0,
+ z_address,
+ dest_address,
+ tex_address
+} drm_via_sequence_t;
+
+typedef struct {
+ unsigned texture;
+ uint32_t z_addr;
+ uint32_t d_addr;
+ uint32_t t_addr[2][10];
+ uint32_t pitch[2][10];
+ uint32_t height[2][10];
+ uint32_t tex_level_lo[2];
+ uint32_t tex_level_hi[2];
+ uint32_t tex_palette_size[2];
+ uint32_t tex_npot[2];
+ drm_via_sequence_t unfinished;
+ int agp_texture;
+ int multitex;
+ struct drm_device *dev;
+ drm_local_map_t *map_cache;
+ uint32_t vertex_count;
+ int agp;
+ const uint32_t *buf_start;
+} drm_via_state_t;
+
+#define VIA_PCI_BUF_SIZE 60000
+#define VIA_FIRE_BUF_SIZE 1024
+#define VIA_NUM_IRQS 4
+
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+struct _drm_via_descriptor;
+
+typedef struct _drm_via_sg_info {
+ struct page **pages;
+ unsigned long num_pages;
+ struct _drm_via_descriptor **desc_pages;
+ int num_desc_pages;
+ int num_desc;
+ enum dma_data_direction direction;
+ unsigned char *bounce_buffer;
+ dma_addr_t chain_start;
+ uint32_t free_on_sequence;
+ unsigned int descriptors_per_page;
+ int aborted;
+ enum {
+ dr_via_device_mapped,
+ dr_via_desc_pages_alloc,
+ dr_via_pages_locked,
+ dr_via_pages_alloc,
+ dr_via_sg_init
+ } state;
+} drm_via_sg_info_t;
+
+typedef struct _drm_via_blitq {
+ struct drm_device *dev;
+ uint32_t cur_blit_handle;
+ uint32_t done_blit_handle;
+ unsigned serviced;
+ unsigned head;
+ unsigned cur;
+ unsigned num_free;
+ unsigned num_outstanding;
+ unsigned long end;
+ int aborting;
+ int is_active;
+ drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
+ spinlock_t blit_lock;
+ wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+ wait_queue_head_t busy_queue;
+ struct work_struct wq;
+ struct timer_list poll_timer;
+} drm_via_blitq_t;
+
+typedef struct drm_via_ring_buffer {
+ drm_local_map_t map;
+ char *virtual_start;
+} drm_via_ring_buffer_t;
+
+typedef uint32_t maskarray_t[5];
+
+typedef struct drm_via_irq {
+ atomic_t irq_received;
+ uint32_t pending_mask;
+ uint32_t enable_mask;
+ wait_queue_head_t irq_queue;
+} drm_via_irq_t;
+
+typedef struct drm_via_private {
+ drm_via_sarea_t *sarea_priv;
+ drm_local_map_t *sarea;
+ drm_local_map_t *fb;
+ drm_local_map_t *mmio;
+ unsigned long agpAddr;
+ wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
+ char *dma_ptr;
+ unsigned int dma_low;
+ unsigned int dma_high;
+ unsigned int dma_offset;
+ uint32_t dma_wrap;
+ volatile uint32_t *last_pause_ptr;
+ volatile uint32_t *hw_addr_ptr;
+ drm_via_ring_buffer_t ring;
+ ktime_t last_vblank;
+ int last_vblank_valid;
+ ktime_t nsec_per_vblank;
+ atomic_t vbl_received;
+ drm_via_state_t hc_state;
+ char pci_buf[VIA_PCI_BUF_SIZE];
+ const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+ uint32_t num_fire_offsets;
+ int chipset;
+ drm_via_irq_t via_irqs[VIA_NUM_IRQS];
+ unsigned num_irqs;
+ maskarray_t *irq_masks;
+ uint32_t irq_enable_mask;
+ uint32_t irq_pending_mask;
+ int *irq_map;
+ unsigned int idle_fault;
+ int vram_initialized;
+ struct drm_mm vram_mm;
+ int agp_initialized;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
+ unsigned long vram_offset;
+ unsigned long agp_offset;
+ drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
+ uint32_t dma_diff;
+} drm_via_private_t;
+
+struct via_file_private {
+ struct list_head obj_list;
+};
+
+enum via_family {
+ VIA_OTHER = 0, /* Baseline */
+ VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
+ VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
+};
+
+/* VIA MMIO register access */
+static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
+{
+ return readl((void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8_mask(struct drm_via_private *dev_priv,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
+ tmp = (tmp & ~mask) | (val & mask);
+ writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+/*
+ * Poll in a loop waiting for 'condition' to be true.
+ * Note: A direct replacement with wait_event_interruptible_timeout()
+ * will not work unless the driver is updated to emit wake_up()
+ * in the relevant places that can affect the 'condition'.
+ *
+ * Returns:
+ * ret keeps its current value if 'condition' becomes true
+ * ret = -EBUSY if a timeout happens
+ * ret = -EINTR if a signal interrupted the waiting period
+ */
+#define VIA_WAIT_ON( ret, queue, timeout, condition ) \
+do { \
+ DECLARE_WAITQUEUE(entry, current); \
+ unsigned long end = jiffies + (timeout); \
+ add_wait_queue(&(queue), &entry); \
+ \
+ for (;;) { \
+ __set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (time_after_eq(jiffies, end)) { \
+ ret = -EBUSY; \
+ break; \
+ } \
+ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
+ if (signal_pending(current)) { \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&(queue), &entry); \
+} while (0)
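+
+/*
+ * Illustrative sketch only (not part of the driver): VIA_WAIT_ON is used the
+ * way the callers below use it (see via_dmablit_sync() and
+ * via_dmablit_grab_slot()). 'ret', the wait queue head and the condition are
+ * supplied by the caller; 'my_queue' and 'my_flag' are hypothetical names:
+ *
+ *	int ret = 0;
+ *
+ *	VIA_WAIT_ON(ret, my_queue, 3 * HZ, my_flag);
+ *
+ * Afterwards ret == 0 means the condition became true, -EBUSY means the
+ * three second timeout expired and -EINTR means a signal arrived.
+ */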
+
+int via_do_cleanup_map(struct drm_device *dev);
+
+int via_dma_cleanup(struct drm_device *dev);
+int via_driver_dma_quiescent(struct drm_device *dev);
+
+#define CMDBUF_ALIGNMENT_SIZE (0x100)
+#define CMDBUF_ALIGNMENT_MASK (0x0ff)
+
+/* defines for VIA 3D registers */
+#define VIA_REG_STATUS 0x400
+#define VIA_REG_TRANSET 0x43C
+#define VIA_REG_TRANSPACE 0x440
+
+/* VIA_REG_STATUS(0x400): Engine Status */
+#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
+
+#define SetReg2DAGP(nReg, nData) { \
+ *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
+ *((uint32_t *)(vb) + 1) = (nData); \
+ vb = ((uint32_t *)vb) + 2; \
+ dev_priv->dma_low += 8; \
+}
+
+#define via_flush_write_combine() mb()
+
+#define VIA_OUT_RING_QW(w1, w2) do { \
+ *vb++ = (w1); \
+ *vb++ = (w2); \
+ dev_priv->dma_low += 8; \
+} while (0)
+
+#define VIA_MM_ALIGN_SHIFT 4
+#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+
+struct via_memblock {
+ struct drm_mm_node mm_node;
+ struct list_head owner_list;
+};
+
+#define VIA_REG_INTERRUPT 0x200
+
+/* VIA_REG_INTERRUPT */
+#define VIA_IRQ_GLOBAL (1 << 31)
+#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
+#define VIA_IRQ_VBLANK_PENDING (1 << 3)
+#define VIA_IRQ_HQV0_ENABLE (1 << 11)
+#define VIA_IRQ_HQV1_ENABLE (1 << 25)
+#define VIA_IRQ_HQV0_PENDING (1 << 9)
+#define VIA_IRQ_HQV1_PENDING (1 << 10)
+#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
+/*
+ * PCI DMA Registers
+ * Channels 2 & 3 don't seem to be implemented in hardware.
+ */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
+#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
+#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
+#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
+#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
+#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+
+/*
+ * Device-specific IRQs go here. This type might need to be extended with
+ * a register field if there are multiple IRQ control registers.
+ * Currently we activate the HQV interrupts of Unichrome Pro group A.
+ */
+
+static maskarray_t via_pro_group_a_irqs[] = {
+ {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
+ 0x00000000 },
+ {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
+ 0x00000000 },
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+};
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
+
+static maskarray_t via_unichrome_irqs[] = {
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
+
+
+/*
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a no-op on x86? Also
+ * FIXME: What happens if this is called while a pending blit has previously done
+ * the same DMA mappings?
+ */
+#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
+
+typedef struct _drm_via_descriptor {
+ uint32_t mem_addr;
+ uint32_t dev_addr;
+ uint32_t size;
+ uint32_t next;
+} drm_via_descriptor_t;
+
+typedef enum {
+ state_command,
+ state_header2,
+ state_header1,
+ state_vheader5,
+ state_vheader6,
+ state_error
+} verifier_state_t;
+
+typedef enum {
+ no_check = 0,
+ check_for_header2,
+ check_for_header1,
+ check_for_header2_err,
+ check_for_header1_err,
+ check_for_fire,
+ check_z_buffer_addr0,
+ check_z_buffer_addr1,
+ check_z_buffer_addr_mode,
+ check_destination_addr0,
+ check_destination_addr1,
+ check_destination_addr_mode,
+ check_for_dummy,
+ check_for_dd,
+ check_texture_addr0,
+ check_texture_addr1,
+ check_texture_addr2,
+ check_texture_addr3,
+ check_texture_addr4,
+ check_texture_addr5,
+ check_texture_addr6,
+ check_texture_addr7,
+ check_texture_addr8,
+ check_texture_addr_mode,
+ check_for_vertex_count,
+ check_number_texunits,
+ forbidden_command
+} hazard_t;
+
+/*
+ * Associates each hazard above with a possible multi-command
+ * sequence. For example, an address that is split over multiple
+ * commands and that needs to be checked at the first command
+ * that does not include any part of the address.
+ */
+
+static drm_via_sequence_t seqs[] = {
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ no_sequence,
+ z_address,
+ z_address,
+ z_address,
+ dest_address,
+ dest_address,
+ dest_address,
+ no_sequence,
+ no_sequence,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ tex_address,
+ no_sequence
+};
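+
+/*
+ * Illustrative note (not part of the driver): for the z_address sequence,
+ * check_z_buffer_addr0 contributes bits 0-23 of the address and
+ * check_z_buffer_addr1 contributes bits 24-31 (see investigate_hazard()):
+ *
+ *	z_addr = (z_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
+ *	z_addr = (z_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+ *
+ * Only when a command outside the current sequence arrives is
+ * finish_current_sequence() called to validate the assembled address.
+ */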
+
+typedef struct {
+ unsigned int code;
+ hazard_t hz;
+} hz_init_t;
+
+static hz_init_t init_table1[] = {
+ {0xf2, check_for_header2_err},
+ {0xf0, check_for_header1_err},
+ {0xee, check_for_fire},
+ {0xcc, check_for_dummy},
+ {0xdd, check_for_dd},
+ {0x00, no_check},
+ {0x10, check_z_buffer_addr0},
+ {0x11, check_z_buffer_addr1},
+ {0x12, check_z_buffer_addr_mode},
+ {0x13, no_check},
+ {0x14, no_check},
+ {0x15, no_check},
+ {0x23, no_check},
+ {0x24, no_check},
+ {0x33, no_check},
+ {0x34, no_check},
+ {0x35, no_check},
+ {0x36, no_check},
+ {0x37, no_check},
+ {0x38, no_check},
+ {0x39, no_check},
+ {0x3A, no_check},
+ {0x3B, no_check},
+ {0x3C, no_check},
+ {0x3D, no_check},
+ {0x3E, no_check},
+ {0x40, check_destination_addr0},
+ {0x41, check_destination_addr1},
+ {0x42, check_destination_addr_mode},
+ {0x43, no_check},
+ {0x44, no_check},
+ {0x50, no_check},
+ {0x51, no_check},
+ {0x52, no_check},
+ {0x53, no_check},
+ {0x54, no_check},
+ {0x55, no_check},
+ {0x56, no_check},
+ {0x57, no_check},
+ {0x58, no_check},
+ {0x70, no_check},
+ {0x71, no_check},
+ {0x78, no_check},
+ {0x79, no_check},
+ {0x7A, no_check},
+ {0x7B, no_check},
+ {0x7C, no_check},
+ {0x7D, check_for_vertex_count}
+};
+
+static hz_init_t init_table2[] = {
+ {0xf2, check_for_header2_err},
+ {0xf0, check_for_header1_err},
+ {0xee, check_for_fire},
+ {0xcc, check_for_dummy},
+ {0x00, check_texture_addr0},
+ {0x01, check_texture_addr0},
+ {0x02, check_texture_addr0},
+ {0x03, check_texture_addr0},
+ {0x04, check_texture_addr0},
+ {0x05, check_texture_addr0},
+ {0x06, check_texture_addr0},
+ {0x07, check_texture_addr0},
+ {0x08, check_texture_addr0},
+ {0x09, check_texture_addr0},
+ {0x20, check_texture_addr1},
+ {0x21, check_texture_addr1},
+ {0x22, check_texture_addr1},
+ {0x23, check_texture_addr4},
+ {0x2B, check_texture_addr3},
+ {0x2C, check_texture_addr3},
+ {0x2D, check_texture_addr3},
+ {0x2E, check_texture_addr3},
+ {0x2F, check_texture_addr3},
+ {0x30, check_texture_addr3},
+ {0x31, check_texture_addr3},
+ {0x32, check_texture_addr3},
+ {0x33, check_texture_addr3},
+ {0x34, check_texture_addr3},
+ {0x4B, check_texture_addr5},
+ {0x4C, check_texture_addr6},
+ {0x51, check_texture_addr7},
+ {0x52, check_texture_addr8},
+ {0x77, check_texture_addr2},
+ {0x78, no_check},
+ {0x79, no_check},
+ {0x7A, no_check},
+ {0x7B, check_texture_addr_mode},
+ {0x7C, no_check},
+ {0x7D, no_check},
+ {0x7E, no_check},
+ {0x7F, no_check},
+ {0x80, no_check},
+ {0x81, no_check},
+ {0x82, no_check},
+ {0x83, no_check},
+ {0x85, no_check},
+ {0x86, no_check},
+ {0x87, no_check},
+ {0x88, no_check},
+ {0x89, no_check},
+ {0x8A, no_check},
+ {0x90, no_check},
+ {0x91, no_check},
+ {0x92, no_check},
+ {0x93, no_check}
+};
+
+static hz_init_t init_table3[] = {
+ {0xf2, check_for_header2_err},
+ {0xf0, check_for_header1_err},
+ {0xcc, check_for_dummy},
+ {0x00, check_number_texunits}
+};
+
+static hazard_t table1[256];
+static hazard_t table2[256];
+static hazard_t table3[256];
+
+static __inline__ int
+eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
+{
+ if ((buf_end - *buf) >= num_words) {
+ *buf += num_words;
+ return 0;
+ }
+ DRM_ERROR("Illegal termination of DMA command buffer\n");
+ return 1;
+}
+
+/*
+ * Partially stolen from drm_memory.h
+ */
+
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
+ unsigned long offset,
+ unsigned long size,
+ struct drm_device *dev)
+{
+ struct drm_map_list *r_list;
+ drm_local_map_t *map = seq->map_cache;
+
+ if (map && map->offset <= offset
+ && (offset + size) <= (map->offset + map->size)) {
+ return map;
+ }
+
+ list_for_each_entry(r_list, &dev->maplist, head) {
+ map = r_list->map;
+ if (!map)
+ continue;
+ if (map->offset <= offset
+ && (offset + size) <= (map->offset + map->size)
+ && !(map->flags & _DRM_RESTRICTED)
+ && (map->type == _DRM_AGP)) {
+ seq->map_cache = map;
+ return map;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Require that all AGP texture levels reside in the same AGP map, which should
+ * be mappable by the client. This is not a big restriction.
+ * FIXME: To enforce this security policy strictly, drm_rmmap
+ * would have to wait for DMA quiescence before removing an AGP map.
+ * In practice, the via_drm_lookup_agp_map call seems to take
+ * very little CPU time.
+ */
+
+static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
+{
+ switch (cur_seq->unfinished) {
+ case z_address:
+ DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
+ break;
+ case dest_address:
+ DRM_DEBUG("Destination start address is 0x%x\n",
+ cur_seq->d_addr);
+ break;
+ case tex_address:
+ if (cur_seq->agp_texture) {
+ unsigned start =
+ cur_seq->tex_level_lo[cur_seq->texture];
+ unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
+ unsigned long lo = ~0, hi = 0, tmp;
+ uint32_t *addr, *pitch, *height, tex;
+ unsigned i;
+ int npot;
+
+ if (end > 9)
+ end = 9;
+ if (start > 9)
+ start = 9;
+
+ addr =
+ &(cur_seq->t_addr[tex = cur_seq->texture][start]);
+ pitch = &(cur_seq->pitch[tex][start]);
+ height = &(cur_seq->height[tex][start]);
+ npot = cur_seq->tex_npot[tex];
+ for (i = start; i <= end; ++i) {
+ tmp = *addr++;
+ if (tmp < lo)
+ lo = tmp;
+ if (i == 0 && npot)
+ tmp += (*height++ * *pitch++);
+ else
+ tmp += (*height++ << *pitch++);
+ if (tmp > hi)
+ hi = tmp;
+ }
+
+ if (!via_drm_lookup_agp_map
+ (cur_seq, lo, hi - lo, cur_seq->dev)) {
+ DRM_ERROR
+ ("AGP texture is not in allowed map\n");
+ return 2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ cur_seq->unfinished = no_sequence;
+ return 0;
+}
+
+static __inline__ int
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
+{
+ register uint32_t tmp, *tmp_addr;
+
+ if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
+ int ret;
+ if ((ret = finish_current_sequence(cur_seq)))
+ return ret;
+ }
+
+ switch (hz) {
+ case check_for_header2:
+ if (cmd == HALCYON_HEADER2)
+ return 1;
+ return 0;
+ case check_for_header1:
+ if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ return 1;
+ return 0;
+ case check_for_header2_err:
+ if (cmd == HALCYON_HEADER2)
+ return 1;
+ DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
+ break;
+ case check_for_header1_err:
+ if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ return 1;
+ DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
+ break;
+ case check_for_fire:
+ if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
+ return 1;
+ DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
+ break;
+ case check_for_dummy:
+ if (HC_DUMMY == cmd)
+ return 0;
+ DRM_ERROR("Illegal DMA HC_DUMMY command\n");
+ break;
+ case check_for_dd:
+ if (0xdddddddd == cmd)
+ return 0;
+ DRM_ERROR("Illegal DMA 0xdddddddd command\n");
+ break;
+ case check_z_buffer_addr0:
+ cur_seq->unfinished = z_address;
+ cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
+ (cmd & 0x00FFFFFF);
+ return 0;
+ case check_z_buffer_addr1:
+ cur_seq->unfinished = z_address;
+ cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
+ ((cmd & 0xFF) << 24);
+ return 0;
+ case check_z_buffer_addr_mode:
+ cur_seq->unfinished = z_address;
+ if ((cmd & 0x0000C000) == 0)
+ return 0;
+ DRM_ERROR("Attempt to place Z buffer in system memory\n");
+ return 2;
+ case check_destination_addr0:
+ cur_seq->unfinished = dest_address;
+ cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
+ (cmd & 0x00FFFFFF);
+ return 0;
+ case check_destination_addr1:
+ cur_seq->unfinished = dest_address;
+ cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
+ ((cmd & 0xFF) << 24);
+ return 0;
+ case check_destination_addr_mode:
+ cur_seq->unfinished = dest_address;
+ if ((cmd & 0x0000C000) == 0)
+ return 0;
+ DRM_ERROR
+ ("Attempt to place 3D drawing buffer in system memory\n");
+ return 2;
+ case check_texture_addr0:
+ cur_seq->unfinished = tex_address;
+ tmp = (cmd >> 24);
+ tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+ *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
+ return 0;
+ case check_texture_addr1:
+ cur_seq->unfinished = tex_address;
+ tmp = ((cmd >> 24) - 0x20);
+ tmp += tmp << 1;
+ tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+ tmp_addr++;
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
+ tmp_addr++;
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
+ return 0;
+ case check_texture_addr2:
+ cur_seq->unfinished = tex_address;
+ cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
+ cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
+ return 0;
+ case check_texture_addr3:
+ cur_seq->unfinished = tex_address;
+ tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
+ if (tmp == 0 &&
+ (cmd & HC_HTXnEnPit_MASK)) {
+ cur_seq->pitch[cur_seq->texture][tmp] =
+ (cmd & HC_HTXnLnPit_MASK);
+ cur_seq->tex_npot[cur_seq->texture] = 1;
+ } else {
+ cur_seq->pitch[cur_seq->texture][tmp] =
+ (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
+ cur_seq->tex_npot[cur_seq->texture] = 0;
+ if (cmd & 0x000FFFFF) {
+ DRM_ERROR
+ ("Unimplemented texture level 0 pitch mode.\n");
+ return 2;
+ }
+ }
+ return 0;
+ case check_texture_addr4:
+ cur_seq->unfinished = tex_address;
+ tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
+ *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+ return 0;
+ case check_texture_addr5:
+ case check_texture_addr6:
+ cur_seq->unfinished = tex_address;
+ /*
+ * Texture width. We don't care since we have the pitch.
+ */
+ return 0;
+ case check_texture_addr7:
+ cur_seq->unfinished = tex_address;
+ tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+ tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
+ tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
+ tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
+ tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
+ tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
+ tmp_addr[0] = 1 << (cmd & 0x0000000F);
+ return 0;
+ case check_texture_addr8:
+ cur_seq->unfinished = tex_address;
+ tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+ tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
+ tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
+ tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
+ tmp_addr[6] = 1 << (cmd & 0x0000000F);
+ return 0;
+ case check_texture_addr_mode:
+ cur_seq->unfinished = tex_address;
+ if (2 == (tmp = cmd & 0x00000003)) {
+ DRM_ERROR
+ ("Attempt to fetch texture from system memory.\n");
+ return 2;
+ }
+ cur_seq->agp_texture = (tmp == 3);
+ cur_seq->tex_palette_size[cur_seq->texture] =
+ (cmd >> 16) & 0x000000007;
+ return 0;
+ case check_for_vertex_count:
+ cur_seq->vertex_count = cmd & 0x0000FFFF;
+ return 0;
+ case check_number_texunits:
+ cur_seq->multitex = (cmd >> 3) & 1;
+ return 0;
+ default:
+ DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
+ return 2;
+ }
+ return 2;
+}
+
+static __inline__ int
+via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
+ drm_via_state_t *cur_seq)
+{
+ drm_via_private_t *dev_priv =
+ (drm_via_private_t *) cur_seq->dev->dev_private;
+ uint32_t a_fire, bcmd, dw_count;
+ int ret = 0;
+ int have_fire;
+ const uint32_t *buf = *buffer;
+
+ while (buf < buf_end) {
+ have_fire = 0;
+ if ((buf_end - buf) < 2) {
+ DRM_ERROR
+ ("Unexpected termination of primitive list.\n");
+ ret = 1;
+ break;
+ }
+ if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
+ break;
+ bcmd = *buf++;
+ if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
+ DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
+ *buf);
+ ret = 1;
+ break;
+ }
+ a_fire =
+ *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
+ HC_HE3Fire_MASK;
+
+ /*
+ * How many dwords per vertex ?
+ */
+
+ if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
+ DRM_ERROR("Illegal B command vertex data for AGP.\n");
+ ret = 1;
+ break;
+ }
+
+ dw_count = 0;
+ if (bcmd & (1 << 7))
+ dw_count += (cur_seq->multitex) ? 2 : 1;
+ if (bcmd & (1 << 8))
+ dw_count += (cur_seq->multitex) ? 2 : 1;
+ if (bcmd & (1 << 9))
+ dw_count++;
+ if (bcmd & (1 << 10))
+ dw_count++;
+ if (bcmd & (1 << 11))
+ dw_count++;
+ if (bcmd & (1 << 12))
+ dw_count++;
+ if (bcmd & (1 << 13))
+ dw_count++;
+ if (bcmd & (1 << 14))
+ dw_count++;
+
+ while (buf < buf_end) {
+ if (*buf == a_fire) {
+ if (dev_priv->num_fire_offsets >=
+ VIA_FIRE_BUF_SIZE) {
+ DRM_ERROR("Fire offset buffer full.\n");
+ ret = 1;
+ break;
+ }
+ dev_priv->fire_offsets[dev_priv->
+ num_fire_offsets++] =
+ buf;
+ have_fire = 1;
+ buf++;
+ if (buf < buf_end && *buf == a_fire)
+ buf++;
+ break;
+ }
+ if ((*buf == HALCYON_HEADER2) ||
+ ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
+ DRM_ERROR("Missing Vertex Fire command, "
+ "Stray Vertex Fire command or verifier "
+ "lost sync.\n");
+ ret = 1;
+ break;
+ }
+ if ((ret = eat_words(&buf, buf_end, dw_count)))
+ break;
+ }
+ if (buf >= buf_end && !have_fire) {
+ DRM_ERROR("Missing Vertex Fire command or verifier "
+ "lost sync.\n");
+ ret = 1;
+ break;
+ }
+ if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
+ DRM_ERROR("AGP Primitive list end misaligned.\n");
+ ret = 1;
+ break;
+ }
+ }
+ *buffer = buf;
+ return ret;
+}
+
+static __inline__ verifier_state_t
+via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
+ drm_via_state_t *hc_state)
+{
+ uint32_t cmd;
+ int hz_mode;
+ hazard_t hz;
+ const uint32_t *buf = *buffer;
+ const hazard_t *hz_table;
+
+ if ((buf_end - buf) < 2) {
+ DRM_ERROR
+ ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
+ return state_error;
+ }
+ buf++;
+ cmd = (*buf++ & 0xFFFF0000) >> 16;
+
+ switch (cmd) {
+ case HC_ParaType_CmdVdata:
+ if (via_check_prim_list(&buf, buf_end, hc_state))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+ case HC_ParaType_NotTex:
+ hz_table = table1;
+ break;
+ case HC_ParaType_Tex:
+ hc_state->texture = 0;
+ hz_table = table2;
+ break;
+ case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
+ hc_state->texture = 1;
+ hz_table = table2;
+ break;
+ case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
+ hz_table = table3;
+ break;
+ case HC_ParaType_Auto:
+ if (eat_words(&buf, buf_end, 2))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+ case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
+ if (eat_words(&buf, buf_end, 32))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+ case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
+ case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
+ DRM_ERROR("Texture palettes are rejected because of "
+ "lack of info how to determine their size.\n");
+ return state_error;
+ case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
+ DRM_ERROR("Fog factor palettes are rejected because of "
+ "lack of info how to determine their size.\n");
+ return state_error;
+ default:
+
+ /*
+ * There are some unimplemented HC_ParaTypes here that
+ * need to be implemented if the Mesa driver is extended.
+ */
+
+ DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
+ "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
+ cmd, *(buf - 2));
+ *buffer = buf;
+ return state_error;
+ }
+
+ while (buf < buf_end) {
+ cmd = *buf++;
+ if ((hz = hz_table[cmd >> 24])) {
+ if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
+ if (hz_mode == 1) {
+ buf--;
+ break;
+ }
+ return state_error;
+ }
+ } else if (hc_state->unfinished &&
+ finish_current_sequence(hc_state)) {
+ return state_error;
+ }
+ }
+ if (hc_state->unfinished && finish_current_sequence(hc_state))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end, int *fire_count)
+{
+ uint32_t cmd;
+ const uint32_t *buf = *buffer;
+ const uint32_t *next_fire;
+ int burst = 0;
+
+ next_fire = dev_priv->fire_offsets[*fire_count];
+ buf++;
+ cmd = (*buf & 0xFFFF0000) >> 16;
+ via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
+ switch (cmd) {
+ case HC_ParaType_CmdVdata:
+ while ((buf < buf_end) &&
+ (*fire_count < dev_priv->num_fire_offsets) &&
+ (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
+ while (buf <= next_fire) {
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
+ (burst & 63), *buf++);
+ burst += 4;
+ }
+ if ((buf < buf_end)
+ && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
+ buf++;
+
+ if (++(*fire_count) < dev_priv->num_fire_offsets)
+ next_fire = dev_priv->fire_offsets[*fire_count];
+ }
+ break;
+ default:
+ while (buf < buf_end) {
+
+ if (*buf == HC_HEADER2 ||
+ (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
+ (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
+ (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+ break;
+
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
+ (burst & 63), *buf++);
+ burst += 4;
+ }
+ }
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ int verify_mmio_address(uint32_t address)
+{
+ if ((address > 0x3FF) && (address < 0xC00)) {
+ DRM_ERROR("Invalid VIDEO DMA command. "
+ "Attempt to access 3D- or command burst area.\n");
+ return 1;
+ } else if ((address > 0xCFF) && (address < 0x1300)) {
+ DRM_ERROR("Invalid VIDEO DMA command. "
+ "Attempt to access PCI DMA area.\n");
+ return 1;
+ } else if (address > 0x13FF) {
+ DRM_ERROR("Invalid VIDEO DMA command. "
+ "Attempt to access VGA registers.\n");
+ return 1;
+ }
+ return 0;
+}
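+
+/*
+ * Illustrative summary (not part of the driver) of the checks above: video DMA
+ * may only target MMIO offsets 0x000-0x3FF, 0xC00-0xCFF and 0x1300-0x13FF;
+ * 0x400-0xBFF (3D / command burst), 0xD00-0x12FF (PCI DMA) and anything above
+ * 0x13FF (VGA registers) are rejected.
+ */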
+
+static __inline__ int
+verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
+ uint32_t dwords)
+{
+ const uint32_t *buf = *buffer;
+
+ if (buf_end - buf < dwords) {
+ DRM_ERROR("Illegal termination of video command.\n");
+ return 1;
+ }
+ while (dwords--) {
+ if (*buf++) {
+ DRM_ERROR("Illegal video command tail.\n");
+ return 1;
+ }
+ }
+ *buffer = buf;
+ return 0;
+}
+
+static __inline__ verifier_state_t
+via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
+{
+ uint32_t cmd;
+ const uint32_t *buf = *buffer;
+ verifier_state_t ret = state_command;
+
+ while (buf < buf_end) {
+ cmd = *buf;
+ if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
+ (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
+ if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+ break;
+ DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+ "Attempt to access 3D- or command burst area.\n");
+ ret = state_error;
+ break;
+ } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
+ if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+ break;
+ DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+ "Attempt to access VGA registers.\n");
+ ret = state_error;
+ break;
+ } else {
+ buf += 2;
+ }
+ }
+ *buffer = buf;
+ return ret;
+}
+
+static __inline__ verifier_state_t
+via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
+{
+ register uint32_t cmd;
+ const uint32_t *buf = *buffer;
+
+ while (buf < buf_end) {
+ cmd = *buf;
+ if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+ break;
+ via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
+ buf++;
+ }
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
+{
+ uint32_t data;
+ const uint32_t *buf = *buffer;
+
+ if (buf_end - buf < 4) {
+ DRM_ERROR("Illegal termination of video header5 command\n");
+ return state_error;
+ }
+
+ data = *buf++ & ~VIA_VIDEOMASK;
+ if (verify_mmio_address(data))
+ return state_error;
+
+ data = *buf++;
+ if (*buf++ != 0x00F50000) {
+ DRM_ERROR("Illegal header5 header data\n");
+ return state_error;
+ }
+ if (*buf++ != 0x00000000) {
+ DRM_ERROR("Illegal header5 header data\n");
+ return state_error;
+ }
+ if (eat_words(&buf, buf_end, data))
+ return state_error;
+ if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
+{
+ uint32_t addr, count, i;
+ const uint32_t *buf = *buffer;
+
+ addr = *buf++ & ~VIA_VIDEOMASK;
+ i = count = *buf;
+ buf += 3;
+ while (i--)
+ via_write(dev_priv, addr, *buf++);
+ if (count & 3)
+ buf += 4 - (count & 3);
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
+{
+ uint32_t data;
+ const uint32_t *buf = *buffer;
+ uint32_t i;
+
+ if (buf_end - buf < 4) {
+ DRM_ERROR("Illegal termination of video header6 command\n");
+ return state_error;
+ }
+ buf++;
+ data = *buf++;
+ if (*buf++ != 0x00F60000) {
+ DRM_ERROR("Illegal header6 header data\n");
+ return state_error;
+ }
+ if (*buf++ != 0x00000000) {
+ DRM_ERROR("Illegal header6 header data\n");
+ return state_error;
+ }
+ if ((buf_end - buf) < (data << 1)) {
+ DRM_ERROR("Illegal termination of video header6 command\n");
+ return state_error;
+ }
+ for (i = 0; i < data; ++i) {
+ if (verify_mmio_address(*buf++))
+ return state_error;
+ buf++;
+ }
+ data <<= 1;
+ if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+ return state_error;
+ *buffer = buf;
+ return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
+{
+
+ uint32_t addr, count, i;
+ const uint32_t *buf = *buffer;
+
+ i = count = *++buf;
+ buf += 3;
+ while (i--) {
+ addr = *buf++;
+ via_write(dev_priv, addr, *buf++);
+ }
+ count <<= 1;
+ if (count & 3)
+ buf += 4 - (count & 3);
+ *buffer = buf;
+ return state_command;
+}
+
+static int
+via_verify_command_stream(const uint32_t * buf, unsigned int size,
+ struct drm_device * dev, int agp)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_state_t *hc_state = &dev_priv->hc_state;
+ drm_via_state_t saved_state = *hc_state;
+ uint32_t cmd;
+ const uint32_t *buf_end = buf + (size >> 2);
+ verifier_state_t state = state_command;
+ int cme_video;
+ int supported_3d;
+
+ cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
+ dev_priv->chipset == VIA_DX9_0);
+
+ supported_3d = dev_priv->chipset != VIA_DX9_0;
+
+ hc_state->dev = dev;
+ hc_state->unfinished = no_sequence;
+ hc_state->map_cache = NULL;
+ hc_state->agp = agp;
+ hc_state->buf_start = buf;
+ dev_priv->num_fire_offsets = 0;
+
+ while (buf < buf_end) {
+
+ switch (state) {
+ case state_header2:
+ state = via_check_header2(&buf, buf_end, hc_state);
+ break;
+ case state_header1:
+ state = via_check_header1(&buf, buf_end);
+ break;
+ case state_vheader5:
+ state = via_check_vheader5(&buf, buf_end);
+ break;
+ case state_vheader6:
+ state = via_check_vheader6(&buf, buf_end);
+ break;
+ case state_command:
+ cmd = *buf;
+ if ((cmd == HALCYON_HEADER2) && supported_3d)
+ state = state_header2;
+ else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ state = state_header1;
+ else if (cme_video
+ && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+ state = state_vheader5;
+ else if (cme_video
+ && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+ state = state_vheader6;
+ else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
+ DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
+ state = state_error;
+ } else {
+ DRM_ERROR
+ ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
+ cmd);
+ state = state_error;
+ }
+ break;
+ case state_error:
+ default:
+ *hc_state = saved_state;
+ return -EINVAL;
+ }
+ }
+ if (state == state_error) {
+ *hc_state = saved_state;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
+ unsigned int size)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ uint32_t cmd;
+ const uint32_t *buf_end = buf + (size >> 2);
+ verifier_state_t state = state_command;
+ int fire_count = 0;
+
+ while (buf < buf_end) {
+
+ switch (state) {
+ case state_header2:
+ state =
+ via_parse_header2(dev_priv, &buf, buf_end,
+ &fire_count);
+ break;
+ case state_header1:
+ state = via_parse_header1(dev_priv, &buf, buf_end);
+ break;
+ case state_vheader5:
+ state = via_parse_vheader5(dev_priv, &buf, buf_end);
+ break;
+ case state_vheader6:
+ state = via_parse_vheader6(dev_priv, &buf, buf_end);
+ break;
+ case state_command:
+ cmd = *buf;
+ if (cmd == HALCYON_HEADER2)
+ state = state_header2;
+ else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+ state = state_header1;
+ else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+ state = state_vheader5;
+ else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+ state = state_vheader6;
+ else {
+ DRM_ERROR
+ ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
+ cmd);
+ state = state_error;
+ }
+ break;
+ case state_error:
+ default:
+ return -EINVAL;
+ }
+ }
+ if (state == state_error)
+ return -EINVAL;
+ return 0;
+}
+
+static void
+setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
+{
+ int i;
+
+ for (i = 0; i < 256; ++i)
+ table[i] = forbidden_command;
+
+ for (i = 0; i < size; ++i)
+ table[init_table[i].code] = init_table[i].hz;
+}
+
+static void via_init_command_verifier(void)
+{
+ setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
+ setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
+ setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
+}
+/*
+ * Unmap a DMA mapping.
+ */
+static void
+via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+ int num_desc = vsg->num_desc;
+ unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
+ unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ descriptor_this_page;
+ dma_addr_t next = vsg->chain_start;
+
+ while (num_desc--) {
+ if (descriptor_this_page-- == 0) {
+ cur_descriptor_page--;
+ descriptor_this_page = vsg->descriptors_per_page - 1;
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ descriptor_this_page;
+ }
+ dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
+ dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
+ next = (dma_addr_t) desc_ptr->next;
+ desc_ptr--;
+ }
+}
+
+/*
+ * If mode = 0, count how many descriptors are needed.
+ * If mode = 1, map the DMA pages for the device and also assemble and map the descriptors.
+ * Descriptors are run in reverse order by the hardware, because we are not allowed to update
+ * the 'next' field without sync calls once the descriptor is already mapped.
+ */
+static void
+via_map_blit_for_device(struct pci_dev *pdev,
+ const drm_via_dmablit_t *xfer,
+ drm_via_sg_info_t *vsg,
+ int mode)
+{
+ unsigned cur_descriptor_page = 0;
+ unsigned num_descriptors_this_page = 0;
+ unsigned char *mem_addr = xfer->mem_addr;
+ unsigned char *cur_mem;
+ unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
+ uint32_t fb_addr = xfer->fb_addr;
+ uint32_t cur_fb;
+ unsigned long line_len;
+ unsigned remaining_len;
+ int num_desc = 0;
+ int cur_line;
+ dma_addr_t next = 0 | VIA_DMA_DPR_EC;
+ drm_via_descriptor_t *desc_ptr = NULL;
+
+ if (mode == 1)
+ desc_ptr = vsg->desc_pages[cur_descriptor_page];
+
+ for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
+
+ line_len = xfer->line_length;
+ cur_fb = fb_addr;
+ cur_mem = mem_addr;
+
+ while (line_len > 0) {
+
+ remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+ line_len -= remaining_len;
+
+ if (mode == 1) {
+ desc_ptr->mem_addr =
+ dma_map_page(&pdev->dev,
+ vsg->pages[VIA_PFN(cur_mem) -
+ VIA_PFN(first_addr)],
+ VIA_PGOFF(cur_mem), remaining_len,
+ vsg->direction);
+ desc_ptr->dev_addr = cur_fb;
+
+ desc_ptr->size = remaining_len;
+ desc_ptr->next = (uint32_t) next;
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ DMA_TO_DEVICE);
+ desc_ptr++;
+ if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
+ num_descriptors_this_page = 0;
+ desc_ptr = vsg->desc_pages[++cur_descriptor_page];
+ }
+ }
+
+ num_desc++;
+ cur_mem += remaining_len;
+ cur_fb += remaining_len;
+ }
+
+ mem_addr += xfer->mem_stride;
+ fb_addr += xfer->fb_stride;
+ }
+
+ if (mode == 1) {
+ vsg->chain_start = next;
+ vsg->state = dr_via_device_mapped;
+ }
+ vsg->num_desc = num_desc;
+}
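+
+/*
+ * Illustrative note (not part of the driver): via_map_blit_for_device() is
+ * meant to be called twice, as via_build_sg_info() below does. A rough sketch
+ * of the sequence, with error handling omitted:
+ *
+ *	via_map_blit_for_device(pdev, xfer, vsg, 0);	(count vsg->num_desc)
+ *	via_alloc_desc_pages(vsg);			(allocate descriptor pages)
+ *	via_map_blit_for_device(pdev, xfer, vsg, 1);	(map pages and descriptors)
+ */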
+
+/*
+ * Function that frees up all resources for a blit. It is usable even if the
+ * blit info has only been partially built, as long as the state enum is consistent
+ * with the actual status of the used resources.
+ */
+static void
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+ int i;
+
+ switch (vsg->state) {
+ case dr_via_device_mapped:
+ via_unmap_blit_from_device(pdev, vsg);
+ fallthrough;
+ case dr_via_desc_pages_alloc:
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
+ if (vsg->desc_pages[i] != NULL)
+ free_page((unsigned long)vsg->desc_pages[i]);
+ }
+ kfree(vsg->desc_pages);
+ fallthrough;
+ case dr_via_pages_locked:
+ unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE));
+ fallthrough;
+ case dr_via_pages_alloc:
+ vfree(vsg->pages);
+ fallthrough;
+ default:
+ vsg->state = dr_via_sg_init;
+ }
+ vfree(vsg->bounce_buffer);
+ vsg->bounce_buffer = NULL;
+ vsg->free_on_sequence = 0;
+}
+
+/*
+ * Fire a blit engine.
+ */
+static void
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_DMA_CSR_DE);
+ via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+ via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+ wmb();
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+ via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
+}
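+
+/*
+ * Illustrative note (not part of the driver): the per-engine register strides
+ * used above follow from the register map, e.g. for engine 1:
+ *
+ *	VIA_PCI_DMA_MAR0 + 1 * 0x10 == 0xE50 == VIA_PCI_DMA_MAR1
+ *	VIA_PCI_DMA_CSR0 + 1 * 0x04 == 0xE94 == VIA_PCI_DMA_CSR1
+ *
+ * which is why MAR/DAR/BCR/DPR use a 0x10 stride while CSR/MR use 0x04.
+ */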
+
+/*
+ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
+ * occur here if the calling user does not have access to the submitted address.
+ */
+static int
+via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+ int ret;
+ unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
+ first_pfn + 1;
+
+ vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
+ if (NULL == vsg->pages)
+ return -ENOMEM;
+ ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
+ vsg->num_pages,
+ vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+ vsg->pages);
+ if (ret != vsg->num_pages) {
+ if (ret < 0)
+ return ret;
+ vsg->state = dr_via_pages_locked;
+ return -EINVAL;
+ }
+ vsg->state = dr_via_pages_locked;
+ DRM_DEBUG("DMA pages locked\n");
+ return 0;
+}
+
+/*
+ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
+ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and pages don't need to be contiguous.
+ */
+static int
+via_alloc_desc_pages(drm_via_sg_info_t *vsg)
+{
+ int i;
+
+ vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->descriptors_per_page;
+
+ if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
+ return -ENOMEM;
+
+ vsg->state = dr_via_desc_pages_alloc;
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
+ if (NULL == (vsg->desc_pages[i] =
+ (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ }
+ DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+ vsg->num_desc);
+ return 0;
+}
+
+static void
+via_abort_dmablit(struct drm_device *dev, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+}
+
+static void
+via_dmablit_engine_off(struct drm_device *dev, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+}
+
+/*
+ * The dmablit part of the IRQ handler. Only reasonably fast operations are done here.
+ * The rest, like unmapping and freeing memory for completed blits, is done in a separate
+ * workqueue task. Basically, the interrupt handler submits a new blit to the engine, while
+ * the workqueue task takes care of the processing associated with the old blit.
+ */
+static void
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+ int cur;
+ int done_transfer;
+ unsigned long irqsave = 0;
+ uint32_t status = 0;
+
+ DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
+ engine, from_irq, (unsigned long) blitq);
+
+ if (from_irq)
+ spin_lock(&blitq->blit_lock);
+ else
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ done_transfer = blitq->is_active &&
+ ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
+
+ cur = blitq->cur;
+ if (done_transfer) {
+
+ blitq->blits[cur]->aborted = blitq->aborting;
+ blitq->done_blit_handle++;
+ wake_up(blitq->blit_queue + cur);
+
+ cur++;
+ if (cur >= VIA_NUM_BLIT_SLOTS)
+ cur = 0;
+ blitq->cur = cur;
+
+ /*
+ * Clear transfer done flag.
+ */
+
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
+
+ blitq->is_active = 0;
+ blitq->aborting = 0;
+ schedule_work(&blitq->wq);
+
+ } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
+
+ /*
+ * Abort transfer after one second.
+ */
+
+ via_abort_dmablit(dev, engine);
+ blitq->aborting = 1;
+ blitq->end = jiffies + HZ;
+ }
+
+ if (!blitq->is_active) {
+ if (blitq->num_outstanding) {
+ via_fire_dmablit(dev, blitq->blits[cur], engine);
+ blitq->is_active = 1;
+ blitq->cur = cur;
+ blitq->num_outstanding--;
+ blitq->end = jiffies + HZ;
+ if (!timer_pending(&blitq->poll_timer))
+ mod_timer(&blitq->poll_timer, jiffies + 1);
+ } else {
+ if (timer_pending(&blitq->poll_timer))
+ del_timer(&blitq->poll_timer);
+ via_dmablit_engine_off(dev, engine);
+ }
+ }
+
+ if (from_irq)
+ spin_unlock(&blitq->blit_lock);
+ else
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+/*
+ * Check whether this blit is still active, performing necessary locking.
+ */
+static int
+via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
+{
+ unsigned long irqsave;
+ uint32_t slot;
+ int active;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ /*
+ * Allow for handle wraparounds.
+ */
+
+ active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
+ ((blitq->cur_blit_handle - handle) <= (1 << 23));
+
+ if (queue && active) {
+ slot = handle - blitq->done_blit_handle + blitq->cur - 1;
+ if (slot >= VIA_NUM_BLIT_SLOTS)
+ slot -= VIA_NUM_BLIT_SLOTS;
+ *queue = blitq->blit_queue + slot;
+ }
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ return active;
+}
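+
+/*
+ * Illustrative example (not part of the driver) of the wraparound test above,
+ * with hypothetical 32-bit handle values:
+ *
+ *	done_blit_handle = 0xFFFFFFF0, cur_blit_handle = 0x00000005, handle = 0x00000002
+ *
+ *	done_blit_handle - handle = 0xFFFFFFEE  > (1 << 23)	(not yet done)
+ *	cur_blit_handle  - handle = 0x00000003 <= (1 << 23)	(already issued)
+ *
+ * so the blit is reported as active even though the counters have wrapped.
+ */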
+
+/*
+ * Sync. Wait for at least three seconds for the blit to be performed.
+ */
+static int
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+ wait_queue_head_t *queue;
+ int ret = 0;
+
+ if (via_dmablit_active(blitq, engine, handle, &queue)) {
+ VIA_WAIT_ON(ret, *queue, 3 * HZ,
+ !via_dmablit_active(blitq, engine, handle, NULL));
+ }
+ DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
+ handle, engine, ret);
+
+ return ret;
+}
+
+/*
+ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
+ * a) Broken hardware (typically hardware that doesn't have any video capture facility).
+ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
+ * The timer and hardware IRQs can and do work in parallel. If the hardware has
+ * IRQs, they will shorten the latency somewhat.
+ */
+static void
+via_dmablit_timer(struct timer_list *t)
+{
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
+ struct drm_device *dev = blitq->dev;
+ int engine = (int)
+ (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+ (unsigned long) jiffies);
+
+ via_dmablit_handler(dev, engine, 0);
+
+ if (!timer_pending(&blitq->poll_timer)) {
+ mod_timer(&blitq->poll_timer, jiffies + 1);
+
+ /*
+ * Rerun handler to delete timer if engines are off, and
+ * to shorten abort latency. This is a little nasty.
+ */
+
+ via_dmablit_handler(dev, engine, 0);
+
+ }
+}
+
+/*
+ * Workqueue task that frees data and mappings associated with a blit.
+ * Also wakes up waiting processes. Each of these tasks handles one
+ * blit engine only and may not be called on each interrupt.
+ */
+static void
+via_dmablit_workqueue(struct work_struct *work)
+{
+ drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
+ struct drm_device *dev = blitq->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ unsigned long irqsave;
+ drm_via_sg_info_t *cur_sg;
+ int cur_released;
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
+ (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ while (blitq->serviced != blitq->cur) {
+
+ cur_released = blitq->serviced++;
+
+ DRM_DEBUG("Releasing blit slot %d\n", cur_released);
+
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ blitq->serviced = 0;
+
+ cur_sg = blitq->blits[cur_released];
+ blitq->num_free++;
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ wake_up(&blitq->busy_queue);
+
+ via_free_sg_info(pdev, cur_sg);
+ kfree(cur_sg);
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+/*
+ * Init all blit engines. Currently we use two, but some hardware have 4.
+ */
+static void
+via_init_dmablit(struct drm_device *dev)
+{
+ int i, j;
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ drm_via_blitq_t *blitq;
+
+ pci_set_master(pdev);
+
+ for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
+ blitq = dev_priv->blit_queues + i;
+ blitq->dev = dev;
+ blitq->cur_blit_handle = 0;
+ blitq->done_blit_handle = 0;
+ blitq->head = 0;
+ blitq->cur = 0;
+ blitq->serviced = 0;
+ blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
+ blitq->num_outstanding = 0;
+ blitq->is_active = 0;
+ blitq->aborting = 0;
+ spin_lock_init(&blitq->blit_lock);
+ for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
+ init_waitqueue_head(blitq->blit_queue + j);
+ init_waitqueue_head(&blitq->busy_queue);
+ INIT_WORK(&blitq->wq, via_dmablit_workqueue);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
+ }
+}
+
+/*
+ * Build all info and do all mappings required for a blit.
+ */
+static int
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int draw = xfer->to_fb;
+ int ret = 0;
+
+ vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ vsg->bounce_buffer = NULL;
+
+ vsg->state = dr_via_sg_init;
+
+ if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
+ DRM_ERROR("Zero size bitblt.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The check below is a driver limitation, not a hardware one. We
+ * don't want to lock unused pages, and don't want to incorporate the
+ * extra logic needed to avoid them. Make sure there are none.
+ * (Not a big limitation anyway.)
+ */
+
+ if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
+ DRM_ERROR("Too large system memory stride. Stride: %d, "
+ "Length: %d\n", xfer->mem_stride, xfer->line_length);
+ return -EINVAL;
+ }
+
+ if ((xfer->mem_stride == xfer->line_length) &&
+ (xfer->fb_stride == xfer->line_length)) {
+ xfer->mem_stride *= xfer->num_lines;
+ xfer->line_length = xfer->mem_stride;
+ xfer->fb_stride = xfer->mem_stride;
+ xfer->num_lines = 1;
+ }
+
+ /*
+ * Don't lock an arbitrarily large number of pages, since that causes a
+ * DoS security hole.
+ */
+
+ if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
+ DRM_ERROR("Too large PCI DMA bitblt.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We allow a negative fb stride so that images can be flipped
+ * during transfer.
+ */
+
+ if (xfer->mem_stride < xfer->line_length ||
+ abs(xfer->fb_stride) < xfer->line_length) {
+ DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * A hardware bug seems to be worked around if system memory addresses start on
+ * 16-byte boundaries. This seems a bit restrictive, however. VIA has been contacted
+ * about this. Meanwhile, impose the following restrictions:
+ */
+
+#ifdef VIA_BUGFREE
+ if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
+ ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
+ DRM_ERROR("Invalid DRM bitblt alignment.\n");
+ return -EINVAL;
+ }
+#else
+ if ((((unsigned long)xfer->mem_addr & 15) ||
+ ((unsigned long)xfer->fb_addr & 3)) ||
+ ((xfer->num_lines > 1) &&
+ ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
+ DRM_ERROR("Invalid DRM bitblt alignment.\n");
+ return -EINVAL;
+ }
+#endif
+
+ if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
+ DRM_ERROR("Could not lock DMA pages.\n");
+ via_free_sg_info(pdev, vsg);
+ return ret;
+ }
+
+ via_map_blit_for_device(pdev, xfer, vsg, 0);
+ if (0 != (ret = via_alloc_desc_pages(vsg))) {
+ DRM_ERROR("Could not allocate DMA descriptor pages.\n");
+ via_free_sg_info(pdev, vsg);
+ return ret;
+ }
+ via_map_blit_for_device(pdev, xfer, vsg, 1);
+
+ return 0;
+}
+
+/*
+ * Reserve one free slot in the blit queue. Wait for up to one second for one
+ * to become available. Otherwise -EBUSY is returned.
+ */
+static int
+via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
+{
+ int ret = 0;
+ unsigned long irqsave;
+
+ DRM_DEBUG("Num free is %d\n", blitq->num_free);
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ while (blitq->num_free == 0) {
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
+ if (ret)
+ return (-EINTR == ret) ? -EAGAIN : ret;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ blitq->num_free--;
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ return 0;
+}
+
+/*
+ * Hand back a free slot if we changed our mind.
+ */
+static void
+via_dmablit_release_slot(drm_via_blitq_t *blitq)
+{
+ unsigned long irqsave;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ blitq->num_free++;
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ wake_up(&blitq->busy_queue);
+}
+
+/*
+ * Grab a free slot. Build blit info and queue a blit.
+ */
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_sg_info_t *vsg;
+ drm_via_blitq_t *blitq;
+ int ret;
+ int engine;
+ unsigned long irqsave;
+
+ if (dev_priv == NULL) {
+ DRM_ERROR("Called without initialization.\n");
+ return -EINVAL;
+ }
+
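+ /* Engine 0 blits to the frame buffer, engine 1 to system memory. */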
+ engine = (xfer->to_fb) ? 0 : 1;
+ blitq = dev_priv->blit_queues + engine;
+ if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
+ return ret;
+ if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
+ via_dmablit_release_slot(blitq);
+ return -ENOMEM;
+ }
+ if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
+ via_dmablit_release_slot(blitq);
+ kfree(vsg);
+ return ret;
+ }
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ blitq->blits[blitq->head++] = vsg;
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ blitq->head = 0;
+ blitq->num_outstanding++;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ xfer->sync.engine = engine;
+
+ via_dmablit_handler(dev, engine, 0);
+
+ return 0;
+}
+
+/*
+ * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
+ * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
+ * case it returns with -EAGAIN so that the signal can be delivered.
+ * The caller should then reissue the IOCTL. This is similar to what is done for drmGetLock().
+ */
+static int
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_blitsync_t *sync = data;
+ int err;
+
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ return -EINVAL;
+
+ err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
+
+ if (-EINTR == err)
+ err = -EAGAIN;
+
+ return err;
+}
+
+/*
+ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * be reissued. See the above IOCTL code.
+ */
+static int
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_dmablit_t *xfer = data;
+ int err;
+
+ err = via_dmablit(dev, xfer);
+
+ return err;
+}
+
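+ /*
+ * Illustrative userspace sketch (not part of this driver): how a client
+ * might wait on a queued blit, reissuing the sync IOCTL whenever it is
+ * interrupted by a signal, as described in the comments above. The DRM fd,
+ * engine number and sync handle are assumed to have been obtained already;
+ * DRM_IOCTL_VIA_BLIT_SYNC and drm_via_blitsync_t are assumed to come from
+ * the via_drm.h UAPI header.
+ *
+ *   #include <errno.h>
+ *   #include <sys/ioctl.h>
+ *   #include <drm/via_drm.h>
+ *
+ *   static int via_blit_wait(int fd, unsigned int engine, unsigned int handle)
+ *   {
+ *           drm_via_blitsync_t sync = {
+ *                   .sync_handle = handle,
+ *                   .engine = engine,
+ *           };
+ *           int ret;
+ *
+ *           do {    // Reissue the IOCTL when a signal interrupts the wait.
+ *                   ret = ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, &sync);
+ *           } while (ret == -1 && errno == EAGAIN);
+ *
+ *           return ret == -1 ? -errno : 0;
+ *   }
+ */
+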
+static u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ if (pipe != 0)
+ return 0;
+
+ return atomic_read(&dev_priv->vbl_received);
+}
+
+static irqreturn_t via_driver_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+ int handled = 0;
+ ktime_t cur_vblank;
+ drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+ int i;
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ if (status & VIA_IRQ_VBLANK_PENDING) {
+ atomic_inc(&dev_priv->vbl_received);
+ if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
+ cur_vblank = ktime_get();
+ if (dev_priv->last_vblank_valid) {
+ dev_priv->nsec_per_vblank =
+ ktime_sub(cur_vblank,
+ dev_priv->last_vblank) >> 4;
+ }
+ dev_priv->last_vblank = cur_vblank;
+ dev_priv->last_vblank_valid = 1;
+ }
+ if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
+ DRM_DEBUG("nsec per vblank is: %llu\n",
+ ktime_to_ns(dev_priv->nsec_per_vblank));
+ }
+ drm_handle_vblank(dev, 0);
+ handled = 1;
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ if (status & cur_irq->pending_mask) {
+ atomic_inc(&cur_irq->irq_received);
+ wake_up(&cur_irq->irq_queue);
+ handled = 1;
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
+ via_dmablit_handler(dev, 0, 1);
+ else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
+ via_dmablit_handler(dev, 1, 1);
+ }
+ cur_irq++;
+ }
+
+ /* Acknowledge interrupts */
+ via_write(dev_priv, VIA_REG_INTERRUPT, status);
+
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
+{
+ u32 status;
+
+ if (dev_priv) {
+ /* Acknowledge interrupts */
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status |
+ dev_priv->irq_pending_mask);
+ }
+}
+
+static int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ u32 status;
+
+ if (pipe != 0) {
+ DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
+ return -EINVAL;
+ }
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
+
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
+
+ return 0;
+}
+
+static void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ u32 status;
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
+
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
+
+ if (pipe != 0)
+ DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
+}
+
+static int
+via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
+ unsigned int *sequence)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ unsigned int cur_irq_sequence;
+ drm_via_irq_t *cur_irq;
+ int ret = 0;
+ maskarray_t *masks;
+ int real_irq;
+
+ DRM_DEBUG("\n");
+
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
+ if (irq >= drm_via_irq_num) {
+ DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
+ return -EINVAL;
+ }
+
+ real_irq = dev_priv->irq_map[irq];
+
+ if (real_irq < 0) {
+ DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+ irq);
+ return -EINVAL;
+ }
+
+ masks = dev_priv->irq_masks;
+ cur_irq = dev_priv->via_irqs + real_irq;
+
+ if (masks[real_irq][2] && !force_sequence) {
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
+ masks[irq][4]));
+ cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+ } else {
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ (((cur_irq_sequence =
+ atomic_read(&cur_irq->irq_received)) -
+ *sequence) <= (1 << 23)));
+ }
+ *sequence = cur_irq_sequence;
+ return ret;
+}
+
+
+/*
+ * drm_dma.h hooks
+ */
+
+static void via_driver_irq_preinstall(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+ drm_via_irq_t *cur_irq;
+ int i;
+
+ DRM_DEBUG("dev_priv: %p\n", dev_priv);
+ if (dev_priv) {
+ cur_irq = dev_priv->via_irqs;
+
+ dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
+ dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
+
+ if (dev_priv->chipset == VIA_PRO_GROUP_A ||
+ dev_priv->chipset == VIA_DX9_0) {
+ dev_priv->irq_masks = via_pro_group_a_irqs;
+ dev_priv->num_irqs = via_num_pro_group_a;
+ dev_priv->irq_map = via_irqmap_pro_group_a;
+ } else {
+ dev_priv->irq_masks = via_unichrome_irqs;
+ dev_priv->num_irqs = via_num_unichrome;
+ dev_priv->irq_map = via_irqmap_unichrome;
+ }
+
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
+ atomic_set(&cur_irq->irq_received, 0);
+ cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+ cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+ init_waitqueue_head(&cur_irq->irq_queue);
+ dev_priv->irq_enable_mask |= cur_irq->enable_mask;
+ dev_priv->irq_pending_mask |= cur_irq->pending_mask;
+ cur_irq++;
+
+ DRM_DEBUG("Initializing IRQ %d\n", i);
+ }
+
+ dev_priv->last_vblank_valid = 0;
+
+ /* Clear VSync interrupt regs */
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
+ ~(dev_priv->irq_enable_mask));
+
+ /* Clear bits if they're already high */
+ viadrv_acknowledge_irqs(dev_priv);
+ }
+}
+
+static int via_driver_irq_postinstall(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+
+ DRM_DEBUG("fun: %s\n", __func__);
+ if (!dev_priv)
+ return -EINVAL;
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+ | dev_priv->irq_enable_mask);
+
+ /* Some magic, oh for some data sheets! */
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
+
+ return 0;
+}
+
+static void via_driver_irq_uninstall(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ u32 status;
+
+ DRM_DEBUG("\n");
+ if (dev_priv) {
+
+ /* Some more magic, oh for some data sheets! */
+
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
+
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
+ ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
+ }
+}
+
+static int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_irqwait_t *irqwait = data;
+ struct timespec64 now;
+ int ret = 0;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+ int force_sequence;
+
+ if (irqwait->request.irq >= dev_priv->num_irqs) {
+ DRM_ERROR("Trying to wait on unknown irq %d\n",
+ irqwait->request.irq);
+ return -EINVAL;
+ }
+
+ cur_irq += irqwait->request.irq;
+
+ switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+ case VIA_IRQ_RELATIVE:
+ irqwait->request.sequence +=
+ atomic_read(&cur_irq->irq_received);
+ irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ break;
+ case VIA_IRQ_ABSOLUTE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (irqwait->request.type & VIA_IRQ_SIGNAL) {
+ DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
+ return -EINVAL;
+ }
+
+ force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
+
+ ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
+ &irqwait->request.sequence);
+ ktime_get_ts64(&now);
+ irqwait->reply.tval_sec = now.tv_sec;
+ irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
+
+ return ret;
+}
+
+static void via_init_futex(drm_via_private_t *dev_priv)
+{
+ unsigned int i;
+
+ DRM_DEBUG("\n");
+
+ for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+ init_waitqueue_head(&(dev_priv->decoder_queue[i]));
+ XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
+ }
+}
+
+static void via_cleanup_futex(drm_via_private_t *dev_priv)
+{
+}
+
+static void via_release_futex(drm_via_private_t *dev_priv, int context)
+{
+ unsigned int i;
+ volatile int *lock;
+
+ if (!dev_priv->sarea_priv)
+ return;
+
+ for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+ lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+ if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
+ if (_DRM_LOCK_IS_HELD(*lock)
+ && (*lock & _DRM_LOCK_CONT)) {
+ wake_up(&(dev_priv->decoder_queue[i]));
+ }
+ *lock = 0;
+ }
+ }
+}
+
+static int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_futex_t *fx = data;
+ volatile int *lock;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
+ int ret = 0;
+
+ DRM_DEBUG("\n");
+
+ if (fx->lock >= VIA_NR_XVMC_LOCKS)
+ return -EFAULT;
+
+ lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
+
+ switch (fx->func) {
+ case VIA_FUTEX_WAIT:
+ VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+ (fx->ms / 10) * (HZ / 100), *lock != fx->val);
+ return ret;
+ case VIA_FUTEX_WAKE:
+ wake_up(&(dev_priv->decoder_queue[fx->lock]));
+ return 0;
+ }
+ return 0;
+}
+
+static int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_agp_t *agp = data;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
+
+ dev_priv->agp_initialized = 1;
+ dev_priv->agp_offset = agp->offset;
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+ return 0;
+}
+
+static int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_fb_t *fb = data;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
+
+ dev_priv->vram_initialized = 1;
+ dev_priv->vram_offset = fb->offset;
+
+ mutex_unlock(&dev->struct_mutex);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+
+ return 0;
+
+}
+
+static int via_final_context(struct drm_device *dev, int context)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ via_release_futex(dev_priv, context);
+
+ /* Linux specific until context tracking code gets ported to BSD */
+ /* Last context, perform cleanup */
+ if (list_is_singular(&dev->ctxlist)) {
+ DRM_DEBUG("Last Context\n");
+ drm_legacy_irq_uninstall(dev);
+ via_cleanup_futex(dev_priv);
+ via_do_cleanup_map(dev);
+ }
+ return 1;
+}
+
+static void via_lastclose(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ if (!dev_priv)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static int via_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ drm_via_mem_t *mem = data;
+ int retval = 0, user_key;
+ struct via_memblock *item;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
+ unsigned long tmpSize;
+
+ if (mem->type > VIA_MEM_AGP) {
+ DRM_ERROR("Unknown memory type allocation\n");
+ return -EINVAL;
+ }
+ mutex_lock(&dev->struct_mutex);
+ if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
+ dev_priv->agp_initialized)) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR
+ ("Attempt to allocate from uninitialized memory manager.\n");
+ return -EINVAL;
+ }
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
+
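+ /* Convert the requested size to allocation units, rounding up. */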
+ tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
+ if (mem->type == VIA_MEM_AGP)
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ tmpSize);
+ else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ tmpSize);
+ if (retval)
+ goto fail_alloc;
+
+ retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
+ if (retval < 0)
+ goto fail_idr;
+ user_key = retval;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+ mem->index = user_key;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->index = 0;
+ DRM_DEBUG("Video memory allocation failed\n");
+
+ return retval;
+}
+
+static int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ drm_via_mem_t *mem = data;
+ struct via_memblock *obj;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = idr_find(&dev_priv->object_idr, mem->index);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->index);
+ list_del(&obj->owner_list);
+ drm_mm_remove_node(&obj->mm_node);
+ kfree(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG("free = 0x%lx\n", mem->index);
+
+ return 0;
+}
+
+
+static void via_reclaim_buffers_locked(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+ struct via_memblock *entry, *next;
+
+ if (!(dev->master && file->master->lock.hw_lock))
+ return;
+
+ drm_legacy_idlelock_take(&file->master->lock);
+
+ mutex_lock(&dev->struct_mutex);
+ if (list_empty(&file_priv->obj_list)) {
+ mutex_unlock(&dev->struct_mutex);
+ drm_legacy_idlelock_release(&file->master->lock);
+
+ return;
+ }
+
+ via_driver_dma_quiescent(dev);
+
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ drm_mm_remove_node(&entry->mm_node);
+ kfree(entry);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_legacy_idlelock_release(&file->master->lock);
+
+ return;
+}
+
+static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ dev_priv->sarea = drm_legacy_getsarea(dev);
+ if (!dev_priv->sarea) {
+ DRM_ERROR("could not find sarea!\n");
+ dev->dev_private = (void *)dev_priv;
+ via_do_cleanup_map(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
+ if (!dev_priv->fb) {
+ DRM_ERROR("could not find framebuffer!\n");
+ dev->dev_private = (void *)dev_priv;
+ via_do_cleanup_map(dev);
+ return -EINVAL;
+ }
+ dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
+ if (!dev_priv->mmio) {
+ DRM_ERROR("could not find mmio region!\n");
+ dev->dev_private = (void *)dev_priv;
+ via_do_cleanup_map(dev);
+ return -EINVAL;
+ }
+
+ dev_priv->sarea_priv =
+ (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+ init->sarea_priv_offset);
+
+ dev_priv->agpAddr = init->agpAddr;
+
+ via_init_futex(dev_priv);
+
+ via_init_dmablit(dev);
+
+ dev->dev_private = (void *)dev_priv;
+ return 0;
+}
+
+int via_do_cleanup_map(struct drm_device *dev)
+{
+ via_dma_cleanup(dev);
+
+ return 0;
+}
+
+static int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_init_t *init = data;
+
+ DRM_DEBUG("\n");
+
+ switch (init->func) {
+ case VIA_INIT_MAP:
+ return via_do_init_map(dev, init);
+ case VIA_CLEANUP_MAP:
+ return via_do_cleanup_map(dev);
+ }
+
+ return -EINVAL;
+}
+
+static int via_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ drm_via_private_t *dev_priv;
+ int ret = 0;
+
+ dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ idr_init_base(&dev_priv->object_idr, 1);
+ dev->dev_private = (void *)dev_priv;
+
+ dev_priv->chipset = chipset;
+
+ pci_set_master(pdev);
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret) {
+ kfree(dev_priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void via_driver_unload(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ idr_destroy(&dev_priv->object_idr);
+
+ kfree(dev_priv);
+}
+
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
+
+/*
+ * Free space in command buffer.
+ */
+
+static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
+{
+ uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+ return ((hw_addr <= dev_priv->dma_low) ?
+ (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
+ (hw_addr - dev_priv->dma_low));
+}
+
+/*
+ * How much does the command regulator lag behind?
+ */
+
+static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
+{
+ uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+ return ((hw_addr <= dev_priv->dma_low) ?
+ (dev_priv->dma_low - hw_addr) :
+ (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
+}
+
+/*
+ * Check that the given size fits in the buffer, otherwise wait.
+ */
+
+static inline int
+via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
+{
+ uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ uint32_t cur_addr, hw_addr, next_addr;
+ volatile uint32_t *hw_addr_ptr;
+ uint32_t count;
+ hw_addr_ptr = dev_priv->hw_addr_ptr;
+ cur_addr = dev_priv->dma_low;
+ next_addr = cur_addr + size + 512 * 1024;
+ count = 1000000;
+ do {
+ hw_addr = *hw_addr_ptr - agp_base;
+ if (count-- == 0) {
+ DRM_ERROR
+ ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
+ hw_addr, cur_addr, next_addr);
+ return -1;
+ }
+ if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
+ msleep(1);
+ } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
+ return 0;
+}
+
+/*
+ * Checks whether the buffer head has reached the end. Rewinds the ring buffer
+ * when necessary.
+ *
+ * Returns virtual pointer to ring buffer.
+ */
+
+static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
+ unsigned int size)
+{
+ if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
+ dev_priv->dma_high) {
+ via_cmdbuf_rewind(dev_priv);
+ }
+ if (via_cmdbuf_wait(dev_priv, size) != 0)
+ return NULL;
+
+ return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+int via_dma_cleanup(struct drm_device *dev)
+{
+ if (dev->dev_private) {
+ drm_via_private_t *dev_priv =
+ (drm_via_private_t *) dev->dev_private;
+
+ if (dev_priv->ring.virtual_start && dev_priv->mmio) {
+ via_cmdbuf_reset(dev_priv);
+
+ drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = NULL;
+ }
+
+ }
+
+ return 0;
+}
+
+static int via_initialize(struct drm_device *dev,
+ drm_via_private_t *dev_priv,
+ drm_via_dma_init_t *init)
+{
+ if (!dev_priv || !dev_priv->mmio) {
+ DRM_ERROR("via_dma_init called before via_map_init\n");
+ return -EFAULT;
+ }
+
+ if (dev_priv->ring.virtual_start != NULL) {
+ DRM_ERROR("called again without calling cleanup\n");
+ return -EFAULT;
+ }
+
+ if (!dev->agp || !dev->agp->base) {
+ DRM_ERROR("called with no agp memory available\n");
+ return -EFAULT;
+ }
+
+ if (dev_priv->chipset == VIA_DX9_0) {
+ DRM_ERROR("AGP DMA is not supported on this chip\n");
+ return -EINVAL;
+ }
+
+ dev_priv->ring.map.offset = dev->agp->base + init->offset;
+ dev_priv->ring.map.size = init->size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_legacy_ioremap(&dev_priv->ring.map, dev);
+
+ if (dev_priv->ring.map.handle == NULL) {
+ via_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+ dev_priv->dma_ptr = dev_priv->ring.virtual_start;
+ dev_priv->dma_low = 0;
+ dev_priv->dma_high = init->size;
+ dev_priv->dma_wrap = init->size;
+ dev_priv->dma_offset = init->offset;
+ dev_priv->last_pause_ptr = NULL;
+ dev_priv->hw_addr_ptr =
+ (volatile uint32_t *)((char *)dev_priv->mmio->handle +
+ init->reg_pause_addr);
+
+ via_cmdbuf_start(dev_priv);
+
+ return 0;
+}
+
+static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_via_dma_init_t *init = data;
+ int retcode = 0;
+
+ switch (init->func) {
+ case VIA_INIT_DMA:
+ if (!capable(CAP_SYS_ADMIN))
+ retcode = -EPERM;
+ else
+ retcode = via_initialize(dev, dev_priv, init);
+ break;
+ case VIA_CLEANUP_DMA:
+ if (!capable(CAP_SYS_ADMIN))
+ retcode = -EPERM;
+ else
+ retcode = via_dma_cleanup(dev);
+ break;
+ case VIA_DMA_INITIALIZED:
+ retcode = (dev_priv->ring.virtual_start != NULL) ?
+ 0 : -EFAULT;
+ break;
+ default:
+ retcode = -EINVAL;
+ break;
+ }
+
+ return retcode;
+}
+
+static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
+{
+ drm_via_private_t *dev_priv;
+ uint32_t *vb;
+ int ret;
+
+ dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ if (dev_priv->ring.virtual_start == NULL) {
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
+ return -EFAULT;
+ }
+
+ if (cmd->size > VIA_PCI_BUF_SIZE)
+ return -ENOMEM;
+
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
+ return -EFAULT;
+
+ /*
+ * Running this function on AGP memory is dead slow. Therefore
+ * we run it on a temporary cacheable system memory buffer and
+ * copy it to AGP memory when ready.
+ */
+
+ if ((ret =
+ via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+ cmd->size, dev, 1))) {
+ return ret;
+ }
+
+ vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
+ if (vb == NULL)
+ return -EAGAIN;
+
+ memcpy(vb, dev_priv->pci_buf, cmd->size);
+
+ dev_priv->dma_low += cmd->size;
+
+ /*
+ * Small submissions somehow stall the CPU (AGP cache effects?), so
+ * pad them to a greater size.
+ */
+
+ if (cmd->size < 0x100)
+ via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
+ via_cmdbuf_pause(dev_priv);
+
+ return 0;
+}
+
+int via_driver_dma_quiescent(struct drm_device *dev)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ if (!via_wait_idle(dev_priv))
+ return -EBUSY;
+ return 0;
+}
+
+static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ return via_driver_dma_quiescent(dev);
+}
+
+static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_cmdbuffer_t *cmdbuf = data;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+ ret = via_dispatch_cmdbuffer(dev, cmdbuf);
+ return ret;
+}
+
+static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
+ drm_via_cmdbuffer_t *cmd)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (cmd->size > VIA_PCI_BUF_SIZE)
+ return -ENOMEM;
+ if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
+ return -EFAULT;
+
+ if ((ret =
+ via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+ cmd->size, dev, 0))) {
+ return ret;
+ }
+
+ ret =
+ via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
+ cmd->size);
+ return ret;
+}
+
+static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_cmdbuffer_t *cmdbuf = data;
+ int ret;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+ ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
+ return ret;
+}
+
+static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
+ uint32_t * vb, int qw_count)
+{
+ for (; qw_count > 0; --qw_count)
+ VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
+ return vb;
+}
+
+/*
+ * This function is used internally by ring buffer management code.
+ *
+ * Returns virtual pointer to ring buffer.
+ */
+static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
+{
+ return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+/*
+ * Hooks a segment of data into the tail of the ring-buffer by
+ * modifying the pause address stored in the buffer itself. If
+ * the regulator has already paused, restart it.
+ */
+static int via_hook_segment(drm_via_private_t *dev_priv,
+ uint32_t pause_addr_hi, uint32_t pause_addr_lo,
+ int no_pci_fire)
+{
+ int paused, count;
+ volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
+ uint32_t reader, ptr;
+ uint32_t diff;
+
+ paused = 0;
+ via_flush_write_combine();
+ (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
+
+ *paused_at = pause_addr_lo;
+ via_flush_write_combine();
+ (void) *paused_at;
+
+ reader = *(dev_priv->hw_addr_ptr);
+ ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
+ dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+ dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
+
+ /*
+ * If there is a possibility that the command reader will
+ * miss the new pause address and pause on the old one,
+ * we need to program the new start address using PCI.
+ */
+
+ diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+ count = 10000000;
+ while (diff == 0 && count--) {
+ paused = (via_read(dev_priv, 0x41c) & 0x80000000);
+ if (paused)
+ break;
+ reader = *(dev_priv->hw_addr_ptr);
+ diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+ }
+
+ paused = via_read(dev_priv, 0x41c) & 0x80000000;
+
+ if (paused && !no_pci_fire) {
+ reader = *(dev_priv->hw_addr_ptr);
+ diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+ diff &= (dev_priv->dma_high - 1);
+ if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
+ DRM_ERROR("Paused at incorrect address. "
+ "0x%08x, 0x%08x 0x%08x\n",
+ ptr, reader, dev_priv->dma_diff);
+ } else if (diff == 0) {
+ /*
+ * There is a concern that these writes may stall the PCI bus
+ * if the GPU is not idle. However, idling the GPU first
+ * doesn't make a difference.
+ */
+
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
+ }
+ }
+ return paused;
+}
+
+static int via_wait_idle(drm_via_private_t *dev_priv)
+{
+ int count = 10000000;
+
+ while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+ ;
+
+ while (count && (via_read(dev_priv, VIA_REG_STATUS) &
+ (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
+ VIA_3D_ENG_BUSY)))
+ --count;
+ return count;
+}
+
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
+ uint32_t addr, uint32_t *cmd_addr_hi,
+ uint32_t *cmd_addr_lo, int skip_wait)
+{
+ uint32_t agp_base;
+ uint32_t cmd_addr, addr_lo, addr_hi;
+ uint32_t *vb;
+ uint32_t qw_pad_count;
+
+ if (!skip_wait)
+ via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
+
+ vb = via_get_dma(dev_priv);
+ VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
+ (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+ agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
+ ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
+
+ cmd_addr = (addr) ? addr :
+ agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
+ addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
+ (cmd_addr & HC_HAGPBpL_MASK));
+ addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
+
+ vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
+ VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
+ return vb;
+}
+
+static void via_cmdbuf_start(drm_via_private_t *dev_priv)
+{
+ uint32_t pause_addr_lo, pause_addr_hi;
+ uint32_t start_addr, start_addr_lo;
+ uint32_t end_addr, end_addr_lo;
+ uint32_t command;
+ uint32_t agp_base;
+ uint32_t ptr;
+ uint32_t reader;
+ int count;
+
+ dev_priv->dma_low = 0;
+
+ agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+ start_addr = agp_base;
+ end_addr = agp_base + dev_priv->dma_high;
+
+ start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
+ end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
+ command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
+ ((end_addr & 0xff000000) >> 16));
+
+ dev_priv->last_pause_ptr =
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
+ &pause_addr_hi, &pause_addr_lo, 1) - 1;
+
+ via_flush_write_combine();
+ (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
+
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, command);
+ via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
+
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
+ wmb();
+ via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
+
+ dev_priv->dma_diff = 0;
+
+ count = 10000000;
+ while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
+
+ reader = *(dev_priv->hw_addr_ptr);
+ ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
+ dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+ /*
+ * This is the difference between where we tell the
+ * command reader to pause and where it actually pauses.
+ * This differs between hw implementations, so we need to
+ * detect it.
+ */
+
+ dev_priv->dma_diff = ptr - reader;
+}
+
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
+{
+ uint32_t *vb;
+
+ via_cmdbuf_wait(dev_priv, qwords + 2);
+ vb = via_get_dma(dev_priv);
+ VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
+ via_align_buffer(dev_priv, vb, qwords);
+}
+
+static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
+{
+ uint32_t *vb = via_get_dma(dev_priv);
+ SetReg2DAGP(0x0C, (0 | (0 << 16)));
+ SetReg2DAGP(0x10, 0 | (0 << 16));
+ SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
+}
+
+static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
+{
+ uint32_t pause_addr_lo, pause_addr_hi;
+ uint32_t jump_addr_lo, jump_addr_hi;
+ volatile uint32_t *last_pause_ptr;
+ uint32_t dma_low_save1, dma_low_save2;
+
+ via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
+ &jump_addr_lo, 0);
+
+ dev_priv->dma_wrap = dev_priv->dma_low;
+
+ /*
+ * Wrap command buffer to the beginning.
+ */
+
+ dev_priv->dma_low = 0;
+ if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
+ DRM_ERROR("via_cmdbuf_jump failed\n");
+
+ via_dummy_bitblt(dev_priv);
+ via_dummy_bitblt(dev_priv);
+
+ last_pause_ptr =
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0) - 1;
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0);
+
+ *last_pause_ptr = pause_addr_lo;
+ dma_low_save1 = dev_priv->dma_low;
+
+ /*
+ * Now, set a trap that will pause the regulator if it tries to rerun the old
+ * command buffer. (This may happen if via_hook_segment detects a command regulator pause
+ * and reissues the jump command over PCI, while the regulator has already taken the jump
+ * and actually paused at the current buffer end.)
+ * There appears to be no other way to detect this condition, since the hw_addr_pointer
+ * does not seem to get updated immediately when a jump occurs.
+ */
+
+ last_pause_ptr =
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0) - 1;
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ &pause_addr_lo, 0);
+ *last_pause_ptr = pause_addr_lo;
+
+ dma_low_save2 = dev_priv->dma_low;
+ dev_priv->dma_low = dma_low_save1;
+ via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
+ dev_priv->dma_low = dma_low_save2;
+ via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
+{
+ via_cmdbuf_jump(dev_priv);
+}
+
+static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
+{
+ uint32_t pause_addr_lo, pause_addr_hi;
+
+ via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
+ via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
+{
+ via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
+}
+
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
+{
+ via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
+ via_wait_idle(dev_priv);
+}
+
+/*
+ * User interface to the space and lag functions.
+ */
+
+static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ drm_via_cmdbuf_size_t *d_siz = data;
+ int ret = 0;
+ uint32_t tmp_size, count;
+ drm_via_private_t *dev_priv;
+
+ DRM_DEBUG("\n");
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ dev_priv = (drm_via_private_t *) dev->dev_private;
+
+ if (dev_priv->ring.virtual_start == NULL) {
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
+ return -EFAULT;
+ }
+
+ count = 1000000;
+ tmp_size = d_siz->size;
+ switch (d_siz->func) {
+ case VIA_CMDBUF_SPACE:
+ while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
+ && --count) {
+ if (!d_siz->wait)
+ break;
+ }
+ if (!count) {
+ DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
+ ret = -EAGAIN;
+ }
+ break;
+ case VIA_CMDBUF_LAG:
+ while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
+ && --count) {
+ if (!d_siz->wait)
+ break;
+ }
+ if (!count) {
+ DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
+ ret = -EAGAIN;
+ }
+ break;
+ default:
+ ret = -EFAULT;
+ }
+ d_siz->size = tmp_size;
+
+ return ret;
+}
+
+static const struct drm_ioctl_desc via_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+};
+
+static int via_max_ioctl = ARRAY_SIZE(via_ioctls);
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
+static struct pci_device_id pciidlist[] = {
+ viadrv_PCI_IDS
+};
+
+static const struct file_operations via_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_legacy_mmap,
+ .poll = drm_poll,
+ .compat_ioctl = drm_compat_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
+ .load = via_driver_load,
+ .unload = via_driver_unload,
+ .open = via_driver_open,
+ .preclose = via_reclaim_buffers_locked,
+ .postclose = via_driver_postclose,
+ .context_dtor = via_final_context,
+ .get_vblank_counter = via_get_vblank_counter,
+ .enable_vblank = via_enable_vblank,
+ .disable_vblank = via_disable_vblank,
+ .irq_preinstall = via_driver_irq_preinstall,
+ .irq_postinstall = via_driver_irq_postinstall,
+ .irq_uninstall = via_driver_irq_uninstall,
+ .irq_handler = via_driver_irq_handler,
+ .dma_quiescent = via_driver_dma_quiescent,
+ .lastclose = via_lastclose,
+ .ioctls = via_ioctls,
+ .fops = &via_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static struct pci_driver via_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
+static int __init via_init(void)
+{
+ driver.num_ioctls = via_max_ioctl;
+ via_init_command_verifier();
+ return drm_legacy_pci_init(&driver, &via_pci_driver);
+}
+
+static void __exit via_exit(void)
+{
+ drm_legacy_pci_exit(&driver, &via_pci_driver);
+}
+
+module_init(via_init);
+module_exit(via_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
deleted file mode 100644
index 5da38082821f..000000000000
--- a/drivers/gpu/drm/via/via_drv.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-
-static int via_driver_open(struct drm_device *dev, struct drm_file *file)
-{
- struct via_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- INIT_LIST_HEAD(&file_priv->obj_list);
-
- return 0;
-}
-
-static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct via_file_private *file_priv = file->driver_priv;
-
- kfree(file_priv);
-}
-
-static struct pci_device_id pciidlist[] = {
- viadrv_PCI_IDS
-};
-
-static const struct file_operations via_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
- .load = via_driver_load,
- .unload = via_driver_unload,
- .open = via_driver_open,
- .preclose = via_reclaim_buffers_locked,
- .postclose = via_driver_postclose,
- .context_dtor = via_final_context,
- .get_vblank_counter = via_get_vblank_counter,
- .enable_vblank = via_enable_vblank,
- .disable_vblank = via_disable_vblank,
- .irq_preinstall = via_driver_irq_preinstall,
- .irq_postinstall = via_driver_irq_postinstall,
- .irq_uninstall = via_driver_irq_uninstall,
- .irq_handler = via_driver_irq_handler,
- .dma_quiescent = via_driver_dma_quiescent,
- .lastclose = via_lastclose,
- .ioctls = via_ioctls,
- .fops = &via_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver via_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init via_init(void)
-{
- driver.num_ioctls = via_max_ioctl;
- via_init_command_verifier();
- return drm_legacy_pci_init(&driver, &via_pci_driver);
-}
-
-static void __exit via_exit(void)
-{
- drm_legacy_pci_exit(&driver, &via_pci_driver);
-}
-
-module_init(via_init);
-module_exit(via_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
deleted file mode 100644
index d5ad1b05bf77..000000000000
--- a/drivers/gpu/drm/via/via_drv.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _VIA_DRV_H_
-#define _VIA_DRV_H_
-
-#include <linux/irqreturn.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <linux/sched/signal.h>
-#include <linux/wait.h>
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_mm.h>
-#include <drm/via_drm.h>
-
-#define DRIVER_AUTHOR "Various"
-
-#define DRIVER_NAME "via"
-#define DRIVER_DESC "VIA Unichrome / Pro"
-#define DRIVER_DATE "20070202"
-
-#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 11
-#define DRIVER_PATCHLEVEL 1
-
-#include "via_verifier.h"
-
-#include "via_dmablit.h"
-
-#define VIA_PCI_BUF_SIZE 60000
-#define VIA_FIRE_BUF_SIZE 1024
-#define VIA_NUM_IRQS 4
-
-typedef struct drm_via_ring_buffer {
- drm_local_map_t map;
- char *virtual_start;
-} drm_via_ring_buffer_t;
-
-typedef uint32_t maskarray_t[5];
-
-typedef struct drm_via_irq {
- atomic_t irq_received;
- uint32_t pending_mask;
- uint32_t enable_mask;
- wait_queue_head_t irq_queue;
-} drm_via_irq_t;
-
-typedef struct drm_via_private {
- drm_via_sarea_t *sarea_priv;
- drm_local_map_t *sarea;
- drm_local_map_t *fb;
- drm_local_map_t *mmio;
- unsigned long agpAddr;
- wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
- char *dma_ptr;
- unsigned int dma_low;
- unsigned int dma_high;
- unsigned int dma_offset;
- uint32_t dma_wrap;
- volatile uint32_t *last_pause_ptr;
- volatile uint32_t *hw_addr_ptr;
- drm_via_ring_buffer_t ring;
- ktime_t last_vblank;
- int last_vblank_valid;
- ktime_t nsec_per_vblank;
- atomic_t vbl_received;
- drm_via_state_t hc_state;
- char pci_buf[VIA_PCI_BUF_SIZE];
- const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
- uint32_t num_fire_offsets;
- int chipset;
- drm_via_irq_t via_irqs[VIA_NUM_IRQS];
- unsigned num_irqs;
- maskarray_t *irq_masks;
- uint32_t irq_enable_mask;
- uint32_t irq_pending_mask;
- int *irq_map;
- unsigned int idle_fault;
- int vram_initialized;
- struct drm_mm vram_mm;
- int agp_initialized;
- struct drm_mm agp_mm;
- /** Mapping of userspace keys to mm objects */
- struct idr object_idr;
- unsigned long vram_offset;
- unsigned long agp_offset;
- drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
- uint32_t dma_diff;
-} drm_via_private_t;
-
-struct via_file_private {
- struct list_head obj_list;
-};
-
-enum via_family {
- VIA_OTHER = 0, /* Baseline */
- VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
- VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
-};
-
-/* VIA MMIO register access */
-static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
-{
- return readl((void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
- u32 val)
-{
- writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
- u32 val)
-{
- writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write8_mask(struct drm_via_private *dev_priv,
- u32 reg, u32 mask, u32 val)
-{
- u32 tmp;
-
- tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
- tmp = (tmp & ~mask) | (val & mask);
- writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-/*
- * Poll in a loop waiting for 'contidition' to be true.
- * Note: A direct replacement with wait_event_interruptible_timeout()
- * will not work unless driver is updated to emit wake_up()
- * in relevant places that can impact the 'condition'
- *
- * Returns:
- * ret keeps current value if 'condition' becomes true
- * ret = -BUSY if timeout happens
- * ret = -EINTR if a signal interrupted the waiting period
- */
-#define VIA_WAIT_ON( ret, queue, timeout, condition ) \
-do { \
- DECLARE_WAITQUEUE(entry, current); \
- unsigned long end = jiffies + (timeout); \
- add_wait_queue(&(queue), &entry); \
- \
- for (;;) { \
- __set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (time_after_eq(jiffies, end)) { \
- ret = -EBUSY; \
- break; \
- } \
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
- if (signal_pending(current)) { \
- ret = -EINTR; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- remove_wait_queue(&(queue), &entry); \
-} while (0)
-
-extern const struct drm_ioctl_desc via_ioctls[];
-extern int via_max_ioctl;
-
-extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
-
-extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
-extern void via_driver_unload(struct drm_device *dev);
-
-extern int via_init_context(struct drm_device *dev, int context);
-extern int via_final_context(struct drm_device *dev, int context);
-
-extern int via_do_cleanup_map(struct drm_device *dev);
-extern u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-extern int via_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void via_disable_vblank(struct drm_device *dev, unsigned int pipe);
-
-extern irqreturn_t via_driver_irq_handler(int irq, void *arg);
-extern void via_driver_irq_preinstall(struct drm_device *dev);
-extern int via_driver_irq_postinstall(struct drm_device *dev);
-extern void via_driver_irq_uninstall(struct drm_device *dev);
-
-extern int via_dma_cleanup(struct drm_device *dev);
-extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(struct drm_device *dev);
-extern void via_init_futex(drm_via_private_t *dev_priv);
-extern void via_cleanup_futex(drm_via_private_t *dev_priv);
-extern void via_release_futex(drm_via_private_t *dev_priv, int context);
-
-extern void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void via_lastclose(struct drm_device *dev);
-
-extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
-extern void via_init_dmablit(struct drm_device *dev);
-
-#endif
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
deleted file mode 100644
index faeae5d881fb..000000000000
--- a/drivers/gpu/drm/via/via_irq.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/* via_irq.c
- *
- * Copyright 2004 BEAM Ltd.
- * Copyright 2002 Tungsten Graphics, Inc.
- * Copyright 2005 Thomas Hellstrom.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * BEAM LTD, TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Terry Barnaby <terry1@beam.ltd.uk>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Thomas Hellstrom <unichrome@shipmail.org>
- *
- * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
- * interrupt, as well as an infrastructure to handle other interrupts of the chip.
- * The refresh rate is also calculated for video playback sync purposes.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_vblank.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-#define VIA_REG_INTERRUPT 0x200
-
-/* VIA_REG_INTERRUPT */
-#define VIA_IRQ_GLOBAL (1 << 31)
-#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
-#define VIA_IRQ_VBLANK_PENDING (1 << 3)
-#define VIA_IRQ_HQV0_ENABLE (1 << 11)
-#define VIA_IRQ_HQV1_ENABLE (1 << 25)
-#define VIA_IRQ_HQV0_PENDING (1 << 9)
-#define VIA_IRQ_HQV1_PENDING (1 << 10)
-#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
-#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
-#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
-#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
-#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
-#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
-#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
-#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
-
-
-/*
- * Device-specific IRQs go here. This type might need to be extended with
- * the register if there are multiple IRQ control registers.
- * Currently we activate the HQV interrupts of Unichrome Pro group A.
- */
-
-static maskarray_t via_pro_group_a_irqs[] = {
- {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
- 0x00000000 },
- {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
- 0x00000000 },
- {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
- {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
-};
-static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
-static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
-
-static maskarray_t via_unichrome_irqs[] = {
- {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
- {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
-};
-static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
-static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
-
-
-u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- if (pipe != 0)
- return 0;
-
- return atomic_read(&dev_priv->vbl_received);
-}
-
-irqreturn_t via_driver_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
- int handled = 0;
- ktime_t cur_vblank;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
- int i;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- if (status & VIA_IRQ_VBLANK_PENDING) {
- atomic_inc(&dev_priv->vbl_received);
- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
- cur_vblank = ktime_get();
- if (dev_priv->last_vblank_valid) {
- dev_priv->nsec_per_vblank =
- ktime_sub(cur_vblank,
- dev_priv->last_vblank) >> 4;
- }
- dev_priv->last_vblank = cur_vblank;
- dev_priv->last_vblank_valid = 1;
- }
- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
- DRM_DEBUG("nsec per vblank is: %llu\n",
- ktime_to_ns(dev_priv->nsec_per_vblank));
- }
- drm_handle_vblank(dev, 0);
- handled = 1;
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- if (status & cur_irq->pending_mask) {
- atomic_inc(&cur_irq->irq_received);
- wake_up(&cur_irq->irq_queue);
- handled = 1;
- if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
- via_dmablit_handler(dev, 0, 1);
- else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
- via_dmablit_handler(dev, 1, 1);
- }
- cur_irq++;
- }
-
- /* Acknowledge interrupts */
- via_write(dev_priv, VIA_REG_INTERRUPT, status);
-
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
-{
- u32 status;
-
- if (dev_priv) {
- /* Acknowledge interrupts */
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status |
- dev_priv->irq_pending_mask);
- }
-}
-
-int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- u32 status;
-
- if (pipe != 0) {
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
- return -EINVAL;
- }
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
-
- return 0;
-}
-
-void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- u32 status;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
-
- if (pipe != 0)
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
-}
-
-static int
-via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
- unsigned int *sequence)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- unsigned int cur_irq_sequence;
- drm_via_irq_t *cur_irq;
- int ret = 0;
- maskarray_t *masks;
- int real_irq;
-
- DRM_DEBUG("\n");
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- if (irq >= drm_via_irq_num) {
- DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
- return -EINVAL;
- }
-
- real_irq = dev_priv->irq_map[irq];
-
- if (real_irq < 0) {
- DRM_ERROR("Video IRQ %d not available on this hardware.\n",
- irq);
- return -EINVAL;
- }
-
- masks = dev_priv->irq_masks;
- cur_irq = dev_priv->via_irqs + real_irq;
-
- if (masks[real_irq][2] && !force_sequence) {
- VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
- masks[irq][4]));
- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
- } else {
- VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- (((cur_irq_sequence =
- atomic_read(&cur_irq->irq_received)) -
- *sequence) <= (1 << 23)));
- }
- *sequence = cur_irq_sequence;
- return ret;
-}
-
-
-/*
- * drm_dma.h hooks
- */
-
-void via_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
- drm_via_irq_t *cur_irq;
- int i;
-
- DRM_DEBUG("dev_priv: %p\n", dev_priv);
- if (dev_priv) {
- cur_irq = dev_priv->via_irqs;
-
- dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
- dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
-
- if (dev_priv->chipset == VIA_PRO_GROUP_A ||
- dev_priv->chipset == VIA_DX9_0) {
- dev_priv->irq_masks = via_pro_group_a_irqs;
- dev_priv->num_irqs = via_num_pro_group_a;
- dev_priv->irq_map = via_irqmap_pro_group_a;
- } else {
- dev_priv->irq_masks = via_unichrome_irqs;
- dev_priv->num_irqs = via_num_unichrome;
- dev_priv->irq_map = via_irqmap_unichrome;
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- atomic_set(&cur_irq->irq_received, 0);
- cur_irq->enable_mask = dev_priv->irq_masks[i][0];
- cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- init_waitqueue_head(&cur_irq->irq_queue);
- dev_priv->irq_enable_mask |= cur_irq->enable_mask;
- dev_priv->irq_pending_mask |= cur_irq->pending_mask;
- cur_irq++;
-
- DRM_DEBUG("Initializing IRQ %d\n", i);
- }
-
- dev_priv->last_vblank_valid = 0;
-
- /* Clear VSync interrupt regs */
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status &
- ~(dev_priv->irq_enable_mask));
-
- /* Clear bits if they're already high */
- viadrv_acknowledge_irqs(dev_priv);
- }
-}
-
-int via_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
-
- DRM_DEBUG("fun: %s\n", __func__);
- if (!dev_priv)
- return -EINVAL;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
- | dev_priv->irq_enable_mask);
-
- /* Some magic, oh for some data sheets ! */
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
-
- return 0;
-}
-
-void via_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
-
- DRM_DEBUG("\n");
- if (dev_priv) {
-
- /* Some more magic, oh for some data sheets ! */
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status &
- ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
- }
-}
-
-int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_irqwait_t *irqwait = data;
- struct timespec64 now;
- int ret = 0;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
- int force_sequence;
-
- if (irqwait->request.irq >= dev_priv->num_irqs) {
- DRM_ERROR("Trying to wait on unknown irq %d\n",
- irqwait->request.irq);
- return -EINVAL;
- }
-
- cur_irq += irqwait->request.irq;
-
- switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
- case VIA_IRQ_RELATIVE:
- irqwait->request.sequence +=
- atomic_read(&cur_irq->irq_received);
- irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- break;
- case VIA_IRQ_ABSOLUTE:
- break;
- default:
- return -EINVAL;
- }
-
- if (irqwait->request.type & VIA_IRQ_SIGNAL) {
- DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
- return -EINVAL;
- }
-
- force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
-
- ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
- &irqwait->request.sequence);
- ktime_get_ts64(&now);
- irqwait->reply.tval_sec = now.tv_sec;
- irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
-
- return ret;
-}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
deleted file mode 100644
index a9f6b0c11966..000000000000
--- a/drivers/gpu/drm/via/via_map.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_vblank.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
-
- dev_priv->sarea = drm_legacy_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("could not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
-
- dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
- if (!dev_priv->fb) {
- DRM_ERROR("could not find framebuffer!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
- dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio) {
- DRM_ERROR("could not find mmio region!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
-
- dev_priv->sarea_priv =
- (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
- dev_priv->agpAddr = init->agpAddr;
-
- via_init_futex(dev_priv);
-
- via_init_dmablit(dev);
-
- dev->dev_private = (void *)dev_priv;
- return 0;
-}
-
-int via_do_cleanup_map(struct drm_device *dev)
-{
- via_dma_cleanup(dev);
-
- return 0;
-}
-
-int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_init_t *init = data;
-
- DRM_DEBUG("\n");
-
- switch (init->func) {
- case VIA_INIT_MAP:
- return via_do_init_map(dev, init);
- case VIA_CLEANUP_MAP:
- return via_do_cleanup_map(dev);
- }
-
- return -EINVAL;
-}
-
-int via_driver_load(struct drm_device *dev, unsigned long chipset)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_via_private_t *dev_priv;
- int ret = 0;
-
- dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- idr_init(&dev_priv->object_idr);
- dev->dev_private = (void *)dev_priv;
-
- dev_priv->chipset = chipset;
-
- pci_set_master(pdev);
-
- ret = drm_vblank_init(dev, 1);
- if (ret) {
- kfree(dev_priv);
- return ret;
- }
-
- return 0;
-}
-
-void via_driver_unload(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- idr_destroy(&dev_priv->object_idr);
-
- kfree(dev_priv);
-}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
deleted file mode 100644
index c9afa1a51f23..000000000000
--- a/drivers/gpu/drm/via/via_mm.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/slab.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-#define VIA_MM_ALIGN_SHIFT 4
-#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
-
-struct via_memblock {
- struct drm_mm_node mm_node;
- struct list_head owner_list;
-};
-
-int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_agp_t *agp = data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
-
- dev_priv->agp_initialized = 1;
- dev_priv->agp_offset = agp->offset;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
- return 0;
-}
-
-int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_fb_t *fb = data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
-
- dev_priv->vram_initialized = 1;
- dev_priv->vram_offset = fb->offset;
-
- mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
-
- return 0;
-
-}
-
-int via_final_context(struct drm_device *dev, int context)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- via_release_futex(dev_priv, context);
-
- /* Linux specific until context tracking code gets ported to BSD */
- /* Last context, perform cleanup */
- if (list_is_singular(&dev->ctxlist)) {
- DRM_DEBUG("Last Context\n");
- drm_legacy_irq_uninstall(dev);
- via_cleanup_futex(dev_priv);
- via_do_cleanup_map(dev);
- }
- return 1;
-}
-
-void via_lastclose(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (!dev_priv)
- return;
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->vram_initialized) {
- drm_mm_takedown(&dev_priv->vram_mm);
- dev_priv->vram_initialized = 0;
- }
- if (dev_priv->agp_initialized) {
- drm_mm_takedown(&dev_priv->agp_mm);
- dev_priv->agp_initialized = 0;
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-int via_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- drm_via_mem_t *mem = data;
- int retval = 0, user_key;
- struct via_memblock *item;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- struct via_file_private *file_priv = file->driver_priv;
- unsigned long tmpSize;
-
- if (mem->type > VIA_MEM_AGP) {
- DRM_ERROR("Unknown memory type allocation\n");
- return -EINVAL;
- }
- mutex_lock(&dev->struct_mutex);
- if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
- dev_priv->agp_initialized)) {
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR
- ("Attempt to allocate from uninitialized memory manager.\n");
- return -EINVAL;
- }
-
- item = kzalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- retval = -ENOMEM;
- goto fail_alloc;
- }
-
- tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- if (mem->type == VIA_MEM_AGP)
- retval = drm_mm_insert_node(&dev_priv->agp_mm,
- &item->mm_node,
- tmpSize);
- else
- retval = drm_mm_insert_node(&dev_priv->vram_mm,
- &item->mm_node,
- tmpSize);
- if (retval)
- goto fail_alloc;
-
- retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
- if (retval < 0)
- goto fail_idr;
- user_key = retval;
-
- list_add(&item->owner_list, &file_priv->obj_list);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
- mem->index = user_key;
-
- return 0;
-
-fail_idr:
- drm_mm_remove_node(&item->mm_node);
-fail_alloc:
- kfree(item);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = 0;
- mem->size = 0;
- mem->index = 0;
- DRM_DEBUG("Video memory allocation failed\n");
-
- return retval;
-}
-
-int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- drm_via_mem_t *mem = data;
- struct via_memblock *obj;
-
- mutex_lock(&dev->struct_mutex);
- obj = idr_find(&dev_priv->object_idr, mem->index);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- idr_remove(&dev_priv->object_idr, mem->index);
- list_del(&obj->owner_list);
- drm_mm_remove_node(&obj->mm_node);
- kfree(obj);
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("free = 0x%lx\n", mem->index);
-
- return 0;
-}
-
-
-void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file)
-{
- struct via_file_private *file_priv = file->driver_priv;
- struct via_memblock *entry, *next;
-
- if (!(dev->master && file->master->lock.hw_lock))
- return;
-
- drm_legacy_idlelock_take(&file->master->lock);
-
- mutex_lock(&dev->struct_mutex);
- if (list_empty(&file_priv->obj_list)) {
- mutex_unlock(&dev->struct_mutex);
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
- }
-
- via_driver_dma_quiescent(dev);
-
- list_for_each_entry_safe(entry, next, &file_priv->obj_list,
- owner_list) {
- list_del(&entry->owner_list);
- drm_mm_remove_node(&entry->mm_node);
- kfree(entry);
- }
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
-}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
deleted file mode 100644
index 3d6e3a70f318..000000000000
--- a/drivers/gpu/drm/via/via_verifier.c
+++ /dev/null
@@ -1,1110 +0,0 @@
-/*
- * Copyright 2004 The Unichrome Project. All Rights Reserved.
- * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellstrom 2004, 2005.
- * This code was written using docs obtained under NDA from VIA Inc.
- *
- * Don't run this code directly on an AGP buffer. Due to cache problems it will
- * be very slow.
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-#include <drm/via_drm.h>
-
-#include "via_3d_reg.h"
-#include "via_drv.h"
-#include "via_verifier.h"
-
-typedef enum {
- state_command,
- state_header2,
- state_header1,
- state_vheader5,
- state_vheader6,
- state_error
-} verifier_state_t;
-
-typedef enum {
- no_check = 0,
- check_for_header2,
- check_for_header1,
- check_for_header2_err,
- check_for_header1_err,
- check_for_fire,
- check_z_buffer_addr0,
- check_z_buffer_addr1,
- check_z_buffer_addr_mode,
- check_destination_addr0,
- check_destination_addr1,
- check_destination_addr_mode,
- check_for_dummy,
- check_for_dd,
- check_texture_addr0,
- check_texture_addr1,
- check_texture_addr2,
- check_texture_addr3,
- check_texture_addr4,
- check_texture_addr5,
- check_texture_addr6,
- check_texture_addr7,
- check_texture_addr8,
- check_texture_addr_mode,
- check_for_vertex_count,
- check_number_texunits,
- forbidden_command
-} hazard_t;
-
-/*
- * Associates each hazard above with a possible multi-command
- * sequence. For example an address that is split over multiple
- * commands and that needs to be checked at the first command
- * that does not include any part of the address.
- */
-
-static drm_via_sequence_t seqs[] = {
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- z_address,
- z_address,
- z_address,
- dest_address,
- dest_address,
- dest_address,
- no_sequence,
- no_sequence,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- no_sequence
-};
-
-typedef struct {
- unsigned int code;
- hazard_t hz;
-} hz_init_t;
-
-static hz_init_t init_table1[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xee, check_for_fire},
- {0xcc, check_for_dummy},
- {0xdd, check_for_dd},
- {0x00, no_check},
- {0x10, check_z_buffer_addr0},
- {0x11, check_z_buffer_addr1},
- {0x12, check_z_buffer_addr_mode},
- {0x13, no_check},
- {0x14, no_check},
- {0x15, no_check},
- {0x23, no_check},
- {0x24, no_check},
- {0x33, no_check},
- {0x34, no_check},
- {0x35, no_check},
- {0x36, no_check},
- {0x37, no_check},
- {0x38, no_check},
- {0x39, no_check},
- {0x3A, no_check},
- {0x3B, no_check},
- {0x3C, no_check},
- {0x3D, no_check},
- {0x3E, no_check},
- {0x40, check_destination_addr0},
- {0x41, check_destination_addr1},
- {0x42, check_destination_addr_mode},
- {0x43, no_check},
- {0x44, no_check},
- {0x50, no_check},
- {0x51, no_check},
- {0x52, no_check},
- {0x53, no_check},
- {0x54, no_check},
- {0x55, no_check},
- {0x56, no_check},
- {0x57, no_check},
- {0x58, no_check},
- {0x70, no_check},
- {0x71, no_check},
- {0x78, no_check},
- {0x79, no_check},
- {0x7A, no_check},
- {0x7B, no_check},
- {0x7C, no_check},
- {0x7D, check_for_vertex_count}
-};
-
-static hz_init_t init_table2[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xee, check_for_fire},
- {0xcc, check_for_dummy},
- {0x00, check_texture_addr0},
- {0x01, check_texture_addr0},
- {0x02, check_texture_addr0},
- {0x03, check_texture_addr0},
- {0x04, check_texture_addr0},
- {0x05, check_texture_addr0},
- {0x06, check_texture_addr0},
- {0x07, check_texture_addr0},
- {0x08, check_texture_addr0},
- {0x09, check_texture_addr0},
- {0x20, check_texture_addr1},
- {0x21, check_texture_addr1},
- {0x22, check_texture_addr1},
- {0x23, check_texture_addr4},
- {0x2B, check_texture_addr3},
- {0x2C, check_texture_addr3},
- {0x2D, check_texture_addr3},
- {0x2E, check_texture_addr3},
- {0x2F, check_texture_addr3},
- {0x30, check_texture_addr3},
- {0x31, check_texture_addr3},
- {0x32, check_texture_addr3},
- {0x33, check_texture_addr3},
- {0x34, check_texture_addr3},
- {0x4B, check_texture_addr5},
- {0x4C, check_texture_addr6},
- {0x51, check_texture_addr7},
- {0x52, check_texture_addr8},
- {0x77, check_texture_addr2},
- {0x78, no_check},
- {0x79, no_check},
- {0x7A, no_check},
- {0x7B, check_texture_addr_mode},
- {0x7C, no_check},
- {0x7D, no_check},
- {0x7E, no_check},
- {0x7F, no_check},
- {0x80, no_check},
- {0x81, no_check},
- {0x82, no_check},
- {0x83, no_check},
- {0x85, no_check},
- {0x86, no_check},
- {0x87, no_check},
- {0x88, no_check},
- {0x89, no_check},
- {0x8A, no_check},
- {0x90, no_check},
- {0x91, no_check},
- {0x92, no_check},
- {0x93, no_check}
-};
-
-static hz_init_t init_table3[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xcc, check_for_dummy},
- {0x00, check_number_texunits}
-};
-
-static hazard_t table1[256];
-static hazard_t table2[256];
-static hazard_t table3[256];
-
-static __inline__ int
-eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
-{
- if ((buf_end - *buf) >= num_words) {
- *buf += num_words;
- return 0;
- }
- DRM_ERROR("Illegal termination of DMA command buffer\n");
- return 1;
-}
-
-/*
- * Partially stolen from drm_memory.h
- */
-
-static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
- unsigned long offset,
- unsigned long size,
- struct drm_device *dev)
-{
- struct drm_map_list *r_list;
- drm_local_map_t *map = seq->map_cache;
-
- if (map && map->offset <= offset
- && (offset + size) <= (map->offset + map->size)) {
- return map;
- }
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->offset <= offset
- && (offset + size) <= (map->offset + map->size)
- && !(map->flags & _DRM_RESTRICTED)
- && (map->type == _DRM_AGP)) {
- seq->map_cache = map;
- return map;
- }
- }
- return NULL;
-}
-
-/*
- * Require that all AGP texture levels reside in the same AGP map which should
- * be mappable by the client. This is not a big restriction.
- * FIXME: To actually enforce this security policy strictly, drm_rmmap
- * would have to wait for dma quiescent before removing an AGP map.
- * The via_drm_lookup_agp_map call in reality seems to take
- * very little CPU time.
- */
-
-static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
-{
- switch (cur_seq->unfinished) {
- case z_address:
- DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
- break;
- case dest_address:
- DRM_DEBUG("Destination start address is 0x%x\n",
- cur_seq->d_addr);
- break;
- case tex_address:
- if (cur_seq->agp_texture) {
- unsigned start =
- cur_seq->tex_level_lo[cur_seq->texture];
- unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
- unsigned long lo = ~0, hi = 0, tmp;
- uint32_t *addr, *pitch, *height, tex;
- unsigned i;
- int npot;
-
- if (end > 9)
- end = 9;
- if (start > 9)
- start = 9;
-
- addr =
- &(cur_seq->t_addr[tex = cur_seq->texture][start]);
- pitch = &(cur_seq->pitch[tex][start]);
- height = &(cur_seq->height[tex][start]);
- npot = cur_seq->tex_npot[tex];
- for (i = start; i <= end; ++i) {
- tmp = *addr++;
- if (tmp < lo)
- lo = tmp;
- if (i == 0 && npot)
- tmp += (*height++ * *pitch++);
- else
- tmp += (*height++ << *pitch++);
- if (tmp > hi)
- hi = tmp;
- }
-
- if (!via_drm_lookup_agp_map
- (cur_seq, lo, hi - lo, cur_seq->dev)) {
- DRM_ERROR
- ("AGP texture is not in allowed map\n");
- return 2;
- }
- }
- break;
- default:
- break;
- }
- cur_seq->unfinished = no_sequence;
- return 0;
-}
-
-static __inline__ int
-investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
-{
- register uint32_t tmp, *tmp_addr;
-
- if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
- int ret;
- if ((ret = finish_current_sequence(cur_seq)))
- return ret;
- }
-
- switch (hz) {
- case check_for_header2:
- if (cmd == HALCYON_HEADER2)
- return 1;
- return 0;
- case check_for_header1:
- if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- return 1;
- return 0;
- case check_for_header2_err:
- if (cmd == HALCYON_HEADER2)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
- break;
- case check_for_header1_err:
- if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
- break;
- case check_for_fire:
- if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
- break;
- case check_for_dummy:
- if (HC_DUMMY == cmd)
- return 0;
- DRM_ERROR("Illegal DMA HC_DUMMY command\n");
- break;
- case check_for_dd:
- if (0xdddddddd == cmd)
- return 0;
- DRM_ERROR("Illegal DMA 0xdddddddd command\n");
- break;
- case check_z_buffer_addr0:
- cur_seq->unfinished = z_address;
- cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
- (cmd & 0x00FFFFFF);
- return 0;
- case check_z_buffer_addr1:
- cur_seq->unfinished = z_address;
- cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
- ((cmd & 0xFF) << 24);
- return 0;
- case check_z_buffer_addr_mode:
- cur_seq->unfinished = z_address;
- if ((cmd & 0x0000C000) == 0)
- return 0;
- DRM_ERROR("Attempt to place Z buffer in system memory\n");
- return 2;
- case check_destination_addr0:
- cur_seq->unfinished = dest_address;
- cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
- (cmd & 0x00FFFFFF);
- return 0;
- case check_destination_addr1:
- cur_seq->unfinished = dest_address;
- cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
- ((cmd & 0xFF) << 24);
- return 0;
- case check_destination_addr_mode:
- cur_seq->unfinished = dest_address;
- if ((cmd & 0x0000C000) == 0)
- return 0;
- DRM_ERROR
- ("Attempt to place 3D drawing buffer in system memory\n");
- return 2;
- case check_texture_addr0:
- cur_seq->unfinished = tex_address;
- tmp = (cmd >> 24);
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
- *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
- return 0;
- case check_texture_addr1:
- cur_seq->unfinished = tex_address;
- tmp = ((cmd >> 24) - 0x20);
- tmp += tmp << 1;
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
- tmp_addr++;
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
- tmp_addr++;
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
- return 0;
- case check_texture_addr2:
- cur_seq->unfinished = tex_address;
- cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
- cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
- return 0;
- case check_texture_addr3:
- cur_seq->unfinished = tex_address;
- tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
- if (tmp == 0 &&
- (cmd & HC_HTXnEnPit_MASK)) {
- cur_seq->pitch[cur_seq->texture][tmp] =
- (cmd & HC_HTXnLnPit_MASK);
- cur_seq->tex_npot[cur_seq->texture] = 1;
- } else {
- cur_seq->pitch[cur_seq->texture][tmp] =
- (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
- cur_seq->tex_npot[cur_seq->texture] = 0;
- if (cmd & 0x000FFFFF) {
- DRM_ERROR
- ("Unimplemented texture level 0 pitch mode.\n");
- return 2;
- }
- }
- return 0;
- case check_texture_addr4:
- cur_seq->unfinished = tex_address;
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
- return 0;
- case check_texture_addr5:
- case check_texture_addr6:
- cur_seq->unfinished = tex_address;
- /*
- * Texture width. We don't care since we have the pitch.
- */
- return 0;
- case check_texture_addr7:
- cur_seq->unfinished = tex_address;
- tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
- tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
- tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
- tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
- tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
- tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
- tmp_addr[0] = 1 << (cmd & 0x0000000F);
- return 0;
- case check_texture_addr8:
- cur_seq->unfinished = tex_address;
- tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
- tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
- tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
- tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
- tmp_addr[6] = 1 << (cmd & 0x0000000F);
- return 0;
- case check_texture_addr_mode:
- cur_seq->unfinished = tex_address;
- if (2 == (tmp = cmd & 0x00000003)) {
- DRM_ERROR
- ("Attempt to fetch texture from system memory.\n");
- return 2;
- }
- cur_seq->agp_texture = (tmp == 3);
- cur_seq->tex_palette_size[cur_seq->texture] =
- (cmd >> 16) & 0x000000007;
- return 0;
- case check_for_vertex_count:
- cur_seq->vertex_count = cmd & 0x0000FFFF;
- return 0;
- case check_number_texunits:
- cur_seq->multitex = (cmd >> 3) & 1;
- return 0;
- default:
- DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
- return 2;
- }
- return 2;
-}
-
-static __inline__ int
-via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t *cur_seq)
-{
- drm_via_private_t *dev_priv =
- (drm_via_private_t *) cur_seq->dev->dev_private;
- uint32_t a_fire, bcmd, dw_count;
- int ret = 0;
- int have_fire;
- const uint32_t *buf = *buffer;
-
- while (buf < buf_end) {
- have_fire = 0;
- if ((buf_end - buf) < 2) {
- DRM_ERROR
- ("Unexpected termination of primitive list.\n");
- ret = 1;
- break;
- }
- if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
- break;
- bcmd = *buf++;
- if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
- DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
- *buf);
- ret = 1;
- break;
- }
- a_fire =
- *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
- HC_HE3Fire_MASK;
-
- /*
- * How many dwords per vertex ?
- */
-
- if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
- DRM_ERROR("Illegal B command vertex data for AGP.\n");
- ret = 1;
- break;
- }
-
- dw_count = 0;
- if (bcmd & (1 << 7))
- dw_count += (cur_seq->multitex) ? 2 : 1;
- if (bcmd & (1 << 8))
- dw_count += (cur_seq->multitex) ? 2 : 1;
- if (bcmd & (1 << 9))
- dw_count++;
- if (bcmd & (1 << 10))
- dw_count++;
- if (bcmd & (1 << 11))
- dw_count++;
- if (bcmd & (1 << 12))
- dw_count++;
- if (bcmd & (1 << 13))
- dw_count++;
- if (bcmd & (1 << 14))
- dw_count++;
-
- while (buf < buf_end) {
- if (*buf == a_fire) {
- if (dev_priv->num_fire_offsets >=
- VIA_FIRE_BUF_SIZE) {
- DRM_ERROR("Fire offset buffer full.\n");
- ret = 1;
- break;
- }
- dev_priv->fire_offsets[dev_priv->
- num_fire_offsets++] =
- buf;
- have_fire = 1;
- buf++;
- if (buf < buf_end && *buf == a_fire)
- buf++;
- break;
- }
- if ((*buf == HALCYON_HEADER2) ||
- ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
- DRM_ERROR("Missing Vertex Fire command, "
- "Stray Vertex Fire command or verifier "
- "lost sync.\n");
- ret = 1;
- break;
- }
- if ((ret = eat_words(&buf, buf_end, dw_count)))
- break;
- }
- if (buf >= buf_end && !have_fire) {
- DRM_ERROR("Missing Vertex Fire command or verifier "
- "lost sync.\n");
- ret = 1;
- break;
- }
- if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
- DRM_ERROR("AGP Primitive list end misaligned.\n");
- ret = 1;
- break;
- }
- }
- *buffer = buf;
- return ret;
-}
-
-static __inline__ verifier_state_t
-via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
- drm_via_state_t *hc_state)
-{
- uint32_t cmd;
- int hz_mode;
- hazard_t hz;
- const uint32_t *buf = *buffer;
- const hazard_t *hz_table;
-
- if ((buf_end - buf) < 2) {
- DRM_ERROR
- ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
- return state_error;
- }
- buf++;
- cmd = (*buf++ & 0xFFFF0000) >> 16;
-
- switch (cmd) {
- case HC_ParaType_CmdVdata:
- if (via_check_prim_list(&buf, buf_end, hc_state))
- return state_error;
- *buffer = buf;
- return state_command;
- case HC_ParaType_NotTex:
- hz_table = table1;
- break;
- case HC_ParaType_Tex:
- hc_state->texture = 0;
- hz_table = table2;
- break;
- case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
- hc_state->texture = 1;
- hz_table = table2;
- break;
- case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
- hz_table = table3;
- break;
- case HC_ParaType_Auto:
- if (eat_words(&buf, buf_end, 2))
- return state_error;
- *buffer = buf;
- return state_command;
- case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
- if (eat_words(&buf, buf_end, 32))
- return state_error;
- *buffer = buf;
- return state_command;
- case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
- case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
- DRM_ERROR("Texture palettes are rejected because of "
- "lack of info how to determine their size.\n");
- return state_error;
- case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
- DRM_ERROR("Fog factor palettes are rejected because of "
- "lack of info how to determine their size.\n");
- return state_error;
- default:
-
- /*
- * There are some unimplemented HC_ParaTypes here, that
- * need to be implemented if the Mesa driver is extended.
- */
-
- DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
- "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
- cmd, *(buf - 2));
- *buffer = buf;
- return state_error;
- }
-
- while (buf < buf_end) {
- cmd = *buf++;
- if ((hz = hz_table[cmd >> 24])) {
- if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
- if (hz_mode == 1) {
- buf--;
- break;
- }
- return state_error;
- }
- } else if (hc_state->unfinished &&
- finish_current_sequence(hc_state)) {
- return state_error;
- }
- }
- if (hc_state->unfinished && finish_current_sequence(hc_state))
- return state_error;
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end, int *fire_count)
-{
- uint32_t cmd;
- const uint32_t *buf = *buffer;
- const uint32_t *next_fire;
- int burst = 0;
-
- next_fire = dev_priv->fire_offsets[*fire_count];
- buf++;
- cmd = (*buf & 0xFFFF0000) >> 16;
- via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
- switch (cmd) {
- case HC_ParaType_CmdVdata:
- while ((buf < buf_end) &&
- (*fire_count < dev_priv->num_fire_offsets) &&
- (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
- while (buf <= next_fire) {
- via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
- (burst & 63), *buf++);
- burst += 4;
- }
- if ((buf < buf_end)
- && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
- buf++;
-
- if (++(*fire_count) < dev_priv->num_fire_offsets)
- next_fire = dev_priv->fire_offsets[*fire_count];
- }
- break;
- default:
- while (buf < buf_end) {
-
- if (*buf == HC_HEADER2 ||
- (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
- (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
- (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- break;
-
- via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
- (burst & 63), *buf++);
- burst += 4;
- }
- }
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ int verify_mmio_address(uint32_t address)
-{
- if ((address > 0x3FF) && (address < 0xC00)) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access 3D- or command burst area.\n");
- return 1;
- } else if ((address > 0xCFF) && (address < 0x1300)) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access PCI DMA area.\n");
- return 1;
- } else if (address > 0x13FF) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access VGA registers.\n");
- return 1;
- }
- return 0;
-}
-
-static __inline__ int
-verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
- uint32_t dwords)
-{
- const uint32_t *buf = *buffer;
-
- if (buf_end - buf < dwords) {
- DRM_ERROR("Illegal termination of video command.\n");
- return 1;
- }
- while (dwords--) {
- if (*buf++) {
- DRM_ERROR("Illegal video command tail.\n");
- return 1;
- }
- }
- *buffer = buf;
- return 0;
-}
-
-static __inline__ verifier_state_t
-via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
-{
- uint32_t cmd;
- const uint32_t *buf = *buffer;
- verifier_state_t ret = state_command;
-
- while (buf < buf_end) {
- cmd = *buf;
- if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
- (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- DRM_ERROR("Invalid HALCYON_HEADER1 command. "
- "Attempt to access 3D- or command burst area.\n");
- ret = state_error;
- break;
- } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- DRM_ERROR("Invalid HALCYON_HEADER1 command. "
- "Attempt to access VGA registers.\n");
- ret = state_error;
- break;
- } else {
- buf += 2;
- }
- }
- *buffer = buf;
- return ret;
-}
-
-static __inline__ verifier_state_t
-via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
- register uint32_t cmd;
- const uint32_t *buf = *buffer;
-
- while (buf < buf_end) {
- cmd = *buf;
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
- buf++;
- }
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
-{
- uint32_t data;
- const uint32_t *buf = *buffer;
-
- if (buf_end - buf < 4) {
- DRM_ERROR("Illegal termination of video header5 command\n");
- return state_error;
- }
-
- data = *buf++ & ~VIA_VIDEOMASK;
- if (verify_mmio_address(data))
- return state_error;
-
- data = *buf++;
- if (*buf++ != 0x00F50000) {
- DRM_ERROR("Illegal header5 header data\n");
- return state_error;
- }
- if (*buf++ != 0x00000000) {
- DRM_ERROR("Illegal header5 header data\n");
- return state_error;
- }
- if (eat_words(&buf, buf_end, data))
- return state_error;
- if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
- return state_error;
- *buffer = buf;
- return state_command;
-
-}
-
-static __inline__ verifier_state_t
-via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
- uint32_t addr, count, i;
- const uint32_t *buf = *buffer;
-
- addr = *buf++ & ~VIA_VIDEOMASK;
- i = count = *buf;
- buf += 3;
- while (i--)
- via_write(dev_priv, addr, *buf++);
- if (count & 3)
- buf += 4 - (count & 3);
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
-{
- uint32_t data;
- const uint32_t *buf = *buffer;
- uint32_t i;
-
- if (buf_end - buf < 4) {
- DRM_ERROR("Illegal termination of video header6 command\n");
- return state_error;
- }
- buf++;
- data = *buf++;
- if (*buf++ != 0x00F60000) {
- DRM_ERROR("Illegal header6 header data\n");
- return state_error;
- }
- if (*buf++ != 0x00000000) {
- DRM_ERROR("Illegal header6 header data\n");
- return state_error;
- }
- if ((buf_end - buf) < (data << 1)) {
- DRM_ERROR("Illegal termination of video header6 command\n");
- return state_error;
- }
- for (i = 0; i < data; ++i) {
- if (verify_mmio_address(*buf++))
- return state_error;
- buf++;
- }
- data <<= 1;
- if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
- return state_error;
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
-
- uint32_t addr, count, i;
- const uint32_t *buf = *buffer;
-
- i = count = *++buf;
- buf += 3;
- while (i--) {
- addr = *buf++;
- via_write(dev_priv, addr, *buf++);
- }
- count <<= 1;
- if (count & 3)
- buf += 4 - (count & 3);
- *buffer = buf;
- return state_command;
-}
-
-int
-via_verify_command_stream(const uint32_t * buf, unsigned int size,
- struct drm_device * dev, int agp)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_state_t *hc_state = &dev_priv->hc_state;
- drm_via_state_t saved_state = *hc_state;
- uint32_t cmd;
- const uint32_t *buf_end = buf + (size >> 2);
- verifier_state_t state = state_command;
- int cme_video;
- int supported_3d;
-
- cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
- dev_priv->chipset == VIA_DX9_0);
-
- supported_3d = dev_priv->chipset != VIA_DX9_0;
-
- hc_state->dev = dev;
- hc_state->unfinished = no_sequence;
- hc_state->map_cache = NULL;
- hc_state->agp = agp;
- hc_state->buf_start = buf;
- dev_priv->num_fire_offsets = 0;
-
- while (buf < buf_end) {
-
- switch (state) {
- case state_header2:
- state = via_check_header2(&buf, buf_end, hc_state);
- break;
- case state_header1:
- state = via_check_header1(&buf, buf_end);
- break;
- case state_vheader5:
- state = via_check_vheader5(&buf, buf_end);
- break;
- case state_vheader6:
- state = via_check_vheader6(&buf, buf_end);
- break;
- case state_command:
- cmd = *buf;
- if ((cmd == HALCYON_HEADER2) && supported_3d)
- state = state_header2;
- else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- state = state_header1;
- else if (cme_video
- && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
- state = state_vheader5;
- else if (cme_video
- && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- state = state_vheader6;
- else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
- DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
- state = state_error;
- } else {
- DRM_ERROR
- ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
- cmd);
- state = state_error;
- }
- break;
- case state_error:
- default:
- *hc_state = saved_state;
- return -EINVAL;
- }
- }
- if (state == state_error) {
- *hc_state = saved_state;
- return -EINVAL;
- }
- return 0;
-}
-
-int
-via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
- unsigned int size)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- uint32_t cmd;
- const uint32_t *buf_end = buf + (size >> 2);
- verifier_state_t state = state_command;
- int fire_count = 0;
-
- while (buf < buf_end) {
-
- switch (state) {
- case state_header2:
- state =
- via_parse_header2(dev_priv, &buf, buf_end,
- &fire_count);
- break;
- case state_header1:
- state = via_parse_header1(dev_priv, &buf, buf_end);
- break;
- case state_vheader5:
- state = via_parse_vheader5(dev_priv, &buf, buf_end);
- break;
- case state_vheader6:
- state = via_parse_vheader6(dev_priv, &buf, buf_end);
- break;
- case state_command:
- cmd = *buf;
- if (cmd == HALCYON_HEADER2)
- state = state_header2;
- else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- state = state_header1;
- else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
- state = state_vheader5;
- else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- state = state_vheader6;
- else {
- DRM_ERROR
- ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
- cmd);
- state = state_error;
- }
- break;
- case state_error:
- default:
- return -EINVAL;
- }
- }
- if (state == state_error)
- return -EINVAL;
- return 0;
-}
-
-static void
-setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
-{
- int i;
-
- for (i = 0; i < 256; ++i)
- table[i] = forbidden_command;
-
- for (i = 0; i < size; ++i)
- table[init_table[i].code] = init_table[i].hz;
-}
-
-void via_init_command_verifier(void)
-{
- setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
- setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
- setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
-}
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
deleted file mode 100644
index 26b6d361ab95..000000000000
--- a/drivers/gpu/drm/via/via_verifier.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2004 The Unichrome Project. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellström 2004.
- */
-
-#ifndef _VIA_VERIFIER_H_
-#define _VIA_VERIFIER_H_
-
-typedef enum {
- no_sequence = 0,
- z_address,
- dest_address,
- tex_address
-} drm_via_sequence_t;
-
-typedef struct {
- unsigned texture;
- uint32_t z_addr;
- uint32_t d_addr;
- uint32_t t_addr[2][10];
- uint32_t pitch[2][10];
- uint32_t height[2][10];
- uint32_t tex_level_lo[2];
- uint32_t tex_level_hi[2];
- uint32_t tex_palette_size[2];
- uint32_t tex_npot[2];
- drm_via_sequence_t unfinished;
- int agp_texture;
- int multitex;
- struct drm_device *dev;
- drm_local_map_t *map_cache;
- uint32_t vertex_count;
- int agp;
- const uint32_t *buf_start;
-} drm_via_state_t;
-
-extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
- struct drm_device *dev, int agp);
-extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
- unsigned int size);
-
-#endif
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
deleted file mode 100644
index 53b1f58f99b4..000000000000
--- a/drivers/gpu/drm/via/via_video.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Thomas Hellstrom 2005.
- *
- * Video and XvMC related functions.
- */
-
-#include <drm/drm_device.h>
-#include <drm/via_drm.h>
-
-#include "via_drv.h"
-
-void via_init_futex(drm_via_private_t *dev_priv)
-{
- unsigned int i;
-
- DRM_DEBUG("\n");
-
- for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- init_waitqueue_head(&(dev_priv->decoder_queue[i]));
- XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
- }
-}
-
-void via_cleanup_futex(drm_via_private_t *dev_priv)
-{
-}
-
-void via_release_futex(drm_via_private_t *dev_priv, int context)
-{
- unsigned int i;
- volatile int *lock;
-
- if (!dev_priv->sarea_priv)
- return;
-
- for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
- if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
- if (_DRM_LOCK_IS_HELD(*lock)
- && (*lock & _DRM_LOCK_CONT)) {
- wake_up(&(dev_priv->decoder_queue[i]));
- }
- *lock = 0;
- }
- }
-}
-
-int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_futex_t *fx = data;
- volatile int *lock;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
- int ret = 0;
-
- DRM_DEBUG("\n");
-
- if (fx->lock >= VIA_NR_XVMC_LOCKS)
- return -EFAULT;
-
- lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
-
- switch (fx->func) {
- case VIA_FUTEX_WAIT:
- VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
- (fx->ms / 10) * (HZ / 100), *lock != fx->val);
- return ret;
- case VIA_FUTEX_WAKE:
- wake_up(&(dev_priv->decoder_queue[fx->lock]));
- return 0;
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 5c7f198c0712..9ea7611a9e0f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -349,6 +349,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
vgdev->ddev->mode_config.max_width = XRES_MAX;
vgdev->ddev->mode_config.max_height = YRES_MAX;
+ vgdev->ddev->mode_config.fb_modifiers_not_supported = true;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 5f25a8d15464..0035affc3e59 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -46,12 +46,11 @@ static int virtio_gpu_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
-static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+static int virtio_gpu_pci_quirk(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
- char unique[20];
int ret;
DRM_INFO("pci: %s detected at %s\n",
@@ -63,39 +62,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
return ret;
}
- /*
- * Normally the drm_dev_set_unique() call is done by core DRM.
- * The following comment covers, why virtio cannot rely on it.
- *
- * Unlike the other virtual GPU drivers, virtio abstracts the
- * underlying bus type by using struct virtio_device.
- *
- * Hence the dev_is_pci() check, used in core DRM, will fail
- * and the unique returned will be the virtio_device "virtio0",
- * while a "pci:..." one is required.
- *
- * A few other ideas were considered:
- * - Extend the dev_is_pci() check [in drm_set_busid] to
- * consider virtio.
- * Seems like a bigger hack than what we have already.
- *
- * - Point drm_device::dev to the parent of the virtio_device
- * Semantic changes:
- * * Using the wrong device for i2c, framebuffer_alloc and
- * prime import.
- * Visual changes:
- * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
- * will print the wrong information.
- *
- * We could address the latter issues, by introducing
- * drm_device::bus_dev, ... which would be used solely for this.
- *
- * So for the moment keep things as-is, with a bulky comment
- * for the next person who feels like removing this
- * drm_dev_set_unique() quirk.
- */
- snprintf(unique, sizeof(unique), "pci:%s", pname);
- return drm_dev_set_unique(dev, unique);
+ return 0;
}
static int virtio_gpu_probe(struct virtio_device *vdev)
@@ -109,18 +76,24 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
if (virtio_gpu_modeset == 0)
return -EINVAL;
- dev = drm_dev_alloc(&driver, &vdev->dev);
+ /*
+ * The virtio-gpu device is a virtual device that doesn't have DMA
+ * ops assigned to it, nor DMA mask set and etc. Its parent device
+ * is actual GPU device we want to use it for the DRM's device in
+ * order to benefit from using generic DRM APIs.
+ */
+ dev = drm_dev_alloc(&driver, vdev->dev.parent);
if (IS_ERR(dev))
return PTR_ERR(dev);
vdev->priv = dev;
- if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
- ret = virtio_gpu_pci_quirk(dev, vdev);
+ if (dev_is_pci(vdev->dev.parent)) {
+ ret = virtio_gpu_pci_quirk(dev);
if (ret)
goto err_free;
}
- ret = virtio_gpu_init(dev);
+ ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f80664cf98d0..9b98470593b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -101,8 +101,6 @@ struct virtio_gpu_object {
struct virtio_gpu_object_shmem {
struct virtio_gpu_object base;
- struct sg_table *pages;
- uint32_t mapped;
};
struct virtio_gpu_object_vram {
@@ -215,7 +213,6 @@ struct virtio_gpu_drv_cap_cache {
};
struct virtio_gpu_device {
- struct device *dev;
struct drm_device *ddev;
struct virtio_device *vdev;
@@ -283,7 +280,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
/* virtgpu_kms.c */
-int virtio_gpu_init(struct drm_device *dev);
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 580a78809836..7db48d17ee3a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -228,8 +228,10 @@ int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
for (i = 0; i < objs->nents; ++i) {
ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1);
- if (ret)
+ if (ret) {
+ virtio_gpu_array_unlock_resv(objs);
return ret;
+ }
}
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9b2702116f93..5d05093014ac 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -47,7 +47,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
struct virtio_gpu_fence_event *e = NULL;
int ret;
- if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+ if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
return 0;
e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -168,7 +168,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
* array contains any fence from a foreign context.
*/
ret = 0;
- if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
+ if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx))
ret = dma_fence_wait(in_fence, true);
dma_fence_put(in_fence);
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 3313b92db531..27b7f14dae89 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -28,6 +28,7 @@
#include <linux/virtio_ring.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include "virtgpu_drv.h"
@@ -66,10 +67,11 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
{
int i, ret;
bool invalid_capset_id = false;
+ struct drm_device *drm = vgdev->ddev;
- vgdev->capsets = kcalloc(num_capsets,
- sizeof(struct virtio_gpu_drv_capset),
- GFP_KERNEL);
+ vgdev->capsets = drmm_kcalloc(drm, num_capsets,
+ sizeof(struct virtio_gpu_drv_capset),
+ GFP_KERNEL);
if (!vgdev->capsets) {
DRM_ERROR("failed to allocate cap sets\n");
return;
@@ -94,7 +96,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
if (ret == 0 || invalid_capset_id) {
spin_lock(&vgdev->display_info_lock);
- kfree(vgdev->capsets);
+ drmm_kfree(drm, vgdev->capsets);
vgdev->capsets = NULL;
spin_unlock(&vgdev->display_info_lock);
return;
@@ -110,7 +112,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
vgdev->num_capsets = num_capsets;
}
-int virtio_gpu_init(struct drm_device *dev)
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
static vq_callback_t *callbacks[] = {
virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
@@ -123,17 +125,16 @@ int virtio_gpu_init(struct drm_device *dev)
u32 num_scanouts, num_capsets;
int ret = 0;
- if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
- vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
+ vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL);
if (!vgdev)
return -ENOMEM;
vgdev->ddev = dev;
dev->dev_private = vgdev;
- vgdev->vdev = dev_to_virtio(dev->dev);
- vgdev->dev = dev->dev;
+ vgdev->vdev = vdev;
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
@@ -257,7 +258,6 @@ err_vbufs:
vgdev->vdev->config->del_vqs(vgdev->vdev);
err_vqs:
dev->dev_private = NULL;
- kfree(vgdev);
return ret;
}
@@ -296,9 +296,6 @@ void virtio_gpu_release(struct drm_device *dev)
if (vgdev->has_host_visible)
drm_mm_takedown(&vgdev->host_visible_mm);
-
- kfree(vgdev->capsets);
- kfree(vgdev);
}
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 1cc8f3fc8e4b..8d7728181de0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -67,21 +67,6 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
if (virtio_gpu_is_shmem(bo)) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-
- if (shmem->pages) {
- if (shmem->mapped) {
- dma_unmap_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- shmem->mapped = 0;
- }
-
- sg_free_table(shmem->pages);
- kfree(shmem->pages);
- shmem->pages = NULL;
- drm_gem_shmem_unpin(&bo->base);
- }
-
drm_gem_shmem_free(&bo->base);
} else if (virtio_gpu_is_vram(bo)) {
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
@@ -153,35 +138,18 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
unsigned int *nents)
{
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
- int si, ret;
+ struct sg_table *pages;
+ int si;
- ret = drm_gem_shmem_pin(&bo->base);
- if (ret < 0)
- return -EINVAL;
-
- /*
- * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
- * drm_gem_shmem_get_pages_sgt because virtio has it's own set of
- * dma-ops. This is discouraged for other drivers, but should be fine
- * since virtio_gpu doesn't support dma-buf import from other devices.
- */
- shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
- if (IS_ERR(shmem->pages)) {
- drm_gem_shmem_unpin(&bo->base);
- return PTR_ERR(shmem->pages);
- }
+ pages = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- if (use_dma_api) {
- ret = dma_map_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- if (ret)
- return ret;
- *nents = shmem->mapped = shmem->pages->nents;
- } else {
- *nents = shmem->pages->orig_nents;
- }
+ if (use_dma_api)
+ *nents = pages->nents;
+ else
+ *nents = pages->orig_nents;
*ents = kvmalloc_array(*nents,
sizeof(struct virtio_gpu_mem_entry),
@@ -192,13 +160,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
}
if (use_dma_api) {
- for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+ for_each_sgtable_dma_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
(*ents)[si].padding = 0;
}
} else {
- for_each_sgtable_sg(shmem->pages, sg, si) {
+ for_each_sgtable_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
(*ents)[si].length = cpu_to_le32(sg->length);
(*ents)[si].padding = 0;
@@ -234,6 +202,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo->dumb = params->dumb;
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0)
+ goto err_put_id;
+
if (fence) {
ret = -ENOMEM;
objs = virtio_gpu_array_alloc(1);
@@ -246,13 +218,6 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
goto err_put_objs;
}
- ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
- if (ret != 0) {
- virtio_gpu_array_put_free(objs);
- virtio_gpu_free_object(&shmem_obj->base);
- return ret;
- }
-
if (params->blob) {
if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
bo->guest_blob = true;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6d3cc9e238a4..4c09e313bebc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -26,7 +26,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include "virtgpu_drv.h"
@@ -67,16 +66,9 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
return format;
}
-static void virtio_gpu_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_cleanup(plane);
- kfree(plane);
-}
-
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = virtio_gpu_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -100,8 +92,8 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
is_cursor, true);
return ret;
}
@@ -266,14 +258,14 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
}
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_plane_state *state)
{
struct virtio_gpu_framebuffer *vgfb;
- if (!plane->state->fb)
+ if (!state->fb)
return;
- vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ vgfb = to_virtio_gpu_framebuffer(state->fb);
if (vgfb->fence) {
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
@@ -379,11 +371,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
const struct drm_plane_helper_funcs *funcs;
struct drm_plane *plane;
const uint32_t *formats;
- int ret, nformats;
-
- plane = kzalloc(sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return ERR_PTR(-ENOMEM);
+ int nformats;
if (type == DRM_PLANE_TYPE_CURSOR) {
formats = virtio_gpu_cursor_formats;
@@ -394,17 +382,13 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
nformats = ARRAY_SIZE(virtio_gpu_formats);
funcs = &virtio_gpu_primary_helper_funcs;
}
- ret = drm_universal_plane_init(dev, plane, 1 << index,
- &virtio_gpu_plane_funcs,
- formats, nformats,
- NULL, type, NULL);
- if (ret)
- goto err_plane_init;
+
+ plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
+ 1 << index, &virtio_gpu_plane_funcs,
+ formats, nformats, NULL, type, NULL);
+ if (IS_ERR(plane))
+ return plane;
drm_plane_helper_add(plane, funcs);
return plane;
-
-err_plane_init:
- kfree(plane);
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index b7529b2b9883..9ff8660b50ad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -322,7 +322,7 @@ static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
free_vbuf(vgdev, vbuf);
- return -1;
+ return -ENODEV;
}
if (vgdev->has_indirect)
@@ -386,7 +386,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (!sgt) {
if (fence && vbuf->objs)
virtio_gpu_array_unlock_resv(vbuf->objs);
- return -1;
+ return -ENOMEM;
}
elemcnt += sg_ents;
@@ -595,11 +595,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
- dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -721,7 +720,7 @@ static int virtio_get_edid_block(void *data, u8 *buf,
size_t start = block * EDID_LENGTH;
if (start + len > le32_to_cpu(resp->size))
- return -1;
+ return -EINVAL;
memcpy(buf, resp->edid + start, len);
return 0;
}
@@ -1019,11 +1018,9 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- if (virtio_gpu_is_shmem(bo) && use_dma_api) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
- }
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 72f779cbfedd..1b28a6a32948 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -3,6 +3,7 @@ vkms-y := \
vkms_drv.o \
vkms_plane.o \
vkms_output.o \
+ vkms_formats.o \
vkms_crtc.o \
vkms_composer.o \
vkms_writeback.o
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 775b97766e08..8e53fa80742b 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -7,203 +7,185 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
+#include <linux/minmax.h>
#include "vkms_drv.h"
-static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
- const struct vkms_composer *composer)
+static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
{
- u32 pixel;
- int src_offset = composer->offset + (y * composer->pitch)
- + (x * composer->cpp);
+ u32 new_color;
- pixel = *(u32 *)&buffer[src_offset];
+ new_color = (src * 0xffff + dst * (0xffff - alpha));
- return pixel;
+ return DIV_ROUND_CLOSEST(new_color, 0xffff);
}
/**
- * compute_crc - Compute CRC value on output frame
+ * pre_mul_alpha_blend - alpha blending equation
+ * @frame_info: Source framebuffer's metadata
+ * @stage_buffer: The line with the pixels from the source plane
+ * @output_buffer: A line buffer that receives the blended output
*
- * @vaddr: address to final framebuffer
- * @composer: framebuffer's metadata
+ * Using the information from `frame_info`, this blends only the
+ * necessary pixels from the `stage_buffer` into the `output_buffer`
+ * using the premultiplied blend formula.
*
- * returns CRC value computed using crc32 on the visible portion of
- * the final framebuffer at vaddr_out
+ * The current DRM assumption is that pixel color values have already been
+ * pre-multiplied with the alpha channel values. For more, see
+ * drm_plane_create_blend_mode_property(). Also, this formula assumes a
+ * completely opaque background.
*/
-static uint32_t compute_crc(const u8 *vaddr,
- const struct vkms_composer *composer)
+static void pre_mul_alpha_blend(struct vkms_frame_info *frame_info,
+ struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer)
{
- int x, y;
- u32 crc = 0, pixel = 0;
- int x_src = composer->src.x1 >> 16;
- int y_src = composer->src.y1 >> 16;
- int h_src = drm_rect_height(&composer->src) >> 16;
- int w_src = drm_rect_width(&composer->src) >> 16;
-
- for (y = y_src; y < y_src + h_src; ++y) {
- for (x = x_src; x < x_src + w_src; ++x) {
- pixel = get_pixel_from_buffer(x, y, vaddr, composer);
- crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
- }
+ int x_dst = frame_info->dst.x1;
+ struct pixel_argb_u16 *out = output_buffer->pixels + x_dst;
+ struct pixel_argb_u16 *in = stage_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (int x = 0; x < x_limit; x++) {
+ out[x].a = (u16)0xffff;
+ out[x].r = pre_mul_blend_channel(in[x].r, out[x].r, in[x].a);
+ out[x].g = pre_mul_blend_channel(in[x].g, out[x].g, in[x].a);
+ out[x].b = pre_mul_blend_channel(in[x].b, out[x].b, in[x].a);
}
-
- return crc;
}
-static u8 blend_channel(u8 src, u8 dst, u8 alpha)
+static bool check_y_limit(struct vkms_frame_info *frame_info, int y)
{
- u32 pre_blend;
- u8 new_color;
-
- pre_blend = (src * 255 + dst * (255 - alpha));
-
- /* Faster div by 255 */
- new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);
+ if (y >= frame_info->dst.y1 && y < frame_info->dst.y2)
+ return true;
- return new_color;
+ return false;
}
-/**
- * alpha_blend - alpha blending equation
- * @argb_src: src pixel on premultiplied alpha mode
- * @argb_dst: dst pixel completely opaque
- *
- * blend pixels using premultiplied blend formula. The current DRM assumption
- * is that pixel color values have been already pre-multiplied with the alpha
- * channel values. See more drm_plane_create_blend_mode_property(). Also, this
- * formula assumes a completely opaque background.
- */
-static void alpha_blend(const u8 *argb_src, u8 *argb_dst)
+static void fill_background(const struct pixel_argb_u16 *background_color,
+ struct line_buffer *output_buffer)
{
- u8 alpha;
-
- alpha = argb_src[3];
- argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
- argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
- argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
+ for (size_t i = 0; i < output_buffer->n_pixels; i++)
+ output_buffer->pixels[i] = *background_color;
}
/**
- * x_blend - blending equation that ignores the pixel alpha
+ * @wb: The writeback job, which carries the writeback frame buffer metadata
+ * @crtc_state: The crtc state
+ * @crc32: The crc output of the final frame
+ * @output_buffer: A line buffer that receives the result of the blend(s)
+ * @stage_buffer: The line with the pixels from the plane being blended into the output
*
- * overwrites RGB color value from src pixel to dst pixel.
+ * This function blends the pixels from all planes (using `pre_mul_alpha_blend`),
+ * calculates the crc32 of the resulting output and, if necessary,
+ * converts and stores the output into the writeback buffer.
*/
-static void x_blend(const u8 *xrgb_src, u8 *xrgb_dst)
+static void blend(struct vkms_writeback_job *wb,
+ struct vkms_crtc_state *crtc_state,
+ u32 *crc32, struct line_buffer *stage_buffer,
+ struct line_buffer *output_buffer, size_t row_size)
{
- memcpy(xrgb_dst, xrgb_src, sizeof(u8) * 3);
-}
+ struct vkms_plane_state **plane = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
-/**
- * blend - blend value at vaddr_src with value at vaddr_dst
- * @vaddr_dst: destination address
- * @vaddr_src: source address
- * @dst_composer: destination framebuffer's metadata
- * @src_composer: source framebuffer's metadata
- * @pixel_blend: blending equation based on plane format
- *
- * Blend the vaddr_src value with the vaddr_dst value using a pixel blend
- * equation according to the supported plane formats DRM_FORMAT_(A/XRGB8888)
- * and clearing alpha channel to an completely opaque background. This function
- * uses buffer's metadata to locate the new composite values at vaddr_dst.
- *
- * TODO: completely clear the primary plane (a = 0xff) before starting to blend
- * pixel color values
- */
-static void blend(void *vaddr_dst, void *vaddr_src,
- struct vkms_composer *dst_composer,
- struct vkms_composer *src_composer,
- void (*pixel_blend)(const u8 *, u8 *))
-{
- int i, j, j_dst, i_dst;
- int offset_src, offset_dst;
- u8 *pixel_dst, *pixel_src;
-
- int x_src = src_composer->src.x1 >> 16;
- int y_src = src_composer->src.y1 >> 16;
-
- int x_dst = src_composer->dst.x1;
- int y_dst = src_composer->dst.y1;
- int h_dst = drm_rect_height(&src_composer->dst);
- int w_dst = drm_rect_width(&src_composer->dst);
-
- int y_limit = y_src + h_dst;
- int x_limit = x_src + w_dst;
-
- for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
- for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
- offset_dst = dst_composer->offset
- + (i_dst * dst_composer->pitch)
- + (j_dst++ * dst_composer->cpp);
- offset_src = src_composer->offset
- + (i * src_composer->pitch)
- + (j * src_composer->cpp);
-
- pixel_src = (u8 *)(vaddr_src + offset_src);
- pixel_dst = (u8 *)(vaddr_dst + offset_dst);
- pixel_blend(pixel_src, pixel_dst);
- /* clearing alpha channel (0xff)*/
- pixel_dst[3] = 0xff;
+ const struct pixel_argb_u16 background_color = { .a = 0xffff };
+
+ size_t crtc_y_limit = crtc_state->base.crtc->mode.vdisplay;
+
+ for (size_t y = 0; y < crtc_y_limit; y++) {
+ fill_background(&background_color, output_buffer);
+
+ /* The active planes are composed associatively in z-order. */
+ for (size_t i = 0; i < n_active_planes; i++) {
+ if (!check_y_limit(plane[i]->frame_info, y))
+ continue;
+
+ plane[i]->plane_read(stage_buffer, plane[i]->frame_info, y);
+ pre_mul_alpha_blend(plane[i]->frame_info, stage_buffer,
+ output_buffer);
}
- i_dst++;
+
+ *crc32 = crc32_le(*crc32, (void *)output_buffer->pixels, row_size);
+
+ if (wb)
+ wb->wb_write(&wb->wb_frame_info, output_buffer, y);
}
}
-static void compose_plane(struct vkms_composer *primary_composer,
- struct vkms_composer *plane_composer,
- void *vaddr_out)
+static int check_format_funcs(struct vkms_crtc_state *crtc_state,
+ struct vkms_writeback_job *active_wb)
{
- struct drm_framebuffer *fb = &plane_composer->fb;
- void *vaddr;
- void (*pixel_blend)(const u8 *p_src, u8 *p_dst);
+ struct vkms_plane_state **planes = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
- if (WARN_ON(iosys_map_is_null(&plane_composer->map[0])))
- return;
+ for (size_t i = 0; i < n_active_planes; i++)
+ if (!planes[i]->plane_read)
+ return -1;
- vaddr = plane_composer->map[0].vaddr;
+ if (active_wb && !active_wb->wb_write)
+ return -1;
- if (fb->format->format == DRM_FORMAT_ARGB8888)
- pixel_blend = &alpha_blend;
- else
- pixel_blend = &x_blend;
+ return 0;
+}
+
+static int check_iosys_map(struct vkms_crtc_state *crtc_state)
+{
+ struct vkms_plane_state **plane_state = crtc_state->active_planes;
+ u32 n_active_planes = crtc_state->num_active_planes;
+
+ for (size_t i = 0; i < n_active_planes; i++)
+ if (iosys_map_is_null(&plane_state[i]->frame_info->map[0]))
+ return -1;
- blend(vaddr_out, vaddr, primary_composer, plane_composer, pixel_blend);
+ return 0;
}
-static int compose_active_planes(void **vaddr_out,
- struct vkms_composer *primary_composer,
- struct vkms_crtc_state *crtc_state)
+static int compose_active_planes(struct vkms_writeback_job *active_wb,
+ struct vkms_crtc_state *crtc_state,
+ u32 *crc32)
{
- struct drm_framebuffer *fb = &primary_composer->fb;
- struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
- const void *vaddr;
- int i;
-
- if (!*vaddr_out) {
- *vaddr_out = kvzalloc(gem_obj->size, GFP_KERNEL);
- if (!*vaddr_out) {
- DRM_ERROR("Cannot allocate memory for output frame.");
- return -ENOMEM;
- }
- }
+ size_t line_width, pixel_size = sizeof(struct pixel_argb_u16);
+ struct line_buffer output_buffer, stage_buffer;
+ int ret = 0;
+
+ /*
+ * This check exists so we can call `crc32_le` for the entire line
+ * instead of doing it for each channel of each pixel in case
+ * `struct pixel_argb_u16` had any gap added by the compiler
+ * between the struct fields.
+ */
+ static_assert(sizeof(struct pixel_argb_u16) == 8);
- if (WARN_ON(iosys_map_is_null(&primary_composer->map[0])))
+ if (WARN_ON(check_iosys_map(crtc_state)))
return -EINVAL;
- vaddr = primary_composer->map[0].vaddr;
+ if (WARN_ON(check_format_funcs(crtc_state, active_wb)))
+ return -EINVAL;
- memcpy(*vaddr_out, vaddr, gem_obj->size);
+ line_width = crtc_state->base.crtc->mode.hdisplay;
+ stage_buffer.n_pixels = line_width;
+ output_buffer.n_pixels = line_width;
- /* If there are other planes besides primary, we consider the active
- * planes should be in z-order and compose them associatively:
- * ((primary <- overlay) <- cursor)
- */
- for (i = 1; i < crtc_state->num_active_planes; i++)
- compose_plane(primary_composer,
- crtc_state->active_planes[i]->composer,
- *vaddr_out);
+ stage_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
+ if (!stage_buffer.pixels) {
+ DRM_ERROR("Cannot allocate memory for the output line buffer");
+ return -ENOMEM;
+ }
- return 0;
+ output_buffer.pixels = kvmalloc(line_width * pixel_size, GFP_KERNEL);
+ if (!output_buffer.pixels) {
+ DRM_ERROR("Cannot allocate memory for intermediate line buffer");
+ ret = -ENOMEM;
+ goto free_stage_buffer;
+ }
+
+ blend(active_wb, crtc_state, crc32, &stage_buffer,
+ &output_buffer, line_width * pixel_size);
+
+ kvfree(output_buffer.pixels);
+free_stage_buffer:
+ kvfree(stage_buffer.pixels);
+
+ return ret;
}
/**
@@ -221,13 +203,11 @@ void vkms_composer_worker(struct work_struct *work)
struct vkms_crtc_state,
composer_work);
struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_writeback_job *active_wb = crtc_state->active_writeback;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- struct vkms_composer *primary_composer = NULL;
- struct vkms_plane_state *act_plane = NULL;
bool crc_pending, wb_pending;
- void *vaddr_out = NULL;
- u32 crc32 = 0;
u64 frame_start, frame_end;
+ u32 crc32 = 0;
int ret;
spin_lock_irq(&out->composer_lock);
@@ -247,35 +227,19 @@ void vkms_composer_worker(struct work_struct *work)
if (!crc_pending)
return;
- if (crtc_state->num_active_planes >= 1) {
- act_plane = crtc_state->active_planes[0];
- if (act_plane->base.base.plane->type == DRM_PLANE_TYPE_PRIMARY)
- primary_composer = act_plane->composer;
- }
-
- if (!primary_composer)
- return;
-
if (wb_pending)
- vaddr_out = crtc_state->active_writeback->data[0].vaddr;
+ ret = compose_active_planes(active_wb, crtc_state, &crc32);
+ else
+ ret = compose_active_planes(NULL, crtc_state, &crc32);
- ret = compose_active_planes(&vaddr_out, primary_composer,
- crtc_state);
- if (ret) {
- if (ret == -EINVAL && !wb_pending)
- kvfree(vaddr_out);
+ if (ret)
return;
- }
-
- crc32 = compute_crc(vaddr_out, primary_composer);
if (wb_pending) {
drm_writeback_signal_completion(&out->wb_connector, 0);
spin_lock_irq(&out->composer_lock);
crtc_state->wb_pending = false;
spin_unlock_irq(&out->composer_lock);
- } else {
- kvfree(vaddr_out);
}
/*
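
The per-channel blend in the composer above reduces to one line of integer math. A standalone illustration of the pre_mul_blend_channel() formula, with plain round-to-nearest division standing in for DIV_ROUND_CLOSEST():

#include <stdint.h>

/*
 * Premultiplied alpha blend over an opaque background, 16 bits per channel:
 * out = src + dst * (1 - alpha), with everything scaled by 0xffff.
 * src is already premultiplied by alpha, so it is not scaled again here.
 */
static uint16_t blend_channel_u16(uint16_t src, uint16_t dst, uint16_t alpha)
{
	uint64_t out = (uint64_t)src * 0xffff + (uint64_t)dst * (0xffff - alpha);

	/* Round-to-nearest division by 0xffff, like DIV_ROUND_CLOSEST(). */
	return (uint16_t)((out + 0xffff / 2) / 0xffff);
}

For example, blending a 50% premultiplied white source (src = alpha = 0x8000) over a white destination (dst = 0xffff) yields 0xffff, so white over white stays white.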
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 1d60654b553b..0a67b8073f7e 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -23,28 +23,41 @@
#define NUM_OVERLAY_PLANES 8
-struct vkms_writeback_job {
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
-};
-
-struct vkms_composer {
- struct drm_framebuffer fb;
+struct vkms_frame_info {
+ struct drm_framebuffer *fb;
struct drm_rect src, dst;
- struct iosys_map map[4];
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
unsigned int offset;
unsigned int pitch;
unsigned int cpp;
};
+struct pixel_argb_u16 {
+ u16 a, r, g, b;
+};
+
+struct line_buffer {
+ size_t n_pixels;
+ struct pixel_argb_u16 *pixels;
+};
+
+struct vkms_writeback_job {
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
+ struct vkms_frame_info wb_frame_info;
+ void (*wb_write)(struct vkms_frame_info *frame_info,
+ const struct line_buffer *buffer, int y);
+};
+
/**
* vkms_plane_state - Driver specific plane state
* @base: base plane state
- * @composer: data required for composing computation
+ * @frame_info: data required for composing computation
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
- struct vkms_composer *composer;
+ struct vkms_frame_info *frame_info;
+ void (*plane_read)(struct line_buffer *buffer,
+ const struct vkms_frame_info *frame_info, int y);
};
struct vkms_plane {
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
new file mode 100644
index 000000000000..d4950688b3f1
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <drm/drm_rect.h>
+#include <drm/drm_fixed.h>
+
+#include "vkms_formats.h"
+
+static size_t pixel_offset(const struct vkms_frame_info *frame_info, int x, int y)
+{
+ return frame_info->offset + (y * frame_info->pitch)
+ + (x * frame_info->cpp);
+}
+
+/*
+ * packed_pixels_addr - Get the pointer to the pixel at a given pair of coordinates
+ *
+ * @frame_info: Buffer metadata
+ * @x: The x (width) coordinate of the 2D buffer
+ * @y: The y (height) coordinate of the 2D buffer
+ *
+ * Takes the information stored in the frame_info, a pair of coordinates, and
+ * returns the address of the first color channel.
+ * This function assumes the channels are packed together, i.e. a color channel
+ * comes immediately after another in memory. Therefore, this function
+ * doesn't work for YUV formats with chroma subsampling (e.g. YUV420 and NV21).
+ */
+static void *packed_pixels_addr(const struct vkms_frame_info *frame_info,
+ int x, int y)
+{
+ size_t offset = pixel_offset(frame_info, x, y);
+
+ return (u8 *)frame_info->map[0].vaddr + offset;
+}
+
+static void *get_packed_src_addr(const struct vkms_frame_info *frame_info, int y)
+{
+ int x_src = frame_info->src.x1 >> 16;
+ int y_src = y - frame_info->dst.y1 + (frame_info->src.y1 >> 16);
+
+ return packed_pixels_addr(frame_info, x_src, y_src);
+}
+
+static void ARGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u8 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ /*
+ * The 257 is the "conversion ratio". This number is obtained from the
+ * (2^16 - 1) / (2^8 - 1) division: it maps the 8-bit channel range onto
+ * the full 16-bit range, so 0xff becomes 0xffff.
+ * A similar idea applies to the other RGB color conversions.
+ */
+ out_pixels[x].a = (u16)src_pixels[3] * 257;
+ out_pixels[x].r = (u16)src_pixels[2] * 257;
+ out_pixels[x].g = (u16)src_pixels[1] * 257;
+ out_pixels[x].b = (u16)src_pixels[0] * 257;
+ }
+}
+
+static void XRGB8888_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u8 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ out_pixels[x].a = (u16)0xffff;
+ out_pixels[x].r = (u16)src_pixels[2] * 257;
+ out_pixels[x].g = (u16)src_pixels[1] * 257;
+ out_pixels[x].b = (u16)src_pixels[0] * 257;
+ }
+}
+
+static void ARGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info,
+ int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u16 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ out_pixels[x].a = le16_to_cpu(src_pixels[3]);
+ out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+ out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+ out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+ }
+}
+
+static void XRGB16161616_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info,
+ int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u16 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels += 4) {
+ out_pixels[x].a = (u16)0xffff;
+ out_pixels[x].r = le16_to_cpu(src_pixels[2]);
+ out_pixels[x].g = le16_to_cpu(src_pixels[1]);
+ out_pixels[x].b = le16_to_cpu(src_pixels[0]);
+ }
+}
+
+static void RGB565_to_argb_u16(struct line_buffer *stage_buffer,
+ const struct vkms_frame_info *frame_info, int y)
+{
+ struct pixel_argb_u16 *out_pixels = stage_buffer->pixels;
+ u16 *src_pixels = get_packed_src_addr(frame_info, y);
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ stage_buffer->n_pixels);
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+ for (size_t x = 0; x < x_limit; x++, src_pixels++) {
+ u16 rgb_565 = le16_to_cpu(*src_pixels);
+ s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f);
+ s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f);
+ s64 fp_b = drm_int2fixp(rgb_565 & 0x1f);
+
+ out_pixels[x].a = (u16)0xffff;
+ out_pixels[x].r = drm_fixp2int(drm_fixp_mul(fp_r, fp_rb_ratio));
+ out_pixels[x].g = drm_fixp2int(drm_fixp_mul(fp_g, fp_g_ratio));
+ out_pixels[x].b = drm_fixp2int(drm_fixp_mul(fp_b, fp_rb_ratio));
+ }
+}
+
+/*
+ * The following functions take a line of argb_u16 pixels from the
+ * src_buffer, convert them to a specific format, and store them in the
+ * destination.
+ *
+ * They are used in `compose_active_planes` to convert and store a line
+ * from the src_buffer into the writeback buffer.
+ */
+static void argb_u16_to_ARGB8888(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ /*
+ * The sequence below is important because the format's byte order is
+ * little-endian. In the case of ARGB8888 the memory is
+ * organized this way:
+ *
+ * | Addr | = blue channel
+ * | Addr + 1 | = green channel
+ * | Addr + 2 | = red channel
+ * | Addr + 3 | = alpha channel
+ */
+ dst_pixels[3] = DIV_ROUND_CLOSEST(in_pixels[x].a, 257);
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
+ }
+}
+
+static void argb_u16_to_XRGB8888(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u8 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ dst_pixels[3] = 0xff;
+ dst_pixels[2] = DIV_ROUND_CLOSEST(in_pixels[x].r, 257);
+ dst_pixels[1] = DIV_ROUND_CLOSEST(in_pixels[x].g, 257);
+ dst_pixels[0] = DIV_ROUND_CLOSEST(in_pixels[x].b, 257);
+ }
+}
+
+static void argb_u16_to_ARGB16161616(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ dst_pixels[3] = cpu_to_le16(in_pixels[x].a);
+ dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
+ dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
+ dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
+ }
+}
+
+static void argb_u16_to_XRGB16161616(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels += 4) {
+ dst_pixels[3] = 0xffff;
+ dst_pixels[2] = cpu_to_le16(in_pixels[x].r);
+ dst_pixels[1] = cpu_to_le16(in_pixels[x].g);
+ dst_pixels[0] = cpu_to_le16(in_pixels[x].b);
+ }
+}
+
+static void argb_u16_to_RGB565(struct vkms_frame_info *frame_info,
+ const struct line_buffer *src_buffer, int y)
+{
+ int x_dst = frame_info->dst.x1;
+ u16 *dst_pixels = packed_pixels_addr(frame_info, x_dst, y);
+ struct pixel_argb_u16 *in_pixels = src_buffer->pixels;
+ int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst),
+ src_buffer->n_pixels);
+
+ s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31));
+ s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63));
+
+ for (size_t x = 0; x < x_limit; x++, dst_pixels++) {
+ s64 fp_r = drm_int2fixp(in_pixels[x].r);
+ s64 fp_g = drm_int2fixp(in_pixels[x].g);
+ s64 fp_b = drm_int2fixp(in_pixels[x].b);
+
+ u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio));
+ u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio));
+ u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio));
+
+ *dst_pixels = cpu_to_le16(r << 11 | g << 5 | b);
+ }
+}
+
+void *get_frame_to_line_function(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return &ARGB8888_to_argb_u16;
+ case DRM_FORMAT_XRGB8888:
+ return &XRGB8888_to_argb_u16;
+ case DRM_FORMAT_ARGB16161616:
+ return &ARGB16161616_to_argb_u16;
+ case DRM_FORMAT_XRGB16161616:
+ return &XRGB16161616_to_argb_u16;
+ case DRM_FORMAT_RGB565:
+ return &RGB565_to_argb_u16;
+ default:
+ return NULL;
+ }
+}
+
+void *get_line_to_frame_function(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return &argb_u16_to_ARGB8888;
+ case DRM_FORMAT_XRGB8888:
+ return &argb_u16_to_XRGB8888;
+ case DRM_FORMAT_ARGB16161616:
+ return &argb_u16_to_ARGB16161616;
+ case DRM_FORMAT_XRGB16161616:
+ return &argb_u16_to_XRGB16161616;
+ case DRM_FORMAT_RGB565:
+ return &argb_u16_to_RGB565;
+ default:
+ return NULL;
+ }
+}
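
The conversion helpers in this new file all revolve around rescaling channel ranges. A small standalone sketch of the two scalings used most often above, the 8-bit/16-bit factor of 257 and the 5/6-bit RGB565 ratios; the driver performs the latter with drm_fixed.h fixed-point helpers, while rounded integer math is used here purely for illustration:

#include <stdint.h>

/* Expand an 8-bit channel to 16 bits; 257 == 0xffff / 0xff, so 0xff -> 0xffff. */
static inline uint16_t chan_8_to_16(uint8_t c)
{
	return (uint16_t)c * 257;
}

/* Compress back with round-to-nearest; this exactly inverts chan_8_to_16(). */
static inline uint8_t chan_16_to_8(uint16_t c)
{
	return (uint8_t)((c + 257 / 2) / 257);
}

/* RGB565: the 5-bit fields scale by 65535/31, the 6-bit green field by 65535/63. */
static inline uint16_t chan_5_to_16(uint8_t c5)
{
	return (uint16_t)(((uint32_t)c5 * 65535 + 31 / 2) / 31);
}

static inline uint16_t chan_6_to_16(uint8_t c6)
{
	return (uint16_t)(((uint32_t)c6 * 65535 + 63 / 2) / 63);
}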
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
new file mode 100644
index 000000000000..43b7c1979018
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_FORMATS_H_
+#define _VKMS_FORMATS_H_
+
+#include "vkms_drv.h"
+
+void *get_frame_to_line_function(u32 format);
+
+void *get_line_to_frame_function(u32 format);
+
+#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index d8eb674b49a6..c3a845220e10 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -7,37 +7,42 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
#include "vkms_drv.h"
+#include "vkms_formats.h"
static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_RGB565
};
static const u32 vkms_plane_formats[] = {
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_RGB565
};
static struct drm_plane_state *
vkms_plane_duplicate_state(struct drm_plane *plane)
{
struct vkms_plane_state *vkms_state;
- struct vkms_composer *composer;
+ struct vkms_frame_info *frame_info;
vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
if (!vkms_state)
return NULL;
- composer = kzalloc(sizeof(*composer), GFP_KERNEL);
- if (!composer) {
- DRM_DEBUG_KMS("Couldn't allocate composer\n");
+ frame_info = kzalloc(sizeof(*frame_info), GFP_KERNEL);
+ if (!frame_info) {
+ DRM_DEBUG_KMS("Couldn't allocate frame_info\n");
kfree(vkms_state);
return NULL;
}
- vkms_state->composer = composer;
+ vkms_state->frame_info = frame_info;
__drm_gem_duplicate_shadow_plane_state(plane, &vkms_state->base);
@@ -50,16 +55,16 @@ static void vkms_plane_destroy_state(struct drm_plane *plane,
struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state);
struct drm_crtc *crtc = vkms_state->base.base.crtc;
- if (crtc) {
+ if (crtc && vkms_state->frame_info->fb) {
/* dropping the reference we acquired in
* vkms_primary_plane_update()
*/
- if (drm_framebuffer_read_refcount(&vkms_state->composer->fb))
- drm_framebuffer_put(&vkms_state->composer->fb);
+ if (drm_framebuffer_read_refcount(vkms_state->frame_info->fb))
+ drm_framebuffer_put(vkms_state->frame_info->fb);
}
- kfree(vkms_state->composer);
- vkms_state->composer = NULL;
+ kfree(vkms_state->frame_info);
+ vkms_state->frame_info = NULL;
__drm_gem_destroy_shadow_plane_state(&vkms_state->base);
kfree(vkms_state);
@@ -99,23 +104,26 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
struct vkms_plane_state *vkms_plane_state;
struct drm_shadow_plane_state *shadow_plane_state;
struct drm_framebuffer *fb = new_state->fb;
- struct vkms_composer *composer;
+ struct vkms_frame_info *frame_info;
+ u32 fmt;
if (!new_state->crtc || !fb)
return;
+ fmt = fb->format->format;
vkms_plane_state = to_vkms_plane_state(new_state);
shadow_plane_state = &vkms_plane_state->base;
- composer = vkms_plane_state->composer;
- memcpy(&composer->src, &new_state->src, sizeof(struct drm_rect));
- memcpy(&composer->dst, &new_state->dst, sizeof(struct drm_rect));
- memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer));
- memcpy(&composer->map, &shadow_plane_state->data, sizeof(composer->map));
- drm_framebuffer_get(&composer->fb);
- composer->offset = fb->offsets[0];
- composer->pitch = fb->pitches[0];
- composer->cpp = fb->format->cpp[0];
+ frame_info = vkms_plane_state->frame_info;
+ memcpy(&frame_info->src, &new_state->src, sizeof(struct drm_rect));
+ memcpy(&frame_info->dst, &new_state->dst, sizeof(struct drm_rect));
+ frame_info->fb = fb;
+ memcpy(&frame_info->map, &shadow_plane_state->data, sizeof(frame_info->map));
+ drm_framebuffer_get(frame_info->fb);
+ frame_info->offset = fb->offsets[0];
+ frame_info->pitch = fb->pitches[0];
+ frame_info->cpp = fb->format->cpp[0];
+ vkms_plane_state->plane_read = get_frame_to_line_function(fmt);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -139,8 +147,8 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
can_position = true;
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
can_position, true);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 3b3c1e757ab4..84a51cd281b9 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -12,9 +12,13 @@
#include <drm/drm_gem_shmem_helper.h>
#include "vkms_drv.h"
+#include "vkms_formats.h"
static const u32 vkms_wb_formats[] = {
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_RGB565
};
static const struct drm_connector_funcs vkms_wb_connector_funcs = {
@@ -31,6 +35,7 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
{
struct drm_framebuffer *fb;
const struct drm_display_mode *mode = &crtc_state->mode;
+ int ret;
if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
return 0;
@@ -42,11 +47,9 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
- if (fb->format->format != vkms_wb_formats[0]) {
- DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
- &fb->format->format);
- return -EINVAL;
- }
+ ret = drm_atomic_helper_check_wb_encoder_state(encoder, conn_state);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -76,12 +79,15 @@ static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
if (!vkmsjob)
return -ENOMEM;
- ret = drm_gem_fb_vmap(job->fb, vkmsjob->map, vkmsjob->data);
+ ret = drm_gem_fb_vmap(job->fb, vkmsjob->wb_frame_info.map, vkmsjob->data);
if (ret) {
DRM_ERROR("vmap failed: %d\n", ret);
goto err_kfree;
}
+ vkmsjob->wb_frame_info.fb = job->fb;
+ drm_framebuffer_get(vkmsjob->wb_frame_info.fb);
+
job->priv = vkmsjob;
return 0;
@@ -100,7 +106,9 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
if (!job->fb)
return;
- drm_gem_fb_vunmap(job->fb, vkmsjob->map);
+ drm_gem_fb_vunmap(job->fb, vkmsjob->wb_frame_info.map);
+
+ drm_framebuffer_put(vkmsjob->wb_frame_info.fb);
vkmsdev = drm_device_to_vkms_device(job->fb->dev);
vkms_set_composer(&vkmsdev->output, false);
@@ -117,17 +125,32 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
struct drm_writeback_connector *wb_conn = &output->wb_connector;
struct drm_connector_state *conn_state = wb_conn->base.state;
struct vkms_crtc_state *crtc_state = output->composer_state;
+ struct drm_framebuffer *fb = connector_state->writeback_job->fb;
+ u16 crtc_height = crtc_state->base.crtc->mode.vdisplay;
+ u16 crtc_width = crtc_state->base.crtc->mode.hdisplay;
+ struct vkms_writeback_job *active_wb;
+ struct vkms_frame_info *wb_frame_info;
+ u32 wb_format = fb->format->format;
if (!conn_state)
return;
vkms_set_composer(&vkmsdev->output, true);
+ active_wb = conn_state->writeback_job->priv;
+ wb_frame_info = &active_wb->wb_frame_info;
+
spin_lock_irq(&output->composer_lock);
- crtc_state->active_writeback = conn_state->writeback_job->priv;
+ crtc_state->active_writeback = active_wb;
+ wb_frame_info->offset = fb->offsets[0];
+ wb_frame_info->pitch = fb->pitches[0];
+ wb_frame_info->cpp = fb->format->cpp[0];
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
drm_writeback_queue_job(wb_conn, connector_state);
+ active_wb->wb_write = get_line_to_frame_function(wb_format);
+ drm_rect_init(&wb_frame_info->src, 0, 0, crtc_width, crtc_height);
+ drm_rect_init(&wb_frame_info->dst, 0, 0, crtc_width, crtc_height);
}
static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
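
For orientation, the writeback wiring added above boils down to this: the atomic commit looks up a per-format line writer once, and the composer then invokes it for every finished output row. A rough sketch using the vkms types from this series; the loop body is schematic, not the actual composer code:

/* Pick the per-format conversion once, at commit time. */
static void example_setup_wb(struct vkms_writeback_job *wb, u32 wb_format)
{
	wb->wb_write = get_line_to_frame_function(wb_format);
}

/* The composer calls the hook once per blended row, if writeback is pending. */
static void example_write_rows(struct vkms_writeback_job *wb,
			       struct line_buffer *output_buffer,
			       size_t crtc_y_limit)
{
	for (size_t y = 0; y < crtc_y_limit; y++) {
		/* ... fill output_buffer by blending row y of all planes ... */
		if (wb && wb->wb_write)
			wb->wb_write(&wb->wb_frame_info, output_buffer, y);
	}
}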
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
index 1f6e3bbc6605..f84376718086 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -121,7 +121,7 @@ typedef __attribute__((aligned(32))) struct MKSGuestStatInfoEntry {
*
* Since the MKSGuestStatInfoEntry structures contain userlevel
* pointers, the InstanceDescriptor also contains pointers to the
- * begining of these sections allowing the host side code to correctly
+ * beginning of these sections allowing the host side code to correctly
* interpret the pointers.
*
* Because the host side code never acknowledges anything back to the
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 4c8700027c6d..1a2fa0f83f5f 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -96,7 +96,7 @@ struct ttm_object_device;
*
* This struct is intended to be used as a base struct for objects that
* are visible to user-space. It provides a global name, race-safe
- * access and refcounting, minimal access contol and hooks for unref actions.
+ * access and refcounting, minimal access control and hooks for unref actions.
*/
struct ttm_base_object {
@@ -138,7 +138,7 @@ struct ttm_prime_object {
*
* @tfile: Pointer to a struct ttm_object_file.
* @base: The struct ttm_base_object to initialize.
- * @shareable: This object is shareable with other applcations.
+ * @shareable: This object is shareable with other applications.
* (different @tfile pointers.)
* @type: The object type.
* @refcount_release: See the struct ttm_base_object description.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 85a66014c2b6..822251aaab0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -429,9 +429,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
drm_gem_private_object_init(vdev, &bo->base, size);
- ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
- ttm_bo_type_kernel, placement, 0,
- &ctx, NULL, NULL, vmw_bo_default_destroy);
+ ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
+ placement, 0, &ctx, NULL, NULL,
+ vmw_bo_default_destroy);
if (unlikely(ret))
goto error_free;
@@ -512,10 +512,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,
size = ALIGN(size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
- ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
- ttm_bo_type_device,
- placement,
- 0, &ctx, NULL, NULL, bo_free);
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
+ placement, 0, &ctx, NULL, NULL, bo_free);
if (unlikely(ret)) {
return ret;
}
@@ -729,7 +727,7 @@ int vmw_user_bo_lookup(struct drm_file *filp,
* Any persistent usage of the object requires a refcount to be taken using
* ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
* needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called inbetween these function calls.
+ * or scheduling functions may be called in between these function calls.
*
* Return: A struct vmw_buffer_object pointer if successful or negative
* error pointer on failure.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 415774fde796..82ef58ccdd42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -36,7 +36,7 @@
* @res: Refcounted pointer to a struct vmw_resource.
* @hash: Hash entry for the manager hash table.
* @head: List head used either by the staging list or the manager list
- * of commited resources.
+ * of committed resources.
* @state: Staging state of this resource entry.
* @man: Pointer to a resource manager for this entry.
*/
@@ -51,9 +51,9 @@ struct vmw_cmdbuf_res {
/**
* struct vmw_cmdbuf_res_manager - Command buffer resource manager.
*
- * @resources: Hash table containing staged and commited command buffer
+ * @resources: Hash table containing staged and committed command buffer
* resources
- * @list: List of commited command buffer resources.
+ * @list: List of committed command buffer resources.
* @dev_priv: Pointer to a device private structure.
*
* @resources and @list are protected by the cmdbuf mutex for now.
@@ -118,7 +118,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
* This function commits a list of command buffer resource
* additions or removals.
* It is typically called when the execbuf ioctl call triggering these
- * actions has commited the fifo contents to the device.
+ * actions has committed the fifo contents to the device.
*/
void vmw_cmdbuf_res_commit(struct list_head *list)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 01a5b47e95f9..d7bd5eb1d3ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1398,18 +1398,6 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
root, "system_mob_ttm");
}
-static unsigned long
-vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct drm_file *file_priv = file->private_data;
- struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
-
- return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
- dev_priv->drm.vma_offset_manager);
-}
-
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr)
{
@@ -1576,7 +1564,6 @@ static const struct file_operations vmwgfx_driver_fops = {
.compat_ioctl = vmw_compat_ioctl,
#endif
.llseek = noop_llseek,
- .get_unmapped_area = vmw_get_unmapped_area,
};
static const struct drm_driver driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index be19aa6e1f13..09e2d738aa87 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -877,7 +877,6 @@ static inline void vmw_user_resource_noref_release(void)
/**
* Buffer object helper functions - vmwgfx_bo.c
*/
-extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo);
extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
struct vmw_buffer_object *bo,
struct ttm_placement *placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d49de4905efa..f085dbd4736d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1172,7 +1172,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
- if (IS_ERR_OR_NULL(vmw_bo)) {
+ if (IS_ERR(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
@@ -1226,7 +1226,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
- if (IS_ERR_OR_NULL(vmw_bo)) {
+ if (IS_ERR(vmw_bo)) {
VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ff2f735bbe7a..214829c32ed8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_vblank.h>
@@ -720,8 +719,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
new_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, true);
if (!ret && new_fb) {
@@ -762,8 +761,8 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
new_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
true, true);
if (ret)
return ret;
@@ -2257,7 +2256,7 @@ out_fini:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
-
+
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 7046dfd0d1c6..85f86faa3243 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -70,7 +70,7 @@ struct vmw_du_update_plane {
*
* Some surface resource or buffer object need some extra cmd submission
* like update GB image for proxy surface and define a GMRFB for screen
- * object. That should should be done here as this callback will be
+ * object. That should be done here as this callback will be
* called after FIFO allocation with the address of command buufer.
*
* This callback is optional.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index e4347faccee0..b8761f16dd78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -28,7 +28,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 2aceac7856e2..089046fa21be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -1076,6 +1076,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data,
if (desc_len < 0) {
atomic_set(&dev_priv->mksstat_user_pids[slot], 0);
+ __free_page(page);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a7d62a4eb47b..f66caa540e14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -525,7 +525,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
* for a resource and in that case, allocate
* one, reserve and validate it.
*
- * @ticket: The ww aqcquire context to use, or NULL if trylocking.
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
* @res: The resource for which to allocate a backup buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
@@ -686,7 +686,7 @@ out_no_unbind:
* @intr: Perform waits interruptible if possible.
* @dirtying: Pending GPU operation will dirty the resource
*
- * On succesful return, any backup DMA buffer pointed to by @res->backup will
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
* be reserved and validated.
* On hardware resource shortage, this function will repeatedly evict
* resources of the same type until the validation succeeds.
@@ -804,7 +804,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
* @dx_query_mob: Buffer containing the DX query MOB
*
* Read back cached states from the device if they exist. This function
- * assumings binding_mutex is held.
+ * assumes binding_mutex is held.
*/
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
@@ -1125,7 +1125,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
}
/*
- * In order of increasing backup_offset, clean dirty resorces
+ * In order of increasing backup_offset, clean dirty resources
* intersecting the range.
*/
while (found) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index c89ad3a2d141..ecd3c2fc978b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
@@ -1383,6 +1382,6 @@ out_revert:
vmw_validation_revert(&val_ctx);
out_unref:
vmw_validation_unref_lists(&val_ctx);
-
+
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 483ad544ea54..0d51b4542269 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -196,7 +196,7 @@ out_ret:
* type.
*
* Returns: Refcounted pointer to the embedded struct vmw_resource if
- * successfule. Error pointer otherwise.
+ * successful. Error pointer otherwise.
*/
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index eb014b97d156..8650c3aea8f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -29,7 +29,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include "vmwgfx_kms.h"
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index f9cf93c9e7e3..68ee897de9d7 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -8,7 +8,7 @@ config DRM_ZYNQMP_DPSUB
select DMA_ENGINE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
- select DRM_GEM_CMA_HELPER
+ select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY
help
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index cc32aa89cf8f..bbb365f2d087 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -15,12 +15,11 @@
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_managed.h>
#include <drm/drm_plane.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
#include <linux/clk.h>
@@ -1099,14 +1098,14 @@ static int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
unsigned int height = state->crtc_h / (i ? info->vsub : 1);
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
struct dma_async_tx_descriptor *desc;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
- paddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
+ dma_addr = drm_fb_dma_get_gem_addr(state->fb, state, i);
dma->xt.numf = height;
dma->sgl.size = width * info->cpp[i];
dma->sgl.icg = state->fb->pitches[i] - dma->sgl.size;
- dma->xt.src_start = paddr;
+ dma->xt.src_start = dma_addr;
dma->xt.frame_size = 1;
dma->xt.dir = DMA_MEM_TO_DEV;
dma->xt.src_sgl = true;
@@ -1151,8 +1150,8 @@ zynqmp_disp_plane_atomic_check(struct drm_plane *plane,
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
false, false);
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 824b510e337b..1de2d927c32b 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -21,7 +21,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
@@ -47,7 +47,7 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
/* Enforce the alignment constraints of the DMA engine. */
args->pitch = ALIGN(pitch, dpsub->dma_align);
- return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+ return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
static struct drm_framebuffer *
@@ -75,13 +75,13 @@ static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
* DRM/KMS Driver
*/
-DEFINE_DRM_GEM_CMA_FOPS(zynqmp_dpsub_drm_fops);
+DEFINE_DRM_GEM_DMA_FOPS(zynqmp_dpsub_drm_fops);
static const struct drm_driver zynqmp_dpsub_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
.fops = &zynqmp_dpsub_drm_fops,
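
The zynqmp hunks above are part of the tree-wide rename of the DRM "CMA" GEM helpers to "DMA" helpers; behaviour is unchanged and only identifiers move. A short reference of the renames appearing in these files, collected from the hunks above into a C comment (nothing new is introduced):

/*
 * drm_fb_cma_get_gem_addr()                  -> drm_fb_dma_get_gem_addr()
 * drm_gem_cma_dumb_create_internal()         -> drm_gem_dma_dumb_create_internal()
 * DEFINE_DRM_GEM_CMA_FOPS()                  -> DEFINE_DRM_GEM_DMA_FOPS()
 * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE()  -> DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE()
 * DRM_PLANE_HELPER_NO_SCALING                -> DRM_PLANE_NO_SCALING
 * <drm/drm_fb_cma_helper.h>                  -> <drm/drm_fb_dma_helper.h>
 * <drm/drm_gem_cma_helper.h>                 -> <drm/drm_gem_dma_helper.h>
 */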
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ce92830b5d1..185a077d59cd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -28,7 +28,6 @@ if HID
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
- depends on HID
select POWER_SUPPLY
default n
help
@@ -38,7 +37,6 @@ config HID_BATTERY_STRENGTH
config HIDRAW
bool "/dev/hidraw raw HID device support"
- depends on HID
help
Say Y here if you want to support HID devices (from the USB
specification standpoint) that aren't strictly user interface
@@ -57,7 +55,6 @@ config HIDRAW
config UHID
tristate "User-space I/O driver support for HID subsystem"
- depends on HID
default n
help
Say Y here if you want to provide HID I/O Drivers from user-space.
@@ -78,7 +75,6 @@ config UHID
config HID_GENERIC
tristate "Generic HID driver"
- depends on HID
default HID
help
Support for generic devices on the HID bus. This includes most
@@ -90,11 +86,9 @@ config HID_GENERIC
If unsure, say Y.
menu "Special HID drivers"
- depends on HID
config HID_A4TECH
tristate "A4TECH mice"
- depends on HID
default !EXPERT
help
Support for some A4TECH mice with two scroll wheels.
@@ -113,7 +107,6 @@ config HID_ACCUTOUCH
config HID_ACRUX
tristate "ACRUX game controller support"
- depends on HID
help
Say Y here if you want to enable support for ACRUX game controllers.
@@ -127,7 +120,6 @@ config HID_ACRUX_FF
config HID_APPLE
tristate "Apple {i,Power,Mac}Books"
- depends on HID
depends on LEDS_CLASS
depends on NEW_LEDS
default !EXPERT
@@ -167,13 +159,11 @@ config HID_ASUS
config HID_AUREAL
tristate "Aureal"
- depends on HID
help
Support for Aureal Cy se W-01RN Remote Controller and other Aureal derived remotes.
config HID_BELKIN
tristate "Belkin Flip KVM and Wireless keyboard"
- depends on HID
default !EXPERT
help
Support for Belkin Flip KVM and Wireless keyboard.
@@ -202,7 +192,6 @@ config HID_BIGBEN_FF
config HID_CHERRY
tristate "Cherry Cymotion keyboard"
- depends on HID
default !EXPERT
help
Support for Cherry Cymotion keyboard.
@@ -227,7 +216,6 @@ config HID_CORSAIR
config HID_COUGAR
tristate "Cougar devices"
- depends on HID
help
Support for Cougar devices that are not fully compliant with the
HID standard.
@@ -237,7 +225,6 @@ config HID_COUGAR
config HID_MACALLY
tristate "Macally devices"
- depends on HID
help
Support for Macally devices that are not fully compliant with the
HID standard.
@@ -262,7 +249,6 @@ config HID_PRODIKEYS
config HID_CMEDIA
tristate "CMedia audio chips"
- depends on HID
help
Support for CMedia CM6533 HID audio jack controls
and HS100B mute buttons.
@@ -288,14 +274,12 @@ config HID_CREATIVE_SB0540
config HID_CYPRESS
tristate "Cypress mouse and barcode readers"
- depends on HID
default !EXPERT
help
Support for cypress mouse and barcode readers.
config HID_DRAGONRISE
tristate "DragonRise Inc. game controller"
- depends on HID
help
Say Y here if you have DragonRise Inc. game controllers.
These might be branded as:
@@ -314,7 +298,6 @@ config DRAGONRISE_FF
config HID_EMS_FF
tristate "EMS Production Inc. force feedback support"
- depends on HID
select INPUT_FF_MEMLESS
help
Say Y here if you want to enable force feedback support for devices by
@@ -332,7 +315,6 @@ config HID_ELAN
config HID_ELECOM
tristate "ELECOM HID devices"
- depends on HID
help
Support for ELECOM devices:
- BM084 Bluetooth Mouse
@@ -349,7 +331,6 @@ config HID_ELO
config HID_EZKEY
tristate "Ezkey BTC 8193 keyboard"
- depends on HID
default !EXPERT
help
Support for Ezkey BTC 8193 keyboard.
@@ -367,19 +348,16 @@ config HID_FT260
config HID_GEMBIRD
tristate "Gembird Joypad"
- depends on HID
help
Support for Gembird JPD-DualForce 2.
config HID_GFRM
tristate "Google Fiber TV Box remote control support"
- depends on HID
help
Support for Google Fiber TV Box remote controls
config HID_GLORIOUS
tristate "Glorious PC Gaming Race mice"
- depends on HID
help
Support for Glorious PC Gaming Race mice such as
the Glorious Model O, O- and D.
@@ -424,7 +402,6 @@ config HID_VIVALDI
tristate "Vivaldi Keyboard"
select HID_VIVALDI_COMMON
select INPUT_VIVALDIFMAP
- depends on HID
help
Say Y here if you want to enable support for Vivaldi keyboards.
@@ -447,7 +424,6 @@ config HID_GT683R
config HID_KEYTOUCH
tristate "Keytouch HID devices"
- depends on HID
help
Support for Keytouch HID devices not fully compliant with
the specification. Currently supported:
@@ -455,7 +431,6 @@ config HID_KEYTOUCH
config HID_KYE
tristate "KYE/Genius devices"
- depends on HID
help
Support for KYE/Genius devices not fully compliant with HID standard:
- Ergo Mouse
@@ -471,32 +446,37 @@ config HID_UCLOGIC
config HID_WALTOP
tristate "Waltop"
- depends on HID
help
Support for Waltop tablets.
config HID_VIEWSONIC
tristate "ViewSonic/Signotec"
- depends on HID
help
Support for ViewSonic/Signotec PD1011 signature pad.
+config HID_VRC2
+ tristate "VRC-2 Car Controller"
+ depends on HID
+ help
+ Support for VRC-2, which is a 2-axis controller often used in
+ car simulators.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-vrc2.
+
config HID_XIAOMI
tristate "Xiaomi"
- depends on HID
help
Adds support for side buttons of Xiaomi Mi Dual Mode Wireless
Mouse Silent Edition.
config HID_GYRATION
tristate "Gyration remote control"
- depends on HID
help
Support for Gyration remote control.
config HID_ICADE
tristate "ION iCade arcade controller"
- depends on HID
help
Support for the ION iCade arcade controller to work as a joystick.
@@ -505,14 +485,12 @@ config HID_ICADE
config HID_ITE
tristate "ITE devices"
- depends on HID
default !EXPERT
help
Support for ITE devices not fully compliant with HID standard.
config HID_JABRA
tristate "Jabra USB HID Driver"
- depends on HID
help
Support for Jabra USB HID devices.
@@ -523,26 +501,22 @@ config HID_JABRA
config HID_TWINHAN
tristate "Twinhan IR remote control"
- depends on HID
help
Support for Twinhan IR remote control.
config HID_KENSINGTON
tristate "Kensington Slimblade Trackball"
- depends on HID
default !EXPERT
help
Support for Kensington Slimblade Trackball.
config HID_LCPOWER
tristate "LC-Power"
- depends on HID
help
Support for LC-Power RC1000MCE RF remote control.
config HID_LED
tristate "Simple RGB LED support"
- depends on HID
depends on LEDS_CLASS
help
Support for simple RGB LED devices. Currently supported are:
@@ -557,7 +531,6 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
- depends on HID
select NEW_LEDS
select LEDS_CLASS
help
@@ -675,7 +648,6 @@ config LOGIWHEELS_FF
config HID_MAGICMOUSE
tristate "Apple Magic Mouse/Trackpad multi-touch support"
- depends on HID
help
Support for the Apple Magic Mouse/Trackpad multi-touch.
@@ -684,14 +656,12 @@ config HID_MAGICMOUSE
config HID_MALTRON
tristate "Maltron L90 keyboard"
- depends on HID
help
Adds support for the volume up, volume down, mute, and play/pause buttons
of the Maltron L90 keyboard.
config HID_MAYFLASH
tristate "Mayflash game controller adapter force feedback"
- depends on HID
select INPUT_FF_MEMLESS
help
Say Y here if you have HJZ Mayflash PS3 game controller adapters
@@ -707,14 +677,12 @@ config HID_MEGAWORLD_FF
config HID_REDRAGON
tristate "Redragon keyboards"
- depends on HID
default !EXPERT
help
Support for Redragon keyboards that need fix-ups to work properly.
config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
- depends on HID
default !EXPERT
select INPUT_FF_MEMLESS
help
@@ -722,14 +690,12 @@ config HID_MICROSOFT
config HID_MONTEREY
tristate "Monterey Genius KB29E keyboard"
- depends on HID
default !EXPERT
help
Support for Monterey Genius KB29E.
config HID_MULTITOUCH
tristate "HID Multitouch panels"
- depends on HID
help
Generic support for HID multitouch panels.
@@ -775,7 +741,6 @@ config HID_MULTITOUCH
config HID_NINTENDO
tristate "Nintendo Joy-Con and Pro Controller support"
- depends on HID
depends on NEW_LEDS
depends on LEDS_CLASS
select POWER_SUPPLY
@@ -811,7 +776,6 @@ config HID_NTRIG
config HID_ORTEK
tristate "Ortek PKB-1700/WKB-2000/Skycable wireless keyboard and mouse trackpad"
- depends on HID
help
There are certain devices which have LogicalMaximum wrong in the keyboard
usage page of their report descriptor. The most prevalent ones so far
@@ -824,7 +788,6 @@ config HID_ORTEK
config HID_PANTHERLORD
tristate "Pantherlord/GreenAsia game controller"
- depends on HID
help
Say Y here if you have a PantherLord/GreenAsia based game controller
or adapter.
@@ -850,13 +813,11 @@ config HID_PENMOUNT
config HID_PETALYNX
tristate "Petalynx Maxter remote control"
- depends on HID
help
Support for Petalynx Maxter remote control.
config HID_PICOLCD
tristate "PicoLCD (graphic version)"
- depends on HID
help
This provides support for Minibox PicoLCD devices, currently
only the graphical ones are supported.
@@ -922,7 +883,6 @@ config HID_PICOLCD_CIR
config HID_PLANTRONICS
tristate "Plantronics USB HID Driver"
- depends on HID
help
Provides HID support for Plantronics USB audio devices.
Correctly maps vendor unique volume up/down HID usages to
@@ -933,7 +893,6 @@ config HID_PLANTRONICS
config HID_PLAYSTATION
tristate "PlayStation HID Driver"
- depends on HID
depends on LEDS_CLASS_MULTICOLOR
select CRC32
select POWER_SUPPLY
@@ -950,16 +909,23 @@ config PLAYSTATION_FF
Say Y here if you would like to enable force feedback support for
PlayStation game controllers.
+config HID_PXRC
+ tristate "PhoenixRC HID Flight Controller"
+ depends on HID
+ help
+ Support for PhoenixRC HID Flight Controller, an 8-axis flight controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-pxrc.
+
config HID_RAZER
tristate "Razer non-fully HID-compliant devices"
- depends on HID
help
Support for Razer devices that are not fully compliant with the
HID standard.
config HID_PRIMAX
tristate "Primax non-fully HID-compliant devices"
- depends on HID
help
Support for Primax devices that are not fully compliant with the
HID standard.
@@ -981,7 +947,6 @@ config HID_ROCCAT
config HID_SAITEK
tristate "Saitek (Mad Catz) non-fully HID-compliant devices"
- depends on HID
help
Support for Saitek devices that are not fully compliant with the
HID standard.
@@ -999,7 +964,6 @@ config HID_SAMSUNG
config HID_SEMITEK
tristate "Semitek USB keyboards"
- depends on HID
help
Support for Semitek USB keyboards that are not fully compliant
with the HID standard.
@@ -1050,13 +1014,11 @@ config SONY_FF
config HID_SPEEDLINK
tristate "Speedlink VAD Cezanne mouse support"
- depends on HID
help
Support for Speedlink Vicious and Divine Cezanne mouse.
config HID_STEAM
tristate "Steam Controller support"
- depends on HID
select POWER_SUPPLY
help
Say Y here if you have a Steam Controller if you want to use it
@@ -1065,19 +1027,16 @@ config HID_STEAM
config HID_STEELSERIES
tristate "Steelseries SRW-S1 steering wheel support"
- depends on HID
help
Support for Steelseries SRW-S1 steering wheel
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
- depends on HID
help
Support for Sunplus wireless desktop.
config HID_RMI
tristate "Synaptics RMI4 device support"
- depends on HID
select RMI4_CORE
select RMI4_F03
select RMI4_F11
@@ -1090,7 +1049,6 @@ config HID_RMI
config HID_GREENASIA
tristate "GreenAsia (Product ID 0x12) game controller support"
- depends on HID
help
Say Y here if you have a GreenAsia (Product ID 0x12) based game
controller or adapter.
@@ -1112,7 +1070,6 @@ config HID_HYPERV_MOUSE
config HID_SMARTJOYPLUS
tristate "SmartJoy PLUS PS2/USB adapter support"
- depends on HID
help
Support for SmartJoy PLUS PS2/USB adapter, Super Dual Box,
Super Joy Box 3 Pro, Super Dual Box Pro, and Super Joy Box 5 Pro.
@@ -1130,20 +1087,23 @@ config SMARTJOYPLUS_FF
config HID_TIVO
tristate "TiVo Slide Bluetooth remote control support"
- depends on HID
help
Say Y if you have a TiVo Slide Bluetooth remote control.
config HID_TOPSEED
tristate "TopSeed Cyberlink, BTC Emprex, Conceptronic remote control support"
- depends on HID
help
Say Y if you have a TopSeed Cyberlink or BTC Emprex or Conceptronic
CLLRCMCE remote control.
+config HID_TOPRE
+ tristate "Topre REALFORCE keyboards"
+ depends on HID
+ help
+ Say Y for N-key rollover support on Topre REALFORCE R2 108-key keyboards.
+
config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
- depends on HID
depends on LEDS_CLASS
select HID_LED
help
@@ -1170,7 +1130,6 @@ config THRUSTMASTER_FF
config HID_UDRAW_PS3
tristate "THQ PS3 uDraw tablet"
- depends on HID
help
Say Y here if you want to use the THQ uDraw gaming tablet for
the PS3.
@@ -1207,7 +1166,6 @@ config HID_WACOM
config HID_WIIMOTE
tristate "Nintendo Wii / Wii U peripherals"
- depends on HID
depends on LEDS_CLASS
select POWER_SUPPLY
select INPUT_FF_MEMLESS
@@ -1232,7 +1190,6 @@ config HID_WIIMOTE
config HID_XINMO
tristate "Xin-Mo non-fully compliant devices"
- depends on HID
help
Support for Xin-Mo devices that are not fully compliant with the HID
standard. Currently only supports the Xin-Mo Dual Arcade. Say Y here
@@ -1240,7 +1197,6 @@ config HID_XINMO
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
- depends on HID
help
Say Y here if you have a Zeroplus based game controller.
@@ -1254,13 +1210,12 @@ config ZEROPLUS_FF
config HID_ZYDACRON
tristate "Zydacron remote control support"
- depends on HID
help
Support for Zydacron remote control.
config HID_SENSOR_HUB
tristate "HID Sensors framework support"
- depends on HID && HAS_IOMEM
+ depends on HAS_IOMEM
select MFD_CORE
default n
help
@@ -1289,7 +1244,6 @@ config HID_SENSOR_CUSTOM_SENSOR
config HID_ALPS
tristate "Alps HID device support"
- depends on HID
help
Support for Alps I2C HID touchpads and StickPointer.
Say Y here if you have a Alps touchpads over i2c-hid or usbhid
@@ -1307,7 +1261,7 @@ config HID_MCP2221
will be called hid-mcp2221.ko.
config HID_KUNIT_TEST
- bool "KUnit tests for HID" if !KUNIT_ALL_TESTS
+ tristate "KUnit tests for HID" if !KUNIT_ALL_TESTS
depends on KUNIT=y
depends on HID_UCLOGIC
default KUNIT_ALL_TESTS
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index b0bef8098139..e8014c1a2f8b 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -101,6 +101,7 @@ hid-picolcd-$(CONFIG_DEBUG_FS) += hid-picolcd_debugfs.o
obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
obj-$(CONFIG_HID_PLAYSTATION) += hid-playstation.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
+obj-$(CONFIG_HID_PXRC) += hid-pxrc.o
obj-$(CONFIG_HID_RAZER) += hid-razer.o
obj-$(CONFIG_HID_REDRAGON) += hid-redragon.o
obj-$(CONFIG_HID_RETRODE) += hid-retrode.o
@@ -123,6 +124,7 @@ obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o hid-thrustmaster.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
+obj-$(CONFIG_HID_TOPRE) += hid-topre.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_U2FZERO) += hid-u2fzero.o
hid-uclogic-objs := hid-uclogic-core.o \
@@ -136,6 +138,7 @@ obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_VIEWSONIC) += hid-viewsonic.o
+obj-$(CONFIG_HID_VRC2) += hid-vrc2.o
wacom-objs := wacom_wac.o wacom_sys.o
obj-$(CONFIG_HID_WACOM) += wacom.o
@@ -144,8 +147,10 @@ obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o
obj-$(CONFIG_HID_SENSOR_CUSTOM_SENSOR) += hid-sensor-custom.o
-obj-$(CONFIG_HID_KUNIT_TEST) += hid-uclogic-rdesc.o \
+hid-uclogic-test-objs := hid-uclogic-rdesc.o \
+ hid-uclogic-params.o \
hid-uclogic-rdesc-test.o
+obj-$(CONFIG_HID_KUNIT_TEST) += hid-uclogic-test.o
obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 4b90c86ee5f8..47774b9ab3de 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -288,11 +288,29 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
return 0;
}
+static const struct dmi_system_id dmi_nodevs[] = {
+ {
+ /*
+ * Google Chromebooks use Chrome OS Embedded Controller Sensor
+ * Hub instead of Sensor Hub Fusion and leave MP2
+ * uninitialized, which disables all functionality, including
+ * the registers needed for feature detection.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ },
+ },
+ { }
+};
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
int rc;
+ if (dmi_first_match(dmi_nodevs))
+ return -ENODEV;
+
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
if (!privdata)
return -ENOMEM;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 70436f9fad2f..4da2f9f62aba 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -110,6 +110,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
amd_sfh1_1_set_desc_ops(mp2_ops);
cl_data->num_hid_devices = amd_sfh_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+ if (cl_data->num_hid_devices == 0)
+ return -ENODEV;
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -286,13 +288,13 @@ int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
phy_base <<= 21;
if (!devm_request_mem_region(dev, phy_base, 128 * 1024, "amd_sfh")) {
- dev_err(dev, "can't reserve mmio registers\n");
+ dev_dbg(dev, "can't reserve mmio registers\n");
return -ENOMEM;
}
mp2->vsbase = devm_ioremap(dev, phy_base, 128 * 1024);
if (!mp2->vsbase) {
- dev_err(dev, "failed to remap vsbase\n");
+ dev_dbg(dev, "failed to remap vsbase\n");
return -ENOMEM;
}
@@ -301,7 +303,7 @@ int amd_sfh1_1_init(struct amd_mp2_dev *mp2)
memcpy_fromio(&binfo, mp2->vsbase, sizeof(struct sfh_base_info));
if (binfo.sbase.fw_info.fw_ver == 0 || binfo.sbase.s_list.sl.sensors == 0) {
- dev_err(dev, "failed to get sensors\n");
+ dev_dbg(dev, "failed to get sensors\n");
return -EOPNOTSUPP;
}
dev_dbg(dev, "firmware version 0x%x\n", binfo.sbase.fw_info.fw_ver);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 08c9a9a60ae4..b59c3dafa6a4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1212,6 +1212,13 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc = new_rdesc;
}
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD &&
+ *rsize == 331 && rdesc[190] == 0x85 && rdesc[191] == 0x5a &&
+ rdesc[204] == 0x95 && rdesc[205] == 0x05) {
+ hid_info(hdev, "Fixing up Asus N-KEY keyb report descriptor\n");
+ rdesc[205] = 0x01;
+ }
+
return rdesc;
}
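
The byte pattern matched above corresponds to two HID short items in the N-KEY keyboard's descriptor. A brief decoding, as a C comment sketch (offsets come from the hunk above, item meanings from the HID 1.11 item encoding):

/*
 * rdesc[190] = 0x85, rdesc[191] = 0x5a   Report ID (0x5a)
 * rdesc[204] = 0x95, rdesc[205] = 0x05   Report Count (5)
 *
 * The fixup rewrites rdesc[205] to 0x01, i.e. Report Count (1), for the
 * report with ID 0x5a.
 */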
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b7f5566e338d..9c1d31f63f85 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle
*/
struct hid_report *hid_register_report(struct hid_device *device,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int application)
{
struct hid_report_enum *report_enum = device->report_enum + type;
@@ -967,7 +967,7 @@ static const char * const hid_report_names[] = {
* parsing.
*/
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts)
{
@@ -1921,7 +1921,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
* DO NOT USE in hid drivers directly, but through hid_hw_request instead.
*/
int __hid_request(struct hid_device *hid, struct hid_report *report,
- int reqtype)
+ enum hid_class_request reqtype)
{
char *buf;
int ret;
@@ -1954,8 +1954,8 @@ out:
}
EXPORT_SYMBOL_GPL(__hid_request);
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
- int interrupt)
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt)
{
struct hid_report_enum *report_enum = hid->report_enum + type;
struct hid_report *report;
@@ -2019,7 +2019,8 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
*
* This is data entry for lower layers.
*/
-int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt)
{
struct hid_report_enum *report_enum;
struct hid_driver *hdrv;
@@ -2088,6 +2089,7 @@ const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
return NULL;
}
+EXPORT_SYMBOL_GPL(hid_match_id);
static const struct hid_device_id hid_hiddev_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
@@ -2352,7 +2354,7 @@ EXPORT_SYMBOL_GPL(hid_hw_close);
* @reqtype: hid request type
*/
void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype)
+ struct hid_report *report, enum hid_class_request reqtype)
{
if (hdev->ll_driver->request)
return hdev->ll_driver->request(hdev, report, reqtype);
@@ -2377,7 +2379,7 @@ EXPORT_SYMBOL_GPL(hid_hw_request);
*/
int hid_hw_raw_request(struct hid_device *hdev,
unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype)
+ size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
{
if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
return -EINVAL;
@@ -2739,10 +2741,12 @@ int hid_add_device(struct hid_device *hdev)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
}
+ hdev->id = atomic_inc_return(&id);
+
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
- hdev->vendor, hdev->product, atomic_inc_return(&id));
+ hdev->vendor, hdev->product, hdev->id);
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index ff40f1e55c21..7ae5f27df54d 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -608,9 +608,11 @@ static struct hid_driver hammer_driver = {
.probe = hammer_probe,
.remove = hammer_remove,
.feature_mapping = vivaldi_feature_mapping,
- .input_configured = vivaldi_input_configured,
.input_mapping = hammer_input_mapping,
.event = hammer_event,
+ .driver = {
+ .dev_groups = vivaldi_attribute_groups,
+ },
};
static int __init hammer_init(void)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0fb720a96399..da86565f04d4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -185,6 +185,8 @@
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
@@ -414,6 +416,7 @@
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
+#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -1228,6 +1231,9 @@
#define USB_DEVICE_ID_TIVO_SLIDE 0x1201
#define USB_DEVICE_ID_TIVO_SLIDE_PRO 0x1203
+#define USB_VENDOR_ID_TOPRE 0x0853
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148
+
#define USB_VENDOR_ID_TOPSEED 0x0766
#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
@@ -1276,10 +1282,12 @@
#define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d
#define USB_VENDOR_ID_UGEE 0x28bd
+#define USB_DEVICE_ID_UGEE_PARBLO_A610_PRO 0x1903
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540 0x0075
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640 0x0094
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01 0x0042
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L 0x0935
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S 0x0909
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
#define USB_DEVICE_ID_UGEE_TABLET_G5 0x0074
#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
@@ -1383,6 +1391,7 @@
#define USB_VENDOR_ID_MULTIPLE_1781 0x1781
#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a9d
+#define USB_DEVICE_ID_PHOENIXRC 0x0898
#define USB_VENDOR_ID_DRACAL_RAPHNET 0x289b
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 48c1c02c69f4..859aeb07542e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -383,6 +383,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1532,7 +1534,10 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
* assume ours
*/
if (!report->tool)
- hid_report_set_tool(report, input, usage->code);
+ report->tool = usage->code;
+
+ /* drivers may have changed the value behind our back, resend it */
+ hid_report_set_tool(report, input, report->tool);
} else {
hid_report_release_tool(report, input, usage->code);
}
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 68f9e9d207f4..71a9c258a20b 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -41,6 +41,9 @@ module_param(disable_tap_to_click, bool, 0644);
MODULE_PARM_DESC(disable_tap_to_click,
"Disable Tap-To-Click mode reporting for touchpads (only on the K400 currently).");
+/* Define a non-zero software ID to identify our own requests */
+#define LINUX_KERNEL_SW_ID 0x01
+
#define REPORT_ID_HIDPP_SHORT 0x10
#define REPORT_ID_HIDPP_LONG 0x11
#define REPORT_ID_HIDPP_VERY_LONG 0x12
@@ -71,21 +74,18 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
-#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
-#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
-#define HIDPP_QUIRK_HIDPP_WHEELS BIT(29)
-#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(30)
-#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(31)
+#define HIDPP_QUIRK_HIDPP_WHEELS BIT(26)
+#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(27)
+#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(28)
/* These are just aliases for now */
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
#define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
/* Convenience constant to check for any high-res support. */
-#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
- HIDPP_QUIRK_HI_RES_SCROLL_X2121)
+#define HIDPP_CAPABILITY_HI_RES_SCROLL (HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \
+ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
@@ -96,6 +96,9 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4)
#define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5)
#define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6)
+#define HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL BIT(7)
+#define HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL BIT(8)
+#define HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL BIT(9)
#define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -343,7 +346,7 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
else
message->report_id = REPORT_ID_HIDPP_LONG;
message->fap.feature_index = feat_index;
- message->fap.funcindex_clientid = funcindex_clientid;
+ message->fap.funcindex_clientid = funcindex_clientid | LINUX_KERNEL_SW_ID;
memcpy(&message->fap.params, params, param_count);
ret = hidpp_send_message_sync(hidpp, message, response);
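
For readers unfamiliar with the HID++ 2.0 framing this hunk relies on: the third byte of a feature access request carries the function index in its high nibble and a software ID in its low nibble. The CMD_* constants below now encode only the function index, and LINUX_KERNEL_SW_ID supplies the low nibble, so replies to the driver's own requests can be told apart from device-initiated events, which carry a software ID of 0. A minimal sketch of the packing (illustrative, not part of the patch):

	u8 sw_id      = 0x01;			/* LINUX_KERNEL_SW_ID */
	u8 func_idx   = 0x10;			/* CMD_ROOT_GET_PROTOCOL_VERSION, function 1 shifted into the high nibble */
	u8 third_byte = func_idx | sw_id;	/* 0x11, as sent on the wire */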
@@ -856,8 +859,8 @@ static int hidpp_unifying_init(struct hidpp_device *hidpp)
#define HIDPP_PAGE_ROOT 0x0000
#define HIDPP_PAGE_ROOT_IDX 0x00
-#define CMD_ROOT_GET_FEATURE 0x01
-#define CMD_ROOT_GET_PROTOCOL_VERSION 0x11
+#define CMD_ROOT_GET_FEATURE 0x00
+#define CMD_ROOT_GET_PROTOCOL_VERSION 0x10
static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
u8 *feature_index, u8 *feature_type)
@@ -934,9 +937,9 @@ print_version:
#define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005
-#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x01
-#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x11
-#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x21
+#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x00
+#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x10
+#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x20
static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp,
u8 feature_index, u8 *nameLength)
@@ -1966,8 +1969,8 @@ static int hidpp_touchpad_fw_items_set(struct hidpp_device *hidpp,
#define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100
-#define CMD_TOUCHPAD_GET_RAW_INFO 0x01
-#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x21
+#define CMD_TOUCHPAD_GET_RAW_INFO 0x00
+#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x20
#define EVENT_TOUCHPAD_RAW_XY 0x00
@@ -3415,14 +3418,14 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
int ret;
u8 multiplier = 1;
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) {
ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
if (ret == 0)
ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
- } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL) {
ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
&multiplier);
- } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */ {
+ } else /* if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL) */ {
ret = hidpp10_enable_scrolling_acceleration(hidpp);
multiplier = 8;
}
@@ -3437,6 +3440,49 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
return 0;
}
+static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
+{
+ int ret;
+ unsigned long capabilities;
+
+ capabilities = hidpp->capabilities;
+
+ if (hidpp->protocol_major >= 2) {
+ u8 feature_index;
+ u8 feature_type;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scroll wheel\n");
+ return 0;
+ }
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index, &feature_type);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
+ }
+ } else {
+ struct hidpp_report response;
+
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ HIDPP_ENABLE_FAST_SCROLL,
+ NULL, 0, &response);
+ if (!ret) {
+ hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL;
+ hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n");
+ }
+ }
+
+ if (hidpp->capabilities == capabilities)
+ hid_dbg(hidpp->hid_dev, "Did not detect HID++ hi-res scrolling hardware support\n");
+ return 0;
+}
+
/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -3691,8 +3737,9 @@ static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
* cases we must return early (falling back to default behaviour) to
* avoid a crash in hidpp_scroll_counter_handle_scroll.
*/
- if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || hidpp->input == NULL || counter->wheel_multiplier == 0)
+ if (!(hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
+ || value == 0 || hidpp->input == NULL
+ || counter->wheel_multiplier == 0)
return 0;
hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value);
@@ -3924,6 +3971,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
+ hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -3943,7 +3991,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
hi_res_scroll_enable(hidpp);
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
@@ -3959,8 +4007,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
hidpp_populate_input(hidpp, input);
ret = input_register_device(input);
- if (ret)
+ if (ret) {
input_free_device(input);
+ return;
+ }
hidpp->delayed_input = input;
}
@@ -4219,6 +4269,21 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
+static const struct hid_device_id unhandled_hidpp_devices[] = {
+ /* Logitech Harmony Adapter for PS3, handled in hid-sony */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ /* Handled in hid-generic */
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD) },
+ {}
+};
+
+static bool hidpp_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ /* Refuse to handle devices handled by other HID drivers */
+ return !hid_match_id(hdev, unhandled_hidpp_devices);
+}
+
#define LDJ_DEVICE(product) \
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
USB_VENDOR_ID_LOGITECH, (product))
@@ -4239,42 +4304,9 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
- { /* Mouse Logitech Anywhere MX */
- LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech Cube */
- LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M335 */
- LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M515 */
- LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
LDJ_DEVICE(0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
- | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
- { /* Mouse Logitech M705 (firmware RQM17) */
- LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
- { /* Mouse Logitech M705 (firmware RQM67) */
- LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech M720 */
- LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2 */
- LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4072), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Anywhere 2S */
- LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master */
- LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 2S */
- LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech MX Master 3 */
- LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* Mouse Logitech Performance MX */
- LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
{ /* Keyboard logitech K400 */
LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
@@ -4335,18 +4367,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
- { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
- { /* MX Master mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* MX Ergo trackball over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
- { /* MX Master 3 mouse over Bluetooth */
- HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023),
- .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+
+ { /* And try to enable HID++ for all the Logitech Bluetooth devices */
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, USB_VENDOR_ID_LOGITECH, HID_ANY_ID) },
{}
};
@@ -4360,6 +4383,7 @@ static const struct hid_usage_id hidpp_usages[] = {
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
+ .match = hidpp_match,
.report_fixup = hidpp_report_fixup,
.probe = hidpp_probe,
.remove = hidpp_remove,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 2e72922e36f5..91a4d3fc30e0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1186,7 +1186,7 @@ static void mt_touch_report(struct hid_device *hid,
int contact_count = -1;
/* sticky fingers release in progress, abort */
- if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
scantime = *app->scantime;
@@ -1267,7 +1267,7 @@ static void mt_touch_report(struct hid_device *hid,
del_timer(&td->release_timer);
}
- clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
static int mt_touch_input_configured(struct hid_device *hdev,
@@ -1699,11 +1699,11 @@ static void mt_expired_timeout(struct timer_list *t)
* An input report came in just before we release the sticky fingers,
* it will take care of the sticky fingers.
*/
- if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
+ if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
mt_release_contacts(hdev);
- clear_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
+ clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
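
The switch from test_and_set_bit()/clear_bit() to the _lock/_unlock variants gives MT_IO_FLAGS_RUNNING acquire/release semantics: plain clear_bit() carries no ordering, so stores made while the flag was held could become visible after the flag was already clear. A minimal sketch of the bit-lock pattern (names are illustrative):

	static unsigned long flags;

	if (test_and_set_bit_lock(0, &flags))	/* acquire; bail out if already running */
		return;
	/* ... touch state protected by the flag ... */
	clear_bit_unlock(0, &flags);		/* release; prior stores visible before the clear */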
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 92ac4f605f13..5bfc0c450460 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -760,12 +760,31 @@ static int joycon_read_stick_calibration(struct joycon_ctlr *ctlr, u16 cal_addr,
cal_y->max = cal_y->center + y_max_above;
cal_y->min = cal_y->center - y_min_below;
- return 0;
+ /* check if calibration values are plausible */
+ if (cal_x->min >= cal_x->center || cal_x->center >= cal_x->max ||
+ cal_y->min >= cal_y->center || cal_y->center >= cal_y->max)
+ ret = -EINVAL;
+
+ return ret;
}
static const u16 DFLT_STICK_CAL_CEN = 2000;
static const u16 DFLT_STICK_CAL_MAX = 3500;
static const u16 DFLT_STICK_CAL_MIN = 500;
+static void joycon_use_default_calibration(struct hid_device *hdev,
+ struct joycon_stick_cal *cal_x,
+ struct joycon_stick_cal *cal_y,
+ const char *stick, int ret)
+{
+ hid_warn(hdev,
+ "Failed to read %s stick cal, using defaults; e=%d\n",
+ stick, ret);
+
+ cal_x->center = cal_y->center = DFLT_STICK_CAL_CEN;
+ cal_x->max = cal_y->max = DFLT_STICK_CAL_MAX;
+ cal_x->min = cal_y->min = DFLT_STICK_CAL_MIN;
+}
+
static int joycon_request_calibration(struct joycon_ctlr *ctlr)
{
u16 left_stick_addr = JC_CAL_FCT_DATA_LEFT_ADDR;
@@ -793,38 +812,24 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
&ctlr->left_stick_cal_x,
&ctlr->left_stick_cal_y,
true);
- if (ret) {
- hid_warn(ctlr->hdev,
- "Failed to read left stick cal, using dflts; e=%d\n",
- ret);
-
- ctlr->left_stick_cal_x.center = DFLT_STICK_CAL_CEN;
- ctlr->left_stick_cal_x.max = DFLT_STICK_CAL_MAX;
- ctlr->left_stick_cal_x.min = DFLT_STICK_CAL_MIN;
- ctlr->left_stick_cal_y.center = DFLT_STICK_CAL_CEN;
- ctlr->left_stick_cal_y.max = DFLT_STICK_CAL_MAX;
- ctlr->left_stick_cal_y.min = DFLT_STICK_CAL_MIN;
- }
+ if (ret)
+ joycon_use_default_calibration(ctlr->hdev,
+ &ctlr->left_stick_cal_x,
+ &ctlr->left_stick_cal_y,
+ "left", ret);
/* read the right stick calibration data */
ret = joycon_read_stick_calibration(ctlr, right_stick_addr,
&ctlr->right_stick_cal_x,
&ctlr->right_stick_cal_y,
false);
- if (ret) {
- hid_warn(ctlr->hdev,
- "Failed to read right stick cal, using dflts; e=%d\n",
- ret);
-
- ctlr->right_stick_cal_x.center = DFLT_STICK_CAL_CEN;
- ctlr->right_stick_cal_x.max = DFLT_STICK_CAL_MAX;
- ctlr->right_stick_cal_x.min = DFLT_STICK_CAL_MIN;
- ctlr->right_stick_cal_y.center = DFLT_STICK_CAL_CEN;
- ctlr->right_stick_cal_y.max = DFLT_STICK_CAL_MAX;
- ctlr->right_stick_cal_y.min = DFLT_STICK_CAL_MIN;
- }
+ if (ret)
+ joycon_use_default_calibration(ctlr->hdev,
+ &ctlr->right_stick_cal_x,
+ &ctlr->right_stick_cal_y,
+ "right", ret);
hid_dbg(ctlr->hdev, "calibration:\n"
"l_x_c=%d l_x_max=%d l_x_min=%d\n"
@@ -1221,6 +1226,7 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
spin_lock_irqsave(&ctlr->lock, flags);
if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report &&
+ ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED &&
(msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS &&
(ctlr->rumble_queue_head != ctlr->rumble_queue_tail ||
ctlr->rumble_zero_countdown > 0)) {
@@ -1545,12 +1551,13 @@ static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l,
ctlr->rumble_queue_head = 0;
memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data,
JC_RUMBLE_DATA_SIZE);
- spin_unlock_irqrestore(&ctlr->lock, flags);
/* don't wait for the periodic send (reduces latency) */
- if (schedule_now)
+ if (schedule_now && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED)
queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
return 0;
}
@@ -1902,9 +1909,8 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
/* Set the home LED to 0 as default state */
ret = joycon_home_led_brightness_set(led, 0);
if (ret) {
- hid_err(hdev, "Failed to set home LED dflt; ret=%d\n",
- ret);
- return ret;
+ hid_warn(hdev, "Failed to set home LED default, unregistering home LED");
+ devm_led_classdev_unregister(&hdev->dev, led);
}
}
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index b1b5721b5d8f..40050eb85c0a 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -692,15 +692,12 @@ static ssize_t hardware_version_show(struct device *dev,
static DEVICE_ATTR_RO(hardware_version);
-static struct attribute *ps_device_attributes[] = {
+static struct attribute *ps_device_attrs[] = {
&dev_attr_firmware_version.attr,
&dev_attr_hardware_version.attr,
NULL
};
-
-static const struct attribute_group ps_device_attribute_group = {
- .attrs = ps_device_attributes,
-};
+ATTRIBUTE_GROUPS(ps_device);
static int dualsense_get_calibration_data(struct dualsense *ds)
{
@@ -1448,12 +1445,6 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
}
- ret = devm_device_add_group(&hdev->dev, &ps_device_attribute_group);
- if (ret) {
- hid_err(hdev, "Failed to register sysfs nodes.\n");
- goto err_close;
- }
-
return ret;
err_close:
@@ -1487,6 +1478,9 @@ static struct hid_driver ps_driver = {
.probe = ps_probe,
.remove = ps_remove,
.raw_event = ps_raw_event,
+ .driver = {
+ .dev_groups = ps_device_groups,
+ },
};
static int __init ps_init(void)
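
Moving the attribute group from a manual devm_device_add_group() call in probe to the driver's .dev_groups pointer lets the driver core create and remove the sysfs files around binding, with no error path left in the driver. ATTRIBUTE_GROUPS(ps_device) builds the group array from ps_device_attrs; roughly, its effect is (sketch, not new code in the patch):

	static const struct attribute_group ps_device_group = {
		.attrs = ps_device_attrs,
	};
	static const struct attribute_group *ps_device_groups[] = {
		&ps_device_group,
		NULL,
	};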
diff --git a/drivers/hid/hid-pxrc.c b/drivers/hid/hid-pxrc.c
new file mode 100644
index 000000000000..b0e517f9cde7
--- /dev/null
+++ b/drivers/hid/hid-pxrc.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for PhoenixRC 8-axis flight controller
+ *
+ * Copyright (C) 2022 Marcus Folkesson <marcus.folkesson@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct pxrc_priv {
+ u8 slider;
+ u8 dial;
+ bool alternate;
+};
+
+static __u8 pxrc_rdesc_fixed[] = {
+ 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ 0x09, 0x04, // Usage (Joystick)
+ 0xA1, 0x01, // Collection (Application)
+ 0x09, 0x01, // Usage (Pointer)
+ 0xA1, 0x00, // Collection (Physical)
+ 0x09, 0x30, // Usage (X)
+ 0x09, 0x36, // Usage (Slider)
+ 0x09, 0x31, // Usage (Y)
+ 0x09, 0x32, // Usage (Z)
+ 0x09, 0x33, // Usage (Rx)
+ 0x09, 0x34, // Usage (Ry)
+ 0x09, 0x35, // Usage (Rz)
+ 0x09, 0x37, // Usage (Dial)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xFF, 0x00, // Logical Maximum (255)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0xFF, 0x00, // Physical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x08, // Report Count (8)
+ 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ 0xC0, // End Collection
+ 0xC0, // End Collection
+};
+
+static __u8 *pxrc_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ hid_info(hdev, "fixing up PXRC report descriptor\n");
+ *rsize = sizeof(pxrc_rdesc_fixed);
+ return pxrc_rdesc_fixed;
+}
+
+static int pxrc_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size)
+{
+ struct pxrc_priv *priv = hid_get_drvdata(hdev);
+
+ if (priv->alternate)
+ priv->slider = data[7];
+ else
+ priv->dial = data[7];
+
+ data[1] = priv->slider;
+ data[7] = priv->dial;
+
+ priv->alternate = !priv->alternate;
+ return 0;
+}
+
+static int pxrc_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct pxrc_priv *priv;
+
+ priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, priv);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id pxrc_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_PHOENIXRC) },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(hid, pxrc_devices);
+
+static struct hid_driver pxrc_driver = {
+ .name = "hid-pxrc",
+ .id_table = pxrc_devices,
+ .report_fixup = pxrc_report_fixup,
+ .probe = pxrc_probe,
+ .raw_event = pxrc_raw_event,
+};
+module_hid_driver(pxrc_driver);
+
+MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
+MODULE_DESCRIPTION("HID driver for PXRC 8-axis flight controller");
+MODULE_LICENSE("GPL");
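
The raw_event handler above suggests the device time-multiplexes two controls on byte 7, alternating between the slider and the dial, while the fixed report descriptor expects both at stable offsets. Under that reading of the code, the 8-byte report seen by the input layer after pxrc_raw_event() lays out as follows (struct and field names are illustrative only):

	struct pxrc_report {
		u8 x;		/* Usage (X) */
		u8 slider;	/* Usage (Slider), cached from alternate reports */
		u8 y;		/* Usage (Y) */
		u8 z;		/* Usage (Z) */
		u8 rx;		/* Usage (Rx) */
		u8 ry;		/* Usage (Ry) */
		u8 rz;		/* Usage (Rz) */
		u8 dial;	/* Usage (Dial), cached from the remaining reports */
	};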
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index dc67717d2dab..70f602c64fd1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -314,6 +314,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 311eee599ce9..bb1f423f4ace 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -237,8 +237,7 @@ static int rmi_hid_read_block(struct rmi_transport_dev *xport, u16 addr,
read_input_count = data->readReport[1];
memcpy(buf + bytes_read, &data->readReport[2],
- read_input_count < bytes_needed ?
- read_input_count : bytes_needed);
+ min(read_input_count, bytes_needed));
bytes_read += read_input_count;
bytes_needed -= read_input_count;
@@ -347,8 +346,7 @@ static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size)
return 0;
}
- memcpy(hdata->readReport, data, size < hdata->input_report_size ?
- size : hdata->input_report_size);
+ memcpy(hdata->readReport, data, min((u32)size, hdata->input_report_size));
set_bit(RMI_READ_DATA_PENDING, &hdata->flags);
wake_up(&hdata->wait);
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 26373b82fe81..6da80e442fdd 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -257,6 +257,8 @@ int roccat_report_event(int minor, u8 const *data)
if (!new_value)
return -ENOMEM;
+ mutex_lock(&device->cbuf_lock);
+
report = &device->cbuf[device->cbuf_end];
/* passing NULL is safe */
@@ -276,6 +278,8 @@ int roccat_report_event(int minor, u8 const *data)
reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
}
+ mutex_unlock(&device->cbuf_lock);
+
wake_up_interruptible(&device->wait);
return 0;
}
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 60ec2b29d54d..03691cdcfb8e 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -368,7 +368,7 @@ static const unsigned int buzz_keymap[] = {
};
/* The Navigation controller is a partial DS3 and uses the same HID report
- * and hence the same keymap indices, however not not all axes/buttons
+ * and hence the same keymap indices, however not all axes/buttons
* are physically present. We use the same axis and button mapping as
* the DS3, which uses the Linux gamepad spec.
*/
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index a3b151b29bd7..8ee43cb225fc 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to send\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -246,7 +256,7 @@ static int steam_get_serial(struct steam_device *steam)
if (reply[0] != 0xae || reply[1] != 0x15 || reply[2] != 0x01)
return -EIO;
reply[3 + STEAM_SERIAL_LEN] = 0;
- strlcpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
+ strscpy(steam->serial_no, reply + 3, sizeof(steam->serial_no));
return 0;
}
@@ -514,7 +524,7 @@ static int steam_register(struct steam_device *steam)
*/
mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
- strlcpy(steam->serial_no, "XXXXXXXXXX",
+ strscpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
mutex_unlock(&steam->mutex);
@@ -689,9 +699,9 @@ static struct hid_device *steam_create_client_hid(struct hid_device *hdev)
client_hdev->version = hdev->version;
client_hdev->type = hdev->type;
client_hdev->country = hdev->country;
- strlcpy(client_hdev->name, hdev->name,
+ strscpy(client_hdev->name, hdev->name,
sizeof(client_hdev->name));
- strlcpy(client_hdev->phys, hdev->phys,
+ strscpy(client_hdev->phys, hdev->phys,
sizeof(client_hdev->phys));
/*
* Since we use the same device info than the real interface to
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index c3e6d69fdfbd..cf1679b0d4fb 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -67,12 +67,13 @@ static const struct tm_wheel_info tm_wheels_infos[] = {
{0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
{0x0206, 0x0005, "Thrustmaster T300RS"},
{0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
+ {0x020a, 0x0005, "Thrustmaster T300RS (Sparco R383 Mod)"},
{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
{0x0002, 0x0002, "Thrustmaster T500RS"}
//{0x0407, 0x0001, "Thrustmaster TMX"}
};
-static const uint8_t tm_wheels_infos_length = 4;
+static const uint8_t tm_wheels_infos_length = 7;
/*
* This struct contains (in little endian) the response data
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
new file mode 100644
index 000000000000..88a91cdad5f8
--- /dev/null
+++ b/drivers/hid/hid-topre.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for Topre REALFORCE Keyboards
+ *
+ * Copyright (c) 2022 Harry Stern <harry@harrystern.net>
+ *
+ * Based on the hid-macally driver
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Harry Stern <harry@harrystern.net>");
+MODULE_DESCRIPTION("REALFORCE R2 Keyboard driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Fix the REALFORCE R2's non-boot interface's report descriptor to match the
+ * events it's actually sending. It claims to send array events but is instead
+ * sending variable events.
+ */
+static __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 119 && rdesc[69] == 0x29 && rdesc[70] == 0xe7 &&
+ rdesc[71] == 0x81 && rdesc[72] == 0x00) {
+ hid_info(hdev,
+ "fixing up Topre REALFORCE keyboard report descriptor\n");
+ rdesc[72] = 0x02;
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id topre_id_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, topre_id_table);
+
+static struct hid_driver topre_driver = {
+ .name = "topre",
+ .id_table = topre_id_table,
+ .report_fixup = topre_report_fixup,
+};
+
+module_hid_driver(topre_driver);
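
The fixup rewrites a single flag byte of an Input main item: in the HID item encoding, 0x81 introduces an Input item and the following data byte holds its flags, where bit 1 selects Variable (1) versus Array (0) reporting. A sketch of the two encodings, using the flag name from include/linux/hid.h (illustrative, not taken from the patch):

	/* HID_MAIN_ITEM_VARIABLE is 0x002 in include/linux/hid.h */
	0x81, 0x00,	/* Input (Data,Array,Abs) - what the descriptor claims */
	0x81, 0x02,	/* Input (Data,Var,Abs)   - what the keyboard actually sends */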
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 47a17375c7fc..0fbc408c2607 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -153,6 +153,7 @@ static int uclogic_input_configured(struct hid_device *hdev,
suffix = "Pad";
break;
case HID_DG_PEN:
+ case HID_DG_DIGITIZER:
suffix = "Pen";
break;
case HID_CP_CONSUMER_CONTROL:
@@ -510,6 +511,8 @@ static const struct hid_device_id uclogic_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER,
USB_DEVICE_ID_UGTIZER_TABLET_GT5040) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_PARBLO_A610_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_TABLET_G5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_TABLET_EX07S) },
@@ -524,6 +527,8 @@ static const struct hid_device_id uclogic_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
{ }
};
diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c
new file mode 100644
index 000000000000..57ef5d3e4b74
--- /dev/null
+++ b/drivers/hid/hid-uclogic-params-test.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * HID driver for UC-Logic devices not fully compliant with HID standard
+ *
+ * Copyright (c) 2022 José Expósito <jose.exposito89@gmail.com>
+ */
+
+#include <kunit/test.h>
+#include "./hid-uclogic-params.h"
+#include "./hid-uclogic-rdesc.h"
+
+#define MAX_STR_DESC_SIZE 14
+
+struct uclogic_parse_ugee_v2_desc_case {
+ const char *name;
+ int res;
+ const __u8 str_desc[MAX_STR_DESC_SIZE];
+ size_t str_desc_size;
+ const s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
+ enum uclogic_params_frame_type frame_type;
+};
+
+static struct uclogic_parse_ugee_v2_desc_case uclogic_parse_ugee_v2_desc_cases[] = {
+ {
+ .name = "invalid_str_desc",
+ .res = -EINVAL,
+ .str_desc = {},
+ .str_desc_size = 0,
+ .desc_params = {},
+ .frame_type = UCLOGIC_PARAMS_FRAME_BUTTONS,
+ },
+ {
+ .name = "resolution_with_value_0",
+ .res = 0,
+ .str_desc = {
+ 0x0E, 0x03,
+ 0x70, 0xB2,
+ 0x10, 0x77,
+ 0x08,
+ 0x00,
+ 0xFF, 0x1F,
+ 0x00, 0x00,
+ },
+ .str_desc_size = 12,
+ .desc_params = {
+ [UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 0xB270,
+ [UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = 0x7710,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0,
+ [UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = 0x1FFF,
+ [UCLOGIC_RDESC_FRAME_PH_ID_UM] = 0x08,
+ },
+ .frame_type = UCLOGIC_PARAMS_FRAME_BUTTONS,
+ },
+ /* XP-PEN Deco L str_desc: Frame with 8 buttons */
+ {
+ .name = "frame_type_buttons",
+ .res = 0,
+ .str_desc = {
+ 0x0E, 0x03,
+ 0x70, 0xB2,
+ 0x10, 0x77,
+ 0x08,
+ 0x00,
+ 0xFF, 0x1F,
+ 0xD8, 0x13,
+ },
+ .str_desc_size = 12,
+ .desc_params = {
+ [UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 0xB270,
+ [UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0x2320,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = 0x7710,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0x1770,
+ [UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = 0x1FFF,
+ [UCLOGIC_RDESC_FRAME_PH_ID_UM] = 0x08,
+ },
+ .frame_type = UCLOGIC_PARAMS_FRAME_BUTTONS,
+ },
+ /* PARBLO A610 PRO str_desc: Frame with 9 buttons and dial */
+ {
+ .name = "frame_type_dial",
+ .res = 0,
+ .str_desc = {
+ 0x0E, 0x03,
+ 0x96, 0xC7,
+ 0xF9, 0x7C,
+ 0x09,
+ 0x01,
+ 0xFF, 0x1F,
+ 0xD8, 0x13,
+ },
+ .str_desc_size = 12,
+ .desc_params = {
+ [UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 0xC796,
+ [UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0x2749,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = 0x7CF9,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0x1899,
+ [UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = 0x1FFF,
+ [UCLOGIC_RDESC_FRAME_PH_ID_UM] = 0x09,
+ },
+ .frame_type = UCLOGIC_PARAMS_FRAME_DIAL,
+ },
+ /* XP-PEN Deco Pro S str_desc: Frame with 8 buttons and mouse */
+ {
+ .name = "frame_type_mouse",
+ .res = 0,
+ .str_desc = {
+ 0x0E, 0x03,
+ 0xC8, 0xB3,
+ 0x34, 0x65,
+ 0x08,
+ 0x02,
+ 0xFF, 0x1F,
+ 0xD8, 0x13,
+ },
+ .str_desc_size = 12,
+ .desc_params = {
+ [UCLOGIC_RDESC_PEN_PH_ID_X_LM] = 0xB3C8,
+ [UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0x2363,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = 0x6534,
+ [UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0x13EC,
+ [UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = 0x1FFF,
+ [UCLOGIC_RDESC_FRAME_PH_ID_UM] = 0x08,
+ },
+ .frame_type = UCLOGIC_PARAMS_FRAME_MOUSE,
+ },
+};
+
+static void uclogic_parse_ugee_v2_desc_case_desc(struct uclogic_parse_ugee_v2_desc_case *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(uclogic_parse_ugee_v2_desc, uclogic_parse_ugee_v2_desc_cases,
+ uclogic_parse_ugee_v2_desc_case_desc);
+
+static void uclogic_parse_ugee_v2_desc_test(struct kunit *test)
+{
+ int res;
+ s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
+ enum uclogic_params_frame_type frame_type;
+ const struct uclogic_parse_ugee_v2_desc_case *params = test->param_value;
+
+ res = uclogic_params_parse_ugee_v2_desc(params->str_desc,
+ params->str_desc_size,
+ desc_params,
+ ARRAY_SIZE(desc_params),
+ &frame_type);
+ KUNIT_ASSERT_EQ(test, res, params->res);
+
+ if (res)
+ return;
+
+ KUNIT_EXPECT_EQ(test,
+ params->desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM],
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM]);
+ KUNIT_EXPECT_EQ(test,
+ params->desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM],
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM]);
+ KUNIT_EXPECT_EQ(test,
+ params->desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM],
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM]);
+ KUNIT_EXPECT_EQ(test,
+ params->desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM],
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM]);
+ KUNIT_EXPECT_EQ(test,
+ params->desc_params[UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM],
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM]);
+ KUNIT_EXPECT_EQ(test,
+ params->desc_params[UCLOGIC_RDESC_FRAME_PH_ID_UM],
+ desc_params[UCLOGIC_RDESC_FRAME_PH_ID_UM]);
+ KUNIT_EXPECT_EQ(test, params->frame_type, frame_type);
+}
+
+static struct kunit_case hid_uclogic_params_test_cases[] = {
+ KUNIT_CASE_PARAM(uclogic_parse_ugee_v2_desc_test,
+ uclogic_parse_ugee_v2_desc_gen_params),
+ {}
+};
+
+static struct kunit_suite hid_uclogic_params_test_suite = {
+ .name = "hid_uclogic_params_test",
+ .test_cases = hid_uclogic_params_test_cases,
+};
+
+kunit_test_suite(hid_uclogic_params_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the UC-Logic driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index c11fa239e6a2..34fa991e6267 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -1057,6 +1057,161 @@ cleanup:
}
/**
+ * uclogic_params_parse_ugee_v2_desc() - parse the string descriptor containing
+ * pen and frame parameters returned by UGEE v2 devices.
+ *
+ * @str_desc: String descriptor, cannot be NULL.
+ * @str_desc_size: Size of the string descriptor.
+ * @desc_params: Output description params list.
+ * @desc_params_size: Size of the output description params list.
+ * @frame_type: Output frame type.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_parse_ugee_v2_desc(const __u8 *str_desc,
+ size_t str_desc_size,
+ s32 *desc_params,
+ size_t desc_params_size,
+ enum uclogic_params_frame_type *frame_type)
+{
+ s32 pen_x_lm, pen_y_lm;
+ s32 pen_x_pm, pen_y_pm;
+ s32 pen_pressure_lm;
+ s32 frame_num_buttons;
+ s32 resolution;
+
+ /* Minimum descriptor length required, maximum seen so far is 14 */
+ const int min_str_desc_size = 12;
+
+ if (!str_desc || str_desc_size < min_str_desc_size)
+ return -EINVAL;
+
+ if (desc_params_size != UCLOGIC_RDESC_PH_ID_NUM)
+ return -EINVAL;
+
+ pen_x_lm = get_unaligned_le16(str_desc + 2);
+ pen_y_lm = get_unaligned_le16(str_desc + 4);
+ frame_num_buttons = str_desc[6];
+ *frame_type = str_desc[7];
+ pen_pressure_lm = get_unaligned_le16(str_desc + 8);
+
+ resolution = get_unaligned_le16(str_desc + 10);
+ if (resolution == 0) {
+ pen_x_pm = 0;
+ pen_y_pm = 0;
+ } else {
+ pen_x_pm = pen_x_lm * 1000 / resolution;
+ pen_y_pm = pen_y_lm * 1000 / resolution;
+ }
+
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM] = pen_x_lm;
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM] = pen_x_pm;
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM] = pen_y_lm;
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = pen_y_pm;
+ desc_params[UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] = pen_pressure_lm;
+ desc_params[UCLOGIC_RDESC_FRAME_PH_ID_UM] = frame_num_buttons;
+
+ return 0;
+}
+
+/**
+ * uclogic_params_ugee_v2_init_frame_buttons() - initialize a UGEE v2 frame with
+ * buttons.
+ * @p: Parameters to fill in, cannot be NULL.
+ * @desc_params: Device description params list.
+ * @desc_params_size: Size of the description params list.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_ugee_v2_init_frame_buttons(struct uclogic_params *p,
+ const s32 *desc_params,
+ size_t desc_params_size)
+{
+ __u8 *rdesc_frame = NULL;
+ int rc = 0;
+
+ if (!p || desc_params_size != UCLOGIC_RDESC_PH_ID_NUM)
+ return -EINVAL;
+
+ rdesc_frame = uclogic_rdesc_template_apply(
+ uclogic_rdesc_ugee_v2_frame_btn_template_arr,
+ uclogic_rdesc_ugee_v2_frame_btn_template_size,
+ desc_params, UCLOGIC_RDESC_PH_ID_NUM);
+ if (!rdesc_frame)
+ return -ENOMEM;
+
+ rc = uclogic_params_frame_init_with_desc(&p->frame_list[0],
+ rdesc_frame,
+ uclogic_rdesc_ugee_v2_frame_btn_template_size,
+ UCLOGIC_RDESC_V1_FRAME_ID);
+ kfree(rdesc_frame);
+ return rc;
+}
+
+/**
+ * uclogic_params_ugee_v2_init_frame_dial() - initialize a UGEE v2 frame with a
+ * bitmap dial.
+ * @p: Parameters to fill in, cannot be NULL.
+ * @desc_params: Device description params list.
+ * @desc_params_size: Size of the description params list.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_ugee_v2_init_frame_dial(struct uclogic_params *p,
+ const s32 *desc_params,
+ size_t desc_params_size)
+{
+ __u8 *rdesc_frame = NULL;
+ int rc = 0;
+
+ if (!p || desc_params_size != UCLOGIC_RDESC_PH_ID_NUM)
+ return -EINVAL;
+
+ rdesc_frame = uclogic_rdesc_template_apply(
+ uclogic_rdesc_ugee_v2_frame_dial_template_arr,
+ uclogic_rdesc_ugee_v2_frame_dial_template_size,
+ desc_params, UCLOGIC_RDESC_PH_ID_NUM);
+ if (!rdesc_frame)
+ return -ENOMEM;
+
+ rc = uclogic_params_frame_init_with_desc(&p->frame_list[0],
+ rdesc_frame,
+ uclogic_rdesc_ugee_v2_frame_dial_template_size,
+ UCLOGIC_RDESC_V1_FRAME_ID);
+ kfree(rdesc_frame);
+ if (rc)
+ return rc;
+
+ p->frame_list[0].bitmap_dial_byte = 7;
+ return 0;
+}
+
+/**
+ * uclogic_params_ugee_v2_init_frame_mouse() - initialize a UGEE v2 frame with a
+ * mouse.
+ * @p: Parameters to fill in, cannot be NULL.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_ugee_v2_init_frame_mouse(struct uclogic_params *p)
+{
+ int rc = 0;
+
+ if (!p)
+ return -EINVAL;
+
+ rc = uclogic_params_frame_init_with_desc(&p->frame_list[1],
+ uclogic_rdesc_ugee_v2_frame_mouse_template_arr,
+ uclogic_rdesc_ugee_v2_frame_mouse_template_size,
+ UCLOGIC_RDESC_V1_FRAME_ID);
+ return rc;
+}
+
+/**
* uclogic_params_ugee_v2_init() - initialize a UGEE graphics tablets by
* discovering their parameters.
*
@@ -1084,9 +1239,8 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
const int str_desc_len = 12;
__u8 *str_desc = NULL;
__u8 *rdesc_pen = NULL;
- __u8 *rdesc_frame = NULL;
s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
- s32 resolution;
+ enum uclogic_params_frame_type frame_type;
__u8 magic_arr[] = {
0x02, 0xb0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
@@ -1100,6 +1254,15 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
iface = to_usb_interface(hdev->dev.parent);
bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+
+ if (bInterfaceNumber == 0) {
+ rc = uclogic_params_ugee_v2_init_frame_mouse(&p);
+ if (rc)
+ goto cleanup;
+
+ goto output;
+ }
+
if (bInterfaceNumber != 2) {
uclogic_params_init_invalid(&p);
goto output;
@@ -1128,25 +1291,13 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
goto output;
}
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM] =
- get_unaligned_le16(str_desc + 2);
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM] =
- get_unaligned_le16(str_desc + 4);
- desc_params[UCLOGIC_RDESC_FRAME_PH_ID_UM] = str_desc[6];
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_PRESSURE_LM] =
- get_unaligned_le16(str_desc + 8);
- resolution = get_unaligned_le16(str_desc + 10);
- if (resolution == 0) {
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM] = 0;
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM] = 0;
- } else {
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_PM] =
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_X_LM] * 1000 /
- resolution;
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_PM] =
- desc_params[UCLOGIC_RDESC_PEN_PH_ID_Y_LM] * 1000 /
- resolution;
- }
+ rc = uclogic_params_parse_ugee_v2_desc(str_desc, str_desc_len,
+ desc_params,
+ ARRAY_SIZE(desc_params),
+ &frame_type);
+ if (rc)
+ goto cleanup;
+
kfree(str_desc);
str_desc = NULL;
@@ -1167,24 +1318,21 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
/* Initialize the frame interface */
- rdesc_frame = uclogic_rdesc_template_apply(
- uclogic_rdesc_ugee_v2_frame_btn_template_arr,
- uclogic_rdesc_ugee_v2_frame_btn_template_size,
- desc_params, ARRAY_SIZE(desc_params));
- if (!rdesc_frame) {
- rc = -ENOMEM;
- goto cleanup;
+ switch (frame_type) {
+ case UCLOGIC_PARAMS_FRAME_DIAL:
+ case UCLOGIC_PARAMS_FRAME_MOUSE:
+ rc = uclogic_params_ugee_v2_init_frame_dial(&p, desc_params,
+ ARRAY_SIZE(desc_params));
+ break;
+ case UCLOGIC_PARAMS_FRAME_BUTTONS:
+ default:
+ rc = uclogic_params_ugee_v2_init_frame_buttons(&p, desc_params,
+ ARRAY_SIZE(desc_params));
+ break;
}
- rc = uclogic_params_frame_init_with_desc(&p.frame_list[0],
- rdesc_frame,
- uclogic_rdesc_ugee_v2_frame_btn_template_size,
- UCLOGIC_RDESC_V1_FRAME_ID);
- kfree(rdesc_frame);
- if (rc) {
- uclogic_params_init_invalid(&p);
- goto output;
- }
+ if (rc)
+ goto cleanup;
output:
/* Output parameters */
@@ -1433,7 +1581,11 @@ int uclogic_params_init(struct uclogic_params *params,
}
break;
case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_PARBLO_A610_PRO):
+ case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
+ case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S):
rc = uclogic_params_ugee_v2_init(&p, hdev);
if (rc != 0)
goto cleanup;
@@ -1517,3 +1669,7 @@ cleanup:
uclogic_params_cleanup(&p);
return rc;
}
+
+#ifdef CONFIG_HID_KUNIT_TEST
+#include "hid-uclogic-params-test.c"
+#endif
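
The physical-maximum arithmetic in uclogic_params_parse_ugee_v2_desc() is easy to check by hand against the XP-PEN Deco L string descriptor used in the KUnit test added earlier in this series: 0xB270 * 1000 / 0x13D8 = 45680 * 1000 / 5080 = 8992 = 0x2320, exactly the expected X physical maximum. A standalone sketch of that math (le16() is a local helper here, not a kernel API):

#include <stdio.h>
#include <stdint.h>

static unsigned int le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

int main(void)
{
	/* XP-PEN Deco L string descriptor from the "frame_type_buttons" case */
	const uint8_t str_desc[12] = {
		0x0E, 0x03, 0x70, 0xB2, 0x10, 0x77,
		0x08, 0x00, 0xFF, 0x1F, 0xD8, 0x13,
	};
	unsigned int x_lm = le16(str_desc + 2);		/* 0xB270 = 45680 */
	unsigned int y_lm = le16(str_desc + 4);		/* 0x7710 = 30480 */
	unsigned int res  = le16(str_desc + 10);	/* 0x13D8 = 5080  */

	printf("X: lm=%u pm=%u\n", x_lm, x_lm * 1000 / res);	/* pm=8992 (0x2320) */
	printf("Y: lm=%u pm=%u\n", y_lm, y_lm * 1000 / res);	/* pm=6000 (0x1770) */
	return 0;
}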
diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
index 5bef8daaa607..a97477c02ff8 100644
--- a/drivers/hid/hid-uclogic-params.h
+++ b/drivers/hid/hid-uclogic-params.h
@@ -29,6 +29,16 @@ enum uclogic_params_pen_inrange {
UCLOGIC_PARAMS_PEN_INRANGE_NONE,
};
+/* Types of frames */
+enum uclogic_params_frame_type {
+ /* Frame with buttons */
+ UCLOGIC_PARAMS_FRAME_BUTTONS = 0,
+ /* Frame with buttons and a dial */
+ UCLOGIC_PARAMS_FRAME_DIAL,
+ /* Frame with buttons and a mouse (shaped as a dial + touchpad) */
+ UCLOGIC_PARAMS_FRAME_MOUSE,
+};
+
/*
* Pen report's subreport data.
*/
diff --git a/drivers/hid/hid-uclogic-rdesc-test.c b/drivers/hid/hid-uclogic-rdesc-test.c
index ebebffef5f8a..3971a0854c3e 100644
--- a/drivers/hid/hid-uclogic-rdesc-test.c
+++ b/drivers/hid/hid-uclogic-rdesc-test.c
@@ -97,7 +97,7 @@ static const __u8 template_params_none[] = {
static struct uclogic_template_case uclogic_template_cases[] = {
{
- .name = "Empty template",
+ .name = "empty_template",
.template = template_empty,
.template_size = sizeof(template_empty),
.param_list = params_pen_all,
@@ -105,7 +105,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = template_empty,
},
{
- .name = "Template smaller than the placeholder",
+ .name = "template_smaller_than_the_placeholder",
.template = template_small,
.template_size = sizeof(template_small),
.param_list = params_pen_all,
@@ -113,7 +113,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = template_small,
},
{
- .name = "No placeholder",
+ .name = "no_placeholder",
.template = template_no_ph,
.template_size = sizeof(template_no_ph),
.param_list = params_pen_all,
@@ -121,7 +121,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = template_no_ph,
},
{
- .name = "Pen placeholder at the end, without ID",
+ .name = "pen_placeholder_at_the_end_without_id",
.template = template_pen_ph_end,
.template_size = sizeof(template_pen_ph_end),
.param_list = params_pen_all,
@@ -129,7 +129,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = template_pen_ph_end,
},
{
- .name = "Frame button placeholder at the end, without ID",
+ .name = "frame_button_placeholder_at_the_end_without_id",
.template = template_btn_ph_end,
.template_size = sizeof(template_btn_ph_end),
.param_list = params_frame_all,
@@ -137,7 +137,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = template_btn_ph_end,
},
{
- .name = "All params present in the pen template",
+ .name = "all_params_present_in_the_pen_template",
.template = template_pen_all_params,
.template_size = sizeof(template_pen_all_params),
.param_list = params_pen_all,
@@ -145,7 +145,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = expected_pen_all_params,
},
{
- .name = "All params present in the frame template",
+ .name = "all_params_present_in_the_frame_template",
.template = template_frame_all_params,
.template_size = sizeof(template_frame_all_params),
.param_list = params_frame_all,
@@ -153,7 +153,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = expected_frame_all_params,
},
{
- .name = "Some params present in the pen template (complete param list)",
+ .name = "some_params_present_in_the_pen_template_with_complete_param_list",
.template = template_pen_some_params,
.template_size = sizeof(template_pen_some_params),
.param_list = params_pen_all,
@@ -161,7 +161,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = expected_pen_some_params,
},
{
- .name = "Some params present in the pen template (incomplete param list)",
+ .name = "some_params_present_in_the_pen_template_with_incomplete_param_list",
.template = template_pen_some_params,
.template_size = sizeof(template_pen_some_params),
.param_list = params_pen_some,
@@ -169,7 +169,7 @@ static struct uclogic_template_case uclogic_template_cases[] = {
.expected = expected_pen_some_params,
},
{
- .name = "No params present in the template",
+ .name = "no_params_present_in_the_template",
.template = template_params_none,
.template_size = sizeof(template_params_none),
.param_list = params_pen_some,
@@ -208,7 +208,7 @@ static struct kunit_case hid_uclogic_rdesc_test_cases[] = {
};
static struct kunit_suite hid_uclogic_rdesc_test_suite = {
- .name = "hid-uclogic-rdesc-test",
+ .name = "hid_uclogic_rdesc_test",
.test_cases = hid_uclogic_rdesc_test_cases,
};
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index 3d68e8b0784d..4bd54c4fb5b0 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -961,6 +961,80 @@ const __u8 uclogic_rdesc_ugee_v2_frame_btn_template_arr[] = {
const size_t uclogic_rdesc_ugee_v2_frame_btn_template_size =
sizeof(uclogic_rdesc_ugee_v2_frame_btn_template_arr);
+/* Fixed report descriptor template for UGEE v2 frame reports (dial) */
+const __u8 uclogic_rdesc_ugee_v2_frame_dial_template_arr[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x07, /* Usage (Keypad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, UCLOGIC_RDESC_V1_FRAME_ID,
+ /* Report ID, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x39, /* Usage (Tablet Function Keys), */
+ 0xA0, /* Collection (Physical), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ UCLOGIC_RDESC_FRAME_PH_BTN,
+ /* Usage Maximum (PLACEHOLDER), */
+ 0x95, 0x0A, /* Report Count (10), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x38, /* Usage (Wheel), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x15, 0xFF, /* Logical Minimum (-1), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+const size_t uclogic_rdesc_ugee_v2_frame_dial_template_size =
+ sizeof(uclogic_rdesc_ugee_v2_frame_dial_template_arr);
+
+/* Fixed report descriptor template for UGEE v2 frame reports (mouse) */
+const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x05, 0x01, /* Usage Page (Pointer), */
+ 0xA0, /* Collection (Physical), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Generic Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x16, 0x00, 0x80, /* Logical Minimum (-32768), */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size =
+ sizeof(uclogic_rdesc_ugee_v2_frame_mouse_template_arr);
+
/* Fixed report descriptor for Ugee EX07 frame */
const __u8 uclogic_rdesc_ugee_ex07_frame_arr[] = {
0x05, 0x01, /* Usage Page (Desktop), */
@@ -1113,7 +1187,7 @@ __u8 *uclogic_rdesc_template_apply(const __u8 *template_ptr,
memcmp(p, pen_head, sizeof(pen_head)) == 0 &&
p[sizeof(pen_head)] < param_num) {
v = param_list[p[sizeof(pen_head)]];
- put_unaligned(cpu_to_le32(v), (s32 *)p);
+ put_unaligned((__force u32)cpu_to_le32(v), (s32 *)p);
p += sizeof(pen_head) + 1;
} else if (memcmp(p, btn_head, sizeof(btn_head)) == 0 &&
p[sizeof(btn_head)] < param_num) {
diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
index 86e64a9ee6bd..0502a0656496 100644
--- a/drivers/hid/hid-uclogic-rdesc.h
+++ b/drivers/hid/hid-uclogic-rdesc.h
@@ -169,6 +169,14 @@ extern const size_t uclogic_rdesc_ugee_v2_pen_template_size;
extern const __u8 uclogic_rdesc_ugee_v2_frame_btn_template_arr[];
extern const size_t uclogic_rdesc_ugee_v2_frame_btn_template_size;
+/* Fixed report descriptor template for UGEE v2 frame reports (dial) */
+extern const __u8 uclogic_rdesc_ugee_v2_frame_dial_template_arr[];
+extern const size_t uclogic_rdesc_ugee_v2_frame_dial_template_size;
+
+/* Fixed report descriptor template for UGEE v2 frame reports (mouse) */
+extern const __u8 uclogic_rdesc_ugee_v2_frame_mouse_template_arr[];
+extern const size_t uclogic_rdesc_ugee_v2_frame_mouse_template_size;
+
/* Fixed report descriptor for Ugee EX07 frame */
extern const __u8 uclogic_rdesc_ugee_ex07_frame_arr[];
extern const size_t uclogic_rdesc_ugee_ex07_frame_size;
diff --git a/drivers/hid/hid-vivaldi-common.c b/drivers/hid/hid-vivaldi-common.c
index 8b3e515d0f06..b0af2be94895 100644
--- a/drivers/hid/hid-vivaldi-common.c
+++ b/drivers/hid/hid-vivaldi-common.c
@@ -116,25 +116,26 @@ static struct attribute *vivaldi_sysfs_attrs[] = {
NULL
};
-static const struct attribute_group vivaldi_attribute_group = {
- .attrs = vivaldi_sysfs_attrs,
-};
-
-/**
- * vivaldi_input_configured - Complete initialization of device using vivaldi map
- * @hdev: HID device to which vivaldi attributes should be attached
- * @hidinput: HID input device (unused)
- */
-int vivaldi_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput)
+static umode_t vivaldi_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
{
+ struct hid_device *hdev = to_hid_device(kobj_to_dev(kobj));
struct vivaldi_data *data = hid_get_drvdata(hdev);
if (!data->num_function_row_keys)
return 0;
-
- return devm_device_add_group(&hdev->dev, &vivaldi_attribute_group);
+ return attr->mode;
}
-EXPORT_SYMBOL_GPL(vivaldi_input_configured);
+
+static const struct attribute_group vivaldi_attribute_group = {
+ .attrs = vivaldi_sysfs_attrs,
+ .is_visible = vivaldi_is_visible,
+};
+
+const struct attribute_group *vivaldi_attribute_groups[] = {
+ &vivaldi_attribute_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(vivaldi_attribute_groups);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-vivaldi-common.h b/drivers/hid/hid-vivaldi-common.h
index d42e82d77825..ba9adfa08a2d 100644
--- a/drivers/hid/hid-vivaldi-common.h
+++ b/drivers/hid/hid-vivaldi-common.h
@@ -4,13 +4,11 @@
struct hid_device;
struct hid_field;
-struct hid_input;
struct hid_usage;
void vivaldi_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage);
-int vivaldi_input_configured(struct hid_device *hdev,
- struct hid_input *hidinput);
+extern const struct attribute_group *vivaldi_attribute_groups[];
#endif /* _HID_VIVALDI_COMMON_H */
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index 3a979123e7d3..cda5938fb070 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -45,7 +45,9 @@ static struct hid_driver hid_vivaldi = {
.id_table = vivaldi_table,
.probe = vivaldi_probe,
.feature_mapping = vivaldi_feature_mapping,
- .input_configured = vivaldi_input_configured,
+ .driver = {
+ .dev_groups = vivaldi_attribute_groups,
+ },
};
module_hid_driver(hid_vivaldi);
diff --git a/drivers/hid/hid-vrc2.c b/drivers/hid/hid-vrc2.c
new file mode 100644
index 000000000000..80a2b7ef5e66
--- /dev/null
+++ b/drivers/hid/hid-vrc2.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for VRC-2 2-axis Car controller
+ *
+ * Copyright (C) 2022 Marcus Folkesson <marcus.folkesson@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+/*
+ * VID/PID are probably "borrowed", so keep them locally and
+ * do not populate hid-ids.h with those.
+ */
+#define USB_VENDOR_ID_VRC2 (0x07c0)
+#define USB_DEVICE_ID_VRC2 (0x1125)
+
+static __u8 vrc2_rdesc_fixed[] = {
+ 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ 0x09, 0x04, // Usage (Joystick)
+ 0xA1, 0x01, // Collection (Application)
+ 0x09, 0x01, // Usage (Pointer)
+ 0xA1, 0x00, // Collection (Physical)
+ 0x09, 0x30, // Usage (X)
+ 0x09, 0x31, // Usage (Y)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xFF, 0x07, // Logical Maximum (2047)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0xFF, 0x00, // Physical Maximum (255)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x02, // Report Count (2)
+ 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ 0xC0, // End Collection
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x03, // Report Count (3)
+ 0x81, 0x03, // Input (Cnst,Var,Abs)
+ 0xC0, // End Collection
+};
+
+static __u8 *vrc2_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ hid_info(hdev, "fixing up VRC-2 report descriptor\n");
+ *rsize = sizeof(vrc2_rdesc_fixed);
+ return vrc2_rdesc_fixed;
+}
+
+static int vrc2_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ /*
+ * The device gives us 2 separate USB endpoints.
+	 * One of those (the one with a report descriptor size of 23) is just bogus, so ignore it.
+ */
+ if (hdev->dev_rsize == 23)
+ return -ENODEV;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id vrc2_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_VRC2, USB_DEVICE_ID_VRC2) },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(hid, vrc2_devices);
+
+static struct hid_driver vrc2_driver = {
+ .name = "vrc2",
+ .id_table = vrc2_devices,
+ .report_fixup = vrc2_report_fixup,
+ .probe = vrc2_probe,
+};
+module_hid_driver(vrc2_driver);
+
+MODULE_AUTHOR("Marcus Folkesson <marcus.folkesson@gmail.com>");
+MODULE_DESCRIPTION("HID driver for VRC-2 2-axis Car controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 681614a8302a..197b1e7bf029 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -350,6 +350,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ for (int i = list->tail; i < list->head; i++)
+ kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index c078f09a2318..0667b6022c3b 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -1036,7 +1036,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
client->name, (u16)hid->vendor, (u16)hid->product);
- strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ strscpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
@@ -1064,7 +1064,7 @@ err_powered:
}
EXPORT_SYMBOL_GPL(i2c_hid_core_probe);
-int i2c_hid_core_remove(struct i2c_client *client)
+void i2c_hid_core_remove(struct i2c_client *client)
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
@@ -1078,8 +1078,6 @@ int i2c_hid_core_remove(struct i2c_client *client)
i2c_hid_free_buffers(ihid);
i2c_hid_core_power_down(ihid);
-
- return 0;
}
EXPORT_SYMBOL_GPL(i2c_hid_core_remove);
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index 236cc062d5ef..96c75510ad3f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -33,7 +33,7 @@ struct i2chid_ops {
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
u16 hid_descriptor_address, u32 quirks);
-int i2c_hid_core_remove(struct i2c_client *client);
+void i2c_hid_core_remove(struct i2c_client *client);
void i2c_hid_core_shutdown(struct i2c_client *client);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index e600dbf04dfc..fc108f19a64c 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -32,6 +32,7 @@
#define ADL_P_DEVICE_ID 0x51FC
#define ADL_N_DEVICE_ID 0x54FC
#define RPL_S_DEVICE_ID 0x7A78
+#define MTL_P_DEVICE_ID 0x7E45
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 2c67ec17bec6..7120b30ac51d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -43,6 +43,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 6a5cc11aefd8..35dddc5015b3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -105,7 +105,7 @@ struct report_list {
* @multi_packet_cnt: Count of fragmented packet count
*
* This structure is used to store completion flags and per client data like
- * like report description, number of HID devices etc.
+ * report description, number of HID devices etc.
*/
struct ishtp_cl_data {
/* completion flags */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 405e0d5212cc..df0a825694f5 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
}
/**
- * ipc_tx_callback() - IPC tx callback function
+ * ipc_tx_send() - IPC tx send function
* @prm: Pointer to client device instance
*
- * Send message over IPC either first time or on callback on previous message
- * completion
+ * Send a message over IPC. The message is split into fragments
+ * if it is bigger than the IPC FIFO size, and all fragments are
+ * sent one by one.
*/
-static void ipc_tx_callback(void *prm)
+static void ipc_tx_send(void *prm)
{
struct ishtp_cl *cl = prm;
struct ishtp_cl_tx_ring *cl_msg;
@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
list);
rem = cl_msg->send_buf.size - cl->tx_offs;
- ishtp_hdr.host_addr = cl->host_client_id;
- ishtp_hdr.fw_addr = cl->fw_client_id;
- ishtp_hdr.reserved = 0;
- pmsg = cl_msg->send_buf.data + cl->tx_offs;
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
- if (rem <= dev->mtu) {
- ishtp_hdr.length = rem;
- ishtp_hdr.msg_complete = 1;
- cl->sending = 0;
- list_del_init(&cl_msg->list); /* Must be before write */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- /* Submit to IPC queue with no callback */
- ishtp_write_message(dev, &ishtp_hdr, pmsg);
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
- ++cl->tx_ring_free_size;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
- tx_free_flags);
- } else {
- /* Send IPC fragment */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- cl->tx_offs += dev->mtu;
- ishtp_hdr.length = dev->mtu;
- ishtp_hdr.msg_complete = 0;
- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ break;
+ } else {
+			/* Send an IPC fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+			/* All fragments are submitted to the IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
}
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
}
/**
@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
return;
cl->tx_offs = 0;
- ipc_tx_callback(cl);
+ ipc_tx_send(cl);
++cl->send_msg_cnt_ipc;
}
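
The rewritten ipc_tx_send() drains a message synchronously: every full-MTU chunk is written with msg_complete = 0 and the final, possibly shorter chunk with msg_complete = 1, so no per-fragment completion callback is needed any more. A userspace sketch of just that split, with assumed mtu and message sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	const size_t mtu  = 128;	/* assumed IPC FIFO size */
	const size_t size = 300;	/* assumed message size  */
	size_t offs = 0;

	while (offs < size) {
		size_t rem = size - offs;
		size_t len = rem <= mtu ? rem : mtu;
		int msg_complete = (rem <= mtu);	/* last fragment? */

		printf("fragment: offs=%zu len=%zu msg_complete=%d\n",
		       offs, len, msg_complete);
		offs += len;
	}
	return 0;
}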
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 4490e2f7252a..be4c731aaa65 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1381,7 +1381,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->type = HID_TYPE_USBNONE;
if (dev->manufacturer)
- strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
+ strscpy(hid->name, dev->manufacturer, sizeof(hid->name));
if (dev->product) {
if (dev->manufacturer)
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index b4b007c4beb6..c439ed2f16db 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -294,7 +294,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
spin_lock_init(&kbd->leds_lock);
if (dev->manufacturer)
- strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
+ strscpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
if (dev->product) {
if (dev->manufacturer)
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index fb1d7d1f6999..3fd93c2e4f4a 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -142,7 +142,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
mouse->dev = input_dev;
if (dev->manufacturer)
- strlcpy(mouse->name, dev->manufacturer, sizeof(mouse->name));
+ strscpy(mouse->name, dev->manufacturer, sizeof(mouse->name));
if (dev->product) {
if (dev->manufacturer)
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 3f8b24a57014..4da50e19808e 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * drivers/input/tablet/wacom.h
- *
* USB Wacom tablet support
*
* Copyright (c) 2000-2004 Vojtech Pavlik <vojtech@ucw.cz>
@@ -78,10 +76,9 @@
* - integration of the Bluetooth devices
*/
-/*
- */
#ifndef WACOM_H
#define WACOM_H
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 194a2e327591..634263e4556b 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1,13 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * drivers/input/tablet/wacom_sys.c
- *
* USB Wacom tablet support - system specific code
*/
-/*
- */
-
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
@@ -2226,7 +2221,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
} else if (strstr(product_name, "Wacom") ||
strstr(product_name, "wacom") ||
strstr(product_name, "WACOM")) {
- strlcpy(name, product_name, sizeof(name));
+ strscpy(name, product_name, sizeof(name));
} else {
snprintf(name, sizeof(name), "Wacom %s", product_name);
}
@@ -2244,7 +2239,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if (name[strlen(name)-1] == ' ')
name[strlen(name)-1] = '\0';
} else {
- strlcpy(name, features->name, sizeof(name));
+ strscpy(name, features->name, sizeof(name));
}
snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s",
@@ -2509,7 +2504,7 @@ static void wacom_wireless_work(struct work_struct *work)
goto fail;
}
- strlcpy(wacom_wac->name, wacom_wac1->name,
+ strscpy(wacom_wac->name, wacom_wac1->name,
sizeof(wacom_wac->name));
error = wacom_initialize_battery(wacom);
if (error)
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index d049239256a2..77486962a773 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1,13 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * drivers/input/tablet/wacom_wac.c
- *
* USB Wacom tablet support - Wacom specific code
*/
-/*
- */
-
#include "wacom_wac.h"
#include "wacom.h"
#include <linux/input/mt.h>
@@ -713,11 +708,14 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x802: /* Intuos4/5 13HD/24HD General Pen */
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
+ case 0x200: /* Pro Pen 3 */
+ case 0x04200: /* Pro Pen 3 */
case 0x10842: /* MobileStudio Pro Pro Pen slim */
case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
case 0x16802: /* Cintiq 13HD Pro Pen */
case 0x18802: /* DTH2242 Pen */
case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
tool_type = BTN_TOOL_PEN;
break;
@@ -4875,6 +4873,10 @@ static const struct wacom_features wacom_features_0x3c6 =
static const struct wacom_features wacom_features_0x3c8 =
{ "Wacom Intuos BT M", 21600, 13500, 4095, 63,
INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3dd =
+ { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
+ INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
+ .touch_max = 10 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -5050,6 +5052,7 @@ const struct hid_device_id wacom_ids[] = {
{ BT_DEVICE_WACOM(0x393) },
{ BT_DEVICE_WACOM(0x3c6) },
{ BT_DEVICE_WACOM(0x3c8) },
+ { BT_DEVICE_WACOM(0x3dd) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index fef1538005b5..5ca6c06d143b 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * drivers/input/tablet/wacom_wac.h
- */
+
#ifndef WACOM_WAC_H
#define WACOM_WAC_H
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index e014ef36d872..8069f795c864 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1089,7 +1089,7 @@ static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
struct cs_char *csdata = vmf->vma->vm_private_data;
struct page *page;
- page = virt_to_page(csdata->mmap_base);
+ page = virt_to_page((void *)csdata->mmap_base);
get_page(page);
vmf->page = page;
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index cd7ebf4c2e2f..97ba59e60663 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/hsi/ssi_protocol.h>
static unsigned int pm = 1;
@@ -75,8 +74,7 @@ static int nokia_modem_gpio_probe(struct device *dev)
struct nokia_modem_device *modem = dev_get_drvdata(dev);
int gpio_count, gpio_name_count, i, err;
- gpio_count = of_gpio_count(np);
-
+ gpio_count = gpiod_count(dev, NULL);
if (gpio_count < 0) {
dev_err(dev, "missing gpios: %d\n", gpio_count);
return gpio_count;
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 21f11a5b965b..274ad8443f8c 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -796,7 +796,6 @@ static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
dev_err(&cl->device, "No memory for rx skb\n");
goto out1;
}
- skb->dev = ssi->netdev;
skb_put(skb, len * 4);
msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
if (unlikely(!msg)) {
@@ -931,6 +930,7 @@ static int ssip_pn_open(struct net_device *dev)
if (err < 0) {
dev_err(&cl->device, "Register HSI port event failed (%d)\n",
err);
+ hsi_release_port(cl);
return err;
}
dev_dbg(&cl->device, "Configuring SSI port\n");
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 44a3f5660c10..eb9820158318 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -524,6 +524,7 @@ static int ssi_probe(struct platform_device *pd)
if (!childpdev) {
err = -ENODEV;
dev_err(&pd->dev, "failed to create ssi controller port\n");
+ of_node_put(child);
goto out3;
}
}
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index a0cb5be246e1..b9495b720f1b 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -230,10 +230,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
if (msg->ttype == HSI_MSG_READ) {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_FROM_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
@@ -247,10 +247,10 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
} else {
err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
DMA_TO_DEVICE);
- if (err < 0) {
+ if (!err) {
dev_dbg(&ssi->device, "DMA map SG failed !\n");
pm_runtime_put_autosuspend(omap_port->pdev);
- return err;
+ return -EIO;
}
csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
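
The fix above hinges on the dma_map_sg() return convention: it returns the number of mapped entries, or 0 on failure, and never a negative errno, so the old `if (err < 0)` check could never fire. A minimal kernel-style fragment of the corrected pattern (a sketch, not compilable on its own):

	int nents = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
			       DMA_TO_DEVICE);
	if (!nents) {
		/* 0 means the mapping failed; the caller picks an errno */
		return -EIO;
	}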
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index eca7afd366d6..9dc27e5d367a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -431,34 +431,29 @@ struct vmbus_channel *relid2channel(u32 relid)
void vmbus_on_event(unsigned long data)
{
struct vmbus_channel *channel = (void *) data;
- unsigned long time_limit = jiffies + 2;
+ void (*callback_fn)(void *context);
trace_vmbus_on_event(channel);
hv_debug_delay_test(channel, INTERRUPT_DELAY);
- do {
- void (*callback_fn)(void *);
- /* A channel once created is persistent even when
- * there is no driver handling the device. An
- * unloading driver sets the onchannel_callback to NULL.
- */
- callback_fn = READ_ONCE(channel->onchannel_callback);
- if (unlikely(callback_fn == NULL))
- return;
-
- (*callback_fn)(channel->channel_callback_context);
+ /* A channel once created is persistent even when
+ * there is no driver handling the device. An
+ * unloading driver sets the onchannel_callback to NULL.
+ */
+ callback_fn = READ_ONCE(channel->onchannel_callback);
+ if (unlikely(!callback_fn))
+ return;
- if (channel->callback_mode != HV_CALL_BATCHED)
- return;
+ (*callback_fn)(channel->channel_callback_context);
- if (likely(hv_end_read(&channel->inbound) == 0))
- return;
+ if (channel->callback_mode != HV_CALL_BATCHED)
+ return;
- hv_begin_read(&channel->inbound);
- } while (likely(time_before(jiffies, time_limit)));
+ if (likely(hv_end_read(&channel->inbound) == 0))
+ return;
- /* The time limit (2 jiffies) has been reached */
+ hv_begin_read(&channel->inbound);
tasklet_schedule(&channel->callback_event);
}
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 660036da7449..922d83eb7ddf 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy)
/*
* The strings sent from the host are encoded in
- * in utf16; convert it to utf8 strings.
+ * utf16; convert it to utf8 strings.
* The host assures us that the utf16 strings will not exceed
* the max lengths specified. We will however, reserve room
* for the string terminating character - in the utf16s_utf8s()
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 23c680d1a0f5..8b2e413bf19c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
+#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -45,8 +46,6 @@ struct vmbus_dynid {
static struct acpi_device *hv_acpi_dev;
-static struct completion probe_event;
-
static int hyperv_cpuhp_online;
static void *hv_panic_page;
@@ -1131,7 +1130,8 @@ void vmbus_on_msg_dpc(unsigned long data)
return;
INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
+ ctx->msg.header = msg_copy.header;
+ memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);
/*
* The host can generate a rescind message while we
@@ -1572,7 +1572,7 @@ err_setup:
}
/**
- * __vmbus_child_driver_register() - Register a vmbus's driver
+ * __vmbus_driver_register() - Register a vmbus's driver
* @hv_driver: Pointer to driver structure you want to register
* @owner: owner module of the drv
* @mod_name: module name string
@@ -2051,7 +2051,7 @@ struct hv_device *vmbus_device_create(const guid_t *type,
child_device_obj->channel = channel;
guid_copy(&child_device_obj->dev_type, type);
guid_copy(&child_device_obj->dev_instance, instance);
- child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
+ child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
return child_device_obj;
}
@@ -2262,26 +2262,43 @@ static int vmbus_acpi_remove(struct acpi_device *device)
static void vmbus_reserve_fb(void)
{
- int size;
+ resource_size_t start = 0, size;
+ struct pci_dev *pdev;
+
+ if (efi_enabled(EFI_BOOT)) {
+ /* Gen2 VM: get FB base from EFI framebuffer */
+ start = screen_info.lfb_base;
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ } else {
+ /* Gen1 VM: get FB base from PCI */
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev)
+ return;
+
+ if (pdev->resource[0].flags & IORESOURCE_MEM) {
+ start = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ }
+
+ /*
+ * Release the PCI device so hyperv_drm or hyperv_fb driver can
+ * grab it later.
+ */
+ pci_dev_put(pdev);
+ }
+
+ if (!start)
+ return;
+
/*
* Make a claim for the frame buffer in the resource tree under the
* first node, which will be the one below 4GB. The length seems to
* be underreported, particularly in a Generation 1 VM. So start out
* reserving a larger area and make it smaller until it succeeds.
*/
-
- if (screen_info.lfb_base) {
- if (efi_enabled(EFI_BOOT))
- size = max_t(__u32, screen_info.lfb_size, 0x800000);
- else
- size = max_t(__u32, screen_info.lfb_size, 0x4000000);
-
- for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
- fb_mmio = __request_region(hyperv_mmio,
- screen_info.lfb_base, size,
- fb_mmio_name, 0);
- }
- }
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+ fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
/**
@@ -2313,7 +2330,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
- resource_size_t range_min, range_max, start;
+ resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2348,6 +2365,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
@@ -2427,7 +2452,8 @@ static int vmbus_acpi_add(struct acpi_device *device)
* Some ancestor of the vmbus acpi device (Gen1 or Gen2
* firmware) is the VMOD that has the mmio ranges. Get that.
*/
- for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
+ for (ancestor = acpi_dev_parent(device); ancestor;
+ ancestor = acpi_dev_parent(ancestor)) {
result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
@@ -2441,7 +2467,6 @@ static int vmbus_acpi_add(struct acpi_device *device)
ret_val = 0;
acpi_walk_err:
- complete(&probe_event);
if (ret_val)
vmbus_acpi_remove(device);
return ret_val;
@@ -2620,6 +2645,7 @@ static struct acpi_driver vmbus_acpi_driver = {
.remove = vmbus_acpi_remove,
},
.drv.pm = &vmbus_bus_pm,
+ .drv.probe_type = PROBE_FORCE_SYNCHRONOUS,
};
static void hv_kexec_handler(void)
@@ -2692,7 +2718,7 @@ static struct syscore_ops hv_synic_syscore_ops = {
static int __init hv_acpi_init(void)
{
- int ret, t;
+ int ret;
if (!hv_is_hyperv_initialized())
return -ENODEV;
@@ -2700,8 +2726,6 @@ static int __init hv_acpi_init(void)
if (hv_root_partition)
return 0;
- init_completion(&probe_event);
-
/*
* Get ACPI resources first.
*/
@@ -2710,9 +2734,8 @@ static int __init hv_acpi_init(void)
if (ret)
return ret;
- t = wait_for_completion_timeout(&probe_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
+ if (!hv_acpi_dev) {
+ ret = -ENODEV;
goto cleanup;
}
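
The new fb_mmio check in vmbus_allocate_mmio() rejects a candidate range when its start or end lands inside the reserved frame-buffer region. For comparison, the classic closed-interval overlap test also covers the case where the candidate fully contains the other range; whether that case can arise here depends on the sizes involved. A small standalone demo of that predicate (all addresses are made up):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* True when [a_start, a_end] and [b_start, b_end] share any address. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
			   uint64_t b_start, uint64_t b_end)
{
	return a_start <= b_end && a_end >= b_start;
}

int main(void)
{
	const uint64_t fb_start = 0xf8000000, fb_end = 0xf87fffff;

	printf("%d\n", ranges_overlap(0xf8100000, 0xf81fffff, fb_start, fb_end)); /* 1: inside  */
	printf("%d\n", ranges_overlap(0xf9000000, 0xf90fffff, fb_start, fb_end)); /* 0: disjoint */
	printf("%d\n", ranges_overlap(0xf0000000, 0xffffffff, fb_start, fb_end)); /* 1: contains */
	return 0;
}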
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e70d9614bec2..7ac3daaf59ce 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -257,14 +257,14 @@ config SENSORS_AHT10
will be called aht10.
config SENSORS_AQUACOMPUTER_D5NEXT
- tristate "Aquacomputer D5 Next, Octo, Quadro, Farbwerk, and Farbwerk 360"
+ tristate "Aquacomputer D5 Next, Octo, Quadro, Farbwerk, Farbwerk 360, High Flow Next"
depends on USB_HID
select CRC16
help
If you say yes here you get support for sensors and fans of
the Aquacomputer D5 Next watercooling pump, Octo and Quadro fan
- controllers, Farbwerk and Farbwerk 360 RGB controllers, where
- available.
+	  controllers, Farbwerk and Farbwerk 360 RGB controllers, and the
+	  High Flow Next sensor, where available.
This driver can also be built as a module. If so, the module
will be called aquacomputer_d5next.
@@ -393,6 +393,7 @@ config SENSORS_ASB100
config SENSORS_ASPEED
tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
depends on THERMAL || THERMAL=n
select REGMAP
help
@@ -1066,6 +1067,18 @@ config SENSORS_MAX31730
This driver can also be built as a module. If so, the module
will be called max31730.
+config SENSORS_MAX31760
+ tristate "MAX31760 fan speed controller"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Support for the Analog Devices MAX31760 Precision Fan-Speed
+ Controller. MAX31760 integrates temperature sensing along with
+ precision PWM fan control.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31760.
+
config SENSORS_MAX6620
tristate "Maxim MAX6620 fan controller"
depends on I2C
@@ -1745,6 +1758,7 @@ config SENSORS_SIS5595
config SENSORS_SY7636A
tristate "Silergy SY7636A"
+ depends on MFD_SY7636A
help
If you say yes here you get support for the thermistor readout of
the Silergy SY7636A PMIC.
@@ -1785,6 +1799,19 @@ config SENSORS_EMC2103
This driver can also be built as a module. If so, the module
will be called emc2103.
+config SENSORS_EMC2305
+ tristate "Microchip EMC2305 and compatible EMC2301/2/3"
+ depends on I2C
+ imply THERMAL
+ help
+	  If you say yes here you get support for the Microchip EMC2305
+	  and compatible EMC2301/2/3 fan controller chips.
+	  The EMC2305 controls up to 5 fans; fan rotation speeds are
+	  reported in RPM.
+
+ This driver can also be built as a module. If so, the module
+ will be called emc2305.
+
config SENSORS_EMC6W201
tristate "SMSC EMC6W201"
depends on I2C
@@ -2341,21 +2368,6 @@ config SENSORS_ASUS_WMI
This driver can also be built as a module. If so, the module
will be called asus_wmi_sensors.
-config SENSORS_ASUS_WMI_EC
- tristate "ASUS WMI B550/X570"
- depends on ACPI_WMI && SENSORS_ASUS_EC=n
- help
- If you say yes here you get support for the ACPI embedded controller
- hardware monitoring interface found in B550/X570 ASUS motherboards.
- This driver will provide readings of fans, voltages and temperatures
- through the system firmware.
-
- This driver is deprecated in favor of the ASUS EC Sensors driver
- which provides fully compatible output.
-
- This driver can also be built as a module. If so, the module
- will be called asus_wmi_sensors_ec.
-
config SENSORS_ASUS_EC
tristate "ASUS EC Sensors"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 007e829d1d0d..11d076cad8a2 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o
obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
obj-$(CONFIG_SENSORS_ASUS_EC) += asus-ec-sensors.o
obj-$(CONFIG_SENSORS_ASUS_WMI) += asus_wmi_sensors.o
-obj-$(CONFIG_SENSORS_ASUS_WMI_EC) += asus_wmi_ec_sensors.o
# Native drivers
# asb100, then w83781d go first, as they can override other drivers' addresses.
@@ -70,6 +69,7 @@ obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o
+obj-$(CONFIG_SENSORS_EMC2305) += emc2305.o
obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o
obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
@@ -140,6 +140,7 @@ obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
+obj-$(CONFIG_SENSORS_MAX31760) += max31760.o
obj-$(CONFIG_SENSORS_MAX6620) += max6620.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 681f0623868f..a7cae6568155 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1504,7 +1504,6 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM_SLEEP
static int abituguru_suspend(struct device *dev)
{
struct abituguru_data *data = dev_get_drvdata(dev);
@@ -1526,16 +1525,12 @@ static int abituguru_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
-#define ABIT_UGURU_PM (&abituguru_pm)
-#else
-#define ABIT_UGURU_PM NULL
-#endif /* CONFIG_PM */
+static DEFINE_SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
static struct platform_driver abituguru_driver = {
.driver = {
.name = ABIT_UGURU_NAME,
- .pm = ABIT_UGURU_PM,
+ .pm = pm_sleep_ptr(&abituguru_pm),
},
.probe = abituguru_probe,
.remove = abituguru_remove,
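This hunk and several later ones (abituguru3, acpi_power_meter, adt7x10) follow the same conversion: the #ifdef CONFIG_PM_SLEEP scaffolding is replaced by DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), so the suspend/resume callbacks are always compiled (catching bitrot) while the compiler can still discard both the ops structure and the callbacks when CONFIG_PM_SLEEP is disabled. A minimal sketch of the resulting shape, for a hypothetical platform driver "foo" (probe/remove omitted):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		/* evaluates to NULL, and the ops become dead code, if CONFIG_PM_SLEEP=n */
		.pm	= pm_sleep_ptr(&foo_pm),
	},
};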
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 8229ad30c909..afb21f73032d 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1127,7 +1127,6 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM_SLEEP
static int abituguru3_suspend(struct device *dev)
{
struct abituguru3_data *data = dev_get_drvdata(dev);
@@ -1146,16 +1145,12 @@ static int abituguru3_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
-#define ABIT_UGURU3_PM (&abituguru3_pm)
-#else
-#define ABIT_UGURU3_PM NULL
-#endif /* CONFIG_PM */
+static DEFINE_SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
static struct platform_driver abituguru3_driver = {
.driver = {
.name = ABIT_UGURU3_NAME,
- .pm = ABIT_UGURU3_PM
+ .pm = pm_sleep_ptr(&abituguru3_pm),
},
.probe = abituguru3_probe,
.remove = abituguru3_remove,
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index d2545a1be9fc..0962c12eba5a 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -598,7 +598,7 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
continue;
/* Create a symlink to domain objects */
- obj = acpi_bus_get_acpi_device(element->reference.handle);
+ obj = acpi_get_acpi_dev(element->reference.handle);
resource->domain_devices[i] = obj;
if (!obj)
continue;
@@ -927,8 +927,6 @@ static int acpi_power_meter_remove(struct acpi_device *device)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-
static int acpi_power_meter_resume(struct device *dev)
{
struct acpi_power_meter_resource *resource;
@@ -946,9 +944,8 @@ static int acpi_power_meter_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL,
+ acpi_power_meter_resume);
static struct acpi_driver acpi_power_meter_driver = {
.name = "power_meter",
@@ -959,7 +956,7 @@ static struct acpi_driver acpi_power_meter_driver = {
.remove = acpi_power_meter_remove,
.notify = acpi_power_meter_notify,
},
- .drv.pm = &acpi_power_meter_pm,
+ .drv.pm = pm_sleep_ptr(&acpi_power_meter_pm),
};
/* Module init/exit routines */
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index fd938c70293f..97b330b6c165 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -384,7 +384,7 @@ static int adc128_detect(struct i2c_client *client, struct i2c_board_info *info)
if (i2c_smbus_read_byte_data(client, ADC128_REG_BUSY_STATUS) & 0xfc)
return -ENODEV;
- strlcpy(info->type, "adc128d818", I2C_NAME_SIZE);
+ strscpy(info->type, "adc128d818", I2C_NAME_SIZE);
return 0;
}
@@ -495,14 +495,12 @@ error:
return err;
}
-static int adc128_remove(struct i2c_client *client)
+static void adc128_remove(struct i2c_client *client)
{
struct adc128_data *data = i2c_get_clientdata(client);
if (data->regulator)
regulator_disable(data->regulator);
-
- return 0;
}
static const struct i2c_device_id adc128_id[] = {
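Two mechanical conversions recur from here on: strlcpy() becomes strscpy(), and i2c remove callbacks return void (their return value was only ever logged by the I2C core, never acted upon). The practical difference on the string side is the return value: strlcpy() walks the whole source to return its length, while strscpy() stops at the buffer size, always NUL-terminates, and signals truncation. A short sketch with a hypothetical helper:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void fill_chip_type(char *type, size_t size, const char *detected)
{
	ssize_t len = strscpy(type, detected, size);

	if (len == -E2BIG)
		pr_warn("chip name \"%s\" truncated\n", detected);
	/* type is NUL-terminated in either case (size > 0) */
}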
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 91ecfee243bf..2dc45e958730 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -426,7 +426,7 @@ static int adm1021_detect(struct i2c_client *client,
pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 4352f6a884e8..2984c4f98496 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -470,7 +470,7 @@ static int adm1025_detect(struct i2c_client *client,
else
return -ENODEV;
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 69b3ec752944..1f084f708743 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1610,7 +1610,7 @@ static int adm1026_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "adm1026", I2C_NAME_SIZE);
+ strscpy(info->type, "adm1026", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 3e1999413f32..eaf6e5e04aac 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -329,7 +329,7 @@ static int adm1029_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "adm1029", I2C_NAME_SIZE);
+ strscpy(info->type, "adm1029", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index ac841fa3a369..b42797bcb5b4 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -985,7 +985,7 @@ static int adm1031_detect(struct i2c_client *client,
return -ENODEV;
name = (id == 0x30) ? "adm1030" : "adm1031";
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 483cd757abd3..40e3558d3709 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -501,17 +501,23 @@ static int adm9240_fan_read(struct device *dev, u32 attr, int channel, long *val
switch (attr) {
case hwmon_fan_input:
+ mutex_lock(&data->update_lock);
err = regmap_read(data->regmap, ADM9240_REG_FAN(channel), &regval);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&data->update_lock);
return err;
+ }
if (regval == 255 && data->fan_div[channel] < 3) {
/* adjust fan clock divider on overflow */
err = adm9240_write_fan_div(data, channel,
++data->fan_div[channel]);
- if (err)
+ if (err) {
+ mutex_unlock(&data->update_lock);
return err;
+ }
}
*val = FAN_FROM_REG(regval, BIT(data->fan_div[channel]));
+ mutex_unlock(&data->update_lock);
break;
case hwmon_fan_div:
*val = BIT(data->fan_div[channel]);
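The adm9240 hunk above takes update_lock around the read-and-adjust sequence so the fan_div bump and the final FAN_FROM_REG() computation see a consistent divider. The same logic can also be structured with a single unlock path; the sketch below uses the identifiers from the patch, but the helper name is illustrative, not what the driver does:

static int adm9240_fan_input_read(struct adm9240_data *data, int channel, long *val)
{
	unsigned int regval;
	int err;

	mutex_lock(&data->update_lock);
	err = regmap_read(data->regmap, ADM9240_REG_FAN(channel), &regval);
	if (err < 0)
		goto out_unlock;

	if (regval == 255 && data->fan_div[channel] < 3) {
		/* adjust fan clock divider on overflow */
		err = adm9240_write_fan_div(data, channel,
					    ++data->fan_div[channel]);
		if (err)
			goto out_unlock;
	}
	*val = FAN_FROM_REG(regval, BIT(data->fan_div[channel]));

out_unlock:
	mutex_unlock(&data->update_lock);
	return err;
}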
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index 1efc0bdcceab..067865f4887a 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -152,7 +152,7 @@ MODULE_DEVICE_TABLE(spi, adt7310_id);
static struct spi_driver adt7310_driver = {
.driver = {
.name = "adt7310",
- .pm = ADT7X10_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
},
.probe = adt7310_spi_probe,
.id_table = adt7310_id,
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index aede5baca7b9..0cebf6777239 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -98,7 +98,7 @@ static struct i2c_driver adt7410_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "adt7410",
- .pm = ADT7X10_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
},
.probe_new = adt7410_i2c_probe,
.id_table = adt7410_ids,
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index fad74aa62b64..bf5c5618f8d0 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -590,7 +590,7 @@ static int adt7411_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "adt7411", I2C_NAME_SIZE);
+ strscpy(info->type, "adt7411", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index e75bbd87ad09..9c0235849d4b 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -1782,7 +1782,7 @@ static int adt7462_detect(struct i2c_client *client,
if (revision != ADT7462_REVISION)
return -ENODEV;
- strlcpy(info->type, "adt7462", I2C_NAME_SIZE);
+ strscpy(info->type, "adt7462", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c67cd037a93f..927f8df05b7c 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1296,12 +1296,11 @@ static int adt7470_probe(struct i2c_client *client)
return 0;
}
-static int adt7470_remove(struct i2c_client *client)
+static void adt7470_remove(struct i2c_client *client)
{
struct adt7470_data *data = i2c_get_clientdata(client);
kthread_stop(data->auto_update);
- return 0;
}
static const struct i2c_device_id adt7470_id[] = {
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index ac480e6e4818..51b3d16c3223 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1342,7 +1342,7 @@ static int adt7475_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index ce54bffab2ec..da67734edafd 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -397,8 +397,6 @@ int adt7x10_probe(struct device *dev, const char *name, int irq,
}
EXPORT_SYMBOL_GPL(adt7x10_probe);
-#ifdef CONFIG_PM_SLEEP
-
static int adt7x10_suspend(struct device *dev)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
@@ -414,10 +412,7 @@ static int adt7x10_resume(struct device *dev)
return regmap_write(data->regmap, ADT7X10_CONFIG, data->config);
}
-SIMPLE_DEV_PM_OPS(adt7x10_dev_pm_ops, adt7x10_suspend, adt7x10_resume);
-EXPORT_SYMBOL_GPL(adt7x10_dev_pm_ops);
-
-#endif /* CONFIG_PM_SLEEP */
+EXPORT_SIMPLE_DEV_PM_OPS(adt7x10_dev_pm_ops, adt7x10_suspend, adt7x10_resume);
MODULE_AUTHOR("Hartmut Knaack");
MODULE_DESCRIPTION("ADT7410/ADT7420, ADT7310/ADT7320 common code");
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index ba22c32c8355..46caf3e21978 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -20,11 +20,6 @@ struct device;
int adt7x10_probe(struct device *dev, const char *name, int irq,
struct regmap *regmap);
-#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops adt7x10_dev_pm_ops;
-#define ADT7X10_DEV_PM_OPS (&adt7x10_dev_pm_ops)
-#else
-#define ADT7X10_DEV_PM_OPS NULL
-#endif
#endif
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 0c16face3fd3..3bfd12ff4b3c 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -809,7 +809,7 @@ static int amc6821_detect(
}
dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address);
- strlcpy(info->type, "amc6821", I2C_NAME_SIZE);
+ strscpy(info->type, "amc6821", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 66430553cc45..c51a2678f0eb 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo,
- * Quadro)
+ * Quadro, High Flow Next)
*
* Aquacomputer devices send HID reports (with ID 0x01) every second to report
* sensor values.
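These devices report multi-byte values big-endian inside that periodic status report, so each sensor is described by an offset and a width. A minimal sketch of decoding one temperature word (the helper name is illustrative; the * 10 matches the scaling applied later in aqc_raw_event()):

#include <asm/unaligned.h>
#include <linux/errno.h>

/* Decode one 16-bit big-endian temperature field from a status report */
static s32 aqc_decode_temp(const u8 *report, int offset)
{
	u16 raw = get_unaligned_be16(report + offset);

	if (raw == AQC_TEMP_SENSOR_DISCONNECTED)
		return -ENODATA;

	return raw * 10;	/* convert to the millidegree units hwmon expects */
}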
@@ -26,15 +26,17 @@
#define USB_PRODUCT_ID_D5NEXT 0xf00e
#define USB_PRODUCT_ID_FARBWERK360 0xf010
#define USB_PRODUCT_ID_OCTO 0xf011
+#define USB_PRODUCT_ID_HIGHFLOWNEXT 0xf012
-enum kinds { d5next, farbwerk, farbwerk360, octo, quadro };
+enum kinds { d5next, farbwerk, farbwerk360, octo, quadro, highflownext };
static const char *const aqc_device_names[] = {
[d5next] = "d5next",
[farbwerk] = "farbwerk",
[farbwerk360] = "farbwerk360",
[octo] = "octo",
- [quadro] = "quadro"
+ [quadro] = "quadro",
+ [highflownext] = "highflownext"
};
#define DRIVER_NAME "aquacomputer_d5next"
@@ -71,6 +73,8 @@ static u8 secondary_ctrl_report[] = {
#define D5NEXT_COOLANT_TEMP 0x57
#define D5NEXT_NUM_FANS 2
#define D5NEXT_NUM_SENSORS 1
+#define D5NEXT_NUM_VIRTUAL_SENSORS 8
+#define D5NEXT_VIRTUAL_SENSORS_START 0x3f
#define D5NEXT_PUMP_OFFSET 0x6c
#define D5NEXT_FAN_OFFSET 0x5f
#define D5NEXT_5V_VOLTAGE 0x39
@@ -86,14 +90,18 @@ static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 };
#define FARBWERK_SENSOR_START 0x2f
/* Register offsets for the Farbwerk 360 RGB controller */
-#define FARBWERK360_NUM_SENSORS 4
-#define FARBWERK360_SENSOR_START 0x32
+#define FARBWERK360_NUM_SENSORS 4
+#define FARBWERK360_SENSOR_START 0x32
+#define FARBWERK360_NUM_VIRTUAL_SENSORS 16
+#define FARBWERK360_VIRTUAL_SENSORS_START 0x3a
/* Register offsets for the Octo fan controller */
#define OCTO_POWER_CYCLES 0x18
#define OCTO_NUM_FANS 8
#define OCTO_NUM_SENSORS 4
#define OCTO_SENSOR_START 0x3D
+#define OCTO_NUM_VIRTUAL_SENSORS 16
+#define OCTO_VIRTUAL_SENSORS_START 0x45
#define OCTO_CTRL_REPORT_SIZE 0x65F
static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
@@ -105,12 +113,24 @@ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0
#define QUADRO_NUM_FANS 4
#define QUADRO_NUM_SENSORS 4
#define QUADRO_SENSOR_START 0x34
+#define QUADRO_NUM_VIRTUAL_SENSORS 16
+#define QUADRO_VIRTUAL_SENSORS_START 0x3c
#define QUADRO_CTRL_REPORT_SIZE 0x3c1
#define QUADRO_FLOW_SENSOR_OFFSET 0x6e
static u8 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 };
/* Fan speed registers in Quadro control report (from 0-100%) */
-static u16 quadro_ctrl_fan_offsets[] = { 0x36, 0x8b, 0xe0, 0x135 };
+static u16 quadro_ctrl_fan_offsets[] = { 0x37, 0x8c, 0xe1, 0x136 };
+
+/* Register offsets for the High Flow Next */
+#define HIGHFLOWNEXT_NUM_SENSORS 2
+#define HIGHFLOWNEXT_SENSOR_START 85
+#define HIGHFLOWNEXT_FLOW 81
+#define HIGHFLOWNEXT_WATER_QUALITY 89
+#define HIGHFLOWNEXT_POWER 91
+#define HIGHFLOWNEXT_CONDUCTIVITY 95
+#define HIGHFLOWNEXT_5V_VOLTAGE 97
+#define HIGHFLOWNEXT_5V_VOLTAGE_USB 99
/* Labels for D5 Next */
static const char *const label_d5next_temp[] = {
@@ -147,6 +167,25 @@ static const char *const label_temp_sensors[] = {
"Sensor 4"
};
+static const char *const label_virtual_temp_sensors[] = {
+ "Virtual sensor 1",
+ "Virtual sensor 2",
+ "Virtual sensor 3",
+ "Virtual sensor 4",
+ "Virtual sensor 5",
+ "Virtual sensor 6",
+ "Virtual sensor 7",
+ "Virtual sensor 8",
+ "Virtual sensor 9",
+ "Virtual sensor 10",
+ "Virtual sensor 11",
+ "Virtual sensor 12",
+ "Virtual sensor 13",
+ "Virtual sensor 14",
+ "Virtual sensor 15",
+ "Virtual sensor 16",
+};
+
/* Labels for Octo and Quadro (except speed) */
static const char *const label_fan_speed[] = {
"Fan 1 speed",
@@ -201,6 +240,27 @@ static const char *const label_quadro_speeds[] = {
"Flow speed [dL/h]"
};
+/* Labels for High Flow Next */
+static const char *const label_highflownext_temp_sensors[] = {
+ "Coolant temp",
+ "External sensor"
+};
+
+static const char *const label_highflownext_fan_speed[] = {
+ "Flow [dL/h]",
+ "Water quality [%]",
+ "Conductivity [nS/cm]",
+};
+
+static const char *const label_highflownext_power[] = {
+ "Dissipated power",
+};
+
+static const char *const label_highflownext_voltage[] = {
+ "+5V voltage",
+ "+5V USB voltage"
+};
+
struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
@@ -220,6 +280,8 @@ struct aqc_data {
u16 *fan_ctrl_offsets;
int num_temp_sensors;
int temp_sensor_start_offset;
+ int num_virtual_temp_sensors;
+ int virtual_temp_sensor_start_offset;
u16 power_cycle_count_offset;
u8 flow_sensor_offset;
@@ -231,7 +293,7 @@ struct aqc_data {
u32 power_cycles;
/* Sensor values */
- s32 temp_input[4];
+ s32 temp_input[20]; /* Max 4 physical and 16 virtual */
u16 speed_input[8];
u32 power_input[8];
u16 voltage_input[8];
@@ -239,6 +301,7 @@ struct aqc_data {
/* Label values */
const char *const *temp_label;
+ const char *const *virtual_temp_label;
const char *const *speed_label;
const char *const *power_label;
const char *const *voltage_label;
@@ -345,7 +408,7 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
switch (type) {
case hwmon_temp:
- if (channel < priv->num_temp_sensors)
+ if (channel < priv->num_temp_sensors + priv->num_virtual_temp_sensors)
return 0444;
break;
case hwmon_pwm:
@@ -360,6 +423,11 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
break;
case hwmon_fan:
switch (priv->kind) {
+ case highflownext:
+ /* Special case to support flow sensor, water quality and conductivity */
+ if (channel < 3)
+ return 0444;
+ break;
case quadro:
/* Special case to support flow sensor */
if (channel < priv->num_fans + 1)
@@ -372,6 +440,18 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
}
break;
case hwmon_power:
+ switch (priv->kind) {
+ case highflownext:
+ /* Special case to support one power sensor */
+ if (channel == 0)
+ return 0444;
+ break;
+ default:
+ if (channel < priv->num_fans)
+ return 0444;
+ break;
+ }
+ break;
case hwmon_curr:
if (channel < priv->num_fans)
return 0444;
@@ -383,6 +463,11 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
if (channel < priv->num_fans + 2)
return 0444;
break;
+ case highflownext:
+ /* Special case to support two voltage sensors */
+ if (channel < 2)
+ return 0444;
+ break;
default:
if (channel < priv->num_fans)
return 0444;
@@ -447,7 +532,10 @@ static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32
switch (type) {
case hwmon_temp:
- *str = priv->temp_label[channel];
+ if (channel < priv->num_temp_sensors)
+ *str = priv->temp_label[channel];
+ else
+ *str = priv->virtual_temp_label[channel - priv->num_temp_sensors];
break;
case hwmon_fan:
*str = priv->speed_label[channel];
@@ -512,6 +600,22 @@ static const struct hwmon_channel_info *aqc_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL,
@@ -568,7 +672,7 @@ static const struct hwmon_chip_info aqc_chip_info = {
static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size)
{
- int i, sensor_value;
+ int i, j, sensor_value;
struct aqc_data *priv;
if (report->id != STATUS_REPORT_ID)
@@ -581,7 +685,7 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART);
priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION);
- /* Temperature sensor readings */
+ /* Physical temperature sensor readings */
for (i = 0; i < priv->num_temp_sensors; i++) {
sensor_value = get_unaligned_be16(data +
priv->temp_sensor_start_offset +
@@ -592,6 +696,18 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv->temp_input[i] = sensor_value * 10;
}
+ /* Virtual temperature sensor readings */
+ for (j = 0; j < priv->num_virtual_temp_sensors; j++) {
+ sensor_value = get_unaligned_be16(data +
+ priv->virtual_temp_sensor_start_offset +
+ j * AQC_TEMP_SENSOR_SIZE);
+ if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED)
+ priv->temp_input[i] = -ENODATA;
+ else
+ priv->temp_input[i] = sensor_value * 10;
+ i++;
+ }
+
/* Fan speed and related readings */
for (i = 0; i < priv->num_fans; i++) {
priv->speed_input[i] =
@@ -618,6 +734,22 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
case quadro:
priv->speed_input[4] = get_unaligned_be16(data + priv->flow_sensor_offset);
break;
+ case highflownext:
+		/* If the external temp sensor is not connected, its power reading is also N/A */
+ if (priv->temp_input[1] == -ENODATA)
+ priv->power_input[0] = -ENODATA;
+ else
+ priv->power_input[0] =
+ get_unaligned_be16(data + HIGHFLOWNEXT_POWER) * 1000000;
+
+ priv->voltage_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE) * 10;
+ priv->voltage_input[1] =
+ get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE_USB) * 10;
+
+ priv->speed_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_FLOW);
+ priv->speed_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_WATER_QUALITY);
+ priv->speed_input[2] = get_unaligned_be16(data + HIGHFLOWNEXT_CONDUCTIVITY);
+ break;
default:
break;
}
@@ -717,10 +849,13 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->fan_ctrl_offsets = d5next_ctrl_fan_offsets;
priv->num_temp_sensors = D5NEXT_NUM_SENSORS;
priv->temp_sensor_start_offset = D5NEXT_COOLANT_TEMP;
+ priv->num_virtual_temp_sensors = D5NEXT_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES;
priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE;
priv->temp_label = label_d5next_temp;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
priv->speed_label = label_d5next_speeds;
priv->power_label = label_d5next_power;
priv->voltage_label = label_d5next_voltages;
@@ -740,7 +875,11 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->num_fans = 0;
priv->num_temp_sensors = FARBWERK360_NUM_SENSORS;
priv->temp_sensor_start_offset = FARBWERK360_SENSOR_START;
+ priv->num_virtual_temp_sensors = FARBWERK360_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = FARBWERK360_VIRTUAL_SENSORS_START;
+
priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
break;
case USB_PRODUCT_ID_OCTO:
priv->kind = octo;
@@ -750,10 +889,13 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->fan_ctrl_offsets = octo_ctrl_fan_offsets;
priv->num_temp_sensors = OCTO_NUM_SENSORS;
priv->temp_sensor_start_offset = OCTO_SENSOR_START;
+ priv->num_virtual_temp_sensors = OCTO_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = OCTO_POWER_CYCLES;
priv->buffer_size = OCTO_CTRL_REPORT_SIZE;
priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
priv->speed_label = label_fan_speed;
priv->power_label = label_fan_power;
priv->voltage_label = label_fan_voltage;
@@ -767,16 +909,32 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->fan_ctrl_offsets = quadro_ctrl_fan_offsets;
priv->num_temp_sensors = QUADRO_NUM_SENSORS;
priv->temp_sensor_start_offset = QUADRO_SENSOR_START;
+ priv->num_virtual_temp_sensors = QUADRO_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START;
priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET;
priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
priv->speed_label = label_quadro_speeds;
priv->power_label = label_fan_power;
priv->voltage_label = label_fan_voltage;
priv->current_label = label_fan_current;
break;
+ case USB_PRODUCT_ID_HIGHFLOWNEXT:
+ priv->kind = highflownext;
+
+ priv->num_fans = 0;
+ priv->num_temp_sensors = HIGHFLOWNEXT_NUM_SENSORS;
+ priv->temp_sensor_start_offset = HIGHFLOWNEXT_SENSOR_START;
+ priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
+
+ priv->temp_label = label_highflownext_temp_sensors;
+ priv->speed_label = label_highflownext_fan_speed;
+ priv->power_label = label_highflownext_power;
+ priv->voltage_label = label_highflownext_voltage;
+ break;
default:
break;
}
@@ -833,6 +991,7 @@ static const struct hid_device_id aqc_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOWNEXT) },
{ }
};
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 8cf0bcb85eb4..ce4da836765c 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -208,7 +208,7 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 val);
static int asb100_probe(struct i2c_client *client);
static int asb100_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int asb100_remove(struct i2c_client *client);
+static void asb100_remove(struct i2c_client *client);
static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);
@@ -769,7 +769,7 @@ static int asb100_detect(struct i2c_client *client,
if (val1 != 0x31 || val2 != 0x06)
return -ENODEV;
- strlcpy(info->type, "asb100", I2C_NAME_SIZE);
+ strscpy(info->type, "asb100", I2C_NAME_SIZE);
return 0;
}
@@ -822,7 +822,7 @@ ERROR3:
return err;
}
-static int asb100_remove(struct i2c_client *client)
+static void asb100_remove(struct i2c_client *client)
{
struct asb100_data *data = i2c_get_clientdata(client);
@@ -831,8 +831,6 @@ static int asb100_remove(struct i2c_client *client)
i2c_unregister_device(data->lm75[1]);
i2c_unregister_device(data->lm75[0]);
-
- return 0;
}
/*
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index e835605a7456..54595454537b 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1153,7 +1153,7 @@ static int asc7621_detect(struct i2c_client *client,
if (company == asc7621_chips[chip_index].company_id &&
verstep == asc7621_chips[chip_index].verstep_id) {
- strlcpy(info->type, asc7621_chips[chip_index].name,
+ strscpy(info->type, asc7621_chips[chip_index].name,
I2C_NAME_SIZE);
dev_info(&adapter->dev, "Matched %s at 0x%02x\n",
@@ -1165,7 +1165,7 @@ static int asc7621_detect(struct i2c_client *client,
return -ENODEV;
}
-static int asc7621_remove(struct i2c_client *client)
+static void asc7621_remove(struct i2c_client *client)
{
struct asc7621_data *data = i2c_get_clientdata(client);
int i;
@@ -1176,8 +1176,6 @@ static int asc7621_remove(struct i2c_client *client)
device_remove_file(&client->dev,
&(asc7621_params[i].sda.dev_attr));
}
-
- return 0;
}
static const struct i2c_device_id asc7621_id[] = {
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 61a4684fc020..81e688975c6a 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -266,9 +266,7 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
#define SENSOR_SET_WATER_BLOCK \
(SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
-
struct ec_board_info {
- const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
unsigned long sensors;
/*
* Defines which mutex to use for guarding access to the state and the
@@ -281,152 +279,194 @@ struct ec_board_info {
enum board_family family;
};
-static const struct ec_board_info board_info[] = {
- {
- .board_names = {"PRIME X470-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_400_series,
- },
- {
- .board_names = {"PRIME X570-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ProArt X570-CREATOR WIFI"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- },
- {
- .board_names = {"Pro WS X570-ACE"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII DARK HERO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG CROSSHAIR VIII FORMULA",
- "ROG CROSSHAIR VIII HERO",
- "ROG CROSSHAIR VIII HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG MAXIMUS XI HERO",
- "ROG MAXIMUS XI HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_intel_300_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII IMPACT"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-I GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING WIFI II"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-F GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-I GAMING"},
- .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"},
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
- .family = family_intel_600_series,
- },
- {
- .board_names = {"ROG ZENITH II EXTREME"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
- SENSOR_SET_WATER_BLOCK |
- SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
- SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
- .family = family_amd_500_series,
- },
- {}
+static const struct ec_board_info board_info_prime_x470_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_400_series,
+};
+
+static const struct ec_board_info board_info_prime_x570_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_maximus_xi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_f_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_i_gaming = {
+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_zenith_ii_extreme = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
+ SENSOR_SET_WATER_BLOCK |
+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_500_series,
+};
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, \
+ "ASUSTeK COMPUTER INC."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = (void *)board_info, \
+ }
+
+static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
+ &board_info_prime_x470_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
+ &board_info_prime_x570_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
+ &board_info_pro_art_x570_creator_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
+ &board_info_pro_ws_x570_ace),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
+ &board_info_crosshair_viii_dark_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+ &board_info_crosshair_viii_impact),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
+ &board_info_strix_b550_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
+ &board_info_strix_b550_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
+ &board_info_strix_x570_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
+ &board_info_strix_x570_e_gaming_wifi_ii),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING",
+ &board_info_strix_x570_f_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
+ &board_info_strix_x570_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
+ &board_info_strix_z690_a_gaming_wifi_d4),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
+ &board_info_zenith_ii_extreme),
+ {},
};
struct ec_sensor {
@@ -537,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec,
return -ENOENT;
}
-static int __init bank_compare(const void *a, const void *b)
+static int bank_compare(const void *a, const void *b)
{
return *((const s8 *)a) - *((const s8 *)b);
}
-static void __init setup_sensor_data(struct ec_sensors_data *ec)
+static void setup_sensor_data(struct ec_sensors_data *ec)
{
struct ec_sensor *s = ec->sensors;
bool bank_found;
@@ -574,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec)
sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
}
-static void __init fill_ec_registers(struct ec_sensors_data *ec)
+static void fill_ec_registers(struct ec_sensors_data *ec)
{
const struct ec_sensor_info *si;
unsigned int i, j, register_idx = 0;
@@ -589,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec)
}
}
-static int __init setup_lock_data(struct device *dev)
+static int setup_lock_data(struct device *dev)
{
const char *mutex_path;
int status;
@@ -812,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
}
-static int __init
+static int
asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
struct device *dev, int num,
enum hwmon_sensor_types type, u32 config)
@@ -841,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = {
.ops = &asus_ec_hwmon_ops,
};
-static const struct ec_board_info * __init get_board_info(void)
+static const struct ec_board_info *get_board_info(void)
{
- const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
- const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME);
- const struct ec_board_info *board;
-
- if (!dmi_board_vendor || !dmi_board_name ||
- strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC."))
- return NULL;
-
- for (board = board_info; board->sensors; board++) {
- if (match_string(board->board_names,
- MAX_IDENTICAL_BOARD_VARIATIONS,
- dmi_board_name) >= 0)
- return board;
- }
+ const struct dmi_system_id *dmi_entry;
- return NULL;
+ dmi_entry = dmi_first_match(dmi_table);
+ return dmi_entry ? dmi_entry->driver_data : NULL;
}
-static int __init asus_ec_probe(struct platform_device *pdev)
+static int asus_ec_probe(struct platform_device *pdev)
{
const struct hwmon_channel_info **ptr_asus_ec_ci;
int nr_count[hwmon_max] = { 0 }, nr_types = 0;
@@ -970,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwdev);
}
-
-static const struct acpi_device_id acpi_ec_ids[] = {
- /* Embedded Controller Device */
- { "PNP0C09", 0 },
- {}
-};
+MODULE_DEVICE_TABLE(dmi, dmi_table);
static struct platform_driver asus_ec_sensors_platform_driver = {
.driver = {
.name = "asus-ec-sensors",
- .acpi_match_table = acpi_ec_ids,
},
+ .probe = asus_ec_probe,
};
-MODULE_DEVICE_TABLE(acpi, acpi_ec_ids);
-/*
- * we use module_platform_driver_probe() rather than module_platform_driver()
- * because the probe function (and its dependants) are marked with __init, which
- * means we can't put it into the .probe member of the platform_driver struct
- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init
- * because the object is referenced from the module exit code.
- */
-module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+static struct platform_device *asus_ec_sensors_platform_device;
+
+static int __init asus_ec_init(void)
+{
+ asus_ec_sensors_platform_device =
+ platform_create_bundle(&asus_ec_sensors_platform_driver,
+ asus_ec_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(asus_ec_sensors_platform_device))
+ return PTR_ERR(asus_ec_sensors_platform_device);
+
+ return 0;
+}
+
+static void __exit asus_ec_exit(void)
+{
+ platform_device_unregister(asus_ec_sensors_platform_device);
+ platform_driver_unregister(&asus_ec_sensors_platform_driver);
+}
+
+module_init(asus_ec_init);
+module_exit(asus_ec_exit);
module_param_named(mutex_path, mutex_path_override, charp, 0);
MODULE_PARM_DESC(mutex_path,
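With the board list expressed as a dmi_system_id table, supporting another motherboard no longer means growing a fixed-size name array; it is one new ec_board_info plus one table entry, and MODULE_DEVICE_TABLE(dmi, dmi_table) lets the module autoload on matching systems. A sketch of adding an entry (board name and sensor set are placeholders):

static const struct ec_board_info board_info_some_new_board = {
	.sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,	/* placeholder set */
	.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
	.family = family_amd_500_series,
};

/* ...and in dmi_table[], before the terminating {}: */
	DMI_EXACT_MATCH_ASUS_BOARD_NAME("SOME NEW BOARD NAME",
					&board_info_some_new_board),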
diff --git a/drivers/hwmon/asus_wmi_ec_sensors.c b/drivers/hwmon/asus_wmi_ec_sensors.c
deleted file mode 100644
index a3a2f014dec0..000000000000
--- a/drivers/hwmon/asus_wmi_ec_sensors.c
+++ /dev/null
@@ -1,622 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * HWMON driver for ASUS B550/X570 motherboards that publish sensor
- * values via the embedded controller registers.
- *
- * Copyright (C) 2021 Eugene Shalygin <eugene.shalygin@gmail.com>
- * Copyright (C) 2018-2019 Ed Brindley <kernel@maidavale.org>
- *
- * EC provides:
- * - Chipset temperature
- * - CPU temperature
- * - Motherboard temperature
- * - T_Sensor temperature
- * - VRM temperature
- * - Water In temperature
- * - Water Out temperature
- * - CPU Optional Fan RPM
- * - Chipset Fan RPM
- * - Water Flow Fan RPM
- * - CPU current
- */
-
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <linux/hwmon.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nls.h>
-#include <linux/units.h>
-#include <linux/wmi.h>
-
-#include <asm/unaligned.h>
-
-#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
-#define ASUSWMI_METHODID_BLOCK_READ_EC 0x42524543 /* BREC */
-/* From the ASUS DSDT source */
-#define ASUSWMI_BREC_REGISTERS_MAX 16
-#define ASUSWMI_MAX_BUF_LEN 128
-#define SENSOR_LABEL_LEN 16
-
-static u32 hwmon_attributes[hwmon_max] = {
- [hwmon_chip] = HWMON_C_REGISTER_TZ,
- [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
- [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
- [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL,
- [hwmon_fan] = HWMON_F_INPUT | HWMON_F_LABEL,
-};
-
-struct asus_wmi_ec_sensor_address {
- u8 index;
- u8 bank;
- u8 size;
-};
-
-#define MAKE_SENSOR_ADDRESS(size_i, bank_i, index_i) { \
- .size = size_i, \
- .bank = bank_i, \
- .index = index_i, \
-}
-
-struct ec_sensor_info {
- struct asus_wmi_ec_sensor_address addr;
- char label[SENSOR_LABEL_LEN];
- enum hwmon_sensor_types type;
-};
-
-#define EC_SENSOR(sensor_label, sensor_type, size, bank, index) { \
- .addr = MAKE_SENSOR_ADDRESS(size, bank, index), \
- .label = sensor_label, \
- .type = sensor_type, \
-}
-
-enum known_ec_sensor {
- SENSOR_TEMP_CHIPSET,
- SENSOR_TEMP_CPU,
- SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR,
- SENSOR_TEMP_VRM,
- SENSOR_FAN_CPU_OPT,
- SENSOR_FAN_CHIPSET,
- SENSOR_FAN_VRM_HS,
- SENSOR_FAN_WATER_FLOW,
- SENSOR_CURR_CPU,
- SENSOR_TEMP_WATER_IN,
- SENSOR_TEMP_WATER_OUT,
- SENSOR_MAX
-};
-
-/* All known sensors for ASUS EC controllers */
-static const struct ec_sensor_info known_ec_sensors[] = {
- [SENSOR_TEMP_CHIPSET] = EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
- [SENSOR_TEMP_CPU] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
- [SENSOR_TEMP_MB] = EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
- [SENSOR_TEMP_T_SENSOR] = EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
- [SENSOR_TEMP_VRM] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x3e),
- [SENSOR_FAN_CPU_OPT] = EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
- [SENSOR_FAN_VRM_HS] = EC_SENSOR("VRM HS", hwmon_fan, 2, 0x00, 0xb2),
- [SENSOR_FAN_CHIPSET] = EC_SENSOR("Chipset", hwmon_fan, 2, 0x00, 0xb4),
- [SENSOR_FAN_WATER_FLOW] = EC_SENSOR("Water_Flow", hwmon_fan, 2, 0x00, 0xbc),
- [SENSOR_CURR_CPU] = EC_SENSOR("CPU", hwmon_curr, 1, 0x00, 0xf4),
- [SENSOR_TEMP_WATER_IN] = EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
- [SENSOR_TEMP_WATER_OUT] = EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
-};
-
-struct asus_wmi_data {
- const enum known_ec_sensor known_board_sensors[SENSOR_MAX + 1];
-};
-
-/* boards with EC support */
-static struct asus_wmi_data sensors_board_PW_X570_P = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CHIPSET,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_PW_X570_A = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB, SENSOR_TEMP_VRM,
- SENSOR_FAN_CHIPSET,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_R_C8H = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_TEMP_WATER_IN, SENSOR_TEMP_WATER_OUT,
- SENSOR_FAN_CPU_OPT, SENSOR_FAN_CHIPSET, SENSOR_FAN_WATER_FLOW,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-/* Same as Hero but without chipset fan */
-static struct asus_wmi_data sensors_board_R_C8DH = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_TEMP_WATER_IN, SENSOR_TEMP_WATER_OUT,
- SENSOR_FAN_CPU_OPT, SENSOR_FAN_WATER_FLOW,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-/* Same as Hero but without water */
-static struct asus_wmi_data sensors_board_R_C8F = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CPU_OPT, SENSOR_FAN_CHIPSET,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_RS_B550_E_G = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CPU_OPT,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_RS_B550_I_G = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_VRM_HS,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-static struct asus_wmi_data sensors_board_RS_X570_E_G = {
- .known_board_sensors = {
- SENSOR_TEMP_CHIPSET, SENSOR_TEMP_CPU, SENSOR_TEMP_MB,
- SENSOR_TEMP_T_SENSOR, SENSOR_TEMP_VRM,
- SENSOR_FAN_CHIPSET,
- SENSOR_CURR_CPU,
- SENSOR_MAX
- },
-};
-
-#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, sensors) { \
- .matches = { \
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), \
- DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
- }, \
- .driver_data = sensors, \
-}
-
-static const struct dmi_system_id asus_wmi_ec_dmi_table[] = {
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO", &sensors_board_PW_X570_P),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE", &sensors_board_PW_X570_A),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO", &sensors_board_R_C8DH),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA", &sensors_board_R_C8F),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO", &sensors_board_R_C8H),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING", &sensors_board_RS_B550_E_G),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING", &sensors_board_RS_B550_I_G),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING", &sensors_board_RS_X570_E_G),
- {}
-};
-MODULE_DEVICE_TABLE(dmi, asus_wmi_ec_dmi_table);
-
-struct ec_sensor {
- enum known_ec_sensor info_index;
- long cached_value;
-};
-
-/**
- * struct asus_wmi_ec_info - sensor info.
- * @sensors: list of sensors.
- * @read_arg: UTF-16LE string to pass to BRxx() WMI function.
- * @read_buffer: decoded output from WMI result.
- * @nr_sensors: number of board EC sensors.
- * @nr_registers: number of EC registers to read (sensor might span more than 1 register).
- * @last_updated: in jiffies.
- */
-struct asus_wmi_ec_info {
- struct ec_sensor sensors[SENSOR_MAX];
- char read_arg[(ASUSWMI_BREC_REGISTERS_MAX * 4 + 1) * 2];
- u8 read_buffer[ASUSWMI_BREC_REGISTERS_MAX];
- unsigned int nr_sensors;
- unsigned int nr_registers;
- unsigned long last_updated;
-};
-
-struct asus_wmi_sensors {
- struct asus_wmi_ec_info ec;
- /* lock access to internal cache */
- struct mutex lock;
-};
-
-static int asus_wmi_ec_fill_board_sensors(struct asus_wmi_ec_info *ec,
- const enum known_ec_sensor *bsi)
-{
- struct ec_sensor *s = ec->sensors;
- int i;
-
- ec->nr_sensors = 0;
- ec->nr_registers = 0;
-
- for (i = 0; bsi[i] != SENSOR_MAX; i++) {
- s[i].info_index = bsi[i];
- ec->nr_sensors++;
- ec->nr_registers += known_ec_sensors[bsi[i]].addr.size;
- }
-
- return 0;
-}
-
-/*
- * The next four functions convert to or from BRxx string argument format.
- * The format of the string is as follows:
- * - The string consists of two-byte UTF-16LE characters.
- * - The value of the very first byte in the string is equal to the total
- * length of the next string in bytes, thus excluding the first two-byte
- * character.
- * - The rest of the string encodes the pairs of (bank, index) pairs, where
- * both values are byte-long (0x00 to 0xFF).
- * - Numbers are encoded as UTF-16LE hex values.
- */
-static int asus_wmi_ec_decode_reply_buffer(const u8 *in, u32 length, u8 *out)
-{
- char buffer[ASUSWMI_MAX_BUF_LEN * 2];
- u32 len = min_t(u32, get_unaligned_le16(in), length - 2);
-
- utf16s_to_utf8s((wchar_t *)(in + 2), len / 2, UTF16_LITTLE_ENDIAN, buffer, sizeof(buffer));
-
- return hex2bin(out, buffer, len / 4);
-}
-
-static void asus_wmi_ec_encode_registers(const u8 *in, u32 len, char *out)
-{
- char buffer[ASUSWMI_MAX_BUF_LEN * 2];
-
- bin2hex(buffer, in, len);
-
- utf8s_to_utf16s(buffer, len * 2, UTF16_LITTLE_ENDIAN, (wchar_t *)(out + 2), len * 2);
-
- put_unaligned_le16(len * 4, out);
-}
-
-static void asus_wmi_ec_make_block_read_query(struct asus_wmi_ec_info *ec)
-{
- u8 registers[ASUSWMI_BREC_REGISTERS_MAX * 2];
- const struct ec_sensor_info *si;
- int i, j, offset;
-
- offset = 0;
- for (i = 0; i < ec->nr_sensors; i++) {
- si = &known_ec_sensors[ec->sensors[i].info_index];
- for (j = 0; j < si->addr.size; j++) {
- registers[offset++] = si->addr.bank;
- registers[offset++] = si->addr.index + j;
- }
- }
-
- asus_wmi_ec_encode_registers(registers, offset, ec->read_arg);
-}
-
-static int asus_wmi_ec_block_read(u32 method_id, char *query, u8 *out)
-{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_buffer input;
- union acpi_object *obj;
- acpi_status status;
- int ret;
-
- /* The first byte of the BRxx() argument string has to be the string size. */
- input.length = query[0] + 2;
- input.pointer = query;
- status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0, method_id, &input, &output);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- obj = output.pointer;
- if (!obj)
- return -EIO;
-
- if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 2) {
- ret = -EIO;
- goto out_free_obj;
- }
-
- ret = asus_wmi_ec_decode_reply_buffer(obj->buffer.pointer, obj->buffer.length, out);
-
-out_free_obj:
- ACPI_FREE(obj);
- return ret;
-}
-
-static inline long get_sensor_value(const struct ec_sensor_info *si, u8 *data)
-{
- switch (si->addr.size) {
- case 1:
- return *data;
- case 2:
- return get_unaligned_be16(data);
- case 4:
- return get_unaligned_be32(data);
- default:
- return 0;
- }
-}
-
-static void asus_wmi_ec_update_ec_sensors(struct asus_wmi_ec_info *ec)
-{
- const struct ec_sensor_info *si;
- struct ec_sensor *s;
- u8 i_sensor;
- u8 *data;
-
- data = ec->read_buffer;
- for (i_sensor = 0; i_sensor < ec->nr_sensors; i_sensor++) {
- s = &ec->sensors[i_sensor];
- si = &known_ec_sensors[s->info_index];
- s->cached_value = get_sensor_value(si, data);
- data += si->addr.size;
- }
-}
-
-static long asus_wmi_ec_scale_sensor_value(long value, int data_type)
-{
- switch (data_type) {
- case hwmon_curr:
- case hwmon_temp:
- case hwmon_in:
- return value * MILLI;
- default:
- return value;
- }
-}
-
-static int asus_wmi_ec_find_sensor_index(const struct asus_wmi_ec_info *ec,
- enum hwmon_sensor_types type, int channel)
-{
- int i;
-
- for (i = 0; i < ec->nr_sensors; i++) {
- if (known_ec_sensors[ec->sensors[i].info_index].type == type) {
- if (channel == 0)
- return i;
-
- channel--;
- }
- }
- return -EINVAL;
-}
-
-static int asus_wmi_ec_get_cached_value_or_update(struct asus_wmi_sensors *sensor_data,
- int sensor_index,
- long *value)
-{
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int ret = 0;
-
- mutex_lock(&sensor_data->lock);
-
- if (time_after(jiffies, ec->last_updated + HZ)) {
- ret = asus_wmi_ec_block_read(ASUSWMI_METHODID_BLOCK_READ_EC,
- ec->read_arg, ec->read_buffer);
- if (ret)
- goto unlock;
-
- asus_wmi_ec_update_ec_sensors(ec);
- ec->last_updated = jiffies;
- }
-
- *value = ec->sensors[sensor_index].cached_value;
-
-unlock:
- mutex_unlock(&sensor_data->lock);
-
- return ret;
-}
-
-/* Now follow the functions that implement the hwmon interface */
-
-static int asus_wmi_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int ret, sidx, info_index;
- long value = 0;
-
- sidx = asus_wmi_ec_find_sensor_index(ec, type, channel);
- if (sidx < 0)
- return sidx;
-
- ret = asus_wmi_ec_get_cached_value_or_update(sensor_data, sidx, &value);
- if (ret)
- return ret;
-
- info_index = ec->sensors[sidx].info_index;
- *val = asus_wmi_ec_scale_sensor_value(value, known_ec_sensors[info_index].type);
-
- return ret;
-}
-
-static int asus_wmi_ec_hwmon_read_string(struct device *dev,
- enum hwmon_sensor_types type, u32 attr,
- int channel, const char **str)
-{
- struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int sensor_index;
-
- sensor_index = asus_wmi_ec_find_sensor_index(ec, type, channel);
- *str = known_ec_sensors[ec->sensors[sensor_index].info_index].label;
-
- return 0;
-}
-
-static umode_t asus_wmi_ec_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type, u32 attr,
- int channel)
-{
- const struct asus_wmi_sensors *sensor_data = drvdata;
- const struct asus_wmi_ec_info *ec = &sensor_data->ec;
- int index;
-
- index = asus_wmi_ec_find_sensor_index(ec, type, channel);
-
- return index < 0 ? 0 : 0444;
-}
-
-static int asus_wmi_hwmon_add_chan_info(struct hwmon_channel_info *asus_wmi_hwmon_chan,
- struct device *dev, int num,
- enum hwmon_sensor_types type, u32 config)
-{
- u32 *cfg;
-
- cfg = devm_kcalloc(dev, num + 1, sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
-
- asus_wmi_hwmon_chan->type = type;
- asus_wmi_hwmon_chan->config = cfg;
- memset32(cfg, config, num);
-
- return 0;
-}
-
-static const struct hwmon_ops asus_wmi_ec_hwmon_ops = {
- .is_visible = asus_wmi_ec_hwmon_is_visible,
- .read = asus_wmi_ec_hwmon_read,
- .read_string = asus_wmi_ec_hwmon_read_string,
-};
-
-static struct hwmon_chip_info asus_wmi_ec_chip_info = {
- .ops = &asus_wmi_ec_hwmon_ops,
-};
-
-static int asus_wmi_ec_configure_sensor_setup(struct device *dev,
- const enum known_ec_sensor *bsi)
-{
- struct asus_wmi_sensors *sensor_data = dev_get_drvdata(dev);
- struct asus_wmi_ec_info *ec = &sensor_data->ec;
- struct hwmon_channel_info *asus_wmi_hwmon_chan;
- const struct hwmon_channel_info **asus_wmi_ci;
- int nr_count[hwmon_max] = {}, nr_types = 0;
- const struct hwmon_chip_info *chip_info;
- const struct ec_sensor_info *si;
- enum hwmon_sensor_types type;
- struct device *hwdev;
- int i, ret;
-
- ret = asus_wmi_ec_fill_board_sensors(ec, bsi);
- if (ret)
- return ret;
-
- if (!sensor_data->ec.nr_sensors)
- return -ENODEV;
-
- for (i = 0; i < ec->nr_sensors; i++) {
- si = &known_ec_sensors[ec->sensors[i].info_index];
- if (!nr_count[si->type])
- nr_types++;
- nr_count[si->type]++;
- }
-
- if (nr_count[hwmon_temp]) {
- nr_count[hwmon_chip]++;
- nr_types++;
- }
-
- /*
- * If we can get values for all the registers in a single query,
- * the query will not change from call to call.
- */
- asus_wmi_ec_make_block_read_query(ec);
-
- asus_wmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*asus_wmi_hwmon_chan),
- GFP_KERNEL);
- if (!asus_wmi_hwmon_chan)
- return -ENOMEM;
-
- asus_wmi_ci = devm_kcalloc(dev, nr_types + 1, sizeof(*asus_wmi_ci), GFP_KERNEL);
- if (!asus_wmi_ci)
- return -ENOMEM;
-
- asus_wmi_ec_chip_info.info = asus_wmi_ci;
- chip_info = &asus_wmi_ec_chip_info;
-
- for (type = 0; type < hwmon_max; type++) {
- if (!nr_count[type])
- continue;
-
- ret = asus_wmi_hwmon_add_chan_info(asus_wmi_hwmon_chan, dev,
- nr_count[type], type,
- hwmon_attributes[type]);
- if (ret)
- return ret;
-
- *asus_wmi_ci++ = asus_wmi_hwmon_chan++;
- }
-
- dev_dbg(dev, "board has %d EC sensors that span %d registers",
- ec->nr_sensors, ec->nr_registers);
-
- hwdev = devm_hwmon_device_register_with_info(dev, "asus_wmi_ec_sensors",
- sensor_data, chip_info, NULL);
-
- return PTR_ERR_OR_ZERO(hwdev);
-}
-
-static int asus_wmi_probe(struct wmi_device *wdev, const void *context)
-{
- struct asus_wmi_sensors *sensor_data;
- struct asus_wmi_data *board_sensors;
- const struct dmi_system_id *dmi_id;
- const enum known_ec_sensor *bsi;
- struct device *dev = &wdev->dev;
-
- dmi_id = dmi_first_match(asus_wmi_ec_dmi_table);
- if (!dmi_id)
- return -ENODEV;
-
- board_sensors = dmi_id->driver_data;
- bsi = board_sensors->known_board_sensors;
-
- sensor_data = devm_kzalloc(dev, sizeof(*sensor_data), GFP_KERNEL);
- if (!sensor_data)
- return -ENOMEM;
-
- mutex_init(&sensor_data->lock);
-
- dev_set_drvdata(dev, sensor_data);
-
- return asus_wmi_ec_configure_sensor_setup(dev, bsi);
-}
-
-static const struct wmi_device_id asus_ec_wmi_id_table[] = {
- { ASUSWMI_MONITORING_GUID, NULL },
- { }
-};
-
-static struct wmi_driver asus_sensors_wmi_driver = {
- .driver = {
- .name = "asus_wmi_ec_sensors",
- },
- .id_table = asus_ec_wmi_id_table,
- .probe = asus_wmi_probe,
-};
-module_wmi_driver(asus_sensors_wmi_driver);
-
-MODULE_AUTHOR("Ed Brindley <kernel@maidavale.org>");
-MODULE_AUTHOR("Eugene Shalygin <eugene.shalygin@gmail.com>");
-MODULE_DESCRIPTION("Asus WMI Sensors Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/axi-fan-control.c b/drivers/hwmon/axi-fan-control.c
index 96c4a5c45291..6724e0dd3088 100644
--- a/drivers/hwmon/axi-fan-control.c
+++ b/drivers/hwmon/axi-fan-control.c
@@ -394,11 +394,6 @@ static int axi_fan_control_init(struct axi_fan_control_data *ctl,
return ret;
}
-static void axi_fan_control_clk_disable(void *clk)
-{
- clk_disable_unprepare(clk);
-}
-
static const struct hwmon_channel_info *axi_fan_control_info[] = {
HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT),
HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_LABEL),
@@ -478,20 +473,12 @@ static int axi_fan_control_probe(struct platform_device *pdev)
if (IS_ERR(ctl->base))
return PTR_ERR(ctl->base);
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "clk_get failed with %ld\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&pdev->dev, axi_fan_control_clk_disable, clk);
- if (ret)
- return ret;
-
ctl->clk_rate = clk_get_rate(clk);
if (!ctl->clk_rate)
return -EINVAL;
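The hunk above folds clk_prepare_enable() and the custom devm cleanup action into a single devm_clk_get_enabled() call, which acquires the clock, enables it, and registers disable/unprepare for automatic unwind on probe failure or unbind. A minimal sketch of the resulting probe flow, with a hypothetical my_probe() and struct my_data (the clk and devm calls themselves are real APIs):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_data {
	unsigned long clk_rate;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_data *priv;
	struct clk *clk;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * Gets, prepares and enables the clock, and registers a devm action
	 * that disables/unprepares it automatically, so no explicit error
	 * path or remove() handling is needed.
	 */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk), "clk_get failed\n");

	priv->clk_rate = clk_get_rate(clk);
	if (!priv->clk_rate)
		return -EINVAL;

	return 0;
}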
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 14389fd7afb8..345d883ab044 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -55,6 +55,7 @@
#define SECONDS_PER_DAY (SECONDS_PER_HOUR * 24)
#define RAIL_COUNT 3 /* 3v3 + 5v + 12v */
#define TEMP_COUNT 2
+#define OCP_MULTI_RAIL 0x02
#define PSU_CMD_SELECT_RAIL 0x00 /* expects length 2 */
#define PSU_CMD_RAIL_VOLTS_HCRIT 0x40 /* the rest of the commands expect length 3 */
@@ -71,9 +72,10 @@
#define PSU_CMD_RAIL_WATTS 0x96
#define PSU_CMD_VEND_STR 0x99
#define PSU_CMD_PROD_STR 0x9A
-#define PSU_CMD_TOTAL_WATTS 0xEE
#define PSU_CMD_TOTAL_UPTIME 0xD1
#define PSU_CMD_UPTIME 0xD2
+#define PSU_CMD_OCPMODE 0xD8
+#define PSU_CMD_TOTAL_WATTS 0xEE
#define PSU_CMD_INIT 0xFE
#define L_IN_VOLTS "v_in"
@@ -268,6 +270,7 @@ static int corsairpsu_get_value(struct corsairpsu_data *priv, u8 cmd, u8 rail, l
break;
case PSU_CMD_TOTAL_UPTIME:
case PSU_CMD_UPTIME:
+ case PSU_CMD_OCPMODE:
*val = tmp;
break;
default:
@@ -660,6 +663,29 @@ static int product_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(product);
+static int ocpmode_show(struct seq_file *seqf, void *unused)
+{
+ struct corsairpsu_data *priv = seqf->private;
+ long val;
+ int ret;
+
+ /*
+ * The rail mode is switchable on the fly via the RAW interface, but that is not exposed
+ * here because switching it is considered somewhat risky for the health of the PSU. The
+ * returned value can be bogus while the PSU is in the process of switching, and reading it
+ * can fail altogether during that time. Because of this, every value other than
+ * OCP_MULTI_RAIL is treated as "single rail".
+ */
+ ret = corsairpsu_get_value(priv, PSU_CMD_OCPMODE, 0, &val);
+ if (ret < 0)
+ seq_puts(seqf, "N/A\n");
+ else
+ seq_printf(seqf, "%s\n", (val == OCP_MULTI_RAIL) ? "multi rail" : "single rail");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ocpmode);
+
static void corsairpsu_debugfs_init(struct corsairpsu_data *priv)
{
char name[32];
@@ -671,6 +697,7 @@ static void corsairpsu_debugfs_init(struct corsairpsu_data *priv)
debugfs_create_file("uptime_total", 0444, priv->debugfs, priv, &uptime_total_fops);
debugfs_create_file("vendor", 0444, priv->debugfs, priv, &vendor_fops);
debugfs_create_file("product", 0444, priv->debugfs, priv, &product_fops);
+ debugfs_create_file("ocpmode", 0444, priv->debugfs, priv, &ocpmode_fops);
}
#else
@@ -786,13 +813,14 @@ static const struct hid_device_id corsairpsu_idtable[] = {
{ HID_USB_DEVICE(0x1b1c, 0x1c04) }, /* Corsair HX650i */
{ HID_USB_DEVICE(0x1b1c, 0x1c05) }, /* Corsair HX750i */
{ HID_USB_DEVICE(0x1b1c, 0x1c06) }, /* Corsair HX850i */
- { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c07) }, /* Corsair HX1000i revision 1 */
{ HID_USB_DEVICE(0x1b1c, 0x1c08) }, /* Corsair HX1200i */
{ HID_USB_DEVICE(0x1b1c, 0x1c09) }, /* Corsair RM550i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0a) }, /* Corsair RM650i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */
{ HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */
+ { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i revision 2 */
{ },
};
MODULE_DEVICE_TABLE(hid, corsairpsu_idtable);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 7f8d95dd2717..1572b5416015 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1355,15 +1355,21 @@ static int __init dell_smm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
- dev_warn(&pdev->dev, "broken Dell BIOS detected, disallow fan support\n");
- if (!force)
+ if (!force) {
+ dev_notice(&pdev->dev, "Disabling fan support due to BIOS bugs\n");
data->disallow_fan_support = true;
+ } else {
+ dev_warn(&pdev->dev, "Enabling fan support despite BIOS bugs\n");
+ }
}
if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
- dev_warn(&pdev->dev, "broken Dell BIOS detected, disallow fan type call\n");
- if (!force)
+ if (!force) {
+ dev_notice(&pdev->dev, "Disabling fan type call due to BIOS bugs\n");
data->disallow_fan_type_call = true;
+ } else {
+ dev_warn(&pdev->dev, "Enabling fan type call despite BIOS bugs\n");
+ }
}
strscpy(data->bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index e3ad4c2d0038..66c48f70fae7 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2456,7 +2456,7 @@ static int dme1737_i2c_detect(struct i2c_client *client,
dev_info(dev, "Found a %s chip at 0x%02x (rev 0x%02x).\n",
verstep == SCH5027_VERSTEP ? "SCH5027" : "DME1737",
client->addr, verstep);
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
@@ -2508,14 +2508,12 @@ exit_remove:
return err;
}
-static int dme1737_i2c_remove(struct i2c_client *client)
+static void dme1737_i2c_remove(struct i2c_client *client)
{
struct dme1737_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
dme1737_remove_files(&client->dev);
-
- return 0;
}
static const struct i2c_device_id dme1737_id[] = {
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 314838272049..61d59189a6d1 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -329,22 +329,22 @@ static int emc1403_detect(struct i2c_client *client,
id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
switch (id) {
case 0x20:
- strlcpy(info->type, "emc1402", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1402", I2C_NAME_SIZE);
break;
case 0x21:
- strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1403", I2C_NAME_SIZE);
break;
case 0x22:
- strlcpy(info->type, "emc1422", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1422", I2C_NAME_SIZE);
break;
case 0x23:
- strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1423", I2C_NAME_SIZE);
break;
case 0x25:
- strlcpy(info->type, "emc1404", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1404", I2C_NAME_SIZE);
break;
case 0x27:
- strlcpy(info->type, "emc1424", I2C_NAME_SIZE);
+ strscpy(info->type, "emc1424", I2C_NAME_SIZE);
break;
default:
return -ENODEV;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index e4c95ca9e19f..361cf9292456 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -643,7 +643,7 @@ emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info)
if ((product != 0x24) && (product != 0x26))
return -ENODEV;
- strlcpy(info->type, "emc2103", I2C_NAME_SIZE);
+ strscpy(info->type, "emc2103", I2C_NAME_SIZE);
return 0;
}
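The strlcpy() to strscpy() conversions in this and the neighbouring detect() callbacks are mechanical, but the return semantics differ: strscpy() always NUL-terminates the destination (for a non-zero size) and reports truncation as -E2BIG, whereas strlcpy() returned strlen(src) and left truncation detection to the caller. A hedged sketch with a hypothetical check_name() helper:

#include <linux/errno.h>
#include <linux/string.h>

static int check_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret;

	/*
	 * strscpy() copies at most dst_size - 1 characters, always
	 * NUL-terminates dst, and returns the number of characters copied
	 * or -E2BIG if src had to be truncated.
	 */
	ret = strscpy(dst, src, dst_size);
	if (ret == -E2BIG)
		return -EINVAL;	/* name did not fit */

	return 0;
}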
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
new file mode 100644
index 000000000000..aa1f25add0b6
--- /dev/null
+++ b/drivers/hwmon/emc2305.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for EMC2305 fan controller
+ *
+ * Copyright (C) 2022 Nvidia Technologies Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_data/emc2305.h>
+#include <linux/thermal.h>
+
+static const unsigned short
+emc2305_normal_i2c[] = { 0x27, 0x2c, 0x2d, 0x2e, 0x2f, 0x4c, 0x4d, I2C_CLIENT_END };
+
+#define EMC2305_REG_DRIVE_FAIL_STATUS 0x27
+#define EMC2305_REG_DEVICE 0xfd
+#define EMC2305_REG_VENDOR 0xfe
+#define EMC2305_FAN_MAX 0xff
+#define EMC2305_FAN_MIN 0x00
+#define EMC2305_FAN_MAX_STATE 10
+#define EMC2305_DEVICE 0x34
+#define EMC2305_VENDOR 0x5d
+#define EMC2305_REG_PRODUCT_ID 0xfd
+#define EMC2305_TACH_REGS_UNUSE_BITS 3
+#define EMC2305_TACH_CNT_MULTIPLIER 0x02
+#define EMC2305_TACH_RANGE_MIN 480
+
+#define EMC2305_PWM_DUTY2STATE(duty, max_state, pwm_max) \
+ DIV_ROUND_CLOSEST((duty) * (max_state), (pwm_max))
+#define EMC2305_PWM_STATE2DUTY(state, max_state, pwm_max) \
+ DIV_ROUND_CLOSEST((state) * (pwm_max), (max_state))
+
+/*
+ * Factor by equations [2] and [3] from data sheet; valid for fans where the number of edges
+ * equals (poles * 2 + 1).
+ */
+#define EMC2305_RPM_FACTOR 3932160
+
+#define EMC2305_REG_FAN_DRIVE(n) (0x30 + 0x10 * (n))
+#define EMC2305_REG_FAN_MIN_DRIVE(n) (0x38 + 0x10 * (n))
+#define EMC2305_REG_FAN_TACH(n) (0x3e + 0x10 * (n))
+
+enum emc230x_product_id {
+ EMC2305 = 0x34,
+ EMC2303 = 0x35,
+ EMC2302 = 0x36,
+ EMC2301 = 0x37,
+};
+
+static const struct i2c_device_id emc2305_ids[] = {
+ { "emc2305", 0 },
+ { "emc2303", 0 },
+ { "emc2302", 0 },
+ { "emc2301", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, emc2305_ids);
+
+/**
+ * struct emc2305_cdev_data - cooling device state
+ * @cdev: cooling device;
+ * @cur_state: current cooling state;
+ * @last_hwmon_state: last cooling state set by the hwmon subsystem;
+ * @last_thermal_state: last cooling state set by the thermal subsystem;
+ *
+ * The 'last_hwmon_state' and 'last_thermal_state' fields support the fan low limit speed
+ * feature, which allows limiting the fan speed according to system-wide considerations such
+ * as the absence of replaceable units (PSU or line cards), high ambient temperature,
+ * unreliable transceiver temperature sensing, or other factors that indirectly affect the
+ * system's airflow.
+ * The fan low limit is exposed through the 'hwmon' interface: when the 'thermal' subsystem
+ * is configured in the kernel, the 'hwmon' 'pwm' attribute sets a lower bound on the fan
+ * speed, so the 'thermal' subsystem can never select a lower duty cycle than the one set
+ * via 'pwm'.
+ * Conversely, a 'pwm' write updates the hardware only when the requested speed is above the
+ * last speed set by the 'thermal' subsystem; otherwise the requested speed is just stored
+ * and no PWM update takes place.
+ */
+struct emc2305_cdev_data {
+ struct thermal_cooling_device *cdev;
+ unsigned int cur_state;
+ unsigned long last_hwmon_state;
+ unsigned long last_thermal_state;
+};
+
+/**
+ * struct emc2305_data - driver private data
+ * @client: i2c client;
+ * @hwmon_dev: hwmon device;
+ * @max_state: maximum cooling state of the cooling device;
+ * @pwm_num: number of PWM channels;
+ * @pwm_separate: separate PWM settings for every channel;
+ * @pwm_min: array of minimum PWM per channel;
+ * @cdev_data: array of cooling devices data;
+ */
+struct emc2305_data {
+ struct i2c_client *client;
+ struct device *hwmon_dev;
+ u8 max_state;
+ u8 pwm_num;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+ struct emc2305_cdev_data cdev_data[EMC2305_PWM_MAX];
+};
+
+static char *emc2305_fan_name[] = {
+ "emc2305_fan",
+ "emc2305_fan1",
+ "emc2305_fan2",
+ "emc2305_fan3",
+ "emc2305_fan4",
+ "emc2305_fan5",
+};
+
+static void emc2305_unset_tz(struct device *dev);
+
+static int emc2305_get_max_channel(const struct emc2305_data *data)
+{
+ return data->pwm_num;
+}
+
+static int emc2305_get_cdev_idx(struct thermal_cooling_device *cdev)
+{
+ struct emc2305_data *data = cdev->devdata;
+ size_t len = strlen(cdev->type);
+ int ret;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ /*
+ * Returns index of cooling device 0..4 in case of separate PWM setting.
+ * Zero index is used in case of one common PWM setting.
+ * If the mode is not set as pwm_separate, all PWMs are to be bound
+ * to the common thermal zone and should work at the same speed
+ * to perform cooling for the same thermal junction.
+ * Otherwise, return the specific channel used to bind the
+ * related PWM to its own thermal zone.
+ */
+ if (!data->pwm_separate)
+ return 0;
+
+ ret = cdev->type[len - 1];
+ switch (ret) {
+ case '1' ... '5':
+ return ret - '1';
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int emc2305_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ int cdev_idx;
+ struct emc2305_data *data = cdev->devdata;
+
+ cdev_idx = emc2305_get_cdev_idx(cdev);
+ if (cdev_idx < 0)
+ return cdev_idx;
+
+ *state = data->cdev_data[cdev_idx].cur_state;
+ return 0;
+}
+
+static int emc2305_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+ struct emc2305_data *data = cdev->devdata;
+ *state = data->max_state;
+ return 0;
+}
+
+static int emc2305_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
+{
+ int cdev_idx, ret;
+ struct emc2305_data *data = cdev->devdata;
+ struct i2c_client *client = data->client;
+ u8 val, i;
+
+ if (state > data->max_state)
+ return -EINVAL;
+
+ cdev_idx = emc2305_get_cdev_idx(cdev);
+ if (cdev_idx < 0)
+ return cdev_idx;
+
+ /* Save thermal state. */
+ data->cdev_data[cdev_idx].last_thermal_state = state;
+ state = max_t(unsigned long, state, data->cdev_data[cdev_idx].last_hwmon_state);
+
+ val = EMC2305_PWM_STATE2DUTY(state, data->max_state, EMC2305_FAN_MAX);
+
+ data->cdev_data[cdev_idx].cur_state = state;
+ if (data->pwm_separate) {
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_DRIVE(cdev_idx), val);
+ if (ret < 0)
+ return ret;
+ } else {
+ /*
+ * Set the same PWM value in all channels
+ * if common PWM channel is used.
+ */
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_DRIVE(i), val);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops emc2305_cooling_ops = {
+ .get_max_state = emc2305_get_max_state,
+ .get_cur_state = emc2305_get_cur_state,
+ .set_cur_state = emc2305_set_cur_state,
+};
+
+static int emc2305_show_fault(struct device *dev, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int status_reg;
+
+ status_reg = i2c_smbus_read_byte_data(client, EMC2305_REG_DRIVE_FAIL_STATUS);
+ if (status_reg < 0)
+ return status_reg;
+
+ return status_reg & (1 << channel) ? 1 : 0;
+}
+
+static int emc2305_show_fan(struct device *dev, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(client, EMC2305_REG_FAN_TACH(channel));
+ if (ret <= 0)
+ return ret;
+
+ ret = ret >> EMC2305_TACH_REGS_UNUSE_BITS;
+ ret = EMC2305_RPM_FACTOR / ret;
+ if (ret <= EMC2305_TACH_RANGE_MIN)
+ return 0;
+
+ return ret * EMC2305_TACH_CNT_MULTIPLIER;
+}
+
+static int emc2305_show_pwm(struct device *dev, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ return i2c_smbus_read_byte_data(client, EMC2305_REG_FAN_DRIVE(channel));
+}
+
+static int emc2305_set_pwm(struct device *dev, long val, int channel)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ if (val < data->pwm_min[channel] || val > EMC2305_FAN_MAX)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_DRIVE(channel), val);
+ if (ret < 0)
+ return ret;
+ data->cdev_data[channel].cur_state = EMC2305_PWM_DUTY2STATE(val, data->max_state,
+ EMC2305_FAN_MAX);
+ return 0;
+}
+
+static int emc2305_set_single_tz(struct device *dev, int idx)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ long pwm;
+ int i, cdev_idx, ret;
+
+ cdev_idx = (idx) ? idx - 1 : 0;
+ pwm = data->pwm_min[cdev_idx];
+
+ data->cdev_data[cdev_idx].cdev =
+ thermal_cooling_device_register(emc2305_fan_name[idx], data,
+ &emc2305_cooling_ops);
+
+ if (IS_ERR(data->cdev_data[cdev_idx].cdev)) {
+ dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]);
+ return PTR_ERR(data->cdev_data[cdev_idx].cdev);
+ }
+ /* Set minimal PWM speed. */
+ if (data->pwm_separate) {
+ ret = emc2305_set_pwm(dev, pwm, cdev_idx);
+ if (ret < 0)
+ return ret;
+ } else {
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = emc2305_set_pwm(dev, pwm, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ data->cdev_data[cdev_idx].cur_state =
+ EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
+ EMC2305_FAN_MAX);
+ data->cdev_data[cdev_idx].last_hwmon_state =
+ EMC2305_PWM_DUTY2STATE(data->pwm_min[cdev_idx], data->max_state,
+ EMC2305_FAN_MAX);
+ return 0;
+}
+
+static int emc2305_set_tz(struct device *dev)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (!data->pwm_separate)
+ return emc2305_set_single_tz(dev, 0);
+
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = emc2305_set_single_tz(dev, i + 1);
+ if (ret)
+ goto thermal_cooling_device_register_fail;
+ }
+ return 0;
+
+thermal_cooling_device_register_fail:
+ emc2305_unset_tz(dev);
+ return ret;
+}
+
+static void emc2305_unset_tz(struct device *dev)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ int i;
+
+ /* Unregister cooling device. */
+ for (i = 0; i < EMC2305_PWM_MAX; i++)
+ if (data->cdev_data[i].cdev)
+ thermal_cooling_device_unregister(data->cdev_data[i].cdev);
+}
+
+static umode_t
+emc2305_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel)
+{
+ int max_channel = emc2305_get_max_channel(data);
+
+ /* Don't show channels which are not physically connected. */
+ if (channel >= max_channel)
+ return 0;
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_fault:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+emc2305_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val)
+{
+ struct emc2305_data *data = dev_get_drvdata(dev);
+ int cdev_idx;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ /* If thermal is configured - handle PWM limit setting. */
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ if (data->pwm_separate)
+ cdev_idx = channel;
+ else
+ cdev_idx = 0;
+ data->cdev_data[cdev_idx].last_hwmon_state =
+ EMC2305_PWM_DUTY2STATE(val, data->max_state,
+ EMC2305_FAN_MAX);
+ /*
+ * Update PWM only in case requested state is not less than the
+ * last thermal state.
+ */
+ if (data->cdev_data[cdev_idx].last_hwmon_state >=
+ data->cdev_data[cdev_idx].last_thermal_state)
+ return emc2305_set_cur_state(data->cdev_data[cdev_idx].cdev,
+ data->cdev_data[cdev_idx].last_hwmon_state);
+ return 0;
+ }
+ return emc2305_set_pwm(dev, val, channel);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+emc2305_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = emc2305_show_fan(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ case hwmon_fan_fault:
+ ret = emc2305_show_fault(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = emc2305_show_pwm(dev, channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops emc2305_ops = {
+ .is_visible = emc2305_is_visible,
+ .read = emc2305_read,
+ .write = emc2305_write,
+};
+
+static const struct hwmon_channel_info *emc2305_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info emc2305_chip_info = {
+ .ops = &emc2305_ops,
+ .info = emc2305_info,
+};
+
+static int emc2305_identify(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct emc2305_data *data = i2c_get_clientdata(client);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, EMC2305_REG_PRODUCT_ID);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case EMC2305:
+ data->pwm_num = 5;
+ break;
+ case EMC2303:
+ data->pwm_num = 3;
+ break;
+ case EMC2302:
+ data->pwm_num = 2;
+ break;
+ case EMC2301:
+ data->pwm_num = 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int emc2305_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct device *dev = &client->dev;
+ struct emc2305_data *data;
+ struct emc2305_platform_data *pdata;
+ int vendor, device;
+ int ret;
+ int i;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ vendor = i2c_smbus_read_byte_data(client, EMC2305_REG_VENDOR);
+ if (vendor != EMC2305_VENDOR)
+ return -ENODEV;
+
+ device = i2c_smbus_read_byte_data(client, EMC2305_REG_DEVICE);
+ if (device != EMC2305_DEVICE)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ ret = emc2305_identify(dev);
+ if (ret)
+ return ret;
+
+ pdata = dev_get_platdata(&client->dev);
+ if (pdata) {
+ if (!pdata->max_state || pdata->max_state > EMC2305_FAN_MAX_STATE)
+ return -EINVAL;
+ data->max_state = pdata->max_state;
+ /*
+ * Validate the number of active PWM channels. Note that
+ * configured number can be less than the actual maximum
+ * supported by the device.
+ */
+ if (!pdata->pwm_num || pdata->pwm_num > EMC2305_PWM_MAX)
+ return -EINVAL;
+ data->pwm_num = pdata->pwm_num;
+ data->pwm_separate = pdata->pwm_separate;
+ for (i = 0; i < EMC2305_PWM_MAX; i++)
+ data->pwm_min[i] = pdata->pwm_min[i];
+ } else {
+ data->max_state = EMC2305_FAN_MAX_STATE;
+ data->pwm_separate = false;
+ for (i = 0; i < EMC2305_PWM_MAX; i++)
+ data->pwm_min[i] = EMC2305_FAN_MIN;
+ }
+
+ data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "emc2305", data,
+ &emc2305_chip_info, NULL);
+ if (IS_ERR(data->hwmon_dev))
+ return PTR_ERR(data->hwmon_dev);
+
+ if (IS_REACHABLE(CONFIG_THERMAL)) {
+ ret = emc2305_set_tz(dev);
+ if (ret != 0)
+ return ret;
+ }
+
+ for (i = 0; i < data->pwm_num; i++) {
+ ret = i2c_smbus_write_byte_data(client, EMC2305_REG_FAN_MIN_DRIVE(i),
+ data->pwm_min[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void emc2305_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ emc2305_unset_tz(dev);
+}
+
+static struct i2c_driver emc2305_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "emc2305",
+ },
+ .probe = emc2305_probe,
+ .remove = emc2305_remove,
+ .id_table = emc2305_ids,
+ .address_list = emc2305_normal_i2c,
+};
+
+module_i2c_driver(emc2305_driver);
+
+MODULE_AUTHOR("Nvidia");
+MODULE_DESCRIPTION("Microchip EMC2305 fan controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 29082c8463f4..bcd93f0fe982 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -439,7 +439,7 @@ static int emc6w201_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "emc6w201", I2C_NAME_SIZE);
+ strscpy(info->type, "emc6w201", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 19b6c643059a..70121482a617 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -237,13 +237,6 @@ static const char f71882fg_nr_temps[] = {
static struct platform_device *f71882fg_pdev;
-/* Super-I/O Function prototypes */
-static inline int superio_inb(int base, int reg);
-static inline int superio_inw(int base, int reg);
-static inline int superio_enter(int base);
-static inline void superio_select(int base, int ld);
-static inline void superio_exit(int base);
-
struct f71882fg_sio_data {
enum chips type;
};
@@ -292,108 +285,422 @@ struct f71882fg_data {
s8 pwm_auto_point_temp[4][4];
};
-/* Sysfs in */
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf);
-static ssize_t show_in_max(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_in_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_in_beep(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_in_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_in_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf);
-/* Sysfs Fan */
-static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
- char *buf);
-static ssize_t show_fan_full_speed(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_fan_full_speed(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_fan_beep(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_fan_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf);
-/* Sysfs Temp */
-static ssize_t show_temp(struct device *dev, struct device_attribute
- *devattr, char *buf);
+static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg)
+{
+ u8 val;
+
+ outb(reg, data->addr + ADDR_REG_OFFSET);
+ val = inb(data->addr + DATA_REG_OFFSET);
+
+ return val;
+}
+
+static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
+{
+ u16 val;
+
+ val = f71882fg_read8(data, reg) << 8;
+ val |= f71882fg_read8(data, reg + 1);
+
+ return val;
+}
+
+static inline int fan_from_reg(u16 reg)
+{
+ return reg ? (1500000 / reg) : 0;
+}
+
+static inline u16 fan_to_reg(int fan)
+{
+ return fan ? (1500000 / fan) : 0;
+}
+
+static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
+{
+ outb(reg, data->addr + ADDR_REG_OFFSET);
+ outb(val, data->addr + DATA_REG_OFFSET);
+}
+
+static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
+{
+ f71882fg_write8(data, reg, val >> 8);
+ f71882fg_write8(data, reg + 1, val & 0xff);
+}
+
+static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
+{
+ if (data->type == f71858fg)
+ return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
+ else
+ return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
+}
+
+static struct f71882fg_data *f71882fg_update_device(struct device *dev)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int nr_fans = f71882fg_nr_fans[data->type];
+ int nr_temps = f71882fg_nr_temps[data->type];
+ int nr, reg, point;
+
+ mutex_lock(&data->update_lock);
+
+ /* Update once every 60 seconds */
+ if (time_after(jiffies, data->last_limits + 60 * HZ) ||
+ !data->valid) {
+ if (f71882fg_has_in1_alarm[data->type]) {
+ if (data->type == f81866a) {
+ data->in1_max =
+ f71882fg_read8(data,
+ F81866_REG_IN1_HIGH);
+ data->in_beep =
+ f71882fg_read8(data,
+ F81866_REG_IN_BEEP);
+ } else {
+ data->in1_max =
+ f71882fg_read8(data,
+ F71882FG_REG_IN1_HIGH);
+ data->in_beep =
+ f71882fg_read8(data,
+ F71882FG_REG_IN_BEEP);
+ }
+ }
+
+ /* Get High & boundary temps*/
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++) {
+ data->temp_ovt[nr] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_OVT(nr));
+ data->temp_high[nr] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_HIGH(nr));
+ }
+
+ if (data->type != f8000) {
+ data->temp_hyst[0] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_HYST(0));
+ data->temp_hyst[1] = f71882fg_read8(data,
+ F71882FG_REG_TEMP_HYST(1));
+ }
+ /* All but the f71858fg / f8000 have this register */
+ if ((data->type != f71858fg) && (data->type != f8000)) {
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+ data->temp_type[1] = (reg & 0x02) ? 2 : 4;
+ data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+ data->temp_type[3] = (reg & 0x08) ? 2 : 4;
+ }
+
+ if (f71882fg_fan_has_beep[data->type])
+ data->fan_beep = f71882fg_read8(data,
+ F71882FG_REG_FAN_BEEP);
+
+ if (f71882fg_temp_has_beep[data->type])
+ data->temp_beep = f71882fg_read8(data,
+ F71882FG_REG_TEMP_BEEP);
+
+ data->pwm_enable = f71882fg_read8(data,
+ F71882FG_REG_PWM_ENABLE);
+ data->pwm_auto_point_hyst[0] =
+ f71882fg_read8(data, F71882FG_REG_FAN_HYST(0));
+ data->pwm_auto_point_hyst[1] =
+ f71882fg_read8(data, F71882FG_REG_FAN_HYST(1));
+
+ for (nr = 0; nr < nr_fans; nr++) {
+ data->pwm_auto_point_mapping[nr] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_MAPPING(nr));
+
+ switch (data->type) {
+ default:
+ for (point = 0; point < 5; point++) {
+ data->pwm_auto_point_pwm[nr][point] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM
+ (nr, point));
+ }
+ for (point = 0; point < 4; point++) {
+ data->pwm_auto_point_temp[nr][point] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_TEMP
+ (nr, point));
+ }
+ break;
+ case f71808e:
+ case f71869:
+ data->pwm_auto_point_pwm[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM(nr, 0));
+ fallthrough;
+ case f71862fg:
+ data->pwm_auto_point_pwm[nr][1] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM
+ (nr, 1));
+ data->pwm_auto_point_pwm[nr][4] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM
+ (nr, 4));
+ data->pwm_auto_point_temp[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_TEMP
+ (nr, 0));
+ data->pwm_auto_point_temp[nr][3] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_TEMP
+ (nr, 3));
+ break;
+ }
+ }
+ data->last_limits = jiffies;
+ }
+
+ /* Update every second */
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->temp_status = f71882fg_read8(data,
+ F71882FG_REG_TEMP_STATUS);
+ data->temp_diode_open = f71882fg_read8(data,
+ F71882FG_REG_TEMP_DIODE_OPEN);
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++)
+ data->temp[nr] = f71882fg_read_temp(data, nr);
+
+ data->fan_status = f71882fg_read8(data,
+ F71882FG_REG_FAN_STATUS);
+ for (nr = 0; nr < nr_fans; nr++) {
+ data->fan[nr] = f71882fg_read16(data,
+ F71882FG_REG_FAN(nr));
+ data->fan_target[nr] =
+ f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr));
+ data->fan_full_speed[nr] =
+ f71882fg_read16(data,
+ F71882FG_REG_FAN_FULL_SPEED(nr));
+ data->pwm[nr] =
+ f71882fg_read8(data, F71882FG_REG_PWM(nr));
+ }
+ /* Some models have 1 more fan with limited capabilities */
+ if (data->type == f71808a) {
+ data->fan[2] = f71882fg_read16(data,
+ F71882FG_REG_FAN(2));
+ data->pwm[2] = f71882fg_read8(data,
+ F71882FG_REG_PWM(2));
+ }
+ if (data->type == f8000)
+ data->fan[3] = f71882fg_read16(data,
+ F71882FG_REG_FAN(3));
+
+ if (f71882fg_has_in1_alarm[data->type]) {
+ if (data->type == f81866a)
+ data->in_status = f71882fg_read8(data,
+ F81866_REG_IN_STATUS);
+
+ else
+ data->in_status = f71882fg_read8(data,
+ F71882FG_REG_IN_STATUS);
+ }
+
+ for (nr = 0; nr < F71882FG_MAX_INS; nr++)
+ if (f71882fg_has_in[data->type][nr])
+ data->in[nr] = f71882fg_read8(data,
+ F71882FG_REG_IN(nr));
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", f71882fg_names[data->type]);
+}
+
+static DEVICE_ATTR_RO(name);
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int sign, temp;
+
+ if (data->type == f71858fg) {
+ /* TEMP_TABLE_SEL 1 or 3 ? */
+ if (data->temp_config & 1) {
+ sign = data->temp[nr] & 0x0001;
+ temp = (data->temp[nr] >> 5) & 0x7ff;
+ } else {
+ sign = data->temp[nr] & 0x8000;
+ temp = (data->temp[nr] >> 5) & 0x3ff;
+ }
+ temp *= 125;
+ if (sign)
+ temp -= 128000;
+ } else {
+ temp = ((s8)data->temp[nr]) * 1000;
+ }
+
+ return sprintf(buf, "%d\n", temp);
+}
+
static ssize_t show_temp_max(struct device *dev, struct device_attribute
- *devattr, char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp_high[nr] * 1000);
+}
+
static ssize_t store_temp_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
+ data->temp_high[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int temp_max_hyst;
+
+ mutex_lock(&data->update_lock);
+ if (nr & 1)
+ temp_max_hyst = data->temp_hyst[nr / 2] >> 4;
+ else
+ temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f;
+ temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000;
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", temp_max_hyst);
+}
+
static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ ssize_t ret = count;
+ u8 reg;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+
+ mutex_lock(&data->update_lock);
+
+ /* convert abs to relative and check */
+ data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
+ val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
+ val = data->temp_high[nr] - val;
+
+ /* convert value to register contents */
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2));
+ if (nr & 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+ f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg);
+ data->temp_hyst[nr / 2] = reg;
+
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->temp_status & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
static ssize_t show_temp_crit(struct device *dev, struct device_attribute
- *devattr, char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000);
+}
+
static ssize_t store_temp_crit(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
+ data->temp_ovt[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t show_temp_type(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t show_temp_beep(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t store_temp_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count);
-static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf);
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf);
-/* PWM and Auto point control */
-static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
- char *buf);
-static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count);
-static ssize_t show_simple_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_simple_pwm(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_enable(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-static ssize_t show_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr, char *buf);
-static ssize_t store_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr, const char *buf, size_t count);
-/* Sysfs misc */
-static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
- char *buf);
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int temp_crit_hyst;
-static int f71882fg_probe(struct platform_device *pdev);
-static int f71882fg_remove(struct platform_device *pdev);
+ mutex_lock(&data->update_lock);
+ if (nr & 1)
+ temp_crit_hyst = data->temp_hyst[nr / 2] >> 4;
+ else
+ temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f;
+ temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000;
+ mutex_unlock(&data->update_lock);
-static struct platform_driver f71882fg_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = f71882fg_probe,
- .remove = f71882fg_remove,
-};
+ return sprintf(buf, "%d\n", temp_crit_hyst);
+}
-static DEVICE_ATTR_RO(name);
+static ssize_t show_temp_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->temp_diode_open & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
/*
* Temp attr for the f71858fg, the f71858fg is special as it has its
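The hysteresis handling moved into place above packs two temperature channels into each F71882FG_REG_TEMP_HYST register, one 4-bit value per nibble, stored as an offset below the corresponding limit. The sketch below spells out that packing; hyst_unpack()/hyst_pack() are illustrative helpers, not part of the driver.

#include <linux/types.h>

/* Extract the relative hysteresis (0..15 degC) for temp channel nr. */
static inline u8 hyst_unpack(u8 reg, int nr)
{
	return (nr & 1) ? reg >> 4 : reg & 0x0f;
}

/* Update only the nibble that belongs to temp channel nr. */
static inline u8 hyst_pack(u8 reg, int nr, u8 rel)
{
	return (nr & 1) ? (reg & 0x0f) | (rel << 4) : (reg & 0xf0) | rel;
}

/*
 * Sysfs exposes absolute millidegrees while the register stores the
 * distance below the limit: with temp1_max = 70 degC, a requested
 * hysteresis of 65 degC is written as 70 - 65 = 5 and read back as
 * (70 - 5) * 1000 = 65000.
 */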
@@ -438,6 +745,15 @@ static struct sensor_device_attribute_2 f71858fg_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
+static ssize_t show_temp_type(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp_type[nr]);
+}
+
/* Temp attr for the standard models */
static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
@@ -490,6 +806,42 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
} };
+static ssize_t show_temp_beep(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->temp_beep & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_temp_beep(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
+ if (val)
+ data->temp_beep |= 1 << nr;
+ else
+ data->temp_beep &= ~(1 << nr);
+
+ f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Temp attr for models which can beep on temp alarm */
static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
@@ -555,6 +907,15 @@ static struct sensor_device_attribute_2 f8000_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
+static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->in[nr] * 8);
+}
+
/* in attr for all models */
static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
@@ -570,6 +931,94 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
SENSOR_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 0, 10),
};
+static ssize_t show_in_max(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+
+ return sprintf(buf, "%d\n", data->in1_max * 8);
+}
+
+static ssize_t store_in_max(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 8;
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ if (data->type == f81866a)
+ f71882fg_write8(data, F81866_REG_IN1_HIGH, val);
+ else
+ f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
+ data->in1_max = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_in_beep(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->in_beep & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_in_beep(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ if (data->type == f81866a)
+ data->in_beep = f71882fg_read8(data, F81866_REG_IN_BEEP);
+ else
+ data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+
+ if (val)
+ data->in_beep |= 1 << nr;
+ else
+ data->in_beep &= ~(1 << nr);
+
+ if (data->type == f81866a)
+ f71882fg_write8(data, F81866_REG_IN_BEEP, data->in_beep);
+ else
+ f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_in_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->in_status & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
/* For models with in1 alarm capability */
static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
@@ -579,6 +1028,242 @@ static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
};
+static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int speed = fan_from_reg(data->fan[nr]);
+
+ if (speed == FAN_MIN_DETECT)
+ speed = 0;
+
+ return sprintf(buf, "%d\n", speed);
+}
+
+static ssize_t show_fan_full_speed(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int speed = fan_from_reg(data->fan_full_speed[nr]);
+ return sprintf(buf, "%d\n", speed);
+}
+
+static ssize_t store_fan_full_speed(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 23, 1500000);
+ val = fan_to_reg(val);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val);
+ data->fan_full_speed[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->fan_status & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int val, nr = to_sensor_dev_attr_2(devattr)->index;
+ mutex_lock(&data->update_lock);
+ if (data->pwm_enable & (1 << (2 * nr)))
+ /* PWM mode */
+ val = data->pwm[nr];
+ else {
+ /* RPM mode */
+ val = 255 * fan_from_reg(data->fan_target[nr])
+ / fan_from_reg(data->fan_full_speed[nr]);
+ }
+ mutex_unlock(&data->update_lock);
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+ if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) ||
+ (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) {
+ count = -EROFS;
+ goto leave;
+ }
+ if (data->pwm_enable & (1 << (2 * nr))) {
+ /* PWM mode */
+ f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
+ data->pwm[nr] = val;
+ } else {
+ /* RPM mode */
+ int target, full_speed;
+ full_speed = f71882fg_read16(data,
+ F71882FG_REG_FAN_FULL_SPEED(nr));
+ target = fan_to_reg(val * fan_from_reg(full_speed) / 255);
+ f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target);
+ data->fan_target[nr] = target;
+ data->fan_full_speed[nr] = full_speed;
+ }
+leave:
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int result = 0;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ switch ((data->pwm_enable >> 2 * nr) & 3) {
+ case 0:
+ case 1:
+ result = 2; /* Normal auto mode */
+ break;
+ case 2:
+ result = 1; /* Manual mode */
+ break;
+ case 3:
+ if (data->type == f8000)
+ result = 3; /* Thermostat mode */
+ else
+ result = 1; /* Manual mode */
+ break;
+ }
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ /* Special case for F8000 pwm channel 3 which only does auto mode */
+ if (data->type == f8000 && nr == 2 && val != 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+ /* Special case for F8000 auto PWM mode / Thermostat mode */
+ if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) {
+ switch (val) {
+ case 2:
+ data->pwm_enable &= ~(2 << (2 * nr));
+ break; /* Normal auto mode */
+ case 3:
+ data->pwm_enable |= 2 << (2 * nr);
+ break; /* Thermostat mode */
+ default:
+ count = -EINVAL;
+ goto leave;
+ }
+ } else {
+ switch (val) {
+ case 1:
+ /* The f71858fg does not support manual RPM mode */
+ if (data->type == f71858fg &&
+ ((data->pwm_enable >> (2 * nr)) & 1)) {
+ count = -EINVAL;
+ goto leave;
+ }
+ data->pwm_enable |= 2 << (2 * nr);
+ break; /* Manual */
+ case 2:
+ data->pwm_enable &= ~(2 << (2 * nr));
+ break; /* Normal auto mode */
+ default:
+ count = -EINVAL;
+ goto leave;
+ }
+ }
+ f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable);
+leave:
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_interpolate(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ result = (data->pwm_auto_point_mapping[nr] >> 4) & 1;
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_interpolate(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->pwm_auto_point_mapping[nr] =
+ f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
+ if (val)
+ val = data->pwm_auto_point_mapping[nr] | (1 << 4);
+ else
+ val = data->pwm_auto_point_mapping[nr] & (~(1 << 4));
+ f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
+ data->pwm_auto_point_mapping[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Fan / PWM attr common to all models */
static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
@@ -626,6 +1311,38 @@ static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
show_pwm_interpolate, store_pwm_interpolate, 0, 3),
} };
+static ssize_t show_simple_pwm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int val, nr = to_sensor_dev_attr_2(devattr)->index;
+
+ val = data->pwm[nr];
+ return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_simple_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
+ data->pwm[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Attr for the third fan of the f71808a, which only has manual pwm */
static struct sensor_device_attribute_2 f71808a_fan3_attr[] = {
SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2),
@@ -634,6 +1351,42 @@ static struct sensor_device_attribute_2 f71808a_fan3_attr[] = {
show_simple_pwm, store_simple_pwm, 0, 2),
};
+static ssize_t show_fan_beep(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ if (data->fan_beep & (1 << nr))
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t store_fan_beep(struct device *dev, struct device_attribute
+ *devattr, const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ unsigned long val;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
+ if (val)
+ data->fan_beep |= 1 << nr;
+ else
+ data->fan_beep &= ~(1 << nr);
+
+ f71882fg_write8(data, F71882FG_REG_FAN_BEEP, data->fan_beep);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/* Attr for models which can beep on Fan alarm */
static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep,
@@ -646,6 +1399,209 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
store_fan_beep, 0, 3),
};
+static ssize_t show_pwm_auto_point_channel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+
+ result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) -
+ data->temp_start);
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_channel(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ switch (val) {
+ case 1:
+ val = 0;
+ break;
+ case 2:
+ val = 1;
+ break;
+ case 4:
+ val = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val += data->temp_start;
+ mutex_lock(&data->update_lock);
+ data->pwm_auto_point_mapping[nr] =
+ f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
+ val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val;
+ f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
+ data->pwm_auto_point_mapping[nr] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+
+ mutex_lock(&data->update_lock);
+ if (data->pwm_enable & (1 << (2 * pwm))) {
+ /* PWM mode */
+ result = data->pwm_auto_point_pwm[pwm][point];
+ } else {
+ /* RPM mode */
+ result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]);
+ }
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_pwm(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = clamp_val(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
+ if (data->pwm_enable & (1 << (2 * pwm))) {
+ /* PWM mode */
+ } else {
+ /* RPM mode */
+ if (val < 29) /* Prevent negative numbers */
+ val = 255;
+ else
+ val = (255 - val) * 32 / val;
+ }
+ f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val);
+ data->pwm_auto_point_pwm[pwm][point] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_temp(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+
+ result = data->pwm_auto_point_temp[pwm][point];
+ return sprintf(buf, "%d\n", 1000 * result);
+}
+
+static ssize_t store_pwm_auto_point_temp(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, pwm = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+
+ if (data->auto_point_temp_signed)
+ val = clamp_val(val, -128, 127);
+ else
+ val = clamp_val(val, 0, 127);
+
+ mutex_lock(&data->update_lock);
+ f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
+ data->pwm_auto_point_temp[pwm][point] = val;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int result = 0;
+ struct f71882fg_data *data = f71882fg_update_device(dev);
+ int nr = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+
+ mutex_lock(&data->update_lock);
+ if (nr & 1)
+ result = data->pwm_auto_point_hyst[nr / 2] >> 4;
+ else
+ result = data->pwm_auto_point_hyst[nr / 2] & 0x0f;
+ result = 1000 * (data->pwm_auto_point_temp[nr][point] - result);
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct f71882fg_data *data = dev_get_drvdata(dev);
+ int err, nr = to_sensor_dev_attr_2(devattr)->index;
+ int point = to_sensor_dev_attr_2(devattr)->nr;
+ u8 reg;
+ long val;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ val /= 1000;
+
+ mutex_lock(&data->update_lock);
+ data->pwm_auto_point_temp[nr][point] =
+ f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
+ val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
+ data->pwm_auto_point_temp[nr][point]);
+ val = data->pwm_auto_point_temp[nr][point] - val;
+
+ reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
+ if (nr & 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+
+ f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg);
+ data->pwm_auto_point_hyst[nr / 2] = reg;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
/*
* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
* standard models
@@ -1144,1071 +2100,6 @@ static inline void superio_exit(int base)
release_region(base, 2);
}
-static inline int fan_from_reg(u16 reg)
-{
- return reg ? (1500000 / reg) : 0;
-}
-
-static inline u16 fan_to_reg(int fan)
-{
- return fan ? (1500000 / fan) : 0;
-}
-
-static u8 f71882fg_read8(struct f71882fg_data *data, u8 reg)
-{
- u8 val;
-
- outb(reg, data->addr + ADDR_REG_OFFSET);
- val = inb(data->addr + DATA_REG_OFFSET);
-
- return val;
-}
-
-static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
-{
- u16 val;
-
- val = f71882fg_read8(data, reg) << 8;
- val |= f71882fg_read8(data, reg + 1);
-
- return val;
-}
-
-static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
-{
- outb(reg, data->addr + ADDR_REG_OFFSET);
- outb(val, data->addr + DATA_REG_OFFSET);
-}
-
-static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
-{
- f71882fg_write8(data, reg, val >> 8);
- f71882fg_write8(data, reg + 1, val & 0xff);
-}
-
-static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
-{
- if (data->type == f71858fg)
- return f71882fg_read16(data, F71882FG_REG_TEMP(nr));
- else
- return f71882fg_read8(data, F71882FG_REG_TEMP(nr));
-}
-
-static struct f71882fg_data *f71882fg_update_device(struct device *dev)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr_fans = f71882fg_nr_fans[data->type];
- int nr_temps = f71882fg_nr_temps[data->type];
- int nr, reg, point;
-
- mutex_lock(&data->update_lock);
-
- /* Update once every 60 seconds */
- if (time_after(jiffies, data->last_limits + 60 * HZ) ||
- !data->valid) {
- if (f71882fg_has_in1_alarm[data->type]) {
- if (data->type == f81866a) {
- data->in1_max =
- f71882fg_read8(data,
- F81866_REG_IN1_HIGH);
- data->in_beep =
- f71882fg_read8(data,
- F81866_REG_IN_BEEP);
- } else {
- data->in1_max =
- f71882fg_read8(data,
- F71882FG_REG_IN1_HIGH);
- data->in_beep =
- f71882fg_read8(data,
- F71882FG_REG_IN_BEEP);
- }
- }
-
- /* Get High & boundary temps*/
- for (nr = data->temp_start; nr < nr_temps + data->temp_start;
- nr++) {
- data->temp_ovt[nr] = f71882fg_read8(data,
- F71882FG_REG_TEMP_OVT(nr));
- data->temp_high[nr] = f71882fg_read8(data,
- F71882FG_REG_TEMP_HIGH(nr));
- }
-
- if (data->type != f8000) {
- data->temp_hyst[0] = f71882fg_read8(data,
- F71882FG_REG_TEMP_HYST(0));
- data->temp_hyst[1] = f71882fg_read8(data,
- F71882FG_REG_TEMP_HYST(1));
- }
- /* All but the f71858fg / f8000 have this register */
- if ((data->type != f71858fg) && (data->type != f8000)) {
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- data->temp_type[2] = (reg & 0x04) ? 2 : 4;
- data->temp_type[3] = (reg & 0x08) ? 2 : 4;
- }
-
- if (f71882fg_fan_has_beep[data->type])
- data->fan_beep = f71882fg_read8(data,
- F71882FG_REG_FAN_BEEP);
-
- if (f71882fg_temp_has_beep[data->type])
- data->temp_beep = f71882fg_read8(data,
- F71882FG_REG_TEMP_BEEP);
-
- data->pwm_enable = f71882fg_read8(data,
- F71882FG_REG_PWM_ENABLE);
- data->pwm_auto_point_hyst[0] =
- f71882fg_read8(data, F71882FG_REG_FAN_HYST(0));
- data->pwm_auto_point_hyst[1] =
- f71882fg_read8(data, F71882FG_REG_FAN_HYST(1));
-
- for (nr = 0; nr < nr_fans; nr++) {
- data->pwm_auto_point_mapping[nr] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_MAPPING(nr));
-
- switch (data->type) {
- default:
- for (point = 0; point < 5; point++) {
- data->pwm_auto_point_pwm[nr][point] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM
- (nr, point));
- }
- for (point = 0; point < 4; point++) {
- data->pwm_auto_point_temp[nr][point] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_TEMP
- (nr, point));
- }
- break;
- case f71808e:
- case f71869:
- data->pwm_auto_point_pwm[nr][0] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM(nr, 0));
- fallthrough;
- case f71862fg:
- data->pwm_auto_point_pwm[nr][1] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM
- (nr, 1));
- data->pwm_auto_point_pwm[nr][4] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_PWM
- (nr, 4));
- data->pwm_auto_point_temp[nr][0] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_TEMP
- (nr, 0));
- data->pwm_auto_point_temp[nr][3] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_TEMP
- (nr, 3));
- break;
- }
- }
- data->last_limits = jiffies;
- }
-
- /* Update every second */
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- data->temp_status = f71882fg_read8(data,
- F71882FG_REG_TEMP_STATUS);
- data->temp_diode_open = f71882fg_read8(data,
- F71882FG_REG_TEMP_DIODE_OPEN);
- for (nr = data->temp_start; nr < nr_temps + data->temp_start;
- nr++)
- data->temp[nr] = f71882fg_read_temp(data, nr);
-
- data->fan_status = f71882fg_read8(data,
- F71882FG_REG_FAN_STATUS);
- for (nr = 0; nr < nr_fans; nr++) {
- data->fan[nr] = f71882fg_read16(data,
- F71882FG_REG_FAN(nr));
- data->fan_target[nr] =
- f71882fg_read16(data, F71882FG_REG_FAN_TARGET(nr));
- data->fan_full_speed[nr] =
- f71882fg_read16(data,
- F71882FG_REG_FAN_FULL_SPEED(nr));
- data->pwm[nr] =
- f71882fg_read8(data, F71882FG_REG_PWM(nr));
- }
- /* Some models have 1 more fan with limited capabilities */
- if (data->type == f71808a) {
- data->fan[2] = f71882fg_read16(data,
- F71882FG_REG_FAN(2));
- data->pwm[2] = f71882fg_read8(data,
- F71882FG_REG_PWM(2));
- }
- if (data->type == f8000)
- data->fan[3] = f71882fg_read16(data,
- F71882FG_REG_FAN(3));
-
- if (f71882fg_has_in1_alarm[data->type]) {
- if (data->type == f81866a)
- data->in_status = f71882fg_read8(data,
- F81866_REG_IN_STATUS);
-
- else
- data->in_status = f71882fg_read8(data,
- F71882FG_REG_IN_STATUS);
- }
-
- for (nr = 0; nr < F71882FG_MAX_INS; nr++)
- if (f71882fg_has_in[data->type][nr])
- data->in[nr] = f71882fg_read8(data,
- F71882FG_REG_IN(nr));
-
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
-/* Sysfs Interface */
-static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int speed = fan_from_reg(data->fan[nr]);
-
- if (speed == FAN_MIN_DETECT)
- speed = 0;
-
- return sprintf(buf, "%d\n", speed);
-}
-
-static ssize_t show_fan_full_speed(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int speed = fan_from_reg(data->fan_full_speed[nr]);
- return sprintf(buf, "%d\n", speed);
-}
-
-static ssize_t store_fan_full_speed(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 23, 1500000);
- val = fan_to_reg(val);
-
- mutex_lock(&data->update_lock);
- f71882fg_write16(data, F71882FG_REG_FAN_FULL_SPEED(nr), val);
- data->fan_full_speed[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_fan_beep(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->fan_beep & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t store_fan_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
- if (val)
- data->fan_beep |= 1 << nr;
- else
- data->fan_beep &= ~(1 << nr);
-
- f71882fg_write8(data, F71882FG_REG_FAN_BEEP, data->fan_beep);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->fan_status & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->in[nr] * 8);
-}
-
-static ssize_t show_in_max(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
-
- return sprintf(buf, "%d\n", data->in1_max * 8);
-}
-
-static ssize_t store_in_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 8;
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- if (data->type == f81866a)
- f71882fg_write8(data, F81866_REG_IN1_HIGH, val);
- else
- f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
- data->in1_max = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_in_beep(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->in_beep & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t store_in_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- if (data->type == f81866a)
- data->in_beep = f71882fg_read8(data, F81866_REG_IN_BEEP);
- else
- data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
-
- if (val)
- data->in_beep |= 1 << nr;
- else
- data->in_beep &= ~(1 << nr);
-
- if (data->type == f81866a)
- f71882fg_write8(data, F81866_REG_IN_BEEP, data->in_beep);
- else
- f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_in_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->in_status & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int sign, temp;
-
- if (data->type == f71858fg) {
- /* TEMP_TABLE_SEL 1 or 3 ? */
- if (data->temp_config & 1) {
- sign = data->temp[nr] & 0x0001;
- temp = (data->temp[nr] >> 5) & 0x7ff;
- } else {
- sign = data->temp[nr] & 0x8000;
- temp = (data->temp[nr] >> 5) & 0x3ff;
- }
- temp *= 125;
- if (sign)
- temp -= 128000;
- } else {
- temp = ((s8)data->temp[nr]) * 1000;
- }
-
- return sprintf(buf, "%d\n", temp);
-}
-
-static ssize_t show_temp_max(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->temp_high[nr] * 1000);
-}
-
-static ssize_t store_temp_max(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
- data->temp_high[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int temp_max_hyst;
-
- mutex_lock(&data->update_lock);
- if (nr & 1)
- temp_max_hyst = data->temp_hyst[nr / 2] >> 4;
- else
- temp_max_hyst = data->temp_hyst[nr / 2] & 0x0f;
- temp_max_hyst = (data->temp_high[nr] - temp_max_hyst) * 1000;
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", temp_max_hyst);
-}
-
-static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- ssize_t ret = count;
- u8 reg;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
-
- mutex_lock(&data->update_lock);
-
- /* convert abs to relative and check */
- data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
- val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
- val = data->temp_high[nr] - val;
-
- /* convert value to register contents */
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(nr / 2));
- if (nr & 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
- f71882fg_write8(data, F71882FG_REG_TEMP_HYST(nr / 2), reg);
- data->temp_hyst[nr / 2] = reg;
-
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
-static ssize_t show_temp_crit(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->temp_ovt[nr] * 1000);
-}
-
-static ssize_t store_temp_crit(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
- data->temp_ovt[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_temp_crit_hyst(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int temp_crit_hyst;
-
- mutex_lock(&data->update_lock);
- if (nr & 1)
- temp_crit_hyst = data->temp_hyst[nr / 2] >> 4;
- else
- temp_crit_hyst = data->temp_hyst[nr / 2] & 0x0f;
- temp_crit_hyst = (data->temp_ovt[nr] - temp_crit_hyst) * 1000;
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", temp_crit_hyst);
-}
-
-static ssize_t show_temp_type(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- return sprintf(buf, "%d\n", data->temp_type[nr]);
-}
-
-static ssize_t show_temp_beep(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->temp_beep & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t store_temp_beep(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
- if (val)
- data->temp_beep |= 1 << nr;
- else
- data->temp_beep &= ~(1 << nr);
-
- f71882fg_write8(data, F71882FG_REG_TEMP_BEEP, data->temp_beep);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->temp_status & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_temp_fault(struct device *dev, struct device_attribute
- *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- if (data->temp_diode_open & (1 << nr))
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int val, nr = to_sensor_dev_attr_2(devattr)->index;
- mutex_lock(&data->update_lock);
- if (data->pwm_enable & (1 << (2 * nr)))
- /* PWM mode */
- val = data->pwm[nr];
- else {
- /* RPM mode */
- val = 255 * fan_from_reg(data->fan_target[nr])
- / fan_from_reg(data->fan_full_speed[nr]);
- }
- mutex_unlock(&data->update_lock);
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t store_pwm(struct device *dev,
- struct device_attribute *devattr, const char *buf,
- size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- if ((data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 3) != 2) ||
- (data->type != f8000 && !((data->pwm_enable >> 2 * nr) & 2))) {
- count = -EROFS;
- goto leave;
- }
- if (data->pwm_enable & (1 << (2 * nr))) {
- /* PWM mode */
- f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
- data->pwm[nr] = val;
- } else {
- /* RPM mode */
- int target, full_speed;
- full_speed = f71882fg_read16(data,
- F71882FG_REG_FAN_FULL_SPEED(nr));
- target = fan_to_reg(val * fan_from_reg(full_speed) / 255);
- f71882fg_write16(data, F71882FG_REG_FAN_TARGET(nr), target);
- data->fan_target[nr] = target;
- data->fan_full_speed[nr] = full_speed;
- }
-leave:
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_simple_pwm(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int val, nr = to_sensor_dev_attr_2(devattr)->index;
-
- val = data->pwm[nr];
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t store_simple_pwm(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
- data->pwm[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int result = 0;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- switch ((data->pwm_enable >> 2 * nr) & 3) {
- case 0:
- case 1:
- result = 2; /* Normal auto mode */
- break;
- case 2:
- result = 1; /* Manual mode */
- break;
- case 3:
- if (data->type == f8000)
- result = 3; /* Thermostat mode */
- else
- result = 1; /* Manual mode */
- break;
- }
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- /* Special case for F8000 pwm channel 3 which only does auto mode */
- if (data->type == f8000 && nr == 2 && val != 2)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- /* Special case for F8000 auto PWM mode / Thermostat mode */
- if (data->type == f8000 && ((data->pwm_enable >> 2 * nr) & 1)) {
- switch (val) {
- case 2:
- data->pwm_enable &= ~(2 << (2 * nr));
- break; /* Normal auto mode */
- case 3:
- data->pwm_enable |= 2 << (2 * nr);
- break; /* Thermostat mode */
- default:
- count = -EINVAL;
- goto leave;
- }
- } else {
- switch (val) {
- case 1:
- /* The f71858fg does not support manual RPM mode */
- if (data->type == f71858fg &&
- ((data->pwm_enable >> (2 * nr)) & 1)) {
- count = -EINVAL;
- goto leave;
- }
- data->pwm_enable |= 2 << (2 * nr);
- break; /* Manual */
- case 2:
- data->pwm_enable &= ~(2 << (2 * nr));
- break; /* Normal auto mode */
- default:
- count = -EINVAL;
- goto leave;
- }
- }
- f71882fg_write8(data, F71882FG_REG_PWM_ENABLE, data->pwm_enable);
-leave:
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
-
- mutex_lock(&data->update_lock);
- if (data->pwm_enable & (1 << (2 * pwm))) {
- /* PWM mode */
- result = data->pwm_auto_point_pwm[pwm][point];
- } else {
- /* RPM mode */
- result = 32 * 255 / (32 + data->pwm_auto_point_pwm[pwm][point]);
- }
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_auto_point_pwm(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val = clamp_val(val, 0, 255);
-
- mutex_lock(&data->update_lock);
- data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
- if (data->pwm_enable & (1 << (2 * pwm))) {
- /* PWM mode */
- } else {
- /* RPM mode */
- if (val < 29) /* Prevent negative numbers */
- val = 255;
- else
- val = (255 - val) * 32 / val;
- }
- f71882fg_write8(data, F71882FG_REG_POINT_PWM(pwm, point), val);
- data->pwm_auto_point_pwm[pwm][point] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result = 0;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
-
- mutex_lock(&data->update_lock);
- if (nr & 1)
- result = data->pwm_auto_point_hyst[nr / 2] >> 4;
- else
- result = data->pwm_auto_point_hyst[nr / 2] & 0x0f;
- result = 1000 * (data->pwm_auto_point_temp[nr][point] - result);
- mutex_unlock(&data->update_lock);
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
- u8 reg;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
-
- mutex_lock(&data->update_lock);
- data->pwm_auto_point_temp[nr][point] =
- f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
- val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
- data->pwm_auto_point_temp[nr][point]);
- val = data->pwm_auto_point_temp[nr][point] - val;
-
- reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
- if (nr & 1)
- reg = (reg & 0x0f) | (val << 4);
- else
- reg = (reg & 0xf0) | val;
-
- f71882fg_write8(data, F71882FG_REG_FAN_HYST(nr / 2), reg);
- data->pwm_auto_point_hyst[nr / 2] = reg;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- result = (data->pwm_auto_point_mapping[nr] >> 4) & 1;
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_interpolate(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- unsigned long val;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->pwm_auto_point_mapping[nr] =
- f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
- if (val)
- val = data->pwm_auto_point_mapping[nr] | (1 << 4);
- else
- val = data->pwm_auto_point_mapping[nr] & (~(1 << 4));
- f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
- data->pwm_auto_point_mapping[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int nr = to_sensor_dev_attr_2(devattr)->index;
-
- result = 1 << ((data->pwm_auto_point_mapping[nr] & 3) -
- data->temp_start);
-
- return sprintf(buf, "%d\n", result);
-}
-
-static ssize_t store_pwm_auto_point_channel(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, nr = to_sensor_dev_attr_2(devattr)->index;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- switch (val) {
- case 1:
- val = 0;
- break;
- case 2:
- val = 1;
- break;
- case 4:
- val = 2;
- break;
- default:
- return -EINVAL;
- }
- val += data->temp_start;
- mutex_lock(&data->update_lock);
- data->pwm_auto_point_mapping[nr] =
- f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr));
- val = (data->pwm_auto_point_mapping[nr] & 0xfc) | val;
- f71882fg_write8(data, F71882FG_REG_POINT_MAPPING(nr), val);
- data->pwm_auto_point_mapping[nr] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- int result;
- struct f71882fg_data *data = f71882fg_update_device(dev);
- int pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
-
- result = data->pwm_auto_point_temp[pwm][point];
- return sprintf(buf, "%d\n", 1000 * result);
-}
-
-static ssize_t store_pwm_auto_point_temp(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- int err, pwm = to_sensor_dev_attr_2(devattr)->index;
- int point = to_sensor_dev_attr_2(devattr)->nr;
- long val;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- val /= 1000;
-
- if (data->auto_point_temp_signed)
- val = clamp_val(val, -128, 127);
- else
- val = clamp_val(val, 0, 127);
-
- mutex_lock(&data->update_lock);
- f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
- data->pwm_auto_point_temp[pwm][point] = val;
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-
-static ssize_t name_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct f71882fg_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", f71882fg_names[data->type]);
-}
-
static int f71882fg_create_sysfs_files(struct platform_device *pdev,
struct sensor_device_attribute_2 *attr, int count)
{
@@ -2329,6 +2220,119 @@ static int f71882fg_create_fan_sysfs_files(
return err;
}
+static int f71882fg_remove(struct platform_device *pdev)
+{
+ struct f71882fg_data *data = platform_get_drvdata(pdev);
+ int nr_fans = f71882fg_nr_fans[data->type];
+ int nr_temps = f71882fg_nr_temps[data->type];
+ int i;
+ u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ device_remove_file(&pdev->dev, &dev_attr_name);
+
+ if (start_reg & 0x01) {
+ switch (data->type) {
+ case f71858fg:
+ if (data->temp_config & 0x10)
+ f71882fg_remove_sysfs_files(pdev,
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
+ else
+ f71882fg_remove_sysfs_files(pdev,
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
+ break;
+ case f8000:
+ f71882fg_remove_sysfs_files(pdev,
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
+ break;
+ case f81866a:
+ f71882fg_remove_sysfs_files(pdev,
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
+ break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
+ }
+ if (f71882fg_temp_has_beep[data->type]) {
+ if (data->type == f81866a)
+ f71882fg_remove_sysfs_files(pdev,
+ &f81866_temp_beep_attr[0][0],
+ ARRAY_SIZE(f81866_temp_beep_attr[0])
+ * nr_temps);
+ else
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+ * nr_temps);
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ device_remove_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
+ }
+ }
+
+ if (start_reg & 0x02) {
+ f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
+ ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
+
+ if (f71882fg_fan_has_beep[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_fan_beep_attr, nr_fans);
+ }
+
+ switch (data->type) {
+ case f71808a:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_auto_pwm_attr[0][0],
+ ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
+ f71882fg_remove_sysfs_files(pdev,
+ f71808a_fan3_attr,
+ ARRAY_SIZE(f71808a_fan3_attr));
+ break;
+ case f71862fg:
+ f71882fg_remove_sysfs_files(pdev,
+ &f71862fg_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f71862fg_auto_pwm_attr[0]) *
+ nr_fans);
+ break;
+ case f71808e:
+ case f71869:
+ f71882fg_remove_sysfs_files(pdev,
+ &f71869_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f71869_auto_pwm_attr[0]) * nr_fans);
+ break;
+ case f8000:
+ f71882fg_remove_sysfs_files(pdev,
+ f8000_fan_attr,
+ ARRAY_SIZE(f8000_fan_attr));
+ f71882fg_remove_sysfs_files(pdev,
+ &f8000_auto_pwm_attr[0][0],
+ ARRAY_SIZE(f8000_auto_pwm_attr[0]) * nr_fans);
+ break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_auto_pwm_attr[0][0],
+ ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
+ }
+ }
+ return 0;
+}
+
static int f71882fg_probe(struct platform_device *pdev)
{
struct f71882fg_data *data;
@@ -2502,119 +2506,6 @@ exit_unregister_sysfs:
return err; /* f71882fg_remove() also frees our data */
}
-static int f71882fg_remove(struct platform_device *pdev)
-{
- struct f71882fg_data *data = platform_get_drvdata(pdev);
- int nr_fans = f71882fg_nr_fans[data->type];
- int nr_temps = f71882fg_nr_temps[data->type];
- int i;
- u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
-
- if (data->hwmon_dev)
- hwmon_device_unregister(data->hwmon_dev);
-
- device_remove_file(&pdev->dev, &dev_attr_name);
-
- if (start_reg & 0x01) {
- switch (data->type) {
- case f71858fg:
- if (data->temp_config & 0x10)
- f71882fg_remove_sysfs_files(pdev,
- f8000_temp_attr,
- ARRAY_SIZE(f8000_temp_attr));
- else
- f71882fg_remove_sysfs_files(pdev,
- f71858fg_temp_attr,
- ARRAY_SIZE(f71858fg_temp_attr));
- break;
- case f8000:
- f71882fg_remove_sysfs_files(pdev,
- f8000_temp_attr,
- ARRAY_SIZE(f8000_temp_attr));
- break;
- case f81866a:
- f71882fg_remove_sysfs_files(pdev,
- f71858fg_temp_attr,
- ARRAY_SIZE(f71858fg_temp_attr));
- break;
- default:
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_temp_attr[0][0],
- ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
- }
- if (f71882fg_temp_has_beep[data->type]) {
- if (data->type == f81866a)
- f71882fg_remove_sysfs_files(pdev,
- &f81866_temp_beep_attr[0][0],
- ARRAY_SIZE(f81866_temp_beep_attr[0])
- * nr_temps);
- else
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_temp_beep_attr[0][0],
- ARRAY_SIZE(fxxxx_temp_beep_attr[0])
- * nr_temps);
- }
-
- for (i = 0; i < F71882FG_MAX_INS; i++) {
- if (f71882fg_has_in[data->type][i]) {
- device_remove_file(&pdev->dev,
- &fxxxx_in_attr[i].dev_attr);
- }
- }
- if (f71882fg_has_in1_alarm[data->type]) {
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- }
- }
-
- if (start_reg & 0x02) {
- f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
- ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
-
- if (f71882fg_fan_has_beep[data->type]) {
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_fan_beep_attr, nr_fans);
- }
-
- switch (data->type) {
- case f71808a:
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_auto_pwm_attr[0][0],
- ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
- f71882fg_remove_sysfs_files(pdev,
- f71808a_fan3_attr,
- ARRAY_SIZE(f71808a_fan3_attr));
- break;
- case f71862fg:
- f71882fg_remove_sysfs_files(pdev,
- &f71862fg_auto_pwm_attr[0][0],
- ARRAY_SIZE(f71862fg_auto_pwm_attr[0]) *
- nr_fans);
- break;
- case f71808e:
- case f71869:
- f71882fg_remove_sysfs_files(pdev,
- &f71869_auto_pwm_attr[0][0],
- ARRAY_SIZE(f71869_auto_pwm_attr[0]) * nr_fans);
- break;
- case f8000:
- f71882fg_remove_sysfs_files(pdev,
- f8000_fan_attr,
- ARRAY_SIZE(f8000_fan_attr));
- f71882fg_remove_sysfs_files(pdev,
- &f8000_auto_pwm_attr[0][0],
- ARRAY_SIZE(f8000_auto_pwm_attr[0]) * nr_fans);
- break;
- default:
- f71882fg_remove_sysfs_files(pdev,
- &fxxxx_auto_pwm_attr[0][0],
- ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
- }
- }
- return 0;
-}
-
static int __init f71882fg_find(int sioaddr, struct f71882fg_sio_data *sio_data)
{
u16 devid;
@@ -2760,6 +2651,14 @@ exit_device_put:
return err;
}
+static struct platform_driver f71882fg_driver = {
+ .driver = {
+ .name = DRVNAME,
+ },
+ .probe = f71882fg_probe,
+ .remove = f71882fg_remove,
+};
+
static int __init f71882fg_init(void)
{
int err;
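/*
 * Aside (not part of the patch): the auto-point value conversion used by
 * the f71882fg code above when a fan runs in RPM mode.  The register holds
 * r with pwm = 32 * 255 / (32 + r), and the store path inverts that as
 * r = (255 - pwm) * 32 / pwm; requested values below 29 would push r past
 * 8 bits, so they are pinned to r = 255 (the lowest representable duty
 * cycle).  Helper names are hypothetical.
 */
#include <linux/types.h>

static inline int example_rpm_reg_to_pwm(u8 r)
{
	return 32 * 255 / (32 + r);
}

static inline u8 example_pwm_to_rpm_reg(int pwm)
{
	return pwm < 29 ? 255 : (255 - pwm) * 32 / pwm;
}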
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 57c8a473698d..64fbb8cf687c 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -114,7 +114,7 @@ struct f75375_data {
static int f75375_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int f75375_probe(struct i2c_client *client);
-static int f75375_remove(struct i2c_client *client);
+static void f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
{ "f75373", f75373 },
@@ -864,12 +864,11 @@ exit_remove:
return err;
}
-static int f75375_remove(struct i2c_client *client)
+static void f75375_remove(struct i2c_client *client)
{
struct f75375_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &f75375_group);
- return 0;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
@@ -897,7 +896,7 @@ static int f75375_detect(struct i2c_client *client,
version = f75375_read8(client, F75375_REG_VERSION);
dev_info(&adapter->dev, "found %s version: %02X\n", name, version);
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
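/*
 * Aside (not part of the patch): minimal shape of the i2c remove()
 * conversion applied in the f75375s hunk above and in several drivers
 * below -- the callback now returns void, so the trailing "return 0;"
 * disappears while the teardown itself is unchanged.  All names here are
 * hypothetical.
 */
#include <linux/hwmon.h>
#include <linux/i2c.h>

struct example_data {
	struct device *hwmon_dev;	/* registered in probe() */
};

static void example_remove(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	/* nothing to return: errors at remove time cannot be reported */
}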
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index c26195e3aad7..0a77d6161928 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -217,7 +217,7 @@ static const int FSCHMD_NO_TEMP_SENSORS[7] = { 3, 3, 4, 3, 5, 5, 11 };
static int fschmd_probe(struct i2c_client *client);
static int fschmd_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int fschmd_remove(struct i2c_client *client);
+static void fschmd_remove(struct i2c_client *client);
static struct fschmd_data *fschmd_update_device(struct device *dev);
/*
@@ -1075,7 +1075,7 @@ static int fschmd_detect(struct i2c_client *client,
else
return -ENODEV;
- strlcpy(info->type, fschmd_id[kind].name, I2C_NAME_SIZE);
+ strscpy(info->type, fschmd_id[kind].name, I2C_NAME_SIZE);
return 0;
}
@@ -1248,7 +1248,7 @@ exit_detach:
return err;
}
-static int fschmd_remove(struct i2c_client *client)
+static void fschmd_remove(struct i2c_client *client)
{
struct fschmd_data *data = i2c_get_clientdata(client);
int i;
@@ -1291,8 +1291,6 @@ static int fschmd_remove(struct i2c_client *client)
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, fschmd_release_resources);
mutex_unlock(&watchdog_data_mutex);
-
- return 0;
}
static struct fschmd_data *fschmd_update_device(struct device *dev)
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index ceffc76a0c51..f5b8e724a8ca 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -739,17 +739,16 @@ static int fts_detect(struct i2c_client *client,
if (val != 0x11)
return -ENODEV;
- strlcpy(info->type, fts_id[0].name, I2C_NAME_SIZE);
+ strscpy(info->type, fts_id[0].name, I2C_NAME_SIZE);
info->flags = 0;
return 0;
}
-static int fts_remove(struct i2c_client *client)
+static void fts_remove(struct i2c_client *client)
{
struct fts_data *data = dev_get_drvdata(&client->dev);
watchdog_unregister_device(&data->wdd);
- return 0;
}
static int fts_probe(struct i2c_client *client)
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index dd683b0a648f..95286c40f55a 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -586,7 +586,7 @@ static int gl518_detect(struct i2c_client *client, struct i2c_board_info *info)
if (rev != 0x00 && rev != 0x80)
return -ENODEV;
- strlcpy(info->type, "gl518sm", I2C_NAME_SIZE);
+ strscpy(info->type, "gl518sm", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 096ba9797211..394da4ac977c 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -811,7 +811,7 @@ static int gl520_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, "gl520sm", I2C_NAME_SIZE);
+ strscpy(info->type, "gl520sm", I2C_NAME_SIZE);
return 0;
}
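/*
 * Aside (not part of the patch): strscpy() is the bounded copy the
 * detect() hunks switch to.  Unlike strlcpy() it never reads past a
 * non-terminated source and it returns -E2BIG on truncation rather than
 * the source length, so truncation can be checked directly.  A
 * hypothetical sketch:
 */
#include <linux/i2c.h>
#include <linux/string.h>

static int example_set_name(struct i2c_board_info *info, const char *name)
{
	/* strscpy() returns -E2BIG if name (plus its NUL) does not fit */
	return strscpy(info->type, name, I2C_NAME_SIZE) < 0 ? -EINVAL : 0;
}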
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index befe989ca7b9..ba408942dbe7 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -37,9 +37,7 @@ struct gpio_fan_data {
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
-#ifdef CONFIG_PM_SLEEP
int resume_speed;
-#endif
bool pwm_enable;
struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
@@ -391,6 +389,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (!fan_data)
return -EINVAL;
+ if (state >= fan_data->num_speed)
+ return -EINVAL;
+
set_fan_speed(fan_data, state);
return 0;
}
@@ -554,7 +555,6 @@ static void gpio_fan_shutdown(struct platform_device *pdev)
set_fan_speed(fan_data, 0);
}
-#ifdef CONFIG_PM_SLEEP
static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
@@ -577,18 +577,14 @@ static int gpio_fan_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
-#define GPIO_FAN_PM (&gpio_fan_pm)
-#else
-#define GPIO_FAN_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
- .pm = GPIO_FAN_PM,
+ .pm = pm_sleep_ptr(&gpio_fan_pm),
.of_match_table = of_match_ptr(of_gpio_fan_match),
},
};
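/*
 * Aside (not part of the patch): the system-sleep PM pattern the
 * gpio-fan, it87 and lm90 hunks move to.  DEFINE_SIMPLE_DEV_PM_OPS()
 * defines the dev_pm_ops unconditionally and pm_sleep_ptr() turns the
 * reference into NULL when CONFIG_PM_SLEEP=n, so the old
 * #ifdef CONFIG_PM_SLEEP scaffolding (and helpers like GPIO_FAN_PM) can
 * go.  Names below are hypothetical.
 */
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm, example_suspend, example_resume);

static struct platform_driver example_pdrv = {
	.driver = {
		.name	= "example",
		.pm	= pm_sleep_ptr(&example_pm),
	},
};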
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index d64be48f1ef6..b60ec95b5edb 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -267,6 +267,7 @@ gsc_hwmon_get_devtree_pdata(struct device *dev)
pdata->nchannels = nchannels;
/* fan controller base address */
+ of_node_get(dev->parent->of_node);
fan = of_find_compatible_node(dev->parent->of_node, NULL, "gw,gsc-fan");
if (fan && of_property_read_u32(fan, "reg", &pdata->fan_base)) {
of_node_put(fan);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 2e2cd79d89eb..4218750d5a66 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -151,9 +151,9 @@ static DEFINE_IDA(hwmon_ida);
* between hwmon and thermal_sys modules.
*/
#ifdef CONFIG_THERMAL_OF
-static int hwmon_thermal_get_temp(void *data, int *temp)
+static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct hwmon_thermal_data *tdata = data;
+ struct hwmon_thermal_data *tdata = tz->devdata;
struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
int ret;
long t;
@@ -168,9 +168,9 @@ static int hwmon_thermal_get_temp(void *data, int *temp)
return 0;
}
-static int hwmon_thermal_set_trips(void *data, int low, int high)
+static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct hwmon_thermal_data *tdata = data;
+ struct hwmon_thermal_data *tdata = tz->devdata;
struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
const struct hwmon_chip_info *chip = hwdev->chip;
const struct hwmon_channel_info **info = chip->info;
@@ -203,7 +203,7 @@ static int hwmon_thermal_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
+static const struct thermal_zone_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
.set_trips = hwmon_thermal_set_trips,
};
@@ -227,8 +227,8 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
tdata->dev = dev;
tdata->index = index;
- tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
- &hwmon_thermal_ops);
+ tzd = devm_thermal_of_zone_register(dev, index, tdata,
+ &hwmon_thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) != -ENODEV)
return PTR_ERR(tzd);
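/*
 * Aside (not part of the patch): shape of the thermal-zone conversion in
 * the hwmon core hunk above.  The ->get_temp()/->set_trips() callbacks
 * now receive the thermal_zone_device itself and pull their private data
 * from tz->devdata, and registration goes through
 * devm_thermal_of_zone_register().  Names and data layout are
 * hypothetical.
 */
#include <linux/thermal.h>

struct example_sensor {
	int temp_mc;			/* last reading, millidegrees C */
};

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct example_sensor *s = tz->devdata;

	*temp = s->temp_mc;
	return 0;
}

static const struct thermal_zone_device_ops example_tz_ops = {
	.get_temp = example_get_temp,
};

/*
 * registration, e.g. from probe():
 *	tzd = devm_thermal_of_zone_register(dev, 0, sensor, &example_tz_ops);
 */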
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 580a7d125b88..3aa40893fc09 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -6,11 +6,13 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+
#include <linux/hwmon.h>
-#include <linux/of.h>
#include <linux/hwmon-sysfs.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>
@@ -149,8 +151,8 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attr_group.attrs = st->attrs;
st->groups[0] = &st->attr_group;
- if (dev->of_node) {
- sname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+ if (dev_fwnode(dev)) {
+ sname = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", dev_fwnode(dev));
if (!sname)
return -ENOMEM;
strreplace(sname, '-', '_');
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index fc3007c3e85c..9b58655d2de4 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -568,13 +568,11 @@ out_restore_conf:
return ret;
}
-static int ina209_remove(struct i2c_client *client)
+static void ina209_remove(struct i2c_client *client)
{
struct ina209_data *data = i2c_get_clientdata(client);
ina209_restore_conf(client, data);
-
- return 0;
}
static const struct i2c_device_id ina209_id[] = {
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 58d3828e2ec0..2a57f4b60c29 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -913,7 +913,7 @@ fail:
return ret;
}
-static int ina3221_remove(struct i2c_client *client)
+static void ina3221_remove(struct i2c_client *client)
{
struct ina3221_data *ina = dev_get_drvdata(&client->dev);
int i;
@@ -926,11 +926,9 @@ static int ina3221_remove(struct i2c_client *client)
pm_runtime_put_noidle(ina->pm_dev);
mutex_destroy(&ina->lock);
-
- return 0;
}
-static int __maybe_unused ina3221_suspend(struct device *dev)
+static int ina3221_suspend(struct device *dev)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
int ret;
@@ -953,7 +951,7 @@ static int __maybe_unused ina3221_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ina3221_resume(struct device *dev)
+static int ina3221_resume(struct device *dev)
{
struct ina3221_data *ina = dev_get_drvdata(dev);
int ret;
@@ -996,11 +994,8 @@ static int __maybe_unused ina3221_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops ina3221_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(ina3221_suspend, ina3221_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(ina3221_pm, ina3221_suspend, ina3221_resume,
+ NULL);
static const struct of_device_id ina3221_of_match_table[] = {
{ .compatible = "ti,ina3221", },
@@ -1020,7 +1015,7 @@ static struct i2c_driver ina3221_i2c_driver = {
.driver = {
.name = INA3221_DRIVER_NAME,
.of_match_table = ina3221_of_match_table,
- .pm = &ina3221_pm,
+ .pm = pm_ptr(&ina3221_pm),
},
.id_table = ina3221_ids,
};
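/*
 * Aside (not part of the patch): the runtime-PM flavour of the same
 * cleanup, as used in the ina3221 hunk above.  DEFINE_RUNTIME_DEV_PM_OPS()
 * wires the two callbacks up as runtime-PM ops and reuses
 * pm_runtime_force_suspend()/pm_runtime_force_resume() for system sleep,
 * matching the hand-rolled dev_pm_ops it replaces; pm_ptr() drops the
 * reference entirely when CONFIG_PM=n.  Names are hypothetical.
 */
#include <linux/pm_runtime.h>

static int example_rt_suspend(struct device *dev) { return 0; }
static int example_rt_resume(struct device *dev)  { return 0; }

static DEFINE_RUNTIME_DEV_PM_OPS(example_rt_pm, example_rt_suspend,
				 example_rt_resume, NULL);

/* in the driver definition: .pm = pm_ptr(&example_rt_pm) */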
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 0e543dbe0a6b..7bd154ba351b 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -3179,7 +3179,7 @@ static int it87_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static void __maybe_unused it87_resume_sio(struct platform_device *pdev)
+static void it87_resume_sio(struct platform_device *pdev)
{
struct it87_data *data = dev_get_drvdata(&pdev->dev);
int err;
@@ -3211,7 +3211,7 @@ static void __maybe_unused it87_resume_sio(struct platform_device *pdev)
superio_exit(data->sioaddr);
}
-static int __maybe_unused it87_resume(struct device *dev)
+static int it87_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct it87_data *data = dev_get_drvdata(dev);
@@ -3238,12 +3238,12 @@ static int __maybe_unused it87_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(it87_dev_pm_ops, NULL, it87_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(it87_dev_pm_ops, NULL, it87_resume);
static struct platform_driver it87_driver = {
.driver = {
.name = DRVNAME,
- .pm = &it87_dev_pm_ops,
+ .pm = pm_sleep_ptr(&it87_dev_pm_ops),
},
.probe = it87_probe,
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 07f7f8b5b73d..30888feaf589 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -441,7 +441,7 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
struct jc42_chips *chip = &jc42_chips[i];
if (manid == chip->manid &&
(devid & chip->devid_mask) == chip->devid) {
- strlcpy(info->type, "jc42", I2C_NAME_SIZE);
+ strscpy(info->type, "jc42", I2C_NAME_SIZE);
return 0;
}
}
@@ -524,7 +524,7 @@ static int jc42_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int jc42_remove(struct i2c_client *client)
+static void jc42_remove(struct i2c_client *client)
{
struct jc42_data *data = i2c_get_clientdata(client);
@@ -537,7 +537,6 @@ static int jc42_remove(struct i2c_client *client)
| (data->config & JC42_CFG_HYST_MASK);
i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
}
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 339a145afc09..9ab2cab4c710 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -996,11 +996,11 @@ static int lm63_detect(struct i2c_client *client,
}
if (chip_id == 0x41 && address == 0x4c)
- strlcpy(info->type, "lm63", I2C_NAME_SIZE);
+ strscpy(info->type, "lm63", I2C_NAME_SIZE);
else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
- strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ strscpy(info->type, "lm64", I2C_NAME_SIZE);
else if (chip_id == 0x49 && address == 0x4c)
- strlcpy(info->type, "lm96163", I2C_NAME_SIZE);
+ strscpy(info->type, "lm96163", I2C_NAME_SIZE);
else
return -ENODEV;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index beb0d61bcd82..1346b3b3f463 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -257,7 +257,7 @@ static int lm73_detect(struct i2c_client *new_client,
if (id < 0 || id != LM73_ID)
return -ENODEV;
- strlcpy(info->type, "lm73", I2C_NAME_SIZE);
+ strscpy(info->type, "lm73", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 66dc826f7962..bcc3adcb3af1 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -893,7 +893,7 @@ static int lm75_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE);
+ strscpy(info->type, is_lm75a ? "lm75a" : "lm75", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index df6af85e170a..645cb2191abe 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -302,7 +302,7 @@ static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info)
|| i2c_smbus_read_word_data(client, 7) != min)
return -ENODEV;
- strlcpy(info->type, "lm77", I2C_NAME_SIZE);
+ strscpy(info->type, "lm77", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 5e129cbec1cb..694e171cab7f 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -617,7 +617,7 @@ static int lm78_i2c_detect(struct i2c_client *client,
if (isa)
mutex_unlock(&isa->update_lock);
- strlcpy(info->type, client_name, I2C_NAME_SIZE);
+ strscpy(info->type, client_name, I2C_NAME_SIZE);
return 0;
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index e85e062bbf32..35db0b97f912 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -586,7 +586,7 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
name = "lm80";
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 905f5689f907..616449f2cc50 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -412,7 +412,7 @@ static int lm83_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 88cf2012d34b..8d33c2484755 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1539,7 +1539,7 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
if (!type_name)
return -ENODEV;
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 1750bc588856..818fb6195245 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -833,7 +833,7 @@ static int lm87_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 221de01a327a..db595f7d01f8 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2547,7 +2547,7 @@ static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
@@ -2956,7 +2956,7 @@ static void lm90_alert(struct i2c_client *client, enum i2c_alert_protocol type,
}
}
-static int __maybe_unused lm90_suspend(struct device *dev)
+static int lm90_suspend(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -2967,7 +2967,7 @@ static int __maybe_unused lm90_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused lm90_resume(struct device *dev)
+static int lm90_resume(struct device *dev)
{
struct lm90_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -2978,14 +2978,14 @@ static int __maybe_unused lm90_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(lm90_pm_ops, lm90_suspend, lm90_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(lm90_pm_ops, lm90_suspend, lm90_resume);
static struct i2c_driver lm90_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm90",
.of_match_table = of_match_ptr(lm90_of_match),
- .pm = &lm90_pm_ops,
+ .pm = pm_sleep_ptr(&lm90_pm_ops),
},
.probe_new = lm90_probe,
.alert = lm90_alert,
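
The lm90 hunk above shows the suspend/resume modernization applied to several drivers in this series: the __maybe_unused annotations and SIMPLE_DEV_PM_OPS() are replaced by DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), so the callbacks are always compiled (and therefore build-tested), while pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is off and lets the unreferenced ops be discarded. A minimal sketch of the resulting shape for a hypothetical foo i2c driver:

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* put the (hypothetical) chip into its low-power state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore normal operation */
	return 0;
}

/* Always defined; no #ifdef CONFIG_PM_SLEEP or __maybe_unused needed. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM_SLEEP is disabled, so the ops and
		 * callbacks above become unreferenced and are dropped.
		 */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};
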
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 5bae6eedcaf1..2ff3044a677d 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -287,7 +287,7 @@ static int lm92_detect(struct i2c_client *new_client,
else
return -ENODEV;
- strlcpy(info->type, "lm92", I2C_NAME_SIZE);
+ strscpy(info->type, "lm92", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index dc67bf954b21..4cf50d5f4f59 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2575,7 +2575,7 @@ static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
dev_dbg(&adapter->dev, "loading %s at %d, 0x%02x\n",
client->name, i2c_adapter_id(client->adapter),
client->addr);
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index ac169a994ae0..b4a9d0c223c4 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -644,7 +644,7 @@ static int lm95234_detect(struct i2c_client *client,
if (val & model_mask)
return -ENODEV;
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 8ea46ff20be5..f1ed777a8735 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -389,7 +389,7 @@ static int lm95241_detect(struct i2c_client *new_client,
}
/* Fill the i2c board info */
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 29388fcf5f74..c433f0af2d31 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -461,7 +461,7 @@ static int lm95245_detect(struct i2c_client *new_client,
return -ENODEV;
}
- strlcpy(info->type, name, I2C_NAME_SIZE);
+ strscpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index 5423466de697..7404e974762f 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -956,13 +956,6 @@ static struct attribute *ltc2947_attrs[] = {
};
ATTRIBUTE_GROUPS(ltc2947);
-static void ltc2947_clk_disable(void *data)
-{
- struct clk *extclk = data;
-
- clk_disable_unprepare(extclk);
-}
-
static int ltc2947_setup(struct ltc2947_data *st)
{
int ret;
@@ -989,7 +982,7 @@ static int ltc2947_setup(struct ltc2947_data *st)
return ret;
/* check external clock presence */
- extclk = devm_clk_get_optional(st->dev, NULL);
+ extclk = devm_clk_get_optional_enabled(st->dev, NULL);
if (IS_ERR(extclk))
return dev_err_probe(st->dev, PTR_ERR(extclk),
"Failed to get external clock\n");
@@ -1007,14 +1000,6 @@ static int ltc2947_setup(struct ltc2947_data *st)
return -EINVAL;
}
- ret = clk_prepare_enable(extclk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(st->dev, ltc2947_clk_disable,
- extclk);
- if (ret)
- return ret;
/* as in table 1 of the datasheet */
if (rate_hz >= LTC2947_CLK_MIN && rate_hz <= 1000000)
pre = 0;
@@ -1135,7 +1120,7 @@ int ltc2947_core_probe(struct regmap *map, const char *name)
}
EXPORT_SYMBOL_GPL(ltc2947_core_probe);
-static int __maybe_unused ltc2947_resume(struct device *dev)
+static int ltc2947_resume(struct device *dev)
{
struct ltc2947_data *st = dev_get_drvdata(dev);
u32 ctrl = 0;
@@ -1164,7 +1149,7 @@ static int __maybe_unused ltc2947_resume(struct device *dev)
LTC2947_CONT_MODE_MASK, LTC2947_CONT_MODE(1));
}
-static int __maybe_unused ltc2947_suspend(struct device *dev)
+static int ltc2947_suspend(struct device *dev)
{
struct ltc2947_data *st = dev_get_drvdata(dev);
@@ -1172,8 +1157,7 @@ static int __maybe_unused ltc2947_suspend(struct device *dev)
LTC2947_SHUTDOWN_MASK, 1);
}
-SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
-EXPORT_SYMBOL_GPL(ltc2947_pm_ops);
+EXPORT_SIMPLE_DEV_PM_OPS(ltc2947_pm_ops, ltc2947_suspend, ltc2947_resume);
const struct of_device_id ltc2947_of_match[] = {
{ .compatible = "adi,ltc2947" },
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
index ad0dfd3efbf8..96852bc8a964 100644
--- a/drivers/hwmon/ltc2947-i2c.c
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -36,7 +36,7 @@ static struct i2c_driver ltc2947_driver = {
.driver = {
.name = "ltc2947",
.of_match_table = ltc2947_of_match,
- .pm = &ltc2947_pm_ops,
+ .pm = pm_sleep_ptr(&ltc2947_pm_ops),
},
.probe_new = ltc2947_probe,
.id_table = ltc2947_id,
diff --git a/drivers/hwmon/ltc2947-spi.c b/drivers/hwmon/ltc2947-spi.c
index c24ca569db1b..a33be110098c 100644
--- a/drivers/hwmon/ltc2947-spi.c
+++ b/drivers/hwmon/ltc2947-spi.c
@@ -38,7 +38,7 @@ static struct spi_driver ltc2947_driver = {
.driver = {
.name = "ltc2947",
.of_match_table = ltc2947_of_match,
- .pm = &ltc2947_pm_ops,
+ .pm = pm_sleep_ptr(&ltc2947_pm_ops),
},
.probe = ltc2947_probe,
.id_table = ltc2947_id,
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index eae9e68027bc..445c77197f69 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -241,7 +241,7 @@ static int max1619_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "max1619", I2C_NAME_SIZE);
+ strscpy(info->type, "max1619", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 78688e6cb87d..9f748973d6a3 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -386,7 +386,7 @@ static int max1668_detect(struct i2c_client *client,
if (!type_name)
return -ENODEV;
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 93e048ee4955..9a31ef388396 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -113,7 +113,7 @@ static void max31722_remove(struct spi_device *spi)
dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
}
-static int __maybe_unused max31722_suspend(struct device *dev)
+static int max31722_suspend(struct device *dev)
{
struct spi_device *spi_device = to_spi_device(dev);
struct max31722_data *data = spi_get_drvdata(spi_device);
@@ -121,7 +121,7 @@ static int __maybe_unused max31722_suspend(struct device *dev)
return max31722_set_mode(data, MAX31722_MODE_STANDBY);
}
-static int __maybe_unused max31722_resume(struct device *dev)
+static int max31722_resume(struct device *dev)
{
struct spi_device *spi_device = to_spi_device(dev);
struct max31722_data *data = spi_get_drvdata(spi_device);
@@ -129,7 +129,7 @@ static int __maybe_unused max31722_resume(struct device *dev)
return max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
}
-static SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
static const struct spi_device_id max31722_spi_id[] = {
{"max31722", 0},
@@ -141,7 +141,7 @@ MODULE_DEVICE_TABLE(spi, max31722_spi_id);
static struct spi_driver max31722_driver = {
.driver = {
.name = "max31722",
- .pm = &max31722_pm_ops,
+ .pm = pm_sleep_ptr(&max31722_pm_ops),
},
.probe = max31722_probe,
.remove = max31722_remove,
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
index 23598b8b8793..746a767c9fc6 100644
--- a/drivers/hwmon/max31730.c
+++ b/drivers/hwmon/max31730.c
@@ -399,33 +399,33 @@ static int max31730_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "max31730", I2C_NAME_SIZE);
+ strscpy(info->type, "max31730", I2C_NAME_SIZE);
return 0;
}
-static int __maybe_unused max31730_suspend(struct device *dev)
+static int max31730_suspend(struct device *dev)
{
struct max31730_data *data = dev_get_drvdata(dev);
return max31730_write_config(data, MAX31730_STOP, 0);
}
-static int __maybe_unused max31730_resume(struct device *dev)
+static int max31730_resume(struct device *dev)
{
struct max31730_data *data = dev_get_drvdata(dev);
return max31730_write_config(data, 0, MAX31730_STOP);
}
-static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume);
static struct i2c_driver max31730_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max31730",
.of_match_table = of_match_ptr(max31730_of_match),
- .pm = &max31730_pm_ops,
+ .pm = pm_sleep_ptr(&max31730_pm_ops),
},
.probe_new = max31730_probe,
.id_table = max31730_ids,
diff --git a/drivers/hwmon/max31760.c b/drivers/hwmon/max31760.c
new file mode 100644
index 000000000000..06d5f39dc33d
--- /dev/null
+++ b/drivers/hwmon/max31760.c
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
+
+#define REG_CR1 0x00
+#define CR1_HYST BIT(5)
+#define CR1_DRV GENMASK(4, 3)
+#define CR1_TEMP_SRC GENMASK(1, 0)
+#define REG_CR2 0x01
+#define CR2_STBY BIT(7)
+#define CR2_ALERTS BIT(6)
+#define CR2_DFC BIT(0)
+#define REG_CR3 0x02
+#define REG_PWMR 0x50
+#define REG_PWMV 0x51
+#define REG_STATUS 0x5A
+#define STATUS_ALARM_CRIT(ch) BIT(2 + 2 * (ch))
+#define STATUS_ALARM_MAX(ch) BIT(3 + 2 * (ch))
+#define STATUS_RDFA BIT(6)
+
+#define REG_TACH(ch) (0x52 + (ch) * 2)
+#define REG_TEMP_INPUT(ch) (0x56 + (ch) * 2)
+#define REG_TEMP_MAX(ch) (0x06 + (ch) * 2)
+#define REG_TEMP_CRIT(ch) (0x0A + (ch) * 2)
+
+#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
+#define TEMP11_TO_REG(val) (DIV_ROUND_CLOSEST(clamp_val((val), -128000, \
+ 127875), 125) * 32)
+
+#define LUT_SIZE 48
+
+#define REG_LUT(index) (0x20 + (index))
+
+struct max31760_state {
+ struct regmap *regmap;
+
+ struct lut_attribute {
+ char name[24];
+ struct sensor_device_attribute sda;
+ } lut[LUT_SIZE];
+
+ struct attribute *attrs[LUT_SIZE + 2];
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+};
+
+static bool max31760_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg > 0x50;
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x5B,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = max31760_volatile_reg,
+};
+
+static const int max31760_pwm_freq[] = {33, 150, 1500, 25000};
+
+static int tach_to_rpm(u16 tach)
+{
+ if (tach == 0)
+ tach = 1;
+
+ return 60 * 100000 / tach / 2;
+}
+
+static int max31760_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int regval;
+ unsigned int reg_temp;
+ s16 temp;
+ u8 reg[2];
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_fault:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(STATUS_RDFA, regval);
+
+ return 0;
+ case hwmon_temp_max_alarm:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(STATUS_ALARM_MAX(1), regval);
+ else
+ *val = FIELD_GET(STATUS_ALARM_MAX(0), regval);
+
+ return 0;
+ case hwmon_temp_crit_alarm:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(STATUS_ALARM_CRIT(1), regval);
+ else
+ *val = FIELD_GET(STATUS_ALARM_CRIT(0), regval);
+
+ return 0;
+ case hwmon_temp_input:
+ reg_temp = REG_TEMP_INPUT(channel);
+ break;
+ case hwmon_temp_max:
+ reg_temp = REG_TEMP_MAX(channel);
+ break;
+ case hwmon_temp_crit:
+ reg_temp = REG_TEMP_CRIT(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = regmap_bulk_read(state->regmap, reg_temp, reg, 2);
+ if (ret)
+ return ret;
+
+ temp = (reg[0] << 8) | reg[1];
+
+ *val = TEMP11_FROM_REG(temp);
+
+ return 0;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ ret = regmap_bulk_read(state->regmap, REG_TACH(channel), reg, 2);
+ if (ret)
+ return ret;
+
+ *val = tach_to_rpm(reg[0] * 256 + reg[1]);
+
+ return 0;
+ case hwmon_fan_fault:
+ ret = regmap_read(state->regmap, REG_STATUS, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(BIT(1), regval);
+ else
+ *val = FIELD_GET(BIT(0), regval);
+
+ return 0;
+ case hwmon_fan_enable:
+ ret = regmap_read(state->regmap, REG_CR3, &regval);
+ if (ret)
+ return ret;
+
+ if (channel)
+ *val = FIELD_GET(BIT(1), regval);
+ else
+ *val = FIELD_GET(BIT(0), regval);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ ret = regmap_read(state->regmap, REG_PWMV, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+ case hwmon_pwm_freq:
+ ret = regmap_read(state->regmap, REG_CR1, &regval);
+ if (ret)
+ return ret;
+
+ regval = FIELD_GET(CR1_DRV, regval);
+ if (regval >= ARRAY_SIZE(max31760_pwm_freq))
+ return -EINVAL;
+
+ *val = max31760_pwm_freq[regval];
+
+ return 0;
+ case hwmon_pwm_enable:
+ ret = regmap_read(state->regmap, REG_CR2, &regval);
+ if (ret)
+ return ret;
+
+ *val = 2 - FIELD_GET(CR2_DFC, regval);
+
+ return 0;
+ case hwmon_pwm_auto_channels_temp:
+ ret = regmap_read(state->regmap, REG_CR1, &regval);
+ if (ret)
+ return ret;
+
+ switch (FIELD_GET(CR1_TEMP_SRC, regval)) {
+ case 0:
+ *val = 2;
+ break;
+ case 1:
+ *val = 1;
+ break;
+ case 2:
+ case 3:
+ *val = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int max31760_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int pwm_index;
+ unsigned int reg_temp;
+ int temp;
+ u8 reg_val[2];
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_max:
+ reg_temp = REG_TEMP_MAX(channel);
+ break;
+ case hwmon_temp_crit:
+ reg_temp = REG_TEMP_CRIT(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ temp = TEMP11_TO_REG(val);
+ reg_val[0] = temp >> 8;
+ reg_val[1] = temp & 0xFF;
+
+ return regmap_bulk_write(state->regmap, reg_temp, reg_val, 2);
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_enable:
+ if (val == 0)
+ return regmap_clear_bits(state->regmap, REG_CR3, BIT(channel));
+
+ if (val == 1)
+ return regmap_set_bits(state->regmap, REG_CR3, BIT(channel));
+
+ return -EINVAL;
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ return regmap_write(state->regmap, REG_PWMR, val);
+ case hwmon_pwm_enable:
+ if (val == 1)
+ return regmap_set_bits(state->regmap, REG_CR2, CR2_DFC);
+
+ if (val == 2)
+ return regmap_clear_bits(state->regmap, REG_CR2, CR2_DFC);
+
+ return -EINVAL;
+ case hwmon_pwm_freq:
+ pwm_index = find_closest(val, max31760_pwm_freq,
+ ARRAY_SIZE(max31760_pwm_freq));
+
+ return regmap_update_bits(state->regmap,
+ REG_CR1, CR1_DRV,
+ FIELD_PREP(CR1_DRV, pwm_index));
+ case hwmon_pwm_auto_channels_temp:
+ switch (val) {
+ case 1:
+ break;
+ case 2:
+ val = 0;
+ break;
+ case 3:
+ val = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(state->regmap, REG_CR1, CR1_TEMP_SRC, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *max31760_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_FAULT | HWMON_F_ENABLE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT | HWMON_T_FAULT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_ENABLE | HWMON_PWM_FREQ | HWMON_PWM_INPUT |
+ HWMON_PWM_AUTO_CHANNELS_TEMP),
+ NULL
+};
+
+static umode_t max31760_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_fault:
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ case hwmon_fan_enable:
+ return 0644;
+ default:
+ return 0;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_input:
+ case hwmon_pwm_freq:
+ case hwmon_pwm_auto_channels_temp:
+ return 0644;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static int max31760_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ if (attr != hwmon_temp_label)
+ return -EOPNOTSUPP;
+
+ *str = channel ? "local" : "remote";
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops max31760_hwmon_ops = {
+ .is_visible = max31760_is_visible,
+ .read = max31760_read,
+ .write = max31760_write,
+ .read_string = max31760_read_string
+};
+
+static const struct hwmon_chip_info max31760_chip_info = {
+ .ops = &max31760_hwmon_ops,
+ .info = max31760_info,
+};
+
+static ssize_t lut_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(devattr);
+ struct max31760_state *state = dev_get_drvdata(dev);
+ int ret;
+ unsigned int regval;
+
+ ret = regmap_read(state->regmap, REG_LUT(sda->index), &regval);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", regval);
+}
+
+static ssize_t lut_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(devattr);
+ struct max31760_state *state = dev_get_drvdata(dev);
+ int ret;
+ u8 pwm;
+
+ ret = kstrtou8(buf, 10, &pwm);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(state->regmap, REG_LUT(sda->index), pwm);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t pwm1_auto_point_temp_hyst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(state->regmap, REG_CR1, &regval);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", (1 + (int)FIELD_GET(CR1_HYST, regval)) * 2000);
+}
+
+static ssize_t pwm1_auto_point_temp_hyst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+ unsigned int hyst;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &hyst);
+ if (ret)
+ return ret;
+
+ if (hyst < 3000)
+ ret = regmap_clear_bits(state->regmap, REG_CR1, CR1_HYST);
+ else
+ ret = regmap_set_bits(state->regmap, REG_CR1, CR1_HYST);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(pwm1_auto_point_temp_hyst);
+
+static void max31760_create_lut_nodes(struct max31760_state *state)
+{
+ int i;
+ struct sensor_device_attribute *sda;
+ struct lut_attribute *lut;
+
+ for (i = 0; i < LUT_SIZE; ++i) {
+ lut = &state->lut[i];
+ sda = &lut->sda;
+
+ snprintf(lut->name, sizeof(lut->name),
+ "pwm1_auto_point%d_pwm", i + 1);
+
+ sda->dev_attr.attr.mode = 0644;
+ sda->index = i;
+ sda->dev_attr.show = lut_show;
+ sda->dev_attr.store = lut_store;
+ sda->dev_attr.attr.name = lut->name;
+
+ sysfs_attr_init(&sda->dev_attr.attr);
+
+ state->attrs[i] = &sda->dev_attr.attr;
+ }
+
+ state->attrs[i] = &dev_attr_pwm1_auto_point_temp_hyst.attr;
+
+ state->group.attrs = state->attrs;
+ state->groups[0] = &state->group;
+}
+
+static int max31760_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max31760_state *state;
+ struct device *hwmon_dev;
+ int ret;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(state->regmap))
+ return dev_err_probe(dev,
+ PTR_ERR(state->regmap),
+ "regmap initialization failed\n");
+
+ dev_set_drvdata(dev, state);
+
+ /* Set alert output to comparator mode */
+ ret = regmap_set_bits(state->regmap, REG_CR2, CR2_ALERTS);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot write register\n");
+
+ max31760_create_lut_nodes(state);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ state,
+ &max31760_chip_info,
+ state->groups);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id max31760_of_match[] = {
+ {.compatible = "adi,max31760"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, max31760_of_match);
+
+static const struct i2c_device_id max31760_id[] = {
+ {"max31760"},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max31760_id);
+
+static int max31760_suspend(struct device *dev)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+
+ return regmap_set_bits(state->regmap, REG_CR2, CR2_STBY);
+}
+
+static int max31760_resume(struct device *dev)
+{
+ struct max31760_state *state = dev_get_drvdata(dev);
+
+ return regmap_clear_bits(state->regmap, REG_CR2, CR2_STBY);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max31760_pm_ops, max31760_suspend,
+ max31760_resume);
+
+static struct i2c_driver max31760_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max31760",
+ .of_match_table = max31760_of_match,
+ .pm = pm_ptr(&max31760_pm_ops)
+ },
+ .probe_new = max31760_probe,
+ .id_table = max31760_id
+};
+module_i2c_driver(max31760_driver);
+
+MODULE_AUTHOR("Ibrahim Tilki <Ibrahim.Tilki@analog.com>");
+MODULE_DESCRIPTION("Analog Devices MAX31760 Fan Speed Controller");
+MODULE_SOFTDEP("pre: regmap_i2c");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
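
The new max31760 driver stores temperatures as a signed 16-bit register whose upper 11 bits carry the reading at 0.125 degC per LSB; TEMP11_FROM_REG() therefore divides by 32 (discarding the unused low bits) and multiplies by 125 to get millidegrees, and TEMP11_TO_REG() clamps and reverses that. A standalone arithmetic sketch of the read direction, outside the kernel, with illustrative register values:

#include <stdint.h>
#include <stdio.h>

/* reg / 32 yields eighths of a degree (the 11-bit field); * 125 converts
 * eighths of a degree to millidegrees Celsius.
 */
static long temp11_from_reg(int16_t reg)
{
	return reg / 32 * 125;
}

int main(void)
{
	printf("%ld\n", temp11_from_reg((int16_t)0x1900));	/* 25000, i.e. 25.0 C */
	printf("%ld\n", temp11_from_reg((int16_t)0xE700));	/* -25000, i.e. -25.0 C */
	return 0;
}
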
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 7e9362f6dc29..20bf5ffadefe 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -202,6 +202,9 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
}
mutex_unlock(&data->update_lock);
return 0;
+ case hwmon_fan_enable:
+ *val = !!(data->fan_config[channel] & MAX31790_FAN_CFG_TACH_INPUT_EN);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -214,7 +217,7 @@ static int max31790_write_fan(struct device *dev, u32 attr, int channel,
struct i2c_client *client = data->client;
int target_count;
int err = 0;
- u8 bits;
+ u8 bits, fan_config;
int sr;
mutex_lock(&data->update_lock);
@@ -243,6 +246,23 @@ static int max31790_write_fan(struct device *dev, u32 attr, int channel,
MAX31790_REG_TARGET_COUNT(channel),
data->target_count[channel]);
break;
+ case hwmon_fan_enable:
+ fan_config = data->fan_config[channel];
+ if (val == 0) {
+ fan_config &= ~MAX31790_FAN_CFG_TACH_INPUT_EN;
+ } else if (val == 1) {
+ fan_config |= MAX31790_FAN_CFG_TACH_INPUT_EN;
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ if (fan_config != data->fan_config[channel]) {
+ err = i2c_smbus_write_byte_data(client, MAX31790_REG_FAN_CONFIG(channel),
+ fan_config);
+ if (!err)
+ data->fan_config[channel] = fan_config;
+ }
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -270,6 +290,10 @@ static umode_t max31790_fan_is_visible(const void *_data, u32 attr, int channel)
!(fan_config & MAX31790_FAN_CFG_TACH_INPUT))
return 0644;
return 0;
+ case hwmon_fan_enable:
+ if (channel < NR_CHANNEL)
+ return 0644;
+ return 0;
default:
return 0;
}
@@ -423,12 +447,12 @@ static umode_t max31790_is_visible(const void *data,
static const struct hwmon_channel_info *max31790_info[] = {
HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT | HWMON_F_ENABLE,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
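
The max31790 hunk above wires a new fanN_enable attribute to the chip's TACH input enable bit. In the hwmon core this takes three cooperating pieces: the HWMON_F_ENABLE flag in the channel info (which makes the core create the sysfs file), the is_visible() callback (which grants it 0644), and the hwmon_fan_enable cases in the read/write handlers. A stripped-down sketch of how the first two pieces line up for a single hypothetical channel (foo_* names are placeholders):

#include <linux/hwmon.h>

/* Declaring HWMON_F_ENABLE is what makes the core expose fan1_enable;
 * the driver's read/write handlers then back it with a register bit.
 */
static const struct hwmon_channel_info *foo_info[] = {
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_ENABLE),
	NULL
};

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	if (type == hwmon_fan && attr == hwmon_fan_enable)
		return 0644;	/* readable and writable */
	if (type == hwmon_fan && attr == hwmon_fan_input)
		return 0444;
	return 0;
}
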
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 14bb7726f8d7..9b895402c80d 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -514,7 +514,7 @@ static int max6639_detect(struct i2c_client *client,
if (dev_id != 0x58 || manu_id != 0x4D)
return -ENODEV;
- strlcpy(info->type, "max6639", I2C_NAME_SIZE);
+ strscpy(info->type, "max6639", I2C_NAME_SIZE);
return 0;
}
@@ -571,7 +571,6 @@ static int max6639_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-#ifdef CONFIG_PM_SLEEP
static int max6639_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -609,7 +608,6 @@ static int max6639_resume(struct device *dev)
return i2c_smbus_write_byte_data(client,
MAX6639_REG_GCONFIG, ret & ~MAX6639_GCONFIG_STANDBY);
}
-#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id max6639_id[] = {
{"max6639", 0},
@@ -618,13 +616,13 @@ static const struct i2c_device_id max6639_id[] = {
MODULE_DEVICE_TABLE(i2c, max6639_id);
-static SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max6639",
- .pm = &max6639_pm_ops,
+ .pm = pm_sleep_ptr(&max6639_pm_ops),
},
.probe_new = max6639_probe,
.id_table = max6639_id,
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 699d265aae2e..47ea34ff78f3 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -148,7 +148,7 @@ static int max6642_detect(struct i2c_client *client,
if ((reg_status & 0x2b) != 0x00)
return -ENODEV;
- strlcpy(info->type, "max6642", I2C_NAME_SIZE);
+ strscpy(info->type, "max6642", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 26278b0f17a9..394a4c7e46ab 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -9,6 +9,7 @@
*/
#include <linux/bits.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -17,6 +18,7 @@
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/units.h>
/* PVT Common register */
@@ -30,6 +32,8 @@
#define CH_NUM_MSK GENMASK(31, 24)
#define CH_NUM_SFT 24
+#define VM_NUM_MAX (VM_NUM_MSK >> VM_NUM_SFT)
+
/* Macro Common Register */
#define CLK_SYNTH 0x00
#define CLK_SYNTH_LO_SFT 0
@@ -68,8 +72,9 @@
/* VM Individual Macro Register */
#define VM_COM_REG_SIZE 0x200
-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
+#define VM_SDIF_DATA(vm, ch) \
+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
/* SDA Slave Register */
#define IP_CTRL 0x00
@@ -98,13 +103,67 @@
#define PVT_POLL_DELAY_US 20
#define PVT_POLL_TIMEOUT_US 20000
-#define PVT_H_CONST 100000
-#define PVT_CAL5_CONST 2047
-#define PVT_G_CONST 40000
#define PVT_CONV_BITS 10
#define PVT_N_CONST 90
#define PVT_R_CONST 245805
+#define PVT_TEMP_MIN_mC -40000
+#define PVT_TEMP_MAX_mC 125000
+
+/* Temperature coefficients for series 5 */
+#define PVT_SERIES5_H_CONST 200000
+#define PVT_SERIES5_G_CONST 60000
+#define PVT_SERIES5_J_CONST -100
+#define PVT_SERIES5_CAL5_CONST 4094
+
+/* Temperature coefficients for series 6 */
+#define PVT_SERIES6_H_CONST 249400
+#define PVT_SERIES6_G_CONST 57400
+#define PVT_SERIES6_J_CONST 0
+#define PVT_SERIES6_CAL5_CONST 4096
+
+#define TEMPERATURE_SENSOR_SERIES_5 5
+#define TEMPERATURE_SENSOR_SERIES_6 6
+
+#define PRE_SCALER_X1 1
+#define PRE_SCALER_X2 2
+
+/**
+ * struct voltage_device - VM single input parameters.
+ * @vm_map: Map channel number to VM index.
+ * @ch_map: Map channel number to channel index.
+ * @pre_scaler: Pre-scaler value (1 or 2) used to normalize the voltage output
+ * result.
+ *
+ * The structure provides a mapping from channel number (0..N-1) to VM index
+ * (0..num_vm-1) and channel index (0..ch_num-1), where N = num_vm * ch_num.
+ * It also provides the normalization factor for the VM equation.
+ */
+struct voltage_device {
+ u32 vm_map;
+ u32 ch_map;
+ u32 pre_scaler;
+};
+
+/**
+ * struct voltage_channels - VM channel count.
+ * @total: Total number of channels in all VMs.
+ * @max: Maximum number of channels among all VMs.
+ *
+ * The structure provides channel count information across all VMs.
+ */
+struct voltage_channels {
+ u32 total;
+ u8 max;
+};
+
+struct temp_coeff {
+ u32 h;
+ u32 g;
+ u32 cal5;
+ s32 j;
+};
+
struct pvt_device {
struct regmap *c_map;
struct regmap *t_map;
@@ -112,13 +171,74 @@ struct pvt_device {
struct regmap *v_map;
struct clk *clk;
struct reset_control *rst;
+ struct dentry *dbgfs_dir;
+ struct voltage_device *vd;
+ struct voltage_channels vm_channels;
+ struct temp_coeff ts_coeff;
u32 t_num;
u32 p_num;
u32 v_num;
u32 ip_freq;
- u8 *vm_idx;
};
+static ssize_t pvt_ts_coeff_j_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pvt_device *pvt = file->private_data;
+ unsigned int len;
+ char buf[13];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", pvt->ts_coeff.j);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t pvt_ts_coeff_j_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pvt_device *pvt = file->private_data;
+ int ret;
+
+ ret = kstrtos32_from_user(user_buf, count, 0, &pvt->ts_coeff.j);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations pvt_ts_coeff_j_fops = {
+ .read = pvt_ts_coeff_j_read,
+ .write = pvt_ts_coeff_j_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void devm_pvt_ts_dbgfs_remove(void *data)
+{
+ struct pvt_device *pvt = (struct pvt_device *)data;
+
+ debugfs_remove_recursive(pvt->dbgfs_dir);
+ pvt->dbgfs_dir = NULL;
+}
+
+static int pvt_ts_dbgfs_create(struct pvt_device *pvt, struct device *dev)
+{
+ pvt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+ debugfs_create_u32("ts_coeff_h", 0644, pvt->dbgfs_dir,
+ &pvt->ts_coeff.h);
+ debugfs_create_u32("ts_coeff_g", 0644, pvt->dbgfs_dir,
+ &pvt->ts_coeff.g);
+ debugfs_create_u32("ts_coeff_cal5", 0644, pvt->dbgfs_dir,
+ &pvt->ts_coeff.cal5);
+ debugfs_create_file("ts_coeff_j", 0644, pvt->dbgfs_dir, pvt,
+ &pvt_ts_coeff_j_fops);
+
+ return devm_add_action_or_reset(dev, devm_pvt_ts_dbgfs_remove, pvt);
+}
+
static umode_t pvt_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -137,13 +257,28 @@ static umode_t pvt_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
+static long pvt_calc_temp(struct pvt_device *pvt, u32 nbs)
+{
+ /*
+ * Convert the register value to degrees centigrade temperature:
+ * T = G + H * (n / cal5 - 0.5) + J * F
+ */
+ struct temp_coeff *ts_coeff = &pvt->ts_coeff;
+
+ s64 tmp = ts_coeff->g +
+ div_s64(ts_coeff->h * (s64)nbs, ts_coeff->cal5) -
+ ts_coeff->h / 2 +
+ div_s64(ts_coeff->j * (s64)pvt->ip_freq, HZ_PER_MHZ);
+
+ return clamp_val(tmp, PVT_TEMP_MIN_mC, PVT_TEMP_MAX_mC);
+}
+
static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *t_map = pvt->t_map;
u32 stat, nbs;
int ret;
- u64 tmp;
switch (attr) {
case hwmon_temp_input:
@@ -155,7 +290,7 @@ static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
return ret;
ret = regmap_read(t_map, SDIF_DATA(channel), &nbs);
- if(ret < 0)
+ if (ret < 0)
return ret;
nbs &= SAMPLE_DATA_MSK;
@@ -164,9 +299,7 @@ static int pvt_read_temp(struct device *dev, u32 attr, int channel, long *val)
* Convert the register value to
* degrees centigrade temperature
*/
- tmp = nbs * PVT_H_CONST;
- do_div(tmp, PVT_CAL5_CONST);
- *val = tmp - PVT_G_CONST - pvt->ip_freq;
+ *val = pvt_calc_temp(pvt, nbs);
return 0;
default:
@@ -178,14 +311,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *v_map = pvt->v_map;
- u32 n, stat;
- u8 vm_idx;
+ u32 n, stat, pre_scaler;
+ u8 vm_idx, ch_idx;
int ret;
- if (channel >= pvt->v_num)
+ if (channel >= pvt->vm_channels.total)
return -EINVAL;
- vm_idx = pvt->vm_idx[channel];
+ vm_idx = pvt->vd[channel].vm_map;
+ ch_idx = pvt->vd[channel].ch_map;
switch (attr) {
case hwmon_in_input:
@@ -196,13 +330,25 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
if (ret)
return ret;
- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
- if(ret < 0)
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
+ if (ret < 0)
return ret;
n &= SAMPLE_DATA_MSK;
- /* Convert the N bitstream count into voltage */
- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+ pre_scaler = pvt->vd[channel].pre_scaler;
+ /*
+ * Convert the N bitstream count into voltage.
+ * To support negative voltage calculation on 64-bit machines,
+ * n must be cast to long, since n and *val differ both in
+ * signedness and in size.
+ * Division is used instead of right shift, because for signed
+ * numbers, the sign bit is used to fill the vacated bit
+ * positions, and if the number is negative, 1 is used.
+ * BIT(x) may not be used instead of (1 << x) because it's
+ * unsigned.
+ */
+ *val = pre_scaler * (PVT_N_CONST * (long)n - PVT_R_CONST) /
+ (1 << PVT_CONV_BITS);
return 0;
default:
@@ -277,23 +423,23 @@ static int pvt_init(struct pvt_device *pvt)
(key >> 1) << CLK_SYNTH_HI_SFT |
(key >> 1) << CLK_SYNTH_HOLD_SFT | CLK_SYNTH_EN;
- pvt->ip_freq = sys_freq * 100 / (key + 2);
+ pvt->ip_freq = clk_get_rate(pvt->clk) / (key + 2);
if (t_num) {
ret = regmap_write(t_map, SDIF_SMPL_CTRL, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(t_map, SDIF_HALT, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(t_map, CLK_SYNTH, clk_synth);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(t_map, SDIF_DISABLE, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
@@ -306,7 +452,7 @@ static int pvt_init(struct pvt_device *pvt)
val = CFG0_MODE_2 | CFG0_PARALLEL_OUT | CFG0_12_BIT |
IP_CFG << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(t_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
@@ -319,7 +465,7 @@ static int pvt_init(struct pvt_device *pvt)
val = POWER_DELAY_CYCLE_256 | IP_TMR << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(t_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(t_map, SDIF_STAT,
@@ -333,39 +479,52 @@ static int pvt_init(struct pvt_device *pvt)
IP_CTRL << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(t_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
}
if (p_num) {
ret = regmap_write(p_map, SDIF_HALT, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(p_map, SDIF_DISABLE, BIT(p_num) - 1);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(p_map, CLK_SYNTH, clk_synth);
- if(ret < 0)
+ if (ret < 0)
return ret;
}
if (v_num) {
ret = regmap_write(v_map, SDIF_SMPL_CTRL, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(v_map, SDIF_HALT, 0x0);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(v_map, CLK_SYNTH, clk_synth);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_write(v_map, SDIF_DISABLE, 0x0);
- if(ret < 0)
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ val = (BIT(pvt->vm_channels.max) - 1) | VM_CH_INIT |
+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
@@ -379,7 +538,7 @@ static int pvt_init(struct pvt_device *pvt)
CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(v_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
@@ -392,7 +551,7 @@ static int pvt_init(struct pvt_device *pvt)
val = POWER_DELAY_CYCLE_64 | IP_TMR << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(v_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
@@ -406,7 +565,7 @@ static int pvt_init(struct pvt_device *pvt)
IP_CTRL << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
ret = regmap_write(v_map, SDIF_W, val);
- if(ret < 0)
+ if (ret < 0)
return ret;
}
@@ -451,46 +610,163 @@ static int pvt_get_regmap(struct platform_device *pdev, char *reg_name,
return 0;
}
-static void pvt_clk_disable(void *data)
+static void pvt_reset_control_assert(void *data)
{
struct pvt_device *pvt = data;
- clk_disable_unprepare(pvt->clk);
+ reset_control_assert(pvt->rst);
}
-static int pvt_clk_enable(struct device *dev, struct pvt_device *pvt)
+static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt)
{
int ret;
- ret = clk_prepare_enable(pvt->clk);
+ ret = reset_control_deassert(pvt->rst);
if (ret)
return ret;
- return devm_add_action_or_reset(dev, pvt_clk_disable, pvt);
+ return devm_add_action_or_reset(dev, pvt_reset_control_assert, pvt);
}
-static void pvt_reset_control_assert(void *data)
+static int pvt_get_active_channel(struct device *dev, struct pvt_device *pvt,
+ u32 vm_num, u32 ch_num, u8 *vm_idx)
{
- struct pvt_device *pvt = data;
+ u8 vm_active_ch[VM_NUM_MAX];
+ int ret, i, j, k;
- reset_control_assert(pvt->rst);
+ ret = device_property_read_u8_array(dev, "moortec,vm-active-channels",
+ vm_active_ch, vm_num);
+ if (ret) {
+ /*
+ * Incase "moortec,vm-active-channels" property is not defined,
+ * we assume each VM sensor has all of its channels active.
+ */
+ memset(vm_active_ch, ch_num, vm_num);
+ pvt->vm_channels.max = ch_num;
+ pvt->vm_channels.total = ch_num * vm_num;
+ } else {
+ for (i = 0; i < vm_num; i++) {
+ if (vm_active_ch[i] > ch_num) {
+ dev_err(dev, "invalid active channels: %u\n",
+ vm_active_ch[i]);
+ return -EINVAL;
+ }
+
+ pvt->vm_channels.total += vm_active_ch[i];
+
+ if (vm_active_ch[i] > pvt->vm_channels.max)
+ pvt->vm_channels.max = vm_active_ch[i];
+ }
+ }
+
+ /*
+ * Map each channel number to a VM index and a channel index.
+ * Example - 3 VMs, "moortec,vm-active-channels" = <5 2 4>:
+ * vm_map = [0 0 0 0 0 1 1 2 2 2 2]
+ * ch_map = [0 1 2 3 4 0 1 0 1 2 3]
+ */
+ pvt->vd = devm_kcalloc(dev, pvt->vm_channels.total, sizeof(*pvt->vd),
+ GFP_KERNEL);
+ if (!pvt->vd)
+ return -ENOMEM;
+
+ k = 0;
+ for (i = 0; i < vm_num; i++) {
+ for (j = 0; j < vm_active_ch[i]; j++) {
+ pvt->vd[k].vm_map = vm_idx[i];
+ pvt->vd[k].ch_map = j;
+ k++;
+ }
+ }
+
+ return 0;
}
-static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt)
+static int pvt_get_pre_scaler(struct device *dev, struct pvt_device *pvt)
{
+ u8 *pre_scaler_ch_list;
+ int i, ret, num_ch;
+ u32 channel;
+
+ /* Set default pre-scaler value to be 1. */
+ for (i = 0; i < pvt->vm_channels.total; i++)
+ pvt->vd[i].pre_scaler = PRE_SCALER_X1;
+
+ /* Get number of channels configured in "moortec,vm-pre-scaler-x2". */
+ num_ch = device_property_count_u8(dev, "moortec,vm-pre-scaler-x2");
+ if (num_ch <= 0)
+ return 0;
+
+ pre_scaler_ch_list = kcalloc(num_ch, sizeof(*pre_scaler_ch_list),
+ GFP_KERNEL);
+ if (!pre_scaler_ch_list)
+ return -ENOMEM;
+
+ /* Get list of all channels that have pre-scaler of 2. */
+ ret = device_property_read_u8_array(dev, "moortec,vm-pre-scaler-x2",
+ pre_scaler_ch_list, num_ch);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < num_ch; i++) {
+ channel = pre_scaler_ch_list[i];
+ pvt->vd[channel].pre_scaler = PRE_SCALER_X2;
+ }
+
+out:
+ kfree(pre_scaler_ch_list);
+
+ return ret;
+}
+
+static int pvt_set_temp_coeff(struct device *dev, struct pvt_device *pvt)
+{
+ struct temp_coeff *ts_coeff = &pvt->ts_coeff;
+ u32 series;
int ret;
- ret = reset_control_deassert(pvt->rst);
+ /* In case the ts-series property is not defined, default to series 5. */
+ ret = device_property_read_u32(dev, "moortec,ts-series", &series);
if (ret)
- return ret;
+ series = TEMPERATURE_SENSOR_SERIES_5;
+
+ switch (series) {
+ case TEMPERATURE_SENSOR_SERIES_5:
+ ts_coeff->h = PVT_SERIES5_H_CONST;
+ ts_coeff->g = PVT_SERIES5_G_CONST;
+ ts_coeff->j = PVT_SERIES5_J_CONST;
+ ts_coeff->cal5 = PVT_SERIES5_CAL5_CONST;
+ break;
+ case TEMPERATURE_SENSOR_SERIES_6:
+ ts_coeff->h = PVT_SERIES6_H_CONST;
+ ts_coeff->g = PVT_SERIES6_G_CONST;
+ ts_coeff->j = PVT_SERIES6_J_CONST;
+ ts_coeff->cal5 = PVT_SERIES6_CAL5_CONST;
+ break;
+ default:
+ dev_err(dev, "invalid temperature sensor series (%u)\n",
+ series);
+ return -EINVAL;
+ }
- return devm_add_action_or_reset(dev, pvt_reset_control_assert, pvt);
+ dev_dbg(dev, "temperature sensor series = %u\n", series);
+
+ /* Override ts-coeff-h/g/j/cal5 if they are defined. */
+ device_property_read_u32(dev, "moortec,ts-coeff-h", &ts_coeff->h);
+ device_property_read_u32(dev, "moortec,ts-coeff-g", &ts_coeff->g);
+ device_property_read_u32(dev, "moortec,ts-coeff-j", &ts_coeff->j);
+ device_property_read_u32(dev, "moortec,ts-coeff-cal5", &ts_coeff->cal5);
+
+ dev_dbg(dev, "ts-coeff: h = %u, g = %u, j = %d, cal5 = %u\n",
+ ts_coeff->h, ts_coeff->g, ts_coeff->j, ts_coeff->cal5);
+
+ return 0;
}
static int mr75203_probe(struct platform_device *pdev)
{
+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
const struct hwmon_channel_info **pvt_info;
- u32 ts_num, vm_num, pd_num, val, index, i;
struct device *dev = &pdev->dev;
u32 *temp_config, *in_config;
struct device *hwmon_dev;
@@ -505,32 +781,30 @@ static int mr75203_probe(struct platform_device *pdev)
if (ret)
return ret;
- pvt->clk = devm_clk_get(dev, NULL);
+ pvt->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(pvt->clk))
return dev_err_probe(dev, PTR_ERR(pvt->clk), "failed to get clock\n");
- ret = pvt_clk_enable(dev, pvt);
- if (ret) {
- dev_err(dev, "failed to enable clock\n");
- return ret;
- }
-
- pvt->rst = devm_reset_control_get_exclusive(dev, NULL);
+ pvt->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(pvt->rst))
return dev_err_probe(dev, PTR_ERR(pvt->rst),
"failed to get reset control\n");
- ret = pvt_reset_control_deassert(dev, pvt);
- if (ret)
- return dev_err_probe(dev, ret, "cannot deassert reset control\n");
+ if (pvt->rst) {
+ ret = pvt_reset_control_deassert(dev, pvt);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "cannot deassert reset control\n");
+ }
ret = regmap_read(pvt->c_map, PVT_IP_CONFIG, &val);
- if(ret < 0)
+ if (ret < 0)
return ret;
ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
pvt->t_num = ts_num;
pvt->p_num = pd_num;
pvt->v_num = vm_num;
@@ -553,6 +827,10 @@ static int mr75203_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = pvt_set_temp_coeff(dev, pvt);
+ if (ret)
+ return ret;
+
temp_config = devm_kcalloc(dev, ts_num + 1,
sizeof(*temp_config), GFP_KERNEL);
if (!temp_config)
@@ -561,6 +839,8 @@ static int mr75203_probe(struct platform_device *pdev)
memset32(temp_config, HWMON_T_INPUT, ts_num);
pvt_temp.config = temp_config;
pvt_info[index++] = &pvt_temp;
+
+ pvt_ts_dbgfs_create(pvt, dev);
}
if (pd_num) {
@@ -570,44 +850,45 @@ static int mr75203_probe(struct platform_device *pdev)
}
if (vm_num) {
- u32 num = vm_num;
+ u8 vm_idx[VM_NUM_MAX];
ret = pvt_get_regmap(pdev, "vm", pvt);
if (ret)
return ret;
- pvt->vm_idx = devm_kcalloc(dev, vm_num, sizeof(*pvt->vm_idx),
- GFP_KERNEL);
- if (!pvt->vm_idx)
- return -ENOMEM;
-
- ret = device_property_read_u8_array(dev, "intel,vm-map",
- pvt->vm_idx, vm_num);
+ ret = device_property_read_u8_array(dev, "intel,vm-map", vm_idx,
+ vm_num);
if (ret) {
- num = 0;
+ /*
+ * In case the intel,vm-map property is not defined, we
+ * assume incremental channel numbers.
+ */
+ for (i = 0; i < vm_num; i++)
+ vm_idx[i] = i;
} else {
for (i = 0; i < vm_num; i++)
- if (pvt->vm_idx[i] >= vm_num ||
- pvt->vm_idx[i] == 0xff) {
- num = i;
+ if (vm_idx[i] >= vm_num || vm_idx[i] == 0xff) {
+ pvt->v_num = i;
+ vm_num = i;
break;
}
}
- /*
- * Incase intel,vm-map property is not defined, we assume
- * incremental channel numbers.
- */
- for (i = num; i < vm_num; i++)
- pvt->vm_idx[i] = i;
+ ret = pvt_get_active_channel(dev, pvt, vm_num, ch_num, vm_idx);
+ if (ret)
+ return ret;
+
+ ret = pvt_get_pre_scaler(dev, pvt);
+ if (ret)
+ return ret;
- in_config = devm_kcalloc(dev, num + 1,
+ in_config = devm_kcalloc(dev, pvt->vm_channels.total + 1,
sizeof(*in_config), GFP_KERNEL);
if (!in_config)
return -ENOMEM;
- memset32(in_config, HWMON_I_INPUT, num);
- in_config[num] = 0;
+ memset32(in_config, HWMON_I_INPUT, pvt->vm_channels.total);
+ in_config[pvt->vm_channels.total] = 0;
pvt_in.config = in_config;
pvt_info[index++] = &pvt_in;
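
The mr75203 temperature rework above replaces the hard-coded constants with per-series coefficients and implements T = G + H * (n / cal5 - 0.5) + J * F in millidegrees, where n is the raw sample and F is the IP clock frequency in MHz, clamped to the -40..125 degC range. A standalone sketch of the same arithmetic for the series-5 coefficients taken from the patch; the sample values in main() are illustrative only:

#include <stdio.h>

/* Series-5 coefficients from the patch; the result is in millidegrees C. */
#define H_CONST		200000
#define G_CONST		60000
#define J_CONST		(-100)
#define CAL5_CONST	4094
#define HZ_PER_MHZ	1000000LL

/* T = G + H * (n / cal5 - 0.5) + J * F, with F expressed in MHz. */
static long pvt_calc_temp_mc(long nbs, long ip_freq_hz)
{
	long long t = G_CONST
		    + (long long)H_CONST * nbs / CAL5_CONST
		    - H_CONST / 2
		    + J_CONST * ip_freq_hz / HZ_PER_MHZ;

	if (t < -40000)
		t = -40000;
	else if (t > 125000)
		t = 125000;
	return (long)t;
}

int main(void)
{
	/* mid-scale sample at a 4 MHz IP clock: 60000 + 100000 - 100000 - 400 */
	printf("%ld\n", pvt_calc_temp_mc(2047, 4000000));	/* prints 59600 */
	return 0;
}
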
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 6a9f420e7d32..a872f783e9cc 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -412,7 +412,7 @@ nct6683_create_attr_group(struct device *dev,
struct sensor_device_attr_u *su;
struct attribute_group *group;
struct attribute **attrs;
- int i, j, count;
+ int i, count;
if (repeat <= 0)
return ERR_PTR(-EINVAL);
@@ -443,7 +443,7 @@ nct6683_create_attr_group(struct device *dev,
for (i = 0; i < repeat; i++) {
t = tg->templates;
- for (j = 0; *t != NULL; j++) {
+ while (*t) {
snprintf(su->name, sizeof(su->name),
(*t)->dev_attr.attr.name, tg->base + i);
if ((*t)->s2) {
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 41c97cfacfb8..b34783784213 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -355,7 +355,7 @@ static void nct6791_enable_io_mapping(struct nct6775_sio_data *sio_data)
}
}
-static int __maybe_unused nct6775_suspend(struct device *dev)
+static int nct6775_suspend(struct device *dev)
{
int err;
u16 tmp;
@@ -386,7 +386,7 @@ out:
return err;
}
-static int __maybe_unused nct6775_resume(struct device *dev)
+static int nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
@@ -467,7 +467,7 @@ abort:
return err;
}
-static SIMPLE_DEV_PM_OPS(nct6775_dev_pm_ops, nct6775_suspend, nct6775_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(nct6775_dev_pm_ops, nct6775_suspend, nct6775_resume);
static void
nct6775_check_fan_inputs(struct nct6775_data *data, struct nct6775_sio_data *sio_data)
@@ -934,7 +934,7 @@ static int nct6775_platform_probe(struct platform_device *pdev)
static struct platform_driver nct6775_driver = {
.driver = {
.name = DRVNAME,
- .pm = &nct6775_dev_pm_ops,
+ .pm = pm_sleep_ptr(&nct6775_dev_pm_ops),
},
.probe = nct6775_platform_probe,
};
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index d1eeef02b6dc..a175f8283695 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -1038,7 +1038,7 @@ static int nct7802_detect(struct i2c_client *client,
if (reg < 0 || (reg & 0x3f))
return -ENODEV;
- strlcpy(info->type, "nct7802", I2C_NAME_SIZE);
+ strscpy(info->type, "nct7802", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b1c837fc407a..ecc5db0011a3 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -798,7 +798,7 @@ static int nct7904_detect(struct i2c_client *client,
(i2c_smbus_read_byte_data(client, BANK_SEL_REG) & 0xf8) != 0x00)
return -ENODEV;
- strlcpy(info->type, "nct7904", I2C_NAME_SIZE);
+ strscpy(info->type, "nct7904", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
index dd892ff5a3e8..533f38b0b4e9 100644
--- a/drivers/hwmon/nzxt-smart2.c
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -787,6 +787,7 @@ static void nzxt_smart2_hid_remove(struct hid_device *hdev)
static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
{ HID_USB_DEVICE(0x1e71, 0x2006) }, /* NZXT Smart Device V2 */
{ HID_USB_DEVICE(0x1e71, 0x200d) }, /* NZXT Smart Device V2 */
+ { HID_USB_DEVICE(0x1e71, 0x200f) }, /* NZXT Smart Device V2 */
{ HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 45407b12db4b..dd690f700d49 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -10,6 +10,7 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/property.h>
#include <linux/sysfs.h>
#include <asm/unaligned.h>
@@ -1216,8 +1217,16 @@ int occ_setup(struct occ *occ)
occ->groups[0] = &occ->group;
rc = occ_setup_sysfs(occ);
- if (rc)
+ if (rc) {
dev_err(occ->bus_dev, "failed to setup sysfs: %d\n", rc);
+ return rc;
+ }
+
+ if (!device_property_read_bool(occ->bus_dev, "ibm,no-poll-on-init")) {
+ rc = occ_active(occ, true);
+ if (rc)
+ occ_shutdown_sysfs(occ);
+ }
return rc;
}
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index b221be1f35f3..9e1744fccb35 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -227,13 +227,11 @@ static int p8_i2c_occ_probe(struct i2c_client *client)
return occ_setup(occ);
}
-static int p8_i2c_occ_remove(struct i2c_client *client)
+static void p8_i2c_occ_remove(struct i2c_client *client)
{
struct occ *occ = dev_get_drvdata(&client->dev);
occ_shutdown(occ);
-
- return 0;
}
static const struct of_device_id p8_i2c_occ_of_match[] = {
diff --git a/drivers/hwmon/occ/p9_sbe.c b/drivers/hwmon/occ/p9_sbe.c
index c1e0a1d96cd4..96521363b696 100644
--- a/drivers/hwmon/occ/p9_sbe.c
+++ b/drivers/hwmon/occ/p9_sbe.c
@@ -7,6 +7,7 @@
#include <linux/fsi-occ.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/string.h>
@@ -14,6 +15,8 @@
#include "common.h"
+#define OCC_CHECKSUM_RETRIES 3
+
struct p9_sbe_occ {
struct occ occ;
bool sbe_error;
@@ -80,18 +83,23 @@ done:
static int p9_sbe_occ_send_cmd(struct occ *occ, u8 *cmd, size_t len,
void *resp, size_t resp_len)
{
+ size_t original_resp_len = resp_len;
struct p9_sbe_occ *ctx = to_p9_sbe_occ(occ);
- int rc;
+ int rc, i;
- rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
- if (rc < 0) {
+ for (i = 0; i < OCC_CHECKSUM_RETRIES; ++i) {
+ rc = fsi_occ_submit(ctx->sbe, cmd, len, resp, &resp_len);
+ if (rc >= 0)
+ break;
if (resp_len) {
if (p9_sbe_occ_save_ffdc(ctx, resp, resp_len))
sysfs_notify(&occ->bus_dev->kobj, NULL,
bin_attr_ffdc.attr.name);
+ return rc;
}
-
- return rc;
+ if (rc != -EBADE)
+ return rc;
+ resp_len = original_resp_len;
}
switch (((struct occ_response *)resp)->return_status) {
@@ -174,9 +182,17 @@ static int p9_sbe_occ_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id p9_sbe_occ_of_match[] = {
+ { .compatible = "ibm,p9-occ-hwmon" },
+ { .compatible = "ibm,p10-occ-hwmon" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, p9_sbe_occ_of_match);
+
static struct platform_driver p9_sbe_occ_driver = {
.driver = {
.name = "occ-hwmon",
+ .of_match_table = p9_sbe_occ_of_match,
},
.probe = p9_sbe_occ_probe,
.remove = p9_sbe_occ_remove,
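
The p9_sbe_occ_send_cmd() change above retries the SBE transfer up to OCC_CHECKSUM_RETRIES times, but only for -EBADE (the checksum-failure case), restoring resp_len before each attempt because a failed submission may have rewritten it; any other error, or an error that arrived with FFDC data, still returns immediately. A simplified standalone sketch of that retry shape, dropping the FFDC handling; occ_submit_once() is a hypothetical stand-in for fsi_occ_submit():

#include <errno.h>
#include <stddef.h>

#define MAX_RETRIES	3

/* Hypothetical one-shot transfer; may shrink *resp_len on success and
 * clobber it on failure, like fsi_occ_submit().
 */
int occ_submit_once(const void *cmd, size_t len, void *resp, size_t *resp_len);

static int submit_with_retries(const void *cmd, size_t len,
			       void *resp, size_t resp_len)
{
	size_t original_resp_len = resp_len;
	int rc, i;

	for (i = 0; i < MAX_RETRIES; i++) {
		rc = occ_submit_once(cmd, len, resp, &resp_len);
		if (rc >= 0)
			return rc;		/* success */
		if (rc != -EBADE)
			return rc;		/* only checksum errors retry */
		resp_len = original_resp_len;	/* undo any clobbering */
	}

	return rc;				/* out of checksum retries */
}
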
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 0828436a1f6c..a4adc8bd531f 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -35,6 +35,18 @@
#include <linux/acpi.h>
#include <linux/io.h>
+#define DRIVER_NAME "pc87360"
+
+/* (temp & vin) channel conversion status register flags (pdf sec.11.5.12) */
+#define CHAN_CNVRTD 0x80 /* new data ready */
+#define CHAN_ENA 0x01 /* enabled channel (temp or vin) */
+#define CHAN_ALM_ENA 0x10 /* propagate to alarms-reg ?? (chk val!) */
+#define CHAN_READY (CHAN_ENA|CHAN_CNVRTD) /* sample ready mask */
+
+#define TEMP_OTS_OE 0x20 /* OTS Output Enable */
+#define VIN_RW1C_MASK (CHAN_READY|CHAN_ALM_MAX|CHAN_ALM_MIN) /* 0x87 */
+#define TEMP_RW1C_MASK (VIN_RW1C_MASK|TEMP_ALM_CRIT|TEMP_FAULT) /* 0xCF */
+
static u8 devid;
static struct platform_device *pdev;
static unsigned short extra_isa[3];
@@ -211,183 +223,181 @@ struct pc87360_data {
};
/*
- * Functions declaration
+ * ldi is the logical device index
+ * bank is for voltages and temperatures only
*/
-
-static int pc87360_probe(struct platform_device *pdev);
-static int pc87360_remove(struct platform_device *pdev);
-
static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg);
-static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg, u8 value);
-static void pc87360_init_device(struct platform_device *pdev,
- int use_thermistors);
-static struct pc87360_data *pc87360_update_device(struct device *dev);
-
-/*
- * Driver data
- */
-
-static struct platform_driver pc87360_driver = {
- .driver = {
- .name = "pc87360",
- },
- .probe = pc87360_probe,
- .remove = pc87360_remove,
-};
+ u8 reg)
+{
+ int res;
-/*
- * Sysfs stuff
- */
+ mutex_lock(&(data->lock));
+ if (bank != NO_BANK)
+ outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
+ res = inb_p(data->address[ldi] + reg);
+ mutex_unlock(&(data->lock));
-static ssize_t fan_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[attr->index],
- FAN_DIV_FROM_REG(data->fan_status[attr->index])));
-}
-static ssize_t fan_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[attr->index],
- FAN_DIV_FROM_REG(data->fan_status[attr->index])));
-}
-static ssize_t fan_div_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n",
- FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+ return res;
}
-static ssize_t fan_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+
+static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
+ u8 reg, u8 value)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n",
- FAN_STATUS_FROM_REG(data->fan_status[attr->index]));
+ mutex_lock(&(data->lock));
+ if (bank != NO_BANK)
+ outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
+ outb_p(value, data->address[ldi] + reg);
+ mutex_unlock(&(data->lock));
}
-static ssize_t fan_min_store(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+
+static void pc87360_autodiv(struct device *dev, int nr)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
- long fan_min;
- int err;
-
- err = kstrtol(buf, 10, &fan_min);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- fan_min = FAN_TO_REG(fan_min,
- FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+ u8 old_min = data->fan_min[nr];
- /* If it wouldn't fit, change clock divisor */
- while (fan_min > 255
- && (data->fan_status[attr->index] & 0x60) != 0x60) {
- fan_min >>= 1;
- data->fan[attr->index] >>= 1;
- data->fan_status[attr->index] += 0x20;
+ /* Increase clock divider if needed and possible */
+ if ((data->fan_status[nr] & 0x04) /* overflow flag */
+ || (data->fan[nr] >= 224)) { /* next to overflow */
+ if ((data->fan_status[nr] & 0x60) != 0x60) {
+ data->fan_status[nr] += 0x20;
+ data->fan_min[nr] >>= 1;
+ data->fan[nr] >>= 1;
+ dev_dbg(dev,
+ "Increasing clock divider to %d for fan %d\n",
+ FAN_DIV_FROM_REG(data->fan_status[nr]), nr + 1);
+ }
+ } else {
+ /* Decrease clock divider if possible */
+ while (!(data->fan_min[nr] & 0x80) /* min "nails" divider */
+ && data->fan[nr] < 85 /* bad accuracy */
+ && (data->fan_status[nr] & 0x60) != 0x00) {
+ data->fan_status[nr] -= 0x20;
+ data->fan_min[nr] <<= 1;
+ data->fan[nr] <<= 1;
+ dev_dbg(dev,
+ "Decreasing clock divider to %d for fan %d\n",
+ FAN_DIV_FROM_REG(data->fan_status[nr]),
+ nr + 1);
+ }
}
- data->fan_min[attr->index] = fan_min > 255 ? 255 : fan_min;
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_MIN(attr->index),
- data->fan_min[attr->index]);
-
- /* Write new divider, preserve alarm bits */
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_STATUS(attr->index),
- data->fan_status[attr->index] & 0xF9);
- mutex_unlock(&data->update_lock);
-
- return count;
-}
-static struct sensor_device_attribute fan_input[] = {
- SENSOR_ATTR_RO(fan1_input, fan_input, 0),
- SENSOR_ATTR_RO(fan2_input, fan_input, 1),
- SENSOR_ATTR_RO(fan3_input, fan_input, 2),
-};
-static struct sensor_device_attribute fan_status[] = {
- SENSOR_ATTR_RO(fan1_status, fan_status, 0),
- SENSOR_ATTR_RO(fan2_status, fan_status, 1),
- SENSOR_ATTR_RO(fan3_status, fan_status, 2),
-};
-static struct sensor_device_attribute fan_div[] = {
- SENSOR_ATTR_RO(fan1_div, fan_div, 0),
- SENSOR_ATTR_RO(fan2_div, fan_div, 1),
- SENSOR_ATTR_RO(fan3_div, fan_div, 2),
-};
-static struct sensor_device_attribute fan_min[] = {
- SENSOR_ATTR_RW(fan1_min, fan_min, 0),
- SENSOR_ATTR_RW(fan2_min, fan_min, 1),
- SENSOR_ATTR_RW(fan3_min, fan_min, 2),
-};
-
-#define FAN_UNIT_ATTRS(X) \
-{ &fan_input[X].dev_attr.attr, \
- &fan_status[X].dev_attr.attr, \
- &fan_div[X].dev_attr.attr, \
- &fan_min[X].dev_attr.attr, \
- NULL \
+ /* Write new fan min if it changed */
+ if (old_min != data->fan_min[nr]) {
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_MIN(nr),
+ data->fan_min[nr]);
+ }
}
-static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n",
- PWM_FROM_REG(data->pwm[attr->index],
- FAN_CONFIG_INVERT(data->fan_conf,
- attr->index)));
-}
-static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static struct pc87360_data *pc87360_update_device(struct device *dev)
{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = dev_get_drvdata(dev);
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
+ u8 i;
mutex_lock(&data->update_lock);
- data->pwm[attr->index] = PWM_TO_REG(val,
- FAN_CONFIG_INVERT(data->fan_conf, attr->index));
- pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(attr->index),
- data->pwm[attr->index]);
- mutex_unlock(&data->update_lock);
- return count;
-}
-static struct sensor_device_attribute pwm[] = {
- SENSOR_ATTR_RW(pwm1, pwm, 0),
- SENSOR_ATTR_RW(pwm2, pwm, 1),
- SENSOR_ATTR_RW(pwm3, pwm, 2),
-};
+ if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
+ dev_dbg(dev, "Data update\n");
-static struct attribute *pc8736x_fan_attr[][5] = {
- FAN_UNIT_ATTRS(0),
- FAN_UNIT_ATTRS(1),
- FAN_UNIT_ATTRS(2)
-};
+ /* Fans */
+ for (i = 0; i < data->fannr; i++) {
+ if (FAN_CONFIG_MONITOR(data->fan_conf, i)) {
+ data->fan_status[i] =
+ pc87360_read_value(data, LD_FAN,
+ NO_BANK, PC87360_REG_FAN_STATUS(i));
+ data->fan[i] = pc87360_read_value(data, LD_FAN,
+ NO_BANK, PC87360_REG_FAN(i));
+ data->fan_min[i] = pc87360_read_value(data,
+ LD_FAN, NO_BANK,
+ PC87360_REG_FAN_MIN(i));
+ /* Change clock divider if needed */
+ pc87360_autodiv(dev, i);
+ /* Clear bits and write new divider */
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_STATUS(i),
+ data->fan_status[i]);
+ }
+ if (FAN_CONFIG_CONTROL(data->fan_conf, i))
+ data->pwm[i] = pc87360_read_value(data, LD_FAN,
+ NO_BANK, PC87360_REG_PWM(i));
+ }
-static const struct attribute_group pc8736x_fan_attr_group[] = {
- { .attrs = pc8736x_fan_attr[0], },
- { .attrs = pc8736x_fan_attr[1], },
- { .attrs = pc8736x_fan_attr[2], },
-};
+ /* Voltages */
+ for (i = 0; i < data->innr; i++) {
+ data->in_status[i] = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ /* Clear bits */
+ pc87360_write_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS,
+ data->in_status[i]);
+ if ((data->in_status[i] & CHAN_READY) == CHAN_READY) {
+ data->in[i] = pc87360_read_value(data, LD_IN,
+ i, PC87365_REG_IN);
+ }
+ if (data->in_status[i] & CHAN_ENA) {
+ data->in_min[i] = pc87360_read_value(data,
+ LD_IN, i,
+ PC87365_REG_IN_MIN);
+ data->in_max[i] = pc87360_read_value(data,
+ LD_IN, i,
+ PC87365_REG_IN_MAX);
+ if (i >= 11)
+ data->in_crit[i-11] =
+ pc87360_read_value(data, LD_IN,
+ i, PC87365_REG_TEMP_CRIT);
+ }
+ }
+ if (data->innr) {
+ data->in_alarms = pc87360_read_value(data, LD_IN,
+ NO_BANK, PC87365_REG_IN_ALARMS1)
+ | ((pc87360_read_value(data, LD_IN,
+ NO_BANK, PC87365_REG_IN_ALARMS2)
+ & 0x07) << 8);
+ data->vid = (data->vid_conf & 0xE0) ?
+ pc87360_read_value(data, LD_IN,
+ NO_BANK, PC87365_REG_VID) : 0x1F;
+ }
+
+ /* Temperatures */
+ for (i = 0; i < data->tempnr; i++) {
+ data->temp_status[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS);
+ /* Clear bits */
+ pc87360_write_value(data, LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS,
+ data->temp_status[i]);
+ if ((data->temp_status[i] & CHAN_READY) == CHAN_READY) {
+ data->temp[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP);
+ }
+ if (data->temp_status[i] & CHAN_ENA) {
+ data->temp_min[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_MIN);
+ data->temp_max[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_MAX);
+ data->temp_crit[i] = pc87360_read_value(data,
+ LD_TEMP, i,
+ PC87365_REG_TEMP_CRIT);
+ }
+ }
+ if (data->tempnr) {
+ data->temp_alarms = pc87360_read_value(data, LD_TEMP,
+ NO_BANK, PC87365_REG_TEMP_ALARMS)
+ & 0x3F;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
static ssize_t in_input_show(struct device *dev,
struct device_attribute *devattr, char *buf)
@@ -397,29 +407,52 @@ static ssize_t in_input_show(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
data->in_vref));
}
-static ssize_t in_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+
+static struct sensor_device_attribute in_input[] = {
+ SENSOR_ATTR_RO(in0_input, in_input, 0),
+ SENSOR_ATTR_RO(in1_input, in_input, 1),
+ SENSOR_ATTR_RO(in2_input, in_input, 2),
+ SENSOR_ATTR_RO(in3_input, in_input, 3),
+ SENSOR_ATTR_RO(in4_input, in_input, 4),
+ SENSOR_ATTR_RO(in5_input, in_input, 5),
+ SENSOR_ATTR_RO(in6_input, in_input, 6),
+ SENSOR_ATTR_RO(in7_input, in_input, 7),
+ SENSOR_ATTR_RO(in8_input, in_input, 8),
+ SENSOR_ATTR_RO(in9_input, in_input, 9),
+ SENSOR_ATTR_RO(in10_input, in_input, 10),
+};
+
+static ssize_t in_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
- data->in_vref));
+ return sprintf(buf, "%u\n", data->in_status[attr->index]);
}
-static ssize_t in_max_show(struct device *dev,
+
+static struct sensor_device_attribute in_status[] = {
+ SENSOR_ATTR_RO(in0_status, in_status, 0),
+ SENSOR_ATTR_RO(in1_status, in_status, 1),
+ SENSOR_ATTR_RO(in2_status, in_status, 2),
+ SENSOR_ATTR_RO(in3_status, in_status, 3),
+ SENSOR_ATTR_RO(in4_status, in_status, 4),
+ SENSOR_ATTR_RO(in5_status, in_status, 5),
+ SENSOR_ATTR_RO(in6_status, in_status, 6),
+ SENSOR_ATTR_RO(in7_status, in_status, 7),
+ SENSOR_ATTR_RO(in8_status, in_status, 8),
+ SENSOR_ATTR_RO(in9_status, in_status, 9),
+ SENSOR_ATTR_RO(in10_status, in_status, 10),
+};
+
+static ssize_t in_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
data->in_vref));
}
-static ssize_t in_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", data->in_status[attr->index]);
-}
+
static ssize_t in_min_store(struct device *dev,
struct device_attribute *devattr, const char *buf,
size_t count)
@@ -440,6 +473,30 @@ static ssize_t in_min_store(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
+
+static struct sensor_device_attribute in_min[] = {
+ SENSOR_ATTR_RW(in0_min, in_min, 0),
+ SENSOR_ATTR_RW(in1_min, in_min, 1),
+ SENSOR_ATTR_RW(in2_min, in_min, 2),
+ SENSOR_ATTR_RW(in3_min, in_min, 3),
+ SENSOR_ATTR_RW(in4_min, in_min, 4),
+ SENSOR_ATTR_RW(in5_min, in_min, 5),
+ SENSOR_ATTR_RW(in6_min, in_min, 6),
+ SENSOR_ATTR_RW(in7_min, in_min, 7),
+ SENSOR_ATTR_RW(in8_min, in_min, 8),
+ SENSOR_ATTR_RW(in9_min, in_min, 9),
+ SENSOR_ATTR_RW(in10_min, in_min, 10),
+};
+
+static ssize_t in_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
+ data->in_vref));
+}
+
static ssize_t in_max_store(struct device *dev,
struct device_attribute *devattr, const char *buf,
size_t count)
@@ -462,45 +519,6 @@ static ssize_t in_max_store(struct device *dev,
return count;
}
-static struct sensor_device_attribute in_input[] = {
- SENSOR_ATTR_RO(in0_input, in_input, 0),
- SENSOR_ATTR_RO(in1_input, in_input, 1),
- SENSOR_ATTR_RO(in2_input, in_input, 2),
- SENSOR_ATTR_RO(in3_input, in_input, 3),
- SENSOR_ATTR_RO(in4_input, in_input, 4),
- SENSOR_ATTR_RO(in5_input, in_input, 5),
- SENSOR_ATTR_RO(in6_input, in_input, 6),
- SENSOR_ATTR_RO(in7_input, in_input, 7),
- SENSOR_ATTR_RO(in8_input, in_input, 8),
- SENSOR_ATTR_RO(in9_input, in_input, 9),
- SENSOR_ATTR_RO(in10_input, in_input, 10),
-};
-static struct sensor_device_attribute in_status[] = {
- SENSOR_ATTR_RO(in0_status, in_status, 0),
- SENSOR_ATTR_RO(in1_status, in_status, 1),
- SENSOR_ATTR_RO(in2_status, in_status, 2),
- SENSOR_ATTR_RO(in3_status, in_status, 3),
- SENSOR_ATTR_RO(in4_status, in_status, 4),
- SENSOR_ATTR_RO(in5_status, in_status, 5),
- SENSOR_ATTR_RO(in6_status, in_status, 6),
- SENSOR_ATTR_RO(in7_status, in_status, 7),
- SENSOR_ATTR_RO(in8_status, in_status, 8),
- SENSOR_ATTR_RO(in9_status, in_status, 9),
- SENSOR_ATTR_RO(in10_status, in_status, 10),
-};
-static struct sensor_device_attribute in_min[] = {
- SENSOR_ATTR_RW(in0_min, in_min, 0),
- SENSOR_ATTR_RW(in1_min, in_min, 1),
- SENSOR_ATTR_RW(in2_min, in_min, 2),
- SENSOR_ATTR_RW(in3_min, in_min, 3),
- SENSOR_ATTR_RW(in4_min, in_min, 4),
- SENSOR_ATTR_RW(in5_min, in_min, 5),
- SENSOR_ATTR_RW(in6_min, in_min, 6),
- SENSOR_ATTR_RW(in7_min, in_min, 7),
- SENSOR_ATTR_RW(in8_min, in_min, 8),
- SENSOR_ATTR_RW(in9_min, in_min, 9),
- SENSOR_ATTR_RW(in10_min, in_min, 10),
-};
static struct sensor_device_attribute in_max[] = {
SENSOR_ATTR_RW(in0_max, in_max, 0),
SENSOR_ATTR_RW(in1_max, in_max, 1),
@@ -534,14 +552,6 @@ static ssize_t in_min_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
}
-static ssize_t in_max_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct pc87360_data *data = pc87360_update_device(dev);
- unsigned nr = to_sensor_dev_attr(devattr)->index;
-
- return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
-}
static struct sensor_device_attribute in_min_alarm[] = {
SENSOR_ATTR_RO(in0_min_alarm, in_min_alarm, 0),
@@ -556,6 +566,16 @@ static struct sensor_device_attribute in_min_alarm[] = {
SENSOR_ATTR_RO(in9_min_alarm, in_min_alarm, 9),
SENSOR_ATTR_RO(in10_min_alarm, in_min_alarm, 10),
};
+
+static ssize_t in_max_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pc87360_data *data = pc87360_update_device(dev);
+ unsigned nr = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
+}
+
static struct sensor_device_attribute in_max_alarm[] = {
SENSOR_ATTR_RO(in0_max_alarm, in_max_alarm, 0),
SENSOR_ATTR_RO(in1_max_alarm, in_max_alarm, 1),
@@ -592,6 +612,7 @@ static ssize_t vrm_show(struct device *dev, struct device_attribute *attr,
struct pc87360_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->vrm);
}
+
static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -648,37 +669,39 @@ static ssize_t therm_input_show(struct device *dev,
return sprintf(buf, "%u\n", IN_FROM_REG(data->in[attr->index],
data->in_vref));
}
-static ssize_t therm_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+
+/*
+ * the +11 term below reflects the fact that VLM units 11,12,13 are
+ * used in the chip to measure voltage across the thermistors
+ */
+static struct sensor_device_attribute therm_input[] = {
+ SENSOR_ATTR_RO(temp4_input, therm_input, 0 + 11),
+ SENSOR_ATTR_RO(temp5_input, therm_input, 1 + 11),
+ SENSOR_ATTR_RO(temp6_input, therm_input, 2 + 11),
+};
+
+static ssize_t therm_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
- data->in_vref));
+ return sprintf(buf, "%u\n", data->in_status[attr->index]);
}
-static ssize_t therm_max_show(struct device *dev,
+
+static struct sensor_device_attribute therm_status[] = {
+ SENSOR_ATTR_RO(temp4_status, therm_status, 0 + 11),
+ SENSOR_ATTR_RO(temp5_status, therm_status, 1 + 11),
+ SENSOR_ATTR_RO(temp6_status, therm_status, 2 + 11),
+};
+
+static ssize_t therm_min_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
- data->in_vref));
-}
-static ssize_t therm_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[attr->index-11],
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[attr->index],
data->in_vref));
}
-static ssize_t therm_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", data->in_status[attr->index]);
-}
static ssize_t therm_min_store(struct device *dev,
struct device_attribute *devattr,
@@ -701,6 +724,21 @@ static ssize_t therm_min_store(struct device *dev,
return count;
}
+static struct sensor_device_attribute therm_min[] = {
+ SENSOR_ATTR_RW(temp4_min, therm_min, 0 + 11),
+ SENSOR_ATTR_RW(temp5_min, therm_min, 1 + 11),
+ SENSOR_ATTR_RW(temp6_min, therm_min, 2 + 11),
+};
+
+static ssize_t therm_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[attr->index],
+ data->in_vref));
+}
+
static ssize_t therm_max_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -721,6 +759,22 @@ static ssize_t therm_max_store(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
+
+static struct sensor_device_attribute therm_max[] = {
+ SENSOR_ATTR_RW(temp4_max, therm_max, 0 + 11),
+ SENSOR_ATTR_RW(temp5_max, therm_max, 1 + 11),
+ SENSOR_ATTR_RW(temp6_max, therm_max, 2 + 11),
+};
+
+static ssize_t therm_crit_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", IN_FROM_REG(data->in_crit[attr->index-11],
+ data->in_vref));
+}
+
static ssize_t therm_crit_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -742,30 +796,6 @@ static ssize_t therm_crit_store(struct device *dev,
return count;
}
-/*
- * the +11 term below reflects the fact that VLM units 11,12,13 are
- * used in the chip to measure voltage across the thermistors
- */
-static struct sensor_device_attribute therm_input[] = {
- SENSOR_ATTR_RO(temp4_input, therm_input, 0 + 11),
- SENSOR_ATTR_RO(temp5_input, therm_input, 1 + 11),
- SENSOR_ATTR_RO(temp6_input, therm_input, 2 + 11),
-};
-static struct sensor_device_attribute therm_status[] = {
- SENSOR_ATTR_RO(temp4_status, therm_status, 0 + 11),
- SENSOR_ATTR_RO(temp5_status, therm_status, 1 + 11),
- SENSOR_ATTR_RO(temp6_status, therm_status, 2 + 11),
-};
-static struct sensor_device_attribute therm_min[] = {
- SENSOR_ATTR_RW(temp4_min, therm_min, 0 + 11),
- SENSOR_ATTR_RW(temp5_min, therm_min, 1 + 11),
- SENSOR_ATTR_RW(temp6_min, therm_min, 2 + 11),
-};
-static struct sensor_device_attribute therm_max[] = {
- SENSOR_ATTR_RW(temp4_max, therm_max, 0 + 11),
- SENSOR_ATTR_RW(temp5_max, therm_max, 1 + 11),
- SENSOR_ATTR_RW(temp6_max, therm_max, 2 + 11),
-};
static struct sensor_device_attribute therm_crit[] = {
SENSOR_ATTR_RW(temp4_crit, therm_crit, 0 + 11),
SENSOR_ATTR_RW(temp5_crit, therm_crit, 1 + 11),
@@ -776,7 +806,6 @@ static struct sensor_device_attribute therm_crit[] = {
* show_therm_min/max_alarm() reads data from the per-channel voltage
* status register (sec 11.5.12)
*/
-
static ssize_t therm_min_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -786,6 +815,13 @@ static ssize_t therm_min_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MIN));
}
+
+static struct sensor_device_attribute therm_min_alarm[] = {
+ SENSOR_ATTR_RO(temp4_min_alarm, therm_min_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_min_alarm, therm_min_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_min_alarm, therm_min_alarm, 2 + 11),
+};
+
static ssize_t therm_max_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -795,6 +831,13 @@ static ssize_t therm_max_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & CHAN_ALM_MAX));
}
+
+static struct sensor_device_attribute therm_max_alarm[] = {
+ SENSOR_ATTR_RO(temp4_max_alarm, therm_max_alarm, 0 + 11),
+ SENSOR_ATTR_RO(temp5_max_alarm, therm_max_alarm, 1 + 11),
+ SENSOR_ATTR_RO(temp6_max_alarm, therm_max_alarm, 2 + 11),
+};
+
static ssize_t therm_crit_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -805,16 +848,6 @@ static ssize_t therm_crit_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->in_status[nr] & TEMP_ALM_CRIT));
}
-static struct sensor_device_attribute therm_min_alarm[] = {
- SENSOR_ATTR_RO(temp4_min_alarm, therm_min_alarm, 0 + 11),
- SENSOR_ATTR_RO(temp5_min_alarm, therm_min_alarm, 1 + 11),
- SENSOR_ATTR_RO(temp6_min_alarm, therm_min_alarm, 2 + 11),
-};
-static struct sensor_device_attribute therm_max_alarm[] = {
- SENSOR_ATTR_RO(temp4_max_alarm, therm_max_alarm, 0 + 11),
- SENSOR_ATTR_RO(temp5_max_alarm, therm_max_alarm, 1 + 11),
- SENSOR_ATTR_RO(temp6_max_alarm, therm_max_alarm, 2 + 11),
-};
static struct sensor_device_attribute therm_crit_alarm[] = {
SENSOR_ATTR_RO(temp4_crit_alarm, therm_crit_alarm, 0 + 11),
SENSOR_ATTR_RO(temp5_crit_alarm, therm_crit_alarm, 1 + 11),
@@ -849,37 +882,32 @@ static ssize_t temp_input_show(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
-static ssize_t temp_min_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[attr->index]));
-}
+static struct sensor_device_attribute temp_input[] = {
+ SENSOR_ATTR_RO(temp1_input, temp_input, 0),
+ SENSOR_ATTR_RO(temp2_input, temp_input, 1),
+ SENSOR_ATTR_RO(temp3_input, temp_input, 2),
+};
-static ssize_t temp_max_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[attr->index]));
+ return sprintf(buf, "%d\n", data->temp_status[attr->index]);
}
-static ssize_t temp_crit_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n",
- TEMP_FROM_REG(data->temp_crit[attr->index]));
-}
+static struct sensor_device_attribute temp_status[] = {
+ SENSOR_ATTR_RO(temp1_status, temp_status, 0),
+ SENSOR_ATTR_RO(temp2_status, temp_status, 1),
+ SENSOR_ATTR_RO(temp3_status, temp_status, 2),
+};
-static ssize_t temp_status_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%d\n", data->temp_status[attr->index]);
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[attr->index]));
}
static ssize_t temp_min_store(struct device *dev,
@@ -903,6 +931,20 @@ static ssize_t temp_min_store(struct device *dev,
return count;
}
+static struct sensor_device_attribute temp_min[] = {
+ SENSOR_ATTR_RW(temp1_min, temp_min, 0),
+ SENSOR_ATTR_RW(temp2_min, temp_min, 1),
+ SENSOR_ATTR_RW(temp3_min, temp_min, 2),
+};
+
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[attr->index]));
+}
+
static ssize_t temp_max_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -924,6 +966,21 @@ static ssize_t temp_max_store(struct device *dev,
return count;
}
+static struct sensor_device_attribute temp_max[] = {
+ SENSOR_ATTR_RW(temp1_max, temp_max, 0),
+ SENSOR_ATTR_RW(temp2_max, temp_max, 1),
+ SENSOR_ATTR_RW(temp3_max, temp_max, 2),
+};
+
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%d\n",
+ TEMP_FROM_REG(data->temp_crit[attr->index]));
+}
+
static ssize_t temp_crit_store(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
@@ -945,47 +1002,17 @@ static ssize_t temp_crit_store(struct device *dev,
return count;
}
-static struct sensor_device_attribute temp_input[] = {
- SENSOR_ATTR_RO(temp1_input, temp_input, 0),
- SENSOR_ATTR_RO(temp2_input, temp_input, 1),
- SENSOR_ATTR_RO(temp3_input, temp_input, 2),
-};
-static struct sensor_device_attribute temp_status[] = {
- SENSOR_ATTR_RO(temp1_status, temp_status, 0),
- SENSOR_ATTR_RO(temp2_status, temp_status, 1),
- SENSOR_ATTR_RO(temp3_status, temp_status, 2),
-};
-static struct sensor_device_attribute temp_min[] = {
- SENSOR_ATTR_RW(temp1_min, temp_min, 0),
- SENSOR_ATTR_RW(temp2_min, temp_min, 1),
- SENSOR_ATTR_RW(temp3_min, temp_min, 2),
-};
-static struct sensor_device_attribute temp_max[] = {
- SENSOR_ATTR_RW(temp1_max, temp_max, 0),
- SENSOR_ATTR_RW(temp2_max, temp_max, 1),
- SENSOR_ATTR_RW(temp3_max, temp_max, 2),
-};
static struct sensor_device_attribute temp_crit[] = {
SENSOR_ATTR_RW(temp1_crit, temp_crit, 0),
SENSOR_ATTR_RW(temp2_crit, temp_crit, 1),
SENSOR_ATTR_RW(temp3_crit, temp_crit, 2),
};
-static ssize_t alarms_temp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pc87360_data *data = pc87360_update_device(dev);
- return sprintf(buf, "%u\n", data->temp_alarms);
-}
-
-static DEVICE_ATTR_RO(alarms_temp);
-
/*
- * show_temp_min/max_alarm() reads data from the per-channel status
+ * temp_min/max_alarm_show() reads data from the per-channel status
* register (sec 12.3.7), not the temp event status registers (sec
* 12.3.2) that show_temp_alarm() reads (via data->temp_alarms)
*/
-
static ssize_t temp_min_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -996,6 +1023,12 @@ static ssize_t temp_min_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MIN));
}
+static struct sensor_device_attribute temp_min_alarm[] = {
+ SENSOR_ATTR_RO(temp1_min_alarm, temp_min_alarm, 0),
+ SENSOR_ATTR_RO(temp2_min_alarm, temp_min_alarm, 1),
+ SENSOR_ATTR_RO(temp3_min_alarm, temp_min_alarm, 2),
+};
+
static ssize_t temp_max_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -1006,6 +1039,12 @@ static ssize_t temp_max_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & CHAN_ALM_MAX));
}
+static struct sensor_device_attribute temp_max_alarm[] = {
+ SENSOR_ATTR_RO(temp1_max_alarm, temp_max_alarm, 0),
+ SENSOR_ATTR_RO(temp2_max_alarm, temp_max_alarm, 1),
+ SENSOR_ATTR_RO(temp3_max_alarm, temp_max_alarm, 2),
+};
+
static ssize_t temp_crit_alarm_show(struct device *dev,
struct device_attribute *devattr,
char *buf)
@@ -1016,18 +1055,6 @@ static ssize_t temp_crit_alarm_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_ALM_CRIT));
}
-static struct sensor_device_attribute temp_min_alarm[] = {
- SENSOR_ATTR_RO(temp1_min_alarm, temp_min_alarm, 0),
- SENSOR_ATTR_RO(temp2_min_alarm, temp_min_alarm, 1),
- SENSOR_ATTR_RO(temp3_min_alarm, temp_min_alarm, 2),
-};
-
-static struct sensor_device_attribute temp_max_alarm[] = {
- SENSOR_ATTR_RO(temp1_max_alarm, temp_max_alarm, 0),
- SENSOR_ATTR_RO(temp2_max_alarm, temp_max_alarm, 1),
- SENSOR_ATTR_RO(temp3_max_alarm, temp_max_alarm, 2),
-};
-
static struct sensor_device_attribute temp_crit_alarm[] = {
SENSOR_ATTR_RO(temp1_crit_alarm, temp_crit_alarm, 0),
SENSOR_ATTR_RO(temp2_crit_alarm, temp_crit_alarm, 1),
@@ -1043,6 +1070,7 @@ static ssize_t temp_fault_show(struct device *dev,
return sprintf(buf, "%u\n", !!(data->temp_status[nr] & TEMP_FAULT));
}
+
static struct sensor_device_attribute temp_fault[] = {
SENSOR_ATTR_RO(temp1_fault, temp_fault, 0),
SENSOR_ATTR_RO(temp2_fault, temp_fault, 1),
@@ -1074,106 +1102,180 @@ static const struct attribute_group pc8736x_temp_attr_group[] = {
{ .attrs = pc8736x_temp_attr[2] }
};
-static ssize_t name_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t alarms_temp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pc87360_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", data->temp_alarms);
}
-static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(alarms_temp);
-/*
- * Device detection, registration and update
- */
+static ssize_t fan_input_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan[attr->index],
+ FAN_DIV_FROM_REG(data->fan_status[attr->index])));
+}
-static int __init pc87360_find(int sioaddr, u8 *devid,
- unsigned short *addresses)
+static struct sensor_device_attribute fan_input[] = {
+ SENSOR_ATTR_RO(fan1_input, fan_input, 0),
+ SENSOR_ATTR_RO(fan2_input, fan_input, 1),
+ SENSOR_ATTR_RO(fan3_input, fan_input, 2),
+};
+
+static ssize_t fan_status_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
- u16 val;
- int i;
- int nrdev; /* logical device count */
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n",
+ FAN_STATUS_FROM_REG(data->fan_status[attr->index]));
+}
- /* No superio_enter */
+static struct sensor_device_attribute fan_status[] = {
+ SENSOR_ATTR_RO(fan1_status, fan_status, 0),
+ SENSOR_ATTR_RO(fan2_status, fan_status, 1),
+ SENSOR_ATTR_RO(fan3_status, fan_status, 2),
+};
- /* Identify device */
- val = force_id ? force_id : superio_inb(sioaddr, DEVID);
- switch (val) {
- case 0xE1: /* PC87360 */
- case 0xE8: /* PC87363 */
- case 0xE4: /* PC87364 */
- nrdev = 1;
- break;
- case 0xE5: /* PC87365 */
- case 0xE9: /* PC87366 */
- nrdev = 3;
- break;
- default:
- superio_exit(sioaddr);
- return -ENODEV;
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n",
+ FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+}
+
+static struct sensor_device_attribute fan_div[] = {
+ SENSOR_ATTR_RO(fan1_div, fan_div, 0),
+ SENSOR_ATTR_RO(fan2_div, fan_div, 1),
+ SENSOR_ATTR_RO(fan3_div, fan_div, 2),
+};
+
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n", FAN_FROM_REG(data->fan_min[attr->index],
+ FAN_DIV_FROM_REG(data->fan_status[attr->index])));
+}
+
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ long fan_min;
+ int err;
+
+ err = kstrtol(buf, 10, &fan_min);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ fan_min = FAN_TO_REG(fan_min,
+ FAN_DIV_FROM_REG(data->fan_status[attr->index]));
+
+ /* If it wouldn't fit, change clock divisor */
+ while (fan_min > 255
+ && (data->fan_status[attr->index] & 0x60) != 0x60) {
+ fan_min >>= 1;
+ data->fan[attr->index] >>= 1;
+ data->fan_status[attr->index] += 0x20;
}
- /* Remember the device id */
- *devid = val;
+ data->fan_min[attr->index] = fan_min > 255 ? 255 : fan_min;
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_MIN(attr->index),
+ data->fan_min[attr->index]);
- for (i = 0; i < nrdev; i++) {
- /* select logical device */
- superio_outb(sioaddr, DEV, logdev[i]);
+ /* Write new divider, preserve alarm bits */
+ pc87360_write_value(data, LD_FAN, NO_BANK,
+ PC87360_REG_FAN_STATUS(attr->index),
+ data->fan_status[attr->index] & 0xF9);
+ mutex_unlock(&data->update_lock);
- val = superio_inb(sioaddr, ACT);
- if (!(val & 0x01)) {
- pr_info("Device 0x%02x not activated\n", logdev[i]);
- continue;
- }
+ return count;
+}
- val = (superio_inb(sioaddr, BASE) << 8)
- | superio_inb(sioaddr, BASE + 1);
- if (!val) {
- pr_info("Base address not set for device 0x%02x\n",
- logdev[i]);
- continue;
- }
+static struct sensor_device_attribute fan_min[] = {
+ SENSOR_ATTR_RW(fan1_min, fan_min, 0),
+ SENSOR_ATTR_RW(fan2_min, fan_min, 1),
+ SENSOR_ATTR_RW(fan3_min, fan_min, 2),
+};
- addresses[i] = val;
+#define FAN_UNIT_ATTRS(X) \
+{ &fan_input[X].dev_attr.attr, \
+ &fan_status[X].dev_attr.attr, \
+ &fan_div[X].dev_attr.attr, \
+ &fan_min[X].dev_attr.attr, \
+ NULL \
+}
- if (i == 0) { /* Fans */
- confreg[0] = superio_inb(sioaddr, 0xF0);
- confreg[1] = superio_inb(sioaddr, 0xF1);
+static struct attribute *pc8736x_fan_attr[][5] = {
+ FAN_UNIT_ATTRS(0),
+ FAN_UNIT_ATTRS(1),
+ FAN_UNIT_ATTRS(2)
+};
- pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
- (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
- (confreg[0] >> 4) & 1);
- pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
- (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
- (confreg[0] >> 7) & 1);
- pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
- confreg[1] & 1, (confreg[1] >> 1) & 1,
- (confreg[1] >> 2) & 1);
- } else if (i == 1) { /* Voltages */
- /* Are we using thermistors? */
- if (*devid == 0xE9) { /* PC87366 */
- /*
- * These registers are not logical-device
- * specific, just that we won't need them if
- * we don't use the VLM device
- */
- confreg[2] = superio_inb(sioaddr, 0x2B);
- confreg[3] = superio_inb(sioaddr, 0x25);
+static const struct attribute_group pc8736x_fan_attr_group[] = {
+ { .attrs = pc8736x_fan_attr[0], },
+ { .attrs = pc8736x_fan_attr[1], },
+ { .attrs = pc8736x_fan_attr[2], },
+};
- if (confreg[2] & 0x40) {
- pr_info("Using thermistors for temperature monitoring\n");
- }
- if (confreg[3] & 0xE0) {
- pr_info("VID inputs routed (mode %u)\n",
- confreg[3] >> 5);
- }
- }
- }
- }
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = pc87360_update_device(dev);
+ return sprintf(buf, "%u\n",
+ PWM_FROM_REG(data->pwm[attr->index],
+ FAN_CONFIG_INVERT(data->fan_conf,
+ attr->index)));
+}
- superio_exit(sioaddr);
- return 0;
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->pwm[attr->index] = PWM_TO_REG(val,
+ FAN_CONFIG_INVERT(data->fan_conf, attr->index));
+ pc87360_write_value(data, LD_FAN, NO_BANK, PC87360_REG_PWM(attr->index),
+ data->pwm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
}
+static struct sensor_device_attribute pwm[] = {
+ SENSOR_ATTR_RW(pwm1, pwm, 0),
+ SENSOR_ATTR_RW(pwm2, pwm, 1),
+ SENSOR_ATTR_RW(pwm3, pwm, 2),
+};
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pc87360_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
static void pc87360_remove_files(struct device *dev)
{
int i;
@@ -1190,6 +1292,146 @@ static void pc87360_remove_files(struct device *dev)
sysfs_remove_group(&dev->kobj, &pc8736x_vin_group);
}
+static void pc87360_init_device(struct platform_device *pdev,
+ int use_thermistors)
+{
+ struct pc87360_data *data = platform_get_drvdata(pdev);
+ int i, nr;
+ const u8 init_in[14] = { 2, 2, 2, 2, 2, 2, 2, 1, 1, 3, 1, 2, 2, 2 };
+ const u8 init_temp[3] = { 2, 2, 1 };
+ u8 reg;
+
+ if (init >= 2 && data->innr) {
+ reg = pc87360_read_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONVRATE);
+ dev_info(&pdev->dev,
+ "VLM conversion set to 1s period, 160us delay\n");
+ pc87360_write_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONVRATE,
+ (reg & 0xC0) | 0x11);
+ }
+
+ nr = data->innr < 11 ? data->innr : 11;
+ for (i = 0; i < nr; i++) {
+ reg = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ dev_dbg(&pdev->dev, "bios in%d status:0x%02x\n", i, reg);
+ if (init >= init_in[i]) {
+ /* Forcibly enable voltage channel */
+ if (!(reg & CHAN_ENA)) {
+ dev_dbg(&pdev->dev, "Forcibly enabling in%d\n",
+ i);
+ pc87360_write_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS,
+ (reg & 0x68) | 0x87);
+ }
+ }
+ }
+
+ /*
+ * We can't blindly trust the Super-I/O space configuration bit,
+ * most BIOS won't set it properly
+ */
+ dev_dbg(&pdev->dev, "bios thermistors:%d\n", use_thermistors);
+ for (i = 11; i < data->innr; i++) {
+ reg = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_TEMP_STATUS);
+ use_thermistors = use_thermistors || (reg & CHAN_ENA);
+ /* thermistors are temp[4-6], measured on vin[11-14] */
+ dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i-7, reg);
+ }
+ dev_dbg(&pdev->dev, "using thermistors:%d\n", use_thermistors);
+
+ i = use_thermistors ? 2 : 0;
+ for (; i < data->tempnr; i++) {
+ reg = pc87360_read_value(data, LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS);
+ dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i + 1, reg);
+ if (init >= init_temp[i]) {
+ /* Forcibly enable temperature channel */
+ if (!(reg & CHAN_ENA)) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling temp%d\n", i + 1);
+ pc87360_write_value(data, LD_TEMP, i,
+ PC87365_REG_TEMP_STATUS,
+ 0xCF);
+ }
+ }
+ }
+
+ if (use_thermistors) {
+ for (i = 11; i < data->innr; i++) {
+ if (init >= init_in[i]) {
+ /*
+ * The pin may already be used by thermal
+ * diodes
+ */
+ reg = pc87360_read_value(data, LD_TEMP,
+ (i - 11) / 2, PC87365_REG_TEMP_STATUS);
+ if (reg & CHAN_ENA) {
+ dev_dbg(&pdev->dev,
+ "Skipping temp%d, pin already in use by temp%d\n",
+ i - 7, (i - 11) / 2);
+ continue;
+ }
+
+ /* Forcibly enable thermistor channel */
+ reg = pc87360_read_value(data, LD_IN, i,
+ PC87365_REG_IN_STATUS);
+ if (!(reg & CHAN_ENA)) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling temp%d\n",
+ i - 7);
+ pc87360_write_value(data, LD_IN, i,
+ PC87365_REG_TEMP_STATUS,
+ (reg & 0x60) | 0x8F);
+ }
+ }
+ }
+ }
+
+ if (data->innr) {
+ reg = pc87360_read_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONFIG);
+ dev_dbg(&pdev->dev, "bios vin-cfg:0x%02x\n", reg);
+ if (reg & CHAN_ENA) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling monitoring (VLM)\n");
+ pc87360_write_value(data, LD_IN, NO_BANK,
+ PC87365_REG_IN_CONFIG,
+ reg & 0xFE);
+ }
+ }
+
+ if (data->tempnr) {
+ reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
+ PC87365_REG_TEMP_CONFIG);
+ dev_dbg(&pdev->dev, "bios temp-cfg:0x%02x\n", reg);
+ if (reg & CHAN_ENA) {
+ dev_dbg(&pdev->dev,
+ "Forcibly enabling monitoring (TMS)\n");
+ pc87360_write_value(data, LD_TEMP, NO_BANK,
+ PC87365_REG_TEMP_CONFIG,
+ reg & 0xFE);
+ }
+
+ if (init >= 2) {
+ /* Chip config as documented by National Semi. */
+ pc87360_write_value(data, LD_TEMP, 0xF, 0xA, 0x08);
+ /*
+ * We voluntarily omit the bank here, in case the
+ * sequence itself matters. It shouldn't be a problem,
+ * since nobody else is supposed to access the
+ * device at that point.
+ */
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xB, 0x04);
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xC, 0x35);
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xD, 0x05);
+ pc87360_write_value(data, LD_TEMP, NO_BANK, 0xE, 0x05);
+ }
+ }
+}
+
static int pc87360_probe(struct platform_device *pdev)
{
int i;
@@ -1239,7 +1481,7 @@ static int pc87360_probe(struct platform_device *pdev)
data->address[i] = extra_isa[i];
if (data->address[i]
&& !devm_request_region(dev, extra_isa[i], PC87360_EXTENT,
- pc87360_driver.driver.name)) {
+ DRIVER_NAME)) {
dev_err(dev,
"Region 0x%x-0x%x already in use!\n",
extra_isa[i], extra_isa[i]+PC87360_EXTENT-1);
@@ -1355,330 +1597,105 @@ static int pc87360_remove(struct platform_device *pdev)
}
/*
- * ldi is the logical device index
- * bank is for voltages and temperatures only
+ * Driver data
*/
-static int pc87360_read_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg)
-{
- int res;
-
- mutex_lock(&(data->lock));
- if (bank != NO_BANK)
- outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
- res = inb_p(data->address[ldi] + reg);
- mutex_unlock(&(data->lock));
+static struct platform_driver pc87360_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = pc87360_probe,
+ .remove = pc87360_remove,
+};
- return res;
-}
+/*
+ * Device detection, registration and update
+ */
-static void pc87360_write_value(struct pc87360_data *data, u8 ldi, u8 bank,
- u8 reg, u8 value)
+static int __init pc87360_find(int sioaddr, u8 *devid,
+ unsigned short *addresses)
{
- mutex_lock(&(data->lock));
- if (bank != NO_BANK)
- outb_p(bank, data->address[ldi] + PC87365_REG_BANK);
- outb_p(value, data->address[ldi] + reg);
- mutex_unlock(&(data->lock));
-}
+ u16 val;
+ int i;
+ int nrdev; /* logical device count */
-/* (temp & vin) channel conversion status register flags (pdf sec.11.5.12) */
-#define CHAN_CNVRTD 0x80 /* new data ready */
-#define CHAN_ENA 0x01 /* enabled channel (temp or vin) */
-#define CHAN_ALM_ENA 0x10 /* propagate to alarms-reg ?? (chk val!) */
-#define CHAN_READY (CHAN_ENA|CHAN_CNVRTD) /* sample ready mask */
+ /* No superio_enter */
-#define TEMP_OTS_OE 0x20 /* OTS Output Enable */
-#define VIN_RW1C_MASK (CHAN_READY|CHAN_ALM_MAX|CHAN_ALM_MIN) /* 0x87 */
-#define TEMP_RW1C_MASK (VIN_RW1C_MASK|TEMP_ALM_CRIT|TEMP_FAULT) /* 0xCF */
+ /* Identify device */
+ val = force_id ? force_id : superio_inb(sioaddr, DEVID);
+ switch (val) {
+ case 0xE1: /* PC87360 */
+ case 0xE8: /* PC87363 */
+ case 0xE4: /* PC87364 */
+ nrdev = 1;
+ break;
+ case 0xE5: /* PC87365 */
+ case 0xE9: /* PC87366 */
+ nrdev = 3;
+ break;
+ default:
+ superio_exit(sioaddr);
+ return -ENODEV;
+ }
+ /* Remember the device id */
+ *devid = val;
-static void pc87360_init_device(struct platform_device *pdev,
- int use_thermistors)
-{
- struct pc87360_data *data = platform_get_drvdata(pdev);
- int i, nr;
- const u8 init_in[14] = { 2, 2, 2, 2, 2, 2, 2, 1, 1, 3, 1, 2, 2, 2 };
- const u8 init_temp[3] = { 2, 2, 1 };
- u8 reg;
+ for (i = 0; i < nrdev; i++) {
+ /* select logical device */
+ superio_outb(sioaddr, DEV, logdev[i]);
- if (init >= 2 && data->innr) {
- reg = pc87360_read_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONVRATE);
- dev_info(&pdev->dev,
- "VLM conversion set to 1s period, 160us delay\n");
- pc87360_write_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONVRATE,
- (reg & 0xC0) | 0x11);
- }
+ val = superio_inb(sioaddr, ACT);
+ if (!(val & 0x01)) {
+ pr_info("Device 0x%02x not activated\n", logdev[i]);
+ continue;
+ }
- nr = data->innr < 11 ? data->innr : 11;
- for (i = 0; i < nr; i++) {
- reg = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS);
- dev_dbg(&pdev->dev, "bios in%d status:0x%02x\n", i, reg);
- if (init >= init_in[i]) {
- /* Forcibly enable voltage channel */
- if (!(reg & CHAN_ENA)) {
- dev_dbg(&pdev->dev, "Forcibly enabling in%d\n",
- i);
- pc87360_write_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS,
- (reg & 0x68) | 0x87);
- }
+ val = (superio_inb(sioaddr, BASE) << 8)
+ | superio_inb(sioaddr, BASE + 1);
+ if (!val) {
+ pr_info("Base address not set for device 0x%02x\n",
+ logdev[i]);
+ continue;
}
- }
- /*
- * We can't blindly trust the Super-I/O space configuration bit,
- * most BIOS won't set it properly
- */
- dev_dbg(&pdev->dev, "bios thermistors:%d\n", use_thermistors);
- for (i = 11; i < data->innr; i++) {
- reg = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_TEMP_STATUS);
- use_thermistors = use_thermistors || (reg & CHAN_ENA);
- /* thermistors are temp[4-6], measured on vin[11-14] */
- dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i-7, reg);
- }
- dev_dbg(&pdev->dev, "using thermistors:%d\n", use_thermistors);
+ addresses[i] = val;
- i = use_thermistors ? 2 : 0;
- for (; i < data->tempnr; i++) {
- reg = pc87360_read_value(data, LD_TEMP, i,
- PC87365_REG_TEMP_STATUS);
- dev_dbg(&pdev->dev, "bios temp%d_status:0x%02x\n", i + 1, reg);
- if (init >= init_temp[i]) {
- /* Forcibly enable temperature channel */
- if (!(reg & CHAN_ENA)) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling temp%d\n", i + 1);
- pc87360_write_value(data, LD_TEMP, i,
- PC87365_REG_TEMP_STATUS,
- 0xCF);
- }
- }
- }
+ if (i == 0) { /* Fans */
+ confreg[0] = superio_inb(sioaddr, 0xF0);
+ confreg[1] = superio_inb(sioaddr, 0xF1);
- if (use_thermistors) {
- for (i = 11; i < data->innr; i++) {
- if (init >= init_in[i]) {
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
+ (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
+ (confreg[0] >> 4) & 1);
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
+ (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
+ (confreg[0] >> 7) & 1);
+ pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
+ confreg[1] & 1, (confreg[1] >> 1) & 1,
+ (confreg[1] >> 2) & 1);
+ } else if (i == 1) { /* Voltages */
+ /* Are we using thermistors? */
+ if (*devid == 0xE9) { /* PC87366 */
/*
- * The pin may already be used by thermal
- * diodes
+ * These registers are not logical-device
+ * specific, just that we won't need them if
+ * we don't use the VLM device
*/
- reg = pc87360_read_value(data, LD_TEMP,
- (i - 11) / 2, PC87365_REG_TEMP_STATUS);
- if (reg & CHAN_ENA) {
- dev_dbg(&pdev->dev,
- "Skipping temp%d, pin already in use by temp%d\n",
- i - 7, (i - 11) / 2);
- continue;
- }
+ confreg[2] = superio_inb(sioaddr, 0x2B);
+ confreg[3] = superio_inb(sioaddr, 0x25);
- /* Forcibly enable thermistor channel */
- reg = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS);
- if (!(reg & CHAN_ENA)) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling temp%d\n",
- i - 7);
- pc87360_write_value(data, LD_IN, i,
- PC87365_REG_TEMP_STATUS,
- (reg & 0x60) | 0x8F);
+ if (confreg[2] & 0x40) {
+ pr_info("Using thermistors for temperature monitoring\n");
+ }
+ if (confreg[3] & 0xE0) {
+ pr_info("VID inputs routed (mode %u)\n",
+ confreg[3] >> 5);
}
}
}
}
- if (data->innr) {
- reg = pc87360_read_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONFIG);
- dev_dbg(&pdev->dev, "bios vin-cfg:0x%02x\n", reg);
- if (reg & CHAN_ENA) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling monitoring (VLM)\n");
- pc87360_write_value(data, LD_IN, NO_BANK,
- PC87365_REG_IN_CONFIG,
- reg & 0xFE);
- }
- }
-
- if (data->tempnr) {
- reg = pc87360_read_value(data, LD_TEMP, NO_BANK,
- PC87365_REG_TEMP_CONFIG);
- dev_dbg(&pdev->dev, "bios temp-cfg:0x%02x\n", reg);
- if (reg & CHAN_ENA) {
- dev_dbg(&pdev->dev,
- "Forcibly enabling monitoring (TMS)\n");
- pc87360_write_value(data, LD_TEMP, NO_BANK,
- PC87365_REG_TEMP_CONFIG,
- reg & 0xFE);
- }
-
- if (init >= 2) {
- /* Chip config as documented by National Semi. */
- pc87360_write_value(data, LD_TEMP, 0xF, 0xA, 0x08);
- /*
- * We voluntarily omit the bank here, in case the
- * sequence itself matters. It shouldn't be a problem,
- * since nobody else is supposed to access the
- * device at that point.
- */
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xB, 0x04);
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xC, 0x35);
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xD, 0x05);
- pc87360_write_value(data, LD_TEMP, NO_BANK, 0xE, 0x05);
- }
- }
-}
-
-static void pc87360_autodiv(struct device *dev, int nr)
-{
- struct pc87360_data *data = dev_get_drvdata(dev);
- u8 old_min = data->fan_min[nr];
-
- /* Increase clock divider if needed and possible */
- if ((data->fan_status[nr] & 0x04) /* overflow flag */
- || (data->fan[nr] >= 224)) { /* next to overflow */
- if ((data->fan_status[nr] & 0x60) != 0x60) {
- data->fan_status[nr] += 0x20;
- data->fan_min[nr] >>= 1;
- data->fan[nr] >>= 1;
- dev_dbg(dev,
- "Increasing clock divider to %d for fan %d\n",
- FAN_DIV_FROM_REG(data->fan_status[nr]), nr + 1);
- }
- } else {
- /* Decrease clock divider if possible */
- while (!(data->fan_min[nr] & 0x80) /* min "nails" divider */
- && data->fan[nr] < 85 /* bad accuracy */
- && (data->fan_status[nr] & 0x60) != 0x00) {
- data->fan_status[nr] -= 0x20;
- data->fan_min[nr] <<= 1;
- data->fan[nr] <<= 1;
- dev_dbg(dev,
- "Decreasing clock divider to %d for fan %d\n",
- FAN_DIV_FROM_REG(data->fan_status[nr]),
- nr + 1);
- }
- }
-
- /* Write new fan min if it changed */
- if (old_min != data->fan_min[nr]) {
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_MIN(nr),
- data->fan_min[nr]);
- }
-}
-
-static struct pc87360_data *pc87360_update_device(struct device *dev)
-{
- struct pc87360_data *data = dev_get_drvdata(dev);
- u8 i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
- dev_dbg(dev, "Data update\n");
-
- /* Fans */
- for (i = 0; i < data->fannr; i++) {
- if (FAN_CONFIG_MONITOR(data->fan_conf, i)) {
- data->fan_status[i] =
- pc87360_read_value(data, LD_FAN,
- NO_BANK, PC87360_REG_FAN_STATUS(i));
- data->fan[i] = pc87360_read_value(data, LD_FAN,
- NO_BANK, PC87360_REG_FAN(i));
- data->fan_min[i] = pc87360_read_value(data,
- LD_FAN, NO_BANK,
- PC87360_REG_FAN_MIN(i));
- /* Change clock divider if needed */
- pc87360_autodiv(dev, i);
- /* Clear bits and write new divider */
- pc87360_write_value(data, LD_FAN, NO_BANK,
- PC87360_REG_FAN_STATUS(i),
- data->fan_status[i]);
- }
- if (FAN_CONFIG_CONTROL(data->fan_conf, i))
- data->pwm[i] = pc87360_read_value(data, LD_FAN,
- NO_BANK, PC87360_REG_PWM(i));
- }
-
- /* Voltages */
- for (i = 0; i < data->innr; i++) {
- data->in_status[i] = pc87360_read_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS);
- /* Clear bits */
- pc87360_write_value(data, LD_IN, i,
- PC87365_REG_IN_STATUS,
- data->in_status[i]);
- if ((data->in_status[i] & CHAN_READY) == CHAN_READY) {
- data->in[i] = pc87360_read_value(data, LD_IN,
- i, PC87365_REG_IN);
- }
- if (data->in_status[i] & CHAN_ENA) {
- data->in_min[i] = pc87360_read_value(data,
- LD_IN, i,
- PC87365_REG_IN_MIN);
- data->in_max[i] = pc87360_read_value(data,
- LD_IN, i,
- PC87365_REG_IN_MAX);
- if (i >= 11)
- data->in_crit[i-11] =
- pc87360_read_value(data, LD_IN,
- i, PC87365_REG_TEMP_CRIT);
- }
- }
- if (data->innr) {
- data->in_alarms = pc87360_read_value(data, LD_IN,
- NO_BANK, PC87365_REG_IN_ALARMS1)
- | ((pc87360_read_value(data, LD_IN,
- NO_BANK, PC87365_REG_IN_ALARMS2)
- & 0x07) << 8);
- data->vid = (data->vid_conf & 0xE0) ?
- pc87360_read_value(data, LD_IN,
- NO_BANK, PC87365_REG_VID) : 0x1F;
- }
-
- /* Temperatures */
- for (i = 0; i < data->tempnr; i++) {
- data->temp_status[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_STATUS);
- /* Clear bits */
- pc87360_write_value(data, LD_TEMP, i,
- PC87365_REG_TEMP_STATUS,
- data->temp_status[i]);
- if ((data->temp_status[i] & CHAN_READY) == CHAN_READY) {
- data->temp[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP);
- }
- if (data->temp_status[i] & CHAN_ENA) {
- data->temp_min[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_MIN);
- data->temp_max[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_MAX);
- data->temp_crit[i] = pc87360_read_value(data,
- LD_TEMP, i,
- PC87365_REG_TEMP_CRIT);
- }
- }
- if (data->tempnr) {
- data->temp_alarms = pc87360_read_value(data, LD_TEMP,
- NO_BANK, PC87365_REG_TEMP_ALARMS)
- & 0x3F;
- }
-
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
+ superio_exit(sioaddr);
+ return 0;
}
static int __init pc87360_device_add(unsigned short address)
@@ -1777,10 +1794,10 @@ static void __exit pc87360_exit(void)
platform_driver_unregister(&pc87360_driver);
}
-
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("PC8736x hardware monitor");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
module_init(pc87360_init);
module_exit(pc87360_exit);
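
The pc87360 rework above is mostly code movement, but two recurring patterns are worth spelling out: logical-device registers are reached by first writing a bank index, and the per-channel status registers are write-1-to-clear, so the update path writes the value it just read back to acknowledge latched alarm bits. A self-contained sketch of both patterns follows; the register offsets, masks and chip structure are hypothetical stand-ins, not taken from this driver.

#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define DEMO_REG_BANK	0x09	/* hypothetical bank-select offset */
#define DEMO_NO_BANK	0xff
#define DEMO_REG_STATUS	0x02	/* hypothetical RW1C status register */
#define DEMO_RW1C_MASK	0x87	/* bits that clear when written back as 1 */

struct demo_chip {
	struct mutex lock;	/* serializes bank select + access */
	unsigned short base;	/* I/O base of the logical device */
};

static u8 demo_read(struct demo_chip *chip, u8 bank, u8 reg)
{
	u8 val;

	mutex_lock(&chip->lock);
	if (bank != DEMO_NO_BANK)
		outb_p(bank, chip->base + DEMO_REG_BANK);
	val = inb_p(chip->base + reg);
	mutex_unlock(&chip->lock);

	return val;
}

static void demo_write(struct demo_chip *chip, u8 bank, u8 reg, u8 val)
{
	mutex_lock(&chip->lock);
	if (bank != DEMO_NO_BANK)
		outb_p(bank, chip->base + DEMO_REG_BANK);
	outb_p(val, chip->base + reg);
	mutex_unlock(&chip->lock);
}

static u8 demo_read_and_clear_status(struct demo_chip *chip, u8 bank)
{
	u8 status = demo_read(chip, bank, DEMO_REG_STATUS);

	/* Writing the latched bits back as 1s acknowledges (clears) them. */
	demo_write(chip, bank, DEMO_REG_STATUS, status & DEMO_RW1C_MASK);

	return status;
}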
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index a97a51005c61..af9614e918a4 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -228,14 +228,13 @@ exit_sysfs_remove:
return err;
}
-static int pcf8591_remove(struct i2c_client *client)
+static void pcf8591_remove(struct i2c_client *client)
{
struct pcf8591_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
- return 0;
}
/* Called when we have found a new PCF8591. */
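
The pcf8591 change follows the tree-wide conversion of the I2C remove() callback from int to void: the core ignored the return value, so cleanup now reports no status. A minimal sketch of the converted callback shape follows; the driver and its data handling are hypothetical, and only the remove() shape is shown.

#include <linux/i2c.h>
#include <linux/module.h>

static void demo_i2c_remove(struct i2c_client *client)
{
	void *priv = i2c_get_clientdata(client);

	/*
	 * Undo whatever probe() set up; since the core ignores any
	 * status, the callback simply returns nothing.
	 */
	(void)priv;
}

static struct i2c_driver demo_i2c_driver = {
	.driver	= { .name = "demo-i2c" },
	.remove	= demo_i2c_remove,
};
module_i2c_driver(demo_i2c_driver);

MODULE_LICENSE("GPL");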
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 951e4a9ff2d6..89668af67206 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -397,6 +397,15 @@ config SENSORS_TPS53679
This driver can also be built as a module. If so, the module will
be called tps53679.
+config SENSORS_TPS546D24
+ tristate "TPS546D24"
+ help
+ If you say yes here you get hardware monitoring support for the
+ Texas Instruments TPS546D24.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps546d24.
+
config SENSORS_UCD9000
tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e2fe86f98965..0002dbe22d52 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
+obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
index 8ecd4adfef40..24e5194706cf 100644
--- a/drivers/hwmon/pmbus/mp2888.c
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -34,7 +34,7 @@ struct mp2888_data {
int curr_sense_gain;
};
-#define to_mp2888_data(x) container_of(x, struct mp2888_data, info)
+#define to_mp2888_data(x) container_of(x, struct mp2888_data, info)
static int mp2888_read_byte_data(struct i2c_client *client, int page, int reg)
{
@@ -109,7 +109,7 @@ mp2888_read_phase(struct i2c_client *client, struct mp2888_data *data, int page,
* - Kcs is the DrMOS current sense gain of power stage, which is obtained from the
* register MP2888_MFR_VR_CONFIG1, bits 13-12 with the following selection of DrMOS
* (data->curr_sense_gain):
- * 00b - 5µA/A, 01b - 8.5µA/A, 10b - 9.7µA/A, 11b - 10µA/A.
+ * 00b - 8.5µA/A, 01b - 9.7µA/A, 10b - 10µA/A, 11b - 5µA/A.
* - Rcs is the internal phase current sense resistor. This parameter depends on hardware
* assembly. By default it is set to 1kΩ. In case of different assembly, user should
* scale this parameter by dividing it by Rcs.
@@ -118,10 +118,9 @@ mp2888_read_phase(struct i2c_client *client, struct mp2888_data *data, int page,
* because sampling of current occurrence of bit weight has a big deviation, especially for
* light load.
*/
- ret = DIV_ROUND_CLOSEST(ret * 100 - 9800, data->curr_sense_gain);
- ret = (data->phase_curr_resolution) ? ret * 2 : ret;
+ ret = DIV_ROUND_CLOSEST(ret * 200 - 19600, data->curr_sense_gain);
/* Scale according to total current resolution. */
- ret = (data->total_curr_resolution) ? ret * 8 : ret * 4;
+ ret = (data->total_curr_resolution) ? ret * 2 : ret;
return ret;
}
@@ -212,7 +211,7 @@ static int mp2888_read_word_data(struct i2c_client *client, int page, int phase,
ret = pmbus_read_word_data(client, page, phase, reg);
if (ret < 0)
return ret;
- ret = data->total_curr_resolution ? ret * 2 : ret;
+ ret = data->total_curr_resolution ? ret : DIV_ROUND_CLOSEST(ret, 2);
break;
case PMBUS_POUT_OP_WARN_LIMIT:
ret = pmbus_read_word_data(client, page, phase, reg);
@@ -223,7 +222,7 @@ static int mp2888_read_word_data(struct i2c_client *client, int page, int phase,
* set 1. Actual power is reported with 0.5W or 1W respectively resolution. Scaling
* is needed to match both.
*/
- ret = data->total_curr_resolution ? ret * 4 : ret * 2;
+ ret = data->total_curr_resolution ? ret * 2 : ret;
break;
/*
* The below registers are not implemented by device or implemented not according to the
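The rescaled read-out above relies on DIV_ROUND_CLOSEST() for the divide-by-gain step. As a reminder of the rounding it applies, here is a user-space sketch of the positive-operand case only, simplified from the kernel macro (found in include/linux/math.h in recent kernels) and not part of this patch:

#include <assert.h>
#include <stdio.h>

/* Positive-operand simplification of the kernel's DIV_ROUND_CLOSEST(). */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	assert(DIV_ROUND_CLOSEST(149, 100) == 1);	/* 1.49 rounds down */
	assert(DIV_ROUND_CLOSEST(150, 100) == 2);	/* ties round up */
	assert(DIV_ROUND_CLOSEST(199, 100) == 2);	/* 1.99 rounds up */
	printf("DIV_ROUND_CLOSEST rounds to the nearest integer\n");
	return 0;
}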
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f10bac8860fc..7ec04934747e 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1270,9 +1270,9 @@ struct pmbus_thermal_data {
struct pmbus_sensor *sensor;
};
-static int pmbus_thermal_get_temp(void *data, int *temp)
+static int pmbus_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct pmbus_thermal_data *tdata = data;
+ struct pmbus_thermal_data *tdata = tz->devdata;
struct pmbus_sensor *sensor = tdata->sensor;
struct pmbus_data *pmbus_data = tdata->pmbus_data;
struct i2c_client *client = to_i2c_client(pmbus_data->dev);
@@ -1296,7 +1296,7 @@ static int pmbus_thermal_get_temp(void *data, int *temp)
return ret;
}
-static const struct thermal_zone_of_device_ops pmbus_thermal_ops = {
+static const struct thermal_zone_device_ops pmbus_thermal_ops = {
.get_temp = pmbus_thermal_get_temp,
};
@@ -1314,8 +1314,8 @@ static int pmbus_thermal_add_sensor(struct pmbus_data *pmbus_data,
tdata->sensor = sensor;
tdata->pmbus_data = pmbus_data;
- tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
- &pmbus_thermal_ops);
+ tzd = devm_thermal_of_zone_register(dev, index, tdata,
+ &pmbus_thermal_ops);
/*
* If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
* so ignore that error but forward any other error.
@@ -2861,7 +2861,7 @@ static int pmbus_regulator_get_low_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_low[page]) {
+ if (data->vout_low[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MIN))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MIN);
@@ -2887,7 +2887,7 @@ static int pmbus_regulator_get_high_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_high[page]) {
+ if (data->vout_high[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MAX))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MAX);
@@ -3016,11 +3016,10 @@ static int pmbus_regulator_register(struct pmbus_data *data)
rdev = devm_regulator_register(dev, &info->reg_desc[i],
&config);
- if (IS_ERR(rdev)) {
- dev_err(dev, "Failed to register %s regulator\n",
- info->reg_desc[i].name);
- return PTR_ERR(rdev);
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ info->reg_desc[i].name);
}
return 0;
@@ -3320,6 +3319,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
struct pmbus_data *data;
size_t groups_num = 0;
int ret;
+ int i;
char *name;
if (!info)
@@ -3353,6 +3353,11 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
data->currpage = -1;
data->currphase = -1;
+ for (i = 0; i < ARRAY_SIZE(data->vout_low); i++) {
+ data->vout_low[i] = -1;
+ data->vout_high[i] = -1;
+ }
+
ret = pmbus_init_common(client, data, info);
if (ret < 0)
return ret;
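The pmbus_core changes above follow the hwmon-wide migration from the devicetree-specific thermal helpers (struct thermal_zone_of_device_ops, devm_thermal_zone_of_sensor_register()) to the generic ones: get_temp() now receives the thermal_zone_device itself and pulls its private data out of tz->devdata. A stripped-down sketch of the new registration pattern, with hypothetical foo_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/thermal.h>

struct foo_sensor {
	int last_mcelsius;
};

static int foo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct foo_sensor *s = tz->devdata;	/* was the void *data argument */

	*temp = s->last_mcelsius;		/* millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_device_ops foo_tz_ops = {
	.get_temp = foo_get_temp,
};

static int foo_register_zone(struct device *dev, struct foo_sensor *s)
{
	struct thermal_zone_device *tzd;

	tzd = devm_thermal_of_zone_register(dev, 0, s, &foo_tz_ops);
	/* -ENODEV only means CONFIG_THERMAL_OF is off or no DT zone exists */
	if (IS_ERR(tzd) && PTR_ERR(tzd) != -ENODEV)
		return PTR_ERR(tzd);

	return 0;
}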
diff --git a/drivers/hwmon/pmbus/tps546d24.c b/drivers/hwmon/pmbus/tps546d24.c
new file mode 100644
index 000000000000..435f94304ad8
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps546d24.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for TEXAS TPS546D24 buck converter
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pmbus.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info tps546d24_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+};
+
+static int tps546d24_probe(struct i2c_client *client)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+ if (reg < 0)
+ return reg;
+
+ if (reg & 0x80) {
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, PMBUS_VOUT_MODE, reg & 0x7f);
+ if (err < 0)
+ return err;
+ }
+ return pmbus_do_probe(client, &tps546d24_info);
+}
+
+static const struct i2c_device_id tps546d24_id[] = {
+ {"tps546d24", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tps546d24_id);
+
+static const struct of_device_id __maybe_unused tps546d24_of_match[] = {
+ {.compatible = "ti,tps546d24"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps546d24_of_match);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver tps546d24_driver = {
+ .driver = {
+ .name = "tps546d24",
+ .of_match_table = of_match_ptr(tps546d24_of_match),
+ },
+ .probe_new = tps546d24_probe,
+ .id_table = tps546d24_id,
+};
+
+module_i2c_driver(tps546d24_driver);
+
+MODULE_AUTHOR("Duke Du <dukedu83@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for TI tps546d24");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
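On boards without a devicetree node the new chip can be instantiated like any other I2C/PMBus client; a hedged sketch from hypothetical board code (the adapter and the 0x24 address are made-up examples; use the address from the board schematic):

#include <linux/err.h>
#include <linux/i2c.h>

static const struct i2c_board_info tps546d24_board_info = {
	I2C_BOARD_INFO("tps546d24", 0x24),	/* address is board specific */
};

static int example_add_tps546d24(struct i2c_adapter *adap)
{
	struct i2c_client *client;

	client = i2c_new_client_device(adap, &tps546d24_board_info);
	return PTR_ERR_OR_ZERO(client);
}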
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 6c08551d8d14..dc3d9a22d917 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -28,11 +28,23 @@ struct pwm_fan_tach {
u8 pulses_per_revolution;
};
+enum pwm_fan_enable_mode {
+ pwm_off_reg_off,
+ pwm_disable_reg_enable,
+ pwm_enable_reg_enable,
+ pwm_disable_reg_disable,
+};
+
struct pwm_fan_ctx {
+ struct device *dev;
+
struct mutex lock;
struct pwm_device *pwm;
struct pwm_state pwm_state;
struct regulator *reg_en;
+ enum pwm_fan_enable_mode enable_mode;
+ bool regulator_enabled;
+ bool enabled;
int tach_count;
struct pwm_fan_tach *tachs;
@@ -82,25 +94,140 @@ static void sample_timer(struct timer_list *t)
mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
-static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+static void pwm_fan_enable_mode_2_state(int enable_mode,
+ struct pwm_state *state,
+ bool *enable_regulator)
+{
+ switch (enable_mode) {
+ case pwm_disable_reg_enable:
+ /* disable pwm, keep regulator enabled */
+ state->enabled = false;
+ *enable_regulator = true;
+ break;
+ case pwm_enable_reg_enable:
+ /* keep pwm and regulator enabled */
+ state->enabled = true;
+ *enable_regulator = true;
+ break;
+ case pwm_off_reg_off:
+ case pwm_disable_reg_disable:
+ /* disable pwm and regulator */
+ state->enabled = false;
+ *enable_regulator = false;
+ }
+}
+
+static int pwm_fan_switch_power(struct pwm_fan_ctx *ctx, bool on)
{
- unsigned long period;
int ret = 0;
+
+ if (!ctx->reg_en)
+ return ret;
+
+ if (!ctx->regulator_enabled && on) {
+ ret = regulator_enable(ctx->reg_en);
+ if (ret == 0)
+ ctx->regulator_enabled = true;
+ } else if (ctx->regulator_enabled && !on) {
+ ret = regulator_disable(ctx->reg_en);
+ if (ret == 0)
+ ctx->regulator_enabled = false;
+ }
+ return ret;
+}
+
+static int pwm_fan_power_on(struct pwm_fan_ctx *ctx)
+{
struct pwm_state *state = &ctx->pwm_state;
+ int ret;
- mutex_lock(&ctx->lock);
- if (ctx->pwm_value == pwm)
- goto exit_set_pwm_err;
+ if (ctx->enabled)
+ return 0;
+
+ ret = pwm_fan_switch_power(ctx, true);
+ if (ret < 0) {
+ dev_err(ctx->dev, "failed to enable power supply\n");
+ return ret;
+ }
+
+ state->enabled = true;
+ ret = pwm_apply_state(ctx->pwm, state);
+ if (ret) {
+ dev_err(ctx->dev, "failed to enable PWM\n");
+ goto disable_regulator;
+ }
+
+ ctx->enabled = true;
+
+ return 0;
+
+disable_regulator:
+ pwm_fan_switch_power(ctx, false);
+ return ret;
+}
- period = state->period;
- state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
- state->enabled = pwm ? true : false;
+static int pwm_fan_power_off(struct pwm_fan_ctx *ctx)
+{
+ struct pwm_state *state = &ctx->pwm_state;
+ bool enable_regulator = false;
+ int ret;
+ if (!ctx->enabled)
+ return 0;
+
+ pwm_fan_enable_mode_2_state(ctx->enable_mode,
+ state,
+ &enable_regulator);
+
+ state->enabled = false;
+ state->duty_cycle = 0;
ret = pwm_apply_state(ctx->pwm, state);
+ if (ret) {
+ dev_err(ctx->dev, "failed to disable PWM\n");
+ return ret;
+ }
+
+ pwm_fan_switch_power(ctx, enable_regulator);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+{
+ struct pwm_state *state = &ctx->pwm_state;
+ unsigned long period;
+ int ret = 0;
+
+ if (pwm > 0) {
+ if (ctx->enable_mode == pwm_off_reg_off)
+ /* pwm-fan hard disabled */
+ return 0;
+
+ period = state->period;
+ state->duty_cycle = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);
+ ret = pwm_apply_state(ctx->pwm, state);
+ if (ret)
+ return ret;
+ ret = pwm_fan_power_on(ctx);
+ } else {
+ ret = pwm_fan_power_off(ctx);
+ }
if (!ret)
ctx->pwm_value = pwm;
-exit_set_pwm_err:
+
+ return ret;
+}
+
+static int set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
+{
+ int ret;
+
+ mutex_lock(&ctx->lock);
+ ret = __set_pwm(ctx, pwm);
mutex_unlock(&ctx->lock);
+
return ret;
}
@@ -115,20 +242,76 @@ static void pwm_fan_update_state(struct pwm_fan_ctx *ctx, unsigned long pwm)
ctx->pwm_fan_state = i;
}
+static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val)
+{
+ int ret = 0;
+ int old_val;
+
+ mutex_lock(&ctx->lock);
+
+ if (ctx->enable_mode == val)
+ goto out;
+
+ old_val = ctx->enable_mode;
+ ctx->enable_mode = val;
+
+ if (val == 0) {
+ /* Disable pwm-fan unconditionally */
+ ret = __set_pwm(ctx, 0);
+ if (ret)
+ ctx->enable_mode = old_val;
+ pwm_fan_update_state(ctx, 0);
+ } else {
+ /*
+ * Change PWM and/or regulator state if currently disabled
+ * Nothing to do if currently enabled
+ */
+ if (!ctx->enabled) {
+ struct pwm_state *state = &ctx->pwm_state;
+ bool enable_regulator = false;
+
+ state->duty_cycle = 0;
+ pwm_fan_enable_mode_2_state(val,
+ state,
+ &enable_regulator);
+
+ pwm_apply_state(ctx->pwm, state);
+ pwm_fan_switch_power(ctx, enable_regulator);
+ pwm_fan_update_state(ctx, 0);
+ }
+ }
+out:
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
static int pwm_fan_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
int ret;
- if (val < 0 || val > MAX_PWM)
- return -EINVAL;
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val < 0 || val > MAX_PWM)
+ return -EINVAL;
+ ret = set_pwm(ctx, val);
+ if (ret)
+ return ret;
+ pwm_fan_update_state(ctx, val);
+ break;
+ case hwmon_pwm_enable:
+ if (val < 0 || val > 3)
+ ret = -EINVAL;
+ else
+ ret = pwm_fan_update_enable(ctx, val);
- ret = __set_pwm(ctx, val);
- if (ret)
return ret;
+ default:
+ return -EOPNOTSUPP;
+ }
- pwm_fan_update_state(ctx, val);
return 0;
}
@@ -139,9 +322,15 @@ static int pwm_fan_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_pwm:
- *val = ctx->pwm_value;
- return 0;
-
+ switch (attr) {
+ case hwmon_pwm_input:
+ *val = ctx->pwm_value;
+ return 0;
+ case hwmon_pwm_enable:
+ *val = ctx->enable_mode;
+ return 0;
+ }
+ return -EOPNOTSUPP;
case hwmon_fan:
*val = ctx->tachs[channel].rpm;
return 0;
@@ -212,7 +401,7 @@ pwm_fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
if (state == ctx->pwm_fan_state)
return 0;
- ret = __set_pwm(ctx, ctx->pwm_fan_cooling_levels[state]);
+ ret = set_pwm(ctx, ctx->pwm_fan_cooling_levels[state]);
if (ret) {
dev_err(&cdev->device, "Cannot set pwm!\n");
return ret;
@@ -270,18 +459,14 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
return 0;
}
-static void pwm_fan_regulator_disable(void *data)
-{
- regulator_disable(data);
-}
-
-static void pwm_fan_pwm_disable(void *__ctx)
+static void pwm_fan_cleanup(void *__ctx)
{
struct pwm_fan_ctx *ctx = __ctx;
- ctx->pwm_state.enabled = false;
- pwm_apply_state(ctx->pwm, &ctx->pwm_state);
del_timer_sync(&ctx->rpm_timer);
+ /* Switch off everything */
+ ctx->enable_mode = pwm_disable_reg_disable;
+ pwm_fan_power_off(ctx);
}
static int pwm_fan_probe(struct platform_device *pdev)
@@ -302,7 +487,8 @@ static int pwm_fan_probe(struct platform_device *pdev)
mutex_init(&ctx->lock);
- ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
+ ctx->dev = &pdev->dev;
+ ctx->pwm = devm_pwm_get(dev, NULL);
if (IS_ERR(ctx->pwm))
return dev_err_probe(dev, PTR_ERR(ctx->pwm), "Could not get PWM\n");
@@ -314,22 +500,12 @@ static int pwm_fan_probe(struct platform_device *pdev)
return PTR_ERR(ctx->reg_en);
ctx->reg_en = NULL;
- } else {
- ret = regulator_enable(ctx->reg_en);
- if (ret) {
- dev_err(dev, "Failed to enable fan supply: %d\n", ret);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
- ctx->reg_en);
- if (ret)
- return ret;
}
pwm_init_state(ctx->pwm, &ctx->pwm_state);
/*
- * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+ * set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
* long. Check this here to prevent the fan running at a too low
* frequency.
*/
@@ -338,14 +514,19 @@ static int pwm_fan_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* Set duty cycle to maximum allowed and enable PWM output */
- ret = __set_pwm(ctx, MAX_PWM);
+ ctx->enable_mode = pwm_disable_reg_enable;
+
+ /*
+ * Set duty cycle to maximum allowed and enable PWM output as well as
+ * the regulator. In case of error nothing is changed
+ */
+ ret = set_pwm(ctx, MAX_PWM);
if (ret) {
dev_err(dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
timer_setup(&ctx->rpm_timer, sample_timer, 0);
- ret = devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
+ ret = devm_add_action_or_reset(dev, pwm_fan_cleanup, ctx);
if (ret)
return ret;
@@ -377,7 +558,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (!channels)
return -ENOMEM;
- channels[0] = HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT);
+ channels[0] = HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE);
for (i = 0; i < ctx->tach_count; i++) {
struct pwm_fan_tach *tach = &ctx->tachs[i];
@@ -451,65 +632,28 @@ static int pwm_fan_probe(struct platform_device *pdev)
return 0;
}
-static int pwm_fan_disable(struct device *dev)
-{
- struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- int ret;
-
- if (ctx->pwm_value) {
- /* keep ctx->pwm_state unmodified for pwm_fan_resume() */
- struct pwm_state state = ctx->pwm_state;
-
- state.duty_cycle = 0;
- state.enabled = false;
- ret = pwm_apply_state(ctx->pwm, &state);
- if (ret < 0)
- return ret;
- }
-
- if (ctx->reg_en) {
- ret = regulator_disable(ctx->reg_en);
- if (ret) {
- dev_err(dev, "Failed to disable fan supply: %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static void pwm_fan_shutdown(struct platform_device *pdev)
{
- pwm_fan_disable(&pdev->dev);
+ struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
+
+ pwm_fan_cleanup(ctx);
}
-#ifdef CONFIG_PM_SLEEP
static int pwm_fan_suspend(struct device *dev)
{
- return pwm_fan_disable(dev);
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ return pwm_fan_power_off(ctx);
}
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
- int ret;
-
- if (ctx->reg_en) {
- ret = regulator_enable(ctx->reg_en);
- if (ret) {
- dev_err(dev, "Failed to enable fan supply: %d\n", ret);
- return ret;
- }
- }
-
- if (ctx->pwm_value == 0)
- return 0;
- return pwm_apply_state(ctx->pwm, &ctx->pwm_state);
+ return set_pwm(ctx, ctx->pwm_value);
}
-#endif
-static SIMPLE_DEV_PM_OPS(pwm_fan_pm, pwm_fan_suspend, pwm_fan_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_fan_pm, pwm_fan_suspend, pwm_fan_resume);
static const struct of_device_id of_pwm_fan_match[] = {
{ .compatible = "pwm-fan", },
@@ -522,7 +666,7 @@ static struct platform_driver pwm_fan_driver = {
.shutdown = pwm_fan_shutdown,
.driver = {
.name = "pwm-fan",
- .pm = &pwm_fan_pm,
+ .pm = pm_sleep_ptr(&pwm_fan_pm),
.of_match_table = of_pwm_fan_match,
},
};
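To make the duty-cycle arithmetic in __set_pwm() concrete: the driver's MAX_PWM is 255, so with, say, a 40000 ns PWM period from the devicetree, writing 128 to pwm1 programs roughly a 50% duty cycle. A small user-space check of the formula (the period value is only an example):

#include <stdio.h>

#define MAX_PWM			255
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long period = 40000;	/* ns, example value from DT */
	unsigned long pwm = 128;	/* sysfs pwm1 value, 0..255 */
	unsigned long duty = DIV_ROUND_UP(pwm * (period - 1), MAX_PWM);

	/* prints: duty_cycle = 20078 ns of 40000 ns (~50%) */
	printf("duty_cycle = %lu ns of %lu ns (~%lu%%)\n",
	       duty, period, duty * 100 / period);
	return 0;
}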
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 5187c6dd5a4f..4d75385f7d5e 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -62,9 +62,9 @@ static void scpi_scale_reading(u64 *value, struct sensor_data *sensor)
}
}
-static int scpi_read_temp(void *dev, int *temp)
+static int scpi_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct scpi_thermal_zone *zone = dev;
+ struct scpi_thermal_zone *zone = tz->devdata;
struct scpi_sensors *scpi_sensors = zone->scpi_sensors;
struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops;
struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id];
@@ -121,7 +121,7 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", sensor->info.name);
}
-static const struct thermal_zone_of_device_ops scpi_sensor_ops = {
+static const struct thermal_zone_device_ops scpi_sensor_ops = {
.get_temp = scpi_read_temp,
};
@@ -275,10 +275,10 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
zone->sensor_id = i;
zone->scpi_sensors = scpi_sensors;
- z = devm_thermal_zone_of_sensor_register(dev,
- sensor->info.sensor_id,
- zone,
- &scpi_sensor_ops);
+ z = devm_thermal_of_zone_register(dev,
+ sensor->info.sensor_id,
+ zone,
+ &scpi_sensor_ops);
/*
* The call to thermal_zone_of_sensor_register returns
* an error for sensors that are not associated with
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index c19df3ade48e..13ac2d8f22c7 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -129,7 +129,7 @@ unlock:
static ssize_t sht4x_interval_write(struct sht4x_data *data, long val)
{
- data->update_interval = clamp_val(val, SHT4X_MIN_POLL_INTERVAL, UINT_MAX);
+ data->update_interval = clamp_val(val, SHT4X_MIN_POLL_INTERVAL, INT_MAX);
return 0;
}
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 018cb5a7651f..b0b05fd12221 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -37,6 +37,7 @@
* 735 0008 0735
*/
+#define DRIVER_NAME "sis5595"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
@@ -191,21 +192,75 @@ struct sis5595_data {
static struct pci_dev *s_bridge; /* pointer to the (only) sis5595 */
-static int sis5595_probe(struct platform_device *pdev);
-static int sis5595_remove(struct platform_device *pdev);
+/* ISA access must be locked explicitly. */
+static int sis5595_read_value(struct sis5595_data *data, u8 reg)
+{
+ int res;
-static int sis5595_read_value(struct sis5595_data *data, u8 reg);
-static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value);
-static struct sis5595_data *sis5595_update_device(struct device *dev);
-static void sis5595_init_device(struct sis5595_data *data);
+ mutex_lock(&data->lock);
+ outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+ res = inb_p(data->addr + SIS5595_DATA_REG_OFFSET);
+ mutex_unlock(&data->lock);
+ return res;
+}
-static struct platform_driver sis5595_driver = {
- .driver = {
- .name = "sis5595",
- },
- .probe = sis5595_probe,
- .remove = sis5595_remove,
-};
+static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
+{
+ mutex_lock(&data->lock);
+ outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
+ outb_p(value, data->addr + SIS5595_DATA_REG_OFFSET);
+ mutex_unlock(&data->lock);
+}
+
+static struct sis5595_data *sis5595_update_device(struct device *dev)
+{
+ struct sis5595_data *data = dev_get_drvdata(dev);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+
+ for (i = 0; i <= data->maxins; i++) {
+ data->in[i] =
+ sis5595_read_value(data, SIS5595_REG_IN(i));
+ data->in_min[i] =
+ sis5595_read_value(data,
+ SIS5595_REG_IN_MIN(i));
+ data->in_max[i] =
+ sis5595_read_value(data,
+ SIS5595_REG_IN_MAX(i));
+ }
+ for (i = 0; i < 2; i++) {
+ data->fan[i] =
+ sis5595_read_value(data, SIS5595_REG_FAN(i));
+ data->fan_min[i] =
+ sis5595_read_value(data,
+ SIS5595_REG_FAN_MIN(i));
+ }
+ if (data->maxins == 3) {
+ data->temp =
+ sis5595_read_value(data, SIS5595_REG_TEMP);
+ data->temp_over =
+ sis5595_read_value(data, SIS5595_REG_TEMP_OVER);
+ data->temp_hyst =
+ sis5595_read_value(data, SIS5595_REG_TEMP_HYST);
+ }
+ i = sis5595_read_value(data, SIS5595_REG_FANDIV);
+ data->fan_div[0] = (i >> 4) & 0x03;
+ data->fan_div[1] = i >> 6;
+ data->alarms =
+ sis5595_read_value(data, SIS5595_REG_ALARM1) |
+ (sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
/* 4 Voltages */
static ssize_t in_show(struct device *dev, struct device_attribute *da,
@@ -568,6 +623,15 @@ static const struct attribute_group sis5595_group_temp1 = {
.attrs = sis5595_attributes_temp1,
};
+/* Called when we have found a new SIS5595. */
+static void sis5595_init_device(struct sis5595_data *data)
+{
+ u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
+ if (!(config & 0x01))
+ sis5595_write_value(data, SIS5595_REG_CONFIG,
+ (config & 0xf7) | 0x01);
+}
+
/* This is called when the module is loaded */
static int sis5595_probe(struct platform_device *pdev)
{
@@ -580,7 +644,7 @@ static int sis5595_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, SIS5595_EXTENT,
- sis5595_driver.driver.name))
+ DRIVER_NAME))
return -EBUSY;
data = devm_kzalloc(&pdev->dev, sizeof(struct sis5595_data),
@@ -591,7 +655,7 @@ static int sis5595_probe(struct platform_device *pdev)
mutex_init(&data->lock);
mutex_init(&data->update_lock);
data->addr = res->start;
- data->name = "sis5595";
+ data->name = DRIVER_NAME;
platform_set_drvdata(pdev, data);
/*
@@ -657,85 +721,6 @@ static int sis5595_remove(struct platform_device *pdev)
return 0;
}
-/* ISA access must be locked explicitly. */
-static int sis5595_read_value(struct sis5595_data *data, u8 reg)
-{
- int res;
-
- mutex_lock(&data->lock);
- outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
- res = inb_p(data->addr + SIS5595_DATA_REG_OFFSET);
- mutex_unlock(&data->lock);
- return res;
-}
-
-static void sis5595_write_value(struct sis5595_data *data, u8 reg, u8 value)
-{
- mutex_lock(&data->lock);
- outb_p(reg, data->addr + SIS5595_ADDR_REG_OFFSET);
- outb_p(value, data->addr + SIS5595_DATA_REG_OFFSET);
- mutex_unlock(&data->lock);
-}
-
-/* Called when we have found a new SIS5595. */
-static void sis5595_init_device(struct sis5595_data *data)
-{
- u8 config = sis5595_read_value(data, SIS5595_REG_CONFIG);
- if (!(config & 0x01))
- sis5595_write_value(data, SIS5595_REG_CONFIG,
- (config & 0xf7) | 0x01);
-}
-
-static struct sis5595_data *sis5595_update_device(struct device *dev)
-{
- struct sis5595_data *data = dev_get_drvdata(dev);
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
-
- for (i = 0; i <= data->maxins; i++) {
- data->in[i] =
- sis5595_read_value(data, SIS5595_REG_IN(i));
- data->in_min[i] =
- sis5595_read_value(data,
- SIS5595_REG_IN_MIN(i));
- data->in_max[i] =
- sis5595_read_value(data,
- SIS5595_REG_IN_MAX(i));
- }
- for (i = 0; i < 2; i++) {
- data->fan[i] =
- sis5595_read_value(data, SIS5595_REG_FAN(i));
- data->fan_min[i] =
- sis5595_read_value(data,
- SIS5595_REG_FAN_MIN(i));
- }
- if (data->maxins == 3) {
- data->temp =
- sis5595_read_value(data, SIS5595_REG_TEMP);
- data->temp_over =
- sis5595_read_value(data, SIS5595_REG_TEMP_OVER);
- data->temp_hyst =
- sis5595_read_value(data, SIS5595_REG_TEMP_HYST);
- }
- i = sis5595_read_value(data, SIS5595_REG_FANDIV);
- data->fan_div[0] = (i >> 4) & 0x03;
- data->fan_div[1] = i >> 6;
- data->alarms =
- sis5595_read_value(data, SIS5595_REG_ALARM1) |
- (sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
-
static const struct pci_device_id sis5595_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
@@ -764,7 +749,7 @@ static int sis5595_device_add(unsigned short address)
struct resource res = {
.start = address,
.end = address + SIS5595_EXTENT - 1,
- .name = "sis5595",
+ .name = DRIVER_NAME,
.flags = IORESOURCE_IO,
};
int err;
@@ -773,7 +758,7 @@ static int sis5595_device_add(unsigned short address)
if (err)
goto exit;
- pdev = platform_device_alloc("sis5595", address);
+ pdev = platform_device_alloc(DRIVER_NAME, address);
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -800,6 +785,14 @@ exit:
return err;
}
+static struct platform_driver sis5595_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = sis5595_probe,
+ .remove = sis5595_remove,
+};
+
static int sis5595_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
@@ -886,7 +879,7 @@ exit:
}
static struct pci_driver sis5595_pci_driver = {
- .name = "sis5595",
+ .name = DRIVER_NAME,
.id_table = sis5595_pci_ids,
.probe = sis5595_pci_probe,
};
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index 8c4ed72e5d68..c36bdbe423de 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -671,12 +671,11 @@ out_unregister:
return ret;
}
-static int smm665_remove(struct i2c_client *client)
+static void smm665_remove(struct i2c_client *client)
{
struct smm665_data *data = i2c_get_clientdata(client);
i2c_unregister_device(data->cmdreg);
- return 0;
}
static const struct i2c_device_id smm665_id[] = {
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index a5db15c087ae..70d2152234e2 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -582,7 +582,7 @@ static int smsc47m192_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE);
+ strscpy(info->type, "smsc47m192", I2C_NAME_SIZE);
return 0;
}
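The strlcpy() to strscpy() conversions in this and the following detect() callbacks are part of the kernel-wide retirement of strlcpy(): both NUL-terminate the destination, but strscpy() returns the number of bytes copied (or -E2BIG on truncation) and never walks the whole source string just to compute a return value. A sketch of the call pattern with made-up names; chip-name copies into I2C_NAME_SIZE buffers, as above, simply ignore the return value:

#include <linux/i2c.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_fill_type(struct i2c_board_info *info)
{
	ssize_t n;

	n = strscpy(info->type, "smsc47m192", I2C_NAME_SIZE);
	if (n < 0)
		pr_warn("chip name truncated\n");
}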
diff --git a/drivers/hwmon/sparx5-temp.c b/drivers/hwmon/sparx5-temp.c
index 98be48e3a22a..04fd8505e5d6 100644
--- a/drivers/hwmon/sparx5-temp.c
+++ b/drivers/hwmon/sparx5-temp.c
@@ -26,13 +26,6 @@ struct s5_hwmon {
struct clk *clk;
};
-static void s5_temp_clk_disable(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static void s5_temp_enable(struct s5_hwmon *hwmon)
{
u32 val = readl(hwmon->base + TEMP_CFG);
@@ -113,7 +106,6 @@ static int s5_temp_probe(struct platform_device *pdev)
{
struct device *hwmon_dev;
struct s5_hwmon *hwmon;
- int ret;
hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -123,19 +115,10 @@ static int s5_temp_probe(struct platform_device *pdev)
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
- hwmon->clk = devm_clk_get(&pdev->dev, NULL);
+ hwmon->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(hwmon->clk))
return PTR_ERR(hwmon->clk);
- ret = clk_prepare_enable(hwmon->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&pdev->dev, s5_temp_clk_disable,
- hwmon->clk);
- if (ret)
- return ret;
-
s5_temp_enable(hwmon);
hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
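devm_clk_get_enabled(), used above, folds three steps into one managed call: it looks up the clock, prepares and enables it, and registers a devres action that disables, unprepares and puts it again when the driver detaches, which is why the explicit error path and the s5_temp_clk_disable() helper could be dropped. A hedged sketch of the resulting probe shape with a hypothetical foo driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Replaces devm_clk_get() + clk_prepare_enable() +
	 * devm_add_action_or_reset(..., clk_disable_unprepare, ...). */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");

	return 0;
}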
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 0ed28408aa07..2f67c6747ead 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -692,7 +692,7 @@ static int stts751_detect(struct i2c_client *new_client,
}
dev_dbg(&new_client->dev, "Chip %s detected", name);
- strlcpy(info->type, stts751_id[0].name, I2C_NAME_SIZE);
+ strscpy(info->type, stts751_id[0].name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 6a804f5036f4..81cdb012993c 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -352,7 +352,7 @@ static int thmc50_detect(struct i2c_client *client,
pr_debug("thmc50: Detected %s (version %x, revision %x)\n",
type_name, (revision >> 4) - 0xc, revision & 0xf);
- strlcpy(info->type, type_name, I2C_NAME_SIZE);
+ strscpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index e867a0c2e539..2bf496a62206 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -260,7 +260,6 @@ static int tmp102_probe(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int tmp102_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -283,9 +282,8 @@ static int tmp102_resume(struct device *dev)
return err;
}
-#endif /* CONFIG_PM */
-static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
static const struct i2c_device_id tmp102_id[] = {
{ "tmp102", 0 },
@@ -302,7 +300,7 @@ MODULE_DEVICE_TABLE(of, tmp102_of_match);
static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = of_match_ptr(tmp102_of_match),
- .driver.pm = &tmp102_dev_pm_ops,
+ .driver.pm = pm_sleep_ptr(&tmp102_dev_pm_ops),
.probe_new = tmp102_probe,
.id_table = tmp102_id,
};
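The tmp102/tmp103/tmp108 and w83627ehf hunks all apply the same PM idiom: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() keep the suspend/resume callbacks visible to the compiler but let them be discarded when CONFIG_PM_SLEEP is off, so the #ifdef guards and __maybe_unused annotations become unnecessary. A minimal sketch of the pattern with placeholder callbacks:

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	return 0;	/* put the chip into shutdown mode here */
}

static int foo_resume(struct device *dev)
{
	return 0;	/* wake the chip back up here */
}

/* No #ifdef CONFIG_PM_SLEEP and no __maybe_unused: pm_sleep_ptr() becomes
 * NULL when sleep support is disabled, so the compiler can discard the
 * otherwise-unreferenced ops and callbacks. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= pm_sleep_ptr(&foo_pm_ops),
	},
};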
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index 5cab4436aa77..56d5cbf36a45 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -178,7 +178,7 @@ static int tmp103_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __maybe_unused tmp103_suspend(struct device *dev)
+static int tmp103_suspend(struct device *dev)
{
struct regmap *regmap = dev_get_drvdata(dev);
@@ -186,7 +186,7 @@ static int __maybe_unused tmp103_suspend(struct device *dev)
TMP103_CONF_SD_MASK, 0);
}
-static int __maybe_unused tmp103_resume(struct device *dev)
+static int tmp103_resume(struct device *dev)
{
struct regmap *regmap = dev_get_drvdata(dev);
@@ -194,7 +194,7 @@ static int __maybe_unused tmp103_resume(struct device *dev)
TMP103_CONF_SD_MASK, TMP103_CONF_SD);
}
-static SIMPLE_DEV_PM_OPS(tmp103_dev_pm_ops, tmp103_suspend, tmp103_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tmp103_dev_pm_ops, tmp103_suspend, tmp103_resume);
static const struct i2c_device_id tmp103_id[] = {
{ "tmp103", 0 },
@@ -212,7 +212,7 @@ static struct i2c_driver tmp103_driver = {
.driver = {
.name = "tmp103",
.of_match_table = of_match_ptr(tmp103_of_match),
- .pm = &tmp103_dev_pm_ops,
+ .pm = pm_sleep_ptr(&tmp103_dev_pm_ops),
},
.probe_new = tmp103_probe,
.id_table = tmp103_id,
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 5435664c3f6e..acb4ba750b09 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -390,7 +390,7 @@ static int tmp108_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __maybe_unused tmp108_suspend(struct device *dev)
+static int tmp108_suspend(struct device *dev)
{
struct tmp108 *tmp108 = dev_get_drvdata(dev);
@@ -398,7 +398,7 @@ static int __maybe_unused tmp108_suspend(struct device *dev)
TMP108_CONF_MODE_MASK, TMP108_MODE_SHUTDOWN);
}
-static int __maybe_unused tmp108_resume(struct device *dev)
+static int tmp108_resume(struct device *dev)
{
struct tmp108 *tmp108 = dev_get_drvdata(dev);
int err;
@@ -410,7 +410,7 @@ static int __maybe_unused tmp108_resume(struct device *dev)
return err;
}
-static SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
static const struct i2c_device_id tmp108_i2c_ids[] = {
{ "tmp108", 0 },
@@ -429,7 +429,7 @@ MODULE_DEVICE_TABLE(of, tmp108_of_ids);
static struct i2c_driver tmp108_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &tmp108_dev_pm_ops,
+ .pm = pm_sleep_ptr(&tmp108_dev_pm_ops),
.of_match_table = of_match_ptr(tmp108_of_ids),
},
.probe_new = tmp108_probe,
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index cc0a1c219b1f..f358ba679626 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -671,7 +671,7 @@ static int tmp401_detect(struct i2c_client *client,
if (reg > 15)
return -ENODEV;
- strlcpy(info->type, tmp401_id[kind].name, I2C_NAME_SIZE);
+ strscpy(info->type, tmp401_id[kind].name, I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 1fd8d41d90c8..45fd7fb5ee01 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -353,7 +353,7 @@ static int tmp421_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, tmp421_id[kind].name, I2C_NAME_SIZE);
+ strscpy(info->type, tmp421_id[kind].name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n",
names[kind], client->addr);
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 42762e87b014..68c77c493270 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -372,29 +372,12 @@ static const struct hwmon_chip_info tps23861_chip_info = {
.info = tps23861_info,
};
-static char *tps23861_port_operating_mode(struct tps23861_data *data, int port)
+static char *port_operating_mode_string(uint8_t mode_reg, unsigned int port)
{
- unsigned int regval;
- int mode;
-
- regmap_read(data->regmap, OPERATING_MODE, &regval);
+ unsigned int mode = ~0;
- switch (port) {
- case 1:
- mode = FIELD_GET(OPERATING_MODE_PORT_1_MASK, regval);
- break;
- case 2:
- mode = FIELD_GET(OPERATING_MODE_PORT_2_MASK, regval);
- break;
- case 3:
- mode = FIELD_GET(OPERATING_MODE_PORT_3_MASK, regval);
- break;
- case 4:
- mode = FIELD_GET(OPERATING_MODE_PORT_4_MASK, regval);
- break;
- default:
- mode = -EINVAL;
- }
+ if (port < TPS23861_NUM_PORTS)
+ mode = (mode_reg >> (2 * port)) & OPERATING_MODE_PORT_1_MASK;
switch (mode) {
case OPERATING_MODE_OFF:
@@ -410,15 +393,9 @@ static char *tps23861_port_operating_mode(struct tps23861_data *data, int port)
}
}
-static char *tps23861_port_detect_status(struct tps23861_data *data, int port)
+static char *port_detect_status_string(uint8_t status_reg)
{
- unsigned int regval;
-
- regmap_read(data->regmap,
- PORT_1_STATUS + (port - 1),
- &regval);
-
- switch (FIELD_GET(PORT_STATUS_DETECT_MASK, regval)) {
+ switch (FIELD_GET(PORT_STATUS_DETECT_MASK, status_reg)) {
case PORT_DETECT_UNKNOWN:
return "Unknown device";
case PORT_DETECT_SHORT:
@@ -448,15 +425,9 @@ static char *tps23861_port_detect_status(struct tps23861_data *data, int port)
}
}
-static char *tps23861_port_class_status(struct tps23861_data *data, int port)
+static char *port_class_status_string(uint8_t status_reg)
{
- unsigned int regval;
-
- regmap_read(data->regmap,
- PORT_1_STATUS + (port - 1),
- &regval);
-
- switch (FIELD_GET(PORT_STATUS_CLASS_MASK, regval)) {
+ switch (FIELD_GET(PORT_STATUS_CLASS_MASK, status_reg)) {
case PORT_CLASS_UNKNOWN:
return "Unknown";
case PORT_CLASS_RESERVED:
@@ -479,32 +450,27 @@ static char *tps23861_port_class_status(struct tps23861_data *data, int port)
}
}
-static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
+static char *port_poe_plus_status_string(uint8_t poe_plus, unsigned int port)
{
- unsigned int regval;
-
- regmap_read(data->regmap, POE_PLUS, &regval);
-
- if (BIT(port + 3) & regval)
- return "Yes";
- else
- return "No";
+ return (BIT(port + 4) & poe_plus) ? "Yes" : "No";
}
static int tps23861_port_resistance(struct tps23861_data *data, int port)
{
- u16 regval;
+ unsigned int raw_val;
+ __le16 regval;
regmap_bulk_read(data->regmap,
- PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
+ PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * port,
&regval,
2);
- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ raw_val = le16_to_cpu(regval);
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
case PORT_RESISTANCE_RSN_OTHER:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
case PORT_RESISTANCE_RSN_LOW:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
case PORT_RESISTANCE_RSN_SHORT:
case PORT_RESISTANCE_RSN_OPEN:
default:
@@ -515,14 +481,19 @@ static int tps23861_port_resistance(struct tps23861_data *data, int port)
static int tps23861_port_status_show(struct seq_file *s, void *data)
{
struct tps23861_data *priv = s->private;
- int i;
-
- for (i = 1; i < TPS23861_NUM_PORTS + 1; i++) {
- seq_printf(s, "Port: \t\t%d\n", i);
- seq_printf(s, "Operating mode: %s\n", tps23861_port_operating_mode(priv, i));
- seq_printf(s, "Detected: \t%s\n", tps23861_port_detect_status(priv, i));
- seq_printf(s, "Class: \t\t%s\n", tps23861_port_class_status(priv, i));
- seq_printf(s, "PoE Plus: \t%s\n", tps23861_port_poe_plus_status(priv, i));
+ unsigned int i, mode, poe_plus, status;
+
+ regmap_read(priv->regmap, OPERATING_MODE, &mode);
+ regmap_read(priv->regmap, POE_PLUS, &poe_plus);
+
+ for (i = 0; i < TPS23861_NUM_PORTS; i++) {
+ regmap_read(priv->regmap, PORT_1_STATUS + i, &status);
+
+ seq_printf(s, "Port: \t\t%d\n", i + 1);
+ seq_printf(s, "Operating mode: %s\n", port_operating_mode_string(mode, i));
+ seq_printf(s, "Detected: \t%s\n", port_detect_status_string(status));
+ seq_printf(s, "Class: \t\t%s\n", port_class_status_string(status));
+ seq_printf(s, "PoE Plus: \t%s\n", port_poe_plus_status_string(poe_plus, i));
seq_printf(s, "Resistance: \t%d\n", tps23861_port_resistance(priv, i));
seq_putc(s, '\n');
}
@@ -532,9 +503,17 @@ static int tps23861_port_status_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(tps23861_port_status);
-static void tps23861_init_debugfs(struct tps23861_data *data)
+static void tps23861_init_debugfs(struct tps23861_data *data,
+ struct device *hwmon_dev)
{
- data->debugfs_dir = debugfs_create_dir(data->client->name, NULL);
+ const char *debugfs_name;
+
+ debugfs_name = devm_kasprintf(&data->client->dev, GFP_KERNEL, "%s-%s",
+ data->client->name, dev_name(hwmon_dev));
+ if (!debugfs_name)
+ return;
+
+ data->debugfs_dir = debugfs_create_dir(debugfs_name, NULL);
debugfs_create_file("port_status",
0400,
@@ -583,18 +562,16 @@ static int tps23861_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- tps23861_init_debugfs(data);
+ tps23861_init_debugfs(data, hwmon_dev);
return 0;
}
-static int tps23861_remove(struct i2c_client *client)
+static void tps23861_remove(struct i2c_client *client)
{
struct tps23861_data *data = i2c_get_clientdata(client);
debugfs_remove_recursive(data->debugfs_dir);
-
- return 0;
}
static const struct of_device_id __maybe_unused tps23861_of_match[] = {
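The port-resistance fix above is an endianness fix: regmap_bulk_read() just copies raw register bytes into the caller's buffer, so a 16-bit value stored LSB-first has to be read into a __le16 and converted with le16_to_cpu() before FIELD_GET() extracts bits from it, otherwise big-endian hosts see a byte-swapped value. A hedged sketch of the general pattern; the register and mask names are placeholders, not TPS23861 definitions:

#include <linux/bitfield.h>
#include <linux/regmap.h>

#define FOO_VALUE_MASK	GENMASK(13, 0)

static int foo_read_le16_field(struct regmap *map, unsigned int reg_lsb)
{
	__le16 raw;
	int ret;

	/* Two consecutive 8-bit registers, low byte first. */
	ret = regmap_bulk_read(map, reg_lsb, &raw, 2);
	if (ret)
		return ret;

	return FIELD_GET(FOO_VALUE_MASK, le16_to_cpu(raw));
}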
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 55634110c2f9..37d7374896f6 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -34,6 +34,8 @@
#include <linux/acpi.h>
#include <linux/io.h>
+#define DRIVER_NAME "via686a"
+
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -321,9 +323,6 @@ struct via686a_data {
static struct pci_dev *s_bridge; /* pointer to the (only) via686a */
-static int via686a_probe(struct platform_device *pdev);
-static int via686a_remove(struct platform_device *pdev);
-
static inline int via686a_read_value(struct via686a_data *data, u8 reg)
{
return inb_p(data->addr + reg);
@@ -335,8 +334,76 @@ static inline void via686a_write_value(struct via686a_data *data, u8 reg,
outb_p(value, data->addr + reg);
}
-static struct via686a_data *via686a_update_device(struct device *dev);
-static void via686a_init_device(struct via686a_data *data);
+static void via686a_update_fan_div(struct via686a_data *data)
+{
+ int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+ data->fan_div[0] = (reg >> 4) & 0x03;
+ data->fan_div[1] = reg >> 6;
+}
+
+static struct via686a_data *via686a_update_device(struct device *dev)
+{
+ struct via686a_data *data = dev_get_drvdata(dev);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i <= 4; i++) {
+ data->in[i] =
+ via686a_read_value(data, VIA686A_REG_IN(i));
+ data->in_min[i] = via686a_read_value(data,
+ VIA686A_REG_IN_MIN
+ (i));
+ data->in_max[i] =
+ via686a_read_value(data, VIA686A_REG_IN_MAX(i));
+ }
+ for (i = 1; i <= 2; i++) {
+ data->fan[i - 1] =
+ via686a_read_value(data, VIA686A_REG_FAN(i));
+ data->fan_min[i - 1] = via686a_read_value(data,
+ VIA686A_REG_FAN_MIN(i));
+ }
+ for (i = 0; i <= 2; i++) {
+ data->temp[i] = via686a_read_value(data,
+ VIA686A_REG_TEMP[i]) << 2;
+ data->temp_over[i] =
+ via686a_read_value(data,
+ VIA686A_REG_TEMP_OVER[i]);
+ data->temp_hyst[i] =
+ via686a_read_value(data,
+ VIA686A_REG_TEMP_HYST[i]);
+ }
+ /*
+ * add in lower 2 bits
+ * temp1 uses bits 7-6 of VIA686A_REG_TEMP_LOW1
+ * temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23
+ * temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23
+ */
+ data->temp[0] |= (via686a_read_value(data,
+ VIA686A_REG_TEMP_LOW1)
+ & 0xc0) >> 6;
+ data->temp[1] |=
+ (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
+ 0x30) >> 4;
+ data->temp[2] |=
+ (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
+ 0xc0) >> 6;
+
+ via686a_update_fan_div(data);
+ data->alarms =
+ via686a_read_value(data,
+ VIA686A_REG_ALARM1) |
+ (via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
/* following are the sysfs callback functions */
@@ -654,13 +721,23 @@ static const struct attribute_group via686a_group = {
.attrs = via686a_attributes,
};
-static struct platform_driver via686a_driver = {
- .driver = {
- .name = "via686a",
- },
- .probe = via686a_probe,
- .remove = via686a_remove,
-};
+static void via686a_init_device(struct via686a_data *data)
+{
+ u8 reg;
+
+ /* Start monitoring */
+ reg = via686a_read_value(data, VIA686A_REG_CONFIG);
+ via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F);
+
+ /* Configure temp interrupt mode for continuous-interrupt operation */
+ reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE);
+ via686a_write_value(data, VIA686A_REG_TEMP_MODE,
+ (reg & ~VIA686A_TEMP_MODE_MASK)
+ | VIA686A_TEMP_MODE_CONTINUOUS);
+
+ /* Pre-read fan clock divisor values */
+ via686a_update_fan_div(data);
+}
/* This is called when the module is loaded */
static int via686a_probe(struct platform_device *pdev)
@@ -672,7 +749,7 @@ static int via686a_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, VIA686A_EXTENT,
- via686a_driver.driver.name)) {
+ DRIVER_NAME)) {
dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
@@ -685,7 +762,7 @@ static int via686a_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->addr = res->start;
- data->name = "via686a";
+ data->name = DRIVER_NAME;
mutex_init(&data->update_lock);
/* Initialize the VIA686A chip */
@@ -719,94 +796,13 @@ static int via686a_remove(struct platform_device *pdev)
return 0;
}
-static void via686a_update_fan_div(struct via686a_data *data)
-{
- int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
- data->fan_div[0] = (reg >> 4) & 0x03;
- data->fan_div[1] = reg >> 6;
-}
-
-static void via686a_init_device(struct via686a_data *data)
-{
- u8 reg;
-
- /* Start monitoring */
- reg = via686a_read_value(data, VIA686A_REG_CONFIG);
- via686a_write_value(data, VIA686A_REG_CONFIG, (reg | 0x01) & 0x7F);
-
- /* Configure temp interrupt mode for continuous-interrupt operation */
- reg = via686a_read_value(data, VIA686A_REG_TEMP_MODE);
- via686a_write_value(data, VIA686A_REG_TEMP_MODE,
- (reg & ~VIA686A_TEMP_MODE_MASK)
- | VIA686A_TEMP_MODE_CONTINUOUS);
-
- /* Pre-read fan clock divisor values */
- via686a_update_fan_div(data);
-}
-
-static struct via686a_data *via686a_update_device(struct device *dev)
-{
- struct via686a_data *data = dev_get_drvdata(dev);
- int i;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- for (i = 0; i <= 4; i++) {
- data->in[i] =
- via686a_read_value(data, VIA686A_REG_IN(i));
- data->in_min[i] = via686a_read_value(data,
- VIA686A_REG_IN_MIN
- (i));
- data->in_max[i] =
- via686a_read_value(data, VIA686A_REG_IN_MAX(i));
- }
- for (i = 1; i <= 2; i++) {
- data->fan[i - 1] =
- via686a_read_value(data, VIA686A_REG_FAN(i));
- data->fan_min[i - 1] = via686a_read_value(data,
- VIA686A_REG_FAN_MIN(i));
- }
- for (i = 0; i <= 2; i++) {
- data->temp[i] = via686a_read_value(data,
- VIA686A_REG_TEMP[i]) << 2;
- data->temp_over[i] =
- via686a_read_value(data,
- VIA686A_REG_TEMP_OVER[i]);
- data->temp_hyst[i] =
- via686a_read_value(data,
- VIA686A_REG_TEMP_HYST[i]);
- }
- /*
- * add in lower 2 bits
- * temp1 uses bits 7-6 of VIA686A_REG_TEMP_LOW1
- * temp2 uses bits 5-4 of VIA686A_REG_TEMP_LOW23
- * temp3 uses bits 7-6 of VIA686A_REG_TEMP_LOW23
- */
- data->temp[0] |= (via686a_read_value(data,
- VIA686A_REG_TEMP_LOW1)
- & 0xc0) >> 6;
- data->temp[1] |=
- (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
- 0x30) >> 4;
- data->temp[2] |=
- (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
- 0xc0) >> 6;
-
- via686a_update_fan_div(data);
- data->alarms =
- via686a_read_value(data,
- VIA686A_REG_ALARM1) |
- (via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
-}
+static struct platform_driver via686a_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = via686a_probe,
+ .remove = via686a_remove,
+};
static const struct pci_device_id via686a_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
@@ -819,7 +815,7 @@ static int via686a_device_add(unsigned short address)
struct resource res = {
.start = address,
.end = address + VIA686A_EXTENT - 1,
- .name = "via686a",
+ .name = DRIVER_NAME,
.flags = IORESOURCE_IO,
};
int err;
@@ -828,7 +824,7 @@ static int via686a_device_add(unsigned short address)
if (err)
goto exit;
- pdev = platform_device_alloc("via686a", address);
+ pdev = platform_device_alloc(DRIVER_NAME, address);
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -918,7 +914,7 @@ exit:
}
static struct pci_driver via686a_pci_driver = {
- .name = "via686a",
+ .name = DRIVER_NAME,
.id_table = via686a_pci_ids,
.probe = via686a_pci_probe,
};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 03275ac8ba72..3b7f8922b0d5 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -38,6 +38,8 @@ static struct platform_device *pdev;
#define VT8231_BASE_REG 0x70
#define VT8231_ENABLE_REG 0x74
+#define DRIVER_NAME "vt8231"
+
/*
* The VT8231 registers
*
@@ -162,10 +164,6 @@ struct vt8231_data {
};
static struct pci_dev *s_bridge;
-static int vt8231_probe(struct platform_device *pdev);
-static int vt8231_remove(struct platform_device *pdev);
-static struct vt8231_data *vt8231_update_device(struct device *dev);
-static void vt8231_init_device(struct vt8231_data *data);
static inline int vt8231_read_value(struct vt8231_data *data, u8 reg)
{
@@ -178,6 +176,74 @@ static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
outb_p(value, data->addr + reg);
}
+static struct vt8231_data *vt8231_update_device(struct device *dev)
+{
+ struct vt8231_data *data = dev_get_drvdata(dev);
+ int i;
+ u16 low;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i < 6; i++) {
+ if (ISVOLT(i, data->uch_config)) {
+ data->in[i] = vt8231_read_value(data,
+ regvolt[i]);
+ data->in_min[i] = vt8231_read_value(data,
+ regvoltmin[i]);
+ data->in_max[i] = vt8231_read_value(data,
+ regvoltmax[i]);
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ data->fan[i] = vt8231_read_value(data,
+ VT8231_REG_FAN(i));
+ data->fan_min[i] = vt8231_read_value(data,
+ VT8231_REG_FAN_MIN(i));
+ }
+
+ low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
+ low = (low >> 6) | ((low & 0x30) >> 2)
+ | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
+ for (i = 0; i < 6; i++) {
+ if (ISTEMP(i, data->uch_config)) {
+ data->temp[i] = (vt8231_read_value(data,
+ regtemp[i]) << 2)
+ | ((low >> (2 * i)) & 0x03);
+ data->temp_max[i] = vt8231_read_value(data,
+ regtempmax[i]);
+ data->temp_min[i] = vt8231_read_value(data,
+ regtempmin[i]);
+ }
+ }
+
+ i = vt8231_read_value(data, VT8231_REG_FANDIV);
+ data->fan_div[0] = (i >> 4) & 0x03;
+ data->fan_div[1] = i >> 6;
+ data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
+ (vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
+
+ /* Set alarm flags correctly */
+ if (!data->fan[0] && data->fan_min[0])
+ data->alarms |= 0x40;
+ else if (data->fan[0] && !data->fan_min[0])
+ data->alarms &= ~0x40;
+
+ if (!data->fan[1] && data->fan_min[1])
+ data->alarms |= 0x80;
+ else if (data->fan[1] && !data->fan_min[1])
+ data->alarms &= ~0x80;
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
/* following are the sysfs callback functions */
static ssize_t in_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -751,29 +817,11 @@ static const struct attribute_group vt8231_group = {
.attrs = vt8231_attributes,
};
-static struct platform_driver vt8231_driver = {
- .driver = {
- .name = "vt8231",
- },
- .probe = vt8231_probe,
- .remove = vt8231_remove,
-};
-
-static const struct pci_device_id vt8231_pci_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
-
-static int vt8231_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
-
-static struct pci_driver vt8231_pci_driver = {
- .name = "vt8231",
- .id_table = vt8231_pci_ids,
- .probe = vt8231_pci_probe,
-};
+static void vt8231_init_device(struct vt8231_data *data)
+{
+ vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
+ vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
+}
static int vt8231_probe(struct platform_device *pdev)
{
@@ -784,7 +832,7 @@ static int vt8231_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, VT8231_EXTENT,
- vt8231_driver.driver.name)) {
+ DRIVER_NAME)) {
dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
@@ -796,7 +844,7 @@ static int vt8231_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
data->addr = res->start;
- data->name = "vt8231";
+ data->name = DRIVER_NAME;
mutex_init(&data->update_lock);
vt8231_init_device(data);
@@ -863,86 +911,28 @@ static int vt8231_remove(struct platform_device *pdev)
return 0;
}
-static void vt8231_init_device(struct vt8231_data *data)
-{
- vt8231_write_value(data, VT8231_REG_TEMP1_CONFIG, 0);
- vt8231_write_value(data, VT8231_REG_TEMP2_CONFIG, 0);
-}
-
-static struct vt8231_data *vt8231_update_device(struct device *dev)
-{
- struct vt8231_data *data = dev_get_drvdata(dev);
- int i;
- u16 low;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- for (i = 0; i < 6; i++) {
- if (ISVOLT(i, data->uch_config)) {
- data->in[i] = vt8231_read_value(data,
- regvolt[i]);
- data->in_min[i] = vt8231_read_value(data,
- regvoltmin[i]);
- data->in_max[i] = vt8231_read_value(data,
- regvoltmax[i]);
- }
- }
- for (i = 0; i < 2; i++) {
- data->fan[i] = vt8231_read_value(data,
- VT8231_REG_FAN(i));
- data->fan_min[i] = vt8231_read_value(data,
- VT8231_REG_FAN_MIN(i));
- }
-
- low = vt8231_read_value(data, VT8231_REG_TEMP_LOW01);
- low = (low >> 6) | ((low & 0x30) >> 2)
- | (vt8231_read_value(data, VT8231_REG_TEMP_LOW25) << 4);
- for (i = 0; i < 6; i++) {
- if (ISTEMP(i, data->uch_config)) {
- data->temp[i] = (vt8231_read_value(data,
- regtemp[i]) << 2)
- | ((low >> (2 * i)) & 0x03);
- data->temp_max[i] = vt8231_read_value(data,
- regtempmax[i]);
- data->temp_min[i] = vt8231_read_value(data,
- regtempmin[i]);
- }
- }
-
- i = vt8231_read_value(data, VT8231_REG_FANDIV);
- data->fan_div[0] = (i >> 4) & 0x03;
- data->fan_div[1] = i >> 6;
- data->alarms = vt8231_read_value(data, VT8231_REG_ALARM1) |
- (vt8231_read_value(data, VT8231_REG_ALARM2) << 8);
-
- /* Set alarm flags correctly */
- if (!data->fan[0] && data->fan_min[0])
- data->alarms |= 0x40;
- else if (data->fan[0] && !data->fan_min[0])
- data->alarms &= ~0x40;
-
- if (!data->fan[1] && data->fan_min[1])
- data->alarms |= 0x80;
- else if (data->fan[1] && !data->fan_min[1])
- data->alarms &= ~0x80;
- data->last_updated = jiffies;
- data->valid = true;
- }
+static struct platform_driver vt8231_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = vt8231_probe,
+ .remove = vt8231_remove,
+};
- mutex_unlock(&data->update_lock);
+static const struct pci_device_id vt8231_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
+ { 0, }
+};
- return data;
-}
+MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
static int vt8231_device_add(unsigned short address)
{
struct resource res = {
.start = address,
.end = address + VT8231_EXTENT - 1,
- .name = "vt8231",
+ .name = DRIVER_NAME,
.flags = IORESOURCE_IO,
};
int err;
@@ -951,7 +941,7 @@ static int vt8231_device_add(unsigned short address)
if (err)
goto exit;
- pdev = platform_device_alloc("vt8231", address);
+ pdev = platform_device_alloc(DRIVER_NAME, address);
if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
@@ -1040,6 +1030,12 @@ exit:
return -ENODEV;
}
+static struct pci_driver vt8231_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = vt8231_pci_ids,
+ .probe = vt8231_pci_probe,
+};
+
static int __init sm_vt8231_init(void)
{
return pci_register_driver(&vt8231_pci_driver);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index af89b32a93a5..939d4c35e713 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1944,7 +1944,7 @@ static int __init w83627ehf_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __maybe_unused w83627ehf_suspend(struct device *dev)
+static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
@@ -1955,7 +1955,7 @@ static int __maybe_unused w83627ehf_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused w83627ehf_resume(struct device *dev)
+static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
int i;
@@ -2010,12 +2010,12 @@ static int __maybe_unused w83627ehf_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(w83627ehf_dev_pm_ops, w83627ehf_suspend, w83627ehf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(w83627ehf_dev_pm_ops, w83627ehf_suspend, w83627ehf_resume);
static struct platform_driver w83627ehf_driver = {
.driver = {
.name = DRVNAME,
- .pm = &w83627ehf_dev_pm_ops,
+ .pm = pm_sleep_ptr(&w83627ehf_dev_pm_ops),
},
};
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 9be277156ed2..b638d672ac45 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -389,14 +389,184 @@ struct w83627hf_data {
#endif
};
-static int w83627hf_probe(struct platform_device *pdev);
-static int w83627hf_remove(struct platform_device *pdev);
+/* Registers 0x50-0x5f are banked */
+static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
+{
+ if ((reg & 0x00f0) == 0x50) {
+ outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
+ outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET);
+ }
+}
+
+/* Not strictly necessary, but play it safe for now */
+static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg)
+{
+ if (reg & 0xff00) {
+ outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
+ outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
+ }
+}
+
+static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
+{
+ int res, word_sized;
+
+ mutex_lock(&data->lock);
+ word_sized = (((reg & 0xff00) == 0x100)
+ || ((reg & 0xff00) == 0x200))
+ && (((reg & 0x00ff) == 0x50)
+ || ((reg & 0x00ff) == 0x53)
+ || ((reg & 0x00ff) == 0x55));
+ w83627hf_set_bank(data, reg);
+ outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
+ res = inb_p(data->addr + W83781D_DATA_REG_OFFSET);
+ if (word_sized) {
+ outb_p((reg & 0xff) + 1,
+ data->addr + W83781D_ADDR_REG_OFFSET);
+ res =
+ (res << 8) + inb_p(data->addr +
+ W83781D_DATA_REG_OFFSET);
+ }
+ w83627hf_reset_bank(data, reg);
+ mutex_unlock(&data->lock);
+ return res;
+}
+
+static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
+{
+ int word_sized;
+
+ mutex_lock(&data->lock);
+ word_sized = (((reg & 0xff00) == 0x100)
+ || ((reg & 0xff00) == 0x200))
+ && (((reg & 0x00ff) == 0x53)
+ || ((reg & 0x00ff) == 0x55));
+ w83627hf_set_bank(data, reg);
+ outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
+ if (word_sized) {
+ outb_p(value >> 8,
+ data->addr + W83781D_DATA_REG_OFFSET);
+ outb_p((reg & 0xff) + 1,
+ data->addr + W83781D_ADDR_REG_OFFSET);
+ }
+ outb_p(value & 0xff,
+ data->addr + W83781D_DATA_REG_OFFSET);
+ w83627hf_reset_bank(data, reg);
+ mutex_unlock(&data->lock);
+ return 0;
+}
+
+static void w83627hf_update_fan_div(struct w83627hf_data *data)
+{
+ int reg;
+
+ reg = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
+ data->fan_div[0] = (reg >> 4) & 0x03;
+ data->fan_div[1] = (reg >> 6) & 0x03;
+ if (data->type != w83697hf) {
+ data->fan_div[2] = (w83627hf_read_value(data,
+ W83781D_REG_PIN) >> 6) & 0x03;
+ }
+ reg = w83627hf_read_value(data, W83781D_REG_VBAT);
+ data->fan_div[0] |= (reg >> 3) & 0x04;
+ data->fan_div[1] |= (reg >> 4) & 0x04;
+ if (data->type != w83697hf)
+ data->fan_div[2] |= (reg >> 5) & 0x04;
+}
-static int w83627hf_read_value(struct w83627hf_data *data, u16 reg);
-static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value);
-static void w83627hf_update_fan_div(struct w83627hf_data *data);
-static struct w83627hf_data *w83627hf_update_device(struct device *dev);
-static void w83627hf_init_device(struct platform_device *pdev);
+static struct w83627hf_data *w83627hf_update_device(struct device *dev)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ int i, num_temps = (data->type == w83697hf) ? 2 : 3;
+ int num_pwms = (data->type == w83697hf) ? 2 : 3;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i <= 8; i++) {
+ /* skip missing sensors */
+ if (((data->type == w83697hf) && (i == 1)) ||
+ ((data->type != w83627hf && data->type != w83697hf)
+ && (i == 5 || i == 6)))
+ continue;
+ data->in[i] =
+ w83627hf_read_value(data, W83781D_REG_IN(i));
+ data->in_min[i] =
+ w83627hf_read_value(data,
+ W83781D_REG_IN_MIN(i));
+ data->in_max[i] =
+ w83627hf_read_value(data,
+ W83781D_REG_IN_MAX(i));
+ }
+ for (i = 0; i <= 2; i++) {
+ data->fan[i] =
+ w83627hf_read_value(data, W83627HF_REG_FAN(i));
+ data->fan_min[i] =
+ w83627hf_read_value(data,
+ W83627HF_REG_FAN_MIN(i));
+ }
+ for (i = 0; i <= 2; i++) {
+ u8 tmp = w83627hf_read_value(data,
+ W836X7HF_REG_PWM(data->type, i));
+ /* bits 0-3 are reserved in 627THF */
+ if (data->type == w83627thf)
+ tmp &= 0xf0;
+ data->pwm[i] = tmp;
+ if (i == 1 &&
+ (data->type == w83627hf || data->type == w83697hf))
+ break;
+ }
+ if (data->type == w83627hf) {
+ u8 tmp = w83627hf_read_value(data,
+ W83627HF_REG_PWM_FREQ);
+ data->pwm_freq[0] = tmp & 0x07;
+ data->pwm_freq[1] = (tmp >> 4) & 0x07;
+ } else if (data->type != w83627thf) {
+ for (i = 1; i <= 3; i++) {
+ data->pwm_freq[i - 1] =
+ w83627hf_read_value(data,
+ W83637HF_REG_PWM_FREQ[i - 1]);
+ if (i == 2 && (data->type == w83697hf))
+ break;
+ }
+ }
+ if (data->type != w83627hf) {
+ for (i = 0; i < num_pwms; i++) {
+ u8 tmp = w83627hf_read_value(data,
+ W83627THF_REG_PWM_ENABLE[i]);
+ data->pwm_enable[i] =
+ ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i])
+ & 0x03) + 1;
+ }
+ }
+ for (i = 0; i < num_temps; i++) {
+ data->temp[i] = w83627hf_read_value(
+ data, w83627hf_reg_temp[i]);
+ data->temp_max[i] = w83627hf_read_value(
+ data, w83627hf_reg_temp_over[i]);
+ data->temp_max_hyst[i] = w83627hf_read_value(
+ data, w83627hf_reg_temp_hyst[i]);
+ }
+
+ w83627hf_update_fan_div(data);
+
+ data->alarms =
+ w83627hf_read_value(data, W83781D_REG_ALARM1) |
+ (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) |
+ (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16);
+ i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2);
+ data->beep_mask = (i << 8) |
+ w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
+ w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
#ifdef CONFIG_PM
static int w83627hf_suspend(struct device *dev)
@@ -464,99 +634,171 @@ static const struct dev_pm_ops w83627hf_dev_pm_ops = {
#define W83627HF_DEV_PM_OPS NULL
#endif /* CONFIG_PM */
-static struct platform_driver w83627hf_driver = {
- .driver = {
- .name = DRVNAME,
- .pm = W83627HF_DEV_PM_OPS,
- },
- .probe = w83627hf_probe,
- .remove = w83627hf_remove,
-};
-
-static ssize_t
-in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
-}
-static ssize_t
-in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
-}
-static ssize_t
-in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
+static int w83627thf_read_gpio5(struct platform_device *pdev)
{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ int res = 0xff, sel;
+
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
+ superio_select(sio_data, W83627HF_LD_GPIO5);
+
+ res = 0xff;
+
+ /* Make sure these GPIO pins are enabled */
+ if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
+ dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
+ goto exit;
+ }
+
+ /*
+ * Make sure the pins are configured for input
+ * There must be at least five (VRM 9), and possibly 6 (VRM 10)
+ */
+ sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f;
+ if ((sel & 0x1f) != 0x1f) {
+ dev_dbg(&pdev->dev, "GPIO5 not configured for VID "
+ "function\n");
+ goto exit;
+ }
+
+ dev_info(&pdev->dev, "Reading VID from GPIO5\n");
+ res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel;
+
+exit:
+ superio_exit(sio_data);
+ return res;
}
-static ssize_t
-in_min_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+
+static int w83687thf_read_vid(struct platform_device *pdev)
{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- long val;
- int err;
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ int res = 0xff;
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
- mutex_lock(&data->update_lock);
- data->in_min[nr] = IN_TO_REG(val);
- w83627hf_write_value(data, W83781D_REG_IN_MIN(nr), data->in_min[nr]);
- mutex_unlock(&data->update_lock);
- return count;
+ superio_select(sio_data, W83627HF_LD_HWM);
+
+ /* Make sure these GPIO pins are enabled */
+ if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) {
+ dev_dbg(&pdev->dev, "VID disabled, no VID function\n");
+ goto exit;
+ }
+
+ /* Make sure the pins are configured for input */
+ if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) {
+ dev_dbg(&pdev->dev, "VID configured as output, "
+ "no VID function\n");
+ goto exit;
+ }
+
+ res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f;
+
+exit:
+ superio_exit(sio_data);
+ return res;
}
-static ssize_t
-in_max_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+
+static void w83627hf_init_device(struct platform_device *pdev)
{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- long val;
- int err;
+ struct w83627hf_data *data = platform_get_drvdata(pdev);
+ int i;
+ enum chips type = data->type;
+ u8 tmp;
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
+ /* Minimize conflicts with other winbond i2c-only clients... */
+ /* disable i2c subclients... how to disable main i2c client?? */
+ /* force i2c address to relatively uncommon address */
+ if (type == w83627hf) {
+ w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
+ w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
+ }
- mutex_lock(&data->update_lock);
- data->in_max[nr] = IN_TO_REG(val);
- w83627hf_write_value(data, W83781D_REG_IN_MAX(nr), data->in_max[nr]);
- mutex_unlock(&data->update_lock);
- return count;
-}
+ /* Read VID only once */
+ if (type == w83627hf || type == w83637hf) {
+ int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
+ int hi = w83627hf_read_value(data, W83781D_REG_CHIPID);
+ data->vid = (lo & 0x0f) | ((hi & 0x01) << 4);
+ } else if (type == w83627thf) {
+ data->vid = w83627thf_read_gpio5(pdev);
+ } else if (type == w83687thf) {
+ data->vid = w83687thf_read_vid(pdev);
+ }
-static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
-static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
-static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
-static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
-static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
-static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
-static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
-static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
-static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
-static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
-static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
-static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
-static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
-static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
-static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
-static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
-static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
-static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
-static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
-static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
-static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
-static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
-static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
-static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+ /* Read VRM & OVT Config only once */
+ if (type == w83627thf || type == w83637hf || type == w83687thf) {
+ data->vrm_ovt =
+ w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG);
+ }
+
+ tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
+ for (i = 1; i <= 3; i++) {
+ if (!(tmp & BIT_SCFG1[i - 1])) {
+ data->sens[i - 1] = 4;
+ } else {
+ if (w83627hf_read_value
+ (data,
+ W83781D_REG_SCFG2) & BIT_SCFG2[i - 1])
+ data->sens[i - 1] = 1;
+ else
+ data->sens[i - 1] = 2;
+ }
+ if ((type == w83697hf) && (i == 2))
+ break;
+ }
+
+ if(init) {
+ /* Enable temp2 */
+ tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG);
+ if (tmp & 0x01) {
+ dev_warn(&pdev->dev, "Enabling temp2, readings "
+ "might not make sense\n");
+ w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG,
+ tmp & 0xfe);
+ }
+
+ /* Enable temp3 */
+ if (type != w83697hf) {
+ tmp = w83627hf_read_value(data,
+ W83627HF_REG_TEMP3_CONFIG);
+ if (tmp & 0x01) {
+ dev_warn(&pdev->dev, "Enabling temp3, "
+ "readings might not make sense\n");
+ w83627hf_write_value(data,
+ W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe);
+ }
+ }
+ }
+
+ /* Start monitoring */
+ w83627hf_write_value(data, W83781D_REG_CONFIG,
+ (w83627hf_read_value(data,
+ W83781D_REG_CONFIG) & 0xf7)
+ | 0x01);
+
+ /* Enable VBAT monitoring if needed */
+ tmp = w83627hf_read_value(data, W83781D_REG_VBAT);
+ if (!(tmp & 0x01))
+ w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01);
+}
/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
@@ -582,6 +824,7 @@ static ssize_t in0_input_show(struct device *dev,
struct w83627hf_data *data = w83627hf_update_device(dev);
return show_in_0(data, buf, data->in[0]);
}
+static DEVICE_ATTR_RO(in0_input);
static ssize_t in0_min_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -590,13 +833,6 @@ static ssize_t in0_min_show(struct device *dev, struct device_attribute *attr,
return show_in_0(data, buf, data->in_min[0]);
}
-static ssize_t in0_max_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return show_in_0(data, buf, data->in_max[0]);
-}
-
static ssize_t in0_min_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -627,6 +863,15 @@ static ssize_t in0_min_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(in0_min);
+
+static ssize_t in0_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return show_in_0(data, buf, data->in_max[0]);
+}
+
static ssize_t in0_max_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -657,193 +902,16 @@ static ssize_t in0_max_store(struct device *dev,
return count;
}
-static DEVICE_ATTR_RO(in0_input);
-static DEVICE_ATTR_RW(in0_min);
static DEVICE_ATTR_RW(in0_max);
static ssize_t
-fan_input_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr],
- (long)DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t
-fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr],
- (long)DIV_FROM_REG(data->fan_div[nr])));
-}
-static ssize_t
-fan_min_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
- data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
- w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
- data->fan_min[nr]);
-
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
-static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
-static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
-static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
-static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
-static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
-
-static ssize_t
-temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
-
- u16 tmp = data->temp[nr];
- return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
- : (long) TEMP_FROM_REG(tmp));
-}
-
-static ssize_t
-temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
-
- u16 tmp = data->temp_max[nr];
- return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
- : (long) TEMP_FROM_REG(tmp));
-}
-
-static ssize_t
-temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = w83627hf_update_device(dev);
-
- u16 tmp = data->temp_max_hyst[nr];
- return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
- : (long) TEMP_FROM_REG(tmp));
-}
-
-static ssize_t
-temp_max_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- u16 tmp;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
- mutex_lock(&data->update_lock);
- data->temp_max[nr] = tmp;
- w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static ssize_t
-temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- u16 tmp;
- long val;
- int err;
-
- err = kstrtol(buf, 10, &val);
- if (err)
- return err;
-
- tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
- mutex_lock(&data->update_lock);
- data->temp_max_hyst[nr] = tmp;
- w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp);
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
-static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
-static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
-
-static ssize_t
-cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
-}
-static DEVICE_ATTR_RO(cpu0_vid);
-
-static ssize_t
-vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%ld\n", (long) data->vrm);
-}
-static ssize_t
-vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- if (val > 255)
- return -EINVAL;
- data->vrm = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(vrm);
-
-static ssize_t
-alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long) data->alarms);
-}
-static DEVICE_ATTR_RO(alarms);
-
-static ssize_t
alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
+
static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
@@ -861,44 +929,6 @@ static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 13);
static ssize_t
-beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n",
- (long)BEEP_MASK_FROM_REG(data->beep_mask));
-}
-
-static ssize_t
-beep_mask_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
-
- /* preserve beep enable */
- data->beep_mask = (data->beep_mask & 0x8000)
- | BEEP_MASK_TO_REG(val);
- w83627hf_write_value(data, W83781D_REG_BEEP_INTS1,
- data->beep_mask & 0xff);
- w83627hf_write_value(data, W83781D_REG_BEEP_INTS3,
- ((data->beep_mask) >> 16) & 0xff);
- w83627hf_write_value(data, W83781D_REG_BEEP_INTS2,
- (data->beep_mask >> 8) & 0xff);
-
- mutex_unlock(&data->update_lock);
- return count;
-}
-
-static DEVICE_ATTR_RW(beep_mask);
-
-static ssize_t
beep_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -974,6 +1004,143 @@ static SENSOR_DEVICE_ATTR_RW(temp3_beep, beep, 13);
static SENSOR_DEVICE_ATTR_RW(beep_enable, beep, 15);
static ssize_t
+in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
+}
+
+static ssize_t
+in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
+}
+
+static ssize_t
+in_min_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->in_min[nr] = IN_TO_REG(val);
+ w83627hf_write_value(data, W83781D_REG_IN_MIN(nr), data->in_min[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
+}
+
+static ssize_t
+in_max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ long val;
+ int err;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->in_max[nr] = IN_TO_REG(val);
+ w83627hf_write_value(data, W83781D_REG_IN_MAX(nr), data->in_max[nr]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+
+static ssize_t
+fan_input_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan[nr],
+ (long)DIV_FROM_REG(data->fan_div[nr])));
+}
+
+static ssize_t
+fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", FAN_FROM_REG(data->fan_min[nr],
+ (long)DIV_FROM_REG(data->fan_div[nr])));
+}
+
+static ssize_t
+fan_min_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
+ w83627hf_write_value(data, W83627HF_REG_FAN_MIN(nr),
+ data->fan_min[nr]);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+
+static ssize_t
fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -981,6 +1148,7 @@ fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%ld\n",
(long) DIV_FROM_REG(data->fan_div[nr]));
}
+
/*
* Note: we save and restore the fan minimum here, because its value is
* determined in part by the fan divisor. This follows the principle of
@@ -1033,138 +1201,92 @@ static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
static ssize_t
-pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%ld\n", (long) data->pwm[nr]);
-}
-static ssize_t
-pwm_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- int nr = to_sensor_dev_attr(devattr)->index;
- struct w83627hf_data *data = dev_get_drvdata(dev);
- unsigned long val;
- int err;
-
- err = kstrtoul(buf, 10, &val);
- if (err)
- return err;
-
- mutex_lock(&data->update_lock);
-
- if (data->type == w83627thf) {
- /* bits 0-3 are reserved in 627THF */
- data->pwm[nr] = PWM_TO_REG(val) & 0xf0;
- w83627hf_write_value(data,
- W836X7HF_REG_PWM(data->type, nr),
- data->pwm[nr] |
- (w83627hf_read_value(data,
- W836X7HF_REG_PWM(data->type, nr)) & 0x0f));
- } else {
- data->pwm[nr] = PWM_TO_REG(val);
- w83627hf_write_value(data,
- W836X7HF_REG_PWM(data->type, nr),
- data->pwm[nr]);
- }
-
- mutex_unlock(&data->update_lock);
- return count;
+ u16 tmp = data->temp[nr];
+ return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
+ : (long) TEMP_FROM_REG(tmp));
}
-static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
-
static ssize_t
-pwm_enable_show(struct device *dev, struct device_attribute *devattr,
- char *buf)
+temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm_enable[nr]);
+
+ u16 tmp = data->temp_max[nr];
+ return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
+ : (long) TEMP_FROM_REG(tmp));
}
static ssize_t
-pwm_enable_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+temp_max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
- u8 reg;
- unsigned long val;
+ u16 tmp;
+ long val;
int err;
- err = kstrtoul(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
- if (!val || val > 3) /* modes 1, 2 and 3 are supported */
- return -EINVAL;
+ tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
mutex_lock(&data->update_lock);
- data->pwm_enable[nr] = val;
- reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]);
- reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr];
- w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg);
+ data->temp_max[nr] = tmp;
+ w83627hf_write_value(data, w83627hf_reg_temp_over[nr], tmp);
mutex_unlock(&data->update_lock);
return count;
}
-static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
-
static ssize_t
-pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
- if (data->type == w83627hf)
- return sprintf(buf, "%ld\n",
- pwm_freq_from_reg_627hf(data->pwm_freq[nr]));
- else
- return sprintf(buf, "%ld\n",
- pwm_freq_from_reg(data->pwm_freq[nr]));
+
+ u16 tmp = data->temp_max_hyst[nr];
+ return sprintf(buf, "%ld\n", (nr) ? (long) LM75_TEMP_FROM_REG(tmp)
+ : (long) TEMP_FROM_REG(tmp));
}
static ssize_t
-pwm_freq_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
- static const u8 mask[]={0xF8, 0x8F};
- unsigned long val;
+ u16 tmp;
+ long val;
int err;
- err = kstrtoul(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
+ tmp = (nr) ? LM75_TEMP_TO_REG(val) : TEMP_TO_REG(val);
mutex_lock(&data->update_lock);
-
- if (data->type == w83627hf) {
- data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val);
- w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
- (data->pwm_freq[nr] << (nr*4)) |
- (w83627hf_read_value(data,
- W83627HF_REG_PWM_FREQ) & mask[nr]));
- } else {
- data->pwm_freq[nr] = pwm_freq_to_reg(val);
- w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr],
- data->pwm_freq[nr]);
- }
-
+ data->temp_max_hyst[nr] = tmp;
+ w83627hf_write_value(data, w83627hf_reg_temp_hyst[nr], tmp);
mutex_unlock(&data->update_lock);
return count;
}
-static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
-static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
-static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
static ssize_t
temp_type_show(struct device *dev, struct device_attribute *devattr,
@@ -1236,81 +1358,12 @@ static SENSOR_DEVICE_ATTR_RW(temp2_type, temp_type, 1);
static SENSOR_DEVICE_ATTR_RW(temp3_type, temp_type, 2);
static ssize_t
-name_show(struct device *dev, struct device_attribute *devattr, char *buf)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", data->name);
-}
-static DEVICE_ATTR_RO(name);
-
-static int __init w83627hf_find(int sioaddr, unsigned short *addr,
- struct w83627hf_sio_data *sio_data)
+alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- int err;
- u16 val;
-
- static __initconst char *const names[] = {
- "W83627HF",
- "W83627THF",
- "W83697HF",
- "W83637HF",
- "W83687THF",
- };
-
- sio_data->sioaddr = sioaddr;
- err = superio_enter(sio_data);
- if (err)
- return err;
-
- err = -ENODEV;
- val = force_id ? force_id : superio_inb(sio_data, DEVID);
- switch (val) {
- case W627_DEVID:
- sio_data->type = w83627hf;
- break;
- case W627THF_DEVID:
- sio_data->type = w83627thf;
- break;
- case W697_DEVID:
- sio_data->type = w83697hf;
- break;
- case W637_DEVID:
- sio_data->type = w83637hf;
- break;
- case W687THF_DEVID:
- sio_data->type = w83687thf;
- break;
- case 0xff: /* No device at all */
- goto exit;
- default:
- pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val);
- goto exit;
- }
-
- superio_select(sio_data, W83627HF_LD_HWM);
- val = (superio_inb(sio_data, WINB_BASE_REG) << 8) |
- superio_inb(sio_data, WINB_BASE_REG + 1);
- *addr = val & WINB_ALIGNMENT;
- if (*addr == 0) {
- pr_warn("Base address not set, skipping\n");
- goto exit;
- }
-
- val = superio_inb(sio_data, WINB_ACT_REG);
- if (!(val & 0x01)) {
- pr_warn("Enabling HWM logical device\n");
- superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
- }
-
- err = 0;
- pr_info(DRVNAME ": Found %s chip at %#x\n",
- names[sio_data->type], *addr);
-
- exit:
- superio_exit(sio_data);
- return err;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long) data->alarms);
}
+static DEVICE_ATTR_RO(alarms);
#define VIN_UNIT_ATTRS(_X_) \
&sensor_dev_attr_in##_X_##_input.dev_attr.attr, \
@@ -1334,6 +1387,100 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
&sensor_dev_attr_temp##_X_##_alarm.dev_attr.attr, \
&sensor_dev_attr_temp##_X_##_beep.dev_attr.attr
+static ssize_t
+beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n",
+ (long)BEEP_MASK_FROM_REG(data->beep_mask));
+}
+
+static ssize_t
+beep_mask_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ /* preserve beep enable */
+ data->beep_mask = (data->beep_mask & 0x8000)
+ | BEEP_MASK_TO_REG(val);
+ w83627hf_write_value(data, W83781D_REG_BEEP_INTS1,
+ data->beep_mask & 0xff);
+ w83627hf_write_value(data, W83781D_REG_BEEP_INTS3,
+ ((data->beep_mask) >> 16) & 0xff);
+ w83627hf_write_value(data, W83781D_REG_BEEP_INTS2,
+ (data->beep_mask >> 8) & 0xff);
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static DEVICE_ATTR_RW(beep_mask);
+
+static ssize_t
+pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long) data->pwm[nr]);
+}
+
+static ssize_t
+pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->type == w83627thf) {
+ /* bits 0-3 are reserved in 627THF */
+ data->pwm[nr] = PWM_TO_REG(val) & 0xf0;
+ w83627hf_write_value(data,
+ W836X7HF_REG_PWM(data->type, nr),
+ data->pwm[nr] |
+ (w83627hf_read_value(data,
+ W836X7HF_REG_PWM(data->type, nr)) & 0x0f));
+ } else {
+ data->pwm[nr] = PWM_TO_REG(val);
+ w83627hf_write_value(data,
+ W836X7HF_REG_PWM(data->type, nr),
+ data->pwm[nr]);
+ }
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+
+static ssize_t
+name_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
static struct attribute *w83627hf_attributes[] = {
&dev_attr_in0_input.attr,
&dev_attr_in0_min.attr,
@@ -1366,6 +1513,131 @@ static const struct attribute_group w83627hf_group = {
.attrs = w83627hf_attributes,
};
+static ssize_t
+pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ if (data->type == w83627hf)
+ return sprintf(buf, "%ld\n",
+ pwm_freq_from_reg_627hf(data->pwm_freq[nr]));
+ else
+ return sprintf(buf, "%ld\n",
+ pwm_freq_from_reg(data->pwm_freq[nr]));
+}
+
+static ssize_t
+pwm_freq_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ static const u8 mask[]={0xF8, 0x8F};
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->type == w83627hf) {
+ data->pwm_freq[nr] = pwm_freq_to_reg_627hf(val);
+ w83627hf_write_value(data, W83627HF_REG_PWM_FREQ,
+ (data->pwm_freq[nr] << (nr*4)) |
+ (w83627hf_read_value(data,
+ W83627HF_REG_PWM_FREQ) & mask[nr]));
+ } else {
+ data->pwm_freq[nr] = pwm_freq_to_reg(val);
+ w83627hf_write_value(data, W83637HF_REG_PWM_FREQ[nr],
+ data->pwm_freq[nr]);
+ }
+
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
+
+static ssize_t
+cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
+}
+
+static DEVICE_ATTR_RO(cpu0_vid);
+
+static ssize_t
+vrm_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ return sprintf(buf, "%ld\n", (long) data->vrm);
+}
+
+static ssize_t
+vrm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val > 255)
+ return -EINVAL;
+ data->vrm = val;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(vrm);
+
+static ssize_t
+pwm_enable_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = w83627hf_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm_enable[nr]);
+}
+
+static ssize_t
+pwm_enable_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int nr = to_sensor_dev_attr(devattr)->index;
+ struct w83627hf_data *data = dev_get_drvdata(dev);
+ u8 reg;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (!val || val > 3) /* modes 1, 2 and 3 are supported */
+ return -EINVAL;
+ mutex_lock(&data->update_lock);
+ data->pwm_enable[nr] = val;
+ reg = w83627hf_read_value(data, W83627THF_REG_PWM_ENABLE[nr]);
+ reg &= ~(0x03 << W83627THF_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83627THF_PWM_ENABLE_SHIFT[nr];
+ w83627hf_write_value(data, W83627THF_REG_PWM_ENABLE[nr], reg);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
+
static struct attribute *w83627hf_attributes_opt[] = {
VIN_UNIT_ATTRS(1),
VIN_UNIT_ATTRS(5),
@@ -1568,349 +1840,81 @@ static int w83627hf_remove(struct platform_device *pdev)
return 0;
}
-/* Registers 0x50-0x5f are banked */
-static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
-{
- if ((reg & 0x00f0) == 0x50) {
- outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
- outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET);
- }
-}
-
-/* Not strictly necessary, but play it safe for now */
-static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg)
-{
- if (reg & 0xff00) {
- outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
- outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
- }
-}
-
-static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
-{
- int res, word_sized;
-
- mutex_lock(&data->lock);
- word_sized = (((reg & 0xff00) == 0x100)
- || ((reg & 0xff00) == 0x200))
- && (((reg & 0x00ff) == 0x50)
- || ((reg & 0x00ff) == 0x53)
- || ((reg & 0x00ff) == 0x55));
- w83627hf_set_bank(data, reg);
- outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
- res = inb_p(data->addr + W83781D_DATA_REG_OFFSET);
- if (word_sized) {
- outb_p((reg & 0xff) + 1,
- data->addr + W83781D_ADDR_REG_OFFSET);
- res =
- (res << 8) + inb_p(data->addr +
- W83781D_DATA_REG_OFFSET);
- }
- w83627hf_reset_bank(data, reg);
- mutex_unlock(&data->lock);
- return res;
-}
+static struct platform_driver w83627hf_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .pm = W83627HF_DEV_PM_OPS,
+ },
+ .probe = w83627hf_probe,
+ .remove = w83627hf_remove,
+};
-static int w83627thf_read_gpio5(struct platform_device *pdev)
+static int __init w83627hf_find(int sioaddr, unsigned short *addr,
+ struct w83627hf_sio_data *sio_data)
{
- struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
- int res = 0xff, sel;
-
- if (superio_enter(sio_data)) {
- /*
- * Some other driver reserved the address space for itself.
- * We don't want to fail driver instantiation because of that,
- * so display a warning and keep going.
- */
- dev_warn(&pdev->dev,
- "Can not read VID data: Failed to enable SuperIO access\n");
- return res;
- }
+ int err;
+ u16 val;
- superio_select(sio_data, W83627HF_LD_GPIO5);
+ static __initconst char *const names[] = {
+ "W83627HF",
+ "W83627THF",
+ "W83697HF",
+ "W83637HF",
+ "W83687THF",
+ };
- res = 0xff;
+ sio_data->sioaddr = sioaddr;
+ err = superio_enter(sio_data);
+ if (err)
+ return err;
- /* Make sure these GPIO pins are enabled */
- if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
- dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
+ err = -ENODEV;
+ val = force_id ? force_id : superio_inb(sio_data, DEVID);
+ switch (val) {
+ case W627_DEVID:
+ sio_data->type = w83627hf;
+ break;
+ case W627THF_DEVID:
+ sio_data->type = w83627thf;
+ break;
+ case W697_DEVID:
+ sio_data->type = w83697hf;
+ break;
+ case W637_DEVID:
+ sio_data->type = w83637hf;
+ break;
+ case W687THF_DEVID:
+ sio_data->type = w83687thf;
+ break;
+ case 0xff: /* No device at all */
goto exit;
- }
-
- /*
- * Make sure the pins are configured for input
- * There must be at least five (VRM 9), and possibly 6 (VRM 10)
- */
- sel = superio_inb(sio_data, W83627THF_GPIO5_IOSR) & 0x3f;
- if ((sel & 0x1f) != 0x1f) {
- dev_dbg(&pdev->dev, "GPIO5 not configured for VID "
- "function\n");
+ default:
+ pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%02x)\n", val);
goto exit;
}
- dev_info(&pdev->dev, "Reading VID from GPIO5\n");
- res = superio_inb(sio_data, W83627THF_GPIO5_DR) & sel;
-
-exit:
- superio_exit(sio_data);
- return res;
-}
-
-static int w83687thf_read_vid(struct platform_device *pdev)
-{
- struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
- int res = 0xff;
-
- if (superio_enter(sio_data)) {
- /*
- * Some other driver reserved the address space for itself.
- * We don't want to fail driver instantiation because of that,
- * so display a warning and keep going.
- */
- dev_warn(&pdev->dev,
- "Can not read VID data: Failed to enable SuperIO access\n");
- return res;
- }
-
superio_select(sio_data, W83627HF_LD_HWM);
-
- /* Make sure these GPIO pins are enabled */
- if (!(superio_inb(sio_data, W83687THF_VID_EN) & (1 << 2))) {
- dev_dbg(&pdev->dev, "VID disabled, no VID function\n");
+ val = (superio_inb(sio_data, WINB_BASE_REG) << 8) |
+ superio_inb(sio_data, WINB_BASE_REG + 1);
+ *addr = val & WINB_ALIGNMENT;
+ if (*addr == 0) {
+ pr_warn("Base address not set, skipping\n");
goto exit;
}
- /* Make sure the pins are configured for input */
- if (!(superio_inb(sio_data, W83687THF_VID_CFG) & (1 << 4))) {
- dev_dbg(&pdev->dev, "VID configured as output, "
- "no VID function\n");
- goto exit;
+ val = superio_inb(sio_data, WINB_ACT_REG);
+ if (!(val & 0x01)) {
+ pr_warn("Enabling HWM logical device\n");
+ superio_outb(sio_data, WINB_ACT_REG, val | 0x01);
}
- res = superio_inb(sio_data, W83687THF_VID_DATA) & 0x3f;
+ err = 0;
+ pr_info(DRVNAME ": Found %s chip at %#x\n",
+ names[sio_data->type], *addr);
-exit:
+ exit:
superio_exit(sio_data);
- return res;
-}
-
-static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
-{
- int word_sized;
-
- mutex_lock(&data->lock);
- word_sized = (((reg & 0xff00) == 0x100)
- || ((reg & 0xff00) == 0x200))
- && (((reg & 0x00ff) == 0x53)
- || ((reg & 0x00ff) == 0x55));
- w83627hf_set_bank(data, reg);
- outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
- if (word_sized) {
- outb_p(value >> 8,
- data->addr + W83781D_DATA_REG_OFFSET);
- outb_p((reg & 0xff) + 1,
- data->addr + W83781D_ADDR_REG_OFFSET);
- }
- outb_p(value & 0xff,
- data->addr + W83781D_DATA_REG_OFFSET);
- w83627hf_reset_bank(data, reg);
- mutex_unlock(&data->lock);
- return 0;
-}
-
-static void w83627hf_init_device(struct platform_device *pdev)
-{
- struct w83627hf_data *data = platform_get_drvdata(pdev);
- int i;
- enum chips type = data->type;
- u8 tmp;
-
- /* Minimize conflicts with other winbond i2c-only clients... */
- /* disable i2c subclients... how to disable main i2c client?? */
- /* force i2c address to relatively uncommon address */
- if (type == w83627hf) {
- w83627hf_write_value(data, W83781D_REG_I2C_SUBADDR, 0x89);
- w83627hf_write_value(data, W83781D_REG_I2C_ADDR, force_i2c);
- }
-
- /* Read VID only once */
- if (type == w83627hf || type == w83637hf) {
- int lo = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
- int hi = w83627hf_read_value(data, W83781D_REG_CHIPID);
- data->vid = (lo & 0x0f) | ((hi & 0x01) << 4);
- } else if (type == w83627thf) {
- data->vid = w83627thf_read_gpio5(pdev);
- } else if (type == w83687thf) {
- data->vid = w83687thf_read_vid(pdev);
- }
-
- /* Read VRM & OVT Config only once */
- if (type == w83627thf || type == w83637hf || type == w83687thf) {
- data->vrm_ovt =
- w83627hf_read_value(data, W83627THF_REG_VRM_OVT_CFG);
- }
-
- tmp = w83627hf_read_value(data, W83781D_REG_SCFG1);
- for (i = 1; i <= 3; i++) {
- if (!(tmp & BIT_SCFG1[i - 1])) {
- data->sens[i - 1] = 4;
- } else {
- if (w83627hf_read_value
- (data,
- W83781D_REG_SCFG2) & BIT_SCFG2[i - 1])
- data->sens[i - 1] = 1;
- else
- data->sens[i - 1] = 2;
- }
- if ((type == w83697hf) && (i == 2))
- break;
- }
-
- if(init) {
- /* Enable temp2 */
- tmp = w83627hf_read_value(data, W83627HF_REG_TEMP2_CONFIG);
- if (tmp & 0x01) {
- dev_warn(&pdev->dev, "Enabling temp2, readings "
- "might not make sense\n");
- w83627hf_write_value(data, W83627HF_REG_TEMP2_CONFIG,
- tmp & 0xfe);
- }
-
- /* Enable temp3 */
- if (type != w83697hf) {
- tmp = w83627hf_read_value(data,
- W83627HF_REG_TEMP3_CONFIG);
- if (tmp & 0x01) {
- dev_warn(&pdev->dev, "Enabling temp3, "
- "readings might not make sense\n");
- w83627hf_write_value(data,
- W83627HF_REG_TEMP3_CONFIG, tmp & 0xfe);
- }
- }
- }
-
- /* Start monitoring */
- w83627hf_write_value(data, W83781D_REG_CONFIG,
- (w83627hf_read_value(data,
- W83781D_REG_CONFIG) & 0xf7)
- | 0x01);
-
- /* Enable VBAT monitoring if needed */
- tmp = w83627hf_read_value(data, W83781D_REG_VBAT);
- if (!(tmp & 0x01))
- w83627hf_write_value(data, W83781D_REG_VBAT, tmp | 0x01);
-}
-
-static void w83627hf_update_fan_div(struct w83627hf_data *data)
-{
- int reg;
-
- reg = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
- data->fan_div[0] = (reg >> 4) & 0x03;
- data->fan_div[1] = (reg >> 6) & 0x03;
- if (data->type != w83697hf) {
- data->fan_div[2] = (w83627hf_read_value(data,
- W83781D_REG_PIN) >> 6) & 0x03;
- }
- reg = w83627hf_read_value(data, W83781D_REG_VBAT);
- data->fan_div[0] |= (reg >> 3) & 0x04;
- data->fan_div[1] |= (reg >> 4) & 0x04;
- if (data->type != w83697hf)
- data->fan_div[2] |= (reg >> 5) & 0x04;
-}
-
-static struct w83627hf_data *w83627hf_update_device(struct device *dev)
-{
- struct w83627hf_data *data = dev_get_drvdata(dev);
- int i, num_temps = (data->type == w83697hf) ? 2 : 3;
- int num_pwms = (data->type == w83697hf) ? 2 : 3;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- for (i = 0; i <= 8; i++) {
- /* skip missing sensors */
- if (((data->type == w83697hf) && (i == 1)) ||
- ((data->type != w83627hf && data->type != w83697hf)
- && (i == 5 || i == 6)))
- continue;
- data->in[i] =
- w83627hf_read_value(data, W83781D_REG_IN(i));
- data->in_min[i] =
- w83627hf_read_value(data,
- W83781D_REG_IN_MIN(i));
- data->in_max[i] =
- w83627hf_read_value(data,
- W83781D_REG_IN_MAX(i));
- }
- for (i = 0; i <= 2; i++) {
- data->fan[i] =
- w83627hf_read_value(data, W83627HF_REG_FAN(i));
- data->fan_min[i] =
- w83627hf_read_value(data,
- W83627HF_REG_FAN_MIN(i));
- }
- for (i = 0; i <= 2; i++) {
- u8 tmp = w83627hf_read_value(data,
- W836X7HF_REG_PWM(data->type, i));
- /* bits 0-3 are reserved in 627THF */
- if (data->type == w83627thf)
- tmp &= 0xf0;
- data->pwm[i] = tmp;
- if (i == 1 &&
- (data->type == w83627hf || data->type == w83697hf))
- break;
- }
- if (data->type == w83627hf) {
- u8 tmp = w83627hf_read_value(data,
- W83627HF_REG_PWM_FREQ);
- data->pwm_freq[0] = tmp & 0x07;
- data->pwm_freq[1] = (tmp >> 4) & 0x07;
- } else if (data->type != w83627thf) {
- for (i = 1; i <= 3; i++) {
- data->pwm_freq[i - 1] =
- w83627hf_read_value(data,
- W83637HF_REG_PWM_FREQ[i - 1]);
- if (i == 2 && (data->type == w83697hf))
- break;
- }
- }
- if (data->type != w83627hf) {
- for (i = 0; i < num_pwms; i++) {
- u8 tmp = w83627hf_read_value(data,
- W83627THF_REG_PWM_ENABLE[i]);
- data->pwm_enable[i] =
- ((tmp >> W83627THF_PWM_ENABLE_SHIFT[i])
- & 0x03) + 1;
- }
- }
- for (i = 0; i < num_temps; i++) {
- data->temp[i] = w83627hf_read_value(
- data, w83627hf_reg_temp[i]);
- data->temp_max[i] = w83627hf_read_value(
- data, w83627hf_reg_temp_over[i]);
- data->temp_max_hyst[i] = w83627hf_read_value(
- data, w83627hf_reg_temp_hyst[i]);
- }
-
- w83627hf_update_fan_div(data);
-
- data->alarms =
- w83627hf_read_value(data, W83781D_REG_ALARM1) |
- (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) |
- (w83627hf_read_value(data, W83781D_REG_ALARM3) << 16);
- i = w83627hf_read_value(data, W83781D_REG_BEEP_INTS2);
- data->beep_mask = (i << 8) |
- w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
- w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
- data->last_updated = jiffies;
- data->valid = true;
- }
-
- mutex_unlock(&data->update_lock);
-
- return data;
+ return err;
}
static int __init w83627hf_device_add(unsigned short address,
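
Most of the w83627hf diff above is a reordering: the register-access helpers and show/store functions are moved ahead of their users so the forward declarations near the top of the file can be removed. The sysfs attributes rely on the hwmon helper macros, where SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0) expects functions named fan_min_show()/fan_min_store() and passes the channel through ->index. A small, self-contained sketch of that contract, using a hypothetical data structure rather than the driver's real one:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct example_data {
	long fan_min[3];
};

static ssize_t
fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
	int nr = to_sensor_dev_attr(devattr)->index;	/* channel number */
	struct example_data *data = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%ld\n", data->fan_min[nr]);
}

static ssize_t
fan_min_store(struct device *dev, struct device_attribute *devattr,
	      const char *buf, size_t count)
{
	int nr = to_sensor_dev_attr(devattr)->index;
	struct example_data *data = dev_get_drvdata(dev);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	data->fan_min[nr] = val;
	return count;
}

/*
 * One attribute per channel; all three share the same show/store pair and
 * differ only in ->index. In a real driver these are collected into an
 * attribute group before registration.
 */
static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
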
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index b3579721265f..dacabf25e83f 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1171,7 +1171,7 @@ w83781d_detect(struct i2c_client *client, struct i2c_board_info *info)
if (isa)
mutex_unlock(&isa->update_lock);
- strlcpy(info->type, client_name, I2C_NAME_SIZE);
+ strscpy(info->type, client_name, I2C_NAME_SIZE);
return 0;
@@ -1239,7 +1239,7 @@ static int w83781d_probe(struct i2c_client *client)
return err;
}
-static int
+static void
w83781d_remove(struct i2c_client *client)
{
struct w83781d_data *data = i2c_get_clientdata(client);
@@ -1250,8 +1250,6 @@ w83781d_remove(struct i2c_client *client)
i2c_unregister_device(data->lm75[0]);
i2c_unregister_device(data->lm75[1]);
-
- return 0;
}
static int
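
The w83781d hunk above, like the w83791d/w83792d/w83793/w83795/w83l785ts hunks that follow, converts the i2c .remove() callback from returning int to returning void to match the updated struct i2c_driver prototype; the trailing "return 0;" simply disappears. A minimal sketch of the new shape, with hypothetical names (and using .probe_new, the one-argument probe hook of this era):

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_data {
	struct device *hwmon_dev;
};

static int example_probe(struct i2c_client *client)
{
	struct example_data *data;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	i2c_set_clientdata(client, data);

	data->hwmon_dev = hwmon_device_register_with_groups(&client->dev,
							     "example", data,
							     NULL);
	return PTR_ERR_OR_ZERO(data->hwmon_dev);
}

/* New prototype: returns void, since errors cannot be acted on at remove. */
static void example_remove(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
}

static const struct i2c_device_id example_ids[] = {
	{ "example", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_ids);

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.probe_new	= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
};
module_i2c_driver(example_driver);
MODULE_LICENSE("GPL");
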
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 80a9a78d7ce9..eaf691365023 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -315,7 +315,7 @@ struct w83791d_data {
static int w83791d_probe(struct i2c_client *client);
static int w83791d_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83791d_remove(struct i2c_client *client);
+static void w83791d_remove(struct i2c_client *client);
static int w83791d_read(struct i2c_client *client, u8 reg);
static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
@@ -1333,7 +1333,7 @@ static int w83791d_detect(struct i2c_client *client,
if (val1 != 0x71 || val2 != 0x5c)
return -ENODEV;
- strlcpy(info->type, "w83791d", I2C_NAME_SIZE);
+ strscpy(info->type, "w83791d", I2C_NAME_SIZE);
return 0;
}
@@ -1405,14 +1405,12 @@ error4:
return err;
}
-static int w83791d_remove(struct i2c_client *client)
+static void w83791d_remove(struct i2c_client *client)
{
struct w83791d_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
-
- return 0;
}
static void w83791d_init_client(struct i2c_client *client)
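
The strlcpy() to strscpy() conversions repeated throughout these hwmon detect() callbacks are drop-in because the return value is ignored at these call sites; the difference is that strscpy() returns the number of characters copied or -E2BIG on truncation instead of the full source length, and it never reads past the destination-sized window of the source. A small sketch of the detect-style usage, with a hypothetical chip name and a check on the return value:

#include <linux/i2c.h>
#include <linux/string.h>

static int example_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	ssize_t ret;

	/* Bounded copy into the fixed-size info->type buffer. */
	ret = strscpy(info->type, "examplechip", I2C_NAME_SIZE);
	if (ret < 0)		/* -E2BIG: the name did not fit */
		return -EINVAL;

	return 0;
}
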
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 31a1cdc30877..6d160eee1446 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -286,7 +286,7 @@ struct w83792d_data {
static int w83792d_probe(struct i2c_client *client);
static int w83792d_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83792d_remove(struct i2c_client *client);
+static void w83792d_remove(struct i2c_client *client);
static struct w83792d_data *w83792d_update_device(struct device *dev);
#ifdef DEBUG
@@ -1346,7 +1346,7 @@ w83792d_detect(struct i2c_client *client, struct i2c_board_info *info)
if (val1 != 0x7a || val2 != 0x5c)
return -ENODEV;
- strlcpy(info->type, "w83792d", I2C_NAME_SIZE);
+ strscpy(info->type, "w83792d", I2C_NAME_SIZE);
return 0;
}
@@ -1429,7 +1429,7 @@ exit_remove_files:
return err;
}
-static int
+static void
w83792d_remove(struct i2c_client *client)
{
struct w83792d_data *data = i2c_get_clientdata(client);
@@ -1440,8 +1440,6 @@ w83792d_remove(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
-
- return 0;
}
static void
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0a65d164c8f0..a4926d907198 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -285,7 +285,7 @@ static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client);
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83793_remove(struct i2c_client *client);
+static void w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
@@ -1495,7 +1495,7 @@ static struct notifier_block watchdog_notifier = {
* Init / remove routines
*/
-static int w83793_remove(struct i2c_client *client)
+static void w83793_remove(struct i2c_client *client)
{
struct w83793_data *data = i2c_get_clientdata(client);
struct device *dev = &client->dev;
@@ -1554,8 +1554,6 @@ static int w83793_remove(struct i2c_client *client)
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
mutex_unlock(&watchdog_data_mutex);
-
- return 0;
}
static int
@@ -1636,7 +1634,7 @@ static int w83793_detect(struct i2c_client *client,
if (chip_id != 0x7b)
return -ENODEV;
- strlcpy(info->type, "w83793", I2C_NAME_SIZE);
+ strscpy(info->type, "w83793", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 45b12c4287df..84ff5c57e98c 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -1967,7 +1967,7 @@ static int w83795_detect(struct i2c_client *client,
else
chip_name = "w83795g";
- strlcpy(info->type, chip_name, I2C_NAME_SIZE);
+ strscpy(info->type, chip_name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Found %s rev. %c at 0x%02hx\n", chip_name,
'A' + (device_id & 0xf), address);
@@ -2235,14 +2235,12 @@ exit_remove:
return err;
}
-static int w83795_remove(struct i2c_client *client)
+static void w83795_remove(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
w83795_handle_files(&client->dev, device_remove_file_wrapper);
-
- return 0;
}
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index a41f989d66e2..f3622de0d96f 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -65,7 +65,7 @@ static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
static int w83l785ts_probe(struct i2c_client *client);
static int w83l785ts_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int w83l785ts_remove(struct i2c_client *client);
+static void w83l785ts_remove(struct i2c_client *client);
static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval);
static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
@@ -157,7 +157,7 @@ static int w83l785ts_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, "w83l785ts", I2C_NAME_SIZE);
+ strscpy(info->type, "w83l785ts", I2C_NAME_SIZE);
return 0;
}
@@ -203,7 +203,7 @@ exit_remove:
return err;
}
-static int w83l785ts_remove(struct i2c_client *client)
+static void w83l785ts_remove(struct i2c_client *client)
{
struct w83l785ts_data *data = i2c_get_clientdata(client);
@@ -212,8 +212,6 @@ static int w83l785ts_remove(struct i2c_client *client)
&sensor_dev_attr_temp1_input.dev_attr);
device_remove_file(&client->dev,
&sensor_dev_attr_temp1_max.dev_attr);
-
- return 0;
}
static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval)
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 11ba23c1af85..2c4646fa8426 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -687,7 +687,7 @@ w83l786ng_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
- strlcpy(info->type, "w83l786ng", I2C_NAME_SIZE);
+ strscpy(info->type, "w83l786ng", I2C_NAME_SIZE);
return 0;
}
diff --git a/drivers/hwtracing/Kconfig b/drivers/hwtracing/Kconfig
index 13085835a636..911ee977103c 100644
--- a/drivers/hwtracing/Kconfig
+++ b/drivers/hwtracing/Kconfig
@@ -5,4 +5,6 @@ source "drivers/hwtracing/stm/Kconfig"
source "drivers/hwtracing/intel_th/Kconfig"
+source "drivers/hwtracing/ptt/Kconfig"
+
endmenu
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 514a9b8086e3..45c1eb5dfcb7 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -193,10 +193,10 @@ config CORESIGHT_TRBE
depends on ARM64 && CORESIGHT_SOURCE_ETM4X
help
This driver provides support for percpu Trace Buffer Extension (TRBE).
- TRBE always needs to be used along with it's corresponding percpu ETE
+ TRBE always needs to be used along with its corresponding percpu ETE
component. ETE generates trace data which is then captured with TRBE.
Unlike traditional sink devices, TRBE is a CPU feature accessible via
- system registers. But it's explicit dependency with trace unit (ETE)
+ system registers. But its explicit dependency with trace unit (ETE)
requires it to be plugged in as a coresight sink device.
To compile this driver as a module, choose M here: the module will be
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index e0740c6dbd54..bc90a03f478f 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -365,26 +365,15 @@ static const struct etr_buf_operations etr_catu_buf_ops = {
.get_data = catu_get_data_etr_buf,
};
-coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
-coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
-coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
-coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
-coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
-coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
-coresight_simple_reg64(struct catu_drvdata, sladdr,
- CATU_SLADDRLO, CATU_SLADDRHI);
-coresight_simple_reg64(struct catu_drvdata, inaddr,
- CATU_INADDRLO, CATU_INADDRHI);
-
static struct attribute *catu_mgmt_attrs[] = {
- &dev_attr_devid.attr,
- &dev_attr_control.attr,
- &dev_attr_status.attr,
- &dev_attr_mode.attr,
- &dev_attr_axictrl.attr,
- &dev_attr_irqen.attr,
- &dev_attr_sladdr.attr,
- &dev_attr_inaddr.attr,
+ coresight_simple_reg32(devid, CORESIGHT_DEVID),
+ coresight_simple_reg32(control, CATU_CONTROL),
+ coresight_simple_reg32(status, CATU_STATUS),
+ coresight_simple_reg32(mode, CATU_MODE),
+ coresight_simple_reg32(axictrl, CATU_AXICTRL),
+ coresight_simple_reg32(irqen, CATU_IRQEN),
+ coresight_simple_reg64(sladdr, CATU_SLADDRLO, CATU_SLADDRHI),
+ coresight_simple_reg64(inaddr, CATU_INADDRLO, CATU_INADDRHI),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 6160c2d75a56..442e034bbfba 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -70,24 +70,24 @@ struct catu_drvdata {
static inline u32 \
catu_read_##name(struct catu_drvdata *drvdata) \
{ \
- return coresight_read_reg_pair(drvdata->base, offset, -1); \
+ return csdev_access_relaxed_read32(&drvdata->csdev->access, offset); \
} \
static inline void \
catu_write_##name(struct catu_drvdata *drvdata, u32 val) \
{ \
- coresight_write_reg_pair(drvdata->base, val, offset, -1); \
+ csdev_access_relaxed_write32(&drvdata->csdev->access, val, offset); \
}
#define CATU_REG_PAIR(name, lo_off, hi_off) \
static inline u64 \
catu_read_##name(struct catu_drvdata *drvdata) \
{ \
- return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+ return csdev_access_relaxed_read_pair(&drvdata->csdev->access, lo_off, hi_off); \
} \
static inline void \
catu_write_##name(struct catu_drvdata *drvdata, u64 val) \
{ \
- coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+ csdev_access_relaxed_write_pair(&drvdata->csdev->access, val, lo_off, hi_off); \
}
CATU_REG32(control, CATU_CONTROL);
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 1edfec1e9d18..d5dbc67bacb4 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -60,6 +60,34 @@ EXPORT_SYMBOL_GPL(coresight_barrier_pkt);
static const struct cti_assoc_op *cti_assoc_ops;
+ssize_t coresight_simple_show_pair(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show_pair);
+
+ssize_t coresight_simple_show32(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show32);
+
void coresight_set_cti_ops(const struct cti_assoc_op *cti_op)
{
cti_assoc_ops = cti_op;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 7ff7e7780bbf..6d59c815ecf5 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -163,48 +163,82 @@ static struct attribute *coresight_cti_attrs[] = {
/* register based attributes */
-/* macro to access RO registers with power check only (no enable check). */
-#define coresight_cti_reg(name, offset) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- u32 val = 0; \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- val = readl_relaxed(drvdata->base + offset); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return sprintf(buf, "0x%x\n", val); \
-} \
-static DEVICE_ATTR_RO(name)
+/* Read registers with power check only (no enable check). */
+static ssize_t coresight_cti_reg_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
+ u32 val = 0;
-/* coresight management registers */
-coresight_cti_reg(devaff0, CTIDEVAFF0);
-coresight_cti_reg(devaff1, CTIDEVAFF1);
-coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS);
-coresight_cti_reg(devarch, CORESIGHT_DEVARCH);
-coresight_cti_reg(devid, CORESIGHT_DEVID);
-coresight_cti_reg(devtype, CORESIGHT_DEVTYPE);
-coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0);
-coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1);
-coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2);
-coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3);
-coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4);
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ if (drvdata->config.hw_powered)
+ val = readl_relaxed(drvdata->base + cti_attr->off);
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put_sync(dev->parent);
+ return sysfs_emit(buf, "0x%x\n", val);
+}
+/* Write registers with power check only (no enable check). */
+static __maybe_unused ssize_t coresight_cti_reg_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
+ unsigned long val = 0;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ if (drvdata->config.hw_powered)
+ cti_write_single_reg(drvdata, cti_attr->off, val);
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put_sync(dev->parent);
+ return size;
+}
+
+#define coresight_cti_reg(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_cti_reg_show, NULL), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+#define coresight_cti_reg_rw(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0644, coresight_cti_reg_show, \
+ coresight_cti_reg_store), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+#define coresight_cti_reg_wo(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0200, NULL, coresight_cti_reg_store), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+/* coresight management registers */
static struct attribute *coresight_cti_mgmt_attrs[] = {
- &dev_attr_devaff0.attr,
- &dev_attr_devaff1.attr,
- &dev_attr_authstatus.attr,
- &dev_attr_devarch.attr,
- &dev_attr_devid.attr,
- &dev_attr_devtype.attr,
- &dev_attr_pidr0.attr,
- &dev_attr_pidr1.attr,
- &dev_attr_pidr2.attr,
- &dev_attr_pidr3.attr,
- &dev_attr_pidr4.attr,
+ coresight_cti_reg(devaff0, CTIDEVAFF0),
+ coresight_cti_reg(devaff1, CTIDEVAFF1),
+ coresight_cti_reg(authstatus, CORESIGHT_AUTHSTATUS),
+ coresight_cti_reg(devarch, CORESIGHT_DEVARCH),
+ coresight_cti_reg(devid, CORESIGHT_DEVID),
+ coresight_cti_reg(devtype, CORESIGHT_DEVTYPE),
+ coresight_cti_reg(pidr0, CORESIGHT_PERIPHIDR0),
+ coresight_cti_reg(pidr1, CORESIGHT_PERIPHIDR1),
+ coresight_cti_reg(pidr2, CORESIGHT_PERIPHIDR2),
+ coresight_cti_reg(pidr3, CORESIGHT_PERIPHIDR3),
+ coresight_cti_reg(pidr4, CORESIGHT_PERIPHIDR4),
NULL,
};
@@ -454,86 +488,11 @@ static ssize_t apppulse_store(struct device *dev,
}
static DEVICE_ATTR_WO(apppulse);
-coresight_cti_reg(triginstatus, CTITRIGINSTATUS);
-coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS);
-coresight_cti_reg(chinstatus, CTICHINSTATUS);
-coresight_cti_reg(choutstatus, CTICHOUTSTATUS);
-
/*
* Define CONFIG_CORESIGHT_CTI_INTEGRATION_REGS to enable the access to the
* integration control registers. Normally only used to investigate connection
* data.
*/
-#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
-
-/* macro to access RW registers with power check only (no enable check). */
-#define coresight_cti_reg_rw(name, offset) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- u32 val = 0; \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- val = readl_relaxed(drvdata->base + offset); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return sprintf(buf, "0x%x\n", val); \
-} \
- \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t size) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- unsigned long val = 0; \
- if (kstrtoul(buf, 0, &val)) \
- return -EINVAL; \
- \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- cti_write_single_reg(drvdata, offset, val); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return size; \
-} \
-static DEVICE_ATTR_RW(name)
-
-/* macro to access WO registers with power check only (no enable check). */
-#define coresight_cti_reg_wo(name, offset) \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t size) \
-{ \
- struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); \
- unsigned long val = 0; \
- if (kstrtoul(buf, 0, &val)) \
- return -EINVAL; \
- \
- pm_runtime_get_sync(dev->parent); \
- spin_lock(&drvdata->spinlock); \
- if (drvdata->config.hw_powered) \
- cti_write_single_reg(drvdata, offset, val); \
- spin_unlock(&drvdata->spinlock); \
- pm_runtime_put_sync(dev->parent); \
- return size; \
-} \
-static DEVICE_ATTR_WO(name)
-
-coresight_cti_reg_rw(itchout, ITCHOUT);
-coresight_cti_reg_rw(ittrigout, ITTRIGOUT);
-coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL);
-coresight_cti_reg_wo(itchinack, ITCHINACK);
-coresight_cti_reg_wo(ittriginack, ITTRIGINACK);
-coresight_cti_reg(ittrigin, ITTRIGIN);
-coresight_cti_reg(itchin, ITCHIN);
-coresight_cti_reg(itchoutack, ITCHOUTACK);
-coresight_cti_reg(ittrigoutack, ITTRIGOUTACK);
-
-#endif /* CORESIGHT_CTI_INTEGRATION_REGS */
-
static struct attribute *coresight_cti_regs_attrs[] = {
&dev_attr_inout_sel.attr,
&dev_attr_inen.attr,
@@ -544,20 +503,20 @@ static struct attribute *coresight_cti_regs_attrs[] = {
&dev_attr_appset.attr,
&dev_attr_appclear.attr,
&dev_attr_apppulse.attr,
- &dev_attr_triginstatus.attr,
- &dev_attr_trigoutstatus.attr,
- &dev_attr_chinstatus.attr,
- &dev_attr_choutstatus.attr,
+ coresight_cti_reg(triginstatus, CTITRIGINSTATUS),
+ coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS),
+ coresight_cti_reg(chinstatus, CTICHINSTATUS),
+ coresight_cti_reg(choutstatus, CTICHOUTSTATUS),
#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
- &dev_attr_itctrl.attr,
- &dev_attr_ittrigin.attr,
- &dev_attr_itchin.attr,
- &dev_attr_ittrigout.attr,
- &dev_attr_itchout.attr,
- &dev_attr_itchoutack.attr,
- &dev_attr_ittrigoutack.attr,
- &dev_attr_ittriginack.attr,
- &dev_attr_itchinack.attr,
+ coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL),
+ coresight_cti_reg(ittrigin, ITTRIGIN),
+ coresight_cti_reg(itchin, ITCHIN),
+ coresight_cti_reg_rw(ittrigout, ITTRIGOUT),
+ coresight_cti_reg_rw(itchout, ITCHOUT),
+ coresight_cti_reg(itchoutack, ITCHOUTACK),
+ coresight_cti_reg(ittrigoutack, ITTRIGOUTACK),
+ coresight_cti_reg_wo(ittriginack, ITTRIGINACK),
+ coresight_cti_reg_wo(itchinack, ITCHINACK),
#endif
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index efa39820acec..8aa6e4f83e42 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -655,27 +655,15 @@ static const struct file_operations etb_fops = {
.llseek = no_llseek,
};
-#define coresight_etb10_reg(name, offset) \
- coresight_simple_reg32(struct etb_drvdata, name, offset)
-
-coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
-coresight_etb10_reg(sts, ETB_STATUS_REG);
-coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
-coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
-coresight_etb10_reg(trg, ETB_TRG);
-coresight_etb10_reg(ctl, ETB_CTL_REG);
-coresight_etb10_reg(ffsr, ETB_FFSR);
-coresight_etb10_reg(ffcr, ETB_FFCR);
-
static struct attribute *coresight_etb_mgmt_attrs[] = {
- &dev_attr_rdp.attr,
- &dev_attr_sts.attr,
- &dev_attr_rrp.attr,
- &dev_attr_rwp.attr,
- &dev_attr_trg.attr,
- &dev_attr_ctl.attr,
- &dev_attr_ffsr.attr,
- &dev_attr_ffcr.attr,
+ coresight_simple_reg32(rdp, ETB_RAM_DEPTH_REG),
+ coresight_simple_reg32(sts, ETB_STATUS_REG),
+ coresight_simple_reg32(rrp, ETB_RAM_READ_POINTER),
+ coresight_simple_reg32(rwp, ETB_RAM_WRITE_POINTER),
+ coresight_simple_reg32(trg, ETB_TRG),
+ coresight_simple_reg32(ctl, ETB_CTL_REG),
+ coresight_simple_reg32(ffsr, ETB_FFSR),
+ coresight_simple_reg32(ffcr, ETB_FFCR),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 68fcbf4ce7a8..fd81eca3ec18 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1252,31 +1252,17 @@ static struct attribute *coresight_etm_attrs[] = {
NULL,
};
-#define coresight_etm3x_reg(name, offset) \
- coresight_simple_reg32(struct etm_drvdata, name, offset)
-
-coresight_etm3x_reg(etmccr, ETMCCR);
-coresight_etm3x_reg(etmccer, ETMCCER);
-coresight_etm3x_reg(etmscr, ETMSCR);
-coresight_etm3x_reg(etmidr, ETMIDR);
-coresight_etm3x_reg(etmcr, ETMCR);
-coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
-coresight_etm3x_reg(etmteevr, ETMTEEVR);
-coresight_etm3x_reg(etmtssvr, ETMTSSCR);
-coresight_etm3x_reg(etmtecr1, ETMTECR1);
-coresight_etm3x_reg(etmtecr2, ETMTECR2);
-
static struct attribute *coresight_etm_mgmt_attrs[] = {
- &dev_attr_etmccr.attr,
- &dev_attr_etmccer.attr,
- &dev_attr_etmscr.attr,
- &dev_attr_etmidr.attr,
- &dev_attr_etmcr.attr,
- &dev_attr_etmtraceidr.attr,
- &dev_attr_etmteevr.attr,
- &dev_attr_etmtssvr.attr,
- &dev_attr_etmtecr1.attr,
- &dev_attr_etmtecr2.attr,
+ coresight_simple_reg32(etmccr, ETMCCR),
+ coresight_simple_reg32(etmccer, ETMCCER),
+ coresight_simple_reg32(etmscr, ETMSCR),
+ coresight_simple_reg32(etmidr, ETMIDR),
+ coresight_simple_reg32(etmcr, ETMCR),
+ coresight_simple_reg32(etmtraceidr, ETMTRACEIDR),
+ coresight_simple_reg32(etmteevr, ETMTEEVR),
+ coresight_simple_reg32(etmtssvr, ETMTSSCR),
+ coresight_simple_reg32(etmtecr1, ETMTECR1),
+ coresight_simple_reg32(etmtecr2, ETMTECR2),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index d39660a3e50c..80fefaba58ee 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -966,7 +966,7 @@ static inline bool cpu_supports_sysreg_trace(void)
{
u64 dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
- return ((dfr0 >> ID_AA64DFR0_TRACEVER_SHIFT) & 0xfUL) > 0;
+ return ((dfr0 >> ID_AA64DFR0_EL1_TraceVer_SHIFT) & 0xfUL) > 0;
}
static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
@@ -1054,7 +1054,7 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
u64 trfcr;
drvdata->trfcr = 0;
- if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
+ if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceFilt_SHIFT))
return;
/*
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 6ea8181816fc..9cac848cffaf 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2306,6 +2306,34 @@ static ssize_t cpu_show(struct device *dev,
}
static DEVICE_ATTR_RO(cpu);
+static ssize_t ts_source_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (!drvdata->trfcr) {
+ val = -1;
+ goto out;
+ }
+
+ switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) {
+ case TRFCR_ELx_TS_VIRTUAL:
+ case TRFCR_ELx_TS_GUEST_PHYSICAL:
+ case TRFCR_ELx_TS_PHYSICAL:
+ val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr);
+ break;
+ default:
+ val = -1;
+ break;
+ }
+
+out:
+ return sysfs_emit(buf, "%d\n", val);
+}
+static DEVICE_ATTR_RO(ts_source);
+
static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_nr_pe_cmp.attr,
&dev_attr_nr_addr_cmp.attr,
@@ -2360,6 +2388,7 @@ static struct attribute *coresight_etmv4_attrs[] = {
&dev_attr_vmid_val.attr,
&dev_attr_vmid_masks.attr,
&dev_attr_cpu.attr,
+ &dev_attr_ts_source.attr,
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index ff1dd2092ac5..595ce5862056 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -39,32 +39,37 @@
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
+struct cs_pair_attribute {
+ struct device_attribute attr;
+ u32 lo_off;
+ u32 hi_off;
+};
+
+struct cs_off_attribute {
+ struct device_attribute attr;
+ u32 off;
+};
-typedef u32 (*coresight_read_fn)(const struct device *, u32 offset);
-#define __coresight_simple_func(type, func, name, lo_off, hi_off) \
-static ssize_t name##_show(struct device *_dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- type *drvdata = dev_get_drvdata(_dev->parent); \
- coresight_read_fn fn = func; \
- u64 val; \
- pm_runtime_get_sync(_dev->parent); \
- if (fn) \
- val = (u64)fn(_dev->parent, lo_off); \
- else \
- val = coresight_read_reg_pair(drvdata->base, \
- lo_off, hi_off); \
- pm_runtime_put_sync(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n", val); \
-} \
-static DEVICE_ATTR_RO(name)
-
-#define coresight_simple_func(type, func, name, offset) \
- __coresight_simple_func(type, func, name, offset, -1)
-#define coresight_simple_reg32(type, name, offset) \
- __coresight_simple_func(type, NULL, name, offset, -1)
-#define coresight_simple_reg64(type, name, lo_off, hi_off) \
- __coresight_simple_func(type, NULL, name, lo_off, hi_off)
+extern ssize_t coresight_simple_show32(struct device *_dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t coresight_simple_show_pair(struct device *_dev,
+ struct device_attribute *attr, char *buf);
+
+#define coresight_simple_reg32(name, offset) \
+ (&((struct cs_off_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_simple_show32, NULL), \
+ offset \
+ } \
+ })[0].attr.attr)
+
+#define coresight_simple_reg64(name, lo_off, hi_off) \
+ (&((struct cs_pair_attribute[]) { \
+ { \
+ __ATTR(name, 0444, coresight_simple_show_pair, NULL), \
+ lo_off, hi_off \
+ } \
+ })[0].attr.attr)
extern const u32 coresight_barrier_pkt[4];
#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(coresight_barrier_pkt))
@@ -127,25 +132,6 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
-static inline u64
-coresight_read_reg_pair(void __iomem *addr, s32 lo_offset, s32 hi_offset)
-{
- u64 val;
-
- val = readl_relaxed(addr + lo_offset);
- val |= (hi_offset < 0) ? 0 :
- (u64)readl_relaxed(addr + hi_offset) << 32;
- return val;
-}
-
-static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
- s32 lo_offset, s32 hi_offset)
-{
- writel_relaxed((u32)val, addr + lo_offset);
- if (hi_offset >= 0)
- writel_relaxed((u32)(val >> 32), addr + hi_offset);
-}
-
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
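
For readers unfamiliar with the compound-literal trick used by the new coresight_simple_reg32()/coresight_simple_reg64() macros above, here is a minimal, self-contained userspace sketch of the same pattern (all names below are illustrative, not kernel APIs): a one-element anonymous array embeds the register offset next to the attribute, and the show routine recovers it with container_of() on the attribute pointer.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for struct device_attribute and the kernel helpers. */
struct fake_attr { const char *name; };
struct off_attr { struct fake_attr attr; unsigned int off; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* One-element anonymous array; only the embedded attribute pointer escapes. */
#define simple_reg32(name, offset) \
	(&((struct off_attr[]) { { { #name }, (offset) } })[0].attr)

static struct fake_attr *attrs[] = {
	simple_reg32(devid, 0xfc8),
	simple_reg32(status, 0x00c),
	NULL,
};

int main(void)
{
	for (int i = 0; attrs[i]; i++) {
		const struct off_attr *oa =
			container_of(attrs[i], struct off_attr, attr);
		printf("%s -> offset 0x%03x\n", oa->attr.name, oa->off);
	}
	return 0;
}
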
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index b86acbc74cf0..4dd50546d7e4 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -196,15 +196,9 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
-#define coresight_replicator_reg(name, offset) \
- coresight_simple_reg32(struct replicator_drvdata, name, offset)
-
-coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
-coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
-
static struct attribute *replicator_mgmt_attrs[] = {
- &dev_attr_idfilter0.attr,
- &dev_attr_idfilter1.attr,
+ coresight_simple_reg32(idfilter0, REPLICATOR_IDFILTER0),
+ coresight_simple_reg32(idfilter1, REPLICATOR_IDFILTER1),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index bb14a3a8a921..463f449cfb79 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -634,22 +634,6 @@ static ssize_t traceid_store(struct device *dev,
}
static DEVICE_ATTR_RW(traceid);
-#define coresight_stm_reg(name, offset) \
- coresight_simple_reg32(struct stm_drvdata, name, offset)
-
-coresight_stm_reg(tcsr, STMTCSR);
-coresight_stm_reg(tsfreqr, STMTSFREQR);
-coresight_stm_reg(syncr, STMSYNCR);
-coresight_stm_reg(sper, STMSPER);
-coresight_stm_reg(spter, STMSPTER);
-coresight_stm_reg(privmaskr, STMPRIVMASKR);
-coresight_stm_reg(spscr, STMSPSCR);
-coresight_stm_reg(spmscr, STMSPMSCR);
-coresight_stm_reg(spfeat1r, STMSPFEAT1R);
-coresight_stm_reg(spfeat2r, STMSPFEAT2R);
-coresight_stm_reg(spfeat3r, STMSPFEAT3R);
-coresight_stm_reg(devid, CORESIGHT_DEVID);
-
static struct attribute *coresight_stm_attrs[] = {
&dev_attr_hwevent_enable.attr,
&dev_attr_hwevent_select.attr,
@@ -660,18 +644,18 @@ static struct attribute *coresight_stm_attrs[] = {
};
static struct attribute *coresight_stm_mgmt_attrs[] = {
- &dev_attr_tcsr.attr,
- &dev_attr_tsfreqr.attr,
- &dev_attr_syncr.attr,
- &dev_attr_sper.attr,
- &dev_attr_spter.attr,
- &dev_attr_privmaskr.attr,
- &dev_attr_spscr.attr,
- &dev_attr_spmscr.attr,
- &dev_attr_spfeat1r.attr,
- &dev_attr_spfeat2r.attr,
- &dev_attr_spfeat3r.attr,
- &dev_attr_devid.attr,
+ coresight_simple_reg32(tcsr, STMTCSR),
+ coresight_simple_reg32(tsfreqr, STMTSFREQR),
+ coresight_simple_reg32(syncr, STMSYNCR),
+ coresight_simple_reg32(sper, STMSPER),
+ coresight_simple_reg32(spter, STMSPTER),
+ coresight_simple_reg32(privmaskr, STMPRIVMASKR),
+ coresight_simple_reg32(spscr, STMSPSCR),
+ coresight_simple_reg32(spmscr, STMSPMSCR),
+ coresight_simple_reg32(spfeat1r, STMSPFEAT1R),
+ coresight_simple_reg32(spfeat2r, STMSPFEAT2R),
+ coresight_simple_reg32(spfeat3r, STMSPFEAT3R),
+ coresight_simple_reg32(devid, CORESIGHT_DEVID),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index d0276af82494..07abf28ad725 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -251,41 +251,21 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
return memwidth;
}
-#define coresight_tmc_reg(name, offset) \
- coresight_simple_reg32(struct tmc_drvdata, name, offset)
-#define coresight_tmc_reg64(name, lo_off, hi_off) \
- coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
-
-coresight_tmc_reg(rsz, TMC_RSZ);
-coresight_tmc_reg(sts, TMC_STS);
-coresight_tmc_reg(trg, TMC_TRG);
-coresight_tmc_reg(ctl, TMC_CTL);
-coresight_tmc_reg(ffsr, TMC_FFSR);
-coresight_tmc_reg(ffcr, TMC_FFCR);
-coresight_tmc_reg(mode, TMC_MODE);
-coresight_tmc_reg(pscr, TMC_PSCR);
-coresight_tmc_reg(axictl, TMC_AXICTL);
-coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
-coresight_tmc_reg(devid, CORESIGHT_DEVID);
-coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
-coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
-coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
-
static struct attribute *coresight_tmc_mgmt_attrs[] = {
- &dev_attr_rsz.attr,
- &dev_attr_sts.attr,
- &dev_attr_rrp.attr,
- &dev_attr_rwp.attr,
- &dev_attr_trg.attr,
- &dev_attr_ctl.attr,
- &dev_attr_ffsr.attr,
- &dev_attr_ffcr.attr,
- &dev_attr_mode.attr,
- &dev_attr_pscr.attr,
- &dev_attr_devid.attr,
- &dev_attr_dba.attr,
- &dev_attr_axictl.attr,
- &dev_attr_authstatus.attr,
+ coresight_simple_reg32(rsz, TMC_RSZ),
+ coresight_simple_reg32(sts, TMC_STS),
+ coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
+ coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
+ coresight_simple_reg32(trg, TMC_TRG),
+ coresight_simple_reg32(ctl, TMC_CTL),
+ coresight_simple_reg32(ffsr, TMC_FFSR),
+ coresight_simple_reg32(ffcr, TMC_FFCR),
+ coresight_simple_reg32(mode, TMC_MODE),
+ coresight_simple_reg32(pscr, TMC_PSCR),
+ coresight_simple_reg32(devid, CORESIGHT_DEVID),
+ coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
+ coresight_simple_reg32(axictl, TMC_AXICTL),
+ coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6bec20a392b3..66959557cf39 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -282,12 +282,12 @@ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
static inline u64 \
tmc_read_##name(struct tmc_drvdata *drvdata) \
{ \
- return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+ return csdev_access_relaxed_read_pair(&drvdata->csdev->access, lo_off, hi_off); \
} \
static inline void \
tmc_write_##name(struct tmc_drvdata *drvdata, u64 val) \
{ \
- coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+ csdev_access_relaxed_write_pair(&drvdata->csdev->access, val, lo_off, hi_off); \
}
TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
index 30e4d7db4f8e..98ff1b17ad07 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.h
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -20,7 +20,8 @@
static inline bool is_trbe_available(void)
{
u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
- unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);
+ unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0,
+ ID_AA64DFR0_EL1_TraceBuffer_SHIFT);
return trbe >= 0b0001;
}
diff --git a/drivers/hwtracing/ptt/Kconfig b/drivers/hwtracing/ptt/Kconfig
new file mode 100644
index 000000000000..6d46a09ffeb9
--- /dev/null
+++ b/drivers/hwtracing/ptt/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HISI_PTT
+ tristate "HiSilicon PCIe Tune and Trace Device"
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
+ depends on PCI && HAS_DMA && HAS_IOMEM && PERF_EVENTS
+ help
+ HiSilicon PCIe Tune and Trace device exists as a PCIe RCiEP
+ device, and it provides support for PCIe traffic tuning and
+ tracing TLP headers to memory.
+
+ This driver can also be built as a module. If so, the module
+ will be called hisi_ptt.
diff --git a/drivers/hwtracing/ptt/Makefile b/drivers/hwtracing/ptt/Makefile
new file mode 100644
index 000000000000..908c09a98161
--- /dev/null
+++ b/drivers/hwtracing/ptt/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_HISI_PTT) += hisi_ptt.o
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
new file mode 100644
index 000000000000..5d5526aa60c4
--- /dev/null
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon PCIe tune and trace device
+ *
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
+
+#include "hisi_ptt.h"
+
+/* Dynamic CPU hotplug state used by PTT */
+static enum cpuhp_state hisi_ptt_pmu_online;
+
+static bool hisi_ptt_wait_tuning_finish(struct hisi_ptt *hisi_ptt)
+{
+ u32 val;
+
+ return !readl_poll_timeout(hisi_ptt->iobase + HISI_PTT_TUNING_INT_STAT,
+ val, !(val & HISI_PTT_TUNING_INT_STAT_MASK),
+ HISI_PTT_WAIT_POLL_INTERVAL_US,
+ HISI_PTT_WAIT_TUNE_TIMEOUT_US);
+}
+
+static ssize_t hisi_ptt_tune_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
+ struct dev_ext_attribute *ext_attr;
+ struct hisi_ptt_tune_desc *desc;
+ u32 reg;
+ u16 val;
+
+ ext_attr = container_of(attr, struct dev_ext_attribute, attr);
+ desc = ext_attr->var;
+
+ mutex_lock(&hisi_ptt->tune_lock);
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+ reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
+ reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
+ desc->event_code);
+ writel(reg, hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+
+ /* Write all 1s to indicate this is a read operation */
+ writel(~0U, hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
+
+ if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return -ETIMEDOUT;
+ }
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
+ reg &= HISI_PTT_TUNING_DATA_VAL_MASK;
+ val = FIELD_GET(HISI_PTT_TUNING_DATA_VAL_MASK, reg);
+
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t hisi_ptt_tune_attr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
+ struct dev_ext_attribute *ext_attr;
+ struct hisi_ptt_tune_desc *desc;
+ u32 reg;
+ u16 val;
+
+ ext_attr = container_of(attr, struct dev_ext_attribute, attr);
+ desc = ext_attr->var;
+
+ if (kstrtou16(buf, 10, &val))
+ return -EINVAL;
+
+ mutex_lock(&hisi_ptt->tune_lock);
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+ reg &= ~(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB);
+ reg |= FIELD_PREP(HISI_PTT_TUNING_CTRL_CODE | HISI_PTT_TUNING_CTRL_SUB,
+ desc->event_code);
+ writel(reg, hisi_ptt->iobase + HISI_PTT_TUNING_CTRL);
+ writel(FIELD_PREP(HISI_PTT_TUNING_DATA_VAL_MASK, val),
+ hisi_ptt->iobase + HISI_PTT_TUNING_DATA);
+
+ if (!hisi_ptt_wait_tuning_finish(hisi_ptt)) {
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&hisi_ptt->tune_lock);
+ return count;
+}
+
+#define HISI_PTT_TUNE_ATTR(_name, _val, _show, _store) \
+ static struct hisi_ptt_tune_desc _name##_desc = { \
+ .name = #_name, \
+ .event_code = (_val), \
+ }; \
+ static struct dev_ext_attribute hisi_ptt_##_name##_attr = { \
+ .attr = __ATTR(_name, 0600, _show, _store), \
+ .var = &_name##_desc, \
+ }
+
+#define HISI_PTT_TUNE_ATTR_COMMON(_name, _val) \
+ HISI_PTT_TUNE_ATTR(_name, _val, \
+ hisi_ptt_tune_attr_show, \
+ hisi_ptt_tune_attr_store)
+
+/*
+ * The value of a tuning event is composed of two parts: the main event code
+ * in BIT[0,15] and the subevent code in BIT[16,23]. For example, qos_tx_cpl is
+ * a subevent of 'Tx path QoS control', which is for tuning the weight of Tx
+ * completion TLPs. See the hisi_ptt.rst documentation for more information.
+ */
+#define HISI_PTT_TUNE_QOS_TX_CPL (0x4 | (3 << 16))
+#define HISI_PTT_TUNE_QOS_TX_NP (0x4 | (4 << 16))
+#define HISI_PTT_TUNE_QOS_TX_P (0x4 | (5 << 16))
+#define HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL (0x5 | (6 << 16))
+#define HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL (0x5 | (7 << 16))
+
+HISI_PTT_TUNE_ATTR_COMMON(qos_tx_cpl, HISI_PTT_TUNE_QOS_TX_CPL);
+HISI_PTT_TUNE_ATTR_COMMON(qos_tx_np, HISI_PTT_TUNE_QOS_TX_NP);
+HISI_PTT_TUNE_ATTR_COMMON(qos_tx_p, HISI_PTT_TUNE_QOS_TX_P);
+HISI_PTT_TUNE_ATTR_COMMON(rx_alloc_buf_level, HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL);
+HISI_PTT_TUNE_ATTR_COMMON(tx_alloc_buf_level, HISI_PTT_TUNE_TX_ALLOC_BUF_LEVEL);
+
+static struct attribute *hisi_ptt_tune_attrs[] = {
+ &hisi_ptt_qos_tx_cpl_attr.attr.attr,
+ &hisi_ptt_qos_tx_np_attr.attr.attr,
+ &hisi_ptt_qos_tx_p_attr.attr.attr,
+ &hisi_ptt_rx_alloc_buf_level_attr.attr.attr,
+ &hisi_ptt_tx_alloc_buf_level_attr.attr.attr,
+ NULL,
+};
+
+static struct attribute_group hisi_ptt_tune_group = {
+ .name = "tune",
+ .attrs = hisi_ptt_tune_attrs,
+};
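
To make the BIT[0,15]/BIT[16,23] split described above concrete, here is a small standalone sketch that decodes two of the tune event codes defined in this patch; the decode mirrors the comment and the program itself is purely illustrative.

#include <stdio.h>

/* Event codes copied from the patch: main code in bits [15:0],
 * subevent code in bits [23:16]. */
#define HISI_PTT_TUNE_QOS_TX_CPL		(0x4 | (3 << 16))
#define HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL	(0x5 | (6 << 16))

static void decode(const char *name, unsigned int code)
{
	unsigned int main_code = code & 0xffff;      /* BIT[0,15]  */
	unsigned int sub_code = (code >> 16) & 0xff; /* BIT[16,23] */

	printf("%s: raw 0x%06x, main event 0x%x, subevent %u\n",
	       name, code, main_code, sub_code);
}

int main(void)
{
	decode("qos_tx_cpl", HISI_PTT_TUNE_QOS_TX_CPL);
	decode("rx_alloc_buf_level", HISI_PTT_TUNE_RX_ALLOC_BUF_LEVEL);
	return 0;
}
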
+
+static u16 hisi_ptt_get_filter_val(u16 devid, bool is_port)
+{
+ if (is_port)
+ return BIT(HISI_PCIE_CORE_PORT_ID(devid & 0xff));
+
+ return devid;
+}
+
+static bool hisi_ptt_wait_trace_hw_idle(struct hisi_ptt *hisi_ptt)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_STS,
+ val, val & HISI_PTT_TRACE_IDLE,
+ HISI_PTT_WAIT_POLL_INTERVAL_US,
+ HISI_PTT_WAIT_TRACE_TIMEOUT_US);
+}
+
+static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS,
+ val, !val, HISI_PTT_RESET_POLL_INTERVAL_US,
+ HISI_PTT_RESET_TIMEOUT_US);
+}
+
+static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt)
+{
+ writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+ hisi_ptt->trace_ctrl.started = false;
+}
+
+static int hisi_ptt_trace_start(struct hisi_ptt *hisi_ptt)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ u32 val;
+ int i;
+
+ /* Check that the device is idle before starting the trace */
+ if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt)) {
+ pci_err(hisi_ptt->pdev, "Failed to start trace, the device is still busy\n");
+ return -EBUSY;
+ }
+
+ ctrl->started = true;
+
+ /* Reset the DMA before starting to trace */
+ val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+ val |= HISI_PTT_TRACE_CTRL_RST;
+ writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ hisi_ptt_wait_dma_reset_done(hisi_ptt);
+
+ val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+ val &= ~HISI_PTT_TRACE_CTRL_RST;
+ writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ /* Reset the index of current buffer */
+ hisi_ptt->trace_ctrl.buf_index = 0;
+
+ /* Zero the trace buffers */
+ for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++)
+ memset(ctrl->trace_buf[i].addr, 0, HISI_PTT_TRACE_BUF_SIZE);
+
+ /* Clear the interrupt status */
+ writel(HISI_PTT_TRACE_INT_STAT_MASK, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
+ writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK);
+
+ /* Set the trace control register */
+ val = FIELD_PREP(HISI_PTT_TRACE_CTRL_TYPE_SEL, ctrl->type);
+ val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_RXTX_SEL, ctrl->direction);
+ val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_DATA_FORMAT, ctrl->format);
+ val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_TARGET_SEL, hisi_ptt->trace_ctrl.filter);
+ if (!hisi_ptt->trace_ctrl.is_port)
+ val |= HISI_PTT_TRACE_CTRL_FILTER_MODE;
+
+ /* Start the Trace */
+ val |= HISI_PTT_TRACE_CTRL_EN;
+ writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL);
+
+ return 0;
+}
+
+static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ struct perf_output_handle *handle = &ctrl->handle;
+ struct perf_event *event = handle->event;
+ struct hisi_ptt_pmu_buf *buf;
+ size_t size;
+ void *addr;
+
+ buf = perf_get_aux(handle);
+ if (!buf || !handle->size)
+ return -EINVAL;
+
+ addr = ctrl->trace_buf[ctrl->buf_index].addr;
+
+ /*
+ * If we're going to stop, read the size of the already traced data from
+ * HISI_PTT_TRACE_WR_STS. Otherwise we're coming from the interrupt and
+ * the data size is always HISI_PTT_TRACE_BUF_SIZE.
+ */
+ if (stop) {
+ u32 reg;
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS);
+ size = FIELD_GET(HISI_PTT_TRACE_WR_STS_WRITE, reg);
+ } else {
+ size = HISI_PTT_TRACE_BUF_SIZE;
+ }
+
+ memcpy(buf->base + buf->pos, addr, size);
+ buf->pos += size;
+
+ /*
+ * Just commit the traced data if we're going to stop. Otherwise, if the
+ * remaining AUX buffer cannot hold the data of the next trace buffer,
+ * request a new one.
+ */
+ if (stop) {
+ perf_aux_output_end(handle, buf->pos);
+ } else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
+ perf_aux_output_end(handle, buf->pos);
+
+ buf = perf_aux_output_begin(handle, event);
+ if (!buf)
+ return -EINVAL;
+
+ buf->pos = handle->head % buf->length;
+ if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) {
+ perf_aux_output_end(handle, 0);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static irqreturn_t hisi_ptt_isr(int irq, void *context)
+{
+ struct hisi_ptt *hisi_ptt = context;
+ u32 status, buf_idx;
+
+ status = readl(hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
+ if (!(status & HISI_PTT_TRACE_INT_STAT_MASK))
+ return IRQ_NONE;
+
+ buf_idx = ffs(status) - 1;
+
+ /* Clear the interrupt status of buffer @buf_idx */
+ writel(status, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT);
+
+ /*
+ * Update the AUX buffer and cache the current buffer index, as we
+ * need it to save the data when the trace is ended outside of the
+ * interrupt handler. End the trace if the update fails.
+ */
+ if (hisi_ptt_update_aux(hisi_ptt, buf_idx, false))
+ hisi_ptt_trace_end(hisi_ptt);
+ else
+ hisi_ptt->trace_ctrl.buf_index = (buf_idx + 1) % HISI_PTT_TRACE_BUF_CNT;
+
+ return IRQ_HANDLED;
+}
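
The interrupt status register is a per-buffer bitmap, so ffs() yields the buffer that has just filled and the DMA then moves on to the next buffer in the ring. A minimal sketch of that index arithmetic follows; the buffer count of 4 is an assumption here, since HISI_PTT_TRACE_BUF_CNT is defined in hisi_ptt.h and not shown in this hunk.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define TRACE_BUF_CNT	4	/* assumed value of HISI_PTT_TRACE_BUF_CNT */

int main(void)
{
	/* Pretend the hardware signalled that buffer 2 has filled up. */
	unsigned int status = 1u << 2;
	int buf_idx = ffs(status) - 1;
	int next_idx = (buf_idx + 1) % TRACE_BUF_CNT;

	printf("filled buffer %d, next DMA target is buffer %d\n",
	       buf_idx, next_idx);
	return 0;
}
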
+
+static void hisi_ptt_irq_free_vectors(void *pdev)
+{
+ pci_free_irq_vectors(pdev);
+}
+
+static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+{
+ struct pci_dev *pdev = hisi_ptt->pdev;
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (ret < 0) {
+ pci_err(pdev, "failed to allocate irq vector, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_irq_free_vectors, pdev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ),
+ NULL, hisi_ptt_isr, 0,
+ DRV_NAME, hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to request irq %d, ret = %d\n",
+ pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
+{
+ struct hisi_ptt_filter_desc *filter;
+ struct hisi_ptt *hisi_ptt = data;
+
+ /*
+ * We won't fail the probe if filter allocation fails here. The filters
+ * will be partially initialized and users can tell which filter failed
+ * from the log. Other functions of the PTT device are still available.
+ */
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter) {
+ pci_err(hisi_ptt->pdev, "failed to add filter %s\n", pci_name(pdev));
+ return -ENOMEM;
+ }
+
+ filter->devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ filter->is_port = true;
+ list_add_tail(&filter->list, &hisi_ptt->port_filters);
+
+ /* Update the available port mask */
+ hisi_ptt->port_mask |= hisi_ptt_get_filter_val(filter->devid, true);
+ } else {
+ list_add_tail(&filter->list, &hisi_ptt->req_filters);
+ }
+
+ return 0;
+}
+
+static void hisi_ptt_release_filters(void *data)
+{
+ struct hisi_ptt_filter_desc *filter, *tmp;
+ struct hisi_ptt *hisi_ptt = data;
+
+ list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list) {
+ list_del(&filter->list);
+ kfree(filter);
+ }
+
+ list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list) {
+ list_del(&filter->list);
+ kfree(filter);
+ }
+}
+
+static int hisi_ptt_config_trace_buf(struct hisi_ptt *hisi_ptt)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ struct device *dev = &hisi_ptt->pdev->dev;
+ int i;
+
+ ctrl->trace_buf = devm_kcalloc(dev, HISI_PTT_TRACE_BUF_CNT,
+ sizeof(*ctrl->trace_buf), GFP_KERNEL);
+ if (!ctrl->trace_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; ++i) {
+ ctrl->trace_buf[i].addr = dmam_alloc_coherent(dev, HISI_PTT_TRACE_BUF_SIZE,
+ &ctrl->trace_buf[i].dma,
+ GFP_KERNEL);
+ if (!ctrl->trace_buf[i].addr)
+ return -ENOMEM;
+ }
+
+ /* Configure the trace DMA buffer */
+ for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++) {
+ writel(lower_32_bits(ctrl->trace_buf[i].dma),
+ hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_LO_0 +
+ i * HISI_PTT_TRACE_ADDR_STRIDE);
+ writel(upper_32_bits(ctrl->trace_buf[i].dma),
+ hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_HI_0 +
+ i * HISI_PTT_TRACE_ADDR_STRIDE);
+ }
+ writel(HISI_PTT_TRACE_BUF_SIZE, hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_SIZE);
+
+ return 0;
+}
+
+static int hisi_ptt_init_ctrls(struct hisi_ptt *hisi_ptt)
+{
+ struct pci_dev *pdev = hisi_ptt->pdev;
+ struct pci_bus *bus;
+ int ret;
+ u32 reg;
+
+ INIT_LIST_HEAD(&hisi_ptt->port_filters);
+ INIT_LIST_HEAD(&hisi_ptt->req_filters);
+
+ ret = hisi_ptt_config_trace_buf(hisi_ptt);
+ if (ret)
+ return ret;
+
+ /*
+ * The device range register provides information about the root ports
+ * which the RCiEP can control and trace. The RCiEP and the root ports
+ * it supports are on the same PCIe core, with the same domain number
+ * but possibly different bus numbers. The register tells us which root
+ * ports we can support: Bit[31:16] holds the upper BDF of the root
+ * ports, while Bit[15:0] holds the lower.
+ */
+ reg = readl(hisi_ptt->iobase + HISI_PTT_DEVICE_RANGE);
+ hisi_ptt->upper_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_UPPER, reg);
+ hisi_ptt->lower_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_LOWER, reg);
+
+ bus = pci_find_bus(pci_domain_nr(pdev->bus), PCI_BUS_NUM(hisi_ptt->upper_bdf));
+ if (bus)
+ pci_walk_bus(bus, hisi_ptt_init_filters, hisi_ptt);
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_release_filters, hisi_ptt);
+ if (ret)
+ return ret;
+
+ hisi_ptt->trace_ctrl.on_cpu = -1;
+ return 0;
+}
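
As a worked example of the device range decoding in hisi_ptt_init_ctrls() above (the register value below is invented purely for illustration), Bit[31:16] and Bit[15:0] split into the upper and lower root-port BDFs, and PCI_BUS_NUM() is simply BDF >> 8:

#include <stdio.h>

/* Hypothetical HISI_PTT_DEVICE_RANGE readout, chosen only for illustration. */
#define EXAMPLE_DEVICE_RANGE	0x09100900u

int main(void)
{
	unsigned int reg = EXAMPLE_DEVICE_RANGE;
	unsigned int upper_bdf = reg >> 16;	/* Bit[31:16] */
	unsigned int lower_bdf = reg & 0xffff;	/* Bit[15:0]  */

	printf("traceable root ports: BDF 0x%04x..0x%04x (bus 0x%02x..0x%02x)\n",
	       lower_bdf, upper_bdf, lower_bdf >> 8, upper_bdf >> 8);
	return 0;
}
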
+
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev));
+ const cpumask_t *cpumask = cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *hisi_ptt_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_ptt_cpumask_attr_group = {
+ .attrs = hisi_ptt_cpumask_attrs,
+};
+
+/*
+ * Bit 19 indicates the filter type: 1 for a Root Port filter and 0 for a
+ * Requester filter. Bit[15:0] holds the filter value: for a Root Port filter
+ * it's a bit mask of the desired ports, and for a Requester filter it's the
+ * Requester ID of the desired PCIe function. Bit[18:16] is reserved for
+ * extension.
+ *
+ * See hisi_ptt.rst documentation for detailed information.
+ */
+PMU_FORMAT_ATTR(filter, "config:0-19");
+PMU_FORMAT_ATTR(direction, "config:20-23");
+PMU_FORMAT_ATTR(type, "config:24-31");
+PMU_FORMAT_ATTR(format, "config:32-35");
+
+static struct attribute *hisi_ptt_pmu_format_attrs[] = {
+ &format_attr_filter.attr,
+ &format_attr_direction.attr,
+ &format_attr_type.attr,
+ &format_attr_format.attr,
+ NULL
+};
+
+static struct attribute_group hisi_ptt_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_ptt_pmu_format_attrs,
+};
+
+static const struct attribute_group *hisi_ptt_pmu_groups[] = {
+ &hisi_ptt_cpumask_attr_group,
+ &hisi_ptt_pmu_format_group,
+ &hisi_ptt_tune_group,
+ NULL
+};
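
The format attributes above define how perf packs the PMU configuration into perf_event_attr::config. The following sketch packs a config word under those field positions; the chosen field values are examples only and the helper name is not part of the driver.

#include <stdio.h>

/*
 * Pack a config value following the format strings above: filter value in
 * bits [15:0], Root Port flag in bit 19, direction in bits [23:20], type in
 * bits [31:24] and format in bits [35:32].
 */
static unsigned long long pack_config(unsigned int filter_val, int is_port,
				      unsigned int direction,
				      unsigned int type, unsigned int format)
{
	unsigned long long cfg = 0;

	cfg |= filter_val & 0xffffULL;				/* config:0-15  */
	cfg |= (unsigned long long)(!!is_port) << 19;		/* config:19    */
	cfg |= (unsigned long long)(direction & 0xf) << 20;	/* config:20-23 */
	cfg |= (unsigned long long)(type & 0xff) << 24;		/* config:24-31 */
	cfg |= (unsigned long long)(format & 0xf) << 32;	/* config:32-35 */
	return cfg;
}

int main(void)
{
	/* Root Port 0, both directions (2), posted requests (1), 4DW (0). */
	printf("config = 0x%llx\n", pack_config(0x1, 1, 2, 1, 0));
	return 0;
}
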
+
+static int hisi_ptt_trace_valid_direction(u32 val)
+{
+ /*
+ * The direction values have different effects according to the data
+ * format (specified in parentheses). TLP sets A and B are different
+ * sets of TLP types. See the hisi_ptt.rst documentation for more details.
+ */
+ static const u32 hisi_ptt_trace_available_direction[] = {
+ 0, /* inbound(4DW) or reserved(8DW) */
+ 1, /* outbound(4DW) */
+ 2, /* {in, out}bound(4DW) or inbound(8DW), TLP set A */
+ 3, /* {in, out}bound(4DW) or inbound(8DW), TLP set B */
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_direction); i++) {
+ if (val == hisi_ptt_trace_available_direction[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hisi_ptt_trace_valid_type(u32 val)
+{
+ /* Different types can be set simultaneously */
+ static const u32 hisi_ptt_trace_available_type[] = {
+ 1, /* posted_request */
+ 2, /* non-posted_request */
+ 4, /* completion */
+ };
+ int i;
+
+ if (!val)
+ return -EINVAL;
+
+ /*
+ * Walk the available list and clear the valid bits of
+ * the config. If there is any resident bit after the
+ * walk then the config is invalid.
+ */
+ for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_type); i++)
+ val &= ~hisi_ptt_trace_available_type[i];
+
+ if (val)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hisi_ptt_trace_valid_format(u32 val)
+{
+ static const u32 hisi_ptt_trace_available_format[] = {
+ 0, /* 4DW */
+ 1, /* 8DW */
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_format); i++) {
+ if (val == hisi_ptt_trace_available_format[i])
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config)
+{
+ unsigned long val, port_mask = hisi_ptt->port_mask;
+ struct hisi_ptt_filter_desc *filter;
+
+ hisi_ptt->trace_ctrl.is_port = FIELD_GET(HISI_PTT_PMU_FILTER_IS_PORT, config);
+ val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, config);
+
+ /*
+ * Port filters are defined as a bit mask. For port filters, check that
+ * the bits in @val are within the range of hisi_ptt->port_mask and that
+ * the mask is not empty; otherwise the user has specified some
+ * unsupported root ports.
+ *
+ * For Requester ID filters, walk the available filter list to see
+ * whether there is a match.
+ */
+ if (!hisi_ptt->trace_ctrl.is_port) {
+ list_for_each_entry(filter, &hisi_ptt->req_filters, list) {
+ if (val == hisi_ptt_get_filter_val(filter->devid, filter->is_port))
+ return 0;
+ }
+ } else if (bitmap_subset(&val, &port_mask, BITS_PER_LONG)) {
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void hisi_ptt_pmu_init_configs(struct hisi_ptt *hisi_ptt, struct perf_event *event)
+{
+ struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl;
+ u32 val;
+
+ val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, event->attr.config);
+ hisi_ptt->trace_ctrl.filter = val;
+
+ val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
+ ctrl->direction = val;
+
+ val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
+ ctrl->type = val;
+
+ val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
+ ctrl->format = val;
+}
+
+static int hisi_ptt_pmu_event_init(struct perf_event *event)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ int ret;
+ u32 val;
+
+ if (event->cpu < 0) {
+ dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+ return -ENOENT;
+
+ ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config);
+ ret = hisi_ptt_trace_valid_direction(val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config);
+ ret = hisi_ptt_trace_valid_type(val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config);
+ return hisi_ptt_trace_valid_format(val);
+}
+
+static void *hisi_ptt_pmu_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
+{
+ struct hisi_ptt_pmu_buf *buf;
+ struct page **pagelist;
+ int i;
+
+ if (overwrite) {
+ dev_warn(event->pmu->dev, "Overwrite mode is not supported\n");
+ return NULL;
+ }
+
+ /* If the AUX area is smaller than the trace buffers, we cannot start the trace */
+ if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE)
+ return NULL;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ pagelist = kcalloc(nr_pages, sizeof(*pagelist), GFP_KERNEL);
+ if (!pagelist)
+ goto err;
+
+ for (i = 0; i < nr_pages; i++)
+ pagelist[i] = virt_to_page(pages[i]);
+
+ buf->base = vmap(pagelist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->base) {
+ kfree(pagelist);
+ goto err;
+ }
+
+ buf->nr_pages = nr_pages;
+ buf->length = nr_pages * PAGE_SIZE;
+ buf->pos = 0;
+
+ kfree(pagelist);
+ return buf;
+err:
+ kfree(buf);
+ return NULL;
+}
+
+static void hisi_ptt_pmu_free_aux(void *aux)
+{
+ struct hisi_ptt_pmu_buf *buf = aux;
+
+ vunmap(buf->base);
+ kfree(buf);
+}
+
+static void hisi_ptt_pmu_start(struct perf_event *event, int flags)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ struct perf_output_handle *handle = &hisi_ptt->trace_ctrl.handle;
+ struct hw_perf_event *hwc = &event->hw;
+ struct device *dev = event->pmu->dev;
+ struct hisi_ptt_pmu_buf *buf;
+ int cpu = event->cpu;
+ int ret;
+
+ hwc->state = 0;
+
+ /* Serialize the perf process if user specified several CPUs */
+ spin_lock(&hisi_ptt->pmu_lock);
+ if (hisi_ptt->trace_ctrl.started) {
+ dev_dbg(dev, "trace has already started\n");
+ goto stop;
+ }
+
+ /*
+ * Handle the interrupt on the same CPU that starts the trace to avoid
+ * a context mismatch; otherwise we'll trigger the WARN from the perf
+ * core in event_function_local(). If the CPU passed in is offline we'll
+ * fail here; just log it, since there is nothing more we can do.
+ */
+ ret = irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
+ cpumask_of(cpu));
+ if (ret)
+ dev_warn(dev, "failed to set the affinity of trace interrupt\n");
+
+ hisi_ptt->trace_ctrl.on_cpu = cpu;
+
+ buf = perf_aux_output_begin(handle, event);
+ if (!buf) {
+ dev_dbg(dev, "aux output begin failed\n");
+ goto stop;
+ }
+
+ buf->pos = handle->head % buf->length;
+
+ hisi_ptt_pmu_init_configs(hisi_ptt, event);
+
+ ret = hisi_ptt_trace_start(hisi_ptt);
+ if (ret) {
+ dev_dbg(dev, "trace start failed, ret = %d\n", ret);
+ perf_aux_output_end(handle, 0);
+ goto stop;
+ }
+
+ spin_unlock(&hisi_ptt->pmu_lock);
+ return;
+stop:
+ event->hw.state |= PERF_HES_STOPPED;
+ spin_unlock(&hisi_ptt->pmu_lock);
+}
+
+static void hisi_ptt_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ spin_lock(&hisi_ptt->pmu_lock);
+ if (hisi_ptt->trace_ctrl.started) {
+ hisi_ptt_trace_end(hisi_ptt);
+
+ if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt))
+ dev_warn(event->pmu->dev, "Device is still busy\n");
+
+ hisi_ptt_update_aux(hisi_ptt, hisi_ptt->trace_ctrl.buf_index, true);
+ }
+ spin_unlock(&hisi_ptt->pmu_lock);
+
+ hwc->state |= PERF_HES_STOPPED;
+ perf_event_update_userpage(event);
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+static int hisi_ptt_pmu_add(struct perf_event *event, int flags)
+{
+ struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int cpu = event->cpu;
+
+ /* Only allow the cpus on the device's node to add the event */
+ if (!cpumask_test_cpu(cpu, cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev))))
+ return 0;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START) {
+ hisi_ptt_pmu_start(event, PERF_EF_RELOAD);
+ if (hwc->state & PERF_HES_STOPPED)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hisi_ptt_pmu_del(struct perf_event *event, int flags)
+{
+ hisi_ptt_pmu_stop(event, PERF_EF_UPDATE);
+}
+
+static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node);
+}
+
+static void hisi_ptt_unregister_pmu(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt)
+{
+ u16 core_id, sicl_id;
+ char *pmu_name;
+ u32 reg;
+ int ret;
+
+ ret = cpuhp_state_add_instance_nocalls(hisi_ptt_pmu_online,
+ &hisi_ptt->hotplug_node);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&hisi_ptt->pdev->dev,
+ hisi_ptt_remove_cpuhp_instance,
+ &hisi_ptt->hotplug_node);
+ if (ret)
+ return ret;
+
+ mutex_init(&hisi_ptt->tune_lock);
+ spin_lock_init(&hisi_ptt->pmu_lock);
+
+ hisi_ptt->hisi_ptt_pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
+ .task_ctx_nr = perf_sw_context,
+ .attr_groups = hisi_ptt_pmu_groups,
+ .event_init = hisi_ptt_pmu_event_init,
+ .setup_aux = hisi_ptt_pmu_setup_aux,
+ .free_aux = hisi_ptt_pmu_free_aux,
+ .start = hisi_ptt_pmu_start,
+ .stop = hisi_ptt_pmu_stop,
+ .add = hisi_ptt_pmu_add,
+ .del = hisi_ptt_pmu_del,
+ };
+
+ reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION);
+ core_id = FIELD_GET(HISI_PTT_CORE_ID, reg);
+ sicl_id = FIELD_GET(HISI_PTT_SICL_ID, reg);
+
+ pmu_name = devm_kasprintf(&hisi_ptt->pdev->dev, GFP_KERNEL, "hisi_ptt%u_%u",
+ sicl_id, core_id);
+ if (!pmu_name)
+ return -ENOMEM;
+
+ ret = perf_pmu_register(&hisi_ptt->hisi_ptt_pmu, pmu_name, -1);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&hisi_ptt->pdev->dev,
+ hisi_ptt_unregister_pmu,
+ &hisi_ptt->hisi_ptt_pmu);
+}
+
+/*
+ * The DMA of the PTT trace can only use direct mappings due to a
+ * hardware restriction. Check that there is either no IOMMU or that the
+ * IOMMU domain is in passthrough mode; otherwise the trace cannot work.
+ *
+ * The PTT device is supposed to sit behind an ARM SMMUv3, which should
+ * have put the device into passthrough mode via a quirk.
+ */
+static int hisi_ptt_check_iommu_mapping(struct pci_dev *pdev)
+{
+ struct iommu_domain *iommu_domain;
+
+ iommu_domain = iommu_get_domain_for_dev(&pdev->dev);
+ if (!iommu_domain || iommu_domain->type == IOMMU_DOMAIN_IDENTITY)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int hisi_ptt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct hisi_ptt *hisi_ptt;
+ int ret;
+
+ ret = hisi_ptt_check_iommu_mapping(pdev);
+ if (ret) {
+ pci_err(pdev, "requires direct DMA mappings\n");
+ return ret;
+ }
+
+ hisi_ptt = devm_kzalloc(&pdev->dev, sizeof(*hisi_ptt), GFP_KERNEL);
+ if (!hisi_ptt)
+ return -ENOMEM;
+
+ hisi_ptt->pdev = pdev;
+ pci_set_drvdata(pdev, hisi_ptt);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pci_err(pdev, "failed to enable device, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME);
+ if (ret) {
+ pci_err(pdev, "failed to remap io memory, ret = %d\n", ret);
+ return ret;
+ }
+
+ hisi_ptt->iobase = pcim_iomap_table(pdev)[2];
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ pci_err(pdev, "failed to set 64 bit dma mask, ret = %d\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ ret = hisi_ptt_register_irq(hisi_ptt);
+ if (ret)
+ return ret;
+
+ ret = hisi_ptt_init_ctrls(hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to init controls, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hisi_ptt_register_pmu(hisi_ptt);
+ if (ret) {
+ pci_err(pdev, "failed to register PMU device, ret = %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id hisi_ptt_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12e) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, hisi_ptt_id_tbl);
+
+static struct pci_driver hisi_ptt_driver = {
+ .name = DRV_NAME,
+ .id_table = hisi_ptt_id_tbl,
+ .probe = hisi_ptt_probe,
+};
+
+static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_ptt *hisi_ptt;
+ struct device *dev;
+ int target, src;
+
+ hisi_ptt = hlist_entry_safe(node, struct hisi_ptt, hotplug_node);
+ src = hisi_ptt->trace_ctrl.on_cpu;
+ dev = hisi_ptt->hisi_ptt_pmu.dev;
+
+ if (!hisi_ptt->trace_ctrl.started || src != cpu)
+ return 0;
+
+ target = cpumask_any_but(cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)), cpu);
+ if (target >= nr_cpu_ids) {
+ dev_err(dev, "no available cpu for perf context migration\n");
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&hisi_ptt->hisi_ptt_pmu, src, target);
+
+ /*
+ * Also make sure the interrupt is bound to the migrated CPU as well. Warn
+ * the user on failure here.
+ */
+ if (irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ),
+ cpumask_of(target)))
+ dev_warn(dev, "failed to set the affinity of trace interrupt\n");
+
+ hisi_ptt->trace_ctrl.on_cpu = target;
+ return 0;
+}
+
+static int __init hisi_ptt_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRV_NAME, NULL,
+ hisi_ptt_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ hisi_ptt_pmu_online = ret;
+
+ ret = pci_register_driver(&hisi_ptt_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_ptt_pmu_online);
+
+ return ret;
+}
+module_init(hisi_ptt_init);
+
+static void __exit hisi_ptt_exit(void)
+{
+ pci_unregister_driver(&hisi_ptt_driver);
+ cpuhp_remove_multi_state(hisi_ptt_pmu_online);
+}
+module_exit(hisi_ptt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_DESCRIPTION("Driver for HiSilicon PCIe tune and trace device");
diff --git a/drivers/hwtracing/ptt/hisi_ptt.h b/drivers/hwtracing/ptt/hisi_ptt.h
new file mode 100644
index 000000000000..5beb1648c93a
--- /dev/null
+++ b/drivers/hwtracing/ptt/hisi_ptt.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Driver for HiSilicon PCIe tune and trace device
+ *
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+
+#ifndef _HISI_PTT_H
+#define _HISI_PTT_H
+
+#include <linux/bits.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define DRV_NAME "hisi_ptt"
+
+/*
+ * The definition of the device registers and register fields.
+ */
+#define HISI_PTT_TUNING_CTRL 0x0000
+#define HISI_PTT_TUNING_CTRL_CODE GENMASK(15, 0)
+#define HISI_PTT_TUNING_CTRL_SUB GENMASK(23, 16)
+#define HISI_PTT_TUNING_DATA 0x0004
+#define HISI_PTT_TUNING_DATA_VAL_MASK GENMASK(15, 0)
+#define HISI_PTT_TRACE_ADDR_SIZE 0x0800
+#define HISI_PTT_TRACE_ADDR_BASE_LO_0 0x0810
+#define HISI_PTT_TRACE_ADDR_BASE_HI_0 0x0814
+#define HISI_PTT_TRACE_ADDR_STRIDE 0x8
+#define HISI_PTT_TRACE_CTRL 0x0850
+#define HISI_PTT_TRACE_CTRL_EN BIT(0)
+#define HISI_PTT_TRACE_CTRL_RST BIT(1)
+#define HISI_PTT_TRACE_CTRL_RXTX_SEL GENMASK(3, 2)
+#define HISI_PTT_TRACE_CTRL_TYPE_SEL GENMASK(7, 4)
+#define HISI_PTT_TRACE_CTRL_DATA_FORMAT BIT(14)
+#define HISI_PTT_TRACE_CTRL_FILTER_MODE BIT(15)
+#define HISI_PTT_TRACE_CTRL_TARGET_SEL GENMASK(31, 16)
+#define HISI_PTT_TRACE_INT_STAT 0x0890
+#define HISI_PTT_TRACE_INT_STAT_MASK GENMASK(3, 0)
+#define HISI_PTT_TRACE_INT_MASK 0x0894
+#define HISI_PTT_TUNING_INT_STAT 0x0898
+#define HISI_PTT_TUNING_INT_STAT_MASK BIT(0)
+#define HISI_PTT_TRACE_WR_STS 0x08a0
+#define HISI_PTT_TRACE_WR_STS_WRITE GENMASK(27, 0)
+#define HISI_PTT_TRACE_WR_STS_BUFFER GENMASK(29, 28)
+#define HISI_PTT_TRACE_STS 0x08b0
+#define HISI_PTT_TRACE_IDLE BIT(0)
+#define HISI_PTT_DEVICE_RANGE 0x0fe0
+#define HISI_PTT_DEVICE_RANGE_UPPER GENMASK(31, 16)
+#define HISI_PTT_DEVICE_RANGE_LOWER GENMASK(15, 0)
+#define HISI_PTT_LOCATION 0x0fe8
+#define HISI_PTT_CORE_ID GENMASK(15, 0)
+#define HISI_PTT_SICL_ID GENMASK(31, 16)
+
+/* Parameters of PTT trace DMA part. */
+#define HISI_PTT_TRACE_DMA_IRQ 0
+#define HISI_PTT_TRACE_BUF_CNT 4
+#define HISI_PTT_TRACE_BUF_SIZE SZ_4M
+#define HISI_PTT_TRACE_TOTAL_BUF_SIZE (HISI_PTT_TRACE_BUF_SIZE * \
+ HISI_PTT_TRACE_BUF_CNT)
+/* Wait time for hardware DMA to reset */
+#define HISI_PTT_RESET_TIMEOUT_US 10UL
+#define HISI_PTT_RESET_POLL_INTERVAL_US 1UL
+/* Poll timeout and interval for waiting hardware work to finish */
+#define HISI_PTT_WAIT_TUNE_TIMEOUT_US 1000000UL
+#define HISI_PTT_WAIT_TRACE_TIMEOUT_US 100UL
+#define HISI_PTT_WAIT_POLL_INTERVAL_US 10UL
+
+#define HISI_PCIE_CORE_PORT_ID(devfn) ((PCI_SLOT(devfn) & 0x7) << 1)
+
+/* Definition of the PMU configs */
+#define HISI_PTT_PMU_FILTER_IS_PORT BIT(19)
+#define HISI_PTT_PMU_FILTER_VAL_MASK GENMASK(15, 0)
+#define HISI_PTT_PMU_DIRECTION_MASK GENMASK(23, 20)
+#define HISI_PTT_PMU_TYPE_MASK GENMASK(31, 24)
+#define HISI_PTT_PMU_FORMAT_MASK GENMASK(35, 32)
+
+/**
+ * struct hisi_ptt_tune_desc - Describe tune event for PTT tune
+ * @hisi_ptt: PTT device this tune event belongs to
+ * @name: name of this event
+ * @event_code: code of the event
+ */
+struct hisi_ptt_tune_desc {
+ struct hisi_ptt *hisi_ptt;
+ const char *name;
+ u32 event_code;
+};
+
+/**
+ * struct hisi_ptt_dma_buffer - Describe a single trace buffer of PTT trace.
+ * The details of the data format are described
+ * in the documentation of the PTT device.
+ * @dma: DMA address of this buffer visible to the device
+ * @addr: virtual address of this buffer visible to the cpu
+ */
+struct hisi_ptt_dma_buffer {
+ dma_addr_t dma;
+ void *addr;
+};
+
+/**
+ * struct hisi_ptt_trace_ctrl - Control and status of PTT trace
+ * @trace_buf: array of the trace buffers for holding the trace data.
+ * The length is HISI_PTT_TRACE_BUF_CNT.
+ * @handle: perf output handle of current trace session
+ * @buf_index: the index of the trace buffer currently in use
+ * @on_cpu: current tracing cpu
+ * @started: current trace status, true for started
+ * @is_port: whether we're tracing root port or not
+ * @direction: direction of the TLP headers to trace
+ * @filter: filter value for tracing the TLP headers
+ * @format: format of the TLP headers to trace
+ * @type: type of the TLP headers to trace
+ */
+struct hisi_ptt_trace_ctrl {
+ struct hisi_ptt_dma_buffer *trace_buf;
+ struct perf_output_handle handle;
+ u32 buf_index;
+ int on_cpu;
+ bool started;
+ bool is_port;
+ u32 direction:2;
+ u32 filter:16;
+ u32 format:1;
+ u32 type:4;
+};
+
+/**
+ * struct hisi_ptt_filter_desc - Descriptor of the PTT trace filter
+ * @list: entry of this descriptor in the filter list
+ * @is_port: whether the PCI device of the filter is a Root Port
+ * @devid: the PCI device's devid of the filter
+ */
+struct hisi_ptt_filter_desc {
+ struct list_head list;
+ bool is_port;
+ u16 devid;
+};
+
+/**
+ * struct hisi_ptt_pmu_buf - Descriptor of the AUX buffer of PTT trace
+ * @length: size of the AUX buffer
+ * @nr_pages: number of pages of the AUX buffer
+ * @base: start address of AUX buffer
+ * @pos: position in the AUX buffer to commit traced data
+ */
+struct hisi_ptt_pmu_buf {
+ size_t length;
+ int nr_pages;
+ void *base;
+ long pos;
+};
+
+/**
+ * struct hisi_ptt - Per PTT device data
+ * @trace_ctrl: the control information of PTT trace
+ * @hotplug_node: node for register cpu hotplug event
+ * @hisi_ptt_pmu: the PMU device for trace
+ * @iobase: base IO address of the device
+ * @pdev: pci_dev of this PTT device
+ * @tune_lock: lock to serialize the tune process
+ * @pmu_lock: lock to serialize the perf process
+ * @upper_bdf: the upper BDF range of the PCI devices managed by this PTT device
+ * @lower_bdf: the lower BDF range of the PCI devices managed by this PTT device
+ * @port_filters: the filter list of root ports
+ * @req_filters: the filter list of requester ID
+ * @port_mask: port mask of the managed root ports
+ */
+struct hisi_ptt {
+ struct hisi_ptt_trace_ctrl trace_ctrl;
+ struct hlist_node hotplug_node;
+ struct pmu hisi_ptt_pmu;
+ void __iomem *iobase;
+ struct pci_dev *pdev;
+ struct mutex tune_lock;
+ spinlock_t pmu_lock;
+ u32 upper_bdf;
+ u32 lower_bdf;
+
+ /*
+ * The trace TLP headers can either be filtered by certain
+ * root port, or by the requester ID. Organize the filters
+ * by @port_filters and @req_filters here. The mask of all
+ * the valid ports is also cached for doing sanity checks
+ * of user input.
+ */
+ struct list_head port_filters;
+ struct list_head req_filters;
+ u16 port_mask;
+};
+
+#define to_hisi_ptt(pmu) container_of(pmu, struct hisi_ptt, hisi_ptt_pmu)
+
+#endif /* _HISI_PTT_H */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 7284206b278b..264e780ae32e 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -488,8 +488,8 @@ config I2C_BCM_KONA
config I2C_BRCMSTB
tristate "BRCM Settop/DSL I2C controller"
- depends on ARCH_BCM2835 || ARCH_BCM4908 || ARCH_BCMBCA || \
- ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || \
+ BMIPS_GENERIC || COMPILE_TEST
default y
help
If you say yes to this option, support will be included for the
@@ -1267,6 +1267,16 @@ config I2C_PARPORT
This support is also available as a module. If so, the module
will be called i2c-parport.
+config I2C_PCI1XXXX
+ tristate "PCI1XXXX I2C Host Adapter"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for
+ Microchip PCI1XXXX's I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-mchp-pci1xxxx.
+
config I2C_ROBOTFUZZ_OSIF
tristate "RobotFuzz Open Source InterFace USB adapter"
depends on USB
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index c5cac15f075c..e73cdb1d2b5a 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -133,6 +133,7 @@ obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
+obj-$(CONFIG_I2C_PCI1XXXX) += i2c-mchp-pci1xxxx.o
obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o
obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o
obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
index 84b7e6cbc67b..423fe0c8a471 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-plat.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -244,14 +244,18 @@ static const struct i2c_adapter_quirks amd_i2c_dev_quirks = {
static int i2c_amd_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
int ret;
struct amd_i2c_dev *i2c_dev;
- struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct amd_mp2_dev *mp2_dev;
- const char *uid;
+ u64 uid;
- if (!adev)
- return -ENODEV;
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing UID/bus id!\n");
+ if (uid >= 2)
+ return dev_err_probe(dev, -EINVAL, "incorrect UID/bus id \"%llu\"!\n", uid);
+ dev_dbg(dev, "bus id is %llu\n", uid);
/* The ACPI namespace doesn't contain information about which MP2 PCI
* device an AMDI0011 ACPI device is related to, so assume that there's
@@ -266,6 +270,7 @@ static int i2c_amd_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
+ i2c_dev->common.bus_id = uid;
i2c_dev->common.mp2_dev = mp2_dev;
i2c_dev->pdev = pdev;
platform_set_drvdata(pdev, i2c_dev);
@@ -276,20 +281,6 @@ static int i2c_amd_probe(struct platform_device *pdev)
i2c_dev->common.resume = &i2c_amd_resume;
#endif
- uid = adev->pnp.unique_id;
- if (!uid) {
- dev_err(&pdev->dev, "missing UID/bus id!\n");
- return -EINVAL;
- } else if (strcmp(uid, "0") == 0) {
- i2c_dev->common.bus_id = 0;
- } else if (strcmp(uid, "1") == 0) {
- i2c_dev->common.bus_id = 1;
- } else {
- dev_err(&pdev->dev, "incorrect UID/bus id \"%s\"!\n", uid);
- return -EINVAL;
- }
- dev_dbg(&pdev->dev, "bus id is %u\n", i2c_dev->common.bus_id);
-
/* Register the adapter */
amd_mp2_pm_runtime_get(mp2_dev);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 33f5588a50c0..fe0cd205502d 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -10,10 +10,12 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
/* Register offsets for the I2C device. */
#define CDNS_I2C_CR_OFFSET 0x00 /* Control Register, RW */
@@ -127,6 +129,8 @@
#define CDNS_I2C_TIMEOUT_MAX 0xFF
#define CDNS_I2C_BROKEN_HOLD_BIT BIT(0)
+#define CDNS_I2C_POLL_US 100000
+#define CDNS_I2C_TIMEOUT_US 500000
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
@@ -204,6 +208,7 @@ struct cdns_i2c {
struct notifier_block clk_rate_change_nb;
u32 quirks;
u32 ctrl_reg;
+ struct i2c_bus_recovery_info rinfo;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
u16 ctrl_reg_diva_divb;
struct i2c_client *slave;
@@ -840,8 +845,14 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
#endif
/* Check if the bus is free */
- if (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_BA) {
+
+ ret = readl_relaxed_poll_timeout(id->membase + CDNS_I2C_SR_OFFSET,
+ reg,
+ !(reg & CDNS_I2C_SR_BA),
+ CDNS_I2C_POLL_US, CDNS_I2C_TIMEOUT_US);
+ if (ret) {
ret = -EAGAIN;
+ i2c_recover_bus(adap);
goto out;
}
@@ -1250,6 +1261,12 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->quirks = data->quirks;
}
+ id->rinfo.pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(id->rinfo.pinctrl)) {
+ dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
+ return PTR_ERR(id->rinfo.pinctrl);
+ }
+
id->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
if (IS_ERR(id->membase))
return PTR_ERR(id->membase);
@@ -1266,6 +1283,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
id->adap.retries = 3; /* Default retry value. */
id->adap.algo_data = id;
id->adap.dev.parent = &pdev->dev;
+ id->adap.bus_recovery_info = &id->rinfo;
init_completion(&id->xfer_done);
snprintf(id->adap.name, sizeof(id->adap.name),
"Cadence I2C at %08lx", (unsigned long)r_mem->start);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 471c47db546b..c836cf884185 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -823,7 +823,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
r = pm_runtime_resume_and_get(dev->dev);
if (r < 0) {
dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
- return r;
+ goto err_pm;
}
i2c_davinci_init(dev);
@@ -882,6 +882,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
err_unuse_clocks:
pm_runtime_dont_use_autosuspend(dev->dev);
pm_runtime_put_sync(dev->dev);
+err_pm:
pm_runtime_disable(dev->dev);
return r;
diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c
index b624356c945f..8f36167bce62 100644
--- a/drivers/i2c/busses/i2c-designware-amdpsp.c
+++ b/drivers/i2c/busses/i2c-designware-amdpsp.c
@@ -6,6 +6,7 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/psp-sev.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <asm/msr.h>
@@ -15,6 +16,8 @@
#define PSP_MBOX_OFFSET 0x10570
#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define PSP_I2C_RESERVATION_TIME_MS 100
+
#define PSP_I2C_REQ_BUS_CMD 0x64
#define PSP_I2C_REQ_RETRY_CNT 400
#define PSP_I2C_REQ_RETRY_DELAY_US (25 * USEC_PER_MSEC)
@@ -240,6 +243,41 @@ cleanup:
return ret;
}
+static void release_bus(void)
+{
+ int status;
+
+ if (!psp_i2c_sem_acquired)
+ return;
+
+ status = psp_send_i2c_req(PSP_I2C_REQ_RELEASE);
+ if (status)
+ return;
+
+ dev_dbg(psp_i2c_dev, "PSP semaphore held for %ums\n",
+ jiffies_to_msecs(jiffies - psp_i2c_sem_acquired));
+
+ psp_i2c_sem_acquired = 0;
+}
+
+static void psp_release_i2c_bus_deferred(struct work_struct *work)
+{
+ mutex_lock(&psp_i2c_access_mutex);
+
+ /*
+ * If there is any pending transaction, we cannot release the bus here;
+ * psp_release_i2c_bus() will take care of it later.
+ */
+ if (psp_i2c_access_count)
+ goto cleanup;
+
+ release_bus();
+
+cleanup:
+ mutex_unlock(&psp_i2c_access_mutex);
+}
+static DECLARE_DELAYED_WORK(release_queue, psp_release_i2c_bus_deferred);
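+
+/*
+ * Timing sketch (illustrative, derived from the code above and below): with
+ * PSP_I2C_RESERVATION_TIME_MS at 100 ms, back-to-back transfers arriving
+ * within that window reuse the already-acquired semaphore. The release
+ * command is sent either by this deferred work when the bus goes idle, or
+ * by psp_release_i2c_bus() once the reservation window has elapsed.
+ */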
+
static int psp_acquire_i2c_bus(void)
{
int status;
@@ -250,21 +288,23 @@ static int psp_acquire_i2c_bus(void)
if (psp_i2c_mbox_fail)
goto cleanup;
+ psp_i2c_access_count++;
+
/*
- * Simply increment usage counter and return if PSP semaphore was
- * already taken by kernel.
+ * No need to request bus arbitration once we are inside semaphore
+ * reservation period.
*/
- if (psp_i2c_access_count) {
- psp_i2c_access_count++;
+ if (psp_i2c_sem_acquired)
goto cleanup;
- }
status = psp_send_i2c_req(PSP_I2C_REQ_ACQUIRE);
if (status)
goto cleanup;
psp_i2c_sem_acquired = jiffies;
- psp_i2c_access_count++;
+
+ schedule_delayed_work(&release_queue,
+ msecs_to_jiffies(PSP_I2C_RESERVATION_TIME_MS));
/*
* In case of errors with PSP arbitrator psp_i2c_mbox_fail variable is
@@ -279,8 +319,6 @@ cleanup:
static void psp_release_i2c_bus(void)
{
- int status;
-
mutex_lock(&psp_i2c_access_mutex);
/* Return early if mailbox was malfunctional */
@@ -295,13 +333,12 @@ static void psp_release_i2c_bus(void)
if (psp_i2c_access_count)
goto cleanup;
- /* Send a release command to PSP */
- status = psp_send_i2c_req(PSP_I2C_REQ_RELEASE);
- if (status)
- goto cleanup;
-
- dev_dbg(psp_i2c_dev, "PSP semaphore held for %ums\n",
- jiffies_to_msecs(jiffies - psp_i2c_sem_acquired));
+ /*
+ * Send a release command to PSP if the semaphore reservation timeout
+ * elapsed but x86 still owns the controller.
+ */
+ if (!delayed_work_pending(&release_queue))
+ release_bus();
cleanup:
mutex_unlock(&psp_i2c_access_mutex);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 608e61209455..e499f96506c5 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -27,7 +27,6 @@
#include "i2c-ccgx-ucsi.h"
#define DRIVER_NAME "i2c-designware-pci"
-#define AMD_CLK_RATE_HZ 100000
enum dw_pci_ctl_id_t {
medfield,
@@ -100,11 +99,6 @@ static u32 mfld_get_clk_rate_khz(struct dw_i2c_dev *dev)
return 25000;
}
-static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
-{
- return AMD_CLK_RATE_HZ;
-}
-
static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
@@ -126,15 +120,6 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
return -ENODEV;
}
-static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
-{
- struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
-
- dev->flags |= MODEL_AMD_NAVI_GPU;
- dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
- return 0;
-}
-
static int mrfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
{
/*
@@ -159,6 +144,20 @@ static u32 ehl_get_clk_rate_khz(struct dw_i2c_dev *dev)
return 100000;
}
+static u32 navi_amd_get_clk_rate_khz(struct dw_i2c_dev *dev)
+{
+ return 100000;
+}
+
+static int navi_amd_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ struct dw_i2c_dev *dev = dev_get_drvdata(&pdev->dev);
+
+ dev->flags |= MODEL_AMD_NAVI_GPU;
+ dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ;
+ return 0;
+}
+
static struct dw_pci_controller dw_pci_controllers[] = {
[medfield] = {
.bus_num = -1,
@@ -243,6 +242,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
int r;
struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
+ struct i2c_timings *t;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
return dev_err_probe(&pdev->dev, -EINVAL,
@@ -263,7 +263,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return dev_err_probe(&pdev->dev, r,
"I/O memory remapping failed\n");
- dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -272,12 +272,14 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
return r;
dev->get_clk_rate_khz = controller->get_clk_rate_khz;
- dev->timings.bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
dev->irq = pci_irq_vector(pdev, 0);
dev->flags |= controller->flags;
+ t = &dev->timings;
+ i2c_parse_fw_timings(&pdev->dev, t, false);
+
pci_set_drvdata(pdev, dev);
if (controller->setup) {
@@ -389,6 +391,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4bbe), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bbf), elkhartlake },
{ PCI_VDEVICE(INTEL, 0x4bc0), elkhartlake },
+ /* AMD NAVI */
{ PCI_VDEVICE(ATI, 0x7314), navi_amd },
{ PCI_VDEVICE(ATI, 0x73a4), navi_amd },
{ PCI_VDEVICE(ATI, 0x73e4), navi_amd },
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a176296f4fff..e06509edc5f3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1838,6 +1838,7 @@ static struct pci_driver i801_driver = {
.shutdown = i801_shutdown,
.driver = {
.pm = &i801_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index b51ab3cad2b1..188f2a36d2fd 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -94,7 +94,8 @@ enum lpi2c_imx_pincfg {
struct lpi2c_imx_struct {
struct i2c_adapter adapter;
- struct clk *clk;
+ int num_clks;
+ struct clk_bulk_data *clks;
void __iomem *base;
__u8 *rx_buf;
__u8 *tx_buf;
@@ -207,7 +208,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
lpi2c_imx_set_mode(lpi2c_imx);
- clk_rate = clk_get_rate(lpi2c_imx->clk);
+ clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk);
if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
filt = 0;
else
@@ -561,11 +562,12 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
strscpy(lpi2c_imx->adapter.name, pdev->name,
sizeof(lpi2c_imx->adapter.name));
- lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lpi2c_imx->clk)) {
- dev_err(&pdev->dev, "can't get I2C peripheral clock\n");
- return PTR_ERR(lpi2c_imx->clk);
+ ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get I2C peripheral clock, ret=%d\n", ret);
+ return ret;
}
+ lpi2c_imx->num_clks = ret;
ret = of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &lpi2c_imx->bitrate);
@@ -582,11 +584,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
platform_set_drvdata(pdev, lpi2c_imx);
- ret = clk_prepare_enable(lpi2c_imx->clk);
- if (ret) {
- dev_err(&pdev->dev, "clk enable failed %d\n", ret);
+ ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
+ if (ret)
return ret;
- }
pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -633,7 +633,7 @@ static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
- clk_disable_unprepare(lpi2c_imx->clk);
+ clk_bulk_disable_unprepare(lpi2c_imx->num_clks, lpi2c_imx->clks);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -645,7 +645,7 @@ static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
int ret;
pinctrl_pm_select_default_state(dev);
- ret = clk_prepare_enable(lpi2c_imx->clk);
+ ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
if (ret) {
dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret);
return ret;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e47fa3465671..3082183bd66a 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1583,7 +1583,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (i2c_imx->dma)
i2c_imx_dma_free(i2c_imx);
- if (ret == 0) {
+ if (ret >= 0) {
/* setup chip registers to defaults */
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 6078fa0c0d48..fe2349590f75 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -937,11 +937,8 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "dma_set_mask fail\n");
- return -ENODEV;
- }
+ dev_err(&pdev->dev, "dma_set_mask fail\n");
+ return -ENODEV;
}
err = ismt_dev_init(priv);
diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
new file mode 100644
index 000000000000..f5342201eb6b
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PCI1XXXX I2C adapter driver for the PCIe switch,
+ * which has an I2C controller in one of its downstream functions.
+ *
+ * Copyright (C) 2021 - 2022 Microchip Technology Inc.
+ *
+ * Authors: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
+ * Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#define SMBUS_MAST_CORE_ADDR_BASE 0x00000
+#define SMBUS_MAST_SYS_REG_ADDR_BASE 0x01000
+
+/* SMB register space. */
+#define SMB_CORE_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x00)
+
+#define SMB_CORE_CTRL_ESO BIT(6)
+#define SMB_CORE_CTRL_FW_ACK BIT(4)
+#define SMB_CORE_CTRL_ACK BIT(0)
+
+#define SMB_CORE_CMD_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x0F)
+#define SMB_CORE_CMD_REG_OFF2 (SMBUS_MAST_CORE_ADDR_BASE + 0x0E)
+#define SMB_CORE_CMD_REG_OFF1 (SMBUS_MAST_CORE_ADDR_BASE + 0x0D)
+
+#define SMB_CORE_CMD_READM BIT(4)
+#define SMB_CORE_CMD_STOP BIT(2)
+#define SMB_CORE_CMD_START BIT(0)
+
+#define SMB_CORE_CMD_REG_OFF0 (SMBUS_MAST_CORE_ADDR_BASE + 0x0C)
+
+#define SMB_CORE_CMD_M_PROCEED BIT(1)
+#define SMB_CORE_CMD_M_RUN BIT(0)
+
+#define SMB_CORE_SR_HOLD_TIME_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x18)
+
+/*
+ * SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
+ * baud clock required to program 'Hold Time' at X KHz.
+ */
+#define SR_HOLD_TIME_100K_TICKS 133
+#define SR_HOLD_TIME_400K_TICKS 20
+#define SR_HOLD_TIME_1000K_TICKS 11
+
+#define SMB_CORE_COMPLETION_REG_OFF3 (SMBUS_MAST_CORE_ADDR_BASE + 0x23)
+
+#define COMPLETION_MDONE BIT(6)
+#define COMPLETION_IDLE BIT(5)
+#define COMPLETION_MNAKX BIT(0)
+
+#define SMB_CORE_IDLE_SCALING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x24)
+
+/*
+ * FAIR_BUS_IDLE_MIN_XK_TICKS field will indicate the number of ticks of
+ * the baud clock required to program 'fair idle delay' at X KHz. Fair idle
+ * delay establishes the MCTP T(IDLE_DELAY) period.
+ */
+#define FAIR_BUS_IDLE_MIN_100K_TICKS 969
+#define FAIR_BUS_IDLE_MIN_400K_TICKS 157
+#define FAIR_BUS_IDLE_MIN_1000K_TICKS 157
+
+/*
+ * FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
+ * baud clock required to satisfy the fairness protocol at X KHz.
+ */
+#define FAIR_IDLE_DELAY_100K_TICKS 1000
+#define FAIR_IDLE_DELAY_400K_TICKS 500
+#define FAIR_IDLE_DELAY_1000K_TICKS 500
+
+#define SMB_IDLE_SCALING_100K \
+ ((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
+#define SMB_IDLE_SCALING_400K \
+ ((FAIR_IDLE_DELAY_400K_TICKS << 16) | FAIR_BUS_IDLE_MIN_400K_TICKS)
+#define SMB_IDLE_SCALING_1000K \
+ ((FAIR_IDLE_DELAY_1000K_TICKS << 16) | FAIR_BUS_IDLE_MIN_1000K_TICKS)
+
+#define SMB_CORE_CONFIG_REG3 (SMBUS_MAST_CORE_ADDR_BASE + 0x2B)
+
+#define SMB_CONFIG3_ENMI BIT(6)
+#define SMB_CONFIG3_ENIDI BIT(5)
+
+#define SMB_CORE_CONFIG_REG2 (SMBUS_MAST_CORE_ADDR_BASE + 0x2A)
+#define SMB_CORE_CONFIG_REG1 (SMBUS_MAST_CORE_ADDR_BASE + 0x29)
+
+#define SMB_CONFIG1_ASR BIT(7)
+#define SMB_CONFIG1_ENAB BIT(2)
+#define SMB_CONFIG1_RESET BIT(1)
+#define SMB_CONFIG1_FEN BIT(0)
+
+#define SMB_CORE_BUS_CLK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x2C)
+
+/*
+ * BUS_CLK_XK_LOW_PERIOD_TICKS field defines the number of I2C Baud Clock
+ * periods that make up the low phase of the I2C/SMBus bus clock at X KHz.
+ */
+#define BUS_CLK_100K_LOW_PERIOD_TICKS 156
+#define BUS_CLK_400K_LOW_PERIOD_TICKS 41
+#define BUS_CLK_1000K_LOW_PERIOD_TICKS 15
+
+/*
+ * BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
+ * periods that make up the high phase of the I2C/SMBus bus clock at X KHz.
+ */
+#define BUS_CLK_100K_HIGH_PERIOD_TICKS 154
+#define BUS_CLK_400K_HIGH_PERIOD_TICKS 35
+#define BUS_CLK_1000K_HIGH_PERIOD_TICKS 14
+
+#define BUS_CLK_100K \
+ ((BUS_CLK_100K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_100K_LOW_PERIOD_TICKS)
+#define BUS_CLK_400K \
+ ((BUS_CLK_400K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_400K_LOW_PERIOD_TICKS)
+#define BUS_CLK_1000K \
+ ((BUS_CLK_1000K_HIGH_PERIOD_TICKS << 8) | BUS_CLK_1000K_LOW_PERIOD_TICKS)
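+
+/*
+ * For reference, the packed register values the macros above expand to
+ * (high period in bits 15:8, low period in bits 7:0):
+ * BUS_CLK_100K = (154 << 8) | 156 = 0x9A9C
+ * BUS_CLK_400K = (35 << 8) | 41 = 0x2329
+ * BUS_CLK_1000K = (14 << 8) | 15 = 0x0E0F
+ */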
+
+#define SMB_CORE_CLK_SYNC_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x3C)
+
+/*
+ * CLK_SYNC_XK defines the number of clock cycles to sync up to the external
+ * clock before comparing the internal and external clocks for clock stretching
+ * at X KHz.
+ */
+#define CLK_SYNC_100K 4
+#define CLK_SYNC_400K 4
+#define CLK_SYNC_1000K 4
+
+#define SMB_CORE_DATA_TIMING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x40)
+
+/*
+ * FIRST_START_HOLD_XK_TICKS will indicate the number of ticks of the baud
+ * clock required to program 'FIRST_START_HOLD' timer at X KHz. This timer
+ * determines the SCLK hold time following SDAT driven low during the first
+ * START bit in a transfer.
+ */
+#define FIRST_START_HOLD_100K_TICKS 22
+#define FIRST_START_HOLD_400K_TICKS 16
+#define FIRST_START_HOLD_1000K_TICKS 6
+
+/*
+ * STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+ * required to program 'STOP_SETUP' timer at X KHz. This timer determines the
+ * SDAT setup time from the rising edge of SCLK for a STOP condition.
+ */
+#define STOP_SETUP_100K_TICKS 157
+#define STOP_SETUP_400K_TICKS 20
+#define STOP_SETUP_1000K_TICKS 12
+
+/*
+ * RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+ * required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
+ * SDAT setup time from the rising edge of SCLK for a repeated START condition.
+ */
+#define RESTART_SETUP_100K_TICKS 157
+#define RESTART_SETUP_400K_TICKS 20
+#define RESTART_SETUP_1000K_TICKS 12
+
+/*
+ * DATA_HOLD_XK_TICKS will indicate the number of ticks of the baud clock
+ * required to program 'DATA_HOLD' timer at X KHz. This timer determines the
+ * SDAT hold time following SCLK driven low.
+ */
+#define DATA_HOLD_100K_TICKS 2
+#define DATA_HOLD_400K_TICKS 2
+#define DATA_HOLD_1000K_TICKS 2
+
+#define DATA_TIMING_100K \
+ ((FIRST_START_HOLD_100K_TICKS << 24) | (STOP_SETUP_100K_TICKS << 16) | \
+ (RESTART_SETUP_100K_TICKS << 8) | DATA_HOLD_100K_TICKS)
+#define DATA_TIMING_400K \
+ ((FIRST_START_HOLD_400K_TICKS << 24) | (STOP_SETUP_400K_TICKS << 16) | \
+ (RESTART_SETUP_400K_TICKS << 8) | DATA_HOLD_400K_TICKS)
+#define DATA_TIMING_1000K \
+ ((FIRST_START_HOLD_1000K_TICKS << 24) | (STOP_SETUP_1000K_TICKS << 16) | \
+ (RESTART_SETUP_1000K_TICKS << 8) | DATA_HOLD_1000K_TICKS)
+
+#define SMB_CORE_TO_SCALING_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x44)
+
+/*
+ * BUS_IDLE_MIN_XK_TICKS defines Bus Idle Minimum Time.
+ * Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
+ * (4 if BUS_IDLE_MIN_XK_TICKS[7] is set, otherwise 1)
+ */
+#define BUS_IDLE_MIN_100K_TICKS 167UL
+#define BUS_IDLE_MIN_400K_TICKS 139UL
+#define BUS_IDLE_MIN_1000K_TICKS 133UL
+
+/*
+ * CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
+ * SMBus Controller Cumulative Time-Out duration =
+ * CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
+ */
+#define CTRL_CUM_TIME_OUT_100K_TICKS 159
+#define CTRL_CUM_TIME_OUT_400K_TICKS 159
+#define CTRL_CUM_TIME_OUT_1000K_TICKS 159
+
+/*
+ * TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
+ * SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
+ * Baud_Clock_Period x 4096
+ */
+#define TARGET_CUM_TIME_OUT_100K_TICKS 199
+#define TARGET_CUM_TIME_OUT_400K_TICKS 199
+#define TARGET_CUM_TIME_OUT_1000K_TICKS 199
+
+/*
+ * CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
+ * Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
+ */
+#define CLOCK_HIGH_TIME_OUT_100K_TICKS 204
+#define CLOCK_HIGH_TIME_OUT_400K_TICKS 204
+#define CLOCK_HIGH_TIME_OUT_1000K_TICKS 204
+
+#define TO_SCALING_100K \
+ ((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \
+ (TARGET_CUM_TIME_OUT_100K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_100K_TICKS)
+#define TO_SCALING_400K \
+ ((BUS_IDLE_MIN_400K_TICKS << 24) | (CTRL_CUM_TIME_OUT_400K_TICKS << 16) | \
+ (TARGET_CUM_TIME_OUT_400K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_400K_TICKS)
+#define TO_SCALING_1000K \
+ ((BUS_IDLE_MIN_1000K_TICKS << 24) | (CTRL_CUM_TIME_OUT_1000K_TICKS << 16) | \
+ (TARGET_CUM_TIME_OUT_1000K_TICKS << 8) | CLOCK_HIGH_TIME_OUT_1000K_TICKS)
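+
+/*
+ * For reference, the packed time-out scaling values the macros above expand
+ * to (fields from bit 31 down: BUS_IDLE_MIN, CTRL_CUM_TIME_OUT,
+ * TARGET_CUM_TIME_OUT, CLOCK_HIGH_TIME_OUT):
+ * TO_SCALING_100K = 0xA79FC7CC
+ * TO_SCALING_400K = 0x8B9FC7CC
+ * TO_SCALING_1000K = 0x859FC7CC
+ */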
+
+#define I2C_SCL_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x100)
+#define I2C_SDA_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x101)
+
+#define I2C_FOD_EN BIT(4)
+#define I2C_PULL_UP_EN BIT(3)
+#define I2C_PULL_DOWN_EN BIT(2)
+#define I2C_INPUT_EN BIT(1)
+#define I2C_OUTPUT_EN BIT(0)
+
+#define SMBUS_CONTROL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x200)
+
+#define CTL_RESET_COUNTERS BIT(3)
+#define CTL_TRANSFER_DIR BIT(2)
+#define CTL_HOST_FIFO_ENTRY BIT(1)
+#define CTL_RUN BIT(0)
+
+#define I2C_DIRN_WRITE 0
+#define I2C_DIRN_READ 1
+
+#define SMBUS_STATUS_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x204)
+
+#define STA_DMA_TERM BIT(7)
+#define STA_DMA_REQ BIT(6)
+#define STA_THRESHOLD BIT(2)
+#define STA_BUF_FULL BIT(1)
+#define STA_BUF_EMPTY BIT(0)
+
+#define SMBUS_INTR_STAT_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x208)
+
+#define INTR_STAT_DMA_TERM BIT(7)
+#define INTR_STAT_THRESHOLD BIT(2)
+#define INTR_STAT_BUF_FULL BIT(1)
+#define INTR_STAT_BUF_EMPTY BIT(0)
+
+#define SMBUS_INTR_MSK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x20C)
+
+#define INTR_MSK_DMA_TERM BIT(7)
+#define INTR_MSK_THRESHOLD BIT(2)
+#define INTR_MSK_BUF_FULL BIT(1)
+#define INTR_MSK_BUF_EMPTY BIT(0)
+
+#define ALL_NW_LAYER_INTERRUPTS \
+ (INTR_MSK_DMA_TERM | INTR_MSK_THRESHOLD | INTR_MSK_BUF_FULL | \
+ INTR_MSK_BUF_EMPTY)
+
+#define SMBUS_MCU_COUNTER_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x214)
+
+#define SMBALERT_MST_PAD_CTRL_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x230)
+
+#define SMBALERT_MST_PU BIT(0)
+
+#define SMBUS_GEN_INT_STAT_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x23C)
+
+#define SMBUS_GEN_INT_MASK_REG_OFF (SMBUS_MAST_CORE_ADDR_BASE + 0x240)
+
+#define SMBALERT_INTR_MASK BIT(10)
+#define I2C_BUF_MSTR_INTR_MASK BIT(9)
+#define I2C_INTR_MASK BIT(8)
+#define SMBALERT_WAKE_INTR_MASK BIT(2)
+#define I2C_BUF_MSTR_WAKE_INTR_MASK BIT(1)
+#define I2C_WAKE_INTR_MASK BIT(0)
+
+#define ALL_HIGH_LAYER_INTR \
+ (SMBALERT_INTR_MASK | I2C_BUF_MSTR_INTR_MASK | I2C_INTR_MASK | \
+ SMBALERT_WAKE_INTR_MASK | I2C_BUF_MSTR_WAKE_INTR_MASK | \
+ I2C_WAKE_INTR_MASK)
+
+#define SMBUS_RESET_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x248)
+
+#define PERI_SMBUS_D3_RESET_DIS BIT(16)
+
+#define SMBUS_MST_BUF (SMBUS_MAST_CORE_ADDR_BASE + 0x280)
+
+#define SMBUS_BUF_MAX_SIZE 0x80
+
+#define I2C_FLAGS_DIRECT_MODE BIT(7)
+#define I2C_FLAGS_POLLING_MODE BIT(6)
+#define I2C_FLAGS_STOP BIT(5)
+#define I2C_FLAGS_SMB_BLK_READ BIT(4)
+
+#define PCI1XXXX_I2C_TIMEOUT_MS 1000
+
+/* General Purpose Register. */
+#define SMB_GPR_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x1000 + 0x0c00 + \
+ 0x00)
+
+/* Lock Register. */
+#define SMB_GPR_LOCK_REG (SMBUS_MAST_CORE_ADDR_BASE + 0x1000 + 0x0000 + \
+ 0x00A0)
+
+#define SMBUS_PERI_LOCK BIT(3)
+
+struct pci1xxxx_i2c {
+ struct completion i2c_xfer_done;
+ bool i2c_xfer_in_progress;
+ struct i2c_adapter adap;
+ void __iomem *i2c_base;
+ u32 freq;
+ u32 flags;
+};
+
+static int set_sys_lock(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMB_GPR_LOCK_REG;
+ u8 data;
+
+ writel(SMBUS_PERI_LOCK, p);
+ data = readl(p);
+ if (data != SMBUS_PERI_LOCK)
+ return -EPERM;
+
+ return 0;
+}
+
+static int release_sys_lock(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMB_GPR_LOCK_REG;
+ u8 data;
+
+ data = readl(p);
+ if (data != SMBUS_PERI_LOCK)
+ return 0;
+
+ writel(0, p);
+ data = readl(p);
+ if (data & SMBUS_PERI_LOCK)
+ return -EPERM;
+
+ return 0;
+}
+
+static void pci1xxxx_ack_high_level_intr(struct pci1xxxx_i2c *i2c, u16 intr_msk)
+{
+ writew(intr_msk, i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF);
+}
+
+static void pci1xxxx_i2c_configure_smbalert_pin(struct pci1xxxx_i2c *i2c,
+ bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMBALERT_MST_PAD_CTRL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+
+ if (enable)
+ regval |= SMBALERT_MST_PU;
+ else
+ regval &= ~SMBALERT_MST_PU;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_send_start_stop(struct pci1xxxx_i2c *i2c, bool start)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ u8 regval;
+
+ regval = readb(p);
+
+ if (start)
+ regval |= SMB_CORE_CMD_START;
+ else
+ regval |= SMB_CORE_CMD_STOP;
+
+ writeb(regval, p);
+}
+
+/*
+ * When accessing the core control reg, we should not do a read-modify-write
+ * as some bits are write-'1'-to-clear. Instead we need to write only the
+ * specific bits that need to be set.
+ */
+static void pci1xxxx_i2c_set_clear_FW_ACK(struct pci1xxxx_i2c *i2c, bool set)
+{
+ u8 regval;
+
+ if (set)
+ regval = SMB_CORE_CTRL_FW_ACK | SMB_CORE_CTRL_ESO | SMB_CORE_CTRL_ACK;
+ else
+ regval = SMB_CORE_CTRL_ESO | SMB_CORE_CTRL_ACK;
+
+ writeb(regval, i2c->i2c_base + SMB_CORE_CTRL_REG_OFF);
+}
+
+static void pci1xxxx_i2c_buffer_write(struct pci1xxxx_i2c *i2c, u8 slaveaddr,
+ u8 transferlen, unsigned char *buf)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_MST_BUF;
+
+ if (slaveaddr)
+ writeb(slaveaddr, p++);
+
+ if (buf)
+ memcpy_toio(p, buf, transferlen);
+}
+
+/*
+ * When accessing the core control reg, we should not do a read-modify-write
+ * as some bits are write-'1'-to-clear. Instead we need to write only the
+ * specific bits that need to be set.
+ */
+static void pci1xxxx_i2c_enable_ESO(struct pci1xxxx_i2c *i2c)
+{
+ writeb(SMB_CORE_CTRL_ESO, i2c->i2c_base + SMB_CORE_CTRL_REG_OFF);
+}
+
+static void pci1xxxx_i2c_reset_counters(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ regval |= CTL_RESET_COUNTERS;
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_set_transfer_dir(struct pci1xxxx_i2c *i2c, u8 direction)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ if (direction == I2C_DIRN_WRITE)
+ regval &= ~CTL_TRANSFER_DIR;
+ else
+ regval |= CTL_TRANSFER_DIR;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_set_mcu_count(struct pci1xxxx_i2c *i2c, u8 count)
+{
+ writeb(count, i2c->i2c_base + SMBUS_MCU_COUNTER_REG_OFF);
+}
+
+static void pci1xxxx_i2c_set_read_count(struct pci1xxxx_i2c *i2c, u8 readcount)
+{
+ writeb(readcount, i2c->i2c_base + SMB_CORE_CMD_REG_OFF3);
+}
+
+static void pci1xxxx_i2c_set_write_count(struct pci1xxxx_i2c *i2c, u8 writecount)
+{
+ writeb(writecount, i2c->i2c_base + SMB_CORE_CMD_REG_OFF2);
+}
+
+static void pci1xxxx_i2c_set_DMA_run(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ regval |= CTL_RUN;
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_set_mrun_proceed(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF0;
+ u8 regval;
+
+ regval = readb(p);
+ regval |= SMB_CORE_CMD_M_RUN;
+ regval |= SMB_CORE_CMD_M_PROCEED;
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_start_DMA(struct pci1xxxx_i2c *i2c)
+{
+ pci1xxxx_i2c_set_DMA_run(i2c);
+ pci1xxxx_i2c_set_mrun_proceed(i2c);
+}
+
+static void pci1xxxx_i2c_config_asr(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CONFIG_REG1;
+ u8 regval;
+
+ regval = readb(p);
+ if (enable)
+ regval |= SMB_CONFIG1_ASR;
+ else
+ regval &= ~SMB_CONFIG1_ASR;
+ writeb(regval, p);
+}
+
+static irqreturn_t pci1xxxx_i2c_isr(int irq, void *dev)
+{
+ struct pci1xxxx_i2c *i2c = dev;
+ void __iomem *p1 = i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF;
+ void __iomem *p2 = i2c->i2c_base + SMBUS_INTR_STAT_REG_OFF;
+ irqreturn_t intr_handled = IRQ_NONE;
+ u16 reg1;
+ u8 reg3;
+
+ /*
+ * Read the SMBus interrupt status register to see if the
+ * DMA_TERM interrupt has caused this callback.
+ */
+ reg1 = readw(p1);
+
+ if (reg1 & I2C_BUF_MSTR_INTR_MASK) {
+ reg3 = readb(p2);
+ if (reg3 & INTR_STAT_DMA_TERM) {
+ complete(&i2c->i2c_xfer_done);
+ intr_handled = IRQ_HANDLED;
+ writeb(INTR_STAT_DMA_TERM, p2);
+ }
+ pci1xxxx_ack_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK);
+ }
+
+ if (reg1 & SMBALERT_INTR_MASK) {
+ intr_handled = IRQ_HANDLED;
+ pci1xxxx_ack_high_level_intr(i2c, SMBALERT_INTR_MASK);
+ }
+
+ return intr_handled;
+}
+
+static void pci1xxxx_i2c_set_count(struct pci1xxxx_i2c *i2c, u8 mcucount,
+ u8 writecount, u8 readcount)
+{
+ pci1xxxx_i2c_set_mcu_count(i2c, mcucount);
+ pci1xxxx_i2c_set_write_count(i2c, writecount);
+ pci1xxxx_i2c_set_read_count(i2c, readcount);
+}
+
+static void pci1xxxx_i2c_set_readm(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ u8 regval;
+
+ regval = readb(p);
+ if (enable)
+ regval |= SMB_CORE_CMD_READM;
+ else
+ regval &= ~SMB_CORE_CMD_READM;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_ack_nw_layer_intr(struct pci1xxxx_i2c *i2c, u8 ack_intr_msk)
+{
+ writeb(ack_intr_msk, i2c->i2c_base + SMBUS_INTR_STAT_REG_OFF);
+}
+
+static void pci1xxxx_config_nw_layer_intr(struct pci1xxxx_i2c *i2c,
+ u8 intr_msk, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_INTR_MSK_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ if (enable)
+ regval &= ~intr_msk;
+ else
+ regval |= intr_msk;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_config_padctrl(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p1 = i2c->i2c_base + I2C_SCL_PAD_CTRL_REG_OFF;
+ void __iomem *p2 = i2c->i2c_base + I2C_SDA_PAD_CTRL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p1);
+ if (enable)
+ regval |= I2C_INPUT_EN | I2C_OUTPUT_EN;
+ else
+ regval &= ~(I2C_INPUT_EN | I2C_OUTPUT_EN);
+
+ writeb(regval, p1);
+
+ regval = readb(p2);
+ if (enable)
+ regval |= I2C_INPUT_EN | I2C_OUTPUT_EN;
+ else
+ regval &= ~(I2C_INPUT_EN | I2C_OUTPUT_EN);
+
+ writeb(regval, p2);
+}
+
+static void pci1xxxx_i2c_set_mode(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_CONTROL_REG_OFF;
+ u8 regval;
+
+ regval = readb(p);
+ if (i2c->flags & I2C_FLAGS_DIRECT_MODE)
+ regval &= ~CTL_HOST_FIFO_ENTRY;
+ else
+ regval |= CTL_HOST_FIFO_ENTRY;
+
+ writeb(regval, p);
+}
+
+static void pci1xxxx_i2c_config_high_level_intr(struct pci1xxxx_i2c *i2c,
+ u16 intr_msk, bool enable)
+{
+ void __iomem *p = i2c->i2c_base + SMBUS_GEN_INT_MASK_REG_OFF;
+ u16 regval;
+
+ regval = readw(p);
+ if (enable)
+ regval &= ~intr_msk;
+ else
+ regval |= intr_msk;
+ writew(regval, p);
+}
+
+static void pci1xxxx_i2c_configure_core_reg(struct pci1xxxx_i2c *i2c, bool enable)
+{
+ void __iomem *p1 = i2c->i2c_base + SMB_CORE_CONFIG_REG1;
+ void __iomem *p3 = i2c->i2c_base + SMB_CORE_CONFIG_REG3;
+ u8 reg1;
+ u8 reg3;
+
+ reg1 = readb(p1);
+ reg3 = readb(p3);
+ if (enable) {
+ reg1 |= SMB_CONFIG1_ENAB | SMB_CONFIG1_FEN;
+ reg3 |= SMB_CONFIG3_ENMI | SMB_CONFIG3_ENIDI;
+ } else {
+ reg1 &= ~(SMB_CONFIG1_ENAB | SMB_CONFIG1_FEN);
+ reg3 &= ~(SMB_CONFIG3_ENMI | SMB_CONFIG3_ENIDI);
+ }
+
+ writeb(reg1, p1);
+ writeb(reg3, p3);
+}
+
+static void pci1xxxx_i2c_set_freq(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *bp = i2c->i2c_base;
+ void __iomem *p_idle_scaling = bp + SMB_CORE_IDLE_SCALING_REG_OFF;
+ void __iomem *p_data_timing = bp + SMB_CORE_DATA_TIMING_REG_OFF;
+ void __iomem *p_hold_time = bp + SMB_CORE_SR_HOLD_TIME_REG_OFF;
+ void __iomem *p_to_scaling = bp + SMB_CORE_TO_SCALING_REG_OFF;
+ void __iomem *p_clk_sync = bp + SMB_CORE_CLK_SYNC_REG_OFF;
+ void __iomem *p_clk_reg = bp + SMB_CORE_BUS_CLK_REG_OFF;
+
+ switch (i2c->freq) {
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ writeb(SR_HOLD_TIME_100K_TICKS, p_hold_time);
+ writel(SMB_IDLE_SCALING_100K, p_idle_scaling);
+ writew(BUS_CLK_100K, p_clk_reg);
+ writel(CLK_SYNC_100K, p_clk_sync);
+ writel(DATA_TIMING_100K, p_data_timing);
+ writel(TO_SCALING_100K, p_to_scaling);
+ break;
+
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ writeb(SR_HOLD_TIME_1000K_TICKS, p_hold_time);
+ writel(SMB_IDLE_SCALING_1000K, p_idle_scaling);
+ writew(BUS_CLK_1000K, p_clk_reg);
+ writel(CLK_SYNC_1000K, p_clk_sync);
+ writel(DATA_TIMING_1000K, p_data_timing);
+ writel(TO_SCALING_1000K, p_to_scaling);
+ break;
+
+ case I2C_MAX_FAST_MODE_FREQ:
+ default:
+ writeb(SR_HOLD_TIME_400K_TICKS, p_hold_time);
+ writel(SMB_IDLE_SCALING_400K, p_idle_scaling);
+ writew(BUS_CLK_400K, p_clk_reg);
+ writel(CLK_SYNC_400K, p_clk_sync);
+ writel(DATA_TIMING_400K, p_data_timing);
+ writel(TO_SCALING_400K, p_to_scaling);
+ break;
+ }
+}
+
+static void pci1xxxx_i2c_init(struct pci1xxxx_i2c *i2c)
+{
+ void __iomem *p2 = i2c->i2c_base + SMBUS_STATUS_REG_OFF;
+ void __iomem *p1 = i2c->i2c_base + SMB_GPR_REG;
+ u8 regval;
+ int ret;
+
+ ret = set_sys_lock(i2c);
+ if (ret == -EPERM) {
+ /*
+ * Configure I2C Fast Mode as default frequency if unable
+ * to acquire sys lock.
+ */
+ regval = 0;
+ } else {
+ regval = readl(p1);
+ release_sys_lock(i2c);
+ }
+
+ switch (regval) {
+ case 0:
+ i2c->freq = I2C_MAX_FAST_MODE_FREQ;
+ pci1xxxx_i2c_set_freq(i2c);
+ break;
+ case 1:
+ i2c->freq = I2C_MAX_STANDARD_MODE_FREQ;
+ pci1xxxx_i2c_set_freq(i2c);
+ break;
+ case 2:
+ i2c->freq = I2C_MAX_FAST_MODE_PLUS_FREQ;
+ pci1xxxx_i2c_set_freq(i2c);
+ break;
+ case 3:
+ default:
+ break;
+ }
+
+ pci1xxxx_i2c_config_padctrl(i2c, true);
+ i2c->flags |= I2C_FLAGS_DIRECT_MODE;
+ pci1xxxx_i2c_set_mode(i2c);
+
+ /*
+ * Added as a precaution since BUF_EMPTY in the status register
+ * also triggered an interrupt.
+ */
+ writeb(STA_BUF_EMPTY, p2);
+
+ /* Configure core I2c control registers. */
+ pci1xxxx_i2c_configure_core_reg(i2c, true);
+
+ /*
+ * Enable pull-up for the SMB alert pin which is just used for
+ * wakeup right now.
+ */
+ pci1xxxx_i2c_configure_smbalert_pin(i2c, true);
+}
+
+static void pci1xxxx_i2c_clear_flags(struct pci1xxxx_i2c *i2c)
+{
+ u8 regval;
+
+ /* Reset the internal buffer counters. */
+ pci1xxxx_i2c_reset_counters(i2c);
+
+ /* Clear low level interrupts. */
+ regval = COMPLETION_MNAKX | COMPLETION_IDLE | COMPLETION_MDONE;
+ writeb(regval, i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3);
+ reinit_completion(&i2c->i2c_xfer_done);
+ pci1xxxx_ack_nw_layer_intr(i2c, ALL_NW_LAYER_INTERRUPTS);
+ pci1xxxx_ack_high_level_intr(i2c, ALL_HIGH_LAYER_INTR);
+}
+
+static int pci1xxxx_i2c_read(struct pci1xxxx_i2c *i2c, u8 slaveaddr,
+ unsigned char *buf, u16 total_len)
+{
+ void __iomem *p2 = i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3;
+ void __iomem *p1 = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ void __iomem *p3 = i2c->i2c_base + SMBUS_MST_BUF;
+ unsigned long time_left;
+ u16 remainingbytes;
+ u8 transferlen;
+ int retval = 0;
+ u8 read_count;
+ u32 regval;
+ u16 count;
+
+ /* Enable I2C host controller by setting the ESO bit in the CONTROL REG. */
+ pci1xxxx_i2c_enable_ESO(i2c);
+ pci1xxxx_i2c_clear_flags(i2c);
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, true);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, true);
+
+ /*
+ * The I2C transfer could be more than 128 bytes. Our Core is
+ * capable of only sending 128 at a time.
+ * For an I2C read, initially send the read slave address along
+ * with the number of bytes to read in ReadCount. After the slave
+ * address is sent, an interrupt is generated. On seeing the ACK
+ * for the slave address, reverse the buffer direction and run the
+ * DMA to initiate the read from the slave.
+ */
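+ /*
+ * For example, with SMBUS_BUF_MAX_SIZE of 128 bytes, a 300-byte
+ * read is performed in three chunks of 128, 128 and 44 bytes
+ * (length chosen purely for illustration).
+ */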
+ for (count = 0; count < total_len; count += transferlen) {
+
+ /*
+ * Before start of any transaction clear the existing
+ * START/STOP conditions.
+ */
+ writeb(0, p1);
+ remainingbytes = total_len - count;
+ transferlen = min_t(u16, remainingbytes, SMBUS_BUF_MAX_SIZE);
+
+ /*
+ * Send STOP bit for the last chunk in the transaction.
+ * For I2C read transaction of more than BUF_SIZE, NACK should
+ * only be sent for the last read.
+ * Hence the FW_ACK bit is set for all the read chunks except the
+ * last one. For the last chunk NACK should be sent, so FW_ACK is
+ * cleared. Send STOP only when the I2C_FLAGS_STOP bit is set in
+ * the flags and only for the last transaction.
+ */
+ if ((count + transferlen >= total_len) &&
+ (i2c->flags & I2C_FLAGS_STOP)) {
+ pci1xxxx_i2c_set_clear_FW_ACK(i2c, false);
+ pci1xxxx_i2c_send_start_stop(i2c, 0);
+ } else {
+ pci1xxxx_i2c_set_clear_FW_ACK(i2c, true);
+ }
+
+ /* Send START bit for the first transaction. */
+ if (count == 0) {
+ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_WRITE);
+ pci1xxxx_i2c_send_start_stop(i2c, 1);
+
+ /* Write I2c buffer with just the slave addr. */
+ pci1xxxx_i2c_buffer_write(i2c, slaveaddr, 0, NULL);
+
+ /* Set the count. Readcount is the transfer bytes. */
+ pci1xxxx_i2c_set_count(i2c, 1, 1, transferlen);
+
+ /*
+ * Set the Auto_start_read bit so that the HW itself
+ * will take care of the read phase.
+ */
+ pci1xxxx_i2c_config_asr(i2c, true);
+ if (i2c->flags & I2C_FLAGS_SMB_BLK_READ)
+ pci1xxxx_i2c_set_readm(i2c, true);
+ } else {
+ pci1xxxx_i2c_set_count(i2c, 0, 0, transferlen);
+ pci1xxxx_i2c_config_asr(i2c, false);
+ pci1xxxx_i2c_clear_flags(i2c);
+ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_READ);
+ }
+
+ /* Start the DMA. */
+ pci1xxxx_i2c_start_DMA(i2c);
+
+ /* Wait for the DMA_TERM interrupt. */
+ time_left = wait_for_completion_timeout(&i2c->i2c_xfer_done,
+ msecs_to_jiffies(PCI1XXXX_I2C_TIMEOUT_MS));
+ if (time_left == 0) {
+ /* Reset the I2C core to release the bus lock. */
+ pci1xxxx_i2c_init(i2c);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ /* Read the completion reg to know the reason for DMA_TERM. */
+ regval = readb(p2);
+
+ /* Slave did not respond. */
+ if (regval & COMPLETION_MNAKX) {
+ writeb(COMPLETION_MNAKX, p2);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ if (i2c->flags & I2C_FLAGS_SMB_BLK_READ) {
+ buf[0] = readb(p3);
+ read_count = buf[0];
+ memcpy_fromio(&buf[1], p3 + 1, read_count);
+ } else {
+ memcpy_fromio(&buf[count], p3, transferlen);
+ }
+ }
+
+cleanup:
+ /* Disable all the interrupts. */
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, false);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, false);
+ pci1xxxx_i2c_config_asr(i2c, false);
+ return retval;
+}
+
+static int pci1xxxx_i2c_write(struct pci1xxxx_i2c *i2c, u8 slaveaddr,
+ unsigned char *buf, u16 total_len)
+{
+ void __iomem *p2 = i2c->i2c_base + SMB_CORE_COMPLETION_REG_OFF3;
+ void __iomem *p1 = i2c->i2c_base + SMB_CORE_CMD_REG_OFF1;
+ unsigned long time_left;
+ u16 remainingbytes;
+ u8 actualwritelen;
+ u8 transferlen;
+ int retval = 0;
+ u32 regval;
+ u16 count;
+
+ /* Enable I2C host controller by setting the ESO bit in the CONTROL REG. */
+ pci1xxxx_i2c_enable_ESO(i2c);
+
+ /* Set the Buffer direction. */
+ pci1xxxx_i2c_set_transfer_dir(i2c, I2C_DIRN_WRITE);
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, true);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, true);
+
+ /*
+ * The i2c transfer could be more than 128 bytes. Our Core is
+ * capable of only sending 128 at a time.
+ */
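+ /*
+ * For example, with SMBUS_BUF_MAX_SIZE of 128 bytes, a 300-byte
+ * write is split into chunks of 127, 128 and 45 bytes, since the
+ * first chunk also carries the slave address (length chosen purely
+ * for illustration).
+ */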
+ for (count = 0; count < total_len; count += transferlen) {
+ /*
+ * Before start of any transaction clear the existing
+ * START/STOP conditions.
+ */
+ writeb(0, p1);
+ pci1xxxx_i2c_clear_flags(i2c);
+ remainingbytes = total_len - count;
+
+ /* If it is the starting of the transaction send START. */
+ if (count == 0) {
+ pci1xxxx_i2c_send_start_stop(i2c, 1);
+
+ /* -1 for the slave address. */
+ transferlen = min_t(u16, SMBUS_BUF_MAX_SIZE - 1,
+ remainingbytes);
+ pci1xxxx_i2c_buffer_write(i2c, slaveaddr,
+ transferlen, &buf[count]);
+ /*
+ * The actual number of bytes written on the I2C bus
+ * includes the slave address.
+ */
+ actualwritelen = transferlen + 1;
+ } else {
+ transferlen = min_t(u16, SMBUS_BUF_MAX_SIZE, remainingbytes);
+ pci1xxxx_i2c_buffer_write(i2c, 0, transferlen, &buf[count]);
+ actualwritelen = transferlen;
+ }
+
+ pci1xxxx_i2c_set_count(i2c, actualwritelen, actualwritelen, 0);
+
+ /*
+ * Send STOP only when I2C_FLAGS_STOP bit is set in the flags and
+ * only for the last transaction.
+ */
+ if (remainingbytes <= transferlen &&
+ (i2c->flags & I2C_FLAGS_STOP))
+ pci1xxxx_i2c_send_start_stop(i2c, 0);
+
+ pci1xxxx_i2c_start_DMA(i2c);
+
+ /*
+ * Wait for the DMA_TERM interrupt.
+ */
+ time_left = wait_for_completion_timeout(&i2c->i2c_xfer_done,
+ msecs_to_jiffies(PCI1XXXX_I2C_TIMEOUT_MS));
+ if (time_left == 0) {
+ /* Reset the I2C core to release the bus lock. */
+ pci1xxxx_i2c_init(i2c);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+
+ regval = readb(p2);
+ if (regval & COMPLETION_MNAKX) {
+ writeb(COMPLETION_MNAKX, p2);
+ retval = -ETIMEDOUT;
+ goto cleanup;
+ }
+ }
+cleanup:
+ /* Disable all the interrupts. */
+ pci1xxxx_config_nw_layer_intr(i2c, INTR_MSK_DMA_TERM, false);
+ pci1xxxx_i2c_config_high_level_intr(i2c, I2C_BUF_MSTR_INTR_MASK, false);
+
+ return retval;
+}
+
+static int pci1xxxx_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct pci1xxxx_i2c *i2c = i2c_get_adapdata(adap);
+ u8 slaveaddr;
+ int retval;
+ u32 i;
+
+ i2c->i2c_xfer_in_progress = true;
+ for (i = 0; i < num; i++) {
+ slaveaddr = i2c_8bit_addr_from_msg(&msgs[i]);
+
+ /*
+ * Send the STOP bit if the transfer is the final one or
+ * if the I2C_M_STOP flag is set.
+ */
+ if ((i == num - 1) || (msgs[i].flags & I2C_M_STOP))
+ i2c->flags |= I2C_FLAGS_STOP;
+ else
+ i2c->flags &= ~I2C_FLAGS_STOP;
+
+ if (msgs[i].flags & I2C_M_RECV_LEN)
+ i2c->flags |= I2C_FLAGS_SMB_BLK_READ;
+ else
+ i2c->flags &= ~I2C_FLAGS_SMB_BLK_READ;
+
+ if (msgs[i].flags & I2C_M_RD)
+ retval = pci1xxxx_i2c_read(i2c, slaveaddr,
+ msgs[i].buf, msgs[i].len);
+ else
+ retval = pci1xxxx_i2c_write(i2c, slaveaddr,
+ msgs[i].buf, msgs[i].len);
+
+ if (retval < 0)
+ break;
+ }
+ i2c->i2c_xfer_in_progress = false;
+
+ if (retval < 0)
+ return retval;
+
+ return num;
+}
+
+/*
+ * List of supported functions by the driver.
+ */
+static u32 pci1xxxx_i2c_get_funcs(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_PROTOCOL_MANGLING |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm pci1xxxx_i2c_algo = {
+ .master_xfer = pci1xxxx_i2c_xfer,
+ .functionality = pci1xxxx_i2c_get_funcs,
+};
+
+static const struct i2c_adapter_quirks pci1xxxx_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN,
+};
+
+static const struct i2c_adapter pci1xxxx_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "PCI1xxxx I2C Adapter",
+ .algo = &pci1xxxx_i2c_algo,
+ .quirks = &pci1xxxx_i2c_quirks,
+};
+
+static int pci1xxxx_i2c_suspend(struct device *dev)
+{
+ struct pci1xxxx_i2c *i2c = dev_get_drvdata(dev);
+ void __iomem *p = i2c->i2c_base + SMBUS_RESET_REG;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 regval;
+
+ i2c_mark_adapter_suspended(&i2c->adap);
+
+ /*
+	 * If the system is put into the 'suspend' state while an I2C transfer
+	 * is in progress, wait until the transfer completes.
+ */
+ while (i2c->i2c_xfer_in_progress)
+ msleep(20);
+
+ pci1xxxx_i2c_config_high_level_intr(i2c, SMBALERT_WAKE_INTR_MASK, true);
+
+ /*
+ * Enable the PERST_DIS bit to mask the PERST from resetting the core
+ * registers.
+ */
+ regval = readl(p);
+ regval |= PERI_SMBUS_D3_RESET_DIS;
+ writel(regval, p);
+
+ /* Enable PCI wake in the PMCSR register. */
+ device_set_wakeup_enable(dev, true);
+ pci_wake_from_d3(pdev, true);
+
+ return 0;
+}
+
+static int pci1xxxx_i2c_resume(struct device *dev)
+{
+ struct pci1xxxx_i2c *i2c = dev_get_drvdata(dev);
+ void __iomem *p1 = i2c->i2c_base + SMBUS_GEN_INT_STAT_REG_OFF;
+ void __iomem *p2 = i2c->i2c_base + SMBUS_RESET_REG;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 regval;
+
+ regval = readw(p1);
+ writew(regval, p1);
+ pci1xxxx_i2c_config_high_level_intr(i2c, SMBALERT_WAKE_INTR_MASK, false);
+ regval = readl(p2);
+ regval &= ~PERI_SMBUS_D3_RESET_DIS;
+ writel(regval, p2);
+ i2c_mark_adapter_resumed(&i2c->adap);
+ pci_wake_from_d3(pdev, false);
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_i2c_pm_ops, pci1xxxx_i2c_suspend,
+ pci1xxxx_i2c_resume);
+
+static void pci1xxxx_i2c_shutdown(struct pci1xxxx_i2c *i2c)
+{
+ pci1xxxx_i2c_config_padctrl(i2c, false);
+ pci1xxxx_i2c_configure_core_reg(i2c, false);
+}
+
+static int pci1xxxx_i2c_probe_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct pci1xxxx_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, i2c);
+ i2c->i2c_xfer_in_progress = false;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ /*
+	 * Get the base address of the SMB core. The SMB core uses BAR0,
+	 * which is 32K in size.
+ */
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret < 0)
+ return ret;
+
+ i2c->i2c_base = pcim_iomap_table(pdev)[0];
+ init_completion(&i2c->i2c_xfer_done);
+ pci1xxxx_i2c_init(i2c);
+
+ ret = devm_add_action(dev, (void (*)(void *))pci1xxxx_i2c_shutdown, i2c);
+ if (ret)
+ return ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(dev, pci_irq_vector(pdev, 0), pci1xxxx_i2c_isr,
+ 0, pci_name(pdev), i2c);
+ if (ret)
+ return ret;
+
+ i2c->adap = pci1xxxx_i2c_ops;
+ i2c->adap.dev.parent = dev;
+
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "MCHP PCI1xxxx i2c adapter at %s", pci_name(pdev));
+
+ i2c_set_adapdata(&i2c->adap, i2c);
+
+ ret = devm_i2c_add_adapter(dev, &i2c->adap);
+ if (ret)
+ return dev_err_probe(dev, ret, "i2c add adapter failed\n");
+
+ return 0;
+}
+
+static const struct pci_device_id pci1xxxx_i2c_pci_id_table[] = {
+ { PCI_VDEVICE(EFAR, 0xA003) },
+ { PCI_VDEVICE(EFAR, 0xA013) },
+ { PCI_VDEVICE(EFAR, 0xA023) },
+ { PCI_VDEVICE(EFAR, 0xA033) },
+ { PCI_VDEVICE(EFAR, 0xA043) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pci1xxxx_i2c_pci_id_table);
+
+static struct pci_driver pci1xxxx_i2c_pci_driver = {
+ .name = "i2c-mchp-pci1xxxx",
+ .id_table = pci1xxxx_i2c_pci_id_table,
+ .probe = pci1xxxx_i2c_probe_pci,
+ .driver = {
+ .pm = pm_sleep_ptr(&pci1xxxx_i2c_pm_ops),
+ },
+};
+module_pci_driver(pci1xxxx_i2c_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tharun Kumar P<tharunkumar.pasumarthi@microchip.com>");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_DESCRIPTION("Microchip Technology Inc. pci1xxxx I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index 8716032f030a..e68e775f187e 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -31,8 +32,6 @@
(MLXBF_I2C_FUNC_SMBUS_DEFAULT | MLXBF_I2C_FUNC_SMBUS_BLOCK | \
I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SLAVE)
-#define MLXBF_I2C_SMBUS_MAX 3
-
/* Shared resources info in BlueField platforms. */
#define MLXBF_I2C_COALESCE_TYU_ADDR 0x02801300
@@ -47,6 +46,9 @@
#define MLXBF_I2C_COREPLL_YU_ADDR 0x02800c30
#define MLXBF_I2C_COREPLL_YU_SIZE 0x00c
+#define MLXBF_I2C_COREPLL_RSH_YU_ADDR 0x13409824
+#define MLXBF_I2C_COREPLL_RSH_YU_SIZE 0x00c
+
#define MLXBF_I2C_SHARED_RES_MAX 3
/*
@@ -63,13 +65,14 @@
*/
#define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000)
/* Reference clock for Bluefield - 156 MHz. */
-#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000)
+#define MLXBF_I2C_PLL_IN_FREQ 156250000ULL
/* Constant used to determine the PLL frequency. */
-#define MLNXBF_I2C_COREPLL_CONST 16384
+#define MLNXBF_I2C_COREPLL_CONST 16384ULL
+
+#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000ULL
/* PLL registers. */
-#define MLXBF_I2C_CORE_PLL_REG0 0x0
#define MLXBF_I2C_CORE_PLL_REG1 0x4
#define MLXBF_I2C_CORE_PLL_REG2 0x8
@@ -129,14 +132,10 @@
/* Slave busy bit reset. */
#define MLXBF_I2C_CAUSE_S_GW_BUSY_FALL BIT(18)
-#define MLXBF_I2C_CAUSE_SLAVE_ARBITER_BITS_MASK GENMASK(20, 0)
-
/* Cause coalesce registers. */
#define MLXBF_I2C_CAUSE_COALESCE_0 0x00
-#define MLXBF_I2C_CAUSE_COALESCE_1 0x04
-#define MLXBF_I2C_CAUSE_COALESCE_2 0x08
-#define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT MLXBF_I2C_SMBUS_MAX
+#define MLXBF_I2C_CAUSE_TYU_SLAVE_BIT 3
#define MLXBF_I2C_CAUSE_YU_SLAVE_BIT 1
/* Functional enable register. */
@@ -163,15 +162,6 @@
#define MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(num, val) \
((val) | (0x3 << MLXBF_I2C_GPIO_SMBUS_GW_PINS(num)))
-/* SMBus timing parameters. */
-#define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00
-#define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04
-#define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08
-#define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c
-#define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10
-#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
-#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
-
/*
* Defines SMBus operating frequency and core clock frequency.
* According to ADB files, default values are compliant to 100KHz SMBus
@@ -181,42 +171,46 @@
#define MLXBF_I2C_COREPLL_FREQ MLXBF_I2C_TYU_PLL_OUT_FREQ
/* Core PLL TYU configuration. */
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(12, 0)
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(5, 0)
-
-#define MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT 3
-#define MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT 16
-#define MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT 20
+#define MLXBF_I2C_COREPLL_CORE_F_TYU_MASK GENMASK(15, 3)
+#define MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK GENMASK(19, 16)
+#define MLXBF_I2C_COREPLL_CORE_R_TYU_MASK GENMASK(25, 20)
/* Core PLL YU configuration. */
#define MLXBF_I2C_COREPLL_CORE_F_YU_MASK GENMASK(25, 0)
#define MLXBF_I2C_COREPLL_CORE_OD_YU_MASK GENMASK(3, 0)
-#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(5, 0)
+#define MLXBF_I2C_COREPLL_CORE_R_YU_MASK GENMASK(31, 26)
-#define MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT 0
-#define MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT 1
-#define MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT 26
+/* SMBus timing parameters. */
+#define MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH 0x00
+#define MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE 0x04
+#define MLXBF_I2C_SMBUS_TIMER_THOLD 0x08
+#define MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP 0x0c
+#define MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA 0x10
+#define MLXBF_I2C_SMBUS_THIGH_MAX_TBUF 0x14
+#define MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT 0x18
-/* Core PLL frequency. */
-static u64 mlxbf_i2c_corepll_frequency;
+#define MLXBF_I2C_SHIFT_0 0
+#define MLXBF_I2C_SHIFT_8 8
+#define MLXBF_I2C_SHIFT_16 16
+#define MLXBF_I2C_SHIFT_24 24
+
+#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
+#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
+
+#define MLXBF_I2C_MST_ADDR_OFFSET 0x200
/* SMBus Master GW. */
-#define MLXBF_I2C_SMBUS_MASTER_GW 0x200
+#define MLXBF_I2C_SMBUS_MASTER_GW 0x0
/* Number of bytes received and sent. */
-#define MLXBF_I2C_SMBUS_RS_BYTES 0x300
+#define MLXBF_I2C_YU_SMBUS_RS_BYTES 0x100
+#define MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES 0x10c
/* Packet error check (PEC) value. */
-#define MLXBF_I2C_SMBUS_MASTER_PEC 0x304
+#define MLXBF_I2C_SMBUS_MASTER_PEC 0x104
/* Status bits (ACK/NACK/FW Timeout). */
-#define MLXBF_I2C_SMBUS_MASTER_STATUS 0x308
+#define MLXBF_I2C_SMBUS_MASTER_STATUS 0x108
/* SMbus Master Finite State Machine. */
-#define MLXBF_I2C_SMBUS_MASTER_FSM 0x310
-
-/*
- * When enabled, the master will issue a stop condition in case of
- * timeout while waiting for FW response.
- */
-#define MLXBF_I2C_SMBUS_EN_FW_TIMEOUT 0x31c
+#define MLXBF_I2C_YU_SMBUS_MASTER_FSM 0x110
+#define MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM 0x100
/* SMBus master GW control bits offset in MLXBF_I2C_SMBUS_MASTER_GW[31:3]. */
#define MLXBF_I2C_MASTER_LOCK_BIT BIT(31) /* Lock bit. */
@@ -236,14 +230,14 @@ static u64 mlxbf_i2c_corepll_frequency;
#define MLXBF_I2C_MASTER_ENABLE_READ \
(MLXBF_I2C_MASTER_ENABLE | MLXBF_I2C_MASTER_CTL_READ_BIT)
-#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT 12 /* Slave address shift. */
-#define MLXBF_I2C_MASTER_WRITE_SHIFT 21 /* Control write bytes shift. */
-#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT 20 /* Send PEC byte shift. */
-#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT 11 /* Parse expected bytes shift. */
-#define MLXBF_I2C_MASTER_READ_SHIFT 4 /* Control read bytes shift. */
+#define MLXBF_I2C_MASTER_WRITE_SHIFT 21 /* Control write bytes */
+#define MLXBF_I2C_MASTER_SEND_PEC_SHIFT 20 /* Send PEC byte when set to 1 */
+#define MLXBF_I2C_MASTER_PARSE_EXP_SHIFT 11 /* Control parse expected bytes */
+#define MLXBF_I2C_MASTER_SLV_ADDR_SHIFT 12 /* Slave address */
+#define MLXBF_I2C_MASTER_READ_SHIFT 4 /* Control read bytes */
/* SMBus master GW Data descriptor. */
-#define MLXBF_I2C_MASTER_DATA_DESC_ADDR 0x280
+#define MLXBF_I2C_MASTER_DATA_DESC_ADDR 0x80
#define MLXBF_I2C_MASTER_DATA_DESC_SIZE 0x80 /* Size in bytes. */
/* Maximum bytes to read/write per SMBus transaction. */
@@ -269,19 +263,21 @@ static u64 mlxbf_i2c_corepll_frequency;
#define MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK BIT(31)
#define MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK BIT(15)
+#define MLXBF_I2C_SLV_ADDR_OFFSET 0x400
+
/* SMBus slave GW. */
-#define MLXBF_I2C_SMBUS_SLAVE_GW 0x400
+#define MLXBF_I2C_SMBUS_SLAVE_GW 0x0
/* Number of bytes received and sent from/to master. */
-#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES 0x500
+#define MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES 0x100
/* Packet error check (PEC) value. */
-#define MLXBF_I2C_SMBUS_SLAVE_PEC 0x504
+#define MLXBF_I2C_SMBUS_SLAVE_PEC 0x104
/* SMBus slave Finite State Machine (FSM). */
-#define MLXBF_I2C_SMBUS_SLAVE_FSM 0x510
+#define MLXBF_I2C_SMBUS_SLAVE_FSM 0x110
/*
* Should be set when all raised causes handled, and cleared by HW on
* every new cause.
*/
-#define MLXBF_I2C_SMBUS_SLAVE_READY 0x52c
+#define MLXBF_I2C_SMBUS_SLAVE_READY 0x12c
/* SMBus slave GW control bits offset in MLXBF_I2C_SMBUS_SLAVE_GW[31:19]. */
#define MLXBF_I2C_SLAVE_BUSY_BIT BIT(30) /* Busy bit. */
@@ -294,23 +290,74 @@ static u64 mlxbf_i2c_corepll_frequency;
#define MLXBF_I2C_SLAVE_SEND_PEC_SHIFT 21 /* Send PEC byte shift. */
/* SMBus slave GW Data descriptor. */
-#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR 0x480
+#define MLXBF_I2C_SLAVE_DATA_DESC_ADDR 0x80
#define MLXBF_I2C_SLAVE_DATA_DESC_SIZE 0x80 /* Size in bytes. */
/* SMbus slave configuration registers. */
-#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG 0x514
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG 0x114
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT 16
-#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT 7
+#define MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT BIT(7)
#define MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK GENMASK(6, 0)
-#define MLXBF_I2C_SLAVE_ADDR_ENABLED(addr) \
- ((addr) & (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT))
-
/*
 * Timeout is given in microseconds. Note also that timeout handling is not
* exact.
*/
#define MLXBF_I2C_SMBUS_TIMEOUT (300 * 1000) /* 300ms */
+#define MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT (300 * 1000) /* 300ms */
+
+/* Polling frequency in microseconds. */
+#define MLXBF_I2C_POLL_FREQ_IN_USEC 200
+
+#define MLXBF_I2C_SMBUS_OP_CNT_1 1
+#define MLXBF_I2C_SMBUS_OP_CNT_2 2
+#define MLXBF_I2C_SMBUS_OP_CNT_3 3
+#define MLXBF_I2C_SMBUS_MAX_OP_CNT MLXBF_I2C_SMBUS_OP_CNT_3
+
+/* Helper macro to define I2C resource parameters. */
+#define MLXBF_I2C_RES_PARAMS(addr, size, str) \
+ { \
+ .start = (addr), \
+ .end = (addr) + (size) - 1, \
+ .name = (str) \
+ }
+
+enum {
+ MLXBF_I2C_TIMING_100KHZ = 100000,
+ MLXBF_I2C_TIMING_400KHZ = 400000,
+ MLXBF_I2C_TIMING_1000KHZ = 1000000,
+};
+
+enum {
+ MLXBF_I2C_F_READ = BIT(0),
+ MLXBF_I2C_F_WRITE = BIT(1),
+ MLXBF_I2C_F_NORESTART = BIT(3),
+ MLXBF_I2C_F_SMBUS_OPERATION = BIT(4),
+ MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
+ MLXBF_I2C_F_SMBUS_PEC = BIT(6),
+ MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
+};
+
+/* Mellanox BlueField chip type. */
+enum mlxbf_i2c_chip_type {
+ MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */
+ MLXBF_I2C_CHIP_TYPE_2, /* Mellanox BlueField-2 chip. */
+ MLXBF_I2C_CHIP_TYPE_3 /* Mellanox BlueField-3 chip. */
+};
+
+/* List of chip resources that are being accessed by the driver. */
+enum {
+ MLXBF_I2C_SMBUS_RES,
+ MLXBF_I2C_MST_CAUSE_RES,
+ MLXBF_I2C_SLV_CAUSE_RES,
+ MLXBF_I2C_COALESCE_RES,
+ MLXBF_I2C_SMBUS_TIMER_RES,
+ MLXBF_I2C_SMBUS_MST_RES,
+ MLXBF_I2C_SMBUS_SLV_RES,
+ MLXBF_I2C_COREPLL_RES,
+ MLXBF_I2C_GPIO_RES,
+ MLXBF_I2C_END_RES
+};
/* Encapsulates timing parameters. */
struct mlxbf_i2c_timings {
@@ -331,27 +378,12 @@ struct mlxbf_i2c_timings {
u32 timeout; /* Detect clock low timeout. */
};
-enum {
- MLXBF_I2C_F_READ = BIT(0),
- MLXBF_I2C_F_WRITE = BIT(1),
- MLXBF_I2C_F_NORESTART = BIT(3),
- MLXBF_I2C_F_SMBUS_OPERATION = BIT(4),
- MLXBF_I2C_F_SMBUS_BLOCK = BIT(5),
- MLXBF_I2C_F_SMBUS_PEC = BIT(6),
- MLXBF_I2C_F_SMBUS_PROCESS_CALL = BIT(7),
-};
-
struct mlxbf_i2c_smbus_operation {
u32 flags;
u32 length; /* Buffer length in bytes. */
u8 *buffer;
};
-#define MLXBF_I2C_SMBUS_OP_CNT_1 1
-#define MLXBF_I2C_SMBUS_OP_CNT_2 2
-#define MLXBF_I2C_SMBUS_OP_CNT_3 3
-#define MLXBF_I2C_SMBUS_MAX_OP_CNT MLXBF_I2C_SMBUS_OP_CNT_3
-
struct mlxbf_i2c_smbus_request {
u8 slave;
u8 operation_cnt;
@@ -365,24 +397,38 @@ struct mlxbf_i2c_resource {
u8 type;
};
-/* List of chip resources that are being accessed by the driver. */
-enum {
- MLXBF_I2C_SMBUS_RES,
- MLXBF_I2C_MST_CAUSE_RES,
- MLXBF_I2C_SLV_CAUSE_RES,
- MLXBF_I2C_COALESCE_RES,
- MLXBF_I2C_COREPLL_RES,
- MLXBF_I2C_GPIO_RES,
- MLXBF_I2C_END_RES,
+struct mlxbf_i2c_chip_info {
+ enum mlxbf_i2c_chip_type type;
+ /* Chip shared resources that are being used by the I2C controller. */
+ struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX];
+
+ /* Callback to calculate the core PLL frequency. */
+ u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res);
+
+ /* Registers' address offset */
+ u32 smbus_master_rs_bytes_off;
+ u32 smbus_master_fsm_off;
};
-/* Helper macro to define an I2C resource parameters. */
-#define MLXBF_I2C_RES_PARAMS(addr, size, str) \
- { \
- .start = (addr), \
- .end = (addr) + (size) - 1, \
- .name = (str) \
- }
+struct mlxbf_i2c_priv {
+ const struct mlxbf_i2c_chip_info *chip;
+ struct i2c_adapter adap;
+ struct mlxbf_i2c_resource *smbus;
+ struct mlxbf_i2c_resource *timer;
+ struct mlxbf_i2c_resource *mst;
+ struct mlxbf_i2c_resource *slv;
+ struct mlxbf_i2c_resource *mst_cause;
+ struct mlxbf_i2c_resource *slv_cause;
+ struct mlxbf_i2c_resource *coalesce;
+ u64 frequency; /* Core frequency in Hz. */
+ int bus; /* Physical bus identifier. */
+ int irq;
+ struct i2c_client *slave[MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT];
+ u32 resource_version;
+};
+
+/* Core PLL frequency. */
+static u64 mlxbf_i2c_corepll_frequency;
static struct resource mlxbf_i2c_coalesce_tyu_params =
MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COALESCE_TYU_ADDR,
@@ -396,6 +442,10 @@ static struct resource mlxbf_i2c_corepll_yu_params =
MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_YU_ADDR,
MLXBF_I2C_COREPLL_YU_SIZE,
"COREPLL_MEM");
+static struct resource mlxbf_i2c_corepll_rsh_yu_params =
+ MLXBF_I2C_RES_PARAMS(MLXBF_I2C_COREPLL_RSH_YU_ADDR,
+ MLXBF_I2C_COREPLL_RSH_YU_SIZE,
+ "COREPLL_MEM");
static struct resource mlxbf_i2c_gpio_tyu_params =
MLXBF_I2C_RES_PARAMS(MLXBF_I2C_GPIO_TYU_ADDR,
MLXBF_I2C_GPIO_TYU_SIZE,
@@ -405,34 +455,6 @@ static struct mutex mlxbf_i2c_coalesce_lock;
static struct mutex mlxbf_i2c_corepll_lock;
static struct mutex mlxbf_i2c_gpio_lock;
-/* Mellanox BlueField chip type. */
-enum mlxbf_i2c_chip_type {
- MLXBF_I2C_CHIP_TYPE_1, /* Mellanox BlueField-1 chip. */
- MLXBF_I2C_CHIP_TYPE_2, /* Mallanox BlueField-2 chip. */
-};
-
-struct mlxbf_i2c_chip_info {
- enum mlxbf_i2c_chip_type type;
- /* Chip shared resources that are being used by the I2C controller. */
- struct mlxbf_i2c_resource *shared_res[MLXBF_I2C_SHARED_RES_MAX];
-
- /* Callback to calculate the core PLL frequency. */
- u64 (*calculate_freq)(struct mlxbf_i2c_resource *corepll_res);
-};
-
-struct mlxbf_i2c_priv {
- const struct mlxbf_i2c_chip_info *chip;
- struct i2c_adapter adap;
- struct mlxbf_i2c_resource *smbus;
- struct mlxbf_i2c_resource *mst_cause;
- struct mlxbf_i2c_resource *slv_cause;
- struct mlxbf_i2c_resource *coalesce;
- u64 frequency; /* Core frequency in Hz. */
- int bus; /* Physical bus identifier. */
- int irq;
- struct i2c_client *slave;
-};
-
static struct mlxbf_i2c_resource mlxbf_i2c_coalesce_res[] = {
[MLXBF_I2C_CHIP_TYPE_1] = {
.params = &mlxbf_i2c_coalesce_tyu_params,
@@ -452,6 +474,11 @@ static struct mlxbf_i2c_resource mlxbf_i2c_corepll_res[] = {
.params = &mlxbf_i2c_corepll_yu_params,
.lock = &mlxbf_i2c_corepll_lock,
.type = MLXBF_I2C_COREPLL_RES,
+ },
+ [MLXBF_I2C_CHIP_TYPE_3] = {
+ .params = &mlxbf_i2c_corepll_rsh_yu_params,
+ .lock = &mlxbf_i2c_corepll_lock,
+ .type = MLXBF_I2C_COREPLL_RES,
}
};
@@ -468,26 +495,13 @@ static u8 mlxbf_i2c_bus_count;
static struct mutex mlxbf_i2c_bus_lock;
-/* Polling frequency in microseconds. */
-#define MLXBF_I2C_POLL_FREQ_IN_USEC 200
-
-#define MLXBF_I2C_SHIFT_0 0
-#define MLXBF_I2C_SHIFT_8 8
-#define MLXBF_I2C_SHIFT_16 16
-#define MLXBF_I2C_SHIFT_24 24
-
-#define MLXBF_I2C_MASK_8 GENMASK(7, 0)
-#define MLXBF_I2C_MASK_16 GENMASK(15, 0)
-
-#define MLXBF_I2C_FREQUENCY_1GHZ 1000000000
-
/*
* Function to poll a set of bits at a specific address; it checks whether
* the bits are equal to zero when eq_zero is set to 'true', and not equal
* to zero when eq_zero is set to 'false'.
* Note that the timeout is given in microseconds.
*/
-static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
+static u32 mlxbf_i2c_poll(void __iomem *io, u32 addr, u32 mask,
bool eq_zero, u32 timeout)
{
u32 bits;
@@ -509,18 +523,37 @@ static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask,
* a transaction. Accordingly, this function polls the Master FSM stop
* bit; it returns false when the bit is asserted, true if not.
*/
-static bool mlxbf_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
+static bool mlxbf_i2c_smbus_master_wait_for_idle(struct mlxbf_i2c_priv *priv)
{
u32 mask = MLXBF_I2C_SMBUS_MASTER_FSM_STOP_MASK;
- u32 addr = MLXBF_I2C_SMBUS_MASTER_FSM;
+ u32 addr = priv->chip->smbus_master_fsm_off;
u32 timeout = MLXBF_I2C_SMBUS_TIMEOUT;
- if (mlxbf_smbus_poll(priv->smbus->io, addr, mask, true, timeout))
+ if (mlxbf_i2c_poll(priv->mst->io, addr, mask, true, timeout))
return true;
return false;
}
+/*
+ * Wait for the lock to be released before acquiring it.
+ */
+static bool mlxbf_i2c_smbus_master_lock(struct mlxbf_i2c_priv *priv)
+{
+ if (mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ MLXBF_I2C_MASTER_LOCK_BIT, true,
+ MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT))
+ return true;
+
+ return false;
+}
+
+static void mlxbf_i2c_smbus_master_unlock(struct mlxbf_i2c_priv *priv)
+{
+	/* Clear the GW register to release the lock. */
+ writel(0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
+}
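
The lock manipulated here is a hardware bit: as the comment in mlxbf_i2c_smbus_start_transaction notes, a read of the GW register sets the lock, so polling until MLXBF_I2C_MASTER_LOCK_BIT reads back clear acquires it, and writing 0 releases it. A sketch of an equivalent acquire using the generic iopoll helper, offered only as an illustration of the pattern, not as the driver's code:

#include <linux/iopoll.h>

/* Illustrative only: poll until the lock bit clears or the timeout elapses. */
static int example_acquire_gw_lock(void __iomem *gw_reg)
{
	u32 val;

	/* The read that sees the bit clear also sets it, taking the lock. */
	return readl_poll_timeout(gw_reg, val,
				  !(val & MLXBF_I2C_MASTER_LOCK_BIT),
				  MLXBF_I2C_POLL_FREQ_IN_USEC,
				  MLXBF_I2C_SMBUS_LOCK_POLL_TIMEOUT);
}
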
+
static bool mlxbf_i2c_smbus_transaction_success(u32 master_status,
u32 cause_status)
{
@@ -558,7 +591,7 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
* then read the cause and master status bits to determine if
* errors occurred during the transaction.
*/
- mlxbf_smbus_poll(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW,
+ mlxbf_i2c_poll(priv->mst->io, MLXBF_I2C_SMBUS_MASTER_GW,
MLXBF_I2C_MASTER_BUSY_BIT, true,
MLXBF_I2C_SMBUS_TIMEOUT);
@@ -571,7 +604,7 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
* Parse both Cause and Master GW bits, then return transaction status.
*/
- master_status_bits = readl(priv->smbus->io +
+ master_status_bits = readl(priv->mst->io +
MLXBF_I2C_SMBUS_MASTER_STATUS);
master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK;
@@ -596,7 +629,8 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv)
}
static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
- const u8 *data, u8 length, u32 addr)
+ const u8 *data, u8 length, u32 addr,
+ bool is_master)
{
u8 offset, aligned_length;
u32 data32;
@@ -613,12 +647,16 @@ static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv,
*/
for (offset = 0; offset < aligned_length; offset += sizeof(u32)) {
data32 = *((u32 *)(data + offset));
- iowrite32be(data32, priv->smbus->io + addr + offset);
+ if (is_master)
+ iowrite32be(data32, priv->mst->io + addr + offset);
+ else
+ iowrite32be(data32, priv->slv->io + addr + offset);
}
}
static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
- u8 *data, u8 length, u32 addr)
+ u8 *data, u8 length, u32 addr,
+ bool is_master)
{
u32 data32, mask;
u8 byte, offset;
@@ -634,14 +672,20 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv,
*/
for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) {
- data32 = ioread32be(priv->smbus->io + addr + offset);
+ if (is_master)
+ data32 = ioread32be(priv->mst->io + addr + offset);
+ else
+ data32 = ioread32be(priv->slv->io + addr + offset);
*((u32 *)(data + offset)) = data32;
}
if (!(length & mask))
return;
- data32 = ioread32be(priv->smbus->io + addr + offset);
+ if (is_master)
+ data32 = ioread32be(priv->mst->io + addr + offset);
+ else
+ data32 = ioread32be(priv->slv->io + addr + offset);
for (byte = 0; byte < (length & mask); byte++) {
data[offset + byte] = data32 & GENMASK(7, 0);
@@ -667,16 +711,16 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave,
command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT);
/* Clear status bits. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
+ writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_STATUS);
/* Set the cause data. */
- writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR);
+ writel(~0x0, priv->mst_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR);
/* Zero PEC byte. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC);
+ writel(0x0, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_PEC);
/* Zero byte count. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_RS_BYTES);
+ writel(0x0, priv->mst->io + priv->chip->smbus_master_rs_bytes_off);
/* GW activation. */
- writel(command, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW);
+ writel(command, priv->mst->io + MLXBF_I2C_SMBUS_MASTER_GW);
/*
* Poll master status and check status bits. An ACK is sent when
@@ -712,10 +756,19 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
slave = request->slave & GENMASK(6, 0);
addr = slave << 1;
- /* First of all, check whether the HW is idle. */
- if (WARN_ON(!mlxbf_smbus_master_wait_for_idle(priv)))
+ /*
+ * Try to acquire the smbus gw lock before any reads of the GW register since
+ * a read sets the lock.
+ */
+ if (WARN_ON(!mlxbf_i2c_smbus_master_lock(priv)))
return -EBUSY;
+ /* Check whether the HW is idle */
+ if (WARN_ON(!mlxbf_i2c_smbus_master_wait_for_idle(priv))) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
/* Set first byte. */
data_desc[data_idx++] = addr;
@@ -738,6 +791,11 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
if (flags & MLXBF_I2C_F_WRITE) {
write_en = 1;
write_len += operation->length;
+ if (data_idx + operation->length >
+ MLXBF_I2C_MASTER_DATA_DESC_SIZE) {
+ ret = -ENOBUFS;
+ goto out_unlock;
+ }
memcpy(data_desc + data_idx,
operation->buffer, operation->length);
data_idx += operation->length;
@@ -763,25 +821,25 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
* must be written to the data registers.
*/
mlxbf_i2c_smbus_write_data(priv, (const u8 *)data_desc, data_len,
- MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
if (write_en) {
ret = mlxbf_i2c_smbus_enable(priv, slave, write_len, block_en,
pec_en, 0);
if (ret)
- return ret;
+ goto out_unlock;
}
if (read_en) {
/* Write slave address to Master GW data descriptor. */
mlxbf_i2c_smbus_write_data(priv, (const u8 *)&addr, 1,
- MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
ret = mlxbf_i2c_smbus_enable(priv, slave, read_len, block_en,
pec_en, 1);
if (!ret) {
/* Get Master GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, read_len + 1,
- MLXBF_I2C_MASTER_DATA_DESC_ADDR);
+ MLXBF_I2C_MASTER_DATA_DESC_ADDR, true);
/* Get data from Master GW data descriptor. */
memcpy(read_buf, data_desc, read_len + 1);
@@ -793,9 +851,12 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv,
* next tag integration.
*/
writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK,
- priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM);
+ priv->mst->io + priv->chip->smbus_master_fsm_off);
}
+out_unlock:
+ mlxbf_i2c_smbus_master_unlock(priv);
+
return ret;
}
@@ -1082,7 +1143,7 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
timer |= mlxbf_i2c_set_timer(priv, timings->scl_low,
false, MLXBF_I2C_MASK_16,
MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io +
+ writel(timer, priv->timer->io +
MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH);
timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false,
@@ -1093,34 +1154,34 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16);
timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false,
MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24);
- writel(timer, priv->smbus->io +
+ writel(timer, priv->timer->io +
MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE);
timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_THOLD);
timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io +
+ writel(timer, priv->timer->io +
MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP);
timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA);
timer = mlxbf_i2c_set_timer(priv, timings->buf, false,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0);
timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false,
MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16);
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF);
timer = timings->timeout;
- writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
+ writel(timer, priv->timer->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT);
}
enum mlxbf_i2c_timings_config {
@@ -1407,24 +1468,19 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev,
return 0;
}
-static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
{
- u64 core_frequency, pad_frequency;
+ u64 core_frequency;
u8 core_od, core_r;
u32 corepll_val;
u16 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
/* Get Core PLL configuration bits. */
- core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_TYU_MASK;
- core_od = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_OD_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK;
- core_r = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_R_TYU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_TYU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_val);
/*
* Compute PLL output frequency as follow:
@@ -1436,31 +1492,26 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- core_frequency = pad_frequency * (++core_f);
+ core_frequency = MLXBF_I2C_PLL_IN_FREQ * (++core_f);
core_frequency /= (++core_r) * (++core_od);
return core_frequency;
}
-static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
+static u64 mlxbf_i2c_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
{
u32 corepll_reg1_val, corepll_reg2_val;
- u64 corepll_frequency, pad_frequency;
+ u64 corepll_frequency;
u8 core_od, core_r;
u32 core_f;
- pad_frequency = MLXBF_I2C_PLL_IN_FREQ;
-
corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1);
corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2);
/* Get Core PLL configuration bits */
- core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_F_YU_MASK;
- core_r = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_R_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_R_YU_MASK;
- core_od = rol32(corepll_reg2_val, MLXBF_I2C_COREPLL_CORE_OD_YU_SHIFT) &
- MLXBF_I2C_COREPLL_CORE_OD_YU_MASK;
+ core_f = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_YU_MASK, corepll_reg1_val);
+ core_r = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_YU_MASK, corepll_reg1_val);
+ core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_YU_MASK, corepll_reg2_val);
/*
* Compute PLL output frequency as follow:
@@ -1472,7 +1523,7 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res)
* Where PLL_OUT_FREQ and PLL_IN_FREQ refer to CoreFrequency
* and PadFrequency, respectively.
*/
- corepll_frequency = (pad_frequency * core_f) / MLNXBF_I2C_COREPLL_CONST;
+ corepll_frequency = (MLXBF_I2C_PLL_IN_FREQ * core_f) / MLNXBF_I2C_COREPLL_CONST;
corepll_frequency /= (++core_r) * (++core_od);
return corepll_frequency;
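
Both calculations now pull each field with a single FIELD_GET() over a mask that encodes position and width, which is why <linux/bitfield.h> is added. A compact sketch of the TYU arithmetic, reusing the masks and PLL input constant defined above:

#include <linux/bitfield.h>

/* Illustrative only: TYU PLL output frequency from a sample COREPLL_REG1 value. */
static u64 example_tyu_pll_out(u32 corepll_reg1)
{
	u64 core_f  = FIELD_GET(MLXBF_I2C_COREPLL_CORE_F_TYU_MASK, corepll_reg1);
	u64 core_od = FIELD_GET(MLXBF_I2C_COREPLL_CORE_OD_TYU_MASK, corepll_reg1);
	u64 core_r  = FIELD_GET(MLXBF_I2C_COREPLL_CORE_R_TYU_MASK, corepll_reg1);

	/* PLL_OUT_FREQ = PLL_IN_FREQ * (F + 1) / ((R + 1) * (OD + 1)) */
	return MLXBF_I2C_PLL_IN_FREQ * (core_f + 1) /
	       ((core_r + 1) * (core_od + 1));
}

With example field values such as F = 99, R = 38 and OD = 0, this works out to roughly 400 MHz from the 156.25 MHz pad clock, in line with MLXBF_I2C_TYU_PLL_OUT_FREQ.
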
@@ -1523,28 +1574,26 @@ static int mlxbf_i2c_calculate_corepll_freq(struct platform_device *pdev,
return 0;
}
-static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
+static int mlxbf_i2c_slave_enable(struct mlxbf_i2c_priv *priv,
+ struct i2c_client *slave)
{
- u32 slave_reg, slave_reg_tmp, slave_reg_avail, slave_addr_mask;
- u8 reg, reg_cnt, byte, addr_tmp, reg_avail, byte_avail;
- bool avail, disabled;
-
- disabled = false;
- avail = false;
+ u8 reg, reg_cnt, byte, addr_tmp;
+ u32 slave_reg, slave_reg_tmp;
if (!priv)
return -EPERM;
reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;
- slave_addr_mask = MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
/*
* Read the slave registers. There are 4 * 32-bit slave registers.
- * Each slave register can hold up to 4 * 8-bit slave configuration
- * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
+	 * Each slave register can hold up to four 8-bit slave configurations:
+	 * 1) A 7-bit address
+	 * 2) A status bit (1 if enabled, 0 if not).
+ * Look for the next available slave register slot.
*/
for (reg = 0; reg < reg_cnt; reg++) {
- slave_reg = readl(priv->smbus->io +
+ slave_reg = readl(priv->slv->io +
MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
/*
* Each register holds 4 slave addresses. So, we have to keep
@@ -1556,121 +1605,87 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr)
addr_tmp = slave_reg_tmp & GENMASK(7, 0);
/*
- * Mark the first available slave address slot, i.e. its
- * enabled bit should be unset. This slot might be used
- * later on to register our slave.
- */
- if (!avail && !MLXBF_I2C_SLAVE_ADDR_ENABLED(addr_tmp)) {
- avail = true;
- reg_avail = reg;
- byte_avail = byte;
- slave_reg_avail = slave_reg;
- }
-
- /*
- * Parse slave address bytes and check whether the
- * slave address already exists and it's enabled,
- * i.e. most significant bit is set.
+ * If an enable bit is not set in the
+ * MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG register, then the
+ * slave address slot associated with that bit is
+ * free. So set the enable bit and write the
+ * slave address bits.
*/
- if ((addr_tmp & slave_addr_mask) == addr) {
- if (MLXBF_I2C_SLAVE_ADDR_ENABLED(addr_tmp))
- return 0;
- disabled = true;
- break;
+ if (!(addr_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT)) {
+ slave_reg &= ~(MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK << (byte * 8));
+ slave_reg |= (slave->addr << (byte * 8));
+ slave_reg |= MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT << (byte * 8);
+ writel(slave_reg, priv->slv->io +
+ MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ (reg * 0x4));
+
+ /*
+ * Set the slave at the corresponding index.
+ */
+ priv->slave[(reg * 4) + byte] = slave;
+
+ return 0;
}
/* Parse next byte. */
slave_reg_tmp >>= 8;
}
-
- /* Exit the loop if the slave address is found. */
- if (disabled)
- break;
}
- if (!avail && !disabled)
- return -EINVAL; /* No room for a new slave address. */
-
- if (avail && !disabled) {
- reg = reg_avail;
- byte = byte_avail;
- /* Set the slave address. */
- slave_reg_avail &= ~(slave_addr_mask << (byte * 8));
- slave_reg_avail |= addr << (byte * 8);
- slave_reg = slave_reg_avail;
- }
-
- /* Enable the slave address and update the register. */
- slave_reg |= (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT) << (byte * 8);
- writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
- reg * 0x4);
-
- return 0;
+ return -EBUSY;
}
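
Each 32-bit MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG register therefore packs four byte lanes, each lane being an enable bit plus a 7-bit address. A small sketch of the per-lane pack and enable test, reusing the driver's mask and enable-bit macros:

/* Illustrative only: 'lane' is 0..3 within one 32-bit CFG register. */
static u32 example_pack_slave(u32 cfg, unsigned int lane, u8 addr7)
{
	cfg &= ~(MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK << (lane * 8));
	cfg |= (addr7 & MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK) << (lane * 8);
	cfg |= MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT << (lane * 8);

	return cfg;
}

static bool example_lane_enabled(u32 cfg, unsigned int lane)
{
	return !!((cfg >> (lane * 8)) & MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT);
}
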
-static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv)
+static int mlxbf_i2c_slave_disable(struct mlxbf_i2c_priv *priv, u8 addr)
{
- u32 slave_reg, slave_reg_tmp, slave_addr_mask;
- u8 addr, addr_tmp, reg, reg_cnt, slave_byte;
- struct i2c_client *client = priv->slave;
- bool exist;
+ u8 addr_tmp, reg, reg_cnt, byte;
+ u32 slave_reg, slave_reg_tmp;
- exist = false;
-
- addr = client->addr;
reg_cnt = MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT >> 2;
- slave_addr_mask = MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
/*
* Read the slave registers. There are 4 * 32-bit slave registers.
- * Each slave register can hold up to 4 * 8-bit slave configuration
- * (7-bit address, 1 status bit (1 if enabled, 0 if not)).
+	 * Each slave register can hold up to four 8-bit slave configurations:
+	 * 1) A 7-bit address
+	 * 2) A status bit (1 if enabled, 0 if not).
+ * Check if addr is present in the registers.
*/
for (reg = 0; reg < reg_cnt; reg++) {
- slave_reg = readl(priv->smbus->io +
+ slave_reg = readl(priv->slv->io +
MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4);
/* Check whether the address slots are empty. */
- if (slave_reg == 0)
+ if (!slave_reg)
continue;
/*
- * Each register holds 4 slave addresses. So, we have to keep
- * the byte order consistent with the value read in order to
- * update the register correctly, if needed.
+ * Check if addr matches any of the 4 slave addresses
+ * in the register.
*/
slave_reg_tmp = slave_reg;
- slave_byte = 0;
- while (slave_reg_tmp != 0) {
- addr_tmp = slave_reg_tmp & slave_addr_mask;
+ for (byte = 0; byte < 4; byte++) {
+ addr_tmp = slave_reg_tmp & MLXBF_I2C_SMBUS_SLAVE_ADDR_MASK;
/*
* Parse slave address bytes and check whether the
* slave address already exists.
*/
if (addr_tmp == addr) {
- exist = true;
- break;
+ /* Clear the slave address slot. */
+ slave_reg &= ~(GENMASK(7, 0) << (byte * 8));
+ writel(slave_reg, priv->slv->io +
+ MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
+ (reg * 0x4));
+ /* Free slave at the corresponding index */
+ priv->slave[(reg * 4) + byte] = NULL;
+
+ return 0;
}
/* Parse next byte. */
slave_reg_tmp >>= 8;
- slave_byte += 1;
}
-
- /* Exit the loop if the slave address is found. */
- if (exist)
- break;
}
- if (!exist)
- return 0; /* Slave is not registered, nothing to do. */
-
- /* Cleanup the slave address slot. */
- slave_reg &= ~(GENMASK(7, 0) << (slave_byte * 8));
- writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG +
- reg * 0x4);
-
- return 0;
+ return -ENXIO;
}
static int mlxbf_i2c_init_coalesce(struct platform_device *pdev,
@@ -1760,7 +1775,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
int ret;
/* Reset FSM. */
- writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
+ writel(0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_FSM);
/*
* Enable slave cause interrupt bits. Drive
@@ -1775,7 +1790,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev,
writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0);
/* Finally, set the 'ready' bit to start handling transactions. */
- writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);
/* Initialize the cause coalesce resource. */
ret = mlxbf_i2c_init_coalesce(pdev, priv);
@@ -1820,84 +1835,93 @@ static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read,
return true;
}
-static bool mlxbf_smbus_slave_wait_for_idle(struct mlxbf_i2c_priv *priv,
+static bool mlxbf_i2c_slave_wait_for_idle(struct mlxbf_i2c_priv *priv,
u32 timeout)
{
u32 mask = MLXBF_I2C_CAUSE_S_GW_BUSY_FALL;
u32 addr = MLXBF_I2C_CAUSE_ARBITER;
- if (mlxbf_smbus_poll(priv->slv_cause->io, addr, mask, false, timeout))
+ if (mlxbf_i2c_poll(priv->slv_cause->io, addr, mask, false, timeout))
return true;
return false;
}
-/* Send byte to 'external' smbus master. */
-static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+static struct i2c_client *mlxbf_i2c_get_slave_from_addr(
+ struct mlxbf_i2c_priv *priv, u8 addr)
{
- u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
- u8 write_size, pec_en, addr, byte, value, byte_cnt, desc_size;
- struct i2c_client *slave = priv->slave;
- u32 control32, data32;
- int ret;
+ int i;
- if (!slave)
- return -EINVAL;
+ for (i = 0; i < MLXBF_I2C_SMBUS_SLAVE_ADDR_CNT; i++) {
+ if (!priv->slave[i])
+ continue;
+
+ if (priv->slave[i]->addr == addr)
+ return priv->slave[i];
+ }
+
+ return NULL;
+}
- addr = 0;
- byte = 0;
- desc_size = MLXBF_I2C_SLAVE_DATA_DESC_SIZE;
+/*
+ * Send byte to 'external' smbus master. This function is executed when
+ * an external smbus master wants to read data from the BlueField.
+ */
+static int mlxbf_i2c_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+{
+ u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
+ u8 write_size, pec_en, addr, value, byte_cnt;
+ struct i2c_client *slave;
+ u32 control32, data32;
+ int ret = 0;
/*
- * Read bytes received from the external master. These bytes should
- * be located in the first data descriptor register of the slave GW.
- * These bytes are the slave address byte and the internal register
- * address, if supplied.
+ * Read the first byte received from the external master to
+ * determine the slave address. This byte is located in the
+ * first data descriptor register of the slave GW.
*/
- if (recv_bytes > 0) {
- data32 = ioread32be(priv->smbus->io +
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
-
- /* Parse the received bytes. */
- switch (recv_bytes) {
- case 2:
- byte = (data32 >> 8) & GENMASK(7, 0);
- fallthrough;
- case 1:
- addr = (data32 & GENMASK(7, 0)) >> 1;
- }
+ data32 = ioread32be(priv->slv->io +
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+ addr = (data32 & GENMASK(7, 0)) >> 1;
- /* Check whether it's our slave address. */
- if (slave->addr != addr)
- return -EINVAL;
+ /*
+ * Check if the slave address received in the data descriptor register
+ * matches any of the slave addresses registered. If there is a match,
+ * set the slave.
+ */
+ slave = mlxbf_i2c_get_slave_from_addr(priv, addr);
+ if (!slave) {
+ ret = -ENXIO;
+ goto clear_csr;
}
/*
- * I2C read transactions may start by a WRITE followed by a READ.
- * Indeed, most slave devices would expect the internal address
- * following the slave address byte. So, write that byte first,
- * and then, send the requested data bytes to the master.
+ * An I2C read can consist of a WRITE bit transaction followed by
+ * a READ bit transaction. Indeed, slave devices often expect
+ * the slave address to be followed by the internal address.
+ * So, write the internal address byte first, and then, send the
+ * requested data to the master.
*/
if (recv_bytes > 1) {
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
- value = byte;
+ value = (data32 >> 8) & GENMASK(7, 0);
ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED,
&value);
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
if (ret < 0)
- return ret;
+ goto clear_csr;
}
/*
- * Now, send data to the master; currently, the driver supports
- * READ_BYTE, READ_WORD and BLOCK READ protocols. Note that the
- * hardware can send up to 128 bytes per transfer. That is the
- * size of its data registers.
+ * Send data to the master. Currently, the driver supports
+ * READ_BYTE, READ_WORD and BLOCK READ protocols. The
+	 * hardware can send up to 128 bytes per transfer, which is
+ * the total size of the data registers.
*/
i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
- for (byte_cnt = 0; byte_cnt < desc_size; byte_cnt++) {
+ for (byte_cnt = 0; byte_cnt < MLXBF_I2C_SLAVE_DATA_DESC_SIZE; byte_cnt++) {
data_desc[byte_cnt] = value;
i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
}
@@ -1905,14 +1929,12 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
/* Send a stop condition to the backend. */
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
- /* Handle the actual transfer. */
-
/* Set the number of bytes to write to master. */
write_size = (byte_cnt - 1) & 0x7f;
/* Write data to Slave GW data descriptor. */
mlxbf_i2c_smbus_write_data(priv, data_desc, byte_cnt,
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false);
pec_en = 0; /* Disable PEC since it is not supported. */
@@ -1921,46 +1943,52 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT);
control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT);
- writel(control32, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_GW);
+ writel(control32, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_GW);
/*
* Wait until the transfer is completed; the driver will wait
* until the GW is idle, a cause will rise on fall of GW busy.
*/
- mlxbf_smbus_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
+ mlxbf_i2c_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT);
+clear_csr:
/* Release the Slave GW. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
- writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);
- return 0;
+ return ret;
}
-/* Receive bytes from 'external' smbus master. */
-static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
+/*
+ * Receive bytes from 'external' smbus master. This function is executed when
+ * an external smbus master wants to write data to the BlueField.
+ */
+static int mlxbf_i2c_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
{
u8 data_desc[MLXBF_I2C_SLAVE_DATA_DESC_SIZE] = { 0 };
- struct i2c_client *slave = priv->slave;
+ struct i2c_client *slave;
u8 value, byte, addr;
int ret = 0;
- if (!slave)
- return -EINVAL;
-
/* Read data from Slave GW data descriptor. */
mlxbf_i2c_smbus_read_data(priv, data_desc, recv_bytes,
- MLXBF_I2C_SLAVE_DATA_DESC_ADDR);
-
- /* Check whether its our slave address. */
+ MLXBF_I2C_SLAVE_DATA_DESC_ADDR, false);
addr = data_desc[0] >> 1;
- if (slave->addr != addr)
- return -EINVAL;
/*
- * Notify the slave backend; another I2C master wants to write data
- * to us. This event is sent once the slave address and the write bit
- * is detected.
+ * Check if the slave address received in the data descriptor register
+ * matches any of the slave addresses registered.
+ */
+ slave = mlxbf_i2c_get_slave_from_addr(priv, addr);
+ if (!slave) {
+ ret = -EINVAL;
+ goto clear_csr;
+ }
+
+ /*
+ * Notify the slave backend that an smbus master wants to write data
+ * to the BlueField.
*/
i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
@@ -1973,18 +2001,22 @@ static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes)
break;
}
- /* Send a stop condition to the backend. */
+ /*
+ * Send a stop event to the slave backend, to signal
+ * the end of the write transactions.
+ */
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
+clear_csr:
/* Release the Slave GW. */
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
- writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
- writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
+ writel(0x0, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_PEC);
+ writel(0x1, priv->slv->io + MLXBF_I2C_SMBUS_SLAVE_READY);
return ret;
}
-static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
+static irqreturn_t mlxbf_i2c_irq(int irq, void *ptr)
{
struct mlxbf_i2c_priv *priv = ptr;
bool read, write, irq_is_set;
@@ -2014,7 +2046,7 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
* slave, if the higher 8 bits are sent then the slave expect N bytes
* from the master.
*/
- rw_bytes_reg = readl(priv->smbus->io +
+ rw_bytes_reg = readl(priv->slv->io +
MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES);
recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0);
@@ -2032,9 +2064,9 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr)
MLXBF_I2C_SLAVE_DATA_DESC_SIZE : recv_bytes;
if (read)
- mlxbf_smbus_irq_send(priv, recv_bytes);
+ mlxbf_i2c_irq_send(priv, recv_bytes);
else
- mlxbf_smbus_irq_recv(priv, recv_bytes);
+ mlxbf_i2c_irq_recv(priv, recv_bytes);
return IRQ_HANDLED;
}
@@ -2129,23 +2161,21 @@ static s32 mlxbf_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
static int mlxbf_i2c_reg_slave(struct i2c_client *slave)
{
struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+ struct device *dev = &slave->dev;
int ret;
- if (priv->slave)
- return -EBUSY;
-
/*
* Do not support ten bit chip address and do not use Packet Error
* Checking (PEC).
*/
- if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC))
+ if (slave->flags & (I2C_CLIENT_TEN | I2C_CLIENT_PEC)) {
+ dev_err(dev, "SMBus PEC and 10 bit address not supported\n");
return -EAFNOSUPPORT;
+ }
- ret = mlxbf_slave_enable(priv, slave->addr);
- if (ret < 0)
- return ret;
-
- priv->slave = slave;
+ ret = mlxbf_i2c_slave_enable(priv, slave);
+ if (ret)
+ dev_err(dev, "Surpassed max number of registered slaves allowed\n");
return 0;
}
@@ -2153,18 +2183,19 @@ static int mlxbf_i2c_reg_slave(struct i2c_client *slave)
static int mlxbf_i2c_unreg_slave(struct i2c_client *slave)
{
struct mlxbf_i2c_priv *priv = i2c_get_adapdata(slave->adapter);
+ struct device *dev = &slave->dev;
int ret;
- WARN_ON(!priv->slave);
-
- /* Unregister slave, i.e. disable the slave address in hardware. */
- ret = mlxbf_slave_disable(priv);
- if (ret < 0)
- return ret;
-
- priv->slave = NULL;
+ /*
+ * Unregister slave by:
+ * 1) Disabling the slave address in hardware
+ * 2) Freeing priv->slave at the corresponding index
+ */
+ ret = mlxbf_i2c_slave_disable(priv, slave->addr);
+ if (ret)
+ dev_err(dev, "Unable to find slave 0x%x\n", slave->addr);
- return 0;
+ return ret;
}
static u32 mlxbf_i2c_functionality(struct i2c_adapter *adap)
@@ -2180,14 +2211,27 @@ static struct mlxbf_i2c_chip_info mlxbf_i2c_chip[] = {
[1] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_1],
[2] = &mlxbf_i2c_gpio_res[MLXBF_I2C_CHIP_TYPE_1]
},
- .calculate_freq = mlxbf_calculate_freq_from_tyu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_tyu,
+ .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES,
+ .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM
},
[MLXBF_I2C_CHIP_TYPE_2] = {
.type = MLXBF_I2C_CHIP_TYPE_2,
.shared_res = {
[0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_2]
},
- .calculate_freq = mlxbf_calculate_freq_from_yu
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu,
+ .smbus_master_rs_bytes_off = MLXBF_I2C_YU_SMBUS_RS_BYTES,
+ .smbus_master_fsm_off = MLXBF_I2C_YU_SMBUS_MASTER_FSM
+ },
+ [MLXBF_I2C_CHIP_TYPE_3] = {
+ .type = MLXBF_I2C_CHIP_TYPE_3,
+ .shared_res = {
+ [0] = &mlxbf_i2c_corepll_res[MLXBF_I2C_CHIP_TYPE_3]
+ },
+ .calculate_freq = mlxbf_i2c_calculate_freq_from_yu,
+ .smbus_master_rs_bytes_off = MLXBF_I2C_RSH_YU_SMBUS_RS_BYTES,
+ .smbus_master_fsm_off = MLXBF_I2C_RSH_YU_SMBUS_MASTER_FSM
}
};
@@ -2203,24 +2247,11 @@ static struct i2c_adapter_quirks mlxbf_i2c_quirks = {
.max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH,
};
-static const struct of_device_id mlxbf_i2c_dt_ids[] = {
- {
- .compatible = "mellanox,i2c-mlxbf1",
- .data = &mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1]
- },
- {
- .compatible = "mellanox,i2c-mlxbf2",
- .data = &mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2]
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mlxbf_i2c_dt_ids);
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = {
{ "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] },
{ "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] },
+ { "MLNXBF31", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_3] },
{},
};
@@ -2229,35 +2260,27 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_i2c_acpi_ids);
static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
{
const struct acpi_device_id *aid;
- struct acpi_device *adev;
- unsigned long bus_id = 0;
- const char *uid;
+ u64 bus_id;
int ret;
if (acpi_disabled)
return -ENOENT;
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENXIO;
-
aid = acpi_match_device(mlxbf_i2c_acpi_ids, dev);
if (!aid)
return -ENODEV;
priv->chip = (struct mlxbf_i2c_chip_info *)aid->driver_data;
- uid = acpi_device_uid(adev);
- if (!uid || !(*uid)) {
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &bus_id);
+ if (ret) {
dev_err(dev, "Cannot retrieve UID\n");
- return -ENODEV;
+ return ret;
}
- ret = kstrtoul(uid, 0, &bus_id);
- if (!ret)
- priv->bus = bus_id;
+ priv->bus = bus_id;
- return ret;
+ return 0;
}
#else
static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
@@ -2266,36 +2289,12 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
}
#endif /* CONFIG_ACPI */
-static int mlxbf_i2c_of_probe(struct device *dev, struct mlxbf_i2c_priv *priv)
-{
- const struct of_device_id *oid;
- int bus_id = -1;
-
- if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- oid = of_match_node(mlxbf_i2c_dt_ids, dev->of_node);
- if (!oid)
- return -ENODEV;
-
- priv->chip = oid->data;
-
- bus_id = of_alias_get_id(dev->of_node, "i2c");
- if (bus_id >= 0)
- priv->bus = bus_id;
- }
-
- if (bus_id < 0) {
- dev_err(dev, "Cannot get bus id");
- return bus_id;
- }
-
- return 0;
-}
-
static int mlxbf_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mlxbf_i2c_priv *priv;
struct i2c_adapter *adap;
+ u32 resource_version;
int irq, ret;
priv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_priv), GFP_KERNEL);
@@ -2303,17 +2302,63 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mlxbf_i2c_acpi_probe(dev, priv);
- if (ret < 0 && ret != -ENOENT && ret != -ENXIO)
- ret = mlxbf_i2c_of_probe(dev, priv);
-
if (ret < 0)
return ret;
- ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
- MLXBF_I2C_SMBUS_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch smbus resource info");
- return ret;
+	/* This property allows the driver to stay backward compatible with older
+	 * ACPI tables.
+	 * Starting with the BlueField-3 SoC, the "smbus" resource was broken down
+	 * into 3 separate resources: "timer", "master" and "slave".
+ */
+ if (device_property_read_u32(dev, "resource_version", &resource_version))
+ resource_version = 0;
+
+ priv->resource_version = resource_version;
+
+ if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && resource_version == 0) {
+ priv->timer = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL);
+ if (!priv->timer)
+ return -ENOMEM;
+
+ priv->mst = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL);
+ if (!priv->mst)
+ return -ENOMEM;
+
+ priv->slv = devm_kzalloc(dev, sizeof(struct mlxbf_i2c_resource), GFP_KERNEL);
+ if (!priv->slv)
+ return -ENOMEM;
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
+ MLXBF_I2C_SMBUS_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch smbus resource info");
+ return ret;
+ }
+
+ priv->timer->io = priv->smbus->io;
+ priv->mst->io = priv->smbus->io + MLXBF_I2C_MST_ADDR_OFFSET;
+ priv->slv->io = priv->smbus->io + MLXBF_I2C_SLV_ADDR_OFFSET;
+ } else {
+ ret = mlxbf_i2c_init_resource(pdev, &priv->timer,
+ MLXBF_I2C_SMBUS_TIMER_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch timer resource info");
+ return ret;
+ }
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->mst,
+ MLXBF_I2C_SMBUS_MST_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch master resource info");
+ return ret;
+ }
+
+ ret = mlxbf_i2c_init_resource(pdev, &priv->slv,
+ MLXBF_I2C_SMBUS_SLV_RES);
+ if (ret < 0) {
+ dev_err(dev, "Cannot fetch slave resource info");
+ return ret;
+ }
}
ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause,
@@ -2372,8 +2417,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
+ ret = devm_request_irq(dev, irq, mlxbf_i2c_irq,
+ IRQF_SHARED | IRQF_PROBE_SHARED,
dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "Cannot get irq %d\n", irq);
@@ -2401,8 +2446,19 @@ static int mlxbf_i2c_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *params;
- params = priv->smbus->params;
- devm_release_mem_region(dev, params->start, resource_size(params));
+ if (priv->chip->type < MLXBF_I2C_CHIP_TYPE_3 && priv->resource_version == 0) {
+ params = priv->smbus->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+ } else {
+ params = priv->timer->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ params = priv->mst->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+
+ params = priv->slv->params;
+ devm_release_mem_region(dev, params->start, resource_size(params));
+ }
params = priv->mst_cause->params;
devm_release_mem_region(dev, params->start, resource_size(params));
@@ -2434,7 +2490,6 @@ static struct platform_driver mlxbf_i2c_driver = {
.remove = mlxbf_i2c_remove,
.driver = {
.name = "i2c-mlxbf",
- .of_match_table = mlxbf_i2c_dt_ids,
#ifdef CONFIG_ACPI
.acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids),
#endif /* CONFIG_ACPI */
@@ -2467,4 +2522,5 @@ module_exit(mlxbf_i2c_exit);
MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver");
MODULE_AUTHOR("Khalil Blaiech <kblaiech@nvidia.com>");
+MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index ecba1dfc1278..849848ccb080 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -400,7 +400,6 @@ static int riic_i2c_probe(struct platform_device *pdev)
{
struct riic_dev *riic;
struct i2c_adapter *adap;
- struct resource *res;
struct i2c_timings i2c_t;
struct reset_control *rstc;
int i, ret;
@@ -409,8 +408,7 @@ static int riic_i2c_probe(struct platform_device *pdev)
if (!riic)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- riic->base = devm_ioremap_resource(&pdev->dev, res);
+ riic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(riic->base))
return PTR_ERR(riic->base);
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 2e98e7793bba..d1658ed76562 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1165,6 +1165,11 @@ static const struct rk3x_i2c_soc_data rv1108_soc_data = {
.calc_timings = rk3x_i2c_v1_calc_timings,
};
+static const struct rk3x_i2c_soc_data rv1126_soc_data = {
+ .grf_offset = 0x118,
+ .calc_timings = rk3x_i2c_v1_calc_timings,
+};
+
static const struct rk3x_i2c_soc_data rk3066_soc_data = {
.grf_offset = 0x154,
.calc_timings = rk3x_i2c_v0_calc_timings,
@@ -1196,6 +1201,10 @@ static const struct of_device_id rk3x_i2c_match[] = {
.data = &rv1108_soc_data
},
{
+ .compatible = "rockchip,rv1126-i2c",
+ .data = &rv1126_soc_data
+ },
+ {
.compatible = "rockchip,rk3066-i2c",
.data = &rk3066_soc_data
},
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 6746aa46d96c..0239e134b90f 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -6,15 +6,13 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#define ACPI_SMBUS_HC_CLASS "smbus"
-#define ACPI_SMBUS_HC_DEVICE_NAME "cmi"
-
/* SMBUS HID definition as supported by Microsoft Windows */
#define ACPI_SMBUS_MS_HID "SMB0001"
@@ -30,7 +28,7 @@ struct acpi_smbus_cmi {
u8 cap_info:1;
u8 cap_read:1;
u8 cap_write:1;
- struct smbus_methods_t *methods;
+ const struct smbus_methods_t *methods;
};
static const struct smbus_methods_t smbus_methods = {
@@ -358,29 +356,25 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
return AE_OK;
}
-static int acpi_smbus_cmi_add(struct acpi_device *device)
+static int smbus_cmi_probe(struct platform_device *device)
{
+ struct device *dev = &device->dev;
struct acpi_smbus_cmi *smbus_cmi;
- const struct acpi_device_id *id;
int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
if (!smbus_cmi)
return -ENOMEM;
- smbus_cmi->handle = device->handle;
- strcpy(acpi_device_name(device), ACPI_SMBUS_HC_DEVICE_NAME);
- strcpy(acpi_device_class(device), ACPI_SMBUS_HC_CLASS);
- device->driver_data = smbus_cmi;
+ smbus_cmi->handle = ACPI_HANDLE(dev);
+ smbus_cmi->methods = device_get_match_data(dev);
+
+ platform_set_drvdata(device, smbus_cmi);
+
smbus_cmi->cap_info = 0;
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
- for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
- if (!strcmp(id->id, acpi_device_hid(device)))
- smbus_cmi->methods =
- (struct smbus_methods_t *) id->driver_data;
-
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
@@ -390,8 +384,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
}
snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
- "SMBus CMI adapter %s",
- acpi_device_name(device));
+ "SMBus CMI adapter %s", dev_name(dev));
smbus_cmi->adapter.owner = THIS_MODULE;
smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
smbus_cmi->adapter.algo_data = smbus_cmi;
@@ -408,31 +401,28 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
err:
kfree(smbus_cmi);
- device->driver_data = NULL;
return ret;
}
-static int acpi_smbus_cmi_remove(struct acpi_device *device)
+static int smbus_cmi_remove(struct platform_device *device)
{
- struct acpi_smbus_cmi *smbus_cmi = acpi_driver_data(device);
+ struct acpi_smbus_cmi *smbus_cmi = platform_get_drvdata(device);
i2c_del_adapter(&smbus_cmi->adapter);
kfree(smbus_cmi);
- device->driver_data = NULL;
return 0;
}
-static struct acpi_driver acpi_smbus_cmi_driver = {
- .name = ACPI_SMBUS_HC_DEVICE_NAME,
- .class = ACPI_SMBUS_HC_CLASS,
- .ids = acpi_smbus_cmi_ids,
- .ops = {
- .add = acpi_smbus_cmi_add,
- .remove = acpi_smbus_cmi_remove,
+static struct platform_driver smbus_cmi_driver = {
+ .probe = smbus_cmi_probe,
+ .remove = smbus_cmi_remove,
+ .driver = {
+ .name = "smbus_cmi",
+ .acpi_match_table = acpi_smbus_cmi_ids,
},
};
-module_acpi_driver(acpi_smbus_cmi_driver);
+module_platform_driver(smbus_cmi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crane Cai <crane.cai@amd.com>");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 031c78ac42e6..954022c04cc4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -443,11 +443,16 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
u32 *dma_buf;
int err;
- if (!i2c_dev->hw->has_apb_dma || i2c_dev->is_vi)
+ if (i2c_dev->is_vi)
return 0;
- if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
- dev_dbg(i2c_dev->dev, "DMA support not enabled\n");
+	if (i2c_dev->hw->has_apb_dma) {
+ if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
+ dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n");
+ return 0;
+ }
+ } else if (!IS_ENABLED(CONFIG_TEGRA186_GPC_DMA)) {
+ dev_dbg(i2c_dev->dev, "GPC DMA support not enabled\n");
return 0;
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 08b561f0709d..4dd777cc0c89 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -26,7 +26,7 @@ struct gsb_buffer {
union {
u16 wdata;
u8 bdata;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
} __packed;
@@ -137,6 +137,11 @@ static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = {
{}
};
+struct i2c_acpi_irq_context {
+ int irq;
+ bool wake_capable;
+};
+
static int i2c_acpi_do_lookup(struct acpi_device *adev,
struct i2c_acpi_lookup *lookup)
{
@@ -168,13 +173,19 @@ static int i2c_acpi_do_lookup(struct acpi_device *adev,
return 0;
}
-static int i2c_acpi_add_resource(struct acpi_resource *ares, void *data)
+static int i2c_acpi_add_irq_resource(struct acpi_resource *ares, void *data)
{
- int *irq = data;
+ struct i2c_acpi_irq_context *irq_ctx = data;
struct resource r;
- if (*irq <= 0 && acpi_dev_resource_interrupt(ares, 0, &r))
- *irq = i2c_dev_irq_from_resources(&r, 1);
+ if (irq_ctx->irq > 0)
+ return 1;
+
+ if (!acpi_dev_resource_interrupt(ares, 0, &r))
+ return 1;
+
+ irq_ctx->irq = i2c_dev_irq_from_resources(&r, 1);
+ irq_ctx->wake_capable = r.flags & IORESOURCE_IRQ_WAKECAPABLE;
return 1; /* No need to add resource to the list */
}
@@ -182,31 +193,40 @@ static int i2c_acpi_add_resource(struct acpi_resource *ares, void *data)
/**
* i2c_acpi_get_irq - get device IRQ number from ACPI
* @client: Pointer to the I2C client device
+ * @wake_capable: Set to true if the IRQ is wake capable
*
* Find the IRQ number used by a specific client device.
*
* Return: The IRQ number or an error code.
*/
-int i2c_acpi_get_irq(struct i2c_client *client)
+int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
struct list_head resource_list;
- int irq = -ENOENT;
+ struct i2c_acpi_irq_context irq_ctx = {
+ .irq = -ENOENT,
+ };
int ret;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- i2c_acpi_add_resource, &irq);
+ i2c_acpi_add_irq_resource, &irq_ctx);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&resource_list);
- if (irq == -ENOENT)
- irq = acpi_dev_gpio_irq_get(adev, 0);
+ if (irq_ctx.irq == -ENOENT)
+ irq_ctx.irq = acpi_dev_gpio_irq_wake_get(adev, 0, &irq_ctx.wake_capable);
+
+ if (irq_ctx.irq < 0)
+ return irq_ctx.irq;
+
+ if (wake_capable)
+ *wake_capable = irq_ctx.wake_capable;
- return irq;
+ return irq_ctx.irq;
}
static int i2c_acpi_get_info(struct acpi_device *adev,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 91007558bcb2..b4edf10e8fd0 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -487,7 +487,11 @@ static int i2c_device_probe(struct device *dev)
if (irq == -EINVAL || irq == -ENODATA)
irq = of_irq_get(dev->of_node, 0);
} else if (ACPI_COMPANION(dev)) {
- irq = i2c_acpi_get_irq(client);
+ bool wake_capable;
+
+ irq = i2c_acpi_get_irq(client, &wake_capable);
+ if (irq > 0 && wake_capable)
+ client->flags |= I2C_CLIENT_WAKE;
}
if (irq == -EPROBE_DEFER) {
status = irq;
@@ -599,13 +603,9 @@ static void i2c_device_remove(struct device *dev)
driver = to_i2c_driver(dev->driver);
if (driver->remove) {
- int status;
-
dev_dbg(dev, "remove\n");
- status = driver->remove(client);
- if (status)
- dev_warn(dev, "remove failed (%pe), will be ignored\n", ERR_PTR(status));
+ driver->remove(client);
}
devres_release_group(&client->dev, client->devres_group_id);
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 87e2c914f1c5..1247e6e6e975 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -61,11 +61,11 @@ static inline int __i2c_check_suspended(struct i2c_adapter *adap)
#ifdef CONFIG_ACPI
void i2c_acpi_register_devices(struct i2c_adapter *adap);
-int i2c_acpi_get_irq(struct i2c_client *client);
+int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
-static inline int i2c_acpi_get_irq(struct i2c_client *client)
+static inline int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
return 0;
}
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 774507b54b57..313904be5f3b 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -243,9 +243,10 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
int (*deselect)(struct i2c_mux_core *, u32))
{
struct i2c_mux_core *muxc;
+ size_t mux_size;
- muxc = devm_kzalloc(dev, struct_size(muxc, adapter, max_adapters)
- + sizeof_priv, GFP_KERNEL);
+ mux_size = struct_size(muxc, adapter, max_adapters);
+ muxc = devm_kzalloc(dev, size_add(mux_size, sizeof_priv), GFP_KERNEL);
if (!muxc)
return NULL;
if (sizeof_priv)
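The size_add()/struct_size() combination above is the overflow-safe way to size an allocation that carries both a flexible array and a caller-supplied private area: both helpers saturate at SIZE_MAX instead of wrapping, so an oversized request fails cleanly rather than under-allocating. A minimal sketch of the same pattern follows; struct example_core and example_alloc() are hypothetical.

#include <linux/i2c.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_core {
	int num_adapters;
	void *priv;
	struct i2c_adapter *adapter[];	/* flexible array member */
};

static struct example_core *example_alloc(int max_adapters, size_t sizeof_priv)
{
	struct example_core *core;
	size_t base;

	/* sizeof(*core) + max_adapters * sizeof(core->adapter[0]), saturating */
	base = struct_size(core, adapter, max_adapters);

	/* base + sizeof_priv, also saturating instead of wrapping */
	core = kzalloc(size_add(base, sizeof_priv), GFP_KERNEL);
	if (core && sizeof_priv)
		core->priv = &core->adapter[max_adapters];

	return core;
}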
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 5c7ae421cacf..4abc2d919881 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -181,14 +181,12 @@ static int i2c_slave_eeprom_probe(struct i2c_client *client, const struct i2c_de
return 0;
};
-static int i2c_slave_eeprom_remove(struct i2c_client *client)
+static void i2c_slave_eeprom_remove(struct i2c_client *client)
{
struct eeprom_data *eeprom = i2c_get_clientdata(client);
i2c_slave_unregister(client);
sysfs_remove_bin_file(&client->dev.kobj, &eeprom->bin);
-
- return 0;
}
static const struct i2c_device_id i2c_slave_eeprom_id[] = {
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 56dae08dfd48..75ee7ebdb614 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -153,13 +153,12 @@ static int i2c_slave_testunit_probe(struct i2c_client *client)
return i2c_slave_register(client, i2c_slave_testunit_slave_cb);
};
-static int i2c_slave_testunit_remove(struct i2c_client *client)
+static void i2c_slave_testunit_remove(struct i2c_client *client)
{
struct testunit_data *tu = i2c_get_clientdata(client);
cancel_delayed_work_sync(&tu->worker);
i2c_slave_unregister(client);
- return 0;
}
static const struct i2c_device_id i2c_slave_testunit_id[] = {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 8ba9b59a3c40..07c92c8495a3 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -153,12 +153,11 @@ static int smbalert_probe(struct i2c_client *ara,
}
/* IRQ and memory resources are managed so they are freed automatically */
-static int smbalert_remove(struct i2c_client *ara)
+static void smbalert_remove(struct i2c_client *ara)
{
struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
cancel_work_sync(&alert->alert);
- return 0;
}
static const struct i2c_device_id smbalert_ids[] = {
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 1708b1a82da2..ea838dbae32e 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -9,7 +9,7 @@ menu "Multiplexer I2C Chip support"
config I2C_ARB_GPIO_CHALLENGE
tristate "GPIO-based I2C arbitration"
depends on GPIOLIB || COMPILE_TEST
- depends on OF || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for an
I2C multimaster arbitration scheme using GPIOs and a challenge &
@@ -34,7 +34,7 @@ config I2C_MUX_GPIO
config I2C_MUX_GPMUX
tristate "General Purpose I2C multiplexer"
select MULTIPLEXER
- depends on OF || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for a
general purpose I2C multiplexer. This driver provides access to
@@ -77,7 +77,7 @@ config I2C_MUX_PCA954x
config I2C_MUX_PINCTRL
tristate "pinctrl-based I2C multiplexer"
depends on PINCTRL
- depends on OF || COMPILE_TEST
+ depends on OF
help
If you say yes to this option, support will be included for an I2C
multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 704f1e50f6f4..70835825083f 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -294,13 +294,11 @@ static int ltc4306_probe(struct i2c_client *client)
return 0;
}
-static int ltc4306_remove(struct i2c_client *client)
+static void ltc4306_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
i2c_mux_del_adapters(muxc);
-
- return 0;
}
static struct i2c_driver ltc4306_driver = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 6daec8d3d331..ea83de78f52d 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -325,12 +325,11 @@ static int pca9541_probe(struct i2c_client *client,
return 0;
}
-static int pca9541_remove(struct i2c_client *client)
+static void pca9541_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
i2c_mux_del_adapters(muxc);
- return 0;
}
static struct i2c_driver pca9541_driver = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 4ad665757dd8..a5f458b635df 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -521,14 +521,13 @@ fail_cleanup:
return ret;
}
-static int pca954x_remove(struct i2c_client *client)
+static void pca954x_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
device_remove_file(&client->dev, &dev_attr_idle_state);
pca954x_cleanup(muxc);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index f1bb00a11ad6..d5ad904756fd 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
static struct platform_driver i2c_mux_pinctrl_driver = {
.driver = {
.name = "i2c-mux-pinctrl",
- .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+ .of_match_table = i2c_mux_pinctrl_of_match,
},
.probe = i2c_mux_pinctrl_probe,
.remove = i2c_mux_pinctrl_remove,
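Dropping of_match_ptr() here is a direct consequence of the Kconfig changes above: once the muxes hard-depend on OF, the match table is always referenced, whereas under "|| COMPILE_TEST" builds without CONFIG_OF the macro would hide the table and leave it defined but unused. The macro itself (paraphrased from include/linux/of.h) reduces to:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif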
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3e101719689a..cfeb24d40d37 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -928,6 +928,51 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL }
};
+static struct cpuidle_state adl_n_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 2,
+ .target_residency = 4,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 195,
+ .target_residency = 585,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C8",
+ .desc = "MWAIT 0x40",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 260,
+ .target_residency = 1040,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C10",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 660,
+ .target_residency = 1980,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state spr_cstates[] __initdata = {
{
.name = "C1",
@@ -1309,6 +1354,10 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
.state_table = adl_l_cstates,
};
+static const struct idle_cpu idle_cpu_adl_n __initconst = {
+ .state_table = adl_n_cstates,
+};
+
static const struct idle_cpu idle_cpu_spr __initconst = {
.state_table = spr_cstates,
.disable_promotion_to_c1e = true,
@@ -1379,6 +1428,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
@@ -1507,7 +1557,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
state = &drv->states[drv->state_count++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate);
- strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
+ strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency;
/*
* For C1-type C-states use the same number for both the exit
@@ -1816,6 +1866,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
break;
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
+ case INTEL_FAM6_ALDERLAKE_N:
adl_idle_state_table_update();
break;
}
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 35798712f811..ffac66db7ac9 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -541,6 +541,19 @@ config MMA9553
To compile this driver as a module, choose M here: the module
will be called mma9553.
+config MSA311
+ tristate "MEMSensing Digital 3-Axis Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the MEMSensing MSA311
+ accelerometer driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called msa311.
+
config MXC4005
tristate "Memsic MXC4005XC 3-Axis Accelerometer Driver"
depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 4d8792668838..5e45b5fa5ab5 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -58,6 +58,8 @@ obj-$(CONFIG_MMA9551_CORE) += mma9551_core.o
obj-$(CONFIG_MMA9551) += mma9551.o
obj-$(CONFIG_MMA9553) += mma9553.o
+obj-$(CONFIG_MSA311) += msa311.o
+
obj-$(CONFIG_MXC4005) += mxc4005.o
obj-$(CONFIG_MXC6255) += mxc6255.o
diff --git a/drivers/iio/accel/adxl313.h b/drivers/iio/accel/adxl313.h
index 4415f2fc07e1..72f624af4686 100644
--- a/drivers/iio/accel/adxl313.h
+++ b/drivers/iio/accel/adxl313.h
@@ -8,6 +8,8 @@
#ifndef _ADXL313_H_
#define _ADXL313_H_
+#include <linux/iio/iio.h>
+
/* ADXL313 register definitions */
#define ADXL313_REG_DEVID0 0x00
#define ADXL313_REG_DEVID1 0x01
@@ -26,6 +28,7 @@
#define ADXL313_REG_FIFO_STATUS 0x39
#define ADXL313_DEVID0 0xAD
+#define ADXL313_DEVID0_ADXL312_314 0xE5
#define ADXL313_DEVID1 0x1D
#define ADXL313_PARTID 0xCB
#define ADXL313_SOFT_RESET 0x52
@@ -37,18 +40,46 @@
#define ADXL313_MEASUREMENT_MODE BIT(3)
#define ADXL313_RANGE_MSK GENMASK(1, 0)
-#define ADXL313_RANGE_4G 3
+#define ADXL313_RANGE_MAX 3
#define ADXL313_FULL_RES BIT(3)
#define ADXL313_SPI_3WIRE BIT(6)
#define ADXL313_I2C_DISABLE BIT(6)
+extern const struct regmap_access_table adxl312_readable_regs_table;
extern const struct regmap_access_table adxl313_readable_regs_table;
+extern const struct regmap_access_table adxl314_readable_regs_table;
+extern const struct regmap_access_table adxl312_writable_regs_table;
extern const struct regmap_access_table adxl313_writable_regs_table;
+extern const struct regmap_access_table adxl314_writable_regs_table;
+
+enum adxl313_device_type {
+ ADXL312,
+ ADXL313,
+ ADXL314,
+};
+
+struct adxl313_data {
+ struct regmap *regmap;
+ const struct adxl313_chip_info *chip_info;
+ struct mutex lock; /* lock to protect transf_buf */
+ __le16 transf_buf __aligned(IIO_DMA_MINALIGN);
+};
+
+struct adxl313_chip_info {
+ const char *name;
+ enum adxl313_device_type type;
+ int scale_factor;
+ bool variable_range;
+ bool soft_reset;
+ int (*check_id)(struct device *dev, struct adxl313_data *data);
+};
+
+extern const struct adxl313_chip_info adxl31x_chip_info[];
int adxl313_core_probe(struct device *dev,
struct regmap *regmap,
- const char *name,
+ const struct adxl313_chip_info *chip_info,
int (*setup)(struct device *, struct regmap *));
#endif /* _ADXL313_H_ */
diff --git a/drivers/iio/accel/adxl313_core.c b/drivers/iio/accel/adxl313_core.c
index afeef779e1d0..4de0a41bd679 100644
--- a/drivers/iio/accel/adxl313_core.c
+++ b/drivers/iio/accel/adxl313_core.c
@@ -8,12 +8,18 @@
*/
#include <linux/bitfield.h>
-#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include "adxl313.h"
+static const struct regmap_range adxl312_readable_reg_range[] = {
+ regmap_reg_range(ADXL313_REG_DEVID0, ADXL313_REG_DEVID0),
+ regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
+ regmap_reg_range(ADXL313_REG_THRESH_ACT, ADXL313_REG_ACT_INACT_CTL),
+ regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_FIFO_STATUS),
+};
+
static const struct regmap_range adxl313_readable_reg_range[] = {
regmap_reg_range(ADXL313_REG_DEVID0, ADXL313_REG_XID),
regmap_reg_range(ADXL313_REG_SOFT_RESET, ADXL313_REG_SOFT_RESET),
@@ -22,12 +28,109 @@ static const struct regmap_range adxl313_readable_reg_range[] = {
regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_FIFO_STATUS),
};
+const struct regmap_access_table adxl312_readable_regs_table = {
+ .yes_ranges = adxl312_readable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_readable_reg_range),
+};
+EXPORT_SYMBOL_NS_GPL(adxl312_readable_regs_table, IIO_ADXL313);
+
const struct regmap_access_table adxl313_readable_regs_table = {
.yes_ranges = adxl313_readable_reg_range,
.n_yes_ranges = ARRAY_SIZE(adxl313_readable_reg_range),
};
EXPORT_SYMBOL_NS_GPL(adxl313_readable_regs_table, IIO_ADXL313);
+const struct regmap_access_table adxl314_readable_regs_table = {
+ .yes_ranges = adxl312_readable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_readable_reg_range),
+};
+EXPORT_SYMBOL_NS_GPL(adxl314_readable_regs_table, IIO_ADXL313);
+
+static int adxl312_check_id(struct device *dev,
+ struct adxl313_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID0_ADXL312_314)
+ dev_warn(dev, "Invalid manufacturer ID: %#02x\n", regval);
+
+ return 0;
+}
+
+static int adxl313_check_id(struct device *dev,
+ struct adxl313_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID0)
+ dev_warn(dev, "Invalid manufacturer ID: 0x%02x\n", regval);
+
+ /* Check DEVID1 and PARTID */
+ if (regval == ADXL313_DEVID0) {
+ ret = regmap_read(data->regmap, ADXL313_REG_DEVID1, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_DEVID1)
+ dev_warn(dev, "Invalid mems ID: 0x%02x\n", regval);
+
+ ret = regmap_read(data->regmap, ADXL313_REG_PARTID, &regval);
+ if (ret)
+ return ret;
+
+ if (regval != ADXL313_PARTID)
+ dev_warn(dev, "Invalid device ID: 0x%02x\n", regval);
+ }
+
+ return 0;
+}
+
+const struct adxl313_chip_info adxl31x_chip_info[] = {
+ [ADXL312] = {
+ .name = "adxl312",
+ .type = ADXL312,
+ .scale_factor = 28425072,
+ .variable_range = true,
+ .soft_reset = false,
+ .check_id = &adxl312_check_id,
+ },
+ [ADXL313] = {
+ .name = "adxl313",
+ .type = ADXL313,
+ .scale_factor = 9576806,
+ .variable_range = true,
+ .soft_reset = true,
+ .check_id = &adxl313_check_id,
+ },
+ [ADXL314] = {
+ .name = "adxl314",
+ .type = ADXL314,
+ .scale_factor = 478858719,
+ .variable_range = false,
+ .soft_reset = false,
+ .check_id = &adxl312_check_id,
+ },
+};
+EXPORT_SYMBOL_NS_GPL(adxl31x_chip_info, IIO_ADXL313);
+
+static const struct regmap_range adxl312_writable_reg_range[] = {
+ regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
+ regmap_reg_range(ADXL313_REG_THRESH_ACT, ADXL313_REG_ACT_INACT_CTL),
+ regmap_reg_range(ADXL313_REG_BW_RATE, ADXL313_REG_INT_MAP),
+ regmap_reg_range(ADXL313_REG_DATA_FORMAT, ADXL313_REG_DATA_FORMAT),
+ regmap_reg_range(ADXL313_REG_FIFO_CTL, ADXL313_REG_FIFO_CTL),
+};
+
static const struct regmap_range adxl313_writable_reg_range[] = {
regmap_reg_range(ADXL313_REG_SOFT_RESET, ADXL313_REG_SOFT_RESET),
regmap_reg_range(ADXL313_REG_OFS_AXIS(0), ADXL313_REG_OFS_AXIS(2)),
@@ -37,17 +140,23 @@ static const struct regmap_range adxl313_writable_reg_range[] = {
regmap_reg_range(ADXL313_REG_FIFO_CTL, ADXL313_REG_FIFO_CTL),
};
+const struct regmap_access_table adxl312_writable_regs_table = {
+ .yes_ranges = adxl312_writable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_writable_reg_range),
+};
+EXPORT_SYMBOL_NS_GPL(adxl312_writable_regs_table, IIO_ADXL313);
+
const struct regmap_access_table adxl313_writable_regs_table = {
.yes_ranges = adxl313_writable_reg_range,
.n_yes_ranges = ARRAY_SIZE(adxl313_writable_reg_range),
};
EXPORT_SYMBOL_NS_GPL(adxl313_writable_regs_table, IIO_ADXL313);
-struct adxl313_data {
- struct regmap *regmap;
- struct mutex lock; /* lock to protect transf_buf */
- __le16 transf_buf __aligned(IIO_DMA_MINALIGN);
+const struct regmap_access_table adxl314_writable_regs_table = {
+ .yes_ranges = adxl312_writable_reg_range,
+ .n_yes_ranges = ARRAY_SIZE(adxl312_writable_reg_range),
};
+EXPORT_SYMBOL_NS_GPL(adxl314_writable_regs_table, IIO_ADXL313);
static const int adxl313_odr_freqs[][2] = {
[0] = { 6, 250000 },
@@ -156,12 +265,10 @@ static int adxl313_read_raw(struct iio_dev *indio_dev,
*val = sign_extend32(ret, chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- /*
- * Scale for any g range is given in datasheet as
- * 1024 LSB/g = 0.0009765625 * 9.80665 = 0.009576806640625 m/s^2
- */
*val = 0;
- *val2 = 9576806;
+
+ *val2 = data->chip_info->scale_factor;
+
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = regmap_read(data->regmap,
@@ -170,7 +277,7 @@ static int adxl313_read_raw(struct iio_dev *indio_dev,
return ret;
/*
- * 8-bit resolution at +/- 0.5g, that is 4x accel data scale
+ * 8-bit resolution at minimum range, that is 4x accel data scale
* factor at full resolution
*/
*val = sign_extend32(regval, 7) * 4;
@@ -198,7 +305,7 @@ static int adxl313_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
/*
- * 8-bit resolution at +/- 0.5g, that is 4x accel data scale
+ * 8-bit resolution at minimum range, that is 4x accel data scale
* factor at full resolution
*/
if (clamp_val(val, -128 * 4, 127 * 4) != val)
@@ -223,14 +330,18 @@ static const struct iio_info adxl313_info = {
static int adxl313_setup(struct device *dev, struct adxl313_data *data,
int (*setup)(struct device *, struct regmap *))
{
- unsigned int regval;
int ret;
- /* Ensures the device is in a consistent state after start up */
- ret = regmap_write(data->regmap, ADXL313_REG_SOFT_RESET,
- ADXL313_SOFT_RESET);
- if (ret)
- return ret;
+ /*
+	 * If a soft reset is available, ensure the device is in a
+	 * consistent state after start-up.
+ */
+ if (data->chip_info->soft_reset) {
+ ret = regmap_write(data->regmap, ADXL313_REG_SOFT_RESET,
+ ADXL313_SOFT_RESET);
+ if (ret)
+ return ret;
+ }
if (setup) {
ret = setup(dev, data->regmap);
@@ -238,46 +349,25 @@ static int adxl313_setup(struct device *dev, struct adxl313_data *data,
return ret;
}
- ret = regmap_read(data->regmap, ADXL313_REG_DEVID0, &regval);
+ ret = data->chip_info->check_id(dev, data);
if (ret)
return ret;
- if (regval != ADXL313_DEVID0) {
- dev_err(dev, "Invalid manufacturer ID: 0x%02x\n", regval);
- return -ENODEV;
- }
-
- ret = regmap_read(data->regmap, ADXL313_REG_DEVID1, &regval);
- if (ret)
- return ret;
-
- if (regval != ADXL313_DEVID1) {
- dev_err(dev, "Invalid mems ID: 0x%02x\n", regval);
- return -ENODEV;
- }
-
- ret = regmap_read(data->regmap, ADXL313_REG_PARTID, &regval);
- if (ret)
- return ret;
+ /* Sets the range to maximum, full resolution, if applicable */
+ if (data->chip_info->variable_range) {
+ ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_RANGE_MSK,
+ FIELD_PREP(ADXL313_RANGE_MSK, ADXL313_RANGE_MAX));
+ if (ret)
+ return ret;
- if (regval != ADXL313_PARTID) {
- dev_err(dev, "Invalid device ID: 0x%02x\n", regval);
- return -ENODEV;
+ /* Enables full resolution */
+ ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
+ ADXL313_FULL_RES, ADXL313_FULL_RES);
+ if (ret)
+ return ret;
}
- /* Sets the range to +/- 4g */
- ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
- ADXL313_RANGE_MSK,
- FIELD_PREP(ADXL313_RANGE_MSK, ADXL313_RANGE_4G));
- if (ret)
- return ret;
-
- /* Enables full resolution */
- ret = regmap_update_bits(data->regmap, ADXL313_REG_DATA_FORMAT,
- ADXL313_FULL_RES, ADXL313_FULL_RES);
- if (ret)
- return ret;
-
/* Enables measurement mode */
return regmap_update_bits(data->regmap, ADXL313_REG_POWER_CTL,
ADXL313_POWER_CTL_MSK,
@@ -288,7 +378,7 @@ static int adxl313_setup(struct device *dev, struct adxl313_data *data,
* adxl313_core_probe() - probe and setup for adxl313 accelerometer
* @dev: Driver model representation of the device
* @regmap: Register map of the device
- * @name: Device name buffer reference
+ * @chip_info: Structure containing device specific data
* @setup: Setup routine to be executed right before the standard device
* setup, can also be set to NULL if not required
*
@@ -296,7 +386,7 @@ static int adxl313_setup(struct device *dev, struct adxl313_data *data,
*/
int adxl313_core_probe(struct device *dev,
struct regmap *regmap,
- const char *name,
+ const struct adxl313_chip_info *chip_info,
int (*setup)(struct device *, struct regmap *))
{
struct adxl313_data *data;
@@ -309,9 +399,11 @@ int adxl313_core_probe(struct device *dev,
data = iio_priv(indio_dev);
data->regmap = regmap;
+ data->chip_info = chip_info;
+
mutex_init(&data->lock);
- indio_dev->name = name;
+ indio_dev->name = chip_info->name;
indio_dev->info = &adxl313_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = adxl313_channels;
diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
index c329765dbf60..99cc7fc29488 100644
--- a/drivers/iio/accel/adxl313_i2c.c
+++ b/drivers/iio/accel/adxl313_i2c.c
@@ -14,42 +14,72 @@
#include "adxl313.h"
-static const struct regmap_config adxl313_i2c_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .rd_table = &adxl313_readable_regs_table,
- .wr_table = &adxl313_writable_regs_table,
- .max_register = 0x39,
+static const struct regmap_config adxl31x_i2c_regmap_config[] = {
+ [ADXL312] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl312_readable_regs_table,
+ .wr_table = &adxl312_writable_regs_table,
+ .max_register = 0x39,
+ },
+ [ADXL313] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl313_readable_regs_table,
+ .wr_table = &adxl313_writable_regs_table,
+ .max_register = 0x39,
+ },
+ [ADXL314] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl314_readable_regs_table,
+ .wr_table = &adxl314_writable_regs_table,
+ .max_register = 0x39,
+ },
};
-static int adxl313_i2c_probe(struct i2c_client *client)
-{
- struct regmap *regmap;
-
- regmap = devm_regmap_init_i2c(client, &adxl313_i2c_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
- PTR_ERR(regmap));
- return PTR_ERR(regmap);
- }
-
- return adxl313_core_probe(&client->dev, regmap, client->name, NULL);
-}
-
static const struct i2c_device_id adxl313_i2c_id[] = {
- { "adxl313" },
+	{ .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+	{ .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] },
+	{ .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] },
{ }
};
MODULE_DEVICE_TABLE(i2c, adxl313_i2c_id);
static const struct of_device_id adxl313_of_match[] = {
- { .compatible = "adi,adxl313" },
+ { .compatible = "adi,adxl312", .data = &adxl31x_chip_info[ADXL312] },
+ { .compatible = "adi,adxl313", .data = &adxl31x_chip_info[ADXL313] },
+ { .compatible = "adi,adxl314", .data = &adxl31x_chip_info[ADXL314] },
{ }
};
MODULE_DEVICE_TABLE(of, adxl313_of_match);
+static int adxl313_i2c_probe(struct i2c_client *client)
+{
+ const struct adxl313_chip_info *chip_data;
+ struct regmap *regmap;
+
+ /*
+	 * Retrieve device-specific data as a pointer to an
+	 * adxl313_chip_info structure.
+ */
+ chip_data = device_get_match_data(&client->dev);
+ if (!chip_data)
+ chip_data = (const struct adxl313_chip_info *)i2c_match_id(adxl313_i2c_id, client)->driver_data;
+
+ regmap = devm_regmap_init_i2c(client,
+ &adxl31x_i2c_regmap_config[chip_data->type]);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Error initializing i2c regmap: %ld\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return adxl313_core_probe(&client->dev, regmap, chip_data, NULL);
+}
+
static struct i2c_driver adxl313_i2c_driver = {
.driver = {
.name = "adxl313_i2c",
diff --git a/drivers/iio/accel/adxl313_spi.c b/drivers/iio/accel/adxl313_spi.c
index a3c6d553462d..b7cc15678a2b 100644
--- a/drivers/iio/accel/adxl313_spi.c
+++ b/drivers/iio/accel/adxl313_spi.c
@@ -11,17 +11,38 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <linux/property.h>
#include "adxl313.h"
-static const struct regmap_config adxl313_spi_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .rd_table = &adxl313_readable_regs_table,
- .wr_table = &adxl313_writable_regs_table,
- .max_register = 0x39,
- /* Setting bits 7 and 6 enables multiple-byte read */
- .read_flag_mask = BIT(7) | BIT(6),
+static const struct regmap_config adxl31x_spi_regmap_config[] = {
+ [ADXL312] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl312_readable_regs_table,
+ .wr_table = &adxl312_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+ },
+ [ADXL313] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl313_readable_regs_table,
+ .wr_table = &adxl313_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+ },
+ [ADXL314] = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &adxl314_readable_regs_table,
+ .wr_table = &adxl314_writable_regs_table,
+ .max_register = 0x39,
+ /* Setting bits 7 and 6 enables multiple-byte read */
+ .read_flag_mask = BIT(7) | BIT(6),
+ },
};
static int adxl313_spi_setup(struct device *dev, struct regmap *regmap)
@@ -42,7 +63,7 @@ static int adxl313_spi_setup(struct device *dev, struct regmap *regmap)
static int adxl313_spi_probe(struct spi_device *spi)
{
- const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct adxl313_chip_info *chip_data;
struct regmap *regmap;
int ret;
@@ -51,26 +72,40 @@ static int adxl313_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- regmap = devm_regmap_init_spi(spi, &adxl313_spi_regmap_config);
+ /*
+	 * Retrieve device-specific data as a pointer to an
+	 * adxl313_chip_info structure.
+ */
+ chip_data = device_get_match_data(&spi->dev);
+ if (!chip_data)
+ chip_data = (const struct adxl313_chip_info *)spi_get_device_id(spi)->driver_data;
+
+ regmap = devm_regmap_init_spi(spi,
+ &adxl31x_spi_regmap_config[chip_data->type]);
+
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Error initializing spi regmap: %ld\n",
PTR_ERR(regmap));
return PTR_ERR(regmap);
}
- return adxl313_core_probe(&spi->dev, regmap, id->name,
- &adxl313_spi_setup);
+ return adxl313_core_probe(&spi->dev, regmap,
+ chip_data, &adxl313_spi_setup);
}
static const struct spi_device_id adxl313_spi_id[] = {
- { "adxl313" },
+ { .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+ { .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] },
+ { .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] },
{ }
};
MODULE_DEVICE_TABLE(spi, adxl313_spi_id);
static const struct of_device_id adxl313_of_match[] = {
- { .compatible = "adi,adxl313" },
+ { .compatible = "adi,adxl312", .data = &adxl31x_chip_info[ADXL312] },
+ { .compatible = "adi,adxl313", .data = &adxl31x_chip_info[ADXL313] },
+ { .compatible = "adi,adxl314", .data = &adxl31x_chip_info[ADXL314] },
{ }
};
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 370bfec1275a..1919e0089c11 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -32,7 +33,6 @@
#define ADXL345_BW_RATE GENMASK(3, 0)
#define ADXL345_BASE_RATE_NANO_HZ 97656250LL
-#define NHZ_PER_HZ 1000000000LL
#define ADXL345_POWER_CTL_MEASURE BIT(3)
#define ADXL345_POWER_CTL_STANDBY 0x00
@@ -139,7 +139,7 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
samp_freq_nhz = ADXL345_BASE_RATE_NANO_HZ <<
(regval & ADXL345_BW_RATE);
- *val = div_s64_rem(samp_freq_nhz, NHZ_PER_HZ, val2);
+ *val = div_s64_rem(samp_freq_nhz, NANOHZ_PER_HZ, val2);
return IIO_VAL_INT_PLUS_NANO;
}
@@ -164,7 +164,8 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
ADXL345_REG_OFS_AXIS(chan->address),
val / 4);
case IIO_CHAN_INFO_SAMP_FREQ:
- n = div_s64(val * NHZ_PER_HZ + val2, ADXL345_BASE_RATE_NANO_HZ);
+ n = div_s64(val * NANOHZ_PER_HZ + val2,
+ ADXL345_BASE_RATE_NANO_HZ);
return regmap_update_bits(data->regmap, ADXL345_REG_BW_RATE,
ADXL345_BW_RATE,
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 9c9e98578667..d03fc3400f94 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -1045,7 +1045,7 @@ err_disable_vdd:
return ret;
}
-static int bma180_remove(struct i2c_client *client)
+static void bma180_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bma180_data *data = iio_priv(indio_dev);
@@ -1062,8 +1062,6 @@ static int bma180_remove(struct i2c_client *client)
mutex_unlock(&data->mutex);
regulator_disable(data->vddio_supply);
regulator_disable(data->vdd_supply);
-
- return 0;
}
static int bma180_suspend(struct device *dev)
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index e8f802a82300..36edbaff4f7f 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -40,6 +40,7 @@
#define BMA400_INT_STAT1_REG 0x0f
#define BMA400_INT_STAT2_REG 0x10
#define BMA400_INT12_MAP_REG 0x23
+#define BMA400_INT_ENG_OVRUN_MSK BIT(4)
/* Temperature register */
#define BMA400_TEMP_DATA_REG 0x11
@@ -105,6 +106,19 @@
#define BMA400_INT_GEN2_MSK BIT(3)
#define BMA400_GEN_HYST_MSK GENMASK(1, 0)
+/* TAP config registers */
+#define BMA400_TAP_CONFIG 0x57
+#define BMA400_TAP_CONFIG1 0x58
+#define BMA400_S_TAP_MSK BIT(2)
+#define BMA400_D_TAP_MSK BIT(3)
+#define BMA400_INT_S_TAP_MSK BIT(10)
+#define BMA400_INT_D_TAP_MSK BIT(11)
+#define BMA400_TAP_SEN_MSK GENMASK(2, 0)
+#define BMA400_TAP_TICSTH_MSK GENMASK(1, 0)
+#define BMA400_TAP_QUIET_MSK GENMASK(3, 2)
+#define BMA400_TAP_QUIETDT_MSK GENMASK(5, 4)
+#define BMA400_TAP_TIM_LIST_LEN 4
+
/*
* BMA400_SCALE_MIN macro value represents m/s^2 for 1 LSB before
* converting to micro values for +-2g range.
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index c31bdd9b168e..ad8fce3e08cd 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -26,6 +26,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -47,6 +48,27 @@ static int bma400_sample_freqs[14];
static const int bma400_osr_range[] = { 0, 1, 3 };
+static int tap_reset_timeout[BMA400_TAP_TIM_LIST_LEN] = {
+ 300000,
+ 400000,
+ 500000,
+ 600000
+};
+
+static int tap_max2min_time[BMA400_TAP_TIM_LIST_LEN] = {
+ 30000,
+ 45000,
+ 60000,
+ 90000
+};
+
+static int double_tap2_min_delay[BMA400_TAP_TIM_LIST_LEN] = {
+ 20000,
+ 40000,
+ 60000,
+ 80000
+};
+
/* See the ACC_CONFIG0 section of the datasheet */
enum bma400_power_mode {
POWER_MODE_SLEEP = 0x00,
@@ -88,6 +110,7 @@ struct bma400_data {
bool step_event_en;
bool activity_event_en;
unsigned int generic_event_en;
+ unsigned int tap_event_en_bitmask;
/* Correct time stamp alignment */
struct {
__le16 buff[3];
@@ -216,6 +239,115 @@ static const struct iio_event_spec bma400_accel_event[] = {
BIT(IIO_EV_INFO_HYSTERESIS) |
BIT(IIO_EV_INFO_ENABLE),
},
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_SINGLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT),
+ },
+ {
+ .type = IIO_EV_TYPE_GESTURE,
+ .dir = IIO_EV_DIR_DOUBLETAP,
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_RESET_TIMEOUT) |
+ BIT(IIO_EV_INFO_TAP2_MIN_DELAY),
+ },
+};
+
+static int usec_to_tapreg_raw(int usec, const int *time_list)
+{
+ int index;
+
+ for (index = 0; index < BMA400_TAP_TIM_LIST_LEN; index++) {
+ if (usec == time_list[index])
+ return index;
+ }
+ return -EINVAL;
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret, reg_val, raw, vals[2];
+
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1, &reg_val);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMA400_TAP_TICSTH_MSK, reg_val);
+ vals[0] = 0;
+ vals[1] = tap_max2min_time[raw];
+
+ return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
+}
+
+static ssize_t in_accel_gesture_tap_maxtomin_time_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bma400_data *data = iio_priv(indio_dev);
+ int ret, val_int, val_fract, raw;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract);
+ if (ret)
+ return ret;
+
+ raw = usec_to_tapreg_raw(val_fract, tap_max2min_time);
+ if (raw < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, BMA400_TAP_CONFIG1,
+ BMA400_TAP_TICSTH_MSK,
+ FIELD_PREP(BMA400_TAP_TICSTH_MSK, raw));
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_maxtomin_time, 0);
+
+/*
+ * Tap interrupts work with a 200 Hz input data rate, and the time-based tap
+ * controls are expressed in data samples, so the calculation below is used
+ * to convert the configuration values into seconds.
+ * e.g.:
+ * 60 data samples * 0.005 s = 0.3 seconds.
+ * 80 data samples * 0.005 s = 0.4 seconds.
+ */
+
+/* quiet configuration values in seconds */
+static IIO_CONST_ATTR(in_accel_gesture_tap_reset_timeout_available,
+ "0.3 0.4 0.5 0.6");
+
+/* tics_th configuration values in seconds */
+static IIO_CONST_ATTR(in_accel_gesture_tap_maxtomin_time_available,
+ "0.03 0.045 0.06 0.09");
+
+/* quiet_dt configuration values in seconds */
+static IIO_CONST_ATTR(in_accel_gesture_doubletap_tap2_min_delay_available,
+ "0.02 0.04 0.06 0.08");
+
+/* List of sensitivity values available to configure tap interrupts */
+static IIO_CONST_ATTR(in_accel_gesture_tap_value_available, "0 1 2 3 4 5 6 7");
+
+static struct attribute *bma400_event_attributes[] = {
+ &iio_const_attr_in_accel_gesture_tap_value_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_tap_reset_timeout_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_tap_maxtomin_time_available.dev_attr.attr,
+ &iio_const_attr_in_accel_gesture_doubletap_tap2_min_delay_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_gesture_tap_maxtomin_time.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group bma400_event_attribute_group = {
+ .attrs = bma400_event_attributes,
};
#define BMA400_ACC_CHANNEL(_index, _axis) { \
@@ -1012,6 +1144,12 @@ static int bma400_read_event_config(struct iio_dev *indio_dev,
case IIO_EV_DIR_FALLING:
return FIELD_GET(BMA400_INT_GEN2_MSK,
data->generic_event_en);
+ case IIO_EV_DIR_SINGLETAP:
+ return FIELD_GET(BMA400_S_TAP_MSK,
+ data->tap_event_en_bitmask);
+ case IIO_EV_DIR_DOUBLETAP:
+ return FIELD_GET(BMA400_D_TAP_MSK,
+ data->tap_event_en_bitmask);
default:
return -EINVAL;
}
@@ -1046,7 +1184,8 @@ static int bma400_activity_event_en(struct bma400_data *data,
enum iio_event_direction dir,
int state)
{
- int ret, reg, msk, value, field_value;
+ int ret, reg, msk, value;
+ int field_value = 0;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1101,6 +1240,80 @@ static int bma400_activity_event_en(struct bma400_data *data,
return 0;
}
+static int bma400_tap_event_en(struct bma400_data *data,
+ enum iio_event_direction dir, int state)
+{
+	unsigned int mask, field_value = 0;
+ int ret;
+
+ /*
+ * Tap interrupts can be configured only in normal mode.
+ * See table in section 4.3 "Power modes - performance modes" of
+ * datasheet v1.2.
+ */
+ if (data->power_mode != POWER_MODE_NORMAL)
+ return -EINVAL;
+
+ /*
+	 * Tap interrupts operate at a data rate of 200 Hz.
+ * See section 4.7 "Tap sensing interrupt" in datasheet v1.2.
+ */
+ if (data->sample_freq.hz != 200 && state) {
+ dev_err(data->dev, "Invalid data rate for tap interrupts.\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT12_MAP_REG,
+ BMA400_S_TAP_MSK,
+ FIELD_PREP(BMA400_S_TAP_MSK, state));
+ if (ret)
+ return ret;
+
+ switch (dir) {
+ case IIO_EV_DIR_SINGLETAP:
+ mask = BMA400_S_TAP_MSK;
+ set_mask_bits(&field_value, BMA400_S_TAP_MSK,
+ FIELD_PREP(BMA400_S_TAP_MSK, state));
+ break;
+ case IIO_EV_DIR_DOUBLETAP:
+ mask = BMA400_D_TAP_MSK;
+ set_mask_bits(&field_value, BMA400_D_TAP_MSK,
+ FIELD_PREP(BMA400_D_TAP_MSK, state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG1_REG, mask,
+ field_value);
+ if (ret)
+ return ret;
+
+ set_mask_bits(&data->tap_event_en_bitmask, mask, field_value);
+
+ return 0;
+}
+
+static int bma400_disable_adv_interrupt(struct bma400_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, BMA400_INT_CONFIG0_REG, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, BMA400_INT_CONFIG1_REG, 0);
+ if (ret)
+ return ret;
+
+ data->tap_event_en_bitmask = 0;
+ data->generic_event_en = 0;
+ data->step_event_en = false;
+ data->activity_event_en = false;
+
+ return 0;
+}
+
static int bma400_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -1111,10 +1324,20 @@ static int bma400_write_event_config(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ACCEL:
- mutex_lock(&data->mutex);
- ret = bma400_activity_event_en(data, dir, state);
- mutex_unlock(&data->mutex);
- return ret;
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
+ mutex_lock(&data->mutex);
+ ret = bma400_activity_event_en(data, dir, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_EV_TYPE_GESTURE:
+ mutex_lock(&data->mutex);
+ ret = bma400_tap_event_en(data, dir, state);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
case IIO_STEPS:
mutex_lock(&data->mutex);
ret = bma400_steps_event_enable(data, state);
@@ -1157,10 +1380,13 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct bma400_data *data = iio_priv(indio_dev);
- int ret, reg;
+ int ret, reg, reg_val, raw;
- switch (chan->type) {
- case IIO_ACCEL:
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
reg = get_gen_config_reg(dir);
if (reg < 0)
return -EINVAL;
@@ -1196,6 +1422,39 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG,
+ &reg_val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(BMA400_TAP_SEN_MSK, reg_val);
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1,
+ &reg_val);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMA400_TAP_QUIET_MSK, reg_val);
+ *val = 0;
+ *val2 = tap_reset_timeout[raw];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1,
+ &reg_val);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMA400_TAP_QUIETDT_MSK, reg_val);
+ *val = 0;
+ *val2 = double_tap2_min_delay[raw];
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -1209,10 +1468,13 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
int val, int val2)
{
struct bma400_data *data = iio_priv(indio_dev);
- int reg, ret;
+ int reg, ret, raw;
- switch (chan->type) {
- case IIO_ACCEL:
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (type) {
+ case IIO_EV_TYPE_MAG:
reg = get_gen_config_reg(dir);
if (reg < 0)
return -EINVAL;
@@ -1248,6 +1510,40 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_EV_TYPE_GESTURE:
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (val < 0 || val > 7)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ BMA400_TAP_CONFIG,
+ BMA400_TAP_SEN_MSK,
+ FIELD_PREP(BMA400_TAP_SEN_MSK,
+ val));
+ case IIO_EV_INFO_RESET_TIMEOUT:
+ raw = usec_to_tapreg_raw(val2, tap_reset_timeout);
+ if (raw < 0)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ BMA400_TAP_CONFIG1,
+ BMA400_TAP_QUIET_MSK,
+ FIELD_PREP(BMA400_TAP_QUIET_MSK,
+ raw));
+ case IIO_EV_INFO_TAP2_MIN_DELAY:
+ raw = usec_to_tapreg_raw(val2, double_tap2_min_delay);
+ if (raw < 0)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ BMA400_TAP_CONFIG1,
+ BMA400_TAP_QUIETDT_MSK,
+ FIELD_PREP(BMA400_TAP_QUIETDT_MSK,
+ raw));
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -1287,6 +1583,7 @@ static const struct iio_info bma400_info = {
.write_event_config = bma400_write_event_config,
.write_event_value = bma400_write_event_value,
.read_event_value = bma400_read_event_value,
+ .event_attrs = &bma400_event_attribute_group,
};
static const struct iio_trigger_ops bma400_trigger_ops = {
@@ -1350,6 +1647,32 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
if (ret || !data->status)
goto unlock_err;
+ /*
+ * Disable all advance interrupts if interrupt engine overrun occurs.
+ * See section 4.7 "Interrupt engine overrun" in datasheet v1.2.
+ */
+ if (FIELD_GET(BMA400_INT_ENG_OVRUN_MSK, le16_to_cpu(data->status))) {
+ bma400_disable_adv_interrupt(data);
+ dev_err(data->dev, "Interrupt engine overrun\n");
+ goto unlock_err;
+ }
+
+ if (FIELD_GET(BMA400_INT_S_TAP_MSK, le16_to_cpu(data->status)))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_SINGLETAP),
+ timestamp);
+
+ if (FIELD_GET(BMA400_INT_D_TAP_MSK, le16_to_cpu(data->status)))
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_GESTURE,
+ IIO_EV_DIR_DOUBLETAP),
+ timestamp);
+
if (FIELD_GET(BMA400_INT_GEN1_MSK, le16_to_cpu(data->status)))
ev_dir = IIO_EV_DIR_RISING;
@@ -1467,5 +1790,6 @@ int bma400_probe(struct device *dev, struct regmap *regmap, int irq,
EXPORT_SYMBOL_NS(bma400_probe, IIO_BMA400);
MODULE_AUTHOR("Dan Robertson <dan@dlrobertson.com>");
+MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>");
MODULE_DESCRIPTION("Bosch BMA400 triaxial acceleration sensor core");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index dff4d7dd101c..be8cc598b88e 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -209,13 +209,11 @@ static int bmc150_accel_probe(struct i2c_client *client,
return 0;
}
-static int bmc150_accel_remove(struct i2c_client *client)
+static void bmc150_accel_remove(struct i2c_client *client)
{
bmc150_acpi_dual_accel_remove(client);
bmc150_accel_core_remove(&client->dev);
-
- return 0;
}
static const struct acpi_device_id bmc150_accel_acpi_match[] = {
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index bca4cf98bf4d..84edcc78d796 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -606,7 +606,7 @@ void bmi088_accel_core_remove(struct device *dev)
}
EXPORT_SYMBOL_NS_GPL(bmi088_accel_core_remove, IIO_BMI088);
-static int __maybe_unused bmi088_accel_runtime_suspend(struct device *dev)
+static int bmi088_accel_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmi088_accel_data *data = iio_priv(indio_dev);
@@ -614,7 +614,7 @@ static int __maybe_unused bmi088_accel_runtime_suspend(struct device *dev)
return bmi088_accel_power_down(data);
}
-static int __maybe_unused bmi088_accel_runtime_resume(struct device *dev)
+static int bmi088_accel_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmi088_accel_data *data = iio_priv(indio_dev);
@@ -622,13 +622,10 @@ static int __maybe_unused bmi088_accel_runtime_resume(struct device *dev)
return bmi088_accel_power_up(data);
}
-const struct dev_pm_ops bmi088_accel_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(bmi088_accel_runtime_suspend,
- bmi088_accel_runtime_resume, NULL)
-};
-EXPORT_SYMBOL_NS_GPL(bmi088_accel_pm_ops, IIO_BMI088);
+EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(bmi088_accel_pm_ops,
+ bmi088_accel_runtime_suspend,
+ bmi088_accel_runtime_resume, NULL,
+ IIO_BMI088);
MODULE_AUTHOR("Niek van Agt <niek.van.agt@topicproducts.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c
index 9e2ed3bd5661..ee540edd8412 100644
--- a/drivers/iio/accel/bmi088-accel-spi.c
+++ b/drivers/iio/accel/bmi088-accel-spi.c
@@ -80,7 +80,7 @@ MODULE_DEVICE_TABLE(spi, bmi088_accel_id);
static struct spi_driver bmi088_accel_driver = {
.driver = {
.name = "bmi088_accel_spi",
- .pm = &bmi088_accel_pm_ops,
+ .pm = pm_ptr(&bmi088_accel_pm_ops),
.of_match_table = bmi088_of_match,
},
.probe = bmi088_accel_probe,
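The two hunks above move bmi088 to the modern runtime-PM helpers: the callbacks lose their __maybe_unused annotations and the ops are referenced through pm_ptr(), which lets the compiler drop them when CONFIG_PM is disabled without triggering unused warnings. A minimal sketch of that pattern (example_* names are hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	return 0;	/* power the device down here */
}

static int example_runtime_resume(struct device *dev)
{
	return 0;	/* power the device back up here */
}

static const struct dev_pm_ops example_pm_ops = {
	RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

/* At the bus-driver level:  .pm = pm_ptr(&example_pm_ops), */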
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 748b35c2f0c3..adc66b3615c0 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1064,7 +1064,7 @@ static int kxcjk1013_write_event_config(struct iio_dev *indio_dev,
/*
* We will expect the enable and disable to do operation in
- * in reverse order. This will happen here anyway as our
+ * reverse order. This will happen here anyway as our
* resume operation uses sync mode runtime pm calls, the
* suspend operation will be delayed by autosuspend delay
* So the disable operation will still happen in reverse of
@@ -1611,7 +1611,7 @@ err_poweroff:
return ret;
}
-static int kxcjk1013_remove(struct i2c_client *client)
+static void kxcjk1013_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct kxcjk1013_data *data = iio_priv(indio_dev);
@@ -1630,8 +1630,6 @@ static int kxcjk1013_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
kxcjk1013_set_mode(data, STANDBY);
mutex_unlock(&data->mutex);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index d57f264bd6c8..61346ea8ef19 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -32,11 +32,9 @@ static int kxsd9_i2c_probe(struct i2c_client *i2c,
i2c->name);
}
-static int kxsd9_i2c_remove(struct i2c_client *client)
+static void kxsd9_i2c_remove(struct i2c_client *client)
{
kxsd9_common_remove(&client->dev);
-
- return 0;
}
static const struct of_device_id kxsd9_of_match[] = {
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index c15d16e7f1da..2462000e0519 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -151,15 +151,13 @@ static int mc3230_probe(struct i2c_client *client,
return ret;
}
-static int mc3230_remove(struct i2c_client *client)
+static void mc3230_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
mc3230_set_opcon(iio_priv(indio_dev), MC3230_MODE_OPCON_STANDBY);
-
- return 0;
}
static int mc3230_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma7455_i2c.c b/drivers/iio/accel/mma7455_i2c.c
index a3b84e8a3ea8..c63b321b01cd 100644
--- a/drivers/iio/accel/mma7455_i2c.c
+++ b/drivers/iio/accel/mma7455_i2c.c
@@ -26,11 +26,9 @@ static int mma7455_i2c_probe(struct i2c_client *i2c,
return mma7455_core_probe(&i2c->dev, regmap, name);
}
-static int mma7455_i2c_remove(struct i2c_client *i2c)
+static void mma7455_i2c_remove(struct i2c_client *i2c)
{
mma7455_core_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id mma7455_i2c_ids[] = {
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 794f2f383303..85829990bbad 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -207,7 +207,7 @@ static int mma7660_probe(struct i2c_client *client,
return ret;
}
-static int mma7660_remove(struct i2c_client *client)
+static void mma7660_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
@@ -218,8 +218,6 @@ static int mma7660_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to put device in stand-by mode (%pe), ignoring\n",
ERR_PTR(ret));
-
- return 0;
}
static int mma7660_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index c7d9ca96dbaa..3ba28c2ff68a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1735,7 +1735,7 @@ disable_regulator_vdd:
return ret;
}
-static int mma8452_remove(struct i2c_client *client)
+static void mma8452_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mma8452_data *data = iio_priv(indio_dev);
@@ -1751,8 +1751,6 @@ static int mma8452_remove(struct i2c_client *client)
regulator_disable(data->vddio_reg);
regulator_disable(data->vdd_reg);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 123cdbbb265c..f7a793f4a8e3 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -509,7 +509,7 @@ out_poweroff:
return ret;
}
-static int mma9551_remove(struct i2c_client *client)
+static void mma9551_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mma9551_data *data = iio_priv(indio_dev);
@@ -522,8 +522,6 @@ static int mma9551_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
mma9551_set_device_state(data->client, false);
mutex_unlock(&data->mutex);
-
- return 0;
}
static int mma9551_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 09df58d4be33..2da0e005b13e 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1148,7 +1148,7 @@ out_poweroff:
return ret;
}
-static int mma9553_remove(struct i2c_client *client)
+static void mma9553_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mma9553_data *data = iio_priv(indio_dev);
@@ -1161,8 +1161,6 @@ static int mma9553_remove(struct i2c_client *client)
mutex_lock(&data->mutex);
mma9551_set_device_state(data->client, false);
mutex_unlock(&data->mutex);
-
- return 0;
}
static int mma9553_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
new file mode 100644
index 000000000000..2fded3759171
--- /dev/null
+++ b/drivers/iio/accel/msa311.c
@@ -0,0 +1,1321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MEMSensing digital 3-Axis accelerometer
+ *
+ * MSA311 is a tri-axial, low-g accelerometer with an I2C digital output for
+ * sensitive consumer applications. It has dynamically user-selectable full
+ * scale ranges of +-2g/+-4g/+-8g/+-16g and allows acceleration measurements
+ * with output data rates from 1Hz to 1000Hz.
+ *
+ * MSA311 is available in an ultra small (2mm x 2mm, height 0.95mm) LGA package
+ * and is guaranteed to operate over -40C to +85C.
+ *
+ * This driver supports the following MSA311 features:
+ * - IIO interface
+ * - Different power modes: NORMAL, SUSPEND
+ * - ODR (Output Data Rate) selection
+ * - Scale selection
+ * - IIO triggered buffer
+ * - NEW_DATA interrupt + trigger
+ *
+ * Features still to be implemented:
+ * - Motion Events: ACTIVE, TAP, ORIENT, FREEFALL
+ * - Low Power mode
+ *
+ * Copyright (c) 2022, SberDevices. All Rights Reserved.
+ *
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/string_helpers.h>
+#include <linux/units.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define MSA311_SOFT_RESET_REG 0x00
+#define MSA311_PARTID_REG 0x01
+#define MSA311_ACC_X_REG 0x02
+#define MSA311_ACC_Y_REG 0x04
+#define MSA311_ACC_Z_REG 0x06
+#define MSA311_MOTION_INT_REG 0x09
+#define MSA311_DATA_INT_REG 0x0A
+#define MSA311_TAP_ACTIVE_STS_REG 0x0B
+#define MSA311_ORIENT_STS_REG 0x0C
+#define MSA311_RANGE_REG 0x0F
+#define MSA311_ODR_REG 0x10
+#define MSA311_PWR_MODE_REG 0x11
+#define MSA311_SWAP_POLARITY_REG 0x12
+#define MSA311_INT_SET_0_REG 0x16
+#define MSA311_INT_SET_1_REG 0x17
+#define MSA311_INT_MAP_0_REG 0x19
+#define MSA311_INT_MAP_1_REG 0x1A
+#define MSA311_INT_CONFIG_REG 0x20
+#define MSA311_INT_LATCH_REG 0x21
+#define MSA311_FREEFALL_DUR_REG 0x22
+#define MSA311_FREEFALL_TH_REG 0x23
+#define MSA311_FREEFALL_HY_REG 0x24
+#define MSA311_ACTIVE_DUR_REG 0x27
+#define MSA311_ACTIVE_TH_REG 0x28
+#define MSA311_TAP_DUR_REG 0x2A
+#define MSA311_TAP_TH_REG 0x2B
+#define MSA311_ORIENT_HY_REG 0x2C
+#define MSA311_Z_BLOCK_REG 0x2D
+#define MSA311_OFFSET_X_REG 0x38
+#define MSA311_OFFSET_Y_REG 0x39
+#define MSA311_OFFSET_Z_REG 0x3A
+
+enum msa311_fields {
+ /* Soft_Reset */
+ F_SOFT_RESET_I2C, F_SOFT_RESET_SPI,
+ /* Motion_Interrupt */
+ F_ORIENT_INT, F_S_TAP_INT, F_D_TAP_INT, F_ACTIVE_INT, F_FREEFALL_INT,
+ /* Data_Interrupt */
+ F_NEW_DATA_INT,
+ /* Tap_Active_Status */
+ F_TAP_SIGN, F_TAP_FIRST_X, F_TAP_FIRST_Y, F_TAP_FIRST_Z, F_ACTV_SIGN,
+ F_ACTV_FIRST_X, F_ACTV_FIRST_Y, F_ACTV_FIRST_Z,
+ /* Orientation_Status */
+ F_ORIENT_Z, F_ORIENT_X_Y,
+ /* Range */
+ F_FS,
+ /* ODR */
+ F_X_AXIS_DIS, F_Y_AXIS_DIS, F_Z_AXIS_DIS, F_ODR,
+ /* Power Mode/Bandwidth */
+ F_PWR_MODE, F_LOW_POWER_BW,
+ /* Swap_Polarity */
+ F_X_POLARITY, F_Y_POLARITY, F_Z_POLARITY, F_X_Y_SWAP,
+ /* Int_Set_0 */
+ F_ORIENT_INT_EN, F_S_TAP_INT_EN, F_D_TAP_INT_EN, F_ACTIVE_INT_EN_Z,
+ F_ACTIVE_INT_EN_Y, F_ACTIVE_INT_EN_X,
+ /* Int_Set_1 */
+ F_NEW_DATA_INT_EN, F_FREEFALL_INT_EN,
+ /* Int_Map_0 */
+ F_INT1_ORIENT, F_INT1_S_TAP, F_INT1_D_TAP, F_INT1_ACTIVE,
+ F_INT1_FREEFALL,
+ /* Int_Map_1 */
+ F_INT1_NEW_DATA,
+ /* Int_Config */
+ F_INT1_OD, F_INT1_LVL,
+ /* Int_Latch */
+ F_RESET_INT, F_LATCH_INT,
+ /* Freefall_Hy */
+ F_FREEFALL_MODE, F_FREEFALL_HY,
+ /* Active_Dur */
+ F_ACTIVE_DUR,
+ /* Tap_Dur */
+ F_TAP_QUIET, F_TAP_SHOCK, F_TAP_DUR,
+ /* Tap_Th */
+ F_TAP_TH,
+ /* Orient_Hy */
+ F_ORIENT_HYST, F_ORIENT_BLOCKING, F_ORIENT_MODE,
+ /* Z_Block */
+ F_Z_BLOCKING,
+ /* End of register map */
+ F_MAX_FIELDS,
+};
+
+static const struct reg_field msa311_reg_fields[] = {
+ /* Soft_Reset */
+ [F_SOFT_RESET_I2C] = REG_FIELD(MSA311_SOFT_RESET_REG, 2, 2),
+ [F_SOFT_RESET_SPI] = REG_FIELD(MSA311_SOFT_RESET_REG, 5, 5),
+ /* Motion_Interrupt */
+ [F_ORIENT_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 6, 6),
+ [F_S_TAP_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 5, 5),
+ [F_D_TAP_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 4, 4),
+ [F_ACTIVE_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 2, 2),
+ [F_FREEFALL_INT] = REG_FIELD(MSA311_MOTION_INT_REG, 0, 0),
+ /* Data_Interrupt */
+ [F_NEW_DATA_INT] = REG_FIELD(MSA311_DATA_INT_REG, 0, 0),
+ /* Tap_Active_Status */
+ [F_TAP_SIGN] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 7, 7),
+ [F_TAP_FIRST_X] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 6, 6),
+ [F_TAP_FIRST_Y] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 5, 5),
+ [F_TAP_FIRST_Z] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 4, 4),
+ [F_ACTV_SIGN] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 3, 3),
+ [F_ACTV_FIRST_X] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 2, 2),
+ [F_ACTV_FIRST_Y] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 1, 1),
+ [F_ACTV_FIRST_Z] = REG_FIELD(MSA311_TAP_ACTIVE_STS_REG, 0, 0),
+ /* Orientation_Status */
+ [F_ORIENT_Z] = REG_FIELD(MSA311_ORIENT_STS_REG, 6, 6),
+ [F_ORIENT_X_Y] = REG_FIELD(MSA311_ORIENT_STS_REG, 4, 5),
+ /* Range */
+ [F_FS] = REG_FIELD(MSA311_RANGE_REG, 0, 1),
+ /* ODR */
+ [F_X_AXIS_DIS] = REG_FIELD(MSA311_ODR_REG, 7, 7),
+ [F_Y_AXIS_DIS] = REG_FIELD(MSA311_ODR_REG, 6, 6),
+ [F_Z_AXIS_DIS] = REG_FIELD(MSA311_ODR_REG, 5, 5),
+ [F_ODR] = REG_FIELD(MSA311_ODR_REG, 0, 3),
+ /* Power Mode/Bandwidth */
+ [F_PWR_MODE] = REG_FIELD(MSA311_PWR_MODE_REG, 6, 7),
+ [F_LOW_POWER_BW] = REG_FIELD(MSA311_PWR_MODE_REG, 1, 4),
+ /* Swap_Polarity */
+ [F_X_POLARITY] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 3, 3),
+ [F_Y_POLARITY] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 2, 2),
+ [F_Z_POLARITY] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 1, 1),
+ [F_X_Y_SWAP] = REG_FIELD(MSA311_SWAP_POLARITY_REG, 0, 0),
+ /* Int_Set_0 */
+ [F_ORIENT_INT_EN] = REG_FIELD(MSA311_INT_SET_0_REG, 6, 6),
+ [F_S_TAP_INT_EN] = REG_FIELD(MSA311_INT_SET_0_REG, 5, 5),
+ [F_D_TAP_INT_EN] = REG_FIELD(MSA311_INT_SET_0_REG, 4, 4),
+ [F_ACTIVE_INT_EN_Z] = REG_FIELD(MSA311_INT_SET_0_REG, 2, 2),
+ [F_ACTIVE_INT_EN_Y] = REG_FIELD(MSA311_INT_SET_0_REG, 1, 1),
+ [F_ACTIVE_INT_EN_X] = REG_FIELD(MSA311_INT_SET_0_REG, 0, 0),
+ /* Int_Set_1 */
+ [F_NEW_DATA_INT_EN] = REG_FIELD(MSA311_INT_SET_1_REG, 4, 4),
+ [F_FREEFALL_INT_EN] = REG_FIELD(MSA311_INT_SET_1_REG, 3, 3),
+ /* Int_Map_0 */
+ [F_INT1_ORIENT] = REG_FIELD(MSA311_INT_MAP_0_REG, 6, 6),
+ [F_INT1_S_TAP] = REG_FIELD(MSA311_INT_MAP_0_REG, 5, 5),
+ [F_INT1_D_TAP] = REG_FIELD(MSA311_INT_MAP_0_REG, 4, 4),
+ [F_INT1_ACTIVE] = REG_FIELD(MSA311_INT_MAP_0_REG, 2, 2),
+ [F_INT1_FREEFALL] = REG_FIELD(MSA311_INT_MAP_0_REG, 0, 0),
+ /* Int_Map_1 */
+ [F_INT1_NEW_DATA] = REG_FIELD(MSA311_INT_MAP_1_REG, 0, 0),
+ /* Int_Config */
+ [F_INT1_OD] = REG_FIELD(MSA311_INT_CONFIG_REG, 1, 1),
+ [F_INT1_LVL] = REG_FIELD(MSA311_INT_CONFIG_REG, 0, 0),
+ /* Int_Latch */
+ [F_RESET_INT] = REG_FIELD(MSA311_INT_LATCH_REG, 7, 7),
+ [F_LATCH_INT] = REG_FIELD(MSA311_INT_LATCH_REG, 0, 3),
+ /* Freefall_Hy */
+ [F_FREEFALL_MODE] = REG_FIELD(MSA311_FREEFALL_HY_REG, 2, 2),
+ [F_FREEFALL_HY] = REG_FIELD(MSA311_FREEFALL_HY_REG, 0, 1),
+ /* Active_Dur */
+ [F_ACTIVE_DUR] = REG_FIELD(MSA311_ACTIVE_DUR_REG, 0, 1),
+ /* Tap_Dur */
+ [F_TAP_QUIET] = REG_FIELD(MSA311_TAP_DUR_REG, 7, 7),
+ [F_TAP_SHOCK] = REG_FIELD(MSA311_TAP_DUR_REG, 6, 6),
+ [F_TAP_DUR] = REG_FIELD(MSA311_TAP_DUR_REG, 0, 2),
+ /* Tap_Th */
+ [F_TAP_TH] = REG_FIELD(MSA311_TAP_TH_REG, 0, 4),
+ /* Orient_Hy */
+ [F_ORIENT_HYST] = REG_FIELD(MSA311_ORIENT_HY_REG, 4, 6),
+ [F_ORIENT_BLOCKING] = REG_FIELD(MSA311_ORIENT_HY_REG, 2, 3),
+ [F_ORIENT_MODE] = REG_FIELD(MSA311_ORIENT_HY_REG, 0, 1),
+ /* Z_Block */
+ [F_Z_BLOCKING] = REG_FIELD(MSA311_Z_BLOCK_REG, 0, 3),
+};
+
+#define MSA311_WHO_AM_I 0x13
+
+/*
+ * Possible Full Scale ranges
+ *
+ * Axis data is 12-bit signed value, so
+ *
+ * fs0 = (2 + 2) * 9.81 / (2^11) = 0.009580
+ * fs1 = (4 + 4) * 9.81 / (2^11) = 0.019160
+ * fs2 = (8 + 8) * 9.81 / (2^11) = 0.038320
+ * fs3 = (16 + 16) * 9.81 / (2^11) = 0.076641
+ */
+enum {
+ MSA311_FS_2G,
+ MSA311_FS_4G,
+ MSA311_FS_8G,
+ MSA311_FS_16G,
+};
+
+struct iio_decimal_fract {
+ int integral;
+ int microfract;
+};
+
+static const struct iio_decimal_fract msa311_fs_table[] = {
+ {0, 9580}, {0, 19160}, {0, 38320}, {0, 76641},
+};
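For reference, a minimal sketch of how these scale entries would turn a raw 12-bit sample into micro m/s^2 (editor's illustration; msa311_raw_to_ums2() is a hypothetical helper, only msa311_fs_table above is from the patch):

static int msa311_raw_to_ums2(int raw, unsigned int fs)
{
	/* Scale per LSB in micro m/s^2, e.g. {0, 76641} for the +-16g range */
	int scale_ums2 = msa311_fs_table[fs].integral * 1000000 +
			 msa311_fs_table[fs].microfract;

	/* raw = 2047 at +-16g gives ~156,884,127 micro m/s^2, i.e. ~16g * 9.81 */
	return raw * scale_ums2;
}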
+
+/* Possible Output Data Rate values */
+enum {
+ MSA311_ODR_1_HZ,
+ MSA311_ODR_1_95_HZ,
+ MSA311_ODR_3_9_HZ,
+ MSA311_ODR_7_81_HZ,
+ MSA311_ODR_15_63_HZ,
+ MSA311_ODR_31_25_HZ,
+ MSA311_ODR_62_5_HZ,
+ MSA311_ODR_125_HZ,
+ MSA311_ODR_250_HZ,
+ MSA311_ODR_500_HZ,
+ MSA311_ODR_1000_HZ,
+};
+
+static const struct iio_decimal_fract msa311_odr_table[] = {
+ {1, 0}, {1, 950000}, {3, 900000}, {7, 810000}, {15, 630000},
+ {31, 250000}, {62, 500000}, {125, 0}, {250, 0}, {500, 0}, {1000, 0},
+};
+
+/* All supported power modes */
+#define MSA311_PWR_MODE_NORMAL 0b00
+#define MSA311_PWR_MODE_LOW 0b01
+#define MSA311_PWR_MODE_UNKNOWN 0b10
+#define MSA311_PWR_MODE_SUSPEND 0b11
+static const char * const msa311_pwr_modes[] = {
+ [MSA311_PWR_MODE_NORMAL] = "normal",
+ [MSA311_PWR_MODE_LOW] = "low",
+ [MSA311_PWR_MODE_UNKNOWN] = "unknown",
+ [MSA311_PWR_MODE_SUSPEND] = "suspend",
+};
+
+/* Autosuspend delay */
+#define MSA311_PWR_SLEEP_DELAY_MS 2000
+
+/* Possible INT1 types and levels */
+enum {
+ MSA311_INT1_OD_PUSH_PULL,
+ MSA311_INT1_OD_OPEN_DRAIN,
+};
+
+enum {
+ MSA311_INT1_LVL_LOW,
+ MSA311_INT1_LVL_HIGH,
+};
+
+/* Latch INT modes */
+#define MSA311_LATCH_INT_NOT_LATCHED 0b0000
+#define MSA311_LATCH_INT_250MS 0b0001
+#define MSA311_LATCH_INT_500MS 0b0010
+#define MSA311_LATCH_INT_1S 0b0011
+#define MSA311_LATCH_INT_2S 0b0100
+#define MSA311_LATCH_INT_4S 0b0101
+#define MSA311_LATCH_INT_8S 0b0110
+#define MSA311_LATCH_INT_1MS 0b1010
+#define MSA311_LATCH_INT_2MS 0b1011
+#define MSA311_LATCH_INT_25MS 0b1100
+#define MSA311_LATCH_INT_50MS 0b1101
+#define MSA311_LATCH_INT_100MS 0b1110
+#define MSA311_LATCH_INT_LATCHED 0b0111
+
+static const struct regmap_range msa311_readonly_registers[] = {
+ regmap_reg_range(MSA311_PARTID_REG, MSA311_ORIENT_STS_REG),
+};
+
+static const struct regmap_access_table msa311_writeable_table = {
+ .no_ranges = msa311_readonly_registers,
+ .n_no_ranges = ARRAY_SIZE(msa311_readonly_registers),
+};
+
+static const struct regmap_range msa311_writeonly_registers[] = {
+ regmap_reg_range(MSA311_SOFT_RESET_REG, MSA311_SOFT_RESET_REG),
+};
+
+static const struct regmap_access_table msa311_readable_table = {
+ .no_ranges = msa311_writeonly_registers,
+ .n_no_ranges = ARRAY_SIZE(msa311_writeonly_registers),
+};
+
+static const struct regmap_range msa311_volatile_registers[] = {
+ regmap_reg_range(MSA311_ACC_X_REG, MSA311_ORIENT_STS_REG),
+};
+
+static const struct regmap_access_table msa311_volatile_table = {
+ .yes_ranges = msa311_volatile_registers,
+ .n_yes_ranges = ARRAY_SIZE(msa311_volatile_registers),
+};
+
+static const struct regmap_config msa311_regmap_config = {
+ .name = "msa311",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MSA311_OFFSET_Z_REG,
+ .wr_table = &msa311_writeable_table,
+ .rd_table = &msa311_readable_table,
+ .volatile_table = &msa311_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#define MSA311_GENMASK(field) ({ \
+ typeof(&(msa311_reg_fields)[0]) _field; \
+ _field = &msa311_reg_fields[(field)]; \
+ GENMASK(_field->msb, _field->lsb); \
+})
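A quick worked expansion of the macro above (editor's note; the wrapper function is purely illustrative, the field definitions come from msa311_reg_fields):

static void msa311_genmask_example(void)
{
	/* F_FS is REG_FIELD(MSA311_RANGE_REG, 0, 1), so this is GENMASK(1, 0) == 0x03 */
	u8 fs_mask = MSA311_GENMASK(F_FS);

	/* Soft reset bits 2 and 5 combine to 0x04 | 0x20 == 0x24 */
	u8 rst_mask = MSA311_GENMASK(F_SOFT_RESET_I2C) |
		      MSA311_GENMASK(F_SOFT_RESET_SPI);
}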
+
+/**
+ * struct msa311_priv - MSA311 internal private state
+ * @regs: Register map used to abstract slave register accesses over the
+ *        underlying I2C bus
+ * @fields: Abstraction objects for access to each register's fields
+ * @dev: Device handle associated with the appropriate bus client
+ * @lock: Protects msa311 device state between setup and data access routines
+ * (power transitions, samp_freq/scale tune, retrieving axes data, etc)
+ * @chip_name: Chip name in the format "msa311-%02x" % partid
+ * @new_data_trig: Optional NEW_DATA interrupt driven trigger used
+ *                 to notify external consumers that a new sample is ready
+ * @vdd: Optional external voltage regulator for the device power supply
+ */
+struct msa311_priv {
+ struct regmap *regs;
+ struct regmap_field *fields[F_MAX_FIELDS];
+
+ struct device *dev;
+ struct mutex lock;
+ char *chip_name;
+
+ struct iio_trigger *new_data_trig;
+ struct regulator *vdd;
+};
+
+enum msa311_si {
+ MSA311_SI_X,
+ MSA311_SI_Y,
+ MSA311_SI_Z,
+ MSA311_SI_TIMESTAMP,
+};
+
+#define MSA311_ACCEL_CHANNEL(axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = MSA311_SI_##axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_LE, \
+ }, \
+ .datasheet_name = "ACC_"#axis, \
+}
+
+static const struct iio_chan_spec msa311_channels[] = {
+ MSA311_ACCEL_CHANNEL(X),
+ MSA311_ACCEL_CHANNEL(Y),
+ MSA311_ACCEL_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(MSA311_SI_TIMESTAMP),
+};
+
+/**
+ * msa311_get_odr() - Read Output Data Rate (ODR) value from MSA311 accel
+ * @msa311: MSA311 internal private state
+ * @odr: output ODR value
+ *
+ * This function should be called under msa311->lock.
+ *
+ * Return: 0 on success, -ERRNO in other failures
+ */
+static int msa311_get_odr(struct msa311_priv *msa311, unsigned int *odr)
+{
+ int err;
+
+ err = regmap_field_read(msa311->fields[F_ODR], odr);
+ if (err)
+ return err;
+
+ /*
+	 * Per the datasheet, ODR register values 1010-1111 all select the
+	 * 1000Hz rate, so normalize them here and always return 1010 for
+	 * that rate.
+ */
+ if (*odr > MSA311_ODR_1000_HZ)
+ *odr = MSA311_ODR_1000_HZ;
+
+ return 0;
+}
+
+/**
+ * msa311_set_odr() - Setup Output Data Rate (ODR) value for MSA311 accel
+ * @msa311: MSA311 internal private state
+ * @odr: requested ODR value
+ *
+ * This function should be called under msa311->lock. Possible ODR values:
+ * - 1Hz (not available in normal mode)
+ * - 1.95Hz (not available in normal mode)
+ * - 3.9Hz
+ * - 7.81Hz
+ * - 15.63Hz
+ * - 31.25Hz
+ * - 62.5Hz
+ * - 125Hz
+ * - 250Hz
+ * - 500Hz
+ * - 1000Hz
+ *
+ * Return: 0 on success, -EINVAL for an ODR value not allowed in the current
+ *         power mode, -ERRNO in other failures
+ */
+static int msa311_set_odr(struct msa311_priv *msa311, unsigned int odr)
+{
+ struct device *dev = msa311->dev;
+ unsigned int pwr_mode;
+ bool good_odr;
+ int err;
+
+ err = regmap_field_read(msa311->fields[F_PWR_MODE], &pwr_mode);
+ if (err)
+ return err;
+
+ /* Filter bad ODR values */
+ if (pwr_mode == MSA311_PWR_MODE_NORMAL)
+ good_odr = (odr > MSA311_ODR_1_95_HZ);
+ else
+ good_odr = false;
+
+ if (!good_odr) {
+ dev_err(dev,
+ "can't set odr %u.%06uHz, not available in %s mode\n",
+ msa311_odr_table[odr].integral,
+ msa311_odr_table[odr].microfract,
+ msa311_pwr_modes[pwr_mode]);
+ return -EINVAL;
+ }
+
+ return regmap_field_write(msa311->fields[F_ODR], odr);
+}
+
+/**
+ * msa311_wait_for_next_data() - Wait for the next accel data to become available after resume
+ * @msa311: MSA311 internal private state
+ *
+ * Return: 0 on success, -EINTR if msleep() was interrupted,
+ * -ERRNO in other failures
+ */
+static int msa311_wait_for_next_data(struct msa311_priv *msa311)
+{
+ static const unsigned int unintr_thresh_ms = 20;
+ struct device *dev = msa311->dev;
+ unsigned long freq_uhz;
+ unsigned long wait_ms;
+ unsigned int odr;
+ int err;
+
+ err = msa311_get_odr(msa311, &odr);
+ if (err) {
+ dev_err(dev, "can't get actual frequency (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ /*
+	 * After the msa311 has resumed, we need to wait for the accel
+	 * logic to refresh its data.
+	 * The timeout is calculated from the current ODR value.
+	 * If the resulting timeout is short enough (20ms or less),
+	 * we can wait for the next data in an uninterruptible sleep.
+ */
+ freq_uhz = msa311_odr_table[odr].integral * MICROHZ_PER_HZ +
+ msa311_odr_table[odr].microfract;
+ wait_ms = (MICROHZ_PER_HZ / freq_uhz) * MSEC_PER_SEC;
+
+ if (wait_ms < unintr_thresh_ms)
+ usleep_range(wait_ms * USEC_PER_MSEC,
+ unintr_thresh_ms * USEC_PER_MSEC);
+ else if (msleep_interruptible(wait_ms))
+ return -EINTR;
+
+ return 0;
+}
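A rough worked example of the delay computation above (editor's sketch; it simply mirrors the arithmetic in msa311_wait_for_next_data() for the slowest table entry):

static unsigned long msa311_example_wait_ms(void)
{
	/* The 1Hz ODR entry is {1, 0}, i.e. 1000000 microhertz */
	unsigned long freq_uhz = 1 * MICROHZ_PER_HZ + 0;

	/*
	 * 1000000 / 1000000 * 1000 == 1000ms, above the 20ms threshold,
	 * so this case takes the msleep_interruptible() path.
	 */
	return (MICROHZ_PER_HZ / freq_uhz) * MSEC_PER_SEC;
}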
+
+/**
+ * msa311_set_pwr_mode() - Switch MSA311 to the requested power mode
+ * @msa311: MSA311 internal private state
+ * @mode: Power mode can be equal to NORMAL or SUSPEND
+ *
+ * This function should be called under msa311->lock.
+ *
+ * Return: 0 on success, -ERRNO on failure
+ */
+static int msa311_set_pwr_mode(struct msa311_priv *msa311, unsigned int mode)
+{
+ struct device *dev = msa311->dev;
+ unsigned int prev_mode;
+ int err;
+
+ if (mode >= ARRAY_SIZE(msa311_pwr_modes))
+ return -EINVAL;
+
+ dev_dbg(dev, "transition to %s mode\n", msa311_pwr_modes[mode]);
+
+ err = regmap_field_read(msa311->fields[F_PWR_MODE], &prev_mode);
+ if (err)
+ return err;
+
+ err = regmap_field_write(msa311->fields[F_PWR_MODE], mode);
+ if (err)
+ return err;
+
+	/* Wait for actual data if we have just woken up */
+ if (prev_mode == MSA311_PWR_MODE_SUSPEND &&
+ mode == MSA311_PWR_MODE_NORMAL)
+ return msa311_wait_for_next_data(msa311);
+
+ return 0;
+}
+
+/**
+ * msa311_get_axis() - Read MSA311 accel data for certain IIO channel axis spec
+ * @msa311: MSA311 internal private state
+ * @chan: IIO channel specification
+ * @axis: Output accel axis data for requested IIO channel spec
+ *
+ * This function should be called under msa311->lock.
+ *
+ * Return: 0 on success, -EINVAL for unknown IIO channel specification,
+ * -ERRNO in other failures
+ */
+static int msa311_get_axis(struct msa311_priv *msa311,
+ const struct iio_chan_spec * const chan,
+ __le16 *axis)
+{
+ struct device *dev = msa311->dev;
+ unsigned int axis_reg;
+
+ if (chan->scan_index < MSA311_SI_X || chan->scan_index > MSA311_SI_Z) {
+ dev_err(dev, "invalid scan_index value [%d]\n",
+ chan->scan_index);
+ return -EINVAL;
+ }
+
+	/* Axis data registers are laid out 2 bytes apart, starting from the X axis */
+ axis_reg = MSA311_ACC_X_REG + (chan->scan_index << 1);
+
+ return regmap_bulk_read(msa311->regs, axis_reg, axis, sizeof(*axis));
+}
+
+static int msa311_read_raw_data(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ __le16 axis;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_get_axis(msa311, chan, &axis);
+ mutex_unlock(&msa311->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err) {
+ dev_err(dev, "can't get axis %s (%pe)\n",
+ chan->datasheet_name, ERR_PTR(err));
+ return err;
+ }
+
+ /*
+ * Axis data format is:
+ * ACC_X = (ACC_X_MSB[7:0] << 4) | ACC_X_LSB[7:4]
+ */
+ *val = sign_extend32(le16_to_cpu(axis) >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+
+ return IIO_VAL_INT;
+}
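To make the unpacking above concrete, a small worked example (editor's sketch; the register values are made up, while the shift/realbits follow the channel definition in this driver):

static int msa311_decode_example(void)
{
	/* Bulk read of ACC_X_LSB = 0xF0 and ACC_X_MSB = 0xFF gives 0xFFF0 */
	__le16 axis = cpu_to_le16(0xFFF0);

	/* (0xFFF0 >> 4) == 0x0FFF, sign-extended from bit 11 -> -1 LSB */
	return sign_extend32(le16_to_cpu(axis) >> 4, 11);
}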
+
+static int msa311_read_scale(struct iio_dev *indio_dev, int *val, int *val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int fs;
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = regmap_field_read(msa311->fields[F_FS], &fs);
+ mutex_unlock(&msa311->lock);
+ if (err) {
+ dev_err(dev, "can't get actual scale (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ *val = msa311_fs_table[fs].integral;
+ *val2 = msa311_fs_table[fs].microfract;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int msa311_read_samp_freq(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int odr;
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_get_odr(msa311, &odr);
+ mutex_unlock(&msa311->lock);
+ if (err) {
+ dev_err(dev, "can't get actual frequency (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ *val = msa311_odr_table[odr].integral;
+ *val2 = msa311_odr_table[odr].microfract;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int msa311_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return msa311_read_raw_data(indio_dev, chan, val, val2);
+
+ case IIO_CHAN_INFO_SCALE:
+ return msa311_read_scale(indio_dev, val, val2);
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return msa311_read_samp_freq(indio_dev, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int msa311_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)msa311_odr_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* ODR value has 2 ints (integer and fractional parts) */
+ *length = ARRAY_SIZE(msa311_odr_table) * 2;
+ return IIO_AVAIL_LIST;
+
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)msa311_fs_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* FS value has 2 ints (integer and fractional parts) */
+ *length = ARRAY_SIZE(msa311_fs_table) * 2;
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int msa311_write_scale(struct iio_dev *indio_dev, int val, int val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int fs;
+ int err;
+
+ /* We do not have fs >= 1, so skip such values */
+ if (val)
+ return 0;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ err = -EINVAL;
+ for (fs = 0; fs < ARRAY_SIZE(msa311_fs_table); fs++)
+ /* Do not check msa311_fs_table[fs].integral, it's always 0 */
+ if (val2 == msa311_fs_table[fs].microfract) {
+ mutex_lock(&msa311->lock);
+ err = regmap_field_write(msa311->fields[F_FS], fs);
+ mutex_unlock(&msa311->lock);
+ break;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err)
+ dev_err(dev, "can't update scale (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ unsigned int odr;
+ int err;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ /*
+	 * Changing the sampling frequency is prohibited while buffer mode is
+	 * enabled, because the MSA311 chip sometimes returns outliers when
+	 * the frequency is raised in the middle of a read operation.
+ */
+ err = iio_device_claim_direct_mode(indio_dev);
+ if (err)
+ return err;
+
+ err = -EINVAL;
+ for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++)
+ if (val == msa311_odr_table[odr].integral &&
+ val2 == msa311_odr_table[odr].microfract) {
+ mutex_lock(&msa311->lock);
+ err = msa311_set_odr(msa311, odr);
+ mutex_unlock(&msa311->lock);
+ break;
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err)
+ dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return msa311_write_scale(indio_dev, val, val2);
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return msa311_write_samp_freq(indio_dev, val, val2);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int msa311_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ int err;
+
+ if (reg > regmap_get_max_register(msa311->regs))
+ return -EINVAL;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err)
+ return err;
+
+ mutex_lock(&msa311->lock);
+
+ if (readval)
+ err = regmap_read(msa311->regs, reg, readval);
+ else
+ err = regmap_write(msa311->regs, reg, writeval);
+
+ mutex_unlock(&msa311->lock);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ if (err)
+ dev_err(dev, "can't %s register %u from debugfs (%pe)\n",
+ str_read_write(readval), reg, ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int msa311_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static int msa311_set_new_data_trig_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ struct device *dev = msa311->dev;
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = regmap_field_write(msa311->fields[F_NEW_DATA_INT_EN], state);
+ mutex_unlock(&msa311->lock);
+ if (err)
+ dev_err(dev,
+ "can't %s buffer due to new_data_int failure (%pe)\n",
+ str_enable_disable(state), ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ return iio_trigger_get_drvdata(trig) == indio_dev ? 0 : -EINVAL;
+}
+
+static irqreturn_t msa311_buffer_thread(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct msa311_priv *msa311 = iio_priv(pf->indio_dev);
+ struct iio_dev *indio_dev = pf->indio_dev;
+ const struct iio_chan_spec *chan;
+ struct device *dev = msa311->dev;
+ int bit, err, i = 0;
+ __le16 axis;
+ struct {
+ __le16 channels[MSA311_SI_Z + 1];
+ s64 ts __aligned(8);
+ } buf;
+
+ memset(&buf, 0, sizeof(buf));
+
+ mutex_lock(&msa311->lock);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ chan = &msa311_channels[bit];
+
+ err = msa311_get_axis(msa311, chan, &axis);
+ if (err) {
+ mutex_unlock(&msa311->lock);
+ dev_err(dev, "can't get axis %s (%pe)\n",
+ chan->datasheet_name, ERR_PTR(err));
+ goto notify_done;
+ }
+
+ buf.channels[i++] = axis;
+ }
+
+ mutex_unlock(&msa311->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &buf,
+ iio_get_time_ns(indio_dev));
+
+notify_done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t msa311_irq_thread(int irq, void *p)
+{
+ struct msa311_priv *msa311 = iio_priv(p);
+ unsigned int new_data_int_enabled;
+ struct device *dev = msa311->dev;
+ int err;
+
+ mutex_lock(&msa311->lock);
+
+ /*
+ * We do not check NEW_DATA int status, because based on the
+ * specification it's cleared automatically after a fixed time.
+	 * So just check that it is enabled by the driver logic.
+ */
+ err = regmap_field_read(msa311->fields[F_NEW_DATA_INT_EN],
+ &new_data_int_enabled);
+
+ mutex_unlock(&msa311->lock);
+ if (err) {
+ dev_err(dev, "can't read new_data interrupt state (%pe)\n",
+ ERR_PTR(err));
+ return IRQ_NONE;
+ }
+
+ if (new_data_int_enabled)
+ iio_trigger_poll_chained(msa311->new_data_trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info msa311_info = {
+ .read_raw = msa311_read_raw,
+ .read_avail = msa311_read_avail,
+ .write_raw = msa311_write_raw,
+ .debugfs_reg_access = msa311_debugfs_reg_access,
+};
+
+static const struct iio_buffer_setup_ops msa311_buffer_setup_ops = {
+ .preenable = msa311_buffer_preenable,
+ .postdisable = msa311_buffer_postdisable,
+};
+
+static const struct iio_trigger_ops msa311_new_data_trig_ops = {
+ .set_trigger_state = msa311_set_new_data_trig_state,
+ .validate_device = msa311_validate_device,
+};
+
+static int msa311_check_partid(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ unsigned int partid;
+ int err;
+
+ err = regmap_read(msa311->regs, MSA311_PARTID_REG, &partid);
+ if (err)
+ return dev_err_probe(dev, err, "failed to read partid\n");
+
+ if (partid != MSA311_WHO_AM_I)
+ dev_warn(dev, "invalid partid (%#x), expected (%#x)\n",
+ partid, MSA311_WHO_AM_I);
+
+ msa311->chip_name = devm_kasprintf(dev, GFP_KERNEL,
+ "msa311-%02x", partid);
+ if (!msa311->chip_name)
+ return dev_err_probe(dev, -ENOMEM, "can't alloc chip name\n");
+
+ return 0;
+}
+
+static int msa311_soft_reset(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ int err;
+
+ err = regmap_write(msa311->regs, MSA311_SOFT_RESET_REG,
+ MSA311_GENMASK(F_SOFT_RESET_I2C) |
+ MSA311_GENMASK(F_SOFT_RESET_SPI));
+ if (err)
+ return dev_err_probe(dev, err, "can't soft reset all logic\n");
+
+ return 0;
+}
+
+static int msa311_chip_init(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ const char zero_bulk[2] = { };
+ int err;
+
+ err = regmap_write(msa311->regs, MSA311_RANGE_REG, MSA311_FS_16G);
+ if (err)
+ return dev_err_probe(dev, err, "failed to setup accel range\n");
+
+ /* Disable all interrupts by default */
+ err = regmap_bulk_write(msa311->regs, MSA311_INT_SET_0_REG,
+ zero_bulk, sizeof(zero_bulk));
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't disable set0/set1 interrupts\n");
+
+ /* Unmap all INT1 interrupts by default */
+ err = regmap_bulk_write(msa311->regs, MSA311_INT_MAP_0_REG,
+ zero_bulk, sizeof(zero_bulk));
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to unmap map0/map1 interrupts\n");
+
+ /* Disable all axes by default */
+ err = regmap_update_bits(msa311->regs, MSA311_ODR_REG,
+ MSA311_GENMASK(F_X_AXIS_DIS) |
+ MSA311_GENMASK(F_Y_AXIS_DIS) |
+ MSA311_GENMASK(F_Z_AXIS_DIS), 0);
+ if (err)
+ return dev_err_probe(dev, err, "can't enable all axes\n");
+
+ err = msa311_set_odr(msa311, MSA311_ODR_125_HZ);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to set accel frequency\n");
+
+ return 0;
+}
+
+static int msa311_setup_interrupts(struct msa311_priv *msa311)
+{
+ struct device *dev = msa311->dev;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
+ struct iio_trigger *trig;
+ int err;
+
+	/* Keep going without interrupts if the I2C client has no valid IRQ */
+ if (i2c->irq <= 0)
+ return 0;
+
+ err = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ msa311_irq_thread, IRQF_ONESHOT,
+ msa311->chip_name, indio_dev);
+ if (err)
+ return dev_err_probe(dev, err, "failed to request IRQ\n");
+
+ trig = devm_iio_trigger_alloc(dev, "%s-new-data", msa311->chip_name);
+ if (!trig)
+ return dev_err_probe(dev, -ENOMEM,
+ "can't allocate newdata trigger\n");
+
+ msa311->new_data_trig = trig;
+ msa311->new_data_trig->ops = &msa311_new_data_trig_ops;
+ iio_trigger_set_drvdata(msa311->new_data_trig, indio_dev);
+
+ err = devm_iio_trigger_register(dev, msa311->new_data_trig);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't register newdata trigger\n");
+
+ err = regmap_field_write(msa311->fields[F_INT1_OD],
+ MSA311_INT1_OD_PUSH_PULL);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't enable push-pull interrupt\n");
+
+ err = regmap_field_write(msa311->fields[F_INT1_LVL],
+ MSA311_INT1_LVL_HIGH);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't set active interrupt level\n");
+
+ err = regmap_field_write(msa311->fields[F_LATCH_INT],
+ MSA311_LATCH_INT_LATCHED);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't latch interrupt\n");
+
+ err = regmap_field_write(msa311->fields[F_RESET_INT], 1);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't reset interrupt\n");
+
+ err = regmap_field_write(msa311->fields[F_INT1_NEW_DATA], 1);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't map new data interrupt\n");
+
+ return 0;
+}
+
+static int msa311_regmap_init(struct msa311_priv *msa311)
+{
+ struct regmap_field **fields = msa311->fields;
+ struct device *dev = msa311->dev;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct regmap *regmap;
+ int i;
+
+ regmap = devm_regmap_init_i2c(i2c, &msa311_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "failed to register i2c regmap\n");
+
+ msa311->regs = regmap;
+
+ for (i = 0; i < F_MAX_FIELDS; i++) {
+ fields[i] = devm_regmap_field_alloc(dev,
+ msa311->regs,
+ msa311_reg_fields[i]);
+ if (IS_ERR(msa311->fields[i]))
+ return dev_err_probe(dev, PTR_ERR(msa311->fields[i]),
+ "can't alloc field[%d]\n", i);
+ }
+
+ return 0;
+}
+
+static void msa311_powerdown(void *msa311)
+{
+ msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_SUSPEND);
+}
+
+static void msa311_vdd_disable(void *vdd)
+{
+ regulator_disable(vdd);
+}
+
+static int msa311_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct msa311_priv *msa311;
+ struct iio_dev *indio_dev;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*msa311));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "IIO device allocation failed\n");
+
+ msa311 = iio_priv(indio_dev);
+ msa311->dev = dev;
+ i2c_set_clientdata(i2c, indio_dev);
+
+ err = msa311_regmap_init(msa311);
+ if (err)
+ return err;
+
+ mutex_init(&msa311->lock);
+
+ msa311->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(msa311->vdd))
+ return dev_err_probe(dev, PTR_ERR(msa311->vdd),
+ "can't get vdd supply\n");
+
+ err = regulator_enable(msa311->vdd);
+ if (err)
+ return dev_err_probe(dev, err, "can't enable vdd supply\n");
+
+ err = devm_add_action_or_reset(dev, msa311_vdd_disable, msa311->vdd);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't add vdd disable action\n");
+
+ err = msa311_check_partid(msa311);
+ if (err)
+ return err;
+
+ err = msa311_soft_reset(msa311);
+ if (err)
+ return err;
+
+ err = msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_NORMAL);
+ if (err)
+ return dev_err_probe(dev, err, "failed to power on device\n");
+
+ /*
+	 * Register a deferred powerdown callback which suspends the chip
+	 * after the module is unloaded.
+	 *
+	 * MSA311 should be in SUSPEND mode in two cases:
+	 * 1) The driver is loaded, but there are no data or
+	 *    configuration requests to it (this is handled by the
+	 *    autosuspend feature).
+	 * 2) The driver is unloaded and the device is not used (a devm
+	 *    action handles this case).
+ */
+ err = devm_add_action_or_reset(dev, msa311_powerdown, msa311);
+ if (err)
+ return dev_err_probe(dev, err, "can't add powerdown action\n");
+
+ err = pm_runtime_set_active(dev);
+ if (err)
+ return err;
+
+ err = devm_pm_runtime_enable(dev);
+ if (err)
+ return err;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, MSA311_PWR_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ err = msa311_chip_init(msa311);
+ if (err)
+ return err;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = msa311_channels;
+ indio_dev->num_channels = ARRAY_SIZE(msa311_channels);
+ indio_dev->name = msa311->chip_name;
+ indio_dev->info = &msa311_info;
+
+ err = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ msa311_buffer_thread,
+ &msa311_buffer_setup_ops);
+ if (err)
+ return dev_err_probe(dev, err,
+ "can't setup IIO trigger buffer\n");
+
+ err = msa311_setup_interrupts(msa311);
+ if (err)
+ return err;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err)
+ return dev_err_probe(dev, err, "IIO device register failed\n");
+
+ return 0;
+}
+
+static int msa311_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_SUSPEND);
+ mutex_unlock(&msa311->lock);
+ if (err)
+ dev_err(dev, "failed to power off device (%pe)\n",
+ ERR_PTR(err));
+
+ return err;
+}
+
+static int msa311_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct msa311_priv *msa311 = iio_priv(indio_dev);
+ int err;
+
+ mutex_lock(&msa311->lock);
+ err = msa311_set_pwr_mode(msa311, MSA311_PWR_MODE_NORMAL);
+ mutex_unlock(&msa311->lock);
+ if (err)
+ dev_err(dev, "failed to power on device (%pe)\n",
+ ERR_PTR(err));
+
+ return err;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(msa311_pm_ops, msa311_runtime_suspend,
+ msa311_runtime_resume, NULL);
+
+static const struct i2c_device_id msa311_i2c_id[] = {
+ { .name = "msa311" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, msa311_i2c_id);
+
+static const struct of_device_id msa311_of_match[] = {
+ { .compatible = "memsensing,msa311" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msa311_of_match);
+
+static struct i2c_driver msa311_driver = {
+ .driver = {
+ .name = "msa311",
+ .of_match_table = msa311_of_match,
+ .pm = pm_ptr(&msa311_pm_ops),
+ },
+ .probe_new = msa311_probe,
+ .id_table = msa311_i2c_id,
+};
+module_i2c_driver(msa311_driver);
+
+MODULE_AUTHOR("Dmitry Rokosov <ddrokosov@sberdevices.ru>");
+MODULE_DESCRIPTION("MEMSensing MSA311 3-axis accelerometer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index ceca28913355..7b1d6fb692b3 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -597,7 +597,7 @@ err_power_off:
return ret;
}
-static int stk8312_remove(struct i2c_client *client)
+static void stk8312_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct stk8312_data *data = iio_priv(indio_dev);
@@ -609,8 +609,6 @@ static int stk8312_remove(struct i2c_client *client)
iio_trigger_unregister(data->dready_trig);
stk8312_set_mode(data, STK8312_MODE_STANDBY);
-
- return 0;
}
static int stk8312_suspend(struct device *dev)
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 7d59efb41e22..2f5e4ab2a6e7 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -490,7 +490,7 @@ err_power_off:
return ret;
}
-static int stk8ba50_remove(struct i2c_client *client)
+static void stk8ba50_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct stk8ba50_data *data = iio_priv(indio_dev);
@@ -502,8 +502,6 @@ static int stk8ba50_remove(struct i2c_client *client)
iio_trigger_unregister(data->dready_trig);
stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
-
- return 0;
}
static int stk8ba50_suspend(struct device *dev)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7fe5930891e0..791612ca6012 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -653,6 +653,20 @@ config MAX1118
To compile this driver as a module, choose M here: the module will be
called max1118.
+config MAX11205
+ tristate "Maxim max11205 ADC driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+ help
+ Say yes here to build support for Maxim max11205 16-bit, single-channel
+ ultra-low power delta-sigma ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max11205.
+
config MAX1241
tristate "Maxim max1241 ADC driver"
depends on SPI_MASTER
@@ -718,6 +732,8 @@ config MCP3422
config MCP3911
tristate "Microchip Technology MCP3911 driver"
depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Microchip Technology's MCP3911
analog to digital converter.
@@ -919,6 +935,21 @@ config ROCKCHIP_SARADC
To compile this driver as a module, choose M here: the
module will be called rockchip_saradc.
+config RICHTEK_RTQ6056
+ tristate "Richtek RTQ6056 Current and Power Monitor ADC"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+	  Say yes here to enable RTQ6056 ADC support.
+	  RTQ6056 is a high accuracy current-sense monitor with an I2C- and
+	  SMBus-compatible interface; the device provides full information
+	  for the system by reading out the load current and power.
+
+ This driver can also be built as a module. If so, the module will be
+ called rtq6056.
+
config RZG2L_ADC
tristate "Renesas RZ/G2L ADC driver"
depends on ARCH_RZG2L || COMPILE_TEST
@@ -1022,22 +1053,6 @@ config STMPE_ADC
Say yes here to build support for ST Microelectronics STMPE
built-in ADC block (stmpe811).
-config STX104
- tristate "Apex Embedded Systems STX104 driver"
- depends on PC104 && X86
- select ISA_BUS_API
- select GPIOLIB
- help
- Say yes here to build support for the Apex Embedded Systems STX104
- integrated analog PC/104 card.
-
- This driver supports the 16 channels of single-ended (8 channels of
- differential) analog inputs, 2 channels of analog output, 4 digital
- inputs, and 4 digital outputs provided by the STX104.
-
- The base port addresses for the devices may be configured via the base
- array module parameter.
-
config SUN4I_GPADC
tristate "Support for the Allwinner SoCs GPADC"
depends on IIO
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 1772a549a3c8..46caba7a010c 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
+obj-$(CONFIG_MAX11205) += max11205.o
obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MAX9611) += max9611.o
@@ -85,10 +86,10 @@ obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
+obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
-obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 930ce96e6ff5..4fa2126a354b 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -925,8 +925,8 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int ab8500_gpadc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
int i;
@@ -938,7 +938,7 @@ static int ab8500_gpadc_of_xlate(struct iio_dev *indio_dev,
}
static const struct iio_info ab8500_gpadc_info = {
- .of_xlate = ab8500_gpadc_of_xlate,
+ .fwnode_xlate = ab8500_gpadc_fwnode_xlate,
.read_raw = ab8500_gpadc_read_raw,
};
@@ -968,7 +968,7 @@ static int ab8500_gpadc_runtime_resume(struct device *dev)
/**
* ab8500_gpadc_parse_channel() - process devicetree channel configuration
* @dev: pointer to containing device
- * @np: device tree node for the channel to configure
+ * @fwnode: fw node for the channel to configure
* @ch: channel info to fill in
* @iio_chan: IIO channel specification to fill in
*
@@ -976,15 +976,15 @@ static int ab8500_gpadc_runtime_resume(struct device *dev)
* and define usage for things like AUX GPADC inputs more precisely.
*/
static int ab8500_gpadc_parse_channel(struct device *dev,
- struct device_node *np,
+ struct fwnode_handle *fwnode,
struct ab8500_gpadc_chan_info *ch,
struct iio_chan_spec *iio_chan)
{
- const char *name = np->name;
+ const char *name = fwnode_get_name(fwnode);
u32 chan;
int ret;
- ret = of_property_read_u32(np, "reg", &chan);
+ ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
return ret;
@@ -1021,22 +1021,20 @@ static int ab8500_gpadc_parse_channel(struct device *dev,
/**
* ab8500_gpadc_parse_channels() - Parse the GPADC channels from DT
* @gpadc: the GPADC to configure the channels for
- * @np: device tree node containing the channel configurations
* @chans: the IIO channels we parsed
* @nchans: the number of IIO channels we parsed
*/
static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
- struct device_node *np,
struct iio_chan_spec **chans_parsed,
unsigned int *nchans_parsed)
{
- struct device_node *child;
+ struct fwnode_handle *child;
struct ab8500_gpadc_chan_info *ch;
struct iio_chan_spec *iio_chans;
unsigned int nchans;
int i;
- nchans = of_get_available_child_count(np);
+ nchans = device_get_child_node_count(gpadc->dev);
if (!nchans) {
dev_err(gpadc->dev, "no channel children\n");
return -ENODEV;
@@ -1054,7 +1052,7 @@ static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(np, child) {
+ device_for_each_child_node(gpadc->dev, child) {
struct iio_chan_spec *iio_chan;
int ret;
@@ -1064,7 +1062,7 @@ static int ab8500_gpadc_parse_channels(struct ab8500_gpadc *gpadc,
ret = ab8500_gpadc_parse_channel(gpadc->dev, child, ch,
iio_chan);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
i++;
@@ -1081,7 +1079,6 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
struct ab8500_gpadc *gpadc;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
struct iio_chan_spec *iio_chans;
unsigned int n_iio_chans;
int ret;
@@ -1096,7 +1093,7 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
gpadc->dev = dev;
gpadc->ab8500 = dev_get_drvdata(dev->parent);
- ret = ab8500_gpadc_parse_channels(gpadc, np, &iio_chans, &n_iio_chans);
+ ret = ab8500_gpadc_parse_channels(gpadc, &iio_chans, &n_iio_chans);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index c5b785d8b241..4088786e1026 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -936,11 +936,6 @@ static void ad7124_reg_disable(void *r)
regulator_disable(r);
}
-static void ad7124_clk_disable(void *c)
-{
- clk_disable_unprepare(c);
-}
-
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -993,18 +988,10 @@ static int ad7124_probe(struct spi_device *spi)
return ret;
}
- st->mclk = devm_clk_get(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7124_clk_disable, st->mclk);
- if (ret)
- return ret;
-
ret = ad7124_soft_reset(st);
if (ret < 0)
return ret;
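The hunk above is one instance of the generic devm_clk_get_enabled() conversion; a minimal sketch of the idiom for reference (editor's illustration; foo_probe() and the "mclk" clock name are placeholders, not taken from this patch):

static int foo_probe(struct device *dev)
{
	struct clk *mclk;

	/* Gets, prepares and enables the clock; cleanup is device-managed */
	mclk = devm_clk_get_enabled(dev, "mclk");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	/* No explicit clk_disable_unprepare() or devm action is needed */
	return 0;
}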
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 92c68d467c50..a2f9fda25ff3 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -287,10 +287,8 @@ static int ad7292_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev,
ad7292_regulator_disable, st);
- if (ret) {
- regulator_disable(st->reg);
+ if (ret)
return ret;
- }
ret = regulator_get_voltage(st->reg);
if (ret < 0)
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 652db768ef37..70a25949142c 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -539,13 +539,6 @@ static void ad7768_regulator_disable(void *data)
regulator_disable(st->vref);
}
-static void ad7768_clk_disable(void *data)
-{
- struct ad7768_state *st = data;
-
- clk_disable_unprepare(st->mclk);
-}
-
static int ad7768_set_channel_label(struct iio_dev *indio_dev,
int num_channels)
{
@@ -600,18 +593,10 @@ static int ad7768_probe(struct spi_device *spi)
if (ret)
return ret;
- st->mclk = devm_clk_get(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7768_clk_disable, st);
- if (ret)
- return ret;
-
st->mclk_freq = clk_get_rate(st->mclk);
mutex_init(&st->lock);
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index edad1f30121d..9d6bf6d0927a 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
@@ -93,6 +94,7 @@ enum ad7923_id {
.sign = 'u', \
.realbits = (bits), \
.storagebits = 16, \
+ .shift = 12 - (bits), \
.endianness = IIO_BE, \
}, \
}
@@ -268,7 +270,8 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
return ret;
if (chan->address == EXTRACT(ret, 12, 4))
- *val = EXTRACT(ret, 0, 12);
+ *val = EXTRACT(ret, chan->scan_type.shift,
+ chan->scan_type.realbits);
else
return -EIO;
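For context on the scan_type-driven extraction above, a worked example (editor's sketch; it mirrors what the driver's EXTRACT() helper is expected to do for a 10-bit family member such as the AD7914, where shift = 12 - 10 = 2):

static int ad79xx_extract_example(void)
{
	int ret = 0x0FFC;	/* sample sits in bits [11:2] of the 12-bit field */

	/* (ret >> shift) masked to realbits gives 0x3FF, the full-scale 10-bit code */
	return (ret >> 2) & ((1 << 10) - 1);
}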
@@ -298,6 +301,7 @@ static void ad7923_regulator_disable(void *data)
static int ad7923_probe(struct spi_device *spi)
{
+ u32 ad7923_range = AD7923_RANGE;
struct ad7923_state *st;
struct iio_dev *indio_dev;
const struct ad7923_chip_info *info;
@@ -309,8 +313,11 @@ static int ad7923_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
+ if (device_property_read_bool(&spi->dev, "adi,range-double"))
+ ad7923_range = 0;
+
st->spi = spi;
- st->settings = AD7923_CODING | AD7923_RANGE |
+ st->settings = AD7923_CODING | ad7923_range |
AD7923_PM_MODE_WRITE(AD7923_PM_MODE_OPS);
info = &ad7923_chip_info[spi_get_device_id(spi)->driver_data];
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 262bd7665b33..6dbe9d5e08a2 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -880,7 +880,7 @@ error_disable_reg:
return ret;
}
-static int ad799x_remove(struct i2c_client *client)
+static void ad799x_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ad799x_state *st = iio_priv(indio_dev);
@@ -892,8 +892,6 @@ static int ad799x_remove(struct i2c_client *client)
regulator_disable(st->vref);
regulator_disable(st->reg);
kfree(st->rx_buf);
-
- return 0;
}
static int ad799x_suspend(struct device *dev)
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 5a5f33f7bc8f..7534572f7475 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -378,13 +378,6 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
return ad9467_outputmode_set(st->spi, st->output_mode);
}
-static void ad9467_clk_disable(void *data)
-{
- struct ad9467_state *st = data;
-
- clk_disable_unprepare(st->clk);
-}
-
static int ad9467_probe(struct spi_device *spi)
{
const struct ad9467_chip_info *info;
@@ -404,18 +397,10 @@ static int ad9467_probe(struct spi_device *spi)
st = adi_axi_adc_conv_priv(conv);
st->spi = spi;
- st->clk = devm_clk_get(&spi->dev, "adc-clk");
+ st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->clk))
return PTR_ERR(st->clk);
- ret = clk_prepare_enable(st->clk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad9467_clk_disable, st);
- if (ret)
- return ret;
-
st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(st->pwrdown_gpio))
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 279430c1d88c..4294d6539cdb 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -16,9 +16,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/sched.h>
+#include <linux/units.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -26,9 +28,13 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <linux/nvmem-consumer.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <dt-bindings/iio/adc/at91-sama5d2_adc.h>
+
struct at91_adc_reg_layout {
/* Control Register */
u16 CR;
@@ -73,11 +79,14 @@ struct at91_adc_reg_layout {
/* Startup Time */
#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
#define AT91_SAMA5D2_MR_STARTUP_MASK GENMASK(19, 16)
+/* Minimum startup time for temperature sensor */
+#define AT91_SAMA5D2_MR_STARTUP_TS_MIN (50)
/* Analog Change */
#define AT91_SAMA5D2_MR_ANACH BIT(23)
/* Tracking Time */
#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
-#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
+#define AT91_SAMA5D2_MR_TRACKTIM_TS 6
+#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xf
/* Transfer Time */
#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
@@ -138,11 +147,19 @@ struct at91_adc_reg_layout {
/* Extended Mode Register */
u16 EMR;
/* Extended Mode Register - Oversampling rate */
-#define AT91_SAMA5D2_EMR_OSR(V) ((V) << 16)
-#define AT91_SAMA5D2_EMR_OSR_MASK GENMASK(17, 16)
+#define AT91_SAMA5D2_EMR_OSR(V, M) (((V) << 16) & (M))
#define AT91_SAMA5D2_EMR_OSR_1SAMPLES 0
#define AT91_SAMA5D2_EMR_OSR_4SAMPLES 1
#define AT91_SAMA5D2_EMR_OSR_16SAMPLES 2
+#define AT91_SAMA5D2_EMR_OSR_64SAMPLES 3
+#define AT91_SAMA5D2_EMR_OSR_256SAMPLES 4
+
+/* Extended Mode Register - TRACKX */
+#define AT91_SAMA5D2_TRACKX_MASK GENMASK(23, 22)
+#define AT91_SAMA5D2_TRACKX(x) (((x) << 22) & \
+ AT91_SAMA5D2_TRACKX_MASK)
+/* TRACKX for temperature sensor. */
+#define AT91_SAMA5D2_TRACKX_TS (1)
/* Extended Mode Register - Averaging on single trigger event */
#define AT91_SAMA5D2_EMR_ASTE(V) ((V) << 20)
@@ -159,6 +176,8 @@ struct at91_adc_reg_layout {
u16 ACR;
/* Analog Control Register - Pen detect sensitivity mask */
#define AT91_SAMA5D2_ACR_PENDETSENS_MASK GENMASK(1, 0)
+/* Analog Control Register - Source last channel */
+#define AT91_SAMA5D2_ACR_SRCLCH BIT(16)
/* Touchscreen Mode Register */
u16 TSMR;
@@ -226,6 +245,10 @@ struct at91_adc_reg_layout {
u16 WPSR;
/* Version Register */
u16 VERSION;
+/* Temperature Sensor Mode Register */
+ u16 TEMPMR;
+/* Temperature Sensor Mode - Temperature sensor on */
+#define AT91_SAMA5D2_TEMPMR_TEMPON BIT(0)
};
static const struct at91_adc_reg_layout sama5d2_layout = {
@@ -280,6 +303,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.EOC_IDR = 0x38,
.EOC_IMR = 0x3c,
.EOC_ISR = 0x40,
+ .TEMPMR = 0x44,
.OVER = 0x4c,
.EMR = 0x50,
.CWR = 0x54,
@@ -305,11 +329,6 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
-/* Possible values for oversampling ratio */
-#define AT91_OSR_1SAMPLES 1
-#define AT91_OSR_4SAMPLES 4
-#define AT91_OSR_16SAMPLES 16
-
#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
{ \
.type = IIO_VOLTAGE, \
@@ -325,6 +344,8 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num, \
.indexed = 1, \
}
@@ -346,6 +367,8 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = "CH"#num"-CH"#num2, \
.indexed = 1, \
}
@@ -365,6 +388,8 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = name, \
}
#define AT91_SAMA5D2_CHAN_PRESSURE(num, name) \
@@ -380,6 +405,23 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ)|\
BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = name, \
+ }
+
+#define AT91_SAMA5D2_CHAN_TEMP(num, name, addr) \
+ { \
+ .type = IIO_TEMP, \
+ .channel = num, \
+ .address = addr, \
+ .scan_index = num, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.datasheet_name = name, \
}
@@ -403,6 +445,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
* @max_index: highest channel index (highest index may be higher
* than the total channel number)
* @hw_trig_cnt: number of possible hardware triggers
+ * @osr_mask: oversampling ratio bitmask on EMR register
+ * @oversampling_avail: available oversampling values
+ * @oversampling_avail_no: number of available oversampling values
+ * @chan_realbits: realbits for registered channels
+ * @temp_chan: temperature channel index
+ * @temp_sensor: temperature sensor supported
*/
struct at91_adc_platform {
const struct at91_adc_reg_layout *layout;
@@ -414,20 +462,58 @@ struct at91_adc_platform {
unsigned int max_channels;
unsigned int max_index;
unsigned int hw_trig_cnt;
+ unsigned int osr_mask;
+ unsigned int oversampling_avail[5];
+ unsigned int oversampling_avail_no;
+ unsigned int chan_realbits;
+ unsigned int temp_chan;
+ bool temp_sensor;
+};
+
+/**
+ * struct at91_adc_temp_sensor_clb - at91-sama5d2 temperature sensor
+ * calibration data structure
+ * @p1: P1 calibration temperature
+ * @p4: P4 calibration voltage
+ * @p6: P6 calibration voltage
+ */
+struct at91_adc_temp_sensor_clb {
+ u32 p1;
+ u32 p4;
+ u32 p6;
+};
+
+/**
+ * enum at91_adc_ts_clb_idx - calibration indexes in NVMEM buffer
+ * @AT91_ADC_TS_CLB_IDX_P1: index for P1
+ * @AT91_ADC_TS_CLB_IDX_P4: index for P4
+ * @AT91_ADC_TS_CLB_IDX_P6: index for P6
+ * @AT91_ADC_TS_CLB_IDX_MAX: max index for temperature calibration packet in OTP
+ */
+enum at91_adc_ts_clb_idx {
+ AT91_ADC_TS_CLB_IDX_P1 = 2,
+ AT91_ADC_TS_CLB_IDX_P4 = 5,
+ AT91_ADC_TS_CLB_IDX_P6 = 7,
+ AT91_ADC_TS_CLB_IDX_MAX = 19,
};
+/* Temperature sensor calibration - Vtemp voltage sensitivity to temperature. */
+#define AT91_ADC_TS_VTEMP_DT (2080U)
+
/**
* struct at91_adc_soc_info - at91-sama5d2 soc information struct
* @startup_time: device startup time
* @min_sample_rate: minimum sample rate in Hz
* @max_sample_rate: maximum sample rate in Hz
* @platform: pointer to the platform structure
+ * @temp_sensor_clb: temperature sensor calibration data structure
*/
struct at91_adc_soc_info {
unsigned startup_time;
unsigned min_sample_rate;
unsigned max_sample_rate;
const struct at91_adc_platform *platform;
+ struct at91_adc_temp_sensor_clb temp_sensor_clb;
};
struct at91_adc_trigger {
@@ -475,6 +561,18 @@ struct at91_adc_touch {
struct work_struct workq;
};
+/**
+ * struct at91_adc_temp - at91-sama5d2 temperature information structure
+ * @sample_period_val: sample period value
+ * @saved_sample_rate: saved sample rate
+ * @saved_oversampling: saved oversampling
+ */
+struct at91_adc_temp {
+ u16 sample_period_val;
+ u16 saved_sample_rate;
+ u16 saved_oversampling;
+};
+
/*
* Buffer size requirements:
* No channels * bytes_per_channel(2) + timestamp bytes (8)
@@ -502,7 +600,9 @@ struct at91_adc_state {
wait_queue_head_t wq_data_available;
struct at91_adc_dma dma_st;
struct at91_adc_touch touch_st;
+ struct at91_adc_temp temp_st;
struct iio_dev *indio_dev;
+ struct device *dev;
/* Ensure naturally aligned timestamp */
u16 buffer[AT91_BUFFER_MAX_HWORDS] __aligned(8);
/*
@@ -591,6 +691,7 @@ static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
IIO_CHAN_SOFT_TIMESTAMP(24),
+ AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};
static const struct at91_adc_platform sama5d2_platform = {
@@ -612,6 +713,10 @@ static const struct at91_adc_platform sama5d2_platform = {
.max_index = AT91_SAMA5D2_MAX_CHAN_IDX,
#define AT91_SAMA5D2_HW_TRIG_CNT 3
.hw_trig_cnt = AT91_SAMA5D2_HW_TRIG_CNT,
+ .osr_mask = GENMASK(17, 16),
+ .oversampling_avail = { 1, 4, 16, },
+ .oversampling_avail_no = 3,
+ .chan_realbits = 14,
};
static const struct at91_adc_platform sama7g5_platform = {
@@ -619,14 +724,23 @@ static const struct at91_adc_platform sama7g5_platform = {
.adc_channels = &at91_sama7g5_adc_channels,
#define AT91_SAMA7G5_SINGLE_CHAN_CNT 16
#define AT91_SAMA7G5_DIFF_CHAN_CNT 8
+#define AT91_SAMA7G5_TEMP_CHAN_CNT 1
.nr_channels = AT91_SAMA7G5_SINGLE_CHAN_CNT +
- AT91_SAMA7G5_DIFF_CHAN_CNT,
+ AT91_SAMA7G5_DIFF_CHAN_CNT +
+ AT91_SAMA7G5_TEMP_CHAN_CNT,
#define AT91_SAMA7G5_MAX_CHAN_IDX (AT91_SAMA7G5_SINGLE_CHAN_CNT + \
- AT91_SAMA7G5_DIFF_CHAN_CNT)
+ AT91_SAMA7G5_DIFF_CHAN_CNT + \
+ AT91_SAMA7G5_TEMP_CHAN_CNT)
.max_channels = ARRAY_SIZE(at91_sama7g5_adc_channels),
.max_index = AT91_SAMA7G5_MAX_CHAN_IDX,
#define AT91_SAMA7G5_HW_TRIG_CNT 3
.hw_trig_cnt = AT91_SAMA7G5_HW_TRIG_CNT,
+ .osr_mask = GENMASK(18, 16),
+ .oversampling_avail = { 1, 4, 16, 64, 256, },
+ .oversampling_avail_no = 5,
+ .chan_realbits = 16,
+ .temp_sensor = true,
+ .temp_chan = AT91_SAMA7G5_ADC_TEMP_CHANNEL,
};
static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
@@ -650,8 +764,8 @@ at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
return indio_dev->channels + index;
}
-static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static inline int at91_adc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
}
@@ -725,51 +839,91 @@ static void at91_adc_eoc_ena(struct at91_adc_state *st, unsigned int channel)
at91_adc_writel(st, EOC_IER, BIT(channel));
}
-static void at91_adc_config_emr(struct at91_adc_state *st)
+static int at91_adc_config_emr(struct at91_adc_state *st,
+ u32 oversampling_ratio, u32 trackx)
{
/* configure the extended mode register */
- unsigned int emr = at91_adc_readl(st, EMR);
-
- /* select oversampling per single trigger event */
- emr |= AT91_SAMA5D2_EMR_ASTE(1);
+ unsigned int emr, osr;
+ unsigned int osr_mask = st->soc_info.platform->osr_mask;
+ int i, ret;
- /* delete leftover content if it's the case */
- emr &= ~AT91_SAMA5D2_EMR_OSR_MASK;
+ /* Check against supported oversampling values. */
+ for (i = 0; i < st->soc_info.platform->oversampling_avail_no; i++) {
+ if (oversampling_ratio == st->soc_info.platform->oversampling_avail[i])
+ break;
+ }
+ if (i == st->soc_info.platform->oversampling_avail_no)
+ return -EINVAL;
/* select oversampling ratio from configuration */
- switch (st->oversampling_ratio) {
- case AT91_OSR_1SAMPLES:
- emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_1SAMPLES) &
- AT91_SAMA5D2_EMR_OSR_MASK;
+ switch (oversampling_ratio) {
+ case 1:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_1SAMPLES,
+ osr_mask);
break;
- case AT91_OSR_4SAMPLES:
- emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_4SAMPLES) &
- AT91_SAMA5D2_EMR_OSR_MASK;
+ case 4:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_4SAMPLES,
+ osr_mask);
break;
- case AT91_OSR_16SAMPLES:
- emr |= AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_16SAMPLES) &
- AT91_SAMA5D2_EMR_OSR_MASK;
+ case 16:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_16SAMPLES,
+ osr_mask);
+ break;
+ case 64:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_64SAMPLES,
+ osr_mask);
+ break;
+ case 256:
+ osr = AT91_SAMA5D2_EMR_OSR(AT91_SAMA5D2_EMR_OSR_256SAMPLES,
+ osr_mask);
break;
}
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
+ emr = at91_adc_readl(st, EMR);
+ /* select oversampling per single trigger event */
+ emr |= AT91_SAMA5D2_EMR_ASTE(1);
+ /* delete leftover content if it's the case */
+ emr &= ~(osr_mask | AT91_SAMA5D2_TRACKX_MASK);
+ /* Update osr and trackx. */
+ emr |= osr | AT91_SAMA5D2_TRACKX(trackx);
at91_adc_writel(st, EMR, emr);
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
+ st->oversampling_ratio = oversampling_ratio;
+
+ return 0;
}
static int at91_adc_adjust_val_osr(struct at91_adc_state *st, int *val)
{
- if (st->oversampling_ratio == AT91_OSR_1SAMPLES) {
- /*
- * in this case we only have 12 bits of real data, but channel
- * is registered as 14 bits, so shift left two bits
- */
- *val <<= 2;
- } else if (st->oversampling_ratio == AT91_OSR_4SAMPLES) {
- /*
- * in this case we have 13 bits of real data, but channel
- * is registered as 14 bits, so left shift one bit
- */
- *val <<= 1;
- }
+ int nbits, diff;
+
+ if (st->oversampling_ratio == 1)
+ nbits = 12;
+ else if (st->oversampling_ratio == 4)
+ nbits = 13;
+ else if (st->oversampling_ratio == 16)
+ nbits = 14;
+ else if (st->oversampling_ratio == 64)
+ nbits = 15;
+ else if (st->oversampling_ratio == 256)
+ nbits = 16;
+ else
+ /* Should not happen. */
+ return -EINVAL;
+
+ /*
+ * We have nbits of real data and channel is registered as
+ * st->soc_info.platform->chan_realbits, so shift left diff bits.
+ */
+ diff = st->soc_info.platform->chan_realbits - nbits;
+ *val <<= diff;
return IIO_VAL_INT;
}
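The rewritten at91_adc_adjust_val_osr() scales each sample to the registered channel width (chan_realbits) instead of hard-coding the 14-bit case. A standalone sketch of the arithmetic for the sama5d2 values in this patch (chan_realbits = 14); the function name is illustrative:

/* Sketch only: reproduces the left shift applied by the driver. */
static int example_adjust_to_14_bits(unsigned int oversampling_ratio, int val)
{
	int nbits;

	switch (oversampling_ratio) {
	case 1:
		nbits = 12;	/* no oversampling: 12 real bits */
		break;
	case 4:
		nbits = 13;	/* one extra bit of resolution */
		break;
	case 16:
		nbits = 14;	/* already at full channel width */
		break;
	default:
		return -1;
	}

	/* e.g. OSR = 4: shift left by 14 - 13 = 1 bit */
	return val << (14 - nbits);
}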
@@ -799,15 +953,22 @@ static void at91_adc_adjust_val_osr_array(struct at91_adc_state *st, void *buf,
static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
{
u32 clk_khz = st->current_sample_rate / 1000;
- int i = 0;
+ int i = 0, ret;
u16 pendbc;
u32 tsmr, acr;
- if (!state) {
+ if (state) {
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+ } else {
/* disabling touch IRQs and setting mode to no touch enabled */
at91_adc_writel(st, IDR,
AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
at91_adc_writel(st, TSMR, 0);
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
return 0;
}
/*
@@ -948,10 +1109,9 @@ static int at91_adc_read_pressure(struct at91_adc_state *st, int chan, u16 *val)
return IIO_VAL_INT;
}
-static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+static void at91_adc_configure_trigger_registers(struct at91_adc_state *st,
+ bool state)
{
- struct iio_dev *indio = iio_trigger_get_drvdata(trig);
- struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, TRGR);
/* clear TRGMOD */
@@ -962,6 +1122,26 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
/* set/unset hw trigger */
at91_adc_writel(st, TRGR, status);
+}
+
+static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+ struct at91_adc_state *st = iio_priv(indio);
+ int ret;
+
+ if (state) {
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ at91_adc_configure_trigger_registers(st, state);
+
+ if (!state) {
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+ }
return 0;
}
@@ -1120,11 +1300,15 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
/* we continue with the triggered buffer */
ret = at91_adc_dma_start(indio_dev);
if (ret) {
dev_err(&indio_dev->dev, "buffer prepare failed\n");
- return ret;
+ goto pm_runtime_put;
}
for_each_set_bit(bit, indio_dev->active_scan_mask,
@@ -1135,7 +1319,8 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
continue;
/* these channel types cannot be handled by this trigger */
if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
+ chan->type == IIO_PRESSURE ||
+ chan->type == IIO_TEMP)
continue;
at91_adc_cor(st, chan);
@@ -1146,12 +1331,16 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
if (at91_adc_buffer_check_use_irq(indio_dev, st))
at91_adc_writel(st, IER, AT91_SAMA5D2_IER_DRDY);
- return 0;
+pm_runtime_put:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+ return ret;
}
static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
u8 bit;
/* check if we are disabling triggered buffer or the touchscreen */
@@ -1162,6 +1351,10 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
if (!(iio_device_get_current_mode(indio_dev) & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
/*
* For each enable channel we must disable it in hardware.
* In the case of DMA, we must read the last converted value
@@ -1177,7 +1370,8 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
continue;
/* these channel types are virtual, no need to do anything */
if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
+ chan->type == IIO_PRESSURE ||
+ chan->type == IIO_TEMP)
continue;
at91_adc_writel(st, CHDR, BIT(chan->channel));
@@ -1196,6 +1390,9 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
if (st->dma_st.dma_chan)
dmaengine_terminate_sync(st->dma_st.dma_chan);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
return 0;
}
@@ -1224,6 +1421,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
return trig;
}
+
static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
struct iio_poll_func *pf)
{
@@ -1377,25 +1575,35 @@ static unsigned at91_adc_startup_time(unsigned startup_time_min,
return i;
}
-static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq)
+static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq,
+ unsigned int startup_time,
+ unsigned int tracktim)
{
struct at91_adc_state *st = iio_priv(indio_dev);
unsigned f_per, prescal, startup, mr;
+ int ret;
f_per = clk_get_rate(st->per_clk);
prescal = (f_per / (2 * freq)) - 1;
- startup = at91_adc_startup_time(st->soc_info.startup_time,
- freq / 1000);
+ startup = at91_adc_startup_time(startup_time, freq / 1000);
+
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return;
mr = at91_adc_readl(st, MR);
mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
mr |= AT91_SAMA5D2_MR_STARTUP(startup);
mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
+ mr |= AT91_SAMA5D2_MR_TRACKTIM(tracktim);
at91_adc_writel(st, MR, mr);
- dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
- freq, startup, prescal);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
+ dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u, tracktim=%u\n",
+ freq, startup, prescal, tracktim);
st->current_sample_rate = freq;
}
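For reference, at91_adc_setup_samp_freq() derives the prescaler as prescal = f_per / (2 * freq) - 1. A tiny worked example with a made-up peripheral clock rate (the 100 MHz figure is an assumption, not taken from this patch):

/* Sketch only: the peripheral clock rate is hypothetical. */
static unsigned int example_prescal(void)
{
	unsigned int f_per = 100000000;	/* assumed 100 MHz per_clk */
	unsigned int freq = 10000000;	/* 10 MHz temperature-sensor rate */

	return f_per / (2 * freq) - 1;	/* = 4 */
}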
@@ -1522,6 +1730,7 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
return IRQ_HANDLED;
}
+/* This needs to be called with direct mode claimed and st->lock locked. */
static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
@@ -1529,50 +1738,46 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
u16 tmp_val;
int ret;
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
/*
* Keep in mind that we cannot use software trigger or touchscreen
* if external trigger is enabled
*/
if (chan->type == IIO_POSITIONRELATIVE) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
ret = at91_adc_read_position(st, chan->channel,
&tmp_val);
*val = tmp_val;
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
- return at91_adc_adjust_val_osr(st, val);
+ goto pm_runtime_put;
}
if (chan->type == IIO_PRESSURE) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
ret = at91_adc_read_pressure(st, chan->channel,
&tmp_val);
*val = tmp_val;
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ if (ret > 0)
+ ret = at91_adc_adjust_val_osr(st, val);
- return at91_adc_adjust_val_osr(st, val);
+ goto pm_runtime_put;
}
- /* in this case we have a voltage channel */
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
+ /* in this case we have a voltage or temperature channel */
st->chan = chan;
at91_adc_cor(st, chan);
at91_adc_writel(st, CHER, BIT(chan->channel));
+ /*
+ * TEMPMR.TEMPON must be updated after CHER; otherwise, if no channel
+ * is enabled while TEMPMR.TEMPON = 1, a DRDY interrupt is triggered
+ * while preparing for the temperature read.
+ */
+ if (chan->type == IIO_TEMP)
+ at91_adc_writel(st, TEMPMR, AT91_SAMA5D2_TEMPMR_TEMPON);
at91_adc_eoc_ena(st, chan->channel);
at91_adc_writel(st, CR, AT91_SAMA5D2_CR_START);
@@ -1592,14 +1797,125 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
}
at91_adc_eoc_dis(st, st->chan->channel);
+ if (chan->type == IIO_TEMP)
+ at91_adc_writel(st, TEMPMR, 0U);
at91_adc_writel(st, CHDR, BIT(chan->channel));
/* Needed to ACK the DRDY interruption */
at91_adc_readl(st, LCDR);
+pm_runtime_put:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+ return ret;
+}
+
+static int at91_adc_read_info_locked(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ ret = at91_adc_read_info_raw(indio_dev, chan, val);
mutex_unlock(&st->lock);
iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static void at91_adc_temp_sensor_configure(struct at91_adc_state *st,
+ bool start)
+{
+ u32 sample_rate, oversampling_ratio;
+ u32 startup_time, tracktim, trackx;
+
+ if (start) {
+ /*
+ * Configure the sensor for best accuracy: 10MHz frequency,
+ * oversampling rate of 256, tracktim=0xf and trackx=1.
+ */
+ sample_rate = 10 * MEGA;
+ oversampling_ratio = 256;
+ startup_time = AT91_SAMA5D2_MR_STARTUP_TS_MIN;
+ tracktim = AT91_SAMA5D2_MR_TRACKTIM_TS;
+ trackx = AT91_SAMA5D2_TRACKX_TS;
+
+ st->temp_st.saved_sample_rate = st->current_sample_rate;
+ st->temp_st.saved_oversampling = st->oversampling_ratio;
+ } else {
+ /* Go back to previous settings. */
+ sample_rate = st->temp_st.saved_sample_rate;
+ oversampling_ratio = st->temp_st.saved_oversampling;
+ startup_time = st->soc_info.startup_time;
+ tracktim = 0;
+ trackx = 0;
+ }
+
+ at91_adc_setup_samp_freq(st->indio_dev, sample_rate, startup_time,
+ tracktim);
+ at91_adc_config_emr(st, oversampling_ratio, trackx);
+}
+
+static int at91_adc_read_temp(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ struct at91_adc_temp_sensor_clb *clb = &st->soc_info.temp_sensor_clb;
+ u64 div1, div2;
+ u32 tmp;
+ int ret, vbg, vtemp;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ goto unlock;
+
+ at91_adc_temp_sensor_configure(st, true);
+
+ /* Read VBG. */
+ tmp = at91_adc_readl(st, ACR);
+ tmp |= AT91_SAMA5D2_ACR_SRCLCH;
+ at91_adc_writel(st, ACR, tmp);
+ ret = at91_adc_read_info_raw(indio_dev, chan, &vbg);
+ if (ret < 0)
+ goto restore_config;
+
+ /* Read VTEMP. */
+ tmp &= ~AT91_SAMA5D2_ACR_SRCLCH;
+ at91_adc_writel(st, ACR, tmp);
+ ret = at91_adc_read_info_raw(indio_dev, chan, &vtemp);
+
+restore_config:
+ /* Revert previous settings. */
+ at91_adc_temp_sensor_configure(st, false);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+unlock:
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Temp[milli] = p1[milli] + (vtemp * clb->p6 - clb->p4 * vbg)/
+ * (vbg * AT91_ADC_TS_VTEMP_DT)
+ */
+ div1 = DIV_ROUND_CLOSEST_ULL(((u64)vtemp * clb->p6), vbg);
+ div1 = DIV_ROUND_CLOSEST_ULL((div1 * 1000), AT91_ADC_TS_VTEMP_DT);
+ div2 = DIV_ROUND_CLOSEST_ULL((u64)clb->p4, AT91_ADC_TS_VTEMP_DT);
+ div2 *= 1000;
+ *val = clb->p1 + (int)div1 - (int)div2;
+
return ret;
}
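The fixed-point math at the end of at91_adc_read_temp() evaluates Temp[m°C] = p1[m°C] + (vtemp * p6 - p4 * vbg) / (vbg * AT91_ADC_TS_VTEMP_DT), with p1 already pre-scaled to milli degrees plus the 5 °C constant in at91_adc_temp_sensor_init(). A small userspace sketch with made-up calibration values and readings (none of the numbers come from real OTP data):

#include <stdint.h>
#include <stdio.h>

#define VTEMP_DT	2080U	/* Vtemp sensitivity, from the patch */

int main(void)
{
	/* Hypothetical calibration data and ADC readings. */
	uint64_t p1 = 25, p4 = 30000, p6 = 20000;
	uint64_t vbg = 31000, vtemp = 29000;
	int64_t div1, div2, temp_milli;

	/* DIV_ROUND_CLOSEST_ULL(x, d) == (x + d / 2) / d */
	div1 = (vtemp * p6 + vbg / 2) / vbg;
	div1 = (div1 * 1000 + VTEMP_DT / 2) / VTEMP_DT;
	div2 = ((p4 + VTEMP_DT / 2) / VTEMP_DT) * 1000;
	temp_milli = (int64_t)(p1 * 1000 + 5000) + div1 - div2;

	printf("%lld m°C\n", (long long)temp_milli);	/* ~24995, i.e. ~25 °C */
	return 0;
}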
@@ -1611,7 +1927,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return at91_adc_read_info_raw(indio_dev, chan, val);
+ return at91_adc_read_info_locked(indio_dev, chan, val);
+
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
if (chan->differential)
@@ -1619,6 +1936,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_PROCESSED:
+ if (chan->type != IIO_TEMP)
+ return -EINVAL;
+ return at91_adc_read_temp(indio_dev, chan, val);
+
case IIO_CHAN_INFO_SAMP_FREQ:
*val = at91_adc_get_sample_freq(st);
return IIO_VAL_INT;
@@ -1637,31 +1959,60 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- if ((val != AT91_OSR_1SAMPLES) && (val != AT91_OSR_4SAMPLES) &&
- (val != AT91_OSR_16SAMPLES))
- return -EINVAL;
/* if no change, optimize out */
if (val == st->oversampling_ratio)
return 0;
- st->oversampling_ratio = val;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
/* update ratio */
- at91_adc_config_emr(st);
- return 0;
+ ret = at91_adc_config_emr(st, val, 0);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val < st->soc_info.min_sample_rate ||
val > st->soc_info.max_sample_rate)
return -EINVAL;
- at91_adc_setup_samp_freq(indio_dev, val);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+ at91_adc_setup_samp_freq(indio_dev, val,
+ st->soc_info.startup_time, 0);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return 0;
default:
return -EINVAL;
}
}
+static int at91_adc_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = (int *)st->soc_info.platform->oversampling_avail;
+ *type = IIO_VAL_INT;
+ *length = st->soc_info.platform->oversampling_avail_no;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static void at91_adc_dma_init(struct at91_adc_state *st)
{
struct device *dev = &st->indio_dev->dev;
@@ -1817,10 +2168,11 @@ static void at91_adc_hw_init(struct iio_dev *indio_dev)
at91_adc_writel(st, MR,
AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
- at91_adc_setup_samp_freq(indio_dev, st->soc_info.min_sample_rate);
+ at91_adc_setup_samp_freq(indio_dev, st->soc_info.min_sample_rate,
+ st->soc_info.startup_time, 0);
/* configure extended mode register */
- at91_adc_config_emr(st);
+ at91_adc_config_emr(st, st->oversampling_ratio, 0);
}
static ssize_t at91_adc_get_fifo_state(struct device *dev,
@@ -1849,20 +2201,6 @@ static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
-static IIO_CONST_ATTR(oversampling_ratio_available,
- __stringify(AT91_OSR_1SAMPLES) " "
- __stringify(AT91_OSR_4SAMPLES) " "
- __stringify(AT91_OSR_16SAMPLES));
-
-static struct attribute *at91_adc_attributes[] = {
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group at91_adc_attribute_group = {
- .attrs = at91_adc_attributes,
-};
-
static const struct attribute *at91_adc_fifo_attributes[] = {
&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
@@ -1872,11 +2210,11 @@ static const struct attribute *at91_adc_fifo_attributes[] = {
};
static const struct iio_info at91_adc_info = {
- .attrs = &at91_adc_attribute_group,
+ .read_avail = &at91_adc_read_avail,
.read_raw = &at91_adc_read_raw,
.write_raw = &at91_adc_write_raw,
.update_scan_mode = &at91_adc_update_scan_mode,
- .of_xlate = &at91_adc_of_xlate,
+ .fwnode_xlate = &at91_adc_fwnode_xlate,
.hwfifo_set_watermark = &at91_adc_set_watermark,
};
@@ -1918,12 +2256,62 @@ static int at91_adc_buffer_and_trigger_init(struct device *dev,
return 0;
}
+static int at91_adc_temp_sensor_init(struct at91_adc_state *st,
+ struct device *dev)
+{
+ struct at91_adc_temp_sensor_clb *clb = &st->soc_info.temp_sensor_clb;
+ struct nvmem_cell *temp_calib;
+ u32 *buf;
+ size_t len;
+ int ret = 0;
+
+ if (!st->soc_info.platform->temp_sensor)
+ return 0;
+
+ /* Get the calibration data from NVMEM. */
+ temp_calib = devm_nvmem_cell_get(dev, "temperature_calib");
+ if (IS_ERR(temp_calib)) {
+ ret = PTR_ERR(temp_calib);
+ if (ret != -ENOENT)
+ dev_err(dev, "Failed to get temperature_calib cell!\n");
+ return ret;
+ }
+
+ buf = nvmem_cell_read(temp_calib, &len);
+ if (IS_ERR(buf)) {
+ dev_err(dev, "Failed to read calibration data!\n");
+ return PTR_ERR(buf);
+ }
+ if (len < AT91_ADC_TS_CLB_IDX_MAX * 4) {
+ dev_err(dev, "Invalid calibration data!\n");
+ ret = -EINVAL;
+ goto free_buf;
+ }
+
+ /* Store calibration data for later use. */
+ clb->p1 = buf[AT91_ADC_TS_CLB_IDX_P1];
+ clb->p4 = buf[AT91_ADC_TS_CLB_IDX_P4];
+ clb->p6 = buf[AT91_ADC_TS_CLB_IDX_P6];
+
+ /*
+ * Prepare the conversion to milli degrees Celsius and add the
+ * constant factor (5 degrees Celsius) to p1 here, to avoid doing
+ * it on the hot path.
+ */
+ clb->p1 = clb->p1 * 1000 + 5000;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
static int at91_adc_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct at91_adc_state *st;
struct resource *res;
- int ret, i;
+ int ret, i, num_channels;
u32 edge_type = IRQ_TYPE_NONE;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
@@ -1933,13 +2321,20 @@ static int at91_adc_probe(struct platform_device *pdev)
st = iio_priv(indio_dev);
st->indio_dev = indio_dev;
- st->soc_info.platform = of_device_get_match_data(&pdev->dev);
+ st->soc_info.platform = device_get_match_data(dev);
+
+ ret = at91_adc_temp_sensor_init(st, &pdev->dev);
+ /* Don't register temperature channel if initialization failed. */
+ if (ret)
+ num_channels = st->soc_info.platform->max_channels - 1;
+ else
+ num_channels = st->soc_info.platform->max_channels;
indio_dev->name = dev_name(&pdev->dev);
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
indio_dev->info = &at91_adc_info;
indio_dev->channels = *st->soc_info.platform->adc_channels;
- indio_dev->num_channels = st->soc_info.platform->max_channels;
+ indio_dev->num_channels = num_channels;
bitmap_set(&st->touch_st.channels_bitmask,
st->soc_info.platform->touch_chan_x, 1);
@@ -1948,36 +2343,34 @@ static int at91_adc_probe(struct platform_device *pdev)
bitmap_set(&st->touch_st.channels_bitmask,
st->soc_info.platform->touch_chan_p, 1);
- st->oversampling_ratio = AT91_OSR_1SAMPLES;
+ st->oversampling_ratio = 1;
- ret = of_property_read_u32(pdev->dev.of_node,
- "atmel,min-sample-rate-hz",
- &st->soc_info.min_sample_rate);
+ ret = device_property_read_u32(dev, "atmel,min-sample-rate-hz",
+ &st->soc_info.min_sample_rate);
if (ret) {
dev_err(&pdev->dev,
"invalid or missing value for atmel,min-sample-rate-hz\n");
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "atmel,max-sample-rate-hz",
- &st->soc_info.max_sample_rate);
+ ret = device_property_read_u32(dev, "atmel,max-sample-rate-hz",
+ &st->soc_info.max_sample_rate);
if (ret) {
dev_err(&pdev->dev,
"invalid or missing value for atmel,max-sample-rate-hz\n");
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms",
- &st->soc_info.startup_time);
+ ret = device_property_read_u32(dev, "atmel,startup-time-ms",
+ &st->soc_info.startup_time);
if (ret) {
dev_err(&pdev->dev,
"invalid or missing value for atmel,startup-time-ms\n");
return ret;
}
- ret = of_property_read_u32(pdev->dev.of_node,
- "atmel,trigger-edge-type", &edge_type);
+ ret = device_property_read_u32(dev, "atmel,trigger-edge-type",
+ &edge_type);
if (ret) {
dev_dbg(&pdev->dev,
"atmel,trigger-edge-type not specified, only software trigger available\n");
@@ -2051,13 +2444,19 @@ static int at91_adc_probe(struct platform_device *pdev)
if (ret)
goto vref_disable;
- at91_adc_hw_init(indio_dev);
-
platform_set_drvdata(pdev, indio_dev);
+ st->dev = &pdev->dev;
+ pm_runtime_set_autosuspend_delay(st->dev, 500);
+ pm_runtime_use_autosuspend(st->dev);
+ pm_runtime_set_active(st->dev);
+ pm_runtime_enable(st->dev);
+ pm_runtime_get_noresume(st->dev);
+
+ at91_adc_hw_init(indio_dev);
ret = at91_adc_buffer_and_trigger_init(&pdev->dev, indio_dev);
if (ret < 0)
- goto per_clk_disable_unprepare;
+ goto err_pm_disable;
if (dma_coerce_mask_and_coherent(&indio_dev->dev, DMA_BIT_MASK(32)))
dev_info(&pdev->dev, "cannot set DMA mask to 32-bit\n");
@@ -2073,11 +2472,18 @@ static int at91_adc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + st->soc_info.platform->layout->VERSION));
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
+
return 0;
dma_disable:
at91_adc_dma_disable(st);
-per_clk_disable_unprepare:
+err_pm_disable:
+ pm_runtime_put_noidle(st->dev);
+ pm_runtime_disable(st->dev);
+ pm_runtime_set_suspended(st->dev);
+ pm_runtime_dont_use_autosuspend(st->dev);
clk_disable_unprepare(st->per_clk);
vref_disable:
regulator_disable(st->vref);
@@ -2095,6 +2501,8 @@ static int at91_adc_remove(struct platform_device *pdev)
at91_adc_dma_disable(st);
+ pm_runtime_disable(st->dev);
+ pm_runtime_set_suspended(st->dev);
clk_disable_unprepare(st->per_clk);
regulator_disable(st->vref);
@@ -2107,6 +2515,14 @@ static int at91_adc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(st->dev);
+ if (ret < 0)
+ return ret;
+
+ if (iio_buffer_enabled(indio_dev))
+ at91_adc_buffer_postdisable(indio_dev);
/*
* Do a software reset of the ADC before we go to suspend.
@@ -2116,6 +2532,8 @@ static int at91_adc_suspend(struct device *dev)
*/
at91_adc_writel(st, CR, AT91_SAMA5D2_CR_SWRST);
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_noidle(st->dev);
clk_disable_unprepare(st->per_clk);
regulator_disable(st->vref);
regulator_disable(st->reg);
@@ -2145,21 +2563,28 @@ static int at91_adc_resume(struct device *dev)
if (ret)
goto vref_disable_resume;
+ pm_runtime_get_noresume(st->dev);
+
at91_adc_hw_init(indio_dev);
/* reconfiguring trigger hardware state */
- if (!iio_buffer_enabled(indio_dev))
- return 0;
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = at91_adc_buffer_prepare(indio_dev);
+ if (ret)
+ goto pm_runtime_put;
- /* check if we are enabling triggered buffer or the touchscreen */
- if (at91_adc_current_chan_is_touch(indio_dev))
- return at91_adc_configure_touch(st, true);
- else
- return at91_adc_configure_trigger(st->trig, true);
+ at91_adc_configure_trigger_registers(st, true);
+ }
+
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_autosuspend(st->dev);
- /* not needed but more explicit */
return 0;
+pm_runtime_put:
+ pm_runtime_mark_last_busy(st->dev);
+ pm_runtime_put_noidle(st->dev);
+ clk_disable_unprepare(st->per_clk);
vref_disable_resume:
regulator_disable(st->vref);
reg_disable_resume:
@@ -2169,8 +2594,29 @@ resume_failed:
return ret;
}
-static DEFINE_SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend,
- at91_adc_resume);
+static int at91_adc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ clk_disable(st->per_clk);
+
+ return 0;
+}
+
+static int at91_adc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return clk_enable(st->per_clk);
+}
+
+static const struct dev_pm_ops at91_adc_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(at91_adc_suspend, at91_adc_resume)
+ RUNTIME_PM_OPS(at91_adc_runtime_suspend, at91_adc_runtime_resume,
+ NULL)
+};
static const struct of_device_id at91_adc_dt_match[] = {
{
@@ -2191,7 +2637,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = "at91-sama5d2_adc",
.of_match_table = at91_adc_dt_match,
- .pm = pm_sleep_ptr(&at91_adc_pm_ops),
+ .pm = pm_ptr(&at91_adc_pm_ops),
},
};
module_platform_driver(at91_adc_driver)
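Nearly every register-touching path in this at91 patch is wrapped in the same runtime-PM bracket, with the runtime callbacks above gating per_clk. A minimal sketch of that bracket, assuming a device that already has runtime PM enabled:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Sketch only: take a runtime-PM reference around a register access. */
static int example_reg_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access hardware registers here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}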
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index e48446784a0a..36777b827165 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -202,7 +202,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
struct imx8qxp_adc *adc = iio_priv(indio_dev);
struct device *dev = adc->dev;
- u32 ctrl, vref_uv;
+ u32 ctrl;
long ret;
switch (mask) {
@@ -245,8 +245,10 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- vref_uv = regulator_get_voltage(adc->vref);
- *val = vref_uv / 1000;
+ ret = regulator_get_voltage(adc->vref);
+ if (ret < 0)
+ return ret;
+ *val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 240e6c420701..910e7e965fc4 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -1034,7 +1034,7 @@ static int ina2xx_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int ina2xx_remove(struct i2c_client *client)
+static void ina2xx_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
@@ -1048,8 +1048,6 @@ static int ina2xx_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static const struct i2c_device_id ina2xx_id[] = {
diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
index bf5c03c34f84..a7325dbbb99a 100644
--- a/drivers/iio/adc/ingenic-adc.c
+++ b/drivers/iio/adc/ingenic-adc.c
@@ -719,12 +719,12 @@ static int ingenic_adc_read_raw(struct iio_dev *iio_dev,
}
}
-static int ingenic_adc_of_xlate(struct iio_dev *iio_dev,
- const struct of_phandle_args *iiospec)
+static int ingenic_adc_fwnode_xlate(struct iio_dev *iio_dev,
+ const struct fwnode_reference_args *iiospec)
{
int i;
- if (!iiospec->args_count)
+ if (!iiospec->nargs)
return -EINVAL;
for (i = 0; i < iio_dev->num_channels; ++i)
@@ -734,16 +734,11 @@ static int ingenic_adc_of_xlate(struct iio_dev *iio_dev,
return -EINVAL;
}
-static void ingenic_adc_clk_cleanup(void *data)
-{
- clk_unprepare(data);
-}
-
static const struct iio_info ingenic_adc_info = {
.write_raw = ingenic_adc_write_raw,
.read_raw = ingenic_adc_read_raw,
.read_avail = ingenic_adc_read_avail,
- .of_xlate = ingenic_adc_of_xlate,
+ .fwnode_xlate = ingenic_adc_fwnode_xlate,
};
static int ingenic_adc_buffer_enable(struct iio_dev *iio_dev)
@@ -858,13 +853,13 @@ static int ingenic_adc_probe(struct platform_device *pdev)
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- adc->clk = devm_clk_get(dev, "adc");
+ adc->clk = devm_clk_get_prepared(dev, "adc");
if (IS_ERR(adc->clk)) {
dev_err(dev, "Unable to get clock\n");
return PTR_ERR(adc->clk);
}
- ret = clk_prepare_enable(adc->clk);
+ ret = clk_enable(adc->clk);
if (ret) {
dev_err(dev, "Failed to enable clock\n");
return ret;
@@ -893,12 +888,6 @@ static int ingenic_adc_probe(struct platform_device *pdev)
usleep_range(2000, 3000); /* Must wait at least 2ms. */
clk_disable(adc->clk);
- ret = devm_add_action_or_reset(dev, ingenic_adc_clk_cleanup, adc->clk);
- if (ret) {
- dev_err(dev, "Unable to add action\n");
- return ret;
- }
-
iio_dev->name = "jz-adc";
iio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
iio_dev->setup_ops = &ingenic_buffer_setup_ops;
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index 42e6cd6fa6f7..450a243d1f7c 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -121,11 +121,6 @@ static void lpc18xx_clear_cr_reg(void *data)
writel(0, adc->base + LPC18XX_ADC_CR);
}
-static void lpc18xx_clk_disable(void *clk)
-{
- clk_disable_unprepare(clk);
-}
-
static void lpc18xx_regulator_disable(void *vref)
{
regulator_disable(vref);
@@ -151,7 +146,7 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (IS_ERR(adc->base))
return PTR_ERR(adc->base);
- adc->clk = devm_clk_get(&pdev->dev, NULL);
+ adc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(adc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(adc->clk),
"error getting clock\n");
@@ -177,17 +172,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_prepare_enable(adc->clk);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&pdev->dev, lpc18xx_clk_disable,
- adc->clk);
- if (ret)
- return ret;
-
rate = clk_get_rate(adc->clk);
clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
diff --git a/drivers/iio/adc/ltc2496.c b/drivers/iio/adc/ltc2496.c
index dfb3bb5997e5..2593fa4322eb 100644
--- a/drivers/iio/adc/ltc2496.c
+++ b/drivers/iio/adc/ltc2496.c
@@ -15,6 +15,7 @@
#include <linux/iio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include "ltc2497.h"
@@ -74,6 +75,7 @@ static int ltc2496_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
st->common_ddata.result_and_measure = ltc2496_result_and_measure;
+ st->common_ddata.chip_info = device_get_match_data(dev);
return ltc2497core_probe(dev, indio_dev);
}
@@ -85,8 +87,13 @@ static void ltc2496_remove(struct spi_device *spi)
ltc2497core_remove(indio_dev);
}
+static const struct ltc2497_chip_info ltc2496_info = {
+ .resolution = 16,
+ .name = NULL,
+};
+
static const struct of_device_id ltc2496_of_match[] = {
- { .compatible = "lltc,ltc2496", },
+ { .compatible = "lltc,ltc2496", .data = &ltc2496_info, },
{},
};
MODULE_DEVICE_TABLE(of, ltc2496_of_match);
diff --git a/drivers/iio/adc/ltc2497-core.c b/drivers/iio/adc/ltc2497-core.c
index 2a485c8a1940..f52d37af4d1f 100644
--- a/drivers/iio/adc/ltc2497-core.c
+++ b/drivers/iio/adc/ltc2497-core.c
@@ -95,7 +95,7 @@ static int ltc2497core_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret / 1000;
- *val2 = 17;
+ *val2 = ddata->chip_info->resolution + 1;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -169,7 +169,15 @@ int ltc2497core_probe(struct device *dev, struct iio_dev *indio_dev)
struct ltc2497core_driverdata *ddata = iio_priv(indio_dev);
int ret;
- indio_dev->name = dev_name(dev);
+ /*
+ * Keep using dev_name() for the iio_dev's name on some of the parts,
+ * since updating it would result in an ABI breakage.
+ */
+ if (ddata->chip_info->name)
+ indio_dev->name = ddata->chip_info->name;
+ else
+ indio_dev->name = dev_name(dev);
+
indio_dev->info = &ltc2497core_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ltc2497core_channel;
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index f7c786f37ceb..556f10dfb502 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -12,18 +12,31 @@
#include <linux/iio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
+#include <asm/unaligned.h>
#include "ltc2497.h"
+enum ltc2497_chip_type {
+ TYPE_LTC2497,
+ TYPE_LTC2499,
+};
+
struct ltc2497_driverdata {
/* this must be the first member */
struct ltc2497core_driverdata common_ddata;
struct i2c_client *client;
+ u32 recv_size;
+ u32 sub_lsb;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
*/
- __be32 buf __aligned(IIO_DMA_MINALIGN);
+ union {
+ __be32 d32;
+ u8 d8[3];
+ } data __aligned(IIO_DMA_MINALIGN);
};
static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
@@ -34,13 +47,43 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
int ret;
if (val) {
- ret = i2c_master_recv(st->client, (char *)&st->buf, 3);
+ if (st->recv_size == 3)
+ ret = i2c_master_recv(st->client, (char *)&st->data.d8,
+ st->recv_size);
+ else
+ ret = i2c_master_recv(st->client, (char *)&st->data.d32,
+ st->recv_size);
if (ret < 0) {
dev_err(&st->client->dev, "i2c_master_recv failed\n");
return ret;
}
- *val = (be32_to_cpu(st->buf) >> 14) - (1 << 17);
+ /*
+ * The data format is 16/24 bit 2s complement, but with an upper sign bit on the
+ * resolution + 1 position, which is set for positive values only. Given this
+ * bit's value, subtracting BIT(resolution + 1) from the ADC's result is
+ * equivalent to a sign extension.
+ */
+ if (st->recv_size == 3) {
+ *val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
+ - BIT(ddata->chip_info->resolution + 1);
+ } else {
+ *val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
+ - BIT(ddata->chip_info->resolution + 1);
+ }
+
+ /*
+ * The part started a new conversion at the end of the above i2c
+ * transfer, so if the address didn't change since the last call
+ * everything is fine and we can return early.
+ * If not (which should only happen when some sort of bulk
+ * conversion is implemented) we have to program the new
+ * address. Note that this probably fails as the conversion that
+ * was triggered above is likely not complete yet and the two
+ * operations have to be done in a single transfer.
+ */
+ if (ddata->addr_prev == address)
+ return 0;
}
ret = i2c_smbus_write_byte(st->client,
@@ -54,9 +97,11 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
static int ltc2497_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ const struct ltc2497_chip_info *chip_info;
struct iio_dev *indio_dev;
struct ltc2497_driverdata *st;
struct device *dev = &client->dev;
+ u32 resolution;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE))
@@ -71,26 +116,46 @@ static int ltc2497_probe(struct i2c_client *client,
st->client = client;
st->common_ddata.result_and_measure = ltc2497_result_and_measure;
+ chip_info = device_get_match_data(dev);
+ if (!chip_info)
+ chip_info = (const struct ltc2497_chip_info *)id->driver_data;
+ st->common_ddata.chip_info = chip_info;
+
+ resolution = chip_info->resolution;
+ st->sub_lsb = 31 - (resolution + 1);
+ st->recv_size = BITS_TO_BYTES(resolution) + 1;
+
return ltc2497core_probe(dev, indio_dev);
}
-static int ltc2497_remove(struct i2c_client *client)
+static void ltc2497_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
ltc2497core_remove(indio_dev);
-
- return 0;
}
+static const struct ltc2497_chip_info ltc2497_info[] = {
+ [TYPE_LTC2497] = {
+ .resolution = 16,
+ .name = NULL,
+ },
+ [TYPE_LTC2499] = {
+ .resolution = 24,
+ .name = "ltc2499",
+ },
+};
+
static const struct i2c_device_id ltc2497_id[] = {
- { "ltc2497", 0 },
+ { "ltc2497", (kernel_ulong_t)&ltc2497_info[TYPE_LTC2497] },
+ { "ltc2499", (kernel_ulong_t)&ltc2497_info[TYPE_LTC2499] },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc2497_id);
static const struct of_device_id ltc2497_of_match[] = {
- { .compatible = "lltc,ltc2497", },
+ { .compatible = "lltc,ltc2497", .data = &ltc2497_info[TYPE_LTC2497] },
+ { .compatible = "lltc,ltc2499", .data = &ltc2497_info[TYPE_LTC2499] },
{},
};
MODULE_DEVICE_TABLE(of, ltc2497_of_match);
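The ltc2497 rework derives the transfer size and shift from the chip's resolution (16 bits for ltc2496/ltc2497, 24 bits for ltc2499) and then subtracts BIT(resolution + 1) to sign-extend the result. A standalone sketch of that conversion; the word is whatever value was assembled from the I2C read:

#include <stdint.h>

/*
 * Sketch only: resolution 16 gives sub_lsb = 31 - 17 = 14 and a 3-byte
 * read; resolution 24 gives sub_lsb = 31 - 25 = 6 and a 4-byte read.
 */
static int example_ltc2497_convert(uint32_t word, unsigned int resolution)
{
	unsigned int sub_lsb = 31 - (resolution + 1);

	/*
	 * The bit at position resolution + 1 is a sign flag that is set
	 * for positive values only, so subtracting it sign-extends the
	 * reading into a signed integer.
	 */
	return (int)((word >> sub_lsb) - (1u << (resolution + 1)));
}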
diff --git a/drivers/iio/adc/ltc2497.h b/drivers/iio/adc/ltc2497.h
index d0b42dd6b8ad..e023de0d88c4 100644
--- a/drivers/iio/adc/ltc2497.h
+++ b/drivers/iio/adc/ltc2497.h
@@ -4,9 +4,15 @@
#define LTC2497_CONFIG_DEFAULT LTC2497_ENABLE
#define LTC2497_CONVERSION_TIME_MS 150ULL
+struct ltc2497_chip_info {
+ u32 resolution;
+ const char *name;
+};
+
struct ltc2497core_driverdata {
struct regulator *ref;
ktime_t time_prev;
+ const struct ltc2497_chip_info *chip_info;
u8 addr_prev;
int (*result_and_measure)(struct ltc2497core_driverdata *ddata,
u8 address, int *val);
diff --git a/drivers/iio/adc/max11205.c b/drivers/iio/adc/max11205.c
new file mode 100644
index 000000000000..65fc32971ba5
--- /dev/null
+++ b/drivers/iio/adc/max11205.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Maxim MAX11205 16-Bit Delta-Sigma ADC
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX1240-max11205.pdf
+ * Copyright (C) 2022 Analog Devices, Inc.
+ * Author: Ramona Bolboaca <ramona.bolboaca@analog.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/adc/ad_sigma_delta.h>
+
+#define MAX11205_BIT_SCALE 15
+#define MAX11205A_OUT_DATA_RATE 116
+#define MAX11205B_OUT_DATA_RATE 13
+
+enum max11205_chip_type {
+ TYPE_MAX11205A,
+ TYPE_MAX11205B,
+};
+
+struct max11205_chip_info {
+ unsigned int out_data_rate;
+ const char *name;
+};
+
+struct max11205_state {
+ const struct max11205_chip_info *chip_info;
+ struct regulator *vref;
+ struct ad_sigma_delta sd;
+};
+
+static const struct ad_sigma_delta_info max11205_sigma_delta_info = {
+ .has_registers = false,
+};
+
+static int max11205_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max11205_state *st = iio_priv(indio_dev);
+ int reg_mv;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return ad_sigma_delta_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ reg_mv = regulator_get_voltage(st->vref);
+ if (reg_mv < 0)
+ return reg_mv;
+ reg_mv /= 1000;
+ *val = reg_mv;
+ *val2 = MAX11205_BIT_SCALE;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->chip_info->out_data_rate;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info max11205_iio_info = {
+ .read_raw = max11205_read_raw,
+ .validate_trigger = ad_sd_validate_trigger,
+};
+
+static const struct iio_chan_spec max11205_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static const struct max11205_chip_info max11205_chip_info[] = {
+ [TYPE_MAX11205A] = {
+ .out_data_rate = MAX11205A_OUT_DATA_RATE,
+ .name = "max11205a",
+ },
+ [TYPE_MAX11205B] = {
+ .out_data_rate = MAX11205B_OUT_DATA_RATE,
+ .name = "max11205b",
+ },
+};
+
+static void max11205_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static int max11205_probe(struct spi_device *spi)
+{
+ struct max11205_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ ad_sd_init(&st->sd, indio_dev, spi, &max11205_sigma_delta_info);
+
+ st->chip_info = device_get_match_data(&spi->dev);
+ if (!st->chip_info)
+ st->chip_info =
+ (const struct max11205_chip_info *)spi_get_device_id(spi)->driver_data;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max11205_channels;
+ indio_dev->num_channels = 1;
+ indio_dev->info = &max11205_iio_info;
+
+ st->vref = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->vref))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->vref),
+ "Failed to get vref regulator\n");
+
+ ret = regulator_enable(st->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, max11205_reg_disable, st->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id max11205_spi_ids[] = {
+ { "max11205a", (kernel_ulong_t)&max11205_chip_info[TYPE_MAX11205A] },
+ { "max11205b", (kernel_ulong_t)&max11205_chip_info[TYPE_MAX11205B] },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max11205_spi_ids);
+
+static const struct of_device_id max11205_dt_ids[] = {
+ {
+ .compatible = "maxim,max11205a",
+ .data = &max11205_chip_info[TYPE_MAX11205A],
+ },
+ {
+ .compatible = "maxim,max11205b",
+ .data = &max11205_chip_info[TYPE_MAX11205B],
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max11205_dt_ids);
+
+static struct spi_driver max11205_spi_driver = {
+ .driver = {
+ .name = "max11205",
+ .of_match_table = max11205_dt_ids,
+ },
+ .probe = max11205_probe,
+ .id_table = max11205_spi_ids,
+};
+module_spi_driver(max11205_spi_driver);
+
+MODULE_AUTHOR("Ramona Bolboaca <ramona.bolboaca@analog.com>");
+MODULE_DESCRIPTION("MAX11205 ADC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_AD_SIGMA_DELTA);
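The new max11205 driver exposes a single 16-bit voltage channel. As a hedged illustration (the consumer device and the "adc" channel mapping are hypothetical and would normally come from an io-channels devicetree property), another driver could read it through the in-kernel IIO consumer API:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Sketch only: read one raw sample from a mapped IIO channel. */
static int example_read_adc(struct device *dev)
{
	struct iio_channel *chan;
	int raw, ret;

	chan = devm_iio_channel_get(dev, "adc");	/* hypothetical mapping */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	dev_info(dev, "raw sample: %d\n", raw);
	return 0;
}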
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index eef55ed4814a..a28cf86cdce8 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -29,7 +29,6 @@
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/driver.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -1595,11 +1594,6 @@ static int max1363_probe(struct i2c_client *client,
if (!indio_dev)
return -ENOMEM;
- ret = devm_iio_map_array_register(&client->dev, indio_dev,
- client->dev.platform_data);
- if (ret < 0)
- return ret;
-
st = iio_priv(indio_dev);
mutex_init(&st->lock);
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 1cb4590fe412..b35fd2c9c3c0 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -5,16 +5,25 @@
* Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
* Copyright (C) 2018 Kent Gustavsson <kent@minoris.se>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/iio/iio.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/trigger.h>
+
+#include <asm/unaligned.h>
+
#define MCP3911_REG_CHANNEL0 0x00
#define MCP3911_REG_CHANNEL1 0x03
#define MCP3911_REG_MOD 0x06
@@ -22,6 +31,8 @@
#define MCP3911_REG_GAIN 0x09
#define MCP3911_REG_STATUSCOM 0x0a
+#define MCP3911_STATUSCOM_DRHIZ BIT(12)
+#define MCP3911_STATUSCOM_READ GENMASK(7, 6)
#define MCP3911_STATUSCOM_CH1_24WIDTH BIT(4)
#define MCP3911_STATUSCOM_CH0_24WIDTH BIT(3)
#define MCP3911_STATUSCOM_EN_OFFCAL BIT(2)
@@ -30,6 +41,7 @@
#define MCP3911_REG_CONFIG 0x0c
#define MCP3911_CONFIG_CLKEXT BIT(1)
#define MCP3911_CONFIG_VREFEXT BIT(2)
+#define MCP3911_CONFIG_OSR GENMASK(13, 11)
#define MCP3911_REG_OFFCAL_CH0 0x0e
#define MCP3911_REG_GAINCAL_CH0 0x11
@@ -40,20 +52,30 @@
#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
-/* Internal voltage reference in uV */
-#define MCP3911_INT_VREF_UV 1200000
+/* Internal voltage reference in mV */
+#define MCP3911_INT_VREF_MV 1200
#define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
#define MCP3911_NUM_CHANNELS 2
+static const int mcp3911_osr_table[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };
+
struct mcp3911 {
struct spi_device *spi;
struct mutex lock;
struct regulator *vref;
struct clk *clki;
u32 dev_addr;
+ struct iio_trigger *trig;
+ struct {
+ u32 channels[MCP3911_NUM_CHANNELS];
+ s64 ts __aligned(8);
+ } scan;
+
+ u8 tx_buf __aligned(IIO_DMA_MINALIGN);
+ u8 rx_buf[MCP3911_NUM_CHANNELS * 3];
};
static int mcp3911_read(struct mcp3911 *adc, u8 reg, u32 *val, u8 len)
@@ -98,6 +120,36 @@ static int mcp3911_update(struct mcp3911 *adc, u8 reg, u32 mask,
return mcp3911_write(adc, reg, val, len);
}
+static int mcp3911_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+}
+
+static int mcp3911_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *type = IIO_VAL_INT;
+ *vals = mcp3911_osr_table;
+ *length = ARRAY_SIZE(mcp3911_osr_table);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int mcp3911_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long mask)
@@ -113,6 +165,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
if (ret)
goto out;
+ *val = sign_extend32(*val, 23);
+
ret = IIO_VAL_INT;
break;
@@ -124,6 +178,15 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = mcp3911_read(adc, MCP3911_REG_CONFIG, val, 2);
+ if (ret)
+ goto out;
+
+ *val = FIELD_GET(MCP3911_CONFIG_OSR, *val);
+ *val = 32 << *val;
+ ret = IIO_VAL_INT;
+ break;
case IIO_CHAN_INFO_SCALE:
if (adc->vref) {
@@ -137,11 +200,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
*val = ret / 1000;
} else {
- *val = MCP3911_INT_VREF_UV;
+ *val = MCP3911_INT_VREF_MV;
}
- *val2 = 24;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * For 24bit Conversion
+ * Raw = ((Voltage)/(Vref) * 2^23 * Gain * 1.5
+ * Voltage = Raw * (Vref)/(2^23 * Gain * 1.5)
+ */
+
+ /* val2 = (2^23 * 1.5) */
+ *val2 = 12582912;
+ ret = IIO_VAL_FRACTIONAL;
break;
}
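With this change the mcp3911 scale is reported as Vref / (2^23 * 1.5) through IIO_VAL_FRACTIONAL. A quick userspace check of what that works out to, assuming the internal 1.2 V reference and a gain of 1 (gain is not folded into val2 in this hunk):

#include <stdio.h>

int main(void)
{
	const double vref_mv = 1200.0;		/* internal reference */
	const double denom = 8388608.0 * 1.5;	/* 2^23 * 1.5 = 12582912 */

	/* Scale in mV per LSB, as userspace would compute 1200/12582912. */
	printf("scale      = %.9f mV/LSB\n", vref_mv / denom);

	/* A full-scale raw value of 2^23 maps back to 1200 / 1.5 = 800 mV. */
	printf("full scale = %.1f mV\n", 8388608.0 * vref_mv / denom);
	return 0;
}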
@@ -176,6 +246,17 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
MCP3911_STATUSCOM_EN_OFFCAL,
MCP3911_STATUSCOM_EN_OFFCAL, 2);
break;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
+ if (val == mcp3911_osr_table[i]) {
+ val = FIELD_PREP(MCP3911_CONFIG_OSR, i);
+ ret = mcp3911_update(adc, MCP3911_REG_CONFIG, MCP3911_CONFIG_OSR,
+ val, 2);
+ break;
+ }
+ }
+ break;
}
out:
@@ -187,28 +268,90 @@ out:
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = idx, \
+ .scan_index = idx, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_OFFSET) | \
BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 24, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec mcp3911_channels[] = {
MCP3911_CHAN(0),
MCP3911_CHAN(1),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static irqreturn_t mcp3911_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mcp3911 *adc = iio_priv(indio_dev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = &adc->tx_buf,
+ .len = 1,
+ }, {
+ .rx_buf = adc->rx_buf,
+ .len = sizeof(adc->rx_buf),
+ },
+ };
+ int scan_index;
+ int i = 0;
+ int ret;
+
+ mutex_lock(&adc->lock);
+ adc->tx_buf = MCP3911_REG_READ(MCP3911_CHANNEL(0), adc->dev_addr);
+ ret = spi_sync_transfer(adc->spi, xfer, ARRAY_SIZE(xfer));
+ if (ret < 0) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+ goto out;
+ }
+
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask, indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan = &indio_dev->channels[scan_index];
+
+ adc->scan.channels[i] = get_unaligned_be24(&adc->rx_buf[scan_chan->channel * 3]);
+ i++;
+ }
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
+ iio_get_time_ns(indio_dev));
+out:
+ mutex_unlock(&adc->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
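The scan struct pushed by the handler above pairs two 32-bit storage slots (storagebits = 32 in the channel spec) with an 8-byte-aligned timestamp. A standalone sketch of the resulting layout, using a mirror struct rather than the driver's private definition:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors the driver's scan buffer: two 32-bit samples plus aligned timestamp */
struct scan_mirror {
	uint32_t channels[2];
	int64_t ts __attribute__((aligned(8)));
};

int main(void)
{
	printf("channels[1] at %zu, ts at %zu, total %zu bytes\n",
	       offsetof(struct scan_mirror, channels[1]),
	       offsetof(struct scan_mirror, ts),
	       sizeof(struct scan_mirror)); /* 4, 8, 16 */
	return 0;
}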
+
static const struct iio_info mcp3911_info = {
.read_raw = mcp3911_read_raw,
.write_raw = mcp3911_write_raw,
+ .read_avail = mcp3911_read_avail,
+ .write_raw_get_fmt = mcp3911_write_raw_get_fmt,
};
static int mcp3911_config(struct mcp3911 *adc)
{
struct device *dev = &adc->spi->dev;
- u32 configreg;
+ u32 regval;
int ret;
- device_property_read_u32(dev, "device-addr", &adc->dev_addr);
+ ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
+
+ /*
+ * Fallback to "device-addr" due to historical mismatch between
+ * dt-bindings and implementation
+ */
+ if (ret)
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
if (adc->dev_addr > 3) {
dev_err(&adc->spi->dev,
"invalid device address (%i). Must be in range 0-3.\n",
@@ -217,31 +360,67 @@ static int mcp3911_config(struct mcp3911 *adc)
}
dev_dbg(&adc->spi->dev, "use device address %i\n", adc->dev_addr);
- ret = mcp3911_read(adc, MCP3911_REG_CONFIG, &configreg, 2);
+ ret = mcp3911_read(adc, MCP3911_REG_CONFIG, &regval, 2);
if (ret)
return ret;
+ regval &= ~MCP3911_CONFIG_VREFEXT;
if (adc->vref) {
dev_dbg(&adc->spi->dev, "use external voltage reference\n");
- configreg |= MCP3911_CONFIG_VREFEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 1);
} else {
dev_dbg(&adc->spi->dev,
"use internal voltage reference (1.2V)\n");
- configreg &= ~MCP3911_CONFIG_VREFEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_VREFEXT, 0);
}
+ regval &= ~MCP3911_CONFIG_CLKEXT;
if (adc->clki) {
dev_dbg(&adc->spi->dev, "use external clock as clocksource\n");
- configreg |= MCP3911_CONFIG_CLKEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_CLKEXT, 1);
} else {
dev_dbg(&adc->spi->dev,
"use crystal oscillator as clocksource\n");
- configreg &= ~MCP3911_CONFIG_CLKEXT;
+ regval |= FIELD_PREP(MCP3911_CONFIG_CLKEXT, 0);
}
- return mcp3911_write(adc, MCP3911_REG_CONFIG, configreg, 2);
+ ret = mcp3911_write(adc, MCP3911_REG_CONFIG, regval, 2);
+ if (ret)
+ return ret;
+
+ ret = mcp3911_read(adc, MCP3911_REG_STATUSCOM, &regval, 2);
+ if (ret)
+ return ret;
+
+ /* Address counter incremented, cycle through register types */
+ regval &= ~MCP3911_STATUSCOM_READ;
+ regval |= FIELD_PREP(MCP3911_STATUSCOM_READ, 0x02);
+
+ return mcp3911_write(adc, MCP3911_REG_STATUSCOM, regval, 2);
}
+static void mcp3911_cleanup_regulator(void *vref)
+{
+ regulator_disable(vref);
+}
+
+static int mcp3911_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ struct mcp3911 *adc = iio_trigger_get_drvdata(trig);
+
+ if (enable)
+ enable_irq(adc->spi->irq);
+ else
+ disable_irq(adc->spi->irq);
+
+ return 0;
+}
+
+static const struct iio_trigger_ops mcp3911_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+ .set_trigger_state = mcp3911_set_trigger_state,
+};
+
static int mcp3911_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -270,9 +449,14 @@ static int mcp3911_probe(struct spi_device *spi)
ret = regulator_enable(adc->vref);
if (ret)
return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev,
+ mcp3911_cleanup_regulator, adc->vref);
+ if (ret)
+ return ret;
}
- adc->clki = devm_clk_get(&adc->spi->dev, NULL);
+ adc->clki = devm_clk_get_enabled(&adc->spi->dev, NULL);
if (IS_ERR(adc->clki)) {
if (PTR_ERR(adc->clki) == -ENOENT) {
adc->clki = NULL;
@@ -280,21 +464,22 @@ static int mcp3911_probe(struct spi_device *spi)
dev_err(&adc->spi->dev,
"failed to get adc clk (%ld)\n",
PTR_ERR(adc->clki));
- ret = PTR_ERR(adc->clki);
- goto reg_disable;
- }
- } else {
- ret = clk_prepare_enable(adc->clki);
- if (ret < 0) {
- dev_err(&adc->spi->dev,
- "Failed to enable clki: %d\n", ret);
- goto reg_disable;
+ return PTR_ERR(adc->clki);
}
}
ret = mcp3911_config(adc);
if (ret)
- goto clk_disable;
+ return ret;
+
+ if (device_property_read_bool(&adc->spi->dev, "microchip,data-ready-hiz"))
+ ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM, MCP3911_STATUSCOM_DRHIZ,
+ 0, 2);
+ else
+ ret = mcp3911_update(adc, MCP3911_REG_STATUSCOM, MCP3911_STATUSCOM_DRHIZ,
+ MCP3911_STATUSCOM_DRHIZ, 2);
+ if (ret)
+ return ret;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -306,31 +491,38 @@ static int mcp3911_probe(struct spi_device *spi)
mutex_init(&adc->lock);
- ret = iio_device_register(indio_dev);
- if (ret)
- goto clk_disable;
-
- return ret;
+ if (spi->irq > 0) {
+ adc->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!adc->trig)
+ return -ENOMEM;
-clk_disable:
- clk_disable_unprepare(adc->clki);
-reg_disable:
- if (adc->vref)
- regulator_disable(adc->vref);
-
- return ret;
-}
+ adc->trig->ops = &mcp3911_trigger_ops;
+ iio_trigger_set_drvdata(adc->trig, adc);
+ ret = devm_iio_trigger_register(&spi->dev, adc->trig);
+ if (ret)
+ return ret;
-static void mcp3911_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct mcp3911 *adc = iio_priv(indio_dev);
+ /*
+ * The device generates interrupts as long as it is powered up.
+ * Some platforms might not allow powering it down, so keep the
+ * interrupt disabled by default to avoid putting extra load on the system.
+ */
+ ret = devm_request_irq(&spi->dev, spi->irq,
+ &iio_trigger_generic_data_rdy_poll, IRQF_NO_AUTOEN | IRQF_ONESHOT,
+ indio_dev->name, adc->trig);
+ if (ret)
+ return ret;
+ }
- iio_device_unregister(indio_dev);
+ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+ NULL,
+ mcp3911_trigger_handler, NULL);
+ if (ret)
+ return ret;
- clk_disable_unprepare(adc->clki);
- if (adc->vref)
- regulator_disable(adc->vref);
+ return devm_iio_device_register(&adc->spi->dev, indio_dev);
}
static const struct of_device_id mcp3911_dt_ids[] = {
@@ -351,7 +543,6 @@ static struct spi_driver mcp3911_driver = {
.of_match_table = mcp3911_dt_ids,
},
.probe = mcp3911_probe,
- .remove = mcp3911_remove,
.id_table = mcp3911_id,
};
module_spi_driver(mcp3911_driver);
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 35260d9e4e47..3710473e526f 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -353,7 +353,7 @@ static int mt6360_adc_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-static const struct of_device_id __maybe_unused mt6360_adc_of_id[] = {
+static const struct of_device_id mt6360_adc_of_id[] = {
{ .compatible = "mediatek,mt6360-adc", },
{}
};
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 5e9e56821075..eb424496ee1d 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -14,9 +14,9 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -694,8 +694,8 @@ static int pm8xxx_read_raw(struct iio_dev *indio_dev,
}
}
-static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int pm8xxx_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct pm8xxx_xoadc *adc = iio_priv(indio_dev);
u8 pre_scale_mux;
@@ -706,10 +706,10 @@ static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
* First cell is prescaler or premux, second cell is analog
* mux.
*/
- if (iiospec->args_count != 2) {
- dev_err(&indio_dev->dev, "wrong number of arguments for %pOFn need 2 got %d\n",
- iiospec->np,
- iiospec->args_count);
+ if (iiospec->nargs != 2) {
+ dev_err(&indio_dev->dev, "wrong number of arguments for %pfwP need 2 got %d\n",
+ iiospec->fwnode,
+ iiospec->nargs);
return -EINVAL;
}
pre_scale_mux = (u8)iiospec->args[0];
@@ -727,34 +727,34 @@ static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
}
static const struct iio_info pm8xxx_xoadc_info = {
- .of_xlate = pm8xxx_of_xlate,
+ .fwnode_xlate = pm8xxx_fwnode_xlate,
.read_raw = pm8xxx_read_raw,
};
static int pm8xxx_xoadc_parse_channel(struct device *dev,
- struct device_node *np,
+ struct fwnode_handle *fwnode,
const struct xoadc_channel *hw_channels,
struct iio_chan_spec *iio_chan,
struct pm8xxx_chan_info *ch)
{
- const char *name = np->name;
+ const char *name = fwnode_get_name(fwnode);
const struct xoadc_channel *hwchan;
- u32 pre_scale_mux, amux_channel;
+ u32 pre_scale_mux, amux_channel, reg[2];
u32 rsv, dec;
int ret;
int chid;
- ret = of_property_read_u32_index(np, "reg", 0, &pre_scale_mux);
+ ret = fwnode_property_read_u32_array(fwnode, "reg", reg,
+ ARRAY_SIZE(reg));
if (ret) {
- dev_err(dev, "invalid pre scale/mux number %s\n", name);
- return ret;
- }
- ret = of_property_read_u32_index(np, "reg", 1, &amux_channel);
- if (ret) {
- dev_err(dev, "invalid amux channel number %s\n", name);
+ dev_err(dev, "invalid pre scale/mux or amux channel number %s\n",
+ name);
return ret;
}
+ pre_scale_mux = reg[0];
+ amux_channel = reg[1];
+
/* Find the right channel setting */
chid = 0;
hwchan = &hw_channels[0];
@@ -778,7 +778,7 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev,
/* Everyone seems to use default ("type 2") decimation */
ch->decimation = VADC_DEF_DECIMATION;
- if (!of_property_read_u32(np, "qcom,ratiometric", &rsv)) {
+ if (!fwnode_property_read_u32(fwnode, "qcom,ratiometric", &rsv)) {
ch->calibration = VADC_CALIB_RATIOMETRIC;
if (rsv > XOADC_RSV_MAX) {
dev_err(dev, "%s too large RSV value %d\n", name, rsv);
@@ -791,7 +791,7 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev,
}
/* Optional decimation, if omitted we use the default */
- ret = of_property_read_u32(np, "qcom,decimation", &dec);
+ ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &dec);
if (!ret) {
ret = qcom_vadc_decimation_from_dt(dec);
if (ret < 0) {
@@ -820,15 +820,14 @@ static int pm8xxx_xoadc_parse_channel(struct device *dev,
return 0;
}
-static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc,
- struct device_node *np)
+static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc)
{
- struct device_node *child;
+ struct fwnode_handle *child;
struct pm8xxx_chan_info *ch;
int ret;
int i;
- adc->nchans = of_get_available_child_count(np);
+ adc->nchans = device_get_child_node_count(adc->dev);
if (!adc->nchans) {
dev_err(adc->dev, "no channel children\n");
return -ENODEV;
@@ -846,14 +845,14 @@ static int pm8xxx_xoadc_parse_channels(struct pm8xxx_xoadc *adc,
return -ENOMEM;
i = 0;
- for_each_available_child_of_node(np, child) {
+ device_for_each_child_node(adc->dev, child) {
ch = &adc->chans[i];
ret = pm8xxx_xoadc_parse_channel(adc->dev, child,
adc->variant->channels,
&adc->iio_chans[i],
ch);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
i++;
@@ -884,12 +883,11 @@ static int pm8xxx_xoadc_probe(struct platform_device *pdev)
const struct xoadc_variant *variant;
struct pm8xxx_xoadc *adc;
struct iio_dev *indio_dev;
- struct device_node *np = pdev->dev.of_node;
struct regmap *map;
struct device *dev = &pdev->dev;
int ret;
- variant = of_device_get_match_data(dev);
+ variant = device_get_match_data(dev);
if (!variant)
return -ENODEV;
@@ -904,7 +902,7 @@ static int pm8xxx_xoadc_probe(struct platform_device *pdev)
init_completion(&adc->complete);
mutex_init(&adc->lock);
- ret = pm8xxx_xoadc_parse_channels(adc, np);
+ ret = pm8xxx_xoadc_parse_channels(adc);
if (ret)
return ret;
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 87438d1e5c0b..821fee60a765 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -14,9 +14,9 @@
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -403,8 +403,8 @@ static irqreturn_t adc5_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int adc5_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int adc5_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct adc5_chip *adc = iio_priv(indio_dev);
int i;
@@ -416,8 +416,8 @@ static int adc5_of_xlate(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int adc7_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int adc7_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct adc5_chip *adc = iio_priv(indio_dev);
int i, v_channel;
@@ -481,12 +481,12 @@ static int adc7_read_raw(struct iio_dev *indio_dev,
static const struct iio_info adc5_info = {
.read_raw = adc5_read_raw,
- .of_xlate = adc5_of_xlate,
+ .fwnode_xlate = adc5_fwnode_xlate,
};
static const struct iio_info adc7_info = {
.read_raw = adc7_read_raw,
- .of_xlate = adc7_of_xlate,
+ .fwnode_xlate = adc7_fwnode_xlate,
};
struct adc5_channels {
@@ -526,6 +526,8 @@ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_DEFAULT)
[ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
SCALE_HW_CALIB_DEFAULT)
+ [ADC5_VCOIN] = ADC5_CHAN_VOLT("vcoin", 1,
+ SCALE_HW_CALIB_DEFAULT)
[ADC5_DIE_TEMP] = ADC5_CHAN_TEMP("die_temp", 0,
SCALE_HW_CALIB_PMIC_THERM)
[ADC5_USB_IN_I] = ADC5_CHAN_VOLT("usb_in_i_uv", 0,
@@ -549,6 +551,12 @@ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_THERM_100K_PULLUP)
[ADC5_AMUX_THM2] = ADC5_CHAN_TEMP("amux_thm2", 0,
SCALE_HW_CALIB_PM5_SMB_TEMP)
+ [ADC5_GPIO1_100K_PU] = ADC5_CHAN_TEMP("gpio1_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
+ [ADC5_GPIO3_100K_PU] = ADC5_CHAN_TEMP("gpio3_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
+ [ADC5_GPIO4_100K_PU] = ADC5_CHAN_TEMP("gpio4_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
};
static const struct adc5_channels adc7_chans_pmic[ADC5_MAX_CHANNEL] = {
@@ -589,6 +597,8 @@ static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_DEFAULT)
[ADC5_1P25VREF] = ADC5_CHAN_VOLT("vref_1p25", 0,
SCALE_HW_CALIB_DEFAULT)
+ [ADC5_VREF_VADC] = ADC5_CHAN_VOLT("vref_vadc", 0,
+ SCALE_HW_CALIB_DEFAULT)
[ADC5_VPH_PWR] = ADC5_CHAN_VOLT("vph_pwr", 1,
SCALE_HW_CALIB_DEFAULT)
[ADC5_VBAT_SNS] = ADC5_CHAN_VOLT("vbat_sns", 1,
@@ -611,18 +621,18 @@ static const struct adc5_channels adc5_chans_rev2[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_THERM_100K_PULLUP)
};
-static int adc5_get_dt_channel_data(struct adc5_chip *adc,
+static int adc5_get_fw_channel_data(struct adc5_chip *adc,
struct adc5_channel_prop *prop,
- struct device_node *node,
+ struct fwnode_handle *fwnode,
const struct adc5_data *data)
{
- const char *name = node->name, *channel_name;
+ const char *name = fwnode_get_name(fwnode), *channel_name;
u32 chan, value, varr[2];
u32 sid = 0;
int ret;
struct device *dev = adc->dev;
- ret = of_property_read_u32(node, "reg", &chan);
+ ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
return ret;
@@ -647,15 +657,13 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->channel = chan;
prop->sid = sid;
- channel_name = of_get_property(node,
- "label", NULL) ? : node->name;
- if (!channel_name) {
- dev_err(dev, "Invalid channel name\n");
- return -EINVAL;
- }
+ ret = fwnode_property_read_string(fwnode, "label", &channel_name);
+ if (ret)
+ channel_name = name;
+
prop->datasheet_name = channel_name;
- ret = of_property_read_u32(node, "qcom,decimation", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &value);
if (!ret) {
ret = qcom_adc5_decimation_from_dt(value, data->decimation);
if (ret < 0) {
@@ -668,7 +676,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->decimation = ADC5_DECIMATION_DEFAULT;
}
- ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
+ ret = fwnode_property_read_u32_array(fwnode, "qcom,pre-scaling", varr, 2);
if (!ret) {
ret = qcom_adc5_prescaling_from_dt(varr[0], varr[1]);
if (ret < 0) {
@@ -682,7 +690,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
adc->data->adc_chans[prop->channel].prescale_index;
}
- ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,hw-settle-time", &value);
if (!ret) {
u8 dig_version[2];
@@ -713,7 +721,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
}
- ret = of_property_read_u32(node, "qcom,avg-samples", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,avg-samples", &value);
if (!ret) {
ret = qcom_adc5_avg_samples_from_dt(value);
if (ret < 0) {
@@ -726,7 +734,7 @@ static int adc5_get_dt_channel_data(struct adc5_chip *adc,
prop->avg_samples = VADC_DEF_AVG_SAMPLES;
}
- if (of_property_read_bool(node, "qcom,ratiometric"))
+ if (fwnode_property_read_bool(fwnode, "qcom,ratiometric"))
prop->cal_method = ADC5_RATIOMETRIC_CAL;
else
prop->cal_method = ADC5_ABSOLUTE_CAL;
@@ -801,16 +809,16 @@ static const struct of_device_id adc5_match_table[] = {
};
MODULE_DEVICE_TABLE(of, adc5_match_table);
-static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
+static int adc5_get_fw_data(struct adc5_chip *adc)
{
const struct adc5_channels *adc_chan;
struct iio_chan_spec *iio_chan;
struct adc5_channel_prop prop, *chan_props;
- struct device_node *child;
+ struct fwnode_handle *child;
unsigned int index = 0;
int ret;
- adc->nchannels = of_get_available_child_count(node);
+ adc->nchannels = device_get_child_node_count(adc->dev);
if (!adc->nchannels)
return -EINVAL;
@@ -826,14 +834,14 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
chan_props = adc->chan_props;
iio_chan = adc->iio_chans;
- adc->data = of_device_get_match_data(adc->dev);
+ adc->data = device_get_match_data(adc->dev);
if (!adc->data)
adc->data = &adc5_data_pmic;
- for_each_available_child_of_node(node, child) {
- ret = adc5_get_dt_channel_data(adc, &prop, child, adc->data);
+ device_for_each_child_node(adc->dev, child) {
+ ret = adc5_get_fw_channel_data(adc, &prop, child, adc->data);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
@@ -858,7 +866,6 @@ static int adc5_get_dt_data(struct adc5_chip *adc, struct device_node *node)
static int adc5_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct adc5_chip *adc;
@@ -870,7 +877,7 @@ static int adc5_probe(struct platform_device *pdev)
if (!regmap)
return -ENODEV;
- ret = of_property_read_u32(node, "reg", &reg);
+ ret = device_property_read_u32(dev, "reg", &reg);
if (ret < 0)
return ret;
@@ -886,7 +893,7 @@ static int adc5_probe(struct platform_device *pdev)
init_completion(&adc->complete);
mutex_init(&adc->lock);
- ret = adc5_get_dt_data(adc, node);
+ ret = adc5_get_fw_data(adc);
if (ret) {
dev_err(dev, "adc get dt data failed\n");
return ret;
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 34202ba52469..bcff0f62b70e 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/log2.h>
@@ -481,8 +482,8 @@ static int vadc_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int vadc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int vadc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
struct vadc_priv *vadc = iio_priv(indio_dev);
unsigned int i;
@@ -496,7 +497,7 @@ static int vadc_of_xlate(struct iio_dev *indio_dev,
static const struct iio_info vadc_info = {
.read_raw = vadc_read_raw,
- .of_xlate = vadc_of_xlate,
+ .fwnode_xlate = vadc_fwnode_xlate,
};
struct vadc_channels {
@@ -647,15 +648,15 @@ static const struct vadc_channels vadc_chans[] = {
VADC_CHAN_NO_SCALE(LR_MUX3_BUF_PU1_PU2_XO_THERM, 0)
};
-static int vadc_get_dt_channel_data(struct device *dev,
+static int vadc_get_fw_channel_data(struct device *dev,
struct vadc_channel_prop *prop,
- struct device_node *node)
+ struct fwnode_handle *fwnode)
{
- const char *name = node->name;
+ const char *name = fwnode_get_name(fwnode);
u32 chan, value, varr[2];
int ret;
- ret = of_property_read_u32(node, "reg", &chan);
+ ret = fwnode_property_read_u32(fwnode, "reg", &chan);
if (ret) {
dev_err(dev, "invalid channel number %s\n", name);
return ret;
@@ -669,7 +670,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
/* the channel has DT description */
prop->channel = chan;
- ret = of_property_read_u32(node, "qcom,decimation", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,decimation", &value);
if (!ret) {
ret = qcom_vadc_decimation_from_dt(value);
if (ret < 0) {
@@ -682,7 +683,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->decimation = VADC_DEF_DECIMATION;
}
- ret = of_property_read_u32_array(node, "qcom,pre-scaling", varr, 2);
+ ret = fwnode_property_read_u32_array(fwnode, "qcom,pre-scaling", varr, 2);
if (!ret) {
ret = vadc_prescaling_from_dt(varr[0], varr[1]);
if (ret < 0) {
@@ -695,7 +696,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->prescale = vadc_chans[prop->channel].prescale_index;
}
- ret = of_property_read_u32(node, "qcom,hw-settle-time", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,hw-settle-time", &value);
if (!ret) {
ret = vadc_hw_settle_time_from_dt(value);
if (ret < 0) {
@@ -708,7 +709,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->hw_settle_time = VADC_DEF_HW_SETTLE_TIME;
}
- ret = of_property_read_u32(node, "qcom,avg-samples", &value);
+ ret = fwnode_property_read_u32(fwnode, "qcom,avg-samples", &value);
if (!ret) {
ret = vadc_avg_samples_from_dt(value);
if (ret < 0) {
@@ -721,7 +722,7 @@ static int vadc_get_dt_channel_data(struct device *dev,
prop->avg_samples = VADC_DEF_AVG_SAMPLES;
}
- if (of_property_read_bool(node, "qcom,ratiometric"))
+ if (fwnode_property_read_bool(fwnode, "qcom,ratiometric"))
prop->calibration = VADC_CALIB_RATIOMETRIC;
else
prop->calibration = VADC_CALIB_ABSOLUTE;
@@ -731,16 +732,16 @@ static int vadc_get_dt_channel_data(struct device *dev,
return 0;
}
-static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
+static int vadc_get_fw_data(struct vadc_priv *vadc)
{
const struct vadc_channels *vadc_chan;
struct iio_chan_spec *iio_chan;
struct vadc_channel_prop prop;
- struct device_node *child;
+ struct fwnode_handle *child;
unsigned int index = 0;
int ret;
- vadc->nchannels = of_get_available_child_count(node);
+ vadc->nchannels = device_get_child_node_count(vadc->dev);
if (!vadc->nchannels)
return -EINVAL;
@@ -756,10 +757,10 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
iio_chan = vadc->iio_chans;
- for_each_available_child_of_node(node, child) {
- ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
+ device_for_each_child_node(vadc->dev, child) {
+ ret = vadc_get_fw_channel_data(vadc->dev, &prop, child);
if (ret) {
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
@@ -848,7 +849,6 @@ static int vadc_check_revision(struct vadc_priv *vadc)
static int vadc_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct iio_dev *indio_dev;
struct vadc_priv *vadc;
@@ -860,7 +860,7 @@ static int vadc_probe(struct platform_device *pdev)
if (!regmap)
return -ENODEV;
- ret = of_property_read_u32(node, "reg", &reg);
+ ret = device_property_read_u32(dev, "reg", &reg);
if (ret < 0)
return ret;
@@ -880,7 +880,7 @@ static int vadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = vadc_get_dt_data(vadc, node);
+ ret = vadc_get_fw_data(vadc);
if (ret)
return ret;
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
new file mode 100644
index 000000000000..c1b2e8dc9a26
--- /dev/null
+++ b/drivers/iio/adc/rtq6056.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 Richtek Technology Corp.
+ *
+ * ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/util_macros.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define RTQ6056_REG_CONFIG 0x00
+#define RTQ6056_REG_SHUNTVOLT 0x01
+#define RTQ6056_REG_BUSVOLT 0x02
+#define RTQ6056_REG_POWER 0x03
+#define RTQ6056_REG_CURRENT 0x04
+#define RTQ6056_REG_CALIBRATION 0x05
+#define RTQ6056_REG_MASKENABLE 0x06
+#define RTQ6056_REG_ALERTLIMIT 0x07
+#define RTQ6056_REG_MANUFACTID 0xFE
+#define RTQ6056_REG_DIEID 0xFF
+
+#define RTQ6056_VENDOR_ID 0x1214
+#define RTQ6056_DEFAULT_CONFIG 0x4127
+#define RTQ6056_CONT_ALLON 7
+
+enum {
+ RTQ6056_CH_VSHUNT = 0,
+ RTQ6056_CH_VBUS,
+ RTQ6056_CH_POWER,
+ RTQ6056_CH_CURRENT,
+ RTQ6056_MAX_CHANNEL
+};
+
+enum {
+ F_OPMODE = 0,
+ F_VSHUNTCT,
+ F_VBUSCT,
+ F_AVG,
+ F_RESET,
+ F_MAX_FIELDS
+};
+
+struct rtq6056_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_field *rm_fields[F_MAX_FIELDS];
+ u32 shunt_resistor_uohm;
+ int vshuntct_us;
+ int vbusct_us;
+ int avg_sample;
+};
+
+static const struct reg_field rtq6056_reg_fields[F_MAX_FIELDS] = {
+ [F_OPMODE] = REG_FIELD(RTQ6056_REG_CONFIG, 0, 2),
+ [F_VSHUNTCT] = REG_FIELD(RTQ6056_REG_CONFIG, 3, 5),
+ [F_VBUSCT] = REG_FIELD(RTQ6056_REG_CONFIG, 6, 8),
+ [F_AVG] = REG_FIELD(RTQ6056_REG_CONFIG, 9, 11),
+ [F_RESET] = REG_FIELD(RTQ6056_REG_CONFIG, 15, 15),
+};
+
+static const struct iio_chan_spec rtq6056_channels[RTQ6056_MAX_CHANNEL + 1] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .address = RTQ6056_REG_SHUNTVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .address = RTQ6056_REG_BUSVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+ .address = RTQ6056_REG_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 3,
+ .address = RTQ6056_REG_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(RTQ6056_MAX_CHANNEL),
+};
+
+static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
+ struct iio_chan_spec const *ch,
+ int *val)
+{
+ struct device *dev = priv->dev;
+ unsigned int addr = ch->address;
+ unsigned int regval;
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ ret = regmap_read(priv->regmap, addr, &regval);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
+ if (ret)
+ return ret;
+
+ /* Power and VBUS are unsigned 16-bit; the others are signed 16-bit */
+ if (addr == RTQ6056_REG_BUSVOLT || addr == RTQ6056_REG_POWER)
+ *val = regval;
+ else
+ *val = sign_extend32(regval, 15);
+
+ return IIO_VAL_INT;
+}
+
+static int rtq6056_adc_read_scale(struct iio_chan_spec const *ch, int *val,
+ int *val2)
+{
+ switch (ch->address) {
+ case RTQ6056_REG_SHUNTVOLT:
+ /* VSHUNT lsb 2.5uV */
+ *val = 2500;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case RTQ6056_REG_BUSVOLT:
+ /* VBUS lsb 1.25mV */
+ *val = 1250;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case RTQ6056_REG_POWER:
+ /* Power lsb 25mW */
+ *val = 25;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
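A quick standalone illustration of the LSB values returned above: with IIO_VAL_FRACTIONAL the reported scale is val/val2, so a raw code is simply multiplied by the LSB (the example codes below are made up).

#include <stdio.h>

int main(void)
{
	int raw_vbus = 2560;   /* hypothetical bus-voltage code */
	int raw_vshunt = 400;  /* hypothetical shunt-voltage code */

	/* VBUS LSB = 1250/1000 mV, VSHUNT LSB = 2500/1000000 mV */
	printf("vbus   = %.2f mV\n", raw_vbus * 1250.0 / 1000.0);       /* 3200.00 */
	printf("vshunt = %.4f mV\n", raw_vshunt * 2500.0 / 1000000.0);  /* 1.0000 */
	return 0;
}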
+
+/*
+ * Sample frequencies for the VSHUNT and VBUS channels. The indices
+ * correspond to the bit values expected by the chip; see the datasheet at
+ * https://www.richtek.com/assets/product_file/RTQ6056/DSQ6056-00.pdf
+ */
+static const int rtq6056_samp_freq_list[] = {
+ 7194, 4926, 3717, 1904, 964, 485, 243, 122,
+};
+
+static int rtq6056_adc_set_samp_freq(struct rtq6056_priv *priv,
+ struct iio_chan_spec const *ch, int val)
+{
+ struct regmap_field *rm_field;
+ unsigned int selector;
+ int *ct, ret;
+
+ if (val > 7194 || val < 122)
+ return -EINVAL;
+
+ if (ch->address == RTQ6056_REG_SHUNTVOLT) {
+ rm_field = priv->rm_fields[F_VSHUNTCT];
+ ct = &priv->vshuntct_us;
+ } else if (ch->address == RTQ6056_REG_BUSVOLT) {
+ rm_field = priv->rm_fields[F_VBUSCT];
+ ct = &priv->vbusct_us;
+ } else {
+ return -EINVAL;
+ }
+
+ selector = find_closest_descending(val, rtq6056_samp_freq_list,
+ ARRAY_SIZE(rtq6056_samp_freq_list));
+
+ ret = regmap_field_write(rm_field, selector);
+ if (ret)
+ return ret;
+
+ *ct = 1000000 / rtq6056_samp_freq_list[selector];
+
+ return 0;
+}
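The function above leans on the kernel's find_closest_descending() to turn a requested rate into a register selector; a rough standalone analogue of that selection and of the conversion-time bookkeeping follows (a simple nearest-value loop, not the kernel helper, so tie-breaking may differ).

#include <stdio.h>
#include <stdlib.h>

/* Conversion rates supported by the chip, descending; index == field value */
static const int freq_list[] = { 7194, 4926, 3717, 1904, 964, 485, 243, 122 };

int main(void)
{
	int requested = 2000;
	int best = 0;

	/* pick the table entry closest to the requested rate */
	for (int i = 1; i < (int)(sizeof(freq_list) / sizeof(freq_list[0])); i++)
		if (abs(freq_list[i] - requested) < abs(freq_list[best] - requested))
			best = i;

	printf("request %d Hz -> selector %d (%d Hz), conversion time %d us\n",
	       requested, best, freq_list[best], 1000000 / freq_list[best]);
	/* prints: request 2000 Hz -> selector 3 (1904 Hz), conversion time 525 us */
	return 0;
}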
+
+/*
+ * Available averaging rates for the rtq6056. The indices correspond to
+ * the bit values expected by the chip; see the datasheet at
+ * https://www.richtek.com/assets/product_file/RTQ6056/DSQ6056-00.pdf
+ */
+static const int rtq6056_avg_sample_list[] = {
+ 1, 4, 16, 64, 128, 256, 512, 1024,
+};
+
+static int rtq6056_adc_set_average(struct rtq6056_priv *priv, int val)
+{
+ unsigned int selector;
+ int ret;
+
+ if (val > 1024 || val < 1)
+ return -EINVAL;
+
+ selector = find_closest(val, rtq6056_avg_sample_list,
+ ARRAY_SIZE(rtq6056_avg_sample_list));
+
+ ret = regmap_field_write(priv->rm_fields[F_AVG], selector);
+ if (ret)
+ return ret;
+
+ priv->avg_sample = rtq6056_avg_sample_list[selector];
+
+ return 0;
+}
+
+static int rtq6056_adc_get_sample_freq(struct rtq6056_priv *priv,
+ struct iio_chan_spec const *ch, int *val)
+{
+ int sample_time;
+
+ if (ch->address == RTQ6056_REG_SHUNTVOLT) {
+ sample_time = priv->vshuntct_us;
+ } else if (ch->address == RTQ6056_REG_BUSVOLT) {
+ sample_time = priv->vbusct_us;
+ } else {
+ sample_time = priv->vshuntct_us + priv->vbusct_us;
+ sample_time *= priv->avg_sample;
+ }
+
+ *val = 1000000 / sample_time;
+
+ return IIO_VAL_INT;
+}
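A worked example of the arithmetic above, with hypothetical settings: with both conversion times at the default 1037 us and an averaging count of 4, VSHUNT and VBUS each report 1000000 / 1037 ≈ 964 Hz, while the power and current channels report 1000000 / ((1037 + 1037) * 4) ≈ 120 Hz.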
+
+static int rtq6056_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return rtq6056_adc_read_channel(priv, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ return rtq6056_adc_read_scale(chan, val, val2);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = priv->avg_sample;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return rtq6056_adc_get_sample_freq(priv, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = rtq6056_samp_freq_list;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(rtq6056_samp_freq_list);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = rtq6056_avg_sample_list;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(rtq6056_avg_sample_list);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = rtq6056_adc_set_samp_freq(priv, chan, val);
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = rtq6056_adc_set_average(priv, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static const char *rtq6056_channel_labels[RTQ6056_MAX_CHANNEL] = {
+ [RTQ6056_CH_VSHUNT] = "Vshunt",
+ [RTQ6056_CH_VBUS] = "Vbus",
+ [RTQ6056_CH_POWER] = "Power",
+ [RTQ6056_CH_CURRENT] = "Current",
+};
+
+static int rtq6056_adc_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label)
+{
+ return sysfs_emit(label, "%s\n", rtq6056_channel_labels[chan->channel]);
+}
+
+static int rtq6056_set_shunt_resistor(struct rtq6056_priv *priv,
+ int resistor_uohm)
+{
+ unsigned int calib_val;
+ int ret;
+
+ if (resistor_uohm <= 0) {
+ dev_err(priv->dev, "Invalid resistor [%d]\n", resistor_uohm);
+ return -EINVAL;
+ }
+
+ /* calibration = 5120000 / Rshunt (uOhm), assuming a current LSB of 1 mA */
+ calib_val = 5120000 / resistor_uohm;
+ ret = regmap_write(priv->regmap, RTQ6056_REG_CALIBRATION, calib_val);
+ if (ret)
+ return ret;
+
+ priv->shunt_resistor_uohm = resistor_uohm;
+
+ return 0;
+}
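Worked example for the calibration comment above: with the 2000 micro-ohm default used later in probe, the register value is 5120000 / 2000 = 2560; a hypothetical 500 micro-ohm shunt would give 10240.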
+
+static ssize_t shunt_resistor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rtq6056_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int vals[2] = { priv->shunt_resistor_uohm, 1000000 };
+
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals);
+}
+
+static ssize_t shunt_resistor_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ int val, val_fract, ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
+ if (ret)
+ goto out_store;
+
+ ret = rtq6056_set_shunt_resistor(priv, val * 1000000 + val_fract);
+
+out_store:
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret ?: len;
+}
+
+static IIO_DEVICE_ATTR_RW(shunt_resistor, 0);
+
+static struct attribute *rtq6056_attributes[] = {
+ &iio_dev_attr_shunt_resistor.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group rtq6056_attribute_group = {
+ .attrs = rtq6056_attributes,
+};
+
+static const struct iio_info rtq6056_info = {
+ .attrs = &rtq6056_attribute_group,
+ .read_raw = rtq6056_adc_read_raw,
+ .read_avail = rtq6056_adc_read_avail,
+ .write_raw = rtq6056_adc_write_raw,
+ .read_label = rtq6056_adc_read_label,
+};
+
+static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ struct device *dev = priv->dev;
+ struct {
+ u16 vals[RTQ6056_MAX_CHANNEL];
+ s64 timestamp __aligned(8);
+ } data;
+ unsigned int raw;
+ int i = 0, bit, ret;
+
+ memset(&data, 0, sizeof(data));
+
+ pm_runtime_get_sync(dev);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) {
+ unsigned int addr = rtq6056_channels[bit].address;
+
+ ret = regmap_read(priv->regmap, addr, &raw);
+ if (ret)
+ goto out;
+
+ data.vals[i++] = raw;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data, iio_get_time_ns(indio_dev));
+
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static void rtq6056_enter_shutdown_state(void *dev)
+{
+ struct rtq6056_priv *priv = dev_get_drvdata(dev);
+
+ /* Enter shutdown state */
+ regmap_field_write(priv->rm_fields[F_OPMODE], 0);
+}
+
+static bool rtq6056_is_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RTQ6056_REG_CONFIG ... RTQ6056_REG_ALERTLIMIT:
+ case RTQ6056_REG_MANUFACTID ... RTQ6056_REG_DIEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rtq6056_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RTQ6056_REG_CONFIG:
+ case RTQ6056_REG_CALIBRATION ... RTQ6056_REG_ALERTLIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config rtq6056_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = RTQ6056_REG_DIEID,
+ .readable_reg = rtq6056_is_readable_reg,
+ .writeable_reg = rtq6056_is_writeable_reg,
+};
+
+static int rtq6056_probe(struct i2c_client *i2c)
+{
+ struct iio_dev *indio_dev;
+ struct rtq6056_priv *priv;
+ struct device *dev = &i2c->dev;
+ struct regmap *regmap;
+ unsigned int vendor_id, shunt_resistor_uohm;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->dev = dev;
+ priv->vshuntct_us = priv->vbusct_us = 1037;
+ priv->avg_sample = 1;
+ i2c_set_clientdata(i2c, priv);
+
+ regmap = devm_regmap_init_i2c(i2c, &rtq6056_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ priv->regmap = regmap;
+
+ ret = regmap_read(regmap, RTQ6056_REG_MANUFACTID, &vendor_id);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get manufacturer info\n");
+
+ if (vendor_id != RTQ6056_VENDOR_ID)
+ return dev_err_probe(dev, -ENODEV,
+ "Invalid vendor id 0x%04x\n", vendor_id);
+
+ ret = devm_regmap_field_bulk_alloc(dev, regmap, priv->rm_fields,
+ rtq6056_reg_fields, F_MAX_FIELDS);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regmap field\n");
+
+ /*
+ * By default, configure the average sample count as 1, the bus and shunt
+ * conversion times as 1037 microseconds, and the operating mode to all on.
+ */
+ ret = regmap_write(regmap, RTQ6056_REG_CONFIG, RTQ6056_DEFAULT_CONFIG);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable continuous sensing\n");
+
+ ret = devm_add_action_or_reset(dev, rtq6056_enter_shutdown_state, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable pm_runtime\n");
+
+ /* By default, use 2000 micro-Ohm resistor */
+ shunt_resistor_uohm = 2000;
+ device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &shunt_resistor_uohm);
+
+ ret = rtq6056_set_shunt_resistor(priv, shunt_resistor_uohm);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to init shunt resistor\n");
+
+ indio_dev->name = "rtq6056";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = rtq6056_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rtq6056_channels);
+ indio_dev->info = &rtq6056_info;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ rtq6056_buffer_trigger_handler,
+ NULL);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to allocate iio trigger buffer\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int rtq6056_runtime_suspend(struct device *dev)
+{
+ struct rtq6056_priv *priv = dev_get_drvdata(dev);
+
+ /* Configure to shutdown mode */
+ return regmap_field_write(priv->rm_fields[F_OPMODE], 0);
+}
+
+static int rtq6056_runtime_resume(struct device *dev)
+{
+ struct rtq6056_priv *priv = dev_get_drvdata(dev);
+ int sample_rdy_time_us, ret;
+
+ ret = regmap_field_write(priv->rm_fields[F_OPMODE], RTQ6056_CONT_ALLON);
+ if (ret)
+ return ret;
+
+ sample_rdy_time_us = priv->vbusct_us + priv->vshuntct_us;
+ sample_rdy_time_us *= priv->avg_sample;
+
+ usleep_range(sample_rdy_time_us, sample_rdy_time_us + 100);
+
+ return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(rtq6056_pm_ops, rtq6056_runtime_suspend,
+ rtq6056_runtime_resume, NULL);
+
+static const struct of_device_id rtq6056_device_match[] = {
+ { .compatible = "richtek,rtq6056" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtq6056_device_match);
+
+static struct i2c_driver rtq6056_driver = {
+ .driver = {
+ .name = "rtq6056",
+ .of_match_table = rtq6056_device_match,
+ .pm = pm_ptr(&rtq6056_pm_ops),
+ },
+ .probe_new = rtq6056_probe,
+};
+module_i2c_driver(rtq6056_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ6056 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 1ce52af3fe8b..81d5db91c67b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -62,6 +63,7 @@ struct stm32_adc_priv;
* @regs: common registers for all instances
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
+ * @ipid: adc identification number
* @has_syscfg: SYSCFG capability flags
* @num_irqs: number of interrupt lines
* @num_adcs: maximum number of ADC instances in the common registers
@@ -70,6 +72,7 @@ struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
u32 max_clk_rate_hz;
+ u32 ipid;
unsigned int has_syscfg;
unsigned int num_irqs;
unsigned int num_adcs;
@@ -78,6 +81,7 @@ struct stm32_adc_priv_cfg {
/**
* struct stm32_adc_priv - stm32 ADC core private data
* @irq: irq(s) for ADC block
+ * @nb_adc_max: actual maximum number of instance per ADC block
* @domain: irq domain reference
* @aclk: clock reference for the analog circuitry
* @bclk: bus clock common for all ADCs, depends on part used
@@ -95,6 +99,7 @@ struct stm32_adc_priv_cfg {
*/
struct stm32_adc_priv {
int irq[STM32_ADC_MAX_ADCS];
+ unsigned int nb_adc_max;
struct irq_domain *domain;
struct clk *aclk;
struct clk *bclk;
@@ -354,7 +359,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
* before invoking the interrupt handler (e.g. call ISR only for
* IRQ-enabled ADCs).
*/
- for (i = 0; i < priv->cfg->num_adcs; i++) {
+ for (i = 0; i < priv->nb_adc_max; i++) {
if ((status & priv->cfg->regs->eoc_msk[i] &&
stm32_adc_eoc_enabled(priv, i)) ||
(status & priv->cfg->regs->ovr_msk[i]))
@@ -424,7 +429,7 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
int hwirq;
unsigned int i;
- for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
+ for (hwirq = 0; hwirq < priv->nb_adc_max; hwirq++)
irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
irq_domain_remove(priv->domain);
@@ -642,6 +647,49 @@ static int stm32_adc_core_switches_probe(struct device *dev,
return 0;
}
+static int stm32_adc_probe_identification(struct platform_device *pdev,
+ struct stm32_adc_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ const char *compat;
+ int ret, count = 0;
+ u32 id, val;
+
+ if (!priv->cfg->ipid)
+ return 0;
+
+ id = FIELD_GET(STM32MP1_IPIDR_MASK,
+ readl_relaxed(priv->common.base + STM32MP1_ADC_IPDR));
+ if (id != priv->cfg->ipid) {
+ dev_err(&pdev->dev, "Unexpected IP version: 0x%x", id);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = of_property_read_string(child, "compatible", &compat);
+ if (ret)
+ continue;
+ /* Count child nodes with stm32 adc compatible */
+ if (strstr(compat, "st,stm32") && strstr(compat, "adc"))
+ count++;
+ }
+
+ val = readl_relaxed(priv->common.base + STM32MP1_ADC_HWCFGR0);
+ priv->nb_adc_max = FIELD_GET(STM32MP1_ADCNUM_MASK, val);
+ if (count > priv->nb_adc_max) {
+ dev_err(&pdev->dev, "Unexpected child number: %d", count);
+ return -EINVAL;
+ }
+
+ val = readl_relaxed(priv->common.base + STM32MP1_ADC_VERR);
+ dev_dbg(&pdev->dev, "ADC version: %lu.%lu\n",
+ FIELD_GET(STM32MP1_MAJREV_MASK, val),
+ FIELD_GET(STM32MP1_MINREV_MASK, val));
+
+ return 0;
+}
+
static int stm32_adc_probe(struct platform_device *pdev)
{
struct stm32_adc_priv *priv;
@@ -661,6 +709,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->cfg = (const struct stm32_adc_priv_cfg *)
of_match_device(dev->driver->of_match_table, dev)->data;
+ priv->nb_adc_max = priv->cfg->num_adcs;
spin_lock_init(&priv->common.lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -703,6 +752,10 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret)
goto err_pm_stop;
+ ret = stm32_adc_probe_identification(pdev, priv);
+ if (ret < 0)
+ goto err_hw_stop;
+
ret = regulator_get_voltage(priv->vref);
if (ret < 0) {
dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
@@ -811,8 +864,8 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
+ .ipid = STM32MP15_IPIDR_NUMBER,
.num_irqs = 2,
- .num_adcs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
index faedf7a49555..2118ef63843d 100644
--- a/drivers/iio/adc/stm32-adc-core.h
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -24,6 +24,7 @@
* | 0x300 | Master & Slave common regs |
* --------------------------------------------------------
*/
+/* Maximum ADC instances number per ADC block for all supported SoCs */
#define STM32_ADC_MAX_ADCS 3
#define STM32_ADC_OFFSET 0x100
#define STM32_ADCX_COMN_OFFSET 0x300
@@ -105,6 +106,12 @@
/* STM32MP1 - ADC2 instance option register */
#define STM32MP1_ADC2_OR 0xD0
+/* STM32MP1 - Identification registers */
+#define STM32MP1_ADC_HWCFGR0 0x3F0
+#define STM32MP1_ADC_VERR 0x3F4
+#define STM32MP1_ADC_IPDR 0x3F8
+#define STM32MP1_ADC_SIDR 0x3FC
+
/* STM32H7 - common registers for all ADC instances */
#define STM32H7_ADC_CSR (STM32_ADCX_COMN_OFFSET + 0x00)
#define STM32H7_ADC_CCR (STM32_ADCX_COMN_OFFSET + 0x08)
@@ -181,6 +188,30 @@ enum stm32h7_adc_dmngt {
/* STM32MP1_ADC2_OR - bit fields */
#define STM32MP1_VDDCOREEN BIT(0)
+/* STM32MP1_ADC_HWCFGR0 - bit fields */
+#define STM32MP1_ADCNUM_SHIFT 0
+#define STM32MP1_ADCNUM_MASK GENMASK(3, 0)
+#define STM32MP1_MULPIPE_SHIFT 4
+#define STM32MP1_MULPIPE_MASK GENMASK(7, 4)
+#define STM32MP1_OPBITS_SHIFT 8
+#define STM32MP1_OPBITS_MASK GENMASK(11, 8)
+#define STM32MP1_IDLEVALUE_SHIFT 12
+#define STM32MP1_IDLEVALUE_MASK GENMASK(15, 12)
+
+/* STM32MP1_ADC_VERR - bit fields */
+#define STM32MP1_MINREV_SHIFT 0
+#define STM32MP1_MINREV_MASK GENMASK(3, 0)
+#define STM32MP1_MAJREV_SHIFT 4
+#define STM32MP1_MAJREV_MASK GENMASK(7, 4)
+
+/* STM32MP1_ADC_IPDR - bit fields */
+#define STM32MP1_IPIDR_MASK GENMASK(31, 0)
+
+/* STM32MP1_ADC_SIDR - bit fields */
+#define STM32MP1_SIDR_MASK GENMASK(31, 0)
+
+#define STM32MP15_IPIDR_NUMBER 0x00110005
+
/**
* struct stm32_adc_common - stm32 ADC driver common data (for all instances)
* @base: control registers base cpu addr
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 130e8dd6f0c8..6256977eb7f7 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -21,11 +21,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include "stm32-adc-core.h"
@@ -241,6 +241,7 @@ struct stm32_adc_cfg {
* @chan_name: channel name array
* @num_diff: number of differential channels
* @int_ch: internal channel indexes array
+ * @nsmps: number of channels with optional sample time
*/
struct stm32_adc {
struct stm32_adc_common *common;
@@ -267,6 +268,7 @@ struct stm32_adc {
char chan_name[STM32_ADC_CH_MAX][STM32_ADC_CH_SZ];
u32 num_diff;
int int_ch[STM32_ADC_INT_CH_NB];
+ int nsmps;
};
struct stm32_adc_diff_channel {
@@ -1520,8 +1522,8 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
return ret;
}
-static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int stm32_adc_fwnode_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
int i;
@@ -1575,7 +1577,7 @@ static const struct iio_info stm32_adc_iio_info = {
.hwfifo_set_watermark = stm32_adc_set_watermark,
.update_scan_mode = stm32_adc_update_scan_mode,
.debugfs_reg_access = stm32_adc_debugfs_reg_access,
- .of_xlate = stm32_adc_of_xlate,
+ .fwnode_xlate = stm32_adc_fwnode_xlate,
};
static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
@@ -1772,14 +1774,14 @@ static const struct iio_chan_spec_ext_info stm32_adc_ext_info[] = {
{},
};
-static int stm32_adc_of_get_resolution(struct iio_dev *indio_dev)
+static int stm32_adc_fw_get_resolution(struct iio_dev *indio_dev)
{
- struct device_node *node = indio_dev->dev.of_node;
+ struct device *dev = &indio_dev->dev;
struct stm32_adc *adc = iio_priv(indio_dev);
unsigned int i;
u32 res;
- if (of_property_read_u32(node, "assigned-resolution-bits", &res))
+ if (device_property_read_u32(dev, "assigned-resolution-bits", &res))
res = adc->cfg->adc_info->resolutions[0];
for (i = 0; i < adc->cfg->adc_info->num_res; i++)
@@ -1863,11 +1865,11 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm32_adc *adc)
{
- struct device_node *node = indio_dev->dev.of_node;
+ struct device *dev = &indio_dev->dev;
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
int num_channels = 0, ret;
- ret = of_property_count_u32_elems(node, "st,adc-channels");
+ ret = device_property_count_u32(dev, "st,adc-channels");
if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
return -EINVAL;
@@ -1875,8 +1877,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
num_channels += ret;
}
- ret = of_property_count_elems_of_size(node, "st,adc-diff-channels",
- sizeof(struct stm32_adc_diff_channel));
+ /*
+ * Each st,adc-diff-channels entry is a group of 2 u32s, so divide @ret
+ * to get the real number of channels.
+ */
+ ret = device_property_count_u32(dev, "st,adc-diff-channels");
+ if (ret < 0)
+ return ret;
+
+ ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
if (ret > adc_info->max_channels) {
dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
return -EINVAL;
@@ -1886,8 +1895,8 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
}
/* Optional sample time is provided either for each, or all channels */
- ret = of_property_count_u32_elems(node, "st,min-sample-time-nsecs");
- if (ret > 1 && ret != num_channels) {
+ adc->nsmps = device_property_count_u32(dev, "st,min-sample-time-nsecs");
+ if (adc->nsmps > 1 && adc->nsmps != num_channels) {
dev_err(&indio_dev->dev, "Invalid st,min-sample-time-nsecs\n");
return -EINVAL;
}
@@ -1897,21 +1906,20 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
struct stm32_adc *adc,
- struct iio_chan_spec *channels)
+ struct iio_chan_spec *channels,
+ int nchans)
{
- struct device_node *node = indio_dev->dev.of_node;
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
+ struct device *dev = &indio_dev->dev;
u32 num_diff = adc->num_diff;
int size = num_diff * sizeof(*diff) / sizeof(u32);
- int scan_index = 0, val, ret, i;
- struct property *prop;
- const __be32 *cur;
- u32 smp = 0;
+ int scan_index = 0, ret, i, c;
+ u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
if (num_diff) {
- ret = of_property_read_u32_array(node, "st,adc-diff-channels",
- (u32 *)diff, size);
+ ret = device_property_read_u32_array(dev, "st,adc-diff-channels",
+ (u32 *)diff, size);
if (ret) {
dev_err(&indio_dev->dev, "Failed to get diff channels %d\n", ret);
return ret;
@@ -1932,32 +1940,47 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
}
}
- of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
- if (val >= adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+ ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
+ nchans);
+ if (ret)
+ return ret;
+
+ for (c = 0; c < nchans; c++) {
+ if (chans[c] >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n",
+ chans[c]);
return -EINVAL;
}
/* Channel can't be configured both as single-ended & diff */
for (i = 0; i < num_diff; i++) {
- if (val == diff[i].vinp) {
- dev_err(&indio_dev->dev, "channel %d misconfigured\n", val);
+ if (chans[c] == diff[i].vinp) {
+ dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]);
return -EINVAL;
}
}
- stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
- 0, scan_index, false);
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ chans[c], 0, scan_index, false);
scan_index++;
}
+ if (adc->nsmps > 0) {
+ ret = device_property_read_u32_array(dev, "st,min-sample-time-nsecs",
+ smps, adc->nsmps);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < scan_index; i++) {
/*
- * Using of_property_read_u32_index(), smp value will only be
- * modified if valid u32 value can be decoded. This allows to
- * get either no value, 1 shared value for all indexes, or one
- * value per channel.
+ * This check, together with the logic above, ensures that smp is
+ * only modified when a valid u32 value could be decoded. This allows
+ * getting either no value, one shared value for all indexes, or one
+ * value per channel, matching the behavior of
+ * 'of_property_read_u32_index()'.
*/
- of_property_read_u32_index(node, "st,min-sample-time-nsecs", i, &smp);
+ if (i < adc->nsmps)
+ smp = smps[i];
/* Prepare sampling time settings */
stm32_adc_smpr_init(adc, channels[i].channel, smp);
@@ -2005,22 +2028,21 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
struct stm32_adc *adc,
struct iio_chan_spec *channels)
{
- struct device_node *node = indio_dev->dev.of_node;
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
- struct device_node *child;
+ struct fwnode_handle *child;
const char *name;
int val, scan_index = 0, ret;
bool differential;
u32 vin[2];
- for_each_available_child_of_node(node, child) {
- ret = of_property_read_u32(child, "reg", &val);
+ device_for_each_child_node(&indio_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &val);
if (ret) {
dev_err(&indio_dev->dev, "Missing channel index %d\n", ret);
goto err;
}
- ret = of_property_read_string(child, "label", &name);
+ ret = fwnode_property_read_string(child, "label", &name);
/* label is optional */
if (!ret) {
if (strlen(name) >= STM32_ADC_CH_SZ) {
@@ -2047,7 +2069,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
}
differential = false;
- ret = of_property_read_u32_array(child, "diff-channels", vin, 2);
+ ret = fwnode_property_read_u32_array(child, "diff-channels", vin, 2);
/* diff-channels is optional */
if (!ret) {
differential = true;
@@ -2064,7 +2086,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
vin[1], scan_index, differential);
- ret = of_property_read_u32(child, "st,min-sample-time-ns", &val);
+ ret = fwnode_property_read_u32(child, "st,min-sample-time-ns", &val);
/* st,min-sample-time-ns is optional */
if (!ret) {
stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
@@ -2082,14 +2104,13 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
return scan_index;
err:
- of_node_put(child);
+ fwnode_handle_put(child);
return ret;
}
-static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
+static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)
{
- struct device_node *node = indio_dev->dev.of_node;
struct stm32_adc *adc = iio_priv(indio_dev);
const struct stm32_adc_info *adc_info = adc->cfg->adc_info;
struct iio_chan_spec *channels;
@@ -2099,7 +2120,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
for (i = 0; i < STM32_ADC_INT_CH_NB; i++)
adc->int_ch[i] = STM32_ADC_INT_CH_NONE;
- num_channels = of_get_available_child_count(node);
+ num_channels = device_get_child_node_count(&indio_dev->dev);
/* If no channels have been found, fallback to channels legacy properties. */
if (!num_channels) {
legacy = true;
@@ -2130,7 +2151,8 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev, bool timestamping)
return -ENOMEM;
if (legacy)
- ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels);
+ ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
+ num_channels);
else
ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
if (ret < 0)
@@ -2212,9 +2234,6 @@ static int stm32_adc_probe(struct platform_device *pdev)
bool timestamping = false;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
@@ -2223,17 +2242,16 @@ static int stm32_adc_probe(struct platform_device *pdev)
adc->common = dev_get_drvdata(pdev->dev.parent);
spin_lock_init(&adc->lock);
init_completion(&adc->completion);
- adc->cfg = (const struct stm32_adc_cfg *)
- of_match_device(dev->driver->of_match_table, dev)->data;
+ adc->cfg = device_get_match_data(dev);
indio_dev->name = dev_name(&pdev->dev);
- indio_dev->dev.of_node = pdev->dev.of_node;
+ device_set_node(&indio_dev->dev, dev_fwnode(&pdev->dev));
indio_dev->info = &stm32_adc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE | INDIO_HARDWARE_TRIGGERED;
platform_set_drvdata(pdev, indio_dev);
- ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
+ ret = device_property_read_u32(dev, "reg", &adc->offset);
if (ret != 0) {
dev_err(&pdev->dev, "missing reg property\n");
return -EINVAL;
@@ -2262,7 +2280,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
}
}
- ret = stm32_adc_of_get_resolution(indio_dev);
+ ret = stm32_adc_fw_get_resolution(indio_dev);
if (ret < 0)
return ret;
@@ -2279,7 +2297,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
timestamping = true;
}
- ret = stm32_adc_chan_of_init(indio_dev, timestamping);
+ ret = stm32_adc_chan_fw_init(indio_dev, timestamping);
if (ret < 0)
goto err_dma_disable;
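For readers unfamiliar with the fwnode API that the stm32-adc conversion above targets, a minimal, self-contained sketch of the bus-agnostic pattern follows (the function and property names are illustrative, not taken from the driver):

#include <linux/device.h>
#include <linux/property.h>

/*
 * Count child firmware nodes that carry a "reg" property. The fwnode
 * helpers work for both DT and ACPI, so no of_* calls are needed.
 */
static int example_count_channels(struct device *dev)
{
	struct fwnode_handle *child;
	int ret, count = 0;
	u32 reg;

	device_for_each_child_node(dev, child) {
		ret = fwnode_property_read_u32(child, "reg", &reg);
		if (ret) {
			/* Drop the reference held by the iterator on early exit. */
			fwnode_handle_put(child);
			return ret;
		}
		count++;
	}

	return count;
}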
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 2d393a4dfff6..a6ade70dedf8 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -412,9 +412,9 @@ static int sun4i_gpadc_runtime_resume(struct device *dev)
return 0;
}
-static int sun4i_gpadc_get_temp(void *data, int *temp)
+static int sun4i_gpadc_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct sun4i_gpadc_iio *info = data;
+ struct sun4i_gpadc_iio *info = tz->devdata;
int val, scale, offset;
if (sun4i_gpadc_temp_read(info->indio_dev, &val))
@@ -428,7 +428,7 @@ static int sun4i_gpadc_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+static const struct thermal_zone_device_ops sun4i_ts_tz_ops = {
.get_temp = &sun4i_gpadc_get_temp,
};
@@ -637,9 +637,9 @@ static int sun4i_gpadc_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (IS_ENABLED(CONFIG_THERMAL_OF)) {
- info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
- 0, info,
- &sun4i_ts_tz_ops);
+ info->tzd = devm_thermal_of_zone_register(info->sensor_device,
+ 0, info,
+ &sun4i_ts_tz_ops);
/*
* Do not fail driver probing when failing to register in
* thermal because no thermal DT node is found.
@@ -681,8 +681,6 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_THERMAL_OF))
return 0;
- thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
-
if (!info->no_irq)
iio_map_array_unregister(indio_dev);
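As context for the sun4i-gpadc hunks above, here is a small sketch of the current thermal sensor API, assuming a hypothetical driver state structure: the sensor data now comes from tz->devdata rather than a void * callback argument, and the devm_ registration needs no explicit unregister in remove():

#include <linux/thermal.h>

struct example_sensor {
	int last_temp_mc;	/* last reading, millidegrees Celsius */
};

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct example_sensor *s = tz->devdata;

	*temp = s->last_temp_mc;
	return 0;
}

static const struct thermal_zone_device_ops example_tz_ops = {
	.get_temp = example_get_temp,
};

/* In probe(): tzd = devm_thermal_of_zone_register(dev, 0, sensor, &example_tz_ops); */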
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index e3dfc155fbe2..8bceba694026 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -1094,7 +1094,7 @@ static int ads1015_probe(struct i2c_client *client,
return 0;
}
-static int ads1015_remove(struct i2c_client *client)
+static void ads1015_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ads1015_data *data = iio_priv(indio_dev);
@@ -1110,8 +1110,6 @@ static int ads1015_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 32237cacc9a3..5235a93f28bc 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -797,13 +797,6 @@ static void ads131e08_regulator_disable(void *data)
regulator_disable(st->vref_reg);
}
-static void ads131e08_clk_disable(void *data)
-{
- struct ads131e08_state *st = data;
-
- clk_disable_unprepare(st->adc_clk);
-}
-
static int ads131e08_probe(struct spi_device *spi)
{
const struct ads131e08_info *info;
@@ -896,21 +889,11 @@ static int ads131e08_probe(struct spi_device *spi)
st->vref_reg = NULL;
}
- st->adc_clk = devm_clk_get(&spi->dev, "adc-clk");
+ st->adc_clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->adc_clk))
return dev_err_probe(&spi->dev, PTR_ERR(st->adc_clk),
"failed to get the ADC clock\n");
- ret = clk_prepare_enable(st->adc_clk);
- if (ret) {
- dev_err(&spi->dev, "failed to prepare/enable the ADC clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ads131e08_clk_disable, st);
- if (ret)
- return ret;
-
adc_clk_hz = clk_get_rate(st->adc_clk);
if (!adc_clk_hz) {
dev_err(&spi->dev, "failed to get the ADC clock rate\n");
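The ads131e08 hunk above is one instance of a cleanup repeated throughout this series; a minimal sketch of the devm_clk_get_enabled() idiom, with an illustrative clock name, is shown below. The helper gets, prepares and enables the clock and registers a devres action that undoes all of that on detach, so the hand-rolled disable callback can go away:

#include <linux/clk.h>
#include <linux/device.h>

static int example_probe_clk(struct device *dev, unsigned long *rate)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "adc-clk");	/* name is illustrative */
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get the clock\n");

	*rate = clk_get_rate(clk);
	return 0;
}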
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 0d9436a69cbf..1bbb51a6683c 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -8,7 +8,9 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
@@ -139,6 +141,7 @@ enum tsc2046_state {
struct tsc2046_adc_priv {
struct spi_device *spi;
const struct tsc2046_adc_dcfg *dcfg;
+ struct regulator *vref_reg;
struct iio_trigger *trig;
struct hrtimer trig_timer;
@@ -173,6 +176,7 @@ struct tsc2046_adc_priv {
u32 scan_interval_us;
u32 time_per_scan_us;
u32 time_per_bit_ns;
+ unsigned int vref_mv;
struct tsc2046_adc_ch_cfg ch_cfg[TI_TSC2046_MAX_CHAN];
};
@@ -252,7 +256,9 @@ static u8 tsc2046_adc_get_cmd(struct tsc2046_adc_priv *priv, int ch_idx,
case TI_TSC2046_ADDR_AUX:
case TI_TSC2046_ADDR_VBAT:
case TI_TSC2046_ADDR_TEMP0:
- pd |= TI_TSC2046_SER | TI_TSC2046_PD1_VREF_ON;
+ pd |= TI_TSC2046_SER;
+ if (!priv->vref_reg)
+ pd |= TI_TSC2046_PD1_VREF_ON;
}
return TI_TSC2046_START | FIELD_PREP(TI_TSC2046_ADDR, ch_idx) | pd;
@@ -468,7 +474,7 @@ static int tsc2046_adc_read_raw(struct iio_dev *indio_dev,
* So, it is better to use external voltage-divider driver
* instead, which is calculating complete chain.
*/
- *val = TI_TSC2046_INT_VREF;
+ *val = priv->vref_mv;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -740,6 +746,49 @@ static void tsc2046_adc_parse_fwnode(struct tsc2046_adc_priv *priv)
}
}
+static void tsc2046_adc_regulator_disable(void *data)
+{
+ struct tsc2046_adc_priv *priv = data;
+
+ regulator_disable(priv->vref_reg);
+}
+
+static int tsc2046_adc_configure_regulator(struct tsc2046_adc_priv *priv)
+{
+ struct device *dev = &priv->spi->dev;
+ int ret;
+
+ priv->vref_reg = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(priv->vref_reg)) {
+ /* If the regulator exists but can't be acquired, return the error */
+ if (PTR_ERR(priv->vref_reg) != -ENODEV)
+ return PTR_ERR(priv->vref_reg);
+ priv->vref_reg = NULL;
+ }
+ if (!priv->vref_reg) {
+ /* Use internal reference */
+ priv->vref_mv = TI_TSC2046_INT_VREF;
+ return 0;
+ }
+
+ ret = regulator_enable(priv->vref_reg);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, tsc2046_adc_regulator_disable,
+ priv);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(priv->vref_reg);
+ if (ret < 0)
+ return ret;
+
+ priv->vref_mv = ret / MILLI;
+
+ return 0;
+}
+
static int tsc2046_adc_probe(struct spi_device *spi)
{
const struct tsc2046_adc_dcfg *dcfg;
@@ -756,6 +805,11 @@ static int tsc2046_adc_probe(struct spi_device *spi)
}
dcfg = device_get_match_data(dev);
+ if (!dcfg) {
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ dcfg = (const struct tsc2046_adc_dcfg *)id->driver_data;
+ }
if (!dcfg)
return -EINVAL;
@@ -781,6 +835,10 @@ static int tsc2046_adc_probe(struct spi_device *spi)
indio_dev->num_channels = dcfg->num_channels;
indio_dev->info = &tsc2046_adc_info;
+ ret = tsc2046_adc_configure_regulator(priv);
+ if (ret)
+ return ret;
+
tsc2046_adc_parse_fwnode(priv);
ret = tsc2046_adc_setup_spi_msg(priv);
@@ -833,11 +891,18 @@ static const struct of_device_id ads7950_of_table[] = {
};
MODULE_DEVICE_TABLE(of, ads7950_of_table);
+static const struct spi_device_id tsc2046_adc_spi_ids[] = {
+ { "tsc2046e-adc", (unsigned long)&tsc2046_adc_dcfg_tsc2046e },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, tsc2046_adc_spi_ids);
+
static struct spi_driver tsc2046_adc_driver = {
.driver = {
.name = "tsc2046",
.of_match_table = ads7950_of_table,
},
+ .id_table = tsc2046_adc_spi_ids,
.probe = tsc2046_adc_probe,
};
module_spi_driver(tsc2046_adc_driver);
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 9cd2713146e5..5b4bdf3a26bb 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1351,11 +1351,6 @@ static const struct of_device_id ams_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, ams_of_match_table);
-static void ams_clk_disable_unprepare(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static int ams_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
@@ -1380,18 +1375,10 @@ static int ams_probe(struct platform_device *pdev)
if (IS_ERR(ams->base))
return PTR_ERR(ams->base);
- ams->clk = devm_clk_get(&pdev->dev, NULL);
+ ams->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(ams->clk))
return PTR_ERR(ams->clk);
- ret = clk_prepare_enable(ams->clk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk);
- if (ret < 0)
- return ret;
-
ret = devm_delayed_work_autocancel(&pdev->dev, &ams->ams_unmask_work,
ams_unmask_worker);
if (ret < 0)
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 1b247722ba25..292f2892d223 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1296,13 +1296,6 @@ static const char * const xadc_type_names[] = {
[XADC_TYPE_US] = "xilinx-system-monitor",
};
-static void xadc_clk_disable_unprepare(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
static void xadc_cancel_delayed_work(void *data)
{
struct delayed_work *work = data;
@@ -1374,19 +1367,10 @@ static int xadc_probe(struct platform_device *pdev)
}
}
- xadc->clk = devm_clk_get(dev, NULL);
+ xadc->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(xadc->clk))
return PTR_ERR(xadc->clk);
- ret = clk_prepare_enable(xadc->clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev,
- xadc_clk_disable_unprepare, xadc->clk);
- if (ret)
- return ret;
-
/*
* Make sure not to exceed the maximum samplerate since otherwise the
* resulting interrupt storm will soft-lock the system.
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
index 138492362f20..fcf6d2269bfc 100644
--- a/drivers/iio/addac/Kconfig
+++ b/drivers/iio/addac/Kconfig
@@ -17,4 +17,20 @@ config AD74413R
To compile this driver as a module, choose M here: the
module will be called ad74413r.
+config STX104
+ tristate "Apex Embedded Systems STX104 driver"
+ depends on PC104 && X86
+ select ISA_BUS_API
+ select GPIOLIB
+ help
+ Say yes here to build support for the Apex Embedded Systems STX104
+ integrated analog PC/104 card.
+
+ This driver supports the 16 channels of single-ended (8 channels of
+ differential) analog inputs, 2 channels of analog output, 4 digital
+ inputs, and 4 digital outputs provided by the STX104.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
endmenu
diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile
index cfd4bbe64ad3..17de20ef0d8e 100644
--- a/drivers/iio/addac/Makefile
+++ b/drivers/iio/addac/Makefile
@@ -5,3 +5,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD74413R) += ad74413r.o
+obj-$(CONFIG_STX104) += stx104.o
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/addac/stx104.c
index 48a91a95e597..48a91a95e597 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/addac/stx104.c
diff --git a/drivers/iio/cdc/Kconfig b/drivers/iio/cdc/Kconfig
index 5e3319a3ff48..e0a5ce66a984 100644
--- a/drivers/iio/cdc/Kconfig
+++ b/drivers/iio/cdc/Kconfig
@@ -14,4 +14,14 @@ config AD7150
To compile this driver as a module, choose M here: the
module will be called ad7150.
+config AD7746
+ tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
+ depends on I2C
+ help
+ Say yes here to build support for Analog Devices capacitive sensors
+ (AD7745, AD7746, AD7747). Provides direct access via sysfs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7746.
+
endmenu
diff --git a/drivers/iio/cdc/Makefile b/drivers/iio/cdc/Makefile
index ee490637b032..41db756d8020 100644
--- a/drivers/iio/cdc/Makefile
+++ b/drivers/iio/cdc/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_AD7150) += ad7150.o
+obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c
index 52b8957c19c9..b266f5328140 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/iio/cdc/ad7746.c
@@ -5,6 +5,7 @@
* Copyright 2011 Analog Devices Inc.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/i2c.h>
@@ -15,12 +16,12 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include <asm/unaligned.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-/*
- * AD7746 Register Definition
- */
+/* AD7746 Register Definition */
#define AD7746_REG_STATUS 0
#define AD7746_REG_CAP_DATA_HIGH 1
@@ -48,11 +49,12 @@
#define AD7746_CAPSETUP_CACHOP BIT(0)
/* Voltage/Temperature Setup Register Bit Designations (AD7746_REG_VT_SETUP) */
-#define AD7746_VTSETUP_VTEN (1 << 7)
-#define AD7746_VTSETUP_VTMD_INT_TEMP (0 << 5)
-#define AD7746_VTSETUP_VTMD_EXT_TEMP (1 << 5)
-#define AD7746_VTSETUP_VTMD_VDD_MON (2 << 5)
-#define AD7746_VTSETUP_VTMD_EXT_VIN (3 << 5)
+#define AD7746_VTSETUP_VTEN BIT(7)
+#define AD7746_VTSETUP_VTMD_MASK GENMASK(6, 5)
+#define AD7746_VTSETUP_VTMD_INT_TEMP 0
+#define AD7746_VTSETUP_VTMD_EXT_TEMP 1
+#define AD7746_VTSETUP_VTMD_VDD_MON 2
+#define AD7746_VTSETUP_VTMD_EXT_VIN 3
#define AD7746_VTSETUP_EXTREF BIT(4)
#define AD7746_VTSETUP_VTSHORT BIT(1)
#define AD7746_VTSETUP_VTCHOP BIT(0)
@@ -64,23 +66,22 @@
#define AD7746_EXCSETUP_NEXCB BIT(4)
#define AD7746_EXCSETUP_EXCA BIT(3)
#define AD7746_EXCSETUP_NEXCA BIT(2)
-#define AD7746_EXCSETUP_EXCLVL(x) (((x) & 0x3) << 0)
+#define AD7746_EXCSETUP_EXCLVL_MASK GENMASK(1, 0)
/* Config Register Bit Designations (AD7746_REG_CFG) */
-#define AD7746_CONF_VTFS_SHIFT 6
-#define AD7746_CONF_CAPFS_SHIFT 3
#define AD7746_CONF_VTFS_MASK GENMASK(7, 6)
#define AD7746_CONF_CAPFS_MASK GENMASK(5, 3)
-#define AD7746_CONF_MODE_IDLE (0 << 0)
-#define AD7746_CONF_MODE_CONT_CONV (1 << 0)
-#define AD7746_CONF_MODE_SINGLE_CONV (2 << 0)
-#define AD7746_CONF_MODE_PWRDN (3 << 0)
-#define AD7746_CONF_MODE_OFFS_CAL (5 << 0)
-#define AD7746_CONF_MODE_GAIN_CAL (6 << 0)
+#define AD7746_CONF_MODE_MASK GENMASK(2, 0)
+#define AD7746_CONF_MODE_IDLE 0
+#define AD7746_CONF_MODE_CONT_CONV 1
+#define AD7746_CONF_MODE_SINGLE_CONV 2
+#define AD7746_CONF_MODE_PWRDN 3
+#define AD7746_CONF_MODE_OFFS_CAL 5
+#define AD7746_CONF_MODE_GAIN_CAL 6
/* CAPDAC Register Bit Designations (AD7746_REG_CAPDACx) */
#define AD7746_CAPDAC_DACEN BIT(7)
-#define AD7746_CAPDAC_DACP(x) ((x) & 0x7F)
+#define AD7746_CAPDAC_DACP_MASK GENMASK(6, 0)
struct ad7746_chip_info {
struct i2c_client *client;
@@ -94,11 +95,6 @@ struct ad7746_chip_info {
u8 vt_setup;
u8 capdac[2][2];
s8 capdac_set;
-
- union {
- __be32 d32;
- u8 d8[4];
- } data ____cacheline_aligned;
};
enum ad7746_chan {
@@ -112,43 +108,87 @@ enum ad7746_chan {
CIN2_DIFF,
};
+struct ad7746_chan_info {
+ u8 addr;
+ union {
+ u8 vtmd;
+ struct { /* CAP SETUP fields */
+ unsigned int cin2 : 1;
+ unsigned int capdiff : 1;
+ };
+ };
+};
+
+static const struct ad7746_chan_info ad7746_chan_info[] = {
+ [VIN] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_EXT_VIN,
+ },
+ [VIN_VDD] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_VDD_MON,
+ },
+ [TEMP_INT] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_INT_TEMP,
+ },
+ [TEMP_EXT] = {
+ .addr = AD7746_REG_VT_DATA_HIGH,
+ .vtmd = AD7746_VTSETUP_VTMD_EXT_TEMP,
+ },
+ [CIN1] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ },
+ [CIN1_DIFF] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ .capdiff = 1,
+ },
+ [CIN2] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ .cin2 = 1,
+ },
+ [CIN2_DIFF] = {
+ .addr = AD7746_REG_CAP_DATA_HIGH,
+ .cin2 = 1,
+ .capdiff = 1,
+ },
+};
+
static const struct iio_chan_spec ad7746_channels[] = {
[VIN] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_EXT_VIN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = VIN,
},
[VIN_VDD] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
.extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_VDD_MON,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = VIN_VDD,
},
[TEMP_INT] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_INT_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = TEMP_INT,
},
[TEMP_EXT] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- .address = AD7746_REG_VT_DATA_HIGH << 8 |
- AD7746_VTSETUP_VTMD_EXT_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .address = TEMP_EXT,
},
[CIN1] = {
.type = IIO_CAPACITANCE,
@@ -158,7 +198,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8,
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN1,
},
[CIN1_DIFF] = {
.type = IIO_CAPACITANCE,
@@ -167,11 +208,11 @@ static const struct iio_chan_spec ad7746_channels[] = {
.channel = 0,
.channel2 = 2,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_ZEROPOINT),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CAPDIFF
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN1_DIFF,
},
[CIN2] = {
.type = IIO_CAPACITANCE,
@@ -181,8 +222,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CIN2,
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN2,
},
[CIN2_DIFF] = {
.type = IIO_CAPACITANCE,
@@ -191,22 +232,22 @@ static const struct iio_chan_spec ad7746_channels[] = {
.channel = 1,
.channel2 = 3,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_ZEROPOINT),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .address = AD7746_REG_CAP_DATA_HIGH << 8 |
- AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .address = CIN2_DIFF,
}
};
/* Values are Update Rate (Hz), Conversion Time (ms) + 1*/
static const unsigned char ad7746_vt_filter_rate_table[][2] = {
- {50, 20 + 1}, {31, 32 + 1}, {16, 62 + 1}, {8, 122 + 1},
+ { 50, 20 + 1 }, { 31, 32 + 1 }, { 16, 62 + 1 }, { 8, 122 + 1 },
};
static const unsigned char ad7746_cap_filter_rate_table[][2] = {
- {91, 11 + 1}, {84, 12 + 1}, {50, 20 + 1}, {26, 38 + 1},
- {16, 62 + 1}, {13, 77 + 1}, {11, 92 + 1}, {9, 110 + 1},
+ { 91, 11 + 1 }, { 84, 12 + 1 }, { 50, 20 + 1 }, { 26, 38 + 1 },
+ { 16, 62 + 1 }, { 13, 77 + 1 }, { 11, 92 + 1 }, { 9, 110 + 1 },
};
static int ad7746_set_capdac(struct ad7746_chip_info *chip, int channel)
@@ -231,10 +272,13 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_CAPACITANCE:
- cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
+ cap_setup = FIELD_PREP(AD7746_CAPSETUP_CIN2,
+ ad7746_chan_info[chan->address].cin2) |
+ FIELD_PREP(AD7746_CAPSETUP_CAPDIFF,
+ ad7746_chan_info[chan->address].capdiff) |
+ FIELD_PREP(AD7746_CAPSETUP_CAPEN, 1);
vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
- idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
- AD7746_CONF_CAPFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_CAPFS_MASK, chip->config);
delay = ad7746_cap_filter_rate_table[idx][1];
ret = ad7746_set_capdac(chip, chan->channel);
@@ -246,10 +290,11 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
break;
case IIO_VOLTAGE:
case IIO_TEMP:
- vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
+ vt_setup = FIELD_PREP(AD7746_VTSETUP_VTMD_MASK,
+ ad7746_chan_info[chan->address].vtmd) |
+ FIELD_PREP(AD7746_VTSETUP_VTEN, 1);
cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
- idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
- AD7746_CONF_VTFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_VTFS_MASK, chip->config);
delay = ad7746_cap_filter_rate_table[idx][1];
break;
default:
@@ -332,7 +377,8 @@ static ssize_t ad7746_start_offset_calib(struct device *dev,
return ret;
return ad7746_start_calib(dev, attr, buf, len,
- AD7746_CONF_MODE_OFFS_CAL);
+ FIELD_PREP(AD7746_CONF_MODE_MASK,
+ AD7746_CONF_MODE_OFFS_CAL));
}
static ssize_t ad7746_start_gain_calib(struct device *dev,
@@ -347,7 +393,8 @@ static ssize_t ad7746_start_gain_calib(struct device *dev,
return ret;
return ad7746_start_calib(dev, attr, buf, len,
- AD7746_CONF_MODE_GAIN_CAL);
+ FIELD_PREP(AD7746_CONF_MODE_MASK,
+ AD7746_CONF_MODE_GAIN_CAL));
}
static IIO_DEVICE_ATTR(in_capacitance0_calibbias_calibration,
@@ -374,7 +421,7 @@ static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
chip->config &= ~AD7746_CONF_CAPFS_MASK;
- chip->config |= i << AD7746_CONF_CAPFS_SHIFT;
+ chip->config |= FIELD_PREP(AD7746_CONF_CAPFS_MASK, i);
return 0;
}
@@ -392,23 +439,17 @@ static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
chip->config &= ~AD7746_CONF_VTFS_MASK;
- chip->config |= i << AD7746_CONF_VTFS_SHIFT;
+ chip->config |= FIELD_PREP(AD7746_CONF_VTFS_MASK, i);
return 0;
}
-static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
-static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
- "91 84 50 26 16 13 11 9");
-
static struct attribute *ad7746_attributes[] = {
&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
&iio_dev_attr_in_voltage0_calibscale_calibration.dev_attr.attr,
- &iio_const_attr_in_voltage_sampling_frequency_available.dev_attr.attr,
- &iio_const_attr_in_capacitance_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -425,14 +466,10 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
struct ad7746_chip_info *chip = iio_priv(indio_dev);
int ret, reg;
- mutex_lock(&chip->lock);
-
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
- if (val != 1) {
- ret = -EINVAL;
- goto out;
- }
+ if (val != 1)
+ return -EINVAL;
val = (val2 * 1024) / 15625;
@@ -444,33 +481,32 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
reg = AD7746_REG_VOLT_GAINH;
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&chip->lock);
ret = i2c_smbus_write_word_swapped(chip->client, reg, val);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
- ret = 0;
- break;
+ return 0;
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < 0 || val > 0xFFFF) {
- ret = -EINVAL;
- goto out;
- }
+ if (val < 0 || val > 0xFFFF)
+ return -EINVAL;
+
+ mutex_lock(&chip->lock);
ret = i2c_smbus_write_word_swapped(chip->client,
AD7746_REG_CAP_OFFH, val);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
- ret = 0;
- break;
+ return 0;
case IIO_CHAN_INFO_OFFSET:
- if (val < 0 || val > 43008000) { /* 21pF */
- ret = -EINVAL;
- goto out;
- }
+ case IIO_CHAN_INFO_ZEROPOINT:
+ if (val < 0 || val > 43008000) /* 21pF */
+ return -EINVAL;
/*
* CAPDAC Scale = 21pF_typ / 127
@@ -479,42 +515,104 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
*/
val /= 338646;
-
+ mutex_lock(&chip->lock);
chip->capdac[chan->channel][chan->differential] = val > 0 ?
- AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0;
+ FIELD_PREP(AD7746_CAPDAC_DACP_MASK, val) | AD7746_CAPDAC_DACEN : 0;
ret = ad7746_set_capdac(chip, chan->channel);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ mutex_unlock(&chip->lock);
+ return ret;
+ }
chip->capdac_set = chan->channel;
+ mutex_unlock(&chip->lock);
- ret = 0;
- break;
+ return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2) {
- ret = -EINVAL;
- goto out;
- }
+ if (val2)
+ return -EINVAL;
switch (chan->type) {
case IIO_CAPACITANCE:
+ mutex_lock(&chip->lock);
ret = ad7746_store_cap_filter_rate_setup(chip, val);
- break;
+ mutex_unlock(&chip->lock);
+ return ret;
case IIO_VOLTAGE:
+ mutex_lock(&chip->lock);
ret = ad7746_store_vt_filter_rate_setup(chip, val);
- break;
+ mutex_unlock(&chip->lock);
+ return ret;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+ default:
+ return -EINVAL;
+ }
+}
+
+static const int ad7746_v_samp_freq[] = { 50, 31, 16, 8, };
+static const int ad7746_cap_samp_freq[] = { 91, 84, 50, 26, 16, 13, 11, 9, };
+
+static int ad7746_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad7746_v_samp_freq;
+ *length = ARRAY_SIZE(ad7746_v_samp_freq);
+ break;
+ case IIO_CAPACITANCE:
+ *vals = ad7746_cap_samp_freq;
+ *length = ARRAY_SIZE(ad7746_cap_samp_freq);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+}
-out:
- mutex_unlock(&chip->lock);
- return ret;
+static int ad7746_read_channel(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ad7746_chip_info *chip = iio_priv(indio_dev);
+ int ret, delay;
+ u8 data[3];
+ u8 regval;
+
+ ret = ad7746_select_channel(indio_dev, chan);
+ if (ret < 0)
+ return ret;
+ delay = ret;
+
+ regval = chip->config | FIELD_PREP(AD7746_CONF_MODE_MASK,
+ AD7746_CONF_MODE_SINGLE_CONV);
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG, regval);
+ if (ret < 0)
+ return ret;
+
+ msleep(delay);
+ /* Now read the actual register */
+ ret = i2c_smbus_read_i2c_block_data(chip->client,
+ ad7746_chan_info[chan->address].addr,
+ sizeof(data), data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Offset applied internally because the _offset userspace interface is
+ * needed for the CAP DACs which apply a controllable offset.
+ */
+ *val = get_unaligned_be24(data) - 0x800000;
+
+ return 0;
}
static int ad7746_read_raw(struct iio_dev *indio_dev,
@@ -523,55 +621,18 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad7746_chip_info *chip = iio_priv(indio_dev);
- int ret, delay, idx;
- u8 regval, reg;
-
- mutex_lock(&chip->lock);
+ int ret, idx;
+ u8 reg;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- case IIO_CHAN_INFO_PROCESSED:
- ret = ad7746_select_channel(indio_dev, chan);
- if (ret < 0)
- goto out;
- delay = ret;
-
- regval = chip->config | AD7746_CONF_MODE_SINGLE_CONV;
- ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_CFG,
- regval);
- if (ret < 0)
- goto out;
-
- msleep(delay);
- /* Now read the actual register */
-
- ret = i2c_smbus_read_i2c_block_data(chip->client,
- chan->address >> 8, 3,
- &chip->data.d8[1]);
-
+ mutex_lock(&chip->lock);
+ ret = ad7746_read_channel(indio_dev, chan, val);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
-
- *val = (be32_to_cpu(chip->data.d32) & 0xFFFFFF) - 0x800000;
-
- switch (chan->type) {
- case IIO_TEMP:
- /*
- * temperature in milli degrees Celsius
- * T = ((*val / 2048) - 4096) * 1000
- */
- *val = (*val * 125) / 256;
- break;
- case IIO_VOLTAGE:
- if (chan->channel == 1) /* supply_raw*/
- *val = *val * 6;
- break;
- default:
- break;
- }
+ return ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
@@ -581,83 +642,78 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
reg = AD7746_REG_VOLT_GAINH;
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&chip->lock);
ret = i2c_smbus_read_word_swapped(chip->client, reg);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
/* 1 + gain_val / 2^16 */
*val = 1;
*val2 = (15625 * ret) / 1024;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
+ mutex_lock(&chip->lock);
ret = i2c_smbus_read_word_swapped(chip->client,
AD7746_REG_CAP_OFFH);
+ mutex_unlock(&chip->lock);
if (ret < 0)
- goto out;
+ return ret;
*val = ret;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
- [chan->differential]) * 338646;
+ case IIO_CHAN_INFO_ZEROPOINT:
+ *val = FIELD_GET(AD7746_CAPDAC_DACP_MASK,
+ chip->capdac[chan->channel][chan->differential]) * 338646;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
*val = 0;
*val2 = 488;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_VOLTAGE:
/* 1170mV / 2^23 */
*val = 1170;
+ if (chan->channel == 1)
+ *val *= 6;
*val2 = 23;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = 125;
+ *val2 = 8;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- break;
case IIO_CHAN_INFO_SAMP_FREQ:
switch (chan->type) {
case IIO_CAPACITANCE:
- idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
- AD7746_CONF_CAPFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_CAPFS_MASK, chip->config);
*val = ad7746_cap_filter_rate_table[idx][0];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_VOLTAGE:
- idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
- AD7746_CONF_VTFS_SHIFT;
+ idx = FIELD_GET(AD7746_CONF_VTFS_MASK, chip->config);
*val = ad7746_vt_filter_rate_table[idx][0];
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
- mutex_unlock(&chip->lock);
- return ret;
}
static const struct iio_info ad7746_info = {
.attrs = &ad7746_attribute_group,
.read_raw = ad7746_read_raw,
+ .read_avail = ad7746_read_avail,
.write_raw = ad7746_write_raw,
};
@@ -674,10 +730,9 @@ static int ad7746_probe(struct i2c_client *client,
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
+
chip = iio_priv(indio_dev);
mutex_init(&chip->lock);
- /* this is only used for device removal purposes */
- i2c_set_clientdata(client, indio_dev);
chip->client = client;
chip->capdac_set = -1;
@@ -710,24 +765,24 @@ static int ad7746_probe(struct i2c_client *client,
if (!ret) {
switch (vdd_permille) {
case 125:
- regval |= AD7746_EXCSETUP_EXCLVL(0);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 0);
break;
case 250:
- regval |= AD7746_EXCSETUP_EXCLVL(1);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 1);
break;
case 375:
- regval |= AD7746_EXCSETUP_EXCLVL(2);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 2);
break;
case 500:
- regval |= AD7746_EXCSETUP_EXCLVL(3);
+ regval |= FIELD_PREP(AD7746_EXCSETUP_EXCLVL_MASK, 3);
break;
default:
break;
}
}
- ret = i2c_smbus_write_byte_data(chip->client,
- AD7746_REG_EXC_SETUP, regval);
+ ret = i2c_smbus_write_byte_data(chip->client, AD7746_REG_EXC_SETUP,
+ regval);
if (ret < 0)
return ret;
@@ -740,7 +795,6 @@ static const struct i2c_device_id ad7746_id[] = {
{ "ad7747", 7747 },
{}
};
-
MODULE_DEVICE_TABLE(i2c, ad7746_id);
static const struct of_device_id ad7746_of_match[] = {
@@ -749,7 +803,6 @@ static const struct of_device_id ad7746_of_match[] = {
{ .compatible = "adi,ad7747" },
{ },
};
-
MODULE_DEVICE_TABLE(of, ad7746_of_match);
static struct i2c_driver ad7746_driver = {
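The ad7746 rework above replaces open-coded shifts and masks with the bitfield helpers; a short sketch of that idiom, with a made-up register layout, for reference:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_CFG_MODE_MASK	GENMASK(2, 0)
#define EXAMPLE_CFG_MODE_SINGLE	2
#define EXAMPLE_CFG_FS_MASK	GENMASK(5, 3)

/* Pack fields into a register value without manual shifting. */
static u8 example_build_cfg(unsigned int rate_idx)
{
	return FIELD_PREP(EXAMPLE_CFG_MODE_MASK, EXAMPLE_CFG_MODE_SINGLE) |
	       FIELD_PREP(EXAMPLE_CFG_FS_MASK, rate_idx);
}

/* Extract a field back out of a register value. */
static unsigned int example_get_rate_idx(u8 cfg)
{
	return FIELD_GET(EXAMPLE_CFG_FS_MASK, cfg);
}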
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 8378c00fa2ff..7cac77a931c7 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -722,7 +722,7 @@ unregister_trigger:
return ret;
}
-static int atlas_remove(struct i2c_client *client)
+static void atlas_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct atlas_data *data = iio_priv(indio_dev);
@@ -739,8 +739,6 @@ static int atlas_remove(struct i2c_client *client)
if (ret)
dev_err(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int atlas_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 560183efb36f..ba4045e20303 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -532,7 +532,7 @@ err_poweroff:
return ret;
}
-static int ccs811_remove(struct i2c_client *client)
+static void ccs811_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ccs811_data *data = iio_priv(indio_dev);
@@ -548,8 +548,6 @@ static int ccs811_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down device (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static const struct i2c_device_id ccs811_id[] = {
diff --git a/drivers/iio/chemical/sgp30.c b/drivers/iio/chemical/sgp30.c
index 2343d444604d..e2c13c78c7e0 100644
--- a/drivers/iio/chemical/sgp30.c
+++ b/drivers/iio/chemical/sgp30.c
@@ -552,15 +552,13 @@ static int sgp_probe(struct i2c_client *client,
return 0;
}
-static int sgp_remove(struct i2c_client *client)
+static void sgp_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct sgp_data *data = iio_priv(indio_dev);
if (data->iaq_thread)
kthread_stop(data->iaq_thread);
-
- return 0;
}
static const struct i2c_device_id sgp_id[] = {
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 793d628db55f..54ccf19ab2bb 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -18,6 +18,7 @@
#include <linux/scmi_protocol.h>
#include <linux/time.h>
#include <linux/types.h>
+#include <linux/units.h>
#define SCMI_IIO_NUM_OF_AXIS 3
@@ -130,7 +131,6 @@ static const struct iio_buffer_setup_ops scmi_iio_buffer_ops = {
static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
{
struct scmi_iio_priv *sensor = iio_priv(iio_dev);
- const unsigned long UHZ_PER_HZ = 1000000UL;
u64 sec, mult, uHz, sf;
u32 sensor_config;
char buf[32];
@@ -145,7 +145,7 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
return err;
}
- uHz = val * UHZ_PER_HZ + val2;
+ uHz = val * MICROHZ_PER_HZ + val2;
/*
* The seconds field in the sensor interval in SCMI is 16 bits long
@@ -156,10 +156,10 @@ static int scmi_iio_set_odr_val(struct iio_dev *iio_dev, int val, int val2)
* count the number of characters
*/
sf = (u64)uHz * 0xFFFF;
- do_div(sf, UHZ_PER_HZ);
+ do_div(sf, MICROHZ_PER_HZ);
mult = scnprintf(buf, sizeof(buf), "%llu", sf) - 1;
- sec = int_pow(10, mult) * UHZ_PER_HZ;
+ sec = int_pow(10, mult) * MICROHZ_PER_HZ;
do_div(sec, uHz);
if (sec == 0) {
dev_err(&iio_dev->dev,
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 9910ba1da085..35720c64fea8 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -354,7 +354,7 @@ void st_sensors_dev_name_probe(struct device *dev, char *name, int len)
return;
/* The name from the match takes precedence if present */
- strlcpy(name, match, len);
+ strscpy(name, match, len);
}
EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, IIO_ST_SENSORS);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 81775152aac6..a81bfa47a221 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -559,11 +559,9 @@ static int ad5380_i2c_probe(struct i2c_client *i2c,
return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
}
-static int ad5380_i2c_remove(struct i2c_client *i2c)
+static void ad5380_i2c_remove(struct i2c_client *i2c)
{
ad5380_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5380_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 09e242949cd0..7324065d3782 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -575,11 +575,9 @@ static int ad5446_i2c_probe(struct i2c_client *i2c,
&ad5446_i2c_chip_info[id->driver_data]);
}
-static int ad5446_i2c_remove(struct i2c_client *i2c)
+static void ad5446_i2c_remove(struct i2c_client *i2c)
{
ad5446_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5446_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 34e1319a9712..8e5e014e0c28 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -13,6 +13,8 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <asm/unaligned.h>
+
#define AD5593R_MODE_CONF (0 << 4)
#define AD5593R_MODE_DAC_WRITE (1 << 4)
#define AD5593R_MODE_ADC_READBACK (4 << 4)
@@ -20,6 +22,24 @@
#define AD5593R_MODE_GPIO_READBACK (6 << 4)
#define AD5593R_MODE_REG_READBACK (7 << 4)
+static int ad5593r_read_word(struct i2c_client *i2c, u8 reg, u16 *value)
+{
+ int ret;
+ u8 buf[2];
+
+ ret = i2c_smbus_write_byte(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ *value = get_unaligned_be16(buf);
+
+ return 0;
+}
+
static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
@@ -38,13 +58,7 @@ static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
if (val < 0)
return (int) val;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
- if (val < 0)
- return (int) val;
-
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_ADC_READBACK, value);
}
static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
@@ -58,25 +72,19 @@ static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
- if (val < 0)
- return (int) val;
-
- *value = (u16) val;
-
- return 0;
+ return ad5593r_read_word(i2c, AD5593R_MODE_REG_READBACK | reg, value);
}
static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
- s32 val;
+ u16 val;
+ int ret;
- val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
- if (val < 0)
- return (int) val;
+ ret = ad5593r_read_word(i2c, AD5593R_MODE_GPIO_READBACK, &val);
+ if (ret)
+ return ret;
*value = (u8) val;
@@ -94,14 +102,16 @@ static const struct ad5592r_rw_ops ad5593r_rw_ops = {
static int ad5593r_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
+ if (!i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
return ad5592r_probe(&i2c->dev, id->name, &ad5593r_rw_ops);
}
-static int ad5593r_i2c_remove(struct i2c_client *i2c)
+static void ad5593r_i2c_remove(struct i2c_client *i2c)
{
ad5592r_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5593r_i2c_ids[] = {
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 762503c1901b..aa36cbf0137c 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -65,11 +65,9 @@ static int ad5686_i2c_probe(struct i2c_client *i2c,
ad5686_i2c_write, ad5686_i2c_read);
}
-static int ad5686_i2c_remove(struct i2c_client *i2c)
+static void ad5686_i2c_remove(struct i2c_client *i2c)
{
ad5686_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id ad5686_i2c_id[] = {
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index 509394690bcc..3e17a681174e 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -281,15 +281,13 @@ fail:
return ret;
}
-static int ds4424_remove(struct i2c_client *client)
+static void ds4424_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ds4424_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(data->vcc_reg);
-
- return 0;
}
static const struct i2c_device_id ds4424_id[] = {
diff --git a/drivers/iio/dac/m62332.c b/drivers/iio/dac/m62332.c
index 22b02f50fe41..5a812f87970c 100644
--- a/drivers/iio/dac/m62332.c
+++ b/drivers/iio/dac/m62332.c
@@ -218,7 +218,7 @@ err:
return ret;
}
-static int m62332_remove(struct i2c_client *client)
+static void m62332_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -226,8 +226,6 @@ static int m62332_remove(struct i2c_client *client)
iio_map_array_unregister(indio_dev);
m62332_set_value(indio_dev, 0, 0);
m62332_set_value(indio_dev, 0, 1);
-
- return 0;
}
static const struct i2c_device_id m62332_id[] = {
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index bb4b85a7b95b..446d1a8fe4be 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -486,7 +486,7 @@ err_disable_vdd_reg:
return err;
}
-static int mcp4725_remove(struct i2c_client *client)
+static void mcp4725_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mcp4725_data *data = iio_priv(indio_dev);
@@ -496,8 +496,6 @@ static int mcp4725_remove(struct i2c_client *client)
if (data->vref_reg)
regulator_disable(data->vref_reg);
regulator_disable(data->vdd_reg);
-
- return 0;
}
static const struct i2c_device_id mcp4725_id[] = {
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index f91f8a504989..3210e3098f9a 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -382,15 +382,13 @@ static int dac5571_probe(struct i2c_client *client,
return ret;
}
-static int dac5571_remove(struct i2c_client *i2c)
+static void dac5571_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct dac5571_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(data->vref);
-
- return 0;
}
static const struct of_device_id dac5571_of_id[] = {
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index 135c8cedc33d..b27088464826 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -540,13 +540,6 @@ static int adf4371_setup(struct adf4371_state *st)
return regmap_bulk_write(st->regmap, ADF4371_REG(0x30), st->buf, 5);
}
-static void adf4371_clk_disable(void *data)
-{
- struct adf4371_state *st = data;
-
- clk_disable_unprepare(st->clkin);
-}
-
static int adf4371_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -579,18 +572,10 @@ static int adf4371_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
- st->clkin = devm_clk_get(&spi->dev, "clkin");
+ st->clkin = devm_clk_get_enabled(&spi->dev, "clkin");
if (IS_ERR(st->clkin))
return PTR_ERR(st->clkin);
- ret = clk_prepare_enable(st->clkin);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, adf4371_clk_disable, st);
- if (ret)
- return ret;
-
st->clkin_freq = clk_get_rate(st->clkin);
ret = adf4371_setup(st);
diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c
index 865addd10db4..bb5e1feef42b 100644
--- a/drivers/iio/frequency/admv1014.c
+++ b/drivers/iio/frequency/admv1014.c
@@ -669,8 +669,7 @@ static int admv1014_init(struct admv1014_state *st)
chip_id = FIELD_GET(ADMV1014_CHIP_ID_MSK, chip_id);
if (chip_id != ADMV1014_CHIP_ID) {
dev_err(&spi->dev, "Invalid Chip ID.\n");
- ret = -EINVAL;
- return ret;
+ return -EINVAL;
}
ret = __admv1014_spi_update_bits(st, ADMV1014_REG_QUAD,
diff --git a/drivers/iio/frequency/adrf6780.c b/drivers/iio/frequency/adrf6780.c
index 21878bad0909..b4defb82f37e 100644
--- a/drivers/iio/frequency/adrf6780.c
+++ b/drivers/iio/frequency/adrf6780.c
@@ -441,11 +441,6 @@ static void adrf6780_properties_parse(struct adrf6780_state *st)
st->vdet_out_en = device_property_read_bool(&spi->dev, "adi,vdet-out-en");
}
-static void adrf6780_clk_disable(void *data)
-{
- clk_disable_unprepare(data);
-}
-
static void adrf6780_powerdown(void *data)
{
/* Disable all components in the Enable Register */
@@ -473,20 +468,11 @@ static int adrf6780_probe(struct spi_device *spi)
adrf6780_properties_parse(st);
- st->clkin = devm_clk_get(&spi->dev, "lo_in");
+ st->clkin = devm_clk_get_enabled(&spi->dev, "lo_in");
if (IS_ERR(st->clkin))
return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
"failed to get the LO input clock\n");
- ret = clk_prepare_enable(st->clkin);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, adrf6780_clk_disable,
- st->clkin);
- if (ret)
- return ret;
-
mutex_init(&st->lock);
ret = adrf6780_init(st);
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index b3fa46bd02cb..908ccc385254 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -32,11 +32,9 @@ static int bmg160_i2c_probe(struct i2c_client *client,
return bmg160_core_probe(&client->dev, regmap, client->irq, name);
}
-static int bmg160_i2c_remove(struct i2c_client *client)
+static void bmg160_i2c_remove(struct i2c_client *client)
{
bmg160_core_remove(&client->dev);
-
- return 0;
}
static const struct acpi_device_id bmg160_acpi_match[] = {
diff --git a/drivers/iio/gyro/fxas21002c_i2c.c b/drivers/iio/gyro/fxas21002c_i2c.c
index a7807fd97483..13bb52c594d1 100644
--- a/drivers/iio/gyro/fxas21002c_i2c.c
+++ b/drivers/iio/gyro/fxas21002c_i2c.c
@@ -33,11 +33,9 @@ static int fxas21002c_i2c_probe(struct i2c_client *i2c)
return fxas21002c_core_probe(&i2c->dev, regmap, i2c->irq, i2c->name);
}
-static int fxas21002c_i2c_remove(struct i2c_client *i2c)
+static void fxas21002c_i2c_remove(struct i2c_client *i2c)
{
fxas21002c_core_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id fxas21002c_i2c_id[] = {
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index 0491c64e1b32..421501584587 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -350,7 +350,7 @@ error_unconfigure_buffer:
return ret;
}
-static int itg3200_remove(struct i2c_client *client)
+static void itg3200_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -360,8 +360,6 @@ static int itg3200_remove(struct i2c_client *client)
itg3200_remove_trigger(indio_dev);
itg3200_buffer_unconfigure(indio_dev);
-
- return 0;
}
static int itg3200_suspend(struct device *dev)
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 78f4a0102986..12e3afa9dd11 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -78,7 +78,7 @@ static int mpu3050_i2c_probe(struct i2c_client *client,
return 0;
}
-static int mpu3050_i2c_remove(struct i2c_client *client)
+static void mpu3050_i2c_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
@@ -87,8 +87,6 @@ static int mpu3050_i2c_remove(struct i2c_client *client)
i2c_mux_del_adapters(mpu3050->i2cmux);
mpu3050_common_remove(&client->dev);
-
- return 0;
}
/*
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index dd7800159051..8fca787b2524 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -578,7 +578,7 @@ disable_reg:
return ret;
}
-static int afe4404_remove(struct i2c_client *client)
+static void afe4404_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct afe4404_data *afe = iio_priv(indio_dev);
@@ -594,8 +594,6 @@ static int afe4404_remove(struct i2c_client *client)
ret = regulator_disable(afe->regulator);
if (ret)
dev_err(afe->dev, "Unable to disable regulator\n");
-
- return 0;
}
static const struct i2c_device_id afe4404_ids[] = {
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index ad5717965223..2cca5e0519f8 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -471,15 +471,13 @@ static int max30100_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int max30100_remove(struct i2c_client *client)
+static void max30100_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct max30100_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
max30100_set_powermode(data, false);
-
- return 0;
}
static const struct i2c_device_id max30100_id[] = {
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index abbcef563807..437298a29f2d 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -592,15 +592,13 @@ static int max30102_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int max30102_remove(struct i2c_client *client)
+static void max30102_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct max30102_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
max30102_set_power(data, false);
-
- return 0;
}
static const struct i2c_device_id max30102_id[] = {
diff --git a/drivers/iio/humidity/hdc2010.c b/drivers/iio/humidity/hdc2010.c
index 1381df46187c..d6858ccb056e 100644
--- a/drivers/iio/humidity/hdc2010.c
+++ b/drivers/iio/humidity/hdc2010.c
@@ -308,7 +308,7 @@ static int hdc2010_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int hdc2010_remove(struct i2c_client *client)
+static void hdc2010_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct hdc2010_data *data = iio_priv(indio_dev);
@@ -318,8 +318,6 @@ static int hdc2010_remove(struct i2c_client *client)
/* Disable Automatic Measurement Mode */
if (hdc2010_update_drdy_config(data, HDC2010_AMM, 0))
dev_warn(&client->dev, "Unable to restore default AMM\n");
-
- return 0;
}
static const struct i2c_device_id hdc2010_id[] = {
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 001ca2c3ff95..f1d7d4b5e222 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -52,6 +52,7 @@ config ADIS16480
ADIS16485, ADIS16488 inertial sensors.
source "drivers/iio/imu/bmi160/Kconfig"
+source "drivers/iio/imu/bno055/Kconfig"
config FXOS8700
tristate
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index c82748096c77..6eb612034722 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -15,6 +15,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+obj-y += bno055/
obj-$(CONFIG_FXOS8700) += fxos8700_core.o
obj-$(CONFIG_FXOS8700_I2C) += fxos8700_i2c.o
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index ff2b0fab840a..aec55f7e1f26 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1120,11 +1120,6 @@ check_burst32:
return IRQ_HANDLED;
}
-static void adis16475_disable_clk(void *data)
-{
- clk_disable_unprepare((struct clk *)data);
-}
-
static int adis16475_config_sync_mode(struct adis16475 *st)
{
int ret;
@@ -1150,19 +1145,11 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
/* All the other modes require external input signal */
if (sync->sync_mode != ADIS16475_SYNC_OUTPUT) {
- struct clk *clk = devm_clk_get(dev, NULL);
+ struct clk *clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, adis16475_disable_clk, clk);
- if (ret)
- return ret;
-
st->clk_freq = clk_get_rate(clk);
if (st->clk_freq < sync->min_rate ||
st->clk_freq > sync->max_rate) {
diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
new file mode 100644
index 000000000000..fa79b1ac4f85
--- /dev/null
+++ b/drivers/iio/imu/bno055/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config BOSCH_BNO055
+ tristate
+
+config BOSCH_BNO055_SERIAL
+ tristate "Bosch BNO055 attached via UART"
+ depends on SERIAL_DEV_BUS
+ select BOSCH_BNO055
+ help
+ Enable this to support Bosch BNO055 IMUs attached via UART.
+
+ This driver can also be built as a module. If so, the module will be
+ called bno055_ser.
+
+config BOSCH_BNO055_I2C
+ tristate "Bosch BNO055 attached via I2C bus"
+ depends on I2C
+ select REGMAP_I2C
+ select BOSCH_BNO055
+ help
+ Enable this to support Bosch BNO055 IMUs attached via I2C bus.
+
+ This driver can also be built as a module. If so, the module will be
+ called bno055_i2c.
diff --git a/drivers/iio/imu/bno055/Makefile b/drivers/iio/imu/bno055/Makefile
new file mode 100644
index 000000000000..98c624730dae
--- /dev/null
+++ b/drivers/iio/imu/bno055/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_BOSCH_BNO055) += bno055.o
+obj-$(CONFIG_BOSCH_BNO055_SERIAL) += bno055_ser.o
+bno055_ser-y := bno055_ser_core.o
+# define_trace.h needs to know how to find our header
+CFLAGS_bno055_ser_trace.o := -I$(src)
+bno055_ser-$(CONFIG_TRACING) += bno055_ser_trace.o
+
+obj-$(CONFIG_BOSCH_BNO055_I2C) += bno055_i2c.o
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
new file mode 100644
index 000000000000..307557a609e3
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO driver for Bosch BNO055 IMU
+ *
+ * Copyright (C) 2021-2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ * Written by Andrea Merello <andrea.merello@iit.it>
+ *
+ * Portions of this driver are taken from the BNO055 driver patch
+ * from Vlad Dogaru which is Copyright (c) 2016, Intel Corporation.
+ *
+ * This driver is also based on BMI160 driver, which is:
+ * Copyright (c) 2016, Intel Corporation.
+ * Copyright (c) 2019, Martin Kelly.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "bno055.h"
+
+#define BNO055_FW_UID_FMT "bno055-caldata-%*phN.dat"
+#define BNO055_FW_GENERIC_NAME "bno055-caldata.dat"
+
+/* common registers */
+#define BNO055_PAGESEL_REG 0x7
+
+/* page 0 registers */
+#define BNO055_CHIP_ID_REG 0x0
+#define BNO055_CHIP_ID_MAGIC 0xA0
+#define BNO055_SW_REV_LSB_REG 0x4
+#define BNO055_SW_REV_MSB_REG 0x5
+#define BNO055_ACC_DATA_X_LSB_REG 0x8
+#define BNO055_ACC_DATA_Y_LSB_REG 0xA
+#define BNO055_ACC_DATA_Z_LSB_REG 0xC
+#define BNO055_MAG_DATA_X_LSB_REG 0xE
+#define BNO055_MAG_DATA_Y_LSB_REG 0x10
+#define BNO055_MAG_DATA_Z_LSB_REG 0x12
+#define BNO055_GYR_DATA_X_LSB_REG 0x14
+#define BNO055_GYR_DATA_Y_LSB_REG 0x16
+#define BNO055_GYR_DATA_Z_LSB_REG 0x18
+#define BNO055_EUL_DATA_X_LSB_REG 0x1A
+#define BNO055_EUL_DATA_Y_LSB_REG 0x1C
+#define BNO055_EUL_DATA_Z_LSB_REG 0x1E
+#define BNO055_QUAT_DATA_W_LSB_REG 0x20
+#define BNO055_LIA_DATA_X_LSB_REG 0x28
+#define BNO055_LIA_DATA_Y_LSB_REG 0x2A
+#define BNO055_LIA_DATA_Z_LSB_REG 0x2C
+#define BNO055_GRAVITY_DATA_X_LSB_REG 0x2E
+#define BNO055_GRAVITY_DATA_Y_LSB_REG 0x30
+#define BNO055_GRAVITY_DATA_Z_LSB_REG 0x32
+#define BNO055_SCAN_CH_COUNT ((BNO055_GRAVITY_DATA_Z_LSB_REG - BNO055_ACC_DATA_X_LSB_REG) / 2)
+#define BNO055_TEMP_REG 0x34
+#define BNO055_CALIB_STAT_REG 0x35
+#define BNO055_CALIB_STAT_MAGN_SHIFT 0
+#define BNO055_CALIB_STAT_ACCEL_SHIFT 2
+#define BNO055_CALIB_STAT_GYRO_SHIFT 4
+#define BNO055_CALIB_STAT_SYS_SHIFT 6
+#define BNO055_SYS_ERR_REG 0x3A
+#define BNO055_POWER_MODE_REG 0x3E
+#define BNO055_POWER_MODE_NORMAL 0
+#define BNO055_SYS_TRIGGER_REG 0x3F
+#define BNO055_SYS_TRIGGER_RST_SYS BIT(5)
+#define BNO055_SYS_TRIGGER_CLK_SEL BIT(7)
+#define BNO055_OPR_MODE_REG 0x3D
+#define BNO055_OPR_MODE_CONFIG 0x0
+#define BNO055_OPR_MODE_AMG 0x7
+#define BNO055_OPR_MODE_FUSION_FMC_OFF 0xB
+#define BNO055_OPR_MODE_FUSION 0xC
+#define BNO055_UNIT_SEL_REG 0x3B
+/* Android orientation mode means: pitch value decreases turning clockwise */
+#define BNO055_UNIT_SEL_ANDROID BIT(7)
+#define BNO055_UNIT_SEL_GYR_RPS BIT(1)
+#define BNO055_CALDATA_START 0x55
+#define BNO055_CALDATA_END 0x6A
+#define BNO055_CALDATA_LEN 22
+
+/*
+ * The difference in address between the register that contains the
+ * value and the register that contains the offset. This applies for
+ * accel, gyro and magn channels.
+ */
+#define BNO055_REG_OFFSET_ADDR 0x4D
+
+/* page 1 registers */
+#define BNO055_PG1(x) ((x) | 0x80)
+#define BNO055_ACC_CONFIG_REG BNO055_PG1(0x8)
+#define BNO055_ACC_CONFIG_LPF_MASK GENMASK(4, 2)
+#define BNO055_ACC_CONFIG_RANGE_MASK GENMASK(1, 0)
+#define BNO055_MAG_CONFIG_REG BNO055_PG1(0x9)
+#define BNO055_MAG_CONFIG_HIGHACCURACY 0x18
+#define BNO055_MAG_CONFIG_ODR_MASK GENMASK(2, 0)
+#define BNO055_GYR_CONFIG_REG BNO055_PG1(0xA)
+#define BNO055_GYR_CONFIG_RANGE_MASK GENMASK(2, 0)
+#define BNO055_GYR_CONFIG_LPF_MASK GENMASK(5, 3)
+#define BNO055_GYR_AM_SET_REG BNO055_PG1(0x1F)
+#define BNO055_UID_LOWER_REG BNO055_PG1(0x50)
+#define BNO055_UID_HIGHER_REG BNO055_PG1(0x5F)
+#define BNO055_UID_LEN 16
+
+struct bno055_sysfs_attr {
+ int *vals;
+ int len;
+ int *fusion_vals;
+ int *hw_xlate;
+ int type;
+};
+
+static int bno055_acc_lpf_vals[] = {
+ 7, 810000, 15, 630000, 31, 250000, 62, 500000,
+ 125, 0, 250, 0, 500, 0, 1000, 0,
+};
+
+static struct bno055_sysfs_attr bno055_acc_lpf = {
+ .vals = bno055_acc_lpf_vals,
+ .len = ARRAY_SIZE(bno055_acc_lpf_vals),
+ .fusion_vals = (int[]){62, 500000},
+ .type = IIO_VAL_INT_PLUS_MICRO,
+};
+
+static int bno055_acc_range_vals[] = {
+ /* G: 2, 4, 8, 16 */
+ 1962, 3924, 7848, 15696
+};
+
+static struct bno055_sysfs_attr bno055_acc_range = {
+ .vals = bno055_acc_range_vals,
+ .len = ARRAY_SIZE(bno055_acc_range_vals),
+ .fusion_vals = (int[]){3924}, /* 4G */
+ .type = IIO_VAL_INT,
+};
+
+/*
+ * Theoretically the IMU should return data in a given (i.e. fixed) unit
+ * regardless of the range setting. This happens for the accelerometer, but not
+ * for the gyroscope: the gyroscope range setting affects the scale.
+ * This is probably due to this[0] bug.
+ * For this reason we map the internal range setting onto the standard IIO scale
+ * attribute for the gyro.
+ * Since the bug[0] may be fixed in future firmware, we check the IMU FW version
+ * and warn the user if it differs from the tested one.
+ * For now we simply don't expose any "range" attribute for the gyro.
+ *
+ * [0] https://community.bosch-sensortec.com/t5/MEMS-sensors-forum/BNO055-Wrong-sensitivity-resolution-in-datasheet/td-p/10266
+ */
+
+/*
+ * dps = hwval * (dps_range/2^15)
+ * rps = hwval * (rps_range/2^15)
+ * = hwval * (dps_range/(2^15 * k))
+ * where k is rad-to-deg factor
+ */
+static int bno055_gyr_scale_vals[] = {
+ 125, 1877467, 250, 1877467, 500, 1877467,
+ 1000, 1877467, 2000, 1877467,
+};
+
+static struct bno055_sysfs_attr bno055_gyr_scale = {
+ .vals = bno055_gyr_scale_vals,
+ .len = ARRAY_SIZE(bno055_gyr_scale_vals),
+ .fusion_vals = (int[]){1, 900},
+ .hw_xlate = (int[]){4, 3, 2, 1, 0},
+ .type = IIO_VAL_FRACTIONAL,
+};
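As a sanity check on the table above (an editorial sketch, not part of the patch): with k = 180/pi, the common denominator in the formula works out to 2^15 * 180/pi ~= 1877468, which agrees with the 1877467 stored in bno055_gyr_scale_vals to within rounding. A minimal user-space C rendition of the arithmetic:

#include <math.h>
#include <stdio.h>

/* Reproduce the gyro scale denominator: rps/LSB = dps_range / (2^15 * 180/pi) */
int main(void)
{
	double k = 180.0 / M_PI;	/* rad-to-deg factor */
	double denom = 32768.0 * k;	/* 2^15 * k */

	/* Prints ~1877468.1; the driver table stores 1877467 */
	printf("common denominator = %.1f\n", denom);
	return 0;
}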
+
+static int bno055_gyr_lpf_vals[] = {12, 23, 32, 47, 64, 116, 230, 523};
+static struct bno055_sysfs_attr bno055_gyr_lpf = {
+ .vals = bno055_gyr_lpf_vals,
+ .len = ARRAY_SIZE(bno055_gyr_lpf_vals),
+ .fusion_vals = (int[]){32},
+ .hw_xlate = (int[]){5, 4, 7, 3, 6, 2, 1, 0},
+ .type = IIO_VAL_INT,
+};
+
+static int bno055_mag_odr_vals[] = {2, 6, 8, 10, 15, 20, 25, 30};
+static struct bno055_sysfs_attr bno055_mag_odr = {
+ .vals = bno055_mag_odr_vals,
+ .len = ARRAY_SIZE(bno055_mag_odr_vals),
+ .fusion_vals = (int[]){20},
+ .type = IIO_VAL_INT,
+};
+
+struct bno055_priv {
+ struct regmap *regmap;
+ struct device *dev;
+ struct clk *clk;
+ int operation_mode;
+ int xfer_burst_break_thr;
+ struct mutex lock;
+ u8 uid[BNO055_UID_LEN];
+ struct gpio_desc *reset_gpio;
+ bool sw_reset;
+ struct {
+ __le16 chans[BNO055_SCAN_CH_COUNT];
+ s64 timestamp __aligned(8);
+ } buf;
+ struct dentry *debugfs;
+};
+
+static bool bno055_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ /* data and status registers */
+ if (reg >= BNO055_ACC_DATA_X_LSB_REG && reg <= BNO055_SYS_ERR_REG)
+ return true;
+
+ /* when in fusion mode, config is updated by chip */
+ if (reg == BNO055_MAG_CONFIG_REG ||
+ reg == BNO055_ACC_CONFIG_REG ||
+ reg == BNO055_GYR_CONFIG_REG)
+ return true;
+
+ /* calibration data may be updated by the IMU */
+ if (reg >= BNO055_CALDATA_START && reg <= BNO055_CALDATA_END)
+ return true;
+
+ return false;
+}
+
+static bool bno055_regmap_readable(struct device *dev, unsigned int reg)
+{
+ /* unnamed PG0 reserved areas */
+ if ((reg < BNO055_PG1(0) && reg > BNO055_CALDATA_END) ||
+ reg == 0x3C)
+ return false;
+
+ /* unnamed PG1 reserved areas */
+ if (reg > BNO055_PG1(BNO055_UID_HIGHER_REG) ||
+ (reg < BNO055_PG1(BNO055_UID_LOWER_REG) && reg > BNO055_PG1(BNO055_GYR_AM_SET_REG)) ||
+ reg == BNO055_PG1(0xE) ||
+ (reg < BNO055_PG1(BNO055_PAGESEL_REG) && reg >= BNO055_PG1(0x0)))
+ return false;
+ return true;
+}
+
+static bool bno055_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ /*
+ * Unreadable registers are indeed reserved; there are no WO regs
+ * (except for a single bit in SYS_TRIGGER register)
+ */
+ if (!bno055_regmap_readable(dev, reg))
+ return false;
+
+ /* data and status registers */
+ if (reg >= BNO055_ACC_DATA_X_LSB_REG && reg <= BNO055_SYS_ERR_REG)
+ return false;
+
+ /* ID areas */
+ if (reg < BNO055_PAGESEL_REG ||
+ (reg <= BNO055_UID_HIGHER_REG && reg >= BNO055_UID_LOWER_REG))
+ return false;
+
+ return true;
+}
+
+static const struct regmap_range_cfg bno055_regmap_ranges[] = {
+ {
+ .range_min = 0,
+ .range_max = 0x7f * 2,
+ .selector_reg = BNO055_PAGESEL_REG,
+ .selector_mask = GENMASK(7, 0),
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x80,
+ },
+};
+
+const struct regmap_config bno055_regmap_config = {
+ .name = "bno055",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .ranges = bno055_regmap_ranges,
+ .num_ranges = 1,
+ .volatile_reg = bno055_regmap_volatile,
+ .max_register = 0x80 * 2,
+ .writeable_reg = bno055_regmap_writeable,
+ .readable_reg = bno055_regmap_readable,
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_NS_GPL(bno055_regmap_config, IIO_BNO055);
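For context, an editorial sketch (not part of the patch, and it only assumes the usual regmap_range_cfg semantics): the single range above exposes a flat virtual space 0x00-0xfe in which addresses built with BNO055_PG1() land in the 0x80-0xfe half, so regmap writes the page index to BNO055_PAGESEL_REG and then accesses the in-page offset.

#include <stdio.h>

#define BNO055_PG1(x)	((x) | 0x80)	/* same encoding used by the driver */

/*
 * Decompose a "virtual" register address the way the range config above
 * implies: the quotient selects the page (written to PAGESEL), the
 * remainder is the in-page register offset (window_start is 0).
 */
int main(void)
{
	unsigned int reg = BNO055_PG1(0x0A);	/* e.g. the gyro config register */
	unsigned int page = reg / 0x80;		/* window_len */
	unsigned int offset = reg % 0x80;

	printf("virtual 0x%02x -> PAGESEL %u, offset 0x%02x\n", reg, page, offset);
	return 0;
}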
+
+/* must be called in configuration mode */
+static int bno055_calibration_load(struct bno055_priv *priv, const u8 *data, int len)
+{
+ if (len != BNO055_CALDATA_LEN) {
+ dev_dbg(priv->dev, "Invalid calibration file size %d (expected %d)",
+ len, BNO055_CALDATA_LEN);
+ return -EINVAL;
+ }
+
+ dev_dbg(priv->dev, "loading cal data: %*ph", BNO055_CALDATA_LEN, data);
+ return regmap_bulk_write(priv->regmap, BNO055_CALDATA_START,
+ data, BNO055_CALDATA_LEN);
+}
+
+static int bno055_operation_mode_do_set(struct bno055_priv *priv,
+ int operation_mode)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, BNO055_OPR_MODE_REG,
+ operation_mode);
+ if (ret)
+ return ret;
+
+	/* Following datasheet specifications: the sensor takes from 7 mS up to 19 mS to switch mode */
+ msleep(20);
+
+ return 0;
+}
+
+static int bno055_system_reset(struct bno055_priv *priv)
+{
+ int ret;
+
+ if (priv->reset_gpio) {
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ } else if (priv->sw_reset) {
+ ret = regmap_write(priv->regmap, BNO055_SYS_TRIGGER_REG,
+ BNO055_SYS_TRIGGER_RST_SYS);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ regcache_drop_region(priv->regmap, 0x0, 0xff);
+ usleep_range(650000, 700000);
+
+ return 0;
+}
+
+static int bno055_init(struct bno055_priv *priv, const u8 *caldata, int len)
+{
+ int ret;
+
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, BNO055_POWER_MODE_REG,
+ BNO055_POWER_MODE_NORMAL);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, BNO055_SYS_TRIGGER_REG,
+ priv->clk ? BNO055_SYS_TRIGGER_CLK_SEL : 0);
+ if (ret)
+ return ret;
+
+ /* use standard SI units */
+ ret = regmap_write(priv->regmap, BNO055_UNIT_SEL_REG,
+ BNO055_UNIT_SEL_ANDROID | BNO055_UNIT_SEL_GYR_RPS);
+ if (ret)
+ return ret;
+
+ if (caldata) {
+ ret = bno055_calibration_load(priv, caldata, len);
+ if (ret)
+ dev_warn(priv->dev, "failed to load calibration data with error %d\n",
+ ret);
+ }
+
+ return 0;
+}
+
+static ssize_t bno055_operation_mode_set(struct bno055_priv *priv,
+ int operation_mode)
+{
+ u8 caldata[BNO055_CALDATA_LEN];
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ goto exit_unlock;
+
+ if (operation_mode == BNO055_OPR_MODE_FUSION ||
+ operation_mode == BNO055_OPR_MODE_FUSION_FMC_OFF) {
+ /* for entering fusion mode, reset the chip to clear the algo state */
+ ret = regmap_bulk_read(priv->regmap, BNO055_CALDATA_START, caldata,
+ BNO055_CALDATA_LEN);
+ if (ret)
+ goto exit_unlock;
+
+ ret = bno055_system_reset(priv);
+ if (ret)
+ goto exit_unlock;
+
+ ret = bno055_init(priv, caldata, BNO055_CALDATA_LEN);
+ if (ret)
+ goto exit_unlock;
+ }
+
+ ret = bno055_operation_mode_do_set(priv, operation_mode);
+ if (ret)
+ goto exit_unlock;
+
+ priv->operation_mode = operation_mode;
+
+exit_unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void bno055_uninit(void *arg)
+{
+ struct bno055_priv *priv = arg;
+
+ /* stop the IMU */
+ bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+}
+
+#define BNO055_CHANNEL(_type, _axis, _index, _address, _sep, _sh, _avail) { \
+ .address = _address, \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | (_sep), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | (_sh), \
+ .info_mask_shared_by_type_available = _avail, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ .repeat = IIO_MOD_##_axis == IIO_MOD_QUATERNION ? 4 : 0, \
+ }, \
+}
+
+/* scan indexes follow DATA register order */
+enum bno055_scan_axis {
+ BNO055_SCAN_ACCEL_X,
+ BNO055_SCAN_ACCEL_Y,
+ BNO055_SCAN_ACCEL_Z,
+ BNO055_SCAN_MAGN_X,
+ BNO055_SCAN_MAGN_Y,
+ BNO055_SCAN_MAGN_Z,
+ BNO055_SCAN_GYRO_X,
+ BNO055_SCAN_GYRO_Y,
+ BNO055_SCAN_GYRO_Z,
+ BNO055_SCAN_YAW,
+ BNO055_SCAN_ROLL,
+ BNO055_SCAN_PITCH,
+ BNO055_SCAN_QUATERNION,
+ BNO055_SCAN_LIA_X,
+ BNO055_SCAN_LIA_Y,
+ BNO055_SCAN_LIA_Z,
+ BNO055_SCAN_GRAVITY_X,
+ BNO055_SCAN_GRAVITY_Y,
+ BNO055_SCAN_GRAVITY_Z,
+ BNO055_SCAN_TIMESTAMP,
+ _BNO055_SCAN_MAX
+};
+
+static const struct iio_chan_spec bno055_channels[] = {
+ /* accelerometer */
+ BNO055_CHANNEL(IIO_ACCEL, X, BNO055_SCAN_ACCEL_X,
+ BNO055_ACC_DATA_X_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)),
+ BNO055_CHANNEL(IIO_ACCEL, Y, BNO055_SCAN_ACCEL_Y,
+ BNO055_ACC_DATA_Y_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)),
+ BNO055_CHANNEL(IIO_ACCEL, Z, BNO055_SCAN_ACCEL_Z,
+ BNO055_ACC_DATA_Z_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)),
+ /* gyroscope */
+ BNO055_CHANNEL(IIO_ANGL_VEL, X, BNO055_SCAN_GYRO_X,
+ BNO055_GYR_DATA_X_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SCALE)),
+ BNO055_CHANNEL(IIO_ANGL_VEL, Y, BNO055_SCAN_GYRO_Y,
+ BNO055_GYR_DATA_Y_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SCALE)),
+ BNO055_CHANNEL(IIO_ANGL_VEL, Z, BNO055_SCAN_GYRO_Z,
+ BNO055_GYR_DATA_Z_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_SCALE)),
+ /* magnetometer */
+ BNO055_CHANNEL(IIO_MAGN, X, BNO055_SCAN_MAGN_X,
+ BNO055_MAG_DATA_X_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), BIT(IIO_CHAN_INFO_SAMP_FREQ)),
+ BNO055_CHANNEL(IIO_MAGN, Y, BNO055_SCAN_MAGN_Y,
+ BNO055_MAG_DATA_Y_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), BIT(IIO_CHAN_INFO_SAMP_FREQ)),
+ BNO055_CHANNEL(IIO_MAGN, Z, BNO055_SCAN_MAGN_Z,
+ BNO055_MAG_DATA_Z_LSB_REG, BIT(IIO_CHAN_INFO_OFFSET),
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), BIT(IIO_CHAN_INFO_SAMP_FREQ)),
+ /* euler angle */
+ BNO055_CHANNEL(IIO_ROT, YAW, BNO055_SCAN_YAW,
+ BNO055_EUL_DATA_X_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ROT, ROLL, BNO055_SCAN_ROLL,
+ BNO055_EUL_DATA_Y_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ROT, PITCH, BNO055_SCAN_PITCH,
+ BNO055_EUL_DATA_Z_LSB_REG, 0, 0, 0),
+ /* quaternion */
+ BNO055_CHANNEL(IIO_ROT, QUATERNION, BNO055_SCAN_QUATERNION,
+ BNO055_QUAT_DATA_W_LSB_REG, 0, 0, 0),
+
+ /* linear acceleration */
+ BNO055_CHANNEL(IIO_ACCEL, LINEAR_X, BNO055_SCAN_LIA_X,
+ BNO055_LIA_DATA_X_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ACCEL, LINEAR_Y, BNO055_SCAN_LIA_Y,
+ BNO055_LIA_DATA_Y_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_ACCEL, LINEAR_Z, BNO055_SCAN_LIA_Z,
+ BNO055_LIA_DATA_Z_LSB_REG, 0, 0, 0),
+
+ /* gravity vector */
+ BNO055_CHANNEL(IIO_GRAVITY, X, BNO055_SCAN_GRAVITY_X,
+ BNO055_GRAVITY_DATA_X_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_GRAVITY, Y, BNO055_SCAN_GRAVITY_Y,
+ BNO055_GRAVITY_DATA_Y_LSB_REG, 0, 0, 0),
+ BNO055_CHANNEL(IIO_GRAVITY, Z, BNO055_SCAN_GRAVITY_Z,
+ BNO055_GRAVITY_DATA_Z_LSB_REG, 0, 0, 0),
+
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = -1,
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(BNO055_SCAN_TIMESTAMP),
+};
+
+static int bno055_get_regmask(struct bno055_priv *priv, int *val, int *val2,
+ int reg, int mask, struct bno055_sysfs_attr *attr)
+{
+ const int shift = __ffs(mask);
+ int hwval, idx;
+ int ret;
+ int i;
+
+ ret = regmap_read(priv->regmap, reg, &hwval);
+ if (ret)
+ return ret;
+
+ idx = (hwval & mask) >> shift;
+ if (attr->hw_xlate)
+ for (i = 0; i < attr->len; i++)
+ if (attr->hw_xlate[i] == idx) {
+ idx = i;
+ break;
+ }
+ if (attr->type == IIO_VAL_INT) {
+ *val = attr->vals[idx];
+ } else { /* IIO_VAL_INT_PLUS_MICRO or IIO_VAL_FRACTIONAL */
+ *val = attr->vals[idx * 2];
+ *val2 = attr->vals[idx * 2 + 1];
+ }
+
+ return attr->type;
+}
+
+static int bno055_set_regmask(struct bno055_priv *priv, int val, int val2,
+ int reg, int mask, struct bno055_sysfs_attr *attr)
+{
+ const int shift = __ffs(mask);
+ int best_delta;
+ int req_val;
+ int tbl_val;
+ bool first;
+ int delta;
+ int hwval;
+ int ret;
+ int len;
+ int i;
+
+	/*
+	 * In fusion mode the HW supports only one value, which is autoselected,
+	 * so don't do anything; just return OK, as the closest possible value
+	 * has been (virtually) selected.
+	 */
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG)
+ return 0;
+
+ len = attr->len;
+
+ /*
+ * We always get a request in INT_PLUS_MICRO, but we
+ * take care of the micro part only when we really have
+ * non-integer tables. This prevents 32-bit overflow with
+ * larger integers contained in integer tables.
+ */
+ req_val = val;
+ if (attr->type != IIO_VAL_INT) {
+ len /= 2;
+ req_val = min(val, 2147) * 1000000 + val2;
+ }
+
+ first = true;
+ for (i = 0; i < len; i++) {
+ switch (attr->type) {
+ case IIO_VAL_INT:
+ tbl_val = attr->vals[i];
+ break;
+ case IIO_VAL_INT_PLUS_MICRO:
+ WARN_ON(attr->vals[i * 2] > 2147);
+ tbl_val = attr->vals[i * 2] * 1000000 +
+ attr->vals[i * 2 + 1];
+ break;
+ case IIO_VAL_FRACTIONAL:
+ WARN_ON(attr->vals[i * 2] > 4294);
+ tbl_val = attr->vals[i * 2] * 1000000 /
+ attr->vals[i * 2 + 1];
+ break;
+ default:
+ return -EINVAL;
+ }
+ delta = abs(tbl_val - req_val);
+ if (delta < best_delta || first) {
+ best_delta = delta;
+ hwval = i;
+ first = false;
+ }
+ }
+
+ if (attr->hw_xlate)
+ hwval = attr->hw_xlate[hwval];
+
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, hwval << shift);
+ if (ret)
+ return ret;
+
+ return bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_AMG);
+}
+
+static int bno055_read_simple_chan(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ __le16 raw_val;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_bulk_read(priv->regmap, chan->address,
+ &raw_val, sizeof(raw_val));
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG) {
+ *val = 0;
+ } else {
+ ret = regmap_bulk_read(priv->regmap,
+ chan->address +
+ BNO055_REG_OFFSET_ADDR,
+ &raw_val, sizeof(raw_val));
+ if (ret < 0)
+ return ret;
+ /*
+ * IMU reports sensor offsets; IIO wants correction
+ * offsets, thus we need the 'minus' here.
+ */
+ *val = -sign_extend32(le16_to_cpu(raw_val), 15);
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ switch (chan->type) {
+ case IIO_GRAVITY:
+ /* Table 3-35: 1 m/s^2 = 100 LSB */
+ case IIO_ACCEL:
+ /* Table 3-17: 1 m/s^2 = 100 LSB */
+ *val2 = 100;
+ break;
+ case IIO_MAGN:
+ /*
+ * Table 3-19: 1 uT = 16 LSB. But we need
+ * Gauss: 1G = 0.1 uT.
+ */
+ *val2 = 160;
+ break;
+ case IIO_ANGL_VEL:
+ /*
+ * Table 3-22: 1 Rps = 900 LSB
+ * .. but this is not exactly true. See comment at the
+ * beginning of this file.
+ */
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG) {
+ *val = bno055_gyr_scale.fusion_vals[0];
+ *val2 = bno055_gyr_scale.fusion_vals[1];
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_RANGE_MASK,
+ &bno055_gyr_scale);
+ break;
+ case IIO_ROT:
+ /* Table 3-28: 1 degree = 16 LSB */
+ *val2 = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return IIO_VAL_FRACTIONAL;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (chan->type != IIO_MAGN)
+ return -EINVAL;
+
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_MAG_CONFIG_REG,
+ BNO055_MAG_CONFIG_ODR_MASK,
+ &bno055_mag_odr);
+
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_LPF_MASK,
+ &bno055_gyr_lpf);
+ case IIO_ACCEL:
+ return bno055_get_regmask(priv, val, val2,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_LPF_MASK,
+ &bno055_acc_lpf);
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_sysfs_attr_avail(struct bno055_priv *priv, struct bno055_sysfs_attr *attr,
+ const int **vals, int *length)
+{
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG) {
+ /* locked when fusion enabled */
+ *vals = attr->fusion_vals;
+ if (attr->type == IIO_VAL_INT)
+ *length = 1;
+ else
+ *length = 2; /* IIO_VAL_INT_PLUS_MICRO or IIO_VAL_FRACTIONAL*/
+ } else {
+ *vals = attr->vals;
+ *length = attr->len;
+ }
+
+ return attr->type;
+}
+
+static int bno055_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_gyr_scale,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_gyr_lpf,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ case IIO_ACCEL:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_acc_lpf,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_MAGN:
+ *type = bno055_sysfs_attr_avail(priv, &bno055_mag_odr,
+ vals, length);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_read_temp_chan(struct iio_dev *indio_dev, int *val)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ unsigned int raw_val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, BNO055_TEMP_REG, &raw_val);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * Tables 3-36 and 3-37: one byte of data, signed, 1 LSB = 1C.
+	 * The ABI wants milliC.
+	 */
+ *val = raw_val * 1000;
+
+ return IIO_VAL_INT;
+}
+
+static int bno055_read_quaternion(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int size, int *vals, int *val_len,
+ long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ __le16 raw_vals[4];
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (size < 4)
+ return -EINVAL;
+ ret = regmap_bulk_read(priv->regmap,
+ BNO055_QUAT_DATA_W_LSB_REG,
+ raw_vals, sizeof(raw_vals));
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < 4; i++)
+ vals[i] = sign_extend32(le16_to_cpu(raw_vals[i]), 15);
+ *val_len = 4;
+ return IIO_VAL_INT_MULTIPLE;
+ case IIO_CHAN_INFO_SCALE:
+ /* Table 3-31: 1 quaternion = 2^14 LSB */
+ if (size < 2)
+ return -EINVAL;
+ vals[0] = 1;
+ vals[1] = 14;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool bno055_is_chan_readable(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG)
+ return true;
+
+ switch (chan->type) {
+ case IIO_GRAVITY:
+ case IIO_ROT:
+ return false;
+ case IIO_ACCEL:
+ if (chan->channel2 == IIO_MOD_LINEAR_X ||
+ chan->channel2 == IIO_MOD_LINEAR_Y ||
+ chan->channel2 == IIO_MOD_LINEAR_Z)
+ return false;
+ return true;
+ default:
+ return true;
+ }
+}
+
+static int _bno055_read_raw_multi(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int size, int *vals, int *val_len,
+ long mask)
+{
+ if (!bno055_is_chan_readable(indio_dev, chan))
+ return -EBUSY;
+
+ switch (chan->type) {
+ case IIO_MAGN:
+ case IIO_ACCEL:
+ case IIO_ANGL_VEL:
+ case IIO_GRAVITY:
+ if (size < 2)
+ return -EINVAL;
+ *val_len = 2;
+ return bno055_read_simple_chan(indio_dev, chan,
+ &vals[0], &vals[1],
+ mask);
+ case IIO_TEMP:
+ *val_len = 1;
+ return bno055_read_temp_chan(indio_dev, &vals[0]);
+ case IIO_ROT:
+ /*
+ * Rotation is exposed as either a quaternion or three
+ * Euler angles.
+ */
+ if (chan->channel2 == IIO_MOD_QUATERNION)
+ return bno055_read_quaternion(indio_dev, chan,
+ size, vals,
+ val_len, mask);
+ if (size < 2)
+ return -EINVAL;
+ *val_len = 2;
+ return bno055_read_simple_chan(indio_dev, chan,
+ &vals[0], &vals[1],
+ mask);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_read_raw_multi(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int size, int *vals, int *val_len,
+ long mask)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = _bno055_read_raw_multi(indio_dev, chan, size,
+ vals, val_len, mask);
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int _bno055_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+
+ switch (chan->type) {
+ case IIO_MAGN:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_MAG_CONFIG_REG,
+ BNO055_MAG_CONFIG_ODR_MASK,
+ &bno055_mag_odr);
+ default:
+ return -EINVAL;
+ }
+ case IIO_ACCEL:
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_LPF_MASK,
+ &bno055_acc_lpf);
+
+ default:
+ return -EINVAL;
+ }
+ case IIO_ANGL_VEL:
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_LPF_MASK,
+ &bno055_gyr_lpf);
+ case IIO_CHAN_INFO_SCALE:
+ return bno055_set_regmask(priv, val, val2,
+ BNO055_GYR_CONFIG_REG,
+ BNO055_GYR_CONFIG_RANGE_MASK,
+ &bno055_gyr_scale);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bno055_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = _bno055_write_raw(iio_dev, chan, val, val2, mask);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static ssize_t in_accel_range_raw_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int len = 0;
+ int i;
+
+ if (priv->operation_mode != BNO055_OPR_MODE_AMG)
+ return sysfs_emit(buf, "%d\n", bno055_acc_range.fusion_vals[0]);
+
+ for (i = 0; i < bno055_acc_range.len; i++)
+ len += sysfs_emit_at(buf, len, "%d ", bno055_acc_range.vals[i]);
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t fusion_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%d\n",
+ priv->operation_mode != BNO055_OPR_MODE_AMG);
+}
+
+static ssize_t fusion_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ bool en;
+ int ret;
+
+ if (indio_dev->active_scan_mask &&
+ !bitmap_empty(indio_dev->active_scan_mask, _BNO055_SCAN_MAX))
+ return -EBUSY;
+
+ ret = kstrtobool(buf, &en);
+ if (ret)
+ return -EINVAL;
+
+ if (!en)
+ return bno055_operation_mode_set(priv, BNO055_OPR_MODE_AMG) ?: len;
+
+	/*
+	 * Coming from AMG means the FMC was off; just switch to fusion, but
+	 * don't change anything that doesn't belong to us (i.e. let FMC stay off).
+	 * Coming from any other fusion mode means we don't need to do anything.
+	 */
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG)
+ return bno055_operation_mode_set(priv, BNO055_OPR_MODE_FUSION_FMC_OFF) ?: len;
+
+ return len;
+}
+
+static ssize_t in_magn_calibration_fast_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%d\n",
+ priv->operation_mode == BNO055_OPR_MODE_FUSION);
+}
+
+static ssize_t in_magn_calibration_fast_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ if (indio_dev->active_scan_mask &&
+ !bitmap_empty(indio_dev->active_scan_mask, _BNO055_SCAN_MAX))
+ return -EBUSY;
+
+ if (sysfs_streq(buf, "0")) {
+ if (priv->operation_mode == BNO055_OPR_MODE_FUSION) {
+ ret = bno055_operation_mode_set(priv, BNO055_OPR_MODE_FUSION_FMC_OFF);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG)
+ return -EINVAL;
+
+ if (priv->operation_mode != BNO055_OPR_MODE_FUSION) {
+ ret = bno055_operation_mode_set(priv, BNO055_OPR_MODE_FUSION);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t in_accel_range_raw_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int val;
+ int ret;
+
+ ret = bno055_get_regmask(priv, &val, NULL,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_RANGE_MASK,
+ &bno055_acc_range);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t in_accel_range_raw_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->lock);
+ ret = bno055_set_regmask(priv, val, 0,
+ BNO055_ACC_CONFIG_REG,
+ BNO055_ACC_CONFIG_RANGE_MASK,
+ &bno055_acc_range);
+ mutex_unlock(&priv->lock);
+
+ return ret ?: len;
+}
+
+static ssize_t bno055_get_calib_status(struct device *dev, char *buf, int which)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+ int calib;
+ int ret;
+ int val;
+
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG ||
+ (priv->operation_mode == BNO055_OPR_MODE_FUSION_FMC_OFF &&
+ which == BNO055_CALIB_STAT_MAGN_SHIFT)) {
+ calib = 0;
+ } else {
+ mutex_lock(&priv->lock);
+ ret = regmap_read(priv->regmap, BNO055_CALIB_STAT_REG, &val);
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return -EIO;
+
+ calib = ((val >> which) & GENMASK(1, 0)) + 1;
+ }
+
+ return sysfs_emit(buf, "%d\n", calib);
+}
+
+static ssize_t serialnumber_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%*ph\n", BNO055_UID_LEN, priv->uid);
+}
+
+static ssize_t calibration_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct bno055_priv *priv = iio_priv(dev_to_iio_dev(kobj_to_dev(kobj)));
+ u8 data[BNO055_CALDATA_LEN];
+ int ret;
+
+	/*
+	 * Calibration data is volatile; reading it in chunks could possibly
+	 * result in inconsistent data. We require the user to read the whole
+	 * blob in a single chunk.
+	 */
+ if (count < BNO055_CALDATA_LEN || pos)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ ret = bno055_operation_mode_do_set(priv, BNO055_OPR_MODE_CONFIG);
+ if (ret)
+ goto exit_unlock;
+
+ ret = regmap_bulk_read(priv->regmap, BNO055_CALDATA_START, data,
+ BNO055_CALDATA_LEN);
+ if (ret)
+ goto exit_unlock;
+
+ ret = bno055_operation_mode_do_set(priv, priv->operation_mode);
+ if (ret)
+ goto exit_unlock;
+
+ memcpy(buf, data, BNO055_CALDATA_LEN);
+
+ ret = BNO055_CALDATA_LEN;
+exit_unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static ssize_t sys_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_SYS_SHIFT);
+}
+
+static ssize_t in_accel_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_ACCEL_SHIFT);
+}
+
+static ssize_t in_gyro_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_GYRO_SHIFT);
+}
+
+static ssize_t in_magn_calibration_auto_status_show(struct device *dev,
+ struct device_attribute *a,
+ char *buf)
+{
+ return bno055_get_calib_status(dev, buf, BNO055_CALIB_STAT_MAGN_SHIFT);
+}
+
+static int bno055_debugfs_reg_access(struct iio_dev *iio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+
+ if (readval)
+ return regmap_read(priv->regmap, reg, readval);
+ else
+ return regmap_write(priv->regmap, reg, writeval);
+}
+
+static ssize_t bno055_show_fw_version(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct bno055_priv *priv = file->private_data;
+ int rev, ver;
+ char *buf;
+ int ret;
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_LSB_REG, &rev);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_MSB_REG, &ver);
+ if (ret)
+ return ret;
+
+ buf = kasprintf(GFP_KERNEL, "ver: 0x%x, rev: 0x%x\n", ver, rev);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations bno055_fw_version_ops = {
+ .open = simple_open,
+ .read = bno055_show_fw_version,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static void bno055_debugfs_remove(void *_priv)
+{
+ struct bno055_priv *priv = _priv;
+
+ debugfs_remove(priv->debugfs);
+ priv->debugfs = NULL;
+}
+
+static void bno055_debugfs_init(struct iio_dev *iio_dev)
+{
+ struct bno055_priv *priv = iio_priv(iio_dev);
+
+ priv->debugfs = debugfs_create_file("firmware_version", 0400,
+ iio_get_debugfs_dentry(iio_dev),
+ priv, &bno055_fw_version_ops);
+ if (!IS_ERR(priv->debugfs))
+ devm_add_action_or_reset(priv->dev, bno055_debugfs_remove,
+ priv);
+ if (IS_ERR_OR_NULL(priv->debugfs))
+ dev_warn(priv->dev, "failed to setup debugfs");
+}
+
+static IIO_DEVICE_ATTR_RW(fusion_enable, 0);
+static IIO_DEVICE_ATTR_RW(in_magn_calibration_fast_enable, 0);
+static IIO_DEVICE_ATTR_RW(in_accel_range_raw, 0);
+
+static IIO_DEVICE_ATTR_RO(in_accel_range_raw_available, 0);
+static IIO_DEVICE_ATTR_RO(sys_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(in_accel_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(in_gyro_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(in_magn_calibration_auto_status, 0);
+static IIO_DEVICE_ATTR_RO(serialnumber, 0);
+
+static struct attribute *bno055_attrs[] = {
+ &iio_dev_attr_in_accel_range_raw_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_range_raw.dev_attr.attr,
+ &iio_dev_attr_fusion_enable.dev_attr.attr,
+ &iio_dev_attr_in_magn_calibration_fast_enable.dev_attr.attr,
+ &iio_dev_attr_sys_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_in_accel_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_in_gyro_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_in_magn_calibration_auto_status.dev_attr.attr,
+ &iio_dev_attr_serialnumber.dev_attr.attr,
+ NULL
+};
+
+static BIN_ATTR_RO(calibration_data, BNO055_CALDATA_LEN);
+
+static struct bin_attribute *bno055_bin_attrs[] = {
+ &bin_attr_calibration_data,
+ NULL
+};
+
+static const struct attribute_group bno055_attrs_group = {
+ .attrs = bno055_attrs,
+ .bin_attrs = bno055_bin_attrs,
+};
+
+static const struct iio_info bno055_info = {
+ .read_raw_multi = bno055_read_raw_multi,
+ .read_avail = bno055_read_avail,
+ .write_raw = bno055_write_raw,
+ .attrs = &bno055_attrs_group,
+ .debugfs_reg_access = bno055_debugfs_reg_access,
+};
+
+/*
+ * Reads len samples from the HW, stores them in buf starting from buf_idx,
+ * and applies mask to cull (skip) unneeded samples.
+ * Updates buf_idx, incrementing it by the number of stored samples.
+ * Samples from the HW are transferred into buf, then an in-place copy is
+ * performed on buf in order to cull the samples that need to be skipped.
+ * This avoids copying the leading samples until we hit the first sample to
+ * skip, and also avoids an extra bounce buffer.
+ * buf must be able to hold len elements regardless of how many samples we
+ * are going to cull.
+ */
+static int bno055_scan_xfer(struct bno055_priv *priv,
+ int start_ch, int len, unsigned long mask,
+ __le16 *buf, int *buf_idx)
+{
+ const int base = BNO055_ACC_DATA_X_LSB_REG;
+ bool quat_in_read = false;
+ int buf_base = *buf_idx;
+ __le16 *dst, *src;
+ int offs_fixup = 0;
+ int xfer_len = len;
+ int ret;
+ int i, n;
+
+ if (!mask)
+ return 0;
+
+	/*
+	 * All channels are made up of one 16-bit sample, except for the
+	 * quaternion, which is made up of four 16-bit values.
+	 * For us the quaternion channel is just like four regular channels.
+	 * If our read starts past the quaternion, make sure to adjust the
+	 * starting offset; if the quaternion is contained in our scan, then
+	 * make sure to adjust the read length.
+	 */
+ if (start_ch > BNO055_SCAN_QUATERNION) {
+ start_ch += 3;
+ } else if ((start_ch <= BNO055_SCAN_QUATERNION) &&
+ ((start_ch + len) > BNO055_SCAN_QUATERNION)) {
+ quat_in_read = true;
+ xfer_len += 3;
+ }
+
+ ret = regmap_bulk_read(priv->regmap,
+ base + start_ch * sizeof(__le16),
+ buf + buf_base,
+ xfer_len * sizeof(__le16));
+ if (ret)
+ return ret;
+
+ for_each_set_bit(i, &mask, len) {
+ if (quat_in_read && ((start_ch + i) > BNO055_SCAN_QUATERNION))
+ offs_fixup = 3;
+
+ dst = buf + *buf_idx;
+ src = buf + buf_base + offs_fixup + i;
+
+ n = (start_ch + i == BNO055_SCAN_QUATERNION) ? 4 : 1;
+
+ if (dst != src)
+ memcpy(dst, src, n * sizeof(__le16));
+
+ *buf_idx += n;
+ }
+ return 0;
+}
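An editorial, user-space sketch of the cull-in-place step performed above (it omits the quaternion width special case; cull_in_place and its arguments are illustrative names, not driver API):

#include <stdint.h>
#include <stdio.h>

/*
 * buf holds len raw samples starting at *idx; keep only the samples whose
 * bit is set in mask, compacting them forward in place and advancing *idx.
 */
static void cull_in_place(uint16_t *buf, int *idx, int len, unsigned long mask)
{
	int base = *idx;
	int i;

	for (i = 0; i < len; i++) {
		if (!(mask & (1UL << i)))
			continue;
		if (*idx != base + i)
			buf[*idx] = buf[base + i];
		(*idx)++;
	}
}

int main(void)
{
	uint16_t buf[8] = { 10, 11, 12, 13, 14, 15 };	/* six raw samples */
	int idx = 0;
	int i;

	cull_in_place(buf, &idx, 6, 0x2d);	/* mask bits 0, 2, 3, 5 */
	for (i = 0; i < idx; i++)
		printf("%d ", buf[i]);
	printf("\n");	/* prints: 10 12 13 15 */
	return 0;
}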
+
+static irqreturn_t bno055_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio_dev = pf->indio_dev;
+ struct bno055_priv *priv = iio_priv(iio_dev);
+ int xfer_start, start, end, prev_end;
+ unsigned long mask;
+ int quat_extra_len;
+ bool first = true;
+ int buf_idx = 0;
+ bool thr_hit;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+	/*
+	 * Walk the bitmap and possibly perform several transfers.
+	 * Bitmap ones-fields that are separated by gaps <= xfer_burst_break_thr
+	 * will be included in the same transfer.
+	 * Every time the bitmap contains a gap wider than xfer_burst_break_thr
+	 * we split the transfer, skipping the gap.
+	 */
+ for_each_set_bitrange(start, end, iio_dev->active_scan_mask,
+ iio_dev->masklength) {
+ /*
+ * First transfer will start from the beginning of the first
+ * ones-field in the bitmap
+ */
+ if (first) {
+ xfer_start = start;
+ } else {
+			/*
+			 * We found the next ones-field; check whether to
+			 * include it in the current transfer or not (i.e.
+			 * whether to perform the current transfer now and
+			 * prepare for another one).
+			 */
+
+ /*
+ * In case the zeros-gap contains the quaternion bit,
+ * then its length is actually 4 words instead of 1
+ * (i.e. +3 wrt other channels).
+ */
+ quat_extra_len = ((start > BNO055_SCAN_QUATERNION) &&
+ (prev_end <= BNO055_SCAN_QUATERNION)) ? 3 : 0;
+
+ /* If the gap is wider than xfer_burst_break_thr then.. */
+ thr_hit = (start - prev_end + quat_extra_len) >
+ priv->xfer_burst_break_thr;
+
+ /*
+ * .. transfer all the data up to the gap. Then set the
+ * next transfer start index at right after the gap
+ * (i.e. at the start of this ones-field).
+ */
+ if (thr_hit) {
+ mask = *iio_dev->active_scan_mask >> xfer_start;
+ ret = bno055_scan_xfer(priv, xfer_start,
+ prev_end - xfer_start,
+ mask, priv->buf.chans, &buf_idx);
+ if (ret)
+ goto done;
+ xfer_start = start;
+ }
+ }
+ first = false;
+ prev_end = end;
+ }
+
+ /*
+ * We finished walking the bitmap; no more gaps to check for. Just
+ * perform the current transfer.
+ */
+ mask = *iio_dev->active_scan_mask >> xfer_start;
+ ret = bno055_scan_xfer(priv, xfer_start,
+ prev_end - xfer_start,
+ mask, priv->buf.chans, &buf_idx);
+
+ if (!ret)
+ iio_push_to_buffers_with_timestamp(iio_dev,
+ &priv->buf, pf->timestamp);
+done:
+ mutex_unlock(&priv->lock);
+ iio_trigger_notify_done(iio_dev->trig);
+ return IRQ_HANDLED;
+}
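The burst-splitting policy implemented above condenses to a few lines of plain C (an editorial sketch that ignores the quaternion width adjustment; split_bursts is an illustrative name): start a new transfer whenever the run of unset bits since the previous ones-field exceeds the threshold.

#include <stdio.h>

/* Print the [start, end) range of each transfer for a given scan mask. */
static void split_bursts(unsigned long mask, int nbits, int thr)
{
	int i, xfer_start = -1, prev_end = 0;

	for (i = 0; i < nbits; i++) {
		if (!(mask & (1UL << i)))
			continue;
		if (xfer_start < 0) {
			xfer_start = i;			/* first set bit: open a burst */
		} else if (i - prev_end > thr) {
			printf("burst [%d, %d)\n", xfer_start, prev_end);
			xfer_start = i;			/* gap too wide: split here */
		}
		prev_end = i + 1;
	}
	if (xfer_start >= 0)
		printf("burst [%d, %d)\n", xfer_start, prev_end);
}

int main(void)
{
	/* channels 0-2 and 8-10 enabled, threshold 3: two separate bursts */
	split_bursts(0x707, 19, 3);
	return 0;
}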
+
+static int bno055_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct bno055_priv *priv = iio_priv(indio_dev);
+ const unsigned long fusion_mask =
+ BIT(BNO055_SCAN_YAW) |
+ BIT(BNO055_SCAN_ROLL) |
+ BIT(BNO055_SCAN_PITCH) |
+ BIT(BNO055_SCAN_QUATERNION) |
+ BIT(BNO055_SCAN_LIA_X) |
+ BIT(BNO055_SCAN_LIA_Y) |
+ BIT(BNO055_SCAN_LIA_Z) |
+ BIT(BNO055_SCAN_GRAVITY_X) |
+ BIT(BNO055_SCAN_GRAVITY_Y) |
+ BIT(BNO055_SCAN_GRAVITY_Z);
+
+ if (priv->operation_mode == BNO055_OPR_MODE_AMG &&
+ bitmap_intersects(indio_dev->active_scan_mask, &fusion_mask,
+ _BNO055_SCAN_MAX))
+ return -EBUSY;
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops bno055_buffer_setup_ops = {
+ .preenable = bno055_buffer_preenable,
+};
+
+int bno055_probe(struct device *dev, struct regmap *regmap,
+ int xfer_burst_break_thr, bool sw_reset)
+{
+ const struct firmware *caldata = NULL;
+ struct bno055_priv *priv;
+ struct iio_dev *iio_dev;
+ char *fw_name_buf;
+ unsigned int val;
+ int rev, ver;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ iio_dev->name = "bno055";
+ priv = iio_priv(iio_dev);
+ mutex_init(&priv->lock);
+ priv->regmap = regmap;
+ priv->dev = dev;
+ priv->xfer_burst_break_thr = xfer_burst_break_thr;
+ priv->sw_reset = sw_reset;
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO\n");
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "clk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get CLK\n");
+
+ if (priv->reset_gpio) {
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(650000, 750000);
+ } else if (!sw_reset) {
+ dev_warn(dev, "No usable reset method; IMU may be unreliable\n");
+ }
+
+ ret = regmap_read(priv->regmap, BNO055_CHIP_ID_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val != BNO055_CHIP_ID_MAGIC)
+ dev_warn(dev, "Unrecognized chip ID 0x%x\n", val);
+
+	/*
+	 * In case we don't have a HW reset pin, we can still reset the chip via
+	 * a register write. This would be pointless if we can't even
+	 * communicate with the chip, or if the chip isn't the one we expect
+	 * (i.e. we don't write to unknown chips), so we perform the SW reset
+	 * only after the chip magic ID check.
+	 */
+ if (!priv->reset_gpio) {
+ ret = bno055_system_reset(priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_LSB_REG, &rev);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, BNO055_SW_REV_MSB_REG, &ver);
+ if (ret)
+ return ret;
+
+	/*
+	 * The stock FW version contains a bug (see the comment at the beginning
+	 * of this file) that causes the anglvel scale to change depending on
+	 * the chip range setting. We work around this, but we don't know what
+	 * other FW versions might do.
+	 */
+ if (ver != 0x3 || rev != 0x11)
+ dev_warn(dev, "Untested firmware version. Anglvel scale may not work as expected\n");
+
+ ret = regmap_bulk_read(priv->regmap, BNO055_UID_LOWER_REG,
+ priv->uid, BNO055_UID_LEN);
+ if (ret)
+ return ret;
+
+ /* Sensor calibration data */
+ fw_name_buf = kasprintf(GFP_KERNEL, BNO055_FW_UID_FMT,
+ BNO055_UID_LEN, priv->uid);
+ if (!fw_name_buf)
+ return -ENOMEM;
+
+ ret = request_firmware(&caldata, fw_name_buf, dev);
+ kfree(fw_name_buf);
+ if (ret)
+ ret = request_firmware(&caldata, BNO055_FW_GENERIC_NAME, dev);
+ if (ret) {
+		dev_notice(dev, "Calibration file load failed. See instructions in kernel Documentation/iio/bno055.rst\n");
+ ret = bno055_init(priv, NULL, 0);
+ } else {
+ ret = bno055_init(priv, caldata->data, caldata->size);
+ release_firmware(caldata);
+ }
+ if (ret)
+ return ret;
+
+ priv->operation_mode = BNO055_OPR_MODE_FUSION;
+ ret = bno055_operation_mode_do_set(priv, priv->operation_mode);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, bno055_uninit, priv);
+ if (ret)
+ return ret;
+
+ iio_dev->channels = bno055_channels;
+ iio_dev->num_channels = ARRAY_SIZE(bno055_channels);
+ iio_dev->info = &bno055_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(dev, iio_dev,
+ iio_pollfunc_store_time,
+ bno055_trigger_handler,
+ &bno055_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, iio_dev);
+ if (ret)
+ return ret;
+
+ bno055_debugfs_init(iio_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(bno055_probe, IIO_BNO055);
+
+MODULE_AUTHOR("Andrea Merello <andrea.merello@iit.it>");
+MODULE_DESCRIPTION("Bosch BNO055 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bno055/bno055.h b/drivers/iio/imu/bno055/bno055.h
new file mode 100644
index 000000000000..64f9fc95cebc
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __BNO055_H__
+#define __BNO055_H__
+
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct device;
+int bno055_probe(struct device *dev, struct regmap *regmap,
+ int xfer_burst_break_thr, bool sw_reset);
+extern const struct regmap_config bno055_regmap_config;
+
+#endif
diff --git a/drivers/iio/imu/bno055/bno055_i2c.c b/drivers/iio/imu/bno055/bno055_i2c.c
new file mode 100644
index 000000000000..c1bbc0fe34f9
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_i2c.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for I2C-interfaced Bosch BNO055 IMU.
+ *
+ * Copyright (C) 2021-2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ * Written by Andrea Merello <andrea.merello@iit.it>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bno055.h"
+
+#define BNO055_I2C_XFER_BURST_BREAK_THRESHOLD 3
+
+static int bno055_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &bno055_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap),
+ "Unable to init register map");
+
+ return bno055_probe(&client->dev, regmap,
+ BNO055_I2C_XFER_BURST_BREAK_THRESHOLD, true);
+}
+
+static const struct i2c_device_id bno055_i2c_id[] = {
+ {"bno055", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bno055_i2c_id);
+
+static const struct of_device_id __maybe_unused bno055_i2c_of_match[] = {
+ { .compatible = "bosch,bno055" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bno055_i2c_of_match);
+
+static struct i2c_driver bno055_driver = {
+ .driver = {
+ .name = "bno055-i2c",
+ .of_match_table = bno055_i2c_of_match,
+ },
+ .probe_new = bno055_i2c_probe,
+ .id_table = bno055_i2c_id,
+};
+module_i2c_driver(bno055_driver);
+
+MODULE_AUTHOR("Andrea Merello");
+MODULE_DESCRIPTION("Bosch BNO055 I2C interface");
+MODULE_IMPORT_NS(IIO_BNO055);
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
new file mode 100644
index 000000000000..57728a568471
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Serial line interface for Bosch BNO055 IMU (via serdev).
+ * This file implements serial communication up to the register read/write
+ * level.
+ *
+ * Copyright (C) 2021-2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ * Written by Andrea Merello <andrea.merello@iit.it>
+ *
+ * This driver is based on
+ * Plantower PMS7003 particulate matter sensor driver
+ * Which is
+ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/serdev.h>
+
+#include "bno055_ser_trace.h"
+#include "bno055.h"
+
+/*
+ * Register write commands have the following format
+ * +------+------+-----+-----+----- ... ----+
+ * | 0xAA | 0x00 | REG | LEN | payload[LEN] |
+ * +------+------+-----+-----+----- ... ----+
+ *
+ * Register write responses have the following format
+ * +------+----------+
+ * | 0xEE | ERRCODE  |
+ * +------+----------+
+ *
+ * .. except when writing the SYS_RST bit (i.e. triggering a system reset): if
+ * the IMU accepts the command, it resets without responding. We don't handle
+ * this (yet) here (so we tell the common bno055 code not to perform SW resets;
+ * the bno055 on a serial bus basically requires the HW reset pin).
+ *
+ * Register read commands have the following format
+ * +------+------+-----+-----+
+ * | 0xAA | 0x01 | REG | LEN |
+ * +------+------+-----+-----+
+ *
+ * Successful register read responses have the following format
+ * +------+-----+----- ... ----+
+ * | 0xBB | LEN | payload[LEN] |
+ * +------+-----+----- ... ----+
+ *
+ * Failed register read responses have the following format
+ * +------+--------+
+ * | 0xEE | ERRCODE| (ERRCODE always > 1)
+ * +------+--------+
+ *
+ * Error codes are
+ * 01: OK
+ * 02: read/write FAIL
+ * 04: invalid address
+ * 05: write on RO
+ * 06: wrong start byte
+ * 07: bus overrun
+ * 08: len too high
+ * 09: len too low
+ * 10: bus RX byte timeout (timeout is 30mS)
+ *
+ *
+ * **WORKAROUND ALERT**
+ *
+ * Serial communication seems very fragile: the BNO055 buffer seems to overflow
+ * very easily; the BNO055 seems able to sink a few bytes, then it needs a brief
+ * pause. On the other hand, it is also picky about timing: if there is a pause
+ * > 30mS between two bytes then the transaction fails (the IMU internal RX FSM
+ * resets).
+ *
+ * The BNO055 has also been seen failing to process commands when we send them
+ * too close to each other (or when it is somehow busy?).
+ *
+ * In particular I saw these scenarios:
+ * 1) If we send 2 bytes at a time, then the IMU never(?) overflows.
+ * 2) If we send 4 bytes at a time (i.e. the full header), then the IMU could
+ *    overflow, but it seems to sink all 4 bytes, then it returns an error.
+ * 3) If we send more than 4 bytes, the IMU could overflow, and I saw it sending
+ *    an error after 4 bytes are sent; we then have trouble synchronizing again,
+ *    because we are still sending data, and the IMU interprets it as the 1st
+ *    byte of a new command.
+ *
+ * While we must avoid case 3, we could send 4 bytes at a time and retry in case
+ * of failure; this seemed convenient for reads (which require TXing exactly 4
+ * bytes). However it has been seen that, depending on the IMU settings (e.g.
+ * LPF), failures become more or less frequent; in certain IMU configurations
+ * they are very rare, but in others we keep failing even after some 30 retries.
+ *
+ * So, we just split TXes into [2 bytes + delay] steps, and still keep an eye on
+ * the IMU response; in case it overflows (which is now unlikely), we retry.
+ */
+
+/*
+ * Read operation overhead:
+ * 4 bytes req + 2 bytes resp hdr.
+ * 6 bytes = 60 bits (considering 1 start + 1 stop bit).
+ * 60/115200 = ~520uS + about 2500uS delay -> ~3mS
+ * In 3mS we could read back about 34 bytes, which means 17 samples; so for
+ * scattered reads in which the gap is 17 samples or less it is still
+ * convenient to go for a burst.
+ * We also have to take into account the IMU response time - the IMU is often
+ * reasonably quick to respond, but sometimes it seems to be in some "critical
+ * section" in which it delays handling of the serial protocol. Because of this
+ * we round up to 22, which is the max number of samples, always bursting indeed.
+ */
+#define BNO055_SER_XFER_BURST_BREAK_THRESHOLD 22
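A quick editorial re-derivation of the estimate in the comment above (assuming 115200 baud and 10 bit times per byte, i.e. 8N1 framing):

#include <stdio.h>

/* Rough re-derivation of the read-overhead numbers quoted above. */
int main(void)
{
	double byte_us = 10.0 * 1e6 / 115200.0;		/* ~86.8 us per byte */
	double proto_us = 6 * byte_us;			/* 4-byte req + 2-byte resp hdr */
	double total_us = proto_us + 2500.0;		/* plus ~2.5 ms turnaround */
	double bytes_equiv = total_us / byte_us;	/* wire time worth of data */

	printf("overhead ~%.0f us, total ~%.0f us ~= %.0f bytes ~= %.0f samples\n",
	       proto_us, total_us, bytes_equiv, bytes_equiv / 2.0);
	return 0;
}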
+
+struct bno055_ser_priv {
+ enum {
+ CMD_NONE,
+ CMD_READ,
+ CMD_WRITE,
+ } expect_response;
+ int expected_data_len;
+ u8 *response_buf;
+
+ /**
+ * enum cmd_status - represent the status of a command sent to the HW.
+ * @STATUS_CRIT: The command failed: the serial communication failed.
+ * @STATUS_OK: The command executed successfully.
+ * @STATUS_FAIL: The command failed: HW responded with an error.
+ */
+ enum {
+ STATUS_CRIT = -1,
+ STATUS_OK = 0,
+ STATUS_FAIL = 1,
+ } cmd_status;
+
+	/*
+	 * Protects all the above fields, which are accessed on behalf of both
+	 * the serdev RX callback and the regmap side.
+	 */
+ struct mutex lock;
+
+	/* Only accessed in serdev RX callback context */
+ struct {
+ enum {
+ RX_IDLE,
+ RX_START,
+ RX_DATA,
+ } state;
+ int databuf_count;
+ int expected_len;
+ int type;
+ } rx;
+
+	/* Never accessed on behalf of the serdev RX callback context */
+ bool cmd_stale;
+
+ struct completion cmd_complete;
+ struct serdev_device *serdev;
+};
+
+static int bno055_ser_send_chunk(struct bno055_ser_priv *priv, const u8 *data, int len)
+{
+ int ret;
+
+ trace_send_chunk(len, data);
+ ret = serdev_device_write(priv->serdev, data, len, msecs_to_jiffies(25));
+ if (ret < 0)
+ return ret;
+
+ if (ret < len)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Send a read or write command.
+ * 'data' can be NULL (used in read case). 'len' parameter is always valid; in
+ * case 'data' is non-NULL then it must match 'data' size.
+ */
+static int bno055_ser_do_send_cmd(struct bno055_ser_priv *priv,
+ bool read, int addr, int len, const u8 *data)
+{
+ u8 hdr[] = {0xAA, read, addr, len};
+ int chunk_len;
+ int ret;
+
+ ret = bno055_ser_send_chunk(priv, hdr, 2);
+ if (ret)
+ goto fail;
+ usleep_range(2000, 3000);
+ ret = bno055_ser_send_chunk(priv, hdr + 2, 2);
+ if (ret)
+ goto fail;
+
+ if (read)
+ return 0;
+
+ while (len) {
+ chunk_len = min(len, 2);
+ usleep_range(2000, 3000);
+ ret = bno055_ser_send_chunk(priv, data, chunk_len);
+ if (ret)
+ goto fail;
+ data += chunk_len;
+ len -= chunk_len;
+ }
+
+ return 0;
+fail:
+ /* waiting more than 30mS should clear the BNO055 internal state */
+ usleep_range(40000, 50000);
+ return ret;
+}
+
+static int bno055_ser_send_cmd(struct bno055_ser_priv *priv,
+ bool read, int addr, int len, const u8 *data)
+{
+ const int retry_max = 5;
+ int retry = retry_max;
+ int ret = 0;
+
+	/*
+	 * In case the previous command was interrupted, we still need to wait
+	 * for it to complete before we can issue new commands.
+	 */
+ if (priv->cmd_stale) {
+ ret = wait_for_completion_interruptible_timeout(&priv->cmd_complete,
+ msecs_to_jiffies(100));
+ if (ret == -ERESTARTSYS)
+ return -ERESTARTSYS;
+
+ priv->cmd_stale = false;
+ /* if serial protocol broke, bail out */
+ if (priv->cmd_status == STATUS_CRIT)
+ return -EIO;
+ }
+
+	/*
+	 * Try to convince the IMU to cooperate: as explained in the comments
+	 * at the top of this file, the IMU could also refuse the command (i.e.
+	 * it is not ready yet); retry in this case.
+	 */
+ do {
+ mutex_lock(&priv->lock);
+ priv->expect_response = read ? CMD_READ : CMD_WRITE;
+ reinit_completion(&priv->cmd_complete);
+ mutex_unlock(&priv->lock);
+
+ if (retry != retry_max)
+ trace_cmd_retry(read, addr, retry_max - retry);
+ ret = bno055_ser_do_send_cmd(priv, read, addr, len, data);
+ if (ret)
+ continue;
+
+ ret = wait_for_completion_interruptible_timeout(&priv->cmd_complete,
+ msecs_to_jiffies(100));
+ if (ret == -ERESTARTSYS) {
+ priv->cmd_stale = true;
+ return -ERESTARTSYS;
+ }
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (priv->cmd_status == STATUS_OK)
+ return 0;
+ if (priv->cmd_status == STATUS_CRIT)
+ return -EIO;
+
+ /* loop in case priv->cmd_status == STATUS_FAIL */
+ } while (--retry);
+
+ if (ret < 0)
+ return ret;
+ if (priv->cmd_status == STATUS_FAIL)
+ return -EINVAL;
+ return 0;
+}
+
+static int bno055_ser_write_reg(void *context, const void *_data, size_t count)
+{
+ const u8 *data = _data;
+ struct bno055_ser_priv *priv = context;
+
+ if (count < 2) {
+ dev_err(&priv->serdev->dev, "Invalid write count %zu", count);
+ return -EINVAL;
+ }
+
+ trace_write_reg(data[0], data[1]);
+ return bno055_ser_send_cmd(priv, 0, data[0], count - 1, data + 1);
+}
+
+static int bno055_ser_read_reg(void *context,
+ const void *_reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ int reg_addr;
+ const u8 *reg = _reg;
+ struct bno055_ser_priv *priv = context;
+
+ if (val_size > 128) {
+ dev_err(&priv->serdev->dev, "Invalid read valsize %zu", val_size);
+ return -EINVAL;
+ }
+
+ reg_addr = *reg;
+ trace_read_reg(reg_addr, val_size);
+ mutex_lock(&priv->lock);
+ priv->expected_data_len = val_size;
+ priv->response_buf = val;
+ mutex_unlock(&priv->lock);
+
+ ret = bno055_ser_send_cmd(priv, 1, reg_addr, val_size, NULL);
+
+ mutex_lock(&priv->lock);
+ priv->response_buf = NULL;
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/*
+ * Handler for received data; this is called from the receiver callback
+ * whenever it gets a packet from the serial bus. The status tells us whether
+ * the packet is valid (i.e. header ok && received payload len consistent wrt
+ * the header). It's now our responsibility to check whether this is what we
+ * expected, or whether we got some unexpected, yet valid, packet.
+ */
+static void bno055_ser_handle_rx(struct bno055_ser_priv *priv, int status)
+{
+ mutex_lock(&priv->lock);
+ switch (priv->expect_response) {
+ case CMD_NONE:
+ dev_warn(&priv->serdev->dev, "received unexpected, yet valid, data from sensor");
+ mutex_unlock(&priv->lock);
+ return;
+
+ case CMD_READ:
+ priv->cmd_status = status;
+ if (status == STATUS_OK &&
+ priv->rx.databuf_count != priv->expected_data_len) {
+ /*
+ * If we got here, then the lower layer serial protocol
+ * seems consistent with itself; if we got an unexpected
+ * amount of data then signal it as a non critical error
+ */
+ priv->cmd_status = STATUS_FAIL;
+ dev_warn(&priv->serdev->dev,
+ "received an unexpected amount of, yet valid, data from sensor");
+ }
+ break;
+
+ case CMD_WRITE:
+ priv->cmd_status = status;
+ break;
+ }
+
+ priv->expect_response = CMD_NONE;
+ mutex_unlock(&priv->lock);
+ complete(&priv->cmd_complete);
+}
+
+/*
+ * Serdev receiver FSM. This tracks the serial communication and parses the
+ * header. It pushes packets to bno055_ser_handle_rx(), reporting failures
+ * (i.e. malformed packets) when they occur.
+ * Ideally it wouldn't know anything about the upper layer (i.e. whether this
+ * is the packet we were really expecting), but since we copy the payload into
+ * the receiver buffer (which is not valid when e.g. we don't expect data), we
+ * snoop a bit into the upper layer.
+ * Also, we assume we RX one packet at a time (i.e. the HW doesn't send
+ * anything unless we ask for it AND we don't queue more than one request at a
+ * time).
+ */
+static int bno055_ser_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ int status;
+ struct bno055_ser_priv *priv = serdev_device_get_drvdata(serdev);
+ int remaining = size;
+
+ if (size == 0)
+ return 0;
+
+ trace_recv(size, buf);
+ switch (priv->rx.state) {
+ case RX_IDLE:
+ /*
+ * New packet.
+ * Check for its 1st byte that identifies the pkt type.
+ */
+ if (buf[0] != 0xEE && buf[0] != 0xBB) {
+ dev_err(&priv->serdev->dev,
+ "Invalid packet start %x", buf[0]);
+ bno055_ser_handle_rx(priv, STATUS_CRIT);
+ break;
+ }
+ priv->rx.type = buf[0];
+ priv->rx.state = RX_START;
+ remaining--;
+ buf++;
+ priv->rx.databuf_count = 0;
+ fallthrough;
+
+ case RX_START:
+ /*
+ * Packet RX in progress; we expect either a 1-byte len or a 1-byte
+ * status, depending on the packet type.
+ */
+ if (remaining == 0)
+ break;
+
+ if (priv->rx.type == 0xEE) {
+ if (remaining > 1) {
+ dev_err(&priv->serdev->dev, "EE pkt. Extra data received");
+ status = STATUS_CRIT;
+ } else {
+ status = (buf[0] == 1) ? STATUS_OK : STATUS_FAIL;
+ }
+ bno055_ser_handle_rx(priv, status);
+ priv->rx.state = RX_IDLE;
+ break;
+
+ } else {
+ /* priv->rx.type == 0xBB */
+ priv->rx.state = RX_DATA;
+ priv->rx.expected_len = buf[0];
+ remaining--;
+ buf++;
+ }
+ fallthrough;
+
+ case RX_DATA:
+ /* Header parsed; now receiving packet data payload */
+ if (remaining == 0)
+ break;
+
+ if (priv->rx.databuf_count + remaining > priv->rx.expected_len) {
+ /*
+ * This is an inconsistency in serial protocol, we lost
+ * sync and we don't know how to handle further data
+ */
+ dev_err(&priv->serdev->dev, "BB pkt. Extra data received");
+ bno055_ser_handle_rx(priv, STATUS_CRIT);
+ priv->rx.state = RX_IDLE;
+ break;
+ }
+
+ mutex_lock(&priv->lock);
+ /*
+ * response_buf is NULL e.g. when the read cmd is stale or when
+ * no read cmd is actually pending.
+ */
+ if (priv->response_buf &&
+ /*
+ * Snoop on the upper layer protocol stuff to make sure not
+ * to write to invalid memory. Apart from this, let the
+ * upper layer manage any inconsistency wrt the expected
+ * data len, as long as the serial protocol is consistent
+ * with itself (i.e. the response header is consistent with
+ * the received response len).
+ */
+ (priv->rx.databuf_count + remaining <= priv->expected_data_len))
+ memcpy(priv->response_buf + priv->rx.databuf_count,
+ buf, remaining);
+ mutex_unlock(&priv->lock);
+
+ priv->rx.databuf_count += remaining;
+
+ /*
+ * Reached expected len advertised by the IMU for the current
+ * packet. Pass it to the upper layer (for us it is just valid).
+ */
+ if (priv->rx.databuf_count == priv->rx.expected_len) {
+ bno055_ser_handle_rx(priv, STATUS_OK);
+ priv->rx.state = RX_IDLE;
+ }
+ break;
+ }
+
+ return size;
+}
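For reference, the sketch below gives a minimal single-buffer view of the two response formats parsed by the FSM above. This is an editorial illustration, not part of the patch, and it assumes a whole packet arrives in one chunk, which the real driver cannot rely on (hence the state machine).

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative sketch only: decode one complete response as handled by
 * bno055_ser_receive_buf(). Returns the payload length for data packets,
 * 0 for a successful ack, and -1 for failures/malformed input.
 */
static int bno055_example_decode(const uint8_t *buf, size_t len)
{
        if (len == 2 && buf[0] == 0xEE)         /* ack/status packet */
                return buf[1] == 1 ? 0 : -1;    /* 0x01 means OK */

        if (len >= 2 && buf[0] == 0xBB &&       /* data packet: len + payload */
            len == (size_t)buf[1] + 2)
                return buf[1];

        return -1;                              /* malformed or partial */
}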
+
+static const struct serdev_device_ops bno055_ser_serdev_ops = {
+ .receive_buf = bno055_ser_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static struct regmap_bus bno055_ser_regmap_bus = {
+ .write = bno055_ser_write_reg,
+ .read = bno055_ser_read_reg,
+};
+
+static int bno055_ser_probe(struct serdev_device *serdev)
+{
+ struct bno055_ser_priv *priv;
+ struct regmap *regmap;
+ int ret;
+
+ priv = devm_kzalloc(&serdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ serdev_device_set_drvdata(serdev, priv);
+ priv->serdev = serdev;
+ mutex_init(&priv->lock);
+ init_completion(&priv->cmd_complete);
+
+ serdev_device_set_client_ops(serdev, &bno055_ser_serdev_ops);
+ ret = devm_serdev_device_open(&serdev->dev, serdev);
+ if (ret)
+ return ret;
+
+ if (serdev_device_set_baudrate(serdev, 115200) != 115200) {
+ dev_err(&serdev->dev, "Cannot set required baud rate");
+ return -EIO;
+ }
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret) {
+ dev_err(&serdev->dev, "Cannot set required parity setting");
+ return ret;
+ }
+ serdev_device_set_flow_control(serdev, false);
+
+ regmap = devm_regmap_init(&serdev->dev, &bno055_ser_regmap_bus,
+ priv, &bno055_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&serdev->dev, PTR_ERR(regmap),
+ "Unable to init register map");
+
+ return bno055_probe(&serdev->dev, regmap,
+ BNO055_SER_XFER_BURST_BREAK_THRESHOLD, false);
+}
+
+static const struct of_device_id bno055_ser_of_match[] = {
+ { .compatible = "bosch,bno055" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bno055_ser_of_match);
+
+static struct serdev_device_driver bno055_ser_driver = {
+ .driver = {
+ .name = "bno055-ser",
+ .of_match_table = bno055_ser_of_match,
+ },
+ .probe = bno055_ser_probe,
+};
+module_serdev_device_driver(bno055_ser_driver);
+
+MODULE_AUTHOR("Andrea Merello <andrea.merello@iit.it>");
+MODULE_DESCRIPTION("Bosch BNO055 serdev interface");
+MODULE_IMPORT_NS(IIO_BNO055);
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bno055/bno055_ser_trace.c b/drivers/iio/imu/bno055/bno055_ser_trace.c
new file mode 100644
index 000000000000..48397b66daef
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_ser_trace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * bno055_ser Trace Support
+ * Copyright (C) 2022 Istituto Italiano di Tecnologia
+ * Electronic Design Laboratory
+ *
+ * Based on:
+ * Device core Trace Support
+ * Copyright (C) 2021, Intel Corporation
+ */
+
+#define CREATE_TRACE_POINTS
+#include "bno055_ser_trace.h"
diff --git a/drivers/iio/imu/bno055/bno055_ser_trace.h b/drivers/iio/imu/bno055/bno055_ser_trace.h
new file mode 100644
index 000000000000..7d9eae166eec
--- /dev/null
+++ b/drivers/iio/imu/bno055/bno055_ser_trace.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(__BNO055_SERDEV_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
+#define __BNO055_SERDEV_TRACE_H__
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bno055_ser
+
+TRACE_EVENT(send_chunk,
+ TP_PROTO(int len, const u8 *data),
+ TP_ARGS(len, data),
+ TP_STRUCT__entry(
+ __field(int, len)
+ __dynamic_array(u8, chunk, len)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ memcpy(__get_dynamic_array(chunk),
+ data, __entry->len);
+ ),
+ TP_printk("len: %d, data: = %*ph",
+ __entry->len, __entry->len, __get_dynamic_array(chunk)
+ )
+);
+
+TRACE_EVENT(cmd_retry,
+ TP_PROTO(bool read, int addr, int retry),
+ TP_ARGS(read, addr, retry),
+ TP_STRUCT__entry(
+ __field(bool, read)
+ __field(int, addr)
+ __field(int, retry)
+ ),
+ TP_fast_assign(
+ __entry->read = read;
+ __entry->addr = addr;
+ __entry->retry = retry;
+ ),
+ TP_printk("%s addr 0x%x retry #%d",
+ __entry->read ? "read" : "write",
+ __entry->addr, __entry->retry
+ )
+);
+
+TRACE_EVENT(write_reg,
+ TP_PROTO(u8 addr, u8 value),
+ TP_ARGS(addr, value),
+ TP_STRUCT__entry(
+ __field(u8, addr)
+ __field(u8, value)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->value = value;
+ ),
+ TP_printk("reg 0x%x = 0x%x",
+ __entry->addr, __entry->value
+ )
+);
+
+TRACE_EVENT(read_reg,
+ TP_PROTO(int addr, size_t len),
+ TP_ARGS(addr, len),
+ TP_STRUCT__entry(
+ __field(int, addr)
+ __field(size_t, len)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->len = len;
+ ),
+ TP_printk("reg 0x%x (len %zu)",
+ __entry->addr, __entry->len
+ )
+);
+
+TRACE_EVENT(recv,
+ TP_PROTO(size_t len, const unsigned char *buf),
+ TP_ARGS(len, buf),
+ TP_STRUCT__entry(
+ __field(size_t, len)
+ __dynamic_array(unsigned char, buf, len)
+ ),
+ TP_fast_assign(
+ __entry->len = len;
+ memcpy(__get_dynamic_array(buf),
+ buf, __entry->len);
+ ),
+ TP_printk("len: %zu, data: = %*ph",
+ __entry->len, (int)__entry->len, __get_dynamic_array(buf)
+ )
+);
+
+#endif /* __BNO055_SERDEV_TRACE_H__ || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE bno055_ser_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 9b4298095d3f..f7bce428d9eb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -65,7 +65,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
sub_elem = &elem->package.elements[j];
if (sub_elem->type == ACPI_TYPE_STRING)
- strlcpy(info->type, sub_elem->string.pointer,
+ strscpy(info->type, sub_elem->string.pointer,
sizeof(info->type));
else if (sub_elem->type == ACPI_TYPE_INTEGER) {
if (sub_elem->integer.value != client->addr) {
@@ -158,7 +158,7 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
char *name;
info.addr = secondary;
- strlcpy(info.type, dev_name(&adev->dev),
+ strscpy(info.type, dev_name(&adev->dev),
sizeof(info.type));
name = strchr(info.type, ':');
if (name)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 2aa647704a79..14255a918eb1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -157,7 +157,7 @@ out_del_mux:
return result;
}
-static int inv_mpu_remove(struct i2c_client *client)
+static void inv_mpu_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
@@ -166,8 +166,6 @@ static int inv_mpu_remove(struct i2c_client *client)
inv_mpu_acpi_delete_mux_client(client);
i2c_mux_del_adapters(st->muxc);
}
-
- return 0;
}
/*
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index ec23b1ee472b..b10c0dcac0bb 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1418,7 +1418,7 @@ err_chip_uninit:
return ret;
}
-static int kmx61_remove(struct i2c_client *client)
+static void kmx61_remove(struct i2c_client *client)
{
struct kmx61_data *data = i2c_get_clientdata(client);
@@ -1439,8 +1439,6 @@ static int kmx61_remove(struct i2c_client *client)
mutex_lock(&data->lock);
kmx61_set_mode(data, KMX61_ALL_STBY, KMX61_ACC | KMX61_MAG, true);
mutex_unlock(&data->lock);
-
- return 0;
}
static int kmx61_suspend(struct device *dev)
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index fefd0b939100..2ed2b3f40c0b 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -12,7 +12,7 @@ config IIO_ST_LSM6DSX
Say yes here to build support for STMicroelectronics LSM6DSx imu
sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
- lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop,
+ lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, lsm6dstx,
the accelerometer/gyroscope of lsm9ds1 and lsm6dst.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index a86dd29a4738..6b57d47be69e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -32,6 +32,7 @@
#define ST_LSM6DST_DEV_NAME "lsm6dst"
#define ST_LSM6DSOP_DEV_NAME "lsm6dsop"
#define ST_ASM330LHHX_DEV_NAME "asm330lhhx"
+#define ST_LSM6DSTX_DEV_NAME "lsm6dstx"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID,
@@ -51,6 +52,7 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DST_ID,
ST_LSM6DSOP_ID,
ST_ASM330LHHX_ID,
+ ST_LSM6DSTX_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index c7d3730ab1c5..e49f2d120ed3 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -15,7 +15,7 @@
* value of the decimation factor and ODR set for each FIFO data set.
*
* LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
- * LSM6DST/LSM6DSOP:
+ * LSM6DST/LSM6DSOP/LSM6DSTX:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Each sample is queued with a tag (1B) indicating data
* source (gyroscope, accelerometer, hw timer).
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b5e4a4113652..f8bbb005718e 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -26,7 +26,8 @@
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP:
+ * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP/
+ * LSM6DSTX:
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
@@ -791,6 +792,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.hw_id = ST_ASM330LHHX_ID,
.name = ST_ASM330LHHX_DEV_NAME,
.wai = 0x6b,
+ }, {
+ .hw_id = ST_LSM6DSTX_ID,
+ .name = ST_LSM6DSTX_DEV_NAME,
+ .wai = 0x6d,
},
},
.channels = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 2ea34c0d3a8c..307c8c436862 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -105,6 +105,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,asm330lhhx",
.data = (void *)ST_ASM330LHHX_ID,
},
+ {
+ .compatible = "st,lsm6dstx",
+ .data = (void *)ST_LSM6DSTX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -127,6 +131,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
{ ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
+ { ST_LSM6DSTX_DEV_NAME, ST_LSM6DSTX_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index 6a8883f022a8..6a4eecf4bb05 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -105,6 +105,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,asm330lhhx",
.data = (void *)ST_ASM330LHHX_ID,
},
+ {
+ .compatible = "st,lsm6dstx",
+ .data = (void *)ST_LSM6DSTX_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -127,6 +131,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DST_DEV_NAME, ST_LSM6DST_ID },
{ ST_LSM6DSOP_DEV_NAME, ST_LSM6DSOP_ID },
{ ST_ASM330LHHX_DEV_NAME, ST_ASM330LHHX_ID },
+ { ST_LSM6DSTX_DEV_NAME, ST_LSM6DSTX_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index acc2b6c05d57..228598b82a2f 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -843,8 +843,8 @@ static int iio_verify_update(struct iio_dev *indio_dev,
* to verify.
*/
if (remove_buffer && !insert_buffer &&
- list_is_singular(&iio_dev_opaque->buffer_list))
- return 0;
+ list_is_singular(&iio_dev_opaque->buffer_list))
+ return 0;
modes = indio_dev->modes;
@@ -940,6 +940,7 @@ struct iio_demux_table {
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
struct iio_demux_table *p, *q;
+
list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
list_del(&p->l);
kfree(p);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 0f4dbda3b9d3..151ff3993354 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -134,6 +134,12 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_ETHANOL] = "ethanol",
[IIO_MOD_H2] = "h2",
[IIO_MOD_O2] = "o2",
+ [IIO_MOD_LINEAR_X] = "linear_x",
+ [IIO_MOD_LINEAR_Y] = "linear_y",
+ [IIO_MOD_LINEAR_Z] = "linear_z",
+ [IIO_MOD_PITCH] = "pitch",
+ [IIO_MOD_YAW] = "yaw",
+ [IIO_MOD_ROLL] = "roll",
};
/* relies on pairs of these shared then separate */
@@ -168,6 +174,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
+ [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
};
/**
* iio_device_id() - query the unique ID for the device
@@ -236,6 +243,7 @@ static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
@@ -447,6 +455,7 @@ static const struct file_operations iio_debugfs_reg_fops = {
static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+
debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}
@@ -1021,6 +1030,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
int ret = 0;
char *name = NULL;
char *full_postfix;
+
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
@@ -1299,8 +1309,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SEPARATE,
- &chan->
- info_mask_separate_available);
+ &chan->info_mask_separate_available);
if (ret < 0)
return ret;
attrcount += ret;
@@ -1314,8 +1323,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
IIO_SHARED_BY_TYPE,
- &chan->
- info_mask_shared_by_type_available);
+ &chan->info_mask_shared_by_type_available);
if (ret < 0)
return ret;
attrcount += ret;
@@ -1355,6 +1363,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
if (chan->ext_info) {
unsigned int i = 0;
+
for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
ret = __iio_add_chan_devattr(ext_info->name,
chan,
@@ -1403,6 +1412,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
return sysfs_emit(buf, "%s\n", indio_dev->name);
}
@@ -1412,6 +1422,7 @@ static ssize_t label_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
return sysfs_emit(buf, "%s\n", indio_dev->label);
}
@@ -1565,7 +1576,7 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_clear_attrs;
}
- /* Copy across original attributes */
+ /* Copy across original attributes, and point to original binary attributes */
if (indio_dev->info->attrs) {
memcpy(iio_dev_opaque->chan_attr_group.attrs,
indio_dev->info->attrs->attrs,
@@ -1573,6 +1584,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
*attrcount_orig);
iio_dev_opaque->chan_attr_group.is_visible =
indio_dev->info->attrs->is_visible;
+ iio_dev_opaque->chan_attr_group.bin_attrs =
+ indio_dev->info->attrs->bin_attrs;
}
attrn = attrcount_orig;
/* Add all elements from the list. */
@@ -1621,6 +1634,8 @@ static void iio_dev_release(struct device *device)
iio_device_detach_buffers(indio_dev);
+ lockdep_unregister_key(&iio_dev_opaque->mlock_key);
+
ida_free(&iio_ida, iio_dev_opaque->id);
kfree(iio_dev_opaque);
}
@@ -1680,6 +1695,9 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
+ lockdep_register_key(&iio_dev_opaque->mlock_key);
+ lockdep_set_class(&indio_dev->mlock, &iio_dev_opaque->mlock_key);
+
return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);
@@ -1777,6 +1795,7 @@ static int iio_chrdev_release(struct inode *inode, struct file *filp)
struct iio_dev_opaque *iio_dev_opaque =
container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
+
kfree(ib);
clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
iio_device_put(indio_dev);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index b5e059e15b0a..3d78da2531a9 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -231,12 +231,15 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
+ [IIO_EV_TYPE_GESTURE] = "gesture",
};
static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_EITHER] = "either",
[IIO_EV_DIR_RISING] = "rising",
- [IIO_EV_DIR_FALLING] = "falling"
+ [IIO_EV_DIR_FALLING] = "falling",
+ [IIO_EV_DIR_SINGLETAP] = "singletap",
+ [IIO_EV_DIR_DOUBLETAP] = "doubletap",
};
static const char * const iio_ev_info_text[] = {
@@ -247,6 +250,8 @@ static const char * const iio_ev_info_text[] = {
[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
[IIO_EV_INFO_TIMEOUT] = "timeout",
+ [IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout",
+ [IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay",
};
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
@@ -354,9 +359,10 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
enum iio_shared_by shared_by, const unsigned long *mask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- ssize_t (*show)(struct device *, struct device_attribute *, char *);
- ssize_t (*store)(struct device *, struct device_attribute *,
- const char *, size_t);
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
unsigned int attrcount = 0;
unsigned int i;
char *postfix;
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index b78814d869b7..6885a186fe27 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -50,6 +50,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iio_trigger *trig = to_iio_trigger(dev);
+
return sysfs_emit(buf, "%s\n", trig->name);
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index df74765d33dc..872fd5c24147 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -5,9 +5,9 @@
*/
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
@@ -45,13 +45,13 @@ int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
int i = 0, ret = 0;
struct iio_map_internal *mapi;
- if (maps == NULL)
+ if (!maps)
return 0;
mutex_lock(&iio_map_list_lock);
- while (maps[i].consumer_dev_name != NULL) {
+ while (maps[i].consumer_dev_name) {
mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
- if (mapi == NULL) {
+ if (!mapi) {
ret = -ENOMEM;
goto error_ret;
}
@@ -69,7 +69,6 @@ error_ret:
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
-
/*
* Remove all map entries associated with the given iio device
*/
@@ -117,15 +116,8 @@ static const struct iio_chan_spec
return chan;
}
-#ifdef CONFIG_OF
-
-static int iio_dev_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data && dev->type == &iio_device_type;
-}
-
/**
- * __of_iio_simple_xlate - translate iiospec to the IIO channel index
+ * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
* @indio_dev: pointer to the iio_dev structure
* @iiospec: IIO specifier as found in the device tree
*
@@ -134,14 +126,14 @@ static int iio_dev_node_match(struct device *dev, const void *data)
* whether IIO index is less than num_channels (that is specified in the
* iio_dev).
*/
-static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec)
+static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec)
{
- if (!iiospec->args_count)
+ if (!iiospec->nargs)
return 0;
if (iiospec->args[0] >= indio_dev->num_channels) {
- dev_err(&indio_dev->dev, "invalid channel index %u\n",
+ dev_err(&indio_dev->dev, "invalid channel index %llu\n",
iiospec->args[0]);
return -EINVAL;
}
@@ -149,32 +141,33 @@ static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
return iiospec->args[0];
}
-static int __of_iio_channel_get(struct iio_channel *channel,
- struct device_node *np, int index)
+static int __fwnode_iio_channel_get(struct iio_channel *channel,
+ struct fwnode_handle *fwnode, int index)
{
+ struct fwnode_reference_args iiospec;
struct device *idev;
struct iio_dev *indio_dev;
int err;
- struct of_phandle_args iiospec;
- err = of_parse_phandle_with_args(np, "io-channels",
- "#io-channel-cells",
- index, &iiospec);
+ err = fwnode_property_get_reference_args(fwnode, "io-channels",
+ "#io-channel-cells", 0,
+ index, &iiospec);
if (err)
return err;
- idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
- iio_dev_node_match);
- of_node_put(iiospec.np);
- if (idev == NULL)
+ idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
+ if (!idev) {
+ fwnode_handle_put(iiospec.fwnode);
return -EPROBE_DEFER;
+ }
indio_dev = dev_to_iio_dev(idev);
channel->indio_dev = indio_dev;
- if (indio_dev->info->of_xlate)
- index = indio_dev->info->of_xlate(indio_dev, &iiospec);
+ if (indio_dev->info->fwnode_xlate)
+ index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
else
- index = __of_iio_simple_xlate(indio_dev, &iiospec);
+ index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
+ fwnode_handle_put(iiospec.fwnode);
if (index < 0)
goto err_put;
channel->channel = &indio_dev->channels[index];
@@ -186,7 +179,8 @@ err_put:
return index;
}
-static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
+static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
+ int index)
{
struct iio_channel *channel;
int err;
@@ -195,10 +189,10 @@ static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
return ERR_PTR(-EINVAL);
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (channel == NULL)
+ if (!channel)
return ERR_PTR(-ENOMEM);
- err = __of_iio_channel_get(channel, np, index);
+ err = __fwnode_iio_channel_get(channel, fwnode, index);
if (err)
goto err_free_channel;
@@ -209,74 +203,116 @@ err_free_channel:
return ERR_PTR(err);
}
-struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
- const char *name)
+static struct iio_channel *
+__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
- struct iio_channel *chan = NULL;
-
- /* Walk up the tree of devices looking for a matching iio channel */
- while (np) {
- int index = 0;
-
+ struct iio_channel *chan;
+ int index = 0;
+
+ /*
+ * For named iio channels, first look up the name in the
+ * "io-channel-names" property. If it cannot be found, the
+ * index will be an error code, and fwnode_iio_channel_get()
+ * will fail.
+ */
+ if (name)
+ index = fwnode_property_match_string(fwnode, "io-channel-names",
+ name);
+
+ chan = fwnode_iio_channel_get(fwnode, index);
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+ if (name) {
+ if (index >= 0) {
+ pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
+ fwnode, name, index);
+ /*
+ * In this case, we found 'name' in 'io-channel-names'
+ * but somehow still failed, so we should not proceed
+ * with any other lookup. Hence, explicitly return -EINVAL
+ * (maybe not the best error code) so that the caller
+ * won't do a system lookup.
+ */
+ return ERR_PTR(-EINVAL);
+ }
/*
- * For named iio channels, first look up the name in the
- * "io-channel-names" property. If it cannot be found, the
- * index will be an error code, and of_iio_channel_get()
- * will fail.
+ * If index < 0, then fwnode_property_get_reference_args() fails
+ * with -EINVAL or -ENOENT (ACPI case) which is expected. We
+ * should not proceed if we get any other error.
*/
- if (name)
- index = of_property_match_string(np, "io-channel-names",
- name);
- chan = of_iio_channel_get(np, index);
- if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
- break;
- else if (name && index >= 0) {
- pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
- np, name ? name : "", index);
- return NULL;
- }
-
+ if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
+ return chan;
+ } else if (PTR_ERR(chan) != -ENOENT) {
/*
- * No matching IIO channel found on this node.
- * If the parent node has a "io-channel-ranges" property,
- * then we can try one of its channels.
+ * If !name, then we should only proceed with the lookup if
+ * fwnode_property_get_reference_args() returns -ENOENT.
*/
- np = np->parent;
- if (np && !of_get_property(np, "io-channel-ranges", NULL))
- return NULL;
+ return chan;
}
- return chan;
+ /* so we continue the lookup */
+ return ERR_PTR(-ENODEV);
}
-EXPORT_SYMBOL_GPL(of_iio_channel_get_by_name);
-static struct iio_channel *of_iio_channel_get_all(struct device *dev)
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name)
{
+ struct fwnode_handle *parent;
+ struct iio_channel *chan;
+
+ /* Walk up the tree of devices looking for a matching iio channel */
+ chan = __fwnode_iio_channel_get_by_name(fwnode, name);
+ if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
+ return chan;
+
+ /*
+ * No matching IIO channel found on this node.
+ * If the parent node has a "io-channel-ranges" property,
+ * then we can try one of its channels.
+ */
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (!fwnode_property_present(parent, "io-channel-ranges")) {
+ fwnode_handle_put(parent);
+ return ERR_PTR(-ENODEV);
+ }
+
+ chan = __fwnode_iio_channel_get_by_name(fwnode, name);
+ if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
+ fwnode_handle_put(parent);
+ return chan;
+ }
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
+
+static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct iio_channel *chans;
int i, mapind, nummaps = 0;
int ret;
do {
- ret = of_parse_phandle_with_args(dev->of_node,
- "io-channels",
- "#io-channel-cells",
- nummaps, NULL);
+ ret = fwnode_property_get_reference_args(fwnode, "io-channels",
+ "#io-channel-cells", 0,
+ nummaps, NULL);
if (ret < 0)
break;
} while (++nummaps);
- if (nummaps == 0) /* no error, return NULL to search map table */
- return NULL;
+ if (nummaps == 0)
+ return ERR_PTR(-ENODEV);
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
- if (chans == NULL)
+ if (!chans)
return ERR_PTR(-ENOMEM);
- /* Search for OF matches */
+ /* Search for FW matches */
for (mapind = 0; mapind < nummaps; mapind++) {
- ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
- mapind);
+ ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
if (ret)
goto error_free_chans;
}
@@ -289,15 +325,6 @@ error_free_chans:
return ERR_PTR(ret);
}
-#else /* CONFIG_OF */
-
-static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
-{
- return NULL;
-}
-
-#endif /* CONFIG_OF */
-
static struct iio_channel *iio_channel_get_sys(const char *name,
const char *channel_name)
{
@@ -305,7 +332,7 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
struct iio_channel *channel;
int err;
- if (name == NULL && channel_name == NULL)
+ if (!(name || channel_name))
return ERR_PTR(-ENODEV);
/* first find matching entry the channel map */
@@ -320,11 +347,11 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
break;
}
mutex_unlock(&iio_map_list_lock);
- if (c == NULL)
+ if (!c)
return ERR_PTR(-ENODEV);
channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (channel == NULL) {
+ if (!channel) {
err = -ENOMEM;
goto error_no_mem;
}
@@ -336,7 +363,7 @@ static struct iio_channel *iio_channel_get_sys(const char *name,
iio_chan_spec_from_name(channel->indio_dev,
c->map->adc_channel_label);
- if (channel->channel == NULL) {
+ if (!channel->channel) {
err = -EINVAL;
goto error_no_chan;
}
@@ -358,9 +385,9 @@ struct iio_channel *iio_channel_get(struct device *dev,
struct iio_channel *channel;
if (dev) {
- channel = of_iio_channel_get_by_name(dev->of_node,
- channel_name);
- if (channel != NULL)
+ channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
+ channel_name);
+ if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
return channel;
}
@@ -400,14 +427,14 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
-struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
- struct device_node *np,
- const char *channel_name)
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *channel_name)
{
struct iio_channel *channel;
int ret;
- channel = of_iio_channel_get_by_name(np, channel_name);
+ channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
if (IS_ERR(channel))
return channel;
@@ -417,7 +444,7 @@ struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
return channel;
}
-EXPORT_SYMBOL_GPL(devm_of_iio_channel_get_by_name);
+EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
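A short, hedged sketch of how a consumer driver might use the fwnode-based lookup exported above. The probe function, the "vbat" channel name and the extra includes are illustrative assumptions, not taken from this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/property.h>

/* Illustrative sketch only: not part of the patch. */
static int example_consumer_probe(struct device *dev)
{
        struct iio_channel *chan;

        /*
         * "vbat" is matched against the consumer's io-channel-names
         * property; the corresponding io-channels reference is then
         * resolved, walking up io-channel-ranges parents if needed.
         */
        chan = devm_fwnode_iio_channel_get_by_name(dev, dev_fwnode(dev),
                                                   "vbat");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        return 0;
}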
struct iio_channel *iio_channel_get_all(struct device *dev)
{
@@ -428,11 +455,15 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
int mapind = 0;
int i, ret;
- if (dev == NULL)
+ if (!dev)
return ERR_PTR(-EINVAL);
- chans = of_iio_channel_get_all(dev);
- if (chans)
+ chans = fwnode_iio_channel_get_all(dev);
+ /*
+ * We only want to carry on if the error is -ENODEV. Anything else
+ * should be reported up the stack.
+ */
+ if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
return chans;
name = dev_name(dev);
@@ -452,7 +483,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
/* NULL terminated array to save passing size */
chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
- if (chans == NULL) {
+ if (!chans) {
ret = -ENOMEM;
goto error_ret;
}
@@ -466,7 +497,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev)
chans[mapind].channel =
iio_chan_spec_from_name(chans[mapind].indio_dev,
c->map->adc_channel_label);
- if (chans[mapind].channel == NULL) {
+ if (!chans[mapind].channel) {
ret = -EINVAL;
goto error_free_chans;
}
@@ -528,14 +559,14 @@ struct iio_channel *devm_iio_channel_get_all(struct device *dev)
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
- enum iio_chan_info_enum info)
+ enum iio_chan_info_enum info)
{
int unused;
int vals[INDIO_MAX_RAW_ELEMENTS];
int ret;
int val_len = 2;
- if (val2 == NULL)
+ if (!val2)
val2 = &unused;
if (!iio_channel_has_info(chan->channel, info))
@@ -547,9 +578,10 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
vals, &val_len, info);
*val = vals[0];
*val2 = vals[1];
- } else
+ } else {
ret = chan->indio_dev->info->read_raw(chan->indio_dev,
chan->channel, val, val2, info);
+ }
return ret;
}
@@ -560,7 +592,7 @@ int iio_read_channel_raw(struct iio_channel *chan, int *val)
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -579,7 +611,7 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -593,7 +625,8 @@ err_unlock:
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
- int raw, int *processed, unsigned int scale)
+ int raw, int *processed,
+ unsigned int scale)
{
int scale_type, scale_val, scale_val2;
int offset_type, offset_val, offset_val2;
@@ -626,7 +659,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
}
scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
- IIO_CHAN_INFO_SCALE);
+ IIO_CHAN_INFO_SCALE);
if (scale_type < 0) {
/*
* If no channel scaling is available apply consumer scale to
@@ -671,19 +704,19 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
}
int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
- int *processed, unsigned int scale)
+ int *processed, unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
- scale);
+ scale);
err_unlock:
mutex_unlock(&iio_dev_opaque->info_exist_lock);
@@ -698,7 +731,7 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -724,7 +757,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -802,7 +835,7 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
int type;
ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
- IIO_CHAN_INFO_RAW);
+ IIO_CHAN_INFO_RAW);
if (ret >= 0 && type != IIO_VAL_INT)
/* raw values are assumed to be IIO_VAL_INT */
@@ -886,7 +919,7 @@ int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
/* Need to verify underlying driver has not gone away */
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -913,7 +946,7 @@ int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
int ret;
mutex_lock(&iio_dev_opaque->info_exist_lock);
- if (chan->indio_dev->info == NULL) {
+ if (!chan->indio_dev->info) {
ret = -ENODEV;
goto err_unlock;
}
@@ -947,9 +980,8 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
-static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
- const struct iio_channel *chan,
- const char *attr)
+static const struct iio_chan_spec_ext_info *
+iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
const struct iio_chan_spec_ext_info *ext_info;
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 8537e88f02e3..7cf6e8490123 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -331,6 +331,17 @@ config LTR501
This driver can also be built as a module. If so, the module
will be called ltr501.
+config LTRF216A
+ tristate "Liteon LTRF216A Light Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say Y or M here, you get support for the Liteon LTRF216A
+ ambient light sensor.
+
+ If built as a dynamically linked module, it will be called
+ ltrf216a.
+
config LV0104CS
tristate "LV0104CS Ambient Light Sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index d10912faf964..6f23817fae6f 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
+obj-$(CONFIG_LTRF216A) += ltrf216a.o
obj-$(CONFIG_LV0104CS) += lv0104cs.o
obj-$(CONFIG_MAX44000) += max44000.o
obj-$(CONFIG_MAX44009) += max44009.o
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 0f9d77598997..b70f2681bcb3 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -452,7 +452,7 @@ err:
return ret;
}
-static int apds9300_remove(struct i2c_client *client)
+static void apds9300_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct apds9300_data *data = iio_priv(indio_dev);
@@ -462,8 +462,6 @@ static int apds9300_remove(struct i2c_client *client)
/* Ensure that power off and interrupts are disabled */
apds9300_set_intr_state(data, 0);
apds9300_set_power_state(data, 0);
-
- return 0;
}
static int apds9300_suspend(struct device *dev)
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 09b831f9f40b..b62c139baf41 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -1067,7 +1067,7 @@ error_power_down:
return ret;
}
-static int apds9960_remove(struct i2c_client *client)
+static void apds9960_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct apds9960_data *data = iio_priv(indio_dev);
@@ -1076,8 +1076,6 @@ static int apds9960_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
apds9960_set_powermode(data, 0);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 471985c220bb..3e92820bc820 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -263,7 +263,7 @@ static int bh1750_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int bh1750_remove(struct i2c_client *client)
+static void bh1750_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bh1750_data *data = iio_priv(indio_dev);
@@ -273,8 +273,6 @@ static int bh1750_remove(struct i2c_client *client)
mutex_lock(&data->lock);
i2c_smbus_write_byte(client, BH1750_POWER_DOWN);
mutex_unlock(&data->lock);
-
- return 0;
}
static int bh1750_suspend(struct device *dev)
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index fc7141390117..90bca392b262 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -202,7 +202,7 @@ out_disable_pm:
return ret;
}
-static int bh1780_remove(struct i2c_client *client)
+static void bh1780_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct bh1780_data *bh1780 = iio_priv(indio_dev);
@@ -216,8 +216,6 @@ static int bh1780_remove(struct i2c_client *client)
if (ret < 0)
dev_err(&client->dev, "failed to power off (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int bh1780_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index edbe6a3138d0..001055d09750 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -505,7 +505,7 @@ static int cm32181_resume(struct device *dev)
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
}
-DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
static const struct of_device_id cm32181_of_match[] = {
{ .compatible = "capella,cm3218" },
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 2c80a0535d2c..5214cd014cf8 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -357,7 +357,7 @@ static int cm3232_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int cm3232_remove(struct i2c_client *client)
+static void cm3232_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -365,8 +365,6 @@ static int cm3232_remove(struct i2c_client *client)
CM3232_CMD_ALS_DISABLE);
iio_device_unregister(indio_dev);
-
- return 0;
}
static const struct i2c_device_id cm3232_id[] = {
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index c721b69d5095..0b30db77f78b 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -226,8 +226,10 @@ static int cm3605_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return dev_err_probe(dev, irq, "failed to get irq\n");
+ if (irq < 0) {
+ ret = dev_err_probe(dev, irq, "failed to get irq\n");
+ goto out_disable_aset;
+ }
ret = devm_request_threaded_irq(dev, irq, cm3605_prox_irq,
NULL, 0, "cm3605", indio_dev);
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 89f5e48a6642..6615c98b601c 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -700,7 +700,7 @@ error_disable_reg:
return ret;
}
-static int cm36651_remove(struct i2c_client *client)
+static void cm36651_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct cm36651_data *cm36651 = iio_priv(indio_dev);
@@ -710,8 +710,6 @@ static int cm36651_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
i2c_unregister_device(cm36651->ps_client);
i2c_unregister_device(cm36651->ara_client);
-
- return 0;
}
static const struct i2c_device_id cm36651_id[] = {
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index e2707416f9a8..8000fa347344 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -619,7 +619,7 @@ out_disable_vdd:
return ret;
}
-static int gp2ap002_remove(struct i2c_client *client)
+static void gp2ap002_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
@@ -631,8 +631,6 @@ static int gp2ap002_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
regulator_disable(gp2ap002->vio);
regulator_disable(gp2ap002->vdd);
-
- return 0;
}
static int gp2ap002_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index b820041159f7..826439299e8b 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1573,7 +1573,7 @@ error_regulator_disable:
return err;
}
-static int gp2ap020a00f_remove(struct i2c_client *client)
+static void gp2ap020a00f_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct gp2ap020a00f_data *data = iio_priv(indio_dev);
@@ -1589,8 +1589,6 @@ static int gp2ap020a00f_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(data->vled_reg);
-
- return 0;
}
static const struct i2c_device_id gp2ap020a00f_id[] = {
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index ff5996d77818..32d58e18f26d 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -636,7 +636,7 @@ static int isl29028_probe(struct i2c_client *client,
return 0;
}
-static int isl29028_remove(struct i2c_client *client)
+static void isl29028_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct isl29028_chip *chip = iio_priv(indio_dev);
@@ -647,8 +647,6 @@ static int isl29028_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
isl29028_clear_configure_reg(chip);
-
- return 0;
}
static int isl29028_suspend(struct device *dev)
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index eb68a52aab82..c199e63cce82 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -300,15 +300,13 @@ static int isl29125_powerdown(struct isl29125_data *data)
(data->conf1 & ~ISL29125_MODE_MASK) | ISL29125_MODE_PD);
}
-static int isl29125_remove(struct i2c_client *client)
+static void isl29125_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
isl29125_powerdown(iio_priv(indio_dev));
-
- return 0;
}
static int isl29125_suspend(struct device *dev)
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 5387c12231cf..57ce6d75966c 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -373,7 +373,7 @@ static int jsa1212_power_off(struct jsa1212_data *data)
return ret;
}
-static int jsa1212_remove(struct i2c_client *client)
+static void jsa1212_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct jsa1212_data *data = iio_priv(indio_dev);
@@ -381,8 +381,6 @@ static int jsa1212_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
jsa1212_power_off(data);
-
- return 0;
}
static int jsa1212_suspend(struct device *dev)
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 679a1e1086ae..74a1ccda8b9c 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1600,15 +1600,13 @@ powerdown_on_error:
return ret;
}
-static int ltr501_remove(struct i2c_client *client)
+static void ltr501_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
ltr501_powerdown(iio_priv(indio_dev));
-
- return 0;
}
static int ltr501_suspend(struct device *dev)
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
new file mode 100644
index 000000000000..4b8ef36b6912
--- /dev/null
+++ b/drivers/iio/light/ltrf216a.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LTRF216A Ambient Light Sensor
+ *
+ * Copyright (C) 2022 Collabora, Ltd.
+ * Author: Shreeya Patel <shreeya.patel@collabora.com>
+ *
+ * Copyright (C) 2021 Lite-On Technology Corp (Singapore)
+ * Author: Shi Zhigang <Zhigang.Shi@liteon.com>
+ *
+ * IIO driver for LTRF216A (7-bit I2C slave address 0x53).
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+
+#include <asm/unaligned.h>
+
+#define LTRF216A_ALS_RESET_MASK BIT(4)
+#define LTRF216A_ALS_DATA_STATUS BIT(3)
+#define LTRF216A_ALS_ENABLE_MASK BIT(1)
+#define LTRF216A_MAIN_CTRL 0x00
+#define LTRF216A_ALS_MEAS_RES 0x04
+#define LTRF216A_ALS_GAIN 0x05
+#define LTRF216A_PART_ID 0x06
+#define LTRF216A_MAIN_STATUS 0x07
+#define LTRF216A_ALS_CLEAR_DATA_0 0x0a
+#define LTRF216A_ALS_CLEAR_DATA_1 0x0b
+#define LTRF216A_ALS_CLEAR_DATA_2 0x0c
+#define LTRF216A_ALS_DATA_0 0x0d
+#define LTRF216A_ALS_DATA_1 0x0e
+#define LTRF216A_ALS_DATA_2 0x0f
+#define LTRF216A_INT_CFG 0x19
+#define LTRF216A_INT_PST 0x1a
+#define LTRF216A_ALS_THRES_UP_0 0x21
+#define LTRF216A_ALS_THRES_UP_1 0x22
+#define LTRF216A_ALS_THRES_UP_2 0x23
+#define LTRF216A_ALS_THRES_LOW_0 0x24
+#define LTRF216A_ALS_THRES_LOW_1 0x25
+#define LTRF216A_ALS_THRES_LOW_2 0x26
+#define LTRF216A_ALS_READ_DATA_DELAY_US 20000
+
+static const int ltrf216a_int_time_available[][2] = {
+ { 0, 400000 },
+ { 0, 200000 },
+ { 0, 100000 },
+ { 0, 50000 },
+ { 0, 25000 },
+};
+
+static const int ltrf216a_int_time_reg[][2] = {
+ { 400, 0x03 },
+ { 200, 0x13 },
+ { 100, 0x22 },
+ { 50, 0x31 },
+ { 25, 0x40 },
+};
+
+/*
+ * The window factor is needed when the device sits under window glass
+ * with coated tinted ink. It compensates for the light loss due to the
+ * lower transmission rate of the window glass and is used when
+ * calculating lux.
+ */
+#define LTRF216A_WIN_FAC 1
+
+struct ltrf216a_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ u32 int_time;
+ u16 int_time_fac;
+ u8 als_gain_fac;
+ /*
+ * Protects regmap accesses and makes sure integration time
+ * remains constant during the measurement of lux.
+ */
+ struct mutex lock;
+};
+
+static const struct iio_chan_spec ltrf216a_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ },
+};
+
+static void ltrf216a_reset(struct iio_dev *indio_dev)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+
+ /* reset sensor, chip fails to respond to this, so ignore any errors */
+ regmap_write(data->regmap, LTRF216A_MAIN_CTRL, LTRF216A_ALS_RESET_MASK);
+
+ /* reset time */
+ usleep_range(1000, 2000);
+}
+
+static int ltrf216a_enable(struct iio_dev *indio_dev)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ /* enable sensor */
+ ret = regmap_set_bits(data->regmap,
+ LTRF216A_MAIN_CTRL, LTRF216A_ALS_ENABLE_MASK);
+ if (ret) {
+ dev_err(dev, "failed to enable sensor: %d\n", ret);
+ return ret;
+ }
+
+ /* sleep for one integration cycle after enabling the device */
+ msleep(ltrf216a_int_time_reg[0][0]);
+
+ return 0;
+}
+
+static int ltrf216a_disable(struct iio_dev *indio_dev)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = regmap_write(data->regmap, LTRF216A_MAIN_CTRL, 0);
+ if (ret)
+ dev_err(dev, "failed to disable sensor: %d\n", ret);
+
+ return ret;
+}
+
+static void ltrf216a_cleanup(void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ ltrf216a_disable(indio_dev);
+}
+
+static int ltrf216a_set_int_time(struct ltrf216a_data *data, int itime)
+{
+ struct device *dev = &data->client->dev;
+ unsigned int i;
+ u8 reg_val;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ltrf216a_int_time_available); i++) {
+ if (ltrf216a_int_time_available[i][1] == itime)
+ break;
+ }
+ if (i == ARRAY_SIZE(ltrf216a_int_time_available))
+ return -EINVAL;
+
+ reg_val = ltrf216a_int_time_reg[i][1];
+
+ ret = regmap_write(data->regmap, LTRF216A_ALS_MEAS_RES, reg_val);
+ if (ret) {
+ dev_err(dev, "failed to set integration time: %d\n", ret);
+ return ret;
+ }
+
+ data->int_time_fac = ltrf216a_int_time_reg[i][0];
+ data->int_time = itime;
+
+ return 0;
+}
+
+static int ltrf216a_get_int_time(struct ltrf216a_data *data,
+ int *val, int *val2)
+{
+ *val = 0;
+ *val2 = data->int_time;
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ltrf216a_set_power_state(struct ltrf216a_data *data, bool on)
+{
+ struct device *dev = &data->client->dev;
+ int ret = 0;
+
+ if (on) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "failed to resume runtime PM: %d\n", ret);
+ return ret;
+ }
+ } else {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
+static int ltrf216a_read_data(struct ltrf216a_data *data, u8 addr)
+{
+ struct device *dev = &data->client->dev;
+ int ret, val;
+ u8 buf[3];
+
+ ret = regmap_read_poll_timeout(data->regmap, LTRF216A_MAIN_STATUS,
+ val, val & LTRF216A_ALS_DATA_STATUS,
+ LTRF216A_ALS_READ_DATA_DELAY_US,
+ LTRF216A_ALS_READ_DATA_DELAY_US * 50);
+ if (ret) {
+ dev_err(dev, "failed to wait for measurement data: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_bulk_read(data->regmap, addr, buf, sizeof(buf));
+ if (ret) {
+ dev_err(dev, "failed to read measurement data: %d\n", ret);
+ return ret;
+ }
+
+ return get_unaligned_le24(&buf[0]);
+}
+
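+/*
+ * Worked example (hypothetical reading): with greendata = 1000 and the
+ * driver defaults of gain factor 3 and integration time factor 100,
+ * lux = 1000 * 45 * 1 * 100 / (3 * 100 * 100) = 150.
+ */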
+static int ltrf216a_get_lux(struct ltrf216a_data *data)
+{
+ int ret, greendata;
+ u64 lux, div;
+
+ ret = ltrf216a_set_power_state(data, true);
+ if (ret)
+ return ret;
+
+ greendata = ltrf216a_read_data(data, LTRF216A_ALS_DATA_0);
+ if (greendata < 0) {
+ ltrf216a_set_power_state(data, false);
+ return greendata;
+ }
+
+ ltrf216a_set_power_state(data, false);
+
+ lux = greendata * 45 * LTRF216A_WIN_FAC * 100;
+ div = data->als_gain_fac * data->int_time_fac * 100;
+
+ return div_u64(lux, div);
+}
+
+static int ltrf216a_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ltrf216a_set_power_state(data, true);
+ if (ret)
+ return ret;
+ mutex_lock(&data->lock);
+ ret = ltrf216a_read_data(data, LTRF216A_ALS_DATA_0);
+ mutex_unlock(&data->lock);
+ ltrf216a_set_power_state(data, false);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&data->lock);
+ ret = ltrf216a_get_lux(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_INT_TIME:
+ mutex_lock(&data->lock);
+ ret = ltrf216a_get_int_time(data, val, val2);
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltrf216a_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val != 0)
+ return -EINVAL;
+ mutex_lock(&data->lock);
+ ret = ltrf216a_set_int_time(data, val2);
+ mutex_unlock(&data->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ltrf216a_read_available(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *length = ARRAY_SIZE(ltrf216a_int_time_available) * 2;
+ *vals = (const int *)ltrf216a_int_time_available;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ltrf216a_info = {
+ .read_raw = ltrf216a_read_raw,
+ .write_raw = ltrf216a_write_raw,
+ .read_avail = ltrf216a_read_available,
+};
+
+static bool ltrf216a_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTRF216A_MAIN_CTRL:
+ case LTRF216A_ALS_MEAS_RES:
+ case LTRF216A_ALS_GAIN:
+ case LTRF216A_PART_ID:
+ case LTRF216A_MAIN_STATUS:
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ case LTRF216A_ALS_DATA_0:
+ case LTRF216A_ALS_DATA_1:
+ case LTRF216A_ALS_DATA_2:
+ case LTRF216A_INT_CFG:
+ case LTRF216A_INT_PST:
+ case LTRF216A_ALS_THRES_UP_0:
+ case LTRF216A_ALS_THRES_UP_1:
+ case LTRF216A_ALS_THRES_UP_2:
+ case LTRF216A_ALS_THRES_LOW_0:
+ case LTRF216A_ALS_THRES_LOW_1:
+ case LTRF216A_ALS_THRES_LOW_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ltrf216a_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTRF216A_MAIN_CTRL:
+ case LTRF216A_ALS_MEAS_RES:
+ case LTRF216A_ALS_GAIN:
+ case LTRF216A_INT_CFG:
+ case LTRF216A_INT_PST:
+ case LTRF216A_ALS_THRES_UP_0:
+ case LTRF216A_ALS_THRES_UP_1:
+ case LTRF216A_ALS_THRES_UP_2:
+ case LTRF216A_ALS_THRES_LOW_0:
+ case LTRF216A_ALS_THRES_LOW_1:
+ case LTRF216A_ALS_THRES_LOW_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ltrf216a_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case LTRF216A_MAIN_STATUS:
+ case LTRF216A_ALS_CLEAR_DATA_0:
+ case LTRF216A_ALS_CLEAR_DATA_1:
+ case LTRF216A_ALS_CLEAR_DATA_2:
+ case LTRF216A_ALS_DATA_0:
+ case LTRF216A_ALS_DATA_1:
+ case LTRF216A_ALS_DATA_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ltrf216a_precious_reg(struct device *dev, unsigned int reg)
+{
+ return reg == LTRF216A_MAIN_STATUS;
+}
+
+static const struct regmap_config ltrf216a_regmap_config = {
+ .name = "ltrf216a",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = LTRF216A_ALS_THRES_LOW_2,
+ .readable_reg = ltrf216a_readable_reg,
+ .writeable_reg = ltrf216a_writable_reg,
+ .volatile_reg = ltrf216a_volatile_reg,
+ .precious_reg = ltrf216a_precious_reg,
+ .disable_locking = true,
+};
+
+static int ltrf216a_probe(struct i2c_client *client)
+{
+ struct ltrf216a_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &ltrf216a_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(data->regmap),
+ "regmap initialization failed\n");
+
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ indio_dev->info = &ltrf216a_info;
+ indio_dev->name = "ltrf216a";
+ indio_dev->channels = ltrf216a_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ltrf216a_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ return ret;
+
+ /* reset sensor, chip fails to respond to this, so ignore any errors */
+ ltrf216a_reset(indio_dev);
+
+ ret = regmap_reinit_cache(data->regmap, &ltrf216a_regmap_config);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to reinit regmap cache\n");
+
+ ret = ltrf216a_enable(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, ltrf216a_cleanup,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&client->dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to enable runtime PM\n");
+
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ data->int_time = 100000;
+ data->int_time_fac = 100;
+ data->als_gain_fac = 3;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int ltrf216a_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = ltrf216a_disable(indio_dev);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(data->regmap, true);
+
+ return 0;
+}
+
+static int ltrf216a_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ltrf216a_data *data = iio_priv(indio_dev);
+ int ret;
+
+ regcache_cache_only(data->regmap, false);
+ regcache_mark_dirty(data->regmap);
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ goto cache_only;
+
+ ret = ltrf216a_enable(indio_dev);
+ if (ret)
+ goto cache_only;
+
+ return 0;
+
+cache_only:
+ regcache_cache_only(data->regmap, true);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ltrf216a_pm_ops, ltrf216a_runtime_suspend,
+ ltrf216a_runtime_resume, NULL);
+
+static const struct i2c_device_id ltrf216a_id[] = {
+ { "ltrf216a" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltrf216a_id);
+
+static const struct of_device_id ltrf216a_of_match[] = {
+ { .compatible = "liteon,ltrf216a" },
+ { .compatible = "ltr,ltrf216a" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ltrf216a_of_match);
+
+static struct i2c_driver ltrf216a_driver = {
+ .driver = {
+ .name = "ltrf216a",
+ .pm = pm_ptr(&ltrf216a_pm_ops),
+ .of_match_table = ltrf216a_of_match,
+ },
+ .probe_new = ltrf216a_probe,
+ .id_table = ltrf216a_id,
+};
+module_i2c_driver(ltrf216a_driver);
+
+MODULE_AUTHOR("Shreeya Patel <shreeya.patel@collabora.com>");
+MODULE_AUTHOR("Shi Zhigang <Zhigang.Shi@liteon.com>");
+MODULE_DESCRIPTION("LTRF216A ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index a326d47afc9b..a26d1c3f9543 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -794,7 +794,7 @@ static int opt3001_probe(struct i2c_client *client,
return 0;
}
-static int opt3001_remove(struct i2c_client *client)
+static void opt3001_remove(struct i2c_client *client)
{
struct iio_dev *iio = i2c_get_clientdata(client);
struct opt3001 *opt = iio_priv(iio);
@@ -808,7 +808,7 @@ static int opt3001_remove(struct i2c_client *client)
if (ret < 0) {
dev_err(opt->dev, "failed to read register %02x\n",
OPT3001_CONFIGURATION);
- return 0;
+ return;
}
reg = ret;
@@ -820,8 +820,6 @@ static int opt3001_remove(struct i2c_client *client)
dev_err(opt->dev, "failed to write register %02x\n",
OPT3001_CONFIGURATION);
}
-
- return 0;
}
static const struct i2c_device_id opt3001_id[] = {
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 772874e707ae..3cb2de51f4aa 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -394,7 +394,7 @@ out_err:
return ret;
}
-static int pa12203001_remove(struct i2c_client *client)
+static void pa12203001_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
@@ -408,8 +408,6 @@ static int pa12203001_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM)
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index dabdd05f0e2c..d1c16dd76058 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -1041,7 +1041,7 @@ err_poweroff:
return ret;
}
-static int rpr0521_remove(struct i2c_client *client)
+static void rpr0521_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -1051,8 +1051,6 @@ static int rpr0521_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
rpr0521_poweroff(iio_priv(indio_dev));
-
- return 0;
}
static int rpr0521_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index 3d4cc1180b6a..c737d3e193ae 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -325,7 +325,7 @@ int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap)
}
EXPORT_SYMBOL_NS(st_uvis25_probe, IIO_UVIS25);
-static int __maybe_unused st_uvis25_suspend(struct device *dev)
+static int st_uvis25_suspend(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct st_uvis25_hw *hw = iio_priv(iio_dev);
@@ -334,7 +334,7 @@ static int __maybe_unused st_uvis25_suspend(struct device *dev)
ST_UVIS25_REG_ODR_MASK, 0);
}
-static int __maybe_unused st_uvis25_resume(struct device *dev)
+static int st_uvis25_resume(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct st_uvis25_hw *hw = iio_priv(iio_dev);
@@ -346,10 +346,7 @@ static int __maybe_unused st_uvis25_resume(struct device *dev)
return 0;
}
-const struct dev_pm_ops st_uvis25_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(st_uvis25_suspend, st_uvis25_resume)
-};
-EXPORT_SYMBOL_NS(st_uvis25_pm_ops, IIO_UVIS25);
+EXPORT_NS_SIMPLE_DEV_PM_OPS(st_uvis25_pm_ops, st_uvis25_suspend, st_uvis25_resume, IIO_UVIS25);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_DESCRIPTION("STMicroelectronics uvis25 sensor driver");
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index b06d09af28a3..c982b0b255cf 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(i2c, st_uvis25_i2c_id_table);
static struct i2c_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_i2c",
- .pm = &st_uvis25_pm_ops,
+ .pm = pm_sleep_ptr(&st_uvis25_pm_ops),
.of_match_table = st_uvis25_i2c_of_match,
},
.probe = st_uvis25_i2c_probe,
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
index 3a4dc6d7180c..86a232320d7d 100644
--- a/drivers/iio/light/st_uvis25_spi.c
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -55,7 +55,7 @@ MODULE_DEVICE_TABLE(spi, st_uvis25_spi_id_table);
static struct spi_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_spi",
- .pm = &st_uvis25_pm_ops,
+ .pm = pm_sleep_ptr(&st_uvis25_pm_ops),
.of_match_table = st_uvis25_spi_of_match,
},
.probe = st_uvis25_spi_probe,
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index f7cc7a6c0c8d..7b8e0da6aabc 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -649,14 +649,12 @@ err_standby:
return ret;
}
-static int stk3310_remove(struct i2c_client *client)
+static void stk3310_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
stk3310_set_state(iio_priv(indio_dev), STK3310_STATE_STANDBY);
-
- return 0;
}
static int stk3310_suspend(struct device *dev)
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 823435f59bb6..db17fec634be 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -559,7 +559,7 @@ static int tcs3472_powerdown(struct tcs3472_data *data)
return ret;
}
-static int tcs3472_remove(struct i2c_client *client)
+static void tcs3472_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -568,8 +568,6 @@ static int tcs3472_remove(struct i2c_client *client)
free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
tcs3472_powerdown(iio_priv(indio_dev));
-
- return 0;
}
static int tcs3472_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index bbb577459fb9..951f35ef3f41 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -796,7 +796,7 @@ fail:
return err;
}
-static int tsl2563_remove(struct i2c_client *client)
+static void tsl2563_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct tsl2563_chip *chip = iio_priv(indio_dev);
@@ -809,8 +809,6 @@ static int tsl2563_remove(struct i2c_client *client)
i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
chip->intr);
tsl2563_set_power(chip, 0);
-
- return 0;
}
static int tsl2563_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 82662dab87c0..0a2ca1a8146d 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -873,7 +873,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
return 0;
}
-static int tsl2583_remove(struct i2c_client *client)
+static void tsl2583_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -884,8 +884,6 @@ static int tsl2583_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
-
- return 0;
}
static int tsl2583_suspend(struct device *dev)
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index 6ae1b27e50b6..090038fed889 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -207,12 +207,10 @@ static int tsl4531_powerdown(struct i2c_client *client)
TSL4531_MODE_POWERDOWN);
}
-static int tsl4531_remove(struct i2c_client *client)
+static void tsl4531_remove(struct i2c_client *client)
{
iio_device_unregister(i2c_get_clientdata(client));
tsl4531_powerdown(client);
-
- return 0;
}
static int tsl4531_suspend(struct device *dev)
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 80d2299da561..3e652d7f3b0e 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -904,7 +904,7 @@ out_err:
}
-static int us5182d_remove(struct i2c_client *client)
+static void us5182d_remove(struct i2c_client *client)
{
struct us5182d_data *data = iio_priv(i2c_get_clientdata(client));
int ret;
@@ -918,8 +918,6 @@ static int us5182d_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to shut down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int us5182d_suspend(struct device *dev)
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 3db4e26731bb..f6c83ecaad8b 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -1111,7 +1111,7 @@ static const struct of_device_id vcnl_4000_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vcnl_4000_of_match);
-static int vcnl4000_remove(struct i2c_client *client)
+static void vcnl4000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct vcnl4000_data *data = iio_priv(indio_dev);
@@ -1126,8 +1126,6 @@ static int vcnl4000_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to power down (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int vcnl4000_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 6a196cf2270b..3ed37f6057fb 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -601,7 +601,7 @@ fail_poweroff:
return ret;
}
-static int vcnl4035_remove(struct i2c_client *client)
+static void vcnl4035_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
int ret;
@@ -616,8 +616,6 @@ static int vcnl4035_remove(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "Failed to put device into standby (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
static int vcnl4035_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index 1e55e09a8d16..cfa4e9e7c803 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -180,15 +180,13 @@ fail:
return ret;
}
-static int veml6070_remove(struct i2c_client *client)
+static void veml6070_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct veml6070_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
i2c_unregister_device(data->client2);
-
- return 0;
}
static const struct i2c_device_id veml6070_id[] = {
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 07eb619bcfe8..b91fc5e6a26e 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -216,8 +216,8 @@ config YAMAHA_YAS530
select IIO_TRIGGERED_BUFFER
help
Say Y here to add support for the Yamaha YAS530 series of
- 3-Axis Magnetometers. Right now YAS530, YAS532 and YAS533 are
- fully supported.
+ 3-Axis Magnetometers. YAS530, YAS532, YAS533 and YAS537 are
+ supported.
This driver can also be compiled as a module.
To compile this driver as a module, choose M here: the module
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index c89a91db0690..7ec9ab3beb45 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -969,7 +969,7 @@ disable_pm:
return ret;
}
-static int ak8974_remove(struct i2c_client *i2c)
+static void ak8974_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct ak8974 *ak8974 = iio_priv(indio_dev);
@@ -981,8 +981,6 @@ static int ak8974_remove(struct i2c_client *i2c)
pm_runtime_disable(&i2c->dev);
ak8974_set_power(ak8974, AK8974_PWR_OFF);
regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
-
- return 0;
}
static int ak8974_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 2432e697150c..caf03a2a98a5 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -1018,7 +1018,7 @@ power_off:
return err;
}
-static int ak8975_remove(struct i2c_client *client)
+static void ak8975_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ak8975_data *data = iio_priv(indio_dev);
@@ -1030,8 +1030,6 @@ static int ak8975_remove(struct i2c_client *client)
iio_triggered_buffer_cleanup(indio_dev);
ak8975_set_mode(data, POWER_DOWN);
ak8975_power_off(data);
-
- return 0;
}
static int ak8975_runtime_suspend(struct device *dev)
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 65c004411d0f..570deaa87836 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -34,11 +34,9 @@ static int bmc150_magn_i2c_probe(struct i2c_client *client,
return bmc150_magn_probe(&client->dev, regmap, client->irq, name);
}
-static int bmc150_magn_i2c_remove(struct i2c_client *client)
+static void bmc150_magn_i2c_remove(struct i2c_client *client)
{
bmc150_magn_remove(&client->dev);
-
- return 0;
}
static const struct acpi_device_id bmc150_magn_acpi_match[] = {
diff --git a/drivers/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 9120c8bbf3dd..60fbb5431c88 100644
--- a/drivers/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -52,16 +52,5 @@ int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
enum hmc5843_ids id, const char *name);
void hmc5843_common_remove(struct device *dev);
-int hmc5843_common_suspend(struct device *dev);
-int hmc5843_common_resume(struct device *dev);
-
-#ifdef CONFIG_PM_SLEEP
-static __maybe_unused SIMPLE_DEV_PM_OPS(hmc5843_pm_ops,
- hmc5843_common_suspend,
- hmc5843_common_resume);
-#define HMC5843_PM_OPS (&hmc5843_pm_ops)
-#else
-#define HMC5843_PM_OPS NULL
-#endif
-
+extern const struct dev_pm_ops hmc5843_pm_ops;
#endif /* HMC5843_CORE_H */
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 4a63b2da9df0..c5521d61da29 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -603,19 +603,19 @@ static const struct iio_info hmc5843_info = {
static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
-int hmc5843_common_suspend(struct device *dev)
+static int hmc5843_common_suspend(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
HMC5843_MODE_SLEEP);
}
-EXPORT_SYMBOL_NS(hmc5843_common_suspend, IIO_HMC5843);
-int hmc5843_common_resume(struct device *dev)
+static int hmc5843_common_resume(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
HMC5843_MODE_CONVERSION_CONTINUOUS);
}
-EXPORT_SYMBOL_NS(hmc5843_common_resume, IIO_HMC5843);
+EXPORT_NS_SIMPLE_DEV_PM_OPS(hmc5843_pm_ops, hmc5843_common_suspend,
+ hmc5843_common_resume, IIO_HMC5843);
int hmc5843_common_probe(struct device *dev, struct regmap *regmap,
enum hmc5843_ids id, const char *name)
diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 8d2ff8fc204d..18a13dd51296 100644
--- a/drivers/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -65,11 +65,9 @@ static int hmc5843_i2c_probe(struct i2c_client *cli,
id->driver_data, id->name);
}
-static int hmc5843_i2c_remove(struct i2c_client *client)
+static void hmc5843_i2c_remove(struct i2c_client *client)
{
hmc5843_common_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id hmc5843_id[] = {
@@ -93,7 +91,7 @@ MODULE_DEVICE_TABLE(of, hmc5843_of_match);
static struct i2c_driver hmc5843_driver = {
.driver = {
.name = "hmc5843",
- .pm = HMC5843_PM_OPS,
+ .pm = pm_sleep_ptr(&hmc5843_pm_ops),
.of_match_table = hmc5843_of_match,
},
.id_table = hmc5843_id,
diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 8403f09aba39..c42d2e2a6a6c 100644
--- a/drivers/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -86,13 +86,13 @@ static const struct spi_device_id hmc5843_id[] = {
MODULE_DEVICE_TABLE(spi, hmc5843_id);
static struct spi_driver hmc5843_driver = {
- .driver = {
- .name = "hmc5843",
- .pm = HMC5843_PM_OPS,
- },
- .id_table = hmc5843_id,
- .probe = hmc5843_spi_probe,
- .remove = hmc5843_spi_remove,
+ .driver = {
+ .name = "hmc5843",
+ .pm = pm_sleep_ptr(&hmc5843_pm_ops),
+ },
+ .id_table = hmc5843_id,
+ .probe = hmc5843_spi_probe,
+ .remove = hmc5843_spi_remove,
};
module_spi_driver(hmc5843_driver);
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 226439d0bfb5..b870ad803862 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -559,7 +559,7 @@ disable_regulator_vdd:
return ret;
}
-static int mag3110_remove(struct i2c_client *client)
+static void mag3110_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mag3110_data *data = iio_priv(indio_dev);
@@ -569,8 +569,6 @@ static int mag3110_remove(struct i2c_client *client)
mag3110_standby(iio_priv(indio_dev));
regulator_disable(data->vddio_reg);
regulator_disable(data->vdd_reg);
-
- return 0;
}
static int mag3110_suspend(struct device *dev)
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index aeaa4da6923b..801c760feb4d 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -10,13 +10,16 @@
* (YAS534 is a magnetic switch, not handled)
* YAS535 MS-6C
* YAS536 MS-3W
- * YAS537 MS-3T (2015 Samsung Galaxy S6, Note 5, Xiaomi)
+ * YAS537 MS-3T (2015 Samsung Galaxy S6, Note 5, Galaxy S7)
* YAS539 MS-3S (2018 Samsung Galaxy A7 SM-A750FN)
*
* Code functions found in the MPU3050 YAS530 and YAS532 drivers
* named "inv_compass" in the Tegra Android kernel tree.
* Copyright (C) 2012 InvenSense Corporation
*
+ * Code functions for YAS537 based on Yamaha Android kernel driver.
+ * Copyright (c) 2014 Yamaha Corporation
+ *
* Author: Linus Walleij <linus.walleij@linaro.org>
*/
#include <linux/bitfield.h>
@@ -29,9 +32,11 @@
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/random.h>
+#include <linux/units.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
@@ -40,20 +45,39 @@
#include <asm/unaligned.h>
-/* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
+/* Commonly used registers */
#define YAS5XX_DEVICE_ID 0x80
-#define YAS5XX_ACTUATE_INIT_COIL 0x81
-#define YAS5XX_MEASURE 0x82
-#define YAS5XX_CONFIG 0x83
-#define YAS5XX_MEASURE_INTERVAL 0x84
-#define YAS5XX_OFFSET_X 0x85 /* [-31 .. 31] */
-#define YAS5XX_OFFSET_Y1 0x86 /* [-31 .. 31] */
-#define YAS5XX_OFFSET_Y2 0x87 /* [-31 .. 31] */
-#define YAS5XX_TEST1 0x88
-#define YAS5XX_TEST2 0x89
-#define YAS5XX_CAL 0x90
#define YAS5XX_MEASURE_DATA 0xB0
+/* These registers are used by YAS530, YAS532 and YAS533 */
+#define YAS530_ACTUATE_INIT_COIL 0x81
+#define YAS530_MEASURE 0x82
+#define YAS530_CONFIG 0x83
+#define YAS530_MEASURE_INTERVAL 0x84
+#define YAS530_OFFSET_X 0x85 /* [-31 .. 31] */
+#define YAS530_OFFSET_Y1 0x86 /* [-31 .. 31] */
+#define YAS530_OFFSET_Y2 0x87 /* [-31 .. 31] */
+#define YAS530_TEST1 0x88
+#define YAS530_TEST2 0x89
+#define YAS530_CAL 0x90
+
+/* Registers used by YAS537 */
+#define YAS537_MEASURE 0x81 /* Originally YAS537_REG_CMDR */
+#define YAS537_CONFIG 0x82 /* Originally YAS537_REG_CONFR */
+#define YAS537_MEASURE_INTERVAL 0x83 /* Originally YAS537_REG_INTRVLR */
+#define YAS537_OFFSET_X 0x84 /* Originally YAS537_REG_OXR */
+#define YAS537_OFFSET_Y1 0x85 /* Originally YAS537_REG_OY1R */
+#define YAS537_OFFSET_Y2 0x86 /* Originally YAS537_REG_OY2R */
+#define YAS537_AVR 0x87
+#define YAS537_HCK 0x88
+#define YAS537_LCK 0x89
+#define YAS537_SRST 0x90
+#define YAS537_ADCCAL 0x91
+#define YAS537_MTC 0x93
+#define YAS537_OC 0x9E
+#define YAS537_TRM 0x9F
+#define YAS537_CAL 0xC0
+
/* Bits in the YAS5xx config register */
#define YAS5XX_CONFIG_INTON BIT(0) /* Interrupt on? */
#define YAS5XX_CONFIG_INTHACT BIT(1) /* Interrupt active high? */
@@ -65,6 +89,7 @@
#define YAS5XX_MEASURE_LDTC BIT(1)
#define YAS5XX_MEASURE_FORS BIT(2)
#define YAS5XX_MEASURE_DLYMES BIT(4)
+#define YAS5XX_MEASURE_CONT BIT(5)
/* Bits in the measure data register */
#define YAS5XX_MEASURE_DATA_BUSY BIT(7)
@@ -88,33 +113,101 @@
#define YAS532_DATA_BITS 13
#define YAS532_DATA_CENTER BIT(YAS532_DATA_BITS - 1)
#define YAS532_DATA_OVERFLOW (BIT(YAS532_DATA_BITS) - 1)
-#define YAS532_20DEGREES 390 /* Looks like Kelvin */
-/* These variant IDs are known from code dumps */
#define YAS537_DEVICE_ID 0x07 /* YAS537 (MS-3T) */
-#define YAS539_DEVICE_ID 0x08 /* YAS539 (MS-3S) */
+#define YAS537_VERSION_0 0 /* Version naming unknown */
+#define YAS537_VERSION_1 1 /* Version naming unknown */
+#define YAS537_MAG_AVERAGE_32_MASK GENMASK(6, 4)
+#define YAS537_MEASURE_TIME_WORST_US 1500
+#define YAS537_DEFAULT_SENSOR_DELAY_MS 50
+#define YAS537_MAG_RCOIL_TIME_US 65
+#define YAS537_MTC3_MASK_PREP GENMASK(7, 0)
+#define YAS537_MTC3_MASK_GET GENMASK(7, 5)
+#define YAS537_MTC3_ADD_BIT BIT(4)
+#define YAS537_HCK_MASK_PREP GENMASK(4, 0)
+#define YAS537_HCK_MASK_GET GENMASK(7, 4)
+#define YAS537_LCK_MASK_PREP GENMASK(4, 0)
+#define YAS537_LCK_MASK_GET GENMASK(3, 0)
+#define YAS537_OC_MASK_GET GENMASK(5, 0)
/* Turn off device regulators etc after 5 seconds of inactivity */
#define YAS5XX_AUTOSUSPEND_DELAY_MS 5000
+enum chip_ids {
+ yas530,
+ yas532,
+ yas533,
+ yas537,
+};
+
+static const int yas530_volatile_reg[] = {
+ YAS530_ACTUATE_INIT_COIL,
+ YAS530_MEASURE,
+};
+
+static const int yas537_volatile_reg[] = {
+ YAS537_MEASURE,
+};
+
struct yas5xx_calibration {
/* Linearization calibration x, y1, y2 */
s32 r[3];
u32 f[3];
/* Temperature compensation calibration */
- s32 Cx, Cy1, Cy2;
+ s16 Cx, Cy1, Cy2;
/* Misc calibration coefficients */
- s32 a2, a3, a4, a5, a6, a7, a8, a9, k;
+ s8 a2, a3, a4, a6, a7, a8;
+ s16 a5, a9;
+ u8 k;
/* clock divider */
u8 dck;
};
+struct yas5xx;
+
+/**
+ * struct yas5xx_chip_info - device-specific data and function pointers
+ * @devid: device ID number
+ * @product_name: product name of the YAS variant
+ * @version_names: version letters or namings
+ * @volatile_reg: device-specific volatile registers
+ * @volatile_reg_qty: quantity of device-specific volatile registers
+ * @scaling_val2: scaling value for IIO_CHAN_INFO_SCALE
+ * @t_ref: number of counts at reference temperature 20 °C
+ * @min_temp_x10: starting point of temperature counting in 1/10:s degrees Celsius
+ * @get_measure: function pointer to get a measurement
+ * @get_calibration_data: function pointer to get calibration data
+ * @dump_calibration: function pointer to dump calibration for debugging
+ * @measure_offsets: function pointer to measure the offsets
+ * @power_on: function pointer to power-on procedure
+ *
+ * The "t_ref" value for YAS532/533 is known from the Android driver.
+ * For YAS530 and YAS537 it was approximately measured.
+ *
+ * The temperatures "min_temp_x10" are derived from the temperature resolutions
+ * given in the data sheets.
+ */
+struct yas5xx_chip_info {
+ unsigned int devid;
+ const char *product_name;
+ const char *version_names[2];
+ const int *volatile_reg;
+ int volatile_reg_qty;
+ u32 scaling_val2;
+ u16 t_ref;
+ s16 min_temp_x10;
+ int (*get_measure)(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo);
+ int (*get_calibration_data)(struct yas5xx *yas5xx);
+ void (*dump_calibration)(struct yas5xx *yas5xx);
+ int (*measure_offsets)(struct yas5xx *yas5xx);
+ int (*power_on)(struct yas5xx *yas5xx);
+};
+
/**
* struct yas5xx - state container for the YAS5xx driver
* @dev: parent device pointer
- * @devid: device ID number
+ * @chip_info: device-specific data and function pointers
* @version: device version
- * @name: device name
* @calibration: calibration settings from the OTP storage
* @hard_offsets: offsets for each axis measured with initcoil actuated
* @orientation: mounting matrix, flipped axis etc
@@ -128,11 +221,10 @@ struct yas5xx_calibration {
*/
struct yas5xx {
struct device *dev;
- unsigned int devid;
+ const struct yas5xx_chip_info *chip_info;
unsigned int version;
- char name[16];
struct yas5xx_calibration calibration;
- u8 hard_offsets[3];
+ s8 hard_offsets[3];
struct iio_mount_matrix orientation;
struct regmap *map;
struct regulator_bulk_data regs[2];
@@ -179,23 +271,26 @@ static u16 yas532_extract_axis(u8 *data)
}
/**
- * yas5xx_measure() - Make a measure from the hardware
+ * yas530_measure() - Make a measure from the hardware
* @yas5xx: The device state
* @t: the raw temperature measurement
* @x: the raw x axis measurement
* @y1: the y1 axis measurement
* @y2: the y2 axis measurement
* @return: 0 on success or error code
+ *
+ * Used by YAS530, YAS532 and YAS533.
*/
-static int yas5xx_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
+static int yas530_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
unsigned int busy;
u8 data[8];
int ret;
u16 val;
mutex_lock(&yas5xx->lock);
- ret = regmap_write(yas5xx->map, YAS5XX_MEASURE, YAS5XX_MEASURE_START);
+ ret = regmap_write(yas5xx->map, YAS530_MEASURE, YAS5XX_MEASURE_START);
if (ret < 0)
goto out_unlock;
@@ -219,7 +314,7 @@ static int yas5xx_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
mutex_unlock(&yas5xx->lock);
- switch (yas5xx->devid) {
+ switch (ci->devid) {
case YAS530_DEVICE_ID:
/*
* The t value is 9 bits in big endian format
@@ -261,9 +356,82 @@ out_unlock:
return ret;
}
-static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
+/**
+ * yas537_measure() - Make a measure from the hardware
+ * @yas5xx: The device state
+ * @t: the raw temperature measurement
+ * @x: the raw x axis measurement
+ * @y1: the y1 axis measurement
+ * @y2: the y2 axis measurement
+ * @return: 0 on success or error code
+ */
+static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y2)
{
struct yas5xx_calibration *c = &yas5xx->calibration;
+ unsigned int busy;
+ u8 data[8];
+ u16 xy1y2[3];
+ s32 h[3], s[3];
+ int i, ret;
+
+ mutex_lock(&yas5xx->lock);
+
+ /* Unlike YAS530/532, a "cont" bit is also set; its meaning is unknown */
+ ret = regmap_write(yas5xx->map, YAS537_MEASURE, YAS5XX_MEASURE_START |
+ YAS5XX_MEASURE_CONT);
+ if (ret < 0)
+ goto out_unlock;
+
+ /* Use the same timeout as YAS530/532, but the busy bit is in data byte 2 */
+ ret = regmap_read_poll_timeout(yas5xx->map, YAS5XX_MEASURE_DATA + 2, busy,
+ !(busy & YAS5XX_MEASURE_DATA_BUSY),
+ 500, 20000);
+ if (ret) {
+ dev_err(yas5xx->dev, "timeout waiting for measurement\n");
+ goto out_unlock;
+ }
+
+ ret = regmap_bulk_read(yas5xx->map, YAS5XX_MEASURE_DATA,
+ data, sizeof(data));
+ if (ret)
+ goto out_unlock;
+
+ mutex_unlock(&yas5xx->lock);
+
+ *t = get_unaligned_be16(&data[0]);
+ xy1y2[0] = FIELD_GET(GENMASK(13, 0), get_unaligned_be16(&data[2]));
+ xy1y2[1] = get_unaligned_be16(&data[4]);
+ xy1y2[2] = get_unaligned_be16(&data[6]);
+
+ /* The second version of YAS537 needs the calibration coefficients applied to the raw data */
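+ /*
+ * This computes h = k / 8192 * M * s, where the rows of M are
+ * (128, a2, a3), (a4, a5, a6) and (a7, a8, a9), and s is the raw
+ * sample with the mid-scale offset of 8192 removed.
+ */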
+ if (yas5xx->version == YAS537_VERSION_1) {
+ for (i = 0; i < 3; i++)
+ s[i] = xy1y2[i] - BIT(13);
+ h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
+ h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
+ h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
+ for (i = 0; i < 3; i++) {
+ h[i] = clamp_val(h[i], -BIT(13), BIT(13) - 1);
+ xy1y2[i] = h[i] + BIT(13);
+ }
+ }
+
+ *x = xy1y2[0];
+ *y1 = xy1y2[1];
+ *y2 = xy1y2[2];
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&yas5xx->lock);
+ return ret;
+}
+
+/* Used by YAS530, YAS532 and YAS533 */
+static s32 yas530_linearize(struct yas5xx *yas5xx, u16 val, int axis)
+{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
+ struct yas5xx_calibration *c = &yas5xx->calibration;
static const s32 yas532ac_coef[] = {
YAS532_VERSION_AC_COEF_X,
YAS532_VERSION_AC_COEF_Y1,
@@ -272,7 +440,7 @@ static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
s32 coef;
/* Select coefficients */
- switch (yas5xx->devid) {
+ switch (ci->devid) {
case YAS530_DEVICE_ID:
if (yas5xx->version == YAS530_VERSION_A)
coef = YAS530_VERSION_A_COEF;
@@ -302,8 +470,24 @@ static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
(yas5xx->hard_offsets[axis] - c->r[axis]) * coef;
}
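+/*
+ * Worked example: for YAS532/533, t_ref = 390 counts and
+ * min_temp_x10 = -500, so a raw reading of t = 390 yields
+ * (-500 + (200 - (-500)) * 390 / 390) * 100 = 20000 millidegrees,
+ * i.e. the 20 degrees Celsius reference temperature.
+ */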
+static s32 yas5xx_calc_temperature(struct yas5xx *yas5xx, u16 t)
+{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
+ s32 to;
+ u16 t_ref;
+ s16 min_temp_x10;
+ int ref_temp_x10;
+
+ t_ref = ci->t_ref;
+ min_temp_x10 = ci->min_temp_x10;
+ ref_temp_x10 = 200;
+
+ to = (min_temp_x10 + ((ref_temp_x10 - min_temp_x10) * t / t_ref)) * 100;
+ return to;
+}
+
/**
- * yas5xx_get_measure() - Measure a sample of all axis and process
+ * yas530_get_measure() - Measure a sample of all axis and process
* @yas5xx: The device state
* @to: Temperature out
* @xo: X axis out
@@ -311,36 +495,50 @@ static s32 yas5xx_linearize(struct yas5xx *yas5xx, u16 val, int axis)
* @zo: Z axis out
* @return: 0 on success or error code
*
- * Returned values are in nanotesla according to some code.
+ * Used by YAS530, YAS532 and YAS533.
*/
-static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
+static int yas530_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
struct yas5xx_calibration *c = &yas5xx->calibration;
- u16 t, x, y1, y2;
- /* These are "signed x, signed y1 etc */
+ u16 t_ref, t_comp, t, x, y1, y2;
+ /* These are signed x, signed y1 etc */
s32 sx, sy1, sy2, sy, sz;
int ret;
/* We first get raw data that needs to be translated to [x,y,z] */
- ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ ret = yas530_measure(yas5xx, &t, &x, &y1, &y2);
if (ret)
return ret;
/* Do some linearization if available */
- sx = yas5xx_linearize(yas5xx, x, 0);
- sy1 = yas5xx_linearize(yas5xx, y1, 1);
- sy2 = yas5xx_linearize(yas5xx, y2, 2);
+ sx = yas530_linearize(yas5xx, x, 0);
+ sy1 = yas530_linearize(yas5xx, y1, 1);
+ sy2 = yas530_linearize(yas5xx, y2, 2);
+
+ /*
+ * Set the temperature for compensation (unit: counts):
+ * YAS532/YAS533 version AC uses the temperature deviation as a
+ * multiplier. YAS530 and YAS532 version AB use solely the t value.
+ */
+ t_ref = ci->t_ref;
+ if (ci->devid == YAS532_DEVICE_ID &&
+ yas5xx->version == YAS532_VERSION_AC) {
+ t_comp = t - t_ref;
+ } else {
+ t_comp = t;
+ }
/*
* Temperature compensation for x, y1, y2 respectively:
*
- * Cx * t
- * x' = x - ------
- * 100
+ * Cx * t_comp
+ * x' = x - -----------
+ * 100
*/
- sx = sx - (c->Cx * t) / 100;
- sy1 = sy1 - (c->Cy1 * t) / 100;
- sy2 = sy2 - (c->Cy2 * t) / 100;
+ sx = sx - (c->Cx * t_comp) / 100;
+ sy1 = sy1 - (c->Cy1 * t_comp) / 100;
+ sy2 = sy2 - (c->Cy2 * t_comp) / 100;
/*
* Break y1 and y2 into y and z, y1 and y2 are apparently encoding
@@ -349,11 +547,9 @@ static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo,
sy = sy1 - sy2;
sz = -sy1 - sy2;
- /*
- * FIXME: convert to Celsius? Just guessing this is given
- * as 1/10:s of degrees so multiply by 100 to get millicentigrades.
- */
- *to = t * 100;
+ /* Calculate temperature readout */
+ *to = yas5xx_calc_temperature(yas5xx, t);
+
/*
* Calibrate [x,y,z] with some formulas like this:
*
@@ -376,19 +572,56 @@ static int yas5xx_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo,
return 0;
}
+/**
+ * yas537_get_measure() - Measure a sample of all axis and process
+ * @yas5xx: The device state
+ * @to: Temperature out
+ * @xo: X axis out
+ * @yo: Y axis out
+ * @zo: Z axis out
+ * @return: 0 on success or error code
+ */
+static int yas537_get_measure(struct yas5xx *yas5xx, s32 *to, s32 *xo, s32 *yo, s32 *zo)
+{
+ u16 t, x, y1, y2;
+ int ret;
+
+ /* We first get raw data that needs to be translated to [x,y,z] */
+ ret = yas537_measure(yas5xx, &t, &x, &y1, &y2);
+ if (ret)
+ return ret;
+
+ /* Calculate temperature readout */
+ *to = yas5xx_calc_temperature(yas5xx, t);
+
+ /*
+ * Unfortunately, no linearization or temperature compensation formulas
+ * are known for YAS537.
+ */
+
+ /* Calculate x, y, z from x, y1, y2 */
+ *xo = (x - BIT(13)) * 300;
+ *yo = (y1 - y2) * 1732 / 10;
+ *zo = (-y1 - y2 + BIT(14)) * 300;
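+ /* 1732 / 10 ~= 300 / sqrt(3), matching the 300 scaling used for x and z */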
+
+ return 0;
+}
+
static int yas5xx_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
s32 t, x, y, z;
int ret;
switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW:
pm_runtime_get_sync(yas5xx->dev);
- ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret)
@@ -412,19 +645,8 @@ static int yas5xx_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if (chan->address == 0) {
- /* Temperature is unscaled */
- *val = 1;
- return IIO_VAL_INT;
- }
- /*
- * The axis values are in nanotesla according to the vendor
- * drivers, but is clearly in microtesla according to
- * experiments. Since 1 uT = 0.01 Gauss, we need to divide
- * by 100000000 (10^8) to get to Gauss from the raw value.
- */
*val = 1;
- *val2 = 100000000;
+ *val2 = ci->scaling_val2;
return IIO_VAL_FRACTIONAL;
default:
/* Unknown request */
@@ -435,11 +657,12 @@ static int yas5xx_read_raw(struct iio_dev *indio_dev,
static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
{
struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
s32 t, x, y, z;
int ret;
pm_runtime_get_sync(yas5xx->dev);
- ret = yas5xx_get_measure(yas5xx, &t, &x, &y, &z);
+ ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret) {
@@ -505,7 +728,7 @@ static const struct iio_chan_spec yas5xx_channels[] = {
.address = 0,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 32,
.storagebits = 32,
.endianness = IIO_CPU,
@@ -525,9 +748,26 @@ static const struct iio_info yas5xx_info = {
static bool yas5xx_volatile_reg(struct device *dev, unsigned int reg)
{
- return reg == YAS5XX_ACTUATE_INIT_COIL ||
- reg == YAS5XX_MEASURE ||
- (reg >= YAS5XX_MEASURE_DATA && reg <= YAS5XX_MEASURE_DATA + 8);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
+ int reg_qty;
+ int i;
+
+ if (reg >= YAS5XX_MEASURE_DATA && reg < YAS5XX_MEASURE_DATA + 8)
+ return true;
+
+ /*
+ * Different YAS variants place different registers at the same
+ * addresses, so the volatile registers are looked up per chip.
+ */
+ reg_qty = ci->volatile_reg_qty;
+ for (i = 0; i < reg_qty; i++) {
+ if (reg == ci->volatile_reg[i])
+ return true;
+ }
+
+ return false;
}
/* TODO: enable regmap cache, using mark dirty and sync at runtime resume */
@@ -539,11 +779,13 @@ static const struct regmap_config yas5xx_regmap_config = {
};
/**
- * yas53x_extract_calibration() - extracts the a2-a9 and k calibration
+ * yas530_extract_calibration() - extracts the a2-a9 and k calibration
* @data: the bitfield to use
* @c: the calibration to populate
+ *
+ * Used by YAS530, YAS532 and YAS533.
*/
-static void yas53x_extract_calibration(u8 *data, struct yas5xx_calibration *c)
+static void yas530_extract_calibration(u8 *data, struct yas5xx_calibration *c)
{
u64 val = get_unaligned_be64(data);
@@ -581,24 +823,27 @@ static int yas530_get_calibration_data(struct yas5xx *yas5xx)
int ret;
/* Dummy read, first read is ALWAYS wrong */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
/* Actual calibration readout */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
- dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+ dev_dbg(yas5xx->dev, "calibration data: %16ph\n", data);
+ /* Contribute calibration data to the input pool for kernel entropy */
add_device_randomness(data, sizeof(data));
+
+ /* Extract version */
yas5xx->version = data[15] & GENMASK(1, 0);
/* Extract the calibration from the bitfield */
c->Cx = data[0] * 6 - 768;
c->Cy1 = data[1] * 6 - 768;
c->Cy2 = data[2] * 6 - 768;
- yas53x_extract_calibration(&data[3], c);
+ yas530_extract_calibration(&data[3], c);
/*
* Extract linearization:
@@ -618,6 +863,7 @@ static int yas530_get_calibration_data(struct yas5xx *yas5xx)
c->r[0] = sign_extend32(FIELD_GET(GENMASK(28, 23), val), 5);
c->r[1] = sign_extend32(FIELD_GET(GENMASK(20, 15), val), 5);
c->r[2] = sign_extend32(FIELD_GET(GENMASK(12, 7), val), 5);
+
return 0;
}
@@ -629,22 +875,22 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
int ret;
/* Dummy read, first read is ALWAYS wrong */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
/* Actual calibration readout */
- ret = regmap_bulk_read(yas5xx->map, YAS5XX_CAL, data, sizeof(data));
+ ret = regmap_bulk_read(yas5xx->map, YAS530_CAL, data, sizeof(data));
if (ret)
return ret;
- dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data);
+ dev_dbg(yas5xx->dev, "calibration data: %14ph\n", data);
/* Sanity check, is this all zeroes? */
- if (memchr_inv(data, 0x00, 13) == NULL) {
- if (!(data[13] & BIT(7)))
- dev_warn(yas5xx->dev, "calibration is blank!\n");
- }
+ if (!memchr_inv(data, 0x00, 13) && !(data[13] & BIT(7)))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+ /* Contribute calibration data to the input pool for kernel entropy */
add_device_randomness(data, sizeof(data));
+
/* Only one bit of version info reserved here as far as we know */
yas5xx->version = data[13] & BIT(0);
@@ -652,7 +898,8 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
c->Cx = data[0] * 10 - 1280;
c->Cy1 = data[1] * 10 - 1280;
c->Cy2 = data[2] * 10 - 1280;
- yas53x_extract_calibration(&data[3], c);
+ yas530_extract_calibration(&data[3], c);
+
/*
* Extract linearization:
* Linearization layout in the 32 bits at byte 10:
@@ -675,7 +922,204 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx)
return 0;
}
-static void yas5xx_dump_calibration(struct yas5xx *yas5xx)
+static int yas537_get_calibration_data(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+ u8 data[17];
+ u32 val1, val2, val3, val4;
+ int i, ret;
+
+ /* Writing SRST register */
+ ret = regmap_write(yas5xx->map, YAS537_SRST, BIT(1));
+ if (ret)
+ return ret;
+
+ /* Calibration readout, YAS537 needs one readout only */
+ ret = regmap_bulk_read(yas5xx->map, YAS537_CAL, data, sizeof(data));
+ if (ret)
+ return ret;
+ dev_dbg(yas5xx->dev, "calibration data: %17ph\n", data);
+
+ /* Sanity check, is this all zeroes? */
+ if (!memchr_inv(data, 0x00, 16) && !FIELD_GET(GENMASK(5, 0), data[16]))
+ dev_warn(yas5xx->dev, "calibration is blank!\n");
+
+ /* Contribute calibration data to the input pool for kernel entropy */
+ add_device_randomness(data, sizeof(data));
+
+ /* Extract version information */
+ yas5xx->version = FIELD_GET(GENMASK(7, 6), data[16]);
+
+ /* There are two versions of YAS537 behaving differently */
+ switch (yas5xx->version) {
+ case YAS537_VERSION_0:
+ /*
+ * The first version simply writes data back into registers:
+ *
+ * data[0] YAS537_MTC 0x93
+ * data[1] 0x94
+ * data[2] 0x95
+ * data[3] 0x96
+ * data[4] 0x97
+ * data[5] 0x98
+ * data[6] 0x99
+ * data[7] 0x9a
+ * data[8] 0x9b
+ * data[9] 0x9c
+ * data[10] 0x9d
+ * data[11] YAS537_OC 0x9e
+ *
+ * data[12] YAS537_OFFSET_X 0x84
+ * data[13] YAS537_OFFSET_Y1 0x85
+ * data[14] YAS537_OFFSET_Y2 0x86
+ *
+ * data[15] YAS537_HCK 0x88
+ * data[16] YAS537_LCK 0x89
+ */
+ for (i = 0; i < 12; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_MTC + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_OFFSET_X + i,
+ data[i + 12]);
+ if (ret)
+ return ret;
+ yas5xx->hard_offsets[i] = data[i + 12];
+ }
+ for (i = 0; i < 2; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_HCK + i,
+ data[i + 15]);
+ if (ret)
+ return ret;
+ }
+ break;
+ case YAS537_VERSION_1:
+ /*
+ * The second version writes some data into registers but also
+ * extracts calibration coefficients.
+ *
+ * Registers being written:
+ *
+ * data[0] YAS537_MTC 0x93
+ * data[1] YAS537_MTC+1 0x94
+ * data[2] YAS537_MTC+2 0x95
+ * data[3] YAS537_MTC+3 (partially) 0x96
+ *
+ * data[12] YAS537_OFFSET_X 0x84
+ * data[13] YAS537_OFFSET_Y1 0x85
+ * data[14] YAS537_OFFSET_Y2 0x86
+ *
+ * data[15] YAS537_HCK (partially) 0x88
+ * YAS537_LCK (partially) 0x89
+ * data[16] YAS537_OC (partially) 0x9e
+ */
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_MTC + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = regmap_write(yas5xx->map, YAS537_OFFSET_X + i,
+ data[i + 12]);
+ if (ret)
+ return ret;
+ yas5xx->hard_offsets[i] = data[i + 12];
+ }
+ /*
+ * Visualization of partially taken data:
+ *
+ * data[3] n 7 6 5 4 3 2 1 0
+ * YAS537_MTC+3 x x x 1 0 0 0 0
+ *
+ * data[15] n 7 6 5 4 3 2 1 0
+ * YAS537_HCK x x x x 0
+ *
+ * data[15] n 7 6 5 4 3 2 1 0
+ * YAS537_LCK x x x x 0
+ *
+ * data[16] n 7 6 5 4 3 2 1 0
+ * YAS537_OC x x x x x x
+ */
+ ret = regmap_write(yas5xx->map, YAS537_MTC + 3,
+ FIELD_PREP(YAS537_MTC3_MASK_PREP,
+ FIELD_GET(YAS537_MTC3_MASK_GET, data[3])) |
+ YAS537_MTC3_ADD_BIT);
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_HCK,
+ FIELD_PREP(YAS537_HCK_MASK_PREP,
+ FIELD_GET(YAS537_HCK_MASK_GET, data[15])));
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_LCK,
+ FIELD_PREP(YAS537_LCK_MASK_PREP,
+ FIELD_GET(YAS537_LCK_MASK_GET, data[15])));
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_OC,
+ FIELD_GET(YAS537_OC_MASK_GET, data[16]));
+ if (ret)
+ return ret;
+ /*
+ * For data extraction, build some blocks. Four 32-bit blocks
+ * look appropriate.
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[0] 0 [ Cx Cx Cx Cx Cx Cx Cx Cx ] bits 31 .. 24
+ * data[1] 1 [ Cx C1 C1 C1 C1 C1 C1 C1 ] bits 23 .. 16
+ * data[2] 2 [ C1 C1 C2 C2 C2 C2 C2 C2 ] bits 15 .. 8
+ * data[3] 3 [ C2 C2 C2 ] bits 7 .. 0
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[3] 0 [ a2 a2 a2 a2 a2 ] bits 31 .. 24
+ * data[4] 1 [ a2 a2 a3 a3 a3 a3 a3 a3 ] bits 23 .. 16
+ * data[5] 2 [ a3 a4 a4 a4 a4 a4 a4 a4 ] bits 15 .. 8
+ * data[6] 3 [ a4 ] bits 7 .. 0
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[6] 0 [ a5 a5 a5 a5 a5 a5 a5 ] bits 31 .. 24
+ * data[7] 1 [ a5 a5 a6 a6 a6 a6 a6 a6 ] bits 23 .. 16
+ * data[8] 2 [ a6 a7 a7 a7 a7 a7 a7 a7 ] bits 15 .. 8
+ * data[9] 3 [ a7 ] bits 7 .. 0
+ *
+ * n 7 6 5 4 3 2 1 0
+ * data[9] 0 [ a8 a8 a8 a8 a8 a8 a8 ] bits 31 .. 24
+ * data[10] 1 [ a9 a9 a9 a9 a9 a9 a9 a9 ] bits 23 .. 16
+ * data[11] 2 [ a9 k k k k k k k ] bits 15 .. 8
+ * data[12] 3 [ ] bits 7 .. 0
+ */
+ val1 = get_unaligned_be32(&data[0]);
+ val2 = get_unaligned_be32(&data[3]);
+ val3 = get_unaligned_be32(&data[6]);
+ val4 = get_unaligned_be32(&data[9]);
+ /* Extract calibration coefficients and modify */
+ c->Cx = FIELD_GET(GENMASK(31, 23), val1) - 256;
+ c->Cy1 = FIELD_GET(GENMASK(22, 14), val1) - 256;
+ c->Cy2 = FIELD_GET(GENMASK(13, 5), val1) - 256;
+ c->a2 = FIELD_GET(GENMASK(28, 22), val2) - 64;
+ c->a3 = FIELD_GET(GENMASK(21, 15), val2) - 64;
+ c->a4 = FIELD_GET(GENMASK(14, 7), val2) - 128;
+ c->a5 = FIELD_GET(GENMASK(30, 22), val3) - 112;
+ c->a6 = FIELD_GET(GENMASK(21, 15), val3) - 64;
+ c->a7 = FIELD_GET(GENMASK(14, 7), val3) - 128;
+ c->a8 = FIELD_GET(GENMASK(30, 24), val4) - 64;
+ c->a9 = FIELD_GET(GENMASK(23, 15), val4) - 112;
+ c->k = FIELD_GET(GENMASK(14, 8), val4);
+ break;
+ default:
+ dev_err(yas5xx->dev, "unknown version of YAS537\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Used by YAS530, YAS532 and YAS533 */
+static void yas530_dump_calibration(struct yas5xx *yas5xx)
{
struct yas5xx_calibration *c = &yas5xx->calibration;
@@ -698,20 +1142,42 @@ static void yas5xx_dump_calibration(struct yas5xx *yas5xx)
dev_dbg(yas5xx->dev, "dck = %d\n", c->dck);
}
-static int yas5xx_set_offsets(struct yas5xx *yas5xx, s8 ox, s8 oy1, s8 oy2)
+static void yas537_dump_calibration(struct yas5xx *yas5xx)
+{
+ struct yas5xx_calibration *c = &yas5xx->calibration;
+
+ if (yas5xx->version == YAS537_VERSION_1) {
+ dev_dbg(yas5xx->dev, "Cx = %d\n", c->Cx);
+ dev_dbg(yas5xx->dev, "Cy1 = %d\n", c->Cy1);
+ dev_dbg(yas5xx->dev, "Cy2 = %d\n", c->Cy2);
+ dev_dbg(yas5xx->dev, "a2 = %d\n", c->a2);
+ dev_dbg(yas5xx->dev, "a3 = %d\n", c->a3);
+ dev_dbg(yas5xx->dev, "a4 = %d\n", c->a4);
+ dev_dbg(yas5xx->dev, "a5 = %d\n", c->a5);
+ dev_dbg(yas5xx->dev, "a6 = %d\n", c->a6);
+ dev_dbg(yas5xx->dev, "a7 = %d\n", c->a7);
+ dev_dbg(yas5xx->dev, "a8 = %d\n", c->a8);
+ dev_dbg(yas5xx->dev, "a9 = %d\n", c->a9);
+ dev_dbg(yas5xx->dev, "k = %d\n", c->k);
+ }
+}
+
+/* Used by YAS530, YAS532 and YAS533 */
+static int yas530_set_offsets(struct yas5xx *yas5xx, s8 ox, s8 oy1, s8 oy2)
{
int ret;
- ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_X, ox);
+ ret = regmap_write(yas5xx->map, YAS530_OFFSET_X, ox);
if (ret)
return ret;
- ret = regmap_write(yas5xx->map, YAS5XX_OFFSET_Y1, oy1);
+ ret = regmap_write(yas5xx->map, YAS530_OFFSET_Y1, oy1);
if (ret)
return ret;
- return regmap_write(yas5xx->map, YAS5XX_OFFSET_Y2, oy2);
+ return regmap_write(yas5xx->map, YAS530_OFFSET_Y2, oy2);
}
-static s8 yas5xx_adjust_offset(s8 old, int bit, u16 center, u16 measure)
+/* Used by YAS530, YAS532 and YAS533 */
+static s8 yas530_adjust_offset(s8 old, int bit, u16 center, u16 measure)
{
if (measure > center)
return old + BIT(bit);
@@ -720,8 +1186,10 @@ static s8 yas5xx_adjust_offset(s8 old, int bit, u16 center, u16 measure)
return old;
}
-static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
+/* Used by YAS530, YAS532 and YAS533 */
+static int yas530_measure_offsets(struct yas5xx *yas5xx)
{
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
int ret;
u16 center;
u16 t, x, y1, y2;
@@ -729,12 +1197,12 @@ static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
int i;
/* Actuate the init coil and measure offsets */
- ret = regmap_write(yas5xx->map, YAS5XX_ACTUATE_INIT_COIL, 0);
+ ret = regmap_write(yas5xx->map, YAS530_ACTUATE_INIT_COIL, 0);
if (ret)
return ret;
/* When the initcoil is active this should be around the center */
- switch (yas5xx->devid) {
+ switch (ci->devid) {
case YAS530_DEVICE_ID:
center = YAS530_DATA_CENTER;
break;
@@ -763,26 +1231,26 @@ static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
oy2 = 0;
for (i = 4; i >= 0; i--) {
- ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ ret = yas530_set_offsets(yas5xx, ox, oy1, oy2);
if (ret)
return ret;
- ret = yas5xx_measure(yas5xx, &t, &x, &y1, &y2);
+ ret = yas530_measure(yas5xx, &t, &x, &y1, &y2);
if (ret)
return ret;
dev_dbg(yas5xx->dev, "measurement %d: x=%d, y1=%d, y2=%d\n",
5-i, x, y1, y2);
- ox = yas5xx_adjust_offset(ox, i, center, x);
- oy1 = yas5xx_adjust_offset(oy1, i, center, y1);
- oy2 = yas5xx_adjust_offset(oy2, i, center, y2);
+ ox = yas530_adjust_offset(ox, i, center, x);
+ oy1 = yas530_adjust_offset(oy1, i, center, y1);
+ oy2 = yas530_adjust_offset(oy2, i, center, y2);
}
/* Needed for calibration algorithm */
yas5xx->hard_offsets[0] = ox;
yas5xx->hard_offsets[1] = oy1;
yas5xx->hard_offsets[2] = oy2;
- ret = yas5xx_set_offsets(yas5xx, ox, oy1, oy2);
+ ret = yas530_set_offsets(yas5xx, ox, oy1, oy2);
if (ret)
return ret;
@@ -791,35 +1259,139 @@ static int yas5xx_meaure_offsets(struct yas5xx *yas5xx)
return 0;
}
-static int yas5xx_power_on(struct yas5xx *yas5xx)
+/* Used by YAS530, YAS532 and YAS533 */
+static int yas530_power_on(struct yas5xx *yas5xx)
{
unsigned int val;
int ret;
/* Zero the test registers */
- ret = regmap_write(yas5xx->map, YAS5XX_TEST1, 0);
+ ret = regmap_write(yas5xx->map, YAS530_TEST1, 0);
if (ret)
return ret;
- ret = regmap_write(yas5xx->map, YAS5XX_TEST2, 0);
+ ret = regmap_write(yas5xx->map, YAS530_TEST2, 0);
if (ret)
return ret;
/* Set up for no interrupts, calibrated clock divider */
val = FIELD_PREP(YAS5XX_CONFIG_CCK_MASK, yas5xx->calibration.dck);
- ret = regmap_write(yas5xx->map, YAS5XX_CONFIG, val);
+ ret = regmap_write(yas5xx->map, YAS530_CONFIG, val);
if (ret)
return ret;
/* Measure interval 0 (back-to-back?) */
- return regmap_write(yas5xx->map, YAS5XX_MEASURE_INTERVAL, 0);
+ return regmap_write(yas5xx->map, YAS530_MEASURE_INTERVAL, 0);
}
+static int yas537_power_on(struct yas5xx *yas5xx)
+{
+ __be16 buf;
+ int ret;
+ u8 intrvl;
+
+ /* Writing ADCCAL and TRM registers */
+ buf = cpu_to_be16(GENMASK(9, 3));
+ ret = regmap_bulk_write(yas5xx->map, YAS537_ADCCAL, &buf, sizeof(buf));
+ if (ret)
+ return ret;
+ ret = regmap_write(yas5xx->map, YAS537_TRM, GENMASK(7, 0));
+ if (ret)
+ return ret;
+
+ /* The interval value is static in regular operation */
+ intrvl = (YAS537_DEFAULT_SENSOR_DELAY_MS * MILLI
+ - YAS537_MEASURE_TIME_WORST_US) / 4100;
+ ret = regmap_write(yas5xx->map, YAS537_MEASURE_INTERVAL, intrvl);
+ if (ret)
+ return ret;
+
+ /* The average value is also static in regular operation */
+ ret = regmap_write(yas5xx->map, YAS537_AVR, YAS537_MAG_AVERAGE_32_MASK);
+ if (ret)
+ return ret;
+
+ /* Perform the "rcoil" part but skip the "last_after_rcoil" read */
+ ret = regmap_write(yas5xx->map, YAS537_CONFIG, BIT(3));
+ if (ret)
+ return ret;
+
+ /* Wait until the coil has ramped up */
+ usleep_range(YAS537_MAG_RCOIL_TIME_US, YAS537_MAG_RCOIL_TIME_US + 100);
+
+ return 0;
+}
+
+static const struct yas5xx_chip_info yas5xx_chip_info_tbl[] = {
+ [yas530] = {
+ .devid = YAS530_DEVICE_ID,
+ .product_name = "YAS530 MS-3E",
+ .version_names = { "A", "B" },
+ .volatile_reg = yas530_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
+ .scaling_val2 = 100000000, /* picotesla to Gauss */
+ .t_ref = 182, /* counts */
+ .min_temp_x10 = -620, /* tenths of degrees Celsius */
+ .get_measure = yas530_get_measure,
+ .get_calibration_data = yas530_get_calibration_data,
+ .dump_calibration = yas530_dump_calibration,
+ .measure_offsets = yas530_measure_offsets,
+ .power_on = yas530_power_on,
+ },
+ [yas532] = {
+ .devid = YAS532_DEVICE_ID,
+ .product_name = "YAS532 MS-3R",
+ .version_names = { "AB", "AC" },
+ .volatile_reg = yas530_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
+ .scaling_val2 = 100000, /* nanotesla to Gauss */
+ .t_ref = 390, /* counts */
+ .min_temp_x10 = -500, /* tenths of degrees Celsius */
+ .get_measure = yas530_get_measure,
+ .get_calibration_data = yas532_get_calibration_data,
+ .dump_calibration = yas530_dump_calibration,
+ .measure_offsets = yas530_measure_offsets,
+ .power_on = yas530_power_on,
+ },
+ [yas533] = {
+ .devid = YAS532_DEVICE_ID,
+ .product_name = "YAS533 MS-3F",
+ .version_names = { "AB", "AC" },
+ .volatile_reg = yas530_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas530_volatile_reg),
+ .scaling_val2 = 100000, /* nanotesla to Gauss */
+ .t_ref = 390, /* counts */
+ .min_temp_x10 = -500, /* tenths of degrees Celsius */
+ .get_measure = yas530_get_measure,
+ .get_calibration_data = yas532_get_calibration_data,
+ .dump_calibration = yas530_dump_calibration,
+ .measure_offsets = yas530_measure_offsets,
+ .power_on = yas530_power_on,
+ },
+ [yas537] = {
+ .devid = YAS537_DEVICE_ID,
+ .product_name = "YAS537 MS-3T",
+ .version_names = { "v0", "v1" }, /* version naming unknown */
+ .volatile_reg = yas537_volatile_reg,
+ .volatile_reg_qty = ARRAY_SIZE(yas537_volatile_reg),
+ .scaling_val2 = 100000, /* nanotesla to Gauss */
+ .t_ref = 8120, /* counts */
+ .min_temp_x10 = -3860, /* tenths of degrees Celsius */
+ .get_measure = yas537_get_measure,
+ .get_calibration_data = yas537_get_calibration_data,
+ .dump_calibration = yas537_dump_calibration,
+ /* .measure_offsets is not needed for yas537 */
+ .power_on = yas537_power_on,
+ },
+};
+
static int yas5xx_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct iio_dev *indio_dev;
struct device *dev = &i2c->dev;
struct yas5xx *yas5xx;
+ const struct yas5xx_chip_info *ci;
+ int id_check;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*yas5xx));
@@ -843,10 +1415,8 @@ static int yas5xx_probe(struct i2c_client *i2c,
return dev_err_probe(dev, ret, "cannot get regulators\n");
ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
- if (ret) {
- dev_err(dev, "cannot enable regulators\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot enable regulators\n");
/* See comment in runtime resume callback */
usleep_range(31000, 40000);
@@ -854,57 +1424,55 @@ static int yas5xx_probe(struct i2c_client *i2c,
/* This will take the device out of reset if need be */
yas5xx->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(yas5xx->reset)) {
- ret = dev_err_probe(dev, PTR_ERR(yas5xx->reset),
- "failed to get reset line\n");
+ ret = dev_err_probe(dev, PTR_ERR(yas5xx->reset), "failed to get reset line\n");
goto reg_off;
}
yas5xx->map = devm_regmap_init_i2c(i2c, &yas5xx_regmap_config);
if (IS_ERR(yas5xx->map)) {
- dev_err(dev, "failed to allocate register map\n");
- ret = PTR_ERR(yas5xx->map);
+ ret = dev_err_probe(dev, PTR_ERR(yas5xx->map), "failed to allocate register map\n");
goto assert_reset;
}
- ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &yas5xx->devid);
+ ci = device_get_match_data(dev);
+ if (!ci)
+ ci = (const struct yas5xx_chip_info *)id->driver_data;
+ yas5xx->chip_info = ci;
+
+ ret = regmap_read(yas5xx->map, YAS5XX_DEVICE_ID, &id_check);
if (ret)
goto assert_reset;
- switch (yas5xx->devid) {
- case YAS530_DEVICE_ID:
- ret = yas530_get_calibration_data(yas5xx);
- if (ret)
- goto assert_reset;
- dev_info(dev, "detected YAS530 MS-3E %s",
- yas5xx->version ? "B" : "A");
- strncpy(yas5xx->name, "yas530", sizeof(yas5xx->name));
- break;
- case YAS532_DEVICE_ID:
- ret = yas532_get_calibration_data(yas5xx);
- if (ret)
- goto assert_reset;
- dev_info(dev, "detected YAS532/YAS533 MS-3R/F %s",
- yas5xx->version ? "AC" : "AB");
- strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
- break;
- default:
- ret = -ENODEV;
- dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
+ if (id_check != ci->devid) {
+ ret = dev_err_probe(dev, -ENODEV,
+ "device ID %02x doesn't match %s\n",
+ id_check, id->name);
goto assert_reset;
}
- yas5xx_dump_calibration(yas5xx);
- ret = yas5xx_power_on(yas5xx);
+ ret = ci->get_calibration_data(yas5xx);
if (ret)
goto assert_reset;
- ret = yas5xx_meaure_offsets(yas5xx);
+
+ dev_info(dev, "detected %s %s\n", ci->product_name,
+ ci->version_names[yas5xx->version]);
+
+ ci->dump_calibration(yas5xx);
+
+ ret = ci->power_on(yas5xx);
if (ret)
goto assert_reset;
+ if (ci->measure_offsets) {
+ ret = ci->measure_offsets(yas5xx);
+ if (ret)
+ goto assert_reset;
+ }
+
indio_dev->info = &yas5xx_info;
indio_dev->available_scan_masks = yas5xx_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->name = yas5xx->name;
+ indio_dev->name = id->name;
indio_dev->channels = yas5xx_channels;
indio_dev->num_channels = ARRAY_SIZE(yas5xx_channels);
@@ -912,13 +1480,13 @@ static int yas5xx_probe(struct i2c_client *i2c,
yas5xx_handle_trigger,
NULL);
if (ret) {
- dev_err(dev, "triggered buffer setup failed\n");
+ dev_err_probe(dev, ret, "triggered buffer setup failed\n");
goto assert_reset;
}
ret = iio_device_register(indio_dev);
if (ret) {
- dev_err(dev, "device register failed\n");
+ dev_err_probe(dev, ret, "device register failed\n");
goto cleanup_buffer;
}
@@ -943,7 +1511,7 @@ reg_off:
return ret;
}
-static int yas5xx_remove(struct i2c_client *i2c)
+static void yas5xx_remove(struct i2c_client *i2c)
{
struct iio_dev *indio_dev = i2c_get_clientdata(i2c);
struct yas5xx *yas5xx = iio_priv(indio_dev);
@@ -961,8 +1529,6 @@ static int yas5xx_remove(struct i2c_client *i2c)
pm_runtime_disable(dev);
gpiod_set_value_cansleep(yas5xx->reset, 1);
regulator_bulk_disable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
-
- return 0;
}
static int yas5xx_runtime_suspend(struct device *dev)
@@ -980,6 +1546,7 @@ static int yas5xx_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct yas5xx *yas5xx = iio_priv(indio_dev);
+ const struct yas5xx_chip_info *ci = yas5xx->chip_info;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(yas5xx->regs), yas5xx->regs);
@@ -996,7 +1563,7 @@ static int yas5xx_runtime_resume(struct device *dev)
usleep_range(31000, 40000);
gpiod_set_value_cansleep(yas5xx->reset, 0);
- ret = yas5xx_power_on(yas5xx);
+ ret = ci->power_on(yas5xx);
if (ret) {
dev_err(dev, "cannot power on\n");
goto out_reset;
@@ -1015,17 +1582,19 @@ static DEFINE_RUNTIME_DEV_PM_OPS(yas5xx_dev_pm_ops, yas5xx_runtime_suspend,
yas5xx_runtime_resume, NULL);
static const struct i2c_device_id yas5xx_id[] = {
- {"yas530", },
- {"yas532", },
- {"yas533", },
+ {"yas530", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas530] },
+ {"yas532", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas532] },
+ {"yas533", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas533] },
+ {"yas537", (kernel_ulong_t)&yas5xx_chip_info_tbl[yas537] },
{}
};
MODULE_DEVICE_TABLE(i2c, yas5xx_id);
static const struct of_device_id yas5xx_of_match[] = {
- { .compatible = "yamaha,yas530", },
- { .compatible = "yamaha,yas532", },
- { .compatible = "yamaha,yas533", },
+ { .compatible = "yamaha,yas530", &yas5xx_chip_info_tbl[yas530] },
+ { .compatible = "yamaha,yas532", &yas5xx_chip_info_tbl[yas532] },
+ { .compatible = "yamaha,yas533", &yas5xx_chip_info_tbl[yas533] },
+ { .compatible = "yamaha,yas537", &yas5xx_chip_info_tbl[yas537] },
{}
};
MODULE_DEVICE_TABLE(of, yas5xx_of_match);
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index fe514f0b5506..5ec7060d31d9 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -384,7 +384,7 @@ error_unreg_trigger:
return ret;
}
-static int lmp91000_remove(struct i2c_client *client)
+static void lmp91000_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct lmp91000_data *data = iio_priv(indio_dev);
@@ -396,8 +396,6 @@ static int lmp91000_remove(struct i2c_client *client)
iio_triggered_buffer_cleanup(indio_dev);
iio_trigger_unregister(data->trig);
-
- return 0;
}
static const struct of_device_id lmp91000_of_match[] = {
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 0ff756cea63a..c9453389e4f7 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -17,14 +17,14 @@ config ABP060MG
will be called abp060mg.
config BMP280
- tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
+ tristate "Bosch Sensortec BMP180/BMP280/BMP380 pressure sensor I2C driver"
depends on (I2C || SPI_MASTER)
select REGMAP
select BMP280_I2C if (I2C)
select BMP280_SPI if (SPI_MASTER)
help
- Say yes here to build support for Bosch Sensortec BMP180 and BMP280
- pressure and temperature sensors. Also supports the BME280 with
+ Say yes here to build support for Bosch Sensortec BMP180, BMP280 and
+ BMP380 pressure and temperature sensors. Also supports the BME280 with
an additional humidity sensor channel.
To compile this driver as a module, choose M here: the core module
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index fe7aa81e7cc9..c0aff78489b4 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -9,13 +9,22 @@
* Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
*
* Datasheet:
- * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
- * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
- * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-11.pdf
+ * https://cdn-shop.adafruit.com/datasheets/BST-BMP180-DS000-09.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp280-ds001.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
+ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp388-ds001.pdf
+ *
+ * Notice:
+ * The link to the bmp180 datasheet points to an outdated version missing these changes:
+ * - Changed document referral from ANP015 to BST-MPS-AN004-00 on page 26
+ * - Updated equation for B3 param on section 3.5 to ((((long)AC1 * 4 + X3) << oss) + 2) / 4
+ * - Updated RoHS directive to 2011/65/EU effective 8 June 2011 on page 26
*/
#define pr_fmt(fmt) "bmp280: " fmt
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -30,6 +39,8 @@
#include <linux/pm_runtime.h>
#include <linux/random.h>
+#include <asm/unaligned.h>
+
#include "bmp280.h"
/*
@@ -74,12 +85,51 @@ struct bmp280_calib {
s8 H6;
};
+/* See datasheet Section 3.11.1. */
+struct bmp380_calib {
+ u16 T1;
+ u16 T2;
+ s8 T3;
+ s16 P1;
+ s16 P2;
+ s8 P3;
+ s8 P4;
+ u16 P5;
+ u16 P6;
+ s8 P7;
+ s8 P8;
+ s16 P9;
+ s8 P10;
+ s8 P11;
+};
+
static const char *const bmp280_supply_names[] = {
"vddd", "vdda"
};
#define BMP280_NUM_SUPPLIES ARRAY_SIZE(bmp280_supply_names)
+enum bmp380_odr {
+ BMP380_ODR_200HZ,
+ BMP380_ODR_100HZ,
+ BMP380_ODR_50HZ,
+ BMP380_ODR_25HZ,
+ BMP380_ODR_12_5HZ,
+ BMP380_ODR_6_25HZ,
+ BMP380_ODR_3_125HZ,
+ BMP380_ODR_1_5625HZ,
+ BMP380_ODR_0_78HZ,
+ BMP380_ODR_0_39HZ,
+ BMP380_ODR_0_2HZ,
+ BMP380_ODR_0_1HZ,
+ BMP380_ODR_0_05HZ,
+ BMP380_ODR_0_02HZ,
+ BMP380_ODR_0_01HZ,
+ BMP380_ODR_0_006HZ,
+ BMP380_ODR_0_003HZ,
+ BMP380_ODR_0_0015HZ,
+};
+
struct bmp280_data {
struct device *dev;
struct mutex lock;
@@ -90,6 +140,7 @@ struct bmp280_data {
union {
struct bmp180_calib bmp180;
struct bmp280_calib bmp280;
+ struct bmp380_calib bmp380;
} calib;
struct regulator_bulk_data supplies[BMP280_NUM_SUPPLIES];
unsigned int start_up_time; /* in microseconds */
@@ -98,36 +149,99 @@ struct bmp280_data {
u8 oversampling_press;
u8 oversampling_temp;
u8 oversampling_humid;
+ u8 iir_filter_coeff;
+
+ /*
+ * BMP380 devices introduce sampling frequency configuration. See
+ * datasheet sections 3.3.3. and 4.3.19 for more details.
+ *
+ * BMx280 devices only allow indirect configuration of the sampling
+ * frequency, by changing the t_standby duration between measurements,
+ * as detailed in section 3.6.3 of the datasheet.
+ */
+ int sampling_freq;
/*
* Carryover value from temperature conversion, used in pressure
* calculation.
*/
s32 t_fine;
+
+ /*
+ * DMA (thus cache coherency maintenance) may require the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ /* Sensor data buffer */
+ u8 buf[3];
+ /* Calibration data buffers */
+ __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2];
+ __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2];
+ u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT];
+ /* Miscellaneous, endianness-aware data buffers */
+ __le16 le16;
+ __be16 be16;
+ } __aligned(IIO_DMA_MINALIGN);
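Illustrative usage (not part of the patch): every bulk transfer now targets one of these union members rather than a stack variable, so DMA-safe alignment is guaranteed in a single place, e.g.:

	ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
			       data->buf, sizeof(data->buf));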
};
struct bmp280_chip_info {
+ unsigned int id_reg;
+
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ unsigned int start_up_time;
+
const int *oversampling_temp_avail;
int num_oversampling_temp_avail;
+ int oversampling_temp_default;
const int *oversampling_press_avail;
int num_oversampling_press_avail;
+ int oversampling_press_default;
const int *oversampling_humid_avail;
int num_oversampling_humid_avail;
+ int oversampling_humid_default;
+
+ const int *iir_filter_coeffs_avail;
+ int num_iir_filter_coeffs_avail;
+ int iir_filter_coeff_default;
+
+ const int (*sampling_freq_avail)[2];
+ int num_sampling_freq_avail;
+ int sampling_freq_default;
int (*chip_config)(struct bmp280_data *);
int (*read_temp)(struct bmp280_data *, int *);
int (*read_press)(struct bmp280_data *, int *, int *);
int (*read_humid)(struct bmp280_data *, int *, int *);
+ int (*read_calib)(struct bmp280_data *);
};
/*
* These enums are used for indexing into the array of compensation
* parameters for BMP280.
*/
-enum { T1, T2, T3 };
-enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
+enum { T1, T2, T3, P1, P2, P3, P4, P5, P6, P7, P8, P9 };
+
+enum {
+ /* Temperature calib indexes */
+ BMP380_T1 = 0,
+ BMP380_T2 = 2,
+ BMP380_T3 = 4,
+ /* Pressure calib indexes */
+ BMP380_P1 = 5,
+ BMP380_P2 = 7,
+ BMP380_P3 = 9,
+ BMP380_P4 = 10,
+ BMP380_P5 = 11,
+ BMP380_P6 = 13,
+ BMP380_P7 = 15,
+ BMP380_P8 = 16,
+ BMP380_P9 = 17,
+ BMP380_P10 = 19,
+ BMP380_P11 = 20,
+};
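The enum values above are byte offsets into the raw calibration block: 16-bit parameters take two bytes and 8-bit ones a single byte, hence the non-consecutive numbering. A minimal sketch of how such an offset is consumed (mirroring bmp380_read_calib() below; the helper name is hypothetical):

	static u16 bmp380_read_cal_t1(const u8 *cal_buf)
	{
		/* T1 is a little-endian u16 at bytes 0..1 of the dump */
		return get_unaligned_le16(&cal_buf[BMP380_T1]);
	}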
static const struct iio_chan_spec bmp280_channels[] = {
{
@@ -147,56 +261,81 @@ static const struct iio_chan_spec bmp280_channels[] = {
},
};
-static int bmp280_read_calib(struct bmp280_data *data,
- struct bmp280_calib *calib,
- unsigned int chip)
+static const struct iio_chan_spec bmp380_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ },
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ },
+};
+
+static int bmp280_read_calib(struct bmp280_data *data)
{
+ struct bmp280_calib *calib = &data->calib.bmp280;
int ret;
- unsigned int tmp;
- __le16 l16;
- __be16 b16;
- struct device *dev = data->dev;
- __le16 t_buf[BMP280_COMP_TEMP_REG_COUNT / 2];
- __le16 p_buf[BMP280_COMP_PRESS_REG_COUNT / 2];
- /* Read temperature calibration values. */
+
+ /* Read temperature and pressure calibration values. */
ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
- t_buf, BMP280_COMP_TEMP_REG_COUNT);
+ data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
if (ret < 0) {
dev_err(data->dev,
- "failed to read temperature calibration parameters\n");
+ "failed to read temperature and pressure calibration parameters\n");
return ret;
}
- /* Toss the temperature calibration data into the entropy pool */
- add_device_randomness(t_buf, sizeof(t_buf));
+ /* Toss the temperature and pressure calibration data into the entropy pool */
+ add_device_randomness(data->bmp280_cal_buf, sizeof(data->bmp280_cal_buf));
+
+ /* Parse temperature calibration values. */
+ calib->T1 = le16_to_cpu(data->bmp280_cal_buf[T1]);
+ calib->T2 = le16_to_cpu(data->bmp280_cal_buf[T2]);
+ calib->T3 = le16_to_cpu(data->bmp280_cal_buf[T3]);
+
+ /* Parse pressure calibration values. */
+ calib->P1 = le16_to_cpu(data->bmp280_cal_buf[P1]);
+ calib->P2 = le16_to_cpu(data->bmp280_cal_buf[P2]);
+ calib->P3 = le16_to_cpu(data->bmp280_cal_buf[P3]);
+ calib->P4 = le16_to_cpu(data->bmp280_cal_buf[P4]);
+ calib->P5 = le16_to_cpu(data->bmp280_cal_buf[P5]);
+ calib->P6 = le16_to_cpu(data->bmp280_cal_buf[P6]);
+ calib->P7 = le16_to_cpu(data->bmp280_cal_buf[P7]);
+ calib->P8 = le16_to_cpu(data->bmp280_cal_buf[P8]);
+ calib->P9 = le16_to_cpu(data->bmp280_cal_buf[P9]);
- calib->T1 = le16_to_cpu(t_buf[T1]);
- calib->T2 = le16_to_cpu(t_buf[T2]);
- calib->T3 = le16_to_cpu(t_buf[T3]);
+ return 0;
+}
- /* Read pressure calibration values. */
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
- p_buf, BMP280_COMP_PRESS_REG_COUNT);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read pressure calibration parameters\n");
+static int bme280_read_calib(struct bmp280_data *data)
+{
+ struct bmp280_calib *calib = &data->calib.bmp280;
+ struct device *dev = data->dev;
+ unsigned int tmp;
+ int ret;
+
+ /* Load shared calibration params with bmp280 first */
+ ret = bmp280_read_calib(data);
+ if (ret < 0) {
+ dev_err(dev, "failed to read common bmp280 calibration parameters\n");
return ret;
}
- /* Toss the pressure calibration data into the entropy pool */
- add_device_randomness(p_buf, sizeof(p_buf));
-
- calib->P1 = le16_to_cpu(p_buf[P1]);
- calib->P2 = le16_to_cpu(p_buf[P2]);
- calib->P3 = le16_to_cpu(p_buf[P3]);
- calib->P4 = le16_to_cpu(p_buf[P4]);
- calib->P5 = le16_to_cpu(p_buf[P5]);
- calib->P6 = le16_to_cpu(p_buf[P6]);
- calib->P7 = le16_to_cpu(p_buf[P7]);
- calib->P8 = le16_to_cpu(p_buf[P8]);
- calib->P9 = le16_to_cpu(p_buf[P9]);
-
/*
* Read humidity calibration values.
* Due to some odd register addressing we cannot just
@@ -204,8 +343,6 @@ static int bmp280_read_calib(struct bmp280_data *data,
* value separately and sometimes do some bit shifting...
* Humidity data is only available on BME280.
*/
- if (chip != BME280_CHIP_ID)
- return 0;
ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &tmp);
if (ret < 0) {
@@ -214,12 +351,13 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H1 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &l16, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2,
+ &data->le16, sizeof(data->le16));
if (ret < 0) {
dev_err(dev, "failed to read H2 comp value\n");
return ret;
}
- calib->H2 = sign_extend32(le16_to_cpu(l16), 15);
+ calib->H2 = sign_extend32(le16_to_cpu(data->le16), 15);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &tmp);
if (ret < 0) {
@@ -228,20 +366,22 @@ static int bmp280_read_calib(struct bmp280_data *data,
}
calib->H3 = tmp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &b16, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4,
+ &data->be16, sizeof(data->be16));
if (ret < 0) {
dev_err(dev, "failed to read H4 comp value\n");
return ret;
}
- calib->H4 = sign_extend32(((be16_to_cpu(b16) >> 4) & 0xff0) |
- (be16_to_cpu(b16) & 0xf), 11);
+ calib->H4 = sign_extend32(((be16_to_cpu(data->be16) >> 4) & 0xff0) |
+ (be16_to_cpu(data->be16) & 0xf), 11);
- ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &l16, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5,
+ &data->le16, sizeof(data->le16));
if (ret < 0) {
dev_err(dev, "failed to read H5 comp value\n");
return ret;
}
- calib->H5 = sign_extend32(((le16_to_cpu(l16) >> 4) & 0xfff), 11);
+ calib->H5 = sign_extend32(FIELD_GET(BMP280_COMP_H5_MASK, le16_to_cpu(data->le16)), 11);
ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
if (ret < 0) {
@@ -261,8 +401,8 @@ static int bmp280_read_calib(struct bmp280_data *data,
static u32 bmp280_compensate_humidity(struct bmp280_data *data,
s32 adc_humidity)
{
- s32 var;
struct bmp280_calib *calib = &data->calib.bmp280;
+ s32 var;
var = ((s32)data->t_fine) - (s32)76800;
var = ((((adc_humidity << 14) - (calib->H4 << 20) - (calib->H5 * var))
@@ -286,8 +426,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
static s32 bmp280_compensate_temp(struct bmp280_data *data,
s32 adc_temp)
{
- s32 var1, var2;
struct bmp280_calib *calib = &data->calib.bmp280;
+ s32 var1, var2;
var1 = (((adc_temp >> 3) - ((s32)calib->T1 << 1)) *
((s32)calib->T2)) >> 11;
@@ -309,8 +449,8 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
static u32 bmp280_compensate_press(struct bmp280_data *data,
s32 adc_press)
{
- s64 var1, var2, p;
struct bmp280_calib *calib = &data->calib.bmp280;
+ s64 var1, var2, p;
var1 = ((s64)data->t_fine) - 128000;
var2 = var1 * var1 * (s64)calib->P6;
@@ -335,17 +475,17 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
static int bmp280_read_temp(struct bmp280_data *data,
int *val)
{
- int ret;
- __be32 tmp = 0;
s32 adc_temp, comp_temp;
+ int ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
+ data->buf, sizeof(data->buf));
if (ret < 0) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
}
- adc_temp = be32_to_cpu(tmp) >> 12;
+ adc_temp = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
if (adc_temp == BMP280_TEMP_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading temperature skipped\n");
@@ -368,23 +508,23 @@ static int bmp280_read_temp(struct bmp280_data *data,
static int bmp280_read_press(struct bmp280_data *data,
int *val, int *val2)
{
- int ret;
- __be32 tmp = 0;
- s32 adc_press;
u32 comp_press;
+ s32 adc_press;
+ int ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp280_read_temp(data, NULL);
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
+ data->buf, sizeof(data->buf));
if (ret < 0) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
}
- adc_press = be32_to_cpu(tmp) >> 12;
+ adc_press = FIELD_GET(BMP280_MEAS_TRIM_MASK, get_unaligned_be24(data->buf));
if (adc_press == BMP280_PRESS_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading pressure skipped\n");
@@ -400,23 +540,23 @@ static int bmp280_read_press(struct bmp280_data *data,
static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
{
- __be16 tmp;
- int ret;
- s32 adc_humidity;
u32 comp_humidity;
+ s32 adc_humidity;
+ int ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp280_read_temp(data, NULL);
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
+ &data->be16, sizeof(data->be16));
if (ret < 0) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
}
- adc_humidity = be16_to_cpu(tmp);
+ adc_humidity = be16_to_cpu(data->be16);
if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
/* reading was skipped */
dev_err(data->dev, "reading humidity skipped\n");
@@ -433,8 +573,8 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- int ret;
struct bmp280_data *data = iio_priv(indio_dev);
+ int ret;
pm_runtime_get_sync(data->dev);
mutex_lock(&data->lock);
@@ -475,6 +615,25 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!data->chip_info->sampling_freq_avail) {
+ ret = -EINVAL;
+ break;
+ }
+
+ *val = data->chip_info->sampling_freq_avail[data->sampling_freq][0];
+ *val2 = data->chip_info->sampling_freq_avail[data->sampling_freq][1];
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (!data->chip_info->iir_filter_coeffs_avail) {
+ ret = -EINVAL;
+ break;
+ }
+
+ *val = (1 << data->iir_filter_coeff) - 1;
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
break;
@@ -490,15 +649,23 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
int val)
{
- int i;
const int *avail = data->chip_info->oversampling_humid_avail;
const int n = data->chip_info->num_oversampling_humid_avail;
+ int ret, prev;
+ int i;
for (i = 0; i < n; i++) {
if (avail[i] == val) {
+ prev = data->oversampling_humid;
data->oversampling_humid = ilog2(val);
- return data->chip_info->chip_config(data);
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->oversampling_humid = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
}
}
return -EINVAL;
@@ -507,15 +674,23 @@ static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
int val)
{
- int i;
const int *avail = data->chip_info->oversampling_temp_avail;
const int n = data->chip_info->num_oversampling_temp_avail;
+ int ret, prev;
+ int i;
for (i = 0; i < n; i++) {
if (avail[i] == val) {
+ prev = data->oversampling_temp;
data->oversampling_temp = ilog2(val);
- return data->chip_info->chip_config(data);
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->oversampling_temp = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
}
}
return -EINVAL;
@@ -524,15 +699,73 @@ static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
int val)
{
- int i;
const int *avail = data->chip_info->oversampling_press_avail;
const int n = data->chip_info->num_oversampling_press_avail;
+ int ret, prev;
+ int i;
for (i = 0; i < n; i++) {
if (avail[i] == val) {
+ prev = data->oversampling_press;
data->oversampling_press = ilog2(val);
- return data->chip_info->chip_config(data);
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->oversampling_press = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_sampling_frequency(struct bmp280_data *data,
+ int val, int val2)
+{
+ const int (*avail)[2] = data->chip_info->sampling_freq_avail;
+ const int n = data->chip_info->num_sampling_freq_avail;
+ int ret, prev;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i][0] == val && avail[i][1] == val2) {
+ prev = data->sampling_freq;
+ data->sampling_freq = i;
+
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->sampling_freq = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_iir_filter_coeffs(struct bmp280_data *data, int val)
+{
+ const int *avail = data->chip_info->iir_filter_coeffs_avail;
+ const int n = data->chip_info->num_iir_filter_coeffs_avail;
+ int ret, prev;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] - 1 == val) {
+ prev = data->iir_filter_coeff;
+ data->iir_filter_coeff = i;
+
+ ret = data->chip_info->chip_config(data);
+ if (ret) {
+ data->iir_filter_coeff = prev;
+ data->chip_info->chip_config(data);
+ return ret;
+ }
+ return 0;
}
}
return -EINVAL;
@@ -542,9 +775,15 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- int ret = 0;
struct bmp280_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ /*
+ * The helper functions below update the sensor's running configuration.
+ * If an error happens while applying new settings, they try to restore
+ * the previous parameters to ensure the sensor is left in a known
+ * working configuration.
+ */
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
pm_runtime_get_sync(data->dev);
@@ -567,6 +806,22 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ pm_runtime_get_sync(data->dev);
+ mutex_lock(&data->lock);
+ ret = bmp280_write_sampling_frequency(data, val, val2);
+ mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ break;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ pm_runtime_get_sync(data->dev);
+ mutex_lock(&data->lock);
+ ret = bmp280_write_iir_filter_coeffs(data, val);
+ mutex_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ break;
default:
return -EINVAL;
}
@@ -597,6 +852,17 @@ static int bmp280_read_avail(struct iio_dev *indio_dev,
}
*type = IIO_VAL_INT;
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (const int *)data->chip_info->sampling_freq_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = data->chip_info->num_sampling_freq_avail;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = data->chip_info->iir_filter_coeffs_avail;
+ *type = IIO_VAL_INT;
+ *length = data->chip_info->num_iir_filter_coeffs_avail;
+ return IIO_AVAIL_LIST;
default:
return -EINVAL;
}
@@ -610,9 +876,9 @@ static const struct iio_info bmp280_info = {
static int bmp280_chip_config(struct bmp280_data *data)
{
+ u8 osrs = FIELD_PREP(BMP280_OSRS_TEMP_MASK, data->oversampling_temp + 1) |
+ FIELD_PREP(BMP280_OSRS_PRESS_MASK, data->oversampling_press + 1);
int ret;
- u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
- BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
@@ -640,21 +906,39 @@ static int bmp280_chip_config(struct bmp280_data *data)
static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
static const struct bmp280_chip_info bmp280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 2,
+
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ /*
+ * Oversampling config values on BMx280 have one additional setting
+ * that other generations of the family don't:
+ * The value 0 means the measurement is bypassed instead of
+ * oversampling set to x1.
+ *
+ * To account for this difference, and to preserve the same common
+ * config logic, this is handled later in the chip_config callback
+ * by incrementing the oversampling setting by one.
+ */
+ .oversampling_temp_default = BMP280_OSRS_TEMP_2X - 1,
.oversampling_press_avail = bmp280_oversampling_avail,
.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_press_default = BMP280_OSRS_PRESS_16X - 1,
.chip_config = bmp280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
+ .read_calib = bmp280_read_calib,
};
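Illustrative only (not part of the patch): with the x2 temperature default above, the driver stores ilog2(2) == 1 and chip_config() writes 1 + 1 == 2 into the OSRS field, so the "measurement skipped" field value 0 is never used by accident:

	/* stored: ilog2(2) == 1; written to the register field: 1 + 1 == 2 (x2) */
	u8 osrs = FIELD_PREP(BMP280_OSRS_TEMP_MASK, ilog2(2) + 1);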
static int bme280_chip_config(struct bmp280_data *data)
{
+ u8 osrs = FIELD_PREP(BMP280_OSRS_HUMIDITY_MASK, data->oversampling_humid + 1);
int ret;
- u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
/*
* Oversampling of humidity must be set before oversampling of
@@ -670,27 +954,405 @@ static int bme280_chip_config(struct bmp280_data *data)
}
static const struct bmp280_chip_info bme280_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 3,
+
.oversampling_temp_avail = bmp280_oversampling_avail,
.num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_temp_default = BMP280_OSRS_TEMP_2X - 1,
.oversampling_press_avail = bmp280_oversampling_avail,
.num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_press_default = BMP280_OSRS_PRESS_16X - 1,
.oversampling_humid_avail = bmp280_oversampling_avail,
.num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+ .oversampling_humid_default = BMP280_OSRS_HUMIDITY_16X - 1,
.chip_config = bme280_chip_config,
.read_temp = bmp280_read_temp,
.read_press = bmp280_read_press,
.read_humid = bmp280_read_humid,
+ .read_calib = bme280_read_calib,
};
-static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+/*
+ * Helper function to send a command to BMP3XX sensors.
+ *
+ * The sensor processes commands written to the CMD register and signals
+ * the execution result through the "cmd_rdy" and "cmd_error" flags in the
+ * STATUS and ERROR registers.
+ */
+static int bmp380_cmd(struct bmp280_data *data, u8 cmd)
+{
+ unsigned int reg;
+ int ret;
+
+ /* Check if device is ready to process a command */
+ ret = regmap_read(data->regmap, BMP380_REG_STATUS, &reg);
+ if (ret) {
+ dev_err(data->dev, "failed to read error register\n");
+ return ret;
+ }
+ if (!(reg & BMP380_STATUS_CMD_RDY_MASK)) {
+ dev_err(data->dev, "device is not ready to accept commands\n");
+ return -EBUSY;
+ }
+
+ /* Send command to process */
+ ret = regmap_write(data->regmap, BMP380_REG_CMD, cmd);
+ if (ret) {
+ dev_err(data->dev, "failed to send command to device\n");
+ return ret;
+ }
+ /* Wait for 2ms for command to be processed */
+ usleep_range(data->start_up_time, data->start_up_time + 100);
+ /* Check for command processing error */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &reg);
+ if (ret) {
+ dev_err(data->dev, "error reading ERROR reg\n");
+ return ret;
+ }
+ if (reg & BMP380_ERR_CMD_MASK) {
+ dev_err(data->dev, "error processing command 0x%X\n", cmd);
+ return -EINVAL;
+ }
+
+ return 0;
+}
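Typical calling convention, as used later in bmp280_common_probe() for the mandatory soft reset (repeated here purely as an illustration):

	ret = bmp380_cmd(data, BMP380_CMD_SOFT_RESET);
	if (ret < 0)
		return ret;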
+
+/*
+ * Returns temperature in degrees Celsius, resolution is 0.01 degrees. An output
+ * value of "5123" equals 51.23 degrees Celsius. t_fine carries the fine
+ * temperature as a global value.
+ *
+ * Taken from the datasheet, Appendix 9 "Compensation formula", and the repo
+ * https://github.com/BoschSensortec/BMP3-Sensor-API.
+ */
+static s32 bmp380_compensate_temp(struct bmp280_data *data, u32 adc_temp)
+{
+ s64 var1, var2, var3, var4, var5, var6, comp_temp;
+ struct bmp380_calib *calib = &data->calib.bmp380;
+
+ var1 = ((s64) adc_temp) - (((s64) calib->T1) << 8);
+ var2 = var1 * ((s64) calib->T2);
+ var3 = var1 * var1;
+ var4 = var3 * ((s64) calib->T3);
+ var5 = (var2 << 18) + var4;
+ var6 = var5 >> 32;
+ data->t_fine = (s32) var6;
+ comp_temp = (var6 * 25) >> 14;
+
+ comp_temp = clamp_val(comp_temp, BMP380_MIN_TEMP, BMP380_MAX_TEMP);
+ return (s32) comp_temp;
+}
+
+/*
+ * Returns pressure as an unsigned 32-bit integer in units of 1/100 Pa. An output
+ * value of "9528709" represents 9528709/100 = 95287.09 Pa = 952.8709 hPa.
+ *
+ * Taken from the datasheet, Section 9.3 "Pressure compensation", and the repository
+ * https://github.com/BoschSensortec/BMP3-Sensor-API.
+ */
+static u32 bmp380_compensate_press(struct bmp280_data *data, u32 adc_press)
+{
+ s64 var1, var2, var3, var4, var5, var6, offset, sensitivity;
+ struct bmp380_calib *calib = &data->calib.bmp380;
+ u32 comp_press;
+
+ var1 = (s64)data->t_fine * (s64)data->t_fine;
+ var2 = var1 >> 6;
+ var3 = (var2 * ((s64) data->t_fine)) >> 8;
+ var4 = ((s64)calib->P8 * var3) >> 5;
+ var5 = ((s64)calib->P7 * var1) << 4;
+ var6 = ((s64)calib->P6 * (s64)data->t_fine) << 22;
+ offset = ((s64)calib->P5 << 47) + var4 + var5 + var6;
+ var2 = ((s64)calib->P4 * var3) >> 5;
+ var4 = ((s64)calib->P3 * var1) << 2;
+ var5 = ((s64)calib->P2 - ((s64)1 << 14)) *
+ ((s64)data->t_fine << 21);
+ sensitivity = (((s64) calib->P1 - ((s64) 1 << 14)) << 46) +
+ var2 + var4 + var5;
+ var1 = (sensitivity >> 24) * (s64)adc_press;
+ var2 = (s64)calib->P10 * (s64)data->t_fine;
+ var3 = var2 + ((s64)calib->P9 << 16);
+ var4 = (var3 * (s64)adc_press) >> 13;
+
+ /*
+ * Dividing by 10 followed by multiplying by 10 to avoid
+ * possible overflow caused by (uncomp_data->pressure * partial_data4).
+ */
+ var5 = ((s64)adc_press * div_s64(var4, 10)) >> 9;
+ var5 *= 10;
+ var6 = (s64)adc_press * (s64)adc_press;
+ var2 = ((s64)calib->P11 * var6) >> 16;
+ var3 = (var2 * (s64)adc_press) >> 7;
+ var4 = (offset >> 2) + var1 + var5 + var3;
+ comp_press = ((u64)var4 * 25) >> 40;
+
+ comp_press = clamp_val(comp_press, BMP380_MIN_PRES, BMP380_MAX_PRES);
+ return comp_press;
+}
+
+static int bmp380_read_temp(struct bmp280_data *data, int *val)
+{
+ s32 comp_temp;
+ u32 adc_temp;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_TEMP_XLSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read temperature\n");
+ return ret;
+ }
+
+ adc_temp = get_unaligned_le24(data->buf);
+ if (adc_temp == BMP380_TEMP_SKIPPED) {
+ dev_err(data->dev, "reading temperature skipped\n");
+ return -EIO;
+ }
+ comp_temp = bmp380_compensate_temp(data, adc_temp);
+
+ /*
+ * val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
+ /* IIO reports temperatures in milli Celsius */
+ *val = comp_temp * 10;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp380_read_press(struct bmp280_data *data, int *val, int *val2)
+{
+ s32 comp_press;
+ u32 adc_press;
+ int ret;
+
+ /* Read and compensate for temperature so we get a reading of t_fine */
+ ret = bmp380_read_temp(data, NULL);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_PRESS_XLSB,
+ data->buf, sizeof(data->buf));
+ if (ret) {
+ dev_err(data->dev, "failed to read pressure\n");
+ return ret;
+ }
+
+ adc_press = get_unaligned_le24(data->buf);
+ if (adc_press == BMP380_PRESS_SKIPPED) {
+ dev_err(data->dev, "reading pressure skipped\n");
+ return -EIO;
+ }
+ comp_press = bmp380_compensate_press(data, adc_press);
+
+ *val = comp_press;
+ /* Compensated pressure is in cPa (centipascals) */
+ *val2 = 100000;
+
+ return IIO_VAL_FRACTIONAL;
+}
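Worked example (illustrative): for the datasheet sample of 9528709 cPa, the IIO_VAL_FRACTIONAL pair above is reported as 9528709 / 100000 = 95.28709 kPa, which is the unit IIO uses for pressure channels.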
+
+static int bmp380_read_calib(struct bmp280_data *data)
+{
+ struct bmp380_calib *calib = &data->calib.bmp380;
+ int ret;
+
+ /* Read temperature and pressure calibration data */
+ ret = regmap_bulk_read(data->regmap, BMP380_REG_CALIB_TEMP_START,
+ data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
+ if (ret) {
+ dev_err(data->dev,
+ "failed to read temperature calibration parameters\n");
+ return ret;
+ }
+
+ /* Toss the temperature calibration data into the entropy pool */
+ add_device_randomness(data->bmp380_cal_buf, sizeof(data->bmp380_cal_buf));
+
+ /* Parse calibration values */
+ calib->T1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T1]);
+ calib->T2 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_T2]);
+ calib->T3 = data->bmp380_cal_buf[BMP380_T3];
+ calib->P1 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P1]);
+ calib->P2 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P2]);
+ calib->P3 = data->bmp380_cal_buf[BMP380_P3];
+ calib->P4 = data->bmp380_cal_buf[BMP380_P4];
+ calib->P5 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P5]);
+ calib->P6 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P6]);
+ calib->P7 = data->bmp380_cal_buf[BMP380_P7];
+ calib->P8 = data->bmp380_cal_buf[BMP380_P8];
+ calib->P9 = get_unaligned_le16(&data->bmp380_cal_buf[BMP380_P9]);
+ calib->P10 = data->bmp380_cal_buf[BMP380_P10];
+ calib->P11 = data->bmp380_cal_buf[BMP380_P11];
+
+ return 0;
+}
+
+static const int bmp380_odr_table[][2] = {
+ [BMP380_ODR_200HZ] = {200, 0},
+ [BMP380_ODR_100HZ] = {100, 0},
+ [BMP380_ODR_50HZ] = {50, 0},
+ [BMP380_ODR_25HZ] = {25, 0},
+ [BMP380_ODR_12_5HZ] = {12, 500000},
+ [BMP380_ODR_6_25HZ] = {6, 250000},
+ [BMP380_ODR_3_125HZ] = {3, 125000},
+ [BMP380_ODR_1_5625HZ] = {1, 562500},
+ [BMP380_ODR_0_78HZ] = {0, 781250},
+ [BMP380_ODR_0_39HZ] = {0, 390625},
+ [BMP380_ODR_0_2HZ] = {0, 195313},
+ [BMP380_ODR_0_1HZ] = {0, 97656},
+ [BMP380_ODR_0_05HZ] = {0, 48828},
+ [BMP380_ODR_0_02HZ] = {0, 24414},
+ [BMP380_ODR_0_01HZ] = {0, 12207},
+ [BMP380_ODR_0_006HZ] = {0, 6104},
+ [BMP380_ODR_0_003HZ] = {0, 3052},
+ [BMP380_ODR_0_0015HZ] = {0, 1526},
+};
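Each entry above is an {integer Hz, micro-Hz} pair as consumed via IIO_VAL_INT_PLUS_MICRO, e.g. {12, 500000} encodes 12.5 Hz and {0, 781250} encodes 0.78125 Hz. A minimal sketch of the conversion (bmp380_odr_to_mhz() is a hypothetical helper, not part of the driver):

	static unsigned int bmp380_odr_to_mhz(const int odr[2])
	{
		/* e.g. {12, 500000} -> 12500 mHz */
		return odr[0] * 1000 + odr[1] / 1000;
	}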
+
+static int bmp380_chip_config(struct bmp280_data *data)
{
+ bool change = false, aux;
+ unsigned int tmp;
+ u8 osrs;
int ret;
+
+ /* Configure power control register */
+ ret = regmap_update_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_CTRL_SENSORS_MASK,
+ BMP380_CTRL_SENSORS_PRESS_EN |
+ BMP380_CTRL_SENSORS_TEMP_EN);
+ if (ret) {
+ dev_err(data->dev,
+ "failed to write operation control register\n");
+ return ret;
+ }
+
+ /* Configure oversampling */
+ osrs = FIELD_PREP(BMP380_OSRS_TEMP_MASK, data->oversampling_temp) |
+ FIELD_PREP(BMP380_OSRS_PRESS_MASK, data->oversampling_press);
+
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_OSR,
+ BMP380_OSRS_TEMP_MASK |
+ BMP380_OSRS_PRESS_MASK,
+ osrs, &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write oversampling register\n");
+ return ret;
+ }
+ change = change || aux;
+
+ /* Configure output data rate */
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_ODR,
+ BMP380_ODRS_MASK, data->sampling_freq, &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write ODR selection register\n");
+ return ret;
+ }
+ change = change || aux;
+
+ /* Set filter data */
+ ret = regmap_update_bits_check(data->regmap, BMP380_REG_CONFIG, BMP380_FILTER_MASK,
+ FIELD_PREP(BMP380_FILTER_MASK, data->iir_filter_coeff),
+ &aux);
+ if (ret) {
+ dev_err(data->dev, "failed to write config register\n");
+ return ret;
+ }
+ change = change || aux;
+
+ if (change) {
+ /*
+ * Configuration errors are detected on the fly during a measurement
+ * cycle. If the sampling frequency is too low, it's faster to reset
+ * the measurement loop than to wait until the next measurement is due.
+ *
+ * Reset the sensor measurement loop by toggling between the sleep and
+ * normal operating modes.
+ */
+ ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_MODE_MASK,
+ FIELD_PREP(BMP380_MODE_MASK, BMP380_MODE_SLEEP));
+ if (ret) {
+ dev_err(data->dev, "failed to set sleep mode\n");
+ return ret;
+ }
+ usleep_range(2000, 2500);
+ ret = regmap_write_bits(data->regmap, BMP380_REG_POWER_CONTROL,
+ BMP380_MODE_MASK,
+ FIELD_PREP(BMP380_MODE_MASK, BMP380_MODE_NORMAL));
+ if (ret) {
+ dev_err(data->dev, "failed to set normal mode\n");
+ return ret;
+ }
+ /*
+ * Wait for a measurement before checking the configuration error
+ * flag, using the longest measurement time indicated in section
+ * 3.9.1 of the datasheet.
+ */
+ msleep(80);
+
+ /* Check config error flag */
+ ret = regmap_read(data->regmap, BMP380_REG_ERROR, &tmp);
+ if (ret) {
+ dev_err(data->dev,
+ "failed to read error register\n");
+ return ret;
+ }
+ if (tmp & BMP380_ERR_CONF_MASK) {
+ dev_warn(data->dev,
+ "sensor flagged configuration as incompatible\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 };
+static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128};
+
+static const struct bmp280_chip_info bmp380_chip_info = {
+ .id_reg = BMP380_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp380_channels,
+ .num_channels = 2,
+
+ .oversampling_temp_avail = bmp380_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp380_oversampling_avail),
+ .oversampling_temp_default = ilog2(1),
+
+ .oversampling_press_avail = bmp380_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp380_oversampling_avail),
+ .oversampling_press_default = ilog2(4),
+
+ .sampling_freq_avail = bmp380_odr_table,
+ .num_sampling_freq_avail = ARRAY_SIZE(bmp380_odr_table) * 2,
+ .sampling_freq_default = BMP380_ODR_50HZ,
+
+ .iir_filter_coeffs_avail = bmp380_iir_filter_coeffs_avail,
+ .num_iir_filter_coeffs_avail = ARRAY_SIZE(bmp380_iir_filter_coeffs_avail),
+ .iir_filter_coeff_default = 2,
+
+ .chip_config = bmp380_chip_config,
+ .read_temp = bmp380_read_temp,
+ .read_press = bmp380_read_press,
+ .read_calib = bmp380_read_calib,
+};
+
+static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+{
const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
unsigned int delay_us;
unsigned int ctrl;
+ int ret;
if (data->use_eoc)
reinit_completion(&data->done);
@@ -710,7 +1372,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
if (!ret)
dev_err(data->dev, "timeout waiting for completion\n");
} else {
- if (ctrl_meas == BMP180_MEAS_TEMP)
+ if (FIELD_GET(BMP180_MEAS_CTRL_MASK, ctrl_meas) == BMP180_MEAS_TEMP)
delay_us = 4500;
else
delay_us =
@@ -732,55 +1394,57 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
{
- __be16 tmp;
int ret;
- ret = bmp180_measure(data, BMP180_MEAS_TEMP);
+ ret = bmp180_measure(data,
+ FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_TEMP) |
+ BMP180_MEAS_SCO);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
+ &data->be16, sizeof(data->be16));
if (ret)
return ret;
- *val = be16_to_cpu(tmp);
+ *val = be16_to_cpu(data->be16);
return 0;
}
-static int bmp180_read_calib(struct bmp280_data *data,
- struct bmp180_calib *calib)
+static int bmp180_read_calib(struct bmp280_data *data)
{
+ struct bmp180_calib *calib = &data->calib.bmp180;
int ret;
int i;
- __be16 buf[BMP180_REG_CALIB_COUNT / 2];
- ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START, buf,
- sizeof(buf));
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START,
+ data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
if (ret < 0)
return ret;
/* None of the words has the value 0 or 0xFFFF */
- for (i = 0; i < ARRAY_SIZE(buf); i++) {
- if (buf[i] == cpu_to_be16(0) || buf[i] == cpu_to_be16(0xffff))
+ for (i = 0; i < ARRAY_SIZE(data->bmp180_cal_buf); i++) {
+ if (data->bmp180_cal_buf[i] == cpu_to_be16(0) ||
+ data->bmp180_cal_buf[i] == cpu_to_be16(0xffff))
return -EIO;
}
/* Toss the calibration data into the entropy pool */
- add_device_randomness(buf, sizeof(buf));
-
- calib->AC1 = be16_to_cpu(buf[AC1]);
- calib->AC2 = be16_to_cpu(buf[AC2]);
- calib->AC3 = be16_to_cpu(buf[AC3]);
- calib->AC4 = be16_to_cpu(buf[AC4]);
- calib->AC5 = be16_to_cpu(buf[AC5]);
- calib->AC6 = be16_to_cpu(buf[AC6]);
- calib->B1 = be16_to_cpu(buf[B1]);
- calib->B2 = be16_to_cpu(buf[B2]);
- calib->MB = be16_to_cpu(buf[MB]);
- calib->MC = be16_to_cpu(buf[MC]);
- calib->MD = be16_to_cpu(buf[MD]);
+ add_device_randomness(data->bmp180_cal_buf, sizeof(data->bmp180_cal_buf));
+
+ calib->AC1 = be16_to_cpu(data->bmp180_cal_buf[AC1]);
+ calib->AC2 = be16_to_cpu(data->bmp180_cal_buf[AC2]);
+ calib->AC3 = be16_to_cpu(data->bmp180_cal_buf[AC3]);
+ calib->AC4 = be16_to_cpu(data->bmp180_cal_buf[AC4]);
+ calib->AC5 = be16_to_cpu(data->bmp180_cal_buf[AC5]);
+ calib->AC6 = be16_to_cpu(data->bmp180_cal_buf[AC6]);
+ calib->B1 = be16_to_cpu(data->bmp180_cal_buf[B1]);
+ calib->B2 = be16_to_cpu(data->bmp180_cal_buf[B2]);
+ calib->MB = be16_to_cpu(data->bmp180_cal_buf[MB]);
+ calib->MC = be16_to_cpu(data->bmp180_cal_buf[MC]);
+ calib->MD = be16_to_cpu(data->bmp180_cal_buf[MD]);
return 0;
}
@@ -793,8 +1457,8 @@ static int bmp180_read_calib(struct bmp280_data *data,
*/
static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
{
- s32 x1, x2;
struct bmp180_calib *calib = &data->calib.bmp180;
+ s32 x1, x2;
x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
x2 = (calib->MC << 11) / (x1 + calib->MD);
@@ -805,8 +1469,8 @@ static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
static int bmp180_read_temp(struct bmp280_data *data, int *val)
{
- int ret;
s32 adc_temp, comp_temp;
+ int ret;
ret = bmp180_read_adc_temp(data, &adc_temp);
if (ret)
@@ -828,19 +1492,22 @@ static int bmp180_read_temp(struct bmp280_data *data, int *val)
static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
{
- int ret;
- __be32 tmp = 0;
u8 oss = data->oversampling_press;
+ int ret;
- ret = bmp180_measure(data, BMP180_MEAS_PRESS_X(oss));
+ ret = bmp180_measure(data,
+ FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_PRESS) |
+ FIELD_PREP(BMP180_OSRS_PRESS_MASK, oss) |
+ BMP180_MEAS_SCO);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB,
+ data->buf, sizeof(data->buf));
if (ret)
return ret;
- *val = (be32_to_cpu(tmp) >> 8) >> (8 - oss);
+ *val = get_unaligned_be24(data->buf) >> (8 - oss);
return 0;
}
@@ -852,11 +1519,11 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
*/
static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
{
+ struct bmp180_calib *calib = &data->calib.bmp180;
+ s32 oss = data->oversampling_press;
s32 x1, x2, x3, p;
s32 b3, b6;
u32 b4, b7;
- s32 oss = data->oversampling_press;
- struct bmp180_calib *calib = &data->calib.bmp180;
b6 = data->t_fine - 4000;
x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
@@ -883,9 +1550,9 @@ static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
static int bmp180_read_press(struct bmp280_data *data,
int *val, int *val2)
{
- int ret;
- s32 adc_press;
u32 comp_press;
+ s32 adc_press;
+ int ret;
/* Read and compensate temperature so we get a reading of t_fine. */
ret = bmp180_read_temp(data, NULL);
@@ -913,17 +1580,25 @@ static const int bmp180_oversampling_temp_avail[] = { 1 };
static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
static const struct bmp280_chip_info bmp180_chip_info = {
+ .id_reg = BMP280_REG_ID,
+ .start_up_time = 2000,
+ .channels = bmp280_channels,
+ .num_channels = 2,
+
.oversampling_temp_avail = bmp180_oversampling_temp_avail,
.num_oversampling_temp_avail =
ARRAY_SIZE(bmp180_oversampling_temp_avail),
+ .oversampling_temp_default = 0,
.oversampling_press_avail = bmp180_oversampling_press_avail,
.num_oversampling_press_avail =
ARRAY_SIZE(bmp180_oversampling_press_avail),
+ .oversampling_press_default = BMP180_MEAS_PRESS_8X,
.chip_config = bmp180_chip_config,
.read_temp = bmp180_read_temp,
.read_press = bmp180_read_press,
+ .read_calib = bmp180_read_calib,
};
static irqreturn_t bmp085_eoc_irq(int irq, void *d)
@@ -990,11 +1665,12 @@ int bmp280_common_probe(struct device *dev,
const char *name,
int irq)
{
- int ret;
+ const struct bmp280_chip_info *chip_info;
struct iio_dev *indio_dev;
struct bmp280_data *data;
- unsigned int chip_id;
struct gpio_desc *gpiod;
+ unsigned int chip_id;
+ int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -1005,36 +1681,36 @@ int bmp280_common_probe(struct device *dev,
data->dev = dev;
indio_dev->name = name;
- indio_dev->channels = bmp280_channels;
indio_dev->info = &bmp280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
switch (chip) {
case BMP180_CHIP_ID:
- indio_dev->num_channels = 2;
- data->chip_info = &bmp180_chip_info;
- data->oversampling_press = ilog2(8);
- data->oversampling_temp = ilog2(1);
- data->start_up_time = 10000;
+ chip_info = &bmp180_chip_info;
break;
case BMP280_CHIP_ID:
- indio_dev->num_channels = 2;
- data->chip_info = &bmp280_chip_info;
- data->oversampling_press = ilog2(16);
- data->oversampling_temp = ilog2(2);
- data->start_up_time = 2000;
+ chip_info = &bmp280_chip_info;
break;
case BME280_CHIP_ID:
- indio_dev->num_channels = 3;
- data->chip_info = &bme280_chip_info;
- data->oversampling_press = ilog2(16);
- data->oversampling_humid = ilog2(16);
- data->oversampling_temp = ilog2(2);
- data->start_up_time = 2000;
+ chip_info = &bme280_chip_info;
+ break;
+ case BMP380_CHIP_ID:
+ chip_info = &bmp380_chip_info;
break;
default:
return -EINVAL;
}
+ data->chip_info = chip_info;
+
+ /* Apply initial values from chip info structure */
+ indio_dev->channels = chip_info->channels;
+ indio_dev->num_channels = chip_info->num_channels;
+ data->oversampling_press = chip_info->oversampling_press_default;
+ data->oversampling_humid = chip_info->oversampling_humid_default;
+ data->oversampling_temp = chip_info->oversampling_temp_default;
+ data->iir_filter_coeff = chip_info->iir_filter_coeff_default;
+ data->sampling_freq = chip_info->sampling_freq_default;
+ data->start_up_time = chip_info->start_up_time;
/* Bring up regulators */
regulator_bulk_set_supply_names(data->supplies,
@@ -1071,7 +1747,8 @@ int bmp280_common_probe(struct device *dev,
}
data->regmap = regmap;
- ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
+
+ ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id);
if (ret < 0)
return ret;
if (chip_id != chip) {
@@ -1080,6 +1757,13 @@ int bmp280_common_probe(struct device *dev,
return -EINVAL;
}
+ /* BMP3xx requires soft-reset as part of initialization */
+ if (chip_id == BMP380_CHIP_ID) {
+ ret = bmp380_cmd(data, BMP380_CMD_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+ }
+
ret = data->chip_info->chip_config(data);
if (ret < 0)
return ret;
@@ -1091,21 +1775,11 @@ int bmp280_common_probe(struct device *dev,
* non-volatile memory during production". Let's read them out at probe
* time once. They will not change.
*/
- if (chip_id == BMP180_CHIP_ID) {
- ret = bmp180_read_calib(data, &data->calib.bmp180);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read calibration coefficients\n");
- return ret;
- }
- } else if (chip_id == BMP280_CHIP_ID || chip_id == BME280_CHIP_ID) {
- ret = bmp280_read_calib(data, &data->calib.bmp280, chip_id);
- if (ret < 0) {
- dev_err(data->dev,
- "failed to read calibration coefficients\n");
- return ret;
- }
- }
+
+ ret = data->chip_info->read_calib(data);
+ if (ret < 0)
+ return dev_err_probe(data->dev, ret,
+ "failed to read calibration coefficients\n");
/*
* Attempt to grab an optional EOC IRQ - only the BMP085 has this
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index bf4a7a617537..0c27211f3ea0 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -19,6 +19,9 @@ static int bmp280_i2c_probe(struct i2c_client *client,
case BME280_CHIP_ID:
regmap_config = &bmp280_regmap_config;
break;
+ case BMP380_CHIP_ID:
+ regmap_config = &bmp380_regmap_config;
+ break;
default:
return -EINVAL;
}
@@ -37,19 +40,21 @@ static int bmp280_i2c_probe(struct i2c_client *client,
}
static const struct of_device_id bmp280_of_i2c_match[] = {
- { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
- { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
- { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
{ .compatible = "bosch,bmp085", .data = (void *)BMP180_CHIP_ID },
+ { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
+ { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
+ { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
+ { .compatible = "bosch,bmp380", .data = (void *)BMP380_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
static const struct i2c_device_id bmp280_i2c_id[] = {
- {"bmp280", BMP280_CHIP_ID },
- {"bmp180", BMP180_CHIP_ID },
{"bmp085", BMP180_CHIP_ID },
+ {"bmp180", BMP180_CHIP_ID },
+ {"bmp280", BMP280_CHIP_ID },
{"bme280", BME280_CHIP_ID },
+ {"bmp380", BMP380_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index 969698518984..c98c67970265 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -72,6 +72,49 @@ static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
+static bool bmp380_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP380_REG_CMD:
+ case BMP380_REG_CONFIG:
+ case BMP380_REG_FIFO_CONFIG_1:
+ case BMP380_REG_FIFO_CONFIG_2:
+ case BMP380_REG_FIFO_WATERMARK_LSB:
+ case BMP380_REG_FIFO_WATERMARK_MSB:
+ case BMP380_REG_POWER_CONTROL:
+ case BMP380_REG_INT_CONTROL:
+ case BMP380_REG_IF_CONFIG:
+ case BMP380_REG_ODR:
+ case BMP380_REG_OSR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp380_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP380_REG_TEMP_XLSB:
+ case BMP380_REG_TEMP_LSB:
+ case BMP380_REG_TEMP_MSB:
+ case BMP380_REG_PRESS_XLSB:
+ case BMP380_REG_PRESS_LSB:
+ case BMP380_REG_PRESS_MSB:
+ case BMP380_REG_SENSOR_TIME_XLSB:
+ case BMP380_REG_SENSOR_TIME_LSB:
+ case BMP380_REG_SENSOR_TIME_MSB:
+ case BMP380_REG_INT_STATUS:
+ case BMP380_REG_FIFO_DATA:
+ case BMP380_REG_STATUS:
+ case BMP380_REG_ERROR:
+ case BMP380_REG_EVENT:
+ return true;
+ default:
+ return false;
+ }
+}
+
const struct regmap_config bmp280_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -83,3 +126,15 @@ const struct regmap_config bmp280_regmap_config = {
.volatile_reg = bmp280_is_volatile_reg,
};
EXPORT_SYMBOL_NS(bmp280_regmap_config, IIO_BMP280);
+
+const struct regmap_config bmp380_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP380_REG_CMD,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp380_is_writeable_reg,
+ .volatile_reg = bmp380_is_volatile_reg,
+};
+EXPORT_SYMBOL_NS(bmp380_regmap_config, IIO_BMP280);
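With REGCACHE_RBTREE plus the two callbacks above, only the measurement and status registers always hit the bus; configuration registers are served from the cache after their first access, and regmap rejects writes to registers the writeable_reg callback does not allow. A small illustrative sequence, not taken from the driver:

        unsigned int val;

        regmap_read(data->regmap, BMP380_REG_TEMP_MSB, &val);  /* volatile: always a bus read */
        regmap_read(data->regmap, BMP380_REG_OSR, &val);       /* cached after the first access */
        regmap_write(data->regmap, BMP380_REG_TEMP_MSB, 0);    /* not writeable: regmap returns -EIO */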
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 4cfaf3e869b8..011c68e07ebf 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -66,6 +66,9 @@ static int bmp280_spi_probe(struct spi_device *spi)
case BME280_CHIP_ID:
regmap_config = &bmp280_regmap_config;
break;
+ case BMP380_CHIP_ID:
+ regmap_config = &bmp380_regmap_config;
+ break;
default:
return -EINVAL;
}
@@ -92,6 +95,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
{ .compatible = "bosch,bmp181", },
{ .compatible = "bosch,bmp280", },
{ .compatible = "bosch,bme280", },
+ { .compatible = "bosch,bmp380", },
{ },
};
MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
@@ -101,6 +105,7 @@ static const struct spi_device_id bmp280_spi_id[] = {
{ "bmp181", BMP180_CHIP_ID },
{ "bmp280", BMP280_CHIP_ID },
{ "bme280", BME280_CHIP_ID },
+ { "bmp380", BMP380_CHIP_ID },
{ }
};
MODULE_DEVICE_TABLE(spi, bmp280_spi_id);
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 57ba0e85db91..c791325c7416 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -3,6 +3,87 @@
#include <linux/device.h>
#include <linux/regmap.h>
+/* BMP380 specific registers */
+#define BMP380_REG_CMD 0x7E
+#define BMP380_REG_CONFIG 0x1F
+#define BMP380_REG_ODR 0x1D
+#define BMP380_REG_OSR 0x1C
+#define BMP380_REG_POWER_CONTROL 0x1B
+#define BMP380_REG_IF_CONFIG 0x1A
+#define BMP380_REG_INT_CONTROL 0x19
+#define BMP380_REG_INT_STATUS 0x11
+#define BMP380_REG_EVENT 0x10
+#define BMP380_REG_STATUS 0x03
+#define BMP380_REG_ERROR 0x02
+#define BMP380_REG_ID 0x00
+
+#define BMP380_REG_FIFO_CONFIG_1 0x18
+#define BMP380_REG_FIFO_CONFIG_2 0x17
+#define BMP380_REG_FIFO_WATERMARK_MSB 0x16
+#define BMP380_REG_FIFO_WATERMARK_LSB 0x15
+#define BMP380_REG_FIFO_DATA 0x14
+#define BMP380_REG_FIFO_LENGTH_MSB 0x13
+#define BMP380_REG_FIFO_LENGTH_LSB 0x12
+
+#define BMP380_REG_SENSOR_TIME_MSB 0x0E
+#define BMP380_REG_SENSOR_TIME_LSB 0x0D
+#define BMP380_REG_SENSOR_TIME_XLSB 0x0C
+
+#define BMP380_REG_TEMP_MSB 0x09
+#define BMP380_REG_TEMP_LSB 0x08
+#define BMP380_REG_TEMP_XLSB 0x07
+
+#define BMP380_REG_PRESS_MSB 0x06
+#define BMP380_REG_PRESS_LSB 0x05
+#define BMP380_REG_PRESS_XLSB 0x04
+
+#define BMP380_REG_CALIB_TEMP_START 0x31
+#define BMP380_CALIB_REG_COUNT 21
+
+#define BMP380_FILTER_MASK GENMASK(3, 1)
+#define BMP380_FILTER_OFF 0
+#define BMP380_FILTER_1X 1
+#define BMP380_FILTER_3X 2
+#define BMP380_FILTER_7X 3
+#define BMP380_FILTER_15X 4
+#define BMP380_FILTER_31X 5
+#define BMP380_FILTER_63X 6
+#define BMP380_FILTER_127X 7
+
+#define BMP380_OSRS_TEMP_MASK GENMASK(5, 3)
+#define BMP380_OSRS_PRESS_MASK GENMASK(2, 0)
+
+#define BMP380_ODRS_MASK GENMASK(4, 0)
+
+#define BMP380_CTRL_SENSORS_MASK GENMASK(1, 0)
+#define BMP380_CTRL_SENSORS_PRESS_EN BIT(0)
+#define BMP380_CTRL_SENSORS_TEMP_EN BIT(1)
+#define BMP380_MODE_MASK GENMASK(5, 4)
+#define BMP380_MODE_SLEEP 0
+#define BMP380_MODE_FORCED 1
+#define BMP380_MODE_NORMAL 3
+
+#define BMP380_MIN_TEMP -4000
+#define BMP380_MAX_TEMP 8500
+#define BMP380_MIN_PRES 3000000
+#define BMP380_MAX_PRES 12500000
+
+#define BMP380_CMD_NOOP 0x00
+#define BMP380_CMD_EXTMODE_EN_MID 0x34
+#define BMP380_CMD_FIFO_FLUSH 0xB0
+#define BMP380_CMD_SOFT_RESET 0xB6
+
+#define BMP380_STATUS_CMD_RDY_MASK BIT(4)
+#define BMP380_STATUS_DRDY_PRESS_MASK BIT(5)
+#define BMP380_STATUS_DRDY_TEMP_MASK BIT(6)
+
+#define BMP380_ERR_FATAL_MASK BIT(0)
+#define BMP380_ERR_CMD_MASK BIT(1)
+#define BMP380_ERR_CONF_MASK BIT(2)
+
+#define BMP380_TEMP_SKIPPED 0x800000
+#define BMP380_PRESS_SKIPPED 0x800000
+
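bmp280-core.c above calls a bmp380_cmd() helper that is introduced elsewhere in this patch and not visible in these hunks. A plausible sketch built only on the definitions above; the helper name, the busy check and the settle time are illustrative, not the actual implementation:

        static int bmp380_cmd_sketch(struct regmap *regmap, u8 cmd)
        {
                unsigned int reg;
                int ret;

                /* The command decoder must be idle before a new command is accepted */
                ret = regmap_read(regmap, BMP380_REG_STATUS, &reg);
                if (ret)
                        return ret;
                if (!(reg & BMP380_STATUS_CMD_RDY_MASK))
                        return -EBUSY;

                ret = regmap_write(regmap, BMP380_REG_CMD, cmd);
                if (ret)
                        return ret;

                usleep_range(2000, 2500);       /* settle time, assumed */

                /* Command errors are latched in the error register */
                ret = regmap_read(regmap, BMP380_REG_ERROR, &reg);
                if (ret)
                        return ret;

                return (reg & BMP380_ERR_CMD_MASK) ? -EINVAL : 0;
        }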
/* BMP280 specific registers */
#define BMP280_REG_HUMIDITY_LSB 0xFE
#define BMP280_REG_HUMIDITY_MSB 0xFD
@@ -13,6 +94,9 @@
#define BMP280_REG_PRESS_LSB 0xF8
#define BMP280_REG_PRESS_MSB 0xF7
+/* Helper mask to truncate the 4 excess bits on pressure and temp readings */
+#define BMP280_MEAS_TRIM_MASK GENMASK(24, 4)
+
#define BMP280_REG_CONFIG 0xF5
#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_STATUS 0xF3
@@ -32,44 +116,46 @@
#define BMP280_REG_COMP_PRESS_START 0x8E
#define BMP280_COMP_PRESS_REG_COUNT 18
-#define BMP280_FILTER_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_COMP_H5_MASK GENMASK(15, 4)
+
+#define BMP280_CONTIGUOUS_CALIB_REGS (BMP280_COMP_TEMP_REG_COUNT + \
+ BMP280_COMP_PRESS_REG_COUNT)
+
+#define BMP280_FILTER_MASK GENMASK(4, 2)
#define BMP280_FILTER_OFF 0
-#define BMP280_FILTER_2X BIT(2)
-#define BMP280_FILTER_4X BIT(3)
-#define BMP280_FILTER_8X (BIT(3) | BIT(2))
-#define BMP280_FILTER_16X BIT(4)
+#define BMP280_FILTER_2X 1
+#define BMP280_FILTER_4X 2
+#define BMP280_FILTER_8X 3
+#define BMP280_FILTER_16X 4
-#define BMP280_OSRS_HUMIDITY_MASK (BIT(2) | BIT(1) | BIT(0))
-#define BMP280_OSRS_HUMIDITIY_X(osrs_h) ((osrs_h) << 0)
+#define BMP280_OSRS_HUMIDITY_MASK GENMASK(2, 0)
#define BMP280_OSRS_HUMIDITY_SKIP 0
-#define BMP280_OSRS_HUMIDITY_1X BMP280_OSRS_HUMIDITIY_X(1)
-#define BMP280_OSRS_HUMIDITY_2X BMP280_OSRS_HUMIDITIY_X(2)
-#define BMP280_OSRS_HUMIDITY_4X BMP280_OSRS_HUMIDITIY_X(3)
-#define BMP280_OSRS_HUMIDITY_8X BMP280_OSRS_HUMIDITIY_X(4)
-#define BMP280_OSRS_HUMIDITY_16X BMP280_OSRS_HUMIDITIY_X(5)
+#define BMP280_OSRS_HUMIDITY_1X 1
+#define BMP280_OSRS_HUMIDITY_2X 2
+#define BMP280_OSRS_HUMIDITY_4X 3
+#define BMP280_OSRS_HUMIDITY_8X 4
+#define BMP280_OSRS_HUMIDITY_16X 5
-#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_MASK GENMASK(7, 5)
#define BMP280_OSRS_TEMP_SKIP 0
-#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
-#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
-#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
-#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
-#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
-#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
-
-#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_TEMP_1X 1
+#define BMP280_OSRS_TEMP_2X 2
+#define BMP280_OSRS_TEMP_4X 3
+#define BMP280_OSRS_TEMP_8X 4
+#define BMP280_OSRS_TEMP_16X 5
+
+#define BMP280_OSRS_PRESS_MASK GENMASK(4, 2)
#define BMP280_OSRS_PRESS_SKIP 0
-#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
-#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
-#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
-#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
-#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
-#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
-
-#define BMP280_MODE_MASK (BIT(1) | BIT(0))
+#define BMP280_OSRS_PRESS_1X 1
+#define BMP280_OSRS_PRESS_2X 2
+#define BMP280_OSRS_PRESS_4X 3
+#define BMP280_OSRS_PRESS_8X 4
+#define BMP280_OSRS_PRESS_16X 5
+
+#define BMP280_MODE_MASK GENMASK(1, 0)
#define BMP280_MODE_SLEEP 0
-#define BMP280_MODE_FORCED BIT(0)
-#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+#define BMP280_MODE_FORCED 1
+#define BMP280_MODE_NORMAL 3
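Since the masks are now GENMASK() fields and the *_1X/_2X/... values are no longer pre-shifted, composing a control value is expected to go through FIELD_PREP() from <linux/bitfield.h>, for example:

        /* e.g. a normal-mode, 2x temperature / 16x pressure control value: */
        u8 ctrl_meas = FIELD_PREP(BMP280_OSRS_TEMP_MASK, BMP280_OSRS_TEMP_2X) |
                       FIELD_PREP(BMP280_OSRS_PRESS_MASK, BMP280_OSRS_PRESS_16X) |
                       FIELD_PREP(BMP280_MODE_MASK, BMP280_MODE_NORMAL);
        /* == (2 << 5) | (5 << 2) | 3 == 0x57, the same value the old pre-shifted macros produced */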
/* BMP180 specific registers */
#define BMP180_REG_OUT_XLSB 0xF8
@@ -79,19 +165,22 @@
#define BMP180_REG_CALIB_START 0xAA
#define BMP180_REG_CALIB_COUNT 22
+#define BMP180_MEAS_CTRL_MASK GENMASK(4, 0)
+#define BMP180_MEAS_TEMP 0x0E
+#define BMP180_MEAS_PRESS 0x14
#define BMP180_MEAS_SCO BIT(5)
-#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
-#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
-#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
-#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
+#define BMP180_OSRS_PRESS_MASK GENMASK(7, 6)
+#define BMP180_MEAS_PRESS_1X 0
+#define BMP180_MEAS_PRESS_2X 1
+#define BMP180_MEAS_PRESS_4X 2
+#define BMP180_MEAS_PRESS_8X 3
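Likewise for BMP180: the removed BMP180_MEAS_PRESS_X(oss) macro folded the start-of-conversion bit and the oversampling setting into one constant, and the same register value can now be built from the separate fields, e.g. for an 8x pressure conversion:

        u8 meas = FIELD_PREP(BMP180_MEAS_CTRL_MASK, BMP180_MEAS_PRESS) |
                  BMP180_MEAS_SCO |
                  FIELD_PREP(BMP180_OSRS_PRESS_MASK, BMP180_MEAS_PRESS_8X);
        /* 0x14 | BIT(5) | (3 << 6) == 0xf4, matching the old BMP180_MEAS_PRESS_X(3) */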
/* BMP180 and BMP280 common registers */
#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_RESET 0xE0
#define BMP280_REG_ID 0xD0
+#define BMP380_CHIP_ID 0x50
#define BMP180_CHIP_ID 0x55
#define BMP280_CHIP_ID 0x58
#define BME280_CHIP_ID 0x60
@@ -105,6 +194,7 @@
/* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config;
+extern const struct regmap_config bmp380_regmap_config;
/* Probe called from different transports */
int bmp280_common_probe(struct device *dev,
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 5f6bb3603a8b..f0b0d198c6d4 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -129,9 +129,8 @@ static int dlh_read_direct(struct dlh_state *st,
if (ret)
return ret;
- *pressure = get_unaligned_be32(&st->rx_buf[1]) >> 8;
- *temperature = get_unaligned_be32(&st->rx_buf[3]) &
- GENMASK(DLH_NUM_TEMP_BITS - 1, 0);
+ *pressure = get_unaligned_be24(&st->rx_buf[1]);
+ *temperature = get_unaligned_be24(&st->rx_buf[4]);
return 0;
}
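The two conversions are equivalent (assuming DLH_NUM_TEMP_BITS is 24, as the straight be24 replacement implies); a quick worked example with rx_buf[1..6] = 12 34 56 78 9a bc:

        /*
         *   old: get_unaligned_be32(&rx_buf[1]) >> 8             == 0x123456
         *   new: get_unaligned_be24(&rx_buf[1])                  == 0x123456
         *
         *   old: get_unaligned_be32(&rx_buf[3]) & GENMASK(23, 0) == 0x789abc
         *   new: get_unaligned_be24(&rx_buf[4])                  == 0x789abc
         *
         * i.e. the be24 helpers read exactly the three bytes of interest
         * instead of a wider word that is then shifted or masked.
         */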
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 36fb7ae0d0a9..984a3f511a1a 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -89,6 +89,7 @@ struct dps310_data {
s32 c00, c10, c20, c30, c01, c11, c21;
s32 pressure_raw;
s32 temp_raw;
+ bool timeout_recovery_failed;
};
static const struct iio_chan_spec dps310_channels[] = {
@@ -159,6 +160,102 @@ static int dps310_get_coefs(struct dps310_data *data)
return 0;
}
+/*
+ * Some versions of the chip will read temperatures in the ~60C range when
+ * it's actually ~20C. This is the manufacturer recommended workaround
+ * to correct the issue. The registers used below are undocumented.
+ */
+static int dps310_temp_workaround(struct dps310_data *data)
+{
+ int rc;
+ int reg;
+
+ rc = regmap_read(data->regmap, 0x32, &reg);
+ if (rc)
+ return rc;
+
+ /*
+ * If bit 1 is set then the device is okay, and the workaround does not
+ * need to be applied
+ */
+ if (reg & BIT(1))
+ return 0;
+
+ rc = regmap_write(data->regmap, 0x0e, 0xA5);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0f, 0x96);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x62, 0x02);
+ if (rc)
+ return rc;
+
+ rc = regmap_write(data->regmap, 0x0e, 0x00);
+ if (rc)
+ return rc;
+
+ return regmap_write(data->regmap, 0x0f, 0x00);
+}
+
+static int dps310_startup(struct dps310_data *data)
+{
+ int rc;
+ int ready;
+
+ /*
+ * Set up pressure sensor in single sample, one measurement per second
+ * mode
+ */
+ rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
+ if (rc)
+ return rc;
+
+ /*
+ * Set up external (MEMS) temperature sensor in single sample, one
+ * measurement per second mode
+ */
+ rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
+ if (rc)
+ return rc;
+
+ /* Temp and pressure shifts are disabled when PRC <= 8 */
+ rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
+ DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
+ if (rc)
+ return rc;
+
+ /* MEAS_CFG doesn't update correctly unless first written with 0 */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, 0);
+ if (rc)
+ return rc;
+
+ /* Turn on temperature and pressure measurement in the background */
+ rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
+ DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
+ DPS310_TEMP_EN | DPS310_BACKGROUND);
+ if (rc)
+ return rc;
+
+ /*
+ * Calibration coefficients required for reporting temperature.
+ * They are available 40ms after the device has started
+ */
+ rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
+ ready & DPS310_COEF_RDY, 10000, 40000);
+ if (rc)
+ return rc;
+
+ rc = dps310_get_coefs(data);
+ if (rc)
+ return rc;
+
+ return dps310_temp_workaround(data);
+}
+
static int dps310_get_pres_precision(struct dps310_data *data)
{
int rc;
@@ -297,11 +394,69 @@ static int dps310_get_temp_k(struct dps310_data *data)
return scale_factors[ilog2(rc)];
}
+static int dps310_reset_wait(struct dps310_data *data)
+{
+ int rc;
+
+ rc = regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ if (rc)
+ return rc;
+
+ /* Wait for device chip access: 2.5ms in specification */
+ usleep_range(2500, 12000);
+ return 0;
+}
+
+static int dps310_reset_reinit(struct dps310_data *data)
+{
+ int rc;
+
+ rc = dps310_reset_wait(data);
+ if (rc)
+ return rc;
+
+ return dps310_startup(data);
+}
+
+static int dps310_ready_status(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int sleep = DPS310_POLL_SLEEP_US(timeout);
+ int ready;
+
+ return regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready, ready & ready_bit,
+ sleep, timeout);
+}
+
+static int dps310_ready(struct dps310_data *data, int ready_bit, int timeout)
+{
+ int rc;
+
+ rc = dps310_ready_status(data, ready_bit, timeout);
+ if (rc) {
+ if (rc == -ETIMEDOUT && !data->timeout_recovery_failed) {
+ /* Reset and reinitialize the chip. */
+ if (dps310_reset_reinit(data)) {
+ data->timeout_recovery_failed = true;
+ } else {
+ /* Try again to get sensor ready status. */
+ if (dps310_ready_status(data, ready_bit, timeout))
+ data->timeout_recovery_failed = true;
+ else
+ return 0;
+ }
+ }
+
+ return rc;
+ }
+
+ data->timeout_recovery_failed = false;
+ return 0;
+}
+
static int dps310_read_pres_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
s32 raw;
u8 val[3];
@@ -313,9 +468,7 @@ static int dps310_read_pres_raw(struct dps310_data *data)
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_PRS_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
+ rc = dps310_ready(data, DPS310_PRS_RDY, timeout);
if (rc)
goto done;
@@ -352,7 +505,6 @@ static int dps310_read_temp_raw(struct dps310_data *data)
{
int rc;
int rate;
- int ready;
int timeout;
if (mutex_lock_interruptible(&data->lock))
@@ -362,10 +514,8 @@ static int dps310_read_temp_raw(struct dps310_data *data)
timeout = DPS310_POLL_TIMEOUT_US(rate);
/* Poll for sensor readiness; base the timeout upon the sample rate. */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_TMP_RDY,
- DPS310_POLL_SLEEP_US(timeout), timeout);
- if (rc < 0)
+ rc = dps310_ready(data, DPS310_TMP_RDY, timeout);
+ if (rc)
goto done;
rc = dps310_read_temp_ready(data);
@@ -660,7 +810,7 @@ static void dps310_reset(void *action_data)
{
struct dps310_data *data = action_data;
- regmap_write(data->regmap, DPS310_RESET, DPS310_RESET_MAGIC);
+ dps310_reset_wait(data);
}
static const struct regmap_config dps310_regmap_config = {
@@ -677,52 +827,12 @@ static const struct iio_info dps310_info = {
.write_raw = dps310_write_raw,
};
-/*
- * Some verions of chip will read temperatures in the ~60C range when
- * its actually ~20C. This is the manufacturer recommended workaround
- * to correct the issue. The registers used below are undocumented.
- */
-static int dps310_temp_workaround(struct dps310_data *data)
-{
- int rc;
- int reg;
-
- rc = regmap_read(data->regmap, 0x32, &reg);
- if (rc < 0)
- return rc;
-
- /*
- * If bit 1 is set then the device is okay, and the workaround does not
- * need to be applied
- */
- if (reg & BIT(1))
- return 0;
-
- rc = regmap_write(data->regmap, 0x0e, 0xA5);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0f, 0x96);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x62, 0x02);
- if (rc < 0)
- return rc;
-
- rc = regmap_write(data->regmap, 0x0e, 0x00);
- if (rc < 0)
- return rc;
-
- return regmap_write(data->regmap, 0x0f, 0x00);
-}
-
static int dps310_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct dps310_data *data;
struct iio_dev *iio;
- int rc, ready;
+ int rc;
iio = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!iio)
@@ -747,54 +857,8 @@ static int dps310_probe(struct i2c_client *client,
if (rc)
return rc;
- /*
- * Set up pressure sensor in single sample, one measurement per second
- * mode
- */
- rc = regmap_write(data->regmap, DPS310_PRS_CFG, 0);
-
- /*
- * Set up external (MEMS) temperature sensor in single sample, one
- * measurement per second mode
- */
- rc = regmap_write(data->regmap, DPS310_TMP_CFG, DPS310_TMP_EXT);
- if (rc < 0)
- return rc;
-
- /* Temp and pressure shifts are disabled when PRC <= 8 */
- rc = regmap_write_bits(data->regmap, DPS310_CFG_REG,
- DPS310_PRS_SHIFT_EN | DPS310_TMP_SHIFT_EN, 0);
- if (rc < 0)
- return rc;
-
- /* MEAS_CFG doesn't update correctly unless first written with 0 */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, 0);
- if (rc < 0)
- return rc;
-
- /* Turn on temperature and pressure measurement in the background */
- rc = regmap_write_bits(data->regmap, DPS310_MEAS_CFG,
- DPS310_MEAS_CTRL_BITS, DPS310_PRS_EN |
- DPS310_TEMP_EN | DPS310_BACKGROUND);
- if (rc < 0)
- return rc;
-
- /*
- * Calibration coefficients required for reporting temperature.
- * They are available 40ms after the device has started
- */
- rc = regmap_read_poll_timeout(data->regmap, DPS310_MEAS_CFG, ready,
- ready & DPS310_COEF_RDY, 10000, 40000);
- if (rc < 0)
- return rc;
-
- rc = dps310_get_coefs(data);
- if (rc < 0)
- return rc;
-
- rc = dps310_temp_workaround(data);
- if (rc < 0)
+ rc = dps310_startup(data);
+ if (rc)
return rc;
rc = devm_iio_device_register(&client->dev, iio);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index af4621eaa6b5..b62f28585db5 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -595,7 +595,7 @@ static int icp10100_probe(struct i2c_client *client,
return devm_iio_device_register(&client->dev, indio_dev);
}
-static int __maybe_unused icp10100_suspend(struct device *dev)
+static int icp10100_suspend(struct device *dev)
{
struct icp10100_state *st = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -607,7 +607,7 @@ static int __maybe_unused icp10100_suspend(struct device *dev)
return ret;
}
-static int __maybe_unused icp10100_resume(struct device *dev)
+static int icp10100_resume(struct device *dev)
{
struct icp10100_state *st = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -626,8 +626,8 @@ out_unlock:
return ret;
}
-static UNIVERSAL_DEV_PM_OPS(icp10100_pm, icp10100_suspend, icp10100_resume,
- NULL);
+static DEFINE_RUNTIME_DEV_PM_OPS(icp10100_pm, icp10100_suspend, icp10100_resume,
+ NULL);
static const struct of_device_id icp10100_of_match[] = {
{
@@ -646,7 +646,7 @@ MODULE_DEVICE_TABLE(i2c, icp10100_id);
static struct i2c_driver icp10100_driver = {
.driver = {
.name = "icp10100",
- .pm = &icp10100_pm,
+ .pm = pm_ptr(&icp10100_pm),
.of_match_table = icp10100_of_match,
},
.probe = icp10100_probe,
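The same conversion (drop __maybe_unused, define the ops with a DEFINE_*_DEV_PM_OPS() macro, wrap the pointer in pm_ptr() or pm_sleep_ptr()) repeats in the srf04, sx9310, sx9324 and sx9360 hunks below. A minimal sketch of the idiom with hypothetical names; pm_ptr() lets the ops and callbacks be discarded when CONFIG_PM=n and pm_sleep_ptr() when CONFIG_PM_SLEEP=n, without any __maybe_unused annotations:

        static int foo_suspend(struct device *dev) { /* ... */ return 0; }
        static int foo_resume(struct device *dev)  { /* ... */ return 0; }

        static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

        static struct i2c_driver foo_driver = {
                .driver = {
                        .name   = "foo",
                        .pm     = pm_sleep_ptr(&foo_pm_ops),
                },
        };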
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index d4f89e4babed..2f22aba61e4d 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -290,15 +290,13 @@ static int mpl3115_standby(struct mpl3115_data *data)
data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE);
}
-static int mpl3115_remove(struct i2c_client *client)
+static void mpl3115_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
mpl3115_standby(iio_priv(indio_dev));
-
- return 0;
}
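Several drivers in this section (mpl3115, ms5611, zpa2326, the lidar and sx9500 proximity drivers, mlx90614/mlx90632) make the same change: the I2C remove callback now returns void, since the core did little more than log a returned error. A sketch of the resulting driver-side shape, with a hypothetical name:

        static void foo_remove(struct i2c_client *client)
        {
                /* tear down resources; there is no error to propagate */
        }

        static struct i2c_driver foo_driver = {
                .driver = { .name = "foo" },
                .remove = foo_remove,           /* void (*)(struct i2c_client *) */
        };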
static int mpl3115_suspend(struct device *dev)
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 3b1de71e0d15..b681a4183909 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -105,11 +105,9 @@ static int ms5611_i2c_probe(struct i2c_client *client,
return ms5611_probe(indio_dev, &client->dev, id->name, id->driver_data);
}
-static int ms5611_i2c_remove(struct i2c_client *client)
+static void ms5611_i2c_remove(struct i2c_client *client)
{
ms5611_remove(i2c_get_clientdata(client));
-
- return 0;
}
static const struct of_device_id ms5611_i2c_matches[] = {
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 156e6a72dc5c..6e11bea784fa 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -22,6 +22,7 @@ enum st_press_type {
LPS33HW,
LPS35HW,
LPS22HH,
+ LPS22DF,
ST_PRESS_MAX,
};
@@ -32,6 +33,7 @@ enum st_press_type {
#define LPS33HW_PRESS_DEV_NAME "lps33hw"
#define LPS35HW_PRESS_DEV_NAME "lps35hw"
#define LPS22HH_PRESS_DEV_NAME "lps22hh"
+#define LPS22DF_PRESS_DEV_NAME "lps22df"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 76913a2028d2..80176e3083af 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -552,6 +552,76 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.multi_read_bit = false,
.bootime = 2,
},
+ {
+ /*
+ * CUSTOM VALUES FOR LPS22DF SENSOR
+ * See LPS22DF datasheet:
+ * http://www.st.com/resource/en/datasheet/lps22df.pdf
+ */
+ .wai = 0xb4,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LPS22DF_PRESS_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
+ .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
+ .odr = {
+ .addr = 0x10,
+ .mask = 0x78,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01 },
+ { .hz = 4, .value = 0x02 },
+ { .hz = 10, .value = 0x03 },
+ { .hz = 25, .value = 0x04 },
+ { .hz = 50, .value = 0x05 },
+ { .hz = 75, .value = 0x06 },
+ { .hz = 100, .value = 0x07 },
+ { .hz = 200, .value = 0x08 },
+ },
+ },
+ .pw = {
+ .addr = 0x10,
+ .mask = 0x78,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ /*
+ * Pressure and temperature sensitivity values
+ * as defined in table 2 of LPS22DF datasheet.
+ */
+ [0] = {
+ .num = ST_PRESS_FS_AVL_1260MB,
+ .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+ .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS,
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x11,
+ .mask = BIT(3),
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x13,
+ .mask = BIT(5),
+ .addr_od = 0x12,
+ .mask_od = BIT(1),
+ },
+ .addr_ihl = 0x12,
+ .mask_ihl = BIT(3),
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x0E,
+ .value = BIT(5),
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
};
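A short worked example for the new LPS22DF odr/pw entries above; the shift-into-mask behaviour is the standard st_sensors handling, not something introduced by this patch:

        /*
         * Assuming the usual ST_SENSORS_SHIFT_VAL() handling in the
         * st_sensors core: mask 0x78 covers bits 6:3 of register 0x10, so
         *
         *   25 Hz  -> value 0x04 -> 0x04 << 3 = 0x20 written to the field
         *   200 Hz -> value 0x08 -> 0x08 << 3 = 0x40
         *
         * and the pw block reuses the same field, with the default
         * power-off value restoring the power-down setting.
         */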
static int st_press_write_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 7035777fd988..58fede861891 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -47,6 +47,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hh",
.data = LPS22HH_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22df",
+ .data = LPS22DF_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -67,6 +71,7 @@ static const struct i2c_device_id st_press_id_table[] = {
{ LPS33HW_PRESS_DEV_NAME, LPS33HW },
{ LPS35HW_PRESS_DEV_NAME, LPS35HW },
{ LPS22HH_PRESS_DEV_NAME, LPS22HH },
+ { LPS22DF_PRESS_DEV_NAME, LPS22DF },
{},
};
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index bfab8e7fb061..25cca5ad7c55 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -51,6 +51,10 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hh",
.data = LPS22HH_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps22df",
+ .data = LPS22DF_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -97,6 +101,7 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS33HW_PRESS_DEV_NAME },
{ LPS35HW_PRESS_DEV_NAME },
{ LPS22HH_PRESS_DEV_NAME },
+ { LPS22DF_PRESS_DEV_NAME },
{ "lps001wp-press" },
{ "lps25h-press", },
{ "lps331ap-press" },
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index 0db0860d386b..f26dd8cbb387 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -53,11 +53,9 @@ static int zpa2326_probe_i2c(struct i2c_client *client,
zpa2326_i2c_hwid(client), regmap);
}
-static int zpa2326_remove_i2c(struct i2c_client *client)
+static void zpa2326_remove_i2c(struct i2c_client *client)
{
zpa2326_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id zpa2326_i2c_ids[] = {
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 648ae576d6fa..791a33d5286c 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -311,7 +311,7 @@ error_unreg_buffer:
return ret;
}
-static int lidar_remove(struct i2c_client *client)
+static void lidar_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -320,8 +320,6 @@ static int lidar_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct i2c_device_id lidar_id[] = {
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index 05015351a34a..faf2f806ce80 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -359,7 +359,7 @@ static int srf04_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused srf04_pm_runtime_suspend(struct device *dev)
+static int srf04_pm_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
@@ -371,7 +371,7 @@ static int __maybe_unused srf04_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused srf04_pm_runtime_resume(struct device *dev)
+static int srf04_pm_runtime_resume(struct device *dev)
{
struct platform_device *pdev = container_of(dev,
struct platform_device, dev);
@@ -385,8 +385,8 @@ static int __maybe_unused srf04_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops srf04_pm_ops = {
- SET_RUNTIME_PM_OPS(srf04_pm_runtime_suspend,
- srf04_pm_runtime_resume, NULL)
+ RUNTIME_PM_OPS(srf04_pm_runtime_suspend,
+ srf04_pm_runtime_resume, NULL)
};
static struct platform_driver srf04_driver = {
@@ -395,7 +395,7 @@ static struct platform_driver srf04_driver = {
.driver = {
.name = "srf04-gpio",
.of_match_table = of_srf04_match,
- .pm = &srf04_pm_ops,
+ .pm = pm_ptr(&srf04_pm_ops),
},
};
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index ea7318b508ea..0e4747ccd3cf 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -965,7 +965,7 @@ static int sx9310_probe(struct i2c_client *client)
return sx_common_probe(client, &sx9310_chip_info, &sx9310_regmap_config);
}
-static int __maybe_unused sx9310_suspend(struct device *dev)
+static int sx9310_suspend(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
u8 ctrl0;
@@ -991,7 +991,7 @@ out:
return ret;
}
-static int __maybe_unused sx9310_resume(struct device *dev)
+static int sx9310_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -1013,7 +1013,7 @@ out:
return 0;
}
-static SIMPLE_DEV_PM_OPS(sx9310_pm_ops, sx9310_suspend, sx9310_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sx9310_pm_ops, sx9310_suspend, sx9310_resume);
static const struct acpi_device_id sx9310_acpi_match[] = {
{ "STH9310", SX9310_WHOAMI_VALUE },
@@ -1041,7 +1041,7 @@ static struct i2c_driver sx9310_driver = {
.name = "sx9310",
.acpi_match_table = sx9310_acpi_match,
.of_match_table = sx9310_of_match,
- .pm = &sx9310_pm_ops,
+ .pm = pm_sleep_ptr(&sx9310_pm_ops),
/*
* Lots of i2c transfers in probe + over 200 ms waiting in
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index edb5a2ce4e27..977cf17cec52 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -1073,7 +1073,7 @@ static int sx9324_probe(struct i2c_client *client)
return sx_common_probe(client, &sx9324_chip_info, &sx9324_regmap_config);
}
-static int __maybe_unused sx9324_suspend(struct device *dev)
+static int sx9324_suspend(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
unsigned int regval;
@@ -1098,7 +1098,7 @@ out:
return ret;
}
-static int __maybe_unused sx9324_resume(struct device *dev)
+static int sx9324_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -1114,7 +1114,7 @@ static int __maybe_unused sx9324_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(sx9324_pm_ops, sx9324_suspend, sx9324_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sx9324_pm_ops, sx9324_suspend, sx9324_resume);
static const struct acpi_device_id sx9324_acpi_match[] = {
{ "STH9324", SX9324_WHOAMI_VALUE },
@@ -1139,7 +1139,7 @@ static struct i2c_driver sx9324_driver = {
.name = "sx9324",
.acpi_match_table = sx9324_acpi_match,
.of_match_table = sx9324_of_match,
- .pm = &sx9324_pm_ops,
+ .pm = pm_sleep_ptr(&sx9324_pm_ops),
/*
* Lots of i2c transfers in probe + over 200 ms waiting in
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index d9a12e6be6ca..7fa2213d23ba 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -819,7 +819,7 @@ static int sx9360_probe(struct i2c_client *client)
return sx_common_probe(client, &sx9360_chip_info, &sx9360_regmap_config);
}
-static int __maybe_unused sx9360_suspend(struct device *dev)
+static int sx9360_suspend(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
unsigned int regval;
@@ -844,7 +844,7 @@ out:
return ret;
}
-static int __maybe_unused sx9360_resume(struct device *dev)
+static int sx9360_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
@@ -861,7 +861,7 @@ static int __maybe_unused sx9360_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(sx9360_pm_ops, sx9360_suspend, sx9360_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sx9360_pm_ops, sx9360_suspend, sx9360_resume);
static const struct acpi_device_id sx9360_acpi_match[] = {
{ "STH9360", SX9360_WHOAMI_VALUE },
@@ -886,7 +886,7 @@ static struct i2c_driver sx9360_driver = {
.name = "sx9360",
.acpi_match_table = sx9360_acpi_match,
.of_match_table = sx9360_of_match,
- .pm = &sx9360_pm_ops,
+ .pm = pm_sleep_ptr(&sx9360_pm_ops),
/*
* Lots of i2c transfers in probe + over 200 ms waiting in
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 42589d6200ad..d4670864ddc7 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -979,7 +979,7 @@ out_trigger_unregister:
return ret;
}
-static int sx9500_remove(struct i2c_client *client)
+static void sx9500_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct sx9500_data *data = iio_priv(indio_dev);
@@ -989,8 +989,6 @@ static int sx9500_remove(struct i2c_client *client)
if (client->irq > 0)
iio_trigger_unregister(data->trig);
kfree(data->buffer);
-
- return 0;
}
static int sx9500_suspend(struct device *dev)
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index c253a5315988..8eb0f962ed25 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -79,16 +79,15 @@ struct mlx90614_data {
/* Bandwidth values for IIR filtering */
static const int mlx90614_iir_values[] = {77, 31, 20, 15, 723, 153, 110, 86};
-static IIO_CONST_ATTR(in_temp_object_filter_low_pass_3db_frequency_available,
- "0.15 0.20 0.31 0.77 0.86 1.10 1.53 7.23");
-
-static struct attribute *mlx90614_attributes[] = {
- &iio_const_attr_in_temp_object_filter_low_pass_3db_frequency_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group mlx90614_attr_group = {
- .attrs = mlx90614_attributes,
+static const int mlx90614_freqs[][2] = {
+ {0, 150000},
+ {0, 200000},
+ {0, 310000},
+ {0, 770000},
+ {0, 860000},
+ {1, 100000},
+ {1, 530000},
+ {7, 230000}
};
/*
@@ -373,6 +372,22 @@ static int mlx90614_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
+static int mlx90614_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (int *)mlx90614_freqs;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = 2 * ARRAY_SIZE(mlx90614_freqs);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
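Each {integer, micro} pair above is one low-pass 3 dB frequency in Hz, reproducing the list from the removed const attribute but now reported per channel through read_avail():

        /*
         *   {0, 150000} -> 0.15 Hz    {0, 860000} -> 0.86 Hz
         *   {0, 200000} -> 0.20 Hz    {1, 100000} -> 1.10 Hz
         *   {0, 310000} -> 0.31 Hz    {1, 530000} -> 1.53 Hz
         *   {0, 770000} -> 0.77 Hz    {7, 230000} -> 7.23 Hz
         *
         * *length = 2 * ARRAY_SIZE(mlx90614_freqs) because the list is
         * flattened into alternating integer/micro values for
         * IIO_VAL_INT_PLUS_MICRO.
         */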
static const struct iio_chan_spec mlx90614_channels[] = {
{
.type = IIO_TEMP,
@@ -389,6 +404,8 @@ static const struct iio_chan_spec mlx90614_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBEMISSIVITY) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
},
@@ -401,6 +418,8 @@ static const struct iio_chan_spec mlx90614_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_CALIBEMISSIVITY) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
},
@@ -410,7 +429,7 @@ static const struct iio_info mlx90614_info = {
.read_raw = mlx90614_read_raw,
.write_raw = mlx90614_write_raw,
.write_raw_get_fmt = mlx90614_write_raw_get_fmt,
- .attrs = &mlx90614_attr_group,
+ .read_avail = mlx90614_read_avail,
};
#ifdef CONFIG_PM
@@ -571,7 +590,7 @@ static int mlx90614_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int mlx90614_remove(struct i2c_client *client)
+static void mlx90614_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mlx90614_data *data = iio_priv(indio_dev);
@@ -584,8 +603,6 @@ static int mlx90614_remove(struct i2c_client *client)
mlx90614_sleep(data);
pm_runtime_set_suspended(&client->dev);
}
-
- return 0;
}
static const struct i2c_device_id mlx90614_id[] = {
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index 7ee7ff8047a4..f6dec0e5f097 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -18,6 +18,7 @@
#include <linux/math64.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -128,6 +129,7 @@
* calculations
* @object_ambient_temperature: Ambient temperature at object (might differ of
* the ambient temperature of sensor.
+ * @regulator: Regulator of the device
*/
struct mlx90632_data {
struct i2c_client *client;
@@ -136,6 +138,7 @@ struct mlx90632_data {
u16 emissivity;
u8 mtyp;
u32 object_ambient_temperature;
+ struct regulator *regulator;
};
static const struct regmap_range mlx90632_volatile_reg_range[] = {
@@ -208,6 +211,15 @@ static s32 mlx90632_pwr_continuous(struct regmap *regmap)
}
/**
+ * mlx90632_reset_delay() - Give the mlx90632 some time to reset properly
+ * If this is not done, the following I2C command(s) will not be accepted.
+ */
+static void mlx90632_reset_delay(void)
+{
+ usleep_range(150, 200);
+}
+
+/**
* mlx90632_perform_measurement() - Trigger and retrieve current measurement cycle
* @data: pointer to mlx90632_data object containing regmap information
*
@@ -248,11 +260,7 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
if (ret < 0)
return ret;
- /*
- * Give the mlx90632 some time to reset properly before sending a new I2C command
- * if this is not done, the following I2C command(s) will not be accepted.
- */
- usleep_range(150, 200);
+ mlx90632_reset_delay();
ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
(MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
@@ -841,6 +849,32 @@ static int mlx90632_wakeup(struct mlx90632_data *data)
return mlx90632_pwr_continuous(data->regmap);
}
+static void mlx90632_disable_regulator(void *_data)
+{
+ struct mlx90632_data *data = _data;
+ int ret;
+
+ ret = regulator_disable(data->regulator);
+ if (ret < 0)
+ dev_err(regmap_get_device(data->regmap),
+ "Failed to disable power regulator: %d\n", ret);
+}
+
+static int mlx90632_enable_regulator(struct mlx90632_data *data)
+{
+ int ret;
+
+ ret = regulator_enable(data->regulator);
+ if (ret < 0) {
+ dev_err(regmap_get_device(data->regmap), "Failed to enable power regulator!\n");
+ return ret;
+ }
+
+ mlx90632_reset_delay();
+
+ return ret;
+}
+
static int mlx90632_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -876,6 +910,23 @@ static int mlx90632_probe(struct i2c_client *client,
indio_dev->channels = mlx90632_channels;
indio_dev->num_channels = ARRAY_SIZE(mlx90632_channels);
+ mlx90632->regulator = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(mlx90632->regulator))
+ return dev_err_probe(&client->dev, PTR_ERR(mlx90632->regulator),
+ "failed to get vdd regulator");
+
+ ret = mlx90632_enable_regulator(mlx90632);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev, mlx90632_disable_regulator,
+ mlx90632);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to setup regulator cleanup action %d\n",
+ ret);
+ return ret;
+ }
+
ret = mlx90632_wakeup(mlx90632);
if (ret < 0) {
dev_err(&client->dev, "Wakeup failed: %d\n", ret);
@@ -924,7 +975,7 @@ static int mlx90632_probe(struct i2c_client *client,
return iio_device_register(indio_dev);
}
-static int mlx90632_remove(struct i2c_client *client)
+static void mlx90632_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct mlx90632_data *data = iio_priv(indio_dev);
@@ -936,8 +987,6 @@ static int mlx90632_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
mlx90632_sleep(data);
-
- return 0;
}
static const struct i2c_device_id mlx90632_id[] = {
diff --git a/drivers/iio/test/iio-test-rescale.c b/drivers/iio/test/iio-test-rescale.c
index cc782ccff880..31ee55a6faed 100644
--- a/drivers/iio/test/iio-test-rescale.c
+++ b/drivers/iio/test/iio-test-rescale.c
@@ -29,7 +29,7 @@ struct rescale_tc_data {
const char *expected_off;
};
-const struct rescale_tc_data scale_cases[] = {
+static const struct rescale_tc_data scale_cases[] = {
/*
* Typical use cases
*/
@@ -477,7 +477,7 @@ const struct rescale_tc_data scale_cases[] = {
},
};
-const struct rescale_tc_data offset_cases[] = {
+static const struct rescale_tc_data offset_cases[] = {
/*
* Typical use cases
*/
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index b985e0d9bc05..1f9938a2c475 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -175,6 +175,7 @@ struct cm_device {
struct cm_av {
struct cm_port *port;
struct rdma_ah_attr ah_attr;
+ u16 dlid_datapath;
u16 pkey_index;
u8 timeout;
};
@@ -617,7 +618,6 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
struct rb_node *parent = NULL;
struct cm_id_private *cur_cm_id_priv;
__be64 service_id = cm_id_priv->id.service_id;
- __be64 service_mask = cm_id_priv->id.service_mask;
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
@@ -625,9 +625,16 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
parent = *link;
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
service_node);
- if ((cur_cm_id_priv->id.service_mask & service_id) ==
- (service_mask & cur_cm_id_priv->id.service_id) &&
- (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
+
+ if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
+ link = &(*link)->rb_left;
+ else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
+ link = &(*link)->rb_right;
+ else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
+ link = &(*link)->rb_left;
+ else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
+ link = &(*link)->rb_right;
+ else {
/*
* Sharing an ib_cm_id with different handlers is not
* supported
@@ -643,17 +650,6 @@ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
spin_unlock_irqrestore(&cm.lock, flags);
return cur_cm_id_priv;
}
-
- if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
- link = &(*link)->rb_left;
- else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
- link = &(*link)->rb_right;
- else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
- link = &(*link)->rb_left;
- else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
- link = &(*link)->rb_right;
- else
- link = &(*link)->rb_right;
}
cm_id_priv->listen_sharecount++;
rb_link_node(&cm_id_priv->service_node, parent, link);
@@ -670,12 +666,7 @@ static struct cm_id_private *cm_find_listen(struct ib_device *device,
while (node) {
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
- if ((cm_id_priv->id.service_mask & service_id) ==
- cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device)) {
- refcount_inc(&cm_id_priv->refcount);
- return cm_id_priv;
- }
+
if (device < cm_id_priv->id.device)
node = node->rb_left;
else if (device > cm_id_priv->id.device)
@@ -684,8 +675,10 @@ static struct cm_id_private *cm_find_listen(struct ib_device *device,
node = node->rb_left;
else if (be64_gt(service_id, cm_id_priv->id.service_id))
node = node->rb_right;
- else
- node = node->rb_right;
+ else {
+ refcount_inc(&cm_id_priv->refcount);
+ return cm_id_priv;
+ }
}
return NULL;
}
@@ -1158,22 +1151,17 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
}
EXPORT_SYMBOL(ib_destroy_cm_id);
-static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
- __be64 service_mask)
+static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
{
- service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
- service_id &= service_mask;
if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
(service_id != IB_CM_ASSIGN_SERVICE_ID))
return -EINVAL;
- if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
+ if (service_id == IB_CM_ASSIGN_SERVICE_ID)
cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
- } else {
+ else
cm_id_priv->id.service_id = service_id;
- cm_id_priv->id.service_mask = service_mask;
- }
+
return 0;
}
@@ -1185,12 +1173,8 @@ static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
* and service ID resolution requests. The service ID should be specified
* network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
* assign a service ID to the caller.
- * @service_mask: Mask applied to service ID used to listen across a
- * range of service IDs. If set to 0, the service ID is matched
- * exactly. This parameter is ignored if %service_id is set to
- * IB_CM_ASSIGN_SERVICE_ID.
*/
-int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
+int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
{
struct cm_id_private *cm_id_priv =
container_of(cm_id, struct cm_id_private, id);
@@ -1203,7 +1187,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
goto out;
}
- ret = cm_init_listen(cm_id_priv, service_id, service_mask);
+ ret = cm_init_listen(cm_id_priv, service_id);
if (ret)
goto out;
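With service_mask gone, listens always match one exact service ID (or let the CM assign one via IB_CM_ASSIGN_SERVICE_ID). For a caller the only visible change is the dropped argument; an illustrative before/after:

        /* before: an exact match had to be spelled out with a full mask */
        ret = ib_cm_listen(cm_id, service_id, ~cpu_to_be64(0));

        /* after: matching is always exact, so only the service ID is passed */
        ret = ib_cm_listen(cm_id, service_id);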
@@ -1251,7 +1235,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
if (IS_ERR(cm_id_priv))
return ERR_CAST(cm_id_priv);
- err = cm_init_listen(cm_id_priv, service_id, 0);
+ err = cm_init_listen(cm_id_priv, service_id);
if (err) {
ib_destroy_cm_id(&cm_id_priv->id);
return ERR_PTR(err);
@@ -1321,6 +1305,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
struct sa_path_rec *pri_path = param->primary_path;
struct sa_path_rec *alt_path = param->alternate_path;
bool pri_ext = false;
+ __be16 lid;
if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
@@ -1380,9 +1365,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
htons(ntohl(sa_path_get_dlid(
pri_path)))));
} else {
+
+ if (param->primary_path_inbound) {
+ lid = param->primary_path_inbound->ib.dlid;
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(lid));
+ } else
+ IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
+ be16_to_cpu(IB_LID_PERMISSIVE));
+
/* Work-around until there's a way to obtain remote LID info */
- IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
- be16_to_cpu(IB_LID_PERMISSIVE));
IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
be16_to_cpu(IB_LID_PERMISSIVE));
}
@@ -1522,7 +1514,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
}
}
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = cm_convert_to_ms(
param->primary_path->packet_life_time) * 2 +
cm_convert_to_ms(
@@ -1538,6 +1529,10 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
cm_move_av_from_path(&cm_id_priv->av, &av);
+ if (param->primary_path_outbound)
+ cm_id_priv->av.dlid_datapath =
+ be16_to_cpu(param->primary_path_outbound->ib.dlid);
+
if (param->alternate_path)
cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);
@@ -1632,14 +1627,13 @@ static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num,
static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
- struct sa_path_rec *alt_path)
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
u32 lid;
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(primary_path,
- IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
- req_msg));
+ sa_path_set_dlid(primary_path, wc->slid);
sa_path_set_slid(primary_path,
IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
req_msg));
@@ -1676,7 +1670,8 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
- struct sa_path_rec *alt_path)
+ struct sa_path_rec *alt_path,
+ struct ib_wc *wc)
{
primary_path->dgid =
*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
@@ -1734,7 +1729,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
if (sa_path_is_roce(alt_path))
alt_path->roce.route_resolved = false;
}
- cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
+ cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc);
}
static u16 cm_get_bth_pkey(struct cm_work *work)
@@ -2079,7 +2074,6 @@ static int cm_req_handler(struct cm_work *work)
cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
cm_id_priv->id.service_id =
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
@@ -2148,7 +2142,7 @@ static int cm_req_handler(struct cm_work *work)
if (cm_req_has_alt_path(req_msg))
work->path[1].rec_type = work->path[0].rec_type;
cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
+ &work->path[1], work->mad_recv_wc->wc);
if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
sa_path_set_dmac(&work->path[0],
cm_id_priv->av.ah_attr.roce.dmac);
@@ -2173,6 +2167,10 @@ static int cm_req_handler(struct cm_work *work)
NULL, 0);
goto rejected;
}
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB)
+ cm_id_priv->av.dlid_datapath =
+ IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg);
+
if (cm_req_has_alt_path(req_msg)) {
ret = cm_init_av_by_path(&work->path[1], NULL,
&cm_id_priv->alt_av);
@@ -3486,7 +3484,6 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
cm_move_av_from_path(&cm_id_priv->av, &av);
cm_id->service_id = param->service_id;
- cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
if (cm_id->state != IB_CM_IDLE) {
@@ -3561,7 +3558,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
cm_id_priv->id.service_id =
cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
- cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_id_priv->tid = sidr_req_msg->hdr.tid;
wc = work->mad_recv_wc->wc;
@@ -4134,6 +4130,10 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
IB_QP_DEST_QPN | IB_QP_RQ_PSN;
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+ if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
+ cm_id_priv->av.dlid_datapath &&
+ (cm_id_priv->av.dlid_datapath != 0xffff))
+ qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
qp_attr->path_mtu = cm_id_priv->path_mtu;
qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 46d06678dfbe..70da57ef2eeb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1841,8 +1841,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
@@ -2026,6 +2026,8 @@ static void _destroy_id(struct rdma_id_private *id_priv,
cma_id_put(id_priv->id.context);
kfree(id_priv->id.route.path_rec);
+ kfree(id_priv->id.route.path_rec_inbound);
+ kfree(id_priv->id.route.path_rec_outbound);
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
@@ -2241,14 +2243,14 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
goto err;
rt = &id->route;
- rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
- rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
- GFP_KERNEL);
+ rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
+ rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
+ sizeof(*rt->path_rec), GFP_KERNEL);
if (!rt->path_rec)
goto err;
rt->path_rec[0] = *path;
- if (rt->num_paths == 2)
+ if (rt->num_pri_alt_paths == 2)
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
@@ -2817,26 +2819,72 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
}
EXPORT_SYMBOL(rdma_set_min_rnr_timer);
+static void route_set_path_rec_inbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
+{
+ struct rdma_route *route = &work->id->id.route;
+
+ if (!route->path_rec_inbound) {
+ route->path_rec_inbound =
+ kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
+ if (!route->path_rec_inbound)
+ return;
+ }
+
+ *route->path_rec_inbound = *path_rec;
+}
+
+static void route_set_path_rec_outbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
+{
+ struct rdma_route *route = &work->id->id.route;
+
+ if (!route->path_rec_outbound) {
+ route->path_rec_outbound =
+ kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
+ if (!route->path_rec_outbound)
+ return;
+ }
+
+ *route->path_rec_outbound = *path_rec;
+}
+
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
- void *context)
+ int num_prs, void *context)
{
struct cma_work *work = context;
struct rdma_route *route;
+ int i;
route = &work->id->id.route;
- if (!status) {
- route->num_paths = 1;
- *route->path_rec = *path_rec;
- } else {
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
- work->event.status = status;
- pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
- status);
+ if (status)
+ goto fail;
+
+ for (i = 0; i < num_prs; i++) {
+ if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
+ *route->path_rec = path_rec[i];
+ else if (path_rec[i].flags & IB_PATH_INBOUND)
+ route_set_path_rec_inbound(work, &path_rec[i]);
+ else if (path_rec[i].flags & IB_PATH_OUTBOUND)
+ route_set_path_rec_outbound(work, &path_rec[i]);
}
+ if (!route->path_rec) {
+ status = -EINVAL;
+ goto fail;
+ }
+
+ route->num_pri_alt_paths = 1;
+ queue_work(cma_wq, &work->work);
+ return;
+fail:
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
+ status);
queue_work(cma_wq, &work->work);
}
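How a multi-record reply is expected to be split by the loop above, for a hypothetical three-record response; only the primary record is mandatory, and a reply without a usable one is now turned into -EINVAL instead of being reported as success:

        /*
         *   rec[0].flags == 0 or has IB_PATH_GMP   -> *route->path_rec
         *   rec[1].flags has IB_PATH_INBOUND       -> route->path_rec_inbound  (allocated on demand)
         *   rec[2].flags has IB_PATH_OUTBOUND      -> route->path_rec_outbound (allocated on demand)
         */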
@@ -3081,7 +3129,7 @@ int rdma_set_ib_path(struct rdma_cm_id *id,
dev_put(ndev);
}
- id->route.num_paths = 1;
+ id->route.num_pri_alt_paths = 1;
return 0;
err_free:
@@ -3214,7 +3262,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err1;
}
- route->num_paths = 1;
+ route->num_pri_alt_paths = 1;
ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
@@ -3274,7 +3322,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
- route->num_paths = 0;
+ route->num_pri_alt_paths = 0;
err1:
kfree(work);
return ret;
@@ -4265,7 +4313,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
}
req.primary_path = &route->path_rec[0];
- if (route->num_paths == 2)
+ req.primary_path_inbound = route->path_rec_inbound;
+ req.primary_path_outbound = route->path_rec_outbound;
+ if (route->num_pri_alt_paths == 2)
req.alternate_path = &route->path_rec[1];
req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index de8a2d5d741c..7b68b3ea979f 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -292,7 +292,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
goto fail;
}
- strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+ strscpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d275db195f1a..ae60c73babcc 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -422,7 +422,7 @@ int ib_device_rename(struct ib_device *ibdev, const char *name)
return ret;
}
- strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+ strscpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
ret = rename_compat_devs(ibdev);
downgrade_write(&devices_rwsem);
@@ -1217,7 +1217,7 @@ static int assign_name(struct ib_device *device, const char *name)
ret = -ENFILE;
goto out;
}
- strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
+ strscpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
&last_id, GFP_KERNEL);
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
index 7063e41eaf26..c77d7d2559a1 100644
--- a/drivers/infiniband/core/lag.c
+++ b/drivers/infiniband/core/lag.c
@@ -7,8 +7,7 @@
#include <rdma/ib_cache.h>
#include <rdma/lag.h>
-static struct sk_buff *rdma_build_skb(struct ib_device *device,
- struct net_device *netdev,
+static struct sk_buff *rdma_build_skb(struct net_device *netdev,
struct rdma_ah_attr *ah_attr,
gfp_t flags)
{
@@ -86,7 +85,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
struct net_device *slave;
struct sk_buff *skb;
- skb = rdma_build_skb(device, master, ah_attr, flags);
+ skb = rdma_build_skb(master, ah_attr, flags);
if (!skb)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 003e504feca2..0de83d9a4985 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -50,6 +50,7 @@
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include <rdma/opa_addr.h>
+#include <rdma/rdma_cm.h>
#include "sa.h"
#include "core_priv.h"
@@ -104,7 +105,8 @@ struct ib_sa_device {
};
struct ib_sa_query {
- void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
+ void (*callback)(struct ib_sa_query *sa_query, int status,
+ int num_prs, struct ib_sa_mad *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -116,6 +118,12 @@ struct ib_sa_query {
u32 seq; /* Local svc request sequence number */
unsigned long timeout; /* Local svc timeout */
u8 path_use; /* How will the pathrecord be used */
+
+ /* A separate buffer to save the pathrecords of a response; in cases
+ * like IB/netlink, multiple pathrecords are supported and
+ * mad->data is not large enough to hold them
+ */
+ void *resp_pr_data;
};
#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
@@ -123,7 +131,8 @@ struct ib_sa_query {
#define IB_SA_QUERY_OPA 0x00000004
struct ib_sa_path_query {
- void (*callback)(int, struct sa_path_rec *, void *);
+ void (*callback)(int status, struct sa_path_rec *rec,
+ int num_paths, void *context);
void *context;
struct ib_sa_query sa_query;
struct sa_path_rec *conv_pr;
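/*
 * Editor's note: a rough user-space sketch of why the resp_pr_data buffer
 * added above exists.  A MAD data payload only has room for one packed
 * path record, while a netlink response may carry several, so the response
 * buffer is sized for the maximum number of primary-path records.  The
 * record-count limit and sizes below are illustrative assumptions, not the
 * kernel's values.
 */
#include <stdlib.h>

#define DEMO_MAX_PRIMARY_RECS 3     /* stand-in for RDMA_PRIMARY_PATH_MAX_REC_NUM */

struct demo_path_rec_data {
	unsigned int flags;
	unsigned char path_rec[64]; /* assumed packed record size */
};

/* Allocate a buffer big enough for every record the reply might carry. */
static struct demo_path_rec_data *alloc_resp_buf(void)
{
	return calloc(DEMO_MAX_PRIMARY_RECS, sizeof(struct demo_path_rec_data));
}

int main(void)
{
	struct demo_path_rec_data *buf = alloc_resp_buf();

	free(buf);
	return 0;
}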
@@ -712,7 +721,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
sa_rec->reversible != 0)
- query->path_use = LS_RESOLVE_PATH_USE_GMP;
+ query->path_use = LS_RESOLVE_PATH_USE_ALL;
else
query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
header->path_use = query->path_use;
@@ -865,50 +874,81 @@ static void send_handler(struct ib_mad_agent *agent,
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
const struct nlmsghdr *nlh)
{
+ struct ib_path_rec_data *srec, *drec;
+ struct ib_sa_path_query *path_query;
struct ib_mad_send_wc mad_send_wc;
- struct ib_sa_mad *mad = NULL;
const struct nlattr *head, *curr;
- struct ib_path_rec_data *rec;
- int len, rem;
+ struct ib_sa_mad *mad = NULL;
+ int len, rem, num_prs = 0;
u32 mask = 0;
int status = -EIO;
- if (query->callback) {
- head = (const struct nlattr *) nlmsg_data(nlh);
- len = nlmsg_len(nlh);
- switch (query->path_use) {
- case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
- mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
- break;
+ if (!query->callback)
+ goto out;
- case LS_RESOLVE_PATH_USE_ALL:
- case LS_RESOLVE_PATH_USE_GMP:
- default:
- mask = IB_PATH_PRIMARY | IB_PATH_GMP |
- IB_PATH_BIDIRECTIONAL;
- break;
+ path_query = container_of(query, struct ib_sa_path_query, sa_query);
+ mad = query->mad_buf->mad;
+ if (!path_query->conv_pr &&
+ (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
+ /* Need a larger buffer for possible multiple PRs */
+ query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
+ sizeof(*drec), GFP_KERNEL);
+ if (!query->resp_pr_data) {
+ query->callback(query, -ENOMEM, 0, NULL);
+ return;
}
- nla_for_each_attr(curr, head, len, rem) {
- if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
- rec = nla_data(curr);
- /*
- * Get the first one. In the future, we may
- * need to get up to 6 pathrecords.
- */
- if ((rec->flags & mask) == mask) {
- mad = query->mad_buf->mad;
- mad->mad_hdr.method |=
- IB_MGMT_METHOD_RESP;
- memcpy(mad->data, rec->path_rec,
- sizeof(rec->path_rec));
- status = 0;
- break;
- }
- }
+ }
+
+ head = (const struct nlattr *) nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+ switch (query->path_use) {
+ case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
+ mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
+ break;
+
+ case LS_RESOLVE_PATH_USE_ALL:
+ mask = IB_PATH_PRIMARY;
+ break;
+
+ case LS_RESOLVE_PATH_USE_GMP:
+ default:
+ mask = IB_PATH_PRIMARY | IB_PATH_GMP |
+ IB_PATH_BIDIRECTIONAL;
+ break;
+ }
+
+ drec = (struct ib_path_rec_data *)query->resp_pr_data;
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
+ continue;
+
+ srec = nla_data(curr);
+ if ((srec->flags & mask) != mask)
+ continue;
+
+ status = 0;
+ if (!drec) {
+ memcpy(mad->data, srec->path_rec,
+ sizeof(srec->path_rec));
+ num_prs = 1;
+ break;
}
- query->callback(query, status, mad);
+
+ memcpy(drec, srec, sizeof(*drec));
+ drec++;
+ num_prs++;
+ if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
+ break;
}
+ if (!status)
+ mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+
+ query->callback(query, status, num_prs, mad);
+ kvfree(query->resp_pr_data);
+ query->resp_pr_data = NULL;
+
+out:
mad_send_wc.send_buf = query->mad_buf;
mad_send_wc.status = IB_WC_SUCCESS;
send_handler(query->mad_buf->mad_agent, &mad_send_wc);
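/*
 * Editor's note: sketch of the flag mask chosen per path_use in the hunk
 * above.  UNIDIRECTIONAL asks for primary+outbound records, ALL accepts
 * any primary record, and GMP (the default) requires a bidirectional GMP
 * record; a returned record is kept only if it carries every bit in the
 * mask.  Enum and bit values are placeholders for illustration.
 */
#include <stdio.h>

enum demo_path_use { USE_GMP, USE_ALL, USE_UNIDIRECTIONAL };

#define P_PRIMARY       0x01  /* assumed bit, illustration only */
#define P_GMP           0x02
#define P_BIDIRECTIONAL 0x04
#define P_OUTBOUND      0x08

static unsigned int mask_for(enum demo_path_use use)
{
	switch (use) {
	case USE_UNIDIRECTIONAL:
		return P_PRIMARY | P_OUTBOUND;
	case USE_ALL:
		return P_PRIMARY;
	case USE_GMP:
	default:
		return P_PRIMARY | P_GMP | P_BIDIRECTIONAL;
	}
}

static int record_matches(unsigned int rec_flags, enum demo_path_use use)
{
	unsigned int mask = mask_for(use);

	return (rec_flags & mask) == mask;
}

int main(void)
{
	printf("%d\n", record_matches(P_PRIMARY | P_GMP | P_BIDIRECTIONAL,
				      USE_GMP)); /* prints 1 */
	return 0;
}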
@@ -1411,41 +1451,90 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
return PR_IB_SUPPORTED;
}
+static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
+ int status, struct ib_sa_mad *mad)
+{
+ struct sa_path_rec rec = {};
+
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(&rec);
+
+ if (query->conv_pr) {
+ struct sa_path_rec opa;
+
+ memset(&opa, 0, sizeof(struct sa_path_rec));
+ sa_convert_path_ib_to_opa(&opa, &rec);
+ query->callback(status, &opa, 1, query->context);
+ } else {
+ query->callback(status, &rec, 1, query->context);
+ }
+}
+
+/**
+ * ib_sa_pr_callback_multiple() - Parse path records then do callback.
+ *
+ * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
+ * (instead of"mad->data") and with "ib_path_rec_data" structure format,
+ * so that rec->flags can be set to indicate the type of PR.
+ * This is valid only in IB fabric.
+ */
+static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
+ int status, int num_prs,
+ struct ib_path_rec_data *rec_data)
+{
+ struct sa_path_rec *rec;
+ int i;
+
+ rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
+ if (!rec) {
+ query->callback(-ENOMEM, NULL, 0, query->context);
+ return;
+ }
+
+ for (i = 0; i < num_prs; i++) {
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ rec_data[i].path_rec, rec + i);
+ rec[i].rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(rec + i);
+ rec[i].flags = rec_data[i].flags;
+ }
+
+ query->callback(status, rec, num_prs, query->context);
+ kvfree(rec);
+}
+
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_prs,
struct ib_sa_mad *mad)
{
struct ib_sa_path_query *query =
container_of(sa_query, struct ib_sa_path_query, sa_query);
+ struct sa_path_rec rec;
- if (mad) {
- struct sa_path_rec rec;
-
- if (sa_query->flags & IB_SA_QUERY_OPA) {
- ib_unpack(opa_path_rec_table,
- ARRAY_SIZE(opa_path_rec_table),
- mad->data, &rec);
- rec.rec_type = SA_PATH_REC_TYPE_OPA;
- query->callback(status, &rec, query->context);
- } else {
- ib_unpack(path_rec_table,
- ARRAY_SIZE(path_rec_table),
- mad->data, &rec);
- rec.rec_type = SA_PATH_REC_TYPE_IB;
- sa_path_set_dmac_zero(&rec);
-
- if (query->conv_pr) {
- struct sa_path_rec opa;
+ if (!mad || !num_prs) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
- memset(&opa, 0, sizeof(struct sa_path_rec));
- sa_convert_path_ib_to_opa(&opa, &rec);
- query->callback(status, &opa, query->context);
- } else {
- query->callback(status, &rec, query->context);
- }
+ if (sa_query->flags & IB_SA_QUERY_OPA) {
+ if (num_prs != 1) {
+ query->callback(-EINVAL, NULL, 0, query->context);
+ return;
}
- } else
- query->callback(status, NULL, query->context);
+
+ ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_OPA;
+ query->callback(status, &rec, num_prs, query->context);
+ } else {
+ if (!sa_query->resp_pr_data)
+ ib_sa_pr_callback_single(query, status, mad);
+ else
+ ib_sa_pr_callback_multiple(query, status, num_prs,
+ sa_query->resp_pr_data);
+ }
}
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
@@ -1489,7 +1578,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
- void *context),
+ int num_paths, void *context),
void *context,
struct ib_sa_query **sa_query)
{
@@ -1588,7 +1677,7 @@ err1:
EXPORT_SYMBOL(ib_sa_path_rec_get);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_prs,
struct ib_sa_mad *mad)
{
struct ib_sa_mcmember_query *query =
@@ -1680,7 +1769,7 @@ err1:
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_paths,
struct ib_sa_mad *mad)
{
struct ib_sa_guidinfo_query *query =
@@ -1790,7 +1879,7 @@ static void ib_classportinfo_cb(void *context)
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
- int status,
+ int status, int num_prs,
struct ib_sa_mad *mad)
{
unsigned long flags;
@@ -1966,13 +2055,13 @@ static void send_handler(struct ib_mad_agent *agent,
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ query->callback(query, -ETIMEDOUT, 0, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ query->callback(query, -EINTR, 0, NULL);
break;
default:
- query->callback(query, -EIO, NULL);
+ query->callback(query, -EIO, 0, NULL);
break;
}
@@ -2000,10 +2089,10 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
+ -EINVAL : 0, 1,
(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
else
- query->callback(query, -EIO, NULL);
+ query->callback(query, -EIO, 0, NULL);
}
ib_free_recv_mad(mad_recv_wc);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 9d6ac9dff39a..bf42650f125b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -754,8 +754,8 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
{
struct rdma_dev_addr *dev_addr;
- resp->num_paths = route->num_paths;
- switch (route->num_paths) {
+ resp->num_paths = route->num_pri_alt_paths;
+ switch (route->num_pri_alt_paths) {
case 0:
dev_addr = &route->addr.dev_addr;
rdma_addr_get_dgid(dev_addr,
@@ -781,8 +781,8 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
struct rdma_route *route)
{
- resp->num_paths = route->num_paths;
- switch (route->num_paths) {
+ resp->num_paths = route->num_pri_alt_paths;
+ switch (route->num_pri_alt_paths) {
case 0:
rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
(union ib_gid *)&resp->ib_route[0].dgid);
@@ -921,7 +921,7 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
if (!resp)
return -ENOMEM;
- resp->num_paths = ctx->cm_id->route.num_paths;
+ resp->num_paths = ctx->cm_id->route.num_pri_alt_paths;
for (i = 0, out_len -= sizeof(*resp);
i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
i++, out_len -= sizeof(struct ib_path_rec_data)) {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 186ed8859920..e9fa22d31c23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -43,8 +43,6 @@
#include <linux/hmm.h>
#include <linux/pagemap.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "uverbs.h"
@@ -462,7 +460,7 @@ retry:
mutex_unlock(&umem_odp->umem_mutex);
out_put_mm:
- mmput(owning_mm);
+ mmput_async(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 046376bd68e2..4796f6a8828c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -739,6 +739,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
mr->uobject = uobj;
atomic_inc(&pd->usecnt);
mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
rdma_restrack_set_name(&mr->res, NULL);
@@ -861,8 +862,10 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
mr->pd = new_pd;
atomic_inc(&new_pd->usecnt);
}
- if (cmd.flags & IB_MR_REREG_TRANS)
+ if (cmd.flags & IB_MR_REREG_TRANS) {
mr->iova = cmd.hca_va;
+ mr->length = cmd.length;
+ }
}
memset(&resp, 0, sizeof(resp));
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 990f0724acc6..d9799706c58e 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -337,6 +337,14 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
break;
+ case UVERBS_ATTR_TYPE_RAW_FD:
+ if (uattr->attr_data.reserved || uattr->len != 0 ||
+ uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX)
+ return -EINVAL;
+ /* _uverbs_get_const_signed() is the accessor */
+ e->ptr_attr.data = uattr->data_s64;
+ break;
+
case UVERBS_ATTR_TYPE_IDRS_ARRAY:
return uverbs_process_idrs_array(pbundle, attr_uapi,
&e->objs_arr_attr, uattr,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e54b3f1b730e..26b021f43ba4 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1038,7 +1038,7 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
rdma_restrack_put(&srq->res);
- atomic_dec(&srq->pd->usecnt);
+ atomic_dec(&pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type))
@@ -2149,6 +2149,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->pd = pd;
mr->dm = NULL;
atomic_inc(&pd->usecnt);
+ mr->iova = virt_addr;
+ mr->length = length;
rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
rdma_restrack_parent_name(&mr->res, &pd->res);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 3d6834d3d4fb..8c0c80a8d338 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -725,7 +725,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->node_type = RDMA_NODE_IB_CA;
- strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 0b0b93b529f3..d4b9226088bd 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -444,7 +444,10 @@ struct efa_admin_create_cq_cmd {
/*
* 4:0 : cq_entry_size_words - size of CQ entry in
* 32-bit words, valid values: 4, 8.
- * 7:5 : reserved7 - MBZ
+ * 5 : set_src_addr - If set, source address will be
+ * filled on RX completions from unknown senders.
+ * Requires an 8-word CQ entry size.
+ * 7:6 : reserved7 - MBZ
*/
u8 cq_caps_2;
@@ -980,6 +983,7 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK BIT(5)
/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0)
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index fb405da4e1db..8f8885e002ba 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -168,7 +168,10 @@ int efa_com_create_cq(struct efa_com_dev *edev,
EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
create_cmd.eqn = params->eqn;
}
-
+ if (params->set_src_addr) {
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR, 1);
+ }
efa_com_set_dma_addr(params->dma_addr,
&create_cmd.cq_ba.mem_addr_high,
&create_cmd.cq_ba.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index c33010bbf9e8..0898ad5bc340 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -75,7 +75,8 @@ struct efa_com_create_cq_params {
u16 uarn;
u16 eqn;
u8 entry_size_in_bytes;
- bool interrupt_mode_enabled;
+ u8 interrupt_mode_enabled : 1;
+ u8 set_src_addr : 1;
};
struct efa_com_create_cq_result {
diff --git a/drivers/infiniband/hw/efa/efa_io_defs.h b/drivers/infiniband/hw/efa/efa_io_defs.h
new file mode 100644
index 000000000000..17ba8984b11e
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_io_defs.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_IO_H_
+#define _EFA_IO_H_
+
+#define EFA_IO_TX_DESC_NUM_BUFS 2
+#define EFA_IO_TX_DESC_NUM_RDMA_BUFS 1
+#define EFA_IO_TX_DESC_INLINE_MAX_SIZE 32
+#define EFA_IO_TX_DESC_IMM_DATA_SIZE 4
+
+enum efa_io_queue_type {
+ /* send queue (of a QP) */
+ EFA_IO_SEND_QUEUE = 1,
+ /* recv queue (of a QP) */
+ EFA_IO_RECV_QUEUE = 2,
+};
+
+enum efa_io_send_op_type {
+ /* send message */
+ EFA_IO_SEND = 0,
+ /* RDMA read */
+ EFA_IO_RDMA_READ = 1,
+};
+
+enum efa_io_comp_status {
+ /* Successful completion */
+ EFA_IO_COMP_STATUS_OK = 0,
+ /* Flushed during QP destroy */
+ EFA_IO_COMP_STATUS_FLUSHED = 1,
+ /* Internal QP error */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_QP_INTERNAL_ERROR = 2,
+ /* Bad operation type */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_OP_TYPE = 3,
+ /* Bad AH */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_AH = 4,
+ /* LKEY not registered or does not match IOVA */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_LKEY = 5,
+ /* Message too long */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_BAD_LENGTH = 6,
+ /* Destination ENI is down or does not run EFA */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_ADDRESS = 7,
+ /* Connection was reset by remote side */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_ABORT = 8,
+ /* Bad dest QP number (QP does not exist or is in error state) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_DEST_QPN = 9,
+ /* Destination resource not ready (no WQEs posted on RQ) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_RNR = 10,
+ /* Receiver SGL too short */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_LENGTH = 11,
+ /* Unexpected status returned by responder */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_STATUS = 12,
+ /* Unresponsive remote - detected locally */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNRESP_REMOTE = 13,
+};
+
+struct efa_io_tx_meta_desc {
+ /* Verbs-generated Request ID */
+ u16 req_id;
+
+ /*
+ * control flags
+ * 3:0 : op_type - operation type: send/rdma/fast mem
+ * ops/etc
+ * 4 : has_imm - immediate_data field carries valid
+ * data.
+ * 5 : inline_msg - inline mode - inline message data
+ * follows this descriptor (no buffer descriptors).
+ * Note that it is different from immediate data
+ * 6 : meta_extension - Extended metadata. MBZ
+ * 7 : meta_desc - Indicates metadata descriptor.
+ * Must be set.
+ */
+ u8 ctrl1;
+
+ /*
+ * control flags
+ * 0 : phase
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction. Must be set.
+ * 3 : last - Indicates last descriptor in
+ * transaction. Must be set.
+ * 4 : comp_req - Indicates whether completion should
+ * be posted, after packet is transmitted. Valid only
+ * for the first descriptor
+ * 7:5 : reserved29 - MBZ
+ */
+ u8 ctrl2;
+
+ u16 dest_qp_num;
+
+ /*
+ * If inline_msg bit is set, length of inline message in bytes,
+ * otherwise length of SGL (number of buffers).
+ */
+ u16 length;
+
+ /*
+ * immediate data: if has_imm is set, then this field is included
+ * within Tx message and reported in remote Rx completion.
+ */
+ u32 immediate_data;
+
+ u16 ah;
+
+ u16 reserved;
+
+ /* Queue key */
+ u32 qkey;
+
+ u8 reserved2[12];
+};
+
+/*
+ * Tx queue buffer descriptor, for any transport type. Preceded by metadata
+ * descriptor.
+ */
+struct efa_io_tx_buf_desc {
+ /* length in bytes */
+ u32 length;
+
+ /*
+ * 23:0 : lkey - local memory translation key
+ * 31:24 : reserved - MBZ
+ */
+ u32 lkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_remote_mem_addr {
+ /* length in bytes */
+ u32 length;
+
+ /* remote memory translation key */
+ u32 rkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_rdma_req {
+ /* Remote memory address */
+ struct efa_io_remote_mem_addr remote_mem;
+
+ /* Local memory address */
+ struct efa_io_tx_buf_desc local_mem[1];
+};
+
+/*
+ * Tx WQE, composed of tx meta descriptors followed by either tx buffer
+ * descriptors or inline data
+ */
+struct efa_io_tx_wqe {
+ /* TX meta */
+ struct efa_io_tx_meta_desc meta;
+
+ union {
+ /* Send buffer descriptors */
+ struct efa_io_tx_buf_desc sgl[2];
+
+ u8 inline_data[32];
+
+ /* RDMA local and remote memory addresses */
+ struct efa_io_rdma_req rdma_req;
+ } data;
+};
+
+/*
+ * Rx buffer descriptor; RX WQE is composed of one or more RX buffer
+ * descriptors.
+ */
+struct efa_io_rx_desc {
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer Pointer[63:32] */
+ u32 buf_addr_hi;
+
+ /* Verbs-generated request id. */
+ u16 req_id;
+
+ /* Length in bytes. */
+ u16 length;
+
+ /*
+ * LKey and control flags
+ * 23:0 : lkey
+ * 29:24 : reserved - MBZ
+ * 30 : first - Indicates first descriptor in WQE
+ * 31 : last - Indicates last descriptor in WQE
+ */
+ u32 lkey_ctrl;
+};
+
+/* Common IO completion descriptor */
+struct efa_io_cdesc_common {
+ /*
+ * verbs-generated request ID, as provided in the completed tx or rx
+ * descriptor.
+ */
+ u16 req_id;
+
+ u8 status;
+
+ /*
+ * flags
+ * 0 : phase - Phase bit
+ * 2:1 : q_type - enum efa_io_queue_type: send/recv
+ * 3 : has_imm - indicates that immediate data is
+ * present - for RX completions only
+ * 7:4 : reserved28 - MBZ
+ */
+ u8 flags;
+
+ /* local QP number */
+ u16 qp_num;
+
+ /* Transferred length */
+ u16 length;
+};
+
+/* Tx completion descriptor */
+struct efa_io_tx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+};
+
+/* Rx Completion Descriptor */
+struct efa_io_rx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+
+ /* Remote Address Handle FW index, 0xFFFF indicates invalid ah */
+ u16 ah;
+
+ u16 src_qp_num;
+
+ /* Immediate data */
+ u32 imm;
+};
+
+/* Extended Rx Completion Descriptor */
+struct efa_io_rx_cdesc_ex {
+ /* Base RX completion info */
+ struct efa_io_rx_cdesc rx_cdesc_base;
+
+ /*
+ * Valid only in case of unknown AH (0xFFFF) and CQ set_src_addr is
+ * enabled.
+ */
+ u8 src_addr[16];
+};
+
+/* tx_meta_desc */
+#define EFA_IO_TX_META_DESC_OP_TYPE_MASK GENMASK(3, 0)
+#define EFA_IO_TX_META_DESC_HAS_IMM_MASK BIT(4)
+#define EFA_IO_TX_META_DESC_INLINE_MSG_MASK BIT(5)
+#define EFA_IO_TX_META_DESC_META_EXTENSION_MASK BIT(6)
+#define EFA_IO_TX_META_DESC_META_DESC_MASK BIT(7)
+#define EFA_IO_TX_META_DESC_PHASE_MASK BIT(0)
+#define EFA_IO_TX_META_DESC_FIRST_MASK BIT(2)
+#define EFA_IO_TX_META_DESC_LAST_MASK BIT(3)
+#define EFA_IO_TX_META_DESC_COMP_REQ_MASK BIT(4)
+
+/* tx_buf_desc */
+#define EFA_IO_TX_BUF_DESC_LKEY_MASK GENMASK(23, 0)
+
+/* rx_desc */
+#define EFA_IO_RX_DESC_LKEY_MASK GENMASK(23, 0)
+#define EFA_IO_RX_DESC_FIRST_MASK BIT(30)
+#define EFA_IO_RX_DESC_LAST_MASK BIT(31)
+
+/* cdesc_common */
+#define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
+#define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
+#define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
+
+#endif /* _EFA_IO_H_ */
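/*
 * Editor's note: a user-space sketch of how the ctrl1 flag layout of
 * efa_io_tx_meta_desc above can be packed and unpacked with the mask
 * macros this header defines.  GENMASK()/BIT() and the field helpers are
 * re-implemented locally so the sketch compiles outside the kernel; only
 * the bit positions are taken from the header, everything else is an
 * illustrative assumption.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define TX_META_OP_TYPE_MASK   GENMASK(3, 0) /* matches the header's layout */
#define TX_META_HAS_IMM_MASK   BIT(4)
#define TX_META_INLINE_MASK    BIT(5)
#define TX_META_META_DESC_MASK BIT(7)

/* Minimal stand-ins for the kernel's FIELD_PREP()/FIELD_GET(). */
static uint8_t field_prep(uint32_t mask, uint32_t val)
{
	return (uint8_t)((val << __builtin_ctz(mask)) & mask);
}

static uint32_t field_get(uint32_t mask, uint8_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint8_t ctrl1 = 0;

	ctrl1 |= field_prep(TX_META_OP_TYPE_MASK, 0); /* send operation */
	ctrl1 |= TX_META_HAS_IMM_MASK;                /* immediate data present */
	ctrl1 |= TX_META_META_DESC_MASK;              /* must be set */

	printf("op_type=%u has_imm=%u\n",
	       field_get(TX_META_OP_TYPE_MASK, ctrl1),
	       field_get(TX_META_HAS_IMM_MASK, ctrl1));
	return 0;
}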
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index ecfe70eb5efb..31454643f8c5 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/dma-buf.h>
@@ -15,6 +15,7 @@
#include <rdma/uverbs_ioctl.h>
#include "efa.h"
+#include "efa_io_defs.h"
enum {
EFA_MMAP_DMA_PAGE = 0,
@@ -242,6 +243,7 @@ int efa_query_device(struct ib_device *ibdev,
resp.max_rq_wr = dev_attr->max_rq_depth;
resp.max_rdma_size = dev_attr->max_rdma_size;
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
if (EFA_DEV_CAP(dev, RDMA_READ))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
@@ -1064,6 +1066,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
int entries = attr->cqe;
+ bool set_src_addr;
int err;
ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
@@ -1109,7 +1112,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (!cmd.cq_entry_size) {
+ set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
+ if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
+ (set_src_addr ||
+ cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
ibdev_dbg(ibdev,
"Invalid entry size [%u]\n", cmd.cq_entry_size);
err = -EINVAL;
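/*
 * Editor's note: a compact restatement of the entry-size check in the hunk
 * above.  The extended completion descriptor size is always acceptable;
 * the base descriptor size is only acceptable when source-address capture
 * is not requested.  The descriptor sizes below are illustrative stand-ins,
 * not the driver's real sizeof() values.
 */
#include <stdbool.h>
#include <stdio.h>

#define BASE_CDESC_SIZE 16u /* assumed, for illustration */
#define EXT_CDESC_SIZE  32u /* assumed, for illustration */

static bool cq_entry_size_ok(unsigned int entry_size, bool set_src_addr)
{
	if (entry_size == EXT_CDESC_SIZE)
		return true;
	return !set_src_addr && entry_size == BASE_CDESC_SIZE;
}

int main(void)
{
	printf("%d %d %d\n",
	       cq_entry_size_ok(EXT_CDESC_SIZE, true),   /* 1 */
	       cq_entry_size_ok(BASE_CDESC_SIZE, false), /* 1 */
	       cq_entry_size_ok(BASE_CDESC_SIZE, true)); /* 0 */
	return 0;
}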
@@ -1138,6 +1144,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
params.dma_addr = cq->dma_addr;
params.entry_size_in_bytes = cmd.cq_entry_size;
params.num_sub_cqs = cmd.num_sub_cqs;
+ params.set_src_addr = set_src_addr;
if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
cq->eq = efa_vec2eq(dev, attr->comp_vector);
params.eqn = cq->eq->eeq.eqn;
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 2aae635c1c8d..730783fbc894 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
+#include <linux/pci.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>
@@ -196,6 +197,7 @@ struct erdma_dev {
struct erdma_devattr attrs;
/* physical port state (only one port per device) */
enum ib_port_state state;
+ u32 mtu;
/* cmdq and aeq use the same msix vector */
struct erdma_irq comm_irq;
@@ -269,7 +271,7 @@ void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index f13f16479eca..74f6348f240a 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -10,15 +10,7 @@
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */
-#include <linux/errno.h>
-#include <linux/inetdevice.h>
-#include <linux/net.h>
-#include <linux/types.h>
#include <linux/workqueue.h>
-#include <net/addrconf.h>
-
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 57da0c670472..6ebfa6989b11 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -4,13 +4,7 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
#include "erdma.h"
-#include "erdma_hw.h"
-#include "erdma_verbs.h"
static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
{
@@ -441,7 +435,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
}
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1)
{
struct erdma_comp_wait *comp_wait;
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 751c7f9f0de7..58e0dc5c75d1 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -4,9 +4,6 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <rdma/ib_verbs.h>
-
-#include "erdma_hw.h"
#include "erdma_verbs.h"
static void *get_next_valid_cqe(struct erdma_cq *cq)
@@ -62,7 +59,6 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
[ERDMA_OP_RECV_INV] = IB_WC_RECV,
[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 8f2d094e0227..ed54130d924b 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -4,12 +4,6 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include "erdma.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
#define MAX_POLL_CHUNK_SIZE 16
@@ -229,9 +223,7 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
- sizeof(struct erdma_cmdq_create_eq_req),
- NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -281,8 +273,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index b210c49c669f..e788887732e1 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -153,6 +153,7 @@ enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_CREATE_EQ = 0,
CMDQ_OPCODE_DESTROY_EQ = 1,
CMDQ_OPCODE_QUERY_FW_INFO = 2,
+ CMDQ_OPCODE_CONF_MTU = 3,
};
/* cmdq-SQE HDR */
@@ -190,6 +191,11 @@ struct erdma_cmdq_destroy_eq_req {
u8 qtype;
};
+struct erdma_cmdq_config_mtu_req {
+ u64 hdr;
+ u32 mtu;
+};
+
/* create_cq cfg0 */
#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
@@ -450,13 +456,13 @@ enum erdma_opcode {
ERDMA_OP_RECV_IMM = 5,
ERDMA_OP_RECV_INV = 6,
- ERDMA_OP_REQ_ERR = 7,
- ERDMA_OP_READ_RESPONSE = 8,
+ ERDMA_OP_RSVD0 = 7,
+ ERDMA_OP_RSVD1 = 8,
ERDMA_OP_WRITE_WITH_IMM = 9,
- ERDMA_OP_RECV_ERR = 10,
+ ERDMA_OP_RSVD2 = 10,
+ ERDMA_OP_RSVD3 = 11,
- ERDMA_OP_INVALIDATE = 11,
ERDMA_OP_RSP_SEND_IMM = 12,
ERDMA_OP_SEND_WITH_INV = 13,
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 07e743d24847..49778bb294ae 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -4,21 +4,12 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
@@ -43,10 +34,15 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
dev->state = IB_PORT_DOWN;
erdma_port_event(dev, IB_EVENT_PORT_ERR);
break;
+ case NETDEV_CHANGEMTU:
+ if (dev->mtu != netdev->mtu) {
+ erdma_set_mtu(dev, netdev->mtu);
+ dev->mtu = netdev->mtu;
+ }
+ break;
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEADDR:
- case NETDEV_CHANGEMTU:
case NETDEV_GOING_DOWN:
case NETDEV_CHANGE:
default:
@@ -104,6 +100,7 @@ static int erdma_device_register(struct erdma_dev *dev)
if (ret)
return ret;
+ dev->mtu = dev->netdev->mtu;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index bc3ec22a62c5..5fe1a339a435 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -6,15 +6,6 @@
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
-
-#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"
@@ -105,8 +96,7 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
req.recv_nxt = tp->rcv_nxt;
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
@@ -124,8 +114,7 @@ static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 699bd3f59cd3..62be98e2b941 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -9,21 +9,14 @@
/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>
#include "erdma.h"
#include "erdma_cm.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
@@ -102,7 +95,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
}
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
&resp1);
if (!err)
qp->attrs.cookie =
@@ -151,8 +144,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
@@ -202,8 +194,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
}
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -976,8 +967,7 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
- ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (ret)
return ret;
@@ -1002,8 +992,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return err;
@@ -1040,8 +1029,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return err;
@@ -1448,6 +1436,17 @@ err_out_xa:
return ret;
}
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
+{
+ struct erdma_cmdq_config_mtu_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CONF_MTU);
+ req.mtu = mtu;
+
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+}
+
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
struct ib_event event;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c7baddb1f292..ab6380635e9e 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -7,15 +7,7 @@
#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__
-#include <linux/errno.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/iw_cm.h>
-
#include "erdma.h"
-#include "erdma_cm.h"
-#include "erdma_hw.h"
/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
@@ -338,5 +330,6 @@ struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
#endif
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index f1245c94ae26..ebe970f76232 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8753,7 +8753,7 @@ static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
/*
* When writing a LCB CSR, out_data contains the full value to
- * to be written, while in_data contains the relative LCB
+ * be written, while in_data contains the relative LCB
* address in 7:0. Do the work here, rather than the caller,
* of distributing the write data to where it needs to go:
*
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 629beff053ad..f5f9269fdc16 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -965,7 +965,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
uctxt->userversion = uinfo->userversion;
uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
- strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
+ strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
uctxt->jkey = generate_jkey(current_uid());
hfi1_stats.sps_ctxts++;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index aa15a5cc7cf3..1d77514ebbee 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1114,7 +1114,7 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
* Reset all of the fabric serdes for this HFI in preparation to take the
* link to Polling.
*
- * To do a reset, we need to write to to the serdes registers. Unfortunately,
+ * To do a reset, we need to write to the serdes registers. Unfortunately,
* the fabric serdes download to the other HFI on the ASIC will have turned
* off the firmware validation on this HFI. This means we can't write to the
* registers to reset the serdes. Work around this by performing a complete
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
index 3afa7545242c..629691a572ef 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_rx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -11,13 +11,10 @@
static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
{
- void *dst_data;
-
skb_checksum_none_assert(skb);
skb->protocol = *((__be16 *)data);
- dst_data = skb_put(skb, size);
- memcpy(dst_data, data, size);
+ skb_put_data(skb, data, size);
skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
}
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 6988f6f21bde..e6e17984553c 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1447,12 +1447,10 @@ static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
OPA_LINKDOWN_REASON_UNKNOWN);
- ret = set_link_state(ppd, HLS_DN_DOWNDEF);
- return ret;
+ return set_link_state(ppd, HLS_DN_DOWNDEF);
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
@@ -1801,7 +1799,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ib_set_device_ops(ibdev, &hfi1_dev_ops);
- strlcpy(ibdev->node_desc, init_utsname()->nodename,
+ strscpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
/*
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 38565532d654..7f30f32b34dc 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -391,9 +391,6 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
bool *call_send);
-extern const u32 rc_only_opcode;
-extern const u32 uc_only_opcode;
-
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 9f04f25d9631..a7d259238305 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -10,6 +10,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 8acd599ffac1..736dc2f993b4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -454,7 +454,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
- dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
cqn);
return;
}
@@ -475,14 +475,14 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
- dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
return;
}
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
event_type, cqn);
return;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f848eedc6a23..723e55a7de8d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -240,7 +240,6 @@ struct hns_roce_hem_table {
/* Single obj size */
unsigned long obj_size;
unsigned long table_chunk_size;
- int lowmem;
struct mutex mutex;
struct hns_roce_hem **hem;
u64 **bt_l1;
@@ -599,7 +598,6 @@ struct hns_roce_qp {
struct hns_roce_db rdb;
struct hns_roce_db sdb;
unsigned long en_flags;
- u32 doorbell_qpn;
enum ib_sig_type sq_signal_bits;
struct hns_roce_wq sq;
@@ -726,18 +724,17 @@ struct hns_roce_caps {
u32 max_sq_sg;
u32 max_sq_inline;
u32 max_rq_sg;
- u32 max_extend_sg;
+ u32 rsv0;
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
- int num_qpc_timer;
u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
u32 max_srq_sges;
u32 max_sq_desc_sz;
u32 max_rq_desc_sz;
- u32 max_srq_desc_sz;
+ u32 rsv2;
int max_qp_init_rdma;
int max_qp_dest_rdma;
u32 num_cqs;
@@ -750,7 +747,7 @@ struct hns_roce_caps {
int num_comp_vectors;
int num_other_vectors;
u32 num_mtpts;
- u32 num_mtt_segs;
+ u32 rsv1;
u32 num_srqwqe_segs;
u32 num_idx_segs;
int reserved_mrws;
@@ -849,11 +846,6 @@ struct hns_roce_caps {
enum cong_type cong_type;
};
-struct hns_roce_dfx_hw {
- int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer);
-};
-
enum hns_roce_device_state {
HNS_ROCE_DEVICE_STATE_INITED,
HNS_ROCE_DEVICE_STATE_RST_DOWN,
@@ -899,6 +891,9 @@ struct hns_roce_hw {
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
+ int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
+ int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
+ int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -960,7 +955,6 @@ struct hns_roce_dev {
void *priv;
struct workqueue_struct *irq_workq;
struct work_struct ecc_work;
- const struct hns_roce_dfx_hw *dfx;
u32 func_num;
u32 is_vf;
u32 cong_algo_tmpl_id;
@@ -1228,8 +1222,12 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
- struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ce1a0d2792a3..aa8a08d1c014 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -455,7 +455,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
- flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+ flag = GFP_KERNEL | __GFP_NOWARN;
table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
size, flag);
if (!table->hem[index->buf]) {
@@ -588,8 +588,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
table->hem[i] = hns_roce_alloc_hem(hr_dev,
table->table_chunk_size >> PAGE_SHIFT,
table->table_chunk_size,
- (table->lowmem ? GFP_KERNEL :
- GFP_HIGHUSER) | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!table->hem[i]) {
ret = -ENOMEM;
goto out;
@@ -725,9 +724,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
int length;
int i, j;
- if (!table->lowmem)
- return NULL;
-
mutex_lock(&table->mutex);
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
@@ -783,8 +779,7 @@ out:
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
- unsigned long obj_size, unsigned long nobj,
- int use_lowmem)
+ unsigned long obj_size, unsigned long nobj)
{
unsigned long obj_per_chunk;
unsigned long num_hem;
@@ -861,7 +856,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
table->type = type;
table->num_hem = num_hem;
table->obj_size = obj_size;
- table->lowmem = use_lowmem;
mutex_init(&table->mutex);
return 0;
@@ -932,7 +926,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
if (table->hem[i]) {
if (hr_dev->hw->clear_hem(hr_dev, table,
i * table->table_chunk_size / table->obj_size, 0))
- dev_err(dev, "Clear HEM base address failed.\n");
+ dev_err(dev, "clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
}
@@ -986,7 +980,7 @@ struct hns_roce_hem_head {
static struct hns_roce_hem_item *
hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
- bool exist_bt, int bt_level)
+ bool exist_bt)
{
struct hns_roce_hem_item *hem;
@@ -1195,7 +1189,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
start_aligned = (distance / step) * step + r->offset;
end = min_t(int, start_aligned + step - 1, max_ofs);
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
- true, level);
+ true);
if (!cur) {
ret = -ENOMEM;
goto err_exit;
@@ -1247,7 +1241,7 @@ alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
/* indicate to last region */
r = &regions[region_cnt - 1];
hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
- ba_num, true, 0);
+ ba_num, true);
if (!hem)
return ERR_PTR(-ENOMEM);
@@ -1264,7 +1258,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
struct hns_roce_hem_item *hem;
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
- r->count, false, 0);
+ r->count, false);
if (!hem)
return -ENOMEM;
@@ -1421,7 +1415,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
&hem_list->btm_bt);
if (ret) {
dev_err(hr_dev->dev,
- "alloc hem trunk fail ret=%d!\n", ret);
+ "alloc hem trunk fail ret = %d!\n", ret);
goto err_alloc;
}
}
@@ -1430,7 +1424,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
region_cnt);
if (ret)
- dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
+ dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
else
return 0;
@@ -1468,19 +1462,17 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
- int offset, int *mtt_cnt, u64 *phy_addr)
+ int offset, int *mtt_cnt)
{
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
- u64 phy_base = 0;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
- phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
nr = hem->end + 1 - offset;
break;
}
@@ -1489,8 +1481,5 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
if (mtt_cnt)
*mtt_cnt = nr;
- if (phy_addr)
- *phy_addr = phy_base;
-
return cpu_base;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 2d84a6b3f05d..7d23d3c51da4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -111,8 +111,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
dma_addr_t *dma_handle);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
- unsigned long obj_size, unsigned long nobj,
- int use_lowmem);
+ unsigned long obj_size, unsigned long nobj);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
@@ -132,7 +131,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
- int offset, int *mtt_cnt, u64 *phy_addr);
+ int offset, int *mtt_cnt);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cbdafaac678a..1ead35fb031b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -193,8 +193,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
unsigned int *sge_idx, u32 msg_len)
{
struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
- unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
- unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
+ unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
unsigned int left_len_in_pg;
unsigned int idx = *sge_idx;
unsigned int i = 0;
@@ -222,7 +221,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
if (len <= left_len_in_pg) {
memcpy(dseg, addr, len);
- idx += len / dseg_len;
+ idx += len / HNS_ROCE_SGE_SIZE;
i++;
if (i >= wr->num_sge)
@@ -237,7 +236,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
len -= left_len_in_pg;
addr += left_len_in_pg;
- idx += left_len_in_pg / dseg_len;
+ idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
dseg = hns_roce_get_extend_sge(qp,
idx & (qp->sge.sge_cnt - 1));
left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
@@ -381,7 +380,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) {
- ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
+ ibdev_err(ibdev, "not supported QP(0x%x)type!\n",
ibqp->qp_type);
return -EOPNOTSUPP;
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
@@ -637,7 +636,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
} else {
struct hns_roce_v2_db sq_db = {};
- hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
+ hr_reg_write(&sq_db, DB_TAG, qp->qpn);
hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
hr_reg_write(&sq_db, DB_PI, qp->sq.head);
hr_reg_write(&sq_db, DB_SL, qp->sl);
@@ -1406,20 +1405,20 @@ static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
+ "func clear is pending, device in resetting state.\n");
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (!ops->get_hw_reset_stat(handle)) {
hr_dev->is_reset = true;
dev_info(hr_dev->dev,
- "Func clear success after reset.\n");
+ "func clear success after reset.\n");
return;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
- dev_warn(hr_dev->dev, "Func clear failed.\n");
+ dev_warn(hr_dev->dev, "func clear failed.\n");
}
static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
@@ -1431,21 +1430,21 @@ static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
+ "func clear is pending, device in resetting state.\n");
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (ops->ae_dev_reset_cnt(handle) !=
hr_dev->reset_cnt) {
hr_dev->is_reset = true;
dev_info(hr_dev->dev,
- "Func clear success after sw reset\n");
+ "func clear success after sw reset\n");
return;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
- dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+ dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
}
static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
@@ -1458,7 +1457,7 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
hr_dev->dis_db = true;
hr_dev->is_reset = true;
- dev_info(hr_dev->dev, "Func clear success after reset.\n");
+ dev_info(hr_dev->dev, "func clear success after reset.\n");
return;
}
@@ -1475,9 +1474,9 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
if (retval && !flag)
dev_warn(hr_dev->dev,
- "Func clear read failed, ret = %d.\n", retval);
+ "func clear read failed, ret = %d.\n", retval);
- dev_warn(hr_dev->dev, "Func clear failed.\n");
+ dev_warn(hr_dev->dev, "func clear failed.\n");
}
static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
@@ -1498,7 +1497,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
fclr_write_fail_flag = true;
- dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
+ dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
ret);
goto out;
}
@@ -1966,7 +1965,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
- caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1977,14 +1975,13 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
- caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -2185,13 +2182,14 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
if (!caps->num_comp_vectors)
- caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
- (u32)priv->handle->rinfo.num_vectors - 2);
+ caps->num_comp_vectors =
+ min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
+ (u32)priv->handle->rinfo.num_vectors -
+ (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
@@ -2272,15 +2270,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
- caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
- caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
caps->num_other_vectors = resp_a->num_other_vectors;
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
- caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
caps->cqe_sz = resp_a->cqe_sz;
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
@@ -4300,7 +4295,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
- int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4364,7 +4358,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
+ const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4613,7 +4607,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_DQPN);
}
- memcpy(&(context->dmac), dmac, sizeof(u32));
+ memcpy(&context->dmac, dmac, sizeof(u32));
hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
qpc_mask->dmac = 0;
hr_reg_clear(qpc_mask, QPC_DMAC_H);
@@ -5015,11 +5009,9 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
- qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- modify_qp_init_to_init(ibqp, attr, attr_mask, context,
- qpc_mask);
+ modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -5040,14 +5032,14 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 20.\n");
+ "local ACK timeout shall be 0 to 20.\n");
return false;
}
*timeout += QP_ACK_TIMEOUT_OFFSET;
} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX) {
ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 31.\n");
+ "local ACK timeout shall be 0 to 31.\n");
return false;
}
}
@@ -5307,9 +5299,8 @@ static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
-static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_v2_qp_context *hr_context)
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
+ void *buffer)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
@@ -5319,11 +5310,11 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
- hr_qp->qpn);
+ qpn);
if (ret)
goto out;
- memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
+ memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -5353,7 +5344,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto done;
}
- ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret) {
ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
ret = -EINVAL;
@@ -5551,7 +5542,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
msleep(20);
}
- ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
+ ibdev_err(ibdev, "query SCC clr done flag overtime.\n");
ret = -ETIMEDOUT;
out:
@@ -5774,6 +5765,64 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+ void *buffer)
+{
+ struct hns_roce_v2_cq_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ HNS_ROCE_CMD_QUERY_CQC, cqn);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying CQ, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
+ void *buffer)
+{
+ struct hns_roce_v2_mpt_entry *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+ key_to_hw_index(key));
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying MPT, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
@@ -5782,26 +5831,26 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- ibdev_info(ibdev, "Path migrated succeeded.\n");
+ ibdev_info(ibdev, "path migrated succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- ibdev_warn(ibdev, "Path migration failed.\n");
+ ibdev_warn(ibdev, "path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- ibdev_warn(ibdev, "Send queue drained.\n");
+ ibdev_warn(ibdev, "send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
+ ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
irq_work->queue_num);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
+ ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
@@ -5823,7 +5872,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
ibdev_warn(ibdev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- ibdev_warn(ibdev, "Function level reset.\n");
+ ibdev_warn(ibdev, "function level reset.\n");
break;
case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
ibdev_err(ibdev, "xrc domain violation error.\n");
@@ -5847,12 +5896,12 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
if (!irq_work)
return;
- INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
+ INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
irq_work->queue_num = queue_num;
- queue_work(hr_dev->irq_workq, &(irq_work->work));
+ queue_work(hr_dev->irq_workq, &irq_work->work);
}
static void update_eq_db(struct hns_roce_eq *eq)
@@ -5942,7 +5991,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_FLR:
break;
default:
- dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
event_type, eq->eqn, eq->cons_index);
break;
}
@@ -6012,7 +6061,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
/* Completion event interrupt */
int_work = hns_roce_v2_ceq_int(hr_dev, eq);
else
- /* Asychronous event interrupt */
+ /* Asynchronous event interrupt */
int_work = hns_roce_v2_aeq_int(hr_dev, eq);
return IRQ_RETVAL(int_work);
@@ -6333,7 +6382,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
0);
if (err)
- dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
+ dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);
return err;
}
@@ -6422,7 +6471,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
0, hr_dev->irq_names[j - comp_num],
&eq_table->eq[j - other_num]);
if (ret) {
- dev_err(hr_dev->dev, "Request irq error!\n");
+ dev_err(hr_dev->dev, "request irq error!\n");
goto err_request_failed;
}
}
@@ -6575,10 +6624,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
kfree(eq_table->eq);
}
-static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
- .query_cqc_info = hns_roce_v2_query_cqc_info,
-};
-
static const struct ib_device_ops hns_roce_v2_dev_ops = {
.destroy_qp = hns_roce_v2_destroy_qp,
.modify_cq = hns_roce_v2_modify_cq,
@@ -6619,6 +6664,9 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
.write_srqc = hns_roce_v2_write_srqc,
+ .query_cqc = hns_roce_v2_query_cqc,
+ .query_qpc = hns_roce_v2_query_qpc,
+ .query_mpt = hns_roce_v2_query_mpt,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6650,7 +6698,6 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->is_vf = id->driver_data;
hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
- hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = hr_dev->sdb_offset;
@@ -6846,7 +6893,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
} else {
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
- dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ dev_info(dev, "reset done, RoCE client reinit finished.\n");
}
return ret;
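[Editor's note] The two helpers added above (hns_roce_v2_query_cqc() and hns_roce_v2_query_mpt(), alongside the reworked hns_roce_v2_query_qpc()) share one mailbox round-trip shape. The condensed sketch below is the editor's illustration only, not part of the patch; the generic "op"/"tag"/"len" parameters are an assumed simplification of the three concrete call sites shown in the hunks.

/* Editor's sketch: common shape of the three query helpers above. */
static int hns_roce_v2_query_ctx(struct hns_roce_dev *hr_dev, u16 op,
				 unsigned long tag, void *buffer, size_t len)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/*
	 * op is HNS_ROCE_CMD_QUERY_{QPC,CQC,MPT}; tag is the qpn, cqn or
	 * hardware MPT index, exactly as in the hunks above.
	 */
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, op, tag);
	if (!ret)
		memcpy(buffer, mailbox->buf, len);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}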
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f96debac30fe..b11579027e82 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -36,17 +36,16 @@
#include <linux/bitops.h>
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
-#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V3_MAX_SQ_INLINE 0x400
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
@@ -55,7 +54,6 @@
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
-#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -65,7 +63,6 @@
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
-#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
@@ -83,7 +80,7 @@
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
@@ -406,6 +403,7 @@ enum hns_roce_v2_qp_state {
struct hns_roce_v2_qp_context_ex {
__le32 data[64];
};
+
struct hns_roce_v2_qp_context {
__le32 byte_4_sqpn_tst;
__le32 wqe_sge_ba;
@@ -758,7 +756,8 @@ struct hns_roce_v2_mpt_entry {
#define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
#define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
#define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
-#define MPT_LEN MPT_FIELD_LOC(191, 128)
+#define MPT_LEN_L MPT_FIELD_LOC(159, 128)
+#define MPT_LEN_H MPT_FIELD_LOC(191, 160)
#define MPT_LKEY MPT_FIELD_LOC(223, 192)
#define MPT_VA MPT_FIELD_LOC(287, 224)
#define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
@@ -1173,7 +1172,7 @@ struct hns_roce_query_pf_caps_a {
__le16 max_sq_sg;
__le16 max_sq_inline;
__le16 max_rq_sg;
- __le32 max_extend_sg;
+ __le32 rsv0;
__le16 num_qpc_timer;
__le16 num_cqc_timer;
__le16 max_srq_sges;
@@ -1181,7 +1180,7 @@ struct hns_roce_query_pf_caps_a {
u8 num_other_vectors;
u8 max_sq_desc_sz;
u8 max_rq_desc_sz;
- u8 max_srq_desc_sz;
+ u8 rsv1;
u8 cqe_sz;
};
@@ -1462,9 +1461,6 @@ struct hns_roce_sccc_clr_done {
__le32 rsv[5];
};
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer);
-
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
deleted file mode 100644
index f7a75a7cda74..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-// Copyright (c) 2019 Hisilicon Limited.
-
-#include "hnae3.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hw_v2.h"
-
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer)
-{
- struct hns_roce_v2_cq_context *cq_context;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- cq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC,
- cqn);
- if (ret) {
- dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
- goto err_mailbox;
- }
-
- memcpy(buffer, cq_context, sizeof(*cq_context));
-
-err_mailbox:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return ret;
-}
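[Editor's note] The deleted hns_roce_v2_query_cqc_info() above is superseded by the .query_cqc/.query_qpc/.query_mpt hw ops added earlier in this patch. Since such an op may be absent on other hardware generations, callers are expected to NULL-check it before use; the minimal sketch below (editor's illustration with a hypothetical caller name, mirroring the restrack changes further below) shows that guard.

/* Editor's sketch: guarding an optional hw op before dumping a CQ context. */
static int example_dump_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
			    struct hns_roce_v2_cq_context *ctx)
{
	if (!hr_dev->hw->query_cqc)	/* op not provided by this hw generation */
		return -EINVAL;

	return hr_dev->hw->query_cqc(hr_dev, cqn, ctx);
}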
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c8af4ebd7cbd..dcf89689a4c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -97,7 +97,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "Can't find netdev on port(%u)!\n", port);
+ dev_err(dev, "can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -239,7 +239,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "Find netdev %u failed!\n", port);
+ dev_err(dev, "find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -515,7 +515,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.destroy_ah = hns_roce_destroy_ah,
.destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
- .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
.get_dma_mr = hns_roce_get_dma_mr,
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
@@ -566,6 +565,15 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};
+static const struct ib_device_ops hns_roce_dev_restrack_ops = {
+ .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+ .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
+ .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
+ .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
+ .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
+ .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
+};
+
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -605,6 +613,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
for (i = 0; i < hr_dev->caps.num_ports; i++) {
if (!hr_dev->iboe.netdevs[i])
continue;
@@ -650,17 +659,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
- hr_dev->caps.num_mtpts, 1);
+ hr_dev->caps.num_mtpts);
if (ret) {
- dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
+ dev_err(dev, "failed to init MTPT context memory, aborting.\n");
return ret;
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
- dev_err(dev, "Failed to init QP context memory, aborting.\n");
+ dev_err(dev, "failed to init QP context memory, aborting.\n");
goto err_unmap_dmpt;
}
@@ -668,9 +677,9 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_IRRL,
hr_dev->caps.irrl_entry_sz *
hr_dev->caps.max_qp_init_rdma,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
- dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
+ dev_err(dev, "failed to init irrl_table memory, aborting.\n");
goto err_unmap_qp;
}
@@ -680,19 +689,19 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_TRRL,
hr_dev->caps.trrl_entry_sz *
hr_dev->caps.max_qp_dest_rdma,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
dev_err(dev,
- "Failed to init trrl_table memory, aborting.\n");
+ "failed to init trrl_table memory, aborting.\n");
goto err_unmap_irrl;
}
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
- hr_dev->caps.num_cqs, 1);
+ hr_dev->caps.num_cqs);
if (ret) {
- dev_err(dev, "Failed to init CQ context memory, aborting.\n");
+ dev_err(dev, "failed to init CQ context memory, aborting.\n");
goto err_unmap_trrl;
}
@@ -700,10 +709,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz,
- hr_dev->caps.num_srqs, 1);
+ hr_dev->caps.num_srqs);
if (ret) {
dev_err(dev,
- "Failed to init SRQ context memory, aborting.\n");
+ "failed to init SRQ context memory, aborting.\n");
goto err_unmap_cq;
}
}
@@ -713,10 +722,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
&hr_dev->qp_table.sccc_table,
HEM_TYPE_SCCC,
hr_dev->caps.sccc_sz,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
dev_err(dev,
- "Failed to init SCC context memory, aborting.\n");
+ "failed to init SCC context memory, aborting.\n");
goto err_unmap_srq;
}
}
@@ -725,10 +734,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.num_qpc_timer, 1);
+ hr_dev->caps.qpc_timer_bt_num);
if (ret) {
dev_err(dev,
- "Failed to init QPC timer memory, aborting.\n");
+ "failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx;
}
}
@@ -737,10 +746,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
- hr_dev->caps.cqc_timer_bt_num, 1);
+ hr_dev->caps.cqc_timer_bt_num);
if (ret) {
dev_err(dev,
- "Failed to init CQC timer memory, aborting.\n");
+ "failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer;
}
}
@@ -749,7 +758,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
HEM_TYPE_GMV,
hr_dev->caps.gmv_entry_sz,
- hr_dev->caps.gmv_entry_num, 1);
+ hr_dev->caps.gmv_entry_num);
if (ret) {
dev_err(dev,
"failed to init gmv table memory, ret = %d\n",
@@ -818,13 +827,13 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
if (ret) {
- dev_err(dev, "Failed to allocate priv_uar.\n");
+ dev_err(dev, "failed to allocate priv_uar.\n");
goto err_uar_table_free;
}
ret = hns_roce_init_qp_table(hr_dev);
if (ret) {
- dev_err(dev, "Failed to init qp_table.\n");
+ dev_err(dev, "failed to init qp_table.\n");
goto err_uar_table_free;
}
@@ -837,9 +846,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_cq_table(hr_dev);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_init_srq_table(hr_dev);
- }
return 0;
@@ -902,14 +910,14 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
- dev_err(dev, "Init RoCE Command Queue failed!\n");
+ dev_err(dev, "init RoCE Command Queue failed!\n");
return ret;
}
}
ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) {
- dev_err(dev, "Get RoCE engine profile failed!\n");
+ dev_err(dev, "get RoCE engine profile failed!\n");
goto error_failed_cmd_init;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 867972c2a894..845ac7d3831f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -190,7 +190,7 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
int ret;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (mr == NULL)
+ if (!mr)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
@@ -249,7 +249,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->ibmr.length = length;
return &mr->ibmr;
@@ -586,7 +585,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
while (offset < end && npage < max_count) {
count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
- offset, &count, NULL);
+ offset, &count);
if (!mtts)
return -ENOBUFS;
@@ -835,7 +834,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
- &mtt_count, NULL);
+ &mtt_count);
if (!mtts || !mtt_count)
goto done;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 48d3616a6d71..f0bd82a18069 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -56,7 +56,7 @@ static void flush_work_handle(struct work_struct *work)
if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
if (ret)
- dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+ dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
ret);
}
@@ -105,7 +105,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
xa_unlock(&hr_dev->qp_table_xa);
if (!qp) {
- dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ dev_warn(dev, "async event for bogus QP %08x\n", qpn);
return;
}
@@ -218,7 +218,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
num = 1;
- hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
@@ -234,8 +233,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
qp_table->bank[bankid].inuse++;
mutex_unlock(&qp_table->bank_mutex);
-
- hr_qp->doorbell_qpn = (u32)num;
}
hr_qp->qpn = num;
@@ -278,7 +275,7 @@ static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
if (ret)
- dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
+ dev_err(hr_dev->dev, "failed to xa store for QPC\n");
else
/* add QP to device's QP list for softwc */
add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
@@ -299,14 +296,14 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get QPC table\n");
+ dev_err(dev, "failed to get QPC table\n");
goto err_out;
}
/* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get IRRL table\n");
+ dev_err(dev, "failed to get IRRL table\n");
goto err_put_qp;
}
@@ -315,7 +312,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get TRRL table\n");
+ dev_err(dev, "failed to get TRRL table\n");
goto err_put_irrl;
}
}
@@ -325,7 +322,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get SCC CTX table\n");
+ dev_err(dev, "failed to get SCC CTX table\n");
goto err_put_trrl;
}
}
@@ -462,11 +459,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
hr_qp->rq.rsv_sge);
- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
- hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
@@ -1209,7 +1203,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
if (ret)
- ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
+ ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
return ret;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 24a154d64630..989a2af2e938 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -9,91 +9,223 @@
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
-static int hns_roce_fill_cq(struct sk_buff *msg,
- struct hns_roce_v2_cq_context *context)
+#define MAX_ENTRY_NUM 256
+
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
- if (rdma_nl_put_driver_u32(msg, "state",
- hr_reg_read(context, CQC_ARM_ST)))
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+ if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
goto err;
- if (rdma_nl_put_driver_u32(msg, "ceqn",
- hr_reg_read(context, CQC_CEQN)))
+ if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
goto err;
- if (rdma_nl_put_driver_u32(msg, "cqn",
- hr_reg_read(context, CQC_CQN)))
+ if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
goto err;
- if (rdma_nl_put_driver_u32(msg, "hopnum",
- hr_reg_read(context, CQC_CQE_HOP_NUM)))
+ if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
goto err;
- if (rdma_nl_put_driver_u32(msg, "pi",
- hr_reg_read(context, CQC_CQ_PRODUCER_IDX)))
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct hns_roce_v2_cq_context context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
+ int ret;
+
+ if (!hr_dev->hw->query_cqc)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
+ if (ret)
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
+ data[offset++] = hr_reg_read(&context, CQC_SHIFT);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
+ data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
+ data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
+ data[offset++] = hr_reg_read(&context, CQC_CEQN);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+ return ret;
+}
+
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
goto err;
- if (rdma_nl_put_driver_u32(msg, "ci",
- hr_reg_read(context, CQC_CQ_CONSUMER_IDX)))
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
goto err;
- if (rdma_nl_put_driver_u32(msg, "coalesce",
- hr_reg_read(context, CQC_CQ_MAX_CNT)))
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
goto err;
- if (rdma_nl_put_driver_u32(msg, "period",
- hr_reg_read(context, CQC_CQ_PERIOD)))
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
goto err;
- if (rdma_nl_put_driver_u32(msg, "cnt",
- hr_reg_read(context, CQC_CQE_CNT)))
+ if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
goto err;
+ nla_nest_end(msg, table_attr);
+
return 0;
err:
+ nla_nest_cancel(msg, table_attr);
+
return -EMSGSIZE;
}
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
- struct ib_cq *ib_cq)
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct hns_roce_v2_cq_context *context;
- struct nlattr *table_attr;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct hns_roce_v2_qp_context context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
int ret;
- if (!hr_dev->dfx->query_cqc_info)
+ if (!hr_dev->hw->query_qpc)
return -EINVAL;
- context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
+ ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret)
- goto err;
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, QPC_QP_ST);
+ data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
+ data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
+ data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
+ data[offset++] = hr_reg_read(&context, QPC_SRQN);
+ data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
+ data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
+ data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_RQWS);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
+ data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
+ data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+ data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+ return ret;
+}
+
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr) {
- ret = -EMSGSIZE;
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
goto err;
- }
- if (hns_roce_fill_cq(msg, context)) {
- ret = -EMSGSIZE;
- goto err_cancel_table;
- }
+ if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
+ goto err;
nla_nest_end(msg, table_attr);
- kfree(context);
return 0;
-err_cancel_table:
- nla_nest_cancel(msg, table_attr);
err:
- kfree(context);
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct hns_roce_v2_mpt_entry context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
+ int ret;
+
+ if (!hr_dev->hw->query_mpt)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
+ if (ret)
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, MPT_ST);
+ data[offset++] = hr_reg_read(&context, MPT_PD);
+ data[offset++] = hr_reg_read(&context, MPT_LKEY);
+ data[offset++] = hr_reg_read(&context, MPT_LEN_L);
+ data[offset++] = hr_reg_read(&context, MPT_LEN_H);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
return ret;
}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index e03e03082a5f..c1906cab5c8a 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -314,6 +314,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_INVALID_REQUEST 0x0223
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 4f132c6fb653..ab246447520b 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -138,59 +138,68 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
switch (info->ae_id) {
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG:
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- fallthrough;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
qp->flush_code = FLUSH_PROT_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
- case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->flush_code = FLUSH_LOC_LEN_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- case IRDMA_AE_IB_INVALID_REQUEST:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp->flush_code = FLUSH_FATAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
- case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_OP_ERR;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp->flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
break;
default:
- qp->flush_code = FLUSH_FATAL_ERR;
+ qp->flush_code = FLUSH_GENERAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
}
}
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 9e7b8ecb137a..517d41a1c289 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -98,6 +98,7 @@ enum irdma_term_mpa_errors {
enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_hw_stats_index_32b {
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index daeab5daed5b..a6e5d350a94c 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -1005,6 +1006,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
int ret_code;
bool move_cq_head = true;
u8 polarity;
+ u8 op_type;
bool ext_valid;
__le64 *ext_cqe;
@@ -1187,7 +1189,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
@@ -1204,6 +1205,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
break;
}
} while (1);
+ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ddd0ebbdd7d5..2ef61923c926 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -103,6 +103,7 @@ enum irdma_flush_opcode {
FLUSH_FATAL_ERR,
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
+ FLUSH_REM_INV_REQ_ERR,
};
enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index fdf4cc88cb91..8dfc9e154d73 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
err_code = -EIO;
- if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
- cqp_request->compl_info.min_err_code == 0x8029) {
- if (!rf->reset) {
- rf->reset = true;
- rf->gen_ops.request_reset(rf);
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002)
+ err_code = -EBUSY;
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
}
}
}
@@ -2476,6 +2479,9 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
case IRDMA_QP_EVENT_ACCESS_ERR:
ibevent.event = IB_EVENT_QP_ACCESS_ERR;
break;
+ case IRDMA_QP_EVENT_REQ_ERR:
+ ibevent.event = IB_EVENT_QP_REQ_ERR;
+ break;
}
ibevent.device = iwqp->ibqp.device;
ibevent.element.qp = &iwqp->ibqp;
@@ -2598,7 +2604,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwrcq);
+ irdma_comp_handler(iwqp->iwscq);
} else {
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b07b8af2997..a22afbb25bc5 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_cq = rf->max_cq - rf->used_cqs;
- props->max_cqe = rf->max_cqe;
+ props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
- if (rdma_protocol_roce(ibdev, 1))
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ }
+
props->max_ah = rf->max_ah;
props->max_mcast_grp = rf->max_mcg;
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
@@ -296,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
- struct irdma_alloc_ucontext_req req;
+ struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
+ if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+ return -EINVAL;
+
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL;
@@ -314,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 legacy support with libi40iw */
- if (udata->outlen < sizeof(uresp)) {
+ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP;
@@ -386,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
*/
static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -395,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
u32 pd_id = 0;
int err;
+ if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd);
if (err)
@@ -811,12 +824,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
- struct irdma_create_qp_req req;
+ struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
@@ -833,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (err_code)
return err_code;
+ if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ return -EINVAL;
+
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
@@ -1117,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
@@ -1135,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info = &iwqp->roce_info;
udp_info = &iwqp->udp_info;
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1371,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1423,7 +1451,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else {
iwqp->ibqp_state = attr->qp_state;
}
- if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
@@ -1463,6 +1491,8 @@ exit:
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1477,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
int err;
unsigned long flags;
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1562,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1659,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
}
}
}
- if (attr_mask & IB_QP_STATE && udata &&
+ if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
@@ -1794,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct ib_udata *udata)
{
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
struct irdma_cqp_request *cqp_request;
@@ -1816,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
IRDMA_FEATURE_CQ_RESIZE))
return -EOPNOTSUPP;
+ if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+ return -EINVAL;
+
if (entries > rf->max_cqe)
return -EINVAL;
@@ -1948,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
@@ -1966,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+ return -EINVAL;
+
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
@@ -2743,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access,
struct ib_udata *udata)
{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_ucontext *ucontext;
struct irdma_pble_alloc *palloc;
@@ -2760,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
region = ib_umem_get(pd->device, start, len, access);
if (IS_ERR(region)) {
@@ -3009,6 +3061,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
+ int status;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
@@ -3039,8 +3092,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
cqp_info->post_sq = 1;
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
irdma_free_stag(iwdev, iwmr->stag);
done:
if (iwpbl->pbl_allocated)
@@ -3308,6 +3364,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_RETRY_EXC_ERR;
case FLUSH_MW_BIND_ERR:
return IB_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
@@ -4289,12 +4347,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
struct rdma_ah_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
struct irdma_create_ah_resp uresp;
struct irdma_ah *parent_ah;
int err;
+ if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_setup_ah(ibah, attr);
if (err)
return err;
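
The irdma hunks above gate each uverbs entry point on a minimum udata length computed with offsetofend() over the ABI request/response structs, rejecting callers that pass a buffer shorter than the last field the driver actually reads. Below is a minimal user-space sketch of that pattern; the request struct, field names and sizes are invented stand-ins, not the real irdma ABI.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* equivalent of the kernel's offsetofend() helper */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct example_req {		/* hypothetical stand-in for an irdma request */
	uint32_t flags;
	uint64_t user_cq_buffer;
};

/* returns 0 if the caller supplied at least the fields we consume */
static int check_inlen(size_t inlen)
{
	if (inlen < offsetofend(struct example_req, user_cq_buffer))
		return -1;	/* the driver returns -EINVAL here */
	return 0;
}

int main(void)
{
	printf("minimum request length: %zu\n",
	       offsetofend(struct example_req, user_cq_buffer));
	printf("inlen  8 -> %d\n", check_inlen(8));
	printf("inlen 16 -> %d\n", check_inlen(16));
	return 0;
}
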
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 04a67b481608..a40bf58bcdd3 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.length = length;
mr->ibmr.page_size = 1U << shift;
return &mr->ibmr;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 2a2a9e9afc9d..2211a0be16f3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -907,6 +907,7 @@ static bool devx_is_whitelist_cmd(void *in)
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true;
default:
return false;
@@ -962,6 +963,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true;
default:
return false;
@@ -2158,32 +2160,39 @@ err:
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
struct uverbs_attr_bundle *attrs,
- struct devx_umem *obj)
+ struct devx_umem *obj, u32 access_flags)
{
u64 addr;
size_t size;
- u32 access;
int err;
if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
return -EFAULT;
- err = uverbs_get_flags32(&access, attrs,
- MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
+ err = ib_check_mr_access(&dev->ib_dev, access_flags);
if (err)
return err;
- err = ib_check_mr_access(&dev->ib_dev, access);
- if (err)
- return err;
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int dmabuf_fd;
+
+ err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
+ if (err)
+ return -EFAULT;
- obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
- if (IS_ERR(obj->umem))
- return PTR_ERR(obj->umem);
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(
+ &dev->ib_dev, addr, size, dmabuf_fd, access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return PTR_ERR(umem_dmabuf);
+ obj->umem = &umem_dmabuf->umem;
+ } else {
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+ }
return 0;
}
@@ -2222,7 +2231,8 @@ static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
struct uverbs_attr_bundle *attrs,
struct devx_umem *obj,
- struct devx_umem_reg_cmd *cmd)
+ struct devx_umem_reg_cmd *cmd,
+ int access)
{
unsigned long pgsz_bitmap;
unsigned int page_size;
@@ -2271,6 +2281,9 @@ static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
MLX5_SET(umem, umem, page_offset,
ib_umem_dma_offset(obj->umem, page_size));
+ if (mlx5_umem_needs_ats(dev, obj->umem, access))
+ MLX5_SET(umem, umem, ats, 1);
+
mlx5_ib_populate_pas(obj->umem, page_size, mtt,
(obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
MLX5_IB_MTT_READ);
@@ -2288,20 +2301,30 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int access_flags;
int err;
if (!c->devx_uid)
return -EINVAL;
+ err = uverbs_get_flags32(&access_flags, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (err)
+ return err;
+
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
if (!obj)
return -ENOMEM;
- err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
if (err)
goto err_obj_free;
- err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
+ err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
if (err)
goto err_umem_release;
@@ -2833,6 +2856,8 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
UVERBS_ATTR_TYPE(u64),
UA_MANDATORY),
+ UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
+ UA_OPTIONAL),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
enum ib_access_flags),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
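
The devx change above keys the registration path off an optional attribute: when a dma-buf fd was supplied, the umem is pinned through the dma-buf path, otherwise the plain user-memory path is taken. A rough sketch of that dispatch, with stub helpers standing in for the real uverbs/mlx5 calls and a negative fd modelling "attribute not supplied":

#include <stdio.h>

struct umem {
	int is_dmabuf;
};

/* stubs standing in for ib_umem_dmabuf_get_pinned() / ib_umem_get() */
static struct umem *umem_get_dmabuf(int fd)
{
	static struct umem u = { .is_dmabuf = 1 };
	(void)fd;
	return &u;
}

static struct umem *umem_get_plain(void)
{
	static struct umem u = { .is_dmabuf = 0 };
	return &u;
}

static struct umem *umem_get(int dmabuf_fd)
{
	if (dmabuf_fd >= 0)
		return umem_get_dmabuf(dmabuf_fd);
	return umem_get_plain();
}

int main(void)
{
	printf("fd=5  -> is_dmabuf=%d\n", umem_get(5)->is_dmabuf);
	printf("fd=-1 -> is_dmabuf=%d\n", umem_get(-1)->is_dmabuf);
	return 0;
}
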
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 293ed709e5ed..9c8a7b206dcf 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -147,6 +147,28 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
+static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out,
+ size_t sz)
+{
+ u32 *in;
+ int err;
+
+ in = kvzalloc(sz, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+ err = mlx5_core_access_reg(dev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ kvfree(in);
+ return err;
+}
+
static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -166,6 +188,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
@@ -202,8 +230,7 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
goto done;
}
- err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
- out_cnt, sz);
+ err = query_ib_ppcnt(mdev, mdev_port_num, out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fc94a1b25485..c669ef6e47e7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -26,7 +26,7 @@
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
@@ -46,7 +46,6 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
-#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -1826,6 +1825,9 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
+
return 0;
}
@@ -4336,7 +4338,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 6191aa833ac2..96ffbbaf0a73 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>
@@ -152,6 +151,7 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
for (i = 0; i < 8; i++)
mlx5_write64(&mmio_wqe[i * 2],
bf->bfreg->map + bf->offset + i * 8);
+ io_stop_wc();
bf->offset ^= bf->buf_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2e2ad3918385..4a7f7064bd0e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -708,6 +708,7 @@ struct mlx5_ib_umr_context {
};
enum {
+ MLX5_UMR_STATE_UNINIT,
MLX5_UMR_STATE_ACTIVE,
MLX5_UMR_STATE_RECOVER,
MLX5_UMR_STATE_ERR,
@@ -1540,6 +1541,18 @@ int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
+ /*
+ * If the driver is in hash mode and the port_select_flow_table_bypass cap
+ * is supported, it means that the driver no longer needs to assign the port
+ * affinity by default. If a user wants to set the port affinity explicitly,
+ * the user has a dedicated API to do that, so there is no need to assign
+ * the port affinity by default.
+ */
+ if (dev->lag_active &&
+ mlx5_lag_mode_is_hash(dev->mdev) &&
+ MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+ return 0;
+
return dev->lag_active ||
(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
@@ -1550,4 +1563,40 @@ static inline bool rt_supported(int ts_cap)
return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}
+
+/*
+ * PCI Peer to Peer is a trainwreck. If no switch is present then things
+ * sometimes work, depending on the pci_distance_p2p logic for excluding broken
+ * root complexes. However if a switch is present in the path, then things get
+ * really ugly depending on how the switch is set up. This table assumes that the
+ * root complex is strict and validates that all req/reps are matched
+ * perfectly - so any scenario where it sees only half the transaction is a
+ * failure.
+ *
+ * CR/RR/DT ATS RO P2P
+ * 00X X X OK
+ * 010 X X fails (request is routed to root but root never sees comp)
+ * 011 0 X fails (request is routed to root but root never sees comp)
+ * 011 1 X OK
+ * 10X X 1 OK
+ * 101 X 0 fails (completion is routed to root but root didn't see req)
+ * 110 X 0 SLOW
+ * 111 0 0 SLOW
+ * 111 1 0 fails (completion is routed to root but root didn't see req)
+ * 111 1 1 OK
+ *
+ * Unfortunately we cannot reliably know if a switch is present or what the
+ * CR/RR/DT ACS settings are, as in a VM all of this is hidden. Assume that
+ * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
+ *
+ * For now assume if the umem is a dma_buf then it is P2P.
+ */
+static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
+ struct ib_umem *umem, int access_flags)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
+ return false;
+ return access_flags & IB_ACCESS_RELAXED_ORDERING;
+}
+
#endif /* MLX5_IB_H */
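
The decision the table above boils down to is what mlx5_umem_needs_ats() implements: request ATS only when the device advertises the capability, the umem is backed by a dma-buf (assumed to be P2P), and the caller asked for relaxed ordering. A stand-alone sketch of that predicate, using a placeholder flag value rather than the real IB_ACCESS_RELAXED_ORDERING definition:

#include <stdbool.h>
#include <stdio.h>

#define ACCESS_RELAXED_ORDERING (1 << 20)	/* placeholder bit for the demo */

static bool umem_needs_ats(bool dev_has_ats, bool is_dmabuf, int access_flags)
{
	if (!dev_has_ats || !is_dmabuf)
		return false;
	return access_flags & ACCESS_RELAXED_ORDERING;
}

int main(void)
{
	printf("ats cap + dmabuf + RO   -> %d\n",
	       umem_needs_ats(true, true, ACCESS_RELAXED_ORDERING));
	printf("ats cap + dmabuf, no RO -> %d\n",
	       umem_needs_ats(true, true, 0));
	printf("no ats cap              -> %d\n",
	       umem_needs_ats(false, true, ACCESS_RELAXED_ORDERING));
	return 0;
}
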
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 129d531bd01b..410cc5fd2523 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -39,9 +39,7 @@
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
-#include <rdma/ib_verbs.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"
@@ -937,7 +935,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
* cache then synchronously create an uncached one.
*/
if (!ent || ent->limit == 0 ||
- !mlx5r_umr_can_reconfig(dev, 0, access_flags)) {
+ !mlx5r_umr_can_reconfig(dev, 0, access_flags) ||
+ mlx5_umem_needs_ats(dev, umem, access_flags)) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size, false);
mutex_unlock(&dev->slow_path_mutex);
@@ -1018,6 +1017,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(iova, umem->length, mr->page_shift));
MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
+ if (mlx5_umem_needs_ats(dev, umem, access_flags))
+ MLX5_SET(mkc, mkc, ma_translation_mode, 1);
if (populate) {
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(iova, umem->length, mr->page_shift));
@@ -1402,7 +1403,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
}
- mr->ibmr.length = new_umem->length;
mr->ibmr.iova = iova;
mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e305bf1dc6c2..bc97958818bb 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
@@ -795,7 +794,8 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
- if (mmkey->type == MLX5_MKEY_MW)
+ if (mmkey->type == MLX5_MKEY_MW ||
+ mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
return mmkey->key == key;
}
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index e00b94d1b1ea..d5105b5c9979 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -177,6 +177,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
sema_init(&dev->umrc.sem, MAX_UMR_WR);
mutex_init(&dev->umrc.lock);
+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
return 0;
@@ -191,6 +192,8 @@ destroy_pd:
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+ return;
ib_destroy_qp(dev->umrc.qp);
ib_free_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index bdf5ed38de22..f330ce895d88 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1252,7 +1252,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 265a581133dc..56f06c68f31a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1363,7 +1363,7 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
- strlcpy(dev->model_number,
+ strscpy(dev->model_number,
hba_attribs->controller_model_number,
sizeof(dev->model_number));
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6861c6384f18..9d2dd135b784 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2124,7 +2124,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1;
- strlcpy(msg,
+ strscpy(msg,
"[Memory BIST test failed, InfiniPath hardware unusable]",
msgl);
/* ignore from now on, so disable until driver reloaded */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index cb2a02d671e2..692b64efad97 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -295,7 +295,7 @@ void qib_free_irq(struct qib_devdata *dd)
* Setup pcie interrupt stuff again after a reset. I'd like to just call
* pci_enable_msi() again for msi, but when I do that,
* the MSI enable bit doesn't get set in the command word, and
- * we switch to to a different interrupt vector, which is confusing,
+ * we switch to a different interrupt vector, which is confusing,
* so I instead just do it all inline. Perhaps somehow can tie this
* into the PCIe hotplug support at some point
*/
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 67a1b4562dc2..67923ced6e2d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -95,7 +95,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int ret;
int off;
int i;
- int flags;
dma_addr_t pa;
unsigned int gup_flags;
struct mm_struct *mm;
@@ -132,8 +131,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
goto out;
}
- flags = IOMMU_READ | IOMMU_CACHE;
- flags |= (writable) ? IOMMU_WRITE : 0;
gup_flags = FOLL_WRITE;
gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 59481ae39505..d61f8de7f21c 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -15,7 +15,7 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library");
-static int rvt_init(void)
+static int __init rvt_init(void)
{
int ret = rvt_driver_cq_init();
@@ -26,7 +26,7 @@ static int rvt_init(void)
}
module_init(rvt_init);
-static void rvt_cleanup(void)
+static void __exit rvt_cleanup(void)
{
rvt_cq_exit();
}
diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
index e03af3012590..46bb07c5c4df 100644
--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
+++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
@@ -151,18 +151,8 @@ int rxe_icrc_check(struct sk_buff *skb, struct rxe_pkt_info *pkt)
payload_size(pkt) + bth_pad(pkt));
icrc = ~icrc;
- if (unlikely(icrc != pkt_icrc)) {
- if (skb->protocol == htons(ETH_P_IPV6))
- pr_warn_ratelimited("bad ICRC from %pI6c\n",
- &ipv6_hdr(skb)->saddr);
- else if (skb->protocol == htons(ETH_P_IP))
- pr_warn_ratelimited("bad ICRC from %pI4\n",
- &ip_hdr(skb)->saddr);
- else
- pr_warn_ratelimited("bad ICRC from unknown\n");
-
+ if (unlikely(icrc != pkt_icrc))
return -EINVAL;
- }
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 22f6cc31d1d6..c2a5c8814a48 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 850b80f5ad8b..502e9ada99b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < mr->iova || length > mr->length ||
- iova > mr->iova + mr->length - length)
+ if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
+ iova > mr->ibmr.iova + mr->ibmr.length - length)
return -EFAULT;
return 0;
@@ -103,17 +103,16 @@ err1:
return -ENOMEM;
}
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{
rxe_mr_init(access, mr);
- mr->ibmr.pd = &pd->ibpd;
mr->access = access;
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_DMA;
}
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
struct rxe_map **map;
@@ -125,7 +124,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int err;
int i;
- umem = ib_umem_get(pd->ibpd.device, start, length, access);
+ umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
pr_warn("%s: Unable to pin memory region err = %d\n",
__func__, (int)PTR_ERR(umem));
@@ -175,12 +174,8 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
}
}
- mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
- mr->length = length;
- mr->iova = iova;
- mr->va = start;
mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER;
@@ -197,7 +192,7 @@ err_out:
return err;
}
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{
int err;
@@ -208,7 +203,6 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
if (err)
goto err1;
- mr->ibmr.pd = &pd->ibpd;
mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
mr->type = IB_MR_TYPE_MEM_REG;
@@ -222,7 +216,7 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- size_t offset = iova - mr->iova + mr->offset;
+ size_t offset = iova - mr->ibmr.iova + mr->offset;
int map_index;
int buf_index;
u64 length;
@@ -605,7 +599,7 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr->access = access;
mr->lkey = key;
mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
- mr->iova = wqe->wr.wr.reg.mr->iova;
+ mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID;
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 104993801a80..902b7df7aaed 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -114,15 +114,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->iova + mr->length)))) {
+ (mr->ibmr.iova + mr->ibmr.length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c53f4529f098..35f327b9d4b8 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -145,7 +145,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (skb_linearize(skb)) {
- pr_err("skb_linearize failed\n");
ib_device_put(&rxe->ib_dev);
goto drop;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 516bf9b95e48..a62bab88415c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -19,34 +19,34 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
int has_srq)
{
if (cap->max_send_wr > rxe->attr.max_qp_wr) {
- pr_warn("invalid send wr = %d > %d\n",
- cap->max_send_wr, rxe->attr.max_qp_wr);
+ pr_debug("invalid send wr = %u > %d\n",
+ cap->max_send_wr, rxe->attr.max_qp_wr);
goto err1;
}
if (cap->max_send_sge > rxe->attr.max_send_sge) {
- pr_warn("invalid send sge = %d > %d\n",
- cap->max_send_sge, rxe->attr.max_send_sge);
+ pr_debug("invalid send sge = %u > %d\n",
+ cap->max_send_sge, rxe->attr.max_send_sge);
goto err1;
}
if (!has_srq) {
if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
- pr_warn("invalid recv wr = %d > %d\n",
- cap->max_recv_wr, rxe->attr.max_qp_wr);
+ pr_debug("invalid recv wr = %u > %d\n",
+ cap->max_recv_wr, rxe->attr.max_qp_wr);
goto err1;
}
if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
- pr_warn("invalid recv sge = %d > %d\n",
- cap->max_recv_sge, rxe->attr.max_recv_sge);
+ pr_debug("invalid recv sge = %u > %d\n",
+ cap->max_recv_sge, rxe->attr.max_recv_sge);
goto err1;
}
}
if (cap->max_inline_data > rxe->max_inline_data) {
- pr_warn("invalid max inline data = %d > %d\n",
- cap->max_inline_data, rxe->max_inline_data);
+ pr_debug("invalid max inline data = %u > %d\n",
+ cap->max_inline_data, rxe->max_inline_data);
goto err1;
}
@@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
}
if (!init->recv_cq || !init->send_cq) {
- pr_warn("missing cq\n");
+ pr_debug("missing cq\n");
goto err1;
}
@@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
if (init->qp_type == IB_QPT_GSI) {
if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
- pr_warn("invalid port = %d\n", port_num);
+ pr_debug("invalid port = %d\n", port_num);
goto err1;
}
port = &rxe->port;
if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
- pr_warn("GSI QP exists for port %d\n", port_num);
+ pr_debug("GSI QP exists for port %d\n", port_num);
goto err1;
}
}
@@ -242,9 +242,9 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
skb_queue_head_init(&qp->req_pkts);
- rxe_init_task(rxe, &qp->req.task, qp,
+ rxe_init_task(&qp->req.task, qp,
rxe_requester, "req");
- rxe_init_task(rxe, &qp->comp.task, qp,
+ rxe_init_task(&qp->comp.task, qp,
rxe_completer, "comp");
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
@@ -292,7 +292,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
skb_queue_head_init(&qp->resp_pkts);
- rxe_init_task(rxe, &qp->resp.task, qp,
+ rxe_init_task(&qp->resp.task, qp,
rxe_responder, "resp");
qp->resp.opcode = OPCODE_NONE;
@@ -402,7 +402,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
- pr_warn("invalid mask or state for qp\n");
+ pr_debug("invalid mask or state for qp\n");
goto err1;
}
@@ -416,7 +416,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (mask & IB_QP_PORT) {
if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
- pr_warn("invalid port %d\n", attr->port_num);
+ pr_debug("invalid port %d\n", attr->port_num);
goto err1;
}
}
@@ -431,12 +431,12 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
goto err1;
if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
- pr_warn("invalid alt port %d\n", attr->alt_port_num);
+ pr_debug("invalid alt port %d\n", attr->alt_port_num);
goto err1;
}
if (attr->alt_timeout > 31) {
- pr_warn("invalid QP alt timeout %d > 31\n",
- attr->alt_timeout);
+ pr_debug("invalid QP alt timeout %d > 31\n",
+ attr->alt_timeout);
goto err1;
}
}
@@ -457,17 +457,16 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
- pr_warn("invalid max_rd_atomic %d > %d\n",
- attr->max_rd_atomic,
- rxe->attr.max_qp_rd_atom);
+ pr_debug("invalid max_rd_atomic %d > %d\n",
+ attr->max_rd_atomic,
+ rxe->attr.max_qp_rd_atom);
goto err1;
}
}
if (mask & IB_QP_TIMEOUT) {
if (attr->timeout > 31) {
- pr_warn("invalid QP timeout %d > 31\n",
- attr->timeout);
+ pr_debug("invalid QP timeout %d > 31\n", attr->timeout);
goto err1;
}
}
@@ -797,7 +796,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
- __rxe_do_task(&qp->req.task);
+ if (qp->req.task.func)
+ __rxe_do_task(&qp->req.task);
+
if (qp->sq.queue) {
__rxe_do_task(&qp->comp.task);
__rxe_do_task(&qp->req.task);
@@ -833,8 +834,10 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
free_rd_atomic_resources(qp);
- kernel_sock_shutdown(qp->sk, SHUT_RDWR);
- sock_release(qp->sk);
+ if (qp->sk) {
+ kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+ sock_release(qp->sk);
+ }
}
/* called when the last reference to the qp is dropped */
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index dbd4971039c0..d6dbf5a0058d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -112,23 +112,25 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
unsigned int num_elem)
{
enum queue_type type = q->type;
+ u32 new_prod;
u32 prod;
u32 cons;
if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
return -EINVAL;
- prod = queue_get_producer(new_q, type);
+ new_prod = queue_get_producer(new_q, type);
+ prod = queue_get_producer(q, type);
cons = queue_get_consumer(q, type);
- while (!queue_empty(q, type)) {
- memcpy(queue_addr_from_index(new_q, prod),
+ while ((prod - cons) & q->index_mask) {
+ memcpy(queue_addr_from_index(new_q, new_prod),
queue_addr_from_index(q, cons), new_q->elem_size);
- prod = queue_next_index(new_q, prod);
+ new_prod = queue_next_index(new_q, new_prod);
cons = queue_next_index(q, cons);
}
- new_q->buf->producer_index = prod;
+ new_q->buf->producer_index = new_prod;
q->buf->consumer_index = cons;
/* update private index copies */
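
The resize_finish() fix above walks the old queue with its own producer/consumer pair and index mask while appending at the new queue's producer, instead of reusing one index for both rings. A small user-space sketch of that copy loop over two power-of-two rings; the sizes, start indices and element type are invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define OLD_SIZE 8U		/* both sizes are powers of two */
#define NEW_SIZE 16U

int main(void)
{
	uint32_t old_buf[OLD_SIZE], new_buf[NEW_SIZE];
	uint32_t old_mask = OLD_SIZE - 1, new_mask = NEW_SIZE - 1;
	uint32_t prod = 10, cons = 5;	/* five valid entries, indices wrap */
	uint32_t new_prod = 0;

	for (uint32_t i = 0; i < OLD_SIZE; i++)
		old_buf[i] = 100 + i;

	/* copy while the old ring still holds entries between cons and prod */
	while ((prod - cons) & old_mask) {
		new_buf[new_prod & new_mask] = old_buf[cons & old_mask];
		new_prod++;
		cons++;
	}

	printf("copied %u entries, new producer index %u\n",
	       new_prod, new_prod & new_mask);
	return 0;
}
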
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index f3ad7b6dbd97..434a693cd4a5 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -16,47 +16,36 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
unsigned int pkt_type;
if (unlikely(!qp->valid))
- goto err1;
+ return -EINVAL;
pkt_type = pkt->opcode & 0xe0;
switch (qp_type(qp)) {
case IB_QPT_RC:
- if (unlikely(pkt_type != IB_OPCODE_RC)) {
- pr_warn_ratelimited("bad qp type\n");
- goto err1;
- }
+ if (unlikely(pkt_type != IB_OPCODE_RC))
+ return -EINVAL;
break;
case IB_QPT_UC:
- if (unlikely(pkt_type != IB_OPCODE_UC)) {
- pr_warn_ratelimited("bad qp type\n");
- goto err1;
- }
+ if (unlikely(pkt_type != IB_OPCODE_UC))
+ return -EINVAL;
break;
case IB_QPT_UD:
case IB_QPT_GSI:
- if (unlikely(pkt_type != IB_OPCODE_UD)) {
- pr_warn_ratelimited("bad qp type\n");
- goto err1;
- }
+ if (unlikely(pkt_type != IB_OPCODE_UD))
+ return -EINVAL;
break;
default:
- pr_warn_ratelimited("unsupported qp type\n");
- goto err1;
+ return -EINVAL;
}
if (pkt->mask & RXE_REQ_MASK) {
if (unlikely(qp->resp.state != QP_STATE_READY))
- goto err1;
+ return -EINVAL;
} else if (unlikely(qp->req.state < QP_STATE_READY ||
- qp->req.state > QP_STATE_DRAINED)) {
- goto err1;
- }
+ qp->req.state > QP_STATE_DRAINED))
+ return -EINVAL;
return 0;
-
-err1:
- return -EINVAL;
}
static void set_bad_pkey_cntr(struct rxe_port *port)
@@ -84,26 +73,20 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
pkt->pkey_index = 0;
if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
- pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
set_bad_pkey_cntr(port);
- goto err1;
+ return -EINVAL;
}
if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
- pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
- deth_qkey(pkt), qkey, qpn);
set_qkey_viol_cntr(port);
- goto err1;
+ return -EINVAL;
}
}
return 0;
-
-err1:
- return -EINVAL;
}
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
@@ -112,13 +95,10 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct sk_buff *skb = PKT_TO_SKB(pkt);
if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
- goto done;
+ return 0;
- if (unlikely(pkt->port_num != qp->attr.port_num)) {
- pr_warn_ratelimited("port %d != qp port %d\n",
- pkt->port_num, qp->attr.port_num);
- goto err1;
- }
+ if (unlikely(pkt->port_num != qp->attr.port_num))
+ return -EINVAL;
if (skb->protocol == htons(ETH_P_IP)) {
struct in_addr *saddr =
@@ -126,19 +106,9 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in_addr *daddr =
&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;
- if (ip_hdr(skb)->daddr != saddr->s_addr) {
- pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
- &ip_hdr(skb)->daddr,
- &saddr->s_addr);
- goto err1;
- }
-
- if (ip_hdr(skb)->saddr != daddr->s_addr) {
- pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
- &ip_hdr(skb)->saddr,
- &daddr->s_addr);
- goto err1;
- }
+ if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
+ (ip_hdr(skb)->saddr != daddr->s_addr))
+ return -EINVAL;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *saddr =
@@ -146,24 +116,12 @@ static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in6_addr *daddr =
&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;
- if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
- pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
- &ipv6_hdr(skb)->daddr, saddr);
- goto err1;
- }
-
- if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
- pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
- &ipv6_hdr(skb)->saddr, daddr);
- goto err1;
- }
+ if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
+ memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
+ return -EINVAL;
}
-done:
return 0;
-
-err1:
- return -EINVAL;
}
static int hdr_check(struct rxe_pkt_info *pkt)
@@ -175,24 +133,18 @@ static int hdr_check(struct rxe_pkt_info *pkt)
int index;
int err;
- if (unlikely(bth_tver(pkt) != BTH_TVER)) {
- pr_warn_ratelimited("bad tver\n");
+ if (unlikely(bth_tver(pkt) != BTH_TVER))
goto err1;
- }
- if (unlikely(qpn == 0)) {
- pr_warn_once("QP 0 not supported");
+ if (unlikely(qpn == 0))
goto err1;
- }
if (qpn != IB_MULTICAST_QPN) {
index = (qpn == 1) ? port->qp_gsi_index : qpn;
qp = rxe_pool_get_index(&rxe->qp_pool, index);
- if (unlikely(!qp)) {
- pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
+ if (unlikely(!qp))
goto err1;
- }
err = check_type_state(rxe, pkt, qp);
if (unlikely(err))
@@ -206,10 +158,8 @@ static int hdr_check(struct rxe_pkt_info *pkt)
if (unlikely(err))
goto err2;
} else {
- if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
- pr_warn_ratelimited("no grh for mcast qpn\n");
+ if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
goto err1;
- }
}
pkt->qp = qp;
@@ -364,10 +314,8 @@ void rxe_rcv(struct sk_buff *skb)
if (unlikely(skb->len < RXE_BTH_BYTES))
goto drop;
- if (rxe_chk_dgid(rxe, skb) < 0) {
- pr_warn_ratelimited("failed checking dgid\n");
+ if (rxe_chk_dgid(rxe, skb) < 0)
goto drop;
- }
pkt->opcode = bth_opcode(pkt);
pkt->psn = bth_psn(pkt);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index b36ec5c4d5e0..ed5a09e86417 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -809,10 +809,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
if (!skb)
return RESPST_ERR_RNR;
- err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
- payload, RXE_FROM_MR_OBJ);
- if (err)
- pr_err("Failed copying memory\n");
+ rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
+ payload, RXE_FROM_MR_OBJ);
if (mr)
rxe_put(mr);
@@ -823,10 +821,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
}
err = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (err) {
- pr_err("Failed sending RDMA reply.\n");
+ if (err)
return RESPST_ERR_RNR;
- }
res->read.va += payload;
res->read.resid -= payload;
@@ -1028,50 +1024,41 @@ finish:
return RESPST_CLEANUP;
}
-static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+
+static int send_common_ack(struct rxe_qp *qp, u8 syndrome, u32 psn,
+ int opcode, const char *msg)
{
- int err = 0;
+ int err;
struct rxe_pkt_info ack_pkt;
struct sk_buff *skb;
- skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
- 0, psn, syndrome);
- if (!skb) {
- err = -ENOMEM;
- goto err1;
- }
+ skb = prepare_ack_packet(qp, &ack_pkt, opcode, 0, psn, syndrome);
+ if (!skb)
+ return -ENOMEM;
err = rxe_xmit_packet(qp, &ack_pkt, skb);
if (err)
- pr_err_ratelimited("Failed sending ack\n");
+ pr_err_ratelimited("Failed sending %s\n", msg);
-err1:
return err;
}
-static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+static int send_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
{
- int err = 0;
- struct rxe_pkt_info ack_pkt;
- struct sk_buff *skb;
-
- skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
- 0, psn, syndrome);
- if (!skb) {
- err = -ENOMEM;
- goto out;
- }
+ return send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_ACKNOWLEDGE, "ACK");
+}
- err = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (err)
- pr_err_ratelimited("Failed sending atomic ack\n");
+static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+{
+ int ret = send_common_ack(qp, syndrome, psn,
+ IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, "ATOMIC ACK");
/* have to clear this since it is used to trigger
* long read replies
*/
qp->resp.res = NULL;
-out:
- return err;
+ return ret;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 2248cf33d776..ec2b7de1c497 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -94,10 +94,9 @@ void rxe_do_task(struct tasklet_struct *t)
task->ret = ret;
}
-int rxe_init_task(void *obj, struct rxe_task *task,
+int rxe_init_task(struct rxe_task *task,
void *arg, int (*func)(void *), char *name)
{
- task->obj = obj;
task->arg = arg;
task->func = func;
snprintf(task->name, sizeof(task->name), "%s", name);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index 11d183fd3338..7f612a1c68a7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -19,7 +19,6 @@ enum {
* called again.
*/
struct rxe_task {
- void *obj;
struct tasklet_struct tasklet;
int state;
spinlock_t state_lock; /* spinlock for task state */
@@ -35,7 +34,7 @@ struct rxe_task {
* arg => parameter to pass to fcn
* func => function to call until it returns != 0
*/
-int rxe_init_task(void *obj, struct rxe_task *task,
+int rxe_init_task(struct rxe_task *task,
void *arg, int (*func)(void *), char *name);
/* cleanup task */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index e264cf69bf55..88825edc7dce 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -262,7 +262,6 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
recv_wqe->wr_id = ibwr->wr_id;
- recv_wqe->num_sge = num_sge;
memcpy(recv_wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
@@ -526,7 +525,6 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
const struct ib_send_wr *ibwr)
{
wr->wr_id = ibwr->wr_id;
- wr->num_sge = ibwr->num_sge;
wr->opcode = ibwr->opcode;
wr->send_flags = ibwr->send_flags;
@@ -903,7 +901,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
return ERR_PTR(-ENOMEM);
rxe_get(pd);
- rxe_mr_init_dma(pd, access, mr);
+ mr->ibmr.pd = ibpd;
+
+ rxe_mr_init_dma(access, mr);
rxe_finalize(mr);
return &mr->ibmr;
@@ -928,8 +928,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- err = rxe_mr_init_user(pd, start, length, iova, access, mr);
+ err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err)
goto err3;
@@ -938,7 +939,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
return &mr->ibmr;
err3:
- rxe_put(pd);
rxe_cleanup(mr);
err2:
return ERR_PTR(err);
@@ -962,8 +962,9 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
}
rxe_get(pd);
+ mr->ibmr.pd = ibpd;
- err = rxe_mr_init_fast(pd, max_num_sg, mr);
+ err = rxe_mr_init_fast(max_num_sg, mr);
if (err)
goto err2;
@@ -972,7 +973,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
return &mr->ibmr;
err2:
- rxe_put(pd);
rxe_cleanup(mr);
err1:
return ERR_PTR(err);
@@ -1007,12 +1007,9 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
- mr->va = ibmr->iova;
- mr->iova = ibmr->iova;
- mr->length = ibmr->length;
mr->page_shift = ilog2(ibmr->page_size);
mr->page_mask = ibmr->page_size - 1;
- mr->offset = mr->iova & mr->page_mask;
+ mr->offset = ibmr->iova & mr->page_mask;
return n;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 96af3e054f4d..5f5cbfcb3569 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -305,9 +305,6 @@ struct rxe_mr {
u32 rkey;
enum rxe_mr_state state;
enum ib_mr_type type;
- u64 va;
- u64 iova;
- size_t length;
u32 offset;
int access;
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index 1b5105cbabae..81b70a3eeb87 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,7 +1,10 @@
config RDMA_SIW
tristate "Software RDMA over TCP/IP (iWARP) driver"
- depends on INET && INFINIBAND && LIBCRC32C
+ depends on INET && INFINIBAND
depends on INFINIBAND_VIRT_DMA
+ select LIBCRC32C
+ select CRYPTO
+ select CRYPTO_CRC32C
help
This driver implements the iWARP RDMA transport over
the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index df03d84c6868..2f3a9cda3850 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -418,6 +418,7 @@ struct siw_qp {
struct ib_qp base_qp;
struct siw_device *sdev;
struct kref ref;
+ struct completion qp_free;
struct list_head devq;
int tx_cpu;
struct siw_qp_attrs attrs;
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 7e01f2438afc..e6f634971228 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -1342,6 +1342,6 @@ void siw_free_qp(struct kref *ref)
vfree(qp->orq);
siw_put_tx_cpu(qp->tx_cpu);
-
+ complete(&qp->qp_free);
atomic_dec(&sdev->num_qp);
}
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 875ea6f1b04a..fd721cc19682 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -961,27 +961,28 @@ out:
static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
{
struct sk_buff *skb = srx->skb;
+ int avail = min(srx->skb_new, srx->fpdu_part_rem);
u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
__wsum crc_in, crc_own = 0;
siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
srx->fpdu_part_rem, srx->skb_new, srx->pad);
- if (srx->skb_new < srx->fpdu_part_rem)
- return -EAGAIN;
-
- skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
+ skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
- if (srx->mpa_crc_hd && srx->pad)
- crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
+ srx->skb_new -= avail;
+ srx->skb_offset += avail;
+ srx->skb_copied += avail;
+ srx->fpdu_part_rem -= avail;
- srx->skb_new -= srx->fpdu_part_rem;
- srx->skb_offset += srx->fpdu_part_rem;
- srx->skb_copied += srx->fpdu_part_rem;
+ if (srx->fpdu_part_rem)
+ return -EAGAIN;
if (!srx->mpa_crc_hd)
return 0;
+ if (srx->pad)
+ crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
/*
* CRC32 is computed, transmitted and received directly in NBO,
* so there's never a reason to convert byte order.
@@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
* completely received.
*/
if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
- bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
+ int hdrlen = iwarp_pktinfo[opcode].hdr_len;
- if (srx->skb_new < bytes)
- return -EAGAIN;
+ bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
skb_copy_bits(skb, srx->skb_offset,
(char *)c_hdr + srx->fpdu_part_rcvd, bytes);
@@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
srx->skb_new -= bytes;
srx->skb_offset += bytes;
srx->skb_copied += bytes;
+
+ if (srx->fpdu_part_rcvd < hdrlen)
+ return -EAGAIN;
}
/*
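
The siw_get_trailer()/siw_get_hdr() changes above stop bailing out when a fixed-size field arrives split across TCP segments: they copy whatever is available, track the remaining byte count, and return -EAGAIN until the field is complete. A minimal sketch of that accumulate-until-complete pattern over a plain byte buffer; the 4-byte trailer and all names are invented for illustration:

#include <stdio.h>
#include <string.h>

#define TRAILER_LEN 4

struct rx_state {
	unsigned char trailer[TRAILER_LEN];
	int rem;	/* bytes of the trailer still missing */
};

/* returns 0 when the trailer is complete, -1 ("EAGAIN") otherwise */
static int rx_trailer(struct rx_state *st, const unsigned char *buf, int avail_in)
{
	int avail = avail_in < st->rem ? avail_in : st->rem;

	memcpy(st->trailer + (TRAILER_LEN - st->rem), buf, avail);
	st->rem -= avail;
	return st->rem ? -1 : 0;
}

int main(void)
{
	struct rx_state st = { .rem = TRAILER_LEN };
	unsigned char part1[] = { 0xde, 0xad };
	unsigned char part2[] = { 0xbe, 0xef };

	printf("first segment  -> %d\n", rx_trailer(&st, part1, 2));
	printf("second segment -> %d\n", rx_trailer(&st, part2, 2));
	return 0;
}
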
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 1f4e60257700..7d47b521070b 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
- return virt_to_page(paddr);
+ return virt_to_page((void *)paddr);
return NULL;
}
@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
kunmap_local(kaddr);
}
} else {
- u64 va = sge->laddr + sge_off;
+ /*
+ * Cast to an uintptr_t to preserve all 64 bits
+ * in sge->laddr.
+ */
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
- page_array[seg] = virt_to_page(va & PAGE_MASK);
+ /*
+ * virt_to_page() takes a (void *) pointer,
+ * so cast to a (void *), which is 64 bits
+ * on a 64-bit platform and 32 bits on a
+ * 32-bit platform.
+ */
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(uintptr_t)va,
+ (void *)va,
plen);
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 8dedae7ae79e..3e814cfb298c 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -480,6 +480,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
list_add_tail(&qp->devq, &sdev->qp_list);
spin_unlock_irqrestore(&sdev->lock, flags);
+ init_completion(&qp->qp_free);
+
return 0;
err_out_xa:
@@ -624,6 +626,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
qp->scq = qp->rcq = NULL;
siw_qp_put(qp);
+ wait_for_completion(&qp->qp_free);
return 0;
}
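
The siw change above ties QP teardown to the last kref put: siw_free_qp() signals a completion and siw_destroy_qp() blocks on it, so the QP memory cannot go away while another context still holds a reference. A user-space sketch of that wait-for-last-reference ordering, using POSIX threads in place of the kernel's kref/completion primitives (the delay only makes the ordering visible):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int freed;

static void complete_qp_free(void)
{
	pthread_mutex_lock(&lock);
	freed = 1;			/* complete(&qp->qp_free) */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *last_ref_holder(void *arg)
{
	(void)arg;
	usleep(10000);			/* QP still in use elsewhere */
	printf("last reference dropped\n");
	complete_qp_free();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, last_ref_holder, NULL);

	/* wait_for_completion(&qp->qp_free) */
	pthread_mutex_lock(&lock);
	while (!freed)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("destroy may now free the QP\n");
	pthread_join(t, NULL);
	return 0;
}
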
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index fd9d7f2c4d64..ebb35b809f26 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -884,8 +884,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
goto err_cm;
}
- ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
- 0);
+ ret = ib_cm_listen(priv->cm.id,
+ cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num));
if (ret) {
pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index a09ca21f7dff..8af99b18d361 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -65,10 +65,10 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
- strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
+ strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a4904371e2db..ac25fc80fb33 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -742,7 +742,7 @@ void ipoib_flush_paths(struct net_device *dev)
static void path_rec_completion(int status,
struct sa_path_rec *pathrec,
- void *path_ptr)
+ int num_prs, void *path_ptr)
{
struct ipoib_path *path = path_ptr;
struct net_device *dev = path->dev;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index 42d557dff19d..29b3d8fce3f5 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -124,8 +124,8 @@ static struct vnic_stats vnic_gstrings_stats[] = {
static void vnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
+ strscpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
index 3898509be270..5227e7788e1f 100644
--- a/drivers/infiniband/ulp/rtrs/Makefile
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -1,12 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-or-later
+CFLAGS_rtrs-clt-trace.o = -I$(src)
+
rtrs-client-y := rtrs-clt.o \
rtrs-clt-stats.o \
- rtrs-clt-sysfs.o
+ rtrs-clt-sysfs.o \
+ rtrs-clt-trace.o
+
+CFLAGS_rtrs-srv-trace.o = -I$(src)
rtrs-server-y := rtrs-srv.o \
rtrs-srv-stats.o \
- rtrs-srv-sysfs.o
+ rtrs-srv-sysfs.o \
+ rtrs-srv-trace.o
rtrs-core-y := rtrs.o
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
new file mode 100644
index 000000000000..f14fa1f36ce8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-clt.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-clt-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
new file mode 100644
index 000000000000..7738e2676855
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-trace.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_clt
+
+#if !defined(_TRACE_RTRS_CLT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_CLT_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_clt_path;
+struct rtrs_clt_sess;
+
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTING_ERR);
+TRACE_DEFINE_ENUM(RTRS_CLT_RECONNECTING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_CLT_CLOSED);
+TRACE_DEFINE_ENUM(RTRS_CLT_DEAD);
+
+#define show_rtrs_clt_state(x) \
+ __print_symbolic(x, \
+ { RTRS_CLT_CONNECTING, "CONNECTING" }, \
+ { RTRS_CLT_CONNECTING_ERR, "CONNECTING_ERR" }, \
+ { RTRS_CLT_RECONNECTING, "RECONNECTING" }, \
+ { RTRS_CLT_CONNECTED, "CONNECTED" }, \
+ { RTRS_CLT_CLOSING, "CLOSING" }, \
+ { RTRS_CLT_CLOSED, "CLOSED" }, \
+ { RTRS_CLT_DEAD, "DEAD" })
+
+DECLARE_EVENT_CLASS(rtrs_clt_conn_class,
+ TP_PROTO(struct rtrs_clt_path *clt_path),
+
+ TP_ARGS(clt_path),
+
+ TP_STRUCT__entry(
+ __field(int, state)
+ __field(int, reconnect_attempts)
+ __field(int, max_reconnect_attempts)
+ __field(int, fail_cnt)
+ __field(int, success_cnt)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_clt_sess *clt = clt_path->clt;
+
+ __entry->state = clt_path->state;
+ __entry->reconnect_attempts = clt_path->reconnect_attempts;
+ __entry->max_reconnect_attempts = clt->max_reconnect_attempts;
+ __entry->fail_cnt = clt_path->stats->reconnects.fail_cnt;
+ __entry->success_cnt = clt_path->stats->reconnects.successful_cnt;
+ memcpy(__entry->sessname, kobject_name(&clt_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("RTRS-CLT: sess='%s' state=%s attempts='%d' max-attempts='%d' fail='%d' success='%d'",
+ __entry->sessname,
+ show_rtrs_clt_state(__entry->state),
+ __entry->reconnect_attempts,
+ __entry->max_reconnect_attempts,
+ __entry->fail_cnt,
+ __entry->success_cnt
+ )
+);
+
+#define DEFINE_CLT_CONN_EVENT(name) \
+DEFINE_EVENT(rtrs_clt_conn_class, rtrs_##name, \
+ TP_PROTO(struct rtrs_clt_path *clt_path), \
+ TP_ARGS(clt_path))
+
+DEFINE_CLT_CONN_EVENT(clt_reconnect_work);
+DEFINE_CLT_CONN_EVENT(clt_close_conns);
+DEFINE_CLT_CONN_EVENT(rdma_error_recovery);
+
+#endif /* _TRACE_RTRS_CLT_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-clt-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index baecde41d126..758e1d7ebc36 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -16,6 +16,7 @@
#include "rtrs-clt.h"
#include "rtrs-log.h"
+#include "rtrs-clt-trace.h"
#define RTRS_CONNECT_TIMEOUT_MS 30000
/*
@@ -53,7 +54,10 @@ static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
rcu_read_lock();
list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
- connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED;
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
+ connected = true;
+ break;
+ }
rcu_read_unlock();
return connected;
@@ -302,6 +306,8 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ trace_rtrs_rdma_error_recovery(clt_path);
+
if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
@@ -1004,7 +1010,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, bool fr_en,
- u32 size, u32 imm, struct ib_send_wr *wr,
+ u32 count, u32 size, u32 imm,
+ struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -1024,12 +1031,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
num_sge = 2;
ptail = tail;
} else {
- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ for_each_sg(req->sglist, sg, count, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
- num_sge = 1 + req->sg_cnt;
+ num_sge = 1 + count;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
@@ -1142,7 +1149,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, WRITE);
- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
imm, wr, &inv_wr);
if (ret) {
@@ -1942,6 +1949,8 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{
+ trace_rtrs_clt_close_conns(clt_path);
+
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &clt_path->close_work);
if (wait)
@@ -2212,17 +2221,6 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
}
}
-static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
- struct rtrs_clt_path *clt_path,
- struct rtrs_clt_path *next)
-{
- struct rtrs_clt_path **ppcpu_path;
-
- /* Call cmpxchg() without sparse warnings */
- ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
- return clt_path == cmpxchg(ppcpu_path, clt_path, next);
-}
-
static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
@@ -2297,7 +2295,8 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
* We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it.
*/
- if (xchg_paths(ppcpu_path, clt_path, next))
+ if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
+ next))
/*
* @ppcpu_path was successfully replaced with @next,
* that means that someone could also pick up the
@@ -2648,6 +2647,8 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
reconnect_dwork);
clt = clt_path->clt;
+ trace_rtrs_clt_reconnect_work(clt_path);
+
if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return;
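
The rtrs-clt change above drops the open-coded xchg_paths() wrapper in favour of try_cmpxchg(): the per-CPU pointer is replaced only if it still holds the path being removed. A user-space approximation built on the GCC/Clang __atomic builtins, with plain ints standing in for the rtrs path structures:

#include <stdbool.h>
#include <stdio.h>

static int path_a = 1, path_b = 2;
static int *pcpu_path = &path_a;

static bool try_swap(int **slot, int *expected, int *next)
{
	/* strong compare-exchange; swaps only if *slot still equals expected */
	return __atomic_compare_exchange_n(slot, &expected, next, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	if (try_swap(&pcpu_path, &path_a, &path_b))
		printf("replaced path_a with path_b\n");

	/* a second attempt with a stale expected value must fail */
	if (!try_swap(&pcpu_path, &path_a, NULL))
		printf("slot no longer holds path_a, left untouched\n");
	return 0;
}
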
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index ac0df734eba8..a2420eecaf5a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -26,11 +26,10 @@
/*
* Max IB immediate data size is 2^28 (MAX_IMM_PAYL_BITS)
* and the minimum chunk size is 4096 (2^12).
- * So the maximum sess_queue_depth is 65536 (2^16) in theory.
- * But mempool_create, create_qp and ib_post_send fail with
- * "cannot allocate memory" error if sess_queue_depth is too big.
+ * So the maximum sess_queue_depth is 65535 (2^16 - 1) in theory
+ * since queue_depth in rtrs_msg_conn_rsp is defined as le16.
* Therefore the practical max value of sess_queue_depth is
- * somewhere between 1 and 65534 and it depends on the system.
+ * somewhere between 1 and 65535 and it depends on the system.
*/
#define MAX_SESS_QUEUE_DEPTH 65535
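For context, the arithmetic in the comment above can be checked directly: the IB immediate payload is capped at 2^28 bytes, the smallest chunk is 2^12 bytes, so at most 2^16 chunks fit, while queue_depth travels on the wire as a le16 and therefore tops out at 65535. A minimal standalone sketch (plain userspace C, not part of the patch; the constants mirror the macros named above):

#include <stdio.h>

int main(void)
{
	unsigned long max_imm_payload = 1ul << 28;  /* MAX_IMM_PAYL_BITS */
	unsigned long min_chunk_size  = 1ul << 12;  /* minimum chunk size, 4096 */
	unsigned long by_imm  = max_imm_payload / min_chunk_size;  /* 65536 */
	unsigned long by_le16 = 0xffff;             /* queue_depth is a le16 field */

	/* the tighter of the two limits is what MAX_SESS_QUEUE_DEPTH encodes */
	printf("%lu\n", by_imm < by_le16 ? by_imm : by_le16);  /* prints 65535 */
	return 0;
}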
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
new file mode 100644
index 000000000000..29ca59ceb0dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#include "rtrs.h"
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+
+/*
+ * We include this last to have the helpers above available for the trace
+ * event implementations.
+ */
+#define CREATE_TRACE_POINTS
+#include "rtrs-srv-trace.h"
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
new file mode 100644
index 000000000000..587d3e033081
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-trace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rtrs_srv
+
+#if !defined(_TRACE_RTRS_SRV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RTRS_SRV_H
+
+#include <linux/tracepoint.h>
+
+struct rtrs_srv_op;
+struct rtrs_srv_con;
+struct rtrs_srv_path;
+
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CONNECTED);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSING);
+TRACE_DEFINE_ENUM(RTRS_SRV_CLOSED);
+
+#define show_rtrs_srv_state(x) \
+ __print_symbolic(x, \
+ { RTRS_SRV_CONNECTING, "CONNECTING" }, \
+ { RTRS_SRV_CONNECTED, "CONNECTED" }, \
+ { RTRS_SRV_CLOSING, "CLOSING" }, \
+ { RTRS_SRV_CLOSED, "CLOSED" })
+
+TRACE_EVENT(send_io_resp_imm,
+ TP_PROTO(struct rtrs_srv_op *id,
+ bool need_inval,
+ bool always_invalidate,
+ int errno),
+
+ TP_ARGS(id, need_inval, always_invalidate, errno),
+
+ TP_STRUCT__entry(
+ __field(u8, dir)
+ __field(bool, need_inval)
+ __field(bool, always_invalidate)
+ __field(u32, msg_id)
+ __field(int, wr_cnt)
+ __field(u32, signal_interval)
+ __field(int, state)
+ __field(int, errno)
+ __array(char, sessname, NAME_MAX)
+ ),
+
+ TP_fast_assign(
+ struct rtrs_srv_con *con = id->con;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+
+ __entry->dir = id->dir;
+ __entry->state = srv_path->state;
+ __entry->errno = errno;
+ __entry->need_inval = need_inval;
+ __entry->always_invalidate = always_invalidate;
+ __entry->msg_id = id->msg_id;
+ __entry->wr_cnt = atomic_read(&con->c.wr_cnt);
+ __entry->signal_interval = s->signal_interval;
+ memcpy(__entry->sessname, kobject_name(&srv_path->kobj), NAME_MAX);
+ ),
+
+ TP_printk("sess='%s' state='%s' dir=%s err='%d' inval='%d' glob-inval='%d' msgid='%u' wrcnt='%d' sig-interval='%u'",
+ __entry->sessname,
+ show_rtrs_srv_state(__entry->state),
+ __print_symbolic(__entry->dir,
+ { READ, "READ" },
+ { WRITE, "WRITE" }),
+ __entry->errno,
+ __entry->need_inval,
+ __entry->always_invalidate,
+ __entry->msg_id,
+ __entry->wr_cnt,
+ __entry->signal_interval
+ )
+);
+
+#endif /* _TRACE_RTRS_SRV_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE rtrs-srv-trace
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 34c03bde5064..22d7ba05e9fe 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -16,6 +16,7 @@
#include "rtrs-log.h"
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
+#include "rtrs-srv-trace.h"
MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL");
@@ -57,11 +58,6 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
return container_of(c, struct rtrs_srv_con, c);
}
-static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
-{
- return container_of(s, struct rtrs_srv_path, s);
-}
-
static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state)
{
@@ -375,6 +371,8 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
}
}
+ trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
+
if (need_inval && always_invalidate) {
wr = &inv_wr;
inv_wr.next = &rwr.wr;
@@ -595,7 +593,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
- int nr, chunks;
+ int nr, nr_sgt, chunks;
chunks = chunks_per_mr * mri;
if (!always_invalidate)
@@ -610,19 +608,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
- if (nr < sgt->nents) {
- err = nr < 0 ? nr : -EINVAL;
+ if (!nr_sgt) {
+ err = -EINVAL;
goto free_sg;
}
mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sgt->nents);
+ nr_sgt);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto unmap_sg;
}
- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
if (nr < 0 || nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
@@ -641,7 +639,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
}
}
/* Eventually dma addr for each chunk can be cached */
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ for_each_sg(sgt->sgl, s, nr_sgt, i)
srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
@@ -1024,7 +1022,7 @@ static void process_read(struct rtrs_srv_con *con,
usr_len = le16_to_cpu(msg->usr_len);
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
- ret = ctx->ops.rdma_ev(srv->priv, id, READ, data, data_len,
+ ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len);
if (ret) {
@@ -1077,7 +1075,7 @@ static void process_write(struct rtrs_srv_con *con,
usr_len = le16_to_cpu(req->usr_len);
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
- ret = ctx->ops.rdma_ev(srv->priv, id, WRITE, data, data_len,
+ ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len);
if (ret) {
rtrs_err_rl(s,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 186a63c217df..2f8a638e36fa 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -91,6 +91,11 @@ struct rtrs_srv_path {
struct rtrs_srv_stats *stats;
};
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
+{
+ return container_of(s, struct rtrs_srv_path, s);
+}
+
struct rtrs_srv_sess {
struct list_head paths_list;
int paths_up;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 60fa0b0160f4..ed324b47d93a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -175,7 +175,7 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
* length error
*/
for (i = 0; i < num_sge; i++)
- if (WARN_ON(sge[i].length == 0))
+ if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
return -EINVAL;
return rtrs_post_send(con->qp, head, &wr.wr, tail);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 5e57a7ccc7fb..b48b53a7c143 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -139,7 +139,6 @@ struct rtrs_srv_ops {
* @priv: Private data set by rtrs_srv_set_sess_priv()
* @id: internal RTRS operation id
- * @dir: READ/WRITE
* @data: Pointer to (bidirectional) rdma memory area:
* - in case of %RTRS_SRV_RDMA_EV_RECV contains
* data sent by the client
@@ -151,7 +150,7 @@ struct rtrs_srv_ops {
* @usrlen: Size of the user message
*/
int (*rdma_ev)(void *priv,
- struct rtrs_srv_op *id, int dir,
+ struct rtrs_srv_op *id,
void *data, size_t datalen, const void *usr,
size_t usrlen);
/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7720ea270ed8..1075c2ac8fe2 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -699,7 +699,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
static void srp_path_rec_completion(int status,
struct sa_path_rec *pathrec,
- void *ch_ptr)
+ int num_paths, void *ch_ptr)
{
struct srp_rdma_ch *ch = ch_ptr;
struct srp_target_port *target = ch->target;
@@ -1961,7 +1961,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
if (scmnd) {
req = scsi_cmd_priv(scmnd);
scmnd = srp_claim_req(ch, req, NULL, scmnd);
- } else {
+ }
+ if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
rsp->tag, ch - target->ch, ch->qp->qp_num);
@@ -2788,7 +2789,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
- struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+ struct srp_request *req = scsi_cmd_priv(scmnd);
u32 tag;
u16 ch_idx;
struct srp_rdma_ch *ch;
@@ -2796,8 +2797,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req)
- return SUCCESS;
tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count))
@@ -2990,7 +2989,7 @@ static ssize_t local_ib_port_show(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sysfs_emit(buf, "%d\n", target->srp_host->port);
+ return sysfs_emit(buf, "%u\n", target->srp_host->port);
}
static DEVICE_ATTR_RO(local_ib_port);
@@ -3178,11 +3177,16 @@ static void srp_release_dev(struct device *dev)
struct srp_host *host =
container_of(dev, struct srp_host, dev);
- complete(&host->released);
+ kfree(host);
}
+static struct attribute *srp_class_attrs[];
+
+ATTRIBUTE_GROUPS(srp_class);
+
static struct class srp_class = {
.name = "infiniband_srp",
+ .dev_groups = srp_class_groups,
.dev_release = srp_release_dev
};
@@ -3883,12 +3887,19 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sysfs_emit(buf, "%d\n", host->port);
+ return sysfs_emit(buf, "%u\n", host->port);
}
static DEVICE_ATTR_RO(port);
-static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
+static struct attribute *srp_class_attrs[] = {
+ &dev_attr_add_target.attr,
+ &dev_attr_ibdev.attr,
+ &dev_attr_port.attr,
+ NULL
+};
+
+static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
{
struct srp_host *host;
@@ -3898,33 +3909,24 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
INIT_LIST_HEAD(&host->target_list);
spin_lock_init(&host->target_lock);
- init_completion(&host->released);
mutex_init(&host->add_target_mutex);
host->srp_dev = device;
host->port = port;
+ device_initialize(&host->dev);
host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent;
- dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
- port);
-
- if (device_register(&host->dev))
- goto free_host;
- if (device_create_file(&host->dev, &dev_attr_add_target))
- goto err_class;
- if (device_create_file(&host->dev, &dev_attr_ibdev))
- goto err_class;
- if (device_create_file(&host->dev, &dev_attr_port))
- goto err_class;
+ if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
+ port))
+ goto put_host;
+ if (device_add(&host->dev))
+ goto put_host;
return host;
-err_class:
- device_unregister(&host->dev);
-
-free_host:
- kfree(host);
-
+put_host:
+ device_del(&host->dev);
+ put_device(&host->dev);
return NULL;
}
@@ -3936,7 +3938,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
char name[IB_DEVICE_NAME_MAX + 8];
- snprintf(name, sizeof(name), "srp-%s-%d",
+ snprintf(name, sizeof(name), "srp-%s-%u",
dev_name(&device->dev), host->port);
device_rename(&host->dev, name);
}
@@ -3948,7 +3950,7 @@ static int srp_add_one(struct ib_device *device)
struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
int mr_page_shift;
- unsigned int p;
+ u32 p;
u64 max_pages_per_mr;
unsigned int flags = 0;
@@ -4030,12 +4032,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
srp_dev = client_data;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
- device_unregister(&host->dev);
/*
- * Wait for the sysfs entry to go away, so that no new
- * target ports can be created.
+ * Remove the add_target sysfs entry so that no new target ports
+ * can be created.
*/
- wait_for_completion(&host->released);
+ device_del(&host->dev);
/*
* Remove all target ports.
@@ -4053,7 +4054,7 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
*/
flush_workqueue(srp_remove_wq);
- kfree(host);
+ put_device(&host->dev);
}
ib_dealloc_pd(srp_dev->pd);
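The ib_srp.c hunks above replace device_register() plus per-attribute device_create_file() calls (and the released completion) with the initialize/add split and a dev_groups-driven attribute set, moving the final kfree() into the release callback. A hedged, generic sketch of that lifecycle (struct and function names are illustrative, not the SRP ones):

#include <linux/device.h>
#include <linux/slab.h>

struct my_host {
	struct device dev;
	/* driver-private fields would live here */
};

static void my_host_release(struct device *dev)
{
	struct my_host *host = container_of(dev, struct my_host, dev);

	kfree(host);			/* final teardown runs from the release hook */
}

static struct my_host *my_add_host(struct device *parent)
{
	struct my_host *host = kzalloc(sizeof(*host), GFP_KERNEL);

	if (!host)
		return NULL;

	device_initialize(&host->dev);	/* the refcount is live from here on */
	host->dev.parent = parent;
	host->dev.release = my_host_release;
	/* a class with .dev_groups would attach the sysfs attributes here */

	if (dev_set_name(&host->dev, "my-host") || device_add(&host->dev)) {
		put_device(&host->dev);	/* drops the ref; release frees host */
		return NULL;
	}
	return host;
}

static void my_remove_host(struct my_host *host)
{
	device_del(&host->dev);		/* tears down sysfs; no new users appear */
	/* ... remove things that still need host ... */
	put_device(&host->dev);		/* last ref -> my_host_release -> kfree */
}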
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 55a575e2cace..00b0068fda20 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -120,11 +120,10 @@ struct srp_device {
*/
struct srp_host {
struct srp_device *srp_dev;
- u8 port;
+ u32 port;
struct device dev;
struct list_head target_list;
spinlock_t target_lock;
- struct completion released;
struct list_head list;
struct mutex add_target_mutex;
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 21cbe30d526f..3c3fae738c3e 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1421,7 +1421,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
- memcpy(srp_rsp + 1, sense_data, sense_data_len);
+ memcpy(srp_rsp->data, sense_data, sense_data_len);
}
return sizeof(*srp_rsp) + sense_data_len;
@@ -2300,7 +2300,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
goto free_recv_ring;
}
- strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ strscpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
@@ -3191,7 +3191,7 @@ static int srpt_add_one(struct ib_device *device)
* if this HCA is gone bad and replaced by different HCA
*/
ret = sdev->cm_id ?
- ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
+ ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid)) :
0;
if (ret < 0) {
pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 34bcd99a46f5..2beda29021a3 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -327,7 +327,7 @@ err_free_mem:
return error;
}
-static int as5011_remove(struct i2c_client *client)
+static void as5011_remove(struct i2c_client *client)
{
struct as5011_device *as5011 = i2c_get_clientdata(client);
@@ -337,8 +337,6 @@ static int as5011_remove(struct i2c_client *client)
input_unregister_device(as5011->input_dev);
kfree(as5011);
-
- return 0;
}
static const struct i2c_device_id as5011_id[] = {
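This as5011 hunk is the first of many identical conversions in this section: at this kernel version the i2c core expects remove callbacks to return void, since a nonzero return could not be acted on anyway. A hedged skeleton of the resulting driver shape (device and symbol names are illustrative):

#include <linux/i2c.h>
#include <linux/module.h>

static int my_probe(struct i2c_client *client)
{
	/* acquire resources, register input devices, etc. */
	return 0;
}

static void my_remove(struct i2c_client *client)
{
	/* undo probe; the void return means failures here cannot be reported */
}

static const struct i2c_device_id my_id[] = {
	{ "mydev", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, my_id);

static struct i2c_driver my_driver = {
	.driver = {
		.name = "mydev",
	},
	.probe_new = my_probe,
	.remove = my_remove,
	.id_table = my_id,
};
module_i2c_driver(my_driver);

MODULE_DESCRIPTION("Illustrative skeleton only");
MODULE_LICENSE("GPL");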
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b2a68bc9f0b4..b86de1312512 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index f95a81b9fac7..2380546d7978 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
again:
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -64,7 +64,7 @@ again:
if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
goto again;
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
}
@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
iforce_serio->cmd_response_len = iforce_serio->len;
/* Signal that command is done */
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
} else if (likely(iforce->type)) {
iforce_process_packet(iforce, iforce_serio->id,
iforce_serio->data_in,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ea58805c480f..cba92bd590a8 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
spin_lock_irqsave(&iforce->xmit_lock, flags);
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
XMIT_INC(iforce->xmit.tail, n);
if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_warn(&iforce_usb->intf->dev,
"usb_submit_urb failed %d\n", n);
+ iforce_clear_xmit_and_wake(iforce);
}
/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = &iforce_usb->iforce;
if (urb->status) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
urb->status);
+ iforce_clear_xmit_and_wake(iforce);
return;
}
__iforce_usb_xmit(iforce);
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
}
static int iforce_usb_probe(struct usb_interface *intf,
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 6aa761ebbdf7..9ccb9107ccbe 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
response_data, response_len);
}
+static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
+{
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ wake_up_all(&iforce->wait);
+}
+
/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
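The iforce hunks above funnel every "transmission finished" path through one helper that clears the running bit and then wakes all waiters, so a task sleeping on the flag cannot miss the completion. A hedged sketch of that clear-then-wake pattern (names are illustrative, not the iforce ones):

#include <linux/bitops.h>
#include <linux/wait.h>

struct xmit_ctx {
	unsigned long flags;		/* bit 0: transmit in progress */
	wait_queue_head_t wait;
};

static void xmit_ctx_init(struct xmit_ctx *ctx)
{
	ctx->flags = 0;
	init_waitqueue_head(&ctx->wait);
}

static void xmit_done(struct xmit_ctx *ctx)
{
	clear_bit(0, &ctx->flags);	/* publish "not running" first ... */
	wake_up_all(&ctx->wait);	/* ... then wake every waiter */
}

static void xmit_wait_idle(struct xmit_ctx *ctx)
{
	wait_event(ctx->wait, !test_bit(0, &ctx->flags));
}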
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 1a1a05d7cd42..e2719737360a 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -592,12 +592,11 @@ static int adp5588_probe(struct i2c_client *client,
return 0;
}
-static int adp5588_remove(struct i2c_client *client)
+static void adp5588_remove(struct i2c_client *client)
{
adp5588_write(client, CFG, 0);
/* all resources will be freed by devm */
- return 0;
}
static int __maybe_unused adp5588_suspend(struct device *dev)
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
index 93446b21f98f..db793a550c25 100644
--- a/drivers/input/keyboard/iqs62x-keys.c
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -77,6 +77,7 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to read switch code: %d\n",
ret);
+ fwnode_handle_put(child);
return ret;
}
iqs62x_keys->switches[i].code = val;
@@ -90,6 +91,8 @@ static int iqs62x_keys_parse_prop(struct platform_device *pdev,
iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
IQS62X_EVENT_HALL_N_T :
IQS62X_EVENT_HALL_S_T);
+
+ fwnode_handle_put(child);
}
return 0;
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 6c38d034ec6e..407dd2ad6302 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -752,7 +752,7 @@ fail1:
return err;
}
-static int lm8323_remove(struct i2c_client *client)
+static void lm8323_remove(struct i2c_client *client)
{
struct lm8323_chip *lm = i2c_get_clientdata(client);
int i;
@@ -769,8 +769,6 @@ static int lm8323_remove(struct i2c_client *client)
led_classdev_unregister(&lm->pwm[i].cdev);
kfree(lm);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 7c5f8c6bb957..9dac22c14125 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -200,15 +200,13 @@ static int lm8333_probe(struct i2c_client *client,
return err;
}
-static int lm8333_remove(struct i2c_client *client)
+static void lm8333_remove(struct i2c_client *client)
{
struct lm8333 *lm8333 = i2c_get_clientdata(client);
free_irq(client->irq, lm8333);
input_unregister_device(lm8333->input);
kfree(lm8333);
-
- return 0;
}
static const struct i2c_device_id lm8333_id[] = {
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 8cb0062b98e4..ac1637a3389e 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -194,7 +194,7 @@ err_free_mem:
return error;
}
-static int mcs_touchkey_remove(struct i2c_client *client)
+static void mcs_touchkey_remove(struct i2c_client *client)
{
struct mcs_touchkey_data *data = i2c_get_clientdata(client);
@@ -203,8 +203,6 @@ static int mcs_touchkey_remove(struct i2c_client *client)
data->poweron(false);
input_unregister_device(data->input_dev);
kfree(data);
-
- return 0;
}
static void mcs_touchkey_shutdown(struct i2c_client *client)
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 7174e1df1ee3..9fcce18b1d65 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -216,7 +216,7 @@ err_free_mem:
return err;
}
-static int qt1070_remove(struct i2c_client *client)
+static void qt1070_remove(struct i2c_client *client)
{
struct qt1070_data *data = i2c_get_clientdata(client);
@@ -225,8 +225,6 @@ static int qt1070_remove(struct i2c_client *client)
input_unregister_device(data->input);
kfree(data);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 32d4a076eaa3..382b1519218c 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -432,7 +432,7 @@ err_free_mem:
return error;
}
-static int qt2160_remove(struct i2c_client *client)
+static void qt2160_remove(struct i2c_client *client)
{
struct qt2160_data *qt2160 = i2c_get_clientdata(client);
@@ -446,8 +446,6 @@ static int qt2160_remove(struct i2c_client *client)
input_unregister_device(qt2160->input);
kfree(qt2160);
-
- return 0;
}
static const struct i2c_device_id qt2160_idtable[] = {
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 65286762b02a..ad8660be0127 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -20,7 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#define SNVS_HPVIDR1_REG 0xF8
+#define SNVS_HPVIDR1_REG 0xBF8
#define SNVS_LPSR_REG 0x4C /* LP Status Register */
#define SNVS_LPCR_REG 0x38 /* LP Control Register */
#define SNVS_HPSR_REG 0x14
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 2a9755910065..afcdfbb002ff 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -307,7 +307,7 @@ fail1:
return error;
}
-static int tca6416_keypad_remove(struct i2c_client *client)
+static void tca6416_keypad_remove(struct i2c_client *client)
{
struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
@@ -318,8 +318,6 @@ static int tca6416_keypad_remove(struct i2c_client *client)
input_unregister_device(chip->input);
kfree(chip);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index a3b5f88d2bd1..5be636aaa94f 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -99,13 +99,11 @@ static int adxl34x_i2c_probe(struct i2c_client *client,
return 0;
}
-static int adxl34x_i2c_remove(struct i2c_client *client)
+static void adxl34x_i2c_remove(struct i2c_client *client)
{
struct adxl34x *ac = i2c_get_clientdata(client);
adxl34x_remove(ac);
-
- return 0;
}
static int __maybe_unused adxl34x_i2c_suspend(struct device *dev)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index a9d984da95f3..84fe394da7a6 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -513,11 +513,9 @@ static int bma150_probe(struct i2c_client *client,
return 0;
}
-static int bma150_remove(struct i2c_client *client)
+static void bma150_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused bma150_suspend(struct device *dev)
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index 03fb49127c3a..3b23210c46b7 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -58,13 +58,11 @@ static int cma3000_i2c_probe(struct i2c_client *client,
return 0;
}
-static int cma3000_i2c_remove(struct i2c_client *client)
+static void cma3000_i2c_remove(struct i2c_client *client)
{
struct cma3000_accl_data *data = i2c_get_clientdata(client);
cma3000_exit(data);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index abc423165522..cfd6640e4f82 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -157,7 +157,7 @@ static int pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_i
return ret;
}
-static int pcf8574_kp_remove(struct i2c_client *client)
+static void pcf8574_kp_remove(struct i2c_client *client)
{
struct kp_data *lp = i2c_get_clientdata(client);
@@ -165,8 +165,6 @@ static int pcf8574_kp_remove(struct i2c_client *client)
input_unregister_device(lp->idev);
kfree(lp);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
index 3fb64dbda1a2..76873aa005b4 100644
--- a/drivers/input/misc/rk805-pwrkey.c
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
};
module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 434d48ae4b12..ffad142801b3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -186,7 +186,6 @@ static const char * const smbus_pnp_ids[] = {
"LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
- "LEN2064", /* T14 Gen 1 AMD / P14s Gen 1 AMD */
"LEN2068", /* T14 Gen 1 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index fa304648d611..987ee67a1045 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -587,7 +587,7 @@ err_mem_free:
return ret;
}
-static int synaptics_i2c_remove(struct i2c_client *client)
+static void synaptics_i2c_remove(struct i2c_client *client)
{
struct synaptics_i2c *touch = i2c_get_clientdata(client);
@@ -596,8 +596,6 @@ static int synaptics_i2c_remove(struct i2c_client *client)
input_unregister_device(touch->input);
kfree(touch);
-
- return 0;
}
static int __maybe_unused synaptics_i2c_suspend(struct device *dev)
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 2407ea43de59..c130468541b7 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -338,13 +338,11 @@ static int rmi_smb_probe(struct i2c_client *client,
return 0;
}
-static int rmi_smb_remove(struct i2c_client *client)
+static void rmi_smb_remove(struct i2c_client *client)
{
struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
rmi_unregister_transport_device(&rmi_smb->xport);
-
- return 0;
}
static int __maybe_unused rmi_smb_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index eb66cd2689b7..4eedea08b0b5 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -3284,7 +3284,7 @@ err_disable_regulators:
return error;
}
-static int mxt_remove(struct i2c_client *client)
+static void mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
@@ -3294,8 +3294,6 @@ static int mxt_remove(struct i2c_client *client)
mxt_free_object_table(data);
regulator_bulk_disable(ARRAY_SIZE(data->regulators),
data->regulators);
-
- return 0;
}
static int __maybe_unused mxt_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 2f1f0d7607f8..34f422e246ef 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -552,15 +552,13 @@ static int bu21013_probe(struct i2c_client *client,
return 0;
}
-static int bu21013_remove(struct i2c_client *client)
+static void bu21013_remove(struct i2c_client *client)
{
struct bu21013_ts *ts = i2c_get_clientdata(client);
/* Make sure IRQ will exit quickly even if there is contact */
ts->touch_stopped = true;
/* The resources will be freed by devm */
-
- return 0;
}
static int __maybe_unused bu21013_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index c65ccb2f4716..28ae7c15397a 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -43,13 +43,11 @@ static int cyttsp4_i2c_probe(struct i2c_client *client,
return PTR_ERR_OR_ZERO(ts);
}
-static int cyttsp4_i2c_remove(struct i2c_client *client)
+static void cyttsp4_i2c_remove(struct i2c_client *client)
{
struct cyttsp4 *ts = i2c_get_clientdata(client);
cyttsp4_remove(ts);
-
- return 0;
}
static const struct i2c_device_id cyttsp4_i2c_id[] = {
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 82beddb28761..5fb441387fe5 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1346,13 +1346,11 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return 0;
}
-static int edt_ft5x06_ts_remove(struct i2c_client *client)
+static void edt_ft5x06_ts_remove(struct i2c_client *client)
{
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
-
- return 0;
}
static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index d016505fc081..a33cc7950cf5 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -95,6 +95,7 @@ static const struct goodix_chip_data gt9x_chip_data = {
static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "1151", .data = &gt1x_chip_data },
+ { .id = "1158", .data = &gt1x_chip_data },
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
@@ -1382,14 +1383,12 @@ reset:
return 0;
}
-static int goodix_ts_remove(struct i2c_client *client)
+static void goodix_ts_remove(struct i2c_client *client)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
if (ts->load_cfg_from_disk)
wait_for_completion(&ts->firmware_loading_complete);
-
- return 0;
}
static int __maybe_unused goodix_suspend(struct device *dev)
@@ -1508,6 +1507,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt1158" },
{ .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 2745bf1aee38..83f4be05e27b 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1453,7 +1453,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
"ce", GPIOD_OUT_LOW);
if (IS_ERR(ts->gpio_ce)) {
error = PTR_ERR(ts->gpio_ce);
- if (error != EPROBE_DEFER)
+ if (error != -EPROBE_DEFER)
dev_err(&client->dev,
"Failed to get gpio: %d\n", error);
return error;
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 42d3fd7e04d7..79cd660d879e 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -176,7 +176,7 @@ static int migor_ts_probe(struct i2c_client *client,
return error;
}
-static int migor_ts_remove(struct i2c_client *client)
+static void migor_ts_remove(struct i2c_client *client)
{
struct migor_ts_priv *priv = i2c_get_clientdata(client);
@@ -185,8 +185,6 @@ static int migor_ts_remove(struct i2c_client *client)
kfree(priv);
dev_set_drvdata(&client->dev, NULL);
-
- return 0;
}
static int __maybe_unused migor_ts_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
index 85a1f465c097..1a7d00289b4c 100644
--- a/drivers/input/touchscreen/s6sy761.c
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -475,11 +475,9 @@ static int s6sy761_probe(struct i2c_client *client,
return 0;
}
-static int s6sy761_remove(struct i2c_client *client)
+static void s6sy761_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused s6sy761_runtime_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index c175d44c52f3..d5bd170808fb 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -738,11 +738,9 @@ static int stmfts_probe(struct i2c_client *client,
return 0;
}
-static int stmfts_remove(struct i2c_client *client)
+static void stmfts_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused stmfts_runtime_suspend(struct device *dev)
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 742a7e96c1b5..73eb8f80be6e 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -192,12 +192,12 @@ static int sun4i_get_temp(const struct sun4i_ts_data *ts, int *temp)
return 0;
}
-static int sun4i_get_tz_temp(void *data, int *temp)
+static int sun4i_get_tz_temp(struct thermal_zone_device *tz, int *temp)
{
- return sun4i_get_temp(data, temp);
+ return sun4i_get_temp(tz->devdata, temp);
}
-static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+static const struct thermal_zone_device_ops sun4i_ts_tz_ops = {
.get_temp = sun4i_get_tz_temp,
};
@@ -356,8 +356,8 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
- &sun4i_ts_tz_ops);
+ thermal = devm_thermal_of_zone_register(ts->dev, 0, ts,
+ &sun4i_ts_tz_ops);
if (IS_ERR(thermal))
return PTR_ERR(thermal);
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 9fdd870c4c0b..a9565353ee98 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -43,11 +43,9 @@ static int tsc2004_probe(struct i2c_client *i2c,
tsc2004_cmd);
}
-static int tsc2004_remove(struct i2c_client *i2c)
+static void tsc2004_remove(struct i2c_client *i2c)
{
tsc200x_remove(&i2c->dev);
-
- return 0;
}
static const struct i2c_device_id tsc2004_idtable[] = {
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 808f6e7a8048..25debded65a8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1057,29 +1057,25 @@ EXPORT_SYMBOL_GPL(icc_provider_add);
/**
* icc_provider_del() - delete previously added interconnect provider
* @provider: the interconnect provider that will be removed from topology
- *
- * Return: 0 on success, or an error code otherwise
*/
-int icc_provider_del(struct icc_provider *provider)
+void icc_provider_del(struct icc_provider *provider)
{
mutex_lock(&icc_lock);
if (provider->users) {
pr_warn("interconnect provider still has %d users\n",
provider->users);
mutex_unlock(&icc_lock);
- return -EBUSY;
+ return;
}
if (!list_empty(&provider->nodes)) {
pr_warn("interconnect provider still has nodes\n");
mutex_unlock(&icc_lock);
- return -EBUSY;
+ return;
}
list_del(&provider->provider_list);
mutex_unlock(&icc_lock);
-
- return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_del);
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 48ffd59953bf..823d9be9771a 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -324,13 +324,13 @@ provider_del:
}
EXPORT_SYMBOL_GPL(imx_icc_register);
-int imx_icc_unregister(struct platform_device *pdev)
+void imx_icc_unregister(struct platform_device *pdev)
{
struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
imx_icc_unregister_nodes(&imx_provider->provider);
- return icc_provider_del(&imx_provider->provider);
+ icc_provider_del(&imx_provider->provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
index e0a2ee173ecd..895907cdcb3b 100644
--- a/drivers/interconnect/imx/imx.h
+++ b/drivers/interconnect/imx/imx.h
@@ -103,6 +103,6 @@ int imx_icc_register(struct platform_device *pdev,
struct imx_icc_node_desc *nodes,
int nodes_count,
struct imx_icc_noc_setting *noc_settings);
-int imx_icc_unregister(struct platform_device *pdev);
+void imx_icc_unregister(struct platform_device *pdev);
#endif /* __DRIVERS_INTERCONNECT_IMX_H */
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
index ae797412db96..b43325364aa3 100644
--- a/drivers/interconnect/imx/imx8mm.c
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -88,7 +88,9 @@ static int imx8mm_icc_probe(struct platform_device *pdev)
static int imx8mm_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mm_icc_driver = {
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
index 1ce94c5bdd8c..8ce6d8e4bf5e 100644
--- a/drivers/interconnect/imx/imx8mn.c
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -77,7 +77,9 @@ static int imx8mn_icc_probe(struct platform_device *pdev)
static int imx8mn_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mn_icc_driver = {
diff --git a/drivers/interconnect/imx/imx8mp.c b/drivers/interconnect/imx/imx8mp.c
index 5f1c83ed157b..8bfaf173f1da 100644
--- a/drivers/interconnect/imx/imx8mp.c
+++ b/drivers/interconnect/imx/imx8mp.c
@@ -242,7 +242,9 @@ static int imx8mp_icc_probe(struct platform_device *pdev)
static int imx8mp_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mp_icc_driver = {
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index 7f00a0511c6e..b6fb71305c99 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -87,7 +87,9 @@ static int imx8mq_icc_probe(struct platform_device *pdev)
static int imx8mq_icc_remove(struct platform_device *pdev)
{
- return imx_icc_unregister(pdev);
+ imx_icc_unregister(pdev);
+
+ return 0;
}
static struct platform_driver imx8mq_icc_driver = {
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 25d5b4baf6f6..1a1c941635a2 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config INTERCONNECT_QCOM
- bool "Qualcomm Network-on-Chip interconnect drivers"
+ tristate "Qualcomm Network-on-Chip interconnect drivers"
depends on ARCH_QCOM
help
Support for Qualcomm's Network-on-Chip interconnect hardware.
diff --git a/drivers/interconnect/qcom/icc-common.c b/drivers/interconnect/qcom/icc-common.c
index 0822ce207b5d..f27f4fdc4531 100644
--- a/drivers/interconnect/qcom/icc-common.c
+++ b/drivers/interconnect/qcom/icc-common.c
@@ -5,6 +5,7 @@
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "icc-common.h"
@@ -32,3 +33,5 @@ struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void
return ndata;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 7f6a70e0256a..39e43b957599 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -563,6 +563,8 @@ int qnoc_remove(struct platform_device *pdev)
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
EXPORT_SYMBOL(qnoc_remove);
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 114bb8f64573..fd17291c61eb 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -251,7 +251,9 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev)
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_remove);
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 6fa0ad90fc3d..5ea192f1141d 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -749,7 +749,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
static const struct of_device_id msm8974_noc_of_match[] = {
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index 4198656f4e59..ddbdf0943f94 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -217,7 +217,9 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
static int qcom_osm_l3_probe(struct platform_device *pdev)
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index e821fd0b2f66..e3a12e3d6e06 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -1933,7 +1933,9 @@ static int qnoc_remove(struct platform_device *pdev)
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_nodes_remove(&qp->provider);
- return icc_provider_del(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
}
static const struct of_device_id qnoc_of_match[] = {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5c5cb5bee8b6..dc5f7a156ff5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -67,6 +67,17 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
If unsure, say N here.
+config IOMMU_IO_PGTABLE_DART
+ bool "Apple DART Formats"
+ select IOMMU_IO_PGTABLE
+ depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ help
+ Enable support for the Apple DART pagetable formats. These include
+ the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
+ SoCs.
+
+ If unsure, say N here.
+
endmenu
config IOMMU_DEBUGFS
@@ -137,7 +148,7 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
- bool
+ def_bool ARM64 || IA64 || X86
select DMA_OPS
select IOMMU_API
select IOMMU_IOVA
@@ -294,7 +305,7 @@ config APPLE_DART
tristate "Apple DART IOMMU Support"
depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
- select IOMMU_IO_PGTABLE_LPAE
+ select IOMMU_IO_PGTABLE_DART
default ARCH_APPLE
help
Support for Apple DART (Device Address Resolution Table) IOMMUs
@@ -476,7 +487,6 @@ config VIRTIO_IOMMU
depends on VIRTIO
depends on (ARM64 || X86)
select IOMMU_API
- select IOMMU_DMA
select INTERVAL_TREE
select ACPI_VIOT if ACPI
help
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 44475a9b3eea..cc9f381013c3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index a3cbafb603f5..9b5fc3356bf2 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -9,7 +9,6 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
- select IOMMU_DMA
select IOMMU_IO_PGTABLE
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index a935f8f4b974..773d8aa00283 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 84e5bb1bf01b..c160a332ce33 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -18,7 +18,6 @@ extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5b1019dab328..1d0a70c85333 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -94,6 +94,7 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
+#define FEATURE_GIOSUP (1ULL<<48)
#define FEATURE_EPHSUP (1ULL<<50)
#define FEATURE_SNP (1ULL<<63)
@@ -276,6 +277,8 @@
* 512GB Pages are not supported due to a hardware bug
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+/* 4K, 2MB, 1G page sizes are supported */
+#define AMD_IOMMU_PGSIZES_V2 (PAGE_SIZE | (1ULL << 21) | (1ULL << 30))
/* Bit value definition for dte irq remapping fields*/
#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
@@ -376,6 +379,7 @@
#define DTE_FLAG_IW (1ULL << 62)
#define DTE_FLAG_IOTLB (1ULL << 32)
+#define DTE_FLAG_GIOV (1ULL << 54)
#define DTE_FLAG_GV (1ULL << 55)
#define DTE_FLAG_MASK (0x3ffULL << 32)
#define DTE_GLX_SHIFT (56)
@@ -434,6 +438,7 @@
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
+#define PD_GIOV_MASK (1UL << 4) /* domain enable GIOV support */
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
@@ -456,6 +461,8 @@ struct irq_remap_table {
/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;
+extern const struct iommu_ops amd_iommu_ops;
+
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
@@ -526,7 +533,8 @@ struct amd_io_pgtable {
struct io_pgtable iop;
int mode;
u64 *root;
- atomic64_t pt_root; /* pgtable root and pgtable mode */
+ atomic64_t pt_root; /* pgtable root and pgtable mode */
+ u64 *pgd; /* v2 pgtable pgd pointer */
};
/*
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index fdc642362c14..1a2d425bf568 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -95,8 +95,6 @@
* out of it.
*/
-extern const struct iommu_ops amd_iommu_ops;
-
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
@@ -2068,6 +2066,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
init_iommu_perf_ctr(iommu);
+ if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+ if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
+ !iommu_feature(iommu, FEATURE_GT)) {
+ pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (iommu_default_passthrough()) {
+ pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+ }
+
if (is_rd890_iommu(iommu->dev)) {
int i, j;
@@ -2146,6 +2155,8 @@ static void print_iommu_info(void)
if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
pr_info("X2APIC enabled\n");
}
+ if (amd_iommu_pgtable == AMD_IOMMU_V2)
+ pr_info("V2 page table enabled\n");
}
static int __init amd_iommu_init_pci(void)
@@ -2168,20 +2179,13 @@ static int __init amd_iommu_init_pci(void)
/*
* Order is important here to make sure any unity map requirements are
* fulfilled. The unity mappings are created and written to the device
- * table during the amd_iommu_init_api() call.
+ * table during the iommu_init_pci() call.
*
* After that we call init_device_table_dma() to make sure any
* uninitialized DTE will block DMA, and in the end we flush the caches
* of all IOMMUs to make sure the changes to the device table are
* active.
*/
- ret = amd_iommu_init_api();
- if (ret) {
- pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
- ret);
- goto out;
- }
-
for_each_pci_segment(pci_seg)
init_device_table_dma(pci_seg);
@@ -3366,17 +3370,30 @@ static int __init parse_amd_iommu_intr(char *str)
static int __init parse_amd_iommu_options(char *str)
{
- for (; *str; ++str) {
+ if (!str)
+ return -EINVAL;
+
+ while (*str) {
if (strncmp(str, "fullflush", 9) == 0) {
pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
iommu_set_dma_strict();
- }
- if (strncmp(str, "force_enable", 12) == 0)
+ } else if (strncmp(str, "force_enable", 12) == 0) {
amd_iommu_force_enable = true;
- if (strncmp(str, "off", 3) == 0)
+ } else if (strncmp(str, "off", 3) == 0) {
amd_iommu_disabled = true;
- if (strncmp(str, "force_isolation", 15) == 0)
+ } else if (strncmp(str, "force_isolation", 15) == 0) {
amd_iommu_force_isolation = true;
+ } else if (strncmp(str, "pgtbl_v1", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V2;
+ } else {
+ pr_notice("Unknown option - '%s'\n", str);
+ }
+
+ str += strcspn(str, ",");
+ while (*str == ',')
+ str++;
}
return 1;
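The rewritten option parser walks a comma-separated string with strcspn() instead of scanning character by character, so each token is classified once and unknown tokens can be reported. A small standalone illustration of that walk (plain userspace C, not the kernel code; the option names are just examples):

#include <stdio.h>
#include <string.h>

static void parse_options(char *str)
{
	while (*str) {
		if (strncmp(str, "pgtbl_v2", 8) == 0)
			printf("option: v2 page table\n");
		else if (strncmp(str, "off", 3) == 0)
			printf("option: disabled\n");
		else
			printf("unknown option at '%s'\n", str);

		str += strcspn(str, ",");	/* jump to the next separator ... */
		while (*str == ',')
			str++;			/* ... and past any run of commas */
	}
}

int main(void)
{
	char opts[] = "pgtbl_v2,off,bogus";

	parse_options(opts);
	return 0;
}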
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 7d4b61e5db47..ace0e9b8b913 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
* supporting all features of AMD IOMMU page tables like level skipping
* and full 64 bit address spaces.
*/
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
LIST_HEAD(freelist);
@@ -369,39 +370,47 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
u64 __pte, *pte;
int ret, i, count;
- BUG_ON(!IS_ALIGNED(iova, size));
- BUG_ON(!IS_ALIGNED(paddr, size));
+ BUG_ON(!IS_ALIGNED(iova, pgsize));
+ BUG_ON(!IS_ALIGNED(paddr, pgsize));
ret = -EINVAL;
if (!(prot & IOMMU_PROT_MASK))
goto out;
- count = PAGE_SIZE_PTE_COUNT(size);
- pte = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+ while (pgcount > 0) {
+ count = PAGE_SIZE_PTE_COUNT(pgsize);
+ pte = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);
- ret = -ENOMEM;
- if (!pte)
- goto out;
+ ret = -ENOMEM;
+ if (!pte)
+ goto out;
- for (i = 0; i < count; ++i)
- free_clear_pte(&pte[i], pte[i], &freelist);
+ for (i = 0; i < count; ++i)
+ free_clear_pte(&pte[i], pte[i], &freelist);
- if (!list_empty(&freelist))
- updated = true;
+ if (!list_empty(&freelist))
+ updated = true;
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ if (count > 1) {
+ __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+ } else
+ __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ iova += pgsize;
+ paddr += pgsize;
+ pgcount--;
+ if (mapped)
+ *mapped += pgsize;
+ }
ret = 0;
@@ -426,17 +435,18 @@ out:
return ret;
}
-static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
- unsigned long iova,
- size_t size,
- struct iommu_iotlb_gather *gather)
+static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped;
unsigned long unmap_size;
u64 *pte;
+ size_t size = pgcount << __ffs(pgsize);
- BUG_ON(!is_power_of_2(size));
+ BUG_ON(!is_power_of_2(pgsize));
unmapped = 0;
@@ -448,14 +458,14 @@ static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
+ } else {
+ return unmapped;
}
iova = (iova & ~(unmap_size - 1)) + unmap_size;
unmapped += unmap_size;
}
- BUG_ON(unmapped && !is_power_of_2(unmapped));
-
return unmapped;
}
@@ -514,8 +524,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
cfg->tlb = &v1_flush_ops;
- pgtable->iop.ops.map = iommu_v1_map_page;
- pgtable->iop.ops.unmap = iommu_v1_unmap_page;
+ pgtable->iop.ops.map_pages = iommu_v1_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
return &pgtable->iop;
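The io_pgtable.c hunks above move the v1 code from the single-page map/unmap callbacks to the batched map_pages/unmap_pages interface: the caller passes one uniform pgsize plus a pgcount, and partial progress is reported through *mapped. A hedged, simplified sketch of that contract (plain C with stand-in types, not the io-pgtable structures):

#include <stddef.h>

typedef unsigned long iova_t;

/* map a single leaf entry of pgsize bytes; returns 0 on success */
static int map_one(iova_t iova, unsigned long long paddr, size_t pgsize)
{
	return 0;
}

static int map_pages(iova_t iova, unsigned long long paddr,
		     size_t pgsize, size_t pgcount, size_t *mapped)
{
	int ret = 0;

	while (pgcount--) {
		ret = map_one(iova, paddr, pgsize);
		if (ret)
			break;		/* partial progress stays visible via *mapped */

		iova  += pgsize;
		paddr += pgsize;
		if (mapped)
			*mapped += pgsize;
	}
	return ret;
}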
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
new file mode 100644
index 000000000000..8638ddf6fb3b
--- /dev/null
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic AMD IO page table v2 allocator.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+ * Author: Vasant Hegde <vasant.hegde@amd.com>
+ */
+
+#define pr_fmt(fmt) "AMD-Vi: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+
+#include <asm/barrier.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu.h"
+
+#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
+#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
+#define IOMMU_PAGE_USER BIT_ULL(2) /* Userspace addressable */
+#define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
+#define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
+#define IOMMU_PAGE_ACCESS BIT_ULL(5) /* Was accessed (updated by IOMMU) */
+#define IOMMU_PAGE_DIRTY BIT_ULL(6) /* Was written to (updated by IOMMU) */
+#define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
+#define IOMMU_PAGE_NX BIT_ULL(63) /* No execute */
+
+#define MAX_PTRS_PER_PAGE 512
+
+#define IOMMU_PAGE_SIZE_2M BIT_ULL(21)
+#define IOMMU_PAGE_SIZE_1G BIT_ULL(30)
+
+
+static inline int get_pgtable_level(void)
+{
+ /* 5-level page table is not supported */
+ return PAGE_MODE_4_LEVEL;
+}
+
+static inline bool is_large_pte(u64 pte)
+{
+ return (pte & IOMMU_PAGE_PSE);
+}
+
+static inline void *alloc_pgtable_page(void)
+{
+ return (void *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline u64 set_pgtable_attr(u64 *page)
+{
+ u64 prot;
+
+ prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
+ prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ return (iommu_virt_to_phys(page) | prot);
+}
+
+static inline void *get_pgtable_pte(u64 pte)
+{
+ return iommu_phys_to_virt(pte & PM_ADDR_MASK);
+}
+
+static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
+{
+ u64 pte;
+
+ pte = __sme_set(paddr & PM_ADDR_MASK);
+ pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
+ pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
+
+ if (prot & IOMMU_PROT_IW)
+ pte |= IOMMU_PAGE_RW;
+
+ /* Large page */
+ if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
+ pte |= IOMMU_PAGE_PSE;
+
+ return pte;
+}
+
+static inline u64 get_alloc_page_size(u64 size)
+{
+ if (size >= IOMMU_PAGE_SIZE_1G)
+ return IOMMU_PAGE_SIZE_1G;
+
+ if (size >= IOMMU_PAGE_SIZE_2M)
+ return IOMMU_PAGE_SIZE_2M;
+
+ return PAGE_SIZE;
+}
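+
A standalone sketch (userspace, not kernel code) of the size selection above: the largest hardware leaf size that does not exceed the requested size wins, falling back to 4K.

#include <stdint.h>
#include <stdio.h>

#define SZ_4K (1ULL << 12)
#define SZ_2M (1ULL << 21)
#define SZ_1G (1ULL << 30)

static uint64_t pick_page_size(uint64_t size)
{
	if (size >= SZ_1G)
		return SZ_1G;
	if (size >= SZ_2M)
		return SZ_2M;
	return SZ_4K;
}

int main(void)
{
	/* 3 MiB -> 0x200000 (2M), 8 KiB -> 0x1000 (4K), 2 GiB -> 0x40000000 (1G) */
	printf("%llx %llx %llx\n",
	       (unsigned long long)pick_page_size(3ULL << 20),
	       (unsigned long long)pick_page_size(8ULL << 10),
	       (unsigned long long)pick_page_size(2ULL << 30));
	return 0;
}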
+
+static inline int page_size_to_level(u64 pg_size)
+{
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ return PAGE_MODE_3_LEVEL;
+ if (pg_size == IOMMU_PAGE_SIZE_2M)
+ return PAGE_MODE_2_LEVEL;
+
+ return PAGE_MODE_1_LEVEL;
+}
+
+static inline void free_pgtable_page(u64 *pt)
+{
+ free_page((unsigned long)pt);
+}
+
+static void free_pgtable(u64 *pt, int level)
+{
+ u64 *p;
+ int i;
+
+ for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
+
+ if (is_large_pte(pt[i]))
+ continue;
+
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = get_pgtable_pte(pt[i]);
+ if (level > 2)
+ free_pgtable(p, level - 1);
+ else
+ free_pgtable_page(p);
+ }
+
+ free_pgtable_page(pt);
+}
+
+/* Allocate page table */
+static u64 *v2_alloc_pte(u64 *pgd, unsigned long iova,
+ unsigned long pg_size, bool *updated)
+{
+ u64 *pte, *page;
+ int level, end_level;
+
+ level = get_pgtable_level() - 1;
+ end_level = page_size_to_level(pg_size);
+ pte = &pgd[PM_LEVEL_INDEX(level, iova)];
+ iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
+
+ while (level >= end_level) {
+ u64 __pte, __npte;
+
+ __pte = *pte;
+
+ if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
+ /* Unmap large pte */
+ cmpxchg64(pte, *pte, 0ULL);
+ *updated = true;
+ continue;
+ }
+
+ if (!IOMMU_PTE_PRESENT(__pte)) {
+ page = alloc_pgtable_page();
+ if (!page)
+ return NULL;
+
+ __npte = set_pgtable_attr(page);
+ /* pte could have been changed somewhere. */
+ if (cmpxchg64(pte, __pte, __npte) != __pte)
+ free_pgtable_page(page);
+ else if (IOMMU_PTE_PRESENT(__pte))
+ *updated = true;
+
+ continue;
+ }
+
+ level -= 1;
+ pte = get_pgtable_pte(__pte);
+ pte = &pte[PM_LEVEL_INDEX(level, iova)];
+ }
+
+ /* Tear down existing pte entries */
+ if (IOMMU_PTE_PRESENT(*pte)) {
+ u64 *__pte;
+
+ *updated = true;
+ __pte = get_pgtable_pte(*pte);
+ cmpxchg64(pte, *pte, 0ULL);
+ if (pg_size == IOMMU_PAGE_SIZE_1G)
+ free_pgtable(__pte, end_level - 1);
+ else if (pg_size == IOMMU_PAGE_SIZE_2M)
+ free_pgtable_page(__pte);
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address.
+ * If there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
+ unsigned long iova, unsigned long *page_size)
+{
+ u64 *pte;
+ int level;
+
+ level = get_pgtable_level() - 1;
+ pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
+ /* Default page size is 4K */
+ *page_size = PAGE_SIZE;
+
+ while (level) {
+ /* Not present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Walk to the next level */
+ pte = get_pgtable_pte(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
+
+ /* Large page */
+ if (is_large_pte(*pte)) {
+ if (level == PAGE_MODE_3_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_1G;
+ else if (level == PAGE_MODE_2_LEVEL)
+ *page_size = IOMMU_PAGE_SIZE_2M;
+ else
+ return NULL; /* Wrongly set PSE bit in PTE */
+
+ break;
+ }
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
+ struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+ u64 *pte;
+ unsigned long map_size;
+ unsigned long mapped_size = 0;
+ unsigned long o_iova = iova;
+ size_t size = pgcount << __ffs(pgsize);
+ int count = 0;
+ int ret = 0;
+ bool updated = false;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
+ return -EINVAL;
+
+ if (!(prot & IOMMU_PROT_MASK))
+ return -EINVAL;
+
+ while (mapped_size < size) {
+ map_size = get_alloc_page_size(pgsize);
+ pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *pte = set_pte_attr(paddr, map_size, prot);
+
+ count++;
+ iova += map_size;
+ paddr += map_size;
+ mapped_size += map_size;
+ }
+
+out:
+ if (updated) {
+ if (count > 1)
+ amd_iommu_flush_tlb(&pdom->domain, 0);
+ else
+ amd_iommu_flush_page(&pdom->domain, 0, o_iova);
+ }
+
+ if (mapped)
+ *mapped += mapped_size;
+
+ return ret;
+}
+
+static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
+ unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+ unsigned long unmap_size;
+ unsigned long unmapped = 0;
+ size_t size = pgcount << __ffs(pgsize);
+ u64 *pte;
+
+ if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
+ return 0;
+
+ while (unmapped < size) {
+ pte = fetch_pte(pgtable, iova, &unmap_size);
+ if (!pte)
+ return unmapped;
+
+ *pte = 0ULL;
+
+ iova = (iova & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ return unmapped;
+}
+
+static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+ unsigned long offset_mask, pte_pgsize;
+ u64 *pte, __pte;
+
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ offset_mask = pte_pgsize - 1;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
+
+ return (__pte & ~offset_mask) | (iova & offset_mask);
+}
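+
As a quick arithmetic check of the offset composition above (assuming a 2 MiB leaf and made-up addresses), a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pte_pgsize = 1ULL << 21;	/* 2 MiB leaf reported by fetch_pte() */
	uint64_t offset_mask = pte_pgsize - 1;	/* 0x1fffff */
	uint64_t pte_phys = 0x80000000ULL;	/* 2M-aligned PTE target */
	uint64_t iova = 0x1234abcULL;		/* arbitrary IOVA */

	uint64_t phys = (pte_phys & ~offset_mask) | (iova & offset_mask);

	printf("phys = 0x%llx\n", (unsigned long long)phys);	/* 0x80034abc */
	return 0;
}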
+
+/*
+ * ----------------------------------------------------
+ */
+static void v2_tlb_flush_all(void *cookie)
+{
+}
+
+static void v2_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+}
+
+static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+}
+
+static const struct iommu_flush_ops v2_flush_ops = {
+ .tlb_flush_all = v2_tlb_flush_all,
+ .tlb_flush_walk = v2_tlb_flush_walk,
+ .tlb_add_page = v2_tlb_add_page,
+};
+
+static void v2_free_pgtable(struct io_pgtable *iop)
+{
+ struct protection_domain *pdom;
+ struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+
+ pdom = container_of(pgtable, struct protection_domain, iop);
+ if (!(pdom->flags & PD_IOMMUV2_MASK))
+ return;
+
+ /*
+ * Make changes visible to IOMMUs. No need to clear gcr3 entry
+ * as gcr3 table is already freed.
+ */
+ amd_iommu_domain_update(pdom);
+
+ /* Free page table */
+ free_pgtable(pgtable->pgd, get_pgtable_level());
+}
+
+static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
+ struct protection_domain *pdom = (struct protection_domain *)cookie;
+ int ret;
+
+ pgtable->pgd = alloc_pgtable_page();
+ if (!pgtable->pgd)
+ return NULL;
+
+ ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
+ if (ret)
+ goto err_free_pgd;
+
+ pgtable->iop.ops.map_pages = iommu_v2_map_pages;
+ pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
+ pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+
+ cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
+ cfg->ias = IOMMU_IN_ADDR_BIT_SIZE,
+ cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
+ cfg->tlb = &v2_flush_ops;
+
+ return &pgtable->iop;
+
+err_free_pgd:
+ free_pgtable_page(pgtable->pgd);
+
+ return NULL;
+}
+
+struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
+ .alloc = v2_alloc_pgtable,
+ .free = v2_free_pgtable,
+};
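
For context, a minimal kernel-style sketch (not part of this patch) of how the new format is reached through the generic io-pgtable layer; the AMD_IOMMU_V2 format id and the registration of io_pgtable_amd_iommu_v2_init_fns are assumed to be wired up elsewhere in this series, and demo_alloc_v2() is hypothetical.

static struct io_pgtable_ops *demo_alloc_v2(struct protection_domain *pdom)
{
	struct io_pgtable_ops *ops;

	/* Dispatches to v2_alloc_pgtable() via io_pgtable_amd_iommu_v2_init_fns. */
	ops = alloc_io_pgtable_ops(AMD_IOMMU_V2, &pdom->iop.pgtbl_cfg, pdom);
	if (!ops)
		return NULL;

	/* pgsize_bitmap, ias, oas and tlb were filled in by v2_alloc_pgtable(). */
	return ops;
}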
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 65b8e4fd8217..65856e401949 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -11,8 +11,6 @@
#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
-#include <linux/amba/bus.h>
-#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -20,7 +18,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
@@ -42,6 +39,7 @@
#include <asm/dma.h>
#include "amd_iommu.h"
+#include "../dma-iommu.h"
#include "../irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -66,10 +64,6 @@ LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
-/*
- * Domain for untranslated devices - only allocated
- * if iommu=pt passed on kernel cmd line.
- */
const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -85,6 +79,7 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static int domain_enable_v2(struct protection_domain *domain, int pasids);
/****************************************************************************
*
@@ -939,7 +934,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = data;
+ cmd->data[2] = lower_32_bits(data);
+ cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
@@ -1596,6 +1592,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
flags |= tmp;
+
+ if (domain->flags & PD_GIOV_MASK)
+ pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
@@ -1649,6 +1648,10 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Override supported page sizes */
+ if (domain->flags & PD_GIOV_MASK)
+ domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+
/* Update device table */
set_dte_entry(iommu, dev_data->devid, domain,
ats, dev_data->iommu_v2);
@@ -1693,7 +1696,7 @@ static void pdev_iommuv2_disable(struct pci_dev *pdev)
pci_disable_pasid(pdev);
}
-static int pdev_iommuv2_enable(struct pci_dev *pdev)
+static int pdev_pri_ats_enable(struct pci_dev *pdev)
{
int ret;
@@ -1756,11 +1759,19 @@ static int attach_device(struct device *dev,
struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
ret = -EINVAL;
- if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
+
+ /*
+ * When using AMD_IOMMU_V1 page table mode and the device is
+ * enabled for PPR/ATS support (using the v2 table), we need to
+ * make sure that the domain type is identity map.
+ */
+ if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ def_domain->type != IOMMU_DOMAIN_IDENTITY) {
goto out;
+ }
if (dev_data->iommu_v2) {
- if (pdev_iommuv2_enable(pdev) != 0)
+ if (pdev_pri_ats_enable(pdev) != 0)
goto out;
dev_data->ats.enabled = true;
@@ -1851,6 +1862,10 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
if (!iommu)
return ERR_PTR(-ENODEV);
+ /* Not registered yet? */
+ if (!iommu->iommu.ops)
+ return ERR_PTR(-ENODEV);
+
if (dev_iommu_priv_get(dev))
return &iommu->iommu;
@@ -1937,25 +1952,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_domain_flush_complete(domain);
}
-int __init amd_iommu_init_api(void)
-{
- int err;
-
- err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
- if (err)
- return err;
-#ifdef CONFIG_ARM_AMBA
- err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
- if (err)
- return err;
-#endif
- err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
- if (err)
- return err;
-
- return 0;
-}
-
/*****************************************************************************
*
* The following functions belong to the exported interface of AMD IOMMU
@@ -1988,12 +1984,12 @@ static void protection_domain_free(struct protection_domain *domain)
if (!domain)
return;
- if (domain->id)
- domain_id_free(domain->id);
-
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
+ if (domain->id)
+ domain_id_free(domain->id);
+
kfree(domain);
}
@@ -2011,8 +2007,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
if (mode != PAGE_MODE_NONE) {
pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root)
+ if (!pt_root) {
+ domain_id_free(domain->id);
return -ENOMEM;
+ }
}
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2020,6 +2018,24 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return 0;
}
+static int protection_domain_init_v2(struct protection_domain *domain)
+{
+ spin_lock_init(&domain->lock);
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&domain->dev_list);
+
+ domain->flags |= PD_GIOV_MASK;
+
+ if (domain_enable_v2(domain, 1)) {
+ domain_id_free(domain->id);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
@@ -2047,6 +2063,9 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode);
break;
+ case AMD_IOMMU_V2:
+ ret = protection_domain_init_v2(domain);
+ break;
default:
ret = -EINVAL;
}
@@ -2055,8 +2074,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
goto out_err;
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
- if (!pgtbl_ops)
+ if (!pgtbl_ops) {
+ domain_id_free(domain->id);
goto out_err;
+ }
return domain;
out_err:
@@ -2174,13 +2195,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
- if (ops->map)
+ if (ops->map_pages)
domain_flush_np_cache(domain, iova, size);
}
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot,
- gfp_t gfp)
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2196,8 +2217,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- if (ops->map)
- ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+ if (ops->map_pages) {
+ ret = ops->map_pages(ops, iova, paddr, pgsize,
+ pgcount, prot, gfp, mapped);
+ }
return ret;
}
@@ -2223,9 +2246,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
iommu_iotlb_gather_add_range(gather, iova, size);
}
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- size_t page_size,
- struct iommu_iotlb_gather *gather)
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2235,9 +2258,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
- r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+ r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
- amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+ if (r)
+ amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
return r;
}
@@ -2251,7 +2275,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return ops->iova_to_phys(ops, iova);
}
-static bool amd_iommu_capable(enum iommu_cap cap)
+static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -2399,8 +2423,8 @@ const struct iommu_ops amd_iommu_ops = {
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .unmap = amd_iommu_unmap,
+ .map_pages = amd_iommu_map_pages,
+ .unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -2447,11 +2471,10 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
}
EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+/* Note: This function expects iommu_domain->lock to be held prior to calling it. */
+static int domain_enable_v2(struct protection_domain *domain, int pasids)
{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int levels, ret;
+ int levels;
/* Number of GCR3 table levels required */
for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
@@ -2460,7 +2483,25 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- spin_lock_irqsave(&domain->lock, flags);
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ return -ENOMEM;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+
+ amd_iommu_domain_update(domain);
+
+ return 0;
+}
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *pdom = to_pdomain(dom);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdom->lock, flags);
/*
* Save us all sanity checks whether devices already in the
@@ -2468,24 +2509,14 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
* devices attached when it is switched into IOMMUv2 mode.
*/
ret = -EBUSY;
- if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+ if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
goto out;
- ret = -ENOMEM;
- domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
- goto out;
-
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
-
- amd_iommu_domain_update(domain);
-
- ret = 0;
+ if (!pdom->gcr3_tbl)
+ ret = domain_enable_v2(pdom, pasids);
out:
- spin_unlock_irqrestore(&domain->lock, flags);
-
+ spin_unlock_irqrestore(&pdom->lock, flags);
return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
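
The GCR3 level computation above (the loop body elided between the two hunks increments levels) resolves 9 PASID bits per table level; a standalone sketch with a few sample counts:

#include <stdio.h>

static int gcr3_levels(int pasids)
{
	int levels;

	/* Each GCR3 table level resolves 9 PASID bits (512 entries). */
	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	return levels;
}

int main(void)
{
	/* 512 PASIDs -> glx 0, 65536 -> glx 1, 1048576 -> glx 2 */
	printf("%d %d %d\n", gcr3_levels(512), gcr3_levels(1 << 16),
	       gcr3_levels(1 << 20));
	return 0;
}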
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 696d5555be57..6a1f02c62dff 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -777,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (dev_state->domain == NULL)
goto out_free_states;
+ /* See iommu_is_default_domain() */
+ dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
amd_iommu_domain_direct_map(dev_state->domain);
ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 1b1725759262..4526575b999e 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -15,7 +15,6 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dev_printk.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -33,6 +32,8 @@
#include <linux/swab.h>
#include <linux/types.h>
+#include "dma-iommu.h"
+
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2
@@ -81,10 +82,16 @@
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
+struct apple_dart_hw {
+ u32 oas;
+ enum io_pgtable_fmt fmt;
+};
+
/*
* Private structure associated with each DART device.
*
* @dev: device struct
+ * @hw: SoC-specific hardware data
* @regs: mapped MMIO region
* @irq: interrupt number, can be shared with other DARTs
* @clks: clocks associated with this DART
@@ -98,6 +105,7 @@
*/
struct apple_dart {
struct device *dev;
+ const struct apple_dart_hw *hw;
void __iomem *regs;
@@ -421,13 +429,13 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
pgtbl_cfg = (struct io_pgtable_cfg){
.pgsize_bitmap = dart->pgsize,
.ias = 32,
- .oas = 36,
+ .oas = dart->hw->oas,
.coherent_walk = 1,
.iommu_dev = dart->dev,
};
dart_domain->pgtbl_ops =
- alloc_io_pgtable_ops(APPLE_DART, &pgtbl_cfg, domain);
+ alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
if (!dart_domain->pgtbl_ops) {
ret = -ENOMEM;
goto done;
@@ -820,27 +828,6 @@ static irqreturn_t apple_dart_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int apple_dart_set_bus_ops(const struct iommu_ops *ops)
-{
- int ret;
-
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, ops);
- if (ret)
- return ret;
- }
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- ret = bus_set_iommu(&pci_bus_type, ops);
- if (ret) {
- bus_set_iommu(&platform_bus_type, NULL);
- return ret;
- }
- }
-#endif
- return 0;
-}
-
static int apple_dart_probe(struct platform_device *pdev)
{
int ret;
@@ -854,6 +841,7 @@ static int apple_dart_probe(struct platform_device *pdev)
return -ENOMEM;
dart->dev = dev;
+ dart->hw = of_device_get_match_data(dev);
spin_lock_init(&dart->lock);
dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -895,14 +883,10 @@ static int apple_dart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dart);
- ret = apple_dart_set_bus_ops(&apple_dart_iommu_ops);
- if (ret)
- goto err_free_irq;
-
ret = iommu_device_sysfs_add(&dart->iommu, dev, NULL, "apple-dart.%s",
dev_name(&pdev->dev));
if (ret)
- goto err_remove_bus_ops;
+ goto err_free_irq;
ret = iommu_device_register(&dart->iommu, &apple_dart_iommu_ops, dev);
if (ret)
@@ -916,8 +900,6 @@ static int apple_dart_probe(struct platform_device *pdev)
err_sysfs_remove:
iommu_device_sysfs_remove(&dart->iommu);
-err_remove_bus_ops:
- apple_dart_set_bus_ops(NULL);
err_free_irq:
free_irq(dart->irq, dart);
err_clk_disable:
@@ -932,7 +914,6 @@ static int apple_dart_remove(struct platform_device *pdev)
apple_dart_hw_reset(dart);
free_irq(dart->irq, dart);
- apple_dart_set_bus_ops(NULL);
iommu_device_unregister(&dart->iommu);
iommu_device_sysfs_remove(&dart->iommu);
@@ -942,8 +923,18 @@ static int apple_dart_remove(struct platform_device *pdev)
return 0;
}
+static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ .oas = 36,
+ .fmt = APPLE_DART,
+};
+static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ .oas = 42,
+ .fmt = APPLE_DART2,
+};
+
static const struct of_device_id apple_dart_of_match[] = {
- { .compatible = "apple,t8103-dart", .data = NULL },
+ { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{},
};
MODULE_DEVICE_TABLE(of, apple_dart_of_match);
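
Since of_device_get_match_data() now selects the per-SoC parameters, supporting another DART variant only needs one more hw struct and match entry. A hypothetical sketch (the "apple,t9999-dart" compatible and its values are made up for illustration):

static const struct apple_dart_hw apple_dart_hw_t9999 = {
	.oas = 42,
	.fmt = APPLE_DART2,
};

/* ...plus one more row in apple_dart_of_match[]: */
/* { .compatible = "apple,t9999-dart", .data = &apple_dart_hw_t9999 }, */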
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 1ef7bbb4acf3..5968a568aae2 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
}
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+ par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
cd->ttbr = virt_to_phys(mm->pgd);
@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
* addresses larger than what we support.
*/
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
oas = id_aa64mmfr0_parange_to_phys_shift(fld);
if (smmu->oas < oas)
return false;
/* We can support bigger ASIDs than the CPU, but not smaller */
- fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);
asid_bits = fld ? 16 : 8;
if (smmu->asid_bits < asid_bits)
return false;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index d32b02336411..ba47c73f5b8c 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -14,7 +14,6 @@
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
@@ -28,9 +27,8 @@
#include <linux/pci-ats.h>
#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-
#include "arm-smmu-v3.h"
+#include "../../dma-iommu.h"
#include "../../iommu-sva-lib.h"
static bool disable_bypass = true;
@@ -1992,11 +1990,14 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
};
/* IOMMU API */
-static bool arm_smmu_capable(enum iommu_cap cap)
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- return true;
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return master->smmu->features & ARM_SMMU_FEAT_COHERENCY;
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -2817,6 +2818,26 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
}
}
+/*
+ * HiSilicon PCIe tune and trace device can be used to trace TLP headers on the
+ * PCIe link and save the data to memory by DMA. The hardware is restricted to
+ * use identity mapping only.
+ */
+#define IS_HISI_PTT_DEVICE(pdev) ((pdev)->vendor == PCI_VENDOR_ID_HUAWEI && \
+ (pdev)->device == 0xa12e)
+
+static int arm_smmu_def_domain_type(struct device *dev)
+{
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (IS_HISI_PTT_DEVICE(pdev))
+ return IOMMU_DOMAIN_IDENTITY;
+ }
+
+ return 0;
+}
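+
A kernel-style sketch (not part of this patch) of the ->def_domain_type() contract used above: returning IOMMU_DOMAIN_IDENTITY forces an identity default domain for the matched device, while returning 0 leaves the choice to the core. The vendor/device IDs below are placeholders, not a real quirk.

static int demo_def_domain_type(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))
		return 0;

	pdev = to_pci_dev(dev);
	if (pdev->vendor == 0x1234 && pdev->device == 0xabcd)	/* placeholder IDs */
		return IOMMU_DOMAIN_IDENTITY;

	return 0;
}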
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -2831,6 +2852,7 @@ static struct iommu_ops arm_smmu_ops = {
.sva_unbind = arm_smmu_sva_unbind,
.sva_get_pasid = arm_smmu_sva_get_pasid,
.page_response = arm_smmu_page_response,
+ .def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -3673,43 +3695,6 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
return SZ_128K;
}
-static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
-{
- int err;
-
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != ops) {
- err = bus_set_iommu(&pci_bus_type, ops);
- if (err)
- return err;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != ops) {
- err = bus_set_iommu(&amba_bustype, ops);
- if (err)
- goto err_reset_pci_ops;
- }
-#endif
- if (platform_bus_type.iommu_ops != ops) {
- err = bus_set_iommu(&platform_bus_type, ops);
- if (err)
- goto err_reset_amba_ops;
- }
-
- return 0;
-
-err_reset_amba_ops:
-#ifdef CONFIG_ARM_AMBA
- bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
- return err;
-}
-
static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
resource_size_t size)
{
@@ -3848,27 +3833,17 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ret;
}
- ret = arm_smmu_set_bus_ops(&arm_smmu_ops);
- if (ret)
- goto err_unregister_device;
-
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&smmu->iommu);
-err_sysfs_remove:
- iommu_device_sysfs_remove(&smmu->iommu);
- return ret;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
- arm_smmu_set_bus_ops(NULL);
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dfa82df00342..6c1114a4d6cc 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -21,7 +21,6 @@
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -37,10 +36,10 @@
#include <linux/ratelimit.h>
#include <linux/slab.h>
-#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>
#include "arm-smmu.h"
+#include "../../dma-iommu.h"
/*
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
@@ -93,8 +92,6 @@ static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
-static int arm_smmu_bus_init(struct iommu_ops *ops);
-
static struct device_node *dev_get_dev_node(struct device *dev)
{
if (dev_is_pci(dev)) {
@@ -180,20 +177,6 @@ static int arm_smmu_register_legacy_master(struct device *dev,
kfree(sids);
return err;
}
-
-/*
- * With the legacy DT binding in play, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no probe_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
- if (using_legacy_binding)
- return arm_smmu_bus_init(&arm_smmu_ops);
- return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
#else
static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu)
@@ -1330,15 +1313,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return ops->iova_to_phys(ops, iova);
}
-static bool arm_smmu_capable(enum iommu_cap cap)
+static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- /*
- * Return true here as the SMMU can always send out coherent
- * requests.
- */
- return true;
+ /* Assume that a coherent TCU implies coherent TBUs */
+ return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -2016,52 +1998,6 @@ static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
return 0;
}
-static int arm_smmu_bus_init(struct iommu_ops *ops)
-{
- int err;
-
- /* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type)) {
- err = bus_set_iommu(&platform_bus_type, ops);
- if (err)
- return err;
- }
-#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype)) {
- err = bus_set_iommu(&amba_bustype, ops);
- if (err)
- goto err_reset_platform_ops;
- }
-#endif
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- err = bus_set_iommu(&pci_bus_type, ops);
- if (err)
- goto err_reset_amba_ops;
- }
-#endif
-#ifdef CONFIG_FSL_MC_BUS
- if (!iommu_present(&fsl_mc_bus_type)) {
- err = bus_set_iommu(&fsl_mc_bus_type, ops);
- if (err)
- goto err_reset_pci_ops;
- }
-#endif
- return 0;
-
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
-err_reset_amba_ops: __maybe_unused;
-#ifdef CONFIG_ARM_AMBA
- bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_platform_ops: __maybe_unused;
- bus_set_iommu(&platform_bus_type, NULL);
- return err;
-}
-
static void arm_smmu_rmr_install_bypass_smr(struct arm_smmu_device *smmu)
{
struct list_head rmr_list;
@@ -2226,7 +2162,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
- goto err_sysfs_remove;
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return err;
}
platform_set_drvdata(pdev, smmu);
@@ -2248,24 +2185,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
}
- /*
- * For ACPI and generic DT bindings, an SMMU will be probed before
- * any device which might need it, so we want the bus ops in place
- * ready to handle default domain setup as soon as any SMMU exists.
- */
- if (!using_legacy_binding) {
- err = arm_smmu_bus_init(&arm_smmu_ops);
- if (err)
- goto err_unregister_device;
- }
-
return 0;
-
-err_unregister_device:
- iommu_device_unregister(&smmu->iommu);
-err_sysfs_remove:
- iommu_device_sysfs_remove(&smmu->iommu);
- return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
@@ -2278,7 +2198,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
dev_notice(&pdev->dev, "disabling translation\n");
- arm_smmu_bus_init(NULL);
iommu_device_unregister(&smmu->iommu);
iommu_device_sysfs_remove(&smmu->iommu);
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17235116d3bb..3869c3ecda8c 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -493,7 +493,7 @@ static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static bool qcom_iommu_capable(enum iommu_cap cap)
+static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -837,8 +837,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
-
if (qcom_iommu->local_base) {
pm_runtime_get_sync(dev);
writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
@@ -856,8 +854,6 @@ static int qcom_iommu_device_remove(struct platform_device *pdev)
{
struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);
- bus_set_iommu(&platform_bus_type, NULL);
-
pm_runtime_force_suspend(&pdev->dev);
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&qcom_iommu->iommu);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 17dd683b2fce..9297b741f5e8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -13,7 +13,6 @@
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
@@ -30,6 +29,8 @@
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+#include "dma-iommu.h"
+
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -1633,6 +1634,13 @@ out_free_page:
return NULL;
}
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
@@ -1661,8 +1669,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
return 0;
}
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
+/**
+ * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
+ * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
+ * @msg: MSI message containing target physical address
+ */
+void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
new file mode 100644
index 000000000000..942790009292
--- /dev/null
+++ b/drivers/iommu/dma-iommu.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-2015 ARM Ltd.
+ */
+#ifndef __DMA_IOMMU_H
+#define __DMA_IOMMU_H
+
+#include <linux/iommu.h>
+
+#ifdef CONFIG_IOMMU_DMA
+
+int iommu_get_dma_cookie(struct iommu_domain *domain);
+void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+int iommu_dma_init_fq(struct iommu_domain *domain);
+
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+
+extern bool iommu_dma_forcedac;
+
+#else /* CONFIG_IOMMU_DMA */
+
+static inline int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+ return -EINVAL;
+}
+
+static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
+#endif /* CONFIG_IOMMU_DMA */
+#endif /* __DMA_IOMMU_H */
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 8e18984a0c4f..45fd4850bacb 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1446,16 +1446,7 @@ static int __init exynos_iommu_init(void)
goto err_zero_lv2;
}
- ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
- if (ret) {
- pr_err("%s: Failed to register exynos-iommu driver.\n",
- __func__);
- goto err_set_iommu;
- }
-
return 0;
-err_set_iommu:
- kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 011f9ab7f743..fa20f4b03e12 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -178,7 +178,7 @@ static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
return iova;
}
-static bool fsl_pamu_capable(enum iommu_cap cap)
+static bool fsl_pamu_capable(struct device *dev, enum iommu_cap cap)
{
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
@@ -476,11 +476,7 @@ int __init pamu_domain_init(void)
if (ret) {
iommu_device_sysfs_remove(&pamu_iommu);
pr_err("Can't register iommu device\n");
- return ret;
}
- bus_set_iommu(&platform_bus_type, &fsl_pamu_ops);
- bus_set_iommu(&pci_bus_type, &fsl_pamu_ops);
-
return ret;
}
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 39a06d245f12..b7dff5092fd2 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -19,8 +19,9 @@ config INTEL_IOMMU
select DMAR_TABLE
select SWIOTLB
select IOASID
- select IOMMU_DMA
select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -48,10 +49,7 @@ config INTEL_IOMMU_DEBUGFS
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on X86_64
- select PCI_PASID
- select PCI_PRI
select MMU_NOTIFIER
- select IOASID
select IOMMU_SVA
help
Shared Virtual Memory (SVM) provides a facility for devices
diff --git a/drivers/iommu/intel/cap_audit.c b/drivers/iommu/intel/cap_audit.c
index 3ee68393122f..806986696841 100644
--- a/drivers/iommu/intel/cap_audit.c
+++ b/drivers/iommu/intel/cap_audit.c
@@ -37,7 +37,7 @@ static inline void check_dmar_capabilities(struct intel_iommu *a,
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_MHMV_MASK);
MINIMAL_FEATURE_IOMMU(b, ecap, ECAP_IRO_MASK);
- CHECK_FEATURE_MISMATCH(a, b, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH(a, b, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH(a, b, cap, write_drain, CAP_WD_MASK);
@@ -84,7 +84,7 @@ static int cap_audit_hotplug(struct intel_iommu *iommu, enum cap_audit_type type
goto out;
}
- CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, 5lp_support, CAP_FL5LP_MASK);
+ CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl5lp_support, CAP_FL5LP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, fl1gp_support, CAP_FL1GP_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, read_drain, CAP_RD_MASK);
CHECK_FEATURE_MISMATCH_HOTPLUG(iommu, cap, write_drain, CAP_WD_MASK);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7cca030a508e..a8b36c3fddf1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -15,7 +15,6 @@
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
#include <linux/dmi.h>
#include <linux/intel-svm.h>
#include <linux/memory.h>
@@ -26,6 +25,7 @@
#include <linux/tboot.h>
#include "iommu.h"
+#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
#include "pasid.h"
@@ -163,38 +163,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-static inline void context_clear_pasid_enable(struct context_entry *context)
-{
- context->lo &= ~(1ULL << 11);
-}
-
-static inline bool context_pasid_enabled(struct context_entry *context)
-{
- return !!(context->lo & (1ULL << 11));
-}
-
-static inline void context_set_copied(struct context_entry *context)
-{
- context->hi |= (1ull << 3);
-}
-
-static inline bool context_copied(struct context_entry *context)
-{
- return !!(context->hi & (1ULL << 3));
-}
-
-static inline bool __context_present(struct context_entry *context)
-{
- return (context->lo & 1);
-}
-
-bool context_present(struct context_entry *context)
-{
- return context_pasid_enabled(context) ?
- __context_present(context) :
- __context_present(context) && !context_copied(context);
-}
-
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -231,6 +199,11 @@ static inline void context_set_domain_id(struct context_entry *context,
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
+static inline void context_set_pasid(struct context_entry *context)
+{
+ context->lo |= CONTEXT_PASIDE;
+}
+
static inline int context_domain_id(struct context_entry *c)
{
return((c->hi >> 8) & 0xffff);
@@ -242,6 +215,26 @@ static inline void context_clear_entry(struct context_entry *context)
context->hi = 0;
}
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ if (!iommu->copied_tables)
+ return false;
+
+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
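+
The copied-tables bitmap above uses one bit per (bus, devfn) pair, 256 * 256 = 65536 bits in total; a standalone sketch of the index arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int bus = 0x3a, devfn = 0x10;	/* PCI address 3a:02.0 */
	long idx = ((long)bus << 8) | devfn;

	printf("bit index = %ld\n", idx);	/* prints 14864 */
	return 0;
}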
+
/*
* This domain is a statically identity mapping domain.
* 1. This domain creats a static 1:1 mapping to all usable memory.
@@ -402,14 +395,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+ unsigned long fl_sagaw, sl_sagaw;
+
+ fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
+ sl_sagaw = cap_sagaw(iommu->cap);
+
+ /* Second level only. */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return sl_sagaw;
+
+ /* First level only. */
+ if (!ecap_slts(iommu->ecap))
+ return fl_sagaw;
+
+ return fl_sagaw & sl_sagaw;
+}
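+
A standalone sketch of the SAGAW intersection above, assuming a hypothetical unit that supports both translation levels, 3- and 4-level second-level tables, and no 5-level first-level paging (per the encoding referenced in the comment, bit 2 = 4-level, bit 3 = 5-level):

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long fl_sagaw = BIT(2);		/* no FL5LP: 4-level only  */
	unsigned long sl_sagaw = BIT(1) | BIT(2);	/* sample cap_sagaw() bits */

	/* Both levels usable -> intersect; only 4-level (bit 2) survives. */
	printf("sagaw = 0x%lx\n", fl_sagaw & sl_sagaw);	/* prints 0x4 */
	return 0;
}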
+
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
+ sagaw = __iommu_calculate_sagaw(iommu);
+ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
@@ -505,8 +520,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
/*
* There could possibly be multiple device numa nodes as devices
@@ -518,7 +534,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return nid;
}
@@ -578,6 +594,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
struct context_entry *context;
u64 *entry;
+ /*
+ * Unless the caller requested to allocate a new entry,
+ * returning a copied context entry makes no sense.
+ */
+ if (!alloc && context_copied(iommu, bus, devfn))
+ return NULL;
+
entry = &root->lo;
if (sm_supported(iommu)) {
if (devfn >= 0x80) {
@@ -795,32 +818,11 @@ static void free_context_table(struct intel_iommu *iommu)
}
#ifdef CONFIG_DMAR_DEBUG
-static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn)
+static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
+ u8 bus, u8 devfn, struct dma_pte *parent, int level)
{
- struct device_domain_info *info;
- struct dma_pte *parent, *pte;
- struct dmar_domain *domain;
- struct pci_dev *pdev;
- int offset, level;
-
- pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
- if (!pdev)
- return;
-
- info = dev_iommu_priv_get(&pdev->dev);
- if (!info || !info->domain) {
- pr_info("device [%02x:%02x.%d] not probed\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- return;
- }
-
- domain = info->domain;
- level = agaw_to_level(domain->agaw);
- parent = domain->pgd;
- if (!parent) {
- pr_info("no page table setup\n");
- return;
- }
+ struct dma_pte *pte;
+ int offset;
while (1) {
offset = pfn_level_offset(pfn, level);
@@ -847,9 +849,10 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
struct pasid_entry *entries, *pte;
struct context_entry *ctx_entry;
struct root_entry *rt_entry;
+ int i, dir_index, index, level;
u8 devfn = source_id & 0xff;
u8 bus = source_id >> 8;
- int i, dir_index, index;
+ struct dma_pte *pgtable;
pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
@@ -877,8 +880,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
ctx_entry->hi, ctx_entry->lo);
/* legacy mode does not require PASID entries */
- if (!sm_supported(iommu))
+ if (!sm_supported(iommu)) {
+ level = agaw_to_level(ctx_entry->hi & 7);
+ pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
goto pgtable_walk;
+ }
/* get the pointer to pasid directory entry */
dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
@@ -905,8 +911,16 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
for (i = 0; i < ARRAY_SIZE(pte->val); i++)
pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
+ if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
+ level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
+ pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
+ } else {
+ level = agaw_to_level((pte->val[0] >> 2) & 0x7);
+ pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
+ }
+
pgtable_walk:
- pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn);
+ pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
}
#endif
@@ -1225,6 +1239,13 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ /*
+ * Hardware invalidates all DMA remapping hardware translation
+ * caches as part of SRTP flow.
+ */
+ if (cap_esrtps(iommu->cap))
+ return;
+
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
if (sm_supported(iommu))
qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
@@ -1341,23 +1362,21 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
}
static struct device_domain_info *
-iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
- u8 bus, u8 devfn)
+domain_lookup_dev_info(struct dmar_domain *domain,
+ struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct device_domain_info *info;
+ unsigned long flags;
- if (!iommu->qi)
- return NULL;
-
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- spin_unlock(&domain->lock);
- return info->ats_supported ? info : NULL;
+ spin_unlock_irqrestore(&domain->lock, flags);
+ return info;
}
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
@@ -1366,8 +1385,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
@@ -1375,10 +1395,10 @@ static void domain_update_iotlb(struct dmar_domain *domain)
}
}
domain->has_iotlb_device = has_iotlb_device;
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+static void iommu_enable_pci_caps(struct device_domain_info *info)
{
struct pci_dev *pdev;
@@ -1401,7 +1421,6 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
info->pfsid = pci_dev_id(pf_pdev);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
the device if you enable PASID support after ATS support is
undefined. So always enable PASID support on devices which
@@ -1414,7 +1433,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
(info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
!pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
info->pri_enabled = 1;
-#endif
+
if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
@@ -1437,16 +1456,16 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
info->ats_enabled = 0;
domain_update_iotlb(info->domain);
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
+
if (info->pri_enabled) {
pci_disable_pri(pdev);
info->pri_enabled = 0;
}
+
if (info->pasid_enabled) {
pci_disable_pasid(pdev);
info->pasid_enabled = 0;
}
-#endif
}
static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
@@ -1467,14 +1486,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!domain->has_iotlb_device)
return;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1688,6 +1708,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
+ if (iommu->copied_tables) {
+ bitmap_free(iommu->copied_tables);
+ iommu->copied_tables = NULL;
+ }
+
/* free context mapping */
free_context_table(iommu);
@@ -1890,7 +1915,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
- iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+ domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
struct context_entry *context;
@@ -1913,7 +1938,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
ret = 0;
- if (context_present(context))
+ if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
/*
@@ -1925,7 +1950,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* in-flight DMA will exist, and we don't need to worry anymore
* hereafter.
*/
- if (context_copied(context)) {
+ if (context_copied(iommu, bus, devfn)) {
u16 did_old = context_domain_id(context);
if (did_old < cap_ndoms(iommu->cap)) {
@@ -1936,6 +1961,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
+
+ clear_context_copied(iommu, bus, devfn);
}
context_clear_entry(context);
@@ -1961,6 +1988,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_sm_dte(context);
if (info && info->pri_supported)
context_set_sm_pre(context);
+ if (info && info->pasid_supported)
+ context_set_pasid(context);
} else {
struct dma_pte *pgd = domain->pgd;
int agaw;
@@ -2018,7 +2047,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
} else {
iommu_flush_write_buffer(iommu);
}
- iommu_enable_dev_iotlb(info);
+ iommu_enable_pci_caps(info);
ret = 0;
@@ -2429,6 +2458,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
+ unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2440,9 +2470,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (ret)
return ret;
info->domain = domain;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2684,32 +2714,14 @@ static int copy_context_table(struct intel_iommu *iommu,
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
- if (!__context_present(&ce))
+ if (!context_present(&ce))
continue;
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
set_bit(did, iommu->domain_ids);
- /*
- * We need a marker for copied context entries. This
- * marker needs to work for the old format as well as
- * for extended context entries.
- *
- * Bit 67 of the context entry is used. In the old
- * format this bit is available to software, in the
- * extended format it is the PGE bit, but PGE is ignored
- * by HW if PASIDs are disabled (and thus still
- * available).
- *
- * So disable PASIDs first and then mark the entry
- * copied. This means that we don't copy PASID
- * translations from the old kernel, but this is fine as
- * faults there are not fatal.
- */
- context_clear_pasid_enable(&ce);
- context_set_copied(&ce);
-
+ set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
}
@@ -2735,8 +2747,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
- new_ext = !!ecap_ecs(iommu->ecap);
+ ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
+ new_ext = !!sm_supported(iommu);
/*
* The RTT bit can only be changed when translation is disabled,
@@ -2747,6 +2759,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
if (new_ext != ext)
return -EINVAL;
+ iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
+ if (!iommu->copied_tables)
+ return -ENOMEM;
+
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
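For context on the hunks above: the old in-entry "copied" marker bit is replaced by a per-IOMMU bitmap indexed by the 16-bit PCI source-id (bus:devfn), which is why copy_translation_tables() sizes it with BIT_ULL(16). A minimal user-space sketch of that bookkeeping, with illustrative helper names that are not the kernel's:

/* Sketch only: a 2^16-bit map keyed by PCI source-id ((bus << 8) | devfn). */
#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

struct copied_map {
	unsigned long bits[(1u << 16) / BITS_PER_WORD];
};

static inline unsigned int sid(uint8_t bus, uint8_t devfn)
{
	return ((unsigned int)bus << 8) | devfn;
}

static inline void sid_set_copied(struct copied_map *m, uint8_t bus, uint8_t devfn)
{
	m->bits[sid(bus, devfn) / BITS_PER_WORD] |=
		1UL << (sid(bus, devfn) % BITS_PER_WORD);
}

static inline bool sid_is_copied(const struct copied_map *m, uint8_t bus, uint8_t devfn)
{
	return m->bits[sid(bus, devfn) / BITS_PER_WORD] >>
	       (sid(bus, devfn) % BITS_PER_WORD) & 1;
}

set_context_copied()/context_copied()/clear_context_copied() in the hunks above play the role of sid_set_copied()/sid_is_copied() here, one bit per possible requester ID on the segment.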
@@ -3890,7 +3906,6 @@ static int __init probe_acpi_namespace_devices(void)
continue;
}
- pn->dev->bus->iommu_ops = &intel_iommu_ops;
ret = iommu_probe_device(pn->dev);
if (ret)
break;
@@ -4023,7 +4038,6 @@ int __init intel_iommu_init(void)
}
up_read(&dmar_global_lock);
- bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
@@ -4080,6 +4094,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4091,9 +4106,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
intel_pasid_free_table(info->dev);
}
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_del(&info->link);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
domain_detach_iommu(domain, iommu);
info->domain = NULL;
@@ -4412,24 +4427,25 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock(&dmar_domain->lock);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return true;
}
-static bool intel_iommu_capable(enum iommu_cap cap)
+static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
return true;
@@ -4449,7 +4465,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
+ if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -4566,52 +4582,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
list_add_tail(&reg->list, head);
}
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct context_entry *context;
- struct dmar_domain *domain;
- u64 ctx_lo;
- int ret;
-
- domain = info->domain;
- if (!domain)
- return -EINVAL;
-
- spin_lock(&iommu->lock);
- ret = -EINVAL;
- if (!info->pasid_supported)
- goto out;
-
- context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
- if (WARN_ON(!context))
- goto out;
-
- ctx_lo = context[0].lo;
-
- if (!(ctx_lo & CONTEXT_PASIDE)) {
- ctx_lo |= CONTEXT_PASIDE;
- context[0].lo = ctx_lo;
- wmb();
- iommu->flush.flush_context(iommu,
- domain_id_iommu(domain, iommu),
- PCI_DEVID(info->bus, info->devfn),
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
- }
-
- /* Enable PASID support in the device, if it wasn't already */
- if (!info->pasid_enabled)
- iommu_enable_dev_iotlb(info);
-
- ret = 0;
-
- out:
- spin_unlock(&iommu->lock);
-
- return ret;
-}
-
static struct iommu_group *intel_iommu_device_group(struct device *dev)
{
if (dev_is_pci(dev))
@@ -4635,9 +4605,6 @@ static int intel_iommu_enable_sva(struct device *dev)
if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
return -ENODEV;
- if (intel_iommu_enable_pasid(iommu, dev))
- return -ENODEV;
-
if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
return -EINVAL;
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index fae45bbb0c7f..92023dff9513 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -146,7 +146,9 @@
/*
* Decoding Capability Register
*/
-#define cap_5lp_support(c) (((c) >> 60) & 1)
+#define cap_esrtps(c) (((c) >> 63) & 1)
+#define cap_esirtps(c) (((c) >> 62) & 1)
+#define cap_fl5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
@@ -197,7 +199,6 @@
#define ecap_dis(e) (((e) >> 27) & 0x1)
#define ecap_nest(e) (((e) >> 26) & 0x1)
#define ecap_mts(e) (((e) >> 25) & 0x1)
-#define ecap_ecs(e) (((e) >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
@@ -265,7 +266,6 @@
#define DMA_GSTS_CFIS (((u32)1) << 23)
/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)
/* CCMD_REG */
@@ -579,6 +579,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
+ unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -587,6 +588,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ unsigned long prq_seq_number;
struct completion prq_complete;
struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
@@ -701,6 +703,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
+static inline bool context_present(struct context_entry *context)
+{
+ return (context->lo & 1);
+}
+
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_enable_qi(struct intel_iommu *iommu);
@@ -737,7 +744,6 @@ extern int dmar_ir_support(void);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -757,7 +763,6 @@ struct intel_svm_dev {
struct device *dev;
struct intel_iommu *iommu;
struct iommu_sva sva;
- unsigned long prq_seq_number;
u32 pasid;
int users;
u16 did;
@@ -784,7 +789,6 @@ static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 2e9683e970f8..5962bb5027d0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -494,7 +494,8 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
* Global invalidation of interrupt entry cache to make sure the
* hardware uses the new irq remapping table.
*/
- qi_global_iec(iommu);
+ if (!cap_esirtps(iommu->cap))
+ qi_global_iec(iommu);
}
static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
@@ -680,7 +681,8 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
* global invalidation of interrupt entry cache before disabling
* interrupt-remapping.
*/
- qi_global_iec(iommu);
+ if (!cap_esirtps(iommu->cap))
+ qi_global_iec(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flags);
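The two hunks above skip the global invalidation of the interrupt entry cache when the hardware advertises ESIRTPS. A stand-alone sketch of that capability test, using the bit position introduced in the header change earlier in this diff (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Bit 62 of the VT-d capability register: the hardware invalidates the
 * interrupt entry cache itself when the remapping table pointer is set,
 * so the explicit global flush is redundant. */
#define CAP_ESIRTPS(c)	(((c) >> 62) & 1ULL)

static bool need_manual_iec_flush(uint64_t cap_reg)
{
	return !CAP_ESIRTPS(cap_reg);
}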
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c5e7e8b020a5..c30ddac40ee5 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -392,16 +392,6 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
-/*
- * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
- * of a scalable mode PASID entry.
- */
-static inline void
-pasid_set_eafe(struct pasid_entry *pe)
-{
- pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
-}
-
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid)
@@ -529,7 +519,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
}
- if ((flags & PASID_FLAG_FL5LP) && !cap_5lp_support(iommu->cap)) {
+ if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
pr_err("No 5-level paging support for first-level on %s\n",
iommu->name);
return -EINVAL;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 8bcfb93dda56..7d08eb034f2d 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -49,23 +49,6 @@ static void *pasid_private_find(ioasid_t pasid)
}
static struct intel_svm_dev *
-svm_lookup_device_by_sid(struct intel_svm *svm, u16 sid)
-{
- struct intel_svm_dev *sdev = NULL, *t;
-
- rcu_read_lock();
- list_for_each_entry_rcu(t, &svm->devs, list) {
- if (t->sid == sid) {
- sdev = t;
- break;
- }
- }
- rcu_read_unlock();
-
- return sdev;
-}
-
-static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
struct intel_svm_dev *sdev = NULL, *t;
@@ -181,7 +164,7 @@ void intel_svm_check(struct intel_iommu *iommu)
}
if (cpu_feature_enabled(X86_FEATURE_LA57) &&
- !cap_5lp_support(iommu->cap)) {
+ !cap_fl5lp_support(iommu->cap)) {
pr_err("%s SVM disabled, incompatible paging mode\n",
iommu->name);
return;
@@ -706,11 +689,10 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
static irqreturn_t prq_event_thread(int irq, void *d)
{
- struct intel_svm_dev *sdev = NULL;
struct intel_iommu *iommu = d;
- struct intel_svm *svm = NULL;
struct page_req_dsc *req;
int head, tail, handled;
+ struct pci_dev *pdev;
u64 address;
/*
@@ -730,8 +712,6 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("IOMMU: %s: Page request without PASID\n",
iommu->name);
bad_req:
- svm = NULL;
- sdev = NULL;
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
goto prq_advance;
}
@@ -758,34 +738,19 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance;
- if (!svm || svm->pasid != req->pasid) {
- /*
- * It can't go away, because the driver is not permitted
- * to unbind the mm while any page faults are outstanding.
- */
- svm = pasid_private_find(req->pasid);
- if (IS_ERR_OR_NULL(svm) || (svm->flags & SVM_FLAG_SUPERVISOR_MODE))
- goto bad_req;
- }
-
- if (!sdev || sdev->sid != req->rid) {
- sdev = svm_lookup_device_by_sid(svm, req->rid);
- if (!sdev)
- goto bad_req;
- }
-
- sdev->prq_seq_number++;
-
+ pdev = pci_get_domain_bus_and_slot(iommu->segment,
+ PCI_BUS_NUM(req->rid),
+ req->rid & 0xff);
/*
* If the prq is to be handled outside the iommu driver, via the
* receiver of the fault notifiers, we skip the page response here.
*/
- if (intel_svm_prq_report(iommu, sdev->dev, req))
+ if (!pdev || intel_svm_prq_report(iommu, &pdev->dev, req))
handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- trace_prq_report(iommu, sdev->dev, req->qw_0, req->qw_1,
+ trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
req->priv_data[0], req->priv_data[1],
- sdev->prq_seq_number);
+ iommu->prq_seq_number++);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -881,8 +846,6 @@ int intel_svm_page_response(struct device *dev,
struct iommu_page_response *msg)
{
struct iommu_fault_page_request *prm;
- struct intel_svm_dev *sdev = NULL;
- struct intel_svm *svm = NULL;
struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
@@ -901,8 +864,6 @@ int intel_svm_page_response(struct device *dev,
if (!msg || !evt)
return -EINVAL;
- mutex_lock(&pasid_mutex);
-
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -919,12 +880,6 @@ int intel_svm_page_response(struct device *dev,
goto out;
}
- ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev);
- if (ret || !sdev) {
- ret = -ENODEV;
- goto out;
- }
-
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
@@ -954,6 +909,5 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0);
}
out:
- mutex_unlock(&pasid_mutex);
return ret;
}
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 94ff319ae8ac..0ba817e86346 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -130,9 +130,6 @@
#define ARM_MALI_LPAE_MEMATTR_IMP_DEF 0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
-#define APPLE_DART_PTE_PROT_NO_WRITE (1<<7)
-#define APPLE_DART_PTE_PROT_NO_READ (1<<8)
-
/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
@@ -200,8 +197,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
- p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
- gfp | __GFP_ZERO, order);
+ p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
if (!p)
return NULL;
@@ -406,15 +402,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
{
arm_lpae_iopte pte;
- if (data->iop.fmt == APPLE_DART) {
- pte = 0;
- if (!(prot & IOMMU_WRITE))
- pte |= APPLE_DART_PTE_PROT_NO_WRITE;
- if (!(prot & IOMMU_READ))
- pte |= APPLE_DART_PTE_PROT_NO_READ;
- return pte;
- }
-
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_nG;
@@ -1107,52 +1094,6 @@ out_free_data:
return NULL;
}
-static struct io_pgtable *
-apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
-{
- struct arm_lpae_io_pgtable *data;
- int i;
-
- if (cfg->oas > 36)
- return NULL;
-
- data = arm_lpae_alloc_pgtable(cfg);
- if (!data)
- return NULL;
-
- /*
- * The table format itself always uses two levels, but the total VA
- * space is mapped by four separate tables, making the MMIO registers
- * an effective "level 1". For simplicity, though, we treat this
- * equivalently to LPAE stage 2 concatenation at level 2, with the
- * additional TTBRs each just pointing at consecutive pages.
- */
- if (data->start_level < 1)
- goto out_free_data;
- if (data->start_level == 1 && data->pgd_bits > 2)
- goto out_free_data;
- if (data->start_level > 1)
- data->pgd_bits = 0;
- data->start_level = 2;
- cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
- data->pgd_bits += data->bits_per_level;
-
- data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
- cfg);
- if (!data->pgd)
- goto out_free_data;
-
- for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
- cfg->apple_dart_cfg.ttbr[i] =
- virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));
-
- return &data->iop;
-
-out_free_data:
- kfree(data);
- return NULL;
-}
-
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
@@ -1178,11 +1119,6 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.free = arm_lpae_free_pgtable,
};
-struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
- .alloc = apple_dart_alloc_pgtable,
- .free = arm_lpae_free_pgtable,
-};
-
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie __initdata;
@@ -1343,12 +1279,17 @@ static int __init arm_lpae_do_selftests(void)
};
int i, j, pass = 0, fail = 0;
+ struct device dev;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
.coherent_walk = true,
+ .iommu_dev = &dev,
};
+ /* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
+ set_dev_node(&dev, NUMA_NO_NODE);
+
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(ias); ++j) {
cfg.pgsize_bitmap = pgsize[i];
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
new file mode 100644
index 000000000000..74b1ef2b96be
--- /dev/null
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Apple DART page table allocator.
+ *
+ * Copyright (C) 2022 The Asahi Linux Contributors
+ *
+ * Based on io-pgtable-arm.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "dart io-pgtable: " fmt
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+
+#define DART1_MAX_ADDR_BITS 36
+
+#define DART_MAX_TABLES 4
+#define DART_LEVELS 2
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct dart_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+#define DART_GRANULE(d) \
+ (sizeof(dart_iopte) << (d)->bits_per_level)
+#define DART_PTES_PER_TABLE(d) \
+ (DART_GRANULE(d) >> ilog2(sizeof(dart_iopte)))
+
+#define APPLE_DART_PTE_SUBPAGE_START GENMASK_ULL(63, 52)
+#define APPLE_DART_PTE_SUBPAGE_END GENMASK_ULL(51, 40)
+
+#define APPLE_DART1_PADDR_MASK GENMASK_ULL(35, 12)
+#define APPLE_DART2_PADDR_MASK GENMASK_ULL(37, 10)
+#define APPLE_DART2_PADDR_SHIFT (4)
+
+/* Apple DART1 protection bits */
+#define APPLE_DART1_PTE_PROT_NO_READ BIT(8)
+#define APPLE_DART1_PTE_PROT_NO_WRITE BIT(7)
+#define APPLE_DART1_PTE_PROT_SP_DIS BIT(1)
+
+/* Apple DART2 protection bits */
+#define APPLE_DART2_PTE_PROT_NO_READ BIT(3)
+#define APPLE_DART2_PTE_PROT_NO_WRITE BIT(2)
+#define APPLE_DART2_PTE_PROT_NO_CACHE BIT(1)
+
+/* marks PTE as valid */
+#define APPLE_DART_PTE_VALID BIT(0)
+
+/* IOPTE accessors */
+#define iopte_deref(pte, d) __va(iopte_to_paddr(pte, d))
+
+struct dart_io_pgtable {
+ struct io_pgtable iop;
+
+ int tbl_bits;
+ int bits_per_level;
+
+ void *pgd[DART_MAX_TABLES];
+};
+
+typedef u64 dart_iopte;
+
+
+static dart_iopte paddr_to_iopte(phys_addr_t paddr,
+ struct dart_io_pgtable *data)
+{
+ dart_iopte pte;
+
+ if (data->iop.fmt == APPLE_DART)
+ return paddr & APPLE_DART1_PADDR_MASK;
+
+ /* format is APPLE_DART2 */
+ pte = paddr >> APPLE_DART2_PADDR_SHIFT;
+ pte &= APPLE_DART2_PADDR_MASK;
+
+ return pte;
+}
+
+static phys_addr_t iopte_to_paddr(dart_iopte pte,
+ struct dart_io_pgtable *data)
+{
+ u64 paddr;
+
+ if (data->iop.fmt == APPLE_DART)
+ return pte & APPLE_DART1_PADDR_MASK;
+
+ /* format is APPLE_DART2 */
+ paddr = pte & APPLE_DART2_PADDR_MASK;
+ paddr <<= APPLE_DART2_PADDR_SHIFT;
+
+ return paddr;
+}
+
+static void *__dart_alloc_pages(size_t size, gfp_t gfp,
+ struct io_pgtable_cfg *cfg)
+{
+ int order = get_order(size);
+ struct page *p;
+
+ VM_BUG_ON((gfp & __GFP_HIGHMEM));
+ p = alloc_pages(gfp | __GFP_ZERO, order);
+ if (!p)
+ return NULL;
+
+ return page_address(p);
+}
+
+static int dart_init_pte(struct dart_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr,
+ dart_iopte prot, int num_entries,
+ dart_iopte *ptep)
+{
+ int i;
+ dart_iopte pte = prot;
+ size_t sz = data->iop.cfg.pgsize_bitmap;
+
+ for (i = 0; i < num_entries; i++)
+ if (ptep[i] & APPLE_DART_PTE_VALID) {
+ /* We require an unmap first */
+ WARN_ON(ptep[i] & APPLE_DART_PTE_VALID);
+ return -EEXIST;
+ }
+
+ /* subpage protection: always allow access to the entire page */
+ pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
+ pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
+
+ pte |= APPLE_DART1_PTE_PROT_SP_DIS;
+ pte |= APPLE_DART_PTE_VALID;
+
+ for (i = 0; i < num_entries; i++)
+ ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
+
+ return 0;
+}
+
+static dart_iopte dart_install_table(dart_iopte *table,
+ dart_iopte *ptep,
+ dart_iopte curr,
+ struct dart_io_pgtable *data)
+{
+ dart_iopte old, new;
+
+ new = paddr_to_iopte(__pa(table), data) | APPLE_DART_PTE_VALID;
+
+ /*
+ * Ensure the table itself is visible before its PTE can be.
+ * Whilst we could get away with cmpxchg64_release below, this
+ * doesn't have any ordering semantics when !CONFIG_SMP.
+ */
+ dma_wmb();
+
+ old = cmpxchg64_relaxed(ptep, curr, new);
+
+ return old;
+}
+
+static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
+{
+ return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->tbl_bits) - 1);
+}
+
+static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
+{
+
+ return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
+}
+
+static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+{
+
+ return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
+}
+
+static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+{
+ dart_iopte pte, *ptep;
+ int tbl = dart_get_table(data, iova);
+
+ ptep = data->pgd[tbl];
+ if (!ptep)
+ return NULL;
+
+ ptep += dart_get_l1_index(data, iova);
+ pte = READ_ONCE(*ptep);
+
+ /* Valid entry? */
+ if (!pte)
+ return NULL;
+
+ /* Deref to get level 2 table */
+ return iopte_deref(pte, data);
+}
+
+static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
+ int prot)
+{
+ dart_iopte pte = 0;
+
+ if (data->iop.fmt == APPLE_DART) {
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART1_PTE_PROT_NO_READ;
+ }
+ if (data->iop.fmt == APPLE_DART2) {
+ if (!(prot & IOMMU_WRITE))
+ pte |= APPLE_DART2_PTE_PROT_NO_WRITE;
+ if (!(prot & IOMMU_READ))
+ pte |= APPLE_DART2_PTE_PROT_NO_READ;
+ if (!(prot & IOMMU_CACHE))
+ pte |= APPLE_DART2_PTE_PROT_NO_CACHE;
+ }
+
+ return pte;
+}
+
+static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int iommu_prot, gfp_t gfp, size_t *mapped)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ size_t tblsz = DART_GRANULE(data);
+ int ret = 0, tbl, num_entries, max_entries, map_idx_start;
+ dart_iopte pte, *cptep, *ptep;
+ dart_iopte prot;
+
+ if (WARN_ON(pgsize != cfg->pgsize_bitmap))
+ return -EINVAL;
+
+ if (WARN_ON(paddr >> cfg->oas))
+ return -ERANGE;
+
+ /* If no access, then nothing to do */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ tbl = dart_get_table(data, iova);
+
+ ptep = data->pgd[tbl];
+ ptep += dart_get_l1_index(data, iova);
+ pte = READ_ONCE(*ptep);
+
+ /* no L2 table present */
+ if (!pte) {
+ cptep = __dart_alloc_pages(tblsz, gfp, cfg);
+ if (!cptep)
+ return -ENOMEM;
+
+ pte = dart_install_table(cptep, ptep, 0, data);
+ if (pte)
+ free_pages((unsigned long)cptep, get_order(tblsz));
+
+ /* L2 table is present (now) */
+ pte = READ_ONCE(*ptep);
+ }
+
+ ptep = iopte_deref(pte, data);
+
+ /* install leaf entries into the L2 table */
+ prot = dart_prot_to_pte(data, iommu_prot);
+ map_idx_start = dart_get_l2_index(data, iova);
+ max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+ ptep += map_idx_start;
+ ret = dart_init_pte(data, iova, paddr, prot, num_entries, ptep);
+ if (!ret && mapped)
+ *mapped += num_entries * pgsize;
+
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ wmb();
+
+ return ret;
+}
+
+static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ int i = 0, num_entries, max_entries, unmap_idx_start;
+ dart_iopte pte, *ptep;
+
+ if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
+ return 0;
+
+ ptep = dart_get_l2(data, iova);
+
+ /* Valid L2 IOPTE pointer? */
+ if (WARN_ON(!ptep))
+ return 0;
+
+ unmap_idx_start = dart_get_l2_index(data, iova);
+ ptep += unmap_idx_start;
+
+ max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
+ num_entries = min_t(int, pgcount, max_entries);
+
+ while (i < num_entries) {
+ pte = READ_ONCE(*ptep);
+ if (WARN_ON(!pte))
+ break;
+
+ /* clear pte */
+ *ptep = 0;
+
+ if (!iommu_iotlb_gather_queued(gather))
+ io_pgtable_tlb_add_page(&data->iop, gather,
+ iova + i * pgsize, pgsize);
+
+ ptep++;
+ i++;
+ }
+
+ return i * pgsize;
+}
+
+static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ dart_iopte pte, *ptep;
+
+ ptep = dart_get_l2(data, iova);
+
+ /* Valid L2 IOPTE pointer? */
+ if (!ptep)
+ return 0;
+
+ ptep += dart_get_l2_index(data, iova);
+
+ pte = READ_ONCE(*ptep);
+ /* Found translation */
+ if (pte) {
+ iova &= (data->iop.cfg.pgsize_bitmap - 1);
+ return iopte_to_paddr(pte, data) | iova;
+ }
+
+ /* Ran out of page tables to walk */
+ return 0;
+}
+
+static struct dart_io_pgtable *
+dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
+{
+ struct dart_io_pgtable *data;
+ int tbl_bits, bits_per_level, va_bits, pg_shift;
+
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
+
+ va_bits = cfg->ias - pg_shift;
+
+ tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
+ if ((1 << tbl_bits) > DART_MAX_TABLES)
+ return NULL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->tbl_bits = tbl_bits;
+ data->bits_per_level = bits_per_level;
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map_pages = dart_map_pages,
+ .unmap_pages = dart_unmap_pages,
+ .iova_to_phys = dart_iova_to_phys,
+ };
+
+ return data;
+}
+
+static struct io_pgtable *
+apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+ struct dart_io_pgtable *data;
+ int i;
+
+ if (!cfg->coherent_walk)
+ return NULL;
+
+ if (cfg->oas != 36 && cfg->oas != 42)
+ return NULL;
+
+ if (cfg->ias > cfg->oas)
+ return NULL;
+
+ if (!(cfg->pgsize_bitmap == SZ_4K || cfg->pgsize_bitmap == SZ_16K))
+ return NULL;
+
+ data = dart_alloc_pgtable(cfg);
+ if (!data)
+ return NULL;
+
+ cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+
+ for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
+ data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
+ cfg);
+ if (!data->pgd[i])
+ goto out_free_data;
+ cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
+ }
+
+ return &data->iop;
+
+out_free_data:
+ while (--i >= 0)
+ free_pages((unsigned long)data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ kfree(data);
+ return NULL;
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+ struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ dart_iopte *ptep, *end;
+ int i;
+
+ for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
+ ptep = data->pgd[i];
+ end = (void *)ptep + DART_GRANULE(data);
+
+ while (ptep != end) {
+ dart_iopte pte = *ptep++;
+
+ if (pte) {
+ unsigned long page =
+ (unsigned long)iopte_deref(pte, data);
+
+ free_pages(page, get_order(DART_GRANULE(data)));
+ }
+ }
+ free_pages((unsigned long)data->pgd[i],
+ get_order(DART_GRANULE(data)));
+ }
+
+ kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
+ .alloc = apple_dart_alloc_pgtable,
+ .free = apple_dart_free_pgtable,
+};
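To make the index arithmetic in dart_get_table()/dart_get_l1_index()/dart_get_l2_index() above concrete: with a 16 KiB granule, pg_shift is 14 and bits_per_level is 14 - log2(sizeof(dart_iopte)) = 11, so an IOVA splits into [ttbr index | L1 index | L2 index | 14-bit page offset], each table index 11 bits wide and the ttbr index covering whatever remains of the input address space (at most 2 bits, matching DART_MAX_TABLES). A small stand-alone sketch of that split, with an arbitrarily chosen 38-bit address:

#include <stdint.h>
#include <stdio.h>

#define PG_SHIFT	14			/* 16 KiB granule */
#define BITS_PER_LVL	(PG_SHIFT - 3)		/* 8-byte PTEs -> 11 bits per level */

int main(void)
{
	uint64_t iova = 0x23f4567890ULL;	/* example 38-bit IOVA */
	unsigned int l2  = (iova >> PG_SHIFT) & ((1 << BITS_PER_LVL) - 1);
	unsigned int l1  = (iova >> (PG_SHIFT + BITS_PER_LVL)) & ((1 << BITS_PER_LVL) - 1);
	unsigned int tbl = iova >> (PG_SHIFT + 2 * BITS_PER_LVL);

	printf("ttbr=%u l1=%u l2=%u offset=0x%llx\n", tbl, l1, l2,
	       (unsigned long long)(iova & ((1 << PG_SHIFT) - 1)));
	return 0;
}

Each L2 table therefore maps 32 MiB (11 + 14 bits), each TTBR covers 64 GiB, and four TTBRs together give the 38-bit input space handled by DART2.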
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index f4bfcef98297..b843fcd365d2 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -20,13 +20,17 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
[ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
+#endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_DART
[APPLE_DART] = &io_pgtable_apple_dart_init_fns,
+ [APPLE_DART2] = &io_pgtable_apple_dart_init_fns,
#endif
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
#ifdef CONFIG_AMD_IOMMU
[AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
+ [AMD_IOMMU_V2] = &io_pgtable_amd_iommu_v2_init_fns,
#endif
};
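With APPLE_DART/APPLE_DART2 wired into io_pgtable_init_table above, callers reach the DART allocator through the generic io-pgtable API. A hedged sketch of such a caller follows; the config values and the no-op flush callbacks are assumptions for illustration, not taken from an existing driver:

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static void example_tlb_flush_all(void *cookie) { }
static void example_tlb_flush_walk(unsigned long iova, size_t size,
				   size_t granule, void *cookie) { }

static const struct iommu_flush_ops example_flush_ops = {
	.tlb_flush_all	= example_tlb_flush_all,
	.tlb_flush_walk	= example_tlb_flush_walk,
};

static struct io_pgtable_ops *example_dart2_pgtable(struct device *dev, void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_16K,	/* DART accepts 4K or 16K */
		.ias		= 38,
		.oas		= 42,		/* DART2 output address size */
		.coherent_walk	= true,
		.tlb		= &example_flush_ops,
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(APPLE_DART2, &cfg, cookie);
}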
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 780fb7071577..4893c2429ca5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -6,8 +6,8 @@
#define pr_fmt(fmt) "iommu: " fmt
+#include <linux/amba/bus.h>
#include <linux/device.h>
-#include <linux/dma-iommu.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
@@ -16,17 +16,21 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/host1x_context_bus.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
+#include "dma-iommu.h"
+
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
@@ -75,6 +79,8 @@ static const char * const iommu_group_resv_type_string[] = {
#define IOMMU_CMD_LINE_DMA_API BIT(0)
#define IOMMU_CMD_LINE_STRICT BIT(1)
+static int iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data);
static int iommu_alloc_default_domain(struct iommu_group *group,
struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -103,6 +109,22 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
+static struct bus_type * const iommu_buses[] = {
+ &platform_bus_type,
+#ifdef CONFIG_PCI
+ &pci_bus_type,
+#endif
+#ifdef CONFIG_ARM_AMBA
+ &amba_bustype,
+#endif
+#ifdef CONFIG_FSL_MC_BUS
+ &fsl_mc_bus_type,
+#endif
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+ &host1x_context_device_bus_type,
+#endif
+};
+
/*
* Use a function instead of an array here because the domain-type is a
* bit-field, so an array would waste memory.
@@ -126,6 +148,8 @@ static const char *iommu_domain_type_str(unsigned int t)
static int __init iommu_subsys_init(void)
{
+ struct notifier_block *nb;
+
if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
iommu_set_default_passthrough(false);
@@ -152,10 +176,27 @@ static int __init iommu_subsys_init(void)
(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
"(set via kernel command line)" : "");
+ nb = kcalloc(ARRAY_SIZE(iommu_buses), sizeof(*nb), GFP_KERNEL);
+ if (!nb)
+ return -ENOMEM;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
+ nb[i].notifier_call = iommu_bus_notifier;
+ bus_register_notifier(iommu_buses[i], &nb[i]);
+ }
+
return 0;
}
subsys_initcall(iommu_subsys_init);
+static int remove_iommu_group(struct device *dev, void *data)
+{
+ if (dev->iommu && dev->iommu->iommu_dev == data)
+ iommu_release_device(dev);
+
+ return 0;
+}
+
/**
* iommu_device_register() - Register an IOMMU hardware instance
* @iommu: IOMMU handle for the instance
@@ -167,23 +208,42 @@ subsys_initcall(iommu_subsys_init);
int iommu_device_register(struct iommu_device *iommu,
const struct iommu_ops *ops, struct device *hwdev)
{
+ int err = 0;
+
/* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL;
+ /*
+ * Temporarily enforce global restriction to a single driver. This was
+ * already the de-facto behaviour, since any possible combination of
+ * existing drivers would compete for at least the PCI or platform bus.
+ */
+ if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
+ return -EBUSY;
iommu->ops = ops;
if (hwdev)
- iommu->fwnode = hwdev->fwnode;
+ iommu->fwnode = dev_fwnode(hwdev);
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
- return 0;
+
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
+ iommu_buses[i]->iommu_ops = ops;
+ err = bus_iommu_probe(iommu_buses[i]);
+ }
+ if (err)
+ iommu_device_unregister(iommu);
+ return err;
}
EXPORT_SYMBOL_GPL(iommu_device_register);
void iommu_device_unregister(struct iommu_device *iommu)
{
+ for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
+ bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
+
spin_lock(&iommu_device_lock);
list_del(&iommu->list);
spin_unlock(&iommu_device_lock);
@@ -654,7 +714,6 @@ struct iommu_group *iommu_group_alloc(void)
ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
NULL, "%d", group->id);
if (ret) {
- ida_free(&iommu_group_ida, group->id);
kobject_put(&group->kobj);
return ERR_PTR(ret);
}
@@ -1612,13 +1671,6 @@ static int probe_iommu_group(struct device *dev, void *data)
return ret;
}
-static int remove_iommu_group(struct device *dev, void *data)
-{
- iommu_release_device(dev);
-
- return 0;
-}
-
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -1775,75 +1827,6 @@ int bus_iommu_probe(struct bus_type *bus)
return ret;
}
-static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
-{
- struct notifier_block *nb;
- int err;
-
- nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
- if (!nb)
- return -ENOMEM;
-
- nb->notifier_call = iommu_bus_notifier;
-
- err = bus_register_notifier(bus, nb);
- if (err)
- goto out_free;
-
- err = bus_iommu_probe(bus);
- if (err)
- goto out_err;
-
-
- return 0;
-
-out_err:
- /* Clean up */
- bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
- bus_unregister_notifier(bus, nb);
-
-out_free:
- kfree(nb);
-
- return err;
-}
-
-/**
- * bus_set_iommu - set iommu-callbacks for the bus
- * @bus: bus.
- * @ops: the callbacks provided by the iommu-driver
- *
- * This function is called by an iommu driver to set the iommu methods
- * used for a particular bus. Drivers for devices on that bus can use
- * the iommu-api after these ops are registered.
- * This special function is needed because IOMMUs are usually devices on
- * the bus itself, so the iommu drivers are not initialized when the bus
- * is set up. With this function the iommu-driver can set the iommu-ops
- * afterwards.
- */
-int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
-{
- int err;
-
- if (ops == NULL) {
- bus->iommu_ops = NULL;
- return 0;
- }
-
- if (bus->iommu_ops != NULL)
- return -EBUSY;
-
- bus->iommu_ops = ops;
-
- /* Do IOMMU specific setup for this bus-type */
- err = iommu_bus_init(bus, ops);
- if (err)
- bus->iommu_ops = NULL;
-
- return err;
-}
-EXPORT_SYMBOL_GPL(bus_set_iommu);
-
bool iommu_present(struct bus_type *bus)
{
return bus->iommu_ops != NULL;
@@ -1869,19 +1852,10 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
if (!ops->capable)
return false;
- return ops->capable(cap);
+ return ops->capable(dev, cap);
}
EXPORT_SYMBOL_GPL(device_iommu_capable);
-bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
-{
- if (!bus->iommu_ops || !bus->iommu_ops->capable)
- return false;
-
- return bus->iommu_ops->capable(cap);
-}
-EXPORT_SYMBOL_GPL(iommu_capable);
-
/**
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
@@ -3076,6 +3050,24 @@ out:
return ret;
}
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+ if (group->domain == group->default_domain)
+ return true;
+
+ /*
+ * If the default domain was set to identity and it is still an identity
+ * domain then we consider this a pass. This happens because of
+ * amd_iommu_init_device() replacing the default identity domain with an
+ * identity domain that has a different configuration for AMDGPU.
+ */
+ if (group->default_domain &&
+ group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+ group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+ return true;
+ return false;
+}
+
/**
* iommu_device_use_default_domain() - Device driver wants to handle device
* DMA through the kernel DMA API.
@@ -3094,8 +3086,7 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->domain != group->default_domain ||
- group->owner) {
+ if (group->owner || !iommu_is_default_domain(group)) {
ret = -EBUSY;
goto unlock_out;
}
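The iommu.c hunks above retire bus_set_iommu(): iommu_subsys_init() registers notifiers for the fixed iommu_buses[] list, and iommu_device_register() now walks those buses and probes their devices itself. From a driver's perspective, probe reduces to registering the instance. A hedged sketch of that pattern (struct example_iommu and example_iommu_ops are placeholders, not kernel symbols):

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_iommu {
	struct iommu_device iommu;
};

extern const struct iommu_ops example_iommu_ops;	/* assumed defined elsewhere */

static int example_iommu_probe(struct platform_device *pdev)
{
	struct example_iommu *ex;
	int ret;

	ex = devm_kzalloc(&pdev->dev, sizeof(*ex), GFP_KERNEL);
	if (!ex)
		return -ENOMEM;

	ret = iommu_device_sysfs_add(&ex->iommu, &pdev->dev, NULL,
				     "%s", dev_name(&pdev->dev));
	if (ret)
		return ret;

	/* No bus_set_iommu() step: registering the instance probes the known buses. */
	ret = iommu_device_register(&ex->iommu, &example_iommu_ops, &pdev->dev);
	if (ret)
		iommu_device_sysfs_remove(&ex->iommu);

	return ret;
}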
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 47d1983dfa2a..a44ad92fc5eb 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -661,9 +661,6 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
unsigned long flags;
int i;
- if (!mag)
- return;
-
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (i = 0 ; i < mag->size; ++i) {
@@ -683,12 +680,12 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
static bool iova_magazine_full(struct iova_magazine *mag)
{
- return (mag && mag->size == IOVA_MAG_SIZE);
+ return mag->size == IOVA_MAG_SIZE;
}
static bool iova_magazine_empty(struct iova_magazine *mag)
{
- return (!mag || mag->size == 0);
+ return mag->size == 0;
}
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
@@ -697,8 +694,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
int i;
unsigned long pfn;
- BUG_ON(iova_magazine_empty(mag));
-
/* Only fall back to the rbtree if we have no suitable pfns at all */
for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
if (i == 0)
@@ -713,8 +708,6 @@ static unsigned long iova_magazine_pop(struct iova_magazine *mag,
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
- BUG_ON(iova_magazine_full(mag));
-
mag->pfns[mag->size++] = pfn;
}
@@ -882,7 +875,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
unsigned int log_size = order_base_2(size);
- if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 1d42084d0276..3b30c0752274 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1090,11 +1090,6 @@ static int ipmmu_probe(struct platform_device *pdev)
ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
if (ret)
return ret;
-
-#if defined(CONFIG_IOMMU_DMA)
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
}
/*
@@ -1168,32 +1163,4 @@ static struct platform_driver ipmmu_driver = {
.probe = ipmmu_probe,
.remove = ipmmu_remove,
};
-
-static int __init ipmmu_init(void)
-{
- struct device_node *np;
- static bool setup_done;
- int ret;
-
- if (setup_done)
- return 0;
-
- np = of_find_matching_node(NULL, ipmmu_of_ids);
- if (!np)
- return 0;
-
- of_node_put(np);
-
- ret = platform_driver_register(&ipmmu_driver);
- if (ret < 0)
- return ret;
-
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &ipmmu_ops);
-#endif
-
- setup_done = true;
- return 0;
-}
-subsys_initcall(ipmmu_init);
+builtin_platform_driver(ipmmu_driver);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a24aa804ea3..16179a9a7283 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -792,8 +792,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
goto fail;
}
- bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
-
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7e363b1f24df..5a4e00e4bbbc 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -138,6 +138,7 @@
#define PM_CLK_AO BIT(15)
#define IFA_IOMMU_PCIE_SUPPORT BIT(16)
#define PGTABLE_PA_35_EN BIT(17)
+#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -157,6 +158,7 @@
enum mtk_iommu_plat {
M4U_MT2712,
M4U_MT6779,
+ M4U_MT6795,
M4U_MT8167,
M4U_MT8173,
M4U_MT8183,
@@ -955,7 +957,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int ban
* Global control settings are in bank0. These global registers may be
* re-initialized since it is not certain whether bank0 has any consumers.
*/
- if (data->plat_data->m4u_plat == M4U_MT8173) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
} else {
@@ -1243,30 +1245,13 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->hw_list = &data->hw_list_head;
}
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
- if (ret)
- goto out_list_del;
- }
-
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
if (ret)
- goto out_bus_set_null;
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops);
- if (ret) /* PCIe fail don't affect platform_bus. */
- goto out_list_del;
- }
-#endif
+ goto out_list_del;
}
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_list_del:
list_del(&data->list);
iommu_device_unregister(&data->iommu);
@@ -1294,11 +1279,6 @@ static int mtk_iommu_remove(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
device_link_remove(data->smicomm_dev, &pdev->dev);
component_master_del(&pdev->dev, &mtk_iommu_com_ops);
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
- MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) {
-#ifdef CONFIG_PCI
- bus_set_iommu(&pci_bus_type, NULL);
-#endif
}
pm_runtime_disable(&pdev->dev);
for (i = 0; i < data->plat_data->banks_num; i++) {
@@ -1413,6 +1393,19 @@ static const struct mtk_iommu_plat_data mt6779_data = {
.larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};
+static const struct mtk_iommu_plat_data mt6795_data = {
+ .m4u_plat = M4U_MT6795,
+ .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+ TF_PORT_TO_ADDR_MT8173,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}}, /* Linear mapping. */
+};
+
static const struct mtk_iommu_plat_data mt8167_data = {
.m4u_plat = M4U_MT8167,
.flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
@@ -1427,7 +1420,8 @@ static const struct mtk_iommu_plat_data mt8167_data = {
static const struct mtk_iommu_plat_data mt8173_data = {
.m4u_plat = M4U_MT8173,
.flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
- HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM,
+ HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM |
+ TF_PORT_TO_ADDR_MT8173,
.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
.banks_num = 1,
.banks_enable = {true},
@@ -1524,6 +1518,7 @@ static const struct mtk_iommu_plat_data mt8195_data_vpp = {
static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
{ .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
+ { .compatible = "mediatek,mt6795-m4u", .data = &mt6795_data},
{ .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 128c7a3f1778..6e0e65831eb7 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -691,19 +691,11 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
if (ret)
goto out_sysfs_remove;
- if (!iommu_present(&platform_bus_type)) {
- ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
- if (ret)
- goto out_dev_unreg;
- }
-
ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
if (ret)
- goto out_bus_set_null;
+ goto out_dev_unreg;
return ret;
-out_bus_set_null:
- bus_set_iommu(&platform_bus_type, NULL);
out_dev_unreg:
iommu_device_unregister(&data->iommu);
out_sysfs_remove:
@@ -718,9 +710,6 @@ static int mtk_iommu_v1_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
- if (iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, NULL);
-
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 41f4eb005219..5696314ae69e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -40,7 +40,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return -ENODEV;
+ return driver_deferred_probe_check_state(dev);
if (!try_module_get(ops->owner))
return -ENODEV;
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index a99afb5d9011..259f65291d90 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -32,12 +32,12 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
ssize_t bytes; \
const char *str = "%20s: %08x\n"; \
const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
+ if (len < maxcol) \
+ goto out; \
+ bytes = scnprintf(p, maxcol, str, __stringify(name), \
iommu_read_reg(obj, MMU_##name)); \
p += bytes; \
len -= bytes; \
- if (len < maxcol) \
- goto out; \
} while (0)
static ssize_t
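The macro fix above moves the length check ahead of the print and switches from snprintf() to scnprintf(), whose return value is the number of bytes actually stored rather than the length that would have been written. A small sketch of why that return value is safe to use as a cursor advance (the 32-byte column width mirrors the macro, but the wrapper itself is illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

/* With an 8-byte buffer and a 10-character string:
 *   snprintf()  returns 10 (the would-be length) and can drive a
 *               remaining-length counter negative;
 *   scnprintf() returns 7 (bytes stored, excluding the NUL), so advancing
 *               a cursor by its return value stays inside the buffer.
 */
static size_t emit_reg(char *p, size_t remaining, const char *name, u32 val)
{
	if (remaining < 32)
		return 0;	/* mirrors the early 'goto out' in the macro */

	return scnprintf(p, remaining, "%20s: %08x\n", name, val);
}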
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d9cf2820c02e..07ee2600113c 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1776,14 +1776,8 @@ static int __init omap_iommu_init(void)
goto fail_driver;
}
- ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
- if (ret)
- goto fail_bus;
-
return 0;
-fail_bus:
- platform_driver_unregister(&omap_iommu_driver);
fail_driver:
kmem_cache_destroy(iopte_cachep);
return ret;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ab57c4b8fade..a3fc59b814ab 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1300,8 +1300,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (!dma_dev)
dma_dev = &pdev->dev;
- bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
-
pm_runtime_enable(dev);
for (i = 0; i < iommu->num_irq; i++) {
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index c898bcbbce11..3c071782f6f1 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -39,7 +39,7 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
return container_of(dom, struct s390_domain, domain);
}
-static bool s390_iommu_capable(enum iommu_cap cap)
+static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
@@ -185,7 +185,12 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
- struct zpci_dev *zdev = to_zpci_dev(dev);
+ struct zpci_dev *zdev;
+
+ if (!dev_is_pci(dev))
+ return ERR_PTR(-ENODEV);
+
+ zdev = to_zpci_dev(dev);
return &zdev->iommu_dev;
}
@@ -385,9 +390,3 @@ static const struct iommu_ops s390_iommu_ops = {
.free = s390_domain_free,
}
};
-
-static int __init s390_iommu_init(void)
-{
- return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
-}
-subsys_initcall(s390_iommu_init);
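Several drivers in this diff move their ->capable() callback to the new per-device signature, and bus-wide iommu_capable() is removed from the core. On the consumer side the query now goes through device_iommu_capable(); a minimal sketch (the wrapper name is illustrative):

#include <linux/iommu.h>

static bool example_wants_coherent_dma(struct device *dev)
{
	/* Per-device query matching the ->capable(dev, cap) changes above. */
	return device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY);
}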
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 511959c8a14d..fadd2c907222 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -496,9 +496,6 @@ static int sprd_iommu_probe(struct platform_device *pdev)
if (ret)
goto remove_sysfs;
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);
-
ret = sprd_iommu_clk_enable(sdev);
if (ret)
goto unregister_iommu;
@@ -534,8 +531,6 @@ static int sprd_iommu_remove(struct platform_device *pdev)
iommu_group_put(sdev->group);
sdev->group = NULL;
- bus_set_iommu(&platform_bus_type, NULL);
-
platform_set_drvdata(pdev, NULL);
iommu_device_sysfs_remove(&sdev->iommu);
iommu_device_unregister(&sdev->iommu);
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index a84c63518773..cd9b74ee24de 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -965,8 +965,6 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
if (ret < 0)
goto err_unregister;
- bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
-
return 0;
err_unregister:
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2a8de975fe63..5b1af40221ec 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1083,8 +1083,8 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
/*
* This is a bit of a hack. Ideally we'd want to simply return this
- * value. However the IOMMU registration process will attempt to add
- * all devices to the IOMMU when bus_set_iommu() is called. In order
+ * value. However, iommu_device_register() will attempt to add
+ * all devices to the IOMMU before we get that far. In order
* not to rely on global variables to track the IOMMU instance, we
* set it here so that it can be looked up from the .probe_device()
* callback via the IOMMU device's .drvdata field.
@@ -1138,32 +1138,15 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
return ERR_PTR(err);
err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
- if (err)
- goto remove_sysfs;
-
- err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
- if (err < 0)
- goto unregister;
-
-#ifdef CONFIG_PCI
- err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops);
- if (err < 0)
- goto unset_platform_bus;
-#endif
+ if (err) {
+ iommu_device_sysfs_remove(&smmu->iommu);
+ return ERR_PTR(err);
+ }
if (IS_ENABLED(CONFIG_DEBUG_FS))
tegra_smmu_debugfs_init(smmu);
return smmu;
-
-unset_platform_bus: __maybe_unused;
- bus_set_iommu(&platform_bus_type, NULL);
-unregister:
- iommu_device_unregister(&smmu->iommu);
-remove_sysfs:
- iommu_device_sysfs_remove(&smmu->iommu);
-
- return ERR_PTR(err);
}
void tegra_smmu_remove(struct tegra_smmu *smmu)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 08eeafc9529f..b7c22802f57c 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -7,9 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/amba/bus.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
@@ -17,7 +15,6 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
@@ -25,6 +22,8 @@
#include <uapi/linux/virtio_iommu.h>
+#include "dma-iommu.h"
+
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
@@ -925,7 +924,7 @@ static struct virtio_driver virtio_iommu_drv;
static int viommu_match_node(struct device *dev, const void *data)
{
- return dev->parent->fwnode == data;
+ return device_match_fwnode(dev->parent, data);
}
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
@@ -1006,7 +1005,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static bool viommu_capable(struct device *dev, enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct iommu_ops viommu_ops = {
+ .capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
@@ -1145,26 +1155,6 @@ static int viommu_probe(struct virtio_device *vdev)
iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
-#ifdef CONFIG_PCI
- if (pci_bus_type.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-#endif
-#ifdef CONFIG_ARM_AMBA
- if (amba_bustype.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&amba_bustype, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-#endif
- if (platform_bus_type.iommu_ops != &viommu_ops) {
- ret = bus_set_iommu(&platform_bus_type, &viommu_ops);
- if (ret)
- goto err_unregister;
- }
-
vdev->priv = viommu;
dev_info(dev, "input address: %u bits\n",
@@ -1173,9 +1163,6 @@ static int viommu_probe(struct virtio_device *vdev)
return 0;
-err_unregister:
- iommu_device_sysfs_remove(&viommu->iommu);
- iommu_device_unregister(&viommu->iommu);
err_free_vqs:
vdev->config->del_vqs(vdev);
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 20d2b9ec1227..fc00274070b6 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -497,7 +497,7 @@ static unsigned int ipoctal_chars_in_buffer(struct tty_struct *tty)
}
static void ipoctal_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
unsigned int cflag;
unsigned char mr1 = 0;
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index b1c3198355e7..74d449858a61 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -429,8 +429,11 @@ int ipack_device_init(struct ipack_device *dev)
dev->dev.bus = &ipack_bus_type;
dev->dev.release = ipack_device_release;
dev->dev.parent = dev->bus->parent;
- dev_set_name(&dev->dev,
+ ret = dev_set_name(&dev->dev,
"ipack-dev.%u.%u", dev->bus->bus_nr, dev->slot);
+ if (ret)
+ return ret;
+
device_initialize(&dev->dev);
if (dev->bus->ops->set_clockrate(dev, 8))
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 66b9fa408bf2..eb5ea5b69cfa 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -561,6 +561,11 @@ config IRQ_LOONGARCH_CPU
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
+ select LOONGSON_LIOINTC
+ select LOONGSON_EIOINTC
+ select LOONGSON_PCH_PIC
+ select LOONGSON_PCH_MSI
+ select LOONGSON_PCH_LPC
help
Support for the LoongArch CPU Interrupt Controller. For details of
irq chip hierarchy on LoongArch platforms please read the document
@@ -623,8 +628,9 @@ config LOONGSON_PCH_MSI
config LOONGSON_PCH_LPC
bool "Loongson PCH LPC Controller"
+ depends on LOONGARCH
depends on MACH_LOONGSON64
- default (MACH_LOONGSON64 && LOONGARCH)
+ default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
help
Support for the Loongson PCH LPC Controller.
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index b249d4df899e..6e1ac330d7a6 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -13,7 +13,7 @@
#define pr_fmt(fmt) "GICv2m: " fmt
#include <linux/acpi.h>
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5ff09de6c48f..973ede0197e3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -11,9 +11,9 @@
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
-#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
@@ -1574,13 +1574,15 @@ static int its_select_cpu(struct irq_data *d,
const struct cpumask *aff_mask)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- cpumask_var_t tmpmask;
+ static DEFINE_RAW_SPINLOCK(tmpmask_lock);
+ static struct cpumask __tmpmask;
+ struct cpumask *tmpmask;
+ unsigned long flags;
int cpu, node;
-
- if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
- return -ENOMEM;
-
node = its_dev->its->numa_node;
+ tmpmask = &__tmpmask;
+
+ raw_spin_lock_irqsave(&tmpmask_lock, flags);
if (!irqd_affinity_is_managed(d)) {
/* First try the NUMA node */
@@ -1634,7 +1636,7 @@ static int its_select_cpu(struct irq_data *d,
cpu = cpumask_pick_least_loaded(d, tmpmask);
}
out:
- free_cpumask_var(tmpmask);
+ raw_spin_unlock_irqrestore(&tmpmask_lock, flags);
pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
return cpu;
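The its_select_cpu() change above trades a per-call GFP_ATOMIC cpumask allocation for a single static scratch mask serialized by a raw spinlock, so the path can no longer fail with -ENOMEM. A stand-alone sketch of the same pattern (function and variable names are illustrative, and the selection policy is reduced to "first online CPU"):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(scratch_lock);
static struct cpumask scratch_mask;

static int pick_target_cpu(const struct cpumask *aff)
{
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&scratch_lock, flags);
	cpumask_and(&scratch_mask, aff, cpu_online_mask);
	cpu = cpumask_first(&scratch_mask);	/* stand-in for the real policy */
	raw_spin_unlock_irqrestore(&scratch_lock, flags);

	return cpu < nr_cpu_ids ? cpu : -EINVAL;
}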
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index a2163d32f17d..e1efdec9e9ac 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) "GICv3: " fmt
-#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 4ea71b28f9f5..a6277dea4c7a 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -94,7 +94,7 @@ bool gic_cpuif_has_vsgi(void)
{
unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_GIC_SHIFT);
+ fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
return fld >= 0x3;
}
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index b4927e425f7b..527c90e0920e 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
@@ -18,7 +19,6 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#include <linux/dma-iommu.h>
#define MSI_IRQS_PER_MSIR 32
#define MSI_MSIR_OFFSET 4
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a73763d475f0..6a3f7498ea8e 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,7 @@ static int stm32_exti_h_domain_alloc(struct irq_domain *dm,
irq_domain_set_hwirq_and_chip(dm, virq, hwirq, chip, chip_data);
- if (!host_data->drv_data || !host_data->drv_data->desc_irqs)
+ if (!host_data->drv_data->desc_irqs)
return -EINVAL;
desc_irq = host_data->drv_data->desc_irqs[hwirq];
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index e69c4bf557bf..ae24848af233 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -798,7 +798,7 @@ u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
u16 ret;
if (contr == 0) {
- strlcpy(serial, driver_serial, CAPI_SERIAL_LEN);
+ strscpy(serial, driver_serial, CAPI_SERIAL_LEN);
return CAPI_NOERROR;
}
@@ -806,7 +806,7 @@ u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
- strlcpy(serial, ctr->serial, CAPI_SERIAL_LEN);
+ strscpy(serial, ctr->serial, CAPI_SERIAL_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index 7ea10db20e3a..48133d022812 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -59,6 +59,7 @@ struct l1oip {
int bundle; /* bundle channels in one frm */
int codec; /* codec to use for transmis. */
int limit; /* limit number of bchannels */
+ bool shutdown; /* if card is released */
/* timer */
struct timer_list keep_tl;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 2c40412466e6..a77195e378b7 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
p = frame;
/* restart timer */
- if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
@@ -601,7 +601,9 @@ multiframe:
goto multiframe;
/* restart timer */
- if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
+ if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
+ !hc->timeout_on) &&
+ !hc->shutdown) {
hc->timeout_on = 1;
mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
@@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc)
{
int ch;
- if (timer_pending(&hc->keep_tl))
- del_timer(&hc->keep_tl);
+ hc->shutdown = true;
- if (timer_pending(&hc->timeout_tl))
- del_timer(&hc->timeout_tl);
+ del_timer_sync(&hc->keep_tl);
+ del_timer_sync(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
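
The l1oip change above pairs a shutdown flag with del_timer_sync() so the keepalive/timeout timers cannot re-arm themselves while the card is being torn down. A minimal sketch of that pattern, with made-up demo_* names and assuming <linux/timer.h>:

struct demo_card {
	struct timer_list keepalive;
	bool shutdown;
};

static void demo_keepalive_fn(struct timer_list *t)
{
	struct demo_card *card = from_timer(card, t, keepalive);

	if (!card->shutdown)			/* re-arm only while the card is live */
		mod_timer(&card->keepalive, jiffies + 10 * HZ);
}

static void demo_release_card(struct demo_card *card)
{
	card->shutdown = true;			/* stop any further re-arming ... */
	del_timer_sync(&card->keepalive);	/* ... then wait for a running callback to finish */
}
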
diff --git a/drivers/leds/flash/leds-as3645a.c b/drivers/leds/flash/leds-as3645a.c
index aa3f82be0a9c..bb2249771acb 100644
--- a/drivers/leds/flash/leds-as3645a.c
+++ b/drivers/leds/flash/leds-as3645a.c
@@ -724,7 +724,7 @@ out_put_nodes:
return rval;
}
-static int as3645a_remove(struct i2c_client *client)
+static void as3645a_remove(struct i2c_client *client)
{
struct as3645a *flash = i2c_get_clientdata(client);
@@ -740,8 +740,6 @@ static int as3645a_remove(struct i2c_client *client)
fwnode_handle_put(flash->flash_node);
fwnode_handle_put(flash->indicator_node);
-
- return 0;
}
static const struct i2c_device_id as3645a_id_table[] = {
diff --git a/drivers/leds/flash/leds-lm3601x.c b/drivers/leds/flash/leds-lm3601x.c
index d0e1d4814042..78730e066a73 100644
--- a/drivers/leds/flash/leds-lm3601x.c
+++ b/drivers/leds/flash/leds-lm3601x.c
@@ -440,15 +440,16 @@ static int lm3601x_probe(struct i2c_client *client)
return lm3601x_register_leds(led, fwnode);
}
-static int lm3601x_remove(struct i2c_client *client)
+static void lm3601x_remove(struct i2c_client *client)
{
struct lm3601x_led *led = i2c_get_clientdata(client);
+ int ret;
- mutex_destroy(&led->lock);
-
- return regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
- LM3601X_ENABLE_MASK,
- LM3601X_MODE_STANDBY);
+ ret = regmap_update_bits(led->regmap, LM3601X_ENABLE_REG,
+ LM3601X_ENABLE_MASK, LM3601X_MODE_STANDBY);
+ if (ret)
+ dev_warn(&client->dev,
+ "Failed to put into standby (%pe)\n", ERR_PTR(ret));
}
static const struct i2c_device_id lm3601x_id[] = {
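
The lm3601x hunk above is one instance of the conversion running through this series: i2c ->remove() callbacks now return void, so cleanup failures are logged with dev_warn()/dev_err() instead of being returned (the I2C core ignored the return value anyway). A hedged sketch of the new callback shape, with made-up foo_* names and register:

struct foo_chip {
	struct regmap *regmap;
	struct mutex lock;
};

#define FOO_ENABLE_REG	0x0a	/* made-up register address */

static void foo_remove(struct i2c_client *client)
{
	struct foo_chip *chip = i2c_get_clientdata(client);
	int ret;

	ret = regmap_write(chip->regmap, FOO_ENABLE_REG, 0);
	if (ret)	/* nothing can act on the error during remove; just report it */
		dev_warn(&client->dev, "failed to disable chip (%pe)\n",
			 ERR_PTR(ret));

	mutex_destroy(&chip->lock);
}

static struct i2c_driver foo_driver = {
	.driver	= { .name = "foo" },
	.remove	= foo_remove,	/* void (*remove)(struct i2c_client *) once this series is applied */
};
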
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
index ee129ab7255d..e404fe8b0314 100644
--- a/drivers/leds/flash/leds-rt4505.c
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -393,12 +393,11 @@ static int rt4505_probe(struct i2c_client *client)
return 0;
}
-static int rt4505_remove(struct i2c_client *client)
+static void rt4505_remove(struct i2c_client *client)
{
struct rt4505_priv *priv = i2c_get_clientdata(client);
v4l2_flash_release(priv->v4l2_flash);
- return 0;
}
static void rt4505_shutdown(struct i2c_client *client)
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index a0df1fb28774..e072ee5409f7 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -334,13 +334,11 @@ exit:
return err;
}
-static int an30259a_remove(struct i2c_client *client)
+static void an30259a_remove(struct i2c_client *client)
{
struct an30259a *chip = i2c_get_clientdata(client);
mutex_destroy(&chip->mutex);
-
- return 0;
}
static const struct of_device_id an30259a_match_table[] = {
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 80d937454aee..0b52fc9097c6 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -401,15 +401,13 @@ error:
return ret;
}
-static int aw2013_remove(struct i2c_client *client)
+static void aw2013_remove(struct i2c_client *client)
{
struct aw2013 *chip = i2c_get_clientdata(client);
aw2013_chip_disable(chip);
mutex_destroy(&chip->mutex);
-
- return 0;
}
static const struct of_device_id aw2013_match_table[] = {
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 8bbaef5a2986..2b6678f6bd56 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -722,7 +722,7 @@ failed_unregister_dev_file:
return ret;
}
-static int bd2802_remove(struct i2c_client *client)
+static void bd2802_remove(struct i2c_client *client)
{
struct bd2802_led *led = i2c_get_clientdata(client);
int i;
@@ -733,8 +733,6 @@ static int bd2802_remove(struct i2c_client *client)
bd2802_disable_adv_conf(led);
for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
device_remove_file(&led->client->dev, bd2802_attributes[i]);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index bd7d0d5cf3b6..3fb6a2fdaefa 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -677,7 +677,7 @@ exit:
return err;
}
-static int blinkm_remove(struct i2c_client *client)
+static void blinkm_remove(struct i2c_client *client)
{
struct blinkm_data *data = i2c_get_clientdata(client);
int ret = 0;
@@ -716,7 +716,6 @@ static int blinkm_remove(struct i2c_client *client)
dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
sysfs_remove_group(&client->dev.kobj, &blinkm_group);
- return 0;
}
static const struct i2c_device_id blinkm_id[] = {
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index fc63fce38c19..0d219c1ac3b5 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -457,7 +457,7 @@ static int is31fl32xx_probe(struct i2c_client *client,
return 0;
}
-static int is31fl32xx_remove(struct i2c_client *client)
+static void is31fl32xx_remove(struct i2c_client *client)
{
struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
int ret;
@@ -466,8 +466,6 @@ static int is31fl32xx_remove(struct i2c_client *client)
if (ret)
dev_err(&client->dev, "Failed to reset registers on removal (%pe)\n",
ERR_PTR(ret));
-
- return 0;
}
/*
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index e72393534b72..ba906c253c7f 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -470,13 +470,12 @@ static int lm3530_probe(struct i2c_client *client,
return 0;
}
-static int lm3530_remove(struct i2c_client *client)
+static void lm3530_remove(struct i2c_client *client)
{
struct lm3530_data *drvdata = i2c_get_clientdata(client);
lm3530_led_disable(drvdata);
led_classdev_unregister(&drvdata->led_dev);
- return 0;
}
static const struct i2c_device_id lm3530_id[] = {
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index beb53040e09e..db64d44bcbbf 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -704,7 +704,7 @@ static int lm3532_probe(struct i2c_client *client,
return ret;
}
-static int lm3532_remove(struct i2c_client *client)
+static void lm3532_remove(struct i2c_client *client)
{
struct lm3532_data *drvdata = i2c_get_clientdata(client);
@@ -712,8 +712,6 @@ static int lm3532_remove(struct i2c_client *client)
if (drvdata->enable_gpio)
gpiod_direction_output(drvdata->enable_gpio, 0);
-
- return 0;
}
static const struct of_device_id of_lm3532_leds_match[] = {
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 2d3e11845ba5..daa35927b301 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -491,7 +491,7 @@ err_out:
return err;
}
-static int lm355x_remove(struct i2c_client *client)
+static void lm355x_remove(struct i2c_client *client)
{
struct lm355x_chip_data *chip = i2c_get_clientdata(client);
struct lm355x_reg_data *preg = chip->regs;
@@ -501,8 +501,6 @@ static int lm355x_remove(struct i2c_client *client)
led_classdev_unregister(&chip->cdev_torch);
led_classdev_unregister(&chip->cdev_flash);
dev_info(&client->dev, "%s is removed\n", lm355x_name[chip->type]);
-
- return 0;
}
static const struct i2c_device_id lm355x_id[] = {
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 435309154e6b..428a5d928150 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -380,7 +380,7 @@ err_out:
return err;
}
-static int lm3642_remove(struct i2c_client *client)
+static void lm3642_remove(struct i2c_client *client)
{
struct lm3642_chip_data *chip = i2c_get_clientdata(client);
@@ -388,7 +388,6 @@ static int lm3642_remove(struct i2c_client *client)
led_classdev_unregister(&chip->cdev_torch);
led_classdev_unregister(&chip->cdev_flash);
regmap_write(chip->regmap, REG_ENABLE, 0);
- return 0;
}
static const struct i2c_device_id lm3642_id[] = {
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 87cd24ce3f95..54b4662bff41 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -491,14 +491,12 @@ static int lm3692x_probe(struct i2c_client *client,
return 0;
}
-static int lm3692x_remove(struct i2c_client *client)
+static void lm3692x_remove(struct i2c_client *client)
{
struct lm3692x_led *led = i2c_get_clientdata(client);
lm3692x_leds_disable(led);
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lm3692x_id[] = {
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index a8c9322558cc..71231a60eebc 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -337,7 +337,7 @@ static int lm3697_probe(struct i2c_client *client,
return lm3697_init(led);
}
-static int lm3697_remove(struct i2c_client *client)
+static void lm3697_remove(struct i2c_client *client)
{
struct lm3697 *led = i2c_get_clientdata(client);
struct device *dev = &led->client->dev;
@@ -345,10 +345,8 @@ static int lm3697_remove(struct i2c_client *client)
ret = regmap_update_bits(led->regmap, LM3697_CTRL_ENABLE,
LM3697_CTRL_A_B_EN, 0);
- if (ret) {
+ if (ret)
dev_err(dev, "Failed to disable the device\n");
- return ret;
- }
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -360,8 +358,6 @@ static int lm3697_remove(struct i2c_client *client)
}
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lm3697_id[] = {
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 437c711b2a27..673ad8c04f41 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -397,7 +397,7 @@ static int lp3944_probe(struct i2c_client *client,
return 0;
}
-static int lp3944_remove(struct i2c_client *client)
+static void lp3944_remove(struct i2c_client *client)
{
struct lp3944_platform_data *pdata = dev_get_platdata(&client->dev);
struct lp3944_data *data = i2c_get_clientdata(client);
@@ -414,8 +414,6 @@ static int lp3944_remove(struct i2c_client *client)
default:
break;
}
-
- return 0;
}
/* lp3944 i2c driver struct */
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index 6ee9131fbf25..bf0ad1b5ce24 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -255,15 +255,13 @@ static int lp3952_probe(struct i2c_client *client,
return 0;
}
-static int lp3952_remove(struct i2c_client *client)
+static void lp3952_remove(struct i2c_client *client)
{
struct lp3952_led_array *priv;
priv = i2c_get_clientdata(client);
lp3952_on_off(priv, LP3952_LED_ALL, false);
gpiod_set_value(priv->enable_gpio, 0);
-
- return 0;
}
static const struct i2c_device_id lp3952_id[] = {
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index e129dcc656b8..28d6b39fa72d 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -563,7 +563,7 @@ static int lp50xx_probe(struct i2c_client *client)
return lp50xx_probe_dt(led);
}
-static int lp50xx_remove(struct i2c_client *client)
+static void lp50xx_remove(struct i2c_client *client)
{
struct lp50xx *led = i2c_get_clientdata(client);
int ret;
@@ -579,8 +579,6 @@ static int lp50xx_remove(struct i2c_client *client)
}
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lp50xx_id[] = {
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index a9e7507c998c..7ff20c260504 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -579,7 +579,7 @@ err_init:
return ret;
}
-static int lp5521_remove(struct i2c_client *client)
+static void lp5521_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -587,8 +587,6 @@ static int lp5521_remove(struct i2c_client *client)
lp5521_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp5521_id[] = {
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index b1590cb4a188..369d40b0b65b 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -947,7 +947,7 @@ err_init:
return ret;
}
-static int lp5523_remove(struct i2c_client *client)
+static void lp5523_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -955,8 +955,6 @@ static int lp5523_remove(struct i2c_client *client)
lp5523_stop_all_engines(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp5523_id[] = {
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 31c14016d289..0e490085ff35 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -573,7 +573,7 @@ err_init:
return ret;
}
-static int lp5562_remove(struct i2c_client *client)
+static void lp5562_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -582,8 +582,6 @@ static int lp5562_remove(struct i2c_client *client)
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp5562_id[] = {
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index 2d2fda2ab104..ae11a02c0ab2 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -362,7 +362,7 @@ err_init:
return ret;
}
-static int lp8501_remove(struct i2c_client *client)
+static void lp8501_remove(struct i2c_client *client)
{
struct lp55xx_led *led = i2c_get_clientdata(client);
struct lp55xx_chip *chip = led->chip;
@@ -370,8 +370,6 @@ static int lp8501_remove(struct i2c_client *client)
lp8501_stop_engine(chip);
lp55xx_unregister_sysfs(chip);
lp55xx_deinit_device(chip);
-
- return 0;
}
static const struct i2c_device_id lp8501_id[] = {
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 3c693d5e3b44..e2b36d3187eb 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -445,7 +445,7 @@ static int lp8860_probe(struct i2c_client *client,
return 0;
}
-static int lp8860_remove(struct i2c_client *client)
+static void lp8860_remove(struct i2c_client *client)
{
struct lp8860_led *led = i2c_get_clientdata(client);
int ret;
@@ -461,8 +461,6 @@ static int lp8860_remove(struct i2c_client *client)
}
mutex_destroy(&led->lock);
-
- return 0;
}
static const struct i2c_device_id lp8860_id[] = {
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index f72b5d1be3a6..df83d97cb479 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -52,7 +52,7 @@ struct pca9532_data {
static int pca9532_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int pca9532_remove(struct i2c_client *client);
+static void pca9532_remove(struct i2c_client *client);
enum {
pca9530,
@@ -546,13 +546,11 @@ static int pca9532_probe(struct i2c_client *client,
return pca9532_configure(client, data, pca9532_pdata);
}
-static int pca9532_remove(struct i2c_client *client)
+static void pca9532_remove(struct i2c_client *client)
{
struct pca9532_data *data = i2c_get_clientdata(client);
pca9532_destroy_devices(data, data->chip_info->num_leds);
-
- return 0;
}
module_i2c_driver(pca9532_driver);
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 1473ced8664c..161bef65c6b7 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -790,7 +790,7 @@ exit:
return err;
}
-static int tca6507_remove(struct i2c_client *client)
+static void tca6507_remove(struct i2c_client *client)
{
int i;
struct tca6507_chip *tca = i2c_get_clientdata(client);
@@ -802,8 +802,6 @@ static int tca6507_remove(struct i2c_client *client)
}
tca6507_remove_gpio(tca);
cancel_work_sync(&tca->work);
-
- return 0;
}
static struct i2c_driver tca6507_driver = {
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index eac6f4a573b2..c7c9851c894a 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -242,7 +242,7 @@ static int omnia_leds_probe(struct i2c_client *client,
return 0;
}
-static int omnia_leds_remove(struct i2c_client *client)
+static void omnia_leds_remove(struct i2c_client *client)
{
u8 buf[5];
@@ -258,8 +258,6 @@ static int omnia_leds_remove(struct i2c_client *client)
buf[4] = 255;
i2c_master_send(client, buf, 5);
-
- return 0;
}
static const struct of_device_id of_omnia_leds_match[] = {
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
index 4c9e663a90ba..b9eeb8702df0 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
@@ -13,28 +13,45 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
-static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+static struct gpiod_lookup_table *simatic_ipc_led_gpio_table;
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table_127e = {
.dev_id = "leds-gpio",
.table = {
- GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
},
};
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table_227g = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 0, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 1, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 2, NULL, 2, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 3, NULL, 3, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 4, NULL, 4, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-2", 5, NULL, 5, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("gpio-f7188x-3", 6, NULL, 6, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-f7188x-3", 7, NULL, 7, GPIO_ACTIVE_HIGH),
+ }
+};
+
static const struct gpio_led simatic_ipc_gpio_leds[] = {
- { .name = "green:" LED_FUNCTION_STATUS "-3" },
{ .name = "red:" LED_FUNCTION_STATUS "-1" },
{ .name = "green:" LED_FUNCTION_STATUS "-1" },
{ .name = "red:" LED_FUNCTION_STATUS "-2" },
{ .name = "green:" LED_FUNCTION_STATUS "-2" },
{ .name = "red:" LED_FUNCTION_STATUS "-3" },
+ { .name = "green:" LED_FUNCTION_STATUS "-3" },
};
static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
@@ -46,7 +63,7 @@ static struct platform_device *simatic_leds_pdev;
static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
{
- gpiod_remove_lookup_table(&simatic_ipc_led_gpio_table);
+ gpiod_remove_lookup_table(simatic_ipc_led_gpio_table);
platform_device_unregister(simatic_leds_pdev);
return 0;
@@ -54,10 +71,25 @@ static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
{
+ const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
struct gpio_desc *gpiod;
int err;
- gpiod_add_lookup_table(&simatic_ipc_led_gpio_table);
+ switch (plat->devmode) {
+ case SIMATIC_IPC_DEVICE_127E:
+ simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_127e;
+ break;
+ case SIMATIC_IPC_DEVICE_227G:
+ if (!IS_ENABLED(CONFIG_GPIO_F7188X))
+ return -ENODEV;
+ request_module("gpio-f7188x");
+ simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_227g;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ gpiod_add_lookup_table(simatic_ipc_led_gpio_table);
simatic_leds_pdev = platform_device_register_resndata(NULL,
"leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
&simatic_ipc_gpio_leds_pdata,
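
The probe rework above selects one of several GPIO lookup tables based on the detected board variant before registering the leds-gpio child device. A rough sketch of that flow with placeholder names (demo_pdata, DEMO_BOARD_*, demo_table_*), assuming the tables are populated elsewhere:

struct demo_pdata { unsigned int board; };
#define DEMO_BOARD_A	1
#define DEMO_BOARD_B	2

static struct gpiod_lookup_table demo_table_a, demo_table_b;	/* filled in elsewhere */
static struct gpiod_lookup_table *demo_active_table;

static int demo_leds_probe(struct platform_device *pdev)
{
	const struct demo_pdata *plat = pdev->dev.platform_data;

	switch (plat->board) {
	case DEMO_BOARD_A:
		demo_active_table = &demo_table_a;
		break;
	case DEMO_BOARD_B:
		demo_active_table = &demo_table_b;
		break;
	default:
		return -ENODEV;		/* unknown variant: refuse to bind */
	}

	gpiod_add_lookup_table(demo_active_table);
	/* ... register the "leds-gpio" platform device here ... */
	return 0;
}
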
diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c
index d2f0cde6f9c7..3ded340699fb 100644
--- a/drivers/macintosh/ams/ams-i2c.c
+++ b/drivers/macintosh/ams/ams-i2c.c
@@ -58,7 +58,7 @@ enum ams_i2c_cmd {
static int ams_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id);
-static int ams_i2c_remove(struct i2c_client *client);
+static void ams_i2c_remove(struct i2c_client *client);
static const struct i2c_device_id ams_id[] = {
{ "MAC,accelerometer_1", 0 },
@@ -230,7 +230,7 @@ static int ams_i2c_probe(struct i2c_client *client,
return 0;
}
-static int ams_i2c_remove(struct i2c_client *client)
+static void ams_i2c_remove(struct i2c_client *client)
{
if (ams_info.has_device) {
ams_sensor_detach();
@@ -245,8 +245,6 @@ static int ams_i2c_remove(struct i2c_client *client)
ams_info.has_device = 0;
}
-
- return 0;
}
static void ams_i2c_exit(void)
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index e604cbc91763..b004ea2a1102 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -563,7 +563,7 @@ static int probe_thermostat(struct i2c_client *client,
return 0;
}
-static int remove_thermostat(struct i2c_client *client)
+static void remove_thermostat(struct i2c_client *client)
{
struct thermostat *th = i2c_get_clientdata(client);
int i;
@@ -585,8 +585,6 @@ static int remove_thermostat(struct i2c_client *client)
write_both_fan_speed(th, -1);
kfree(th);
-
- return 0;
}
static const struct i2c_device_id therm_adt746x_id[] = {
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 9226b74fa08f..b8228ca40454 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -317,24 +317,26 @@ static void do_attach(struct i2c_adapter *adapter)
if (x.running || strncmp(adapter->name, "uni-n", 5))
return;
+ of_node_get(adapter->dev.of_node);
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
if (np) {
of_node_put(np);
} else {
- strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
+ strscpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL);
}
+ of_node_get(adapter->dev.of_node);
np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
if (np) {
of_node_put(np);
} else {
- strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
+ strscpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
i2c_new_scanned_device(adapter, &info, scan_adm1030, NULL);
}
}
-static int
+static void
do_remove(struct i2c_client *client)
{
if (x.running) {
@@ -348,8 +350,6 @@ do_remove(struct i2c_client *client)
x.fan = NULL;
else
printk(KERN_ERR "g4fan: bad client\n");
-
- return 0;
}
static int
diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
index 6ad6441abcbc..c5c54a4ce91f 100644
--- a/drivers/macintosh/windfarm_ad7417_sensor.c
+++ b/drivers/macintosh/windfarm_ad7417_sensor.c
@@ -289,7 +289,7 @@ static int wf_ad7417_probe(struct i2c_client *client,
return 0;
}
-static int wf_ad7417_remove(struct i2c_client *client)
+static void wf_ad7417_remove(struct i2c_client *client)
{
struct wf_ad7417_priv *pv = dev_get_drvdata(&client->dev);
int i;
@@ -302,8 +302,6 @@ static int wf_ad7417_remove(struct i2c_client *client)
wf_unregister_sensor(&pv->sensors[i]);
kref_put(&pv->ref, wf_ad7417_release);
-
- return 0;
}
static const struct i2c_device_id wf_ad7417_id[] = {
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 82e7b2005ae7..c5b1ca5bcd73 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -560,7 +560,7 @@ static int wf_fcu_probe(struct i2c_client *client,
return 0;
}
-static int wf_fcu_remove(struct i2c_client *client)
+static void wf_fcu_remove(struct i2c_client *client)
{
struct wf_fcu_priv *pv = dev_get_drvdata(&client->dev);
struct wf_fcu_fan *fan;
@@ -571,7 +571,6 @@ static int wf_fcu_remove(struct i2c_client *client)
wf_unregister_control(&fan->ctrl);
}
kref_put(&pv->ref, wf_fcu_release);
- return 0;
}
static const struct i2c_device_id wf_fcu_id[] = {
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index eb7e7f0bd219..204661c8e918 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -147,7 +147,7 @@ static int wf_lm75_probe(struct i2c_client *client,
return rc;
}
-static int wf_lm75_remove(struct i2c_client *client)
+static void wf_lm75_remove(struct i2c_client *client)
{
struct wf_lm75_sensor *lm = i2c_get_clientdata(client);
@@ -156,8 +156,6 @@ static int wf_lm75_remove(struct i2c_client *client)
/* release sensor */
wf_unregister_sensor(&lm->sens);
-
- return 0;
}
static const struct i2c_device_id wf_lm75_id[] = {
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index 807efdde86bc..40d25463346e 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -145,7 +145,7 @@ static int wf_lm87_probe(struct i2c_client *client,
return rc;
}
-static int wf_lm87_remove(struct i2c_client *client)
+static void wf_lm87_remove(struct i2c_client *client)
{
struct wf_lm87_sensor *lm = i2c_get_clientdata(client);
@@ -154,8 +154,6 @@ static int wf_lm87_remove(struct i2c_client *client)
/* release sensor */
wf_unregister_sensor(&lm->sens);
-
- return 0;
}
static const struct i2c_device_id wf_lm87_id[] = {
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 55ee417fb878..c0d404ebc792 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -104,14 +104,12 @@ static int wf_max6690_probe(struct i2c_client *client,
return rc;
}
-static int wf_max6690_remove(struct i2c_client *client)
+static void wf_max6690_remove(struct i2c_client *client)
{
struct wf_6690_sensor *max = i2c_get_clientdata(client);
max->i2c = NULL;
wf_unregister_sensor(&max->sens);
-
- return 0;
}
static const struct i2c_device_id wf_max6690_id[] = {
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 5ade627eaa78..be5d4593db93 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -316,7 +316,7 @@ static int wf_sat_probe(struct i2c_client *client,
return 0;
}
-static int wf_sat_remove(struct i2c_client *client)
+static void wf_sat_remove(struct i2c_client *client)
{
struct wf_sat *sat = i2c_get_clientdata(client);
struct wf_sat_sensor *sens;
@@ -330,8 +330,6 @@ static int wf_sat_remove(struct i2c_client *client)
}
sat->i2c = NULL;
kref_put(&sat->ref, wf_sat_release);
-
- return 0;
}
static const struct i2c_device_id wf_sat_id[] = {
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
index 496c4951ccb1..2a3e8d8ff8b5 100644
--- a/drivers/mailbox/apple-mailbox.c
+++ b/drivers/mailbox/apple-mailbox.c
@@ -17,6 +17,7 @@
*/
#include <linux/apple-mailbox.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
@@ -25,6 +26,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
@@ -100,6 +102,7 @@ struct apple_mbox {
struct device *dev;
struct mbox_controller controller;
+ spinlock_t rx_lock;
};
static const struct of_device_id apple_mbox_of_match[];
@@ -112,6 +115,14 @@ static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
return !(mbox_ctrl & apple_mbox->hw->control_full);
}
+static bool apple_mbox_hw_send_empty(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return mbox_ctrl & apple_mbox->hw->control_empty;
+}
+
static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
struct apple_mbox_msg *msg)
{
@@ -195,13 +206,15 @@ static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+static int apple_mbox_poll(struct apple_mbox *apple_mbox)
{
- struct apple_mbox *apple_mbox = data;
struct apple_mbox_msg msg;
+ int ret = 0;
- while (apple_mbox_hw_recv(apple_mbox, &msg) == 0)
+ while (apple_mbox_hw_recv(apple_mbox, &msg) == 0) {
mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
+ ret++;
+ }
/*
* The interrupt will keep firing even if there are no more messages
@@ -216,9 +229,50 @@ static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
apple_mbox->regs + apple_mbox->hw->irq_ack);
}
+ return ret;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ spin_lock(&apple_mbox->rx_lock);
+ apple_mbox_poll(apple_mbox);
+ spin_unlock(&apple_mbox->rx_lock);
+
return IRQ_HANDLED;
}
+static bool apple_mbox_chan_peek_data(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&apple_mbox->rx_lock, flags);
+ ret = apple_mbox_poll(apple_mbox);
+ spin_unlock_irqrestore(&apple_mbox->rx_lock, flags);
+
+ return ret > 0;
+}
+
+static int apple_mbox_chan_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ unsigned long deadline = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, deadline)) {
+ if (apple_mbox_hw_send_empty(apple_mbox)) {
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
static int apple_mbox_chan_startup(struct mbox_chan *chan)
{
struct apple_mbox *apple_mbox = chan->con_priv;
@@ -250,6 +304,8 @@ static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
static const struct mbox_chan_ops apple_mbox_ops = {
.send_data = apple_mbox_chan_send_data,
+ .peek_data = apple_mbox_chan_peek_data,
+ .flush = apple_mbox_chan_flush,
.startup = apple_mbox_chan_startup,
.shutdown = apple_mbox_chan_shutdown,
};
@@ -304,6 +360,7 @@ static int apple_mbox_probe(struct platform_device *pdev)
mbox->controller.txdone_irq = true;
mbox->controller.of_xlate = apple_mbox_of_xlate;
mbox->chan.con_priv = mbox;
+ spin_lock_init(&mbox->rx_lock);
irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
if (!irqname)
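
The apple-mailbox additions above give the channel a peek_data hook (polling the RX FIFO under the same lock as the interrupt handler) and a flush hook that busy-waits on the TX-empty bit up to a deadline. A minimal sketch of the flush side; demo_tx_fifo_empty() stands in for the real control-register read and is not from the patch:

static int demo_flush(struct mbox_chan *chan, unsigned long timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, deadline)) {
		if (demo_tx_fifo_empty(chan->con_priv)) {
			mbox_chan_txdone(chan, 0);	/* report the pending TX as done */
			return 0;
		}
		udelay(1);	/* short busy-wait: flush can be called from atomic context */
	}

	return -ETIME;
}
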
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index fda16f76401e..bf6e86b0ed09 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -622,15 +622,15 @@ static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- if (rc < 0)
- return rc;
+ if (!rc)
+ return -EIO;
rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
DMA_FROM_DEVICE);
- if (rc < 0) {
+ if (!rc) {
dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
DMA_TO_DEVICE);
- return rc;
+ return -EIO;
}
return 0;
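
The flexrm fix above reflects the dma_map_sg() return convention: it returns the number of mapped entries and 0 on failure, never a negative errno, so the caller must supply its own error code. A small sketch of the corrected usage (demo-only function, assuming <linux/dma-mapping.h>):

static int demo_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)			/* 0 means the mapping failed */
		return -EIO;

	/* ... hand the buffers to the hardware ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	/* unmap with the original nents */
	return 0;
}
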
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 02922073c9ef..20f2ec880ad6 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -904,7 +904,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
.xTR = 0x20,
.xRR = 0x40,
.xSR = {0x60, 0x60, 0x60, 0x60},
- .xCR = {0x64, 0x64, 0x64, 0x64},
+ .xCR = {0x64, 0x64, 0x64, 0x64, 0x64},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
@@ -927,7 +927,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
@@ -938,7 +938,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
.xTR = 0x200,
.xRR = 0x280,
.xSR = {0xC, 0x118, 0x124, 0x12C},
- .xCR = {0x110, 0x114, 0x120, 0x128},
+ .xCR = {0x8, 0x110, 0x114, 0x120, 0x128},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
@@ -949,7 +949,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
@@ -960,7 +960,7 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
- .xCR = {0x24, 0x24, 0x24, 0x24},
+ .xCR = {0x24, 0x24, 0x24, 0x24, 0x24},
};
static const struct of_device_id imx_mu_dt_ids[] = {
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 4e34854d1238..cfacb3f320a6 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -62,6 +62,7 @@ struct mpfs_mbox {
struct mbox_controller controller;
struct device *dev;
int irq;
+ void __iomem *ctrl_base;
void __iomem *mbox_base;
void __iomem *int_reg;
struct mbox_chan chans[1];
@@ -73,7 +74,7 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
{
u32 status;
- status = readl_relaxed(mbox->mbox_base + SERVICES_SR_OFFSET);
+ status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
return status & SCB_STATUS_BUSY_MASK;
}
@@ -99,29 +100,27 @@ static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
for (index = 0; index < (msg->cmd_data_size / 4); index++)
writel_relaxed(word_buf[index],
- mbox->mbox_base + MAILBOX_REG_OFFSET + index * 0x4);
+ mbox->mbox_base + msg->mbox_offset + index * 0x4);
if (extra_bits) {
u8 i;
u8 byte_off = ALIGN_DOWN(msg->cmd_data_size, 4);
u8 *byte_buf = msg->cmd_data + byte_off;
- val = readl_relaxed(mbox->mbox_base +
- MAILBOX_REG_OFFSET + index * 0x4);
+ val = readl_relaxed(mbox->mbox_base + msg->mbox_offset + index * 0x4);
for (i = 0u; i < extra_bits; i++) {
val &= ~(0xffu << (i * 8u));
val |= (byte_buf[i] << (i * 8u));
}
- writel_relaxed(val,
- mbox->mbox_base + MAILBOX_REG_OFFSET + index * 0x4);
+ writel_relaxed(val, mbox->mbox_base + msg->mbox_offset + index * 0x4);
}
}
opt_sel = ((msg->mbox_offset << 7u) | (msg->cmd_opcode & 0x7fu));
tx_trigger = (opt_sel << SCB_CTRL_POS) & SCB_CTRL_MASK;
tx_trigger |= SCB_CTRL_REQ_MASK | SCB_STATUS_NOTIFY_MASK;
- writel_relaxed(tx_trigger, mbox->mbox_base + SERVICES_CR_OFFSET);
+ writel_relaxed(tx_trigger, mbox->ctrl_base + SERVICES_CR_OFFSET);
return 0;
}
@@ -141,7 +140,7 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
if (!mpfs_mbox_busy(mbox)) {
for (i = 0; i < num_words; i++) {
response->resp_msg[i] =
- readl_relaxed(mbox->mbox_base + MAILBOX_REG_OFFSET
+ readl_relaxed(mbox->mbox_base
+ mbox->resp_offset + i * 0x4);
}
}
@@ -200,14 +199,18 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
if (!mbox)
return -ENOMEM;
- mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
- if (IS_ERR(mbox->mbox_base))
- return PTR_ERR(mbox->mbox_base);
+ mbox->ctrl_base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
+ if (IS_ERR(mbox->ctrl_base))
+ return PTR_ERR(mbox->ctrl_base);
mbox->int_reg = devm_platform_get_and_ioremap_resource(pdev, 1, &regs);
if (IS_ERR(mbox->int_reg))
return PTR_ERR(mbox->int_reg);
+ mbox->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 2, &regs);
+ if (IS_ERR(mbox->mbox_base)) // account for the old dt-binding w/ 2 regs
+ mbox->mbox_base = mbox->ctrl_base + MAILBOX_REG_OFFSET;
+
mbox->irq = platform_get_irq(pdev, 0);
if (mbox->irq < 0)
return mbox->irq;
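
The mpfs probe change above treats the third "reg" region as optional so older two-region device trees keep working, falling back to a fixed offset inside the control block. A minimal sketch of that fallback; DEMO_MBOX_OFFSET is a placeholder constant:

#define DEMO_MBOX_OFFSET	0x800	/* placeholder for the real mailbox offset */

static void __iomem *demo_get_mbox_base(struct platform_device *pdev,
					void __iomem *ctrl_base)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 2);

	/* Old binding: no third region, the mailbox lives inside the control block. */
	return IS_ERR(base) ? ctrl_base + DEMO_MBOX_OFFSET : base;
}
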
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index ebfa33a40fce..3c2bc0ca454c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -676,7 +676,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
!pcc_mbox_ctrl->txdone_irq) {
- pr_err("Plaform Interrupt flag must be set to 1");
+ pr_err("Platform Interrupt flag must be set to 1");
rc = -EINVAL;
goto err;
}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 80a54d81412e..f1f0e87a79e6 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -142,7 +142,7 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq6018_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 31d58b7d55fe..7e27acf6c0cc 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -308,7 +308,8 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
goto err_mbox;
ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
- IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, name, ipcc);
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND |
+ IRQF_NO_THREAD, name, ipcc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
goto err_req_irq;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2acda9cea0f9..aebb7ef10e63 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -107,7 +107,7 @@
*
* BTREE NODES:
*
- * Our unit of allocation is a bucket, and we we can't arbitrarily allocate and
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
* free smaller than a bucket - so, that's how big our btree nodes are.
*
* (If buckets are really big we'll only use part of the bucket for a btree node
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 94d38e8a59b3..2bba4d6aaaa2 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1264,7 +1264,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
*
* Don't worry event 'out' is allocated from mempool, it can
* still be swapped here. Because state->pool is a page mempool
- * creaated by by mempool_init_page_pool(), which allocates
+ * created by mempool_init_page_pool(), which allocates
* pages by alloc_pages() indeed.
*/
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index ca4f435f7216..bd3afc856d53 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -54,7 +54,6 @@ void bch_cache_accounting_destroy(struct cache_accounting *acc);
void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass);
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
void bch_mark_cache_miss_collision(struct cache_set *c,
struct bcache_device *d);
void bch_mark_sectors_bypassed(struct cache_set *c,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3f0ff3aab6f2..0285b676e983 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -157,6 +157,53 @@ static void __update_writeback_rate(struct cached_dev *dc)
dc->writeback_rate_target = target;
}
+static bool idle_counter_exceeded(struct cache_set *c)
+{
+ int counter, dev_nr;
+
+ /*
+ * If c->idle_counter has overflowed (idle for a really long time),
+ * reset it to 0 and do not set the maximum rate this time, for
+ * code simplicity.
+ */
+ counter = atomic_inc_return(&c->idle_counter);
+ if (counter <= 0) {
+ atomic_set(&c->idle_counter, 0);
+ return false;
+ }
+
+ dev_nr = atomic_read(&c->attached_dev_nr);
+ if (dev_nr == 0)
+ return false;
+
+ /*
+ * c->idle_counter is increased by the writeback threads of all
+ * attached backing devices, so to represent a rough time period
+ * the counter should be divided by dev_nr. Otherwise the idle
+ * time could not grow larger as more backing devices are attached.
+ * The following calculation is equivalent to checking
+ * (counter / dev_nr) < (dev_nr * 6)
+ */
+ if (counter < (dev_nr * dev_nr * 6))
+ return false;
+
+ return true;
+}
+
+/*
+ * Idle_counter is increased every time update_writeback_rate() is
+ * called. If all backing devices attached to the same cache set have
+ * identical dc->writeback_rate_update_seconds values, it takes about 6
+ * rounds of update_writeback_rate() on each backing device before
+ * c->at_max_writeback_rate is set to 1, and then the max writeback rate
+ * is set for each dc->writeback_rate.rate.
+ * In order to avoid the extra locking cost of counting the exact number
+ * of dirty cached devices, c->attached_dev_nr is used to calculate the
+ * idle threshold. It might be bigger if not all cached devices are in
+ * writeback mode, but it still works well with limited extra rounds of
+ * update_writeback_rate().
+ */
static bool set_at_max_writeback_rate(struct cache_set *c,
struct cached_dev *dc)
{
@@ -167,21 +214,8 @@ static bool set_at_max_writeback_rate(struct cache_set *c,
/* Don't set max writeback rate if gc is running */
if (!c->gc_mark_valid)
return false;
- /*
- * Idle_counter is increased everytime when update_writeback_rate() is
- * called. If all backing devices attached to the same cache set have
- * identical dc->writeback_rate_update_seconds values, it is about 6
- * rounds of update_writeback_rate() on each backing device before
- * c->at_max_writeback_rate is set to 1, and then max wrteback rate set
- * to each dc->writeback_rate.rate.
- * In order to avoid extra locking cost for counting exact dirty cached
- * devices number, c->attached_dev_nr is used to calculate the idle
- * throushold. It might be bigger if not all cached device are in write-
- * back mode, but it still works well with limited extra rounds of
- * update_writeback_rate().
- */
- if (atomic_inc_return(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6)
+
+ if (!idle_counter_exceeded(c))
return false;
if (atomic_read(&c->at_max_writeback_rate) != 1)
@@ -195,13 +229,10 @@ static bool set_at_max_writeback_rate(struct cache_set *c,
dc->writeback_rate_change = 0;
/*
- * Check c->idle_counter and c->at_max_writeback_rate agagain in case
- * new I/O arrives during before set_at_max_writeback_rate() returns.
- * Then the writeback rate is set to 1, and its new value should be
- * decided via __update_writeback_rate().
+ * Check again in case new I/O arrives before
+ * set_at_max_writeback_rate() returns.
*/
- if ((atomic_read(&c->idle_counter) <
- atomic_read(&c->attached_dev_nr) * 6) ||
+ if (!idle_counter_exceeded(c) ||
!atomic_read(&c->at_max_writeback_rate))
return false;
@@ -801,10 +832,9 @@ static int bch_writeback_thread(void *arg)
}
}
- if (dc->writeback_write_wq) {
- flush_workqueue(dc->writeback_write_wq);
+ if (dc->writeback_write_wq)
destroy_workqueue(dc->writeback_write_wq);
- }
+
cached_dev_put(dc);
wait_for_kthread_stop();
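
A quick numeric check of the threshold comment above: the test counter < dev_nr * dev_nr * 6 is the multiplication-only form of counter / dev_nr < dev_nr * 6, so it avoids a division on this path. For example, with dev_nr = 4 the shared counter has to reach 4 * 4 * 6 = 96 increments before the array is treated as idle.

/* Equivalent check, written with the division it avoids (sketch; counter and
 * dev_nr as in idle_counter_exceeded() above):
 *
 *	(counter / dev_nr) >= (dev_nr * 6)   <==>   counter >= dev_nr * dev_nr * 6
 *
 * e.g. dev_nr = 4: threshold = 4 * 4 * 6 = 96 shared-counter increments.
 */
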
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4f49bbcce4f1..3001b10a3fbf 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -292,11 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+ blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
dm_complete_request(tio->orig, error);
+ return RQ_END_IO_NONE;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
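
The dm-rq hunk above follows the new block-layer convention where request end_io callbacks return enum rq_end_io_ret instead of void, telling the caller whether it should still free the request. A hedged sketch of a callback using the new signature (demo-only, with a completion stored in end_io_data):

static enum rq_end_io_ret demo_end_io(struct request *rq, blk_status_t error)
{
	struct completion *done = rq->end_io_data;

	complete(done);			/* wake up whoever issued the request */
	return RQ_END_IO_NONE;		/* the submitter keeps ownership; don't auto-free */
}
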
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 332f96b58252..d8034ff0cb24 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1856,9 +1856,7 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return !blk_queue_nowait(q);
+ return !bdev_nowait(dev->bdev);
}
static bool dm_table_supports_nowait(struct dm_table *t)
diff --git a/drivers/md/dm-verity-loadpin.c b/drivers/md/dm-verity-loadpin.c
index 387ec43aef72..4f78cc55c251 100644
--- a/drivers/md/dm-verity-loadpin.c
+++ b/drivers/md/dm-verity-loadpin.c
@@ -14,6 +14,7 @@ LIST_HEAD(dm_verity_loadpin_trusted_root_digests);
static bool is_trusted_verity_target(struct dm_target *ti)
{
+ int verity_mode;
u8 *root_digest;
unsigned int digest_size;
struct dm_verity_loadpin_trusted_root_digest *trd;
@@ -22,6 +23,13 @@ static bool is_trusted_verity_target(struct dm_target *ti)
if (!dm_is_verity_target(ti))
return false;
+ verity_mode = dm_verity_get_mode(ti);
+
+ if ((verity_mode != DM_VERITY_MODE_EIO) &&
+ (verity_mode != DM_VERITY_MODE_RESTART) &&
+ (verity_mode != DM_VERITY_MODE_PANIC))
+ return false;
+
if (dm_verity_get_root_digest(ti, &root_digest, &digest_size))
return false;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 94b6cb599db4..8a00cc42e498 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1447,6 +1447,22 @@ bool dm_is_verity_target(struct dm_target *ti)
}
/*
+ * Get the verity mode (error behavior) of a verity target.
+ *
+ * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
+ * target.
+ */
+int dm_verity_get_mode(struct dm_target *ti)
+{
+ struct dm_verity *v = ti->private;
+
+ if (!dm_is_verity_target(ti))
+ return -EINVAL;
+
+ return v->mode;
+}
+
+/*
* Get the root digest of a verity target.
*
* Returns a copy of the root digest, the caller is responsible for
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 45455de1b4bc..98f306ec6a33 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -134,6 +134,7 @@ extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);
extern bool dm_is_verity_target(struct dm_target *ti);
+extern int dm_verity_get_mode(struct dm_target *ti);
extern int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest,
unsigned int *digest_size);
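
With dm_verity_get_mode() exported above, the loadpin check can refuse verity targets whose on-error behavior it does not recognize. A short sketch mirroring that gate (demo-only helper):

static bool demo_mode_is_trustable(struct dm_target *ti)
{
	int mode = dm_verity_get_mode(ti);	/* -EINVAL if ti is not a verity target */

	return mode == DM_VERITY_MODE_EIO ||
	       mode == DM_VERITY_MODE_RESTART ||
	       mode == DM_VERITY_MODE_PANIC;
}
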
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 742b2349fea3..10e0c5381d01 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -876,8 +876,8 @@ static int join(struct mddev *mddev, int nodes)
memset(str, 0, 64);
sprintf(str, "%pU", mddev->uuid);
ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
- DLM_LSFL_FS, LVB_SIZE,
- &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
+ 0, LVB_SIZE, &md_ls_ops, mddev,
+ &ops_rv, &cinfo->lockspace);
if (ret)
goto err;
wait_for_completion(&cinfo->completion);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 729be2c5296c..a467b492d4ad 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5845,7 +5845,7 @@ int md_run(struct mddev *mddev)
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
- nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
+ nowait = nowait && bdev_nowait(rdev->bdev);
}
if (!bioset_initialized(&mddev->bio_set)) {
@@ -6982,7 +6982,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
* If the new disk does not support REQ_NOWAIT,
* disable on the whole MD.
*/
- if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+ if (!bdev_nowait(rdev->bdev)) {
pr_info("%s: Disabling nowait because %pg does not support nowait\n",
mdname(mddev), rdev->bdev);
blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
@@ -8156,7 +8156,6 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
list_for_each(tmp,&all_mddevs)
if (!l--) {
mddev = list_entry(tmp, struct mddev, all_mddevs);
- mddev_get(mddev);
if (!mddev_get(mddev))
continue;
spin_unlock(&all_mddevs_lock);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 78addfe4a0c9..857c49399c28 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -47,7 +47,7 @@ static void dump_zones(struct mddev *mddev)
int len = 0;
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
- len += snprintf(line+len, 200-len, "%s%pg", k?"/":"",
+ len += scnprintf(line+len, 200-len, "%s%pg", k?"/":"",
conf->devlist[j * raid_disks + k]->bdev);
pr_debug("md: zone%d=[%s]\n", j, line);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 64d6e4cd8a3a..3aa8b6e11d58 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -79,6 +79,21 @@ static void end_reshape(struct r10conf *conf);
#include "raid1-10.c"
+#define NULL_CMD
+#define cmd_before(conf, cmd) \
+ do { \
+ write_sequnlock_irq(&(conf)->resync_lock); \
+ cmd; \
+ } while (0)
+#define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
+
+#define wait_event_barrier_cmd(conf, cond, cmd) \
+ wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
+ cmd_after(conf))
+
+#define wait_event_barrier(conf, cond) \
+ wait_event_barrier_cmd(conf, cond, NULL_CMD)
+
/*
* for resync bio, r10bio pointer can be retrieved from the per-bio
* 'struct resync_pages'.
@@ -274,6 +289,12 @@ static void put_buf(struct r10bio *r10_bio)
lower_barrier(conf);
}
+static void wake_up_barrier(struct r10conf *conf)
+{
+ if (wq_has_sleeper(&conf->wait_barrier))
+ wake_up(&conf->wait_barrier);
+}
+
static void reschedule_retry(struct r10bio *r10_bio)
{
unsigned long flags;
@@ -930,78 +951,101 @@ static void flush_pending_writes(struct r10conf *conf)
static void raise_barrier(struct r10conf *conf, int force)
{
+ write_seqlock_irq(&conf->resync_lock);
BUG_ON(force && !conf->barrier);
- spin_lock_irq(&conf->resync_lock);
/* Wait until no block IO is waiting (unless 'force') */
- wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock);
+ wait_event_barrier(conf, force || !conf->nr_waiting);
/* block any new IO from starting */
- conf->barrier++;
+ WRITE_ONCE(conf->barrier, conf->barrier + 1);
/* Now wait for all pending IO to complete */
- wait_event_lock_irq(conf->wait_barrier,
- !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock);
+ wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
+ conf->barrier < RESYNC_DEPTH);
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
}
static void lower_barrier(struct r10conf *conf)
{
unsigned long flags;
- spin_lock_irqsave(&conf->resync_lock, flags);
- conf->barrier--;
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+
+ write_seqlock_irqsave(&conf->resync_lock, flags);
+ WRITE_ONCE(conf->barrier, conf->barrier - 1);
+ write_sequnlock_irqrestore(&conf->resync_lock, flags);
wake_up(&conf->wait_barrier);
}
+static bool stop_waiting_barrier(struct r10conf *conf)
+{
+ struct bio_list *bio_list = current->bio_list;
+
+ /* barrier is dropped */
+ if (!conf->barrier)
+ return true;
+
+ /*
+ * If there are already pending requests (preventing the barrier from
+ * rising completely), and the pre-process bio queue isn't empty, then
+ * don't wait, as we need to empty that queue to get the nr_pending
+ * count down.
+ */
+ if (atomic_read(&conf->nr_pending) && bio_list &&
+ (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
+ return true;
+
+ /* move on if recovery thread is blocked by us */
+ if (conf->mddev->thread->tsk == current &&
+ test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
+ conf->nr_queued > 0)
+ return true;
+
+ return false;
+}
+
+static bool wait_barrier_nolock(struct r10conf *conf)
+{
+ unsigned int seq = read_seqbegin(&conf->resync_lock);
+
+ if (READ_ONCE(conf->barrier))
+ return false;
+
+ atomic_inc(&conf->nr_pending);
+ if (!read_seqretry(&conf->resync_lock, seq))
+ return true;
+
+ if (atomic_dec_and_test(&conf->nr_pending))
+ wake_up_barrier(conf);
+
+ return false;
+}
+
static bool wait_barrier(struct r10conf *conf, bool nowait)
{
bool ret = true;
- spin_lock_irq(&conf->resync_lock);
+ if (wait_barrier_nolock(conf))
+ return true;
+
+ write_seqlock_irq(&conf->resync_lock);
if (conf->barrier) {
- struct bio_list *bio_list = current->bio_list;
- conf->nr_waiting++;
- /* Wait for the barrier to drop.
- * However if there are already pending
- * requests (preventing the barrier from
- * rising completely), and the
- * pre-process bio queue isn't empty,
- * then don't wait, as we need to empty
- * that queue to get the nr_pending
- * count down.
- */
/* Return false when nowait flag is set */
if (nowait) {
ret = false;
} else {
+ conf->nr_waiting++;
raid10_log(conf->mddev, "wait barrier");
- wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (atomic_read(&conf->nr_pending) &&
- bio_list &&
- (!bio_list_empty(&bio_list[0]) ||
- !bio_list_empty(&bio_list[1]))) ||
- /* move on if recovery thread is
- * blocked by us
- */
- (conf->mddev->thread->tsk == current &&
- test_bit(MD_RECOVERY_RUNNING,
- &conf->mddev->recovery) &&
- conf->nr_queued > 0),
- conf->resync_lock);
+ wait_event_barrier(conf, stop_waiting_barrier(conf));
+ conf->nr_waiting--;
}
- conf->nr_waiting--;
if (!conf->nr_waiting)
wake_up(&conf->wait_barrier);
}
/* Only increment nr_pending when we wait */
if (ret)
atomic_inc(&conf->nr_pending);
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
return ret;
}
@@ -1009,7 +1053,7 @@ static void allow_barrier(struct r10conf *conf)
{
if ((atomic_dec_and_test(&conf->nr_pending)) ||
(conf->array_freeze_pending))
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
}
static void freeze_array(struct r10conf *conf, int extra)
@@ -1026,27 +1070,24 @@ static void freeze_array(struct r10conf *conf, int extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
- spin_lock_irq(&conf->resync_lock);
+ write_seqlock_irq(&conf->resync_lock);
conf->array_freeze_pending++;
- conf->barrier++;
+ WRITE_ONCE(conf->barrier, conf->barrier + 1);
conf->nr_waiting++;
- wait_event_lock_irq_cmd(conf->wait_barrier,
- atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
- conf->resync_lock,
- flush_pending_writes(conf));
-
+ wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
+ conf->nr_queued + extra, flush_pending_writes(conf));
conf->array_freeze_pending--;
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
}
static void unfreeze_array(struct r10conf *conf)
{
/* reverse the effect of the freeze */
- spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
+ write_seqlock_irq(&conf->resync_lock);
+ WRITE_ONCE(conf->barrier, conf->barrier - 1);
conf->nr_waiting--;
wake_up(&conf->wait_barrier);
- spin_unlock_irq(&conf->resync_lock);
+ write_sequnlock_irq(&conf->resync_lock);
}
static sector_t choose_data_offset(struct r10bio *r10_bio,
@@ -1885,7 +1926,7 @@ static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
__make_request(mddev, bio, sectors);
/* In case raid10d snuck in to freeze_array */
- wake_up(&conf->wait_barrier);
+ wake_up_barrier(conf);
return true;
}
@@ -1980,7 +2021,7 @@ static int enough(struct r10conf *conf, int ignore)
* Otherwise, it must be degraded:
* - recovery is interrupted.
* - &mddev->degraded is bumped.
-
+ *
* @rdev is marked as &Faulty excluding case when array is failed and
* &mddev->fail_last_dev is off.
*/
@@ -4032,7 +4073,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(&conf->retry_list);
INIT_LIST_HEAD(&conf->bio_end_io_list);
- spin_lock_init(&conf->resync_lock);
+ seqlock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
atomic_set(&conf->nr_pending, 0);
@@ -4351,7 +4392,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
rdev->new_raid_disk = rdev->raid_disk * 2;
rdev->sectors = size;
}
- conf->barrier = 1;
+ WRITE_ONCE(conf->barrier, 1);
}
return conf;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 5c0804d8bb1f..8c072ce0bc54 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -76,7 +76,7 @@ struct r10conf {
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list;
- spinlock_t resync_lock;
+ seqlock_t resync_lock;
atomic_t nr_pending;
int nr_waiting;
int nr_queued;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index f4e1cc1ece43..79c73330020b 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -125,7 +125,7 @@ struct r5l_log {
* reclaimed. if it's 0, reclaim spaces
* used by io_units which are in
* IO_UNIT_STRIPE_END state (eg, reclaim
- * dones't wait for specific io_unit
+ * doesn't wait for specific io_unit
* switching to IO_UNIT_STRIPE_END
* state) */
wait_queue_head_t iounit_wait;
@@ -1327,9 +1327,9 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* superblock is updated to new log tail. Updating superblock (either
* directly call md_update_sb() or depend on md thread) must hold
* reconfig mutex. On the other hand, raid5_quiesce is called with
- * reconfig_mutex hold. The first step of raid5_quiesce() is waitting
- * for all IO finish, hence waitting for reclaim thread, while reclaim
- * thread is calling this function and waitting for reconfig mutex. So
+ * reconfig_mutex hold. The first step of raid5_quiesce() is waiting
+ * for all IO finish, hence waiting for reclaim thread, while reclaim
+ * thread is calling this function and waiting for reconfig mutex. So
* there is a deadlock. We workaround this issue with a trylock.
* FIXME: we could miss discard if we can't take reconfig mutex
*/
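
The comment above describes a classic lock-ordering deadlock broken with a trylock: the reclaim thread needs reconfig_mutex, while the holder of reconfig_mutex may itself be waiting for reclaim to finish. A rough sketch of the back-off pattern (hypothetical helper, not the driver's code):

#include <linux/mutex.h>

/* Returns true if the update was attempted, false if skipped this round. */
static bool demo_try_update_sb(struct mutex *reconfig_mutex)
{
        if (!mutex_trylock(reconfig_mutex))
                return false;   /* owner may be blocked on us; retry later */

        /* ... update the superblock and issue the discard here ... */

        mutex_unlock(reconfig_mutex);
        return true;
}

As the FIXME notes, the price of this workaround is that a discard can be missed whenever the trylock fails.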
@@ -1923,7 +1923,8 @@ r5c_recovery_alloc_stripe(
{
struct stripe_head *sh;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
+ sh = raid5_get_active_stripe(conf, NULL, stripe_sect,
+ noblock ? R5_GAS_NOBLOCK : 0);
if (!sh)
return NULL; /* no more stripe available */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 31a0cbf63384..7b820b81d8c2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -789,87 +790,80 @@ struct stripe_request_ctx {
*/
static bool is_inactive_blocked(struct r5conf *conf, int hash)
{
- int active = atomic_read(&conf->active_stripes);
-
if (list_empty(conf->inactive_list + hash))
return false;
if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return true;
- return active < (conf->max_nr_stripes * 3 / 4);
+ return (atomic_read(&conf->active_stripes) <
+ (conf->max_nr_stripes * 3 / 4));
}
-static struct stripe_head *__raid5_get_active_stripe(struct r5conf *conf,
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
struct stripe_request_ctx *ctx, sector_t sector,
- bool previous, bool noblock, bool noquiesce)
+ unsigned int flags)
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(conf, sector);
+ int previous = !!(flags & R5_GAS_PREVIOUS);
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
spin_lock_irq(conf->hash_locks + hash);
-retry:
- if (!noquiesce && conf->quiesce) {
- /*
- * Must release the reference to batch_last before waiting,
- * on quiesce, otherwise the batch_last will hold a reference
- * to a stripe and raid5_quiesce() will deadlock waiting for
- * active_stripes to go to zero.
- */
- if (ctx && ctx->batch_last) {
- raid5_release_stripe(ctx->batch_last);
- ctx->batch_last = NULL;
- }
-
- wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
- *(conf->hash_locks + hash));
- }
+ for (;;) {
+ if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce) {
+ /*
+ * Must release the reference to batch_last before
+ * waiting, on quiesce, otherwise the batch_last will
+ * hold a reference to a stripe and raid5_quiesce()
+ * will deadlock waiting for active_stripes to go to
+ * zero.
+ */
+ if (ctx && ctx->batch_last) {
+ raid5_release_stripe(ctx->batch_last);
+ ctx->batch_last = NULL;
+ }
- sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
- if (sh)
- goto out;
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ !conf->quiesce,
+ *(conf->hash_locks + hash));
+ }
- if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
- goto wait_for_stripe;
+ sh = find_get_stripe(conf, sector, conf->generation - previous,
+ hash);
+ if (sh)
+ break;
- sh = get_free_stripe(conf, hash);
- if (sh) {
- r5c_check_stripe_cache_usage(conf);
- init_stripe(sh, sector, previous);
- atomic_inc(&sh->count);
- goto out;
- }
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
+ sh = get_free_stripe(conf, hash);
+ if (sh) {
+ r5c_check_stripe_cache_usage(conf);
+ init_stripe(sh, sector, previous);
+ atomic_inc(&sh->count);
+ break;
+ }
- if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
- set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE, &conf->cache_state);
+ }
-wait_for_stripe:
- if (noblock)
- goto out;
+ if (flags & R5_GAS_NOBLOCK)
+ break;
- set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- r5l_wake_reclaim(conf->log, 0);
- wait_event_lock_irq(conf->wait_for_stripe,
- is_inactive_blocked(conf, hash),
- *(conf->hash_locks + hash));
- clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
- goto retry;
+ set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ r5l_wake_reclaim(conf->log, 0);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ is_inactive_blocked(conf, hash),
+ *(conf->hash_locks + hash));
+ clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
+ }
-out:
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
-struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
- sector_t sector, bool previous, bool noblock, bool noquiesce)
-{
- return __raid5_get_active_stripe(conf, NULL, sector, previous, noblock,
- noquiesce);
-}
-
static bool is_full_stripe_write(struct stripe_head *sh)
{
BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
@@ -4047,7 +4041,7 @@ static void handle_stripe_fill(struct stripe_head *sh,
* back cache (prexor with orig_page, and then xor with
* page) in the read path
*/
- if (s->injournal && s->failed) {
+ if (s->to_read && s->injournal && s->failed) {
if (test_bit(STRIPE_R5C_CACHING, &sh->state))
r5c_make_stripe_write_out(sh);
goto out;
@@ -4636,7 +4630,8 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
sector_t bn = raid5_compute_blocknr(sh, i, 1);
sector_t s = raid5_compute_sector(conf, bn, 0,
&dd_idx, NULL);
- sh2 = raid5_get_active_stripe(conf, s, 0, 1, 1);
+ sh2 = raid5_get_active_stripe(conf, NULL, s,
+ R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
if (sh2 == NULL)
/* so far only the early blocks of this stripe
* have been requested. When later blocks
@@ -5273,7 +5268,9 @@ static void handle_stripe(struct stripe_head *sh)
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
- = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1);
+ = raid5_get_active_stripe(conf, NULL, sh->sector,
+ R5_GAS_PREVIOUS | R5_GAS_NOBLOCK |
+ R5_GAS_NOQUIESCE);
if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
/* sh cannot be written until sh_src has been read.
* so arrange for sh to be delayed a little
@@ -5542,7 +5539,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
&bad_sectors)) {
- bio_put(raid_bio);
rdev_dec_pending(rdev, mddev);
return 0;
}
@@ -5823,7 +5819,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
DEFINE_WAIT(w);
int d;
again:
- sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
@@ -5978,7 +5974,7 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
enum stripe_result ret;
struct stripe_head *sh;
sector_t new_sector;
- int previous = 0;
+ int previous = 0, flags = 0;
int seq, dd_idx;
seq = read_seqcount_begin(&conf->gen_lock);
@@ -6012,8 +6008,11 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
pr_debug("raid456: %s, sector %llu logical %llu\n", __func__,
new_sector, logical_sector);
- sh = __raid5_get_active_stripe(conf, ctx, new_sector, previous,
- (bi->bi_opf & REQ_RAHEAD), 0);
+ if (previous)
+ flags |= R5_GAS_PREVIOUS;
+ if (bi->bi_opf & REQ_RAHEAD)
+ flags |= R5_GAS_NOBLOCK;
+ sh = raid5_get_active_stripe(conf, ctx, new_sector, flags);
if (unlikely(!sh)) {
/* cannot get stripe, just give-up */
bi->bi_status = BLK_STS_IOERR;
@@ -6362,7 +6361,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
int j;
int skipped_disk = 0;
- sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
+ sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i,
+ R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPANDING, &sh->state);
atomic_inc(&conf->reshape_stripes);
/* If any of this stripe is beyond the end of the old
@@ -6411,7 +6411,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1;
while (first_sector <= last_sector) {
- sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1);
+ sh = raid5_get_active_stripe(conf, NULL, first_sector,
+ R5_GAS_PREVIOUS | R5_GAS_NOQUIESCE);
set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -6531,9 +6532,10 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
- sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
+ sh = raid5_get_active_stripe(conf, NULL, sector_nr,
+ R5_GAS_NOBLOCK);
if (sh == NULL) {
- sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
+ sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0);
/* make sure we don't swamp the stripe cache if someone else
* is trying to get access
*/
@@ -6596,8 +6598,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
/* already done this stripe */
continue;
- sh = raid5_get_active_stripe(conf, sector, 0, 1, 1);
-
+ sh = raid5_get_active_stripe(conf, NULL, sector,
+ R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE);
if (!sh) {
/* failed to get a stripe - must wait */
conf->retry_read_aligned = raid_bio;
@@ -6781,7 +6783,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+ * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+ * seeing md_check_recovery() is needed to clear
+ * the flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a5082bed83c8..e873938a6125 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -803,16 +803,24 @@ raid5_get_dev_page(struct stripe_head *sh, int disk_idx)
}
#endif
-extern void md_raid5_kick_device(struct r5conf *conf);
-extern int raid5_set_cache_size(struct mddev *mddev, int size);
-extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
-extern void raid5_release_stripe(struct stripe_head *sh);
-extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
- int previous, int *dd_idx,
- struct stripe_head *sh);
-extern struct stripe_head *
-raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
- bool previous, bool noblock, bool noquiesce);
-extern int raid5_calc_degraded(struct r5conf *conf);
-extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
+void md_raid5_kick_device(struct r5conf *conf);
+int raid5_set_cache_size(struct mddev *mddev, int size);
+sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous);
+void raid5_release_stripe(struct stripe_head *sh);
+sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
+ int previous, int *dd_idx, struct stripe_head *sh);
+
+struct stripe_request_ctx;
+/* get stripe from previous generation (when reshaping) */
+#define R5_GAS_PREVIOUS (1 << 0)
+/* do not block waiting for a free stripe */
+#define R5_GAS_NOBLOCK (1 << 1)
+/* do not block waiting for quiesce to be released */
+#define R5_GAS_NOQUIESCE (1 << 2)
+struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
+ struct stripe_request_ctx *ctx, sector_t sector,
+ unsigned int flags);
+
+int raid5_calc_degraded(struct r5conf *conf);
+int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);
#endif
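
The old five-argument form with three booleans is replaced by a single flags word, which keeps call sites readable and lets the request context travel with the call. A hypothetical compatibility wrapper (not part of the patch) shows how the two interfaces map onto each other:

static inline struct stripe_head *
demo_get_active_stripe(struct r5conf *conf, sector_t sector,
                       bool previous, bool noblock, bool noquiesce)
{
        unsigned int flags = 0;

        if (previous)
                flags |= R5_GAS_PREVIOUS;
        if (noblock)
                flags |= R5_GAS_NOBLOCK;
        if (noquiesce)
                flags |= R5_GAS_NOQUIESCE;

        return raid5_get_active_stripe(conf, NULL, sector, flags);
}

So, for example, the old raid5_get_active_stripe(conf, sh->sector, 1, 1, 1) in handle_stripe() becomes raid5_get_active_stripe(conf, NULL, sh->sector, R5_GAS_PREVIOUS | R5_GAS_NOBLOCK | R5_GAS_NOQUIESCE), exactly as in the hunk above.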
diff --git a/drivers/media/cec/i2c/ch7322.c b/drivers/media/cec/i2c/ch7322.c
index 0814338c43e4..34fad7123704 100644
--- a/drivers/media/cec/i2c/ch7322.c
+++ b/drivers/media/cec/i2c/ch7322.c
@@ -565,7 +565,7 @@ err_mutex:
return ret;
}
-static int ch7322_remove(struct i2c_client *client)
+static void ch7322_remove(struct i2c_client *client)
{
struct ch7322 *ch7322 = i2c_get_clientdata(client);
@@ -578,8 +578,6 @@ static int ch7322_remove(struct i2c_client *client)
mutex_destroy(&ch7322->mutex);
dev_info(&client->dev, "device unregistered\n");
-
- return 0;
}
static const struct of_device_id ch7322_of_match[] = {
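
Every media I2C driver touched from here on follows the same tree-wide conversion: the i2c_client remove() callback now returns void, so the trailing return 0; is dropped and only the signature changes. A minimal sketch of the new shape (driver and state names are hypothetical):

#include <linux/i2c.h>
#include <linux/slab.h>

static void demo_remove(struct i2c_client *client)
{
        void *state = i2c_get_clientdata(client);

        /* tear down as before; there is no error code to return anymore */
        kfree(state);
}

static struct i2c_driver demo_driver = {
        .driver = { .name = "demo" },
        .remove = demo_remove,          /* now void-returning */
};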
diff --git a/drivers/media/cec/platform/sti/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c
index abf8e8bcbb34..4edbdd09535d 100644
--- a/drivers/media/cec/platform/sti/stih-cec.c
+++ b/drivers/media/cec/platform/sti/stih-cec.c
@@ -256,8 +256,8 @@ static void stih_rx_done(struct stih_cec *cec, u32 status)
if (!msg.len)
return;
- if (msg.len > 16)
- msg.len = 16;
+ if (msg.len > CEC_MAX_MSG_SIZE)
+ msg.len = CEC_MAX_MSG_SIZE;
for (i = 0; i < msg.len; i++)
msg.msg[i] = readl(cec->regs + CEC_RX_DATA_BASE + i);
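
The fix replaces the magic 16 with CEC_MAX_MSG_SIZE when clamping the hardware-reported length. The same clamp could also be written with min_t(); a hypothetical helper, assuming the usual CEC uapi definitions are pulled in:

#include <linux/types.h>
#include <linux/minmax.h>
#include <media/cec.h>

static u32 demo_clamp_cec_len(u32 hw_len)
{
        /* CEC messages are at most CEC_MAX_MSG_SIZE (16) bytes */
        return min_t(u32, hw_len, CEC_MAX_MSG_SIZE);
}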
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index a2ae71270054..852b7d92fbdd 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -22,7 +22,6 @@ config VIDEO_TVEEPROM
depends on I2C
source "drivers/media/common/b2c2/Kconfig"
-source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
source "drivers/media/common/v4l2-tpg/Kconfig"
source "drivers/media/common/videobuf2/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index ad0b1e95fb12..d78a0df15478 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/ videobuf2/
+obj-y += b2c2/ siano/ v4l2-tpg/ videobuf2/
# Please keep it alphabetically sorted by Kconfig name
# (e. g. LC_ALL=C sort Makefile)
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index b203c1e26353..ab9697f3b5f1 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -398,7 +398,7 @@ static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
}
/*
- * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
+ * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
* queue
*
@@ -417,7 +417,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
VB2_MAX_FRAME - q->num_buffers);
for (buffer = 0; buffer < num_buffers; ++buffer) {
- /* Allocate videobuf buffer structures */
+ /* Allocate vb2 buffer structures */
vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
if (!vb) {
dprintk(q, 1, "memory alloc for buffer struct failed\n");
@@ -599,7 +599,7 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
}
#endif
- /* Free videobuf buffers */
+ /* Free vb2 buffers */
for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
++buffer) {
kfree(q->bufs[buffer]);
@@ -1949,7 +1949,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
if (pb)
call_void_bufop(q, fill_user_buffer, vb, pb);
- /* Remove from videobuf queue */
+ /* Remove from vb2 queue */
list_del(&vb->queued_entry);
q->queued_count--;
@@ -1978,7 +1978,7 @@ EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
* __vb2_queue_cancel() - cancel and stop (pause) streaming
*
* Removes all queued buffers from driver's queue and all buffers queued by
- * userspace from videobuf's queue. Returns to state after reqbufs.
+ * userspace from vb2's queue. Returns to state after reqbufs.
*/
static void __vb2_queue_cancel(struct vb2_queue *q)
{
@@ -2016,7 +2016,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
q->uses_qbuf = 0;
/*
- * Remove all buffers from videobuf's list...
+ * Remove all buffers from vb2's list...
*/
INIT_LIST_HEAD(&q->queued_list);
/*
@@ -2139,7 +2139,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
/*
* Cancel will pause streaming and remove all buffers from the driver
- * and videobuf, effectively returning control over them to userspace.
+ * and vb2, effectively returning control over them to userspace.
*
* Note that we do this even if q->streaming == 0: if you prepare or
* queue buffers, and then call streamoff without ever having called
diff --git a/drivers/media/common/videobuf2/videobuf2-dvb.c b/drivers/media/common/videobuf2/videobuf2-dvb.c
index 9d571c9d31e9..8c15bcd07eef 100644
--- a/drivers/media/common/videobuf2/videobuf2-dvb.c
+++ b/drivers/media/common/videobuf2/videobuf2-dvb.c
@@ -3,8 +3,8 @@
*
* some helper function for simple DVB cards which simply DMA the
* complete transport stream and let the computer sort everything else
- * (i.e. we are using the software demux, ...). Also uses the
- * video-buf to manage DMA buffers.
+ * (i.e. we are using the software demux, ...). Also uses vb2
+ * to manage DMA buffers.
*
* (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
*/
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index f26cb8586bd4..1f5d235a8441 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -268,7 +268,7 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
/*
* Single-planar buffers do not use planes array,
* so fill in relevant v4l2_buffer struct fields instead.
- * In videobuf we use our internal V4l2_planes struct for
+ * In vb2 we use our internal V4l2_planes struct for
* single-planar buffers as well, for simplicity.
*
* If bytesused == 0 for the output buffer, then fall back
@@ -625,19 +625,6 @@ static const struct vb2_buf_ops v4l2_buf_ops = {
.copy_timestamp = __copy_timestamp,
};
-int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
- unsigned int start_idx)
-{
- unsigned int i;
-
- for (i = start_idx; i < q->num_buffers; i++)
- if (q->bufs[i]->copied_timestamp &&
- q->bufs[i]->timestamp == timestamp)
- return i;
- return -1;
-}
-EXPORT_SYMBOL_GPL(vb2_find_timestamp);
-
struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
{
unsigned int i;
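
vb2_find_timestamp() returned a buffer index that callers then had to translate through q->bufs[]; it is removed in favour of vb2_find_buffer(), which hands back the vb2_buffer directly (presumably NULL when nothing matches, mirroring the old -1 return). A hypothetical caller-side conversion, assuming the declaration lives alongside the old one in videobuf2-v4l2.h:

#include <linux/types.h>
#include <media/videobuf2-v4l2.h>

static struct vb2_buffer *demo_lookup(struct vb2_queue *q, u64 timestamp)
{
        /* old: int idx = vb2_find_timestamp(q, timestamp, 0);
         *      vb = idx >= 0 ? q->bufs[idx] : NULL;
         */
        return vb2_find_buffer(q, timestamp);
}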
@@ -652,7 +639,7 @@ EXPORT_SYMBOL_GPL(vb2_find_buffer);
/*
* vb2_querybuf() - query video buffer information
- * @q: videobuf queue
+ * @q: vb2 queue
* @b: buffer struct passed from userspace to vidioc_querybuf handler
* in driver
*
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index a1bd6d9c9223..909df82fed33 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -354,6 +354,12 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
+
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
vb2_core_querybuf(&ctx->vb_q, b->index, b);
dprintk(3, "[%s] index=%d\n", ctx->name, b->index);
return 0;
@@ -378,8 +384,13 @@ int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp)
int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
+ struct vb2_queue *q = &ctx->vb_q;
int ret;
+ if (b->index >= q->num_buffers) {
+ dprintk(1, "[%s] buffer index out of range\n", ctx->name);
+ return -EINVAL;
+ }
ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-frontends/a8293.c b/drivers/media/dvb-frontends/a8293.c
index 57f52c004a23..ba38783b2b4f 100644
--- a/drivers/media/dvb-frontends/a8293.c
+++ b/drivers/media/dvb-frontends/a8293.c
@@ -98,14 +98,13 @@ err:
return ret;
}
-static int a8293_remove(struct i2c_client *client)
+static void a8293_remove(struct i2c_client *client)
{
struct a8293_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(dev);
- return 0;
}
static const struct i2c_device_id a8293_id_table[] = {
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 7d7c341b2bd8..d85929582c3f 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1540,7 +1540,7 @@ err:
return ret;
}
-static int af9013_remove(struct i2c_client *client)
+static void af9013_remove(struct i2c_client *client)
{
struct af9013_state *state = i2c_get_clientdata(client);
@@ -1551,8 +1551,6 @@ static int af9013_remove(struct i2c_client *client)
regmap_exit(state->regmap);
kfree(state);
-
- return 0;
}
static const struct i2c_device_id af9013_id_table[] = {
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 785c49b3d307..808da7a9ffe7 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1163,7 +1163,7 @@ err:
return ret;
}
-static int af9033_remove(struct i2c_client *client)
+static void af9033_remove(struct i2c_client *client)
{
struct af9033_dev *dev = i2c_get_clientdata(client);
@@ -1171,8 +1171,6 @@ static int af9033_remove(struct i2c_client *client)
regmap_exit(dev->regmap);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id af9033_id_table[] = {
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 8cdca051e51b..e4f99bd468cb 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -758,13 +758,12 @@ static int au8522_probe(struct i2c_client *client,
return 0;
}
-static int au8522_remove(struct i2c_client *client)
+static void au8522_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
au8522_release_state(to_state(sd));
- return 0;
}
static const struct i2c_device_id au8522_id[] = {
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index 1c8207ab8988..fbc666fa04ec 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -664,14 +664,12 @@ err:
return ret;
}
-static int cxd2099_remove(struct i2c_client *client)
+static void cxd2099_remove(struct i2c_client *client)
{
struct cxd *ci = i2c_get_clientdata(client);
regmap_exit(ci->regmap);
kfree(ci);
-
- return 0;
}
static const struct i2c_device_id cxd2099_id[] = {
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index b1618339eec0..5d98222f9df0 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -705,7 +705,7 @@ err:
return ret;
}
-static int cxd2820r_remove(struct i2c_client *client)
+static void cxd2820r_remove(struct i2c_client *client)
{
struct cxd2820r_priv *priv = i2c_get_clientdata(client);
@@ -721,8 +721,6 @@ static int cxd2820r_remove(struct i2c_client *client)
regmap_exit(priv->regmap[0]);
kfree(priv);
-
- return 0;
}
static const struct i2c_device_id cxd2820r_id_table[] = {
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index d67f2dd997d0..fe19d127abb3 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -3212,7 +3212,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
case CT_DEMOD_STEP_6: /* (36) if there is an input (diversity) */
if ((state->fe[1] != NULL) && (state->output_mode != OUTMODE_DIVERSITY)) {
- /* if there is a diversity fe in input and this fe is has not already failed : wait here until this this fe has succedeed or failed */
+ /* if there is a diversity fe in input and this fe has not already failed: wait here until this fe has succeeded or failed */
if (dib8000_get_status(state->fe[1]) <= FE_STATUS_STD_SUCCESS) /* Something is locked on the input fe */
*tune_state = CT_DEMOD_STEP_8; /* go for mpeg */
else if (dib8000_get_status(state->fe[1]) >= FE_STATUS_TUNE_TIME_TOO_SHORT) { /* fe in input failed also, break the current one */
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 9430295a8175..47d83e0a470c 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -3516,7 +3516,7 @@ static int set_dvbt_standard(struct drxk_state *state,
status = write16(state, IQM_AF_CLP_LEN__A, 0);
if (status < 0)
goto error;
- /* window size for for sense pre-SAW detection */
+ /* window size for sense pre-SAW detection */
status = write16(state, IQM_AF_SNS_LEN__A, 0);
if (status < 0)
goto error;
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index d45b4ddc8f91..baf2a378e565 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -899,14 +899,13 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int dvb_pll_remove(struct i2c_client *client)
+static void dvb_pll_remove(struct i2c_client *client)
{
struct dvb_frontend *fe = i2c_get_clientdata(client);
struct dvb_pll_priv *priv = fe->tuner_priv;
ida_simple_remove(&pll_ida, priv->nr);
dvb_pll_release(fe);
- return 0;
}
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 136b76cb4807..424311afb2bf 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2226,7 +2226,7 @@ fail:
return ret;
}
-static int lgdt3306a_remove(struct i2c_client *client)
+static void lgdt3306a_remove(struct i2c_client *client)
{
struct lgdt3306a_state *state = i2c_get_clientdata(client);
@@ -2237,8 +2237,6 @@ static int lgdt3306a_remove(struct i2c_client *client)
kfree(state->cfg);
kfree(state);
-
- return 0;
}
static const struct i2c_device_id lgdt3306a_id_table[] = {
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index da3a8c5e18d8..ea9ae22fd201 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -974,15 +974,13 @@ static const struct dvb_frontend_ops lgdt3303_ops = {
.release = lgdt330x_release,
};
-static int lgdt330x_remove(struct i2c_client *client)
+static void lgdt330x_remove(struct i2c_client *client)
{
struct lgdt330x_state *state = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(state);
-
- return 0;
}
static const struct i2c_device_id lgdt330x_id_table[] = {
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index bce0f42f3d19..4e844b2ef597 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1914,7 +1914,7 @@ err:
return ret;
}
-static int m88ds3103_remove(struct i2c_client *client)
+static void m88ds3103_remove(struct i2c_client *client)
{
struct m88ds3103_dev *dev = i2c_get_clientdata(client);
@@ -1926,7 +1926,6 @@ static int m88ds3103_remove(struct i2c_client *client)
i2c_mux_del_adapters(dev->muxc);
kfree(dev);
- return 0;
}
static const struct i2c_device_id m88ds3103_id_table[] = {
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index fff212c0bf3b..452571b380b7 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -762,15 +762,13 @@ err_i2c_t:
return ret;
}
-static int mn88443x_remove(struct i2c_client *client)
+static void mn88443x_remove(struct i2c_client *client)
{
struct mn88443x_priv *chip = i2c_get_clientdata(client);
mn88443x_cmn_power_off(chip);
i2c_unregister_device(chip->client_t);
-
- return 0;
}
static const struct mn88443x_spec mn88443x_spec_pri = {
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 73922fc8f39c..2b01cc678f7e 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -691,7 +691,7 @@ err:
return ret;
}
-static int mn88472_remove(struct i2c_client *client)
+static void mn88472_remove(struct i2c_client *client)
{
struct mn88472_dev *dev = i2c_get_clientdata(client);
@@ -706,8 +706,6 @@ static int mn88472_remove(struct i2c_client *client)
regmap_exit(dev->regmap[0]);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id mn88472_id_table[] = {
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 4838969ef735..f0ecf5910c02 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -726,7 +726,7 @@ err:
return ret;
}
-static int mn88473_remove(struct i2c_client *client)
+static void mn88473_remove(struct i2c_client *client)
{
struct mn88473_dev *dev = i2c_get_clientdata(client);
@@ -741,8 +741,6 @@ static int mn88473_remove(struct i2c_client *client)
regmap_exit(dev->regmap[0]);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id mn88473_id_table[] = {
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index dd7954e8f553..129630cbffff 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -1337,15 +1337,13 @@ err:
return -ENODEV;
}
-static int mxl692_remove(struct i2c_client *client)
+static void mxl692_remove(struct i2c_client *client)
{
struct mxl692_dev *dev = i2c_get_clientdata(client);
dev->fe.demodulator_priv = NULL;
i2c_set_clientdata(client, NULL);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id mxl692_id_table[] = {
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index e6b8367c8cce..e0fbf41316ae 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -865,7 +865,7 @@ err:
return ret;
}
-static int rtl2830_remove(struct i2c_client *client)
+static void rtl2830_remove(struct i2c_client *client)
{
struct rtl2830_dev *dev = i2c_get_clientdata(client);
@@ -874,8 +874,6 @@ static int rtl2830_remove(struct i2c_client *client)
i2c_mux_del_adapters(dev->muxc);
regmap_exit(dev->regmap);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id rtl2830_id_table[] = {
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index dcbeb9f5e12a..4fa884eda5d5 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -1110,7 +1110,7 @@ err:
return ret;
}
-static int rtl2832_remove(struct i2c_client *client)
+static void rtl2832_remove(struct i2c_client *client)
{
struct rtl2832_dev *dev = i2c_get_clientdata(client);
@@ -1123,8 +1123,6 @@ static int rtl2832_remove(struct i2c_client *client)
regmap_exit(dev->regmap);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id rtl2832_id_table[] = {
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 6a4f2997d6f5..05f71d169726 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -245,7 +245,7 @@ static void rtl2832_sdr_urb_complete(struct urb *urb)
if (unlikely(fbuf == NULL)) {
dev->vb_full++;
dev_notice_ratelimited(&pdev->dev,
- "videobuf is full, %d packets dropped\n",
+ "video buffer is full, %d packets dropped\n",
dev->vb_full);
goto skip;
}
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index ebee230afb7b..86b0d59169dd 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -1274,14 +1274,13 @@ error:
return ret;
}
-static int si2165_remove(struct i2c_client *client)
+static void si2165_remove(struct i2c_client *client)
{
struct si2165_state *state = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(state);
- return 0;
}
static const struct i2c_device_id si2165_id_table[] = {
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 196e028a6617..8157df4570d1 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -774,7 +774,7 @@ err:
return ret;
}
-static int si2168_remove(struct i2c_client *client)
+static void si2168_remove(struct i2c_client *client)
{
struct si2168_dev *dev = i2c_get_clientdata(client);
@@ -786,8 +786,6 @@ static int si2168_remove(struct i2c_client *client)
dev->fe.demodulator_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id si2168_id_table[] = {
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 992f22167fbe..27e7037e130e 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -398,14 +398,13 @@ err:
return ret;
}
-static int sp2_remove(struct i2c_client *client)
+static void sp2_remove(struct i2c_client *client)
{
struct sp2 *s = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
sp2_exit(client);
kfree(s);
- return 0;
}
static const struct i2c_device_id sp2_id[] = {
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 90d24131d335..0a600c1d7d1b 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -5032,12 +5032,11 @@ error:
return ret;
}
-static int stv090x_remove(struct i2c_client *client)
+static void stv090x_remove(struct i2c_client *client)
{
struct stv090x_state *state = i2c_get_clientdata(client);
stv090x_release(&state->frontend);
- return 0;
}
struct dvb_frontend *stv090x_attach(struct stv090x_config *config,
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index 5012d0231652..fbc4dbd62151 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -436,12 +436,11 @@ static int stv6110x_probe(struct i2c_client *client,
return 0;
}
-static int stv6110x_remove(struct i2c_client *client)
+static void stv6110x_remove(struct i2c_client *client)
{
struct stv6110x_state *stv6110x = i2c_get_clientdata(client);
stv6110x_release(stv6110x->frontend);
- return 0;
}
const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index e83836b29715..c22d2a2b2a45 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -819,14 +819,13 @@ free_state:
return ret;
}
-static int tc90522_remove(struct i2c_client *client)
+static void tc90522_remove(struct i2c_client *client)
{
struct tc90522_state *state;
state = cfg_to_state(i2c_get_clientdata(client));
i2c_del_adapter(&state->tuner_i2c);
kfree(state);
- return 0;
}
diff --git a/drivers/media/dvb-frontends/tda1002x.h b/drivers/media/dvb-frontends/tda1002x.h
index 60a0952c1bca..00491bea9975 100644
--- a/drivers/media/dvb-frontends/tda1002x.h
+++ b/drivers/media/dvb-frontends/tda1002x.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
TDA10021/TDA10023 - Single Chip Cable Channel Receiver driver module
- used on the the Siemens DVB-C cards
+ used on the Siemens DVB-C cards
Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index d1d206ebdedd..0b3f6999515e 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1118,7 +1118,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
state->pll_pfactor = 0;
}
- /* Establish any defaults the the user didn't pass */
+ /* Establish any defaults the user didn't pass */
tda10048_establish_defaults(&state->frontend);
/* Set the xtal and freq defaults */
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 685c0ac71819..d1098ef20a8b 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -1221,14 +1221,13 @@ err:
return ret;
}
-static int tda10071_remove(struct i2c_client *client)
+static void tda10071_remove(struct i2c_client *client)
{
struct tda10071_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
kfree(dev);
- return 0;
}
static const struct i2c_device_id tda10071_id_table[] = {
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index 3e383912bcfd..02338256b974 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -696,7 +696,7 @@ err:
return ret;
}
-static int ts2020_remove(struct i2c_client *client)
+static void ts2020_remove(struct i2c_client *client)
{
struct ts2020_priv *dev = i2c_get_clientdata(client);
@@ -708,7 +708,6 @@ static int ts2020_remove(struct i2c_client *client)
regmap_exit(dev->regmap);
kfree(dev);
- return 0;
}
static const struct i2c_device_id ts2020_id_table[] = {
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 2958a4694461..516de278cc49 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -342,7 +342,7 @@ cleanup:
return ret;
}
-static int ad5820_remove(struct i2c_client *client)
+static void ad5820_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ad5820_device *coil = to_ad5820_device(subdev);
@@ -351,7 +351,6 @@ static int ad5820_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&coil->ctrls);
media_entity_cleanup(&coil->subdev.entity);
mutex_destroy(&coil->power_lock);
- return 0;
}
static const struct i2c_device_id ad5820_id_table[] = {
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 8679a44e6413..4a255a492918 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1174,7 +1174,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int ad9389b_remove(struct i2c_client *client)
+static void ad9389b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ad9389b_state *state = get_ad9389b_state(sd);
@@ -1192,7 +1192,6 @@ static int ad9389b_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 522a0b10e415..1f353157df07 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -510,7 +510,7 @@ free_and_quit:
return ret;
}
-static int adp1653_remove(struct i2c_client *client)
+static void adp1653_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct adp1653_flash *flash = to_adp1653_flash(subdev);
@@ -518,8 +518,6 @@ static int adp1653_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&flash->subdev);
v4l2_ctrl_handler_free(&flash->ctrls);
media_entity_cleanup(&flash->subdev.entity);
-
- return 0;
}
static const struct i2c_device_id adp1653_id_table[] = {
diff --git a/drivers/media/i2c/adv7170.c b/drivers/media/i2c/adv7170.c
index 714e31f993e1..61a2f87d3c62 100644
--- a/drivers/media/i2c/adv7170.c
+++ b/drivers/media/i2c/adv7170.c
@@ -368,12 +368,11 @@ static int adv7170_probe(struct i2c_client *client,
return 0;
}
-static int adv7170_remove(struct i2c_client *client)
+static void adv7170_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c
index 1813f67f0fe1..b58689728243 100644
--- a/drivers/media/i2c/adv7175.c
+++ b/drivers/media/i2c/adv7175.c
@@ -423,12 +423,11 @@ static int adv7175_probe(struct i2c_client *client,
return 0;
}
-static int adv7175_remove(struct i2c_client *client)
+static void adv7175_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 5fde5243722d..216fe396973f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1514,7 +1514,7 @@ err_unregister_csi_client:
return ret;
}
-static int adv7180_remove(struct i2c_client *client)
+static void adv7180_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7180_state *state = to_state(sd);
@@ -1534,8 +1534,6 @@ static int adv7180_remove(struct i2c_client *client)
adv7180_set_power_pin(state, false);
mutex_destroy(&state->mutex);
-
- return 0;
}
static const struct i2c_device_id adv7180_id[] = {
diff --git a/drivers/media/i2c/adv7183.c b/drivers/media/i2c/adv7183.c
index ba746a19fd39..313c706e8335 100644
--- a/drivers/media/i2c/adv7183.c
+++ b/drivers/media/i2c/adv7183.c
@@ -613,13 +613,12 @@ static int adv7183_probe(struct i2c_client *client,
return 0;
}
-static int adv7183_remove(struct i2c_client *client)
+static void adv7183_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id adv7183_id[] = {
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 63e94dfcb5d3..7e84869d2434 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -492,15 +492,13 @@ done:
return err;
}
-static int adv7343_remove(struct i2c_client *client)
+static void adv7343_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7343_state *state = to_state(sd);
v4l2_async_unregister_subdev(&state->sd);
v4l2_ctrl_handler_free(&state->hdl);
-
- return 0;
}
static const struct i2c_device_id adv7343_id[] = {
diff --git a/drivers/media/i2c/adv7393.c b/drivers/media/i2c/adv7393.c
index b6234c8231c9..fb5fefa83b18 100644
--- a/drivers/media/i2c/adv7393.c
+++ b/drivers/media/i2c/adv7393.c
@@ -437,15 +437,13 @@ static int adv7393_probe(struct i2c_client *client,
return err;
}
-static int adv7393_remove(struct i2c_client *client)
+static void adv7393_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7393_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
-
- return 0;
}
static const struct i2c_device_id adv7393_id[] = {
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
index 4e54148147b9..4498d78a2357 100644
--- a/drivers/media/i2c/adv748x/adv748x-core.c
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -815,7 +815,7 @@ err_free_mutex:
return ret;
}
-static int adv748x_remove(struct i2c_client *client)
+static void adv748x_remove(struct i2c_client *client)
{
struct adv748x_state *state = i2c_get_clientdata(client);
@@ -828,8 +828,6 @@ static int adv748x_remove(struct i2c_client *client)
adv748x_unregister_clients(state);
adv748x_dt_cleanup(state);
mutex_destroy(&state->mutex);
-
- return 0;
}
static const struct of_device_id adv748x_of_table[] = {
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 202e0cd83f90..0d5ce69f12e7 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -943,8 +943,8 @@ static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
msg.len);
- if (msg.len > 16)
- msg.len = 16;
+ if (msg.len > CEC_MAX_MSG_SIZE)
+ msg.len = CEC_MAX_MSG_SIZE;
if (msg.len) {
u8 i;
@@ -1923,7 +1923,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv7511_remove(struct i2c_client *client)
+static void adv7511_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7511_state *state = get_adv7511_state(sd);
@@ -1943,7 +1943,6 @@ static int adv7511_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 497419a5cfdd..bda0c547ce44 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2047,8 +2047,8 @@ static void adv76xx_cec_isr(struct v4l2_subdev *sd, bool *handled)
struct cec_msg msg;
msg.len = cec_read(sd, 0x25) & 0x1f;
- if (msg.len > 16)
- msg.len = 16;
+ if (msg.len > CEC_MAX_MSG_SIZE)
+ msg.len = CEC_MAX_MSG_SIZE;
if (msg.len) {
u8 i;
@@ -3660,7 +3660,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv76xx_remove(struct i2c_client *client)
+static void adv76xx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv76xx_state *state = to_state(sd);
@@ -3677,7 +3677,6 @@ static int adv76xx_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
adv76xx_unregister_clients(to_state(sd));
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 22caa070273b..7731cc1887e6 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2215,8 +2215,8 @@ static void adv7842_cec_isr(struct v4l2_subdev *sd, bool *handled)
struct cec_msg msg;
msg.len = cec_read(sd, 0x25) & 0x1f;
- if (msg.len > 16)
- msg.len = 16;
+ if (msg.len > CEC_MAX_MSG_SIZE)
+ msg.len = CEC_MAX_MSG_SIZE;
if (msg.len) {
u8 i;
@@ -3593,7 +3593,7 @@ err_hdl:
/* ----------------------------------------------------------------------- */
-static int adv7842_remove(struct i2c_client *client)
+static void adv7842_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7842_state *state = to_state(sd);
@@ -3604,7 +3604,6 @@ static int adv7842_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
adv7842_unregister_clients(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
index 40b1a4aa846c..1af9f698eecf 100644
--- a/drivers/media/i2c/ak7375.c
+++ b/drivers/media/i2c/ak7375.c
@@ -169,7 +169,7 @@ err_cleanup:
return ret;
}
-static int ak7375_remove(struct i2c_client *client)
+static void ak7375_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
@@ -177,8 +177,6 @@ static int ak7375_remove(struct i2c_client *client)
ak7375_subdev_cleanup(ak7375_dev);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
/*
diff --git a/drivers/media/i2c/ak881x.c b/drivers/media/i2c/ak881x.c
index dc569d5a4d9d..0370ad6b6811 100644
--- a/drivers/media/i2c/ak881x.c
+++ b/drivers/media/i2c/ak881x.c
@@ -297,13 +297,11 @@ static int ak881x_probe(struct i2c_client *client,
return 0;
}
-static int ak881x_remove(struct i2c_client *client)
+static void ak881x_remove(struct i2c_client *client)
{
struct ak881x *ak881x = to_ak881x(client);
v4l2_device_unregister_subdev(&ak881x->subdev);
-
- return 0;
}
static const struct i2c_device_id ak881x_id[] = {
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index c7bdfc69b9be..c6ab531532be 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -1018,7 +1018,7 @@ entity_cleanup:
return ret;
}
-static int ar0521_remove(struct i2c_client *client)
+static void ar0521_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ar0521_dev *sensor = to_ar0521_dev(sd);
@@ -1031,7 +1031,6 @@ static int ar0521_remove(struct i2c_client *client)
ar0521_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&sensor->lock);
- return 0;
}
static const struct dev_pm_ops ar0521_pm_ops = {
diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c
index 73bc50c919d7..4d9bb6eb7d65 100644
--- a/drivers/media/i2c/bt819.c
+++ b/drivers/media/i2c/bt819.c
@@ -446,14 +446,13 @@ static int bt819_probe(struct i2c_client *client,
return 0;
}
-static int bt819_remove(struct i2c_client *client)
+static void bt819_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct bt819 *decoder = to_bt819(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/bt856.c b/drivers/media/i2c/bt856.c
index c134fda270a1..70443ef1ac46 100644
--- a/drivers/media/i2c/bt856.c
+++ b/drivers/media/i2c/bt856.c
@@ -223,12 +223,11 @@ static int bt856_probe(struct i2c_client *client,
return 0;
}
-static int bt856_remove(struct i2c_client *client)
+static void bt856_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id bt856_id[] = {
diff --git a/drivers/media/i2c/bt866.c b/drivers/media/i2c/bt866.c
index 1a8df9f18ffb..c2508cbafd02 100644
--- a/drivers/media/i2c/bt866.c
+++ b/drivers/media/i2c/bt866.c
@@ -190,12 +190,11 @@ static int bt866_probe(struct i2c_client *client,
return 0;
}
-static int bt866_remove(struct i2c_client *client)
+static void bt866_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id bt866_id[] = {
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 7609add2aff4..4a14d7e5d9f2 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3665,7 +3665,7 @@ out_power_off:
return rval;
}
-static int ccs_remove(struct i2c_client *client)
+static void ccs_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ccs_sensor *sensor = to_ccs_sensor(subdev);
@@ -3687,8 +3687,6 @@ static int ccs_remove(struct i2c_client *client)
kfree(sensor->ccs_limits);
kvfree(sensor->sdata.backing);
kvfree(sensor->mdata.backing);
-
- return 0;
}
static const struct ccs_device smia_device = {
diff --git a/drivers/media/i2c/cs3308.c b/drivers/media/i2c/cs3308.c
index ebe55e261bff..d901a59883a9 100644
--- a/drivers/media/i2c/cs3308.c
+++ b/drivers/media/i2c/cs3308.c
@@ -99,13 +99,12 @@ static int cs3308_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int cs3308_remove(struct i2c_client *client)
+static void cs3308_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/cs5345.c b/drivers/media/i2c/cs5345.c
index f6dd5edf77dd..591b1e7b24ee 100644
--- a/drivers/media/i2c/cs5345.c
+++ b/drivers/media/i2c/cs5345.c
@@ -178,14 +178,13 @@ static int cs5345_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int cs5345_remove(struct i2c_client *client)
+static void cs5345_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct cs5345_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/cs53l32a.c b/drivers/media/i2c/cs53l32a.c
index 9a411106cfb3..9461589aea30 100644
--- a/drivers/media/i2c/cs53l32a.c
+++ b/drivers/media/i2c/cs53l32a.c
@@ -190,14 +190,13 @@ static int cs53l32a_probe(struct i2c_client *client,
return 0;
}
-static int cs53l32a_remove(struct i2c_client *client)
+static void cs53l32a_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct cs53l32a_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id cs53l32a_id[] = {
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index dc31944c7d5b..f1a978af82ef 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -6026,7 +6026,7 @@ static int cx25840_probe(struct i2c_client *client,
return 0;
}
-static int cx25840_remove(struct i2c_client *client)
+static void cx25840_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct cx25840_state *state = to_state(sd);
@@ -6034,7 +6034,6 @@ static int cx25840_remove(struct i2c_client *client)
cx25840_ir_remove(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id cx25840_id[] = {
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 9d7d1d149f1a..8cef9656c612 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -196,7 +196,7 @@ static u32 clock_divider_to_resolution(u16 divider)
{
/*
* Resolution is the duration of 1 tick of the readable portion of
- * of the pulse width counter as read from the FIFO. The two lsb's are
+ * the pulse width counter as read from the FIFO. The two lsb's are
* not readable, hence the << 2. This function returns ns.
*/
return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000,
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 206d74338b9c..af59687383aa 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -190,7 +190,7 @@ err_cleanup:
return rval;
}
-static int dw9714_remove(struct i2c_client *client)
+static void dw9714_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
@@ -206,8 +206,6 @@ static int dw9714_remove(struct i2c_client *client)
}
pm_runtime_set_suspended(&client->dev);
dw9714_subdev_cleanup(dw9714_dev);
-
- return 0;
}
/*
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index c086580efac7..0f47ef015a1d 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -499,7 +499,7 @@ err_free_handler:
return ret;
}
-static int dw9768_remove(struct i2c_client *client)
+static void dw9768_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9768 *dw9768 = sd_to_dw9768(sd);
@@ -511,8 +511,6 @@ static int dw9768_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
dw9768_runtime_suspend(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct of_device_id dw9768_of_table[] = {
diff --git a/drivers/media/i2c/dw9807-vcm.c b/drivers/media/i2c/dw9807-vcm.c
index 01c372925a80..3599720db7e9 100644
--- a/drivers/media/i2c/dw9807-vcm.c
+++ b/drivers/media/i2c/dw9807-vcm.c
@@ -216,7 +216,7 @@ err_cleanup:
return rval;
}
-static int dw9807_remove(struct i2c_client *client)
+static void dw9807_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct dw9807_device *dw9807_dev = sd_to_dw9807_vcm(sd);
@@ -224,8 +224,6 @@ static int dw9807_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
dw9807_subdev_cleanup(dw9807_dev);
-
- return 0;
}
/*
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 873d614339bb..ff9bb9fc97dd 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1460,7 +1460,7 @@ err_mutex:
return ret;
}
-static int __exit et8ek8_remove(struct i2c_client *client)
+static void __exit et8ek8_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
@@ -1477,8 +1477,6 @@ static int __exit et8ek8_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&sensor->subdev);
media_entity_cleanup(&sensor->subdev.entity);
mutex_destroy(&sensor->power_lock);
-
- return 0;
}
static const struct of_device_id et8ek8_of_table[] = {
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 055d1aa8410e..e422ac7609b5 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1101,7 +1101,7 @@ check_hwcfg_error:
return ret;
}
-static int hi556_remove(struct i2c_client *client)
+static void hi556_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi556 *hi556 = to_hi556(sd);
@@ -1111,8 +1111,6 @@ static int hi556_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&hi556->mutex);
-
- return 0;
}
static int hi556_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index ad35c3ff3611..c5b69823f257 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -2143,7 +2143,7 @@ err_mutex:
return ret;
}
-static int hi846_remove(struct i2c_client *client)
+static void hi846_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi846 *hi846 = to_hi846(sd);
@@ -2158,8 +2158,6 @@ static int hi846_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&hi846->mutex);
-
- return 0;
}
static const struct dev_pm_ops hi846_pm_ops = {
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 7e85349e1852..5a82b15a9513 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -2903,7 +2903,7 @@ check_hwcfg_error:
return ret;
}
-static int hi847_remove(struct i2c_client *client)
+static void hi847_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct hi847 *hi847 = to_hi847(sd);
@@ -2913,8 +2913,6 @@ static int hi847_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&hi847->mutex);
-
- return 0;
}
static int hi847_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index b9516b2f1c15..a0e17bb9d4ca 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -1061,7 +1061,7 @@ error_probe:
return ret;
}
-static int imx208_remove(struct i2c_client *client)
+static void imx208_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx208 *imx208 = to_imx208(sd);
@@ -1075,8 +1075,6 @@ static int imx208_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx208->imx208_mx);
-
- return 0;
}
static const struct dev_pm_ops imx208_pm_ops = {
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 83c1737abeec..710c9fb515fd 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -1080,7 +1080,7 @@ free_ctrl:
return ret;
}
-static int imx214_remove(struct i2c_client *client)
+static void imx214_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx214 *imx214 = to_imx214(sd);
@@ -1093,8 +1093,6 @@ static int imx214_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx214->mutex);
-
- return 0;
}
static const struct of_device_id imx214_of_match[] = {
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index e10af3f74b38..77bd79a5954e 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -1562,7 +1562,7 @@ error_power_off:
return ret;
}
-static int imx219_remove(struct i2c_client *client)
+static void imx219_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx219 *imx219 = to_imx219(sd);
@@ -1575,8 +1575,6 @@ static int imx219_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
imx219_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct of_device_id imx219_dt_ids[] = {
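Several of the sensor drivers in this group (imx208, imx214, imx219, imx258 and others below) pair the subdev unregistration with runtime-PM teardown in remove(). A hedged sketch of that teardown order, with "foo" as a placeholder driver and foo_power_off() standing in for the driver's own power-off helper (the role imx219_power_off() plays in the hunk above):

	static void foo_remove(struct i2c_client *client)
	{
		struct v4l2_subdev *sd = i2c_get_clientdata(client);

		/* detach from the async framework and media graph first */
		v4l2_async_unregister_subdev(sd);
		media_entity_cleanup(&sd->entity);
		v4l2_ctrl_handler_free(sd->ctrl_handler);

		/* stop runtime PM, power down if still active, record the state */
		pm_runtime_disable(&client->dev);
		if (!pm_runtime_status_suspended(&client->dev))
			foo_power_off(&client->dev);
		pm_runtime_set_suspended(&client->dev);
	}

Unregistering first keeps new users from reaching the device; the explicit power-off covers the case where the sensor was still active when remove() ran, and pm_runtime_set_suspended() brings the runtime-PM bookkeeping back in line before the device goes away.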
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index c249507aa2db..eab5fc1ee2f7 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1338,7 +1338,7 @@ error_identify:
return ret;
}
-static int imx258_remove(struct i2c_client *client)
+static void imx258_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx258 *imx258 = to_imx258(sd);
@@ -1351,8 +1351,6 @@ static int imx258_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
imx258_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops imx258_pm_ops = {
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 7de1f2948e53..a00761b1e18c 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -2142,7 +2142,7 @@ err_regmap:
return ret;
}
-static int imx274_remove(struct i2c_client *client)
+static void imx274_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct stimx274 *imx274 = to_imx274(sd);
@@ -2157,7 +2157,6 @@ static int imx274_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
mutex_destroy(&imx274->lock);
- return 0;
}
static const struct dev_pm_ops imx274_pm_ops = {
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 99f2a50d39a4..1ce64dcdf7f0 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1119,7 +1119,7 @@ free_err:
return ret;
}
-static int imx290_remove(struct i2c_client *client)
+static void imx290_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx290 *imx290 = to_imx290(sd);
@@ -1134,8 +1134,6 @@ static int imx290_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(imx290->dev))
imx290_power_off(imx290->dev);
pm_runtime_set_suspended(imx290->dev);
-
- return 0;
}
static const struct of_device_id imx290_of_match[] = {
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index a2b5a34de76b..245a18fb40ad 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2523,7 +2523,7 @@ error_probe:
return ret;
}
-static int imx319_remove(struct i2c_client *client)
+static void imx319_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx319 *imx319 = to_imx319(sd);
@@ -2536,8 +2536,6 @@ static int imx319_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx319->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx319_pm_ops = {
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 062125501788..7b0a9086447d 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -1089,7 +1089,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx334_remove(struct i2c_client *client)
+static void imx334_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx334 *imx334 = to_imx334(sd);
@@ -1102,8 +1102,6 @@ static int imx334_remove(struct i2c_client *client)
pm_runtime_suspended(&client->dev);
mutex_destroy(&imx334->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx334_pm_ops = {
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 410d6b86feb5..078ede2b7a00 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -1083,7 +1083,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx335_remove(struct i2c_client *client)
+static void imx335_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx335 *imx335 = to_imx335(sd);
@@ -1098,8 +1098,6 @@ static int imx335_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx335->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx335_pm_ops = {
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 3922b9305978..b46178681c05 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1810,7 +1810,7 @@ error_probe:
return ret;
}
-static int imx355_remove(struct i2c_client *client)
+static void imx355_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx355 *imx355 = to_imx355(sd);
@@ -1823,8 +1823,6 @@ static int imx355_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx355->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx355_pm_ops = {
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index a1394d6c1432..7f6d29e0e7c4 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -1257,7 +1257,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx412_remove(struct i2c_client *client)
+static void imx412_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx412 *imx412 = to_imx412(sd);
@@ -1272,8 +1272,6 @@ static int imx412_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx412->mutex);
-
- return 0;
}
static const struct dev_pm_ops imx412_pm_ops = {
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 56674173524f..ee6bbbb977f7 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -915,7 +915,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
return err;
}
-static int ir_remove(struct i2c_client *client)
+static void ir_remove(struct i2c_client *client)
{
struct IR_i2c *ir = i2c_get_clientdata(client);
@@ -924,8 +924,6 @@ static int ir_remove(struct i2c_client *client)
i2c_unregister_device(ir->tx_c);
rc_unregister_device(ir->rc);
-
- return 0;
}
static const struct i2c_device_id ir_kbd_id[] = {
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index dc3068549dfa..246d8d182a8e 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1544,7 +1544,7 @@ err_entity_cleanup:
return ret;
}
-static int isl7998x_remove(struct i2c_client *client)
+static void isl7998x_remove(struct i2c_client *client)
{
struct isl7998x *isl7998x = i2c_to_isl7998x(client);
@@ -1552,8 +1552,6 @@ static int isl7998x_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&isl7998x->subdev);
isl7998x_remove_controls(isl7998x);
media_entity_cleanup(&isl7998x->subdev.entity);
-
- return 0;
}
static const struct of_device_id isl7998x_of_match[] = {
diff --git a/drivers/media/i2c/ks0127.c b/drivers/media/i2c/ks0127.c
index c077f53b9c30..215d9a43b0b9 100644
--- a/drivers/media/i2c/ks0127.c
+++ b/drivers/media/i2c/ks0127.c
@@ -675,14 +675,13 @@ static int ks0127_probe(struct i2c_client *client, const struct i2c_device_id *i
return 0;
}
-static int ks0127_remove(struct i2c_client *client)
+static void ks0127_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
ks0127_write(sd, KS_OFMTA, 0x20); /* tristate */
ks0127_write(sd, KS_CMDA, 0x2c | 0x80); /* power down */
- return 0;
}
static const struct i2c_device_id ks0127_id[] = {
diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c
index 9e34ccce4fc3..edad3138cb07 100644
--- a/drivers/media/i2c/lm3560.c
+++ b/drivers/media/i2c/lm3560.c
@@ -443,7 +443,7 @@ static int lm3560_probe(struct i2c_client *client,
return 0;
}
-static int lm3560_remove(struct i2c_client *client)
+static void lm3560_remove(struct i2c_client *client)
{
struct lm3560_flash *flash = i2c_get_clientdata(client);
unsigned int i;
@@ -453,8 +453,6 @@ static int lm3560_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&flash->ctrls_led[i]);
media_entity_cleanup(&flash->subdev_led[i].entity);
}
-
- return 0;
}
static const struct i2c_device_id lm3560_id_table[] = {
diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c
index c76ccf67a909..0aaa963917d8 100644
--- a/drivers/media/i2c/lm3646.c
+++ b/drivers/media/i2c/lm3646.c
@@ -377,15 +377,13 @@ static int lm3646_probe(struct i2c_client *client,
return 0;
}
-static int lm3646_remove(struct i2c_client *client)
+static void lm3646_remove(struct i2c_client *client)
{
struct lm3646_flash *flash = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(&flash->subdev_led);
v4l2_ctrl_handler_free(&flash->ctrls_led);
media_entity_cleanup(&flash->subdev_led.entity);
-
- return 0;
}
static const struct i2c_device_id lm3646_id_table[] = {
diff --git a/drivers/media/i2c/m52790.c b/drivers/media/i2c/m52790.c
index 0a1efc1417bc..2ab91b993c33 100644
--- a/drivers/media/i2c/m52790.c
+++ b/drivers/media/i2c/m52790.c
@@ -154,12 +154,11 @@ static int m52790_probe(struct i2c_client *client,
return 0;
}
-static int m52790_remove(struct i2c_client *client)
+static void m52790_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index c19590389bfe..2201d2a26353 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -1020,15 +1020,13 @@ error:
return ret;
}
-static int m5mols_remove(struct i2c_client *client)
+static void m5mols_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id m5mols_id[] = {
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index 0eea200124d2..1019020f3a37 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1403,15 +1403,13 @@ err_reg:
return ret;
}
-static int max2175_remove(struct i2c_client *client)
+static void max2175_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct max2175 *ctx = max2175_from_sd(sd);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
v4l2_async_unregister_subdev(sd);
-
- return 0;
}
static const struct i2c_device_id max2175_id[] = {
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 3684faa72253..9c083cf14231 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1378,7 +1378,7 @@ err_powerdown:
return ret;
}
-static int max9286_remove(struct i2c_client *client)
+static void max9286_remove(struct i2c_client *client)
{
struct max9286_priv *priv = sd_to_max9286(i2c_get_clientdata(client));
@@ -1391,8 +1391,6 @@ static int max9286_remove(struct i2c_client *client)
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
max9286_cleanup_dt(priv);
-
- return 0;
}
static const struct of_device_id max9286_dt_ids[] = {
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index 48cc0b0922f4..49ec59b0ca43 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -415,15 +415,13 @@ cleanup:
return ret;
}
-static int ml86v7667_remove(struct i2c_client *client)
+static void ml86v7667_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ml86v7667_priv *priv = to_ml86v7667(sd);
v4l2_ctrl_handler_free(&priv->hdl);
v4l2_device_unregister_subdev(&priv->sd);
-
- return 0;
}
static const struct i2c_device_id ml86v7667_id[] = {
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 39530d43590e..4ce7a15a9884 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -859,7 +859,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int msp_remove(struct i2c_client *client)
+static void msp_remove(struct i2c_client *client)
{
struct msp_state *state = to_state(i2c_get_clientdata(client));
@@ -872,7 +872,6 @@ static int msp_remove(struct i2c_client *client)
msp_reset(client);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index ad13b0c890c0..ebf9cf1e1bce 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -833,7 +833,7 @@ error_hdl_free:
return ret;
}
-static int mt9m001_remove(struct i2c_client *client)
+static void mt9m001_remove(struct i2c_client *client)
{
struct mt9m001 *mt9m001 = to_mt9m001(client);
@@ -853,8 +853,6 @@ static int mt9m001_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&mt9m001->hdl);
mutex_destroy(&mt9m001->mutex);
-
- return 0;
}
static const struct i2c_device_id mt9m001_id[] = {
diff --git a/drivers/media/i2c/mt9m032.c b/drivers/media/i2c/mt9m032.c
index ba0c0ea91c95..76b8c9c08c82 100644
--- a/drivers/media/i2c/mt9m032.c
+++ b/drivers/media/i2c/mt9m032.c
@@ -858,7 +858,7 @@ error_sensor:
return ret;
}
-static int mt9m032_remove(struct i2c_client *client)
+static void mt9m032_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9m032 *sensor = to_mt9m032(subdev);
@@ -867,7 +867,6 @@ static int mt9m032_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&sensor->ctrls);
media_entity_cleanup(&subdev->entity);
mutex_destroy(&sensor->lock);
- return 0;
}
static const struct i2c_device_id mt9m032_id_table[] = {
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index afc86efa9e3e..f5fe272d1205 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1359,15 +1359,13 @@ out_hdlfree:
return ret;
}
-static int mt9m111_remove(struct i2c_client *client)
+static void mt9m111_remove(struct i2c_client *client)
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
v4l2_async_unregister_subdev(&mt9m111->subdev);
media_entity_cleanup(&mt9m111->subdev.entity);
v4l2_ctrl_handler_free(&mt9m111->hdl);
-
- return 0;
}
static const struct of_device_id mt9m111_of_match[] = {
{ .compatible = "micron,mt9m111", },
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 1fd4dc6e4726..45f7b5e52bc3 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -1209,7 +1209,7 @@ done:
return ret;
}
-static int mt9p031_remove(struct i2c_client *client)
+static void mt9p031_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
@@ -1218,8 +1218,6 @@ static int mt9p031_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
mutex_destroy(&mt9p031->power_lock);
-
- return 0;
}
static const struct i2c_device_id mt9p031_id[] = {
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index b651ee4a26e8..d5abe4a7ef07 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -961,7 +961,7 @@ done:
return ret;
}
-static int mt9t001_remove(struct i2c_client *client)
+static void mt9t001_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9t001 *mt9t001 = to_mt9t001(subdev);
@@ -969,7 +969,6 @@ static int mt9t001_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&mt9t001->ctrls);
v4l2_device_unregister_subdev(subdev);
media_entity_cleanup(&subdev->entity);
- return 0;
}
static const struct i2c_device_id mt9t001_id[] = {
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index 8d2e3caa9b28..ad564095d0cf 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1102,14 +1102,12 @@ static int mt9t112_probe(struct i2c_client *client,
return v4l2_async_register_subdev(&priv->subdev);
}
-static int mt9t112_remove(struct i2c_client *client)
+static void mt9t112_remove(struct i2c_client *client)
{
struct mt9t112_priv *priv = to_mt9t112(client);
clk_disable_unprepare(priv->clk);
v4l2_async_unregister_subdev(&priv->subdev);
-
- return 0;
}
static const struct i2c_device_id mt9t112_id[] = {
diff --git a/drivers/media/i2c/mt9v011.c b/drivers/media/i2c/mt9v011.c
index 7699e64e1127..9952ce06ebb2 100644
--- a/drivers/media/i2c/mt9v011.c
+++ b/drivers/media/i2c/mt9v011.c
@@ -561,7 +561,7 @@ static int mt9v011_probe(struct i2c_client *c,
return 0;
}
-static int mt9v011_remove(struct i2c_client *c)
+static void mt9v011_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct mt9v011 *core = to_mt9v011(sd);
@@ -572,8 +572,6 @@ static int mt9v011_remove(struct i2c_client *c)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&core->ctrls);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 4cfdd3dfbd42..bc4388ccc2a8 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -1192,7 +1192,7 @@ err:
return ret;
}
-static int mt9v032_remove(struct i2c_client *client)
+static void mt9v032_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct mt9v032 *mt9v032 = to_mt9v032(subdev);
@@ -1200,8 +1200,6 @@ static int mt9v032_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
v4l2_ctrl_handler_free(&mt9v032->ctrls);
media_entity_cleanup(&subdev->entity);
-
- return 0;
}
static const struct mt9v032_model_data mt9v032_model_data[] = {
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 2dc4a0f24ce8..fe18e5258d7a 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1238,7 +1238,7 @@ error_free_ctrls:
return ret;
}
-static int mt9v111_remove(struct i2c_client *client)
+static void mt9v111_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mt9v111_dev *mt9v111 = sd_to_mt9v111(sd);
@@ -1253,8 +1253,6 @@ static int mt9v111_remove(struct i2c_client *client)
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
-
- return 0;
}
static const struct of_device_id mt9v111_of_match[] = {
diff --git a/drivers/media/i2c/noon010pc30.c b/drivers/media/i2c/noon010pc30.c
index bc5187f46365..ecaf5e9057f1 100644
--- a/drivers/media/i2c/noon010pc30.c
+++ b/drivers/media/i2c/noon010pc30.c
@@ -789,7 +789,7 @@ np_err:
return ret;
}
-static int noon010_remove(struct i2c_client *client)
+static void noon010_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct noon010_info *info = to_noon010(sd);
@@ -797,8 +797,6 @@ static int noon010_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id noon010_id[] = {
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 87179fc04e00..35663c10fcd9 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1015,7 +1015,7 @@ check_hwcfg_error:
return ret;
}
-static int og01a1b_remove(struct i2c_client *client)
+static void og01a1b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct og01a1b *og01a1b = to_og01a1b(sd);
@@ -1025,8 +1025,6 @@ static int og01a1b_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&og01a1b->mutex);
-
- return 0;
}
static int og01a1b_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 0f08c05333ea..2c1eb724d8e5 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -975,7 +975,7 @@ err_destroy_mutex:
return ret;
}
-static int ov02a10_remove(struct i2c_client *client)
+static void ov02a10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov02a10 *ov02a10 = to_ov02a10(sd);
@@ -988,8 +988,6 @@ static int ov02a10_remove(struct i2c_client *client)
ov02a10_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&ov02a10->mutex);
-
- return 0;
}
static const struct of_device_id ov02a10_of_match[] = {
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index e5ef6466a3ec..c1703596c3dc 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -1415,7 +1415,7 @@ check_hwcfg_error:
return ret;
}
-static int ov08d10_remove(struct i2c_client *client)
+static void ov08d10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov08d10 *ov08d10 = to_ov08d10(sd);
@@ -1425,8 +1425,6 @@ static int ov08d10_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov08d10->mutex);
-
- return 0;
}
static int ov08d10_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index d5fe67c763f7..e618b613e078 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1769,7 +1769,7 @@ error_handler_free:
return ret;
}
-static int ov13858_remove(struct i2c_client *client)
+static void ov13858_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov13858 *ov13858 = to_ov13858(sd);
@@ -1779,8 +1779,6 @@ static int ov13858_remove(struct i2c_client *client)
ov13858_free_controls(ov13858);
pm_runtime_disable(&client->dev);
-
- return 0;
}
static const struct i2c_device_id ov13858_id_table[] = {
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index 7caeae641051..549e5d93e568 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -1447,7 +1447,7 @@ error_handler_free:
return ret;
}
-static int ov13b10_remove(struct i2c_client *client)
+static void ov13b10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov13b10 *ov13b = to_ov13b10(sd);
@@ -1457,8 +1457,6 @@ static int ov13b10_remove(struct i2c_client *client)
ov13b10_free_controls(ov13b);
pm_runtime_disable(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov13b10_pm_ops = {
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 4b75da55b260..29ed0ef8c033 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1271,7 +1271,7 @@ err_clk:
return ret;
}
-static int ov2640_remove(struct i2c_client *client)
+static void ov2640_remove(struct i2c_client *client)
{
struct ov2640_priv *priv = to_ov2640(client);
@@ -1281,7 +1281,6 @@ static int ov2640_remove(struct i2c_client *client)
media_entity_cleanup(&priv->subdev.entity);
v4l2_device_unregister_subdev(&priv->subdev);
clk_disable_unprepare(priv->clk);
- return 0;
}
static const struct i2c_device_id ov2640_id[] = {
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 13ded5b2aa66..42fc64ada08c 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1544,7 +1544,7 @@ error:
return ret;
}
-static int ov2659_remove(struct i2c_client *client)
+static void ov2659_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2659 *ov2659 = to_ov2659(sd);
@@ -1558,8 +1558,6 @@ static int ov2659_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
ov2659_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov2659_pm_ops = {
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 906c711f6821..de66d3395a4d 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1097,7 +1097,7 @@ lock_destroy:
return ret;
}
-static int ov2680_remove(struct i2c_client *client)
+static void ov2680_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -1106,8 +1106,6 @@ static int ov2680_remove(struct i2c_client *client)
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-
- return 0;
}
static int __maybe_unused ov2680_suspend(struct device *dev)
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index b6e010ea3249..a3b524f15d89 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -798,7 +798,7 @@ err_destroy_mutex:
return ret;
}
-static int ov2685_remove(struct i2c_client *client)
+static void ov2685_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2685 *ov2685 = to_ov2685(sd);
@@ -814,8 +814,6 @@ static int ov2685_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
__ov2685_power_off(ov2685);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index d5f0eabf20c6..5d74ad479214 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1053,7 +1053,7 @@ check_hwcfg_error:
return ret;
}
-static int ov2740_remove(struct i2c_client *client)
+static void ov2740_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2740 *ov2740 = to_ov2740(sd);
@@ -1063,8 +1063,6 @@ static int ov2740_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov2740->mutex);
-
- return 0;
}
static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 502f0b62e950..1852e1cfc7df 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3906,7 +3906,7 @@ entity_cleanup:
return ret;
}
-static int ov5640_remove(struct i2c_client *client)
+static void ov5640_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5640_dev *sensor = to_ov5640_dev(sd);
@@ -3915,8 +3915,6 @@ static int ov5640_remove(struct i2c_client *client)
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->lock);
-
- return 0;
}
static const struct i2c_device_id ov5640_id[] = {
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 562c62f192c4..81e4e87e1821 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -1256,7 +1256,7 @@ free_ctrl:
return ret;
}
-static int ov5645_remove(struct i2c_client *client)
+static void ov5645_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5645 *ov5645 = to_ov5645(sd);
@@ -1265,8 +1265,6 @@ static int ov5645_remove(struct i2c_client *client)
media_entity_cleanup(&ov5645->sd.entity);
v4l2_ctrl_handler_free(&ov5645->ctrls);
mutex_destroy(&ov5645->power_lock);
-
- return 0;
}
static const struct i2c_device_id ov5645_id[] = {
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index d346d18ce629..847a7bbb69c5 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1448,7 +1448,7 @@ mutex_destroy:
return ret;
}
-static int ov5647_remove(struct i2c_client *client)
+static void ov5647_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5647 *sensor = to_sensor(sd);
@@ -1459,8 +1459,6 @@ static int ov5647_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
pm_runtime_disable(&client->dev);
mutex_destroy(&sensor->lock);
-
- return 0;
}
static const struct dev_pm_ops ov5647_pm_ops = {
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index dfcd33e9ee13..84604ea7bdf9 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -2587,7 +2587,7 @@ error_endpoint:
return ret;
}
-static int ov5648_remove(struct i2c_client *client)
+static void ov5648_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ov5648_sensor *sensor = ov5648_subdev_sensor(subdev);
@@ -2597,8 +2597,6 @@ static int ov5648_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
mutex_destroy(&sensor->mutex);
media_entity_cleanup(&subdev->entity);
-
- return 0;
}
static const struct dev_pm_ops ov5648_pm_ops = {
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 02f75c18e480..bc9fc3bc90c2 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2557,7 +2557,7 @@ error_print:
return ret;
}
-static int ov5670_remove(struct i2c_client *client)
+static void ov5670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5670 *ov5670 = to_ov5670(sd);
@@ -2568,8 +2568,6 @@ static int ov5670_remove(struct i2c_client *client)
mutex_destroy(&ov5670->mutex);
pm_runtime_disable(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov5670_pm_ops = {
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 82ba9f56baec..94dc8cb7a7c0 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1175,7 +1175,7 @@ check_hwcfg_error:
return ret;
}
-static int ov5675_remove(struct i2c_client *client)
+static void ov5675_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5675 *ov5675 = to_ov5675(sd);
@@ -1185,8 +1185,6 @@ static int ov5675_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov5675->mutex);
-
- return 0;
}
static int ov5675_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 82a9b2de7735..a97ec132ba3a 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -1501,7 +1501,7 @@ err_ctrl_handler_free:
return ret;
}
-static int ov5693_remove(struct i2c_client *client)
+static void ov5693_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5693_device *ov5693 = to_ov5693_sensor(sd);
@@ -1519,8 +1519,6 @@ static int ov5693_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
ov5693_sensor_powerdown(ov5693);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
static const struct dev_pm_ops ov5693_pm_ops = {
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 910309783885..61906fc54e37 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1361,7 +1361,7 @@ err_destroy_mutex:
return ret;
}
-static int ov5695_remove(struct i2c_client *client)
+static void ov5695_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5695 *ov5695 = to_ov5695(sd);
@@ -1377,8 +1377,6 @@ static int ov5695_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
__ov5695_power_off(ov5695);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 6458e96d9091..18f041e985b7 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1096,13 +1096,12 @@ ectlhdlfree:
return ret;
}
-static int ov6650_remove(struct i2c_client *client)
+static void ov6650_remove(struct i2c_client *client)
{
struct ov6650 *priv = to_ov6650(client);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
- return 0;
}
static const struct i2c_device_id ov6650_id[] = {
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 1bd797c7926b..88e987435285 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1767,7 +1767,7 @@ destroy_mutex:
return ret;
}
-static int ov7251_remove(struct i2c_client *client)
+static void ov7251_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov7251 *ov7251 = to_ov7251(sd);
@@ -1781,8 +1781,6 @@ static int ov7251_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(ov7251->dev))
ov7251_set_power_off(ov7251->dev);
pm_runtime_set_suspended(ov7251->dev);
-
- return 0;
}
static const struct dev_pm_ops ov7251_pm_ops = {
diff --git a/drivers/media/i2c/ov7640.c b/drivers/media/i2c/ov7640.c
index 977cd2d8ad33..5e2d67f0f9f2 100644
--- a/drivers/media/i2c/ov7640.c
+++ b/drivers/media/i2c/ov7640.c
@@ -70,13 +70,11 @@ static int ov7640_probe(struct i2c_client *client,
}
-static int ov7640_remove(struct i2c_client *client)
+static void ov7640_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
static const struct i2c_device_id ov7640_id[] = {
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 1be2c0e5bdc1..4b9b156b53c7 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -2009,7 +2009,7 @@ power_off:
return ret;
}
-static int ov7670_remove(struct i2c_client *client)
+static void ov7670_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov7670_info *info = to_state(sd);
@@ -2017,7 +2017,6 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
media_entity_cleanup(&info->sd.entity);
- return 0;
}
static const struct i2c_device_id ov7670_id[] = {
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index 78602a2f70b0..4189e3fc3d53 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -1521,7 +1521,7 @@ error_mutex_destroy:
return ret;
}
-static int ov772x_remove(struct i2c_client *client)
+static void ov772x_remove(struct i2c_client *client)
{
struct ov772x_priv *priv = to_ov772x(i2c_get_clientdata(client));
@@ -1532,8 +1532,6 @@ static int ov772x_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
mutex_destroy(&priv->lock);
-
- return 0;
}
static const struct i2c_device_id ov772x_id[] = {
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 2539cfee85c8..c9fd9b0bc54a 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1153,7 +1153,7 @@ error_detect:
return ret;
}
-static int ov7740_remove(struct i2c_client *client)
+static void ov7740_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -1170,7 +1170,6 @@ static int ov7740_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
ov7740_set_power(ov7740, 0);
- return 0;
}
static int __maybe_unused ov7740_runtime_suspend(struct device *dev)
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index a9728afc81d4..efa18d026ac3 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -2440,7 +2440,7 @@ check_hwcfg_error:
return ret;
}
-static int ov8856_remove(struct i2c_client *client)
+static void ov8856_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov8856 *ov8856 = to_ov8856(sd);
@@ -2452,8 +2452,6 @@ static int ov8856_remove(struct i2c_client *client)
mutex_destroy(&ov8856->mutex);
__ov8856_power_off(ov8856);
-
- return 0;
}
static int ov8856_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index b8f4f0d3e33d..a233c34b168e 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -3119,7 +3119,7 @@ error_endpoint:
return ret;
}
-static int ov8865_remove(struct i2c_client *client)
+static void ov8865_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ov8865_sensor *sensor = ov8865_subdev_sensor(subdev);
@@ -3131,8 +3131,6 @@ static int ov8865_remove(struct i2c_client *client)
media_entity_cleanup(&subdev->entity);
v4l2_fwnode_endpoint_free(&sensor->endpoint);
-
- return 0;
}
static const struct dev_pm_ops ov8865_pm_ops = {
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 2e0b315801e5..df144a2f6eda 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -1091,7 +1091,7 @@ error_mutex_destroy:
*
* Return: 0 if successful, error code otherwise.
*/
-static int ov9282_remove(struct i2c_client *client)
+static void ov9282_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9282 *ov9282 = to_ov9282(sd);
@@ -1106,8 +1106,6 @@ static int ov9282_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
mutex_destroy(&ov9282->mutex);
-
- return 0;
}
static const struct dev_pm_ops ov9282_pm_ops = {
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 9f44ed52d164..8b80be33c5f4 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -744,15 +744,13 @@ ectrlinit:
return ret;
}
-static int ov9640_remove(struct i2c_client *client)
+static void ov9640_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9640_priv *priv = to_ov9640_sensor(sd);
v4l2_async_unregister_subdev(&priv->subdev);
v4l2_ctrl_handler_free(&priv->hdl);
-
- return 0;
}
static const struct i2c_device_id ov9640_id[] = {
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index c313e11a9754..4d458993e6d6 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1584,7 +1584,7 @@ err_mutex:
return ret;
}
-static int ov965x_remove(struct i2c_client *client)
+static void ov965x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov965x *ov965x = to_ov965x(sd);
@@ -1593,8 +1593,6 @@ static int ov965x_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
mutex_destroy(&ov965x->lock);
-
- return 0;
}
static const struct i2c_device_id ov965x_id[] = {
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index df538ceb71c3..8b0a158cb297 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -930,7 +930,7 @@ check_hwcfg_error:
return ret;
}
-static int ov9734_remove(struct i2c_client *client)
+static void ov9734_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov9734 *ov9734 = to_ov9734(sd);
@@ -940,8 +940,6 @@ static int ov9734_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
mutex_destroy(&ov9734->mutex);
-
- return 0;
}
static int ov9734_probe(struct i2c_client *client)
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 2615ad154f49..a2263fa825b5 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -646,7 +646,7 @@ error:
return ret;
}
-static int rdacm20_remove(struct i2c_client *client)
+static void rdacm20_remove(struct i2c_client *client)
{
struct rdacm20_device *dev = i2c_to_rdacm20(client);
@@ -655,8 +655,6 @@ static int rdacm20_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->ctrls);
media_entity_cleanup(&dev->sd.entity);
i2c_unregister_device(dev->sensor);
-
- return 0;
}
static void rdacm20_shutdown(struct i2c_client *client)
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index ef31cf5f23ca..9ccc56c30d3b 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -614,7 +614,7 @@ error:
return ret;
}
-static int rdacm21_remove(struct i2c_client *client)
+static void rdacm21_remove(struct i2c_client *client)
{
struct rdacm21_device *dev = sd_to_rdacm21(i2c_get_clientdata(client));
@@ -622,8 +622,6 @@ static int rdacm21_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->ctrls);
i2c_unregister_device(dev->isp);
fwnode_handle_put(dev->sd.fwnode);
-
- return 0;
}
static const struct of_device_id rdacm21_of_ids[] = {
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index 2e4018c26912..1c3502f34cd3 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1398,7 +1398,7 @@ err_free_ctrl:
return ret;
}
-static int rj54n1_remove(struct i2c_client *client)
+static void rj54n1_remove(struct i2c_client *client)
{
struct rj54n1 *rj54n1 = to_rj54n1(client);
@@ -1410,8 +1410,6 @@ static int rj54n1_remove(struct i2c_client *client)
clk_put(rj54n1->clk);
v4l2_ctrl_handler_free(&rj54n1->hdl);
v4l2_async_unregister_subdev(&rj54n1->subdev);
-
- return 0;
}
static const struct i2c_device_id rj54n1_id[] = {
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index e2b88c5e4f98..d96ba58ce1e5 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1770,7 +1770,7 @@ out_err:
return ret;
}
-static int s5c73m3_remove(struct i2c_client *client)
+static void s5c73m3_remove(struct i2c_client *client)
{
struct v4l2_subdev *oif_sd = i2c_get_clientdata(client);
struct s5c73m3 *state = oif_sd_to_s5c73m3(oif_sd);
@@ -1785,8 +1785,6 @@ static int s5c73m3_remove(struct i2c_client *client)
media_entity_cleanup(&sensor_sd->entity);
s5c73m3_unregister_spi_driver(state);
-
- return 0;
}
static const struct i2c_device_id s5c73m3_id[] = {
diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c
index af9a305242cd..3dddcd9dd351 100644
--- a/drivers/media/i2c/s5k4ecgx.c
+++ b/drivers/media/i2c/s5k4ecgx.c
@@ -996,7 +996,7 @@ out_err1:
return ret;
}
-static int s5k4ecgx_remove(struct i2c_client *client)
+static void s5k4ecgx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct s5k4ecgx *priv = to_s5k4ecgx(sd);
@@ -1006,8 +1006,6 @@ static int s5k4ecgx_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&priv->handler);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id s5k4ecgx_id[] = {
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 6a5dceb699a8..5c2253ab3b6f 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -2018,7 +2018,7 @@ err_me:
return ret;
}
-static int s5k5baf_remove(struct i2c_client *c)
+static void s5k5baf_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct s5k5baf *state = to_s5k5baf(sd);
@@ -2030,8 +2030,6 @@ static int s5k5baf_remove(struct i2c_client *c)
sd = &state->cis_sd;
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id s5k5baf_id[] = {
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index f6ecf6f92bb2..a4efd6d10b43 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -354,14 +354,13 @@ static int s5k6a3_probe(struct i2c_client *client)
return ret;
}
-static int s5k6a3_remove(struct i2c_client *client)
+static void s5k6a3_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
pm_runtime_disable(&client->dev);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
- return 0;
}
static const struct i2c_device_id s5k6a3_ids[] = {
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 105a4b7d8354..059211788a65 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -1621,15 +1621,13 @@ out_err:
return ret;
}
-static int s5k6aa_remove(struct i2c_client *client)
+static void s5k6aa_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
-
- return 0;
}
static const struct i2c_device_id s5k6aa_id[] = {
diff --git a/drivers/media/i2c/saa6588.c b/drivers/media/i2c/saa6588.c
index d1e0716bdfff..d6a51beabd02 100644
--- a/drivers/media/i2c/saa6588.c
+++ b/drivers/media/i2c/saa6588.c
@@ -484,7 +484,7 @@ static int saa6588_probe(struct i2c_client *client,
return 0;
}
-static int saa6588_remove(struct i2c_client *client)
+static void saa6588_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa6588 *s = to_saa6588(sd);
@@ -492,8 +492,6 @@ static int saa6588_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
cancel_delayed_work_sync(&s->work);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index a7f043cad149..5928cc6f4595 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -764,13 +764,12 @@ static int saa6752hs_probe(struct i2c_client *client,
return 0;
}
-static int saa6752hs_remove(struct i2c_client *client)
+static void saa6752hs_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- return 0;
}
static const struct i2c_device_id saa6752hs_id[] = {
diff --git a/drivers/media/i2c/saa7110.c b/drivers/media/i2c/saa7110.c
index 0c7a9ce0a693..5067525d8b11 100644
--- a/drivers/media/i2c/saa7110.c
+++ b/drivers/media/i2c/saa7110.c
@@ -428,14 +428,13 @@ static int saa7110_probe(struct i2c_client *client,
return 0;
}
-static int saa7110_remove(struct i2c_client *client)
+static void saa7110_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa7110 *decoder = to_saa7110(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 15ff80e6301e..86e70a980218 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1927,13 +1927,12 @@ static int saa711x_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int saa711x_remove(struct i2c_client *client)
+static void saa711x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id saa711x_id[] = {
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 891192f6412a..78c9388c2ea1 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -785,14 +785,13 @@ static int saa7127_probe(struct i2c_client *client,
/* ----------------------------------------------------------------------- */
-static int saa7127_remove(struct i2c_client *client)
+static void saa7127_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
/* Turn off TV output */
saa7127_set_video_enable(sd, 0);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index adf905360171..4f3d1b432a4e 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1324,13 +1324,12 @@ static int saa717x_probe(struct i2c_client *client,
return 0;
}
-static int saa717x_remove(struct i2c_client *client)
+static void saa717x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/saa7185.c b/drivers/media/i2c/saa7185.c
index 7a04422df8c8..266462325d30 100644
--- a/drivers/media/i2c/saa7185.c
+++ b/drivers/media/i2c/saa7185.c
@@ -322,7 +322,7 @@ static int saa7185_probe(struct i2c_client *client,
return 0;
}
-static int saa7185_remove(struct i2c_client *client)
+static void saa7185_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa7185 *encoder = to_saa7185(sd);
@@ -330,7 +330,6 @@ static int saa7185_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
/* SW: output off is active */
saa7185_write(sd, 0x61, (encoder->reg[0x61]) | 0x40);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/sony-btf-mpx.c b/drivers/media/i2c/sony-btf-mpx.c
index ad239280c42e..927a9ec41463 100644
--- a/drivers/media/i2c/sony-btf-mpx.c
+++ b/drivers/media/i2c/sony-btf-mpx.c
@@ -357,13 +357,11 @@ static int sony_btf_mpx_probe(struct i2c_client *client,
return 0;
}
-static int sony_btf_mpx_remove(struct i2c_client *client)
+static void sony_btf_mpx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/sr030pc30.c b/drivers/media/i2c/sr030pc30.c
index 19c0252df2f1..ff18693beb5c 100644
--- a/drivers/media/i2c/sr030pc30.c
+++ b/drivers/media/i2c/sr030pc30.c
@@ -732,13 +732,12 @@ static int sr030pc30_probe(struct i2c_client *client,
return 0;
}
-static int sr030pc30_remove(struct i2c_client *client)
+static void sr030pc30_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id sr030pc30_id[] = {
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 16cc547976dd..31b89aff0e86 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -1067,7 +1067,7 @@ mutex_cleanup:
return ret;
}
-static int mipid02_remove(struct i2c_client *client)
+static void mipid02_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mipid02_dev *bridge = to_mipid02_dev(sd);
@@ -1078,8 +1078,6 @@ static int mipid02_remove(struct i2c_client *client)
mipid02_set_power_off(bridge);
media_entity_cleanup(&bridge->sd.entity);
mutex_destroy(&bridge->lock);
-
- return 0;
}
static const struct of_device_id mipid02_dt_ids[] = {
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index e18b8947ad7e..200841c1f5cf 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -964,6 +964,8 @@ static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
v = i2c_rd32(sd, CECRCTR);
msg.len = v & 0x1f;
+ if (msg.len > CEC_MAX_MSG_SIZE)
+ msg.len = CEC_MAX_MSG_SIZE;
for (i = 0; i < msg.len; i++) {
v = i2c_rd32(sd, CECRBUF1 + i * 4);
msg.msg[i] = v & 0xff;
@@ -2169,7 +2171,7 @@ err_hdl:
return err;
}
-static int tc358743_remove(struct i2c_client *client)
+static void tc358743_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tc358743_state *state = to_state(sd);
@@ -2185,8 +2187,6 @@ static int tc358743_remove(struct i2c_client *client)
mutex_destroy(&state->confctl_mutex);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(&state->hdl);
-
- return 0;
}
static const struct i2c_device_id tc358743_id[] = {
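Beyond the remove() conversion, the tc358743 hunk above also clamps the CEC receive length: the low five bits of CECRCTR can encode up to 31, while struct cec_msg only holds CEC_MAX_MSG_SIZE bytes. A hedged sketch of the same defensive pattern, with the register access abstracted behind a hypothetical rd_buf callback:

#include <linux/types.h>
#include <media/cec.h>

static void cec_rx_sketch(struct cec_msg *msg, u32 cecrctr_val,
			  u32 (*rd_buf)(unsigned int idx))
{
	unsigned int i;

	/* The low 5 bits of the counter register hold the byte count ... */
	msg->len = cecrctr_val & 0x1f;
	/* ... but never trust hardware to stay within the message buffer. */
	if (msg->len > CEC_MAX_MSG_SIZE)
		msg->len = CEC_MAX_MSG_SIZE;

	for (i = 0; i < msg->len; i++)
		msg->msg[i] = rd_buf(i) & 0xff;
}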
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index f66ac14cffad..83931826cf6f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2805,7 +2805,7 @@ err_free_state:
return ret;
}
-static int tda1997x_remove(struct i2c_client *client)
+static void tda1997x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tda1997x_state *state = to_state(sd);
@@ -2827,8 +2827,6 @@ static int tda1997x_remove(struct i2c_client *client)
mutex_destroy(&state->lock);
kfree(state);
-
- return 0;
}
static struct i2c_driver tda1997x_i2c_driver = {
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index cbdc9be0a597..11e918311b13 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -390,7 +390,7 @@ static int tda7432_probe(struct i2c_client *client,
return 0;
}
-static int tda7432_remove(struct i2c_client *client)
+static void tda7432_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tda7432 *t = to_state(sd);
@@ -398,7 +398,6 @@ static int tda7432_remove(struct i2c_client *client)
tda7432_set(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&t->hdl);
- return 0;
}
static const struct i2c_device_id tda7432_id[] = {
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index 8c6dfe746b20..aaa74944fc7c 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -175,12 +175,11 @@ static int tda9840_probe(struct i2c_client *client,
return 0;
}
-static int tda9840_remove(struct i2c_client *client)
+static void tda9840_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id tda9840_id[] = {
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 67378dbcc74b..50e74314f315 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -134,12 +134,11 @@ static int tea6415c_probe(struct i2c_client *client,
return 0;
}
-static int tea6415c_remove(struct i2c_client *client)
+static void tea6415c_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id tea6415c_id[] = {
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 712141b261ed..246f2b10ccc7 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -116,12 +116,11 @@ static int tea6420_probe(struct i2c_client *client,
return 0;
}
-static int tea6420_remove(struct i2c_client *client)
+static void tea6420_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id tea6420_id[] = {
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 8206bf7a5a8f..2a0f9a3d1a66 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -358,13 +358,11 @@ static int ths7303_probe(struct i2c_client *client,
return 0;
}
-static int ths7303_remove(struct i2c_client *client)
+static void ths7303_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
-
- return 0;
}
static const struct i2c_device_id ths7303_id[] = {
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index c52fe84cba1b..081ef5a4b950 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -468,7 +468,7 @@ static int ths8200_probe(struct i2c_client *client)
return 0;
}
-static int ths8200_remove(struct i2c_client *client)
+static void ths8200_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ths8200_state *decoder = to_state(sd);
@@ -478,8 +478,6 @@ static int ths8200_remove(struct i2c_client *client)
ths8200_s_power(sd, false);
v4l2_async_unregister_subdev(&decoder->sd);
-
- return 0;
}
static const struct i2c_device_id ths8200_id[] = {
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index e4c21990fea9..937fa1dbaecb 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -177,14 +177,13 @@ static int tlv320aic23b_probe(struct i2c_client *client,
return 0;
}
-static int tlv320aic23b_remove(struct i2c_client *client)
+static void tlv320aic23b_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tlv320aic23b_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index e6796e94dadf..9f1ed078b661 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -2065,7 +2065,7 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
return 0;
}
-static int tvaudio_remove(struct i2c_client *client)
+static void tvaudio_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct CHIPSTATE *chip = to_state(sd);
@@ -2079,7 +2079,6 @@ static int tvaudio_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&chip->hdl);
- return 0;
}
/* This driver supports many devices and the idea is to let the driver
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index cee60f945036..a746d96875f9 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -1121,7 +1121,7 @@ done:
* Unregister decoder as an i2c client device and V4L2
* device. Complement of tvp514x_probe().
*/
-static int tvp514x_remove(struct i2c_client *client)
+static void tvp514x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tvp514x_decoder *decoder = to_decoder(sd);
@@ -1129,7 +1129,6 @@ static int tvp514x_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(&decoder->sd);
media_entity_cleanup(&decoder->sd.entity);
v4l2_ctrl_handler_free(&decoder->hdl);
- return 0;
}
/* TVP5146 Init/Power on Sequence */
static const struct tvp514x_reg tvp5146_init_reg_seq[] = {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 93a980c4e899..859f1cb2fa74 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -2230,7 +2230,7 @@ err:
return res;
}
-static int tvp5150_remove(struct i2c_client *c)
+static void tvp5150_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp5150 *decoder = to_tvp5150(sd);
@@ -2250,8 +2250,6 @@ static int tvp5150_remove(struct i2c_client *c)
v4l2_ctrl_handler_free(&decoder->hdl);
pm_runtime_disable(&c->dev);
pm_runtime_set_suspended(&c->dev);
-
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 2de18833b07b..4ccd218f5584 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -1044,7 +1044,7 @@ error:
* Reset the TVP7002 device
* Returns zero.
*/
-static int tvp7002_remove(struct i2c_client *c)
+static void tvp7002_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
struct tvp7002 *device = to_tvp7002(sd);
@@ -1056,7 +1056,6 @@ static int tvp7002_remove(struct i2c_client *c)
media_entity_cleanup(&device->sd.entity);
#endif
v4l2_ctrl_handler_free(&device->hdl);
- return 0;
}
/* I2C Device ID table */
diff --git a/drivers/media/i2c/tw2804.c b/drivers/media/i2c/tw2804.c
index cd05f1ff504d..c7c8dfe8a8a8 100644
--- a/drivers/media/i2c/tw2804.c
+++ b/drivers/media/i2c/tw2804.c
@@ -405,14 +405,13 @@ static int tw2804_probe(struct i2c_client *client,
return 0;
}
-static int tw2804_remove(struct i2c_client *client)
+static void tw2804_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tw2804 *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id tw2804_id[] = {
diff --git a/drivers/media/i2c/tw9903.c b/drivers/media/i2c/tw9903.c
index f8e3ab4909d8..d7eef7986b75 100644
--- a/drivers/media/i2c/tw9903.c
+++ b/drivers/media/i2c/tw9903.c
@@ -235,13 +235,12 @@ static int tw9903_probe(struct i2c_client *client,
return 0;
}
-static int tw9903_remove(struct i2c_client *client)
+static void tw9903_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tw9906.c b/drivers/media/i2c/tw9906.c
index c528eb01fed0..549ad8f72f12 100644
--- a/drivers/media/i2c/tw9906.c
+++ b/drivers/media/i2c/tw9906.c
@@ -203,13 +203,12 @@ static int tw9906_probe(struct i2c_client *client,
return 0;
}
-static int tw9906_remove(struct i2c_client *client)
+static void tw9906_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&to_state(sd)->hdl);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
index 09f5b3986928..853b5acead32 100644
--- a/drivers/media/i2c/tw9910.c
+++ b/drivers/media/i2c/tw9910.c
@@ -993,7 +993,7 @@ error_clk_put:
return ret;
}
-static int tw9910_remove(struct i2c_client *client)
+static void tw9910_remove(struct i2c_client *client)
{
struct tw9910_priv *priv = to_tw9910(client);
@@ -1001,8 +1001,6 @@ static int tw9910_remove(struct i2c_client *client)
gpiod_put(priv->pdn_gpio);
clk_put(priv->clk);
v4l2_async_unregister_subdev(&priv->subdev);
-
- return 0;
}
static const struct i2c_device_id tw9910_id[] = {
diff --git a/drivers/media/i2c/uda1342.c b/drivers/media/i2c/uda1342.c
index b0a9c6d7163f..d0659c4392f2 100644
--- a/drivers/media/i2c/uda1342.c
+++ b/drivers/media/i2c/uda1342.c
@@ -72,12 +72,11 @@ static int uda1342_probe(struct i2c_client *client,
return 0;
}
-static int uda1342_remove(struct i2c_client *client)
+static void uda1342_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
static const struct i2c_device_id uda1342_id[] = {
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index ef35c6574785..4de26ed2ba00 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -210,12 +210,11 @@ static int upd64031a_probe(struct i2c_client *client,
return 0;
}
-static int upd64031a_remove(struct i2c_client *client)
+static void upd64031a_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index d6a1698caa2a..2bfd5443d406 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -181,12 +181,11 @@ static int upd64083_probe(struct i2c_client *client,
return 0;
}
-static int upd64083_remove(struct i2c_client *client)
+static void upd64083_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index e08e3579c0a1..f15ef2d13059 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -895,7 +895,7 @@ error_free_device:
return ret;
}
-static int video_i2c_remove(struct i2c_client *client)
+static void video_i2c_remove(struct i2c_client *client)
{
struct video_i2c_data *data = i2c_get_clientdata(client);
@@ -908,8 +908,6 @@ static int video_i2c_remove(struct i2c_client *client)
data->chip->set_power(data, false);
video_unregister_device(&data->vdev);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 492af8749fca..c832edad5fa7 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -163,12 +163,11 @@ static int vp27smpx_probe(struct i2c_client *client,
return 0;
}
-static int vp27smpx_remove(struct i2c_client *client)
+static void vp27smpx_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
- return 0;
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/i2c/vpx3220.c b/drivers/media/i2c/vpx3220.c
index 8be03fe5928c..b481ec196b88 100644
--- a/drivers/media/i2c/vpx3220.c
+++ b/drivers/media/i2c/vpx3220.c
@@ -526,15 +526,13 @@ static int vpx3220_probe(struct i2c_client *client,
return 0;
}
-static int vpx3220_remove(struct i2c_client *client)
+static void vpx3220_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct vpx3220 *decoder = to_vpx3220(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&decoder->hdl);
-
- return 0;
}
static const struct i2c_device_id vpx3220_id[] = {
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index 29003dec6f2d..d496bb45f201 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -824,13 +824,12 @@ static int vs6624_probe(struct i2c_client *client,
return ret;
}
-static int vs6624_remove(struct i2c_client *client)
+static void vs6624_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- return 0;
}
static const struct i2c_device_id vs6624_id[] = {
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index ed533834db54..180b35347521 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -234,14 +234,13 @@ static int wm8739_probe(struct i2c_client *client,
return 0;
}
-static int wm8739_remove(struct i2c_client *client)
+static void wm8739_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct wm8739_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id wm8739_id[] = {
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index d4c83c39892a..8ff97867d3cd 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -280,14 +280,13 @@ static int wm8775_probe(struct i2c_client *client,
return 0;
}
-static int wm8775_remove(struct i2c_client *client)
+static void wm8775_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct wm8775_state *state = to_state(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
- return 0;
}
static const struct i2c_device_id wm8775_id[] = {
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 1224d908713a..dff0b450f387 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -13,12 +13,12 @@ if MEDIA_PCI_SUPPORT
if MEDIA_CAMERA_SUPPORT
comment "Media capture support"
-source "drivers/media/pci/meye/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
source "drivers/media/pci/sta2x11/Kconfig"
source "drivers/media/pci/tw5864/Kconfig"
source "drivers/media/pci/tw68/Kconfig"
source "drivers/media/pci/tw686x/Kconfig"
+source "drivers/media/pci/zoran/Kconfig"
endif
@@ -27,7 +27,6 @@ if MEDIA_ANALOG_TV_SUPPORT
source "drivers/media/pci/dt3155/Kconfig"
source "drivers/media/pci/ivtv/Kconfig"
-source "drivers/media/pci/saa7146/Kconfig"
endif
@@ -58,7 +57,6 @@ source "drivers/media/pci/pluto2/Kconfig"
source "drivers/media/pci/pt1/Kconfig"
source "drivers/media/pci/pt3/Kconfig"
source "drivers/media/pci/smipcie/Kconfig"
-source "drivers/media/pci/ttpci/Kconfig"
endif
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 551169a3e434..8f887a8a7f17 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -5,8 +5,7 @@
# Please keep it alphabetically sorted by directory
# (e. g. LC_ALL=C sort Makefile)
-obj-y += ttpci/ \
- b2c2/ \
+obj-y += b2c2/ \
pluto2/ \
dm1105/ \
pt1/ \
@@ -14,7 +13,6 @@ obj-y += ttpci/ \
mantis/ \
ngene/ \
ddbridge/ \
- saa7146/ \
smipcie/ \
netup_unidvb/ \
intel/
@@ -32,10 +30,10 @@ obj-$(CONFIG_VIDEO_CX25821) += cx25821/
obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_DT3155) += dt3155/
obj-$(CONFIG_VIDEO_IVTV) += ivtv/
-obj-$(CONFIG_VIDEO_MEYE) += meye/
obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
obj-$(CONFIG_VIDEO_TW5864) += tw5864/
obj-$(CONFIG_VIDEO_TW686X) += tw686x/
obj-$(CONFIG_VIDEO_TW68) += tw68/
+obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/media/pci/cx18/cx18-av-audio.c b/drivers/media/pci/cx18/cx18-av-audio.c
index 833baa934448..78e05df9a7ba 100644
--- a/drivers/media/pci/cx18/cx18-av-audio.c
+++ b/drivers/media/pci/cx18/cx18-av-audio.c
@@ -50,7 +50,7 @@ static int set_audclk_freq(struct cx18 *cx, u32 freq)
*
* Many thanks to Jeff Campbell and Mike Bradley for their extensive
* investigation, experimentation, testing, and suggested solutions of
- * of audio/video sync problems with SVideo and CVBS captures.
+ * audio/video sync problems with SVideo and CVBS captures.
*/
if (state->aud_input > CX18_AV_AUDIO_SERIAL2) {
diff --git a/drivers/media/pci/cx18/cx18-firmware.c b/drivers/media/pci/cx18/cx18-firmware.c
index fdac310d7477..1b038b2802bf 100644
--- a/drivers/media/pci/cx18/cx18-firmware.c
+++ b/drivers/media/pci/cx18/cx18-firmware.c
@@ -248,7 +248,7 @@ void cx18_init_power(struct cx18 *cx, int lowpwr)
*
* Many thanks to Jeff Campbell and Mike Bradley for their extensive
* investigation, experimentation, testing, and suggested solutions of
- * of audio/video sync problems with SVideo and CVBS captures.
+ * audio/video sync problems with SVideo and CVBS captures.
*/
/* the fast clock is at 200/245 MHz */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index a07b18f2034e..9232a966bcab 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2086,6 +2086,9 @@ static struct {
/* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
*/
{ PCI_VENDOR_ID_AMD, 0x1419 },
+ /* 0x1631 is the PCI ID for the IOMMU found on Renoir/Cezanne
+ */
+ { PCI_VENDOR_ID_AMD, 0x1631 },
/* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
*/
{ PCI_VENDOR_ID_ATI, 0x5a23 },
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index ddfd2eb37484..222d04421468 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -235,7 +235,7 @@ static u32 clock_divider_to_resolution(u16 divider)
{
/*
* Resolution is the duration of 1 tick of the readable portion of
- * of the pulse width counter as read from the FIFO. The two lsb's are
+ * the pulse width counter as read from the FIFO. The two lsb's are
* not readable, hence the << 2. This function returns ns.
*/
return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000,
diff --git a/drivers/media/pci/cx88/cx88-dsp.c b/drivers/media/pci/cx88/cx88-dsp.c
index f1e1fc1cb4bd..e378f3b215c7 100644
--- a/drivers/media/pci/cx88/cx88-dsp.c
+++ b/drivers/media/pci/cx88/cx88-dsp.c
@@ -24,7 +24,7 @@
/*
* We calculate the baseband frequencies of the carrier and the pilot tones
- * based on the the sampling rate of the audio rds fifo.
+ * based on the sampling rate of the audio rds fifo.
*/
#define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0)
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index a075788c64d4..469aeaa725ad 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -144,11 +144,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
return -EINVAL;
vb2_set_plane_payload(vb, 0, size);
- cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
- 0, VBI_LINE_LENGTH * lines,
- VBI_LINE_LENGTH, 0,
- lines);
- return 0;
+ return cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl,
+ 0, VBI_LINE_LENGTH * lines,
+ VBI_LINE_LENGTH, 0,
+ lines);
}
static void buffer_finish(struct vb2_buffer *vb)
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index d3729be89252..b509c2a03852 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -431,6 +431,7 @@ static int queue_setup(struct vb2_queue *q,
static int buffer_prepare(struct vb2_buffer *vb)
{
+ int ret;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
struct cx88_core *core = dev->core;
@@ -445,35 +446,35 @@ static int buffer_prepare(struct vb2_buffer *vb)
switch (core->field) {
case V4L2_FIELD_TOP:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, 0, UNSET,
- buf->bpl, 0, core->height);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, UNSET,
+ buf->bpl, 0, core->height);
break;
case V4L2_FIELD_BOTTOM:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, UNSET, 0,
- buf->bpl, 0, core->height);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, UNSET, 0,
+ buf->bpl, 0, core->height);
break;
case V4L2_FIELD_SEQ_TB:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl,
- 0, buf->bpl * (core->height >> 1),
- buf->bpl, 0,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ 0, buf->bpl * (core->height >> 1),
+ buf->bpl, 0,
+ core->height >> 1);
break;
case V4L2_FIELD_SEQ_BT:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl,
- buf->bpl * (core->height >> 1), 0,
- buf->bpl, 0,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl,
+ buf->bpl * (core->height >> 1), 0,
+ buf->bpl, 0,
+ core->height >> 1);
break;
case V4L2_FIELD_INTERLACED:
default:
- cx88_risc_buffer(dev->pci, &buf->risc,
- sgt->sgl, 0, buf->bpl,
- buf->bpl, buf->bpl,
- core->height >> 1);
+ ret = cx88_risc_buffer(dev->pci, &buf->risc,
+ sgt->sgl, 0, buf->bpl,
+ buf->bpl, buf->bpl,
+ core->height >> 1);
break;
}
dprintk(2,
@@ -481,7 +482,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
buf, buf->vb.vb2_buf.index, __func__,
core->width, core->height, dev->fmt->depth, dev->fmt->fourcc,
(unsigned long)buf->risc.dma);
- return 0;
+ return ret;
}
static void buffer_finish(struct vb2_buffer *vb)
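The two cx88 hunks above stop discarding the status of cx88_risc_buffer(): buf_prepare now returns it, so a failed RISC program build propagates to the vb2 core and the buffer is rejected at QBUF time instead of being queued half-initialized. A minimal sketch of that pattern with illustrative names (the demo_* helpers are not the real cx88 code):

#include <linux/errno.h>
#include <media/videobuf2-v4l2.h>

struct demo_buffer {
	struct vb2_v4l2_buffer vb;
	/* driver-specific DMA descriptor would live here */
};

static int demo_build_dma(struct demo_buffer *buf)
{
	return 0;	/* placeholder: would return -ENOMEM etc. on failure */
}

static int demo_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct demo_buffer *buf = container_of(vbuf, struct demo_buffer, vb);

	/* Propagate the helper's status instead of unconditionally returning 0. */
	return demo_build_dma(buf);
}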
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index e79e8a5a744a..4ba10c34a16a 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -538,7 +538,7 @@ static void ivtv_yuv_handle_vertical(struct ivtv *itv, struct yuv_frame_info *f)
reg_2964 = (reg_2964 << 16) + reg_2964 + (reg_2964 * 46 / 94);
/* Okay, we've wasted time working out the correct value,
- but if we use it, it fouls the the window alignment.
+ but if we use it, it fouls the window alignment.
Fudge it to what we want... */
reg_2964 = 0x00010001 + ((reg_2964 & 0x0000FFFF) - (reg_2964 >> 16));
reg_2968 = 0x00010001 + ((reg_2968 & 0x0000FFFF) - (reg_2968 >> 16));
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 3d296f1998a1..d1d7da84cd9d 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -596,43 +596,6 @@ struct mychip {
int capture_source[MIXER_ADDR_LAST + 1][2];
};
-#ifdef NGENE_V4L
-struct ngene_overlay {
- int tvnorm;
- struct v4l2_rect w;
- enum v4l2_field field;
- struct v4l2_clip *clips;
- int nclips;
- int setup_ok;
-};
-
-struct ngene_tvnorm {
- int v4l2_id;
- char *name;
- u16 swidth, sheight; /* scaled standard width, height */
- int tuner_norm;
- int soundstd;
-};
-
-struct ngene_vopen {
- struct ngene_channel *ch;
- enum v4l2_priority prio;
- int width;
- int height;
- int depth;
- struct videobuf_queue vbuf_q;
- struct videobuf_queue vbi;
- int fourcc;
- int picxcount;
- int resources;
- enum v4l2_buf_type type;
- const struct ngene_format *fmt;
-
- const struct ngene_format *ovfmt;
- struct ngene_overlay ov;
-};
-#endif
-
struct ngene_channel {
struct device device;
struct i2c_adapter i2c_adapter;
@@ -709,18 +672,6 @@ struct ngene_channel {
int tvnorm_num;
int tvnorm;
-#ifdef NGENE_V4L
- int videousers;
- struct v4l2_prio_state prio;
- struct ngene_vopen init;
- int resources;
- struct v4l2_framebuffer fbuf;
- struct ngene_buffer *screen; /* overlay */
- struct list_head capture; /* video capture queue */
- spinlock_t s_lock;
- struct semaphore reslock;
-#endif
-
int running;
int tsin_offset;
@@ -863,35 +814,6 @@ struct ngene_info {
int (*switch_ctrl)(struct ngene_channel *, int, int);
};
-#ifdef NGENE_V4L
-struct ngene_format {
- char *name;
- int fourcc; /* video4linux 2 */
- int btformat; /* BT848_COLOR_FMT_* */
- int format;
- int btswap; /* BT848_COLOR_CTL_* */
- int depth; /* bit/pixel */
- int flags;
- int hshift, vshift; /* for planar modes */
- int palette;
-};
-
-#define RESOURCE_OVERLAY 1
-#define RESOURCE_VIDEO 2
-#define RESOURCE_VBI 4
-
-struct ngene_buffer {
- /* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
-
- /* ngene specific */
- const struct ngene_format *fmt;
- int tvnorm;
- int btformat;
- int btswap;
-};
-#endif
-
/* Provided by ngene-core.c */
int ngene_probe(struct pci_dev *pci_dev, const struct pci_device_id *id);
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 0d51bdf01f43..f6deac85962e 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -445,8 +445,8 @@ static int pt3_fetch_thread(void *data)
pt3_proc_dma(adap);
delay = ktime_set(0, PT3_FETCH_DELAY * NSEC_PER_MSEC);
- set_current_state(TASK_UNINTERRUPTIBLE);
- freezable_schedule_hrtimeout_range(&delay,
+ set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
+ schedule_hrtimeout_range(&delay,
PT3_FETCH_DELAY_DELTA * NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
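The pt3 hunk switches from the removed freezable_schedule_hrtimeout_range() helper to the newer idiom of tagging the sleep state itself with TASK_FREEZABLE. A small sketch of that idiom, with the delay constants passed in rather than taken from the driver:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static void fetch_wait_sketch(unsigned int delay_ms, unsigned int slack_ms)
{
	ktime_t delay = ktime_set(0, delay_ms * NSEC_PER_MSEC);

	/* The freezer may now freeze the task while it sleeps; no special helper needed. */
	set_current_state(TASK_UNINTERRUPTIBLE | TASK_FREEZABLE);
	schedule_hrtimeout_range(&delay, slack_ms * NSEC_PER_MSEC,
				 HRTIMER_MODE_REL);
}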
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 7973ae42873a..d5f32e3ff544 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -626,7 +626,7 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
portf = &dev->ports[SAA7164_PORT_VBI2];
/* Check that the hardware is accessible. If the status bytes are
- * 0xFF then the device is not accessible, the the IRQ belongs
+ * 0xFF then the device is not accessible, the IRQ belongs
* to another driver.
* 4 x u32 interrupt registers.
*/
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
index 80d20e2a2099..0adf3d80f248 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
@@ -1020,7 +1020,7 @@ static int solo_g_parm(struct file *file, void *priv,
cp->timeperframe.numerator = solo_enc->interval;
cp->timeperframe.denominator = solo_enc->solo_dev->fps;
cp->capturemode = 0;
- /* XXX: Shouldn't we be able to get/set this from videobuf? */
+ /* XXX: Shouldn't we be able to get/set this from vb2? */
cp->readbuffers = 2;
return 0;
diff --git a/drivers/staging/media/zoran/Kconfig b/drivers/media/pci/zoran/Kconfig
index 3fb3e27e04a8..3fb3e27e04a8 100644
--- a/drivers/staging/media/zoran/Kconfig
+++ b/drivers/media/pci/zoran/Kconfig
diff --git a/drivers/staging/media/zoran/Makefile b/drivers/media/pci/zoran/Makefile
index 9603bac0195c..9603bac0195c 100644
--- a/drivers/staging/media/zoran/Makefile
+++ b/drivers/media/pci/zoran/Makefile
diff --git a/drivers/staging/media/zoran/videocodec.c b/drivers/media/pci/zoran/videocodec.c
index a0c8bde5ec11..8efc5e06b0f7 100644
--- a/drivers/staging/media/zoran/videocodec.c
+++ b/drivers/media/pci/zoran/videocodec.c
@@ -92,9 +92,8 @@ struct videocodec *videocodec_attach(struct videocodec_master *master)
h->attached += 1;
return codec;
- } else {
- kfree(codec);
}
+ kfree(codec);
}
h = h->next;
}
@@ -255,8 +254,8 @@ int videocodec_debugfs_show(struct seq_file *m)
struct codec_list *h = codeclist_top;
struct attached_list *a;
- seq_printf(m, "<S>lave or attached <M>aster name type flags magic ");
- seq_printf(m, "(connected as)\n");
+ seq_puts(m, "<S>lave or attached <M>aster name type flags magic ");
+ seq_puts(m, "(connected as)\n");
while (h) {
seq_printf(m, "S %32s %04x %08lx %08lx (TEMPLATE)\n",
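The videocodec.c hunk above is a common checkpatch cleanup: seq_printf() with a constant string and no conversions becomes seq_puts(). A trivial illustration with a hypothetical debugfs show callback:

#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	/* No format specifiers, so seq_puts() skips the printf parsing. */
	seq_puts(m, "name     type  flags     magic    (connected as)\n");
	/* Actual formatting still uses seq_printf(). */
	seq_printf(m, "S %8s %04x\n", "zr36050", 0x0002);
	return 0;
}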
diff --git a/drivers/staging/media/zoran/videocodec.h b/drivers/media/pci/zoran/videocodec.h
index 5e6057edd339..6b69f69667f9 100644
--- a/drivers/staging/media/zoran/videocodec.h
+++ b/drivers/media/pci/zoran/videocodec.h
@@ -12,109 +12,109 @@
/* general description */
/* =================== */
-/* Should ease the (re-)usage of drivers supporting cards with (different)
- video codecs. The codecs register to this module their functionality,
- and the processors (masters) can attach to them if they fit.
-
- The codecs are typically have a "strong" binding to their master - so I
- don't think it makes sense to have a full blown interfacing as with e.g.
- i2c. If you have an other opinion, let's discuss & implement it :-)))
-
- Usage:
-
- The slave has just to setup the videocodec structure and use two functions:
- videocodec_register(codecdata);
- videocodec_unregister(codecdata);
- The best is just calling them at module (de-)initialisation.
-
- The master sets up the structure videocodec_master and calls:
- codecdata=videocodec_attach(master_codecdata);
- videocodec_detach(codecdata);
-
- The slave is called during attach/detach via functions setup previously
- during register. At that time, the master_data pointer is set up
- and the slave can access any io registers of the master device (in the case
- the slave is bound to it). Otherwise it doesn't need this functions and
- therfor they may not be initialized.
-
- The other functions are just for convenience, as they are for sure used by
- most/all of the codecs. The last ones may be omitted, too.
-
- See the structure declaration below for more information and which data has
- to be set up for the master and the slave.
-
- ----------------------------------------------------------------------------
- The master should have "knowledge" of the slave and vice versa. So the data
- structures sent to/from slave via set_data/get_data set_image/get_image are
- device dependent and vary between MJPEG/MPEG/WAVELET/... devices. (!!!!)
- ----------------------------------------------------------------------------
-*/
+/*
+ * Should ease the (re-)usage of drivers supporting cards with (different)
+ * video codecs. The codecs register to this module their functionality,
+ * and the processors (masters) can attach to them if they fit.
+ *
+ * The codecs typically have a "strong" binding to their master - so I
+ * don't think it makes sense to have a full blown interfacing as with e.g.
+ * i2c. If you have another opinion, let's discuss & implement it :-)))
+ *
+ * Usage:
+ *
+ * The slave has just to setup the videocodec structure and use two functions:
+ * videocodec_register(codecdata);
+ * videocodec_unregister(codecdata);
+ * The best is just calling them at module (de-)initialisation.
+ *
+ * The master sets up the structure videocodec_master and calls:
+ * codecdata=videocodec_attach(master_codecdata);
+ * videocodec_detach(codecdata);
+ *
+ * The slave is called during attach/detach via functions setup previously
+ * during register. At that time, the master_data pointer is set up
+ * and the slave can access any io registers of the master device (in the case
+ * the slave is bound to it). Otherwise it doesn't need these functions and
+ * therefore they may not be initialized.
+ *
+ * The other functions are just for convenience, as they are for sure used by
+ * most/all of the codecs. The last ones may be omitted, too.
+ *
+ * See the structure declaration below for more information and which data has
+ * to be set up for the master and the slave.
+ *
+ * ----------------------------------------------------------------------------
+ * The master should have "knowledge" of the slave and vice versa. So the data
+ * structures sent to/from slave via set_data/get_data set_image/get_image are
+ * device dependent and vary between MJPEG/MPEG/WAVELET/... devices. (!!!!)
+ * ----------------------------------------------------------------------------
+ */
/* ========================================== */
/* description of the videocodec_io structure */
/* ========================================== */
/*
- ==== master setup ====
- name -> name of the device structure for reference and debugging
- master_data -> data ref. for the master (e.g. the zr36055,57,67)
- readreg -> ref. to read-fn from register (setup by master, used by slave)
- writereg -> ref. to write-fn to register (setup by master, used by slave)
- this two functions do the lowlevel I/O job
-
- ==== slave functionality setup ====
- slave_data -> data ref. for the slave (e.g. the zr36050,60)
- check -> fn-ref. checks availability of an device, returns -EIO on failure or
- the type on success
- this makes espcecially sense if a driver module supports more than
- one codec which may be quite similar to access, nevertheless it
- is good for a first functionality check
-
- -- main functions you always need for compression/decompression --
-
- set_mode -> this fn-ref. resets the entire codec, and sets up the mode
- with the last defined norm/size (or device default if not
- available) - it returns 0 if the mode is possible
- set_size -> this fn-ref. sets the norm and image size for
- compression/decompression (returns 0 on success)
- the norm param is defined in videodev2.h (V4L2_STD_*)
-
- additional setup may be available, too - but the codec should work with
- some default values even without this
-
- set_data -> sets device-specific data (tables, quality etc.)
- get_data -> query device-specific data (tables, quality etc.)
-
- if the device delivers interrupts, they may be setup/handled here
- setup_interrupt -> codec irq setup (not needed for 36050/60)
- handle_interrupt -> codec irq handling (not needed for 36050/60)
-
- if the device delivers pictures, they may be handled here
- put_image -> puts image data to the codec (not needed for 36050/60)
- get_image -> gets image data from the codec (not needed for 36050/60)
- the calls include frame numbers and flags (even/odd/...)
- if needed and a flag which allows blocking until its ready
-*/
+ * ==== master setup ====
+ * name -> name of the device structure for reference and debugging
+ * master_data -> data ref. for the master (e.g. the zr36055,57,67)
+ * readreg -> ref. to read-fn from register (setup by master, used by slave)
+ * writereg -> ref. to write-fn to register (setup by master, used by slave)
+ * this two functions do the lowlevel I/O job
+ *
+ * ==== slave functionality setup ====
+ * slave_data -> data ref. for the slave (e.g. the zr36050,60)
+ * check -> fn-ref. checks availability of a device, returns -EIO on failure or
+ * the type on success
+ * this especially makes sense if a driver module supports more than
+ * one codec which may be quite similar to access, nevertheless it
+ * is good for a first functionality check
+ *
+ * -- main functions you always need for compression/decompression --
+ *
+ * set_mode -> this fn-ref. resets the entire codec, and sets up the mode
+ * with the last defined norm/size (or device default if not
+ * available) - it returns 0 if the mode is possible
+ * set_size -> this fn-ref. sets the norm and image size for
+ * compression/decompression (returns 0 on success)
+ * the norm param is defined in videodev2.h (V4L2_STD_*)
+ *
+ * additional setup may be available, too - but the codec should work with
+ * some default values even without this
+ *
+ * set_data -> sets device-specific data (tables, quality etc.)
+ * get_data -> query device-specific data (tables, quality etc.)
+ *
+ * if the device delivers interrupts, they may be setup/handled here
+ * setup_interrupt -> codec irq setup (not needed for 36050/60)
+ * handle_interrupt -> codec irq handling (not needed for 36050/60)
+ *
+ * if the device delivers pictures, they may be handled here
+ * put_image -> puts image data to the codec (not needed for 36050/60)
+ * get_image -> gets image data from the codec (not needed for 36050/60)
+ * the calls include frame numbers and flags (even/odd/...)
+ * if needed and a flag which allows blocking until its ready
+ */
/* ============== */
/* user interface */
/* ============== */
/*
- Currently there is only a information display planned, as the layer
- is not visible for the user space at all.
-
- Information is available via procfs. The current entry is "/proc/videocodecs"
- but it makes sense to "hide" it in the /proc/video tree of v4l(2) --TODO--.
-
-A example for such an output is:
-
-<S>lave or attached <M>aster name type flags magic (connected as)
-S zr36050 0002 0000d001 00000000 (TEMPLATE)
-M zr36055[0] 0001 0000c001 00000000 (zr36050[0])
-M zr36055[1] 0001 0000c001 00000000 (zr36050[1])
-
-*/
+ * Currently there is only an information display planned, as the layer
+ * is not visible for the user space at all.
+ *
+ * Information is available via procfs. The current entry is "/proc/videocodecs"
+ * but it makes sense to "hide" it in the /proc/video tree of v4l(2) --TODO--.
+ *
+ * An example for such an output is:
+ *
+ * <S>lave or attached <M>aster name type flags magic (connected as)
+ * S zr36050 0002 0000d001 00000000 (TEMPLATE)
+ * M zr36055[0] 0001 0000c001 00000000 (zr36050[0])
+ * M zr36055[1] 0001 0000c001 00000000 (zr36050[1])
+ */
/* =============================================== */
/* special defines for the videocodec_io structure */
@@ -293,15 +293,15 @@ struct videocodec_master {
// * master structure needs to be kmalloc'ed before calling attach
// and free'd after calling detach
// * returns pointer on success, NULL on failure
-extern struct videocodec *videocodec_attach(struct videocodec_master *);
+struct videocodec *videocodec_attach(struct videocodec_master *master);
// * 0 on success, <0 (errno) on failure
-extern int videocodec_detach(struct videocodec *);
+int videocodec_detach(struct videocodec *codec);
/* register and unregister commands for the slaves */
// * 0 on success, <0 (errno) on failure
-extern int videocodec_register(const struct videocodec *);
+int videocodec_register(const struct videocodec *codec);
// * 0 on success, <0 (errno) on failure
-extern int videocodec_unregister(const struct videocodec *);
+int videocodec_unregister(const struct videocodec *codec);
/* the other calls are directly done via the videocodec structure! */
diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 05227e5298f6..56340553b282 100644
--- a/drivers/staging/media/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -140,11 +140,16 @@ struct zoran_v4l_settings {
/* jpg-capture/-playback settings */
struct zoran_jpg_settings {
- int decimation; /* this bit is used to set everything to default */
- int hor_dcm, ver_dcm, tmp_dcm; /* capture decimation settings (tmp_dcm=1 means both fields) */
- int field_per_buff, odd_even; /* field-settings (odd_even=1 (+tmp_dcm=1) means top-field-first) */
- int img_x, img_y, img_width, img_height; /* crop settings (subframe capture) */
- struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */
+ /* this bit is used to set everything to default */
+ int decimation;
+ /* capture decimation settings (tmp_dcm=1 means both fields) */
+ int hor_dcm, ver_dcm, tmp_dcm;
+ /* field-settings (odd_even=1 (+tmp_dcm=1) means top-field-first) */
+ int field_per_buff, odd_even;
+ /* crop settings (subframe capture) */
+ int img_x, img_y, img_width, img_height;
+ /* JPEG-specific capture settings */
+ struct v4l2_jpegcompression jpg_comp;
};
struct zoran;
@@ -248,7 +253,8 @@ struct zoran {
unsigned long vbseq;
/* zr36057's code buffer table */
- __le32 *stat_com; /* stat_com[i] is indexed by dma_head/tail & BUZ_MASK_STAT_COM */
+ /* stat_com[i] is indexed by dma_head/tail & BUZ_MASK_STAT_COM */
+ __le32 *stat_com;
/* Additional stuff for testing */
unsigned int ghost_int;
@@ -292,14 +298,16 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev)
return container_of(v4l2_dev, struct zoran, v4l2_dev);
}
-/* There was something called _ALPHA_BUZ that used the PCI address instead of
- * the kernel iomapped address for btread/btwrite. */
+/*
+ * There was something called _ALPHA_BUZ that used the PCI address instead of
+ * the kernel iomapped address for btread/btwrite.
+ */
#define btwrite(dat, adr) writel((dat), zr->zr36057_mem + (adr))
#define btread(adr) readl(zr->zr36057_mem + (adr))
-#define btand(dat, adr) btwrite((dat) & btread(adr), adr)
-#define btor(dat, adr) btwrite((dat) | btread(adr), adr)
-#define btaor(dat, mask, adr) btwrite((dat) | ((mask) & btread(adr)), adr)
+#define btand(dat, adr) btwrite((dat) & btread(adr), (adr))
+#define btor(dat, adr) btwrite((dat) | btread(adr), (adr))
+#define btaor(dat, mask, adr) btwrite((dat) | ((mask) & btread(adr)), (adr))
#endif
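For reference, the btread()/btwrite() family touched at the end of the zoran.h hunk are thin MMIO wrappers over the ioremapped zr36057 BAR, and the extra parentheses added around adr are routine macro hygiene. A sketch of what the accessors expand to (the demo_* names are illustrative):

#include <linux/io.h>

struct zoran_sketch {
	void __iomem *zr36057_mem;	/* ioremapped PCI BAR 0 */
};

static inline u32 demo_btread(struct zoran_sketch *zr, unsigned int adr)
{
	return readl(zr->zr36057_mem + adr);
}

static inline void demo_btor(struct zoran_sketch *zr, u32 dat, unsigned int adr)
{
	/* btor(): read-modify-write that ORs bits into the register. */
	writel(dat | demo_btread(zr, adr), zr->zr36057_mem + adr);
}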
diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 26f978a1cc72..3975fc1b2ee3 100644
--- a/drivers/staging/media/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -172,8 +172,6 @@ void zr36016_write(struct videocodec *codec, u16 reg, u32 val)
static void dc10_init(struct zoran *zr)
{
- pci_dbg(zr->pci_dev, "%s\n", __func__);
-
/* Pixel clock selection */
GPIO(zr, 4, 0);
GPIO(zr, 5, 1);
@@ -183,13 +181,10 @@ static void dc10_init(struct zoran *zr)
static void dc10plus_init(struct zoran *zr)
{
- pci_dbg(zr->pci_dev, "%s\n", __func__);
}
static void buz_init(struct zoran *zr)
{
- pci_dbg(zr->pci_dev, "%s\n", __func__);
-
/* some stuff from Iomega */
pci_write_config_dword(zr->pci_dev, 0xfc, 0x90680f15);
pci_write_config_dword(zr->pci_dev, 0x0c, 0x00012020);
@@ -198,8 +193,6 @@ static void buz_init(struct zoran *zr)
static void lml33_init(struct zoran *zr)
{
- pci_dbg(zr->pci_dev, "%s\n", __func__);
-
GPIO(zr, 2, 1); // Set Composite input/output
}
@@ -334,10 +327,6 @@ static void videocodec_exit(struct zoran *zr)
codec_exit(zr, zr->card.video_vfe);
}
-// struct tvnorm {
-// u16 wt, wa, h_start, h_sync_start, ht, ha, v_start;
-// };
-
static const struct tvnorm f50sqpixel = { 944, 768, 83, 880, 625, 576, 16 };
static const struct tvnorm f60sqpixel = { 780, 640, 51, 716, 525, 480, 12 };
static const struct tvnorm f50ccir601 = { 864, 720, 75, 804, 625, 576, 18 };
@@ -619,7 +608,10 @@ static struct card_info zoran_cards[NUM_CARDS] = {
}, {
.type = AVS6EYES,
.name = "6-Eyes",
-/* AverMedia chose not to brand the 6-Eyes. Thus it can't be autodetected, and requires card=x. */
+ /*
+ * AverMedia chose not to brand the 6-Eyes. Thus it can't be
+ * autodetected, and requires card=x.
+ */
.i2c_decoder = "ks0127",
.addrs_decoder = ks0127_addrs,
.i2c_encoder = "bt866",
@@ -764,7 +756,9 @@ int zoran_check_jpg_settings(struct zoran *zr,
case 4:
if (zr->card.type == DC10_NEW) {
- pci_dbg(zr->pci_dev, "%s - HDec by 4 is not supported on the DC10\n", __func__);
+ pci_dbg(zr->pci_dev,
+ "%s - HDec by 4 is not supported on the DC10\n",
+ __func__);
err0++;
break;
}
@@ -882,12 +876,7 @@ static int zoran_init_video_device(struct zoran *zr, struct video_device *video_
video_dev->device_caps = V4L2_CAP_STREAMING | dir;
strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name));
- /*
- * It's not a mem2mem device, but you can both capture and output from one and the same
- * device. This should really be split up into two device nodes, but that's a job for
- * another day.
- */
- video_dev->vfl_dir = VFL_DIR_M2M;
+ video_dev->vfl_dir = VFL_DIR_RX;
zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE);
err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]);
@@ -1019,7 +1008,9 @@ static int zr36057_init(struct zoran *zr)
zr->timing = zr->card.tvn[ZR_NORM_SECAM];
}
if (!zr->timing) {
- pci_warn(zr->pci_dev, "%s - default TV standard not supported by hardware. PAL will be used.\n", __func__);
+ pci_warn(zr->pci_dev,
+ "%s - default TV standard not supported by hardware. PAL will be used.\n",
+ __func__);
zr->norm = V4L2_STD_PAL;
zr->timing = zr->card.tvn[ZR_NORM_PAL];
}
@@ -1038,9 +1029,9 @@ static int zr36057_init(struct zoran *zr)
zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev,
BUZ_NUM_STAT_COM * sizeof(u32),
&zr->p_sc, GFP_KERNEL);
- if (!zr->stat_com) {
+ if (!zr->stat_com)
return -ENOMEM;
- }
+
for (j = 0; j < BUZ_NUM_STAT_COM; j++)
zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */
@@ -1066,9 +1057,11 @@ static int zr36057_init(struct zoran *zr)
return 0;
exit_statcomb:
- dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2,
+ zr->stat_comb, zr->p_scb);
exit_statcom:
- dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32),
+ zr->stat_com, zr->p_sc);
return err;
}
@@ -1099,8 +1092,10 @@ static void zoran_remove(struct pci_dev *pdev)
btwrite(0, ZR36057_SPGPPCR);
pci_free_irq(zr->pci_dev, 0, zr);
/* unmap and free memory */
- dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc);
- dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb);
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32),
+ zr->stat_com, zr->p_sc);
+ dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2,
+ zr->stat_comb, zr->p_scb);
pci_release_regions(pdev);
pci_disable_device(zr->pci_dev);
zoran_exit_video_devices(zr);
@@ -1299,7 +1294,8 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_err(pdev, "Unknown card, try specifying card=X module parameter\n");
goto zr_unreg;
}
- pci_info(zr->pci_dev, "%s() - card %s detected\n", __func__, zoran_cards[card_num].name);
+ pci_info(zr->pci_dev, "%s() - card %s detected\n", __func__,
+ zoran_cards[card_num].name);
} else {
card_num = card[nr];
if (card_num >= NUM_CARDS || card_num < 0) {
@@ -1324,7 +1320,8 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto zr_unreg;
- zr->zr36057_mem = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ zr->zr36057_mem = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
if (!zr->zr36057_mem) {
pci_err(pdev, "%s() - ioremap failed\n", __func__);
goto zr_pci_release;
@@ -1348,7 +1345,8 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
&latency);
need_latency = zr->revision > 1 ? 32 : 48;
if (latency != need_latency) {
- pci_info(zr->pci_dev, "Changing PCI latency from %d to %d\n", latency, need_latency);
+ pci_info(zr->pci_dev, "Changing PCI latency from %d to %d\n",
+ latency, need_latency);
pci_write_config_byte(zr->pci_dev, PCI_LATENCY_TIMER, need_latency);
}
diff --git a/drivers/staging/media/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 8e0d634cb30f..518cb426b446 100644
--- a/drivers/staging/media/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -19,11 +19,10 @@ extern int zr36067_debug;
extern const struct video_device zoran_template;
-extern int zoran_check_jpg_settings(struct zoran *zr,
- struct zoran_jpg_settings *settings,
- int try);
-extern void zoran_open_init_params(struct zoran *zr);
-extern void zoran_vdev_release(struct video_device *vdev);
+int zoran_check_jpg_settings(struct zoran *zr,
+ struct zoran_jpg_settings *settings, int try);
+void zoran_open_init_params(struct zoran *zr);
+void zoran_vdev_release(struct video_device *vdev);
void zr36016_write(struct videocodec *codec, u16 reg, u32 val);
diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 2470889a58fa..31f049b55529 100644
--- a/drivers/staging/media/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -50,7 +50,6 @@ static bool lml33dpath; /* default = 0
module_param(lml33dpath, bool, 0644);
MODULE_PARM_DESC(lml33dpath, "Use digital path capture mode (on LML33 cards)");
-int zr_set_buf(struct zoran *zr);
/*
* initialize video front end
*/
@@ -108,7 +107,6 @@ int post_office_wait(struct zoran *zr)
{
u32 por;
-// while (((por = btread(ZR36057_POR)) & (ZR36057_POR_PO_PEN | ZR36057_POR_PO_TIME)) == ZR36057_POR_PO_PEN) {
while ((por = btread(ZR36057_POR)) & ZR36057_POR_PO_PEN) {
/* wait for something to happen */
/* TODO add timeout */
@@ -155,10 +153,12 @@ void jpeg_codec_sleep(struct zoran *zr, int sleep)
{
GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_SLEEP], !sleep);
if (!sleep) {
- pci_dbg(zr->pci_dev, "%s() - wake GPIO=0x%08x\n", __func__, btread(ZR36057_GPPGCR1));
- udelay(500);
+ pci_dbg(zr->pci_dev, "%s() - wake GPIO=0x%08x\n",
+ __func__, btread(ZR36057_GPPGCR1));
+ usleep_range(500, 1000);
} else {
- pci_dbg(zr->pci_dev, "%s() - sleep GPIO=0x%08x\n", __func__, btread(ZR36057_GPPGCR1));
+ pci_dbg(zr->pci_dev, "%s() - sleep GPIO=0x%08x\n",
+ __func__, btread(ZR36057_GPPGCR1));
udelay(2);
}
}
@@ -284,7 +284,8 @@ static void zr36057_set_vfe(struct zoran *zr, int video_width, int video_height,
vcrop1 = (tvn->ha / 2 - he) / 2;
vcrop2 = tvn->ha / 2 - he - vcrop1;
v_start = tvn->v_start;
- v_end = v_start + tvn->ha / 2; // - 1; FIXME SnapShot times out with -1 in 768*576 on the DC10 - LP
+ // FIXME SnapShot times out with -1 in 768*576 on the DC10 - LP
+ v_end = v_start + tvn->ha / 2; // - 1;
v_start += vcrop1;
v_end -= vcrop2;
reg = ((v_start & ZR36057_VFEVCR_VMASK) << ZR36057_VFEVCR_V_START)
@@ -298,10 +299,12 @@ static void zr36057_set_vfe(struct zoran *zr, int video_width, int video_height,
reg |= (hor_dcm << ZR36057_VFESPFR_HOR_DCM);
reg |= (ver_dcm << ZR36057_VFESPFR_VER_DCM);
reg |= (disp_mode << ZR36057_VFESPFR_DISP_MODE);
- /* RJ: I don't know, why the following has to be the opposite
+ /*
+ * RJ: I don't know, why the following has to be the opposite
* of the corresponding ZR36060 setting, but only this way
- * we get the correct colors when uncompressing to the screen */
- //reg |= ZR36057_VFESPFR_VCLK_POL; /**/
+ * we get the correct colors when uncompressing to the screen
+ */
+ //reg |= ZR36057_VFESPFR_VCLK_POL;
/* RJ: Don't know if that is needed for NTSC also */
if (!(zr->norm & V4L2_STD_NTSC))
reg |= ZR36057_VFESPFR_EXT_FL; // NEEDED!!!!!!! Wolfgang
@@ -342,7 +345,7 @@ void zr36057_set_memgrab(struct zoran *zr, int mode)
* will be stuck at 1 until capturing is turned back on.
*/
if (btread(ZR36057_VSSFGR) & ZR36057_VSSFGR_SNAP_SHOT)
- pci_warn(zr->pci_dev, "zr36057_set_memgrab(1) with SnapShot on!?\n");
+ pci_warn(zr->pci_dev, "%s(1) with SnapShot on!?\n", __func__);
/* switch on VSync interrupts */
btwrite(IRQ_MASK, ZR36057_ISR); // Clear Interrupts
@@ -595,11 +598,9 @@ void jpeg_start(struct zoran *zr)
/* enable the Go generation */
btor(ZR36057_JMC_GO_EN, ZR36057_JMC);
- udelay(30);
+ usleep_range(30, 100);
set_frame(zr, 1); // /FRAME
-
- pci_dbg(zr->pci_dev, "jpeg_start\n");
}
void zr36057_enable_jpg(struct zoran *zr, enum zoran_codec_mode mode)
@@ -803,8 +804,10 @@ static void zoran_reap_stat_com(struct zoran *zr)
unsigned int size = 0;
u32 fcnt;
- /* In motion decompress we don't have a hardware frame counter,
- * we just count the interrupts here */
+ /*
+ * In motion decompress we don't have a hardware frame counter,
+ * we just count the interrupts here
+ */
if (zr->codec_mode == BUZ_MODE_MOTION_DECOMPRESS)
zr->jpg_seq_num++;
@@ -938,9 +941,9 @@ void zoran_init_hardware(struct zoran *zr)
void zr36057_restart(struct zoran *zr)
{
btwrite(0, ZR36057_SPGPPCR);
- udelay(1000);
+ usleep_range(1000, 2000);
btor(ZR36057_SPGPPCR_SOFT_RESET, ZR36057_SPGPPCR);
- udelay(1000);
+ usleep_range(1000, 2000);
/* assert P_Reset */
btwrite(0, ZR36057_JPC);
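The zoran_device.c hunks also convert long busy-waits into sleeps. The usual rule (Documentation/timers/timers-howto.rst) is udelay() only for a few microseconds or in atomic context, and usleep_range() for roughly 10 us to 20 ms in process context, which matches the 500 us and 1 ms delays changed above:

#include <linux/delay.h>

static void demo_codec_wake(void)
{
	udelay(2);		   /* a couple of microseconds: busy-wait is fine */
	usleep_range(500, 1000);   /* 0.5-1 ms: sleep and let the scheduler run */
}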
diff --git a/drivers/media/pci/zoran/zoran_device.h b/drivers/media/pci/zoran/zoran_device.h
new file mode 100644
index 000000000000..34fd5cc914eb
--- /dev/null
+++ b/drivers/media/pci/zoran/zoran_device.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Zoran zr36057/zr36067 PCI controller driver, for the
+ * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
+ * Media Labs LML33/LML33R10.
+ *
+ * This part handles card-specific data and detection
+ *
+ * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
+ */
+
+#ifndef __ZORAN_DEVICE_H__
+#define __ZORAN_DEVICE_H__
+
+/* general purpose I/O */
+void GPIO(struct zoran *zr, int bit, unsigned int value);
+
+/* codec (or actually: guest bus) access */
+int post_office_wait(struct zoran *zr);
+int post_office_write(struct zoran *zr, unsigned int guest, unsigned int reg,
+ unsigned int value);
+int post_office_read(struct zoran *zr, unsigned int guest, unsigned int reg);
+
+void jpeg_codec_sleep(struct zoran *zr, int sleep);
+int jpeg_codec_reset(struct zoran *zr);
+
+/* zr360x7 access to raw capture */
+void zr36057_overlay(struct zoran *zr, int on);
+void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count);
+void zr36057_set_memgrab(struct zoran *zr, int mode);
+int wait_grab_pending(struct zoran *zr);
+
+/* interrupts */
+void print_interrupts(struct zoran *zr);
+void clear_interrupt_counters(struct zoran *zr);
+irqreturn_t zoran_irq(int irq, void *dev_id);
+
+/* JPEG codec access */
+void jpeg_start(struct zoran *zr);
+void zr36057_enable_jpg(struct zoran *zr, enum zoran_codec_mode mode);
+void zoran_feed_stat_com(struct zoran *zr);
+
+/* general */
+void zoran_set_pci_master(struct zoran *zr, int set_master);
+void zoran_init_hardware(struct zoran *zr);
+void zr36057_restart(struct zoran *zr);
+
+extern const struct zoran_format zoran_formats[];
+
+extern int v4l_bufsize;
+extern int jpg_bufsize;
+extern int pass_through;
+
+/* i2c */
+#define decoder_call(zr, o, f, args...) \
+ v4l2_subdev_call((zr)->decoder, o, f, ##args)
+#define encoder_call(zr, o, f, args...) \
+ v4l2_subdev_call((zr)->encoder, o, f, ##args)
+
+#endif /* __ZORAN_DEVICE_H__ */
diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 4304b7e21709..fa672cc8bc67 100644
--- a/drivers/staging/media/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -203,7 +203,6 @@ static int zoran_v4l_set_format(struct zoran *zr, int width, int height,
static int zoran_set_norm(struct zoran *zr, v4l2_std_id norm)
{
-
if (!(norm & zr->card.norms)) {
pci_dbg(zr->pci_dev, "%s - unsupported norm %llx\n", __func__, norm);
return -EINVAL;
@@ -287,17 +286,6 @@ static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
}
-#if 0
-/* TODO: output does not work yet */
-static int zoran_enum_fmt_vid_out(struct file *file, void *__fh,
- struct v4l2_fmtdesc *f)
-{
- struct zoran *zr = video_drvdata(file);
-
- return zoran_enum_fmt(zr, f, ZORAN_FORMAT_PLAYBACK);
-}
-#endif
-
static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
struct v4l2_format *fmt)
{
@@ -430,8 +418,10 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
fmt->fmt.pix.field = V4L2_FIELD_TOP;
bpp = DIV_ROUND_UP(zoran_formats[i].depth, 8);
- v4l_bound_align_image(&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH, bpp == 2 ? 1 : 2,
- &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT, 0, 0);
+ v4l_bound_align_image(&fmt->fmt.pix.width, BUZ_MIN_WIDTH, BUZ_MAX_WIDTH,
+ bpp == 2 ? 1 : 2,
+ &fmt->fmt.pix.height, BUZ_MIN_HEIGHT, BUZ_MAX_HEIGHT,
+ 0, 0);
fmt->fmt.pix.bytesperline = fmt->fmt.pix.width * bpp;
fmt->fmt.pix.sizeimage = fmt->fmt.pix.bytesperline * fmt->fmt.pix.height;
return 0;
@@ -627,38 +617,6 @@ static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
return res;
}
-#if 0
-/* TODO: output does not work yet */
-static int zoran_enum_output(struct file *file, void *__fh,
- struct v4l2_output *outp)
-{
- if (outp->index != 0)
- return -EINVAL;
-
- outp->index = 0;
- outp->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY;
- outp->std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
- outp->capabilities = V4L2_OUT_CAP_STD;
- strscpy(outp->name, "Autodetect", sizeof(outp->name));
-
- return 0;
-}
-static int zoran_g_output(struct file *file, void *__fh, unsigned int *output)
-{
- *output = 0;
-
- return 0;
-}
-
-static int zoran_s_output(struct file *file, void *__fh, unsigned int output)
-{
- if (output != 0)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
/* cropping (sub-frame capture) */
static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
{
@@ -746,9 +704,6 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
.vidioc_enum_input = zoran_enum_input,
.vidioc_g_input = zoran_g_input,
.vidioc_s_input = zoran_s_input,
-/* .vidioc_enum_output = zoran_enum_output,
- .vidioc_g_output = zoran_g_output,
- .vidioc_s_output = zoran_s_output,*/
.vidioc_g_std = zoran_g_std,
.vidioc_s_std = zoran_s_std,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -760,13 +715,9 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_fmt_vid_cap = zoran_enum_fmt_vid_cap,
-/* .vidioc_enum_fmt_vid_out = zoran_enum_fmt_vid_out,*/
.vidioc_g_fmt_vid_cap = zoran_g_fmt_vid_cap,
-/* .vidioc_g_fmt_vid_out = zoran_g_fmt_vid_out,*/
.vidioc_s_fmt_vid_cap = zoran_s_fmt_vid_cap,
-/* .vidioc_s_fmt_vid_out = zoran_s_fmt_vid_out,*/
.vidioc_try_fmt_vid_cap = zoran_try_fmt_vid_cap,
-/* .vidioc_try_fmt_vid_out = zoran_try_fmt_vid_out,*/
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -1013,7 +964,7 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir)
vq->dev = &zr->pci_dev->dev;
vq->type = dir;
- vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE;
+ vq->io_modes = VB2_DMABUF | VB2_MMAP;
vq->drv_priv = zr;
vq->buf_struct_size = sizeof(struct zr_buffer);
vq->ops = &zr_video_qops;
diff --git a/drivers/staging/media/zoran/zr36016.c b/drivers/media/pci/zoran/zr36016.c
index 0e0532537a3e..4b328ad6083f 100644
--- a/drivers/staging/media/zoran/zr36016.c
+++ b/drivers/media/pci/zoran/zr36016.c
@@ -15,18 +15,19 @@
/* codec io API */
#include "videocodec.h"
-/* it doesn't make sense to have more than 20 or so,
- just to prevent some unwanted loops */
+/*
+ * it doesn't make sense to have more than 20 or so,
+ * just to prevent some unwanted loops
+ */
#define MAX_CODECS 20
/* amount of chips attached via this driver */
static int zr36016_codecs;
-/* =========================================================================
- Local hardware I/O functions:
-
- read/write via codec layer (registers are located in the master device)
- ========================================================================= */
+/*
+ * Local hardware I/O functions: read/write via codec layer
+ * (registers are located in the master device)
+ */
/* read and write functions */
static u8 zr36016_read(struct zr36016 *ptr, u16 reg)
@@ -58,9 +59,12 @@ static void zr36016_write(struct zr36016 *ptr, u16 reg, u8 value)
zrdev_err(zr, "%s: invalid I/O setup, nothing written!\n", ptr->name);
}
-/* indirect read and write functions */
-/* the 016 supports auto-addr-increment, but
- * writing it all time cost not much and is safer... */
+/*
+ * indirect read and write functions
+ *
+ * the 016 supports auto-addr-increment, but
+ * writing it every time costs little and is safer...
+ */
static u8 zr36016_readi(struct zr36016 *ptr, u16 reg)
{
u8 value = 0;
@@ -68,8 +72,8 @@ static u8 zr36016_readi(struct zr36016 *ptr, u16 reg)
/* just in case something is wrong... */
if ((ptr->codec->master_data->writereg) && (ptr->codec->master_data->readreg)) {
- ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
- value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF; // DATA
+ ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F);
+ value = (ptr->codec->master_data->readreg(ptr->codec, ZR016_IDATA)) & 0xFF;
} else {
zrdev_err(zr, "%s: invalid I/O setup, nothing read (i)!\n", ptr->name);
}
@@ -88,18 +92,14 @@ static void zr36016_writei(struct zr36016 *ptr, u16 reg, u8 value)
/* just in case something is wrong... */
if (ptr->codec->master_data->writereg) {
- ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F); // ADDR
- ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF); // DATA
+ ptr->codec->master_data->writereg(ptr->codec, ZR016_IADDR, reg & 0x0F);
+ ptr->codec->master_data->writereg(ptr->codec, ZR016_IDATA, value & 0x0FF);
} else {
zrdev_err(zr, "%s: invalid I/O setup, nothing written (i)!\n", ptr->name);
}
}
-/* =========================================================================
- Local helper function:
-
- version read
- ========================================================================= */
+/* Local helper function: version read */
/* version kept in datastructure */
static u8 zr36016_read_version(struct zr36016 *ptr)
@@ -108,11 +108,10 @@ static u8 zr36016_read_version(struct zr36016 *ptr)
return ptr->version;
}
-/* =========================================================================
- Local helper function:
-
- basic test of "connectivity", writes/reads to/from PAX-Lo register
- ========================================================================= */
+/*
+ * Local helper function: basic test of "connectivity", writes/reads
+ * to/from PAX-Lo register
+ */
static int zr36016_basic_test(struct zr36016 *ptr)
{
@@ -150,36 +149,7 @@ static int zr36016_basic_test(struct zr36016 *ptr)
return 0; /* looks good! */
}
-/* =========================================================================
- Local helper function:
-
- simple loop for pushing the init datasets - NO USE --
- ========================================================================= */
-
-#if 0
-static int zr36016_pushit(struct zr36016 *ptr,
- u16 startreg,
- u16 len,
- const char *data)
-{
- struct zoran *zr = videocodec_to_zoran(ptr->codec);
- int i = 0;
-
- zrdev_dbg(zr, "%s: write data block to 0x%04x (len=%d)\n",
- ptr->name, startreg, len);
- while (i < len) {
- zr36016_writei(ptr, startreg++, data[i++]);
- }
-
- return i;
-}
-#endif
-
-/* =========================================================================
- Basic datasets & init:
-
- //TODO//
- ========================================================================= */
+/* Basic datasets & init */
static void zr36016_init(struct zr36016 *ptr)
{
@@ -213,14 +183,16 @@ static void zr36016_init(struct zr36016 *ptr)
zr36016_write(ptr, ZR016_GOSTOP, 1);
}
-/* =========================================================================
- CODEC API FUNCTIONS
-
- this functions are accessed by the master via the API structure
- ========================================================================= */
+/*
+ * CODEC API FUNCTIONS
+ *
+ * These functions are accessed by the master via the API structure
+ */
-/* set compression/expansion mode and launches codec -
- this should be the last call from the master before starting processing */
+/*
+ * set compression/expansion mode and launches codec -
+ * this should be the last call from the master before starting processing
+ */
static int zr36016_set_mode(struct videocodec *codec, int mode)
{
struct zr36016 *ptr = (struct zr36016 *)codec->data;
@@ -249,22 +221,28 @@ static int zr36016_set_video(struct videocodec *codec, const struct tvnorm *norm
cap->x, cap->y, cap->width, cap->height,
cap->decimation);
- /* if () return -EINVAL;
+ /*
+ * if () return -EINVAL;
* trust the master driver that it knows what it does - so
- * we allow invalid startx/y for now ... */
+ * we allow invalid startx/y for now ...
+ */
ptr->width = cap->width;
ptr->height = cap->height;
- /* (Ronald) This is ugly. zoran_device.c, line 387
+ /*
+ * (Ronald) This is ugly. zoran_device.c, line 387
* already mentions what happens if h_start is even
* (blue faces, etc., cr/cb inversed). There's probably
* some good reason why h_start is 0 instead of 1, so I'm
* leaving it to this for now, but really... This can be
- * done a lot simpler */
+ * done a lot simpler
+ */
ptr->xoff = (norm->h_start ? norm->h_start : 1) + cap->x;
- /* Something to note here (I don't understand it), setting
+ /*
+ * Something to note here (I don't understand it), setting
* v_start too high will cause the codec to 'not work'. I
* really don't get it. values of 16 (v_start) already break
- * it here. Just '0' seems to work. More testing needed! */
+ * it here. Just '0' seems to work. More testing needed!
+ */
ptr->yoff = norm->v_start + cap->y;
/* (Ronald) dzjeeh, can't this thing do hor_decimation = 4? */
ptr->xdec = ((cap->decimation & 0xff) == 1) ? 0 : 1;
@@ -319,11 +297,11 @@ static int zr36016_control(struct videocodec *codec, int type, int size, void *d
return size;
}
-/* =========================================================================
- Exit and unregister function:
-
- Deinitializes Zoran's JPEG processor
- ========================================================================= */
+/*
+ * Exit and unregister function:
+ *
+ * Deinitializes Zoran's JPEG processor
+ */
static int zr36016_unset(struct videocodec *codec)
{
@@ -344,14 +322,14 @@ static int zr36016_unset(struct videocodec *codec)
return -EFAULT;
}
-/* =========================================================================
- Setup and registry function:
-
- Initializes Zoran's JPEG processor
-
- Also sets pixel size, average code size, mode (compr./decompr.)
- (the given size is determined by the processor with the video interface)
- ========================================================================= */
+/*
+ * Setup and registry function:
+ *
+ * Initializes Zoran's JPEG processor
+ *
+ * Also sets pixel size, average code size, mode (compr./decompr.)
+ * (the given size is determined by the processor with the video interface)
+ */
static int zr36016_setup(struct videocodec *codec)
{
@@ -410,9 +388,7 @@ static const struct videocodec zr36016_codec = {
/* others are not used */
};
-/* =========================================================================
- HOOK IN DRIVER AS KERNEL MODULE
- ========================================================================= */
+/* HOOK IN DRIVER AS KERNEL MODULE */
int zr36016_init_module(void)
{
diff --git a/drivers/staging/media/zoran/zr36016.h b/drivers/media/pci/zoran/zr36016.h
index 04afba35669d..04afba35669d 100644
--- a/drivers/staging/media/zoran/zr36016.h
+++ b/drivers/media/pci/zoran/zr36016.h
diff --git a/drivers/staging/media/zoran/zr36050.c b/drivers/media/pci/zoran/zr36050.c
index 6a7ef28d996c..b07d7e5c1b4a 100644
--- a/drivers/staging/media/zoran/zr36050.c
+++ b/drivers/media/pci/zoran/zr36050.c
@@ -22,18 +22,20 @@
/* codec io API */
#include "videocodec.h"
-/* it doesn't make sense to have more than 20 or so,
- just to prevent some unwanted loops */
+/*
+ * it doesn't make sense to have more than 20 or so,
+ * just to prevent some unwanted loops
+ */
#define MAX_CODECS 20
/* amount of chips attached via this driver */
static int zr36050_codecs;
-/* =========================================================================
- Local hardware I/O functions:
-
- read/write via codec layer (registers are located in the master device)
- ========================================================================= */
+/*
+ * Local hardware I/O functions:
+ *
+ * read/write via codec layer (registers are located in the master device)
+ */
/* read and write functions */
static u8 zr36050_read(struct zr36050 *ptr, u16 reg)
@@ -66,12 +68,6 @@ static void zr36050_write(struct zr36050 *ptr, u16 reg, u8 value)
ptr->name);
}
-/* =========================================================================
- Local helper function:
-
- status read
- ========================================================================= */
-
/* status is kept in datastructure */
static u8 zr36050_read_status1(struct zr36050 *ptr)
{
@@ -81,12 +77,6 @@ static u8 zr36050_read_status1(struct zr36050 *ptr)
return ptr->status1;
}
-/* =========================================================================
- Local helper function:
-
- scale factor read
- ========================================================================= */
-
/* scale factor is kept in datastructure */
static u16 zr36050_read_scalefactor(struct zr36050 *ptr)
{
@@ -98,11 +88,11 @@ static u16 zr36050_read_scalefactor(struct zr36050 *ptr)
return ptr->scalefact;
}
-/* =========================================================================
- Local helper function:
-
- wait if codec is ready to proceed (end of processing) or time is over
- ========================================================================= */
+/*
+ * Local helper function:
+ *
+ * wait until the codec is ready to proceed (end of processing) or time is over
+ */
static void zr36050_wait_end(struct zr36050 *ptr)
{
@@ -120,11 +110,10 @@ static void zr36050_wait_end(struct zr36050 *ptr)
}
}
-/* =========================================================================
- Local helper function:
-
- basic test of "connectivity", writes/reads to/from memory the SOF marker
- ========================================================================= */
+/*
+ * Local helper function: basic test of "connectivity", writes/reads
+ * to/from memory the SOF marker
+ */
static int zr36050_basic_test(struct zr36050 *ptr)
{
@@ -160,11 +149,7 @@ static int zr36050_basic_test(struct zr36050 *ptr)
return 0; /* looks good! */
}
-/* =========================================================================
- Local helper function:
-
- simple loop for pushing the init datasets
- ========================================================================= */
+/* Local helper function: simple loop for pushing the init datasets */
static int zr36050_pushit(struct zr36050 *ptr, u16 startreg, u16 len, const char *data)
{
@@ -179,16 +164,16 @@ static int zr36050_pushit(struct zr36050 *ptr, u16 startreg, u16 len, const char
return i;
}
-/* =========================================================================
- Basic datasets:
-
- jpeg baseline setup data (you find it on lots places in internet, or just
- extract it from any regular .jpg image...)
-
- Could be variable, but until it's not needed it they are just fixed to save
- memory. Otherwise expand zr36050 structure with arrays, push the values to
- it and initialize from there, as e.g. the linux zr36057/60 driver does it.
- ========================================================================= */
+/*
+ * Basic datasets:
+ *
+ * jpeg baseline setup data (you can find it in lots of places on the internet, or just
+ * extract it from any regular .jpg image...)
+ *
+ * Could be variable, but until that's needed they are just fixed to save
+ * memory. Otherwise expand zr36050 structure with arrays, push the values to
+ * it and initialize from there, as e.g. the linux zr36057/60 driver does it.
+ */
static const char zr36050_dqt[0x86] = {
0xff, 0xdb, //Marker: DQT
@@ -281,18 +266,19 @@ static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC
static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 };
static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
-/* =========================================================================
- Local helper functions:
-
- calculation and setup of parameter-dependent JPEG baseline segments
- (needed for compression only)
- ========================================================================= */
+/*
+ * Local helper functions:
+ *
+ * calculation and setup of parameter-dependent JPEG baseline segments
+ * (needed for compression only)
+ */
/* ------------------------------------------------------------------------- */
-/* SOF (start of frame) segment depends on width, height and sampling ratio
- of each color component */
-
+/*
+ * SOF (start of frame) segment depends on width, height and sampling ratio
+ * of each color component
+ */
static int zr36050_set_sof(struct zr36050 *ptr)
{
struct zoran *zr = videocodec_to_zoran(ptr->codec);
@@ -313,7 +299,8 @@ static int zr36050_set_sof(struct zr36050 *ptr)
sof_data[9] = NO_OF_COMPONENTS;
for (i = 0; i < NO_OF_COMPONENTS; i++) {
sof_data[10 + (i * 3)] = i; // index identifier
- sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios
+ sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) |
+ (ptr->v_samp_ratio[i]); // sampling ratios
sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection
}
return zr36050_pushit(ptr, ZR050_SOF_IDX,
@@ -322,8 +309,10 @@ static int zr36050_set_sof(struct zr36050 *ptr)
/* ------------------------------------------------------------------------- */
-/* SOS (start of scan) segment depends on the used scan components
- of each color component */
+/*
+ * SOS (start of scan) segment depends on the used scan components
+ * of each color component
+ */
static int zr36050_set_sos(struct zr36050 *ptr)
{
@@ -368,14 +357,14 @@ static int zr36050_set_dri(struct zr36050 *ptr)
return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data);
}
-/* =========================================================================
- Setup function:
-
- Setup compression/decompression of Zoran's JPEG processor
- ( see also zoran 36050 manual )
-
- ... sorry for the spaghetti code ...
- ========================================================================= */
+/*
+ * Setup function:
+ *
+ * Setup compression/decompression of Zoran's JPEG processor
+ * ( see also zoran 36050 manual )
+ *
+ * ... sorry for the spaghetti code ...
+ */
static void zr36050_init(struct zr36050 *ptr)
{
int sum = 0;
@@ -411,8 +400,10 @@ static void zr36050_init(struct zr36050 *ptr)
sum += zr36050_set_sos(ptr);
sum += zr36050_set_dri(ptr);
- /* setup the fixed jpeg tables - maybe variable, though -
- * (see table init section above) */
+ /*
+ * setup the fixed jpeg tables - maybe variable, though -
+ * (see table init section above)
+ */
zrdev_dbg(zr, "%s: write DQT, DHT, APP\n", ptr->name);
sum += zr36050_pushit(ptr, ZR050_DQT_IDX,
sizeof(zr36050_dqt), zr36050_dqt);
@@ -522,14 +513,16 @@ static void zr36050_init(struct zr36050 *ptr)
zr36050_read(ptr, 0);
}
-/* =========================================================================
- CODEC API FUNCTIONS
-
- this functions are accessed by the master via the API structure
- ========================================================================= */
+/*
+ * CODEC API FUNCTIONS
+ *
+ * These functions are accessed by the master via the API structure
+ */
-/* set compression/expansion mode and launches codec -
- this should be the last call from the master before starting processing */
+/*
+ * set compression/expansion mode and launches codec -
+ * this should be the last call from the master before starting processing
+ */
static int zr36050_set_mode(struct videocodec *codec, int mode)
{
struct zr36050 *ptr = (struct zr36050 *)codec->data;
@@ -558,9 +551,10 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm
ptr->name, norm->h_start, norm->v_start,
cap->x, cap->y, cap->width, cap->height,
cap->decimation, cap->quality);
- /* if () return -EINVAL;
+ /*
* trust the master driver that it knows what it does - so
- * we allow invalid startx/y and norm for now ... */
+ * we allow invalid startx/y and norm for now ...
+ */
ptr->width = cap->width / (cap->decimation & 0xff);
ptr->height = cap->height / ((cap->decimation >> 8) & 0xff);
@@ -579,8 +573,10 @@ static int zr36050_set_video(struct videocodec *codec, const struct tvnorm *norm
ptr->real_code_vol = size >> 3; /* in bytes */
- /* Set max_block_vol here (previously in zr36050_init, moved
- * here for consistency with zr36060 code */
+ /*
+ * Set max_block_vol here (previously in zr36050_init, moved
+ * here for consistency with zr36060 code
+ */
zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);
return 0;
@@ -637,8 +633,6 @@ static int zr36050_control(struct videocodec *codec, int type, int size, void *d
if (size != sizeof(int))
return -EFAULT;
ptr->total_code_vol = *ival;
- /* (Kieran Morrissey)
- * code copied from zr36060.c to ensure proper bitrate */
ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3;
break;
@@ -701,11 +695,7 @@ static int zr36050_control(struct videocodec *codec, int type, int size, void *d
return size;
}
-/* =========================================================================
- Exit and unregister function:
-
- Deinitializes Zoran's JPEG processor
- ========================================================================= */
+/* Exit and unregister function: Deinitializes Zoran's JPEG processor */
static int zr36050_unset(struct videocodec *codec)
{
@@ -727,14 +717,14 @@ static int zr36050_unset(struct videocodec *codec)
return -EFAULT;
}
-/* =========================================================================
- Setup and registry function:
-
- Initializes Zoran's JPEG processor
-
- Also sets pixel size, average code size, mode (compr./decompr.)
- (the given size is determined by the processor with the video interface)
- ========================================================================= */
+/*
+ * Setup and registry function:
+ *
+ * Initializes Zoran's JPEG processor
+ *
+ * Also sets pixel size, average code size, mode (compr./decompr.)
+ * (the given size is determined by the processor with the video interface)
+ */
static int zr36050_setup(struct videocodec *codec)
{
@@ -771,8 +761,8 @@ static int zr36050_setup(struct videocodec *codec)
memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8);
memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8);
- ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag
- * (what is the difference?) */
+ /* 0 or 1 - fixed file size flag (what is the difference?) */
+ ptr->bitrate_ctrl = 0;
ptr->mode = CODEC_DO_COMPRESSION;
ptr->width = 384;
ptr->height = 288;
@@ -809,9 +799,7 @@ static const struct videocodec zr36050_codec = {
// others are not used
};
-/* =========================================================================
- HOOK IN DRIVER AS KERNEL MODULE
- ========================================================================= */
+/* HOOK IN DRIVER AS KERNEL MODULE */
int zr36050_init_module(void)
{
diff --git a/drivers/staging/media/zoran/zr36050.h b/drivers/media/pci/zoran/zr36050.h
index f9b58f4c77b9..f9b58f4c77b9 100644
--- a/drivers/staging/media/zoran/zr36050.h
+++ b/drivers/media/pci/zoran/zr36050.h
diff --git a/drivers/staging/media/zoran/zr36057.h b/drivers/media/pci/zoran/zr36057.h
index a2a75fd9f535..45d8afc62b37 100644
--- a/drivers/staging/media/zoran/zr36057.h
+++ b/drivers/media/pci/zoran/zr36057.h
@@ -11,117 +11,117 @@
/* Zoran ZR36057 registers */
#define ZR36057_VFEHCR 0x000 /* Video Front End, Horizontal Configuration Register */
-#define ZR36057_VFEHCR_HS_POL BIT(30)
-#define ZR36057_VFEHCR_H_START 10
+#define ZR36057_VFEHCR_HS_POL BIT(30)
+#define ZR36057_VFEHCR_H_START 10
#define ZR36057_VFEHCR_H_END 0
#define ZR36057_VFEHCR_HMASK 0x3ff
#define ZR36057_VFEVCR 0x004 /* Video Front End, Vertical Configuration Register */
-#define ZR36057_VFEVCR_VS_POL BIT(30)
-#define ZR36057_VFEVCR_V_START 10
+#define ZR36057_VFEVCR_VS_POL BIT(30)
+#define ZR36057_VFEVCR_V_START 10
#define ZR36057_VFEVCR_V_END 0
#define ZR36057_VFEVCR_VMASK 0x3ff
#define ZR36057_VFESPFR 0x008 /* Video Front End, Scaler and Pixel Format Register */
-#define ZR36057_VFESPFR_EXT_FL BIT(26)
-#define ZR36057_VFESPFR_TOP_FIELD BIT(25)
-#define ZR36057_VFESPFR_VCLK_POL BIT(24)
-#define ZR36057_VFESPFR_H_FILTER 21
-#define ZR36057_VFESPFR_HOR_DCM 14
-#define ZR36057_VFESPFR_VER_DCM 8
-#define ZR36057_VFESPFR_DISP_MODE 6
+#define ZR36057_VFESPFR_EXT_FL BIT(26)
+#define ZR36057_VFESPFR_TOP_FIELD BIT(25)
+#define ZR36057_VFESPFR_VCLK_POL BIT(24)
+#define ZR36057_VFESPFR_H_FILTER 21
+#define ZR36057_VFESPFR_HOR_DCM 14
+#define ZR36057_VFESPFR_VER_DCM 8
+#define ZR36057_VFESPFR_DISP_MODE 6
#define ZR36057_VFESPFR_YUV422 (0 << 3)
#define ZR36057_VFESPFR_RGB888 (1 << 3)
#define ZR36057_VFESPFR_RGB565 (2 << 3)
#define ZR36057_VFESPFR_RGB555 (3 << 3)
-#define ZR36057_VFESPFR_ERR_DIF (1 << 2)
-#define ZR36057_VFESPFR_PACK24 (1 << 1)
-#define ZR36057_VFESPFR_LITTLE_ENDIAN (1 << 0)
+#define ZR36057_VFESPFR_ERR_DIF BIT(2)
+#define ZR36057_VFESPFR_PACK24 BIT(1)
+#define ZR36057_VFESPFR_LITTLE_ENDIAN BIT(0)
#define ZR36057_VDTR 0x00c /* Video Display "Top" Register */
#define ZR36057_VDBR 0x010 /* Video Display "Bottom" Register */
#define ZR36057_VSSFGR 0x014 /* Video Stride, Status, and Frame Grab Register */
-#define ZR36057_VSSFGR_DISP_STRIDE 16
-#define ZR36057_VSSFGR_VID_OVF BIT(8)
-#define ZR36057_VSSFGR_SNAP_SHOT BIT(1)
-#define ZR36057_VSSFGR_FRAME_GRAB BIT(0)
+#define ZR36057_VSSFGR_DISP_STRIDE 16
+#define ZR36057_VSSFGR_VID_OVF BIT(8)
+#define ZR36057_VSSFGR_SNAP_SHOT BIT(1)
+#define ZR36057_VSSFGR_FRAME_GRAB BIT(0)
#define ZR36057_VDCR 0x018 /* Video Display Configuration Register */
-#define ZR36057_VDCR_VID_EN BIT(31)
-#define ZR36057_VDCR_MIN_PIX 24
-#define ZR36057_VDCR_TRITON BIT(24)
-#define ZR36057_VDCR_VID_WIN_HT 12
-#define ZR36057_VDCR_VID_WIN_WID 0
+#define ZR36057_VDCR_VID_EN BIT(31)
+#define ZR36057_VDCR_MIN_PIX 24
+#define ZR36057_VDCR_TRITON BIT(24)
+#define ZR36057_VDCR_VID_WIN_HT 12
+#define ZR36057_VDCR_VID_WIN_WID 0
#define ZR36057_MMTR 0x01c /* Masking Map "Top" Register */
#define ZR36057_MMBR 0x020 /* Masking Map "Bottom" Register */
#define ZR36057_OCR 0x024 /* Overlay Control Register */
-#define ZR36057_OCR_OVL_ENABLE BIT(15)
-#define ZR36057_OCR_MASK_STRIDE 0
+#define ZR36057_OCR_OVL_ENABLE BIT(15)
+#define ZR36057_OCR_MASK_STRIDE 0
#define ZR36057_SPGPPCR 0x028 /* System, PCI, and General Purpose Pins Control Register */
-#define ZR36057_SPGPPCR_SOFT_RESET BIT(24)
+#define ZR36057_SPGPPCR_SOFT_RESET BIT(24)
#define ZR36057_GPPGCR1 0x02c /* General Purpose Pins and GuestBus Control Register (1) */
#define ZR36057_MCSAR 0x030 /* MPEG Code Source Address Register */
#define ZR36057_MCTCR 0x034 /* MPEG Code Transfer Control Register */
-#define ZR36057_MCTCR_COD_TIME BIT(30)
-#define ZR36057_MCTCR_C_EMPTY BIT(29)
-#define ZR36057_MCTCR_C_FLUSH BIT(28)
+#define ZR36057_MCTCR_COD_TIME BIT(30)
+#define ZR36057_MCTCR_C_EMPTY BIT(29)
+#define ZR36057_MCTCR_C_FLUSH BIT(28)
#define ZR36057_MCTCR_COD_GUEST_ID 20
#define ZR36057_MCTCR_COD_GUEST_REG 16
#define ZR36057_MCMPR 0x038 /* MPEG Code Memory Pointer Register */
#define ZR36057_ISR 0x03c /* Interrupt Status Register */
-#define ZR36057_ISR_GIRQ1 BIT(30)
-#define ZR36057_ISR_GIRQ0 BIT(29)
-#define ZR36057_ISR_COD_REP_IRQ BIT(28)
-#define ZR36057_ISR_JPEG_REP_IRQ BIT(27)
+#define ZR36057_ISR_GIRQ1 BIT(30)
+#define ZR36057_ISR_GIRQ0 BIT(29)
+#define ZR36057_ISR_COD_REP_IRQ BIT(28)
+#define ZR36057_ISR_JPEG_REP_IRQ BIT(27)
#define ZR36057_ICR 0x040 /* Interrupt Control Register */
-#define ZR36057_ICR_GIRQ1 BIT(30)
-#define ZR36057_ICR_GIRQ0 BIT(29)
-#define ZR36057_ICR_COD_REP_IRQ BIT(28)
-#define ZR36057_ICR_JPEG_REP_IRQ BIT(27)
-#define ZR36057_ICR_INT_PIN_EN BIT(24)
+#define ZR36057_ICR_GIRQ1 BIT(30)
+#define ZR36057_ICR_GIRQ0 BIT(29)
+#define ZR36057_ICR_COD_REP_IRQ BIT(28)
+#define ZR36057_ICR_JPEG_REP_IRQ BIT(27)
+#define ZR36057_ICR_INT_PIN_EN BIT(24)
#define ZR36057_I2CBR 0x044 /* I2C Bus Register */
-#define ZR36057_I2CBR_SDA BIT(1)
-#define ZR36057_I2CBR_SCL BIT(0)
+#define ZR36057_I2CBR_SDA BIT(1)
+#define ZR36057_I2CBR_SCL BIT(0)
#define ZR36057_JMC 0x100 /* JPEG Mode and Control */
-#define ZR36057_JMC_JPG BIT(31)
-#define ZR36057_JMC_JPG_EXP_MODE (0 << 29)
-#define ZR36057_JMC_JPG_CMP_MODE BIT(29)
-#define ZR36057_JMC_MJPG_EXP_MODE (2 << 29)
-#define ZR36057_JMC_MJPG_CMP_MODE (3 << 29)
-#define ZR36057_JMC_RTBUSY_FB BIT(6)
-#define ZR36057_JMC_GO_EN BIT(5)
-#define ZR36057_JMC_SYNC_MSTR BIT(4)
-#define ZR36057_JMC_FLD_PER_BUFF BIT(3)
-#define ZR36057_JMC_VFIFO_FB BIT(2)
-#define ZR36057_JMC_CFIFO_FB BIT(1)
-#define ZR36057_JMC_STLL_LIT_ENDIAN BIT(0)
+#define ZR36057_JMC_JPG BIT(31)
+#define ZR36057_JMC_JPG_EXP_MODE (0 << 29)
+#define ZR36057_JMC_JPG_CMP_MODE BIT(29)
+#define ZR36057_JMC_MJPG_EXP_MODE (2 << 29)
+#define ZR36057_JMC_MJPG_CMP_MODE (3 << 29)
+#define ZR36057_JMC_RTBUSY_FB BIT(6)
+#define ZR36057_JMC_GO_EN BIT(5)
+#define ZR36057_JMC_SYNC_MSTR BIT(4)
+#define ZR36057_JMC_FLD_PER_BUFF BIT(3)
+#define ZR36057_JMC_VFIFO_FB BIT(2)
+#define ZR36057_JMC_CFIFO_FB BIT(1)
+#define ZR36057_JMC_STLL_LIT_ENDIAN BIT(0)
#define ZR36057_JPC 0x104 /* JPEG Process Control */
-#define ZR36057_JPC_P_RESET BIT(7)
-#define ZR36057_JPC_COD_TRNS_EN BIT(5)
-#define ZR36057_JPC_ACTIVE BIT(0)
+#define ZR36057_JPC_P_RESET BIT(7)
+#define ZR36057_JPC_COD_TRNS_EN BIT(5)
+#define ZR36057_JPC_ACTIVE BIT(0)
#define ZR36057_VSP 0x108 /* Vertical Sync Parameters */
-#define ZR36057_VSP_VSYNC_SIZE 16
-#define ZR36057_VSP_FRM_TOT 0
+#define ZR36057_VSP_VSYNC_SIZE 16
+#define ZR36057_VSP_FRM_TOT 0
#define ZR36057_HSP 0x10c /* Horizontal Sync Parameters */
-#define ZR36057_HSP_HSYNC_START 16
-#define ZR36057_HSP_LINE_TOT 0
+#define ZR36057_HSP_HSYNC_START 16
+#define ZR36057_HSP_LINE_TOT 0
#define ZR36057_FHAP 0x110 /* Field Horizontal Active Portion */
#define ZR36057_FHAP_NAX 16
@@ -132,22 +132,22 @@
#define ZR36057_FVAP_PAY 0
#define ZR36057_FPP 0x118 /* Field Process Parameters */
-#define ZR36057_FPP_ODD_EVEN BIT(0)
+#define ZR36057_FPP_ODD_EVEN BIT(0)
#define ZR36057_JCBA 0x11c /* JPEG Code Base Address */
#define ZR36057_JCFT 0x120 /* JPEG Code FIFO Threshold */
#define ZR36057_JCGI 0x124 /* JPEG Codec Guest ID */
-#define ZR36057_JCGI_JPE_GUEST_ID 4
-#define ZR36057_JCGI_JPE_GUEST_REG 0
+#define ZR36057_JCGI_JPE_GUEST_ID 4
+#define ZR36057_JCGI_JPE_GUEST_REG 0
#define ZR36057_GCR2 0x12c /* GuestBus Control Register (2) */
#define ZR36057_POR 0x200 /* Post Office Register */
-#define ZR36057_POR_PO_PEN BIT(25)
-#define ZR36057_POR_PO_TIME BIT(24)
-#define ZR36057_POR_PO_DIR BIT(23)
+#define ZR36057_POR_PO_PEN BIT(25)
+#define ZR36057_POR_PO_TIME BIT(24)
+#define ZR36057_POR_PO_DIR BIT(23)
#define ZR36057_STR 0x300 /* "Still" Transfer Register */
diff --git a/drivers/staging/media/zoran/zr36060.c b/drivers/media/pci/zoran/zr36060.c
index 7798016f1f96..75fd167603dc 100644
--- a/drivers/staging/media/zoran/zr36060.c
+++ b/drivers/media/pci/zoran/zr36060.c
@@ -243,7 +243,10 @@ static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC
static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 };
static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 };
-/* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */
+/*
+ * SOF (start of frame) segment depends on width, height and sampling ratio
+ * of each color component
+ */
static int zr36060_set_sof(struct zr36060 *ptr)
{
struct zoran *zr = videocodec_to_zoran(ptr->codec);
@@ -555,8 +558,6 @@ static int zr36060_set_video(struct videocodec *codec, const struct tvnorm *norm
reg = 6 - 1; /* VsyncSize */
zr36060_write(ptr, ZR060_SGR_VSYNC, reg);
- //reg = 30 - 1; /* HsyncSize */
-///*CP*/ reg = (zr->params.norm == 1 ? 57 : 68);
reg = 68;
zr36060_write(ptr, ZR060_SGR_HSYNC, reg);
diff --git a/drivers/staging/media/zoran/zr36060.h b/drivers/media/pci/zoran/zr36060.h
index fbf5429534ac..75c88677a4bd 100644
--- a/drivers/staging/media/zoran/zr36060.h
+++ b/drivers/media/pci/zoran/zr36060.h
@@ -124,78 +124,78 @@ struct zr36060 {
/* ZR36060 LOAD register bits */
-#define ZR060_LOAD_LOAD BIT(7)
-#define ZR060_LOAD_SYNC_RST BIT(0)
+#define ZR060_LOAD_LOAD BIT(7)
+#define ZR060_LOAD_SYNC_RST BIT(0)
/* ZR36060 Code FIFO Status register bits */
-#define ZR060_CFSR_BUSY BIT(7)
-#define ZR060_CFSR_C_BUSY BIT(2)
+#define ZR060_CFSR_BUSY BIT(7)
+#define ZR060_CFSR_C_BUSY BIT(2)
#define ZR060_CFSR_CFIFO (3 << 0)
/* ZR36060 Code Interface register */
-#define ZR060_CIR_CODE16 BIT(7)
-#define ZR060_CIR_ENDIAN BIT(6)
-#define ZR060_CIR_CFIS BIT(2)
-#define ZR060_CIR_CODE_MSTR BIT(0)
+#define ZR060_CIR_CODE16 BIT(7)
+#define ZR060_CIR_ENDIAN BIT(6)
+#define ZR060_CIR_CFIS BIT(2)
+#define ZR060_CIR_CODE_MSTR BIT(0)
/* ZR36060 Codec Mode register */
-#define ZR060_CMR_COMP BIT(7)
-#define ZR060_CMR_ATP BIT(6)
-#define ZR060_CMR_PASS2 BIT(5)
-#define ZR060_CMR_TLM BIT(4)
-#define ZR060_CMR_BRB BIT(2)
-#define ZR060_CMR_FSF BIT(1)
+#define ZR060_CMR_COMP BIT(7)
+#define ZR060_CMR_ATP BIT(6)
+#define ZR060_CMR_PASS2 BIT(5)
+#define ZR060_CMR_TLM BIT(4)
+#define ZR060_CMR_BRB BIT(2)
+#define ZR060_CMR_FSF BIT(1)
/* ZR36060 Markers Enable register */
-#define ZR060_MER_APP BIT(7)
-#define ZR060_MER_COM BIT(6)
-#define ZR060_MER_DRI BIT(5)
-#define ZR060_MER_DQT BIT(4)
-#define ZR060_MER_DHT BIT(3)
+#define ZR060_MER_APP BIT(7)
+#define ZR060_MER_COM BIT(6)
+#define ZR060_MER_DRI BIT(5)
+#define ZR060_MER_DQT BIT(4)
+#define ZR060_MER_DHT BIT(3)
/* ZR36060 Interrupt Mask register */
-#define ZR060_IMR_EOAV BIT(3)
-#define ZR060_IMR_EOI BIT(2)
-#define ZR060_IMR_END BIT(1)
-#define ZR060_IMR_DATA_ERR BIT(0)
+#define ZR060_IMR_EOAV BIT(3)
+#define ZR060_IMR_EOI BIT(2)
+#define ZR060_IMR_END BIT(1)
+#define ZR060_IMR_DATA_ERR BIT(0)
/* ZR36060 Interrupt Status register */
#define ZR060_ISR_PRO_CNT (3 << 6)
-#define ZR060_ISR_EOAV BIT(3)
-#define ZR060_ISR_EOI BIT(2)
-#define ZR060_ISR_END BIT(1)
-#define ZR060_ISR_DATA_ERR BIT(0)
+#define ZR060_ISR_EOAV BIT(3)
+#define ZR060_ISR_EOI BIT(2)
+#define ZR060_ISR_END BIT(1)
+#define ZR060_ISR_DATA_ERR BIT(0)
/* ZR36060 Video Control register */
-#define ZR060_VCR_VIDEO8 BIT(7)
-#define ZR060_VCR_RANGE BIT(6)
-#define ZR060_VCR_FI_DET BIT(3)
-#define ZR060_VCR_FI_VEDGE BIT(2)
-#define ZR060_VCR_FI_EXT BIT(1)
-#define ZR060_VCR_SYNC_MSTR BIT(0)
+#define ZR060_VCR_VIDEO8 BIT(7)
+#define ZR060_VCR_RANGE BIT(6)
+#define ZR060_VCR_FI_DET BIT(3)
+#define ZR060_VCR_FI_VEDGE BIT(2)
+#define ZR060_VCR_FI_EXT BIT(1)
+#define ZR060_VCR_SYNC_MSTR BIT(0)
/* ZR36060 Video Polarity register */
-#define ZR060_VPR_VCLK_POL BIT(7)
-#define ZR060_VPR_P_VAL_POL BIT(6)
-#define ZR060_VPR_POE_POL BIT(5)
-#define ZR060_VPR_S_IMG_POL BIT(4)
-#define ZR060_VPR_BL_POL BIT(3)
-#define ZR060_VPR_FI_POL BIT(2)
-#define ZR060_VPR_HS_POL BIT(1)
-#define ZR060_VPR_VS_POL BIT(0)
+#define ZR060_VPR_VCLK_POL BIT(7)
+#define ZR060_VPR_P_VAL_POL BIT(6)
+#define ZR060_VPR_POE_POL BIT(5)
+#define ZR060_VPR_S_IMG_POL BIT(4)
+#define ZR060_VPR_BL_POL BIT(3)
+#define ZR060_VPR_FI_POL BIT(2)
+#define ZR060_VPR_HS_POL BIT(1)
+#define ZR060_VPR_VS_POL BIT(0)
/* ZR36060 Scaling register */
-#define ZR060_SR_V_SCALE BIT(2)
-#define ZR060_SR_H_SCALE2 BIT(0)
+#define ZR060_SR_V_SCALE BIT(2)
+#define ZR060_SR_H_SCALE2 BIT(0)
#define ZR060_SR_H_SCALE4 (2 << 0)
int zr36060_init_module(void);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f1056ceaf5a8..a9334263fa9b 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -81,6 +81,7 @@ source "drivers/media/platform/samsung/Kconfig"
source "drivers/media/platform/st/Kconfig"
source "drivers/media/platform/sunxi/Kconfig"
source "drivers/media/platform/ti/Kconfig"
+source "drivers/media/platform/verisilicon/Kconfig"
source "drivers/media/platform/via/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index a881e97bae95..a91f42024273 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -24,6 +24,7 @@ obj-y += samsung/
obj-y += st/
obj-y += sunxi/
obj-y += ti/
+obj-y += verisilicon/
obj-y += via/
obj-y += xilinx/
diff --git a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
index 5e7b319f300d..142d421a8d76 100644
--- a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
@@ -1030,7 +1030,6 @@ static int ge2d_remove(struct platform_device *pdev)
video_unregister_device(ge2d->vfd);
v4l2_m2m_release(ge2d->m2m_dev);
- video_device_release(ge2d->vfd);
v4l2_device_unregister(&ge2d->v4l2_dev);
clk_disable_unprepare(ge2d->clk);
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 9e64041cc1c1..feb75dc204de 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -808,14 +808,6 @@ static void vdec_init_fmt(struct vpu_inst *inst)
inst->cap_format.field = V4L2_FIELD_NONE;
else
inst->cap_format.field = V4L2_FIELD_SEQ_TB;
- if (vdec->codec_info.color_primaries == V4L2_COLORSPACE_DEFAULT)
- vdec->codec_info.color_primaries = V4L2_COLORSPACE_REC709;
- if (vdec->codec_info.transfer_chars == V4L2_XFER_FUNC_DEFAULT)
- vdec->codec_info.transfer_chars = V4L2_XFER_FUNC_709;
- if (vdec->codec_info.matrix_coeffs == V4L2_YCBCR_ENC_DEFAULT)
- vdec->codec_info.matrix_coeffs = V4L2_YCBCR_ENC_709;
- if (vdec->codec_info.full_range == V4L2_QUANTIZATION_DEFAULT)
- vdec->codec_info.full_range = V4L2_QUANTIZATION_LIM_RANGE;
}
static void vdec_init_crop(struct vpu_inst *inst)
@@ -1555,6 +1547,14 @@ static int vdec_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i
vdec->codec_info.frame_rate.numerator,
vdec->codec_info.frame_rate.denominator);
break;
+ case 9:
+ num = scnprintf(str, size, "colorspace: %d, %d, %d, %d (%d)\n",
+ vdec->codec_info.color_primaries,
+ vdec->codec_info.transfer_chars,
+ vdec->codec_info.matrix_coeffs,
+ vdec->codec_info.full_range,
+ vdec->codec_info.vui_present);
+ break;
default:
break;
}
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
index 461524dd1e44..37212f087fdd 100644
--- a/drivers/media/platform/amphion/venc.c
+++ b/drivers/media/platform/amphion/venc.c
@@ -644,7 +644,7 @@ static int venc_ctrl_init(struct vpu_inst *inst)
BITRATE_DEFAULT_PEAK);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 30);
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 8000, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0);
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index f914de6ed81e..beac0309ca8d 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -119,7 +119,6 @@ struct vpu_mbox {
enum vpu_core_state {
VPU_CORE_DEINIT = 0,
VPU_CORE_ACTIVE,
- VPU_CORE_SNAPSHOT,
VPU_CORE_HANG
};
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 73faa50d2865..f9ec1753f7c8 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -89,7 +89,7 @@ static int vpu_core_boot_done(struct vpu_core *core)
core->supported_instance_count = min(core->supported_instance_count, count);
}
core->fw_version = fw_version;
- core->state = VPU_CORE_ACTIVE;
+ vpu_core_set_state(core, VPU_CORE_ACTIVE);
return 0;
}
@@ -172,10 +172,26 @@ int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
return __vpu_alloc_dma(core->dev, buf);
}
-static void vpu_core_check_hang(struct vpu_core *core)
+void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state)
{
- if (core->hang_mask)
- core->state = VPU_CORE_HANG;
+ if (state != core->state)
+ vpu_trace(core->dev, "vpu core state change from %d to %d\n", core->state, state);
+ core->state = state;
+ if (core->state == VPU_CORE_DEINIT)
+ core->hang_mask = 0;
+}
+
+static void vpu_core_update_state(struct vpu_core *core)
+{
+ if (!vpu_iface_get_power_state(core)) {
+ if (core->request_count)
+ vpu_core_set_state(core, VPU_CORE_HANG);
+ else
+ vpu_core_set_state(core, VPU_CORE_DEINIT);
+
+ } else if (core->state == VPU_CORE_ACTIVE && core->hang_mask) {
+ vpu_core_set_state(core, VPU_CORE_HANG);
+ }
}
static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
@@ -188,11 +204,13 @@ static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 ty
dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
if (c->type != type)
continue;
+ mutex_lock(&c->lock);
+ vpu_core_update_state(c);
+ mutex_unlock(&c->lock);
if (c->state == VPU_CORE_DEINIT) {
core = c;
break;
}
- vpu_core_check_hang(c);
if (c->state != VPU_CORE_ACTIVE)
continue;
if (c->request_count < request_count) {
@@ -409,6 +427,12 @@ int vpu_inst_register(struct vpu_inst *inst)
}
mutex_lock(&core->lock);
+ if (core->state != VPU_CORE_ACTIVE) {
+ dev_err(core->dev, "vpu core is not active, state = %d\n", core->state);
+ ret = -EINVAL;
+ goto exit;
+ }
+
if (inst->id >= 0 && inst->id < core->supported_instance_count)
goto exit;
@@ -450,7 +474,7 @@ int vpu_inst_unregister(struct vpu_inst *inst)
vpu_core_release_instance(core, inst->id);
inst->id = VPU_INST_NULL_ID;
}
- vpu_core_check_hang(core);
+ vpu_core_update_state(core);
if (core->state == VPU_CORE_HANG && !core->instance_mask) {
int err;
@@ -459,7 +483,7 @@ int vpu_inst_unregister(struct vpu_inst *inst)
err = vpu_core_sw_reset(core);
mutex_lock(&core->lock);
if (!err) {
- core->state = VPU_CORE_ACTIVE;
+ vpu_core_set_state(core, VPU_CORE_ACTIVE);
core->hang_mask = 0;
}
}
@@ -609,7 +633,7 @@ static int vpu_core_probe(struct platform_device *pdev)
mutex_init(&core->cmd_lock);
init_completion(&core->cmp);
init_waitqueue_head(&core->ack_wq);
- core->state = VPU_CORE_DEINIT;
+ vpu_core_set_state(core, VPU_CORE_DEINIT);
core->res = of_device_get_match_data(dev);
if (!core->res)
@@ -758,33 +782,18 @@ static int __maybe_unused vpu_core_resume(struct device *dev)
mutex_lock(&core->lock);
pm_runtime_resume_and_get(dev);
vpu_core_get_vpu(core);
- if (core->state != VPU_CORE_SNAPSHOT)
- goto exit;
- if (!vpu_iface_get_power_state(core)) {
- if (!list_empty(&core->instances)) {
+ if (core->request_count) {
+ if (!vpu_iface_get_power_state(core))
ret = vpu_core_boot(core, false);
- if (ret) {
- dev_err(core->dev, "%s boot fail\n", __func__);
- core->state = VPU_CORE_DEINIT;
- goto exit;
- }
- } else {
- core->state = VPU_CORE_DEINIT;
- }
- } else {
- if (!list_empty(&core->instances)) {
+ else
ret = vpu_core_sw_reset(core);
- if (ret) {
- dev_err(core->dev, "%s sw_reset fail\n", __func__);
- core->state = VPU_CORE_HANG;
- goto exit;
- }
+ if (ret) {
+ dev_err(core->dev, "resume failed\n");
+ vpu_core_set_state(core, VPU_CORE_HANG);
}
- core->state = VPU_CORE_ACTIVE;
}
-
-exit:
+ vpu_core_update_state(core);
pm_runtime_put_sync(dev);
mutex_unlock(&core->lock);
@@ -798,18 +807,11 @@ static int __maybe_unused vpu_core_suspend(struct device *dev)
int ret = 0;
mutex_lock(&core->lock);
- if (core->state == VPU_CORE_ACTIVE) {
- if (!list_empty(&core->instances)) {
- ret = vpu_core_snapshot(core);
- if (ret) {
- mutex_unlock(&core->lock);
- return ret;
- }
- }
-
- core->state = VPU_CORE_SNAPSHOT;
- }
+ if (core->request_count)
+ ret = vpu_core_snapshot(core);
mutex_unlock(&core->lock);
+ if (ret)
+ return ret;
vpu_core_cancel_work(core);
diff --git a/drivers/media/platform/amphion/vpu_core.h b/drivers/media/platform/amphion/vpu_core.h
index 00a662997da4..65b562642603 100644
--- a/drivers/media/platform/amphion/vpu_core.h
+++ b/drivers/media/platform/amphion/vpu_core.h
@@ -11,5 +11,6 @@ u32 csr_readl(struct vpu_core *core, u32 reg);
int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf);
void vpu_free_dma(struct vpu_buffer *buf);
struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index);
+void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state);
#endif
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
index f72c8a506b22..260f1c4b8f8d 100644
--- a/drivers/media/platform/amphion/vpu_dbg.c
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include "vpu.h"
#include "vpu_defs.h"
+#include "vpu_core.h"
#include "vpu_helpers.h"
#include "vpu_cmds.h"
#include "vpu_rpc.h"
@@ -233,6 +234,10 @@ static int vpu_dbg_core(struct seq_file *s, void *data)
if (seq_write(s, str, num))
return 0;
+ num = scnprintf(str, sizeof(str), "power %s\n",
+ vpu_iface_get_power_state(core) ? "on" : "off");
+ if (seq_write(s, str, num))
+ return 0;
num = scnprintf(str, sizeof(str), "state = %d\n", core->state);
if (seq_write(s, str, num))
return 0;
@@ -346,10 +351,10 @@ static ssize_t vpu_dbg_core_write(struct file *file,
pm_runtime_resume_and_get(core->dev);
mutex_lock(&core->lock);
- if (core->state != VPU_CORE_DEINIT && !core->instance_mask) {
+ if (vpu_iface_get_power_state(core) && !core->request_count) {
dev_info(core->dev, "reset\n");
if (!vpu_core_sw_reset(core)) {
- core->state = VPU_CORE_ACTIVE;
+ vpu_core_set_state(core, VPU_CORE_ACTIVE);
core->hang_mask = 0;
}
}
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index f4a488bf9880..51e0702f9ae1 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -1293,7 +1293,7 @@ static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
vbuf = to_vb2_v4l2_buffer(scode->vb);
data = vb2_plane_vaddr(scode->vb, 0);
- if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf))
+ if (scode->inst->total_input_count == 0 || vpu_vb_is_codecconfig(vbuf))
return 0;
if (MALONE_VC1_CONTAIN_NAL(*data))
return 0;
diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c
index 35145e3348f0..54270d6b6f50 100644
--- a/drivers/media/platform/intel/pxa_camera.c
+++ b/drivers/media/platform/intel/pxa_camera.c
@@ -854,7 +854,7 @@ fail:
return -ENOMEM;
}
-static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
+static void pxa_video_buf_set_actdma(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf)
{
buf->active_dma = DMA_Y;
@@ -973,7 +973,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
* stopped. This means the tailed buffer would never be transferred by DMA.
* This function restarts the capture for this corner case, where :
* - DADR() == DADDR_STOP
- * - a videobuffer is queued on the pcdev->capture list
+ * - a video buffer is queued on the pcdev->capture list
*
* Please check the "DMA hot chaining timeslice issue" in
* Documentation/driver-api/media/drivers/pxa_camera.rst
@@ -1163,7 +1163,7 @@ static void pxa_camera_eof(struct tasklet_struct *t)
pcdev->active = list_first_entry(&pcdev->capture,
struct pxa_buffer, queue);
buf = pcdev->active;
- pxa_videobuf_set_actdma(pcdev, buf);
+ pxa_video_buf_set_actdma(pcdev, buf);
pxa_dma_start_channels(pcdev);
}
@@ -1416,7 +1416,7 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
* the actual buffer is yours
*/
buf->inwork = 0;
- pxa_videobuf_set_actdma(pcdev, buf);
+ pxa_video_buf_set_actdma(pcdev, buf);
return ret;
}
diff --git a/drivers/media/platform/marvell/mcam-core.h b/drivers/media/platform/marvell/mcam-core.h
index f324d808d737..51e66db45af6 100644
--- a/drivers/media/platform/marvell/mcam-core.h
+++ b/drivers/media/platform/marvell/mcam-core.h
@@ -32,7 +32,7 @@
#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
!defined(MCAM_MODE_DMA_SG)
-#error One of the videobuf buffer modes must be selected in the config
+#error One of the vb2 buffer modes must be selected in the config
#endif
diff --git a/drivers/media/platform/mediatek/Kconfig b/drivers/media/platform/mediatek/Kconfig
index af47d9888552..84104e2cd024 100644
--- a/drivers/media/platform/mediatek/Kconfig
+++ b/drivers/media/platform/mediatek/Kconfig
@@ -6,3 +6,4 @@ source "drivers/media/platform/mediatek/jpeg/Kconfig"
source "drivers/media/platform/mediatek/mdp/Kconfig"
source "drivers/media/platform/mediatek/vcodec/Kconfig"
source "drivers/media/platform/mediatek/vpu/Kconfig"
+source "drivers/media/platform/mediatek/mdp3/Kconfig"
diff --git a/drivers/media/platform/mediatek/Makefile b/drivers/media/platform/mediatek/Makefile
index d3850a13f128..38e6ba917fe5 100644
--- a/drivers/media/platform/mediatek/Makefile
+++ b/drivers/media/platform/mediatek/Makefile
@@ -3,3 +3,4 @@ obj-y += jpeg/
obj-y += mdp/
obj-y += vcodec/
obj-y += vpu/
+obj-y += mdp3/
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 87685a62a5c2..3071b61946c3 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1414,7 +1414,6 @@ static int mtk_jpeg_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->vdev);
- video_device_release(jpeg->vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
diff --git a/drivers/media/platform/mediatek/mdp3/Kconfig b/drivers/media/platform/mediatek/mdp3/Kconfig
new file mode 100644
index 000000000000..50ae07b75b5f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MEDIATEK_MDP3
+ tristate "MediaTek MDP v3 driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
+ depends on REMOTEPROC
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MTK_MMSYS
+ select VIDEO_MEDIATEK_VPU
+ select MTK_CMDQ
+ select MTK_SCP
+ default n
+ help
+ It is a V4L2 driver present in the MediaTek MT8183 SoC.
+ The driver supports scaling and color space conversion.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-mdp3.
diff --git a/drivers/media/platform/mediatek/mdp3/Makefile b/drivers/media/platform/mediatek/mdp3/Makefile
new file mode 100644
index 000000000000..63e6c87e480b
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+mtk-mdp3-y += mtk-mdp3-core.o mtk-mdp3-vpu.o mtk-mdp3-regs.o
+mtk-mdp3-y += mtk-mdp3-m2m.o
+mtk-mdp3-y += mtk-mdp3-comp.o mtk-mdp3-cmdq.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP3) += mtk-mdp3.o
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h
new file mode 100644
index 000000000000..3b2c6531c194
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_CCORR_H__
+#define __MDP_REG_CCORR_H__
+
+#define MDP_CCORR_EN 0x000
+#define MDP_CCORR_CFG 0x020
+#define MDP_CCORR_SIZE 0x030
+
+/* MASK */
+#define MDP_CCORR_EN_MASK 0x00000001
+#define MDP_CCORR_CFG_MASK 0x70001317
+#define MDP_CCORR_SIZE_MASK 0x1fff1fff
+
+#endif // __MDP_REG_CCORR_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
new file mode 100644
index 000000000000..be4065e252d3
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_RDMA_H__
+#define __MDP_REG_RDMA_H__
+
+#define MDP_RDMA_EN 0x000
+#define MDP_RDMA_RESET 0x008
+#define MDP_RDMA_CON 0x020
+#define MDP_RDMA_GMCIF_CON 0x028
+#define MDP_RDMA_SRC_CON 0x030
+#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE 0x060
+#define MDP_RDMA_MF_BKGD_SIZE_IN_PXL 0x068
+#define MDP_RDMA_MF_SRC_SIZE 0x070
+#define MDP_RDMA_MF_CLIP_SIZE 0x078
+#define MDP_RDMA_MF_OFFSET_1 0x080
+#define MDP_RDMA_SF_BKGD_SIZE_IN_BYTE 0x090
+#define MDP_RDMA_SRC_END_0 0x100
+#define MDP_RDMA_SRC_END_1 0x108
+#define MDP_RDMA_SRC_END_2 0x110
+#define MDP_RDMA_SRC_OFFSET_0 0x118
+#define MDP_RDMA_SRC_OFFSET_1 0x120
+#define MDP_RDMA_SRC_OFFSET_2 0x128
+#define MDP_RDMA_SRC_OFFSET_0_P 0x148
+#define MDP_RDMA_TRANSFORM_0 0x200
+#define MDP_RDMA_RESV_DUMMY_0 0x2a0
+#define MDP_RDMA_MON_STA_1 0x408
+#define MDP_RDMA_SRC_BASE_0 0xf00
+#define MDP_RDMA_SRC_BASE_1 0xf08
+#define MDP_RDMA_SRC_BASE_2 0xf10
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_Y 0xf20
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_C 0xf28
+
+/* MASK */
+#define MDP_RDMA_EN_MASK 0x00000001
+#define MDP_RDMA_RESET_MASK 0x00000001
+#define MDP_RDMA_CON_MASK 0x00001110
+#define MDP_RDMA_GMCIF_CON_MASK 0xfffb3771
+#define MDP_RDMA_SRC_CON_MASK 0xf3ffffff
+#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE_MASK 0x001fffff
+#define MDP_RDMA_MF_BKGD_SIZE_IN_PXL_MASK 0x001fffff
+#define MDP_RDMA_MF_SRC_SIZE_MASK 0x1fff1fff
+#define MDP_RDMA_MF_CLIP_SIZE_MASK 0x1fff1fff
+#define MDP_RDMA_MF_OFFSET_1_MASK 0x003f001f
+#define MDP_RDMA_SF_BKGD_SIZE_IN_BYTE_MASK 0x001fffff
+#define MDP_RDMA_SRC_END_0_MASK 0xffffffff
+#define MDP_RDMA_SRC_END_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_END_2_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_0_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_2_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_0_P_MASK 0xffffffff
+#define MDP_RDMA_TRANSFORM_0_MASK 0xff110777
+#define MDP_RDMA_RESV_DUMMY_0_MASK 0xffffffff
+#define MDP_RDMA_MON_STA_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_BASE_0_MASK 0xffffffff
+#define MDP_RDMA_SRC_BASE_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_BASE_2_MASK 0xffffffff
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_Y_MASK 0xffffffff
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_C_MASK 0xffffffff
+
+#endif // __MDP_REG_RDMA_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
new file mode 100644
index 000000000000..484f6d60641f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_RSZ_H__
+#define __MDP_REG_RSZ_H__
+
+#define PRZ_ENABLE 0x000
+#define PRZ_CONTROL_1 0x004
+#define PRZ_CONTROL_2 0x008
+#define PRZ_INPUT_IMAGE 0x010
+#define PRZ_OUTPUT_IMAGE 0x014
+#define PRZ_HORIZONTAL_COEFF_STEP 0x018
+#define PRZ_VERTICAL_COEFF_STEP 0x01c
+#define PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET 0x020
+#define PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET 0x024
+#define PRZ_LUMA_VERTICAL_INTEGER_OFFSET 0x028
+#define PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET 0x02c
+#define PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET 0x030
+#define PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET 0x034
+
+/* MASK */
+#define PRZ_ENABLE_MASK 0x00010001
+#define PRZ_CONTROL_1_MASK 0xfffffff3
+#define PRZ_CONTROL_2_MASK 0x0ffffaff
+#define PRZ_INPUT_IMAGE_MASK 0xffffffff
+#define PRZ_OUTPUT_IMAGE_MASK 0xffffffff
+#define PRZ_HORIZONTAL_COEFF_STEP_MASK 0x007fffff
+#define PRZ_VERTICAL_COEFF_STEP_MASK 0x007fffff
+#define PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET_MASK 0x0000ffff
+#define PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+#define PRZ_LUMA_VERTICAL_INTEGER_OFFSET_MASK 0x0000ffff
+#define PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+#define PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET_MASK 0x0000ffff
+#define PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+
+#endif // __MDP_REG_RSZ_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h
new file mode 100644
index 000000000000..0280e91c09e4
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_WDMA_H__
+#define __MDP_REG_WDMA_H__
+
+#define WDMA_EN 0x008
+#define WDMA_RST 0x00c
+#define WDMA_CFG 0x014
+#define WDMA_SRC_SIZE 0x018
+#define WDMA_CLIP_SIZE 0x01c
+#define WDMA_CLIP_COORD 0x020
+#define WDMA_DST_W_IN_BYTE 0x028
+#define WDMA_ALPHA 0x02c
+#define WDMA_BUF_CON2 0x03c
+#define WDMA_DST_UV_PITCH 0x078
+#define WDMA_DST_ADDR_OFFSET 0x080
+#define WDMA_DST_U_ADDR_OFFSET 0x084
+#define WDMA_DST_V_ADDR_OFFSET 0x088
+#define WDMA_FLOW_CTRL_DBG 0x0a0
+#define WDMA_DST_ADDR 0xf00
+#define WDMA_DST_U_ADDR 0xf04
+#define WDMA_DST_V_ADDR 0xf08
+
+/* MASK */
+#define WDMA_EN_MASK 0x00000001
+#define WDMA_RST_MASK 0x00000001
+#define WDMA_CFG_MASK 0xff03bff0
+#define WDMA_SRC_SIZE_MASK 0x3fff3fff
+#define WDMA_CLIP_SIZE_MASK 0x3fff3fff
+#define WDMA_CLIP_COORD_MASK 0x3fff3fff
+#define WDMA_DST_W_IN_BYTE_MASK 0x0000ffff
+#define WDMA_ALPHA_MASK 0x800000ff
+#define WDMA_BUF_CON2_MASK 0xffffffff
+#define WDMA_DST_UV_PITCH_MASK 0x0000ffff
+#define WDMA_DST_ADDR_OFFSET_MASK 0x0fffffff
+#define WDMA_DST_U_ADDR_OFFSET_MASK 0x0fffffff
+#define WDMA_DST_V_ADDR_OFFSET_MASK 0x0fffffff
+#define WDMA_FLOW_CTRL_DBG_MASK 0x0000f3ff
+#define WDMA_DST_ADDR_MASK 0xffffffff
+#define WDMA_DST_U_ADDR_MASK 0xffffffff
+#define WDMA_DST_V_ADDR_MASK 0xffffffff
+
+#endif // __MDP_REG_WDMA_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
new file mode 100644
index 000000000000..6d3ff0e2b672
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_WROT_H__
+#define __MDP_REG_WROT_H__
+
+#define VIDO_CTRL 0x000
+#define VIDO_MAIN_BUF_SIZE 0x008
+#define VIDO_SOFT_RST 0x010
+#define VIDO_SOFT_RST_STAT 0x014
+#define VIDO_CROP_OFST 0x020
+#define VIDO_TAR_SIZE 0x024
+#define VIDO_OFST_ADDR 0x02c
+#define VIDO_STRIDE 0x030
+#define VIDO_OFST_ADDR_C 0x038
+#define VIDO_STRIDE_C 0x03c
+#define VIDO_DITHER 0x054
+#define VIDO_STRIDE_V 0x06c
+#define VIDO_OFST_ADDR_V 0x068
+#define VIDO_RSV_1 0x070
+#define VIDO_IN_SIZE 0x078
+#define VIDO_ROT_EN 0x07c
+#define VIDO_FIFO_TEST 0x080
+#define VIDO_MAT_CTRL 0x084
+#define VIDO_BASE_ADDR 0xf00
+#define VIDO_BASE_ADDR_C 0xf04
+#define VIDO_BASE_ADDR_V 0xf08
+
+/* MASK */
+#define VIDO_CTRL_MASK 0xf530711f
+#define VIDO_MAIN_BUF_SIZE_MASK 0x1fff7f77
+#define VIDO_SOFT_RST_MASK 0x00000001
+#define VIDO_SOFT_RST_STAT_MASK 0x00000001
+#define VIDO_TAR_SIZE_MASK 0x1fff1fff
+#define VIDO_CROP_OFST_MASK 0x1fff1fff
+#define VIDO_OFST_ADDR_MASK 0x0fffffff
+#define VIDO_STRIDE_MASK 0x0000ffff
+#define VIDO_OFST_ADDR_C_MASK 0x0fffffff
+#define VIDO_STRIDE_C_MASK 0x0000ffff
+#define VIDO_DITHER_MASK 0xff000001
+#define VIDO_STRIDE_V_MASK 0x0000ffff
+#define VIDO_OFST_ADDR_V_MASK 0x0fffffff
+#define VIDO_RSV_1_MASK 0xffffffff
+#define VIDO_IN_SIZE_MASK 0x1fff1fff
+#define VIDO_ROT_EN_MASK 0x00000001
+#define VIDO_FIFO_TEST_MASK 0x00000fff
+#define VIDO_MAT_CTRL_MASK 0x000000f3
+#define VIDO_BASE_ADDR_MASK 0xffffffff
+#define VIDO_BASE_ADDR_C_MASK 0xffffffff
+#define VIDO_BASE_ADDR_V_MASK 0xffffffff
+
+#endif // __MDP_REG_WROT_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
new file mode 100644
index 000000000000..3e66ebaee2da
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Holmes Chiou <holmes.chiou@mediatek.com>
+ * Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_IMG_IPI_H__
+#define __MTK_IMG_IPI_H__
+
+#include <linux/types.h>
+
+/*
+ * ISP-MDP generic input information
+ * MD5 of the target SCP blob:
+ * 6da52bdcf4bf76a0983b313e1d4745d6
+ */
+
+#define IMG_MAX_HW_INPUTS 3
+
+#define IMG_MAX_HW_OUTPUTS 4
+
+#define IMG_MAX_PLANES 3
+
+#define IMG_IPI_INIT 1
+#define IMG_IPI_DEINIT 2
+#define IMG_IPI_FRAME 3
+#define IMG_IPI_DEBUG 4
+
+struct img_timeval {
+ u32 tv_sec;
+ u32 tv_usec;
+} __packed;
+
+struct img_addr {
+ u64 va; /* Used for Linux OS access */
+ u32 pa; /* Used for CM4 access */
+ u32 iova; /* Used for IOMMU HW access */
+} __packed;
+
+struct tuning_addr {
+ u64 present;
+ u32 pa; /* Used for CM4 access */
+ u32 iova; /* Used for IOMMU HW access */
+} __packed;
+
+struct img_sw_addr {
+ u64 va; /* Used for APMCU access */
+ u32 pa; /* Used for CM4 access */
+} __packed;
+
+struct img_plane_format {
+ u32 size;
+ u16 stride;
+} __packed;
+
+struct img_pix_format {
+ u16 width;
+ u16 height;
+ u32 colorformat; /* enum mdp_color */
+ u16 ycbcr_prof; /* enum mdp_ycbcr_profile */
+ struct img_plane_format plane_fmt[IMG_MAX_PLANES];
+} __packed;
+
+struct img_image_buffer {
+ struct img_pix_format format;
+ u32 iova[IMG_MAX_PLANES];
+ /* enum mdp_buffer_usage, FD or advanced ISP usages */
+ u32 usage;
+} __packed;
+
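+/* Sub-pixel offsets below are fixed point with 20 fractional bits */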
+#define IMG_SUBPIXEL_SHIFT 20
+
+struct img_crop {
+ s16 left;
+ s16 top;
+ u16 width;
+ u16 height;
+ u32 left_subpix;
+ u32 top_subpix;
+ u32 width_subpix;
+ u32 height_subpix;
+} __packed;
+
+#define IMG_CTRL_FLAG_HFLIP BIT(0)
+#define IMG_CTRL_FLAG_DITHER BIT(1)
+#define IMG_CTRL_FLAG_SHARPNESS BIT(4)
+#define IMG_CTRL_FLAG_HDR BIT(5)
+#define IMG_CTRL_FLAG_DRE BIT(6)
+
+struct img_input {
+ struct img_image_buffer buffer;
+ u16 flags; /* HDR, DRE, dither */
+} __packed;
+
+struct img_output {
+ struct img_image_buffer buffer;
+ struct img_crop crop;
+ s16 rotation;
+ u16 flags; /* H-flip, sharpness, dither */
+} __packed;
+
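+/* Per-frame job descriptor exchanged with the SCP co-processor over IPI */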
+struct img_ipi_frameparam {
+ u32 index;
+ u32 frame_no;
+ struct img_timeval timestamp;
+ u8 type; /* enum mdp_stream_type */
+ u8 state;
+ u8 num_inputs;
+ u8 num_outputs;
+ u64 drv_data;
+ struct img_input inputs[IMG_MAX_HW_INPUTS];
+ struct img_output outputs[IMG_MAX_HW_OUTPUTS];
+ struct tuning_addr tuning_data;
+ struct img_addr subfrm_data;
+ struct img_sw_addr config_data;
+ struct img_sw_addr self_data;
+} __packed;
+
+struct img_sw_buffer {
+ u64 handle; /* Used for APMCU access */
+ u32 scp_addr; /* Used for CM4 access */
+} __packed;
+
+struct img_ipi_param {
+ u8 usage;
+ struct img_sw_buffer frm_param;
+} __packed;
+
+struct img_frameparam {
+ struct list_head list_entry;
+ struct img_ipi_frameparam frameparam;
+};
+
+/* ISP-MDP generic output information */
+
+struct img_comp_frame {
+ u32 output_disable:1;
+ u32 bypass:1;
+ u16 in_width;
+ u16 in_height;
+ u16 out_width;
+ u16 out_height;
+ struct img_crop crop;
+ u16 in_total_width;
+ u16 out_total_width;
+} __packed;
+
+struct img_region {
+ s16 left;
+ s16 right;
+ s16 top;
+ s16 bottom;
+} __packed;
+
+struct img_offset {
+ s16 left;
+ s16 top;
+ u32 left_subpix;
+ u32 top_subpix;
+} __packed;
+
+struct img_comp_subfrm {
+ u32 tile_disable:1;
+ struct img_region in;
+ struct img_region out;
+ struct img_offset luma;
+ struct img_offset chroma;
+ s16 out_vertical; /* Output vertical index */
+ s16 out_horizontal; /* Output horizontal index */
+} __packed;
+
+#define IMG_MAX_SUBFRAMES 14
+
+struct mdp_rdma_subfrm {
+ u32 offset[IMG_MAX_PLANES];
+ u32 offset_0_p;
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+} __packed;
+
+struct mdp_rdma_data {
+ u32 src_ctrl;
+ u32 control;
+ u32 iova[IMG_MAX_PLANES];
+ u32 iova_end[IMG_MAX_PLANES];
+ u32 mf_bkgd;
+ u32 mf_bkgd_in_pxl;
+ u32 sf_bkgd;
+ u32 ufo_dec_y;
+ u32 ufo_dec_c;
+ u32 transform;
+ struct mdp_rdma_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct mdp_rsz_subfrm {
+ u32 control2;
+ u32 src;
+ u32 clip;
+} __packed;
+
+struct mdp_rsz_data {
+ u32 coeff_step_x;
+ u32 coeff_step_y;
+ u32 control1;
+ u32 control2;
+ struct mdp_rsz_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct mdp_wrot_subfrm {
+ u32 offset[IMG_MAX_PLANES];
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+ u32 main_buf;
+} __packed;
+
+struct mdp_wrot_data {
+ u32 iova[IMG_MAX_PLANES];
+ u32 control;
+ u32 stride[IMG_MAX_PLANES];
+ u32 mat_ctrl;
+ u32 fifo_test;
+ u32 filter;
+ struct mdp_wrot_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct mdp_wdma_subfrm {
+ u32 offset[IMG_MAX_PLANES];
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+} __packed;
+
+struct mdp_wdma_data {
+ u32 wdma_cfg;
+ u32 iova[IMG_MAX_PLANES];
+ u32 w_in_byte;
+ u32 uv_stride;
+ struct mdp_wdma_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct isp_data {
+ u64 dl_flags; /* 1 << (enum mdp_comp_type) */
+ u32 smxi_iova[4];
+ u32 cq_idx;
+ u32 cq_iova;
+ u32 tpipe_iova[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct img_compparam {
+ u16 type; /* enum mdp_comp_type */
+ u16 id; /* enum mtk_mdp_comp_id */
+ u32 input;
+ u32 outputs[IMG_MAX_HW_OUTPUTS];
+ u32 num_outputs;
+ struct img_comp_frame frame;
+ struct img_comp_subfrm subfrms[IMG_MAX_SUBFRAMES];
+ u32 num_subfrms;
+ union {
+ struct mdp_rdma_data rdma;
+ struct mdp_rsz_data rsz;
+ struct mdp_wrot_data wrot;
+ struct mdp_wdma_data wdma;
+ struct isp_data isp;
+ };
+} __packed;
+
+#define IMG_MAX_COMPONENTS 20
+
+struct img_mux {
+ u32 reg;
+ u32 value;
+ u32 subsys_id;
+};
+
+struct img_mmsys_ctrl {
+ struct img_mux sets[IMG_MAX_COMPONENTS * 2];
+ u32 num_sets;
+};
+
+struct img_config {
+ struct img_compparam components[IMG_MAX_COMPONENTS];
+ u32 num_components;
+ struct img_mmsys_ctrl ctrls[IMG_MAX_SUBFRAMES];
+ u32 num_subfrms;
+} __packed;
+
+#endif /* __MTK_IMG_IPI_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
new file mode 100644
index 000000000000..29f6c1cd3de7
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/mailbox_controller.h>
+#include <linux/platform_device.h>
+#include "mtk-mdp3-cmdq.h"
+#include "mtk-mdp3-comp.h"
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-m2m.h"
+
+#define MDP_PATH_MAX_COMPS IMG_MAX_COMPONENTS
+
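+/*
+ * Per-job context used while building a CMDQ packet: the resolved
+ * component contexts of one MDP path plus the frame parameters and the
+ * output compose/bound rectangles.
+ */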
+struct mdp_path {
+ struct mdp_dev *mdp_dev;
+ struct mdp_comp_ctx comps[MDP_PATH_MAX_COMPS];
+ u32 num_comps;
+ const struct img_config *config;
+ const struct img_ipi_frameparam *param;
+ const struct v4l2_rect *composes[IMG_MAX_HW_OUTPUTS];
+ struct v4l2_rect bounds[IMG_MAX_HW_OUTPUTS];
+};
+
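+/*
+ * has_op()/call_op(): invoke an optional component callback and return 0
+ * when the component does not implement the requested operation.
+ */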
+#define has_op(ctx, op) \
+ ((ctx)->comp->ops && (ctx)->comp->ops->op)
+ #define call_op(ctx, op, ...) \
+ (has_op(ctx, op) ? (ctx)->comp->ops->op(ctx, ##__VA_ARGS__) : 0)
+
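+/*
+ * A component is skipped for a sub-frame when the index is beyond its
+ * sub-frame count, or when the frame output or this tile is disabled.
+ */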
+static bool is_output_disabled(const struct img_compparam *param, u32 count)
+{
+ return (count < param->num_subfrms) ?
+ (param->frame.output_disable ||
+ param->subfrms[count].tile_disable) :
+ true;
+}
+
+static int mdp_path_subfrm_require(const struct mdp_path *path,
+ struct mdp_cmdq_cmd *cmd,
+ s32 *mutex_id, u32 count)
+{
+ const struct img_config *config = path->config;
+ const struct mdp_comp_ctx *ctx;
+ const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data;
+ struct device *dev = &path->mdp_dev->pdev->dev;
+ struct mtk_mutex **mutex = path->mdp_dev->mdp_mutex;
+ int id, index;
+
+ /* Decide which mutex to use based on the current pipeline */
+ switch (path->comps[0].comp->id) {
+ case MDP_COMP_RDMA0:
+ *mutex_id = MDP_PIPE_RDMA0;
+ break;
+ case MDP_COMP_ISP_IMGI:
+ *mutex_id = MDP_PIPE_IMGI;
+ break;
+ case MDP_COMP_WPEI:
+ *mutex_id = MDP_PIPE_WPEI;
+ break;
+ case MDP_COMP_WPEI2:
+ *mutex_id = MDP_PIPE_WPEI2;
+ break;
+ default:
+ dev_err(dev, "Unknown pipeline and no mutex is assigned");
+ return -EINVAL;
+ }
+
+ /* Set mutex mod */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ id = ctx->comp->id;
+ mtk_mutex_write_mod(mutex[*mutex_id],
+ data->mdp_mutex_table_idx[id], false);
+ }
+
+ mtk_mutex_write_sof(mutex[*mutex_id],
+ MUTEX_SOF_IDX_SINGLE_MODE);
+
+ return 0;
+}
+
+static int mdp_path_subfrm_run(const struct mdp_path *path,
+ struct mdp_cmdq_cmd *cmd,
+ s32 *mutex_id, u32 count)
+{
+ const struct img_config *config = path->config;
+ const struct mdp_comp_ctx *ctx;
+ struct device *dev = &path->mdp_dev->pdev->dev;
+ struct mtk_mutex **mutex = path->mdp_dev->mdp_mutex;
+ int index;
+ s32 event;
+
+ if (*mutex_id == -1) {
+ dev_err(dev, "Incorrect mutex id\n");
+ return -EINVAL;
+ }
+
+ /* Wait WROT SRAM shared to DISP RDMA */
+ /* Clear SOF event for each engine */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
+ if (event != MDP_GCE_NO_EVENT)
+ MM_REG_CLEAR(cmd, event);
+ }
+
+ /* Enable the mutex */
+ mtk_mutex_enable_by_cmdq(mutex[*mutex_id], (void *)&cmd->pkt);
+
+ /* Wait SOF events and clear mutex modules (optional) */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
+ if (event != MDP_GCE_NO_EVENT)
+ MM_REG_WAIT(cmd, event);
+ }
+
+ return 0;
+}
+
+static int mdp_path_ctx_init(struct mdp_dev *mdp, struct mdp_path *path)
+{
+ const struct img_config *config = path->config;
+ int index, ret;
+
+ if (config->num_components < 1)
+ return -EINVAL;
+
+ for (index = 0; index < config->num_components; index++) {
+ ret = mdp_comp_ctx_config(mdp, &path->comps[index],
+ &config->components[index],
+ path->param);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
+ struct mdp_path *path, u32 count)
+{
+ const struct img_config *config = path->config;
+ const struct img_mmsys_ctrl *ctrl = &config->ctrls[count];
+ const struct img_mux *set;
+ struct mdp_comp_ctx *ctx;
+ s32 mutex_id;
+ int index, ret;
+
+ /* Acquire components */
+ ret = mdp_path_subfrm_require(path, cmd, &mutex_id, count);
+ if (ret)
+ return ret;
+ /* Enable mux settings */
+ for (index = 0; index < ctrl->num_sets; index++) {
+ set = &ctrl->sets[index];
+ cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
+ set->value, 0xFFFFFFFF);
+ }
+ /* Config sub-frame information */
+ for (index = (config->num_components - 1); index >= 0; index--) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ ret = call_op(ctx, config_subfrm, cmd, count);
+ if (ret)
+ return ret;
+ }
+ /* Run components */
+ ret = mdp_path_subfrm_run(path, cmd, &mutex_id, count);
+ if (ret)
+ return ret;
+ /* Wait components done */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ ret = call_op(ctx, wait_comp_event, cmd);
+ if (ret)
+ return ret;
+ }
+ /* Advance to the next sub-frame */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ ret = call_op(ctx, advance_subfrm, cmd, count);
+ if (ret)
+ return ret;
+ }
+ /* Disable mux settings */
+ for (index = 0; index < ctrl->num_sets; index++) {
+ set = &ctrl->sets[index];
+ cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
+ 0, 0xFFFFFFFF);
+ }
+
+ return 0;
+}
+
+static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
+ struct mdp_path *path)
+{
+ const struct img_config *config = path->config;
+ struct mdp_comp_ctx *ctx;
+ int index, count, ret;
+
+ /* Config path frame */
+ /* Reset components */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ ret = call_op(ctx, init_comp, cmd);
+ if (ret)
+ return ret;
+ }
+ /* Config frame mode */
+ for (index = 0; index < config->num_components; index++) {
+ const struct v4l2_rect *compose =
+ path->composes[ctx->param->outputs[0]];
+
+ ctx = &path->comps[index];
+ ret = call_op(ctx, config_frame, cmd, compose);
+ if (ret)
+ return ret;
+ }
+
+ /* Config path sub-frames */
+ for (count = 0; count < config->num_subfrms; count++) {
+ ret = mdp_path_config_subfrm(cmd, path, count);
+ if (ret)
+ return ret;
+ }
+ /* Post processing information */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ ret = call_op(ctx, post_process, cmd);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
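+/*
+ * Allocate the command buffer of @pkt and map it for DMA towards the GCE
+ * mailbox device; mdp_cmdq_pkt_destroy() undoes both steps.
+ */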
+static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+ size_t size)
+{
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+ dev = client->chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ return -ENOMEM;
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return 0;
+}
+
+static void mdp_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ pkt->va_base = NULL;
+}
+
+static void mdp_auto_release_work(struct work_struct *work)
+{
+ struct mdp_cmdq_cmd *cmd;
+ struct mdp_dev *mdp;
+
+ cmd = container_of(work, struct mdp_cmdq_cmd, auto_release_work);
+ mdp = cmd->mdp;
+
+ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+
+ mdp_cmdq_pkt_destroy(&cmd->pkt);
+ kfree(cmd->comps);
+ cmd->comps = NULL;
+ kfree(cmd);
+ cmd = NULL;
+}
+
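+/*
+ * Mailbox RX callback, invoked once GCE has consumed the packet. The
+ * heavy teardown (mutex unprepare, clock gating, buffer release) is
+ * deferred to clock_wq; if queueing fails it is done inline instead.
+ */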
+static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
+{
+ struct mdp_cmdq_cmd *cmd;
+ struct cmdq_cb_data *data;
+ struct mdp_dev *mdp;
+ struct device *dev;
+
+ if (!mssg) {
+ pr_info("%s:no callback data\n", __func__);
+ return;
+ }
+
+ data = (struct cmdq_cb_data *)mssg;
+ cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt);
+ mdp = cmd->mdp;
+ dev = &mdp->pdev->dev;
+
+ if (cmd->mdp_ctx)
+ mdp_m2m_job_finish(cmd->mdp_ctx);
+
+ if (cmd->user_cmdq_cb) {
+ struct cmdq_cb_data user_cb_data;
+
+ user_cb_data.sta = data->sta;
+ user_cb_data.pkt = data->pkt;
+ cmd->user_cmdq_cb(user_cb_data);
+ }
+
+ INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work);
+ if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) {
+ dev_err(dev, "%s:queue_work fail!\n", __func__);
+ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+
+ mdp_cmdq_pkt_destroy(&cmd->pkt);
+ kfree(cmd->comps);
+ cmd->comps = NULL;
+ kfree(cmd);
+ cmd = NULL;
+ }
+}
+
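+/*
+ * Build a CMDQ packet for the configured path, enable component clocks
+ * and hand the packet to the GCE mailbox; completion is reported through
+ * mdp_handle_cmdq_callback().
+ */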
+int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+{
+ struct mdp_path *path = NULL;
+ struct mdp_cmdq_cmd *cmd = NULL;
+ struct mdp_comp *comps = NULL;
+ struct device *dev = &mdp->pdev->dev;
+ int i, ret;
+
+ atomic_inc(&mdp->job_count);
+ if (atomic_read(&mdp->suspended)) {
+ atomic_dec(&mdp->job_count);
+ return -ECANCELED;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ if (mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K)) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ comps = kcalloc(param->config->num_components, sizeof(*comps),
+ GFP_KERNEL);
+ if (!comps) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ path->mdp_dev = mdp;
+ path->config = param->config;
+ path->param = param->param;
+ for (i = 0; i < param->param->num_outputs; i++) {
+ path->bounds[i].left = 0;
+ path->bounds[i].top = 0;
+ path->bounds[i].width =
+ param->param->outputs[i].buffer.format.width;
+ path->bounds[i].height =
+ param->param->outputs[i].buffer.format.height;
+ path->composes[i] = param->composes[i] ?
+ param->composes[i] : &path->bounds[i];
+ }
+
+ ret = mdp_path_ctx_init(mdp, path);
+ if (ret) {
+ dev_err(dev, "mdp_path_ctx_init error\n");
+ goto err_cmdq_data;
+ }
+
+ mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+
+ ret = mdp_path_config(mdp, cmd, path);
+ if (ret) {
+ dev_err(dev, "mdp_path_config error\n");
+ goto err_cmdq_data;
+ }
+ cmdq_pkt_finalize(&cmd->pkt);
+
+ for (i = 0; i < param->config->num_components; i++)
+ memcpy(&comps[i], path->comps[i].comp,
+ sizeof(struct mdp_comp));
+
+ mdp->cmdq_clt->client.rx_callback = mdp_handle_cmdq_callback;
+ cmd->mdp = mdp;
+ cmd->user_cmdq_cb = param->cmdq_cb;
+ cmd->user_cb_data = param->cb_data;
+ cmd->comps = comps;
+ cmd->num_comps = param->config->num_components;
+ cmd->mdp_ctx = param->mdp_ctx;
+
+ ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps);
+ if (ret) {
+ dev_err(dev, "comp %d failed to enable clock!\n", ret);
+ goto err_clock_off;
+ }
+
+ dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev,
+ cmd->pkt.pa_base, cmd->pkt.cmd_buf_size,
+ DMA_TO_DEVICE);
+ ret = mbox_send_message(mdp->cmdq_clt->chan, &cmd->pkt);
+ if (ret < 0) {
+ dev_err(dev, "mbox send message fail %d!\n", ret);
+ goto err_clock_off;
+ }
+ mbox_client_txdone(mdp->cmdq_clt->chan, 0);
+
+ kfree(path);
+ return 0;
+
+err_clock_off:
+ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+err_cmdq_data:
+ kfree(path);
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+ if (cmd->pkt.buf_size > 0)
+ mdp_cmdq_pkt_destroy(&cmd->pkt);
+ kfree(comps);
+ kfree(cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mdp_cmdq_send);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
new file mode 100644
index 000000000000..43475b862ddb
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_CMDQ_H__
+#define __MTK_MDP3_CMDQ_H__
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk-img-ipi.h"
+
+struct platform_device *mdp_get_plat_device(struct platform_device *pdev);
+
+struct mdp_cmdq_param {
+ struct img_config *config;
+ struct img_ipi_frameparam *param;
+ const struct v4l2_rect *composes[IMG_MAX_HW_OUTPUTS];
+
+ void (*cmdq_cb)(struct cmdq_cb_data data);
+ void *cb_data;
+ void *mdp_ctx;
+};
+
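+/* State of one in-flight CMDQ job, wrapped around the cmdq_pkt it owns */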
+struct mdp_cmdq_cmd {
+ struct work_struct auto_release_work;
+ struct cmdq_pkt pkt;
+ s32 *event;
+ struct mdp_dev *mdp;
+ void (*user_cmdq_cb)(struct cmdq_cb_data data);
+ void *user_cb_data;
+ struct mdp_comp *comps;
+ void *mdp_ctx;
+ u8 num_comps;
+};
+
+struct mdp_dev;
+
+int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param);
+
+#endif /* __MTK_MDP3_CMDQ_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
new file mode 100644
index 000000000000..e62abf3587bf
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include "mtk-mdp3-comp.h"
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-regs.h"
+
+#include "mdp_reg_rdma.h"
+#include "mdp_reg_ccorr.h"
+#include "mdp_reg_rsz.h"
+#include "mdp_reg_wrot.h"
+#include "mdp_reg_wdma.h"
+
+static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
+
+static inline const struct mdp_platform_config *
+__get_plat_cfg(const struct mdp_comp_ctx *ctx)
+{
+ if (!ctx)
+ return NULL;
+
+ return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
+}
+
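+/*
+ * Component flag: bitmask of engines a component occupies. With
+ * rdma_rsz1_sram_sharing, RDMA0 also claims RSZ1 because the two engines
+ * share SRAM.
+ */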
+static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+
+ if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
+ if (ctx->comp->id == MDP_COMP_RDMA0)
+ return BIT(MDP_COMP_RDMA0) | BIT(MDP_COMP_RSZ1);
+
+ return BIT(ctx->comp->id);
+}
+
+static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
+ struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];
+
+ /* Disable RSZ1 */
+ if (ctx->comp->id == MDP_COMP_RDMA0 && prz1)
+ MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
+ 0x0, BIT(0));
+ }
+
+ /* Reset RDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
+ return 0;
+}
+
+static int config_rdma_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_rdma_data *rdma = &ctx->param->rdma;
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ u32 colorformat = ctx->input->buffer.format.colorformat;
+ bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
+ bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
+ if (block10bit)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
+ else
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
+ }
+
+ /* Setup smi control */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
+ (7 << 4) + //burst type to 8
+ (1 << 16), //enable pre-ultra
+ 0x00030071);
+
+ /* Setup source frame info */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, rdma->src_ctrl,
+ 0x03C8FE0F);
+
+ if (mdp_cfg)
+ if (mdp_cfg->rdma_support_10bit && en_ufo) {
+ /* Setup source buffer base */
+ MM_REG_WRITE(cmd, subsys_id,
+ base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
+ rdma->ufo_dec_y, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
+ rdma->ufo_dec_c, 0xFFFFFFFF);
+ /* Set 10bit source frame pitch */
+ if (block10bit)
+ MM_REG_WRITE(cmd, subsys_id,
+ base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
+ rdma->mf_bkgd_in_pxl, 0x001FFFFF);
+ }
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, rdma->control,
+ 0x1110);
+ /* Setup source buffer base */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, rdma->iova[0],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, rdma->iova[1],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, rdma->iova[2],
+ 0xFFFFFFFF);
+ /* Setup source buffer end */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
+ rdma->iova_end[0], 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
+ rdma->iova_end[1], 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
+ rdma->iova_end[2], 0xFFFFFFFF);
+ /* Setup source frame pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
+ rdma->mf_bkgd, 0x001FFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
+ rdma->sf_bkgd, 0x001FFFFF);
+ /* Setup color transform */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
+ rdma->transform, 0x0F110000);
+
+ return 0;
+}
+
+static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_rdma_subfrm *subfrm = &ctx->param->rdma.subfrms[index];
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ u32 colorformat = ctx->input->buffer.format.colorformat;
+ bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
+ bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Enable RDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));
+
+ /* Set Y pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
+ subfrm->offset[0], 0xFFFFFFFF);
+
+ /* Set 10bit UFO mode */
+ if (mdp_cfg)
+ if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_SRC_OFFSET_0_P,
+ subfrm->offset_0_p, 0xFFFFFFFF);
+
+ /* Set U pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
+ subfrm->offset[1], 0xFFFFFFFF);
+ /* Set V pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
+ subfrm->offset[2], 0xFFFFFFFF);
+ /* Set source size */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, subfrm->src,
+ 0x1FFF1FFF);
+ /* Set target size */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
+ subfrm->clip, 0x1FFF1FFF);
+ /* Set crop offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
+ subfrm->clip_ofst, 0x003F001F);
+
+ if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
+ if ((csf->in.right - csf->in.left + 1) > 320)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));
+
+ return 0;
+}
+
+static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (ctx->comp->alias_id == 0)
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
+ else
+ dev_err(dev, "Do not support RDMA1_DONE event\n");
+
+ /* Disable RDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
+ return 0;
+}
+
+static const struct mdp_comp_ops rdma_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_rdma,
+ .config_frame = config_rdma_frame,
+ .config_subfrm = config_rdma_subfrm,
+ .wait_comp_event = wait_rdma_event,
+};
+
+static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Reset RSZ */
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
+ /* Enable RSZ */
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
+ return 0;
+}
+
+static int config_rsz_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_rsz_data *rsz = &ctx->param->rsz;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (ctx->param->frame.bypass) {
+ /* Disable RSZ */
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
+ return 0;
+ }
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, rsz->control1,
+ 0x03FFFDF3);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, rsz->control2,
+ 0x0FFFC290);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
+ rsz->coeff_step_x, 0x007FFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
+ rsz->coeff_step_y, 0x007FFFFF);
+ return 0;
+}
+
+static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_rsz_subfrm *subfrm = &ctx->param->rsz.subfrms[index];
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, subfrm->control2,
+ 0x00003800);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, subfrm->src,
+ 0xFFFFFFFF);
+
+ if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
+ if ((csf->in.right - csf->in.left + 1) <= 16)
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
+ BIT(27), BIT(27));
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
+ csf->luma.left, 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ csf->luma.left_subpix, 0x1FFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
+ csf->luma.top, 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
+ csf->luma.top_subpix, 0x1FFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
+ csf->chroma.left, 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ csf->chroma.left_subpix, 0x1FFFFF);
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, subfrm->clip,
+ 0xFFFFFFFF);
+
+ return 0;
+}
+
+static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+
+ if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if ((csf->in.right - csf->in.left + 1) <= 16)
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
+ BIT(27));
+ }
+
+ return 0;
+}
+
+static const struct mdp_comp_ops rsz_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_rsz,
+ .config_frame = config_rsz_frame,
+ .config_subfrm = config_rsz_subfrm,
+ .advance_subfrm = advance_rsz_subfrm,
+};
+
+static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Reset WROT */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
+ return 0;
+}
+
+static int config_wrot_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_wrot_data *wrot = &ctx->param->wrot;
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Write frame base address */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, wrot->iova[0],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, wrot->iova[1],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, wrot->iova[2],
+ 0xFFFFFFFF);
+ /* Write frame related registers */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, wrot->control,
+ 0xF131510F);
+ /* Write frame Y pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, wrot->stride[0],
+ 0x0000FFFF);
+ /* Write frame UV pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, wrot->stride[1],
+ 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, wrot->stride[2],
+ 0xFFFF);
+ /* Write matrix control */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, wrot->mat_ctrl, 0xF3);
+
+ /* Set the fixed ALPHA as 0xFF */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
+ 0xFF000000);
+ /* Set VIDO_EOL_SEL */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
+ /* Set VIDO_FIFO_TEST */
+ if (wrot->fifo_test != 0)
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
+ wrot->fifo_test, 0xFFF);
+ /* Filter enable */
+ if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
+ wrot->filter, 0x77);
+
+ return 0;
+}
+
+static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_wrot_subfrm *subfrm = &ctx->param->wrot.subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Write Y pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
+ subfrm->offset[0], 0x0FFFFFFF);
+ /* Write U pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
+ subfrm->offset[1], 0x0FFFFFFF);
+ /* Write V pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
+ subfrm->offset[2], 0x0FFFFFFF);
+ /* Write source size */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, subfrm->src,
+ 0x1FFF1FFF);
+ /* Write target size */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, subfrm->clip,
+ 0x1FFF1FFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, subfrm->clip_ofst,
+ 0x1FFF1FFF);
+
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
+ subfrm->main_buf, 0x1FFF7F00);
+
+ /* Enable WROT */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));
+
+ return 0;
+}
+
+static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (ctx->comp->alias_id == 0)
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
+ else
+ dev_err(dev, "Do not support WROT1_DONE event\n");
+
+ if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
+ 0x77);
+
+ /* Disable WROT */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
+
+ return 0;
+}
+
+static const struct mdp_comp_ops wrot_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_wrot,
+ .config_frame = config_wrot_frame,
+ .config_subfrm = config_wrot_subfrm,
+ .wait_comp_event = wait_wrot_event,
+};
+
+static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Reset WDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
+ return 0;
+}
+
+static int config_wdma_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_wdma_data *wdma = &ctx->param->wdma;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
+ 0xFFFFFFFF);
+
+ /* Setup frame information */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, wdma->wdma_cfg,
+ 0x0F01B8F0);
+ /* Setup frame base address */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, wdma->iova[0],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, wdma->iova[1],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, wdma->iova[2],
+ 0xFFFFFFFF);
+ /* Setup Y pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
+ wdma->w_in_byte, 0x0000FFFF);
+ /* Setup UV pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
+ wdma->uv_stride, 0x0000FFFF);
+ /* Set the fixed ALPHA as 0xFF */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
+ 0x800000FF);
+
+ return 0;
+}
+
+static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_wdma_subfrm *subfrm = &ctx->param->wdma.subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Write Y pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
+ subfrm->offset[0], 0x0FFFFFFF);
+ /* Write U pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
+ subfrm->offset[1], 0x0FFFFFFF);
+ /* Write V pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
+ subfrm->offset[2], 0x0FFFFFFF);
+ /* Write source size */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, subfrm->src,
+ 0x3FFF3FFF);
+ /* Write target size */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, subfrm->clip,
+ 0x3FFF3FFF);
+ /* Write clip offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, subfrm->clip_ofst,
+ 0x3FFF3FFF);
+
+ /* Enable WDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));
+
+ return 0;
+}
+
+static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
+ /* Disable WDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
+ return 0;
+}
+
+static const struct mdp_comp_ops wdma_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_wdma,
+ .config_frame = config_wdma_frame,
+ .config_subfrm = config_wdma_subfrm,
+ .wait_comp_event = wait_wdma_event,
+};
+
+static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* CCORR enable */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
+ /* Relay mode */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
+ return 0;
+}
+
+static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+ u32 hsize, vsize;
+
+ hsize = csf->in.right - csf->in.left + 1;
+ vsize = csf->in.bottom - csf->in.top + 1;
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
+ (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
+ return 0;
+}
+
+static const struct mdp_comp_ops ccorr_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_ccorr,
+ .config_subfrm = config_ccorr_subfrm,
+};
+
+static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
+ [MDP_COMP_TYPE_RDMA] = &rdma_ops,
+ [MDP_COMP_TYPE_RSZ] = &rsz_ops,
+ [MDP_COMP_TYPE_WROT] = &wrot_ops,
+ [MDP_COMP_TYPE_WDMA] = &wdma_ops,
+ [MDP_COMP_TYPE_CCORR] = &ccorr_ops,
+};
+
+struct mdp_comp_match {
+ enum mdp_comp_type type;
+ u32 alias_id;
+};
+
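+/* Map each global component id to its (type, per-type alias id) pair */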
+static const struct mdp_comp_match mdp_comp_matches[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_WPEI] = { MDP_COMP_TYPE_WPEI, 0 },
+ [MDP_COMP_WPEO] = { MDP_COMP_TYPE_EXTO, 2 },
+ [MDP_COMP_WPEI2] = { MDP_COMP_TYPE_WPEI, 1 },
+ [MDP_COMP_WPEO2] = { MDP_COMP_TYPE_EXTO, 3 },
+ [MDP_COMP_ISP_IMGI] = { MDP_COMP_TYPE_IMGI, 0 },
+ [MDP_COMP_ISP_IMGO] = { MDP_COMP_TYPE_EXTO, 0 },
+ [MDP_COMP_ISP_IMG2O] = { MDP_COMP_TYPE_EXTO, 1 },
+
+ [MDP_COMP_CAMIN] = { MDP_COMP_TYPE_DL_PATH, 0 },
+ [MDP_COMP_CAMIN2] = { MDP_COMP_TYPE_DL_PATH, 1 },
+ [MDP_COMP_RDMA0] = { MDP_COMP_TYPE_RDMA, 0 },
+ [MDP_COMP_CCORR0] = { MDP_COMP_TYPE_CCORR, 0 },
+ [MDP_COMP_RSZ0] = { MDP_COMP_TYPE_RSZ, 0 },
+ [MDP_COMP_RSZ1] = { MDP_COMP_TYPE_RSZ, 1 },
+ [MDP_COMP_PATH0_SOUT] = { MDP_COMP_TYPE_PATH, 0 },
+ [MDP_COMP_PATH1_SOUT] = { MDP_COMP_TYPE_PATH, 1 },
+ [MDP_COMP_WROT0] = { MDP_COMP_TYPE_WROT, 0 },
+ [MDP_COMP_WDMA] = { MDP_COMP_TYPE_WDMA, 0 },
+};
+
+static const struct of_device_id mdp_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8183-mdp3-rdma",
+ .data = (void *)MDP_COMP_TYPE_RDMA,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-ccorr",
+ .data = (void *)MDP_COMP_TYPE_CCORR,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-rsz",
+ .data = (void *)MDP_COMP_TYPE_RSZ,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-wrot",
+ .data = (void *)MDP_COMP_TYPE_WROT,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-wdma",
+ .data = (void *)MDP_COMP_TYPE_WDMA,
+ },
+ {}
+};
+
+static const struct of_device_id mdp_sub_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8183-mdp3-wdma",
+ .data = (void *)MDP_COMP_TYPE_PATH,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-wrot",
+ .data = (void *)MDP_COMP_TYPE_PATH,
+ },
+ {}
+};
+
+/* Describes the order of clock and register items in the MDP DT properties */
+struct mdp_comp_info {
+ u32 clk_num;
+ u32 clk_ofst;
+ u32 dts_reg_ofst;
+};
+
+static const struct mdp_comp_info mdp_comp_dt_info[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_RDMA0] = {2, 0, 0},
+ [MDP_COMP_RSZ0] = {1, 0, 0},
+ [MDP_COMP_WROT0] = {1, 0, 0},
+ [MDP_COMP_WDMA] = {1, 0, 0},
+ [MDP_COMP_CCORR0] = {1, 0, 0},
+};
+
+static inline bool is_dma_capable(const enum mdp_comp_type type)
+{
+ return (type == MDP_COMP_TYPE_RDMA ||
+ type == MDP_COMP_TYPE_WROT ||
+ type == MDP_COMP_TYPE_WDMA);
+}
+
+static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
+{
+ /*
+ * Subcomponent PATH only describes the direction of data flow and
+ * does not need to wait for a GCE event.
+ */
+ return (type == MDP_COMP_TYPE_PATH);
+}
+
+static int mdp_comp_get_id(enum mdp_comp_type type, int alias_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp_comp_matches); i++)
+ if (mdp_comp_matches[i].type == type &&
+ mdp_comp_matches[i].alias_id == alias_id)
+ return i;
+ return -ENODEV;
+}
+
+int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
+{
+ int i, ret;
+
+ if (comp->comp_dev) {
+ ret = pm_runtime_get_sync(comp->comp_dev);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to get power, err %d. type:%d id:%d\n",
+ ret, comp->type, comp->id);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
+ if (IS_ERR_OR_NULL(comp->clks[i]))
+ continue;
+ ret = clk_prepare_enable(comp->clks[i]);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable clk %d. type:%d id:%d\n",
+ i, comp->type, comp->id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
+ if (IS_ERR_OR_NULL(comp->clks[i]))
+ continue;
+ clk_disable_unprepare(comp->clks[i]);
+ }
+
+ if (comp->comp_dev)
+ pm_runtime_put(comp->comp_dev);
+}
+
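+/*
+ * Enable the clocks of @num components; returns 0 on success or the
+ * 1-based index of the failing component, without rolling back the
+ * clocks already enabled.
+ */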
+int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ if (mdp_comp_clock_on(dev, &comps[i]) != 0)
+ return ++i;
+
+ return 0;
+}
+
+void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ mdp_comp_clock_off(dev, &comps[i]);
+}
+
+static int mdp_get_subsys_id(struct device *dev, struct device_node *node,
+ struct mdp_comp *comp)
+{
+ struct platform_device *comp_pdev;
+ struct cmdq_client_reg cmdq_reg;
+ int ret = 0;
+ int index = 0;
+
+ if (!dev || !node || !comp)
+ return -EINVAL;
+
+ comp_pdev = of_find_device_by_node(node);
+
+ if (!comp_pdev) {
+ dev_err(dev, "get comp_pdev fail! comp id=%d type=%d\n",
+ comp->id, comp->type);
+ return -ENODEV;
+ }
+
+ index = mdp_comp_dt_info[comp->id].dts_reg_ofst;
+ ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
+ if (ret != 0) {
+ dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
+ return -EINVAL;
+ }
+
+ comp->subsys_id = cmdq_reg.subsys;
+ dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
+
+ return 0;
+}
+
+static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
+ struct mdp_comp *comp)
+{
+ struct resource res;
+ phys_addr_t base;
+ int index = mdp_comp_dt_info[comp->id].dts_reg_ofst;
+
+ if (of_address_to_resource(node, index, &res) < 0)
+ base = 0L;
+ else
+ base = res.start;
+
+ comp->mdp_dev = mdp;
+ comp->regs = of_iomap(node, 0);
+ comp->reg_base = base;
+}
+
+static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
+ struct mdp_comp *comp, enum mtk_mdp_comp_id id)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int clk_num;
+ int clk_ofst;
+ int i;
+ s32 event;
+
+ if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
+ dev_err(dev, "Invalid component id %d\n", id);
+ return -EINVAL;
+ }
+
+ comp->id = id;
+ comp->type = mdp_comp_matches[id].type;
+ comp->alias_id = mdp_comp_matches[id].alias_id;
+ comp->ops = mdp_comp_ops[comp->type];
+ __mdp_comp_init(mdp, node, comp);
+
+ clk_num = mdp_comp_dt_info[id].clk_num;
+ clk_ofst = mdp_comp_dt_info[id].clk_ofst;
+
+ for (i = 0; i < clk_num; i++) {
+ comp->clks[i] = of_clk_get(node, i + clk_ofst);
+ if (IS_ERR(comp->clks[i]))
+ break;
+ }
+
+ mdp_get_subsys_id(dev, node, comp);
+
+ /* Set GCE SOF event */
+ if (is_bypass_gce_event(comp->type) ||
+ of_property_read_u32_index(node, "mediatek,gce-events",
+ MDP_GCE_EVENT_SOF, &event))
+ event = MDP_GCE_NO_EVENT;
+
+ comp->gce_event[MDP_GCE_EVENT_SOF] = event;
+
+ /* Set GCE EOF event */
+ if (is_dma_capable(comp->type)) {
+ if (of_property_read_u32_index(node, "mediatek,gce-events",
+ MDP_GCE_EVENT_EOF, &event)) {
+ dev_err(dev, "Component id %d has no EOF\n", id);
+ return -EINVAL;
+ }
+ } else {
+ event = MDP_GCE_NO_EVENT;
+ }
+
+ comp->gce_event[MDP_GCE_EVENT_EOF] = event;
+
+ return 0;
+}
+
+static void mdp_comp_deinit(struct mdp_comp *comp)
+{
+ if (!comp)
+ return;
+
+ if (comp->regs)
+ iounmap(comp->regs);
+}
+
+static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
+ struct device_node *node,
+ enum mtk_mdp_comp_id id)
+{
+ struct device *dev = &mdp->pdev->dev;
+ struct mdp_comp *comp;
+ int ret;
+
+ if (mdp->comp[id])
+ return ERR_PTR(-EEXIST);
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mdp_comp_init(mdp, node, comp, id);
+ if (ret) {
+ kfree(comp);
+ return ERR_PTR(ret);
+ }
+ mdp->comp[id] = comp;
+ mdp->comp[id]->mdp_dev = mdp;
+
+ dev_dbg(dev, "%s type:%d alias:%d id:%d base:%#x regs:%p\n",
+ dev->of_node->name, comp->type, comp->alias_id, id,
+ (u32)comp->reg_base, comp->regs);
+ return comp;
+}
+
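+/*
+ * Register PATH sub-components: they reuse the WROT/WDMA device nodes
+ * (see mdp_sub_comp_dt_ids) and only describe data-flow direction.
+ */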
+static int mdp_comp_sub_create(struct mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ struct device_node *node, *parent;
+
+ parent = dev->of_node->parent;
+
+ for_each_child_of_node(parent, node) {
+ const struct of_device_id *of_id;
+ enum mdp_comp_type type;
+ int id, alias_id;
+ struct mdp_comp *comp;
+
+ of_id = of_match_node(mdp_sub_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+ if (!of_device_is_available(node)) {
+ dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
+ node);
+ continue;
+ }
+
+ type = (enum mdp_comp_type)(uintptr_t)of_id->data;
+ alias_id = mdp_comp_alias_id[type];
+ id = mdp_comp_get_id(type, alias_id);
+ if (id < 0) {
+ dev_err(dev,
+ "Fail to get sub comp. id: type %d alias %d\n",
+ type, alias_id);
+ return -EINVAL;
+ }
+ mdp_comp_alias_id[type]++;
+
+ comp = mdp_comp_create(mdp, node, id);
+ if (IS_ERR(comp))
+ return PTR_ERR(comp);
+ }
+
+ return 0;
+}
+
+void mdp_comp_destroy(struct mdp_dev *mdp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
+ if (mdp->comp[i]) {
+ pm_runtime_disable(mdp->comp[i]->comp_dev);
+ mdp_comp_deinit(mdp->comp[i]);
+ kfree(mdp->comp[i]);
+ mdp->comp[i] = NULL;
+ }
+ }
+}
+
+int mdp_comp_config(struct mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ struct device_node *node, *parent;
+ struct platform_device *pdev;
+ int ret;
+
+ memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));
+
+ parent = dev->of_node->parent;
+ /* Iterate over sibling MDP function blocks */
+ for_each_child_of_node(parent, node) {
+ const struct of_device_id *of_id;
+ enum mdp_comp_type type;
+ int id, alias_id;
+ struct mdp_comp *comp;
+
+ of_id = of_match_node(mdp_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+
+ if (!of_device_is_available(node)) {
+ dev_dbg(dev, "Skipping disabled component %pOF\n",
+ node);
+ continue;
+ }
+
+ type = (enum mdp_comp_type)(uintptr_t)of_id->data;
+ alias_id = mdp_comp_alias_id[type];
+ id = mdp_comp_get_id(type, alias_id);
+ if (id < 0) {
+ dev_err(dev,
+ "Fail to get component id: type %d alias %d\n",
+ type, alias_id);
+ continue;
+ }
+ mdp_comp_alias_id[type]++;
+
+ comp = mdp_comp_create(mdp, node, id);
+ if (IS_ERR(comp)) {
+ ret = PTR_ERR(comp);
+ goto err_init_comps;
+ }
+
+ /* Only DMA capable components need the pm control */
+ comp->comp_dev = NULL;
+ if (!is_dma_capable(comp->type))
+ continue;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev) {
+ dev_warn(dev, "can't find platform device of node:%s\n",
+ node->name);
+ return -ENODEV;
+ }
+
+ comp->comp_dev = &pdev->dev;
+ pm_runtime_enable(comp->comp_dev);
+ }
+
+ ret = mdp_comp_sub_create(mdp);
+ if (ret)
+ goto err_init_comps;
+
+ return 0;
+
+err_init_comps:
+ mdp_comp_destroy(mdp);
+ return ret;
+}
+
+int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
+ const struct img_compparam *param,
+ const struct img_ipi_frameparam *frame)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ if (param->type < 0 || param->type >= MDP_MAX_COMP_COUNT) {
+ dev_err(dev, "Invalid component id %d", param->type);
+ return -EINVAL;
+ }
+
+ ctx->comp = mdp->comp[param->type];
+ if (!ctx->comp) {
+ dev_err(dev, "Uninit component id %d", param->type);
+ return -EINVAL;
+ }
+
+ ctx->param = param;
+ ctx->input = &frame->inputs[param->input];
+ for (i = 0; i < param->num_outputs; i++)
+ ctx->outputs[i] = &frame->outputs[param->outputs[i]];
+ return 0;
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
new file mode 100644
index 000000000000..dc48f55ac4f7
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_COMP_H__
+#define __MTK_MDP3_COMP_H__
+
+#include "mtk-mdp3-cmdq.h"
+
+#define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask, ...) \
+ cmdq_pkt_write_mask(&((cmd)->pkt), id, \
+ (base) + (ofst), (val), (mask), ##__VA_ARGS__)
+
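+/*
+ * MM_REG_WRITE()/MM_REG_POLL() collapse @mask to 0xffffffff when it
+ * already covers every writable bit of the register (per the <reg>_MASK
+ * define), so full-register accesses are issued without a partial mask.
+ */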
+#define MM_REG_WRITE(cmd, id, base, ofst, val, mask, ...) \
+do { \
+ typeof(mask) (m) = (mask); \
+ MM_REG_WRITE_MASK(cmd, id, base, ofst, val, \
+ (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
+ (0xffffffff) : (m), ##__VA_ARGS__); \
+} while (0)
+
+#define MM_REG_WAIT(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_wfe(&((c)->pkt), (e), true); \
+} while (0)
+
+#define MM_REG_WAIT_NO_CLEAR(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_wfe(&((c)->pkt), (e), false); \
+} while (0)
+
+#define MM_REG_CLEAR(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_clear_event(&((c)->pkt), (e)); \
+} while (0)
+
+#define MM_REG_SET_EVENT(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_set_event(&((c)->pkt), (e)); \
+} while (0)
+
+#define MM_REG_POLL_MASK(cmd, id, base, ofst, val, _mask, ...) \
+do { \
+ typeof(_mask) (_m) = (_mask); \
+ cmdq_pkt_poll_mask(&((cmd)->pkt), id, \
+ (base) + (ofst), (val), (_m), ##__VA_ARGS__); \
+} while (0)
+
+#define MM_REG_POLL(cmd, id, base, ofst, val, mask, ...) \
+do { \
+ typeof(mask) (m) = (mask); \
+ MM_REG_POLL_MASK((cmd), id, base, ofst, val, \
+ (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
+ (0xffffffff) : (m), ##__VA_ARGS__); \
+} while (0)
+
+enum mtk_mdp_comp_id {
+ MDP_COMP_NONE = -1, /* Invalid engine */
+
+ /* ISP */
+ MDP_COMP_WPEI = 0,
+ MDP_COMP_WPEO, /* 1 */
+ MDP_COMP_WPEI2, /* 2 */
+ MDP_COMP_WPEO2, /* 3 */
+ MDP_COMP_ISP_IMGI, /* 4 */
+ MDP_COMP_ISP_IMGO, /* 5 */
+ MDP_COMP_ISP_IMG2O, /* 6 */
+
+ /* IPU */
+ MDP_COMP_IPUI, /* 7 */
+ MDP_COMP_IPUO, /* 8 */
+
+ /* MDP */
+ MDP_COMP_CAMIN, /* 9 */
+ MDP_COMP_CAMIN2, /* 10 */
+ MDP_COMP_RDMA0, /* 11 */
+ MDP_COMP_AAL0, /* 12 */
+ MDP_COMP_CCORR0, /* 13 */
+ MDP_COMP_RSZ0, /* 14 */
+ MDP_COMP_RSZ1, /* 15 */
+ MDP_COMP_TDSHP0, /* 16 */
+ MDP_COMP_COLOR0, /* 17 */
+ MDP_COMP_PATH0_SOUT, /* 18 */
+ MDP_COMP_PATH1_SOUT, /* 19 */
+ MDP_COMP_WROT0, /* 20 */
+ MDP_COMP_WDMA, /* 21 */
+
+ /* Dummy Engine */
+ MDP_COMP_RDMA1, /* 22 */
+ MDP_COMP_RSZ2, /* 23 */
+ MDP_COMP_TDSHP1, /* 24 */
+ MDP_COMP_WROT1, /* 25 */
+
+ MDP_MAX_COMP_COUNT /* ALWAYS keep at the end */
+};
+
+enum mdp_comp_type {
+ MDP_COMP_TYPE_INVALID = 0,
+
+ MDP_COMP_TYPE_RDMA,
+ MDP_COMP_TYPE_RSZ,
+ MDP_COMP_TYPE_WROT,
+ MDP_COMP_TYPE_WDMA,
+ MDP_COMP_TYPE_PATH,
+
+ MDP_COMP_TYPE_TDSHP,
+ MDP_COMP_TYPE_COLOR,
+ MDP_COMP_TYPE_DRE,
+ MDP_COMP_TYPE_CCORR,
+ MDP_COMP_TYPE_HDR,
+
+ MDP_COMP_TYPE_IMGI,
+ MDP_COMP_TYPE_WPEI,
+ MDP_COMP_TYPE_EXTO, /* External path */
+ MDP_COMP_TYPE_DL_PATH, /* Direct-link path */
+
+ MDP_COMP_TYPE_COUNT /* ALWAYS keep at the end */
+};
+
+#define MDP_GCE_NO_EVENT (-1)
+enum {
+ MDP_GCE_EVENT_SOF = 0,
+ MDP_GCE_EVENT_EOF = 1,
+ MDP_GCE_EVENT_MAX,
+};
+
+struct mdp_comp_ops;
+
+struct mdp_comp {
+ struct mdp_dev *mdp_dev;
+ void __iomem *regs;
+ phys_addr_t reg_base;
+ u8 subsys_id;
+ struct clk *clks[6];
+ struct device *comp_dev;
+ enum mdp_comp_type type;
+ enum mtk_mdp_comp_id id;
+ u32 alias_id;
+ s32 gce_event[MDP_GCE_EVENT_MAX];
+ const struct mdp_comp_ops *ops;
+};
+
+struct mdp_comp_ctx {
+ struct mdp_comp *comp;
+ const struct img_compparam *param;
+ const struct img_input *input;
+ const struct img_output *outputs[IMG_MAX_HW_OUTPUTS];
+};
+
+struct mdp_comp_ops {
+ s64 (*get_comp_flag)(const struct mdp_comp_ctx *ctx);
+ int (*init_comp)(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd);
+ int (*config_frame)(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose);
+ int (*config_subfrm)(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index);
+ int (*wait_comp_event)(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd);
+ int (*advance_subfrm)(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index);
+ int (*post_process)(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd);
+};
+
+struct mdp_dev;
+
+int mdp_comp_config(struct mdp_dev *mdp);
+void mdp_comp_destroy(struct mdp_dev *mdp);
+int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp);
+void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp);
+int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num);
+void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num);
+int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
+ const struct img_compparam *param,
+ const struct img_ipi_frameparam *frame);
+
+#endif /* __MTK_MDP3_COMP_H__ */
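
The mdp_comp_ops table declared above is the per-component hook set used while building a command packet. The sketch below is standalone and uses hypothetical types rather than the driver's real call path; it only illustrates the usual dispatch pattern, where each optional hook is NULL-checked before being invoked in configuration order.

#include <stdio.h>

struct demo_ctx;

struct demo_comp_ops {                    /* shape mirrors mdp_comp_ops */
	int (*init_comp)(struct demo_ctx *ctx);
	int (*config_frame)(struct demo_ctx *ctx);
	int (*config_subfrm)(struct demo_ctx *ctx, unsigned int index);
};

struct demo_ctx {
	const struct demo_comp_ops *ops;
};

static int demo_init(struct demo_ctx *ctx)  { (void)ctx; puts("init");  return 0; }
static int demo_frame(struct demo_ctx *ctx) { (void)ctx; puts("frame"); return 0; }

static int demo_config_component(struct demo_ctx *ctx, unsigned int subfrm)
{
	int ret;

	if (ctx->ops->init_comp && (ret = ctx->ops->init_comp(ctx)))
		return ret;
	if (ctx->ops->config_frame && (ret = ctx->ops->config_frame(ctx)))
		return ret;
	if (ctx->ops->config_subfrm)            /* optional hook left unset */
		return ctx->ops->config_subfrm(ctx, subfrm);
	return 0;
}

int main(void)
{
	static const struct demo_comp_ops ops = {
		.init_comp = demo_init,
		.config_frame = demo_frame,
	};
	struct demo_ctx ctx = { .ops = &ops };

	return demo_config_component(&ctx, 0);
}
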
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
new file mode 100644
index 000000000000..cde59579b7ae
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <media/videobuf2-dma-contig.h>
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-m2m.h"
+
+static const struct mdp_platform_config mt8183_plat_cfg = {
+ .rdma_support_10bit = true,
+ .rdma_rsz1_sram_sharing = true,
+ .rdma_upsample_repeat_only = true,
+ .rsz_disable_dcm_small_sample = false,
+ .wrot_filter_constraint = false,
+};
+
+static const struct of_device_id mt8183_mdp_probe_infra[MDP_INFRA_MAX] = {
+ [MDP_INFRA_MMSYS] = { .compatible = "mediatek,mt8183-mmsys" },
+ [MDP_INFRA_MUTEX] = { .compatible = "mediatek,mt8183-disp-mutex" },
+ [MDP_INFRA_SCP] = { .compatible = "mediatek,mt8183-scp" }
+};
+
+static const u32 mt8183_mutex_idx[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_RDMA0] = MUTEX_MOD_IDX_MDP_RDMA0,
+ [MDP_COMP_RSZ0] = MUTEX_MOD_IDX_MDP_RSZ0,
+ [MDP_COMP_RSZ1] = MUTEX_MOD_IDX_MDP_RSZ1,
+ [MDP_COMP_TDSHP0] = MUTEX_MOD_IDX_MDP_TDSHP0,
+ [MDP_COMP_WROT0] = MUTEX_MOD_IDX_MDP_WROT0,
+ [MDP_COMP_WDMA] = MUTEX_MOD_IDX_MDP_WDMA,
+ [MDP_COMP_AAL0] = MUTEX_MOD_IDX_MDP_AAL0,
+ [MDP_COMP_CCORR0] = MUTEX_MOD_IDX_MDP_CCORR0,
+};
+
+static const struct mtk_mdp_driver_data mt8183_mdp_driver_data = {
+ .mdp_probe_infra = mt8183_mdp_probe_infra,
+ .mdp_cfg = &mt8183_plat_cfg,
+ .mdp_mutex_table_idx = mt8183_mutex_idx,
+};
+
+static const struct of_device_id mdp_of_ids[] = {
+ { .compatible = "mediatek,mt8183-mdp3-rdma",
+ .data = &mt8183_mdp_driver_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mdp_of_ids);
+
+static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
+ enum mdp_infra_id id)
+{
+ struct device_node *node;
+ struct platform_device *mdp_pdev = NULL;
+ const struct mtk_mdp_driver_data *mdp_data;
+ const char *compat;
+
+ if (!pdev)
+ return NULL;
+
+ if (id < MDP_INFRA_MMSYS || id >= MDP_INFRA_MAX) {
+ dev_err(&pdev->dev, "Illegal infra id %d\n", id);
+ return NULL;
+ }
+
+ mdp_data = of_device_get_match_data(&pdev->dev);
+ if (!mdp_data) {
+ dev_err(&pdev->dev, "have no driver data to find node\n");
+ return NULL;
+ }
+ compat = mdp_data->mdp_probe_infra[id].compatible;
+
+ node = of_find_compatible_node(NULL, NULL, compat);
+ if (WARN_ON(!node)) {
+ dev_err(&pdev->dev, "find node from id %d failed\n", id);
+ return NULL;
+ }
+
+ mdp_pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (WARN_ON(!mdp_pdev)) {
+ dev_err(&pdev->dev, "find pdev from id %d failed\n", id);
+ return NULL;
+ }
+
+ return mdp_pdev;
+}
+
+struct platform_device *mdp_get_plat_device(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdp_node;
+ struct platform_device *mdp_pdev;
+
+ mdp_node = of_parse_phandle(dev->of_node, MDP_PHANDLE_NAME, 0);
+ if (!mdp_node) {
+ dev_err(dev, "can't get node %s\n", MDP_PHANDLE_NAME);
+ return NULL;
+ }
+
+ mdp_pdev = of_find_device_by_node(mdp_node);
+ of_node_put(mdp_node);
+
+ return mdp_pdev;
+}
+EXPORT_SYMBOL_GPL(mdp_get_plat_device);
+
+int mdp_vpu_get_locked(struct mdp_dev *mdp)
+{
+ int ret = 0;
+
+ if (mdp->vpu_count++ == 0) {
+ ret = rproc_boot(mdp->rproc_handle);
+ if (ret) {
+ dev_err(&mdp->pdev->dev,
+ "vpu_load_firmware failed %d\n", ret);
+ goto err_load_vpu;
+ }
+ ret = mdp_vpu_register(mdp);
+ if (ret) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu register failed %d\n", ret);
+ goto err_reg_vpu;
+ }
+ ret = mdp_vpu_dev_init(&mdp->vpu, mdp->scp, &mdp->vpu_lock);
+ if (ret) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu device init failed %d\n", ret);
+ goto err_init_vpu;
+ }
+ }
+ return 0;
+
+err_init_vpu:
+ mdp_vpu_unregister(mdp);
+err_reg_vpu:
+err_load_vpu:
+ mdp->vpu_count--;
+ return ret;
+}
+
+void mdp_vpu_put_locked(struct mdp_dev *mdp)
+{
+ if (--mdp->vpu_count == 0) {
+ mdp_vpu_dev_deinit(&mdp->vpu);
+ mdp_vpu_unregister(mdp);
+ }
+}
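
mdp_vpu_get_locked()/mdp_vpu_put_locked() above implement a simple use count: only the first get boots the SCP co-processor and registers the VPU, and only the last put tears it down. The standalone sketch below mirrors just that counting, with a plain flag standing in for rproc_boot() and the teardown calls; it is an illustration, not the driver's code.

#include <stdio.h>

struct vpu_ref {
	int count;
	int booted;     /* stands in for the rproc/VPU state */
};

static int vpu_get_locked(struct vpu_ref *v)
{
	if (v->count++ == 0)
		v->booted = 1;          /* first user: boot firmware */
	return 0;
}

static void vpu_put_locked(struct vpu_ref *v)
{
	if (--v->count == 0)
		v->booted = 0;          /* last user: tear everything down */
}

int main(void)
{
	struct vpu_ref v = { 0, 0 };

	vpu_get_locked(&v);
	vpu_get_locked(&v);             /* second user, no re-boot */
	vpu_put_locked(&v);
	vpu_put_locked(&v);             /* back to zero, torn down */
	printf("count=%d booted=%d\n", v.count, v.booted);
	return 0;
}
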
+
+void mdp_video_device_release(struct video_device *vdev)
+{
+ struct mdp_dev *mdp = (struct mdp_dev *)video_get_drvdata(vdev);
+ int i;
+
+ scp_put(mdp->scp);
+
+ destroy_workqueue(mdp->job_wq);
+ destroy_workqueue(mdp->clock_wq);
+
+ pm_runtime_disable(&mdp->pdev->dev);
+
+ vb2_dma_contig_clear_max_seg_size(&mdp->pdev->dev);
+
+ mdp_comp_destroy(mdp);
+ for (i = 0; i < MDP_PIPE_MAX; i++)
+ mtk_mutex_put(mdp->mdp_mutex[i]);
+
+ mdp_vpu_shared_mem_free(&mdp->vpu);
+ v4l2_m2m_release(mdp->m2m_dev);
+ kfree(mdp);
+}
+
+static int mdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mdp_dev *mdp;
+ struct platform_device *mm_pdev;
+ int ret, i;
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return -ENOMEM;
+
+ mdp->pdev = pdev;
+ mdp->mdp_data = of_device_get_match_data(&pdev->dev);
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MMSYS);
+ if (!mm_pdev) {
+ ret = -ENODEV;
+ goto err_return;
+ }
+ mdp->mdp_mmsys = &mm_pdev->dev;
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MUTEX);
+ if (WARN_ON(!mm_pdev)) {
+ ret = -ENODEV;
+ goto err_return;
+ }
+ for (i = 0; i < MDP_PIPE_MAX; i++) {
+ mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev);
+ if (!mdp->mdp_mutex[i]) {
+ ret = -ENODEV;
+ goto err_return;
+ }
+ }
+
+ ret = mdp_comp_config(mdp);
+ if (ret) {
+ dev_err(dev, "Failed to config mdp components\n");
+ goto err_return;
+ }
+
+ mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME, WQ_FREEZABLE, 0);
+ if (!mdp->job_wq) {
+ dev_err(dev, "Unable to create job workqueue\n");
+ ret = -ENOMEM;
+ goto err_deinit_comp;
+ }
+
+ mdp->clock_wq = alloc_workqueue(MDP_MODULE_NAME "-clock", WQ_FREEZABLE,
+ 0);
+ if (!mdp->clock_wq) {
+ dev_err(dev, "Unable to create clock workqueue\n");
+ ret = -ENOMEM;
+ goto err_destroy_job_wq;
+ }
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_SCP);
+ if (WARN_ON(!mm_pdev)) {
+ dev_err(&pdev->dev, "Could not get scp device\n");
+ ret = -ENODEV;
+ goto err_destroy_clock_wq;
+ }
+ mdp->scp = platform_get_drvdata(mm_pdev);
+ mdp->rproc_handle = scp_get_rproc(mdp->scp);
+ dev_dbg(&pdev->dev, "MDP rproc_handle: %pK", mdp->rproc_handle);
+
+ mutex_init(&mdp->vpu_lock);
+ mutex_init(&mdp->m2m_lock);
+
+ mdp->cmdq_clt = cmdq_mbox_create(dev, 0);
+ if (IS_ERR(mdp->cmdq_clt)) {
+ ret = PTR_ERR(mdp->cmdq_clt);
+ goto err_put_scp;
+ }
+
+ init_waitqueue_head(&mdp->callback_wq);
+ ida_init(&mdp->mdp_ida);
+ platform_set_drvdata(pdev, mdp);
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ ret = v4l2_device_register(dev, &mdp->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_mbox_destroy;
+ }
+
+ ret = mdp_m2m_device_register(mdp);
+ if (ret) {
+ v4l2_err(&mdp->v4l2_dev, "Failed to register m2m device\n");
+ goto err_unregister_device;
+ }
+
+ dev_dbg(dev, "mdp-%d registered successfully\n", pdev->id);
+ return 0;
+
+err_unregister_device:
+ v4l2_device_unregister(&mdp->v4l2_dev);
+err_mbox_destroy:
+ cmdq_mbox_destroy(mdp->cmdq_clt);
+err_put_scp:
+ scp_put(mdp->scp);
+err_destroy_clock_wq:
+ destroy_workqueue(mdp->clock_wq);
+err_destroy_job_wq:
+ destroy_workqueue(mdp->job_wq);
+err_deinit_comp:
+ mdp_comp_destroy(mdp);
+err_return:
+ for (i = 0; i < MDP_PIPE_MAX; i++)
+ if (mdp->mdp_mutex[i])
+ mtk_mutex_put(mdp->mdp_mutex[i]);
+ kfree(mdp);
+ dev_dbg(dev, "Errno %d\n", ret);
+ return ret;
+}
+
+static int mdp_remove(struct platform_device *pdev)
+{
+ struct mdp_dev *mdp = platform_get_drvdata(pdev);
+
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+static int __maybe_unused mdp_suspend(struct device *dev)
+{
+ struct mdp_dev *mdp = dev_get_drvdata(dev);
+ int ret;
+
+ atomic_set(&mdp->suspended, 1);
+
+ if (atomic_read(&mdp->job_count)) {
+ ret = wait_event_timeout(mdp->callback_wq,
+ !atomic_read(&mdp->job_count),
+ 2 * HZ);
+ if (ret == 0) {
+ dev_err(dev,
+ "%s:flushed cmdq task incomplete, count=%d\n",
+ __func__, atomic_read(&mdp->job_count));
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mdp_resume(struct device *dev)
+{
+ struct mdp_dev *mdp = dev_get_drvdata(dev);
+
+ atomic_set(&mdp->suspended, 0);
+
+ return 0;
+}
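
mdp_suspend() above marks the device suspended and then gives in-flight CMDQ jobs up to two seconds to drain before failing the transition with -EBUSY; mdp_resume() simply clears the flag. The userspace sketch below reproduces that drain with C11 atomics and a 1 ms polling loop instead of a wait queue, so the timing mechanism is an assumption made only for the example.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>

static atomic_int suspended;
static atomic_int job_count;

static int sketch_suspend(int timeout_ms)
{
	struct timespec ts = { .tv_nsec = 1000 * 1000 };        /* 1 ms */
	int waited_ms = 0;

	atomic_store(&suspended, 1);
	while (atomic_load(&job_count) > 0) {
		if (waited_ms++ >= timeout_ms)
			return -EBUSY;  /* pending jobs did not finish in time */
		nanosleep(&ts, NULL);
	}
	return 0;
}

static int sketch_resume(void)
{
	atomic_store(&suspended, 0);
	return 0;
}

int main(void)
{
	printf("suspend -> %d\n", sketch_suspend(2000));        /* 0: no jobs */
	printf("resume  -> %d\n", sketch_resume());
	return 0;
}
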
+
+static const struct dev_pm_ops mdp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdp_suspend, mdp_resume)
+};
+
+static struct platform_driver mdp_driver = {
+ .probe = mdp_probe,
+ .remove = mdp_remove,
+ .driver = {
+ .name = MDP_MODULE_NAME,
+ .pm = &mdp_pm_ops,
+ .of_match_table = of_match_ptr(mdp_of_ids),
+ },
+};
+
+module_platform_driver(mdp_driver);
+
+MODULE_AUTHOR("Ping-Hsun Wu <ping-hsun.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek image processor 3 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
new file mode 100644
index 000000000000..2ef5fbc4f25a
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_CORE_H__
+#define __MTK_MDP3_CORE_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
+#include "mtk-mdp3-comp.h"
+#include "mtk-mdp3-vpu.h"
+
+#define MDP_MODULE_NAME "mtk-mdp3"
+#define MDP_DEVICE_NAME "MediaTek MDP3"
+#define MDP_PHANDLE_NAME "mediatek,mdp3"
+
+enum mdp_infra_id {
+ MDP_INFRA_MMSYS,
+ MDP_INFRA_MUTEX,
+ MDP_INFRA_SCP,
+ MDP_INFRA_MAX
+};
+
+enum mdp_buffer_usage {
+ MDP_BUFFER_USAGE_HW_READ,
+ MDP_BUFFER_USAGE_MDP,
+ MDP_BUFFER_USAGE_MDP2,
+ MDP_BUFFER_USAGE_ISP,
+ MDP_BUFFER_USAGE_WPE,
+};
+
+struct mdp_platform_config {
+ bool rdma_support_10bit;
+ bool rdma_rsz1_sram_sharing;
+ bool rdma_upsample_repeat_only;
+ bool rsz_disable_dcm_small_sample;
+ bool wrot_filter_constraint;
+};
+
+/* indicates which mutex is used by each pipeline */
+enum mdp_pipe_id {
+ MDP_PIPE_RDMA0,
+ MDP_PIPE_IMGI,
+ MDP_PIPE_WPEI,
+ MDP_PIPE_WPEI2,
+ MDP_PIPE_MAX
+};
+
+struct mtk_mdp_driver_data {
+ const struct of_device_id *mdp_probe_infra;
+ const struct mdp_platform_config *mdp_cfg;
+ const u32 *mdp_mutex_table_idx;
+};
+
+struct mdp_dev {
+ struct platform_device *pdev;
+ struct device *mdp_mmsys;
+ struct mtk_mutex *mdp_mutex[MDP_PIPE_MAX];
+ struct mdp_comp *comp[MDP_MAX_COMP_COUNT];
+ const struct mtk_mdp_driver_data *mdp_data;
+
+ struct workqueue_struct *job_wq;
+ struct workqueue_struct *clock_wq;
+ struct mdp_vpu_dev vpu;
+ struct mtk_scp *scp;
+ struct rproc *rproc_handle;
+ /* protects access to the VPU working buffer info */
+ struct mutex vpu_lock;
+ s32 vpu_count;
+ u32 id_count;
+ struct ida mdp_ida;
+ struct cmdq_client *cmdq_clt;
+ wait_queue_head_t callback_wq;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *m2m_vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ /* protects m2m device operations */
+ struct mutex m2m_lock;
+ atomic_t suspended;
+ atomic_t job_count;
+};
+
+int mdp_vpu_get_locked(struct mdp_dev *mdp);
+void mdp_vpu_put_locked(struct mdp_dev *mdp);
+int mdp_vpu_register(struct mdp_dev *mdp);
+void mdp_vpu_unregister(struct mdp_dev *mdp);
+void mdp_video_device_release(struct video_device *vdev);
+
+#endif /* __MTK_MDP3_CORE_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
new file mode 100644
index 000000000000..5f74ea3b7a52
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/platform_device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+#include "mtk-mdp3-m2m.h"
+
+static inline struct mdp_m2m_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mdp_m2m_ctx, fh);
+}
+
+static inline struct mdp_m2m_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mdp_m2m_ctx, ctrl_handler);
+}
+
+static inline struct mdp_frame *ctx_get_frame(struct mdp_m2m_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->curr_param.output;
+ else
+ return &ctx->curr_param.captures[0];
+}
+
+static inline void mdp_m2m_ctx_set_state(struct mdp_m2m_ctx *ctx, u32 state)
+{
+ atomic_or(state, &ctx->curr_param.state);
+}
+
+static inline bool mdp_m2m_ctx_is_state_set(struct mdp_m2m_ctx *ctx, u32 mask)
+{
+ return ((atomic_read(&ctx->curr_param.state) & mask) == mask);
+}
+
+static void mdp_m2m_process_done(void *priv, int vb_state)
+{
+ struct mdp_m2m_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+
+ src_vbuf = (struct vb2_v4l2_buffer *)
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vbuf = (struct vb2_v4l2_buffer *)
+ v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ ctx->curr_param.frame_no = ctx->frame_count[MDP_M2M_SRC];
+ src_vbuf->sequence = ctx->frame_count[MDP_M2M_SRC]++;
+ dst_vbuf->sequence = ctx->frame_count[MDP_M2M_DST]++;
+ v4l2_m2m_buf_copy_metadata(src_vbuf, dst_vbuf, true);
+
+ v4l2_m2m_buf_done(src_vbuf, vb_state);
+ v4l2_m2m_buf_done(dst_vbuf, vb_state);
+ v4l2_m2m_job_finish(ctx->mdp_dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void mdp_m2m_device_run(void *priv)
+{
+ struct mdp_m2m_ctx *ctx = priv;
+ struct mdp_frame *frame;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ struct img_ipi_frameparam param = {};
+ struct mdp_cmdq_param task = {};
+ enum vb2_buffer_state vb_state = VB2_BUF_STATE_ERROR;
+ int ret;
+
+ if (mdp_m2m_ctx_is_state_set(ctx, MDP_M2M_CTX_ERROR)) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "mdp_m2m_ctx is in error state\n");
+ goto worker_end;
+ }
+
+ param.frame_no = ctx->curr_param.frame_no;
+ param.type = ctx->curr_param.type;
+ param.num_inputs = 1;
+ param.num_outputs = 1;
+
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ mdp_set_src_config(&param.inputs[0], frame, &src_vb->vb2_buf);
+
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ mdp_set_dst_config(&param.outputs[0], frame, &dst_vb->vb2_buf);
+
+ ret = mdp_vpu_process(&ctx->vpu, &param);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "VPU MDP process failed: %d\n", ret);
+ goto worker_end;
+ }
+
+ task.config = ctx->vpu.config;
+ task.param = &param;
+ task.composes[0] = &frame->compose;
+ task.cmdq_cb = NULL;
+ task.cb_data = NULL;
+ task.mdp_ctx = ctx;
+
+ ret = mdp_cmdq_send(ctx->mdp_dev, &task);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "CMDQ sendtask failed: %d\n", ret);
+ goto worker_end;
+ }
+
+ return;
+
+worker_end:
+ mdp_m2m_process_done(ctx, vb_state);
+}
+
+static int mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct mdp_frame *capture;
+ struct vb2_queue *vq;
+ int ret;
+ bool out_streaming, cap_streaming;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->frame_count[MDP_M2M_SRC] = 0;
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type))
+ ctx->frame_count[MDP_M2M_DST] = 0;
+
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ vq = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
+ out_streaming = vb2_is_streaming(vq);
+ vq = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ cap_streaming = vb2_is_streaming(vq);
+
+ /* Check to see if scaling ratio is within supported range */
+ if ((V4L2_TYPE_IS_OUTPUT(q->type) && cap_streaming) ||
+ (V4L2_TYPE_IS_CAPTURE(q->type) && out_streaming)) {
+ ret = mdp_check_scaling_ratio(&capture->crop.c,
+ &capture->compose,
+ capture->rotation,
+ ctx->curr_param.limit);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Out of scaling range\n");
+ return ret;
+ }
+ }
+
+ if (!mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT)) {
+ ret = mdp_vpu_get_locked(ctx->mdp_dev);
+ if (ret)
+ return ret;
+
+ ret = mdp_vpu_ctx_init(&ctx->vpu, &ctx->mdp_dev->vpu,
+ MDP_DEV_M2M);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "VPU init failed %d\n", ret);
+ return -EINVAL;
+ }
+ mdp_m2m_ctx_set_state(ctx, MDP_VPU_INIT);
+ }
+
+ return 0;
+}
+
+static struct vb2_v4l2_buffer *mdp_m2m_buf_remove(struct mdp_m2m_ctx *ctx,
+ unsigned int type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return (struct vb2_v4l2_buffer *)
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ else
+ return (struct vb2_v4l2_buffer *)
+ v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+}
+
+static void mdp_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vb;
+
+ vb = mdp_m2m_buf_remove(ctx, q->type);
+ while (vb) {
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ vb = mdp_m2m_buf_remove(ctx, q->type);
+ }
+}
+
+static int mdp_m2m_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 i;
+
+ pix_mp = &ctx_get_frame(ctx, q->type)->format.fmt.pix_mp;
+
+ /* from VIDIOC_CREATE_BUFS */
+ if (*num_planes) {
+ if (*num_planes != pix_mp->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pix_mp->num_planes; ++i)
+ if (sizes[i] < pix_mp->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ } else { /* from VIDIOC_REQBUFS */
+ *num_planes = pix_mp->num_planes;
+ for (i = 0; i < pix_mp->num_planes; ++i)
+ sizes[i] = pix_mp->plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int mdp_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ u32 i;
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ if (V4L2_TYPE_IS_CAPTURE(vb->type)) {
+ pix_mp = &ctx_get_frame(ctx, vb->type)->format.fmt.pix_mp;
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ vb2_set_plane_payload(vb, i,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+ }
+ return 0;
+}
+
+static int mdp_m2m_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static void mdp_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static const struct vb2_ops mdp_m2m_qops = {
+ .queue_setup = mdp_m2m_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = mdp_m2m_buf_prepare,
+ .start_streaming = mdp_m2m_start_streaming,
+ .stop_streaming = mdp_m2m_stop_streaming,
+ .buf_queue = mdp_m2m_buf_queue,
+ .buf_out_validate = mdp_m2m_buf_out_validate,
+};
+
+static int mdp_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, MDP_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, MDP_DEVICE_NAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int mdp_m2m_enum_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ return mdp_enum_fmt_mplane(f);
+}
+
+static int mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ frame = ctx_get_frame(ctx, f->type);
+ *f = frame->format;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->colorspace = ctx->curr_param.colorspace;
+ pix_mp->xfer_func = ctx->curr_param.xfer_func;
+ pix_mp->ycbcr_enc = ctx->curr_param.ycbcr_enc;
+ pix_mp->quantization = ctx->curr_param.quant;
+
+ return 0;
+}
+
+static int mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame = ctx_get_frame(ctx, f->type);
+ struct mdp_frame *capture;
+ const struct mdp_format *fmt;
+ struct vb2_queue *vq;
+
+ fmt = mdp_try_fmt_mplane(f, &ctx->curr_param, ctx->id);
+ if (!fmt)
+ return -EINVAL;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ frame->format = *f;
+ frame->mdp_fmt = fmt;
+ frame->ycbcr_prof = mdp_map_ycbcr_prof_mplane(f, fmt->mdp_color);
+ frame->usage = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ MDP_BUFFER_USAGE_HW_READ : MDP_BUFFER_USAGE_MDP;
+
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ capture->crop.c.left = 0;
+ capture->crop.c.top = 0;
+ capture->crop.c.width = f->fmt.pix_mp.width;
+ capture->crop.c.height = f->fmt.pix_mp.height;
+ ctx->curr_param.colorspace = f->fmt.pix_mp.colorspace;
+ ctx->curr_param.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->curr_param.quant = f->fmt.pix_mp.quantization;
+ ctx->curr_param.xfer_func = f->fmt.pix_mp.xfer_func;
+ } else {
+ capture->compose.left = 0;
+ capture->compose.top = 0;
+ capture->compose.width = f->fmt.pix_mp.width;
+ capture->compose.height = f->fmt.pix_mp.height;
+ }
+
+ return 0;
+}
+
+static int mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+
+ if (!mdp_try_fmt_mplane(f, &ctx->curr_param, ctx->id))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mdp_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame;
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ valid = mdp_target_is_crop(s->target);
+ else if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ valid = mdp_target_is_compose(s->target);
+
+ if (!valid)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ s->r = frame->crop.c;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ s->r = frame->compose;
+ return 0;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, s->type);
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->format.fmt.pix_mp.width;
+ s->r.height = frame->format.fmt.pix_mp.height;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, s->type);
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->format.fmt.pix_mp.width;
+ s->r.height = frame->format.fmt.pix_mp.height;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int mdp_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame = ctx_get_frame(ctx, s->type);
+ struct mdp_frame *capture;
+ struct v4l2_rect r;
+ struct device *dev = &ctx->mdp_dev->pdev->dev;
+ bool valid = false;
+ int ret;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ valid = (s->target == V4L2_SEL_TGT_CROP);
+ else if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ valid = (s->target == V4L2_SEL_TGT_COMPOSE);
+
+ if (!valid) {
+ dev_dbg(dev, "[%s:%d] invalid type:%u target:%u", __func__,
+ ctx->id, s->type, s->target);
+ return -EINVAL;
+ }
+
+ ret = mdp_try_crop(ctx, &r, s, frame);
+ if (ret)
+ return ret;
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ if (mdp_target_is_crop(s->target))
+ capture->crop.c = r;
+ else
+ capture->compose = r;
+
+ s->r = r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mdp_m2m_ioctl_ops = {
+ .vidioc_querycap = mdp_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap = mdp_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out = mdp_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mdp_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mdp_m2m_g_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mdp_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mdp_m2m_s_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = mdp_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mdp_m2m_try_fmt_mplane,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = mdp_m2m_g_selection,
+ .vidioc_s_selection = mdp_m2m_s_selection,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int mdp_m2m_queue_init(void *priv,
+ struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mdp_m2m_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->ops = &mdp_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = &ctx->mdp_dev->pdev->dev;
+ src_vq->lock = &ctx->ctx_lock;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->ops = &mdp_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+ dst_vq->lock = &ctx->ctx_lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int mdp_m2m_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mdp_m2m_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mdp_frame *capture;
+
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ capture->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ capture->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ capture->rotation = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mdp_m2m_ctrl_ops = {
+ .s_ctrl = mdp_m2m_s_ctrl,
+};
+
+static int mdp_m2m_ctrls_create(struct mdp_m2m_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, MDP_MAX_CTRLS);
+ ctx->ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mdp_m2m_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mdp_m2m_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mdp_m2m_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Failed to register controls\n");
+ return err;
+ }
+ return 0;
+}
+
+static int mdp_m2m_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mdp_dev *mdp = video_get_drvdata(vdev);
+ struct mdp_m2m_ctx *ctx;
+ struct device *dev = &mdp->pdev->dev;
+ int ret;
+ struct v4l2_format default_format = {};
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&mdp->m2m_lock)) {
+ ret = -ERESTARTSYS;
+ goto err_free_ctx;
+ }
+
+ ctx->id = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
+ ctx->mdp_dev = mdp;
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ file->private_data = &ctx->fh;
+ ret = mdp_m2m_ctrls_create(ctx);
+ if (ret)
+ goto err_exit_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_init(&ctx->ctx_lock);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx, mdp_m2m_queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ dev_err(dev, "Failed to initialize m2m context\n");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto err_release_handler;
+ }
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+
+ ctx->curr_param.ctx = ctx;
+ ret = mdp_frameparam_init(&ctx->curr_param);
+ if (ret) {
+ dev_err(dev, "Failed to initialize mdp parameter\n");
+ goto err_release_m2m_ctx;
+ }
+
+ mutex_unlock(&mdp->m2m_lock);
+
+ /* Default format */
+ default_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ default_format.fmt.pix_mp.width = 32;
+ default_format.fmt.pix_mp.height = 32;
+ default_format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUV420M;
+ mdp_m2m_s_fmt_mplane(file, &ctx->fh, &default_format);
+ default_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ mdp_m2m_s_fmt_mplane(file, &ctx->fh, &default_format);
+
+ dev_dbg(dev, "%s:[%d]", __func__, ctx->id);
+
+ return 0;
+
+err_release_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_release_handler:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+err_exit_fh:
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&mdp->m2m_lock);
+err_free_ctx:
+ kfree(ctx);
+
+ return ret;
+}
+
+static int mdp_m2m_release(struct file *file)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mdp_dev *mdp = video_drvdata(file);
+ struct device *dev = &mdp->pdev->dev;
+
+ mutex_lock(&mdp->m2m_lock);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ if (mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT)) {
+ mdp_vpu_ctx_deinit(&ctx->vpu);
+ mdp_vpu_put_locked(mdp);
+ }
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ ida_free(&mdp->mdp_ida, ctx->id);
+ mutex_unlock(&mdp->m2m_lock);
+
+ dev_dbg(dev, "%s:[%d]", __func__, ctx->id);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations mdp_m2m_fops = {
+ .owner = THIS_MODULE,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .open = mdp_m2m_open,
+ .release = mdp_m2m_release,
+};
+
+static const struct v4l2_m2m_ops mdp_m2m_ops = {
+ .device_run = mdp_m2m_device_run,
+};
+
+int mdp_m2m_device_register(struct mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int ret = 0;
+
+ mdp->m2m_vdev = video_device_alloc();
+ if (!mdp->m2m_vdev) {
+ dev_err(dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_video_alloc;
+ }
+ mdp->m2m_vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+ mdp->m2m_vdev->fops = &mdp_m2m_fops;
+ mdp->m2m_vdev->ioctl_ops = &mdp_m2m_ioctl_ops;
+ mdp->m2m_vdev->release = mdp_video_device_release;
+ mdp->m2m_vdev->lock = &mdp->m2m_lock;
+ mdp->m2m_vdev->vfl_dir = VFL_DIR_M2M;
+ mdp->m2m_vdev->v4l2_dev = &mdp->v4l2_dev;
+ snprintf(mdp->m2m_vdev->name, sizeof(mdp->m2m_vdev->name), "%s:m2m",
+ MDP_MODULE_NAME);
+ video_set_drvdata(mdp->m2m_vdev, mdp);
+
+ mdp->m2m_dev = v4l2_m2m_init(&mdp_m2m_ops);
+ if (IS_ERR(mdp->m2m_dev)) {
+ dev_err(dev, "Failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(mdp->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ ret = video_register_device(mdp->m2m_vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(dev, "Failed to register video device\n");
+ goto err_video_register;
+ }
+
+ v4l2_info(&mdp->v4l2_dev, "Driver registered as /dev/video%d",
+ mdp->m2m_vdev->num);
+ return 0;
+
+err_video_register:
+ v4l2_m2m_release(mdp->m2m_dev);
+err_m2m_init:
+ video_device_release(mdp->m2m_vdev);
+err_video_alloc:
+
+ return ret;
+}
+
+void mdp_m2m_device_unregister(struct mdp_dev *mdp)
+{
+ video_unregister_device(mdp->m2m_vdev);
+}
+
+void mdp_m2m_job_finish(struct mdp_m2m_ctx *ctx)
+{
+ enum vb2_buffer_state vb_state = VB2_BUF_STATE_DONE;
+
+ mdp_m2m_process_done(ctx, vb_state);
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h
new file mode 100644
index 000000000000..61ddbaf1bf13
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_M2M_H__
+#define __MTK_MDP3_M2M_H__
+
+#include <media/v4l2-ctrls.h>
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-vpu.h"
+#include "mtk-mdp3-regs.h"
+
+#define MDP_MAX_CTRLS 10
+
+enum {
+ MDP_M2M_SRC = 0,
+ MDP_M2M_DST = 1,
+ MDP_M2M_MAX,
+};
+
+struct mdp_m2m_ctrls {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *rotate;
+};
+
+struct mdp_m2m_ctx {
+ u32 id;
+ struct mdp_dev *mdp_dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct mdp_m2m_ctrls ctrls;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct mdp_vpu_ctx vpu;
+ u32 frame_count[MDP_M2M_MAX];
+
+ struct mdp_frameparam curr_param;
+ /* protects the MDP m2m context */
+ struct mutex ctx_lock;
+};
+
+int mdp_m2m_device_register(struct mdp_dev *mdp);
+void mdp_m2m_device_unregister(struct mdp_dev *mdp);
+void mdp_m2m_job_finish(struct mdp_m2m_ctx *ctx);
+
+#endif /* __MTK_MDP3_M2M_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
new file mode 100644
index 000000000000..4e84a37ecdfc
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <media/v4l2-common.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-regs.h"
+#include "mtk-mdp3-m2m.h"
+
+/*
+ * 10-bit formats are not included in the basic format list below;
+ * add the corresponding format settings before using them.
+ */
+static const struct mdp_format mdp_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ .mdp_color = MDP_COLOR_GREY,
+ .depth = { 8 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565X,
+ .mdp_color = MDP_COLOR_BGR565,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mdp_color = MDP_COLOR_RGB565,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .mdp_color = MDP_COLOR_RGB888,
+ .depth = { 24 },
+ .row_depth = { 24 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .mdp_color = MDP_COLOR_BGR888,
+ .depth = { 24 },
+ .row_depth = { 24 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_ABGR32,
+ .mdp_color = MDP_COLOR_BGRA8888,
+ .depth = { 32 },
+ .row_depth = { 32 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .mdp_color = MDP_COLOR_ARGB8888,
+ .depth = { 32 },
+ .row_depth = { 32 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mdp_color = MDP_COLOR_UYVY,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_VYUY,
+ .mdp_color = MDP_COLOR_VYUY,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mdp_color = MDP_COLOR_YUYV,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .mdp_color = MDP_COLOR_YVYU,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .mdp_color = MDP_COLOR_I420,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .mdp_color = MDP_COLOR_YV12,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .mdp_color = MDP_COLOR_NV12,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .mdp_color = MDP_COLOR_NV21,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .mdp_color = MDP_COLOR_NV16,
+ .depth = { 16 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ .mdp_color = MDP_COLOR_NV61,
+ .depth = { 16 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV24,
+ .mdp_color = MDP_COLOR_NV24,
+ .depth = { 24 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV42,
+ .mdp_color = MDP_COLOR_NV42,
+ .depth = { 24 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_MT21C,
+ .mdp_color = MDP_COLOR_420_BLK_UFO,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 4,
+ .halign = 5,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_MM21,
+ .mdp_color = MDP_COLOR_420_BLK,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 4,
+ .halign = 5,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .mdp_color = MDP_COLOR_NV12,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21M,
+ .mdp_color = MDP_COLOR_NV21,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16M,
+ .mdp_color = MDP_COLOR_NV16,
+ .depth = { 8, 8 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV61M,
+ .mdp_color = MDP_COLOR_NV61,
+ .depth = { 8, 8 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .mdp_color = MDP_COLOR_I420,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420M,
+ .mdp_color = MDP_COLOR_YV12,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }
+};
+
+static const struct mdp_limit mdp_def_limit = {
+ .out_limit = {
+ .wmin = 16,
+ .hmin = 16,
+ .wmax = 8176,
+ .hmax = 8176,
+ },
+ .cap_limit = {
+ .wmin = 2,
+ .hmin = 2,
+ .wmax = 8176,
+ .hmax = 8176,
+ },
+ .h_scale_up_max = 32,
+ .v_scale_up_max = 32,
+ .h_scale_down_max = 20,
+ .v_scale_down_max = 128,
+};
+
+static const struct mdp_format *mdp_find_fmt(u32 pixelformat, u32 type)
+{
+ u32 i, flag;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MDP_FMT_FLAG_OUTPUT :
+ MDP_FMT_FLAG_CAPTURE;
+ for (i = 0; i < ARRAY_SIZE(mdp_formats); ++i) {
+ if (!(mdp_formats[i].flags & flag))
+ continue;
+ if (mdp_formats[i].pixelformat == pixelformat)
+ return &mdp_formats[i];
+ }
+ return NULL;
+}
+
+static const struct mdp_format *mdp_find_fmt_by_index(u32 index, u32 type)
+{
+ u32 i, flag, num = 0;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MDP_FMT_FLAG_OUTPUT :
+ MDP_FMT_FLAG_CAPTURE;
+ for (i = 0; i < ARRAY_SIZE(mdp_formats); ++i) {
+ if (!(mdp_formats[i].flags & flag))
+ continue;
+ if (index == num)
+ return &mdp_formats[i];
+ num++;
+ }
+ return NULL;
+}
+
+enum mdp_ycbcr_profile mdp_map_ycbcr_prof_mplane(struct v4l2_format *f,
+ u32 mdp_color)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+
+ if (MDP_COLOR_IS_RGB(mdp_color))
+ return MDP_YCBCR_PROFILE_FULL_BT601;
+
+ switch (pix_mp->colorspace) {
+ case V4L2_COLORSPACE_JPEG:
+ return MDP_YCBCR_PROFILE_JPEG;
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_DCI_P3:
+ if (pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ return MDP_YCBCR_PROFILE_FULL_BT709;
+ return MDP_YCBCR_PROFILE_BT709;
+ case V4L2_COLORSPACE_BT2020:
+ if (pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ return MDP_YCBCR_PROFILE_FULL_BT2020;
+ return MDP_YCBCR_PROFILE_BT2020;
+ default:
+ if (pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ return MDP_YCBCR_PROFILE_FULL_BT601;
+ return MDP_YCBCR_PROFILE_BT601;
+ }
+}
+
+static void mdp_bound_align_image(u32 *w, u32 *h,
+ struct v4l2_frmsize_stepwise *s,
+ unsigned int salign)
+{
+ unsigned int org_w, org_h;
+
+ org_w = *w;
+ org_h = *h;
+ v4l_bound_align_image(w, s->min_width, s->max_width, s->step_width,
+ h, s->min_height, s->max_height, s->step_height,
+ salign);
+
+ s->min_width = org_w;
+ s->min_height = org_h;
+ v4l2_apply_frmsize_constraints(w, h, s);
+}
+
+static int mdp_clamp_align(s32 *x, int min, int max, unsigned int align)
+{
+ unsigned int mask;
+
+ if (min < 0 || max < 0)
+ return -ERANGE;
+
+ /* Bits that must be zero to be aligned */
+ mask = ~((1 << align) - 1);
+
+ min = (min + ~mask) & mask;
+ max = max & mask;
+ if ((unsigned int)min > (unsigned int)max)
+ return -ERANGE;
+
+ /* Clamp to aligned min and max */
+ *x = clamp(*x, min, max);
+
+ /* Round to nearest aligned value */
+ if (align)
+ *x = (*x + (1 << (align - 1))) & mask;
+ return 0;
+}
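
mdp_clamp_align() rounds the bounds onto the alignment grid, clamps the value, and then rounds it to the nearest aligned position. A standalone sketch with sample numbers (chosen only for illustration): with align = 1 the grid is 2 pixels wide, so a value of 1079 inside [0, 1920] ends up as 1080.

#include <stdio.h>
#include <stdint.h>

static int clamp_align(int32_t *x, int min, int max, unsigned int align)
{
	unsigned int mask = ~((1u << align) - 1);

	if (min < 0 || max < 0)
		return -1;

	min = (min + ~mask) & mask;     /* round the lower bound up */
	max = max & mask;               /* round the upper bound down */
	if ((unsigned int)min > (unsigned int)max)
		return -1;

	if (*x < min)
		*x = min;
	else if (*x > max)
		*x = max;

	if (align)                      /* round to the nearest aligned value */
		*x = (*x + (1 << (align - 1))) & mask;
	return 0;
}

int main(void)
{
	int32_t x = 1079;

	clamp_align(&x, 0, 1920, 1);
	printf("1079 -> %d\n", x);      /* 1080 on a 2-pixel grid */
	return 0;
}
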
+
+int mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f)
+{
+ const struct mdp_format *fmt;
+
+ fmt = mdp_find_fmt_by_index(f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixelformat;
+ return 0;
+}
+
+const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
+ struct mdp_frameparam *param,
+ u32 ctx_id)
+{
+ struct device *dev = &param->ctx->mdp_dev->pdev->dev;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct mdp_format *fmt;
+ const struct mdp_pix_limit *pix_limit;
+ struct v4l2_frmsize_stepwise s;
+ u32 org_w, org_h;
+ unsigned int i;
+
+ fmt = mdp_find_fmt(pix_mp->pixelformat, f->type);
+ if (!fmt) {
+ fmt = mdp_find_fmt_by_index(0, f->type);
+ if (!fmt) {
+ dev_dbg(dev, "%d: pixelformat %c%c%c%c invalid", ctx_id,
+ (pix_mp->pixelformat & 0xff),
+ (pix_mp->pixelformat >> 8) & 0xff,
+ (pix_mp->pixelformat >> 16) & 0xff,
+ (pix_mp->pixelformat >> 24) & 0xff);
+ return NULL;
+ }
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->flags = 0;
+ pix_mp->pixelformat = fmt->pixelformat;
+ if (V4L2_TYPE_IS_CAPTURE(f->type)) {
+ pix_mp->colorspace = param->colorspace;
+ pix_mp->xfer_func = param->xfer_func;
+ pix_mp->ycbcr_enc = param->ycbcr_enc;
+ pix_mp->quantization = param->quant;
+ }
+
+ pix_limit = V4L2_TYPE_IS_OUTPUT(f->type) ? &param->limit->out_limit :
+ &param->limit->cap_limit;
+ s.min_width = pix_limit->wmin;
+ s.max_width = pix_limit->wmax;
+ s.step_width = fmt->walign;
+ s.min_height = pix_limit->hmin;
+ s.max_height = pix_limit->hmax;
+ s.step_height = fmt->halign;
+ org_w = pix_mp->width;
+ org_h = pix_mp->height;
+
+ mdp_bound_align_image(&pix_mp->width, &pix_mp->height, &s, fmt->salign);
+ if (org_w != pix_mp->width || org_h != pix_mp->height)
+ dev_dbg(dev, "%d: size change: %ux%u to %ux%u", ctx_id,
+ org_w, org_h, pix_mp->width, pix_mp->height);
+
+ if (pix_mp->num_planes && pix_mp->num_planes != fmt->num_planes)
+ dev_dbg(dev, "%d num of planes change: %u to %u", ctx_id,
+ pix_mp->num_planes, fmt->num_planes);
+ pix_mp->num_planes = fmt->num_planes;
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ u32 min_bpl = (pix_mp->width * fmt->row_depth[i]) >> 3;
+ u32 max_bpl = (pix_limit->wmax * fmt->row_depth[i]) >> 3;
+ u32 bpl = pix_mp->plane_fmt[i].bytesperline;
+ u32 min_si, max_si;
+ u32 si = pix_mp->plane_fmt[i].sizeimage;
+
+ bpl = clamp(bpl, min_bpl, max_bpl);
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+
+ min_si = (bpl * pix_mp->height * fmt->depth[i]) /
+ fmt->row_depth[i];
+ max_si = (bpl * s.max_height * fmt->depth[i]) /
+ fmt->row_depth[i];
+
+ si = clamp(si, min_si, max_si);
+ pix_mp->plane_fmt[i].sizeimage = si;
+
+ dev_dbg(dev, "%d: p%u, bpl:%u [%u, %u], sizeimage:%u [%u, %u]",
+ ctx_id, i, bpl, min_bpl, max_bpl, si, min_si, max_si);
+ }
+
+ return fmt;
+}
+
+static int mdp_clamp_start(s32 *x, int min, int max, unsigned int align,
+ u32 flags)
+{
+ if (flags & V4L2_SEL_FLAG_GE)
+ max = *x;
+ if (flags & V4L2_SEL_FLAG_LE)
+ min = *x;
+ return mdp_clamp_align(x, min, max, align);
+}
+
+static int mdp_clamp_end(s32 *x, int min, int max, unsigned int align,
+ u32 flags)
+{
+ if (flags & V4L2_SEL_FLAG_GE)
+ min = *x;
+ if (flags & V4L2_SEL_FLAG_LE)
+ max = *x;
+ return mdp_clamp_align(x, min, max, align);
+}
+
+int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
+ const struct v4l2_selection *s, struct mdp_frame *frame)
+{
+ struct device *dev = &ctx->mdp_dev->pdev->dev;
+ s32 left, top, right, bottom;
+ u32 framew, frameh, walign, halign;
+ int ret;
+
+ dev_dbg(dev, "%d target:%d, set:(%d,%d) %ux%u", ctx->id,
+ s->target, s->r.left, s->r.top, s->r.width, s->r.height);
+
+ left = s->r.left;
+ top = s->r.top;
+ right = s->r.left + s->r.width;
+ bottom = s->r.top + s->r.height;
+ framew = frame->format.fmt.pix_mp.width;
+ frameh = frame->format.fmt.pix_mp.height;
+
+ if (mdp_target_is_crop(s->target)) {
+ walign = 1;
+ halign = 1;
+ } else {
+ walign = frame->mdp_fmt->walign;
+ halign = frame->mdp_fmt->halign;
+ }
+
+ dev_dbg(dev, "%d align:%u,%u, bound:%ux%u", ctx->id,
+ walign, halign, framew, frameh);
+
+ ret = mdp_clamp_start(&left, 0, right, walign, s->flags);
+ if (ret)
+ return ret;
+ ret = mdp_clamp_start(&top, 0, bottom, halign, s->flags);
+ if (ret)
+ return ret;
+ ret = mdp_clamp_end(&right, left, framew, walign, s->flags);
+ if (ret)
+ return ret;
+ ret = mdp_clamp_end(&bottom, top, frameh, halign, s->flags);
+ if (ret)
+ return ret;
+
+ r->left = left;
+ r->top = top;
+ r->width = right - left;
+ r->height = bottom - top;
+
+ dev_dbg(dev, "%d crop:(%d,%d) %ux%u", ctx->id,
+ r->left, r->top, r->width, r->height);
+ return 0;
+}
+
+int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose, s32 rotation,
+ const struct mdp_limit *limit)
+{
+ u32 crop_w, crop_h, comp_w, comp_h;
+
+ crop_w = crop->width;
+ crop_h = crop->height;
+ if (rotation == 90 || rotation == 270) {
+ comp_w = compose->height;
+ comp_h = compose->width;
+ } else {
+ comp_w = compose->width;
+ comp_h = compose->height;
+ }
+
+ if ((crop_w / comp_w) > limit->h_scale_down_max ||
+ (crop_h / comp_h) > limit->v_scale_down_max ||
+ (comp_w / crop_w) > limit->h_scale_up_max ||
+ (comp_h / crop_h) > limit->v_scale_up_max)
+ return -ERANGE;
+ return 0;
+}
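
A quick numeric check of the ratio test above, using the mt8183 defaults from mdp_def_limit (32x upscale, 20x horizontal and 128x vertical downscale). Cropping 4000x3000 and composing to 125x120 is a 32x horizontal reduction, which exceeds the 20x limit and is rejected, while an ordinary 1920x1080 to 1280x720 conversion passes; the resolutions are example values only.

#include <stdio.h>

struct limits { unsigned int h_up, v_up, h_down, v_down; };

static int check_ratio(unsigned int crop_w, unsigned int crop_h,
		       unsigned int comp_w, unsigned int comp_h,
		       const struct limits *l)
{
	if (crop_w / comp_w > l->h_down || crop_h / comp_h > l->v_down ||
	    comp_w / crop_w > l->h_up || comp_h / crop_h > l->v_up)
		return -1;
	return 0;
}

int main(void)
{
	const struct limits l = { 32, 32, 20, 128 };    /* mirrors mdp_def_limit */

	printf("4000x3000 -> 125x120 : %s\n",
	       check_ratio(4000, 3000, 125, 120, &l) ? "rejected" : "ok");
	printf("1920x1080 -> 1280x720: %s\n",
	       check_ratio(1920, 1080, 1280, 720, &l) ? "rejected" : "ok");
	return 0;
}
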
+
+/* Stride that is accepted by MDP HW */
+static u32 mdp_fmt_get_stride(const struct mdp_format *fmt,
+ u32 bytesperline, unsigned int plane)
+{
+ enum mdp_color c = fmt->mdp_color;
+ u32 stride;
+
+ stride = (bytesperline * MDP_COLOR_BITS_PER_PIXEL(c))
+ / fmt->row_depth[0];
+ if (plane == 0)
+ return stride;
+ if (plane < MDP_COLOR_GET_PLANE_COUNT(c)) {
+ if (MDP_COLOR_IS_BLOCK_MODE(c))
+ stride = stride / 2;
+ return stride;
+ }
+ return 0;
+}
+
+/* Stride accepted by the MDP HW for formats with contiguous planes */
+static u32 mdp_fmt_get_stride_contig(const struct mdp_format *fmt,
+ u32 pix_stride, unsigned int plane)
+{
+ enum mdp_color c = fmt->mdp_color;
+ u32 stride = pix_stride;
+
+ if (plane == 0)
+ return stride;
+ if (plane < MDP_COLOR_GET_PLANE_COUNT(c)) {
+ stride = stride >> MDP_COLOR_GET_H_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_UV_COPLANE(c) && !MDP_COLOR_IS_BLOCK_MODE(c))
+ stride = stride * 2;
+ return stride;
+ }
+ return 0;
+}
+
+/* Plane size that is accepted by MDP HW */
+static u32 mdp_fmt_get_plane_size(const struct mdp_format *fmt,
+ u32 stride, u32 height, unsigned int plane)
+{
+ enum mdp_color c = fmt->mdp_color;
+ u32 bytesperline;
+
+ bytesperline = (stride * fmt->row_depth[0])
+ / MDP_COLOR_BITS_PER_PIXEL(c);
+ if (plane == 0)
+ return bytesperline * height;
+ if (plane < MDP_COLOR_GET_PLANE_COUNT(c)) {
+ height = height >> MDP_COLOR_GET_V_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_BLOCK_MODE(c))
+ bytesperline = bytesperline * 2;
+ return bytesperline * height;
+ }
+ return 0;
+}
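
Worked numbers for the three helpers above, for a single-plane V4L2 NV12 buffer (MDP_COLOR_NV12: two hardware planes, 4:2:0 subsampling, 8-bit samples). With bytesperline equal to the width, the chroma plane keeps the luma stride (halved by the horizontal subsampling, then doubled because Cb and Cr share a plane) and gets half the height. The standalone arithmetic sketch below assumes a 1920x1080 frame purely as an example.

#include <stdio.h>
#include <stdint.h>

static void nv12_plane_sizes(uint32_t width, uint32_t height)
{
	uint32_t stride = width;                  /* 8-bit luma, 1 byte/pixel */
	uint32_t y_size = stride * height;
	uint32_t uv_size = stride * (height / 2); /* same stride, half height */

	printf("Y: stride=%u size=%u  CbCr: stride=%u size=%u\n",
	       stride, y_size, stride, uv_size);
}

int main(void)
{
	nv12_plane_sizes(1920, 1080);   /* Y: 2073600 bytes, CbCr: 1036800 bytes */
	return 0;
}
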
+
+static void mdp_prepare_buffer(struct img_image_buffer *b,
+ struct mdp_frame *frame, struct vb2_buffer *vb)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &frame->format.fmt.pix_mp;
+ unsigned int i;
+
+ b->format.colorformat = frame->mdp_fmt->mdp_color;
+ b->format.ycbcr_prof = frame->ycbcr_prof;
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ u32 stride = mdp_fmt_get_stride(frame->mdp_fmt,
+ pix_mp->plane_fmt[i].bytesperline, i);
+
+ b->format.plane_fmt[i].stride = stride;
+ b->format.plane_fmt[i].size =
+ mdp_fmt_get_plane_size(frame->mdp_fmt, stride,
+ pix_mp->height, i);
+ b->iova[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+ for (; i < MDP_COLOR_GET_PLANE_COUNT(b->format.colorformat); ++i) {
+ u32 stride = mdp_fmt_get_stride_contig(frame->mdp_fmt,
+ b->format.plane_fmt[0].stride, i);
+
+ b->format.plane_fmt[i].stride = stride;
+ b->format.plane_fmt[i].size =
+ mdp_fmt_get_plane_size(frame->mdp_fmt, stride,
+ pix_mp->height, i);
+ b->iova[i] = b->iova[i - 1] + b->format.plane_fmt[i - 1].size;
+ }
+ b->usage = frame->usage;
+}
+
+void mdp_set_src_config(struct img_input *in,
+ struct mdp_frame *frame, struct vb2_buffer *vb)
+{
+ in->buffer.format.width = frame->format.fmt.pix_mp.width;
+ in->buffer.format.height = frame->format.fmt.pix_mp.height;
+ mdp_prepare_buffer(&in->buffer, frame, vb);
+}
+
+static u32 mdp_to_fixed(u32 *r, struct v4l2_fract *f)
+{
+ u32 q;
+
+ if (f->denominator == 0) {
+ *r = 0;
+ return 0;
+ }
+
+ q = f->numerator / f->denominator;
+ *r = div_u64(((u64)f->numerator - q * f->denominator) <<
+ IMG_SUBPIXEL_SHIFT, f->denominator);
+ return q;
+}
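
mdp_to_fixed() splits a v4l2_fract into an integer quotient plus a fixed-point sub-pixel remainder. The standalone sketch below assumes a sub-pixel shift of 20 bits purely for illustration; the real IMG_SUBPIXEL_SHIFT value comes from mtk-img-ipi.h.

#include <stdio.h>
#include <stdint.h>

#define SUBPIXEL_SHIFT 20       /* assumed value, for illustration only */

static uint32_t to_fixed(uint32_t *frac, uint32_t num, uint32_t den)
{
	uint32_t q;

	if (!den) {
		*frac = 0;
		return 0;
	}
	q = num / den;
	*frac = (uint32_t)((((uint64_t)num - (uint64_t)q * den)
			    << SUBPIXEL_SHIFT) / den);
	return q;
}

int main(void)
{
	uint32_t frac;
	uint32_t q = to_fixed(&frac, 7, 2);     /* 3.5 */

	printf("7/2 -> %u + %u/%u\n", q, frac, 1u << SUBPIXEL_SHIFT);
	return 0;
}
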
+
+static void mdp_set_src_crop(struct img_crop *c, struct mdp_crop *crop)
+{
+ c->left = crop->c.left
+ + mdp_to_fixed(&c->left_subpix, &crop->left_subpix);
+ c->top = crop->c.top
+ + mdp_to_fixed(&c->top_subpix, &crop->top_subpix);
+ c->width = crop->c.width
+ + mdp_to_fixed(&c->width_subpix, &crop->width_subpix);
+ c->height = crop->c.height
+ + mdp_to_fixed(&c->height_subpix, &crop->height_subpix);
+}
+
+static void mdp_set_orientation(struct img_output *out,
+ s32 rotation, bool hflip, bool vflip)
+{
+ u8 flip = 0;
+
+ if (hflip)
+ flip ^= 1;
+ if (vflip) {
+ /*
+ * A vertical flip is equivalent to
+ * a 180-degree rotation with a horizontal flip
+ */
+ rotation += 180;
+ flip ^= 1;
+ }
+
+ out->rotation = rotation % 360;
+ if (flip != 0)
+ out->flags |= IMG_CTRL_FLAG_HFLIP;
+ else
+ out->flags &= ~IMG_CTRL_FLAG_HFLIP;
+}
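
mdp_set_orientation() folds a vertical flip into the rotation: a vflip is expressed as a 180-degree rotation plus a horizontal flip, so (rotation 90, vflip) becomes (rotation 270, hflip). A standalone sketch of that normalisation with example inputs:

#include <stdio.h>
#include <stdbool.h>

static void normalise(int rotation, bool hflip, bool vflip,
		      int *out_rot, bool *out_hflip)
{
	unsigned int flip = 0;

	if (hflip)
		flip ^= 1;
	if (vflip) {
		rotation += 180;        /* vflip == 180-degree rotation + hflip */
		flip ^= 1;
	}
	*out_rot = rotation % 360;
	*out_hflip = flip;
}

int main(void)
{
	int rot;
	bool hf;

	normalise(90, false, true, &rot, &hf);
	printf("rot=%d hflip=%d\n", rot, hf);   /* rot=270 hflip=1 */
	return 0;
}
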
+
+void mdp_set_dst_config(struct img_output *out,
+ struct mdp_frame *frame, struct vb2_buffer *vb)
+{
+ out->buffer.format.width = frame->compose.width;
+ out->buffer.format.height = frame->compose.height;
+ mdp_prepare_buffer(&out->buffer, frame, vb);
+ mdp_set_src_crop(&out->crop, &frame->crop);
+ mdp_set_orientation(out, frame->rotation, frame->hflip, frame->vflip);
+}
+
+int mdp_frameparam_init(struct mdp_frameparam *param)
+{
+ struct mdp_frame *frame;
+
+ if (!param)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&param->list);
+ param->limit = &mdp_def_limit;
+ param->type = MDP_STREAM_TYPE_BITBLT;
+
+ frame = &param->output;
+ frame->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ frame->mdp_fmt = mdp_try_fmt_mplane(&frame->format, param, 0);
+ frame->ycbcr_prof =
+ mdp_map_ycbcr_prof_mplane(&frame->format,
+ frame->mdp_fmt->mdp_color);
+ frame->usage = MDP_BUFFER_USAGE_HW_READ;
+
+ param->num_captures = 1;
+ frame = &param->captures[0];
+ frame->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ frame->mdp_fmt = mdp_try_fmt_mplane(&frame->format, param, 0);
+ frame->ycbcr_prof =
+ mdp_map_ycbcr_prof_mplane(&frame->format,
+ frame->mdp_fmt->mdp_color);
+ frame->usage = MDP_BUFFER_USAGE_MDP;
+ frame->crop.c.width = param->output.format.fmt.pix_mp.width;
+ frame->crop.c.height = param->output.format.fmt.pix_mp.height;
+ frame->compose.width = frame->format.fmt.pix_mp.width;
+ frame->compose.height = frame->format.fmt.pix_mp.height;
+
+ return 0;
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
new file mode 100644
index 000000000000..f995e536d45f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_REGS_H__
+#define __MTK_MDP3_REGS_H__
+
+#include <linux/videodev2.h>
+#include <media/videobuf2-core.h>
+#include "mtk-img-ipi.h"
+
+/*
+ * MDP native color code
+ * Plane count: 1, 2, 3
+ * H-subsample: 0, 1, 2
+ * V-subsample: 0, 1
+ * Color group: 0-RGB, 1-YUV, 2-raw
+ */
+#define MDP_COLOR(PACKED, LOOSE, VIDEO, PLANE, HF, VF, BITS, GROUP, SWAP, ID)\
+ (((PACKED) << 27) | ((LOOSE) << 26) | ((VIDEO) << 23) |\
+ ((PLANE) << 21) | ((HF) << 19) | ((VF) << 18) | ((BITS) << 8) |\
+ ((GROUP) << 6) | ((SWAP) << 5) | ((ID) << 0))
+
+#define MDP_COLOR_IS_10BIT_PACKED(c) ((0x08000000 & (c)) >> 27)
+#define MDP_COLOR_IS_10BIT_LOOSE(c) (((0x0c000000 & (c)) >> 26) == 1)
+#define MDP_COLOR_IS_10BIT_TILE(c) (((0x0c000000 & (c)) >> 26) == 3)
+#define MDP_COLOR_IS_UFP(c) ((0x02000000 & (c)) >> 25)
+#define MDP_COLOR_IS_INTERLACED(c) ((0x01000000 & (c)) >> 24)
+#define MDP_COLOR_IS_BLOCK_MODE(c) ((0x00800000 & (c)) >> 23)
+#define MDP_COLOR_GET_PLANE_COUNT(c) ((0x00600000 & (c)) >> 21)
+#define MDP_COLOR_GET_H_SUBSAMPLE(c) ((0x00180000 & (c)) >> 19)
+#define MDP_COLOR_GET_V_SUBSAMPLE(c) ((0x00040000 & (c)) >> 18)
+#define MDP_COLOR_BITS_PER_PIXEL(c) ((0x0003ff00 & (c)) >> 8)
+#define MDP_COLOR_GET_GROUP(c) ((0x000000c0 & (c)) >> 6)
+#define MDP_COLOR_IS_SWAPPED(c) ((0x00000020 & (c)) >> 5)
+#define MDP_COLOR_GET_UNIQUE_ID(c) ((0x0000001f & (c)) >> 0)
+#define MDP_COLOR_GET_HW_FORMAT(c) ((0x0000001f & (c)) >> 0)
+
+#define MDP_COLOR_IS_RGB(c) (MDP_COLOR_GET_GROUP(c) == 0)
+#define MDP_COLOR_IS_YUV(c) (MDP_COLOR_GET_GROUP(c) == 1)
+
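
Each MDP native colour code above is a packed bitfield, and the accessor macros just mask and shift it back apart. The standalone sketch below re-packs the MDP_COLOR_NV12 value (two planes, one bit of horizontal and vertical subsampling, 8-bit samples, YUV group, hardware format id 12) and decodes it with the same masks; the macro is duplicated here only so the example compiles on its own.

#include <stdio.h>

#define DEMO_COLOR(PACKED, LOOSE, VIDEO, PLANE, HF, VF, BITS, GROUP, SWAP, ID) \
	(((PACKED) << 27) | ((LOOSE) << 26) | ((VIDEO) << 23) | \
	 ((PLANE) << 21) | ((HF) << 19) | ((VF) << 18) | ((BITS) << 8) | \
	 ((GROUP) << 6) | ((SWAP) << 5) | ((ID) << 0))

int main(void)
{
	unsigned int nv12 = DEMO_COLOR(0, 0, 0, 2, 1, 1, 8, 1, 0, 12);

	printf("planes=%u hsub=%u vsub=%u bpp=%u group=%u id=%u\n",
	       (nv12 & 0x00600000) >> 21, (nv12 & 0x00180000) >> 19,
	       (nv12 & 0x00040000) >> 18, (nv12 & 0x0003ff00) >> 8,
	       (nv12 & 0x000000c0) >> 6, nv12 & 0x1f);
	return 0;
}
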
+enum mdp_color {
+ MDP_COLOR_UNKNOWN = 0,
+
+ //MDP_COLOR_FULLG8,
+ MDP_COLOR_FULLG8_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8 = MDP_COLOR_FULLG8_BGGR,
+
+ //MDP_COLOR_FULLG10,
+ MDP_COLOR_FULLG10_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10 = MDP_COLOR_FULLG10_BGGR,
+
+ //MDP_COLOR_FULLG12,
+ MDP_COLOR_FULLG12_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12 = MDP_COLOR_FULLG12_BGGR,
+
+ //MDP_COLOR_FULLG14,
+ MDP_COLOR_FULLG14_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14 = MDP_COLOR_FULLG14_BGGR,
+
+ MDP_COLOR_UFO10 = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 24),
+
+ //MDP_COLOR_BAYER8,
+ MDP_COLOR_BAYER8_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8 = MDP_COLOR_BAYER8_BGGR,
+
+ //MDP_COLOR_BAYER10,
+ MDP_COLOR_BAYER10_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10 = MDP_COLOR_BAYER10_BGGR,
+
+ //MDP_COLOR_BAYER12,
+ MDP_COLOR_BAYER12_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12 = MDP_COLOR_BAYER12_BGGR,
+
+ //MDP_COLOR_BAYER14,
+ MDP_COLOR_BAYER14_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14 = MDP_COLOR_BAYER14_BGGR,
+
+ MDP_COLOR_RGB48 = MDP_COLOR(0, 0, 0, 1, 0, 0, 48, 0, 0, 23),
+ /* For bayer+mono raw-16 */
+ MDP_COLOR_RGB565_RAW = MDP_COLOR(0, 0, 0, 1, 0, 0, 16, 2, 0, 0),
+
+ MDP_COLOR_BAYER8_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 2, 0, 22),
+ MDP_COLOR_BAYER10_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 22),
+ MDP_COLOR_BAYER12_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 12, 2, 0, 22),
+ MDP_COLOR_BAYER14_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 14, 2, 0, 22),
+
+ /* Unified formats */
+ MDP_COLOR_GREY = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 1, 0, 7),
+
+ MDP_COLOR_RGB565 = MDP_COLOR(0, 0, 0, 1, 0, 0, 16, 0, 0, 0),
+ MDP_COLOR_BGR565 = MDP_COLOR(0, 0, 0, 1, 0, 0, 16, 0, 1, 0),
+ MDP_COLOR_RGB888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 0, 1, 1),
+ MDP_COLOR_BGR888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 0, 0, 1),
+ MDP_COLOR_RGBA8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 1, 2),
+ MDP_COLOR_BGRA8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 0, 2),
+ MDP_COLOR_ARGB8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 1, 3),
+ MDP_COLOR_ABGR8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 0, 3),
+
+ MDP_COLOR_UYVY = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 0, 4),
+ MDP_COLOR_VYUY = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 1, 4),
+ MDP_COLOR_YUYV = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 0, 5),
+ MDP_COLOR_YVYU = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 1, 5),
+
+ MDP_COLOR_I420 = MDP_COLOR(0, 0, 0, 3, 1, 1, 8, 1, 0, 8),
+ MDP_COLOR_YV12 = MDP_COLOR(0, 0, 0, 3, 1, 1, 8, 1, 1, 8),
+ MDP_COLOR_I422 = MDP_COLOR(0, 0, 0, 3, 1, 0, 8, 1, 0, 9),
+ MDP_COLOR_YV16 = MDP_COLOR(0, 0, 0, 3, 1, 0, 8, 1, 1, 9),
+ MDP_COLOR_I444 = MDP_COLOR(0, 0, 0, 3, 0, 0, 8, 1, 0, 10),
+ MDP_COLOR_YV24 = MDP_COLOR(0, 0, 0, 3, 0, 0, 8, 1, 1, 10),
+
+ MDP_COLOR_NV12 = MDP_COLOR(0, 0, 0, 2, 1, 1, 8, 1, 0, 12),
+ MDP_COLOR_NV21 = MDP_COLOR(0, 0, 0, 2, 1, 1, 8, 1, 1, 12),
+ MDP_COLOR_NV16 = MDP_COLOR(0, 0, 0, 2, 1, 0, 8, 1, 0, 13),
+ MDP_COLOR_NV61 = MDP_COLOR(0, 0, 0, 2, 1, 0, 8, 1, 1, 13),
+ MDP_COLOR_NV24 = MDP_COLOR(0, 0, 0, 2, 0, 0, 8, 1, 0, 14),
+ MDP_COLOR_NV42 = MDP_COLOR(0, 0, 0, 2, 0, 0, 8, 1, 1, 14),
+
+ /* MediaTek proprietary formats */
+ /* UFO encoded block mode */
+ MDP_COLOR_420_BLK_UFO = MDP_COLOR(0, 0, 5, 2, 1, 1, 256, 1, 0, 12),
+ /* Block mode */
+ MDP_COLOR_420_BLK = MDP_COLOR(0, 0, 1, 2, 1, 1, 256, 1, 0, 12),
+ /* Block mode + field mode */
+ MDP_COLOR_420_BLKI = MDP_COLOR(0, 0, 3, 2, 1, 1, 256, 1, 0, 12),
+ /* Block mode */
+ MDP_COLOR_422_BLK = MDP_COLOR(0, 0, 1, 1, 1, 0, 512, 1, 0, 4),
+
+ MDP_COLOR_IYU2 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 1, 0, 25),
+ MDP_COLOR_YUV444 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 1, 0, 30),
+
+ /* Packed 10-bit formats */
+ MDP_COLOR_RGBA1010102 = MDP_COLOR(1, 0, 0, 1, 0, 0, 32, 0, 1, 2),
+ MDP_COLOR_BGRA1010102 = MDP_COLOR(1, 0, 0, 1, 0, 0, 32, 0, 0, 2),
+ /* Packed 10-bit UYVY */
+ MDP_COLOR_UYVY_10P = MDP_COLOR(1, 0, 0, 1, 1, 0, 20, 1, 0, 4),
+ /* Packed 10-bit NV21 */
+ MDP_COLOR_NV21_10P = MDP_COLOR(1, 0, 0, 2, 1, 1, 10, 1, 1, 12),
+ /* 10-bit block mode */
+ MDP_COLOR_420_BLK_10_H = MDP_COLOR(1, 0, 1, 2, 1, 1, 320, 1, 0, 12),
+ /* 10-bit HEVC tile mode */
+ MDP_COLOR_420_BLK_10_V = MDP_COLOR(1, 1, 1, 2, 1, 1, 320, 1, 0, 12),
+ /* UFO encoded 10-bit block mode */
+ MDP_COLOR_420_BLK_U10_H = MDP_COLOR(1, 0, 5, 2, 1, 1, 320, 1, 0, 12),
+ /* UFO encoded 10-bit HEVC tile mode */
+ MDP_COLOR_420_BLK_U10_V = MDP_COLOR(1, 1, 5, 2, 1, 1, 320, 1, 0, 12),
+
+ /* Loose 10-bit formats */
+ MDP_COLOR_UYVY_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 0, 4),
+ MDP_COLOR_VYUY_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 1, 4),
+ MDP_COLOR_YUYV_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 0, 5),
+ MDP_COLOR_YVYU_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 1, 5),
+ MDP_COLOR_NV12_10L = MDP_COLOR(0, 1, 0, 2, 1, 1, 10, 1, 0, 12),
+ MDP_COLOR_NV21_10L = MDP_COLOR(0, 1, 0, 2, 1, 1, 10, 1, 1, 12),
+ MDP_COLOR_NV16_10L = MDP_COLOR(0, 1, 0, 2, 1, 0, 10, 1, 0, 13),
+ MDP_COLOR_NV61_10L = MDP_COLOR(0, 1, 0, 2, 1, 0, 10, 1, 1, 13),
+ MDP_COLOR_YV12_10L = MDP_COLOR(0, 1, 0, 3, 1, 1, 10, 1, 1, 8),
+ MDP_COLOR_I420_10L = MDP_COLOR(0, 1, 0, 3, 1, 1, 10, 1, 0, 8),
+};
+
+static inline bool MDP_COLOR_IS_UV_COPLANE(enum mdp_color c)
+{
+ return (MDP_COLOR_GET_PLANE_COUNT(c) == 2 && MDP_COLOR_IS_YUV(c));
+}
+
+/* Minimum Y stride that is accepted by MDP HW */
+static inline u32 mdp_color_get_min_y_stride(enum mdp_color c, u32 width)
+{
+ return ((MDP_COLOR_BITS_PER_PIXEL(c) * width) + 4) >> 3;
+}
+
+/* Minimum UV stride that is accepted by MDP HW */
+static inline u32 mdp_color_get_min_uv_stride(enum mdp_color c, u32 width)
+{
+ u32 min_stride;
+
+ if (MDP_COLOR_GET_PLANE_COUNT(c) == 1)
+ return 0;
+ min_stride = mdp_color_get_min_y_stride(c, width)
+ >> MDP_COLOR_GET_H_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_UV_COPLANE(c) && !MDP_COLOR_IS_BLOCK_MODE(c))
+ min_stride = min_stride * 2;
+ return min_stride;
+}
+
+/* Minimum Y plane size that is necessary in buffer */
+static inline u32 mdp_color_get_min_y_size(enum mdp_color c,
+ u32 width, u32 height)
+{
+ if (MDP_COLOR_IS_BLOCK_MODE(c))
+ return ((MDP_COLOR_BITS_PER_PIXEL(c) * width) >> 8) * height;
+ return mdp_color_get_min_y_stride(c, width) * height;
+}
+
+/* Minimum UV plane size that is necessary in buffer */
+static inline u32 mdp_color_get_min_uv_size(enum mdp_color c,
+ u32 width, u32 height)
+{
+ height = height >> MDP_COLOR_GET_V_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_BLOCK_MODE(c) && (MDP_COLOR_GET_PLANE_COUNT(c) > 1))
+ return ((MDP_COLOR_BITS_PER_PIXEL(c) * width) >> 8) * height;
+ return mdp_color_get_min_uv_stride(c, width) * height;
+}
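Editor's note (not part of the patch): for concreteness, plugging MDP_COLOR_NV12 at 1280x720 into the stride/size helpers above works out as follows; the numbers are illustrative only.

        /* Illustrative values only */
        mdp_color_get_min_y_stride(MDP_COLOR_NV12, 1280);     /* ((8 * 1280) + 4) >> 3 = 1280 */
        mdp_color_get_min_uv_stride(MDP_COLOR_NV12, 1280);    /* (1280 >> 1) * 2 = 1280 (Cb/Cr interleaved) */
        mdp_color_get_min_y_size(MDP_COLOR_NV12, 1280, 720);  /* 1280 * 720 = 921600 bytes */
        mdp_color_get_min_uv_size(MDP_COLOR_NV12, 1280, 720); /* 1280 * (720 >> 1) = 460800 bytes */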
+
+/* Combine colorspace, xfer_func, ycbcr_encoding, and quantization */
+enum mdp_ycbcr_profile {
+ /* V4L2_YCBCR_ENC_601 and V4L2_QUANTIZATION_LIM_RANGE */
+ MDP_YCBCR_PROFILE_BT601,
+ /* V4L2_YCBCR_ENC_709 and V4L2_QUANTIZATION_LIM_RANGE */
+ MDP_YCBCR_PROFILE_BT709,
+ /* V4L2_YCBCR_ENC_601 and V4L2_QUANTIZATION_FULL_RANGE */
+ MDP_YCBCR_PROFILE_JPEG,
+ MDP_YCBCR_PROFILE_FULL_BT601 = MDP_YCBCR_PROFILE_JPEG,
+
+ /* Colorspaces not supported for capture */
+ /* V4L2_YCBCR_ENC_BT2020 and V4L2_QUANTIZATION_LIM_RANGE */
+ MDP_YCBCR_PROFILE_BT2020,
+ /* V4L2_YCBCR_ENC_709 and V4L2_QUANTIZATION_FULL_RANGE */
+ MDP_YCBCR_PROFILE_FULL_BT709,
+ /* V4L2_YCBCR_ENC_BT2020 and V4L2_QUANTIZATION_FULL_RANGE */
+ MDP_YCBCR_PROFILE_FULL_BT2020,
+};
+
+#define MDP_FMT_FLAG_OUTPUT BIT(0)
+#define MDP_FMT_FLAG_CAPTURE BIT(1)
+
+struct mdp_format {
+ u32 pixelformat;
+ u32 mdp_color;
+ u8 depth[VIDEO_MAX_PLANES];
+ u8 row_depth[VIDEO_MAX_PLANES];
+ u8 num_planes;
+ u8 walign;
+ u8 halign;
+ u8 salign;
+ u32 flags;
+};
+
+struct mdp_pix_limit {
+ u32 wmin;
+ u32 hmin;
+ u32 wmax;
+ u32 hmax;
+};
+
+struct mdp_limit {
+ struct mdp_pix_limit out_limit;
+ struct mdp_pix_limit cap_limit;
+ u32 h_scale_up_max;
+ u32 v_scale_up_max;
+ u32 h_scale_down_max;
+ u32 v_scale_down_max;
+};
+
+enum mdp_stream_type {
+ MDP_STREAM_TYPE_UNKNOWN,
+ MDP_STREAM_TYPE_BITBLT,
+ MDP_STREAM_TYPE_GPU_BITBLT,
+ MDP_STREAM_TYPE_DUAL_BITBLT,
+ MDP_STREAM_TYPE_2ND_BITBLT,
+ MDP_STREAM_TYPE_ISP_IC,
+ MDP_STREAM_TYPE_ISP_VR,
+ MDP_STREAM_TYPE_ISP_ZSD,
+ MDP_STREAM_TYPE_ISP_IP,
+ MDP_STREAM_TYPE_ISP_VSS,
+ MDP_STREAM_TYPE_ISP_ZSD_SLOW,
+ MDP_STREAM_TYPE_WPE,
+ MDP_STREAM_TYPE_WPE2,
+};
+
+struct mdp_crop {
+ struct v4l2_rect c;
+ struct v4l2_fract left_subpix;
+ struct v4l2_fract top_subpix;
+ struct v4l2_fract width_subpix;
+ struct v4l2_fract height_subpix;
+};
+
+struct mdp_frame {
+ struct v4l2_format format;
+ const struct mdp_format *mdp_fmt;
+ u32 ycbcr_prof; /* enum mdp_ycbcr_profile */
+ u32 usage; /* enum mdp_buffer_usage */
+ struct mdp_crop crop;
+ struct v4l2_rect compose;
+ s32 rotation;
+ u32 hflip:1;
+ u32 vflip:1;
+ u32 hdr:1;
+ u32 dre:1;
+ u32 sharpness:1;
+ u32 dither:1;
+};
+
+static inline bool mdp_target_is_crop(u32 target)
+{
+ return (target == V4L2_SEL_TGT_CROP) ||
+ (target == V4L2_SEL_TGT_CROP_DEFAULT) ||
+ (target == V4L2_SEL_TGT_CROP_BOUNDS);
+}
+
+static inline bool mdp_target_is_compose(u32 target)
+{
+ return (target == V4L2_SEL_TGT_COMPOSE) ||
+ (target == V4L2_SEL_TGT_COMPOSE_DEFAULT) ||
+ (target == V4L2_SEL_TGT_COMPOSE_BOUNDS);
+}
+
+#define MDP_MAX_CAPTURES IMG_MAX_HW_OUTPUTS
+
+#define MDP_VPU_INIT BIT(0)
+#define MDP_M2M_CTX_ERROR BIT(1)
+
+struct mdp_frameparam {
+ struct list_head list;
+ struct mdp_m2m_ctx *ctx;
+ atomic_t state;
+ const struct mdp_limit *limit;
+ u32 type; /* enum mdp_stream_type */
+ u32 frame_no;
+ struct mdp_frame output;
+ struct mdp_frame captures[MDP_MAX_CAPTURES];
+ u32 num_captures;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+};
+
+int mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f);
+const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
+ struct mdp_frameparam *param,
+ u32 ctx_id);
+enum mdp_ycbcr_profile mdp_map_ycbcr_prof_mplane(struct v4l2_format *f,
+ u32 mdp_color);
+int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
+ const struct v4l2_selection *s, struct mdp_frame *frame);
+int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose, s32 rotation,
+ const struct mdp_limit *limit);
+void mdp_set_src_config(struct img_input *in,
+ struct mdp_frame *frame, struct vb2_buffer *vb);
+void mdp_set_dst_config(struct img_output *out,
+ struct mdp_frame *frame, struct vb2_buffer *vb);
+int mdp_frameparam_init(struct mdp_frameparam *param);
+
+#endif /* __MTK_MDP3_REGS_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
new file mode 100644
index 000000000000..9f5844385c8f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include "mtk-mdp3-vpu.h"
+#include "mtk-mdp3-core.h"
+
+#define MDP_VPU_MESSAGE_TIMEOUT 500U
+#define vpu_alloc_size 0x600000
+
+static inline struct mdp_dev *vpu_to_mdp(struct mdp_vpu_dev *vpu)
+{
+ return container_of(vpu, struct mdp_dev, vpu);
+}
+
+static int mdp_vpu_shared_mem_alloc(struct mdp_vpu_dev *vpu)
+{
+ if (vpu->work && vpu->work_addr)
+ return 0;
+
+ vpu->work = dma_alloc_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
+ &vpu->work_addr, GFP_KERNEL);
+
+ if (!vpu->work)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu)
+{
+ if (vpu->work && vpu->work_addr)
+ dma_free_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
+ vpu->work, vpu->work_addr);
+}
+
+static void mdp_vpu_ipi_handle_init_ack(void *data, unsigned int len,
+ void *priv)
+{
+ struct mdp_ipi_init_msg *msg = (struct mdp_ipi_init_msg *)data;
+ struct mdp_vpu_dev *vpu =
+ (struct mdp_vpu_dev *)(unsigned long)msg->drv_data;
+
+ if (!vpu->work_size)
+ vpu->work_size = msg->work_size;
+
+ vpu->status = msg->status;
+ complete(&vpu->ipi_acked);
+}
+
+static void mdp_vpu_ipi_handle_deinit_ack(void *data, unsigned int len,
+ void *priv)
+{
+ struct mdp_ipi_deinit_msg *msg = (struct mdp_ipi_deinit_msg *)data;
+ struct mdp_vpu_dev *vpu =
+ (struct mdp_vpu_dev *)(unsigned long)msg->drv_data;
+
+ vpu->status = msg->status;
+ complete(&vpu->ipi_acked);
+}
+
+static void mdp_vpu_ipi_handle_frame_ack(void *data, unsigned int len,
+ void *priv)
+{
+ struct img_sw_addr *addr = (struct img_sw_addr *)data;
+ struct img_ipi_frameparam *param =
+ (struct img_ipi_frameparam *)(unsigned long)addr->va;
+ struct mdp_vpu_ctx *ctx =
+ (struct mdp_vpu_ctx *)(unsigned long)param->drv_data;
+
+ if (param->state) {
+ struct mdp_dev *mdp = vpu_to_mdp(ctx->vpu_dev);
+
+ dev_err(&mdp->pdev->dev, "VPU MDP failure:%d\n", param->state);
+ }
+ ctx->vpu_dev->status = param->state;
+ complete(&ctx->vpu_dev->ipi_acked);
+}
+
+int mdp_vpu_register(struct mdp_dev *mdp)
+{
+ int err;
+ struct mtk_scp *scp = mdp->scp;
+ struct device *dev = &mdp->pdev->dev;
+
+ err = scp_ipi_register(scp, SCP_IPI_MDP_INIT,
+ mdp_vpu_ipi_handle_init_ack, NULL);
+ if (err) {
+ dev_err(dev, "scp_ipi_register failed %d\n", err);
+ goto err_ipi_init;
+ }
+ err = scp_ipi_register(scp, SCP_IPI_MDP_DEINIT,
+ mdp_vpu_ipi_handle_deinit_ack, NULL);
+ if (err) {
+ dev_err(dev, "scp_ipi_register failed %d\n", err);
+ goto err_ipi_deinit;
+ }
+ err = scp_ipi_register(scp, SCP_IPI_MDP_FRAME,
+ mdp_vpu_ipi_handle_frame_ack, NULL);
+ if (err) {
+ dev_err(dev, "scp_ipi_register failed %d\n", err);
+ goto err_ipi_frame;
+ }
+ return 0;
+
+err_ipi_frame:
+ scp_ipi_unregister(scp, SCP_IPI_MDP_DEINIT);
+err_ipi_deinit:
+ scp_ipi_unregister(scp, SCP_IPI_MDP_INIT);
+err_ipi_init:
+
+ return err;
+}
+
+void mdp_vpu_unregister(struct mdp_dev *mdp)
+{
+ scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_INIT);
+ scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_DEINIT);
+ scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_FRAME);
+}
+
+static int mdp_vpu_sendmsg(struct mdp_vpu_dev *vpu, enum scp_ipi_id id,
+ void *buf, unsigned int len)
+{
+ struct mdp_dev *mdp = vpu_to_mdp(vpu);
+ unsigned int t = MDP_VPU_MESSAGE_TIMEOUT;
+ int ret;
+
+ if (!vpu->scp) {
+ dev_dbg(&mdp->pdev->dev, "vpu scp is NULL");
+ return -EINVAL;
+ }
+ ret = scp_ipi_send(vpu->scp, id, buf, len, 2000);
+
+ if (ret) {
+ dev_err(&mdp->pdev->dev, "scp_ipi_send failed %d\n", ret);
+ return -EPERM;
+ }
+ ret = wait_for_completion_timeout(&vpu->ipi_acked,
+ msecs_to_jiffies(t));
+ if (!ret)
+ ret = -ETIME;
+ else if (vpu->status)
+ ret = -EINVAL;
+ else
+ ret = 0;
+ return ret;
+}
+
+int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
+ struct mutex *lock)
+{
+ struct mdp_ipi_init_msg msg = {
+ .drv_data = (unsigned long)vpu,
+ };
+ size_t mem_size;
+ phys_addr_t pool;
+ const size_t pool_size = sizeof(struct mdp_config_pool);
+ struct mdp_dev *mdp = vpu_to_mdp(vpu);
+ int err;
+
+ init_completion(&vpu->ipi_acked);
+ vpu->scp = scp;
+ vpu->lock = lock;
+ vpu->work_size = 0;
+ err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
+ if (err)
+ goto err_work_size;
+ /* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */
+
+ mem_size = vpu_alloc_size;
+ if (mdp_vpu_shared_mem_alloc(vpu)) {
+ dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
+ goto err_mem_alloc;
+ }
+
+ pool = ALIGN((uintptr_t)vpu->work + vpu->work_size, 8);
+ if (pool + pool_size - (uintptr_t)vpu->work > mem_size) {
+ dev_err(&mdp->pdev->dev,
+ "VPU memory insufficient: %zx + %zx > %zx",
+ vpu->work_size, pool_size, mem_size);
+ err = -ENOMEM;
+ goto err_mem_size;
+ }
+
+ dev_dbg(&mdp->pdev->dev,
+ "VPU work:%pK pa:%pad sz:%zx pool:%pa sz:%zx (mem sz:%zx)",
+ vpu->work, &vpu->work_addr, vpu->work_size,
+ &pool, pool_size, mem_size);
+ vpu->pool = (struct mdp_config_pool *)(uintptr_t)pool;
+ msg.work_addr = vpu->work_addr;
+ msg.work_size = vpu->work_size;
+ err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
+ if (err)
+ goto err_work_size;
+
+ memset(vpu->pool, 0, sizeof(*vpu->pool));
+ return 0;
+
+err_work_size:
+ switch (vpu->status) {
+ case -MDP_IPI_EBUSY:
+ err = -EBUSY;
+ break;
+ case -MDP_IPI_ENOMEM:
+ err = -ENOSPC; /* -ENOMEM */
+ break;
+ }
+ return err;
+err_mem_size:
+err_mem_alloc:
+ return err;
+}
+
+int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu)
+{
+ struct mdp_ipi_deinit_msg msg = {
+ .drv_data = (unsigned long)vpu,
+ .work_addr = vpu->work_addr,
+ };
+
+ return mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_DEINIT, &msg, sizeof(msg));
+}
+
+static struct img_config *mdp_config_get(struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id, uint32_t *addr)
+{
+ struct img_config *config;
+
+ if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(vpu->lock);
+ vpu->pool->cfg_count[id]++;
+ config = &vpu->pool->configs[id];
+ *addr = vpu->work_addr + ((uintptr_t)config - (uintptr_t)vpu->work);
+ mutex_unlock(vpu->lock);
+
+ return config;
+}
+
+static int mdp_config_put(struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id,
+ const struct img_config *config)
+{
+ int err = 0;
+
+ if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
+ return -EINVAL;
+ if (vpu->lock)
+ mutex_lock(vpu->lock);
+ if (!vpu->pool->cfg_count[id] || config != &vpu->pool->configs[id])
+ err = -EINVAL;
+ else
+ vpu->pool->cfg_count[id]--;
+ if (vpu->lock)
+ mutex_unlock(vpu->lock);
+ return err;
+}
+
+int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id)
+{
+ ctx->config = mdp_config_get(vpu, id, &ctx->inst_addr);
+ if (IS_ERR(ctx->config)) {
+ int err = PTR_ERR(ctx->config);
+
+ ctx->config = NULL;
+ return err;
+ }
+ ctx->config_id = id;
+ ctx->vpu_dev = vpu;
+ return 0;
+}
+
+int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx)
+{
+ int err = mdp_config_put(ctx->vpu_dev, ctx->config_id, ctx->config);
+
+ ctx->config_id = 0;
+ ctx->config = NULL;
+ ctx->inst_addr = 0;
+ return err;
+}
+
+int mdp_vpu_process(struct mdp_vpu_ctx *ctx, struct img_ipi_frameparam *param)
+{
+ struct mdp_vpu_dev *vpu = ctx->vpu_dev;
+ struct mdp_dev *mdp = vpu_to_mdp(vpu);
+ struct img_sw_addr addr;
+
+ if (!ctx->vpu_dev->work || !ctx->vpu_dev->work_addr) {
+ if (mdp_vpu_shared_mem_alloc(vpu)) {
+ dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
+ return -ENOMEM;
+ }
+ }
+ memset((void *)ctx->vpu_dev->work, 0, ctx->vpu_dev->work_size);
+ memset(ctx->config, 0, sizeof(*ctx->config));
+ param->config_data.va = (unsigned long)ctx->config;
+ param->config_data.pa = ctx->inst_addr;
+ param->drv_data = (unsigned long)ctx;
+
+ memcpy((void *)ctx->vpu_dev->work, param, sizeof(*param));
+ addr.pa = ctx->vpu_dev->work_addr;
+ addr.va = (uintptr_t)ctx->vpu_dev->work;
+ return mdp_vpu_sendmsg(ctx->vpu_dev, SCP_IPI_MDP_FRAME,
+ &addr, sizeof(addr));
+}
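Editor's note (not part of the patch): taken together, a rough usage sketch of the VPU API in this file looks like the sequence below; vpu_lock, vpu_ctx and param are placeholder names, and error handling is omitted.

        /* Illustrative call order only */
        mdp_vpu_register(mdp);                               /* hook up the three SCP IPI ack handlers */
        mdp_vpu_dev_init(&mdp->vpu, mdp->scp, &vpu_lock);    /* two-phase init: query work_size, then share the buffer */
        mdp_vpu_ctx_init(&vpu_ctx, &mdp->vpu, MDP_DEV_M2M);  /* take a slot from the config pool */
        mdp_vpu_process(&vpu_ctx, &param);                   /* copy frame params into the shared buffer, send SCP_IPI_MDP_FRAME */
        mdp_vpu_ctx_deinit(&vpu_ctx);
        mdp_vpu_dev_deinit(&mdp->vpu);
        mdp_vpu_unregister(mdp);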
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h
new file mode 100644
index 000000000000..244b3a32d689
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_VPU_H__
+#define __MTK_MDP3_VPU_H__
+
+#include <linux/platform_device.h>
+#include "mtk-img-ipi.h"
+
+enum mdp_ipi_result {
+ MDP_IPI_SUCCESS = 0,
+ MDP_IPI_ENOMEM = 12,
+ MDP_IPI_EBUSY = 16,
+ MDP_IPI_EINVAL = 22,
+ MDP_IPI_EMINST = 24,
+ MDP_IPI_ERANGE = 34,
+ MDP_IPI_NR_ERRNO,
+
+ MDP_IPI_EOTHER = MDP_IPI_NR_ERRNO,
+ MDP_IPI_PATH_CANT_MERGE,
+ MDP_IPI_OP_FAIL,
+};
+
+struct mdp_ipi_init_msg {
+ u32 status;
+ u64 drv_data;
+ u32 work_addr; /* [in] working buffer address */
+ u32 work_size; /* [in] working buffer size */
+} __packed;
+
+struct mdp_ipi_deinit_msg {
+ u32 status;
+ u64 drv_data;
+ u32 work_addr;
+} __packed;
+
+enum mdp_config_id {
+ MDP_DEV_M2M = 0,
+ MDP_CONFIG_POOL_SIZE /* ALWAYS keep at the end */
+};
+
+struct mdp_config_pool {
+ u64 cfg_count[MDP_CONFIG_POOL_SIZE];
+ struct img_config configs[MDP_CONFIG_POOL_SIZE];
+};
+
+struct mdp_vpu_dev {
+ /* lock protecting access to the vpu working buffer info */
+ struct mutex *lock;
+ struct mtk_scp *scp;
+ struct completion ipi_acked;
+ void *work;
+ dma_addr_t work_addr;
+ size_t work_size;
+ struct mdp_config_pool *pool;
+ u32 status;
+};
+
+struct mdp_vpu_ctx {
+ struct mdp_vpu_dev *vpu_dev;
+ u32 config_id;
+ struct img_config *config;
+ u32 inst_addr;
+};
+
+void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu);
+int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
+ struct mutex *lock /* for sync */);
+int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu);
+int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id);
+int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx);
+int mdp_vpu_process(struct mdp_vpu_ctx *vpu, struct img_ipi_frameparam *param);
+
+#endif /* __MTK_MDP3_VPU_H__ */
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
index 7d194a476713..641f533c417f 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
@@ -227,6 +227,8 @@ static int mtk_vcodec_dec_get_chip_name(void *priv)
return 8195;
else if (of_device_is_compatible(dev->of_node, "mediatek,mt8186-vcodec-dec"))
return 8186;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-dec"))
+ return 8188;
else
return 8173;
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
index e0b6ae9d6caa..174a6eec2f54 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
@@ -478,6 +478,10 @@ static const struct of_device_id mtk_vcodec_match[] = {
.compatible = "mediatek,mt8195-vcodec-dec",
.data = &mtk_lat_sig_core_pdata,
},
+ {
+ .compatible = "mediatek,mt8188-vcodec-dec",
+ .data = &mtk_lat_sig_core_pdata,
+ },
{},
};
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
index ef4584a46417..9acab54fd650 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
@@ -278,6 +278,7 @@ struct vdec_pic_info {
* @hw_id: hardware index used to identify different hardware.
*
* @msg_queue: msg queue used to store lat buffer information.
+ * @q_mutex: vb2_queue mutex.
*/
struct mtk_vcodec_ctx {
enum mtk_instance_type type;
@@ -324,6 +325,8 @@ struct mtk_vcodec_ctx {
int hw_id;
struct vdec_msg_queue msg_queue;
+
+ struct mutex q_mutex;
};
/*
@@ -401,6 +404,7 @@ struct mtk_vcodec_dec_pdata {
* @output_formats: array of supported output formats
* @num_output_formats: number of entries in output_formats
* @core_id: stand for h264 or vp8 encode index
+ * @uses_34bit: whether the encoder uses 34-bit iova
*/
struct mtk_vcodec_enc_pdata {
bool uses_ext;
@@ -411,9 +415,11 @@ struct mtk_vcodec_enc_pdata {
const struct mtk_video_fmt *output_formats;
size_t num_output_formats;
int core_id;
+ bool uses_34bit;
};
#define MTK_ENC_CTX_IS_EXT(ctx) ((ctx)->dev->venc_pdata->uses_ext)
+#define MTK_ENC_IOVA_IS_34BIT(ctx) ((ctx)->dev->venc_pdata->uses_34bit)
/**
* struct mtk_vcodec_dev - driver data
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index 25e816863597..d810a78dde51 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -225,6 +225,8 @@ static int mtk_vcodec_enc_get_chip_name(void *priv)
return 8192;
else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-enc"))
return 8195;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-enc"))
+ return 8188;
else
return 8173;
}
@@ -503,13 +505,13 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
f->fmt.pix.pixelformat = fmt->fourcc;
}
- ret = vidioc_try_fmt_out(ctx, f, fmt);
+ q_data->visible_width = f->fmt.pix_mp.width;
+ q_data->visible_height = f->fmt.pix_mp.height;
+ q_data->fmt = fmt;
+ ret = vidioc_try_fmt_out(ctx, f, q_data->fmt);
if (ret)
return ret;
- q_data->fmt = fmt;
- q_data->visible_width = f->fmt.pix_mp.width;
- q_data->visible_height = f->fmt.pix_mp.height;
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
@@ -1300,7 +1302,7 @@ void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
{
struct mtk_q_data *q_data;
- ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->m2m_ctx->q_lock = &ctx->q_mutex;
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
INIT_WORK(&ctx->encode_work, mtk_venc_worker);
@@ -1403,7 +1405,8 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
V4L2_MPEG_VIDEO_VP8_PROFILE_0, 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
- 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+ ~(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
if (handler->error) {
@@ -1435,7 +1438,7 @@ int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &mtk_venc_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->lock = &ctx->q_mutex;
src_vq->dev = &ctx->dev->plat_dev->dev;
ret = vb2_queue_init(src_vq);
@@ -1449,7 +1452,7 @@ int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &mtk_venc_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->lock = &ctx->q_mutex;
dst_vq->dev = &ctx->dev->plat_dev->dev;
return vb2_queue_init(dst_vq);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index 95e8c29ccc65..9095186d5495 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -130,6 +130,7 @@ static int fops_vcodec_open(struct file *file)
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
init_waitqueue_head(&ctx->queue[0]);
+ mutex_init(&ctx->q_mutex);
ctx->type = MTK_INST_ENCODER;
ret = mtk_vcodec_enc_ctrls_setup(ctx);
@@ -228,7 +229,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev;
struct video_device *vfd_enc;
- struct resource *res;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
int ret;
@@ -272,14 +272,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get irq resource");
- ret = -ENOENT;
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ if (dev->enc_irq < 0) {
+ ret = dev->enc_irq;
goto err_res;
}
- dev->enc_irq = platform_get_irq(pdev, 0);
irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->enc_irq,
mtk_vcodec_enc_irq_handler,
@@ -406,6 +404,18 @@ static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
.core_id = VENC_SYS,
};
+static const struct mtk_vcodec_enc_pdata mt8188_pdata = {
+ .uses_ext = true,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 50000000,
+ .core_id = VENC_SYS,
+ .uses_34bit = true,
+};
+
static const struct mtk_vcodec_enc_pdata mt8192_pdata = {
.uses_ext = true,
.capture_formats = mtk_video_formats_capture_h264,
@@ -434,6 +444,7 @@ static const struct of_device_id mtk_vcodec_enc_match[] = {
{.compatible = "mediatek,mt8173-vcodec-enc-vp8",
.data = &mt8173_vp8_pdata},
{.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
+ {.compatible = "mediatek,mt8188-vcodec-enc", .data = &mt8188_pdata},
{.compatible = "mediatek,mt8192-vcodec-enc", .data = &mt8192_pdata},
{.compatible = "mediatek,mt8195-vcodec-enc", .data = &mt8195_pdata},
{},
diff --git a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
index 4d9b8798dffe..13c4f860fa69 100644
--- a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
@@ -127,6 +127,72 @@ struct venc_h264_vsi {
struct venc_h264_vpu_buf work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
};
+/**
+ * struct venc_h264_vpu_config_ext - Structure for h264 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @input_fourcc: input fourcc
+ * @bitrate: target bitrate (in bps)
+ * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
+ * to be used for display purposes; must be smaller or equal to buffer
+ * size.
+ * @pic_h: picture height
+ * @buf_w: buffer width. Buffer size is stream resolution in pixels aligned to
+ * hardware requirements.
+ * @buf_h: buffer height
+ * @gop_size: group of picture size (idr frame)
+ * @intra_period: intra frame period
+ * @framerate: frame rate in fps
+ * @profile: as specified in standard
+ * @level: as specified in standard
+ * @wfd: WFD mode 1:on, 0:off
+ * @max_qp: max quant parameter
+ * @min_qp: min quant parameter
+ * @reserved: reserved configs
+ */
+struct venc_h264_vpu_config_ext {
+ u32 input_fourcc;
+ u32 bitrate;
+ u32 pic_w;
+ u32 pic_h;
+ u32 buf_w;
+ u32 buf_h;
+ u32 gop_size;
+ u32 intra_period;
+ u32 framerate;
+ u32 profile;
+ u32 level;
+ u32 wfd;
+ u32 max_qp;
+ u32 min_qp;
+ u32 reserved[8];
+};
+
+/**
+ * struct venc_h264_vpu_buf_34 - Structure for 34-bit buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @iova: 34-bit IO virtual address
+ * @vpua: VPU side memory addr which is used by RC_CODE
+ * @size: buffer size (in bytes)
+ */
+struct venc_h264_vpu_buf_34 {
+ u64 iova;
+ u32 vpua;
+ u32 size;
+};
+
+/**
+ * struct venc_h264_vsi_34 - Structure for VPU driver control and info share
+ * Used for 34-bit iova sharing
+ * @config: h264 encoder configuration
+ * @work_bufs: working buffer information in VPU side
+ */
+struct venc_h264_vsi_34 {
+ struct venc_h264_vpu_config_ext config;
+ struct venc_h264_vpu_buf_34 work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
+};
+
/*
* struct venc_h264_inst - h264 encoder AP driver instance
* @hw_base: h264 encoder hardware register base
@@ -140,6 +206,8 @@ struct venc_h264_vsi {
* @vpu_inst: VPU instance to exchange information between AP and VPU
* @vsi: driver structure allocated by VPU side and shared to AP side for
* control and info share
+ * @vsi_34: driver structure allocated by VPU side and shared to AP side for
+ * control and info share, used for 34-bit iova sharing.
* @ctx: context for v4l2 layer integration
*/
struct venc_h264_inst {
@@ -152,6 +220,7 @@ struct venc_h264_inst {
unsigned int prepend_hdr;
struct venc_vpu_inst vpu_inst;
struct venc_h264_vsi *vsi;
+ struct venc_h264_vsi_34 *vsi_34;
struct mtk_vcodec_ctx *ctx;
};
@@ -244,14 +313,21 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
mtk_vcodec_debug_leave(inst);
}
-static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
+static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
{
+ struct venc_h264_vpu_buf *wb = NULL;
+ struct venc_h264_vpu_buf_34 *wb_34 = NULL;
int i;
+ u32 vpua, wb_size;
int ret = 0;
- struct venc_h264_vpu_buf *wb = inst->vsi->work_bufs;
mtk_vcodec_debug_enter(inst);
+ if (is_34bit)
+ wb_34 = inst->vsi_34->work_bufs;
+ else
+ wb = inst->vsi->work_bufs;
+
for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
/*
* This 'wb' structure is set by VPU side and shared to AP for
@@ -269,13 +345,22 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
* address and do some memcpy access to move to bitstream buffer
* assigned by v4l2 layer.
*/
- inst->work_bufs[i].size = wb[i].size;
+ if (is_34bit) {
+ inst->work_bufs[i].size = wb_34[i].size;
+ vpua = wb_34[i].vpua;
+ wb_size = wb_34[i].size;
+ } else {
+ inst->work_bufs[i].size = wb[i].size;
+ vpua = wb[i].vpua;
+ wb_size = wb[i].size;
+ }
+
if (i == VENC_H264_VPU_WORK_BUF_SKIP_FRAME) {
struct mtk_vcodec_fw *handler;
handler = inst->vpu_inst.ctx->dev->fw_handler;
inst->work_bufs[i].va =
- mtk_vcodec_fw_map_dm_addr(handler, wb[i].vpua);
+ mtk_vcodec_fw_map_dm_addr(handler, vpua);
inst->work_bufs[i].dma_addr = 0;
} else {
ret = mtk_vcodec_mem_alloc(inst->ctx,
@@ -297,12 +382,14 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
handler = inst->vpu_inst.ctx->dev->fw_handler;
tmp_va = mtk_vcodec_fw_map_dm_addr(handler,
- wb[i].vpua);
- memcpy(inst->work_bufs[i].va, tmp_va,
- wb[i].size);
+ vpua);
+ memcpy(inst->work_bufs[i].va, tmp_va, wb_size);
}
}
- wb[i].iova = inst->work_bufs[i].dma_addr;
+ if (is_34bit)
+ wb_34[i].iova = inst->work_bufs[i].dma_addr;
+ else
+ wb[i].iova = inst->work_bufs[i].dma_addr;
mtk_vcodec_debug(inst,
"work_buf[%d] va=0x%p iova=%pad size=%zu",
@@ -342,22 +429,22 @@ static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
return irq_status;
}
-static int h264_frame_type(struct venc_h264_inst *inst)
+static int h264_frame_type(unsigned int frm_cnt, unsigned int gop_size,
+ unsigned int intra_period)
{
- if ((inst->vsi->config.gop_size != 0 &&
- (inst->frm_cnt % inst->vsi->config.gop_size) == 0) ||
- (inst->frm_cnt == 0 && inst->vsi->config.gop_size == 0)) {
+ if ((gop_size != 0 && (frm_cnt % gop_size) == 0) ||
+ (frm_cnt == 0 && gop_size == 0)) {
/* IDR frame */
return VENC_H264_IDR_FRM;
- } else if ((inst->vsi->config.intra_period != 0 &&
- (inst->frm_cnt % inst->vsi->config.intra_period) == 0) ||
- (inst->frm_cnt == 0 && inst->vsi->config.intra_period == 0)) {
+ } else if ((intra_period != 0 && (frm_cnt % intra_period) == 0) ||
+ (frm_cnt == 0 && intra_period == 0)) {
/* I frame */
return VENC_H264_I_FRM;
} else {
return VENC_H264_P_FRM; /* Note: B frames are not supported */
}
}
+
static int h264_encode_sps(struct venc_h264_inst *inst,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
@@ -438,18 +525,32 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
unsigned int *bs_size)
{
int ret = 0;
+ unsigned int gop_size;
+ unsigned int intra_period;
unsigned int irq_status;
struct venc_frame_info frame_info;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
mtk_vcodec_debug_enter(inst);
mtk_vcodec_debug(inst, "frm_cnt = %d\n ", inst->frm_cnt);
+
+ if (MTK_ENC_IOVA_IS_34BIT(ctx)) {
+ gop_size = inst->vsi_34->config.gop_size;
+ intra_period = inst->vsi_34->config.intra_period;
+ } else {
+ gop_size = inst->vsi->config.gop_size;
+ intra_period = inst->vsi->config.intra_period;
+ }
frame_info.frm_count = inst->frm_cnt;
frame_info.skip_frm_count = inst->skip_frm_cnt;
- frame_info.frm_type = h264_frame_type(inst);
+ frame_info.frm_type = h264_frame_type(inst->frm_cnt, gop_size,
+ intra_period);
mtk_vcodec_debug(inst, "frm_count = %d,skip_frm_count =%d,frm_type=%d.\n",
frame_info.frm_count, frame_info.skip_frm_count,
frame_info.frm_type);
- ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf, bs_buf, &frame_info);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME,
+ frm_buf, bs_buf, &frame_info);
if (ret)
return ret;
@@ -517,7 +618,10 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
ret = vpu_enc_init(&inst->vpu_inst);
- inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
+ if (MTK_ENC_IOVA_IS_34BIT(ctx))
+ inst->vsi_34 = (struct venc_h264_vsi_34 *)inst->vpu_inst.vsi;
+ else
+ inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
mtk_vcodec_debug_leave(inst);
@@ -624,31 +728,61 @@ encode_err:
return ret;
}
+static void h264_enc_set_vsi_configs(struct venc_h264_inst *inst,
+ struct venc_enc_param *enc_prm)
+{
+ inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi->config.bitrate = enc_prm->bitrate;
+ inst->vsi->config.pic_w = enc_prm->width;
+ inst->vsi->config.pic_h = enc_prm->height;
+ inst->vsi->config.buf_w = enc_prm->buf_width;
+ inst->vsi->config.buf_h = enc_prm->buf_height;
+ inst->vsi->config.gop_size = enc_prm->gop_size;
+ inst->vsi->config.framerate = enc_prm->frm_rate;
+ inst->vsi->config.intra_period = enc_prm->intra_period;
+ inst->vsi->config.profile =
+ h264_get_profile(inst, enc_prm->h264_profile);
+ inst->vsi->config.level =
+ h264_get_level(inst, enc_prm->h264_level);
+ inst->vsi->config.wfd = 0;
+}
+
+static void h264_enc_set_vsi_34_configs(struct venc_h264_inst *inst,
+ struct venc_enc_param *enc_prm)
+{
+ inst->vsi_34->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi_34->config.bitrate = enc_prm->bitrate;
+ inst->vsi_34->config.pic_w = enc_prm->width;
+ inst->vsi_34->config.pic_h = enc_prm->height;
+ inst->vsi_34->config.buf_w = enc_prm->buf_width;
+ inst->vsi_34->config.buf_h = enc_prm->buf_height;
+ inst->vsi_34->config.gop_size = enc_prm->gop_size;
+ inst->vsi_34->config.framerate = enc_prm->frm_rate;
+ inst->vsi_34->config.intra_period = enc_prm->intra_period;
+ inst->vsi_34->config.profile =
+ h264_get_profile(inst, enc_prm->h264_profile);
+ inst->vsi_34->config.level =
+ h264_get_level(inst, enc_prm->h264_level);
+ inst->vsi_34->config.wfd = 0;
+}
+
static int h264_enc_set_param(void *handle,
enum venc_set_param_type type,
struct venc_enc_param *enc_prm)
{
int ret = 0;
struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+ const bool is_34bit = MTK_ENC_IOVA_IS_34BIT(ctx);
mtk_vcodec_debug(inst, "->type=%d", type);
switch (type) {
case VENC_SET_PARAM_ENC:
- inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
- inst->vsi->config.bitrate = enc_prm->bitrate;
- inst->vsi->config.pic_w = enc_prm->width;
- inst->vsi->config.pic_h = enc_prm->height;
- inst->vsi->config.buf_w = enc_prm->buf_width;
- inst->vsi->config.buf_h = enc_prm->buf_height;
- inst->vsi->config.gop_size = enc_prm->gop_size;
- inst->vsi->config.framerate = enc_prm->frm_rate;
- inst->vsi->config.intra_period = enc_prm->intra_period;
- inst->vsi->config.profile =
- h264_get_profile(inst, enc_prm->h264_profile);
- inst->vsi->config.level =
- h264_get_level(inst, enc_prm->h264_level);
- inst->vsi->config.wfd = 0;
+ if (is_34bit)
+ h264_enc_set_vsi_34_configs(inst, enc_prm);
+ else
+ h264_enc_set_vsi_configs(inst, enc_prm);
ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
if (ret)
break;
@@ -656,7 +790,7 @@ static int h264_enc_set_param(void *handle,
h264_enc_free_work_buf(inst);
inst->work_buf_allocated = false;
}
- ret = h264_enc_alloc_work_buf(inst);
+ ret = h264_enc_alloc_work_buf(inst, is_34bit);
if (ret)
break;
inst->work_buf_allocated = true;
diff --git a/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h b/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
index 587a2cf15b76..bb16d96a7f57 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
@@ -101,6 +101,30 @@ struct venc_ap_ipi_msg_enc_ext {
};
/**
+ * struct venc_ap_ipi_msg_enc_ext_34 - AP to SCP extended enc cmd structure
+ * @msg_id: message id (AP_IPIMSG_XXX_ENC_ENCODE)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * @bs_mode: bitstream mode for h264
+ * @reserved: for struct padding
+ * @input_addr: input frame buffer 34-bit address
+ * @bs_addr: output bitstream buffer 34-bit address
+ * @bs_size: bitstream buffer size
+ * @data_item: number of items in the data array
+ * @data: data array to store the set parameters
+ */
+struct venc_ap_ipi_msg_enc_ext_34 {
+ u32 msg_id;
+ u32 vpu_inst_addr;
+ u32 bs_mode;
+ u32 reserved;
+ u64 input_addr[3];
+ u64 bs_addr;
+ u32 bs_size;
+ u32 data_item;
+ u32 data[32];
+};
+
+/**
* struct venc_ap_ipi_msg_deinit - AP to VPU deinit cmd structure
* @msg_id: message id (AP_IPIMSG_XXX_ENC_DEINIT)
* @vpu_inst_addr: VPU encoder instance addr
diff --git a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
index d3570c4c177d..09e7eaa25aab 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
@@ -222,10 +222,11 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
return 0;
}
-int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
- struct venc_frm_buf *frm_buf,
- struct mtk_vcodec_mem *bs_buf,
- struct venc_frame_info *frame_info)
+static int vpu_enc_encode_32bits(struct venc_vpu_inst *vpu,
+ unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_frame_info *frame_info)
{
const bool is_ext = MTK_ENC_CTX_IS_EXT(vpu->ctx);
size_t msg_size = is_ext ?
@@ -267,6 +268,73 @@ int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
return -EINVAL;
}
+ return 0;
+}
+
+static int vpu_enc_encode_34bits(struct venc_vpu_inst *vpu,
+ unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_frame_info *frame_info)
+{
+ struct venc_ap_ipi_msg_enc_ext_34 out;
+ size_t msg_size = sizeof(struct venc_ap_ipi_msg_enc_ext_34);
+
+ mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_ENCODE;
+ out.vpu_inst_addr = vpu->inst_addr;
+ out.bs_mode = bs_mode;
+
+ if (frm_buf) {
+ if ((frm_buf->fb_addr[0].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[1].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[2].dma_addr % 16 == 0)) {
+ out.input_addr[0] = frm_buf->fb_addr[0].dma_addr;
+ out.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
+ out.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
+ } else {
+ mtk_vcodec_err(vpu, "dma_addr not align to 16");
+ return -EINVAL;
+ }
+ }
+ if (bs_buf) {
+ out.bs_addr = bs_buf->dma_addr;
+ out.bs_size = bs_buf->size;
+ }
+ if (frame_info) {
+ out.data_item = 3;
+ out.data[0] = frame_info->frm_count;
+ out.data[1] = frame_info->skip_frm_count;
+ out.data[2] = frame_info->frm_type;
+ }
+ if (vpu_enc_send_msg(vpu, &out, msg_size)) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
+ bs_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_frame_info *frame_info)
+{
+ int ret;
+
+ if (MTK_ENC_IOVA_IS_34BIT(vpu->ctx))
+ ret = vpu_enc_encode_34bits(vpu, bs_mode,
+ frm_buf, bs_buf, frame_info);
+ else
+ ret = vpu_enc_encode_32bits(vpu, bs_mode,
+ frm_buf, bs_buf, frame_info);
+
+ if (ret)
+ return ret;
+
mtk_vcodec_debug(vpu, "bs_mode %d state %d size %d key_frm %d <-",
bs_mode, vpu->state, vpu->bs_size, vpu->is_key_frm);
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
index 1ac0a6e91111..5917634889b5 100644
--- a/drivers/media/platform/nxp/Kconfig
+++ b/drivers/media/platform/nxp/Kconfig
@@ -15,18 +15,6 @@ config VIDEO_IMX_MIPI_CSIS
Video4Linux2 sub-device driver for the MIPI CSI-2 CSIS receiver
v3.3/v3.6.3 found on some i.MX7 and i.MX8 SoCs.
-config VIDEO_VIU
- tristate "NXP VIU Video Driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV && (PPC_MPC512x || COMPILE_TEST) && I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Support for Freescale VIU video driver. This device captures
- video data, or overlays video on DIU frame buffer.
-
- Say Y here if you want to enable VIU device on MPC5121e Rev2+.
- In doubt, say N.
-
# mem2mem drivers
config VIDEO_IMX_PXP
@@ -51,4 +39,5 @@ config VIDEO_MX2_EMMAPRP
memory to memory. Operations include resizing and format
conversion.
+source "drivers/media/platform/nxp/dw100/Kconfig"
source "drivers/media/platform/nxp/imx-jpeg/Kconfig"
diff --git a/drivers/media/platform/nxp/Makefile b/drivers/media/platform/nxp/Makefile
index efc38c6578ce..81ab304ef31c 100644
--- a/drivers/media/platform/nxp/Makefile
+++ b/drivers/media/platform/nxp/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-y += dw100/
obj-y += imx-jpeg/
obj-$(CONFIG_VIDEO_IMX_MIPI_CSIS) += imx-mipi-csis.o
obj-$(CONFIG_VIDEO_IMX_PXP) += imx-pxp.o
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
-obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/media/platform/nxp/dw100/Kconfig b/drivers/media/platform/nxp/dw100/Kconfig
new file mode 100644
index 000000000000..cd4531bb3110
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_DW100
+ tristate "NXP i.MX DW100 dewarper"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_MXC || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ DW100 is a memory-to-memory engine performing geometrical
+ transformation on source images through a programmable dewarping map.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dw100.
diff --git a/drivers/media/platform/nxp/dw100/Makefile b/drivers/media/platform/nxp/dw100/Makefile
new file mode 100644
index 000000000000..49db80589e9a
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+obj-$(CONFIG_VIDEO_DW100) += dw100.o
diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c
new file mode 100644
index 000000000000..b3b057798ab6
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/dw100.c
@@ -0,0 +1,1707 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DW100 Hardware dewarper
+ *
+ * Copyright 2022 NXP
+ * Author: Xavier Roumegue (xavier.roumegue@oss.nxp.com)
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <uapi/linux/dw100.h>
+
+#include "dw100_regs.h"
+
+#define DRV_NAME "dw100"
+
+#define DW100_MIN_W 176u
+#define DW100_MIN_H 144u
+#define DW100_MAX_W 4096u
+#define DW100_MAX_H 3072u
+#define DW100_ALIGN_W 3
+#define DW100_ALIGN_H 3
+
+#define DW100_BLOCK_SIZE 16
+
+#define DW100_DEF_W 640u
+#define DW100_DEF_H 480u
+#define DW100_DEF_LUT_W (DIV_ROUND_UP(DW100_DEF_W, DW100_BLOCK_SIZE) + 1)
+#define DW100_DEF_LUT_H (DIV_ROUND_UP(DW100_DEF_H, DW100_BLOCK_SIZE) + 1)
+
+/*
+ * 16 controls have been reserved for this driver for future extension, but
+ * let's limit the related driver allocation to the effective number of controls
+ * in use.
+ */
+#define DW100_MAX_CTRLS 1
+#define DW100_CTRL_DEWARPING_MAP 0
+
+enum {
+ DW100_QUEUE_SRC = 0,
+ DW100_QUEUE_DST = 1,
+};
+
+enum {
+ DW100_FMT_CAPTURE = BIT(0),
+ DW100_FMT_OUTPUT = BIT(1),
+};
+
+struct dw100_device {
+ struct platform_device *pdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct media_device mdev;
+ /* Video device lock */
+ struct mutex vfd_mutex;
+ void __iomem *mmio;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct dentry *debugfs_root;
+};
+
+struct dw100_q_data {
+ struct v4l2_pix_format_mplane pix_fmt;
+ unsigned int sequence;
+ const struct dw100_fmt *fmt;
+ struct v4l2_rect crop;
+};
+
+struct dw100_ctx {
+ struct v4l2_fh fh;
+ struct dw100_device *dw_dev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrls[DW100_MAX_CTRLS];
+ /* per context m2m queue lock */
+ struct mutex vq_mutex;
+
+ /* Look Up Table for pixel remapping */
+ unsigned int *map;
+ dma_addr_t map_dma;
+ size_t map_size;
+ unsigned int map_width;
+ unsigned int map_height;
+ bool user_map_is_set;
+
+ /* Source and destination queue data */
+ struct dw100_q_data q_data[2];
+};
+
+static const struct v4l2_frmsize_stepwise dw100_frmsize_stepwise = {
+ .min_width = DW100_MIN_W,
+ .min_height = DW100_MIN_H,
+ .max_width = DW100_MAX_W,
+ .max_height = DW100_MAX_H,
+ .step_width = 1UL << DW100_ALIGN_W,
+ .step_height = 1UL << DW100_ALIGN_H,
+};
+
+static const struct dw100_fmt {
+ u32 fourcc;
+ u32 types;
+ u32 reg_format;
+ bool reg_swap_uv;
+} formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61M,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = true,
+ },
+};
+
+static inline int to_dw100_fmt_type(enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return DW100_FMT_OUTPUT;
+ else
+ return DW100_FMT_CAPTURE;
+}
+
+static const struct dw100_fmt *dw100_find_pixel_format(u32 pixel_format,
+ int fmt_type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct dw100_fmt *fmt = &formats[i];
+
+ if (fmt->fourcc == pixel_format && fmt->types & fmt_type)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static const struct dw100_fmt *dw100_find_format(struct v4l2_format *f)
+{
+ return dw100_find_pixel_format(f->fmt.pix_mp.pixelformat,
+ to_dw100_fmt_type(f->type));
+}
+
+static inline u32 dw100_read(struct dw100_device *dw_dev, u32 reg)
+{
+ return readl(dw_dev->mmio + reg);
+}
+
+static inline void dw100_write(struct dw100_device *dw_dev, u32 reg, u32 val)
+{
+ writel(val, dw_dev->mmio + reg);
+}
+
+static inline int dw100_dump_regs(struct seq_file *m)
+{
+ struct dw100_device *dw_dev = m->private;
+#define __DECLARE_REG(x) { #x, x }
+ unsigned int i;
+ static const struct reg_desc {
+ const char * const name;
+ unsigned int addr;
+ } dw100_regs[] = {
+ __DECLARE_REG(DW100_DEWARP_ID),
+ __DECLARE_REG(DW100_DEWARP_CTRL),
+ __DECLARE_REG(DW100_MAP_LUT_ADDR),
+ __DECLARE_REG(DW100_MAP_LUT_SIZE),
+ __DECLARE_REG(DW100_MAP_LUT_ADDR2),
+ __DECLARE_REG(DW100_MAP_LUT_SIZE2),
+ __DECLARE_REG(DW100_SRC_IMG_Y_BASE),
+ __DECLARE_REG(DW100_SRC_IMG_UV_BASE),
+ __DECLARE_REG(DW100_SRC_IMG_SIZE),
+ __DECLARE_REG(DW100_SRC_IMG_STRIDE),
+ __DECLARE_REG(DW100_DST_IMG_Y_BASE),
+ __DECLARE_REG(DW100_DST_IMG_UV_BASE),
+ __DECLARE_REG(DW100_DST_IMG_SIZE),
+ __DECLARE_REG(DW100_DST_IMG_STRIDE),
+ __DECLARE_REG(DW100_DST_IMG_Y_SIZE1),
+ __DECLARE_REG(DW100_DST_IMG_UV_SIZE1),
+ __DECLARE_REG(DW100_SRC_IMG_Y_BASE2),
+ __DECLARE_REG(DW100_SRC_IMG_UV_BASE2),
+ __DECLARE_REG(DW100_SRC_IMG_SIZE2),
+ __DECLARE_REG(DW100_SRC_IMG_STRIDE2),
+ __DECLARE_REG(DW100_DST_IMG_Y_BASE2),
+ __DECLARE_REG(DW100_DST_IMG_UV_BASE2),
+ __DECLARE_REG(DW100_DST_IMG_SIZE2),
+ __DECLARE_REG(DW100_DST_IMG_STRIDE2),
+ __DECLARE_REG(DW100_DST_IMG_Y_SIZE2),
+ __DECLARE_REG(DW100_DST_IMG_UV_SIZE2),
+ __DECLARE_REG(DW100_SWAP_CONTROL),
+ __DECLARE_REG(DW100_VERTICAL_SPLIT_LINE),
+ __DECLARE_REG(DW100_HORIZON_SPLIT_LINE),
+ __DECLARE_REG(DW100_SCALE_FACTOR),
+ __DECLARE_REG(DW100_ROI_START),
+ __DECLARE_REG(DW100_BOUNDARY_PIXEL),
+ __DECLARE_REG(DW100_INTERRUPT_STATUS),
+ __DECLARE_REG(DW100_BUS_CTRL),
+ __DECLARE_REG(DW100_BUS_CTRL1),
+ __DECLARE_REG(DW100_BUS_TIME_OUT_CYCLE),
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dw100_regs); i++)
+ seq_printf(m, "%s: %#x\n", dw100_regs[i].name,
+ dw100_read(dw_dev, dw100_regs[i].addr));
+
+ return 0;
+}
+
+static inline struct dw100_ctx *dw100_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct dw100_ctx, fh);
+}
+
+static struct dw100_q_data *dw100_get_q_data(struct dw100_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return &ctx->q_data[DW100_QUEUE_SRC];
+ else
+ return &ctx->q_data[DW100_QUEUE_DST];
+}
+
+static u32 dw100_get_n_vertices_from_length(u32 length)
+{
+ return DIV_ROUND_UP(length, DW100_BLOCK_SIZE) + 1;
+}
+
+static u16 dw100_map_convert_to_uq12_4(u32 a)
+{
+ return (u16)((a & 0xfff) << 4);
+}
+
+static u32 dw100_map_format_coordinates(u16 xq, u16 yq)
+{
+ return (u32)((yq << 16) | xq);
+}
+
+static u32 *dw100_get_user_map(struct dw100_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl = ctx->ctrls[DW100_CTRL_DEWARPING_MAP];
+
+ return ctrl->p_cur.p_u32;
+}
+
+/*
+ * Create the dewarp map used by the hardware from the V4L2 control values which
+ * have been initialized with an identity map or set by the application.
+ */
+static int dw100_create_mapping(struct dw100_ctx *ctx)
+{
+ u32 *user_map;
+
+ if (ctx->map)
+ dma_free_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
+ ctx->map, ctx->map_dma);
+
+ ctx->map = dma_alloc_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
+ &ctx->map_dma, GFP_KERNEL);
+
+ if (!ctx->map)
+ return -ENOMEM;
+
+ user_map = dw100_get_user_map(ctx);
+ memcpy(ctx->map, user_map, ctx->map_size);
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "%ux%u %s mapping created (d:%pad-c:%p) for stream %ux%u->%ux%u\n",
+ ctx->map_width, ctx->map_height,
+ ctx->user_map_is_set ? "user" : "identity",
+ &ctx->map_dma, ctx->map,
+ ctx->q_data[DW100_QUEUE_SRC].pix_fmt.width,
+ ctx->q_data[DW100_QUEUE_DST].pix_fmt.height,
+ ctx->q_data[DW100_QUEUE_SRC].pix_fmt.width,
+ ctx->q_data[DW100_QUEUE_DST].pix_fmt.height);
+
+ return 0;
+}
+
+static void dw100_destroy_mapping(struct dw100_ctx *ctx)
+{
+ if (ctx->map) {
+ dma_free_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
+ ctx->map, ctx->map_dma);
+ ctx->map = NULL;
+ }
+}
+
+static int dw100_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dw100_ctx *ctx =
+ container_of(ctrl->handler, struct dw100_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP:
+ ctx->user_map_is_set = true;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops dw100_ctrl_ops = {
+ .s_ctrl = dw100_s_ctrl,
+};
+
+/*
+ * Initialize the dewarping map with an identity mapping.
+ *
+ * A 16-pixel cell grid is mapped onto the destination image.
+ * The last cells' width/height may be less than 16 if the destination image
+ * width/height is not divisible by 16. This dewarping grid map specifies the
+ * source image pixel location (x, y) at each grid intersection point.
+ * Bilinear interpolation is used to compute inner cell point locations.
+ *
+ * The coordinates are saved in UQ12.4 fixed point format.
+ */
+static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, u32 elems,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct dw100_ctx *ctx =
+ container_of(ctrl->handler, struct dw100_ctx, hdl);
+
+ u32 sw, sh, mw, mh, idx;
+ u16 qx, qy, qdx, qdy, qsh, qsw;
+ u32 *map = ctrl->p_cur.p_u32;
+
+ sw = ctx->q_data[DW100_QUEUE_SRC].pix_fmt.width;
+ sh = ctx->q_data[DW100_QUEUE_SRC].pix_fmt.height;
+
+ mw = ctrl->dims[0];
+ mh = ctrl->dims[1];
+
+ qsw = dw100_map_convert_to_uq12_4(sw);
+ qsh = dw100_map_convert_to_uq12_4(sh);
+ qdx = qsw / (mw - 1);
+ qdy = qsh / (mh - 1);
+
+ ctx->map_width = mw;
+ ctx->map_height = mh;
+ ctx->map_size = mh * mw * sizeof(u32);
+
+ for (idx = from_idx; idx < elems; idx++) {
+ qy = min_t(u32, (idx / mw) * qdy, qsh);
+ qx = min_t(u32, (idx % mw) * qdx, qsw);
+ map[idx] = dw100_map_format_coordinates(qx, qy);
+ }
+
+ ctx->user_map_is_set = false;
+}
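
For illustration, the identity-map arithmetic above can be reproduced outside the driver. The sketch below is a standalone userspace program, not part of this patch; it assumes DW100_BLOCK_SIZE is 16 (as implied by the 16x16 cell grid) and uses a hypothetical 640x480 source and destination.

/* Standalone sketch of the UQ12.4 identity-map arithmetic (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 16   /* assumed value of DW100_BLOCK_SIZE */

int main(void)
{
        uint32_t sw = 640, sh = 480;                            /* hypothetical source size */
        uint32_t mw = (640 + BLOCK_SIZE - 1) / BLOCK_SIZE + 1;  /* 41 vertices */
        uint32_t mh = (480 + BLOCK_SIZE - 1) / BLOCK_SIZE + 1;  /* 31 vertices */
        uint16_t qsw = (sw & 0xfff) << 4;                       /* 640.0 -> 10240 */
        uint16_t qsh = (sh & 0xfff) << 4;                       /* 480.0 ->  7680 */
        uint16_t qdx = qsw / (mw - 1);                          /* 16.0  ->   256 */
        uint16_t qdy = qsh / (mh - 1);                          /* 16.0  ->   256 */
        uint32_t idx = 1 * mw + 1;                              /* vertex (1, 1) */
        uint32_t entry = ((uint32_t)((idx / mw) * qdy) << 16) | ((idx % mw) * qdx);

        printf("map[%u] = 0x%08x\n", idx, entry);               /* map[42] = 0x01000100 */
        return 0;
}

With matching source and destination sizes every vertex advances by exactly 16.0 pixels (0x100 in UQ12.4), matching the 16-pixel grid described in the comment above.
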
+
+static const struct v4l2_ctrl_type_ops dw100_ctrl_type_ops = {
+ .init = dw100_ctrl_dewarping_map_init,
+ .validate = v4l2_ctrl_type_op_validate,
+ .log = v4l2_ctrl_type_op_log,
+ .equal = v4l2_ctrl_type_op_equal,
+};
+
+static const struct v4l2_ctrl_config controls[] = {
+ [DW100_CTRL_DEWARPING_MAP] = {
+ .ops = &dw100_ctrl_ops,
+ .type_ops = &dw100_ctrl_type_ops,
+ .id = V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP,
+ .name = "Dewarping Vertex Map",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0xffffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { DW100_DEF_LUT_W, DW100_DEF_LUT_H },
+ },
+};
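
From userspace, this U32 array control is typically refreshed with VIDIOC_S_EXT_CTRLS before streaming. The helper below is a hedged sketch rather than reference code: it assumes the control ID is exposed through the driver's uapi header (linux/dw100.h in this series) and omits error handling and the surrounding format negotiation.

/* Hypothetical userspace helper: upload a dewarping map (error handling elided). */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/dw100.h>        /* assumed location of the DW100 control ID */

static int dw100_set_map(int fd, uint32_t *map, uint32_t n_entries)
{
        struct v4l2_ext_control ctrl = {
                .id = V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP,
                .size = n_entries * sizeof(*map),
                .p_u32 = map,
        };
        struct v4l2_ext_controls ctrls = {
                .which = V4L2_CTRL_WHICH_CUR_VAL,
                .count = 1,
                .controls = &ctrl,
        };

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}

The number of entries must match the control dimensions reported after the capture format has been negotiated, since dw100_s_fmt() below resizes the control when the capture resolution changes.
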
+
+static int dw100_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(vq);
+ const struct v4l2_pix_format_mplane *format;
+ unsigned int i;
+
+ format = &dw100_get_q_data(ctx, vq->type)->pix_fmt;
+
+ if (*nplanes) {
+ if (*nplanes != format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; ++i) {
+ if (sizes[i] < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ *nplanes = format->num_planes;
+
+ for (i = 0; i < format->num_planes; ++i)
+ sizes[i] = format->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int dw100_buf_prepare(struct vb2_buffer *vb)
+{
+ unsigned int i;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dw100_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct dw100_device *dw_dev = ctx->dw_dev;
+ const struct v4l2_pix_format_mplane *pix_fmt =
+ &dw100_get_q_data(ctx, vb->vb2_queue->type)->pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(&dw_dev->pdev->dev, "%x field isn't supported\n",
+ vbuf->field);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < pix_fmt->num_planes; i++) {
+ unsigned long size = pix_fmt->plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_dbg(&dw_dev->pdev->dev,
+ "User buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void dw100_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dw100_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void dw100_return_all_buffers(struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ return;
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static int dw100_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(q);
+ struct dw100_q_data *q_data = dw100_get_q_data(ctx, q->type);
+ int ret;
+
+ q_data->sequence = 0;
+
+ ret = dw100_create_mapping(ctx);
+ if (ret)
+ goto err;
+
+ ret = pm_runtime_resume_and_get(&ctx->dw_dev->pdev->dev);
+ if (ret) {
+ dw100_destroy_mapping(ctx);
+ goto err;
+ }
+
+ return 0;
+err:
+ dw100_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void dw100_stop_streaming(struct vb2_queue *q)
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(q);
+
+ dw100_return_all_buffers(q, VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put_sync(&ctx->dw_dev->pdev->dev);
+
+ dw100_destroy_mapping(ctx);
+}
+
+static const struct vb2_ops dw100_qops = {
+ .queue_setup = dw100_queue_setup,
+ .buf_prepare = dw100_buf_prepare,
+ .buf_queue = dw100_buf_queue,
+ .start_streaming = dw100_start_streaming,
+ .stop_streaming = dw100_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int dw100_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct dw100_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &dw100_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->vq_mutex;
+ src_vq->dev = ctx->dw_dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &dw100_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->vq_mutex;
+ dst_vq->dev = ctx->dw_dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int dw100_open(struct file *file)
+{
+ struct dw100_device *dw_dev = video_drvdata(file);
+ struct dw100_ctx *ctx;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_pix_format_mplane *pix_fmt;
+ int ret, i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_init(&ctx->vq_mutex);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dw_dev = dw_dev;
+
+ ctx->q_data[DW100_QUEUE_SRC].fmt = &formats[0];
+
+ pix_fmt = &ctx->q_data[DW100_QUEUE_SRC].pix_fmt;
+ pix_fmt->field = V4L2_FIELD_NONE;
+ pix_fmt->colorspace = V4L2_COLORSPACE_REC709;
+ pix_fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_fmt->colorspace);
+ pix_fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_fmt->colorspace);
+ pix_fmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, pix_fmt->colorspace,
+ pix_fmt->ycbcr_enc);
+
+ v4l2_fill_pixfmt_mp(pix_fmt, formats[0].fourcc, DW100_DEF_W, DW100_DEF_H);
+
+ ctx->q_data[DW100_QUEUE_SRC].crop.top = 0;
+ ctx->q_data[DW100_QUEUE_SRC].crop.left = 0;
+ ctx->q_data[DW100_QUEUE_SRC].crop.width = DW100_DEF_W;
+ ctx->q_data[DW100_QUEUE_SRC].crop.height = DW100_DEF_H;
+
+ ctx->q_data[DW100_QUEUE_DST] = ctx->q_data[DW100_QUEUE_SRC];
+
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, ARRAY_SIZE(controls));
+ for (i = 0; i < ARRAY_SIZE(controls); i++) {
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(hdl, &controls[i], NULL);
+ if (hdl->error) {
+ dev_err(&ctx->dw_dev->pdev->dev,
+ "Adding control (%d) failed\n", i);
+ ret = hdl->error;
+ goto err;
+ }
+ }
+ ctx->fh.ctrl_handler = hdl;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dw_dev->m2m_dev,
+ ctx, &dw100_m2m_queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ return 0;
+
+err:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_destroy(&ctx->vq_mutex);
+ kfree(ctx);
+
+ return ret;
+}
+
+static int dw100_release(struct file *file)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_destroy(&ctx->vq_mutex);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations dw100_fops = {
+ .owner = THIS_MODULE,
+ .open = dw100_open,
+ .release = dw100_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int dw100_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "DW100 dewarper", sizeof(cap->card));
+
+ return 0;
+}
+
+static int dw100_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ int i, num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].types & to_dw100_fmt_type(f->type)) {
+ if (num == f->index) {
+ f->pixelformat = formats[i].fourcc;
+ return 0;
+ }
+ ++num;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int dw100_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct dw100_fmt *fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fmt = dw100_find_pixel_format(fsize->pixel_format,
+ DW100_FMT_OUTPUT | DW100_FMT_CAPTURE);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = dw100_frmsize_stepwise;
+
+ return 0;
+}
+
+static int dw100_g_fmt_vid(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct vb2_queue *vq;
+ struct dw100_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = dw100_get_q_data(ctx, f->type);
+
+ f->fmt.pix_mp = q_data->pix_fmt;
+
+ return 0;
+}
+
+static int dw100_try_fmt(struct file *file, struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ const struct dw100_fmt *fmt;
+
+ fmt = dw100_find_format(f);
+ if (!fmt) {
+ fmt = &formats[0];
+ pix->pixelformat = fmt->fourcc;
+ }
+
+ v4l2_apply_frmsize_constraints(&pix->width, &pix->height,
+ &dw100_frmsize_stepwise);
+
+ v4l2_fill_pixfmt_mp(pix, fmt->fourcc, pix->width, pix->height);
+
+ pix->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ if (pix->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+ if (pix->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ if (pix->quantization == V4L2_QUANTIZATION_DEFAULT)
+ pix->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false,
+ pix->colorspace,
+ pix->ycbcr_enc);
+ } else {
+ /*
+ * The DW100 can't perform colorspace conversion, so the colorspace
+ * on the capture queue must be identical to that of the output queue.
+ */
+ const struct dw100_q_data *q_data =
+ dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = q_data->pix_fmt.colorspace;
+ pix->xfer_func = q_data->pix_fmt.xfer_func;
+ pix->ycbcr_enc = q_data->pix_fmt.ycbcr_enc;
+ pix->quantization = q_data->pix_fmt.quantization;
+ }
+
+ return 0;
+}
+
+static int dw100_s_fmt(struct dw100_ctx *ctx, struct v4l2_format *f)
+{
+ struct dw100_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = dw100_get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ dev_dbg(&ctx->dw_dev->pdev->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = dw100_find_format(f);
+ q_data->pix_fmt = f->fmt.pix_mp;
+ q_data->crop.top = 0;
+ q_data->crop.left = 0;
+ q_data->crop.width = f->fmt.pix_mp.width;
+ q_data->crop.height = f->fmt.pix_mp.height;
+
+ /* Propagate the buffer encoding to the capture queue. */
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct dw100_q_data *dst_q_data =
+ dw100_get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ dst_q_data->pix_fmt.colorspace = q_data->pix_fmt.colorspace;
+ dst_q_data->pix_fmt.ycbcr_enc = q_data->pix_fmt.ycbcr_enc;
+ dst_q_data->pix_fmt.quantization = q_data->pix_fmt.quantization;
+ dst_q_data->pix_fmt.xfer_func = q_data->pix_fmt.xfer_func;
+ }
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "Setting format for type %u, wxh: %ux%u, fmt: %p4cc\n",
+ f->type, q_data->pix_fmt.width, q_data->pix_fmt.height,
+ &q_data->pix_fmt.pixelformat);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ int ret;
+ u32 dims[V4L2_CTRL_MAX_DIMS] = {};
+ struct v4l2_ctrl *ctrl = ctx->ctrls[DW100_CTRL_DEWARPING_MAP];
+
+ dims[0] = dw100_get_n_vertices_from_length(q_data->pix_fmt.width);
+ dims[1] = dw100_get_n_vertices_from_length(q_data->pix_fmt.height);
+
+ ret = v4l2_ctrl_modify_dimensions(ctrl, dims);
+
+ if (ret) {
+ dev_err(&ctx->dw_dev->pdev->dev,
+ "Modifying LUT dimensions failed with error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dw100_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ return dw100_try_fmt(file, f);
+}
+
+static int dw100_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ int ret;
+
+ ret = dw100_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = dw100_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dw100_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ return dw100_try_fmt(file, f);
+}
+
+static int dw100_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ int ret;
+
+ ret = dw100_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = dw100_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dw100_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct dw100_q_data *src_q_data;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ src_q_data = dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = src_q_data->pix_fmt.width;
+ sel->r.height = src_q_data->pix_fmt.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = src_q_data->crop.top;
+ sel->r.left = src_q_data->crop.left;
+ sel->r.width = src_q_data->crop.width;
+ sel->r.height = src_q_data->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw100_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct dw100_q_data *src_q_data;
+ u32 qscalex, qscaley, qscale;
+ int x, y, w, h;
+ unsigned int wframe, hframe;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ src_q_data = dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ ">>> Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ sel->type, sel->target,
+ sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ wframe = src_q_data->pix_fmt.width;
+ hframe = src_q_data->pix_fmt.height;
+
+ sel->r.top = clamp_t(int, sel->r.top, 0, hframe - DW100_MIN_H);
+ sel->r.left = clamp_t(int, sel->r.left, 0, wframe - DW100_MIN_W);
+ sel->r.height =
+ clamp(sel->r.height, DW100_MIN_H, hframe - sel->r.top);
+ sel->r.width =
+ clamp(sel->r.width, DW100_MIN_W, wframe - sel->r.left);
+
+ /* UQ16.16 for float operations */
+ qscalex = (sel->r.width << 16) / wframe;
+ qscaley = (sel->r.height << 16) / hframe;
+ y = sel->r.top;
+ x = sel->r.left;
+ if (qscalex == qscaley) {
+ qscale = qscalex;
+ } else {
+ switch (sel->flags) {
+ case 0:
+ qscale = (qscalex + qscaley) / 2;
+ break;
+ case V4L2_SEL_FLAG_GE:
+ qscale = max(qscaley, qscalex);
+ break;
+ case V4L2_SEL_FLAG_LE:
+ qscale = min(qscaley, qscalex);
+ break;
+ case V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ w = (u32)((((u64)wframe << 16) * qscale) >> 32);
+ h = (u32)((((u64)hframe << 16) * qscale) >> 32);
+ x = x + (sel->r.width - w) / 2;
+ y = y + (sel->r.height - h) / 2;
+ x = min(wframe - w, (unsigned int)max(0, x));
+ y = min(hframe - h, (unsigned int)max(0, y));
+
+ sel->r.top = y;
+ sel->r.left = x;
+ sel->r.width = w;
+ sel->r.height = h;
+
+ src_q_data->crop.top = sel->r.top;
+ src_q_data->crop.left = sel->r.left;
+ src_q_data->crop.width = sel->r.width;
+ src_q_data->crop.height = sel->r.height;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "<<< Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ sel->type, sel->target,
+ sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+
+ return 0;
+}
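
To make the UQ16.16 selection arithmetic above concrete, take a hypothetical 1920x1080 output-queue frame with a requested 960x540 crop at (100, 60); both axes scale by the same factor, so the GE/LE flag handling is not exercised:

        qscalex = (960 << 16) / 1920 = 0x8000  (0.5)
        qscaley = (540 << 16) / 1080 = 0x8000  -> qscale = 0x8000
        w = (((u64)1920 << 16) * 0x8000) >> 32 = 960
        h = (((u64)1080 << 16) * 0x8000) >> 32 = 540
        x = 100 + (960 - 960) / 2 = 100
        y = 60 + (540 - 540) / 2 = 60
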
+
+static const struct v4l2_ioctl_ops dw100_ioctl_ops = {
+ .vidioc_querycap = dw100_querycap,
+
+ .vidioc_enum_fmt_vid_cap = dw100_enum_fmt_vid,
+ .vidioc_enum_framesizes = dw100_enum_framesizes,
+ .vidioc_g_fmt_vid_cap_mplane = dw100_g_fmt_vid,
+ .vidioc_try_fmt_vid_cap_mplane = dw100_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = dw100_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = dw100_enum_fmt_vid,
+ .vidioc_g_fmt_vid_out_mplane = dw100_g_fmt_vid,
+ .vidioc_try_fmt_vid_out_mplane = dw100_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = dw100_s_fmt_vid_out,
+
+ .vidioc_g_selection = dw100_g_selection,
+ .vidioc_s_selection = dw100_s_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static void dw100_job_finish(struct dw100_device *dw_dev, bool with_error)
+{
+ struct dw100_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ enum vb2_buffer_state buf_state;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(dw_dev->m2m_dev);
+
+ if (!curr_ctx) {
+ dev_err(&dw_dev->pdev->dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ if (likely(!with_error))
+ buf_state = VB2_BUF_STATE_DONE;
+ else
+ buf_state = VB2_BUF_STATE_ERROR;
+
+ v4l2_m2m_buf_done(src_vb, buf_state);
+ v4l2_m2m_buf_done(dst_vb, buf_state);
+
+ dev_dbg(&dw_dev->pdev->dev, "Finishing transaction with%s error(s)\n",
+ with_error ? "" : "out");
+
+ v4l2_m2m_job_finish(dw_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
+}
+
+static void dw100_hw_reset(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+ val |= DW100_DEWARP_CTRL_ENABLE;
+ val |= DW100_DEWARP_CTRL_SOFT_RESET;
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+ val &= ~DW100_DEWARP_CTRL_SOFT_RESET;
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+}
+
+static void _dw100_hw_set_master_bus_enable(struct dw100_device *dw_dev,
+ unsigned int enable)
+{
+ u32 val;
+
+ dev_dbg(&dw_dev->pdev->dev, "%sable master bus\n",
+ enable ? "En" : "Dis");
+
+ val = dw100_read(dw_dev, DW100_BUS_CTRL);
+
+ if (enable)
+ val |= DW100_BUS_CTRL_AXI_MASTER_ENABLE;
+ else
+ val &= ~DW100_BUS_CTRL_AXI_MASTER_ENABLE;
+
+ dw100_write(dw_dev, DW100_BUS_CTRL, val);
+}
+
+static void dw100_hw_master_bus_enable(struct dw100_device *dw_dev)
+{
+ _dw100_hw_set_master_bus_enable(dw_dev, 1);
+}
+
+static void dw100_hw_master_bus_disable(struct dw100_device *dw_dev)
+{
+ _dw100_hw_set_master_bus_enable(dw_dev, 0);
+}
+
+static void dw100_hw_dewarp_start(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+
+ dev_dbg(&dw_dev->pdev->dev, "Starting Hardware CTRL:0x%08x\n", val);
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val | DW100_DEWARP_CTRL_START);
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+}
+
+static void dw100_hw_init_ctrl(struct dw100_device *dw_dev)
+{
+ u32 val;
+ /*
+ * Input format YUV422_SP
+ * Output format YUV422_SP
+ * No hardware handshake (SW)
+ * No automatic double src buffering (Single)
+ * No automatic double dst buffering (Single)
+ * No Black Line
+ * Prefetch image pixel traversal
+ */
+
+ val = DW100_DEWARP_CTRL_ENABLE
+ /* Valid only for auto prefetch mode */
+ | DW100_DEWARP_CTRL_PREFETCH_THRESHOLD(32);
+
+ /*
+ * Calculation mode is required to support any scaling factor,
+ * but it is about 4x slower than traversal mode.
+ *
+ * DW100_DEWARP_CTRL_PREFETCH_MODE_TRAVERSAL
+ * DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION
+ * DW100_DEWARP_CTRL_PREFETCH_MODE_AUTO
+ *
+ * TODO: Find heuristics requiring calculation mode
+ */
+ val |= DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION;
+
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+}
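
For reference, with the bit layout defined in dw100_regs.h further below, the control word programmed by this function works out to the following (a worked value, not additional driver code):

        DW100_DEWARP_CTRL_ENABLE                    = 0x00000001
        DW100_DEWARP_CTRL_PREFETCH_THRESHOLD(32)    = 32 << 18 = 0x00800000
        DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION = 1 << 16  = 0x00010000
        val                                         = 0x00810001
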
+
+static void dw100_hw_set_pixel_boundary(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = DW100_BOUNDARY_PIXEL_V(128)
+ | DW100_BOUNDARY_PIXEL_U(128)
+ | DW100_BOUNDARY_PIXEL_Y(0);
+
+ dw100_write(dw_dev, DW100_BOUNDARY_PIXEL, val);
+}
+
+static void dw100_hw_set_scale(struct dw100_device *dw_dev, u8 scale)
+{
+ dev_dbg(&dw_dev->pdev->dev, "Setting scale factor to %u\n", scale);
+
+ dw100_write(dw_dev, DW100_SCALE_FACTOR, scale);
+}
+
+static void dw100_hw_set_roi(struct dw100_device *dw_dev, u32 x, u32 y)
+{
+ u32 val;
+
+ dev_dbg(&dw_dev->pdev->dev, "Setting ROI region to %u.%u\n", x, y);
+
+ val = DW100_ROI_START_X(x) | DW100_ROI_START_Y(y);
+
+ dw100_write(dw_dev, DW100_ROI_START, val);
+}
+
+static void dw100_hw_set_src_crop(struct dw100_device *dw_dev,
+ const struct dw100_q_data *src_q_data,
+ const struct dw100_q_data *dst_q_data)
+{
+ const struct v4l2_rect *rect = &src_q_data->crop;
+ u32 src_scale, qscale, left_scale, top_scale;
+
+ /* HW Scale is UQ1.7 encoded */
+ src_scale = (rect->width << 7) / src_q_data->pix_fmt.width;
+ dw100_hw_set_scale(dw_dev, src_scale);
+
+ qscale = (dst_q_data->pix_fmt.width << 7) / src_q_data->pix_fmt.width;
+
+ left_scale = ((rect->left << 7) * qscale) >> 14;
+ top_scale = ((rect->top << 7) * qscale) >> 14;
+
+ dw100_hw_set_roi(dw_dev, left_scale, top_scale);
+}
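
A worked example of the UQ1.7 arithmetic above, assuming a hypothetical 1920-wide source, a 960x540 crop at (100, 60) and a 1920-wide destination:

        src_scale  = (960 << 7) / 1920        = 64   (0.5 in UQ1.7)
        qscale     = (1920 << 7) / 1920       = 128  (1.0 in UQ1.7)
        left_scale = ((100 << 7) * 128) >> 14 = 100
        top_scale  = ((60 << 7) * 128) >> 14  = 60
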
+
+static void dw100_hw_set_source(struct dw100_device *dw_dev,
+ const struct dw100_q_data *q_data,
+ struct vb2_buffer *buffer)
+{
+ u32 width, height, stride, fourcc, val;
+ const struct dw100_fmt *fmt = q_data->fmt;
+ dma_addr_t addr_y = vb2_dma_contig_plane_dma_addr(buffer, 0);
+ dma_addr_t addr_uv;
+
+ width = q_data->pix_fmt.width;
+ height = q_data->pix_fmt.height;
+ stride = q_data->pix_fmt.plane_fmt[0].bytesperline;
+ fourcc = q_data->fmt->fourcc;
+
+ if (q_data->pix_fmt.num_planes == 2)
+ addr_uv = vb2_dma_contig_plane_dma_addr(buffer, 1);
+ else
+ addr_uv = addr_y + (stride * height);
+
+ dev_dbg(&dw_dev->pdev->dev,
+ "Set HW source registers for %ux%u - stride %u, pixfmt: %p4cc, dma:%pad\n",
+ width, height, stride, &fourcc, &addr_y);
+
+ /* Pixel Format */
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+
+ val &= ~DW100_DEWARP_CTRL_INPUT_FORMAT_MASK;
+ val |= DW100_DEWARP_CTRL_INPUT_FORMAT(fmt->reg_format);
+
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+
+ /* Swap */
+ val = dw100_read(dw_dev, DW100_SWAP_CONTROL);
+
+ val &= ~DW100_SWAP_CONTROL_SRC_MASK;
+ /*
+ * Data swapping is performed only on the Y plane of the source image.
+ */
+ if (fmt->reg_swap_uv &&
+ fmt->reg_format == DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED)
+ val |= DW100_SWAP_CONTROL_SRC(DW100_SWAP_CONTROL_Y
+ (DW100_SWAP_CONTROL_BYTE));
+
+ dw100_write(dw_dev, DW100_SWAP_CONTROL, val);
+
+ /* Image resolution */
+ dw100_write(dw_dev, DW100_SRC_IMG_SIZE,
+ DW100_IMG_SIZE_WIDTH(width) | DW100_IMG_SIZE_HEIGHT(height));
+
+ dw100_write(dw_dev, DW100_SRC_IMG_STRIDE, stride);
+
+ /* Buffers */
+ dw100_write(dw_dev, DW100_SRC_IMG_Y_BASE, DW100_IMG_Y_BASE(addr_y));
+ dw100_write(dw_dev, DW100_SRC_IMG_UV_BASE, DW100_IMG_UV_BASE(addr_uv));
+}
+
+static void dw100_hw_set_destination(struct dw100_device *dw_dev,
+ const struct dw100_q_data *q_data,
+ const struct dw100_fmt *ifmt,
+ struct vb2_buffer *buffer)
+{
+ u32 width, height, stride, fourcc, val, size_y, size_uv;
+ const struct dw100_fmt *fmt = q_data->fmt;
+ dma_addr_t addr_y, addr_uv;
+
+ width = q_data->pix_fmt.width;
+ height = q_data->pix_fmt.height;
+ stride = q_data->pix_fmt.plane_fmt[0].bytesperline;
+ fourcc = fmt->fourcc;
+
+ addr_y = vb2_dma_contig_plane_dma_addr(buffer, 0);
+ size_y = q_data->pix_fmt.plane_fmt[0].sizeimage;
+
+ if (q_data->pix_fmt.num_planes == 2) {
+ addr_uv = vb2_dma_contig_plane_dma_addr(buffer, 1);
+ size_uv = q_data->pix_fmt.plane_fmt[1].sizeimage;
+ } else {
+ addr_uv = addr_y + ALIGN(stride * height, 16);
+ size_uv = size_y;
+ if (fmt->reg_format == DW100_DEWARP_CTRL_FORMAT_YUV420_SP)
+ size_uv /= 2;
+ }
+
+ dev_dbg(&dw_dev->pdev->dev,
+ "Set HW destination registers for %ux%u - stride %u, pixfmt: %p4cc, dma:%pad\n",
+ width, height, stride, &fourcc, &addr_y);
+
+ /* Pixel Format */
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+
+ val &= ~DW100_DEWARP_CTRL_OUTPUT_FORMAT_MASK;
+ val |= DW100_DEWARP_CTRL_OUTPUT_FORMAT(fmt->reg_format);
+
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+
+ /* Swap */
+ val = dw100_read(dw_dev, DW100_SWAP_CONTROL);
+
+ val &= ~DW100_SWAP_CONTROL_DST_MASK;
+
+ /*
+ * Avoid swapping twice
+ */
+ if (fmt->reg_swap_uv ^
+ (ifmt->reg_swap_uv && ifmt->reg_format !=
+ DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED)) {
+ if (fmt->reg_format == DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED)
+ val |= DW100_SWAP_CONTROL_DST(DW100_SWAP_CONTROL_Y
+ (DW100_SWAP_CONTROL_BYTE));
+ else
+ val |= DW100_SWAP_CONTROL_DST(DW100_SWAP_CONTROL_UV
+ (DW100_SWAP_CONTROL_BYTE));
+ }
+
+ dw100_write(dw_dev, DW100_SWAP_CONTROL, val);
+
+ /* Image resolution */
+ dw100_write(dw_dev, DW100_DST_IMG_SIZE,
+ DW100_IMG_SIZE_WIDTH(width) | DW100_IMG_SIZE_HEIGHT(height));
+ dw100_write(dw_dev, DW100_DST_IMG_STRIDE, stride);
+ dw100_write(dw_dev, DW100_DST_IMG_Y_BASE, DW100_IMG_Y_BASE(addr_y));
+ dw100_write(dw_dev, DW100_DST_IMG_UV_BASE, DW100_IMG_UV_BASE(addr_uv));
+ dw100_write(dw_dev, DW100_DST_IMG_Y_SIZE1, DW100_DST_IMG_Y_SIZE(size_y));
+ dw100_write(dw_dev, DW100_DST_IMG_UV_SIZE1,
+ DW100_DST_IMG_UV_SIZE(size_uv));
+}
+
+static void dw100_hw_set_mapping(struct dw100_device *dw_dev, dma_addr_t addr,
+ u32 width, u32 height)
+{
+ dev_dbg(&dw_dev->pdev->dev,
+ "Set HW mapping registers for %ux%u addr:%pad",
+ width, height, &addr);
+
+ dw100_write(dw_dev, DW100_MAP_LUT_ADDR, DW100_MAP_LUT_ADDR_ADDR(addr));
+ dw100_write(dw_dev, DW100_MAP_LUT_SIZE, DW100_MAP_LUT_SIZE_WIDTH(width)
+ | DW100_MAP_LUT_SIZE_HEIGHT(height));
+}
+
+static void dw100_hw_clear_irq(struct dw100_device *dw_dev, unsigned int irq)
+{
+ dw100_write(dw_dev, DW100_INTERRUPT_STATUS,
+ DW100_INTERRUPT_STATUS_INT_CLEAR(irq));
+}
+
+static void dw100_hw_enable_irq(struct dw100_device *dw_dev)
+{
+ dw100_write(dw_dev, DW100_INTERRUPT_STATUS,
+ DW100_INTERRUPT_STATUS_INT_ENABLE_MASK);
+}
+
+static void dw100_hw_disable_irq(struct dw100_device *dw_dev)
+{
+ dw100_write(dw_dev, DW100_INTERRUPT_STATUS, 0);
+}
+
+static u32 dw_hw_get_pending_irqs(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = dw100_read(dw_dev, DW100_INTERRUPT_STATUS);
+
+ return DW100_INTERRUPT_STATUS_INT_STATUS(val);
+}
+
+static irqreturn_t dw100_irq_handler(int irq, void *dev_id)
+{
+ struct dw100_device *dw_dev = dev_id;
+ u32 pending_irqs, err_irqs, frame_done_irq;
+ bool with_error = true;
+
+ pending_irqs = dw_hw_get_pending_irqs(dw_dev);
+ frame_done_irq = pending_irqs & DW100_INTERRUPT_STATUS_INT_FRAME_DONE;
+ err_irqs = DW100_INTERRUPT_STATUS_INT_ERR_STATUS(pending_irqs);
+
+ if (frame_done_irq) {
+ dev_dbg(&dw_dev->pdev->dev, "Frame done interrupt\n");
+ with_error = false;
+ err_irqs &= ~DW100_INTERRUPT_STATUS_INT_ERR_STATUS
+ (DW100_INTERRUPT_STATUS_INT_ERR_FRAME_DONE);
+ }
+
+ if (err_irqs)
+ dev_err(&dw_dev->pdev->dev, "Interrupt error: %#x\n", err_irqs);
+
+ dw100_hw_disable_irq(dw_dev);
+ dw100_hw_master_bus_disable(dw_dev);
+ dw100_hw_clear_irq(dw_dev, pending_irqs |
+ DW100_INTERRUPT_STATUS_INT_ERR_TIME_OUT);
+
+ dw100_job_finish(dw_dev, with_error);
+
+ return IRQ_HANDLED;
+}
+
+static void dw100_start(struct dw100_ctx *ctx, struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct dw100_device *dw_dev = ctx->dw_dev;
+
+ out_vb->sequence =
+ dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)->sequence++;
+ in_vb->sequence =
+ dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)->sequence++;
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "Starting queues %p->%p, sequence %u->%u\n",
+ v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE),
+ v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE),
+ in_vb->sequence, out_vb->sequence);
+
+ v4l2_m2m_buf_copy_metadata(in_vb, out_vb, true);
+
+ /* Now, let's deal with hardware ... */
+ dw100_hw_master_bus_disable(dw_dev);
+ dw100_hw_init_ctrl(dw_dev);
+ dw100_hw_set_pixel_boundary(dw_dev);
+ dw100_hw_set_src_crop(dw_dev, &ctx->q_data[DW100_QUEUE_SRC],
+ &ctx->q_data[DW100_QUEUE_DST]);
+ dw100_hw_set_source(dw_dev, &ctx->q_data[DW100_QUEUE_SRC],
+ &in_vb->vb2_buf);
+ dw100_hw_set_destination(dw_dev, &ctx->q_data[DW100_QUEUE_DST],
+ ctx->q_data[DW100_QUEUE_SRC].fmt,
+ &out_vb->vb2_buf);
+ dw100_hw_set_mapping(dw_dev, ctx->map_dma,
+ ctx->map_width, ctx->map_height);
+ dw100_hw_enable_irq(dw_dev);
+ dw100_hw_dewarp_start(dw_dev);
+
+ /* Enable Bus */
+ dw100_hw_master_bus_enable(dw_dev);
+}
+
+static void dw100_device_run(void *priv)
+{
+ struct dw100_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ dw100_start(ctx, src_buf, dst_buf);
+}
+
+static const struct v4l2_m2m_ops dw100_m2m_ops = {
+ .device_run = dw100_device_run,
+};
+
+static struct video_device *dw100_init_video_device(struct dw100_device *dw_dev)
+{
+ struct video_device *vfd = &dw_dev->vfd;
+
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->fops = &dw100_fops;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vfd->ioctl_ops = &dw100_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dw_dev->v4l2_dev;
+ vfd->lock = &dw_dev->vfd_mutex;
+
+ strscpy(vfd->name, DRV_NAME, sizeof(vfd->name));
+ mutex_init(vfd->lock);
+ video_set_drvdata(vfd, dw_dev);
+
+ return vfd;
+}
+
+static int dw100_dump_regs_show(struct seq_file *m, void *private)
+{
+ struct dw100_device *dw_dev = m->private;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&dw_dev->pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = dw100_dump_regs(m);
+
+ pm_runtime_put_sync(&dw_dev->pdev->dev);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(dw100_dump_regs);
+
+static void dw100_debugfs_init(struct dw100_device *dw_dev)
+{
+ dw_dev->debugfs_root =
+ debugfs_create_dir(dev_name(&dw_dev->pdev->dev), NULL);
+
+ debugfs_create_file("dump_regs", 0600, dw_dev->debugfs_root, dw_dev,
+ &dw100_dump_regs_fops);
+}
+
+static void dw100_debugfs_exit(struct dw100_device *dw_dev)
+{
+ debugfs_remove_recursive(dw_dev->debugfs_root);
+}
+
+static int dw100_probe(struct platform_device *pdev)
+{
+ struct dw100_device *dw_dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret, irq;
+
+ dw_dev = devm_kzalloc(&pdev->dev, sizeof(*dw_dev), GFP_KERNEL);
+ if (!dw_dev)
+ return -ENOMEM;
+ dw_dev->pdev = pdev;
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dw_dev->clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get clocks: %d\n", ret);
+ return ret;
+ }
+ dw_dev->num_clks = ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dw_dev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dw_dev->mmio))
+ return PTR_ERR(dw_dev->mmio);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ platform_set_drvdata(pdev, dw_dev);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to resume the device: %d\n", ret);
+ goto err_pm;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ ret = devm_request_irq(&pdev->dev, irq, dw100_irq_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dw_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
+ goto err_pm;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dw_dev->v4l2_dev);
+ if (ret)
+ goto err_pm;
+
+ vfd = dw100_init_video_device(dw_dev);
+
+ dw_dev->m2m_dev = v4l2_m2m_init(&dw100_m2m_ops);
+ if (IS_ERR(dw_dev->m2m_dev)) {
+ dev_err(&pdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dw_dev->m2m_dev);
+ goto err_v4l2;
+ }
+
+ dw_dev->mdev.dev = &pdev->dev;
+ strscpy(dw_dev->mdev.model, "dw100", sizeof(dw_dev->mdev.model));
+ media_device_init(&dw_dev->mdev);
+ dw_dev->v4l2_dev.mdev = &dw_dev->mdev;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register video device\n");
+ goto err_m2m;
+ }
+
+ ret = v4l2_m2m_register_media_controller(dw_dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init mem2mem media controller\n");
+ goto error_v4l2;
+ }
+
+ ret = media_device_register(&dw_dev->mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register mem2mem media device\n");
+ goto error_m2m_mc;
+ }
+
+ dw100_debugfs_init(dw_dev);
+
+ dev_info(&pdev->dev,
+ "dw100 v4l2 m2m registered as /dev/video%u\n", vfd->num);
+
+ return 0;
+
+error_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dw_dev->m2m_dev);
+error_v4l2:
+ video_unregister_device(vfd);
+err_m2m:
+ media_device_cleanup(&dw_dev->mdev);
+ v4l2_m2m_release(dw_dev->m2m_dev);
+err_v4l2:
+ v4l2_device_unregister(&dw_dev->v4l2_dev);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int dw100_remove(struct platform_device *pdev)
+{
+ struct dw100_device *dw_dev = platform_get_drvdata(pdev);
+
+ dw100_debugfs_exit(dw_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ media_device_unregister(&dw_dev->mdev);
+ v4l2_m2m_unregister_media_controller(dw_dev->m2m_dev);
+ media_device_cleanup(&dw_dev->mdev);
+
+ video_unregister_device(&dw_dev->vfd);
+ mutex_destroy(dw_dev->vfd.lock);
+ v4l2_m2m_release(dw_dev->m2m_dev);
+ v4l2_device_unregister(&dw_dev->v4l2_dev);
+
+ return 0;
+}
+
+static int __maybe_unused dw100_runtime_suspend(struct device *dev)
+{
+ struct dw100_device *dw_dev = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(dw_dev->num_clks, dw_dev->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dw100_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct dw100_device *dw_dev = dev_get_drvdata(dev);
+
+ ret = clk_bulk_prepare_enable(dw_dev->num_clks, dw_dev->clks);
+
+ if (ret)
+ return ret;
+
+ dw100_hw_reset(dw_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw100_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw100_runtime_suspend,
+ dw100_runtime_resume, NULL)
+};
+
+static const struct of_device_id dw100_dt_ids[] = {
+ { .compatible = "nxp,imx8mp-dw100", .data = NULL },
+ { },
+};
+MODULE_DEVICE_TABLE(of, dw100_dt_ids);
+
+static struct platform_driver dw100_driver = {
+ .probe = dw100_probe,
+ .remove = dw100_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &dw100_pm,
+ .of_match_table = dw100_dt_ids,
+ },
+};
+
+module_platform_driver(dw100_driver);
+
+MODULE_DESCRIPTION("DW100 Hardware dewarper");
+MODULE_AUTHOR("Xavier Roumegue <Xavier.Roumegue@oss.nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/nxp/dw100/dw100_regs.h b/drivers/media/platform/nxp/dw100/dw100_regs.h
new file mode 100644
index 000000000000..e85dfeff9056
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/dw100_regs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DW100 Hardware dewarper
+ *
+ * Copyright 2022 NXP
+ * Author: Xavier Roumegue (xavier.roumegue@oss.nxp.com)
+ */
+
+#ifndef _DW100_REGS_H_
+#define _DW100_REGS_H_
+
+/* AHB register offset */
+#define DW100_DEWARP_ID 0x00
+#define DW100_DEWARP_CTRL 0x04
+#define DW100_DEWARP_CTRL_ENABLE BIT(0)
+#define DW100_DEWARP_CTRL_START BIT(1)
+#define DW100_DEWARP_CTRL_SOFT_RESET BIT(2)
+#define DW100_DEWARP_CTRL_FORMAT_YUV422_SP 0UL
+#define DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED 1UL
+#define DW100_DEWARP_CTRL_FORMAT_YUV420_SP 2UL
+#define DW100_DEWARP_CTRL_INPUT_FORMAT_MASK GENMASK(5, 4)
+#define DW100_DEWARP_CTRL_INPUT_FORMAT(x) ((x) << 4)
+#define DW100_DEWARP_CTRL_OUTPUT_FORMAT(x) ((x) << 6)
+#define DW100_DEWARP_CTRL_OUTPUT_FORMAT_MASK GENMASK(7, 6)
+#define DW100_DEWARP_CTRL_SRC_AUTO_SHADOW BIT(8)
+#define DW100_DEWARP_CTRL_HW_HANDSHAKE BIT(9)
+#define DW100_DEWARP_CTRL_DST_AUTO_SHADOW BIT(10)
+#define DW100_DEWARP_CTRL_SPLIT_LINE BIT(11)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_MASK GENMASK(17, 16)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_TRAVERSAL (0UL << 16)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION (1UL << 16)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_AUTO (2UL << 16)
+#define DW100_DEWARP_CTRL_PREFETCH_THRESHOLD_MASK GENMASK(24, 18)
+#define DW100_DEWARP_CTRL_PREFETCH_THRESHOLD(x) ((x) << 18)
+
+#define DW100_MAP_LUT_ADDR 0x08
+#define DW100_MAP_LUT_ADDR_ADDR(addr) (((addr) >> 4) & GENMASK(29, 0))
+#define DW100_MAP_LUT_SIZE 0x0c
+#define DW100_MAP_LUT_SIZE_WIDTH(w) (((w) & GENMASK(10, 0)) << 0)
+#define DW100_MAP_LUT_SIZE_HEIGHT(h) (((h) & GENMASK(10, 0)) << 16)
+#define DW100_SRC_IMG_Y_BASE 0x10
+#define DW100_IMG_Y_BASE(base) (((base) >> 4) & GENMASK(29, 0))
+#define DW100_SRC_IMG_UV_BASE 0x14
+#define DW100_IMG_UV_BASE(base) (((base) >> 4) & GENMASK(29, 0))
+#define DW100_SRC_IMG_SIZE 0x18
+#define DW100_IMG_SIZE_WIDTH(w) (((w) & GENMASK(12, 0)) << 0)
+#define DW100_IMG_SIZE_HEIGHT(h) (((h) & GENMASK(12, 0)) << 16)
+
+#define DW100_SRC_IMG_STRIDE 0x1c
+#define DW100_MAP_LUT_ADDR2 0x20
+#define DW100_MAP_LUT_SIZE2 0x24
+#define DW100_SRC_IMG_Y_BASE2 0x28
+#define DW100_SRC_IMG_UV_BASE2 0x2c
+#define DW100_SRC_IMG_SIZE2 0x30
+#define DW100_SRC_IMG_STRIDE2 0x34
+#define DW100_DST_IMG_Y_BASE 0x38
+#define DW100_DST_IMG_UV_BASE 0x3c
+#define DW100_DST_IMG_SIZE 0x40
+#define DW100_DST_IMG_STRIDE 0x44
+#define DW100_DST_IMG_Y_BASE2 0x48
+#define DW100_DST_IMG_UV_BASE2 0x4c
+#define DW100_DST_IMG_SIZE2 0x50
+#define DW100_DST_IMG_STRIDE2 0x54
+#define DW100_SWAP_CONTROL 0x58
+#define DW100_SWAP_CONTROL_BYTE BIT(0)
+#define DW100_SWAP_CONTROL_SHORT BIT(1)
+#define DW100_SWAP_CONTROL_WORD BIT(2)
+#define DW100_SWAP_CONTROL_LONG BIT(3)
+#define DW100_SWAP_CONTROL_Y(x) (((x) & GENMASK(3, 0)) << 0)
+#define DW100_SWAP_CONTROL_UV(x) (((x) & GENMASK(3, 0)) << 4)
+#define DW100_SWAP_CONTROL_SRC(x) (((x) & GENMASK(7, 0)) << 0)
+#define DW100_SWAP_CONTROL_DST(x) (((x) & GENMASK(7, 0)) << 8)
+#define DW100_SWAP_CONTROL_SRC2(x) (((x) & GENMASK(7, 0)) << 16)
+#define DW100_SWAP_CONTROL_DST2(x) (((x) & GENMASK(7, 0)) << 24)
+#define DW100_SWAP_CONTROL_SRC_MASK GENMASK(7, 0)
+#define DW100_SWAP_CONTROL_DST_MASK GENMASK(15, 8)
+#define DW100_SWAP_CONTROL_SRC2_MASK GENMASK(23, 16)
+#define DW100_SWAP_CONTROL_DST2_MASK GENMASK(31, 24)
+#define DW100_VERTICAL_SPLIT_LINE 0x5c
+#define DW100_HORIZON_SPLIT_LINE 0x60
+#define DW100_SCALE_FACTOR 0x64
+#define DW100_ROI_START 0x68
+#define DW100_ROI_START_X(x) (((x) & GENMASK(12, 0)) << 0)
+#define DW100_ROI_START_Y(y) (((y) & GENMASK(12, 0)) << 16)
+#define DW100_BOUNDARY_PIXEL 0x6c
+#define DW100_BOUNDARY_PIXEL_V(v) (((v) & GENMASK(7, 0)) << 0)
+#define DW100_BOUNDARY_PIXEL_U(u) (((u) & GENMASK(7, 0)) << 8)
+#define DW100_BOUNDARY_PIXEL_Y(y) (((y) & GENMASK(7, 0)) << 16)
+
+#define DW100_INTERRUPT_STATUS 0x70
+#define DW100_INTERRUPT_STATUS_INT_FRAME_DONE BIT(0)
+#define DW100_INTERRUPT_STATUS_INT_ERR_TIME_OUT BIT(1)
+#define DW100_INTERRUPT_STATUS_INT_ERR_AXI_RESP BIT(2)
+#define DW100_INTERRUPT_STATUS_INT_ERR_X BIT(3)
+#define DW100_INTERRUPT_STATUS_INT_ERR_MB_FETCH BIT(4)
+#define DW100_INTERRUPT_STATUS_INT_ERR_FRAME2 BIT(5)
+#define DW100_INTERRUPT_STATUS_INT_ERR_FRAME3 BIT(6)
+#define DW100_INTERRUPT_STATUS_INT_ERR_FRAME_DONE BIT(7)
+#define DW100_INTERRUPT_STATUS_INT_ERR_STATUS(x) (((x) >> 1) & 0x7f)
+#define DW100_INTERRUPT_STATUS_INT_STATUS(x) ((x) & 0xff)
+
+#define DW100_INTERRUPT_STATUS_INT_ENABLE_MASK GENMASK(15, 8)
+#define DW100_INTERRUPT_STATUS_INT_ENABLE(x) (((x) & GENMASK(7, 0)) << 8)
+#define DW100_INTERRUPT_STATUS_FRAME_BUSY BIT(16)
+#define DW100_INTERRUPT_STATUS_INT_CLEAR(x) (((x) & GENMASK(7, 0)) << 24)
+#define DW100_BUS_CTRL 0x74
+#define DW100_BUS_CTRL_AXI_MASTER_ENABLE BIT(31)
+#define DW100_BUS_CTRL1 0x78
+#define DW100_BUS_TIME_OUT_CYCLE 0x7c
+#define DW100_DST_IMG_Y_SIZE1 0x80
+#define DW100_DST_IMG_Y_SIZE(sz) (((sz) >> 4) & GENMASK(29, 0))
+#define DW100_DST_IMG_UV_SIZE(sz) (((sz) >> 4) & GENMASK(29, 0))
+#define DW100_DST_IMG_UV_SIZE1 0x84
+#define DW100_DST_IMG_Y_SIZE2 0x88
+#define DW100_DST_IMG_UV_SIZE2 0x8c
+
+#endif /* _DW100_REGS_H_ */
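
Note that the base-address and size macros above store addr >> 4 in a 30-bit field, which suggests the hardware expects 16-byte-aligned DMA addresses and sizes. For example (a worked value, not part of the header):

        DW100_MAP_LUT_ADDR_ADDR(0x48000040) = (0x48000040 >> 4) & GENMASK(29, 0) = 0x04800004
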
diff --git a/drivers/media/platform/renesas/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
index 37cf33c7e6ca..2f6f0c6ae555 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
@@ -22,6 +22,7 @@
struct clk;
struct device;
struct rcar_fcp_device;
+struct reset_control;
struct vsp1_drm;
struct vsp1_entity;
@@ -54,6 +55,7 @@ struct vsp1_uif;
#define VSP1_HAS_HGT BIT(8)
#define VSP1_HAS_BRS BIT(9)
#define VSP1_HAS_EXT_DL BIT(10)
+#define VSP1_HAS_NON_ZERO_LBA BIT(11)
struct vsp1_device_info {
u32 version;
@@ -66,6 +68,7 @@ struct vsp1_device_info {
unsigned int uif_count;
unsigned int wpf_count;
unsigned int num_bru_inputs;
+ u8 soc;
bool uapi;
};
@@ -79,6 +82,7 @@ struct vsp1_device {
void __iomem *mmio;
struct rcar_fcp_device *fcp;
struct device *bus_master;
+ struct reset_control *rstc;
struct vsp1_brx *brs;
struct vsp1_brx *bru;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index 0c2507dc03d6..c6f25200982c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -856,6 +856,8 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
rpf->mem.addr[1] = cfg->mem[1];
rpf->mem.addr[2] = cfg->mem[2];
+ rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0;
+
vsp1->drm->inputs[rpf_index].crop = cfg->src;
vsp1->drm->inputs[rpf_index].compose = cfg->dst;
vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index 1f73c48eb738..c260d318d298 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -16,6 +16,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/videodev2.h>
#include <media/rcar-fcp.h>
@@ -622,6 +623,7 @@ static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
rcar_fcp_disable(vsp1->fcp);
+ reset_control_assert(vsp1->rstc);
return 0;
}
@@ -631,13 +633,31 @@ static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
int ret;
+ ret = reset_control_deassert(vsp1->rstc);
+ if (ret < 0)
+ return ret;
+
if (vsp1->info) {
+ /*
+ * On R-Car Gen2 and RZ/G1, accessing VSP1 registers right after
+ * deasserting the reset can cause a lock-up, so a short delay is
+ * needed here to avoid it.
+ */
+ if (vsp1->info->gen == 2)
+ udelay(1);
+
ret = vsp1_device_init(vsp1);
if (ret < 0)
- return ret;
+ goto done;
}
- return rcar_fcp_enable(vsp1->fcp);
+ ret = rcar_fcp_enable(vsp1->fcp);
+
+done:
+ if (ret < 0)
+ reset_control_assert(vsp1->rstc);
+
+ return ret;
}
static const struct dev_pm_ops vsp1_pm_ops = {
@@ -768,6 +788,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_V3,
.model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_V3H,
.gen = 3,
.features = VSP1_HAS_BRS | VSP1_HAS_BRU,
.lif_count = 1,
@@ -776,6 +797,17 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.wpf_count = 1,
.num_bru_inputs = 5,
}, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_V3M,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_NON_ZERO_LBA,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ }, {
.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
.model = "VSP2-DL",
.gen = 3,
@@ -798,11 +830,55 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
},
};
+static const struct vsp1_device_info rzg2l_vsp2_device_info = {
+ .version = VI6_IP_VERSION_MODEL_VSPD_RZG2L,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_RZG2L,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL
+ | VSP1_HAS_NON_ZERO_LBA,
+ .lif_count = 1,
+ .rpf_count = 2,
+ .wpf_count = 1,
+};
+
+static const struct vsp1_device_info *vsp1_lookup_info(struct vsp1_device *vsp1)
+{
+ const struct vsp1_device_info *info;
+ unsigned int i;
+ u32 model;
+ u32 soc;
+
+ /*
+ * Try the info stored in match data first for devices that don't have
+ * a version register.
+ */
+ info = of_device_get_match_data(vsp1->dev);
+ if (info) {
+ vsp1->version = VI6_IP_VERSION_VSP_SW | info->version | info->soc;
+ return info;
+ }
+
+ vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
+ model = vsp1->version & VI6_IP_VERSION_MODEL_MASK;
+ soc = vsp1->version & VI6_IP_VERSION_SOC_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ info = &vsp1_device_infos[i];
+
+ if (model == info->version && (!info->soc || soc == info->soc))
+ return info;
+ }
+
+ dev_err(vsp1->dev, "unsupported IP version 0x%08x\n", vsp1->version);
+
+ return NULL;
+}
+
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
struct device_node *fcp_node;
- unsigned int i;
int ret;
int irq;
@@ -825,6 +901,11 @@ static int vsp1_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ vsp1->rstc = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(vsp1->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vsp1->rstc),
+ "failed to get reset control\n");
+
/* FCP (optional). */
fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
if (fcp_node) {
@@ -853,19 +934,8 @@ static int vsp1_probe(struct platform_device *pdev)
if (ret < 0)
goto done;
- vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
-
- for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
- if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
- vsp1_device_infos[i].version) {
- vsp1->info = &vsp1_device_infos[i];
- break;
- }
- }
-
+ vsp1->info = vsp1_lookup_info(vsp1);
if (!vsp1->info) {
- dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
- vsp1->version);
vsp1_device_put(vsp1);
ret = -ENXIO;
goto done;
@@ -922,6 +992,7 @@ static int vsp1_remove(struct platform_device *pdev)
static const struct of_device_id vsp1_of_match[] = {
{ .compatible = "renesas,vsp1" },
{ .compatible = "renesas,vsp2" },
+ { .compatible = "renesas,r9a07g044-vsp2", .data = &rzg2l_vsp2_device_info },
{ },
};
MODULE_DEVICE_TABLE(of, vsp1_of_match);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lif.c b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
index 6a6857ac9327..186a5730e1e3 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
@@ -107,6 +107,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
case VI6_IP_VERSION_MODEL_VSPDL_GEN3:
case VI6_IP_VERSION_MODEL_VSPD_V3:
+ case VI6_IP_VERSION_MODEL_VSPD_RZG2L:
hbth = 0;
obth = 1500;
lbth = 0;
@@ -130,13 +131,12 @@ static void lif_configure_stream(struct vsp1_entity *entity,
VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
/*
- * On R-Car V3M the LIF0 buffer attribute register has to be set to a
- * non-default value to guarantee proper operation (otherwise artifacts
- * may appear on the output). The value required by the manual is not
- * explained but is likely a buffer size or threshold.
+ * On R-Car V3M and RZ/G2L the LIF0 buffer attribute register has to be
+ * set to a non-default value to guarantee proper operation (otherwise
+ * artifacts may appear on the output). The value required by the
+ * manual is not explained but is likely a buffer size or threshold.
*/
- if ((entity->vsp1->version & VI6_IP_VERSION_MASK) ==
- (VI6_IP_VERSION_MODEL_VSPD_V3 | VI6_IP_VERSION_SOC_V3M))
+ if (vsp1_feature(entity->vsp1, VSP1_HAS_NON_ZERO_LBA))
vsp1_lif_write(lif, dlb, VI6_LIF_LBA,
VI6_LIF_LBA_LBA0 |
(1536 << VI6_LIF_LBA_LBA1_SHIFT));
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index fae7286eb01e..8928f4c6bb55 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -767,6 +767,8 @@
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
#define VI6_IP_VERSION_MODEL_VSPD_V3U (0x1c << 8)
+/* RZ/G2L SoCs have no version register, so use 0x80 as the model version */
+#define VI6_IP_VERSION_MODEL_VSPD_RZG2L (0x80 << 8)
#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
@@ -780,6 +782,10 @@
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
+/* RZ/G2L SoCs have no version register, so use 0x80 for SoC identification */
+#define VI6_IP_VERSION_SOC_RZG2L (0x80 << 0)
+
+#define VI6_IP_VERSION_VSP_SW (0xfffe << 16) /* SW VSP version */
/* -----------------------------------------------------------------------------
* RPF CLUT Registers
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index e8e0ee5f2277..df1606b49d77 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -305,7 +305,7 @@ static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
* @video: the video node
*
* This function completes the current buffer by filling its sequence number,
- * time stamp and payload size, and hands it back to the videobuf core.
+ * time stamp and payload size, and hands it back to the vb2 core.
*
* Return the next queued buffer or NULL if the queue is empty.
*/
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 2f8df74ad0fd..61b25fcf826e 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -816,7 +816,7 @@ static int rga_probe(struct platform_device *pdev)
ret = rga_parse_dt(rga);
if (ret)
- dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return dev_err_probe(&pdev->dev, ret, "Unable to parse OF data\n");
pm_runtime_enable(rga->dev);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.h b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
index 7a058f3e6298..2b0760add092 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
@@ -215,7 +215,7 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
- * @vb: v4l videobuf buffer
+ * @vb: v4l vb2 buffer
* @list: linked list structure for buffer queue
* @addr: precalculated DMA address set
* @index: buffer index for the output DMA engine
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is.c b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
index e3072d69c49f..a7704ff069d6 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
@@ -213,6 +213,7 @@ static int fimc_is_register_subdevs(struct fimc_is *is)
if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
of_node_put(child);
+ of_node_put(i2c_bus);
return ret;
}
index++;
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index 761341934925..fca5c6405eec 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -323,7 +323,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
}
ctx->sequence++;
/* The MFC returns address of the buffer, now we have to
- * check which videobuf does it correspond to */
+ * check which vb2_buffer it corresponds to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
@@ -1399,6 +1399,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
/* Deinit MFC if probe had failed */
err_enc_reg:
video_unregister_device(dev->vfd_dec);
+ dev->vfd_dec = NULL;
err_dec_reg:
video_device_release(dev->vfd_enc);
err_enc_alloc:
@@ -1444,8 +1445,6 @@ static int s5p_mfc_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_enc);
video_unregister_device(dev->vfd_dec);
- video_device_release(dev->vfd_enc);
- video_device_release(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
s5p_mfc_unconfigure_dma_memory(dev);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 1d46e113d01d..74d64a20ba5b 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -177,7 +177,7 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
/*
* CSI will lookup the next dma buffer for next frame before the
- * the current frame done IRQ triggered. This is not documented
+ * current frame done IRQ triggered. This is not documented
* but reported by Ondřej Jirman.
* The BSP code has workaround for this too. It skip to mark the
* first buffer as frame done for VB2 and pass the second buffer
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.h b/drivers/media/platform/ti/am437x/am437x-vpfe.h
index 05ee37db0273..f8b4e917b91a 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.h
@@ -267,7 +267,7 @@ struct vpfe_device {
* is different from the image window
*/
struct v4l2_rect crop;
- /* Buffer queue used in video-buf */
+ /* Buffer queue used in vb2 */
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index e136d70b4048..16ae52879a79 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -622,12 +622,12 @@ static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
static struct v4l2_mbus_framefmt *
cal_camerarx_get_pad_format(struct cal_camerarx *phy,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&phy->subdev, sd_state, pad);
+ return v4l2_subdev_get_try_format(&phy->subdev, state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &phy->formats[pad];
default:
@@ -653,7 +653,7 @@ static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
}
static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -670,7 +670,7 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
goto out;
}
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_SINK,
code->which);
code->code = fmt->code;
@@ -690,7 +690,7 @@ out:
}
static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -706,7 +706,7 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
if (cal_rx_pad_is_source(fse->pad)) {
struct v4l2_mbus_framefmt *fmt;
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_SINK,
fse->which);
if (fse->code != fmt->code) {
@@ -738,7 +738,7 @@ out:
}
static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -746,7 +746,7 @@ static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&phy->mutex);
- fmt = cal_camerarx_get_pad_format(phy, sd_state, format->pad,
+ fmt = cal_camerarx_get_pad_format(phy, state, format->pad,
format->which);
format->format = *fmt;
@@ -756,7 +756,7 @@ static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -766,7 +766,7 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
/* No transcoding, source and sink formats must match. */
if (cal_rx_pad_is_source(format->pad))
- return cal_camerarx_sd_get_fmt(sd, sd_state, format);
+ return cal_camerarx_sd_get_fmt(sd, state, format);
/*
* Default to the first format if the requested media bus code isn't
@@ -792,12 +792,12 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&phy->mutex);
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_SINK,
format->which);
*fmt = format->format;
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_FIRST_SOURCE,
format->which);
*fmt = format->format;
@@ -808,10 +808,10 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format format = {
- .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ .which = state ? V4L2_SUBDEV_FORMAT_TRY
: V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = CAL_CAMERARX_PAD_SINK,
.format = {
@@ -826,7 +826,7 @@ static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
},
};
- return cal_camerarx_sd_set_fmt(sd, sd_state, &format);
+ return cal_camerarx_sd_set_fmt(sd, state, &format);
}
static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
@@ -871,6 +871,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
phy->cal = cal;
phy->instance = instance;
+ spin_lock_init(&phy->vc_lock);
mutex_init(&phy->mutex);
phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index 776da0cfcdbe..21e3d0aabf70 100644
--- a/drivers/media/platform/ti/cal/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -191,7 +191,7 @@ static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
struct cal_ctx *ctx = video_drvdata(file);
const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_size_enum fse;
- int ret, found;
+ int found;
fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
if (!fmtinfo) {
@@ -206,12 +206,13 @@ static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
/* check for/find a valid width/height */
- ret = 0;
found = false;
fse.pad = 0;
fse.code = fmtinfo->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
for (fse.index = 0; ; fse.index++) {
+ int ret;
+
ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size,
NULL, &fse);
if (ret)
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 425b4f4b7ed7..56b61c0583cf 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -543,7 +543,22 @@ void cal_ctx_unprepare(struct cal_ctx *ctx)
void cal_ctx_start(struct cal_ctx *ctx)
{
- ctx->sequence = 0;
+ struct cal_camerarx *phy = ctx->phy;
+
+ /*
+ * Reset the frame number & sequence number, but only if the
+ * virtual channel is not already in use.
+ */
+
+ spin_lock(&phy->vc_lock);
+
+ if (phy->vc_enable_count[ctx->vc]++ == 0) {
+ phy->vc_frame_number[ctx->vc] = 0;
+ phy->vc_sequence[ctx->vc] = 0;
+ }
+
+ spin_unlock(&phy->vc_lock);
+
ctx->dma.state = CAL_DMA_RUNNING;
/* Configure the CSI-2, pixel processing and write DMA contexts. */
@@ -563,8 +578,15 @@ void cal_ctx_start(struct cal_ctx *ctx)
void cal_ctx_stop(struct cal_ctx *ctx)
{
+ struct cal_camerarx *phy = ctx->phy;
long timeout;
+ WARN_ON(phy->vc_enable_count[ctx->vc] == 0);
+
+ spin_lock(&phy->vc_lock);
+ phy->vc_enable_count[ctx->vc]--;
+ spin_unlock(&phy->vc_lock);
+
/*
* Request DMA stop and wait until it completes. If completion times
* out, forcefully disable the DMA.
@@ -601,6 +623,34 @@ void cal_ctx_stop(struct cal_ctx *ctx)
* ------------------------------------------------------------------
*/
+/*
+ * Track a sequence number for each virtual channel, which is shared by
+ * all contexts using the same virtual channel. This is done using the
+ * CSI-2 frame number as a base.
+ */
+static void cal_update_seq_number(struct cal_ctx *ctx)
+{
+ struct cal_dev *cal = ctx->cal;
+ struct cal_camerarx *phy = ctx->phy;
+ u16 prev_frame_num, frame_num;
+ u8 vc = ctx->vc;
+
+ frame_num =
+ cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx)) &
+ 0xffff;
+
+ if (phy->vc_frame_number[vc] != frame_num) {
+ prev_frame_num = phy->vc_frame_number[vc];
+
+ if (prev_frame_num >= frame_num)
+ phy->vc_sequence[vc] += 1;
+ else
+ phy->vc_sequence[vc] += frame_num - prev_frame_num;
+
+ phy->vc_frame_number[vc] = frame_num;
+ }
+}
+
static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
{
spin_lock(&ctx->dma.lock);
@@ -631,6 +681,8 @@ static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
}
spin_unlock(&ctx->dma.lock);
+
+ cal_update_seq_number(ctx);
}
static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
@@ -657,27 +709,62 @@ static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
if (buf) {
buf->vb.vb2_buf.timestamp = ktime_get_ns();
buf->vb.field = ctx->v_fmt.fmt.pix.field;
- buf->vb.sequence = ctx->sequence++;
+ buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc];
+
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
}
+static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end)
+{
+ /*
+ * CAL HW interrupts are inherently racy. If we get both start and end
+ * interrupts, we don't know what has happened: did the DMA for a single
+ * frame start and end, or did one frame end and a new frame start?
+ *
+ * Usually for normal pixel frames we get the interrupts separately. If
+ * we do get both, we have to guess. The assumption in the code below is
+ * that the active vertical area is larger than the blanking vertical
+ * area, and thus it is more likely that we get the end of the old frame
+ * and the start of a new frame.
+ *
+ * However, for embedded data, which is only a few lines high, we always
+ * get both interrupts. Here the assumption is that we get both for the
+ * same frame.
+ */
+ if (ctx->v_fmt.fmt.pix.height < 10) {
+ if (start)
+ cal_irq_wdma_start(ctx);
+
+ if (end)
+ cal_irq_wdma_end(ctx);
+ } else {
+ if (end)
+ cal_irq_wdma_end(ctx);
+
+ if (start)
+ cal_irq_wdma_start(ctx);
+ }
+}
+
static irqreturn_t cal_irq(int irq_cal, void *data)
{
struct cal_dev *cal = data;
- u32 status;
-
- status = cal_read(cal, CAL_HL_IRQSTATUS(0));
- if (status) {
- unsigned int i;
+ u32 status[3];
+ unsigned int i;
- cal_write(cal, CAL_HL_IRQSTATUS(0), status);
+ for (i = 0; i < 3; ++i) {
+ status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i));
+ if (status[i])
+ cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]);
+ }
- if (status & CAL_HL_IRQ_OCPO_ERR_MASK)
+ if (status[0]) {
+ if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK)
dev_err_ratelimited(cal->dev, "OCPO ERROR\n");
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
- if (status & CAL_HL_IRQ_CIO_MASK(i)) {
+ if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) {
u32 cio_stat = cal_read(cal,
CAL_CSI2_COMPLEXIO_IRQSTATUS(i));
@@ -688,7 +775,7 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
cio_stat);
}
- if (status & CAL_HL_IRQ_VC_MASK(i)) {
+ if (status[0] & CAL_HL_IRQ_VC_MASK(i)) {
u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i));
dev_err_ratelimited(cal->dev,
@@ -700,32 +787,12 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
}
}
- /* Check which DMA just finished */
- status = cal_read(cal, CAL_HL_IRQSTATUS(1));
- if (status) {
- unsigned int i;
-
- /* Clear Interrupt status */
- cal_write(cal, CAL_HL_IRQSTATUS(1), status);
-
- for (i = 0; i < cal->num_contexts; ++i) {
- if (status & CAL_HL_IRQ_WDMA_END_MASK(i))
- cal_irq_wdma_end(cal->ctx[i]);
- }
- }
-
- /* Check which DMA just started */
- status = cal_read(cal, CAL_HL_IRQSTATUS(2));
- if (status) {
- unsigned int i;
-
- /* Clear Interrupt status */
- cal_write(cal, CAL_HL_IRQSTATUS(2), status);
+ for (i = 0; i < cal->num_contexts; ++i) {
+ bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i));
+ bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i));
- for (i = 0; i < cal->num_contexts; ++i) {
- if (status & CAL_HL_IRQ_WDMA_START_MASK(i))
- cal_irq_wdma_start(cal->ctx[i]);
- }
+ if (start || end)
+ cal_irq_handle_wdma(cal->ctx[i], start, end);
}
return IRQ_HANDLED;
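
Note on the cal.c changes above: the per-context `sequence` counter is replaced by a per-virtual-channel sequence derived from the 16-bit CSI-2 frame number, so that several contexts sharing a virtual channel report consistent buffer sequence numbers and skipped frames still advance the count. The wrap-around arithmetic can be modelled on its own; the following is a minimal userspace sketch of that rule (the `vc_state` type and the test values are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

struct vc_state {
	uint16_t frame_number;	/* last 16-bit CSI-2 frame number seen */
	uint32_t sequence;	/* monotonically increasing sequence counter */
};

/* Mirror of the update rule: count skipped frames when the hardware
 * counter moves forward, and add exactly one when it wraps around. */
static void update_sequence(struct vc_state *vc, uint16_t frame_num)
{
	if (vc->frame_number == frame_num)
		return;

	if (vc->frame_number >= frame_num)
		vc->sequence += 1;
	else
		vc->sequence += frame_num - vc->frame_number;

	vc->frame_number = frame_num;
}

int main(void)
{
	struct vc_state vc = { 0, 0 };
	const uint16_t frames[] = { 1, 2, 4, 5, 1 };	/* one gap, then a wrap */

	for (unsigned int i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		update_sequence(&vc, frames[i]);
		printf("frame %u -> sequence %u\n",
		       (unsigned int)frames[i], (unsigned int)vc.sequence);
	}
	return 0;
}

Expected output is 1, 2, 4, 5, 6: the 2 to 4 gap adds two, and the wrap from 5 back to 1 adds one.
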
diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h
index 61409ddced98..80f2c9c73c71 100644
--- a/drivers/media/platform/ti/cal/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -180,6 +180,12 @@ struct cal_camerarx {
struct media_pad pads[CAL_CAMERARX_NUM_PADS];
struct v4l2_mbus_framefmt formats[CAL_CAMERARX_NUM_PADS];
+ /* protects the vc_* fields below */
+ spinlock_t vc_lock;
+ u8 vc_enable_count[4];
+ u16 vc_frame_number[4];
+ u32 vc_sequence[4];
+
/*
* Lock for camerarx ops. Protects:
* - formats
@@ -242,7 +248,6 @@ struct cal_ctx {
const struct cal_format_info **active_fmt;
unsigned int num_active_fmt;
- unsigned int sequence;
struct vb2_queue vb_vidq;
u8 dma_ctx;
u8 cport;
diff --git a/drivers/media/platform/ti/davinci/Kconfig b/drivers/media/platform/ti/davinci/Kconfig
index c61e697aeb12..96d4bed7fe9e 100644
--- a/drivers/media/platform/ti/davinci/Kconfig
+++ b/drivers/media/platform/ti/davinci/Kconfig
@@ -32,55 +32,6 @@ config VIDEO_DAVINCI_VPIF_CAPTURE
To compile this driver as a module, choose M here. There will
be two modules called vpif.ko and vpif_capture.ko
-config VIDEO_DM6446_CCDC
- tristate "TI DM6446 CCDC video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
- with decoder modules such as TVP5146 over BT656 or
- sensor module such as MT9T001 over a raw interface. This
- module configures the interface and CCDC/ISIF to do
- video frame capture from slave decoders.
-
- To compile this driver as a module, choose M here. There will
- be three modules called vpfe_capture.ko, vpss.ko and dm644x_ccdc.ko
-
-config VIDEO_DM355_CCDC
- tristate "TI DM355 CCDC video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables DM355 CCD hw module. DM355 CCDC hw interfaces
- with decoder modules such as TVP5146 over BT656 or
- sensor module such as MT9T001 over a raw interface. This
- module configures the interface and CCDC/ISIF to do
- video frame capture from a slave decoders
-
- To compile this driver as a module, choose M here. There will
- be three modules called vpfe_capture.ko, vpss.ko and dm355_ccdc.ko
-
-config VIDEO_DM365_ISIF
- tristate "TI DM365 ISIF video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables ISIF hw module. This is the hardware module for
- configuring ISIF in VPFE to capture Raw Bayer RGB data from
- a image sensor or YUV data from a YUV source.
-
- To compile this driver as a module, choose M here. There will
- be three modules called vpfe_capture.ko, vpss.ko and isif.ko
-
config VIDEO_DAVINCI_VPBE_DISPLAY
tristate "TI DaVinci VPBE V4L2-Display driver"
depends on V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/ti/davinci/Makefile b/drivers/media/platform/ti/davinci/Makefile
index 05c45bf371aa..b20a91653162 100644
--- a/drivers/media/platform/ti/davinci/Makefile
+++ b/drivers/media/platform/ti/davinci/Makefile
@@ -8,9 +8,5 @@ obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif.o vpif_display.o
#VPIF Capture driver
obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif.o vpif_capture.o
-# Capture: DM6446 and DM355
-obj-$(CONFIG_VIDEO_DM6446_CCDC) += vpfe_capture.o vpss.o dm644x_ccdc.o
-obj-$(CONFIG_VIDEO_DM355_CCDC) += vpfe_capture.o vpss.o dm355_ccdc.o
-obj-$(CONFIG_VIDEO_DM365_ISIF) += vpfe_capture.o vpss.o isif.o
obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpss.o vpbe.o vpbe_osd.o \
vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/ti/davinci/vpbe.c b/drivers/media/platform/ti/davinci/vpbe.c
index 5f0aeb744e81..509ecc84624e 100644
--- a/drivers/media/platform/ti/davinci/vpbe.c
+++ b/drivers/media/platform/ti/davinci/vpbe.c
@@ -280,7 +280,7 @@ static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
* vpbe_get_output - Get output
* @vpbe_dev: vpbe device ptr
*
- * return current vpbe output to the the index
+ * return current vpbe output to the index
*/
static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
{
diff --git a/drivers/media/platform/ti/davinci/vpif.h b/drivers/media/platform/ti/davinci/vpif.h
index 651943e3e375..52ecc2562216 100644
--- a/drivers/media/platform/ti/davinci/vpif.h
+++ b/drivers/media/platform/ti/davinci/vpif.h
@@ -322,10 +322,10 @@ static inline void channel1_intr_enable(int enable)
}
/* inline function to set buffer addresses in case of Y/C non mux mode */
-static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch0_set_video_buf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
@@ -334,10 +334,10 @@ static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
}
/* inline function to set buffer addresses in VPIF registers for video data */
-static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch0_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
@@ -345,10 +345,10 @@ static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
regw(btm_strt_chroma, VPIF_CH0_BTM_STRT_ADD_CHROMA);
}
-static inline void ch1_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch1_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH1_TOP_STRT_ADD_LUMA);
@@ -538,10 +538,10 @@ static inline void channel3_clipping_enable(int enable)
}
/* inline function to set buffer addresses in case of Y/C non mux mode */
-static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch2_set_video_buf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
@@ -550,10 +550,10 @@ static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
}
/* inline function to set buffer addresses in VPIF registers for video data */
-static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch2_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
@@ -561,10 +561,10 @@ static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
regw(btm_strt_chroma, VPIF_CH2_BTM_STRT_ADD_CHROMA);
}
-static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch3_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_LUMA);
@@ -574,18 +574,18 @@ static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
/* inline function to set buffer addresses in VPIF registers for vbi data */
static inline void ch2_set_vbi_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_VANC);
regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_VANC);
}
static inline void ch3_set_vbi_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_VANC);
regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC);
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index b91eec899eb5..580723333fcc 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -632,11 +632,11 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
common = &(ch->common[VPIF_VIDEO_INDEX]);
if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
- common->set_addr = ch1_set_videobuf_addr;
+ common->set_addr = ch1_set_video_buf_addr;
else if (2 == muxmode)
- common->set_addr = ch0_set_videobuf_addr_yc_nmux;
+ common->set_addr = ch0_set_video_buf_addr_yc_nmux;
else
- common->set_addr = ch0_set_videobuf_addr;
+ common->set_addr = ch0_set_video_buf_addr;
}
/**
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.h b/drivers/media/platform/ti/davinci/vpif_capture.h
index d5951f61df47..6191056500cf 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.h
+++ b/drivers/media/platform/ti/davinci/vpif_capture.h
@@ -50,7 +50,7 @@ struct common_obj {
struct vpif_cap_buffer *next_frm;
/* Used to store pixel format */
struct v4l2_format fmt;
- /* Buffer queue used in video-buf */
+ /* Buffer queue used in vb2 */
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
diff --git a/drivers/media/platform/ti/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c
index 5d524acc995d..b2df81603f62 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.c
+++ b/drivers/media/platform/ti/davinci/vpif_display.c
@@ -563,12 +563,12 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
if (VPIF_CHANNEL3_VIDEO == ch->channel_id) {
- common->set_addr = ch3_set_videobuf_addr;
+ common->set_addr = ch3_set_video_buf_addr;
} else {
if (2 == muxmode)
- common->set_addr = ch2_set_videobuf_addr_yc_nmux;
+ common->set_addr = ch2_set_video_buf_addr_yc_nmux;
else
- common->set_addr = ch2_set_videobuf_addr;
+ common->set_addr = ch2_set_video_buf_addr;
}
}
diff --git a/drivers/media/platform/ti/davinci/vpif_display.h b/drivers/media/platform/ti/davinci/vpif_display.h
index f27474e0fc36..dae20053dd73 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.h
+++ b/drivers/media/platform/ti/davinci/vpif_display.h
@@ -64,11 +64,11 @@ struct common_obj {
struct vpif_disp_buffer *next_frm; /* Pointer pointing to next
* vb2_buffer */
struct v4l2_format fmt; /* Used to store the format */
- struct vb2_queue buffer_queue; /* Buffer queue used in
- * video-buf */
+ struct vb2_queue buffer_queue; /* Buffer queue used in vb2 */
struct list_head dma_queue; /* Queue of filled frames */
- spinlock_t irqlock; /* Used in video-buf */
+ spinlock_t irqlock; /* Used for video buffer
+ * handling */
/* channel specific parameters */
struct mutex lock; /* lock used to access this
diff --git a/drivers/media/platform/ti/omap/omap_voutlib.c b/drivers/media/platform/ti/omap/omap_voutlib.c
index fdea2309ee37..0ac46458e41c 100644
--- a/drivers/media/platform/ti/omap/omap_voutlib.c
+++ b/drivers/media/platform/ti/omap/omap_voutlib.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL_GPL(omap_vout_try_window);
/* Given a new render window in new_win, adjust the window to the
* nearest supported configuration. The image cropping window in crop
* will also be adjusted if necessary. Preference is given to keeping the
- * the window as close to the requested configuration as possible. If
+ * window as close to the requested configuration as possible. If
* successful, new_win, vout->win, and crop are updated.
* Returns zero if successful, or -EINVAL if the requested preview window is
* impossible and cannot reasonably be adjusted.
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index d251736eb420..a6052df9bb19 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -1528,7 +1528,7 @@ void omap3isp_print_status(struct isp_device *isp)
* To solve this problem power management support is split into prepare/complete
* and suspend/resume operations. The pipelines are stopped in prepare() and the
* ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in
- * resume(), and the the pipelines are restarted in complete().
+ * resume(), and the pipelines are restarted in complete().
*
* TODO: PM dependencies between the ISP and sensors are not modelled explicitly
* yet.
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index d7059180e80e..cc9a97d5d505 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
@@ -1071,7 +1071,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
* processing might be possible but requires more testing.
*
* Stream start must be delayed until buffers are available at both the input
- * and output. The pipeline must be started in the videobuf queue callback with
+ * and output. The pipeline must be started in the vb2 queue callback with
* the buffers queue spinlock held. The modules subdev set stream operation must
* not sleep.
*/
diff --git a/drivers/staging/media/hantro/Kconfig b/drivers/media/platform/verisilicon/Kconfig
index 0172a6822ec2..e65b836b9d78 100644
--- a/drivers/staging/media/hantro/Kconfig
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -1,7 +1,11 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Verisilicon media platform drivers"
+
config VIDEO_HANTRO
tristate "Hantro VPU driver"
depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || COMPILE_TEST
+ depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV
select MEDIA_CONTROLLER
select MEDIA_CONTROLLER_REQUEST_API
diff --git a/drivers/staging/media/hantro/Makefile b/drivers/media/platform/verisilicon/Makefile
index ebd5ede7bef7..ebd5ede7bef7 100644
--- a/drivers/staging/media/hantro/Makefile
+++ b/drivers/media/platform/verisilicon/Makefile
diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 2989ebc631cc..2989ebc631cc 100644
--- a/drivers/staging/media/hantro/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 2036f72eeb4a..2036f72eeb4a 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
diff --git a/drivers/staging/media/hantro/hantro_g1.c b/drivers/media/platform/verisilicon/hantro_g1.c
index 0ab1cee62218..0ab1cee62218 100644
--- a/drivers/staging/media/hantro/hantro_g1.c
+++ b/drivers/media/platform/verisilicon/hantro_g1.c
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
index 9de7f05eff2a..9de7f05eff2a 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
index 9aea331e1a3c..9aea331e1a3c 100644
--- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/media/platform/verisilicon/hantro_g1_regs.h
index c623b3b0be18..c623b3b0be18 100644
--- a/drivers/staging/media/hantro/hantro_g1_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_g1_regs.h
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
index 851eb67f19f5..851eb67f19f5 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
diff --git a/drivers/staging/media/hantro/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
index ee5f14c5f8f2..ee5f14c5f8f2 100644
--- a/drivers/staging/media/hantro/hantro_g2.c
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
index 233ecd863d5f..233ecd863d5f 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
diff --git a/drivers/staging/media/hantro/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h
index 82606783591a..82606783591a 100644
--- a/drivers/staging/media/hantro/hantro_g2_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h
diff --git a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
index 6fc4b555517f..6fc4b555517f 100644
--- a/drivers/staging/media/hantro/hantro_g2_vp9_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c
index 12d69503d6ba..12d69503d6ba 100644
--- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c
+++ b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c
diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/media/platform/verisilicon/hantro_h1_regs.h
index 30e7e7b920b5..30e7e7b920b5 100644
--- a/drivers/staging/media/hantro/hantro_h1_regs.h
+++ b/drivers/media/platform/verisilicon/hantro_h1_regs.h
diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/media/platform/verisilicon/hantro_h264.c
index 4e9a0ecf5c13..4e9a0ecf5c13 100644
--- a/drivers/staging/media/hantro/hantro_h264.c
+++ b/drivers/media/platform/verisilicon/hantro_h264.c
diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c
index b990bc98164c..b990bc98164c 100644
--- a/drivers/staging/media/hantro/hantro_hevc.c
+++ b/drivers/media/platform/verisilicon/hantro_hevc.c
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
index e83f0c523a30..e83f0c523a30 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
diff --git a/drivers/staging/media/hantro/hantro_jpeg.c b/drivers/media/platform/verisilicon/hantro_jpeg.c
index d07b1b449b61..d07b1b449b61 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.c
+++ b/drivers/media/platform/verisilicon/hantro_jpeg.c
diff --git a/drivers/staging/media/hantro/hantro_jpeg.h b/drivers/media/platform/verisilicon/hantro_jpeg.h
index 0b49d0b82caa..0b49d0b82caa 100644
--- a/drivers/staging/media/hantro/hantro_jpeg.h
+++ b/drivers/media/platform/verisilicon/hantro_jpeg.h
diff --git a/drivers/staging/media/hantro/hantro_mpeg2.c b/drivers/media/platform/verisilicon/hantro_mpeg2.c
index 04e545eb0a83..04e545eb0a83 100644
--- a/drivers/staging/media/hantro/hantro_mpeg2.c
+++ b/drivers/media/platform/verisilicon/hantro_mpeg2.c
diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
index a0928c508434..a0928c508434 100644
--- a/drivers/staging/media/hantro/hantro_postproc.c
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 2c7a805289e7..2c7a805289e7 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
diff --git a/drivers/staging/media/hantro/hantro_v4l2.h b/drivers/media/platform/verisilicon/hantro_v4l2.h
index 64f6f57e9d7a..64f6f57e9d7a 100644
--- a/drivers/staging/media/hantro/hantro_v4l2.h
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.h
diff --git a/drivers/staging/media/hantro/hantro_vp8.c b/drivers/media/platform/verisilicon/hantro_vp8.c
index 381bc1d3bfda..381bc1d3bfda 100644
--- a/drivers/staging/media/hantro/hantro_vp8.c
+++ b/drivers/media/platform/verisilicon/hantro_vp8.c
diff --git a/drivers/staging/media/hantro/hantro_vp9.c b/drivers/media/platform/verisilicon/hantro_vp9.c
index 566cd376c097..566cd376c097 100644
--- a/drivers/staging/media/hantro/hantro_vp9.c
+++ b/drivers/media/platform/verisilicon/hantro_vp9.c
diff --git a/drivers/staging/media/hantro/hantro_vp9.h b/drivers/media/platform/verisilicon/hantro_vp9.h
index 26b69275f098..26b69275f098 100644
--- a/drivers/staging/media/hantro/hantro_vp9.h
+++ b/drivers/media/platform/verisilicon/hantro_vp9.h
diff --git a/drivers/staging/media/hantro/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
index 77f574fdfa77..77f574fdfa77 100644
--- a/drivers/staging/media/hantro/imx8m_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_h264_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
index 46c1a83bcc4e..46c1a83bcc4e 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_h264_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c
index 8395c4d48dd0..8395c4d48dd0 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_jpeg_enc.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
index b66737fab46b..b66737fab46b 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_mpeg2_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
index d079075448c9..d079075448c9 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_hw_vp8_dec.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
diff --git a/drivers/staging/media/hantro/rockchip_vpu2_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h
index 49e40889545b..49e40889545b 100644
--- a/drivers/staging/media/hantro/rockchip_vpu2_regs.h
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h
diff --git a/drivers/staging/media/hantro/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
index 8de6fd2e8eef..8de6fd2e8eef 100644
--- a/drivers/staging/media/hantro/rockchip_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
diff --git a/drivers/staging/media/hantro/sama5d4_vdec_hw.c b/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c
index b205e2db5b04..b205e2db5b04 100644
--- a/drivers/staging/media/hantro/sama5d4_vdec_hw.c
+++ b/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c
diff --git a/drivers/staging/media/hantro/sunxi_vpu_hw.c b/drivers/media/platform/verisilicon/sunxi_vpu_hw.c
index 02ce8b064a8f..02ce8b064a8f 100644
--- a/drivers/staging/media/hantro/sunxi_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/sunxi_vpu_hw.c
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index cf8e892c47f0..29b53febc2e7 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -188,6 +188,7 @@ static const u32 xcsi2dt_mbus_lut[][2] = {
{ MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_Y12_1X12 },
{ MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SRGGB16_1X16 },
{ MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SBGGR16_1X16 },
{ MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SGBRG16_1X16 },
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index a0073122798f..5b214bf7f93a 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -40,6 +40,8 @@ static const struct xvip_video_format xvip_video_formats[] = {
1, V4L2_PIX_FMT_SGBRG8 },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
1, V4L2_PIX_FMT_SBGGR8 },
+ { XVIP_VF_MONO_SENSOR, 12, "mono", MEDIA_BUS_FMT_Y12_1X12,
+ 2, V4L2_PIX_FMT_Y12 },
};
/**
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index f34f8b077e03..0a16c218a50a 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -471,7 +471,7 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
{
struct device_node *ports;
struct device_node *port;
- int ret;
+ int ret = 0;
ports = of_get_child_by_name(xdev->dev->of_node, "ports");
if (ports == NULL) {
@@ -481,13 +481,14 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
for_each_child_of_node(ports, port) {
ret = xvip_graph_dma_init_one(xdev, port);
- if (ret < 0) {
+ if (ret) {
of_node_put(port);
- return ret;
+ break;
}
}
- return 0;
+ of_node_put(ports);
+ return ret;
}
static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
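
The xilinx-vipp.c change above is an of_node reference-count fix: for_each_child_of_node() holds a reference on the child it hands out, and the "ports" node returned by of_get_child_by_name() must be released on every exit path. A sketch of the resulting pattern, with made-up function and callback names for illustration:

#include <linux/of.h>

static int example_walk_ports(struct device_node *parent,
			      int (*handle_port)(struct device_node *port))
{
	struct device_node *ports, *port;
	int ret = 0;

	ports = of_get_child_by_name(parent, "ports");
	if (!ports)
		return -EINVAL;

	for_each_child_of_node(ports, port) {
		ret = handle_port(port);
		if (ret) {
			of_node_put(port);	/* drop the iterator's reference */
			break;
		}
	}

	of_node_put(ports);			/* release the parent in all cases */
	return ret;
}
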
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 877a24e5c577..abda40e81612 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -487,7 +487,7 @@ errfr:
return ret;
}
-static int tea5764_i2c_remove(struct i2c_client *client)
+static void tea5764_i2c_remove(struct i2c_client *client)
{
struct tea5764_device *radio = i2c_get_clientdata(client);
@@ -499,7 +499,6 @@ static int tea5764_i2c_remove(struct i2c_client *client)
v4l2_device_unregister(&radio->v4l2_dev);
kfree(radio);
}
- return 0;
}
/* I2C subsystem interface */
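
The radio-tea5764.c hunk is one instance of the pattern repeated through the rest of this section: I2C remove() callbacks now return void, so the trailing "return 0;" disappears and the function only tears down driver state. A minimal skeleton of the new shape, with placeholder example_* names that do not belong to any of these drivers:

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_priv {
	int dummy;
};

static int example_probe(struct i2c_client *client)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	i2c_set_clientdata(client, priv);
	return 0;
}

static void example_remove(struct i2c_client *client)
{
	/* release resources; there is no status to report back */
	kfree(i2c_get_clientdata(client));
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.probe_new	= example_probe,
	.remove		= example_remove,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");
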
diff --git a/drivers/media/radio/saa7706h.c b/drivers/media/radio/saa7706h.c
index adb66f869dd2..f9e990a9c3ef 100644
--- a/drivers/media/radio/saa7706h.c
+++ b/drivers/media/radio/saa7706h.c
@@ -384,7 +384,7 @@ err:
return err;
}
-static int saa7706h_remove(struct i2c_client *client)
+static void saa7706h_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct saa7706h_state *state = to_state(sd);
@@ -393,7 +393,6 @@ static int saa7706h_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
kfree(to_state(sd));
- return 0;
}
static const struct i2c_device_id saa7706h_id[] = {
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 59b3d77e282d..a6ad926c2b4e 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -461,7 +461,7 @@ err_initial:
/*
* si470x_i2c_remove - remove the device
*/
-static int si470x_i2c_remove(struct i2c_client *client)
+static void si470x_i2c_remove(struct i2c_client *client)
{
struct si470x_device *radio = i2c_get_clientdata(client);
@@ -472,7 +472,6 @@ static int si470x_i2c_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&radio->hdl);
v4l2_device_unregister(&radio->v4l2_dev);
- return 0;
}
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index adbf43ff6a21..2aec642133a1 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -1623,7 +1623,7 @@ exit:
}
/* si4713_remove - remove the device */
-static int si4713_remove(struct i2c_client *client)
+static void si4713_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct si4713_device *sdev = to_si4713_device(sd);
@@ -1635,8 +1635,6 @@ static int si4713_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
-
- return 0;
}
/* si4713_i2c_driver - i2c driver interface */
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index d8810492db4f..7b0870a9785b 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -165,13 +165,12 @@ static int tef6862_probe(struct i2c_client *client,
return 0;
}
-static int tef6862_remove(struct i2c_client *client)
+static void tef6862_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_state(sd));
- return 0;
}
static const struct i2c_device_id tef6862_id[] = {
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0834d5f866fd..39d2b03e2631 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1416,42 +1416,37 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
struct device *dev = ir->dev;
- char *data;
-
- data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
- if (!data) {
- dev_err(dev, "%s: memory allocation failed!", __func__);
- return;
- }
+ char data[USB_CTRL_MSG_SZ];
/*
* This is a strange one. Windows issues a set address to the device
* on the receive control pipe and expect a certain value pair back
*/
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
- data, USB_CTRL_MSG_SZ, 3000);
+ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, 0, data, USB_CTRL_MSG_SZ, 3000,
+ GFP_KERNEL);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
/* set feature: bit rate 38400 bps */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
- 0xc04e, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
+ 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 4, USB_TYPE_VENDOR,
- 0x0808, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 4, USB_TYPE_VENDOR,
+ 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 2, USB_TYPE_VENDOR,
- 0x0000, 0x0100, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 2, USB_TYPE_VENDOR,
+ 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
@@ -1459,8 +1454,6 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
/* get hw/sw revision? */
mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
- kfree(data);
}
static void mceusb_gen2_init(struct mceusb_dev *ir)
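
The mceusb conversion above switches from raw usb_control_msg() to the usb_control_msg_send()/usb_control_msg_recv() wrappers, which accept an on-stack buffer (the data is bounced through an internal allocation), return 0 or a negative errno instead of a byte count, and treat short reads as errors. A minimal sketch of a vendor IN transfer with the recv variant; the function name, request number and buffer layout are hypothetical:

#include <linux/usb.h>

static int example_vendor_read(struct usb_device *udev, void *buf, u16 len)
{
	/* control endpoint 0, vendor IN request 0x01, wValue = wIndex = 0,
	 * 3 second timeout; returns 0 on success or a negative errno */
	return usb_control_msg_recv(udev, 0, 0x01,
				    USB_DIR_IN | USB_TYPE_VENDOR,
				    0, 0, buf, len, 3000, GFP_KERNEL);
}
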
diff --git a/drivers/media/test-drivers/vidtv/vidtv_demod.c b/drivers/media/test-drivers/vidtv/vidtv_demod.c
index b7823d97b30d..e7959ab1add8 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_demod.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_demod.c
@@ -438,13 +438,11 @@ static int vidtv_demod_i2c_probe(struct i2c_client *client,
return 0;
}
-static int vidtv_demod_i2c_remove(struct i2c_client *client)
+static void vidtv_demod_i2c_remove(struct i2c_client *client)
{
struct vidtv_demod_state *state = i2c_get_clientdata(client);
kfree(state);
-
- return 0;
}
static struct i2c_driver vidtv_demod_i2c_driver = {
diff --git a/drivers/media/test-drivers/vidtv/vidtv_tuner.c b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
index 14b6bc902ee1..aabc97ed736b 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_tuner.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_tuner.c
@@ -414,13 +414,11 @@ static int vidtv_tuner_i2c_probe(struct i2c_client *client,
return 0;
}
-static int vidtv_tuner_i2c_remove(struct i2c_client *client)
+static void vidtv_tuner_i2c_remove(struct i2c_client *client)
{
struct vidtv_tuner_dev *tuner_dev = i2c_get_clientdata(client);
kfree(tuner_dev);
-
- return 0;
}
static struct i2c_driver vidtv_tuner_i2c_driver = {
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index 47575490e74a..7964426bf2f7 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -2,7 +2,7 @@
/*
* A virtual v4l2-mem2mem example device.
*
- * This is a virtual device driver for testing mem-to-mem videobuf framework.
+ * This is a virtual device driver for testing mem-to-mem vb2 framework.
* It simulates a device that uses memory buffers for both source and
* destination, processes the data and issues an "irq" (simulated by a delayed
* workqueue).
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 176b72cb143b..bfcfb3515901 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -35,7 +35,9 @@
#define MAX_HEIGHT 2160
/* The minimum image width/height */
#define MIN_WIDTH 16
-#define MIN_HEIGHT 16
+#define MIN_HEIGHT MIN_WIDTH
+/* Pixel Array control divider */
+#define PIXEL_ARRAY_DIV MIN_WIDTH
/* The data_offset of plane 0 for the multiplanar formats */
#define PLANE0_DATA_OFFSET 128
@@ -227,6 +229,7 @@ struct vivid_dev {
struct v4l2_ctrl *bitmask;
struct v4l2_ctrl *int_menu;
struct v4l2_ctrl *ro_int32;
+ struct v4l2_ctrl *pixel_array;
struct v4l2_ctrl *test_pattern;
struct v4l2_ctrl *colorspace;
struct v4l2_ctrl *rgb_range_cap;
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index a78d676575bc..92b1a7598470 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -35,6 +35,7 @@
#define VIVID_CID_AREA (VIVID_CID_CUSTOM_BASE + 11)
#define VIVID_CID_RO_INTEGER (VIVID_CID_CUSTOM_BASE + 12)
#define VIVID_CID_U32_DYN_ARRAY (VIVID_CID_CUSTOM_BASE + 13)
+#define VIVID_CID_U8_PIXEL_ARRAY (VIVID_CID_CUSTOM_BASE + 14)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -228,6 +229,18 @@ static const struct v4l2_ctrl_config vivid_ctrl_u8_4d_array = {
.dims = { 2, 3, 4, 5 },
};
+static const struct v4l2_ctrl_config vivid_ctrl_u8_pixel_array = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_U8_PIXEL_ARRAY,
+ .name = "U8 Pixel Array",
+ .type = V4L2_CTRL_TYPE_U8,
+ .def = 0x80,
+ .min = 0x00,
+ .max = 0xff,
+ .step = 1,
+ .dims = { 640 / PIXEL_ARRAY_DIV, 360 / PIXEL_ARRAY_DIV },
+};
+
static const char * const vivid_ctrl_menu_strings[] = {
"Menu Item 0 (Skipped)",
"Menu Item 1",
@@ -1642,6 +1655,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_dyn_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_4d_array, NULL);
+ dev->pixel_array = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u8_pixel_array, NULL);
if (dev->has_vid_cap) {
/* Image Processing Controls */
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index b9caa4b26209..86b158eeb2d8 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -381,6 +381,7 @@ static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
+ u32 dims[V4L2_CTRL_MAX_DIMS] = {};
unsigned size;
u64 pixelclock;
@@ -459,6 +460,9 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
tpg_update_mv_step(&dev->tpg);
+ dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV);
+ dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV);
+ v4l2_ctrl_modify_dimensions(dev->pixel_array, dims);
}
/* Map the field to something that is valid for the current input */
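
The vivid hunks above add a variable-size "U8 Pixel Array" control and resize it whenever the capture source rectangle changes, using v4l2_ctrl_modify_dimensions() to let the control framework reallocate the element storage. A condensed sketch of that resize step; the function name and the 16-pixel granularity are assumptions of this example, while the helper itself is the real API:

#include <linux/math.h>
#include <media/v4l2-ctrls.h>

static int example_resize_pixel_array(struct v4l2_ctrl *pixel_array,
				      unsigned int width, unsigned int height)
{
	u32 dims[V4L2_CTRL_MAX_DIMS] = {};

	/* round both dimensions up to the control's granularity */
	dims[0] = roundup(width, 16);
	dims[1] = roundup(height, 16);

	/* reallocates the control's element array to the new dimensions */
	return v4l2_ctrl_modify_dimensions(pixel_array, dims);
}
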
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index a3a8d051dc6c..61ae884ea59a 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -706,7 +706,7 @@ err:
return ret;
}
-static int e4000_remove(struct i2c_client *client)
+static void e4000_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct e4000_dev *dev = container_of(sd, struct e4000_dev, sd);
@@ -717,8 +717,6 @@ static int e4000_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->hdl);
#endif
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id e4000_id_table[] = {
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 1b5961bdf2d5..f30932e1a0f3 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -588,7 +588,7 @@ err:
return ret;
}
-static int fc2580_remove(struct i2c_client *client)
+static void fc2580_remove(struct i2c_client *client)
{
struct fc2580_dev *dev = i2c_get_clientdata(client);
@@ -598,7 +598,6 @@ static int fc2580_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(&dev->hdl);
#endif
kfree(dev);
- return 0;
}
static const struct i2c_device_id fc2580_id_table[] = {
diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
index 8647c50b66e5..e32e3e9daa15 100644
--- a/drivers/media/tuners/m88rs6000t.c
+++ b/drivers/media/tuners/m88rs6000t.c
@@ -697,7 +697,7 @@ err:
return ret;
}
-static int m88rs6000t_remove(struct i2c_client *client)
+static void m88rs6000t_remove(struct i2c_client *client)
{
struct m88rs6000t_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->cfg.fe;
@@ -707,8 +707,6 @@ static int m88rs6000t_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id m88rs6000t_id[] = {
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 204e6186bf71..322c806228a5 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -509,11 +509,9 @@ err:
return ret;
}
-static int mt2060_remove(struct i2c_client *client)
+static void mt2060_remove(struct i2c_client *client)
{
dev_dbg(&client->dev, "\n");
-
- return 0;
}
static const struct i2c_device_id mt2060_id_table[] = {
diff --git a/drivers/media/tuners/mxl301rf.c b/drivers/media/tuners/mxl301rf.c
index c628435a1b06..6422056185a9 100644
--- a/drivers/media/tuners/mxl301rf.c
+++ b/drivers/media/tuners/mxl301rf.c
@@ -307,14 +307,13 @@ static int mxl301rf_probe(struct i2c_client *client,
return 0;
}
-static int mxl301rf_remove(struct i2c_client *client)
+static void mxl301rf_remove(struct i2c_client *client)
{
struct mxl301rf_state *state;
state = cfg_to_state(i2c_get_clientdata(client));
state->cfg.fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
diff --git a/drivers/media/tuners/qm1d1b0004.c b/drivers/media/tuners/qm1d1b0004.c
index 008ad870c00f..9cba0893207c 100644
--- a/drivers/media/tuners/qm1d1b0004.c
+++ b/drivers/media/tuners/qm1d1b0004.c
@@ -232,14 +232,13 @@ err_mem:
return ret;
}
-static int qm1d1b0004_remove(struct i2c_client *client)
+static void qm1d1b0004_remove(struct i2c_client *client)
{
struct dvb_frontend *fe;
fe = i2c_get_clientdata(client);
kfree(fe->tuner_priv);
fe->tuner_priv = NULL;
- return 0;
}
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 53aa2558f71e..2d60bf501fb5 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -424,14 +424,13 @@ static int qm1d1c0042_probe(struct i2c_client *client,
return 0;
}
-static int qm1d1c0042_remove(struct i2c_client *client)
+static void qm1d1c0042_remove(struct i2c_client *client)
{
struct qm1d1c0042_state *state;
state = cfg_to_state(i2c_get_clientdata(client));
state->cfg.fe->tuner_priv = NULL;
kfree(state);
- return 0;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 0de587b412d4..476b32c04c20 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -951,7 +951,7 @@ err:
return ret;
}
-static int si2157_remove(struct i2c_client *client)
+static void si2157_remove(struct i2c_client *client)
{
struct si2157_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->fe;
@@ -969,8 +969,6 @@ static int si2157_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
/*
diff --git a/drivers/media/tuners/tda18212.c b/drivers/media/tuners/tda18212.c
index bf48f1cd83d2..eb97711c9c68 100644
--- a/drivers/media/tuners/tda18212.c
+++ b/drivers/media/tuners/tda18212.c
@@ -242,7 +242,7 @@ err:
return ret;
}
-static int tda18212_remove(struct i2c_client *client)
+static void tda18212_remove(struct i2c_client *client)
{
struct tda18212_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->cfg.fe;
@@ -252,8 +252,6 @@ static int tda18212_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id tda18212_id[] = {
diff --git a/drivers/media/tuners/tda18250.c b/drivers/media/tuners/tda18250.c
index 8a5781b966ee..e404a5afad4c 100644
--- a/drivers/media/tuners/tda18250.c
+++ b/drivers/media/tuners/tda18250.c
@@ -856,7 +856,7 @@ err:
return ret;
}
-static int tda18250_remove(struct i2c_client *client)
+static void tda18250_remove(struct i2c_client *client)
{
struct tda18250_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->fe;
@@ -866,8 +866,6 @@ static int tda18250_remove(struct i2c_client *client)
memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
fe->tuner_priv = NULL;
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id tda18250_id_table[] = {
diff --git a/drivers/media/tuners/tua9001.c b/drivers/media/tuners/tua9001.c
index af7d5ea1f77e..d141d000b819 100644
--- a/drivers/media/tuners/tua9001.c
+++ b/drivers/media/tuners/tua9001.c
@@ -227,7 +227,7 @@ err:
return ret;
}
-static int tua9001_remove(struct i2c_client *client)
+static void tua9001_remove(struct i2c_client *client)
{
struct tua9001_dev *dev = i2c_get_clientdata(client);
struct dvb_frontend *fe = dev->fe;
@@ -243,7 +243,6 @@ static int tua9001_remove(struct i2c_client *client)
dev_err(&client->dev, "Tuner disable failed (%pe)\n", ERR_PTR(ret));
}
kfree(dev);
- return 0;
}
static const struct i2c_device_id tua9001_id_table[] = {
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index af88e0766388..813171d25ac5 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -13,13 +13,11 @@ if MEDIA_USB_SUPPORT
if MEDIA_CAMERA_SUPPORT
comment "Webcam devices"
-source "drivers/media/usb/cpia2/Kconfig"
source "drivers/media/usb/gspca/Kconfig"
source "drivers/media/usb/pwc/Kconfig"
source "drivers/media/usb/s2255/Kconfig"
source "drivers/media/usb/usbtv/Kconfig"
source "drivers/media/usb/uvc/Kconfig"
-source "drivers/media/usb/zr364xx/Kconfig"
endif
@@ -38,7 +36,6 @@ if (MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT)
source "drivers/media/usb/au0828/Kconfig"
source "drivers/media/usb/cx231xx/Kconfig"
-source "drivers/media/usb/tm6000/Kconfig"
endif
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 25fa2015b179..6d171beea20d 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -12,7 +12,6 @@ obj-y += s2255/
obj-y += siano/
obj-y += ttusb-budget/
obj-y += ttusb-dec/
-obj-y += zr364xx/
# Please keep it alphabetically sorted by Kconfig name
# (e. g. LC_ALL=C sort Makefile)
@@ -24,12 +23,10 @@ obj-$(CONFIG_USB_MSI2500) += msi2500/
obj-$(CONFIG_USB_PWC) += pwc/
obj-$(CONFIG_USB_VIDEO_CLASS) += uvc/
obj-$(CONFIG_VIDEO_AU0828) += au0828/
-obj-$(CONFIG_VIDEO_CPIA2) += cpia2/
obj-$(CONFIG_VIDEO_CX231XX) += cx231xx/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
obj-$(CONFIG_VIDEO_HDPVR) += hdpvr/
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
obj-$(CONFIG_VIDEO_STK1160) += stk1160/
-obj-$(CONFIG_VIDEO_TM6000) += tm6000/
obj-$(CONFIG_VIDEO_USBTV) += usbtv/
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 240a7cc56777..462eb8423506 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -294,7 +294,7 @@ static void airspy_urb_complete(struct urb *urb)
if (unlikely(fbuf == NULL)) {
s->vb_full++;
dev_notice_ratelimited(s->dev,
- "videobuf is full, %d packets dropped\n",
+ "video buffer is full, %d packets dropped\n",
s->vb_full);
goto skip;
}
@@ -1070,6 +1070,10 @@ static int airspy_probe(struct usb_interface *intf,
ret);
goto err_free_controls;
}
+
+ /* Free buf if success*/
+	/* Free buf on success */
+
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
dev_notice(s->dev, "SDR API is still slightly experimental and functionality changes may follow\n");
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index c0f118563c7d..eb303e94cceb 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -384,7 +384,7 @@ static void au0828_copy_video(struct au0828_dev *dev,
}
/*
- * video-buf generic routine to get the next available buffer
+ * generic routine to get the next available buffer
*/
static inline void get_next_buf(struct au0828_dmaqueue *dma_q,
struct au0828_buffer **buf)
@@ -459,7 +459,7 @@ static void au0828_copy_vbi(struct au0828_dev *dev,
/*
- * video-buf generic routine to get the next available VBI buffer
+ * generic routine to get the next available VBI buffer
*/
static inline void vbi_get_next_buf(struct au0828_dmaqueue *dma_q,
struct au0828_buffer **buf)
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 7835bb0f32fc..790787f0eba8 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -425,12 +425,14 @@ static void flexcop_usb_transfer_exit(struct flexcop_usb *fc_usb)
static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
{
- u16 frame_size = le16_to_cpu(
- fc_usb->uintf->cur_altsetting->endpoint[0].desc.wMaxPacketSize);
- int bufsize = B2C2_USB_NUM_ISO_URB * B2C2_USB_FRAMES_PER_ISO *
- frame_size, i, j, ret;
+ struct usb_host_interface *alt = fc_usb->uintf->cur_altsetting;
+ u16 frame_size;
+ int bufsize, i, j, ret;
int buffer_offset = 0;
+ frame_size = usb_endpoint_maxp(&alt->endpoint[0].desc);
+ bufsize = B2C2_USB_NUM_ISO_URB * B2C2_USB_FRAMES_PER_ISO * frame_size;
+
deb_ts("creating %d iso-urbs with %d frames each of %d bytes size = %d.\n",
B2C2_USB_NUM_ISO_URB,
B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
@@ -501,17 +503,21 @@ urb_error:
static int flexcop_usb_init(struct flexcop_usb *fc_usb)
{
- /* use the alternate setting with the larges buffer */
- int ret = usb_set_interface(fc_usb->udev, 0, 1);
+ struct usb_host_interface *alt;
+ int ret;
+ /* use the alternate setting with the largest buffer */
+ ret = usb_set_interface(fc_usb->udev, 0, 1);
if (ret) {
err("set interface failed.");
return ret;
}
- if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
+ alt = fc_usb->uintf->cur_altsetting;
+
+ if (alt->desc.bNumEndpoints < 1)
return -ENODEV;
- if (!usb_endpoint_is_isoc_in(&fc_usb->uintf->cur_altsetting->endpoint[1].desc))
+ if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
return -ENODEV;
switch (fc_usb->udev->speed) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index fdc8b7f7b0c1..33431d9f54c2 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -558,7 +558,7 @@ u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
}
/*
- * video-buf generic routine to get the next available buffer
+ * generic routine to get the next available buffer
*/
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
struct cx231xx_buffer **buf)
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 425e470b0fd3..e23b8ccd79d4 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -220,7 +220,7 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
}
/*
- * video-buf generic routine to get the next available buffer
+ * generic routine to get the next available buffer
*/
static inline void get_next_buf(struct cx231xx_dmaqueue *dma_q,
struct cx231xx_buffer **buf)
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 9c77911fcad4..df90c6c5f3b9 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -786,7 +786,7 @@ static void technisat_usb2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *dev = usb_get_intfdata(intf);
- /* work and stuff was only created when the device is is hot-state */
+ /* work and stuff were only created when the device is in hot state */
if (dev != NULL) {
struct technisat_usb2_state *state = dev->priv;
if (state != NULL)
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 8181c0e6a25b..25e0620deff1 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -440,7 +440,7 @@ static inline void finish_buffer(struct em28xx *dev,
}
/*
- * Copy picture data from USB buffer to videobuf buffer
+ * Copy picture data from USB buffer to video buffer
*/
static void em28xx_copy_video(struct em28xx *dev,
struct em28xx_buffer *buf,
@@ -521,7 +521,7 @@ static void em28xx_copy_video(struct em28xx *dev,
}
/*
- * Copy VBI data from USB buffer to videobuf buffer
+ * Copy VBI data from USB buffer to video buffer
*/
static void em28xx_copy_vbi(struct em28xx *dev,
struct em28xx_buffer *buf,
diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c
index 1fa6f10ee157..2f45188bf9d4 100644
--- a/drivers/media/usb/go7007/s2250-board.c
+++ b/drivers/media/usb/go7007/s2250-board.c
@@ -601,7 +601,7 @@ fail:
return err;
}
-static int s2250_remove(struct i2c_client *client)
+static void s2250_remove(struct i2c_client *client)
{
struct s2250 *state = to_state(i2c_get_clientdata(client));
@@ -609,7 +609,6 @@ static int s2250_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(&state->sd);
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
- return 0;
}
static const struct i2c_device_id s2250_id[] = {
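The s2250_remove() change follows the tree-wide conversion of the I2C remove callback to return void, since the core ignored the returned status anyway. A minimal sketch of the new-style callback, with hypothetical example_* names:

#include <linux/i2c.h>
#include <linux/slab.h>

struct example_state {
	int dummy;	/* placeholder driver state */
};

static void example_remove(struct i2c_client *client)
{
	struct example_state *state = i2c_get_clientdata(client);

	/* Tear down and free driver state; there is no status to return. */
	kfree(state);
}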
diff --git a/drivers/media/usb/gspca/finepix.c b/drivers/media/usb/gspca/finepix.c
index 66c8e5122a0a..bc6133b525e3 100644
--- a/drivers/media/usb/gspca/finepix.c
+++ b/drivers/media/usb/gspca/finepix.c
@@ -129,7 +129,7 @@ again:
* for, then it's the end of the
* frame. Sometimes the jpeg is not complete,
* but there's nothing we can do. We also end
- * here if the the jpeg ends right at the end
+ * here if the jpeg ends right at the end
* of the frame. */
gspca_frame_add(gspca_dev, LAST_PACKET,
data, len);
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 71de6b4c4e4c..5a1f2698efb7 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -411,7 +411,7 @@ static void msi2500_isoc_handler(struct urb *urb)
if (unlikely(fbuf == NULL)) {
dev->vb_full++;
dev_dbg_ratelimited(dev->dev,
- "videobuf is full, %d packets dropped\n",
+ "video buffer is full, %d packets dropped\n",
dev->vb_full);
continue;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
index 6954584526a3..26811efe0fb5 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
@@ -80,7 +80,7 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
static int pvr2_dvb_feed_thread(void *data)
{
int stat = pvr2_dvb_feed_func(data);
- /* from videobuf-dvb.c: */
+
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 8c208db9600b..c95a2229f4fa 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -18,6 +18,7 @@
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -985,36 +986,56 @@ static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
return value;
}
-static int __uvc_ctrl_get(struct uvc_video_chain *chain,
- struct uvc_control *ctrl, struct uvc_control_mapping *mapping,
- s32 *value)
+static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl)
{
+ u8 *data;
int ret;
- if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
- return -EACCES;
+ if (ctrl->loaded)
+ return 0;
- if (!ctrl->loaded) {
- if (ctrl->entity->get_cur) {
- ret = ctrl->entity->get_cur(chain->dev,
- ctrl->entity,
- ctrl->info.selector,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info.size);
- } else {
- ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
- ctrl->entity->id,
- chain->dev->intfnum,
- ctrl->info.selector,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info.size);
- }
- if (ret < 0)
- return ret;
+ data = uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT);
+ if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) {
+ memset(data, 0, ctrl->info.size);
ctrl->loaded = 1;
+
+ return 0;
}
+ if (ctrl->entity->get_cur)
+ ret = ctrl->entity->get_cur(chain->dev, ctrl->entity,
+ ctrl->info.selector, data,
+ ctrl->info.size);
+ else
+ ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
+ ctrl->entity->id, chain->dev->intfnum,
+ ctrl->info.selector, data,
+ ctrl->info.size);
+
+ if (ret < 0)
+ return ret;
+
+ ctrl->loaded = 1;
+
+ return ret;
+}
+
+static int __uvc_ctrl_get(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ s32 *value)
+{
+ int ret;
+
+ if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0)
+ return -EACCES;
+
+ ret = __uvc_ctrl_load_cur(chain, ctrl);
+ if (ret < 0)
+ return ret;
+
*value = __uvc_ctrl_get_value(mapping,
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
@@ -1810,21 +1831,10 @@ int uvc_ctrl_set(struct uvc_fh *handle,
* needs to be loaded from the device to perform the read-modify-write
* operation.
*/
- if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) {
- if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) {
- memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- 0, ctrl->info.size);
- } else {
- ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR,
- ctrl->entity->id, chain->dev->intfnum,
- ctrl->info.selector,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
- ctrl->info.size);
- if (ret < 0)
- return ret;
- }
-
- ctrl->loaded = 1;
+ if ((ctrl->info.size * 8) != mapping->size) {
+ ret = __uvc_ctrl_load_cur(chain, ctrl);
+ if (ret < 0)
+ return ret;
}
/* Backup the current value in case we need to rollback later. */
@@ -2411,10 +2421,9 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
- const struct uvc_control_info *info = uvc_ctrls;
- const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
- const struct uvc_control_mapping *mapping;
- const struct uvc_control_mapping *mend;
+ const struct uvc_control_mapping *mappings;
+ unsigned int num_mappings;
+ unsigned int i;
/*
* XU controls initialization requires querying the device for control
@@ -2425,7 +2434,9 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT)
return;
- for (; info < iend; ++info) {
+ for (i = 0; i < ARRAY_SIZE(uvc_ctrls); ++i) {
+ const struct uvc_control_info *info = &uvc_ctrls[i];
+
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
uvc_ctrl_add_info(chain->dev, ctrl, info);
@@ -2452,9 +2463,11 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
*/
if (chain->dev->info->mappings) {
bool custom = false;
- unsigned int i;
- for (i = 0; (mapping = chain->dev->info->mappings[i]); ++i) {
+ for (i = 0; chain->dev->info->mappings[i]; ++i) {
+ const struct uvc_control_mapping *mapping =
+ chain->dev->info->mappings[i];
+
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector) {
__uvc_ctrl_add_mapping(chain, ctrl, mapping);
@@ -2467,10 +2480,9 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
}
/* Process common mappings next. */
- mapping = uvc_ctrl_mappings;
- mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings);
+ for (i = 0; i < ARRAY_SIZE(uvc_ctrl_mappings); ++i) {
+ const struct uvc_control_mapping *mapping = &uvc_ctrl_mappings[i];
- for (; mapping < mend; ++mapping) {
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
__uvc_ctrl_add_mapping(chain, ctrl, mapping);
@@ -2478,14 +2490,16 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
/* Finally process version-specific mappings. */
if (chain->dev->uvc_version < 0x0150) {
- mapping = uvc_ctrl_mappings_uvc11;
- mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
+ mappings = uvc_ctrl_mappings_uvc11;
+ num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
} else {
- mapping = uvc_ctrl_mappings_uvc15;
- mend = mapping + ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
+ mappings = uvc_ctrl_mappings_uvc15;
+ num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
}
- for (; mapping < mend; ++mapping) {
+ for (i = 0; i < num_mappings; ++i) {
+ const struct uvc_control_mapping *mapping = &mappings[i];
+
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
__uvc_ctrl_add_mapping(chain, ctrl, mapping);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9c05776f11d1..215fb483efb0 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -20,6 +20,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -35,198 +36,6 @@ unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
/* ------------------------------------------------------------------------
- * Video formats
- */
-
-static struct uvc_format_desc uvc_fmts[] = {
- {
- .name = "YUV 4:2:2 (YUYV)",
- .guid = UVC_GUID_FORMAT_YUY2,
- .fcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .name = "YUV 4:2:2 (YUYV)",
- .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
- .fcc = V4L2_PIX_FMT_YUYV,
- },
- {
- .name = "YUV 4:2:0 (NV12)",
- .guid = UVC_GUID_FORMAT_NV12,
- .fcc = V4L2_PIX_FMT_NV12,
- },
- {
- .name = "MJPEG",
- .guid = UVC_GUID_FORMAT_MJPEG,
- .fcc = V4L2_PIX_FMT_MJPEG,
- },
- {
- .name = "YVU 4:2:0 (YV12)",
- .guid = UVC_GUID_FORMAT_YV12,
- .fcc = V4L2_PIX_FMT_YVU420,
- },
- {
- .name = "YUV 4:2:0 (I420)",
- .guid = UVC_GUID_FORMAT_I420,
- .fcc = V4L2_PIX_FMT_YUV420,
- },
- {
- .name = "YUV 4:2:0 (M420)",
- .guid = UVC_GUID_FORMAT_M420,
- .fcc = V4L2_PIX_FMT_M420,
- },
- {
- .name = "YUV 4:2:2 (UYVY)",
- .guid = UVC_GUID_FORMAT_UYVY,
- .fcc = V4L2_PIX_FMT_UYVY,
- },
- {
- .name = "Greyscale 8-bit (Y800)",
- .guid = UVC_GUID_FORMAT_Y800,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 8-bit (Y8 )",
- .guid = UVC_GUID_FORMAT_Y8,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 8-bit (D3DFMT_L8)",
- .guid = UVC_GUID_FORMAT_D3DFMT_L8,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "IR 8-bit (L8_IR)",
- .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
- .fcc = V4L2_PIX_FMT_GREY,
- },
- {
- .name = "Greyscale 10-bit (Y10 )",
- .guid = UVC_GUID_FORMAT_Y10,
- .fcc = V4L2_PIX_FMT_Y10,
- },
- {
- .name = "Greyscale 12-bit (Y12 )",
- .guid = UVC_GUID_FORMAT_Y12,
- .fcc = V4L2_PIX_FMT_Y12,
- },
- {
- .name = "Greyscale 16-bit (Y16 )",
- .guid = UVC_GUID_FORMAT_Y16,
- .fcc = V4L2_PIX_FMT_Y16,
- },
- {
- .name = "BGGR Bayer (BY8 )",
- .guid = UVC_GUID_FORMAT_BY8,
- .fcc = V4L2_PIX_FMT_SBGGR8,
- },
- {
- .name = "BGGR Bayer (BA81)",
- .guid = UVC_GUID_FORMAT_BA81,
- .fcc = V4L2_PIX_FMT_SBGGR8,
- },
- {
- .name = "GBRG Bayer (GBRG)",
- .guid = UVC_GUID_FORMAT_GBRG,
- .fcc = V4L2_PIX_FMT_SGBRG8,
- },
- {
- .name = "GRBG Bayer (GRBG)",
- .guid = UVC_GUID_FORMAT_GRBG,
- .fcc = V4L2_PIX_FMT_SGRBG8,
- },
- {
- .name = "RGGB Bayer (RGGB)",
- .guid = UVC_GUID_FORMAT_RGGB,
- .fcc = V4L2_PIX_FMT_SRGGB8,
- },
- {
- .name = "RGB565",
- .guid = UVC_GUID_FORMAT_RGBP,
- .fcc = V4L2_PIX_FMT_RGB565,
- },
- {
- .name = "BGR 8:8:8 (BGR3)",
- .guid = UVC_GUID_FORMAT_BGR3,
- .fcc = V4L2_PIX_FMT_BGR24,
- },
- {
- .name = "H.264",
- .guid = UVC_GUID_FORMAT_H264,
- .fcc = V4L2_PIX_FMT_H264,
- },
- {
- .name = "H.265",
- .guid = UVC_GUID_FORMAT_H265,
- .fcc = V4L2_PIX_FMT_HEVC,
- },
- {
- .name = "Greyscale 8 L/R (Y8I)",
- .guid = UVC_GUID_FORMAT_Y8I,
- .fcc = V4L2_PIX_FMT_Y8I,
- },
- {
- .name = "Greyscale 12 L/R (Y12I)",
- .guid = UVC_GUID_FORMAT_Y12I,
- .fcc = V4L2_PIX_FMT_Y12I,
- },
- {
- .name = "Depth data 16-bit (Z16)",
- .guid = UVC_GUID_FORMAT_Z16,
- .fcc = V4L2_PIX_FMT_Z16,
- },
- {
- .name = "Bayer 10-bit (SRGGB10P)",
- .guid = UVC_GUID_FORMAT_RW10,
- .fcc = V4L2_PIX_FMT_SRGGB10P,
- },
- {
- .name = "Bayer 16-bit (SBGGR16)",
- .guid = UVC_GUID_FORMAT_BG16,
- .fcc = V4L2_PIX_FMT_SBGGR16,
- },
- {
- .name = "Bayer 16-bit (SGBRG16)",
- .guid = UVC_GUID_FORMAT_GB16,
- .fcc = V4L2_PIX_FMT_SGBRG16,
- },
- {
- .name = "Bayer 16-bit (SRGGB16)",
- .guid = UVC_GUID_FORMAT_RG16,
- .fcc = V4L2_PIX_FMT_SRGGB16,
- },
- {
- .name = "Bayer 16-bit (SGRBG16)",
- .guid = UVC_GUID_FORMAT_GR16,
- .fcc = V4L2_PIX_FMT_SGRBG16,
- },
- {
- .name = "Depth data 16-bit (Z16)",
- .guid = UVC_GUID_FORMAT_INVZ,
- .fcc = V4L2_PIX_FMT_Z16,
- },
- {
- .name = "Greyscale 10-bit (Y10 )",
- .guid = UVC_GUID_FORMAT_INVI,
- .fcc = V4L2_PIX_FMT_Y10,
- },
- {
- .name = "IR:Depth 26-bit (INZI)",
- .guid = UVC_GUID_FORMAT_INZI,
- .fcc = V4L2_PIX_FMT_INZI,
- },
- {
- .name = "4-bit Depth Confidence (Packed)",
- .guid = UVC_GUID_FORMAT_CNF4,
- .fcc = V4L2_PIX_FMT_CNF4,
- },
- {
- .name = "HEVC",
- .guid = UVC_GUID_FORMAT_HEVC,
- .fcc = V4L2_PIX_FMT_HEVC,
- },
-};
-
-/* ------------------------------------------------------------------------
* Utility functions
*/
@@ -245,19 +54,6 @@ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
return NULL;
}
-static struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
-{
- unsigned int len = ARRAY_SIZE(uvc_fmts);
- unsigned int i;
-
- for (i = 0; i < len; ++i) {
- if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
- return &uvc_fmts[i];
- }
-
- return NULL;
-}
-
static enum v4l2_colorspace uvc_colorspace(const u8 primaries)
{
static const enum v4l2_colorspace colorprimaries[] = {
@@ -329,90 +125,6 @@ static enum v4l2_ycbcr_encoding uvc_ycbcr_enc(const u8 matrix_coefficients)
return V4L2_YCBCR_ENC_DEFAULT; /* Reserved */
}
-/*
- * Simplify a fraction using a simple continued fraction decomposition. The
- * idea here is to convert fractions such as 333333/10000000 to 1/30 using
- * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
- * arbitrary parameters to remove non-significative terms from the simple
- * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
- * respectively seems to give nice results.
- */
-void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
- unsigned int n_terms, unsigned int threshold)
-{
- u32 *an;
- u32 x, y, r;
- unsigned int i, n;
-
- an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
- if (an == NULL)
- return;
-
- /*
- * Convert the fraction to a simple continued fraction. See
- * https://en.wikipedia.org/wiki/Continued_fraction
- * Stop if the current term is bigger than or equal to the given
- * threshold.
- */
- x = *numerator;
- y = *denominator;
-
- for (n = 0; n < n_terms && y != 0; ++n) {
- an[n] = x / y;
- if (an[n] >= threshold) {
- if (n < 2)
- n++;
- break;
- }
-
- r = x - an[n] * y;
- x = y;
- y = r;
- }
-
- /* Expand the simple continued fraction back to an integer fraction. */
- x = 0;
- y = 1;
-
- for (i = n; i > 0; --i) {
- r = y;
- y = an[i-1] * y + x;
- x = r;
- }
-
- *numerator = y;
- *denominator = x;
- kfree(an);
-}
-
-/*
- * Convert a fraction to a frame interval in 100ns multiples. The idea here is
- * to compute numerator / denominator * 10000000 using 32 bit fixed point
- * arithmetic only.
- */
-u32 uvc_fraction_to_interval(u32 numerator, u32 denominator)
-{
- u32 multiplier;
-
- /* Saturate the result if the operation would overflow. */
- if (denominator == 0 ||
- numerator/denominator >= ((u32)-1)/10000000)
- return (u32)-1;
-
- /*
- * Divide both the denominator and the multiplier by two until
- * numerator * multiplier doesn't overflow. If anyone knows a better
- * algorithm please let me know.
- */
- multiplier = 10000000;
- while (numerator > ((u32)-1)/multiplier) {
- multiplier /= 2;
- denominator /= 2;
- }
-
- return denominator ? numerator * multiplier / denominator : 0;
-}
-
/* ------------------------------------------------------------------------
* Terminal and unit management
*/
@@ -1553,10 +1265,6 @@ static int uvc_gpio_parse(struct uvc_device *dev)
if (IS_ERR_OR_NULL(gpio_privacy))
return PTR_ERR_OR_ZERO(gpio_privacy);
- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
- if (!unit)
- return -ENOMEM;
-
irq = gpiod_to_irq(gpio_privacy);
if (irq < 0) {
if (irq != EPROBE_DEFER)
@@ -1565,6 +1273,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
return irq;
}
+ unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (!unit)
+ return -ENOMEM;
+
unit->gpio.gpio_privacy = gpio_privacy;
unit->gpio.irq = irq;
unit->gpio.bControlSize = 1;
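The reordering above moves uvc_alloc_entity() after gpiod_to_irq(), so an IRQ lookup failure (including probe deferral) no longer leaks the freshly allocated unit. A generic sketch of the same allocate-after-the-fallible-call ordering, with hypothetical example_* names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>

struct example_unit {
	struct gpio_desc *gpio;
	int irq;
};

static struct example_unit *example_parse(struct gpio_desc *gpio)
{
	struct example_unit *unit;
	int irq;

	/* Resolve the IRQ first: an error or -EPROBE_DEFER leaks nothing. */
	irq = gpiod_to_irq(gpio);
	if (irq < 0)
		return ERR_PTR(irq);

	unit = kzalloc(sizeof(*unit), GFP_KERNEL);
	if (!unit)
		return ERR_PTR(-ENOMEM);

	unit->gpio = gpio;
	unit->irq = irq;
	return unit;
}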
@@ -2740,7 +2452,7 @@ static const struct usb_device_id uvc_ids[] = {
.idProduct = 0x4034,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 0,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* LogiLink Wireless Webcam */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
@@ -3264,6 +2976,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
+ /* Sonix Technology USB 2.0 Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x3277,
+ .idProduct = 0x0072,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* Acer EasyCamera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 4cc3fa6b8c98..f4d4c33b6dfb 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -386,7 +386,7 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream,
mutex_unlock(&stream->mutex);
denominator = 10000000;
- uvc_simplify_fraction(&numerator, &denominator, 8, 333);
+ v4l2_simplify_fraction(&numerator, &denominator, 8, 333);
memset(parm, 0, sizeof(*parm));
parm->type = stream->type;
@@ -427,7 +427,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
else
timeperframe = parm->parm.output.timeperframe;
- interval = uvc_fraction_to_interval(timeperframe.numerator,
+ interval = v4l2_fraction_to_interval(timeperframe.numerator,
timeperframe.denominator);
uvc_dbg(stream->dev, FORMAT, "Setting frame interval to %u/%u (%u)\n",
timeperframe.numerator, timeperframe.denominator, interval);
@@ -481,7 +481,7 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
/* Return the actual frame period. */
timeperframe.numerator = probe.dwFrameInterval;
timeperframe.denominator = 10000000;
- uvc_simplify_fraction(&timeperframe.numerator,
+ v4l2_simplify_fraction(&timeperframe.numerator,
&timeperframe.denominator, 8, 333);
if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -1275,7 +1275,7 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
fival->discrete.numerator =
frame->dwFrameInterval[index];
fival->discrete.denominator = 10000000;
- uvc_simplify_fraction(&fival->discrete.numerator,
+ v4l2_simplify_fraction(&fival->discrete.numerator,
&fival->discrete.denominator, 8, 333);
} else {
fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
@@ -1285,11 +1285,11 @@ static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
fival->stepwise.max.denominator = 10000000;
fival->stepwise.step.numerator = frame->dwFrameInterval[2];
fival->stepwise.step.denominator = 10000000;
- uvc_simplify_fraction(&fival->stepwise.min.numerator,
+ v4l2_simplify_fraction(&fival->stepwise.min.numerator,
&fival->stepwise.min.denominator, 8, 333);
- uvc_simplify_fraction(&fival->stepwise.max.numerator,
+ v4l2_simplify_fraction(&fival->stepwise.max.numerator,
&fival->stepwise.max.denominator, 8, 333);
- uvc_simplify_fraction(&fival->stepwise.step.numerator,
+ v4l2_simplify_fraction(&fival->stepwise.step.numerator,
&fival->stepwise.step.denominator, 8, 333);
}
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 170a008f4006..d2eb9066e4dc 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1095,7 +1095,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
/*
* Synchronize to the input stream by waiting for the FID bit to be
- * toggled when the the buffer state is not UVC_BUF_STATE_ACTIVE.
+ * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE.
* stream->last_fid is initialized to -1, so the first isochronous
* frame will always be in sync.
*
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 24c911aeebce..df93db259312 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -42,144 +42,6 @@
#define UVC_EXT_GPIO_UNIT_ID 0x100
/* ------------------------------------------------------------------------
- * GUIDs
- */
-#define UVC_GUID_UVC_CAMERA \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
-#define UVC_GUID_UVC_OUTPUT \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}
-#define UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
-#define UVC_GUID_UVC_PROCESSING \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}
-#define UVC_GUID_UVC_SELECTOR \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02}
-#define UVC_GUID_EXT_GPIO_CONTROLLER \
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
-
-#define UVC_GUID_FORMAT_MJPEG \
- { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YUY2 \
- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YUY2_ISIGHT \
- { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_NV12 \
- { 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_YV12 \
- { 'Y', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_I420 \
- { 'I', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_UYVY \
- { 'U', 'Y', 'V', 'Y', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y800 \
- { 'Y', '8', '0', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y8 \
- { 'Y', '8', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y10 \
- { 'Y', '1', '0', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y12 \
- { 'Y', '1', '2', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y16 \
- { 'Y', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BY8 \
- { 'B', 'Y', '8', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BA81 \
- { 'B', 'A', '8', '1', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GBRG \
- { 'G', 'B', 'R', 'G', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GRBG \
- { 'G', 'R', 'B', 'G', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RGGB \
- { 'R', 'G', 'G', 'B', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BG16 \
- { 'B', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GB16 \
- { 'G', 'B', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RG16 \
- { 'R', 'G', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_GR16 \
- { 'G', 'R', '1', '6', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RGBP \
- { 'R', 'G', 'B', 'P', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_BGR3 \
- { 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
- 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
-#define UVC_GUID_FORMAT_M420 \
- { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-#define UVC_GUID_FORMAT_H264 \
- { 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_H265 \
- { 'H', '2', '6', '5', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y8I \
- { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Y12I \
- { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_Z16 \
- { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_RW10 \
- { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_INVZ \
- { 'I', 'N', 'V', 'Z', 0x90, 0x2d, 0x58, 0x4a, \
- 0x92, 0x0b, 0x77, 0x3f, 0x1f, 0x2c, 0x55, 0x6b}
-#define UVC_GUID_FORMAT_INZI \
- { 'I', 'N', 'Z', 'I', 0x66, 0x1a, 0x42, 0xa2, \
- 0x90, 0x65, 0xd0, 0x18, 0x14, 0xa8, 0xef, 0x8a}
-#define UVC_GUID_FORMAT_INVI \
- { 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
- 0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
-#define UVC_GUID_FORMAT_CNF4 \
- { 'C', ' ', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-#define UVC_GUID_FORMAT_D3DFMT_L8 \
- {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-#define UVC_GUID_FORMAT_KSMEDIA_L8_IR \
- {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-#define UVC_GUID_FORMAT_HEVC \
- { 'H', 'E', 'V', 'C', 0x00, 0x00, 0x10, 0x00, \
- 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
-
-
-/* ------------------------------------------------------------------------
* Driver specific constants.
*/
@@ -283,12 +145,6 @@ struct uvc_control {
struct uvc_fh *handle; /* File handle that last changed the control. */
};
-struct uvc_format_desc {
- char *name;
- u8 guid[16];
- u32 fcc;
-};
-
/*
* The term 'entity' refers to both UVC units and UVC terminals.
*
@@ -911,9 +767,6 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
/* Utility functions */
-void uvc_simplify_fraction(u32 *numerator, u32 *denominator,
- unsigned int n_terms, unsigned int threshold);
-u32 uvc_fraction_to_interval(u32 numerator, u32 denominator);
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
u16 uvc_endpoint_max_bpi(struct usb_device *dev, struct usb_host_endpoint *ep);
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 2d47c10de062..33162dc1daf6 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -779,7 +779,7 @@ register_client:
* @client: i2c_client descriptor
*/
-static int tuner_remove(struct i2c_client *client)
+static void tuner_remove(struct i2c_client *client)
{
struct tuner *t = to_tuner(i2c_get_clientdata(client));
@@ -789,7 +789,6 @@ static int tuner_remove(struct i2c_client *client)
list_del(&t->list);
kfree(t);
- return 0;
}
/*
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index e0fbe6ba4b6c..40f56e044640 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -484,3 +484,89 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+
+/*
+ * Simplify a fraction using a simple continued fraction decomposition. The
+ * idea here is to convert fractions such as 333333/10000000 to 1/30 using
+ * 32 bit arithmetic only. The algorithm is not perfect and relies upon two
+ * arbitrary parameters to remove non-significant terms from the simple
+ * continued fraction decomposition. Using 8 and 333 for n_terms and threshold
+ * respectively seems to give nice results.
+ */
+void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
+ unsigned int n_terms, unsigned int threshold)
+{
+ u32 *an;
+ u32 x, y, r;
+ unsigned int i, n;
+
+ an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL);
+ if (an == NULL)
+ return;
+
+ /*
+ * Convert the fraction to a simple continued fraction. See
+ * https://en.wikipedia.org/wiki/Continued_fraction
+ * Stop if the current term is bigger than or equal to the given
+ * threshold.
+ */
+ x = *numerator;
+ y = *denominator;
+
+ for (n = 0; n < n_terms && y != 0; ++n) {
+ an[n] = x / y;
+ if (an[n] >= threshold) {
+ if (n < 2)
+ n++;
+ break;
+ }
+
+ r = x - an[n] * y;
+ x = y;
+ y = r;
+ }
+
+ /* Expand the simple continued fraction back to an integer fraction. */
+ x = 0;
+ y = 1;
+
+ for (i = n; i > 0; --i) {
+ r = y;
+ y = an[i-1] * y + x;
+ x = r;
+ }
+
+ *numerator = y;
+ *denominator = x;
+ kfree(an);
+}
+EXPORT_SYMBOL_GPL(v4l2_simplify_fraction);
+
+/*
+ * Convert a fraction to a frame interval in 100ns multiples. The idea here is
+ * to compute numerator / denominator * 10000000 using 32 bit fixed point
+ * arithmetic only.
+ */
+u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator)
+{
+ u32 multiplier;
+
+ /* Saturate the result if the operation would overflow. */
+ if (denominator == 0 ||
+ numerator/denominator >= ((u32)-1)/10000000)
+ return (u32)-1;
+
+ /*
+ * Divide both the denominator and the multiplier by two until
+ * numerator * multiplier doesn't overflow. If anyone knows a better
+ * algorithm please let me know.
+ */
+ multiplier = 10000000;
+ while (numerator > ((u32)-1)/multiplier) {
+ multiplier /= 2;
+ denominator /= 2;
+ }
+
+ return denominator ? numerator * multiplier / denominator : 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval);
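The two helpers moved here keep their original behaviour; only the v4l2_ prefix is new. A short usage sketch, assuming the matching declarations are exposed via media/v4l2-common.h as part of this series (that header change is outside this hunk), with an illustrative example_* name:

#include <linux/types.h>
#include <media/v4l2-common.h>

static void example_frame_interval(void)
{
	u32 num = 333333;	/* frame interval in 100 ns units (~30 fps) */
	u32 den = 10000000;	/* number of 100 ns units per second */
	u32 interval;

	/* 333333/10000000 simplifies to 1/30 with the usual 8/333 knobs. */
	v4l2_simplify_fraction(&num, &den, 8, 333);

	/* ...and 1/30 s converts back to roughly 333333 units of 100 ns. */
	interval = v4l2_fraction_to_interval(num, den);
	(void)interval;
}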
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 0f3d6b5667b0..55c26e7d370e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -1040,6 +1040,8 @@ int v4l2_compat_get_array_args(struct file *file, void *mbuf,
{
int err = 0;
+ memset(mbuf, 0, array_size);
+
switch (cmd) {
case VIDIOC_G_FMT32:
case VIDIOC_S_FMT32:
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 50d012ba3c02..a8c354ad3d23 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -89,10 +89,7 @@ static int req_to_user(struct v4l2_ext_control *c,
/* Helper function: copy the initial control value back to the caller */
static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
- int idx;
-
- for (idx = 0; idx < ctrl->elems; idx++)
- ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
+ ctrl->type_ops->init(ctrl, 0, ctrl->elems, ctrl->p_new);
return ptr_to_user(c, ctrl, ctrl->p_new);
}
@@ -105,8 +102,8 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
ctrl->is_new = 0;
if (ctrl->is_dyn_array &&
- c->size > ctrl->p_dyn_alloc_elems * ctrl->elem_size) {
- void *old = ctrl->p_dyn;
+ c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) {
+ void *old = ctrl->p_array;
void *tmp = kvzalloc(2 * c->size, GFP_KERNEL);
if (!tmp)
@@ -115,14 +112,13 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
ctrl->p_new.p = tmp;
ctrl->p_cur.p = tmp + c->size;
- ctrl->p_dyn = tmp;
- ctrl->p_dyn_alloc_elems = c->size / ctrl->elem_size;
+ ctrl->p_array = tmp;
+ ctrl->p_array_alloc_elems = c->size / ctrl->elem_size;
kvfree(old);
}
if (ctrl->is_ptr && !ctrl->is_string) {
unsigned int elems = c->size / ctrl->elem_size;
- unsigned int idx;
if (copy_from_user(ctrl->p_new.p, c->ptr, c->size))
return -EFAULT;
@@ -130,8 +126,7 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
if (ctrl->is_dyn_array)
ctrl->new_elems = elems;
else if (ctrl->is_array)
- for (idx = elems; idx < ctrl->elems; idx++)
- ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
+ ctrl->type_ops->init(ctrl, elems, ctrl->elems, ctrl->p_new);
return 0;
}
@@ -467,7 +462,7 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
if (is_default)
ret = def_to_user(cs->controls + idx, ref->ctrl);
- else if (is_request && ref->p_req_dyn_enomem)
+ else if (is_request && ref->p_req_array_enomem)
ret = -ENOMEM;
else if (is_request && ref->p_req_valid)
ret = req_to_user(cs->controls + idx, ref);
@@ -499,12 +494,7 @@ EXPORT_SYMBOL(v4l2_g_ext_ctrls);
/* Validate a new control */
static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new)
{
- unsigned int idx;
- int err = 0;
-
- for (idx = 0; !err && idx < ctrl->new_elems; idx++)
- err = ctrl->type_ops->validate(ctrl, idx, p_new);
- return err;
+ return ctrl->type_ops->validate(ctrl, ctrl->new_elems, p_new);
}
/* Validate controls. */
@@ -989,6 +979,42 @@ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
}
EXPORT_SYMBOL(__v4l2_ctrl_modify_range);
+int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl,
+ u32 dims[V4L2_CTRL_MAX_DIMS])
+{
+ unsigned int elems = 1;
+ unsigned int i;
+ void *p_array;
+
+ lockdep_assert_held(ctrl->handler->lock);
+
+ if (!ctrl->is_array || ctrl->is_dyn_array)
+ return -EINVAL;
+
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ elems *= dims[i];
+ if (elems == 0)
+ return -EINVAL;
+ p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL);
+ if (!p_array)
+ return -ENOMEM;
+ kvfree(ctrl->p_array);
+ ctrl->p_array_alloc_elems = elems;
+ ctrl->elems = elems;
+ ctrl->new_elems = elems;
+ ctrl->p_array = p_array;
+ ctrl->p_new.p = p_array;
+ ctrl->p_cur.p = p_array + elems * ctrl->elem_size;
+ for (i = 0; i < ctrl->nr_of_dims; i++)
+ ctrl->dims[i] = dims[i];
+ ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur);
+ cur_to_new(ctrl);
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE |
+ V4L2_EVENT_CTRL_CH_DIMENSIONS);
+ return 0;
+}
+EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions);
+
/* Implement VIDIOC_QUERY_EXT_CTRL */
int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc)
{
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 1f85828d6694..01f00093f259 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -65,33 +65,29 @@ void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
v4l2_event_queue_fh(sev->fh, &ev);
}
-static bool std_equal(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr1,
- union v4l2_ctrl_ptr ptr2)
+bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems,
+ union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2)
{
+ unsigned int i;
+
switch (ctrl->type) {
case V4L2_CTRL_TYPE_BUTTON:
return false;
case V4L2_CTRL_TYPE_STRING:
- idx *= ctrl->elem_size;
- /* strings are always 0-terminated */
- return !strcmp(ptr1.p_char + idx, ptr2.p_char + idx);
- case V4L2_CTRL_TYPE_INTEGER64:
- return ptr1.p_s64[idx] == ptr2.p_s64[idx];
- case V4L2_CTRL_TYPE_U8:
- return ptr1.p_u8[idx] == ptr2.p_u8[idx];
- case V4L2_CTRL_TYPE_U16:
- return ptr1.p_u16[idx] == ptr2.p_u16[idx];
- case V4L2_CTRL_TYPE_U32:
- return ptr1.p_u32[idx] == ptr2.p_u32[idx];
+ for (i = 0; i < elems; i++) {
+ unsigned int idx = i * ctrl->elem_size;
+
+ /* strings are always 0-terminated */
+ if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx))
+ return false;
+ }
+ return true;
default:
- if (ctrl->is_int)
- return ptr1.p_s32[idx] == ptr2.p_s32[idx];
- idx *= ctrl->elem_size;
- return !memcmp(ptr1.p_const + idx, ptr2.p_const + idx,
- ctrl->elem_size);
+ return !memcmp(ptr1.p_const, ptr2.p_const,
+ elems * ctrl->elem_size);
}
}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_equal);
/* Default intra MPEG-2 quantisation coefficients, from the specification. */
static const u8 mpeg2_intra_quant_matrix[64] = {
@@ -181,45 +177,76 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
}
}
-static void std_init(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr)
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ u32 tot_elems, union v4l2_ctrl_ptr ptr)
{
+ unsigned int i;
+ u32 elems = tot_elems - from_idx;
+
+ if (from_idx >= tot_elems)
+ return;
+
switch (ctrl->type) {
case V4L2_CTRL_TYPE_STRING:
- idx *= ctrl->elem_size;
- memset(ptr.p_char + idx, ' ', ctrl->minimum);
- ptr.p_char[idx + ctrl->minimum] = '\0';
+ for (i = from_idx; i < tot_elems; i++) {
+ unsigned int offset = i * ctrl->elem_size;
+
+ memset(ptr.p_char + offset, ' ', ctrl->minimum);
+ ptr.p_char[offset + ctrl->minimum] = '\0';
+ }
break;
case V4L2_CTRL_TYPE_INTEGER64:
- ptr.p_s64[idx] = ctrl->default_value;
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_s64[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64));
+ }
break;
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_BOOLEAN:
- ptr.p_s32[idx] = ctrl->default_value;
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_s32[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
+ }
break;
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
- ptr.p_s32[idx] = 0;
+ memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
break;
case V4L2_CTRL_TYPE_U8:
- ptr.p_u8[idx] = ctrl->default_value;
+ memset(ptr.p_u8 + from_idx, ctrl->default_value, elems);
break;
case V4L2_CTRL_TYPE_U16:
- ptr.p_u16[idx] = ctrl->default_value;
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_u16[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16));
+ }
break;
case V4L2_CTRL_TYPE_U32:
- ptr.p_u32[idx] = ctrl->default_value;
+ if (ctrl->default_value) {
+ for (i = from_idx; i < tot_elems; i++)
+ ptr.p_u32[i] = ctrl->default_value;
+ } else {
+ memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32));
+ }
break;
default:
- std_init_compound(ctrl, idx, ptr);
+ for (i = from_idx; i < tot_elems; i++)
+ std_init_compound(ctrl, i, ptr);
break;
}
}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_init);
-static void std_log(const struct v4l2_ctrl *ctrl)
+void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
{
union v4l2_ctrl_ptr ptr = ctrl->p_cur;
@@ -327,6 +354,7 @@ static void std_log(const struct v4l2_ctrl *ctrl)
break;
}
}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_log);
/*
* Round towards the closest legal value. Be careful when we are
@@ -520,7 +548,8 @@ validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame)
/*
* Compound controls validation requires setting unused fields/flags to zero
- * in order to properly detect unchanged controls with std_equal's memcmp.
+ * in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's
+ * memcmp.
*/
static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
@@ -895,8 +924,8 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
return 0;
}
-static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
- union v4l2_ctrl_ptr ptr)
+static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
{
size_t len;
u64 offset;
@@ -966,11 +995,43 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
}
}
+int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems,
+ union v4l2_ctrl_ptr ptr)
+{
+ unsigned int i;
+ int ret = 0;
+
+ switch ((u32)ctrl->type) {
+ case V4L2_CTRL_TYPE_U8:
+ if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+ case V4L2_CTRL_TYPE_U16:
+ if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+ case V4L2_CTRL_TYPE_U32:
+ if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 && ctrl->step == 1)
+ return 0;
+ break;
+
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ memset(ptr.p_s32, 0, elems * sizeof(s32));
+ return 0;
+ }
+
+ for (i = 0; !ret && i < elems; i++)
+ ret = std_validate_elem(ctrl, i, ptr);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_ctrl_type_op_validate);
+
static const struct v4l2_ctrl_type_ops std_type_ops = {
- .equal = std_equal,
- .init = std_init,
- .log = std_log,
- .validate = std_validate,
+ .equal = v4l2_ctrl_type_op_equal,
+ .init = v4l2_ctrl_type_op_init,
+ .log = v4l2_ctrl_type_op_log,
+ .validate = v4l2_ctrl_type_op_validate,
};
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
@@ -1048,23 +1109,26 @@ void cur_to_new(struct v4l2_ctrl *ctrl)
ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems);
}
-static bool req_alloc_dyn_array(struct v4l2_ctrl_ref *ref, u32 elems)
+static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems)
{
void *tmp;
- if (elems < ref->p_req_dyn_alloc_elems)
+ if (elems == ref->p_req_array_alloc_elems)
+ return true;
+ if (ref->ctrl->is_dyn_array &&
+ elems < ref->p_req_array_alloc_elems)
return true;
tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL);
if (!tmp) {
- ref->p_req_dyn_enomem = true;
+ ref->p_req_array_enomem = true;
return false;
}
- ref->p_req_dyn_enomem = false;
+ ref->p_req_array_enomem = false;
kvfree(ref->p_req.p);
ref->p_req.p = tmp;
- ref->p_req_dyn_alloc_elems = elems;
+ ref->p_req_array_alloc_elems = elems;
return true;
}
@@ -1077,7 +1141,7 @@ void new_to_req(struct v4l2_ctrl_ref *ref)
return;
ctrl = ref->ctrl;
- if (ctrl->is_dyn_array && !req_alloc_dyn_array(ref, ctrl->new_elems))
+ if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems))
return;
ref->p_req_elems = ctrl->new_elems;
@@ -1094,7 +1158,7 @@ void cur_to_req(struct v4l2_ctrl_ref *ref)
return;
ctrl = ref->ctrl;
- if (ctrl->is_dyn_array && !req_alloc_dyn_array(ref, ctrl->elems))
+ if (ctrl->is_array && !req_alloc_array(ref, ctrl->elems))
return;
ref->p_req_elems = ctrl->elems;
@@ -1123,26 +1187,30 @@ int req_to_new(struct v4l2_ctrl_ref *ref)
return 0;
}
- /* Not a dynamic array, so just copy the request value */
- if (!ctrl->is_dyn_array) {
+ /* Not an array, so just copy the request value */
+ if (!ctrl->is_array) {
ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems);
return 0;
}
/* Sanity check, should never happen */
- if (WARN_ON(!ref->p_req_dyn_alloc_elems))
+ if (WARN_ON(!ref->p_req_array_alloc_elems))
+ return -ENOMEM;
+
+ if (!ctrl->is_dyn_array &&
+ ref->p_req_elems != ctrl->p_array_alloc_elems)
return -ENOMEM;
/*
* Check if the number of elements in the request is more than the
- * elements in ctrl->p_dyn. If so, attempt to realloc ctrl->p_dyn.
- * Note that p_dyn is allocated with twice the number of elements
+ * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array.
+ * Note that p_array is allocated with twice the number of elements
* in the dynamic array since it has to store both the current and
* new value of such a control.
*/
- if (ref->p_req_elems > ctrl->p_dyn_alloc_elems) {
+ if (ref->p_req_elems > ctrl->p_array_alloc_elems) {
unsigned int sz = ref->p_req_elems * ctrl->elem_size;
- void *old = ctrl->p_dyn;
+ void *old = ctrl->p_array;
void *tmp = kvzalloc(2 * sz, GFP_KERNEL);
if (!tmp)
@@ -1151,8 +1219,8 @@ int req_to_new(struct v4l2_ctrl_ref *ref)
memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size);
ctrl->p_new.p = tmp;
ctrl->p_cur.p = tmp + sz;
- ctrl->p_dyn = tmp;
- ctrl->p_dyn_alloc_elems = ref->p_req_elems;
+ ctrl->p_array = tmp;
+ ctrl->p_array_alloc_elems = ref->p_req_elems;
kvfree(old);
}
@@ -1243,7 +1311,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
list_del(&ref->node);
- if (ref->p_req_dyn_alloc_elems)
+ if (ref->p_req_array_alloc_elems)
kvfree(ref->p_req.p);
kfree(ref);
}
@@ -1252,7 +1320,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
list_del(&ctrl->node);
list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
list_del(&sev->node);
- kvfree(ctrl->p_dyn);
+ kvfree(ctrl->p_array);
kvfree(ctrl);
}
kvfree(hdl->buckets);
@@ -1368,7 +1436,7 @@ int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (hdl->error)
return hdl->error;
- if (allocate_req && !ctrl->is_dyn_array)
+ if (allocate_req && !ctrl->is_array)
size_extra_req = ctrl->elems * ctrl->elem_size;
new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
if (!new_ref)
@@ -1442,7 +1510,6 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
unsigned elems = 1;
bool is_array;
unsigned tot_ctrl_size;
- unsigned idx;
void *data;
int err;
@@ -1584,11 +1651,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
else if (type == V4L2_CTRL_TYPE_CTRL_CLASS)
flags |= V4L2_CTRL_FLAG_READ_ONLY;
- else if (!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) &&
+ else if (!is_array &&
(type == V4L2_CTRL_TYPE_INTEGER64 ||
type == V4L2_CTRL_TYPE_STRING ||
- type >= V4L2_CTRL_COMPOUND_TYPES ||
- is_array))
+ type >= V4L2_CTRL_COMPOUND_TYPES))
sz_extra += 2 * tot_ctrl_size;
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
@@ -1632,14 +1698,14 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->cur.val = ctrl->val = def;
data = &ctrl[1];
- if (ctrl->is_dyn_array) {
- ctrl->p_dyn_alloc_elems = elems;
- ctrl->p_dyn = kvzalloc(2 * elems * elem_size, GFP_KERNEL);
- if (!ctrl->p_dyn) {
+ if (ctrl->is_array) {
+ ctrl->p_array_alloc_elems = elems;
+ ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL);
+ if (!ctrl->p_array) {
kvfree(ctrl);
return NULL;
}
- data = ctrl->p_dyn;
+ data = ctrl->p_array;
}
if (!ctrl->is_int) {
@@ -1651,20 +1717,18 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) {
- if (ctrl->is_dyn_array)
+ if (ctrl->is_array)
ctrl->p_def.p = &ctrl[1];
else
ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size;
memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
}
- for (idx = 0; idx < elems; idx++) {
- ctrl->type_ops->init(ctrl, idx, ctrl->p_cur);
- ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
- }
+ ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur);
+ cur_to_new(ctrl);
if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
- kvfree(ctrl->p_dyn);
+ kvfree(ctrl->p_array);
kvfree(ctrl);
return NULL;
}
@@ -1978,7 +2042,6 @@ void update_from_auto_cluster(struct v4l2_ctrl *master)
static int cluster_changed(struct v4l2_ctrl *master)
{
bool changed = false;
- unsigned int idx;
int i;
for (i = 0; i < master->ncontrols; i++) {
@@ -2004,10 +2067,9 @@ static int cluster_changed(struct v4l2_ctrl *master)
if (ctrl->elems != ctrl->new_elems)
ctrl_changed = true;
-
- for (idx = 0; !ctrl_changed && idx < ctrl->elems; idx++)
- ctrl_changed = !ctrl->type_ops->equal(ctrl, idx,
- ctrl->p_cur, ctrl->p_new);
+ if (!ctrl_changed)
+ ctrl_changed = !ctrl->type_ops->equal(ctrl,
+ ctrl->elems, ctrl->p_cur, ctrl->p_new);
ctrl->has_changed = ctrl_changed;
changed |= ctrl->has_changed;
}
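Because std_equal/std_init/std_log/std_validate are now exported as v4l2_ctrl_type_op_*() and operate on whole element ranges instead of a single index, a driver-private v4l2_ctrl_type_ops can override just one hook and fall back to the defaults for the rest. A minimal sketch with hypothetical example_* names, assuming the prototypes are exposed via media/v4l2-ctrls.h as part of this series:

#include <linux/printk.h>
#include <media/v4l2-ctrls.h>

static void example_log(const struct v4l2_ctrl *ctrl)
{
	pr_info("%s: compound control set\n", ctrl->name);
}

static const struct v4l2_ctrl_type_ops example_type_ops = {
	.equal		= v4l2_ctrl_type_op_equal,	/* now (ctrl, elems, ptr1, ptr2) */
	.init		= v4l2_ctrl_type_op_init,	/* now (ctrl, from_idx, tot_elems, ptr) */
	.log		= example_log,
	.validate	= v4l2_ctrl_type_op_validate,	/* now (ctrl, elems, ptr) */
};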
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index e70e128ccc9c..355595a0fefa 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -94,7 +94,7 @@ static int v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash,
* brightness <-> intensity conversion, it also must have defined
* related v4l2 control step == 1. In such a case a backward conversion
* from led brightness to v4l2 intensity is required to find out the
- * the aligned intensity value.
+ * aligned intensity value.
*/
if (has_flash_op(v4l2_flash, led_brightness_to_intensity))
ctrl->val = call_flash_op(v4l2_flash,
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c314025d977e..fddba75d9074 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1045,7 +1045,7 @@ static void v4l_sanitize_format(struct v4l2_format *fmt)
/*
* The v4l2_pix_format structure has been extended with fields that were
* not previously required to be set to zero by applications. The priv
- * field, when set to a magic value, indicates the the extended fields
+ * field, when set to a magic value, indicates that the extended fields
* are valid. Otherwise they will contain undefined values. To simplify
* the API towards drivers zero the extended fields and set the priv
* field to the magic value when the extended pixel format structure
@@ -2872,9 +2872,9 @@ static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
IOCTL_INFO(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
- IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL | INFO_FL_ALWAYS_COPY),
IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, v4l_stub_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, v4l_stub_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
IOCTL_INFO(VIDIOC_G_ENC_INDEX, v4l_stub_g_enc_index, v4l_print_enc_idx, 0),
@@ -3367,8 +3367,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
array_buf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (array_buf == NULL)
- goto out_array_args;
- err = -EFAULT;
+ goto out;
if (in_compat_syscall())
err = v4l2_compat_get_array_args(file, array_buf,
user_ptr, array_size,
@@ -3377,7 +3376,7 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
err = copy_from_user(array_buf, user_ptr, array_size) ?
-EFAULT : 0;
if (err)
- goto out_array_args;
+ goto out;
*kernel_ptr = array_buf;
}
@@ -3395,6 +3394,13 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
trace_v4l2_qbuf(video_devdata(file)->minor, parg);
}
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ */
+ if (err < 0 && !always_copy)
+ goto out;
+
if (has_array_args) {
*kernel_ptr = (void __force *)user_ptr;
if (in_compat_syscall()) {
@@ -3409,16 +3415,8 @@ video_usercopy(struct file *file, unsigned int orig_cmd, unsigned long arg,
} else if (copy_to_user(user_ptr, array_buf, array_size)) {
err = -EFAULT;
}
- goto out_array_args;
}
- /*
- * Some ioctls can return an error, but still have valid
- * results that must be returned.
- */
- if (err < 0 && !always_copy)
- goto out;
-out_array_args:
if (video_put_user((void __user *)arg, parg, cmd, orig_cmd))
err = -EFAULT;
out:
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 837e1855f94b..be7fde1ed3ea 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Memory-to-memory device framework for Video for Linux 2 and videobuf.
+ * Memory-to-memory device framework for Video for Linux 2 and vb2.
*
- * Helper functions for devices that use videobuf buffers for both their
+ * Helper functions for devices that use vb2 buffers for both their
* source and destination.
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
@@ -21,7 +21,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
-MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
+MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index ac1a411648d8..fac290e48e0b 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -66,6 +66,15 @@ config BRCMSTB_DPFE
for the DRAM's temperature. Slower refresh rate means cooler RAM,
higher refresh rate means hotter RAM.
+config BRCMSTB_MEMC
+ tristate "Broadcom STB MEMC driver"
+ default ARCH_BRCMSTB
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ This driver provides a way to configure the Broadcom STB memory
+ controller and specifically control the Self Refresh Power Down
+ (SRPD) inactivity timeout.
+
config BT1_L2_CTL
bool "Baikal-T1 CM2 L2-RAM Cache Control Block"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index bc7663ed1c25..e148f636c082 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
obj-$(CONFIG_BRCMSTB_DPFE) += brcmstb_dpfe.o
+obj-$(CONFIG_BRCMSTB_MEMC) += brcmstb_memc.o
obj-$(CONFIG_BT1_L2_CTL) += bt1-l2-ctl.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
diff --git a/drivers/memory/brcmstb_memc.c b/drivers/memory/brcmstb_memc.c
new file mode 100644
index 000000000000..233a53f5bce1
--- /dev/null
+++ b/drivers/memory/brcmstb_memc.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DDR Self-Refresh Power Down (SRPD) support for Broadcom STB SoCs
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define REG_MEMC_CNTRLR_CONFIG 0x00
+#define CNTRLR_CONFIG_LPDDR4_SHIFT 5
+#define CNTRLR_CONFIG_MASK 0xf
+#define REG_MEMC_SRPD_CFG_21 0x20
+#define REG_MEMC_SRPD_CFG_20 0x34
+#define REG_MEMC_SRPD_CFG_1x 0x3c
+#define INACT_COUNT_SHIFT 0
+#define INACT_COUNT_MASK 0xffff
+#define SRPD_EN_SHIFT 16
+
+struct brcmstb_memc_data {
+ u32 srpd_offset;
+};
+
+struct brcmstb_memc {
+ struct device *dev;
+ void __iomem *ddr_ctrl;
+ unsigned int timeout_cycles;
+ u32 frequency;
+ u32 srpd_offset;
+};
+
+static int brcmstb_memc_uses_lpddr4(struct brcmstb_memc *memc)
+{
+ void __iomem *config = memc->ddr_ctrl + REG_MEMC_CNTRLR_CONFIG;
+ u32 reg;
+
+ reg = readl_relaxed(config) & CNTRLR_CONFIG_MASK;
+
+ return reg == CNTRLR_CONFIG_LPDDR4_SHIFT;
+}
+
+static int brcmstb_memc_srpd_config(struct brcmstb_memc *memc,
+ unsigned int cycles)
+{
+ void __iomem *cfg = memc->ddr_ctrl + memc->srpd_offset;
+ u32 val;
+
+ /* Max timeout supported in HW */
+ if (cycles > INACT_COUNT_MASK)
+ return -EINVAL;
+
+ memc->timeout_cycles = cycles;
+
+ val = (cycles << INACT_COUNT_SHIFT) & INACT_COUNT_MASK;
+ if (cycles)
+ val |= BIT(SRPD_EN_SHIFT);
+
+ writel_relaxed(val, cfg);
+ /* Ensure the write is committed to the controller */
+ (void)readl_relaxed(cfg);
+
+ return 0;
+}
+
+static ssize_t frequency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", memc->frequency);
+}
+
+static ssize_t srpd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", memc->timeout_cycles);
+}
+
+static ssize_t srpd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ /*
+ * Cannot change the inactivity timeout on LPDDR4 chips because the
+ * dynamic tuning process will also get affected by the inactivity
+	 * timeout, thus making it non-functional.
+ */
+ if (brcmstb_memc_uses_lpddr4(memc))
+ return -EOPNOTSUPP;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = brcmstb_memc_srpd_config(memc, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(frequency);
+static DEVICE_ATTR_RW(srpd);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_frequency.attr,
+ &dev_attr_srpd.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .attrs = dev_attrs,
+};
+
+static const struct of_device_id brcmstb_memc_of_match[];
+
+static int brcmstb_memc_probe(struct platform_device *pdev)
+{
+ const struct brcmstb_memc_data *memc_data;
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct brcmstb_memc *memc;
+ int ret;
+
+ memc = devm_kzalloc(dev, sizeof(*memc), GFP_KERNEL);
+ if (!memc)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, memc);
+
+ of_id = of_match_device(brcmstb_memc_of_match, dev);
+ memc_data = of_id->data;
+ memc->srpd_offset = memc_data->srpd_offset;
+
+ memc->ddr_ctrl = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(memc->ddr_ctrl))
+ return PTR_ERR(memc->ddr_ctrl);
+
+ of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &memc->frequency);
+
+ ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int brcmstb_memc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ sysfs_remove_group(&dev->kobj, &dev_attr_group);
+
+ return 0;
+}
+
+enum brcmstb_memc_hwtype {
+ BRCMSTB_MEMC_V21,
+ BRCMSTB_MEMC_V20,
+ BRCMSTB_MEMC_V1X,
+};
+
+static const struct brcmstb_memc_data brcmstb_memc_versions[] = {
+ { .srpd_offset = REG_MEMC_SRPD_CFG_21 },
+ { .srpd_offset = REG_MEMC_SRPD_CFG_20 },
+ { .srpd_offset = REG_MEMC_SRPD_CFG_1x },
+};
+
+static const struct of_device_id brcmstb_memc_of_match[] = {
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.1.x",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V1X]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V20]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.3",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.5",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.6",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.7",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.8",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.3.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.0",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.1",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.2",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.3",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-c.1.4",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V21]
+ },
+ /* default to the original offset */
+ {
+ .compatible = "brcm,brcmstb-memc-ddr",
+ .data = &brcmstb_memc_versions[BRCMSTB_MEMC_V1X]
+ },
+ {}
+};
+
+static int brcmstb_memc_suspend(struct device *dev)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+ void __iomem *cfg = memc->ddr_ctrl + memc->srpd_offset;
+ u32 val;
+
+ if (memc->timeout_cycles == 0)
+ return 0;
+
+ /*
+	 * Disable SRPD prior to suspending the system since it can
+	 * cause problems for other memory clients managed by the ARM
+	 * trusted firmware when they access memory.
+ */
+ val = readl_relaxed(cfg);
+ val &= ~BIT(SRPD_EN_SHIFT);
+ writel_relaxed(val, cfg);
+ /* Ensure the write is committed to the controller */
+ (void)readl_relaxed(cfg);
+
+ return 0;
+}
+
+static int brcmstb_memc_resume(struct device *dev)
+{
+ struct brcmstb_memc *memc = dev_get_drvdata(dev);
+
+ if (memc->timeout_cycles == 0)
+ return 0;
+
+ return brcmstb_memc_srpd_config(memc, memc->timeout_cycles);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_memc_pm_ops, brcmstb_memc_suspend,
+ brcmstb_memc_resume);
+
+static struct platform_driver brcmstb_memc_driver = {
+ .probe = brcmstb_memc_probe,
+ .remove = brcmstb_memc_remove,
+ .driver = {
+ .name = "brcmstb_memc",
+ .of_match_table = brcmstb_memc_of_match,
+ .pm = pm_ptr(&brcmstb_memc_pm_ops),
+ },
+};
+module_platform_driver(brcmstb_memc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("DDR SRPD driver for Broadcom STB chips");
diff --git a/drivers/memory/dfl-emif.c b/drivers/memory/dfl-emif.c
index 3f719816771d..da06cd30a016 100644
--- a/drivers/memory/dfl-emif.c
+++ b/drivers/memory/dfl-emif.c
@@ -24,11 +24,24 @@
#define EMIF_STAT_CLEAR_BUSY_SFT 16
#define EMIF_CTRL 0x10
#define EMIF_CTRL_CLEAR_EN_SFT 0
-#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(3, 0)
+#define EMIF_CTRL_CLEAR_EN_MSK GENMASK_ULL(7, 0)
#define EMIF_POLL_INVL 10000 /* us */
#define EMIF_POLL_TIMEOUT 5000000 /* us */
+/*
+ * The Capability Register replaces the Control Register (at the same
+ * offset) for EMIF feature revisions > 0. The bitmask that indicates
+ * the presence of memory channels exists in both the Capability Register
+ * and Control Register definitions. These can be thought of as a C union.
+ * The Capability Register definitions are used to check for the existence
+ * of a memory channel, and the Control Register definitions are used for
+ * managing the memory-clear functionality in revision 0.
+ */
+#define EMIF_CAPABILITY_BASE 0x10
+#define EMIF_CAPABILITY_CHN_MSK_V0 GENMASK_ULL(3, 0)
+#define EMIF_CAPABILITY_CHN_MSK GENMASK_ULL(7, 0)
+
struct dfl_emif {
struct device *dev;
void __iomem *base;
@@ -106,16 +119,30 @@ emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 0);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 1);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 2);
emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 3);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 4);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 5);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 6);
+emif_state_attr(init_done, EMIF_STAT_INIT_DONE_SFT, 7);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 0);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 1);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 2);
emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 3);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 4);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 5);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 6);
+emif_state_attr(cal_fail, EMIF_STAT_CALC_FAIL_SFT, 7);
+
emif_clear_attr(0);
emif_clear_attr(1);
emif_clear_attr(2);
emif_clear_attr(3);
+emif_clear_attr(4);
+emif_clear_attr(5);
+emif_clear_attr(6);
+emif_clear_attr(7);
+
static struct attribute *dfl_emif_attrs[] = {
&emif_attr_inf0_init_done.attr.attr,
@@ -134,6 +161,22 @@ static struct attribute *dfl_emif_attrs[] = {
&emif_attr_inf3_cal_fail.attr.attr,
&emif_attr_inf3_clear.attr.attr,
+ &emif_attr_inf4_init_done.attr.attr,
+ &emif_attr_inf4_cal_fail.attr.attr,
+ &emif_attr_inf4_clear.attr.attr,
+
+ &emif_attr_inf5_init_done.attr.attr,
+ &emif_attr_inf5_cal_fail.attr.attr,
+ &emif_attr_inf5_clear.attr.attr,
+
+ &emif_attr_inf6_init_done.attr.attr,
+ &emif_attr_inf6_cal_fail.attr.attr,
+ &emif_attr_inf6_clear.attr.attr,
+
+ &emif_attr_inf7_init_done.attr.attr,
+ &emif_attr_inf7_cal_fail.attr.attr,
+ &emif_attr_inf7_clear.attr.attr,
+
NULL,
};
@@ -143,15 +186,24 @@ static umode_t dfl_emif_visible(struct kobject *kobj,
struct dfl_emif *de = dev_get_drvdata(kobj_to_dev(kobj));
struct emif_attr *eattr = container_of(attr, struct emif_attr,
attr.attr);
+ struct dfl_device *ddev = to_dfl_dev(de->dev);
u64 val;
/*
- * This device supports upto 4 memory interfaces, but not all
+ * This device supports up to 8 memory interfaces, but not all
* interfaces are used on different platforms. The read out value of
- * CLEAN_EN field (which is a bitmap) could tell how many interfaces
- * are available.
+ * CAPABILITY_CHN_MSK field (which is a bitmap) indicates which
+ * interfaces are available.
*/
- val = FIELD_GET(EMIF_CTRL_CLEAR_EN_MSK, readq(de->base + EMIF_CTRL));
+ if (ddev->revision > 0 && strstr(attr->name, "_clear"))
+ return 0;
+
+ if (ddev->revision == 0)
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK_V0,
+ readq(de->base + EMIF_CAPABILITY_BASE));
+ else
+ val = FIELD_GET(EMIF_CAPABILITY_CHN_MSK,
+ readq(de->base + EMIF_CAPABILITY_BASE));
return (val & BIT_ULL(eattr->index)) ? attr->mode : 0;
}
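
The visibility callback above exposes one init_done/cal_fail (and, on revision 0, clear) attribute per set bit in the channel bitmap. A small userspace-style sketch of that mapping, purely for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: given the channel mask read from the capability
 * (or, on revision 0, control) register, print which "infN" attributes
 * dfl_emif_visible() would leave visible. 0x2f, for example, exposes
 * channels 0-3 and 5.
 */
static void list_visible_channels(uint64_t chn_msk)
{
	int i;

	for (i = 0; i < 8; i++)
		if (chn_msk & (1ULL << i))
			printf("inf%d_* attributes visible\n", i);
}
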
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index d7cb7ead2ac7..5a9754442bc7 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
@@ -14,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <soc/mediatek/smi.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <dt-bindings/memory/mtk-memory-port.h>
@@ -89,6 +91,7 @@
#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
#define MTK_SMI_FLAG_SW_FLAG BIT(1)
#define MTK_SMI_FLAG_SLEEP_CTL BIT(2)
+#define MTK_SMI_FLAG_CFG_PORT_SEC_CTL BIT(3)
#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
struct mtk_smi_reg_pair {
@@ -127,7 +130,7 @@ struct mtk_smi_common_plat {
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
- void (*config_port)(struct device *dev);
+ int (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
unsigned int flags_general;
const u8 (*ostd)[SMI_LARB_PORT_NR_MAX];
@@ -185,7 +188,7 @@ static const struct component_ops mtk_smi_larb_component_ops = {
.unbind = mtk_smi_larb_unbind,
};
-static void mtk_smi_larb_config_port_gen1(struct device *dev)
+static int mtk_smi_larb_config_port_gen1(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
@@ -214,31 +217,35 @@ static void mtk_smi_larb_config_port_gen1(struct device *dev)
common->smi_ao_base
+ REG_SMI_SECUR_CON_ADDR(m4u_port_id));
}
+ return 0;
}
-static void mtk_smi_larb_config_port_mt8167(struct device *dev)
+static int mtk_smi_larb_config_port_mt8167(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
+ return 0;
}
-static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+static int mtk_smi_larb_config_port_mt8173(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
writel(*larb->mmu, larb->base + MT8173_SMI_LARB_MMU_EN);
+ return 0;
}
-static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
+static int mtk_smi_larb_config_port_gen2_general(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
u32 reg, flags_general = larb->larb_gen->flags_general;
const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL;
+ struct arm_smccc_res res;
int i;
if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
- return;
+ return 0;
if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_THRT_UPDATE)) {
reg = readl_relaxed(larb->base + SMI_LARB_CMD_THRT_CON);
@@ -253,14 +260,78 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
for (i = 0; i < SMI_LARB_PORT_NR_MAX && larbostd && !!larbostd[i]; i++)
writel_relaxed(larbostd[i], larb->base + SMI_LARB_OSTDL_PORTx(i));
+ /*
+	 * When the mmu_en bits are controlled by the secure world, bank_sel is
+	 * still configured through LARB_NONSEC_CON below, and the mmu_en bits
+	 * of LARB_NONSEC_CON have no effect in that case.
+ */
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_CFG_PORT_SEC_CTL)) {
+ arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL, IOMMU_ATF_CMD_CONFIG_SMI_LARB,
+ larb->larbid, *larb->mmu, 0, 0, 0, 0, &res);
+ if (res.a0 != 0) {
+ dev_err(dev, "Enable iommu fail, ret %ld\n", res.a0);
+ return -EINVAL;
+ }
+ }
+
for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
reg |= F_MMU_EN;
reg |= BANK_SEL(larb->bank[i]);
writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
}
+ return 0;
}
+static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x02, 0x18, 0x22, 0x22, 0x01, 0x02, 0x0a,},
+ [1] = {0x12, 0x02, 0x14, 0x14, 0x01, 0x18, 0x0a,},
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,},
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a, 0x07, 0x07,},
+ [5] = {0x02, 0x01, 0x04, 0x02, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x01, 0x0a, 0x05, 0x02, 0x03, 0x01, 0x01, 0x14, 0x14,
+ 0x0a, 0x14, 0x1e, 0x01, 0x0c, 0x0a, 0x05, 0x02, 0x02, 0x05,
+ 0x03, 0x01, 0x1e, 0x01, 0x05,},
+ [9] = {0x1e, 0x01, 0x0a, 0x0a, 0x01, 0x01, 0x03, 0x1e, 0x1e, 0x10,
+ 0x07, 0x01, 0x0a, 0x06, 0x03, 0x03, 0x0e, 0x01, 0x04, 0x28,},
+ [10] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [11] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [12] = {0x03, 0x20, 0x01, 0x20, 0x01, 0x01, 0x14, 0x0a, 0x0a, 0x0c,
+ 0x0a, 0x05, 0x02, 0x03, 0x02, 0x14, 0x0a, 0x0a, 0x14, 0x14,
+ 0x14, 0x01, 0x01, 0x14, 0x1e, 0x01, 0x05, 0x03, 0x02, 0x28,},
+ [13] = {0x07, 0x02, 0x04, 0x02, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+ 0x07, 0x02, 0x04, 0x02, 0x05, 0x05,},
+ [14] = {0x02, 0x02, 0x0c, 0x0c, 0x0c, 0x0c, 0x01, 0x01, 0x02, 0x02,
+ 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x01, 0x01,},
+ [15] = {0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x0c, 0x0c,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x02,
+ 0x0c, 0x01, 0x01,},
+ [16] = {0x28, 0x28, 0x03, 0x01, 0x01, 0x03, 0x14, 0x14, 0x0a, 0x0d,
+ 0x03, 0x05, 0x0e, 0x01, 0x01, 0x05, 0x06, 0x0d, 0x01,},
+ [17] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [18] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [19] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [20] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [21] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,
+ 0x01,},
+ [23] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x18, 0x01, 0x01,},
+ [24] = {0x12, 0x06, 0x12, 0x06,},
+ [25] = {0x01},
+};
+
static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
[1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
@@ -347,6 +418,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8186 = {
.flags_general = MTK_SMI_FLAG_SLEEP_CTL,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG |
+ MTK_SMI_FLAG_SLEEP_CTL | MTK_SMI_FLAG_CFG_PORT_SEC_CTL,
+ .ostd = mtk_smi_larb_mt8188_ostd,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
};
@@ -367,6 +445,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
{.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
{.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
{.compatible = "mediatek,mt8186-smi-larb", .data = &mtk_smi_larb_mt8186},
+ {.compatible = "mediatek,mt8188-smi-larb", .data = &mtk_smi_larb_mt8188},
{.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
{.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
@@ -511,9 +590,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
mtk_smi_larb_sleep_ctrl_disable(larb);
/* Configure the basic setting for this larb */
- larb_gen->config_port(dev);
-
- return 0;
+ return larb_gen->config_port(dev);
}
static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
@@ -597,6 +674,18 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8186 = {
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(4) | F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8188_vdo = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(5) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8188_vpp = {
+ .type = MTK_SMI_GEN2,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
.type = MTK_SMI_GEN2,
.has_gals = true,
@@ -633,6 +722,8 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
{.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
{.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
{.compatible = "mediatek,mt8186-smi-common", .data = &mtk_smi_common_mt8186},
+ {.compatible = "mediatek,mt8188-smi-common-vdo", .data = &mtk_smi_common_mt8188_vdo},
+ {.compatible = "mediatek,mt8188-smi-common-vpp", .data = &mtk_smi_common_mt8188_vpp},
{.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
{.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
{.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index dbdf87bc0b78..fcd20d85d385 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -134,6 +134,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
for_each_child_of_node(np_ddr, np_tim) {
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_do_get_timings(np_tim, &timings[i])) {
+ of_node_put(np_tim);
devm_kfree(dev, timings);
goto default_timings;
}
@@ -284,6 +285,7 @@ const struct lpddr3_timings
if (of_device_is_compatible(np_tim, tim_compat)) {
if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
devm_kfree(dev, timings);
+ of_node_put(np_tim);
goto default_timings;
}
i++;
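
Both hunks in this file plug the same leak: for_each_child_of_node() takes a reference on each child it visits, so any early exit from the loop must drop the reference on the current child. A generic sketch of the pattern, with a hypothetical per-child callback:

#include <linux/of.h>

static int do_one_child(struct device_node *np);	/* hypothetical */

/*
 * Generic pattern (illustrative): on an early return or goto out of
 * for_each_child_of_node(), of_node_put() the child the iterator is
 * currently holding, otherwise its refcount leaks.
 */
static int process_children(struct device_node *parent)
{
	struct device_node *child;
	int ret;

	for_each_child_of_node(parent, child) {
		ret = do_one_child(child);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
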
diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
index f84b98278745..d39ee7d06665 100644
--- a/drivers/memory/pl353-smc.c
+++ b/drivers/memory/pl353-smc.c
@@ -122,6 +122,7 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id)
}
of_platform_device_create(child, NULL, &adev->dev);
+ of_node_put(child);
return 0;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index f9ee957072c3..52c7020c9d19 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -620,7 +620,6 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
mpt_ioctl_header __user *uhdr = (void __user *) arg;
mpt_ioctl_header khdr;
- int iocnum;
unsigned iocnumX;
int nonblock = (file->f_flags & O_NONBLOCK);
int ret;
@@ -634,12 +633,11 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
ret = -ENXIO; /* (-6) No such device or address */
- /* Verify intended MPT adapter - set iocnum and the adapter
+ /* Verify intended MPT adapter - set iocnumX and the adapter
* pointer (iocp)
*/
iocnumX = khdr.iocnum & 0xFF;
- if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
- (iocp == NULL))
+ if ((mpt_verify_adapter(iocnumX, &iocp) < 0) || (iocp == NULL))
return -ENODEV;
if (!iocp->active) {
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index eaf9845633b4..a30e47b74327 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -583,7 +583,7 @@ out_init:
return ret;
}
-static int pm800_remove(struct i2c_client *client)
+static void pm800_remove(struct i2c_client *client)
{
struct pm80x_chip *chip = i2c_get_clientdata(client);
@@ -592,8 +592,6 @@ static int pm800_remove(struct i2c_client *client)
pm800_pages_exit(chip);
pm80x_deinit();
-
- return 0;
}
static struct i2c_driver pm800_driver = {
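
This and the following hunks convert I2C drivers to the void-returning remove() callback, which can no longer report an error to the I2C core. A minimal skeleton of the resulting driver shape (names are hypothetical):

#include <linux/i2c.h>
#include <linux/module.h>

static int example_probe(struct i2c_client *client)
{
	return 0;
}

static void example_remove(struct i2c_client *client)
{
	/* tear down resources; nothing is returned */
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.probe_new	= example_probe,
	.remove		= example_remove,
};
module_i2c_driver(example_driver);

MODULE_LICENSE("GPL");
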
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index ada6c513302b..10d3637840c8 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -239,7 +239,7 @@ out_init:
return ret;
}
-static int pm805_remove(struct i2c_client *client)
+static void pm805_remove(struct i2c_client *client)
{
struct pm80x_chip *chip = i2c_get_clientdata(client);
@@ -247,8 +247,6 @@ static int pm805_remove(struct i2c_client *client)
device_irq_exit_805(chip);
pm80x_deinit();
-
- return 0;
}
static struct i2c_driver pm805_driver = {
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index b1e829ea909b..5dc86dd66202 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1201,7 +1201,7 @@ static int pm860x_probe(struct i2c_client *client)
return 0;
}
-static int pm860x_remove(struct i2c_client *client)
+static void pm860x_remove(struct i2c_client *client)
{
struct pm860x_chip *chip = i2c_get_clientdata(client);
@@ -1210,7 +1210,6 @@ static int pm860x_remove(struct i2c_client *client)
regmap_exit(chip->regmap_companion);
i2c_unregister_device(chip->companion);
}
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index abb58ab1a1a4..8b93856de432 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -589,8 +589,8 @@ config LPC_SCH
config INTEL_SOC_PMIC
bool "Support for Crystal Cove PMIC"
- depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
- depends on X86 || COMPILE_TEST
+ depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
+ depends on (X86 && ACPI) || COMPILE_TEST
depends on I2C_DESIGNWARE_PLATFORM=y
select MFD_CORE
select REGMAP_I2C
@@ -938,6 +938,22 @@ config MFD_MT6360
PMIC part includes 2-channel BUCKs and 2-channel LDOs
LDO part includes 4-channel LDOs
+config MFD_MT6370
+ tristate "MediaTek MT6370 SubPMIC"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C
+ help
+ Say Y here to enable MT6370 SubPMIC functional support.
+	  It consists of a single-cell battery charger with ADC monitoring, RGB
+	  LEDs, a dual-channel flashlight, a WLED backlight driver, a display
+	  bias voltage supply, one general-purpose LDO, and a USB Type-C & PD
+	  controller compliant with the latest USB Type-C and PD standards.
+
+ This driver can also be built as a module. If so, the module
+ will be called "mt6370".
+
config MFD_MT6397
tristate "MediaTek MT6397 PMIC Support"
select MFD_CORE
@@ -963,6 +979,27 @@ config MFD_MENF21BMC
This driver can also be built as a module. If so the module
will be called menf21bmc.
+config MFD_OCELOT
+ tristate "Microsemi Ocelot External Control Support"
+ depends on SPI_MASTER
+ select MFD_CORE
+ select REGMAP_SPI
+ help
+	  Ocelot is a family of networking chips that support multiple Ethernet
+ and fibre interfaces. In addition to networking, they contain several
+ other functions, including pinctrl, MDIO, and communication with
+ external chips. While some chips have an internal processor capable of
+ running an OS, others don't. All chips can be controlled externally
+ through different interfaces, including SPI, I2C, and PCIe.
+
+ Say yes here to add support for Ocelot chips (VSC7511, VSC7512,
+ VSC7513, VSC7514) controlled externally.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ocelot-soc.
+
+ If unsure, say N.
+
config EZX_PCAP
bool "Motorola EZXPCAP Support"
depends on SPI_MASTER
@@ -1096,6 +1133,16 @@ config MFD_SPMI_PMIC
Say M here if you want to include support for the SPMI PMIC
series as a module. The module will be called "qcom-spmi-pmic".
+config MFD_SY7636A
+ tristate "Silergy SY7636A voltage regulator"
+ depends on I2C
+ select MFD_SIMPLE_MFD_I2C
+ help
+ Enable support for Silergy SY7636A voltage regulator.
+
+ To enable support for building sub-devices as modules,
+ choose M here.
+
config MFD_RDC321X
tristate "RDC R-321x southbridge"
select MFD_CORE
@@ -1128,6 +1175,18 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
+config MFD_RT5120
+ tristate "Richtek RT5120 Power Management IC"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+	  This enables support for the Richtek RT5120 PMIC. It includes four high
+ efficiency buck converters and one LDO voltage regulator. The device
+ is targeted at providing the CPU voltage, memory, I/O and peripheral
+ power rails in home entertainment devices.
+
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1203,7 +1262,7 @@ config MFD_SI476X_CORE
module will be called si476x-core.
config MFD_SIMPLE_MFD_I2C
- tristate "Simple Multi-Functional Device support (I2C)"
+ tristate
depends on I2C
select MFD_CORE
select REGMAP_I2C
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 858cacf659d6..7ed3ef4a698c 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -120,6 +120,9 @@ obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
+ocelot-soc-objs := ocelot-core.o ocelot-spi.o
+obj-$(CONFIG_MFD_OCELOT) += ocelot-soc.o
+
obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o
obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o
@@ -172,6 +175,11 @@ obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
obj-$(CONFIG_MFD_MP2629) += mp2629.o
+obj-$(CONFIG_MFD_MT6360) += mt6360-core.o
+obj-$(CONFIG_MFD_MT6370) += mt6370.o
+mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
+obj-$(CONFIG_MFD_MT6397) += mt6397.o
+
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
@@ -234,16 +242,13 @@ obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT4831) += rt4831.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
+obj-$(CONFIG_MFD_RT5120) += rt5120.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
-intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
-obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
+obj-$(CONFIG_INTEL_SOC_PMIC) += intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
-obj-$(CONFIG_MFD_MT6360) += mt6360-core.o
-mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
-obj-$(CONFIG_MFD_MT6397) += mt6397.o
obj-$(CONFIG_INTEL_SOC_PMIC_MRFLD) += intel_soc_pmic_mrfld.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
diff --git a/drivers/mfd/acer-ec-a500.c b/drivers/mfd/acer-ec-a500.c
index 80c2fdd14fc4..7fd8b9988075 100644
--- a/drivers/mfd/acer-ec-a500.c
+++ b/drivers/mfd/acer-ec-a500.c
@@ -169,7 +169,7 @@ static int a500_ec_probe(struct i2c_client *client)
return 0;
}
-static int a500_ec_remove(struct i2c_client *client)
+static void a500_ec_remove(struct i2c_client *client)
{
if (of_device_is_system_power_controller(client->dev.of_node)) {
if (pm_power_off == a500_ec_poweroff)
@@ -177,8 +177,6 @@ static int a500_ec_remove(struct i2c_client *client)
unregister_restart_handler(&a500_ec_restart_handler);
}
-
- return 0;
}
static const struct of_device_id a500_ec_match[] = {
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index 6d83e6b9a692..bfc7cf56ff2c 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -84,13 +84,11 @@ static int arizona_i2c_probe(struct i2c_client *i2c,
return arizona_dev_init(arizona);
}
-static int arizona_i2c_remove(struct i2c_client *i2c)
+static void arizona_i2c_remove(struct i2c_client *i2c)
{
struct arizona *arizona = dev_get_drvdata(&i2c->dev);
arizona_dev_exit(arizona);
-
- return 0;
}
static const struct i2c_device_id arizona_i2c_id[] = {
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
index 00ab48018d8d..8fd6727dc30a 100644
--- a/drivers/mfd/axp20x-i2c.c
+++ b/drivers/mfd/axp20x-i2c.c
@@ -50,13 +50,11 @@ static int axp20x_i2c_probe(struct i2c_client *i2c,
return axp20x_device_probe(axp20x);
}
-static int axp20x_i2c_remove(struct i2c_client *i2c)
+static void axp20x_i2c_remove(struct i2c_client *i2c)
{
struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
axp20x_device_remove(axp20x);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index a818fbb55988..3f8f6ad3a98c 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -532,12 +532,11 @@ static int da903x_probe(struct i2c_client *client,
return da903x_add_subdevs(chip, pdata);
}
-static int da903x_remove(struct i2c_client *client)
+static void da903x_remove(struct i2c_client *client)
{
struct da903x_chip *chip = i2c_get_clientdata(client);
da903x_remove_subdevs(chip);
- return 0;
}
static struct i2c_driver da903x_driver = {
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 8de93db35f3a..5a74696c8704 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -168,12 +168,11 @@ static int da9052_i2c_probe(struct i2c_client *client,
return da9052_device_init(da9052, id->driver_data);
}
-static int da9052_i2c_remove(struct i2c_client *client)
+static void da9052_i2c_remove(struct i2c_client *client)
{
struct da9052 *da9052 = i2c_get_clientdata(client);
da9052_device_exit(da9052);
- return 0;
}
static struct i2c_driver da9052_i2c_driver = {
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index bc60433b68db..276c7d1c509e 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -41,13 +41,11 @@ static int da9055_i2c_probe(struct i2c_client *i2c,
return da9055_device_init(da9055);
}
-static int da9055_i2c_remove(struct i2c_client *i2c)
+static void da9055_i2c_remove(struct i2c_client *i2c)
{
struct da9055 *da9055 = i2c_get_clientdata(i2c);
da9055_device_exit(da9055);
-
- return 0;
}
/*
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 2774b2cbaea6..a26e473507c7 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -453,6 +453,7 @@ static const struct regmap_range da9061_aa_writeable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B),
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
+ regmap_reg_range(DA9062AA_CONFIG_J, DA9062AA_CONFIG_J),
regmap_reg_range(DA9062AA_GP_ID_0, DA9062AA_GP_ID_19),
};
@@ -723,14 +724,12 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
return ret;
}
-static int da9062_i2c_remove(struct i2c_client *i2c)
+static void da9062_i2c_remove(struct i2c_client *i2c)
{
struct da9062 *chip = i2c_get_clientdata(i2c);
mfd_remove_devices(chip->dev);
regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
-
- return 0;
}
static const struct i2c_device_id da9062_i2c_id[] = {
diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c
index 58009c8cb870..6ae56e46d24e 100644
--- a/drivers/mfd/da9150-core.c
+++ b/drivers/mfd/da9150-core.c
@@ -471,15 +471,13 @@ regmap_irq_fail:
return ret;
}
-static int da9150_remove(struct i2c_client *client)
+static void da9150_remove(struct i2c_client *client)
{
struct da9150 *da9150 = i2c_get_clientdata(client);
regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data);
mfd_remove_devices(da9150->dev);
i2c_unregister_device(da9150->core_qif);
-
- return 0;
}
static void da9150_shutdown(struct i2c_client *client)
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 54fb6cbd2aa0..759c59690680 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -375,11 +375,10 @@ static void dm355evm_power_off(void)
dm355evm_command(MSP_COMMAND_POWEROFF);
}
-static int dm355evm_msp_remove(struct i2c_client *client)
+static void dm355evm_msp_remove(struct i2c_client *client)
{
pm_power_off = NULL;
msp430 = NULL;
- return 0;
}
static int
diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c
index 1b73318d1f1f..3eff98e26bea 100644
--- a/drivers/mfd/ene-kb3930.c
+++ b/drivers/mfd/ene-kb3930.c
@@ -177,7 +177,7 @@ static int kb3930_probe(struct i2c_client *client)
return 0;
}
-static int kb3930_remove(struct i2c_client *client)
+static void kb3930_remove(struct i2c_client *client)
{
struct kb3930 *ddata = i2c_get_clientdata(client);
@@ -187,8 +187,6 @@ static int kb3930_remove(struct i2c_client *client)
unregister_restart_handler(&kb3930_restart_nb);
}
kb3930_power_off = NULL;
-
- return 0;
}
static const struct of_device_id kb3930_dt_ids[] = {
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 37e5e02a1d05..823595bcc9b7 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -69,7 +69,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
return irq;
tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
@@ -84,6 +84,19 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
return 0;
}
+static int mx25_tsadc_unset_irq(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq >= 0) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static void mx25_tsadc_setup_clk(struct platform_device *pdev,
struct mx25_tsadc *tsadc)
{
@@ -171,18 +184,21 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tsadc);
- return devm_of_platform_populate(dev);
+ ret = devm_of_platform_populate(dev);
+ if (ret)
+ goto err_irq;
+
+ return 0;
+
+err_irq:
+ mx25_tsadc_unset_irq(pdev);
+
+ return ret;
}
static int mx25_tsadc_remove(struct platform_device *pdev)
{
- struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (irq) {
- irq_set_chained_handler_and_data(irq, NULL, NULL);
- irq_domain_remove(tsadc->domain);
- }
+ mx25_tsadc_unset_irq(pdev);
return 0;
}
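
The probe-side check is relaxed from irq <= 0 to irq < 0 because platform_get_irq() returns a negative errno on failure and is documented not to return 0. The canonical caller-side check is simply:

#include <linux/platform_device.h>

/* Illustrative caller: treat only negative values as errors. */
static int example_get_irq_number(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* negative errno; 0 is never returned */

	return irq;
}
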
diff --git a/drivers/mfd/gateworks-gsc.c b/drivers/mfd/gateworks-gsc.c
index d87876747b91..9d7d870c44a8 100644
--- a/drivers/mfd/gateworks-gsc.c
+++ b/drivers/mfd/gateworks-gsc.c
@@ -255,11 +255,9 @@ static int gsc_probe(struct i2c_client *client)
return 0;
}
-static int gsc_remove(struct i2c_client *client)
+static void gsc_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &attr_group);
-
- return 0;
}
static struct i2c_driver gsc_driver = {
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 417b0355d904..b45b1346ab54 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -20,7 +20,9 @@
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/htcpld.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
struct htcpld_chip {
@@ -58,8 +60,8 @@ struct htcpld_data {
uint irq_start;
int nirqs;
uint chained_irq;
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
+ struct gpio_desc *int_reset_gpio_hi;
+ struct gpio_desc *int_reset_gpio_lo;
/* htcpld info */
struct htcpld_chip *chip;
@@ -196,9 +198,9 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
* be asserted.
*/
if (htcpld->int_reset_gpio_hi)
- gpio_set_value(htcpld->int_reset_gpio_hi, 1);
+ gpiod_set_value(htcpld->int_reset_gpio_hi, 1);
if (htcpld->int_reset_gpio_lo)
- gpio_set_value(htcpld->int_reset_gpio_lo, 0);
+ gpiod_set_value(htcpld->int_reset_gpio_lo, 0);
return IRQ_HANDLED;
}
@@ -352,7 +354,7 @@ static int htcpld_register_chip_i2c(
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = plat_chip_data->addr;
- strlcpy(info.type, "htcpld-chip", I2C_NAME_SIZE);
+ strscpy(info.type, "htcpld-chip", I2C_NAME_SIZE);
info.platform_data = chip;
/* Add the I2C device. This calls the probe() function. */
@@ -562,34 +564,28 @@ static int htcpld_core_probe(struct platform_device *pdev)
return ret;
/* Request the GPIO(s) for the int reset and set them up */
- if (pdata->int_reset_gpio_hi) {
- ret = gpio_request(pdata->int_reset_gpio_hi, "htcpld-core");
- if (ret) {
- /*
- * If it failed, that sucks, but we can probably
- * continue on without it.
- */
- dev_warn(dev, "Unable to request int_reset_gpio_hi -- interrupts may not work\n");
- htcpld->int_reset_gpio_hi = 0;
- } else {
- htcpld->int_reset_gpio_hi = pdata->int_reset_gpio_hi;
- gpio_set_value(htcpld->int_reset_gpio_hi, 1);
- }
+ htcpld->int_reset_gpio_hi = gpiochip_request_own_desc(&htcpld->chip[2].chip_out,
+ 7, "htcpld-core", GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(htcpld->int_reset_gpio_hi)) {
+ /*
+ * If it failed, that sucks, but we can probably
+ * continue on without it.
+ */
+ htcpld->int_reset_gpio_hi = NULL;
+ dev_warn(dev, "Unable to request int_reset_gpio_hi -- interrupts may not work\n");
}
- if (pdata->int_reset_gpio_lo) {
- ret = gpio_request(pdata->int_reset_gpio_lo, "htcpld-core");
- if (ret) {
- /*
- * If it failed, that sucks, but we can probably
- * continue on without it.
- */
- dev_warn(dev, "Unable to request int_reset_gpio_lo -- interrupts may not work\n");
- htcpld->int_reset_gpio_lo = 0;
- } else {
- htcpld->int_reset_gpio_lo = pdata->int_reset_gpio_lo;
- gpio_set_value(htcpld->int_reset_gpio_lo, 0);
- }
+ htcpld->int_reset_gpio_lo = gpiochip_request_own_desc(&htcpld->chip[2].chip_out,
+ 0, "htcpld-core", GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(htcpld->int_reset_gpio_lo)) {
+ /*
+ * If it failed, that sucks, but we can probably
+ * continue on without it.
+ */
+ htcpld->int_reset_gpio_lo = NULL;
+ dev_warn(dev, "Unable to request int_reset_gpio_lo -- interrupts may not work\n");
}
dev_info(dev, "Initialized successfully\n");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index bb08b7a73fe1..dde31c50a632 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/pxa2xx_ssp.h>
#include "intel-lpss.h"
@@ -73,8 +74,18 @@ static void intel_lpss_pci_remove(struct pci_dev *pdev)
static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
+static const struct property_entry spt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_SPT_SSP),
+ { }
+};
+
+static const struct software_node spt_spi_node = {
+ .properties = spt_spi_properties,
+};
+
static const struct intel_lpss_platform_info spt_info = {
.clk_rate = 120000000,
+ .swnode = &spt_spi_node,
};
static const struct property_entry spt_i2c_properties[] = {
@@ -108,8 +119,18 @@ static const struct intel_lpss_platform_info spt_uart_info = {
.swnode = &uart_node,
};
+static const struct property_entry bxt_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BXT_SSP),
+ { }
+};
+
+static const struct software_node bxt_spi_node = {
+ .properties = bxt_spi_properties,
+};
+
static const struct intel_lpss_platform_info bxt_info = {
.clk_rate = 100000000,
+ .swnode = &bxt_spi_node,
};
static const struct intel_lpss_platform_info bxt_uart_info = {
@@ -166,6 +187,20 @@ static const struct intel_lpss_platform_info glk_i2c_info = {
.swnode = &glk_i2c_node,
};
+static const struct property_entry cnl_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_CNL_SSP),
+ { }
+};
+
+static const struct software_node cnl_spi_node = {
+ .properties = cnl_spi_properties,
+};
+
+static const struct intel_lpss_platform_info cnl_info = {
+ .clk_rate = 120000000,
+ .swnode = &cnl_spi_node,
+};
+
static const struct intel_lpss_platform_info cnl_i2c_info = {
.clk_rate = 216000000,
.swnode = &spt_i2c_node,
@@ -176,12 +211,26 @@ static const struct intel_lpss_platform_info ehl_i2c_info = {
.swnode = &bxt_i2c_node,
};
+static const struct property_entry tgl_spi_properties[] = {
+ PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_CNL_SSP),
+ { }
+};
+
+static const struct software_node tgl_spi_node = {
+ .properties = tgl_spi_properties,
+};
+
+static const struct intel_lpss_platform_info tgl_info = {
+ .clk_rate = 100000000,
+ .swnode = &tgl_spi_node,
+};
+
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x02a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x02ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x02c5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02c6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02c7), (kernel_ulong_t)&spt_uart_info },
@@ -189,18 +238,18 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x02e9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x02eb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x02fb), (kernel_ulong_t)&cnl_info },
/* CML-H */
{ PCI_VDEVICE(INTEL, 0x06a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x06a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x06ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x06c7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x06e8), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06e9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06ea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x06eb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x06fb), (kernel_ulong_t)&cnl_info },
/* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
@@ -255,8 +304,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* ICL-LP */
{ PCI_VDEVICE(INTEL, 0x34a8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x34a9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x34aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x34ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x34c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34c7), (kernel_ulong_t)&spt_uart_info },
@@ -264,15 +313,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x34e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&cnl_info },
/* ICL-N */
{ PCI_VDEVICE(INTEL, 0x38a8), (kernel_ulong_t)&spt_uart_info },
/* TGL-H */
{ PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
@@ -281,8 +330,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&tgl_info },
/* EHL */
{ PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
@@ -301,8 +350,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* JSL */
{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc7), (kernel_ulong_t)&spt_uart_info },
@@ -310,12 +359,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4deb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x4dfb), (kernel_ulong_t)&cnl_info },
/* ADL-P */
{ PCI_VDEVICE(INTEL, 0x51a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x51a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51aa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x51ab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x51c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51c7), (kernel_ulong_t)&bxt_uart_info },
@@ -325,12 +374,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x51e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x51eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x51fb), (kernel_ulong_t)&tgl_info },
/* ADL-M */
{ PCI_VDEVICE(INTEL, 0x54a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x54a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x54aa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x54ab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x54c5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54c6), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54c7), (kernel_ulong_t)&bxt_uart_info },
@@ -338,7 +387,7 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x54e9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54ea), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x54eb), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x54fb), (kernel_ulong_t)&tgl_info },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&apl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&apl_i2c_info },
@@ -358,39 +407,39 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* RPL-S */
{ PCI_VDEVICE(INTEL, 0x7a28), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7a29), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7a2a), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7a2b), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7a2a), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7a2b), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7a4c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4e), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a4f), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a5c), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7a79), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7a7b), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7a79), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7a7b), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7a7c), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a7d), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7a7e), (kernel_ulong_t)&bxt_uart_info },
/* ADL-S */
{ PCI_VDEVICE(INTEL, 0x7aa8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7aa9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7aaa), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7aab), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7acc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7acd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7ace), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7acf), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7adc), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7af9), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7afb), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7afc), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afd), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7afe), (kernel_ulong_t)&bxt_uart_info },
/* MTL-P */
{ PCI_VDEVICE(INTEL, 0x7e25), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0x7e26), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x7e27), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7e30), (kernel_ulong_t)&tgl_info },
+ { PCI_VDEVICE(INTEL, 0x7e46), (kernel_ulong_t)&tgl_info },
{ PCI_VDEVICE(INTEL, 0x7e50), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7e51), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x7e52), (kernel_ulong_t)&bxt_uart_info },
@@ -424,8 +473,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CNL-LP */
{ PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
@@ -433,12 +482,12 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&cnl_info },
/* TGL-LP */
{ PCI_VDEVICE(INTEL, 0xa0a8), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0a9), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa0aa), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0ab), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0c5), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0c6), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0c7), (kernel_ulong_t)&bxt_uart_info },
@@ -448,15 +497,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa0db), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0dc), (kernel_ulong_t)&bxt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa0dd), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa0de), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0df), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0e8), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0e9), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0ea), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa0eb), (kernel_ulong_t)&spt_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa0fb), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0fd), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0fe), (kernel_ulong_t)&cnl_info },
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
@@ -479,14 +528,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CNL-H */
{ PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info },
- { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
- { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&cnl_info },
/* CML-V */
{ PCI_VDEVICE(INTEL, 0xa3a7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa3a8), (kernel_ulong_t)&spt_uart_info },
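
The new software nodes attach an "intel,spi-pxa2xx-type" property to the LPSS SPI cells so the child controller driver can discover the SSP variant. A sketch of how a consumer could read the value back through the generic device-property API (the surrounding function is hypothetical):

#include <linux/device.h>
#include <linux/property.h>

static int example_read_ssp_type(struct device *dev)
{
	u32 type;
	int ret;

	/* Reads the value set via PROPERTY_ENTRY_U32() above, e.g. LPSS_CNL_SSP */
	ret = device_property_read_u32(dev, "intel,spi-pxa2xx-type", &type);
	if (ret)
		return ret;

	return type;
}
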
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
index f4d0d72573c8..7e3319e5b22f 100644
--- a/drivers/mfd/intel-m10-bmc.c
+++ b/drivers/mfd/intel-m10-bmc.c
@@ -21,6 +21,7 @@ enum m10bmc_type {
static struct mfd_cell m10bmc_d5005_subdevs[] = {
{ .name = "d5005bmc-hwmon" },
+ { .name = "d5005bmc-sec-update" }
};
static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 1c7577b881ff..282b8fd08009 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -140,7 +140,7 @@ static void chtdc_ti_shutdown(struct i2c_client *i2c)
disable_irq(pmic->irq);
}
-static int __maybe_unused chtdc_ti_suspend(struct device *dev)
+static int chtdc_ti_suspend(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -149,7 +149,7 @@ static int __maybe_unused chtdc_ti_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused chtdc_ti_resume(struct device *dev)
+static int chtdc_ti_resume(struct device *dev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
@@ -158,7 +158,7 @@ static int __maybe_unused chtdc_ti_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
static const struct acpi_device_id chtdc_ti_acpi_ids[] = {
{ "INT33F5" },
@@ -169,7 +169,7 @@ MODULE_DEVICE_TABLE(acpi, chtdc_ti_acpi_ids);
static struct i2c_driver chtdc_ti_i2c_driver = {
.driver = {
.name = "intel_soc_pmic_chtdc_ti",
- .pm = &chtdc_ti_pm_ops,
+ .pm = pm_sleep_ptr(&chtdc_ti_pm_ops),
.acpi_match_table = chtdc_ti_acpi_ids,
},
.probe_new = chtdc_ti_probe,
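A minimal sketch of the pm_sleep_ptr() pattern the hunk above converts to, using a hypothetical "foo" I2C driver (names are illustrative, not part of this series). When CONFIG_PM_SLEEP is disabled, pm_sleep_ptr() evaluates to NULL, so the dev_pm_ops and the callbacks it references can be discarded by the compiler/linker without any __maybe_unused annotations:

#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hypothetical device here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring it back up here */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM_SLEEP=n, &foo_pm_ops otherwise */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};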
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
deleted file mode 100644
index 5e8c94e008ed..000000000000
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ /dev/null
@@ -1,160 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel SoC PMIC MFD Driver
- *
- * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
- *
- * Author: Yang, Bin <bin.yang@intel.com>
- * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/intel_soc_pmic.h>
-#include <linux/platform_data/x86/soc.h>
-#include <linux/pwm.h>
-#include <linux/regmap.h>
-
-#include "intel_soc_pmic_core.h"
-
-/* PWM consumed by the Intel GFX */
-static struct pwm_lookup crc_pwm_lookup[] = {
- PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
-};
-
-static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
-{
- struct device *dev = &i2c->dev;
- struct intel_soc_pmic_config *config;
- struct intel_soc_pmic *pmic;
- int ret;
-
- if (soc_intel_is_byt())
- config = &intel_soc_pmic_config_byt_crc;
- else
- config = &intel_soc_pmic_config_cht_crc;
-
- pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- dev_set_drvdata(dev, pmic);
-
- pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
- if (IS_ERR(pmic->regmap))
- return PTR_ERR(pmic->regmap);
-
- pmic->irq = i2c->irq;
-
- ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
- config->irq_flags | IRQF_ONESHOT,
- 0, config->irq_chip,
- &pmic->irq_chip_data);
- if (ret)
- return ret;
-
- ret = enable_irq_wake(pmic->irq);
- if (ret)
- dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
-
- /* Add lookup table for crc-pwm */
- pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
-
- /* To distuingish this domain from the GPIO/charger's irqchip domains */
- irq_domain_update_bus_token(regmap_irq_get_domain(pmic->irq_chip_data),
- DOMAIN_BUS_NEXUS);
-
- ret = mfd_add_devices(dev, -1, config->cell_dev,
- config->n_cell_devs, NULL, 0,
- regmap_irq_get_domain(pmic->irq_chip_data));
- if (ret)
- goto err_del_irq_chip;
-
- return 0;
-
-err_del_irq_chip:
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
- return ret;
-}
-
-static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
-
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
-
- /* remove crc-pwm lookup table */
- pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
-
- mfd_remove_devices(&i2c->dev);
-
- return 0;
-}
-
-static void intel_soc_pmic_shutdown(struct i2c_client *i2c)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
-
- disable_irq(pmic->irq);
-
- return;
-}
-
-#if defined(CONFIG_PM_SLEEP)
-static int intel_soc_pmic_suspend(struct device *dev)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
-
- disable_irq(pmic->irq);
-
- return 0;
-}
-
-static int intel_soc_pmic_resume(struct device *dev)
-{
- struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
-
- enable_irq(pmic->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(intel_soc_pmic_pm_ops, intel_soc_pmic_suspend,
- intel_soc_pmic_resume);
-
-static const struct i2c_device_id intel_soc_pmic_i2c_id[] = {
- { }
-};
-MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
-
-#if defined(CONFIG_ACPI)
-static const struct acpi_device_id intel_soc_pmic_acpi_match[] = {
- { "INT33FD" },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, intel_soc_pmic_acpi_match);
-#endif
-
-static struct i2c_driver intel_soc_pmic_i2c_driver = {
- .driver = {
- .name = "intel_soc_pmic_i2c",
- .pm = &intel_soc_pmic_pm_ops,
- .acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
- },
- .probe = intel_soc_pmic_i2c_probe,
- .remove = intel_soc_pmic_i2c_remove,
- .id_table = intel_soc_pmic_i2c_id,
- .shutdown = intel_soc_pmic_shutdown,
-};
-
-module_i2c_driver(intel_soc_pmic_i2c_driver);
-
-MODULE_DESCRIPTION("I2C driver for Intel SoC PMIC");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>");
-MODULE_AUTHOR("Zhu, Lejun <lejun.zhu@linux.intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
deleted file mode 100644
index d490685845eb..000000000000
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Intel SoC PMIC MFD Driver
- *
- * Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
- *
- * Author: Yang, Bin <bin.yang@intel.com>
- * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
- */
-
-#ifndef __INTEL_SOC_PMIC_CORE_H__
-#define __INTEL_SOC_PMIC_CORE_H__
-
-struct intel_soc_pmic_config {
- unsigned long irq_flags;
- struct mfd_cell *cell_dev;
- int n_cell_devs;
- const struct regmap_config *regmap_config;
- const struct regmap_irq_chip *irq_chip;
-};
-
-extern struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc;
-extern struct intel_soc_pmic_config intel_soc_pmic_config_cht_crc;
-
-#endif /* __INTEL_SOC_PMIC_CORE_H__ */
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 5bb0367bd974..b1548a933dc3 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -2,18 +2,21 @@
/*
* Device access for Crystal Cove PMIC
*
- * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2012-2014, 2022 Intel Corporation. All rights reserved.
*
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/regmap.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
-
-#include "intel_soc_pmic_core.h"
+#include <linux/platform_data/x86/soc.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
#define CRYSTAL_COVE_MAX_REGISTER 0xC6
@@ -132,7 +135,20 @@ static const struct regmap_irq_chip crystal_cove_irq_chip = {
.mask_base = CRYSTAL_COVE_REG_MIRQLVL1,
};
-struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc = {
+/* PWM consumed by the Intel GFX */
+static struct pwm_lookup crc_pwm_lookup[] = {
+ PWM_LOOKUP("crystal_cove_pwm", 0, "0000:00:02.0", "pwm_pmic_backlight", 0, PWM_POLARITY_NORMAL),
+};
+
+struct crystal_cove_config {
+ unsigned long irq_flags;
+ struct mfd_cell *cell_dev;
+ int n_cell_devs;
+ const struct regmap_config *regmap_config;
+ const struct regmap_irq_chip *irq_chip;
+};
+
+static const struct crystal_cove_config crystal_cove_config_byt_crc = {
.irq_flags = IRQF_TRIGGER_RISING,
.cell_dev = crystal_cove_byt_dev,
.n_cell_devs = ARRAY_SIZE(crystal_cove_byt_dev),
@@ -140,10 +156,121 @@ struct intel_soc_pmic_config intel_soc_pmic_config_byt_crc = {
.irq_chip = &crystal_cove_irq_chip,
};
-struct intel_soc_pmic_config intel_soc_pmic_config_cht_crc = {
+static const struct crystal_cove_config crystal_cove_config_cht_crc = {
.irq_flags = IRQF_TRIGGER_RISING,
.cell_dev = crystal_cove_cht_dev,
.n_cell_devs = ARRAY_SIZE(crystal_cove_cht_dev),
.regmap_config = &crystal_cove_regmap_config,
.irq_chip = &crystal_cove_irq_chip,
};
+
+static int crystal_cove_i2c_probe(struct i2c_client *i2c)
+{
+ const struct crystal_cove_config *config;
+ struct device *dev = &i2c->dev;
+ struct intel_soc_pmic *pmic;
+ int ret;
+
+ if (soc_intel_is_byt())
+ config = &crystal_cove_config_byt_crc;
+ else
+ config = &crystal_cove_config_cht_crc;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, pmic);
+
+ pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+
+ pmic->irq = i2c->irq;
+
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
+ config->irq_flags | IRQF_ONESHOT,
+ 0, config->irq_chip, &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ ret = enable_irq_wake(pmic->irq);
+ if (ret)
+ dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
+
+ /* Add lookup table for crc-pwm */
+ pwm_add_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
+ /* To distinguish this domain from the GPIO/charger's irqchip domains */
+ irq_domain_update_bus_token(regmap_irq_get_domain(pmic->irq_chip_data),
+ DOMAIN_BUS_NEXUS);
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_NONE, config->cell_dev,
+ config->n_cell_devs, NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+ if (ret)
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
+ return ret;
+}
+
+static void crystal_cove_i2c_remove(struct i2c_client *i2c)
+{
+ /* remove crc-pwm lookup table */
+ pwm_remove_table(crc_pwm_lookup, ARRAY_SIZE(crc_pwm_lookup));
+
+ mfd_remove_devices(&i2c->dev);
+}
+
+static void crystal_cove_shutdown(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = i2c_get_clientdata(i2c);
+
+ disable_irq(pmic->irq);
+
+ return;
+}
+
+static int crystal_cove_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int crystal_cove_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(crystal_cove_pm_ops, crystal_cove_suspend, crystal_cove_resume);
+
+static const struct acpi_device_id crystal_cove_acpi_match[] = {
+ { "INT33FD" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, crystal_cove_acpi_match);
+
+static struct i2c_driver crystal_cove_i2c_driver = {
+ .driver = {
+ .name = "crystal_cove_i2c",
+ .pm = pm_sleep_ptr(&crystal_cove_pm_ops),
+ .acpi_match_table = crystal_cove_acpi_match,
+ },
+ .probe_new = crystal_cove_i2c_probe,
+ .remove = crystal_cove_i2c_remove,
+ .shutdown = crystal_cove_shutdown,
+};
+
+module_i2c_driver(crystal_cove_i2c_driver);
+
+MODULE_DESCRIPTION("I2C driver for Intel SoC PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>");
+MODULE_AUTHOR("Zhu, Lejun <lejun.zhu@linux.intel.com>");
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index 575ab67e243d..1895fce25b06 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -1008,13 +1008,11 @@ static int iqs62x_probe(struct i2c_client *client)
return ret;
}
-static int iqs62x_remove(struct i2c_client *client)
+static void iqs62x_remove(struct i2c_client *client)
{
struct iqs62x_core *iqs62x = i2c_get_clientdata(client);
wait_for_completion(&iqs62x->fw_done);
-
- return 0;
}
static int __maybe_unused iqs62x_suspend(struct device *dev)
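Many hunks in this section make the same mechanical change: the I2C .remove callback now returns void, since the I2C core always proceeded with the unbind regardless of the old int return value. A minimal sketch of the new prototype for a hypothetical "bar" driver (the struct and teardown helper are illustrative only):

#include <linux/i2c.h>

struct bar_chip {
	int dummy;			/* hypothetical driver state */
};

static void bar_chip_teardown(struct bar_chip *chip)
{
	/* hypothetical cleanup mirroring whatever probe set up */
}

static void bar_remove(struct i2c_client *client)
{
	struct bar_chip *chip = i2c_get_clientdata(client);

	bar_chip_teardown(chip);
	/* no return value: errors here could not stop the unbind anyway */
}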
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 5690768f3e63..be32ffc5af38 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -607,15 +607,13 @@ static int lm3533_i2c_probe(struct i2c_client *i2c,
return lm3533_device_init(lm3533);
}
-static int lm3533_i2c_remove(struct i2c_client *i2c)
+static void lm3533_i2c_remove(struct i2c_client *i2c)
{
struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
dev_dbg(&i2c->dev, "%s\n", __func__);
lm3533_device_exit(lm3533);
-
- return 0;
}
static const struct i2c_device_id lm3533_i2c_ids[] = {
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index 348439a3fbbd..39006297f3d2 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -175,6 +175,7 @@ int lp8788_irq_init(struct lp8788 *lp, int irq)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"lp8788-irq", irqd);
if (ret) {
+ irq_domain_remove(lp->irqdm);
dev_err(lp->dev, "failed to create a thread for IRQ_N\n");
return ret;
}
@@ -188,4 +189,6 @@ void lp8788_irq_exit(struct lp8788 *lp)
{
if (lp->irq)
free_irq(lp->irq, lp->irqdm);
+ if (lp->irqdm)
+ irq_domain_remove(lp->irqdm);
}
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index c223d2c6a363..724a5712b36b 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -195,17 +195,24 @@ static int lp8788_probe(struct i2c_client *cl, const struct i2c_device_id *id)
if (ret)
return ret;
- return mfd_add_devices(lp->dev, -1, lp8788_devs,
- ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(lp->dev, -1, lp8788_devs,
+ ARRAY_SIZE(lp8788_devs), NULL, 0, NULL);
+ if (ret)
+ goto err_exit_irq;
+
+ return 0;
+
+err_exit_irq:
+ lp8788_irq_exit(lp);
+ return ret;
}
-static int lp8788_remove(struct i2c_client *cl)
+static void lp8788_remove(struct i2c_client *cl)
{
struct lp8788 *lp = i2c_get_clientdata(cl);
mfd_remove_devices(lp->dev);
lp8788_irq_exit(lp);
- return 0;
}
static const struct i2c_device_id lp8788_ids[] = {
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 650951f89f1c..7b1c597b6879 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -959,7 +959,7 @@ static int lpc_ich_finalize_wdt_cell(struct pci_dev *dev)
info = &lpc_chipset_info[priv->chipset];
pdata->version = info->iTCO_version;
- strlcpy(pdata->name, info->name, sizeof(pdata->name));
+ strscpy(pdata->name, info->name, sizeof(pdata->name));
cell->platform_data = pdata;
cell->pdata_size = sizeof(*pdata);
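The strlcpy() to strscpy() swap above follows the kernel-wide move away from strlcpy(), whose return value is strlen(src) and therefore requires reading the whole source string even past the destination size. A small sketch of the strscpy() idiom with a hypothetical helper (not from this patch):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void copy_name(char *dst, size_t size, const char *src)
{
	/* strscpy() always NUL-terminates dst and returns -E2BIG on truncation */
	ssize_t ret = strscpy(dst, src, size);

	if (ret == -E2BIG)
		pr_warn("name '%s' truncated\n", src);
}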
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
index 7df5b9ba5855..915d2f95bad3 100644
--- a/drivers/mfd/madera-i2c.c
+++ b/drivers/mfd/madera-i2c.c
@@ -112,13 +112,11 @@ static int madera_i2c_probe(struct i2c_client *i2c,
return madera_dev_init(madera);
}
-static int madera_i2c_remove(struct i2c_client *i2c)
+static void madera_i2c_remove(struct i2c_client *i2c)
{
struct madera *madera = dev_get_drvdata(&i2c->dev);
madera_dev_exit(madera);
-
- return 0;
}
static const struct i2c_device_id madera_i2c_id[] = {
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6c487fa14e9c..d44ad6f33742 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -463,7 +463,7 @@ err_max77836:
return ret;
}
-static int max14577_i2c_remove(struct i2c_client *i2c)
+static void max14577_i2c_remove(struct i2c_client *i2c)
{
struct max14577 *max14577 = i2c_get_clientdata(i2c);
@@ -471,8 +471,6 @@ static int max14577_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(max14577->irq, max14577->irq_data);
if (max14577->dev_type == MAXIM_DEVICE_TYPE_MAX77836)
max77836_remove(max14577);
-
- return 0;
}
static const struct i2c_device_id max14577_i2c_id[] = {
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 4e6244e17559..7088cb6f9174 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -294,7 +294,7 @@ err_i2c_haptic:
return ret;
}
-static int max77693_i2c_remove(struct i2c_client *i2c)
+static void max77693_i2c_remove(struct i2c_client *i2c)
{
struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
@@ -307,8 +307,6 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
i2c_unregister_device(max77693->i2c_muic);
i2c_unregister_device(max77693->i2c_haptic);
-
- return 0;
}
static const struct i2c_device_id max77693_i2c_id[] = {
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 41f566e6a096..c340080971ce 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -282,7 +282,7 @@ err_alloc_drvdata:
return ret;
}
-static int max8907_i2c_remove(struct i2c_client *i2c)
+static void max8907_i2c_remove(struct i2c_client *i2c)
{
struct max8907 *max8907 = i2c_get_clientdata(i2c);
@@ -293,8 +293,6 @@ static int max8907_i2c_remove(struct i2c_client *i2c)
regmap_del_irq_chip(max8907->i2c_gen->irq, max8907->irqc_chg);
i2c_unregister_device(max8907->i2c_rtc);
-
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 114e905bef25..04101da42bd3 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -198,14 +198,13 @@ static int max8925_probe(struct i2c_client *client,
return 0;
}
-static int max8925_remove(struct i2c_client *client)
+static void max8925_remove(struct i2c_client *client)
{
struct max8925_chip *chip = i2c_get_clientdata(client);
max8925_device_exit(chip);
i2c_unregister_device(chip->adc);
i2c_unregister_device(chip->rtc);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index fb937f66277e..eb94f3004cf3 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -85,10 +85,9 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
return mc13xxx_common_init(&client->dev);
}
-static int mc13xxx_i2c_remove(struct i2c_client *client)
+static void mc13xxx_i2c_remove(struct i2c_client *client)
{
mc13xxx_common_exit(&client->dev);
- return 0;
}
static struct i2c_driver mc13xxx_i2c_driver = {
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 07e0ca2e467c..eb08f69001f9 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1222,14 +1222,13 @@ fail:
return err;
}
-static int menelaus_remove(struct i2c_client *client)
+static void menelaus_remove(struct i2c_client *client)
{
struct menelaus_chip *menelaus = i2c_get_clientdata(client);
free_irq(client->irq, menelaus);
flush_work(&menelaus->work);
the_menelaus = NULL;
- return 0;
}
static const struct i2c_device_id menelaus_id[] = {
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 8b058200d5ad..16d1861e9682 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -105,7 +105,7 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell,
.ids = ids,
};
- strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
+ strscpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
acpi_dev_for_each_child(parent, match_device_ids, &wd);
adev = wd.adev;
} else {
@@ -368,6 +368,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
{
struct platform_device *pdev;
const struct mfd_cell *cell;
+ struct mfd_of_node_entry *of_entry, *tmp;
int *level = data;
if (dev->type != &mfd_dev_type)
@@ -382,6 +383,12 @@ static int mfd_remove_devices_fn(struct device *dev, void *data)
if (cell->swnode)
device_remove_software_node(&pdev->dev);
+ list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list)
+ if (of_entry->dev == &pdev->dev) {
+ list_del(&of_entry->list);
+ kfree(of_entry);
+ }
+
regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies,
cell->num_parent_supplies);
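The new cleanup in mfd_remove_devices_fn() above frees mfd_of_node_list entries while walking the list, which is only safe with the _safe iterator. A minimal sketch of that idiom with hypothetical types (not the actual MFD structures):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>

struct node_entry {			/* hypothetical list element */
	struct list_head list;
	struct device *dev;
};

static void drop_entries_for_dev(struct list_head *head, struct device *dev)
{
	struct node_entry *entry, *tmp;

	/* tmp holds the next element, so entry may be unlinked and freed */
	list_for_each_entry_safe(entry, tmp, head, list) {
		if (entry->dev != dev)
			continue;
		list_del(&entry->list);
		kfree(entry);
	}
}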
diff --git a/drivers/mfd/mt6370.c b/drivers/mfd/mt6370.c
new file mode 100644
index 000000000000..cf19cce2fdc0
--- /dev/null
+++ b/drivers/mfd/mt6370.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "mt6370.h"
+
+#define MT6370_REG_DEV_INFO 0x100
+#define MT6370_REG_CHG_IRQ1 0x1C0
+#define MT6370_REG_CHG_MASK1 0x1E0
+#define MT6370_REG_MAXADDR 0x1FF
+
+#define MT6370_VENID_MASK GENMASK(7, 4)
+
+#define MT6370_NUM_IRQREGS 16
+#define MT6370_USBC_I2CADDR 0x4E
+#define MT6370_MAX_ADDRLEN 2
+
+#define MT6370_VENID_RT5081 0x8
+#define MT6370_VENID_RT5081A 0xA
+#define MT6370_VENID_MT6370 0xE
+#define MT6370_VENID_MT6371 0xF
+#define MT6370_VENID_MT6372P 0x9
+#define MT6370_VENID_MT6372CP 0xB
+
+static const struct regmap_irq mt6370_irqs[] = {
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHGON, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_TREG, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_AICR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_MIVR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_PWR_RDY, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FL_CHG_VINOVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VSYSUV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VSYSOV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VBATOV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_VINOVPCHG, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_COLD, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_COOL, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_WARM, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_BAT_HOT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TS_STATC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_FAULT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_STATC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_TMR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_BATABS, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_ADPBAD, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_RVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_TSHUTDOWN, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_IINMEAS, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_ICCMEAS, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHGDET_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_WDTMR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_SSFINISH, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_RECHG, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_TERM, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHG_IEOC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_ADC_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_PUMPX_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BST_BATUV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BST_MIDOV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BST_OLP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_ATTACH, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DETACH, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_HVDCP_STPDONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_HVDCP_VBUSDET_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_HVDCP_DET, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_CHGDET, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DCDT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_VGOK, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_WDTMR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_UC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_OC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DIRCHG_OV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_SWON, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_UVP_D, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_UVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_OVP_D, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OVPCTRL_OVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_STRBPIN, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_TORPIN, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_TX, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED_LVF, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_SHORT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_SHORT, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_STRB, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_STRB, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_STRB_TO, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_STRB_TO, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED2_TOR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_FLED1_TOR, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_OTP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_VDDA_OVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_VDDA_UV, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_LDO_OC, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BLED_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_BLED_OVP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VNEG_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VPOS_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_BST_OCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VNEG_SCP, 8),
+ REGMAP_IRQ_REG_LINE(MT6370_IRQ_DSV_VPOS_SCP, 8),
+};
+
+static const struct regmap_irq_chip mt6370_irq_chip = {
+ .name = "mt6370-irqs",
+ .status_base = MT6370_REG_CHG_IRQ1,
+ .mask_base = MT6370_REG_CHG_MASK1,
+ .num_regs = MT6370_NUM_IRQREGS,
+ .irqs = mt6370_irqs,
+ .num_irqs = ARRAY_SIZE(mt6370_irqs),
+};
+
+static const struct resource mt6370_regulator_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VPOS_SCP, "db_vpos_scp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VNEG_SCP, "db_vneg_scp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_BST_OCP, "db_vbst_ocp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VPOS_OCP, "db_vpos_ocp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_DSV_VNEG_OCP, "db_vneg_ocp"),
+ DEFINE_RES_IRQ_NAMED(MT6370_IRQ_LDO_OC, "ldo_oc"),
+};
+
+static const struct mfd_cell mt6370_devices[] = {
+ MFD_CELL_OF("mt6370-adc",
+ NULL, NULL, 0, 0, "mediatek,mt6370-adc"),
+ MFD_CELL_OF("mt6370-charger",
+ NULL, NULL, 0, 0, "mediatek,mt6370-charger"),
+ MFD_CELL_OF("mt6370-flashlight",
+ NULL, NULL, 0, 0, "mediatek,mt6370-flashlight"),
+ MFD_CELL_OF("mt6370-indicator",
+ NULL, NULL, 0, 0, "mediatek,mt6370-indicator"),
+ MFD_CELL_OF("mt6370-tcpc",
+ NULL, NULL, 0, 0, "mediatek,mt6370-tcpc"),
+ MFD_CELL_RES("mt6370-regulator", mt6370_regulator_irqs),
+};
+
+static const struct mfd_cell mt6370_exclusive_devices[] = {
+ MFD_CELL_OF("mt6370-backlight",
+ NULL, NULL, 0, 0, "mediatek,mt6370-backlight"),
+};
+
+static const struct mfd_cell mt6372_exclusive_devices[] = {
+ MFD_CELL_OF("mt6370-backlight",
+ NULL, NULL, 0, 0, "mediatek,mt6372-backlight"),
+};
+
+static int mt6370_check_vendor_info(struct device *dev, struct regmap *rmap,
+ int *vid)
+{
+ unsigned int devinfo;
+ int ret;
+
+ ret = regmap_read(rmap, MT6370_REG_DEV_INFO, &devinfo);
+ if (ret)
+ return ret;
+
+ *vid = FIELD_GET(MT6370_VENID_MASK, devinfo);
+ switch (*vid) {
+ case MT6370_VENID_RT5081:
+ case MT6370_VENID_RT5081A:
+ case MT6370_VENID_MT6370:
+ case MT6370_VENID_MT6371:
+ case MT6370_VENID_MT6372P:
+ case MT6370_VENID_MT6372CP:
+ return 0;
+ default:
+ dev_err(dev, "Unknown Vendor ID 0x%02x\n", devinfo);
+ return -ENODEV;
+ }
+}
+
+static int mt6370_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ struct mt6370_info *info = context;
+ const u8 *u8_buf = reg_buf;
+ u8 bank_idx, bank_addr;
+ int ret;
+
+ bank_idx = u8_buf[0];
+ bank_addr = u8_buf[1];
+
+ ret = i2c_smbus_read_i2c_block_data(info->i2c[bank_idx], bank_addr,
+ val_size, val_buf);
+ if (ret < 0)
+ return ret;
+
+ if (ret != val_size)
+ return -EIO;
+
+ return 0;
+}
+
+static int mt6370_regmap_write(void *context, const void *data, size_t count)
+{
+ struct mt6370_info *info = context;
+ const u8 *u8_buf = data;
+ u8 bank_idx, bank_addr;
+ int len = count - MT6370_MAX_ADDRLEN;
+
+ bank_idx = u8_buf[0];
+ bank_addr = u8_buf[1];
+
+ return i2c_smbus_write_i2c_block_data(info->i2c[bank_idx], bank_addr,
+ len, data + MT6370_MAX_ADDRLEN);
+}
+
+static const struct regmap_bus mt6370_regmap_bus = {
+ .read = mt6370_regmap_read,
+ .write = mt6370_regmap_write,
+};
+
+static const struct regmap_config mt6370_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .max_register = MT6370_REG_MAXADDR,
+};
+
+static int mt6370_probe(struct i2c_client *i2c)
+{
+ struct mt6370_info *info;
+ struct i2c_client *usbc_i2c;
+ struct regmap *regmap;
+ struct device *dev = &i2c->dev;
+ int ret, vid;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ usbc_i2c = devm_i2c_new_dummy_device(dev, i2c->adapter,
+ MT6370_USBC_I2CADDR);
+ if (IS_ERR(usbc_i2c))
+ return dev_err_probe(dev, PTR_ERR(usbc_i2c),
+ "Failed to register USBC I2C client\n");
+
+ /* Assign I2C client for PMU and TypeC */
+ info->i2c[MT6370_PMU_I2C] = i2c;
+ info->i2c[MT6370_USBC_I2C] = usbc_i2c;
+
+ regmap = devm_regmap_init(dev, &mt6370_regmap_bus,
+ info, &mt6370_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ ret = mt6370_check_vendor_info(dev, regmap, &vid);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to check vendor info\n");
+
+ ret = devm_regmap_add_irq_chip(dev, regmap, i2c->irq,
+ IRQF_ONESHOT, -1, &mt6370_irq_chip,
+ &info->irq_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add irq chip\n");
+
+ switch (vid) {
+ case MT6370_VENID_MT6372P:
+ case MT6370_VENID_MT6372CP:
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ mt6372_exclusive_devices,
+ ARRAY_SIZE(mt6372_exclusive_devices),
+ NULL, 0,
+ regmap_irq_get_domain(info->irq_data));
+ break;
+ default:
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ mt6370_exclusive_devices,
+ ARRAY_SIZE(mt6370_exclusive_devices),
+ NULL, 0,
+ regmap_irq_get_domain(info->irq_data));
+ break;
+ }
+
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add the exclusive devices\n");
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ mt6370_devices, ARRAY_SIZE(mt6370_devices),
+ NULL, 0,
+ regmap_irq_get_domain(info->irq_data));
+}
+
+static const struct of_device_id mt6370_match_table[] = {
+ { .compatible = "mediatek,mt6370" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_match_table);
+
+static struct i2c_driver mt6370_driver = {
+ .driver = {
+ .name = "mt6370",
+ .of_match_table = mt6370_match_table,
+ },
+ .probe_new = mt6370_probe,
+};
+module_i2c_driver(mt6370_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 SubPMIC Driver");
+MODULE_LICENSE("GPL v2");
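A worked note on the bank split done by mt6370_regmap_read()/_write() above (a sketch of the decoding only, not driver code): with .reg_bits = 16 and REGMAP_ENDIAN_BIG, regmap hands the bus a two-byte big-endian register, so byte 0 is the bank index into info->i2c[] and byte 1 is the offset within that bank. For example, MT6370_REG_DEV_INFO (0x100) decodes to bank 1, i.e. the main PMU client, at offset 0x00, while 0x0xx registers are routed to the USB-C dummy client at address 0x4E:

#include <linux/types.h>

static void mt6370_decode_reg(u16 reg, u8 *bank_idx, u8 *bank_addr)
{
	*bank_idx  = reg >> 8;		/* index into info->i2c[] */
	*bank_addr = reg & 0xff;	/* SMBus block offset on that client */
}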
diff --git a/drivers/mfd/mt6370.h b/drivers/mfd/mt6370.h
new file mode 100644
index 000000000000..094e59e4af4e
--- /dev/null
+++ b/drivers/mfd/mt6370.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#ifndef __MFD_MT6370_H__
+#define __MFD_MT6370_H__
+
+/* IRQ definitions */
+#define MT6370_IRQ_DIRCHGON 0
+#define MT6370_IRQ_CHG_TREG 4
+#define MT6370_IRQ_CHG_AICR 5
+#define MT6370_IRQ_CHG_MIVR 6
+#define MT6370_IRQ_PWR_RDY 7
+#define MT6370_IRQ_FL_CHG_VINOVP 11
+#define MT6370_IRQ_CHG_VSYSUV 12
+#define MT6370_IRQ_CHG_VSYSOV 13
+#define MT6370_IRQ_CHG_VBATOV 14
+#define MT6370_IRQ_CHG_VINOVPCHG 15
+#define MT6370_IRQ_TS_BAT_COLD 20
+#define MT6370_IRQ_TS_BAT_COOL 21
+#define MT6370_IRQ_TS_BAT_WARM 22
+#define MT6370_IRQ_TS_BAT_HOT 23
+#define MT6370_IRQ_TS_STATC 24
+#define MT6370_IRQ_CHG_FAULT 25
+#define MT6370_IRQ_CHG_STATC 26
+#define MT6370_IRQ_CHG_TMR 27
+#define MT6370_IRQ_CHG_BATABS 28
+#define MT6370_IRQ_CHG_ADPBAD 29
+#define MT6370_IRQ_CHG_RVP 30
+#define MT6370_IRQ_TSHUTDOWN 31
+#define MT6370_IRQ_CHG_IINMEAS 32
+#define MT6370_IRQ_CHG_ICCMEAS 33
+#define MT6370_IRQ_CHGDET_DONE 34
+#define MT6370_IRQ_WDTMR 35
+#define MT6370_IRQ_SSFINISH 36
+#define MT6370_IRQ_CHG_RECHG 37
+#define MT6370_IRQ_CHG_TERM 38
+#define MT6370_IRQ_CHG_IEOC 39
+#define MT6370_IRQ_ADC_DONE 40
+#define MT6370_IRQ_PUMPX_DONE 41
+#define MT6370_IRQ_BST_BATUV 45
+#define MT6370_IRQ_BST_MIDOV 46
+#define MT6370_IRQ_BST_OLP 47
+#define MT6370_IRQ_ATTACH 48
+#define MT6370_IRQ_DETACH 49
+#define MT6370_IRQ_HVDCP_STPDONE 51
+#define MT6370_IRQ_HVDCP_VBUSDET_DONE 52
+#define MT6370_IRQ_HVDCP_DET 53
+#define MT6370_IRQ_CHGDET 54
+#define MT6370_IRQ_DCDT 55
+#define MT6370_IRQ_DIRCHG_VGOK 59
+#define MT6370_IRQ_DIRCHG_WDTMR 60
+#define MT6370_IRQ_DIRCHG_UC 61
+#define MT6370_IRQ_DIRCHG_OC 62
+#define MT6370_IRQ_DIRCHG_OV 63
+#define MT6370_IRQ_OVPCTRL_SWON 67
+#define MT6370_IRQ_OVPCTRL_UVP_D 68
+#define MT6370_IRQ_OVPCTRL_UVP 69
+#define MT6370_IRQ_OVPCTRL_OVP_D 70
+#define MT6370_IRQ_OVPCTRL_OVP 71
+#define MT6370_IRQ_FLED_STRBPIN 72
+#define MT6370_IRQ_FLED_TORPIN 73
+#define MT6370_IRQ_FLED_TX 74
+#define MT6370_IRQ_FLED_LVF 75
+#define MT6370_IRQ_FLED2_SHORT 78
+#define MT6370_IRQ_FLED1_SHORT 79
+#define MT6370_IRQ_FLED2_STRB 80
+#define MT6370_IRQ_FLED1_STRB 81
+#define MT6370_IRQ_FLED2_STRB_TO 82
+#define MT6370_IRQ_FLED1_STRB_TO 83
+#define MT6370_IRQ_FLED2_TOR 84
+#define MT6370_IRQ_FLED1_TOR 85
+#define MT6370_IRQ_OTP 93
+#define MT6370_IRQ_VDDA_OVP 94
+#define MT6370_IRQ_VDDA_UV 95
+#define MT6370_IRQ_LDO_OC 103
+#define MT6370_IRQ_BLED_OCP 118
+#define MT6370_IRQ_BLED_OVP 119
+#define MT6370_IRQ_DSV_VNEG_OCP 123
+#define MT6370_IRQ_DSV_VPOS_OCP 124
+#define MT6370_IRQ_DSV_BST_OCP 125
+#define MT6370_IRQ_DSV_VNEG_SCP 126
+#define MT6370_IRQ_DSV_VPOS_SCP 127
+
+enum {
+ MT6370_USBC_I2C = 0,
+ MT6370_PMU_I2C,
+ MT6370_MAX_I2C
+};
+
+struct mt6370_info {
+ struct i2c_client *i2c[MT6370_MAX_I2C];
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __MFD_MT6370_H__ */
diff --git a/drivers/mfd/ntxec.c b/drivers/mfd/ntxec.c
index b711e73eedcb..e16a7a82a929 100644
--- a/drivers/mfd/ntxec.c
+++ b/drivers/mfd/ntxec.c
@@ -239,15 +239,13 @@ static int ntxec_probe(struct i2c_client *client)
return res;
}
-static int ntxec_remove(struct i2c_client *client)
+static void ntxec_remove(struct i2c_client *client)
{
if (client == poweroff_restart_client) {
poweroff_restart_client = NULL;
pm_power_off = NULL;
unregister_restart_handler(&ntxec_restart_handler);
}
-
- return 0;
}
static const struct of_device_id of_ntxec_match_table[] = {
diff --git a/drivers/mfd/ocelot-core.c b/drivers/mfd/ocelot-core.c
new file mode 100644
index 000000000000..1816d52c65c5
--- /dev/null
+++ b/drivers/mfd/ocelot-core.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Core driver for the Ocelot chip family.
+ *
+ * The VSC7511, 7512, 7513, and 7514 can be controlled internally via an
+ * on-chip MIPS processor, or externally via SPI, I2C, PCIe. This core driver is
+ * intended to be the bus-agnostic glue between, for example, the SPI bus and
+ * the child devices.
+ *
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ocelot.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <soc/mscc/ocelot.h>
+
+#include "ocelot.h"
+
+#define REG_GCB_SOFT_RST 0x0008
+
+#define BIT_SOFT_CHIP_RST BIT(0)
+
+#define VSC7512_MIIM0_RES_START 0x7107009c
+#define VSC7512_MIIM1_RES_START 0x710700c0
+#define VSC7512_MIIM_RES_SIZE 0x024
+
+#define VSC7512_PHY_RES_START 0x710700f0
+#define VSC7512_PHY_RES_SIZE 0x004
+
+#define VSC7512_GPIO_RES_START 0x71070034
+#define VSC7512_GPIO_RES_SIZE 0x06c
+
+#define VSC7512_SIO_CTRL_RES_START 0x710700f8
+#define VSC7512_SIO_CTRL_RES_SIZE 0x100
+
+#define VSC7512_GCB_RST_SLEEP_US 100
+#define VSC7512_GCB_RST_TIMEOUT_US 100000
+
+static int ocelot_gcb_chip_rst_status(struct ocelot_ddata *ddata)
+{
+ int val, err;
+
+ err = regmap_read(ddata->gcb_regmap, REG_GCB_SOFT_RST, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+int ocelot_chip_reset(struct device *dev)
+{
+ struct ocelot_ddata *ddata = dev_get_drvdata(dev);
+ int ret, val;
+
+ /*
+ * Reset the entire chip here to put it into a completely known state.
+ * Other drivers may want to reset their own subsystems. The register
+ * self-clears, so a single write is all that is needed; we then wait
+ * for it to clear.
+ */
+ ret = regmap_write(ddata->gcb_regmap, REG_GCB_SOFT_RST, BIT_SOFT_CHIP_RST);
+ if (ret)
+ return ret;
+
+ return readx_poll_timeout(ocelot_gcb_chip_rst_status, ddata, val, !val,
+ VSC7512_GCB_RST_SLEEP_US, VSC7512_GCB_RST_TIMEOUT_US);
+}
+EXPORT_SYMBOL_NS(ocelot_chip_reset, MFD_OCELOT);
+
+static const struct resource vsc7512_miim0_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_MIIM0_RES_START, VSC7512_MIIM_RES_SIZE, "gcb_miim0"),
+ DEFINE_RES_REG_NAMED(VSC7512_PHY_RES_START, VSC7512_PHY_RES_SIZE, "gcb_phy"),
+};
+
+static const struct resource vsc7512_miim1_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_MIIM1_RES_START, VSC7512_MIIM_RES_SIZE, "gcb_miim1"),
+};
+
+static const struct resource vsc7512_pinctrl_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_GPIO_RES_START, VSC7512_GPIO_RES_SIZE, "gcb_gpio"),
+};
+
+static const struct resource vsc7512_sgpio_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_SIO_CTRL_RES_START, VSC7512_SIO_CTRL_RES_SIZE, "gcb_sio"),
+};
+
+static const struct mfd_cell vsc7512_devs[] = {
+ {
+ .name = "ocelot-pinctrl",
+ .of_compatible = "mscc,ocelot-pinctrl",
+ .num_resources = ARRAY_SIZE(vsc7512_pinctrl_resources),
+ .resources = vsc7512_pinctrl_resources,
+ }, {
+ .name = "ocelot-sgpio",
+ .of_compatible = "mscc,ocelot-sgpio",
+ .num_resources = ARRAY_SIZE(vsc7512_sgpio_resources),
+ .resources = vsc7512_sgpio_resources,
+ }, {
+ .name = "ocelot-miim0",
+ .of_compatible = "mscc,ocelot-miim",
+ .of_reg = VSC7512_MIIM0_RES_START,
+ .use_of_reg = true,
+ .num_resources = ARRAY_SIZE(vsc7512_miim0_resources),
+ .resources = vsc7512_miim0_resources,
+ }, {
+ .name = "ocelot-miim1",
+ .of_compatible = "mscc,ocelot-miim",
+ .of_reg = VSC7512_MIIM1_RES_START,
+ .use_of_reg = true,
+ .num_resources = ARRAY_SIZE(vsc7512_miim1_resources),
+ .resources = vsc7512_miim1_resources,
+ },
+};
+
+static void ocelot_core_try_add_regmap(struct device *dev,
+ const struct resource *res)
+{
+ if (dev_get_regmap(dev, res->name))
+ return;
+
+ ocelot_spi_init_regmap(dev, res);
+}
+
+static void ocelot_core_try_add_regmaps(struct device *dev,
+ const struct mfd_cell *cell)
+{
+ int i;
+
+ for (i = 0; i < cell->num_resources; i++)
+ ocelot_core_try_add_regmap(dev, &cell->resources[i]);
+}
+
+int ocelot_core_init(struct device *dev)
+{
+ int i, ndevs;
+
+ ndevs = ARRAY_SIZE(vsc7512_devs);
+
+ for (i = 0; i < ndevs; i++)
+ ocelot_core_try_add_regmaps(dev, &vsc7512_devs[i]);
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, vsc7512_devs, ndevs, NULL, 0, NULL);
+}
+EXPORT_SYMBOL_NS(ocelot_core_init, MFD_OCELOT);
+
+MODULE_DESCRIPTION("Externally Controlled Ocelot Chip Driver");
+MODULE_AUTHOR("Colin Foster <colin.foster@in-advantage.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT_SPI);
diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
new file mode 100644
index 000000000000..2ecd271de2fb
--- /dev/null
+++ b/drivers/mfd/ocelot-spi.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * SPI core driver for the Ocelot chip family.
+ *
+ * This driver will handle everything necessary to allow for communication over
+ * SPI to the VSC7511, VSC7512, VSC7513 and VSC7514 chips. The main functions
+ * are to prepare the chip's SPI interface for a specific bus speed, and a host
+ * processor's endianness. This will create and distribute regmaps for any
+ * children.
+ *
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "ocelot.h"
+
+#define REG_DEV_CPUORG_IF_CTRL 0x0000
+#define REG_DEV_CPUORG_IF_CFGSTAT 0x0004
+
+#define CFGSTAT_IF_NUM_VCORE (0 << 24)
+#define CFGSTAT_IF_NUM_VRAP (1 << 24)
+#define CFGSTAT_IF_NUM_SI (2 << 24)
+#define CFGSTAT_IF_NUM_MIIM (3 << 24)
+
+#define VSC7512_DEVCPU_ORG_RES_START 0x71000000
+#define VSC7512_DEVCPU_ORG_RES_SIZE 0x38
+
+#define VSC7512_CHIP_REGS_RES_START 0x71070000
+#define VSC7512_CHIP_REGS_RES_SIZE 0x14
+
+static const struct resource vsc7512_dev_cpuorg_resource =
+ DEFINE_RES_REG_NAMED(VSC7512_DEVCPU_ORG_RES_START,
+ VSC7512_DEVCPU_ORG_RES_SIZE,
+ "devcpu_org");
+
+static const struct resource vsc7512_gcb_resource =
+ DEFINE_RES_REG_NAMED(VSC7512_CHIP_REGS_RES_START,
+ VSC7512_CHIP_REGS_RES_SIZE,
+ "devcpu_gcb_chip_regs");
+
+static int ocelot_spi_initialize(struct device *dev)
+{
+ struct ocelot_ddata *ddata = dev_get_drvdata(dev);
+ u32 val, check;
+ int err;
+
+ val = OCELOT_SPI_BYTE_ORDER;
+
+ /*
+ * The SPI address must be big-endian, but we want the payload to match
+ * our CPU. These are two bits (0 and 1) but they're repeated such that
+ * the write from any configuration will be valid. The four
+ * configurations are:
+ *
+ * 0b00: little-endian, MSB first
+ * | 111111 | 22221111 | 33222222 |
+ * | 76543210 | 54321098 | 32109876 | 10987654 |
+ *
+ * 0b01: big-endian, MSB first
+ * | 33222222 | 22221111 | 111111 | |
+ * | 10987654 | 32109876 | 54321098 | 76543210 |
+ *
+ * 0b10: little-endian, LSB first
+ * | 111111 | 11112222 | 22222233 |
+ * | 01234567 | 89012345 | 67890123 | 45678901 |
+ *
+ * 0b11: big-endian, LSB first
+ * | 22222233 | 11112222 | 111111 | |
+ * | 45678901 | 67890123 | 89012345 | 01234567 |
+ */
+ err = regmap_write(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CTRL, val);
+ if (err)
+ return err;
+
+ /*
+ * Apply the number of padding bytes between a read request and the data
+ * payload. Some registers have access times of up to 1us, so if the
+ * first payload bit is shifted out too quickly, the read will fail.
+ */
+ val = ddata->spi_padding_bytes;
+ err = regmap_write(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CFGSTAT, val);
+ if (err)
+ return err;
+
+ /*
+ * After we write the interface configuration, read it back here. This
+ * will verify several different things. The first is that the number of
+ * padding bytes actually got written correctly. These are found in bits
+ * 0:3.
+ *
+ * The second is that bit 16 is cleared. Bit 16 is IF_CFGSTAT:IF_STAT,
+ * and will be set if the register access is too fast. This would be in
+ * the condition that the number of padding bytes is insufficient for
+ * the SPI bus frequency.
+ *
+ * The last check is for bits 31:24, which define the interface by which
+ * the registers are being accessed. Since we're accessing them via the
+ * serial interface, it must return IF_NUM_SI.
+ */
+ check = val | CFGSTAT_IF_NUM_SI;
+
+ err = regmap_read(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CFGSTAT, &val);
+ if (err)
+ return err;
+
+ if (check != val)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct regmap_config ocelot_spi_regmap_config = {
+ .reg_bits = 24,
+ .reg_stride = 4,
+ .reg_downshift = 2,
+ .val_bits = 32,
+
+ .write_flag_mask = 0x80,
+
+ .use_single_write = true,
+ .can_multi_write = false,
+
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
+static int ocelot_spi_regmap_bus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct spi_transfer xfers[3] = {0};
+ struct device *dev = context;
+ struct ocelot_ddata *ddata;
+ struct spi_device *spi;
+ struct spi_message msg;
+ unsigned int index = 0;
+
+ ddata = dev_get_drvdata(dev);
+ spi = to_spi_device(dev);
+
+ xfers[index].tx_buf = reg;
+ xfers[index].len = reg_size;
+ index++;
+
+ if (ddata->spi_padding_bytes) {
+ xfers[index].len = ddata->spi_padding_bytes;
+ xfers[index].tx_buf = ddata->dummy_buf;
+ xfers[index].dummy_data = 1;
+ index++;
+ }
+
+ xfers[index].rx_buf = val;
+ xfers[index].len = val_size;
+ index++;
+
+ spi_message_init_with_transfers(&msg, xfers, index);
+
+ return spi_sync(spi, &msg);
+}
+
+static int ocelot_spi_regmap_bus_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static const struct regmap_bus ocelot_spi_regmap_bus = {
+ .write = ocelot_spi_regmap_bus_write,
+ .read = ocelot_spi_regmap_bus_read,
+};
+
+struct regmap *ocelot_spi_init_regmap(struct device *dev, const struct resource *res)
+{
+ struct regmap_config regmap_config;
+
+ memcpy(&regmap_config, &ocelot_spi_regmap_config, sizeof(regmap_config));
+
+ regmap_config.name = res->name;
+ regmap_config.max_register = resource_size(res) - 1;
+ regmap_config.reg_base = res->start;
+
+ return devm_regmap_init(dev, &ocelot_spi_regmap_bus, dev, &regmap_config);
+}
+EXPORT_SYMBOL_NS(ocelot_spi_init_regmap, MFD_OCELOT_SPI);
+
+static int ocelot_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ocelot_ddata *ddata;
+ struct regmap *r;
+ int err;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ddata);
+
+ if (spi->max_speed_hz <= 500000) {
+ ddata->spi_padding_bytes = 0;
+ } else {
+ /*
+ * Calculation taken from the manual for IF_CFGSTAT:IF_CFG.
+ * Register access time is 1us, so we need to configure and send
+ * out enough padding bytes between the read request and the data
+ * transmission to cover at least 1 microsecond.
+ */
+ ddata->spi_padding_bytes = 1 + (spi->max_speed_hz / HZ_PER_MHZ + 2) / 8;
+
+ ddata->dummy_buf = devm_kzalloc(dev, ddata->spi_padding_bytes, GFP_KERNEL);
+ if (!ddata->dummy_buf)
+ return -ENOMEM;
+ }
+
+ spi->bits_per_word = 8;
+
+ err = spi_setup(spi);
+ if (err)
+ return dev_err_probe(&spi->dev, err, "Error performing SPI setup\n");
+
+ r = ocelot_spi_init_regmap(dev, &vsc7512_dev_cpuorg_resource);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ddata->cpuorg_regmap = r;
+
+ r = ocelot_spi_init_regmap(dev, &vsc7512_gcb_resource);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ddata->gcb_regmap = r;
+
+ /*
+ * The chip must be set up for SPI before it gets initialized and reset.
+ * This must be done before calling init, and after a chip reset is
+ * performed.
+ */
+ err = ocelot_spi_initialize(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing SPI bus\n");
+
+ err = ocelot_chip_reset(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error resetting device\n");
+
+ /*
+ * A chip reset will clear the SPI configuration, so it needs to be done
+ * again before we can access any registers.
+ */
+ err = ocelot_spi_initialize(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing SPI bus after reset\n");
+
+ err = ocelot_core_init(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing Ocelot core\n");
+
+ return 0;
+}
+
+static const struct spi_device_id ocelot_spi_ids[] = {
+ { "vsc7512", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ocelot_spi_ids);
+
+static const struct of_device_id ocelot_spi_of_match[] = {
+ { .compatible = "mscc,vsc7512" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ocelot_spi_of_match);
+
+static struct spi_driver ocelot_spi_driver = {
+ .driver = {
+ .name = "ocelot-soc",
+ .of_match_table = ocelot_spi_of_match,
+ },
+ .id_table = ocelot_spi_ids,
+ .probe = ocelot_spi_probe,
+};
+module_spi_driver(ocelot_spi_driver);
+
+MODULE_DESCRIPTION("SPI Controlled Ocelot Chip Driver");
+MODULE_AUTHOR("Colin Foster <colin.foster@in-advantage.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
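The probe path above sizes the read padding from the bus clock; as a worked example of the formula (a sketch extracted from ocelot_spi_probe(), example speed assumed, not measured): at spi->max_speed_hz = 25 MHz it yields 1 + (25 + 2) / 8 = 4 dummy bytes, i.e. 32 bit times or about 1.28 us of dead time between the read address and the returned data, which covers the 1 us register access time:

#include <linux/types.h>
#include <linux/units.h>

static unsigned int ocelot_padding_bytes(u32 max_speed_hz)
{
	/* slow buses (<= 500 kHz) use no padding, matching the probe path */
	if (max_speed_hz <= 500000)
		return 0;

	/* e.g. 25 MHz: 1 + 27 / 8 = 4 bytes ~= 1.28 us on the wire */
	return 1 + (max_speed_hz / HZ_PER_MHZ + 2) / 8;
}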
diff --git a/drivers/mfd/ocelot.h b/drivers/mfd/ocelot.h
new file mode 100644
index 000000000000..b8bc2f1486e2
--- /dev/null
+++ b/drivers/mfd/ocelot.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2021, 2022 Innovative Advantage Inc. */
+
+#ifndef _MFD_OCELOT_H
+#define _MFD_OCELOT_H
+
+#include <linux/kconfig.h>
+
+struct device;
+struct regmap;
+struct resource;
+
+/**
+ * struct ocelot_ddata - Private data for an external Ocelot chip
+ * @gcb_regmap: General Configuration Block regmap. Used for
+ * operations like chip reset.
+ * @cpuorg_regmap: CPU Device Origin Block regmap. Used for operations
+ * like SPI bus configuration.
+ * @spi_padding_bytes: Number of padding bytes that must be thrown out before
+ * read data gets returned. This is calculated during
+ * initialization based on bus speed.
+ * @dummy_buf: Zero-filled buffer of spi_padding_bytes size. The dummy
+ * bytes that will be sent out between the address and
+ * data of a SPI read operation.
+ */
+struct ocelot_ddata {
+ struct regmap *gcb_regmap;
+ struct regmap *cpuorg_regmap;
+ int spi_padding_bytes;
+ void *dummy_buf;
+};
+
+int ocelot_chip_reset(struct device *dev);
+int ocelot_core_init(struct device *dev);
+
+/* SPI-specific routines that won't be necessary for other interfaces */
+struct regmap *ocelot_spi_init_regmap(struct device *dev,
+ const struct resource *res);
+
+#define OCELOT_SPI_BYTE_ORDER_LE 0x00000000
+#define OCELOT_SPI_BYTE_ORDER_BE 0x81818181
+
+#ifdef __LITTLE_ENDIAN
+#define OCELOT_SPI_BYTE_ORDER OCELOT_SPI_BYTE_ORDER_LE
+#else
+#define OCELOT_SPI_BYTE_ORDER OCELOT_SPI_BYTE_ORDER_BE
+#endif
+
+#endif
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index f5b3fa973b13..8b7429bd2e3e 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -700,7 +700,7 @@ err_i2c:
return ret;
}
-static int palmas_i2c_remove(struct i2c_client *i2c)
+static void palmas_i2c_remove(struct i2c_client *i2c)
{
struct palmas *palmas = i2c_get_clientdata(i2c);
int i;
@@ -716,8 +716,6 @@ static int palmas_i2c_remove(struct i2c_client *i2c)
pm_power_off = NULL;
palmas_dev = NULL;
}
-
- return 0;
}
static const struct i2c_device_id palmas_i2c_id[] = {
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index e9c565cf0f54..4ccc2c3e7681 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -273,7 +273,7 @@ err2:
return ret;
}
-static int pcf50633_remove(struct i2c_client *client)
+static void pcf50633_remove(struct i2c_client *client)
{
struct pcf50633 *pcf = i2c_get_clientdata(client);
int i;
@@ -289,8 +289,6 @@ static int pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
-
- return 0;
}
static const struct i2c_device_id pcf50633_id_table[] = {
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 00003a868d28..7e2cd79d17eb 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -60,6 +60,7 @@ static const struct of_device_id pmic_spmi_id_table[] = {
{ .compatible = "qcom,pmi8994", .data = N_USIDS(2) },
{ .compatible = "qcom,pmi8998", .data = N_USIDS(2) },
{ .compatible = "qcom,pmk8002", .data = N_USIDS(2) },
+ { .compatible = "qcom,pmp8074", .data = N_USIDS(2) },
{ .compatible = "qcom,smb2351", .data = N_USIDS(2) },
{ .compatible = "qcom,spmi-pmic", .data = N_USIDS(1) },
{ }
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index c748fd29a220..3b5acf7ca39c 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -287,7 +287,7 @@ static int retu_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return 0;
}
-static int retu_remove(struct i2c_client *i2c)
+static void retu_remove(struct i2c_client *i2c)
{
struct retu_dev *rdev = i2c_get_clientdata(i2c);
@@ -297,8 +297,6 @@ static int retu_remove(struct i2c_client *i2c)
}
mfd_remove_devices(rdev->dev);
regmap_del_irq_chip(i2c->irq, rdev->irq_data);
-
- return 0;
}
static const struct i2c_device_id retu_id[] = {
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 4142b638e5fa..e00da7c7e3b1 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -67,6 +67,10 @@ static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
case RK817_SECONDS_REG ... RK817_WEEKS_REG:
case RK817_RTC_STATUS_REG:
case RK817_CODEC_DTOP_LPT_SRST:
+ case RK817_GAS_GAUGE_ADC_CONFIG0 ... RK817_GAS_GAUGE_CUR_ADC_K0:
+ case RK817_PMIC_CHRG_STS:
+ case RK817_PMIC_CHRG_OUT:
+ case RK817_PMIC_CHRG_IN:
case RK817_INT_STS_REG0:
case RK817_INT_STS_REG1:
case RK817_INT_STS_REG2:
@@ -74,7 +78,7 @@ static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg)
return true;
}
- return true;
+ return false;
}
static const struct regmap_config rk818_regmap_config = {
@@ -127,6 +131,11 @@ static const struct resource rk817_pwrkey_resources[] = {
DEFINE_RES_IRQ(RK817_IRQ_PWRON_FALL),
};
+static const struct resource rk817_charger_resources[] = {
+ DEFINE_RES_IRQ(RK817_IRQ_PLUG_IN),
+ DEFINE_RES_IRQ(RK817_IRQ_PLUG_OUT),
+};
+
static const struct mfd_cell rk805s[] = {
{ .name = "rk808-clkout", },
{ .name = "rk808-regulator", },
@@ -166,6 +175,11 @@ static const struct mfd_cell rk817s[] = {
.resources = &rk817_rtc_resources[0],
},
{ .name = "rk817-codec",},
+ {
+ .name = "rk817-charger",
+ .num_resources = ARRAY_SIZE(rk817_charger_resources),
+ .resources = &rk817_charger_resources[0],
+ },
};
static const struct mfd_cell rk818s[] = {
@@ -778,7 +792,7 @@ err_irq:
return ret;
}
-static int rk808_remove(struct i2c_client *client)
+static void rk808_remove(struct i2c_client *client)
{
struct rk808 *rk808 = i2c_get_clientdata(client);
@@ -792,8 +806,6 @@ static int rk808_remove(struct i2c_client *client)
pm_power_off = NULL;
unregister_restart_handler(&rk808_restart_handler);
-
- return 0;
}
static int __maybe_unused rk8xx_suspend(struct device *dev)
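The rk817_is_volatile_reg() change above restores "cacheable" as the default: previously the function fell through to return true, so every register bypassed the regmap cache. A short sketch of what the .volatile_reg callback controls, with a hypothetical register window (only meaningful when a regcache is configured for the map):

#include <linux/device.h>
#include <linux/types.h>

static bool foo_is_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x20 ... 0x2f:	/* hypothetical live status/ADC registers */
		return true;	/* always read from the hardware */
	default:
		return false;	/* reads may be served from the regcache */
	}
}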
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 384acb459427..eb8005b4e58d 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -241,7 +241,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c)
return rn5t618_irq_init(priv);
}
-static int rn5t618_i2c_remove(struct i2c_client *i2c)
+static void rn5t618_i2c_remove(struct i2c_client *i2c)
{
if (i2c == rn5t618_pm_power_off) {
rn5t618_pm_power_off = NULL;
@@ -249,8 +249,6 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
}
unregister_restart_handler(&rn5t618_restart_handler);
-
- return 0;
}
static int __maybe_unused rn5t618_i2c_suspend(struct device *dev)
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index dc001c9791c1..f716ab8039a0 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -146,13 +146,11 @@ static int rsmu_i2c_probe(struct i2c_client *client,
return rsmu_core_init(rsmu);
}
-static int rsmu_i2c_remove(struct i2c_client *client)
+static void rsmu_i2c_remove(struct i2c_client *client)
{
struct rsmu_ddata *rsmu = i2c_get_clientdata(client);
rsmu_core_exit(rsmu);
-
- return 0;
}
static const struct i2c_device_id rsmu_i2c_id[] = {
diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c
index fb3bd788a3eb..c6d34dc2b520 100644
--- a/drivers/mfd/rt4831.c
+++ b/drivers/mfd/rt4831.c
@@ -87,7 +87,7 @@ static int rt4831_probe(struct i2c_client *client)
ARRAY_SIZE(rt4831_subdevs), NULL, 0, NULL);
}
-static int rt4831_remove(struct i2c_client *client)
+static void rt4831_remove(struct i2c_client *client)
{
struct regmap *regmap = dev_get_regmap(&client->dev, NULL);
int ret;
@@ -96,8 +96,6 @@ static int rt4831_remove(struct i2c_client *client)
ret = regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK);
if (ret)
dev_warn(&client->dev, "Failed to disable outputs (%pe)\n", ERR_PTR(ret));
-
- return 0;
}
static const struct of_device_id __maybe_unused rt4831_of_match[] = {
diff --git a/drivers/mfd/rt5120.c b/drivers/mfd/rt5120.c
new file mode 100644
index 000000000000..8046e383bc92
--- /dev/null
+++ b/drivers/mfd/rt5120.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#define RT5120_REG_INTENABLE 0x1D
+#define RT5120_REG_INTSTAT 0x1E
+#define RT5120_REG_FZCMODE 0x44
+
+#define RT5120_INT_HOTDIE 0
+#define RT5120_INT_PWRKEY_REL 5
+#define RT5120_INT_PWRKEY_PRESS 6
+
+static const struct regmap_range rt5120_rd_yes_ranges[] = {
+ regmap_reg_range(0x03, 0x13),
+ regmap_reg_range(0x1c, 0x20),
+ regmap_reg_range(0x44, 0x44),
+};
+
+static const struct regmap_range rt5120_wr_yes_ranges[] = {
+ regmap_reg_range(0x06, 0x13),
+ regmap_reg_range(0x1c, 0x20),
+ regmap_reg_range(0x44, 0x44),
+};
+
+static const struct regmap_access_table rt5120_rd_table = {
+ .yes_ranges = rt5120_rd_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rt5120_rd_yes_ranges),
+};
+
+static const struct regmap_access_table rt5120_wr_table = {
+ .yes_ranges = rt5120_wr_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rt5120_wr_yes_ranges),
+};
+
+static const struct regmap_config rt5120_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT5120_REG_FZCMODE,
+
+ .wr_table = &rt5120_wr_table,
+ .rd_table = &rt5120_rd_table,
+};
+
+static const struct regmap_irq rt5120_irqs[] = {
+ REGMAP_IRQ_REG_LINE(RT5120_INT_HOTDIE, 8),
+ REGMAP_IRQ_REG_LINE(RT5120_INT_PWRKEY_REL, 8),
+ REGMAP_IRQ_REG_LINE(RT5120_INT_PWRKEY_PRESS, 8),
+};
+
+static const struct regmap_irq_chip rt5120_irq_chip = {
+ .name = "rt5120-pmic",
+ .status_base = RT5120_REG_INTSTAT,
+ .mask_base = RT5120_REG_INTENABLE,
+ .ack_base = RT5120_REG_INTSTAT,
+ .mask_invert = true,
+ .use_ack = true,
+ .num_regs = 1,
+ .irqs = rt5120_irqs,
+ .num_irqs = ARRAY_SIZE(rt5120_irqs),
+};
+
+static const struct resource rt5120_regulator_resources[] = {
+ DEFINE_RES_IRQ(RT5120_INT_HOTDIE),
+};
+
+static const struct resource rt5120_pwrkey_resources[] = {
+ DEFINE_RES_IRQ_NAMED(RT5120_INT_PWRKEY_PRESS, "pwrkey-press"),
+ DEFINE_RES_IRQ_NAMED(RT5120_INT_PWRKEY_REL, "pwrkey-release"),
+};
+
+static const struct mfd_cell rt5120_devs[] = {
+ MFD_CELL_RES("rt5120-regulator", rt5120_regulator_resources),
+ MFD_CELL_OF("rt5120-pwrkey", rt5120_pwrkey_resources, NULL, 0, 0, "richtek,rt5120-pwrkey"),
+};
+
+static int rt5120_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &rt5120_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ ret = devm_regmap_add_irq_chip(dev, regmap, i2c->irq, IRQF_ONESHOT, 0,
+ &rt5120_irq_chip, &irq_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, rt5120_devs,
+ ARRAY_SIZE(rt5120_devs), NULL, 0,
+ regmap_irq_get_domain(irq_data));
+}
+
+static const struct of_device_id rt5120_device_match_table[] = {
+ { .compatible = "richtek,rt5120" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt5120_device_match_table);
+
+static struct i2c_driver rt5120_driver = {
+ .driver = {
+ .name = "rt5120",
+ .of_match_table = rt5120_device_match_table,
+ },
+ .probe_new = rt5120_probe,
+};
+module_i2c_driver(rt5120_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("Richtek RT5120 I2C driver");
+MODULE_LICENSE("GPL v2");
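The new rt5120 core driver is a compact example of the regmap-irq plus MFD-cell pattern used throughout this directory: one status/mask register pair described with REGMAP_IRQ_REG_LINE entries, and the resulting IRQ domain handed to devm_mfd_add_devices() so the cells' DEFINE_RES_IRQ resources are mapped automatically. A hedged sketch of how a child platform driver would pick up one of those named IRQ resources (handler and names are hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_pwrkey_isr(int irq, void *data)
{
	/* report the key press/release to the input layer here */
	return IRQ_HANDLED;
}

static int foo_pwrkey_probe(struct platform_device *pdev)
{
	int irq;

	/* resolved through the parent's regmap-irq domain */
	irq = platform_get_irq_byname(pdev, "pwrkey-press");
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 foo_pwrkey_isr, IRQF_ONESHOT,
					 "foo-pwrkey", pdev);
}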
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index a2635c2d9d1a..8166949b725c 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -835,7 +835,7 @@ free_gpio:
return rval;
}
-static int si476x_core_remove(struct i2c_client *client)
+static void si476x_core_remove(struct i2c_client *client)
{
struct si476x_core *core = i2c_get_clientdata(client);
@@ -851,8 +851,6 @@ static int si476x_core_remove(struct i2c_client *client)
if (gpio_is_valid(core->gpio_reset))
gpio_free(core->gpio_reset);
-
- return 0;
}
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index bc0a2c38653e..3ac4508a6742 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1720,7 +1720,12 @@ static struct platform_driver sm501_plat_driver = {
static int __init sm501_base_init(void)
{
- platform_driver_register(&sm501_plat_driver);
+ int ret;
+
+ ret = platform_driver_register(&sm501_plat_driver);
+ if (ret < 0)
+ return ret;
+
return pci_register_driver(&sm501_pci_driver);
}
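The sm501 hunk stops ignoring the return value of platform_driver_register() in the module init path. The commit itself keeps the minimal fix; a fuller variant (an assumption about stricter error handling, not what this commit does) would also unregister the platform driver if the subsequent PCI registration fails:

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

/* foo_plat_driver and foo_pci_driver are hypothetical stand-ins */
static struct platform_driver foo_plat_driver;
static struct pci_driver foo_pci_driver;

static int __init foo_base_init(void)
{
	int ret;

	ret = platform_driver_register(&foo_plat_driver);
	if (ret < 0)
		return ret;

	ret = pci_register_driver(&foo_pci_driver);
	if (ret < 0)
		platform_driver_unregister(&foo_plat_driver);

	return ret;
}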
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 122f96094410..5dd7d9688459 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -467,13 +467,11 @@ err_chip_exit:
return ret;
}
-static int stmfx_remove(struct i2c_client *client)
+static void stmfx_remove(struct i2c_client *client)
{
stmfx_irq_exit(client);
stmfx_chip_exit(client);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index d3eedf3d607e..4d55494a97c4 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -91,13 +91,11 @@ stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return stmpe_probe(&i2c_ci, partnum);
}
-static int stmpe_i2c_remove(struct i2c_client *i2c)
+static void stmpe_i2c_remove(struct i2c_client *i2c)
{
struct stmpe *stmpe = dev_get_drvdata(&i2c->dev);
stmpe_remove(stmpe);
-
- return 0;
}
static const struct i2c_device_id stmpe_i2c_id[] = {
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index aeb9ea55f97d..0c4f74197d3e 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -8,14 +8,13 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mfd/core.h>
@@ -30,17 +29,12 @@
* @irq_trigger: IRQ trigger to use for the interrupt to the host
* @autosleep: bool to enable/disable stmpe autosleep
* @autosleep_timeout: inactivity timeout in milliseconds for autosleep
- * @irq_over_gpio: true if gpio is used to get irq
- * @irq_gpio: gpio number over which irq will be requested (significant only if
- * irq_over_gpio is true)
*/
struct stmpe_platform_data {
int id;
unsigned int blocks;
unsigned int irq_trigger;
bool autosleep;
- bool irq_over_gpio;
- int irq_gpio;
int autosleep_timeout;
};
@@ -1349,32 +1343,22 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
if (pdata->id < 0)
pdata->id = -1;
- pdata->irq_gpio = of_get_named_gpio_flags(np, "irq-gpio", 0,
- &pdata->irq_trigger);
- if (gpio_is_valid(pdata->irq_gpio))
- pdata->irq_over_gpio = 1;
- else
- pdata->irq_trigger = IRQF_TRIGGER_NONE;
-
of_property_read_u32(np, "st,autosleep-timeout",
&pdata->autosleep_timeout);
pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
for_each_available_child_of_node(np, child) {
- if (of_node_name_eq(child, "stmpe_gpio")) {
+ if (of_device_is_compatible(child, stmpe_gpio_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_GPIO;
- } else if (of_node_name_eq(child, "stmpe_keypad")) {
+ else if (of_device_is_compatible(child, stmpe_keypad_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_KEYPAD;
- } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
+ else if (of_device_is_compatible(child, stmpe_ts_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
- } else if (of_node_name_eq(child, "stmpe_adc")) {
+ else if (of_device_is_compatible(child, stmpe_adc_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_ADC;
- } else if (of_node_name_eq(child, "stmpe_pwm")) {
+ else if (of_device_is_compatible(child, stmpe_pwm_cell.of_compatible))
pdata->blocks |= STMPE_BLOCK_PWM;
- } else if (of_node_name_eq(child, "stmpe_rotator")) {
- pdata->blocks |= STMPE_BLOCK_ROTATOR;
- }
}
}
@@ -1384,6 +1368,7 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
struct stmpe_platform_data *pdata;
struct device_node *np = ci->dev->of_node;
struct stmpe *stmpe;
+ struct gpio_desc *irq_gpio;
int ret;
u32 val;
@@ -1437,18 +1422,20 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
if (ci->init)
ci->init(stmpe);
- if (pdata->irq_over_gpio) {
- ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
- GPIOF_DIR_IN, "stmpe");
- if (ret) {
- dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
- ret);
- return ret;
- }
+ irq_gpio = devm_gpiod_get_optional(ci->dev, "irq", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(irq_gpio);
+ if (ret) {
+ dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n", ret);
+ return ret;
+ }
- stmpe->irq = gpio_to_irq(pdata->irq_gpio);
+ if (irq_gpio) {
+ stmpe->irq = gpiod_to_irq(irq_gpio);
+ pdata->irq_trigger = gpiod_is_active_low(irq_gpio) ?
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
} else {
stmpe->irq = ci->irq;
+ pdata->irq_trigger = IRQF_TRIGGER_NONE;
}
if (stmpe->irq < 0) {
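The stmpe core moves from the legacy integer-GPIO helpers (of_get_named_gpio_flags(), gpio_to_irq()) to the GPIO descriptor consumer API: the optional "irq" GPIO is requested with devm_gpiod_get_optional(), turned into an IRQ number with gpiod_to_irq(), and the trigger is derived from the descriptor's active-low flag rather than parsed from DT flags. The same consumer pattern, reduced to a helper (names hypothetical):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int foo_resolve_irq(struct device *dev, int fallback_irq,
			   unsigned int *trigger)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "irq", GPIOD_ASIS);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	if (!gpiod) {
		/* no irq-gpios property: fall back to the bus-provided IRQ */
		*trigger = IRQF_TRIGGER_NONE;
		return fallback_irq;
	}

	*trigger = gpiod_is_active_low(gpiod) ? IRQF_TRIGGER_LOW :
						IRQF_TRIGGER_HIGH;
	return gpiod_to_irq(gpiod);
}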
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index bdb2ce7ff03b..9489e80e905a 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -66,14 +66,6 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
goto err_map;
}
- /* Parse the device's DT node for an endianness specification */
- if (of_property_read_bool(np, "big-endian"))
- syscon_config.val_format_endian = REGMAP_ENDIAN_BIG;
- else if (of_property_read_bool(np, "little-endian"))
- syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE;
- else if (of_property_read_bool(np, "native-endian"))
- syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE;
-
/*
* search for reg-io-width property in DT. If it is not provided,
* default to 4 bytes. regmap_init_mmio will return an error if values
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 13583cdb93b6..d5d0ec117acb 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -429,13 +429,11 @@ static int tc3589x_probe(struct i2c_client *i2c,
return 0;
}
-static int tc3589x_remove(struct i2c_client *client)
+static void tc3589x_remove(struct i2c_client *client)
{
struct tc3589x *tc3589x = i2c_get_clientdata(client);
mfd_remove_devices(tc3589x->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index c906324d293e..b360568ea675 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -179,7 +179,7 @@ static int tps6105x_probe(struct i2c_client *client,
return ret;
}
-static int tps6105x_remove(struct i2c_client *client)
+static void tps6105x_remove(struct i2c_client *client)
{
struct tps6105x *tps6105x = i2c_get_clientdata(client);
@@ -189,8 +189,6 @@ static int tps6105x_remove(struct i2c_client *client)
regmap_update_bits(tps6105x->regmap, TPS6105X_REG_0,
TPS6105X_REG0_MODE_MASK,
TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
-
- return 0;
}
static const struct i2c_device_id tps6105x_id[] = {
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 7e7dbee58ca9..c2afa2e69f42 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -501,7 +501,7 @@ static int tps65010_gpio_get(struct gpio_chip *chip, unsigned offset)
static struct tps65010 *the_tps;
-static int tps65010_remove(struct i2c_client *client)
+static void tps65010_remove(struct i2c_client *client)
{
struct tps65010 *tps = i2c_get_clientdata(client);
struct tps65010_board *board = dev_get_platdata(&client->dev);
@@ -517,7 +517,6 @@ static int tps65010_remove(struct i2c_client *client)
cancel_delayed_work_sync(&tps->work);
debugfs_remove(tps->file);
the_tps = NULL;
- return 0;
}
static int tps65010_probe(struct i2c_client *client,
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index cbae9777a24e..81a7360a87bb 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -111,14 +111,12 @@ static int tps65086_probe(struct i2c_client *client,
return ret;
}
-static int tps65086_remove(struct i2c_client *client)
+static void tps65086_remove(struct i2c_client *client)
{
struct tps65086 *tps = i2c_get_clientdata(client);
if (tps->irq > 0)
regmap_del_irq_chip(tps->irq, tps->irq_data);
-
- return 0;
}
static const struct i2c_device_id tps65086_id_table[] = {
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 8e8da204a02e..eebd60601b01 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -374,7 +374,7 @@ static int tps65217_probe(struct i2c_client *client)
return 0;
}
-static int tps65217_remove(struct i2c_client *client)
+static void tps65217_remove(struct i2c_client *client)
{
struct tps65217 *tps = i2c_get_clientdata(client);
unsigned int virq;
@@ -388,8 +388,6 @@ static int tps65217_remove(struct i2c_client *client)
irq_domain_remove(tps->irq_domain);
tps->irq_domain = NULL;
-
- return 0;
}
static const struct i2c_device_id tps65217_id_table[] = {
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index c9303d3d6602..fb340da64bbc 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -579,7 +579,7 @@ err_mfd_add:
return ret;
}
-static int tps6586x_i2c_remove(struct i2c_client *client)
+static void tps6586x_i2c_remove(struct i2c_client *client)
{
struct tps6586x *tps6586x = i2c_get_clientdata(client);
@@ -587,7 +587,6 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
mfd_remove_devices(tps6586x->dev);
if (client->irq)
free_irq(client->irq, tps6586x);
- return 0;
}
static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index afb7f7d97dc0..7e2b19efe867 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -43,13 +43,11 @@ static int tps65912_i2c_probe(struct i2c_client *client,
return tps65912_device_init(tps);
}
-static int tps65912_i2c_remove(struct i2c_client *client)
+static void tps65912_i2c_remove(struct i2c_client *client)
{
struct tps65912 *tps = i2c_get_clientdata(client);
tps65912_device_exit(tps);
-
- return 0;
}
static const struct i2c_device_id tps65912_i2c_id_table[] = {
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 2cb9326f3e61..f6b4b9d94bbd 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -727,7 +727,7 @@ static void clocks_init(struct device *dev)
/*----------------------------------------------------------------------*/
-static int twl_remove(struct i2c_client *client)
+static void twl_remove(struct i2c_client *client)
{
unsigned i, num_slaves;
@@ -745,7 +745,6 @@ static int twl_remove(struct i2c_client *client)
twl->client = NULL;
}
twl_priv->ready = false;
- return 0;
}
static struct of_dev_auxdata twl_auxdata_lookup[] = {
@@ -883,7 +882,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
* SR_I2C_SCL_CTRL_PU(bit 4)=0 and SR_I2C_SDA_CTRL_PU(bit 6)=0.
*
* Also, always enable SmartReflex bit as that's needed for omaps to
- * to do anything over I2C4 for voltage scaling even if SmartReflex
+ * do anything over I2C4 for voltage scaling even if SmartReflex
* is disabled. Without the SmartReflex bit omap sys_clkreq idle
* signal will never trigger for retention idle.
*/
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 4f576f0160a9..87496c1cb8bc 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -14,6 +14,7 @@
* by syed khasim <x0khasim@ti.com>
*/
+#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index b9c6d94b4002..f429b8f00db6 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -808,7 +808,7 @@ gpio_err:
return ret;
}
-static int twl6040_remove(struct i2c_client *client)
+static void twl6040_remove(struct i2c_client *client)
{
struct twl6040 *twl6040 = i2c_get_clientdata(client);
@@ -820,8 +820,6 @@ static int twl6040_remove(struct i2c_client *client)
mfd_remove_devices(&client->dev);
regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
-
- return 0;
}
static const struct i2c_device_id twl6040_i2c_id[] = {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 7b1d270722ba..7e88f5b0abe6 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -657,13 +657,11 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
return wm8994_device_init(wm8994, i2c->irq);
}
-static int wm8994_i2c_remove(struct i2c_client *i2c)
+static void wm8994_i2c_remove(struct i2c_client *i2c)
{
struct wm8994 *wm8994 = i2c_get_clientdata(i2c);
wm8994_device_exit(wm8994);
-
- return 0;
}
static const struct i2c_device_id wm8994_i2c_id[] = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 94e9fb4cdd76..358ad56f6524 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -513,4 +513,5 @@ source "drivers/misc/cardreader/Kconfig"
source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
source "drivers/misc/pvpanic/Kconfig"
+source "drivers/misc/mchp_pci1xxxx/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2be8542616dd..ac9b3e757ba1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -60,4 +60,5 @@ obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o
obj-$(CONFIG_OPEN_DICE) += open-dice.o
-obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
\ No newline at end of file
+obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/
+obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 0ee0c6d808c3..28ffb4377d98 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -67,10 +67,9 @@ static int ad_dpot_i2c_probe(struct i2c_client *client,
return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
-static int ad_dpot_i2c_remove(struct i2c_client *client)
+static void ad_dpot_i2c_remove(struct i2c_client *client)
{
ad_dpot_remove(&client->dev);
- return 0;
}
static const struct i2c_device_id ad_dpot_id[] = {
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 075f3a36d512..a58b7cb81d98 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -1014,7 +1014,7 @@ exit_done:
* ...argument 0 is string ID
*/
count = strlen(msg_buff);
- strlcpy(&msg_buff[count],
+ strscpy(&msg_buff[count],
&p[str_table + args[0]],
ALTERA_MESSAGE_LENGTH - count);
break;
@@ -2146,7 +2146,7 @@ static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
&p[note_table + (8 * i) + 4])];
if (value != NULL)
- strlcpy(value, value_ptr, vallen);
+ strscpy(value, value_ptr, vallen);
}
}
@@ -2162,13 +2162,13 @@ static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
status = 0;
if (key != NULL)
- strlcpy(key, &p[note_strings +
+ strscpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
keylen);
if (value != NULL)
- strlcpy(value, &p[note_strings +
+ strscpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
vallen);
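The altera-stapl hunks are part of the tree-wide strlcpy() to strscpy() conversion. Unlike strlcpy(), strscpy() does not walk the whole source string past the destination size, always NUL-terminates, and returns either the number of bytes copied or -E2BIG on truncation, so callers can detect truncation directly. A small illustration of that return-value check (not code from this patch):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void copy_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_size);

	if (n == -E2BIG)
		pr_warn("name truncated to %zu bytes\n", dst_size - 1);
}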
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index 6fff44b952bd..a32431f4b370 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -242,7 +242,7 @@ als_error1:
return res;
}
-static int apds9802als_remove(struct i2c_client *client)
+static void apds9802als_remove(struct i2c_client *client)
{
struct als_data *data = i2c_get_clientdata(client);
@@ -256,7 +256,6 @@ static int apds9802als_remove(struct i2c_client *client)
pm_runtime_put_noidle(&client->dev);
kfree(data);
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 45f5b997a0e1..e2100cc42ce8 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1185,7 +1185,7 @@ fail1:
return err;
}
-static int apds990x_remove(struct i2c_client *client)
+static void apds990x_remove(struct i2c_client *client)
{
struct apds990x_chip *chip = i2c_get_clientdata(client);
@@ -1205,7 +1205,6 @@ static int apds990x_remove(struct i2c_client *client)
regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
kfree(chip);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/bcm-vk/bcm_vk_dev.c b/drivers/misc/bcm-vk/bcm_vk_dev.c
index a16b99bdaa13..d4a96137728d 100644
--- a/drivers/misc/bcm-vk/bcm_vk_dev.c
+++ b/drivers/misc/bcm-vk/bcm_vk_dev.c
@@ -1339,7 +1339,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, vk);
irq = pci_alloc_irq_vectors(pdev,
- 1,
+ VK_MSIX_IRQ_MIN_REQ,
VK_MSIX_IRQ_MAX,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
@@ -1401,7 +1401,7 @@ static int bcm_vk_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bcm_vk_tty_set_irq_enabled(vk, i);
}
- id = ida_simple_get(&bcm_vk_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&bcm_vk_ida, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "unable to get id\n");
@@ -1500,7 +1500,7 @@ err_kfree_name:
misc_device->name = NULL;
err_ida_remove:
- ida_simple_remove(&bcm_vk_ida, id);
+ ida_free(&bcm_vk_ida, id);
err_irq:
for (i = 0; i < vk->num_irqs; i++)
@@ -1573,7 +1573,7 @@ static void bcm_vk_remove(struct pci_dev *pdev)
if (misc_device->name) {
misc_deregister(misc_device);
kfree(misc_device->name);
- ida_simple_remove(&bcm_vk_ida, vk->devid);
+ ida_free(&bcm_vk_ida, vk->devid);
}
for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
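bcm_vk switches from the deprecated ida_simple_get()/ida_simple_remove() wrappers to ida_alloc()/ida_free(); with no explicit range, ida_alloc() behaves like ida_simple_get(ida, 0, 0, gfp) and hands out the lowest free non-negative ID. Minimal usage sketch:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(foo_ida);

static int foo_get_id(void)
{
	/* lowest free ID >= 0, or a negative errno on failure */
	return ida_alloc(&foo_ida, GFP_KERNEL);
}

static void foo_put_id(int id)
{
	ida_free(&foo_ida, id);
}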
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 0581bb9cef2e..d0dfa674414c 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1280,7 +1280,7 @@ fail0:
return err;
}
-static int bh1770_remove(struct i2c_client *client)
+static void bh1770_remove(struct i2c_client *client)
{
struct bh1770_chip *chip = i2c_get_clientdata(client);
@@ -1299,8 +1299,6 @@ static int bh1770_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index 42f316c2d719..0698ddc5f4d5 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -228,11 +228,10 @@ static int ds1682_probe(struct i2c_client *client,
return rc;
}
-static int ds1682_remove(struct i2c_client *client)
+static void ds1682_remove(struct i2c_client *client)
{
sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
sysfs_remove_group(&client->dev.kobj, &ds1682_group);
- return 0;
}
static const struct i2c_device_id ds1682_id[] = {
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 633e1cf08d6e..938c4f41b98c 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -791,7 +791,7 @@ static int at24_probe(struct i2c_client *client)
return 0;
}
-static int at24_remove(struct i2c_client *client)
+static void at24_remove(struct i2c_client *client)
{
struct at24_data *at24 = i2c_get_clientdata(client);
@@ -801,8 +801,6 @@ static int at24_remove(struct i2c_client *client)
regulator_disable(at24->vcc_reg);
pm_runtime_set_suspended(&client->dev);
}
-
- return 0;
}
static int __maybe_unused at24_suspend(struct device *dev)
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 9fbfe784d710..c8c6deb7ed89 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -219,14 +219,12 @@ static int ee1004_probe(struct i2c_client *client)
return err;
}
-static int ee1004_remove(struct i2c_client *client)
+static void ee1004_remove(struct i2c_client *client)
{
/* Remove page select clients if this is the last device */
mutex_lock(&ee1004_bus_lock);
ee1004_cleanup(EE1004_NUM_PAGES);
mutex_unlock(&ee1004_bus_lock);
-
- return 0;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 34fa385dfd4b..8a841a75d893 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -136,7 +136,7 @@ static int eeprom_detect(struct i2c_client *client, struct i2c_board_info *info)
&& !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
- strlcpy(info->type, "eeprom", I2C_NAME_SIZE);
+ strscpy(info->type, "eeprom", I2C_NAME_SIZE);
return 0;
}
@@ -183,11 +183,9 @@ static int eeprom_probe(struct i2c_client *client,
return sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
}
-static int eeprom_remove(struct i2c_client *client)
+static void eeprom_remove(struct i2c_client *client)
{
sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
-
- return 0;
}
static const struct i2c_device_id eeprom_id[] = {
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 9aec3338e37d..bb3ed352b95f 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1075,7 +1075,7 @@ static const struct i2c_device_id *idt_ee_match_id(struct fwnode_handle *fwnode)
return NULL;
p = strchr(compatible, ',');
- strlcpy(devname, p ? p + 1 : compatible, sizeof(devname));
+ strscpy(devname, p ? p + 1 : compatible, sizeof(devname));
/* Search through the device name */
while (id->name[0]) {
if (strcmp(devname, id->name) == 0)
@@ -1405,7 +1405,7 @@ err_free_pdev:
/*
* idt_remove() - IDT 89HPESx driver remove() callback method
*/
-static int idt_remove(struct i2c_client *client)
+static void idt_remove(struct i2c_client *client)
{
struct idt_89hpesx_dev *pdev = i2c_get_clientdata(client);
@@ -1417,8 +1417,6 @@ static int idt_remove(struct i2c_client *client)
/* Discard driver data structure */
idt_free_pdev(pdev);
-
- return 0;
}
/*
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 9da81f6d4a1c..6bd4f4339af4 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -173,7 +173,7 @@ exit_kfree:
return err;
}
-static int max6875_remove(struct i2c_client *client)
+static void max6875_remove(struct i2c_client *client)
{
struct max6875_data *data = i2c_get_clientdata(client);
@@ -181,8 +181,6 @@ static int max6875_remove(struct i2c_client *client)
sysfs_remove_bin_file(&client->dev.kobj, &user_eeprom_attr);
kfree(data);
-
- return 0;
}
static const struct i2c_device_id max6875_id[] = {
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 93ebd174d848..7ff0b63c25e3 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -25,7 +25,7 @@
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
-#define FASTRPC_MAX_SESSIONS 13 /*12 compute, 1 cpz*/
+#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
@@ -1515,7 +1515,7 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr
args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
args[1].length = dsp_attr_buf_len;
args[1].fd = -1;
- fl->pd = 1;
+ fl->pd = USER_PD;
return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
FASTRPC_SCALARS(0, 1, 1), args);
@@ -1943,7 +1943,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
- sess = &cctx->session[cctx->sesscount];
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
+ dev_err(&pdev->dev, "too many sessions\n");
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ return -ENOSPC;
+ }
+ sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
@@ -1956,13 +1961,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
- dup_sess = &cctx->session[cctx->sesscount];
+ dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
- cctx->sesscount++;
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
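The fastrpc hunk fixes the session bookkeeping: the FASTRPC_MAX_SESSIONS bound is checked before a slot is handed out, sesscount is advanced only when a slot is actually claimed, and everything happens under the context spinlock. The same bounded-slot pattern in generic form (structure and names hypothetical, lock assumed to be initialized elsewhere):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_slot {
	bool valid;
};

struct foo_ctx {
	spinlock_t lock;
	unsigned int count;
	struct foo_slot slots[8];
};

static struct foo_slot *foo_take_slot(struct foo_ctx *ctx)
{
	struct foo_slot *slot;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->count >= ARRAY_SIZE(ctx->slots)) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		return NULL;			/* table full */
	}
	slot = &ctx->slots[ctx->count++];	/* claim the slot, then advance */
	slot->valid = true;
	spin_unlock_irqrestore(&ctx->lock, flags);

	return slot;
}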
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
index 861c81006c6d..bd01d0d940c0 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/misc/habanalabs/Kconfig
@@ -10,6 +10,7 @@ config HABANA_AI
select HWMON
select DMA_SHARED_BUFFER
select CRC32
+ select FW_LOADER
help
Enables PCIe card driver for Habana's AI Processors (AIP) that are
designed to accelerate Deep Learning inference and training workloads.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index b35d7000c86b..a48a9e0969ed 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -8,13 +8,13 @@ obj-$(CONFIG_HABANA_AI) := habanalabs.o
include $(src)/common/Makefile
habanalabs-y += $(HL_COMMON_FILES)
-include $(src)/goya/Makefile
-habanalabs-y += $(HL_GOYA_FILES)
+include $(src)/gaudi2/Makefile
+habanalabs-y += $(HL_GAUDI2_FILES)
include $(src)/gaudi/Makefile
habanalabs-y += $(HL_GAUDI_FILES)
-include $(src)/gaudi2/Makefile
-habanalabs-y += $(HL_GAUDI2_FILES)
+include $(src)/goya/Makefile
+habanalabs-y += $(HL_GOYA_FILES)
habanalabs-$(CONFIG_DEBUG_FS) += common/debugfs.o
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index b027f66f8bd4..2b332991ac6a 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -12,20 +12,18 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
+#define CB_VA_POOL_SIZE (4UL * SZ_1G)
+
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- struct hl_vm_va_block *va_block, *tmp;
- dma_addr_t bus_addr;
- u64 virt_addr;
u32 page_size = prop->pmmu.page_size;
- s32 offset;
int rc;
if (!hdev->supports_cb_mapping) {
dev_err_ratelimited(hdev->dev,
- "Cannot map CB because no VA range is allocated for CB mapping\n");
+ "Mapping a CB to the device's MMU is not supported\n");
return -EINVAL;
}
@@ -35,106 +33,45 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
return -EINVAL;
}
- INIT_LIST_HEAD(&cb->va_block_list);
-
- for (bus_addr = cb->bus_address;
- bus_addr < cb->bus_address + cb->size;
- bus_addr += page_size) {
-
- virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
- if (!virt_addr) {
- dev_err(hdev->dev,
- "Failed to allocate device virtual address for CB\n");
- rc = -ENOMEM;
- goto err_va_pool_free;
- }
+ if (cb->is_mmu_mapped)
+ return 0;
- va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
- if (!va_block) {
- rc = -ENOMEM;
- gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
- goto err_va_pool_free;
- }
+ cb->roundup_size = roundup(cb->size, page_size);
- va_block->start = virt_addr;
- va_block->end = virt_addr + page_size - 1;
- va_block->size = page_size;
- list_add_tail(&va_block->node, &cb->va_block_list);
+ cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
+ if (!cb->virtual_addr) {
+ dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
+ return -ENOMEM;
}
- mutex_lock(&ctx->mmu_lock);
-
- bus_addr = cb->bus_address;
- offset = 0;
- list_for_each_entry(va_block, &cb->va_block_list, node) {
- rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
- va_block->size, list_is_last(&va_block->node,
- &cb->va_block_list));
- if (rc) {
- dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
- va_block->start);
- goto err_va_umap;
- }
-
- bus_addr += va_block->size;
- offset += va_block->size;
+ mutex_lock(&hdev->mmu_lock);
+ rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
+ goto err_va_umap;
}
-
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
-
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
cb->is_mmu_mapped = true;
-
return rc;
err_va_umap:
- list_for_each_entry(va_block, &cb->va_block_list, node) {
- if (offset <= 0)
- break;
- hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
- offset <= va_block->size);
- offset -= va_block->size;
- }
-
- rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
-
- mutex_unlock(&ctx->mmu_lock);
-
-err_va_pool_free:
- list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
- gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
- list_del(&va_block->node);
- kfree(va_block);
- }
-
+ mutex_unlock(&hdev->mmu_lock);
+ gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
return rc;
}
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
struct hl_device *hdev = ctx->hdev;
- struct hl_vm_va_block *va_block, *tmp;
-
- mutex_lock(&ctx->mmu_lock);
-
- list_for_each_entry(va_block, &cb->va_block_list, node)
- if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
- list_is_last(&va_block->node,
- &cb->va_block_list)))
- dev_warn_ratelimited(hdev->dev,
- "Failed to unmap CB's va 0x%llx\n",
- va_block->start);
+ mutex_lock(&hdev->mmu_lock);
+ hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
+ mutex_unlock(&hdev->mmu_lock);
- mutex_unlock(&ctx->mmu_lock);
-
- list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
- gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
- list_del(&va_block->node);
- kfree(va_block);
- }
+ gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
}
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
@@ -376,7 +313,6 @@ int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
static int hl_cb_info(struct hl_mem_mgr *mmg,
u64 handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
- struct hl_vm_va_block *va_block;
struct hl_cb *cb;
int rc = 0;
@@ -388,9 +324,8 @@ static int hl_cb_info(struct hl_mem_mgr *mmg,
}
if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
- va_block = list_first_entry(&cb->va_block_list, struct hl_vm_va_block, node);
- if (va_block) {
- *device_va = va_block->start;
+ if (cb->is_mmu_mapped) {
+ *device_va = cb->virtual_addr;
} else {
dev_err(mmg->dev, "CB is not mapped to the device's MMU\n");
rc = -EINVAL;
@@ -566,16 +501,23 @@ int hl_cb_va_pool_init(struct hl_ctx *ctx)
return -ENOMEM;
}
- rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
- prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
+ ctx->cb_va_pool_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
+ CB_VA_POOL_SIZE, HL_MMU_VA_ALIGNMENT_NOT_NEEDED);
+ if (!ctx->cb_va_pool_base) {
+ rc = -ENOMEM;
+ goto err_pool_destroy;
+ }
+ rc = gen_pool_add(ctx->cb_va_pool, ctx->cb_va_pool_base, CB_VA_POOL_SIZE, -1);
if (rc) {
dev_err(hdev->dev,
"Failed to add memory to VA gen pool for CB mapping\n");
- goto err_pool_destroy;
+ goto err_unreserve_va_block;
}
return 0;
+err_unreserve_va_block:
+ hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
err_pool_destroy:
gen_pool_destroy(ctx->cb_va_pool);
@@ -590,4 +532,5 @@ void hl_cb_va_pool_fini(struct hl_ctx *ctx)
return;
gen_pool_destroy(ctx->cb_va_pool);
+ hl_unreserve_va_block(hdev, ctx, ctx->cb_va_pool_base, CB_VA_POOL_SIZE);
}
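The command_buffer.c rework drops the per-page hl_vm_va_block list: the CB size is rounded up to the MMU page size, a single block is carved out of the gen_pool VA allocator, and the whole physical range is mapped with one hl_mmu_map_contiguous() call, which leaves a single unmap/free on the error path. The gen_pool side of that pattern, reduced to a sketch (map_contiguous() is a hypothetical stand-in for the habanalabs-internal mapping helper):

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* hypothetical stand-in for hl_mmu_map_contiguous() */
extern int map_contiguous(u64 va, u64 bus_addr, u64 size);

static int map_cb_range(struct gen_pool *va_pool, u64 bus_addr,
			u32 size, u32 page_size, u64 *out_va)
{
	u64 map_size = roundup(size, page_size);
	unsigned long va;
	int rc;

	va = gen_pool_alloc(va_pool, map_size);	/* one VA block for the whole CB */
	if (!va)
		return -ENOMEM;

	rc = map_contiguous(va, bus_addr, map_size);
	if (rc) {
		gen_pool_free(va_pool, va, map_size);
		return rc;
	}

	*out_va = va;
	return 0;
}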
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 90a4574cbe2d..fa05770865c6 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -12,7 +12,9 @@
#include <linux/slab.h>
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
- HL_CS_FLAGS_COLLECTIVE_WAIT)
+ HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
+ HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+
#define MAX_TS_ITER_NUM 10
@@ -824,10 +826,10 @@ static void cs_timedout(struct work_struct *work)
}
/* Save only the first CS timeout parameters */
- rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_enable, 1, 0);
+ rc = atomic_cmpxchg(&hdev->captured_err_info.cs_timeout.write_enable, 1, 0);
if (rc) {
- hdev->last_error.cs_timeout.timestamp = ktime_get();
- hdev->last_error.cs_timeout.seq = cs->sequence;
+ hdev->captured_err_info.cs_timeout.timestamp = ktime_get();
+ hdev->captured_err_info.cs_timeout.seq = cs->sequence;
event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
@@ -1242,6 +1244,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_RESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY)
return CS_UNRESERVE_SIGNALS;
+ else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+ return CS_TYPE_ENGINE_CORE;
else
return CS_TYPE_DEFAULT;
}
@@ -1253,6 +1257,7 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
u32 cs_type_flags, num_chunks;
enum hl_device_status status;
enum hl_cs_type cs_type;
+ bool is_sync_stream;
if (!hl_device_operational(hdev, &status)) {
return -EBUSY;
@@ -1276,9 +1281,10 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
cs_type = hl_cs_get_cs_type(cs_type_flags);
num_chunks = args->in.num_chunks_execute;
- if (unlikely((cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
- cs_type == CS_TYPE_COLLECTIVE_WAIT) &&
- !hdev->supports_sync_stream)) {
+ is_sync_stream = (cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT);
+
+ if (unlikely(is_sync_stream && !hdev->supports_sync_stream)) {
dev_err(hdev->dev, "Sync stream CS is not supported\n");
return -EINVAL;
}
@@ -1288,7 +1294,7 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
return -EINVAL;
}
- } else if (num_chunks != 1) {
+ } else if (is_sync_stream && num_chunks != 1) {
dev_err(hdev->dev,
"Sync stream CS mandates one chunk only, context %d\n",
ctx->asid);
@@ -1584,13 +1590,14 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
bool need_soft_reset = false;
- int rc = 0, do_ctx_switch;
+ int rc = 0, do_ctx_switch = 0;
void __user *chunks;
u32 num_chunks, tmp;
u16 sob_count;
int ret;
- do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
+ if (hdev->supports_ctx_switch)
+ do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
mutex_lock(&hpriv->restore_phase_mutex);
@@ -1661,9 +1668,10 @@ wait_again:
}
}
- ctx->thread_ctx_switch_wait_token = 1;
+ if (hdev->supports_ctx_switch)
+ ctx->thread_ctx_switch_wait_token = 1;
- } else if (!ctx->thread_ctx_switch_wait_token) {
+ } else if (hdev->supports_ctx_switch && !ctx->thread_ctx_switch_wait_token) {
rc = hl_poll_timeout_memory(hdev,
&ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
100, jiffies_to_usecs(hdev->timeout_jiffies), false);
@@ -2351,6 +2359,41 @@ out:
return rc;
}
+static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
+ u32 num_engine_cores, u32 core_command)
+{
+ int rc;
+ struct hl_device *hdev = hpriv->hdev;
+ void __user *engine_cores_arr;
+ u32 *cores;
+
+ if (!num_engine_cores || num_engine_cores > hdev->asic_prop.num_engine_cores) {
+ dev_err(hdev->dev, "Number of engine cores %d is invalid\n", num_engine_cores);
+ return -EINVAL;
+ }
+
+ if (core_command != HL_ENGINE_CORE_RUN && core_command != HL_ENGINE_CORE_HALT) {
+ dev_err(hdev->dev, "Engine core command is invalid\n");
+ return -EINVAL;
+ }
+
+ engine_cores_arr = (void __user *) (uintptr_t) engine_cores;
+ cores = kmalloc_array(num_engine_cores, sizeof(u32), GFP_KERNEL);
+ if (!cores)
+ return -ENOMEM;
+
+ if (copy_from_user(cores, engine_cores_arr, num_engine_cores * sizeof(u32))) {
+ dev_err(hdev->dev, "Failed to copy core-ids array from user\n");
+ kfree(cores);
+ return -EFAULT;
+ }
+
+ rc = hdev->asic_funcs->set_engine_cores(hdev, cores, num_engine_cores, core_command);
+ kfree(cores);
+
+ return rc;
+}
+
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
@@ -2403,6 +2446,10 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_unreserve_signals(hpriv,
args->in.encaps_sig_handle_id);
break;
+ case CS_TYPE_ENGINE_CORE:
+ rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
+ args->in.num_engine_cores, args->in.core_command);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
@@ -2524,7 +2571,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
ktime_t max_ktime, first_cs_time;
enum hl_cs_wait_status status;
- memset(fence_ptr, 0, arr_len * sizeof(*fence_ptr));
+ memset(fence_ptr, 0, arr_len * sizeof(struct hl_fence *));
/* get all fences under the same lock */
rc = hl_ctx_get_fences(mcs_data->ctx, seq_arr, fence_ptr, arr_len);
@@ -2826,7 +2873,7 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
/* allocate array for the fences */
- fence_arr = kmalloc_array(seq_arr_len, sizeof(*fence_arr), GFP_KERNEL);
+ fence_arr = kmalloc_array(seq_arr_len, sizeof(struct hl_fence *), GFP_KERNEL);
if (!fence_arr) {
rc = -ENOMEM;
goto free_seq_arr;
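cs_ioctl_engine_cores() added above follows the usual bounded user-array copy for ioctls: validate the element count against a driver limit, allocate with kmalloc_array(), copy_from_user() the payload, and free the buffer on every exit path. Reduced to a generic helper (names hypothetical):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int read_u32_array_from_user(u64 user_ptr, u32 count, u32 max_count,
				    u32 **out)
{
	u32 *buf;

	if (!count || count > max_count)
		return -EINVAL;

	buf = kmalloc_array(count, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, u64_to_user_ptr(user_ptr),
			   count * sizeof(*buf))) {
		kfree(buf);
		return -EFAULT;
	}

	*out = buf;	/* caller owns the buffer and must kfree() it */
	return 0;
}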
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 64439f33a19b..48d3ec8b5c82 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -291,14 +291,16 @@ static int vm_show(struct seq_file *s, void *data)
if (ctx->asid != HL_KERNEL_ASID_ID &&
!list_empty(&ctx->hw_block_mem_list)) {
seq_puts(s, "\nhw_block mappings:\n\n");
- seq_puts(s, " virtual address size HW block id\n");
- seq_puts(s, "-------------------------------------------\n");
+ seq_puts(s,
+ " virtual address block size mapped size HW block id\n");
+ seq_puts(s,
+ "---------------------------------------------------------------\n");
mutex_lock(&ctx->hw_block_list_lock);
- list_for_each_entry(lnode, &ctx->hw_block_mem_list,
- node) {
+ list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
seq_printf(s,
- " 0x%-14lx %-6u %-9u\n",
- lnode->vaddr, lnode->size, lnode->id);
+ " 0x%-14lx %-6u %-6u %-9u\n",
+ lnode->vaddr, lnode->block_size, lnode->mapped_size,
+ lnode->id);
}
mutex_unlock(&ctx->hw_block_list_lock);
}
@@ -591,6 +593,7 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
+ struct engines_data eng_data;
if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
@@ -598,7 +601,25 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
- hdev->asic_funcs->is_device_idle(hdev, NULL, 0, s);
+ eng_data.actual_size = 0;
+ eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
+ eng_data.buf = vmalloc(eng_data.allocated_buf_size);
+ if (!eng_data.buf)
+ return -ENOMEM;
+
+ hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
+
+ if (eng_data.actual_size > eng_data.allocated_buf_size) {
+ dev_err(hdev->dev,
+ "Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
+ eng_data.actual_size, eng_data.allocated_buf_size);
+ vfree(eng_data.buf);
+ return -ENOMEM;
+ }
+
+ seq_write(s, eng_data.buf, eng_data.actual_size);
+
+ vfree(eng_data.buf);
return 0;
}
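engines_show() no longer prints straight into the seq_file: the ASIC idle callback now fills a vmalloc()ed buffer bounded by HL_ENGINES_DATA_MAX_SIZE, the contents are pushed out with a single seq_write(), and an over-sized report is treated as an error. The buffer-then-seq_write shape, reduced (collect() is an assumed helper returning a vmalloc()ed report):

#include <linux/errno.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

static int foo_show(struct seq_file *s,
		    char *(*collect)(size_t *len))
{
	size_t len;
	char *buf = collect(&len);	/* assumed: returns a vmalloc()ed report */

	if (!buf)
		return -ENOMEM;

	seq_write(s, buf, len);		/* copy the finished report into the seq_file */
	vfree(buf);
	return 0;
}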
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index b30aeb1c657f..233d8b46c831 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -13,6 +13,8 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
+#include <trace/events/habanalabs.h>
+
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
enum dma_alloc_type {
@@ -26,8 +28,9 @@ enum dma_alloc_type {
/*
* hl_set_dram_bar- sets the bar to allow later access to address
*
- * @hdev: pointer to habanalabs device structure
+ * @hdev: pointer to habanalabs device structure.
* @addr: the address the caller wants to access.
+ * @region: the PCI region.
*
* @return: the old BAR base address on success, U64_MAX for failure.
* The caller should set it back to the old address after use.
@@ -37,58 +40,64 @@ enum dma_alloc_type {
* This function can be called also if the bar doesn't need to be set,
* in that case it just won't change the base.
*/
-static uint64_t hl_set_dram_bar(struct hl_device *hdev, u64 addr)
+static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 bar_base_addr;
+ u64 bar_base_addr, old_base;
- bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+ if (is_power_of_2(prop->dram_pci_bar_size))
+ bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
+ else
+ bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
+ prop->dram_pci_bar_size;
- return hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
-}
+ old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
+ /* in case of success we need to update the new BAR base */
+ if (old_base != U64_MAX)
+ region->region_base = bar_base_addr;
+
+ return old_base;
+}
static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type, enum pci_region region_type)
{
struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
+ void __iomem *acc_addr;
u64 old_base = 0, rc;
if (region_type == PCI_REGION_DRAM) {
- old_base = hl_set_dram_bar(hdev, addr);
+ old_base = hl_set_dram_bar(hdev, addr, region);
if (old_base == U64_MAX)
return -EIO;
}
+ acc_addr = hdev->pcie_bar[region->bar_id] + addr - region->region_base +
+ region->offset_in_bar;
switch (acc_type) {
case DEBUGFS_READ8:
- *val = readb(hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ *val = readb(acc_addr);
break;
case DEBUGFS_WRITE8:
- writeb(*val, hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ writeb(*val, acc_addr);
break;
case DEBUGFS_READ32:
- *val = readl(hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ *val = readl(acc_addr);
break;
case DEBUGFS_WRITE32:
- writel(*val, hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ writel(*val, acc_addr);
break;
case DEBUGFS_READ64:
- *val = readq(hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ *val = readq(acc_addr);
break;
case DEBUGFS_WRITE64:
- writeq(*val, hdev->pcie_bar[region->bar_id] +
- addr - region->region_base + region->offset_in_bar);
+ writeq(*val, acc_addr);
break;
}
if (region_type == PCI_REGION_DRAM) {
- rc = hl_set_dram_bar(hdev, old_base);
+ rc = hl_set_dram_bar(hdev, old_base, region);
if (rc == U64_MAX)
return -EIO;
}
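hl_set_dram_bar() now also copes with DRAM PCI BARs whose size is not a power of two: the old mask trick only works for power-of-two sizes, so the general case rounds the address down with a 64-bit division, and the region base is updated on success so later offset math uses the new window. The alignment arithmetic on its own (div64_u64() stands in for the driver's DIV_ROUND_DOWN_ULL):

#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/types.h>

/* align addr down to the start of the BAR-sized window that contains it */
static u64 bar_window_base(u64 addr, u64 bar_size)
{
	if (is_power_of_2(bar_size))
		return addr & ~(bar_size - 1);

	return div64_u64(addr, bar_size) * bar_size;
}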
@@ -97,9 +106,10 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
}
static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag, enum dma_alloc_type alloc_type)
+ gfp_t flag, enum dma_alloc_type alloc_type,
+ const char *caller)
{
- void *ptr;
+ void *ptr = NULL;
switch (alloc_type) {
case DMA_ALLOC_COHERENT:
@@ -113,11 +123,16 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
break;
}
+ if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
+ trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
+ caller);
+
return ptr;
}
static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, enum dma_alloc_type alloc_type)
+ dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
+ const char *caller)
{
switch (alloc_type) {
case DMA_ALLOC_COHERENT:
@@ -130,39 +145,44 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
break;
}
+
+ trace_habanalabs_dma_free(hdev->dev, (u64) (uintptr_t) cpu_addr, dma_handle, size, caller);
}
-void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
+void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, const char *caller)
{
- return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT);
+ return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
}
-void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, const char *caller)
{
- hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT);
+ hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
}
-void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, const char *caller)
{
- return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+ return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
}
-void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
+ const char *caller)
{
- hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+ hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
}
-void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
- dma_addr_t *dma_handle)
+void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle, const char *caller)
{
- return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL);
+ return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
}
-void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
+void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
+ const char *caller)
{
- hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL);
+ hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
}
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
@@ -267,6 +287,30 @@ int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
return 0;
}
+void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
+{
+ va_list args;
+ int str_size;
+
+ va_start(args, fmt);
+ /* Calculate formatted string length. Assuming each string is null terminated, hence
+ * increment result by 1
+ */
+ str_size = vsnprintf(NULL, 0, fmt, args) + 1;
+ va_end(args);
+
+ if ((e->actual_size + str_size) < e->allocated_buf_size) {
+ va_start(args, fmt);
+ vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
+ va_end(args);
+ }
+
+ /* Need to update the size even when not updating destination buffer to get the exact size
+ * of all input strings
+ */
+ e->actual_size += str_size;
+}
+
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
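hl_engine_data_sprintf() added above relies on the standard two-pass vsnprintf() trick: a first vsnprintf(NULL, 0, ...) measures the formatted length, the string is written only if it fits the pre-allocated buffer, and actual_size is advanced either way so the caller learns the total size it would have needed. The measuring step in isolation:

#include <linux/kernel.h>
#include <linux/stdarg.h>

/* number of bytes fmt would produce, excluding the terminating NUL */
static int measured_len(const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	return len;
}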
@@ -322,6 +366,8 @@ static void hpriv_release(struct kref *ref)
hdev = hpriv->hdev;
+ hdev->asic_funcs->send_device_activity(hdev, false);
+
put_pid(hpriv->taskpid);
hl_debugfs_remove_file(hpriv);
@@ -673,7 +719,7 @@ static int device_early_init(struct hl_device *hdev)
if (hdev->asic_prop.completion_queues_count) {
hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
- sizeof(*hdev->cq_wq),
+ sizeof(struct workqueue_struct *),
GFP_KERNEL);
if (!hdev->cq_wq) {
rc = -ENOMEM;
@@ -1091,7 +1137,9 @@ int hl_device_resume(struct hl_device *hdev)
/* 'in_reset' was set to true during suspend, now we must clear it in order
* for hard reset to be performed
*/
+ spin_lock(&hdev->reset_info.lock);
hdev->reset_info.in_reset = 0;
+ spin_unlock(&hdev->reset_info.lock);
rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
if (rc) {
@@ -1518,6 +1566,13 @@ kill_processes:
*/
hdev->disabled = false;
+ /* F/W security enabled indication might be updated after hard-reset */
+ if (hard_reset) {
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc)
+ goto out_err;
+ }
+
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
@@ -1556,7 +1611,7 @@ kill_processes:
if (!hdev->asic_prop.fw_security_enabled)
hl_fw_set_max_power(hdev);
} else {
- rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
+ rc = hdev->asic_funcs->compute_reset_late_init(hdev);
if (rc) {
if (reset_upon_device_release)
dev_err(hdev->dev,
@@ -1704,7 +1759,9 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
char *name;
bool add_cdev_sysfs_on_err = false;
- name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
+ hdev->cdev_idx = hdev->id / 2;
+
+ name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto out_disabled;
@@ -1719,7 +1776,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto out_disabled;
- name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
+ name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
if (!name) {
rc = -ENOMEM;
goto free_dev;
@@ -1806,7 +1863,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
- sizeof(*hdev->shadow_cs_queue), GFP_KERNEL);
+ sizeof(struct hl_cs *), GFP_KERNEL);
if (!hdev->shadow_cs_queue) {
rc = -ENOMEM;
goto cq_fini;
@@ -1997,10 +2054,10 @@ out_disabled:
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
"Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id / 2);
+ hdev->cdev_idx);
else
pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->id / 2);
+ hdev->cdev_idx);
return rc;
}
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 608ca67527a5..2de6a9bd564d 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -15,14 +15,6 @@
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
-struct fw_binning_conf {
- u64 tpc_binning;
- u32 dec_binning;
- u32 hbm_binning;
- u32 edma_binning;
- u32 mme_redundancy;
-};
-
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -260,7 +252,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
struct hl_bd *sent_bd;
- u32 tmp, expected_ack_val, pi;
+ u32 tmp, expected_ack_val, pi, opcode;
int rc;
pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
@@ -327,8 +319,35 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
- dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
- rc, (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+ opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
+
+ if (!prop->supports_advanced_cpucp_rc) {
+ dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
+ goto scrub_descriptor;
+ }
+
+ switch (rc) {
+ case cpucp_packet_invalid:
+ dev_err(hdev->dev,
+ "CPU packet %d is not supported by F/W\n", opcode);
+ break;
+ case cpucp_packet_fault:
+ dev_err(hdev->dev,
+ "F/W failed processing CPU packet %d\n", opcode);
+ break;
+ case cpucp_packet_invalid_pkt:
+ dev_dbg(hdev->dev,
+ "CPU packet %d is not supported by F/W\n", opcode);
+ break;
+ case cpucp_packet_invalid_params:
+ dev_err(hdev->dev,
+ "F/W reports invalid parameters for CPU packet %d\n", opcode);
+ break;
+
+ default:
+ dev_err(hdev->dev,
+ "Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
+ }
/* propagate the return code from the f/w to the callers who want to check it */
if (result)
@@ -340,6 +359,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
*result = le64_to_cpu(pkt->result);
}
+scrub_descriptor:
/* Scrub previous buffer descriptor 'ctl' field which contains the
* previous PI value written during packet submission.
* We must do this or else F/W can read an old value upon queue wraparound.
@@ -462,6 +482,21 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
size);
}
+int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
+{
+ struct cpucp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(open);
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+ if (rc)
+ dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
+
+ return rc;
+}
+
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
struct cpucp_packet hb_pkt;
@@ -581,6 +616,15 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
/* All warnings should go here in order not to reach the unknown error validation */
+ if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - EEPROM failure detected, default settings applied\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL;
+ }
+
if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
dev_warn(hdev->dev,
"Device boot warning - Skipped DRAM initialization\n");
@@ -1476,6 +1520,8 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
*/
prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
+ prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
+
dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
cpu_boot_dev_sts0);
@@ -1514,7 +1560,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev)
hdev->asic_funcs->init_firmware_preload_params(hdev);
/*
- * In order to determine boot method (static VS dymanic) we need to
+ * In order to determine boot method (static VS dynamic) we need to
* read the boot caps register
*/
rc = hl_fw_read_preboot_caps(hdev);
@@ -1781,7 +1827,7 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
*
* @return the CRC32 result
*
- * NOTE: kernel's CRC32 differ's from standard CRC32 calculation.
+ * NOTE: kernel's CRC32 differs from standard CRC32 calculation.
* in order to be aligned we need to flip the bits of both the input
* initial CRC and kernel's CRC32 result.
* in addition both sides use initial CRC of 0,
@@ -1798,7 +1844,7 @@ static u32 hl_fw_compat_crc32(u8 *data, size_t size)
*
* @hdev: pointer to the habanalabs device structure
* @addr: device address of memory transfer
- * @size: memory transter size
+ * @size: memory transfer size
* @region: PCI memory region
*
* @return 0 on success, otherwise non-zero error code
@@ -1854,50 +1900,36 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
u64 addr;
int rc;
- if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC) {
- dev_err(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
+ if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
+ dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
fw_desc->header.magic);
- return -EIO;
- }
- if (fw_desc->header.version != HL_COMMS_DESC_VER) {
- dev_err(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
+ if (fw_desc->header.version != HL_COMMS_DESC_VER)
+ dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
fw_desc->header.version);
- return -EIO;
- }
/*
- * calc CRC32 of data without header.
+ * Calc CRC32 of data without header. Use the size of the descriptor
+ * reported by firmware, without calculating it ourselves, to allow adding
+ * more fields to the lkd_fw_comms_desc structure.
* note that no alignment/stride address issues here as all structures
- * are 64 bit padded
+ * are 64 bit padded.
*/
- data_size = sizeof(struct lkd_fw_comms_desc) -
- sizeof(struct comms_desc_header);
data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
-
- if (le16_to_cpu(fw_desc->header.size) != data_size) {
- dev_err(hdev->dev,
- "Invalid descriptor size 0x%x, expected size 0x%zx\n",
- le16_to_cpu(fw_desc->header.size), data_size);
- return -EIO;
- }
+ data_size = le16_to_cpu(fw_desc->header.size);
data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
-
if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
- dev_err(hdev->dev,
- "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
- data_crc32, fw_desc->header.crc32);
+ dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
+ data_crc32, fw_desc->header.crc32);
return -EIO;
}
/* find memory region to which to copy the image */
addr = le64_to_cpu(fw_desc->img_addr);
region_id = hl_get_pci_memory_region(hdev, addr);
- if ((region_id != PCI_REGION_SRAM) &&
- ((region_id != PCI_REGION_DRAM))) {
- dev_err(hdev->dev,
- "Invalid region to copy FW image address=%llx\n", addr);
+ if ((region_id != PCI_REGION_SRAM) && ((region_id != PCI_REGION_DRAM))) {
+ dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
return -EIO;
}
@@ -1914,8 +1946,7 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
fw_loader->dynamic_loader.fw_image_size,
region);
if (rc) {
- dev_err(hdev->dev,
- "invalid mem transfer request for FW image\n");
+ dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
return rc;
}
@@ -2422,18 +2453,6 @@ static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
msg.reset_cause = *(__u8 *) data;
break;
- case HL_COMMS_BINNING_CONF_TYPE:
- {
- struct fw_binning_conf *binning_conf = (struct fw_binning_conf *) data;
-
- msg.tpc_binning_conf = cpu_to_le64(binning_conf->tpc_binning);
- msg.dec_binning_conf = cpu_to_le32(binning_conf->dec_binning);
- msg.hbm_binning_conf = cpu_to_le32(binning_conf->hbm_binning);
- msg.edma_binning_conf = cpu_to_le32(binning_conf->edma_binning);
- msg.mme_redundancy_conf = cpu_to_le32(binning_conf->mme_redundancy);
- break;
- }
-
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
@@ -2503,13 +2522,6 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
*/
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
- /* if no preboot loaded indication- wait for preboot */
- if (!(hdev->fw_loader.fw_comp_loaded & FW_TYPE_PREBOOT_CPU)) {
- rc = hl_fw_wait_preboot_ready(hdev);
- if (rc)
- return -EIO;
- }
-
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
0, true,
fw_loader->cpu_timeout);
@@ -2547,7 +2559,7 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
/*
* when testing FW load (without Linux) on PLDM we don't want to
* wait until boot fit is active as it may take several hours.
- * instead, we load the bootfit and let it do all initializations in
+ * instead, we load the bootfit and let it do all initialization in
* the background.
*/
if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
@@ -2961,3 +2973,49 @@ void hl_fw_set_max_power(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}
+
+static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
+ u32 nonce, u32 timeout)
+{
+ struct cpucp_packet pkt = {};
+ dma_addr_t req_dma_addr;
+ void *req_cpu_addr;
+ int rc;
+
+ req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
+ if (!req_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
+ return -ENOMEM;
+ }
+
+ memset(data, 0, size);
+
+ pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(req_dma_addr);
+ pkt.data_max_size = cpu_to_le32(size);
+ pkt.nonce = cpu_to_le32(nonce);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ timeout, NULL);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
+ goto out;
+ }
+
+ memcpy(data, req_cpu_addr, size);
+
+out:
+ hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
+
+ return rc;
+}
+
+int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
+ u32 nonce)
+{
+ return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
+ sizeof(struct cpucp_sec_attest_info), nonce,
+ HL_CPUCP_SEC_ATTEST_INFO_TIMEOUT_USEC);
+}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index d59bba9e55c9..58c95b13be69 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -66,6 +66,7 @@ struct hl_fpriv;
#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_MON_DUMP_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_CPUCP_SEC_ATTEST_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
#define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC 1000000 /* 1s */
@@ -94,7 +95,7 @@ struct hl_fpriv;
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
/**
- * enum hl_mmu_page_table_locaion - mmu page table location
+ * enum hl_mmu_page_table_location - mmu page table location
* @MMU_DR_PGT: page-table is located on device DRAM.
* @MMU_HR_PGT: page-table is located on host memory.
* @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
@@ -143,6 +144,25 @@ enum hl_mmu_enablement {
#define HL_MAX_DCORES 8
+/* DMA alloc/free wrappers */
+#define hl_asic_dma_alloc_coherent(hdev, size, dma_handle, flags) \
+ hl_asic_dma_alloc_coherent_caller(hdev, size, dma_handle, flags, __func__)
+
+#define hl_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle) \
+ hl_cpu_accessible_dma_pool_alloc_caller(hdev, size, dma_handle, __func__)
+
+#define hl_asic_dma_pool_zalloc(hdev, size, mem_flags, dma_handle) \
+ hl_asic_dma_pool_zalloc_caller(hdev, size, mem_flags, dma_handle, __func__)
+
+#define hl_asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle) \
+ hl_asic_dma_free_coherent_caller(hdev, size, cpu_addr, dma_handle, __func__)
+
+#define hl_cpu_accessible_dma_pool_free(hdev, size, vaddr) \
+ hl_cpu_accessible_dma_pool_free_caller(hdev, size, vaddr, __func__)
+
+#define hl_asic_dma_pool_free(hdev, vaddr, dma_addr) \
+ hl_asic_dma_pool_free_caller(hdev, vaddr, dma_addr, __func__)
+
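/*
 * Illustrative sketch (not part of this patch): the wrappers above forward
 * __func__ into the *_caller() variants so DMA allocations can be logged or
 * traced per calling function. The minimal standalone C example below shows
 * the same caller-tracking pattern; my_alloc()/my_alloc_caller() are made-up
 * names used only for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

static void *my_alloc_caller(size_t size, const char *caller)
{
	void *p = malloc(size);

	/* the wrapper, not each call site, reports who asked for the memory */
	printf("%s: allocated %zu bytes at %p\n", caller, size, p);
	return p;
}

/* call sites keep the short name; __func__ is captured automatically */
#define my_alloc(size) my_alloc_caller((size), __func__)

int main(void)
{
	void *buf = my_alloc(64);	/* prints "main: allocated 64 bytes ..." */

	free(buf);
	return 0;
}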
/*
* Reset Flags
*
@@ -208,6 +228,7 @@ enum hl_protection_levels {
* struct iterate_module_ctx - HW module iterator
* @fn: function to apply to each HW module instance
* @data: optional internal data to the function iterator
+ * @rc: return code for optional use of iterator/iterator-caller
*/
struct iterate_module_ctx {
/*
@@ -217,10 +238,12 @@ struct iterate_module_ctx {
* @inst: HW module instance within the block
* @offset: current HW module instance offset from the 1-st HW module instance
* in the 1-st block
- * @data: function specific data
+ * @ctx: the iterator context.
*/
- void (*fn)(struct hl_device *hdev, int block, int inst, u32 offset, void *data);
+ void (*fn)(struct hl_device *hdev, int block, int inst, u32 offset,
+ struct iterate_module_ctx *ctx);
void *data;
+ int rc;
};
struct hl_block_glbl_sec {
@@ -342,7 +365,8 @@ enum hl_cs_type {
CS_TYPE_WAIT,
CS_TYPE_COLLECTIVE_WAIT,
CS_RESERVE_SIGNALS,
- CS_UNRESERVE_SIGNALS
+ CS_UNRESERVE_SIGNALS,
+ CS_TYPE_ENGINE_CORE
};
/*
@@ -544,10 +568,6 @@ struct hl_hints_range {
* @tpc_binning_mask: which TPCs are binned. 0 means usable and 1 means binned.
* @dram_enabled_mask: which DRAMs are enabled.
* @dram_binning_mask: which DRAMs are binned. 0 means usable, 1 means binned.
- * @cb_va_start_addr: virtual start address of command buffers which are mapped
- * to the device's MMU.
- * @cb_va_end_addr: virtual end address of command buffers which are mapped to
- * the device's MMU.
* @dram_hints_align_mask: dram va hint addresses alignment mask which is used
* for hints validity check.
* @cfg_base_address: config space base address.
@@ -614,6 +634,7 @@ struct hl_hints_range {
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
* it shall be equal to dram_page_size.
+ * @num_engine_cores: number of engine cpu cores
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -658,6 +679,7 @@ struct hl_hints_range {
* @set_max_power_on_device_init: true if need to set max power in F/W on device init.
* @supports_user_set_page_size: true if user can set the allocation page size.
* @dma_mask: the dma mask to be set for this device
+ * @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -689,8 +711,6 @@ struct asic_fixed_properties {
u64 tpc_binning_mask;
u64 dram_enabled_mask;
u64 dram_binning_mask;
- u64 cb_va_start_addr;
- u64 cb_va_end_addr;
u64 dram_hints_align_mask;
u64 cfg_base_address;
u64 mmu_cache_mng_addr;
@@ -734,6 +754,7 @@ struct asic_fixed_properties {
u32 faulty_dram_cluster_map;
u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
+ u32 num_engine_cores;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -766,6 +787,7 @@ struct asic_fixed_properties {
u8 set_max_power_on_device_init;
u8 supports_user_set_page_size;
u8 dma_mask;
+ u8 supports_advanced_cpucp_rc;
};
/**
@@ -797,7 +819,7 @@ struct hl_fence {
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
* @hw_sob: the H/W SOB used in this signal/wait CS.
- * @encaps_sig_hdl: encaps signals hanlder.
+ * @encaps_sig_hdl: encaps signals handler.
* @cs_seq: command submission sequence number.
* @type: type of the CS - signal/wait.
* @sob_val: the SOB value that is used in this signal/wait CS.
@@ -898,14 +920,14 @@ struct hl_mmap_mem_buf {
* @buf: back pointer to the parent mappable memory buffer
* @debugfs_list: node in debugfs list of command buffers.
* @pool_list: node in pool list of command buffers.
- * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
- * the device's MMU.
* @kernel_address: Holds the CB's kernel virtual address.
+ * @virtual_addr: Holds the CB's virtual address.
* @bus_address: Holds the CB's DMA address.
* @size: holds the CB's size.
+ * @roundup_size: holds the cb size after roundup to page size.
* @cs_cnt: holds number of CS that this CB participates in.
* @is_pool: true if CB was acquired from the pool, false otherwise.
- * @is_internal: internaly allocated
+ * @is_internal: internally allocated
* @is_mmu_mapped: true if the CB is mapped to the device's MMU.
*/
struct hl_cb {
@@ -914,10 +936,11 @@ struct hl_cb {
struct hl_mmap_mem_buf *buf;
struct list_head debugfs_list;
struct list_head pool_list;
- struct list_head va_block_list;
void *kernel_address;
+ u64 virtual_addr;
dma_addr_t bus_address;
u32 size;
+ u32 roundup_size;
atomic_t cs_cnt;
u8 is_pool;
u8 is_internal;
@@ -1113,7 +1136,7 @@ struct timestamp_reg_info {
* @fence: hl fence object for interrupt completion
* @cq_target_value: CQ target value
* @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
- * handler for taget value comparison
+ * handler for target value comparison
*/
struct hl_user_pending_interrupt {
struct timestamp_reg_info ts_reg_info;
@@ -1372,6 +1395,18 @@ struct fw_load_mgr {
struct hl_cs;
/**
+ * struct engines_data - asic engines data
+ * @buf: buffer for engines data in ascii
+ * @actual_size: actual size of data that was written by the driver to the allocated buffer
+ * @allocated_buf_size: total size of allocated buffer
+ */
+struct engines_data {
+ char *buf;
+ int actual_size;
+ u32 allocated_buf_size;
+};
+
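/*
 * Illustrative sketch (not part of this patch): struct engines_data is filled
 * by appending formatted text at buf + actual_size while tracking how much of
 * allocated_buf_size has been consumed, which is what the hl_engine_data_sprintf()
 * helper declared further below is for. The standalone userspace fragment here
 * only mimics that append pattern; it is not the driver implementation.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct engines_data_sketch {
	char *buf;
	int actual_size;
	unsigned int allocated_buf_size;
};

static void eng_data_append(struct engines_data_sketch *e, const char *fmt, ...)
{
	va_list args;
	int n;

	/* stop appending once the buffer is exhausted (or was overrun) */
	if (e->actual_size < 0 || (unsigned int)e->actual_size >= e->allocated_buf_size)
		return;

	va_start(args, fmt);
	/* append after the data already written, never past the allocation */
	n = vsnprintf(e->buf + e->actual_size,
		      e->allocated_buf_size - e->actual_size, fmt, args);
	va_end(args);

	if (n > 0)
		e->actual_size += n;
}

int main(void)
{
	struct engines_data_sketch e = {
		.buf = calloc(1, 4096),
		.actual_size = 0,
		.allocated_buf_size = 4096,
	};

	if (!e.buf)
		return 1;

	eng_data_append(&e, "ENGINE  STATUS\n");
	eng_data_append(&e, "TPC0    %s\n", "idle");
	printf("%s(%d bytes used)\n", e.buf, e.actual_size);

	free(e.buf);
	return 0;
}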
+/**
* struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
* @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
@@ -1434,11 +1469,9 @@ struct hl_cs;
* @send_heartbeat: send is-alive packet to CPU-CP and verify response.
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
- * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
+ * @compute_reset_late_init: perform certain actions needed after a compute reset
* @hw_queues_lock: acquire H/W queues lock.
* @hw_queues_unlock: release H/W queues lock.
- * @kdma_lock: acquire H/W queues lock. Relevant from GRECO ASIC
- * @kdma_unlock: release H/W queues lock. Relevant from GRECO ASIC
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @get_monitor_dump: retrieve monitor registers dump from F/W.
@@ -1498,6 +1531,8 @@ struct hl_cs;
* @check_if_razwi_happened: check if there was a razwi due to RR violation.
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
* @set_engine_cores: set a config command to engine cores
+ * @send_device_activity: indication to FW about device availability
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1570,13 +1605,11 @@ struct hl_asic_funcs {
int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
- bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s);
- int (*non_hard_reset_late_init)(struct hl_device *hdev);
+ bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e);
+ int (*compute_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
- void (*kdma_lock)(struct hl_device *hdev, int dcore_id);
- void (*kdma_unlock)(struct hl_device *hdev, int dcore_id);
u32 (*get_pci_id)(struct hl_device *hdev);
int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
int (*get_monitor_dump)(struct hl_device *hdev, void *data);
@@ -1634,6 +1667,9 @@ struct hl_asic_funcs {
int (*access_dev_mem)(struct hl_device *hdev, enum pci_region region_type,
u64 addr, u64 *val, enum debugfs_access_type acc_type);
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
+ int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids,
+ u32 num_cores, u32 core_command);
+ int (*send_device_activity)(struct hl_device *hdev, bool open);
};
@@ -1727,10 +1763,10 @@ struct hl_cs_outcome {
/**
* struct hl_cs_outcome_store - represents a limited store of completed CS outcomes
- * @outcome_map: index of completed CS searcheable by sequence number
+ * @outcome_map: index of completed CS searchable by sequence number
* @used_list: list of outcome objects currently in use
* @free_list: list of outcome objects currently not in use
- * @nodes_pool: a static pool of preallocated outcome objects
+ * @nodes_pool: a static pool of pre-allocated outcome objects
* @db_lock: any operation on the store must take this lock
*/
struct hl_cs_outcome_store {
@@ -1754,12 +1790,10 @@ struct hl_cs_outcome_store {
* @refcount: reference counter for the context. Context is released only when
* this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
- * @outcome_store: storage data structure used to remember ouitcomes of completed
+ * @outcome_store: storage data structure used to remember outcomes of completed
* command submissions for a long time after CS id wraparound.
* @va_range: holds available virtual addresses for host and dram mappings.
* @mem_hash_lock: protects the mem_hash.
- * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
- * MMU hash or walking the PGT requires talking this lock.
* @hw_block_list_lock: protects the HW block memory list.
* @debugfs_list: node in debugfs list of contexts.
* @hw_block_mem_list: list of HW block virtual mapped addresses.
@@ -1767,6 +1801,7 @@ struct hl_cs_outcome_store {
* @cb_va_pool: device VA pool for command buffers which are mapped to the
* device's MMU.
* @sig_mgr: encaps signals handle manager.
+ * @cb_va_pool_base: the base address for the device VA pool
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
* index to cs_pending array.
@@ -1795,13 +1830,13 @@ struct hl_ctx {
struct hl_cs_outcome_store outcome_store;
struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
struct mutex mem_hash_lock;
- struct mutex mmu_lock;
struct mutex hw_block_list_lock;
struct list_head debugfs_list;
struct list_head hw_block_mem_list;
struct hl_cs_counters_atomic cs_counters;
struct gen_pool *cb_va_pool;
struct hl_encaps_signals_mgr sig_mgr;
+ u64 cb_va_pool_base;
u64 cs_sequence;
u64 *dram_default_hops;
spinlock_t cs_lock;
@@ -1823,7 +1858,6 @@ struct hl_ctx_mgr {
};
-
/*
* COMMAND SUBMISSIONS
*/
@@ -1889,7 +1923,7 @@ struct hl_userptr {
* @tdr_active: true if TDR was activated for this CS (to prevent
* double TDR activation).
* @aborted: true if CS was aborted due to some device error.
- * @timestamp: true if a timestmap must be captured upon completion.
+ * @timestamp: true if a timestamp must be captured upon completion.
* @staged_last: true if this is the last staged CS and needs completion.
* @staged_first: true if this is the first staged CS and we need to receive
* timeout for this CS.
@@ -2047,14 +2081,16 @@ struct hl_vm_hash_node {
* @node: node to hang on the list in context object.
* @ctx: the context this node belongs to.
* @vaddr: virtual address of the HW block.
- * @size: size of the block.
+ * @block_size: size of the block.
+ * @mapped_size: size of the block which is mapped. May change if partial un-mappings are done.
* @id: HW block id (handle).
*/
struct hl_vm_hw_block_list_node {
struct list_head node;
struct hl_ctx *ctx;
unsigned long vaddr;
- u32 size;
+ u32 block_size;
+ u32 mapped_size;
u32 id;
};
@@ -2214,7 +2250,7 @@ struct hl_info_list {
/**
* struct hl_debugfs_entry - debugfs dentry wrapper.
- * @info_ent: dentry realted ops.
+ * @info_ent: dentry related ops.
* @dev_entry: ASIC specific debugfs manager.
*/
struct hl_debugfs_entry {
@@ -2492,7 +2528,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
break; \
(val) = __elbi_read; \
} else {\
- (val) = RREG32((u32)addr); \
+ (val) = RREG32((u32)(addr)); \
} \
if (cond) \
break; \
@@ -2503,7 +2539,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
break; \
(val) = __elbi_read; \
} else {\
- (val) = RREG32((u32)addr); \
+ (val) = RREG32((u32)(addr)); \
} \
break; \
} \
@@ -2919,7 +2955,7 @@ struct razwi_info {
* struct undefined_opcode_info - info about last undefined opcode error
* @timestamp: timestamp of the undefined opcode error
* @cb_addr_streams: CB addresses (per stream) that currently exist in the PQ
- * entiers. In case all streams array entries are
+ * entries. In case all streams array entries are
* filled with values, it means the execution was in Lower-CP.
* @cq_addr: the address of the current handled command buffer
* @cq_size: the size of the current handled command buffer
@@ -2946,12 +2982,12 @@ struct undefined_opcode_info {
};
/**
- * struct last_error_session_info - info about last session errors occurred.
- * @cs_timeout: CS timeout error last information.
- * @razwi: razwi last information.
+ * struct hl_error_info - holds information collected during an error.
+ * @cs_timeout: CS timeout error information.
+ * @razwi: razwi information.
* @undef_opcode: undefined opcode information
*/
-struct last_error_session_info {
+struct hl_error_info {
struct cs_timeout_info cs_timeout;
struct razwi_info razwi;
struct undefined_opcode_info undef_opcode;
@@ -2960,7 +2996,7 @@ struct last_error_session_info {
/**
* struct hl_reset_info - holds current device reset information.
* @lock: lock to protect critical reset flows.
- * @compute_reset_cnt: number of compte resets since the driver was loaded.
+ * @compute_reset_cnt: number of compute resets since the driver was loaded.
* @hard_reset_cnt: number of hard resets since the driver was loaded.
* @hard_reset_schedule_flags: hard reset is scheduled to after current compute reset,
* here we hold the hard reset flags.
@@ -2971,7 +3007,7 @@ struct last_error_session_info {
* @hard_reset_pending: is there a hard reset work pending.
* @curr_reset_cause: saves an enumerated reset cause when a hard reset is
* triggered, and cleared after it is shared with preboot.
- * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
* with a new value on next reset
* @reset_trigger_repeated: set if device reset is triggered more than once with
* same cause.
@@ -3041,6 +3077,12 @@ struct hl_reset_info {
* @asid_mutex: protects asid_bitmap.
* @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
* @debug_lock: protects critical section of setting debug mode for device
+ * @mmu_lock: protects the MMU page tables and invalidation h/w. Although the
+ * page tables are per context, the invalidation h/w is per MMU.
+ * Therefore, we can't allow multiple contexts (we only have two,
+ * user and kernel) to access the invalidation h/w at the same time.
+ * In addition, any change to the PGT, modifying the MMU hash or
+ * walking the PGT requires taking this lock.
* @asic_prop: ASIC specific immutable properties.
* @asic_funcs: ASIC specific functions.
* @asic_specific: ASIC specific information to use only from ASIC files.
@@ -3049,7 +3091,7 @@ struct hl_reset_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
- * @cb_pool: list of preallocated CBs.
+ * @cb_pool: list of pre-allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
* @internal_cb_pool_dma_addr: internal command buffer pool dma address.
@@ -3070,7 +3112,7 @@ struct hl_reset_info {
* @state_dump_specs: constants and dictionaries needed to dump system state.
* @multi_cs_completion: array of multi-CS completion.
* @clk_throttling: holds information about current/previous clock throttling events
- * @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_major_version: major version of current loaded preboot.
@@ -3111,7 +3153,8 @@ struct hl_reset_info {
* @edma_binning: contains mask of edma engines that is received from the f/w which
* indicates which edma engines are binned-out
* @id: device minor.
- * @id_control: minor of the control device
+ * @id_control: minor of the control device.
+ * @cdev_idx: char device index. Used for setting its name.
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
* addresses.
* @is_in_dram_scrub: true if a dram scrub operation is ongoing.
@@ -3165,6 +3208,7 @@ struct hl_reset_info {
* Used only for testing.
* @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
* that the f/w is always alive. Used only for testing.
+ * @supports_ctx_switch: true if a ctx switch is required upon first submission.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -3204,6 +3248,7 @@ struct hl_device {
struct mutex asid_mutex;
struct mutex send_cpu_message_lock;
struct mutex debug_lock;
+ struct mutex mmu_lock;
struct asic_fixed_properties asic_prop;
const struct hl_asic_funcs *asic_funcs;
void *asic_specific;
@@ -3242,7 +3287,7 @@ struct hl_device {
struct multi_cs_completion multi_cs_completion[
MULTI_CS_MAX_USER_CTX];
struct hl_clk_throttle clk_throttling;
- struct last_error_session_info last_error;
+ struct hl_error_info captured_err_info;
struct hl_reset_info reset_info;
@@ -3271,6 +3316,7 @@ struct hl_device {
u32 edma_binning;
u16 id;
u16 id_control;
+ u16 cdev_idx;
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
@@ -3300,6 +3346,7 @@ struct hl_device {
u8 compute_ctx_in_release;
u8 supports_mmu_prefetch;
u8 reset_upon_device_release;
+ u8 supports_ctx_switch;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -3426,15 +3473,18 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
-void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag);
-void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
-void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
-void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
- dma_addr_t *dma_handle);
-void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr);
+void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, const char *caller);
+void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, const char *caller);
+void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, const char *caller);
+void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
+ const char *caller);
+void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle, const char *caller);
+void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
+ const char *caller);
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
@@ -3513,6 +3563,7 @@ void hl_sysfs_fini(struct hl_device *hdev);
int hl_hwmon_init(struct hl_device *hdev);
void hl_hwmon_fini(struct hl_device *hdev);
+void hl_hwmon_release_resources(struct hl_device *hdev);
int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
@@ -3557,7 +3608,7 @@ void hl_hw_block_mem_init(struct hl_ctx *ctx);
void hl_hw_block_mem_fini(struct hl_ctx *ctx);
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_va_range_type type, u32 size, u32 alignment);
+ enum hl_va_range_type type, u64 size, u32 alignment);
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
u64 start_addr, u64 size);
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
@@ -3674,6 +3725,7 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
struct cpucp_hbm_row_info *info);
int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
+int hl_fw_send_device_activity(struct hl_device *hdev, bool open);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
@@ -3697,6 +3749,8 @@ int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *va
void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long value);
long hl_fw_get_max_power(struct hl_device *hdev);
void hl_fw_set_max_power(struct hl_device *hdev);
+int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
+ u32 nonce);
int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev, int sensor_index, u32 attr, long value);
int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value);
@@ -3743,6 +3797,7 @@ struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
void *args);
+__printf(2, 3) void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index f733ead605e7..112632afe7d5 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -14,6 +14,9 @@
#include <linux/aer.h>
#include <linux/module.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/habanalabs.h>
+
#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team"
#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators"
@@ -27,7 +30,10 @@ static struct class *hl_class;
static DEFINE_IDR(hl_devs_idr);
static DEFINE_MUTEX(hl_devs_idr_lock);
-static int timeout_locked = 30;
+#define HL_DEFAULT_TIMEOUT_LOCKED 30 /* 30 seconds */
+#define GAUDI_DEFAULT_TIMEOUT_LOCKED 600 /* 10 minutes */
+
+static int timeout_locked = HL_DEFAULT_TIMEOUT_LOCKED;
static int reset_on_lockup = 1;
static int memory_scrub;
static ulong boot_error_status_mask = ULONG_MAX;
@@ -55,14 +61,12 @@ MODULE_PARM_DESC(boot_error_status_mask,
#define PCI_IDS_GAUDI_SEC 0x1010
#define PCI_IDS_GAUDI2 0x1020
-#define PCI_IDS_GAUDI2_SEC 0x1030
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2), },
- { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2_SEC), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
@@ -92,9 +96,6 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GAUDI2:
asic_type = ASIC_GAUDI2;
break;
- case PCI_IDS_GAUDI2_SEC:
- asic_type = ASIC_GAUDI2_SEC;
- break;
default:
asic_type = ASIC_INVALID;
break;
@@ -107,7 +108,6 @@ static bool is_asic_secured(enum hl_asic_type asic_type)
{
switch (asic_type) {
case ASIC_GAUDI_SEC:
- case ASIC_GAUDI2_SEC:
return true;
default:
return false;
@@ -161,7 +161,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
mutex_lock(&hdev->fpriv_list_lock);
if (!hl_device_operational(hdev, &status)) {
- dev_err_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
@@ -207,11 +207,13 @@ int hl_device_open(struct inode *inode, struct file *filp)
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
+ hdev->asic_funcs->send_device_activity(hdev, true);
+
hl_debugfs_add_file(hpriv);
- atomic_set(&hdev->last_error.cs_timeout.write_enable, 1);
- atomic_set(&hdev->last_error.razwi.write_enable, 1);
- hdev->last_error.undef_opcode.write_enable = true;
+ atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
+ atomic_set(&hdev->captured_err_info.razwi.write_enable, 1);
+ hdev->captured_err_info.undef_opcode.write_enable = true;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -269,7 +271,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
mutex_lock(&hdev->fpriv_ctrl_list_lock);
if (!hl_device_operational(hdev, NULL)) {
- dev_err_ratelimited(hdev->dev_ctrl,
+ dev_dbg_ratelimited(hdev->dev_ctrl,
"Can't open %s because it is disabled or in reset\n",
dev_name(hdev->dev_ctrl));
rc = -EPERM;
@@ -314,12 +316,22 @@ static void copy_kernel_module_params_to_device(struct hl_device *hdev)
hdev->boot_error_status_mask = boot_error_status_mask;
}
-static void fixup_device_params_per_asic(struct hl_device *hdev)
+static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
{
switch (hdev->asic_type) {
- case ASIC_GOYA:
case ASIC_GAUDI:
case ASIC_GAUDI_SEC:
+ /* If the user didn't request a timeout other than the default one, use
+ * the longer default timeout for Gaudi
+ */
+ if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
+ hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
+ MSEC_PER_SEC);
+
+ hdev->reset_upon_device_release = 0;
+ break;
+
+ case ASIC_GOYA:
hdev->reset_upon_device_release = 0;
break;
@@ -339,7 +351,7 @@ static int fixup_device_params(struct hl_device *hdev)
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
if (tmp_timeout)
- hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * 1000);
+ hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * MSEC_PER_SEC);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
@@ -360,7 +372,7 @@ static int fixup_device_params(struct hl_device *hdev)
if (!hdev->cpu_queues_enable)
hdev->heartbeat = 0;
- fixup_device_params_per_asic(hdev);
+ fixup_device_params_per_asic(hdev, tmp_timeout);
return 0;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 6a30bd98ab5e..43afe40966e5 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -14,6 +14,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
@@ -103,6 +104,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
+ hw_ip.security_enabled = prop->fw_security_enabled;
return copy_to_user(out, &hw_ip,
min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
@@ -591,8 +593,8 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.seq = hdev->last_error.cs_timeout.seq;
- info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout.timestamp);
+ info.seq = hdev->captured_err_info.cs_timeout.seq;
+ info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -607,12 +609,12 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.razwi.timestamp);
- info.addr = hdev->last_error.razwi.addr;
- info.engine_id_1 = hdev->last_error.razwi.engine_id_1;
- info.engine_id_2 = hdev->last_error.razwi.engine_id_2;
- info.no_engine_id = hdev->last_error.razwi.non_engine_initiator;
- info.error_type = hdev->last_error.razwi.type;
+ info.timestamp = ktime_to_ns(hdev->captured_err_info.razwi.timestamp);
+ info.addr = hdev->captured_err_info.razwi.addr;
+ info.engine_id_1 = hdev->captured_err_info.razwi.engine_id_1;
+ info.engine_id_2 = hdev->captured_err_info.razwi.engine_id_2;
+ info.no_engine_id = hdev->captured_err_info.razwi.non_engine_initiator;
+ info.error_type = hdev->captured_err_info.razwi.type;
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -627,13 +629,13 @@ static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *ar
if ((!max_size) || (!out))
return -EINVAL;
- info.timestamp = ktime_to_ns(hdev->last_error.undef_opcode.timestamp);
- info.engine_id = hdev->last_error.undef_opcode.engine_id;
- info.cq_addr = hdev->last_error.undef_opcode.cq_addr;
- info.cq_size = hdev->last_error.undef_opcode.cq_size;
- info.stream_id = hdev->last_error.undef_opcode.stream_id;
- info.cb_addr_streams_len = hdev->last_error.undef_opcode.cb_addr_streams_len;
- memcpy(info.cb_addr_streams, hdev->last_error.undef_opcode.cb_addr_streams,
+ info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
+ info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
+ info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
+ info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
+ info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
+ info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
+ memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
sizeof(info.cb_addr_streams));
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
@@ -660,6 +662,55 @@ static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
+static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct cpucp_sec_attest_info *sec_attest_info;
+ struct hl_info_sec_attest *info;
+ u32 max_size = args->return_size;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
+ if (!sec_attest_info)
+ return -ENOMEM;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ rc = -ENOMEM;
+ goto free_sec_attest_info;
+ }
+
+ rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
+ if (rc)
+ goto free_info;
+
+ info->nonce = le32_to_cpu(sec_attest_info->nonce);
+ info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
+ info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
+ info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
+ info->pcr_num_reg = sec_attest_info->pcr_num_reg;
+ info->pcr_reg_len = sec_attest_info->pcr_reg_len;
+ info->quote_sig_len = sec_attest_info->quote_sig_len;
+ memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
+ memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
+ memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
+ memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
+ memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));
+
+ rc = copy_to_user(out, info,
+ min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;
+
+free_info:
+ kfree(info);
+free_sec_attest_info:
+ kfree(sec_attest_info);
+
+ return rc;
+}
+
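/*
 * Illustrative userspace sketch (not part of this patch): how the new
 * HL_INFO_SECURED_ATTESTATION op is expected to be driven from userspace.
 * It assumes the uapi definitions added alongside this series, i.e.
 * HL_INFO_SECURED_ATTESTATION, struct hl_info_sec_attest and the
 * sec_attest_nonce field of struct hl_info_args; those names, the header
 * path and the /dev/hl0 node are assumptions and may differ.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_info_sec_attest attest;
	struct hl_info_args args;
	int fd, rc;

	fd = open("/dev/hl0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	memset(&attest, 0, sizeof(attest));
	args.op = HL_INFO_SECURED_ATTESTATION;
	args.return_pointer = (uint64_t)(uintptr_t)&attest;
	args.return_size = sizeof(attest);
	args.sec_attest_nonce = 0x12345678;	/* caller-chosen nonce */

	rc = ioctl(fd, HL_IOCTL_INFO, &args);
	if (rc)
		perror("HL_IOCTL_INFO");
	else
		printf("nonce %u, quote len %u\n", attest.nonce, attest.pcr_quote_len);

	close(fd);
	return 0;
}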
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
int rc;
@@ -697,6 +748,42 @@ static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
return 0;
}
+static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ u32 status_buf_size = args->return_size;
+ struct hl_device *hdev = hpriv->hdev;
+ struct engines_data eng_data;
+ int rc;
+
+ if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
+ return -EINVAL;
+
+ eng_data.actual_size = 0;
+ eng_data.allocated_buf_size = status_buf_size;
+ eng_data.buf = vmalloc(status_buf_size);
+ if (!eng_data.buf)
+ return -ENOMEM;
+
+ hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
+
+ if (eng_data.actual_size > eng_data.allocated_buf_size) {
+ dev_err(hdev->dev,
+ "Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
+ eng_data.actual_size, status_buf_size);
+ vfree(eng_data.buf);
+ return -ENOMEM;
+ }
+
+ args->user_buffer_actual_size = eng_data.actual_size;
+ rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
+ -EFAULT : 0;
+
+ vfree(eng_data.buf);
+
+ return rc;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -806,12 +893,18 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_DRAM_PENDING_ROWS:
return dram_pending_rows_info(hpriv, args);
+ case HL_INFO_SECURED_ATTESTATION:
+ return sec_attest_info(hpriv, args);
+
case HL_INFO_REGISTER_EVENTFD:
return eventfd_register(hpriv, args);
case HL_INFO_UNREGISTER_EVENTFD:
return eventfd_unregister(hpriv, args);
+ case HL_INFO_ENGINE_STATUS:
+ return engine_status_info(hpriv, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 3f15ab9d827f..d0087c0ec48c 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -826,9 +826,7 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
q->kernel_address = p;
- q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
- sizeof(*q->shadow_queue),
- GFP_KERNEL);
+ q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, sizeof(struct hl_cs_job *), GFP_KERNEL);
if (!q->shadow_queue) {
dev_err(hdev->dev,
"Failed to allocate shadow queue for H/W queue %d\n",
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index 57f5d2c48330..55eb0203817f 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -194,7 +194,8 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sen
curr_arr[sensors_by_type_next_index[type]++] = flags;
}
- channels_info = kcalloc(num_active_sensor_types + 1, sizeof(*channels_info), GFP_KERNEL);
+ channels_info = kcalloc(num_active_sensor_types + 1, sizeof(struct hwmon_channel_info *),
+ GFP_KERNEL);
if (!channels_info) {
rc = -ENOMEM;
goto channels_info_array_err;
@@ -910,3 +911,24 @@ void hl_hwmon_fini(struct hl_device *hdev)
hwmon_device_unregister(hdev->hwmon_dev);
}
+
+void hl_hwmon_release_resources(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 61bc1bfe984a..ef28f3b37b93 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -457,7 +457,7 @@ static void merge_va_blocks_locked(struct hl_device *hdev,
prev = list_prev_entry(va_block, node);
if (&prev->node != va_list && prev->end + 1 == va_block->start) {
prev->end = va_block->end;
- prev->size = prev->end - prev->start;
+ prev->size = prev->end - prev->start + 1;
list_del(&va_block->node);
kfree(va_block);
va_block = prev;
@@ -466,7 +466,7 @@ static void merge_va_blocks_locked(struct hl_device *hdev,
next = list_next_entry(va_block, node);
if (&next->node != va_list && va_block->end + 1 == next->start) {
next->start = va_block->start;
- next->size = next->end - next->start;
+ next->size = next->end - next->start + 1;
list_del(&va_block->node);
kfree(va_block);
}
@@ -755,7 +755,7 @@ out:
* - Return the start address of the virtual block.
*/
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
- enum hl_va_range_type type, u32 size, u32 alignment)
+ enum hl_va_range_type type, u64 size, u32 alignment)
{
return get_va_block(hdev, ctx->va_range[type], size, 0,
max(alignment, ctx->va_range[type]->page_size),
@@ -1210,18 +1210,18 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
goto va_block_err;
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
goto map_err;
@@ -1362,7 +1362,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
else
vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
@@ -1375,7 +1375,7 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
phys_pg_pack->total_size);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
/*
* If the context is closing we don't need to check for the MMU cache
@@ -1418,18 +1418,23 @@ vm_type_err:
return rc;
}
-static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
- u32 *size)
+static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
{
- u32 block_id = 0;
+ u32 block_id;
int rc;
+ *handle = 0;
+ if (size)
+ *size = 0;
+
rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
+ if (rc)
+ return rc;
*handle = block_id | HL_MMAP_TYPE_BLOCK;
*handle <<= PAGE_SHIFT;
- return rc;
+ return 0;
}
static void hw_block_vm_close(struct vm_area_struct *vma)
@@ -1437,6 +1442,13 @@ static void hw_block_vm_close(struct vm_area_struct *vma)
struct hl_vm_hw_block_list_node *lnode =
(struct hl_vm_hw_block_list_node *) vma->vm_private_data;
struct hl_ctx *ctx = lnode->ctx;
+ long new_mmap_size;
+
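+ /* A partial unmap only shrinks the tracked mapped size; the list node is
+ * removed from the context's list below only once the entire block has
+ * been unmapped.
+ */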
+ new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
+ if (new_mmap_size > 0) {
+ lnode->mapped_size = new_mmap_size;
+ return;
+ }
mutex_lock(&ctx->hw_block_list_lock);
list_del(&lnode->node);
@@ -1487,23 +1499,23 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
if (!lnode)
return -ENOMEM;
- vma->vm_ops = &hw_block_vm_ops;
- vma->vm_private_data = lnode;
-
- hl_ctx_get(ctx);
-
rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
if (rc) {
- hl_ctx_put(ctx);
kfree(lnode);
return rc;
}
+ hl_ctx_get(ctx);
+
lnode->ctx = ctx;
lnode->vaddr = vma->vm_start;
- lnode->size = block_size;
+ lnode->block_size = block_size;
+ lnode->mapped_size = lnode->block_size;
lnode->id = block_id;
+ vma->vm_private_data = lnode;
+ vma->vm_ops = &hw_block_vm_ops;
+
mutex_lock(&ctx->hw_block_list_lock);
list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
mutex_unlock(&ctx->hw_block_list_lock);
@@ -2296,8 +2308,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
return -EFAULT;
}
- userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
- GFP_KERNEL);
+ userptr->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!userptr->pages)
return -ENOMEM;
@@ -2759,13 +2770,13 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
unmap_device_va(ctx, &args, true);
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
/* invalidate the cache once after the unmapping loop */
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
INIT_LIST_HEAD(&free_list);
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
index 56df962d2f3c..1936d653699e 100644
--- a/drivers/misc/habanalabs/common/memory_mgr.c
+++ b/drivers/misc/habanalabs/common/memory_mgr.c
@@ -11,7 +11,7 @@
* hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
* the buffer descriptor.
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
* @handle: requested buffer handle
*
* Find the buffer in the store and return a pointer to its descriptor.
@@ -104,7 +104,7 @@ int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
* hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
* given handle.
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
* @handle: requested buffer handle
*
* Decrease the reference to the buffer, and release it if it was the last one.
@@ -137,7 +137,7 @@ int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
/**
* hl_mmap_mem_buf_alloc - allocate a new mappable buffer
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
* @behavior: behavior object describing this buffer polymorphic behavior
* @gfp: gfp flags to use for the memory allocations
* @args: additional args passed to behavior->alloc
@@ -222,7 +222,7 @@ static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
/**
* hl_mem_mgr_mmap - map the given buffer to the user
*
- * @mmg: unifed memory manager
+ * @mmg: unified memory manager
* @vma: the vma object for which mmap was closed.
* @args: additional args passed to behavior->mmap
*
@@ -322,7 +322,7 @@ void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
/**
* hl_mem_mgr_fini - release unified memory manager
*
- * @mmg: parent unifed memory manager
+ * @mmg: parent unified memory manager
*
* Release the unified memory manager. Shall be called from an interrupt context.
*/
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 60740de47b34..cf8946266615 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -9,6 +9,8 @@
#include "../habanalabs.h"
+#include <trace/events/habanalabs.h>
+
/**
* hl_mmu_get_funcs() - get MMU functions structure
* @hdev: habanalabs device structure.
@@ -45,6 +47,8 @@ int hl_mmu_init(struct hl_device *hdev)
if (!hdev->mmu_enable)
return 0;
+ mutex_init(&hdev->mmu_lock);
+
if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
if (rc)
@@ -86,6 +90,8 @@ void hl_mmu_fini(struct hl_device *hdev)
if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
hdev->mmu_func[MMU_HR_PGT].fini(hdev);
+
+ mutex_destroy(&hdev->mmu_lock);
}
/**
@@ -104,8 +110,6 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
if (!hdev->mmu_enable)
return 0;
- mutex_init(&ctx->mmu_lock);
-
if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
if (rc)
@@ -149,8 +153,6 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx)
if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
-
- mutex_destroy(&ctx->mmu_lock);
}
/*
@@ -259,6 +261,9 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flu
if (flush_pte)
mmu_funcs->flush(ctx);
+ if (trace_habanalabs_mmu_unmap_enabled() && !rc)
+ trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);
+
return rc;
}
@@ -344,6 +349,8 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_s
if (flush_pte)
mmu_funcs->flush(ctx);
+ trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);
+
return 0;
err:
@@ -403,6 +410,8 @@ int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
dev_err(hdev->dev,
"Map failed for va 0x%llx to pa 0x%llx\n",
curr_va, curr_pa);
+ /* last mapping failed so don't try to unmap it - reduce off by page_size */
+ off -= page_size;
goto unmap;
}
}
@@ -600,9 +609,9 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
return rc;
@@ -692,16 +701,16 @@ static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
struct hl_ctx *ctx = pfw->ctx;
+ struct hl_device *hdev = ctx->hdev;
- if (!hl_device_operational(ctx->hdev, NULL))
+ if (!hl_device_operational(hdev, NULL))
goto put_ctx;
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
- ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
- pfw->va, pfw->size);
+ hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, pfw->va, pfw->size);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
put_ctx:
/*
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 6c5271f01160..36e9814139d1 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -375,6 +375,14 @@ out:
return max_size;
}
+static ssize_t security_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", hdev->asic_prop.fw_security_enabled);
+}
+
static DEVICE_ATTR_RO(armcp_kernel_ver);
static DEVICE_ATTR_RO(armcp_ver);
static DEVICE_ATTR_RO(cpld_ver);
@@ -393,6 +401,7 @@ static DEVICE_ATTR_RO(status);
static DEVICE_ATTR_RO(thermal_ver);
static DEVICE_ATTR_RO(uboot_ver);
static DEVICE_ATTR_RO(fw_os_ver);
+static DEVICE_ATTR_RO(security_enabled);
static struct bin_attribute bin_attr_eeprom = {
.attr = {.name = "eeprom", .mode = (0444)},
@@ -417,6 +426,7 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
&dev_attr_fw_os_ver.attr,
+ &dev_attr_security_enabled.attr,
NULL,
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index cb2988e2c7a8..92560414e843 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -899,12 +899,13 @@ static int gaudi_early_fini(struct hl_device *hdev)
*/
static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
int rc;
- if (hdev->asic_prop.fw_security_enabled) {
+ if ((hdev->fw_components & FW_TYPE_LINUX) &&
+ (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
struct gaudi_device *gaudi = hdev->asic_specific;
if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
@@ -939,9 +940,7 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
else
freq = pll_clk / (div_fctr + 1);
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
+ dev_warn(hdev->dev, "Received invalid div select value: %#x", div_sel);
freq = 0;
}
}
@@ -985,9 +984,10 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev,
init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);
init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
- dst_addr = (prop->sram_user_base_address &
- GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
- GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+
+ /* TPC_CMD is configured with I$ prefetch enabled, so address should be aligned to 8KB */
+ dst_addr = FIELD_PREP(GAUDI_PKT_LIN_DMA_DST_ADDR_MASK,
+ round_up(prop->sram_user_base_address, SZ_8K));
init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);
job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
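For reference, a standalone sketch of the packing done above: round the SRAM user base up to an 8 KB boundary (the I$ prefetch requirement from the comment) and place it in the destination-address field with FIELD_PREP. The macros are simplified userspace stand-ins for <linux/bits.h>/<linux/bitfield.h>, and the mask value is illustrative, not the real GAUDI_PKT_LIN_DMA_DST_ADDR_MASK:

#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a)        ((((x) + (a) - 1) / (a)) * (a))
#define FIELD_PREP(mask, val) (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define EXAMPLE_DST_ADDR_MASK 0x00ffffffffffffffULL /* illustrative only */
#define SZ_8K                 0x2000ULL

int main(void)
{
	uint64_t sram_user_base = 0x7ff0001000ULL; /* arbitrary example address */
	uint64_t dst = FIELD_PREP(EXAMPLE_DST_ADDR_MASK,
				  ROUND_UP(sram_user_base, SZ_8K));

	printf("aligned dst field: %#llx\n", (unsigned long long)dst);
	return 0;
}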
@@ -1683,23 +1683,7 @@ disable_pci_access:
static void gaudi_late_fini(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
- int i = 0;
-
- if (!hdev->hl_chip_info->info)
- return;
-
- channel_info_arr = hdev->hl_chip_info->info;
-
- while (channel_info_arr[i]) {
- kfree(channel_info_arr[i]->config);
- kfree(channel_info_arr[i]);
- i++;
- }
-
- kfree(channel_info_arr);
-
- hdev->hl_chip_info->info = NULL;
+ hl_hwmon_release_resources(hdev);
}
static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
@@ -4723,7 +4707,7 @@ static int gaudi_scrub_device_mem(struct hl_device *hdev)
addr = prop->sram_user_base_address;
size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET;
- dev_dbg(hdev->dev, "Scrubing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n",
+ dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n",
addr, addr + size, val);
rc = gaudi_memset_device_memory(hdev, addr, size, val);
if (rc) {
@@ -6911,9 +6895,9 @@ static void gaudi_handle_sw_config_stream_data(struct hl_device *hdev, u32 strea
stream, cq_ptr, size);
if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
- hdev->last_error.undef_opcode.cq_addr = cq_ptr;
- hdev->last_error.undef_opcode.cq_size = size;
- hdev->last_error.undef_opcode.stream_id = stream;
+ hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr;
+ hdev->captured_err_info.undef_opcode.cq_size = size;
+ hdev->captured_err_info.undef_opcode.stream_id = stream;
}
}
@@ -6979,7 +6963,7 @@ static void gaudi_handle_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
}
if (event_mask & HL_NOTIFIER_EVENT_UNDEFINED_OPCODE) {
- struct undefined_opcode_info *undef_opcode = &hdev->last_error.undef_opcode;
+ struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode;
u32 arr_idx = undef_opcode->cb_addr_streams_len;
if (arr_idx == 0) {
@@ -7063,11 +7047,11 @@ static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
}
/* check for undefined opcode */
if (glbl_sts_val & TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK &&
- hdev->last_error.undef_opcode.write_enable) {
- memset(&hdev->last_error.undef_opcode, 0,
- sizeof(hdev->last_error.undef_opcode));
+ hdev->captured_err_info.undef_opcode.write_enable) {
+ memset(&hdev->captured_err_info.undef_opcode, 0,
+ sizeof(hdev->captured_err_info.undef_opcode));
- hdev->last_error.undef_opcode.write_enable = false;
+ hdev->captured_err_info.undef_opcode.write_enable = false;
*event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE;
}
@@ -7233,12 +7217,6 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *e
switch (event_type) {
case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
- /* In TPC QM event, notify on TPC assertion. While there isn't
- * a specific event for assertion yet, the FW generates QM event.
- * The SW upper layer will inspect an internal mapped area to indicate
- * if the event is a tpc assertion or tpc QM.
- */
- *event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
index = event_type - GAUDI_EVENT_TPC0_QM;
qid_base = GAUDI_QUEUE_ID_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmTPC0_QM_BASE + index * TPC_QMAN_OFFSET;
@@ -7349,18 +7327,19 @@ static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
/* In case it's the first razwi, save its parameters */
- rc = atomic_cmpxchg(&hdev->last_error.razwi.write_enable, 1, 0);
+ rc = atomic_cmpxchg(&hdev->captured_err_info.razwi.write_enable, 1, 0);
if (rc) {
- hdev->last_error.razwi.timestamp = ktime_get();
- hdev->last_error.razwi.addr = razwi_addr;
- hdev->last_error.razwi.engine_id_1 = engine_id_1;
- hdev->last_error.razwi.engine_id_2 = engine_id_2;
+ hdev->captured_err_info.razwi.timestamp = ktime_get();
+ hdev->captured_err_info.razwi.addr = razwi_addr;
+ hdev->captured_err_info.razwi.engine_id_1 = engine_id_1;
+ hdev->captured_err_info.razwi.engine_id_2 = engine_id_2;
/*
* If the first engine id holds a non-valid value, the razwi
* initiator does not have an engine id
*/
- hdev->last_error.razwi.non_engine_initiator = (engine_id_1 == U16_MAX);
- hdev->last_error.razwi.type = razwi_type;
+ hdev->captured_err_info.razwi.non_engine_initiator =
+ (engine_id_1 == U16_MAX);
+ hdev->captured_err_info.razwi.type = razwi_type;
}
}
@@ -7427,7 +7406,7 @@ static void gaudi_print_nic_axi_irq_info(struct hl_device *hdev, u16 event_type,
event_type, desc);
}
-static int gaudi_non_hard_reset_late_init(struct hl_device *hdev)
+static int gaudi_compute_reset_late_init(struct hl_device *hdev)
{
/* GAUDI doesn't support any reset except hard-reset */
return -EPERM;
@@ -7702,6 +7681,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
@@ -7711,6 +7691,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
@@ -7722,6 +7703,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
@@ -7733,6 +7715,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_TPC0_DEC:
@@ -7743,10 +7726,17 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_TPC5_DEC:
case GAUDI_EVENT_TPC6_DEC:
case GAUDI_EVENT_TPC7_DEC:
+ /* In TPC DEC event, notify on TPC assertion. While there isn't
+ * a specific event for assertion yet, the FW generates TPC DEC event.
+ * The SW upper layer will inspect an internal mapped area to indicate
+ * if the event is a TPC Assertion or a "real" TPC DEC.
+ */
+ event_mask |= HL_NOTIFIER_EVENT_TPC_ASSERT;
gaudi_print_irq_info(hdev, event_type, true);
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_dec_event_to_tpc_id(event_type),
"AXI_SLV_DEC_Error");
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
@@ -7755,6 +7745,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
}
break;
@@ -7770,6 +7761,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
reset_required = gaudi_tpc_read_interrupts(hdev,
tpc_krn_event_to_tpc_id(event_type),
"KRN_ERR");
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (reset_required) {
dev_err(hdev->dev, "reset required due to %s\n",
gaudi_irq_map_table[event_type].name);
@@ -7778,6 +7770,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
goto reset_device;
} else {
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
}
break;
@@ -7806,9 +7799,25 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI_EVENT_PCIE_DEC:
+ case GAUDI_EVENT_CPU_AXI_SPLITTER:
+ case GAUDI_EVENT_PSOC_AXI_DEC:
+ case GAUDI_EVENT_PSOC_PRSTN_FALL:
+ gaudi_print_irq_info(hdev, event_type, true);
+ hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ break;
+
+ case GAUDI_EVENT_MMU_PAGE_FAULT:
+ case GAUDI_EVENT_MMU_WR_PERM:
+ gaudi_print_irq_info(hdev, event_type, true);
+ hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ break;
+
case GAUDI_EVENT_MME0_WBC_RSP:
case GAUDI_EVENT_MME0_SBAB0_RSP:
case GAUDI_EVENT_MME1_WBC_RSP:
@@ -7817,11 +7826,6 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_MME2_SBAB0_RSP:
case GAUDI_EVENT_MME3_WBC_RSP:
case GAUDI_EVENT_MME3_SBAB0_RSP:
- case GAUDI_EVENT_CPU_AXI_SPLITTER:
- case GAUDI_EVENT_PSOC_AXI_DEC:
- case GAUDI_EVENT_PSOC_PRSTN_FALL:
- case GAUDI_EVENT_MMU_PAGE_FAULT:
- case GAUDI_EVENT_MMU_WR_PERM:
case GAUDI_EVENT_RAZWI_OR_ADC:
case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
@@ -7841,10 +7845,12 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_qman_err(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= (HL_NOTIFIER_EVENT_USER_ENGINE_ERR | HL_NOTIFIER_EVENT_DEVICE_RESET);
break;
case GAUDI_EVENT_RAZWI_OR_ADC_SW:
gaudi_print_irq_info(hdev, event_type, true);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
goto reset_device;
case GAUDI_EVENT_TPC0_BMON_SPMU:
@@ -7858,11 +7864,13 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
gaudi_print_irq_info(hdev, event_type, false);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_NIC_SEI_0 ... GAUDI_EVENT_NIC_SEI_4:
gaudi_print_nic_axi_irq_info(hdev, event_type, &data);
hl_fw_unmask_irq(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
@@ -7870,6 +7878,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
gaudi_print_sm_sei_info(hdev, event_type,
&eq_entry->sm_sei_data);
rc = hl_state_dump(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
if (rc)
dev_err(hdev->dev,
"Error during system state dump %d\n", rc);
@@ -7880,6 +7889,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break;
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
gaudi_print_clk_change_info(hdev, event_type);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -7889,20 +7899,24 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
dev_err(hdev->dev,
"Received high temp H/W interrupt %d (cause %d)\n",
event_type, cause);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI_EVENT_DEV_RESET_REQ:
gaudi_print_irq_info(hdev, event_type, false);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
case GAUDI_EVENT_FW_ALIVE_S:
gaudi_print_irq_info(hdev, event_type, false);
gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
goto reset_device;
default:
@@ -8066,8 +8080,8 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev)
return 0;
}
-static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s)
+static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
struct gaudi_device *gaudi = hdev->asic_specific;
const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
@@ -8079,8 +8093,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
u64 offset;
int i, dma_id, port;
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nDMA is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
"--- ------- ------------ ---------- -------------\n");
@@ -8097,14 +8111,14 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask);
- if (s)
- seq_printf(s, fmt, dma_id,
+ if (e)
+ hl_engine_data_sprintf(e, fmt, dma_id,
is_eng_idle ? "Y" : "N", qm_glbl_sts0,
qm_cgm_sts, dma_core_sts0);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nTPC is_idle QM_GLBL_STS0 QM_CGM_STS CFG_STATUS\n"
"--- ------- ------------ ---------- ----------\n");
@@ -8119,14 +8133,14 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask);
- if (s)
- seq_printf(s, fmt, i,
+ if (e)
+ hl_engine_data_sprintf(e, fmt, i,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nMME is_idle QM_GLBL_STS0 QM_CGM_STS ARCH_STATUS\n"
"--- ------- ------------ ---------- -----------\n");
@@ -8147,20 +8161,21 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask);
- if (s) {
+ if (e) {
if (!is_slave)
- seq_printf(s, fmt, i,
+ hl_engine_data_sprintf(e, fmt, i,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
else
- seq_printf(s, mme_slave_fmt, i,
+ hl_engine_data_sprintf(e, mme_slave_fmt, i,
is_eng_idle ? "Y" : "N", "-",
"-", mme_arch_sts);
}
}
- if (s)
- seq_puts(s, "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
+ if (e)
+ hl_engine_data_sprintf(e,
+ "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
"--- ------- ------------ ----------\n");
for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) {
@@ -8174,8 +8189,8 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
- if (s)
- seq_printf(s, nic_fmt, port,
+ if (e)
+ hl_engine_data_sprintf(e, nic_fmt, port,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
@@ -8189,15 +8204,15 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
- if (s)
- seq_printf(s, nic_fmt, port,
+ if (e)
+ hl_engine_data_sprintf(e, nic_fmt, port,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts);
}
}
- if (s)
- seq_puts(s, "\n");
+ if (e)
+ hl_engine_data_sprintf(e, "\n");
return is_idle;
}
@@ -8392,13 +8407,13 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
goto destroy_internal_cb_pool;
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
goto unreserve_internal_cb_pool;
@@ -8425,13 +8440,13 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
return;
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
@@ -9148,6 +9163,11 @@ static void gaudi_add_device_attr(struct hl_device *hdev, struct attribute_group
dev_vrm_attr_grp->attrs = gaudi_vrm_dev_attrs;
}
+static int gaudi_send_device_activity(struct hl_device *hdev, bool open)
+{
+ return 0;
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -9192,11 +9212,9 @@ static const struct hl_asic_funcs gaudi_funcs = {
.send_heartbeat = gaudi_send_heartbeat,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
- .non_hard_reset_late_init = gaudi_non_hard_reset_late_init,
+ .compute_reset_late_init = gaudi_compute_reset_late_init,
.hw_queues_lock = gaudi_hw_queues_lock,
.hw_queues_unlock = gaudi_hw_queues_unlock,
- .kdma_lock = NULL,
- .kdma_unlock = NULL,
.get_pci_id = gaudi_get_pci_id,
.get_eeprom_data = gaudi_get_eeprom_data,
.get_monitor_dump = gaudi_get_monitor_dump,
@@ -9242,6 +9260,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi_set_hbm_bar_base,
+ .send_device_activity = gaudi_send_device_activity,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/misc/habanalabs/gaudi2/gaudi2.c
index 98336a1a84b0..75c4bef7841c 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2.c
@@ -21,7 +21,7 @@
#define GAUDI2_DMA_POOL_BLK_SIZE SZ_256 /* 256 bytes */
-#define GAUDI2_RESET_TIMEOUT_MSEC 500 /* 500ms */
+#define GAUDI2_RESET_TIMEOUT_MSEC 2000 /* 2000ms */
#define GAUDI2_RESET_POLL_TIMEOUT_USEC 50000 /* 50ms */
#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC 25000 /* 25s */
#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC 25000 /* 25s */
@@ -117,6 +117,12 @@
#define MMU_RANGE_INV_ASID_EN_SHIFT 1
#define MMU_RANGE_INV_ASID_SHIFT 2
+/* The last SPI_SEI cause bit, "burst_fifo_full", is expected to trigger in the PMMU because it has
+ * only a 2-entry FIFO, and hence this cause is not enabled for the PMMU.
+ */
+#define GAUDI2_PMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)
+#define GAUDI2_HMMU_SPI_SEI_ENABLE_MASK GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
+
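A userspace illustration of the two enable masks defined above, with GENMASK re-implemented locally; the number of SPI_SEI cause bits is an assumed example, not the real GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

#define NUM_CAUSE_BITS 5 /* assumed for illustration */

int main(void)
{
	uint64_t hmmu = GENMASK(NUM_CAUSE_BITS - 1, 0); /* all causes enabled */
	uint64_t pmmu = GENMASK(NUM_CAUSE_BITS - 2, 0); /* top cause masked off */

	printf("HMMU mask: %#llx, PMMU mask: %#llx\n",
	       (unsigned long long)hmmu, (unsigned long long)pmmu);
	return 0;
}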
#define GAUDI2_MAX_STRING_LEN 64
#define GAUDI2_VDEC_MSIX_ENTRIES (GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM - \
@@ -610,7 +616,7 @@ static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] =
"qman_axi_err",
"wap sei (wbc axi err)",
"arc sei",
- "mme_cfg_unalign_addr",
+ "cfg access error",
"qm_sw_err",
"sbte_dbg_intr_0",
"sbte_dbg_intr_1",
@@ -1525,17 +1531,57 @@ static const u32 rtr_coordinates_to_rtr_id[NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES]
RTR_ID_X_Y(17, 11)
};
+enum rtr_id {
+ DCORE0_RTR0,
+ DCORE0_RTR1,
+ DCORE0_RTR2,
+ DCORE0_RTR3,
+ DCORE0_RTR4,
+ DCORE0_RTR5,
+ DCORE0_RTR6,
+ DCORE0_RTR7,
+ DCORE1_RTR0,
+ DCORE1_RTR1,
+ DCORE1_RTR2,
+ DCORE1_RTR3,
+ DCORE1_RTR4,
+ DCORE1_RTR5,
+ DCORE1_RTR6,
+ DCORE1_RTR7,
+ DCORE2_RTR0,
+ DCORE2_RTR1,
+ DCORE2_RTR2,
+ DCORE2_RTR3,
+ DCORE2_RTR4,
+ DCORE2_RTR5,
+ DCORE2_RTR6,
+ DCORE2_RTR7,
+ DCORE3_RTR0,
+ DCORE3_RTR1,
+ DCORE3_RTR2,
+ DCORE3_RTR3,
+ DCORE3_RTR4,
+ DCORE3_RTR5,
+ DCORE3_RTR6,
+ DCORE3_RTR7,
+};
+
static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
- 1, 1, 2, 2, 3, 3, 14, 14, 13, 13, 12, 12, 19, 19, 18, 18, 17,
- 17, 28, 28, 29, 29, 30, 30, 0
+ DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3,
+ DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4,
+ DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1,
+ DCORE3_RTR4, DCORE3_RTR4, DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6,
+ DCORE0_RTR0
};
static const u32 gaudi2_dec_initiator_rtr_id[NUMBER_OF_DEC] = {
- 0, 0, 15, 15, 16, 16, 31, 31, 0, 0
+ DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0,
+ DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_nic_initiator_rtr_id[NIC_NUMBER_OF_MACROS] = {
- 15, 15, 15, 15, 15, 16, 16, 16, 16, 31, 31, 31
+ DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
+ DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
struct sft_info {
@@ -1548,11 +1594,11 @@ static const struct sft_info gaudi2_edma_initiator_sft_id[NUM_OF_EDMA_PER_DCORE
};
static const u32 gaudi2_pdma_initiator_rtr_id[NUM_OF_PDMA] = {
- 0, 0
+ DCORE0_RTR0, DCORE0_RTR0
};
static const u32 gaudi2_rot_initiator_rtr_id[NUM_OF_ROT] = {
- 16, 31
+ DCORE2_RTR0, DCORE3_RTR7
};
struct mme_initiators_rtr_id {
@@ -1663,7 +1709,7 @@ struct gaudi2_cache_invld_params {
};
struct gaudi2_tpc_idle_data {
- struct seq_file *s;
+ struct engines_data *e;
unsigned long *mask;
bool *is_idle;
const char *tpc_fmt;
@@ -1706,6 +1752,9 @@ void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
int dcore, inst, tpc_seq;
u32 offset;
+ /* init the return code */
+ ctx->rc = 0;
+
for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) {
for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) {
tpc_seq = dcore * NUM_OF_TPC_PER_DCORE + inst;
@@ -1715,7 +1764,12 @@ void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
offset = (DCORE_OFFSET * dcore) + (DCORE_TPC_OFFSET * inst);
- ctx->fn(hdev, dcore, inst, offset, ctx->data);
+ ctx->fn(hdev, dcore, inst, offset, ctx);
+ if (ctx->rc) {
+ dev_err(hdev->dev, "TPC iterator failed for DCORE%d TPC%d\n",
+ dcore, inst);
+ return;
+ }
}
}
@@ -1724,7 +1778,9 @@ void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx)
/* special check for PCI TPC (DCORE0_TPC6) */
offset = DCORE_TPC_OFFSET * (NUM_DCORE0_TPC - 1);
- ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx->data);
+ ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx);
+ if (ctx->rc)
+ dev_err(hdev->dev, "TPC iterator failed for DCORE0 TPC6\n");
}
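A minimal model of the iterator change above: the per-instance callback now receives the whole context and reports failure through ctx->rc, letting the iterator stop early. Names here are illustrative, not the driver's API:

#include <stdio.h>

struct iter_ctx {
	void (*fn)(int inst, struct iter_ctx *ctx);
	void *data;
	int rc;
};

static void check_instance(int inst, struct iter_ctx *ctx)
{
	if (inst == 2)
		ctx->rc = -1; /* simulate a failing instance */
}

static void iterate(struct iter_ctx *ctx, int count)
{
	int i;

	ctx->rc = 0;
	for (i = 0; i < count; i++) {
		ctx->fn(i, ctx);
		if (ctx->rc) {
			printf("iterator failed at instance %d\n", i);
			return;
		}
	}
}

int main(void)
{
	struct iter_ctx ctx = { .fn = check_instance };

	iterate(&ctx, 8);
	return ctx.rc ? 1 : 0;
}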
static bool gaudi2_host_phys_addr_valid(u64 addr)
@@ -1973,6 +2029,7 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END;
}
+ prop->num_engine_cores = CPU_ID_MAX;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
prop->num_of_events = GAUDI2_EVENT_SIZE;
@@ -2005,9 +2062,6 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
- prop->cb_va_start_addr = VA_HOST_SPACE_USER_MAPPED_CB_START;
- prop->cb_va_end_addr = VA_HOST_SPACE_USER_MAPPED_CB_END;
-
prop->max_dec = NUMBER_OF_DEC;
prop->clk_pll_index = HL_GAUDI2_MME_PLL;
@@ -2477,7 +2531,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pci_dev *pdev = hdev->pdev;
resource_size_t pci_bar_size;
- u32 fw_boot_status;
int rc;
rc = gaudi2_set_fixed_properties(hdev);
@@ -2505,22 +2558,14 @@ static int gaudi2_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DRAM_BAR_ID);
hdev->dram_pci_bar_start = pci_resource_start(pdev, DRAM_BAR_ID);
- /* If FW security is enabled at this point it means no access to ELBI */
- if (hdev->asic_prop.fw_security_enabled) {
- hdev->asic_prop.iatu_done_by_fw = true;
- goto pci_init;
- }
-
- rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0, &fw_boot_status);
- if (rc)
- goto free_queue_props;
-
- /* Check whether FW is configuring iATU */
- if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
- (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
+ /*
+ * Only on pldm does the driver configure the iATU; otherwise the FW does it
+ */
+ if (hdev->pldm)
+ hdev->asic_prop.iatu_done_by_fw = false;
+ else
hdev->asic_prop.iatu_done_by_fw = true;
-pci_init:
rc = hl_pci_init(hdev);
if (rc)
goto free_queue_props;
@@ -2676,6 +2721,8 @@ static int gaudi2_late_init(struct hl_device *hdev)
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;
+ hdev->asic_prop.supports_advanced_cpucp_rc = true;
+
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr);
if (rc) {
@@ -2703,23 +2750,7 @@ disable_pci_access:
static void gaudi2_late_fini(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
- int i = 0;
-
- if (!hdev->hl_chip_info->info)
- return;
-
- channel_info_arr = hdev->hl_chip_info->info;
-
- while (channel_info_arr[i]) {
- kfree(channel_info_arr[i]->config);
- kfree(channel_info_arr[i]);
- i++;
- }
-
- kfree(channel_info_arr);
-
- hdev->hl_chip_info->info = NULL;
+ hl_hwmon_release_resources(hdev);
}
static void gaudi2_user_mapped_dec_init(struct gaudi2_device *gaudi2, u32 start_idx)
@@ -2994,7 +3025,6 @@ static int gaudi2_sw_init(struct hl_device *hdev)
}
spin_lock_init(&gaudi2->hw_queues_lock);
- spin_lock_init(&gaudi2->kdma_lock);
gaudi2->scratchpad_kernel_address = hl_asic_dma_alloc_coherent(hdev, PAGE_SIZE,
&gaudi2->scratchpad_bus_address,
@@ -3551,7 +3581,7 @@ static int gaudi2_enable_msix(struct hl_device *hdev)
rc = gaudi2_dec_enable_msix(hdev);
if (rc) {
dev_err(hdev->dev, "Failed to enable decoder IRQ");
- goto free_completion_irq;
+ goto free_event_irq;
}
for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0;
@@ -3582,6 +3612,10 @@ free_user_irq:
gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
+free_event_irq:
+ irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
+ free_irq(irq, cq);
+
free_completion_irq:
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
free_irq(irq, cq);
@@ -3745,14 +3779,16 @@ static void gaudi2_stop_dec(struct hl_device *hdev)
gaudi2_stop_pcie_dec(hdev);
}
-static void gaudi2_halt_arc(struct hl_device *hdev, u32 cpu_id)
+static void gaudi2_set_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode)
{
u32 reg_base, reg_val;
reg_base = gaudi2_arc_blocks_bases[cpu_id];
+ if (run_mode == HL_ENGINE_CORE_RUN)
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 1);
+ else
+ reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1);
- /* Halt ARC */
- reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1);
WREG32(reg_base + ARC_HALT_REQ_OFFSET, reg_val);
}
@@ -3762,8 +3798,35 @@ static void gaudi2_halt_arcs(struct hl_device *hdev)
for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++) {
if (gaudi2_is_arc_enabled(hdev, arc_id))
- gaudi2_halt_arc(hdev, arc_id);
+ gaudi2_set_arc_running_mode(hdev, arc_id, HL_ENGINE_CORE_HALT);
+ }
+}
+
+static int gaudi2_verify_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode)
+{
+ int rc;
+ u32 reg_base, val, ack_mask, timeout_usec = 100000;
+
+ if (hdev->pldm)
+ timeout_usec *= 100;
+
+ reg_base = gaudi2_arc_blocks_bases[cpu_id];
+ if (run_mode == HL_ENGINE_CORE_RUN)
+ ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_MASK;
+ else
+ ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK;
+
+ rc = hl_poll_timeout(hdev, reg_base + ARC_HALT_ACK_OFFSET,
+ val, ((val & ack_mask) == ack_mask),
+ 1000, timeout_usec);
+
+ if (!rc) {
+ /* Clear */
+ val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 0);
+ WREG32(reg_base + ARC_HALT_REQ_OFFSET, val);
}
+
+ return rc;
}
static void gaudi2_reset_arcs(struct hl_device *hdev)
@@ -3790,8 +3853,39 @@ static void gaudi2_nic_qmans_manual_flush(struct hl_device *hdev)
queue_id = GAUDI2_QUEUE_ID_NIC_0_0;
- for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN)
+ for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) {
+ if (!(hdev->nic_ports_mask & BIT(i)))
+ continue;
+
gaudi2_qman_manual_flush_common(hdev, queue_id);
+ }
+}
+
+static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids,
+ u32 num_cores, u32 core_command)
+{
+ int i, rc;
+
+ for (i = 0 ; i < num_cores ; i++) {
+ if (gaudi2_is_arc_enabled(hdev, core_ids[i]))
+ gaudi2_set_arc_running_mode(hdev, core_ids[i], core_command);
+ }
+
+ for (i = 0 ; i < num_cores ; i++) {
+ if (gaudi2_is_arc_enabled(hdev, core_ids[i])) {
+ rc = gaudi2_verify_arc_running_mode(hdev, core_ids[i], core_command);
+
+ if (rc) {
+ dev_err(hdev->dev, "failed to %s arc: %d\n",
+ (core_command == HL_ENGINE_CORE_HALT) ?
+ "HALT" : "RUN", core_ids[i]);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
}
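A simplified userspace model of the request-then-verify flow above: first ask every enabled core to change mode, then poll each one for an ack, so the mode changes proceed in parallel rather than serially. All functions below are illustrative stand-ins, not driver API:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CORES 4

static bool core_enabled(int id)        { return true; }
static void request_mode(int id, int m) { printf("core %d: request %d\n", id, m); }
static bool mode_acked(int id, int m)   { return true; /* would poll an ack register */ }

static int set_cores_mode(int mode)
{
	int i;

	/* pass 1: issue the request to every enabled core */
	for (i = 0; i < NUM_CORES; i++)
		if (core_enabled(i))
			request_mode(i, mode);

	/* pass 2: verify each core acknowledged the new mode */
	for (i = 0; i < NUM_CORES; i++)
		if (core_enabled(i) && !mode_acked(i, mode))
			return -1; /* the driver reports which core failed */

	return 0;
}

int main(void)
{
	return set_cores_mode(1);
}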
static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
@@ -4124,11 +4218,15 @@ static void gaudi2_init_qman_common(struct hl_device *hdev, u32 reg_base,
WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0);
/* Enable the QMAN channel.
- * PDMA1 QMAN configuration is different, as we do not allow user to
- * access CP2/3, it is reserved for the ARC usage.
+ * PDMA QMAN configuration is different, as we do not allow the user to
+ * access some of the CPs.
+ * PDMA0: CP2/3 are reserved for the ARC usage.
+ * PDMA1: CP1/2/3 are reserved for the ARC usage.
*/
if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0])
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA1_QMAN_ENABLE);
+ else if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0])
+ WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA0_QMAN_ENABLE);
else
WREG32(reg_base + QM_GLBL_CFG0_OFFSET, QMAN_ENABLE);
}
@@ -4501,10 +4599,10 @@ struct gaudi2_tpc_init_cfg_data {
};
static void gaudi2_init_tpc_config(struct hl_device *hdev, int dcore, int inst,
- u32 offset, void *data)
+ u32 offset, struct iterate_module_ctx *ctx)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
- struct gaudi2_tpc_init_cfg_data *cfg_data = data;
+ struct gaudi2_tpc_init_cfg_data *cfg_data = ctx->data;
u32 queue_id_base;
u8 seq;
@@ -4956,8 +5054,7 @@ static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base)
return 0;
}
-static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base,
- u32 stlb_base)
+static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base)
{
u32 status, timeout_usec;
int rc;
@@ -4985,7 +5082,6 @@ static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base,
return rc;
WREG32(mmu_base + MMU_BYPASS_OFFSET, 0);
- WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, 0xF);
rc = hl_poll_timeout(
hdev,
@@ -5042,6 +5138,8 @@ static int gaudi2_pci_mmu_init(struct hl_device *hdev)
DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK);
}
+ WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_PMMU_SPI_SEI_ENABLE_MASK);
+
rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
if (rc)
return rc;
@@ -5092,6 +5190,8 @@ static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id,
RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, 1,
STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK);
+ WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_HMMU_SPI_SEI_ENABLE_MASK);
+
rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base);
if (rc)
return rc;
@@ -5339,7 +5439,10 @@ static void gaudi2_execute_soft_reset(struct hl_device *hdev, u32 reset_sleep_ms
if (!driver_performs_reset) {
/* set SP to indicate reset request sent to FW */
- WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
+ if (dyn_regs->cpu_rst_status)
+ WREG32(le32_to_cpu(dyn_regs->cpu_rst_status), CPU_RST_STATUS_NA);
+ else
+ WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
WREG32(le32_to_cpu(dyn_regs->gic_host_soft_rst_irq),
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
@@ -5527,10 +5630,11 @@ static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id)
u64 hw_test_cap_bit = 0;
switch (hw_queue_id) {
- case GAUDI2_QUEUE_ID_PDMA_0_0 ... GAUDI2_QUEUE_ID_PDMA_1_1:
+ case GAUDI2_QUEUE_ID_PDMA_0_0:
+ case GAUDI2_QUEUE_ID_PDMA_0_1:
+ case GAUDI2_QUEUE_ID_PDMA_1_0:
hw_cap_mask = HW_CAP_PDMA_MASK;
break;
-
case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3:
hw_test_cap_bit = HW_CAP_EDMA_SHIFT +
((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0) >> 2);
@@ -6129,7 +6233,7 @@ done:
return ret_val;
}
-static int gaudi2_non_hard_reset_late_init(struct hl_device *hdev)
+static int gaudi2_compute_reset_late_init(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
size_t irq_arr_size;
@@ -6147,9 +6251,9 @@ static int gaudi2_non_hard_reset_late_init(struct hl_device *hdev)
}
static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_idle_data *idle_data = (struct gaudi2_tpc_idle_data *)data;
+ struct gaudi2_tpc_idle_data *idle_data = ctx->data;
u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts;
bool is_eng_idle;
int engine_idx;
@@ -6172,14 +6276,15 @@ static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int ins
if (idle_data->mask && !is_eng_idle)
set_bit(engine_idx, idle_data->mask);
- if (idle_data->s)
- seq_printf(idle_data->s, idle_data->tpc_fmt, dcore, inst,
+ if (idle_data->e)
+ hl_engine_data_sprintf(idle_data->e,
+ idle_data->tpc_fmt, dcore, inst,
is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
}
-static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s)
+static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_idle_ind_mask,
mme_arch_sts, dec_swreg15, dec_enabled_bit;
@@ -6197,7 +6302,7 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
struct gaudi2_tpc_idle_data tpc_idle_data = {
.tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
- .s = s,
+ .e = e,
.mask = mask,
.is_idle = &is_idle,
};
@@ -6209,8 +6314,8 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
int engine_idx, i, j;
/* EDMA, Two engines per Dcore */
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
"---- ---- ------- ------------ ----------------------\n");
@@ -6239,19 +6344,19 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, edma_fmt, i, j,
- is_eng_idle ? "Y" : "N",
- qm_glbl_sts0,
- dma_core_idle_ind_mask);
+ if (e)
+ hl_engine_data_sprintf(e, edma_fmt, i, j,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0,
+ dma_core_idle_ind_mask);
}
}
/* PDMA, Two engines in Full chip */
- if (s)
- seq_puts(s,
- "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
- "---- ------- ------------ ----------------------\n");
+ if (e)
+ hl_engine_data_sprintf(e,
+ "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_IDLE_IND_MASK\n"
+ "---- ------- ------------ ----------------------\n");
for (i = 0 ; i < NUM_OF_PDMA ; i++) {
engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i;
@@ -6269,16 +6374,16 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, pdma_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
- dma_core_idle_ind_mask);
+ if (e)
+ hl_engine_data_sprintf(e, pdma_fmt, i, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, dma_core_idle_ind_mask);
}
/* NIC, twelve macros in Full chip */
- if (s && hdev->nic_ports_mask)
- seq_puts(s,
- "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
- "--- ------- ------------ ----------\n");
+ if (e && hdev->nic_ports_mask)
+ hl_engine_data_sprintf(e,
+ "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
+ "--- ------- ------------ ----------\n");
for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
if (!(i & 1))
@@ -6302,15 +6407,15 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, nic_fmt, i, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
- qm_cgm_sts);
+ if (e)
+ hl_engine_data_sprintf(e, nic_fmt, i, is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts);
}
- if (s)
- seq_puts(s,
- "\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
- "--- ---- ------- ------------ ---------------\n");
+ if (e)
+ hl_engine_data_sprintf(e,
+ "\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
+ "--- ---- ------- ------------ ---------------\n");
/* MME, one per Dcore */
for (i = 0 ; i < NUM_OF_DCORES ; i++) {
engine_idx = GAUDI2_DCORE0_ENGINE_ID_MME + i * GAUDI2_ENGINE_ID_DCORE_OFFSET;
@@ -6327,8 +6432,8 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
is_eng_idle &= IS_MME_IDLE(mme_arch_sts);
is_idle &= is_eng_idle;
- if (s)
- seq_printf(s, mme_fmt, i, "N",
+ if (e)
+ hl_engine_data_sprintf(e, mme_fmt, i, "N",
is_eng_idle ? "Y" : "N",
qm_glbl_sts0,
mme_arch_sts);
@@ -6340,16 +6445,16 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
/*
* TPC
*/
- if (s && prop->tpc_enabled_mask)
- seq_puts(s,
+ if (e && prop->tpc_enabled_mask)
+ hl_engine_data_sprintf(e,
"\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_IDLE_IND_MASK\n"
"---- --- -------- ------------ ---------- ----------------------\n");
gaudi2_iterate_tpcs(hdev, &tpc_iter);
/* Decoders, two each Dcore and two shared PCIe decoders */
- if (s && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
- seq_puts(s,
+ if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK)))
+ hl_engine_data_sprintf(e,
"\nCORE DEC is_idle VSI_CMD_SWREG15\n"
"---- --- ------- ---------------\n");
@@ -6370,13 +6475,14 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, dec_fmt, i, j, is_eng_idle ? "Y" : "N", dec_swreg15);
+ if (e)
+ hl_engine_data_sprintf(e, dec_fmt, i, j,
+ is_eng_idle ? "Y" : "N", dec_swreg15);
}
}
- if (s && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
- seq_puts(s,
+ if (e && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK))
+ hl_engine_data_sprintf(e,
"\nPCIe DEC is_idle VSI_CMD_SWREG15\n"
"-------- ------- ---------------\n");
@@ -6395,12 +6501,13 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, pcie_dec_fmt, i, is_eng_idle ? "Y" : "N", dec_swreg15);
+ if (e)
+ hl_engine_data_sprintf(e, pcie_dec_fmt, i,
+ is_eng_idle ? "Y" : "N", dec_swreg15);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nCORE ROT is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
"---- ---- ------- ------------ ---------- -------------\n");
@@ -6419,8 +6526,8 @@ static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(engine_idx, mask);
- if (s)
- seq_printf(s, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
+ if (e)
+ hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, qm_cgm_sts, "-");
}
@@ -6443,22 +6550,6 @@ static void gaudi2_hw_queues_unlock(struct hl_device *hdev)
spin_unlock(&gaudi2->hw_queues_lock);
}
-static void gaudi2_kdma_lock(struct hl_device *hdev, int dcore_id)
- __acquires(&gaudi2->kdma_lock)
-{
- struct gaudi2_device *gaudi2 = hdev->asic_specific;
-
- spin_lock(&gaudi2->kdma_lock);
-}
-
-static void gaudi2_kdma_unlock(struct hl_device *hdev, int dcore_id)
- __releases(&gaudi2->kdma_lock)
-{
- struct gaudi2_device *gaudi2 = hdev->asic_specific;
-
- spin_unlock(&gaudi2->kdma_lock);
-}
-
static u32 gaudi2_get_pci_id(struct hl_device *hdev)
{
return hdev->pdev->device;
@@ -6725,9 +6816,9 @@ static int gaudi2_mmu_shared_prepare(struct hl_device *hdev, u32 asid)
}
static void gaudi2_tpc_mmu_prepare(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_mmu_data *mmu_data = (struct gaudi2_tpc_mmu_data *)data;
+ struct gaudi2_tpc_mmu_data *mmu_data = ctx->data;
WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0);
WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_ASID + offset, mmu_data->rw_asid);
@@ -7020,10 +7111,6 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg);
razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg);
}
-
- dev_err_ratelimited(hdev->dev,
- "%s-RAZWI SHARED RR HBW WR error, captured address HI 0x%x LO 0x%x, Initiator coordinates 0x%x\n",
- name, razwi_hi, razwi_lo, razwi_xy);
} else {
if (read_razwi_regs) {
razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
@@ -7034,11 +7121,11 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg);
razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg);
}
-
- dev_err_ratelimited(hdev->dev,
- "%s-RAZWI SHARED RR HBW AR error, captured address HI 0x%x LO 0x%x, Initiator coordinates 0x%x\n",
- name, razwi_hi, razwi_lo, razwi_xy);
}
+
+ dev_err_ratelimited(hdev->dev,
+ "%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n",
+ name, is_write ? "WR" : "RD", (u64)razwi_hi << 32 | razwi_lo, razwi_xy);
}
static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev,
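The hunk above folds the two per-direction prints into one and combines the HI/LO capture registers into a single 64-bit address; the cast before the shift matters, since shifting a 32-bit value left by 32 is undefined. Standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t razwi_hi = 0x00001000, razwi_lo = 0xdeadbeef;
	/* widen to 64 bits before shifting, then OR in the low half */
	uint64_t addr = (uint64_t)razwi_hi << 32 | razwi_lo;

	printf("captured address %#llx\n", (unsigned long long)addr);
	return 0;
}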
@@ -7296,7 +7383,79 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
}
-static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev,
+static const char *gaudi2_get_initiators_name(u32 rtr_id)
+{
+ switch (rtr_id) {
+ case DCORE0_RTR0:
+ return "DEC0/1/8/9, TPC24, PDMA0/1, PMMU, PCIE_IF, EDMA0/2, HMMU0/2/4/6, CPU";
+ case DCORE0_RTR1:
+ return "TPC0/1";
+ case DCORE0_RTR2:
+ return "TPC2/3";
+ case DCORE0_RTR3:
+ return "TPC4/5";
+ case DCORE0_RTR4:
+ return "MME0_SBTE0/1";
+ case DCORE0_RTR5:
+ return "MME0_WAP0/SBTE2";
+ case DCORE0_RTR6:
+ return "MME0_CTRL_WR/SBTE3";
+ case DCORE0_RTR7:
+ return "MME0_WAP1/CTRL_RD/SBTE4";
+ case DCORE1_RTR0:
+ return "MME1_WAP1/CTRL_RD/SBTE4";
+ case DCORE1_RTR1:
+ return "MME1_CTRL_WR/SBTE3";
+ case DCORE1_RTR2:
+ return "MME1_WAP0/SBTE2";
+ case DCORE1_RTR3:
+ return "MME1_SBTE0/1";
+ case DCORE1_RTR4:
+ return "TPC10/11";
+ case DCORE1_RTR5:
+ return "TPC8/9";
+ case DCORE1_RTR6:
+ return "TPC6/7";
+ case DCORE1_RTR7:
+ return "DEC2/3, NIC0/1/2/3/4, ARC_FARM, KDMA, EDMA1/3, HMMU1/3/5/7";
+ case DCORE2_RTR0:
+ return "DEC4/5, NIC5/6/7/8, EDMA4/6, HMMU8/10/12/14, ROT0";
+ case DCORE2_RTR1:
+ return "TPC16/17";
+ case DCORE2_RTR2:
+ return "TPC14/15";
+ case DCORE2_RTR3:
+ return "TPC12/13";
+ case DCORE2_RTR4:
+ return "MME2_SBTE0/1";
+ case DCORE2_RTR5:
+ return "MME2_WAP0/SBTE2";
+ case DCORE2_RTR6:
+ return "MME2_CTRL_WR/SBTE3";
+ case DCORE2_RTR7:
+ return "MME2_WAP1/CTRL_RD/SBTE4";
+ case DCORE3_RTR0:
+ return "MME3_WAP1/CTRL_RD/SBTE4";
+ case DCORE3_RTR1:
+ return "MME3_CTRL_WR/SBTE3";
+ case DCORE3_RTR2:
+ return "MME3_WAP0/SBTE2";
+ case DCORE3_RTR3:
+ return "MME3_SBTE0/1";
+ case DCORE3_RTR4:
+ return "TPC18/19";
+ case DCORE3_RTR5:
+ return "TPC20/21";
+ case DCORE3_RTR6:
+ return "TPC22/23";
+ case DCORE3_RTR7:
+ return "DEC6/7, NIC9/10/11, EDMA5/7, HMMU9/11/13/15, ROT1, PSOC";
+ default:
+ return "N/A";
+ }
+}
+
+static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev, u32 rtr_id,
u64 rtr_ctrl_base_addr, bool is_write)
{
u32 razwi_hi, razwi_lo;
@@ -7305,50 +7464,47 @@ static void gaudi2_razwi_unmapped_addr_hbw_printf_info(struct hl_device *hdev,
razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_HI);
razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_ADDR_LO);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped HBW WR error, ctr_base 0x%llx, captured address HI 0x%x, LO 0x%x\n",
- rtr_ctrl_base_addr, razwi_hi, razwi_lo);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1);
} else {
razwi_hi = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_HI);
-
razwi_lo = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_ADDR_LO);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped HBW AR error, ctr_base 0x%llx, captured address HI 0x%x, LO 0x%x\n",
- rtr_ctrl_base_addr, razwi_hi, razwi_lo);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1);
}
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped HBW %s error, rtr id %u, address %#llx\n",
+ is_write ? "WR" : "RD", rtr_id, (u64)razwi_hi << 32 | razwi_lo);
+
+ dev_err_ratelimited(hdev->dev,
+ "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
}
-static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev,
- u64 rtr_ctrl_base_addr, bool is_write)
+static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u32 rtr_id,
+ u64 rtr_ctrl_base_addr, bool is_write)
{
u32 razwi_addr;
if (is_write) {
razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW WR error, ctr_base 0x%llx, captured address 0x%x\n",
- rtr_ctrl_base_addr, razwi_addr);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1);
} else {
razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
- dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW AR error, ctr_base 0x%llx, captured address 0x%x\n",
- rtr_ctrl_base_addr, razwi_addr);
-
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1);
}
+
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI PSOC unmapped LBW %s error, rtr id %u, address %#x\n",
+ is_write ? "WR" : "RD", rtr_id, razwi_addr);
+
+ dev_err_ratelimited(hdev->dev,
+ "Initiators: %s\n", gaudi2_get_initiators_name(rtr_id));
}
/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
@@ -7366,21 +7522,16 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev)
}
razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
-
- xy = (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_SHIFT;
+ xy = FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info);
dev_err_ratelimited(hdev->dev,
- "PSOC RAZWI interrupt: Mask %d, WAS_AR %d, WAS_AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
- (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_SHIFT,
- (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_SHIFT,
- (razwi_mask_info & PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_SHIFT, xy,
- (razwi_mask_info &
- PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK)
- >> PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_SHIFT);
+ "PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n",
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK, razwi_mask_info),
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK, razwi_mask_info),
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK, razwi_mask_info),
+ xy,
+ FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK, razwi_mask_info));
+
if (xy == 0) {
dev_err_ratelimited(hdev->dev,
"PSOC RAZWI interrupt: received event from 0 rtr coordinates\n");
@@ -7410,16 +7561,20 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev)
lbw_ar_set = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET);
if (hbw_aw_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_ctrl_base_addr, true);
+ gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, true);
if (hbw_ar_set)
- gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_ctrl_base_addr, false);
+ gaudi2_razwi_unmapped_addr_hbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, false);
if (lbw_aw_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_ctrl_base_addr, true);
+ gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, true);
if (lbw_ar_set)
- gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_ctrl_base_addr, false);
+ gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
+ rtr_ctrl_base_addr, false);
clear:
/* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
@@ -7811,14 +7966,58 @@ static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_
gaudi2_dma_core_interrupts_cause[i]);
}
+static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev)
+{
+ u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr;
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+
+ razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED;
+ if (RREG32(razwi_happened_addr)) {
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
+ NULL);
+ WREG32(razwi_happened_addr, 0x1);
+ }
+}
+
static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data)
{
int i;
- for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE; i++)
- if (intr_cause_data & BIT_ULL(i))
- dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
- gaudi2_pcie_addr_dec_error_cause[i]);
+ for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
+ if (!(intr_cause_data & BIT_ULL(i)))
+ continue;
+
+ dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
+ gaudi2_pcie_addr_dec_error_cause[i]);
+
+ switch (intr_cause_data & BIT_ULL(i)) {
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
+ break;
+ case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
+ gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev);
+ break;
+ }
+ }
}
static void gaudi2_handle_pif_fatal(struct hl_device *hdev, u64 intr_cause_data)
@@ -8158,10 +8357,17 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
return true;
}
- dev_err_ratelimited(hdev->dev,
- "System Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Critical(%u). Error cause: %s\n",
- hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
- sei_data->hdr.is_critical, hbm_mc_sei_cause[cause_idx]);
+ if (sei_data->hdr.is_critical)
+ dev_err(hdev->dev,
+ "System Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ hbm_mc_sei_cause[cause_idx]);
+
+ else
+ dev_err_ratelimited(hdev->dev,
+ "System Non-Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ hbm_mc_sei_cause[cause_idx]);
/* Print error-specific info */
switch (cause_idx) {
@@ -8371,6 +8577,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
struct gaudi2_device *gaudi2 = hdev->asic_specific;
bool reset_required = false, skip_reset = false;
int index, sbte_index;
+ u64 event_mask = 0;
u16 event_type;
ctl = le32_to_cpu(eq_entry->hdr.ctl);
@@ -8392,6 +8599,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
fallthrough;
case GAUDI2_EVENT_ROTATOR0_SERR ... GAUDI2_EVENT_ROTATOR1_DERR:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
break;
@@ -8401,21 +8609,25 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
fallthrough;
case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1:
gaudi2_handle_qman_err(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
gaudi2_handle_arc_farm_sei_err(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ERR_RSP:
gaudi2_handle_cpu_sei_err(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
@@ -8423,6 +8635,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause);
gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
@@ -8430,11 +8643,13 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP",
&eq_entry->razwi_with_intr_cause);
gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE:
index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE;
gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_KERNEL_ERR:
@@ -8465,6 +8680,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) /
(GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR);
gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_SPI:
@@ -8480,6 +8696,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_DEC0_SPI) /
(GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI);
gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE:
@@ -8492,6 +8709,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_handle_mme_err(hdev, index,
"CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info);
gaudi2_handle_qm_sei_err(hdev, event_type, NULL);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_QMAN_SW_ERROR:
@@ -8502,6 +8720,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
(GAUDI2_EVENT_MME1_QMAN_SW_ERROR -
GAUDI2_EVENT_MME0_QMAN_SW_ERROR);
gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID:
@@ -8512,22 +8731,27 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
(GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID -
GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID);
gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_KDMA0_CORE:
gaudi2_handle_kdma_core_event(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE:
gaudi2_handle_dma_core_event(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR:
gaudi2_print_pcie_addr_dec_info(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
@@ -8536,25 +8760,30 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
gaudi2_handle_mmu_spi_sei_err(hdev, event_type);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL:
gaudi2_handle_hif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PMMU_FATAL_0:
gaudi2_handle_pif_fatal(hdev,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT:
gaudi2_ack_psoc_razwi_event_handler(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE ... GAUDI2_EVENT_HBM5_MC1_SEI_NON_SEVERE:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
@@ -8563,25 +8792,31 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5:
gaudi2_handle_hbm_cattrip(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI:
gaudi2_handle_hbm_mc_spi(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE:
gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
gaudi2_handle_psoc_drain(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ECC:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_L2_RAM_ECC:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME0_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP:
@@ -8595,17 +8830,24 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
gaudi2_handle_mme_sbte_err(hdev, index, sbte_index,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_AXI_ERR_RSP:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ break;
case GAUDI2_EVENT_PSOC_PRSTN_FALL:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_APB_TIMEOUT:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FATAL_ERR:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_TPC0_BMON_SPMU:
case GAUDI2_EVENT_TPC1_BMON_SPMU:
@@ -8657,6 +8899,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_DEC8_BMON_SPMU:
case GAUDI2_EVENT_DEC9_BMON_SPMU:
case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU:
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S:
@@ -8664,43 +8907,53 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
gaudi2_print_clk_change_info(hdev, event_type);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC:
gaudi2_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FLR_REQUESTED:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
/* Do nothing - FW will handle it */
break;
case GAUDI2_EVENT_PCIE_P2P_MSIX:
gaudi2_handle_pcie_p2p_msix(hdev);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE;
skip_reset = !gaudi2_handle_sm_err(hdev, index);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR:
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT:
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
gaudi2_print_cpu_pkt_failure_info(hdev, &eq_entry->pkt_sync_err);
+ event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_ARC_DCCM_FULL:
hl_arc_event_handle(hdev, &eq_entry->arc_data);
+ event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
default:
@@ -8716,15 +8969,22 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
if (!gaudi2_irq_map_table[event_type].msg)
hl_fw_unmask_irq(hdev, event_type);
+ if (event_mask)
+ hl_notifier_event_send_all(hdev, event_mask);
+
return;
reset_device:
if (hdev->hard_reset_on_fw_events) {
hl_device_reset(hdev, reset_flags);
+ event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
} else {
if (!gaudi2_irq_map_table[event_type].msg)
hl_fw_unmask_irq(hdev, event_type);
}
+
+ if (event_mask)
+ hl_notifier_event_send_all(hdev, event_mask);
}
static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val)
@@ -9090,19 +9350,17 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
}
/* Create mapping on asic side */
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
hl_mmu_invalidate_cache_range(hdev, false,
MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc) {
dev_err(hdev->dev, "Failed to create mapping on asic mmu\n");
goto unreserve_va;
}
- hdev->asic_funcs->kdma_lock(hdev, 0);
-
/* Enable MMU on KDMA */
gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
@@ -9130,13 +9388,11 @@ static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, v
gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
- hdev->asic_funcs->kdma_unlock(hdev, 0);
-
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
ctx->asid, reserved_va_base, SZ_2M);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
unreserve_va:
hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
free_data_buffer:
@@ -9189,11 +9445,11 @@ static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *c
goto destroy_internal_cb_pool;
}
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
if (rc)
goto unreserve_internal_cb_pool;
@@ -9218,11 +9474,11 @@ static void gaudi2_internal_cb_pool_fini(struct hl_device *hdev, struct hl_ctx *
if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU))
return;
- mutex_lock(&ctx->mmu_lock);
+ mutex_lock(&hdev->mmu_lock);
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
- mutex_unlock(&ctx->mmu_lock);
+ mutex_unlock(&hdev->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
@@ -9336,7 +9592,7 @@ static u32 gaudi2_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
static u32 gaudi2_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, u32 size, bool eb)
{
- struct hl_cb *cb = (struct hl_cb *) data;
+ struct hl_cb *cb = data;
struct packet_msg_short *pkt;
u32 value, ctl, pkt_size = sizeof(*pkt);
@@ -9429,7 +9685,7 @@ static u32 gaudi2_add_fence_pkt(struct packet_fence *pkt)
static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop)
{
- struct hl_cb *cb = (struct hl_cb *) prop->data;
+ struct hl_cb *cb = prop->data;
void *buf = (void *) (uintptr_t) (cb->kernel_address);
u64 monitor_base, fence_addr = 0;
@@ -9481,7 +9737,7 @@ static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_propert
static void gaudi2_reset_sob(struct hl_device *hdev, void *data)
{
- struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+ struct hl_hw_sob *hw_sob = data;
dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, hw_sob->sob_id);
@@ -9724,7 +9980,7 @@ static int gaudi2_get_mmu_base(struct hl_device *hdev, u64 mmu_id, u32 *mmu_base
static void gaudi2_ack_mmu_error(struct hl_device *hdev, u64 mmu_id)
{
- bool is_pmmu = (mmu_id == HW_CAP_PMMU ? true : false);
+ bool is_pmmu = (mmu_id == HW_CAP_PMMU);
struct gaudi2_device *gaudi2 = hdev->asic_specific;
u32 mmu_base;
@@ -9881,6 +10137,17 @@ static int gaudi2_get_monitor_dump(struct hl_device *hdev, void *data)
return -EOPNOTSUPP;
}
+int gaudi2_send_device_activity(struct hl_device *hdev, bool open)
+{
+ struct gaudi2_device *gaudi2 = hdev->asic_specific;
+
+ if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q) || hdev->fw_major_version < 37)
+ return 0;
+
+ /* TODO: add check for FW version using minor ver once it's known */
+ return hl_fw_send_device_activity(hdev, open);
+}
+
static const struct hl_asic_funcs gaudi2_funcs = {
.early_init = gaudi2_early_init,
.early_fini = gaudi2_early_fini,
@@ -9927,11 +10194,9 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.send_heartbeat = gaudi2_send_heartbeat,
.debug_coresight = gaudi2_debug_coresight,
.is_device_idle = gaudi2_is_device_idle,
- .non_hard_reset_late_init = gaudi2_non_hard_reset_late_init,
+ .compute_reset_late_init = gaudi2_compute_reset_late_init,
.hw_queues_lock = gaudi2_hw_queues_lock,
.hw_queues_unlock = gaudi2_hw_queues_unlock,
- .kdma_lock = gaudi2_kdma_lock,
- .kdma_unlock = gaudi2_kdma_unlock,
.get_pci_id = gaudi2_get_pci_id,
.get_eeprom_data = gaudi2_get_eeprom_data,
.get_monitor_dump = gaudi2_get_monitor_dump,
@@ -9978,6 +10243,8 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.mmu_get_real_page_size = gaudi2_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi2_set_hbm_bar_base,
+ .set_engine_cores = gaudi2_set_engine_cores,
+ .send_device_activity = gaudi2_send_device_activity,
};
void gaudi2_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/misc/habanalabs/gaudi2/gaudi2P.h
index e4bc4009f05b..a99c348bbf39 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2P.h
@@ -15,7 +15,6 @@
#include "../include/gaudi2/gaudi2_packets.h"
#include "../include/gaudi2/gaudi2_fw_if.h"
#include "../include/gaudi2/gaudi2_async_events.h"
-#include "../include/gaudi2/gaudi2_async_virt_events.h"
#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
@@ -140,9 +139,6 @@
#define VA_HOST_SPACE_HPAGE_START 0xFFF0800000000000ull
#define VA_HOST_SPACE_HPAGE_END 0xFFF1000000000000ull /* 140TB */
-#define VA_HOST_SPACE_USER_MAPPED_CB_START 0xFFF1000000000000ull
-#define VA_HOST_SPACE_USER_MAPPED_CB_END 0xFFF1000100000000ull /* 4GB */
-
/* 140TB */
#define VA_HOST_SPACE_PAGE_SIZE (VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)
@@ -458,7 +454,6 @@ struct dup_block_ctx {
* the user can map.
* @lfsr_rand_seeds: array of MME ACC random seeds to set.
* @hw_queues_lock: protects the H/W queues from concurrent access.
- * @kdma_lock: protects the KDMA engine from concurrent access.
* @scratchpad_kernel_address: general purpose PAGE_SIZE contiguous memory,
* this memory region should be write-only.
* currently used for HBW QMAN writes which is
@@ -510,9 +505,6 @@ struct dup_block_ctx {
* @flush_db_fifo: flag to force flush DB FIFO after a write.
* @hbm_cfg: HBM subsystem settings
* @hw_queues_lock_mutex: used by simulator instead of hw_queues_lock.
- * @kdma_lock_mutex: used by simulator instead of kdma_lock.
- * @use_deprecated_event_mappings: use old event mappings which are about to be
- * deprecated
*/
struct gaudi2_device {
int (*cpucp_info_get)(struct hl_device *hdev);
@@ -521,7 +513,6 @@ struct gaudi2_device {
int lfsr_rand_seeds[MME_NUM_OF_LFSR_SEEDS];
spinlock_t hw_queues_lock;
- spinlock_t kdma_lock;
void *scratchpad_kernel_address;
dma_addr_t scratchpad_bus_address;
@@ -562,5 +553,6 @@ void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32
u32 offended_addr);
int gaudi2_init_security(struct hl_device *hdev);
void gaudi2_ack_protection_bits_errors(struct hl_device *hdev);
+int gaudi2_send_device_activity(struct hl_device *hdev, bool open);
#endif /* GAUDI2P_H_ */
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h b/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
index eed16d642a5a..e9ac87828221 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
@@ -51,12 +51,18 @@
(0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
-#define PDMA1_QMAN_ENABLE \
+#define PDMA0_QMAN_ENABLE \
((0x3 << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
+#define PDMA1_QMAN_ENABLE \
+ ((0x1 << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
+ (0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
+
/* QM_IDLE_MASK is valid for the QM idle check of all engines */
#define QM_IDLE_MASK (DCORE0_EDMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \
DCORE0_EDMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \
@@ -138,4 +144,17 @@
#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_SHIFT 15
#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK 0x8000
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_SHIFT 0
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_MASK 0x1
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_SHIFT 1
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK 0x2
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_SHIFT 2
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK 0x4
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_MASK_SHIFT 3
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_ERR_INTR_MASK_MASK 0x8
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK_SHIFT 4
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK_MASK 0x10
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK_SHIFT 5
+#define PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK_MASK 0x20
+
#endif /* GAUDI2_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
index 89a06ff5ba34..c6906fb14229 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
@@ -2559,6 +2559,10 @@ static const u32 gaudi2_pb_pcie[] = {
mmPCIE_WRAP_BASE,
};
+static const u32 gaudi2_pb_pcie_unsecured_regs[] = {
+ mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0,
+};
+
static const u32 gaudi2_pb_thermal_sensor0[] = {
mmDCORE0_XFT_BASE,
mmDCORE0_TSTDVS_BASE,
@@ -2583,9 +2587,9 @@ struct gaudi2_tpc_pb_data {
};
static void gaudi2_config_tpcs_glbl_sec(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_pb_data *pb_data = (struct gaudi2_tpc_pb_data *)data;
+ struct gaudi2_tpc_pb_data *pb_data = ctx->data;
hl_config_glbl_sec(hdev, gaudi2_pb_dcr0_tpc0, pb_data->glbl_sec,
offset, pb_data->block_array_size);
@@ -2660,15 +2664,14 @@ static int gaudi2_init_pb_tpc(struct hl_device *hdev)
struct gaudi2_tpc_arc_pb_data {
u32 unsecured_regs_arr_size;
u32 arc_regs_arr_size;
- int rc;
};
static void gaudi2_config_tpcs_pb_ranges(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_tpc_arc_pb_data *pb_data = (struct gaudi2_tpc_arc_pb_data *)data;
+ struct gaudi2_tpc_arc_pb_data *pb_data = ctx->data;
- pb_data->rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 1,
+ ctx->rc = hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA, 1,
offset, gaudi2_pb_dcr0_tpc0_arc,
pb_data->arc_regs_arr_size,
gaudi2_pb_dcr0_tpc0_arc_unsecured_regs,
@@ -2683,12 +2686,12 @@ static int gaudi2_init_pb_tpc_arc(struct hl_device *hdev)
tpc_arc_pb_data.arc_regs_arr_size = ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc);
tpc_arc_pb_data.unsecured_regs_arr_size =
ARRAY_SIZE(gaudi2_pb_dcr0_tpc0_arc_unsecured_regs);
- tpc_arc_pb_data.rc = 0;
+
tpc_iter.fn = &gaudi2_config_tpcs_pb_ranges;
tpc_iter.data = &tpc_arc_pb_data;
gaudi2_iterate_tpcs(hdev, &tpc_iter);
- return tpc_arc_pb_data.rc;
+ return tpc_iter.rc;
}
static int gaudi2_init_pb_sm_objs(struct hl_device *hdev)
@@ -3419,7 +3422,8 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
rc |= hl_init_pb(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_pcie, ARRAY_SIZE(gaudi2_pb_pcie),
- NULL, HL_PB_NA);
+ gaudi2_pb_pcie_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_pcie_unsecured_regs));
/* Thermal Sensor.
* Skip when security is enabled in F/W, because the blocks are protected by privileged RR.
@@ -3547,9 +3551,9 @@ struct gaudi2_ack_pb_tpc_data {
};
static void gaudi2_ack_pb_tpc_config(struct hl_device *hdev, int dcore, int inst, u32 offset,
- void *data)
+ struct iterate_module_ctx *ctx)
{
- struct gaudi2_ack_pb_tpc_data *pb_data = (struct gaudi2_ack_pb_tpc_data *)data;
+ struct gaudi2_ack_pb_tpc_data *pb_data = ctx->data;
hl_ack_pb_single_dcore(hdev, offset, HL_PB_SINGLE_INSTANCE, HL_PB_NA,
gaudi2_pb_dcr0_tpc0, pb_data->tpc_regs_array_size);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index db4487c33582..5ef9e3ca97a6 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -916,26 +916,11 @@ int goya_late_init(struct hl_device *hdev)
*/
void goya_late_fini(struct hl_device *hdev)
{
- const struct hwmon_channel_info **channel_info_arr;
struct goya_device *goya = hdev->asic_specific;
- int i = 0;
cancel_delayed_work_sync(&goya->goya_work->work_freq);
- if (!hdev->hl_chip_info->info)
- return;
-
- channel_info_arr = hdev->hl_chip_info->info;
-
- while (channel_info_arr[i]) {
- kfree(channel_info_arr[i]->config);
- kfree(channel_info_arr[i]);
- i++;
- }
-
- kfree(channel_info_arr);
-
- hdev->hl_chip_info->info = NULL;
+ hl_hwmon_release_resources(hdev);
}
static void goya_set_pci_memory_regions(struct hl_device *hdev)
@@ -1040,6 +1025,7 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->asic_prop.supports_compute_reset = true;
hdev->asic_prop.allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
+ hdev->supports_ctx_switch = true;
hdev->asic_funcs->set_pci_memory_regions(hdev);
@@ -4559,7 +4545,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
return rc;
}
-static int goya_non_hard_reset_late_init(struct hl_device *hdev)
+static int goya_compute_reset_late_init(struct hl_device *hdev)
{
/*
* Unmask all IRQs since some could have been received
@@ -5137,8 +5123,8 @@ int goya_cpucp_info_get(struct hl_device *hdev)
return 0;
}
-static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
- u8 mask_len, struct seq_file *s)
+static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
+ struct engines_data *e)
{
const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
@@ -5149,9 +5135,9 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
u64 offset;
int i;
- if (s)
- seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
- "--- ------- ------------ -------------\n");
+ if (e)
+ hl_engine_data_sprintf(e, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
+ "--- ------- ------------ -------------\n");
offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
@@ -5164,13 +5150,13 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
- if (s)
- seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
+ if (e)
+ hl_engine_data_sprintf(e, dma_fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, dma_core_sts0);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
"--- ------- ------------ -------------- ----------\n");
@@ -5187,13 +5173,13 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
- if (s)
- seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
+ if (e)
+ hl_engine_data_sprintf(e, fmt, i, is_eng_idle ? "Y" : "N",
qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
}
- if (s)
- seq_puts(s,
+ if (e)
+ hl_engine_data_sprintf(e,
"\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
"--- ------- ------------ -------------- -----------\n");
@@ -5207,10 +5193,10 @@ static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
if (mask && !is_eng_idle)
set_bit(GOYA_ENGINE_ID_MME_0, mask);
- if (s) {
- seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ if (e) {
+ hl_engine_data_sprintf(e, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
cmdq_glbl_sts0, mme_arch_sts);
- seq_puts(s, "\n");
+ hl_engine_data_sprintf(e, "\n");
}
return is_idle;
@@ -5434,6 +5420,11 @@ static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
return -EOPNOTSUPP;
}
+static int goya_send_device_activity(struct hl_device *hdev, bool open)
+{
+ return 0;
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5478,11 +5469,9 @@ static const struct hl_asic_funcs goya_funcs = {
.send_heartbeat = goya_send_heartbeat,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
- .non_hard_reset_late_init = goya_non_hard_reset_late_init,
+ .compute_reset_late_init = goya_compute_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
.hw_queues_unlock = goya_hw_queues_unlock,
- .kdma_lock = NULL,
- .kdma_unlock = NULL,
.get_pci_id = goya_get_pci_id,
.get_eeprom_data = goya_get_eeprom_data,
.get_monitor_dump = goya_get_monitor_dump,
@@ -5528,6 +5517,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_get_real_page_size = hl_mmu_get_real_page_size,
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = goya_set_ddr_bar_base,
+ .send_device_activity = goya_send_device_activity,
};
/*
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index abf40e1c4965..baa5aa43b6f4 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -629,6 +629,12 @@ enum pq_init_status {
* CPUCP_PACKET_ENGINE_CORE_ASID_SET -
* Packet to perform engine core ASID configuration
*
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with a unique
+ * number (nonce) provided by the host to prevent replay attacks. A public
+ * key and certificate are also provided as part of the FW response.
+ *
* CPUCP_PACKET_MONITOR_DUMP_GET -
* Get monitors registers dump from the CpuCP kernel.
* The CPU will put the registers dump in a buffer allocated by the driver
@@ -636,6 +642,10 @@ enum pq_init_status {
* passes the max size it allows the CpuCP to write to the structure, to prevent
* data corruption in case of mismatched driver/FW versions.
* Relevant only to Gaudi.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends the FW an indication of whether the device is free or in use. This
+ * indication is also reported to the BMC.
*/
enum cpucp_packet_id {
@@ -687,10 +697,17 @@ enum cpucp_packet_id {
CPUCP_PACKET_RESERVED, /* not used */
CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
CPUCP_PACKET_RESERVED3, /* not used */
CPUCP_PACKET_RESERVED4, /* not used */
- CPUCP_PACKET_RESERVED5, /* not used */
CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
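As an aside on the new CPUCP_PACKET_ACTIVE_STATUS_SET opcode above: it is presumably what the send_device_activity callback added elsewhere in this series (via hl_fw_send_device_activity()) uses. Below is a minimal, hypothetical sketch of building and sending such a packet through the common CPU message path, assuming the habanalabs driver context; the use of the value field to carry the open/in-use flag is an assumption, not the driver's actual implementation:

static int example_send_active_status(struct hl_device *hdev, bool open)
{
        struct cpucp_packet pkt;
        u64 result;

        memset(&pkt, 0, sizeof(pkt));
        /* opcode goes into the ctl word, as with the other CPUCP packets */
        pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET <<
                              CPUCP_PKT_CTL_OPCODE_SHIFT);
        pkt.value = cpu_to_le64(open ? 1 : 0);  /* assumed flag encoding */

        return hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt,
                                                  sizeof(pkt), 0, &result);
}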
@@ -783,6 +800,9 @@ struct cpucp_packet {
* result cannot be used to hold general purpose data.
*/
__le32 status_mask;
+
+ /* random number used only once (nonce), for security packets */
+ __le32 nonce;
};
/* For NIC requests */
@@ -813,10 +833,25 @@ enum cpucp_led_index {
CPUCP_LED2_INDEX
};
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> returned on success.
+ * @cpucp_packet_invalid -> kept to support the Goya and Gaudi platforms.
+ * @cpucp_packet_fault -> returned on a processing error, such as failing to
+ * get a device binding or a semaphore.
+ * @cpucp_packet_invalid_pkt -> returned when the cpucp packet is unsupported.
+ * Supported from Greco onwards.
+ * @cpucp_packet_invalid_params -> returned when a parameter check fails, such as
+ * on buffer length or attribute value. Supported from Greco onwards.
+ * @cpucp_packet_rc_max -> indicates the size of the enum, so it must be last.
+ */
enum cpucp_packet_rc {
cpucp_packet_success,
cpucp_packet_invalid,
- cpucp_packet_fault
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
};
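A hedged sketch of how a host-side consumer might map these return codes onto standard error numbers; the mapping below is illustrative only and not taken from the habanalabs driver:

static int example_cpucp_rc_to_errno(enum cpucp_packet_rc rc)
{
        switch (rc) {
        case cpucp_packet_success:
                return 0;
        case cpucp_packet_invalid_pkt:
                return -EOPNOTSUPP;     /* packet not supported by this FW */
        case cpucp_packet_invalid_params:
                return -EINVAL;         /* parameter validation failed */
        case cpucp_packet_invalid:
        case cpucp_packet_fault:
        default:
                return -EIO;            /* generic processing failure */
        }
}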
/*
@@ -1193,6 +1228,70 @@ enum cpu_reset_status {
CPU_RST_STATUS_SOFT_RST_DONE = 1,
};
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. It is
+ * also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. This
+ * number is hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+ struct cpucp_info info; /* assumed to be 64bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
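Both structures above mix fixed-size buffers with FW-reported lengths, so a consumer would normally bounds-check those lengths before copying anything out. A minimal sketch under that assumption (the helper name is hypothetical):

static int example_check_attest_sizes(const struct cpucp_sec_attest_info *info)
{
        /* reject a malformed FW response before using any of the buffers */
        if (le16_to_cpu(info->pcr_quote_len) > SEC_PCR_QUOTE_BUF_SZ ||
            info->quote_sig_len > SEC_SIGNATURE_BUF_SZ ||
            le16_to_cpu(info->pub_data_len) > SEC_PUB_DATA_BUF_SZ ||
            le16_to_cpu(info->certificate_len) > SEC_CERTIFICATE_BUF_SZ)
                return -EINVAL;

        return 0;
}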
/*
* struct dcore_monitor_regs_data - DCORE monitor regs data.
* The structure follows the sync manager block layout. Relevant only to Gaudi.
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index a3594119bc51..e0ea51cc7475 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -34,6 +34,7 @@ enum cpu_boot_err {
CPU_BOOT_ERR_BINNING_FAIL = 19,
CPU_BOOT_ERR_TPM_FAIL = 20,
CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
CPU_BOOT_ERR_ENABLED = 31,
CPU_BOOT_ERR_SCND_EN = 63,
CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
@@ -115,6 +116,9 @@ enum cpu_boot_err {
* CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for temperature
* sensor.
*
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -139,6 +143,7 @@ enum cpu_boot_err {
#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
@@ -426,7 +431,9 @@ struct cpu_dyn_regs {
__le32 gic_host_ints_irq;
__le32 gic_host_soft_rst_irq;
__le32 gic_rot_qm_irq_ctrl;
- __le32 reserved1[22]; /* reserve for future use */
+ __le32 cpu_rst_status;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved1[20]; /* reserve for future use */
};
/* TODO: remove the desc magic after the code is updated to use message */
@@ -465,6 +472,26 @@ enum comms_msg_type {
HL_COMMS_BINNING_CONF_TYPE = 3,
};
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask - TPC binning information
+ * @dec_mask - Decoder binning information
+ * @hbm_mask - HBM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information, lower 32 bits
+ * @mme_mask_h - MME binning information, upper 32 bits
+ * @reserved - reserved field for 64 bit alignment
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask;
+ __le32 dec_mask;
+ __le32 hbm_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 reserved;
+};
+
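This structure replaces the ad-hoc per-field binning configuration in lkd_fw_comms_msg (see the hunk further down). A minimal sketch of how the LKD side might populate it from masks it already holds; the helper and its parameters are illustrative, not the driver's actual code:

static void example_fill_binning_info(struct lkd_fw_binning_info *info,
                                      u64 tpc_mask, u32 dec_mask, u32 hbm_mask,
                                      u32 edma_mask, u64 mme_mask)
{
        info->tpc_mask = cpu_to_le64(tpc_mask);
        info->dec_mask = cpu_to_le32(dec_mask);
        info->hbm_mask = cpu_to_le32(hbm_mask);
        info->edma_mask = cpu_to_le32(edma_mask);
        /* the 64-bit MME mask is split across the two 32-bit fields */
        info->mme_mask_l = cpu_to_le32(lower_32_bits(mme_mask));
        info->mme_mask_h = cpu_to_le32(upper_32_bits(mme_mask));
        info->reserved = 0;
}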
/* TODO: remove this struct after the code is updated to use message */
/* this is the comms descriptor header - meta data */
struct comms_desc_header {
@@ -525,13 +552,7 @@ struct lkd_fw_comms_msg {
struct {
__u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
};
- struct {
- __le64 tpc_binning_conf;
- __le32 dec_binning_conf;
- __le32 hbm_binning_conf;
- __le32 edma_binning_conf;
- __le32 mme_redundancy_conf; /* use MME_REDUNDANT_COLUMN */
- };
+ struct lkd_fw_binning_info binning_info;
};
};
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index d0e2c68a639f..6aa1b1412462 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -132,6 +132,7 @@
#include "dcore0_mme_ctrl_lo_arch_tensor_a_regs.h"
#include "dcore0_mme_ctrl_lo_arch_tensor_b_regs.h"
#include "dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h"
+#include "pcie_wrap_special_regs.h"
#include "pdma0_qm_masks.h"
#include "pdma0_core_masks.h"
@@ -239,6 +240,7 @@
#define SFT_IF_RTR_OFFSET (mmSFT0_HBW_RTR_IF1_RTR_H3_BASE - mmSFT0_HBW_RTR_IF0_RTR_H3_BASE)
#define ARC_HALT_REQ_OFFSET (mmARC_FARM_ARC0_AUX_RUN_HALT_REQ - mmARC_FARM_ARC0_AUX_BASE)
+#define ARC_HALT_ACK_OFFSET (mmARC_FARM_ARC0_AUX_RUN_HALT_ACK - mmARC_FARM_ARC0_AUX_BASE)
#define ARC_REGION_CFG_OFFSET(region) \
(mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_0 + (region * 4) - mmARC_FARM_ARC0_AUX_BASE)
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
new file mode 100644
index 000000000000..46558e7a7f63
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PCIE_WRAP_SPECIAL_REGS_H_
+#define ASIC_REG_PCIE_WRAP_SPECIAL_REGS_H_
+
+/*
+ *****************************************
+ * PCIE_WRAP_SPECIAL
+ * (Prototype: SPECIAL_REGS)
+ *****************************************
+ */
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_0 0x4C01E80
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_1 0x4C01E84
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_2 0x4C01E88
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_3 0x4C01E8C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_4 0x4C01E90
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_5 0x4C01E94
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_6 0x4C01E98
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_7 0x4C01E9C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_8 0x4C01EA0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_9 0x4C01EA4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_10 0x4C01EA8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_11 0x4C01EAC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_12 0x4C01EB0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_13 0x4C01EB4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_14 0x4C01EB8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_15 0x4C01EBC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_16 0x4C01EC0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_17 0x4C01EC4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_18 0x4C01EC8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_19 0x4C01ECC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_20 0x4C01ED0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_21 0x4C01ED4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_22 0x4C01ED8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_23 0x4C01EDC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_24 0x4C01EE0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_25 0x4C01EE4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_26 0x4C01EE8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_27 0x4C01EEC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_28 0x4C01EF0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_29 0x4C01EF4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_30 0x4C01EF8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_PRIV_31 0x4C01EFC
+
+#define mmPCIE_WRAP_SPECIAL_MEM_GW_DATA 0x4C01F00
+
+#define mmPCIE_WRAP_SPECIAL_MEM_GW_REQ 0x4C01F04
+
+#define mmPCIE_WRAP_SPECIAL_MEM_NUMOF 0x4C01F0C
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_SEL 0x4C01F10
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_CTL 0x4C01F14
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_ERR_MASK 0x4C01F18
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_GLBL_ERR_MASK 0x4C01F1C
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_ERR_STS 0x4C01F20
+
+#define mmPCIE_WRAP_SPECIAL_MEM_ECC_ERR_ADDR 0x4C01F24
+
+#define mmPCIE_WRAP_SPECIAL_MEM_RM 0x4C01F28
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_ERR_MASK 0x4C01F40
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_ERR_ADDR 0x4C01F44
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_ERR_CAUSE 0x4C01F48
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0 0x4C01F60
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_1 0x4C01F64
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_2 0x4C01F68
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SPARE_3 0x4C01F6C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_0 0x4C01F80
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_1 0x4C01F84
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_2 0x4C01F88
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_3 0x4C01F8C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_4 0x4C01F90
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_5 0x4C01F94
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_6 0x4C01F98
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_7 0x4C01F9C
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_8 0x4C01FA0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_9 0x4C01FA4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_10 0x4C01FA8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_11 0x4C01FAC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_12 0x4C01FB0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_13 0x4C01FB4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_14 0x4C01FB8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_15 0x4C01FBC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_16 0x4C01FC0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_17 0x4C01FC4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_18 0x4C01FC8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_19 0x4C01FCC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_20 0x4C01FD0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_21 0x4C01FD4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_22 0x4C01FD8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_23 0x4C01FDC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_24 0x4C01FE0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_25 0x4C01FE4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_26 0x4C01FE8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_27 0x4C01FEC
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_28 0x4C01FF0
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_29 0x4C01FF4
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_30 0x4C01FF8
+
+#define mmPCIE_WRAP_SPECIAL_GLBL_SEC_31 0x4C01FFC
+
+#endif /* ASIC_REG_PCIE_WRAP_SPECIAL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h b/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h
deleted file mode 100644
index 6d6ed7838a64..000000000000
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_virt_events.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2022 HabanaLabs, Ltd.
- * All Rights Reserved.
- *
- */
-
-#ifndef __GAUDI2_ASYNC_VIRT_EVENTS_H_
-#define __GAUDI2_ASYNC_VIRT_EVENTS_H_
-
-enum gaudi2_async_virt_event_id {
- GAUDI2_EVENT_NIC3_QM1_OLD = 1206,
- GAUDI2_EVENT_NIC4_QM0_OLD = 1207,
- GAUDI2_EVENT_NIC4_QM1_OLD = 1208,
- GAUDI2_EVENT_NIC5_QM0_OLD = 1209,
- GAUDI2_EVENT_NIC5_QM1_OLD = 1210,
- GAUDI2_EVENT_NIC6_QM0_OLD = 1211,
- GAUDI2_EVENT_NIC6_QM1_OLD = 1212,
- GAUDI2_EVENT_NIC7_QM0_OLD = 1213,
- GAUDI2_EVENT_NIC7_QM1_OLD = 1214,
- GAUDI2_EVENT_NIC8_QM0_OLD = 1215,
- GAUDI2_EVENT_NIC8_QM1_OLD = 1216,
- GAUDI2_EVENT_NIC9_QM0_OLD = 1217,
- GAUDI2_EVENT_NIC9_QM1_OLD = 1218,
- GAUDI2_EVENT_NIC10_QM0_OLD = 1219,
- GAUDI2_EVENT_NIC10_QM1_OLD = 1220,
- GAUDI2_EVENT_NIC11_QM0_OLD = 1221,
- GAUDI2_EVENT_NIC11_QM1_OLD = 1222,
- GAUDI2_EVENT_CPU_PKT_SANITY_FAILED_OLD = 1223,
- GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0_OLD = 1224,
- GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG1_OLD = 1225,
- GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG0_OLD = 1226,
- GAUDI2_EVENT_CPU1_STATUS_NIC1_ENG1_OLD = 1227,
- GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG0_OLD = 1228,
- GAUDI2_EVENT_CPU2_STATUS_NIC2_ENG1_OLD = 1229,
- GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG0_OLD = 1230,
- GAUDI2_EVENT_CPU3_STATUS_NIC3_ENG1_OLD = 1231,
- GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG0_OLD = 1232,
- GAUDI2_EVENT_CPU4_STATUS_NIC4_ENG1_OLD = 1233,
- GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG0_OLD = 1234,
- GAUDI2_EVENT_CPU5_STATUS_NIC5_ENG1_OLD = 1235,
- GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG0_OLD = 1236,
- GAUDI2_EVENT_CPU6_STATUS_NIC6_ENG1_OLD = 1237,
- GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG0_OLD = 1238,
- GAUDI2_EVENT_CPU7_STATUS_NIC7_ENG1_OLD = 1239,
- GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG0_OLD = 1240,
- GAUDI2_EVENT_CPU8_STATUS_NIC8_ENG1_OLD = 1241,
- GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG0_OLD = 1242,
- GAUDI2_EVENT_CPU9_STATUS_NIC9_ENG1_OLD = 1243,
- GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG0_OLD = 1244,
- GAUDI2_EVENT_CPU10_STATUS_NIC10_ENG1_OLD = 1245,
- GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG0_OLD = 1246,
- GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1_OLD = 1247,
- GAUDI2_EVENT_ARC_DCCM_FULL_OLD = 1248,
-};
-
-#endif /* __GAUDI2_ASYNC_VIRT_EVENTS_H_ */
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 572a2ff10f00..42b9adef28a3 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -116,10 +116,9 @@ static int hmc6352_probe(struct i2c_client *client,
return 0;
}
-static int hmc6352_remove(struct i2c_client *client)
+static void hmc6352_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
- return 0;
}
static const struct i2c_device_id hmc6352_id[] = {
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index c0fe3295c330..cbaf6d35e854 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -1039,6 +1039,7 @@ static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
size_t count, loff_t *ppos)
{
+ struct inode *inode;
struct ibmvmc_buffer *vmc_buffer;
struct ibmvmc_file_session *session;
struct crq_server_adapter *adapter;
@@ -1122,8 +1123,9 @@ static ssize_t ibmvmc_write(struct file *file, const char *buffer,
if (p == buffer)
goto out;
- file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
- mark_inode_dirty(file->f_path.dentry->d_inode);
+ inode = file_inode(file);
+ inode->i_mtime = current_time(inode);
+ mark_inode_dirty(inode);
dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
(unsigned long)file, (unsigned long)count);
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 0f9ea75b0b18..1cb71df966a4 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -93,7 +93,7 @@ static int ics932s401_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info);
-static int ics932s401_remove(struct i2c_client *client);
+static void ics932s401_remove(struct i2c_client *client);
static const struct i2c_device_id ics932s401_id[] = {
{ "ics932s401", 0 },
@@ -424,7 +424,7 @@ static int ics932s401_detect(struct i2c_client *client,
if (revision != ICS932S401_REV)
dev_info(&adapter->dev, "Unknown revision %d\n", revision);
- strlcpy(info->type, "ics932s401", I2C_NAME_SIZE);
+ strscpy(info->type, "ics932s401", I2C_NAME_SIZE);
return 0;
}
@@ -460,13 +460,12 @@ exit:
return err;
}
-static int ics932s401_remove(struct i2c_client *client)
+static void ics932s401_remove(struct i2c_client *client)
{
struct ics932s401_data *data = i2c_get_clientdata(client);
sysfs_remove_group(&client->dev.kobj, &data->attrs);
kfree(data);
- return 0;
}
module_i2c_driver(ics932s401_driver);
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 703d20e83ebd..8ab61be79c76 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -410,12 +410,11 @@ exit_kfree:
return err;
}
-static int isl29003_remove(struct i2c_client *client)
+static void isl29003_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group);
isl29003_set_power_state(client, 0);
kfree(i2c_get_clientdata(client));
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index fc5ff2805b94..c6f2a94f501a 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -171,11 +171,10 @@ static int isl29020_probe(struct i2c_client *client,
return res;
}
-static int isl29020_remove(struct i2c_client *client)
+static void isl29020_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
- return 0;
}
static const struct i2c_device_id isl29020_id[] = {
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 52555d2e824b..d7daa01fe7ca 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -177,7 +177,7 @@ fail:
return ret;
}
-static int lis3lv02d_i2c_remove(struct i2c_client *client)
+static void lis3lv02d_i2c_remove(struct i2c_client *client)
{
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
struct lis3lv02d_platform_data *pdata = client->dev.platform_data;
@@ -190,7 +190,6 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client)
regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
lis3_dev.regulators);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index 71483cb1e422..5245cf6013c9 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -20,6 +20,13 @@ static noinline int lkdtm_increment_int(int *counter)
return *counter;
}
+
+/* Don't allow the compiler to inline the calls. */
+static noinline void lkdtm_indirect_call(void (*func)(int *))
+{
+ func(&called_count);
+}
+
/*
* This tries to call an indirect function with a mismatched prototype.
*/
@@ -29,15 +36,11 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
* Matches lkdtm_increment_void()'s prototype, but not
* lkdtm_increment_int()'s prototype.
*/
- void (*func)(int *);
-
pr_info("Calling matched prototype ...\n");
- func = lkdtm_increment_void;
- func(&called_count);
+ lkdtm_indirect_call(lkdtm_increment_void);
pr_info("Calling mismatched prototype ...\n");
- func = (void *)lkdtm_increment_int;
- func(&called_count);
+ lkdtm_indirect_call((void *)lkdtm_increment_int);
pr_err("FAIL: survived mismatched prototype function call!\n");
pr_expected_config(CONFIG_CFI_CLANG);
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index 080293fa3c52..015927665678 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -10,28 +10,31 @@
static volatile int fortify_scratch_space;
-static void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFY_STR_OBJECT(void)
{
struct target {
char a[10];
- } target[2] = {};
+ int foo;
+ } target[3] = {};
/*
* Using volatile prevents the compiler from determining the value of
* 'size' at compile time. Without that, we would get a compile error
* rather than a runtime error.
*/
- volatile int size = 11;
+ volatile int size = 20;
+
+ pr_info("trying to strcmp() past the end of a struct\n");
- pr_info("trying to read past the end of a struct\n");
+ strncpy(target[0].a, target[1].a, size);
/* Store result to global to prevent the code from being eliminated */
- fortify_scratch_space = memcmp(&target[0], &target[1], size);
+ fortify_scratch_space = target[0].a[3];
- pr_err("FAIL: fortify did not block an object overread!\n");
+ pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
-static void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFY_STR_MEMBER(void)
{
struct target {
char a[10];
@@ -44,7 +47,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
- pr_info("trying to strncpy past the end of a member of a struct\n");
+ pr_info("trying to strncpy() past the end of a struct member...\n");
/*
* strncpy(target.a, src, 20); will hit a compile error because the
@@ -56,7 +59,72 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
/* Store result to global to prevent the code from being eliminated */
fortify_scratch_space = target.a[3];
- pr_err("FAIL: fortify did not block an sub-object overrun!\n");
+ pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+
+ kfree(src);
+}
+
+static void lkdtm_FORTIFY_MEM_OBJECT(void)
+{
+ int before[10];
+ struct target {
+ char a[10];
+ int foo;
+ } target = {};
+ int after[10];
+ /*
+ * Using volatile prevents the compiler from determining the value of
+ * 'size' at compile time. Without that, we would get a compile error
+ * rather than a runtime error.
+ */
+ volatile int size = 20;
+
+ memset(before, 0, sizeof(before));
+ memset(after, 0, sizeof(after));
+ fortify_scratch_space = before[5];
+ fortify_scratch_space = after[5];
+
+ pr_info("trying to memcpy() past the end of a struct\n");
+
+ pr_info("0: %zu\n", __builtin_object_size(&target, 0));
+ pr_info("1: %zu\n", __builtin_object_size(&target, 1));
+ pr_info("s: %d\n", size);
+ memcpy(&target, &before, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+}
+
+static void lkdtm_FORTIFY_MEM_MEMBER(void)
+{
+ struct target {
+ char a[10];
+ char b[10];
+ } target;
+ volatile int size = 20;
+ char *src;
+
+ src = kmalloc(size, GFP_KERNEL);
+ strscpy(src, "over ten bytes", size);
+ size = strlen(src) + 1;
+
+ pr_info("trying to memcpy() past the end of a struct member...\n");
+
+ /*
+ * memcpy(target.a, src, 20); will hit a compile error because the
+ * compiler knows at build time that target.a < 20 bytes. Use a
+ * volatile to force a runtime error.
+ */
+ memcpy(target.a, src, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n");
pr_expected_config(CONFIG_FORTIFY_SOURCE);
kfree(src);
@@ -67,7 +135,7 @@ static void lkdtm_FORTIFIED_SUBOBJECT(void)
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
-static void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFY_STRSCPY(void)
{
char *src;
char dst[5];
@@ -136,9 +204,11 @@ static void lkdtm_FORTIFIED_STRSCPY(void)
}
static struct crashtype crashtypes[] = {
- CRASHTYPE(FORTIFIED_OBJECT),
- CRASHTYPE(FORTIFIED_SUBOBJECT),
- CRASHTYPE(FORTIFIED_STRSCPY),
+ CRASHTYPE(FORTIFY_STR_OBJECT),
+ CRASHTYPE(FORTIFY_STR_MEMBER),
+ CRASHTYPE(FORTIFY_MEM_OBJECT),
+ CRASHTYPE(FORTIFY_MEM_MEMBER),
+ CRASHTYPE(FORTIFY_STRSCPY),
};
struct crashtype_category fortify_crashtypes = {
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 6215ec995cd3..67db57249a34 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -330,7 +330,7 @@ static void lkdtm_USERCOPY_KERNEL(void)
pr_info("attempting bad copy_to_user from kernel text: %px\n",
vm_mmap);
- if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
diff --git a/drivers/misc/mchp_pci1xxxx/Kconfig b/drivers/misc/mchp_pci1xxxx/Kconfig
new file mode 100644
index 000000000000..4abb47de7219
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/Kconfig
@@ -0,0 +1,13 @@
+config GP_PCI1XXXX
+ tristate "Microchip PCI1XXXX PCIe to GPIO Expander + OTP/EEPROM manager"
+ depends on PCI
+ depends on GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select AUXILIARY_BUS
+ help
+ PCI1XXXX is a PCIe GEN 3 switch in which one of the endpoints has
+ multiple functions; one of these functions is a GPIO controller
+ that also has registers to interface with the OTP and EEPROM.
+ Select yes, no or module here to include or exclude the driver
+ for the GPIO function.
+
diff --git a/drivers/misc/mchp_pci1xxxx/Makefile b/drivers/misc/mchp_pci1xxxx/Makefile
new file mode 100644
index 000000000000..fc4615cfe28b
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_GP_PCI1XXXX) := mchp_pci1xxxx_gp.o mchp_pci1xxxx_gpio.o
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
new file mode 100644
index 000000000000..32af2b14ff34
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 Microchip Technology Inc.
+
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include "mchp_pci1xxxx_gp.h"
+
+struct aux_bus_device {
+ struct auxiliary_device_wrapper *aux_device_wrapper[2];
+};
+
+static DEFINE_IDA(gp_client_ida);
+static const char aux_dev_otp_e2p_name[15] = "gp_otp_e2p";
+static const char aux_dev_gpio_name[15] = "gp_gpio";
+
+static void gp_auxiliary_device_release(struct device *dev)
+{
+ struct auxiliary_device_wrapper *aux_device_wrapper =
+ (struct auxiliary_device_wrapper *)container_of(dev,
+ struct auxiliary_device_wrapper, aux_dev.dev);
+
+ ida_free(&gp_client_ida, aux_device_wrapper->aux_dev.id);
+ kfree(aux_device_wrapper);
+}
+
+static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct aux_bus_device *aux_bus;
+ int retval;
+
+ retval = pcim_enable_device(pdev);
+ if (retval)
+ return retval;
+
+ aux_bus = devm_kzalloc(&pdev->dev, sizeof(*aux_bus), GFP_KERNEL);
+ if (!aux_bus)
+ return -ENOMEM;
+
+ aux_bus->aux_device_wrapper[0] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[0]),
+ GFP_KERNEL);
+ if (!aux_bus->aux_device_wrapper[0])
+ return -ENOMEM;
+
+ retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ if (retval < 0)
+ goto err_ida_alloc_0;
+
+ aux_bus->aux_device_wrapper[0]->aux_dev.name = aux_dev_otp_e2p_name;
+ aux_bus->aux_device_wrapper[0]->aux_dev.dev.parent = &pdev->dev;
+ aux_bus->aux_device_wrapper[0]->aux_dev.dev.release = gp_auxiliary_device_release;
+ aux_bus->aux_device_wrapper[0]->aux_dev.id = retval;
+
+ aux_bus->aux_device_wrapper[0]->gp_aux_data.region_start = pci_resource_start(pdev, 0);
+ aux_bus->aux_device_wrapper[0]->gp_aux_data.region_length = pci_resource_end(pdev, 0);
+
+ retval = auxiliary_device_init(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ if (retval < 0)
+ goto err_aux_dev_init_0;
+
+ retval = auxiliary_device_add(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ if (retval)
+ goto err_aux_dev_add_0;
+
+ aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
+ GFP_KERNEL);
+ if (!aux_bus->aux_device_wrapper[1])
+ return -ENOMEM;
+
+ retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
+ if (retval < 0)
+ goto err_ida_alloc_1;
+
+ aux_bus->aux_device_wrapper[1]->aux_dev.name = aux_dev_gpio_name;
+ aux_bus->aux_device_wrapper[1]->aux_dev.dev.parent = &pdev->dev;
+ aux_bus->aux_device_wrapper[1]->aux_dev.dev.release = gp_auxiliary_device_release;
+ aux_bus->aux_device_wrapper[1]->aux_dev.id = retval;
+
+ aux_bus->aux_device_wrapper[1]->gp_aux_data.region_start = pci_resource_start(pdev, 0);
+ aux_bus->aux_device_wrapper[1]->gp_aux_data.region_length = pci_resource_end(pdev, 0);
+
+ retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+
+ if (retval < 0)
+ goto err_aux_dev_init_1;
+
+ retval = pci_irq_vector(pdev, 0);
+ if (retval < 0)
+ goto err_aux_dev_init_1;
+
+ pdev->irq = retval;
+ aux_bus->aux_device_wrapper[1]->gp_aux_data.irq_num = pdev->irq;
+
+ retval = auxiliary_device_init(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ if (retval < 0)
+ goto err_aux_dev_init_1;
+
+ retval = auxiliary_device_add(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ if (retval)
+ goto err_aux_dev_add_1;
+
+ pci_set_drvdata(pdev, aux_bus);
+ pci_set_master(pdev);
+
+ return 0;
+
+err_aux_dev_add_1:
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+
+err_aux_dev_init_1:
+ ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
+
+err_ida_alloc_1:
+ kfree(aux_bus->aux_device_wrapper[1]);
+
+err_aux_dev_add_0:
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+
+err_aux_dev_init_0:
+ ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
+
+err_ida_alloc_0:
+ kfree(aux_bus->aux_device_wrapper[0]);
+
+ return retval;
+}
+
+static void gp_aux_bus_remove(struct pci_dev *pdev)
+{
+ struct aux_bus_device *aux_bus = pci_get_drvdata(pdev);
+
+ auxiliary_device_delete(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ auxiliary_device_delete(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+}
+
+static const struct pci_device_id pci1xxxx_tbl[] = {
+ { PCI_DEVICE(0x1055, 0xA005) },
+ { PCI_DEVICE(0x1055, 0xA015) },
+ { PCI_DEVICE(0x1055, 0xA025) },
+ { PCI_DEVICE(0x1055, 0xA035) },
+ { PCI_DEVICE(0x1055, 0xA045) },
+ { PCI_DEVICE(0x1055, 0xA055) },
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, pci1xxxx_tbl);
+
+static struct pci_driver pci1xxxx_gp_driver = {
+ .name = "PCI1xxxxGP",
+ .id_table = pci1xxxx_tbl,
+ .probe = gp_aux_bus_probe,
+ .remove = gp_aux_bus_remove,
+};
+
+module_pci_driver(pci1xxxx_gp_driver);
+
+MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx GP expander");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h
new file mode 100644
index 000000000000..37eec73b20d7
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022 Microchip Technology Inc. */
+
+#ifndef _GPIO_PCI1XXXX_H
+#define _GPIO_PCI1XXXX_H
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/auxiliary_bus.h>
+
+/*
+ * Perform operations such as variable-length write, read, and
+ * write-with-readback for OTP / EEPROM, and bit-mode writes to OTP.
+ */
+
+struct gp_aux_data_type {
+ int irq_num;
+ resource_size_t region_start;
+ resource_size_t region_length;
+};
+
+struct auxiliary_device_wrapper {
+ struct auxiliary_device aux_dev;
+ struct gp_aux_data_type gp_aux_data;
+};
+
+#endif
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
new file mode 100644
index 000000000000..3389803cb281
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 Microchip Technology Inc.
+// pci1xxxx gpio driver
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+#include <linux/bio.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mchp_pci1xxxx_gp.h"
+
+#define PCI1XXXX_NR_PINS 93
+#define PERI_GEN_RESET 0
+#define OUT_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400)
+#define INP_EN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x10)
+#define OUT_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x20)
+#define INP_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x30)
+#define PULLUP_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x40)
+#define PULLDOWN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x50)
+#define OPENDRAIN_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x60)
+#define WAKEMASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x70)
+#define MODE_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x80)
+#define INTR_LO_TO_HI_EDGE_CONFIG(x) ((((x) / 32) * 4) + 0x400 + 0x90)
+#define INTR_HI_TO_LO_EDGE_CONFIG(x) ((((x) / 32) * 4) + 0x400 + 0xA0)
+#define INTR_LEVEL_CONFIG_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xB0)
+#define INTR_LEVEL_MASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xC0)
+#define INTR_STAT_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xD0)
+#define DEBOUNCE_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0xE0)
+#define PIO_GLOBAL_CONFIG_OFFSET (0x400 + 0xF0)
+#define PIO_PCI_CTRL_REG_OFFSET (0x400 + 0xF4)
+#define INTR_MASK_OFFSET(x) ((((x) / 32) * 4) + 0x400 + 0x100)
+#define INTR_STATUS_OFFSET(x) (((x) * 4) + 0x400 + 0xD0)
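+
+/*
+ * Pin registers are banked in groups of 32: the macros above take a pin
+ * number and derive the bank ((x) / 32) and bit ((x) % 32), except
+ * INTR_STATUS_OFFSET(), which takes a bank index directly (see the IRQ
+ * handler). With 93 pins there are three banks; bank 2 carries 29 pins.
+ */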
+
+struct pci1xxxx_gpio {
+ struct auxiliary_device *aux_dev;
+ void __iomem *reg_base;
+ struct gpio_chip gpio;
+ spinlock_t lock;
+ int irq_base;
+};
+
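+/*
+ * Returns 1 for input and 0 for output (matching GPIO_LINE_DIRECTION_IN
+ * and GPIO_LINE_DIRECTION_OUT), or -EINVAL if neither direction enable
+ * bit is set for the pin.
+ */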
+static int pci1xxxx_gpio_get_direction(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ u32 data;
+ int ret = -EINVAL;
+
+ data = readl(priv->reg_base + INP_EN_OFFSET(nr));
+ if (data & BIT(nr % 32)) {
+ ret = 1;
+ } else {
+ data = readl(priv->reg_base + OUT_EN_OFFSET(nr));
+ if (data & BIT(nr % 32))
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static inline void pci1xxx_assign_bit(void __iomem *base_addr, unsigned int reg_offset,
+ unsigned int bitpos, bool set)
+{
+ u32 data;
+
+ data = readl(base_addr + reg_offset);
+ if (set)
+ data |= BIT(bitpos);
+ else
+ data &= ~BIT(bitpos);
+ writel(data, base_addr + reg_offset);
+}
+
+static int pci1xxxx_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INP_EN_OFFSET(nr), (nr % 32), true);
+ pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(nr), (nr % 32), false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_get(struct gpio_chip *gpio, unsigned int nr)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+
+ return (readl(priv->reg_base + INP_OFFSET(nr)) >> (nr % 32)) & 1;
+}
+
+static int pci1xxxx_gpio_direction_output(struct gpio_chip *gpio,
+ unsigned int nr, int val)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INP_EN_OFFSET(nr), (nr % 32), false);
+ pci1xxx_assign_bit(priv->reg_base, OUT_EN_OFFSET(nr), (nr % 32), true);
+ data = readl(priv->reg_base + OUT_OFFSET(nr));
+ if (val)
+ data |= (1 << (nr % 32));
+ else
+ data &= ~(1 << (nr % 32));
+ writel(data, priv->reg_base + OUT_OFFSET(nr));
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void pci1xxxx_gpio_set(struct gpio_chip *gpio,
+ unsigned int nr, int val)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, OUT_OFFSET(nr), (nr % 32), val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(gpio);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pci1xxx_assign_bit(priv->reg_base, PULLUP_OFFSET(offset), (offset % 32), true);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pci1xxx_assign_bit(priv->reg_base, PULLDOWN_OFFSET(offset), (offset % 32), true);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ pci1xxx_assign_bit(priv->reg_base, PULLUP_OFFSET(offset), (offset % 32), false);
+ pci1xxx_assign_bit(priv->reg_base, PULLDOWN_OFFSET(offset), (offset % 32), false);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static void pci1xxxx_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void pci1xxxx_gpio_irq_set_mask(struct irq_data *data, bool set)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, INTR_MASK_OFFSET(gpio), (gpio % 32), set);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void pci1xxxx_gpio_irq_mask(struct irq_data *data)
+{
+ pci1xxxx_gpio_irq_set_mask(data, true);
+}
+
+static void pci1xxxx_gpio_irq_unmask(struct irq_data *data)
+{
+ pci1xxxx_gpio_irq_set_mask(data, false);
+}
+
+static int pci1xxxx_gpio_set_type(struct irq_data *data, unsigned int trigger_type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pci1xxxx_gpio *priv = gpiochip_get_data(chip);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bitpos = gpio % 32;
+
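+	/*
+	 * As implemented below, MODE_OFFSET selects level (1) vs. edge (0)
+	 * detection; for edges a cleared bit in the HI_TO_LO/LO_TO_HI config
+	 * registers enables that edge, while for levels INTR_LEVEL_CONFIG
+	 * selects the polarity (1 = low, 0 = high) and a cleared
+	 * INTR_LEVEL_MASK bit enables the interrupt.
+	 */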
+ if (trigger_type & IRQ_TYPE_EDGE_FALLING) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_HI_TO_LO_EDGE_CONFIG(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio),
+ bitpos, false);
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ pci1xxx_assign_bit(priv->reg_base, INTR_HI_TO_LO_EDGE_CONFIG(gpio),
+ bitpos, true);
+ }
+
+ if (trigger_type & IRQ_TYPE_EDGE_RISING) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LO_TO_HI_EDGE_CONFIG(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
+ false);
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LO_TO_HI_EDGE_CONFIG(gpio),
+ bitpos, true);
+ }
+
+ if (trigger_type & IRQ_TYPE_LEVEL_LOW) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_CONFIG_OFFSET(gpio),
+ bitpos, true);
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
+ true);
+ irq_set_handler_locked(data, handle_edge_irq);
+ }
+
+ if (trigger_type & IRQ_TYPE_LEVEL_HIGH) {
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_CONFIG_OFFSET(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio),
+ bitpos, false);
+ pci1xxx_assign_bit(priv->reg_base, MODE_OFFSET(gpio), bitpos,
+ true);
+ irq_set_handler_locked(data, handle_edge_irq);
+ }
+
+	if (!(trigger_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)))
+		pci1xxx_assign_bit(priv->reg_base, INTR_LEVEL_MASK_OFFSET(gpio), bitpos, true);
+
+	return 0;
+}
+
+static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id)
+{
+ struct pci1xxxx_gpio *priv = dev_id;
+ struct gpio_chip *gc = &priv->gpio;
+ unsigned long int_status = 0;
+ unsigned long flags;
+ u8 pincount;
+ int bit;
+ u8 gpiobank;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
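+	/*
+	 * Bit 16 of PIO_GLOBAL_CONFIG is set for the duration of the status
+	 * scan below and cleared again afterwards; the suspend path sets the
+	 * same bit, so it presumably gates the GPIO interrupt output.
+	 */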
+ for (gpiobank = 0; gpiobank < 3; gpiobank++) {
+ spin_lock_irqsave(&priv->lock, flags);
+ int_status = readl(priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (gpiobank == 2)
+ pincount = 29;
+ else
+ pincount = 32;
+ for_each_set_bit(bit, &int_status, pincount) {
+ unsigned int irq;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank));
+ spin_unlock_irqrestore(&priv->lock, flags);
+ irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32)));
+ generic_handle_irq(irq);
+ }
+ }
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET, 16, false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_chip pci1xxxx_gpio_irqchip = {
+ .name = "pci1xxxx_gpio",
+ .irq_ack = pci1xxxx_gpio_irq_ack,
+ .irq_mask = pci1xxxx_gpio_irq_mask,
+ .irq_unmask = pci1xxxx_gpio_irq_unmask,
+ .irq_set_type = pci1xxxx_gpio_set_type,
+};
+
+static int pci1xxxx_gpio_suspend(struct device *dev)
+{
+ struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 16, true);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 17, false);
+ pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_resume(struct device *dev)
+{
+ struct pci1xxxx_gpio *priv = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 17, true);
+ pci1xxx_assign_bit(priv->reg_base, PIO_GLOBAL_CONFIG_OFFSET,
+ 16, false);
+ pci1xxx_assign_bit(priv->reg_base, PERI_GEN_RESET, 16, false);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
+{
+ struct gpio_chip *gchip = &priv->gpio;
+ struct gpio_irq_chip *girq;
+ int retval;
+
+ gchip->label = dev_name(&priv->aux_dev->dev);
+ gchip->parent = &priv->aux_dev->dev;
+ gchip->owner = THIS_MODULE;
+ gchip->direction_input = pci1xxxx_gpio_direction_input;
+ gchip->direction_output = pci1xxxx_gpio_direction_output;
+ gchip->get_direction = pci1xxxx_gpio_get_direction;
+ gchip->get = pci1xxxx_gpio_get;
+ gchip->set = pci1xxxx_gpio_set;
+ gchip->set_config = pci1xxxx_gpio_set_config;
+ gchip->dbg_show = NULL;
+ gchip->base = -1;
+ gchip->ngpio = PCI1XXXX_NR_PINS;
+ gchip->can_sleep = false;
+
+	retval = devm_request_threaded_irq(&priv->aux_dev->dev, irq,
+					   NULL, pci1xxxx_gpio_irq_handler,
+					   IRQF_ONESHOT, "PCI1xxxxGPIO", priv);
+	if (retval)
+		return retval;
+
+ girq = &priv->gpio.irq;
+ girq->chip = &pci1xxxx_gpio_irqchip;
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_bad_irq;
+
+ return 0;
+}
+
+static int pci1xxxx_gpio_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+
+{
+ struct auxiliary_device_wrapper *aux_dev_wrapper;
+ struct gp_aux_data_type *pdata;
+ struct pci1xxxx_gpio *priv;
+ int retval;
+
+	aux_dev_wrapper = container_of(aux_dev, struct auxiliary_device_wrapper, aux_dev);
+
+ pdata = &aux_dev_wrapper->gp_aux_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&aux_dev->dev, sizeof(struct pci1xxxx_gpio), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ priv->aux_dev = aux_dev;
+
+ if (!devm_request_mem_region(&aux_dev->dev, pdata->region_start, 0x800, aux_dev->name))
+ return -EBUSY;
+
+ priv->reg_base = devm_ioremap(&aux_dev->dev, pdata->region_start, 0x800);
+ if (!priv->reg_base)
+ return -ENOMEM;
+
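+	/*
+	 * Initial PIO global configuration; the value 0x0264 is used as-is by
+	 * the driver and its individual bit meanings are not documented in
+	 * this file.
+	 */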
+	writel(0x0264, priv->reg_base + PIO_GLOBAL_CONFIG_OFFSET);
+
+	retval = pci1xxxx_gpio_setup(priv, pdata->irq_num);
+	if (retval < 0)
+		return retval;
+
+ dev_set_drvdata(&aux_dev->dev, priv);
+
+ return devm_gpiochip_add_data(&aux_dev->dev, &priv->gpio, priv);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_gpio_pm_ops, pci1xxxx_gpio_suspend, pci1xxxx_gpio_resume);
+
+static const struct auxiliary_device_id pci1xxxx_gpio_auxiliary_id_table[] = {
+ {.name = "mchp_pci1xxxx_gp.gp_gpio"},
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pci1xxxx_gpio_auxiliary_id_table);
+
+static struct auxiliary_driver pci1xxxx_gpio_driver = {
+ .driver = {
+ .name = "PCI1xxxxGPIO",
+ .pm = &pci1xxxx_gpio_pm_ops,
+ },
+ .probe = pci1xxxx_gpio_probe,
+ .id_table = pci1xxxx_gpio_auxiliary_id_table
+};
+module_auxiliary_driver(pci1xxxx_gpio_driver);
+
+MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx GPIO controller");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 59506ba6fc48..71fbf0bc8453 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -15,6 +15,7 @@
#include "mei_dev.h"
#include "client.h"
+#include "mkhi.h"
#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
@@ -89,20 +90,6 @@ struct mei_os_ver {
u8 reserved2;
} __packed;
-#define MKHI_FEATURE_PTT 0x10
-
-struct mkhi_rule_id {
- __le16 rule_type;
- u8 feature_id;
- u8 reserved;
-} __packed;
-
-struct mkhi_fwcaps {
- struct mkhi_rule_id id;
- u8 len;
- u8 data[];
-} __packed;
-
struct mkhi_fw_ver_block {
u16 minor;
u8 major;
@@ -115,22 +102,6 @@ struct mkhi_fw_ver {
struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS];
} __packed;
-#define MKHI_FWCAPS_GROUP_ID 0x3
-#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
-#define MKHI_GEN_GROUP_ID 0xFF
-#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
-struct mkhi_msg_hdr {
- u8 group_id;
- u8 command;
- u8 reserved;
- u8 result;
-} __packed;
-
-struct mkhi_msg {
- struct mkhi_msg_hdr hdr;
- u8 data[];
-} __packed;
-
#define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fwcaps) + \
sizeof(struct mei_os_ver))
@@ -164,7 +135,6 @@ static int mei_osver(struct mei_cl_device *cldev)
sizeof(struct mkhi_fw_ver))
#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fw_ver_block) * (__num))
-#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
static int mei_fwver(struct mei_cl_device *cldev)
{
char buf[MKHI_FWVER_BUF_LEN];
@@ -187,7 +157,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
ret = 0;
bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0,
- MKHI_RCV_TIMEOUT);
+ cldev->bus->timeouts.mkhi_recv);
if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
/*
* Should be at least one version block,
@@ -218,6 +188,19 @@ static int mei_fwver(struct mei_cl_device *cldev)
return ret;
}
+static int mei_gfx_memory_ready(struct mei_cl_device *cldev)
+{
+ struct mkhi_gfx_mem_ready req = {0};
+ unsigned int mode = MEI_CL_IO_TX_INTERNAL;
+
+ req.hdr.group_id = MKHI_GROUP_ID_GFX;
+ req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ;
+ req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED;
+
+ dev_dbg(&cldev->dev, "Sending memory ready command\n");
+ return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode);
+}
+
static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
@@ -264,6 +247,39 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
dev_err(&cldev->dev, "FW version command failed %d\n", ret);
mei_cldev_disable(cldev);
}
+
+static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ /* No need to enable the client if nothing is needed from it */
+ if (!cldev->bus->fw_f_fw_ver_supported &&
+ cldev->bus->pxp_mode != MEI_DEV_PXP_INIT)
+ return;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret)
+ return;
+
+ if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) {
+ ret = mei_gfx_memory_ready(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "memory ready command failed %d\n", ret);
+ else
+ dev_dbg(&cldev->dev, "memory ready command sent\n");
+ /* we go to reset after that */
+ cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP;
+ goto out;
+ }
+
+ ret = mei_fwver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "FW version command failed %d\n",
+ ret);
+out:
+ mei_cldev_disable(cldev);
+}
+
/**
* mei_wd - wd client on the bus, change protocol version
* as the API has changed.
@@ -471,7 +487,7 @@ static void mei_nfc(struct mei_cl_device *cldev)
}
dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
- strlcpy(cldev->name, radio_name, sizeof(cldev->name));
+ strscpy(cldev->name, radio_name, sizeof(cldev->name));
disconnect:
mutex_lock(&bus->device_lock);
@@ -503,6 +519,26 @@ static void vt_support(struct mei_cl_device *cldev)
cldev->do_match = 1;
}
+/**
+ * pxp_is_ready - enable bus client if pxp is ready
+ *
+ * @cldev: me clients device
+ */
+static void pxp_is_ready(struct mei_cl_device *cldev)
+{
+ struct mei_device *bus = cldev->bus;
+
+ switch (bus->pxp_mode) {
+ case MEI_DEV_PXP_READY:
+ case MEI_DEV_PXP_DEFAULT:
+ cldev->do_match = 1;
+ break;
+ default:
+ cldev->do_match = 0;
+ break;
+ }
+}
+
#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
static struct mei_fixup {
@@ -516,10 +552,10 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
- MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver),
+ MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver),
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
MEI_FIXUP(MEI_UUID_ANY, vt_support),
- MEI_FIXUP(MEI_UUID_PAVP, whitelist),
+ MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready),
};
/**
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 31264ab2eb13..0b2fbe1335a7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -870,7 +870,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
@@ -945,7 +945,7 @@ static int __mei_cl_disconnect(struct mei_cl *cl)
wait_event_timeout(cl->wait,
cl->state == MEI_FILE_DISCONNECT_REPLY ||
cl->state == MEI_FILE_DISCONNECTED,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
rets = cl->status;
@@ -1065,7 +1065,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -1164,7 +1164,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
@@ -1562,7 +1562,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
cl->notify_en == request ||
cl->status ||
!mei_cl_is_connected(cl),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->notify_en != request && !cl->status)
@@ -2336,7 +2336,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!cl->dma_mapped && !cl->status)
@@ -2415,7 +2415,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->dma_mapped && !cl->status)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 1ce61e9e24fc..3b098d4c8e3d 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2016, Intel Corporation. All rights reserved
+ * Copyright (c) 2012-2022, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -86,6 +86,20 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active);
+static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state)
+{
+#define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state
+ switch (state) {
+ MEI_PXP_MODE(DEFAULT);
+ MEI_PXP_MODE(INIT);
+ MEI_PXP_MODE(SETUP);
+ MEI_PXP_MODE(READY);
+ default:
+ return "unknown";
+ }
+#undef MEI_PXP_MODE
+}
+
static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
{
struct mei_device *dev = m->private;
@@ -112,6 +126,9 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused)
seq_printf(m, "pg: %s, %s\n",
mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED",
mei_pg_state_str(mei_pg_state(dev)));
+
+ seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode));
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate);
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index c8145e9b62b6..e63cabd0818d 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -13,6 +13,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
#include "mei_dev.h"
#include "hw-me.h"
@@ -31,6 +32,17 @@ static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val)
return 0;
}
+static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem)
+{
+ u32 low = lower_32_bits(mem->start);
+ u32 hi = upper_32_bits(mem->start);
+ u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID;
+
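+	/*
+	 * The limit is programmed in units of 4K pages, with
+	 * GSC_EXT_OP_MEM_VALID (bit 31) marking the range as valid.
+	 */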
+ iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG);
+ iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG);
+ iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG);
+}
+
static int mei_gsc_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *aux_dev_id)
{
@@ -47,7 +59,7 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
device = &aux_dev->dev;
- dev = mei_me_dev_init(device, cfg);
+ dev = mei_me_dev_init(device, cfg, adev->slow_firmware);
if (!dev) {
ret = -ENOMEM;
goto err;
@@ -56,7 +68,6 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
hw = to_me_hw(dev);
hw->mem_addr = devm_ioremap_resource(device, &adev->bar);
if (IS_ERR(hw->mem_addr)) {
- dev_err(device, "mmio not mapped\n");
ret = PTR_ERR(hw->mem_addr);
goto err;
}
@@ -66,13 +77,33 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
dev_set_drvdata(device, dev);
- ret = devm_request_threaded_irq(device, hw->irq,
- mei_me_irq_quick_handler,
- mei_me_irq_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- if (ret) {
- dev_err(device, "irq register failed %d\n", ret);
- goto err;
+ if (adev->ext_op_mem.start) {
+ mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
+ dev->pxp_mode = MEI_DEV_PXP_INIT;
+ }
+
+ /* use polling */
+ if (mei_me_hw_use_polling(hw)) {
+ mei_disable_interrupts(dev);
+ mei_clear_interrupts(dev);
+ init_waitqueue_head(&hw->wait_active);
+ hw->is_active = true; /* start in active mode for initialization */
+ hw->polling_thread = kthread_run(mei_me_polling_thread, dev,
+ "kmegscirqd/%s", dev_name(device));
+ if (IS_ERR(hw->polling_thread)) {
+ ret = PTR_ERR(hw->polling_thread);
+ dev_err(device, "unable to create kernel thread: %d\n", ret);
+ goto err;
+ }
+ } else {
+ ret = devm_request_threaded_irq(device, hw->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(device, "irq register failed %d\n", ret);
+ goto err;
+ }
}
pm_runtime_get_noresume(device);
@@ -98,7 +129,8 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
register_err:
mei_stop(dev);
- devm_free_irq(device, hw->irq, dev);
+ if (!mei_me_hw_use_polling(hw))
+ devm_free_irq(device, hw->irq, dev);
err:
dev_err(device, "probe failed: %d\n", ret);
@@ -119,12 +151,17 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
mei_stop(dev);
+ hw = to_me_hw(dev);
+ if (mei_me_hw_use_polling(hw))
+ kthread_stop(hw->polling_thread);
+
mei_deregister(dev);
pm_runtime_disable(&aux_dev->dev);
mei_disable_interrupts(dev);
- devm_free_irq(&aux_dev->dev, hw->irq, dev);
+ if (!mei_me_hw_use_polling(hw))
+ devm_free_irq(&aux_dev->dev, hw->irq, dev);
}
static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
@@ -144,11 +181,22 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
static int __maybe_unused mei_gsc_pm_resume(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
+ struct auxiliary_device *aux_dev;
+ struct mei_aux_device *adev;
int err;
+ struct mei_me_hw *hw;
if (!dev)
return -ENODEV;
+ hw = to_me_hw(dev);
+ aux_dev = to_auxiliary_dev(device);
+ adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+ if (adev->ext_op_mem.start) {
+ mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem);
+ dev->pxp_mode = MEI_DEV_PXP_INIT;
+ }
+
err = mei_restart(dev);
if (err)
return err;
@@ -185,6 +233,9 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
if (mei_write_is_idle(dev)) {
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_ON;
+
+ if (mei_me_hw_use_polling(hw))
+ hw->is_active = false;
ret = 0;
} else {
ret = -EAGAIN;
@@ -209,6 +260,11 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_OFF;
+ if (mei_me_hw_use_polling(hw)) {
+ hw->is_active = true;
+ wake_up(&hw->wait_active);
+ }
+
mutex_unlock(&dev->device_lock);
irq_ret = mei_me_irq_thread_handler(1, dev);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index cf2b8261da14..de712cbf5d07 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
@@ -232,7 +232,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
ret = wait_event_timeout(dev->wait_hbm_start,
dev->hbm_state != MEI_HBM_STARTING,
- mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
+ dev->timeouts.hbm);
mutex_lock(&dev->device_lock);
if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
@@ -275,7 +275,7 @@ int mei_hbm_start_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_STARTING;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -316,7 +316,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_DR_SETUP;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -351,7 +351,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
}
dev->hbm_state = MEI_HBM_CAP_SETUP;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -385,7 +385,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
@@ -751,7 +751,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
return ret;
}
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 15e8e2b322b1..99966cd3e7d8 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#ifndef _MEI_HW_MEI_REGS_H_
@@ -127,6 +127,8 @@
# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
+# define GSC_CFG_HFS_5_BOOT_TYPE_MSK 0x00000003
+# define GSC_CFG_HFS_5_BOOT_TYPE_PXP 3
#define PCI_CFG_HFS_6 0x6C
/* MEI registers */
@@ -143,6 +145,11 @@
/* H_D0I3C - D0I3 Control */
#define H_D0I3C 0x800
+#define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100
+#define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104
+#define H_GSC_EXT_OP_MEM_LIMIT_REG 0x108
+#define GSC_EXT_OP_MEM_VALID BIT(31)
+
/* register bits of H_CSR (Host Control Status register) */
/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
#define H_CBD 0xFF000000
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 3a95fe7d4e33..9e2f781c6ed5 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
+#include <linux/delay.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev)
*/
static void mei_me_intr_enable(struct mei_device *dev)
{
- u32 hcsr = mei_hcsr_read(dev);
+ u32 hcsr;
+
+ if (mei_me_hw_use_polling(to_me_hw(dev)))
+ return;
- hcsr |= H_CSR_IE_MASK;
+ hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
@@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ if (mei_me_hw_use_polling(hw))
+ return;
+
synchronize_irq(hw->irq);
}
@@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
+ if (!mei_me_hw_use_polling(to_me_hw(dev)))
+ hcsr |= H_CSR_IE_MASK;
+
+ hcsr |= H_IG | H_RDY;
mei_hcsr_set(dev, hcsr);
}
@@ -424,6 +434,29 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev)
}
/**
+ * mei_gsc_pxp_check - check for gsc firmware entering pxp mode
+ *
+ * @dev: the device structure
+ */
+static void mei_gsc_pxp_check(struct mei_device *dev)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 fwsts5 = 0;
+
+ if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT)
+ return;
+
+ hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+ if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
+ dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
+ dev->pxp_mode = MEI_DEV_PXP_READY;
+ } else {
+ dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
+ }
+}
+
+/**
* mei_me_hw_ready_wait - wait until the me(hw) has turned ready
* or timeout is reached
*
@@ -435,13 +468,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
- mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
+ dev->timeouts.hw_ready);
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
dev_err(dev->dev, "wait hw ready failed\n");
return -ETIME;
}
+ mei_gsc_pxp_check(dev);
+
mei_me_hw_reset_release(dev);
dev->recvd_hw_ready = false;
return 0;
@@ -697,7 +732,6 @@ static void mei_me_pg_unset(struct mei_device *dev)
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
dev->pg_event = MEI_PG_EVENT_WAIT;
@@ -708,7 +742,8 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
@@ -734,7 +769,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
@@ -746,7 +780,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
reply:
@@ -762,7 +797,8 @@ reply:
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
@@ -877,8 +913,6 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev)
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
- unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
u32 reg;
@@ -900,7 +934,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
@@ -920,7 +955,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@ -980,7 +1016,6 @@ on:
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
int ret;
u32 reg;
@@ -1003,7 +1038,8 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
@@ -1176,7 +1212,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
- if (!intr_enable)
+ if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
hcsr &= ~H_CSR_IE_MASK;
dev->recvd_hw_ready = false;
@@ -1259,7 +1295,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
- dev_warn(dev->dev, "FW not ready: resetting.\n");
+ dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n",
+ dev->dev_state, dev->pxp_mode);
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN)
mei_cl_all_disconnect(dev);
@@ -1331,6 +1368,66 @@ end:
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
+#define MEI_POLLING_TIMEOUT_ACTIVE 100
+#define MEI_POLLING_TIMEOUT_IDLE 500
+
+/**
+ * mei_me_polling_thread - interrupt register polling thread
+ *
+ * The thread monitors the interrupt source register and calls
+ * mei_me_irq_thread_handler() to handle the firmware
+ * input.
+ *
+ * The function polls with a MEI_POLLING_TIMEOUT_ACTIVE interval while
+ * events are being received; in the idle case the polling interval is
+ * increased by MEI_POLLING_TIMEOUT_ACTIVE on each iteration, up to
+ * MEI_POLLING_TIMEOUT_IDLE.
+ *
+ * @_dev: mei device
+ *
+ * Return: always 0
+ */
+int mei_me_polling_thread(void *_dev)
+{
+ struct mei_device *dev = _dev;
+ irqreturn_t irq_ret;
+ long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+
+ dev_dbg(dev->dev, "kernel thread is running\n");
+ while (!kthread_should_stop()) {
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr;
+
+ wait_event_timeout(hw->wait_active,
+ hw->is_active || kthread_should_stop(),
+ msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
+
+ if (kthread_should_stop())
+ break;
+
+ hcsr = mei_hcsr_read(dev);
+ if (me_intr_src(hcsr)) {
+ polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+ irq_ret = mei_me_irq_thread_handler(1, dev);
+ if (irq_ret != IRQ_HANDLED)
+ dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+ } else {
+ /*
+ * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
+ * up to MEI_POLLING_TIMEOUT_IDLE
+ */
+ polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
+ MEI_POLLING_TIMEOUT_ACTIVE,
+ MEI_POLLING_TIMEOUT_IDLE);
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_me_polling_thread);
+
static const struct mei_hw_ops mei_me_hw_ops = {
.trc_status = mei_me_trc_status,
@@ -1636,11 +1733,12 @@ EXPORT_SYMBOL_GPL(mei_me_get_cfg);
*
* @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
+ * @slow_fw: configure longer timeouts as FW is slow
*
* Return: The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_me_dev_init(struct device *parent,
- const struct mei_cfg *cfg)
+ const struct mei_cfg *cfg, bool slow_fw)
{
struct mei_device *dev;
struct mei_me_hw *hw;
@@ -1655,7 +1753,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, parent, &mei_me_hw_ops);
+ mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index a071c645e905..95cf830b7c7b 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -51,6 +51,9 @@ struct mei_cfg {
* @d0i3_supported: di03 support
* @hbuf_depth: depth of hardware host/write buffer in slots
* @read_fws: read FW status register handler
+ * @polling_thread: interrupt polling thread
+ * @wait_active: the polling thread activity wait queue
+ * @is_active: the device is active
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
@@ -60,10 +63,19 @@ struct mei_me_hw {
bool d0i3_supported;
u8 hbuf_depth;
int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
+ /* polling */
+ struct task_struct *polling_thread;
+ wait_queue_head_t wait_active;
+ bool is_active;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
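+
+/*
+ * A negative IRQ number indicates that no interrupt line is available,
+ * in which case the driver falls back to the polling thread started in
+ * mei_gsc_probe().
+ */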
+static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw)
+{
+ return hw->irq < 0;
+}
+
/**
* enum mei_cfg_idx - indices to platform specific configurations.
*
@@ -120,12 +132,13 @@ enum mei_cfg_idx {
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
struct mei_device *mei_me_dev_init(struct device *parent,
- const struct mei_cfg *cfg);
+ const struct mei_cfg *cfg, bool slow_fw);
int mei_me_pg_enter_sync(struct mei_device *dev);
int mei_me_pg_exit_sync(struct mei_device *dev);
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+int mei_me_polling_thread(void *_dev);
#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 00652c137cc7..5d0f68b95c29 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -176,7 +176,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
* @dev: the device structure
*
* Extract HICR_HOST_ALIVENESS_RESP_ACK bit from
- * from HICR_HOST_ALIVENESS_REQ register value
+ * HICR_HOST_ALIVENESS_REQ register value
*
* Return: SICR_HOST_ALIVENESS_REQ_REQUESTED bit value
*/
@@ -1201,7 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
- mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);
+ mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);
hw = to_txe_hw(dev);
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b46077b17114..e7e020dba6b1 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -16,11 +16,16 @@
#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+#define MEI_CL_CONNECT_TIMEOUT_SLOW 30 /* HPS: Client Connect Timeout, slow FW */
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
+#define MEI_HBM_TIMEOUT_SLOW 5 /* 5 second, slow FW */
+
+#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
+#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */
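+/* The *_SLOW variants are selected by mei_device_init() when slow_fw is set */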
/*
* FW page size for DMA allocations
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index eb052005ca86..bac8852aad51 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -218,16 +218,6 @@ int mei_start(struct mei_device *dev)
goto err;
}
- if (!mei_host_is_ready(dev)) {
- dev_err(dev->dev, "host is not ready.\n");
- goto err;
- }
-
- if (!mei_hw_is_ready(dev)) {
- dev_err(dev->dev, "ME is not ready.\n");
- goto err;
- }
-
if (!mei_hbm_version_is_supported(dev)) {
dev_dbg(dev->dev, "MEI start failed.\n");
goto err;
@@ -320,6 +310,8 @@ void mei_stop(struct mei_device *dev)
mei_clear_interrupts(dev);
mei_synchronize_irq(dev);
+ /* to catch HW-initiated reset */
+ mei_cancel_work(dev);
mutex_lock(&dev->device_lock);
@@ -357,14 +349,16 @@ bool mei_write_is_idle(struct mei_device *dev)
EXPORT_SYMBOL_GPL(mei_write_is_idle);
/**
- * mei_device_init -- initialize mei_device structure
+ * mei_device_init - initialize mei_device structure
*
* @dev: the mei device
* @device: the device structure
+ * @slow_fw: configure longer timeouts as FW is slow
* @hw_ops: hw operations
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
+ bool slow_fw,
const struct mei_hw_ops *hw_ops)
{
/* setup our list array */
@@ -393,6 +387,8 @@ void mei_device_init(struct mei_device *dev,
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
+ dev->pxp_mode = MEI_DEV_PXP_DEFAULT;
+
/*
* Reserving the first client ID
* 0: Reserved for MEI Bus Message communications
@@ -402,6 +398,21 @@ void mei_device_init(struct mei_device *dev,
dev->pg_event = MEI_PG_EVENT_IDLE;
dev->ops = hw_ops;
dev->dev = device;
+
+ dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
+ dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
+ dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+ if (slow_fw) {
+ dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW);
+ dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW);
+ dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW);
+ } else {
+ dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+ dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
+ dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
+ }
}
EXPORT_SYMBOL_GPL(mei_device_init);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 786f7c8f7f61..930887e7e38d 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -571,7 +571,7 @@ static int mei_ioctl_connect_vtag(struct file *file,
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 694f866f87ef..6bb3e1ba9ded 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -62,6 +62,21 @@ enum mei_dev_state {
MEI_DEV_POWER_UP
};
+/**
+ * enum mei_dev_pxp_mode - MEI PXP mode state
+ *
+ * @MEI_DEV_PXP_DEFAULT: PCH-based device, no initialization required
+ * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware
+ * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response
+ * @MEI_DEV_PXP_READY: device initialized
+ */
+enum mei_dev_pxp_mode {
+ MEI_DEV_PXP_DEFAULT = 0,
+ MEI_DEV_PXP_INIT = 1,
+ MEI_DEV_PXP_SETUP = 2,
+ MEI_DEV_PXP_READY = 3,
+};
+
const char *mei_dev_state_str(int state);
enum mei_file_transaction_states {
@@ -415,6 +430,17 @@ struct mei_fw_version {
#define MEI_MAX_FW_VER_BLOCKS 3
+struct mei_dev_timeouts {
+ unsigned long hw_ready; /* Timeout on ready message, in jiffies */
+ int connect; /* HPS: at least 2 seconds, in seconds */
+ unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */
+ int client_init; /* HPS: Clients Enumeration Timeout, in seconds */
+ unsigned long pgi; /* PG Isolation time response, in jiffies */
+ unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */
+ unsigned long hbm; /* HBM operation timeout, in jiffies */
+ unsigned long mkhi_recv; /* receive timeout, in jiffies */
+};
+
/**
* struct mei_device - MEI private device struct
*
@@ -443,6 +469,7 @@ struct mei_fw_version {
* @reset_count : number of consecutive resets
* @dev_state : device state
* @hbm_state : state of host bus message protocol
+ * @pxp_mode : PXP device mode
* @init_clients_timer : HBM init handshake timeout
*
* @pg_event : power gating event
@@ -480,6 +507,8 @@ struct mei_fw_version {
* @allow_fixed_address: allow user space to connect a fixed client
* @override_fixed_address: force allow fixed address behavior
*
+ * @timeouts: actual timeout values
+ *
* @reset_work : work item for the device reset
* @bus_rescan_work : work item for the bus rescan
*
@@ -524,6 +553,7 @@ struct mei_device {
unsigned long reset_count;
enum mei_dev_state dev_state;
enum mei_hbm_state hbm_state;
+ enum mei_dev_pxp_mode pxp_mode;
u16 init_clients_timer;
/*
@@ -568,6 +598,8 @@ struct mei_device {
bool allow_fixed_address;
bool override_fixed_address;
+ struct mei_dev_timeouts timeouts;
+
struct work_struct reset_work;
struct work_struct bus_rescan_work;
@@ -632,6 +664,7 @@ static inline u32 mei_slots2data(int slots)
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
+ bool slow_fw,
const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
diff --git a/drivers/misc/mei/mkhi.h b/drivers/misc/mei/mkhi.h
new file mode 100644
index 000000000000..1473ea489666
--- /dev/null
+++ b/drivers/misc/mei/mkhi.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+
+#ifndef _MEI_MKHI_H_
+#define _MEI_MKHI_H_
+
+#include <linux/types.h>
+
+#define MKHI_FEATURE_PTT 0x10
+
+#define MKHI_FWCAPS_GROUP_ID 0x3
+#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+#define MKHI_GEN_GROUP_ID 0xFF
+#define MKHI_GEN_GET_FW_VERSION_CMD 0x2
+
+#define MKHI_GROUP_ID_GFX 0x30
+#define MKHI_GFX_RESET_WARN_CMD_REQ 0x0
+#define MKHI_GFX_MEMORY_READY_CMD_REQ 0x1
+
+/* Allow transition to PXP mode without approval */
+#define MKHI_GFX_MEM_READY_PXP_ALLOWED 0x1
+
+struct mkhi_rule_id {
+ __le16 rule_type;
+ u8 feature_id;
+ u8 reserved;
+} __packed;
+
+struct mkhi_fwcaps {
+ struct mkhi_rule_id id;
+ u8 len;
+ u8 data[];
+} __packed;
+
+struct mkhi_msg_hdr {
+ u8 group_id;
+ u8 command;
+ u8 reserved;
+ u8 result;
+} __packed;
+
+struct mkhi_msg {
+ struct mkhi_msg_hdr hdr;
+ u8 data[];
+} __packed;
+
+struct mkhi_gfx_mem_ready {
+ struct mkhi_msg_hdr hdr;
+ u32 flags;
+} __packed;
+
+#endif /* _MEI_MKHI_H_ */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 5435604327a7..704cd0caa172 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -203,7 +203,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(&pdev->dev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg, false);
if (!dev) {
err = -ENOMEM;
goto end;
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 6777c419a8da..d46dba2df5a1 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -257,6 +257,8 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
if (IS_ERR(ev_ctx))
return PTR_ERR(ev_ctx);
rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
+ if (rc)
+ eventfd_ctx_put(ev_ctx);
break;
case OCXL_IOCTL_GET_METADATA:
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 8f786a225dcf..11530b4ec389 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -332,6 +332,22 @@ static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
return false;
}
+static int pci_endpoint_test_validate_xfer_params(struct device *dev,
+ struct pci_endpoint_test_xfer_param *param, size_t alignment)
+{
+ if (!param->size) {
+ dev_dbg(dev, "Data size is zero\n");
+ return -EINVAL;
+ }
+
+ if (param->size > SIZE_MAX - alignment) {
+ dev_dbg(dev, "Maximum transfer data size exceeded\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
@@ -363,9 +379,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
@@ -497,9 +515,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
@@ -595,9 +615,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
return false;
}
+ err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
+ if (err)
+ return false;
+
size = param.size;
- if (size > SIZE_MAX - alignment)
- goto err;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 9f9af77f8d2e..f1336f43d3bd 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -334,10 +334,6 @@ extern int (*xp_cpu_to_nasid) (int);
extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
-extern u64 xp_nofault_PIOR_target;
-extern int xp_nofault_PIOR(void *);
-extern int xp_error_PIOR(void);
-
extern struct device *xp;
extern enum xp_retval xp_init_uv(void);
extern void xp_exit_uv(void);
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 6d71865c8042..1652fb9b3856 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -389,7 +389,7 @@ exit:
return err;
}
-static int tsl2550_remove(struct i2c_client *client)
+static void tsl2550_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
@@ -397,8 +397,6 @@ static int tsl2550_remove(struct i2c_client *client)
tsl2550_set_power_state(client, 0);
kfree(i2c_get_clientdata(client));
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8f2de1893245..e71068f7759b 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -324,7 +324,7 @@ static void *qp_alloc_queue(u64 size, u32 flags)
/*
* Copies from a given buffer or iovector to a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * kmap_local_page() to dynamically map required portions of the queue
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
@@ -345,7 +345,7 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
size_t to_copy;
if (kernel_if->host)
- va = kmap(kernel_if->u.h.page[page_index]);
+ va = kmap_local_page(kernel_if->u.h.page[page_index]);
else
va = kernel_if->u.g.vas[page_index + 1];
/* Skip header. */
@@ -359,12 +359,12 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
from)) {
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
return VMCI_ERROR_INVALID_ARGS;
}
bytes_copied += to_copy;
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
}
return VMCI_SUCCESS;
@@ -372,7 +372,7 @@ static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
/*
* Copies to a given buffer or iovector from a VMCI Queue. Uses
- * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * kmap_local_page() to dynamically map required portions of the queue
* by traversing the offset -> page translation structure for the queue.
* Assumes that offset + size does not wrap around in the queue.
*/
@@ -393,7 +393,7 @@ static int qp_memcpy_from_queue_iter(struct iov_iter *to,
int err;
if (kernel_if->host)
- va = kmap(kernel_if->u.h.page[page_index]);
+ va = kmap_local_page(kernel_if->u.h.page[page_index]);
else
va = kernel_if->u.g.vas[page_index + 1];
/* Skip header. */
@@ -407,12 +407,12 @@ static int qp_memcpy_from_queue_iter(struct iov_iter *to,
err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
if (err != to_copy) {
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
return VMCI_ERROR_INVALID_ARGS;
}
bytes_copied += to_copy;
if (kernel_if->host)
- kunmap(kernel_if->u.h.page[page_index]);
+ kunmap_local(va);
}
return VMCI_SUCCESS;
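The vmci_queue_pair hunks convert kmap()/kunmap() to kmap_local_page()/kunmap_local(): the mapping is short-lived and CPU-local, kunmap_local() takes the returned address rather than the page, and nested mappings must be released in reverse order. A minimal standalone illustration (helper name invented for the example):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy a buffer into a (possibly highmem) page through a temporary,
 * CPU-local mapping. */
static void example_copy_to_page(struct page *page, size_t offset,
				 const void *src, size_t len)
{
	void *va = kmap_local_page(page);

	memcpy(va + offset, src, len);
	kunmap_local(va);
}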
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index d6e3c650bd11..cb9506f9cbd0 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -636,7 +636,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
}
for (i = 0; i < nr_pages; i++) {
- addr = kmap(pages[i]);
+ addr = kmap_local_page(pages[i]);
do {
xsdfec_regwrite(xsdfec,
base_addr + ((offset + reg) *
@@ -645,6 +645,7 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
reg++;
} while ((reg < len) &&
((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
+ kunmap_local(addr);
unpin_user_page(pages[i]);
}
return 0;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0fd91f749b3a..b89dca1f15e9 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -565,7 +565,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
- INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
+ INIT_WORK(&host->sdio_irq_work, sdio_irq_work);
timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index cee4c0b59f43..3662bf5320ce 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -870,7 +870,8 @@ try_again:
* the CCS bit is set as well. We deliberately deviate from the spec in
* regards to this, which allows UHS-I to be supported for SDSC cards.
*/
- if (!mmc_host_is_spi(host) && rocr && (*rocr & SD_ROCR_S18A)) {
+ if (!mmc_host_is_spi(host) && (ocr & SD_OCR_S18R) &&
+ rocr && (*rocr & SD_ROCR_S18A)) {
err = mmc_set_uhs_voltage(host, pocr);
if (err == -EAGAIN) {
retries--;
@@ -949,16 +950,17 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
-
- /*
- * Fetch switch information from card.
- */
- err = mmc_read_switch(card);
- if (err)
- return err;
}
/*
+ * Fetch switch information from card. Note, sd3_bus_mode can change if
+ * voltage switch outcome changes, so do this always.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
@@ -1480,26 +1482,15 @@ retry:
if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
- /*
- * Re-read switch information in case it has changed since
- * oldcard was initialized.
- */
- if (oldcard) {
- err = mmc_read_switch(card);
- if (err)
- goto free_card;
- }
- if (mmc_sd_card_using_v18(card)) {
- if (mmc_host_set_uhs_voltage(host) ||
- mmc_sd_init_uhs_card(card)) {
- v18_fixup_failed = true;
- mmc_power_cycle(host, ocr);
- if (!oldcard)
- mmc_remove_card(card);
- goto retry;
- }
- goto done;
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
}
+ goto cont;
}
/* Initialization sequence for UHS-I cards */
@@ -1534,7 +1525,7 @@ retry:
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+cont:
if (!oldcard) {
/* Read/parse the extension registers. */
err = sd_read_ext_regs(card);
@@ -1566,7 +1557,7 @@ retry:
err = -EINVAL;
goto free_card;
}
-done:
+
host->card = card;
return 0;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 0b682a31cd3e..f64b9ac76a5c 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1043,7 +1043,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
/* Prevent processing of SDIO IRQs in suspended state. */
mmc_card_set_suspended(host->card);
- cancel_delayed_work_sync(&host->sdio_irq_work);
+ cancel_work_sync(&host->sdio_irq_work);
mmc_claim_host(host);
@@ -1103,7 +1103,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
wake_up_process(host->sdio_irq_thread);
else if (host->caps & MMC_CAP_SDIO_IRQ)
- queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ schedule_work(&host->sdio_irq_work);
}
out:
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 4b1f7c966ec8..2b24bdf38296 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -124,7 +124,7 @@ static void sdio_run_irqs(struct mmc_host *host)
void sdio_irq_work(struct work_struct *work)
{
struct mmc_host *host =
- container_of(work, struct mmc_host, sdio_irq_work.work);
+ container_of(work, struct mmc_host, sdio_irq_work);
sdio_run_irqs(host);
}
@@ -132,7 +132,7 @@ void sdio_irq_work(struct work_struct *work)
void sdio_signal_irq(struct mmc_host *host)
{
host->sdio_irq_pending = true;
- queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ schedule_work(&host->sdio_irq_work);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
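The mmc core hunks above and the sdio.c/host.c changes turn sdio_irq_work from a delayed_work into a plain work_struct: it was only ever queued with a zero delay, so schedule_work()/cancel_work_sync() are enough and container_of() no longer needs to go through the embedded .work member. A condensed sketch of the converted pattern (struct and function names invented, mirroring the hunks):

#include <linux/workqueue.h>

struct example_host {
	struct work_struct sdio_irq_work;	/* was struct delayed_work */
};

static void example_sdio_irq_work(struct work_struct *work)
{
	/* with delayed_work this was container_of(work, ..., sdio_irq_work.work) */
	struct example_host *host =
		container_of(work, struct example_host, sdio_irq_work);

	/* ... process pending SDIO interrupts for host ... */
	(void)host;
}

static void example_host_init(struct example_host *host)
{
	INIT_WORK(&host->sdio_irq_work, example_sdio_irq_work);
}

static void example_signal_irq(struct example_host *host)
{
	/* queue_delayed_work(system_wq, &work, 0) becomes schedule_work(&work) */
	schedule_work(&host->sdio_irq_work);
}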
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 414aa82abc39..ae7ef2e038be 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -246,7 +246,7 @@ static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
static void sdio_uart_change_speed(struct sdio_uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned char cval, fcr = 0;
unsigned int baud, quot;
@@ -859,7 +859,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
}
static void sdio_uart_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct sdio_uart_port *port = tty->driver_data;
unsigned int cflag = tty->termios.c_cflag;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e63608834411..f324daadaf70 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -157,6 +157,7 @@ config MMC_SDHCI_OF_ARASAN
config MMC_SDHCI_OF_ASPEED
tristate "SDHCI OF support for the ASPEED SDHCI controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
depends on OF && OF_ADDRESS
select MMC_SDHCI_IO_ACCESSORS
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index a9a0837153d8..c88b039dc9fb 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1097,8 +1097,9 @@ out5:
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
-out_clk:
+
clk_disable_unprepare(host->clk);
+out_clk:
clk_put(host->clk);
out_irq:
free_irq(host->irq, host);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b1d563b2ed1b..dc2db9c185ea 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -298,7 +298,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
{
struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
enum dma_data_direction dir = mmc_get_dma_dir(data);
- int sg_count;
+ unsigned int sg_count;
if (data->host_cookie == COOKIE_PREMAPPED)
return data->sg_count;
@@ -308,7 +308,7 @@ static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
data->sg_len,
dir);
- if (sg_count <= 0) {
+ if (!sg_count) {
dev_err(mmc_dev(host->mmc),
"Failed to map scatterlist for DMA operation\n");
return -EINVAL;
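The jz4740 hunk above (and the matching meson-mx-sdhc change further down) adjusts the dma_map_sg() error check to the API's actual contract: the return value is the number of mapped entries and is never negative, so it belongs in an unsigned int and only zero indicates failure. A minimal sketch of the idiom:

#include <linux/dma-mapping.h>

/* Map a scatterlist for DMA; zero is the only failure indication. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  unsigned int nents, enum dma_data_direction dir)
{
	unsigned int count;

	count = dma_map_sg(dev, sgl, nents, dir);
	if (!count) {
		dev_err(dev, "failed to map scatterlist for DMA\n");
		return -EINVAL;
	}

	return count;
}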
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index fc462995cf94..df05e60bed9a 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -41,14 +41,17 @@
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
#define CLK_V2_ALWAYS_ON BIT(24)
+#define CLK_V2_IRQ_SDIO_SLEEP BIT(25)
#define CLK_V3_TX_DELAY_MASK GENMASK(21, 16)
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
#define CLK_V3_ALWAYS_ON BIT(28)
+#define CLK_V3_IRQ_SDIO_SLEEP BIT(29)
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
#define CLK_ALWAYS_ON(h) (h->data->always_on)
+#define CLK_IRQ_SDIO_SLEEP(h) (h->data->irq_sdio_sleep)
#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
@@ -101,8 +104,7 @@
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)
#define IRQ_EN_MASK \
- (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN | IRQ_RESP_STATUS |\
- IRQ_SDIO)
+ (IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN)
#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
@@ -136,6 +138,7 @@ struct meson_mmc_data {
unsigned int rx_delay_mask;
unsigned int always_on;
unsigned int adjust;
+ unsigned int irq_sdio_sleep;
};
struct sd_emmc_desc {
@@ -175,6 +178,7 @@ struct meson_host {
bool vqmmc_enabled;
bool needs_pre_post_req;
+ spinlock_t lock;
};
#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
@@ -431,6 +435,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
+ clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
/* get the mux parents */
@@ -929,33 +934,54 @@ static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
}
}
+static void __meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ u32 reg_irqen = IRQ_EN_MASK;
+
+ if (enable)
+ reg_irqen |= IRQ_SDIO;
+ writel(reg_irqen, host->regs + SD_EMMC_IRQ_EN);
+}
+
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_command *cmd;
- struct mmc_data *data;
- u32 irq_en, status, raw_status;
+ u32 status, raw_status;
irqreturn_t ret = IRQ_NONE;
- irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
raw_status = readl(host->regs + SD_EMMC_STATUS);
- status = raw_status & irq_en;
+ status = raw_status & (IRQ_EN_MASK | IRQ_SDIO);
if (!status) {
dev_dbg(host->dev,
- "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
- irq_en, raw_status);
+ "Unexpected IRQ! irq_en 0x%08lx - status 0x%08x\n",
+ IRQ_EN_MASK | IRQ_SDIO, raw_status);
return IRQ_NONE;
}
- if (WARN_ON(!host) || WARN_ON(!host->cmd))
+ if (WARN_ON(!host))
return IRQ_NONE;
/* ack all raised interrupts */
writel(status, host->regs + SD_EMMC_STATUS);
cmd = host->cmd;
- data = cmd->data;
+
+ if (status & IRQ_SDIO) {
+ spin_lock(&host->lock);
+ __meson_mmc_enable_sdio_irq(host->mmc, 0);
+ sdio_signal_irq(host->mmc);
+ spin_unlock(&host->lock);
+ status &= ~IRQ_SDIO;
+ if (!status)
+ return IRQ_HANDLED;
+ }
+
+ if (WARN_ON(!cmd))
+ return IRQ_NONE;
+
cmd->error = 0;
if (status & IRQ_CRC_ERR) {
dev_dbg(host->dev, "CRC Error - status 0x%08x\n", status);
@@ -973,12 +999,9 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
meson_mmc_read_resp(host->mmc, cmd);
- if (status & IRQ_SDIO) {
- dev_dbg(host->dev, "IRQ: SDIO TODO.\n");
- ret = IRQ_HANDLED;
- }
-
if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
+ struct mmc_data *data = cmd->data;
+
if (data && !cmd->error)
data->bytes_xfered = data->blksz * data->blocks;
if (meson_mmc_bounce_buf_read(data) ||
@@ -1121,6 +1144,21 @@ static int meson_mmc_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
return -EINVAL;
}
+static void meson_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+ struct meson_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ __meson_mmc_enable_sdio_irq(mmc, enable);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static void meson_mmc_ack_sdio_irq(struct mmc_host *mmc)
+{
+ meson_mmc_enable_sdio_irq(mmc, 1);
+}
+
static const struct mmc_host_ops meson_mmc_ops = {
.request = meson_mmc_request,
.set_ios = meson_mmc_set_ios,
@@ -1130,6 +1168,8 @@ static const struct mmc_host_ops meson_mmc_ops = {
.execute_tuning = meson_mmc_resampling_tuning,
.card_busy = meson_mmc_card_busy,
.start_signal_voltage_switch = meson_mmc_voltage_switch,
+ .enable_sdio_irq = meson_mmc_enable_sdio_irq,
+ .ack_sdio_irq = meson_mmc_ack_sdio_irq,
};
static int meson_mmc_probe(struct platform_device *pdev)
@@ -1226,10 +1266,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* clear, ack and enable interrupts */
writel(0, host->regs + SD_EMMC_IRQ_EN);
- writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
- host->regs + SD_EMMC_STATUS);
- writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
- host->regs + SD_EMMC_IRQ_EN);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
+ writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);
ret = request_threaded_irq(host->irq, meson_mmc_irq,
meson_mmc_irq_thread, IRQF_ONESHOT,
@@ -1237,7 +1275,13 @@ static int meson_mmc_probe(struct platform_device *pdev)
if (ret)
goto err_init_clk;
+ spin_lock_init(&host->lock);
+
mmc->caps |= MMC_CAP_CMD23;
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
if (host->dram_access_quirk) {
/* Limit segments to 1 due to low available sram memory */
mmc->max_segs = 1;
@@ -1328,6 +1372,7 @@ static const struct meson_mmc_data meson_gx_data = {
.rx_delay_mask = CLK_V2_RX_DELAY_MASK,
.always_on = CLK_V2_ALWAYS_ON,
.adjust = SD_EMMC_ADJUST,
+ .irq_sdio_sleep = CLK_V2_IRQ_SDIO_SLEEP,
};
static const struct meson_mmc_data meson_axg_data = {
@@ -1335,6 +1380,7 @@ static const struct meson_mmc_data meson_axg_data = {
.rx_delay_mask = CLK_V3_RX_DELAY_MASK,
.always_on = CLK_V3_ALWAYS_ON,
.adjust = SD_EMMC_V3_ADJUST,
+ .irq_sdio_sleep = CLK_V3_IRQ_SDIO_SLEEP,
};
static const struct of_device_id meson_mmc_of_match[] = {
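The meson-gx changes add native SDIO interrupt support using the MMC_CAP2_SDIO_IRQ_NOTHREAD flow: the hard IRQ handler masks IRQ_SDIO under a spinlock and calls sdio_signal_irq(); once the core has run the card's handlers it invokes .ack_sdio_irq, which unmasks the interrupt again via .enable_sdio_irq. A loose sketch of that flow with the meson register writes replaced by a stub (all example_* names are invented):

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

static void example_hw_mask_sdio_irq(struct mmc_host *mmc, int enable)
{
	/* host-specific: set or clear the SDIO bit in the IRQ enable register */
}

static void example_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	example_hw_mask_sdio_irq(mmc, enable);
}

static void example_ack_sdio_irq(struct mmc_host *mmc)
{
	/* core calls this once the card handlers have run: unmask again */
	example_enable_sdio_irq(mmc, 1);
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* ... on seeing the SDIO status bit: mask it and notify the core */
	example_enable_sdio_irq(mmc, 0);
	sdio_signal_irq(mmc);

	return IRQ_HANDLED;
}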
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index e92e63cb5641..da85c2f2acb8 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -381,14 +381,14 @@ static void meson_mx_sdhc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
static int meson_mx_sdhc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- int dma_len;
+ unsigned int dma_len;
if (!data)
return 0;
dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
- if (dma_len <= 0) {
+ if (!dma_len) {
dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
return -ENOMEM;
}
diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index a5e05ed0fda3..9d35453e7371 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -34,7 +34,7 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
spin_lock_irqsave(&hsq->lock, flags);
/* Make sure we are not already running a request now */
- if (hsq->mrq) {
+ if (hsq->mrq || hsq->recovery_halt) {
spin_unlock_irqrestore(&hsq->lock, flags);
return;
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index b6eb75f4bbfc..dfc3ffd5b1f8 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -111,8 +111,8 @@
#define CLK_DIV_MASK 0x7f
/* REG_BUS_WIDTH */
-#define BUS_WIDTH_8 BIT(2)
-#define BUS_WIDTH_4 BIT(1)
+#define BUS_WIDTH_4_SUPPORT BIT(3)
+#define BUS_WIDTH_4 BIT(2)
#define BUS_WIDTH_1 BIT(0)
#define MMC_VDD_360 23
@@ -524,9 +524,6 @@ static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_BUS_WIDTH_4:
writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH);
break;
- case MMC_BUS_WIDTH_8:
- writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH);
- break;
default:
writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH);
break;
@@ -651,16 +648,8 @@ static int moxart_probe(struct platform_device *pdev)
dmaengine_slave_config(host->dma_chan_rx, &cfg);
}
- switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) {
- case 1:
+ if (readl(host->base + REG_BUS_WIDTH) & BUS_WIDTH_4_SUPPORT)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- break;
- case 2:
- mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- break;
- default:
- break;
- }
writel(0, host->base + REG_INTERRUPT_MASK);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 69d78604d1fc..df941438aef5 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -474,33 +474,20 @@ struct msdc_host {
struct cqhci_host *cq_host;
};
-static const struct mtk_mmc_compatible mt8135_compat = {
- .clk_div_bits = 8,
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
.recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE,
- .async_fifo = false,
- .data_tune = false,
- .busy_check = false,
- .stop_clk_fix = false,
- .enhance_rx = false,
- .support_64g = false,
-};
-
-static const struct mtk_mmc_compatible mt8173_compat = {
- .clk_div_bits = 8,
- .recheck_sdio_irq = true,
- .hs400_tune = true,
- .pad_tune_reg = MSDC_PAD_TUNE,
- .async_fifo = false,
- .data_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt8183_compat = {
+static const struct mtk_mmc_compatible mt2712_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
@@ -513,30 +500,43 @@ static const struct mtk_mmc_compatible mt8183_compat = {
.support_64g = true,
};
-static const struct mtk_mmc_compatible mt2701_compat = {
+static const struct mtk_mmc_compatible mt6779_compat = {
.clk_div_bits = 12,
- .recheck_sdio_irq = true,
+ .recheck_sdio_irq = false,
.hs400_tune = false,
.pad_tune_reg = MSDC_PAD_TUNE0,
.async_fifo = true,
.data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+ .support_64g = true,
+};
+
+static const struct mtk_mmc_compatible mt6795_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = false,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt2712_compat = {
- .clk_div_bits = 12,
- .recheck_sdio_irq = false,
+static const struct mtk_mmc_compatible mt7620_compat = {
+ .clk_div_bits = 8,
+ .recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE0,
- .async_fifo = true,
- .data_tune = true,
- .busy_check = true,
- .stop_clk_fix = true,
- .enhance_rx = true,
- .support_64g = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .use_internal_cd = true,
};
static const struct mtk_mmc_compatible mt7622_compat = {
@@ -552,31 +552,33 @@ static const struct mtk_mmc_compatible mt7622_compat = {
.support_64g = false,
};
-static const struct mtk_mmc_compatible mt8516_compat = {
- .clk_div_bits = 12,
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
.recheck_sdio_irq = true,
.hs400_tune = false,
- .pad_tune_reg = MSDC_PAD_TUNE0,
- .async_fifo = true,
- .data_tune = true,
- .busy_check = true,
- .stop_clk_fix = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+ .support_64g = false,
};
-static const struct mtk_mmc_compatible mt7620_compat = {
+static const struct mtk_mmc_compatible mt8173_compat = {
.clk_div_bits = 8,
.recheck_sdio_irq = true,
- .hs400_tune = false,
+ .hs400_tune = true,
.pad_tune_reg = MSDC_PAD_TUNE,
.async_fifo = false,
.data_tune = false,
.busy_check = false,
.stop_clk_fix = false,
.enhance_rx = false,
- .use_internal_cd = true,
+ .support_64g = false,
};
-static const struct mtk_mmc_compatible mt6779_compat = {
+static const struct mtk_mmc_compatible mt8183_compat = {
.clk_div_bits = 12,
.recheck_sdio_irq = false,
.hs400_tune = false,
@@ -589,16 +591,29 @@ static const struct mtk_mmc_compatible mt6779_compat = {
.support_64g = true,
};
+static const struct mtk_mmc_compatible mt8516_compat = {
+ .clk_div_bits = 12,
+ .recheck_sdio_irq = true,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+};
+
static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
- { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
- { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
+ { .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
+ { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
- { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
- { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
+
{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 5fe4528e296e..5798aee06653 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1042,7 +1042,6 @@ static int sd_set_timing(struct rtsx_usb_sdmmc *host,
unsigned char timing, bool *ddr_mode)
{
struct rtsx_ucr *ucr = host->ucr;
- int err;
*ddr_mode = false;
@@ -1097,9 +1096,7 @@ static int sd_set_timing(struct rtsx_usb_sdmmc *host,
break;
}
- err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
-
- return err;
+ return rtsx_usb_send_cmd(ucr, MODE_C, 100);
}
static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index dc2991422a87..3a091a387ecb 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2441,6 +2441,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
*/
{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
+ {.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
{},
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 622b7de96c7f..169b84761041 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -297,6 +297,27 @@ static const struct sdhci_pci_fixes sdhci_ricoh_mmc = {
SDHCI_QUIRK_MISSING_CAPS
};
+static void ene_714_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_set_ios(mmc, ios);
+
+ /*
+ * Some (ENE) controllers misbehave on some ios operations,
+ * signalling timeout and CRC errors even on CMD0. Resetting
+ * it on each ios seems to solve the problem.
+ */
+ if (!(host->flags & SDHCI_DEVICE_DEAD))
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+}
+
+static int ene_714_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc_host_ops.set_ios = ene_714_set_ios;
+ return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_ene_712 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_BROKEN_DMA,
@@ -304,8 +325,8 @@ static const struct sdhci_pci_fixes sdhci_ene_712 = {
static const struct sdhci_pci_fixes sdhci_ene_714 = {
.quirks = SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS |
SDHCI_QUIRK_BROKEN_DMA,
+ .probe_slot = ene_714_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_cafe = {
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 0d4d343dbb77..ad457cd9cbaa 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -317,11 +317,12 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
u32 reg_val;
/*
- * This handler only implements the eMMC tuning that is specific to
+ * This handler implements the hardware tuning that is specific to
* this controller. Fall back to the standard method for other TIMING.
*/
if ((host->timing != MMC_TIMING_MMC_HS200) &&
- (host->timing != MMC_TIMING_UHS_SDR104))
+ (host->timing != MMC_TIMING_UHS_SDR104) &&
+ (host->timing != MMC_TIMING_UHS_SDR50))
return sdhci_execute_tuning(mmc, opcode);
if (WARN_ON((opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
@@ -631,6 +632,8 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (reg & 0x1)
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_DDR50;
+
sdhci_pci_o2_enable_msi(chip, host);
if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) {
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index f33e9349e4e6..46c55ab4884c 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -205,14 +205,14 @@ static inline u32 sdhci_sprd_calc_div(u32 base_clk, u32 clk)
if ((base_clk / div) > (clk * 2))
div++;
- if (div > SDHCI_SPRD_CLK_MAX_DIV)
- div = SDHCI_SPRD_CLK_MAX_DIV;
-
if (div % 2)
div = (div + 1) / 2;
else
div = div / 2;
+ if (div > SDHCI_SPRD_CLK_MAX_DIV)
+ div = SDHCI_SPRD_CLK_MAX_DIV;
+
return div;
}
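The sdhci-sprd hunk moves the SDHCI_SPRD_CLK_MAX_DIV clamp so it applies to the final, halved register value rather than to the intermediate divisor. With the old ordering the written value could never reach the hardware maximum, so very low requested clocks ran up to twice as fast as intended. A worked example (a maximum register value of 255 is assumed purely for illustration; note the halving step rounds odd values up):

	computed div = 1000, assumed SDHCI_SPRD_CLK_MAX_DIV = 255
	old order: clamp(1000) -> 255, halve -> 128   register value stuck at half the maximum
	new order: halve(1000) -> 500, clamp -> 255   register value can reach the maximum division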
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 7689ffec5ad1..fef03de85b99 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -233,28 +233,62 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
}
EXPORT_SYMBOL_GPL(sdhci_reset);
-static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
+static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
struct mmc_host *mmc = host->mmc;
if (!mmc->ops->get_cd(mmc))
- return;
+ return false;
}
host->ops->reset(host, mask);
- if (mask & SDHCI_RESET_ALL) {
+ return true;
+}
+
+static void sdhci_reset_for_all(struct sdhci_host *host)
+{
+ if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
}
-
/* Resetting the controller clears many */
host->preset_enabled = false;
}
}
+enum sdhci_reset_reason {
+ SDHCI_RESET_FOR_INIT,
+ SDHCI_RESET_FOR_REQUEST_ERROR,
+ SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
+ SDHCI_RESET_FOR_TUNING_ABORT,
+ SDHCI_RESET_FOR_CARD_REMOVED,
+ SDHCI_RESET_FOR_CQE_RECOVERY,
+};
+
+static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
+{
+ switch (reason) {
+ case SDHCI_RESET_FOR_INIT:
+ sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ break;
+ case SDHCI_RESET_FOR_REQUEST_ERROR:
+ case SDHCI_RESET_FOR_TUNING_ABORT:
+ case SDHCI_RESET_FOR_CARD_REMOVED:
+ case SDHCI_RESET_FOR_CQE_RECOVERY:
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ break;
+ case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ break;
+ }
+}
+
+#define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
+
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
@@ -323,9 +357,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
unsigned long flags;
if (soft)
- sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ sdhci_reset_for(host, INIT);
else
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
@@ -1538,8 +1572,9 @@ static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
*/
if (data->error) {
if (!host->cmd || host->cmd == data_cmd)
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, REQUEST_ERROR);
+ else
+ sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
}
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
@@ -2403,14 +2438,6 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->ops->set_clock(host, host->clock);
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
-
- /*
- * Some (ENE) controllers go apeshit on some ios operation,
- * signalling timeout and CRC errors even on CMD0. Resetting
- * it on each ios seems to solve the problem.
- */
- if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
- sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
@@ -2718,8 +2745,7 @@ void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
sdhci_reset_tuning(host);
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, TUNING_ABORT);
sdhci_end_tuning(host);
@@ -2987,8 +3013,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
pr_err("%s: Resetting controller.\n",
mmc_hostname(mmc));
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, CARD_REMOVED);
sdhci_error_out_mrqs(host, -ENOMEDIUM);
}
@@ -3059,12 +3084,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
- /*
- * Spec says we should do both at the same time, but Ricoh
- * controllers do not like that.
- */
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
+ sdhci_reset_for(host, REQUEST_ERROR);
host->pending_reset = false;
}
@@ -3905,10 +3925,8 @@ void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
host->cqe_on = false;
- if (recovery) {
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
+ if (recovery)
+ sdhci_reset_for(host, CQE_RECOVERY);
pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
mmc_hostname(mmc), host->ier,
@@ -3928,7 +3946,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
*cmd_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, CMD_CRC);
} else if (intmask & SDHCI_INT_TIMEOUT) {
*cmd_error = -ETIMEDOUT;
@@ -3938,7 +3956,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
*data_error = -EILSEQ;
- if (!mmc_op_tuning(host->cmd->opcode))
+ if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
*data_error = -ETIMEDOUT;
@@ -4066,7 +4084,7 @@ void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
if (debug_quirks2)
host->quirks2 = debug_quirks2;
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
if (host->v4_mode)
sdhci_do_enable_v4_mode(host);
@@ -4807,7 +4825,7 @@ int __sdhci_add_host(struct sdhci_host *host)
unled:
sdhci_led_unregister(host);
unirq:
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
@@ -4865,7 +4883,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
sdhci_led_unregister(host);
if (!dead)
- sdhci_do_reset(host, SDHCI_RESET_ALL);
+ sdhci_reset_for_all(host);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
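sdhci.c consolidates the scattered sdhci_do_reset(SDHCI_RESET_CMD)/sdhci_do_reset(SDHCI_RESET_DATA) pairs behind reason-based helpers; the sdhci_reset_for() macro token-pastes the reason onto the SDHCI_RESET_FOR_ prefix, so call sites read as a statement of intent. Two usages, as already visible in the hunks above:

/* expands to sdhci_reset_for_reason(host, SDHCI_RESET_FOR_REQUEST_ERROR) */
sdhci_reset_for(host, REQUEST_ERROR);

/* expands to sdhci_reset_for_reason(host, SDHCI_RESET_FOR_TUNING_ABORT) */
sdhci_reset_for(host, TUNING_ABORT);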
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 95a08f09df30..d750c464bd1e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -379,8 +379,6 @@ struct sdhci_host {
#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
-/* Controller has flaky internal state so reset it on each ios change */
-#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS (1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA (1<<5)
/* Controller has an unusable ADMA engine */
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index e7ced1496a07..8f1023480e12 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -554,7 +554,6 @@ static const struct cqhci_host_ops sdhci_am654_cqhci_ops = {
static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
{
struct cqhci_host *cq_host;
- int ret;
cq_host = devm_kzalloc(mmc_dev(host->mmc), sizeof(struct cqhci_host),
GFP_KERNEL);
@@ -568,9 +567,7 @@ static int sdhci_am654_cqe_add_host(struct sdhci_host *host)
host->mmc->caps2 |= MMC_CAP2_CQE;
- ret = cqhci_init(cq_host, host->mmc, 1);
-
- return ret;
+ return cqhci_init(cq_host, host->mmc, 1);
}
static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 163ac9df8cca..9b5c503e3a3f 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -846,7 +846,7 @@ static int wmt_mci_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_sdmmc)) {
dev_err(&pdev->dev, "Error getting clock\n");
ret = PTR_ERR(priv->clk_sdmmc);
- goto fail5;
+ goto fail5_and_a_half;
}
ret = clk_prepare_enable(priv->clk_sdmmc);
@@ -863,6 +863,9 @@ static int wmt_mci_probe(struct platform_device *pdev)
return 0;
fail6:
clk_put(priv->clk_sdmmc);
+fail5_and_a_half:
+ dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
+ priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
free_irq(dma_irq, priv);
fail4:
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 40d7211485da..4cd37ec45762 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -461,7 +461,7 @@ static int block2mtd_setup(const char *val, const struct kernel_param *kp)
the device (even kmalloc() fails). Deter that work to
block2mtd_setup2(). */
- strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
+ strscpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));
return 0;
#endif
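block2mtd switches strlcpy() to strscpy(). Both bound the copy and NUL-terminate, but strscpy() returns the number of bytes copied, or -E2BIG on truncation, instead of the length of the (possibly overlong) source string, so truncation can be detected without re-measuring. A hedged sketch of the check; the buffer and its size are illustrative:

#include <linux/errno.h>
#include <linux/string.h>

static int example_set_param(const char *val)
{
	static char paramline[80];	/* illustrative size */
	ssize_t ret;

	ret = strscpy(paramline, val, sizeof(paramline));
	if (ret == -E2BIG)
		return -EINVAL;		/* source did not fit */

	return 0;
}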
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 5b0ae5ddad74..a7714e3de887 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -300,7 +300,7 @@ static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
}
/**
- * doc_set_data_mode - Sets the flash to normal or reliable data mode
+ * doc_set_reliable_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
* The reliable data mode is a bit slower than the fast mode, but less errors
@@ -442,7 +442,7 @@ static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
}
/**
- * doc_seek - Set both flash planes to the specified block, page for reading
+ * doc_read_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
* @block0: the first plane block index
* @block1: the second plane block index
@@ -871,6 +871,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ struct mtd_ecc_stats old_stats;
int max_bitflips = 0;
if (buf)
@@ -895,6 +896,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
ret = 0;
skip = from % DOC_LAYOUT_PAGE_SIZE;
mutex_lock(&docg3->cascade->lock);
+ old_stats = mtd->ecc_stats;
while (ret >= 0 && (len > 0 || ooblen > 0)) {
calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
@@ -966,6 +968,12 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
}
out:
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
mutex_unlock(&docg3->cascade->lock);
return ret;
err_in_read:
@@ -1951,7 +1959,7 @@ static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
}
/**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * docg3_probe - Probe the IO space for a DiskOnChip G3 chip
* @pdev: platform device
*
* Probes for a G3 chip at the specified IO space in the platform data
@@ -1974,9 +1982,14 @@ static int __init docg3_probe(struct platform_device *pdev)
dev_err(dev, "No I/O memory resource defined\n");
return ret;
}
- base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
ret = -ENOMEM;
+ base = devm_ioremap(dev, ress->start, DOC_IOSPACE_SIZE);
+ if (!base) {
+ dev_err(dev, "devm_ioremap dev failed\n");
+ return ret;
+ }
+
cascade = devm_kcalloc(dev, DOC_MAX_NBFLOORS, sizeof(*cascade),
GFP_KERNEL);
if (!cascade)
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index f655d2905270..8c22064ead38 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -941,7 +941,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
- partition_t *part = (void *)dev;
+ partition_t *part = container_of(dev, struct partition_t, mbd);
u_long sect;
/* Sort of arbitrary: round size down to 4KiB boundary */
@@ -969,7 +969,7 @@ static int ftl_writesect(struct mtd_blktrans_dev *dev,
static int ftl_discardsect(struct mtd_blktrans_dev *dev,
unsigned long sector, unsigned nr_sects)
{
- partition_t *part = (void *)dev;
+ partition_t *part = container_of(dev, struct partition_t, mbd);
uint32_t bsize = 1 << part->header.EraseUnitSize;
pr_debug("FTL erase sector %ld for %d sectors\n",
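The ftl.c change replaces the (void *) cast of the mtd_blktrans_dev pointer with container_of(), which recovers the enclosing partition_t from its embedded mbd member. The cast only happened to work because mbd is the first field; container_of() subtracts the member offset and stays correct no matter where the member sits. A standalone illustration with made-up types:

#include <linux/container_of.h>

struct inner {
	int x;
};

struct outer {
	long header;		/* inner is deliberately not the first member */
	struct inner member;
};

static struct outer *outer_from_inner(struct inner *p)
{
	/* (struct outer *)p would be wrong here */
	return container_of(p, struct outer, member);
}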
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 6b48397c750c..58ca1c21ebe6 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -136,7 +136,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -156,7 +156,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -176,7 +176,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
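Several hunks in this series (inftlcore above, and mtdswap/onenand further down) change on-stack struct mtd_oob_ops declarations to an empty designated initializer. With the new ops->stats pointer now consulted by mtd_read_oob() (see the mtdcore hunk below), any field a caller does not set must read as zero, and "= { }" guarantees that stats, retlen, oobretlen and friends start out NULL/0. A minimal caller sketch:

#include <linux/mtd/mtd.h>

static int example_read_oob(struct mtd_info *mtd, loff_t offs,
			    size_t len, u_char *buf)
{
	/* the empty initializer zeroes every field, including the newly
	 * added ops.stats pointer, so core code checking "if (ops->stats)"
	 * never sees stack garbage */
	struct mtd_oob_ops ops = { };

	ops.mode   = MTD_OPS_PLACE_OOB;
	ops.ooblen = len;
	ops.oobbuf = buf;

	return mtd_read_oob(mtd, offs, &ops);
}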
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 85eca6a192e6..c73854da5136 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -300,6 +300,9 @@ static const char *of_select_probe_type(struct platform_device *dev)
const char *probe_type;
match = of_match_device(of_flash_match, &dev->dev);
+ if (!match)
+ return NULL;
+
probe_type = match->data;
if (probe_type)
return probe_type;
diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c
index 946ba80f9758..5fcefcd0baca 100644
--- a/drivers/mtd/maps/pismo.c
+++ b/drivers/mtd/maps/pismo.c
@@ -195,7 +195,7 @@ static void pismo_add_one(struct pismo_data *pismo, int i,
}
}
-static int pismo_remove(struct i2c_client *client)
+static void pismo_remove(struct i2c_client *client)
{
struct pismo_data *pismo = i2c_get_clientdata(client);
int i;
@@ -204,8 +204,6 @@ static int pismo_remove(struct i2c_client *client)
platform_device_unregister(pismo->dev[i]);
kfree(pismo);
-
- return 0;
}
static int pismo_probe(struct i2c_client *client,
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 05860288a7af..01f1c6792df9 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -688,6 +688,137 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
return ret;
}
+static int mtdchar_read_ioctl(struct mtd_info *mtd,
+ struct mtd_read_req __user *argp)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ struct mtd_read_req req;
+ void __user *usr_data, *usr_oob;
+ uint8_t *datbuf = NULL, *oobbuf = NULL;
+ size_t datbuf_len, oobbuf_len;
+ size_t orig_len, orig_ooblen;
+ int ret = 0;
+
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+
+ orig_len = req.len;
+ orig_ooblen = req.ooblen;
+
+ usr_data = (void __user *)(uintptr_t)req.usr_data;
+ usr_oob = (void __user *)(uintptr_t)req.usr_oob;
+
+ if (!master->_read_oob)
+ return -EOPNOTSUPP;
+
+ if (!usr_data)
+ req.len = 0;
+
+ if (!usr_oob)
+ req.ooblen = 0;
+
+ req.ecc_stats.uncorrectable_errors = 0;
+ req.ecc_stats.corrected_bitflips = 0;
+ req.ecc_stats.max_bitflips = 0;
+
+ req.len &= 0xffffffff;
+ req.ooblen &= 0xffffffff;
+
+ if (req.start + req.len > mtd->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ datbuf_len = min_t(size_t, req.len, mtd->erasesize);
+ if (datbuf_len > 0) {
+ datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
+ if (!datbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
+ if (oobbuf_len > 0) {
+ oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
+ if (!oobbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
+ struct mtd_req_stats stats;
+ struct mtd_oob_ops ops = {
+ .mode = req.mode,
+ .len = min_t(size_t, req.len, datbuf_len),
+ .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
+ .datbuf = datbuf,
+ .oobbuf = oobbuf,
+ .stats = &stats,
+ };
+
+ /*
+ * Shorten non-page-aligned, eraseblock-sized reads so that the
+ * read ends on an eraseblock boundary. This is necessary in
+ * order to prevent OOB data for some pages from being
+ * duplicated in the output of non-page-aligned reads requiring
+ * multiple mtd_read_oob() calls to be completed.
+ */
+ if (ops.len == mtd->erasesize)
+ ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
+
+ ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);
+
+ req.ecc_stats.uncorrectable_errors +=
+ stats.uncorrectable_errors;
+ req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
+ req.ecc_stats.max_bitflips =
+ max(req.ecc_stats.max_bitflips, stats.max_bitflips);
+
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ break;
+
+ if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
+ copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ req.start += ops.retlen;
+ req.len -= ops.retlen;
+ usr_data += ops.retlen;
+
+ req.ooblen -= ops.oobretlen;
+ usr_oob += ops.oobretlen;
+ }
+
+ /*
+ * As multiple iterations of the above loop (and therefore multiple
+ * mtd_read_oob() calls) may be necessary to complete the read request,
+ * adjust the final return code to ensure it accounts for all detected
+ * ECC errors.
+ */
+ if (!ret || mtd_is_bitflip(ret)) {
+ if (req.ecc_stats.uncorrectable_errors > 0)
+ ret = -EBADMSG;
+ else if (req.ecc_stats.corrected_bitflips > 0)
+ ret = -EUCLEAN;
+ }
+
+out:
+ req.len = orig_len - req.len;
+ req.ooblen = orig_ooblen - req.ooblen;
+
+ if (copy_to_user(argp, &req, sizeof(req)))
+ ret = -EFAULT;
+
+ kvfree(datbuf);
+ kvfree(oobbuf);
+
+ return ret;
+}
+
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -710,6 +841,7 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
case MEMGETINFO:
case MEMREADOOB:
case MEMREADOOB64:
+ case MEMREAD:
case MEMISLOCKED:
case MEMGETOOBSEL:
case MEMGETBADBLOCK:
@@ -884,6 +1016,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
break;
}
+ case MEMREAD:
+ {
+ ret = mtdchar_read_ioctl(mtd,
+ (struct mtd_read_req __user *)arg);
+ break;
+ }
+
case MEMLOCK:
{
struct erase_info_user einfo;
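The new MEMREAD ioctl above returns per-request ECC statistics alongside the data and maps accumulated errors to -EUCLEAN/-EBADMSG. A hedged user-space sketch built from the struct mtd_read_req fields the handler uses; the exact UAPI header contents and field widths are not shown in this excerpt, so treat the details below as assumptions:

/* user-space sketch; assumes a kernel whose <mtd/mtd-user.h> defines MEMREAD */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

static int read_with_stats(const char *dev, void *buf,
			   uint64_t start, uint64_t len)
{
	struct mtd_read_req req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.start = start;
	req.len = len;
	req.usr_data = (uintptr_t)buf;	/* usr_oob left at 0: no OOB wanted */
	req.mode = MTD_OPS_PLACE_OOB;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, MEMREAD, &req);	/* EUCLEAN/EBADMSG end up in errno */

	/* the request struct is updated even when ECC errors were reported */
	printf("corrected bitflips: %u, max per ECC step: %u\n",
	       (unsigned int)req.ecc_stats.corrected_bitflips,
	       (unsigned int)req.ecc_stats.max_bitflips);

	close(fd);
	return ret;
}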
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index f685a581df48..193428de6a4b 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -836,7 +836,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
/*
* walk the map of the new device once more and fill in
- * in erase region info:
+ * erase region info:
*/
curr_erasesize = subdev[0]->erasesize;
begin = position = 0;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index a9b8be9f40dc..18aa54460d36 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1218,6 +1218,34 @@ int __get_mtd_device(struct mtd_info *mtd)
EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
+ * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
+ *
+ * @np: device tree node
+ */
+struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
+{
+ struct mtd_info *mtd = NULL;
+ struct mtd_info *tmp;
+ int err;
+
+ mutex_lock(&mtd_table_mutex);
+
+ err = -EPROBE_DEFER;
+ mtd_for_each_device(tmp) {
+ if (mtd_get_of_node(tmp) == np) {
+ mtd = tmp;
+ err = __get_mtd_device(mtd);
+ break;
+ }
+ }
+
+ mutex_unlock(&mtd_table_mutex);
+
+ return err ? ERR_PTR(err) : mtd;
+}
+EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
+
+/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
* device name
* @name: MTD device name to open
@@ -1624,6 +1652,9 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
+ if (ops->stats)
+ memset(ops->stats, 0, sizeof(*ops->stats));
+
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
else
@@ -1641,6 +1672,8 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
return ret_code;
if (mtd->ecc_strength == 0)
return 0; /* device lacks ecc */
+ if (ops->stats)
+ ops->stats->max_bitflips = ret_code;
return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
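of_get_mtd_device_by_node() lets another driver resolve an MTD device from a device-tree node and takes a reference on it via __get_mtd_device(); while no matching MTD has been registered yet it returns ERR_PTR(-EPROBE_DEFER) so probing can be retried. A hedged consumer sketch (the "nand" phandle property name is purely illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/of.h>

static struct mtd_info *example_get_mtd(struct device *dev)
{
	struct device_node *np;
	struct mtd_info *mtd;

	np = of_parse_phandle(dev->of_node, "nand", 0);	/* property name assumed */
	if (!np)
		return ERR_PTR(-ENODEV);

	mtd = of_get_mtd_device_by_node(np);	/* may be ERR_PTR(-EPROBE_DEFER) */
	of_node_put(np);

	/* on success the caller owns a reference; drop it with put_mtd_device() */
	return mtd;
}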
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index e13d42c0acb0..7ac8ac901306 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -401,7 +401,7 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
/*
* kmsg_size must be aligned to 4096 Bytes, which is limited by
* psblk. The default value of kmsg_size is 64KB. If kmsg_size
- * is larger than erasesize, some errors will occur since mtdpsotre
+ * is larger than erasesize, some errors will occur since mtdpstore
* is designed on it.
*/
if (mtd->erasesize < info->kmsg_size) {
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index dc7f1532a37f..680366616da2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -323,7 +323,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
struct mtdswap_oobdata *data, *data2;
int ret;
loff_t offset;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
offset = mtdswap_eb_offset(d, eb);
@@ -370,7 +370,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
struct mtdswap_oobdata n;
int ret;
loff_t offset;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.ooboffs = 0;
ops.oobbuf = (uint8_t *)&n;
@@ -878,7 +878,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
loff_t base, pos;
unsigned int *p1 = (unsigned int *)d->page_buf;
unsigned char *p2 = (unsigned char *)d->oob_buf;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_AUTO_OOB;
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 64af6898131d..db4f93a903e4 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -24,11 +24,8 @@ int nanddev_bbt_init(struct nand_device *nand)
{
unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
unsigned int nblocks = nanddev_neraseblocks(nand);
- unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
- BITS_PER_LONG);
- nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
- GFP_KERNEL);
+ nand->bbt.cache = bitmap_zalloc(nblocks * bits_per_block, GFP_KERNEL);
if (!nand->bbt.cache)
return -ENOMEM;
@@ -44,7 +41,7 @@ EXPORT_SYMBOL_GPL(nanddev_bbt_init);
*/
void nanddev_bbt_cleanup(struct nand_device *nand)
{
- kfree(nand->bbt.cache);
+ bitmap_free(nand->bbt.cache);
}
EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
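The BBT change replaces a hand-rolled kcalloc() of BITS_PER_LONG-sized words with bitmap_zalloc()/bitmap_free(), which take the number of bits directly and do the word rounding internally. Minimal sketch:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *example_alloc_block_flags(unsigned int nblocks,
						unsigned int bits_per_block)
{
	/* sized from a bit count; no manual DIV_ROUND_UP(..., BITS_PER_LONG);
	 * release with bitmap_free() */
	return bitmap_zalloc(nblocks * bits_per_block, GFP_KERNEL);
}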
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index 958bac54b190..f66385faf631 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -1440,6 +1440,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats old_stats;
int ret;
switch (ops->mode) {
@@ -1453,12 +1454,23 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
}
onenand_get_device(mtd, FL_READING);
+
+ old_stats = mtd->ecc_stats;
+
if (ops->datbuf)
ret = ONENAND_IS_4KB_PAGE(this) ?
onenand_mlc_read_ops_nolock(mtd, from, ops) :
onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
+
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
onenand_release_device(mtd);
return ret;
@@ -2935,7 +2947,7 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len,
struct onenand_chip *this = mtd->priv;
unsigned char *pbuf = buf;
int ret;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
/* Force buffer page aligned */
if (len < mtd->writesize) {
@@ -2977,7 +2989,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret;
if (FLEXONENAND(this)) {
diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c
index b17315f8e1d4..d7fe35bc45cb 100644
--- a/drivers/mtd/nand/onenand/onenand_bbt.c
+++ b/drivers/mtd/nand/onenand/onenand_bbt.c
@@ -61,7 +61,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
int startblock;
loff_t from;
size_t readlen;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 8b6d7a515445..4cd40af362de 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -200,27 +200,7 @@ config MTD_NAND_TMIO
Support for NAND flash connected to a Toshiba Mobile IO
Controller in some PDAs, including the Sharp SL6000x.
-config MTD_NAND_BRCMNAND
- tristate "Broadcom STB NAND controller"
- depends on ARM || ARM64 || MIPS || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Enables the Broadcom NAND controller driver. The controller was
- originally designed for Set-Top Box but is used on various BCM7xxx,
- BCM3xxx, BCM63xxx, iProc/Cygnus and more.
-
-if MTD_NAND_BRCMNAND
-
-config MTD_NAND_BRCMNAND_BCMA
- tristate "Broadcom BCMA NAND controller"
- depends on BCMA_NFLASH
- depends on BCMA
- help
- Enables the BRCMNAND controller over BCMA on BCM47186/BCM5358 SoCs.
- The glue driver will take care of performing the low-level I/O
- operations to interface the BRCMNAND controller over the BCMA bus.
-
-endif # MTD_NAND_BRCMNAND
+source "drivers/mtd/nand/raw/brcmnand/Kconfig"
config MTD_NAND_BCM47XXNFLASH
tristate "BCM4706 BCMA NAND controller"
@@ -410,7 +390,7 @@ config MTD_NAND_STM32_FMC2
config MTD_NAND_MESON
tristate "Support for NAND controller on Amlogic's Meson SoCs"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK && (ARCH_MESON || COMPILE_TEST)
select MFD_SYSCON
help
Enables support for NAND controller on Amlogic's Meson SoCs.
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 296fb16c8dc3..ec7e6eeac55f 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -915,7 +915,7 @@ static int anfc_check_op(struct nand_chip *chip,
if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
return -ENOTSUPP;
- if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0))
+ if (anfc_pkt_len_config(instr->ctx.data.len, NULL, NULL))
return -ENOTSUPP;
break;
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index c9ac3baf68c0..41c6bd6e2d72 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -405,6 +405,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
dma_async_issue_pending(nc->dmac);
wait_for_completion(&finished);
+ dma_unmap_single(nc->dev, buf_dma, len, dir);
return 0;
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 8bb17c5a66c3..6487dfc64258 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -14,7 +14,7 @@
#include <linux/bcma/bcma.h>
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown ~1000 retries as maxiumum. */
+ * shown ~1000 retries as maximum. */
#define NFLASH_READY_RETRIES 10000
#define NFLASH_SECTOR_SIZE 512
diff --git a/drivers/mtd/nand/raw/brcmnand/Kconfig b/drivers/mtd/nand/raw/brcmnand/Kconfig
new file mode 100644
index 000000000000..4bc51bf60aca
--- /dev/null
+++ b/drivers/mtd/nand/raw/brcmnand/Kconfig
@@ -0,0 +1,49 @@
+config MTD_NAND_BRCMNAND
+ tristate "Broadcom STB NAND controller"
+ depends on ARM || ARM64 || MIPS || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Enables the Broadcom NAND controller driver. The controller was
+ originally designed for Set-Top Box but is used on various BCM7xxx,
+ BCM3xxx, BCM63xxx, iProc/Cygnus and more.
+
+if MTD_NAND_BRCMNAND
+
+config MTD_NAND_BRCMNAND_BCM63XX
+ tristate "Broadcom BCM63xx NAND controller glue"
+ default BCM63XX
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom BCM63xx MIPS-based DSL platforms.
+
+config MTD_NAND_BRCMNAND_BCMA
+ tristate "Broadcom BCMA NAND controller"
+ depends on BCMA_NFLASH
+ depends on BCMA
+ help
+ Enables the BRCMNAND controller over BCMA on BCM47186/BCM5358 SoCs.
+ The glue driver will take care of performing the low-level I/O
+ operations to interface the BRCMNAND controller over the BCMA bus.
+
+config MTD_NAND_BRCMNAND_BCMBCA
+ tristate "Broadcom BCMBCA NAND controller glue"
+ default ARCH_BCMBCA
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom BCA platforms.
+
+config MTD_NAND_BRCMNAND_BRCMSTB
+ tristate "Broadcom STB Nand controller glue"
+ default ARCH_BRCMSTB
+ help
+ Enables the BRCMNAND glue driver to register the NAND controller
+ on Broadcom STB platforms.
+
+config MTD_NAND_BRCMNAND_IPROC
+ tristate "Broadcom iProc NAND controller glue"
+ default ARCH_BCM_IPROC
+ help
+ Enables the BRCMNAND controller glue driver to register the NAND
+ controller on Broadcom iProc platforms.
+
+endif # MTD_NAND_BRCMNAND
diff --git a/drivers/mtd/nand/raw/brcmnand/Makefile b/drivers/mtd/nand/raw/brcmnand/Makefile
index 16dc7254200e..9907e3ec4bb2 100644
--- a/drivers/mtd/nand/raw/brcmnand/Makefile
+++ b/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
# link order matters; don't link the more generic brcmstb_nand.o before the
# more specific iproc_nand.o, for instance
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += iproc_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm63138_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += bcm6368_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmstb_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_IPROC) += iproc_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMBCA) += bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BCM63XX) += bcm6368_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND_BRCMSTB) += brcmstb_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND_BCMA) += bcma_nand.o
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 0d72672f8b64..9dac3ca69d57 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -1979,7 +1979,6 @@ static int cadence_nand_force_byte_access(struct nand_chip *chip,
bool force_8bit)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
- int status;
/*
* Callers of this function do not verify if the NAND is using a 16-bit
@@ -1990,9 +1989,7 @@ static int cadence_nand_force_byte_access(struct nand_chip *chip,
if (!(chip->options & NAND_BUSWIDTH_16))
return 0;
- status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
-
- return status;
+ return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
}
static int cadence_nand_cmd_opcode(struct nand_chip *chip,
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index af119e376352..66385c4fb994 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -358,7 +358,7 @@ static int cafe_nand_read_oob(struct nand_chip *chip, int page)
return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
- * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
+ * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
* @oob_required: caller expects OOB data read to chip->oob_poi
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index aab93b9e6052..a18d121396aa 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -726,36 +726,40 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip)
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
unsigned int al;
- switch (chip->ecc.engine_type) {
/*
* if ECC was not chosen in DT, decide whether to use HW or SW ECC from
* CS Base Register
*/
- case NAND_ECC_ENGINE_TYPE_NONE:
+ if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID) {
/* If CS Base Register selects full hardware ECC then use it */
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
BR_DECC_CHK_GEN) {
- chip->ecc.read_page = fsl_elbc_read_page;
- chip->ecc.write_page = fsl_elbc_write_page;
- chip->ecc.write_subpage = fsl_elbc_write_subpage;
-
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
- mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
} else {
/* otherwise fall back to default software ECC */
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
}
+ }
+
+ switch (chip->ecc.engine_type) {
+ /* if HW ECC was chosen, setup ecc and oob layout */
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+ chip->ecc.write_subpage = fsl_elbc_write_subpage;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.strength = 1;
break;
- /* if SW ECC was chosen in DT, we do not need to set anything here */
+ /* if none or SW ECC was chosen, we do not need to set anything here */
+ case NAND_ECC_ENGINE_TYPE_NONE:
case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
break;
- /* should we also implement *_ECC_ENGINE_CONTROLLER to do as above? */
default:
return -EINVAL;
}
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 93da23682d86..01ccbde748f3 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1361,7 +1361,7 @@ error_alloc:
/*
* Handles block mark swapping.
* It can be called in swapping the block mark, or swapping it back,
- * because the the operations are the same.
+ * because the operations are the same.
*/
static void block_mark_swapping(struct gpmi_nand_data *this,
void *payload, void *auxiliary)
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index e91b879b32bd..d4a0987e93ac 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -16,6 +16,7 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -99,15 +100,12 @@
#define HSNAND_ECC_OFFSET 0x008
-#define NAND_DATA_IFACE_CHECK_ONLY -1
-
#define MAX_CS 2
#define USEC_PER_SEC 1000000L
struct ebu_nand_cs {
void __iomem *chipaddr;
- dma_addr_t nand_pa;
u32 addr_sel;
};
@@ -120,7 +118,6 @@ struct ebu_nand_controller {
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
struct completion dma_access_complete;
- unsigned long clk_rate;
struct clk *clk;
u32 nd_para0;
u8 cs_num;
@@ -580,6 +577,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ebu_nand_controller *ebu_host;
+ struct device_node *chip_np;
struct nand_chip *nand;
struct mtd_info *mtd;
struct resource *res;
@@ -594,17 +592,20 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->dev = dev;
nand_controller_init(&ebu_host->controller);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand");
- ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res);
+ ebu_host->ebu = devm_platform_ioremap_resource_byname(pdev, "ebunand");
if (IS_ERR(ebu_host->ebu))
return PTR_ERR(ebu_host->ebu);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand");
- ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res);
+ ebu_host->hsnand = devm_platform_ioremap_resource_byname(pdev, "hsnand");
if (IS_ERR(ebu_host->hsnand))
return PTR_ERR(ebu_host->hsnand);
- ret = device_property_read_u32(dev, "reg", &cs);
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not find child node for the NAND chip\n");
+
+ ret = of_property_read_u32(chip_np, "reg", &cs);
if (ret) {
dev_err(dev, "failed to get chip select: %d\n", ret);
return ret;
@@ -617,11 +618,10 @@ static int ebu_nand_probe(struct platform_device *pdev)
ebu_host->cs_num = cs;
resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
- ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res);
+ ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
+ resname);
if (IS_ERR(ebu_host->cs[cs].chipaddr))
return PTR_ERR(ebu_host->cs[cs].chipaddr);
- ebu_host->cs[cs].nand_pa = res->start;
ebu_host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ebu_host->clk))
@@ -633,7 +633,6 @@ static int ebu_nand_probe(struct platform_device *pdev)
dev_err(dev, "failed to enable clock: %d\n", ret);
return ret;
}
- ebu_host->clk_rate = clk_get_rate(ebu_host->clk);
ebu_host->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(ebu_host->dma_tx)) {
@@ -660,7 +659,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN,
ebu_host->ebu + EBU_ADDR_SEL(cs));
- nand_set_flash_node(&ebu_host->chip, dev->of_node);
+ nand_set_flash_node(&ebu_host->chip, chip_np);
mtd = nand_to_mtd(&ebu_host->chip);
if (!mtd->name) {
@@ -716,7 +715,6 @@ static int ebu_nand_remove(struct platform_device *pdev)
}
static const struct of_device_id ebu_nand_match[] = {
- { .compatible = "intel,nand-controller" },
{ .compatible = "intel,lgm-ebunand" },
{}
};
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2455a581fd70..d9f2f1d0b5ef 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -865,13 +865,19 @@ static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
marvell_nfc_enable_dma(nfc);
/* Prepare the DMA transfer */
sg_init_one(&sg, nfc->dma_buf, dma_len);
- dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ ret = dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ if (!ret) {
+ dev_err(nfc->dev, "Could not map DMA S/G list\n");
+ return -ENXIO;
+ }
+
tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
direction == DMA_FROM_DEVICE ?
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
return -ENXIO;
}
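
Note: the fix above checks dma_map_sg(), which returns the number of mapped entries and 0 on failure (never a negative errno), and also unmaps on the descriptor-preparation error path. A minimal sketch of that pattern; the helper name is hypothetical and the error codes simply mirror the hunk:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/* Hypothetical helper showing the map/prep/unmap-on-error pattern. */
static int xfer_one_sg(struct dma_chan *chan, struct scatterlist *sg,
		       enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *tx;
	int nents;

	nents = dma_map_sg(chan->device->dev, sg, 1, dir);
	if (!nents)		/* 0 mapped entries means the mapping failed */
		return -ENXIO;

	tx = dmaengine_prep_slave_sg(chan, sg, nents,
				     dir == DMA_FROM_DEVICE ?
				     DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		/* undo the mapping before bailing out */
		dma_unmap_sg(chan->device->dev, sg, 1, dir);
		return -ENXIO;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}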
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 829b76b303aa..5ee01231ac4c 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/mtd.h>
#include <linux/mfd/syscon.h>
@@ -56,6 +57,9 @@
#define NFC_RB_IRQ_EN BIT(21)
+#define CLK_DIV_SHIFT 0
+#define CLK_DIV_WIDTH 6
+
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
@@ -151,15 +155,15 @@ struct meson_nfc {
struct nand_controller controller;
struct clk *core_clk;
struct clk *device_clk;
- struct clk *phase_tx;
- struct clk *phase_rx;
+ struct clk *nand_clk;
+ struct clk_divider nand_divider;
unsigned long clk_rate;
u32 bus_timing;
struct device *dev;
void __iomem *reg_base;
- struct regmap *reg_clk;
+ void __iomem *reg_clk;
struct completion completion;
struct list_head chips;
const struct meson_nfc_data *data;
@@ -235,7 +239,7 @@ static void meson_nfc_select_chip(struct nand_chip *nand, int chip)
nfc->timing.tbers_max = meson_chip->tbers_max;
if (nfc->clk_rate != meson_chip->clk_rate) {
- ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate);
+ ret = clk_set_rate(nfc->nand_clk, meson_chip->clk_rate);
if (ret) {
dev_err(nfc->dev, "failed to set clock rate\n");
return;
@@ -454,7 +458,7 @@ static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
- *correct_bitmap |= 1 >> i;
+ *correct_bitmap |= BIT_ULL(i);
continue;
}
if ((nand->options & NAND_NEED_SCRAMBLING) &&
@@ -800,7 +804,7 @@ static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf,
u8 *data = buf + i * ecc->size;
u8 *oob = nand->oob_poi + i * (ecc->bytes + 2);
- if (correct_bitmap & (1 << i))
+ if (correct_bitmap & BIT_ULL(i))
continue;
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 2,
@@ -987,6 +991,8 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
static int meson_nfc_clk_init(struct meson_nfc *nfc)
{
+ struct clk_parent_data nfc_divider_parent_data[1];
+ struct clk_init_data init = {0};
int ret;
/* request core clock */
@@ -1002,21 +1008,28 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
return PTR_ERR(nfc->device_clk);
}
- nfc->phase_tx = devm_clk_get(nfc->dev, "tx");
- if (IS_ERR(nfc->phase_tx)) {
- dev_err(nfc->dev, "failed to get TX clk\n");
- return PTR_ERR(nfc->phase_tx);
- }
-
- nfc->phase_rx = devm_clk_get(nfc->dev, "rx");
- if (IS_ERR(nfc->phase_rx)) {
- dev_err(nfc->dev, "failed to get RX clk\n");
- return PTR_ERR(nfc->phase_rx);
- }
+ init.name = devm_kasprintf(nfc->dev,
+ GFP_KERNEL, "%s#div",
+ dev_name(nfc->dev));
+ init.ops = &clk_divider_ops;
+ nfc_divider_parent_data[0].fw_name = "device";
+ init.parent_data = nfc_divider_parent_data;
+ init.num_parents = 1;
+ nfc->nand_divider.reg = nfc->reg_clk;
+ nfc->nand_divider.shift = CLK_DIV_SHIFT;
+ nfc->nand_divider.width = CLK_DIV_WIDTH;
+ nfc->nand_divider.hw.init = &init;
+ nfc->nand_divider.flags = CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ALLOW_ZERO;
+
+ nfc->nand_clk = devm_clk_register(nfc->dev, &nfc->nand_divider.hw);
+ if (IS_ERR(nfc->nand_clk))
+ return PTR_ERR(nfc->nand_clk);
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
- regmap_update_bits(nfc->reg_clk,
- 0, CLK_SELECT_NAND, CLK_SELECT_NAND);
+ writel(CLK_SELECT_NAND | readl(nfc->reg_clk),
+ nfc->reg_clk);
ret = clk_prepare_enable(nfc->core_clk);
if (ret) {
@@ -1030,29 +1043,21 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
goto err_device_clk;
}
- ret = clk_prepare_enable(nfc->phase_tx);
+ ret = clk_prepare_enable(nfc->nand_clk);
if (ret) {
- dev_err(nfc->dev, "failed to enable TX clock\n");
- goto err_phase_tx;
+ dev_err(nfc->dev, "pre enable NFC divider fail\n");
+ goto err_nand_clk;
}
- ret = clk_prepare_enable(nfc->phase_rx);
- if (ret) {
- dev_err(nfc->dev, "failed to enable RX clock\n");
- goto err_phase_rx;
- }
-
- ret = clk_set_rate(nfc->device_clk, 24000000);
+ ret = clk_set_rate(nfc->nand_clk, 24000000);
if (ret)
- goto err_disable_rx;
+ goto err_disable_clk;
return 0;
-err_disable_rx:
- clk_disable_unprepare(nfc->phase_rx);
-err_phase_rx:
- clk_disable_unprepare(nfc->phase_tx);
-err_phase_tx:
+err_disable_clk:
+ clk_disable_unprepare(nfc->nand_clk);
+err_nand_clk:
clk_disable_unprepare(nfc->device_clk);
err_device_clk:
clk_disable_unprepare(nfc->core_clk);
@@ -1061,8 +1066,7 @@ err_device_clk:
static void meson_nfc_disable_clk(struct meson_nfc *nfc)
{
- clk_disable_unprepare(nfc->phase_rx);
- clk_disable_unprepare(nfc->phase_tx);
+ clk_disable_unprepare(nfc->nand_clk);
clk_disable_unprepare(nfc->device_clk);
clk_disable_unprepare(nfc->core_clk);
}
@@ -1368,7 +1372,6 @@ static int meson_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_nfc *nfc;
- struct resource *res;
int ret, irq;
nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
@@ -1385,18 +1388,13 @@ static int meson_nfc_probe(struct platform_device *pdev)
nfc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->reg_base = devm_ioremap_resource(dev, res);
+ nfc->reg_base = devm_platform_ioremap_resource_byname(pdev, "nfc");
if (IS_ERR(nfc->reg_base))
return PTR_ERR(nfc->reg_base);
- nfc->reg_clk =
- syscon_regmap_lookup_by_phandle(dev->of_node,
- "amlogic,mmc-syscon");
- if (IS_ERR(nfc->reg_clk)) {
- dev_err(dev, "Failed to lookup clock base\n");
+ nfc->reg_clk = devm_platform_ioremap_resource_byname(pdev, "emmc");
+ if (IS_ERR(nfc->reg_clk))
return PTR_ERR(nfc->reg_clk);
- }
irq = platform_get_irq(pdev, 0);
if (irq < 0)
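
For reference, how the divider flags chosen in the clk_init hunk above resolve a rate request, under a hypothetical 1 GHz "device" parent (the real parent rate depends on the SoC clock tree):

	clk_set_rate(nand_clk, 24 MHz)  ->  ideal divisor = 1 000 000 000 / 24 000 000 ≈ 41.7
	CLK_DIVIDER_ONE_BASED           ->  the 6-bit field stores the divisor itself (1..63)
	CLK_DIVIDER_ROUND_CLOSEST       ->  42 is picked: 1 GHz / 42 ≈ 23.8 MHz is closer to the
	                                    request than 1 GHz / 41 ≈ 24.4 MHz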
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 6b67b7dfe7ce..33f2c98a030e 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -335,8 +335,6 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
* @chip: NAND chip structure
*
* Lock the device and its controller for exclusive access
- *
- * Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
static void nand_get_device(struct nand_chip *chip)
{
@@ -3818,6 +3816,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_ecc_stats old_stats;
int ret;
ops->retlen = 0;
@@ -3829,11 +3828,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
nand_get_device(chip);
+ old_stats = mtd->ecc_stats;
+
if (!ops->datbuf)
ret = nand_do_read_oob(chip, from, ops);
else
ret = nand_do_read_ops(chip, from, ops);
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
nand_release_device(chip);
return ret;
}
@@ -5331,11 +5339,10 @@ static int of_get_nand_secure_regions(struct nand_chip *chip)
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
unsigned int *ncs_array)
{
- struct device_node *np = dev->of_node;
struct gpio_desc **descs;
int ndescs, i;
- ndescs = of_gpio_named_count(np, "cs-gpios");
+ ndescs = gpiod_count(dev, "cs");
if (ndescs < 0) {
dev_dbg(dev, "No valid cs-gpios property\n");
return 0;
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index a3723da2e0a0..e4664fa6fd9e 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -313,7 +313,7 @@ static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
size_t len)
{
struct mtd_info *mtd = nand_to_mtd(this);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res, ret = 0;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -354,7 +354,7 @@ static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
struct mtd_info *mtd = nand_to_mtd(this);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooboffs = 0;
@@ -416,7 +416,7 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
{
struct mtd_info *mtd = nand_to_mtd(this);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret, page_offset;
ops.ooblen = mtd->oobsize;
@@ -756,7 +756,7 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
uint8_t rcode = td->reserved_block_code;
size_t retlen, len = 0;
loff_t to;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.ooblen = mtd->oobsize;
ops.ooboffs = 0;
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 2c87c7d89205..1bfecf502216 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -170,18 +170,11 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- info->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- if (ret == -ENOENT) {
- info->clk = NULL;
- } else {
- dev_err(&pdev->dev, "failed to get clock!\n");
- return ret;
- }
- }
+ /* Not all platforms can gate the clock, so it is optional. */
+ info->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
+ "failed to get clock!\n");
ret = clk_prepare_enable(info->clk);
if (ret) {
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index b2b42dd1a2de..24f52a30fb13 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -99,7 +99,7 @@ static const struct mtd_ooblayout_ops oob_sm_small_ops = {
static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
struct sm_oob oob;
int ret;
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 87c1c7dd97eb..5d627048c420 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -862,8 +862,8 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
eccsteps, dma_data_dir);
- if (ret < 0)
- return ret;
+ if (!ret)
+ return -EIO;
desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
eccsteps, dma_transfer_dir,
@@ -893,8 +893,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
- if (ret < 0)
+ if (!ret) {
+ ret = -EIO;
goto err_unmap_data;
+ }
desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
nfc->dma_ecc_sg.sgl,
@@ -1799,9 +1801,8 @@ static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
nand->cs_used[i] = cs;
}
- nand->wp_gpio = devm_gpiod_get_from_of_node(nfc->dev, dn,
- "wp-gpios", 0,
- GPIOD_OUT_HIGH, "wp");
+ nand->wp_gpio = devm_fwnode_gpiod_get(nfc->dev, of_fwnode_handle(dn),
+ "wp", GPIOD_OUT_HIGH, "wp");
if (IS_ERR(nand->wp_gpio)) {
ret = PTR_ERR(nand->wp_gpio);
if (ret != -ENOENT)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 9d73910a7ae8..dacd9c0e8b20 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -635,6 +635,7 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtd_ecc_stats old_stats;
unsigned int max_bitflips = 0;
struct nand_io_iter iter;
bool disable_ecc = false;
@@ -646,6 +647,8 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_lock(&spinand->lock);
+ old_stats = mtd->ecc_stats;
+
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
@@ -668,6 +671,13 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
ops->oobretlen += iter.req.ooblen;
}
+ if (ops->stats) {
+ ops->stats->uncorrectable_errors +=
+ mtd->ecc_stats.failed - old_stats.failed;
+ ops->stats->corrected_bitflips +=
+ mtd->ecc_stats.corrected - old_stats.corrected;
+ }
+
mutex_unlock(&spinand->lock);
if (ecc_failed && !ret)
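
Both this spinand hunk and the nand_base.c hunk above feed per-request ECC counters into ops->stats. A minimal caller sketch, assuming the stats object is the struct mtd_req_stats added alongside this series (only the two fields updated in the hunks are shown); the helper name and buffer handling are hypothetical:

#include <linux/mtd/mtd.h>
#include <linux/printk.h>

/* Hypothetical helper: opt in to per-request ECC accounting. */
static int read_with_stats(struct mtd_info *mtd, loff_t from, u8 *buf, size_t len)
{
	struct mtd_req_stats stats = { };	/* assumed type, see note above */
	struct mtd_oob_ops ops = { };		/* zero-init: stats stays NULL unless set */
	int ret;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.datbuf = buf;
	ops.len = len;
	ops.stats = &stats;			/* request per-read accounting */

	ret = mtd_read_oob(mtd, from, &ops);
	pr_debug("read %zu bytes: %u uncorrectable, %u corrected bitflips\n",
		 ops.retlen, stats.uncorrectable_errors, stats.corrected_bitflips);
	return ret;
}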
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 913db0dd6a8d..64d319e959b2 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -124,7 +124,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
loff_t mask = mtd->writesize - 1;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -145,7 +145,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf)
{
loff_t mask = mtd->writesize - 1;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -168,7 +168,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
size_t *retlen, uint8_t *buf, uint8_t *oob)
{
loff_t mask = mtd->writesize - 1;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int res;
ops.mode = MTD_OPS_PLACE_OOB;
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index b43df73927a0..aaa06050c9bc 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -20,6 +20,16 @@ config MTD_BCM63XX_PARTS
This provides partition parsing for BCM63xx devices with CFE
bootloaders.
+config MTD_BRCM_U_BOOT
+ tristate "Broadcom's U-Boot partition parser"
+ depends on ARCH_BCM4908 || COMPILE_TEST
+ help
+ Broadcom uses a custom way of storing U-Boot environment variables.
+ They are placed inside the U-Boot partition itself at an unspecified
+ offset. It's possible to locate them by looking for a custom header
+ with a magic value. This driver does that and creates a subpartition
+ for each environment variable block it finds.
+
config MTD_CMDLINE_PARTS
tristate "Command line partition table parsing"
depends on MTD
@@ -69,8 +79,8 @@ config MTD_OF_PARTS
config MTD_OF_PARTS_BCM4908
bool "BCM4908 partitioning support"
- depends on MTD_OF_PARTS && (ARCH_BCM4908 || COMPILE_TEST)
- default ARCH_BCM4908
+ depends on MTD_OF_PARTS && (ARCH_BCMBCA || COMPILE_TEST)
+ default ARCH_BCMBCA
help
This provides partitions parser for BCM4908 family devices
that can have multiple "firmware" partitions. It takes care of
@@ -78,7 +88,7 @@ config MTD_OF_PARTS_BCM4908
config MTD_OF_PARTS_LINKSYS_NS
bool "Linksys Northstar partitioning support"
- depends on MTD_OF_PARTS && (ARCH_BCM_5301X || ARCH_BCM4908 || COMPILE_TEST)
+ depends on MTD_OF_PARTS && (ARCH_BCM_5301X || ARCH_BCMBCA || COMPILE_TEST)
default ARCH_BCM_5301X
help
This provides partitions parser for Linksys devices based on Broadcom
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 2fcf0ab9e7da..23fa4de4016f 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o
obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
+obj-$(CONFIG_MTD_BRCM_U_BOOT) += brcm_u-boot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
ofpart-y += ofpart_core.o
diff --git a/drivers/mtd/parsers/brcm_u-boot.c b/drivers/mtd/parsers/brcm_u-boot.c
new file mode 100644
index 000000000000..7c338dc7b8f3
--- /dev/null
+++ b/drivers/mtd/parsers/brcm_u-boot.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#define BRCM_U_BOOT_MAX_OFFSET 0x200000
+#define BRCM_U_BOOT_STEP 0x1000
+
+#define BRCM_U_BOOT_MAX_PARTS 2
+
+#define BRCM_U_BOOT_MAGIC 0x75456e76 /* uEnv */
+
+struct brcm_u_boot_header {
+ __le32 magic;
+ __le32 length;
+} __packed;
+
+static const char *names[BRCM_U_BOOT_MAX_PARTS] = {
+ "u-boot-env",
+ "u-boot-env-backup",
+};
+
+static int brcm_u_boot_parse(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct brcm_u_boot_header header;
+ struct mtd_partition *parts;
+ size_t bytes_read;
+ size_t offset;
+ int err;
+ int i = 0;
+
+ parts = kcalloc(BRCM_U_BOOT_MAX_PARTS, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ for (offset = 0;
+ offset < min_t(size_t, mtd->size, BRCM_U_BOOT_MAX_OFFSET);
+ offset += BRCM_U_BOOT_STEP) {
+ err = mtd_read(mtd, offset, sizeof(header), &bytes_read, (uint8_t *)&header);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("Failed to read from %s at 0x%zx: %d\n", mtd->name, offset, err);
+ continue;
+ }
+
+ if (le32_to_cpu(header.magic) != BRCM_U_BOOT_MAGIC)
+ continue;
+
+ parts[i].name = names[i];
+ parts[i].offset = offset;
+ parts[i].size = sizeof(header) + le32_to_cpu(header.length);
+ i++;
+ pr_info("offset:0x%zx magic:0x%08x BINGO\n", offset, header.magic);
+
+ if (i == BRCM_U_BOOT_MAX_PARTS)
+ break;
+ }
+
+ *pparts = parts;
+
+ return i;
+};
+
+static const struct of_device_id brcm_u_boot_of_match_table[] = {
+ { .compatible = "brcm,u-boot" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcm_u_boot_of_match_table);
+
+static struct mtd_part_parser brcm_u_boot_mtd_parser = {
+ .parse_fn = brcm_u_boot_parse,
+ .name = "brcm_u-boot",
+ .of_match_table = brcm_u_boot_of_match_table,
+};
+module_mtd_part_parser(brcm_u_boot_mtd_parser);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index 0ddff1a4b51f..b34856def816 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -193,7 +193,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].mask_flags = mask_flags;
parts[this_part].add_flags = add_flags;
if (name)
- strlcpy(extra_mem, name, name_len + 1);
+ strscpy(extra_mem, name, name_len + 1);
else
sprintf(extra_mem, "Partition_%03d", this_part);
parts[this_part].name = extra_mem;
@@ -298,7 +298,7 @@ static int mtdpart_setup_real(char *s)
this_mtd->parts = parts;
this_mtd->num_parts = num_parts;
this_mtd->mtd_id = (char*)(this_mtd + 1);
- strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
+ strscpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
/* link into chain */
this_mtd->next = partitions;
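
The strlcpy() -> strscpy() conversions here (and in Space.c and bond_main.c below) matter because strscpy() never reads the source past the destination size and reports truncation. A small sketch of the return-value difference, with a hypothetical wrapper:

#include <linux/string.h>
#include <linux/printk.h>

/* Hypothetical wrapper: strscpy() returns the number of characters copied
 * (excluding the NUL) or -E2BIG on truncation, unlike strlcpy() which
 * returns the full source length and may over-read a short source.
 */
static void copy_part_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_len);

	if (ret == -E2BIG)
		pr_warn("partition name truncated to \"%s\"\n", dst);
}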
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 7f955fade838..4cfec3b7b446 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -239,7 +239,7 @@ static int sm_read_sector(struct sm_ftl *ftl,
uint8_t *buffer, struct sm_oob *oob)
{
struct mtd_info *mtd = ftl->trans->mtd;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
struct sm_oob tmp_oob;
int ret = -EIO;
int try = 0;
@@ -323,7 +323,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
int zone, int block, int boffset,
uint8_t *buffer, struct sm_oob *oob)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
struct mtd_info *mtd = ftl->trans->mtd;
int ret;
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 1d05c121904c..04da685c36be 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -163,7 +163,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
/* Read redundancy area (wrapper to MTD_READ_OOB */
static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int ret;
ops.mode = MTD_OPS_RAW;
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 08084c018a59..98d7508f95b1 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -99,7 +99,7 @@ static int write_page(int log)
static int rewrite_page(int log)
{
int err = 0;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
if (log)
pr_info("rewrite page\n");
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 532997e10e29..13fed398937e 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -56,7 +56,7 @@ static void do_vary_offset(void)
static int write_eraseblock(int ebnum)
{
int i;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
@@ -165,7 +165,7 @@ static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
static int verify_eraseblock(int ebnum)
{
int i;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t bitflips;
@@ -260,7 +260,7 @@ static int verify_eraseblock(int ebnum)
static int verify_eraseblock_in_one_go(int ebnum)
{
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->oobavail * pgcnt;
@@ -338,7 +338,7 @@ static int __init mtd_oobtest_init(void)
int err = 0;
unsigned int i;
uint64_t tmp;
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
loff_t addr = 0, addr0;
printk(KERN_INFO "\n");
diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c
index e70d588083a3..99670ef91f2b 100644
--- a/drivers/mtd/tests/readtest.c
+++ b/drivers/mtd/tests/readtest.c
@@ -47,7 +47,7 @@ static int read_eraseblock_by_page(int ebnum)
err = ret;
}
if (mtd->oobsize) {
- struct mtd_oob_ops ops;
+ struct mtd_oob_ops ops = { };
ops.mode = MTD_OPS_PLACE_OOB;
ops.len = 0;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2aaf02bfe6f7..9e63b8c43f3e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -86,8 +86,6 @@ config WIREGUARD
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
@@ -501,6 +499,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/pse-pd/Kconfig"
+
source "drivers/net/can/Kconfig"
source "drivers/net/mctp/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3f1192d3c52d..6ce076462dbf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET) += loopback.o
obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += pse-pd/
obj-y += mdio/
obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index f475eef14390..83214e2e70ab 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -68,7 +68,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
- strlcpy(s[i].name, name, IFNAMSIZ);
+ strscpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 9a247eb7679c..2d20be6ffb7e 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -2894,8 +2894,7 @@ static void amt_event_work(struct work_struct *work)
amt_event_send_request(amt);
break;
default:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
break;
}
}
@@ -3033,8 +3032,7 @@ static int amt_dev_stop(struct net_device *dev)
cancel_work_sync(&amt->event_wq);
for (i = 0; i < AMT_MAX_EVENTS; i++) {
skb = amt->events[i].skb;
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
amt->events[i].event = AMT_EVENT_NONE;
amt->events[i].skb = NULL;
}
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 184608bd8999..e58a1e0cadd2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -88,8 +88,9 @@ static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2f4da2c13c0a..24bb50dfd362 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -865,12 +865,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -890,7 +886,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -901,10 +898,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -2166,16 +2165,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2447,7 +2444,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -3167,6 +3165,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
found:
if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
dst_release(dst);
kfree(tags);
}
@@ -3198,12 +3199,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
return ret;
}
-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
struct in6_addr *saddr, struct in6_addr *daddr)
{
int i;
- if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+ /* Ignore NAs whose:
+ * 1. source address is the unspecified address, or
+ * 2. destination address is neither the all-nodes multicast address
+ * nor an address present on the bond interface.
+ */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
__func__, saddr, daddr);
return;
@@ -3246,14 +3254,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
* see bond_arp_rcv().
*/
if (bond_is_active_slave(slave))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
out:
return RX_HANDLER_ANOTHER;
@@ -4174,6 +4182,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -4211,6 +4225,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -4222,6 +4239,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -4229,6 +4247,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
@@ -5619,7 +5650,7 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
@@ -6218,15 +6249,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
-
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 9b5a5df23d21..8996bd0a194a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -47,10 +47,10 @@ static ssize_t bonding_show_bonds(struct class *cls,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", bond->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -178,10 +178,10 @@ static ssize_t bonding_show_slaves(struct device *d,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", slave->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
rtnl_unlock();
@@ -203,7 +203,7 @@ static ssize_t bonding_show_mode(struct device *d,
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
+ return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
@@ -217,7 +217,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
- return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
@@ -233,7 +233,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
- return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
@@ -248,7 +248,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
- return sprintf(buf, "%s %d\n",
+ return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
@@ -265,7 +265,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
- return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
@@ -277,7 +277,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.arp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
@@ -292,8 +292,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
- res += sprintf(buf + res, "%pI4 ",
- &bond->params.arp_targets[i]);
+ res += sysfs_emit_at(buf, res, "%pI4 ",
+ &bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -310,7 +310,7 @@ static ssize_t bonding_show_missed_max(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.missed_max);
+ return sysfs_emit(buf, "%u\n", bond->params.missed_max);
}
static DEVICE_ATTR(arp_missed_max, 0644,
bonding_show_missed_max, bonding_sysfs_store_option);
@@ -322,7 +322,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
@@ -333,7 +333,7 @@ static ssize_t bonding_show_updelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
@@ -345,8 +345,8 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n",
- bond->params.peer_notif_delay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
@@ -361,7 +361,7 @@ static ssize_t bonding_show_lacp_active(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
@@ -375,7 +375,7 @@ static ssize_t bonding_show_lacp_rate(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
@@ -386,7 +386,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.min_links);
+ return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
@@ -400,7 +400,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
- return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
@@ -412,7 +412,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.num_peer_notif);
+ return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
@@ -426,7 +426,7 @@ static ssize_t bonding_show_miimon(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
@@ -443,7 +443,7 @@ static ssize_t bonding_show_primary(struct device *d,
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
- count = sprintf(buf, "%s\n", primary->dev->name);
+ count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
@@ -462,8 +462,8 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
- return sprintf(buf, "%s %d\n",
- val->string, bond->params.primary_reselect);
+ return sysfs_emit(buf, "%s %d\n",
+ val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
@@ -475,7 +475,7 @@ static ssize_t bonding_show_carrier(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -493,7 +493,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
- count = sprintf(buf, "%s\n", slave_dev->name);
+ count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -509,7 +509,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
- return sprintf(buf, "%s\n", active ? "up" : "down");
+ return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
@@ -524,9 +524,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.aggregator_id);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -545,9 +545,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.ports);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.ports);
}
return count;
@@ -566,9 +566,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.actor_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -587,9 +587,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.partner_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -609,7 +609,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
- count = sprintf(buf, "%pM\n", ad_info.partner_system);
+ count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
@@ -634,11 +634,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ res += sysfs_emit_at(buf, res, "%s:%d ",
+ slave->dev->name, slave->queue_id);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -658,7 +658,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+ return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
@@ -670,7 +670,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.resend_igmp);
+ return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -682,7 +682,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.lp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
@@ -693,7 +693,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
+ return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
@@ -705,7 +705,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
+ return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
@@ -717,7 +717,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
@@ -731,7 +731,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+ return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
@@ -746,7 +746,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 69b0a3751dff..313866f2c0e4 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -22,30 +22,30 @@ static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
- return sprintf(buf, "backup\n");
+ return sysfs_emit(buf, "backup\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+ return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->link_failure_count);
+ return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%*phC\n",
+ return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
@@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", slave->queue_id);
}
static SLAVE_ATTR_RO(queue_id);
@@ -64,11 +64,11 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
- return sprintf(buf, "%d\n",
- agg->aggregator_identifier);
+ return sysfs_emit(buf, "%d\n",
+ agg->aggregator_identifier);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
@@ -79,11 +79,11 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
@@ -94,11 +94,11 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
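
The sprintf() -> sysfs_emit()/sysfs_emit_at() conversions in these two sysfs files use helpers that verify buf is the page-aligned sysfs buffer and clamp output to PAGE_SIZE. A minimal sketch of a converted show callback; the attribute name and value are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute using the bounded sysfs_emit() helper. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);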
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index f23a03300a81..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+ /* This is not a FIFO. C/D_CAN transmits the pending buffers in
+ * priority order: the lowest buffer number wins.
+ */
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
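
A short worked example of the non-FIFO accounting added above (it applies only when priv->type is not BOSCH_D_CAN), with obj_num = 16 and hypothetical counter values:

	ring->head = 21, ring->tail = 19  ->  masked head = 5, tail = 3
	    head >= tail, so free = 16 - 5 = 11: buffers 5..15 can be filled
	    without overtaking the two frames pending in buffers 3 and 4.
	ring->head = 18, ring->tail = 13  ->  masked head = 2, tail = 13
	    head < tail, so free = 0: buffers 13..15 queued on the previous lap
	    are still pending, and a frame placed in buffer 2 would be sent
	    before them because the lowest buffer number wins.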
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index dc8132862f33..d6605dbb7737 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_packets += pkts;
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 3c18d028bd8c..b8da15ea6ad9 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -657,7 +657,6 @@ static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *c
cf->can_id = (idw >> 18) & CAN_SFF_MASK;
/* BRS, ESI, RTR Flags */
- cf->flags = 0;
if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
cf->flags |= CANFD_BRS;
@@ -1425,7 +1424,7 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->can.clock.freq = can_clk_rate;
- netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);
ret = register_candev(ndev);
if (ret) {
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index 89d54c2151e1..f83684f006ea 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -58,7 +58,6 @@ static int ctucan_platform_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr)) {
- dev_err(dev, "Cannot remap address.\n");
ret = PTR_ERR(addr);
goto err;
}
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index a32a01c172d4..81ebf0562c89 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -247,7 +247,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
@@ -329,7 +329,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
{
offload->dev = dev;
- /* Limit queue len to 4x the weight (rounted to next power of two) */
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
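The corrected comment above describes skb_queue_len_max as roughly 4x the NAPI weight rounded up to a power of two. A small standalone sketch of the `2 << fls(weight)` arithmetic, with fls() approximated in userspace:

#include <stdio.h>

/* userspace stand-in for the kernel's fls(): 1-based index of the highest set bit */
static int fls_approx(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int weight;

	for (weight = 1; weight <= 64; weight *= 4) {
		/* 2 << fls(w): twice the smallest power of two strictly above w */
		unsigned int max = 2 << fls_approx(weight);

		max *= 4;	/* then the 4x head-room from the comment */
		printf("weight=%2u -> skb_queue_len_max=%u\n", weight, max);
	}
	return 0;
}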
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 07e0feac8629..791a51e2f5d6 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -91,8 +91,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
- unsigned int *frame_len_ptr)
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
@@ -108,16 +108,12 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
+ *len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
@@ -147,7 +143,7 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
@@ -191,6 +187,20 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -204,16 +214,8 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
@@ -235,23 +237,51 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
}
skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -291,6 +321,14 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
}
return true;
@@ -299,18 +337,25 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct can_priv *priv = netdev_priv(dev);
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
goto inval_skb;
- } else {
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
goto inval_skb;
}
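alloc_canxl_skb() above is a new exported helper. A hedged sketch of how a driver RX path might use it; this is kernel-context code, and my_rx_one_xl_frame() and hw_buf are illustrative names, not part of the patch:

#include <linux/can/skb.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int my_rx_one_xl_frame(struct net_device *ndev, const void *hw_buf,
			      unsigned int data_len)
{
	struct canxl_frame *cxl;
	struct sk_buff *skb;

	/* alloc_canxl_skb() rejects lengths outside CANXL_MIN_DLEN..CANXL_MAX_DLEN
	 * and returns NULL (with *cxl cleared) on any failure
	 */
	skb = alloc_canxl_skb(ndev, &cxl, data_len);
	if (!skb)
		return -ENOMEM;

	/* CANXL_XLF and cxl->len were already set by the allocator; the
	 * driver only fills the payload and hands the skb to the stack
	 */
	memcpy(cxl->data, hw_buf, data_len);
	netif_rx(skb);

	return 0;
}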
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index f857968efed7..5ee38e586fd8 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -295,45 +295,45 @@ static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -341,23 +341,23 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
@@ -365,8 +365,8 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -941,11 +941,6 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
@@ -974,6 +969,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
@@ -2085,20 +2085,20 @@ static int flexcan_probe(struct platform_device *pdev)
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!((devtype_data->quirks &
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) ==
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) {
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) {
dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
return -EINVAL;
}
if ((devtype_data->quirks &
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) {
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) {
dev_err(&pdev->dev,
"Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n",
devtype_data->quirks);
@@ -2177,8 +2177,7 @@ static int flexcan_probe(struct platform_device *pdev)
err = flexcan_setup_stop_mode(pdev);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "setup stop mode failed\n");
+ dev_err_probe(&pdev->dev, err, "setup stop mode failed\n");
goto failed_setup_stop_mode;
}
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 8621a8ea1dea..025c3417031f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -63,11 +63,11 @@
/* Setup 16 mailboxes */
#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
/* Device supports RX via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX BIT(14)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14)
/* Device supports RTR reception via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR BIT(15)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_FIFO BIT(16)
+#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -121,7 +121,7 @@ flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX;
}
static inline bool
@@ -129,10 +129,10 @@ flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return (quirks & (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR);
+ return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR);
}
static inline bool
@@ -140,7 +140,7 @@ flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_FIFO;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO;
}
static inline bool
@@ -149,7 +149,7 @@ flexcan_active_rx_rtr(const struct flexcan_priv *priv)
const u32 quirks = priv->devtype_data.quirks;
if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
- if (quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)
+ if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)
return true;
} else {
/* RX-FIFO is always RTR capable */
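Beyond the SUPPPORT to SUPPORT rename, the helpers keep the "mask, then compare against both bits" idiom, since RTR-via-mailbox only makes sense when mailbox RX itself is advertised. A standalone model of that check, using the same bit positions as the quirk defines:

#include <stdio.h>

#define QUIRK_RX_MAILBOX	(1u << 14)
#define QUIRK_RX_MAILBOX_RTR	(1u << 15)

/* mirrors flexcan_supports_rx_mailbox_rtr(): both bits must be set */
static int supports_rx_mailbox_rtr(unsigned int quirks)
{
	return (quirks & (QUIRK_RX_MAILBOX | QUIRK_RX_MAILBOX_RTR)) ==
	       (QUIRK_RX_MAILBOX | QUIRK_RX_MAILBOX_RTR);
}

int main(void)
{
	printf("mailbox only: %d, rtr only: %d, both: %d\n",
	       supports_rx_mailbox_rtr(QUIRK_RX_MAILBOX),
	       supports_rx_mailbox_rtr(QUIRK_RX_MAILBOX_RTR),
	       supports_rx_mailbox_rtr(QUIRK_RX_MAILBOX | QUIRK_RX_MAILBOX_RTR));
	return 0;
}

The "rtr only" case is exactly the inconsistent combination the flexcan probe rejects above.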
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index ad7a89b95da7..8d42b7e6661f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -973,7 +973,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->base = addr;
- netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index ed54c0b3c7d4..4e9680c8eb34 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -329,12 +329,9 @@ MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
u32 res;
- int ret;
-
- ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
- res, res & msk, 0, 10);
- return ret;
+ return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
}
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 4709c012b1dc..dcb582563d5e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1467,8 +1467,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
}
if (!cdev->is_peripheral)
- netif_napi_add(dev, &cdev->napi,
- m_can_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 27085b796e75..567620d215f8 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1880,10 +1880,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Global controller context */
gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
- if (!gpriv) {
- err = -ENOMEM;
- goto fail_dev;
- }
+ if (!gpriv)
+ return -ENOMEM;
+
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
@@ -1904,12 +1903,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Peripheral clock */
gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(gpriv->clkp)) {
- err = PTR_ERR(gpriv->clkp);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->clkp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ "cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
@@ -1917,12 +1913,10 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
- if (IS_ERR(gpriv->can_clk)) {
- err = PTR_ERR(gpriv->can_clk);
- dev_err(&pdev->dev,
- "cannot get canfd clock, error %d\n", err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->can_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ "cannot get canfd clock\n");
+
gpriv->fcan = RCANFD_CANFDCLK;
} else {
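The rcar_canfd (and flexcan) probe paths above switch to dev_err_probe(), which returns the error passed in, logs it unless it is -EPROBE_DEFER, and records the deferral reason, so the "if (err != -EPROBE_DEFER) dev_err(...)" boilerplate can go. A short kernel-context sketch of the pattern; my_probe_get_clock() is an illustrative name:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int my_probe_get_clock(struct platform_device *pdev, struct clk **out)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "cannot get peripheral clock\n");

	*out = clk;
	return 0;
}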
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 131a084c3535..ebd5941c3f53 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -478,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 98dfd5f295a7..1bb1129b0450 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -661,8 +661,6 @@ static const struct ethtool_ops sja1000_ethtool_ops = {
int register_sja1000dev(struct net_device *dev)
{
- int ret;
-
if (!sja1000_probe_chip(dev))
return -ENODEV;
@@ -673,9 +671,7 @@ int register_sja1000dev(struct net_device *dev)
set_reset_mode(dev);
chipset_init(dev);
- ret = register_candev(dev);
-
- return ret;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 81bc741905fd..6779d5357069 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -103,6 +104,11 @@ static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *o
spin_lock_init(&tp->io_lock);
}
+static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ priv->flags = SJA1000_QUIRK_NO_CDR_REG;
+}
+
static void sp_populate(struct sja1000_priv *priv,
struct sja1000_platform_data *pdata,
unsigned long resource_mem_flags)
@@ -153,11 +159,13 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->write_reg = sp_write_reg8;
}
- err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ if (!priv->can.clock.freq) {
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ }
err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
if (!err)
@@ -192,8 +200,13 @@ static struct sja1000_of_data technologic_data = {
.init = sp_technologic_init,
};
+static struct sja1000_of_data renesas_data = {
+ .init = sp_rzn1_init,
+};
+
static const struct of_device_id sp_of_table[] = {
{ .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, },
{ .compatible = "technologic,sja1000", .data = &technologic_data, },
{ /* sentinel */ },
};
@@ -210,6 +223,7 @@ static int sp_probe(struct platform_device *pdev)
struct device_node *of = pdev->dev.of_node;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
+ struct clk *clk;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -234,6 +248,11 @@ static int sp_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
+
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "CAN clk operation failed");
} else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
@@ -262,6 +281,15 @@ static int sp_probe(struct platform_device *pdev)
priv->reg_base = addr;
if (of) {
+ if (clk) {
+ priv->can.clock.freq = clk_get_rate(clk) / 2;
+ if (!priv->can.clock.freq) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Zero CAN clk rate");
+ goto exit_free;
+ }
+ }
+
sp_populate_of(priv, of);
if (of_data && of_data->init)
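The sja1000_platform change takes the core clock from an optional clk and, like the existing external-clock property, halves it, the SJA1000 internal clock being derived from fosc/2. A kernel-context sketch of the optional-clock idiom, with an illustrative helper name:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/types.h>

static int my_get_can_clock(struct platform_device *pdev, u32 *freq)
{
	struct clk *clk;

	/* NULL when no clock is described, ERR_PTR on lookup failure,
	 * otherwise a clock that is already prepared and enabled
	 */
	clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "CAN clk operation failed\n");

	if (clk) {
		*freq = clk_get_rate(clk) / 2;	/* SJA1000 core runs at fosc / 2 */
		if (!*freq)
			return -EINVAL;
	}

	return 0;
}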
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index d769bdf740b7..640fe0a1df63 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -222,7 +222,7 @@ union es58x_urb_cmd {
u8 cmd_type;
u8 cmd_id;
} __packed;
- u8 raw_cmd[0];
+ DECLARE_FLEX_ARRAY(u8, raw_cmd);
};
/**
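The es58x change swaps a zero-length raw_cmd[0] for DECLARE_FLEX_ARRAY(), since standard C does not allow a flexible array member to sit directly inside a union. A kernel-context sketch with an illustrative layout, not the es58x command format:

#include <linux/stddef.h>
#include <linux/types.h>

union my_urb_cmd {
	struct {
		u8 cmd_type;
		u8 cmd_id;
		__le16 payload_len;
	} __packed hdr;
	DECLARE_FLEX_ARRAY(u8, raw_cmd);	/* raw byte view of the whole command */
};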
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index baf749c8cda3..f0065d40eb24 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -10,20 +10,24 @@
*/
#include <linux/bitfield.h>
+#include <linux/clocksource.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
+#include <linux/timecounter.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GS_USB_1_VENDOR_ID 0x1d50
+#define USB_GS_USB_1_PRODUCT_ID 0x606f
#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
@@ -34,8 +38,16 @@
#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define GS_USB_ENDPOINT_IN 1
+#define GS_USB_ENDPOINT_OUT 2
+
+/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
+ * for timer overflow (will be after ~71 minutes)
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
/* Device specific constants */
enum gs_usb_breq {
@@ -52,6 +64,8 @@ enum gs_usb_breq {
GS_USB_BREQ_SET_USER_ID,
GS_USB_BREQ_DATA_BITTIMING,
GS_USB_BREQ_BT_CONST_EXT,
+ GS_USB_BREQ_SET_TERMINATION,
+ GS_USB_BREQ_GET_TERMINATION,
};
enum gs_can_mode {
@@ -75,6 +89,14 @@ enum gs_can_identify_mode {
GS_CAN_IDENTIFY_ON
};
+enum gs_can_termination_state {
+ GS_CAN_TERMINATION_STATE_OFF = 0,
+ GS_CAN_TERMINATION_STATE_ON
+};
+
+#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define GS_USB_TERMINATION_ENABLED 120
+
/* data types passed between host and device */
/* The firmware on the original USB2CAN by Geschwister Schneider
@@ -111,6 +133,7 @@ struct gs_device_config {
#define GS_CAN_MODE_FD BIT(8)
/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
+/* GS_CAN_FEATURE_TERMINATION BIT(11) */
struct gs_device_mode {
__le32 mode;
@@ -135,6 +158,10 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
+struct gs_device_termination_state {
+ __le32 state;
+} __packed;
+
#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
@@ -146,7 +173,8 @@ struct gs_identify_mode {
#define GS_CAN_FEATURE_FD BIT(8)
#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
-#define GS_CAN_FEATURE_MASK GENMASK(10, 0)
+#define GS_CAN_FEATURE_TERMINATION BIT(11)
+#define GS_CAN_FEATURE_MASK GENMASK(11, 0)
/* internal quirks - keep in GS_CAN_FEATURE space for now */
@@ -199,6 +227,11 @@ struct classic_can {
u8 data[8];
} __packed;
+struct classic_can_ts {
+ u8 data[8];
+ __le32 timestamp_us;
+} __packed;
+
struct classic_can_quirk {
u8 data[8];
u8 quirk;
@@ -208,6 +241,11 @@ struct canfd {
u8 data[64];
} __packed;
+struct canfd_ts {
+ u8 data[64];
+ __le32 timestamp_us;
+} __packed;
+
struct canfd_quirk {
u8 data[64];
u8 quirk;
@@ -224,8 +262,10 @@ struct gs_host_frame {
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
};
} __packed;
@@ -259,6 +299,12 @@ struct gs_can {
struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ /* time counter for hardware timestamps */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t tc_lock; /* spinlock to guard access to tc->cycle_last */
+ struct delayed_work timestamp;
+
u32 feature;
unsigned int hf_size_tx;
@@ -268,8 +314,6 @@ struct gs_can {
struct usb_anchor tx_submitted;
atomic_t active_tx_urbs;
- void *rxbuf[GS_MAX_RX_URBS];
- dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
};
/* usb interface struct */
@@ -328,27 +372,109 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *dev)
+{
+ struct gs_device_mode dm = {
+ .mode = GS_CAN_MODE_RESET,
+ };
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+}
+
+static inline int gs_usb_get_timestamp(const struct gs_can *dev,
+ u32 *timestamp_p)
{
- struct gs_device_mode *dm;
- struct usb_interface *intf = gsdev->iface;
+ __le32 timestamp;
int rc;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_TIMESTAMP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &timestamp, sizeof(timestamp),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
- dm->mode = GS_CAN_MODE_RESET;
+ *timestamp_p = le32_to_cpu(timestamp);
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel, 0, dm, sizeof(*dm), 1000);
+ return 0;
+}
- kfree(dm);
+static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+{
+ struct gs_can *dev = container_of(cc, struct gs_can, cc);
+ u32 timestamp = 0;
+ int err;
+
+ lockdep_assert_held(&dev->tc_lock);
+
+ /* drop lock for synchronous USB transfer */
+ spin_unlock_bh(&dev->tc_lock);
+ err = gs_usb_get_timestamp(dev, &timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ if (err)
+ netdev_err(dev->netdev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
- return rc;
+static void gs_usb_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gs_can *dev;
+
+ dev = container_of(delayed_work, struct gs_can, timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_read(&dev->tc);
+ spin_unlock_bh(&dev->tc_lock);
+
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_skb_set_timestamp(struct gs_can *dev,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ spin_lock_bh(&dev->tc_lock);
+ ns = timecounter_cyc2time(&dev->tc, timestamp);
+ spin_unlock_bh(&dev->tc_lock);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void gs_usb_timestamp_init(struct gs_can *dev)
+{
+ struct cyclecounter *cc = &dev->cc;
+
+ cc->read = gs_usb_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
+ cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
+
+ spin_lock_init(&dev->tc_lock);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns());
+ spin_unlock_bh(&dev->tc_lock);
+
+ INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work);
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_timestamp_stop(struct gs_can *dev)
+{
+ cancel_delayed_work_sync(&dev->timestamp);
}
static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
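The timestamp support above wraps the device's free-running 32-bit, 1 MHz counter in a cyclecounter/timecounter pair; the delayed work only has to call timecounter_read() more often than the counter wraps, which the static_assert enforces. A standalone sketch of that bound, using the same constants as the macros:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t timer_hz = 1000000;	/* GS_USB_TIMESTAMP_TIMER_HZ */
	const uint64_t work_delay_sec = 1800;	/* GS_USB_TIMESTAMP_WORK_DELAY_SEC */
	const uint64_t mask32 = 0xffffffffULL;	/* CYCLECOUNTER_MASK(32) */

	/* the 32-bit microsecond counter wraps after mask32 / timer_hz seconds */
	uint64_t wrap_sec = mask32 / timer_hz;

	printf("counter wraps after %llu s (~%llu min)\n",
	       (unsigned long long)wrap_sec,
	       (unsigned long long)(wrap_sec / 60));

	/* the static_assert above: poll at least twice per wrap */
	printf("static_assert holds: %d\n", work_delay_sec < wrap_sec / 2);

	return 0;
}

This prints roughly 4294 s (about 71 minutes), so a 1800 s worker interval refreshes the timecounter more than twice per wrap.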
@@ -376,6 +502,24 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
+static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ u32 timestamp;
+
+ if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP))
+ return;
+
+ if (hf->flags & GS_CAN_FLAG_FD)
+ timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
+ else
+ timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);
+
+ gs_usb_skb_set_timestamp(dev, skb, timestamp);
+
+ return;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
@@ -443,6 +587,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_update_state(dev, cf);
}
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -465,6 +611,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
+ skb = dev->can.echo_skb[hf->echo_id];
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
NULL);
@@ -491,7 +640,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, usbcan->udev,
- usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+ usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN),
hf, dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, usbcan);
@@ -511,72 +660,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
- struct usb_interface *intf = dev->iface;
- int rc;
- struct gs_device_bittiming *dbt;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
/* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BITTIMING,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dbt, sizeof(*dbt), 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
- rc);
-
- return (rc > 0) ? 0 : rc;
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.data_bittiming;
- struct usb_interface *intf = dev->iface;
- struct gs_device_bittiming *dbt;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
u8 request = GS_USB_BREQ_DATA_BITTIMING;
- int rc;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
- /* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dbt, sizeof(*dbt), 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent,
- "Couldn't set data bittimings (err=%d)", rc);
-
- return (rc > 0) ? 0 : rc;
+ /* request data bit timings */
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static void gs_usb_xmit_callback(struct urb *urb)
@@ -587,9 +708,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
-
- usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -618,8 +736,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC,
- &urb->transfer_dma);
+ hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
goto nomem_hf;
@@ -659,11 +776,11 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
}
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+ usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
hf, dev->hf_size_tx,
gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0);
@@ -678,8 +795,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -699,8 +814,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ kfree(hf);
nomem_hf:
usb_free_urb(urb);
@@ -715,11 +829,13 @@ static int gs_can_open(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- int rc, i;
- struct gs_device_mode *dm;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_START),
+ };
struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
+ int rc, i;
rc = open_candev(netdev);
if (rc)
@@ -744,7 +860,6 @@ static int gs_can_open(struct net_device *netdev)
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
- dma_addr_t buf_dma;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -752,10 +867,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* alloc rx buffer */
- buf = usb_alloc_coherent(dev->udev,
- dev->parent->hf_size_rx,
- GFP_KERNEL,
- &buf_dma);
+ buf = kmalloc(dev->parent->hf_size_rx,
+ GFP_KERNEL);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -763,17 +876,15 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
}
- urb->transfer_dma = buf_dma;
-
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
usb_rcvbulkpipe(dev->udev,
- GSUSB_ENDPOINT_IN),
+ GS_USB_ENDPOINT_IN),
buf,
dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -786,17 +897,10 @@ static int gs_can_open(struct net_device *netdev)
"usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- buf,
- buf_dma);
usb_free_urb(urb);
break;
}
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
/* Drop reference,
* USB core will take care of freeing it
*/
@@ -804,10 +908,6 @@ static int gs_can_open(struct net_device *netdev)
}
}
- dm = kmalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
-
/* flags */
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
@@ -823,25 +923,30 @@ static int gs_can_open(struct net_device *netdev)
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ /* if hardware supports timestamps, enable it */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+
+ /* start polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_init(dev);
+
/* finally start device */
- dm->mode = cpu_to_le32(GS_CAN_MODE_START);
- dm->flags = cpu_to_le32(flags);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dm, sizeof(*dm), 1000);
-
- if (rc < 0) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm.flags = cpu_to_le32(flags);
+ rc = usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+ if (rc) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
- kfree(dm);
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
- kfree(dm);
-
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -854,19 +959,17 @@ static int gs_can_close(struct net_device *netdev)
int rc;
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- unsigned int i;
netif_stop_queue(netdev);
+ /* stop polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+
/* Stop polling */
parent->active_channels--;
if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
- for (i = 0; i < GS_MAX_RX_URBS; i++)
- usb_free_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- dev->rxbuf[i],
- dev->rxbuf_dma[i]);
}
/* Stop sending URBs */
@@ -890,52 +993,57 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
+static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_eth_ioctl_hwts(netdev, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
.ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = gs_can_eth_ioctl,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode *imode;
- int rc;
-
- imode = kmalloc(sizeof(*imode), GFP_KERNEL);
-
- if (!imode)
- return -ENOMEM;
+ struct gs_identify_mode imode;
if (do_identify)
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
-
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, imode, sizeof(*imode), 100);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
- kfree(imode);
-
- return (rc > 0) ? 0 : rc;
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &imode, sizeof(imode), 100,
+ GFP_KERNEL);
}
/* blink LEDs for finding this interface */

-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -944,9 +1052,67 @@ static int gs_usb_set_phys_id(struct net_device *dev,
return rc;
}
+static int gs_usb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+
+ /* report if device supports HW timestamps */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_ethtool_op_get_ts_info_hwts(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gs_usb_get_ts_info,
+};
+
+static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+ int rc;
+
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_GET_TERMINATION,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
+ *term = GS_USB_TERMINATION_ENABLED;
+ else
+ *term = GS_USB_TERMINATION_DISABLED;
+
+ return 0;
+}
+
+static int gs_usb_set_termination(struct net_device *netdev, u16 term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+
+ if (term == GS_USB_TERMINATION_ENABLED)
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
+ else
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_SET_TERMINATION,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+}
+
+static const u16 gs_usb_termination_const[] = {
+ GS_USB_TERMINATION_DISABLED,
+ GS_USB_TERMINATION_ENABLED
};
static struct gs_can *gs_make_candev(unsigned int channel,
@@ -956,26 +1122,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct gs_can *dev;
struct net_device *netdev;
int rc;
- struct gs_device_bt_const *bt_const;
- struct gs_device_bt_const_extended *bt_const_extended;
+ struct gs_device_bt_const_extended bt_const_extended;
+ struct gs_device_bt_const bt_const;
u32 feature;
- bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
- if (!bt_const)
- return ERR_PTR(-ENOMEM);
-
/* fetch bit timing constants */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel, 0, bt_const, sizeof(*bt_const), 1000);
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const, sizeof(bt_const), 1000,
+ GFP_KERNEL);
- if (rc < 0) {
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const);
+ "Couldn't get bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
return ERR_PTR(rc);
}
@@ -983,7 +1144,6 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Couldn't allocate candev\n");
- kfree(bt_const);
return ERR_PTR(-ENOMEM);
}
@@ -996,14 +1156,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
- dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
- dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
- dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
- dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
- dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
- dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
- dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
- dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@@ -1020,13 +1180,13 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
- feature = le32_to_cpu(bt_const->feature);
+ feature = le32_to_cpu(bt_const.feature);
dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -1049,6 +1209,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
+ rc = gs_usb_get_termination(netdev, &dev->can.termination);
+ if (rc) {
+ dev->feature &= ~GS_CAN_FEATURE_TERMINATION;
+
+ dev_info(&intf->dev,
+ "Disabling termination support for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ } else {
+ dev->can.termination_const = gs_usb_termination_const;
+ dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
+ dev->can.do_set_termination = gs_usb_set_termination;
+ }
+ }
+
/* The CANtact Pro from LinkLayer Labs is based on the
* LPC54616 µC, which is affected by the NXP LPC USB transfer
* erratum. However, the current firmware (version 2) doesn't
@@ -1063,8 +1238,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
* GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
* issue.
*/
- if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) &&
- dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) &&
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
dev->udev->manufacturer && dev->udev->product &&
!strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
!strcmp(dev->udev->product, "CANtact Pro") &&
@@ -1072,61 +1247,57 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
-
- kfree(bt_const);
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
/* fetch extended bit timing constants if device has feature
* GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
*/
if (feature & GS_CAN_FEATURE_FD &&
feature & GS_CAN_FEATURE_BT_CONST_EXT) {
- bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL);
- if (!bt_const_extended)
- return ERR_PTR(-ENOMEM);
-
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST_EXT,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel, 0, bt_const_extended,
- sizeof(*bt_const_extended),
- 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const_extended,
+ sizeof(bt_const_extended),
+ 1000, GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get extended bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const_extended);
- return ERR_PTR(rc);
+ "Couldn't get extended bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
- dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
- dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
- dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
- dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max);
- dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max);
- dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min);
- dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max);
- dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
dev->can.data_bittiming_const = &dev->data_bt_const;
-
- kfree(bt_const_extended);
}
SET_NETDEV_DEV(netdev, &intf->dev);
rc = register_candev(dev->netdev);
if (rc) {
- free_candev(dev->netdev);
- dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
- return ERR_PTR(rc);
+ dev_err(&intf->dev,
+ "Couldn't register candev for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
return dev;
+
+ out_free_candev:
+ free_candev(dev->netdev);
+ return ERR_PTR(rc);
}
static void gs_destroy_candev(struct gs_can *dev)
@@ -1142,76 +1313,61 @@ static int gs_usb_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct gs_host_frame *hf;
struct gs_usb *dev;
- int rc = -ENOMEM;
+ struct gs_host_config hconf = {
+ .byte_order = cpu_to_le32(0x0000beef),
+ };
+ struct gs_device_config dconf;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = cpu_to_le32(0x0000beef);
+ int rc;
/* send host config */
- rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- GS_USB_BREQ_HOST_FORMAT,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1, intf->cur_altsetting->desc.bInterfaceNumber,
- hconf, sizeof(*hconf), 1000);
-
- kfree(hconf);
-
- if (rc < 0) {
+ rc = usb_control_msg_send(udev, 0,
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &hconf, sizeof(hconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
- rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- GS_USB_BREQ_DEVICE_CONFIG,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1, intf->cur_altsetting->desc.bInterfaceNumber,
- dconf, sizeof(*dconf), 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(udev, 0,
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &dconf, sizeof(dconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
- kfree(dconf);
return rc;
}
- icount = dconf->icount + 1;
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
- kfree(dconf);
return -EINVAL;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- kfree(dconf);
+ if (!dev)
return -ENOMEM;
- }
init_usb_anchor(&dev->rx_submitted);
- /* default to classic CAN, switch to CAN-FD if at least one of
- * our channels support CAN-FD.
- */
- dev->hf_size_rx = struct_size(hf, classic_can, 1);
usb_set_intfdata(intf, dev);
dev->udev = udev;
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf, dconf);
+ unsigned int hf_size_rx = 0;
+
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@@ -1222,18 +1378,28 @@ static int gs_usb_probe(struct usb_interface *intf,
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dconf);
kfree(dev);
return rc;
}
dev->canch[i]->parent = dev;
- if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD)
- dev->hf_size_rx = struct_size(hf, canfd, 1);
+ /* Set the RX packet size based on whether the channel supports
+ * CAN-FD and whether hardware timestamps are enabled.
+ */
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, canfd_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, classic_can_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, classic_can, 1);
+ }
+ dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx);
}
- kfree(dconf);
-
return 0;
}
@@ -1258,8 +1424,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
- USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID,
+ USB_GS_USB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
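The gs_usb probe conversion above moves from usb_control_msg() with kmalloc()'ed bounce buffers to usb_control_msg_send()/usb_control_msg_recv(). Those wrappers copy through an internally allocated buffer, so callers may pass data that lives on the stack, and they return 0 on success or a negative errno (short transfers count as errors), which is why the "rc < 0" checks become plain "if (rc)". A minimal sketch of the same pattern; the request value 0x42 and struct demo_cfg are made-up placeholders, not part of the gs_usb protocol:

	#include <linux/usb.h>

	struct demo_cfg {
		__le32 mode;
	} __packed;

	static int demo_send_cfg(struct usb_device *udev, u8 ifnum)
	{
		/* On-stack data is fine: the core bounces it internally. */
		struct demo_cfg cfg = {
			.mode = cpu_to_le32(1),
		};

		return usb_control_msg_send(udev, 0, 0x42 /* hypothetical */,
					    USB_DIR_OUT | USB_TYPE_VENDOR |
					    USB_RECIP_INTERFACE,
					    1, ifnum, &cfg, sizeof(cfg),
					    USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
	}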
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index dd65c101bfb8..6871d474dabf 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -534,7 +534,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -573,7 +573,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
@@ -694,7 +694,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -735,7 +735,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
int err;
int i;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1394,7 +1394,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
u32 kcan_id;
u32 kcan_header;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1468,7 +1468,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1533,7 +1533,7 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
int sjw = bt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1567,7 +1567,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
int sjw = dbt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1711,7 +1711,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1851,7 +1851,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -EINVAL;
}
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
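All of the kvaser_usb_hydra hunks above are the same mechanical change: kcalloc(1, sizeof(struct kvaser_cmd), ...) becomes kzalloc(sizeof(*cmd), ...). Both zero the allocation; kzalloc() is the idiomatic spelling for a single object, and sizeof(*cmd) keeps the size tied to the pointer's type if that type ever changes. In short:

	struct kvaser_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);	/* one zeroed object */
	/* was: cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); */
	if (!cmd)
		return -ENOMEM;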
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 8c9d53f6e24c..225697d70a9a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -962,7 +962,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 36b6310a2e5b..285635c23443 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -71,11 +71,10 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += can_skb_get_data_len(skb);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
@@ -86,14 +85,14 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
- int loop, len;
+ unsigned int len;
+ int loop;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
stats->tx_packets++;
stats->tx_bytes += len;
@@ -137,7 +136,8 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
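With CAN XL in the picture, vcan can no longer read the payload length through a struct canfd_frame cast; it uses can_skb_get_data_len(), the helper introduced alongside these changes, which returns the data length for classic, FD and XL frames (and 0 for RTR frames), and the MTU check gains can_is_canxl_dev_mtu() for the variable XL MTU range. A sketch of the resulting check, assuming the helpers and the CAN_MTU/CANFD_MTU constants come from <linux/can.h> and <linux/can/dev.h>:

	#include <linux/can.h>
	#include <linux/can/dev.h>

	static bool demo_valid_can_mtu(int new_mtu)
	{
		return new_mtu == CAN_MTU || new_mtu == CANFD_MTU ||
		       can_is_canxl_dev_mtu(new_mtu);
	}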
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index cffd107d8b28..26a472d2ea58 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -38,10 +38,9 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
@@ -70,7 +69,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
@@ -132,7 +131,8 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index d8ae0e8af2a0..07507b4820d7 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -76,7 +76,7 @@ config NET_DSA_SMSC_LAN9303
select NET_DSA_TAG_LAN9303
select REGMAP
help
- This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ This enables support for the Microchip LAN9303/LAN9354 3-port Ethernet
switch chips.
config NET_DSA_SMSC_LAN9303_I2C
@@ -90,11 +90,11 @@ config NET_DSA_SMSC_LAN9303_I2C
for I2C managed mode.
config NET_DSA_SMSC_LAN9303_MDIO
- tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
depends on VLAN_8021Q || VLAN_8021Q=n
help
- Enable access functions if the SMSC/Microchip LAN9303 is configured
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 48cf344750ff..59cdfc51ce06 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -972,7 +972,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
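The b53 string-set change is part of the tree-wide strlcpy() -> strscpy() conversion: strlcpy() returns strlen(src) and therefore reads past the end of an unterminated source, while strscpy() always NUL-terminates the destination and returns the number of bytes copied or -E2BIG on truncation. A sketch (mib_name is a stand-in for any source string):

	char buf[ETH_GSTRING_LEN];
	ssize_t n;

	n = strscpy(buf, mib_name, sizeof(buf));
	if (n == -E2BIG)
		pr_debug("name truncated, but buf is still NUL-terminated\n");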
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a7aeb3c132c9..6ddc03b58b28 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -356,8 +356,6 @@ static void b53_mdio_remove(struct mdio_device *mdiodev)
return;
b53_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..e968322dfbf0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,8 +316,6 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index da0b889880f6..bcb44034404d 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -667,8 +667,6 @@ static int b53_srab_remove(struct platform_device *pdev)
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
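The b53_mdio/b53_mmap/b53_srab remove() callbacks drop their explicit dev_set_drvdata(..., NULL) / platform_set_drvdata(pdev, NULL) calls. The driver core has reset drvdata to NULL itself whenever no driver is bound (since the v3.7-era "device-core: Ensure drvdata = NULL when no driver is bound" change), so clearing it by hand in remove paths is redundant. A remove callback only needs its own teardown; demo_priv/demo_teardown below are hypothetical:

	static int demo_remove(struct platform_device *pdev)
	{
		struct demo_priv *priv = platform_get_drvdata(pdev);

		if (priv)
			demo_teardown(priv);

		/* no platform_set_drvdata(pdev, NULL); the core does it */
		return 0;
	}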
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index be0edfa093d0..cde253d27bd0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -94,6 +94,24 @@ static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -141,7 +159,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
- u32 reg, offset;
+ u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -167,21 +185,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM4908_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
- else
- reg &= ~GMII_SPEED_UP_2G;
- core_writel(priv, reg, offset);
-
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
@@ -812,17 +815,10 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
if (priv->wol_ports_mask & BIT(port))
return;
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- reg = core_readl(priv, offset);
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
- }
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
@@ -836,56 +832,56 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ u32 reg_rgmii_ctrl = 0;
+ u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl = 0;
- u32 reg, offset;
+ offset = bcm_sf2_port_override_offset(priv, port);
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- if (interface == PHY_INTERFACE_MODE_RGMII ||
- interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- interface == PHY_INTERFACE_MODE_MII ||
- interface == PHY_INTERFACE_MODE_REVMII) {
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
- reg = reg_readl(priv, reg_rgmii_ctrl);
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
- if (tx_pause)
- reg |= TX_PAUSE_EN;
- if (rx_pause)
- reg |= RX_PAUSE_EN;
-
- reg_writel(priv, reg, reg_rgmii_ctrl);
- }
-
- reg = SW_OVERRIDE | LINK_STS;
- switch (speed) {
- case SPEED_1000:
- reg |= SPDSTS_1000 << SPEED_SHIFT;
- break;
- case SPEED_100:
- reg |= SPDSTS_100 << SPEED_SHIFT;
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
if (tx_pause)
- reg |= TXFLOW_CNTL;
+ reg |= TX_PAUSE_EN;
if (rx_pause)
- reg |= RXFLOW_CNTL;
+ reg |= RX_PAUSE_EN;
- core_writel(priv, reg, offset);
+ reg_writel(priv, reg, reg_rgmii_ctrl);
}
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
if (mode == MLO_AN_PHY && phydev)
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
@@ -987,7 +983,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -1011,7 +1007,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
@@ -1555,8 +1551,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index edbe5e7f1cb6..c4010b7bf089 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1296,7 +1296,7 @@ void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
"CFP%03d_%sCntr",
i, bcm_sf2_cfp_stats[j].name);
iter = (i - 1) * s + j;
- strlcpy(data + iter * ETH_GSTRING_LEN,
+ strscpy(data + iter * ETH_GSTRING_LEN,
buf, ETH_GSTRING_LEN);
}
}
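bcm_sf2 also picks up the dsa_port_to_master() accessor instead of dereferencing dp->cpu_dp->master directly; the helper hides how the master (host) net_device behind a user port's CPU port is stored. Usage is a drop-in replacement:

	struct dsa_port *dp = dsa_to_port(ds, port);
	struct net_device *master = dsa_port_to_master(dp);

	/* master is the host interface carrying this port's CPU traffic */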
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 263e41191c29..b9107fe40023 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -351,8 +351,6 @@ static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(ds);
dev_put(ps->netdev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 01f90994dedd..951f7935c872 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -128,6 +128,16 @@ static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
hellcreek_write(hellcreek, val, HR_PSEL);
}
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
@@ -288,7 +298,7 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
counter->name, ETH_GSTRING_LEN);
}
}
@@ -1537,6 +1547,45 @@ out:
return ret;
}
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
@@ -1720,7 +1769,10 @@ static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
- /* Then select port */
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
@@ -1772,7 +1824,10 @@ static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
hellcreek_port->current_schedule = NULL;
}
- /* Then select port */
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
@@ -1809,22 +1864,43 @@ static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
return true;
}
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
struct hellcreek *hellcreek = ds->priv;
- if (type != TC_SETUP_QDISC_TAPRIO)
- return -EOPNOTSUPP;
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
- if (!hellcreek_validate_schedule(hellcreek, taprio))
- return -EOPNOTSUPP;
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
- if (taprio->enable)
- return hellcreek_port_set_schedule(ds, port, taprio);
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
- return hellcreek_port_del_schedule(ds, port);
+ return hellcreek_port_del_schedule(ds, port);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
@@ -1996,7 +2072,6 @@ static int hellcreek_remove(struct platform_device *pdev)
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
- platform_set_drvdata(pdev, NULL);
return 0;
}
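The hellcreek changes wire up taprio's per-traffic-class max-SDU offload: TC_QUERY_CAPS advertises supports_queue_max_sdu, and hellcreek_setup_maxsdu() translates each queueMaxSDU (an L2 payload size) into the on-wire limit the hardware expects by adding the VLAN Ethernet header and dropping the FCS. A worked instance of that conversion, using VLAN_ETH_HLEN (18) from <linux/if_vlan.h> and ETH_FCS_LEN (4) from <linux/if_ether.h>:

	/* queueMaxSDU of 1500 bytes for one traffic class:
	 *   1500 + VLAN_ETH_HLEN - ETH_FCS_LEN = 1500 + 18 - 4 = 1514
	 * which is the value written to HR_PTPRTCCFG for that class.
	 */
	u32 max_sdu = 1500 + VLAN_ETH_HLEN - ETH_FCS_LEN;	/* 1514 */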
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9e303b8ab13c..4a678f7d61ae 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -37,6 +37,7 @@
#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
/* Register definitions */
#define HR_MODID_C (0 * 2)
@@ -72,6 +73,12 @@
#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
#define HR_CSEL (0x8d * 2)
#define HR_CSEL_SHIFT 0
#define HR_CSEL_MASK GENMASK(7, 0)
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index e03ff1f267bb..438e46af03e9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -22,6 +22,10 @@
*/
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
@@ -32,6 +36,7 @@
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
@@ -851,15 +856,12 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
- if (!chip->reset_gpio) {
- dev_dbg(chip->dev,
- "hint: maybe failed due to missing reset GPIO\n");
- }
return ret;
}
- if ((reg >> 16) != LAN9303_CHIP_ID) {
- dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
@@ -875,7 +877,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
- dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
@@ -1090,7 +1092,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
if (!dsa_port_is_user(dp))
return 0;
- vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+ vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
return lan9303_enable_processing_port(chip, port);
}
@@ -1103,7 +1105,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return;
- vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
@@ -1349,6 +1351,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
+ u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
@@ -1359,6 +1362,19 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
lan9303_handle_reset(chip);
+ /* First read to the device. This is a dummy read to ensure MDIO
+ * access is in 32-bit sync.
+ */
+ ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
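lan9303_probe() now begins with a throwaway read of LAN9303_BYTE_ORDER before the chip-ID check: the switch's 32-bit registers are carried over two 16-bit MDIO transfers, and a dummy read resynchronizes that sequence while also catching an unresponsive (e.g. still-in-reset) device early, which is why the reset-GPIO hint moves here from lan9303_check_device(). A sketch of a 32-bit read split into two MDIO cycles; the helper name and register layout are hypothetical, not the lan9303_mdio internals:

	static int demo_read32(struct mii_bus *bus, int addr, int reg, u32 *val)
	{
		int lo, hi;

		lo = mdiobus_read(bus, addr, reg);
		hi = mdiobus_read(bus, addr, reg + 1);
		if (lo < 0 || hi < 0)
			return -EIO;

		*val = (u32)hi << 16 | (u16)lo;
		return 0;
	}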
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 8ca4713310fa..7d746cd9ca1b 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -65,18 +65,14 @@ static int lan9303_i2c_probe(struct i2c_client *client,
return 0;
}
-static int lan9303_i2c_remove(struct i2c_client *client)
+static void lan9303_i2c_remove(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return 0;
+ return;
lan9303_remove(&sw_dev->chip);
-
- i2c_set_clientdata(client, NULL);
-
- return 0;
}
static void lan9303_i2c_shutdown(struct i2c_client *client)
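lan9303_i2c_remove() follows the I2C core's conversion of the remove() callback from int to void (the returned value was never acted on), so the trailing "return 0;" and the redundant i2c_set_clientdata(client, NULL) go away. The resulting shape, with demo_dev/demo_teardown as hypothetical stand-ins:

	static void demo_i2c_remove(struct i2c_client *client)
	{
		struct demo_dev *d = i2c_get_clientdata(client);

		if (d)
			demo_teardown(d);
		/* nothing to return, nothing to reset */
	}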
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index bbb7032409ba..4f33369a2de5 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,8 +138,6 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
@@ -158,6 +156,7 @@ static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index e531b93f3cb2..05ecaa007ab1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1989,11 +1989,9 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset)) {
- if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(gphy_fw->reset);
- }
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
return gswip_gphy_fw_load(priv, gphy_fw);
}
@@ -2231,8 +2229,6 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
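gswip_gphy_fw_probe() switches to dev_err_probe(), which returns the error it is given, prints dev_err() for real failures and stays silent for -EPROBE_DEFER (recording the deferral reason for /sys/kernel/debug/devices_deferred instead). The same pattern fits any probe-time resource lookup, e.g.:

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		/* quiet on -EPROBE_DEFER, dev_err() otherwise */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get clock\n");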
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 42c50cc4d853..8582b4b67d98 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -17,8 +17,8 @@ u32 ksz8_get_port_addr(int port, int offset);
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
-void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
-void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c79a5128235f..bd3b133e7085 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -552,7 +552,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 restart, speed, ctrl, link;
int processed = true;
@@ -560,14 +560,24 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
if (restart & PORT_PHY_LOOPBACK)
data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
@@ -597,7 +607,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
data = BMSR_100FULL |
BMSR_100HALF |
BMSR_10FULL |
@@ -618,7 +631,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data = KSZ8795_ID_LO;
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= ADVERTISE_PAUSE_CAP;
@@ -632,7 +648,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= ADVERTISE_10HALF;
break;
case MII_LPA:
- ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= LPA_PAUSE_CAP;
@@ -648,8 +667,14 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= LPA_LPACK;
break;
case PHY_REG_LINK_MD:
- ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
- ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
if (val1 & PORT_START_CABLE_DIAG)
data |= PHY_START_CABLE_DIAG;
@@ -664,7 +689,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
break;
case PHY_REG_PHY_CTRL:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
if (link & PORT_MDIX_STATUS)
data |= KSZ886X_CTRL_MDIX_STAT;
break;
@@ -674,13 +702,16 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
}
if (processed)
*val = data;
+
+ return 0;
}
-void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
u8 restart, speed, ctrl, data;
const u16 *regs;
u8 p = phy;
+ int ret;
regs = dev->info->regs;
@@ -690,15 +721,26 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
/* Do not support PHY reset function. */
if (val & BMCR_RESET)
break;
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
data = speed;
if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
- if (data != speed)
- ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+
+ if (data != speed) {
+ ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
if ((val & BMCR_ANENABLE))
@@ -724,9 +766,17 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
data = restart;
if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
@@ -756,11 +806,19 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
- if (data != restart)
- ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
+
+ if (data != restart) {
+ ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
+ data);
+ if (ret)
+ return ret;
+ }
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
@@ -777,8 +835,12 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_10BT_FD;
if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
break;
case PHY_REG_LINK_MD:
if (val & PHY_START_CABLE_DIAG)
@@ -787,6 +849,8 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
default:
break;
}
+
+ return 0;
}
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
@@ -1187,7 +1251,6 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
if (i == dev->phy_port_cnt)
break;
p->on = 1;
- p->phy = 1;
}
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
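The ksz8_r_phy()/ksz8_w_phy() rework is one pattern applied throughout: the register accessors' return values are no longer ignored, so every ksz_pread8()/ksz_pwrite8() call is wrapped in the usual check-and-propagate idiom and the PHY ops now return int to the MDIO layer:

	int ret;
	u8 val;

	ret = ksz_pread8(dev, port, reg, &val);
	if (ret)
		return ret;	/* bus error reaches the caller */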
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5247fdfb964d..ddb40838181e 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -180,8 +180,6 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index e4f446db0ca1..a6a0321a8931 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -193,6 +193,11 @@ int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ return 0;
+
data8 = SW_ENABLE_REFCLKO;
if (dev->synclko_disable)
data8 = 0;
@@ -264,9 +269,20 @@ void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
mutex_unlock(&mib->cnt_mutex);
}
-void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
+{
+ /* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
+ * and BMSR_ERCAP bits are set.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
+}
+
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
u16 val = 0xffff;
+ int ret;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be set up in the device tree, but this function is
@@ -274,7 +290,7 @@ void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used. For SGMII PHY the supporting code will be added later.
*/
- if (addr >= dev->phy_port_cnt) {
+ if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
@@ -307,23 +323,25 @@ void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
break;
}
} else {
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
+
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
}
*data = val;
+
+ return 0;
}
-void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
/* No real PHY after this. */
- if (addr >= dev->phy_port_cnt)
- return;
+ if (!dev->info->internal_phy[addr])
+ return 0;
- /* No gigabit support. Do not write to this register. */
- if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return;
-
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
@@ -869,7 +887,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
phy_interface_t interface;
bool gbit;
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
return PHY_INTERFACE_MODE_NA;
gbit = ksz_get_gbit(dev, port);
@@ -914,7 +932,7 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
/* Energy Efficient Ethernet (EEE) feature select must
* be manually disabled (except on KSZ8565 which is 100Mbit)
*/
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
/* Register settings are required to meet data sheet
@@ -941,10 +959,35 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
MAC_SYM_PAUSE;
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
}
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u8 value;
+ u8 data;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ if (ret < 0)
+ return ret;
+
+ data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+
+ return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+}
+
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
@@ -976,7 +1019,7 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
- if (port < dev->phy_port_cnt) {
+ if (dev->info->internal_phy[port]) {
/* do not force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
@@ -999,7 +1042,7 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
@@ -1051,25 +1094,13 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->on = 1;
}
}
for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
- p = &dev->ports[i];
-
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- p->on = 1;
- if (i < dev->phy_port_cnt)
- p->phy = 1;
- if (dev->chip_id == 0x00947700 && i == 6) {
- p->sgmii = 1;
-
- /* SGMII PHY detection code is not implemented yet. */
- p->phy = 0;
- }
}
}
@@ -1158,29 +1189,6 @@ int ksz9477_switch_init(struct ksz_device *dev)
if (ret)
return ret;
- ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
- if (ret)
- return ret;
-
- /* Number of ports can be reduced depending on chip. */
- dev->phy_port_cnt = 5;
-
- /* Default capability is gigabit capable. */
- dev->features = GBIT_SUPPORT;
-
- if (dev->chip_id == KSZ9893_CHIP_ID) {
- dev->features |= IS_9893;
-
- /* Chip does not support gigabit. */
- if (data8 & SW_QW_ABLE)
- dev->features &= ~GBIT_SUPPORT;
- dev->phy_port_cnt = 2;
- } else {
- /* Chip does not support gigabit. */
- if (!(data8 & SW_GIGABIT_ABLE))
- dev->features &= ~GBIT_SUPPORT;
- }
-
return 0;
}
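ksz9477_set_ageing_time() splits the ageing period in seconds across two registers: bits 7:0 go to REG_SW_LUE_CTRL_3 and bits 10:8 into the SW_AGE_CNT_M field of REG_SW_LUE_CTRL_0, using FIELD_GET()/FIELD_PREP() with the GENMASK()-based masks added in ksz9477_reg.h. A worked example of just the bit slicing (values only, no register I/O):

	#include <linux/bitfield.h>

	u32 secs = 330;					/* 0b1_0100_1010 */
	u8 low  = FIELD_GET(GENMASK(7, 0), secs);	/* 0x4a -> REG_SW_LUE_CTRL_3 */
	u8 high = FIELD_GET(GENMASK(10, 8), secs);	/* 0x01 */
	u8 ctrl0_bits = FIELD_PREP(GENMASK(5, 3), high); /* 0x08, SW_AGE_CNT_M field */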
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index cd278b307b3c..00862c4cfb7f 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -16,8 +16,9 @@ u32 ksz9477_get_port_addr(int port, int offset);
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
-void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
-void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 99966514d444..3763930dc6fc 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -52,16 +52,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int ksz9477_i2c_remove(struct i2c_client *i2c)
+static void ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (dev)
ksz_switch_remove(dev);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
@@ -92,6 +88,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.data = &ksz_switch_chips[KSZ9477]
},
{
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
},
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index ddf99d1e4bbd..53c68d286dd3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -189,8 +189,9 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
+#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -225,6 +226,7 @@
#define SW_PRIO_LOWEST_DA_SA 3
#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
#define REG_SW_LUE_INT_STATUS 0x0314
#define REG_SW_LUE_INT_ENABLE 0x0315
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 6bd69a7e6809..d612181b3226 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -14,6 +14,9 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_mdio.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
@@ -170,12 +173,20 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.exit = ksz8_switch_exit,
};
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
.r_phy = ksz9477_r_phy,
.w_phy = ksz9477_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -196,6 +207,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
@@ -205,10 +217,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
static const struct ksz_dev_ops lan937x_dev_ops = {
.setup = lan937x_setup,
+ .teardown = lan937x_teardown,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
.r_phy = lan937x_r_phy,
.w_phy = lan937x_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -230,6 +244,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
@@ -412,7 +427,636 @@ static const u8 lan937x_shifts[] = {
[ALU_STAT_INDEX] = 8,
};
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7203),
+ regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
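
Note on the access tables being added here: regmap consults rd_table/wr_table before issuing a bus transaction, and only registers falling inside one of the yes_ranges are let through. The snippet below is a minimal, self-contained illustration of that lookup (plain C, not the regmap internals; the two example ranges are copied from the table above):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct reg_range { unsigned int min, max; };

	/* two entries borrowed from ksz9477_valid_regs for illustration */
	static const struct reg_range valid[] = {
		{ 0x2000, 0x2001 },
		{ 0x2013, 0x2013 },
	};

	static bool reg_allowed(unsigned int reg)
	{
		for (size_t i = 0; i < sizeof(valid) / sizeof(valid[0]); i++)
			if (reg >= valid[i].min && reg <= valid[i].max)
				return true;
		return false;	/* regmap would refuse such an access (-EIO) */
	}

	int main(void)
	{
		printf("0x2001 allowed: %d\n", reg_allowed(0x2001));	/* 1 */
		printf("0x2002 allowed: %d\n", reg_allowed(0x2002));	/* 0 */
		return 0;
	}
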
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6122, 0x6127),
+ regmap_reg_range(0x612a, 0x612b),
+ regmap_reg_range(0x6136, 0x6139),
+ regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
+
const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
[KSZ8795] = {
.chip_id = KSZ8795_CHIP_ID,
.dev_name = "KSZ8795",
@@ -527,6 +1171,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -545,6 +1190,41 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, false},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
},
[KSZ9897] = {
@@ -555,6 +1235,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -573,6 +1254,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
},
[KSZ9893] = {
@@ -583,6 +1265,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -596,6 +1279,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
},
[KSZ9567] = {
@@ -606,6 +1290,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -624,6 +1309,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
},
[LAN9370] = {
@@ -634,6 +1320,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -657,6 +1344,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -680,6 +1368,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -707,6 +1396,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -734,6 +1424,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -965,9 +1656,280 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
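
The two callbacks above follow the standard mii_bus contract: read returns the 16-bit register value on success or a negative errno, write returns 0 or a negative errno, and clause-45 accesses (MII_ADDR_C45 set in regnum) are refused because only clause-22 PHY registers are exposed. A stand-alone sketch of that contract, with MII_ADDR_C45 redefined locally and a toy register file standing in for the switch PHYs:

	#include <errno.h>
	#include <stdio.h>

	#define MII_ADDR_C45	(1 << 30)	/* mirrors include/uapi/linux/mdio.h */

	static unsigned short phy_regs[32][32];	/* toy PHY register file */

	static int toy_mdio_read(int addr, int regnum)
	{
		if (regnum & MII_ADDR_C45)
			return -EOPNOTSUPP;		/* clause 45 not supported */
		return phy_regs[addr][regnum & 0x1f];	/* value, or negative errno */
	}

	static int toy_mdio_write(int addr, int regnum, unsigned short val)
	{
		if (regnum & MII_ADDR_C45)
			return -EOPNOTSUPP;
		phy_regs[addr][regnum & 0x1f] = val;
		return 0;
	}

	int main(void)
	{
		toy_mdio_write(1, 0, 0x1140);
		printf("reg 0 = 0x%x\n", toy_mdio_read(1, 0));
		printf("c45 read -> %d\n", toy_mdio_read(1, MII_ADDR_C45 | 0));
		return 0;
	}
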
+
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ ds->slave_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+}
+
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret) {
+ of_node_put(mdio_np);
+ return ret;
+ }
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
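
The irq_chip above uses the usual pattern for interrupt controllers behind a slow bus: irq_mask/irq_unmask only update the cached "masked" bitmap, and the accumulated value is written to the hardware mask register once, in irq_bus_sync_unlock, where sleeping (the SPI transfer) is allowed. A simplified stand-alone model of that bookkeeping, with the register write stubbed out:

	#include <stdint.h>
	#include <stdio.h>

	struct toy_irq_chip {
		uint16_t masked;	/* cached copy, updated under the bus lock */
		uint16_t hw_mask;	/* what the device register currently holds */
	};

	static void toy_mask(struct toy_irq_chip *c, unsigned int hwirq)
	{
		c->masked |= 1u << hwirq;	/* no bus traffic here */
	}

	static void toy_unmask(struct toy_irq_chip *c, unsigned int hwirq)
	{
		c->masked &= ~(1u << hwirq);
	}

	static void toy_bus_sync_unlock(struct toy_irq_chip *c)
	{
		/* one (possibly sleeping) register write flushes all changes */
		c->hw_mask = c->masked;
	}

	int main(void)
	{
		struct toy_irq_chip c = { .masked = 0xffff, .hw_mask = 0xffff };

		toy_unmask(&c, 1);	/* e.g. enable the PHY interrupt source */
		toy_unmask(&c, 2);
		toy_bus_sync_unlock(&c);
		printf("hw mask now 0x%04x\n", c.hw_mask);	/* 0xfff9 */
		return 0;
	}
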
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
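
ksz_irq_thread_fn() demultiplexes one status register into the nested per-source interrupts of the domain: every set bit maps to a hwirq, its virtual interrupt is looked up and handled, and IRQ_NONE is returned only when no bit was pending, so the core can flag a spurious line. The loop below reproduces that logic in plain C with the status byte and handlers mocked:

	#include <stdint.h>
	#include <stdio.h>

	#define NIRQS 3

	static void handle_source(unsigned int hwirq)
	{
		printf("nested handler for source %u\n", hwirq);
	}

	/* returns 1 (IRQ_HANDLED) if at least one source was serviced, else 0 */
	static int demux(uint8_t status)
	{
		unsigned int nhandled = 0;

		for (unsigned int n = 0; n < NIRQS; n++) {
			if (status & (1u << n)) {
				handle_source(n);	/* handle_nested_irq() in the driver */
				nhandled++;
			}
		}
		return nhandled > 0;
	}

	int main(void)
	{
		printf("result: %d\n", demux(0x05));	/* sources 0 and 2 pending */
		printf("result: %d\n", demux(0x00));	/* spurious -> 0 (IRQ_NONE) */
		return 0;
	}
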
+
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (pirq->irq_num < 0)
+ return pirq->irq_num;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
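
ksz_girq_setup() and ksz_pirq_setup() together build a two-level cascade: the chip's single interrupt line is demultiplexed by the global domain (one hwirq per port), and each port hwirq is in turn the parent of a per-port domain whose source 1 (PORT_SRC_PHY_INT) is what ksz_irq_phy_setup() hands to phylib. A toy walk of that fan-out, with both status registers mocked:

	#include <stdint.h>
	#include <stdio.h>

	#define PORT_SRC_PHY_INT 1

	/* pretend reads: global port-interrupt status, then per-port status */
	static uint8_t global_status = 1u << 2;		/* port 2 raised its line */
	static uint8_t port_status[8] = { [2] = 1u << PORT_SRC_PHY_INT };

	int main(void)
	{
		for (unsigned int port = 0; port < 8; port++) {
			if (!(global_status & (1u << port)))
				continue;
			/* second level: the port's own status register */
			if (port_status[port] & (1u << PORT_SRC_PHY_INT))
				printf("PHY interrupt on port %u -> phylib via slave_mii_bus->irq[%u]\n",
				       port, port);
		}
		return 0;
	}
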
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1016,11 +1978,55 @@ static int ksz_setup(struct dsa_switch *ds)
p = &dev->ports[dev->cpu_port];
p->learning = true;
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto out_girq;
+ }
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ goto out_pirq;
+ }
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
return 0;
+
+out_pirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+out_girq:
+ if (dev->irq > 0)
+ ksz_irq_free(&dev->girq);
+
+ return ret;
+}
+
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
}
static void port_r_cnt(struct ksz_device *dev, int port)
@@ -1104,8 +2110,11 @@ static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
- dev->dev_ops->r_phy(dev, addr, reg, &val);
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
return val;
}
@@ -1113,8 +2122,11 @@ static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
+ int ret;
- dev->dev_ops->w_phy(dev, addr, reg, val);
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
return 0;
}
@@ -1203,6 +2215,16 @@ static void ksz_port_fast_age(struct dsa_switch *ds, int port)
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->set_ageing_time(dev, msecs);
+}
+
static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
@@ -1366,10 +2388,12 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9477;
@@ -1484,7 +2508,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
case PHY_INTERFACE_MODE_RGMII_RXID:
data8 |= bitval[P_RGMII_SEL];
/* On KSZ9893, disable RGMII in-band status support */
- if (dev->features & IS_9893)
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -1656,13 +2681,13 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct ksz_device *dev = ds->priv;
struct ksz_port *p;
p = &dev->ports[port];
@@ -1676,6 +2701,15 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
ksz_port_set_xmii_speed(dev, port, speed);
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
if (dev->dev_ops->phylink_mac_link_up)
dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
@@ -1685,7 +2719,7 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
static int ksz_switch_detect(struct ksz_device *dev)
{
- u8 id1, id2;
+ u8 id1, id2, id4;
u16 id16;
u32 id32;
int ret;
@@ -1730,8 +2764,8 @@ static int ksz_switch_detect(struct ksz_device *dev)
switch (id32) {
case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
- case KSZ9893_CHIP_ID:
case KSZ9567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
@@ -1740,6 +2774,18 @@ static int ksz_switch_detect(struct ksz_device *dev)
case LAN9374_CHIP_ID:
dev->chip_id = id32;
break;
+ case KSZ9893_CHIP_ID:
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
+ if (ret)
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
+ break;
default:
dev_err(dev->dev,
"unsupported switch detected %x)\n", id32);
@@ -1753,6 +2799,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.get_phy_flags = ksz_get_phy_flags,
.setup = ksz_setup,
+ .teardown = ksz_teardown,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
@@ -1760,6 +2807,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phylink_mac_link_up = ksz_phylink_mac_link_up,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
+ .set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
@@ -1917,6 +2965,9 @@ int ksz_switch_register(struct ksz_device *dev)
GFP_KERNEL);
if (!dev->ports[i].mib.counters)
return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
}
/* set the real number of ports */
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 0d9520dc6d2d..9cfa179575ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -13,9 +13,12 @@
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
+#include <linux/irq.h>
#define KSZ_MAX_NUM_PORTS 8
+struct ksz_device;
+
struct vlan_table {
u32 table[3];
};
@@ -42,6 +45,7 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ u8 port_nirqs;
const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
@@ -61,6 +65,20 @@ struct ksz_chip_data {
bool supports_rmii[KSZ_MAX_NUM_PORTS];
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
};
struct ksz_port {
@@ -70,9 +88,7 @@ struct ksz_port {
struct phy_device phydev;
u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
@@ -82,6 +98,9 @@ struct ksz_port {
u16 max_frame;
u32 rgmii_tx_val;
u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ struct ksz_irq pirq;
+ u8 num;
};
struct ksz_device {
@@ -99,6 +118,7 @@ struct ksz_device {
struct regmap *regmap[3];
void *priv;
+ int irq;
struct gpio_desc *reset_gpio; /* Optional reset GPIO */
@@ -118,17 +138,20 @@ struct ksz_device {
unsigned long mib_read_interval;
u16 mirror_rx;
u16 mirror_tx;
- u32 features; /* chip specific features */
u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
};
/* List of supported models */
enum ksz_model {
+ KSZ8563,
KSZ8795,
KSZ8794,
KSZ8765,
KSZ8830,
KSZ9477,
+ KSZ9896,
KSZ9897,
KSZ9893,
KSZ9567,
@@ -140,11 +163,13 @@ enum ksz_model {
};
enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
KSZ8795_CHIP_ID = 0x8795,
KSZ8794_CHIP_ID = 0x8794,
KSZ8765_CHIP_ID = 0x8765,
KSZ8830_CHIP_ID = 0x8830,
KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
KSZ9897_CHIP_ID = 0x00989700,
KSZ9893_CHIP_ID = 0x00989300,
KSZ9567_CHIP_ID = 0x00956700,
@@ -254,13 +279,15 @@ struct alu_struct {
struct ksz_dev_ops {
int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
@@ -330,6 +357,10 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[0], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -339,6 +370,10 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[1], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -348,6 +383,10 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[2], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -358,7 +397,10 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret)
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
*val = (u64)value[0] << 32 | value[1];
return ret;
@@ -366,17 +408,38 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- return regmap_write(dev->regmap[0], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[0], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- return regmap_write(dev->regmap[1], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[1], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- return regmap_write(dev->regmap[2], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[2], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
@@ -391,40 +454,42 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
return regmap_bulk_write(dev->regmap[2], reg, val, 2);
}
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
{
- ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
{
- ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
{
- ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
{
- ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
{
- ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
{
- ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
@@ -483,6 +548,10 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_REV_ID_M GENMASK(7, 4)
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROT_RATE 10
@@ -497,10 +566,6 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_START 0x01
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define IS_9893 BIT(2)
-
/* xMII configuration */
#define P_MII_DUPLEX_M BIT(6)
#define P_MII_100MBIT_M BIT(4)
@@ -511,6 +576,15 @@ static inline int is_lan937x(struct ksz_device *dev)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+
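
The global and per-port interrupt registers deliberately share the 0x1B/0x1F offsets; what differs is the base address that get_port_addr() prepends. Assuming the KSZ9477-style layout suggested by the valid-register tables earlier in this patch (port n's block starting at 0x1000 * (n + 1) — an inference from this diff, not a quoted datasheet value), the absolute addresses work out as below:

	#include <stdio.h>

	#define REG_PORT_INT_STATUS	0x001B
	#define REG_PORT_INT_MASK	0x001F

	/* assumed port-block base, matching the 0x1000/0x2000/... ranges above */
	static unsigned int port_addr(unsigned int port, unsigned int offset)
	{
		return 0x1000 * (port + 1) + offset;
	}

	int main(void)
	{
		printf("global status/mask: 0x%04x / 0x%04x\n", 0x001B, 0x001F);
		for (unsigned int p = 0; p < 3; p++)
			printf("port %u status/mask: 0x%04x / 0x%04x\n", p,
			       port_addr(p, REG_PORT_INT_STATUS),
			       port_addr(p, REG_PORT_INT_MASK));
		return 0;
	}
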
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 05bd089795f8..1b6ab891b986 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -66,7 +66,10 @@ static int ksz_spi_probe(struct spi_device *spi)
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
rc = regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
@@ -85,6 +88,8 @@ static int ksz_spi_probe(struct spi_device *spi)
if (ret)
return ret;
+ dev->irq = spi->irq;
+
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
@@ -102,8 +107,6 @@ static void ksz_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
}
static void ksz_spi_shutdown(struct spi_device *spi)
@@ -147,6 +150,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ9477]
},
{
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
},
@@ -160,7 +167,7 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
@@ -197,6 +204,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8863" },
{ "ksz8873" },
{ "ksz9477" },
+ { "ksz9896" },
{ "ksz9897" },
{ "ksz9893" },
{ "ksz9563" },
@@ -226,6 +234,7 @@ static struct spi_driver ksz_spi_driver = {
module_spi_driver(ksz_spi_driver);
MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9896");
MODULE_ALIAS("spi:ksz9897");
MODULE_ALIAS("spi:ksz9893");
MODULE_ALIAS("spi:ksz9563");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 4e0b1dccec27..8e9e66d6728d 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -8,14 +8,16 @@
int lan937x_reset_switch(struct ksz_device *dev);
int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
void lan937x_config_cpu_port(struct dsa_switch *ds);
int lan937x_switch_init(struct ksz_device *dev);
void lan937x_switch_exit(struct ksz_device *dev);
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index daedd2bf20c1..7e4f307a0387 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -7,7 +7,6 @@
#include <linux/iopoll.h>
#include <linux/phy.h>
#include <linux/of_net.h>
-#include <linux/of_mdio.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/math.h>
@@ -128,81 +127,14 @@ static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
}
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- lan937x_internal_phy_read(dev, addr, reg, data);
+ return lan937x_internal_phy_read(dev, addr, reg, data);
}
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- lan937x_internal_phy_write(dev, addr, reg, val);
-}
-
-static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct ksz_device *dev = bus->priv;
- u16 val;
- int ret;
-
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
- ret = lan937x_internal_phy_read(dev, addr, regnum, &val);
- if (ret < 0)
- return ret;
-
- return val;
-}
-
-static int lan937x_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct ksz_device *dev = bus->priv;
-
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
- return lan937x_internal_phy_write(dev, addr, regnum, val);
-}
-
-static int lan937x_mdio_register(struct ksz_device *dev)
-{
- struct dsa_switch *ds = dev->ds;
- struct device_node *mdio_np;
- struct mii_bus *bus;
- int ret;
-
- mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
- if (!mdio_np) {
- dev_err(ds->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- bus = devm_mdiobus_alloc(ds->dev);
- if (!bus) {
- of_node_put(mdio_np);
- return -ENOMEM;
- }
-
- bus->priv = dev;
- bus->read = lan937x_sw_mdio_read;
- bus->write = lan937x_sw_mdio_write;
- bus->name = "lan937x slave smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
-
- ds->slave_mii_bus = bus;
-
- ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
- if (ret) {
- dev_err(ds->dev, "unable to register MDIO bus %s\n",
- bus->id);
- }
-
- of_node_put(mdio_np);
-
- return ret;
+ return lan937x_internal_phy_write(dev, addr, reg, val);
}
int lan937x_reset_switch(struct ksz_device *dev)
@@ -225,6 +157,10 @@ int lan937x_reset_switch(struct ksz_device *dev)
if (ret < 0)
return ret;
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
if (ret < 0)
return ret;
@@ -244,10 +180,6 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
- /* disable frame check length field */
- lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH,
- false);
-
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
@@ -315,6 +247,23 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
return 0;
}
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u32 value;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
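
lan937x_set_ageing_time() converts the requested period to seconds and spreads it across two registers: bits 7:0 go into REG_SW_AGE_PERIOD__1 and bits 19:8 into REG_SW_AGE_PERIOD__2. The arithmetic below checks that split with a local FIELD_GET-style helper (the helper is a re-implementation for illustration, not the kernel macro):

	#include <stdint.h>
	#include <stdio.h>

	/* simplified stand-ins for GENMASK()/FIELD_GET() */
	#define AGE_7_0_MASK	0x000ffu	/* GENMASK(7, 0)  */
	#define AGE_19_8_MASK	0xfff00u	/* GENMASK(19, 8) */

	static uint32_t field_get(uint32_t mask, uint32_t val)
	{
		return (val & mask) >> __builtin_ctz(mask);
	}

	int main(void)
	{
		uint32_t msecs = 300000;	/* bridge ageing time of 300 s */
		uint32_t secs = msecs / 1000;	/* 0x12c */

		printf("REG_SW_AGE_PERIOD__1 <- 0x%02x\n", field_get(AGE_7_0_MASK, secs));
		printf("REG_SW_AGE_PERIOD__2 <- 0x%03x\n", field_get(AGE_19_8_MASK, secs));
		return 0;
	}
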
+
static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
u16 reg, u8 val)
{
@@ -383,6 +332,13 @@ void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
}
}
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
int lan937x_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -395,12 +351,6 @@ int lan937x_setup(struct dsa_switch *ds)
return ret;
}
- ret = lan937x_mdio_register(dev);
- if (ret < 0) {
- dev_err(dev->dev, "failed to register the mdio");
- return ret;
- }
-
/* The VLAN aware is a global setting. Mixed vlan
/* VLAN awareness is a global setting. Mixed VLAN
* filtering is not supported.
* filterings are not supported.
*/
@@ -426,11 +376,9 @@ int lan937x_setup(struct dsa_switch *ds)
return 0;
}
-int lan937x_switch_init(struct ksz_device *dev)
+void lan937x_teardown(struct dsa_switch *ds)
{
- dev->port_mask = (1 << dev->info->port_cnt) - 1;
- return 0;
}
void lan937x_switch_exit(struct ksz_device *dev)
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index ba4adaddb3ec..5bc16a4c4441 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -62,6 +62,12 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
#define REG_SW_MAC_CTRL_0 0x0330
#define SW_NEW_BACKOFF BIT(7)
#define SW_PAUSE_UNH_MODE BIT(1)
@@ -118,6 +124,18 @@
/* Port Registers */
/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
#define REG_PORT_CTRL_0 0x0020
#define PORT_MAC_LOOPBACK BIT(7)
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 835807911be0..e74c6b406172 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -506,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -592,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -2326,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ /* all MACs must be forced link-down before sw reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -2699,9 +2708,6 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
return mt7531_sgmii_setup_mode_force(priv, port, interface);
default:
return -EINVAL;
@@ -2777,13 +2783,6 @@ unsupported:
return;
}
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
@@ -2887,8 +2886,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
@@ -2922,6 +2919,9 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port))
+ config->mac_capabilities |= MAC_2500FD;
+
/* This driver does not make use of the speed, duplex, pause or the
* advertisement in its mac_config, so it is safe to mark this driver
* as non-legacy.
@@ -2987,6 +2987,7 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE);
if (state->interface == PHY_INTERFACE_MODE_SGMII &&
(status & MT7531_SGMII_AN_ENABLE)) {
val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
@@ -3017,16 +3018,44 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
+static void
+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ unsigned int val;
+
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(val & MT7531_SGMII_LINK_STATUS);
+ if (!state->link)
+ return;
+
+ state->an_complete = state->link;
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+
+ state->duplex = DUPLEX_FULL;
+ state->pause = MLO_PAUSE_NONE;
+}
+
static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
int port = pcs_to_mt753x_pcs(pcs)->port;
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
mt7531_sgmii_pcs_get_state_an(priv, port, state);
- else
- state->link = false;
+ return;
+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) ||
+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ mt7531_sgmii_pcs_get_state_inband(priv, port, state);
+ return;
+ }
+
+ state->link = false;
}
static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
@@ -3067,6 +3096,8 @@ mt753x_setup(struct dsa_switch *ds)
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
+ if (mt753x_is_mac_port(i))
+ priv->pcs[i].pcs.poll = 1;
}
ret = priv->info->sw_setup(ds);
@@ -3300,8 +3331,6 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mt7530_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index e509af95c354..e8d966435350 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -373,6 +373,7 @@ enum mt7530_vlan_port_acc_frm {
#define MT7531_SGMII_LINK_STATUS BIT(18)
#define MT7531_SGMII_AN_ENABLE BIT(12)
#define MT7531_SGMII_AN_RESTART BIT(9)
+#define MT7531_SGMII_AN_COMPLETE BIT(21)
/* Register for SGMII PCS_SPPED_ABILITY */
#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 83dca9179aa0..fdda62d6eb16 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -297,8 +297,6 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
return;
dsa_unregister_switch(ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 07e9a4da924c..2479be3a1e35 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -816,6 +816,14 @@ static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_10000FD;
}
}
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
}
static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
@@ -1128,7 +1136,7 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mv88e6xxx_atu_vtu_stats_strings[i],
ETH_GSTRING_LEN);
}
@@ -6585,14 +6593,17 @@ out:
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
int members = 0;
- if (!mv88e6xxx_has_lag(chip))
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
+ }
if (!lag.id)
return false;
@@ -6601,14 +6612,20 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
/* Includes the port joining the LAG */
members++;
- if (members > 8)
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
return false;
+ }
/* We could potentially relax this to include active
* backup in the future.
*/
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
@@ -6761,12 +6778,13 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
/* DSA LAG IDs are one-based */
@@ -6819,12 +6837,13 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -7166,8 +7185,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 807aeaad9830..7536b8b0ad01 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -298,7 +298,7 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
-#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 90c55f23b7c9..5c4195c635b0 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -517,6 +517,12 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_RMII:
cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -634,6 +640,19 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index aadb0bd7c24f..dd3a18cc89dd 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -42,6 +42,25 @@ static struct net_device *felix_classify_db(struct dsa_db db)
}
}
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
@@ -422,6 +441,40 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
return BIT(ocelot->num_phys_ports);
}
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
+
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
+}
+
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
@@ -433,6 +486,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
};
static int felix_tag_8021q_setup(struct dsa_switch *ds)
@@ -445,6 +499,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
if (err)
return err;
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
dp->cpu_dp->index);
@@ -493,6 +550,9 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
dsa_tag_8021q_unregister(ds);
}
@@ -501,10 +561,24 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
return dsa_cpu_ports(ds);
}
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
+
+ return felix_update_trapping_destinations(ds, true);
+}
+
static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
};
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
@@ -667,6 +741,16 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
!!felix->host_flood_mc_mask, true);
}
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -855,11 +939,21 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
static int felix_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
- return ocelot_port_lag_join(ocelot, port, lag.dev, info);
+ return felix_port_change_master(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
@@ -869,7 +963,11 @@ static int felix_lag_leave(struct dsa_switch *ds, int port,
ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -1007,6 +1105,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
felix->info->port_sched_speed_set(ocelot, port, speed);
}
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -1028,6 +1147,55 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
}
}
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -1144,11 +1312,55 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
return err;
}
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
- struct resource res;
+ struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -1182,20 +1394,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
}
for (i = 0; i < TARGET_MAX; i++) {
- struct regmap *target;
-
- if (!felix->info->target_io_res[i].name)
- continue;
-
- memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map device memory space\n");
+ "Failed to map device memory space: %pe\n",
+ target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1212,7 +1415,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
- struct regmap *target;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -1224,16 +1426,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map memory space for port %d\n",
- port);
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1842,6 +2039,12 @@ const struct dsa_switch_ops felix_switch_ops = {
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
@@ -1851,6 +2054,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
@@ -1906,6 +2110,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index deb8dde1fc19..c9c29999c336 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -16,9 +16,13 @@
/* Platform-specific information */
struct felix_info {
- const struct resource *target_io_res;
- const struct resource *port_io_res;
- const struct resource *imdio_res;
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -56,8 +60,6 @@ struct felix_info {
void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
- struct regmap *(*init_regmap)(struct ocelot *ocelot,
- struct resource *res);
};
/* Methods for initializing the hardware resources specific to a tagging
@@ -71,6 +73,9 @@ struct felix_tag_proto_ops {
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -83,7 +88,6 @@ struct felix {
struct mii_bus *imdio;
struct phylink_pcs **pcs;
resource_size_t switch_base;
- resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 1cdce8a98d1d..26a35ae322d1 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -22,6 +22,7 @@
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
@@ -347,7 +348,7 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
- REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
REG(SYS_COUNT_DROP_LOCAL, 0x000400),
REG(SYS_COUNT_DROP_TAIL, 0x000404),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
@@ -366,6 +367,10 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -387,7 +392,6 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -473,100 +477,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
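
For readers unfamiliar with the helper, DEFINE_RES_MEM_NAMED() takes a start address and a size rather than an end address, and implicitly marks the resource as IORESOURCE_MEM, so each entry above covers the same region as the open-coded .start/.end pair it replaces. A minimal sketch of that equivalence for the "ana" target (illustrative only; the identifier ana_regs_example is hypothetical and not part of the patch):

#include <linux/ioport.h>

/* Expands to a struct resource spanning 0x0280000..0x028ffff with
 * IORESOURCE_MEM flags, i.e. the same region as the deleted
 * .start = 0x0280000, .end = 0x028ffff entry.
 */
static const struct resource ana_regs_example =
	DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana");
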
-static const struct resource vsc9959_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static const struct resource vsc9959_imdio_res = {
- .start = 0x8030,
- .end = 0x8040,
- .name = "imdio",
-};
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
@@ -619,378 +566,7 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
};
static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
- [OCELOT_STAT_RX_OCTETS] = {
- .name = "rx_octets",
- .reg = SYS_COUNT_RX_OCTETS,
- },
- [OCELOT_STAT_RX_UNICAST] = {
- .name = "rx_unicast",
- .reg = SYS_COUNT_RX_UNICAST,
- },
- [OCELOT_STAT_RX_MULTICAST] = {
- .name = "rx_multicast",
- .reg = SYS_COUNT_RX_MULTICAST,
- },
- [OCELOT_STAT_RX_BROADCAST] = {
- .name = "rx_broadcast",
- .reg = SYS_COUNT_RX_BROADCAST,
- },
- [OCELOT_STAT_RX_SHORTS] = {
- .name = "rx_shorts",
- .reg = SYS_COUNT_RX_SHORTS,
- },
- [OCELOT_STAT_RX_FRAGMENTS] = {
- .name = "rx_fragments",
- .reg = SYS_COUNT_RX_FRAGMENTS,
- },
- [OCELOT_STAT_RX_JABBERS] = {
- .name = "rx_jabbers",
- .reg = SYS_COUNT_RX_JABBERS,
- },
- [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
- .name = "rx_crc_align_errs",
- .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
- },
- [OCELOT_STAT_RX_SYM_ERRS] = {
- .name = "rx_sym_errs",
- .reg = SYS_COUNT_RX_SYM_ERRS,
- },
- [OCELOT_STAT_RX_64] = {
- .name = "rx_frames_below_65_octets",
- .reg = SYS_COUNT_RX_64,
- },
- [OCELOT_STAT_RX_65_127] = {
- .name = "rx_frames_65_to_127_octets",
- .reg = SYS_COUNT_RX_65_127,
- },
- [OCELOT_STAT_RX_128_255] = {
- .name = "rx_frames_128_to_255_octets",
- .reg = SYS_COUNT_RX_128_255,
- },
- [OCELOT_STAT_RX_256_511] = {
- .name = "rx_frames_256_to_511_octets",
- .reg = SYS_COUNT_RX_256_511,
- },
- [OCELOT_STAT_RX_512_1023] = {
- .name = "rx_frames_512_to_1023_octets",
- .reg = SYS_COUNT_RX_512_1023,
- },
- [OCELOT_STAT_RX_1024_1526] = {
- .name = "rx_frames_1024_to_1526_octets",
- .reg = SYS_COUNT_RX_1024_1526,
- },
- [OCELOT_STAT_RX_1527_MAX] = {
- .name = "rx_frames_over_1526_octets",
- .reg = SYS_COUNT_RX_1527_MAX,
- },
- [OCELOT_STAT_RX_PAUSE] = {
- .name = "rx_pause",
- .reg = SYS_COUNT_RX_PAUSE,
- },
- [OCELOT_STAT_RX_CONTROL] = {
- .name = "rx_control",
- .reg = SYS_COUNT_RX_CONTROL,
- },
- [OCELOT_STAT_RX_LONGS] = {
- .name = "rx_longs",
- .reg = SYS_COUNT_RX_LONGS,
- },
- [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
- .name = "rx_classified_drops",
- .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
- },
- [OCELOT_STAT_RX_RED_PRIO_0] = {
- .name = "rx_red_prio_0",
- .reg = SYS_COUNT_RX_RED_PRIO_0,
- },
- [OCELOT_STAT_RX_RED_PRIO_1] = {
- .name = "rx_red_prio_1",
- .reg = SYS_COUNT_RX_RED_PRIO_1,
- },
- [OCELOT_STAT_RX_RED_PRIO_2] = {
- .name = "rx_red_prio_2",
- .reg = SYS_COUNT_RX_RED_PRIO_2,
- },
- [OCELOT_STAT_RX_RED_PRIO_3] = {
- .name = "rx_red_prio_3",
- .reg = SYS_COUNT_RX_RED_PRIO_3,
- },
- [OCELOT_STAT_RX_RED_PRIO_4] = {
- .name = "rx_red_prio_4",
- .reg = SYS_COUNT_RX_RED_PRIO_4,
- },
- [OCELOT_STAT_RX_RED_PRIO_5] = {
- .name = "rx_red_prio_5",
- .reg = SYS_COUNT_RX_RED_PRIO_5,
- },
- [OCELOT_STAT_RX_RED_PRIO_6] = {
- .name = "rx_red_prio_6",
- .reg = SYS_COUNT_RX_RED_PRIO_6,
- },
- [OCELOT_STAT_RX_RED_PRIO_7] = {
- .name = "rx_red_prio_7",
- .reg = SYS_COUNT_RX_RED_PRIO_7,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
- .name = "rx_yellow_prio_0",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
- .name = "rx_yellow_prio_1",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
- .name = "rx_yellow_prio_2",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
- .name = "rx_yellow_prio_3",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
- .name = "rx_yellow_prio_4",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
- .name = "rx_yellow_prio_5",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
- .name = "rx_yellow_prio_6",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
- .name = "rx_yellow_prio_7",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_0] = {
- .name = "rx_green_prio_0",
- .reg = SYS_COUNT_RX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_1] = {
- .name = "rx_green_prio_1",
- .reg = SYS_COUNT_RX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_2] = {
- .name = "rx_green_prio_2",
- .reg = SYS_COUNT_RX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_3] = {
- .name = "rx_green_prio_3",
- .reg = SYS_COUNT_RX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_4] = {
- .name = "rx_green_prio_4",
- .reg = SYS_COUNT_RX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_5] = {
- .name = "rx_green_prio_5",
- .reg = SYS_COUNT_RX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_6] = {
- .name = "rx_green_prio_6",
- .reg = SYS_COUNT_RX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_7] = {
- .name = "rx_green_prio_7",
- .reg = SYS_COUNT_RX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_OCTETS] = {
- .name = "tx_octets",
- .reg = SYS_COUNT_TX_OCTETS,
- },
- [OCELOT_STAT_TX_UNICAST] = {
- .name = "tx_unicast",
- .reg = SYS_COUNT_TX_UNICAST,
- },
- [OCELOT_STAT_TX_MULTICAST] = {
- .name = "tx_multicast",
- .reg = SYS_COUNT_TX_MULTICAST,
- },
- [OCELOT_STAT_TX_BROADCAST] = {
- .name = "tx_broadcast",
- .reg = SYS_COUNT_TX_BROADCAST,
- },
- [OCELOT_STAT_TX_COLLISION] = {
- .name = "tx_collision",
- .reg = SYS_COUNT_TX_COLLISION,
- },
- [OCELOT_STAT_TX_DROPS] = {
- .name = "tx_drops",
- .reg = SYS_COUNT_TX_DROPS,
- },
- [OCELOT_STAT_TX_PAUSE] = {
- .name = "tx_pause",
- .reg = SYS_COUNT_TX_PAUSE,
- },
- [OCELOT_STAT_TX_64] = {
- .name = "tx_frames_below_65_octets",
- .reg = SYS_COUNT_TX_64,
- },
- [OCELOT_STAT_TX_65_127] = {
- .name = "tx_frames_65_to_127_octets",
- .reg = SYS_COUNT_TX_65_127,
- },
- [OCELOT_STAT_TX_128_255] = {
- .name = "tx_frames_128_255_octets",
- .reg = SYS_COUNT_TX_128_255,
- },
- [OCELOT_STAT_TX_256_511] = {
- .name = "tx_frames_256_511_octets",
- .reg = SYS_COUNT_TX_256_511,
- },
- [OCELOT_STAT_TX_512_1023] = {
- .name = "tx_frames_512_1023_octets",
- .reg = SYS_COUNT_TX_512_1023,
- },
- [OCELOT_STAT_TX_1024_1526] = {
- .name = "tx_frames_1024_1526_octets",
- .reg = SYS_COUNT_TX_1024_1526,
- },
- [OCELOT_STAT_TX_1527_MAX] = {
- .name = "tx_frames_over_1526_octets",
- .reg = SYS_COUNT_TX_1527_MAX,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
- .name = "tx_yellow_prio_0",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
- .name = "tx_yellow_prio_1",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
- .name = "tx_yellow_prio_2",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
- .name = "tx_yellow_prio_3",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
- .name = "tx_yellow_prio_4",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
- .name = "tx_yellow_prio_5",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
- .name = "tx_yellow_prio_6",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
- .name = "tx_yellow_prio_7",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_0] = {
- .name = "tx_green_prio_0",
- .reg = SYS_COUNT_TX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_1] = {
- .name = "tx_green_prio_1",
- .reg = SYS_COUNT_TX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_2] = {
- .name = "tx_green_prio_2",
- .reg = SYS_COUNT_TX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_3] = {
- .name = "tx_green_prio_3",
- .reg = SYS_COUNT_TX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_4] = {
- .name = "tx_green_prio_4",
- .reg = SYS_COUNT_TX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_5] = {
- .name = "tx_green_prio_5",
- .reg = SYS_COUNT_TX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_6] = {
- .name = "tx_green_prio_6",
- .reg = SYS_COUNT_TX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_7] = {
- .name = "tx_green_prio_7",
- .reg = SYS_COUNT_TX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_AGED] = {
- .name = "tx_aged",
- .reg = SYS_COUNT_TX_AGING,
- },
- [OCELOT_STAT_DROP_LOCAL] = {
- .name = "drop_local",
- .reg = SYS_COUNT_DROP_LOCAL,
- },
- [OCELOT_STAT_DROP_TAIL] = {
- .name = "drop_tail",
- .reg = SYS_COUNT_DROP_TAIL,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
- .name = "drop_yellow_prio_0",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
- .name = "drop_yellow_prio_1",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
- .name = "drop_yellow_prio_2",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
- .name = "drop_yellow_prio_3",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
- .name = "drop_yellow_prio_4",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
- .name = "drop_yellow_prio_5",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
- .name = "drop_yellow_prio_6",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
- .name = "drop_yellow_prio_7",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
- .name = "drop_green_prio_0",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
- .name = "drop_green_prio_1",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
- .name = "drop_green_prio_2",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
- .name = "drop_green_prio_3",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
- .name = "drop_green_prio_4",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
- .name = "drop_green_prio_5",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
- .name = "drop_green_prio_6",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
- .name = "drop_green_prio_7",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
- },
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -1370,9 +946,11 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
@@ -1388,10 +966,11 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return -ENOMEM;
}
- memcpy(&res, felix->info->imdio_res, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->imdio_base;
- res.end += felix->imdio_base;
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
@@ -1478,6 +1057,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot during which egress scheduling is blocked, but we still need to keep 33 ns
+ * available for one packet to be transmitted, otherwise the port's tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
+
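
To make the reservation above concrete, here is a small standalone sketch (illustrative numbers only, not taken from the patch: a 10 us gate at 1 Gbps, i.e. roughly 8000 ps per octet on the wire) that reproduces the calculation done by vsc9959_tas_remaining_gate_len_ps() and the max_sdu derivation applied later in vsc9959_tas_guard_bands_update():

#include <stdint.h>
#include <stdio.h>

#define PSEC_PER_NSEC			1000ULL
#define VSC9959_TAS_MIN_GATE_LEN_NS	33ULL

int main(void)
{
	uint64_t gate_len_ns = 10000;	/* assumed 10 us tc-taprio gate */
	uint64_t picos_per_byte = 8000;	/* assumed 1 Gbps link speed */
	uint64_t remaining_ps, max_sdu;

	/* Reserve 33 ns for one frame, keep the rest for the guard band */
	remaining_ps = (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;

	/* Octets that still fit in the gate, minus 20 octets of L1 overhead */
	max_sdu = remaining_ps / picos_per_byte;
	if (max_sdu > 20)
		max_sdu -= 20;

	printf("remaining %llu ps -> max_sdu %llu octets\n",
	       (unsigned long long)remaining_ps, (unsigned long long)max_sdu);
	return 0;
}

With these assumed numbers the gate keeps 9967000 ps usable, which works out to a QMAXSDU limit of 1225 octets.
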
/* Extract shortest continuous gate open intervals in ns for each traffic class
* of a cyclic tc-taprio schedule. If a gate is always open, the duration is
* considered U64_MAX. If the gate is always closed, it is considered 0.
@@ -1539,6 +1135,73 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
min_gate_len[tc] = 0;
}
+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
+
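
As a worked example of the padding above (editorial illustration, assuming the standard values ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4): a user-requested queue max SDU of 1500 octets is programmed into the hardware as a frame-size limit of 1500 + 14 + 2 * 4 + 4 = 1526 octets, i.e. enough room for a double VLAN-tagged frame including its FCS.
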
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
* switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
* values (the default value is 1518). Also, for traffic class windows smaller
@@ -1548,6 +1211,7 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct tc_taprio_qopt_offload *taprio;
u64 min_gate_len[OCELOT_NUM_TC];
int speed, picos_per_byte;
u64 needed_bit_time_ps;
@@ -1557,6 +1221,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
lockdep_assert_held(&ocelot->tas_lock);
+ taprio = ocelot_port->taprio;
+
val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
@@ -1593,17 +1259,23 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
"port %d: max frame size %d needs %llu ps at speed %d\n",
port, maxlen, needed_bit_time_ps, speed);
- vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
+
+ mutex_lock(&ocelot->fwd_domain_lock);
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
+ u64 remaining_gate_len_ps;
u32 max_sdu;
- if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
- min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
/* Setting QMAXSDU_CFG to 0 disables oversized frame
* dropping.
*/
- max_sdu = 0;
+ max_sdu = requested_max_sdu;
dev_dbg(ocelot->dev,
"port %d tc %d min gate len %llu"
", sending all frames\n",
@@ -1612,9 +1284,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
/* If traffic class doesn't support a full MTU sized
* frame, make sure to enable oversize frame dropping
* for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
*/
- max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
- picos_per_byte);
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
* special case where all packets are oversized.
* Any limit smaller than 64 octets accomplishes this
@@ -1628,6 +1306,10 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
*/
if (max_sdu > 20)
max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
dev_info(ocelot->dev,
"port %d tc %d min gate length %llu"
" ns not enough for max frame size %d at %d"
@@ -1637,47 +1319,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
max_sdu);
}
- /* ocelot_write_rix is a macro that concatenates
- * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
- * the writes to each traffic class
- */
- switch (tc) {
- case 0:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
- port);
- break;
- case 1:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
- port);
- break;
- case 2:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
- port);
- break;
- case 3:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
- port);
- break;
- case 4:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
- port);
- break;
- case 5:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
- port);
- break;
- case 6:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
- port);
- break;
- case 7:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
- port);
- break;
- }
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
}
ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1704,13 +1353,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->tas_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
- mutex_lock(&ocelot->tas_lock);
-
if (ocelot_port->taprio)
vsc9959_tas_guard_bands_update(ocelot, port);
@@ -1950,6 +1599,21 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
return 0;
}
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1957,6 +1621,8 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
@@ -1988,7 +1654,15 @@ struct felix_stream {
u32 ssid;
};
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
struct list_head list;
refcount_t refcount;
u32 index;
@@ -2003,13 +1677,6 @@ struct felix_stream_filter {
u32 maxsdu;
};
-struct felix_stream_filter_counters {
- u32 match;
- u32 not_pass_gate;
- u32 not_pass_sdu;
- u32 red;
-};
-
struct felix_stream_gate {
u32 index;
u8 enable;
@@ -2513,29 +2180,6 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
}
}
-static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
- struct felix_stream_filter_counters *counters)
-{
- spin_lock(&ocelot->stats_lock);
-
- ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
- SYS_STAT_CFG_STAT_VIEW_M,
- SYS_STAT_CFG);
-
- counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
- counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
- counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
- counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
-
- /* Clear the PSFP counter. */
- ocelot_write(ocelot,
- SYS_STAT_CFG_STAT_VIEW(index) |
- SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
- SYS_STAT_CFG);
-
- spin_unlock(&ocelot->stats_lock);
-}
-
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
struct flow_cls_offload *f)
{
@@ -2560,6 +2204,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
return ret;
}
+ mutex_lock(&psfp->lock);
+
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_GATE:
@@ -2601,6 +2247,7 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
sfi.maxsdu = a->police.mtu;
break;
default:
+ mutex_unlock(&psfp->lock);
return -EOPNOTSUPP;
}
}
@@ -2670,6 +2317,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
goto err;
}
+ mutex_unlock(&psfp->lock);
+
return 0;
err:
@@ -2679,6 +2328,8 @@ err:
if (sfi.fm_valid)
ocelot_vcap_policer_del(ocelot, sfi.fmid);
+ mutex_unlock(&psfp->lock);
+
return ret;
}
@@ -2686,18 +2337,22 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
struct flow_cls_offload *f)
{
struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
static struct felix_stream_filter *sfi;
- struct ocelot_psfp_list *psfp;
- psfp = &ocelot->psfp;
+ mutex_lock(&psfp->lock);
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
- if (!stream)
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
- if (!sfi)
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
if (sfi->sg_valid)
vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
@@ -2723,27 +2378,83 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
stream_entry->ports);
}
+ mutex_unlock(&psfp->lock);
+
return 0;
}
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
struct flow_cls_offload *f,
struct flow_stats *stats)
{
- struct felix_stream_filter_counters counters;
- struct ocelot_psfp_list *psfp;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ static struct felix_stream_filter *sfi;
struct felix_stream *stream;
- psfp = &ocelot->psfp;
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
if (!stream)
return -ENOMEM;
- vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
- stats->pkts = counters.match;
- stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
- counters.red;
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
return 0;
}
@@ -2755,6 +2466,7 @@ static void vsc9959_psfp_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&psfp->stream_list);
INIT_LIST_HEAD(&psfp->sfi_list);
INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
}
/* When using cut-through forwarding and the egress port runs at a higher data
@@ -2770,7 +2482,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
- int port, other_port;
+ int tc, port, other_port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -2814,19 +2526,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
min_speed = other_ocelot_port->speed;
}
- /* Enable cut-through forwarding for all traffic classes. */
- if (ocelot_port->speed == min_speed)
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode.
+ */
+ if (ocelot_port->speed == min_speed) {
val = GENMASK(7, 0);
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
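
For illustration (assumed per-tc settings, not from the patch): if QMAXSDU is non-zero only for tc 0 and tc 5, the loop above turns val from GENMASK(7, 0) = 0xff into 0xff & ~(BIT(0) | BIT(5)) = 0xde, so cut-through forwarding stays enabled for traffic classes 1-4 and 6-7 while the two classes with oversize dropping fall back to store-and-forward.
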
set:
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
if (tmp == val)
continue;
dev_dbg(ocelot->dev,
- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
port, mask, ocelot_port->speed, min_speed,
- val ? "enabling" : "disabling");
+ val ? "enabling" : "disabling", val);
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
@@ -2845,12 +2565,13 @@ static const struct ocelot_ops vsc9959_ops = {
.psfp_stats_get = vsc9959_psfp_stats_get,
.cut_through_fwd = vsc9959_cut_through_fwd,
.tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
};
static const struct felix_info felix_info_vsc9959 = {
- .target_io_res = vsc9959_target_io_res,
- .port_io_res = vsc9959_port_io_res,
- .imdio_res = &vsc9959_imdio_res,
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
@@ -2872,7 +2593,6 @@ static const struct felix_info felix_info_vsc9959 = {
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
.tas_guard_bands_update = vsc9959_tas_guard_bands_update,
- .init_regmap = ocelot_regmap_init,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -2924,7 +2644,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
pci_set_master(pdev);
@@ -2985,8 +2704,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index b34f4cdfe814..7af33b2c685d 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -343,7 +343,7 @@ static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
REG(SYS_COUNT_DROP_LOCAL, 0x000200),
REG(SYS_COUNT_DROP_TAIL, 0x000204),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
@@ -383,7 +383,6 @@ static const u32 vsc9953_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
@@ -459,110 +458,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the device's base address */
-static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9953_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
- {
- .start = 0x0160000,
- .end = 0x016ffff,
- .name = "port6",
- },
- {
- .start = 0x0170000,
- .end = 0x017ffff,
- .name = "port7",
- },
- {
- .start = 0x0180000,
- .end = 0x018ffff,
- .name = "port8",
- },
- {
- .start = 0x0190000,
- .end = 0x019ffff,
- .name = "port9",
- },
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
@@ -615,378 +544,7 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
};
static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
- [OCELOT_STAT_RX_OCTETS] = {
- .name = "rx_octets",
- .reg = SYS_COUNT_RX_OCTETS,
- },
- [OCELOT_STAT_RX_UNICAST] = {
- .name = "rx_unicast",
- .reg = SYS_COUNT_RX_UNICAST,
- },
- [OCELOT_STAT_RX_MULTICAST] = {
- .name = "rx_multicast",
- .reg = SYS_COUNT_RX_MULTICAST,
- },
- [OCELOT_STAT_RX_BROADCAST] = {
- .name = "rx_broadcast",
- .reg = SYS_COUNT_RX_BROADCAST,
- },
- [OCELOT_STAT_RX_SHORTS] = {
- .name = "rx_shorts",
- .reg = SYS_COUNT_RX_SHORTS,
- },
- [OCELOT_STAT_RX_FRAGMENTS] = {
- .name = "rx_fragments",
- .reg = SYS_COUNT_RX_FRAGMENTS,
- },
- [OCELOT_STAT_RX_JABBERS] = {
- .name = "rx_jabbers",
- .reg = SYS_COUNT_RX_JABBERS,
- },
- [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
- .name = "rx_crc_align_errs",
- .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
- },
- [OCELOT_STAT_RX_SYM_ERRS] = {
- .name = "rx_sym_errs",
- .reg = SYS_COUNT_RX_SYM_ERRS,
- },
- [OCELOT_STAT_RX_64] = {
- .name = "rx_frames_below_65_octets",
- .reg = SYS_COUNT_RX_64,
- },
- [OCELOT_STAT_RX_65_127] = {
- .name = "rx_frames_65_to_127_octets",
- .reg = SYS_COUNT_RX_65_127,
- },
- [OCELOT_STAT_RX_128_255] = {
- .name = "rx_frames_128_to_255_octets",
- .reg = SYS_COUNT_RX_128_255,
- },
- [OCELOT_STAT_RX_256_511] = {
- .name = "rx_frames_256_to_511_octets",
- .reg = SYS_COUNT_RX_256_511,
- },
- [OCELOT_STAT_RX_512_1023] = {
- .name = "rx_frames_512_to_1023_octets",
- .reg = SYS_COUNT_RX_512_1023,
- },
- [OCELOT_STAT_RX_1024_1526] = {
- .name = "rx_frames_1024_to_1526_octets",
- .reg = SYS_COUNT_RX_1024_1526,
- },
- [OCELOT_STAT_RX_1527_MAX] = {
- .name = "rx_frames_over_1526_octets",
- .reg = SYS_COUNT_RX_1527_MAX,
- },
- [OCELOT_STAT_RX_PAUSE] = {
- .name = "rx_pause",
- .reg = SYS_COUNT_RX_PAUSE,
- },
- [OCELOT_STAT_RX_CONTROL] = {
- .name = "rx_control",
- .reg = SYS_COUNT_RX_CONTROL,
- },
- [OCELOT_STAT_RX_LONGS] = {
- .name = "rx_longs",
- .reg = SYS_COUNT_RX_LONGS,
- },
- [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
- .name = "rx_classified_drops",
- .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
- },
- [OCELOT_STAT_RX_RED_PRIO_0] = {
- .name = "rx_red_prio_0",
- .reg = SYS_COUNT_RX_RED_PRIO_0,
- },
- [OCELOT_STAT_RX_RED_PRIO_1] = {
- .name = "rx_red_prio_1",
- .reg = SYS_COUNT_RX_RED_PRIO_1,
- },
- [OCELOT_STAT_RX_RED_PRIO_2] = {
- .name = "rx_red_prio_2",
- .reg = SYS_COUNT_RX_RED_PRIO_2,
- },
- [OCELOT_STAT_RX_RED_PRIO_3] = {
- .name = "rx_red_prio_3",
- .reg = SYS_COUNT_RX_RED_PRIO_3,
- },
- [OCELOT_STAT_RX_RED_PRIO_4] = {
- .name = "rx_red_prio_4",
- .reg = SYS_COUNT_RX_RED_PRIO_4,
- },
- [OCELOT_STAT_RX_RED_PRIO_5] = {
- .name = "rx_red_prio_5",
- .reg = SYS_COUNT_RX_RED_PRIO_5,
- },
- [OCELOT_STAT_RX_RED_PRIO_6] = {
- .name = "rx_red_prio_6",
- .reg = SYS_COUNT_RX_RED_PRIO_6,
- },
- [OCELOT_STAT_RX_RED_PRIO_7] = {
- .name = "rx_red_prio_7",
- .reg = SYS_COUNT_RX_RED_PRIO_7,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
- .name = "rx_yellow_prio_0",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
- .name = "rx_yellow_prio_1",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
- .name = "rx_yellow_prio_2",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
- .name = "rx_yellow_prio_3",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
- .name = "rx_yellow_prio_4",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
- .name = "rx_yellow_prio_5",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
- .name = "rx_yellow_prio_6",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
- .name = "rx_yellow_prio_7",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_0] = {
- .name = "rx_green_prio_0",
- .reg = SYS_COUNT_RX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_1] = {
- .name = "rx_green_prio_1",
- .reg = SYS_COUNT_RX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_2] = {
- .name = "rx_green_prio_2",
- .reg = SYS_COUNT_RX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_3] = {
- .name = "rx_green_prio_3",
- .reg = SYS_COUNT_RX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_4] = {
- .name = "rx_green_prio_4",
- .reg = SYS_COUNT_RX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_5] = {
- .name = "rx_green_prio_5",
- .reg = SYS_COUNT_RX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_6] = {
- .name = "rx_green_prio_6",
- .reg = SYS_COUNT_RX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_7] = {
- .name = "rx_green_prio_7",
- .reg = SYS_COUNT_RX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_OCTETS] = {
- .name = "tx_octets",
- .reg = SYS_COUNT_TX_OCTETS,
- },
- [OCELOT_STAT_TX_UNICAST] = {
- .name = "tx_unicast",
- .reg = SYS_COUNT_TX_UNICAST,
- },
- [OCELOT_STAT_TX_MULTICAST] = {
- .name = "tx_multicast",
- .reg = SYS_COUNT_TX_MULTICAST,
- },
- [OCELOT_STAT_TX_BROADCAST] = {
- .name = "tx_broadcast",
- .reg = SYS_COUNT_TX_BROADCAST,
- },
- [OCELOT_STAT_TX_COLLISION] = {
- .name = "tx_collision",
- .reg = SYS_COUNT_TX_COLLISION,
- },
- [OCELOT_STAT_TX_DROPS] = {
- .name = "tx_drops",
- .reg = SYS_COUNT_TX_DROPS,
- },
- [OCELOT_STAT_TX_PAUSE] = {
- .name = "tx_pause",
- .reg = SYS_COUNT_TX_PAUSE,
- },
- [OCELOT_STAT_TX_64] = {
- .name = "tx_frames_below_65_octets",
- .reg = SYS_COUNT_TX_64,
- },
- [OCELOT_STAT_TX_65_127] = {
- .name = "tx_frames_65_to_127_octets",
- .reg = SYS_COUNT_TX_65_127,
- },
- [OCELOT_STAT_TX_128_255] = {
- .name = "tx_frames_128_255_octets",
- .reg = SYS_COUNT_TX_128_255,
- },
- [OCELOT_STAT_TX_256_511] = {
- .name = "tx_frames_256_511_octets",
- .reg = SYS_COUNT_TX_256_511,
- },
- [OCELOT_STAT_TX_512_1023] = {
- .name = "tx_frames_512_1023_octets",
- .reg = SYS_COUNT_TX_512_1023,
- },
- [OCELOT_STAT_TX_1024_1526] = {
- .name = "tx_frames_1024_1526_octets",
- .reg = SYS_COUNT_TX_1024_1526,
- },
- [OCELOT_STAT_TX_1527_MAX] = {
- .name = "tx_frames_over_1526_octets",
- .reg = SYS_COUNT_TX_1527_MAX,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
- .name = "tx_yellow_prio_0",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
- .name = "tx_yellow_prio_1",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
- .name = "tx_yellow_prio_2",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
- .name = "tx_yellow_prio_3",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
- .name = "tx_yellow_prio_4",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
- .name = "tx_yellow_prio_5",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
- .name = "tx_yellow_prio_6",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
- .name = "tx_yellow_prio_7",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_0] = {
- .name = "tx_green_prio_0",
- .reg = SYS_COUNT_TX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_1] = {
- .name = "tx_green_prio_1",
- .reg = SYS_COUNT_TX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_2] = {
- .name = "tx_green_prio_2",
- .reg = SYS_COUNT_TX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_3] = {
- .name = "tx_green_prio_3",
- .reg = SYS_COUNT_TX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_4] = {
- .name = "tx_green_prio_4",
- .reg = SYS_COUNT_TX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_5] = {
- .name = "tx_green_prio_5",
- .reg = SYS_COUNT_TX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_6] = {
- .name = "tx_green_prio_6",
- .reg = SYS_COUNT_TX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_7] = {
- .name = "tx_green_prio_7",
- .reg = SYS_COUNT_TX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_AGED] = {
- .name = "tx_aged",
- .reg = SYS_COUNT_TX_AGING,
- },
- [OCELOT_STAT_DROP_LOCAL] = {
- .name = "drop_local",
- .reg = SYS_COUNT_DROP_LOCAL,
- },
- [OCELOT_STAT_DROP_TAIL] = {
- .name = "drop_tail",
- .reg = SYS_COUNT_DROP_TAIL,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
- .name = "drop_yellow_prio_0",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
- .name = "drop_yellow_prio_1",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
- .name = "drop_yellow_prio_2",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
- .name = "drop_yellow_prio_3",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
- .name = "drop_yellow_prio_4",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
- .name = "drop_yellow_prio_5",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
- .name = "drop_yellow_prio_6",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
- .name = "drop_yellow_prio_7",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
- .name = "drop_green_prio_0",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
- .name = "drop_green_prio_1",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
- .name = "drop_green_prio_2",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
- .name = "drop_green_prio_3",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
- .name = "drop_green_prio_4",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
- .name = "drop_green_prio_5",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
- .name = "drop_green_prio_6",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
- .name = "drop_green_prio_7",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
- },
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
@@ -1432,8 +990,9 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
}
static const struct felix_info seville_info_vsc9953 = {
- .target_io_res = vsc9953_target_io_res,
- .port_io_res = vsc9953_port_io_res,
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
@@ -1450,7 +1009,6 @@ static const struct felix_info seville_info_vsc9953 = {
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.port_modes = vsc9953_port_modes,
- .init_regmap = ocelot_regmap_init,
};
static int seville_probe(struct platform_device *pdev)
@@ -1525,8 +1083,6 @@ static int seville_remove(struct platform_device *pdev)
kfree(felix->ds);
kfree(felix);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 0796b7cf8cae..e7b98b864fa1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1099,8 +1099,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 1d3e7782a71f..5669c92c93f7 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1889,9 +1889,9 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
@@ -1957,8 +1957,6 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index bba95613e218..fb45b598847b 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -1017,7 +1017,8 @@ int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
static bool qca8k_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp;
int members = 0;
@@ -1029,15 +1030,24 @@ static bool qca8k_lag_can_offload(struct dsa_switch *ds,
/* Includes the port joining the LAG */
members++;
- if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
return false;
+ }
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
return false;
+ }
return true;
}
@@ -1160,11 +1170,12 @@ static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
int ret;
- if (!qca8k_lag_can_offload(ds, lag, info))
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
ret = qca8k_lag_setup_hash(ds, lag, info);
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index e36ecc9777f4..0b7a5cb12321 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -512,7 +512,8 @@ int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
/* Common port LAG function */
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info);
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag);
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index c58f49d558d2..3e54fac5f902 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -245,8 +245,6 @@ static void realtek_mdio_remove(struct mdio_device *mdiodev)
/* leave the device reset asserted */
if (priv->reset)
gpiod_set_value(priv->reset, 1);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 45992f79ec8d..1b447d96b9c4 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -522,8 +522,6 @@ static int realtek_smi_remove(struct platform_device *pdev)
if (priv->reset)
gpiod_set_value(priv->reset, 1);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 0744e8162e1d..ed413d555bec 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1025,8 +1025,6 @@ static int a5psw_remove(struct platform_device *pdev)
clk_disable_unprepare(a5psw->hclk);
clk_disable_unprepare(a5psw->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b03d0d0c3dbf..412666111b0c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3351,8 +3351,6 @@ static void sja1105_remove(struct spi_device *spi)
return;
dsa_unregister_switch(priv->ds);
-
- spi_set_drvdata(spi, NULL);
}
static void sja1105_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index fe4b154a0a57..bd4206e8f9af 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -121,8 +121,6 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
vsc73xx_remove(&vsc_platform->vsc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 97a92e6da60d..85b9a0f51dd8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -167,8 +167,6 @@ static void vsc73xx_spi_remove(struct spi_device *spi)
return;
vsc73xx_remove(&vsc_spi->vsc);
-
- spi_set_drvdata(spi, NULL);
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 3887ed33c5fe..fa622639d640 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -109,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -138,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 6deae388a0d6..54065cdedd35 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -105,18 +105,14 @@ static int xrs700x_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int xrs700x_i2c_remove(struct i2c_client *i2c)
+static void xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
- return 0;
+ return;
xrs700x_switch_remove(priv);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 127a677d1f39..5f7d344b5d73 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -140,8 +140,6 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
return;
xrs700x_switch_remove(priv);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index f82ad7419508..aa0fc00faecb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -102,7 +102,7 @@ static const struct net_device_ops dummy_netdev_ops = {
static void dummy_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static const struct ethtool_ops dummy_ethtool_ops = {
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 846fa3af4504..fb68339e1511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1135,7 +1135,7 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1d124b0f65e7..d2f4358cc550 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1527,7 +1527,7 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4673bc1604e7..82f94b1635bf 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -480,7 +480,7 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ccf07667aa5e..082388bb6169 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2959,13 +2959,13 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strlcpy(info->bus_info, dev_name(vp->gendev),
+ strscpy(info->bus_info, dev_name(vp->gendev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index cad4f354cc76..aaaff3ba43ef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -969,12 +969,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if (tp->card_state == Sleeping) {
- strlcpy(info->fw_version, "Sleep image",
+ strscpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strlcpy(info->fw_version, "Unknown runtime",
+ strscpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
@@ -984,8 +984,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1f8acbba5b6b..af603256b724 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -579,9 +579,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 ax_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index e7b879123bb1..05d39ecb97ff 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -555,9 +555,9 @@ static int __init etherm_addr(char *addr)
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 21047ae1bc3d..8a7918d33419 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -450,8 +450,7 @@ static int mcf8390_remove(struct platform_device *pdev)
unregister_netdev(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a55c1d5a0a1..1917da784191 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -121,6 +121,7 @@ config LANTIQ_XRX200
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
+source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c06e75ed4231..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 1cfdd01b4c2e..cd4d71b83c33 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1576,7 +1576,7 @@ static int owl_emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
netdev->netdev_ops = &owl_emac_netdev_ops;
netdev->ethtool_ops = &owl_emac_ethtool_ops;
- netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll);
ret = devm_register_netdev(dev, netdev);
if (ret) {
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8f0a6b9c518e..857361c74f5d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1844,8 +1844,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..da3bdd302502
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Analog Devices device configuration
+#
+
+config NET_VENDOR_ADI
+ bool "Analog Devices devices"
+ default y
+ depends on SPI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ADI devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ADI
+
+config ADIN1110
+ tristate "Analog Devices ADIN1110 MAC-PHY"
+ depends on SPI && NET_SWITCHDEV
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices ADIN1110
+ Low Power 10BASE-T1L Ethernet MAC-PHY.
+
+endif # NET_VENDOR_ADI
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..d0383d94303c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Makefile for the Analog Devices network device drivers.
+#
+
+obj-$(CONFIG_ADIN1110) += adin1110.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
new file mode 100644
index 000000000000..aaee7c4248e6
--- /dev/null
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY
+ * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/crc8.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <net/switchdev.h>
+
+#include <asm/unaligned.h>
+
+#define ADIN1110_PHY_ID 0x1
+
+#define ADIN1110_RESET 0x03
+#define ADIN1110_SWRESET BIT(0)
+
+#define ADIN1110_CONFIG1 0x04
+#define ADIN1110_CONFIG1_SYNC BIT(15)
+
+#define ADIN1110_CONFIG2 0x06
+#define ADIN2111_P2_FWD_UNK2HOST BIT(12)
+#define ADIN2111_PORT_CUT_THRU_EN BIT(11)
+#define ADIN1110_CRC_APPEND BIT(5)
+#define ADIN1110_FWD_UNK2HOST BIT(2)
+
+#define ADIN1110_STATUS0 0x08
+
+#define ADIN1110_STATUS1 0x09
+#define ADIN2111_P2_RX_RDY BIT(17)
+#define ADIN1110_SPI_ERR BIT(10)
+#define ADIN1110_RX_RDY BIT(4)
+
+#define ADIN1110_IMASK1 0x0D
+#define ADIN2111_RX_RDY_IRQ BIT(17)
+#define ADIN1110_SPI_ERR_IRQ BIT(10)
+#define ADIN1110_RX_RDY_IRQ BIT(4)
+#define ADIN1110_TX_RDY_IRQ BIT(3)
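+/* Bits set in IMASK1 mask (disable) the corresponding interrupt source;
+ * the open path below therefore writes the complement of the IRQ set it
+ * wants enabled.
+ */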
+
+#define ADIN1110_MDIOACC 0x20
+#define ADIN1110_MDIO_TRDONE BIT(31)
+#define ADIN1110_MDIO_ST GENMASK(29, 28)
+#define ADIN1110_MDIO_OP GENMASK(27, 26)
+#define ADIN1110_MDIO_PRTAD GENMASK(25, 21)
+#define ADIN1110_MDIO_DEVAD GENMASK(20, 16)
+#define ADIN1110_MDIO_DATA GENMASK(15, 0)
+
+#define ADIN1110_TX_FSIZE 0x30
+#define ADIN1110_TX 0x31
+#define ADIN1110_TX_SPACE 0x32
+
+#define ADIN1110_MAC_ADDR_FILTER_UPR 0x50
+#define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31)
+#define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30)
+#define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17)
+#define ADIN1110_MAC_ADDR_TO_HOST BIT(16)
+
+#define ADIN1110_MAC_ADDR_FILTER_LWR 0x51
+
+#define ADIN1110_MAC_ADDR_MASK_UPR 0x70
+#define ADIN1110_MAC_ADDR_MASK_LWR 0x71
+
+#define ADIN1110_RX_FSIZE 0x90
+#define ADIN1110_RX 0x91
+
+#define ADIN2111_RX_P2_FSIZE 0xC0
+#define ADIN2111_RX_P2 0xC1
+
+#define ADIN1110_CLEAR_STATUS0 0xFFF
+
+/* MDIO_OP codes */
+#define ADIN1110_MDIO_OP_WR 0x1
+#define ADIN1110_MDIO_OP_RD 0x3
+
+#define ADIN1110_CD BIT(7)
+#define ADIN1110_WRITE BIT(5)
+
+#define ADIN1110_MAX_BUFF 2048
+#define ADIN1110_MAX_FRAMES_READ 64
+#define ADIN1110_WR_HEADER_LEN 2
+#define ADIN1110_FRAME_HEADER_LEN 2
+#define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2
+#define ADIN1110_RD_HEADER_LEN 3
+#define ADIN1110_REG_LEN 4
+#define ADIN1110_FEC_LEN 4
+
+#define ADIN1110_PHY_ID_VAL 0x0283BC91
+#define ADIN2111_PHY_ID_VAL 0x0283BCA1
+
+#define ADIN_MAC_MAX_PORTS 2
+#define ADIN_MAC_MAX_ADDR_SLOTS 16
+
+#define ADIN_MAC_MULTICAST_ADDR_SLOT 0
+#define ADIN_MAC_BROADCAST_ADDR_SLOT 1
+#define ADIN_MAC_P1_ADDR_SLOT 2
+#define ADIN_MAC_P2_ADDR_SLOT 3
+#define ADIN_MAC_FDB_ADDR_SLOT 4
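+/* Address filter slot usage in this driver: slots 0 and 1 filter
+ * multicast and broadcast traffic (the only two slots with address
+ * masks), slots 2 and 3 hold the host MAC address of each port, and
+ * slots 4..15 are used for bridge FDB entries.
+ */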
+
+DECLARE_CRC8_TABLE(adin1110_crc_table);
+
+enum adin1110_chips_id {
+ ADIN1110_MAC = 0,
+ ADIN2111_MAC,
+};
+
+struct adin1110_cfg {
+ enum adin1110_chips_id id;
+ char name[MDIO_NAME_SIZE];
+ u32 phy_ids[PHY_MAX_ADDR];
+ u32 ports_nr;
+ u32 phy_id_val;
+};
+
+struct adin1110_port_priv {
+ struct adin1110_priv *priv;
+ struct net_device *netdev;
+ struct net_device *bridge;
+ struct phy_device *phydev;
+ struct work_struct tx_work;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct work_struct rx_mode_work;
+ u32 flags;
+ struct sk_buff_head txq;
+ u32 nr;
+ u32 state;
+ struct adin1110_cfg *cfg;
+};
+
+struct adin1110_priv {
+ struct mutex lock; /* protect spi */
+ spinlock_t state_lock; /* protect RX mode */
+ struct mii_bus *mii_bus;
+ struct spi_device *spidev;
+ bool append_crc;
+ struct adin1110_cfg *cfg;
+ u32 tx_space;
+ u32 irq_mask;
+ bool forwarding;
+ int irq;
+ struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS];
+ char mii_bus_name[MII_BUS_ID_SIZE];
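+ /* scratch buffer used to build all SPI transfers; presumably kept
+ * cacheline aligned so it can be handed to the SPI core as a
+ * DMA-safe buffer
+ */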
+ u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned;
+};
+
+struct adin1110_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct adin1110_port_priv *port_priv;
+ unsigned long event;
+};
+
+static struct adin1110_cfg adin1110_cfgs[] = {
+ {
+ .id = ADIN1110_MAC,
+ .name = "adin1110",
+ .phy_ids = {1},
+ .ports_nr = 1,
+ .phy_id_val = ADIN1110_PHY_ID_VAL,
+ },
+ {
+ .id = ADIN2111_MAC,
+ .name = "adin2111",
+ .phy_ids = {1, 2},
+ .ports_nr = 2,
+ .phy_id_val = ADIN2111_PHY_ID_VAL,
+ },
+};
+
+static u8 adin1110_crc_data(u8 *data, u32 len)
+{
+ return crc8(adin1110_crc_table, data, len, 0);
+}
+
+static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+{
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+ struct spi_transfer t[2] = {0};
+ int ret;
+
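+ /* Header for a register read, as built below: ADIN1110_CD marks a
+ * register (control) access, the 13-bit register address is split
+ * across the first two bytes, and a CRC8 of those bytes is appended
+ * when priv->append_crc is set.
+ */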
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+ priv->data[2] = 0x00;
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ priv->data[3] = 0x00;
+ header_len++;
+ }
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+ t[1].rx_buf = &priv->data[header_len];
+ t[1].len = read_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret)
+ return ret;
+
+ if (priv->append_crc) {
+ u8 recv_crc;
+ u8 crc;
+
+ crc = adin1110_crc_data(&priv->data[header_len],
+ ADIN1110_REG_LEN);
+ recv_crc = priv->data[header_len + ADIN1110_REG_LEN];
+
+ if (crc != recv_crc) {
+ dev_err_ratelimited(&priv->spidev->dev, "CRC error.");
+ return -EBADMSG;
+ }
+ }
+
+ *val = get_unaligned_be32(&priv->data[header_len]);
+
+ return ret;
+}
+
+static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val)
+{
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ u32 write_len = ADIN1110_REG_LEN;
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], header_len);
+ header_len++;
+ }
+
+ put_unaligned_be32(val, &priv->data[header_len]);
+ if (priv->append_crc) {
+ priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len],
+ write_len);
+ write_len++;
+ }
+
+ return spi_write(priv->spidev, &priv->data[0], header_len + write_len);
+}
+
+static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg,
+ unsigned long mask, unsigned long val)
+{
+ u32 write_val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, reg, &write_val);
+ if (ret < 0)
+ return ret;
+
+ set_mask_bits(&write_val, mask, val);
+
+ return adin1110_write_reg(priv, reg, write_val);
+}
+
+static int adin1110_round_len(int len)
+{
+ /* can read/write only multiples of 4 bytes of payload */
+ len = ALIGN(len, 4);
+
+ /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. */
+ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF)
+ return -EINVAL;
+
+ return len;
+}
+
+static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ struct spi_transfer t[2] = {0};
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+ int round_len;
+ u16 reg;
+ int ret;
+
+ if (!port_priv->nr) {
+ reg = ADIN1110_RX;
+ ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size);
+ } else {
+ reg = ADIN2111_RX_P2;
+ ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE,
+ &frame_size);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* The read frame size includes the extra 2 bytes
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+ return ret;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+ return ret;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len);
+ if (!rxb)
+ return -ENOMEM;
+
+ memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ t[1].rx_buf = &rxb->data[0];
+ t[1].len = round_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+ skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+ rxb->offload_fwd_mark = 1;
+
+ netif_rx(rxb);
+
+ port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN;
+ port_priv->rx_packets++;
+
+ return 0;
+}
+
+static int adin1110_write_fifo(struct adin1110_port_priv *port_priv,
+ struct sk_buff *txb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ __be16 frame_header;
+ int padding = 0;
+ int padded_len;
+ int round_len;
+ int ret;
+
+ /* Pad the frame to a 64 byte length;
+ * neither the MAC nor the PHY will otherwise
+ * add the required padding.
+ * The FEC will be added by the MAC internally.
+ */
+ if (txb->len + ADIN1110_FEC_LEN < 64)
+ padding = 64 - (txb->len + ADIN1110_FEC_LEN);
+
+ padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN;
+
+ round_len = adin1110_round_len(padded_len);
+ if (round_len < 0)
+ return round_len;
+
+ ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len);
+ if (ret < 0)
+ return ret;
+
+ memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE;
+ priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX);
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ /* record in the frame header the port on which to send the frame */
+ frame_header = cpu_to_be16(port_priv->nr);
+ memcpy(&priv->data[header_len], &frame_header,
+ ADIN1110_FRAME_HEADER_LEN);
+
+ memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN],
+ txb->data, txb->len);
+
+ ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len);
+ if (ret < 0)
+ return ret;
+
+ port_priv->tx_bytes += txb->len;
+ port_priv->tx_packets++;
+
+ return 0;
+}
+
+static int adin1110_read_mdio_acc(struct adin1110_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return 0;
+
+ return val;
+}
+
+static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+
+ /* write the clause 22 read command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ /* The ADIN1110_MDIO_TRDONE bit of the ADIN1110_MDIOACC
+ * register is set when the read is done.
+ * Once the transaction completes, the ADIN1110_MDIO_DATA
+ * bitfield of the ADIN1110_MDIOACC register contains
+ * the requested register value.
+ */
+ ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ if (ret < 0)
+ return ret;
+
+ return (val & ADIN1110_MDIO_DATA);
+}
+
+static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 reg_val)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+ val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val);
+
+ /* write the clause 22 write command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+}
+
+/* The ADIN1110 MAC-PHY contains an ADIN1100 PHY.
+ * The ADIN2111 MAC-PHY contains two ADIN1100 PHYs.
+ * By registering a new MDIO bus we allow the PHY abstraction
+ * layer to discover the encapsulated PHY and probe the
+ * ADIN1100 driver.
+ */
+static int adin1110_register_mdiobus(struct adin1110_priv *priv,
+ struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u",
+ priv->cfg->name, priv->spidev->chip_select);
+
+ mii_bus->name = priv->mii_bus_name;
+ mii_bus->read = adin1110_mdio_read;
+ mii_bus->write = adin1110_mdio_write;
+ mii_bus->priv = priv;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
+ mii_bus->probe_capabilities = MDIOBUS_C22;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mii_bus);
+ if (ret)
+ return ret;
+
+ priv->mii_bus = mii_bus;
+
+ return 0;
+}
+
+static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv,
+ u32 status)
+{
+ if (!netif_oper_up(port_priv->netdev))
+ return false;
+
+ if (!port_priv->nr)
+ return !!(status & ADIN1110_RX_RDY);
+ else
+ return !!(status & ADIN2111_P2_RX_RDY);
+}
+
+static void adin1110_read_frames(struct adin1110_port_priv *port_priv,
+ unsigned int budget)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 status1;
+ int ret;
+
+ while (budget) {
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ return;
+
+ if (!adin1110_port_rx_ready(port_priv, status1))
+ break;
+
+ ret = adin1110_read_fifo(port_priv);
+ if (ret < 0)
+ return;
+
+ budget--;
+ }
+}
+
+static void adin1110_wake_queues(struct adin1110_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++)
+ netif_wake_queue(priv->ports[i]->netdev);
+}
+
+static irqreturn_t adin1110_irq(int irq, void *p)
+{
+ struct adin1110_priv *priv = p;
+ u32 status1;
+ u32 val;
+ int ret;
+ int i;
+
+ mutex_lock(&priv->lock);
+
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ goto out;
+
+ if (priv->append_crc && (status1 & ADIN1110_SPI_ERR))
+ dev_warn_ratelimited(&priv->spidev->dev,
+ "SPI CRC error on write.\n");
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0)
+ goto out;
+
+ /* TX FIFO space is expressed in half-words */
+ priv->tx_space = 2 * val;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (adin1110_port_rx_ready(priv->ports[i], status1))
+ adin1110_read_frames(priv->ports[i],
+ ADIN1110_MAX_FRAMES_READ);
+ }
+
+ /* clear IRQ sources */
+ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0);
+ adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (priv->tx_space > 0 && ret >= 0)
+ adin1110_wake_queues(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* The ADIN1110 can filter up to 16 MAC addresses; mac_nr here is the filter slot used */
+static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv,
+ int mac_nr, const u8 *addr,
+ u8 *mask, u32 port_rules)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 offset = mac_nr * 2;
+ u32 port_rules_mask;
+ int ret;
+ u32 val;
+
+ if (!port_priv->nr)
+ port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (port_rules & port_rules_mask)
+ port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ port_rules_mask |= GENMASK(15, 0);
+ val = port_rules | get_unaligned_be16(&addr[0]);
+ ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset,
+ port_rules_mask, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&addr[2]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_FILTER_LWR + offset, val);
+ if (ret < 0)
+ return ret;
+
+ /* Only the first two MAC address slots support masking. */
+ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) {
+ val = get_unaligned_be16(&mask[0]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_UPR + offset,
+ val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&mask[2]);
+ return adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_LWR + offset,
+ val);
+ }
+
+ return 0;
+}
+
+static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr)
+{
+ u32 offset = mac_nr * 2;
+ int ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ /* only the first two MAC address slots are maskable */
+ if (mac_nr <= 1) {
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0);
+ }
+
+ return ret;
+}
+
+static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv,
+ bool fw_to_host,
+ bool fw_to_other_port)
+{
+ u32 port_rules = 0;
+
+ if (!port_priv->nr)
+ port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (fw_to_host)
+ port_rules |= ADIN1110_MAC_ADDR_TO_HOST;
+
+ if (fw_to_other_port && port_priv->priv->forwarding)
+ port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ return port_rules;
+}
+
+static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_multicast)
+{
+ u8 mask[ETH_ALEN] = {0};
+ u8 mac[ETH_ALEN] = {0};
+ u32 port_rules = 0;
+
+ mask[0] = BIT(0);
+ mac[0] = BIT(0);
+
+ if (accept_multicast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mac,
+ mask, port_rules);
+}
+
+static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_broadcast)
+{
+ u32 port_rules = 0;
+ u8 mask[ETH_ALEN];
+
+ memset(mask, 0xFF, ETH_ALEN);
+
+ if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mask,
+ mask, port_rules);
+}
+
+static int adin1110_set_mac_address(struct net_device *netdev,
+ const unsigned char *dev_addr)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ u32 mac_slot;
+
+ if (!is_valid_ether_addr(dev_addr))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, dev_addr);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ port_rules = adin1110_port_rules(port_priv, true, false);
+
+ return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr,
+ mask, port_rules);
+}
+
+static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_mac_address(netdev, sa->sa_data);
+}
+
+static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_do_ioctl(netdev, rq, cmd);
+}
+
+static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv,
+ bool promisc)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+
+ if (port_priv->state != BR_STATE_FORWARDING)
+ promisc = false;
+
+ if (!port_priv->nr)
+ mask = ADIN1110_FWD_UNK2HOST;
+ else
+ mask = ADIN2111_P2_FWD_UNK2HOST;
+
+ return adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ mask, promisc ? mask : 0);
+}
+
+static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv)
+{
+ int ret;
+
+ ret = adin1110_set_promisc_mode(port_priv,
+ !!(port_priv->flags & IFF_PROMISC));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_ALLMULTI));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_broadcasts_filter(port_priv,
+ ADIN_MAC_BROADCAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_BROADCAST));
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1,
+ ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC);
+}
+
+static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv)
+{
+ int i;
+
+ if (priv->cfg->id != ADIN2111_MAC)
+ return false;
+
+ /* Can't enable forwarding if ports do not belong to the same bridge */
+ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge)
+ return false;
+
+ /* Can't enable forwarding if there is a port
+ * that has been blocked by STP.
+ */
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (priv->ports[i]->state != BR_STATE_FORWARDING)
+ return false;
+ }
+
+ return true;
+}
+
+static void adin1110_rx_mode_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+
+ port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+ adin1110_setup_rx_mode(port_priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void adin1110_set_rx_mode(struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ spin_lock(&priv->state_lock);
+
+ port_priv->flags = dev->flags;
+ schedule_work(&port_priv->rx_mode_work);
+
+ spin_unlock(&priv->state_lock);
+}
+
+static int adin1110_net_open(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /* Configure MAC to compute and append the FCS itself. */
+ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND);
+ if (ret < 0)
+ goto out;
+
+ val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ;
+ if (priv->cfg->id == ADIN2111_MAC)
+ val |= ADIN2111_RX_RDY_IRQ;
+
+ priv->irq_mask = val;
+ ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret);
+ goto out;
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret);
+ goto out;
+ }
+
+ priv->tx_space = 2 * val;
+
+ port_priv->state = BR_STATE_FORWARDING;
+ ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr);
+ if (ret < 0) {
+ netdev_err(net_dev, "Could not set MAC address: %pM, %d\n",
+ net_dev->dev_addr, ret);
+ goto out;
+ }
+
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC,
+ ADIN1110_CONFIG1_SYNC);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+
+ phy_start(port_priv->phydev);
+
+ netif_start_queue(net_dev);
+
+ return 0;
+}
+
+static int adin1110_net_stop(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+ int ret;
+
+ mask = !port_priv->nr ? ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ;
+
+ /* Disable RX RDY IRQs */
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ netif_stop_queue(port_priv->netdev);
+ flush_work(&port_priv->tx_work);
+ phy_stop(port_priv->phydev);
+
+ return 0;
+}
+
+static void adin1110_tx_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+ struct sk_buff *txb;
+ int ret;
+
+ port_priv = container_of(work, struct adin1110_port_priv, tx_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+
+ while ((txb = skb_dequeue(&port_priv->txq))) {
+ ret = adin1110_write_fifo(port_priv, txb);
+ if (ret < 0)
+ dev_err_ratelimited(&priv->spidev->dev,
+ "Frame write error: %d\n", ret);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ netdev_tx_t netdev_ret = NETDEV_TX_OK;
+ u32 tx_space_needed;
+
+ tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN;
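+ /* priv->tx_space tracks the free device TX FIFO space reported by
+ * ADIN1110_TX_SPACE; besides its payload, each queued frame consumes
+ * the 2 byte frame header and the 2 byte frame size field, hence the
+ * extra lengths above.
+ */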
+ if (tx_space_needed > priv->tx_space) {
+ netif_stop_queue(dev);
+ netdev_ret = NETDEV_TX_BUSY;
+ } else {
+ priv->tx_space -= tx_space_needed;
+ skb_queue_tail(&port_priv->txq, skb);
+ }
+
+ schedule_work(&port_priv->tx_work);
+
+ return netdev_ret;
+}
+
+static void adin1110_ndo_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ storage->rx_packets = port_priv->rx_packets;
+ storage->tx_packets = port_priv->tx_packets;
+
+ storage->rx_bytes = port_priv->rx_bytes;
+ storage->tx_bytes = port_priv->tx_bytes;
+}
+
+static int adin1110_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN);
+ memcpy(ppid->id, priv->mii_bus_name, ppid->id_len);
+
+ return 0;
+}
+
+static int adin1110_ndo_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->nr);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct net_device_ops adin1110_netdev_ops = {
+ .ndo_open = adin1110_net_open,
+ .ndo_stop = adin1110_net_stop,
+ .ndo_eth_ioctl = adin1110_ioctl,
+ .ndo_start_xmit = adin1110_start_xmit,
+ .ndo_set_mac_address = adin1110_ndo_set_mac_address,
+ .ndo_set_rx_mode = adin1110_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = adin1110_ndo_get_stats64,
+ .ndo_get_port_parent_id = adin1110_port_get_port_parent_id,
+ .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name,
+};
+
+static void adin1110_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, "ADIN1110", sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static const struct ethtool_ops adin1110_ethtool_ops = {
+ .get_drvinfo = adin1110_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void adin1110_adjust_link(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev->link)
+ phy_print_status(phydev);
+}
+
+/* The PHY ID is also stored in the MAC registers;
+ * check the SPI connection by reading it.
+ */
+static int adin1110_check_spi(struct adin1110_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != priv->cfg->phy_id_val) {
+ dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n",
+ priv->cfg->phy_id_val, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable)
+{
+ int ret;
+ int i;
+
+ priv->forwarding = enable;
+
+ if (!priv->forwarding) {
+ for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) {
+ ret = adin1110_clear_mac_address(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Forwarding is optimised when the MAC runs in Cut Through mode. */
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ ADIN2111_PORT_CUT_THRU_EN,
+ priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = adin1110_setup_rx_mode(priv->ports[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = bridge;
+
+ if (adin1110_can_offload_forwarding(priv)) {
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, true);
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr);
+}
+
+static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = NULL;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, false);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ int ret = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = adin1110_port_bridge_join(port_priv, info->upper_dev);
+ else
+ ret = adin1110_port_bridge_leave(port_priv, info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block adin1110_netdevice_nb = {
+ .notifier_call = adin1110_netdevice_event,
+};
+
+static void adin1110_disconnect_phy(void *data)
+{
+ phy_disconnect(data);
+}
+
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
+static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->state = BR_STATE_FORWARDING;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_mac_address(port_priv->netdev,
+ port_priv->netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+
+ if (adin1110_can_offload_forwarding(priv))
+ ret = adin1110_hw_forwarding(priv, true);
+ else
+ ret = adin1110_setup_rx_mode(port_priv);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv)
+{
+ u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_slot;
+ int ret;
+
+ port_priv->state = BR_STATE_BLOCKING;
+
+ mutex_lock(&priv->lock);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ ret = adin1110_clear_mac_address(priv, mac_slot);
+ if (ret < 0)
+ goto out;
+
+ ret = adin1110_hw_forwarding(priv, false);
+ if (ret < 0)
+ goto out;
+
+ /* Allow only BPDUs to be passed to the CPU */
+ memset(mask, 0xFF, ETH_ALEN);
+ port_rules = adin1110_port_rules(port_priv, true, false);
+ ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ mask, port_rules);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/* The ADIN1110/2111 has no native STP support.
+ * Listen for bridge core state changes and let
+ * either all frames or only BPDUs pass.
+ */
+static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv,
+ u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return adin1110_port_set_forwarding_state(port_priv);
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return adin1110_port_set_blocking_state(port_priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adin1110_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return adin1110_port_attr_stp_state_set(port_priv,
+ attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin1110_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(netdev, ptr,
+ adin1110_port_dev_check,
+ adin1110_port_attr_set);
+
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block adin1110_switchdev_blocking_notifier = {
+ .notifier_call = adin1110_switchdev_blocking_event,
+};
+
+static void adin1110_fdb_offload_notify(struct net_device *netdev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ netdev, &info.info, NULL);
+}
+
+static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ struct adin1110_port_priv *other_port;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_nr;
+ u32 val;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (!priv->forwarding)
+ return 0;
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ /* Find free FDB slot on device. */
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+ if (!val)
+ break;
+ }
+
+ if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS)
+ return -ENOMEM;
+
+ other_port = priv->ports[!port_priv->nr];
+ port_rules = adin1110_port_rules(port_priv, false, true);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ mask, port_rules);
+}
+
+static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr)
+{
+ u32 val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be16(val, addr);
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be32(val, addr + 2);
+
+ return 0;
+}
+
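+/* Remove an FDB entry by walking the address filter slots and clearing
+ * every slot that matches the given MAC address.
+ */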
+static int adin1110_fdb_del(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 addr[ETH_ALEN];
+ int mac_nr;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_mac(priv, mac_nr, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(addr, fdb->addr)) {
+ ret = adin1110_clear_mac_address(priv, mac_nr);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
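+/* FDB updates involve SPI register accesses that may sleep, so they are
+ * deferred from the atomic switchdev notifier to this work handler,
+ * where the driver lock can be taken.
+ */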
+static void adin1110_switchdev_event_work(struct work_struct *work)
+{
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct adin1110_port_priv *port_priv;
+ int ret;
+
+ switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
+ port_priv = switchdev_work->port_priv;
+
+ mutex_lock(&port_priv->priv->lock);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info);
+ if (!ret)
+ adin1110_fdb_offload_notify(port_priv->netdev,
+ &switchdev_work->fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ adin1110_fdb_del(port_priv, &switchdev_work->fdb_info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&port_priv->priv->lock);
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port_priv->netdev);
+}
+
+/* called under rcu_read_lock() */
+static int adin1110_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!adin1110_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
+ switchdev_work->port_priv = port_priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(netdev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block adin1110_switchdev_notifier = {
+ .notifier_call = adin1110_switchdev_event,
+};
+
+static void adin1110_unregister_notifiers(void *data)
+{
+ unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+}
+
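+/* Register netdevice and switchdev notifiers; teardown is tied to the
+ * device lifetime through a devm action.
+ */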
+static int adin1110_setup_notifiers(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ int ret;
+
+ ret = register_netdevice_notifier(&adin1110_netdevice_nb);
+ if (ret < 0)
+ return ret;
+
+ ret = register_switchdev_notifier(&adin1110_switchdev_notifier);
+ if (ret < 0)
+ goto err_netdev;
+
+ ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ if (ret < 0)
+ goto err_sdev;
+
+ return devm_add_action_or_reset(dev, adin1110_unregister_notifiers, NULL);
+
+err_sdev:
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+
+err_netdev:
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+ return ret;
+}
+
+static int adin1110_probe_netdevs(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct adin1110_port_priv *port_priv;
+ struct net_device *netdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ netdev = devm_alloc_etherdev(dev, sizeof(*port_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ port_priv = netdev_priv(netdev);
+ port_priv->netdev = netdev;
+ port_priv->priv = priv;
+ port_priv->cfg = priv->cfg;
+ port_priv->nr = i;
+ priv->ports[i] = port_priv;
+ SET_NETDEV_DEV(netdev, dev);
+
+ ret = device_get_ethdev_address(dev, netdev);
+ if (ret < 0)
+ return ret;
+
+ netdev->irq = priv->spidev->irq;
+ INIT_WORK(&port_priv->tx_work, adin1110_tx_work);
+ INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work);
+ skb_queue_head_init(&port_priv->txq);
+
+ netif_carrier_off(netdev);
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->netdev_ops = &adin1110_netdev_ops;
+ netdev->ethtool_ops = &adin1110_ethtool_ops;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
+
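+ /* The internal PHY for each port sits at MDIO address (port index + 1). */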
+ port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not find PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ port_priv->phydev = phy_connect(netdev,
+ phydev_name(port_priv->phydev),
+ adin1110_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy,
+ port_priv->phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* ADIN1110 INT_N pin will be used to signal the host */
+ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL,
+ adin1110_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_setup_notifiers(priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = devm_register_netdev(dev, priv->ports[i]->netdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register network device.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adin1110_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct adin1110_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spidev = spi;
+ priv->cfg = &adin1110_cfgs[dev_id->driver_data];
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+
+ mutex_init(&priv->lock);
+ spin_lock_init(&priv->state_lock);
+
+ /* use of CRC on control and data transactions is pin dependent */
+ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc");
+ if (priv->append_crc)
+ crc8_populate_msb(adin1110_crc_table, 0x7);
+
+ ret = adin1110_check_spi(priv);
+ if (ret < 0) {
+ dev_err(dev, "Probe SPI Read check failed: %d\n", ret);
+ return ret;
+ }
+
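+ /* Software-reset the MAC so configuration starts from a known state. */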
+ ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_register_mdiobus(priv, dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not register MDIO bus %d\n", ret);
+ return ret;
+ }
+
+ return adin1110_probe_netdevs(priv);
+}
+
+static const struct of_device_id adin1110_match_table[] = {
+ { .compatible = "adi,adin1110" },
+ { .compatible = "adi,adin2111" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adin1110_match_table);
+
+static const struct spi_device_id adin1110_spi_id[] = {
+ { .name = "adin1110", .driver_data = ADIN1110_MAC },
+ { .name = "adin2111", .driver_data = ADIN2111_MAC },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adin1110_spi_id);
+
+static struct spi_driver adin1110_driver = {
+ .driver = {
+ .name = "adin1110",
+ .of_match_table = adin1110_match_table,
+ },
+ .probe = adin1110_probe,
+ .id_table = adin1110_spi_id,
+};
+module_spi_driver(adin1110_driver);
+
+MODULE_DESCRIPTION("ADIN1110 Network driver");
+MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 447dc64a17e5..e104fb02817d 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1112,9 +1112,9 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strlcpy(info->driver, dev_driver_string(greth->dev),
+ strscpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
@@ -1507,7 +1507,7 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* setup NAPI */
- netif_napi_add(dev, &greth->napi, greth_poll, 64);
+ netif_napi_add(dev, &greth->napi, greth_poll);
return 0;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index d19d1579c415..5fab589b3ddf 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2952,8 +2952,8 @@ static void et131x_get_drvinfo(struct net_device *netdev,
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -3969,7 +3969,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
et131x_init_send(adapter);
- netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, et131x_poll);
eth_hw_addr_set(netdev, adapter->addr);
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index ce353b0c02a3..a30d0f172986 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1531,8 +1531,8 @@ static void slic_get_drvinfo(struct net_device *dev,
{
struct slic_device *sdev = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops slic_ethtool_ops = {
@@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 621ce742ad21..a94c62956eed 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -331,8 +331,8 @@ prepare_err:
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static u32 emac_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 22fe98555b24..d7762da8b2c0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2691,12 +2691,12 @@ static void ace_get_drvinfo(struct net_device *dev,
{
struct ace_private *ap = netdev_priv(dev);
- strlcpy(info->driver, "acenic", sizeof(info->driver));
+ strscpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strscpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 914e56b91467..dd7fd41ccde5 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -3,6 +3,8 @@ config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
depends on HAS_DMA
select PHYLIB
+ select PHYLINK
+ select PCS_ALTERA_TSE
help
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index f17acfb579a0..db5eed06e92d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
@@ -109,17 +110,6 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
-/* SGMII PCS register addresses
- */
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
-#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_1 0x13
-#define SGMII_PCS_IF_MODE 0x14
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
-#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
-
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -423,6 +413,9 @@ struct altera_tse_private {
void __iomem *tx_dma_csr;
void __iomem *tx_dma_desc;
+ /* SGMII PCS address space */
+ void __iomem *pcs_base;
+
/* Rx buffers queue */
struct tse_buffer *rx_ring;
u32 rx_cons;
@@ -480,6 +473,10 @@ struct altera_tse_private {
u32 msg_enable;
struct altera_dmaops *dmaops;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
};
/* Function prototypes
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 4299f1301149..81313c85833e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -199,9 +199,9 @@ static int tse_reglen(struct net_device *dev)
static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *regbuf)
{
- int i;
struct altera_tse_private *priv = netdev_priv(dev);
u32 *buf = regbuf;
+ int i;
/* Set version to a known value, so ethtool knows
* how to do any special formatting of this data.
@@ -221,6 +221,22 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
+static int tse_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static int tse_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
@@ -231,8 +247,9 @@ static const struct ethtool_ops tse_ethtool_ops = {
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = tse_ethtool_get_link_ksettings,
+ .set_link_ksettings = tse_ethtool_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 8c5828582c21..7633b227b2ca 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/pcs-altera-tse.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
@@ -86,27 +87,6 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
-/* PCS Register read/write functions
- */
-static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
-{
- return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
-}
-
-static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
- u16 value)
-{
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
-}
-
-/* Check PCS scratch memory */
-static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
-{
- sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
- return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
-}
-
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -141,10 +121,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
struct device_node *mdio_node = NULL;
- struct mii_bus *mdio = NULL;
struct device_node *child_node = NULL;
+ struct mii_bus *mdio = NULL;
+ int ret;
for_each_child_of_node(priv->device->of_node, child_node) {
if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
@@ -236,8 +216,8 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
static void tse_free_rx_buffer(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct sk_buff *skb = rxbuffer->skb;
dma_addr_t dma_addr = rxbuffer->dma_addr;
+ struct sk_buff *skb = rxbuffer->skb;
if (skb != NULL) {
if (dma_addr)
@@ -358,6 +338,7 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
u16 vid;
+
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
!__vlan_get_tag(skb, &vid)) {
eth_hdr = (struct ethhdr *)skb->data;
@@ -371,10 +352,10 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static int tse_rx(struct altera_tse_private *priv, int limit)
{
- unsigned int count = 0;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
unsigned int next_entry;
+ unsigned int count = 0;
struct sk_buff *skb;
- unsigned int entry = priv->rx_cons % priv->rx_ring_size;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
@@ -448,10 +429,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
static int tse_tx_complete(struct altera_tse_private *priv)
{
unsigned int txsize = priv->tx_ring_size;
- u32 ready;
- unsigned int entry;
struct tse_buffer *tx_buff;
+ unsigned int entry;
int txcomplete = 0;
+ u32 ready;
spin_lock(&priv->tx_lock);
@@ -497,8 +478,8 @@ static int tse_poll(struct napi_struct *napi, int budget)
{
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
- int rxcomplete = 0;
unsigned long int flags;
+ int rxcomplete = 0;
tse_tx_complete(priv);
@@ -561,13 +542,13 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
unsigned int txsize = priv->tx_ring_size;
- unsigned int entry;
- struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int nopaged_len = skb_headlen(skb);
+ struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
+ unsigned int entry;
spin_lock_bh(&priv->tx_lock);
@@ -619,117 +600,6 @@ out:
return ret;
}
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void altera_tse_adjust_link(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- /* only change config if there is a link */
- spin_lock(&priv->mac_cfg_lock);
- if (phydev->link) {
- /* Read old config */
- u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
-
- /* Check duplex */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- cfg_reg |= MAC_CMDCFG_HD_ENA;
- else
- cfg_reg &= ~MAC_CMDCFG_HD_ENA;
-
- netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
- dev->name, phydev->duplex);
-
- priv->oldduplex = phydev->duplex;
- }
-
- /* Check speed */
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- cfg_reg |= MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 100:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 10:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg |= MAC_CMDCFG_ENA_10;
- break;
- default:
- if (netif_msg_link(priv))
- netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
- priv->oldspeed = phydev->speed;
- }
- iowrite32(cfg_reg, &priv->mac_dev->command_config);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
-
- spin_unlock(&priv->mac_cfg_lock);
-}
-static struct phy_device *connect_local_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = NULL;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-
- if (priv->phy_addr != POLL_PHY) {
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
-
- netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
-
- phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
- priv->phy_iface);
- if (IS_ERR(phydev)) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
-
- } else {
- int ret;
- phydev = phy_find_first(priv->mdio);
- if (phydev == NULL) {
- netdev_err(dev, "No PHY found\n");
- return phydev;
- }
-
- ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
- priv->phy_iface);
- if (ret != 0) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
- }
- return phydev;
-}
-
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
@@ -768,91 +638,6 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
return 0;
}
-/* Initialize driver's PHY state, and attach to the PHY
- */
-static int init_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *phynode;
- bool fixed_link = false;
- int rc = 0;
-
- /* Avoid init phy in case of no phy present */
- if (!priv->phy_iface)
- return 0;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
- phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
-
- if (!phynode) {
- /* check if a fixed-link is defined in device-tree */
- if (of_phy_is_fixed_link(priv->device->of_node)) {
- rc = of_phy_register_fixed_link(priv->device->of_node);
- if (rc < 0) {
- netdev_err(dev, "cannot register fixed PHY\n");
- return rc;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phynode = of_node_get(priv->device->of_node);
- fixed_link = true;
-
- netdev_dbg(dev, "fixed-link detected\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link,
- 0, priv->phy_iface);
- } else {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev, "No phy-handle nor local mdio specified\n");
- return -ENODEV;
- }
- phydev = connect_local_phy(dev);
- }
- } else {
- netdev_dbg(dev, "phy-handle found\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link, 0, priv->phy_iface);
- }
- of_node_put(phynode);
-
- if (!phydev) {
- netdev_err(dev, "Could not find the PHY\n");
- if (fixed_link)
- of_phy_deregister_fixed_link(priv->device->of_node);
- return -ENODEV;
- }
-
- /* Stop Advertising 1000BASE Capability if interface is not GMII
- */
- if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
- (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phy_set_max_speed(phydev, SPEED_100);
-
- /* Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well. If a fixed-link is used the phy_id is always 0.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if ((phydev->phy_id == 0) && !fixed_link) {
- netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
- phy_disconnect(phydev);
- return -ENODEV;
- }
-
- netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
- phydev->mdio.addr, phydev->phy_id, phydev->link);
-
- return 0;
-}
-
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
@@ -1012,8 +797,8 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int i;
struct netdev_hw_addr *ha;
+ int i;
/* clear the hash filter */
for (i = 0; i < 64; i++)
@@ -1087,74 +872,14 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Initialise (if necessary) the SGMII PCS component
- */
-static int init_sgmii_pcs(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- int n;
- unsigned int tmp_reg = 0;
-
- if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
- return 0; /* Nothing to do, not in SGMII mode */
-
- /* The TSE SGMII PCS block looks a little like a PHY, it is
- * mapped into the zeroth MDIO space of the MAC and it has
- * ID registers like a PHY would. Sadly this is often
- * configured to zeroes, so don't be surprised if it does
- * show 0x00000000.
- */
-
- if (sgmii_pcs_scratch_test(priv, 0x0000) &&
- sgmii_pcs_scratch_test(priv, 0xffff) &&
- sgmii_pcs_scratch_test(priv, 0xa5a5) &&
- sgmii_pcs_scratch_test(priv, 0x5a5a)) {
- netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
- sgmii_pcs_read(priv, MII_PHYSID1),
- sgmii_pcs_read(priv, MII_PHYSID2));
- } else {
- netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
- return -ENOMEM;
- }
-
- /* Starting on page 5-29 of the MegaCore Function User Guide
- * Set SGMII Link timer to 1.6ms
- */
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
-
- /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
- sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
-
- /* Enable Autonegotiation */
- tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
- tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
-
- /* Reset PCS block */
- tmp_reg |= BMCR_RESET;
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
- for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
- if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
- netdev_info(dev, "SGMII PCS block initialised OK\n");
- return 0;
- }
- udelay(1);
- }
-
- /* We failed to reset the block, return a timeout */
- netdev_err(dev, "SGMII PCS block reset failed.\n");
- return -ETIMEDOUT;
-}
-
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned long flags;
int ret = 0;
int i;
- unsigned long int flags;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv);
@@ -1171,14 +896,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "TSE revision %x\n", priv->revision);
spin_lock(&priv->mac_cfg_lock);
- /* no-op if MAC not operating in SGMII mode*/
- ret = init_sgmii_pcs(dev);
- if (ret) {
- netdev_err(dev,
- "Cannot init the SGMII PCS (error: %d)\n", ret);
- spin_unlock(&priv->mac_cfg_lock);
- goto phy_error;
- }
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
@@ -1236,8 +953,12 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (dev->phydev)
- phy_start(dev->phydev);
+ ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
+ if (ret) {
+ netdev_err(dev, "could not connect phylink (%d)\n", ret);
+ goto tx_request_irq_error;
+ }
+ phylink_start(priv->phylink);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1265,13 +986,10 @@ phy_error:
static int tse_shutdown(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
unsigned long int flags;
+ int ret;
- /* Stop the PHY */
- if (dev->phydev)
- phy_stop(dev->phydev);
-
+ phylink_stop(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1317,11 +1035,79 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static void alt_tse_mac_an_restart(struct phylink_config *config)
+{
+}
+
+static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->mac_cfg_lock);
+ reset_mac(priv);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static void alt_tse_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void alt_tse_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+ ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= MAC_CMDCFG_HD_ENA;
+
+ if (speed == SPEED_1000)
+ ctrl |= MAC_CMDCFG_ETH_SPEED;
+ else if (speed == SPEED_10)
+ ctrl |= MAC_CMDCFG_ENA_10;
+
+ spin_lock(&priv->mac_cfg_lock);
+ csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX)
+ return priv->pcs;
+ else
+ return NULL;
+}
+
+static const struct phylink_mac_ops alt_tse_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_an_restart = alt_tse_mac_an_restart,
+ .mac_config = alt_tse_mac_config,
+ .mac_link_down = alt_tse_mac_link_down,
+ .mac_link_up = alt_tse_mac_link_up,
+ .mac_select_pcs = alt_tse_select_pcs,
+};
+
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
- struct resource *region;
struct device *device = &pdev->dev;
+ struct resource *region;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
@@ -1350,13 +1136,15 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- struct net_device *ndev;
- int ret = -ENODEV;
+ const struct of_device_id *of_id = NULL;
+ struct altera_tse_private *priv;
struct resource *control_port;
struct resource *dma_res;
- struct altera_tse_private *priv;
+ struct resource *pcs_res;
+ struct net_device *ndev;
void __iomem *descmap;
- const struct of_device_id *of_id = NULL;
+ int pcs_reg_width = 2;
+ int ret = -ENODEV;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1467,6 +1255,17 @@ static int altera_tse_probe(struct platform_device *pdev)
if (ret)
goto err_free_netdev;
+ /* SGMII PCS address space. The location can vary depending on how the
+ * IP is integrated. A dedicated "pcs" resource may describe it at a
+ * specific address, but if that is not the case, fall back to the
+ * mdio_phy0 registers within the MAC's address space.
+ */
+ ret = request_and_map(pdev, "pcs", &pcs_res,
+ &priv->pcs_base);
+ if (ret) {
+ priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
+ pcs_reg_width = 4;
+ }
/* Rx IRQ */
priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
@@ -1566,7 +1365,7 @@ static int altera_tse_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
- netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, tse_poll);
spin_lock_init(&priv->mac_cfg_lock);
spin_lock_init(&priv->tx_lock);
@@ -1590,11 +1389,32 @@ static int altera_tse_probe(struct platform_device *pdev)
(unsigned long) control_port->start, priv->rx_irq,
priv->tx_irq);
- ret = init_phy(ndev);
- if (ret != 0) {
- netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ priv->pcs = alt_tse_pcs_create(ndev, priv->pcs_base, pcs_reg_width);
+
+ priv->phylink_config.dev = &ndev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(priv->device->of_node),
+ priv->phy_iface, &alt_tse_phylink_ops);
+ if (IS_ERR(priv->phylink)) {
+ dev_err(&pdev->dev, "failed to create phylink\n");
+ ret = PTR_ERR(priv->phylink);
goto err_init_phy;
}
+
return 0;
err_init_phy:
@@ -1614,16 +1434,10 @@ static int altera_tse_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
-
- if (of_phy_is_fixed_link(priv->device->of_node))
- of_phy_deregister_fixed_link(priv->device->of_node);
- }
-
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 39242c5a1729..98d6386b7f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -462,8 +462,8 @@ static void ena_get_drvinfo(struct net_device *dev,
{
struct ena_adapter *adapter = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 6a356a6cee15..d350eeec8bad 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2265,10 +2265,8 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
- netif_napi_add(adapter->netdev,
- &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &napi->napi,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
if (!ENA_IS_XDP_INDEX(adapter, i)) {
napi->rx_ring = &adapter->rx_ring[i];
@@ -3166,7 +3164,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strlcpy(host_info->kernel_ver_str, utsname()->version,
+ strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3a351d3396bf..68983b717145 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -695,7 +695,7 @@ static int a2065_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct lance_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct lance_regs));
release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 5d1baa01360f..ea6cfc2095e1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -43,7 +43,7 @@ Revision History:
3.0.4 12/09/2003
1. Added set_mac_address routine for bonding driver support.
2. Tested the driver for bonding support
- 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth
+ 3. Bug fix: Fixed mismatch in actual receive buffer length and length
indicated to the h/w.
4. Modified amd8111e_rx() routine to receive all the received packets
in the first interrupt.
@@ -185,24 +185,23 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option) {
-
- default:
- case SPEED_AUTONEG: /* advertise all values */
- tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- break;
- case SPEED10_HALF:
- tmp |= ADVERTISE_10HALF;
- break;
- case SPEED10_FULL:
- tmp |= ADVERTISE_10FULL;
- break;
- case SPEED100_HALF:
- tmp |= ADVERTISE_100HALF;
- break;
- case SPEED100_FULL:
- tmp |= ADVERTISE_100FULL;
- break;
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
}
if(advert != tmp)
@@ -237,7 +236,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff != NULL) {
+ if (rx_skbuff) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -1084,7 +1083,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if (unlikely(dev == NULL))
+ if (unlikely(!dev))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1109,7 +1108,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
if (napi_schedule_prep(&lp->napi)) {
- /* Disable receive interupts */
+ /* Disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
@@ -1364,10 +1363,10 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1554,7 +1553,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
- /* Adapter is already stoped/suspended/interrupt-disabled */
+ /* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 37da79da5f5e..9d570adb295b 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -600,7 +600,7 @@ typedef enum {
#define CSTATE 1
#define SSTATE 2
-/* Assume contoller gets data 10 times the maximum processing time */
+/* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
/* amd8111e descriptor flag definitions */
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4ea7b9f3c424..38153e633231 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,7 @@ static int ariadne_rx(struct net_device *dev)
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
@@ -731,7 +731,7 @@ static int ariadne_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct Am79C960));
release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 27869164c6e6..3222c48ce6ae 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -581,15 +581,15 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* Get the ethernet address */
switch( lp->cardtype ) {
- case OLD_RIEBL:
+ case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
- case NEW_RIEBL:
+ case NEW_RIEBL:
lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
eth_hw_addr_set(dev, addr);
break;
- case PAM_CARD:
+ case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
addr[i] =
@@ -854,7 +854,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
int csr0, boguscnt = 10;
int handled = 0;
- if (dev == NULL) {
+ if (!dev) {
DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
return IRQ_NONE;
}
@@ -995,7 +995,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d5f2c6989221..c5cec4e79489 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -650,7 +650,7 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -786,7 +786,7 @@ static int au1000_rx(struct net_device *dev)
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
continue;
}
@@ -1199,7 +1199,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL) {
+ if (!aup->mii_bus) {
dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
@@ -1284,7 +1284,7 @@ static int au1000_probe(struct platform_device *pdev)
return 0;
err_out:
- if (aup->mii_bus != NULL)
+ if (aup->mii_bus)
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 462016666752..fb8686214a32 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -880,7 +880,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
rx_buff = skb->data;
else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
- if (rx_buff == NULL)
+ if (!rx_buff)
lp->rx_ring[i].base = 0;
else
lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
@@ -1186,7 +1186,7 @@ lance_rx(struct net_device *dev)
else
{
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
+ if (!skb)
{
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
for (i=0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 30ee5329bd7c..823a329a921f 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -485,10 +485,10 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
data = inb(ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -512,10 +512,10 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -567,13 +567,13 @@ static int mace_init(mace_private *lp, unsigned int ioaddr,
* Or just set ASEL in PHYCC below!
*/
switch (if_port) {
- case 1:
+ case 1:
mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
break;
- case 2:
+ case 2:
mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
break;
- default:
+ default:
mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
/* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
and the MACE device will automatically select the operating media
@@ -815,7 +815,7 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -918,7 +918,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
- if (dev == NULL) {
+ if (!dev) {
pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
irq);
return IRQ_NONE;
@@ -1102,7 +1102,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index b5ff47283cfe..72db9f9e7bee 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -488,7 +488,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_tx_ring == NULL)
+ if (!new_tx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -547,7 +547,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_rx_ring == NULL)
+ if (!new_rx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -797,9 +797,9 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (lp->pci_dev)
- strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ strscpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
@@ -1249,7 +1249,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
return;
}
@@ -2018,7 +2018,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
&lp->tx_ring_dma_addr, GFP_KERNEL);
- if (lp->tx_ring == NULL) {
+ if (!lp->tx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2026,7 +2026,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
&lp->rx_ring_dma_addr, GFP_KERNEL);
- if (lp->rx_ring == NULL) {
+ if (!lp->rx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2365,7 +2365,7 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
+ if (!rx_skbuff) {
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 007bd7787291..246f34c43765 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -341,7 +341,7 @@ static int __init lance_probe( struct net_device *dev)
/* XXX - leak? */
MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
- if (MEM == NULL) {
+ if (!MEM) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -796,7 +796,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 22d609563af8..68ca1225eedc 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -530,7 +530,7 @@ static void lance_rx_dvma(struct net_device *dev)
len = (rd->mblength & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -700,7 +700,7 @@ static void lance_rx_pio(struct net_device *dev)
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1276,7 +1276,7 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strscpy(info->driver, "sunlance", sizeof(info->driver));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f342bb853189..7b666106feee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -952,14 +952,14 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
+ xgbe_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
+ xgbe_all_poll);
napi_enable(&pdata->napi);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6ceb1cdf6eba..6e83ff59172a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -402,8 +402,8 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index d022b6db9e06..379d19d18dbe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -672,7 +672,7 @@ static int xge_probe(struct platform_device *pdev)
if (ret)
goto err;
- netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &pdata->napi, xge_napi);
ret = register_netdev(ndev);
if (ret) {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 53dc8d5fede8..d6cfea65a714 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1977,14 +1977,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
napi = &pdata->rx_ring[i]->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
for (i = 0; i < pdata->cq_cnt; i++) {
napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 1daecd483b8d..a08f221e30d4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -238,7 +238,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
"%u.%u.%u", firmware_version >> 24,
(firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
- strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 02058fe79f52..3d0e16791e1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -292,9 +292,6 @@ static int aq_mdo_dev_open(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
@@ -306,9 +303,6 @@ static int aq_mdo_dev_stop(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
@@ -466,9 +460,6 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
@@ -492,9 +483,6 @@ static int aq_mdo_upd_secy(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
@@ -543,9 +531,6 @@ static int aq_mdo_del_secy(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
@@ -601,9 +586,6 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
@@ -631,9 +613,6 @@ static int aq_mdo_upd_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
@@ -681,9 +660,6 @@ static int aq_mdo_del_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -780,9 +756,6 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
@@ -809,9 +782,6 @@ static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
@@ -876,9 +846,6 @@ static int aq_mdo_del_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
@@ -948,9 +915,6 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
@@ -978,9 +942,6 @@ static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
@@ -1029,9 +990,6 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -1044,9 +1002,6 @@ static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
@@ -1073,9 +1028,6 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
@@ -1106,9 +1058,6 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
@@ -1147,9 +1096,6 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
@@ -1196,9 +1142,6 @@ static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
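The aq_macsec.c hunks above all delete the same early return on ctx->prepare. That guard belonged to the MACsec offload core's old two-pass prepare/commit scheme; once the core stopped issuing the dry-run pass, each offload op is called exactly once and the check became dead code. A hypothetical op shape after the change (the body is invented for illustration, it is not the aq_macsec implementation):

    #include <linux/netdevice.h>
    #include <net/macsec.h>

    static int foo_mdo_dev_open(struct macsec_context *ctx)
    {
            /* No ctx->prepare check: the core now calls this once, to commit. */
            netdev_dbg(ctx->netdev, "enable MACsec offload for %s\n",
                       ctx->secy->netdev->name);
            return 0;
    }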
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 88595863d8bc..8a0af371e7dc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -94,11 +94,8 @@ static int aq_ndev_close(struct net_device *ndev)
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 275324c9e51e..80b44043e6c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1217,8 +1217,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
- netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
aq_ptp->idx_vector = idx_vec;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f0fdf20f01c1..f5db1c44e9b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -119,8 +119,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->tx_rings = 0;
self->rx_rings = 0;
- netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
err_exit:
return self;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 288e2961823e..ba0646b3b122 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -91,7 +91,7 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index 6ba5b024a7be..8b7cdf015a16 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -293,7 +293,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
skb_put(skb, padlen);
/* EOP header */
- memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+ skb_put_data(skb, &info.eop, TX_EOP_SIZE);
skb_unlink(skb, q);
@@ -381,7 +381,7 @@ static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
return 1;
}
-static int
+static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
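Two smaller cleanups show up in the ax88796c hunks: an open-coded memcpy(skb_put(...), ...) folded into skb_put_data(), which performs exactly that copy, and the .ndo_start_xmit return type corrected from int to netdev_tx_t, the enum the core expects (NETDEV_TX_OK or NETDEV_TX_BUSY). A self-contained sketch, not the ax88796c code, assuming the skb has tailroom for the appended trailer:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            static const u8 trailer[4] = { 0xaa, 0x55, 0xaa, 0x55 };

            /* skb_put_data(skb, p, len) == memcpy(skb_put(skb, len), p, len);
             * the caller must guarantee enough tailroom.
             */
            skb_put_data(skb, trailer, sizeof(trailer));

            dev_kfree_skb_any(skb);         /* stand-in for queueing to hardware */
            return NETDEV_TX_OK;
    }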
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index e461f4764066..cc932b3cf873 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -451,8 +451,8 @@ static void ag71xx_get_drvinfo(struct net_device *ndev,
{
struct ag71xx *ag = netdev_priv(ndev);
- strlcpy(info->driver, "ag71xx", sizeof(info->driver));
- strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ strscpy(info->driver, "ag71xx", sizeof(info->driver));
+ strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index a89b93cb4e26..d30d11872719 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -752,7 +752,7 @@ static int alx_alloc_napis(struct alx_priv *alx)
goto err_out;
np->alx = alx;
- netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ netif_napi_add(alx->dev, &np->napi, alx_poll);
alx->qnapi[i] = np;
}
@@ -1912,11 +1912,14 @@ static int alx_suspend(struct device *dev)
if (!netif_running(alx->dev))
return 0;
+
+ rtnl_lock();
netif_device_detach(alx->dev);
mutex_lock(&alx->mtx);
__alx_stop(alx);
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return 0;
}
@@ -1927,6 +1930,7 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ rtnl_lock();
mutex_lock(&alx->mtx);
alx_reset_phy(hw);
@@ -1943,6 +1947,7 @@ static int alx_resume(struct device *dev)
unlock:
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return err;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index e2eb7b8c63a0..0bce122c68f1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -220,8 +220,8 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be4b1f8eef29..40c781695d58 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2732,7 +2732,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_set_threaded(netdev, true);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
- atl1c_clean_rx, 64);
+ atl1c_clean_rx);
for (i = 0; i < adapter->tx_queue_count; ++i)
netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
atl1c_clean_tx);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 0cbde352d1ba..68f1832a198d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -306,9 +306,9 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 57a51fb7746c..5db0f3495a32 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2354,7 +2354,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
- netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean);
timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index ff1fe09abf9f..c8444bcdf527 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2977,7 +2977,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl1_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
netdev->ethtool_ops = &atl1_ethtool_ops;
adapter->bd_number = cards_found;
@@ -3340,8 +3340,8 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bbc4d7b08a49..1b487c071cb6 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1980,9 +1980,9 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 56e0fb07aec7..f4e1ca68d831 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -53,8 +53,8 @@ config B44_PCI
config BCM4908_ENET
tristate "Broadcom BCM4908 internal mac support"
- depends on ARCH_BCM4908 || COMPILE_TEST
- default y if ARCH_BCM4908
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ default y if ARCH_BCMBCA
help
This driver supports Ethernet controller integrated into Broadcom
BCM4908 family SoCs.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e5857e88c207..7f876721596c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1790,13 +1790,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
- strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
- strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
@@ -2375,7 +2375,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
- netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ netif_napi_add(dev, &bp->napi, b44_poll);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index c131d8118489..93ccf549e2ed 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -507,7 +507,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
return 0;
}
-static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
@@ -716,6 +716,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
err = of_get_ethdev_address(dev->of_node, netdev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma_free;
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
@@ -723,17 +725,20 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
- netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
err = register_netdev(netdev);
- if (err) {
- bcm4908_enet_dma_free(enet);
- return err;
- }
+ if (err)
+ goto err_dma_free;
platform_set_drvdata(pdev, enet);
return 0;
+
+err_dma_free:
+ bcm4908_enet_dma_free(enet);
+
+ return err;
}
static int bcm4908_enet_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 1c6aea12db72..d91fdb0c2649 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1321,8 +1321,8 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 47fc8e6963d5..867f14c30e09 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -308,8 +308,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -2564,7 +2564,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
dev->netdev_ops = &bcm_sysport_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 93580484a3f4..5fb3af5670ec 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,7 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
@@ -1395,8 +1395,8 @@ static void bgmac_get_ethtool_stats(struct net_device *dev,
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
@@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);
err = bgmac_phy_connect(bgmac);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b97ed9b5f685..fec57f1982c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -176,12 +176,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
- "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ "Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
- "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ "Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@@ -7042,9 +7042,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -8522,7 +8522,7 @@ bnx2_init_napi(struct bnx2 *bp)
else
poll = bnx2_poll_msix;
- netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
bnapi->bp = bp;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 712b5595bc39..16c490692f42 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -44,8 +44,7 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -55,8 +54,7 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -150,7 +148,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(buf, bp->fw_ver, buf_len);
+ strscpy(buf, bp->fw_ver, buf_len);
snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
@@ -789,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 0e319ac7799f..bda3ccc28eca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,7 +1112,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
int ext_dev_info_offset;
u32 mbi;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -1126,7 +1126,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
(mbi & 0xff000000) >> 24,
(mbi & 0x00ff0000) >> 16,
(mbi & 0x0000ff00) >> 8);
- strlcpy(info->fw_version, version,
+ strscpy(info->fw_version, version,
sizeof(info->fw_version));
}
}
@@ -1135,7 +1135,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
strlcat(info->fw_version, version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 962253db25b8..51b1690fd045 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3385,7 +3385,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 2dac704dc346..02a4e557e176 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -518,7 +518,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c9129b9ba446..0657a0f5170f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -380,7 +380,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f46eefb5a029..eed98c10ca9d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -659,7 +659,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -668,6 +667,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -688,8 +689,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
+ skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -698,9 +700,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
}
netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
@@ -9366,16 +9366,16 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
+ bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 87eb5362ad70..f57e524c7e30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1371,9 +1371,9 @@ static void bnxt_get_drvinfo(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
@@ -3876,7 +3876,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strscpy(str, fw_str, ETH_GSTRING_LEN);
strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
if (test_info->offline_mask & (1 << i))
strncat(str, " (offline)",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 7f3c0875b6f5..2132ce63193c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -317,9 +317,9 @@ void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE))) {
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
- PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE);
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
}
@@ -505,9 +505,13 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
ptp->tstamp_filters = flags;
if (netif_running(bp->dev)) {
- rc = bnxt_close_nic(bp, false, false);
- if (!rc)
- rc = bnxt_open_nic(bp, false, false);
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
if (!rc && !ptp->tstamp_filters)
rc = -EIO;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index eb4803b11c0e..fcc65890820a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -222,7 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 8309fb993cdb..25c450606985 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1146,7 +1146,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -2707,8 +2707,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
bcmgenet_init_rx_coalesce(ring);
/* Initialize Rx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index db1e9d810b41..4179a12fc881 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7380,9 +7380,9 @@ static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -12302,9 +12302,9 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 29dd0f93d6c0..d6d90f9722a7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1891,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, NAPI_POLL_WEIGHT);
+ bnad_napi_poll_rx);
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 8aca768571b2..df10edff5603 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -114,7 +114,7 @@ static const char *bnad_net_stats_strings[] = {
"mac_tx_deferral",
"mac_tx_excessive_deferral",
"mac_tx_single_collision",
- "mac_tx_muliple_collision",
+ "mac_tx_multiple_collision",
"mac_tx_late_collision",
"mac_tx_excessive_collision",
"mac_tx_total_collision",
@@ -283,7 +283,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -291,12 +291,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66c7d08d376a..51c9fd6f68a4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -3977,8 +3978,8 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
spin_lock_init(&queue->tx_ptr_lock);
- netif_napi_add(dev, &queue->napi_rx, macb_rx_poll, NAPI_POLL_WEIGHT);
- netif_napi_add(dev, &queue->napi_tx, macb_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -4621,6 +4622,25 @@ static int init_reset_optional(struct platform_device *pdev)
"failed to init SGMII PHY\n");
}
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
/* Fully reset controller at hardware level if mapped in device tree */
ret = device_reset_optional(&pdev->dev);
if (ret) {
@@ -4629,6 +4649,8 @@ static int init_reset_optional(struct platform_device *pdev)
}
ret = macb_init(pdev);
+
+err_out_phy_exit:
if (ret)
phy_exit(bp->sgmii_phy);
@@ -5109,6 +5131,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -5198,6 +5221,9 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
phylink_start(bp->phylink);
rtnl_unlock();
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 1281d1565ef8..f4f87dfa9687 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1792,7 +1792,7 @@ static int xgmac_probe(struct platform_device *pdev)
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
- netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ netif_napi_add(ndev, &priv->napi, xgmac_poll);
ret = register_netdev(ndev);
if (ret)
goto err_reg;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 3f1c189646f4..a0fd32476225 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -87,8 +87,8 @@
*/
#define CN23XX_SLI_PKT_IN_JABBER 0x29170
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
index d33dd8f4226f..e956109415cd 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -36,8 +36,8 @@
#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..882b2be06ea0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -851,7 +851,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
napi = &droq->napi;
dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
(u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index bee35ce60171..d312bd594935 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,11 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct lio_trusted_vf_ctx {
- struct completion complete;
- int status;
-};
-
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 103591dcea1c..edde0b8fa49c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1342,7 +1342,7 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1396,8 +1396,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
p = netdev_priv(netdev);
- netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
- OCTEON_MGMT_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
p->netdev = netdev;
p->dev = &pdev->dev;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5a9fad61e9ea..e5c71f907852 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -191,8 +191,8 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768ea426d49f..98f3dc460ca7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1472,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
}
cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
- netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
napi_enable(&cq_poll->napi);
nic->napi[qidx] = cq_poll;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index f4054d2553ea..d2286adf09fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,8 +429,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -1053,7 +1053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
- netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, t1_poll);
netdev->ethtool_ops = &t1_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 174b1e156669..a52e6b6e2876 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -609,8 +609,7 @@ static void init_napi(struct adapter *adap)
struct sge_qset *qs = &adap->sge.qs[i];
if (qs->adap)
- netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
- 64);
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
}
/*
@@ -1627,8 +1626,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 77897edd2bc0..8477a93cee6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -199,8 +199,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = netdev2adap(dev);
u32 exprom_vers;
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d0061921529f..9cbce1faab26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3903,8 +3903,8 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ee52e3b1d74f..46809e2d94ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -4467,7 +4467,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
if (ret)
goto err;
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &iq->napi, napi_rx_handler);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index c2822e635f89..54db79f4dcfe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1553,8 +1553,8 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 43b2ceb6aa32..2d0cf76fb3c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2336,7 +2336,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
if (ret)
goto err;
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler);
rspq->cur_desc = rspq->desc;
rspq->cidx = 0;
rspq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index ddfe9208529a..f90bfba4b303 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1069,8 +1069,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
-static void inet_inherit_port(struct inet_hashinfo *hash_info,
- struct sock *lsk, struct sock *newsk)
+static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
local_bh_disable();
__inet_inherit_port(lsk, newsk);
@@ -1240,7 +1239,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
- inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ inet_inherit_port(lsk, newsk);
csk_set_flag(csk, CSK_CONN_INLINE);
bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 9098b3eed4da..1e55b12fee51 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -193,7 +193,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
{
struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 21ba6e893072..8627ab19d470 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -689,7 +689,7 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
@@ -812,7 +812,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 60d8c0fbc037..08b7cc0a1809 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,10 +131,10 @@ static void enic_get_drvinfo(struct net_device *netdev,
if (err == -ENOMEM)
return;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 372fb7b3a282..29500d32e362 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2633,16 +2633,17 @@ static int enic_dev_init(struct enic *enic)
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
- netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ netif_napi_add(netdev, &enic->napi[0], enic_poll);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
netif_napi_add(netdev, &enic->napi[i],
- enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+ enic_poll_msix_rq);
}
for (i = 0; i < enic->wq_count; i++)
- netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
- enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev,
+ &enic->napi[enic_cq_wq(enic, i)],
+ enic_poll_msix_wq);
break;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9e6de2f968fa..fdf10318758b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
@@ -2471,7 +2471,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
ret = of_get_mac_address(np, mac);
if (!ret) {
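
For the gemini stats readers above, a sketch of the u64_stats reader loop (struct example_port and the field names are placeholders mirroring the hunk): on 64-bit builds the syncp is a no-op, on 32-bit it is a seqcount, and the _irq flavour is used when the writer side can run in interrupt context.

	static u64 example_read_rx_packets(struct example_port *port)
	{
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
			packets = port->stats.rx_packets;
		} while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));

		return packets;
	}
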
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0985ab216566..b21e56de6167 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -28,8 +28,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -540,8 +539,8 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
struct board_info *dm = to_dm9000_board(dev);
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
@@ -1012,7 +1011,7 @@ static void dm9000_send_packet(struct net_device *dev,
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int
+static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
@@ -1421,8 +1420,7 @@ dm9000_probe(struct platform_device *pdev)
int iosize;
int i;
u32 id_val;
- int reset_gpios;
- enum of_gpio_flags flags;
+ struct gpio_desc *reset_gpio;
struct regulator *power;
bool inv_mac_addr = false;
u8 addr[ETH_ALEN];
@@ -1442,20 +1440,24 @@ dm9000_probe(struct platform_device *pdev)
dev_dbg(dev, "regulator enabled\n");
}
- reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
- &flags);
- if (gpio_is_valid(reset_gpios)) {
- ret = devm_gpio_request_one(dev, reset_gpios, flags,
- "dm9000_reset");
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(reset_gpio);
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio: %d\n", ret);
+ goto out_regulator_disable;
+ }
+
+ if (reset_gpio) {
+ ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
if (ret) {
- dev_err(dev, "failed to request reset gpio %d: %d\n",
- reset_gpios, ret);
+ dev_err(dev, "failed to set reset gpio name: %d\n",
+ ret);
goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
msleep(2);
- gpio_set_value(reset_gpios, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
/* Needs 3ms to read eeprom when PWRST is deasserted */
msleep(4);
}
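
The dm9000 reset sequence after the gpiod conversion, as a sketch (example_hw_reset() is hypothetical; the delays are the ones quoted in the hunk's comments): GPIOD_OUT_HIGH asserts the reset line at request time with the active-low polarity taken from the device tree, so the driver deasserts it by writing a logical 0.

	static int example_hw_reset(struct device *dev)
	{
		struct gpio_desc *reset;

		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		if (IS_ERR(reset))
			return dev_err_probe(dev, PTR_ERR(reset),
					     "failed to request reset gpio\n");

		if (reset) {
			msleep(2);			/* PWRST# low period, min 1 ms */
			gpiod_set_value_cansleep(reset, 0);	/* deassert reset */
			msleep(4);			/* EEPROM readable ~3 ms later */
		}

		return 0;
	}
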
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d51b3d24a0c8..cd3dc4b89518 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1606,8 +1606,8 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83f1727d1423..3188ba7b450f 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1074,8 +1074,8 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int dmfe_ethtool_set_wol(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index b8e46c4849ef..ecfad43df45a 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -858,8 +858,8 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 77d9058431e3..ff080ab0f116 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -971,8 +971,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1db19463fd46..37fba39c0056 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1374,8 +1374,8 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a301f7e6a440..2c67a857a42f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1235,8 +1235,8 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "dl2k", sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 8dd7bf9014ec..43def191f26f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1644,8 +1644,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 92462ed87bc4..08184f20f510 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,8 +725,8 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, "0", sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -788,7 +788,7 @@ static int dnet_probe(struct platform_device *pdev)
}
dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ netif_napi_add(dev, &bp->napi, dnet_poll);
dev->ethtool_ops = &dnet_ethtool_ops;
dev->base_addr = (unsigned long)bp->regs;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index b4f5e57d0285..08ec84cd21c0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1878,9 +1878,9 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
sizeof(adapter->fw_ver));
- strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
sizeof(adapter->fw_on_flash));
}
err:
@@ -2373,7 +2373,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2442,9 +2442,9 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
0, PAGE_DATA_LEN, page_data);
if (!status) {
- strlcpy(adapter->phy.vendor_name, page_data +
+ strscpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
- strlcpy(adapter->phy.vendor_pn,
+ strscpy(adapter->phy.vendor_pn,
page_data + SFP_VENDOR_PN_OFFSET,
SFP_VENDOR_NAME_LEN - 1);
}
@@ -2473,7 +2473,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bd0df189d871..77edc3d9b505 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -220,15 +220,15 @@ static void be_get_drvinfo(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
- strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 414362febbb9..a92a74761546 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2982,8 +2982,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
}
return 0;
}
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
index f4e2b1102d8f..3df6bf476ae7 100644
--- a/drivers/net/ethernet/engleder/Kconfig
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -21,6 +21,7 @@ config TSNEP
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
+ select PAGE_POOL
help
Support for the Engleder TSN endpoint Ethernet MAC IP Core.
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index cce2191cb889..b6e3b16623de 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_TSNEP) += tsnep.o
tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
- $(tsnep-y)
+ tsnep_rxnfc.o $(tsnep-y)
tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 23bbece6b7de..09a723b827c7 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -21,8 +21,6 @@
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
-#define TSNEP_QUEUES 1
-
struct tsnep_gcl {
void __iomem *addr;
@@ -39,6 +37,24 @@ struct tsnep_gcl {
bool change;
};
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
struct tsnep_tx_entry {
struct tsnep_tx_desc *desc;
struct tsnep_tx_desc_wb *desc_wb;
@@ -55,6 +71,7 @@ struct tsnep_tx_entry {
struct tsnep_tx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -79,14 +96,15 @@ struct tsnep_rx_entry {
u32 properties;
- struct sk_buff *skb;
+ struct page *page;
size_t len;
- DEFINE_DMA_UNMAP_ADDR(dma);
+ dma_addr_t dma;
};
struct tsnep_rx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -95,6 +113,7 @@ struct tsnep_rx {
int read;
u32 owner_counter;
int increment_owner_counter;
+ struct page_pool *page_pool;
u32 packets;
u32 bytes;
@@ -104,12 +123,14 @@ struct tsnep_rx {
struct tsnep_queue {
struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 9];
struct tsnep_tx *tx;
struct tsnep_rx *rx;
struct napi_struct napi;
+ int irq;
u32 irq_mask;
};
@@ -125,7 +146,6 @@ struct tsnep_adapter {
struct platform_device *pdev;
struct device *dmadev;
void __iomem *addr;
- int irq;
bool gate_control;
/* gate control lock */
@@ -140,6 +160,12 @@ struct tsnep_adapter {
/* ptp clock lock */
spinlock_t ptp_lock;
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
int num_tx_queues;
struct tsnep_tx tx[TSNEP_MAX_QUEUES];
int num_rx_queues;
@@ -160,6 +186,18 @@ void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
int tsnep_ethtool_get_test_count(void);
void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index e6760dc68ddd..a713a126b227 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -250,6 +250,44 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int tsnep_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int tsnep_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -287,6 +325,8 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_strings = tsnep_ethtool_get_strings,
.get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
.get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
index 916ceac3ada2..315dada75323 100644
--- a/drivers/net/ethernet/engleder/tsnep_hw.h
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -34,6 +34,7 @@
#define ECM_INT_LINK 0x00000020
#define ECM_INT_TX_0 0x00000100
#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
#define ECM_INT_ALL 0x7FFFFFFF
#define ECM_INT_DISABLE 0x80000000
@@ -92,8 +93,7 @@
/* tsnep register */
#define TSNEP_INFO 0x0100
-#define TSNEP_INFO_RX_ASSIGN 0x00010000
-#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_INFO_TX_TIME 0x00010000
#define TSNEP_CONTROL 0x0108
#define TSNEP_CONTROL_TX_RESET 0x00000001
#define TSNEP_CONTROL_TX_ENABLE 0x00000002
@@ -122,10 +122,6 @@
#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
-#define TSNEP_RX_ASSIGN 0x01A0
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
#define TSNEP_MAC_ADDRESS_LOW 0x0800
#define TSNEP_MAC_ADDRESS_HIGH 0x0804
#define TSNEP_RX_FILTER 0x0806
@@ -152,6 +148,14 @@
#define TSNEP_GCL_A 0x2000
#define TSNEP_GCL_B 0x2800
#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
/* tsnep gate control list operation */
struct tsnep_gcl_operation {
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index a5f7152a1716..48fb391951dd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -27,10 +27,10 @@
#include <linux/phy.h>
#include <linux/iopoll.h>
-#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
- TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
-#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
-#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
@@ -60,22 +60,29 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
/* handle link interrupt */
- if ((active & ECM_INT_LINK) != 0) {
- if (adapter->netdev->phydev)
- phy_mac_interrupt(adapter->netdev->phydev);
- }
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- if (adapter->netdev) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
- }
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
}
return IRQ_HANDLED;
}
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(&queue->napi);
+
+ return IRQ_HANDLED;
+}
+
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
struct tsnep_adapter *adapter = bus->priv;
@@ -124,30 +131,51 @@ static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
return 0;
}
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
static void tsnep_phy_link_status_change(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
- u32 mode;
- if (phydev->link) {
- switch (phydev->speed) {
- case SPEED_100:
- mode = ECM_LINK_MODE_100;
- break;
- case SPEED_1000:
- mode = ECM_LINK_MODE_1000;
- break;
- default:
- mode = ECM_LINK_MODE_OFF;
- break;
- }
- iowrite32(mode, adapter->addr + ECM_STATUS);
- }
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
phy_print_status(netdev->phydev);
}
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int retval;
+
+ retval = phy_loopback(adapter->phydev, enable);
+
+ /* PHY link state change is not signaled if loopback is enabled, it
+ * would delay a working loopback anyway, let's ensure that loopback
+ * is working immediately by setting link mode directly
+ */
+ if (!retval && enable)
+ tsnep_set_link_mode(adapter);
+
+ return retval;
+}
+
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
@@ -241,14 +269,14 @@ alloc_failed:
return retval;
}
-static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
{
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
if (entry->skb) {
- entry->properties =
- skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
@@ -313,6 +341,7 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
struct tsnep_tx_entry *entry;
unsigned int len;
dma_addr_t dma;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -335,15 +364,18 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
dma_unmap_addr_set(entry, dma, dma);
entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
}
- return 0;
+ return map_len;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -360,9 +392,12 @@ static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
+ map_len += entry->len;
entry->len = 0;
}
}
+
+ return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
@@ -371,6 +406,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
+ int length;
int i;
int retval;
@@ -394,7 +430,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry->skb = skb;
retval = tsnep_tx_map(skb, tx, count);
- if (retval != 0) {
+ if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -407,12 +443,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ length = retval;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
for (i = 0; i < count; i++)
- tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
i == (count - 1));
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
@@ -428,9 +465,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_queue(tx->adapter->netdev);
}
- tx->packets++;
- tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
-
spin_unlock_irqrestore(&tx->lock, flags);
return NETDEV_TX_OK;
@@ -442,6 +476,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
int budget = 128;
struct tsnep_tx_entry *entry;
int count;
+ int length;
spin_lock_irqsave(&tx->lock, flags);
@@ -464,7 +499,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, tx->read, count);
+ length = tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -491,6 +526,9 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
budget--;
} while (likely(budget));
@@ -505,7 +543,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
}
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_tx *tx)
+ int queue_index, struct tsnep_tx *tx)
{
dma_addr_t dma;
int retval;
@@ -513,6 +551,7 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(tx, 0, sizeof(*tx));
tx->adapter = adapter;
tx->addr = addr;
+ tx->queue_index = queue_index;
retval = tsnep_tx_ring_init(tx);
if (retval)
@@ -548,14 +587,15 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
- if (dma_unmap_addr(entry, dma))
- dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
- dma_unmap_len(entry, len),
- DMA_FROM_DEVICE);
- if (entry->skb)
- dev_kfree_skb(entry->skb);
+ if (entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ entry->page = NULL;
}
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
memset(rx->entry, 0, sizeof(rx->entry));
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
@@ -568,31 +608,19 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
}
}
-static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
- struct tsnep_rx_entry *entry)
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
{
- struct device *dmadev = rx->adapter->dmadev;
- struct sk_buff *skb;
- dma_addr_t dma;
+ struct page *page;
- skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
- GFP_ATOMIC | GFP_DMA);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- skb_reserve(skb, RX_SKB_RESERVE);
-
- dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dmadev, dma)) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- entry->skb = skb;
- entry->len = RX_SKB_LENGTH;
- dma_unmap_addr_set(entry, dma, dma);
- entry->desc->rx = __cpu_to_le64(dma);
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
return 0;
}
@@ -601,6 +629,7 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
struct tsnep_rx_entry *next_entry;
int i, j;
int retval;
@@ -622,12 +651,28 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
}
}
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_SKB_PAD;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (retval)
goto failed;
}
@@ -643,7 +688,7 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
- /* RX_SKB_LENGTH is a multiple of 4 */
+ /* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (index == rx->increment_owner_counter) {
@@ -666,19 +711,52 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
entry->desc->properties = __cpu_to_le32(entry->properties);
}
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_SKB_PAD);
+
+ skb_shinfo(skb)->tx_flags |=
+ SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
int done = 0;
+ enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
+ struct page *page;
struct sk_buff *skb;
- size_t len;
- dma_addr_t dma;
int length;
bool enable = false;
int retval;
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+
while (likely(done < budget)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
@@ -691,42 +769,34 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
*/
dma_rmb();
- skb = entry->skb;
- len = dma_unmap_len(entry, len);
- dma = dma_unmap_addr(entry, dma);
+ prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
+ length, dma_dir);
+ page = entry->page;
/* forward skb only if allocation is successful, otherwise
- * skb is reused and frame dropped
+ * page is reused and frame dropped
*/
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (!retval) {
- dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
-
- length = __le32_to_cpu(entry->desc_wb->properties) &
- TSNEP_DESC_LENGTH_MASK;
- skb_put(skb, length - ETH_FCS_LEN);
- if (rx->adapter->hwtstamp_config.rx_filter ==
- HWTSTAMP_FILTER_ALL) {
- struct skb_shared_hwtstamps *hwtstamps =
- skb_hwtstamps(skb);
- struct tsnep_rx_inline *rx_inline =
- (struct tsnep_rx_inline *)skb->data;
-
- skb_shinfo(skb)->tx_flags |=
- SKBTX_HW_TSTAMP_NETDEV;
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->netdev_data = rx_inline;
- }
- skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
- skb->protocol = eth_type_trans(skb,
- rx->adapter->netdev);
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ page_pool_release_page(rx->page_pool, page);
- rx->packets++;
- rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
- if (skb->pkt_type == PACKET_MULTICAST)
- rx->multicast++;
+ rx->packets++;
+ rx->bytes += length -
+ TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
- napi_gro_receive(napi, skb);
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ rx->dropped++;
+ }
done++;
} else {
rx->dropped++;
@@ -752,7 +822,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
}
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_rx *rx)
+ int queue_index, struct tsnep_rx *rx)
{
dma_addr_t dma;
int i;
@@ -761,6 +831,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(rx, 0, sizeof(*rx));
rx->adapter = adapter;
rx->addr = addr;
+ rx->queue_index = queue_index;
retval = tsnep_rx_ring_init(rx);
if (retval)
@@ -821,6 +892,56 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
return min(done, budget - 1);
}
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ sprintf(queue->name, "%s-txrx-%d", name,
+ queue->rx->queue_index);
+ else if (queue->tx)
+ sprintf(queue->name, "%s-tx-%d", name,
+ queue->tx->queue_index);
+ else
+ sprintf(queue->name, "%s-rx-%d", name,
+ queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -830,15 +951,11 @@ static int tsnep_netdev_open(struct net_device *netdev)
int rx_queue_index = 0;
int retval;
- retval = tsnep_phy_open(adapter);
- if (retval)
- return retval;
-
for (i = 0; i < adapter->num_queues; i++) {
adapter->queue[i].adapter = adapter;
if (adapter->queue[i].tx) {
addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
- retval = tsnep_tx_open(adapter, addr,
+ retval = tsnep_tx_open(adapter, addr, tx_queue_index,
adapter->queue[i].tx);
if (retval)
goto failed;
@@ -847,11 +964,20 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (adapter->queue[i].rx) {
addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
retval = tsnep_rx_open(adapter, addr,
+ rx_queue_index,
adapter->queue[i].rx);
if (retval)
goto failed;
rx_queue_index++;
}
+
+ retval = tsnep_request_irq(&adapter->queue[i], i == 0);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n",
+ adapter->queue[i].irq);
+ goto failed;
+ }
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
@@ -863,9 +989,14 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (retval)
goto failed;
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
for (i = 0; i < adapter->num_queues; i++) {
netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
- tsnep_poll, 64);
+ tsnep_poll);
napi_enable(&adapter->queue[i].napi);
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -873,14 +1004,18 @@ static int tsnep_netdev_open(struct net_device *netdev)
return 0;
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
failed:
for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
return retval;
}
@@ -889,20 +1024,23 @@ static int tsnep_netdev_close(struct net_device *netdev)
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i;
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
for (i = 0; i < adapter->num_queues; i++) {
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
napi_disable(&adapter->queue[i].napi);
netif_napi_del(&adapter->queue[i].napi);
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
-
return 0;
}
@@ -1017,6 +1155,22 @@ static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles)
@@ -1038,9 +1192,9 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_start_xmit = tsnep_netdev_xmit_frame,
.ndo_eth_ioctl = tsnep_netdev_ioctl,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
-
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
};
@@ -1141,6 +1295,52 @@ static int tsnep_phy_init(struct tsnep_adapter *adapter)
return 0;
}
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = irq_mask;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ }
+
+ return 0;
+}
+
static int tsnep_probe(struct platform_device *pdev)
{
struct tsnep_adapter *adapter;
@@ -1149,6 +1349,7 @@ static int tsnep_probe(struct platform_device *pdev)
u32 type;
int revision;
int version;
+ int queue_count;
int retval;
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
@@ -1170,41 +1371,39 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
adapter->addr = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
- adapter->irq = platform_get_irq(pdev, 0);
netdev->mem_start = io->start;
netdev->mem_end = io->end;
- netdev->irq = adapter->irq;
type = ioread32(adapter->addr + ECM_TYPE);
revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
adapter->gate_control = type & ECM_GATE_CONTROL;
-
- adapter->num_tx_queues = TSNEP_QUEUES;
- adapter->num_rx_queues = TSNEP_QUEUES;
- adapter->num_queues = TSNEP_QUEUES;
- adapter->queue[0].tx = &adapter->tx[0];
- adapter->queue[0].rx = &adapter->rx[0];
- adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
tsnep_disable_irq(adapter, ECM_INT_ALL);
- retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
- 0, TSNEP, adapter);
- if (retval != 0) {
- dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
- adapter->irq);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
return retval;
}
- tsnep_enable_irq(adapter, ECM_INT_LINK);
retval = tsnep_mac_init(adapter);
if (retval)
- goto mac_init_failed;
+ return retval;
retval = tsnep_mdio_init(adapter);
if (retval)
@@ -1222,10 +1421,14 @@ static int tsnep_probe(struct platform_device *pdev)
if (retval)
goto tc_init_failed;
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
netdev->netdev_ops = &tsnep_netdev_ops;
netdev->ethtool_ops = &tsnep_ethtool_ops;
netdev->features = NETIF_F_SG;
- netdev->hw_features = netdev->features;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -1242,6 +1445,8 @@ static int tsnep_probe(struct platform_device *pdev)
return 0;
register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
tsnep_tc_cleanup(adapter);
tc_init_failed:
tsnep_ptp_cleanup(adapter);
@@ -1250,8 +1455,6 @@ phy_init_failed:
if (adapter->mdiobus)
mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
-mac_init_failed:
- tsnep_disable_irq(adapter, ECM_INT_ALL);
return retval;
}
@@ -1261,6 +1464,8 @@ static int tsnep_remove(struct platform_device *pdev)
unregister_netdev(adapter->netdev);
+ tsnep_rxnfc_cleanup(adapter);
+
tsnep_tc_cleanup(adapter);
tsnep_ptp_cleanup(adapter);
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 000000000000..9ac2a0cf3833
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
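
The new tsnep rxnfc hooks accept only exact EtherType matches. A hypothetical rule shaped the way tsnep_rxnfc_add_rule() above checks it (steer PTP frames, ETH_P_1588, to RX queue 1 and let the driver pick a free location):

	struct ethtool_rx_flow_spec fsp = {
		.flow_type = ETHER_FLOW,
		.h_u.ether_spec.h_proto = htons(ETH_P_1588),
		/* full EtherType mask; MAC addresses stay zero (wildcard) */
		.m_u.ether_spec.h_proto = cpu_to_be16(0xffff),
		.ring_cookie = 1,
		.location = RX_CLS_LOC_ANY,
	};
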
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 437c5acfe222..95cbad198b4b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1224,7 +1224,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
- netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ethoc_poll);
spin_lock_init(&priv->lock);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c03663785a8d..a03879a27b04 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1063,8 +1063,8 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static void
@@ -1506,7 +1506,7 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
/* Initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Grab our interrupt */
err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
@@ -1701,10 +1701,14 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
if (!netdev->phydev)
return;
phy_disconnect(netdev->phydev);
+ if (of_phy_is_fixed_link(priv->dev->of_node))
+ of_phy_deregister_fixed_link(priv->dev->of_node);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1867,6 +1871,26 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+ } else if (np && of_phy_is_fixed_link(np)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(np);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register fixed PHY\n");
+ goto err_phy_connect;
+ }
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
+ of_phy_deregister_fixed_link(np);
+ err = -EINVAL;
+ goto err_phy_connect;
+ }
+
+ /* Display what we found */
+ phy_attached_info(phy);
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8a341e2d5833..d95d78230828 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -807,8 +807,8 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_link_ksettings(struct net_device *netdev,
@@ -1075,6 +1075,11 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
+ netdev->max_mtu = MAX_PKT_SIZE;
+
+ err = platform_get_ethdev_address(&pdev->dev, netdev);
+ if (err == -EPROBE_DEFER)
+ goto defer_get_mac;
platform_set_drvdata(pdev, netdev);
@@ -1086,7 +1091,7 @@ static int ftmac100_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1137,6 +1142,7 @@ err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
+defer_get_mac:
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index fe986f1673fc..8af32f9070f4 100644
--- a/drivers/net/ethernet/faraday/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -122,9 +122,9 @@
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* TXBUF_BADR */
+ __le32 txdes0;
+ __le32 txdes1;
+ __le32 txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
@@ -143,9 +143,9 @@ struct ftmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* RXBUF_BADR */
+ __le32 rxdes0;
+ __le32 rxdes1;
+ __le32 rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
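Typing the descriptor words as __le32 does not change the layout; it lets sparse enforce that every CPU access goes through an endianness conversion, since the FTMAC100 DMA engine always reads and writes these fields little-endian. A small illustration of the accessor style this implies (the helper names and the bit parameter are placeholders):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Write a buffer address into TXBUF_BADR and test a status bit in txdes0,
 * converting at the CPU/descriptor boundary in both directions.
 */
static inline void my_txdes_set_buf(struct ftmac100_txdes *txdes, u32 addr)
{
	txdes->txdes2 = cpu_to_le32(addr);
}

static inline bool my_txdes_bit_set(const struct ftmac100_txdes *txdes,
				    u32 bit)
{
	return le32_to_cpu(txdes->txdes0) & bit;
}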
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b3939a5f7b03..ed18450fd2cc 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1809,8 +1809,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
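The strlcpy() to strscpy() conversions repeated throughout this series are mechanical but not cosmetic: strlcpy() returns strlen(src) and therefore reads the whole source string even when it does not fit the destination, while strscpy() never reads more than the destination size, always NUL-terminates, and returns -E2BIG on truncation. A short sketch of the return-value difference (buffer names are placeholders):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void my_copy_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_len);

	if (n == -E2BIG)	/* strlcpy() would have returned strlen(src) */
		pr_debug("'%s' truncated to %zu bytes\n", src, dst_len - 1);
}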
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index e04e1c5cb013..ce866ae3df03 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE || COMPILE_TEST
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,15 +23,16 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select PAGE_POOL
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index a770bab4d1ed..31cfa121333d 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -197,12 +197,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
struct dpaa_percpu_priv *percpu_priv;
const u8 *mac_addr;
int i, err;
@@ -216,10 +219,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
}
net_dev->netdev_ops = dpaa_ops;
- mac_addr = priv->mac_dev->addr;
+ mac_addr = mac_dev->addr;
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
+ net_dev->mem_start = (unsigned long)mac_dev->vaddr;
+ net_dev->mem_end = (unsigned long)mac_dev->vaddr_end;
net_dev->min_mtu = ETH_MIN_MTU;
net_dev->max_mtu = dpaa_get_max_mtu();
@@ -246,7 +249,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
- err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ err = mac_dev->change_addr(mac_dev->fman_mac,
(const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
@@ -261,6 +264,9 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+ mac_dev->net_dev = net_dev;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
@@ -288,10 +294,9 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+ mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
error = fman_port_disable(mac_dev->port[i]);
@@ -826,10 +831,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
+ /* Set different thresholds based on the configured MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
+ * callback.
*/
if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
cs_th = DPAA_CS_THRESHOLD_10G;
@@ -858,6 +863,31 @@ out_error:
return err;
}
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = mac_dev->net_dev;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
@@ -2946,11 +2976,12 @@ static int dpaa_open(struct net_device *net_dev)
goto mac_start_failed;
}
- err = priv->mac_dev->start(mac_dev);
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
+ phy_start(priv->mac_dev->phy_dev);
netif_tx_start_all_queues(net_dev);
@@ -3152,8 +3183,7 @@ static int dpaa_napi_add(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_add(net_dev, &percpu_priv->np.napi,
- dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
}
return 0;
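The dpaa change replaces a congestion threshold chosen once from if_support at CGR-init time with a mac_dev->update_speed hook, so the threshold can track the speed the link actually negotiates (the situation the old comment warned about). How a MAC driver invokes that hook is not part of this hunk; the sketch below only illustrates the intended call direction, with my_get_mac_dev() as a purely hypothetical accessor and struct mac_device coming from the driver-local fman/mac.h.

#include <linux/netdevice.h>
#include <linux/phy.h>

static void my_adjust_link(struct net_device *net_dev)
{
	struct mac_device *mac_dev = my_get_mac_dev(net_dev);	/* hypothetical */
	struct phy_device *phy = net_dev->phydev;

	if (phy->link && mac_dev->update_speed)
		/* lands in dpaa_eth_cgr_set_speed(), which reprograms the
		 * congestion-state threshold via qman_update_cgr_safe()
		 */
		mac_dev->update_speed(mac_dev, phy->speed);
}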
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..258eb6c8f4c0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -18,7 +18,7 @@ static ssize_t dpaa_eth_show_addr(struct device *dev,
if (mac_dev)
return sprintf(buf, "%llx",
- (unsigned long long)mac_dev->res->start);
+ (unsigned long long)mac_dev->vaddr);
else
return sprintf(buf, "none");
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 73f07881ce2d..769e936a263c 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -80,9 +80,9 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 75d51572693d..8d029addddad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4565,8 +4565,7 @@ static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
- netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index c9bee9a0c9b2..49ff85633783 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -549,7 +549,7 @@ void dpaa2_mac_get_strings(u8 *data)
int i;
for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index e507e9065214..2b5909fa93cf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -3373,9 +3373,8 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
* different queues for each switch ports.
*/
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
- netif_napi_add(ethsw->ports[0]->netdev,
- &ethsw->fq[i].napi, dpaa2_switch_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..e0e8dfd13793 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -9,7 +9,6 @@ fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 4470a4a3e4c3..54bc92fc6bf0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2116,13 +2116,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@@ -2155,13 +2156,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_clear_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_clear_rxbdr(hw, priv->rx_ring[i]);
udelay(1);
}
@@ -2169,13 +2171,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
@@ -2263,13 +2265,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
@@ -2432,10 +2435,11 @@ int enetc_close(struct net_device *ndev)
return 0;
}
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
u8 num_tc;
@@ -2452,7 +2456,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ enetc_set_bdr_prio(hw, tx_ring->index, 0);
}
return 0;
@@ -2471,7 +2475,7 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ enetc_set_bdr_prio(hw, tx_ring->index, i);
}
/* Reset the number of netdev queues based on the TC count */
@@ -2486,25 +2490,6 @@ static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
return 0;
}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -2600,52 +2585,29 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2657,11 +2619,6 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
@@ -2808,8 +2765,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
v->rx_dim_en = true;
}
INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
v->count_tx_rings = v_tx_rings;
for (j = 0; j < v_tx_rings; j++) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 29922c20531f..161930a65f61 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -393,11 +393,9 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
@@ -455,7 +453,11 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
data, *dma);
}
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
@@ -465,22 +467,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -521,6 +525,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
}
#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
@@ -540,4 +545,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index ff872e40ce85..c8369e3752b0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -125,68 +125,68 @@ static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
- { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
- { ENETC_PM0_RALN, "MAC rx alignment errors" },
- { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
- { ENETC_PM0_RFRM, "MAC rx valid frames" },
- { ENETC_PM0_RFCS, "MAC rx fcs errors" },
- { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
- { ENETC_PM0_RERR, "MAC rx frame errors" },
- { ENETC_PM0_RUCA, "MAC rx unicast frames" },
- { ENETC_PM0_RMCA, "MAC rx multicast frames" },
- { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
- { ENETC_PM0_RDRP, "MAC rx dropped packets" },
- { ENETC_PM0_RPKT, "MAC rx packets" },
- { ENETC_PM0_RUND, "MAC rx undersized packets" },
- { ENETC_PM0_R64, "MAC rx 64 byte packets" },
- { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
- { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
- { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
- { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
- { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
- { ENETC_PM0_ROVR, "MAC rx oversized packets" },
- { ENETC_PM0_RJBR, "MAC rx jabber packets" },
- { ENETC_PM0_RFRG, "MAC rx fragment packets" },
- { ENETC_PM0_RCNP, "MAC rx control packets" },
- { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
- { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
- { ENETC_PM0_TOCT, "MAC tx octets" },
- { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
- { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
- { ENETC_PM0_TFRM, "MAC tx frames" },
- { ENETC_PM0_TFCS, "MAC tx fcs errors" },
- { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frame errors" },
- { ENETC_PM0_TUCA, "MAC tx unicast frames" },
- { ENETC_PM0_TMCA, "MAC tx multicast frames" },
- { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
- { ENETC_PM0_TPKT, "MAC tx packets" },
- { ENETC_PM0_TUND, "MAC tx undersized packets" },
- { ENETC_PM0_T64, "MAC tx 64 byte packets" },
- { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
- { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
- { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
- { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
- { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
- { ENETC_PM0_TCNP, "MAC tx control packets" },
- { ENETC_PM0_TDFR, "MAC tx deferred packets" },
- { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
- { ENETC_PM0_TSCOL, "MAC tx single collisions" },
- { ENETC_PM0_TLCOL, "MAC tx late collisions" },
- { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
- { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
- { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
- { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
- { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
- { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
- { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
- { ENETC_PFDMSAPR, "SI pruning discarded frames" },
- { ENETC_PICDR(0), "ICM DR0 discarded frames" },
- { ENETC_PICDR(1), "ICM DR1 discarded frames" },
- { ENETC_PICDR(2), "ICM DR2 discarded frames" },
- { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -236,7 +236,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -258,7 +258,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strlcpy(p, enetc_port_counters[i].name,
+ strscpy(p, enetc_port_counters[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -301,6 +301,113 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+
+ *ranges = enetc_rmon_ranges;
+}
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_mac_stats(hw, 0, mac_stats);
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+}
+
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -766,6 +873,10 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
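Parameterising the MAC counters as ENETC_PM_*(mac) keeps a single set of read helpers for both port MACs: mac 0 is the eMAC block at 0x8xxx and mac 1 the pMAC block one 0x1000 stride above it, and the new get_pause_stats/get_eth_mac_stats/get_eth_ctrl_stats/get_rmon_stats callbacks expose the eMAC counters through the standard ethtool statistics groups rather than only the driver-private string list. The offset arithmetic can be checked at build time; the check below is illustrative only and restates the definition from enetc_hw.h further down.

#include <linux/build_bug.h>

#define ENETC_PM_TFRM(mac)	(0x8220 + 0x1000 * (mac))	/* as in enetc_hw.h */

static_assert(ENETC_PM_TFRM(0) == 0x8220);	/* eMAC "MAC tx frames" */
static_assert(ENETC_PM_TFRM(1) == 0x9220);	/* pMAC "MAC tx frames" */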
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 647c87f73bf7..18ca1f42b1f7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -276,58 +276,60 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFMCAPR 0x1b38
#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
-/* MAC counters */
-#define ENETC_PM0_REOCT 0x8100
-#define ENETC_PM0_RALN 0x8110
-#define ENETC_PM0_RXPF 0x8118
-#define ENETC_PM0_RFRM 0x8120
-#define ENETC_PM0_RFCS 0x8128
-#define ENETC_PM0_RVLAN 0x8130
-#define ENETC_PM0_RERR 0x8138
-#define ENETC_PM0_RUCA 0x8140
-#define ENETC_PM0_RMCA 0x8148
-#define ENETC_PM0_RBCA 0x8150
-#define ENETC_PM0_RDRP 0x8158
-#define ENETC_PM0_RPKT 0x8160
-#define ENETC_PM0_RUND 0x8168
-#define ENETC_PM0_R64 0x8170
-#define ENETC_PM0_R127 0x8178
-#define ENETC_PM0_R255 0x8180
-#define ENETC_PM0_R511 0x8188
-#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1522 0x8198
-#define ENETC_PM0_R1523X 0x81A0
-#define ENETC_PM0_ROVR 0x81A8
-#define ENETC_PM0_RJBR 0x81B0
-#define ENETC_PM0_RFRG 0x81B8
-#define ENETC_PM0_RCNP 0x81C0
-#define ENETC_PM0_RDRNTP 0x81C8
-#define ENETC_PM0_TEOCT 0x8200
-#define ENETC_PM0_TOCT 0x8208
-#define ENETC_PM0_TCRSE 0x8210
-#define ENETC_PM0_TXPF 0x8218
-#define ENETC_PM0_TFRM 0x8220
-#define ENETC_PM0_TFCS 0x8228
-#define ENETC_PM0_TVLAN 0x8230
-#define ENETC_PM0_TERR 0x8238
-#define ENETC_PM0_TUCA 0x8240
-#define ENETC_PM0_TMCA 0x8248
-#define ENETC_PM0_TBCA 0x8250
-#define ENETC_PM0_TPKT 0x8260
-#define ENETC_PM0_TUND 0x8268
-#define ENETC_PM0_T64 0x8270
-#define ENETC_PM0_T127 0x8278
-#define ENETC_PM0_T255 0x8280
-#define ENETC_PM0_T511 0x8288
-#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1522 0x8298
-#define ENETC_PM0_T1523X 0x82A0
-#define ENETC_PM0_TCNP 0x82C0
-#define ENETC_PM0_TDFR 0x82D0
-#define ENETC_PM0_TMCOL 0x82D8
-#define ENETC_PM0_TSCOL 0x82E0
-#define ENETC_PM0_TLCOL 0x82E8
-#define ENETC_PM0_TECOL 0x82F0
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
@@ -943,13 +945,13 @@ static inline u32 enetc_usecs_to_cycles(u32 usecs)
}
/* port time gating control register */
-#define ENETC_QBV_PTGCR_OFFSET 0x11a00
-#define ENETC_QBV_TGE BIT(31)
-#define ENETC_QBV_TGPE BIT(30)
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
/* Port time gating capability register */
-#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
-#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index c4a0e836d4f0..bdf94335ee99 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -516,15 +516,34 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
int tc;
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
for (tc = 0; tc < 8; tc++)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_reset_ptcmsdur(hw);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -709,6 +728,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -722,7 +748,30 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -739,7 +788,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 582a663ed0ba..e6416332ec79 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -7,18 +7,19 @@
#include <linux/math64.h>
#include <linux/refcount.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_gate.h>
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
- return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
- & ENETC_QBV_MAX_GCL_LEN_MASK;
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
}
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,16 +40,15 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
static int enetc_setup_taprio(struct net_device *ndev,
struct tc_taprio_qopt_offload *admin_conf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
@@ -61,15 +61,14 @@ static int enetc_setup_taprio(struct net_device *ndev,
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
return -EINVAL;
gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ tge = enetc_rd(hw, ENETC_PTGCR);
if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
priv->active_offloads &= ~ENETC_F_QBV;
@@ -117,27 +116,28 @@ static int enetc_setup_taprio(struct net_device *ndev,
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
- if (!err)
- priv->active_offloads |= ENETC_F_QBV;
+ if (err)
+ return err;
- return err;
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
}
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int err;
int i;
@@ -147,16 +147,14 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
return -EBUSY;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? i : 0);
err = enetc_setup_taprio(ndev, taprio);
if (err)
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? 0 : i);
return err;
@@ -178,7 +176,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -199,15 +197,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -224,13 +222,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -239,7 +237,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interfrence_size is maxSizedFrame.
*
@@ -259,8 +257,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -280,10 +278,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -293,6 +291,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -304,12 +303,11 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
return -EINVAL;
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -1517,6 +1515,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
@@ -1578,3 +1599,23 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..dfcaac302e24 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0cebe4b63adb..33f84a30e167 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,8 +16,12 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
+#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
@@ -343,8 +347,11 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
+
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -498,6 +505,9 @@ struct bufdesc_ex {
/* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -511,6 +521,12 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
+struct fec_enet_priv_txrx_info {
+ int offset;
+ struct page *page;
+ struct sk_buff *skb;
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -526,7 +542,14 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
};
struct fec_stop_mode_gpr {
@@ -579,6 +602,7 @@ struct fec_enet_private {
struct device_node *phy_node;
bool rgmii_txc_dly;
bool rgmii_rxc_dly;
+ bool rpm_active;
int link;
int full_duplex;
int speed;
@@ -608,6 +632,7 @@ struct fec_enet_private {
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
unsigned int rx_align;
@@ -634,12 +659,7 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
- struct {
- struct timespec64 ts_phc;
- u64 ns_sys;
- u32 at_corr;
- u8 at_inc_corr;
- } ptp_saved_state;
+ struct imx_sc_ipc *ipc_handle;
u64 ethtool_stats[];
};
@@ -651,8 +671,5 @@ void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
-void fec_ptp_save_state(struct fec_enet_private *fep);
-int fec_ptp_restore_state(struct fec_enet_private *fep);
-
/****************************************************************************/
#endif /* FEC_H */
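fec.h moves the RX ring from preallocated skbs to page_pool pages with XDP headroom reserved at the start of each page, so FEC_ENET_RX_FRSIZE becomes whatever is left of a page once that headroom and the skb_shared_info tail are subtracted. A sketch of the arithmetic; the helper is illustrative, the driver expresses the same thing with the macro shown above.

#include <linux/bpf.h>
#include <linux/skbuff.h>

/* One RX page as laid out by the new definitions:
 *
 *   | XDP_PACKET_HEADROOM | frame data (FEC_ENET_RX_FRSIZE) | skb_shared_info |
 */
static inline unsigned int my_fec_rx_frsize(void)
{
	return PAGE_SIZE - XDP_PACKET_HEADROOM -
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}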
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b0d60f898249..98d5cd313fdd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -66,6 +66,8 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
#include <asm/cacheflush.h>
@@ -111,7 +113,8 @@ static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -155,6 +158,13 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_DELAYED_CLKS_SUPPORT,
};
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -188,6 +198,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx8qm-fec",
.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
}, {
+ .name = "s32v234-fec",
+ .driver_data = (kernel_ulong_t)&fec_s32v234_info,
+ }, {
/* sentinel */
}
};
@@ -203,6 +216,7 @@ enum imx_fec_type {
IMX6UL_FEC,
IMX8MQ_FEC,
IMX8QM_FEC,
+ S32V234_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -215,6 +229,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -285,11 +300,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_RESET BIT(0)
-#define FEC_ECR_ETHEREN BIT(1)
-#define FEC_ECR_MAGICEN BIT(2)
-#define FEC_ECR_SLEEP BIT(3)
-#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_MAGICEN (1 << 2)
+#define FEC_ECR_SLEEP (1 << 3)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -412,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = FEC_ENET_RX_FRSIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
@@ -985,9 +1039,6 @@ fec_restart(struct net_device *ndev)
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
- struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
-
- fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1141,7 +1192,7 @@ fec_restart(struct net_device *ndev)
}
if (fep->bufdesc_ex)
- ecntl |= FEC_ECR_EN1588;
+ ecntl |= (1 << 4);
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1162,14 +1213,6 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
- /* Restart PPS if needed */
- if (fep->pps_enable) {
- /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
- fep->pps_enable = 0;
- fec_ptp_restore_state(fep);
- fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
- }
-
/* Enable interrupts we wish to service */
if (fep->link)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1181,6 +1224,34 @@ fec_restart(struct net_device *ndev)
}
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
+}
+
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -1196,6 +1267,8 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
BIT(stop_gpr->bit), 0);
} else if (pdata && pdata->sleep_mode_enable) {
pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
}
}
@@ -1220,8 +1293,6 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
- struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS };
- u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -1231,8 +1302,6 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
- fec_ptp_save_state(fep);
-
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
* instead of reset MAC itself.
@@ -1252,28 +1321,12 @@ fec_stop(struct net_device *ndev)
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
- if (fep->bufdesc_ex)
- ecntl |= FEC_ECR_EN1588;
-
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- ecntl |= FEC_ECR_ETHEREN;
+ writel(2, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
-
- writel(ecntl, fep->hwp + FEC_ECNTRL);
-
- if (fep->bufdesc_ex)
- fec_ptp_start_cyclecounter(ndev);
-
- /* Restart PPS if needed */
- if (fep->pps_enable) {
- /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */
- fep->pps_enable = 0;
- fec_ptp_restore_state(fep);
- fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1);
- }
}
@@ -1441,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev)
fec_enet_tx_queue(ndev, i);
}
-static int
+static int __maybe_unused
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1461,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
return 0;
}
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1487,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
return true;
}
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
+{
+ struct page *new_page;
+ dma_addr_t phys_addr;
+
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ WARN_ON(!new_page);
+ rxq->rx_skb_info[index].page = new_page;
+
+ rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -1499,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb_new = NULL;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
@@ -1508,8 +1576,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
bool vlan_packet_rcvd = false;
u16 vlan_tag;
int index = 0;
- bool is_copybreak;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct page *page;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1561,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- skb = rxq->rx_skbuff[index];
+ page = rxq->rx_skb_info[index].page;
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+ fec_enet_update_cbd(rxq, bdp, index);
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
- need_swap);
- if (!is_copybreak) {
- skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (unlikely(!skb_new)) {
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- }
-
- prefetch(skb->data - NET_IP_ALIGN);
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
skb_put(skb, pkt_len - 4);
+ skb_mark_for_recycle(skb);
data = skb->data;
- if (!is_copybreak && need_swap)
+ if (need_swap)
swap_buffer(data, pkt_len);
#if !defined(CONFIG_M5272)
@@ -1640,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
- if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
- rxq->rx_skbuff[index] = skb_new;
- fec_enet_new_rxbdp(ndev, bdp, skb_new);
- }
-
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -2138,13 +2190,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2328,9 +2380,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name,
+ strscpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2993,26 +3045,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->bd.base;
- for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
- }
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3102,24 +3147,31 @@ static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
goto err_alloc;
- if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
- dev_kfree_skb(skb);
- goto err_alloc;
- }
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skbuff[i] = skb;
+ rxq->rx_skb_info[i].page = page;
+ rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3244,6 +3296,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3285,6 +3340,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3593,7 +3651,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
@@ -3851,6 +3909,10 @@ fec_probe(struct platform_device *pdev)
!of_property_read_bool(np, "fsl,err006687-workaround-present"))
fep->quirks |= FEC_QUIRK_ERR006687;
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@ -4048,6 +4110,7 @@ failed_rgmii_delay:
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
failed_stop_mode:
+failed_ipc_init:
failed_phy:
dev_id--;
failed_ioremap:
@@ -4092,6 +4155,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
rtnl_lock();
if (netif_running(ndev)) {
@@ -4116,6 +4180,15 @@ static int __maybe_unused fec_suspend(struct device *dev)
}
/* It's safe to disable clocks since interrupts are masked */
fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
}
rtnl_unlock();
@@ -4146,6 +4219,9 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
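The fec_main.c hunks above convert the FEC RX ring from individually mapped sk_buffs to the page_pool allocator and register each queue with the XDP page_pool memory model, which is what lets the new build_skb()/skb_mark_for_recycle() receive path hand pages straight back to the pool. The sketch below condenses that setup pattern; it is an illustration only, using hypothetical my_rx_queue/my_setup_rx names rather than the driver's real structures.

#include <linux/netdevice.h>
#include <net/page_pool.h>
#include <net/xdp.h>

/* Hypothetical per-queue state, mirroring what the diff keeps in
 * struct fec_enet_priv_rx_q: one pool plus one xdp_rxq_info per queue.
 */
struct my_rx_queue {
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
};

static int my_setup_rx(struct net_device *ndev, struct my_rx_queue *rxq,
		       struct device *dma_dev, int ring_size, u32 headroom)
{
	struct page_pool_params pp = {
		.order		= 0,			/* one page per RX buffer */
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= ring_size,
		.nid		= dev_to_node(dma_dev),
		.dev		= dma_dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= headroom,		/* reserved XDP headroom */
		.max_len	= PAGE_SIZE - headroom,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, 0, 0);
	if (err)
		goto destroy_pool;

	/* Route recycled pages (skb_mark_for_recycle(), XDP verdicts) back
	 * into this pool instead of the page allocator.
	 */
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto unreg_rxq;

	return 0;

unreg_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
destroy_pool:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}

On the completion side the driver then allocates replacement pages with page_pool_dev_alloc_pages(), builds the skb in place with build_skb(), and marks it with skb_mark_for_recycle(), as the fec_enet_rx_queue() hunks above show.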
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c74d04f4b2fd..cffd9ad499dd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -578,7 +578,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
int ret;
fep->ptp_caps.owner = THIS_MODULE;
- strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
@@ -633,36 +633,7 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- if (fep->pps_enable)
- fec_ptp_enable_pps(fep, 0);
-
cancel_delayed_work_sync(&fep->time_keep);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
-
-void fec_ptp_save_state(struct fec_enet_private *fep)
-{
- u32 atime_inc_corr;
-
- fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
- fep->ptp_saved_state.ns_sys = ktime_get_ns();
-
- fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR);
- atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK;
- fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET);
-}
-
-int fec_ptp_restore_state(struct fec_enet_private *fep)
-{
- u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
- u64 ns_sys;
-
- writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR);
- atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET;
- writel(atime_inc, fep->hwp + FEC_ATIME_INC);
-
- ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys;
- timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys);
- return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc);
-}
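Several hunks in this series also replace strlcpy() with strscpy(). Both copy at most size - 1 bytes and always NUL-terminate, but strscpy() returns the number of bytes copied, or -E2BIG when the source had to be truncated, rather than the full source length, so callers cannot misuse the return value to index past the destination. A minimal, hypothetical illustration of the calling convention:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_copy_name(char *dst, size_t dst_len, const char *src)
{
	ssize_t n;

	n = strscpy(dst, src, dst_len);	/* always NUL-terminates dst */
	if (n == -E2BIG)
		pr_warn("name '%s' truncated to '%s'\n", src, dst);
}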
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 8f0db61cb1f6..9d85fb136e34 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f2ede1360f03..2ea575a46675 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1950a8936bc0..6617932fd3fd 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_dtsec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -327,7 +301,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Number of individual addresses in registers for this station */
@@ -840,73 +814,45 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->maximum_frame = new_val;
-
- return 0;
-}
-
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
-
- return 0;
-}
-
-static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) &
- ~TCTRL_GTS, &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) &
- ~RCTRL_GRS, &regs->rctrl);
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}
-static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_stop(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
/* Graceful stop - Assert the graceful Rx stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
- if (dtsec->fm_rev_info.major == 2) {
- /* Workaround for dTSEC Errata A002 */
- usleep_range(100, 200);
- } else {
- /* Workaround for dTSEC Errata A004839 */
- usleep_range(10, 50);
- }
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
}
/* Graceful stop - Assert the graceful Tx stop bit */
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2) {
- /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
- pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
- } else {
- tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
- iowrite32be(tmp, &regs->tctrl);
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
- /* Workaround for dTSEC Errata A0012, A0014 */
- usleep_range(10, 50);
- }
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
}
}
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+static int dtsec_enable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -916,58 +862,42 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
/* Enable */
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp |= MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= MACCFG1_TX_EN;
-
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
iowrite32be(tmp, &regs->maccfg1);
/* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+static void dtsec_disable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
/* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp &= ~MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~MACCFG1_TX_EN;
-
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
iowrite32be(tmp, &regs->maccfg1);
-
- return 0;
}
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
- u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
@@ -989,26 +919,20 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
if (en)
@@ -1017,25 +941,18 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
-
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
* Station address have to be swapped (big endian to little endian
@@ -1043,12 +960,13 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_add
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct eth_hash_entry *hash_entry;
@@ -1114,7 +1032,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -1133,7 +1051,7 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
@@ -1158,7 +1076,8 @@ int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct list_head *pos;
@@ -1229,7 +1148,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -1258,21 +1177,15 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg2);
@@ -1293,12 +1206,12 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
tmp &= ~DTSEC_ECNTRL_R100M;
iowrite32be(tmp, &regs->ecntrl);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_restart_autoneg(struct fman_mac *dtsec)
+static int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
u16 tmp_reg16;
@@ -1316,20 +1229,31 @@ int dtsec_restart_autoneg(struct fman_mac *dtsec)
return 0;
}
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+static void adjust_link_dtsec(struct mac_device *mac_dev)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
- *mac_version = ioread32be(&regs->tsec_id);
+ return;
+ }
- return 0;
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable)
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
@@ -1382,7 +1306,7 @@ int dtsec_set_exception(struct fman_mac *dtsec,
return 0;
}
-int dtsec_init(struct fman_mac *dtsec)
+static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
@@ -1476,7 +1400,7 @@ int dtsec_init(struct fman_mac *dtsec)
return 0;
}
-int dtsec_free(struct fman_mac *dtsec)
+static int dtsec_free(struct fman_mac *dtsec)
{
free_init_resources(dtsec);
@@ -1487,13 +1411,11 @@ int dtsec_free(struct fman_mac *dtsec)
return 0;
}
-struct fman_mac *dtsec_config(struct fman_mac_params *params)
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *dtsec;
struct dtsec_cfg *dtsec_drv_param;
- void __iomem *base_addr;
-
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
@@ -1510,10 +1432,10 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = base_addr;
- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
dtsec->max_speed = params->max_speed;
- dtsec->phy_if = params->phy_if;
+ dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
DTSEC_IMASK_RXCEN |
@@ -1530,34 +1452,87 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
- dtsec->dev_id = params->dev_id;
+ dtsec->dev_id = mac_dev;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
- if (!params->internal_phy_node) {
+ /* Save FMan revision */
+ fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+
+ return dtsec;
+
+err_dtsec:
+ kfree(dtsec);
+ return NULL;
+}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_dtsec;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node) {
pr_err("TBI PHY node is not available\n");
- goto err_dtsec_drv_param;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
+ dtsec->tbiphy = of_phy_find_device(phy_node);
if (!dtsec->tbiphy) {
pr_err("of_phy_find_device (TBI PHY) failed\n");
- goto err_dtsec_drv_param;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
-
put_device(&dtsec->tbiphy->mdio.dev);
- /* Save FMan revision */
- fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
- return dtsec;
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
-err_dtsec_drv_param:
- kfree(dtsec_drv_param);
-err_dtsec:
- kfree(dtsec);
- return NULL;
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
}
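The fman_dtsec.c changes above stop exporting the individual dtsec_*() entry points; dtsec_initialization() now installs them as callbacks on the shared mac_device object (enable/disable, pause handling, hash filtering, adjust_link), and the graceful start/stop helpers lose the enum comm_mode argument because Rx and Tx are always toggled together. A reduced, hypothetical sketch of that callback-binding shape (not the real struct mac_device from mac.h):

struct fman_mac;

struct example_mac_device {
	struct fman_mac *fman_mac;
	int  (*enable)(struct fman_mac *mac);
	void (*disable)(struct fman_mac *mac);
};

/* MAC-specific half: what dtsec_initialization() does for dTSEC. */
static int example_dtsec_enable(struct fman_mac *mac)
{
	/* enable Rx and Tx unconditionally, as dtsec_enable() now does */
	return 0;
}

static void example_dtsec_disable(struct fman_mac *mac)
{
	/* disable Rx and Tx unconditionally */
}

static void example_bind_dtsec(struct example_mac_device *mac_dev)
{
	mac_dev->enable  = example_dtsec_enable;
	mac_dev->disable = example_dtsec_disable;
}

/* Generic half: the DPAA code only calls through the pointers and never
 * references dtsec_*()/memac_*() symbols directly.
 */
static int example_mac_start(struct example_mac_device *mac_dev)
{
	return mac_dev->enable(mac_dev->fman_mac);
}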
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 68512c3bd6e5..8c72d280c51a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __DTSEC_H
@@ -35,27 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *dtsec_config(struct fman_mac_params *params);
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
-int dtsec_adjust_link(struct fman_mac *dtsec,
- u16 speed);
-int dtsec_restart_autoneg(struct fman_mac *dtsec);
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_init(struct fman_mac *dtsec);
-int dtsec_free(struct fman_mac *dtsec);
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable);
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index e1bdfed16134..e73f6ef3c6ee 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
index c4640de3f4cb..2cb0df453074 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.h
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __KEYGEN_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 19f327efdaff..65887a3160d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -41,6 +41,7 @@
#include <linux/if_ether.h>
struct fman_mac;
+struct mac_device;
/* Ethernet Address */
typedef u8 enet_addr_t[ETH_ALEN];
@@ -75,16 +76,6 @@ typedef u8 enet_addr_t[ETH_ALEN];
#define ETH_HASH_ENTRY_OBJ(ptr) \
hlist_entry_safe(ptr, struct eth_hash_entry, node)
-/* Enumeration (bit flags) of communication modes (Transmit,
- * receive or both).
- */
-enum comm_mode {
- COMM_MODE_NONE = 0, /* No transmit/receive communication */
- COMM_MODE_RX = 1, /* Only receive communication */
- COMM_MODE_TX = 2, /* Only transmit communication */
- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
-};
-
/* FM MAC Exceptions */
enum fman_mac_exceptions {
FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
@@ -168,30 +159,23 @@ struct eth_hash_entry {
struct list_head node;
};
-typedef void (fman_mac_exception_cb)(void *dev_id,
- enum fman_mac_exceptions exceptions);
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
/* FMan MAC config input */
struct fman_mac_params {
- /* Base of memory mapped FM MAC registers */
- void __iomem *base_addr;
- /* MAC address of device; First octet is sent first */
- enet_addr_t addr;
/* MAC ID; numbering of dTSEC and 1G-mEMAC:
* 0 - FM_MAX_NUM_OF_1G_MACS;
* numbering of 10G-MAC (TGEC) and 10G-mEMAC:
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* PHY interface */
- phy_interface_t phy_if;
/* Note that the speed should indicate the maximum rate that
* this MAC should support rather than the actual speed;
*/
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
/* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
@@ -200,8 +184,6 @@ struct fman_mac_params {
* synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
*/
bool basex_if;
- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
- struct device_node *internal_phy_node;
};
struct eth_hash_t {
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 2216b7f51d26..32d26cf17843 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_memac.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/io.h>
@@ -337,7 +311,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Pointer to driver's global address hash table */
@@ -712,7 +686,7 @@ static bool is_init_done(struct memac_cfg *memac_drv_params)
return false;
}
-int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_enable(struct fman_mac *memac)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -721,36 +695,26 @@ int memac_enable(struct fman_mac *memac, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
-
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+static void memac_disable(struct fman_mac *memac)
+
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
-
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -769,7 +733,7 @@ int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
return 0;
}
-int memac_adjust_link(struct fman_mac *memac, u16 speed)
+static int memac_adjust_link(struct fman_mac *memac, u16 speed)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -809,39 +773,26 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
return 0;
}
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->max_frame_length = new_val;
-
- return 0;
-}
-
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->reset_on_init = enable;
-
- return 0;
-}
-
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
+static void adjust_link_memac(struct mac_device *mac_dev)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- memac->memac_drv_param->fixed_link = fixed_link;
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
- return 0;
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time)
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -878,7 +829,7 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
return 0;
}
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -897,7 +848,8 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
@@ -907,7 +859,8 @@ int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_add
return 0;
}
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry;
@@ -940,7 +893,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_allmulti(struct fman_mac *memac, bool enable)
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
@@ -963,12 +916,13 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
-int memac_set_tstamp(struct fman_mac *memac, bool enable)
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
{
return 0; /* Always enabled. */
}
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -1001,8 +955,8 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable)
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
{
u32 bit_mask = 0;
@@ -1024,13 +978,13 @@ int memac_set_exception(struct fman_mac *memac,
return 0;
}
-int memac_init(struct fman_mac *memac)
+static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
u8 i;
enet_addr_t eth_addr;
bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link;
+ struct fixed_phy_status *fixed_link = NULL;
int err;
u32 reg32 = 0;
@@ -1141,7 +1095,7 @@ int memac_init(struct fman_mac *memac)
return 0;
}
-int memac_free(struct fman_mac *memac)
+static int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
@@ -1154,13 +1108,12 @@ int memac_free(struct fman_mac *memac)
return 0;
}
-struct fman_mac *memac_config(struct fman_mac_params *params)
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *memac;
struct memac_cfg *memac_drv_param;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the m_emac data structure */
memac = kzalloc(sizeof(*memac), GFP_KERNEL);
if (!memac)
@@ -1178,38 +1131,121 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
set_dflts(memac_drv_param);
- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- memac->regs = base_addr;
+ memac->regs = mac_dev->vaddr;
memac->max_speed = params->max_speed;
- memac->phy_if = params->phy_if;
+ memac->phy_if = mac_dev->phy_if;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
- memac->dev_id = params->dev_id;
+ memac->dev_id = mac_dev;
memac->fm = params->fm;
memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
+ return memac;
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct device_node *phy_node;
+ struct fixed_phy_status *fixed_link;
+ struct fman_mac *memac;
+
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_memac;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+
+ if (params->max_speed == SPEED_10000)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- if (!params->internal_phy_node) {
+ phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
+ if (!phy_node) {
pr_err("PCS PHY node is not available\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ memac->pcsphy = of_phy_find_device(phy_node);
if (!memac->pcsphy) {
pr_err("of_phy_find_device (PCS PHY) failed\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
}
- return memac;
+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_fm_mac_free;
+
+ fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
+ if (!fixed_link) {
+ err = -ENOMEM;
+ goto _return_fm_mac_free;
+ }
+
+ mac_dev->phy_node = of_node_get(mac_node);
+ phy = of_phy_find_device(mac_dev->phy_node);
+ if (!phy) {
+ err = -EINVAL;
+ of_node_put(mac_dev->phy_node);
+ goto _return_fixed_link_free;
+ }
+
+ fixed_link->link = phy->link;
+ fixed_link->speed = phy->speed;
+ fixed_link->duplex = phy->duplex;
+ fixed_link->pause = phy->pause;
+ fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
+ memac->memac_drv_param->fixed_link = fixed_link;
+ }
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fixed_link_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fixed_link_free:
+ kfree(fixed_link);
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+_return:
+ return err;
}
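
For reference, the fixed-link branch added to memac_initialization() above follows the stock OF pattern: register the fixed link on the MAC node, look up the phy_device that registration creates, snapshot its status, and drop the device reference. A minimal sketch of just that pattern follows — the helper name snapshot_fixed_link() is invented here; everything else is the same kernel API the patch uses.

/* Sketch only, not part of this patch. Assumes <linux/of_mdio.h> and
 * <linux/phy_fixed.h>.
 */
static int snapshot_fixed_link(struct device_node *np,
			       struct fixed_phy_status *st)
{
	struct phy_device *phy;
	int err;

	err = of_phy_register_fixed_link(np);	/* creates the fixed PHY */
	if (err)
		return err;

	phy = of_phy_find_device(np);		/* takes a device reference */
	if (!phy)
		return -EINVAL;

	st->link = phy->link;
	st->speed = phy->speed;
	st->duplex = phy->duplex;
	st->pause = phy->pause;
	st->asym_pause = phy->asym_pause;

	put_device(&phy->mdio.dev);		/* drop the reference */
	return 0;
}
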
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 3820f7a22983..5a3a14f9684f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
@@ -38,26 +11,10 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
-struct fman_mac *memac_config(struct fman_mac_params *params);
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
-int memac_adjust_link(struct fman_mac *memac, u16 speed);
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link);
-int memac_enable(struct fman_mac *memac, enum comm_mode mode);
-int memac_disable(struct fman_mac *memac, enum comm_mode mode);
-int memac_init(struct fman_mac *memac);
-int memac_free(struct fman_mac *memac);
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time);
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable);
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_set_allmulti(struct fman_mac *memac, bool enable);
-int memac_set_tstamp(struct fman_mac *memac, bool enable);
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 7ad317e622bc..f557d68e5b76 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#include "fman_muram.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 453bf849eee1..3643af61bae2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
+
#ifndef __FM_MURAM_EXT
#define __FM_MURAM_EXT
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 4c9d05c45c03..ab90fe2bee5e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 82f12661a46d..4917fe8f0617 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_PORT_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index 248f5bcca468..0fac60aa5283 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fman_sp.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
index 820b7f63088f..a62dd21c81f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.h
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -1,32 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_SP_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 311c1906e044..5a4be54ad459 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_tgec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -206,7 +180,7 @@ struct fman_mac {
/* MAC address of device; */
u64 addr;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* pointer to driver's global address hash table */
@@ -419,7 +393,7 @@ static bool is_init_done(struct tgec_cfg *cfg)
return false;
}
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_enable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -428,34 +402,25 @@ int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+static void tgec_disable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(tgec->cfg));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -473,18 +438,9 @@ int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
return 0;
}
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
-{
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
- tgec->cfg->max_frame_length = new_val;
-
- return 0;
-}
-
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct tgec_regs __iomem *regs = tgec->regs;
@@ -496,7 +452,7 @@ int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
return 0;
}
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -514,7 +470,8 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
@@ -525,7 +482,8 @@ int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_add
return 0;
}
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
@@ -562,7 +520,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
@@ -585,7 +543,7 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -605,7 +563,8 @@ int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -642,20 +601,15 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+static void tgec_adjust_link(struct mac_device *mac_dev)
{
- struct tgec_regs __iomem *regs = tgec->regs;
-
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
- *mac_version = ioread32be(&regs->tgec_id);
-
- return 0;
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
}
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable)
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
@@ -681,7 +635,7 @@ int tgec_set_exception(struct fman_mac *tgec,
return 0;
}
-int tgec_init(struct fman_mac *tgec)
+static int tgec_init(struct fman_mac *tgec)
{
struct tgec_cfg *cfg;
enet_addr_t eth_addr;
@@ -764,7 +718,7 @@ int tgec_init(struct fman_mac *tgec)
return 0;
}
-int tgec_free(struct fman_mac *tgec)
+static int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
@@ -774,13 +728,12 @@ int tgec_free(struct fman_mac *tgec)
return 0;
}
-struct fman_mac *tgec_config(struct fman_mac_params *params)
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *tgec;
struct tgec_cfg *cfg;
- void __iomem *base_addr;
- base_addr = params->base_addr;
	/* allocate memory for the 10G MAC (tgec) data structure. */
tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
if (!tgec)
@@ -798,8 +751,8 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = base_addr;
- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
@@ -819,7 +772,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
- tgec->dev_id = params->dev_id;
+ tgec->dev_id = mac_dev;
tgec->fm = params->fm;
/* Save FMan revision */
@@ -827,3 +780,52 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
return tgec;
}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = tgec_adjust_link;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
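
With the comm_mode argument gone, tgec_enable()/tgec_disable() above reduce to a read-modify-write of the big-endian command_config register, always toggling RX and TX together. The same operation folded into one helper, purely as an illustration — the helper name is invented; CMD_CFG_RX_EN/CMD_CFG_TX_EN and struct tgec_regs are the driver's own.

/* Illustration only -- equivalent to the enable/disable pair above. */
static void tgec_rx_tx_set(struct tgec_regs __iomem *regs, bool on)
{
	u32 tmp = ioread32be(&regs->command_config);

	if (on)
		tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
	else
		tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);

	iowrite32be(tmp, &regs->command_config);
}
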
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index b28b20b26148..768b8d165e05 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __TGEC_H
@@ -35,23 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *tgec_config(struct fman_mac_params *params);
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_init(struct fman_mac *tgec);
-int tgec_free(struct fman_mac *tgec);
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable);
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 39ae965cd4f6..7b7526fd7da3 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -54,20 +28,12 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
u8 cell_index;
struct fman *fman;
- struct device_node *internal_phy_node;
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
- struct fixed_phy_status *fixed_link;
u16 speed;
- u16 max_speed;
-
- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};
struct mac_address {
@@ -75,222 +41,21 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
{
- struct mac_device *mac_dev;
- struct mac_priv_s *priv;
-
- mac_dev = handle;
- priv = mac_dev->priv;
-
if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
}
- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
__func__, ex);
}
-static int set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!params->base_addr)
- return -ENOMEM;
-
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
- params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
- params->exception_cb = mac_exception;
- params->event_cb = mac_exception;
- params->dev_id = mac_dev;
- params->internal_phy_node = priv->internal_phy_node;
-
- return 0;
-}
-
-static int tgec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = tgec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 10G MAC, disable Tx ECC exception */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_10G_TX_ECC_ER, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- tgec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int dtsec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = dtsec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- dtsec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int memac_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- if (priv->max_speed == SPEED_10000)
- params.phy_if = PHY_INTERFACE_MODE_XGMII;
-
- mac_dev->fman_mac = memac_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan MEMAC\n");
-
- goto _return;
-
-_return_fm_mac_free:
- memac_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int start(struct mac_device *mac_dev)
-{
- int err;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
- if (!err && phy_dev)
- phy_start(phy_dev);
-
- return err;
-}
-
-static int stop(struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
-
- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-}
-
-static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
{
struct mac_priv_s *priv;
struct mac_address *old_addr, *tmp;
@@ -424,109 +189,6 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
}
EXPORT_SYMBOL(fman_get_pause_cfg);
-static void adjust_link_void(struct mac_device *mac_dev)
-{
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void setup_dtsec(struct mac_device *mac_dev)
-{
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
- mac_dev->set_allmulti = dtsec_set_allmulti;
- mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
-}
-
-static void setup_tgec(struct mac_device *mac_dev)
-{
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
- mac_dev->set_allmulti = tgec_set_allmulti;
- mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
-}
-
-static void setup_memac(struct mac_device *mac_dev)
-{
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
- mac_dev->set_allmulti = memac_set_allmulti;
- mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
-}
-
#define DTSEC_SUPPORTED \
(SUPPORTED_10baseT_Half \
| SUPPORTED_10baseT_Full \
@@ -577,7 +239,7 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
- pdev->dev.parent = priv->dev;
+ pdev->dev.parent = mac_dev->dev;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
@@ -601,9 +263,9 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec" },
- { .compatible = "fsl,fman-xgec" },
- { .compatible = "fsl,fman-memac" },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
MODULE_DEVICE_TABLE(of, mac_match);
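
The mac_match[] table above now carries each MAC type's initialization routine in .data, so mac_probe() can fetch it with of_device_get_match_data() instead of a chain of of_device_is_compatible() checks. The general shape of that dispatch pattern, with invented foo_* names, is:

/* Generic sketch of of_device_id .data dispatch; names are invented. */
static int foo_init_a(struct device *dev) { return 0; }
static int foo_init_b(struct device *dev) { return 0; }

static const struct of_device_id foo_match[] = {
	{ .compatible = "vendor,foo-a", .data = foo_init_a },
	{ .compatible = "vendor,foo-b", .data = foo_init_b },
	{}
};

static int foo_probe(struct platform_device *pdev)
{
	int (*init)(struct device *dev);

	init = of_device_get_match_data(&pdev->dev);
	if (!init)
		return -ENODEV;

	return init(&pdev->dev);
}
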
@@ -611,50 +273,33 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
struct platform_device *of_dev;
- struct resource res;
+ struct resource *res;
struct mac_priv_s *priv;
+ struct fman_mac_params params;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!mac_dev)
+ return -ENOMEM;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!priv)
+ return -ENOMEM;
/* Save private information */
mac_dev->priv = priv;
- priv->dev = dev;
-
- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
- setup_dtsec(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "tbi-handle", 0);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
- setup_tgec(mac_dev);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
- setup_memac(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "pcsphy-handle", 0);
- } else {
- dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
- mac_node);
- err = -EINVAL;
- goto _return;
- }
+ mac_dev->dev = dev;
INIT_LIST_HEAD(&priv->mc_addr_list);
@@ -663,8 +308,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -694,42 +338,33 @@ static int mac_probe(struct platform_device *_of_dev)
of_node_put(dev_node);
/* Get the address of the memory mapped registers */
- err = of_address_to_resource(mac_node, 0, &res);
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
- goto _return_of_get_parent;
+ res = platform_get_mem_or_io(_of_dev, 0);
+ if (!res) {
+ dev_err(dev, "could not get registers\n");
+ return -EINVAL;
}
- mac_dev->res = __devm_request_region(dev,
- fman_get_mem_region(priv->fman),
- res.start, resource_size(&res),
- "mac");
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
- goto _return_of_get_parent;
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman), res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ return err;
}
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!priv->vaddr) {
+ mac_dev->vaddr = devm_ioremap(dev, res->start, resource_size(res));
+ if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- err = -EIO;
- goto _return_of_get_parent;
+ return -EIO;
}
+ mac_dev->vaddr_end = mac_dev->vaddr + resource_size(res);
- if (!of_device_is_available(mac_node)) {
- err = -ENODEV;
- goto _return_of_get_parent;
- }
+ if (!of_device_is_available(mac_node))
+ return -ENODEV;
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
priv->cell_index = (u8)val;
@@ -743,15 +378,13 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = nph;
- goto _return_of_get_parent;
+ return nph;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -760,8 +393,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_node_put;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -793,7 +425,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->phy_if = phy_if;
priv->speed = phy2speed[mac_dev->phy_if];
- priv->max_speed = priv->speed;
+ params.max_speed = priv->speed;
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
@@ -801,7 +433,7 @@ static int mac_probe(struct platform_device *_of_dev)
SUPPORTED_100baseT_Half);
/* Gigabit support (no half-duplex) */
- if (priv->max_speed == 1000)
+ if (params.max_speed == 1000)
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
@@ -810,42 +442,18 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the rest of the PHY information */
mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
-
- err = of_phy_register_fixed_link(mac_node);
- if (err)
- goto _return_of_get_parent;
-
- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
- GFP_KERNEL);
- if (!priv->fixed_link) {
- err = -ENOMEM;
- goto _return_of_get_parent;
- }
-
- mac_dev->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
- of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
- }
- priv->fixed_link->link = phy->link;
- priv->fixed_link->speed = phy->speed;
- priv->fixed_link->duplex = phy->duplex;
- priv->fixed_link->pause = phy->pause;
- priv->fixed_link->asym_pause = phy->asym_pause;
+ params.basex_if = false;
+ params.mac_id = priv->cell_index;
+ params.fm = (void *)priv->fman;
+ params.exception_cb = mac_exception;
+ params.event_cb = mac_exception;
- put_device(&phy->mdio.dev);
- }
-
- err = mac_dev->init(mac_dev);
+ err = init(mac_dev, mac_node, &params);
if (err < 0) {
dev_err(dev, "mac_dev->init() = %d\n", err);
of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
+ return err;
}
/* pause frame autonegotiation enabled */
@@ -872,13 +480,10 @@ static int mac_probe(struct platform_device *_of_dev)
priv->eth_dev = NULL;
}
- goto _return;
+ return err;
_return_of_node_put:
of_node_put(dev_node);
-_return_of_get_parent:
- kfree(priv->fixed_link);
-_return:
return err;
}
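
For reference, the register-mapping sequence that replaces the old of_address_to_resource()/__devm_request_region() code in mac_probe() above is the standard devm platform pattern. A sketch under the assumption that 'parent' stands in for what fman_get_mem_region() returns and that map_mac_regs() is an invented name:

/* Sketch only, not part of this patch. */
static void __iomem *map_mac_regs(struct platform_device *pdev,
				  struct resource *parent)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *vaddr;
	int err;

	res = platform_get_mem_or_io(pdev, 0);		/* "reg" entry 0 */
	if (!res)
		return ERR_PTR(-EINVAL);

	err = devm_request_resource(dev, parent, res);	/* claim under the FMan region */
	if (err)
		return ERR_PTR(err);

	vaddr = devm_ioremap(dev, res->start, resource_size(res));
	if (!vaddr)
		return ERR_PTR(-EIO);

	return vaddr;
}
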
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index daa285a9b8b2..b95d384271bd 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MAC_H
@@ -45,13 +19,16 @@ struct fman_mac;
struct mac_priv_s;
struct mac_device {
- struct resource *res;
+ void __iomem *vaddr;
+ void __iomem *vaddr_end;
+ struct device *dev;
u8 addr[ETH_ALEN];
struct fman_port *port[2];
u32 if_support;
struct phy_device *phy_dev;
phy_interface_t phy_if;
struct device_node *phy_node;
+ struct net_device *net_dev;
bool autoneg_pause;
bool rx_pause_req;
@@ -61,9 +38,8 @@ struct mac_device {
bool promisc;
bool allmulti;
- int (*init)(struct mac_device *mac_dev);
- int (*start)(struct mac_device *mac_dev);
- int (*stop)(struct mac_device *mac_dev);
+ int (*enable)(struct fman_mac *mac_dev);
+ void (*disable)(struct fman_mac *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
@@ -81,6 +57,8 @@ struct mac_device {
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*update_speed)(struct mac_device *mac_dev, int speed);
+
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
};
@@ -97,5 +75,6 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
#endif /* __MAC_H */
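
With init()/start()/stop() removed from struct mac_device, the enable()/disable() ops above now take the fman_mac directly and disable() cannot fail; what the old start()/stop() wrappers reduce to on the caller's side looks roughly like this (caller-side sketch, not code from this patch; the example_* names are invented):

/* Caller-side sketch of driving the new ops. */
static int example_start(struct mac_device *mac_dev)
{
	int err = mac_dev->enable(mac_dev->fman_mac);

	if (!err && mac_dev->phy_dev)
		phy_start(mac_dev->phy_dev);
	return err;
}

static void example_stop(struct mac_device *mac_dev)
{
	if (mac_dev->phy_dev)
		phy_stop(mac_dev->phy_dev);
	mac_dev->disable(mac_dev->fman_mac);
}
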
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index b3dae17e067e..8844a9a04fcf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -791,7 +791,7 @@ static int fs_enet_close(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int fs_get_regs_len(struct net_device *dev)
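
The strlcpy() -> strscpy() conversions in this and the following ethtool get_drvinfo hooks are mechanical: strscpy() always NUL-terminates and returns the copied length, or -E2BIG on truncation, so the result is checkable when it matters. A minimal illustration (the buffer and string here are arbitrary):

/* Illustration only. */
char buf[16];
ssize_t n = strscpy(buf, "some-driver-name", sizeof(buf));

if (n == -E2BIG)
	pr_warn("driver name truncated\n");
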
@@ -883,9 +883,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-extern int fs_mii_connect(struct net_device *dev);
-extern void fs_mii_disconnect(struct net_device *dev);
-
/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c210d0f..61f4b6e50d29 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bf1524b68e..b2def295523a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3233,7 +3233,7 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, NAPI_POLL_WEIGHT);
+ gfar_poll_rx_sq);
netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx_sq, 2);
}
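The gianfar hunk above (and several later hunks in this series) drops the weight argument from netif_napi_add(): the default budget of NAPI_POLL_WEIGHT (64) is now implied, and drivers that really need a different weight use a weighted variant such as netif_napi_add_tx_weight(), which already appears in the diff above. A minimal sketch of the updated call, with my_priv, my_poll and my_register_napi being illustrative names rather than anything from the drivers patched here:

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

/* Stub poll handler: a real one processes up to 'budget' packets and
 * returns how many were handled.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	return 0;
}

static void my_register_napi(struct net_device *dev, struct my_priv *priv)
{
	/* New style: the NAPI weight defaults to NAPI_POLL_WEIGHT (64). */
	netif_napi_add(dev, &priv->napi, my_poll);

	/* Old style, as removed by these hunks:
	 *   netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
	 * Drivers that need a non-default weight now go through the
	 * weighted helpers instead.
	 */
}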
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 81fb68730138..b2b0d3c26fcc 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -163,7 +163,7 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}
/* Return the length of the register structure */
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 823221c912ab..7a4cb4f07c32 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3712,7 +3712,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
- netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll);
dev->mtu = 1500;
dev->max_mtu = 1518;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 69b2b98b1525..601beb93d3b3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -337,8 +337,8 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index ec90da1de030..d7d39a58cd80 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -355,7 +355,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
if (ret)
return ret;
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
else if (is_acpi_node(fwnode))
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index b0d733e9a7c6..4859493471db 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1046,8 +1046,8 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index f247b7ad3a88..095f51c4d9d9 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -339,8 +339,7 @@ static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
return PTR_ERR(irq);
fp->num_rx_irqs++;
- netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll);
}
netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
@@ -1802,16 +1801,14 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
if (rc)
goto unreg_devlink;
- if (fp->dl_port.devlink)
- devlink_port_type_eth_set(&fp->dl_port, netdev);
+ devlink_port_type_eth_set(&fp->dl_port, netdev);
return 0;
unreg_devlink:
ed->netdevs[portid] = NULL;
fun_ktls_cleanup(fp);
- if (fp->dl_port.devlink)
- devlink_port_unregister(&fp->dl_port);
+ devlink_port_unregister(&fp->dl_port);
free_stats:
fun_free_stats_area(fp);
free_rss:
@@ -1830,11 +1827,9 @@ static void fun_destroy_netdev(struct net_device *netdev)
struct funeth_priv *fp;
fp = netdev_priv(netdev);
- if (fp->dl_port.devlink) {
- devlink_port_type_clear(&fp->dl_port);
- devlink_port_unregister(&fp->dl_port);
- }
+ devlink_port_type_clear(&fp->dl_port);
unregister_netdev(netdev);
+ devlink_port_unregister(&fp->dl_port);
fun_ktls_cleanup(fp);
fun_free_stats_area(fp);
fun_free_rss(fp);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 53b7e95213a8..671f51135c26 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -206,9 +206,9 @@ struct funeth_rxq {
#define FUN_QSTAT_READ(q, seq, stats_copy) \
do { \
- seq = u64_stats_fetch_begin(&(q)->syncp); \
+ seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
stats_copy = (q)->stats; \
- } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+ } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
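The FUN_QSTAT_READ change above switches the stats reader to the _irq variants of the u64_stats fetch helpers. These helpers implement a seqcount retry loop: the reader snapshots the counters and retries if a writer raced with it; roughly speaking, the _irq variants additionally guard the 32-bit case where the writer can run in interrupt context. A minimal sketch of the writer/reader pairing, using an illustrative struct my_stats that is not part of funeth:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: datapath updates are bracketed by update_begin/end. */
static void my_stats_add(struct my_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot is observed. */
static void my_stats_read(struct my_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}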
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 50b384910c83..7b9a2d9d9624 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6cafee55efc3..d3e3ac242bfc 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -526,8 +526,7 @@ static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
}
static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
@@ -1274,9 +1273,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
do {
- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8c939628e2d8..2e6461b0ea8b 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84ef494bd60..50c3f5d6611f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -830,8 +830,8 @@ static int hip04_set_coalesce(struct net_device *netdev,
static void hip04_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
@@ -990,7 +990,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
- netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll);
hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index d7e62eca050f..ffcf797dfa90 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1243,7 +1243,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto out_phy_node;
- netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll);
if (HAS_CAP_TSO(priv->hw_cap)) {
ret = hix5hd2_init_sg_desc_queue(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d94cc8c6681f..7cf10d1e2b31 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2109,8 +2109,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
hns_nic_tx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2122,8 +2121,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
hns_nic_rx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 7d4ae467f3ad..abcd7877f7d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -233,6 +233,17 @@ struct hclgevf_mbx_arq_ring {
__le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
+};
+
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 94f80e1c4020..0179fc288f5f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -97,13 +97,15 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
};
-#define hnae3_dev_fd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
-#define hnae3_dev_gro_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
@@ -159,6 +161,12 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_cq_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -187,6 +195,7 @@ struct hns3_mac_stats {
/* hnae3 loop mode */
enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
@@ -223,6 +232,8 @@ enum hnae3_fec_mode {
HNAE3_FEC_AUTO = 0,
HNAE3_FEC_BASER,
HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
HNAE3_FEC_USER_DEF,
};
@@ -270,6 +281,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TC_SCH_INFO,
HNAE3_DBG_CMD_QOS_PAUSE_CFG,
HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
HNAE3_DBG_CMD_QOS_BUF_CFG,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
@@ -308,6 +320,11 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -560,14 +577,17 @@ struct hnae3_ae_ops {
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex);
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
- u8 duplex);
+ u8 duplex, u8 lane_num);
void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode);
int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
@@ -737,6 +757,8 @@ struct hnae3_ae_ops {
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
};
struct hnae3_dcb_ops {
@@ -745,6 +767,8 @@ struct hnae3_dcb_ops {
int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
/* DCBX configuration */
u8 (*getdcbx)(struct hnae3_handle *);
@@ -774,6 +798,8 @@ struct hnae3_tc_info {
bool mqprio_active;
};
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -784,6 +810,9 @@ struct hnae3_knic_private_info {
u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
@@ -815,6 +844,7 @@ struct hnae3_roce_private_info {
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* mulitcast promisc enabled by user */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c8b151d29f53..f671a63cecde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -52,9 +52,9 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
{
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
}
@@ -91,6 +91,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
@@ -150,6 +151,10 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -162,6 +167,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
static void
@@ -220,8 +226,10 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= ae_dev->pdev->revision;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
hclge_comm_parse_capability(ae_dev, is_pf, resp);
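The hclge_comm_cmd.c hunks above extend the table that translates firmware capability bits (GRO, FD, FEC statistics, lane number) into driver capability bits. The translation itself is a simple table-driven loop; a self-contained sketch of the idea follows, with the bit positions and names chosen only for illustration, not taken from the firmware interface:

#include <stdio.h>

/* Each entry pairs a firmware capability bit index with the corresponding
 * driver capability bit index, mirroring the hclge_comm_caps_bit_map pattern.
 */
struct caps_bit_map {
	unsigned int fw_bit;
	unsigned int drv_bit;
};

static const struct caps_bit_map caps_map[] = {
	{ 20, 0 },	/* e.g. firmware GRO bit  -> driver GRO capability */
	{ 21, 1 },	/* e.g. firmware FD bit   -> driver FD capability  */
	{ 25, 2 },	/* e.g. FEC statistics */
	{ 27, 3 },	/* e.g. lane number */
};

static unsigned long parse_caps(unsigned long fw_caps)
{
	unsigned long drv_caps = 0;
	size_t i;

	for (i = 0; i < sizeof(caps_map) / sizeof(caps_map[0]); i++)
		if (fw_caps & (1UL << caps_map[i].fw_bit))
			drv_caps |= 1UL << caps_map[i].drv_bit;

	return drv_caps;
}

int main(void)
{
	/* Firmware reports bits 20 (GRO) and 25 (FEC stats). */
	printf("driver caps: %#lx\n", parse_caps((1UL << 20) | (1UL << 25)));
	return 0;
}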
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 7a7d4cf9bf35..b1f9383b418f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -20,6 +20,7 @@
#define HCLGE_COMM_PHY_IMP_EN_B 2
#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
#define hclge_comm_dev_phy_imp_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
@@ -102,6 +103,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
@@ -339,6 +341,10 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index d2ec4c573bf8..3b6dbf158b98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -56,6 +56,32 @@ static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_delapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
@@ -83,6 +109,8 @@ static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
.ieee_setets = hns3_dcbnl_ieee_setets,
.ieee_getpfc = hns3_dcbnl_ieee_getpfc,
.ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
.getdcbx = hns3_dcbnl_getdcbx,
.setdcbx = hns3_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 93aeb615191d..66feb23f7b7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -106,6 +106,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.init = hns3_dbg_common_file_init,
},
{
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
@@ -395,6 +402,12 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support modify vlan filter state",
.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
}
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 35d70041b9e8..4cb2421e71a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2963,6 +2963,48 @@ static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -2988,6 +3030,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -3271,12 +3314,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (hnae3_ae_dev_gro_supported(ae_dev))
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->features |= NETIF_F_NTUPLE;
- }
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4650,7 +4692,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
- hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ hns3_nic_common_poll);
}
return 0;
@@ -5782,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev,
return 0;
}
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
static const struct hns3_hw_error_info hns3_hw_err[] = {
{ .type = HNAE3_PPU_POISON_ERROR,
.msg = "PPU poison" },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 4a3253692dcc..133a054af6b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -744,4 +744,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
enum dim_cq_period_mode tx_mode,
enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 4c7988e308a2..cdf76fb58d45 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -69,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -95,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -304,6 +304,10 @@ out:
static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -322,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
}
-static void hns3_selftest_prepare(struct net_device *ndev,
- bool if_running, int (*st_param)[2])
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test start\n");
-
- hns3_set_selftest_param(h, st_param);
-
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -371,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
-
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test end\n");
}
static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
struct ethtool_test *eth_test, u64 *data)
{
- int test_index = 0;
+ int test_index = HNAE3_LOOP_APP;
u32 i;
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
@@ -401,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
}
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
/**
* hns3_self_test - self test
* @ndev: net device
@@ -410,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
if (hns3_nic_resetting(ndev)) {
@@ -418,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev,
return;
}
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
return;
- hns3_selftest_prepare(ndev, if_running, st_param);
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* external loopback test requires that the link is up and the duplex is
+ * full, do external test first to reduce the whole test time
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
hns3_do_selftest(ndev, st_param, eth_test, data);
hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -712,7 +739,8 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
- &cmd->base.duplex);
+ &cmd->base.duplex,
+ &cmd->lanes);
/* 2.get link mode */
if (ops->get_link_mode)
@@ -794,6 +822,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
u8 autoneg;
u32 speed;
u8 duplex;
@@ -806,9 +835,9 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
return 0;
if (ops->get_ksettings_an_result) {
- ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
- cmd->base.duplex == duplex)
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
return 0;
}
@@ -845,10 +874,14 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
netif_dbg(handle, drv, netdev,
- "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
netdev->phydev ? "phy" : "mac",
- cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev) {
@@ -886,7 +919,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (ops->cfg_mac_speed_dup_h)
ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, (u8)(cmd->lanes));
return ret;
}
@@ -1612,6 +1645,19 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
h->msg_enable = msg_level;
}
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
+
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
@@ -1621,12 +1667,12 @@ static unsigned int loc_to_eth_fec(u8 loc_fec)
eth_fec |= ETHTOOL_FEC_AUTO;
if (loc_fec & BIT(HNAE3_FEC_RS))
eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
if (loc_fec & BIT(HNAE3_FEC_BASER))
eth_fec |= ETHTOOL_FEC_BASER;
-
- /* if nothing is set, then FEC is off */
- if (!eth_fec)
- eth_fec = ETHTOOL_FEC_OFF;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
return eth_fec;
}
@@ -1637,12 +1683,13 @@ static unsigned int eth_to_loc_fec(unsigned int eth_fec)
u32 loc_fec = 0;
if (eth_fec & ETHTOOL_FEC_OFF)
- return loc_fec;
-
+ loc_fec |= BIT(HNAE3_FEC_NONE);
if (eth_fec & ETHTOOL_FEC_AUTO)
loc_fec |= BIT(HNAE3_FEC_AUTO);
if (eth_fec & ETHTOOL_FEC_RS)
loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
if (eth_fec & ETHTOOL_FEC_BASER)
loc_fec |= BIT(HNAE3_FEC_BASER);
@@ -1668,6 +1715,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
fec->fec = loc_to_eth_fec(fec_ability);
fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
return 0;
}
@@ -2051,6 +2100,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
@@ -2081,6 +2131,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
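The hns3_ethtool.c hunks above only run the external loopback test when userspace sets ETH_TEST_FL_EXTERNAL_LB, and report completion via ETH_TEST_FL_EXTERNAL_LB_DONE. A hedged sketch of how userspace can request that flag through the legacy ETHTOOL_TEST ioctl; the fixed result-buffer size is a simplifying assumption rather than what the ethtool utility actually does:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#define MAX_TESTS 64	/* assumption: enough room for the driver's results */

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	struct ethtool_test *test;
	struct ifreq ifr;
	int fd, i;

	test = calloc(1, sizeof(*test) + MAX_TESTS * sizeof(__u64));
	if (!test)
		return 1;
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE | ETH_TEST_FL_EXTERNAL_LB;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)test;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_TEST");
		return 1;
	}

	printf("failed: %s, external loopback executed: %s\n",
	       (test->flags & ETH_TEST_FL_FAILED) ? "yes" : "no",
	       (test->flags & ETH_TEST_FL_EXTERNAL_LB_DONE) ? "yes" : "no");
	for (i = 0; i < (int)test->len && i < MAX_TESTS; i++)
		printf("test[%d] = %llu\n", i, (unsigned long long)test->data[i]);

	close(fd);
	free(test);
	return 0;
}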
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f9d89511eb32..43cada51d8cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -321,7 +321,9 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
#define HCLGE_TQP_ENABLE_B 0
@@ -347,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -359,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 69b8673436ca..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -359,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -543,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9b870e79c290..142415c84c6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,6 +14,8 @@ static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -1115,10 +1117,11 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
return 0;
}
+#define HCLGE_DBG_TC_MASK 0x0F
+
static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
int len)
{
-#define HCLGE_DBG_TC_MASK 0x0F
#define HCLGE_DBG_TC_BIT_WIDTH 4
struct hclge_qos_pri_map_cmd *pri_map;
@@ -1152,6 +1155,58 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
return 0;
}
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp setting use bd0, high 32 dscp setting use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting has 4 bits, so each byte saves two dscp
+ * setting
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
+
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
@@ -1517,7 +1572,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
char *tcam_buf;
int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
return -EOPNOTSUPP;
@@ -1585,6 +1640,9 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
u64 cnt;
u8 i;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
pos += scnprintf(buf + pos, len - pos,
"func_id\thit_times\n");
@@ -2374,6 +2432,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.dbg_dump = hclge_dbg_dump_qos_pri_map,
},
{
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fae79764dc44..6962a9d69cf8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -71,6 +71,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -148,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -679,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -715,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -770,6 +778,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size, p);
p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -1003,6 +1016,27 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
@@ -1101,34 +1135,36 @@ static void hclge_convert_setting_kr(u16 speed_ability,
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If firmware has reported fec_ability, don't need to convert by speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
@@ -1574,7 +1610,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1617,7 +1653,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -2589,7 +2625,7 @@ static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2613,6 +2649,7 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2624,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2730,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2744,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2796,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2988,6 +3181,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3037,7 +3233,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3119,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3302,13 +3499,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -5334,7 +5531,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -6339,7 +6536,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6395,7 +6592,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6431,9 +6628,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6458,6 +6652,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6473,7 +6670,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6497,7 +6694,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6715,7 +6912,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6778,7 +6975,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6878,7 +7075,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when there is already fd rule existed add by user,
@@ -7167,6 +7364,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7220,6 +7423,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7282,6 +7488,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7705,7 +7914,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7732,6 +7941,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -10793,7 +11004,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -10804,6 +11015,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -11443,6 +11656,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11510,6 +11727,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -12763,6 +12981,21 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
}
}
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = h->kinfo.tc_map_mode;
+ if (priority)
+ *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ h->kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12786,6 +13019,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_comm_get_rss_key_size,
@@ -12865,6 +13099,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
.clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12872,7 +13107,7 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
@@ -12887,7 +13122,7 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 18caddd541f8..495b639b0dc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -216,6 +216,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -258,6 +259,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -488,6 +490,26 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
@@ -826,6 +848,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -1070,7 +1093,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
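For reference, a minimal stand-alone C sketch (hypothetical names, not part of the patch) of the anonymous-union layout used by struct hclge_fec_stats above: hclge_parse_fec_stats_lanes() accumulates the firmware counters into the flat per_lanes[] array, while hclge_get_fec_stats_lanes() reads the same memory back through the two named per-lane arrays.

/* user-space sketch of the union aliasing; sizes follow
 * HCLGE_FEC_STATS_MAX_LANES (8) from the patch
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_LANES 8

struct fec_lane_stats {
	union {
		struct {
			uint64_t corr_per_lane[MAX_LANES];
			uint64_t uncorr_per_lane[MAX_LANES];
		};
		uint64_t per_lanes[MAX_LANES * 2];
	};
};

int main(void)
{
	struct fec_lane_stats s = { 0 };
	unsigned int i;

	/* parser view: accumulate all 16 counters into the flat array */
	for (i = 0; i < MAX_LANES * 2; i++)
		s.per_lanes[i] += i;

	/* ethtool view: the same memory, read as two named arrays */
	for (i = 0; i < MAX_LANES; i++)
		printf("lane %u: corr=%llu uncorr=%llu\n", i,
		       (unsigned long long)s.corr_per_lane[i],
		       (unsigned long long)s.uncorr_per_lane[i]);
	return 0;
}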
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index e1012f7f9b73..a7b06c63143c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -779,17 +779,284 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+ /* PF driver should not reply IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
- bool is_del = false;
unsigned int flag;
- int ret = 0;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
@@ -814,152 +1081,16 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
trace_hclge_pf_mbx_get(hdev, req);
/* clear the resp_msg before processing every mailbox message */
memset(&resp_msg, 0, sizeof(resp_msg));
-
- switch (req->msg.code) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_GET_RING_VECTOR_MAP:
- ret = hclge_get_vf_ring_vector_map(vport, req,
- &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get VF ring vector map\n",
- ret);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- hclge_set_vf_promisc_mode(vport, req);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- hclge_get_vf_queue_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- hclge_get_vf_queue_depth(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_BASIC_INFO:
- hclge_get_basic_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_push_vf_link_status(vport);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "failed to inform link stat to VF, ret = %d\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_RESET:
- ret = hclge_reset_vf(vport);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, is_del);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- hclge_get_vf_media_type(vport, &resp_msg);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- hclge_get_vf_mac_addr(vport, &resp_msg);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- case HCLGE_MBX_HANDLE_VF_TBL:
- hclge_handle_vf_tbl(vport, req);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg.code);
- break;
- }
-
- /* PF driver should not reply IMP */
- if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
- req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
- resp_msg.status = ret;
- if (time_is_before_jiffies(hdev->last_mbx_scheduled +
- HCLGE_MBX_SCHED_TIMEOUT))
- dev_warn(&hdev->pdev->dev,
- "resp vport%u mbx(%u,%u) late\n",
- req->mbx_src_vfid,
- req->msg.code,
- req->msg.subcode);
-
- hclge_gen_resp_to_vf(vport, req, &resp_msg);
- }
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
-
- /* reinitialize ret after complete the mbx message processing */
- ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
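For reference, a stand-alone C sketch (hypothetical opcodes and names, not part of the patch) of the table-driven dispatch pattern that replaces the switch statement in hclge_mbx_handler(): each mailbox code indexes an array of handler pointers, and an unset slot falls through to an "unsupported" error, as hclge_mbx_request_handling() does above.

#include <stdio.h>

#define OPCODE_MAX 4	/* stands in for HCLGE_MBX_OPCODE_MAX */

struct mbx_param {
	unsigned int code;	/* mailbox message code */
};

typedef int (*mbx_ops_fn)(struct mbx_param *param);

static int handle_reset(struct mbx_param *param)
{
	(void)param;
	return 0;
}

static int handle_set_mtu(struct mbx_param *param)
{
	(void)param;
	return 0;
}

/* one handler per opcode; unhandled codes stay NULL */
static const mbx_ops_fn mbx_ops_list[OPCODE_MAX] = {
	[1] = handle_reset,	/* e.g. a reset opcode */
	[3] = handle_set_mtu,	/* e.g. a set-MTU opcode */
};

static int dispatch(struct mbx_param *param)
{
	mbx_ops_fn fn;

	if (param->code >= OPCODE_MAX)
		return -1;
	fn = mbx_ops_list[param->code];
	if (!fn) {
		fprintf(stderr, "unsupported mailbox code %u\n", param->code);
		return -1;
	}
	return fn(param);
}

int main(void)
{
	struct mbx_param p = { .code = 1 };

	return dispatch(&p);
}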
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03d63b6a9b2b..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 2f33b036a47a..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -248,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -266,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+ /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each dscp setting has 4 bits, so each byte stores two dscp
+ * settings
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -1275,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1646,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
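For reference, a stand-alone C sketch (hypothetical buffer names, not part of the patch) of the nibble packing performed by hclge_dscp_to_tc_map() above: each DSCP value carries a 4-bit TC id, two per byte, with DSCP 0..31 written to the first descriptor and DSCP 32..63 to the second, mirroring HCLGE_DSCP_TC_SHIFT(n) = ((n) & 1) * 4.

#include <stdint.h>
#include <stdio.h>

#define MAX_DSCP		64
#define DSCP_PER_BD		(MAX_DSCP / 2)
#define DSCP_TC_SHIFT(n)	(((n) & 1) * 4)

int main(void)
{
	uint8_t bd0[DSCP_PER_BD / 2] = { 0 };	/* DSCP 0..31  */
	uint8_t bd1[DSCP_PER_BD / 2] = { 0 };	/* DSCP 32..63 */
	uint8_t dscp_to_tc[MAX_DSCP];
	unsigned int i;

	/* example mapping: TC id = DSCP modulo 8, always fits in 4 bits */
	for (i = 0; i < MAX_DSCP; i++)
		dscp_to_tc[i] = i % 8;

	/* pack two 4-bit TC ids per byte, low DSCP in bd0, high in bd1 */
	for (i = 0; i < DSCP_PER_BD; i++) {
		bd0[i >> 1] |= dscp_to_tc[i] << DSCP_TC_SHIFT(i);
		bd1[i >> 1] |= dscp_to_tc[i + DSCP_PER_BD] << DSCP_TC_SHIFT(i);
	}

	/* byte 0 of bd0 holds DSCP 0 (low nibble) and DSCP 1 (high nibble) */
	printf("bd0[0] = 0x%02x\n", bd0[0]);
	return 0;
}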
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index d943943912f7..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -30,6 +30,9 @@ enum hclge_opcode_type;
#define HCLGE_TM_PF_MAX_PRI_NUM 8
#define HCLGE_TM_PF_MAX_QSET_NUM 8
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -262,4 +265,6 @@ int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 26f87330173e..db6f7cdba958 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2125,7 +2125,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
@@ -3177,7 +3177,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle)
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed,
- u8 *duplex)
+ u8 *duplex, u32 *lane_num)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3429,7 +3429,7 @@ static struct hnae3_ae_algo ae_algovf = {
.pdev_id_table = ae_algovf_pci_tbl,
};
-static int hclgevf_init(void)
+static int __init hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
@@ -3444,7 +3444,7 @@ static int hclgevf_init(void)
return 0;
}
-static void hclgevf_exit(void)
+static void __exit hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
index e9e00cfa1329..e10f739d8339 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -12,7 +12,6 @@
#define TBL_ID_FUNC_CFG_SM_INST 1
#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
-#define HINIC_FUNCTION_CONFIGURE_TABLE 1
struct hinic_cmd_lt_rd {
u8 status;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 93192f58ac88..f4b680286911 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -55,7 +55,6 @@
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
-#define OBJ_STR_MAX_LEN 32
struct hw2ethtool_link_mode {
enum ethtool_link_mode_bit_indices link_mode_bit;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index a627237f694b..78190e88cd75 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -82,11 +82,6 @@
struct hinic_func_to_io, \
cmdqs)
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
enum completion_format {
COMPLETE_DIRECT = 0,
COMPLETE_SGE = 1,
@@ -509,8 +504,8 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
*
* Return 0 - Success, negative - Failure
**/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
+static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
+ enum hinic_set_arm_qtype q_type, u32 q_id)
{
struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
struct hinic_hwif *hwif = cmdqs->hwif;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 9c413e963a04..ff09cf0ed52b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -177,9 +177,6 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
enum hinic_mod_type mod, u8 cmd,
struct hinic_cmdq_buf *buf_in, u64 *out_param);
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
void __iomem **db_area);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index 7e84e4e33fff..d56e7413ace0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -22,7 +22,6 @@
(HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
(HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 2127a48749a8..94f470556295 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -29,7 +29,6 @@
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"
-#define IO_STATUS_TIMEOUT 100
#define OUTBOUND_STATE_TIMEOUT 100
#define DB_STATE_TIMEOUT 100
@@ -42,11 +41,6 @@ enum intr_type {
INTR_MSIX_TYPE,
};
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
/**
* parse_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -837,8 +831,8 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
return 0;
}
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
{
u16 out_size = sizeof(*interrupt_info);
struct hinic_pfhwdev *pfhwdev;
@@ -1041,13 +1035,6 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
hinic_free_hwif(hwdev->hwif);
}
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->max_qps;
-}
-
/**
* hinic_hwdev_num_qps - return the number QPs available for use
* @hwdev: the NIC HW device
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 416492e48274..d2d89b0a5ef0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -566,8 +566,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devli
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev);
-
int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
@@ -587,9 +585,6 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
enum hinic_msix_state flag);
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info);
-
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
struct hinic_msix_config *interrupt_info);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 0428faa68e80..88567305d06e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -58,39 +58,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
}
/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
* hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
* @hwif: the HW interface of a pci function device
* @msix_index: msix_index
@@ -115,8 +82,6 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
* hinic_set_pf_action - set action on pf channel
* @hwif: the HW interface of a pci function device
* @action: action on pf channel
- *
- * Return 0 - Success, negative - Failure
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c06f2253151e..3d588896a367 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -131,10 +131,6 @@
(((u32)(val) & HINIC_MSIX_##member##_MASK) << \
HINIC_MSIX_##member##_SHIFT)
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
@@ -269,11 +265,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
u8 lli_timer_cfg, u8 lli_credit_limit,
u8 resend_timer);
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
enum hinic_msix_state flag);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 5078c0c73863..3f9c31d29215 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -117,7 +117,6 @@ enum hinic_mbox_tx_status {
#define MBOX_WB_STATUS_MASK 0xFF
#define MBOX_WB_ERROR_CODE_MASK 0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
-#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
#define MBOX_WB_STATUS_NOT_FINISHED 0x00
#define MBOX_STATUS_FINISHED(wb) \
@@ -130,11 +129,8 @@ enum hinic_mbox_tx_status {
#define SEQ_ID_START_VAL 0
#define SEQ_ID_MAX_VAL 42
-#define DST_AEQ_IDX_DEFAULT_VAL 0
-#define SRC_AEQ_IDX_DEFAULT_VAL 0
#define NO_DMA_ATTRIBUTE_VAL 0
-#define HINIC_MGMT_RSP_AEQN 0
#define HINIC_MBOX_RSP_AEQN 2
#define HINIC_MBOX_RECV_AEQN 0
@@ -146,7 +142,6 @@ enum hinic_mbox_tx_status {
#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
-#define MBOX_RESPONSE_ERROR 0x1
#define MBOX_MSG_ID_MASK 0xFF
#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
@@ -621,7 +616,7 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
return false;
}
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
u64 mbox_header = *((u64 *)header);
@@ -649,7 +644,7 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_send_mbox *send_mbox;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
index 46953190d29e..33ac7814d3b3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -150,10 +150,6 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod);
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
-
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
-
int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 336248aa2e48..537a8098bc4e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -472,8 +472,7 @@ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
return atomic_read(&wq->delta) - 1;
}
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
+static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
{
u32 ctrl_size, task_size, bufdesc_size;
@@ -588,18 +587,16 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
/**
* hinic_sq_prepare_wqe - prepare wqe before insert to the queue
* @sq: send queue
- * @prod_idx: pi value
* @sq_wqe: wqe to prepare
* @sges: sges for use by the wqe for send for buf addresses
* @nr_sges: number of sges
**/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
+ struct hinic_sge *sges, int nr_sges)
{
int i;
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
+ sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
sq_prepare_task(&sq_wqe->task);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 0dfa51ad5855..178dcc874370 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -175,9 +175,8 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
u32 l4_len,
u32 offset, u32 ip_ident, u32 mss);
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
+ struct hinic_sge *sges, int nr_sges);
void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
unsigned int cos);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 4daf6bf291ec..e1a1735c00c1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -175,8 +175,6 @@ static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
/**
* cmdq_free_page - free page from cmdq
* @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
**/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index f4b6d2c1061f..c6bdeed5606e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -261,23 +261,6 @@
#define HINIC_RSS_TYPE_GET(val, member) \
(((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
enum hinic_l3_offload_type {
L3TYPE_UNKNOWN = 0,
IPV6_PKT = 1,
@@ -305,18 +288,10 @@ enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
};
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
enum hinic_l2type {
HINIC_L2TYPE_ETH = 0,
};
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
struct hinic_cmdq_header {
u32 header_info;
u32 saved_data;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index c23ee2ddbce3..e1f54a2f28b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -960,8 +960,6 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @in_size: input size
* @buf_out: output buffer
* @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
**/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
@@ -1382,8 +1380,6 @@ err_pci_regions:
return err;
}
-#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
-
static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
{
struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index a866bea65110..d649c6e323c8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -50,7 +50,7 @@
* hinic_rxq_clean_stats - Clean the statistics of specific queue
* @rxq: Logical Rx Queue
**/
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 507dcbae9085..8f7bd6a049bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -41,8 +41,6 @@ struct hinic_rxq {
struct napi_struct napi;
};
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
-
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index df555847afb5..a5f08b969e3f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -24,6 +24,7 @@ MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto,
#define HINIC_VLAN_PRIORITY_SHIFT 13
#define HINIC_ADD_VLAN_IN_MAC 0x8000
#define HINIC_TX_RATE_TABLE_FULL 12
+#define HINIC_MAX_QOS 7
static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
u16 vlan_id, u16 func_id)
@@ -774,7 +775,7 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u16 vlanprio, cur_vlanprio;
sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
@@ -820,7 +821,7 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
cur_trust = nic_io->vf_infos[vf].trust;
/* same request, so just return success */
- if ((setting && cur_trust) || (!setting && !cur_trust))
+ if (setting == cur_trust)
return 0;
err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
@@ -940,7 +941,7 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
/* same request, so just return success */
- if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ if (setting == cur_spoofchk)
return 0;
err = hinic_set_vf_spoofchk(sriov_info->hwdev,
@@ -1131,8 +1132,8 @@ static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
}
-static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
- u16 start_vf_id, u16 end_vf_id)
+static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
{
struct hinic_dev *nic_dev;
u16 func_idx, idx;
@@ -1145,8 +1146,6 @@ static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
HINIC_HW_WQ_PAGE_SIZE);
hinic_clear_vf_infos(nic_dev, idx);
}
-
- return 0;
}
int hinic_vf_func_init(struct hinic_hwdev *hwdev)
@@ -1293,7 +1292,7 @@ int hinic_pci_sriov_disable(struct pci_dev *pdev)
return 0;
}
-int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct hinic_sriov_info *sriov_info;
int err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index ba627a362f9a..d4d4e63d31ea 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -98,8 +98,6 @@ void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
int hinic_pci_sriov_disable(struct pci_dev *dev);
-int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
-
int hinic_vf_func_init(struct hinic_hwdev *hwdev);
void hinic_vf_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 5051cdff2384..e91476c8ff8b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -74,7 +74,7 @@ enum hinic_offload_type {
* hinic_txq_clean_stats - Clean the statistics of specific queue
* @txq: Logical Tx Queue
**/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}
/**
@@ -530,7 +530,7 @@ netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
@@ -614,7 +614,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
if (err)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index b3c8657774a7..91dc778362f3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -40,8 +40,6 @@ struct hinic_txq {
struct napi_struct napi;
};
-void hinic_txq_clean_stats(struct hinic_txq *txq);
-
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6cb86032ce46..1db5b6790a41 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -159,8 +159,8 @@ static int ehea_nway_reset(struct net_device *dev)
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 ehea_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 5dc302880f5f..294bdbbeacc3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1546,7 +1546,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
kfree(init_attr);
- netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
+ netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);
ret = 0;
goto out;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index fbea9f7efe8c..9b08e41ccc29 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2284,8 +2284,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
dev->cell_index, dev->ofdev->dev.of_node);
}
@@ -2979,11 +2979,9 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->ofdev->dev, err,
+ "Can't get valid [local-]mac-address from OF !\n");
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5c6a04d29f5b..3b14dc93f59d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -141,6 +141,13 @@ static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
+static unsigned int ibmveth_real_max_tx_queues(void)
+{
+ unsigned int n_cpu = num_online_cpus();
+
+ return min(n_cpu, IBMVETH_MAX_QUEUES);
+}
+
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
u32 pool_index, u32 pool_size,
@@ -456,6 +463,38 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
+static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+}
+
+static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
+ GFP_KERNEL);
+ if (!adapter->tx_ltb_ptr[idx]) {
+ netdev_err(adapter->netdev,
+ "unable to allocate tx long term buffer\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
+ adapter->tx_ltb_ptr[idx],
+ adapter->tx_ltb_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
+ netdev_err(adapter->netdev,
+ "unable to DMA map tx long term buffer\n");
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -538,6 +577,11 @@ static int ibmveth_open(struct net_device *netdev)
goto out_unmap_buffer_list;
}
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ if (ibmveth_allocate_tx_ltb(adapter, i))
+ goto out_free_tx_ltb;
+ }
+
adapter->rx_queue.index = 0;
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
@@ -595,25 +639,15 @@ static int ibmveth_open(struct net_device *netdev)
rc = -ENOMEM;
- adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
- netdev->mtu + IBMVETH_BUFF_OH,
- &adapter->bounce_buffer_dma, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to alloc bounce buffer\n");
- goto out_free_irq;
- }
-
netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
netdev_dbg(netdev, "open complete\n");
return 0;
-out_free_irq:
- free_irq(netdev->irq, netdev);
out_free_buffer_pools:
while (--i >= 0) {
if (adapter->rx_buff_pool[i].active)
@@ -623,6 +657,12 @@ out_free_buffer_pools:
out_unmap_filter_list:
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
+
+out_free_tx_ltb:
+ while (--i >= 0) {
+ ibmveth_free_tx_ltb(adapter, i);
+ }
+
out_unmap_buffer_list:
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -651,7 +691,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
if (!adapter->pool_config)
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -685,9 +725,8 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_free_coherent(&adapter->vdev->dev,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- adapter->bounce_buffer, adapter->bounce_buffer_dma);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ ibmveth_free_tx_ltb(adapter, i);
netdev_dbg(netdev, "close complete\n");
@@ -727,8 +766,8 @@ static void ibmveth_init_link_settings(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
- strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
+ strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
@@ -953,6 +992,69 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
+static void ibmveth_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ channels->max_tx = ibmveth_real_max_tx_queues();
+ channels->tx_count = netdev->real_num_tx_queues;
+
+ channels->max_rx = netdev->real_num_rx_queues;
+ channels->rx_count = netdev->real_num_rx_queues;
+}
+
+static int ibmveth_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int old = netdev->real_num_tx_queues,
+ goal = channels->tx_count;
+ int rc, i;
+
+ /* If ndo_open has not been called yet then don't allocate, just set
+ * desired netdev_queue's and return
+ */
+ if (!(netdev->flags & IFF_UP))
+ return netif_set_real_num_tx_queues(netdev, goal);
+
+ /* We have IBMVETH_MAX_QUEUES netdev_queue's allocated
+ * but we may need to alloc/free the ltb's.
+ */
+ netif_tx_stop_all_queues(netdev);
+
+ /* Allocate any queue that we need */
+ for (i = old; i < goal; i++) {
+ if (adapter->tx_ltb_ptr[i])
+ continue;
+
+ rc = ibmveth_allocate_tx_ltb(adapter, i);
+ if (!rc)
+ continue;
+
+ /* if something goes wrong, free everything we just allocated */
+ netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ break;
+ }
+ rc = netif_set_real_num_tx_queues(netdev, goal);
+ if (rc) {
+ netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ }
+ /* Free any that are no longer needed */
+ for (i = old; i > goal; i--) {
+ if (adapter->tx_ltb_ptr[i - 1])
+ ibmveth_free_tx_ltb(adapter, i - 1);
+ }
+
+ netif_tx_wake_all_queues(netdev);
+
+ return rc;
+}
+
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -961,6 +1063,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ethtool_stats = ibmveth_get_ethtool_stats,
.get_link_ksettings = ibmveth_get_link_ksettings,
.set_link_ksettings = ibmveth_set_link_ksettings,
+ .get_channels = ibmveth_get_channels,
+ .set_channels = ibmveth_set_channels
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -969,7 +1073,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs, unsigned long mss)
+ unsigned long desc, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -982,12 +1086,9 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
retry_count = 1024;
correlator = 0;
do {
- ret = h_send_logical_lan(adapter->vdev->unit_address,
- descs[0].desc, descs[1].desc,
- descs[2].desc, descs[3].desc,
- descs[4].desc, descs[5].desc,
- correlator, &correlator, mss,
- adapter->fw_large_send_support);
+ ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -1020,34 +1121,13 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- unsigned int desc_flags;
- union ibmveth_buf_desc descs[6];
- int last, i;
- int force_bounce = 0;
- dma_addr_t dma_addr;
+ unsigned int desc_flags, total_bytes;
+ union ibmveth_buf_desc desc;
+ int i, queue_num = skb_get_queue_mapping(skb);
unsigned long mss = 0;
if (ibmveth_is_packet_unsupported(skb, netdev))
goto out;
-
- /* veth doesn't handle frag_list, so linearize the skb.
- * When GRO is enabled SKB's can have frag_list.
- */
- if (adapter->is_active_trunk &&
- skb_has_frag_list(skb) && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
- /*
- * veth handles a maximum of 6 segments including the header, so
- * we have to linearize the skb if there are more than this.
- */
- if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
/* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
((skb->protocol == htons(ETH_P_IP) &&
@@ -1077,56 +1157,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags |= IBMVETH_BUF_LRG_SND;
}
-retry_bounce:
- memset(descs, 0, sizeof(descs));
-
- /*
- * If a linear packet is below the rx threshold then
- * copy it into the static bounce buffer. This avoids the
- * cost of a TCE insert and remove.
- */
- if (force_bounce || (!skb_is_nonlinear(skb) &&
- (skb->len < tx_copybreak))) {
- skb_copy_from_linear_data(skb, adapter->bounce_buffer,
- skb->len);
-
- descs[0].fields.flags_len = desc_flags | skb->len;
- descs[0].fields.address = adapter->bounce_buffer_dma;
-
- if (ibmveth_send(adapter, descs, 0)) {
- adapter->tx_send_failed++;
- netdev->stats.tx_dropped++;
- } else {
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- }
-
- goto out;
- }
-
- /* Map the header */
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed;
-
- descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
- descs[0].fields.address = dma_addr;
-
- /* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed_frags;
-
- descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
- descs[i+1].fields.address = dma_addr;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
@@ -1143,7 +1173,36 @@ retry_bounce:
}
}
- if (ibmveth_send(adapter, descs, mss)) {
+ /* Copy header into mapped buffer */
+ if (unlikely(skb->len > adapter->tx_ltb_size)) {
+ netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
+ skb->len, adapter->tx_ltb_size);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
+ total_bytes = skb_headlen(skb);
+ /* Copy frags into mapped buffers */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
+ skb_frag_address_safe(frag), skb_frag_size(frag));
+ total_bytes += skb_frag_size(frag);
+ }
+
+ if (unlikely(total_bytes != skb->len)) {
+ netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
+ skb->len, total_bytes);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ desc.fields.flags_len = desc_flags | skb->len;
+ desc.fields.address = adapter->tx_ltb_dma[queue_num];
+ /* finish writing to long_term_buff before VIOS accessing it */
+ dma_wmb();
+
+ if (ibmveth_send(adapter, desc.desc, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1151,41 +1210,11 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
out:
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
-map_failed_frags:
- last = i+1;
- for (i = 1; i < last; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-map_failed:
- if (!firmware_has_feature(FW_FEATURE_CMO))
- netdev_err(netdev, "tx: unable to map xmit buffer\n");
- adapter->tx_map_failed++;
- if (skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
- force_bounce = 1;
- goto retry_bounce;
}
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
@@ -1568,6 +1597,8 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
+ /* add size of mapped tx buffers */
+ ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1660,8 +1691,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
-
+ netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
if (!netdev)
return -ENOMEM;
@@ -1727,6 +1757,17 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
+ rc = netif_set_real_num_tx_queues(netdev, ibmveth_real_max_tx_queues());
+ if (rc) {
+ netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
+ rc);
+ free_netdev(netdev);
+ return rc;
+ }
+ adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
+ adapter->tx_ltb_ptr[i] = NULL;
+
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
netdev_dbg(netdev, "registering netdev...\n");
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 27dfff200166..daf6f615c03f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -46,23 +46,23 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+/* FW allows us to send 6 descriptors but we only use one so mark
+ * the other 5 as unused (0)
+ */
static inline long h_send_logical_lan(unsigned long unit_address,
- unsigned long desc1, unsigned long desc2, unsigned long desc3,
- unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out,
- unsigned long mss, unsigned long large_send_support)
+ unsigned long desc, unsigned long corellator_in,
+ unsigned long *corellator_out, unsigned long mss,
+ unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
if (large_send_support)
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in, mss);
+ desc, 0, 0, 0, 0, 0, corellator_in, mss);
else
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in);
+ desc, 0, 0, 0, 0, 0, corellator_in);
*corellator_out = retbuf[0];
@@ -98,6 +98,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
+#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
+#define IBMVETH_MAX_QUEUES 16U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -137,6 +139,9 @@ struct ibmveth_adapter {
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
@@ -145,8 +150,6 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
- void *bounce_buffer;
- dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
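The new .get_channels/.set_channels hooks in ibmveth follow the common ethtool resize pattern: grow by allocating per-queue resources first, commit the new count with netif_set_real_num_tx_queues(), then free whatever is no longer exposed, unwinding on failure. A driver-agnostic sketch of that skeleton; alloc_txq_resources()/free_txq_resources() are hypothetical helpers standing in for the LTB alloc/free above:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int alloc_txq_resources(struct net_device *dev, unsigned int idx)
{
	return 0;	/* stand-in for per-queue buffer allocation */
}

static void free_txq_resources(struct net_device *dev, unsigned int idx)
{
	/* stand-in for per-queue buffer teardown */
}

static int example_set_channels(struct net_device *dev,
				struct ethtool_channels *ch)
{
	unsigned int old = dev->real_num_tx_queues, want = ch->tx_count;
	unsigned int i;
	int rc;

	/* grow: allocate resources for the new queues first */
	for (i = old; i < want; i++) {
		rc = alloc_txq_resources(dev, i);
		if (rc)
			goto undo;
	}

	rc = netif_set_real_num_tx_queues(dev, want);
	if (rc)
		goto undo;

	/* shrink: free resources of queues we no longer expose */
	for (i = want; i < old; i++)
		free_txq_resources(dev, i);

	return 0;

undo:
	while (i-- > old)
		free_txq_resources(dev, i);
	return rc;
}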
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ab7c0f81e9a..65dbfbec487a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1262,7 +1262,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
netif_napi_add(adapter->netdev, &adapter->napi[i],
- ibmvnic_poll, NAPI_POLL_WEIGHT);
+ ibmvnic_poll);
}
adapter->num_active_rx_napi = adapter->req_rx_queues;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 11a884aa5082..560d1d442232 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2431,8 +2431,8 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 32803b0cf1e8..d06d29c6c037 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -531,10 +531,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000_driver_name,
+ strscpy(drvinfo->driver, e1000_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 23299fc56199..61e60e4de600 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1012,7 +1012,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, e1000_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index b80ae9a82224..51a5afe9df2f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -639,7 +639,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
@@ -650,7 +650,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 321f2a95ae3a..49e926959ad3 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7267,7 +7267,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
+ strscpy((char *)pba_str, "Unknown", sizeof(pba_str));
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -7479,8 +7479,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -7676,7 +7676,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_pch_cnp)
adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
- strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index fd07c3679bb1..060b263348ce 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2697,9 +2697,14 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg &= ~BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
}
@@ -2715,9 +2720,14 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg |= BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
@@ -3037,7 +3047,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, MII_BMCR, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
+ if (ret_val) {
+ e_dbg("Error reading PHY register\n");
+ return ret_val;
+ }
if (data & BMCR_LOOPBACK)
return 0;
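These e1000e hunks stop ignoring the return value of the PHY read before flipping BMCR bits; if the read fails, the code would otherwise write back whatever happened to be in the local variable. The underlying pattern is just a guarded read-modify-write; a generic hedged sketch with invented read_reg()/write_reg() accessors rather than the driver's e1e_rphy()/e1e_wphy() macros:

#include <linux/mii.h>
#include <linux/types.h>

static int read_reg(void *phy, int reg, u16 *val)
{
	*val = 0;	/* stand-in for an MDIO read */
	return 0;
}

static int write_reg(void *phy, int reg, u16 val)
{
	return 0;	/* stand-in for an MDIO write */
}

static int phy_set_power_down(void *phy, bool down)
{
	u16 bmcr;
	int ret;

	ret = read_reg(phy, MII_BMCR, &bmcr);
	if (ret)
		return ret;	/* don't write back a stale/garbage value */

	if (down)
		bmcr |= BMCR_PDOWN;
	else
		bmcr &= ~BMCR_PDOWN;

	return write_reg(phy, MII_BMCR, bmcr);
}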
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3362f26d7f99..4a6630586ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1595,8 +1595,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(interface->netdev, &q_vector->napi,
- fm10k_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);
/* tie q_vector and interface together */
interface->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d86b6d349ea9..9a60d6b207f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,20 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+
struct i40e_flex_pit {
struct list_head list;
u16 src_offset;
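The two new I40E_QINT_*_VAL() macros assemble the queue-interrupt control register value from the varying pieces (queue, vector, next-queue type) and use ## token pasting so the caller can pass a bare RX or TX and have the matching I40E_QUEUE_TYPE_* constant selected at preprocessing time. A stripped-down sketch of the same mechanism with invented names and shifts:

#include <linux/types.h>

#define MY_QUEUE_TYPE_RX	0x0
#define MY_QUEUE_TYPE_TX	0x1

/* All names and shift positions here are made up for illustration. */
#define MY_QINT_VAL(qp, vector, nextq_type)		\
	(((vector) << 8) |				\
	 ((qp) << 16) |					\
	 (MY_QUEUE_TYPE_##nextq_type << 30))

static inline u32 first_rx_entry(u32 qp, u32 vector)
{
	return MY_QINT_VAL(qp, vector, TX);	/* TX expands to MY_QUEUE_TYPE_TX */
}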
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..10d7a982a5b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 2819e261a126..4f01e2a6b6bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -4974,6 +4975,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -5012,6 +5014,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 2610338002fe..d9c51a238dcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -24,8 +24,10 @@
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
#define I40E_IS_X710TL_DEVICE(d) \
- (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e9cd0fa6a0d2..7e75706f76db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2001,10 +2001,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+ strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9f1d5de7bf16..2c07fa8ecfc8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
@@ -3878,7 +3879,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
- /* Linked list for the queuepairs assigned to this vector */
+ /* begin of linked list for RX queue assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
@@ -3894,6 +3895,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) {
+ /* TX queue with next queue set to TX */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3903,7 +3905,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
-
+ /* TX queue with next RX or end of linked list */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3972,7 +3974,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- u32 val;
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
@@ -3989,28 +3990,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_RQCTL(0), val);
+ /* Associate the queue pair to the vector and enable the queue
+ * interrupt RX queue in linked list with next queue set to TX
+ */
+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
if (i40e_enabled_xdp_vsi(vsi)) {
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ /* TX queue in linked list with next queue set to TX */
+ wr32(hw, I40E_QINT_TQCTL(nextqp),
+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
}
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(0), val);
+ /* last TX queue so the next RX queue doesn't matter */
+ wr32(hw, I40E_QINT_TQCTL(0),
+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
i40e_flush(hw);
}
@@ -5909,6 +5902,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before send to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5930,10 +5943,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -6659,6 +6672,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
vsi->tc_seid_map[i] = ch->seid;
}
}
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
return ret;
err_free:
@@ -8221,9 +8237,9 @@ config_tc:
if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -10701,7 +10717,7 @@ static void i40e_send_version(struct i40e_pf *pf)
dv.minor_version = 0xff;
dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
+ strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
@@ -10968,10 +10984,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
@@ -11925,8 +11941,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
@@ -16049,23 +16064,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strlcpy(width, "8", PCI_WIDTH_SIZE); break;
+ strscpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strlcpy(width, "4", PCI_WIDTH_SIZE); break;
+ strscpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strlcpy(width, "2", PCI_WIDTH_SIZE); break;
+ strscpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strlcpy(width, "1", PCI_WIDTH_SIZE); break;
+ strscpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
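i40e_bw_bytes_to_mbits() centralizes a conversion that two call sites previously open-coded: mqprio supplies max_rate in bytes per second, while the firmware expects Mbit/s expressed in 50 Mbit/s credits, with anything too small clamped up and a warning printed. A simplified, hedged sketch of the conversion (the 125000 bytes/s-per-Mbit factor is an assumption about what I40E_BW_MBPS_DIVISOR encodes, and this version applies the clamp after the division rather than copying the helper line for line):

#include <asm/div64.h>
#include <linux/types.h>

#define BYTES_PER_MBIT		125000u	/* assumed: 1 Mbit/s = 125000 bytes/s */
#define MIN_TX_RATE_MBPS	50	/* one firmware BW credit */

static u64 bytes_to_mbits_clamped(u64 rate_bytes)
{
	do_div(rate_bytes, BYTES_PER_MBIT);	/* do_div() modifies its first argument */
	return rate_bytes < MIN_TX_RATE_MBPS ? MIN_TX_RATE_MBPS : rate_bytes;
}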
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 2d3533f38d7b..ffea0c9c82f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1390,7 +1390,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strlcpy(pf->ptp_caps.name, i40e_driver_name,
+ strscpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4226161a3ef..69e67eb6aea7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3688,7 +3688,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
u8 prio;
/* is DCB enabled at all? */
- if (vsi->tc_config.numtc == 1)
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4f184c50f6e8..7e9f6a69eb10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2039,6 +2039,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN so the PF needs to account for this in max frame size
+ * checks and sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2139,6 +2158,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
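i40e_vc_get_max_frame_size() exists because a VF placed in a port VLAN never sees the 4-byte tag the PF inserts on its behalf, so the PF shrinks the advertised maximum frame accordingly; the VF side picks this up via vfres->max_mtu in the iavf_virtchnl.c hunk further down. A one-line sketch of the adjustment with placeholder arguments:

#include <linux/if_vlan.h>	/* VLAN_HLEN */
#include <linux/types.h>

/* Sketch: shrink the frame budget when the PF adds an outer VLAN tag. */
static u16 vf_max_frame(u16 port_max_frame, bool has_port_vlan)
{
	return has_port_vlan ? port_max_frame - VLAN_HLEN : port_max_frame;
}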
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index e535d4c3da49..a056e1545615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -581,9 +581,9 @@ static void iavf_get_drvinfo(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strscpy(drvinfo->driver, iavf_driver_name, 32);
+ strscpy(drvinfo->fw_version, "N/A", 4);
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index f39440ad5c50..3fc572341781 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1077,7 +1077,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
- bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data);
int ret;
if (!is_valid_ether_addr(addr->sa_data))
@@ -1094,10 +1093,9 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
return 0;
}
- if (handle_mac)
- goto done;
-
- ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500));
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+ iavf_is_mac_set_handled(netdev, addr->sa_data),
+ msecs_to_jiffies(2500));
/* If ret < 0 then it means wait was interrupted.
* If ret == 0 then it means we got a timeout.
@@ -1111,7 +1109,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (!ret)
return -EAGAIN;
-done:
if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
return -EACCES;
@@ -1270,66 +1267,138 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
}
/**
- * iavf_down - Shutdown the connection processing
+ * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
+ * yet and mark other to be removed.
* @adapter: board private structure
- *
- * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
-void iavf_down(struct iavf_adapter *adapter)
+static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct iavf_vlan_filter *vlf;
- struct iavf_cloud_filter *cf;
- struct iavf_fdir_fltr *fdir;
- struct iavf_mac_filter *f;
- struct iavf_adv_rss *rss;
-
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- adapter->link_up = false;
- iavf_napi_disable_all(adapter);
- iavf_irq_disable(adapter);
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
/* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */
- list_for_each_entry(f, &adapter->mac_filter_list, list) {
- f->remove = true;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
+ list) {
+ if (f->add) {
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->remove = true;
+ }
}
/* remove all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->remove = true;
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list) {
+ if (vlf->add) {
+ list_del(&vlf->list);
+ kfree(vlf);
+ } else {
+ vlf->remove = true;
+ }
}
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
+ * mark other to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */
spin_lock_bh(&adapter->cloud_filter_list_lock);
- list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
- cf->del = true;
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->add) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ } else {
+ cf->del = true;
+ }
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
+ * other to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ }
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
+ * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
+ * other to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
+{
+ struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */
spin_lock_bh(&adapter->adv_rss_lock);
- list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
- rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ list_del(&rss->list);
+ kfree(rss);
+ } else {
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ }
+ }
spin_unlock_bh(&adapter->adv_rss_lock);
+}
+
+/**
+ * iavf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
+ **/
+void iavf_down(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->state <= __IAVF_DOWN_PENDING)
+ return;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ adapter->link_up = false;
+ iavf_napi_disable_all(adapter);
+ iavf_irq_disable(adapter);
+
+ iavf_clear_mac_vlan_filters(adapter);
+ iavf_clear_cloud_filters(adapter);
+ iavf_clear_fdir_filters(adapter);
+ iavf_clear_adv_rss_conf(adapter);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
@@ -1338,11 +1407,16 @@ void iavf_down(struct iavf_adapter *adapter)
* here for this to complete. The watchdog is still running
* and it will take care of this.
*/
- adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ if (!list_empty(&adapter->mac_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!list_empty(&adapter->vlan_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!list_empty(&adapter->cloud_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ if (!list_empty(&adapter->fdir_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ if (!list_empty(&adapter->adv_rss_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1757,7 +1831,7 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll, NAPI_POLL_WEIGHT);
+ iavf_napi_poll);
}
return 0;
@@ -2877,6 +2951,11 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
+ /* Detach interface to avoid subsequent NDO callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
@@ -2884,7 +2963,7 @@ static void iavf_reset_task(struct work_struct *work)
if (adapter->state != __IAVF_REMOVE)
queue_work(iavf_wq, &adapter->reset_task);
- return;
+ goto reset_finish;
}
while (!mutex_trylock(&adapter->client_lock))
@@ -2954,7 +3033,6 @@ continue_reset:
if (running) {
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@@ -3084,7 +3162,7 @@ continue_reset:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- return;
+ goto reset_finish;
reset_err:
if (running) {
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -3095,6 +3173,10 @@ reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+reset_finish:
+ rtnl_lock();
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
/**
@@ -4173,6 +4255,7 @@ err_unlock:
static int iavf_close(struct net_device *netdev)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ u64 aq_to_restore;
int status;
mutex_lock(&adapter->crit_lock);
@@ -4185,6 +4268,29 @@ static int iavf_close(struct net_device *netdev)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
+ /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
+ * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
+ * deadlock with adminq_task() until iavf_close timeouts. We must send
+ * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
+ * disable queues possible for vf. Give only necessary flags to
+ * iavf_down and save other to set them right before iavf_close()
+ * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and
+ * iavf will be in DOWN state.
+ */
+ aq_to_restore = adapter->aq_required;
+ adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
+
+ /* Remove flags which we do not want to send after close or we want to
+ * send before disable queues.
+ */
+ aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
+ IAVF_FLAG_AQ_ENABLE_QUEUES |
+ IAVF_FLAG_AQ_CONFIGURE_QUEUES |
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER |
+ IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
+ IAVF_FLAG_AQ_ADD_FDIR_FILTER |
+ IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter);
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
@@ -4208,6 +4314,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+
+ mutex_lock(&adapter->crit_lock);
+ adapter->aq_required |= aq_to_restore;
+ mutex_unlock(&adapter->crit_lock);
return 0;
}
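The iavf_down() rework splits the teardown into per-list helpers and, more importantly, changes the policy: filters that were queued for addition but never sent to the PF are simply freed (there is nothing on the PF side to delete), while filters the PF already knows about are flagged for removal, and the IAVF_FLAG_AQ_DEL_* bits are only set when the corresponding list is non-empty. A hedged, generic sketch of that "free if unsent, otherwise mark" walk; struct my_filter is an invented stand-in for the driver's several filter types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_filter {
	struct list_head list;
	bool add;	/* queued for ADD, not yet sent to the PF */
	bool remove;	/* ask the PF to delete it */
};

static void clear_filters(struct list_head *head, spinlock_t *lock)
{
	struct my_filter *f, *tmp;

	spin_lock_bh(lock);
	list_for_each_entry_safe(f, tmp, head, list) {
		if (f->add) {		/* PF never saw it: just drop it */
			list_del(&f->list);
			kfree(f);
		} else {		/* PF owns a copy: schedule deletion */
			f->remove = true;
		}
	}
	spin_unlock_bh(lock);
}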
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 06d18797d25a..18b6a702a1d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or always return
+ * 0 for the head/tail registers so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -1390,7 +1393,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
@@ -1548,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
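iavf_get_tx_pending() now derives the in-flight descriptor count from the software-cached next_to_clean/next_to_use indices, because the hardware head/tail registers may be inaccessible or read back as zero. The arithmetic is the usual circular-ring occupancy formula; a tiny sketch under the assumption of a ring with 'count' entries:

#include <linux/types.h>

/* Sketch: descriptors still owned by hardware in a circular ring,
 * computed from software-cached indices only.
 */
static u32 ring_pending(u32 head, u32 tail, u32 count)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail - head + count;
}

For example, ring_pending(3, 1, 8) evaluates to 6: the producer index has wrapped past the end of the ring while the consumer is still at entry 3.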
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15ee85dc33bd..5a9e6563923e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -269,11 +269,14 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 841fa149c407..001500afc4a6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -864,6 +864,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 9939238573a4..1bdc70aa979d 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1423,6 +1423,56 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX 16
+
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+
+ u8 pending_port_option_status;
+#define ICE_AQC_PENDING_PORT_OPT_IDX_M GENMASK(3, 0)
+#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7)
+
+ u8 rsvd[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M GENMASK(3, 0)
+
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+
+ u8 global_scid[2];
+ u8 phy_scid[2];
+ u8 pf2port_cid[2];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
struct ice_aqc_gpio {
__le16 gpio_ctrl_handle;
@@ -1489,6 +1539,12 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@@ -2082,6 +2138,8 @@ struct ice_aq_desc {
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
@@ -2243,6 +2301,8 @@ enum ice_adminq_opc {
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
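The new port-option admin-queue structures pack several sub-fields into single bytes and describe them with GENMASK()/BIT() masks, so consumers are expected to use the bitfield helpers instead of open-coded shifts. A small hedged sketch of decoding the port_options byte; the mask names mirror the ones added above, while port_option_active() itself is invented:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define ICE_AQC_PORT_OPT_ACTIVE_M	GENMASK(3, 0)
#define ICE_AQC_PORT_OPT_VALID		BIT(7)

/* Returns true and fills *active_idx when the active option field is valid. */
static bool port_option_active(u8 port_options, u8 *active_idx)
{
	if (!(port_options & ICE_AQC_PORT_OPT_VALID))
		return false;

	*active_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, port_options);
	return true;
}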
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 136d7911adb4..9e36f01dfa4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -7,18 +7,6 @@
#include "ice_dcb_lib.h"
#include "ice_sriov.h"
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
- return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- return !!rx_ring->rx_buf;
-}
-
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -142,8 +130,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
out:
/* tie q_vector and VSI together */
@@ -417,7 +404,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Strip the Ethernet CRC bytes before the packet is posted to host
* memory.
*/
- rlan_ctx.crcstrip = 1;
+ rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
* and it needs to remain 1 for non-DVM capable configurations to not
@@ -519,11 +506,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
- kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- if (!ice_alloc_rx_buf_zc(ring))
- return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -538,8 +522,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!ice_alloc_rx_buf(ring))
- return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 27d0cbbd29da..039342a0ed15 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -8,6 +8,108 @@
#define ICE_PF_RESET_WAIT_COUNT 300
+static const char * const ice_link_mode_str_low[] = {
+ [0] = "100BASE_TX",
+ [1] = "100M_SGMII",
+ [2] = "1000BASE_T",
+ [3] = "1000BASE_SX",
+ [4] = "1000BASE_LX",
+ [5] = "1000BASE_KX",
+ [6] = "1G_SGMII",
+ [7] = "2500BASE_T",
+ [8] = "2500BASE_X",
+ [9] = "2500BASE_KX",
+ [10] = "5GBASE_T",
+ [11] = "5GBASE_KR",
+ [12] = "10GBASE_T",
+ [13] = "10G_SFI_DA",
+ [14] = "10GBASE_SR",
+ [15] = "10GBASE_LR",
+ [16] = "10GBASE_KR_CR1",
+ [17] = "10G_SFI_AOC_ACC",
+ [18] = "10G_SFI_C2C",
+ [19] = "25GBASE_T",
+ [20] = "25GBASE_CR",
+ [21] = "25GBASE_CR_S",
+ [22] = "25GBASE_CR1",
+ [23] = "25GBASE_SR",
+ [24] = "25GBASE_LR",
+ [25] = "25GBASE_KR",
+ [26] = "25GBASE_KR_S",
+ [27] = "25GBASE_KR1",
+ [28] = "25G_AUI_AOC_ACC",
+ [29] = "25G_AUI_C2C",
+ [30] = "40GBASE_CR4",
+ [31] = "40GBASE_SR4",
+ [32] = "40GBASE_LR4",
+ [33] = "40GBASE_KR4",
+ [34] = "40G_XLAUI_AOC_ACC",
+ [35] = "40G_XLAUI",
+ [36] = "50GBASE_CR2",
+ [37] = "50GBASE_SR2",
+ [38] = "50GBASE_LR2",
+ [39] = "50GBASE_KR2",
+ [40] = "50G_LAUI2_AOC_ACC",
+ [41] = "50G_LAUI2",
+ [42] = "50G_AUI2_AOC_ACC",
+ [43] = "50G_AUI2",
+ [44] = "50GBASE_CP",
+ [45] = "50GBASE_SR",
+ [46] = "50GBASE_FR",
+ [47] = "50GBASE_LR",
+ [48] = "50GBASE_KR_PAM4",
+ [49] = "50G_AUI1_AOC_ACC",
+ [50] = "50G_AUI1",
+ [51] = "100GBASE_CR4",
+ [52] = "100GBASE_SR4",
+ [53] = "100GBASE_LR4",
+ [54] = "100GBASE_KR4",
+ [55] = "100G_CAUI4_AOC_ACC",
+ [56] = "100G_CAUI4",
+ [57] = "100G_AUI4_AOC_ACC",
+ [58] = "100G_AUI4",
+ [59] = "100GBASE_CR_PAM4",
+ [60] = "100GBASE_KR_PAM4",
+ [61] = "100GBASE_CP2",
+ [62] = "100GBASE_SR2",
+ [63] = "100GBASE_DR",
+};
+
+static const char * const ice_link_mode_str_high[] = {
+ [0] = "100GBASE_KR2_PAM4",
+ [1] = "100G_CAUI2_AOC_ACC",
+ [2] = "100G_CAUI2",
+ [3] = "100G_AUI2_AOC_ACC",
+ [4] = "100G_AUI2",
+};
+
+/**
+ * ice_dump_phy_type - helper function to dump phy_type
+ * @hw: pointer to the HW structure
+ * @low: 64 bit value for phy_type_low
+ * @high: 64 bit value for phy_type_high
+ * @prefix: prefix string to differentiate multiple dumps
+ */
+static void
+ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
+{
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
+ if (low & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_low[i]);
+ }
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
+ if (high & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_high[i]);
+ }
+}
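A quick decode example may help when reading these dumps; the bit indices map straight into the string tables above (the value itself is purely illustrative):

/* e.g. a phy_type_low of BIT(2) | BIT(12) decodes as:
 *   bit 2  -> "1000BASE_T"
 *   bit 12 -> "10GBASE_T"
 * and each set bit produces one "%s: bit(%d): %s" line in the PHY debug log.
 */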
+
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -80,9 +182,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T6:
+ case ICE_SUBDEV_ID_E810T7:
+ return true;
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T5:
return true;
+ }
break;
default:
break;
@@ -183,6 +299,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
+ const char *prefix;
struct ice_hw *hw;
int status;
@@ -204,29 +321,48 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
- ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
- report_mode);
- ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
- ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
+
+ switch (report_mode) {
+ case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
+ prefix = "phy_caps_media";
+ break;
+ case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
+ prefix = "phy_caps_no_media";
+ break;
+ case ICE_AQC_REPORT_ACTIVE_CFG:
+ prefix = "phy_caps_active";
+ break;
+ case ICE_AQC_REPORT_DFLT_CFG:
+ prefix = "phy_caps_default";
+ break;
+ default:
+ prefix = "phy_caps_invalid";
+ }
+
+ ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
+ le64_to_cpu(pcaps->phy_type_high), prefix);
+
+ ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
+ prefix, report_mode);
+ ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
pcaps->low_power_ctrl_an);
- ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
+ pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
pcaps->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
pcaps->link_fec_options);
- ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
- pcaps->module_compliance_enforcement);
- ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
- pcaps->extended_compliance_code);
- ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
+ prefix, pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
+ prefix, pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
pcaps->module_type[0]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
pcaps->module_type[1]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
@@ -2397,6 +2533,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+ info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2414,6 +2552,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
+ info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2776,6 +2916,26 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
}
/**
+ * ice_is_100m_speed_supported
+ * @hw: pointer to the HW struct
+ *
+ * Returns true if 100M speeds are supported by the device,
+ * false otherwise.
+ */
+bool ice_is_100m_speed_supported(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
@@ -3535,6 +3695,121 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
/**
+ * ice_aq_get_port_options
+ * @hw: pointer to the HW struct
+ * @options: buffer for the resultant port options
+ * @option_count: input - size of the buffer in port options structures,
+ * output - number of returned port options
+ * @lport: logical port to call the command with (optional)
+ * @lport_valid: when false, FW uses the port owned by the PF instead of lport;
+ * must be true when the PF owns more than one port
+ * @active_option_idx: index of active port option in returned buffer
+ * @active_option_valid: active option in returned buffer is valid
+ * @pending_option_idx: index of pending port option in returned buffer
+ * @pending_option_valid: pending option in returned buffer is valid
+ *
+ * Calls Get Port Options AQC (0x06ea) and verifies result.
+ */
+int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid)
+{
+ struct ice_aqc_get_port_options *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 i;
+
+ /* options buffer shall be able to hold max returned options */
+ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.get_port_options;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+ cmd->lport_num_valid = lport_valid;
+
+ status = ice_aq_send_cmd(hw, &desc, options,
+ *option_count * sizeof(*options), NULL);
+ if (status)
+ return status;
+
+ /* verify direct FW response & set output parameters */
+ *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
+ cmd->port_options_count);
+ ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
+ *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
+ cmd->port_options);
+ if (*active_option_valid) {
+ *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
+ cmd->port_options);
+ if (*active_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
+ *active_option_idx);
+ }
+
+ *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
+ cmd->pending_port_option_status);
+ if (*pending_option_valid) {
+ *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
+ cmd->pending_port_option_status);
+ if (*pending_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
+ *pending_option_idx);
+ }
+
+ /* mask output options fields */
+ for (i = 0; i < *option_count; i++) {
+ options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
+ options[i].pmd);
+ options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
+ options[i].max_lane_speed);
+ ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
+ options[i].pmd, options[i].max_lane_speed);
+ }
+
+ return 0;
+}
+
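For reference, a minimal caller sketch for the new AQ wrapper (hw is assumed to be a struct ice_hw pointer and use_active_option() is a hypothetical consumer; the real user is the devlink code later in this diff). It shows the in/out convention of option_count: pass the buffer capacity in, read the number of returned options back out.

	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_MAX];
	u8 cnt = ICE_AQC_PORT_OPT_MAX, active, pending;
	bool active_ok, pending_ok;
	int err;

	err = ice_aq_get_port_options(hw, opts, &cnt, 0, true,
				      &active, &active_ok,
				      &pending, &pending_ok);
	if (!err && active_ok)
		/* pmd and max_lane_speed are already masked by the helper */
		use_active_option(&opts[active]);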
+/**
+ * ice_aq_set_port_option
+ * @hw: pointer to the HW struct
+ * @lport: logical port to call the command with
+ * @lport_valid: when false, FW uses the port owned by the PF instead of lport;
+ * must be true when the PF owns more than one port
+ * @new_option: new port option to be written
+ *
+ * Calls Set Port Options AQC (0x06eb).
+ */
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option)
+{
+ struct ice_aqc_set_port_option *cmd;
+ struct ice_aq_desc desc;
+
+ if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.set_port_option;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+
+ cmd->lport_num_valid = lport_valid;
+ cmd->selected_port_option = new_option;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -5029,20 +5304,22 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
}
/**
- * ice_fw_supports_link_override
+ * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
*
- * Checks if the firmware supports link override
+ * Checks if the firmware API is at least the given version
*/
-bool ice_fw_supports_link_override(struct ice_hw *hw)
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ } else if (hw->api_maj_ver > maj) {
return true;
}
@@ -5050,6 +5327,19 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
}
/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
+}
+
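The helper reduces each FW capability check to a lexicographic (major, minor, patch) comparison. A worked example, assuming a hypothetical firmware API version of 1.7.0:

/*   ice_is_fw_api_min_ver(hw, 1, 5, 3) -> true  (minor 7 > 5)
 *   ice_is_fw_api_min_ver(hw, 1, 7, 2) -> false (patch 0 < 2)
 *   ice_is_fw_api_min_ver(hw, 2, 0, 0) -> false (major 1 < 2)
 */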
+/**
* ice_get_link_default_override
* @ldo: pointer to the link default override struct
* @pi: pointer to the port info struct
@@ -5179,16 +5469,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
if (hw->mac_type != ICE_MAC_E810)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -5225,14 +5508,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 61b7c60db689..8b6712b92e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -151,6 +151,15 @@ int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid);
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option);
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -204,6 +213,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index b41bc3dc1745..6d560d1c74a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -24,6 +24,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 3337314a7b35..e6ec20079ced 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -9,6 +9,8 @@
#include "ice_eswitch.h"
#include "ice_fw_update.h"
+static int ice_active_port_option = -1;
+
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -466,12 +468,259 @@ ice_devlink_reload_empr_finish(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF whose port split options are printed
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to set the port option for
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
+
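To illustrate the wrap-around search above, here is a worked example with a hypothetical FW option table (the real table comes from the Get Port Options AQ response):

/*   option_count = 4, options[].pmd = { 4, 2, 2, 1 }, active_idx = 1
 *
 *   devlink "split to 2": probing starts at j = (1 + 1) % 4 = 2, finds
 *   options[2].pmd == 2 and selects option 2; issuing the same request
 *   again later selects option 1, so repeated requests toggle between the
 *   two FW options that both expose two ports.
 *
 *   devlink "split to 8": no option matches, new_option stays at
 *   ICE_AQC_PORT_OPT_MAX and the handler returns -EINVAL.
 */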
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * Note that a FW option with a split count of 1 might not be available.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
/* The ice driver currently does not support driver reinit */
.reload_down = ice_devlink_reload_empr_start,
.reload_up = ice_devlink_reload_empr_finish,
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
@@ -695,6 +944,39 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
}
/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
*
@@ -722,6 +1004,12 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pf->hw.bus.func;
+	/* As FW supports port split options only for the whole device,
+	 * set them only on the first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index e35371e61e07..f9f15acae90a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -292,8 +292,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (max_vsi_num < vsi->vsi_num)
max_vsi_num = vsi->vsi_num;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
+ ice_napi_poll);
netif_keep_dst(vf->repr->netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a6fff8ebaf9d..b7be84bbe72d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -136,6 +136,11 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
+ ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
+ ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
+ ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
+ ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
+ ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};
static const u32 ice_regs_dump_list[] = {
@@ -1284,10 +1289,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- ice_down(vsi);
- ice_up(vsi);
- }
+ ice_down_up(vsi);
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
@@ -1468,20 +1470,22 @@ ice_get_ethtool_stats(struct net_device *netdev,
/**
* ice_mask_min_supported_speeds
+ * @hw: pointer to the HW structure
* @phy_types_high: PHY type high
* @phy_types_low: PHY type low to apply minimum supported speeds mask
*
* Apply minimum supported speeds mask to PHY type low. These are the speeds
* for ethtool supported link mode.
*/
-static
-void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+static void
+ice_mask_min_supported_speeds(struct ice_hw *hw,
+ u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
- else
+ else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}
@@ -1531,7 +1535,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
- ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+ ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
+ &phy_types_low);
/* determine advertised modes based on link override only
* if it's supported and if the FW doesn't abstract the
* driver from having to account for link overrides
@@ -2826,6 +2831,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i--)
@@ -2884,6 +2890,7 @@ process_rx:
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
+ rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
rx_rings[i].rx_buf = NULL;
/* this is to allow wr32 to have something to write to
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index c9f7393b783d..ee5b36941ba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -61,13 +61,13 @@ static void ice_lag_set_backup(struct ice_lag *lag)
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *master;
+ const char *name, *peer, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
- master = lag->master ? "TRUE" : "FALSE";
+ primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
switch (lag->role) {
@@ -87,8 +87,8 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
- bonded, peer, upper, role, master);
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
+ bonded, peer, upper, role, primary);
}
/**
@@ -119,7 +119,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
- netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
goto lag_out;
}
@@ -164,8 +164,8 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
- /* if this is the first element in an LAG mark as master */
- lag->master = !!(peers == 1);
+ /* if this is the first element in an LAG mark as primary */
+ lag->primary = !!(peers == 1);
}
/**
@@ -264,7 +264,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
return;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index c2e3688dd8fd..51b5cf467ce2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -24,7 +24,7 @@ struct ice_lag {
struct net_device *upper_netdev; /* upper bonding netdev */
struct notifier_block notif_block;
u8 bonded:1; /* currently bonded */
- u8 master:1; /* this is a master */
+ u8 primary:1; /* this is primary */
u8 handler:1; /* did we register a rx_netdev_handler */
/* each thing blocking bonding will increment this value by one.
* If this value is zero, then bonding is allowed.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c4ec9264071..938ba8c215cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -914,7 +914,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
*/
static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
@@ -981,23 +981,25 @@ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* at least 1)
*/
if (offset)
- vsi->num_rxq = offset;
+ rx_count = offset;
else
- vsi->num_rxq = num_rxq_per_tc;
+ rx_count = num_rxq_per_tc;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ if (rx_count > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ rx_count, vsi->alloc_rxq);
return -EINVAL;
}
- vsi->num_txq = tx_count;
- if (vsi->num_txq > vsi->alloc_txq) {
+ if (tx_count > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ tx_count, vsi->alloc_txq);
return -EINVAL;
}
+ vsi->num_txq = tx_count;
+ vsi->num_rxq = rx_count;
+
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
@@ -1522,6 +1524,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
+ ring->cached_phctime = pf->ptp.cached_phc_time;
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1562,6 +1565,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
+ * @vsi: VSI to be configured
+ * @disable: set to true to have FCS / CRC in the frame data
+ */
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i)
+ if (disable)
+ vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+}
+
+/**
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
@@ -2969,9 +2988,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- if (vsi->type == ICE_VSI_PF)
- ice_devlink_destroy_pf_port(pf);
-
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -3029,6 +3045,9 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
@@ -3276,6 +3295,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
*/
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
+
+ /* disable or enable CRC stripping */
+ if (vsi->netdev)
+ ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
+ NETIF_F_RXFCS));
+
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
@@ -3490,6 +3515,7 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u16 new_txq, new_rxq;
u8 netdev_tc = 0;
int i;
@@ -3530,21 +3556,24 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
}
}
- /* Set actual Tx/Rx queue pairs */
- vsi->num_txq = offset + qcount_tx;
- if (vsi->num_txq > vsi->alloc_txq) {
+ new_txq = offset + qcount_tx;
+ if (new_txq > vsi->alloc_txq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
- vsi->num_txq, vsi->alloc_txq);
+ new_txq, vsi->alloc_txq);
return -EINVAL;
}
- vsi->num_rxq = offset + qcount_rx;
- if (vsi->num_rxq > vsi->alloc_rxq) {
+ new_rxq = offset + qcount_rx;
+ if (new_rxq > vsi->alloc_rxq) {
dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
- vsi->num_rxq, vsi->alloc_rxq);
+ new_rxq, vsi->alloc_rxq);
return -EINVAL;
}
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = new_txq;
+ vsi->num_rxq = new_rxq;
+
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
@@ -3576,6 +3605,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
+ struct ice_tc_cfg old_tc_cfg;
struct ice_vsi_ctx *ctx;
struct device *dev;
int i, ret = 0;
@@ -3600,6 +3630,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs[i] = vsi->num_txq;
}
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
@@ -3616,8 +3647,10 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
else
ret = ice_vsi_setup_q_map(vsi, ctx);
- if (ret)
+ if (ret) {
+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
goto out;
+ }
/* must to indicate which section of VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 8712b1d2ceec..ec4bf0c89857 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -89,6 +89,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
+
void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 173fe6c31341..0f6718719453 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2399,8 +2399,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
- ice_unplug_aux_dev(pf);
-
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2898,10 +2896,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ /* reallocate Rx queues that are used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
} else {
/* safe to call even when prog == vsi->xdp_prog as
* dev_xdp_install in net/core/dev.c incremented prog's
@@ -3087,7 +3093,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- ice_ptp_process_ts(pf);
+ if (!hw->reset_ongoing)
+ ret = IRQ_WAKE_THREAD;
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3122,7 +3129,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ice_service_task_schedule(pf);
}
}
- ret = IRQ_HANDLED;
+ if (!ret)
+ ret = IRQ_HANDLED;
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -3131,6 +3139,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
+ * ice_misc_intr_thread_fn - misc interrupt thread function
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+{
+ irqreturn_t ret = IRQ_HANDLED;
+ struct ice_pf *pf = data;
+ bool irq_handled;
+
+ irq_handled = ice_ptp_process_ts(pf);
+ if (!irq_handled)
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @hw: pointer to HW structure
*/
@@ -3242,10 +3268,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
- ice_misc_intr, 0, pf->int_name, pf);
+ err = devm_request_threaded_irq(dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, ice_misc_intr_thread_fn,
+ 0, pf->int_name, pf);
if (err) {
- dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
@@ -3282,7 +3310,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll, NAPI_POLL_WEIGHT);
+ ice_napi_poll);
}
/**
@@ -3385,6 +3413,11 @@ static void ice_set_netdev_features(struct net_device *netdev)
if (is_dvm_ena)
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_TX;
+
+ /* Leave CRC / FCS stripping enabled by default, but allow the value to
+ * be changed at runtime
+ */
+ netdev->hw_features |= NETIF_F_RXFCS;
}
/**
@@ -3905,7 +3938,7 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
@@ -3917,87 +3950,134 @@ static int ice_init_pf(struct ice_pf *pf)
}
/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when the entire request cannot be
+ * fulfilled. pf->num_lan_msix and pf->num_rdma_msix are set based on the
+ * number of remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+ /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
+
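A worked example of the reduction above, using purely illustrative constant values (ICE_MIN_LAN_TXRX_MSIX = 1, ICE_MIN_RDMA_MSIX = 2, ICE_RDMA_NUM_AEQ_MSIX = 4, so v_rdma = 5 when RDMA is enabled):

/*   v_remain = 2:  2 < 1 + 2       -> RDMA disabled, LAN keeps 1 vector
 *   v_remain = 8:  8 - 5 < 5       -> RDMA gets 2, LAN gets 8 - 2 = 6
 *   v_remain = 16: final branch    -> RDMA gets (16 - 4) / 2 + 4 = 10,
 *                                     LAN gets 16 - 10 = 6
 */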
+/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure
*
- * compute the number of MSIX vectors required (v_budget) and request from
- * the OS. Return the number of vectors reserved or negative on failure
+ * Compute the number of MSI-X vectors wanted, request them from the OS, and
+ * adjust device usage if there are not enough vectors. Return the number of
+ * vectors reserved or negative on failure.
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int num_cpus, v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
struct device *dev = ice_pf_to_dev(pf);
- int needed, err, i;
+ int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */
- needed = ICE_MIN_LAN_OICR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
- /* reserve for flow director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
- needed = ICE_FDIR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
- }
-
- /* reserve for switchdev */
- needed = ICE_ESWITCH_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
-
- /* total used for non-traffic vectors */
- v_other = v_budget;
-
- /* reserve vectors for LAN traffic */
- needed = num_cpus;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
-
- /* reserve vectors for RDMA auxiliary driver */
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
+
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
+
+ v_wanted = v_other;
+
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
+
+ /* RDMA auxiliary driver */
if (ice_is_rdma_ena(pf)) {
- needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_rdma_msix = needed;
- v_budget += needed;
- v_left -= needed;
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
}
- pf->msix_entries = devm_kcalloc(dev, v_budget,
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+
+ pf->msix_entries = devm_kcalloc(dev, v_wanted,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
- for (i = 0; i < v_budget; i++)
+ for (i = 0; i < v_wanted; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
- ICE_MIN_MSIX, v_budget);
+ ICE_MIN_MSIX, v_wanted);
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
- if (v_actual < v_budget) {
+ if (v_actual < v_wanted) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_budget, v_actual);
+ v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
@@ -4006,38 +4086,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
goto msix_err;
} else {
int v_remain = v_actual - v_other;
- int v_rdma = 0, v_min_rdma = 0;
- if (ice_is_rdma_ena(pf)) {
- /* Need at least 1 interrupt in addition to
- * AEQ MSIX
- */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
- v_min_rdma = ICE_MIN_RDMA_MSIX;
- }
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX ||
- v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
- dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining
- * vectors to LAN MSIX
- */
- pf->num_rdma_msix = v_min_rdma;
- pf->num_lan_msix = v_remain - v_min_rdma;
- } else {
- /* Split remaining MSIX with RDMA after
- * accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
+ ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
@@ -4052,12 +4105,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
msix_err:
devm_kfree(dev, pf->msix_entries);
- goto exit_err;
-no_hw_vecs_left_err:
- dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
- needed, v_left);
- err = -ERANGE;
exit_err:
pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
@@ -4551,6 +4599,10 @@ static int ice_register_netdev(struct ice_pf *pf)
if (!vsi || !vsi->netdev)
return -EIO;
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create;
+
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
@@ -4558,17 +4610,13 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
-err_devlink_create:
- unregister_netdev(vsi->netdev);
- clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4676,8 +4724,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw);
}
- hw->ucast_shared = true;
-
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5736,6 +5782,9 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
NETIF_F_HW_VLAN_STAG_RX | \
NETIF_F_HW_VLAN_STAG_TX)
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_STAG_FILTER)
@@ -5822,6 +5871,14 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
NETIF_F_HW_VLAN_STAG_TX);
}
+ if (!(netdev->features & NETIF_F_RXFCS) &&
+ (features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+ !ice_vsi_has_non_zero_vlans(np->vsi)) {
+ netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ }
+
return features;
}
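Read together with the two hunks that follow in ice_set_vlan_features() and ice_set_features(), the new NETIF_F_RXFCS checks reduce to a single rule, summarized here:

/* Keeping the FCS/CRC in the frame data (NETIF_F_RXFCS) and VLAN stripping
 * are mutually exclusive. Enabling RXFCS while stripping is requested either
 * drops the stripping request (no VLANs configured, warning above) or is
 * rejected with -EIO, and enabling stripping while RXFCS is set is rejected
 * the same way.
 */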
@@ -5915,6 +5972,13 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
if (current_vlan_features ^ requested_vlan_features) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+ return -EIO;
+ }
+
err = ice_set_vlan_offload_features(vsi, features);
if (err)
return err;
@@ -5996,6 +6060,23 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (ret)
return ret;
+ /* Turn on receive of FCS aka CRC, and after setting this
+ * flag the packet data will have the 4 byte CRC appended
+ */
+ if (changed & NETIF_F_RXFCS) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+ return -EIO;
+ }
+
+ ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+ ret = ice_down_up(vsi);
+ if (ret)
+ return ret;
+ }
+
if (changed & NETIF_F_NTUPLE) {
bool ena = !!(features & NETIF_F_NTUPLE);
@@ -6643,7 +6724,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0, vlan_err = 0;
+ int i, tx_err, rx_err, vlan_err = 0;
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
@@ -6677,20 +6758,13 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- link_err = ice_force_phys_link_state(vsi, false);
- if (link_err)
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
- }
-
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err || vlan_err) {
+ if (tx_err || rx_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6700,6 +6774,31 @@ int ice_down(struct ice_vsi *vsi)
}
/**
+ * ice_down_up - shut down the VSI connection and bring it back up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+ int ret;
+
+ /* if DOWN already set, nothing to do */
+ if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+ return 0;
+
+ ret = ice_down(vsi);
+ if (ret)
+ return ret;
+
+ ret = ice_up(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @vsi: VSI having resources allocated
*
@@ -6852,6 +6951,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -8884,6 +8985,16 @@ int ice_stop(struct net_device *netdev)
return -EBUSY;
}
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ return -EIO;
+ }
+ }
+
ice_vsi_close(vsi);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 13cdb5ea594d..c262dc886e6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -1114,14 +1114,18 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
*
- * cmd_flags controls which banks to activate, and the preservation level to
- * use when activating the NVM bank.
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
*
* On successful return of the firmware command, the response_flags variable
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
@@ -1130,7 +1134,8 @@ int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = cmd_flags;
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!err && response_flags)
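A byte-level example of the cmd_flags split above (the value is hypothetical; only the low/high byte placement matters):

/*   cmd_flags = 0x0102:
 *     cmd->cmd_flags   = 0x02   (bits 0-7)
 *     cmd->offset_high = 0x01   (bits 8-15)
 *   so activation flags defined above bit 7 travel in offset_high.
 */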
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 856d1ad4398b..774c2317967d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -34,7 +34,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
int ice_nvm_validate_checksum(struct ice_hw *hw);
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags);
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
int ice_aq_nvm_update_empr(struct ice_hw *hw);
int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 560efc7654c7..02a4e1cf624e 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -44,6 +44,7 @@ enum ice_protocol_type {
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PPPOE,
+ ICE_L2TPV3,
ICE_VLAN_EX,
ICE_VLAN_IN,
ICE_VXLAN_GPE,
@@ -111,6 +112,7 @@ enum ice_prot_id {
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
#define ICE_PPPOE_HW 103
+#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
@@ -217,6 +219,11 @@ struct ice_pppoe_hdr {
__be16 ppp_prot_id; /* control and data only */
};
+struct ice_l2tpv3_sess_hdr {
+ __be32 session_id;
+ __be64 cookie;
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -235,6 +242,7 @@ union ice_prot_hdr {
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
+ struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 72b663108a4a..011b727ab190 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -491,56 +491,6 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
}
/**
- * ice_ptp_update_cached_phctime - Update the cached PHC time values
- * @pf: Board specific private structure
- *
- * This function updates the system time values which are cached in the PF
- * structure and the Rx rings.
- *
- * This function must be called periodically to ensure that the cached value
- * is never more than 2 seconds old. It must also be called whenever the PHC
- * time has been changed.
- *
- * Return:
- * * 0 - OK, successfully updated
- * * -EAGAIN - PF was busy, need to reschedule the update
- */
-static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
-{
- u64 systime;
- int i;
-
- if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
- return -EAGAIN;
-
- /* Read the current PHC time */
- systime = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* Update the cached PHC time stored in the PF structure */
- WRITE_ONCE(pf->ptp.cached_phc_time, systime);
-
- ice_for_each_vsi(pf, i) {
- struct ice_vsi *vsi = pf->vsi[i];
- int j;
-
- if (!vsi)
- continue;
-
- if (vsi->type != ICE_VSI_PF)
- continue;
-
- ice_for_each_rxq(vsi, j) {
- if (!vsi->rx_rings[j])
- continue;
- WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
- }
- }
- clear_bit(ICE_CFG_BUSY, pf->state);
-
- return 0;
-}
-
-/**
* ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
* @cached_phc_time: recently cached copy of PHC time
* @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
@@ -636,12 +586,400 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
const u64 mask = GENMASK_ULL(31, 0);
+ unsigned long discard_time;
+
+ /* Discard the hardware timestamp if the cached PHC time is too old */
+ discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (time_is_before_jiffies(discard_time)) {
+ pf->ptp.tx_hwtstamp_discarded++;
+ return 0;
+ }
return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
(in_tstamp >> 8) & mask);
}
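
The extension step above relies on ice_ptp_extend_32b_ts and on the cached PHC time being recent (the new check discards captures whose cached copy is older than 2000 ms). A rough, self-contained approximation of that extension idea, assuming the capture happened within half of the 32-bit nanosecond range of the cached value; this is a sketch, not the driver's exact implementation:

#include <stdint.h>

/* Sketch: extend a 32-bit nanosecond capture to 64 bits using a cached
 * 64-bit PHC time, walking backwards if the capture predates the cache.
 */
static uint64_t extend_32b_ts(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t phc_lo = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - phc_lo;

	/* A huge delta means the subtraction wrapped: the capture is older
	 * than the cached copy, so move backwards instead of forwards.
	 */
	if (delta > (UINT32_MAX / 2))
		return cached_phc_time - (uint64_t)(phc_lo - in_tstamp);

	return cached_phc_time + delta;
}
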
/**
+ * ice_ptp_tx_tstamp - Process Tx timestamps for a port
+ * @tx: the PTP Tx timestamp tracker
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) copy the timestamp out of the PHY register
+ * 2) clear the timestamp valid bit in the PHY register
+ * 3) unlock the index by clearing the associated in_use bit
+ * 4) extend the 40b timestamp value to get a 64bit timestamp
+ * 5) send that timestamp to the stack
+ *
+ * After looping, if we still have waiting SKBs, return true. This may cause us
+ * to effectively poll even when not strictly necessary. We do this because it's
+ * possible a new timestamp was requested around the same time as the interrupt.
+ * In some cases hardware might not interrupt us again when the timestamp is
+ * captured.
+ *
+ * Note that we only take the tracking lock when clearing the bit and when
+ * checking if we need to re-queue this task. The only place where bits can be
+ * set is the hard xmit routine where an SKB has a request flag set. The only
+ * places where we clear bits are this work function, or the periodic cleanup
+ * thread. If the cleanup thread clears a bit we're processing we catch it
+ * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
+ * starts a new timestamp, we might not begin processing it right away but we
+ * will notice it at the end when we re-queue the task. If a Tx thread starts
+ * a new timestamp just after this function exits without re-queuing,
+ * the interrupt when the timestamp finishes should trigger. Avoiding holding
+ * the lock for the entire function is important in order to ensure that Tx
+ * threads do not get blocked while waiting for the lock.
+ */
+static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+{
+ struct ice_ptp_port *ptp_port;
+ bool ts_handled = true;
+ struct ice_pf *pf;
+ u8 idx;
+
+ if (!tx->init)
+ return false;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->quad_offset;
+ u64 raw_tstamp, tstamp;
+ struct sk_buff *skb;
+ int err;
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ err = ice_read_phy_tstamp(&pf->hw, tx->quad, phy_idx,
+ &raw_tstamp);
+ if (err)
+ continue;
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ continue;
+
+ /* The timestamp is valid, so we'll go ahead and clear this
+ * index and then send the timestamp up to the stack.
+ */
+ spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ spin_unlock(&tx->lock);
+
+ /* it's (unlikely but) possible we raced with the cleanup
+ * thread for discarding old timestamp requests.
+ */
+ if (!skb)
+ continue;
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ if (!bitmap_empty(tx->in_use, tx->len))
+ ts_handled = false;
+ spin_unlock(&tx->lock);
+
+ return ts_handled;
+}
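
The locking rules described in the comment above boil down to: the transmit path claims a free slot and stores the skb, while this function takes the skb and releases the slot, with only the ownership handoff done under the lock. A simplified, hypothetical userspace model of that discipline (not the driver's API):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define SLOTS 64

struct tracker {
	pthread_mutex_t lock;
	void *payload[SLOTS];
	bool in_use[SLOTS];
};

/* Producer side: claim a free slot, or report that the request is skipped. */
static int tracker_claim(struct tracker *t, void *payload)
{
	pthread_mutex_lock(&t->lock);
	for (size_t i = 0; i < SLOTS; i++) {
		if (!t->in_use[i]) {
			t->in_use[i] = true;
			t->payload[i] = payload;
			pthread_mutex_unlock(&t->lock);
			return (int)i;
		}
	}
	pthread_mutex_unlock(&t->lock);
	return -1;
}

/* Consumer side: take the payload and release the slot under the lock;
 * everything else (reading hardware, reporting) happens outside it.
 */
static void *tracker_complete(struct tracker *t, int idx)
{
	void *payload;

	pthread_mutex_lock(&t->lock);
	payload = t->payload[idx];
	t->payload[idx] = NULL;
	t->in_use[idx] = false;
	pthread_mutex_unlock(&t->lock);

	return payload;	/* may be NULL if a cleanup pass raced us */
}
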
+
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call directly,
+ * use ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
+ if (!tx->tstamps)
+ return -ENOMEM;
+
+ tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ if (!tx->in_use) {
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&tx->lock);
+
+ tx->init = 1;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ for (idx = 0; idx < tx->len; idx++) {
+ u8 phy_idx = idx + tx->quad_offset;
+
+ spin_lock(&tx->lock);
+ if (tx->tstamps[idx].skb) {
+ dev_kfree_skb_any(tx->tstamps[idx].skb);
+ tx->tstamps[idx].skb = NULL;
+ pf->ptp.tx_hwtstamp_flushed++;
+ }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->init = 0;
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+ bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->quad = pf->hw.port_info->lport;
+ tx->quad_offset = 0;
+ tx->len = INDEX_PER_QUAD;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @pf: pointer to the PF struct
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+ * waiting for a long time. Discard any SKBs that have been waiting for more
+ * than 2 seconds. This is long enough to be reasonably sure that the
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ struct ice_hw *hw = &pf->hw;
+ u8 idx;
+
+ if (!tx->init)
+ return;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
+ u64 raw_tstamp;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
+ /* Read tstamp to be able to use this register again */
+ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+ &raw_tstamp);
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Count the number of Tx timestamps which have timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Note that the cached copy in the PF PTP structure is always updated, even
+ * if we can't update the copy in the Rx rings.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ unsigned long update_before;
+ u64 systime;
+ int i;
+
+ update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (pf->ptp.cached_phc_time &&
+ time_is_before_jiffies(update_before)) {
+ unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
+
+ dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
+ jiffies_to_msecs(time_taken));
+ pf->ptp.late_cached_phc_updates++;
+ }
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+ WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
+
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
+}
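
The jiffies bookkeeping above enforces the "never more than 2 seconds old" requirement from the comment. A userspace analogue of that staleness check, purely illustrative, using CLOCK_MONOTONIC in place of jiffies:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t mono_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000u + (uint64_t)(ts.tv_nsec / 1000000u);
}

/* Flag a cached value that was refreshed more than 2000 ms ago. */
static bool cache_is_stale(uint64_t last_update_ms)
{
	return mono_ms() > last_update_ms + 2000u;
}
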
+
+/**
+ * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
+ * @pf: Board specific private structure
+ *
+ * This function must be called when the cached PHC time is no longer valid,
+ * such as after a time adjustment. It discards any outstanding Tx timestamps,
+ * and updates the cached PHC time for both the PF and Rx rings. If updating
+ * the PHC time cannot be done immediately, a warning message is logged and
+ * the work item is scheduled.
+ *
+ * These steps are required in order to ensure that we do not accidentally
+ * report a timestamp extended by the wrong cached PHC copy. If the immediate
+ * update fails because ICE_CFG_BUSY is set, another thread is already
+ * refreshing the Rx rings; in that case a warning is logged and the work
+ * item is queued so that the update happens as soon as possible.
+ */
+static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Update the cached PHC time immediately if possible, otherwise
+ * schedule the work item to execute soon.
+ */
+ err = ice_ptp_update_cached_phctime(pf);
+ if (err) {
+ /* If another thread is updating the Rx rings, we won't
+ * properly reset them here. This could lead to reporting of
+ * invalid timestamps, but there isn't much we can do.
+ */
+ dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
+ __func__);
+
+ /* Queue the work item to update the Rx rings when possible */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
+ msecs_to_jiffies(10));
+ }
+
+ /* Flush any outstanding Tx timestamps */
+ ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
+}
+
+/**
* ice_ptp_read_time - Read the time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
@@ -900,6 +1238,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
+ if (ice_is_reset_in_progress(pf->state))
+ return;
+
if (ice_ptp_check_offset_valid(port)) {
/* Offsets not ready yet, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1509,7 +1850,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_unlock(hw);
if (!err)
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
@@ -1588,7 +1929,7 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return err;
}
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
return 0;
}
@@ -1796,26 +2137,31 @@ void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
+ struct skb_shared_hwtstamps *hwtstamps;
+ u64 ts_ns, cached_time;
u32 ts_high;
- u64 ts_ns;
- /* Populate timesync data into skb */
- if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
- struct skb_shared_hwtstamps *hwtstamps;
+ if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
+ return;
- /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
- * cached PHC value, rather than accessing the PF. This also
- * allows us to simply pass the upper 32bits of nanoseconds
- * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
- * it would just discard these bits itself.
- */
- ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
- ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+ cached_time = READ_ONCE(rx_ring->cached_phctime);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
- }
+ /* Do not report a timestamp if we don't have a cached PHC time */
+ if (!cached_time)
+ return;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
+ * PHC value, rather than accessing the PF. This also allows us to
+ * simply pass the upper 32bits of nanoseconds directly. Calling
+ * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
+ * bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}
/**
@@ -1871,49 +2217,26 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
static void
-ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- /* Check if SMA controller is in the netlist */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
- !ice_is_pca9575_present(&pf->hw))
- ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
-
- if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
- info->n_per_out = N_PER_OUT_E810T_NO_SMA;
- return;
- }
+ info->n_per_out = N_PER_OUT_E810;
- info->n_per_out = N_PER_OUT_E810T;
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ info->n_ext_ts = N_EXT_TS_E810;
- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
info->n_ext_ts = N_EXT_TS_E810;
info->n_pins = NUM_PTP_PINS_E810T;
info->verify = ice_verify_pin_e810t;
- }
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
-}
-
-/**
- * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- info->n_per_out = N_PER_OUT_E810;
-
- if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
- return;
-
- info->n_ext_ts = N_EXT_TS_E810;
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
}
/**
@@ -1950,11 +2273,7 @@ static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
-
- if (ice_is_e810t(&pf->hw))
- ice_ptp_setup_pins_e810t(pf, info);
- else
- ice_ptp_setup_pins_e810(pf, info);
+ ice_ptp_setup_pins_e810(pf, info);
}
/**
@@ -2016,112 +2335,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
}
/**
- * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
- * @work: pointer to the kthread_work struct
- *
- * Process timestamps captured by the PHY associated with this port. To do
- * this, loop over each index with a waiting skb.
- *
- * If a given index has a valid timestamp, perform the following steps:
- *
- * 1) copy the timestamp out of the PHY register
- * 4) clear the timestamp valid bit in the PHY register
- * 5) unlock the index by clearing the associated in_use bit.
- * 2) extend the 40b timestamp value to get a 64bit timestamp
- * 3) send that timestamp to the stack
- *
- * After looping, if we still have waiting SKBs, then re-queue the work. This
- * may cause us effectively poll even when not strictly necessary. We do this
- * because it's possible a new timestamp was requested around the same time as
- * the interrupt. In some cases hardware might not interrupt us again when the
- * timestamp is captured.
- *
- * Note that we only take the tracking lock when clearing the bit and when
- * checking if we need to re-queue this task. The only place where bits can be
- * set is the hard xmit routine where an SKB has a request flag set. The only
- * places where we clear bits are this work function, or the periodic cleanup
- * thread. If the cleanup thread clears a bit we're processing we catch it
- * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
- * starts a new timestamp, we might not begin processing it right away but we
- * will notice it at the end when we re-queue the work item. If a Tx thread
- * starts a new timestamp just after this function exits without re-queuing,
- * the interrupt when the timestamp finishes should trigger. Avoiding holding
- * the lock for the entire function is important in order to ensure that Tx
- * threads do not get blocked while waiting for the lock.
- */
-static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
-{
- struct ice_ptp_port *ptp_port;
- struct ice_ptp_tx *tx;
- struct ice_pf *pf;
- struct ice_hw *hw;
- u8 idx;
-
- tx = container_of(work, struct ice_ptp_tx, work);
- if (!tx->init)
- return;
-
- ptp_port = container_of(tx, struct ice_ptp_port, tx);
- pf = ptp_port_to_pf(ptp_port);
- hw = &pf->hw;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct skb_shared_hwtstamps shhwtstamps = {};
- u8 phy_idx = idx + tx->quad_offset;
- u64 raw_tstamp, tstamp;
- struct sk_buff *skb;
- int err;
-
- ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
-
- err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
- &raw_tstamp);
- if (err)
- continue;
-
- ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
-
- /* Check if the timestamp is invalid or stale */
- if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
- continue;
-
- /* The timestamp is valid, so we'll go ahead and clear this
- * index and then send the timestamp up to the stack.
- */
- spin_lock(&tx->lock);
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
- clear_bit(idx, tx->in_use);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- spin_unlock(&tx->lock);
-
- /* it's (unlikely but) possible we raced with the cleanup
- * thread for discarding old timestamp requests.
- */
- if (!skb)
- continue;
-
- /* Extend the timestamp using cached PHC time */
- tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
- shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
-
- ice_trace(tx_tstamp_complete, skb, idx);
-
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
- }
-
- /* Check if we still have work to do. If so, re-queue this task to
- * poll for remaining timestamps.
- */
- spin_lock(&tx->lock);
- if (!bitmap_empty(tx->in_use, tx->len))
- kthread_queue_work(pf->ptp.kworker, &tx->work);
- spin_unlock(&tx->lock);
-}
-
-/**
* ice_ptp_request_ts - Request an available Tx timestamp index
* @tx: the PTP Tx timestamp tracker to request from
* @skb: the SKB to associate with this timestamp request
@@ -2161,177 +2374,17 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
}
/**
- * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * ice_ptp_process_ts - Process the PTP Tx timestamps
* @pf: Board private structure
*
- * Queue work required to process the PTP Tx timestamps outside of interrupt
- * context.
+ * Returns true if timestamps are processed.
*/
-void ice_ptp_process_ts(struct ice_pf *pf)
+bool ice_ptp_process_ts(struct ice_pf *pf)
{
if (pf->ptp.port.tx.init)
- kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
-}
-
-/**
- * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
- * @tx: Tx tracking structure to initialize
- *
- * Assumes that the length has already been initialized. Do not call directly,
- * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
- */
-static int
-ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
-{
- tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
- if (!tx->tstamps)
- return -ENOMEM;
-
- tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
- if (!tx->in_use) {
- kfree(tx->tstamps);
- tx->tstamps = NULL;
- return -ENOMEM;
- }
-
- spin_lock_init(&tx->lock);
- kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
-
- tx->init = 1;
-
- return 0;
-}
-
-/**
- * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
- * @pf: Board private structure
- * @tx: the tracker to flush
- */
-static void
-ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- for (idx = 0; idx < tx->len; idx++) {
- u8 phy_idx = idx + tx->quad_offset;
-
- spin_lock(&tx->lock);
- if (tx->tstamps[idx].skb) {
- dev_kfree_skb_any(tx->tstamps[idx].skb);
- tx->tstamps[idx].skb = NULL;
- }
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
- }
-}
-
-/**
- * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
- * @pf: Board private structure
- * @tx: Tx tracking structure to release
- *
- * Free memory associated with the Tx timestamp tracker.
- */
-static void
-ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->init = 0;
-
- kthread_cancel_work_sync(&tx->work);
-
- ice_ptp_flush_tx_tracker(pf, tx);
-
- kfree(tx->tstamps);
- tx->tstamps = NULL;
-
- bitmap_free(tx->in_use);
- tx->in_use = NULL;
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
- tx->len = 0;
-}
-
-/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
- * the timestamp block is shared for all ports in the same quad. To avoid
- * ports using the same timestamp index, logically break the block of
- * registers into chunks based on the port number.
- */
-static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
-{
- tx->quad = port / ICE_PORTS_PER_QUAD;
- tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
- tx->len = INDEX_PER_PORT;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- *
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
- */
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->quad = pf->hw.port_info->lport;
- tx->quad_offset = 0;
- tx->len = INDEX_PER_QUAD;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
- * @hw: pointer to the hw struct
- * @tx: PTP Tx tracker to clean up
- *
- * Loop through the Tx timestamp requests and see if any of them have been
- * waiting for a long time. Discard any SKBs that have been waiting for more
- * than 2 seconds. This is long enough to be reasonably sure that the
- * timestamp will never be captured. This might happen if the packet gets
- * discarded before it reaches the PHY timestamping block.
- */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- if (!tx->init)
- return;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct sk_buff *skb;
- u64 raw_tstamp;
-
- /* Check if this SKB has been waiting for too long */
- if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
- continue;
-
- /* Read tstamp to be able to use this register again */
- ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
- &raw_tstamp);
-
- spin_lock(&tx->lock);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Free the SKB after we've cleared the bit */
- dev_kfree_skb_any(skb);
- }
+ return false;
}
static void ice_ptp_periodic_work(struct kthread_work *work)
@@ -2345,7 +2398,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
err = ice_ptp_update_cached_phctime(pf);
- ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
+ ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 10e396abf130..028349295b71 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -105,7 +105,6 @@ struct ice_tx_tstamp {
/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
- * @work: work function to handle processing of Tx timestamps
* @lock: lock to prevent concurrent write to in_use bitmap
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
@@ -117,7 +116,6 @@ struct ice_tx_tstamp {
* window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
- struct kthread_work work;
spinlock_t lock; /* lock protecting in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
@@ -163,6 +161,7 @@ struct ice_ptp_port {
* @work: delayed work function for periodic tasks
* @extts_work: work function for handling external Tx timestamps
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
* @ext_ts_chan: the external timestamp channel in use
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
@@ -171,12 +170,19 @@ struct ice_ptp_port {
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_skipped: number of Tx timestamp requests skipped
+ * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no timestamp
+ * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
+ * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time
+ * being too old to correctly extend timestamp
+ * @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
struct kthread_work extts_work;
u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
u8 ext_ts_chan;
u8 ext_ts_irq;
struct kthread_worker *kworker;
@@ -185,6 +191,11 @@ struct ice_ptp {
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
u64 reset_time;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_flushed;
+ u32 tx_hwtstamp_discarded;
+ u32 late_cached_phc_updates;
};
#define __ptp_port_to_ptp(p) \
@@ -224,8 +235,8 @@ struct ice_ptp {
#define N_EXT_TS_E810 3
#define N_PER_OUT_E810 4
#define N_PER_OUT_E810T 3
-#define N_PER_OUT_E810T_NO_SMA 2
-#define N_EXT_TS_E810_NO_SMA 2
+#define N_PER_OUT_NO_SMA_E810T 2
+#define N_EXT_TS_NO_SMA_E810T 2
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
@@ -236,7 +247,7 @@ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
-void ice_ptp_process_ts(struct ice_pf *pf);
+bool ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
@@ -269,7 +280,10 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
-static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline bool ice_ptp_process_ts(struct ice_pf *pf)
+{
+ return true;
+}
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 6dff97d53d81..772b1f566d6e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
+#include <linux/delay.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -2587,38 +2588,113 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
}
/**
- * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * ice_read_phy_tstamp_ll_e810 - Read a PHY timestamp registers through the FW
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read a 8bit timestamp high value and 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the low latency
+ * timestamp read.
+ */
+static int
+ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
+{
+ u32 val;
+ u8 i;
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
+ wr32(hw, PF_SB_ATQBAL, val);
+
+ /* Read the register repeatedly until the FW provides us the TS */
+ for (i = TS_LL_READ_RETRIES; i > 0; i--) {
+ val = rd32(hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (!(FIELD_GET(TS_LL_READ_TS, val))) {
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ /* FW failed to provide the TS in time */
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ return -EINVAL;
+}
+
+/**
+ * ice_read_phy_tstamp_sbq_e810 - Read the PHY timestamp registers through the sbq
* @hw: pointer to the HW struct
* @lport: the lport to read from
* @idx: the timestamp index to read
- * @tstamp: on return, the 40bit timestamp value
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
*
- * Read a 40bit timestamp value out of the timestamp block of the external PHY
- * on the E810 device.
+ * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of
+ * the timestamp block of the external PHY on the E810 device using the
+ * sideband queue.
*/
static int
-ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
+ u32 *lo)
{
- u32 lo_addr, hi_addr, lo, hi;
+ u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_val, hi_val;
int err;
- lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
- hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
-
- err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
err);
return err;
}
- err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
err);
return err;
}
+ *lo = lo_val;
+ *hi = (u8)hi_val;
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static int
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+ u32 lo = 0;
+ u8 hi = 0;
+ int err;
+
+ if (hw->dev_caps.ts_dev_info.ts_ll_read)
+ err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
+ else
+ err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
+
+ if (err)
+ return err;
+
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1246e4ee4b5d..2bda64c76abc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -402,6 +402,7 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
+#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
@@ -413,6 +414,12 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
+/* Tx timestamp low latency read definitions */
+#define TS_LL_READ_RETRIES 200
+#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
+#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS BIT(31)
+
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
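
The TS_LL_READ_* fields added above back the low latency read in ice_ptp_hw.c: bit 31 is the "read in progress" flag written with the request, bits 29:24 carry the timestamp index, and bits 23:16 return the high 8 bits of the timestamp once firmware clears bit 31 (the low 32 bits come from PF_SB_ATQBAH). A small sketch of that bit layout with open-coded shifts in place of the kernel's FIELD_PREP/FIELD_GET helpers:

#include <stdint.h>

#define LL_TS_BUSY	(1u << 31)
#define LL_TS_IDX(idx)	(((uint32_t)(idx) & 0x3F) << 24)
#define LL_TS_HIGH(val)	(((val) >> 16) & 0xFF)

/* Value written to the request register to kick off the read */
static uint32_t ll_read_request(uint8_t idx)
{
	return LL_TS_IDX(idx) | LL_TS_BUSY;
}

/* Returns 1 and extracts the high timestamp byte once firmware is done */
static int ll_read_done(uint32_t reg, uint8_t *hi)
{
	if (reg & LL_TS_BUSY)
		return 0;	/* firmware still working; keep polling */
	*hi = (uint8_t)LL_TS_HIGH(reg);
	return 1;
}
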
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 0dac67cd9c77..bd31748aae1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -377,10 +377,10 @@ static void ice_repr_rem(struct ice_vf *vf)
if (!vf->repr)
return;
- ice_devlink_destroy_vf_port(vf);
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
unregister_netdev(vf->repr->netdev);
+ ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7947223536e3..118595763bba 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1290,7 +1290,7 @@ err_init_port:
pi->root = NULL;
}
- devm_kfree(ice_hw_to_dev(hw), buf);
+ kfree(buf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3808034f7e7e..9b762f7972ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -42,6 +42,7 @@ enum {
ICE_PKT_GTP_NOPAY = BIT(8),
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
+ ICE_PKT_L2TPV3 = BIT(11),
};
struct ice_dummy_pkt_offsets {
@@ -1258,6 +1259,65 @@ ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
+ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_L2TPV3, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_L2TPV3, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
+ 0x00, 0x0c, 0x73, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
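
The ICE_L2TPV3 offsets in the tables above follow from the headers placed before the session header: 14 bytes of Ethernet plus a minimal 20-byte IPv4 header (IHL = 5) gives 34, and 14 plus the fixed 40-byte IPv6 header gives 54. A tiny, purely illustrative check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const int eth_hdr = 14;		/* Ethernet header, no VLAN tag */
	const int ipv4_hdr = 20;	/* minimal IPv4 header, IHL = 5 */
	const int ipv6_hdr = 40;	/* fixed IPv6 header            */

	printf("ipv4_l2tpv3 offset: %d\n", eth_hdr + ipv4_hdr); /* 34 */
	printf("ipv6_l2tpv3 offset: %d\n", eth_hdr + ipv6_hdr); /* 54 */
	return 0;
}
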
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
ICE_PKT_GTP_NOPAY),
@@ -1297,6 +1357,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6 |
ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6),
@@ -2274,9 +2336,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
int status;
u16 i;
- rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
- GFP_KERNEL);
-
+ rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
if (!rbuf)
return -ENOMEM;
@@ -2324,7 +2384,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), rbuf);
+ kfree(rbuf);
return status;
}
@@ -3449,31 +3509,15 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
- *
- * IMPORTANT: When the ucast_shared flag is set to false and m_list has
- * multiple unicast addresses, the function assumes that all the
- * addresses are unique in a given add_mac call. It doesn't
- * check for duplicates in this case, removing duplicates from a given
- * list should be taken care of in the caller of this function.
*/
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
- struct list_head *rule_head;
- u16 total_elem_left, s_rule_size;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- u16 num_unicast = 0;
int status = 0;
- u8 elem_sent;
if (!m_list || !hw)
return -EINVAL;
- s_rule = NULL;
- sw = hw->switch_info;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
@@ -3492,106 +3536,13 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
return -EINVAL;
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't overwrite the unicast address */
- mutex_lock(rule_lock);
- if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
- &m_list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -EEXIST;
- }
- mutex_unlock(rule_lock);
- num_unicast++;
- } else if (is_multicast_ether_addr(add) ||
- (is_unicast_ether_addr(add) && hw->ucast_shared)) {
- m_list_itr->status =
- ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
- m_list_itr);
- if (m_list_itr->status)
- return m_list_itr->status;
- }
- }
- mutex_lock(rule_lock);
- /* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast) {
- status = 0;
- goto ice_add_mac_exit;
- }
-
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
-
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_unicast_ether_addr(mac_addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
- ice_aqc_opc_add_sw_rules);
- r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
-
- elem_sent = min_t(u8, total_elem_left,
- (ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_add_sw_rules,
- NULL);
- if (status)
- goto ice_add_mac_exit;
- r_iter = (typeof(s_rule))
- ((u8 *)r_iter + (elem_sent * s_rule_size));
+ m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
- /* Fill up rule ID based on the value returned from FW */
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_fltr_mgmt_list_entry *fm_entry;
-
- if (is_unicast_ether_addr(mac_addr)) {
- f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
- f_info->fltr_act = ICE_FWD_TO_VSI;
- /* Create an entry to track this MAC address */
- fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fm_entry), GFP_KERNEL);
- if (!fm_entry) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
- fm_entry->fltr_info = *f_info;
- fm_entry->vsi_count = 1;
- /* The book keeping entries will get removed when
- * base driver calls remove filter AQ command
- */
-
- list_add(&fm_entry->list_entry, rule_head);
- r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
- }
- }
-
-ice_add_mac_exit:
- mutex_unlock(rule_lock);
- if (s_rule)
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -3979,38 +3930,6 @@ ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
- * @hw: pointer to the hardware structure
- * @recp_id: lookup type for which the specified rule needs to be searched
- * @f_info: rule information
- *
- * Helper function to search for a unicast rule entry - this is to be used
- * to remove unicast MAC filter that is not shared with other VSIs on the
- * PF switch.
- *
- * Returns pointer to entry storing the rule if found
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
- struct ice_fltr_info *f_info)
-{
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *list_head;
-
- list_head = &sw->recp_list[recp_id].filt_rules;
- list_for_each_entry(list_itr, list_head, list_entry) {
- if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
- sizeof(f_info->l_data)) &&
- f_info->fwd_id.hw_vsi_id ==
- list_itr->fltr_info.fwd_id.hw_vsi_id &&
- f_info->flag == list_itr->fltr_info.flag)
- return list_itr;
- }
- return NULL;
-}
-
-/**
* ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -4026,15 +3945,12 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return -EINVAL;
- rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
- u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
@@ -4046,19 +3962,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't remove the unicast address that belongs to
- * another VSI on the switch, since it is not being
- * shared...
- */
- mutex_lock(rule_lock);
- if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
- &list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -ENOENT;
- }
- mutex_unlock(rule_lock);
- }
+
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
@@ -4648,6 +4552,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
{ ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
{ ICE_VLAN_EX, { 2, 0 } },
{ ICE_VLAN_IN, { 2, 0 } },
};
@@ -4671,6 +4576,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
{ ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
@@ -5754,7 +5660,8 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
htons(PPP_IPV6))
match |= ICE_PKT_OUTER_IPV6;
- }
+ } else if (lkups[i].type == ICE_L2TPV3)
+ match |= ICE_PKT_L2TPV3;
}
while (ret->match && (match & ret->match) != ret->match)
@@ -5855,6 +5762,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
+ case ICE_L2TPV3:
+ len = sizeof(struct ice_l2tpv3_sess_hdr);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index a298862857a8..f68c555be4e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -36,6 +36,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))
+ lkups_cnt++;
+
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
@@ -47,11 +51,11 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
lkups_cnt++;
/* is VLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
/* is CVLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
/* are PPPoE options specified? */
@@ -64,6 +68,13 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
+ lkups_cnt++;
+
+ /* are L2TPv3 options specified? */
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
+ lkups_cnt++;
+
/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
@@ -257,6 +268,50 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ hdr->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ hdr->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ hdr_h->hop_limit = hdr->l3_key.ttl;
+ hdr_m->hop_limit = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
hdr->l3_key.ip_proto == IPPROTO_UDP) {
list[i].type = ICE_UDP_OF;
@@ -334,7 +389,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
}
/* copy VLAN info */
- if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
@@ -343,15 +398,45 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
list[i].type = ICE_VLAN_EX;
else
list[i].type = ICE_VLAN_OFOS;
- list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->vlan_hdr.vlan_prio;
+ }
+
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
list[i].type = ICE_VLAN_IN;
- list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->cvlan_hdr.vlan_prio;
+ }
+
i++;
}
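
The 0x0FFF, 0xE000, and 0xEFFF masks used above follow the 802.1Q TCI layout: PCP in bits 15:13, DEI in bit 12, VID in bits 11:0, so matching both priority and VLAN ID masks everything except the DEI bit. A small sketch of composing such a value/mask pair, shown in host byte order for brevity (the driver stores both as big-endian):

#include <stdbool.h>
#include <stdint.h>

#define VLAN_VID_BITS	0x0FFFu		/* VLAN ID, bits 11:0    */
#define VLAN_PCP_BITS	0xE000u		/* priority (PCP), 15:13 */

static void build_vlan_match(bool match_vid, bool match_prio,
			     uint16_t vid, uint16_t prio,
			     uint16_t *value, uint16_t *mask)
{
	*value = 0;
	*mask = 0;

	if (match_vid) {
		*value |= vid & VLAN_VID_BITS;
		*mask |= VLAN_VID_BITS;
	}
	if (match_prio) {
		*value |= (uint16_t)(prio << 13) & VLAN_PCP_BITS;
		*mask |= VLAN_PCP_BITS;
	}
	/* match_vid && match_prio yields mask 0xEFFF, i.e. everything but
	 * the DEI bit, mirroring the combined case in the hunk above.
	 */
}
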
@@ -420,6 +505,61 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
+ if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live =
+ headers->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live =
+ headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ headers->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ headers->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ hdr_h->hop_limit = headers->l3_key.ttl;
+ hdr_m->hop_limit = headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
+ list[i].type = ICE_L2TPV3;
+
+ list[i].h_u.l2tpv3_sess_hdr.session_id =
+ headers->l2tpv3_hdr.session_id;
+ list[i].m_u.l2tpv3_sess_hdr.session_id =
+ cpu_to_be32(0xFFFFFFFF);
+
+ i++;
+ }
+
/* copy L4 (src, dest) port */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
@@ -839,6 +979,40 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
}
/**
+ * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel
+ */
+static void
+ice_tc_set_tos_ttl(struct flow_match_ip *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ bool is_encap)
+{
+ if (match->mask->tos) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
+
+ headers->l3_key.tos = match->key->tos;
+ headers->l3_mask.tos = match->mask->tos;
+ }
+
+ if (match->mask->ttl) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
+
+ headers->l3_key.ttl = match->key->ttl;
+ headers->l3_mask.ttl = match->mask->ttl;
+ }
+}
+
+/**
* ice_tc_set_port - Parse ports from TC flower filter
* @match: Flow match structure
* @fltr: Pointer to filter structure
@@ -967,10 +1141,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- headers->l3_key.tos = match.key->tos;
- headers->l3_key.ttl = match.key->ttl;
- headers->l3_mask.tos = match.mask->tos;
- headers->l3_mask.ttl = match.mask->ttl;
+ ice_tc_set_tos_ttl(&match, fltr, headers, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
@@ -1039,9 +1210,11 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
+ BIT(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
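The extended used_keys check above whitelists the dissector keys the driver can offload and rejects everything else with -EOPNOTSUPP before any per-key parsing runs. A minimal standalone sketch of that gate follows; the key names and bit positions are invented for the demo and are not the real FLOW_DISSECTOR_KEY_* values.

/* Illustrative sketch (not part of the patch): rejecting a TC filter whose
 * dissector keys fall outside the supported set, as the check above does
 * with dissector->used_keys.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	KEY_BASIC,
	KEY_VLAN,
	KEY_IP,
	KEY_PORTS,
	KEY_L2TPV3,
	KEY_MPLS,		/* not supported in this demo */
};
#define DEMO_BIT(n)	(1ull << (n))

static int check_used_keys(uint64_t used_keys)
{
	const uint64_t supported = DEMO_BIT(KEY_BASIC) | DEMO_BIT(KEY_VLAN) |
				   DEMO_BIT(KEY_IP) | DEMO_BIT(KEY_PORTS) |
				   DEMO_BIT(KEY_L2TPV3);

	if (used_keys & ~supported)
		return -95;	/* -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	printf("ip+ports:  %d\n",
	       check_used_keys(DEMO_BIT(KEY_IP) | DEMO_BIT(KEY_PORTS)));
	printf("with mpls: %d\n",
	       check_used_keys(DEMO_BIT(KEY_IP) | DEMO_BIT(KEY_MPLS)));
	return 0;
}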
@@ -1137,16 +1310,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
return -EINVAL;
}
}
- headers->vlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ headers->vlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
+
if (match.mask->vlan_tpid)
headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
}
@@ -1164,6 +1343,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Bad CVLAN mask");
@@ -1171,10 +1353,12 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
}
}
- headers->cvlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ headers->cvlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
@@ -1217,6 +1401,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ ice_tc_set_tos_ttl(&match, fltr, headers, false);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
+ struct flow_match_l2tpv3 match;
+
+ flow_rule_match_l2tpv3(rule, &match);
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
+ headers->l2tpv3_hdr.session_id = match.key->session_id;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
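Throughout this file the parser only records a field, and only sets the matching ICE_TC_FLWR_FIELD_* flag, when the dissector mask for that field is non-zero; the new ice_tc_set_tos_ttl() above is one instance of the pattern. A small sketch of the idea follows, with hypothetical flag and struct names.

/* Illustrative sketch (not part of the patch): copy a field and set its
 * filter flag only when the dissector supplied a non-zero mask for it,
 * the way ice_tc_set_tos_ttl() does for ToS and TTL.
 */
#include <stdio.h>

#define FLT_FIELD_IP_TOS	(1u << 0)
#define FLT_FIELD_IP_TTL	(1u << 1)

struct ip_match { unsigned char key_tos, mask_tos, key_ttl, mask_ttl; };
struct filter   { unsigned int flags; unsigned char tos, tos_mask, ttl, ttl_mask; };

static void set_tos_ttl(const struct ip_match *m, struct filter *f)
{
	if (m->mask_tos) {
		f->flags |= FLT_FIELD_IP_TOS;
		f->tos = m->key_tos;
		f->tos_mask = m->mask_tos;
	}
	if (m->mask_ttl) {
		f->flags |= FLT_FIELD_IP_TTL;
		f->ttl = m->key_ttl;
		f->ttl_mask = m->mask_ttl;
	}
}

int main(void)
{
	struct ip_match m = { .key_tos = 0xB8, .mask_tos = 0xFF }; /* TTL unset */
	struct filter f = { 0 };

	set_tos_ttl(&m, &f);
	printf("flags=0x%x tos=0x%02x ttl=%u\n", f.flags, f.tos, f.ttl);
	return 0;
}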
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 91cd3d3778c7..92642faad595 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -26,9 +26,18 @@
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
+#define ICE_TC_FLWR_FIELD_IP_TOS BIT(22)
+#define ICE_TC_FLWR_FIELD_IP_TTL BIT(23)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TOS BIT(24)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TTL BIT(25)
+#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
+#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
+#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+#define ICE_IPV6_HDR_TC_MASK 0xFF00000
+
struct ice_indr_block_priv {
struct net_device *netdev;
struct ice_netdev_priv *np;
@@ -42,7 +51,7 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
- u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
__be16 vlan_tpid;
};
@@ -80,6 +89,10 @@ struct ice_tc_l3_hdr {
u8 ttl;
};
+struct ice_tc_l2tpv3_hdr {
+ __be32 session_id;
+};
+
struct ice_tc_l4_hdr {
__be16 dst_port;
__be16 src_port;
@@ -92,6 +105,7 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_vlan_hdr vlan_hdr;
struct ice_tc_vlan_hdr cvlan_hdr;
struct ice_tc_pppoe_hdr pppoe_hdr;
+ struct ice_tc_l2tpv3_hdr l2tpv3_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 836dce840712..dbe80e5053a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -610,7 +610,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -621,6 +621,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
spin_lock(&xdp_ring->tx_lock);
} else {
+ /* Generally, should not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
xdp_ring = vsi->xdp_rings[queue_index];
}
@@ -1464,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
+ wd = ice_xmit_zc(tx_ring);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -2255,8 +2258,10 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
/* Grab an open timestamp slot */
idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
- if (idx < 0)
+ if (idx < 0) {
+ tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
return;
+ }
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ca902af54bb4..932b5661ec4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -295,10 +295,11 @@ struct ice_rx_ring {
struct xsk_buff_pool *xsk_pool;
struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
u64 cached_phctime;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 861b64322959..e1abfcee96dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -347,6 +347,7 @@ struct ice_ts_func_info {
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
+#define ICE_TS_LL_TX_TS_READ_M BIT(28)
struct ice_ts_dev_info {
/* Device specific info */
@@ -359,6 +360,7 @@ struct ice_ts_dev_info {
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
+ u8 ts_ll_read;
};
/* Function specific capabilities */
@@ -564,6 +566,8 @@ enum ice_rl_type {
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
+#define ICE_MAX_PORT_PER_PCI_DEV 8
+
/* Data structure for saving BW information */
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
@@ -885,8 +889,6 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
- u8 ucast_shared; /* true if VSIs can share unicast addr */
-
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index e48e29258450..056c904b83cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
+ ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
@@ -317,6 +318,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
}
/**
+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate a new software ring and return -ENOMEM if that fails.
+ * On success, substitute the old buffer array with the newly allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+ sizeof(*rx_ring->rx_buf);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ rx_ring->xdp_buf = sw_ring;
+ } else {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ rx_ring->rx_buf = sw_ring;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate the buffer arrays of the Rx rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+ struct ice_rx_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+ rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
* @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -335,21 +392,20 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
goto failure;
}
- if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
- !is_power_of_2(vsi->tx_rings[qid]->count)) {
- netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
- pool_failure = -EINVAL;
- goto failure;
- }
-
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_pool_if_up;
}
+
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+ if (ret)
+ goto xsk_pool_if_up;
}
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
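ice_realloc_rx_xdp_bufs() above allocates the replacement software ring first and only frees and repoints the old array once that allocation has succeeded, so a failed switch leaves the ring's current buffers intact. A userspace sketch of the same allocate-before-free swap follows; the types and element sizes are placeholders.

/* Illustrative sketch (not part of the patch): allocate the replacement
 * array first, then free and repoint the old one, so an -ENOMEM leaves
 * the existing buffers untouched.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
	unsigned int count;
	void *rx_buf;		/* used in the normal (copy) path */
	void *xdp_buf;		/* used when an XSK pool is attached */
};

static int realloc_ring_bufs(struct demo_ring *ring, int pool_present)
{
	size_t elem = pool_present ? 64 : 32;	/* stand-in element sizes */
	void *sw_ring = calloc(ring->count, elem);

	if (!sw_ring)
		return -12;	/* -ENOMEM: old arrays remain valid */

	if (pool_present) {
		free(ring->rx_buf);
		ring->rx_buf = NULL;
		ring->xdp_buf = sw_ring;
	} else {
		free(ring->xdp_buf);
		ring->xdp_buf = NULL;
		ring->rx_buf = sw_ring;
	}
	return 0;
}

int main(void)
{
	struct demo_ring ring = { .count = 512, .rx_buf = calloc(512, 32) };

	printf("enable XSK:  %d\n", realloc_ring_bufs(&ring, 1));
	printf("disable XSK: %d\n", realloc_ring_bufs(&ring, 0));
	free(ring.rx_buf);
	free(ring.xdp_buf);
	return 0;
}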
@@ -471,11 +527,10 @@ exit:
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
- u16 batched, leftover, i, tail_bumps;
+ u16 leftover, i, tail_bumps;
- batched = ALIGN_DOWN(count, rx_thresh);
- tail_bumps = batched / rx_thresh;
- leftover = count & (rx_thresh - 1);
+ tail_bumps = count / rx_thresh;
+ leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
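Dropping the power-of-2 ring size restriction means the quarter-ring threshold can no longer be assumed to be a power of two, so the leftover computation above switches from a mask to plain division and subtraction. The sketch below shows where the masked form goes wrong for a non-power-of-2 threshold.

/* Illustrative sketch (not part of the patch): count & (rx_thresh - 1) is
 * only a valid remainder when rx_thresh is a power of two; the division
 * based form used above works for any quarter size.
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 100, rx_thresh = 48;	/* 192-entry ring / 4 */

	unsigned int tail_bumps = count / rx_thresh;
	unsigned int leftover = count - tail_bumps * rx_thresh;
	unsigned int old_leftover = count & (rx_thresh - 1);	/* wrong here */

	printf("tail_bumps=%u leftover=%u (mask-based leftover=%u)\n",
	       tail_bumps, leftover, old_leftover);
	return 0;
}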
@@ -725,69 +780,57 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
}
/**
- * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
- * @xdp_ring: XDP ring to clean
- * @napi_budget: amount of descriptors that NAPI allows us to clean
- *
- * Returns count of cleaned descriptors
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
*/
-static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- int budget = napi_budget / tx_thresh;
- u16 next_dd = xdp_ring->next_dd;
- u16 ntc, cleared_dds = 0;
-
- do {
- struct ice_tx_desc *next_dd_desc;
- u16 desc_cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames;
- u16 i;
-
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
- cleared_dds++;
- xsk_frames = 0;
- if (likely(!xdp_ring->xdp_tx_active)) {
- xsk_frames = tx_thresh;
- goto skip;
- }
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if ((tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+ if (last_rs >= ntc)
+ xsk_frames = last_rs - ntc + 1;
+ else
+ xsk_frames = last_rs + cnt - ntc + 1;
+ }
- ntc = xdp_ring->next_to_clean;
+ if (!xsk_frames)
+ return;
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ if (likely(!xdp_ring->xdp_tx_active))
+ goto skip;
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < xsk_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
+ if (tx_buf->raw_buf) {
+ ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+ tx_buf->raw_buf = NULL;
+ } else {
+ xsk_frames++;
}
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
skip:
- xdp_ring->next_to_clean += tx_thresh;
- if (xdp_ring->next_to_clean >= desc_cnt)
- xdp_ring->next_to_clean -= desc_cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- next_dd_desc->cmd_type_offset_bsz = 0;
- next_dd = next_dd + tx_thresh;
- if (next_dd >= desc_cnt)
- next_dd = tx_thresh - 1;
- } while (--budget);
-
- xdp_ring->next_dd = next_dd;
-
- return cleared_dds * tx_thresh;
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += xsk_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
/**
@@ -822,7 +865,6 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u16 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
u32 i;
@@ -842,13 +884,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
}
xdp_ring->next_to_use = ntu;
-
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
}
/**
@@ -861,7 +896,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
u32 nb_pkts, unsigned int *total_bytes)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
@@ -870,54 +904,54 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
- if (xdp_ring->next_to_use > xdp_ring->next_rs) {
- struct ice_tx_desc *tx_desc;
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
- }
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
- * @budget: number of free descriptors on HW Tx ring that can be used
- * @napi_budget: amount of descriptors that NAPI allows us to clean
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
+ int budget;
+
+ ice_clean_xdp_irq_zc(xdp_ring);
- if (budget < tx_thresh)
- budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
- struct ice_tx_desc *tx_desc;
-
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
xdp_ring->next_to_use = 0;
}
ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
&total_bytes);
+ ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
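The reworked ice_xmit_zc() above first reclaims completed descriptors, then derives its own budget as the smaller of the free descriptor count and a quarter of the ring, and finally arms the RS bit on the descriptor one behind next_to_use (wrapping to count - 1 when next_to_use is 0). The sketch below reproduces just that index arithmetic; the unused-descriptor formula is a loose restatement of the driver's ICE_DESC_UNUSED macro, not a copy of it.

/* Illustrative sketch (not part of the patch): the budget clamp and the
 * "one behind next_to_use, with wrap" index used by ice_set_rs_bit().
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int count = 256, ntc = 10, ntu = 0;

	/* free descriptors on the ring (next_to_clean ahead of next_to_use) */
	unsigned int unused = (ntc > ntu ? 0 : count) + ntc - ntu - 1;
	unsigned int budget = min_u(unused, count / 4);	/* ring quarter */

	/* last produced descriptor: one behind next_to_use, wrapping */
	unsigned int rs_idx = ntu ? ntu - 1 : count - 1;

	printf("unused=%u budget=%u rs_idx=%u\n", unused, budget, rs_idx);
	return 0;
}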
@@ -995,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
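The cleanup loop above replaces the (ntc + 1) & count_mask wrap with an explicit compare-and-reset, which stays correct once ring sizes are no longer required to be powers of two. A short sketch of the difference:

/* Illustrative sketch (not part of the patch): wrapping a ring index with
 * an explicit compare works for any ring size, while the masked form only
 * works when count is a power of two.
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 192, ntc = 191;

	unsigned int explicit_wrap = ntc + 1;
	if (explicit_wrap >= count)
		explicit_wrap = 0;

	unsigned int masked_wrap = (ntc + 1) & (count - 1);	/* 192 & 191 */

	printf("explicit=%u masked=%u\n", explicit_wrap, masked_wrap);
	return 0;
}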
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 21faec8e97db..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -26,12 +26,10 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool
-ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
- u32 __always_unused budget,
- int __always_unused napi_budget)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
{
return false;
}
@@ -72,5 +70,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+ bool __always_unused zc)
+{
+ return 0;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c14fc871dd41..e5f3e7680dc6 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -850,14 +850,14 @@ static void igb_get_drvinfo(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers
*/
- strlcpy(drvinfo->fw_version, adapter->fw_version,
+ strscpy(drvinfo->fw_version, adapter->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
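The strlcpy() to strscpy() conversions in this and the following drivers trade the BSD-style return value (the full source length, which requires walking the entire source string and is easy to misuse in overflow checks) for a bounded copy that always NUL-terminates and reports truncation as -E2BIG. The sketch below contrasts the two return conventions with simplified local re-implementations; neither is the kernel's actual code.

/* Illustrative sketch (not part of the patch): the behavioural difference
 * behind the strlcpy() -> strscpy() conversion, using simplified local
 * re-implementations of both helpers.
 */
#include <stdio.h>
#include <string.h>

static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);	/* always walks the whole source */

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;			/* source length, even on truncation */
}

static long demo_strscpy(char *dst, const char *src, size_t size)
{
	size_t i;

	if (!size)
		return -7;		/* -E2BIG */
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? -7 : (long)i;	/* -E2BIG on truncation, else length */
}

int main(void)
{
	char buf[8];

	printf("strlcpy-like: %zu '%s'\n",
	       demo_strlcpy(buf, "a-very-long-driver-name", sizeof(buf)), buf);
	printf("strscpy-like: %ld '%s'\n",
	       demo_strscpy(buf, "a-very-long-driver-name", sizeof(buf)), buf);
	return 0;
}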
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2796e81d2726..f8e32833226c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1211,8 +1211,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igb_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -3138,7 +3137,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
adapter->i2c_algo.data = adapter;
adapter->i2c_adap.algo_data = &adapter->i2c_algo;
adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
+ strscpy(adapter->i2c_adap.name, "igb BB",
sizeof(adapter->i2c_adap.name));
status = i2c_bit_add_bus(&adapter->i2c_adap);
return status;
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 9d4322b74163..83b97989a6bd 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -169,8 +169,8 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f4e91db89fe5..3a32809510fc 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1109,7 +1109,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
return -ENOMEM;
}
- netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5c66b97c0cfa..4f9d7f013a95 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -610,7 +610,6 @@
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
-#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ebff0e04045d..34889be63e78 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
return ok;
}
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
- struct xdp_frame *xdpf,
- struct igc_ring *ring)
-{
- dma_addr_t dma;
-
- dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
- return -ENOMEM;
- }
-
- buffer->type = IGC_TX_BUFFER_TYPE_XDP;
- buffer->xdpf = xdpf;
- buffer->protocol = 0;
- buffer->bytecount = xdpf->len;
- buffer->gso_segs = 1;
- buffer->time_stamp = jiffies;
- dma_unmap_len_set(buffer, len, xdpf->len);
- dma_unmap_addr_set(buffer, dma, dma);
- return 0;
-}
-
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
- struct igc_tx_buffer *buffer;
- union igc_adv_tx_desc *desc;
- u32 cmd_type, olinfo_status;
- int err;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, index = ring->next_to_use;
+ struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+ struct igc_tx_buffer *buffer = head;
+ union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+ u32 olinfo_status, len = xdpf->len, cmd_type;
+ void *data = xdpf->data;
+ u16 i;
- if (!igc_desc_unused(ring))
- return -EBUSY;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- buffer = &ring->tx_buffer_info[ring->next_to_use];
- err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
- if (err)
- return err;
+ if (igc_maybe_stop_tx(ring, count + 3)) {
+ /* this is a hard error */
+ return -EBUSY;
+ }
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- buffer->bytecount;
- olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+ i = 0;
+ head->bytecount = xdp_get_frame_len(xdpf);
+ head->type = IGC_TX_BUFFER_TYPE_XDP;
+ head->gso_segs = 1;
+ head->xdpf = xdpf;
- desc = IGC_TX_DESC(ring, ring->next_to_use);
- desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
- netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+ for (;;) {
+ dma_addr_t dma;
- buffer->next_to_watch = desc;
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev,
+ "Failed to map DMA for TX\n");
+ goto unmap;
+ }
- ring->next_to_use++;
- if (ring->next_to_use == ring->count)
- ring->next_to_use = 0;
+ dma_unmap_len_set(buffer, len, len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | len;
+
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.buffer_addr = cpu_to_le64(dma);
+
+ buffer->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ buffer = &ring->tx_buffer_info[index];
+ desc = IGC_TX_DESC(ring, index);
+ desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+ /* set the timestamp */
+ head->time_stamp = jiffies;
+ /* set next_to_watch value indicating a packet is present */
+ head->next_to_watch = desc;
+ ring->next_to_use = index;
return 0;
+
+unmap:
+ for (;;) {
+ buffer = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(buffer, len))
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(buffer, dma),
+ dma_unmap_len(buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(buffer, len, 0);
+ if (buffer == head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
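The rewritten igc_xdp_init_tx_descriptor() above maps the head buffer and then each fragment in turn, and on a DMA mapping failure walks the already-used descriptor slots backwards, wrapping through index 0, to undo the mappings it has made. The sketch below drives that backwards walk with a fake mapping table instead of real DMA; the ring size and indices are arbitrary.

/* Illustrative sketch (not part of the patch): the backwards unwind with
 * ring wrap used by the error path above ("if (!index) index += count;
 * index--"), modeled with a plain array instead of DMA mappings.
 */
#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	unsigned int head = 6;		/* index of the first buffer used */
	unsigned int index = head;
	unsigned int mapped_len[RING_COUNT] = { 0 };
	unsigned int i;

	/* "map" head + 3 fragments, wrapping past the end of the ring */
	for (i = 0; i < 4; i++) {
		mapped_len[index] = 100 + i;
		if (++index == RING_COUNT)
			index = 0;
	}

	/* failure at the current slot: walk back down to head, unmapping */
	for (;;) {
		if (mapped_len[index]) {
			printf("unmap slot %u (len %u)\n", index, mapped_len[index]);
			mapped_len[index] = 0;
		}
		if (index == head)
			break;
		if (!index)
			index += RING_COUNT;
		index--;
	}
	return 0;
}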
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
+ xdp_buff_clear_frags_flag(&xdp);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -4356,8 +4394,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 46efcfab7234..efa980514944 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -456,9 +456,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgb_driver_name,
+ strscpy(drvinfo->driver, ixgb_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 45be9a1ab6af..b4d47e7a76c8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -414,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgb_netdev_ops;
ixgb_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, ixgb_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 04f453eabef6..e88e3dfac8c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1106,12 +1106,12 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ strscpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
@@ -1964,15 +1964,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
frame_size >>= 1;
- data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+ data = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
- kunmap(rx_buffer->page);
-
return match;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 0fcd82036d4e..7311bd545acf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1004,7 +1004,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
UTS_RELEASE);
/* Firmware Version */
- strlcpy(info->firmware_version, adapter->eeprom_id,
+ strscpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
/* Model */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 86b11164655e..f8156fe4b1dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -874,8 +874,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d1e430b8c8aa..298cfbfcb7b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10849,7 +10849,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
@@ -11140,7 +11140,7 @@ skip_sriov:
err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
if (err)
- strlcpy(part_str, "Unknown", sizeof(part_str));
+ strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7f7ea468ffa9..2b00db92b08f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3712,7 +3712,9 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3722,6 +3724,7 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 35c2b9b8bd19..aa4bf6c9a2f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1721,9 +1721,59 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return IXGBE_ERR_LINK_SETUP;
}
- status = mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* change mode enforcement rules to hybrid */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x0400;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* manually control the config */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20002240;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* move the AN base page values */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x1;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* set the AN37 over CB mode */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20000000;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* restart AN manually */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index fed46872af2b..ccfa6b91aac6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -213,8 +213,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2f12fbe229c1..99933e89717a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2733,7 +2733,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f43d6616bc0d..1732ec3c3dbd 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2332,9 +2332,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -3009,7 +3009,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc);
}
- netif_napi_add(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &jme->napi, jme_poll);
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index df9a8eefa007..2b9335cb4bb3 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -416,7 +416,8 @@ static void korina_abort_rx(struct net_device *dev)
}
/* transmit packet */
-static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t korina_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
u32 chain_prev, chain_next;
@@ -938,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1354,7 +1355,7 @@ static int korina_probe(struct platform_device *pdev)
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lp->napi, korina_poll);
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = korina_mdio_read;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7cedbe1fdfd7..59aab4086dcc 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -470,7 +470,7 @@ ltq_etop_stop(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
int queue = skb_get_queue_mapping(skb);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 57f27cc7724e..8d646c7f8c82 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -620,8 +620,7 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
xrx200_tx_housekeeping);
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index fdd99f0de424..35f24e0f0934 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -152,7 +152,8 @@ static int liteeth_stop(struct net_device *netdev)
return 0;
}
-static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct liteeth *priv = netdev_priv(netdev);
void __iomem *txbuffer;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b6be0552a6c1..707993b445d1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1603,12 +1603,12 @@ mv643xx_eth_set_link_ksettings(struct net_device *dev,
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ strscpy(drvinfo->driver, mv643xx_eth_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ strscpy(drvinfo->version, mv643xx_eth_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int mv643xx_eth_get_coalesce(struct net_device *dev,
@@ -3183,7 +3183,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0caa2df87c04..ff3e361e06e7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4656,11 +4656,11 @@ mvneta_ethtool_get_coalesce(struct net_device *dev,
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5600,14 +5600,13 @@ static int mvneta_probe(struct platform_device *pdev)
* operation, so only single NAPI should be initialized.
*/
if (pp->neta_armada3700) {
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pp->napi, mvneta_poll);
} else {
for_each_present_cpu(cpu) {
struct mvneta_pcpu_port *port =
per_cpu_ptr(pp->ports, cpu);
- netif_napi_add(dev, &port->napi, mvneta_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, mvneta_poll);
port->pp = pp;
}
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ad73a488fc5f..11e603686a27 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..75e83ea2a926 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
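Instead of looking up the shared debugfs directory on every probe, the driver above now keeps the root dentry in a file-scope variable, creates it on first use, and removes it exactly once via mvpp2_dbgfs_exit() from the module_exit hook added in mvpp2_main.c below. The sketch below models that lazy, module-scoped singleton with plain malloc/free; all names are invented for the demo.

/* Illustrative sketch (not part of the patch): a shared root resource that
 * is created on first use and torn down once at "module exit", rather than
 * being looked up and dropped per device.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_root { int users; };

static struct demo_root *demo_root;	/* file scope, like mvpp2_root above */

static int demo_init_one(const char *name)
{
	if (!demo_root) {			/* create only on first use */
		demo_root = calloc(1, sizeof(*demo_root));
		if (!demo_root)
			return -12;		/* -ENOMEM */
	}
	demo_root->users++;
	printf("registered %s under the shared root (%d users)\n",
	       name, demo_root->users);
	return 0;
}

static void demo_module_exit(void)
{
	free(demo_root);			/* single teardown point */
	demo_root = NULL;
	printf("shared root removed\n");
}

int main(void)
{
	demo_init_one("eth0");
	demo_init_one("eth1");
	demo_module_exit();
	return 0;
}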
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b84128b549b4..eb0fb8128096 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5425,11 +5425,11 @@ mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5770,8 +5770,7 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
v->irq = irq_of_parse_and_map(port_node, 0);
if (v->irq <= 0)
return -EINVAL;
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
port->nqvecs = 1;
@@ -5831,8 +5830,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
goto err;
}
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
}
return 0;
@@ -7706,7 +7704,18 @@ static struct platform_driver mvpp2_driver = {
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 97f080c66dd4..9089adcb75f9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -410,7 +410,7 @@ static void octep_napi_add(struct octep_device *oct)
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
- octep_napi_poll, 64);
+ octep_napi_poll);
oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index d9ae0937d17a..392d9b0da0d7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -158,8 +158,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
goto desc_dma_alloc_err;
}
- oq->buff_info = (struct octep_rx_buffer *)
- vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
if (unlikely(!oq->buff_info)) {
dev_err(&oct->pdev->dev,
"Failed to allocate buffer info for OQ-%d\n", q_no);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 40203560b291..3cf4c8285c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o rvu_npc_hash.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d7762577e285..8d5d5a0f68c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -293,20 +293,74 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
- nix_bandprof_get_hwinfo_rsp)
-
-/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+ nix_bandprof_get_hwinfo_rsp) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
};
@@ -1471,6 +1525,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1478,6 +1533,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
+ int extts_on;
};
struct ptp_rsp {
@@ -1655,4 +1711,415 @@ enum cgx_af_status {
LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
};
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u8 all; /* Allocate all resource type one each */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Index of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* No of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Support 2 SA policy */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u16 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rulew_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+ u8 id; /* For type = PORT: if id is 0xFF (invalid), the port number is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* All resources stats mapped to PF are cleared */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000000..5ba618aed6ad
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
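+ /* Counters are cleared by reading them back with bit 0 of the
+  * CSE RX/TX control register set; the register is restored afterwards.
+  */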
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
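+ /* Each SC has four AN slots, so SA_MAP_MEM is indexed by (sc_id * 4) + an */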
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ /* Bit position is relative to the selected 64-bit enable register */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id % 64);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id % 64);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
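+ /* CN10K-B (single MCS block) has a second RX SecY policy word; clear it */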
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id % 64);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id % 64);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
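+ /* The last TCAM and SecY entries (MCS_RSRC_RSVD_CNT) are reserved for this default bypass rule */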
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+ return 0;
+}
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
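+ /* rule_idx is a global index across all rule types; subtracting the
+  * per-type offset gives the index into that type's register bank.
+  */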
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the cam resources mapped to pf */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was in use and has expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was in use and has expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ /* TX SA interrupt is raised only if autorekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
+ * one of the two SAs mapped to the SC expires: tx_sa_active = 0 means
+ * the SA in SA_index1 expired, otherwise the SA in SA_index0 expired.
+ */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ /* Skip SCs on which auto rekey is not enabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+ /* Check if the tx_sa_active status has changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+ /* SA_index0 has expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+ /* A set bit in *PN_THRESH_REACHEDX means the
+ * corresponding SA has expired.
+ */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+ /* Report the lmac_id that ran into the BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+ /* Report the lmac_id that ran into the PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable and clear the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+
+ /* Check which block has an interrupt pending */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
+
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+ /* Enable MCS IP interrupts */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ /* Enable CPM Rx/Tx interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ return ret;
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+ /* Check if an MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
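+ /* Each VLAN_CFGX value written below packs an enable bit (bit 0), the
+  * ethertype in bits [16:1] and a per-tag select bit (17 for CTag,
+  * 18 for STag).
+  */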
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode 25GB */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
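+ /* Assign each LMAC a contiguous range of 16 channels starting at 'base' */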
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
+ return err;
+ }
+ }
+
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibrate */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+ /* Set MCS to perform standard IEEE802.1AE macsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+ hw->mcs_x2p_intf = 5; /* x2p calibration intf */
+ hw->mcs_blks = 1; /* MCS blocks */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+ /* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+ /* Set mcs tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+ /* Set mcs rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* per port config */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000000..64dc2b80e15d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector Enumeration */
+enum mcs_int_vec_e {
+ MCS_INT_VEC_MIL_RX_GBL = 0x0,
+ MCS_INT_VEC_MIL_RX_LMACX = 0x1,
+ MCS_INT_VEC_MIL_TX_LMACX = 0x5,
+ MCS_INT_VEC_HIL_RX_GBL = 0x9,
+ MCS_INT_VEC_HIL_RX_LMACX = 0xa,
+ MCS_INT_VEC_HIL_TX_GBL = 0xe,
+ MCS_INT_VEC_HIL_TX_LMACX = 0xf,
+ MCS_INT_VEC_IP = 0x13,
+ MCS_INT_VEC_CNT = 0x14,
+};
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 4ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+ u16 *flowid2secy_map; /* flowid to secy mapping */
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+};
+
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+};
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
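+
+/* Illustrative usage (mirrors mcs_set_external_bypass() in mcs.c):
+ *	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ *	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val | BIT_ULL(6));
+ */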
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
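For readers of the patch: the per-silicon hooks declared in struct mcs_ops above are reached through the mcs->mcs_ops pointer (see the mailbox handlers in mcs_rvu_if.c further down). A minimal sketch of how probe code could bind and then dispatch these ops; the mcs_bind_ops_example() helper and the cn10kb_mcs_ops table name are assumptions for illustration, not part of this patch:

/* Illustrative sketch only, not part of the patch. Assumes a CN10K-B
 * ops table (cn10kb_mcs_ops) exists alongside cnf10kb_get_mac_ops().
 */
static void mcs_bind_ops_example(struct mcs *mcs, bool is_cnf10kb)
{
	mcs->mcs_ops = is_cnf10kb ? cnf10kb_get_mac_ops() : &cn10kb_mcs_ops;

	/* Later callers dispatch through the bound table, e.g.: */
	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
	mcs->mcs_ops->mcs_parser_cfg(mcs);
}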
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000000..7b6205414428
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+ hw->mcs_x2p_intf = 1; /* x2p calibration intf */
+ hw->mcs_blks = 7; /* MCS blocks */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN Ctag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* Enable custom tags 0 and 1 and sectag */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
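As a side note for reviewers, the SA map word written above packs both SA indices into the low 14 bits; a tiny decode sketch that mirrors the packing (the helper name is hypothetical):

/* Hypothetical decode helper, mirroring the packing above:
 * bits [6:0] hold sa_index0, bits [13:7] hold sa_index1.
 */
static void cnf10kb_tx_sa_map_decode(u64 val, u8 *sa_index0, u8 *sa_index1)
{
	*sa_index0 = val & 0x7F;
	*sa_index1 = (val >> 7) & 0x7F;
}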
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* TX SA interrupt is raised only if autorekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled
+ * when one of the two SAs mapped to the SC expires. tx_sa_active=0
+ * implies the SA in SA_index1 expired; otherwise the SA in SA_index0
+ * expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event;
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ /* Skip SCs that don't have auto rekey enabled */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+ /* Check if tx_sa_active status has changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+ /* Report the SA that just expired, based on the new tx_sa_active */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
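A concrete walk-through of the decision above, following the interpretation in the block comment (tx_sa_active toggles when one of the two SAs mapped to the SC expires):

/* Worked example (illustrative only):
 *   cached mcs->tx_sa_active[sc] == 0, new TX_SA_ACTIVEX read == 1
 *     -> status changed and sa_status != 0
 *     -> SA in SA_index0 expired; report sa_id = SA_MAP_MEM_0X[6:0]
 *   cached == 1, new read == 0
 *     -> status changed and sa_status == 0
 *     -> SA in SA_index1 expired; report sa_id = SA_MAP_MEM_0X[13:7]
 */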
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000000..c95a8b8f5eaf
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
+#define MCSX_IP_MODE 0x900c8ull
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
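Most offsets below are GNU statement-expression macros that select the CN10K-B or CNF10K-B address at run time from mcs->hw->mcs_blks, so they can only be expanded where a local struct mcs *mcs is in scope. A minimal usage sketch (the function and the value written are illustrative only):

/* Illustrative only: shows how the dual-offset macros are consumed. */
static void mcs_port_reset_example(struct mcs *mcs, u8 port_id)
{
	/* Expands to 0x408 + port_id * 8 on CN10K-B (single MCS block),
	 * or 0xa28 + port_id * 8 on CNF10K-B (mcs_blks > 1).
	 */
	mcs_reg_write(mcs, MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id), 0x1);
}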
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BBE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x37d8; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a)({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000000..fa8029a94068
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "lmac_common.h"
+
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
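The M() macro above stamps out one allocator per entry of MBOX_UP_MCS_MESSAGES. For the mcs_intr_notify message used by mcs_notify_pfvf() below it expands to roughly the following; the MBOX_MSG_MCS_INTR_NOTIFY id and the msg_rsp response type are assumed from the mailbox convention rather than shown in this hunk:

/* Approximate expansion of one M() entry (illustrative). */
static struct mcs_intr_info __maybe_unused
*otx2_mbox_alloc_msg_mcs_intr_notify(struct rvu *rvu, int devid)
{
	struct mcs_intr_info *req;

	req = (struct mcs_intr_info *)otx2_mbox_alloc_msg_rsp(
		&rvu->afpf_wq_info.mbox_up, devid,
		sizeof(struct mcs_intr_info), sizeof(struct msg_rsp));
	if (!req)
		return NULL;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	req->hdr.id = MBOX_MSG_MCS_INTR_NOTIFY;	/* id name assumed */
	return req;
}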
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+ /* Check if PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
+
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ /* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
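The same bracketing (assert FORCE_CLK_EN_IP on CNF10K-B, take stats_lock, read, then release both) repeats in the secy/sc/sa/port stats handlers that follow. A minimal sketch of how that pattern could be factored; the helper names are hypothetical and not part of this patch:

/* Hypothetical helpers capturing the repeated stats-read bracketing. */
static void mcs_stats_access_begin(struct mcs *mcs)
{
	/* CNF10K-B needs FORCE_CLK_EN_IP set for accurate counters */
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, true);
	mutex_lock(&mcs->stats_lock);
}

static void mcs_stats_access_end(struct mcs *mcs)
{
	mutex_unlock(&mcs->stats_lock);
	if (mcs->hw->mcs_blks > 1)
		mcs_set_force_clk_en(mcs, false);
}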
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+ /* CNF10K-B mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+ unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+ /* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 67a6821d2dff..3411e2e47d46 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
@@ -50,12 +52,23 @@
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+{
+ return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP;
+}
+
+static bool is_ptp_dev_cn10k(struct ptp *ptp)
+{
+ return ptp->pdev->device == PCI_DEVID_CN10K_PTP;
+}
+
static bool cn10k_ptp_errata(struct ptp *ptp)
{
if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
@@ -72,6 +85,43 @@ static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
return false;
}
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the PTP clock value has already crossed 0.5 seconds, it is
+ * too late to update the PPS threshold for this cycle, so update
+ * the threshold after the next one second boundary.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
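In short, the handler re-arms itself to fire just past the next one second rollover; a brief worked example of the two branches above (numbers illustrative):

/* Worked example (illustrative only):
 *   PTP_CLOCK_HI reads 0.3 s (300000000 ns) when the timer fires:
 *     below the 0.5 s cut-off, so 500000000 is written to
 *     PTP_PPS_THRESH_HI and the timer re-arms for
 *     NSEC_PER_SEC + 100 - delta_ns, i.e. roughly one second.
 *   PTP_CLOCK_HI reads 0.7 s (700000000 ns):
 *     too late for this cycle, so the threshold write is skipped and
 *     the timer re-arms for NSEC_PER_SEC + 100 - 700000000 ns,
 *     about 0.3 s, landing just after the next rollover.
 */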
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
u64 sec, sec1, nsec;
@@ -246,6 +296,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* sclk is in MHz */
ptp->clock_rate = sclk * 1000000;
+ /* Program the seconds rollover value to 1 second */
+ if (is_ptp_dev_cnf10kb(ptp))
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -270,6 +324,18 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi rolls over to zero one clock cycle before it
+ * reaches the one second boundary. So program pps_lo_incr in such
+ * a way that the PPS threshold value comparison at the one second
+ * boundary succeeds and the PPS edge changes. After each one
+ * second boundary, the hrtimer handler is invoked and reprograms
+ * the PPS threshold value.
+ */
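+ /* For example, with a 1000 MHz sclk the clock rate is 1 GHz, so
+ * clock_period is 1 ns and PPS_LO_INCR is programmed to
+ * (0x1dcd6500 - 1) << 32, one clock cycle short of the nominal
+ * 0.5 s half period.
+ */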
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
@@ -282,14 +348,39 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
- *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10k(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
return 0;
}
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
- writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
return 0;
}
@@ -329,6 +420,11 @@ static int ptp_probe(struct pci_dev *pdev,
else
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
return 0;
error_free:
@@ -353,6 +449,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
+ if (!IS_ERR_OR_NULL(ptp) && cn10k_ptp_errata(ptp) &&
+     hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
if (IS_ERR_OR_NULL(ptp))
return;
@@ -420,6 +519,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 95a955159f40..b9d92abc3844 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -17,7 +17,10 @@ struct ptp {
void __iomem *reg_base;
u64 (*read_ptp_tstmp)(struct ptp *ptp);
spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
u32 clock_rate;
+ u32 clock_period;
};
struct ptp *ptp_get(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index ef59de43b11e..a70e1153fa04 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -415,11 +415,26 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
return;
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
- if (enable)
+ if (enable) {
cfg |= RPMX_RX_TS_PREPEND;
- else
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
}
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index c2bd6e54ea51..77f2ef9e1425 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -16,6 +16,7 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -72,6 +73,10 @@
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 7282a826d81e..3f5e09b77d4b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -16,6 +16,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
+#include "mcs.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
@@ -23,8 +24,6 @@
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
-
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -418,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
*hwvf = cfg & 0xFFF;
}
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
int pf, func;
u64 cfg;
@@ -1159,6 +1158,12 @@ cpt:
rvu_program_channels(rvu);
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
return 0;
nix_err:
@@ -3293,6 +3298,7 @@ err_mbox:
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
@@ -3319,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void)
if (err < 0)
goto ptp_err;
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
goto rvu_err;
return 0;
rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
@@ -3370,6 +3383,7 @@ ptp_err:
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d15bc443335d..76474385a602 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -25,6 +25,8 @@
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
struct dentry *nix;
struct dentry *npc;
struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -497,6 +503,8 @@ struct rvu {
struct ptp *ptp;
+ int mcs_blk_cnt;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -504,6 +512,12 @@ struct rvu {
/* RVU switch implementation over NPC with DMAC rules */
struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -868,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_exit(struct rvu *rvu);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index f42a09f04b25..a1970ebedf95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -19,6 +19,7 @@
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
+#include "mcs.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -227,6 +228,350 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+ seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+ seq_puts(filp, "\n MCS secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+ seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+ seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
+
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
@@ -3053,6 +3398,7 @@ create:
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0879a48411f3..7646bb2ec89b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4296,8 +4296,14 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
/* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
- rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 77a9ade91f3e..0e0d536645ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -266,6 +266,7 @@
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index d463dc72d80a..73fdb8798614 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index fd4f083c699e..826f691de259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000000..64f3acd7f67b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include <net/macsec.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB 0x10 /* epon */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+ }
+
+ return "Unknown";
+}
+
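+/* Ask the AF, over the mailbox, to allocate one MCS resource (flow id,
+ * SC, SECY or SA) in the given direction and return the hardware index
+ * through @rsrc_id.
+ */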
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[0] = ~0ULL;
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ unsigned char *src = rxsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg],
+ (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+ /* Insert SecTag after 12 bytes (DA+SA) */
+ u8 tag_offset = 12;
+ u8 sectag_tci = 0;
+ u64 policy;
+ int ret;
+
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
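+ /* For example, with send_sci and encrypt set, sectag_tci is
+ * MCS_TCI_SC | MCS_TCI_E | MCS_TCI_C = 0x2c and the policy below
+ * carries 0x2c >> 2, since the AN bits (1..0) are not part of the
+ * secy policy.
+ */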
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ /* Write SecTag excluding AN bits(1..0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+ /* If the encoding SA does not exist or is not active and protect
+ * is not set, then frames can be sent out as they are. Hence
+ * enable the policy irrespective of the secy being operational
+ * when !protect.
+ */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+ /* This can be enabled since stack xmits packets only when interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ /* Link the encoding_sa only to SC out of all SAs */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ unsigned char *src = txsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
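+/* Allocate the hardware resources backing one software SecY on the TX
+ * side: a flow id, a TX secy, an RX secy (secys are per-direction in
+ * hardware) and a TX SC. On failure, the already allocated resources are
+ * freed back.
+ */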
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ /* For each SecY, one TX secy and one RX secy HW resource are needed */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free the Tx SC and its SA resources (if any) back to the AF */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free the Rx SC and its SA resources (if any) back to the AF */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+ /* When updating secy, change RX secy also */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn_halves.lower);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
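+/* Some RX counters are shared in hardware between the validate_frames and
+ * protect_frames configurations. Before the secy policy changes, fold the
+ * current hardware counts into the software mirrors and clear them, so the
+ * per-mode stats stay consistent across the update.
+ */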
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ /* Because some stats share counters in the hardware, take a
+ * snapshot of the current stats and reset them when updating the
+ * secy policy. Below are the stats affected by the shared counters.
+ */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->protect_frames == txsc->last_protect_frames)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_protect_frames)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+ /* Stick to 16 bytes key len until XPN support is added */
+ if (secy->key_len != 16)
+ return -EOPNOTSUPP;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
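+ /* RX SecY counters are shared in hardware, so accumulate them in
+ * software and report the running totals (see struct cn10k_txsc_stats).
+ */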
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->protect_frames)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
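+/* MACsec offload callbacks invoked by the core macsec driver; installed
+ * on the netdev as macsec_ops in cn10k_mcs_init().
+ */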
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
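+ /* Subscribe to the TX packet-number wrap interrupt (XPN reaching zero)
+ * so cn10k_handle_mcs_event() can report the SA via macsec_pn_wrapped().
+ */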
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index d686c7b6252f..9ac9e6615ae7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -586,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
@@ -602,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->lvl = lvl;
req->num_regs = 1;
- schq = hw->txschq_list[lvl][0];
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -611,7 +618,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
(0x2ULL << 36);
req->num_regs++;
/* MDQ config */
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
req->regval[1] = parent << 16;
req->num_regs++;
@@ -619,14 +626,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
@@ -635,11 +642,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
@@ -650,8 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure
+ * and set relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
@@ -676,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
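+ /* Setting bit 49 of NIX_AF_SMQX_CFG initiates the SMQ flush */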
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
@@ -806,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
@@ -1792,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index b28029cc4316..282db6fe3b08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,6 +19,7 @@
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@@ -33,6 +34,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -40,6 +42,11 @@
#define NAME_SIZE 32
+#ifdef CONFIG_DCB
+/* Max priority supported for PFC */
+#define NIX_PF_PFC_PRIO_MAX 8
+#endif
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -196,7 +203,7 @@ struct otx2_hw {
/* NIX */
u8 txschq_link_cfg_lvl;
- u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -238,6 +245,8 @@ struct otx2_hw {
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+#define CN10K_HW_MACSEC 5
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -271,6 +280,13 @@ struct refill_work {
struct otx2_nic *pf;
};
+/* PTPv2 originTimestamp structure */
+struct ptpv2_tstamp {
+ __be16 seconds_msb; /* 16 bits + */
+ __be32 seconds_lsb; /* 32 bits = 48 bits */
+ __be32 nanoseconds;
+} __packed;
+
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -286,6 +302,9 @@ struct otx2_ptp {
struct ptp_pin_desc extts_config;
u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+ u32 base_ns;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -335,6 +354,66 @@ struct dev_hw_ops {
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
+#define CN10K_MCS_SA_PER_SC 4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+ u64 InPktsUntagged;
+ u64 InPktsNoTag;
+ u64 InPktsBadTag;
+ u64 InPktsUnknownSCI;
+ u64 InPktsNoSCI;
+ u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+ u64 InOctetsValidated;
+ u64 InOctetsDecrypted;
+ u64 InPktsUnchecked;
+ u64 InPktsDelayed;
+ u64 InPktsOK;
+ u64 InPktsInvalid;
+ u64 InPktsLate;
+ u64 InPktsNotValid;
+ u64 InPktsNotUsingSA;
+ u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+ struct macsec_secy *sw_secy;
+ struct cn10k_txsc_stats stats;
+ struct list_head entry;
+ enum macsec_validation_type last_validate_frames;
+ bool last_protect_frames;
+ u16 hw_secy_id_tx;
+ u16 hw_secy_id_rx;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+ struct macsec_secy *sw_secy;
+ struct macsec_rx_sc *sw_rxsc;
+ struct cn10k_rxsc_stats stats;
+ struct list_head entry;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+ struct list_head txsc_list;
+ struct list_head rxsc_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -358,6 +437,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -415,10 +495,16 @@ struct otx2_nic {
/* PFC */
u8 pfc_en;
u8 *queue_to_pfc_map;
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
#endif
/* napi event count. It is needed for adaptive irq coalescing. */
u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct cn10k_mcs_cfg *macsec_cfg;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -458,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -487,7 +578,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
+
+ if (is_dev_cn10kb(pfvf->pdev))
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -743,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@@ -785,6 +881,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
dir, DMA_ATTR_SKIP_CPU_SYNC);
}
+static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+{
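+ /* Use the dedicated PFC SMQ when one has been allocated for this
+ * queue's priority, otherwise fall back to the default SMQ.
+ */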
+#ifdef CONFIG_DCB
+ if (pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+#endif
+
+ return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -807,7 +913,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
@@ -888,6 +994,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -907,5 +1015,24 @@ void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
int otx2_dcbnl_set_ops(struct net_device *dev);
+/* PFC support */
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 723d2506d309..ccaf97bb1ce0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -7,6 +7,289 @@
#include "otx2_common.h"
+static int otx2_check_pfc_config(struct otx2_nic *pfvf)
+{
+ u8 tx_queues = pfvf->hw.tx_queues, prio;
+ u8 pfc_en = pfvf->pfc_en;
+
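+ /* PFC priority 'prio' is serviced by the TX queue of the same index,
+ * so enabling it requires at least prio + 1 TX queues.
+ */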
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ if ((pfc_en & (1 << prio)) &&
+ prio > tx_queues - 1) {
+ dev_warn(pfvf->dev,
+ "Increase number of tx queues from %d to %d to support PFC.\n",
+ tx_queues, prio + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, lvl, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* Either PFC bit is not set
+ * or tx scheduler is not allocated for the priority
+ */
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Configure the scheduler at each TL (transmit level) */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pfvf, lvl, prio, true);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
+ __func__, lvl, prio);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level up to the configured link config
+ * level. The rest of the schedulers can be the same as
+ * hw.txschq_list.
+ */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ req->schq[lvl] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ if (!rsp->schq[lvl])
+ return -ENOSPC;
+
+ pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
+ }
+
+ /* Set the Tx schedulers for the rest of the levels to the same
+ * values as hw.txschq_list, as those are common to all priorities.
+ */
+ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];
+
+ pfvf->pfc_alloc_status[prio] = true;
+ return 0;
+}
+
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+{
+ u8 pfc_en = pfvf->pfc_en;
+ u8 pfc_bit_set;
+ int err, prio;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_free_req *free_req;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* free PFC TLx nodes */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ pfvf->pfc_alloc_status[prio] = false;
+ return 0;
+}
+
+static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct net_device *dev = pfvf->netdev;
+ bool if_up = netif_running(dev);
+ struct nix_aq_enq_req *sq_aq;
+
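+ /* Pause transmission while the SQ is re-attached to a new SMQ */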
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_stop_all_queues(pfvf->netdev);
+ else
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ cn10k_sq_aq->qidx = prio;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ cn10k_sq_aq->sq.ena = 1;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
+ cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ sq_aq->qidx = prio;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ sq_aq->sq.ena = 1;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->sq_mask.smq = GENMASK(8, 0);
+ sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_start_all_queues(pfvf->netdev);
+ else
+ netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
+ struct mbox *mbox = &pfvf->mbox;
+ int err, prio;
+
+ mutex_lock(&mbox->lock);
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* tx scheduler was created but user wants to disable now */
+ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
+ mutex_unlock(&mbox->lock);
+ if (if_up)
+ netif_tx_stop_all_queues(pfvf->netdev);
+
+ otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
+ if (if_up)
+ netif_tx_start_all_queues(pfvf->netdev);
+
+ /* delete the schq */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s failed to stop PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+ mutex_lock(&mbox->lock);
+ goto update_sq_smq_map;
+ }
+
+ /* Either PFC bit is not set
+ * or Tx scheduler is already mapped for the priority
+ */
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev,
+ "%s failed to allocate PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+update_sq_smq_map:
+ err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping\n", __func__, prio);
+ return err;
+ }
+ }
+
+ err = otx2_pfc_txschq_config(pfvf);
+ mutex_unlock(&mbox->lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Delete the existing scheduler */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
struct cgx_pfc_cfg *req;
@@ -128,6 +411,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
/* Save PFC configuration to interface */
pfvf->pfc_en = pfc->pfc_en;
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+ goto process_pfc;
+
+ /* Check if the PFC configuration can be
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+ if (err)
+ return err;
+
+process_pfc:
err = otx2_config_priority_flow_ctrl(pfvf);
if (err)
return err;
@@ -136,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
if (pfc->pfc_en)
otx2_nix_config_bp(pfvf, true);
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 3f60a80e34c8..0eb74e8c553d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -76,8 +76,8 @@ static void otx2_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
@@ -963,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -1313,8 +1315,8 @@ static void otx2vf_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *vf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 9376d0e62914..5803d7f9137c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -858,6 +858,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+ struct mcs_intr_info *event,
+ struct msg_rsp *rsp)
+{
+ cn10k_handle_mcs_event(pf, event);
+
+ return 0;
+}
+
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@@ -917,6 +926,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
return err; \
}
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
break;
default:
@@ -1389,18 +1399,40 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
goto err_free_sq_ptrs;
}
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+ }
+#endif
+
err = otx2_config_nix_queues(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
+
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl);
+ err = otx2_txschq_config(pf, lvl, 0, false);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_config(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
+#endif
+
mutex_unlock(&mbox->lock);
return err;
@@ -1455,6 +1487,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+#ifdef CONFIG_DCB
+ if (pf->pfc_en)
+ otx2_pfc_txschq_stop(pf);
+#endif
+
mutex_lock(&mbox->lock);
/* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
@@ -1634,8 +1671,7 @@ int otx2_open(struct net_device *netdev)
cq_poll->dev = (void *)pf;
cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
- netif_napi_add(netdev, &cq_poll->napi,
- otx2_napi_handler, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
napi_enable(&cq_poll->napi);
}
@@ -1853,6 +1889,30 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+#ifdef CONFIG_DCB
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u8 vlan_prio;
+#endif
+
+#ifdef CONFIG_DCB
+ if (!skb->vlan_present)
+ goto pick_tx;
+
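+ /* Transmit on the queue matching the VLAN PCP (top three bits of the
+ * TCI) when a PFC scheduler has been allocated for that priority.
+ */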
+ vlan_prio = skb->vlan_tci >> 13;
+ if ((vlan_prio > pf->hw.tx_queues - 1) ||
+ !pf->pfc_alloc_status[vlan_prio])
+ goto pick_tx;
+
+ return vlan_prio;
+
+pick_tx:
+#endif
+ return netdev_pick_tx(netdev, skb, NULL);
+}
+
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1987,8 +2047,19 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ return -ERANGE;
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ fallthrough;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -2447,6 +2518,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_select_queue = otx2_select_queue,
.ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
@@ -2702,6 +2774,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ err = cn10k_mcs_init(pf);
+ if (err)
+ goto err_del_mcam_entries;
+
if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2916,6 +2992,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_config_pause_frm(pf);
}
+ cn10k_mcs_free(pf);
+
#ifdef CONFIG_DCB
/* Disable PFC config */
if (pf->pfc_en) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index fdc2c9315b91..896b2f9bac34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,33 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
- struct ptp_rsp *rsp;
- int err;
if (!ptp->nic)
- return 0;
+ return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
- return 0;
+ return -ENOMEM;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
- &req->hdr);
- if (IS_ERR(rsp))
- return 0;
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
- return rsp->clk;
+ return otx2_ptp_get_clock(ptp);
}
static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
@@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
+{
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ *tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
- struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(ptp, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
- ptp->last_extts = tstmp;
-
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
+ ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 tstamp;
+
+ mutex_lock(&pfvf->mbox.lock);
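+ /* Periodically cache the PHC time and its sub-second remainder; the
+ * TX hot path uses them to fill the one-step Sync originTimestamp
+ * without a mailbox round trip.
+ */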
+ tstamp = otx2_ptp_get_clock(ptp);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+ ptp->base_ns = tstamp % NSEC_PER_SEC;
+
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
@@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on)
+ if (on) {
+ ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- else
+ } else {
+ ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
+ }
return 0;
default:
break;
@@ -302,6 +349,8 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
}
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
@@ -316,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index 4bbd12ff26e6..aa205a0d158f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -236,8 +236,15 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_51_16 : 36;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index a18e8efd0f1e..5ec11d71bf60 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -19,6 +19,12 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT 0x13F
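+/* 0x13F is UDP port 319, used by PTP event messages such as Sync */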
+/* The PTPv2 originTimestamp starts at byte offset 34 from the start of
+ * the PTP header and contains a 6-byte seconds field and a 4-byte
+ * nanoseconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET 34
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
@@ -686,7 +692,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -696,6 +703,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = udp_csum;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -952,16 +966,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ u8 *data = skb->data, *msgtype;
+ __be16 proto = eth->h_proto;
+ int network_depth = 0;
+
+ /* NIX is programmed to offload the outer VLAN header. With a single
+ * VLAN the protocol field holds the network header type (ETH_P_IP/IPV6);
+ * with stacked VLANs it holds the inner VLAN type (0x8100).
+ */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB APIs like skb_transport_offset() do not include the
+ * offloaded VLAN header length, so add it explicitly.
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *udp_csum = 1;
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ }
+
+ msgtype = data + *offset;
+
+ /* Check whether the PTP message type is SYNC (0) */
+ return (*msgtype & 0xf) == 0;
+}
+
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
+ struct ptpv2_tstamp *origin_tstamp;
+ int ptp_offset = 0, udp_csum = 0;
+ struct timespec64 ts;
u64 iova;
- if (!skb_shinfo(skb)->gso_size &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (unlikely(!skb_shinfo(skb)->gso_size &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+ if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+ origin_tstamp = (struct ptpv2_tstamp *)
+ ((u8 *)skb->data + ptp_offset +
+ PTP_SYNC_SEC_OFFSET);
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+ origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+ origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+ }
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, pfvf->ptp->base_ns, udp_csum);
} else {
skb_tx_timestamp(skb);
}
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index d395f4131648..df14cee80153 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -4,6 +4,6 @@ prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
prestera_switchdev.o prestera_acl.o prestera_flow.o \
prestera_flower.o prestera_span.o prestera_counter.o \
- prestera_router.o prestera_router_hw.o
+ prestera_router.o prestera_router_hw.o prestera_matchall.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2f84d0fb4094..35554ee805cd 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -306,17 +306,27 @@ struct prestera_switch {
struct prestera_counter *counter;
u8 lag_member_max;
u8 lag_max;
+ u32 size_tbl_router_nexthop;
};
struct prestera_router {
struct prestera_switch *sw;
struct list_head vr_list;
struct list_head rif_entry_list;
+ struct rhashtable nh_neigh_ht;
+ struct rhashtable nexthop_group_ht;
struct rhashtable fib_ht;
+ struct rhashtable kern_neigh_cache_ht;
struct rhashtable kern_fib_cache_ht;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ u8 *nhgrp_hw_state_cache; /* Bitmap cache of nexthops' HW state */
+ unsigned long nhgrp_hw_cache_kick; /* jiffies */
+ struct {
+ struct delayed_work dw;
+ } neighs_update;
};
struct prestera_rxtx_params {
@@ -362,11 +372,15 @@ int prestera_port_cfg_mac_write(struct prestera_port *port,
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
void prestera_queue_work(struct work_struct *work);
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay);
+void prestera_queue_drain(void);
int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 3d4b85f2d541..cba89fda504b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -54,6 +54,10 @@ struct prestera_acl_ruleset {
struct prestera_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
struct prestera_acl *acl;
+ struct {
+ u32 min;
+ u32 max;
+ } prio;
unsigned long rule_count;
refcount_t refcount;
void *keymask;
@@ -162,6 +166,9 @@ prestera_acl_ruleset_create(struct prestera_acl *acl,
ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
ruleset->index = uid;
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
if (err)
@@ -178,10 +185,14 @@ err_rhashtable_init:
return ERR_PTR(err);
}
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
- void *keymask)
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
{
ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
+ if (!ruleset->keymask)
+ return -ENOMEM;
+
+ return 0;
}
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
@@ -365,6 +376,26 @@ prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
block->ruleset_zero = NULL;
}
+static void
+prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl,
+ struct prestera_acl_ruleset *ruleset)
+{
+ struct prestera_acl_rule *rule;
+
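+ /* Recompute the ruleset's min/max rule priority from the rules that
+ * remain on this chain.
+ */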
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ list_for_each_entry(rule, &acl->rules, list) {
+ if (ruleset->ingress != rule->ruleset->ingress)
+ continue;
+ if (ruleset->ht_key.chain_index != rule->chain_index)
+ continue;
+
+ ruleset->prio.min = min(ruleset->prio.min, rule->priority);
+ ruleset->prio.max = max(ruleset->prio.max, rule->priority);
+ }
+}
+
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
{
@@ -389,6 +420,13 @@ u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
return ruleset->index;
}
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max)
+{
+ *prio_min = ruleset->prio.min;
+ *prio_max = ruleset->prio.max;
+}
+
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
return ruleset->offload;
@@ -429,6 +467,13 @@ void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
kfree(rule);
}
+static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset,
+ u32 prio)
+{
+ ruleset->prio.min = min(ruleset->prio.min, prio);
+ ruleset->prio.max = max(ruleset->prio.max, prio);
+}
+
int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule)
{
@@ -468,6 +513,7 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
list_add_tail(&rule->list, &sw->acl->rules);
ruleset->rule_count++;
+ prestera_acl_ruleset_prio_update(ruleset, rule->priority);
return 0;
err_acl_block_bind:
@@ -492,6 +538,7 @@ void prestera_acl_rule_del(struct prestera_switch *sw,
list_del(&rule->list);
prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+ prestera_acl_ruleset_prio_refresh(sw->acl, ruleset);
/* unbind block (all ports) */
if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 03fc5b9dc925..a35cc0609a1d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -185,8 +185,8 @@ struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
struct prestera_flow_block *block,
u32 chain_index);
-void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
- void *keymask);
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
@@ -195,6 +195,8 @@ int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max);
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 1da7ff889417..2f52daba58e6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -300,8 +300,8 @@ static void prestera_ethtool_get_drvinfo(struct net_device *dev,
struct prestera_port *port = netdev_priv(dev);
struct prestera_switch *sw = port->sw;
- strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index 2262693bd5cf..9f4267f326b0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -7,8 +7,9 @@
#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
-#include "prestera_span.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
static LIST_HEAD(prestera_block_cb_list);
@@ -17,9 +18,9 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return prestera_span_replace(block, f);
+ return prestera_mall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
return 0;
default:
return -EOPNOTSUPP;
@@ -89,6 +90,9 @@ prestera_flow_block_create(struct prestera_switch *sw,
INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
block->ingress = ingress;
return block;
@@ -263,7 +267,7 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
block = flow_block_cb_priv(block_cb);
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
err = prestera_flow_block_unbind(block, port);
if (err)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 0c9e13263261..a85a3eb40279 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -22,6 +22,11 @@ struct prestera_flow_block {
struct prestera_acl_ruleset *ruleset_zero;
struct flow_block_cb *block_cb;
struct list_head template_list;
+ struct {
+ u32 prio_min;
+ u32 prio_max;
+ bool bound;
+ } mall;
unsigned int rule_count;
bool ingress;
};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 19d3b55c578e..91a478b75cbf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -5,6 +5,7 @@
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
struct prestera_flower_template {
struct prestera_acl_ruleset *ruleset;
@@ -360,6 +361,49 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
f->common.extack);
}
+static int prestera_flower_prio_check(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ u32 mall_prio_min;
+ u32 mall_prio_max;
+ int err;
+
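+ /* Flower rules may not be placed in front of matchall rules on
+ * ingress, nor behind them on egress; check against the matchall
+ * priority range.
+ */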
+ err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+
+ if (f->common.prio <= mall_prio_max && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= mall_prio_min && !block->ingress) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
+ return 0;
+}
+
int prestera_flower_replace(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
@@ -368,6 +412,10 @@ int prestera_flower_replace(struct prestera_flow_block *block,
struct prestera_acl_rule *rule;
int err;
+ err = prestera_flower_prio_check(block, f);
+ if (err)
+ return err;
+
ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
@@ -452,7 +500,9 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
}
/* preserve keymask/template to this ruleset */
- prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ if (err)
+ goto err_ruleset_keymask_set;
/* skip error, as it is not possible to reject template operation,
* so, keep the reference to the ruleset for rules to be added
@@ -468,6 +518,8 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
list_add_rcu(&template->list, &block->template_list);
return 0;
+err_ruleset_keymask_set:
+ prestera_acl_ruleset_put(ruleset);
err_ruleset_get:
kfree(template);
err_malloc:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index 495f151e6fa9..1181115fe6fa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -19,5 +19,7 @@ int prestera_flower_tmplt_create(struct prestera_flow_block *block,
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f);
void prestera_flower_template_cleanup(struct prestera_flow_block *block);
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max);
#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 962d7e0c0cb5..fc6f7d2746e8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -10,11 +10,14 @@
#include "prestera_hw.h"
#include "prestera_acl.h"
#include "prestera_counter.h"
+#include "prestera_router_hw.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
#define PRESTERA_MIN_MTU 64
+#define PRESTERA_MSG_CHUNK_SIZE 1024
+
enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
@@ -57,6 +60,10 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
@@ -78,9 +85,11 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
- PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101,
- PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102,
PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105,
PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
@@ -101,6 +110,7 @@ enum {
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_LOCKED = 10,
PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
@@ -285,6 +295,7 @@ union prestera_msg_port_param {
u8 duplex;
u8 fec;
u8 fc;
+ u8 br_locked;
union {
struct {
u8 admin;
@@ -538,6 +549,14 @@ struct prestera_msg_ip_addr {
u8 __pad[3];
};
+struct prestera_msg_nh {
+ struct prestera_msg_iface oif;
+ __le32 hw_id;
+ u8 mac[ETH_ALEN];
+ u8 is_active;
+ u8 pad;
+};
+
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
@@ -563,6 +582,34 @@ struct prestera_msg_lpm_req {
u8 __pad[2];
};
+struct prestera_msg_nh_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX];
+ __le32 size;
+ __le32 grp_id;
+};
+
+struct prestera_msg_nh_chunk_req {
+ struct prestera_msg_cmd cmd;
+ __le32 offset;
+};
+
+struct prestera_msg_nh_chunk_resp {
+ struct prestera_msg_ret ret;
+ u8 hw_state[PRESTERA_MSG_CHUNK_SIZE];
+};
+
+struct prestera_msg_nh_grp_req {
+ struct prestera_msg_cmd cmd;
+ __le32 grp_id;
+ __le32 size;
+};
+
+struct prestera_msg_nh_grp_resp {
+ struct prestera_msg_ret ret;
+ __le32 grp_id;
+};
+
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
@@ -725,11 +772,15 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -745,6 +796,9 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
@@ -1022,6 +1076,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
+ sw->size_tbl_router_nexthop =
+ __le32_to_cpu(resp.size_tbl_router_nexthop);
return 0;
}
@@ -1431,27 +1487,39 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
return 0;
}
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
+ enum prestera_cmd_type_t cmd_type;
+
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND,
- &req.cmd, sizeof(req));
}
-int prestera_hw_span_unbind(const struct prestera_port *port)
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
+ enum prestera_cmd_type_t cmd_type;
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
- &req.cmd, sizeof(req));
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
}
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
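prestera_hw_span_bind()/unbind() now take a direction, so a mirror session can be attached to either side of a port. A hedged sketch of a caller, assuming the usual resolve-then-bind pattern (the real glue lives in prestera_span.c, which this hunk does not touch):

/* Sketch only: attach a SPAN session to a port in the direction that
 * matches the tc block, releasing the session again if binding fails.
 */
static int example_span_attach(struct prestera_port *port, bool ingress)
{
	u8 span_id;
	int err;

	err = prestera_hw_span_get(port, &span_id);
	if (err)
		return err;

	err = prestera_hw_span_bind(port, span_id, ingress);
	if (err)
		prestera_hw_span_release(port->sw, span_id);

	return err;
}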
@@ -1639,6 +1707,22 @@ int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .br_locked = br_locked,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -2004,6 +2088,85 @@ int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
sizeof(req));
}
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id)
+{
+ struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count),
+ .grp_id = __cpu_to_le32(grp_id) };
+ int i, err;
+
+ for (i = 0; i < count; i++) {
+ req.nh[i].is_active = nhs[i].connected;
+ memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN);
+ err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif);
+ if (err)
+ return err;
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */)
+{
+ static struct prestera_msg_nh_chunk_resp resp;
+ struct prestera_msg_nh_chunk_req req;
+ u32 buf_offset;
+ int err;
+
+ memset(&hw_state[0], 0, buf_size);
+ buf_offset = 0;
+ while (1) {
+ if (buf_offset >= buf_size)
+ break;
+
+ memset(&req, 0, sizeof(req));
+ req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */
+ err = prestera_cmd_ret(sw,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET,
+ &req.cmd, sizeof(req), &resp.ret,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ memcpy(&hw_state[buf_offset], &resp.hw_state[0],
+ buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
+ buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE);
+ buf_offset += PRESTERA_MSG_CHUNK_SIZE;
+ }
+
+ return 0;
+}
+
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id)
+{
+ struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) };
+ struct prestera_msg_nh_grp_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *grp_id = __le32_to_cpu(resp.grp_id);
+ return err;
+}
+
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id)
+{
+ struct prestera_msg_nh_grp_req req = {
+ .grp_id = __cpu_to_le32(grp_id),
+ .size = __cpu_to_le32(nh_count)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
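Taken together, the new nexthop-group commands form a small lifecycle: create a group of a given width, program its entries, poll the activity bitmap, delete the group. A hedged usage sketch; the one-bit-per-group layout of hw_state is an assumption, consistent with the size_tbl_router_nexthop / 8 sizing done in prestera_router_init() below:

/* Sketch: program a two-wide group and test its activity bit.
 * nhs[] is assumed to be filled in by the neighbour arbiter.
 */
static int example_nh_group_cycle(struct prestera_switch *sw,
				  struct prestera_neigh_info *nhs,
				  u8 *hw_state, u32 hw_state_bytes)
{
	u32 grp_id;
	int err;

	err = prestera_hw_nh_group_create(sw, 2, &grp_id);
	if (err)
		return err;

	err = prestera_hw_nh_entries_set(sw, 2, nhs, grp_id);
	if (err)
		goto out_del;

	/* Refresh the activity snapshot and test this group's bit. */
	err = prestera_hw_nhgrp_blk_get(sw, hw_state, hw_state_bytes);
	if (!err && !(hw_state[grp_id / 8] & BIT(grp_id % 8)))
		pr_debug("nexthop group %u reported inactive\n", grp_id);

out_del:
	prestera_hw_nh_group_delete(sw, 2, grp_id);
	return err;
}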
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 56e043146dd2..0a929279e1ce 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -146,6 +146,7 @@ struct prestera_counter_stats;
struct prestera_iface;
struct prestera_flood_domain;
struct prestera_mdb_entry;
+struct prestera_neigh_info;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -183,6 +184,8 @@ int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -243,8 +246,9 @@ int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
/* SPAN API */
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
-int prestera_hw_span_unbind(const struct prestera_port *port);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress);
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress);
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
/* Router API */
@@ -263,6 +267,16 @@ int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
__be32 dst, u32 dst_len);
+/* NH API */
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id);
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */);
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id);
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index ede3e53b9790..24f9d6024745 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -36,6 +36,17 @@ void prestera_queue_work(struct work_struct *work)
queue_work(prestera_owq, work);
}
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay)
+{
+ queue_delayed_work(prestera_wq, work, delay);
+}
+
+void prestera_queue_drain(void)
+{
+ drain_workqueue(prestera_wq);
+ drain_workqueue(prestera_owq);
+}
+
int prestera_port_learning_set(struct prestera_port *port, bool learn)
{
return prestera_hw_port_learning_set(port, learn);
@@ -51,6 +62,11 @@ int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
return prestera_hw_port_mc_flood_set(port, flood);
}
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked)
+{
+ return prestera_hw_port_br_locked_set(port, br_locked);
+}
+
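prestera_port_br_locked_set() is only the plumbing; the switchdev handler that reacts to the bridge's BR_PORT_LOCKED flag is not part of this hunk. A hedged sketch of what such a bridge-flags handler could look like (the function name and its placement are assumptions):

/* Sketch only: forward BR_PORT_LOCKED from the bridge port flags to the
 * new PRESTERA_CMD_PORT_ATTR_LOCKED attribute via the wrapper above.
 */
static int example_brport_flags_set(struct prestera_port *port,
				    struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_PORT_LOCKED)
		return prestera_port_br_locked_set(port,
						   flags.val & BR_PORT_LOCKED);

	return 0;
}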
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
enum prestera_accept_frm_type frm_type;
@@ -368,6 +384,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
if (!sw->np)
return 0;
+ of_node_get(sw->np);
ports = of_find_node_by_name(sw->np, "ports");
for_each_child_of_node(ports, node) {
@@ -417,6 +434,7 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
}
out:
+ of_node_put(node);
of_node_put(ports);
return err;
}
@@ -797,32 +815,30 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
caching_dw = &port->cached_hw_stats.caching_dw;
- if (port->phy_link) {
- memset(&smac, 0, sizeof(smac));
- smac.valid = true;
- smac.oper = pevt->data.mac.oper;
- if (smac.oper) {
- smac.mode = pevt->data.mac.mode;
- smac.speed = pevt->data.mac.speed;
- smac.duplex = pevt->data.mac.duplex;
- smac.fc = pevt->data.mac.fc;
- smac.fec = pevt->data.mac.fec;
- phylink_mac_change(port->phy_link, true);
- } else {
- phylink_mac_change(port->phy_link, false);
- }
- prestera_port_mac_state_cache_write(port, &smac);
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
}
+ prestera_port_mac_state_cache_write(port, &smac);
if (port->state_mac.oper) {
- if (!port->phy_link)
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, true);
+ else
netif_carrier_on(port->dev);
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
- } else if (netif_running(port->dev) &&
- netif_carrier_ok(port->dev)) {
- if (!port->phy_link)
+ } else {
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, false);
+ else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
netif_carrier_off(port->dev);
if (delayed_work_pending(caching_dw))
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
new file mode 100644
index 000000000000..6f2b95a5263e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
+
+static int prestera_mall_prio_check(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ u32 flower_prio_min;
+ u32 flower_prio_max;
+ int err;
+
+ err = prestera_flower_prio_get(block, f->common.chain_index,
+ &flower_prio_min, &flower_prio_max);
+ if (err == -ENOENT)
+ /* No flower filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+
+ if (f->common.prio <= flower_prio_max && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= flower_prio_min && block->ingress) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max)
+{
+ if (!block->mall.bound)
+ return -ENOENT;
+
+ *prio_min = block->mall.prio_min;
+ *prio_max = block->mall.prio_max;
+ return 0;
+}
+
+static void prestera_mall_prio_update(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ block->mall.prio_min = min(block->mall.prio_min, f->common.prio);
+ block->mall.prio_max = max(block->mall.prio_max, f->common.prio);
+}
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ err = prestera_mall_prio_check(block, f);
+ if (err)
+ return err;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err)
+ goto rollback;
+ }
+
+ prestera_mall_prio_update(block, f);
+
+ block->mall.bound = true;
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+ return err;
+}
+
+void prestera_mall_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.h b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
new file mode 100644
index 000000000000..fed08be80257
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_MATCHALL_H_
+#define _PRESTERA_MATCHALL_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_mall_destroy(struct prestera_flow_block *block);
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max);
+
+#endif /* _PRESTERA_MATCHALL_H_ */
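The two _prio_check() helpers added in this series enforce a single ordering rule between flower and matchall on the same block: on an ingress block the matchall (mirror) rules must come first (lower tc priority values) with all flower rules after them; on an egress block the order is reversed. A worked example of how the checks play out, with illustrative priorities:

/* Worked example (ingress block), matchall rules already at prio 10..20:
 *   new flower rule, prio 15   -> rejected (15 <= mall prio_max 20)
 *   new flower rule, prio 30   -> accepted (30 >  mall prio_max 20)
 * With flower rules at prio 30..40 on the same ingress block:
 *   new matchall rule, prio 25 -> accepted (25 <  flower prio_min 30)
 *   new matchall rule, prio 35 -> rejected (35 >= flower prio_min 30)
 * On an egress block every comparison flips: flower first, matchall last.
 */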
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index f538a749ebd4..59470d99f522 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -872,6 +872,7 @@ static void prestera_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id prestera_pci_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xCC1E) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 58f4e44d5ad7..4046be0e86ff 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -7,10 +7,35 @@
#include <net/inet_dscp.h>
#include <net/switchdev.h>
#include <linux/rhashtable.h>
+#include <net/nexthop.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
+#include <net/netevent.h>
#include "prestera.h"
#include "prestera_router_hw.h"
+#define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */
+
+struct prestera_kern_neigh_cache_key {
+ struct prestera_ip_addr addr;
+ struct net_device *dev;
+};
+
+struct prestera_kern_neigh_cache {
+ struct prestera_kern_neigh_cache_key key;
+ struct rhash_head ht_node;
+ struct list_head kern_fib_cache_list;
+	/* Hold prepared nh_neigh info while the neigh is in_kernel */
+ struct prestera_neigh_info nh_neigh_info;
+ /* Indicate if neighbour is reachable by direct route */
+ bool reachable;
+	/* Pin the cache entry while the neigh is present in the kernel */
+ bool in_kernel;
+};
+
struct prestera_kern_fib_cache_key {
struct prestera_ip_addr addr;
u32 prefix_len;
@@ -23,15 +48,29 @@ struct prestera_kern_fib_cache {
struct {
struct prestera_fib_key fib_key;
enum prestera_fib_type fib_type;
+ struct prestera_nexthop_group_key nh_grp_key;
} lpm_info; /* hold prepared lpm info */
/* Indicate if route is not overlapped by another table */
struct rhash_head ht_node; /* node of prestera_router */
- struct fib_info *fi;
- dscp_t kern_dscp;
- u8 kern_type;
+ struct prestera_kern_neigh_cache_head {
+ struct prestera_kern_fib_cache *this;
+ struct list_head head;
+ struct prestera_kern_neigh_cache *n_cache;
+ } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
+ union {
+		struct fib_notifier_info info; /* common to the v4/v6 variants */
+ struct fib_entry_notifier_info fen4_info;
+ };
bool reachable;
};
+static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_neigh_cache, key),
+ .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_neigh_cache_key),
+ .automatic_shrinking = true,
+};
+
static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
.key_offset = offsetof(struct prestera_kern_fib_cache, key),
.head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
@@ -51,15 +90,450 @@ static u32 prestera_fix_tb_id(u32 tb_id)
}
static void
-prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
+prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
struct prestera_kern_fib_cache_key *key)
{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
memset(key, 0, sizeof(*key));
+ key->addr.v = PRESTERA_IPV4;
key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
key->prefix_len = fen_info->dst_len;
key->kern_tb_id = fen_info->tb_id;
}
+static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ if (nhc->nhc_gw_family == AF_INET) {
+ nk->addr.v = PRESTERA_IPV4;
+ nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
+ } else {
+ nk->addr.v = PRESTERA_IPV6;
+ nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
+ }
+
+ nk->dev = nhc->nhc_dev;
+ return 0;
+}
+
+static void
+prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
+ struct prestera_nh_neigh_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ nk->addr = ck->addr;
+ nk->rif = (void *)ck->dev;
+}
+
+static bool
+prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ struct prestera_kern_neigh_cache_key tk;
+ int err;
+
+ err = prestera_util_nhc2nc_key(sw, nhc, &tk);
+ if (err)
+ return false;
+
+ if (memcmp(&tk, nk, sizeof(tk)))
+ return false;
+
+ return true;
+}
+
+static int
+prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ if (n->tbl->family == AF_INET) {
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = *(__be32 *)n->primary_key;
+ } else {
+ return -ENOENT;
+ }
+
+ key->dev = n->dev;
+
+ return 0;
+}
+
+static bool __prestera_fi_is_direct(struct fib_info *fi)
+{
+ struct fib_nh *fib_nh;
+
+ if (fib_info_num_path(fi) == 1) {
+ fib_nh = fib_info_nh(fi, 0);
+ if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
+ return true;
+ }
+
+ return false;
+}
+
+static bool prestera_fi_is_direct(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi_is_direct(fi);
+}
+
+static bool prestera_fi_is_nh(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi_is_direct(fi);
+}
+
+static bool __prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (!fi->fib6_nh->nh_common.nhc_gw_family)
+ return true;
+
+ return false;
+}
+
+static bool prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fi6_is_nh(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_direct(fen_info->fi);
+ else
+ return prestera_fi6_is_direct(fen6_info->rt);
+}
+
+static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_nh(fen_info->fi);
+ else
+ return prestera_fi6_is_nh(fen6_info->rt);
+}
+
+/* must be called with rcu_read_lock() */
+static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
+ __be32 *addr)
+{
+ struct flowi4 fl4;
+
+	/* TODO: walk through the appropriate kernel tables to learn
+	 * whether the same prefix exists in several tables
+	 */
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = *addr;
+ return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
+}
+
+static bool
+__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ struct net_device *dev)
+{
+ struct fib_nh *fib_nh;
+ struct fib_result res;
+ bool reachable;
+
+ reachable = false;
+
+ if (!prestera_util_kern_get_route(&res, tb_id, addr))
+ if (prestera_fi_is_direct(res.fi)) {
+ fib_nh = fib_info_nh(res.fi, 0);
+ if (dev == fib_nh->fib_nh_dev)
+ reachable = true;
+ }
+
+ return reachable;
+}
+
+/* Check if neigh route is reachable */
+static bool
+prestera_util_kern_n_is_reachable(u32 tb_id,
+ struct prestera_ip_addr *addr,
+ struct net_device *dev)
+{
+ if (addr->v == PRESTERA_IPV4)
+ return __prestera_util_kern_n_is_reachable_v4(tb_id,
+ &addr->u.ipv4,
+ dev);
+ else
+ return false;
+}
+
+static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
+ bool offloaded)
+{
+ if (offloaded)
+ n->flags |= NTF_OFFLOADED;
+ else
+ n->flags &= ~NTF_OFFLOADED;
+}
+
+static void
+prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
+{
+ if (offloaded)
+ nhc->nhc_flags |= RTNH_F_OFFLOAD;
+ else
+ nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
+
+ if (trap)
+ nhc->nhc_flags |= RTNH_F_TRAP;
+ else
+ nhc->nhc_flags &= ~RTNH_F_TRAP;
+}
+
+static struct fib_nh_common *
+prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+ struct fib6_info *iter;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return &fib_info_nh(fen4_info->fi, n)->nh_common;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ if (!n)
+ return &fen6_info->rt->fib6_nh->nh_common;
+
+ list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
+ fib6_siblings) {
+ if (!--n)
+ return &iter->fib6_nh->nh_common;
+ }
+ }
+
+	/* If the family is incorrect, the calling code has a bug.
+	 * Not finding the requested index is also a bug, because a valid
+	 * index must be produced by nhs, which checks the list length.
+	 */
+ WARN(1, "Invalid parameters passed to %s n=%d i=%p",
+ __func__, n, info);
+ return NULL;
+}
+
+static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fib_info_num_path(fen4_info->fi);
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ return fen6_info->rt->fib6_nsiblings + 1;
+ }
+
+ return 0;
+}
+
+static unsigned char
+prestera_kern_fib_info_type(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fen4_info->fi->fib_type;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+		/* TODO: ECMP in ipv6 is several routes.
+		 * Each route has a single nh.
+		 */
+ return fen6_info->rt->fib6_type;
+ }
+
+ return RTN_UNSPEC;
+}
+
+/* A uc_nh route whose key equals its nexthop is treated as a neighbour route */
+static bool
+prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
+{
+ if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
+ return false;
+
+ if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
+ return false;
+
+ if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
+ return false;
+
+ if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
+ &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
+ return false;
+
+ return true;
+}
+
+static int prestera_dev_if_type(const struct net_device *dev)
+{
+ struct macvlan_dev *vlan;
+
+ if (is_vlan_dev(dev) &&
+ netif_is_bridge_master(vlan_dev_real_dev(dev))) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_bridge_master(dev)) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_lag_master(dev)) {
+ return PRESTERA_IF_LAG_E;
+ } else if (netif_is_macvlan(dev)) {
+ vlan = netdev_priv(dev);
+ return prestera_dev_if_type(vlan->lowerdev);
+ } else {
+ return PRESTERA_IF_PORT_E;
+ }
+}
+
+static int
+prestera_neigh_iface_init(struct prestera_switch *sw,
+ struct prestera_iface *iface,
+ struct neighbour *n)
+{
+ struct prestera_port *port;
+
+ iface->vlan_id = 0; /* TODO: vlan egress */
+ iface->type = prestera_dev_if_type(n->dev);
+ if (iface->type != PRESTERA_IF_PORT_E)
+ return -EINVAL;
+
+ if (!prestera_netdev_check(n->dev))
+ return -EINVAL;
+
+ port = netdev_priv(n->dev);
+ iface->dev_port.hw_dev_num = port->dev_id;
+ iface->dev_port.port_num = port->hw_id;
+
+ return 0;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache =
+ rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
+ __prestera_kern_neigh_cache_ht_params);
+ return IS_ERR(n_cache) ? NULL : n_cache;
+}
+
+static void
+__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ dev_put(n_cache->key.dev);
+}
+
+static void
+__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static struct prestera_kern_neigh_cache *
+__prestera_kern_neigh_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
+ if (!n_cache)
+ goto err_kzalloc;
+
+ memcpy(&n_cache->key, key, sizeof(*key));
+ dev_hold(n_cache->key.dev);
+
+ INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
+ err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return n_cache;
+
+err_ht_insert:
+ dev_put(n_cache->key.dev);
+ kfree(n_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_get(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, key);
+ if (!n_cache)
+ n_cache = __prestera_kern_neigh_cache_create(sw, key);
+
+ return n_cache;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_put(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ if (!n_cache->in_kernel &&
+ list_empty(&n_cache->kern_fib_cache_list)) {
+ __prestera_kern_neigh_cache_destroy(sw, n_cache);
+ return NULL;
+ }
+
+ return n_cache;
+}
+
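The neigh cache carries no explicit refcount: an entry stays alive while it is either known to the kernel (in_kernel) or linked from at least one fib cache via kern_fib_cache_list, and _put() frees it once both conditions are gone. A minimal usage sketch, mirroring what __prestera_kern_fib_cache_create_nhs() and the fib cache destructor below do:

/* Sketch: link a fib cache onto the neigh cache to keep it alive, then
 * unlink and drop it; the entry is destroyed only once it is no longer
 * in the kernel and no fib cache references remain.
 */
static void example_neigh_cache_ref_cycle(struct prestera_switch *sw,
					  struct prestera_kern_fib_cache *fc,
					  struct prestera_kern_neigh_cache_key *key)
{
	struct prestera_kern_neigh_cache *nc;

	nc = prestera_kern_neigh_cache_get(sw, key);
	if (!nc)
		return;

	fc->kern_neigh_cache_head[0].this = fc;
	fc->kern_neigh_cache_head[0].n_cache = nc;
	list_add(&fc->kern_neigh_cache_head[0].head, &nc->kern_fib_cache_list);

	/* ... later, when the fib cache goes away ... */
	list_del(&fc->kern_neigh_cache_head[0].head);
	prestera_kern_neigh_cache_put(sw, nc);
}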
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_find(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key)
@@ -73,24 +547,79 @@ prestera_kern_fib_cache_find(struct prestera_switch *sw,
}
static void
+__prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int i;
+
+ for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
+ n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
+ if (n_cache) {
+ list_del(&fib_cache->kern_neigh_cache_head[i].head);
+ prestera_kern_neigh_cache_put(sw, n_cache);
+ }
+ }
+
+ fib_info_put(fib_cache->fen4_info.fi);
+}
+
+static void
prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
struct prestera_kern_fib_cache *fib_cache)
{
- fib_info_put(fib_cache->fi);
rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
&fib_cache->ht_node,
__prestera_kern_fib_cache_ht_params);
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
kfree(fib_cache);
}
+static int
+__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_neigh_cache_key nc_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ struct fib_nh_common *nhc;
+ int i, nhs, err;
+
+ if (!prestera_fib_info_is_nh(&fc->info))
+ return 0;
+
+ nhs = prestera_kern_fib_info_nhs(&fc->info);
+ if (nhs > PRESTERA_NHGR_SIZE_MAX)
+ return 0;
+
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
+ err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
+ if (err)
+ return 0;
+
+ n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
+ if (!n_cache)
+ return 0;
+
+ fc->kern_neigh_cache_head[i].this = fc;
+ fc->kern_neigh_cache_head[i].n_cache = n_cache;
+ list_add(&fc->kern_neigh_cache_head[i].head,
+ &n_cache->kern_fib_cache_list);
+ }
+
+ return 0;
+}
+
/* Operations on fi (offload, etc) must be wrapped in utils.
* This function just create storage.
*/
static struct prestera_kern_fib_cache *
prestera_kern_fib_cache_create(struct prestera_switch *sw,
struct prestera_kern_fib_cache_key *key,
- struct fib_info *fi, dscp_t dscp, u8 type)
+ struct fib_notifier_info *info)
{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
struct prestera_kern_fib_cache *fib_cache;
int err;
@@ -99,10 +628,8 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
goto err_kzalloc;
memcpy(&fib_cache->key, key, sizeof(*key));
- fib_info_hold(fi);
- fib_cache->fi = fi;
- fib_cache->kern_dscp = dscp;
- fib_cache->kern_type = type;
+ fib_info_hold(fen_info->fi);
+ memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
&fib_cache->ht_node,
@@ -110,48 +637,270 @@ prestera_kern_fib_cache_create(struct prestera_switch *sw,
if (err)
goto err_ht_insert;
+ /* Handle nexthops */
+ err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache);
+ if (err)
+ goto out; /* Not critical */
+
+out:
return fib_cache;
err_ht_insert:
- fib_info_put(fi);
+ fib_info_put(fen_info->fi);
kfree(fib_cache);
err_kzalloc:
return NULL;
}
static void
+__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fibc,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded, bool trap)
+{
+ struct fib_nh_common *nhc;
+ int i, nhs;
+
+ nhs = prestera_kern_fib_info_nhs(&fibc->info);
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
+ if (!nc) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ continue;
+ }
+
+ if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ break;
+ }
+ }
+}
+
+static void
+__prestera_k_arb_n_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded)
+{
+ struct neighbour *n;
+
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ return;
+
+ prestera_util_kern_set_neigh_offload(n, offloaded);
+ neigh_release(n);
+}
+
+static void
__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
struct prestera_kern_fib_cache *fc,
bool fail, bool offload, bool trap)
{
struct fib_rt_info fri;
- if (fc->key.addr.v != PRESTERA_IPV4)
+ switch (fc->key.addr.v) {
+ case PRESTERA_IPV4:
+ fri.fi = fc->fen4_info.fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.dscp = fc->fen4_info.dscp;
+ fri.type = fc->fen4_info.type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+ return;
+ case PRESTERA_IPV6:
+ /* TODO */
return;
+ }
+}
+
+static void
+__prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache,
+ bool enabled)
+{
+ struct prestera_nexthop_group_key nh_grp_key;
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ struct prestera_fib_node *fib_node;
+ struct prestera_fib_key fib_key;
+
+ /* Exception for fc with prefix 32: LPM entry is already used by fib */
+ memset(&fc_key, 0, sizeof(fc_key));
+ fc_key.addr = n_cache->key.addr;
+ fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+	/* It would be better to use the tb_id of the route that points to
+	 * this neighbour. We take it from the rif instead, which is
+	 * inconsistent: in_rif and out_rif really need to be separated.
+	 * Also note: each fib pointing to this neigh should get its own
+	 * neigh lpm entry (one per ingress vr).
+	 */
+ fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ memset(&fib_key, 0, sizeof(fib_key));
+ fib_key.addr = n_cache->key.addr;
+ fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
+ fib_node = prestera_fib_node_find(sw, &fib_key);
+ if (!fib_cache || !fib_cache->reachable) {
+ if (!enabled && fib_node) {
+ if (prestera_fib_node_util_is_neighbour(fib_node))
+ prestera_fib_node_destroy(sw, fib_node);
+ return;
+ }
+ }
+
+ if (enabled && !fib_node) {
+ memset(&nh_grp_key, 0, sizeof(nh_grp_key));
+ prestera_util_nc_key2nh_key(&n_cache->key,
+ &nh_grp_key.neigh[0]);
+ fib_node = prestera_fib_node_create(sw, &fib_key,
+ PRESTERA_FIB_TYPE_UC_NH,
+ &nh_grp_key);
+ if (!fib_node)
+ pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
+ &fib_key.addr.u.ipv4);
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
+ &nc->key.addr, nc->key.dev))
+ nc->reachable = true;
+ else
+ nc->reachable = false;
+}
+
+/* Kernel neighbour -> neigh_cache info */
+static void
+__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct neighbour *n;
+ int err;
+
+ memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
+ if (!n)
+ goto out;
+
+ read_lock_bh(&n->lock);
+ if (n->nud_state & NUD_VALID && !n->dead) {
+ err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
+ n);
+ if (err)
+ goto n_read_out;
- fri.fi = fc->fi;
- fri.tb_id = fc->key.kern_tb_id;
- fri.dst = fc->key.addr.u.ipv4;
- fri.dst_len = fc->key.prefix_len;
- fri.dscp = fc->kern_dscp;
- fri.type = fc->kern_type;
- /* flags begin */
- fri.offload = offload;
- fri.trap = trap;
- fri.offload_failed = fail;
- /* flags end */
- fib_alias_hw_flags_set(&init_net, &fri);
+ memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
+ nc->nh_neigh_info.connected = true;
+ }
+n_read_out:
+ read_unlock_bh(&n->lock);
+out:
+ nc->in_kernel = nc->nh_neigh_info.connected;
+ if (n)
+ neigh_release(n);
+}
+
+/* neigh_cache info -> lpm update */
+static void
+__prestera_k_arb_nc_apply(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_kern_neigh_cache_head *nhead;
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ int err;
+
+ __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
+ __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh)
+ goto out;
+
+ /* Do hw update only if something changed to prevent nh flap */
+ if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
+ sizeof(nh_neigh->info))) {
+ memcpy(&nh_neigh->info, &nc->nh_neigh_info,
+ sizeof(nh_neigh->info));
+ err = prestera_nh_neigh_set(sw, nh_neigh);
+ if (err) {
+ pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
+ "prestera_nh_neigh_set", err,
+ &nh_neigh->key.addr.u.ipv4,
+ &nh_neigh->info.ha[0]);
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
+ __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
+ nc->in_kernel,
+ !nc->in_kernel);
+ }
}
static int
__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
struct prestera_kern_fib_cache *fc)
{
+ struct fib_nh_common *nhc;
+ int nh_cnt;
+
memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
- switch (fc->fi->fib_type) {
+ switch (prestera_kern_fib_info_type(&fc->info)) {
case RTN_UNICAST:
- fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ if (prestera_fib_info_is_direct(&fc->info) &&
+ fc->key.prefix_len ==
+ PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
+			/* Special case for a /32 prefix: a direct route
+			 * would conflict in the LPM - once a TRAP entry is
+			 * added, there is no room left for the neighbour
+			 * entry. So represent a direct /32 route as a
+			 * nexthop route; the neighbour is then resolved as
+			 * the nexthop of this route.
+			 */
+ nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
+ fc->lpm_info.nh_grp_key.neigh[0].addr =
+ fc->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[0].rif =
+ nhc->nhc_dev;
+
+ break;
+ }
+
+		/* The nh_grp_key could also be taken from fi, which would be
+		 * more correct because the cache does not always represent
+		 * what is actually written to the LPM. For now the nh cache
+		 * is used here as well.
+		 */
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
+ break;
+
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
+ }
+
+ fc->lpm_info.fib_type = nh_cnt ?
+ PRESTERA_FIB_TYPE_UC_NH :
+ PRESTERA_FIB_TYPE_TRAP;
break;
/* Unsupported. Leave it for kernel: */
case RTN_BROADCAST:
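The RTN_UNICAST branch above decides how a kernel route is mirrored into the LPM: a directly connected /32 route becomes a single-nexthop entry keyed by its own address (so the neighbour can later resolve as that nexthop), a multipath route becomes a UC_NH group built from the cached neighbours, and a route with no usable cached nexthops falls back to TRAP. A worked example of the mapping with illustrative addresses:

/* Worked example of the RTN_UNICAST mapping (illustrative addresses):
 *   10.0.0.5/32, directly connected     -> PRESTERA_FIB_TYPE_UC_NH,
 *                                          neigh[0] = {10.0.0.5, nhc_dev}
 *   10.1.0.0/24 via 10.0.0.5, 10.0.0.6  -> PRESTERA_FIB_TYPE_UC_NH group
 *                                          built from the neigh cache
 *   10.2.0.0/24, no cached nexthops
 *   (nh_cnt == 0)                       -> PRESTERA_FIB_TYPE_TRAP
 */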
@@ -191,7 +940,8 @@ static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
return 0;
fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
- fc->lpm_info.fib_type);
+ fc->lpm_info.fib_type,
+ &fc->lpm_info.nh_grp_key);
if (!fib_node) {
dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
@@ -220,6 +970,10 @@ static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
}
switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ fc->reachable, false);
+ break;
case PRESTERA_FIB_TYPE_TRAP:
__prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
false, fc->reachable);
@@ -271,17 +1025,140 @@ __prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
return rfc;
}
+static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ struct neighbour *n;
+ bool hw_active;
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh) {
+ pr_err("Cannot find nh_neigh for cached %pI4n",
+ &nc->key.addr.u.ipv4);
+ return;
+ }
+
+ hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh);
+
+#ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+ if (!hw_active && nc->in_kernel)
+ goto out;
+#else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+ if (!hw_active)
+ goto out;
+#endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+
+ if (nc->key.addr.v == PRESTERA_IPV4) {
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ } else {
+ n = NULL;
+ }
+
+ if (!IS_ERR(n) && n) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ } else {
+ pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4);
+ }
+
+out:
+ return;
+}
+
+/* Propagate hw state to kernel */
+static void prestera_k_arb_hw_evt(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_hw_state_upd(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+/* Propagate kernel event to hw */
+static void prestera_k_arb_n_evt(struct prestera_switch *sw,
+ struct neighbour *n)
+{
+ struct prestera_kern_neigh_cache_key n_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ err = prestera_util_neigh2nc_key(sw, n, &n_key);
+ if (err)
+ return;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
+ if (!n_cache) {
+ n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
+ if (!n_cache)
+ return;
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ }
+
+ __prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+
+ prestera_kern_neigh_cache_put(sw, n_cache);
+}
+
+static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
static int
prestera_k_arb_fib_evt(struct prestera_switch *sw,
bool replace, /* replace or del */
- struct fib_entry_notifier_info *fen_info)
+ struct fib_notifier_info *info)
{
struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
struct prestera_kern_fib_cache_key fc_key;
struct prestera_kern_fib_cache *fib_cache;
int err;
- prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
+ prestera_util_fen_info2fib_cache_key(info, &fc_key);
fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
if (fib_cache) {
fib_cache->reachable = false;
@@ -304,10 +1181,7 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
}
if (replace) {
- fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
- fen_info->fi,
- fen_info->dscp,
- fen_info->type);
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
if (!fib_cache) {
dev_err(sw->dev->dev, "fib_cache == NULL");
return -ENOENT;
@@ -331,9 +1205,65 @@ prestera_k_arb_fib_evt(struct prestera_switch *sw,
dev_err(sw->dev->dev, "Applying fib_cache failed");
}
+	/* Update all neighs to resolve overlapping entries and apply the
+	 * related changes.
+	 */
+ __prestera_k_arb_fib_evt2nc(sw);
+
return 0;
}
+static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_neigh_cache *n_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ if (!list_empty(&n_cache->kern_fib_cache_list)) {
+ WARN_ON(1); /* BUG */
+ return;
+ }
+ __prestera_k_arb_n_offload_set(sw, n_cache, false);
+ n_cache->in_kernel = false;
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_fib_cache *fib_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
+ false, false,
+ false);
+ __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
+ false, false);
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static void prestera_k_arb_abort(struct prestera_switch *sw)
+{
+ /* Function to remove all arbiter entries and related hw objects. */
+ /* Sequence:
+ * 1) Clear arbiter tables, but don't touch hw
+ * 2) Clear hw
+	 * We use this approach because an arbiter object is not directly
+	 * mapped to hw, so deleting one arbiter object may even lead to the
+	 * creation of a hw object (e.g. in the case of overlapped routes).
+ */
+ rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
+ __prestera_k_arb_abort_fib_ht_cb,
+ sw);
+ rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
+ __prestera_k_arb_abort_neigh_ht_cb,
+ sw);
+}
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
@@ -469,13 +1399,15 @@ static void __prestera_router_fib_event_work(struct work_struct *work)
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
+ err = prestera_k_arb_fib_evt(sw, true,
+ &fib_work->fen_info.info);
if (err)
goto err_out;
break;
case FIB_EVENT_ENTRY_DEL:
- err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
+ err = prestera_k_arb_fib_evt(sw, false,
+ &fib_work->fen_info.info);
if (err)
goto err_out;
@@ -534,10 +1466,89 @@ static int __prestera_router_fib_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct prestera_netevent_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct neighbour *n;
+};
+
+static void prestera_router_neigh_event_work(struct work_struct *work)
+{
+ struct prestera_netevent_work *net_work =
+ container_of(work, struct prestera_netevent_work, work);
+ struct prestera_switch *sw = net_work->sw;
+ struct neighbour *n = net_work->n;
+
+	/* A neigh is not a hw-related object; it is stored only in the kernel. */
+ rtnl_lock();
+
+ prestera_k_arb_n_evt(sw, n);
+
+ neigh_release(n);
+ rtnl_unlock();
+ kfree(net_work);
+}
+
+static int prestera_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_netevent_work *net_work;
+ struct prestera_router *router;
+ struct neighbour *n = ptr;
+
+ router = container_of(nb, struct prestera_router, netevent_nb);
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl->family != AF_INET)
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (WARN_ON(!net_work))
+ return NOTIFY_BAD;
+
+ neigh_clone(n);
+ net_work->n = n;
+ net_work->sw = router->sw;
+ INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
+ prestera_queue_work(&net_work->work);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void prestera_router_update_neighs_work(struct work_struct *work)
+{
+ struct prestera_router *router;
+
+ router = container_of(work, struct prestera_router,
+ neighs_update.dw.work);
+ rtnl_lock();
+
+ prestera_k_arb_hw_evt(router->sw);
+
+ rtnl_unlock();
+ prestera_queue_delayed_work(&router->neighs_update.dw,
+ msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
+}
+
+static int prestera_neigh_work_init(struct prestera_switch *sw)
+{
+ INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
+ prestera_router_update_neighs_work);
+ prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
+ return 0;
+}
+
+static void prestera_neigh_work_fini(struct prestera_switch *sw)
+{
+ cancel_delayed_work_sync(&sw->router->neighs_update.dw);
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
struct prestera_router *router;
- int err;
+ int err, nhgrp_cache_bytes;
router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
if (!router)
@@ -555,6 +1566,22 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_kern_fib_cache_ht_init;
+ err = rhashtable_init(&router->kern_neigh_cache_ht,
+ &__prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_kern_neigh_cache_ht_init;
+
+ nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
+ router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
+ if (!router->nhgrp_hw_state_cache) {
+ err = -ENOMEM;
+ goto err_nh_state_cache_alloc;
+ }
+
+ err = prestera_neigh_work_init(sw);
+ if (err)
+ goto err_neigh_work_init;
+
router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
if (err)
@@ -565,6 +1592,11 @@ int prestera_router_init(struct prestera_switch *sw)
if (err)
goto err_register_inetaddr_notifier;
+ router->netevent_nb.notifier_call = prestera_router_netevent_event;
+ err = register_netevent_notifier(&router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
router->fib_nb.notifier_call = __prestera_router_fib_event;
err = register_fib_notifier(&init_net, &router->fib_nb,
/* TODO: flush fib entries */ NULL, NULL);
@@ -574,10 +1606,18 @@ int prestera_router_init(struct prestera_switch *sw)
return 0;
err_register_fib_notifier:
+ unregister_netevent_notifier(&router->netevent_nb);
+err_register_netevent_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
+ prestera_neigh_work_fini(sw);
+err_neigh_work_init:
+ kfree(router->nhgrp_hw_state_cache);
+err_nh_state_cache_alloc:
+ rhashtable_destroy(&router->kern_neigh_cache_ht);
+err_kern_neigh_cache_ht_init:
rhashtable_destroy(&router->kern_fib_cache_ht);
err_kern_fib_cache_ht_init:
prestera_router_hw_fini(sw);
@@ -589,8 +1629,15 @@ err_router_lib_init:
void prestera_router_fini(struct prestera_switch *sw)
{
unregister_fib_notifier(&init_net, &sw->router->fib_nb);
+ unregister_netevent_notifier(&sw->router->netevent_nb);
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ prestera_neigh_work_fini(sw);
+ prestera_queue_drain();
+
+ prestera_k_arb_abort(sw);
+
+ kfree(sw->router->nhgrp_hw_state_cache);
rhashtable_destroy(&sw->router->kern_fib_cache_ht);
prestera_router_hw_fini(sw);
kfree(sw->router);
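For context on the netevent path added above: the notifier runs in atomic context, so the neighbour is cloned and processing is deferred to a work item that may take rtnl_lock. A minimal stand-alone sketch of that pattern, using hypothetical demo_* names rather than the driver's own helpers:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>

struct demo_neigh_work {
        struct work_struct work;
        struct neighbour *n;
};

static void demo_neigh_work_fn(struct work_struct *work)
{
        struct demo_neigh_work *w =
                container_of(work, struct demo_neigh_work, work);

        rtnl_lock();
        /* routing state may be touched safely here */
        pr_info("neigh update on %s\n", w->n->dev->name);
        rtnl_unlock();

        neigh_release(w->n);    /* drop the reference taken by neigh_clone() */
        kfree(w);
}

static int demo_netevent_cb(unsigned long event, void *ptr)
{
        struct demo_neigh_work *w;
        struct neighbour *n = ptr;

        if (event != NETEVENT_NEIGH_UPDATE)
                return NOTIFY_DONE;

        w = kzalloc(sizeof(*w), GFP_ATOMIC);    /* notifier runs atomically */
        if (!w)
                return NOTIFY_BAD;

        neigh_clone(n);         /* hold the neighbour until the work runs */
        w->n = n;
        INIT_WORK(&w->work, demo_neigh_work_fn);
        schedule_work(&w->work);
        return NOTIFY_DONE;
}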
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index 5b0cf3be9a9e..4f65df0ae5e8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -8,10 +8,16 @@
#include "prestera_router_hw.h"
#include "prestera_acl.h"
-/* +--+
- * +------->|vr|<-+
- * | +--+ |
- * | |
+/* A nexthop points
+ * to a port (not a rif)
+ * +-------+
+ * +>|nexthop|
+ * | +-------+
+ * |
+ * +--+ +-----++
+ * +------->|vr|<-+ +>|nh_grp|
+ * | +--+ | | +------+
+ * | | |
* +-+-------+ +--+---+-+
* |rif_entry| |fib_node|
* +---------+ +--------+
@@ -23,6 +29,8 @@
#define PRESTERA_NHGR_UNUSED (0)
#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+/* TODO: merge this with router_manager */
+#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */
static const struct rhashtable_params __prestera_fib_ht_params = {
.key_offset = offsetof(struct prestera_fib_node, key),
@@ -31,10 +39,45 @@ static const struct rhashtable_params __prestera_fib_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params __prestera_nh_neigh_ht_params = {
+ .key_offset = offsetof(struct prestera_nh_neigh, key),
+ .key_len = sizeof(struct prestera_nh_neigh_key),
+ .head_offset = offsetof(struct prestera_nh_neigh, ht_node),
+};
+
+static const struct rhashtable_params __prestera_nexthop_group_ht_params = {
+ .key_offset = offsetof(struct prestera_nexthop_group, key),
+ .key_len = sizeof(struct prestera_nexthop_group_key),
+ .head_offset = offsetof(struct prestera_nexthop_group, ht_node),
+};
+
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg);
+
+/* TODO: move to router.h as macros */
+static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key)
+{
+ return memchr_inv(key, 0, sizeof(*key)) ? true : false;
+}
+
int prestera_router_hw_init(struct prestera_switch *sw)
{
int err;
+ err = rhashtable_init(&sw->router->nh_neigh_ht,
+ &__prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_nh_neigh_ht_init;
+
+ err = rhashtable_init(&sw->router->nexthop_group_ht,
+ &__prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_grp_ht_init;
+
err = rhashtable_init(&sw->router->fib_ht,
&__prestera_fib_ht_params);
if (err)
@@ -43,15 +86,25 @@ int prestera_router_hw_init(struct prestera_switch *sw)
INIT_LIST_HEAD(&sw->router->vr_list);
INIT_LIST_HEAD(&sw->router->rif_entry_list);
+ return 0;
+
err_fib_ht_init:
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+err_nexthop_grp_ht_init:
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+err_nh_neigh_ht_init:
return 0;
}
void prestera_router_hw_fini(struct prestera_switch *sw)
{
+ rhashtable_free_and_destroy(&sw->router->fib_ht,
+ prestera_fib_node_destroy_ht_cb, sw);
WARN_ON(!list_empty(&sw->router->vr_list));
WARN_ON(!list_empty(&sw->router->rif_entry_list));
rhashtable_destroy(&sw->router->fib_ht);
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
}
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
@@ -232,6 +285,286 @@ err_kzalloc:
return NULL;
}
+static void __prestera_nh_neigh_destroy(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ rhashtable_remove_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ kfree(neigh);
+}
+
+static struct prestera_nh_neigh *
+__prestera_nh_neigh_create(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+ int err;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_KERNEL);
+ if (!neigh)
+ goto err_kzalloc;
+
+ memcpy(&neigh->key, key, sizeof(*key));
+ neigh->info.connected = false;
+ INIT_LIST_HEAD(&neigh->nexthop_group_list);
+ err = rhashtable_insert_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return neigh;
+
+err_rhashtable_insert:
+ kfree(neigh);
+err_kzalloc:
+ return NULL;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *nh_neigh;
+
+ nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,
+ key, __prestera_nh_neigh_ht_params);
+ return IS_ERR(nh_neigh) ? NULL : nh_neigh;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+
+ neigh = prestera_nh_neigh_find(sw, key);
+ if (!neigh)
+ return __prestera_nh_neigh_create(sw, key);
+
+ return neigh;
+}
+
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ if (list_empty(&neigh->nexthop_group_list))
+ __prestera_nh_neigh_destroy(sw, neigh);
+}
+
+/* Propagate the updated prestera_neigh_info to every nexthop group using this neigh */
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ struct prestera_nh_neigh_head *nh_head;
+ struct prestera_nexthop_group *nh_grp;
+ int err;
+
+ list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) {
+ nh_grp = nh_head->this;
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh)
+{
+ bool state;
+ struct prestera_nh_neigh_head *nh_head, *tmp;
+
+ state = false;
+ list_for_each_entry_safe(nh_head, tmp,
+ &nh_neigh->nexthop_group_list, head) {
+ state = prestera_nexthop_group_util_hw_state(sw, nh_head->this);
+ if (state)
+ goto out;
+ }
+
+out:
+ return state;
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_create(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt, err, gid;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ goto err_kzalloc;
+
+ memcpy(&nh_grp->key, key, sizeof(*key));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt]))
+ break;
+
+ nh_neigh = prestera_nh_neigh_get(sw,
+ &nh_grp->key.neigh[nh_cnt]);
+ if (!nh_neigh)
+ goto err_nh_neigh_get;
+
+ nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh;
+ nh_grp->nh_neigh_head[nh_cnt].this = nh_grp;
+ list_add(&nh_grp->nh_neigh_head[nh_cnt].head,
+ &nh_neigh->nexthop_group_list);
+ }
+
+ err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id);
+ if (err)
+ goto err_nh_group_create;
+
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ goto err_nexthop_group_set;
+
+ err = rhashtable_insert_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* reset the hw-state cache bit for the newly created group */
+ gid = nh_grp->grp_id;
+ sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8);
+
+ return nh_grp;
+
+err_ht_insert:
+err_nexthop_group_set:
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+err_nh_group_create:
+err_nh_neigh_get:
+ for (nh_cnt--; nh_cnt >= 0; nh_cnt--) {
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh);
+ }
+
+ kfree(nh_grp);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_nexthop_group_destroy(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt;
+
+ rhashtable_remove_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!nh_neigh)
+ break;
+
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_neigh);
+ }
+
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+ kfree(nh_grp);
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_find(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
+ key, __prestera_nexthop_group_ht_params);
+ return IS_ERR(nh_grp) ? NULL : nh_grp;
+}
+
+static struct prestera_nexthop_group *
+prestera_nexthop_group_get(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = __prestera_nexthop_group_find(sw, key);
+ if (nh_grp) {
+ refcount_inc(&nh_grp->refcount);
+ } else {
+ nh_grp = __prestera_nexthop_group_create(sw, key);
+ if (IS_ERR(nh_grp))
+ return ERR_CAST(nh_grp);
+
+ refcount_set(&nh_grp->refcount, 1);
+ }
+
+ return nh_grp;
+}
+
+static void prestera_nexthop_group_put(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ if (refcount_dec_and_test(&nh_grp->refcount))
+ __prestera_nexthop_group_destroy(sw, nh_grp);
+}
+
+/* Push the current info of each member nh_neigh down to the HW group */
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX];
+ struct prestera_nh_neigh *neigh;
+ int nh_cnt;
+
+ memset(&info[0], 0, sizeof(info));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!neigh)
+ break;
+
+ memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info));
+ }
+
+ return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id);
+}
+
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ int err;
+ u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1;
+ u32 gid = nh_grp->grp_id;
+ u8 *cache = sw->router->nhgrp_hw_state_cache;
+
+ /* Anti-jitter:
+ * If the nh_grp state were read twice within a short interval, the
+ * state bit could still read as cleared on the second call. So keep
+ * the last fetched state for PRESTERA_NH_ACTIVE_JIFFER_FILTER ms
+ * before re-reading it from hardware.
+ */
+ if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick +
+ msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) {
+ err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size);
+ if (err) {
+ pr_err("Failed to get nh_grp's hw state");
+ return false;
+ }
+
+ sw->router->nhgrp_hw_cache_kick = jiffies;
+ }
+
+ if (cache[gid / 8] & BIT(gid % 8))
+ return true;
+
+ return false;
+}
+
struct prestera_fib_node *
prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
{
@@ -251,6 +584,9 @@ static void __prestera_fib_node_destruct(struct prestera_switch *sw,
prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
fib_node->key.prefix_len);
switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+ break;
case PRESTERA_FIB_TYPE_TRAP:
break;
case PRESTERA_FIB_TYPE_DROP:
@@ -272,10 +608,20 @@ void prestera_fib_node_destroy(struct prestera_switch *sw,
kfree(fib_node);
}
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_fib_node *node = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_fib_node_destruct(sw, node);
+ kfree(node);
+}
+
struct prestera_fib_node *
prestera_fib_node_create(struct prestera_switch *sw,
struct prestera_fib_key *key,
- enum prestera_fib_type fib_type)
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key)
{
struct prestera_fib_node *fib_node;
u32 grp_id;
@@ -302,6 +648,14 @@ prestera_fib_node_create(struct prestera_switch *sw,
case PRESTERA_FIB_TYPE_DROP:
grp_id = PRESTERA_NHGR_DROP;
break;
+ case PRESTERA_FIB_TYPE_UC_NH:
+ fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
+ nh_grp_key);
+ if (!fib_node->info.nh_grp)
+ goto err_nh_grp_get;
+
+ grp_id = fib_node->info.nh_grp->grp_id;
+ break;
default:
pr_err("Unsupported fib_type %d", fib_type);
goto err_nh_grp_get;
@@ -323,6 +677,8 @@ err_ht_insert:
prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
key->prefix_len);
err_lpm_add:
+ if (fib_type == PRESTERA_FIB_TYPE_UC_NH)
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
err_nh_grp_get:
prestera_vr_put(sw, vr);
err_vr_get:
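A note on the nh_grp hardware-state cache used above: one "active" bit per group id is packed into a byte array (gid / 8 selects the byte, BIT(gid % 8) the bit), and the array is refreshed from hardware at most once per PRESTERA_NH_ACTIVE_JIFFER_FILTER interval. A stand-alone sketch of that bit-packing plus jiffies-based anti-jitter, with demo_* placeholders instead of the driver's hw calls:

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_FILTER_MS  3000

struct demo_state_cache {
        u8 *bits;                       /* one "active" bit per group id */
        u32 nbytes;
        unsigned long last_kick;        /* jiffies of the last hw read */
};

/* placeholder for the real blockwise hw read */
static int demo_read_hw_states(u8 *buf, u32 nbytes)
{
        memset(buf, 0, nbytes);
        return 0;
}

static bool demo_group_is_active(struct demo_state_cache *c, u32 gid)
{
        /* refresh at most once per DEMO_FILTER_MS to filter out jitter */
        if (!time_before(jiffies,
                         c->last_kick + msecs_to_jiffies(DEMO_FILTER_MS))) {
                if (demo_read_hw_states(c->bits, c->nbytes))
                        return false;
                c->last_kick = jiffies;
        }

        return c->bits[gid / 8] & BIT(gid % 8);
}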
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
index 67dbb49c8bd4..9ca97919c863 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -31,6 +31,63 @@ struct prestera_ip_addr {
PRESTERA_IPV4 = 0,
PRESTERA_IPV6
} v;
+#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \
+ /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */)
+};
+
+struct prestera_nh_neigh_key {
+ struct prestera_ip_addr addr;
+ /* rif may be obsolete here, since info already carries an iface.
+ * A key can hold functional fields, fields used to filter out
+ * duplicate objects at the logical level (before they are passed to
+ * HW), or fields that cover hardware restrictions.
+ * In our case rif is a logical interface (it can even be a VLAN)
+ * that, combined with the IP address (also unrelated to the hardware
+ * nexthop), logically compresses the set of created nexthops.
+ * In effect, rif + IP address is just a cookie.
+ */
+ /* struct prestera_rif *rif; */
+ /* Used only as a cookie, to separate ARP domains (together with addr) */
+ void *rif;
+};
+
+/* Used for hw call */
+struct prestera_neigh_info {
+ struct prestera_iface iface;
+ unsigned char ha[ETH_ALEN];
+ u8 connected; /* bool: indicates whether mac/oif are valid */
+ u8 __pad[1];
+};
+
+/* Used to notify nh about neigh change */
+struct prestera_nh_neigh {
+ struct prestera_nh_neigh_key key;
+ struct prestera_neigh_info info;
+ struct rhash_head ht_node; /* node of nh_neigh_ht */
+ struct list_head nexthop_group_list;
+};
+
+#define PRESTERA_NHGR_SIZE_MAX 4
+
+struct prestera_nexthop_group {
+ struct prestera_nexthop_group_key {
+ struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX];
+ } key;
+ /* Store the intermediate object inline here;
+ * this avoids the overhead of an extra kzalloc call.
+ */
+ /* nh_neigh is used only to notify the nexthop_group */
+ struct prestera_nh_neigh_head {
+ struct prestera_nexthop_group *this;
+ struct list_head head;
+ /* The neigh pointer is not strictly necessary; it just avoids a
+ * lookup of nh_neigh by key (n) on destroy.
+ */
+ struct prestera_nh_neigh *neigh;
+ } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX];
+ struct rhash_head ht_node; /* node of nexthop_group_ht */
+ refcount_t refcount;
+ u32 grp_id; /* hw */
};
struct prestera_fib_key {
@@ -44,12 +101,16 @@ struct prestera_fib_info {
struct list_head vr_node;
enum prestera_fib_type {
PRESTERA_FIB_TYPE_INVALID = 0,
+ /* fib_info must hold a pointer to the nh_grp (its grp_id is used) */
+ PRESTERA_FIB_TYPE_UC_NH,
/* It can be connected route
* and will be overlapped with neighbours
*/
PRESTERA_FIB_TYPE_TRAP,
PRESTERA_FIB_TYPE_DROP
} type;
+ /* Valid only if type == PRESTERA_FIB_TYPE_UC_NH */
+ struct prestera_nexthop_group *nh_grp;
};
struct prestera_fib_node {
@@ -67,6 +128,18 @@ struct prestera_rif_entry *
prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh);
struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
struct prestera_fib_key *key);
void prestera_fib_node_destroy(struct prestera_switch *sw,
@@ -74,7 +147,8 @@ void prestera_fib_node_destroy(struct prestera_switch *sw,
struct prestera_fib_node *
prestera_fib_node_create(struct prestera_switch *sw,
struct prestera_fib_key *key,
- enum prestera_fib_type fib_type);
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key);
int prestera_router_hw_init(struct prestera_switch *sw);
void prestera_router_hw_fini(struct prestera_switch *sw);
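Both new hash tables in this header key on plain structs (prestera_nh_neigh_key, prestera_nexthop_group_key). rhashtable compares fixed-length keys bytewise, which is why the driver zero-initializes objects and can treat an all-zero key as "unused" via memchr_inv(). A generic sketch of that convention, using a hypothetical demo_key rather than the driver's types:

#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_key {
        u32 addr;
        void *cookie;           /* opaque "rif"-style cookie */
};

struct demo_obj {
        struct demo_key key;
        struct rhash_head ht_node;
};

static const struct rhashtable_params demo_ht_params = {
        .key_offset  = offsetof(struct demo_obj, key),
        .key_len     = sizeof(struct demo_key), /* compared with memcmp() */
        .head_offset = offsetof(struct demo_obj, ht_node),
};

/* an all-zero key means "slot unused" */
static bool demo_key_is_valid(const struct demo_key *key)
{
        return memchr_inv(key, 0, sizeof(*key));
}

static struct demo_obj *demo_obj_create(struct rhashtable *ht,
                                        const struct demo_key *key)
{
        struct demo_obj *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);        /* zeroes padding too */
        if (!obj)
                return NULL;

        memcpy(&obj->key, key, sizeof(obj->key));
        if (rhashtable_insert_fast(ht, &obj->ht_node, demo_ht_params)) {
                kfree(obj);
                return NULL;
        }
        return obj;
}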
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index dc3e3ddc60bf..42ee963e9f75 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -659,7 +659,7 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
init_dummy_netdev(&sdma->napi_dev);
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
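The one-line change above tracks a core API update in this kernel series: netif_napi_add() no longer takes a weight argument and applies the default poll budget itself, while callers that genuinely need a custom weight are expected to use the weighted variant. Roughly (a sketch of the call-site difference, not driver code):

/* old style: explicit weight */
netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);

/* new style: the default weight (NAPI_POLL_WEIGHT) is implied */
netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);

/* only drivers that really need a non-default weight spell it out */
netif_napi_add_weighted(&sdma->napi_dev, &sdma->rx_napi,
                        prestera_sdma_rx_poll, 64);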
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index 845e9d8c8cc7..f0e9d6ea88c5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -120,8 +120,9 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
return 0;
}
-static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
- struct prestera_port *to_port)
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress)
{
struct prestera_switch *sw = binding->port->sw;
u8 span_id;
@@ -135,7 +136,7 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
if (err)
return err;
- err = prestera_hw_span_bind(binding->port, span_id);
+ err = prestera_hw_span_bind(binding->port, span_id, ingress);
if (err) {
prestera_span_put(sw, span_id);
return err;
@@ -145,11 +146,12 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
return 0;
}
-static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress)
{
int err;
- err = prestera_hw_span_unbind(binding->port);
+ err = prestera_hw_span_unbind(binding->port, ingress);
if (err)
return err;
@@ -161,60 +163,6 @@ static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
return 0;
}
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f)
-{
- struct prestera_flow_block_binding *binding;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- struct prestera_port *port;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only singular actions are supported");
- return -EOPNOTSUPP;
- }
-
- act = &f->rule->action.entries[0];
-
- if (!prestera_netdev_check(act->dev)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only Marvell Prestera port is supported");
- return -EINVAL;
- }
- if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
- return -EOPNOTSUPP;
- if (act->id != FLOW_ACTION_MIRRED)
- return -EOPNOTSUPP;
- if (protocol != htons(ETH_P_ALL))
- return -EOPNOTSUPP;
-
- port = netdev_priv(act->dev);
-
- list_for_each_entry(binding, &block->binding_list, list) {
- err = prestera_span_rule_add(binding, port);
- if (err)
- goto rollback;
- }
-
- return 0;
-
-rollback:
- list_for_each_entry_continue_reverse(binding,
- &block->binding_list, list)
- prestera_span_rule_del(binding);
- return err;
-}
-
-void prestera_span_destroy(struct prestera_flow_block *block)
-{
- struct prestera_flow_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- prestera_span_rule_del(binding);
-}
-
int prestera_span_init(struct prestera_switch *sw)
{
struct prestera_span *span;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
index f0644521f78a..493b68524bcb 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -8,13 +8,17 @@
#define PRESTERA_SPAN_INVALID_ID -1
+struct prestera_port;
struct prestera_switch;
-struct prestera_flow_block;
+struct prestera_flow_block_binding;
int prestera_span_init(struct prestera_switch *sw);
void prestera_span_fini(struct prestera_switch *sw);
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f);
-void prestera_span_destroy(struct prestera_flow_block *block);
+
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress);
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress);
#endif /* _PRESTERA_SPAN_H_ */
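The prestera_span_replace() body removed above used a common rollback idiom: program each binding in the block's list and, on failure, walk back over the bindings that were already programmed. With prestera_span_rule_add()/del() now exported, that loop presumably lives in the matchall code instead. A generic sketch of the idiom with hypothetical demo_* helpers:

#include <linux/list.h>

struct demo_binding {
        struct list_head list;
};

/* placeholder: a real implementation would program the hardware */
static int demo_apply(struct demo_binding *b)
{
        return 0;
}

static void demo_revert(struct demo_binding *b)
{
}

static int demo_apply_all(struct list_head *bindings)
{
        struct demo_binding *b;
        int err;

        list_for_each_entry(b, bindings, list) {
                err = demo_apply(b);
                if (err)
                        goto rollback;
        }

        return 0;

rollback:
        /* undo only the entries that were applied before the failure */
        list_for_each_entry_continue_reverse(b, bindings, list)
                demo_revert(b);
        return err;
}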
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 71cde97d85c8..e548cd32582e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -143,6 +143,7 @@ prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
prestera_port_uc_flood_set(port, false);
prestera_port_mc_flood_set(port, false);
prestera_port_learning_set(port, false);
+ prestera_port_br_locked_set(port, false);
}
static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
@@ -162,6 +163,11 @@ static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
if (err)
goto err_out;
+ err = prestera_port_br_locked_set(port,
+ br_port->flags & BR_PORT_LOCKED);
+ if (err)
+ goto err_out;
+
return 0;
err_out:
@@ -1163,7 +1169,7 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags.mask &
- ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED))
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 349b8a94e939..cf456d62677f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1354,10 +1354,10 @@ static void pxa168_eth_netpoll(struct net_device *dev)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
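These strlcpy() to strscpy() conversions follow a tree-wide cleanup: strscpy() always NUL-terminates, does not read the source past what it copies, and reports truncation through its return value instead of returning the source length. A small usage sketch:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_copy_name(const char *src)
{
        char buf[8];
        ssize_t n;

        n = strscpy(buf, src, sizeof(buf));
        if (n == -E2BIG)
                pr_warn("'%s' truncated to '%s'\n", src, buf);  /* buf is still NUL-terminated */
        else
                pr_info("copied %zd bytes\n", n);
}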
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c1e985416c0e..1b43704baceb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,9 +394,9 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(skge->hw->pdev),
sizeof(info->bus_info));
}
@@ -3832,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_HIGHDMA;
skge = netdev_priv(dev);
- netif_napi_add(dev, &skge->napi, skge_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &skge->napi, skge_poll);
skge->netdev = dev;
skge->hw = hw;
skge->msg_enable = netif_msg_init(debug, default_msg);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index bbea5458000b..ab33ba1c3023 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3687,9 +3687,9 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sky2->hw->pdev),
sizeof(info->bus_info));
}
@@ -4937,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &hw->napi, sky2_poll);
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5ace4609de47..4fba7cb0144b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -73,6 +73,12 @@ static const struct mtk_reg_map mtk_reg_map = {
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
+ .gdma_to_ppe = 0x4444,
+ .ppe_base = 0x0c00,
+ .wdma_base = {
+ [0] = 0x2800,
+ [1] = 0x2c00,
+ },
};
static const struct mtk_reg_map mt7628_reg_map = {
@@ -126,6 +132,12 @@ static const struct mtk_reg_map mt7986_reg_map = {
.fq_blen = 0x472c,
},
.gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
};
/* strings used by ethtool */
@@ -1458,7 +1470,7 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
- return !eth->hwlro;
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
@@ -1573,8 +1585,8 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
.last = !xdp_frame_has_frags(xdpf),
};
int err, index = 0, n_desc = 1, nr_frags;
- struct mtk_tx_dma *htxd, *txd, *txd_pdma;
struct mtk_tx_buf *htx_buf, *tx_buf;
+ struct mtk_tx_dma *htxd, *txd;
void *data = xdpf->data;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
@@ -1608,7 +1620,6 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
- txd_pdma = qdma_to_pdma(ring, txd);
if (txd == ring->last_free)
goto unmap;
@@ -1629,7 +1640,8 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
- txd_pdma = qdma_to_pdma(ring, txd);
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
+
if (index & 1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1660,13 +1672,15 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- txd_pdma = qdma_to_pdma(ring, htxd);
tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
+
txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+ }
htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
}
@@ -1892,12 +1906,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
bytes += skb->len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0),
PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
} else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0),
@@ -1911,9 +1927,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb, hash);
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -2976,21 +2991,25 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- u32 gdm_config = MTK_GDMA_TO_PDMA;
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config;
+ int i;
err = mtk_start_dma(eth);
if (err)
return err;
- if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
- gdm_config = MTK_GDMA_TO_PPE;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3028,6 +3047,7 @@ static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -3055,8 +3075,8 @@ static int mtk_stop(struct net_device *dev)
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
return 0;
}
@@ -3556,8 +3576,8 @@ static void mtk_get_drvinfo(struct net_device *dev,
{
struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
@@ -3925,6 +3945,7 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
static int mtk_probe(struct platform_device *pdev)
{
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -4005,20 +4026,31 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- for (i = 0;; i++) {
- struct device_node *np = of_parse_phandle(pdev->dev.of_node,
- "mediatek,wed", i);
- static const u32 wdma_regs[] = {
- MTK_WDMA0_BASE,
- MTK_WDMA1_BASE
- };
- void __iomem *wdma;
-
- if (!np || i >= ARRAY_SIZE(wdma_regs))
- break;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ }
- wdma = eth->base + wdma_regs[i];
- mtk_wed_add_hw(np, eth, wdma, i);
+ if (eth->soc->offload_version) {
+ for (i = 0;; i++) {
+ struct device_node *np;
+ phys_addr_t wdma_phy;
+ u32 wdma_base;
+
+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+ np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ if (!np)
+ break;
+
+ wdma_base = eth->soc->reg_map->wdma_base[i];
+ wdma_phy = res ? res->start + wdma_base : 0;
+ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+ wdma_phy, i);
+ }
}
for (i = 0; i < 3; i++) {
@@ -4096,10 +4128,19 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
- if (!eth->ppe) {
- err = -ENOMEM;
- goto err_free_dev;
+ u32 num_ppe;
+
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+ eth->soc->offload_version, i);
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
}
err = mtk_eth_offload_init(eth);
@@ -4125,10 +4166,8 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
- NAPI_POLL_WEIGHT);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
@@ -4192,6 +4231,8 @@ static const struct mtk_soc_data mt7621_data = {
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4210,6 +4251,8 @@ static const struct mtk_soc_data mt7622_data = {
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4227,6 +4270,8 @@ static const struct mtk_soc_data mt7623_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
@@ -4258,8 +4303,11 @@ static const struct mtk_soc_data mt7986_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index ecf85e9ed824..b52f3b0177ef 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -105,7 +105,6 @@
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_TO_PDMA 0x0
-#define MTK_GDMA_TO_PPE 0x4444
#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
@@ -269,9 +268,6 @@
#define TX_DMA_FPORT_MASK_V2 0xf
#define TX_DMA_SWC_V2 BIT(30)
-#define MTK_WDMA0_BASE 0x2800
-#define MTK_WDMA1_BASE 0x2c00
-
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
#define TX_DMA_TSO BIT(28)
@@ -319,8 +315,8 @@
#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
-#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0xf)
-#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0x7)
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
/* PDMA V2 descriptor rxd3 */
#define RX_DMA_VTAG_V2 BIT(0)
@@ -955,6 +951,9 @@ struct mtk_reg_map {
u32 fq_blen; /* fq free page buffer length */
} qdma;
u32 gdm1_cnt;
+ u32 gdma_to_ppe;
+ u32 ppe_base;
+ u32 wdma_base[2];
};
/* struct mtk_eth_data - This is the structure holding all differences
@@ -968,6 +967,8 @@ struct mtk_reg_map {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
+ * @foe_entry_size Foe table entry size.
* @txd_size Tx DMA descriptor size.
* @rxd_size Rx DMA descriptor size.
* @rx_irq_done_mask Rx irq done register mask.
@@ -982,6 +983,8 @@ struct mtk_soc_data {
u32 required_clks;
bool required_pctl;
u8 offload_version;
+ u8 hash_offset;
+ u16 foe_entry_size;
netdev_features_t hw_features;
struct {
u32 txd_size;
@@ -1111,7 +1114,7 @@ struct mtk_eth {
int ip_align;
- struct mtk_ppe *ppe;
+ struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
@@ -1142,6 +1145,86 @@ struct mtk_mac {
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+
+ return ppe->foe_table + hash * soc->foe_entry_size;
+}
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
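The new inline helpers above exist because FIELD_GET()/FIELD_PREP() require compile-time-constant masks, so code that must handle both the legacy and the NETSYS_V2 bit layouts branches on the SoC capability instead of passing a variable mask. A minimal sketch of the same pattern with hypothetical masks (not MediaTek's):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_STATE_V1   GENMASK(7, 4)
#define DEMO_STATE_V2   GENMASK(11, 8)

struct demo_hw {
        bool is_v2;
};

static inline u32 demo_get_state_mask(const struct demo_hw *hw)
{
        return hw->is_v2 ? DEMO_STATE_V2 : DEMO_STATE_V1;
}

static inline u32 demo_get_state(const struct demo_hw *hw, u32 reg)
{
        /* FIELD_GET() needs a constant mask, so pick it per revision */
        return hw->is_v2 ? FIELD_GET(DEMO_STATE_V2, reg)
                         : FIELD_GET(DEMO_STATE_V1, reg);
}

static inline u32 demo_prep_state(const struct demo_hw *hw, u32 val)
{
        return hw->is_v2 ? FIELD_PREP(DEMO_STATE_V2, val)
                         : FIELD_PREP(DEMO_STATE_V1, val);
}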
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index dab8f3f771f8..ae00e572390d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -56,7 +56,7 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
@@ -88,12 +88,12 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
enable * MTK_PPE_CACHE_CTL_EN);
}
-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -122,16 +122,16 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
- hash <<= 1;
+ hash <<= (ffs(eth->soc->hash_offset) - 1);
hash &= MTK_PPE_ENTRIES - 1;
return hash;
}
static inline struct mtk_foe_mac_info *
-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.l2;
@@ -143,9 +143,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
}
static inline u32 *
-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.ib2;
@@ -156,27 +156,38 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
return &entry->ipv4.ib2;
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac)
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac)
{
struct mtk_foe_mac_info *l2;
u32 ports_pad, val;
memset(entry, 0, sizeof(*entry));
- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
- MTK_FOE_IB1_BIND_TTL |
- MTK_FOE_IB1_BIND_CACHE;
- entry->ib1 = val;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+ entry->ib1 = val;
- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+ } else {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+ }
if (is_multicast_ether_addr(dest_mac))
- val |= MTK_FOE_IB2_MULTICAST;
+ val |= mtk_get_ib2_multicast_mask(eth);
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
@@ -210,24 +221,30 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
return 0;
}
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port)
{
- u32 *ib2 = mtk_foe_entry_ib2(entry);
- u32 val;
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ u32 val = *ib2;
- val = *ib2;
- val &= ~MTK_FOE_IB2_DEST_PORT;
- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+ } else {
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ }
*ib2 = val;
return 0;
}
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool egress,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
struct mtk_ipv4_tuple *t;
switch (type) {
@@ -262,11 +279,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
return 0;
}
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
int i;
@@ -297,39 +315,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
return 0;
}
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
l2->etype = BIT(port);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
else
l2->etype |= BIT(8);
- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
return 0;
}
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
case 0:
- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+ mtk_prep_ib1_vlan_layer(eth, 1);
l2->vlan1 = vid;
return 0;
case 1:
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
l2->vlan1 = vid;
l2->etype |= BIT(8);
} else {
l2->vlan2 = vid;
- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
}
return 0;
default:
@@ -337,34 +357,42 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
}
}
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
l2->etype = ETH_P_PPP_SES;
- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
l2->pppoe_id = sid;
return 0;
}
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid)
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
- u32 *ib2 = mtk_foe_entry_ib2(entry);
-
- *ib2 &= ~MTK_FOE_IB2_PORT_MG;
- *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
- if (wdma_idx)
- *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
- l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
- FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
- FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ }
return 0;
}
@@ -376,14 +404,15 @@ static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
}
static bool
-mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+ struct mtk_foe_entry *data)
{
int type, len;
if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
return false;
- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
len = offsetof(struct mtk_foe_entry, ipv6._rsv);
else
@@ -410,9 +439,10 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
hlist_del_init(&entry->list);
if (entry->hash != 0xffff) {
- ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
- ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_BIND);
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
}
entry->hash = 0xffff;
@@ -426,14 +456,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
- u16 timestamp;
- u16 now;
-
- now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
- timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u16 now = mtk_eth_timestamp(ppe->eth);
+ u16 timestamp = ib1 & ib1_ts_mask;
if (timestamp > now)
- return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ return ib1_ts_mask + 1 - timestamp + now;
else
return now - timestamp;
}
@@ -441,6 +469,7 @@ static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
struct hlist_node *tmp;
@@ -451,7 +480,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
int cur_idle;
u32 ib1;
- hwe = &ppe->foe_table[cur->hash];
+ hwe = mtk_foe_get_entry(ppe, cur->hash);
ib1 = READ_ONCE(hwe->ib1);
if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
@@ -465,16 +494,16 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
continue;
idle = cur_idle;
- entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 &= ~ib1_ts_mask;
+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
}
}
static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
+ struct mtk_foe_entry foe = {};
struct mtk_foe_entry *hwe;
- struct mtk_foe_entry foe;
spin_lock_bh(&ppe_lock);
@@ -486,9 +515,9 @@ mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
if (entry->hash == 0xffff)
goto out;
- hwe = &ppe->foe_table[entry->hash];
- memcpy(&foe, hwe, sizeof(foe));
- if (!mtk_flow_entry_match(entry, &foe)) {
+ hwe = mtk_foe_get_entry(ppe, entry->hash);
+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
entry->hash = 0xffff;
goto out;
}
@@ -503,16 +532,22 @@ static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
u16 hash)
{
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
- u16 timestamp;
- timestamp = mtk_eth_timestamp(ppe->eth);
- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+ timestamp);
+ } else {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+ timestamp);
+ }
- hwe = &ppe->foe_table[hash];
- memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
wmb();
hwe->ib1 = entry->ib1;
@@ -539,16 +574,17 @@ mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
u32 hash;
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return mtk_foe_entry_commit_l2(ppe, entry);
- hash = mtk_ppe_hash_entry(&entry->data);
+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
entry->hash = 0xffff;
spin_lock_bh(&ppe_lock);
- hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
spin_unlock_bh(&ppe_lock);
return 0;
@@ -558,10 +594,11 @@ static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u16 hash)
{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
- struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_entry foe = {}, *hwe;
struct mtk_foe_mac_info *l2;
- u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
@@ -572,32 +609,34 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
flow_info->l2_data.base_flow = entry;
flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
flow_info->hash = hash;
- hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
- hwe = &ppe->foe_table[hash];
- memcpy(&foe, hwe, sizeof(foe));
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&foe, hwe, soc->foe_entry_size);
foe.ib1 &= ib1_mask;
foe.ib1 |= entry->data.ib1 & ~ib1_mask;
- l2 = mtk_foe_entry_l2(&foe);
+ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
- type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
l2->etype = ETH_P_IPV6;
- *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
__mtk_foe_entry_commit(ppe, &foe, hash);
}
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- struct hlist_head *head = &ppe->foe_flow[hash / 2];
- struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
struct hlist_node *n;
@@ -621,7 +660,7 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
continue;
}
- if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
if (entry->hash != 0xffff)
entry->hash = 0xffff;
continue;
@@ -678,11 +717,13 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
}
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
- int version)
+ int version, int index)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct device *dev = eth->dev;
- struct mtk_foe_entry *foe;
struct mtk_ppe *ppe;
+ u32 foe_flow_size;
+ void *foe;
ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
if (!ppe)
@@ -698,14 +739,21 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
ppe->dev = dev;
ppe->version = version;
- foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
+ foe = dmam_alloc_coherent(ppe->dev,
+ MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
return NULL;
ppe->foe_table = foe;
- mtk_ppe_debugfs_init(ppe);
+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+ sizeof(*ppe->foe_flow);
+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return NULL;
+
+ mtk_ppe_debugfs_init(ppe, index);
return ppe;
}
@@ -715,21 +763,30 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+ memset(ppe->foe_table, 0,
+ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
/* skip all entries that cross the 1024 byte boundary */
- for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
- for (k = 0; k < ARRAY_SIZE(skip); k++)
- ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
+ for (k = 0; k < ARRAY_SIZE(skip); k++) {
+ struct mtk_foe_entry *hwe;
+
+ hwe = mtk_foe_get_entry(ppe, i + skip[k]);
+ hwe->ib1 |= MTK_FOE_IB1_STATIC;
+ }
+ }
}
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
+ if (!ppe)
+ return;
+
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
@@ -748,6 +805,8 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -755,15 +814,21 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, true);
- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_6RD |
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+ MTK_PPE_MD_TOAP_BYP_CRSN1 |
+ MTK_PPE_MD_TOAP_BYP_CRSN2 |
+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+ else
+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
@@ -798,7 +863,10 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- return 0;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+ }
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -806,9 +874,15 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
u32 val;
int i;
- for (i = 0; i < MTK_PPE_ENTRIES; i++)
- ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_INVALID);
+ if (!ppe)
+ return 0;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
+
+ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ }
mtk_ppe_cache_enable(ppe, false);
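The mtk_ppe.c hunks above stop indexing ppe->foe_table directly because the hardware entry size now comes from soc->foe_entry_size and differs between NETSYS v1 and v2. A minimal sketch of the mtk_foe_get_entry() accessor they rely on, assuming foe_table is the flat DMA buffer allocated in mtk_ppe_init() (the in-tree helper may differ in detail):

static inline struct mtk_foe_entry *
mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
{
	const struct mtk_soc_data *soc = ppe->eth->soc;

	/* foe_table is now a void pointer; step by the per-SoC entry size */
	return ppe->foe_table + hash * soc->foe_entry_size;
}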
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 1f5cf1c9a947..0b7a67a958e4 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -8,8 +8,6 @@
#include <linux/bitfield.h>
#include <linux/rhashtable.h>
-#define MTK_ETH_PPE_BASE 0xc00
-
#define MTK_PPE_ENTRIES_SHIFT 3
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
@@ -34,6 +32,15 @@
#define MTK_FOE_IB1_UDP BIT(30)
#define MTK_FOE_IB1_STATIC BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
+
enum {
MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
@@ -55,14 +62,25 @@ enum {
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
+
#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
+
enum {
MTK_FOE_STATE_INVALID,
MTK_FOE_STATE_UNBIND,
@@ -83,6 +101,9 @@ struct mtk_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u16 minfo;
+ u16 winfo;
};
/* software-only entry type */
@@ -200,7 +221,7 @@ struct mtk_foe_entry {
struct mtk_foe_ipv4_dslite dslite;
struct mtk_foe_ipv6 ipv6;
struct mtk_foe_ipv6_6rd ipv6_6rd;
- u32 data[19];
+ u32 data[23];
};
};
@@ -249,6 +270,7 @@ struct mtk_flow_entry {
};
u8 type;
s8 wed_index;
+ u8 ppe_index;
u16 hash;
union {
struct mtk_foe_entry data;
@@ -267,20 +289,22 @@ struct mtk_ppe {
struct device *dev;
void __iomem *base;
int version;
+ char dirname[5];
- struct mtk_foe_entry *foe_table;
+ void *foe_table;
dma_addr_t foe_phys;
u16 foe_check_time[MTK_PPE_ENTRIES];
- struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+ struct hlist_head *foe_flow;
struct rhashtable l2_flows;
void *acct_table;
};
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
@@ -293,6 +317,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
if (!ppe)
return;
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
+
now = (u16)jiffies;
diff = now - ppe->foe_check_time[hash];
if (diff < HZ / 10)
@@ -302,34 +329,30 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
__mtk_ppe_check_skb(ppe, skb, hash);
}
-static inline int
-mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
-{
- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
-
- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
- return -1;
-
- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
-}
-
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac);
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool orig,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
- int bss, int wcid);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
#endif
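The mtk_ppe.c hunks also call mtk_get_ib1_pkt_type() and mtk_get_ib1_pkt_type_mask(), which are not shown in this excerpt. A rough sketch of such version-aware accessors, assuming MTK_NETSYS_V2 selects the *_V2 field layout declared above (the in-tree helpers may be implemented differently):

static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
{
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return MTK_FOE_IB1_PACKET_TYPE_V2;

	return MTK_FOE_IB1_PACKET_TYPE;
}

static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 ib1)
{
	/* keep the mask constant in each branch so FIELD_GET() accepts it */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
		return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, ib1);

	return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, ib1);
}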
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index eb0b598f14e4..391b071bcff3 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -79,7 +79,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
int i;
for (i = 0; i < MTK_PPE_ENTRIES; i++) {
- struct mtk_foe_entry *entry = &ppe->foe_table[i];
+ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
struct mtk_foe_mac_info *l2;
struct mtk_flow_addr_info ai = {};
unsigned char h_source[ETH_ALEN];
@@ -162,52 +162,28 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
}
static int
-mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, false);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all);
static int
-mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, true);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind);
-static int
-mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
- return single_open(file, mtk_ppe_debugfs_foe_show_all,
- inode->i_private);
-}
-
-static int
-mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
-{
- return single_open(file, mtk_ppe_debugfs_foe_show_bind,
- inode->i_private);
-}
-
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
-{
- static const struct file_operations fops_all = {
- .open = mtk_ppe_debugfs_foe_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
- static const struct file_operations fops_bind = {
- .open = mtk_ppe_debugfs_foe_open_bind,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
struct dentry *root;
- root = debugfs_create_dir("mtk_ppe", NULL);
- debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
- debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+ root = debugfs_create_dir(ppe->dirname, NULL);
+ debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops);
return 0;
}
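The removed open handlers and file_operations boilerplate are replaced by DEFINE_SHOW_ATTRIBUTE(), which generates them from the *_show() functions. For illustration, DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all) expands to roughly the following (see <linux/seq_file.h> for the exact definition):

static int mtk_ppe_debugfs_foe_all_open(struct inode *inode,
					struct file *file)
{
	return single_open(file, mtk_ppe_debugfs_foe_all_show,
			   inode->i_private);
}

static const struct file_operations mtk_ppe_debugfs_foe_all_fops = {
	.owner		= THIS_MODULE,
	.open		= mtk_ppe_debugfs_foe_all_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};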
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 25dc3c3aa31d..28bbd1df3e30 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -52,18 +52,19 @@ static const struct rhashtable_params mtk_flow_ht_params = {
};
static int
-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
- bool egress)
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data, bool egress)
{
- return mtk_foe_entry_set_ipv4_tuple(foe, egress,
+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
data->v4.src_addr, data->src_port,
data->v4.dst_addr, data->dst_port);
}
static int
-mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data)
{
- return mtk_foe_entry_set_ipv6_tuple(foe,
+ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
data->v6.src_addr.s6_addr32, data->src_port,
data->v6.dst_addr.s6_addr32, data->dst_port);
}
@@ -173,7 +174,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
- *dev = dp->cpu_dp->master;
+ *dev = dsa_port_to_master(dp);
return dp->index;
#else
@@ -190,16 +191,29 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
int pse_port, dsa_port;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
- mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
- info.wcid);
- pse_port = 3;
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+ info.bss, info.wcid);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ switch (info.wdma_idx) {
+ case 0:
+ pse_port = 8;
+ break;
+ case 1:
+ pse_port = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pse_port = 3;
+ }
*wed_index = info.wdma_idx;
goto out;
}
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(foe, dsa_port);
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
if (dev == eth->netdev[0])
pse_port = 1;
@@ -209,7 +223,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
return -EOPNOTSUPP;
out:
- mtk_foe_entry_set_pse_port(foe, pse_port);
+ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
return 0;
}
@@ -333,9 +347,8 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
- data.eth.h_source,
- data.eth.h_dest);
+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+ data.eth.h_source, data.eth.h_dest);
if (err)
return err;
@@ -360,7 +373,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv4_addr(&foe, &data, false);
+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -371,7 +384,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
data.v6.src_addr = addrs.key->src;
data.v6.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv6_addr(&foe, &data);
+ mtk_flow_set_ipv6_addr(eth, &foe, &data);
}
flow_action_for_each(i, act, &rule->action) {
@@ -401,7 +414,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
if (err)
return err;
}
@@ -413,10 +426,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
}
if (data.pppoe.num == 1)
- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
&wed_index);
@@ -434,7 +447,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
- err = mtk_foe_entry_commit(eth->ppe, entry);
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
goto free;
@@ -446,7 +459,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return 0;
clear:
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
if (wed_index >= 0)
@@ -464,7 +477,7 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (entry->wed_index >= 0)
@@ -485,7 +498,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
@@ -537,7 +550,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe || !eth->ppe->foe_table)
+ if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -589,8 +602,5 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe || !eth->ppe->foe_table)
- return 0;
-
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 0c45ea0900f1..59596d823d8b 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -21,6 +21,9 @@
#define MTK_PPE_GLO_CFG_BUSY BIT(31)
#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
@@ -54,6 +57,7 @@
#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
enum {
MTK_PPE_SCAN_MODE_DISABLED,
@@ -112,6 +116,8 @@ enum {
#define MTK_PPE_DEFAULT_CPU_PORT 0x248
#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
+
#define MTK_PPE_MTU_DROP 0x308
#define MTK_PPE_VLAN_MTU0 0x30c
@@ -141,4 +147,6 @@ enum {
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+#define MTK_PPE_SBW_CTRL 0x374
+
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 3f0e5e64de50..7e890f81148e 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1255,7 +1255,7 @@ static const struct net_device_ops mtk_star_netdev_ops = {
static void mtk_star_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+ strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
/* TODO Add ethtool stats. */
@@ -1651,8 +1651,7 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
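The mtk_star_emac.c hunk follows the tree-wide netif_napi_add() conversion: the explicit poll-weight argument is gone and the networking core applies its default budget. For illustration, the old and new call patterns (assuming the post-conversion prototype):

/* before: weight passed explicitly */
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll, NAPI_POLL_WEIGHT);

/* after: default budget implied; dedicated _tx variant for the TX path */
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);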
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 29be2fcafea3..099b6e0df619 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -25,6 +25,11 @@
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
+#define MTK_WED_MAX_GROUP_SIZE 0x100
+#define MTK_WED_VLD_GROUP_SIZE 0x40
+#define MTK_WED_PER_GROUP_PKT 128
+
+#define MTK_WED_FBUF_SIZE 128
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);
@@ -80,11 +85,31 @@ static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
struct mtk_wed_hw *hw;
+ int i;
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw)
+ return NULL;
+
+ if (!hw->wed_dev)
+ goto out;
+
+ if (hw->version == 1)
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+ }
+ /* MT7986 PCIE or AXI */
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = hw_list[i];
+ if (hw && !hw->wed_dev)
+ goto out;
+ }
- hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
- if (!hw || hw->wed_dev)
- return NULL;
+ return NULL;
+out:
hw->wed_dev = dev;
return hw;
}
@@ -150,10 +175,17 @@ mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
desc->buf0 = cpu_to_le32(buf_phys);
desc->buf1 = cpu_to_le32(buf_phys + txd_size);
- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG1;
+
+ if (dev->hw->version == 1)
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ else
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG0;
desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
desc++;
@@ -209,7 +241,7 @@ mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
if (!ring->desc)
return;
- dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
+ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
ring->desc, ring->desc_phys);
}
@@ -229,6 +261,14 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ if (dev->hw->version == 1)
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
@@ -237,9 +277,54 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
}
static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+{
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+ } else {
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ }
+}
+
+static void
+mtk_wed_dma_disable(struct mtk_wed_device *dev)
+{
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0);
+
+ if (dev->hw->version == 1) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_m32(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0);
+ } else {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ mtk_wed_set_512_support(dev, false);
+ }
+}
+
+static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
- regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ mtk_wed_dma_disable(dev);
mtk_wed_set_ext_int(dev, false);
wed_clr(dev, MTK_WED_CTRL,
@@ -252,21 +337,11 @@ mtk_wed_stop(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
-
- wed_clr(dev, MTK_WED_GLO_CFG,
- MTK_WED_GLO_CFG_TX_DMA_EN |
- MTK_WED_GLO_CFG_RX_DMA_EN);
- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
- wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
- MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
}
static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
- struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
struct mtk_wed_hw *hw = dev->hw;
mutex_lock(&hw_lock);
@@ -281,9 +356,14 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mtk_wed_free_buffer(dev);
mtk_wed_free_tx_rings(dev);
- if (of_dma_is_coherent(wlan_node))
- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
- BIT(hw->index), BIT(hw->index));
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ struct device_node *wlan_node;
+
+ wlan_node = dev->wlan.pci_dev->dev.of_node;
+ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+ }
if (!hw_list[!hw->index]->wed_dev &&
hw->eth->dma_dev != hw->eth->dev)
@@ -296,14 +376,76 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mutex_unlock(&hw_lock);
}
+#define PCIE_BASE_ADDR0 0x11280000
+static void
+mtk_wed_bus_init(struct mtk_wed_device *dev)
+{
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
+
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+ /* pola setting */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+{
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ }
+}
+
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
u32 mask, set;
- u32 offset;
mtk_wed_stop(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
@@ -313,14 +455,33 @@ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
- wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
+ if (dev->hw->version == 1) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
- offset = dev->hw->index ? 0x04000400 : 0;
- wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
- wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ MTK_PCIE_BASE(dev->hw->index));
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0,
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
+ MTK_WDMA_INT_STATUS) |
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
+ MTK_WDMA_GLO_CFG));
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1,
+ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
+ MTK_WDMA_RING_TX(0)) |
+ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
+ MTK_WDMA_RING_RX(0)));
+ }
}
static void
@@ -340,37 +501,65 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
- wed_w32(dev, MTK_WED_TX_BM_TKID,
- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
- dev->wlan.token_start) |
- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
- dev->wlan.token_start + dev->wlan.nbuf - 1));
-
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
- wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
- FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
- MTK_WED_TX_BM_DYN_THR_HI);
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+ wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+ dev->buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->buf_ring.size / 128));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
- wed_set(dev, MTK_WED_CTRL,
- MTK_WED_CTRL_WED_TX_BM_EN |
- MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ if (dev->hw->version == 1)
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ else
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}
static void
-mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size)
{
+ void *head = (void *)ring->desc;
int i;
for (i = 0; i < size; i++) {
- desc[i].buf0 = 0;
- desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
- desc[i].buf1 = 0;
- desc[i].info = 0;
+ struct mtk_wdma_desc *desc;
+
+ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = 0;
}
}
@@ -421,12 +610,10 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
int i;
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
- struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
-
- if (!desc)
+ if (!dev->tx_ring[i].desc)
continue;
- mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE);
}
if (mtk_wed_poll_busy(dev))
@@ -483,16 +670,16 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
- int size)
+ int size, u32 desc_size)
{
- ring->desc = dma_alloc_coherent(dev->hw->dev,
- size * sizeof(*ring->desc),
+ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
+ ring->desc_size = desc_size;
ring->size = size;
- mtk_wed_ring_reset(ring->desc, size);
+ mtk_wed_ring_reset(ring, size);
return 0;
}
@@ -500,9 +687,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
- if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -520,43 +708,63 @@ mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
}
static void
-mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
- u32 wdma_mask;
- u32 val;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
- if (!dev->tx_wdma[i].desc)
- mtk_wed_wdma_ring_setup(dev, i, 16);
-
- wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
-
- mtk_wed_hw_init(dev);
+ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+ /* wed control cr set */
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
- wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
- MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
- MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
- wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
- MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+ /* initial tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
+ dev->wlan.tx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
+ dev->wlan.tx_tbit[1]));
+
+ /* initial txfree interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
+ dev->wdma_idx));
+ }
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
- wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
-
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+}
+
+static void
+mtk_wed_dma_enable(struct mtk_wed_device *dev)
+{
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
@@ -567,16 +775,54 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ wed_set(dev, MTK_WED_WPDMA_CTRL,
+ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ }
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ if (!dev->tx_wdma[i].desc)
+ mtk_wed_wdma_ring_setup(dev, i, 16);
+
+ mtk_wed_hw_init(dev);
+ mtk_wed_configure_irq(dev, irq_mask);
+
mtk_wed_set_ext_int(dev, true);
- val = dev->wlan.wpdma_phys |
- MTK_PCIE_MIRROR_MAP_EN |
- FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
- if (dev->hw->index)
- val |= BIT(1);
- val |= BIT(0);
- regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ if (dev->hw->version == 1) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ } else {
+ mtk_wed_set_512_support(dev, true);
+ }
+ mtk_wed_dma_enable(dev);
dev->running = true;
}
@@ -585,12 +831,14 @@ mtk_wed_attach(struct mtk_wed_device *dev)
__releases(RCU)
{
struct mtk_wed_hw *hw;
+ struct device *device;
int ret = 0;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"mtk_wed_attach without holding the RCU read lock");
- if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
+ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
+ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
!try_module_get(THIS_MODULE))
ret = -ENODEV;
@@ -608,7 +856,11 @@ mtk_wed_attach(struct mtk_wed_device *dev)
goto out;
}
- dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+ ? &dev->wlan.pci_dev->dev
+ : &dev->wlan.platform_dev->dev;
+ dev_info(device, "attaching wed device %d version %d\n",
+ hw->index, hw->version);
dev->hw = hw;
dev->dev = hw->dev;
@@ -626,7 +878,9 @@ mtk_wed_attach(struct mtk_wed_device *dev)
}
mtk_wed_hw_init_early(dev);
- regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
+ if (hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
out:
mutex_unlock(&hw_lock);
@@ -653,7 +907,8 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
- if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc)))
return -ENOMEM;
if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
@@ -680,21 +935,21 @@ static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->txfree_ring;
- int i;
+ int i, index = dev->hw->version == 1;
/*
* For txfree event handling, the same DMA ring is shared between WED
* and WLAN. The WLAN driver accesses the ring index registers through
* WED
*/
- ring->reg_base = MTK_WED_RING_RX(1);
+ ring->reg_base = MTK_WED_RING_RX(index);
ring->wpdma = regs;
for (i = 0; i < 12; i += 4) {
u32 val = readl(regs + i);
- wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
- wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
+ wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
}
return 0;
@@ -703,11 +958,19 @@ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
- u32 val;
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
- val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ val &= ext_mask;
if (!dev->hw->num_flows)
val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
if (val && net_ratelimit())
@@ -782,7 +1045,8 @@ out:
}
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, int index)
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
{
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
@@ -829,26 +1093,33 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
goto unlock;
+
hw->node = np;
hw->regs = regs;
hw->eth = eth;
hw->dev = &pdev->dev;
+ hw->wdma_phy = wdma_phy;
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
- "mediatek,pcie-mirror");
- hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
- "mediatek,hifsys");
- if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
- kfree(hw);
- goto unlock;
- }
+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+
+ if (hw->version == 1) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
- if (!index) {
- regmap_write(hw->mirror, 0, 0);
- regmap_write(hw->mirror, 4, 0);
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
}
+
mtk_wed_hw_add_debugfs(hw);
hw_list[index] = hw;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 981ec613f4b0..ae420ca01a48 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -18,11 +18,13 @@ struct mtk_wed_hw {
struct regmap *hifsys;
struct device *dev;
void __iomem *wdma;
+ phys_addr_t wdma_phy;
struct regmap *mirror;
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
u32 debugfs_reg;
u32 num_flows;
+ u8 version;
char dirname[5];
int irq;
int index;
@@ -101,14 +103,16 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
}
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, int index);
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
void mtk_wed_exit(void);
int mtk_wed_flow_add(int index);
void mtk_wed_flow_remove(int index);
#else
static inline void
mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
- void __iomem *wdma, int index)
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
{
}
static inline void
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index a81d3fd1a439..f420f187e837 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -116,6 +116,9 @@ wed_txinfo_show(struct seq_file *s, void *data)
DUMP_WDMA(WDMA_GLO_CFG),
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+ DUMP_STR("TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 0a0465ea58b4..e270fb336143 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -5,6 +5,7 @@
#define __MTK_WED_REGS_H
#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
@@ -41,6 +42,7 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_RESERVE_EN BIT(12)
#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
#define MTK_WED_EXT_INT_STATUS 0x020
@@ -57,7 +59,8 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
-#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
@@ -65,8 +68,7 @@ struct mtk_wdma_desc {
MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
- MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
- MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
#define MTK_WED_EXT_INT_MASK 0x028
@@ -81,6 +83,7 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_BM_BASE 0x084
#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_V2 0x0c8
#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
@@ -94,7 +97,25 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_BM_DYN_THR 0x0a0
#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
+
+#define MTK_WED_TX_TKID_CTRL 0x0c0
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_TKID_DYN_THR 0x0e0
+#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_TXP_DW0 0x120
+#define MTK_WED_TXP_DW1 0x124
+#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
+#define MTK_WED_TXDP_CTRL 0x130
+#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
+#define MTK_WED_RX_BM_TKID_MIB 0x1cc
#define MTK_WED_INT_STATUS 0x200
#define MTK_WED_INT_MASK 0x204
@@ -125,6 +146,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
@@ -155,21 +177,64 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+
#define MTK_WED_WPDMA_RESET_IDX 0x50c
#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WPDMA_CTRL 0x518
+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
+
#define MTK_WED_WPDMA_INT_CTRL 0x520
#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22)
+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_WPDMA_INT_MASK 0x524
+#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
+
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
+
#define MTK_WED_PCIE_CFG_BASE 0x560
+#define MTK_WED_PCIE_CFG_INTM 0x564
+#define MTK_WED_PCIE_CFG_MSIS 0x568
#define MTK_WED_PCIE_INT_TRIGGER 0x570
#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+#define MTK_WED_PCIE_INT_CTRL 0x57c
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+
#define MTK_WED_WPDMA_CFG_BASE 0x580
+#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+#define MTK_WED_WPDMA_CFG_TX 0x588
+#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
@@ -203,15 +268,24 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+#define MTK_WED_WDMA_INT_CLR 0xa24
+#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
+
#define MTK_WED_WDMA_INT_TRIGGER 0xa28
#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
#define MTK_WED_WDMA_INT_CTRL 0xa2c
#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_WDMA_CFG_BASE 0xaa0
#define MTK_WED_WDMA_OFFSET0 0xaa4
#define MTK_WED_WDMA_OFFSET1 0xaa8
+#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
+#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
+
#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
@@ -221,15 +295,22 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_OFS_CPU_IDX 0x08
#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
#define MTK_WDMA_GLO_CFG 0x204
-#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
+#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
#define MTK_WDMA_RESET_IDX 0x208
#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WDMA_INT_STATUS 0x220
+
#define MTK_WDMA_INT_MASK 0x228
#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 6affbd241264..1184ac5751e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -152,7 +152,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
- netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
napi_enable(&cq->napi);
break;
case TX_XDP:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 6400a827173c..7d45f1d55f79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -89,15 +89,15 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
+ strscpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dcb9eb1899ce..fe48d20d6118 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1779,7 +1779,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index d89a3da89e5a..59b8b3c73582 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -208,7 +208,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->sg, chunk->npages,
DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
@@ -222,7 +222,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
chunk->npages, DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 78c5f40382c9..d3fc86cd3c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3071,6 +3071,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
+ devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
@@ -3093,6 +3094,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
@@ -3109,6 +3111,7 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index bfc0cd5ec423..26685fd0fdaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -139,6 +139,14 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
+config MLX5_EN_MACSEC
+ bool "Connect-X support for MACSec offload"
+ depends on MLX5_CORE_EN
+ depends on MACSEC
+ default n
+ help
+ Build support for MACsec cryptography-offload acceleration in the NIC.
+
config MLX5_EN_IPSEC
bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a3773a8177ed..a22c32aabf11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -92,6 +92,9 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
+mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
+ en_accel/macsec_stats.o
+
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
en_accel/ipsec_offload.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a560df446bac..26a23047f1f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -93,29 +93,26 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_LOG_WQE_SZ 18
-#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
- MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
-#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-
-#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
-#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
-#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
-/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
- * WQEs, This page will absorb write overflow by the hardware, when
- * receiving packets larger than MTU. These oversize packets are
- * dropped by the driver at a later stage.
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+
+/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
+ * These are theoretical maximums, which can be further restricted by
+ * capabilities. These values are used for static resource allocations and
+ * sanity checks.
+ * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
+ * size actually used at runtime, but it's not a problem when calculating static
+ * array sizes.
*/
-#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
-#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5_UMR_MAX_MTT_SPACE \
+ (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
+ MLX5_UMR_MTT_ALIGNMENT))
+#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
+ rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+
#define MLX5E_MAX_RQ_NUM_MTTS \
- (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
+#define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
- (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
-#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
- (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
- (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
@@ -127,8 +124,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
- MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
@@ -150,13 +146,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_UMR_WQE_INLINE_SZ \
- (sizeof(struct mlx5e_umr_wqe) + \
- ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
- MLX5_UMR_MTT_ALIGNMENT))
-#define MLX5E_UMR_WQEBBS \
- (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-
#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
(sizeof(struct mlx5e_umr_wqe) +\
(sizeof(struct mlx5_klm) * (sgl_len)))
@@ -174,8 +163,7 @@ struct page_pool;
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
- mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -189,12 +177,6 @@ do { \
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
-enum mlx5e_rq_group {
- MLX5E_RQ_GROUP_REGULAR,
- MLX5E_RQ_GROUP_XSK,
-#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
-};
-
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
if (mlx5_lag_is_lacp_owner(mdev))
@@ -227,13 +209,15 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
* bytes units. Driver hardens the limitation to 1KB (16
* WQEBBs), unless firmware capability is stricter.
*/
-static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
- return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
- MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+ BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX);
+
+ return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}
-static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
+static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
* Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,8 +226,9 @@ static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
* than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
* cache-aligned.
*/
- u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+ u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
#if L1_CACHE_BYTES >= 128
wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
@@ -272,6 +257,7 @@ struct mlx5e_umr_wqe {
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
+ DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
@@ -476,15 +462,11 @@ struct mlx5e_txqsq {
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
cqe_ts_to_ns ptp_cyc2time;
- u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
-struct mlx5e_dma_info {
- dma_addr_t addr;
- union {
- struct page *page;
- struct xdp_buff *xsk;
- };
+union mlx5e_alloc_unit {
+ struct page *page;
+ struct xdp_buff *xsk;
};
/* XDP packets can be transmitted in different ways. On completion, we need to
@@ -580,7 +562,6 @@ struct mlx5e_xdpsq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
- u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_ktls_resync_resp;
@@ -609,25 +590,20 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
- u16 max_sq_wqebbs;
struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *au;
u32 offset;
bool last_in_page;
};
-struct mlx5e_umr_dma_info {
- struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
struct mlx5e_mpw_info {
- struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
- DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
+ union mlx5e_alloc_unit alloc_units[];
};
#define MLX5E_MAX_RX_FRAGS 4
@@ -635,13 +611,13 @@ struct mlx5e_mpw_info {
/* a single cache unit is capable to serve one napi call (for non-striding rq)
* or a MPWQE (for striding rq).
*/
-#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
- MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
struct mlx5e_page_cache {
u32 head;
u32 tail;
- struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+ struct page *page_cache[MLX5E_CACHE_SIZE];
};
struct mlx5e_rq;
@@ -674,6 +650,12 @@ struct mlx5e_rq_frags_info {
u8 num_frags;
u8 log_num_frags;
u8 wqe_bulk;
+ u8 wqe_index_mask;
+};
+
+struct mlx5e_dma_info {
+ dma_addr_t addr;
+ struct page *page;
};
struct mlx5e_shampo_hd {
@@ -695,13 +677,20 @@ struct mlx5e_hw_gro_data {
int second_ip_id;
};
+enum mlx5e_mpwrq_umr_mode {
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_OVERSIZED,
+ MLX5E_MPWRQ_UMR_MODE_TRIPLE,
+};
+
struct mlx5e_rq {
/* data path */
union {
struct {
struct mlx5_wq_cyc wq;
struct mlx5e_wqe_frag_info *frags;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *alloc_units;
struct mlx5e_rq_frags_info info;
mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
@@ -710,6 +699,7 @@ struct mlx5e_rq {
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
+ __be32 umr_mkey_be;
u16 num_strides;
u16 actual_wq_head;
u8 log_stride_sz;
@@ -717,6 +707,11 @@ struct mlx5e_rq {
u8 umr_last_bulk;
u8 umr_completed;
u8 min_wqe_bulk;
+ u8 page_shift;
+ u8 pages_per_wqe;
+ u8 umr_wqebbs;
+ u8 mtts_per_wqe;
+ u8 umr_mode;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
@@ -767,7 +762,6 @@ struct mlx5e_rq {
u32 rqn;
struct mlx5_core_dev *mdev;
struct mlx5e_channel *channel;
- u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@ -856,11 +850,6 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
-enum {
- MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
-};
-
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
@@ -959,6 +948,9 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
+#ifdef CONFIG_MLX5_EN_MACSEC
+ struct mlx5e_macsec *macsec;
+#endif
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
@@ -1010,7 +1002,6 @@ struct mlx5e_profile {
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
- u8 rq_groups;
u32 features;
};
@@ -1019,7 +1010,8 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
@@ -1047,6 +1039,7 @@ struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
+#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
@@ -1101,7 +1094,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
@@ -1136,6 +1129,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
@@ -1148,8 +1142,6 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
-void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
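For reference, the arithmetic behind the new MLX5_UMR_MAX_MTT_SPACE and MLX5_MPWRQ_MAX_PAGES_PER_WQE bounds above can be reproduced with a short standalone sketch. The constants below (64-byte WQEBB, 16-WQEBB maximum WQE, 64-byte MTT list alignment, 8-byte MTT entry, 128-byte fixed UMR WQE header) are assumptions for illustration, not values quoted from the mlx5 headers.

/* Userspace sketch of the MLX5_MPWRQ_MAX_PAGES_PER_WQE arithmetic. */
#include <stdio.h>

#define SEND_WQE_BB		64u	/* assumed WQE basic block size */
#define SEND_WQE_MAX_WQEBBS	16u	/* assumed firmware WQE limit (1 KB) */
#define UMR_MTT_ALIGNMENT	64u	/* assumed MTT list alignment */
#define MTT_ENTRY_SIZE		8u	/* assumed sizeof(struct mlx5_mtt) */
#define UMR_WQE_HDR_SIZE	128u	/* assumed fixed part of mlx5e_umr_wqe */

static unsigned int align_down(unsigned int x, unsigned int a)
{
	return x - x % a;
}

static unsigned int rounddown_pow_of_two(unsigned int x)
{
	unsigned int p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int max_wqe = SEND_WQE_BB * SEND_WQE_MAX_WQEBBS;	/* 1024 */
	unsigned int mtt_space = align_down(max_wqe - UMR_WQE_HDR_SIZE,
					    UMR_MTT_ALIGNMENT);		/* 896 */
	unsigned int pages = rounddown_pow_of_two(mtt_space / MTT_ENTRY_SIZE);

	/* 896 bytes of MTT space -> 64 pages per WQE under these assumptions,
	 * consistent with the BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64)
	 * check added in en/params.c.
	 */
	printf("MTT space %u bytes -> %u pages per WQE\n", mtt_space, pages);
	return 0;
}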
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index e7c14c0de0a7..48581ea3adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -10,28 +10,33 @@ unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
return chs->num;
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+static struct mlx5e_channel *mlx5e_channels_get(struct mlx5e_channels *chs, unsigned int ix)
{
- struct mlx5e_channel *c;
+ WARN_ON_ONCE(ix >= mlx5e_channels_get_num(chs));
+ return chs->c[ix];
+}
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- *rqn = c->rq.rqn;
+ return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
{
- struct mlx5e_channel *c;
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+ *rqn = c->rq.rqn;
+}
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
- return false;
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
+
+ WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
- return true;
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index ca00cbc827cb..637ca90daaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -9,8 +9,9 @@
struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 9b8cdf2e68ad..bf2741eb7f9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -8,6 +8,7 @@
#include "lib/fs_ttc.h"
struct mlx5e_post_act;
+struct mlx5e_tc_table;
enum {
MLX5E_TC_FT_LEVEL = 0,
@@ -15,6 +16,11 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
+};
+
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
@@ -83,54 +89,28 @@ enum {
#endif
};
-struct mlx5e_priv;
-
-#ifdef CONFIG_MLX5_EN_RXNFC
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-#else
-static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
-static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
-{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{ return -EOPNOTSUPP; }
-#endif /* CONFIG_MLX5_EN_RXNFC */
+struct mlx5e_flow_steering;
+struct mlx5e_rx_res;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables;
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple);
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple);
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs);
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#else
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
+{ return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
#endif
#ifdef CONFIG_MLX5_EN_TLS
@@ -142,54 +122,63 @@ struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
-struct mlx5e_flow_steering {
- bool state_destroy;
- bool vlan_strip_disable;
- struct mlx5_core_dev *mdev;
- struct mlx5_flow_namespace *ns;
-#ifdef CONFIG_MLX5_EN_RXNFC
- struct mlx5e_ethtool_steering ethtool;
-#endif
- struct mlx5e_tc_table *tc;
- struct mlx5e_promisc_table promisc;
- struct mlx5e_vlan_table *vlan;
- struct mlx5e_l2_table l2;
- struct mlx5_ttc_table *ttc;
- struct mlx5_ttc_table *inner_ttc;
-#ifdef CONFIG_MLX5_EN_ARFS
- struct mlx5e_arfs_tables *arfs;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5e_accel_fs_tcp *accel_tcp;
-#endif
- struct mlx5e_fs_udp *udp;
- struct mlx5e_fs_any *any;
- struct mlx5e_ptp_fs *ptp_fs;
-};
-
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev);
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile);
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
-
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
+#ifdef CONFIG_MLX5_EN_RXNFC
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
+#endif
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner);
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner);
+#ifdef CONFIG_MLX5_EN_ARFS
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs);
+#endif
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs);
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any);
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp);
+#ifdef CONFIG_MLX5_EN_TLS
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp);
+#endif
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy);
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs, bool vlan_strip_disable);
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs);
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs);
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
@@ -198,5 +187,18 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
__be16 proto, u16 vid);
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+
+#define fs_err(fs, fmt, ...) \
+ mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_dbg(fs, fmt, ...) \
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn(fs, fmt, ...) \
+ mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn_once(fs, fmt, ...) \
+ mlx5_core_warn_once(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
#endif /* __MLX5E_FLOW_STEER_H__ */
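The fs.h rework above turns struct mlx5e_flow_steering into a handle that other files use only through getters/setters and the fs_err()/fs_dbg() wrappers, so the structure definition can live privately in en_fs.c. A minimal standalone sketch of that pattern follows; the names are purely illustrative and not taken from the driver.

/* Userspace sketch of the opaque-handle + accessor pattern. */
#include <stdio.h>
#include <stdlib.h>

/* Public view: consumers see only a forward declaration and accessors. */
struct flow_steering;
struct flow_steering *fs_create(int mdev_id);
void fs_destroy(struct flow_steering *fs);
int fs_get_mdev_id(const struct flow_steering *fs);
void fs_set_vlan_strip_disable(struct flow_steering *fs, int disable);

#define fs_err(fs, fmt, ...) \
	fprintf(stderr, "mdev%d: " fmt, fs_get_mdev_id(fs), ##__VA_ARGS__)

/* Private definition: in the driver this would live only in en_fs.c. */
struct flow_steering {
	int mdev_id;
	int vlan_strip_disable;
};

struct flow_steering *fs_create(int mdev_id)
{
	struct flow_steering *fs = calloc(1, sizeof(*fs));

	if (fs)
		fs->mdev_id = mdev_id;
	return fs;
}

void fs_destroy(struct flow_steering *fs)
{
	free(fs);
}

int fs_get_mdev_id(const struct flow_steering *fs)
{
	return fs->mdev_id;
}

void fs_set_vlan_strip_disable(struct flow_steering *fs, int disable)
{
	fs->vlan_strip_disable = disable;
}

int main(void)
{
	struct flow_steering *fs = fs_create(0);

	if (!fs)
		return 1;
	fs_set_vlan_strip_disable(fs, 1);
	fs_err(fs, "add rule failed, err %d\n", -95);
	fs_destroy(fs);
	return 0;
}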
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
new file mode 100644
index 000000000000..9e276fd3c0cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5E_FS_ETHTOOL_H__
+#define __MLX5E_FS_ETHTOOL_H__
+
+struct mlx5e_priv;
+struct mlx5e_ethtool_steering;
+#ifdef CONFIG_MLX5_EN_RXNFC
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
+{ return 0; }
+static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
+#endif
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index e153d6119e02..03cb79adf912 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
#include "en/fs_tt_redirect.h"
#include "fs_core.h"
+#include "mlx5_core.h"
enum fs_udp_type {
FS_IPV4_UDP,
@@ -74,17 +74,17 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type
}
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port)
{
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
enum fs_udp_type type = tt2fs_udp(ttc_type);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_fs_udp *fs_udp;
int err;
if (type == FS_UDP_NUM_TYPES)
@@ -94,7 +94,6 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
@@ -106,31 +105,30 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add %s rule failed, err %d\n",
- __func__, fs_udp_type2str(type), err);
+ fs_err(fs, "%s: add %s rule failed, err %d\n",
+ __func__, fs_udp_type2str(type), err);
}
return rule;
}
-static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type type)
+static int fs_udp_add_default_rule(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5e_flow_table *fs_udp_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
- struct mlx5e_fs_udp *fs_udp;
int err;
- fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -206,33 +204,36 @@ out:
return err;
}
-static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
+static int fs_udp_create_table(struct mlx5e_flow_steering *fs, enum fs_udp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_flow_table *ft;
int err;
+ ft = &fs_udp->tables[type];
ft->num_groups = 0;
ft_attr.max_fte = MLX5E_FS_UDP_TABLE_SIZE;
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs %s table id %u level %u\n",
- fs_udp_type2str(type), ft->t->id, ft->t->level);
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs %s table id %u level %u\n",
+ fs_udp_type2str(type), ft->t->id, ft->t->level);
err = fs_udp_create_groups(ft, type);
if (err)
goto err;
- err = fs_udp_add_default_rule(priv, type);
+ err = fs_udp_add_default_rule(fs, type);
if (err)
goto err;
@@ -253,17 +254,17 @@ static void fs_udp_destroy_table(struct mlx5e_fs_udp *fs_udp, int i)
fs_udp->tables[i].t = NULL;
}
-static int fs_udp_disable(struct mlx5e_priv *priv)
+static int fs_udp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_udp2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_udp2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
return err;
}
}
@@ -271,30 +272,31 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int fs_udp_enable(struct mlx5e_priv *priv)
+static int fs_udp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- dest.ft = priv->fs->udp->tables[i].t;
+ dest.ft = udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_udp2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_udp2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_udp2tt(i), err);
return err;
}
}
return 0;
}
-void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
+ struct mlx5e_fs_udp *fs_udp = mlx5e_fs_get_udp(fs);
int i;
if (!fs_udp)
@@ -303,48 +305,50 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
if (--fs_udp->ref_cnt)
return;
- fs_udp_disable(priv);
+ fs_udp_disable(fs);
for (i = 0; i < FS_UDP_NUM_TYPES; i++)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
- priv->fs->udp = NULL;
+ mlx5e_fs_set_udp(fs, NULL);
}
-int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_fs_udp *udp = mlx5e_fs_get_udp(fs);
int i, err;
- if (priv->fs->udp) {
- priv->fs->udp->ref_cnt++;
+ if (udp) {
+ udp->ref_cnt++;
return 0;
}
- priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
- if (!priv->fs->udp)
+ udp = kzalloc(sizeof(*udp), GFP_KERNEL);
+ if (!udp)
return -ENOMEM;
+ mlx5e_fs_set_udp(fs, udp);
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
- err = fs_udp_create_table(priv, i);
+ err = fs_udp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = fs_udp_enable(priv);
+ err = fs_udp_enable(fs);
if (err)
goto err_destroy_tables;
- priv->fs->udp->ref_cnt = 1;
+ udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
- fs_udp_destroy_table(priv->fs->udp, i);
+ fs_udp_destroy_table(udp, i);
- kfree(priv->fs->udp);
- priv->fs->udp = NULL;
+ kfree(udp);
+ mlx5e_fs_set_udp(fs, NULL);
return err;
}
@@ -356,22 +360,21 @@ static void fs_any_set_ethertype_flow(struct mlx5_flow_spec *spec, u16 ether_typ
}
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type)
{
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft = NULL;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct mlx5e_fs_any *fs_any;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
@@ -383,31 +386,29 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "%s: add ANY rule failed, err %d\n",
- __func__, err);
+ fs_err(fs, "%s: add ANY rule failed, err %d\n",
+ __func__, err);
}
return rule;
}
-static int fs_any_add_default_rule(struct mlx5e_priv *priv)
+static int fs_any_add_default_rule(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
struct mlx5e_flow_table *fs_any_t;
struct mlx5_flow_destination dest;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
- struct mlx5e_fs_any *fs_any;
int err;
- fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
-
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
+ dest = mlx5_ttc_get_default_dest(ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, fs type=ANY, err %d\n",
- __func__, err);
+ fs_err(fs, "%s: add default rule failed, fs type=ANY, err %d\n",
+ __func__, err);
return err;
}
@@ -472,9 +473,11 @@ err:
return err;
}
-static int fs_any_create_table(struct mlx5e_priv *priv)
+static int fs_any_create_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_flow_table *ft = &priv->fs->any->table;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
+ struct mlx5e_flow_table *ft = &fs_any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -484,21 +487,21 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs ANY table id %u level %u\n",
- ft->t->id, ft->t->level);
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), "Created fs ANY table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = fs_any_create_groups(ft);
if (err)
goto err;
- err = fs_any_add_default_rule(priv);
+ err = fs_any_add_default_rule(fs);
if (err)
goto err;
@@ -509,35 +512,38 @@ err:
return err;
}
-static int fs_any_disable(struct mlx5e_priv *priv)
+static int fs_any_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
+ err = mlx5_ttc_fwd_default_dest(ttc, MLX5_TT_ANY);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, MLX5_TT_ANY, err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
}
-static int fs_any_enable(struct mlx5e_priv *priv)
+static int fs_any_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_fs_any *any = mlx5e_fs_get_any(fs);
struct mlx5_flow_destination dest = {};
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fs->any->table.t;
+ dest.ft = any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
+ err = mlx5_ttc_fwd_dest(ttc, MLX5_TT_ANY, &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, MLX5_TT_ANY, err);
+ fs_err(fs,
+ "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, MLX5_TT_ANY, err);
return err;
}
return 0;
@@ -553,9 +559,9 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
fs_any->table.t = NULL;
}
-void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_fs_any *fs_any = priv->fs->any;
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
if (!fs_any)
return;
@@ -563,43 +569,45 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
if (--fs_any->ref_cnt)
return;
- fs_any_disable(priv);
+ fs_any_disable(fs);
fs_any_destroy_table(fs_any);
kfree(fs_any);
- priv->fs->any = NULL;
+ mlx5e_fs_set_any(fs, NULL);
}
-int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_fs_any *fs_any = mlx5e_fs_get_any(fs);
int err;
- if (priv->fs->any) {
- priv->fs->any->ref_cnt++;
+ if (fs_any) {
+ fs_any->ref_cnt++;
return 0;
}
- priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
- if (!priv->fs->any)
+ fs_any = kzalloc(sizeof(*fs_any), GFP_KERNEL);
+ if (!fs_any)
return -ENOMEM;
+ mlx5e_fs_set_any(fs, fs_any);
- err = fs_any_create_table(priv);
+ err = fs_any_create_table(fs);
if (err)
return err;
- err = fs_any_enable(priv);
+ err = fs_any_enable(fs);
if (err)
goto err_destroy_table;
- priv->fs->any->ref_cnt = 1;
+ fs_any->ref_cnt = 1;
return 0;
err_destroy_table:
- fs_any_destroy_table(priv->fs->any);
+ fs_any_destroy_table(fs_any);
- kfree(priv->fs->any);
- priv->fs->any = NULL;
+ kfree(fs_any);
+ mlx5e_fs_set_any(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
index 7a70c4f38fda..5780fd7ad507 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h
@@ -4,23 +4,22 @@
#ifndef __MLX5E_FS_TT_REDIRECT_H__
#define __MLX5E_FS_TT_REDIRECT_H__
-#include "en.h"
#include "en/fs.h"
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_flow_steering *fs,
enum mlx5_traffic_types ttc_type,
u32 tir_num, u16 d_port);
-void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
-int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
+void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_flow_steering *fs);
+int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_flow_steering *fs);
/* ANY traffic type redirect*/
struct mlx5_flow_handle *
-mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
+mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_flow_steering *fs,
u32 tir_num, u16 ether_type);
-void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
-int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
+void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_flow_steering *fs);
+int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index e025040350ba..29dd3a04c154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,11 +6,212 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include <net/xdp_sock_drv.h>
-static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
+{
+ u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
+
+ return min_page_shift ? : 12;
+}
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+ u8 req_page_shift = xsk ? order_base_2(xsk->chunk_size) : PAGE_SHIFT;
+ u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);
+
+ /* Regular RQ uses order-0 pages, the NIC must be able to map them. */
+ if (WARN_ON_ONCE(!xsk && req_page_shift < min_page_shift))
+ min_page_shift = req_page_shift;
+
+ return max(req_page_shift, min_page_shift);
+}
+
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)
+{
+ /* Different memory management schemes use different mechanisms to map
+ * user-mode memory. The stricter the guarantees we have, the faster
+ * the mechanisms we can use:
+ * 1. MTT - direct mapping in page granularity.
+ * 2. KSM - indirect mapping to another MKey to arbitrary addresses, but
+ * all mappings have the same size.
+ * 3. KLM - indirect mapping to another MKey to arbitrary addresses, and
+ * mappings can have different sizes.
+ */
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ bool unaligned = xsk ? xsk->unaligned : false;
+ bool oversized = false;
+
+ if (xsk) {
+ oversized = xsk->chunk_size < (1 << page_shift);
+ WARN_ON_ONCE(xsk->chunk_size > (1 << page_shift));
+ }
+
+ /* XSK frame size doesn't match the UMR page size, either because the
+ * frame size is not a power of two, or it's smaller than the minimal
+ * page size supported by the firmware.
+ * It's possible to receive packets bigger than MTU in certain setups.
+ * To avoid writing over the XSK frame boundary, the top region of each
+ * stride is mapped to a garbage page, resulting in two mappings of
+ * different sizes per frame.
+ */
+ if (oversized) {
+ /* An optimization for frame sizes equal to 3 * power_of_two.
+ * 3 KSMs point to the frame, and one KSM points to the garbage
+ * page, which works faster than KLM.
+ */
+ if (xsk->chunk_size % 3 == 0 && is_power_of_2(xsk->chunk_size / 3))
+ return MLX5E_MPWRQ_UMR_MODE_TRIPLE;
+
+ return MLX5E_MPWRQ_UMR_MODE_OVERSIZED;
+ }
+
+ /* XSK frames can start at arbitrary unaligned locations, but they all
+ * have the same size, which is a power of two. This allows optimizing to
+ * one KSM per frame.
+ */
+ if (unaligned)
+ return MLX5E_MPWRQ_UMR_MODE_UNALIGNED;
+
+ /* XSK: frames are naturally aligned, MTT can be used.
+ * Non-XSK: Allocations happen in units of CPU pages, therefore, the
+ * mappings are naturally aligned.
+ */
+ return MLX5E_MPWRQ_UMR_MODE_ALIGNED;
+}
+
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode)
+{
+ switch (mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return sizeof(struct mlx5_mtt);
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return sizeof(struct mlx5_ksm);
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ return sizeof(struct mlx5_klm) * 2;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ return sizeof(struct mlx5_ksm) * 4;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode);
+ return 0;
+}
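The decision tree in mlx5e_mpwrq_umr_mode() and the per-mode entry sizes above can be condensed into a small standalone sketch. The page shift, chunk sizes, and entry sizes used here (8-byte MTT, 16-byte KSM and KLM, 4 KB minimal UMR page) are assumptions for illustration, not values quoted from the mlx5 headers.

/* Userspace sketch of the MPWRQ UMR mode selection. */
#include <stdbool.h>
#include <stdio.h>

enum umr_mode { UMR_ALIGNED, UMR_UNALIGNED, UMR_OVERSIZED, UMR_TRIPLE };

static bool is_power_of_2(unsigned int x)
{
	return x && !(x & (x - 1));
}

static enum umr_mode pick_umr_mode(bool xsk, unsigned int chunk_size,
				   bool unaligned, unsigned int page_shift)
{
	if (xsk && chunk_size < (1u << page_shift)) {
		/* Frame smaller than the UMR page: map the frame plus a
		 * garbage page, unless the 3 * power_of_two shortcut applies.
		 */
		if (chunk_size % 3 == 0 && is_power_of_2(chunk_size / 3))
			return UMR_TRIPLE;
		return UMR_OVERSIZED;
	}
	if (xsk && unaligned)
		return UMR_UNALIGNED;
	return UMR_ALIGNED;
}

static unsigned int umr_entry_size(enum umr_mode mode)
{
	/* Assumed entry sizes: 8-byte MTT, 16-byte KSM, 16-byte KLM. */
	switch (mode) {
	case UMR_ALIGNED:	return 8;	/* one MTT */
	case UMR_UNALIGNED:	return 16;	/* one KSM */
	case UMR_OVERSIZED:	return 32;	/* two KLMs */
	case UMR_TRIPLE:	return 64;	/* four KSMs */
	}
	return 0;
}

int main(void)
{
	static const char * const names[] = {
		"ALIGNED", "UNALIGNED", "OVERSIZED", "TRIPLE",
	};

	/* Examples assume a 4 KB minimal UMR page (page_shift = 12). */
	printf("4K aligned   -> %s\n", names[pick_umr_mode(true, 4096, false, 12)]);
	printf("4K unaligned -> %s\n", names[pick_umr_mode(true, 4096, true, 12)]);
	printf("2K frames    -> %s\n", names[pick_umr_mode(true, 2048, false, 12)]);
	printf("3K frames    -> %s\n", names[pick_umr_mode(true, 3072, false, 12)]);
	printf("entry size for TRIPLE: %u bytes\n", umr_entry_size(UMR_TRIPLE));
	return 0;
}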
+
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u16 max_wqe_size;
+
+ /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
+ max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
+ MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+ max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+
+ return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+}
+
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- return params->xdp_prog || xsk;
+ u8 log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+ u8 pages_per_wqe;
+
+ pages_per_wqe = log_wqe_sz > page_shift ? (1 << (log_wqe_sz - page_shift)) : 1;
+
+ /* Two MTTs are needed to form an octword. The number of MTTs is encoded
+ * in octwords in a UMR WQE, so we need at least two to avoid mapping
+ * garbage addresses.
+ */
+ if (WARN_ON_ONCE(pages_per_wqe < 2 && umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ pages_per_wqe = 2;
+
+ /* Sanity check for further calculations to succeed. */
+ BUILD_BUG_ON(MLX5_MPWRQ_MAX_PAGES_PER_WQE > 64);
+ if (WARN_ON_ONCE(pages_per_wqe > MLX5_MPWRQ_MAX_PAGES_PER_WQE))
+ return MLX5_MPWRQ_MAX_PAGES_PER_WQE;
+
+ return pages_per_wqe;
+}
+
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+ u16 umr_wqe_sz;
+
+ umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
+ ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+
+ WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
+
+ return umr_wqe_sz;
+}
+
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(mdev, page_shift, umr_mode),
+ MLX5_SEND_WQE_BB);
+}
+
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 pages_per_wqe = mlx5e_mpwrq_pages_per_wqe(mdev, page_shift, umr_mode);
+
+ /* Add another page as a buffer between WQEs. This page will absorb
+ * write overflow by the hardware, when receiving packets larger than
+ * MTU. These oversize packets are dropped by the driver at a later
+ * stage.
+ */
+ return ALIGN(pages_per_wqe + 1,
+ MLX5_SEND_WQE_BB / mlx5e_mpwrq_umr_entry_size(umr_mode));
+}
+
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ /* Same limits apply to KSMs and KLMs. */
+ u32 klm_limit = min(MLX5E_MAX_RQ_NUM_KSMS,
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return MLX5E_MAX_RQ_NUM_MTTS;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return klm_limit;
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ /* Each entry is two KLMs. */
+ return klm_limit / 2;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ /* Each entry is four KSMs. */
+ return klm_limit / 4;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+ return 0;
+}
+
+static u8 mlx5e_mpwrq_max_log_rq_size(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 mtts_per_wqe = mlx5e_mpwrq_mtts_per_wqe(mdev, page_shift, umr_mode);
+ u32 max_entries = mlx5e_mpwrq_max_num_entries(mdev, umr_mode);
+
+ return ilog2(max_entries / mtts_per_wqe);
+}
+
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ return mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode) +
+ mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
+ MLX5E_ORDER2_MAX_PACKET_MTU;
}
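Chaining the helpers above for the common aligned (MTT) case gives concrete numbers for the RQ geometry. The sketch below is standalone userspace code; its constants (64-byte WQEBB, 15 usable WQEBBs per cache-aligned UMR WQE, 128-byte fixed UMR WQE header, 8-byte MTT, U16_MAX-based MTT limit, 10 KB maximum packet) are assumptions for illustration rather than values quoted from the mlx5 headers.

/* Userspace sketch chaining the MPWRQ geometry helpers for MTT mode, 4K pages. */
#include <stdio.h>

static unsigned int ilog2u(unsigned int x)	/* floor(log2(x)) */
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

static unsigned int order2(unsigned int x)	/* ceil(log2(x)) */
{
	return ilog2u(x - 1) + 1;
}

static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) / a * a;
}

int main(void)
{
	unsigned int page_shift = 12;
	unsigned int wqe_bb = 64, max_aligned_wqebbs = 15;
	unsigned int umr_hdr = 128, mtt_sz = 8, mtt_align = 64;

	unsigned int max_wqe_sz = max_aligned_wqebbs * wqe_bb;			/* 960 */
	unsigned int max_pages = (max_wqe_sz - umr_hdr) / mtt_align * mtt_align / mtt_sz; /* 104 */
	unsigned int log_wqe_sz = ilog2u(max_pages) + page_shift;		/* 18 */

	if (log_wqe_sz > 18)	/* MLX5_MPWRQ_MAX_LOG_WQE_SZ */
		log_wqe_sz = 18;

	unsigned int pages_per_wqe = 1u << (log_wqe_sz - page_shift);		/* 64 */
	unsigned int umr_wqe_sz = umr_hdr + align_up(pages_per_wqe * mtt_sz, mtt_align); /* 640 */
	unsigned int umr_wqebbs = (umr_wqe_sz + wqe_bb - 1) / wqe_bb;		/* 10 */
	unsigned int mtts_per_wqe = align_up(pages_per_wqe + 1, wqe_bb / mtt_sz); /* 72 */
	unsigned int max_rq_mtts = 65535 / 4 * 4 * 2;				/* 131064 */
	unsigned int max_log_rq_size = ilog2u(max_rq_mtts / mtts_per_wqe);	/* 10 */
	unsigned int max_log_rq_pkts = max_log_rq_size + log_wqe_sz - order2(10 * 1024); /* 14 */

	printf("pages/WQE=%u UMR WQEBBs=%u MTTs/WQE=%u\n",
	       pages_per_wqe, umr_wqebbs, mtts_per_wqe);
	printf("max log RQ size=%u max log RQ packets=%u\n",
	       max_log_rq_size, max_log_rq_pkts);
	return 0;
}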
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
@@ -22,7 +223,7 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return xsk->headroom;
headroom = NET_IP_ALIGN;
- if (mlx5e_rx_is_xdp(params, xsk))
+ if (params->xdp_prog)
headroom += XDP_PACKET_HEADROOM;
else
headroom += MLX5_RX_HEADROOM;
@@ -30,70 +231,80 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return headroom;
}
-u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
- return linear_rq_headroom + hw_mtu;
+ return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
{
- u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
-
- /* AF_XDP doesn't build SKBs in place. */
- if (!xsk)
- frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
+ /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
+ u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
+ u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
- * special case. It can run with frames smaller than a page, as it
- * doesn't allocate pages dynamically. However, here we pretend that
- * fragments are page-sized: it allows to treat XSK frames like pages
- * by redirecting alloc and free operations to XSK rings and by using
- * the fact there are no multiple packets per "page" (which is a frame).
- * The latter is important, because frames may come in a random order,
- * and we will have trouble assemblying a real page of multiple frames.
- */
- if (mlx5e_rx_is_xdp(params, xsk))
- frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
+ return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
+}
- /* Even if we can go with a smaller fragment size, we must not put
- * multiple packets into a single frame.
+static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk,
+ bool mpwqe)
+{
+ /* XSK frames are mapped as individual pages, because frames may come in
+ * an arbitrary order from random locations in the UMEM.
*/
if (xsk)
- frag_sz = max_t(u32, frag_sz, xsk->chunk_size);
+ return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
+
+ /* XDP in mlx5e doesn't support multiple packets per page. */
+ if (params->xdp_prog)
+ return PAGE_SIZE;
- return frag_sz;
+ return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
}
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
+ u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
- return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+ return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
+ order_base_2(linear_stride_sz);
}
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- /* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
- * than one page. For this, check both with and without xsk.
+ if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
+ return false;
+
+ /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ * must fit into a CPU page.
*/
- u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
- mlx5e_rx_get_linear_frag_sz(params, NULL));
+ if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
+ return false;
+
+ /* XSK frames must be big enough to hold the packet data. */
+ if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
+ return false;
- return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
- linear_frag_sz <= PAGE_SIZE;
+ return true;
}
-bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
- u8 log_stride_sz, u8 log_num_strides)
+static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
+ u8 log_stride_sz, u8 log_num_strides,
+ u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
+ if (log_stride_sz + log_num_strides !=
+ mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode))
return false;
if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
@@ -113,28 +324,53 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- s8 log_num_strides;
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_num_strides;
u8 log_stride_sz;
+ u8 log_wqe_sz;
- if (!mlx5e_rx_is_linear_skb(params, xsk))
+ if (!mlx5e_rx_is_linear_skb(mdev, params, xsk))
return false;
- log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
+ log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
+ log_wqe_sz = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
- return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
+ if (log_wqe_sz < log_stride_sz)
+ return false;
+
+ log_num_strides = log_wqe_sz - log_stride_sz;
+
+ return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz,
+ log_num_strides, page_shift,
+ umr_mode);
}
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 log_pkts_per_wqe, page_shift, max_log_rq_size;
+
+ log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(mdev, params, xsk);
+ page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ max_log_rq_size = mlx5e_mpwrq_max_log_rq_size(mdev, page_shift, umr_mode);
/* Numbers are unsigned, don't subtract to avoid underflow. */
if (params->log_rq_mtu_frames <
log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ /* Ethtool's rx_max_pending is calculated for regular RQ, that uses
+ * pages of PAGE_SIZE. Max length of an XSK RQ might differ if it uses a
+ * frame size not equal to PAGE_SIZE.
+ * A stricter condition is checked in mlx5e_mpwrq_validate_xsk, WARN on
+ * unexpected failure.
+ */
+ if (WARN_ON_ONCE(params->log_rq_mtu_frames > log_pkts_per_wqe + max_log_rq_size))
+ return max_log_rq_size;
+
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
@@ -164,7 +400,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk)
{
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
- return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
+ return order_base_2(mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, true));
return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
@@ -173,7 +409,10 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
- return MLX5_MPWRQ_LOG_WQE_SZ -
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+
+ return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
@@ -209,11 +448,11 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
stop_room = mlx5e_ktls_get_stop_room(mdev, params);
stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
- /* A MPWQE can take up to the maximum-sized WQE + all the normal
- * stop room can be taken if a new packet breaks the active
- * MPWQE session and allocates its WQEs right away.
+ /* A MPWQE can take up to the maximum cacheline-aligned WQE +
+ * all the normal stop room can be taken if a new packet breaks
+ * the active MPWQE session and allocates its WQEs right away.
*/
- stop_room += mlx5e_stop_room_for_max_wqe(mdev);
+ stop_room += mlx5e_stop_room_for_mpwqe(mdev);
return stop_room;
}
@@ -320,22 +559,46 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return false;
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, NULL);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, NULL);
- if (params->xdp_prog) {
- /* XSK params are not considered here. If striding RQ is in use,
- * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
- * be called with the known XSK params.
- */
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- return false;
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ return -EOPNOTSUPP;
+
+ if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u16 max_mtu_pkts;
+
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ return -EOPNOTSUPP;
+
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ return -EINVAL;
+
+ /* Current RQ length is too big for the given frame size, the
+ * needed number of WQEs exceeds the maximum.
+ */
+ max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+ mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, umr_mode));
+ if (params->log_rq_mtu_frames > max_mtu_pkts) {
+ mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
+ 1 << params->log_rq_mtu_frames, xsk->chunk_size);
+ return -EINVAL;
}
- return true;
+ return 0;
}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
@@ -348,7 +611,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
+ BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
BIT(params->log_rq_mtu_frames),
BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
@@ -356,8 +619,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+ params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_CYCLIC;
}
@@ -374,9 +636,9 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
*/
if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
- mlx5e_striding_rq_possible(mdev, params) &&
+ !mlx5e_mpwrq_validate_regular(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
- !mlx5e_rx_is_linear_skb(params, NULL)))
+ !mlx5e_rx_is_linear_skb(mdev, params, NULL)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -419,16 +681,22 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
int max_mtu;
int i;
- if (mlx5e_rx_is_linear_skb(params, xsk)) {
+ if (mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
int frag_stride;
- frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
- frag_stride = roundup_pow_of_two(frag_stride);
+ frag_stride = mlx5e_rx_get_linear_stride_sz(mdev, params, xsk, false);
info->arr[0].frag_size = byte_count;
info->arr[0].frag_stride = frag_stride;
info->num_frags = 1;
- info->wqe_bulk = PAGE_SIZE / frag_stride;
+
+	/* N WQEs share the same page, where N = PAGE_SIZE / frag_stride.
+	 * The first WQE in the page is responsible for allocating that
+	 * page, and its index is k*N. If WQEs [k*N+1; k*N+N-1] are still
+	 * not completed, the allocation must stop before k*N.
+ */
+ info->wqe_index_mask = (PAGE_SIZE / frag_stride) - 1;
+
goto out;
}
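For the linear case above, a small worked example of the index mask; the 4K page and 2K stride are assumed example values.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;		/* assumed PAGE_SIZE         */
	unsigned int frag_stride = 2048;	/* assumed linear stride     */
	unsigned int wqes_per_page = page_size / frag_stride;	/* N = 2 */

	/* WQE k*N owns the page; WQEs whose index within the page is
	 * non-zero (index & mask != 0) must not trigger a new allocation.
	 */
	unsigned int wqe_index_mask = wqes_per_page - 1;	/* 0x1 */

	for (unsigned int ix = 0; ix < 4; ix++)
		printf("WQE %u %s the page\n", ix,
		       (ix & wqe_index_mask) ? "reuses" : "allocates");
	return 0;
}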
@@ -477,11 +745,40 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
i++;
}
info->num_frags = i;
- /* number of different wqes sharing a page */
- info->wqe_bulk = 1 + (info->num_frags % 2);
+
+ /* The last fragment of WQE with index 2*N may share the page with the
+ * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
+ * is not completed yet, WQE 2*N must not be allocated, as it's
+ * responsible for allocating a new page.
+ */
+ if (frag_size_max == PAGE_SIZE) {
+ /* No WQE can start in the middle of a page. */
+ info->wqe_index_mask = 0;
+ } else {
+ /* PAGE_SIZEs starting from 8192 don't use 2K-sized fragments,
+ * because there would be more than MLX5E_MAX_RX_FRAGS of them.
+ */
+ WARN_ON(PAGE_SIZE != 2 * DEFAULT_FRAG_SIZE);
+
+		/* An odd number of fragments makes it possible to pack the
+		 * last fragment of the previous WQE and the first fragment
+		 * of the next WQE into the same page.
+ * As long as DEFAULT_FRAG_SIZE is 2048, and MLX5E_MAX_RX_FRAGS
+ * is 4, the last fragment can be bigger than the rest only if
+ * it's the fourth one, so WQEs consisting of 3 fragments will
+ * always share a page.
+ * When a page is shared, WQE bulk size is 2, otherwise just 1.
+ */
+ info->wqe_index_mask = info->num_frags % 2;
+ }
out:
- info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
+	/* Bulking optimization: skip allocation until at least 8 WQEs can be
+	 * allocated in a row. At the same time, never start allocating while
+	 * the page is still used by older WQEs.
+	 */
+ info->wqe_bulk = max_t(u8, info->wqe_index_mask + 1, 8);
+
info->log_num_frags = order_base_2(info->num_frags);
return 0;
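The fragmented branch and the bulking step can be illustrated the same way; the fragment counts below are example inputs, and the bulk floor of 8 matches the code above.

#include <stdio.h>

static unsigned int bulk(unsigned int num_frags, unsigned int page_size,
			 unsigned int frag_size_max)
{
	unsigned int wqe_index_mask;

	if (frag_size_max == page_size)
		wqe_index_mask = 0;		/* no WQE starts mid-page */
	else
		wqe_index_mask = num_frags % 2;	/* odd frag count: pairs share a page */

	/* Never start allocating while an older WQE still uses the page,
	 * and batch at least 8 WQEs per allocation round.
	 */
	return (wqe_index_mask + 1) > 8 ? (wqe_index_mask + 1) : 8;
}

int main(void)
{
	printf("3 frags of 2K on 4K pages -> wqe_bulk %u\n", bulk(3, 4096, 2048));
	printf("1 frag  of 4K on 4K pages -> wqe_bulk %u\n", bulk(1, 4096, 4096));
	return 0;
}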
@@ -520,7 +817,7 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
- int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
int wqe_size = BIT(log_stride_sz) * num_strides;
/* +1 is for the case that the pkt_per_rsrv doesn't consume the reservation
@@ -544,7 +841,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
else
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -587,12 +884,16 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
- log_wqe_num_of_strides)) {
+ log_wqe_num_of_strides,
+ page_shift, umr_mode)) {
mlx5_core_err(mdev,
- "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
- log_wqe_stride_size, log_wqe_num_of_strides);
+ "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u, umr_mode %d\n",
+ log_wqe_stride_size, log_wqe_num_of_strides,
+ umr_mode);
return -EINVAL;
}
@@ -600,7 +901,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
- MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
MLX5_SET(wq, wq, shampo_enable, true);
MLX5_SET(wq, wq, log_reservation_size,
@@ -712,13 +1013,6 @@ static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
-{
- void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
- return MLX5_GET(wq, wq, log_wq_sz);
-}
-
/* This function calculates the maximum number of header entries that are needed
 * per WQE; the formula is based on the size of the reservations and the
* restriction we have about max packets for reservation that is equal to max
@@ -779,31 +1073,92 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
return wqebbs;
}
+static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 umr_wqebbs;
+
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+
+ return umr_wqebbs * (1 << mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk));
+}
+
static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp)
{
- u32 wqebbs;
+ u32 wqebbs, total_pages, useful_space;
/* MLX5_WQ_TYPE_CYCLIC */
if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ /* UMR WQEs for the regular RQ. */
+ wqebbs = mlx5e_mpwrq_total_umr_wqebbs(mdev, params, NULL);
/* If XDP program is attached, XSK may be turned on at any time without
* restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
* both regular RQ and XSK RQ.
- * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it
- * doesn't affect its return value, as long as params->xdp_prog != NULL,
- * so we can just multiply by 2.
+ *
+ * XSK uses different values of page_shift, and the total number of UMR
+ * WQEBBs depends on it. This dependency is complex and not monotonic,
+ * especially taking into consideration that some of the parameters come
+ * from capabilities. Hence, we have to try all valid values of XSK
+ * frame size (and page_shift) to find the maximum.
*/
- if (params->xdp_prog)
- wqebbs *= 2;
+ if (params->xdp_prog) {
+ u32 max_xsk_wqebbs = 0;
+ u8 frame_shift;
+
+ for (frame_shift = XDP_UMEM_MIN_CHUNK_SHIFT;
+ frame_shift <= PAGE_SHIFT; frame_shift++) {
+ /* The headroom doesn't affect the calculation. */
+ struct mlx5e_xsk_param xsk = {
+ .chunk_size = 1 << frame_shift,
+ .unaligned = false,
+ };
+
+ /* XSK aligned mode. */
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is a power of two. */
+ xsk.unaligned = true;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is not equal to stride size. */
+ xsk.chunk_size -= 1;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+
+ /* XSK unaligned mode, frame size is a triple power of two. */
+ xsk.chunk_size = (1 << frame_shift) / 4 * 3;
+ max_xsk_wqebbs = max(max_xsk_wqebbs,
+ mlx5e_mpwrq_total_umr_wqebbs(mdev, params, &xsk));
+ }
+
+ wqebbs += max_xsk_wqebbs;
+ }
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+ /* UMR WQEs don't cross the page boundary, they are padded with NOPs.
+ * This padding is always smaller than the max WQE size. That gives us
+ * at least (PAGE_SIZE - (max WQE size - MLX5_SEND_WQE_BB)) useful bytes
+ * per page. The number of pages is estimated as the total size of WQEs
+	 * divided by the useful space in a page, rounding up. If some WQEs don't
+	 * fully fit into the useful space, they can occupy part of the padding,
+	 * so the estimate is always safe (it reserves enough space).
+ */
+ useful_space = PAGE_SIZE - mlx5e_get_max_sq_wqebbs(mdev) + MLX5_SEND_WQE_BB;
+ total_pages = DIV_ROUND_UP(wqebbs * MLX5_SEND_WQE_BB, useful_space);
+ wqebbs = total_pages * (PAGE_SIZE / MLX5_SEND_WQE_BB);
+
return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
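A worked example of the page-rounding estimate above, using a 4K page and an assumed 16-WQEBB max WQE; the WQEBB demand going in is also just an example number.

#include <stdio.h>

#define WQEBB		64			/* MLX5_SEND_WQE_BB    */
#define PAGE_SZ		4096			/* assumed PAGE_SIZE   */
#define ASSUMED_MAX_WQE	(16 * WQEBB)		/* assumed max WQE size */

int main(void)
{
	unsigned int wqebbs = 300;		/* example UMR WQEBB demand */
	unsigned int useful_space, total_pages, padded_wqebbs, log_sz;

	/* UMR WQEs never cross a page boundary; the worst-case padding per
	 * page is (max WQE size - one WQEBB).
	 */
	useful_space = PAGE_SZ - ASSUMED_MAX_WQE + WQEBB;		  /* 3136 bytes */
	total_pages = (wqebbs * WQEBB + useful_space - 1) / useful_space; /* 7 pages   */
	padded_wqebbs = total_pages * (PAGE_SZ / WQEBB);		  /* 448 WQEBBs */

	/* Round the ICOSQ up to a power of two, as the WQ size must be. */
	for (log_sz = 0; (1u << log_sz) < padded_wqebbs; log_sz++)
		;
	printf("need %u WQEBBs incl. padding -> log_wq_sz %u (%u entries)\n",
	       padded_wqebbs, log_sz, 1u << log_sz);
	return 0;
}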
@@ -857,7 +1212,7 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(params, xsk);
+ param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index f5c46e78eebc..034debd140bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -9,6 +9,7 @@
struct mlx5e_xsk_param {
u16 headroom;
u16 chunk_size;
+ bool unaligned;
};
struct mlx5e_cq_param {
@@ -52,37 +53,26 @@ struct mlx5e_create_sq_param {
u8 min_inline_mode;
};
-static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
- u16 qid,
- enum mlx5e_rq_group group,
- u16 *ix)
-{
- int nch = params->num_channels;
- int ch = qid - nch * group;
-
- if (ch < 0 || ch >= nch)
- return false;
-
- *ix = ch;
- return true;
-}
-
-static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
- u16 qid,
- u16 *ix,
- enum mlx5e_rq_group *group)
-{
- u16 nch = params->num_channels;
-
- *ix = qid % nch;
- *group = qid / nch;
-}
-
-static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
- struct mlx5e_params *params, u64 qid)
-{
- return qid < params->num_channels * profile->rq_groups;
-}
+/* Striding RQ dynamic parameters */
+
+u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+enum mlx5e_mpwrq_umr_mode
+mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwrq_umr_entry_size(enum mlx5e_mpwrq_umr_mode mode);
+u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_umr_wqebbs(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_mtts_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u32 mlx5e_mpwrq_max_num_entries(struct mlx5_core_dev *mdev,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
+u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
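To make the relationships between these helpers concrete, a small arithmetic sketch with assumed values; the real functions derive these numbers from device capabilities, and the 256 KB WQE, 4K page and 8-byte entry below are only examples that ignore the fixed UMR control segments.

#include <stdio.h>

int main(void)
{
	unsigned int log_wqe_sz = 18;	/* assumed mlx5e_mpwrq_log_wqe_sz(): 256 KB */
	unsigned int page_shift = 12;	/* assumed mlx5e_mpwrq_page_shift(): 4 KB   */

	/* One MPWQE covers pages_per_wqe pages; each page needs one UMR
	 * entry (MTT/KSM/KLM depending on the UMR mode).
	 */
	unsigned int pages_per_wqe = 1u << (log_wqe_sz - page_shift);	/* 64 */
	unsigned int umr_entry_sz = 8;	/* assumed: MTT-sized entries (aligned mode) */
	unsigned int umr_wqe_sz = pages_per_wqe * umr_entry_sz;		/* 512 bytes */
	unsigned int umr_wqebbs = (umr_wqe_sz + 63) / 64;		/* 8 WQEBBs  */

	printf("pages/WQE %u, UMR WQE %u bytes, %u WQEBBs\n",
	       pages_per_wqe, umr_wqe_sz, umr_wqebbs);
	return 0;
}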
/* Parameter calculations */
@@ -92,25 +82,23 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
-bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
- u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk);
-bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
+bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 903de88bab53..8469e9c38670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -622,37 +622,39 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}
-static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
+static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
if (!ptp_fs->valid)
return;
mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
- mlx5e_fs_tt_redirect_any_destroy(priv);
+ mlx5e_fs_tt_redirect_any_destroy(fs);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
- mlx5e_fs_tt_redirect_udp_destroy(priv);
+ mlx5e_fs_tt_redirect_udp_destroy(fs);
ptp_fs->valid = false;
}
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
- struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
+ struct mlx5e_flow_steering *fs = priv->fs;
struct mlx5_flow_handle *rule;
+ struct mlx5e_ptp_fs *ptp_fs;
int err;
+ ptp_fs = mlx5e_fs_get_ptp(fs);
if (ptp_fs->valid)
return 0;
- err = mlx5e_fs_tt_redirect_udp_create(priv);
+ err = mlx5e_fs_tt_redirect_udp_create(fs);
if (err)
goto out_free;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -660,7 +662,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v4_rule = rule;
- rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP,
+ rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
tirn, PTP_EV_PORT);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -668,11 +670,11 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
}
ptp_fs->udp_v6_rule = rule;
- err = mlx5e_fs_tt_redirect_any_create(priv);
+ err = mlx5e_fs_tt_redirect_any_create(fs);
if (err)
goto out_destroy_udp_v6_rule;
- rule = mlx5e_fs_tt_redirect_any_add_rule(priv, tirn, ETH_P_1588);
+ rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto out_destroy_fs_any;
@@ -683,13 +685,13 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
return 0;
out_destroy_fs_any:
- mlx5e_fs_tt_redirect_any_destroy(priv);
+ mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
- mlx5e_fs_tt_redirect_udp_destroy(priv);
+ mlx5e_fs_tt_redirect_udp_destroy(fs);
out_free:
return err;
}
@@ -723,7 +725,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (err)
goto err_free;
- netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
+ netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);
mlx5e_ptp_build_params(c, cparams, params);
@@ -797,29 +799,31 @@ int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
return 0;
}
-int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile)
{
struct mlx5e_ptp_fs *ptp_fs;
- if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
+ if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return 0;
ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
if (!ptp_fs)
return -ENOMEM;
+ mlx5e_fs_set_ptp(fs, ptp_fs);
- priv->fs->ptp_fs = ptp_fs;
return 0;
}
-void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
+void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile)
{
- struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
+ struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);
- if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
+ if (!mlx5e_profile_feature_cap(profile, PTP_RX))
return;
- mlx5e_ptp_rx_unset_fs(priv);
+ mlx5e_ptp_rx_unset_fs(fs);
kfree(ptp_fs);
}
@@ -845,6 +849,6 @@ int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
return -EINVAL;
}
- mlx5e_ptp_rx_unset_fs(priv);
+ mlx5e_ptp_rx_unset_fs(priv->fs);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 92dbbec472ec..5bce554e131a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -74,8 +74,10 @@ void mlx5e_ptp_close(struct mlx5e_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
-int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
-void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
+int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
+void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
+ const struct mlx5e_profile *profile);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index fc366e66d0b0..5f6f95ad6888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -134,38 +134,17 @@ out:
return err;
}
-static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
-{
- struct net_device *dev = rq->netdev;
- int err;
-
- err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
- if (err) {
- netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
- return err;
- }
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
- if (err) {
- netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
{
struct mlx5e_rq *rq = ctx;
int err;
mlx5e_deactivate_rq(rq);
- mlx5e_free_rx_descs(rq);
-
- err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+ err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
if (err)
- goto out;
+ return err;
- clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
mlx5e_activate_rq(rq);
rq->stats->recover++;
if (rq->channel)
@@ -173,9 +152,6 @@ static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
else
mlx5e_trigger_napi_sched(rq->cq.napi);
return 0;
-out:
- clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
- return err;
}
static int mlx5e_rx_reporter_timeout_recover(void *ctx)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 24c32f73040a..e1095bc36543 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -24,8 +24,6 @@ struct mlx5e_rx_res {
struct {
struct mlx5e_rqt direct_rqt;
struct mlx5e_tir direct_tir;
- struct mlx5e_rqt xsk_rqt;
- struct mlx5e_tir xsk_tir;
} *channels;
struct {
@@ -320,48 +318,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
mlx5e_tir_builder_clear(builder);
}
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- goto out;
-
- for (ix = 0; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt,
- res->mdev, false, res->drop_rqn);
- if (err) {
- mlx5_core_warn(res->mdev, "Failed to create an XSK RQT: err = %d, ix = %u\n",
- err, ix);
- goto err_destroy_xsk_rqts;
- }
- }
-
- for (ix = 0; ix < res->max_nch; ix++) {
- mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
- mlx5e_tir_builder_build_direct(builder);
-
- err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
- if (err) {
- mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n",
- err, ix);
- goto err_destroy_xsk_tirs;
- }
-
- mlx5e_tir_builder_clear(builder);
- }
-
goto out;
-err_destroy_xsk_tirs:
- while (--ix >= 0)
- mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
-
- ix = res->max_nch;
-err_destroy_xsk_rqts:
- while (--ix >= 0)
- mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
-
- ix = res->max_nch;
err_destroy_direct_tirs:
while (--ix >= 0)
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
@@ -420,12 +378,6 @@ static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res)
for (ix = 0; ix < res->max_nch; ix++) {
mlx5e_tir_destroy(&res->channels[ix].direct_tir);
mlx5e_rqt_destroy(&res->channels[ix].direct_rqt);
-
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
-
- mlx5e_tir_destroy(&res->channels[ix].xsk_tir);
- mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt);
}
kvfree(res->channels);
@@ -491,13 +443,6 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
}
-u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix)
-{
- WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK));
-
- return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir);
-}
-
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt)
{
struct mlx5e_rss *rss = res->rss[0];
@@ -523,56 +468,53 @@ static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int i
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
-void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
+ struct mlx5e_channels *chs,
+ unsigned int ix)
{
- unsigned int nch, ix;
+ u32 rqn = res->rss_rqns[ix];
int err;
- nch = mlx5e_channels_get_num(chs);
-
- for (ix = 0; ix < chs->num; ix++)
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
- res->rss_nch = chs->num;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ rqn, ix, err);
+}
- mlx5e_rx_res_rss_enable(res);
+static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
+ unsigned int ix)
+{
+ int err;
- for (ix = 0; ix < nch; ix++) {
- u32 rqn;
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ if (err)
+ mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
+ mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
+ res->drop_rqn, ix, err);
+}
- mlx5e_channels_get_regular_rqn(chs, ix, &rqn);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- rqn, ix, err);
+void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs)
+{
+ unsigned int nch, ix;
+ int err;
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
+ nch = mlx5e_channels_get_num(chs);
- if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
- rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- rqn, ix, err);
+ for (ix = 0; ix < chs->num; ix++) {
+ if (mlx5e_channels_is_xsk(chs, ix))
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ else
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
}
- for (ix = nch; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- res->drop_rqn, ix, err);
+ res->rss_nch = chs->num;
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
+ mlx5e_rx_res_rss_enable(res);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- }
+ for (ix = 0; ix < nch; ix++)
+ mlx5e_rx_res_channel_activate_direct(res, chs, ix);
+ for (ix = nch; ix < res->max_nch; ix++)
+ mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
@@ -595,22 +537,8 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_rss_disable(res);
- for (ix = 0; ix < res->max_nch; ix++) {
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
- res->drop_rqn, ix, err);
-
- if (!(res->features & MLX5E_RX_RES_FEATURE_XSK))
- continue;
-
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- }
+ for (ix = 0; ix < res->max_nch; ix++)
+ mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
@@ -621,33 +549,17 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
}
}
-int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
- unsigned int ix)
+void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix, bool xsk)
{
- u32 rqn;
- int err;
-
- if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn))
- return -EINVAL;
-
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- rqn, ix, err);
- return err;
-}
+ if (xsk)
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ else
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
-int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
-{
- int err;
+ mlx5e_rx_res_rss_enable(res);
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn);
- if (err)
- mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n",
- mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
- res->drop_rqn, ix, err);
- return err;
+ mlx5e_rx_res_channel_activate_direct(res, chs, ix);
}
int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index b39b20a720e0..5d5f64fab60f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -17,8 +17,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
- MLX5E_RX_RES_FEATURE_XSK = BIT(1),
- MLX5E_RX_RES_FEATURE_PTP = BIT(2),
+ MLX5E_RX_RES_FEATURE_PTP = BIT(1),
};
/* Setup */
@@ -32,7 +31,6 @@ void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
/* TIRN getters for flow steering */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
@@ -40,9 +38,8 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
-int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
- unsigned int ix);
-int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix);
+void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
+ unsigned int ix, bool xsk);
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 69949ab830b6..25174f68613e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -12,6 +12,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
bool is_esw = mlx5e_is_eswitch_flow(flow);
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
@@ -21,7 +22,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
- chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
+ chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index 11f2a7fb72a9..201ac7dd338f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -147,7 +147,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
- netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
+ netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll);
err = mlx5e_open_trap_rq(priv, t);
if (unlikely(err))
@@ -230,12 +230,12 @@ static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
- err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ err = mlx5e_add_vlan_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
- err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ err = mlx5e_add_mac_trap(priv->fs, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
@@ -256,10 +256,10 @@ static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
{
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
- mlx5e_remove_vlan_trap(priv);
+ mlx5e_remove_vlan_trap(priv->fs);
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
- mlx5e_remove_mac_trap(priv);
+ mlx5e_remove_mac_trap(priv->fs);
break;
default:
netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index c208ea307bff..4456ad5cedf1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -439,16 +439,24 @@ static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}
-static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
- u16 room = sq->reserved_room;
+ u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
- WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
- "wqe_size %u is greater than max SQ WQEBBs %u",
- wqe_size, sq->max_sq_wqebbs);
+ return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
+}
- room += MLX5E_STOP_ROOM(wqe_size);
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+ u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);
return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
+
+static inline struct mlx5e_mpw_info *mlx5e_get_mpw_info(struct mlx5e_rq *rq, int i)
+{
+ size_t isz = struct_size(rq->mpwqe.info, alloc_units, rq->mpwqe.pages_per_wqe);
+
+ return (struct mlx5e_mpw_info *)((char *)rq->mpwqe.info + array_size(i, isz));
+}
#endif
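The pointer arithmetic in mlx5e_get_mpw_info() indexes an array of variable-length records; below is a minimal standalone model of that layout. The struct fields are stand-ins, not the real mlx5e_mpw_info, and plain multiplication replaces the kernel's overflow-checked struct_size()/array_size().

#include <stdio.h>
#include <stdlib.h>

/* Stand-in: a fixed header plus a flexible array whose length
 * (pages_per_wqe) is only known at runtime.
 */
struct mpw_info {
	unsigned int consumed_strides;
	unsigned long long alloc_units[];	/* flexible array member */
};

static struct mpw_info *get_mpw_info(void *base, size_t pages_per_wqe, size_t i)
{
	size_t isz = sizeof(struct mpw_info) +
		     pages_per_wqe * sizeof(unsigned long long);

	return (struct mpw_info *)((char *)base + i * isz);
}

int main(void)
{
	size_t pages_per_wqe = 4, nwqes = 8;
	size_t isz = sizeof(struct mpw_info) +
		     pages_per_wqe * sizeof(unsigned long long);
	void *base = calloc(nwqes, isz);

	if (!base)
		return 1;
	get_mpw_info(base, pages_per_wqe, 3)->consumed_strides = 42;
	printf("record size %zu, WQE 3 strides %u\n", isz,
	       get_mpw_info(base, pages_per_wqe, 3)->consumed_strides);
	free(base);
	return 0;
}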
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 8f321a6c0809..4685c652c97e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -333,7 +333,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 287e17911251..bc2d9034af5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -122,7 +122,7 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 2c520394aa1d..ebada0c5af3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -72,6 +72,7 @@ void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *x
{
xsk->headroom = xsk_pool_get_headroom(pool);
xsk->chunk_size = xsk_pool_get_chunk_size(pool);
+ xsk->unaligned = pool->unaligned;
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
@@ -98,6 +99,15 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
mlx5e_build_xsk_param(pool, &xsk);
+ if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ mlx5e_mpwrq_umr_mode(priv->mdev, &xsk) == MLX5E_MPWRQ_UMR_MODE_OVERSIZED) {
+ const char *recommendation = is_power_of_2(xsk.chunk_size) ?
+ "Upgrade firmware" : "Disable striding RQ";
+
+ mlx5_core_warn(priv->mdev, "Expected slowdown with XSK frame size %u. %s for better performance.\n",
+ xsk.chunk_size, recommendation);
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
/* XSK objects will be created on open. */
goto validate_closed;
@@ -123,15 +133,12 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
* any Fill Ring entries at the setup stage.
*/
- err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix);
- if (unlikely(err))
- goto err_deactivate;
+ mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, true);
- return 0;
+ mlx5e_deactivate_rq(&c->rq);
+ mlx5e_flush_rq(&c->rq, MLX5_RQC_STATE_RDY);
-err_deactivate:
- mlx5e_deactivate_xsk(c);
- mlx5e_close_xsk(c);
+ return 0;
err_remove_pool:
mlx5e_xsk_remove_pool(&priv->xsk, ix);
@@ -170,7 +177,13 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
goto remove_pool;
c = priv->channels.c[ix];
- mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix);
+
+ mlx5e_activate_rq(&c->rq);
+ mlx5e_trigger_napi_icosq(c);
+ mlx5e_wait_for_min_rx_wqes(&c->rq, MLX5E_RQ_WQES_TIMEOUT);
+
+ mlx5e_rx_res_xsk_update(priv->rx_res, &priv->channels, ix, false);
+
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
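The ordering of the enable/disable paths above can be summarized with a small runnable sketch; the stubs below only log, and they model (not reproduce) the driver helpers named in this patch.

#include <stdio.h>

/* Local stubs standing in for the driver helpers; they only print, to show
 * the ordering, not the real behaviour.
 */
static void rx_res_xsk_update(int ch, int xsk) { printf("RQT[%d] -> %s RQ\n", ch, xsk ? "XSK" : "regular"); }
static void deactivate_rq(const char *rq)      { printf("deactivate %s\n", rq); }
static void flush_rq(const char *rq)           { printf("flush %s back to ready\n", rq); }
static void activate_rq(const char *rq)        { printf("activate %s\n", rq); }
static void wait_min_wqes(const char *rq)      { printf("wait for min WQEs on %s\n", rq); }

int main(void)
{
	int ch = 0;

	/* Enable: steer traffic to the XSK RQ first, then park the regular RQ. */
	rx_res_xsk_update(ch, 1);
	deactivate_rq("regular RQ");
	flush_rq("regular RQ");

	/* Disable: refill and reactivate the regular RQ before switching back. */
	activate_rq("regular RQ");
	wait_min_wqes("regular RQ");
	rx_res_xsk_update(ch, 0);
	return 0;
}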
@@ -208,11 +221,10 @@ int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
- u16 ix;
- if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ if (unlikely(qid >= params->num_channels))
return -EINVAL;
- return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
- mlx5e_xsk_disable_pool(priv, ix);
+ return pool ? mlx5e_xsk_enable_pool(priv, pool, qid) :
+ mlx5e_xsk_disable_pool(priv, qid);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 9a1553598a7c..c91b54d9ff27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,18 +8,221 @@
/* RX data path */
-static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, void *data,
- u32 cqe_bcnt)
+int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+ struct mlx5e_icosq *icosq = rq->icosq;
+ struct mlx5_wq_cyc *wq = &icosq->wq;
+ struct mlx5e_umr_wqe *umr_wqe;
+ int batch, i;
+ u32 offset; /* 17-bit value with MTT. */
+ u16 pi;
+
+ if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))
+ goto err;
+
+ BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+ batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
+ rq->mpwqe.pages_per_wqe);
+
+ /* If batch < pages_per_wqe, either:
+ * 1. Some (or all) descriptors were invalid.
+ * 2. dma_need_sync is true, and it fell back to allocating one frame.
+ * In either case, try to continue allocating frames one by one, until
+ * the first error, which will mean there are no more valid descriptors.
+ */
+ for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
+ wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!wi->alloc_units[batch].xsk))
+ goto err_reuse_batch;
+ }
+
+ pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);
+ umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
+
+ if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(addr | MLX5_EN_WR),
+ };
+ }
+ } else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ };
+ }
+ } else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
+ u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 1] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr + mapping_size),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 2] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr + mapping_size * 2),
+ };
+ umr_wqe->inline_ksms[(i << 2) + 3] = (struct mlx5_ksm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(rq->wqe_overflow.addr),
+ };
+ }
+ } else {
+ __be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
+ rq->xsk_pool->chunk_size);
+ __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
+
+ umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(addr),
+ .bcount = frame_size,
+ };
+ umr_wqe->inline_klms[(i << 1) + 1] = (struct mlx5_klm) {
+ .key = rq->mkey_be,
+ .va = cpu_to_be64(rq->wqe_overflow.addr),
+ .bcount = pad_size,
+ };
+ }
+ }
+
+ bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
+ wi->consumed_strides = 0;
+
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
+
+ /* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
+ offset = ix * rq->mpwqe.mtts_per_wqe;
+ if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ offset = offset * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
+ else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_OVERSIZED))
+ offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
+ else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
+ offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+
+ icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
+ .num_wqebbs = rq->mpwqe.umr_wqebbs,
+ .umr.rq = rq,
+ };
+
+ icosq->pc += rq->mpwqe.umr_wqebbs;
+
+ icosq->doorbell_cseg = &umr_wqe->ctrl;
+
+ return 0;
+
+err_reuse_batch:
+ while (--batch >= 0)
+ xsk_buff_free(wi->alloc_units[batch].xsk);
+
+err:
+ rq->stats->buff_alloc_err++;
+ return -ENOMEM;
+}
+
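The xlt_offset computation above converts a WQE index into an offset measured in 16-byte octwords; here is a standalone sketch of the per-mode scaling, where the 8-byte MTT and 16-byte KSM/KLM entry sizes are assumptions matching the usual layouts.

#include <stdio.h>

#define OCTWORD	16	/* MLX5_OCTWORD */

static unsigned int xlt_offset(unsigned int ix, unsigned int mtts_per_wqe,
			       unsigned int entry_sz, unsigned int entries_per_page)
{
	unsigned int off = ix * mtts_per_wqe;

	/* Scale by how many bytes of translation entries one page expands
	 * to in this UMR mode, then convert to octwords.
	 */
	return off * entry_sz * entries_per_page / OCTWORD;
}

int main(void)
{
	unsigned int ix = 5, mtts_per_wqe = 64;

	printf("aligned (MTT):        %u\n", xlt_offset(ix, mtts_per_wqe, 8, 1));
	printf("unaligned (KSM):      %u\n", xlt_offset(ix, mtts_per_wqe, 16, 1));
	printf("oversized (2 x KLM):  %u\n", xlt_offset(ix, mtts_per_wqe, 16, 2));
	printf("triple (4 x KSM):     %u\n", xlt_offset(ix, mtts_per_wqe, 16, 4));
	return 0;
}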
+int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ struct xdp_buff **buffs;
+ u32 contig, alloc;
+ int i;
+
+ /* mlx5e_init_frags_partition creates a 1:1 mapping between
+ * rq->wqe.frags and rq->wqe.alloc_units, which allows us to
+ * allocate XDP buffers straight into alloc_units.
+ */
+ BUILD_BUG_ON(sizeof(rq->wqe.alloc_units[0]) !=
+ sizeof(rq->wqe.alloc_units[0].xsk));
+ buffs = (struct xdp_buff **)rq->wqe.alloc_units;
+ contig = mlx5_wq_cyc_get_size(wq) - ix;
+ if (wqe_bulk <= contig) {
+ alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);
+ } else {
+ alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);
+ if (likely(alloc == contig))
+ alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);
+ }
+
+ for (i = 0; i < alloc; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+ struct mlx5e_rx_wqe_cyc *wqe;
+ dma_addr_t addr;
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
+ /* Assumes log_num_frags == 0. */
+ frag = &rq->wqe.frags[j];
+
+ addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+ }
+
+ return alloc;
+}
+
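The contiguous-then-wrap split in the batched allocation above can be shown with a tiny ring model; the ring size and bulk are example numbers, and xsk_buff_alloc_batch() is replaced by a stub that always succeeds.

#include <stdio.h>

/* Stub standing in for xsk_buff_alloc_batch(): pretend every slot in the
 * requested range can be filled.
 */
static unsigned int alloc_batch(unsigned int first, unsigned int count)
{
	printf("fill slots [%u..%u]\n", first, first + count - 1);
	return count;
}

int main(void)
{
	unsigned int wq_size = 16;	/* example cyclic WQ size */
	unsigned int ix = 13;		/* current producer index */
	unsigned int wqe_bulk = 8;	/* example bulk           */
	unsigned int contig = wq_size - ix;	/* slots before the wrap: 3 */
	unsigned int alloc;

	if (wqe_bulk <= contig) {
		alloc = alloc_batch(ix, wqe_bulk);
	} else {
		/* Two calls: up to the end of the ring, then from slot 0. */
		alloc = alloc_batch(ix, contig);
		if (alloc == contig)
			alloc += alloc_batch(0, wqe_bulk - contig);
	}
	printf("allocated %u of %u WQEs\n", alloc, wqe_bulk);
	return 0;
}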
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
+{
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ int i;
+
+ for (i = 0; i < wqe_bulk; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+ struct mlx5e_rx_wqe_cyc *wqe;
+ dma_addr_t addr;
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
+ /* Assumes log_num_frags == 0. */
+ frag = &rq->wqe.frags[j];
+
+ frag->au->xsk = xsk_buff_alloc(rq->xsk_pool);
+ if (unlikely(!frag->au->xsk))
+ return i;
+
+ addr = xsk_buff_xdp_get_frame_dma(frag->au->xsk);
+ wqe->data[0].addr = cpu_to_be64(addr + rq->buff.headroom);
+ }
+
+ return wqe_bulk;
+}
+
+static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_buff *xdp)
+{
+ u32 totallen = xdp->data_end - xdp->data_meta;
+ u32 metalen = xdp->data - xdp->data_meta;
struct sk_buff *skb;
- skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt);
+ skb = napi_alloc_skb(rq->cq.napi, totallen);
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
return NULL;
}
- skb_put_data(skb, data, cqe_bcnt);
+ skb_put_data(skb, xdp->data_meta, totallen);
+
+ if (metalen) {
+ skb_metadata_set(skb, metalen);
+ __skb_pull(skb, metalen);
+ }
return skb;
}
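The metadata handling in mlx5e_xsk_construct_skb() copies the whole [data_meta, data_end) range and then marks the first metalen bytes as metadata; a worked layout example with assumed offsets, where the skb helpers are modelled by plain arithmetic.

#include <stdio.h>

int main(void)
{
	/* Assumed frame layout inside the XSK buffer (offsets in bytes). */
	unsigned int data_meta = 64;	/* start of XDP metadata   */
	unsigned int data      = 96;	/* start of packet payload */
	unsigned int data_end  = 1596;	/* end of packet payload   */

	unsigned int totallen = data_end - data_meta;	/* bytes copied: 1532 */
	unsigned int metalen  = data - data_meta;	/* metadata: 32 bytes */

	/* skb_put_data() copies totallen bytes starting at data_meta;
	 * skb_metadata_set(skb, metalen) + __skb_pull(skb, metalen) then
	 * leave skb->data at the packet start with the metadata in front.
	 */
	printf("copy %u bytes, mark %u as metadata, %u bytes of packet remain\n",
	       totallen, metalen, totallen - metalen);
	return 0;
}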
@@ -30,7 +233,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
+ struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -46,8 +249,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xdp->data_end = xdp->data + cqe_bcnt;
- xdp_set_data_meta_invalid(xdp);
+ xsk_buff_set_size(xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -76,14 +278,14 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+ return mlx5e_xsk_construct_skb(rq, xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->di->xsk;
+ struct xdp_buff *xdp = wi->au->xsk;
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
@@ -93,8 +295,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xdp->data_end = xdp->data + cqe_bcnt;
- xdp_set_data_meta_invalid(xdp);
+ xsk_buff_set_size(xdp, cqe_bcnt);
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
@@ -103,8 +304,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
- * will be handled by mlx5e_put_rx_frag.
+ * will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+ return mlx5e_xsk_construct_skb(rq, xdp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index cc18d97d8ee0..087c943bd8e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -5,12 +5,12 @@
#define __MLX5_EN_XSK_RX_H__
#include "en.h"
-#include <net/xdp_sock_drv.h>
-
-#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL
/* RX data path */
+int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
+int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
u16 cqe_bcnt,
@@ -20,46 +20,4 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
-static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
-retry:
- dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
- if (!dma_info->xsk)
- return -ENOMEM;
-
- /* Store the DMA address without headroom. In striding RQ case, we just
- * provide pages for UMR, and headroom is counted at the setup stage
- * when creating a WQE. In non-striding RQ case, headroom is accounted
- * in mlx5e_alloc_rx_wqe.
- */
- dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
-
- /* MTT page mapping has alignment requirements. If they are not
- * satisfied, leak the descriptor so that it won't come again, and try
- * to allocate a new one.
- */
- if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) {
- xsk_buff_discard(dma_info->xsk);
- goto retry;
- }
- }
-
- return 0;
-}
-
-static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
-{
- if (!xsk_uses_need_wakeup(rq->xsk_pool))
- return alloc_err;
-
- if (unlikely(alloc_err))
- xsk_set_rx_need_wakeup(rq->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rq->xsk_pool);
-
- return false;
-}
-
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 98ed9ef3a6bd..ff03c43833bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -5,24 +5,19 @@
#include "en/params.h"
#include "en/txrx.h"
#include "en/health.h"
+#include <net/xdp_sock_drv.h>
-/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
- * change unexpectedly, and mlx5e has a minimum valid stride size for striding
- * RQ, keep this check in the driver.
+/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
+ * stride size of striding RQ.
*/
-#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
+#define MLX5E_MIN_XSK_CHUNK_SIZE max(2048, XDP_UMEM_MIN_CHUNK_SIZE)
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE ||
- xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
- return false;
-
- /* Current MTU and XSK headroom don't allow packets to fit the frames. */
- if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
+ if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
return false;
/* frag_sz is different for regular and XSK RQs, so ensure that linear
@@ -30,9 +25,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
*/
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+ return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
default: /* MLX5_WQ_TYPE_CYCLIC */
- return mlx5e_rx_is_linear_skb(params, xsk);
+ return mlx5e_rx_is_linear_skb(mdev, params, xsk);
}
}
@@ -71,7 +66,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
rq->xsk_pool = pool;
rq->stats = &c->priv->channel_stats[c->ix]->xskrq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
- rq_xdp_ix = c->ix + params->num_channels * MLX5E_RQ_GROUP_XSK;
+ rq_xdp_ix = c->ix;
err = mlx5e_rq_set_handlers(rq, params, xsk);
if (err)
return err;
@@ -159,7 +154,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 4902ef74fedf..367a9505ca4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -12,18 +12,14 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_channel *c;
- u16 ix;
if (unlikely(!mlx5e_xdp_is_active(priv)))
return -ENETDOWN;
- if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
+ if (unlikely(qid >= params->num_channels))
return -EINVAL;
- c = priv->channels.c[ix];
-
- if (unlikely(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)))
- return -EINVAL;
+ c = priv->channels.c[qid];
if (!napi_if_scheduled_mark_missed(&c->napi)) {
/* To avoid WQE overrun, don't post a NOP if async_icosq is not
@@ -36,9 +32,7 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
return 0;
- spin_lock_bh(&c->async_icosq_lock);
- mlx5e_trigger_irq(&c->async_icosq);
- spin_unlock_bh(&c->async_icosq_lock);
+ mlx5e_trigger_napi_icosq(c);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index a05085035f23..9c505158b975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -5,7 +5,6 @@
#define __MLX5_EN_XSK_TX_H__
#include "en.h"
-#include <net/xdp_sock_drv.h>
/* TX data path */
@@ -13,15 +12,4 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
-static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
-{
- if (!xsk_uses_need_wakeup(sq->xsk_pool))
- return;
-
- if (sq->pc != sq->cc)
- xsk_clear_tx_need_wakeup(sq->xsk_pool);
- else
- xsk_set_tx_need_wakeup(sq->xsk_pool);
-}
-
#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1839f1ab1ddd..07187028f0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -39,6 +39,7 @@
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
+#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
@@ -137,6 +138,15 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
+ return false;
+ }
+#endif
+
return true;
}
@@ -163,6 +173,11 @@ static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif
+#ifdef CONFIG_MLX5_EN_MACSEC
+ if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
+ mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
+#endif
+
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 20a4f1e585af..285d32d2fd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-#include <linux/netdevice.h>
+#include <mlx5_core.h>
#include "en_accel/fs_tcp.h"
#include "fs_core.h"
@@ -71,13 +71,13 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
struct mlx5e_flow_table *ft = NULL;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
@@ -86,19 +86,17 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
- fs_tcp = priv->fs->accel_tcp;
-
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (sk->sk_family) {
case AF_INET:
accel_fs_tcp_set_ipv4_flow(spec, sk);
ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
- mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
- &inet_sk(sk)->inet_rcv_saddr,
- inet_sk(sk)->inet_sport,
- &inet_sk(sk)->inet_daddr,
- inet_sk(sk)->inet_dport);
+ fs_dbg(fs, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
+ &inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_sport,
+ &inet_sk(sk)->inet_daddr,
+ inet_sk(sk)->inet_dport);
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
@@ -140,34 +138,32 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
- PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
out:
kvfree(spec);
return flow;
}
-static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
+static int accel_fs_tcp_add_default_rule(struct mlx5e_flow_steering *fs,
enum accel_fs_tcp_type type)
{
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5e_flow_table *accel_fs_t;
struct mlx5_flow_destination dest;
- struct mlx5e_accel_fs_tcp *fs_tcp;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
int err = 0;
- fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
- dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
+ dest = mlx5_ttc_get_default_dest(ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev,
- "%s: add default rule failed, accel_fs type=%d, err %d\n",
- __func__, type, err);
+ fs_err(fs, "%s: add default rule failed, accel_fs type=%d, err %d\n",
+ __func__, type, err);
return err;
}
@@ -265,9 +261,11 @@ out:
return err;
}
-static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
+static int accel_fs_tcp_create_table(struct mlx5e_flow_steering *fs, enum accel_fs_tcp_type type)
{
- struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_flow_table *ft = &accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -277,21 +275,21 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
- ft->t->id, ft->t->level);
+ fs_dbg(fs, "Created fs accel table id %u level %u\n",
+ ft->t->id, ft->t->level);
err = accel_fs_tcp_create_groups(ft, type);
if (err)
goto err;
- err = accel_fs_tcp_add_default_rule(priv, type);
+ err = accel_fs_tcp_add_default_rule(fs, type);
if (err)
goto err;
@@ -301,17 +299,18 @@ err:
return err;
}
-static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, fs_accel2tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
@@ -319,32 +318,32 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
return 0;
}
-static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
+static int accel_fs_tcp_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- dest.ft = priv->fs->accel_tcp->tables[i].t;
+ dest.ft = accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, fs_accel2tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
- __func__, fs_accel2tt(i), err);
+ fs_err(fs, "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+ __func__, fs_accel2tt(i), err);
return err;
}
}
return 0;
}
-static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
+static void accel_fs_tcp_destroy_table(struct mlx5e_flow_steering *fs, int i)
{
- struct mlx5e_accel_fs_tcp *fs_tcp;
+ struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
- fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
@@ -353,40 +352,43 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
fs_tcp->tables[i].t = NULL;
}
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp = mlx5e_fs_get_accel_tcp(fs);
int i;
- if (!priv->fs->accel_tcp)
+ if (!accel_tcp)
return;
- accel_fs_tcp_disable(priv);
+ accel_fs_tcp_disable(fs);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
- accel_fs_tcp_destroy_table(priv, i);
+ accel_fs_tcp_destroy_table(fs, i);
- kfree(priv->fs->accel_tcp);
- priv->fs->accel_tcp = NULL;
+ kfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
}
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_accel_fs_tcp *accel_tcp;
int i, err;
- if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
- if (!priv->fs->accel_tcp)
+ accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
+ if (!accel_tcp)
return -ENOMEM;
+ mlx5e_fs_set_accel_tcp(fs, accel_tcp);
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
- err = accel_fs_tcp_create_table(priv, i);
+ err = accel_fs_tcp_create_table(fs, i);
if (err)
goto err_destroy_tables;
}
- err = accel_fs_tcp_enable(priv);
+ err = accel_fs_tcp_enable(fs);
if (err)
goto err_destroy_tables;
@@ -394,9 +396,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_tables:
while (--i >= 0)
- accel_fs_tcp_destroy_table(priv, i);
-
- kfree(priv->fs->accel_tcp);
- priv->fs->accel_tcp = NULL;
+ accel_fs_tcp_destroy_table(fs, i);
+ kfree(accel_tcp);
+ mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
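
The conversion above assumes the flow-steering object now exposes trivial accessors for its accel_tcp pointer; a hedged sketch of what mlx5e_fs_get_accel_tcp()/mlx5e_fs_set_accel_tcp() would look like (the mlx5e_flow_steering layout implied here is illustrative, not the driver's actual definition):

	/* Illustrative only: assumes struct mlx5e_flow_steering keeps an
	 * accel_tcp pointer that these accessors expose to en_accel code.
	 */
	struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
	{
		return fs->accel_tcp;
	}

	void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs,
				    struct mlx5e_accel_fs_tcp *accel_tcp)
	{
		fs->accel_tcp = accel_tcp;
	}
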
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index 589235824543..a032bff482a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -4,19 +4,19 @@
#ifndef __MLX5E_ACCEL_FS_TCP_H__
#define __MLX5E_ACCEL_FS_TCP_H__
-#include "en.h"
+#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
-void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
-struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
-static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
-static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
+static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
+static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
uint32_t flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index f8113fd23265..b859e4a4c744 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -174,6 +174,8 @@ static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -182,15 +184,14 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
-
fs_prot->default_dest =
- mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
+ mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
@@ -205,7 +206,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
@@ -230,6 +231,7 @@ err_add:
static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_esp *accel_esp;
@@ -249,7 +251,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
+ mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
@@ -260,6 +262,7 @@ out:
static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -271,7 +274,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
@@ -385,7 +388,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
0xff, 16);
}
- flow_act->ipsec_obj_id = ipsec_obj_id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act->crypto.obj_id = ipsec_obj_id;
flow_act->flags |= FLOW_ACT_NO_APPEND;
}
@@ -441,7 +445,7 @@ static int rx_add_rule(struct mlx5e_priv *priv,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
flow_act.modify_hdr = modify_hdr;
@@ -497,7 +501,7 @@ static int tx_add_rule(struct mlx5e_priv *priv,
MLX5_ETH_WQE_FT_META_IPSEC);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -573,7 +577,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
int err = -ENOMEM;
ns = mlx5_get_flow_namespace(ipsec->mdev,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
if (!ns)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 0ae4e12ce528..1878a70b9031 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -39,9 +39,9 @@
#include "en.h"
#include "en/txrx.h"
-/* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */
+/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
-#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0))
+#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
struct mlx5e_accel_tx_ipsec_state {
@@ -77,11 +77,6 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
}
-static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
-{
- return ipsec_st->x;
-}
-
static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
{
return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
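
A quick worked decode of the revised metadata layout using the macros above; the metadata value is made up for illustration:

	u32 metadata = 0x81abcdef;	/* marker = 1, syndrome = 0x01, obj id = 0xabcdef */

	MLX5_IPSEC_METADATA_MARKER(metadata);	/* bit 31      -> true     */
	MLX5_IPSEC_METADATA_SYNDROM(metadata);	/* bits 29..24 -> 0x01     */
	MLX5_IPSEC_METADATA_HANDLE(metadata);	/* bits 23..0  -> 0xabcdef */
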
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 30a70d139046..da2184c94203 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -92,6 +92,24 @@ static const struct tlsdev_ops mlx5e_ktls_ops = {
.tls_dev_resync = mlx5e_ktls_resync,
};
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ return false;
+
+ /* Check the possibility to post the required ICOSQ WQEs. */
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS))
+ return false;
+ if (WARN_ON_ONCE(max_sq_wqebbs < MLX5E_KTLS_GET_PROGRESS_WQEBBS))
+ return false;
+
+ return true;
+}
+
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
@@ -118,9 +136,9 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
if (enable)
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
else
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
mutex_unlock(&priv->state_lock);
return err;
@@ -138,7 +156,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
- err = mlx5e_accel_fs_tcp_create(priv);
+ err = mlx5e_accel_fs_tcp_create(priv->fs);
if (err) {
destroy_workqueue(priv->tls->rx_wq);
return err;
@@ -154,7 +172,7 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
- mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_fs_tcp_destroy(priv->fs);
destroy_workqueue(priv->tls->rx_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 948400dee525..1c35045e41fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -25,7 +25,8 @@ static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
+ return (MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128) ||
+ MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256));
}
static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
@@ -36,6 +37,10 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
if (crypto_info->version == TLS_1_2_VERSION)
return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
break;
+ case TLS_CIPHER_AES_GCM_256:
+ if (crypto_info->version == TLS_1_2_VERSION)
+ return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256);
+ break;
}
return false;
@@ -56,10 +61,7 @@ static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
}
-static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_rx);
-}
+bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
struct mlx5e_tls_sw_stats {
atomic64_t tx_tls_ctx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 27483aa7be8a..3e54834747ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx {
};
struct mlx5e_ktls_offload_context_rx {
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct accel_rule rule;
struct sock *sk;
struct mlx5e_rq_stats *rq_stats;
@@ -111,7 +111,7 @@ static void accel_rule_handle_work(struct work_struct *work)
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
- rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
+ rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
mlx5e_tir_get_tirn(&priv_rx->tir),
MLX5_FS_DEFAULT_FLOW_TAG);
if (!IS_ERR_OR_NULL(rule))
@@ -362,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
struct mlx5e_channel *c)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
struct mlx5e_ktls_resync_resp *ktls_resync;
struct mlx5e_icosq *sq;
bool trigger_poll;
@@ -373,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
spin_lock_bh(&ktls_resync->lock);
spin_lock_bh(&priv_rx->lock);
- memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+ switch (priv_rx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &priv_rx->crypto_info.crypto_info_128;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &priv_rx->crypto_info.crypto_info_256;
+
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
+ sizeof(info->rec_seq));
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_rx->crypto_info.crypto_info.cipher_type);
+ spin_unlock_bh(&priv_rx->lock);
+ spin_unlock_bh(&ktls_resync->lock);
+ return;
+ }
+
if (list_empty(&priv_rx->list)) {
list_add_tail(&priv_rx->list, &ktls_resync->list);
trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
@@ -461,6 +484,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct net_device *netdev = rq->netdev;
+ struct net *net = dev_net(netdev);
struct sock *sk = NULL;
unsigned int datalen;
struct iphdr *iph;
@@ -475,7 +499,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
@@ -485,7 +509,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
@@ -603,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
- priv_rx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_rx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_rx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 3a1f76eac542..2e0335246967 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -93,7 +93,7 @@ struct mlx5e_ktls_offload_context_tx {
bool ctx_post_pending;
/* control / resync */
struct list_head list_node; /* member of the pool */
- struct tls12_crypto_info_aes_gcm_128 crypto_info;
+ union mlx5e_crypto_info crypto_info;
struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats;
@@ -485,8 +485,20 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
goto err_create_key;
priv_tx->expected_seq = start_offload_tcp_sn;
- priv_tx->crypto_info =
- *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ priv_tx->crypto_info.crypto_info_128 =
+ *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ priv_tx->crypto_info.crypto_info_256 =
+ *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->cipher_type);
+ return -EOPNOTSUPP;
+ }
priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -671,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
struct mlx5e_ktls_offload_context_tx *priv_tx,
u64 rcd_sn)
{
- struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
__be64 rn_be = cpu_to_be64(rcd_sn);
bool skip_static_post;
u16 rec_seq_sz;
char *rec_seq;
- rec_seq = info->rec_seq;
- rec_seq_sz = sizeof(info->rec_seq);
+ switch (priv_tx->crypto_info.crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;
+
+ rec_seq = info->rec_seq;
+ rec_seq_sz = sizeof(info->rec_seq);
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ priv_tx->crypto_info.crypto_info.cipher_type);
+ return;
+ }
skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
if (!skip_static_post)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
index ac29aeb8af49..570a912dd6fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
@@ -21,7 +21,7 @@ enum {
static void
fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 key_id, u32 resync_tcp_sn)
{
char *initial_rn, *gcm_iv;
@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
ctx = params->ctx;
- EXTRACT_INFO_FIELDS;
+ switch (crypto_info->crypto_info.cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *info =
+ &crypto_info->crypto_info_128;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *info =
+ &crypto_info->crypto_info_256;
+
+ EXTRACT_INFO_FIELDS;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "Unsupported cipher type %u\n",
+ crypto_info->crypto_info.cipher_type);
+ return;
+ }
gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction)
{
@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
- fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);
+ fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
index 0dc715c4c10d..3d79cd379890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
@@ -27,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);
+union mlx5e_crypto_info {
+ struct tls_crypto_info crypto_info;
+ struct tls12_crypto_info_aes_gcm_128 crypto_info_128;
+ struct tls12_crypto_info_aes_gcm_256 crypto_info_256;
+};
+
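
The union above lets both the TX and RX kTLS contexts keep a single crypto_info field and branch on crypto_info.cipher_type. As a sketch only (this helper is hypothetical and not part of the patch), the repeated switch statements could share something like:

	/* Hypothetical helper: return the record-sequence buffer of whichever
	 * cipher variant the union currently holds, and its size.
	 */
	static inline char *mlx5e_crypto_info_rec_seq(union mlx5e_crypto_info *ci, u16 *sz)
	{
		switch (ci->crypto_info.cipher_type) {
		case TLS_CIPHER_AES_GCM_128:
			*sz = sizeof(ci->crypto_info_128.rec_seq);
			return ci->crypto_info_128.rec_seq;
		case TLS_CIPHER_AES_GCM_256:
			*sz = sizeof(ci->crypto_info_256.rec_seq);
			return ci->crypto_info_256.rec_seq;
		default:
			*sz = 0;
			return NULL;
		}
	}
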
struct mlx5e_set_tls_static_params_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
@@ -72,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe {
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
u16 pc, u32 sqn,
- struct tls12_crypto_info_aes_gcm_128 *info,
+ union mlx5e_crypto_info *crypto_info,
u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
bool fence, enum tls_offload_ctx_dir direction);
void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
new file mode 100644
index 000000000000..5da746da898d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -0,0 +1,1870 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/xarray.h>
+
+#include "en.h"
+#include "lib/aso.h"
+#include "lib/mlx5.h"
+#include "en_accel/macsec.h"
+#include "en_accel/macsec_fs.h"
+
+#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
+#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
+
+enum mlx5_macsec_aso_event_arm {
+ MLX5E_ASO_EPN_ARM = BIT(0),
+};
+
+enum {
+ MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
+struct mlx5e_macsec_handle {
+ struct mlx5e_macsec *macsec;
+ u32 obj_id;
+ u8 idx;
+};
+
+enum {
+ MLX5_MACSEC_EPN,
+};
+
+struct mlx5e_macsec_aso_out {
+ u8 event_arm;
+ u32 mode_param;
+};
+
+struct mlx5e_macsec_aso_in {
+ u8 mode;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_epn_state {
+ u32 epn_msb;
+ u8 epn_enabled;
+ u8 overlap;
+};
+
+struct mlx5e_macsec_async_work {
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ struct work_struct work;
+ u32 obj_id;
+};
+
+struct mlx5e_macsec_sa {
+ bool active;
+ u8 assoc_num;
+ u32 macsec_obj_id;
+ u32 enc_key_id;
+ u32 next_pn;
+ sci_t sci;
+ salt_t salt;
+
+ struct rhash_head hash;
+ u32 fs_id;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct rcu_head rcu_head;
+ struct mlx5e_macsec_epn_state epn_state;
+};
+
+struct mlx5e_macsec_rx_sc;
+struct mlx5e_macsec_rx_sc_xarray_element {
+ u32 fs_id;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+};
+
+struct mlx5e_macsec_rx_sc {
+ bool active;
+ sci_t sci;
+ struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
+ struct list_head rx_sc_list_element;
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct metadata_dst *md_dst;
+ struct rcu_head rcu_head;
+};
+
+struct mlx5e_macsec_umr {
+ dma_addr_t dma_addr;
+ u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ u32 mkey;
+};
+
+struct mlx5e_macsec_aso {
+ /* ASO */
+ struct mlx5_aso *maso;
+ /* Protects macsec ASO */
+ struct mutex aso_lock;
+ /* UMR */
+ struct mlx5e_macsec_umr *umr;
+
+ u32 pdn;
+};
+
+static const struct rhashtable_params rhash_sci = {
+ .key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
+ .key_offset = offsetof(struct mlx5e_macsec_sa, sci),
+ .head_offset = offsetof(struct mlx5e_macsec_sa, hash),
+ .automatic_shrinking = true,
+ .min_size = 1,
+};
+
+struct mlx5e_macsec_device {
+ const struct net_device *netdev;
+ struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
+ struct list_head macsec_rx_sc_list_head;
+ unsigned char *dev_addr;
+ struct list_head macsec_device_list_element;
+};
+
+struct mlx5e_macsec {
+ struct list_head macsec_device_list_head;
+ int num_of_devices;
+ struct mlx5e_macsec_fs *macsec_fs;
+ struct mutex lock; /* Protects mlx5e_macsec internal contexts */
+
+ /* Tx sci -> fs id mapping handling */
+ struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
+
+ /* Rx fs_id -> rx_sc mapping */
+ struct xarray sc_xarray;
+
+ struct mlx5_core_dev *mdev;
+
+ /* Stats management */
+ struct mlx5e_macsec_stats stats;
+
+ /* ASO */
+ struct mlx5e_macsec_aso aso;
+
+ struct notifier_block nb;
+ struct workqueue_struct *wq;
+};
+
+struct mlx5_macsec_obj_attrs {
+ u32 aso_pdn;
+ u32 next_pn;
+ __be64 sci;
+ u32 enc_key_id;
+ bool encrypt;
+ struct mlx5e_macsec_epn_state epn_state;
+ salt_t salt;
+ __be32 ssci;
+ bool replay_protect;
+ u32 replay_window;
+};
+
+struct mlx5_aso_ctrl_param {
+ u8 data_mask_mode;
+ u8 condition_0_operand;
+ u8 condition_1_operand;
+ u8 condition_0_offset;
+ u8 condition_1_offset;
+ u8 data_offset;
+ u8 condition_operand;
+ u32 condition_0_data;
+ u32 condition_0_mask;
+ u32 condition_1_data;
+ u32 condition_1_mask;
+ u64 bitwise_data;
+ u64 data_mask;
+};
+
+static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr;
+ struct device *dma_device;
+ dma_addr_t dma_addr;
+ int err;
+
+ umr = kzalloc(sizeof(*umr), GFP_KERNEL);
+ if (!umr) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ dma_device = &mdev->pdev->dev;
+ dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(dma_device, dma_addr);
+ if (err) {
+ mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
+ goto out_dma;
+ }
+
+ err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
+ goto out_mkey;
+ }
+
+ umr->dma_addr = dma_addr;
+
+ aso->umr = umr;
+
+ return 0;
+
+out_mkey:
+ dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+out_dma:
+ kfree(umr);
+ return err;
+}
+
+static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
+{
+ struct mlx5e_macsec_umr *umr = aso->umr;
+
+ mlx5_core_destroy_mkey(mdev, umr->mkey);
+ dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
+ kfree(umr);
+}
+
+static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
+{
+ u8 window_sz;
+
+ if (!attrs->replay_protect)
+ return 0;
+
+ switch (attrs->replay_window) {
+ case 256:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ case 128:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 64:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 32:
+ window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
+
+ return 0;
+}
+
+static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
+ struct mlx5_macsec_obj_attrs *attrs,
+ bool is_tx,
+ u32 *macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *aso_ctx;
+ void *obj;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
+ aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
+
+ MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
+ MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
+ MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
+ MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
+ MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
+
+ /* Epn */
+ if (attrs->epn_state.epn_enabled) {
+ void *salt_p;
+ int i;
+
+ MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
+ salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
+ for (i = 0; i < 3 ; i++)
+ memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
+ } else {
+ MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
+ }
+
+ MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
+ if (is_tx) {
+ MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
+ } else {
+ err = macsec_set_replay_protection(attrs, aso_ctx);
+ if (err)
+ return err;
+ }
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to create MACsec object (err = %d)\n",
+ err);
+ return err;
+ }
+
+ *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa,
+ bool is_tx)
+{
+ int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ if ((is_tx) && sa->fs_id) {
+ /* Make sure ongoing datapath readers see a valid SA */
+ rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ sa->fs_id = 0;
+ }
+
+ if (!sa->macsec_rule)
+ return;
+
+ mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+ sa->macsec_rule = NULL;
+}
+
+static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa,
+ bool encrypt,
+ bool is_tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_macsec_obj_attrs obj_attrs;
+ union mlx5e_macsec_rule *macsec_rule;
+ struct macsec_key *key;
+ int err;
+
+ obj_attrs.next_pn = sa->next_pn;
+ obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
+ obj_attrs.enc_key_id = sa->enc_key_id;
+ obj_attrs.encrypt = encrypt;
+ obj_attrs.aso_pdn = macsec->aso.pdn;
+ obj_attrs.epn_state = sa->epn_state;
+
+ if (is_tx) {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
+ key = &ctx->sa.tx_sa->key;
+ } else {
+ obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
+ key = &ctx->sa.rx_sa->key;
+ }
+
+ memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+ obj_attrs.replay_window = ctx->secy->replay_window;
+ obj_attrs.replay_protect = ctx->secy->replay_protect;
+
+ err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
+ if (!macsec_rule) {
+ err = -ENOMEM;
+ goto destroy_macsec_object;
+ }
+
+ sa->macsec_rule = macsec_rule;
+
+ if (is_tx) {
+ err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
+ if (err)
+ goto destroy_macsec_object_and_rule;
+ }
+
+ return 0;
+
+destroy_macsec_object_and_rule:
+ mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
+destroy_macsec_object:
+ mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
+
+ return err;
+}
+
+static struct mlx5e_macsec_rx_sc *
+mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
+{
+ struct mlx5e_macsec_rx_sc *iter;
+
+ list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
+ if (iter->sci == sci)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *rx_sa,
+ bool active)
+{
+ struct mlx5_core_dev *mdev = macsec->mdev;
+ struct mlx5_macsec_obj_attrs attrs = {};
+ int err = 0;
+
+ if (rx_sa->active == active)
+ return 0;
+
+ rx_sa->active = active;
+ if (!active) {
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ return 0;
+ }
+
+ attrs.sci = rx_sa->sci;
+ attrs.enc_key_id = rx_sa->enc_key_id;
+ err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
+{
+ const struct net_device *netdev = ctx->netdev;
+ const struct macsec_secy *secy = ctx->secy;
+
+ if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when validate_frame is in strict mode\n");
+ return false;
+ }
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
+ netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
+ MACSEC_DEFAULT_ICV_LEN);
+ return false;
+ }
+
+ if (!secy->protect_frames) {
+ netdev_err(netdev,
+ "MACsec offload is supported only when protect_frames is set\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct mlx5e_macsec_device *
+mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
+ const struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_device *iter;
+ const struct list_head *list;
+
+ list = &macsec->macsec_device_list_head;
+ list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
+ if (iter->netdev == ctx->secy->netdev)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
+ const pn_t *next_pn_halves)
+{
+ struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
+
+ sa->salt = key->salt;
+ epn_state->epn_enabled = 1;
+ epn_state->epn_msb = next_pn_halves->upper;
+ epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
+}
+
+static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_secy *secy = ctx->secy;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EEXIST;
+ goto out;
+ }
+
+ if (macsec_device->tx_sa[assoc_num]) {
+ netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
+ if (!tx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+ tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
+ tx_sa->sci = secy->sci;
+ tx_sa->assoc_num = assoc_num;
+
+ if (secy->xpn)
+ update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &tx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ macsec_device->tx_sa[assoc_num] = tx_sa;
+ if (!secy->operational ||
+ assoc_num != tx_sc->encoding_sa ||
+ !tx_sa->active)
+ goto out;
+
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto destroy_encryption_key;
+
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+destroy_encryption_key:
+ macsec_device->tx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
+destroy_sa:
+ kfree(tx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct net_device *netdev;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ netdev = ctx->netdev;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+ netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (tx_sa->active == ctx_tx_sa->active)
+ goto out;
+
+ if (tx_sa->assoc_num != tx_sc->encoding_sa)
+ goto out;
+
+ if (ctx_tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ } else {
+ if (!tx_sa->macsec_rule) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ tx_sa->active = ctx_tx_sa->active;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ tx_sa = macsec_device->tx_sa[assoc_num];
+ if (!tx_sa) {
+ netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree_rcu(tx_sa);
+ macsec_device->tx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
+{
+ struct mlx5e_macsec_sa *macsec_sa;
+ u32 fs_id = 0;
+
+ rcu_read_lock();
+ macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
+ if (macsec_sa)
+ fs_id = macsec_sa->fs_id;
+ rcu_read_unlock();
+
+ return fs_id;
+}
+
+static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct list_head *rx_sc_list;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
+ if (rx_sc) {
+ netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
+ ctx_rx_sc->sci);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
+ if (!rx_sc) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
+ if (!sc_xarray_element) {
+ err = -ENOMEM;
+ goto destroy_rx_sc;
+ }
+
+ sc_xarray_element->rx_sc = rx_sc;
+ err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
+ XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
+ if (err)
+ goto destroy_sc_xarray_element;
+
+ rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!rx_sc->md_dst) {
+ err = -ENOMEM;
+ goto erase_xa_alloc;
+ }
+
+ rx_sc->sci = ctx_rx_sc->sci;
+ rx_sc->active = ctx_rx_sc->active;
+ list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
+
+ rx_sc->sc_xarray_element = sc_xarray_element;
+ rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
+ mutex_unlock(&macsec->lock);
+
+ return 0;
+
+erase_xa_alloc:
+ xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
+destroy_sc_xarray_element:
+ kfree(sc_xarray_element);
+destroy_rx_sc:
+ kfree(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int i;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
+ if (!rx_sc) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sc->active == ctx_rx_sc->active)
+ goto out;
+
+ rx_sc->active = ctx_rx_sc->active;
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+/*
+ * At this point the relevant MACsec offload Rx rules have already been
+ * removed by mlx5e_macsec_cleanup_sa(). Wait for the datapath to finish
+ * handling any in-flight Rx data: xa_erase() synchronizes via RCU, and
+ * once the fs_id is erased this rx_sc is hidden from the datapath.
+ */
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+ xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
+ metadata_dst_free(rx_sc->md_dst);
+ kfree(rx_sc->sc_xarray_element);
+
+ kfree_rcu(rx_sc);
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
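
The sc_xarray above maps the steering fs_id reported by the hardware back to its Rx SC, which is why xa_erase() plus RCU is enough to hide the SC from the datapath. A hypothetical sketch of the RCU-protected lookup this scheme enables (the helper name is made up; the driver's actual Rx handler may differ):

	/* Resolve fs_id -> rx_sc; caller must hold rcu_read_lock(). */
	static struct mlx5e_macsec_rx_sc *macsec_rx_sc_by_fs_id(struct mlx5e_macsec *macsec, u32 fs_id)
	{
		struct mlx5e_macsec_rx_sc_xarray_element *elem;

		elem = xa_load(&macsec->sc_xarray, fs_id);
		return elem ? elem->rx_sc : NULL;
	}
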
+
+static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sc->rx_sa[assoc_num]) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
+ sci, assoc_num);
+ err = -EEXIST;
+ goto out;
+ }
+
+ rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
+ if (!rx_sa) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rx_sa->active = ctx_rx_sa->active;
+ rx_sa->next_pn = ctx_rx_sa->next_pn;
+ rx_sa->sci = sci;
+ rx_sa->assoc_num = assoc_num;
+ rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
+
+ if (ctx->secy->xpn)
+ update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+
+ err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
+ MLX5_ACCEL_OBJ_MACSEC_KEY,
+ &rx_sa->enc_key_id);
+ if (err)
+ goto destroy_sa;
+
+ rx_sc->rx_sa[assoc_num] = rx_sa;
+ if (!rx_sa->active)
+ goto out;
+
+ //TODO - add support for both authentication and encryption flows
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
+ if (err)
+ goto destroy_encryption_key;
+
+ goto out;
+
+destroy_encryption_key:
+ rx_sc->rx_sa[assoc_num] = NULL;
+ mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
+destroy_sa:
+ kfree(rx_sa);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
+{
+ const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ sci_t sci = ctx_rx_sa->sc->sci;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sa = rx_sc->rx_sa[assoc_num];
+ if (!rx_sa) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+ sci, assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+ netdev_err(ctx->netdev,
+ "MACsec offload update RX sa %d PN isn't supported\n",
+ assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active);
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ sci_t sci = ctx->sa.rx_sa->sc->sci;
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ u8 assoc_num = ctx->sa.assoc_num;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
+ if (!rx_sc) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld doesn't exist\n",
+ ctx->sa.rx_sa->sc->sci);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rx_sa = rx_sc->rx_sa[assoc_num];
+ if (!rx_sa) {
+ netdev_err(ctx->netdev,
+ "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
+ sci, assoc_num);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[assoc_num] = NULL;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ const struct net_device *netdev = ctx->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec *macsec;
+ int err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
+ netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
+ goto out;
+ }
+
+ if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
+ netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
+ err = -EBUSY;
+ goto out;
+ }
+
+ macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
+ if (!macsec_device) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
+ if (!macsec_device->dev_addr) {
+ kfree(macsec_device);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ macsec_device->netdev = dev;
+
+ INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
+ list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
+
+ ++macsec->num_of_devices;
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
+ struct mlx5e_macsec_device *macsec_device)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct list_head *list;
+ int i, err = 0;
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa || !rx_sa->macsec_rule)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ }
+ }
+
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ if (rx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, rx_sa, false, false);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+ memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
+out:
+ return err;
+}
+
+/* This function is called from two macsec ops functions:
+ * macsec_set_mac_address - the MAC address was changed, so the Tx contexts
+ * (macsec object + steering) must be destroyed and re-created.
+ * macsec_changelink - the Tx SC or SecY may have changed, so the Tx and Rx
+ * contexts (macsec object + steering) need to be destroyed and re-created.
+ */
+static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
+{
+ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ const struct net_device *dev = ctx->secy->netdev;
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ int i, err = 0;
+
+ if (!mlx5e_macsec_secy_features_validate(ctx))
+ return -EINVAL;
+
+ mutex_lock(&priv->macsec->lock);
+
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* If the dev_addr hasn't changed, it means the callback is from macsec_changelink */
+ if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
+ err = macsec_upd_secy_hw_address(ctx, macsec_device);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
+ err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
+{
+ struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
+ struct mlx5e_macsec_device *macsec_device;
+ struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
+ struct mlx5e_macsec_sa *rx_sa;
+ struct mlx5e_macsec_sa *tx_sa;
+ struct mlx5e_macsec *macsec;
+ struct list_head *list;
+ int err = 0;
+ int i;
+
+ mutex_lock(&priv->macsec->lock);
+ macsec = priv->macsec;
+ macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
+ if (!macsec_device) {
+ netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ tx_sa = macsec_device->tx_sa[i];
+ if (!tx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
+ mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
+ kfree(tx_sa);
+ macsec_device->tx_sa[i] = NULL;
+ }
+
+ list = &macsec_device->macsec_rx_sc_list_head;
+ list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ rx_sa = rx_sc->rx_sa[i];
+ if (!rx_sa)
+ continue;
+
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
+ mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
+ kfree(rx_sa);
+ rx_sc->rx_sa[i] = NULL;
+ }
+
+ list_del_rcu(&rx_sc->rx_sc_list_element);
+
+ kfree_rcu(rx_sc);
+ }
+
+ kfree(macsec_device->dev_addr);
+ macsec_device->dev_addr = NULL;
+
+ list_del_rcu(&macsec_device->macsec_device_list_element);
+ --macsec->num_of_devices;
+
+out:
+ mutex_unlock(&macsec->lock);
+
+ return err;
+}
+
+static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
+ struct mlx5_macsec_obj_attrs *attrs)
+{
+ attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
+ attrs->epn_state.overlap = sa->epn_state.overlap;
+}
+
+static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5_aso_ctrl_param *param)
+{
+ memset(aso_ctrl, 0, sizeof(*aso_ctrl));
+ if (macsec_aso->umr->dma_addr) {
+ aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
+ aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
+ aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
+ }
+
+ if (!param)
+ return;
+
+ aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
+ aso_ctrl->condition_1_0_operand = param->condition_1_operand |
+ param->condition_0_operand << 4;
+ aso_ctrl->condition_1_0_offset = param->condition_1_offset |
+ param->condition_0_offset << 4;
+ aso_ctrl->data_offset_condition_operand = param->data_offset |
+ param->condition_operand << 6;
+ aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
+ aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
+ aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
+ aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
+ aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
+ aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
+}
+
+static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
+ u32 macsec_id)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
+ u64 modify_field_select = 0;
+ void *obj;
+ int err;
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
+ macsec_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
+ modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
+
+ /* EPN */
+ if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
+ mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
+ macsec_id);
+ return -EOPNOTSUPP;
+ }
+
+ obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
+ MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
+ MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
+ MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
+
+ /* General object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
+ struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5_aso_ctrl_param param = {};
+
+ param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
+ param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
+ param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
+ if (in->mode == MLX5_MACSEC_EPN) {
+ param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ param.bitwise_data = BIT_ULL(54);
+ param.data_mask = param.bitwise_data;
+ }
+ macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
+}
+
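+/* Post a single MACsec ASO WQE for the given object and wait for its
+ * completion; macsec_epn_update() uses this to re-arm the EPN event after
+ * the EPN state has been updated in hardware.
+ */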
+static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false, 10);
+ mutex_unlock(&aso->aso_lock);
+
+ return err;
+}
+
+static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
+{
+ struct mlx5e_macsec_aso *aso;
+ struct mlx5_aso_wqe *aso_wqe;
+ struct mlx5_aso *maso;
+ int err;
+
+ aso = &macsec->aso;
+ maso = aso->maso;
+
+ mutex_lock(&aso->aso_lock);
+
+ aso_wqe = mlx5_aso_get_wqe(maso);
+ mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
+ macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
+
+ mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+ err = mlx5_aso_poll_cq(maso, false, 10);
+ if (err)
+ goto err_out;
+
+ if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
+ out->event_arm |= MLX5E_ASO_EPN_ARM;
+
+ out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
+
+err_out:
+ mutex_unlock(&aso->aso_lock);
+ return err;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = iter->tx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+
+ return NULL;
+}
+
+static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
+ const u32 obj_id)
+{
+ const struct list_head *device_list, *sc_list;
+ struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec_device *iter;
+ int i;
+
+ device_list = &macsec->macsec_device_list_head;
+
+ list_for_each_entry(iter, device_list, macsec_device_list_element) {
+ sc_list = &iter->macsec_rx_sc_list_head;
+ list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
+ for (i = 0; i < MACSEC_NUM_AN; ++i) {
+ macsec_sa = mlx5e_rx_sc->rx_sa[i];
+ if (!macsec_sa || !macsec_sa->active)
+ continue;
+ if (macsec_sa->macsec_obj_id == obj_id)
+ return macsec_sa;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
+ struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
+{
+ struct mlx5_macsec_obj_attrs attrs = {};
+ struct mlx5e_macsec_aso_in in = {};
+
+ /* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
+ * number wraparound), i.e. mode_param > MLX5_MACSEC_EPN_SCOPE_MID, SW should set the
+ * epn_overlap to OLD (1).
+ * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
+ * number wraparound), mode_param wraps back below MLX5_MACSEC_EPN_SCOPE_MID, so SW should
+ * set the epn_overlap to NEW (0) and increment the epn_msb.
+ */
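+ /* Illustrative example (assuming MLX5_MACSEC_EPN_SCOPE_MID is the 2^31 midpoint):
+ * a mode_param of 0x80000100 sets overlap to OLD (1); once the window wraps and
+ * mode_param reads e.g. 0x00000100, overlap goes back to NEW (0) and epn_msb is
+ * incremented.
+ */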
+
+ if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
+ sa->epn_state.epn_msb++;
+ sa->epn_state.overlap = 0;
+ } else {
+ sa->epn_state.overlap = 1;
+ }
+
+ macsec_build_accel_attrs(sa, &attrs);
+ mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
+
+ /* Re-set EPN arm event */
+ in.obj_id = obj_id;
+ in.mode = MLX5_MACSEC_EPN;
+ macsec_aso_set_arm_event(mdev, macsec, &in);
+}
+
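+/* Async EPN flow: the device raises an OBJECT_CHANGE event for a MACsec
+ * object, macsec_obj_change_event() queues this work, and here the matching
+ * SA is looked up, its ASO context is queried and, if the EPN arm bit is no
+ * longer set, macsec_epn_update() advances the EPN state and re-arms it.
+ */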
+static void macsec_async_event(struct work_struct *work)
+{
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5e_macsec_aso_out out = {};
+ struct mlx5e_macsec_aso_in in = {};
+ struct mlx5e_macsec_sa *macsec_sa;
+ struct mlx5e_macsec *macsec;
+ struct mlx5_core_dev *mdev;
+ u32 obj_id;
+
+ async_work = container_of(work, struct mlx5e_macsec_async_work, work);
+ macsec = async_work->macsec;
+ mdev = async_work->mdev;
+ obj_id = async_work->obj_id;
+ macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
+ if (!macsec_sa) {
+ mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
+ goto out_async_work;
+ }
+ }
+
+ /* Query MACsec ASO context */
+ in.obj_id = obj_id;
+ macsec_aso_query(mdev, macsec, &in, &out);
+
+ /* EPN case */
+ if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
+ macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
+
+out_async_work:
+ kfree(async_work);
+}
+
+static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
+ struct mlx5e_macsec_async_work *async_work;
+ struct mlx5_eqe_obj_change *obj_change;
+ struct mlx5_eqe *eqe = data;
+ u16 obj_type;
+ u32 obj_id;
+
+ if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+ return NOTIFY_DONE;
+
+ obj_change = &eqe->data.obj_change;
+ obj_type = be16_to_cpu(obj_change->obj_type);
+ obj_id = be32_to_cpu(obj_change->obj_id);
+
+ if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
+ return NOTIFY_DONE;
+
+ async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
+ if (!async_work)
+ return NOTIFY_DONE;
+
+ async_work->macsec = macsec;
+ async_work->mdev = macsec->mdev;
+ async_work->obj_id = obj_id;
+
+ INIT_WORK(&async_work->work, macsec_async_event);
+
+ WARN_ON(!queue_work(macsec->wq, &async_work->work));
+
+ return NOTIFY_OK;
+}
+
+static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ struct mlx5_aso *maso;
+ int err;
+
+ err = mlx5_core_alloc_pd(mdev, &aso->pdn);
+ if (err) {
+ mlx5_core_err(mdev,
+ "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
+ err);
+ return err;
+ }
+
+ maso = mlx5_aso_create(mdev, aso->pdn);
+ if (IS_ERR(maso)) {
+ err = PTR_ERR(maso);
+ goto err_aso;
+ }
+
+ err = mlx5e_macsec_aso_reg_mr(mdev, aso);
+ if (err)
+ goto err_aso_reg;
+
+ mutex_init(&aso->aso_lock);
+
+ aso->maso = maso;
+
+ return 0;
+
+err_aso_reg:
+ mlx5_aso_destroy(maso);
+err_aso:
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+ return err;
+}
+
+static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
+{
+ if (!aso)
+ return;
+
+ mlx5e_macsec_aso_dereg_mr(mdev, aso);
+
+ mlx5_aso_destroy(aso->maso);
+
+ mlx5_core_dealloc_pd(mdev, aso->pdn);
+}
+
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
+{
+ mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
+}
+
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
+{
+ if (!macsec)
+ return NULL;
+
+ return &macsec->stats;
+}
+
+static const struct macsec_ops macsec_offload_ops = {
+ .mdo_add_txsa = mlx5e_macsec_add_txsa,
+ .mdo_upd_txsa = mlx5e_macsec_upd_txsa,
+ .mdo_del_txsa = mlx5e_macsec_del_txsa,
+ .mdo_add_rxsc = mlx5e_macsec_add_rxsc,
+ .mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
+ .mdo_del_rxsc = mlx5e_macsec_del_rxsc,
+ .mdo_add_rxsa = mlx5e_macsec_add_rxsa,
+ .mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
+ .mdo_del_rxsa = mlx5e_macsec_del_rxsa,
+ .mdo_add_secy = mlx5e_macsec_add_secy,
+ .mdo_upd_secy = mlx5e_macsec_upd_secy,
+ .mdo_del_secy = mlx5e_macsec_del_secy,
+};
+
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ goto err_out;
+
+ return true;
+
+err_out:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ u32 fs_id;
+
+ fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
+ if (!fs_id)
+ return;
+
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
+ u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_macsec_rx_sc *rx_sc;
+ struct mlx5e_macsec *macsec;
+ u32 fs_id;
+
+ macsec = priv->macsec;
+ if (!macsec)
+ return;
+
+ fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data);
+
+ rcu_read_lock();
+ sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
+ rx_sc = sc_xarray_element->rx_sc;
+ if (rx_sc) {
+ dst_hold(&rx_sc->md_dst->dst);
+ skb_dst_set(skb, &rx_sc->md_dst->dst);
+ }
+
+ rcu_read_unlock();
+}
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return;
+
+ /* Enable MACsec */
+ mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
+ netdev->macsec_ops = &macsec_offload_ops;
+ netdev->features |= NETIF_F_HW_MACSEC;
+ netif_keep_dst(netdev);
+}
+
+int mlx5e_macsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_macsec *macsec = NULL;
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ if (!mlx5e_is_macsec_device(priv->mdev)) {
+ mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
+ return 0;
+ }
+
+ macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
+ if (!macsec)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&macsec->macsec_device_list_head);
+ mutex_init(&macsec->lock);
+
+ err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
+ err);
+ goto err_hash;
+ }
+
+ err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
+ if (err) {
+ mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
+ goto err_aso;
+ }
+
+ macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
+ if (!macsec->wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
+
+ priv->macsec = macsec;
+
+ macsec->mdev = mdev;
+
+ macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
+ if (!macsec_fs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ macsec->macsec_fs = macsec_fs;
+
+ macsec->nb.notifier_call = macsec_obj_change_event;
+ mlx5_notifier_register(mdev, &macsec->nb);
+
+ mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
+
+ return 0;
+
+err_out:
+ destroy_workqueue(macsec->wq);
+err_wq:
+ mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
+err_aso:
+ rhashtable_destroy(&macsec->sci_hash);
+err_hash:
+ kfree(macsec);
+ priv->macsec = NULL;
+ return err;
+}
+
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_macsec *macsec = priv->macsec;
+ struct mlx5_core_dev *mdev;
+
+ if (!macsec)
+ return;
+
+ mdev = macsec->mdev;
+
+ mlx5_notifier_unregister(mdev, &macsec->nb);
+
+ mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
+
+ /* Cleanup workqueue */
+ destroy_workqueue(macsec->wq);
+
+ mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
+
+ priv->macsec = NULL;
+
+ rhashtable_destroy(&macsec->sci_hash);
+
+ mutex_destroy(&macsec->lock);
+
+ kfree(macsec);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
new file mode 100644
index 000000000000..d580b4a91253
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_ACCEL_MACSEC_H__
+#define __MLX5_EN_ACCEL_MACSEC_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include <linux/mlx5/driver.h>
+#include <net/macsec.h>
+#include <net/dst_metadata.h>
+
+/* Bits 31-30: MACsec marker, bits 3-0: MACsec id */
+#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
+#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0))
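+/* For example, an Rx metadata word of 0x40000005 carries the MACsec marker
+ * (bits 31:30 == 0x1) and MACsec handle 5.
+ */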
+
+struct mlx5e_priv;
+struct mlx5e_macsec;
+
+struct mlx5e_macsec_stats {
+ u64 macsec_rx_pkts;
+ u64 macsec_rx_bytes;
+ u64 macsec_rx_pkts_drop;
+ u64 macsec_rx_bytes_drop;
+ u64 macsec_tx_pkts;
+ u64 macsec_tx_bytes;
+ u64 macsec_tx_pkts_drop;
+ u64 macsec_tx_bytes_drop;
+};
+
+void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_macsec_init(struct mlx5e_priv *priv);
+void mlx5e_macsec_cleanup(struct mlx5e_priv *priv);
+bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb);
+void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ return md_dst && (md_dst->type == METADATA_MACSEC);
+}
+
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_MACSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev);
+void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats);
+struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec);
+
+#else
+
+static inline void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv) {}
+static inline int mlx5e_macsec_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) {}
+static inline bool mlx5e_macsec_skb_is_offload(struct sk_buff *skb) { return false; }
+static inline bool mlx5e_macsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{}
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
+#endif /* CONFIG_MLX5_EN_MACSEC */
+
+#endif /* __MLX5_EN_ACCEL_MACSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
new file mode 100644
index 000000000000..13dc628b988a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <net/macsec.h>
+#include <linux/netdevice.h>
+#include <linux/mlx5/qp.h>
+#include "fs_core.h"
+#include "en/fs.h"
+#include "en_accel/macsec_fs.h"
+#include "mlx5_core.h"
+
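+/* Flow steering layout implemented below: on Tx, a crypto table (level 0,
+ * with groups for MKE/PAE traffic, per-SA metadata matches and a default
+ * allow) encrypts and forwards to a check table (level 1) that counts good
+ * packets and drops the rest; on Rx, a crypto table (level 0, with SA groups
+ * with and without SCI) decrypts and forwards to a check table (level 1) that
+ * validates the MACsec and ASO syndromes, strips the SecTAG and forwards to
+ * the next priority.
+ */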
+/* MACsec TX flow steering */
+#define CRYPTO_NUM_MAXSEC_FTE BIT(15)
+#define CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE 1
+
+#define TX_CRYPTO_TABLE_LEVEL 0
+#define TX_CRYPTO_TABLE_NUM_GROUPS 3
+#define TX_CRYPTO_TABLE_MKE_GROUP_SIZE 1
+#define TX_CRYPTO_TABLE_SA_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - (TX_CRYPTO_TABLE_MKE_GROUP_SIZE + \
+ CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE))
+#define TX_CHECK_TABLE_LEVEL 1
+#define TX_CHECK_TABLE_NUM_FTE 2
+#define RX_CRYPTO_TABLE_LEVEL 0
+#define RX_CHECK_TABLE_LEVEL 1
+#define RX_CHECK_TABLE_NUM_FTE 3
+#define RX_CRYPTO_TABLE_NUM_GROUPS 3
+#define RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE \
+ ((CRYPTO_NUM_MAXSEC_FTE - CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE) / 2)
+#define RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE \
+ (CRYPTO_NUM_MAXSEC_FTE - RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE)
+#define RX_NUM_OF_RULES_PER_SA 2
+
+#define MLX5_MACSEC_TAG_LEN 8 /* SecTAG length with ethertype and without the optional SCI */
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK 0x23
+#define MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET 0x8
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET 0x5
+#define MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT (0x1 << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET)
+#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
+#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
+
+/* MACsec RX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
+
+struct mlx5_sectag_header {
+ __be16 ethertype;
+ u8 tci_an;
+ u8 sl;
+ u32 pn;
+ u8 sci[MACSEC_SCI_LEN]; /* optional */
+} __packed;
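+/* The packed layout above is 8 bytes up to and including the PN (matching
+ * MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI) and 16 bytes when the optional 8-byte
+ * SCI is appended (MLX5_SECTAG_HEADER_SIZE_WITH_SCI).
+ */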
+
+struct mlx5e_macsec_tx_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u32 fs_id;
+};
+
+struct mlx5e_macsec_tables {
+ struct mlx5e_flow_table ft_crypto;
+ struct mlx5_flow_handle *crypto_miss_rule;
+
+ struct mlx5_flow_table *ft_check;
+ struct mlx5_flow_group *ft_check_group;
+ struct mlx5_fc *check_miss_rule_counter;
+ struct mlx5_flow_handle *check_miss_rule;
+ struct mlx5_fc *check_rule_counter;
+
+ u32 refcnt;
+};
+
+struct mlx5e_macsec_tx {
+ struct mlx5_flow_handle *crypto_mke_rule;
+ struct mlx5_flow_handle *check_rule;
+
+ struct ida tx_halloc;
+
+ struct mlx5e_macsec_tables tables;
+};
+
+struct mlx5e_macsec_rx_rule {
+ struct mlx5_flow_handle *rule[RX_NUM_OF_RULES_PER_SA];
+ struct mlx5_modify_hdr *meta_modhdr;
+};
+
+struct mlx5e_macsec_rx {
+ struct mlx5_flow_handle *check_rule[2];
+ struct mlx5_pkt_reformat *check_rule_pkt_reformat[2];
+
+ struct mlx5e_macsec_tables tables;
+};
+
+union mlx5e_macsec_rule {
+ struct mlx5e_macsec_tx_rule tx_rule;
+ struct mlx5e_macsec_rx_rule rx_rule;
+};
+
+struct mlx5e_macsec_fs {
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5e_macsec_rx *rx_fs;
+};
+
+static void macsec_fs_tx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ tx_tables = &tx_fs->tables;
+
+ /* Tx check table */
+ if (tx_fs->check_rule) {
+ mlx5_del_flow_rules(tx_fs->check_rule);
+ tx_fs->check_rule = NULL;
+ }
+
+ if (tx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->check_miss_rule);
+ tx_tables->check_miss_rule = NULL;
+ }
+
+ if (tx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(tx_tables->ft_check_group);
+ tx_tables->ft_check_group = NULL;
+ }
+
+ if (tx_tables->ft_check) {
+ mlx5_destroy_flow_table(tx_tables->ft_check);
+ tx_tables->ft_check = NULL;
+ }
+
+ /* Tx crypto table */
+ if (tx_fs->crypto_mke_rule) {
+ mlx5_del_flow_rules(tx_fs->crypto_mke_rule);
+ tx_fs->crypto_mke_rule = NULL;
+ }
+
+ if (tx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(tx_tables->crypto_miss_rule);
+ tx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&tx_tables->ft_crypto);
+}
+
+static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(TX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = kvzalloc(inlen, GFP_KERNEL);
+
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow Group for MKE match */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_MKE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for SA rules */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_SET(fte_match_param, mc, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += TX_CRYPTO_TABLE_SA_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
+static struct mlx5_flow_table
+ *macsec_fs_auto_group_table_create(struct mlx5_flow_namespace *ns, int flags,
+ int level, int max_fte)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *fdb = NULL;
+
+ /* reserve entry for the match all miss group and rule */
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.prio = 0;
+ ft_attr.flags = flags;
+ ft_attr.level = level;
+ ft_attr.max_fte = max_fte;
+
+ fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+
+ return fdb;
+}
+
+static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto out_spec;
+ }
+
+ tx_tables = &tx_fs->tables;
+ ft_crypto = &tx_tables->ft_crypto;
+
+ /* Tx crypto table */
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.level = TX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Tx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Tx crypto table groups */
+ err = macsec_fs_tx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ /* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
+ memset(&flow_act, 0, sizeof(flow_act));
+ memset(spec, 0, sizeof(*spec));
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_PAE);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX MKE rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->crypto_mke_rule = rule;
+
+ /* Tx crypto table Default miss rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Tx table default miss rule %d\n", err);
+ goto err;
+ }
+ tx_tables->crypto_miss_rule = rule;
+
+ /* Tx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns, 0, TX_CHECK_TABLE_LEVEL,
+ TX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "fail to create MACsec TX check table, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->ft_check = flow_table;
+
+ /* Tx check table Default miss group/rule */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(tx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+ tx_tables->ft_check_group = flow_group;
+
+ /* Tx check table default drop rule */
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to added MACsec tx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ tx_tables->check_miss_rule = rule;
+
+ /* Tx check table rule */
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(tx_tables->ft_check, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec check rule, err=%d\n", err);
+ goto err;
+ }
+ tx_fs->check_rule = rule;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_tx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
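+/* The Tx tables are reference counted: they are created lazily when the first
+ * Tx rule takes a reference and destroyed when the last rule drops it (see
+ * macsec_fs_tx_del_rule()).
+ */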
+static int macsec_fs_tx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5e_macsec_tables *tx_tables;
+ int err = 0;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_tx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ tx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_tx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+
+ if (--tx_tables->refcnt)
+ return;
+
+ macsec_fs_tx_destroy(macsec_fs);
+}
+
+static int macsec_fs_tx_setup_fte(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ u32 macsec_obj_id,
+ u32 *fs_id)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ int err = 0;
+ u32 id;
+
+ err = ida_alloc_range(&tx_fs->tx_halloc, 1,
+ MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES,
+ GFP_KERNEL);
+ if (err < 0)
+ return err;
+
+ id = err;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Metadata match */
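+ /* The Tx SA is identified by metadata_reg_a: the MACsec marker bit plus the
+ * IDA-allocated fs_id shifted left by 2, matched under
+ * MLX5_ETH_WQE_FT_META_MACSEC_MASK; the same marker/fs_id value is written
+ * into the send WQE by mlx5e_macsec_tx_build_eseg().
+ */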
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_MACSEC | id << 2);
+
+ *fs_id = id;
+ flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ flow_act->crypto.obj_id = macsec_obj_id;
+
+ mlx5_core_dbg(macsec_fs->mdev, "Tx fte: macsec obj_id %u, fs_id %u\n", macsec_obj_id, id);
+ return 0;
+}
+
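+/* Build the SecTAG that hardware prepends via the ADD_MACSEC packet reformat.
+ * For example, an encrypting SC that sends the SCI yields
+ * tci_an = MACSEC_TCI_SC | MACSEC_TCI_CONFID | encoding_sa and a 16-byte
+ * reformat buffer (8-byte SecTAG + 8-byte SCI).
+ */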
+static void macsec_fs_tx_create_sectag_header(const struct macsec_context *ctx,
+ char *reformatbf,
+ size_t *reformat_size)
+{
+ const struct macsec_secy *secy = ctx->secy;
+ bool sci_present = macsec_send_sci(secy);
+ struct mlx5_sectag_header sectag = {};
+ const struct macsec_tx_sc *tx_sc;
+
+ tx_sc = &secy->tx_sc;
+ sectag.ethertype = htons(ETH_P_MACSEC);
+
+ if (sci_present) {
+ sectag.tci_an |= MACSEC_TCI_SC;
+ memcpy(&sectag.sci, &secy->sci,
+ sizeof(sectag.sci));
+ } else {
+ if (tx_sc->end_station)
+ sectag.tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ sectag.tci_an |= MACSEC_TCI_SCB;
+ }
+
+ /* With GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ sectag.tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ sectag.tci_an |= MACSEC_TCI_C;
+
+ sectag.tci_an |= tx_sc->encoding_sa;
+
+ *reformat_size = MLX5_MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+
+ memcpy(reformatbf, &sectag, *reformat_size);
+}
+
+static void macsec_fs_tx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_tx_rule *tx_rule)
+{
+ if (tx_rule->rule) {
+ mlx5_del_flow_rules(tx_rule->rule);
+ tx_rule->rule = NULL;
+ }
+
+ if (tx_rule->pkt_reformat) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev, tx_rule->pkt_reformat);
+ tx_rule->pkt_reformat = NULL;
+ }
+
+ if (tx_rule->fs_id) {
+ ida_free(&macsec_fs->tx_fs->tx_halloc, tx_rule->fs_id);
+ tx_rule->fs_id = 0;
+ }
+
+ kfree(tx_rule);
+
+ macsec_fs_tx_ft_put(macsec_fs);
+}
+
+static union mlx5e_macsec_rule *
+macsec_fs_tx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ char reformatbf[MLX5_MACSEC_TAG_LEN + MACSEC_SCI_LEN];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx_rule *tx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ size_t reformat_size;
+ int err = 0;
+ u32 fs_id;
+
+ tx_tables = &tx_fs->tables;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_tx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_tx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ tx_rule = &macsec_rule->tx_rule;
+
+ /* Tx crypto table crypto rule */
+ macsec_fs_tx_create_sectag_header(macsec_ctx, reformatbf, &reformat_size);
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_ADD_MACSEC;
+ reformat_params.size = reformat_size;
+ reformat_params.data = reformatbf;
+ flow_act.pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
+ if (IS_ERR(flow_act.pkt_reformat)) {
+ err = PTR_ERR(flow_act.pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Tx reformat context err=%d\n", err);
+ goto err;
+ }
+ tx_rule->pkt_reformat = flow_act.pkt_reformat;
+
+ err = macsec_fs_tx_setup_fte(macsec_fs, spec, &flow_act, attrs->macsec_obj_id, &fs_id);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to add packet reformat for MACsec TX crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+
+ tx_rule->fs_id = fs_id;
+ *sa_fs_id = fs_id;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx_tables->ft_check;
+ rule = mlx5_add_flow_rules(tx_tables->ft_crypto.t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec TX crypto rule, err=%d\n", err);
+ goto err;
+ }
+ tx_rule->rule = rule;
+
+ goto out_spec;
+
+err:
+ macsec_fs_tx_del_rule(macsec_fs, tx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+
+ return macsec_rule;
+}
+
+static void macsec_fs_tx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tx *tx_fs = macsec_fs->tx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+
+ if (!tx_fs)
+ return;
+
+ tx_tables = &tx_fs->tables;
+ if (tx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload tx_fs, refcnt(%u) isn't 0\n",
+ tx_tables->refcnt);
+ return;
+ }
+
+ ida_destroy(&tx_fs->tx_halloc);
+
+ if (tx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_miss_rule_counter);
+ tx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (tx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+}
+
+static int macsec_fs_tx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *tx_tables;
+ struct mlx5e_macsec_tx *tx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ tx_tables = &tx_fs->tables;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+ tx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Tx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ tx_tables->check_miss_rule_counter = flow_counter;
+
+ ida_init(&tx_fs->tx_halloc);
+
+ macsec_fs->tx_fs = tx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, tx_tables->check_rule_counter);
+ tx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(tx_fs);
+ macsec_fs->tx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_destroy(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5e_macsec_tables *rx_tables;
+ int i;
+
+ /* Rx check table */
+ for (i = 1; i >= 0; --i) {
+ if (rx_fs->check_rule[i]) {
+ mlx5_del_flow_rules(rx_fs->check_rule[i]);
+ rx_fs->check_rule[i] = NULL;
+ }
+
+ if (rx_fs->check_rule_pkt_reformat[i]) {
+ mlx5_packet_reformat_dealloc(macsec_fs->mdev,
+ rx_fs->check_rule_pkt_reformat[i]);
+ rx_fs->check_rule_pkt_reformat[i] = NULL;
+ }
+ }
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->check_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->check_miss_rule);
+ rx_tables->check_miss_rule = NULL;
+ }
+
+ if (rx_tables->ft_check_group) {
+ mlx5_destroy_flow_group(rx_tables->ft_check_group);
+ rx_tables->ft_check_group = NULL;
+ }
+
+ if (rx_tables->ft_check) {
+ mlx5_destroy_flow_table(rx_tables->ft_check);
+ rx_tables->ft_check = NULL;
+ }
+
+ /* Rx crypto table */
+ if (rx_tables->crypto_miss_rule) {
+ mlx5_del_flow_rules(rx_tables->crypto_miss_rule);
+ rx_tables->crypto_miss_rule = NULL;
+ }
+
+ mlx5e_destroy_flow_table(&rx_tables->ft_crypto);
+}
+
+static int macsec_fs_rx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int mclen = MLX5_ST_SZ_BYTES(fte_match_param);
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(RX_CRYPTO_TABLE_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ /* Flow group for SA rule with SCI */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_2);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters_5.macsec_tag_3);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITH_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow group for SA rule without SCI */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS_5);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+ MLX5_SET(fte_match_param, mc, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += RX_CRYPTO_TABLE_SA_RULE_WITHOUT_SCI_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
+ memset(mc, 0, mclen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += CRYPTO_TABLE_DEFAULT_RULE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
+
+ return err;
+}
+
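+/* Two decap rules are installed in the Rx check table: one stripping a
+ * 16-byte SecTAG (SCI present) and one stripping an 8-byte SecTAG (no SCI),
+ * selected by matching the TCI SC bit in macsec_tag_0.
+ */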
+static int macsec_fs_rx_create_check_decap_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_spec *spec,
+ int reformat_param_size)
+{
+ int rule_index = (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI) ? 0 : 1;
+ u8 mlx5_reformat_buf[MLX5_SECTAG_HEADER_SIZE_WITH_SCI];
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ rx_tables = &rx_fs->tables;
+
+ /* Rx check table decap 16B rule */
+ memset(dest, 0, sizeof(*dest));
+ memset(flow_act, 0, sizeof(*flow_act));
+ memset(spec, 0, sizeof(*spec));
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_MACSEC;
+ reformat_params.size = reformat_param_size;
+ reformat_params.data = mlx5_reformat_buf;
+ flow_act->pkt_reformat = mlx5_packet_reformat_alloc(macsec_fs->mdev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (IS_ERR(flow_act->pkt_reformat)) {
+ err = PTR_ERR(flow_act->pkt_reformat);
+ netdev_err(netdev, "Failed to allocate MACsec Rx reformat context err=%d\n", err);
+ return err;
+ }
+ rx_fs->check_rule_pkt_reformat[rule_index] = flow_act->pkt_reformat;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ /* MACsec syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.macsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.macsec_syndrome, 0);
+ /* ASO return reg syndrome match */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 0);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+ /* SecTAG TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (reformat_param_size == MLX5_SECTAG_HEADER_SIZE_WITH_SCI)
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_SC_FIELD_BIT <<
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ flow_act->flags = FLOW_ACT_NO_APPEND;
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest->counter_id = mlx5_fc_id(rx_tables->check_rule_counter);
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, spec, flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to add MACsec Rx check rule, err=%d\n", err);
+ return err;
+ }
+
+ rx_fs->check_rule[rule_index] = rule;
+
+ return 0;
+}
+
+static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_flow_group *flow_group;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
+ if (!ns)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in) {
+ err = -ENOMEM;
+ goto free_spec;
+ }
+
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Rx crypto table */
+ ft_attr.level = RX_CRYPTO_TABLE_LEVEL;
+ ft_attr.max_fte = CRYPTO_NUM_MAXSEC_FTE;
+
+ flow_table = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "Failed to create MACsec Rx crypto table err(%d)\n", err);
+ goto out_flow_group;
+ }
+ ft_crypto->t = flow_table;
+
+ /* Rx crypto table groups */
+ err = macsec_fs_rx_create_crypto_table_groups(ft_crypto);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Tx crypto table err(%d)\n",
+ err);
+ goto err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ rule = mlx5_add_flow_rules(ft_crypto->t, NULL, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add MACsec Rx crypto table default miss rule %d\n",
+ err);
+ goto err;
+ }
+ rx_tables->crypto_miss_rule = rule;
+
+ /* Rx check table */
+ flow_table = macsec_fs_auto_group_table_create(ns,
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT,
+ RX_CHECK_TABLE_LEVEL,
+ RX_CHECK_TABLE_NUM_FTE);
+ if (IS_ERR(flow_table)) {
+ err = PTR_ERR(flow_table);
+ netdev_err(netdev, "fail to create MACsec RX check table, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->ft_check = flow_table;
+
+ /* Rx check table Default miss group/rule */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_table->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_table->max_fte - 1);
+ flow_group = mlx5_create_flow_group(rx_tables->ft_check, flow_group_in);
+ if (IS_ERR(flow_group)) {
+ err = PTR_ERR(flow_group);
+ netdev_err(netdev,
+ "Failed to create default flow group for MACsec Rx check table err(%d)\n",
+ err);
+ goto err;
+ }
+ rx_tables->ft_check_group = flow_group;
+
+ /* Rx check table default drop rule */
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(rx_tables->check_miss_rule_counter);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx_tables->ft_check, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev, "Failed to added MACsec Rx check drop rule, err(%d)\n", err);
+ goto err;
+ }
+ rx_tables->check_miss_rule = rule;
+
+ /* Rx check table decap rules */
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITH_SCI);
+ if (err)
+ goto err;
+
+ err = macsec_fs_rx_create_check_decap_rule(macsec_fs, &dest, &flow_act, spec,
+ MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI);
+ if (err)
+ goto err;
+
+ goto out_flow_group;
+
+err:
+ macsec_fs_rx_destroy(macsec_fs);
+out_flow_group:
+ kvfree(flow_group_in);
+free_spec:
+ kvfree(spec);
+ return err;
+}
+
+static int macsec_fs_rx_ft_get(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ int err = 0;
+
+ if (rx_tables->refcnt)
+ goto out;
+
+ err = macsec_fs_rx_create(macsec_fs);
+ if (err)
+ return err;
+
+out:
+ rx_tables->refcnt++;
+ return err;
+}
+
+static void macsec_fs_rx_ft_put(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+
+ if (--rx_tables->refcnt)
+ return;
+
+ macsec_fs_rx_destroy(macsec_fs);
+}
+
+static void macsec_fs_rx_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ struct mlx5e_macsec_rx_rule *rx_rule)
+{
+ int i;
+
+ for (i = 0; i < RX_NUM_OF_RULES_PER_SA; ++i) {
+ if (rx_rule->rule[i]) {
+ mlx5_del_flow_rules(rx_rule->rule[i]);
+ rx_rule->rule[i] = NULL;
+ }
+ }
+
+ if (rx_rule->meta_modhdr) {
+ mlx5_modify_header_dealloc(macsec_fs->mdev, rx_rule->meta_modhdr);
+ rx_rule->meta_modhdr = NULL;
+ }
+
+ kfree(rx_rule);
+
+ macsec_fs_rx_ft_put(macsec_fs);
+}
+
+static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_macsec_rule_attrs *attrs,
+ bool sci_present)
+{
+ u8 tci_an = (sci_present << MLX5_MACSEC_SECTAG_TCI_SC_FIELD_OFFSET) | attrs->assoc_num;
+ struct mlx5_flow_act_crypto_params *crypto_params = &flow_act->crypto;
+ __be32 *sci_p = (__be32 *)(&attrs->sci);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ /* MACsec ethertype */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ETH_P_MACSEC);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5;
+
+ /* SecTAG AN + TCI SC present bit */
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_5.macsec_tag_0,
+ MLX5_MACSEC_SECTAG_TCI_AN_FIELD_BITMASK << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_0,
+ tci_an << MLX5_MACSEC_SECTAG_TCI_AN_FIELD_OFFSET);
+
+ if (sci_present) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_2);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_2,
+ be32_to_cpu(sci_p[0]));
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_5.macsec_tag_3);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_5.macsec_tag_3,
+ be32_to_cpu(sci_p[1]));
+ } else {
+ /* When the SCI isn't present in the SecTAG, match on the source
+ * MAC address only if the SCI contains the default MACsec PORT
+ */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16),
+ sci_p, ETH_ALEN);
+ }
+
+ crypto_params->type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
+ crypto_params->obj_id = attrs->macsec_obj_id;
+}
+
+static union mlx5e_macsec_rule *
+macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 fs_id)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct net_device *netdev = macsec_fs->netdev;
+ union mlx5e_macsec_rule *macsec_rule = NULL;
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx_rule *rx_rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_flow_table *ft_crypto;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return NULL;
+
+ err = macsec_fs_rx_ft_get(macsec_fs);
+ if (err)
+ goto out_spec;
+
+ macsec_rule = kzalloc(sizeof(*macsec_rule), GFP_KERNEL);
+ if (!macsec_rule) {
+ macsec_fs_rx_ft_put(macsec_fs);
+ goto out_spec;
+ }
+
+ rx_rule = &macsec_rule->rx_rule;
+ rx_tables = &rx_fs->tables;
+ ft_crypto = &rx_tables->ft_crypto;
+
+ /* Set bits [31:30] to the MACsec marker (0x1) and bits [3:0] to the fs id */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, fs_id | BIT(30));
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ netdev_err(netdev, "fail to alloc MACsec set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto err;
+ }
+ rx_rule->meta_modhdr = modify_hdr;
+
+ /* Rx crypto table with SCI rule */
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, true);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA with SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[0] = rule;
+
+ /* Rx crypto table without SCI rule */
+ if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) {
+ memset(spec, 0, sizeof(struct mlx5_flow_spec));
+ memset(&dest, 0, sizeof(struct mlx5_flow_destination));
+ memset(&flow_act, 0, sizeof(flow_act));
+
+ macsec_fs_rx_setup_fte(spec, &flow_act, attrs, false);
+
+ flow_act.modify_hdr = modify_hdr;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = rx_tables->ft_check;
+ rule = mlx5_add_flow_rules(ft_crypto->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(netdev,
+ "Failed to add SA without SCI rule to Rx crypto rule, err=%d\n",
+ err);
+ goto err;
+ }
+ rx_rule->rule[1] = rule;
+ }
+
+ return macsec_rule;
+
+err:
+ macsec_fs_rx_del_rule(macsec_fs, rx_rule);
+ macsec_rule = NULL;
+out_spec:
+ kvfree(spec);
+ return macsec_rule;
+}
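
The modify-header action above tags decrypted packets by writing a MACsec marker and the fs id into metadata register B, which the Rx datapath can later decode. A standalone sketch of that encoding, assuming only the bit layout described by the in-code comment (marker in bit 30, fs id in the low four bits):

#include <stdio.h>
#include <stdint.h>

/* The 4-bit fs-id width is taken from the "bits [3:0] fs id" comment in the
 * rule setup, not from a hardware definition.
 */
#define MODEL_MACSEC_RX_MARKER	(1u << 30)
#define MODEL_FS_ID_MASK	0xfu

static uint32_t model_encode_reg_b(uint32_t fs_id)
{
	return (fs_id & MODEL_FS_ID_MASK) | MODEL_MACSEC_RX_MARKER;
}

/* Returns 1 and extracts the fs id when the marker says "MACsec packet". */
static int model_decode_reg_b(uint32_t reg_b, uint32_t *fs_id)
{
	if (!(reg_b & MODEL_MACSEC_RX_MARKER))
		return 0;
	*fs_id = reg_b & MODEL_FS_ID_MASK;
	return 1;
}

int main(void)
{
	uint32_t fs_id, reg_b = model_encode_reg_b(3);

	if (model_decode_reg_b(reg_b, &fs_id))
		printf("MACsec packet, fs_id=%u (reg_b=0x%08x)\n",
		       (unsigned int)fs_id, (unsigned int)reg_b);
	return 0;
}
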
+
+static int macsec_fs_rx_init(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct net_device *netdev = macsec_fs->netdev;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+ struct mlx5e_macsec_rx *rx_fs;
+ struct mlx5_fc *flow_counter;
+ int err;
+
+ rx_fs = kzalloc(sizeof(*rx_fs), GFP_KERNEL);
+ if (!rx_fs)
+ return -ENOMEM;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx encrypt flow counter, err(%d)\n",
+ err);
+ goto err_encrypt_counter;
+ }
+
+ rx_tables = &rx_fs->tables;
+ rx_tables->check_rule_counter = flow_counter;
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ netdev_err(netdev,
+ "Failed to create MACsec Rx drop flow counter, err(%d)\n",
+ err);
+ goto err_drop_counter;
+ }
+ rx_tables->check_miss_rule_counter = flow_counter;
+
+ macsec_fs->rx_fs = rx_fs;
+
+ return 0;
+
+err_drop_counter:
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+
+err_encrypt_counter:
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+
+ return err;
+}
+
+static void macsec_fs_rx_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ struct mlx5e_macsec_rx *rx_fs = macsec_fs->rx_fs;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+ struct mlx5e_macsec_tables *rx_tables;
+
+ if (!rx_fs)
+ return;
+
+ rx_tables = &rx_fs->tables;
+
+ if (rx_tables->refcnt) {
+ netdev_err(macsec_fs->netdev,
+ "Can't destroy MACsec offload rx_fs, refcnt(%u) isn't 0\n",
+ rx_tables->refcnt);
+ return;
+ }
+
+ if (rx_tables->check_miss_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_miss_rule_counter);
+ rx_tables->check_miss_rule_counter = NULL;
+ }
+
+ if (rx_tables->check_rule_counter) {
+ mlx5_fc_destroy(mdev, rx_tables->check_rule_counter);
+ rx_tables->check_rule_counter = NULL;
+ }
+
+ kfree(rx_fs);
+ macsec_fs->rx_fs = NULL;
+}
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats)
+{
+ struct mlx5e_macsec_stats *stats = (struct mlx5e_macsec_stats *)macsec_stats;
+ struct mlx5e_macsec_tables *tx_tables = &macsec_fs->tx_fs->tables;
+ struct mlx5e_macsec_tables *rx_tables = &macsec_fs->rx_fs->tables;
+ struct mlx5_core_dev *mdev = macsec_fs->mdev;
+
+ if (tx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_rule_counter,
+ &stats->macsec_tx_pkts, &stats->macsec_tx_bytes);
+
+ if (tx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, tx_tables->check_miss_rule_counter,
+ &stats->macsec_tx_pkts_drop, &stats->macsec_tx_bytes_drop);
+
+ if (rx_tables->check_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_rule_counter,
+ &stats->macsec_rx_pkts, &stats->macsec_rx_bytes);
+
+ if (rx_tables->check_miss_rule_counter)
+ mlx5_fc_query(mdev, rx_tables->check_miss_rule_counter,
+ &stats->macsec_rx_pkts_drop, &stats->macsec_rx_bytes_drop);
+}
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *macsec_ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id)
+{
+ return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
+ macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
+}
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action)
+{
+ (action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
+ macsec_fs_tx_del_rule(macsec_fs, &macsec_rule->tx_rule) :
+ macsec_fs_rx_del_rule(macsec_fs, &macsec_rule->rx_rule);
+}
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs)
+{
+ macsec_fs_rx_cleanup(macsec_fs);
+ macsec_fs_tx_cleanup(macsec_fs);
+ kfree(macsec_fs);
+}
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ struct mlx5e_macsec_fs *macsec_fs;
+ int err;
+
+ macsec_fs = kzalloc(sizeof(*macsec_fs), GFP_KERNEL);
+ if (!macsec_fs)
+ return NULL;
+
+ macsec_fs->mdev = mdev;
+ macsec_fs->netdev = netdev;
+
+ err = macsec_fs_tx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init tx_fs, err=%d\n", err);
+ goto err;
+ }
+
+ err = macsec_fs_rx_init(macsec_fs);
+ if (err) {
+ netdev_err(netdev, "MACsec offload: Failed to init rx_fs, err=%d\n", err);
+ goto tx_cleanup;
+ }
+
+ return macsec_fs;
+
+tx_cleanup:
+ macsec_fs_tx_cleanup(macsec_fs);
+err:
+ kfree(macsec_fs);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
new file mode 100644
index 000000000000..b429648d4ee7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_MACSEC_STEERING_H__
+#define __MLX5_MACSEC_STEERING_H__
+
+#ifdef CONFIG_MLX5_EN_MACSEC
+
+#include "en_accel/macsec.h"
+
+#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
+
+struct mlx5e_macsec_fs;
+union mlx5e_macsec_rule;
+
+struct mlx5_macsec_rule_attrs {
+ sci_t sci;
+ u32 macsec_obj_id;
+ u8 assoc_num;
+ int action;
+};
+
+enum mlx5_macsec_action {
+ MLX5_ACCEL_MACSEC_ACTION_ENCRYPT,
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
+};
+
+void mlx5e_macsec_fs_cleanup(struct mlx5e_macsec_fs *macsec_fs);
+
+struct mlx5e_macsec_fs *
+mlx5e_macsec_fs_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
+
+union mlx5e_macsec_rule *
+mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
+ const struct macsec_context *ctx,
+ struct mlx5_macsec_rule_attrs *attrs,
+ u32 *sa_fs_id);
+
+void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
+ union mlx5e_macsec_rule *macsec_rule,
+ int action);
+
+void mlx5e_macsec_fs_get_stats_fill(struct mlx5e_macsec_fs *macsec_fs, void *macsec_stats);
+
+#endif
+
+#endif /* __MLX5_MACSEC_STEERING_H__ */
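
The header keeps the steering objects opaque and routes everything through a small lifecycle API; mlx5e_macsec_fs_add_rule()/..._del_rule() pick the Tx or Rx rule set based on the action. A trivial userspace model of that dispatch, with hypothetical stand-in names:

#include <stdio.h>

/* Mirrors the shape of enum mlx5_macsec_action; the helpers are hypothetical. */
enum model_action {
	MODEL_ACTION_ENCRYPT,	/* Tx SA -> Tx steering tables */
	MODEL_ACTION_DECRYPT,	/* Rx SA -> Rx steering tables */
};

static void model_add_tx_rule(void) { puts("add Tx rule"); }
static void model_add_rx_rule(void) { puts("add Rx rule"); }

/* Same dispatch shape as mlx5e_macsec_fs_add_rule()/..._del_rule(). */
static void model_add_rule(enum model_action action)
{
	if (action == MODEL_ACTION_ENCRYPT)
		model_add_tx_rule();
	else
		model_add_rx_rule();
}

int main(void)
{
	model_add_rule(MODEL_ACTION_ENCRYPT);
	model_add_rule(MODEL_ACTION_DECRYPT);
	return 0;
}
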
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
new file mode 100644
index 000000000000..e50a2e3f3d18
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/ethtool.h>
+#include <net/sock.h>
+
+#include "en.h"
+#include "en_accel/macsec.h"
+
+static const struct counter_desc mlx5e_macsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_rx_bytes_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_pkts_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_macsec_stats, macsec_tx_bytes_drop) },
+};
+
+#define NUM_MACSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_macsec_hw_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(macsec_hw)
+{
+ if (!priv->macsec)
+ return 0;
+
+ if (mlx5e_is_macsec_device(priv->mdev))
+ return NUM_MACSEC_HW_COUNTERS;
+
+ return 0;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(macsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
+{
+ unsigned int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_macsec_hw_stats_desc[i].format);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
+{
+ int i;
+
+ if (!priv->macsec)
+ return idx;
+
+ if (!mlx5e_is_macsec_device(priv->mdev))
+ return idx;
+
+ mlx5e_macsec_get_stats_fill(priv->macsec, mlx5e_macsec_get_stats(priv->macsec));
+ for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5e_macsec_get_stats(priv->macsec),
+ mlx5e_macsec_hw_stats_desc,
+ i);
+
+ return idx;
+}
+
+MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
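
The fill_strings and fill_stats callbacks walk the same descriptor table. The sketch below models the usual descriptor pattern behind MLX5E_DECLARE_STAT()/MLX5E_READ_CTR64_CPU() — a (name, struct offset) pair per counter — with the macro internals treated as an assumption rather than quoted from en_stats.h.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct model_stats {
	uint64_t rx_pkts;
	uint64_t rx_bytes;
};

struct model_counter_desc {
	const char *format;
	size_t offset;
};

#define MODEL_DECLARE_STAT(type, fld)	{ #fld, offsetof(type, fld) }

static const struct model_counter_desc model_desc[] = {
	MODEL_DECLARE_STAT(struct model_stats, rx_pkts),
	MODEL_DECLARE_STAT(struct model_stats, rx_bytes),
};

/* Read the u64 counter that a descriptor points at inside the stats struct. */
static uint64_t model_read_ctr64(const struct model_stats *s,
				 const struct model_counter_desc *d)
{
	uint64_t v;

	memcpy(&v, (const char *)s + d->offset, sizeof(v));
	return v;
}

int main(void)
{
	struct model_stats s = { .rx_pkts = 10, .rx_bytes = 15000 };
	size_t i;

	for (i = 0; i < sizeof(model_desc) / sizeof(model_desc[0]); i++)
		printf("%s: %llu\n", model_desc[i].format,
		       (unsigned long long)model_read_ctr64(&s, &model_desc[i]));
	return 0;
}
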
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index cd7f245dcf14..0ae1865086ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -114,47 +114,49 @@ static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
}
}
-static int arfs_disable(struct mlx5e_priv *priv)
+static int arfs_disable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
int err, i;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
- err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
+ err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] default destination failed, err(%d)\n",
- __func__, arfs_get_tt(i), err);
+ fs_err(fs,
+ "%s: modify ttc[%d] default destination failed, err(%d)\n",
+ __func__, arfs_get_tt(i), err);
return err;
}
}
return 0;
}
-static void arfs_del_rules(struct mlx5e_priv *priv);
+static void arfs_del_rules(struct mlx5e_flow_steering *fs);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
- arfs_del_rules(priv);
+ arfs_del_rules(fs);
- return arfs_disable(priv);
+ return arfs_disable(fs);
}
-int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5_flow_destination dest = {};
int err, i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
+ dest.ft = arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
- err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
+ err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
if (err) {
- netdev_err(priv->netdev,
- "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
- __func__, arfs_get_tt(i), err);
- arfs_disable(priv);
+ fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
+ __func__, arfs_get_tt(i), err);
+ arfs_disable(fs);
return err;
}
}
@@ -167,31 +169,37 @@ static void arfs_destroy_table(struct arfs_table *arfs_t)
mlx5e_destroy_flow_table(&arfs_t->ft);
}
-static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
+static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
int i;
- arfs_del_rules(priv);
- destroy_workqueue(priv->fs->arfs->wq);
+ arfs_del_rules(fs);
+ destroy_workqueue(arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
- arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
+ if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
+ arfs_destroy_table(&arfs->arfs_tables[i]);
}
}
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
+
+ if (!ntuple)
return;
- _mlx5e_cleanup_tables(priv);
- kvfree(priv->fs->arfs);
+ _mlx5e_cleanup_tables(fs);
+ mlx5e_fs_set_arfs(fs, NULL);
+ kvfree(arfs);
}
-static int arfs_add_default_rule(struct mlx5e_priv *priv,
+static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
- struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
+ struct arfs_table *arfs_t = &arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
@@ -200,23 +208,21 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
tt = arfs_get_tt(type);
if (tt == -EINVAL) {
- netdev_err(priv->netdev, "%s: bad arfs_type: %d\n",
- __func__, type);
+ fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
return -EINVAL;
}
/* FIXME: Must use mlx5_ttc_get_default_dest(),
* but can't since TTC default is not setup yet !
*/
- dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
+ dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
&flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
- netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
- __func__, type);
+ fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
}
return err;
@@ -318,10 +324,12 @@ out:
return err;
}
-static int arfs_create_table(struct mlx5e_priv *priv,
+static int arfs_create_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
enum arfs_type type)
{
- struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -332,7 +340,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -343,7 +351,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
if (err)
goto err;
- err = arfs_add_default_rule(priv, type);
+ err = arfs_add_default_rule(fs, rx_res, type);
if (err)
goto err;
@@ -353,35 +361,40 @@ err:
return err;
}
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
{
+ struct mlx5e_arfs_tables *arfs;
int err = -ENOMEM;
int i;
- if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ if (!ntuple)
return 0;
- priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
- if (!priv->fs->arfs)
+ arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
+ if (!arfs)
return -ENOMEM;
- spin_lock_init(&priv->fs->arfs->arfs_lock);
- INIT_LIST_HEAD(&priv->fs->arfs->rules);
- priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
- if (!priv->fs->arfs->wq)
+ spin_lock_init(&arfs->arfs_lock);
+ INIT_LIST_HEAD(&arfs->rules);
+ arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!arfs->wq)
goto err;
+ mlx5e_fs_set_arfs(fs, arfs);
+
for (i = 0; i < ARFS_NUM_TYPES; i++) {
- err = arfs_create_table(priv, i);
+ err = arfs_create_table(fs, rx_res, i);
if (err)
goto err_des;
}
return 0;
err_des:
- _mlx5e_cleanup_tables(priv);
+ _mlx5e_cleanup_tables(fs);
err:
- kvfree(priv->fs->arfs);
+ mlx5e_fs_set_arfs(fs, NULL);
+ kvfree(arfs);
return err;
}
@@ -389,6 +402,7 @@ err:
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *arfs_rule;
struct hlist_node *htmp;
HLIST_HEAD(del_list);
@@ -396,8 +410,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
- spin_lock_bh(&priv->fs->arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
@@ -408,7 +422,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
- spin_unlock_bh(&priv->fs->arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
@@ -417,20 +431,21 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
}
}
-static void arfs_del_rules(struct mlx5e_priv *priv)
+static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
struct hlist_node *htmp;
struct arfs_rule *rule;
HLIST_HEAD(del_list);
int i;
int j;
- spin_lock_bh(&priv->fs->arfs->arfs_lock);
- mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
+ spin_lock_bh(&arfs->arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
- spin_unlock_bh(&priv->fs->arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
@@ -474,7 +489,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
- struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
@@ -588,13 +603,15 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule,
arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv;
+ struct mlx5e_arfs_tables *arfs;
struct mlx5_flow_handle *rule;
+ arfs = mlx5e_fs_get_arfs(priv->fs);
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&priv->fs->arfs->arfs_lock);
+ spin_lock_bh(&arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&priv->fs->arfs->arfs_lock);
+ spin_unlock_bh(&arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
@@ -620,6 +637,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
+ struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
struct arfs_rule *rule;
struct arfs_tuple *tuple;
@@ -647,7 +665,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
- rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
+ rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
@@ -691,11 +709,12 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
- struct arfs_table *arfs_t;
+ struct mlx5e_arfs_tables *arfs;
struct arfs_rule *arfs_rule;
+ struct arfs_table *arfs_t;
struct flow_keys fk;
+ arfs = mlx5e_fs_get_arfs(priv->fs);
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
@@ -725,7 +744,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
- queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
+ queue_work(arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index c0f409c195bf..68f19324db93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -46,8 +46,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
-static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- u32 *mkey)
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index b811207fe5ed..24aa25da482b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -38,18 +38,19 @@
#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
+#include "en/fs_ethtool.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev->board_id);
- strlcpy(drvinfo->bus_info, dev_name(mdev->device),
+ strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
@@ -310,7 +311,15 @@ void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param)
{
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ /* Limitation for regular RQ. XSK RQ may clamp the queue length in
+ * mlx5e_mpwqe_get_log_rq_size.
+ */
+ u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev,
+ PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED);
+
+ param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
+ max_log_mpwrq_pkts);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
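
The comment in the hunk above explains that the advertised RX ring maximum is now the smaller of the generic driver limit and the MPWRQ per-WQE packet limit of the regular RQ. A one-line arithmetic sketch with illustrative values (not the driver's constants):

#include <stdio.h>

int main(void)
{
	/* Illustrative values only. */
	unsigned int log_rq_size_max = 13;	/* stand-in for MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE */
	unsigned int max_log_mpwrq_pkts = 11;	/* stand-in for mlx5e_mpwrq_max_log_rq_pkts() */
	unsigned int log_max = log_rq_size_max < max_log_mpwrq_pkts ?
			       log_rq_size_max : max_log_mpwrq_pkts;

	/* Same clamp as the new rx_max_pending computation above. */
	printf("rx_max_pending = %u\n", 1u << log_max);
	return 0;
}
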
@@ -494,14 +503,14 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
- mlx5e_arfs_disable(priv);
+ mlx5e_arfs_disable(priv->fs);
/* Switch to new channels, set new parameters and close old ones */
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
- int err2 = mlx5e_arfs_enable(priv);
+ int err2 = mlx5e_arfs_enable(priv->fs);
if (err2)
netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
@@ -1996,10 +2005,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
struct mlx5e_params new_params;
if (enable) {
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
- return -EOPNOTSUPP;
- if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
- return -EINVAL;
+ /* Checking the regular RQ here; mlx5e_validate_xsk_param called
+ * from mlx5e_open_xsk will check for each XSK queue, and
+ * mlx5e_safe_switch_params will be reverted if any check fails.
+ */
+ int err = mlx5e_mpwrq_validate_regular(mdev, &priv->channels.params);
+
+ if (err)
+ return err;
} else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e0ce5a233d0b..1892ccb889b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -36,10 +36,38 @@
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
-#include "en.h"
#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
+#include "en/fs_ethtool.h"
+
+struct mlx5e_flow_steering {
+ struct work_struct set_rx_mode_work;
+ bool state_destroy;
+ bool vlan_strip_disable;
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_namespace *egress_ns;
+#ifdef CONFIG_MLX5_EN_RXNFC
+ struct mlx5e_ethtool_steering *ethtool;
+#endif
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_promisc_table promisc;
+ struct mlx5e_vlan_table *vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_ttc_table *inner_ttc;
+#ifdef CONFIG_MLX5_EN_ARFS
+ struct mlx5e_arfs_tables *arfs;
+#endif
+#ifdef CONFIG_MLX5_EN_TLS
+ struct mlx5e_accel_fs_tcp *accel_tcp;
+#endif
+ struct mlx5e_fs_udp *udp;
+ struct mlx5e_fs_any *any;
+ struct mlx5e_ptp_fs *ptp_fs;
+};
static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_l2_rule *ai, int type);
@@ -148,9 +176,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);
if (list_size > max_list_size) {
- mlx5_core_warn(fs->mdev,
- "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
- list_size, max_list_size);
+ fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
list_size = max_list_size;
}
@@ -167,8 +194,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
if (err)
- mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n",
- err);
+ fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
+ err);
kvfree(vlans);
return err;
@@ -249,7 +276,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__);
+ fs_err(fs, "%s: add rule failed\n", __func__);
}
return err;
@@ -351,78 +378,78 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
return rule;
}
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs->vlan->ft.t;
+ struct mlx5_flow_table *ft = fs->vlan->ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs->vlan->trap_rule = NULL;
- mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ fs->vlan->trap_rule = NULL;
+ fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs->vlan->trap_rule = rule;
+ fs->vlan->trap_rule = rule;
return 0;
}
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
{
- if (priv->fs->vlan->trap_rule) {
- mlx5_del_flow_rules(priv->fs->vlan->trap_rule);
- priv->fs->vlan->trap_rule = NULL;
+ if (fs->vlan->trap_rule) {
+ mlx5_del_flow_rules(fs->vlan->trap_rule);
+ fs->vlan->trap_rule = NULL;
}
}
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
- struct mlx5_flow_table *ft = priv->fs->l2.ft.t;
+ struct mlx5_flow_table *ft = fs->l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- priv->fs->l2.trap_rule = NULL;
- mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ fs->l2.trap_rule = NULL;
+ fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
return err;
}
- priv->fs->l2.trap_rule = rule;
+ fs->l2.trap_rule = rule;
return 0;
}
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
{
- if (priv->fs->l2.trap_rule) {
- mlx5_del_flow_rules(priv->fs->l2.trap_rule);
- priv->fs->l2.trap_rule = NULL;
+ if (fs->l2.trap_rule) {
+ mlx5_del_flow_rules(fs->l2.trap_rule);
+ fs->l2.trap_rule = NULL;
}
}
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (!priv->fs->vlan->cvlan_filter_disabled)
+ if (!fs->vlan->cvlan_filter_disabled)
return;
- priv->fs->vlan->cvlan_filter_disabled = false;
- if (priv->netdev->flags & IFF_PROMISC)
+ fs->vlan->cvlan_filter_disabled = false;
+ if (promisc)
return;
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (priv->fs->vlan->cvlan_filter_disabled)
+ if (fs->vlan->cvlan_filter_disabled)
return;
- priv->fs->vlan->cvlan_filter_disabled = true;
- if (priv->netdev->flags & IFF_PROMISC)
+ fs->vlan->cvlan_filter_disabled = true;
+ if (promisc)
return;
- mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
@@ -462,7 +489,7 @@ int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
{
if (!fs->vlan) {
- mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ fs_err(fs, "Vlan doesn't exist\n");
return -EINVAL;
}
@@ -479,7 +506,7 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
__be16 proto, u16 vid)
{
if (!fs->vlan) {
- mlx5_core_err(fs->mdev, "Vlan doesn't exist\n");
+ fs_err(fs, "Vlan doesn't exist\n");
return -EINVAL;
}
@@ -512,28 +539,28 @@ static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
mlx5e_fs_add_any_vid_rules(fs);
}
-static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
int i;
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) {
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID)
- mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+ for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
+ mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
- WARN_ON_ONCE(priv->fs->state_destroy);
+ WARN_ON_ONCE(fs->state_destroy);
- mlx5e_remove_vlan_trap(priv);
+ mlx5e_remove_vlan_trap(fs);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
- if (priv->fs->vlan->cvlan_filter_disabled)
- mlx5e_fs_del_any_vid_rules(priv->fs);
+ if (fs->vlan->cvlan_filter_disabled)
+ mlx5e_fs_del_any_vid_rules(fs);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
@@ -568,8 +595,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
}
if (l2_err)
- mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
+ fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
+ action == MLX5E_ACTION_ADD ? "add" : "del",
+ mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
@@ -640,9 +668,8 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
size++;
if (size > max_size) {
- mlx5_core_warn(fs->mdev,
- "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
- is_uc ? "UC" : "MC", size, max_size);
+ fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
size = max_size;
}
@@ -658,9 +685,8 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
if (err)
- mlx5_core_err(fs->mdev,
- "Failed to modify vport %s list err(%d)\n",
- is_uc ? "UC" : "MC", err);
+ fs_err(fs, "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
kfree(addr_array);
}
@@ -730,7 +756,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__);
+ fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
@@ -750,7 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
- mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err);
+ fs_err(fs, "fail to create promisc table err=%d\n", err);
return err;
}
@@ -807,8 +833,8 @@ void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
if (err)
enable_promisc = false;
if (!fs->vlan_strip_disable && !err)
- mlx5_core_warn_once(fs->mdev,
- "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
+ fs_warn_once(fs,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
@@ -856,14 +882,15 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
ft->t = NULL;
}
-static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
+static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -872,13 +899,14 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
- mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res,
+ mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss_inner(rx_res,
tt);
}
}
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
struct ttc_params *ttc_params, bool tunnel)
{
@@ -886,7 +914,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev,
+ ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -895,19 +923,19 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) :
- mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt);
+ mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
+ mlx5e_rx_res_get_tirn_rss(rx_res, tt);
}
ttc_params->inner_ttc = tunnel;
- if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
+ if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
ttc_params->tunnel_dests[tt].type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
ttc_params->tunnel_dests[tt].ft =
- mlx5_get_ttc_flow_table(priv->fs->inner_ttc);
+ mlx5_get_ttc_flow_table(fs->inner_ttc);
}
}
@@ -959,8 +987,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n",
- __func__, mv_dmac);
+ fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
@@ -1044,14 +1071,14 @@ err_destroy_groups:
return err;
}
-static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
{
- mlx5e_destroy_flow_table(&priv->fs->l2.ft);
+ mlx5e_destroy_flow_table(&fs->l2.ft);
}
-static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
{
- struct mlx5e_l2_table *l2_table = &priv->fs->l2;
+ struct mlx5e_l2_table *l2_table = &fs->l2;
struct mlx5e_flow_table *ft = &l2_table->ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
@@ -1062,7 +1089,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_L2_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
- ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
+ ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -1221,126 +1248,128 @@ err_destroy_vlan_table:
return err;
}
-static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
{
- mlx5e_del_vlan_rules(priv);
- mlx5e_destroy_flow_table(&priv->fs->vlan->ft);
+ mlx5e_del_vlan_rules(fs);
+ mlx5e_destroy_flow_table(&fs->vlan->ft);
}
-static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
{
- if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return;
- mlx5_destroy_ttc_table(priv->fs->inner_ttc);
+ mlx5_destroy_ttc_table(fs->inner_ttc);
}
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
- mlx5_destroy_ttc_table(priv->fs->ttc);
+ mlx5_destroy_ttc_table(fs->ttc);
}
-static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
- if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev))
+ if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
return 0;
- mlx5e_set_inner_ttc_params(priv, &ttc_params);
- priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev,
- &ttc_params);
- if (IS_ERR(priv->fs->inner_ttc))
- return PTR_ERR(priv->fs->inner_ttc);
+ mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
+ fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
+ &ttc_params);
+ if (IS_ERR(fs->inner_ttc))
+ return PTR_ERR(fs->inner_ttc);
return 0;
}
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res)
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(priv, &ttc_params, true);
- priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params);
- if (IS_ERR(priv->fs->ttc))
- return PTR_ERR(priv->fs->ttc);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
+ if (IS_ERR(fs->ttc))
+ return PTR_ERR(fs->ttc);
return 0;
}
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev)
{
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL);
int err;
- priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
-
- if (!priv->fs->ns)
+ if (!ns)
return -EOPNOTSUPP;
- err = mlx5e_arfs_create_tables(priv);
+ mlx5e_fs_set_ns(fs, ns, false);
+ err = mlx5e_arfs_create_tables(fs, rx_res,
+ !!(netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n",
- err);
- priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
+ netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_inner_ttc_table(priv);
+ err = mlx5e_create_inner_ttc_table(fs, rx_res);
if (err) {
- mlx5_core_err(priv->fs->mdev,
- "Failed to create inner ttc table, err=%d\n", err);
+ fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
goto err_destroy_arfs_tables;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(fs, rx_res);
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create ttc table, err=%d\n", err);
goto err_destroy_inner_ttc_table;
}
- err = mlx5e_create_l2_table(priv);
+ err = mlx5e_create_l2_table(fs);
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create l2 table, err=%d\n", err);
goto err_destroy_ttc_table;
}
- err = mlx5e_fs_create_vlan_table(priv->fs);
+ err = mlx5e_fs_create_vlan_table(fs);
if (err) {
- mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n",
- err);
+ fs_err(fs, "Failed to create vlan table, err=%d\n", err);
goto err_destroy_l2_table;
}
- err = mlx5e_ptp_alloc_rx_fs(priv);
+ err = mlx5e_ptp_alloc_rx_fs(fs, profile);
if (err)
goto err_destory_vlan_table;
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(fs);
return 0;
err_destory_vlan_table:
- mlx5e_destroy_vlan_table(priv);
+ mlx5e_destroy_vlan_table(fs);
err_destroy_l2_table:
- mlx5e_destroy_l2_table(priv);
+ mlx5e_destroy_l2_table(fs);
err_destroy_ttc_table:
- mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_ttc_table(fs);
err_destroy_inner_ttc_table:
- mlx5e_destroy_inner_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv);
+ mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile)
{
- mlx5e_ptp_free_rx_fs(priv);
- mlx5e_destroy_vlan_table(priv);
- mlx5e_destroy_l2_table(priv);
- mlx5e_destroy_ttc_table(priv);
- mlx5e_destroy_inner_ttc_table(priv);
- mlx5e_arfs_destroy_tables(priv);
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_ptp_free_rx_fs(fs, profile);
+ mlx5e_destroy_vlan_table(fs);
+ mlx5e_destroy_l2_table(fs);
+ mlx5e_destroy_ttc_table(fs);
+ mlx5e_destroy_inner_ttc_table(fs);
+ mlx5e_arfs_destroy_tables(fs, ntuple);
+ mlx5e_ethtool_cleanup_steering(fs);
}
static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
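
mlx5e_create_flow_steering()/mlx5e_destroy_flow_steering() above follow the usual goto-unwind convention: each successful step gains a matching label in the error path and teardown mirrors creation in reverse. A minimal standalone model of that pattern, with hypothetical step names:

#include <stdio.h>

/* Hypothetical two-step setup; the step names are illustrative. */
static int create_tables(void)  { puts("create tables");  return 0; }
static int create_rules(void)   { puts("create rules");   return -1; /* simulate failure */ }
static void destroy_tables(void) { puts("destroy tables"); }

static int model_create_flow_steering(void)
{
	int err;

	err = create_tables();
	if (err)
		return err;

	err = create_rules();
	if (err)
		goto err_destroy_tables;

	return 0;

err_destroy_tables:
	destroy_tables();	/* unwind only what was already created */
	return err;
}

int main(void)
{
	return model_create_flow_steering() ? 1 : 0;
}
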
@@ -1356,6 +1385,11 @@ static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
kvfree(fs->vlan);
}
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
+{
+ return fs->vlan;
+}
+
static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
{
fs->tc = mlx5e_tc_table_alloc();
@@ -1369,6 +1403,32 @@ static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
mlx5e_tc_table_free(fs->tc);
}
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
+{
+ return fs->tc;
+}
+
+#ifdef CONFIG_MLX5_EN_RXNFC
+static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
+{
+ return mlx5e_ethtool_alloc(&fs->ethtool);
+}
+
+static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
+{
+ mlx5e_ethtool_free(fs->ethtool);
+}
+
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
+{
+ return fs->ethtool;
+}
+#else
+static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
+{ return 0; }
+static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
+#endif
+
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy)
@@ -1394,8 +1454,13 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
goto err_free_vlan;
}
- return fs;
+ err = mlx5e_fs_ethtool_alloc(fs);
+ if (err)
+ goto err_free_tc;
+ return fs;
+err_free_tc:
+ mlx5e_fs_tc_free(fs);
err_free_vlan:
mlx5e_fs_vlan_free(fs);
err_free_fs:
@@ -1406,7 +1471,109 @@ err:
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
+ mlx5e_fs_ethtool_free(fs);
mlx5e_fs_tc_free(fs);
mlx5e_fs_vlan_free(fs);
kvfree(fs);
}
+
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
+{
+ return &fs->l2;
+}
+
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
+{
+ return egress ? fs->egress_ns : fs->ns;
+}
+
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
+{
+ if (!egress)
+ fs->ns = ns;
+ else
+ fs->egress_ns = ns;
+}
+
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
+{
+ return inner ? fs->inner_ttc : fs->ttc;
+}
+
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
+{
+ if (!inner)
+ fs->ttc = ttc;
+ else
+ fs->inner_ttc = ttc;
+}
+
+#ifdef CONFIG_MLX5_EN_ARFS
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
+{
+ return fs->arfs;
+}
+
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
+{
+ fs->arfs = arfs;
+}
+#endif
+
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
+{
+ return fs->ptp_fs;
+}
+
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
+{
+ fs->ptp_fs = ptp_fs;
+}
+
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
+{
+ return fs->any;
+}
+
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
+{
+ fs->any = any;
+}
+
+#ifdef CONFIG_MLX5_EN_TLS
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
+{
+ return fs->accel_tcp;
+}
+
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
+{
+ fs->accel_tcp = accel_tcp;
+}
+#endif
+
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
+{
+ fs->state_destroy = state_destroy;
+}
+
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
+ bool vlan_strip_disable)
+{
+ fs->vlan_strip_disable = vlan_strip_disable;
+}
+
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
+{
+ return fs->udp;
+}
+
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
+{
+ fs->udp = udp;
+}
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
+{
+ return fs->mdev;
+}
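
With this change en_fs.c owns the struct mlx5e_flow_steering layout and the rest of the driver goes through accessors such as mlx5e_fs_get_ttc() and mlx5e_fs_set_ns(). The sketch below models that opaque-handle pattern in plain C; every name in it is illustrative, not driver API.

#include <stdio.h>
#include <stdlib.h>

/* "Header" view: forward declaration plus accessors only. */
struct model_fs;
struct model_fs *model_fs_init(int mdev_id);
int model_fs_get_mdev_id(const struct model_fs *fs);
void model_fs_set_vlan_strip_disable(struct model_fs *fs, int disable);
void model_fs_cleanup(struct model_fs *fs);

/* "Implementation file" view: the only place that knows the layout. */
struct model_fs {
	int mdev_id;
	int vlan_strip_disable;
};

struct model_fs *model_fs_init(int mdev_id)
{
	struct model_fs *fs = calloc(1, sizeof(*fs));

	if (fs)
		fs->mdev_id = mdev_id;
	return fs;
}

int model_fs_get_mdev_id(const struct model_fs *fs)
{
	return fs->mdev_id;
}

void model_fs_set_vlan_strip_disable(struct model_fs *fs, int disable)
{
	fs->vlan_strip_disable = disable;
}

void model_fs_cleanup(struct model_fs *fs)
{
	free(fs);
}

int main(void)
{
	struct model_fs *fs = model_fs_init(7);

	if (!fs)
		return 1;
	model_fs_set_vlan_strip_disable(fs, 1);
	printf("mdev id: %d\n", model_fs_get_mdev_id(fs));
	model_fs_cleanup(fs);
	return 0;
}
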
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3e4bc7836ef4..aac32e505c14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -34,6 +34,22 @@
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
+#include "en/fs_ethtool.h"
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
static int flow_type_to_traffic_type(u32 flow_type);
@@ -66,6 +82,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
@@ -81,18 +98,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
+ eth_ft = &ethtool->l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
- eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
+ eth_ft = &ethtool->l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
- eth_ft = &priv->fs->ethtool.l2_ft[prio];
+ eth_ft = &ethtool->l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
@@ -382,15 +399,16 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
+ struct list_head *head = &ethtool->rules;
struct mlx5e_ethtool_rule *iter;
- struct list_head *head = &priv->fs->ethtool.rules;
- list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
+ list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
- priv->fs->ethtool.tot_num_rules++;
+ ethtool->tot_num_rules++;
list_add(&rule->list, head);
}
@@ -433,15 +451,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
eth_rule->rss = rss;
mlx5e_rss_refcnt_inc(eth_rule->rss);
} else {
- struct mlx5e_params *params = &priv->channels.params;
- enum mlx5e_rq_group group;
- u16 ix;
-
- mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);
-
- *tirn = group == MLX5E_RQ_GROUP_XSK ?
- mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
- mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
+ *tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie);
}
return 0;
@@ -499,15 +509,16 @@ free:
return err ? ERR_PTR(err) : rule;
}
-static void del_ethtool_rule(struct mlx5e_priv *priv,
+static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_ethtool_rule *eth_rule)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
if (eth_rule->rule)
mlx5_del_flow_rules(eth_rule->rule);
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
- priv->fs->ethtool.tot_num_rules--;
+ ethtool->tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
@@ -515,9 +526,10 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *iter;
- list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
+ list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
@@ -531,7 +543,7 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
eth_rule = find_ethtool_rule(priv, location);
if (eth_rule)
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
if (!eth_rule)
@@ -662,8 +674,7 @@ static int validate_flow(struct mlx5e_priv *priv,
return -ENOSPC;
if (fs->ring_cookie != RX_CLS_FLOW_DISC)
- if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
- fs->ring_cookie))
+ if (fs->ring_cookie >= priv->channels.params.num_channels)
return -EINVAL;
switch (flow_type_mask(fs->flow_type)) {
@@ -754,7 +765,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
return 0;
del_ethtool_rule:
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
return err;
}
@@ -774,7 +785,7 @@ mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
goto out;
}
- del_ethtool_rule(priv, eth_rule);
+ del_ethtool_rule(priv->fs, eth_rule);
out:
return err;
}
@@ -783,12 +794,13 @@ static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, int location)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *eth_rule;
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
- list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
+ list_for_each_entry(eth_rule, &ethtool->rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
@@ -826,18 +838,34 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
return err;
}
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{
+ *ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
+ if (!*ethtool)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
+{
+ kvfree(ethtool);
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
+{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
- list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
- del_ethtool_rule(priv, iter);
+ list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
+ del_ethtool_rule(fs, iter);
}
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
- INIT_LIST_HEAD(&priv->fs->ethtool.rules);
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
+
+ INIT_LIST_HEAD(&ethtool->rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
@@ -959,11 +987,12 @@ int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
+ struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
- info->rule_cnt = priv->fs->ethtool.tot_num_rules;
+ info->rule_cnt = ethtool->tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 02eb2f0fa2ae..364f04309149 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -45,6 +45,7 @@
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/vxlan.h"
@@ -67,22 +68,25 @@
#include "qos.h"
#include "en/trap.h"
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode)
{
- bool striding_rq_umr, inline_umr;
- u16 max_wqe_sz_cap;
+ u16 umr_wqebbs, max_wqebbs;
+ bool striding_rq_umr;
striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
- max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
- inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
if (!striding_rq_umr)
return false;
- if (!inline_umr) {
- mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
- (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
+
+ umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
+ max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
+ /* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
+ * calculated from mlx5e_get_max_sq_aligned_wqebbs.
+ */
+ if (WARN_ON(umr_wqebbs > max_wqebbs))
return false;
- }
+
return true;
}
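
The check above replaces the old inline-UMR size comparison with a WQEBB budget: the UMR WQE built for the chosen page shift and mode must fit within the SQ's maximum aligned WQEBB count. A standalone sketch of that budget arithmetic, assuming a 64-byte send WQE basic block (MLX5_SEND_WQE_BB) and illustrative sizes:

	#include <stdbool.h>
	#include <stdio.h>

	#define WQEBB 64 /* bytes per send WQE basic block (assumed) */

	/* Round a UMR WQE size in bytes up to basic blocks and test it
	 * against the SQ budget, mirroring the check above.
	 */
	static bool umr_wqe_fits(unsigned int umr_wqe_bytes, unsigned int max_wqebbs)
	{
		unsigned int umr_wqebbs = (umr_wqe_bytes + WQEBB - 1) / WQEBB;

		return umr_wqebbs <= max_wqebbs;
	}

	int main(void)
	{
		printf("%d\n", umr_wqe_fits(48 + 64 * 8, 16));   /* small WQE, fits */
		printf("%d\n", umr_wqe_fits(48 + 512 * 16, 16)); /* too many entries */
		return 0;
	}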
@@ -199,21 +203,35 @@ static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
}
+static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
+
+ WARN_ON_ONCE(entries * umr_entry_size % MLX5_OCTWORD);
+
+ return entries * umr_entry_size / MLX5_OCTWORD;
+}
+
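
The helper above is plain fixed-point arithmetic: how many 16-byte octowords are needed to hold the UMR translation entries, with the WARN_ON_ONCE asserting that the product is a whole number of octowords. A minimal standalone sketch of the same arithmetic, assuming MLX5_OCTWORD is 16 bytes and an 8-byte (MTT) or 16-byte (KSM/KLM) entry size, which are not shown in this hunk:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OCTWORD 16 /* bytes; assumed to match MLX5_OCTWORD */

	static uint16_t umr_octowords(uint32_t entries, uint8_t entry_size)
	{
		/* entries * entry_size must be a whole number of octowords */
		assert(entries * entry_size % OCTWORD == 0);
		return entries * entry_size / OCTWORD;
	}

	int main(void)
	{
		/* e.g. 64 pages per WQE, 8-byte MTT entries -> 32 octowords */
		printf("%u\n", umr_octowords(64, 8));
		return 0;
	}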
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
+ u16 octowords;
+ u8 ds_cnt;
+
+ ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode),
+ MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
- cseg->umr_mkey = rq->mkey_be;
+ cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- ucseg->xlt_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
+ ucseg->xlt_octowords = cpu_to_be16(octowords);
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
@@ -259,10 +277,12 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ size_t alloc_size;
+
+ alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, alloc_units,
+ rq->mpwqe.pages_per_wqe));
- rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
- sizeof(*rq->mpwqe.info)),
- GFP_KERNEL, node);
+ rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
if (!rq->mpwqe.info)
return -ENOMEM;
@@ -271,18 +291,52 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
-static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift, u32 *umr_mkey,
- dma_addr_t filler_addr)
+
+static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
+{
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ return MLX5_MKC_ACCESS_MODE_MTT;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ return MLX5_MKC_ACCESS_MODE_KSM;
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ return MLX5_MKC_ACCESS_MODE_KLMS;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ return MLX5_MKC_ACCESS_MODE_KSM;
+ }
+ WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
+ return 0;
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
+ u32 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr,
+ enum mlx5e_mpwrq_umr_mode umr_mode,
+ u32 xsk_chunk_size)
{
struct mlx5_mtt *mtt;
+ struct mlx5_ksm *ksm;
+ struct mlx5_klm *klm;
+ u32 octwords;
int inlen;
void *mkc;
u32 *in;
int err;
int i;
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages;
+ if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
+ umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
+ !MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
+ mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
+ return -EINVAL;
+ }
+
+ octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
+
+ inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
+ MLX5_OCTWORD, octwords);
+ if (inlen < 0)
+ return inlen;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
@@ -294,16 +348,17 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
- MLX5_SET(mkc, mkc, translations_octword_size,
- MLX5_MTT_OCTW(npages));
- MLX5_SET(mkc, mkc, log_page_size, page_shift);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- MLX5_MTT_OCTW(npages));
+ MLX5_SET(mkc, mkc, translations_octword_size, octwords);
+ if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
+ MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
+ else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
/* Initialize the mkey with all MTTs pointing to a default
* page (filler_addr). When the channels are activated, UMR
@@ -311,9 +366,47 @@ static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
* the RQ's pool, while the gaps (wqe_overflow) remain mapped
* to the default page.
*/
- mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- for (i = 0 ; i < npages ; i++)
- mtt[i].ptag = cpu_to_be64(filler_addr);
+ switch (umr_mode) {
+ case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
+ klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++) {
+ klm[i << 1] = (struct mlx5_klm) {
+ .va = cpu_to_be64(filler_addr),
+ .bcount = cpu_to_be32(xsk_chunk_size),
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ };
+ klm[(i << 1) + 1] = (struct mlx5_klm) {
+ .va = cpu_to_be64(filler_addr),
+ .bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ };
+ }
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
+ mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages; i++)
+ mtt[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(filler_addr),
+ };
+ break;
+ case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
+ ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ for (i = 0; i < npages * 4; i++) {
+ ksm[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
+ .va = cpu_to_be64(filler_addr),
+ };
+ }
+ break;
+ }
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
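
In the oversized layout above, each page is described by two KLM entries: the first covers xsk_chunk_size bytes and the second the rest of the page, so the mkey still maps whole pages while frames stay chunk-sized. A minimal sketch of that split, with plain integers standing in for the driver's mlx5_klm fields:

	#include <stdint.h>
	#include <stdio.h>

	struct klm_sketch { uint64_t va; uint32_t bcount; };

	/* Split one page into the (chunk, page - chunk) pair used by the
	 * oversized layout; the real entries also carry an mkey.
	 */
	static void fill_pair(struct klm_sketch pair[2], uint64_t va,
			      uint32_t page_size, uint32_t chunk)
	{
		pair[0] = (struct klm_sketch){ .va = va, .bcount = chunk };
		pair[1] = (struct klm_sketch){ .va = va, .bcount = page_size - chunk };
	}

	int main(void)
	{
		struct klm_sketch pair[2];

		fill_pair(pair, 0x1000, 4096, 3072);
		printf("%u + %u = one page\n", pair[0].bcount, pair[1].bcount);
		return 0;
	}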
@@ -356,10 +449,27 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
- u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
+ u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
+ u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ u32 num_entries, max_num_entries;
+ u32 umr_mkey;
+ int err;
+
+ max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
+
+ /* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
+ if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
+ &num_entries) ||
+ num_entries > max_num_entries))
+ mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
+ __func__, wq_size, rq->mpwqe.mtts_per_wqe,
+ max_num_entries);
- return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
- &rq->umr_mkey, rq->wqe_overflow.addr);
+ err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
+ &umr_mkey, rq->wqe_overflow.addr,
+ rq->mpwqe.umr_mode, xsk_chunk_size);
+ rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
+ return err;
}
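
The guard above multiplies the WQ size by the MTTs per WQE and checks both the multiplication itself and the per-mode entry budget. The same guard pattern in isolation, with the GCC/Clang builtin standing in for the kernel's check_mul_overflow and illustrative limits:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* True when wq_size * mtts_per_wqe fits in u32 and does not
	 * exceed the per-mode budget.
	 */
	static bool num_entries_ok(uint32_t wq_size, uint32_t mtts_per_wqe,
				   uint32_t max_entries, uint32_t *num_entries)
	{
		if (__builtin_mul_overflow(wq_size, mtts_per_wqe, num_entries))
			return false;
		return *num_entries <= max_entries;
	}

	int main(void)
	{
		uint32_t n;

		printf("%d\n", num_entries_ok(1024, 64, 1 << 20, &n));       /* 1 */
		printf("%d\n", num_entries_ok(UINT32_MAX, 2, 1 << 20, &n));  /* 0 */
		return 0;
	}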
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
@@ -376,18 +486,20 @@ static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
&rq->mpwqe.shampo->mkey);
}
-static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
-{
- return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
-}
-
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
struct mlx5e_wqe_frag_info *prev = NULL;
int i;
- next_frag.di = &rq->wqe.di[0];
+ if (rq->xsk_pool) {
+ /* Assumptions used by XSK batched allocator. */
+ WARN_ON(rq->wqe.info.num_frags != 1);
+ WARN_ON(rq->wqe.info.log_num_frags != 0);
+ WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
+ }
+
+ next_frag.au = &rq->wqe.alloc_units[0];
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -397,7 +509,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
- next_frag.di++;
+ next_frag.au++;
next_frag.offset = 0;
if (prev)
prev->last_in_page = true;
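
The partition walk above packs fragments onto pages by stride: when the next fragment would cross PAGE_SIZE it advances to a new allocation unit, resets the offset, and marks the previous fragment last_in_page. A simplified sketch of the same packing rule with a single fixed fragment size:

	#include <stdio.h>

	#define PAGE_SZ 4096

	/* Count how many pages nfrags fragments of 'stride' bytes occupy,
	 * starting a new page whenever the next fragment would cross a
	 * page boundary.
	 */
	static int pages_needed(int nfrags, int stride)
	{
		int pages = 1, offset = 0;

		if (nfrags <= 0)
			return 0;
		for (int i = 0; i < nfrags; i++) {
			if (offset + stride > PAGE_SZ) {
				pages++;
				offset = 0;
			}
			offset += stride;
		}
		return pages;
	}

	int main(void)
	{
		printf("%d\n", pages_needed(8, 1536)); /* two 1536B frags per page -> 4 pages */
		return 0;
	}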
@@ -414,12 +526,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static int mlx5e_init_au_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
- rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
- if (!rq->wqe.di)
+ rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
+ GFP_KERNEL, node);
+ if (!rq->wqe.alloc_units)
return -ENOMEM;
mlx5e_init_frags_partition(rq);
@@ -427,9 +540,9 @@ int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
return 0;
}
-void mlx5e_free_di_list(struct mlx5e_rq *rq)
+static void mlx5e_free_au_list(struct mlx5e_rq *rq)
{
- kvfree(rq->wqe.di);
+ kvfree(rq->wqe.alloc_units);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
@@ -485,7 +598,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
+ return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -572,6 +685,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
@@ -587,8 +702,20 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
- mlx5e_mpwqe_get_log_rq_size(params, xsk);
+ rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
+ rq->mpwqe.pages_per_wqe =
+ mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+ rq->mpwqe.umr_wqebbs =
+ mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+ rq->mpwqe.mtts_per_wqe =
+ mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
+ rq->mpwqe.umr_mode);
+
+ pool_size = rq->mpwqe.pages_per_wqe <<
+ mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
@@ -600,7 +727,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
@@ -608,7 +734,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
if (err)
- goto err_free_by_rq_type;
+ goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -633,11 +759,9 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, node);
+ err = mlx5e_init_au_list(rq, wq_sz, node);
if (err)
goto err_rq_frags;
-
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@ -662,14 +786,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
- goto err_free_shampo;
+ goto err_free_by_rq_type;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
- goto err_free_shampo;
+ goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -677,13 +801,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
- u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
+ rq->mpwqe.page_shift;
u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
0 : rq->buff.headroom;
wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
- wqe->data[0].lkey = rq->mkey_be;
+ wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
} else {
struct mlx5e_rx_wqe_cyc *wqe =
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
@@ -721,19 +846,21 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
return 0;
-err_free_shampo:
- mlx5e_rq_free_shampo(rq);
+err_destroy_page_pool:
+ page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ mlx5e_rq_free_shampo(rq);
+err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
err_rq_frags:
kvfree(rq->wqe.frags);
}
@@ -761,24 +888,22 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
- mlx5e_free_di_list(rq);
+ mlx5e_free_au_list(rq);
}
for (i = rq->page_cache.head; i != rq->page_cache.tail;
i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
- struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
/* With AF_XDP, page_cache is not used, so this loop is not
* entered, and it's safe to call mlx5e_page_release_dynamic
* directly.
*/
- mlx5e_page_release_dynamic(rq, dma_info->page, false);
+ mlx5e_page_release_dynamic(rq, rq->page_cache.page_cache[i], false);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
@@ -833,7 +958,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
return err;
}
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -862,6 +987,32 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
return err;
}
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+ struct net_device *dev = rq->netdev;
+ int err;
+
+ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+ return err;
+ }
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
+{
+ mlx5e_free_rx_descs(rq);
+
+ return mlx5e_rq_to_ready(rq, curr_state);
+}
+
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1154,9 +1305,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
- sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
- sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
+ sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
+ mlx5e_stop_room_for_max_wqe(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1231,7 +1382,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
- sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1317,8 +1467,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -2280,7 +2429,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
- netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2318,10 +2467,11 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_icosq(&c->async_icosq);
- mlx5e_activate_rq(&c->rq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
+ else
+ mlx5e_activate_rq(&c->rq);
mlx5e_trigger_napi_icosq(c);
}
@@ -2332,8 +2482,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
+ else
+ mlx5e_deactivate_rq(&c->rq);
- mlx5e_deactivate_rq(&c->rq);
mlx5e_deactivate_icosq(&c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
@@ -2425,8 +2576,6 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
mlx5e_ptp_activate_channel(chs->ptp);
}
-#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
-
static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
int err = 0;
@@ -2434,8 +2583,12 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
for (i = 0; i < chs->num; i++) {
int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
+ struct mlx5e_channel *c = chs->c[i];
- err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ continue;
+
+ err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
/* Don't wait on the XSK RQ, because the newer xdpsock sample
* doesn't provide any Fill Ring entries at the setup stage.
@@ -2600,7 +2753,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
int old_num_txqs, old_ntc;
- int num_rxqs, nch, ntc;
+ int nch, ntc;
int err;
int i;
@@ -2611,7 +2764,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.mqprio.num_tc;
- num_rxqs = nch * priv->profile->rq_groups;
tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
@@ -2620,7 +2772,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
- err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+ err = netif_set_real_num_rx_queues(netdev, nch);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
@@ -2738,7 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
- mlx5e_add_sqs_fwd_rules(priv);
+ mlx5e_rep_activate_channels(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2752,7 +2904,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
- mlx5e_remove_sqs_fwd_rules(priv);
+ mlx5e_rep_deactivate_channels(priv);
/* The results of ndo_select_queue are unreliable, while netdev config
* is being changed (real_num_tx_queues, num_tc). Stop all queues to
@@ -3547,7 +3699,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
- PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+ PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
+ VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
stats->rx_crc_errors =
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
@@ -3669,9 +3822,11 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
- mlx5e_enable_cvlan_filter(priv);
+ mlx5e_enable_cvlan_filter(priv->fs,
+ !!(priv->netdev->flags & IFF_PROMISC));
else
- mlx5e_disable_cvlan_filter(priv);
+ mlx5e_disable_cvlan_filter(priv->fs,
+ !!(priv->netdev->flags & IFF_PROMISC));
return 0;
}
@@ -3780,7 +3935,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
- priv->fs->vlan_strip_disable = !enable;
+ mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
priv->channels.params.vlan_strip_disable = !enable;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
@@ -3788,7 +3943,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
if (err) {
- priv->fs->vlan_strip_disable = enable;
+ mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
priv->channels.params.vlan_strip_disable = enable;
}
unlock:
@@ -3826,9 +3981,9 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
int err;
if (enable)
- err = mlx5e_arfs_enable(priv);
+ err = mlx5e_arfs_enable(priv->fs);
else
- err = mlx5e_arfs_disable(priv);
+ err = mlx5e_arfs_disable(priv->fs);
return err;
}
@@ -3912,12 +4067,14 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
+ vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
- if (!priv->fs->vlan ||
- !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
+ if (!vlan ||
+ !bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
@@ -4006,7 +4163,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
* 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
- max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
+ max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
max_mtu = min(max_mtu_frame, max_mtu_page);
netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
@@ -4018,14 +4175,16 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
return true;
}
-static bool mlx5e_params_validate_xdp(struct net_device *netdev, struct mlx5e_params *params)
+static bool mlx5e_params_validate_xdp(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
bool is_linear;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
- is_linear = mlx5e_rx_is_linear_skb(params, NULL);
+ is_linear = mlx5e_rx_is_linear_skb(mdev, params, NULL);
if (!is_linear && params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
@@ -4062,7 +4221,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (err)
goto out;
- if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, &new_params)) {
+ if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
+ &new_params)) {
err = -EINVAL;
goto out;
}
@@ -4077,19 +4237,21 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
- if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
&new_params, NULL);
- u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params, NULL);
- u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_params, NULL);
+ u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
+ u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
/* Always reset in linear mode - hw_mtu is used in data path.
* Check that the mode was non-linear and didn't change.
* If XSK is active, XSK RQs are linear.
+ * Reset if the RQ size changed, even if it's non-linear.
*/
if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
- ppw_old == ppw_new)
+ sz_old == sz_new)
reset = false;
}
@@ -4539,7 +4701,7 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- if (!mlx5e_params_validate_xdp(netdev, &new_params))
+ if (!mlx5e_params_validate_xdp(netdev, priv->mdev, &new_params))
return -EINVAL;
return 0;
@@ -4577,8 +4739,20 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
new_params = priv->channels.params;
new_params.xdp_prog = prog;
- if (reset)
- mlx5e_set_rq_type(priv->mdev, &new_params);
+
+ /* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
+ * supported with the new parameters: if PAGE_SIZE is bigger than
+ * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
+ * the MTU is small enough for the linear mode, because XDP uses strides
+ * of PAGE_SIZE on regular RQs.
+ */
+ if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+ /* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
+ err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
+ if (err)
+ goto unlock;
+ }
+
old_prog = priv->channels.params.xdp_prog;
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
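
The comment above reduces to a page-size constraint: with XDP every stride is a full page, so striding RQ is only usable when log2(PAGE_SIZE) does not exceed the device's maximum log stride size. A hedged sketch of that condition; the parameter values here are illustrative, not the driver's constants:

	#include <stdbool.h>
	#include <stdio.h>

	static bool xdp_striding_rq_ok(unsigned int log_page_size,
				       unsigned int log_stride_sz_max)
	{
		return log_page_size <= log_stride_sz_max;
	}

	int main(void)
	{
		printf("%d\n", xdp_striding_rq_ok(12, 13)); /* 4K pages, ok */
		printf("%d\n", xdp_striding_rq_ok(16, 13)); /* 64K pages, blocked */
		return 0;
	}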
@@ -4898,7 +5072,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
- mlx5e_check_fragmented_striding_rq_cap(mdev))
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
@@ -4986,6 +5161,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_netdev_dev_addr(netdev);
+ mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
mlx5e_ktls_build_netdev(priv);
}
@@ -5087,7 +5263,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
+ features = MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
@@ -5097,7 +5273,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_close_drop_rq;
- err = mlx5e_create_flow_steering(priv);
+ err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
+ priv->netdev);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_rx_res;
@@ -5120,7 +5297,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
@@ -5136,7 +5314,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
@@ -5188,9 +5367,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
+ err = mlx5e_macsec_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
+
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
@@ -5248,6 +5432,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
+ mlx5e_macsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5269,7 +5454,6 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
@@ -5302,8 +5486,7 @@ mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
- tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
- max_nch = min_t(unsigned int, max_nch, tmp);
+ max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);
/* netdev tx queues */
tmp = netdev->num_tx_queues;
@@ -5447,11 +5630,7 @@ static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
- unsigned int nch;
-
- nch = mlx5e_profile_max_num_channels(mdev, profile);
-
- return nch * profile->rq_groups;
+ return mlx5e_profile_max_num_channels(mdev, profile);
}
struct net_device *
@@ -5512,7 +5691,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
- priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
@@ -5573,7 +5753,8 @@ out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
- priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
cancel_work_sync(&priv->update_stats_work);
return err;
}
@@ -5584,7 +5765,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
- priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ mlx5e_fs_set_state_destroy(priv->fs,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (profile->disable)
profile->disable(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 759f7d3c2cfd..794cd8dfe9c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -56,6 +56,7 @@
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
+#include "en/fs_ethtool.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -69,7 +70,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ strscpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
@@ -397,7 +398,8 @@ out_err:
return err;
}
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+static int
+mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
int sqs_per_channel = mlx5e_get_dcb_num_tc(&priv->channels.params);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -451,7 +453,8 @@ out:
return err;
}
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+static void
+mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -460,6 +463,49 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
mlx5e_sqs2vport_stop(esw, rep);
}
+static int
+mlx5e_rep_add_meta_tunnel_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_group *g;
+
+ g = esw->fdb_table.offloads.send_to_vport_meta_grp;
+ if (!g)
+ return 0;
+
+ flow_rule = mlx5_eswitch_add_send_to_vport_meta_rule(esw, rep->vport);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ rpriv->send_to_vport_meta_rule = flow_rule;
+
+ return 0;
+}
+
+static void
+mlx5e_rep_del_meta_tunnel_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ if (rpriv->send_to_vport_meta_rule)
+ mlx5_eswitch_del_send_to_vport_meta_rule(rpriv->send_to_vport_meta_rule);
+}
+
+void mlx5e_rep_activate_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_add_sqs_fwd_rules(priv);
+ mlx5e_rep_add_meta_tunnel_rule(priv);
+}
+
+void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv)
+{
+ mlx5e_rep_del_meta_tunnel_rule(priv);
+ mlx5e_remove_sqs_fwd_rules(priv);
+}
+
static int mlx5e_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -747,19 +793,20 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
- priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ mlx5e_fs_set_ns(priv->fs,
+ mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplink rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
- priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
- if (IS_ERR(priv->fs->ttc)) {
- err = PTR_ERR(priv->fs->ttc);
+ mlx5e_fs_set_ttc(priv->fs, mlx5_create_ttc_table(priv->mdev, &ttc_params), false);
+ if (IS_ERR(mlx5e_fs_get_ttc(priv->fs, false))) {
+ err = PTR_ERR(mlx5e_fs_get_ttc(priv->fs, false));
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
@@ -779,7 +826,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non uplink reps will skip any bypass tables and go directly to
* their own ttc
*/
- rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
+ rpriv->root_ft = mlx5_get_ttc_flow_table(mlx5e_fs_get_ttc(priv->fs, false));
return 0;
}
@@ -887,14 +934,14 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_root_ft;
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
- mlx5_destroy_ttc_table(priv->fs->ttc);
+ mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
@@ -908,10 +955,10 @@ err_free_fs:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_ethtool_cleanup_steering(priv->fs);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
- mlx5_destroy_ttc_table(priv->fs->ttc);
+ mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
@@ -1177,7 +1224,6 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
.max_nch_limit = mlx5e_rep_max_nch_limit,
@@ -1197,8 +1243,6 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
- /* XSK is needed so we can replace profile with NIC netdev */
- .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index dec183ccd4ac..b4e691760da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -111,6 +111,7 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
+ struct mlx5_flow_handle *send_to_vport_meta_rule;
struct rhashtable tc_ht;
};
@@ -241,8 +242,8 @@ int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp);
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_rep_activate_channels(struct mlx5e_priv *priv);
+void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
@@ -256,8 +257,8 @@ static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
#else /* CONFIG_MLX5_ESWITCH */
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
-static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_activate_channels(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_deactivate_channels(struct mlx5e_priv *priv) {}
static inline int mlx5e_rep_init(void) { return 0; };
static inline void mlx5e_rep_cleanup(void) {};
static inline bool mlx5e_rep_has_offload_stats(const struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 24de37b79f5a..58084650151f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -41,6 +41,7 @@
#include <net/gro.h>
#include <net/udp.h>
#include <net/tcp.h>
+#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
@@ -49,6 +50,7 @@
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
+#include "en_accel/macsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
@@ -237,69 +239,61 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
return false;
}
- cache->page_cache[cache->tail].page = page;
- cache->page_cache[cache->tail].addr = page_pool_get_dma_addr(page);
+ cache->page_cache[cache->tail] = page;
cache->tail = tail_next;
return true;
}
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
struct mlx5e_page_cache *cache = &rq->page_cache;
struct mlx5e_rq_stats *stats = rq->stats;
+ dma_addr_t addr;
if (unlikely(cache->head == cache->tail)) {
stats->cache_empty++;
return false;
}
- if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ if (page_ref_count(cache->page_cache[cache->head]) != 1) {
stats->cache_busy++;
return false;
}
- *dma_info = cache->page_cache[cache->head];
+ au->page = cache->page_cache[cache->head];
cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
stats->cache_reuse++;
- dma_sync_single_for_device(rq->pdev, dma_info->addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ addr = page_pool_get_dma_addr(au->page);
+ /* Non-XSK always uses PAGE_SIZE. */
+ dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
return true;
}
-static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
+static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_unit *au)
{
- if (mlx5e_rx_cache_get(rq, dma_info))
+ dma_addr_t addr;
+
+ if (mlx5e_rx_cache_get(rq, au))
return 0;
- dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
- if (unlikely(!dma_info->page))
+ au->page = page_pool_dev_alloc_pages(rq->page_pool);
+ if (unlikely(!au->page))
return -ENOMEM;
- dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
- rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- page_pool_recycle_direct(rq->page_pool, dma_info->page);
- dma_info->page = NULL;
+ /* Non-XSK always uses PAGE_SIZE. */
+ addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE,
+ rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(rq->pdev, addr))) {
+ page_pool_recycle_direct(rq->page_pool, au->page);
+ au->page = NULL;
return -ENOMEM;
}
- page_pool_set_dma_addr(dma_info->page, dma_info->addr);
+ page_pool_set_dma_addr(au->page, addr);
return 0;
}
-static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info)
-{
- if (rq->xsk_pool)
- return mlx5e_xsk_page_alloc_pool(rq, dma_info);
- else
- return mlx5e_page_alloc_pool(rq, dma_info);
-}
-
void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
{
dma_addr_t dma_addr = page_pool_get_dma_addr(page);
@@ -324,32 +318,18 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool rec
}
}
-static inline void mlx5e_page_release(struct mlx5e_rq *rq,
- struct mlx5e_dma_info *dma_info,
- bool recycle)
-{
- if (rq->xsk_pool)
- /* The `recycle` parameter is ignored, and the page is always
- * put into the Reuse Ring, because there is no way to return
- * the page to the userspace when the interface goes down.
- */
- xsk_buff_free(dma_info->xsk);
- else
- mlx5e_page_release_dynamic(rq, dma_info->page, recycle);
-}
-
static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *frag)
{
int err = 0;
if (!frag->offset)
- /* On first frag (offset == 0), replenish page (dma_info actually).
- * Other frags that point to the same dma_info (with a different
+ /* On first frag (offset == 0), replenish page (alloc_unit actually).
+ * Other frags that point to the same alloc_unit (with a different
* offset) should just use the new one without replenishing again
* by themselves.
*/
- err = mlx5e_page_alloc(rq, frag->di);
+ err = mlx5e_page_alloc_pool(rq, frag->au);
return err;
}
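
Per the comment above, only the fragment that opens a page (offset == 0) replenishes the allocation unit; the other fragments sharing that page reuse it without allocating. A toy count of allocations for a given offset layout:

	#include <stdio.h>

	/* Count one allocation per fragment whose offset is zero. */
	static int allocs_needed(const int *offsets, int n)
	{
		int allocs = 0;

		for (int i = 0; i < n; i++)
			if (offsets[i] == 0)
				allocs++;
		return allocs;
	}

	int main(void)
	{
		int offsets[] = { 0, 1024, 2048, 0, 1024, 2048 }; /* 2 pages, 3 frags each */

		printf("%d\n", allocs_needed(offsets, 6)); /* 2 */
		return 0;
	}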
@@ -359,7 +339,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
bool recycle)
{
if (frag->last_in_page)
- mlx5e_page_release(rq, frag->di, recycle);
+ mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
}
static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -375,6 +355,7 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
int i;
for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
+ dma_addr_t addr;
u16 headroom;
err = mlx5e_get_rx_frag(rq, frag);
@@ -382,8 +363,8 @@ static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
goto free_frags;
headroom = i == 0 ? rq->buff.headroom : 0;
- wqe->data[i].addr = cpu_to_be64(frag->di->addr +
- frag->offset + headroom);
+ addr = page_pool_get_dma_addr(frag->au->page);
+ wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
}
return 0;
@@ -401,6 +382,15 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
{
int i;
+ if (rq->xsk_pool) {
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ xsk_buff_free(wi->au->xsk);
+ return;
+ }
+
for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
mlx5e_put_rx_frag(rq, wi, recycle);
}
@@ -412,84 +402,76 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
mlx5e_free_rx_wqe(rq, wi, false);
}
-static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
+static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- int err;
int i;
- if (rq->xsk_pool) {
- int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
-
- /* Check in advance that we have enough frames, instead of
- * allocating one-by-one, failing and moving frames to the
- * Reuse Ring.
- */
- if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
- return -ENOMEM;
- }
-
for (i = 0; i < wqe_bulk; i++) {
- struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
-
- err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
- if (unlikely(err))
- goto free_wqes;
- }
+ int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
+ struct mlx5e_rx_wqe_cyc *wqe;
- return 0;
+ wqe = mlx5_wq_cyc_get_wqe(wq, j);
-free_wqes:
- while (--i >= 0)
- mlx5e_dealloc_rx_wqe(rq, ix + i);
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
+ break;
+ }
- return err;
+ return i;
}
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
+ union mlx5e_alloc_unit *au, u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_sync_single_for_cpu(rq->pdev,
- di->addr + frag_offset,
- len, DMA_FROM_DEVICE);
- page_ref_inc(di->page);
+ dma_addr_t addr = page_pool_get_dma_addr(au->page);
+
+ dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE);
+ page_ref_inc(au->page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- di->page, frag_offset, len, truesize);
+ au->page, frag_offset, len, truesize);
}
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
- struct mlx5e_dma_info *dma_info,
+ struct page *page, dma_addr_t addr,
int offset_from, int dma_offset, u32 headlen)
{
- const void *from = page_address(dma_info->page) + offset_from;
+ const void *from = page_address(page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, from, len);
}
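
The header copy above rounds headlen up to sizeof(long) because word-aligned lengths copy faster. The rounding itself is the usual power-of-two align-up, e.g.:

	#include <stdio.h>

	/* Align x up to a power-of-two boundary a, as ALIGN(headlen,
	 * sizeof(long)) does above.
	 */
	static unsigned long align_up(unsigned long x, unsigned long a)
	{
		return (x + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		printf("%lu\n", align_up(42, sizeof(long))); /* 48 on LP64 */
		return 0;
	}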
static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
+ union mlx5e_alloc_unit *alloc_units = wi->alloc_units;
bool no_xdp_xmit;
- struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
int i;
/* A common case for AF_XDP. */
- if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
+ if (bitmap_full(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe))
return;
- no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
- MLX5_MPWRQ_PAGES_PER_WQE);
+ no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
- if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
- mlx5e_page_release(rq, &dma_info[i], recycle);
+ if (rq->xsk_pool) {
+ /* The `recycle` parameter is ignored, and the page is always
+ * put into the Reuse Ring, because there is no way to return
+ * the page to the userspace when the interface goes down.
+ */
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ xsk_buff_free(alloc_units[i].xsk);
+ } else {
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+ if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+ mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
+ }
}
static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
@@ -574,11 +556,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
- err = mlx5e_page_alloc(rq, dma_info);
+ union mlx5e_alloc_unit au;
+
+ err = mlx5e_page_alloc_pool(rq, &au);
if (unlikely(err))
goto err_unmap;
- addr = dma_info->addr;
- page = dma_info->page;
+ page = dma_info->page = au.page;
+ addr = dma_info->addr = page_pool_get_dma_addr(au.page);
} else {
dma_info->addr = addr + header_offset;
dma_info->page = page;
@@ -611,7 +595,7 @@ err_unmap:
dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release(rq, dma_info, true);
+ mlx5e_page_release_dynamic(rq, dma_info->page, true);
}
}
rq->stats->buff_alloc_err++;
@@ -659,57 +643,55 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
+ union mlx5e_alloc_unit *au = &wi->alloc_units[0];
struct mlx5e_icosq *sq = rq->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
+ u32 offset; /* 17-bit value with MTT. */
u16 pi;
int err;
int i;
- /* Check in advance that we have enough frames, instead of allocating
- * one-by-one, failing and moving frames to the Reuse Ring.
- */
- if (rq->xsk_pool &&
- unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
- err = -ENOMEM;
- goto err;
- }
-
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
err = mlx5e_alloc_rx_hd_mpwqe(rq);
if (unlikely(err))
goto err;
}
- pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
+ pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
+ memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
+
+ for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, au++) {
+ dma_addr_t addr;
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
- err = mlx5e_page_alloc(rq, dma_info);
+ err = mlx5e_page_alloc_pool(rq, au);
if (unlikely(err))
goto err_unmap;
- umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+ addr = page_pool_get_dma_addr(au->page);
+ umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
+ .ptag = cpu_to_be64(addr | MLX5_EN_WR),
+ };
}
- bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->uctrl.xlt_offset =
- cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
+
+ offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
- .num_wqebbs = MLX5E_UMR_WQEBBS,
+ .num_wqebbs = rq->mpwqe.umr_wqebbs,
.umr.rq = rq,
};
- sq->pc += MLX5E_UMR_WQEBBS;
+ sq->pc += rq->mpwqe.umr_wqebbs;
sq->doorbell_cseg = &umr_wqe->ctrl;
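
The xlt_offset written above is the WQE index scaled into octowords of MTT entries, hence the multiplication by sizeof(struct mlx5_mtt) and division by MLX5_OCTWORD. A standalone version of that arithmetic, with the 8-byte MTT and 16-byte octoword sizes as stated assumptions:

	#include <stdint.h>
	#include <stdio.h>

	#define OCTWORD 16 /* bytes (assumed MLX5_OCTWORD) */
	#define MTT_SZ  8  /* bytes per MTT entry (assumed) */

	static uint32_t xlt_offset(uint32_t ix, uint32_t mtts_per_wqe)
	{
		return ix * mtts_per_wqe * MTT_SZ / OCTWORD;
	}

	int main(void)
	{
		/* WQE 3 with 64 MTTs per WQE starts 96 octowords in. */
		printf("%u\n", xlt_offset(3, 64));
		return 0;
	}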
@@ -717,8 +699,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
err_unmap:
while (--i >= 0) {
- dma_info--;
- mlx5e_page_release(rq, dma_info, true);
+ au--;
+ mlx5e_page_release_dynamic(rq, au->page, true);
}
err:
@@ -752,7 +734,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
if (hd_info->page != deleted_page) {
deleted_page = hd_info->page;
- mlx5e_page_release(rq, hd_info, false);
+ mlx5e_page_release_dynamic(rq, hd_info->page, false);
}
}
@@ -767,7 +749,7 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
/* Don't recycle, this function is called on rq/netdev close */
mlx5e_free_rx_mpwqe(rq, wi, false);
}
@@ -775,38 +757,51 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
- u8 wqe_bulk;
- int err;
+ int wqe_bulk, count;
+ bool busy = false;
+ u16 head;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- wqe_bulk = rq->wqe.info.wqe_bulk;
-
- if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
+ if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
return false;
if (rq->page_pool)
page_pool_nid_changed(rq->page_pool, numa_mem_id());
- do {
- u16 head = mlx5_wq_cyc_get_head(wq);
+ wqe_bulk = mlx5_wq_cyc_missing(wq);
+ head = mlx5_wq_cyc_get_head(wq);
- err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
- if (unlikely(err)) {
- rq->stats->buff_alloc_err++;
- break;
- }
+ /* Don't allow any newly allocated WQEs to share the same page with old
+ * WQEs that aren't completed yet. Stop earlier.
+ */
+ wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
- mlx5_wq_cyc_push_n(wq, wqe_bulk);
- } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
+ if (!rq->xsk_pool)
+ count = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
+ else if (likely(!rq->xsk_pool->dma_need_sync))
+ count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
+ else
+ /* If dma_need_sync is true, it's more efficient to call
+ * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
+ * because the latter does the same check and returns only one
+ * frame.
+ */
+ count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
+
+ mlx5_wq_cyc_push_n(wq, count);
+ if (unlikely(count != wqe_bulk)) {
+ rq->stats->buff_alloc_err++;
+ busy = true;
+ }
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
mlx5_wq_cyc_update_db_record(wq);
- return !!err;
+ return busy;
}
void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
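
The bulk trimming above keeps a refill from spilling onto a page that still holds unfinished WQEs: (head + wqe_bulk) & wqe_index_mask is the overhang past the last page-aligned WQE index, so it is subtracted from the bulk. The same arithmetic in isolation, with an illustrative mask of 8 WQEs per page:

	#include <stdio.h>

	/* Trim the bulk so it ends exactly on a page-aligned WQE index;
	 * the mask is wqes-per-page - 1 (a power of two).
	 */
	static int trim_bulk(int head, int bulk, int wqe_index_mask)
	{
		return bulk - ((head + bulk) & wqe_index_mask);
	}

	int main(void)
	{
		/* Starting at index 5 with room for 13, allocate only 11 so
		 * the bulk stops at index 16 (a page boundary).
		 */
		printf("%d\n", trim_bulk(5, 13, 7));
		return 0;
	}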
@@ -974,7 +969,8 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
- alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
+ alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
+ mlx5e_alloc_rx_mpwqe(rq, head);
if (unlikely(alloc_err))
break;
@@ -1421,6 +1417,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+ if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
+ mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
+
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@ -1524,19 +1523,21 @@ static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt)
{
- struct mlx5e_dma_info *di = wi->di;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 metasize = 0;
void *va, *data;
+ dma_addr_t addr;
u32 frag_size;
- va = page_address(di->page) + wi->offset;
+ va = page_address(au->page) + wi->offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
net_prefetch(data);
@@ -1546,7 +1547,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di->page, prog, &xdp))
+ if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
return NULL; /* page/packet was consumed by XDP */
rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1559,7 +1560,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
return skb;
}
@@ -1570,20 +1571,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
+ union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
- struct mlx5e_dma_info *di = wi->di;
struct skb_shared_info *sinfo;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
struct xdp_buff xdp;
struct sk_buff *skb;
+ dma_addr_t addr;
u32 truesize;
void *va;
- va = page_address(di->page) + wi->offset;
+ va = page_address(au->page) + wi->offset;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
rq->buff.frame0_sz, DMA_FROM_DEVICE);
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
@@ -1599,11 +1602,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
while (cqe_bcnt) {
skb_frag_t *frag;
- di = wi->di;
+ au = wi->au;
frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
- dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, DMA_FROM_DEVICE);
if (!xdp_buff_has_frags(&xdp)) {
@@ -1616,11 +1620,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
}
frag = &sinfo->frags[sinfo->nr_frags++];
- __skb_frag_set_page(frag, di->page);
+ __skb_frag_set_page(frag, au->page);
skb_frag_off_set(frag, wi->offset);
skb_frag_size_set(frag, frag_consumed_bytes);
- if (page_is_pfmemalloc(di->page))
+ if (page_is_pfmemalloc(au->page))
xdp_buff_set_frag_pfmemalloc(&xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
@@ -1631,10 +1635,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
wi++;
}
- di = head_wi->di;
+ au = head_wi->au;
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1651,7 +1655,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
if (unlikely(!skb))
return NULL;
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
if (unlikely(xdp_buff_has_frags(&xdp))) {
int i;
@@ -1706,9 +1710,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
- skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+ skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
+ mlx5e_xsk_skb_from_cqe_linear,
rq, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
@@ -1791,11 +1796,11 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
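The head_offset/page_idx derivation above now uses the RQ's own page_shift instead of the global PAGE_SHIFT, so the stride-to-page mapping follows whatever page size the RQ was configured with (for example an XSK frame size) rather than the system page size. A standalone sketch of the arithmetic with made-up numbers:

#include <stdio.h>

/* head_offset/page_idx derivation as in the hunk above, parameterized by a
 * per-RQ page_shift instead of the global PAGE_SHIFT. Values are invented.
 */
int main(void)
{
        unsigned int log_stride_sz = 8;         /* 256-byte strides */
        unsigned int page_shift = 12;           /* 4 KiB "pages" for this RQ */
        unsigned int stride_ix = 33;

        unsigned int wqe_offset = stride_ix << log_stride_sz;
        unsigned int head_offset = wqe_offset & ((1u << page_shift) - 1);
        unsigned int page_idx = wqe_offset >> page_shift;

        /* 33 * 256 = 8448 bytes -> page 2, offset 256 within that page */
        printf("offset %u -> page %u, head_offset %u\n",
               wqe_offset, page_idx, head_offset);
        return 0;
}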
@@ -1846,12 +1851,13 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
- u32 data_bcnt, u32 data_offset)
+mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ union mlx5e_alloc_unit *au, u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
while (data_bcnt) {
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
unsigned int truesize;
@@ -1860,12 +1866,12 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_i
else
truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
- mlx5e_add_skb_frag(rq, skb, di, data_offset,
+ mlx5e_add_skb_frag(rq, skb, au, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
- di++;
+ au++;
}
}
@@ -1873,12 +1879,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
u32 frag_offset = head_offset + headlen;
u32 byte_cnt = cqe_bcnt - headlen;
- struct mlx5e_dma_info *head_di = di;
+ union mlx5e_alloc_unit *head_au = au;
struct sk_buff *skb;
+ dma_addr_t addr;
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
@@ -1889,14 +1896,17 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
net_prefetchw(skb->data);
+ /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
if (unlikely(frag_offset >= PAGE_SIZE)) {
- di++;
+ au++;
frag_offset -= PAGE_SIZE;
}
- mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
+ mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset);
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
+ addr = page_pool_get_dma_addr(head_au->page);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr,
+ head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1908,12 +1918,13 @@ static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
- struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
+ union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
struct bpf_prog *prog;
struct sk_buff *skb;
u32 metasize = 0;
void *va, *data;
+ dma_addr_t addr;
u32 frag_size;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -1922,11 +1933,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- va = page_address(di->page) + head_offset;
+ va = page_address(au->page) + head_offset;
data = va + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
+ addr = page_pool_get_dma_addr(au->page);
+ dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
frag_size, DMA_FROM_DEVICE);
net_prefetch(data);
@@ -1936,7 +1948,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
net_prefetchw(va); /* xdp_frame data area */
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, di->page, prog, &xdp)) {
+ if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -1952,7 +1964,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
/* queue up for recycling/reuse */
- page_ref_inc(di->page);
+ page_ref_inc(au->page);
return skb;
}
@@ -1997,7 +2009,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
prefetchw(skb->data);
- mlx5e_copy_skb_header(rq->pdev, skb, head,
+ mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
@@ -2049,7 +2061,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release(rq, &shampo->info[header_index], true);
+ mlx5e_page_release_dynamic(rq, shampo->info[header_index].page, true);
}
bitmap_clear(shampo->bitmap, header_index, 1);
}
@@ -2070,11 +2082,11 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
bool match = cqe->shampo.match;
struct mlx5e_rq_stats *stats = rq->stats;
struct mlx5e_rx_wqe_ll *wqe;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_unit *au;
struct mlx5e_mpw_info *wi;
struct mlx5_wq_ll *wq;
- wi = &rq->mpwqe.info[wqe_id];
+ wi = mlx5e_get_mpw_info(rq, wqe_id);
wi->consumed_strides += cstrides;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -2120,8 +2132,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- di = &wi->umr.dma_info[page_idx];
- mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ au = &wi->alloc_units[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, au, data_bcnt, data_offset);
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
@@ -2143,11 +2155,11 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+ struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
- u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
- u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
+ u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -2170,9 +2182,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
- skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+ skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
+ mlx5e_xsk_skb_from_cqe_mpwrq_linear,
rq, wi, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -2417,7 +2430,7 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
mlx5e_xsk_skb_from_cqe_linear :
- mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
@@ -2471,7 +2484,7 @@ free_wqe:
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
- rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 7409829d1201..03c1841970f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -641,17 +641,26 @@ static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};
+static const struct counter_desc vnic_env_stats_drop_desc[] = {
+ { "rx_oversize_pkts_buffer",
+ VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
+};
+
#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
+#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
+ ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
- NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
+ NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
+ NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
@@ -665,6 +674,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
vnic_env_stats_dev_oob_desc[i].format);
+
+ for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_drop_desc[i].format);
+
return idx;
}
@@ -679,6 +693,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
vnic_env_stats_dev_oob_desc, i);
+
+ for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_drop_desc, i);
+
return idx;
}
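The new rx_oversize_pkts_buffer counter follows the usual mlx5e stats-group contract: NUM_STATS, FILL_STRS and FILL_STATS must all gate on the same capability so the name array and the value array stay index-aligned. A compressed standalone sketch of that contract; the capability flag, sizes and the value 42 are invented stand-ins, not the driver's data.

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32

/* Hypothetical flag standing in for MLX5_CAP_GEN(dev, eth_wqe_too_small). */
static int cap_eth_wqe_too_small = 1;

static const char *drop_desc[] = { "rx_oversize_pkts_buffer" };

static int num_drop_counters(void)
{
        return cap_eth_wqe_too_small ? 1 : 0;
}

static int fill_strings(char *data, int idx)
{
        for (int i = 0; i < num_drop_counters(); i++)
                strcpy(data + (idx++) * GSTRING_LEN, drop_desc[i]);
        return idx;
}

static int fill_stats(unsigned long long *data, int idx)
{
        for (int i = 0; i < num_drop_counters(); i++)
                data[idx++] = 42;       /* a real driver reads the HW counter */
        return idx;
}

int main(void)
{
        char names[4 * GSTRING_LEN] = { 0 };
        unsigned long long vals[4] = { 0 };
        int n_names = fill_strings(names, 0);
        int n_vals = fill_stats(vals, 0);

        /* Same gating in both paths keeps names[i] and vals[i] aligned. */
        printf("%d names, %d values, first: %s = %llu\n",
               n_names, n_vals, names, vals[0]);
        return 0;
}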
@@ -2451,6 +2470,9 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
&MLX5E_STATS_GRP(qos),
+#ifdef CONFIG_MLX5_EN_MACSEC
+ &MLX5E_STATS_GRP(macsec_hw),
+#endif
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index ed4fc940e4ef..9f781085be47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -273,6 +273,10 @@ struct mlx5e_qcounter_stats {
u32 rx_if_down_packets;
};
+#define VNIC_ENV_GET(vnic_env_stats, c) \
+ MLX5_GET(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
+ vport_env.c)
+
struct mlx5e_vnic_env_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
@@ -486,5 +490,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
+extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f154bda668ad..70a7a61f9708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -311,6 +311,7 @@ mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -322,7 +323,7 @@ get_ct_priv(struct mlx5e_priv *priv)
return uplink_priv->ct_priv;
}
- return priv->fs->tc->ct;
+ return tc->ct;
}
static struct mlx5e_tc_psample *
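These en_tc.c hunks mechanically replace direct priv->fs->tc dereferences with the mlx5e_fs_get_tc() accessor, which lets the flow-steering struct layout stay private to the fs code. A toy standalone sketch of the same encapsulation; every name in it is hypothetical.

#include <stdio.h>

/* Toy version of hiding a member behind an accessor; names are made up. */
struct tc_table { int n_rules; };

struct flow_steering {
        struct tc_table tc;     /* would be private to the fs code */
};

static struct tc_table *fs_get_tc(struct flow_steering *fs)
{
        return &fs->tc;
}

int main(void)
{
        struct flow_steering fs = { .tc = { .n_rules = 3 } };

        /* Callers go through the accessor instead of fs.tc directly, so the
         * flow_steering layout can change without touching them.
         */
        printf("%d rules\n", fs_get_tc(&fs)->n_rules);
        return 0;
}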
@@ -345,6 +346,7 @@ get_sample_priv(struct mlx5e_priv *priv)
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
@@ -356,7 +358,7 @@ get_post_action(struct mlx5e_priv *priv)
return uplink_priv->post_act;
}
- return priv->fs->tc->post_act;
+ return tc->post_act;
}
struct mlx5_flow_handle *
@@ -607,11 +609,12 @@ int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
- &priv->fs->tc->mod_hdr;
+ &tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
@@ -810,6 +813,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct ttc_params ttc_params;
+ struct mlx5_ttc_table *ttc;
int err;
err = mlx5e_hairpin_create_indirect_rqt(hp);
@@ -827,9 +831,10 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
goto err_create_ttc_table;
}
+ ttc = mlx5e_fs_get_ttc(priv->fs, false);
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
- mlx5_get_ttc_flow_table(priv->fs->ttc)->id);
+ mlx5_get_ttc_flow_table(ttc)->id);
return 0;
@@ -916,10 +921,11 @@ static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
u16 peer_vhca_id, u8 prio)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
- hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe,
+ hash_for_each_possible(tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
@@ -933,11 +939,12 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
/* no more hairpin flows for us, release the hairpin pair */
- if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock))
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
@@ -993,6 +1000,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
@@ -1021,10 +1029,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
if (err)
return err;
- mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_lock(&tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
@@ -1036,7 +1044,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
return -ENOMEM;
}
@@ -1048,9 +1056,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
- hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist,
+ hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
params.log_data_size = 16;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -1126,8 +1134,9 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
+ struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
- struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
@@ -1163,7 +1172,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
- dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan);
+ dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
}
dest_ix++;
}
@@ -1191,7 +1200,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- rule = ERR_CAST(priv->fs->tc->t);
+ rule = ERR_CAST(tc->t);
goto err_ft_get;
}
}
@@ -1293,8 +1302,10 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_fs_chains *nic_chains;
+ nic_chains = mlx5e_nic_chains(tc);
mlx5_del_flow_rules(rule);
if (attr->chain || attr->prio)
@@ -1309,8 +1320,8 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5e_tc_table *tc = priv->fs->tc;
flow_flag_clear(flow, OFFLOADED);
@@ -1322,13 +1333,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
- mutex_lock(&priv->fs->tc->t_lock);
+ mutex_lock(&tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
- priv->fs->tc->t = NULL;
+ tc->t = NULL;
}
- mutex_unlock(&priv->fs->tc->t_lock);
+ mutex_unlock(&tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
@@ -1494,8 +1505,11 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
route_priv = netdev_priv(route_dev);
route_mdev = route_priv->mdev;
- if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
- route_mdev->coredev_type != MLX5_COREDEV_VF)
+ if (out_mdev->coredev_type != MLX5_COREDEV_PF)
+ return false;
+
+ if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
+ route_mdev->coredev_type != MLX5_COREDEV_SF)
return false;
return mlx5e_same_hw_devs(out_priv, route_priv);
@@ -4058,13 +4072,14 @@ static const struct rhashtable_params tc_ht_params = {
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
- return &priv->fs->tc->ht;
+ return &tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
@@ -4448,7 +4463,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
int err = 0;
if (!mlx5_esw_hold(priv->mdev))
- return -EAGAIN;
+ return -EBUSY;
mlx5_esw_get(priv->mdev);
@@ -4772,6 +4787,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
struct mlx5e_hairpin_entry *hpe, *tmp;
LIST_HEAD(init_wait_list);
@@ -4783,11 +4799,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- mutex_lock(&priv->fs->tc->hairpin_tbl_lock);
- hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
- mutex_unlock(&priv->fs->tc->hairpin_tbl_lock);
+ mutex_unlock(&tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
@@ -4841,7 +4857,8 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
- struct mlx5_flow_table **ft = &priv->fs->tc->miss_t;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_flow_table **ft = &tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
@@ -4863,12 +4880,14 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
- mlx5_destroy_flow_table(priv->fs->tc->miss_t);
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+
+ mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = priv->fs->tc;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
@@ -4909,7 +4928,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
- attr.default_ft = priv->fs->tc->miss_t;
+ attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping;
tc->chains = mlx5_chains_create(dev, &attr);
@@ -4958,7 +4977,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_table *tc = priv->fs->tc;
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
@@ -5163,13 +5182,13 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain = 0, chain_tag, reg_b, zone_restore_id;
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5e_tc_table *tc = priv->fs->tc;
struct mlx5_mapped_obj mapped_obj;
struct tc_skb_ext *tc_skb_ext;
+ struct mlx5e_tc_table *tc;
int err;
reg_b = be32_to_cpu(cqe->ft_metadata);
-
+ tc = mlx5e_fs_get_tc(priv->fs);
chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 6ce1ab6b86b7..48241317a535 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -54,6 +54,7 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
+struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 27f791feb517..bf2232a2a836 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -485,7 +486,7 @@ err_drop:
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
- !attr->insz;
+ !attr->insz && !mlx5e_macsec_skb_is_offload(skb);
}
static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 833be29170a1..9a458a5d9853 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -31,6 +31,7 @@
*/
#include <linux/irq.h>
+#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
@@ -86,26 +87,36 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
+ bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);
bool busy_xsk = false, xsk_rx_alloc_err;
- /* Handle the race between the application querying need_wakeup and the
- * driver setting it:
- * 1. Update need_wakeup both before and after the TX. If it goes to
- * "yes", it can only happen with the first update.
- * 2. If the application queried need_wakeup before we set it, the
- * packets will be transmitted anyway, even w/o a wakeup.
- * 3. Give a chance to clear need_wakeup after new packets were queued
- * for TX.
+ /* If SQ is empty, there are no TX completions to trigger NAPI, so set
+ * need_wakeup. Do it before queuing packets for TX to avoid a race
+ * condition with userspace.
*/
- mlx5e_xsk_update_tx_wakeup(xsksq);
+ if (need_wakeup && xsksq->pc == xsksq->cc)
+ xsk_set_tx_need_wakeup(xsksq->xsk_pool);
busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
- mlx5e_xsk_update_tx_wakeup(xsksq);
+ /* If we queued some packets for TX, no need for wakeup anymore. */
+ if (need_wakeup && xsksq->pc != xsksq->cc)
+ xsk_clear_tx_need_wakeup(xsksq->xsk_pool);
+ /* If WQ is empty, RX won't trigger NAPI, so set need_wakeup. Do it
+ * before refilling to avoid a race condition with userspace.
+ */
+ if (need_wakeup && !mlx5e_rqwq_get_cur_sz(xskrq))
+ xsk_set_rx_need_wakeup(xskrq->xsk_pool);
xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
xskrq);
- busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
+ /* Ask for wakeup if WQ is not full after refill. */
+ if (!need_wakeup)
+ busy_xsk |= xsk_rx_alloc_err;
+ else if (xsk_rx_alloc_err)
+ xsk_set_rx_need_wakeup(xskrq->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(xskrq->xsk_pool);
return busy_xsk;
}
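The rewritten comments above describe the need_wakeup ordering: raise the flag while the queue is still empty (before producing or refilling), and drop it once work has actually been queued, so userspace can never miss a wakeup. Below is a small standalone model of that ordering using a plain flag and counters; it illustrates the idea only and is not the AF_XDP API.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the need_wakeup handshake: the flag is raised while the
 * queue is empty (before producing), and lowered once something was
 * actually queued. The counters and flag are stand-ins, not xsk state.
 */
struct toy_sq {
        unsigned int pc, cc;    /* producer / consumer counters */
        bool need_wakeup;
};

static unsigned int toy_tx(struct toy_sq *sq, unsigned int budget,
                           unsigned int pending)
{
        /* 1. SQ empty: no completion will fire, ask userspace to kick us. */
        if (sq->pc == sq->cc)
                sq->need_wakeup = true;

        /* 2. Queue whatever fits in the budget. */
        unsigned int sent = pending < budget ? pending : budget;
        sq->pc += sent;

        /* 3. If we did queue work, completions will drive progress. */
        if (sq->pc != sq->cc)
                sq->need_wakeup = false;

        return sent;
}

int main(void)
{
        struct toy_sq sq = { 0 };

        toy_tx(&sq, 8, 0);      /* nothing to send: flag stays set */
        printf("idle: need_wakeup=%d\n", sq.need_wakeup);

        toy_tx(&sq, 8, 3);      /* queued 3 descriptors: flag cleared */
        printf("busy: need_wakeup=%d\n", sq.need_wakeup);
        return 0;
}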
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 229728c80233..a0242dc15741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -575,6 +575,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_GEN_MAX(dev, vhca_state))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
+ if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index 0abef71cb839..c9a91158e99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -78,12 +78,16 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
struct mlx5_core_dev *dest_mdev)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ bool vf_sf_vport;
+
+ vf_sf_vport = mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
/* Use indirect table for all IP traffic from UL to VF with vport
* destination when source rewrite flag is set.
*/
return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
- mlx5_eswitch_is_vf_vport(esw, vport_num) &&
+ vf_sf_vport &&
esw->dev == dest_mdev &&
attr->ip_version &&
attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 694c54066955..4f8a24d84a86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -924,12 +924,16 @@ int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
struct mlx5_esw_rate_group *group,
struct netlink_ext_ack *extack)
{
- int err;
+ int err = 0;
mutex_lock(&esw->state_lock);
+ if (!vport->qos.enabled && !group)
+ goto unlock;
+
err = esw_qos_vport_enable(esw, vport, 0, 0, extack);
if (!err)
err = esw_qos_vport_update_group(esw, vport, group, extack);
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6aa58044b949..c59107fa9e6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1360,7 +1360,6 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
- esw_offloads_del_send_to_vport_meta_rules(esw);
devl_rate_nodes_destroy(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 87ce5a208cb5..f68dc2d0dbe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -244,6 +244,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
+ struct mlx5_flow_group *vport_rx_drop_group;
+ struct mlx5_flow_handle *vport_rx_drop_rule;
struct xarray vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
@@ -344,7 +346,10 @@ void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
-void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw);
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a9f4c652f859..4e50df3139c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -70,6 +70,8 @@
#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+
static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -481,25 +483,27 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
!(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
- } else if (attr->dest_ft) {
- esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
- (*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
- } else if (attr->dest_chain) {
- err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
- 1, 0, *i);
- (*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+
+ if (attr->dest_ft) {
+ err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+ 1, 0, *i);
+ (*i)++;
+ }
}
return err;
@@ -1058,52 +1062,23 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
- struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
- int i = 0, num_vfs = esw->esw_funcs.num_vfs;
-
- if (!num_vfs || !flows)
- return;
-
- for (i = 0; i < num_vfs; i++)
- mlx5_del_flow_rules(flows[i]);
-
- kvfree(flows);
- /* If changing eswitch mode from switchdev to legacy, but num_vfs is not 0,
- * meta rules could be freed again. So set it to NULL.
- */
- esw->fdb_table.offloads.send_to_vport_meta_rules = NULL;
+ if (rule)
+ mlx5_del_flow_rules(rule);
}
-void esw_offloads_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
-{
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
-}
-
-static int
-mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
- int num_vfs, rule_idx = 0, err = 0;
struct mlx5_flow_handle *flow_rule;
- struct mlx5_flow_handle **flows;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
- unsigned long i;
- u16 vport_num;
-
- num_vfs = esw->esw_funcs.num_vfs;
- flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
- if (!flows)
- return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- err = -ENOMEM;
- goto alloc_err;
- }
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
MLX5_SET(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
@@ -1116,34 +1091,18 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- vport_num = vport->vport;
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
- dest.vport.num = vport_num;
-
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
- rule_idx, PTR_ERR(flow_rule));
- goto rule_err;
- }
- flows[rule_idx++] = flow_rule;
- }
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
+ dest.vport.num = vport_num;
- esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
- kvfree(spec);
- return 0;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
+ vport_num, PTR_ERR(flow_rule));
-rule_err:
- while (--rule_idx >= 0)
- mlx5_del_flow_rules(flows[rule_idx]);
kvfree(spec);
-alloc_err:
- kvfree(flows);
- return err;
+ return flow_rule;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
@@ -1668,18 +1627,200 @@ esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
#endif
+static int
+esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int count, err = 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
+
+ /* See comment at table_size calculation */
+ count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
+ *ix += count;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+
+out:
+ return err;
+}
+
+static int
+esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+
+ if (!esw_src_port_rewrite_supported(esw))
+ return 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, *ix + esw->total_vports - 1);
+ *ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev,
+ "Failed to create send-to-vport meta flow group err(%d)\n", err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ return 0;
+
+send_vport_meta_err:
+ return err;
+}
+
+static int
+esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return 0;
+
+ memset(flow_group_in, 0, inlen);
+
+ esw_set_flow_group_source_port(esw, flow_group_in);
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ *ix + esw->total_vports - 1);
+ *ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
+
+out:
+ return err;
+}
+
+static int
+esw_create_miss_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *fdb,
+ u32 *flow_group_in,
+ int *ix)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ int err = 0;
+ u8 *dmac;
+
+ memset(flow_group_in, 0, inlen);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ *ix + MLX5_ESW_MISS_FLOWS);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
+ goto miss_err;
+ }
+ esw->fdb_table.offloads.miss_grp = g;
+
+ err = esw_add_fdb_miss_rule(esw);
+ if (err)
+ goto miss_rule_err;
+
+ return 0;
+
+miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+ return err;
+}
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
- int num_vfs, table_size, ix, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
+ int table_size, ix = 0, err = 0;
u32 flags = 0, *flow_group_in;
- struct mlx5_flow_group *g;
- void *match_criteria;
- u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
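Each of the new esw_create_*_group() helpers above advances a shared *ix cursor instead of computing absolute flow indices inline, so the group ranges are carved back to back within the FDB table. A standalone toy of that pattern; every constant in it is invented for the demo.

#include <stdio.h>

/* Toy walk of the shared flow-index cursor that the esw_create_*_group()
 * helpers advance; all sizes are hypothetical.
 */
static void carve(const char *name, int *ix, int count)
{
        printf("%-20s [%d, %d]\n", name, *ix, *ix + count - 1);
        *ix += count;
}

int main(void)
{
        const int max_ports = 2, total_vports = 10;
        const int max_sq_nvports = 32, max_pf_sq = 256;
        int ix = 0;

        carve("send-to-vport", &ix,
              max_ports * (total_vports * max_sq_nvports + max_pf_sq));
        carve("meta send-to-vport", &ix, total_vports);
        carve("peer miss", &ix, total_vports);
        carve("miss", &ix, 3);          /* made-up size for the miss group */

        printf("cursor ends after %d entries\n", ix);
        return 0;
}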
@@ -1713,7 +1854,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
* total vports of the peer (currently it also uses esw->total_vports).
*/
table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
- MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
+ esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -1754,139 +1895,29 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
goto fdb_chains_err;
}
- /* create send-to-vport group */
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
-
- /* See comment above table_size calculation */
- ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
+ if (err)
goto send_vport_err;
- }
- esw->fdb_table.offloads.send_to_vport_grp = g;
-
- if (esw_src_port_rewrite_supported(esw)) {
- /* meta send to vport */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS_2);
-
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, match_criteria,
- misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
-
- num_vfs = esw->esw_funcs.num_vfs;
- if (num_vfs) {
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in,
- end_flow_index, ix + num_vfs - 1);
- ix += num_vfs;
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
- err);
- goto send_vport_meta_err;
- }
- esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
- err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
- if (err)
- goto meta_rule_err;
- }
- }
-
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
- /* create peer esw miss group */
- memset(flow_group_in, 0, inlen);
-
- esw_set_flow_group_source_port(esw, flow_group_in);
-
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in,
- match_criteria);
-
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
-
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + esw->total_vports - 1);
- ix += esw->total_vports;
-
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
- goto peer_miss_err;
- }
- esw->fdb_table.offloads.peer_miss_grp = g;
- }
- /* create miss group */
- memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_OUTER_HEADERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
- match_criteria);
- dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
- outer_headers.dmac_47_16);
- dmac[0] = 0x01;
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + MLX5_ESW_MISS_FLOWS);
+ err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
+ if (err)
+ goto send_vport_meta_err;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
- goto miss_err;
- }
- esw->fdb_table.offloads.miss_grp = g;
+ err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
+ if (err)
+ goto peer_miss_err;
- err = esw_add_fdb_miss_rule(esw);
+ err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
if (err)
- goto miss_rule_err;
+ goto miss_err;
kvfree(flow_group_in);
return 0;
-miss_rule_err:
- mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
-meta_rule_err:
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
@@ -1913,7 +1944,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
- mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
@@ -1931,7 +1961,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0);
}
-static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
+static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
int nvports;
@@ -1956,7 +1986,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = esw_get_offloads_ft_size(esw);
+ ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
+ MLX5_ESW_FT_OFFLOADS_DROP_RULE;
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1985,7 +2016,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports;
int err = 0;
- nvports = esw_get_offloads_ft_size(esw);
+ nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -2015,6 +2046,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
+static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
+{
+ /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+ * for the drop rule, which is placed at the end of the table.
+ * So return the total of vport and int_port as rule index.
+ */
+ return esw_get_nr_ft_offloads_steering_src_ports(esw);
+}
+
+static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int flow_index;
+ int err = 0;
+
+ flow_index = esw_create_vport_rx_drop_rule_index(esw);
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+ g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
+ goto out;
+ }
+
+ esw->offloads.vport_rx_drop_group = g;
+out:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
+{
+ if (esw->offloads.vport_rx_drop_group)
+ mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
@@ -2063,6 +2140,32 @@ out:
return flow_rule;
}
+static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
+ &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx drop rule err %ld\n",
+ PTR_ERR(flow_rule));
+ return PTR_ERR(flow_rule);
+ }
+
+ esw->offloads.vport_rx_drop_rule = flow_rule;
+
+ return 0;
+}
+
+static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
+{
+ if (esw->offloads.vport_rx_drop_rule)
+ mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
+}
+
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
@@ -3063,8 +3166,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err)
goto create_fg_err;
+ err = esw_create_vport_rx_drop_group(esw);
+ if (err)
+ goto create_rx_drop_fg_err;
+
+ err = esw_create_vport_rx_drop_rule(esw);
+ if (err)
+ goto create_rx_drop_rule_err;
+
return 0;
+create_rx_drop_rule_err:
+ esw_destroy_vport_rx_drop_group(esw);
+create_rx_drop_fg_err:
+ esw_destroy_vport_rx_group(esw);
create_fg_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
@@ -3082,6 +3197,8 @@ create_indir_err:
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
+ esw_destroy_vport_rx_drop_rule(esw);
+ esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index a1ac3a654962..9459e56ee90a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -36,6 +36,7 @@ static struct mlx5_nb events_nbs_ref[] = {
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_OBJECT_CHANGE },
/* QP/WQ resource events to forward */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
@@ -132,6 +133,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
case MLX5_EVENT_TYPE_DEVICE_TRACER:
return "MLX5_EVENT_TYPE_DEVICE_TRACER";
+ case MLX5_EVENT_TYPE_OBJECT_CHANGE:
+ return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index e735e19461ba..32d4c967469c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -577,7 +577,10 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
- MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
+ fte->action.crypto.type);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
+ fte->action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
@@ -919,13 +922,15 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
table_type = FS_FT_FDB;
break;
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
- case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
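
With ipsec_obj_id replaced by the generic encrypt_decrypt pair, a rule can now reference either an IPsec or a MACsec crypto object through the same two flow_act fields. A minimal caller-side sketch; the action flag and type-enum names below are assumptions for illustration only:

        struct mlx5_flow_act flow_act = {};

        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;           /* name assumed */
        flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC; /* name assumed */
        flow_act.crypto.obj_id = macsec_obj_id;  /* illustrative, object created beforehand */
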
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e3960cdf5131..d53749248fa0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -104,6 +104,10 @@
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
+#define KERNEL_RX_MACSEC_NUM_PRIOS 1
+#define KERNEL_RX_MACSEC_NUM_LEVELS 2
+#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
+
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
@@ -126,11 +130,15 @@
#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
-#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
-#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
+#define KERNEL_TX_MACSEC_NUM_PRIOS 1
+#define KERNEL_TX_MACSEC_NUM_LEVELS 2
+#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
struct node_caps {
size_t arr_sz;
@@ -149,12 +157,16 @@ static struct init_tree_node {
enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 7,
+ .ar_size = 8,
.children = (struct init_tree_node[]){
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
+ KERNEL_RX_MACSEC_NUM_LEVELS))),
ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
@@ -186,18 +198,23 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+ ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
KERNEL_TX_IPSEC_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
+ KERNEL_TX_MACSEC_NUM_LEVELS))),
}
};
@@ -2269,6 +2286,7 @@ static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_LAG:
case MLX5_FLOW_NAMESPACE_OFFLOADS:
case MLX5_FLOW_NAMESPACE_ETHTOOL:
@@ -2315,7 +2333,8 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
prio = FDB_BYPASS_PATH;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
- case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
root_ns = steering->egress_root_ns;
prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
break;
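
From the definitions above, the new MACsec levels can be worked out directly. On the egress side:

        KERNEL_TX_IPSEC_MIN_LEVEL  = KERNEL_TX_IPSEC_NUM_LEVELS                          = 1
        KERNEL_TX_MACSEC_MIN_LEVEL = KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS = 2

On the RX side, KERNEL_RX_MACSEC_MIN_LEVEL = BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS = BY_PASS_MIN_LEVEL + 1, which is why LAG_MIN_LEVEL is bumped to OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1: the LAG namespace must stay above the newly inserted MACsec RX tables.
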
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 079fa44ada71..f34e758a2f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -273,6 +273,19 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_MACSEC);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, adv_virtualization)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION);
+ if (err)
+ return err;
+ }
+
return 0;
}
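
Both new blocks follow the usual pattern of probing a gating bit first and only then fetching the full capability group. A hypothetical consumer can reuse the same gate before inspecting any field of the fetched MACsec capability group (the helper name is illustrative):

        static bool mlx5_macsec_offload_supported(struct mlx5_core_dev *mdev)
        {
                /* same gate used above before MLX5_CAP_MACSEC is queried */
                return MLX5_CAP_GEN_64(mdev, general_obj_types) &
                       MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD;
        }
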
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2cf2c9948446..86ed87d704f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -601,7 +601,7 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
fw_reporter_ctx.miss_counter = health->miss_counter;
if (fw_reporter_ctx.err_synd) {
devlink_health_report(health->fw_reporter,
- "FW syndrom reported", &fw_reporter_ctx);
+ "FW syndrome reported", &fw_reporter_ctx);
return;
}
if (fw_reporter_ctx.miss_counter)
@@ -702,11 +702,25 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.dump = mlx5_fw_fatal_reporter_dump,
};
-#define MLX5_REPORTER_FW_GRACEFUL_PERIOD 1200000
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct devlink *devlink = priv_to_devlink(dev);
+ u64 grace_period;
+
+ if (mlx5_core_is_ecpf(dev)) {
+ grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ } else if (mlx5_core_is_pf(dev)) {
+ grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ } else {
+ /* VF or SF */
+ grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ }
health->fw_reporter =
devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
@@ -718,7 +732,7 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
health->fw_fatal_reporter =
devlink_health_reporter_create(devlink,
&mlx5_fw_fatal_reporter_ops,
- MLX5_REPORTER_FW_GRACEFUL_PERIOD,
+ grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
@@ -843,9 +857,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
-
- if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
- queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -862,6 +873,14 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
del_timer_sync(&health->timer);
}
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
+}
+
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -875,13 +894,6 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
cancel_work_sync(&health->fatal_report_work);
}
-void mlx5_health_flush(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_health *health = &dev->priv.health;
-
- flush_workqueue(health->wq);
-}
-
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ac3757beaea2..c247cca154e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "ipoib.h"
+#include "en/fs_ethtool.h"
static void mlx5i_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
@@ -39,7 +40,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = mlx5i_epriv(dev);
mlx5e_ethtool_get_drvinfo(priv, drvinfo);
- strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
+ strscpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
sizeof(drvinfo->driver));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index c02b7b08fb4c..4e3a75496dd9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -35,6 +35,7 @@
#include "en.h"
#include "en/params.h"
#include "ipoib.h"
+#include "en/fs_ethtool.h"
#define IB_DEFAULT_Q_KEY 0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9
@@ -320,43 +321,47 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
+ struct mlx5_flow_namespace *ns =
+ mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
int err;
- priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fs->ns)
+ if (!ns)
return -EINVAL;
- err = mlx5e_arfs_create_tables(priv);
+ mlx5e_fs_set_ns(priv->fs, ns, false);
+ err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
- err = mlx5e_create_ttc_table(priv);
+ err = mlx5e_create_ttc_table(priv->fs, priv->rx_res);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
- mlx5e_ethtool_init_steering(priv);
+ mlx5e_ethtool_init_steering(priv->fs);
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv);
+ mlx5e_arfs_destroy_tables(priv->fs,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
return err;
}
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_destroy_ttc_table(priv);
- mlx5e_arfs_destroy_tables(priv);
- mlx5e_ethtool_cleanup_steering(priv);
+ mlx5e_destroy_ttc_table(priv->fs);
+ mlx5e_arfs_destroy_tables(priv->fs,
+ !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_ethtool_cleanup_steering(priv->fs);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
@@ -458,7 +463,6 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.update_carrier = NULL, /* no HW update in IB link */
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 0b86e78dbc0e..0227a521d301 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -349,7 +349,6 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.update_stats = NULL,
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
- .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 065102278cb8..a9f4ede4a9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -65,6 +65,21 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY;
}
+static u8 lag_active_port_bits(struct mlx5_lag *ldev)
+{
+ u8 enabled_ports[MLX5_MAX_PORTS] = {};
+ u8 active_port = 0;
+ int num_enabled;
+ int idx;
+
+ mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports,
+ &num_enabled);
+ for (idx = 0; idx < num_enabled; idx++)
+ active_port |= BIT_MASK(enabled_ports[idx]);
+
+ return active_port;
+}
+
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
unsigned long flags)
{
@@ -77,9 +92,21 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);
- if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {
+
+ switch (port_sel_mode) {
+ case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY:
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
+ break;
+ case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT:
+ if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass))
+ break;
+
+ MLX5_SET(lagc, lag_ctx, active_port,
+ lag_active_port_bits(mlx5_lag_dev(dev)));
+ break;
+ default:
+ break;
}
MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode);
@@ -386,12 +413,37 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
}
}
+static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
+ void *lag_ctx;
+
+ lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+ MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+ MLX5_SET(modify_lag_in, in, field_select, 0x2);
+
+ MLX5_SET(lagc, lag_ctx, active_port, ports);
+
+ return mlx5_cmd_exec_in(dev, modify_lag, in);
+}
+
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ u8 active_ports;
+ int ret;
+
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
+ ret = mlx5_lag_port_sel_modify(ldev, ports);
+ if (ret ||
+ !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass))
+ return ret;
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags))
- return mlx5_lag_port_sel_modify(ldev, ports);
+ active_ports = lag_active_port_bits(ldev);
+
+ return mlx5_cmd_modify_active_port(dev0, active_ports);
+ }
return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
}
@@ -432,21 +484,22 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
mlx5_lag_drop_rule_setup(ldev, tracker);
}
-#define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
unsigned long *flags)
{
- struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- if (ldev->ports == MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED) {
- /* Four ports are support only in hash mode */
- if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table))
- return -EINVAL;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+ if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
+ return -EINVAL;
+ return 0;
}
+ if (ldev->ports > 2)
+ ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
+
+ set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+
return 0;
}
@@ -1275,6 +1328,22 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_lag_is_active);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ unsigned long flags;
+ bool res = false;
+
+ spin_lock_irqsave(&lag_lock, flags);
+ ldev = mlx5_lag_dev(dev);
+ if (ldev)
+ res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags);
+ spin_unlock_irqrestore(&lag_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(mlx5_lag_mode_is_hash);
+
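
mlx5_lag_mode_is_hash() is exported so code outside the LAG layer (mlx5_ib, for example) can tell hash-based LAG apart from queue-affinity LAG. A hypothetical caller; the flag name is illustrative:

        if (mlx5_lag_is_active(mdev) && mlx5_lag_mode_is_hash(mdev)) {
                /* traffic is spread by the port-selection flow table,
                 * not by per-QP tx affinity
                 */
                single_tx_affinity = true;
        }
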
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index b3bbf284fe71..d854e01d7fc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -11,7 +11,9 @@
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
#define MLX5_ASO_WQEBBS_DATA \
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define ASO_CTRL_READ_EN BIT(0)
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
@@ -70,6 +72,7 @@ enum {
enum {
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 91e806c1aa21..d3a9ae80fd30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -65,6 +65,8 @@ enum {
MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+ MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9),
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
@@ -72,6 +74,13 @@ static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}
+static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)
+{
+ return (mlx5_real_time_mode(mdev) &&
+ MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&
+ MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));
+}
+
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
@@ -459,9 +468,95 @@ static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
return find_target_cycles(mdev, target_ns);
}
-static u64 perout_conf_real_time(s64 sec)
+static u64 perout_conf_real_time(s64 sec, u32 nsec)
+{
+ return (u64)nsec | (u64)sec << 32;
+}
+
+static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
+ u64 *time_stamp, bool real_time)
+{
+ struct timespec64 ts;
+ s64 ns;
+
+ ts.tv_nsec = rq->perout.period.nsec;
+ ts.tv_sec = rq->perout.period.sec;
+ ns = timespec64_to_ns(&ts);
+
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+
+ *time_stamp = real_time ? perout_conf_real_time(rq->perout.start.sec, 0) :
+ perout_conf_internal_timer(mdev, rq->perout.start.sec);
+
+ return 0;
+}
+
+#define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1)
+static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev,
+ struct ptp_clock_request *rq,
+ u32 *out_pulse_duration_ns)
{
- return (u64)sec << 32;
+ struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ u32 out_pulse_duration;
+ struct timespec64 ts;
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ ts.tv_sec = rq->perout.on.sec;
+ ts.tv_nsec = rq->perout.on.nsec;
+ out_pulse_duration = (u32)timespec64_to_ns(&ts);
+ } else {
+ /* By default, use 50% of the pulse period as the
+ * out_pulse_duration_ns value
+ */
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1;
+ }
+
+ if (out_pulse_duration < pps_info->min_out_pulse_duration_ns ||
+ out_pulse_duration > MLX5_MAX_PULSE_DURATION) {
+ mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n",
+ out_pulse_duration, pps_info->min_out_pulse_duration_ns,
+ MLX5_MAX_PULSE_DURATION);
+ return -EINVAL;
+ }
+ *out_pulse_duration_ns = out_pulse_duration;
+
+ return 0;
+}
+
+static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq,
+ u32 *field_select, u32 *out_pulse_duration_ns,
+ u64 *period, u64 *time_stamp)
+{
+ struct mlx5_pps *pps_info = &mdev->clock.pps_info;
+ struct ptp_clock_time *time = &rq->perout.start;
+ struct timespec64 ts;
+
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ if (timespec64_to_ns(&ts) < pps_info->min_npps_period) {
+ mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n",
+ pps_info->min_npps_period);
+ return -EINVAL;
+ }
+ *period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec);
+
+ if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns))
+ return -EINVAL;
+
+ *time_stamp = perout_conf_real_time(time->sec, time->nsec);
+ *field_select |= MLX5_MTPPS_FS_NPPS_PERIOD |
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS;
+
+ return 0;
+}
+
+static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags)
+{
+ return ((!mlx5_npps_real_time_supported(mdev) && flags) ||
+ (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE));
}
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
@@ -474,20 +569,20 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
container_of(clock, struct mlx5_core_dev, clock);
bool rt_mode = mlx5_real_time_mode(mdev);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- struct timespec64 ts;
+ u32 out_pulse_duration_ns = 0;
u32 field_select = 0;
+ u64 npps_period = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
int err = 0;
- s64 ns;
if (!MLX5_PPS_CAP(mdev))
return -EOPNOTSUPP;
/* Reject requests with unsupported flags */
- if (rq->perout.flags)
+ if (mlx5_perout_verify_flags(mdev, rq->perout.flags))
return -EOPNOTSUPP;
if (rq->perout.index >= clock->ptp_info.n_pins)
@@ -500,29 +595,25 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
- s64 sec = rq->perout.start.sec;
-
- if (rq->perout.start.nsec)
- return -EINVAL;
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
- if ((ns >> 1) != 500000000LL)
+ if (rt_mode && rq->perout.start.sec > U32_MAX)
return -EINVAL;
- if (rt_mode && sec > U32_MAX)
- return -EINVAL;
-
- time_stamp = rt_mode ? perout_conf_real_time(sec) :
- perout_conf_internal_timer(mdev, sec);
-
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_TIME_STAMP;
+
+ if (mlx5_npps_real_time_supported(mdev))
+ err = perout_conf_npps_real_time(mdev, rq, &field_select,
+ &out_pulse_duration_ns, &npps_period,
+ &time_stamp);
+ else
+ err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode);
+ if (err)
+ return err;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -531,7 +622,8 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_SET(mtpps_reg, in, enable, on);
MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
MLX5_SET(mtpps_reg, in, field_select, field_select);
-
+ MLX5_SET64(mtpps_reg, in, npps_period, npps_period);
+ MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns);
err = mlx5_set_mtpps(mdev, in, sizeof(in));
if (err)
return err;
@@ -687,6 +779,13 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
cap_max_num_of_pps_out_pins);
+ if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period))
+ clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out,
+ cap_log_min_npps_period);
+ if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns))
+ clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out,
+ cap_log_min_out_pulse_duration_ns);
+
clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
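
A worked example of the new NPPS path: for a periodic-output request with period = 1 ms, no PTP_PEROUT_DUTY_CYCLE flag, and start = 10 s (and assuming both values clear the minimums read from the MTPPS capabilities), the values programmed into MTPPS come out as:

        npps_period           = perout_conf_real_time(0, 1000000) = 1000000       (sec << 32 | nsec)
        out_pulse_duration_ns = 1000000 >> 1                      = 500000        (50% duty by default)
        time_stamp            = perout_conf_real_time(10, 0)      = 10ULL << 32
        field_select         |= MLX5_MTPPS_FS_NPPS_PERIOD | MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS
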
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2f536c5d30b1..032adb21ad4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -83,6 +83,7 @@ int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, voi
enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
+ MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC,
};
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c085b031abfc..0b459d841c3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -494,6 +494,24 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+
+ if (!err)
+ return val.vbool;
+
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return MLX5_CAP_GEN(dev, roce);
+}
+EXPORT_SYMBOL(mlx5_is_roce_on);
+
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
@@ -597,7 +615,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported))
- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+ mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev);
if (max_uc_list > 0)
@@ -623,7 +642,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
*/
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
(!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}
@@ -652,6 +671,33 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
return err;
}
+static int handle_hca_cap_port_selection(struct mlx5_core_dev *dev,
+ void *set_ctx)
+{
+ void *set_hca_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, port_selection_cap))
+ return 0;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass) ||
+ !MLX5_CAP_PORT_SELECTION_MAX(dev, port_select_flow_table_bypass))
+ return 0;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur,
+ MLX5_ST_SZ_BYTES(port_selection_cap));
+ MLX5_SET(port_selection_cap, set_hca_cap, port_select_flow_table_bypass, 1);
+
+ err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION);
+
+ return err;
+}
+
static int set_hca_cap(struct mlx5_core_dev *dev)
{
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
@@ -696,6 +742,13 @@ static int set_hca_cap(struct mlx5_core_dev *dev)
goto out;
}
+ memset(set_ctx, 0, set_sz);
+ err = handle_hca_cap_port_selection(dev, set_ctx);
+ if (err) {
+ mlx5_core_err(dev, "handle_hca_cap_port_selection failed\n");
+ goto out;
+ }
+
out:
kfree(set_ctx);
return err;
@@ -1039,7 +1092,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
-static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
{
int err;
@@ -1077,10 +1130,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto err_cmd_cleanup;
+ goto stop_health_poll;
}
err = mlx5_core_set_issi(dev);
@@ -1132,8 +1187,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout)
mlx5_core_err(dev, "query hca failed\n");
goto reclaim_boot_pages;
}
-
- mlx5_start_health_poll(dev);
+ mlx5_start_health_fw_log_up(dev);
return 0;
@@ -1141,6 +1195,8 @@ reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
+stop_health_poll:
+ mlx5_stop_health_poll(dev, boot);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
@@ -1152,7 +1208,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
int err;
- mlx5_stop_health_poll(dev, boot);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
@@ -1160,6 +1215,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
}
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
+ mlx5_stop_health_poll(dev, boot);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
@@ -1309,7 +1365,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
- err = mlx5_function_setup(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ err = mlx5_function_setup(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err)
goto err_function;
@@ -1397,7 +1453,7 @@ int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
else
timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
- err = mlx5_function_setup(dev, timeout);
+ err = mlx5_function_setup(dev, false, timeout);
if (err)
goto err_function;
@@ -1488,6 +1544,8 @@ static const int types[] = {
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
MLX5_CAP_DEV_SHAMPO,
+ MLX5_CAP_MACSEC,
+ MLX5_CAP_ADV_VIRTUALIZATION,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index ad61b86d5769..a806e3de7b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -143,6 +143,36 @@ enum mlx5_semaphore_space_address {
#define MLX5_DEFAULT_PROF 2
+static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
+ size_t item_size, size_t num_items,
+ const char *func, int line)
+{
+ int inlen;
+
+ if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
+ mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
+ mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ if (check_add_overflow((int)fixed, inlen, &inlen)) {
+ mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
+ __func__, func, line, fixed, item_size, num_items);
+ return -ENOMEM;
+ }
+
+ return inlen;
+}
+
+#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
+ mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
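
MLX5_FLEXIBLE_INLEN() either returns the total command length or a negative errno when fixed + item_size * num_items would overflow an int, so callers only need a sign check before allocating. A minimal caller sketch; the command layout and num_entries are illustrative:

        u32 *in;
        int inlen;

        inlen = MLX5_FLEXIBLE_INLEN(dev, MLX5_ST_SZ_BYTES(modify_rq_in),
                                    sizeof(u32), num_entries);
        if (inlen < 0)
                return inlen;                    /* -ENOMEM on overflow */

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
        /* build and execute the command, then */
        kvfree(in);
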
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e1bd54574ea5..a1548e6bfb35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -493,29 +493,6 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz)
-{
- u32 *in;
- int err;
-
- in = kvzalloc(sz, GFP_KERNEL);
- if (!in) {
- err = -ENOMEM;
- return err;
- }
-
- MLX5_SET(ppcnt_reg, in, local_port, port_num);
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
- err = mlx5_core_access_reg(dev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
-
- kvfree(in);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
-
static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out,
u32 out_size)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 062c7c74a1f3..1777a1e508e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1294,20 +1294,6 @@ struct mlx5dr_cmd_gid_attr {
u32 roce_ver;
};
-struct mlx5dr_cmd_qp_create_attr {
- u32 page_id;
- u32 pdn;
- u32 cqn;
- u32 pm_state;
- u32 service_type;
- u32 buff_umem_id;
- u32 db_umem_id;
- u32 sq_wqe_cnt;
- u32 rq_wqe_cnt;
- u32 rq_wqe_shift;
- u8 isolate_vl_tc:1;
-};
-
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
index 1fb185d6ac7f..d168622063d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
@@ -14,10 +14,6 @@ struct mlx5_fs_dr_action {
struct mlx5dr_action *dr_action;
};
-struct mlx5_fs_dr_ns {
- struct mlx5_dr_ns *dr_ns;
-};
-
struct mlx5_fs_dr_rule {
struct mlx5dr_rule *dr_rule;
/* Only actions created by fs_dr */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e5c4dcd1425e..4d629e5ddbc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -123,7 +123,7 @@ static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq)
wq->cur_sz++;
}
-static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n)
+static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u16 n)
{
wq->wqe_ctr += n;
wq->cur_sz += n;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 5fdf9b7179f5..5a1027b07215 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -75,6 +75,7 @@ struct mlxbf_gige {
struct net_device *netdev;
struct platform_device *pdev;
void __iomem *mdio_io;
+ void __iomem *clk_io;
struct mii_bus *mdiobus;
spinlock_t lock; /* for packet processing indices */
u16 rx_q_entries;
@@ -137,7 +138,8 @@ enum mlxbf_gige_res {
MLXBF_GIGE_RES_MDIO9,
MLXBF_GIGE_RES_GPIO0,
MLXBF_GIGE_RES_LLU,
- MLXBF_GIGE_RES_PLU
+ MLXBF_GIGE_RES_PLU,
+ MLXBF_GIGE_RES_CLK
};
/* Version of register data returned by mlxbf_gige_get_regs() */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index b03e1c66bac0..2292d63a279c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -156,7 +156,7 @@ static int mlxbf_gige_open(struct net_device *netdev)
phy_start(phydev);
- netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 2e6c1b7af096..aa780b1614a3 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -22,10 +22,23 @@
#include <linux/property.h>
#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
+#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
+#define MLXBF_GIGE_MDC_CLK_NS 400
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
+#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
+#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
+#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
+#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
+
/* Support clause 22 */
#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
@@ -50,27 +63,76 @@
#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
+#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+
+static struct resource corepll_params[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .start = MLXBF_GIGE_BF2_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ },
+};
+
+/* Returns core clock i1clk in Hz */
+static u64 calculate_i1clk(struct mlxbf_gige *priv)
+{
+ u8 core_od, core_r;
+ u64 freq_output;
+ u32 reg1, reg2;
+ u32 core_f;
+
+ reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
+ reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
+
+ core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_F_SHIFT;
+ core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_R_SHIFT;
+ core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
+
+ /* Compute PLL output frequency as follows:
+ *
+ * CORE_F / 16384
+ * freq_output = freq_reference * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ */
+ freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
+ MLXBF_GIGE_MDIO_COREPLL_CONST);
+ freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
+
+ return freq_output;
+}
+
/* Formula for encoding the MDIO period. The encoded value is
* passed to the MDIO config register.
*
- * mdc_clk = 2*(val + 1)*i1clk
+ * mdc_clk = 2*(val + 1)*(core clock period in sec)
*
- * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ * i1clk is in Hz:
+ * 400 ns = 2*(val + 1)*(1/i1clk)
*
- * val = (((400 * 430 / 1000) / 2) - 1)
+ * val = (((400/10^9) / (1/i1clk) / 2) - 1)
+ * val = (400/2 * i1clk)/10^9 - 1
*/
-#define MLXBF_GIGE_I1CLK_MHZ 430
-#define MLXBF_GIGE_MDC_CLK_NS 400
+static u8 mdio_period_map(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u64 i1clk;
-#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+ i1clk = calculate_i1clk(priv);
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
- MLXBF_GIGE_MDIO_PERIOD) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+ mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
+
+ return mdio_period;
+}
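
A worked example with made-up PLL register values, just to show how the two steps above combine (core_f = 139264, core_r = 0, core_od = 2):

        i1clk       = 156250000 * 139264 / 16384 / ((0 + 1) * (2 + 1)) = 442708333 Hz
        mdio_period = (400 >> 1) * 442708333 / 1000000000 - 1          = 87
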
static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
int phy_reg, u32 opcode)
@@ -117,6 +179,9 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
/* Only return ad bits of the gw register */
ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ /* The MDIO lock is set on read. To release it, clear gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
@@ -124,9 +189,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
int phy_reg, u16 val)
{
struct mlxbf_gige *priv = bus->priv;
+ u32 temp;
u32 cmd;
int ret;
- u32 temp;
if (phy_reg & MII_ADDR_C45)
return -EOPNOTSUPP;
@@ -141,21 +206,50 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
5, 1000000);
+ /* The MDIO lock is set on read. To release it, clear gw register */
+ writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+
return ret;
}
+static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u32 val;
+
+ mdio_period = mdio_period_map(priv);
+
+ val = MLXBF_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+}
+
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
{
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
- /* Configure mdio parameters */
- writel(MLXBF_GIGE_MDIO_CFG_VAL,
- priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ /* clk resource shared with other drivers so cannot use
+ * devm_platform_ioremap_resource
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
+ if (!res) {
+ /* For backward compatibility with older ACPI tables, also keep
+ * CLK resource internal to the driver.
+ */
+ res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ }
+
+ priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (!priv->clk_io)
+ return -ENOMEM;
+
+ mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
if (!priv->mdiobus) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 5fb33c9294bf..7be3a793984d 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,6 +8,8 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#define MLXBF_GIGE_VERSION 0x0000
+#define MLXBF_GIGE_VERSION_BF2 0x0
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 60232fb8ccd7..09bef04b11d1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -703,6 +703,9 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
/* cmd_mbox_config_profile_max_lag
* Maximum number of LAG IDs requested.
+ * Reserved on Spectrum-1/2/3; supported from Spectrum-4 and above.
+ * For Spectrum-4, firmware rounds values in the range 1-128 up to 128 and
+ * values in the range 129-256 up to 256.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 75553eb2c7f2..e2a985ec2c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -70,6 +70,8 @@ struct mlxsw_core {
struct workqueue_struct *emad_wq;
struct list_head rx_listener_list;
struct list_head event_listener_list;
+ struct list_head irq_event_handler_list;
+ struct mutex irq_event_handler_lock; /* Locks access to handlers list */
struct {
atomic64_t tid;
struct list_head trans_list;
@@ -184,6 +186,23 @@ unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_max_ports);
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
+{
+ struct mlxsw_driver *driver = mlxsw_core->driver;
+
+ if (driver->profile->used_max_lag) {
+ *p_max_lag = driver->profile->max_lag;
+ return 0;
+ }
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
+ return -EIO;
+
+ *p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_max_lag);
+
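
mlxsw_core_max_lag() prefers a value pinned in the driver profile and only falls back to the MAX_LAG resource when the profile does not set one. A driver that wants a fixed cap would use the new profile fields; the structure name and the value 128 below are illustrative:

        static const struct mlxsw_config_profile example_config_profile = {
                .used_max_lag   = 1,
                .max_lag        = 128,
                /* other used_* / max_* fields as before */
        };
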
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
@@ -633,7 +652,7 @@ static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
return;
string = mlxsw_emad_string_tlv_string_data(string_tlv);
- strlcpy(trans->emad_err_string, string,
+ strscpy(trans->emad_err_string, string,
MLXSW_EMAD_STRING_TLV_STRING_LEN);
}
@@ -1305,21 +1324,6 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
extack);
}
-static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type port_type)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
- struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
- struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
-
- if (!mlxsw_driver->port_type_set)
- return -EOPNOTSUPP;
-
- return mlxsw_driver->port_type_set(mlxsw_core,
- mlxsw_core_port->local_port,
- port_type);
-}
-
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
@@ -1650,7 +1654,6 @@ static const struct devlink_ops mlxsw_devlink_ops = {
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
- .port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
.sb_pool_get = mlxsw_devlink_sb_pool_get,
@@ -2090,6 +2093,18 @@ static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
+static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
+{
+ INIT_LIST_HEAD(&mlxsw_core->irq_event_handler_list);
+ mutex_init(&mlxsw_core->irq_event_handler_lock);
+}
+
+static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core)
+{
+ mutex_destroy(&mlxsw_core->irq_event_handler_lock);
+ WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list));
+}
+
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
@@ -2101,6 +2116,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
size_t alloc_size;
+ u16 max_lag;
int err;
mlxsw_driver = mlxsw_core_driver_get(device_kind);
@@ -2125,6 +2141,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
mlxsw_core->bus = mlxsw_bus;
mlxsw_core->bus_priv = bus_priv;
mlxsw_core->bus_info = mlxsw_bus_info;
+ mlxsw_core_irq_event_handler_init(mlxsw_core);
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->res);
@@ -2141,10 +2158,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_ports_init;
- if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
- MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
- alloc_size = sizeof(*mlxsw_core->lag.mapping) *
- MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
+ err = mlxsw_core_max_lag(mlxsw_core, &max_lag);
+ if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
+ alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
@@ -2233,6 +2249,7 @@ err_ports_init:
err_register_resources:
mlxsw_bus->fini(bus_priv);
err_bus_init:
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
devl_unlock(devlink);
devlink_free(devlink);
@@ -2302,6 +2319,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (!reload)
devl_resources_unregister(devlink);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
devl_unlock(devlink);
devlink_free(devlink);
@@ -2772,6 +2790,57 @@ int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+struct mlxsw_core_irq_event_handler_item {
+ struct list_head list;
+ void (*cb)(struct mlxsw_core *mlxsw_core);
+};
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ item->cb = cb;
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_add_tail(&item->list, &mlxsw_core->irq_event_handler_list);
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register);
+
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb)
+{
+ struct mlxsw_core_irq_event_handler_item *item, *tmp;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry_safe(item, tmp,
+ &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb == cb) {
+ list_del(&item->list);
+ kfree(item);
+ }
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister);
+
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_core_irq_event_handler_item *item;
+
+ mutex_lock(&mlxsw_core->irq_event_handler_lock);
+ list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) {
+ if (item->cb)
+ item->cb(mlxsw_core);
+ }
+ mutex_unlock(&mlxsw_core->irq_event_handler_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call);
+
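
The three helpers let any core consumer hook the PCI IRQ path without the bus layer knowing about it. A hypothetical consumer (function name illustrative) registers on init and unregisters on fini:

        static void example_irq_event_handler(struct mlxsw_core *mlxsw_core)
        {
                /* re-read whatever hardware state the interrupt may have changed */
        }

        /* init path */
        err = mlxsw_core_irq_event_handler_register(mlxsw_core,
                                                    example_irq_event_handler);
        if (err)
                return err;

        /* fini path */
        mlxsw_core_irq_event_handler_unregister(mlxsw_core,
                                                example_irq_event_handler);
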
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
@@ -3115,18 +3184,6 @@ void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_type_ib_set(devlink_port, NULL);
-}
-EXPORT_SYMBOL(mlxsw_core_port_ib_set);
-
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv)
{
@@ -3139,18 +3196,6 @@ void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
-enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u16 local_port)
-{
- struct mlxsw_core_port *mlxsw_core_port =
- &mlxsw_core->ports[local_port];
- struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
-
- return devlink_port->type;
-}
-EXPORT_SYMBOL(mlxsw_core_port_type_get);
-
-
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 02d9cc2ef0c8..ca0c3d2bee6b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -35,6 +35,8 @@ struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
+int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core);
@@ -215,6 +217,14 @@ int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+typedef void mlxsw_irq_event_cb_t(struct mlxsw_core *mlxsw_core);
+
+int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
+ mlxsw_irq_event_cb_t cb);
+void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core);
+
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload);
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -256,12 +266,8 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv, struct net_device *dev);
-void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u16 local_port,
- void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u16 local_port,
void *port_driver_priv);
-enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
- u16 local_port);
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u16 local_port);
@@ -291,6 +297,7 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
+ used_max_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -306,6 +313,7 @@ struct mlxsw_config_profile {
used_kvd_sizes:1,
used_cqe_time_stamp_type:1;
u8 max_vepa_channels;
+ u16 max_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -341,8 +349,6 @@ struct mlxsw_driver {
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
void (*fini)(struct mlxsw_core *mlxsw_core);
- int (*port_type_set)(struct mlxsw_core *mlxsw_core, u16 local_port,
- enum devlink_port_type new_type);
int (*port_split)(struct mlxsw_core *mlxsw_core, u16 local_port,
unsigned int count, struct netlink_ext_ack *extack);
int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u16 local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 636db9a87457..9dfe7148199f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -737,8 +737,9 @@ mlxsw_afa_cookie_create(struct mlxsw_afa *mlxsw_afa,
if (!cookie)
return ERR_PTR(-ENOMEM);
refcount_set(&cookie->ref_count, 1);
- memcpy(&cookie->fa_cookie, fa_cookie,
- sizeof(*fa_cookie) + fa_cookie->cookie_len);
+ cookie->fa_cookie = *fa_cookie;
+ memcpy(cookie->fa_cookie.cookie, fa_cookie->cookie,
+ fa_cookie->cookie_len);
err = rhashtable_insert_fast(&mlxsw_afa->cookie_ht, &cookie->ht_node,
mlxsw_afa_cookie_ht_params);
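
The replaced single memcpy covered both the fixed part of flow_action_cookie and its trailing flexible array, the kind of field-spanning write that fortified memcpy() checks tend to flag; copying the header by assignment and the payload separately stays within the declared bounds. A generic sketch of the pattern (cookie_holder is hypothetical):

        struct flow_action_cookie {
                u32 cookie_len;
                u8 cookie[];                            /* flexible array member */
        };

        struct cookie_holder {
                /* bookkeeping fields */
                struct flow_action_cookie fa_cookie;    /* must be the last member */
        };

        holder->fa_cookie = *src;                       /* fixed-size header */
        memcpy(holder->fa_cookie.cookie, src->cookie,   /* variable payload */
               src->cookie_len);
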
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index ca59f0b946da..83d2dc91ba2c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -785,6 +785,21 @@ static int mlxsw_linecard_status_get_and_process(struct mlxsw_core *mlxsw_core,
return mlxsw_linecard_status_process(linecards, linecard, mddq_pl);
}
+static void mlxsw_linecards_irq_event_handler(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_linecards *linecards = mlxsw_core_linecards(mlxsw_core);
+ int i;
+
+ /* Handle change of line card active state. */
+ for (i = 0; i < linecards->count; i++) {
+ struct mlxsw_linecard *linecard = mlxsw_linecard_get(linecards,
+ i + 1);
+
+ mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
+ linecard);
+ }
+}
+
static const char * const mlxsw_linecard_status_event_type_name[] = {
[MLXSW_LINECARD_STATUS_EVENT_TYPE_PROVISION] = "provision",
[MLXSW_LINECARD_STATUS_EVENT_TYPE_UNPROVISION] = "unprovision",
@@ -1238,7 +1253,6 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
{
struct devlink_linecard *devlink_linecard;
struct mlxsw_linecard *linecard;
- int err;
linecard = mlxsw_linecard_get(linecards, slot_index);
linecard->slot_index = slot_index;
@@ -1248,17 +1262,45 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
slot_index, &mlxsw_linecard_ops,
linecard);
- if (IS_ERR(devlink_linecard)) {
- err = PTR_ERR(devlink_linecard);
- goto err_devlink_linecard_create;
- }
+ if (IS_ERR(devlink_linecard))
+ return PTR_ERR(devlink_linecard);
+
linecard->devlink_linecard = devlink_linecard;
INIT_DELAYED_WORK(&linecard->status_event_to_dw,
&mlxsw_linecard_status_event_to_work);
+ return 0;
+}
+
+static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
+ cancel_delayed_work_sync(&linecard->status_event_to_dw);
+ /* Make sure all scheduled events are processed */
+ mlxsw_core_flush_owq();
+ if (linecard->active)
+ mlxsw_linecard_active_clear(linecard);
+ mlxsw_linecard_bdev_del(linecard);
+ devlink_linecard_destroy(linecard->devlink_linecard);
+ mutex_destroy(&linecard->lock);
+}
+
+static int
+mlxsw_linecard_event_delivery_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
+{
+ struct mlxsw_linecard *linecard;
+ int err;
+
+ linecard = mlxsw_linecard_get(linecards, slot_index);
err = mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, true);
if (err)
- goto err_event_delivery_set;
+ return err;
err = mlxsw_linecard_status_get_and_process(mlxsw_core, linecards,
linecard);
@@ -1269,29 +1311,18 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
err_status_get_and_process:
mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
-err_event_delivery_set:
- devlink_linecard_destroy(linecard->devlink_linecard);
-err_devlink_linecard_create:
- mutex_destroy(&linecard->lock);
return err;
}
-static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
- struct mlxsw_linecards *linecards,
- u8 slot_index)
+static void
+mlxsw_linecard_event_delivery_fini(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_linecards *linecards,
+ u8 slot_index)
{
struct mlxsw_linecard *linecard;
linecard = mlxsw_linecard_get(linecards, slot_index);
mlxsw_linecard_event_delivery_set(mlxsw_core, linecard, false);
- cancel_delayed_work_sync(&linecard->status_event_to_dw);
- /* Make sure all scheduled events are processed */
- mlxsw_core_flush_owq();
- if (linecard->active)
- mlxsw_linecard_active_clear(linecard);
- mlxsw_linecard_bdev_del(linecard);
- devlink_linecard_destroy(linecard->devlink_linecard);
- mutex_destroy(&linecard->lock);
}
/* LINECARDS INI BUNDLE FILE
@@ -1505,6 +1536,11 @@ int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
if (err)
goto err_traps_register;
+ err = mlxsw_core_irq_event_handler_register(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+ if (err)
+ goto err_irq_event_handler_register;
+
mlxsw_core_linecards_set(mlxsw_core, linecards);
for (i = 0; i < linecards->count; i++) {
@@ -1513,11 +1549,25 @@ int mlxsw_linecards_init(struct mlxsw_core *mlxsw_core,
goto err_linecard_init;
}
+ for (i = 0; i < linecards->count; i++) {
+ err = mlxsw_linecard_event_delivery_init(mlxsw_core, linecards,
+ i + 1);
+ if (err)
+ goto err_linecard_event_delivery_init;
+ }
+
return 0;
+err_linecard_event_delivery_init:
+ for (i--; i >= 0; i--)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ i = linecards->count;
err_linecard_init:
for (i--; i >= 0; i--)
mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
+err_irq_event_handler_register:
mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
ARRAY_SIZE(mlxsw_linecard_listener),
mlxsw_core);
@@ -1536,7 +1586,11 @@ void mlxsw_linecards_fini(struct mlxsw_core *mlxsw_core)
if (!linecards)
return;
for (i = 0; i < linecards->count; i++)
+ mlxsw_linecard_event_delivery_fini(mlxsw_core, linecards, i + 1);
+ for (i = 0; i < linecards->count; i++)
mlxsw_linecard_fini(mlxsw_core, linecards, i + 1);
+ mlxsw_core_irq_event_handler_unregister(mlxsw_core,
+ mlxsw_linecards_irq_event_handler);
mlxsw_core_traps_unregister(mlxsw_core, mlxsw_linecard_listener,
ARRAY_SIZE(mlxsw_linecard_listener),
mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 3548fe1df7c8..987fe5c9d5a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -21,7 +21,6 @@
#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
-#define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0)
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MIN_STATE 2
#define MLXSW_THERMAL_MAX_DUTY 255
@@ -101,8 +100,6 @@ struct mlxsw_thermal {
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
- unsigned int tz_highest_score;
- struct thermal_zone_device *tz_highest_dev;
struct mlxsw_thermal_area line_cards[];
};
@@ -193,34 +190,6 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
return 0;
}
-static void mlxsw_thermal_tz_score_update(struct mlxsw_thermal *thermal,
- struct thermal_zone_device *tzdev,
- struct mlxsw_thermal_trip *trips,
- int temp)
-{
- struct mlxsw_thermal_trip *trip = trips;
- unsigned int score, delta, i, shift = 1;
-
- /* Calculate thermal zone score, if temperature is above the hot
- * threshold score is set to MLXSW_THERMAL_TEMP_SCORE_MAX.
- */
- score = MLXSW_THERMAL_TEMP_SCORE_MAX;
- for (i = MLXSW_THERMAL_TEMP_TRIP_NORM; i < MLXSW_THERMAL_NUM_TRIPS;
- i++, trip++) {
- if (temp < trip->temp) {
- delta = DIV_ROUND_CLOSEST(temp, trip->temp - temp);
- score = delta * shift;
- break;
- }
- shift *= 256;
- }
-
- if (score > thermal->tz_highest_score) {
- thermal->tz_highest_score = score;
- thermal->tz_highest_dev = tzdev;
- }
-}
-
static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
struct thermal_cooling_device *cdev)
{
@@ -286,9 +255,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return err;
}
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
- if (temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips,
- temp);
*p_temp = temp;
return 0;
@@ -349,21 +315,6 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
return 0;
}
-static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trend *trend)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- if (tzdev == thermal->tz_highest_dev)
- return 1;
-
- *trend = THERMAL_TREND_STABLE;
- return 0;
-}
-
static struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
@@ -377,7 +328,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.set_trip_temp = mlxsw_thermal_set_trip_temp,
.get_trip_hyst = mlxsw_thermal_get_trip_hyst,
.set_trip_hyst = mlxsw_thermal_set_trip_hyst,
- .get_trend = mlxsw_thermal_trend_get,
};
static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
@@ -463,7 +413,6 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
int temp, crit_temp, emerg_temp;
struct device *dev;
u16 sensor_index;
- int err;
dev = thermal->bus_info->dev;
sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module;
@@ -479,10 +428,8 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
/* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
- crit_temp, emerg_temp);
- if (!err && temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
+ mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
+ crit_temp, emerg_temp);
return 0;
}
@@ -546,22 +493,6 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
-static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trend *trend)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
- struct mlxsw_thermal *thermal = tz->parent;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- if (tzdev == thermal->tz_highest_dev)
- return 1;
-
- *trend = THERMAL_TREND_STABLE;
- return 0;
-}
-
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@@ -571,7 +502,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@@ -592,8 +522,6 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
return err;
mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
- if (temp > 0)
- mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
*p_temp = temp;
return 0;
@@ -608,7 +536,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index ce843ea91464..f5f5f8dc3d19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -9,6 +9,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/platform_data/mlxreg.h>
#include <linux/slab.h>
#include "cmd.h"
@@ -51,6 +52,15 @@
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
+/* The driver can be initialized by a kernel platform driver or from user
+ * space. In the first case the IRQ line number is passed through the
+ * platform data; otherwise the default IRQ line is used. The default IRQ is
+ * relevant only for a specific I2C slave address, which provides the 3.4 MHz
+ * I2C path to the chip (a special hardware feature for I2C acceleration).
+ */
+#define MLXSW_I2C_DEFAULT_IRQ 17
+#define MLXSW_FAST_I2C_SLAVE 0x37
+
/**
* struct mlxsw_i2c - device private data:
* @cmd: command attributes;
@@ -63,6 +73,9 @@
* @core: switch core pointer;
* @bus_info: bus info block;
* @block_size: maximum block size allowed to pass to under layer;
+ * @pdata: device platform data;
+ * @irq_work: interrupts work item;
+ * @irq: IRQ line number;
*/
struct mlxsw_i2c {
struct {
@@ -76,6 +89,9 @@ struct mlxsw_i2c {
struct mlxsw_core *core;
struct mlxsw_bus_info bus_info;
u16 block_size;
+ struct mlxreg_core_hotplug_platform_data *pdata;
+ struct work_struct irq_work;
+ int irq;
};
#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \
@@ -546,6 +562,67 @@ static void mlxsw_i2c_fini(void *bus_priv)
mlxsw_i2c->core = NULL;
}
+static void mlxsw_i2c_work_handler(struct work_struct *work)
+{
+ struct mlxsw_i2c *mlxsw_i2c;
+
+ mlxsw_i2c = container_of(work, struct mlxsw_i2c, irq_work);
+ mlxsw_core_irq_event_handlers_call(mlxsw_i2c->core);
+}
+
+static irqreturn_t mlxsw_i2c_irq_handler(int irq, void *dev)
+{
+ struct mlxsw_i2c *mlxsw_i2c = dev;
+
+ mlxsw_core_schedule_work(&mlxsw_i2c->irq_work);
+
+ /* The interrupt handler shares the IRQ line with the 'main' interrupt
+ * handler. Return IRQ_NONE here; the main handler returns IRQ_HANDLED.
+ */
+ return IRQ_NONE;
+}
+
+static int mlxsw_i2c_irq_init(struct mlxsw_i2c *mlxsw_i2c, u8 addr)
+{
+ int err;
+
+ /* Initialize the interrupt handler only if the system hotplug driver is
+ * reachable; otherwise the interrupt line is not enabled, interrupts will
+ * not be raised to the CPU, and a request_irq() call would not be valid.
+ */
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG))
+ return 0;
+
+ /* Set default interrupt line. */
+ if (mlxsw_i2c->pdata && mlxsw_i2c->pdata->irq)
+ mlxsw_i2c->irq = mlxsw_i2c->pdata->irq;
+ else if (addr == MLXSW_FAST_I2C_SLAVE)
+ mlxsw_i2c->irq = MLXSW_I2C_DEFAULT_IRQ;
+
+ if (!mlxsw_i2c->irq)
+ return 0;
+
+ INIT_WORK(&mlxsw_i2c->irq_work, mlxsw_i2c_work_handler);
+ err = request_irq(mlxsw_i2c->irq, mlxsw_i2c_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED, "mlxsw-i2c",
+ mlxsw_i2c);
+ if (err) {
+ dev_err(mlxsw_i2c->bus_info.dev, "Failed to request irq: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_i2c_irq_fini(struct mlxsw_i2c *mlxsw_i2c)
+{
+ if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG) || !mlxsw_i2c->irq)
+ return;
+ cancel_work_sync(&mlxsw_i2c->irq_work);
+ free_irq(mlxsw_i2c->irq, mlxsw_i2c);
+}
+
static const struct mlxsw_bus mlxsw_i2c_bus = {
.kind = "i2c",
.init = mlxsw_i2c_init,
@@ -638,17 +715,24 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
mlxsw_i2c->bus_info.dev = &client->dev;
mlxsw_i2c->bus_info.low_frequency = true;
mlxsw_i2c->dev = &client->dev;
+ mlxsw_i2c->pdata = client->dev.platform_data;
+
+ err = mlxsw_i2c_irq_init(mlxsw_i2c, client->addr);
+ if (err)
+ goto errout;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info,
&mlxsw_i2c_bus, mlxsw_i2c, false,
NULL, NULL);
if (err) {
dev_err(&client->dev, "Fail to register core bus\n");
- return err;
+ goto err_bus_device_register;
}
return 0;
+err_bus_device_register:
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
errout:
mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
@@ -656,14 +740,13 @@ errout:
return err;
}
-static int mlxsw_i2c_remove(struct i2c_client *client)
+static void mlxsw_i2c_remove(struct i2c_client *client)
{
struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false);
+ mlxsw_i2c_irq_fini(mlxsw_i2c);
mutex_destroy(&mlxsw_i2c->cmd.lock);
-
- return 0;
}
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
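
The IRQ setup added above prefers an IRQ number supplied via platform data and falls back to the hard-coded default only for the fast I2C slave address; with no IRQ at all, request_irq() is skipped and the driver keeps working without interrupt-driven events. A condensed sketch of that selection logic (the helper name is invented for illustration; the constants and the pdata->irq field are the ones used by the patch):

	#include <linux/platform_data/mlxreg.h>

	static int mlxsw_i2c_pick_irq(const struct mlxreg_core_hotplug_platform_data *pdata,
				      u8 addr)
	{
		if (pdata && pdata->irq)
			return pdata->irq;		/* kernel platform driver case */
		if (addr == MLXSW_FAST_I2C_SLAVE)
			return MLXSW_I2C_DEFAULT_IRQ;	/* 3.4 MHz accelerated path */
		return 0;				/* no IRQ; polling-only operation */
	}
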
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index bb1cd4bae82e..55b3c42bb007 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -26,20 +26,29 @@ static const struct mlxsw_fw_rev mlxsw_m_fw_rev = {
struct mlxsw_m_port;
+struct mlxsw_m_line_card {
+ bool active;
+ int module_to_port[];
+};
+
struct mlxsw_m {
struct mlxsw_m_port **ports;
- int *module_to_port;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
u8 base_mac[ETH_ALEN];
u8 max_ports;
+ u8 max_modules_per_slot; /* Maximum number of modules per slot. */
+ u8 num_of_slots; /* Including the main board. */
+ struct mlxsw_m_line_card **line_cards;
};
struct mlxsw_m_port {
struct net_device *dev;
struct mlxsw_m *mlxsw_m;
u16 local_port;
+ u8 slot_index;
u8 module;
+ u8 module_offset;
};
static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
@@ -94,14 +103,14 @@ static void mlxsw_m_module_get_drvinfo(struct net_device *dev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- strlcpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
+ strscpy(drvinfo->driver, mlxsw_m->bus_info->device_kind,
sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
mlxsw_m->bus_info->fw_rev.major,
mlxsw_m->bus_info->fw_rev.minor,
mlxsw_m->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
+ strscpy(drvinfo->bus_info, mlxsw_m->bus_info->device_name,
sizeof(drvinfo->bus_info));
}
@@ -111,8 +120,9 @@ static int mlxsw_m_get_module_info(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_info(netdev, core, 0, mlxsw_m_port->module,
- modinfo);
+ return mlxsw_env_get_module_info(netdev, core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module, modinfo);
}
static int
@@ -122,7 +132,8 @@ mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom(netdev, core, 0,
+ return mlxsw_env_get_module_eeprom(netdev, core,
+ mlxsw_m_port->slot_index,
mlxsw_m_port->module, ee, data);
}
@@ -134,7 +145,8 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_eeprom_by_page(core, 0,
+ return mlxsw_env_get_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
mlxsw_m_port->module,
page, extack);
}
@@ -144,7 +156,8 @@ static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_reset_module(netdev, core, 0, mlxsw_m_port->module,
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
flags);
}
@@ -156,7 +169,8 @@ mlxsw_m_get_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_get_module_power_mode(core, 0, mlxsw_m_port->module,
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
params, extack);
}
@@ -168,7 +182,8 @@ mlxsw_m_set_module_power_mode(struct net_device *netdev,
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- return mlxsw_env_set_module_power_mode(core, 0, mlxsw_m_port->module,
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
params->policy, extack);
}
@@ -184,7 +199,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
static int
mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
- u8 *p_module, u8 *p_width)
+ u8 *p_module, u8 *p_width, u8 *p_slot_index)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -195,6 +210,7 @@ mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u16 local_port,
return err;
*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
return 0;
}
@@ -212,18 +228,25 @@ mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
if (err)
return err;
mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
- eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1 +
+ mlxsw_m_port->module_offset);
return 0;
}
+static bool mlxsw_m_port_created(struct mlxsw_m *mlxsw_m, u16 local_port)
+{
+ return mlxsw_m->ports[local_port];
+}
+
static int
-mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
+mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 slot_index,
+ u8 module)
{
struct mlxsw_m_port *mlxsw_m_port;
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port, 0,
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port, slot_index,
module + 1, false, 0, false,
0, mlxsw_m->base_mac,
sizeof(mlxsw_m->base_mac));
@@ -246,6 +269,15 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u16 local_port, u8 module)
mlxsw_m_port->mlxsw_m = mlxsw_m;
mlxsw_m_port->local_port = local_port;
mlxsw_m_port->module = module;
+ mlxsw_m_port->slot_index = slot_index;
+ /* Add the module offset for a line card. The offset for the main board
+ * is zero. For a line card in slot #n the offset is (#n - 1) multiplied
+ * by the maximum number of modules that can be found on a line card.
+ */
+ mlxsw_m_port->module_offset = mlxsw_m_port->slot_index ?
+ (mlxsw_m_port->slot_index - 1) *
+ mlxsw_m->max_modules_per_slot : 0;
dev->netdev_ops = &mlxsw_m_port_netdev_ops;
dev->ethtool_ops = &mlxsw_m_port_ethtool_ops;
@@ -291,19 +323,29 @@ static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u16 local_port)
mlxsw_core_port_fini(mlxsw_m->core, local_port);
}
+static int *
+mlxsw_m_port_mapping_get(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
+{
+ return &mlxsw_m->line_cards[slot_index]->module_to_port[module];
+}
+
static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
u8 *last_module)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- u8 module, width;
+ u8 module, width, slot_index;
+ int *module_to_port;
int err;
/* Fill out to local port mapping array */
err = mlxsw_m_port_module_info_get(mlxsw_m, local_port, &module,
- &width);
+ &width, &slot_index);
if (err)
return err;
+ /* Skip if the line card has already been configured */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ return 0;
if (!width)
return 0;
/* Skip, if port belongs to the cluster */
@@ -313,91 +355,220 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u16 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
- mlxsw_env_module_port_map(mlxsw_m->core, 0, module);
- mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
+ mlxsw_env_module_port_map(mlxsw_m->core, slot_index, module);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, module);
+ *module_to_port = local_port;
return 0;
}
-static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
+static void
+mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index, u8 module)
{
- mlxsw_m->module_to_port[module] = -1;
- mlxsw_env_module_port_unmap(mlxsw_m->core, 0, module);
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index,
+ module);
+ *module_to_port = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, slot_index, module);
}
-static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+static int mlxsw_m_linecards_init(struct mlxsw_m *mlxsw_m)
{
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
- u8 last_module = max_ports;
- int i;
- int err;
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 num_of_modules;
+ int i, j, err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl, 0);
+ err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &num_of_modules,
+ &mlxsw_m->num_of_slots);
+ /* If the system is modular, get the maximum number of modules per slot.
+ * Otherwise, get the maximum number of modules on the main board.
+ */
+ if (mlxsw_m->num_of_slots)
+ mlxsw_m->max_modules_per_slot =
+ mlxsw_reg_mgpir_max_modules_per_slot_get(mgpir_pl);
+ else
+ mlxsw_m->max_modules_per_slot = num_of_modules;
+ /* Add slot for main board. */
+ mlxsw_m->num_of_slots += 1;
mlxsw_m->ports = kcalloc(max_ports, sizeof(*mlxsw_m->ports),
GFP_KERNEL);
if (!mlxsw_m->ports)
return -ENOMEM;
- mlxsw_m->module_to_port = kmalloc_array(max_ports, sizeof(int),
- GFP_KERNEL);
- if (!mlxsw_m->module_to_port) {
+ mlxsw_m->line_cards = kcalloc(mlxsw_m->num_of_slots,
+ sizeof(*mlxsw_m->line_cards),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards) {
err = -ENOMEM;
- goto err_module_to_port_alloc;
+ goto err_kcalloc;
}
- /* Invalidate the entries of module to local port mapping array */
- for (i = 0; i < max_ports; i++)
- mlxsw_m->module_to_port[i] = -1;
+ for (i = 0; i < mlxsw_m->num_of_slots; i++) {
+ mlxsw_m->line_cards[i] =
+ kzalloc(struct_size(mlxsw_m->line_cards[i],
+ module_to_port,
+ mlxsw_m->max_modules_per_slot),
+ GFP_KERNEL);
+ if (!mlxsw_m->line_cards[i]) {
+ err = -ENOMEM;
+ goto err_kmalloc_array;
+ }
- /* Fill out module to local port mapping array */
- for (i = 1; i < max_ports; i++) {
- err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
- if (err)
- goto err_module_to_port_map;
+ /* Invalidate the entries of module to local port mapping array. */
+ for (j = 0; j < mlxsw_m->max_modules_per_slot; j++)
+ mlxsw_m->line_cards[i]->module_to_port[j] = -1;
}
- /* Create port objects for each valid entry */
- for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0) {
- err = mlxsw_m_port_create(mlxsw_m,
- mlxsw_m->module_to_port[i],
- i);
+ return 0;
+
+err_kmalloc_array:
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+err_kcalloc:
+ kfree(mlxsw_m->ports);
+ return err;
+}
+
+static void mlxsw_m_linecards_fini(struct mlxsw_m *mlxsw_m)
+{
+ int i = mlxsw_m->num_of_slots;
+
+ for (i--; i >= 0; i--)
+ kfree(mlxsw_m->line_cards[i]);
+ kfree(mlxsw_m->line_cards);
+ kfree(mlxsw_m->ports);
+}
+
+static void
+mlxsw_m_linecard_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int i;
+
+ for (i = mlxsw_m->max_modules_per_slot - 1; i >= 0; i--) {
+ int *module_to_port;
+
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0)
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
+ }
+}
+
+static int
+mlxsw_m_linecard_ports_create(struct mlxsw_m *mlxsw_m, u8 slot_index)
+{
+ int *module_to_port;
+ int i, err;
+
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0) {
+ err = mlxsw_m_port_create(mlxsw_m, *module_to_port,
+ slot_index, i);
if (err)
- goto err_module_to_port_create;
+ goto err_port_create;
+ /* Mark slot as active */
+ if (!mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = true;
}
}
-
return 0;
-err_module_to_port_create:
+err_port_create:
for (i--; i >= 0; i--) {
- if (mlxsw_m->module_to_port[i] > 0)
- mlxsw_m_port_remove(mlxsw_m,
- mlxsw_m->module_to_port[i]);
+ module_to_port = mlxsw_m_port_mapping_get(mlxsw_m, slot_index, i);
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ /* Mark slot as inactive */
+ if (mlxsw_m->line_cards[slot_index]->active)
+ mlxsw_m->line_cards[slot_index]->active = false;
+ }
}
- i = max_ports;
-err_module_to_port_map:
- for (i--; i > 0; i--)
- mlxsw_m_port_module_unmap(mlxsw_m, i);
- kfree(mlxsw_m->module_to_port);
-err_module_to_port_alloc:
- kfree(mlxsw_m->ports);
return err;
}
-static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+static void
+mlxsw_m_linecard_ports_remove(struct mlxsw_m *mlxsw_m, u8 slot_index)
{
int i;
- for (i = 0; i < mlxsw_m->max_ports; i++) {
- if (mlxsw_m->module_to_port[i] > 0) {
- mlxsw_m_port_remove(mlxsw_m,
- mlxsw_m->module_to_port[i]);
- mlxsw_m_port_module_unmap(mlxsw_m, i);
+ for (i = 0; i < mlxsw_m->max_modules_per_slot; i++) {
+ int *module_to_port = mlxsw_m_port_mapping_get(mlxsw_m,
+ slot_index, i);
+
+ if (*module_to_port > 0 &&
+ mlxsw_m_port_created(mlxsw_m, *module_to_port)) {
+ mlxsw_m_port_remove(mlxsw_m, *module_to_port);
+ mlxsw_m_port_module_unmap(mlxsw_m, slot_index, i);
}
}
+}
- kfree(mlxsw_m->module_to_port);
- kfree(mlxsw_m->ports);
+static int mlxsw_m_ports_module_map(struct mlxsw_m *mlxsw_m)
+{
+ unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core);
+ u8 last_module = max_ports;
+ int i, err;
+
+ for (i = 1; i < max_ports; i++) {
+ err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
+{
+ int err;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, 0);
+ if (err)
+ goto err_linecard_ports_create;
+
+ return 0;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, 0);
+
+ return err;
+}
+
+static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m)
+{
+ mlxsw_m_linecard_ports_remove(mlxsw_m, 0);
+}
+
+static void
+mlxsw_m_ports_remove_selected(struct mlxsw_core *mlxsw_core,
+ bool (*selector)(void *priv, u16 local_port),
+ void *priv)
+{
+ struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_linecard *linecard_priv = priv;
+ struct mlxsw_m_line_card *linecard;
+
+ linecard = mlxsw_m->line_cards[linecard_priv->slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, linecard_priv->slot_index);
+ linecard->active = false;
}
static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
@@ -418,6 +589,60 @@ static int mlxsw_m_fw_rev_validate(struct mlxsw_m *mlxsw_m)
return -EINVAL;
}
+static void
+mlxsw_m_got_active(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+ int err;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+ /* Skip if the line card has already been configured during init */
+ if (linecard->active)
+ return;
+
+ /* Fill out module to local port mapping array */
+ err = mlxsw_m_ports_module_map(mlxsw_m);
+ if (err)
+ goto err_ports_module_map;
+
+ /* Create port objects for each valid entry */
+ err = mlxsw_m_linecard_ports_create(mlxsw_m, slot_index);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create port for line card at slot %d\n",
+ slot_index);
+ goto err_linecard_ports_create;
+ }
+
+ linecard->active = true;
+
+ return;
+
+err_linecard_ports_create:
+err_ports_module_map:
+ mlxsw_m_linecard_port_module_unmap(mlxsw_m, slot_index);
+}
+
+static void
+mlxsw_m_got_inactive(struct mlxsw_core *mlxsw_core, u8 slot_index, void *priv)
+{
+ struct mlxsw_m_line_card *linecard;
+ struct mlxsw_m *mlxsw_m = priv;
+
+ linecard = mlxsw_m->line_cards[slot_index];
+
+ if (WARN_ON(!linecard->active))
+ return;
+
+ mlxsw_m_linecard_ports_remove(mlxsw_m, slot_index);
+ linecard->active = false;
+}
+
+static struct mlxsw_linecards_event_ops mlxsw_m_event_ops = {
+ .got_active = mlxsw_m_got_active,
+ .got_inactive = mlxsw_m_got_inactive,
+};
+
static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack)
@@ -438,13 +663,33 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_m_linecards_init(mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to create line cards\n");
+ return err;
+ }
+
+ err = mlxsw_linecards_event_ops_register(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ if (err) {
+ dev_err(mlxsw_m->bus_info->dev, "Failed to register line cards operations\n");
+ goto linecards_event_ops_register;
+ }
+
err = mlxsw_m_ports_create(mlxsw_m);
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n");
- return err;
+ goto err_ports_create;
}
return 0;
+
+err_ports_create:
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+linecards_event_ops_register:
+ mlxsw_m_linecards_fini(mlxsw_m);
+ return err;
}
static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
@@ -452,6 +697,9 @@ static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_m_ports_remove(mlxsw_m);
+ mlxsw_linecards_event_ops_unregister(mlxsw_core,
+ &mlxsw_m_event_ops, mlxsw_m);
+ mlxsw_m_linecards_fini(mlxsw_m);
}
static const struct mlxsw_config_profile mlxsw_m_config_profile;
@@ -461,6 +709,7 @@ static struct mlxsw_driver mlxsw_m_driver = {
.priv_size = sizeof(struct mlxsw_m),
.init = mlxsw_m_init,
.fini = mlxsw_m_fini,
+ .ports_remove_selected = mlxsw_m_ports_remove_selected,
.profile = &mlxsw_m_config_profile,
};
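
The offset arithmetic in mlxsw_m_port_create() above maps a (slot, module) pair onto a flat index used for MAC address generation. A small sketch of the calculation (helper name is illustrative only, not part of the patch):

	/* The main board (slot 0) starts at offset 0; the line card in slot n
	 * starts at (n - 1) * modules-per-slot.
	 */
	static u8 example_module_offset(u8 slot_index, u8 max_modules_per_slot)
	{
		return slot_index ? (slot_index - 1) * max_modules_per_slot : 0;
	}

For example, with max_modules_per_slot of 16, module 3 on the line card in slot 2 gets offset 16, so eth_hw_addr_gen() derives its address from index 3 + 1 + 16 = 20.
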
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 50527adc5b5a..c968309657dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1187,6 +1187,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
+ if (profile->used_max_lag) {
+ mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
+ profile->max_lag);
+ }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index f27bdecdf952..0777bed5bb1a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2218,76 +2218,6 @@ static inline void mlxsw_reg_smpe_pack(char *payload, u16 local_port,
mlxsw_reg_smpe_evid_set(payload, evid);
}
-/* SFTR-V2 - Switch Flooding Table Version 2 Register
- * --------------------------------------------------
- * The switch flooding table is used for flooding packet replication. The table
- * defines a bit mask of ports for packet replication.
- */
-#define MLXSW_REG_SFTR2_ID 0x202F
-#define MLXSW_REG_SFTR2_LEN 0x120
-
-MLXSW_REG_DEFINE(sftr2, MLXSW_REG_SFTR2_ID, MLXSW_REG_SFTR2_LEN);
-
-/* reg_sftr2_swid
- * Switch partition ID with which to associate the port.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, swid, 0x00, 24, 8);
-
-/* reg_sftr2_flood_table
- * Flooding table index to associate with the specific type on the specific
- * switch partition.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, flood_table, 0x00, 16, 6);
-
-/* reg_sftr2_index
- * Index. Used as an index into the Flooding Table in case the table is
- * configured to use VID / FID or FID Offset.
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, index, 0x00, 0, 16);
-
-/* reg_sftr2_table_type
- * See mlxsw_flood_table_type
- * Access: RW
- */
-MLXSW_ITEM32(reg, sftr2, table_type, 0x04, 16, 3);
-
-/* reg_sftr2_range
- * Range of entries to update
- * Access: Index
- */
-MLXSW_ITEM32(reg, sftr2, range, 0x04, 0, 16);
-
-/* reg_sftr2_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port, 0x20, 0x80, 1);
-
-/* reg_sftr2_port_mask
- * Local port mask (1 bit per port).
- * Access: WO
- */
-MLXSW_ITEM_BIT_ARRAY(reg, sftr2, port_mask, 0xA0, 0x80, 1);
-
-static inline void mlxsw_reg_sftr2_pack(char *payload,
- unsigned int flood_table,
- unsigned int index,
- enum mlxsw_flood_table_type table_type,
- unsigned int range, u16 port, bool set)
-{
- MLXSW_REG_ZERO(sftr2, payload);
- mlxsw_reg_sftr2_swid_set(payload, 0);
- mlxsw_reg_sftr2_flood_table_set(payload, flood_table);
- mlxsw_reg_sftr2_index_set(payload, index);
- mlxsw_reg_sftr2_table_type_set(payload, table_type);
- mlxsw_reg_sftr2_range_set(payload, range);
- mlxsw_reg_sftr2_port_set(payload, port, set);
- mlxsw_reg_sftr2_port_mask_set(payload, port, 1);
-}
-
/* SMID-V2 - Switch Multicast ID Version 2 Register
* ------------------------------------------------
* The MID record maps from a MID (Multicast ID), which is a unique identifier
@@ -4729,25 +4659,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
-/* reg_ptys_ib_link_width_cap
- * IB port supported widths.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16);
-
-#define MLXSW_REG_PTYS_IB_SPEED_SDR BIT(0)
-#define MLXSW_REG_PTYS_IB_SPEED_DDR BIT(1)
-#define MLXSW_REG_PTYS_IB_SPEED_QDR BIT(2)
-#define MLXSW_REG_PTYS_IB_SPEED_FDR10 BIT(3)
-#define MLXSW_REG_PTYS_IB_SPEED_FDR BIT(4)
-#define MLXSW_REG_PTYS_IB_SPEED_EDR BIT(5)
-
-/* reg_ptys_ib_proto_cap
- * IB port supported speeds and protocols.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16);
-
/* reg_ptys_ext_eth_proto_admin
* Extended speed and protocol to set port to.
* Access: RW
@@ -4760,18 +4671,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_admin, 0x14, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
-/* reg_ptys_ib_link_width_admin
- * IB width to set port to.
- * Access: RW
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16);
-
-/* reg_ptys_ib_proto_admin
- * IB speeds and protocols to set port to.
- * Access: RW
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16);
-
/* reg_ptys_ext_eth_proto_oper
* The extended current speed and protocol configured for the port.
* Access: RO
@@ -4784,18 +4683,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_oper, 0x20, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
-/* reg_ptys_ib_link_width_oper
- * The current IB width to set port to.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16);
-
-/* reg_ptys_ib_proto_oper
- * The current IB speed and protocol.
- * Access: RO
- */
-MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
-
enum mlxsw_reg_ptys_connector_type {
MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR,
MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE,
@@ -4866,33 +4753,6 @@ static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload,
mlxsw_reg_ptys_ext_eth_proto_oper_get(payload);
}
-static inline void mlxsw_reg_ptys_ib_pack(char *payload, u16 local_port,
- u16 proto_admin, u16 link_width)
-{
- MLXSW_REG_ZERO(ptys, payload);
- mlxsw_reg_ptys_local_port_set(payload, local_port);
- mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_IB);
- mlxsw_reg_ptys_ib_proto_admin_set(payload, proto_admin);
- mlxsw_reg_ptys_ib_link_width_admin_set(payload, link_width);
-}
-
-static inline void mlxsw_reg_ptys_ib_unpack(char *payload, u16 *p_ib_proto_cap,
- u16 *p_ib_link_width_cap,
- u16 *p_ib_proto_oper,
- u16 *p_ib_link_width_oper)
-{
- if (p_ib_proto_cap)
- *p_ib_proto_cap = mlxsw_reg_ptys_ib_proto_cap_get(payload);
- if (p_ib_link_width_cap)
- *p_ib_link_width_cap =
- mlxsw_reg_ptys_ib_link_width_cap_get(payload);
- if (p_ib_proto_oper)
- *p_ib_proto_oper = mlxsw_reg_ptys_ib_proto_oper_get(payload);
- if (p_ib_link_width_oper)
- *p_ib_link_width_oper =
- mlxsw_reg_ptys_ib_link_width_oper_get(payload);
-}
-
/* PPAD - Port Physical Address Register
* -------------------------------------
* The PPAD register configures the per port physical MAC address.
@@ -5666,27 +5526,6 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u16 local_port,
mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
}
-/* PLIB - Port Local to InfiniBand Port
- * ------------------------------------
- * The PLIB register performs mapping from Local Port into InfiniBand Port.
- */
-#define MLXSW_REG_PLIB_ID 0x500A
-#define MLXSW_REG_PLIB_LEN 0x10
-
-MLXSW_REG_DEFINE(plib, MLXSW_REG_PLIB_ID, MLXSW_REG_PLIB_LEN);
-
-/* reg_plib_local_port
- * Local port number.
- * Access: Index
- */
-MLXSW_ITEM32_LP(reg, plib, 0x00, 16, 0x00, 12);
-
-/* reg_plib_ib_port
- * InfiniBand port remapping for local_port.
- * Access: RW
- */
-MLXSW_ITEM32(reg, plib, ib_port, 0x00, 0, 8);
-
/* PPTB - Port Prio To Buffer Register
* -----------------------------------
* Configures the switch priority to buffer table.
@@ -12924,7 +12763,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(spvc),
MLXSW_REG(spevet),
MLXSW_REG(smpe),
- MLXSW_REG(sftr2),
MLXSW_REG(smid2),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
@@ -12962,7 +12800,6 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(paos),
MLXSW_REG(pfcc),
MLXSW_REG(ppcnt),
- MLXSW_REG(plib),
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 30c7b0e15721..5bcf5bceff71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2691,6 +2691,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ u16 max_lag;
u32 seed;
int err;
@@ -2709,12 +2710,14 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
- !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
return -EIO;
- mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
- sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
GFP_KERNEL);
if (!mlxsw_sp->lags)
return -ENOMEM;
@@ -3509,6 +3512,33 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
};
+/* Reduce the number of LAGs from the full capacity (256) to the maximum
+ * supported by Spectrum-2/3, to avoid a regression in the number of free
+ * entries in the PGT table.
+ */
+#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
+
+static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
+ .used_max_lag = 1,
+ .max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
+ .used_flood_mode = 1,
+ .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
+ .used_max_ib_mc = 1,
+ .max_ib_mc = 0,
+ .used_max_pkey = 1,
+ .max_pkey = 0,
+ .used_ubridge = 1,
+ .ubridge = 1,
+ .swid_config = {
+ {
+ .used_type = 1,
+ .type = MLXSW_PORT_SWID_TYPE_ETH,
+ }
+ },
+ .used_cqe_time_stamp_type = 1,
+ .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+};
+
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -4039,7 +4069,7 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp2_config_profile,
+ .profile = &mlxsw_sp4_config_profile,
.sdq_supports_cqe_v2 = true,
};
@@ -4263,10 +4293,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
- u64 max_lag;
- int i;
+ u16 max_lag;
+ int err, i;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
- max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
for (i = 0; i < max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
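
mlxsw_core_max_lag(), called twice in this file, is added elsewhere in the series and its body is not shown in this diff. A plausible sketch of what the callers above rely on, assuming it prefers a profile-configured maximum and falls back to the MAX_LAG resource reported by the device:

	static int example_max_lag(const struct mlxsw_config_profile *profile,
				   struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
	{
		if (profile->used_max_lag) {
			*p_max_lag = profile->max_lag;	/* e.g. 128 on Spectrum-4 */
			return 0;
		}
		if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
			return -EIO;
		*p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
		return 0;
	}
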
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 915dffb85a1c..dcd79d7e2af4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -14,16 +14,16 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+ strscpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+ strscpy(drvinfo->version, mlxsw_sp_driver_version,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
mlxsw_sp->bus_info->fw_rev.major,
mlxsw_sp->bus_info->fw_rev.minor,
mlxsw_sp->bus_info->fw_rev.subminor);
- strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+ strscpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2c4443c6b964..48f1fa62a4fd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1819,7 +1819,7 @@ void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
/* The configuration where several tunnels have the same local address in the
* same underlay table needs special treatment in the HW. That is currently not
* implemented in the driver. This function finds and demotes the first tunnel
- * with a given source address, except the one passed in in the argument
+ * with a given source address, except the one passed in the argument
* `except'.
*/
bool
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 39904dacf4f0..b3472fb94617 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+ 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 6f34a61739b6..fecd43754cea 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -403,7 +403,7 @@ struct ks8851_net {
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
struct regulator *vdd_io;
- int gpio;
+ struct gpio_desc *gpio;
struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 691206f19ea7..cfbc900d4aeb 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -17,10 +17,9 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
@@ -703,9 +702,9 @@ static const struct net_device_ops ks8851_netdev_ops = {
static void ks8851_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *di)
{
- strlcpy(di->driver, "KS8851", sizeof(di->driver));
- strlcpy(di->version, "1.00", sizeof(di->version));
- strlcpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+ strscpy(di->driver, "KS8851", sizeof(di->driver));
+ strscpy(di->version, "1.00", sizeof(di->version));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
}
static u32 ks8851_get_msglevel(struct net_device *dev)
@@ -1117,24 +1116,23 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
{
struct ks8851_net *ks = netdev_priv(netdev);
unsigned cider;
- int gpio;
int ret;
ks->netdev = netdev;
ks->tx_space = 6144;
- gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
- if (gpio == -EPROBE_DEFER)
- return gpio;
-
- ks->gpio = gpio;
- if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(dev, gpio,
- GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
- if (ret) {
- dev_err(dev, "reset gpio request failed\n");
- return ret;
- }
+ ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(ks->gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "reset gpio request failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(ks->gpio, "ks8851_rst_n");
+ if (ret) {
+ dev_err(dev, "failed to set reset gpio name: %d\n", ret);
+ return ret;
}
ks->vdd_io = devm_regulator_get(dev, "vdd-io");
@@ -1161,9 +1159,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
goto err_reg;
}
- if (gpio_is_valid(gpio)) {
+ if (ks->gpio) {
usleep_range(10000, 11000);
- gpio_set_value(gpio, 1);
+ gpiod_set_value_cansleep(ks->gpio, 0);
}
spin_lock_init(&ks->statelock);
@@ -1239,8 +1237,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
err_id:
ks8851_unregister_mdiobus(ks);
err_mdio:
- if (gpio_is_valid(gpio))
- gpio_set_value(gpio, 0);
+ if (ks->gpio)
+ gpiod_set_value_cansleep(ks->gpio, 1);
regulator_disable(ks->vdd_reg);
err_reg:
regulator_disable(ks->vdd_io);
@@ -1259,8 +1257,8 @@ void ks8851_remove_common(struct device *dev)
dev_info(dev, "remove\n");
unregister_netdev(priv->netdev);
- if (gpio_is_valid(priv->gpio))
- gpio_set_value(priv->gpio, 0);
+ if (priv->gpio)
+ gpiod_set_value_cansleep(priv->gpio, 1);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
}
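
The ks8851 conversion above moves from the legacy integer-GPIO API to GPIO descriptors; the apparent polarity flip (the old code wrote 1 to release reset, the new code writes 0) comes from gpiod working in logical levels, with the active-low flag taken from the device tree. A minimal sketch of the pattern, assuming "reset-gpios" is marked GPIO_ACTIVE_LOW:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static struct gpio_desc *example_get_reset(struct device *dev)
	{
		struct gpio_desc *rst;

		/* Request with reset asserted (logical 1); NULL if the property is absent. */
		rst = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
		if (IS_ERR(rst))
			return rst;	/* including -EPROBE_DEFER */
		if (rst)
			gpiod_set_value_cansleep(rst, 0);	/* release the chip from reset */
		return rst;
	}
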
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 82d55fc27edc..70bc7253454f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -413,7 +413,8 @@ static int ks8851_probe_spi(struct spi_device *spi)
spi->bits_per_word = 8;
- ks = netdev_priv(netdev);
+ kss = netdev_priv(netdev);
+ ks = &kss->ks8851;
ks->lock = ks8851_lock_spi;
ks->unlock = ks8851_unlock_spi;
@@ -433,8 +434,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
IRQ_RXPSI) /* RX process stop */
ks->rc_ier = STD_IRQ;
- kss = to_ks8851_spi(ks);
-
kss->spidev = spi;
mutex_init(&kss->lock);
INIT_WORK(&kss->tx_work, ks8851_tx_work);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 2b3eb5ed8233..468520079c65 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5998,9 +5998,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(hw_priv->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 559ad94a44d0..176efbeae127 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1467,9 +1467,9 @@ static void enc28j60_restart_work_handler(struct work_struct *work)
static void
enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info,
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info,
dev_name(dev->dev.parent), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index dc1840cb5b10..d7c8aa77ec75 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -925,9 +925,9 @@ static void encx24j600_get_regs(struct net_device *dev,
static void encx24j600_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index b1c74e6cb012..c739d60ee17d 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -579,8 +579,8 @@ static void lan743x_ethtool_get_drvinfo(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info,
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info,
pci_name(adapter->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index a9a1dea6d731..50eeecba1f18 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1585,6 +1585,9 @@ static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
rfctl |= RFE_CTL_AM_;
}
+ if (netdev->features & NETIF_F_RXCSUM)
+ rfctl |= RFE_CTL_IP_COE_ | RFE_CTL_TCP_UDP_COE_;
+
memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
if (netdev_mc_count(netdev)) {
struct netdev_hw_addr *ha;
@@ -2066,11 +2069,13 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
{
int required_number_of_descriptors = 0;
unsigned int start_frame_length = 0;
+ netdev_tx_t retval = NETDEV_TX_OK;
unsigned int frame_length = 0;
unsigned int head_length = 0;
unsigned long irq_flags = 0;
bool do_timestamp = false;
bool ignore_sync = false;
+ struct netdev_queue *txq;
int nr_frags = 0;
bool gso = false;
int j;
@@ -2083,9 +2088,12 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
if (required_number_of_descriptors > (tx->ring_size - 1)) {
dev_kfree_skb_irq(skb);
} else {
- /* save to overflow buffer */
- tx->overflow_skb = skb;
- netif_stop_queue(tx->adapter->netdev);
+ /* save how many descriptors we needed to restart the queue */
+ tx->rqd_descriptors = required_number_of_descriptors;
+ retval = NETDEV_TX_BUSY;
+ txq = netdev_get_tx_queue(tx->adapter->netdev,
+ tx->channel_number);
+ netif_tx_stop_queue(txq);
}
goto unlock;
}
@@ -2144,15 +2152,15 @@ finish:
unlock:
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
- return NETDEV_TX_OK;
+ return retval;
}
static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
{
struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
struct lan743x_adapter *adapter = tx->adapter;
- bool start_transmitter = false;
unsigned long irq_flags = 0;
+ struct netdev_queue *txq;
u32 ioc_bit = 0;
ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
@@ -2163,24 +2171,20 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
/* clean up tx ring */
lan743x_tx_release_completed_descriptors(tx);
- if (netif_queue_stopped(adapter->netdev)) {
- if (tx->overflow_skb) {
- if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
- lan743x_tx_get_avail_desc(tx))
- start_transmitter = true;
+ txq = netdev_get_tx_queue(adapter->netdev, tx->channel_number);
+ if (netif_tx_queue_stopped(txq)) {
+ if (tx->rqd_descriptors) {
+ if (tx->rqd_descriptors <=
+ lan743x_tx_get_avail_desc(tx)) {
+ tx->rqd_descriptors = 0;
+ netif_tx_wake_queue(txq);
+ }
} else {
- netif_wake_queue(adapter->netdev);
+ netif_tx_wake_queue(txq);
}
}
spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
- if (start_transmitter) {
- /* space is now available, transmit overflow skb */
- lan743x_tx_xmit_frame(tx, tx->overflow_skb);
- tx->overflow_skb = NULL;
- netif_wake_queue(adapter->netdev);
- }
-
if (!napi_complete(napi))
goto done;
@@ -2304,10 +2308,7 @@ static void lan743x_tx_close(struct lan743x_tx *tx)
lan743x_tx_release_all_descriptors(tx);
- if (tx->overflow_skb) {
- dev_kfree_skb(tx->overflow_skb);
- tx->overflow_skb = NULL;
- }
+ tx->rqd_descriptors = 0;
lan743x_tx_ring_cleanup(tx);
}
@@ -2387,7 +2388,7 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
(tx->channel_number));
netif_napi_add_tx_weight(adapter->netdev,
&tx->napi, lan743x_tx_napi_poll,
- tx->ring_size - 1);
+ NAPI_POLL_WEIGHT);
napi_enable(&tx->napi);
data = 0;
@@ -2549,6 +2550,7 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
struct lan743x_rx_buffer_info *buffer_info;
int frame_length, buffer_length;
+ bool is_ice, is_tce, is_icsm;
int extension_index = -1;
bool is_last, is_first;
struct sk_buff *skb;
@@ -2595,6 +2597,9 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
frame_length =
RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
buffer_length = buffer_info->buffer_length;
+ is_ice = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICE_;
+ is_tce = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_TCE_;
+ is_icsm = le32_to_cpu(descriptor->data1) & RX_DESC_DATA1_STATUS_ICSM_;
netdev_dbg(netdev, "%s%schunk: %d/%d",
is_first ? "first " : " ",
@@ -2663,6 +2668,10 @@ process_extension:
if (is_last && rx->skb_head) {
rx->skb_head->protocol = eth_type_trans(rx->skb_head,
rx->adapter->netdev);
+ if (rx->adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (!is_ice && !is_tce && !is_icsm)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
netdev_dbg(netdev, "sending %d byte frame to OS",
rx->skb_head->len);
napi_gro_receive(&rx->napi, rx->skb_head);
@@ -2866,9 +2875,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
if (ret)
goto return_error;
- netif_napi_add(adapter->netdev,
- &rx->napi, lan743x_rx_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &rx->napi, lan743x_rx_napi_poll);
lan743x_csr_write(adapter, DMAC_CMD,
DMAC_CMD_RX_SWR_(rx->channel_number));
@@ -3347,8 +3354,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
PCI11X1X_USED_TX_CHANNELS,
LAN743X_USED_RX_CHANNELS);
} else {
- netdev = devm_alloc_etherdev(&pdev->dev,
- sizeof(struct lan743x_adapter));
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ LAN743X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
}
if (!netdev)
@@ -3383,7 +3392,8 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
adapter->netdev->netdev_ops = &lan743x_netdev_ops;
adapter->netdev->ethtool_ops = &lan743x_ethtool_ops;
- adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+ adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
adapter->netdev->hw_features = adapter->netdev->features;
/* carrier off reporting is important to ethtool even BEFORE open */
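Two behavioural changes in lan743x_main.c above are worth spelling out. On the transmit side, the cached overflow_skb is replaced by a per-channel rqd_descriptors count: when the ring is full, the xmit path presumably records how many descriptors the frame needed and stops only that channel's queue, and the NAPI completion above wakes the queue again once lan743x_tx_get_avail_desc() reports at least that many free slots, instead of retransmitting a stashed skb (the TX ring also grows from 50 to 128 descriptors and the TX NAPI weight drops to the standard NAPI_POLL_WEIGHT). On the receive side, frames are marked CHECKSUM_UNNECESSARY when the hardware reports no checksum trouble. A minimal sketch of that decision, assuming the new RX_DESC_DATA1_STATUS_* bits are plain error/status flags; lan743x_rx_csum() is a hypothetical helper, not part of the driver:

    /* Hypothetical helper, for illustration only: mirrors the checksum
     * handling added to lan743x_rx_process_buffer() above.
     */
    static void lan743x_rx_csum(struct net_device *netdev, struct sk_buff *skb,
                                u32 data1)
    {
            bool is_ice  = data1 & RX_DESC_DATA1_STATUS_ICE_;
            bool is_tce  = data1 & RX_DESC_DATA1_STATUS_TCE_;
            bool is_icsm = data1 & RX_DESC_DATA1_STATUS_ICSM_;

            /* Trust the hardware only when RXCSUM is enabled and none of
             * the checksum status bits flag a problem.
             */
            if ((netdev->features & NETIF_F_RXCSUM) &&
                !is_ice && !is_tce && !is_icsm)
                    skb->ip_summed = CHECKSUM_UNNECESSARY;
    }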
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 72adae4f2aa0..67877d3b6dd9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -266,6 +266,8 @@
#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x)))
#define RFE_CTL (0x508)
+#define RFE_CTL_TCP_UDP_COE_ BIT(12)
+#define RFE_CTL_IP_COE_ BIT(11)
#define RFE_CTL_AB_ BIT(10)
#define RFE_CTL_AM_ BIT(9)
#define RFE_CTL_AU_ BIT(8)
@@ -954,8 +956,7 @@ struct lan743x_tx {
struct napi_struct napi;
u32 frame_count;
-
- struct sk_buff *overflow_skb;
+ u32 rqd_descriptors;
};
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
@@ -1110,7 +1111,7 @@ struct lan743x_tx_buffer_info {
unsigned int buffer_length;
};
-#define LAN743X_TX_RING_SIZE (50)
+#define LAN743X_TX_RING_SIZE (128)
/* OWN bit is set. ie, Descs are owned by RX DMAC */
#define RX_DESC_DATA0_OWN_ (0x00008000)
@@ -1122,6 +1123,9 @@ struct lan743x_tx_buffer_info {
(((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16)
#define RX_DESC_DATA0_EXT_ (0x00004000)
#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF)
+#define RX_DESC_DATA1_STATUS_ICE_ (0x00020000)
+#define RX_DESC_DATA1_STATUS_TCE_ (0x00010000)
+#define RX_DESC_DATA1_STATUS_ICSM_ (0x00000001)
#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF)
#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 6a11e2ceb013..da3ea905adbb 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1049,6 +1049,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
enum ptp_pin_function func,
unsigned int chan)
{
+ struct lan743x_ptp *lan_ptp =
+ container_of(ptp, struct lan743x_ptp, ptp_clock_info);
+ struct lan743x_adapter *adapter =
+ container_of(lan_ptp, struct lan743x_adapter, ptp);
int result = 0;
/* Confirm the requested function is supported. Parameter
@@ -1057,7 +1061,10 @@ static int lan743x_ptpci_verify_pin_config(struct ptp_clock_info *ptp,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
+ break;
case PTP_PF_EXTTS:
+ if (!adapter->is_pci11x1x)
+ result = -1;
break;
case PTP_PF_PHYSYNC:
default:
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
index 4241ff0e5098..49e1464a4313 100644
--- a/drivers/net/ethernet/microchip/lan966x/Kconfig
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -4,6 +4,7 @@ config LAN966X_SWITCH
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
+ depends on BRIDGE || BRIDGE=n
select PHYLINK
select PACKING
help
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index fd2e0ebb2427..962f7c5f9e7d 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -8,4 +8,7 @@ obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o \
- lan966x_ptp.o lan966x_fdma.o
+ lan966x_ptp.o lan966x_fdma.o lan966x_lag.o \
+ lan966x_tc.o lan966x_mqprio.o lan966x_taprio.o \
+ lan966x_tbf.o lan966x_cbs.o lan966x_ets.o \
+ lan966x_tc_matchall.o lan966x_police.o lan966x_mirror.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
new file mode 100644
index 000000000000..70cbbf8d2b67
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 cir, cbs;
+ u8 se_idx;
+
+ /* Check for invalid values */
+ if (qopt->idleslope <= 0 ||
+ qopt->sendslope >= 0 ||
+ qopt->locredit >= qopt->hicredit)
+ return -EINVAL;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+ cir = qopt->idleslope;
+ cbs = (qopt->idleslope - qopt->sendslope) *
+ (qopt->hicredit - qopt->locredit) /
+ -qopt->sendslope;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+ /* Check that the result can actually be written */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 se_idx;
+
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + qopt->queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(1) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
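A worked example of the conversion above, with illustrative numbers (a 100 Mbit/s port and tc-cbs's usual units: slopes in kbit/s, credits in bytes). With idleslope = 20000, sendslope = -80000, hicredit = 30 and locredit = -1470:

    cbs = (20000 - (-80000)) * (30 - (-1470)) / 80000
        = 100000 * 1500 / 80000
        = 1875 bytes
    cir = DIV_ROUND_UP(20000, 100) = 200   /* units of 100 kbit/s */
    cbs = DIV_ROUND_UP(1875, 4096) = 1     /* units of 4 kB */

Both values fit their register fields (cir <= GENMASK(15, 0), cbs <= GENMASK(6, 0)), so the shaper is programmed rather than rejected with -EINVAL.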
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
new file mode 100644
index 000000000000..8310d3f35404
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ets.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define DWRR_COST_BIT_WIDTH BIT(5)
+
+static u32 lan966x_ets_hw_cost(u32 w_min, u32 weight)
+{
+ u32 res;
+
+ /* Round half up: Multiply with 16 before division,
+ * add 8 and divide result with 16 again
+ */
+ res = (((DWRR_COST_BIT_WIDTH << 4) * w_min / weight) + 8) >> 4;
+ return max_t(u32, 1, res) - 1;
+}
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params;
+ struct lan966x *lan966x = port->lan966x;
+ u32 w_min = 100;
+ u8 count = 0;
+ u32 se_idx;
+ u8 i;
+
+ /* Check the input */
+ if (qopt->parent != TC_H_ROOT)
+ return -EINVAL;
+
+ params = &qopt->replace_params;
+ if (params->bands != NUM_PRIO_QUEUES)
+ return -EINVAL;
+
+ for (i = 0; i < params->bands; ++i) {
+ /* In the switch the DWRR is always on the lowest consecutive
+ * priorities. Due to this, the first priority must map to the
+ * first DWRR band.
+ */
+ if (params->priomap[i] != (7 - i))
+ return -EINVAL;
+
+ if (params->quanta[i] && params->weights[i] == 0)
+ return -EINVAL;
+ }
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ /* Find minimum weight */
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < params->bands; ++i) {
+ if (params->quanta[i] == 0)
+ continue;
+
+ ++count;
+
+ lan_wr(lan966x_ets_hw_cost(w_min, params->weights[i]),
+ lan966x, QSYS_SE_DWRR_CFG(se_idx, 7 - i));
+ }
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(count) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 se_idx;
+ int i;
+
+ se_idx = SE_IDX_PORT + port->chip_port;
+
+ for (i = 0; i < NUM_PRIO_QUEUES; ++i)
+ lan_wr(0, lan966x, QSYS_SE_DWRR_CFG(se_idx, i));
+
+ lan_rmw(QSYS_SE_CFG_SE_DWRR_CNT_SET(0) |
+ QSYS_SE_CFG_SE_RR_ENA_SET(0),
+ QSYS_SE_CFG_SE_DWRR_CNT |
+ QSYS_SE_CFG_SE_RR_ENA,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ return 0;
+}
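The rounding in lan966x_ets_hw_cost() is easier to see with numbers. With DWRR_COST_BIT_WIDTH = 32, w_min = 10 and weight = 30:

    res  = ((32 << 4) * 10 / 30 + 8) >> 4
         = (5120 / 30 + 8) >> 4
         = (170 + 8) >> 4
         = 11
    cost = max(1, 11) - 1 = 10

For weight == w_min the same arithmetic gives (512 + 8) >> 4 = 32 and a cost of 31, the maximum; so the band with the smallest weight is assigned the largest hardware cost, which should correspond to the smallest share of the DWRR round.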
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
index da5ca7188679..2ea263e893ee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -8,6 +8,7 @@ struct lan966x_fdb_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
struct net_device *dev;
+ struct net_device *orig_dev;
struct lan966x *lan966x;
unsigned long event;
};
@@ -127,75 +128,119 @@ void lan966x_fdb_deinit(struct lan966x *lan966x)
lan966x_fdb_purge_entries(lan966x);
}
-static void lan966x_fdb_event_work(struct work_struct *work)
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x)
+{
+ flush_workqueue(lan966x->fdb_work);
+}
+
+static void lan966x_fdb_port_event_work(struct lan966x_fdb_event_work *fdb_work)
{
- struct lan966x_fdb_event_work *fdb_work =
- container_of(work, struct lan966x_fdb_event_work, work);
struct switchdev_notifier_fdb_info *fdb_info;
- struct net_device *dev = fdb_work->dev;
struct lan966x_port *port;
struct lan966x *lan966x;
- int ret;
- fdb_info = &fdb_work->fdb_info;
lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->orig_dev);
+ fdb_info = &fdb_work->fdb_info;
- if (lan966x_netdevice_check(dev)) {
- port = netdev_priv(dev);
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_bridge_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x *lan966x;
+ int ret;
- switch (fdb_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
- fdb_info->vid);
+ lan966x = fdb_work->lan966x;
+ fdb_info = &fdb_work->fdb_info;
+
+ /* In case the bridge is called */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If there is no front port in this vlan, there is no
+ * point in copying the frame to the CPU because it would
+ * just be dropped later. So copy it to the CPU only if
+ * there is a port, but still store the fdb entry so it
+ * can be used later when a port actually joins the vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- if (!fdb_info->added_by_user)
- break;
- lan966x_mac_del_entry(lan966x, fdb_info->addr,
- fdb_info->vid);
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
break;
- }
- } else {
- if (!netif_is_bridge_master(dev))
- goto out;
-
- /* In case the bridge is called */
- switch (fdb_work->event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE:
- /* If there is no front port in this vlan, there is no
- * point to copy the frame to CPU because it would be
- * just dropped at later point. So add it only if
- * there is a port but it is required to store the fdb
- * entry for later point when a port actually gets in
- * the vlan.
- */
- lan966x_fdb_add_entry(lan966x, fdb_info);
- if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
- fdb_info->vid))
- break;
-
- lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
- fdb_info->vid);
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+}
+
+static void lan966x_fdb_lag_event_work(struct lan966x_fdb_event_work *fdb_work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+
+ if (!lan966x_lag_first_port(fdb_work->orig_dev, fdb_work->dev))
+ return;
+
+ lan966x = fdb_work->lan966x;
+ port = netdev_priv(fdb_work->dev);
+ fdb_info = &fdb_work->fdb_info;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
break;
- case SWITCHDEV_FDB_DEL_TO_DEVICE:
- ret = lan966x_fdb_del_entry(lan966x, fdb_info);
- if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
- fdb_info->vid))
- break;
-
- if (ret)
- lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
- fdb_info->vid);
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
break;
- }
+ lan966x_mac_del_entry(lan966x, fdb_info->addr, fdb_info->vid);
+ break;
}
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+
+ if (lan966x_netdevice_check(fdb_work->orig_dev))
+ lan966x_fdb_port_event_work(fdb_work);
+ else if (netif_is_bridge_master(fdb_work->orig_dev))
+ lan966x_fdb_bridge_event_work(fdb_work);
+ else if (netif_is_lag_master(fdb_work->orig_dev))
+ lan966x_fdb_lag_event_work(fdb_work);
-out:
kfree(fdb_work->fdb_info.addr);
kfree(fdb_work);
- dev_put(dev);
}
int lan966x_handle_fdb(struct net_device *dev,
@@ -221,7 +266,8 @@ int lan966x_handle_fdb(struct net_device *dev,
if (!fdb_work)
return -ENOMEM;
- fdb_work->dev = orig_dev;
+ fdb_work->dev = dev;
+ fdb_work->orig_dev = orig_dev;
fdb_work->lan966x = lan966x;
fdb_work->event = event;
INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
@@ -231,7 +277,6 @@ int lan966x_handle_fdb(struct net_device *dev,
goto err_addr_alloc;
ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
- dev_hold(orig_dev);
queue_work(lan966x->fdb_work, &fdb_work->work);
break;
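The reworked fdb handling above splits the deferred work into three handlers keyed on orig_dev (physical port, bridge master, LAG master) and stores both dev and orig_dev in the work item so the LAG case can check lan966x_lag_first_port() against the real lower device. It also drops the dev_hold()/dev_put() pair around the queued work; instead lan966x_fdb_flush_workqueue() is exported, presumably so the bridge/LAG leave paths can flush pending entries before the interfaces they reference go away.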
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 6dea7f8c1481..7e4061c854f0 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -425,7 +425,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
lan966x_ifh_get_src_port(skb->data, &src_port);
lan966x_ifh_get_timestamp(skb->data, &timestamp);
- WARN_ON(src_port >= lan966x->num_phys_ports);
+ if (WARN_ON(src_port >= lan966x->num_phys_ports))
+ goto free_skb;
skb->dev = lan966x->ports[src_port]->dev;
skb_pull(skb, IFH_LEN * sizeof(u32));
@@ -449,6 +450,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
return skb;
+free_skb:
+ kfree_skb(skb);
unmap_page:
dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
FDMA_DCB_STATUS_BLOCKL(db->status),
@@ -784,8 +787,7 @@ void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
return;
lan966x->fdma_ndev = dev;
- netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
napi_enable(&lan966x->napi);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
new file mode 100644
index 000000000000..41fa2523d91d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+
+#include "lan966x_main.h"
+
+static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
+{
+ u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
+ int p, lag, i;
+
+ /* Reset destination and aggregation PGIDS */
+ for (p = 0; p < lan966x->num_phys_ports; ++p)
+ lan_wr(ANA_PGID_PGID_SET(BIT(p)),
+ lan966x, ANA_PGID(p));
+
+ for (p = PGID_AGGR; p < PGID_SRC; ++p)
+ lan_wr(ANA_PGID_PGID_SET(visited),
+ lan966x, ANA_PGID(p));
+
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ visited &= ~BIT(p);
+ }
+
+ /* Now, set PGIDs for each active LAG */
+ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
+ struct net_device *bond = lan966x->ports[lag]->bond;
+ int num_active_ports = 0;
+ unsigned long bond_mask;
+ u8 aggr_idx[16];
+
+ if (!bond || (visited & BIT(lag)))
+ continue;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+
+ for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ lan_wr(ANA_PGID_PGID_SET(bond_mask),
+ lan966x, ANA_PGID(p));
+ if (port->lag_tx_active)
+ aggr_idx[num_active_ports++] = p;
+ }
+
+ for (i = PGID_AGGR; i < PGID_SRC; ++i) {
+ u32 ac;
+
+ ac = lan_rd(lan966x, ANA_PGID(i));
+ ac &= ~bond_mask;
+ /* Avoid dividing by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
+ lan_wr(ANA_PGID_PGID_SET(ac),
+ lan966x, ANA_PGID(i));
+ }
+
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (p = lag; p < lan966x->num_phys_ports; p++) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ visited |= BIT(p);
+ }
+ }
+}
+
+static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ u32 bond_mask;
+ u32 lag_id;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ lag_id = port->chip_port;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ }
+}
+
+static void lan966x_lag_update_ids(struct lan966x *lan966x)
+{
+ lan966x_lag_set_port_ids(lan966x);
+ lan966x_update_fwd_mask(lan966x);
+ lan966x_lag_set_aggr_pgids(lan966x);
+}
+
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ u32 lag_id = -1;
+ u32 bond_mask;
+ int err;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, bond);
+ if (bond_mask)
+ lag_id = __ffs(bond_mask);
+
+ port->bond = bond;
+ lan966x_lag_update_ids(lan966x);
+
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ goto out;
+
+ lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));
+
+ if (lan966x_lag_first_port(port->bond, port->dev) &&
+ lag_id != -1)
+ lan966x_mac_lag_replace_port_entry(lan966x,
+ lan966x->ports[lag_id],
+ port);
+
+ return 0;
+
+out:
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+
+ return err;
+}
+
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 bond_mask;
+ u32 lag_id;
+
+ if (lan966x_lag_first_port(port->bond, port->dev)) {
+ bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
+ bond_mask &= ~BIT(port->chip_port);
+ if (bond_mask) {
+ lag_id = __ffs(bond_mask);
+ lan966x_mac_lag_replace_port_entry(lan966x, port,
+ lan966x->ports[lag_id]);
+ } else {
+ lan966x_mac_lag_remove_port_entry(lan966x, port);
+ }
+ }
+
+ port->bond = NULL;
+ lan966x_lag_update_ids(lan966x);
+ lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
+}
+
+static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
+ enum netdev_lag_hash hash_type)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ struct lan966x_port *port = lan966x->ports[p];
+
+ if (!port || !port->bond)
+ continue;
+
+ if (port->hash_type != hash_type)
+ return false;
+ }
+
+ return true;
+}
+
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct netdev_lag_upper_info *lui;
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ lui = info->upper_info;
+ if (!lui) {
+ port->hash_type = NETDEV_LAG_HASH_NONE;
+ return NOTIFY_DONE;
+ }
+
+ if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported Tx type");
+ return -EINVAL;
+ }
+
+ if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG devices can have only the same hash_type");
+ return -EINVAL;
+ }
+
+ switch (lui->hash_type) {
+ case NETDEV_LAG_HASH_L2:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L34:
+ lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ case NETDEV_LAG_HASH_L23:
+ lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
+ lan966x, ANA_AGGR_CFG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG device using unsupported hash type");
+ return -EINVAL;
+ }
+
+ port->hash_type = lui->hash_type;
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct netdev_lag_lower_state_info *lag = info->lower_state_info;
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ bool is_active;
+
+ if (!port->bond)
+ return NOTIFY_DONE;
+
+ is_active = lag->link_up && lag->tx_enabled;
+ if (port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ port->lag_tx_active = is_active;
+ lan966x_lag_set_aggr_pgids(lan966x);
+
+ return NOTIFY_OK;
+}
+
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_prechangeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port;
+ struct net_device *lower;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ if (!lan966x_netdevice_check(lower))
+ continue;
+
+ port = netdev_priv(lower);
+ if (port->bond != dev)
+ continue;
+
+ err = lan966x_port_changeupper(lower, dev, info);
+ if (err)
+ return err;
+ }
+
+ return NOTIFY_DONE;
+}
+
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ unsigned long bond_mask;
+
+ if (port->bond != lag)
+ return false;
+
+ bond_mask = lan966x_lag_get_mask(lan966x, lag);
+ if (bond_mask && port->chip_port == __ffs(bond_mask))
+ return true;
+
+ return false;
+}
+
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
+{
+ struct lan966x_port *port;
+ u32 mask = 0;
+ int p;
+
+ if (!bond)
+ return mask;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->bond == bond)
+ mask |= BIT(p);
+ }
+
+ return mask;
+}
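The PGID loop in lan966x_lag_set_aggr_pgids() spreads traffic over the active LAG members by round-robining the aggregation codes. As a hypothetical example, for a bond over chip ports 2 and 5 with both ports tx-active, aggr_idx becomes {2, 5} and num_active_ports is 2, so for every aggregation PGID i the bond's bits are first cleared and then

    ac |= BIT(aggr_idx[i % 2]);   /* even codes -> port 2, odd codes -> port 5 */

is applied, giving each member roughly half of the aggregation codes. If no member is tx-active the bond's bits simply stay cleared in every code, which is the "make all aggregation codes zero" case called out in the comment.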
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index 5893770bfd94..baa3a30c039f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -22,6 +22,7 @@ struct lan966x_mac_entry {
u16 vid;
u16 port_index;
int row;
+ bool lag;
};
struct lan966x_mac_raw_entry {
@@ -69,15 +70,14 @@ static void lan966x_mac_select(struct lan966x *lan966x,
lan_wr(mach, lan966x, ANA_MACHDATA);
}
-static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
- bool cpu_copy,
- const unsigned char mac[ETH_ALEN],
- unsigned int vid,
- enum macaccess_entry_type type)
+static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
{
- int ret;
+ lockdep_assert_held(&lan966x->mac_lock);
- spin_lock(&lan966x->mac_lock);
lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */
@@ -89,7 +89,19 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
lan966x, ANA_MACACCESS);
- ret = lan966x_mac_wait_for_completion(lan966x);
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ int ret;
+
+ spin_lock(&lan966x->mac_lock);
+ ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
spin_unlock(&lan966x->mac_lock);
return ret;
@@ -119,6 +131,16 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port,
return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}
+static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
+}
+
static int lan966x_mac_forget_locked(struct lan966x *lan966x,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
@@ -178,8 +200,9 @@ void lan966x_mac_init(struct lan966x *lan966x)
INIT_LIST_HEAD(&lan966x->mac_entries);
}
-static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
- u16 vid, u16 port_index)
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
+ const unsigned char *mac,
+ u16 vid)
{
struct lan966x_mac_entry *mac_entry;
@@ -189,8 +212,9 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma
memcpy(mac_entry->mac, mac, ETH_ALEN);
mac_entry->vid = vid;
- mac_entry->port_index = port_index;
+ mac_entry->port_index = port->chip_port;
mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ mac_entry->lag = port->bond ? true : false;
return mac_entry;
}
@@ -269,7 +293,7 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
goto mac_learn;
}
- mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+ mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
if (!mac_entry) {
spin_unlock(&lan966x->mac_lock);
return -ENOMEM;
@@ -278,7 +302,8 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
list_add_tail(&mac_entry->list, &lan966x->mac_entries);
spin_unlock(&lan966x->mac_lock);
- lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
+ port->bond ?: port->dev);
mac_learn:
lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
@@ -309,6 +334,50 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
return 0;
}
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ lan966x_mac_learn_locked(lan966x, dst->chip_port,
+ mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+ mac_entry->port_index = dst->chip_port;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->port_index == src->chip_port &&
+ mac_entry->lag) {
+ lan966x_mac_forget_locked(lan966x, mac_entry->mac,
+ mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
void lan966x_mac_purge_entries(struct lan966x *lan966x)
{
struct lan966x_mac_entry *mac_entry, *tmp;
@@ -354,6 +423,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
struct lan966x_mac_entry *mac_entry, *tmp;
unsigned char mac[ETH_ALEN] __aligned(2);
struct list_head mac_deleted_entries;
+ struct lan966x_port *port;
u32 dest_idx;
u32 column;
u16 vid;
@@ -406,9 +476,10 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
/* Notify the bridge that the entry doesn't exist
* anymore in the HW
*/
+ port = lan966x->ports[mac_entry->port_index];
lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
mac_entry->mac, mac_entry->vid,
- lan966x->ports[mac_entry->port_index]->dev);
+ port->bond ?: port->dev);
list_del(&mac_entry->list);
kfree(mac_entry);
}
@@ -440,7 +511,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
continue;
}
- mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ port = lan966x->ports[dest_idx];
+ mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
if (!mac_entry) {
spin_unlock(&lan966x->mac_lock);
return;
@@ -451,7 +523,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
spin_unlock(&lan966x->mac_lock);
lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- mac, vid, lan966x->ports[dest_idx]->dev);
+ mac, vid, port->bond ?: port->dev);
}
}
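The new __lan966x_mac_learn_locked()/lan966x_mac_learn_locked() and the existing lan966x_mac_forget_locked() exist because the LAG helpers above walk mac_entries while already holding mac_lock; the lockdep_assert_held() documents that, and calling the unlocked variants there would self-deadlock on the non-recursive spinlock. A minimal sketch of what lan966x_mac_lag_replace_port_entry() does (the real code additionally filters on port_index and the lag flag):

    spin_lock(&lan966x->mac_lock);
    list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
            /* issue MAC table commands without re-taking mac_lock */
            lan966x_mac_forget_locked(lan966x, mac_entry->mac, mac_entry->vid,
                                      ENTRYTYPE_LOCKED);
            lan966x_mac_learn_locked(lan966x, dst->chip_port, mac_entry->mac,
                                     mac_entry->vid, ENTRYTYPE_LOCKED);
    }
    spin_unlock(&lan966x->mac_lock);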
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index d928b75f3780..be2fd030cccb 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -344,7 +344,8 @@ static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp)
IFH_POS_TIMESTAMP, IFH_LEN * 4, PACK, 0);
}
-static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lan966x_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
@@ -466,6 +467,7 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_set_mac_address = lan966x_port_set_mac_address,
.ndo_get_port_parent_id = lan966x_port_get_parent_id,
.ndo_eth_ioctl = lan966x_port_ioctl,
+ .ndo_setup_tc = lan966x_tc_setup,
};
bool lan966x_netdevice_check(const struct net_device *dev)
@@ -738,7 +740,8 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
return -EINVAL;
dev = devm_alloc_etherdev_mqs(lan966x->dev,
- sizeof(struct lan966x_port), 8, 1);
+ sizeof(struct lan966x_port),
+ NUM_PRIO_QUEUES, 1);
if (!dev)
return -ENOMEM;
@@ -754,7 +757,9 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
dev->netdev_ops = &lan966x_port_netdev_ops;
dev->ethtool_ops = &lan966x_ethtool_ops;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
dev->needed_headroom = IFH_LEN * sizeof(u32);
eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
@@ -770,6 +775,7 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
@@ -778,6 +784,8 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QUSGMII,
+ port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
port->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
@@ -956,6 +964,8 @@ static void lan966x_init(struct lan966x *lan966x)
lan966x, ANA_ANAINTR);
spin_lock_init(&lan966x->tx_lock);
+
+ lan966x_taprio_init(lan966x);
}
static int lan966x_ram_init(struct lan966x *lan966x)
@@ -969,7 +979,8 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
int val = 0;
int ret;
- switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
+ switch_reset = devm_reset_control_get_optional_shared(lan966x->dev,
+ "switch");
if (IS_ERR(switch_reset))
return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
"Could not obtain switch reset");
@@ -1164,6 +1175,7 @@ static int lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
+ lan966x_taprio_deinit(lan966x);
lan966x_fdma_deinit(lan966x);
lan966x_cleanup_ports(lan966x);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 2787055c1847..9656071b8289 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -9,6 +9,8 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/switchdev.h>
#include "lan966x_regs.h"
@@ -36,6 +38,7 @@
#define NUM_PHYS_PORTS 8
#define CPU_PORT 8
+#define NUM_PRIO_QUEUES 8
/* Reserved PGIDs */
#define PGID_CPU (PGID_AGGR - 6)
@@ -79,6 +82,9 @@
#define FDMA_INJ_CHANNEL 0
#define FDMA_DCB_MAX 512
+#define SE_IDX_QUEUE 0 /* 0-79 : Queue scheduler elements */
+#define SE_IDX_PORT 80 /* 80-89 : Port scheduler elements */
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -258,6 +264,11 @@ struct lan966x {
struct lan966x_rx rx;
struct lan966x_tx tx;
struct napi_struct napi;
+
+ /* Mirror */
+ struct lan966x_port *mirror_monitor;
+ u32 mirror_mask[2];
+ u32 mirror_count;
};
struct lan966x_port_config {
@@ -270,6 +281,15 @@ struct lan966x_port_config {
bool autoneg;
};
+struct lan966x_port_tc {
+ bool ingress_shared_block;
+ unsigned long police_id;
+ unsigned long ingress_mirror_id;
+ unsigned long egress_mirror_id;
+ struct flow_stats police_stat;
+ struct flow_stats mirror_stat;
+};
+
struct lan966x_port {
struct net_device *dev;
struct lan966x *lan966x;
@@ -292,11 +312,19 @@ struct lan966x_port {
u8 ptp_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
+
+ struct net_device *bond;
+ bool lag_tx_active;
+ enum netdev_lag_hash hash_type;
+
+ struct lan966x_port_tc tc;
};
extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
extern const struct ethtool_ops lan966x_ethtool_ops;
+extern struct notifier_block lan966x_switchdev_nb __read_mostly;
+extern struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
bool lan966x_netdevice_check(const struct net_device *dev);
@@ -345,6 +373,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x,
struct lan966x_port *port,
const unsigned char *addr,
u16 vid);
+void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src,
+ struct lan966x_port *dst);
+void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
+ struct lan966x_port *src);
void lan966x_mac_purge_entries(struct lan966x *lan966x);
irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
@@ -369,6 +402,7 @@ void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
int lan966x_fdb_init(struct lan966x *lan966x);
void lan966x_fdb_deinit(struct lan966x *lan966x);
+void lan966x_fdb_flush_workqueue(struct lan966x *lan966x);
int lan966x_handle_fdb(struct net_device *dev,
struct net_device *orig_dev,
unsigned long event, const void *ctx,
@@ -397,6 +431,8 @@ void lan966x_ptp_txtstamp_release(struct lan966x_port *port,
struct sk_buff *skb);
irqreturn_t lan966x_ptp_irq_handler(int irq, void *args);
irqreturn_t lan966x_ptp_ext_irq_handler(int irq, void *args);
+u32 lan966x_ptp_get_period_ps(void);
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
int lan966x_fdma_change_mtu(struct lan966x *lan966x);
@@ -406,6 +442,89 @@ int lan966x_fdma_init(struct lan966x *lan966x);
void lan966x_fdma_deinit(struct lan966x *lan966x);
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
+int lan966x_lag_port_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
+ struct net_device *bond,
+ struct netlink_ext_ack *extack);
+void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond);
+int lan966x_lag_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_port_changelowerstate(struct net_device *dev,
+ struct netdev_notifier_changelowerstate_info *info);
+int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_lag_netdev_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info);
+bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev);
+u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond);
+
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info);
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state);
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t);
+void lan966x_update_fwd_mask(struct lan966x *lan966x);
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc);
+int lan966x_mqprio_del(struct lan966x_port *port);
+
+void lan966x_taprio_init(struct lan966x *lan966x);
+void lan966x_taprio_deinit(struct lan966x *lan966x);
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt);
+int lan966x_taprio_del(struct lan966x_port *port);
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed);
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt);
+
+int lan966x_cbs_add(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+int lan966x_cbs_del(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt);
+
+int lan966x_ets_add(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+int lan966x_ets_del(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt);
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress);
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack);
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats);
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack);
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress);
+
static inline void __iomem *lan_addr(void __iomem *base[],
int id, int tinst, int tcnt,
int gbase, int ginst,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
new file mode 100644
index 000000000000..7e1ba3f40c35
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mirror_port_add(struct lan966x_port *port,
+ struct flow_action_entry *action,
+ unsigned long mirror_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_port *monitor_port;
+
+ if (!lan966x_netdevice_check(action->dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an lan966x port");
+ return -EOPNOTSUPP;
+ }
+
+ monitor_port = netdev_priv(action->dev);
+
+ if (lan966x->mirror_mask[ingress] & BIT(port->chip_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Mirror already exists");
+ return -EEXIST;
+ }
+
+ if (lan966x->mirror_monitor &&
+ lan966x->mirror_monitor != monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change mirror port while in use");
+ return -EBUSY;
+ }
+
+ if (port == monitor_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot mirror the monitor port");
+ return -EINVAL;
+ }
+
+ lan966x->mirror_mask[ingress] |= BIT(port->chip_port);
+
+ lan966x->mirror_monitor = monitor_port;
+ lan_wr(BIT(monitor_port->chip_port), lan966x, ANA_MIRRORPORTS);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(1),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count++;
+
+ if (ingress)
+ port->tc.ingress_mirror_id = mirror_id;
+ else
+ port->tc.egress_mirror_id = mirror_id;
+
+ return 0;
+}
+
+int lan966x_mirror_port_del(struct lan966x_port *port,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!(lan966x->mirror_mask[ingress] & BIT(port->chip_port))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "There is no mirroring for this port");
+ return -ENOENT;
+ }
+
+ lan966x->mirror_mask[ingress] &= ~BIT(port->chip_port);
+
+ if (ingress) {
+ lan_rmw(ANA_PORT_CFG_SRC_MIRROR_ENA_SET(0),
+ ANA_PORT_CFG_SRC_MIRROR_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+ } else {
+ lan_wr(lan966x->mirror_mask[0], lan966x,
+ ANA_EMIRRORPORTS);
+ }
+
+ lan966x->mirror_count--;
+
+ if (lan966x->mirror_count == 0) {
+ lan966x->mirror_monitor = NULL;
+ lan_wr(0, lan966x, ANA_MIRRORPORTS);
+ }
+
+ if (ingress)
+ port->tc.ingress_mirror_id = 0;
+ else
+ port->tc.egress_mirror_id = 0;
+
+ return 0;
+}
+
+void lan966x_mirror_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats,
+ bool ingress)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.mirror_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ if (ingress) {
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(stats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
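lan966x_mirror_port_stats() follows the usual delta pattern for flow_stats: the port's cumulative rtnl_link_stats64 counters are sampled, the difference from the previously cached values is pushed via flow_stats_update(), and the cache is refreshed. In outline (ingress case):

    lan966x_stats_get(port->dev, &new_stats);
    flow_stats_update(stats,
                      new_stats.rx_bytes   - old_stats->bytes,
                      new_stats.rx_packets - old_stats->pkts,
                      new_stats.rx_dropped - old_stats->drops,
                      old_stats->lastused, FLOW_ACTION_HW_STATS_IMMEDIATE);
    old_stats->bytes = new_stats.rx_bytes;   /* baseline for the next poll */

Note that these are whole-port RX (or TX) counters, not a dedicated mirror counter; since the mirror is presumably installed via the matchall offload declared above, which mirrors all traffic on the port, the port counters are a reasonable proxy.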
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
new file mode 100644
index 000000000000..7fa76e74f9e2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_mqprio_add(struct lan966x_port *port, u8 num_tc)
+{
+ u8 i;
+
+ if (num_tc != NUM_PRIO_QUEUES) {
+ netdev_err(port->dev, "Only %d traffic classes supported\n",
+ NUM_PRIO_QUEUES);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(port->dev, num_tc);
+
+ for (i = 0; i < num_tc; ++i)
+ netdev_set_tc_queue(port->dev, i, 1, i);
+
+ return 0;
+}
+
+int lan966x_mqprio_del(struct lan966x_port *port)
+{
+ netdev_reset_tc(port->dev);
+
+ return 0;
+}
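The mqprio offload only accepts num_tc == NUM_PRIO_QUEUES (8) and sets up a strict 1:1 mapping, netdev_set_tc_queue(dev, i, 1, i), i.e. traffic class 0 uses TX queue 0, class 1 uses queue 1, and so on up to class 7. This lines up with the probe change above that now allocates each port netdev with NUM_PRIO_QUEUES TX queues via devm_alloc_etherdev_mqs().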
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
index 38a7e95d69b4..e4ac59480514 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -28,11 +28,12 @@ static int lan966x_phylink_mac_prepare(struct phylink_config *config,
phy_interface_t iface)
{
struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ phy_interface_t serdes_mode = iface;
int err;
if (port->serdes) {
err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
- iface);
+ serdes_mode);
if (err) {
netdev_err(to_net_dev(config->dev),
"Could not set mode of SerDes\n");
@@ -59,6 +60,9 @@ static void lan966x_phylink_mac_link_up(struct phylink_config *config,
port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+ if (phy_interface_mode_is_rgmii(interface))
+ phy_set_speed(port->serdes, speed);
+
lan966x_port_config_up(port);
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
new file mode 100644
index 000000000000..a9aec900d608
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+/* 0-8 : 9 port policers */
+#define POL_IDX_PORT 0
+
+/* Policer order: Serial (QoS -> Port -> VCAP) */
+#define POL_ORDER 0x1d3
+
+struct lan966x_tc_policer {
+ /* kilobit per second */
+ u32 rate;
+ /* bytes */
+ u32 burst;
+};
+
+static int lan966x_police_add(struct lan966x_port *port,
+ struct lan966x_tc_policer *pol,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Rate unit is 33 1/3 kbps */
+ pol->rate = DIV_ROUND_UP(pol->rate * 3, 100);
+ /* Avoid zero burst size */
+ pol->burst = pol->burst ?: 1;
+ /* Unit is 4kB */
+ pol->burst = DIV_ROUND_UP(pol->burst, 4096);
+
+ if (pol->rate > GENMASK(15, 0) ||
+ pol->burst > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(1) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(pol->rate) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(pol->burst),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
+
+static int lan966x_police_del(struct lan966x_port *port,
+ u16 pol_idx)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(0) |
+ ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(0) |
+ ANA_POL_MODE_IPG_SIZE_SET(20) |
+ ANA_POL_MODE_FRM_MODE_SET(2) |
+ ANA_POL_MODE_OVERSHOOT_ENA_SET(1),
+ lan966x, ANA_POL_MODE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_STATE_PIR_LVL_SET(0),
+ lan966x, ANA_POL_PIR_STATE(pol_idx));
+
+ lan_wr(ANA_POL_PIR_CFG_PIR_RATE_SET(GENMASK(14, 0)) |
+ ANA_POL_PIR_CFG_PIR_BURST_SET(0),
+ lan966x, ANA_POL_PIR_CFG(pol_idx));
+
+ return 0;
+}
+
+static int lan966x_police_validate(struct lan966x_port *port,
+ const struct flow_action *action,
+ const struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.ingress_shared_block) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Policer is not supported on shared ingress blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (port->tc.police_id && port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one policer per port is supported");
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int lan966x_police_port_add(struct lan966x_port *port,
+ struct flow_action *action,
+ struct flow_action_entry *act,
+ unsigned long police_id,
+ bool ingress,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ struct rtnl_link_stats64 new_stats;
+ struct lan966x_tc_policer pol;
+ struct flow_stats *old_stats;
+ int err;
+
+ err = lan966x_police_validate(port, action, act, police_id, ingress,
+ extack);
+ if (err)
+ return err;
+
+ memset(&pol, 0, sizeof(pol));
+
+ pol.rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol.burst = act->police.burst;
+
+ err = lan966x_police_add(port, &pol, POL_IDX_PORT + port->chip_port);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(1) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = police_id;
+
+ /* Setup initial stats */
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+
+ return 0;
+}
+
+int lan966x_police_port_del(struct lan966x_port *port,
+ unsigned long police_id,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ if (port->tc.police_id != police_id) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid policer id");
+ return -EINVAL;
+ }
+
+ err = lan966x_police_del(port, port->tc.police_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to add policer to port");
+ return err;
+ }
+
+ lan_rmw(ANA_POL_CFG_PORT_POL_ENA_SET(0) |
+ ANA_POL_CFG_POL_ORDER_SET(POL_ORDER),
+ ANA_POL_CFG_PORT_POL_ENA |
+ ANA_POL_CFG_POL_ORDER,
+ lan966x, ANA_POL_CFG(port->chip_port));
+
+ port->tc.police_id = 0;
+
+ return 0;
+}
+
+void lan966x_police_port_stats(struct lan966x_port *port,
+ struct flow_stats *stats)
+{
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &port->tc.police_stat;
+ lan966x_stats_get(port->dev, &new_stats);
+
+ flow_stats_update(stats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+}
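A worked example of the unit conversions in lan966x_police_add(), with illustrative numbers. For a 100 Mbit/s policer (rate_bytes_ps = 12,500,000) and a 10,000 byte burst:

    pol.rate  = 12500000 / 1000 * 8            = 100000   /* kbit/s */
    PIR rate  = DIV_ROUND_UP(100000 * 3, 100)  = 3000     /* units of 33 1/3 kbit/s */
    PIR burst = DIV_ROUND_UP(10000, 4096)      = 3        /* units of 4 kB */

Both results pass the range checks in the code (rate <= GENMASK(15, 0), burst <= GENMASK(6, 0)), so the policer is programmed instead of the function returning -EINVAL.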
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index f141644e4372..1a61c6cdb077 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -165,10 +165,12 @@ static void lan966x_port_link_up(struct lan966x_port *port)
break;
}
+ lan966x_taprio_speed_set(port, config->speed);
+
/* Also the GIGA_MODE_ENA(1) needs to be set regardless of the
* port speed for QSGMII ports.
*/
- if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ if (phy_interface_num_ports(config->portmode) == 4)
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
lan_wr(config->duplex | mode,
@@ -331,10 +333,14 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
struct lan966x *lan966x = port->lan966x;
bool inband_aneg = false;
bool outband;
+ bool full_preamble = false;
+
+ if (config->portmode == PHY_INTERFACE_MODE_QUSGMII)
+ full_preamble = true;
if (config->inband) {
if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
- config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ phy_interface_num_ports(config->portmode) == 4)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
config->autoneg)
@@ -345,9 +351,15 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
outband = true;
}
- /* Disable or enable inband */
- lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
- DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ /* Disable or enable inband.
+ * For QUSGMII, we rely on the preamble to transmit data such as
+ * timestamps, therefore force full preamble transmission, and prevent
+ * preamble shortening
+ */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband) |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(full_preamble),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA |
+ DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA,
lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
/* Enable PCS */
@@ -396,7 +408,7 @@ void lan966x_port_init(struct lan966x_port *port)
if (lan966x->fdma)
lan966x_fdma_netdev_init(lan966x, port->dev);
- if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
+ if (phy_interface_num_ports(config->portmode) != 4)
return;
lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index 3a621c5165bc..e5a2bbe064f8 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -464,8 +464,7 @@ static int lan966x_ptp_settime64(struct ptp_clock_info *ptp,
return 0;
}
-static int lan966x_ptp_gettime64(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct lan966x_phc *phc = container_of(ptp, struct lan966x_phc, info);
struct lan966x *lan966x = phc->lan966x;
@@ -890,3 +889,9 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp = full_ts_in_ns;
}
+
+u32 lan966x_ptp_get_period_ps(void)
+{
+ /* This represents the system clock period in picoseconds */
+ return 15125;
+}
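The hard-coded 15125 ps corresponds to a nominal clock of 1e12 / 15125 ≈ 66.1 MHz. Exporting it (together with lan966x_ptp_gettime64(), now non-static) suggests the new taprio code uses it to translate gate times into clock periods, though that caller is outside this hunk.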
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
index 8265ad89f0bc..1d90b93dd417 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -90,6 +90,24 @@ enum lan966x_target {
#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+/* ANA:ANA:MIRRORPORTS */
+#define ANA_MIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 60, 0, 1, 4)
+
+#define ANA_MIRRORPORTS_MIRRORPORTS GENMASK(8, 0)
+#define ANA_MIRRORPORTS_MIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_MIRRORPORTS_MIRRORPORTS, x)
+#define ANA_MIRRORPORTS_MIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_MIRRORPORTS_MIRRORPORTS, x)
+
+/* ANA:ANA:EMIRRORPORTS */
+#define ANA_EMIRRORPORTS __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 64, 0, 1, 4)
+
+#define ANA_EMIRRORPORTS_EMIRRORPORTS GENMASK(8, 0)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_SET(x)\
+ FIELD_PREP(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+#define ANA_EMIRRORPORTS_EMIRRORPORTS_GET(x)\
+ FIELD_GET(ANA_EMIRRORPORTS_EMIRRORPORTS, x)
+
/* ANA:ANA:FLOODING */
#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
@@ -330,6 +348,12 @@ enum lan966x_target {
/* ANA:PORT:PORT_CFG */
#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x)
+
#define ANA_PORT_CFG_LEARNAUTO BIT(6)
#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
@@ -354,6 +378,21 @@ enum lan966x_target {
#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+/* ANA:PORT:POL_CFG */
+#define ANA_POL_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 116, 0, 1, 4)
+
+#define ANA_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_POL_CFG_PORT_POL_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_PORT_POL_ENA, x)
+#define ANA_POL_CFG_PORT_POL_ENA_GET(x)\
+ FIELD_GET(ANA_POL_CFG_PORT_POL_ENA, x)
+
+#define ANA_POL_CFG_POL_ORDER GENMASK(8, 0)
+#define ANA_POL_CFG_POL_ORDER_SET(x)\
+ FIELD_PREP(ANA_POL_CFG_POL_ORDER, x)
+#define ANA_POL_CFG_POL_ORDER_GET(x)\
+ FIELD_GET(ANA_POL_CFG_POL_ORDER, x)
+
/* ANA:PFC:PFC_CFG */
#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
@@ -363,6 +402,108 @@ enum lan966x_target {
#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+/* ANA:COMMON:AGGR_CFG */
+#define ANA_AGGR_CFG __REG(TARGET_ANA, 0, 1, 31232, 0, 1, 552, 0, 0, 1, 4)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x)
+#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x)
+
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x)
+
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\
+ FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\
+ FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x)
+
+/* ANA:POL:POL_PIR_CFG */
+#define ANA_POL_PIR_CFG(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 0, 0, 1, 4)
+
+#define ANA_POL_PIR_CFG_PIR_RATE GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_RATE, x)
+#define ANA_POL_PIR_CFG_PIR_RATE_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_RATE, x)
+
+#define ANA_POL_PIR_CFG_PIR_BURST GENMASK(5, 0)
+#define ANA_POL_PIR_CFG_PIR_BURST_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_CFG_PIR_BURST, x)
+#define ANA_POL_PIR_CFG_PIR_BURST_GET(x)\
+ FIELD_GET(ANA_POL_PIR_CFG_PIR_BURST, x)
+
+/* ANA:POL:POL_MODE_CFG */
+#define ANA_POL_MODE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 8, 0, 1, 4)
+
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+#define ANA_POL_MODE_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA BIT(10)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_POL_MODE_IPG_SIZE GENMASK(9, 5)
+#define ANA_POL_MODE_IPG_SIZE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_IPG_SIZE, x)
+#define ANA_POL_MODE_IPG_SIZE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_IPG_SIZE, x)
+
+#define ANA_POL_MODE_FRM_MODE GENMASK(4, 3)
+#define ANA_POL_MODE_FRM_MODE_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_FRM_MODE, x)
+#define ANA_POL_MODE_FRM_MODE_GET(x)\
+ FIELD_GET(ANA_POL_MODE_FRM_MODE, x)
+
+#define ANA_POL_MODE_OVERSHOOT_ENA BIT(0)
+#define ANA_POL_MODE_OVERSHOOT_ENA_SET(x)\
+ FIELD_PREP(ANA_POL_MODE_OVERSHOOT_ENA, x)
+#define ANA_POL_MODE_OVERSHOOT_ENA_GET(x)\
+ FIELD_GET(ANA_POL_MODE_OVERSHOOT_ENA, x)
+
+/* ANA:POL:POL_PIR_STATE */
+#define ANA_POL_PIR_STATE(g) __REG(TARGET_ANA, 0, 1, 16384, g, 345, 32, 12, 0, 1, 4)
+
+#define ANA_POL_PIR_STATE_PIR_LVL GENMASK(21, 0)
+#define ANA_POL_PIR_STATE_PIR_LVL_SET(x)\
+ FIELD_PREP(ANA_POL_PIR_STATE_PIR_LVL, x)
+#define ANA_POL_PIR_STATE_PIR_LVL_GET(x)\
+ FIELD_GET(ANA_POL_PIR_STATE_PIR_LVL, x)
+
/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
@@ -504,6 +645,12 @@ enum lan966x_target {
#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA BIT(1)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SAVE_PREAMBLE_ENA, x)
+
/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
@@ -967,6 +1114,215 @@ enum lan966x_target {
/* QSYS:RES_CTRL:RES_CFG */
#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+/* QSYS:HSCH:CIR_CFG */
+#define QSYS_CIR_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 0, 0, 1, 4)
+
+#define QSYS_CIR_CFG_CIR_RATE GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_RATE, x)
+#define QSYS_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_RATE, x)
+
+#define QSYS_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define QSYS_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(QSYS_CIR_CFG_CIR_BURST, x)
+#define QSYS_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(QSYS_CIR_CFG_CIR_BURST, x)
+
+/* QSYS:HSCH:SE_CFG */
+#define QSYS_SE_CFG(g) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 8, 0, 1, 4)
+
+#define QSYS_SE_CFG_SE_DWRR_CNT GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_DWRR_CNT, x)
+#define QSYS_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_DWRR_CNT, x)
+
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_RR_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_RR_ENA, x)
+#define QSYS_SE_CFG_SE_RR_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_RR_ENA, x)
+
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_AVB_ENA, x)
+#define QSYS_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_AVB_ENA, x)
+
+#define QSYS_SE_CFG_SE_FRM_MODE GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(QSYS_SE_CFG_SE_FRM_MODE, x)
+#define QSYS_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(QSYS_SE_CFG_SE_FRM_MODE, x)
+
+#define QSYS_SE_DWRR_CFG(g, r) __REG(TARGET_QSYS, 0, 1, 16384, g, 90, 128, 12, r, 12, 4)
+
+#define QSYS_SE_DWRR_CFG_DWRR_COST GENMASK(4, 0)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_SET(x)\
+ FIELD_PREP(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+#define QSYS_SE_DWRR_CFG_DWRR_COST_GET(x)\
+ FIELD_GET(QSYS_SE_DWRR_CFG_DWRR_COST, x)
+
+/* QSYS:TAS_CONFIG:TAS_CFG_CTRL */
+#define QSYS_TAS_CFG_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 0, 0, 1, 4)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX GENMASK(27, 23)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX, x)
+
+#define QSYS_TAS_CFG_CTRL_LIST_NUM GENMASK(22, 18)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+#define QSYS_TAS_CFG_CTRL_LIST_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_LIST_NUM, x)
+
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q BIT(17)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+#define QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q, x)
+
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM GENMASK(16, 5)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+#define QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM, x)
+
+/* QSYS:TAS_CONFIG:TAS_GATE_STATE_CTRL */
+#define QSYS_TAS_GS_CTRL __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 4, 0, 1, 4)
+
+#define QSYS_TAS_GS_CTRL_HSCH_POS GENMASK(2, 0)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+#define QSYS_TAS_GS_CTRL_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GS_CTRL_HSCH_POS, x)
+
+/* QSYS:TAS_CONFIG:TAS_STATEMACHINE_CFG */
+#define QSYS_TAS_STM_CFG __REG(TARGET_QSYS, 0, 1, 57372, 0, 1, 12, 8, 0, 1, 4)
+
+#define QSYS_TAS_STM_CFG_REVISIT_DLY GENMASK(7, 0)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_SET(x)\
+ FIELD_PREP(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+#define QSYS_TAS_STM_CFG_REVISIT_DLY_GET(x)\
+ FIELD_GET(QSYS_TAS_STM_CFG_REVISIT_DLY, x)
+
+/* QSYS:TAS_PROFILE_CFG:TAS_PROFILE_CONFIG */
+#define QSYS_TAS_PROFILE_CFG(g) __REG(TARGET_QSYS, 0, 1, 30720, g, 16, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM GENMASK(21, 19)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+#define QSYS_TAS_PROFILE_CFG_PORT_NUM_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_PORT_NUM, x)
+
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED GENMASK(18, 16)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+#define QSYS_TAS_PROFILE_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(QSYS_TAS_PROFILE_CFG_LINK_SPEED, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_NSEC */
+#define QSYS_TAS_BT_NSEC __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 0, 0, 1, 4)
+
+#define QSYS_TAS_BT_NSEC_NSEC GENMASK(29, 0)
+#define QSYS_TAS_BT_NSEC_NSEC_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_NSEC_NSEC, x)
+#define QSYS_TAS_BT_NSEC_NSEC_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_NSEC_NSEC, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_LSB */
+#define QSYS_TAS_BT_SEC_LSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 4, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_BASE_TIME_SEC_MSB */
+#define QSYS_TAS_BT_SEC_MSB __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 8, 0, 1, 4)
+
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB GENMASK(15, 0)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(x)\
+ FIELD_PREP(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+#define QSYS_TAS_BT_SEC_MSB_SEC_MSB_GET(x)\
+ FIELD_GET(QSYS_TAS_BT_SEC_MSB_SEC_MSB, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_CYCLE_TIME_CFG */
+#define QSYS_TAS_CT_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 24, 0, 1, 4)
+
+/* QSYS:TAS_LIST_CFG:TAS_STARTUP_CFG */
+#define QSYS_TAS_STARTUP_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 28, 0, 1, 4)
+
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX GENMASK(27, 23)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(x)\
+ FIELD_PREP(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+#define QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_GET(x)\
+ FIELD_GET(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_CFG */
+#define QSYS_TAS_LIST_CFG __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 32, 0, 1, 4)
+
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR GENMASK(11, 0)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(x)\
+ FIELD_PREP(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+#define QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(x)\
+ FIELD_GET(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR, x)
+
+/* QSYS:TAS_LIST_CFG:TAS_LIST_STATE */
+#define QSYS_TAS_LST __REG(TARGET_QSYS, 0, 1, 27904, 0, 1, 64, 36, 0, 1, 4)
+
+#define QSYS_TAS_LST_LIST_STATE GENMASK(2, 0)
+#define QSYS_TAS_LST_LIST_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_LST_LIST_STATE, x)
+#define QSYS_TAS_LST_LIST_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_LST_LIST_STATE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG */
+#define QSYS_TAS_GCL_CT_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 0, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS GENMASK(12, 10)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+#define QSYS_TAS_GCL_CT_CFG_HSCH_POS_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_HSCH_POS, x)
+
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE GENMASK(9, 2)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+#define QSYS_TAS_GCL_CT_CFG_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_GATE_STATE, x)
+
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE GENMASK(1, 0)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+#define QSYS_TAS_GCL_CT_CFG_OP_TYPE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG_OP_TYPE, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_CTRL_CFG2 */
+#define QSYS_TAS_GCL_CT_CFG2 __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 4, 0, 1, 4)
+
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE GENMASK(15, 12)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+#define QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE, x)
+
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL GENMASK(11, 0)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(x)\
+ FIELD_PREP(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+#define QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(x)\
+ FIELD_GET(QSYS_TAS_GCL_CT_CFG2_NEXT_GCL, x)
+
+/* QSYS:TAS_GCL_CFG:TAS_GCL_TIME_CFG */
+#define QSYS_TAS_GCL_TM_CFG __REG(TARGET_QSYS, 0, 1, 27968, 0, 1, 16, 8, 0, 1, 4)
+
+/* QSYS:HSCH_TAS_STATE:TAS_GATE_STATE */
+#define QSYS_TAS_GATE_STATE __REG(TARGET_QSYS, 0, 1, 28004, 0, 1, 4, 0, 0, 1, 4)
+
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE GENMASK(7, 0)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(x)\
+ FIELD_PREP(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+#define QSYS_TAS_GATE_STATE_TAS_GATE_STATE_GET(x)\
+ FIELD_GET(QSYS_TAS_GATE_STATE_TAS_GATE_STATE, x)
+
/* REW:PORT:PORT_VLAN_CFG */
#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index df2bee678559..1c88120eb291 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -6,8 +6,6 @@
#include "lan966x_main.h"
static struct notifier_block lan966x_netdevice_nb __read_mostly;
-static struct notifier_block lan966x_switchdev_nb __read_mostly;
-static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
static void lan966x_port_set_mcast_ip_flood(struct lan966x_port *port,
u32 pgid_ip)
@@ -132,7 +130,7 @@ static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
return 0;
}
-static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+void lan966x_update_fwd_mask(struct lan966x *lan966x)
{
int i;
@@ -140,9 +138,14 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
struct lan966x_port *port = lan966x->ports[i];
unsigned long mask = 0;
- if (port && lan966x->bridge_fwd_mask & BIT(i))
+ if (port && lan966x->bridge_fwd_mask & BIT(i)) {
mask = lan966x->bridge_fwd_mask & ~BIT(i);
+ if (port->bond)
+ mask &= ~lan966x_lag_get_mask(lan966x,
+ port->bond);
+ }
+
mask |= BIT(CPU_PORT);
lan_wr(ANA_PGID_PGID_SET(mask),
@@ -150,7 +153,7 @@ static void lan966x_update_fwd_mask(struct lan966x *lan966x)
}
}
-static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
{
struct lan966x *lan966x = port->lan966x;
bool learn_ena = false;
@@ -171,8 +174,8 @@ static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
lan966x_update_fwd_mask(lan966x);
}
-static void lan966x_port_ageing_set(struct lan966x_port *port,
- unsigned long ageing_clock_t)
+void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
@@ -241,6 +244,7 @@ static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
}
static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *brport_dev,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
@@ -258,7 +262,7 @@ static int lan966x_port_bridge_join(struct lan966x_port *port,
}
}
- err = switchdev_bridge_port_offload(dev, dev, port,
+ err = switchdev_bridge_port_offload(brport_dev, dev, port,
&lan966x_switchdev_nb,
&lan966x_switchdev_blocking_nb,
false, extack);
@@ -295,8 +299,9 @@ static void lan966x_port_bridge_leave(struct lan966x_port *port,
lan966x_vlan_port_apply(port);
}
-static int lan966x_port_changeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+int lan966x_port_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
@@ -306,44 +311,68 @@ static int lan966x_port_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = lan966x_port_bridge_join(port, info->upper_dev,
+ err = lan966x_port_bridge_join(port, brport_dev,
+ info->upper_dev,
extack);
else
lan966x_port_bridge_leave(port, info->upper_dev);
}
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_lag_port_join(port, info->upper_dev,
+ info->upper_dev,
+ extack);
+ else
+ lan966x_lag_port_leave(port, info->upper_dev);
+ }
+
return err;
}
-static int lan966x_port_prechangeupper(struct net_device *dev,
- struct netdev_notifier_changeupper_info *info)
+int lan966x_port_prechangeupper(struct net_device *dev,
+ struct net_device *brport_dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
+ int err = NOTIFY_DONE;
- if (netif_is_bridge_master(info->upper_dev) && !info->linking)
- switchdev_bridge_port_unoffload(port->dev, port,
- NULL, NULL);
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking) {
+ switchdev_bridge_port_unoffload(port->dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
- return NOTIFY_DONE;
+ if (netif_is_lag_master(info->upper_dev)) {
+ err = lan966x_lag_port_prechangeupper(dev, info);
+ if (err || info->linking)
+ return err;
+
+ switchdev_bridge_port_unoffload(brport_dev, port, NULL, NULL);
+ lan966x_fdb_flush_workqueue(port->lan966x);
+ }
+
+ return err;
}
-static int lan966x_foreign_bridging_check(struct net_device *bridge,
+static int lan966x_foreign_bridging_check(struct net_device *upper,
+ bool *has_foreign,
+ bool *seen_lan966x,
struct netlink_ext_ack *extack)
{
struct lan966x *lan966x = NULL;
- bool has_foreign = false;
struct net_device *dev;
struct list_head *iter;
- if (!netif_is_bridge_master(bridge))
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper))
return 0;
- netdev_for_each_lower_dev(bridge, dev, iter) {
+ netdev_for_each_lower_dev(upper, dev, iter) {
if (lan966x_netdevice_check(dev)) {
struct lan966x_port *port = netdev_priv(dev);
if (lan966x) {
- /* Bridge already has at least one port of a
+ /* Upper already has at least one port of a
* lan966x switch inside it, check that it's
* the same instance of the driver.
*/
@@ -354,15 +383,24 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
}
} else {
/* This is the first lan966x port inside this
- * bridge
+ * upper device
*/
lan966x = port->lan966x;
+ *seen_lan966x = true;
}
+ } else if (netif_is_lag_master(dev)) {
+ /* Allow bond interfaces that contain only lan966x
+ * devices
+ */
+ if (lan966x_foreign_bridging_check(dev, has_foreign,
+ seen_lan966x,
+ extack))
+ return -EINVAL;
} else {
- has_foreign = true;
+ *has_foreign = true;
}
- if (lan966x && has_foreign) {
+ if (*seen_lan966x && *has_foreign) {
NL_SET_ERR_MSG_MOD(extack,
"Bridging lan966x ports with foreign interfaces disallowed");
return -EINVAL;
@@ -375,7 +413,12 @@ static int lan966x_foreign_bridging_check(struct net_device *bridge,
static int lan966x_bridge_check(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
+ bool has_foreign = false;
+ bool seen_lan966x = false;
+
return lan966x_foreign_bridging_check(info->upper_dev,
+ &has_foreign,
+ &seen_lan966x,
info->info.extack);
}
@@ -386,21 +429,44 @@ static int lan966x_netdevice_port_event(struct net_device *dev,
int err = 0;
if (!lan966x_netdevice_check(dev)) {
- if (event == NETDEV_CHANGEUPPER)
- return lan966x_bridge_check(dev, ptr);
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ if (netif_is_lag_master(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ err = lan966x_lag_netdev_changeupper(dev,
+ ptr);
+ else
+ err = lan966x_lag_netdev_prechangeupper(dev,
+ ptr);
+
+ return err;
+ }
+ break;
+ default:
+ return 0;
+ }
+
return 0;
}
switch (event) {
case NETDEV_PRECHANGEUPPER:
- err = lan966x_port_prechangeupper(dev, ptr);
+ err = lan966x_port_prechangeupper(dev, dev, ptr);
break;
case NETDEV_CHANGEUPPER:
err = lan966x_bridge_check(dev, ptr);
if (err)
return err;
- err = lan966x_port_changeupper(dev, ptr);
+ err = lan966x_port_changeupper(dev, dev, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ err = lan966x_lag_port_changelowerstate(dev, ptr);
break;
}
@@ -418,19 +484,23 @@ static int lan966x_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(ret);
}
-/* We don't offload uppers such as LAG as bridge ports, so every device except
- * the bridge itself is foreign.
- */
static bool lan966x_foreign_dev_check(const struct net_device *dev,
const struct net_device *foreign_dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
+ int i;
if (netif_is_bridge_master(foreign_dev))
if (lan966x->bridge == foreign_dev)
return false;
+ if (netif_is_lag_master(foreign_dev))
+ for (i = 0; i < lan966x->num_phys_ports; ++i)
+ if (lan966x->ports[i] &&
+ lan966x->ports[i]->bond == foreign_dev)
+ return false;
+
return true;
}
@@ -571,11 +641,11 @@ static struct notifier_block lan966x_netdevice_nb __read_mostly = {
.notifier_call = lan966x_netdevice_event,
};
-static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+struct notifier_block lan966x_switchdev_nb __read_mostly = {
.notifier_call = lan966x_switchdev_event,
};
-static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
.notifier_call = lan966x_switchdev_blocking_event,
};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
new file mode 100644
index 000000000000..3f5b212066c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define LAN966X_TAPRIO_TIMEOUT_MS 1000
+#define LAN966X_TAPRIO_ENTRIES_PER_PORT 2
+
+/* Minimum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MIN_CYCLE_TIME_NS NSEC_PER_USEC
+
+/* Maximum supported cycle time in nanoseconds */
+#define LAN966X_TAPRIO_MAX_CYCLE_TIME_NS (NSEC_PER_SEC - 1)
+
+/* Total number of TAS GCL entries */
+#define LAN966X_TAPRIO_NUM_GCL 256
+
+/* TAPRIO link speeds for calculation of guard band */
+enum lan966x_taprio_link_speed {
+ LAN966X_TAPRIO_SPEED_NO_GB,
+ LAN966X_TAPRIO_SPEED_10,
+ LAN966X_TAPRIO_SPEED_100,
+ LAN966X_TAPRIO_SPEED_1000,
+ LAN966X_TAPRIO_SPEED_2500,
+};
+
+/* TAPRIO list states */
+enum lan966x_taprio_state {
+ LAN966X_TAPRIO_STATE_ADMIN,
+ LAN966X_TAPRIO_STATE_ADVANCING,
+ LAN966X_TAPRIO_STATE_PENDING,
+ LAN966X_TAPRIO_STATE_OPERATING,
+ LAN966X_TAPRIO_STATE_TERMINATING,
+ LAN966X_TAPRIO_STATE_MAX,
+};
+
+/* TAPRIO GCL command */
+enum lan966x_taprio_gcl_cmd {
+ LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES = 0,
+};
+
+static u32 lan966x_taprio_list_index(struct lan966x_port *port, u8 entry)
+{
+ return port->chip_port * LAN966X_TAPRIO_ENTRIES_PER_PORT + entry;
+}
+
+static u32 lan966x_taprio_list_state_get(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ val = lan_rd(lan966x, QSYS_TAS_LST);
+ return QSYS_TAS_LST_LIST_STATE_GET(val);
+}
+
+static u32 lan966x_taprio_list_index_state_get(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ return lan966x_taprio_list_state_get(port);
+}
+
+static void lan966x_taprio_list_state_set(struct lan966x_port *port,
+ u32 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(state),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+}
+
+static int lan966x_taprio_list_shutdown(struct lan966x_port *port,
+ u32 list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool pending, operating;
+ unsigned long end;
+ u32 state;
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ /* It is required to try multiple times to set the state of the list,
+ * because the HW can overwrite it.
+ */
+ do {
+ state = lan966x_taprio_list_state_get(port);
+
+ pending = false;
+ operating = false;
+
+ if (state == LAN966X_TAPRIO_STATE_ADVANCING ||
+ state == LAN966X_TAPRIO_STATE_PENDING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_ADMIN);
+ pending = true;
+ }
+
+ if (state == LAN966X_TAPRIO_STATE_OPERATING) {
+ lan966x_taprio_list_state_set(port,
+ LAN966X_TAPRIO_STATE_TERMINATING);
+ operating = true;
+ }
+
+ /* If the entry was pending and is now in admin, then there
+ * is nothing else to do, so just bail out
+ */
+ state = lan966x_taprio_list_state_get(port);
+ if (pending &&
+ state == LAN966X_TAPRIO_STATE_ADMIN)
+ return 0;
+
+ /* If the list was in operating and is now in terminating or
+ * admin, then it is OK to exit, but it needs to wait until the
+ * list reaches admin. It is not required to set the state
+ * again.
+ */
+ if (operating &&
+ (state == LAN966X_TAPRIO_STATE_TERMINATING ||
+ state == LAN966X_TAPRIO_STATE_ADMIN))
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ end = jiffies + msecs_to_jiffies(LAN966X_TAPRIO_TIMEOUT_MS);
+ do {
+ state = lan966x_taprio_list_state_get(port);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ break;
+
+ } while (!time_after(jiffies, end));
+
+ /* If the list was in operating mode, it could be stopped while some
+ * queues were closed, so make sure to restore "all-queues-open"
+ */
+ if (operating) {
+ lan_wr(QSYS_TAS_GS_CTRL_HSCH_POS_SET(port->chip_port),
+ lan966x, QSYS_TAS_GS_CTRL);
+
+ lan_wr(QSYS_TAS_GATE_STATE_TAS_GATE_STATE_SET(0xff),
+ lan966x, QSYS_TAS_GATE_STATE);
+ }
+
+ return 0;
+}
+
+static int lan966x_taprio_shutdown(struct lan966x_port *port)
+{
+ u32 i, list, state;
+ int err;
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list = lan966x_taprio_list_index(port, i);
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ err = lan966x_taprio_list_shutdown(port, list);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Find a suitable list for a new schedule. First priority is a list in state
+ * pending. Second priority is a list in state admin.
+ */
+static int lan966x_taprio_find_list(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int *new_list, int *obs_list)
+{
+ int state[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int list[LAN966X_TAPRIO_ENTRIES_PER_PORT];
+ int err, oper = -1;
+ u32 i;
+
+ *new_list = -1;
+ *obs_list = -1;
+
+ /* If there is already an entry in operating mode, return this list in
+ * obs_list, such that when the new list gets activated the
+ * operating list will be stopped. In this way it is possible to
+ * have smooth transitions between the lists
+ */
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ list[i] = lan966x_taprio_list_index(port, i);
+ state[i] = lan966x_taprio_list_index_state_get(port, list[i]);
+ if (state[i] == LAN966X_TAPRIO_STATE_OPERATING)
+ oper = list[i];
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_PENDING) {
+ err = lan966x_taprio_shutdown(port);
+ if (err)
+ return err;
+
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ for (i = 0; i < LAN966X_TAPRIO_ENTRIES_PER_PORT; ++i) {
+ if (state[i] == LAN966X_TAPRIO_STATE_ADMIN) {
+ *new_list = list[i];
+ *obs_list = (oper == -1) ? *new_list : oper;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int lan966x_taprio_check(struct tc_taprio_qopt_offload *qopt)
+{
+ u64 total_time = 0;
+ u32 i;
+
+ /* This is not supported by the HW */
+ if (qopt->cycle_time_extension)
+ return -EOPNOTSUPP;
+
+ /* There is a limited number of gcl entries that can be used; they are
+ * shared by all ports
+ */
+ if (qopt->num_entries > LAN966X_TAPRIO_NUM_GCL)
+ return -EINVAL;
+
+ /* Don't allow cycle times bigger than 1 sec or smaller than 1 usec */
+ if (qopt->cycle_time < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ qopt->cycle_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ for (i = 0; i < qopt->num_entries; ++i) {
+ struct tc_taprio_sched_entry *entry = &qopt->entries[i];
+
+ /* Don't allow intervals bigger than 1 sec or smaller than 1
+ * usec
+ */
+ if (entry->interval < LAN966X_TAPRIO_MIN_CYCLE_TIME_NS ||
+ entry->interval > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+
+ total_time += qopt->entries[i].interval;
+ }
+
+ /* Don't allow the total time of the intervals to be bigger than 1 sec */
+ if (total_time > LAN966X_TAPRIO_MAX_CYCLE_TIME_NS)
+ return -EINVAL;
+
+ /* The HW expects the cycle time to be at least as big as the sum of
+ * the gcl intervals
+ */
+ if (qopt->cycle_time < total_time)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_taprio_gcl_free_get(struct lan966x_port *port,
+ unsigned long *free_list)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 num_free, state, list;
+ u32 base, next, max_list;
+
+ /* By default everything is free */
+ bitmap_fill(free_list, LAN966X_TAPRIO_NUM_GCL);
+ num_free = LAN966X_TAPRIO_NUM_GCL;
+
+ /* Iterate over all gcl entries to find out which are free and mark
+ * those that are not free.
+ */
+ max_list = lan966x->num_phys_ports * LAN966X_TAPRIO_ENTRIES_PER_PORT;
+ for (list = 0; list < max_list; ++list) {
+ state = lan966x_taprio_list_index_state_get(port, list);
+ if (state == LAN966X_TAPRIO_STATE_ADMIN)
+ continue;
+
+ base = lan_rd(lan966x, QSYS_TAS_LIST_CFG);
+ base = QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_GET(base);
+ next = base;
+
+ do {
+ clear_bit(next, free_list);
+ num_free--;
+
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ next = lan_rd(lan966x, QSYS_TAS_GCL_CT_CFG2);
+ next = QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_GET(next);
+ } while (base != next);
+ }
+
+ return num_free;
+}
+
+static void lan966x_taprio_gcl_setup_entry(struct lan966x_port *port,
+ struct tc_taprio_sched_entry *entry,
+ u32 next_entry)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Setup a single gcl entry */
+ lan_wr(QSYS_TAS_GCL_CT_CFG_GATE_STATE_SET(entry->gate_mask) |
+ QSYS_TAS_GCL_CT_CFG_HSCH_POS_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG_OP_TYPE_SET(LAN966X_TAPRIO_GCL_CMD_SET_GATE_STATES),
+ lan966x, QSYS_TAS_GCL_CT_CFG);
+
+ lan_wr(QSYS_TAS_GCL_CT_CFG2_PORT_PROFILE_SET(port->chip_port) |
+ QSYS_TAS_GCL_CT_CFG2_NEXT_GCL_SET(next_entry),
+ lan966x, QSYS_TAS_GCL_CT_CFG2);
+
+ lan_wr(entry->interval, lan966x, QSYS_TAS_GCL_TM_CFG);
+}
+
+static int lan966x_taprio_gcl_setup(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt,
+ int list)
+{
+ DECLARE_BITMAP(free_list, LAN966X_TAPRIO_NUM_GCL);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, base, next;
+
+ if (lan966x_taprio_gcl_free_get(port, free_list) < qopt->num_entries)
+ return -ENOSPC;
+
+ /* Select list */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_SET(list),
+ QSYS_TAS_CFG_CTRL_LIST_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* Setup the address of the first gcl entry */
+ base = find_first_bit(free_list, LAN966X_TAPRIO_NUM_GCL);
+ lan_rmw(QSYS_TAS_LIST_CFG_LIST_BASE_ADDR_SET(base),
+ QSYS_TAS_LIST_CFG_LIST_BASE_ADDR,
+ lan966x, QSYS_TAS_LIST_CFG);
+
+ /* Iterate over entries and add them to the gcl list */
+ next = base;
+ for (i = 0; i < qopt->num_entries; ++i) {
+ lan_rmw(QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM_SET(next),
+ QSYS_TAS_CFG_CTRL_GCL_ENTRY_NUM,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ /* If the entry is last, point back to the start of the list */
+ if (i == qopt->num_entries - 1)
+ next = base;
+ else
+ next = find_next_bit(free_list, LAN966X_TAPRIO_NUM_GCL,
+ next + 1);
+
+ lan966x_taprio_gcl_setup_entry(port, &qopt->entries[i], next);
+ }
+
+ return 0;
+}
+
+/* Calculate new base_time based on cycle_time. The HW recommends that the
+ * new base time be at least current time + 2 * cycle time
+ */
+static void lan966x_taprio_new_base_time(struct lan966x *lan966x,
+ const u32 cycle_time,
+ const ktime_t org_base_time,
+ ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time;
+ struct timespec64 ts;
+
+ /* Get the current time and calculate the threshold_time */
+ lan966x_ptp_gettime64(&lan966x->phc[LAN966X_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+
+ /* If the org_base_time is far enough in the future, just use it */
+ if (org_base_time >= threshold_time) {
+ *new_base_time = org_base_time;
+ return;
+ }
+
+ /* If the org_base_time is smaller than current_time, calculate the new
+ * base time as follows.
+ */
+ if (org_base_time <= current_time) {
+ u64 tmp = current_time - org_base_time;
+ u32 rem = 0;
+
+ if (tmp > cycle_time)
+ div_u64_rem(tmp, cycle_time, &rem);
+ rem = cycle_time - rem;
+ *new_base_time = threshold_time + rem;
+ return;
+ }
+
+ /* The only remaining case is org_base_time between current_time and
+ * threshold_time. In this case the new_base_time is calculated as
+ * org_base_time + 2 * cycle_time
+ */
+ *new_base_time = org_base_time + 2 * cycle_time;
+}
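To make the three branches above concrete, here is a worked example with made-up numbers (cycle_time = 1 ms, current time = 100 ms, original base = 10.3 ms in the past); the snippet only repeats the arithmetic and is not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycle = 1000000ULL;	/* 1 ms expressed in ns */
	uint64_t now = 100000000ULL;	/* current PHC time: 100 ms */
	uint64_t base = 10300000ULL;	/* requested base time: 10.3 ms */
	uint64_t tmp = now - base;	/* 89.7 ms */
	uint64_t rem = tmp > cycle ? tmp % cycle : 0;	/* 0.7 ms */
	uint64_t new_base = now + 2 * cycle + (cycle - rem);	/* 102.3 ms */

	/* The new base stays phase-aligned with the old one (92 cycles later)
	 * and is at least two cycles past the current time.
	 */
	printf("new base = %llu ns, %llu cycles after the old base\n",
	       (unsigned long long)new_base,
	       (unsigned long long)((new_base - base) / cycle));
	return 0;
}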
+
+int lan966x_taprio_speed_set(struct lan966x_port *port, int speed)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 taprio_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ taprio_speed = LAN966X_TAPRIO_SPEED_10;
+ break;
+ case SPEED_100:
+ taprio_speed = LAN966X_TAPRIO_SPEED_100;
+ break;
+ case SPEED_1000:
+ taprio_speed = LAN966X_TAPRIO_SPEED_1000;
+ break;
+ case SPEED_2500:
+ taprio_speed = LAN966X_TAPRIO_SPEED_2500;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ lan_rmw(QSYS_TAS_PROFILE_CFG_LINK_SPEED_SET(taprio_speed),
+ QSYS_TAS_PROFILE_CFG_LINK_SPEED,
+ lan966x, QSYS_TAS_PROFILE_CFG(port->chip_port));
+
+ return 0;
+}
+
+int lan966x_taprio_add(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ int err, new_list, obs_list;
+ struct timespec64 ts;
+ ktime_t base_time;
+
+ err = lan966x_taprio_check(qopt);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_find_list(port, qopt, &new_list, &obs_list);
+ if (err)
+ return err;
+
+ err = lan966x_taprio_gcl_setup(port, qopt, new_list);
+ if (err)
+ return err;
+
+ lan966x_taprio_new_base_time(lan966x, qopt->cycle_time,
+ qopt->base_time, &base_time);
+
+ ts = ktime_to_timespec64(base_time);
+ lan_wr(QSYS_TAS_BT_NSEC_NSEC_SET(ts.tv_nsec),
+ lan966x, QSYS_TAS_BT_NSEC);
+
+ lan_wr(lower_32_bits(ts.tv_sec),
+ lan966x, QSYS_TAS_BT_SEC_LSB);
+
+ lan_wr(QSYS_TAS_BT_SEC_MSB_SEC_MSB_SET(upper_32_bits(ts.tv_sec)),
+ lan966x, QSYS_TAS_BT_SEC_MSB);
+
+ lan_wr(qopt->cycle_time, lan966x, QSYS_TAS_CT_CFG);
+
+ lan_rmw(QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX_SET(obs_list),
+ QSYS_TAS_STARTUP_CFG_OBSOLETE_IDX,
+ lan966x, QSYS_TAS_STARTUP_CFG);
+
+ /* Start list processing */
+ lan_rmw(QSYS_TAS_LST_LIST_STATE_SET(LAN966X_TAPRIO_STATE_ADVANCING),
+ QSYS_TAS_LST_LIST_STATE,
+ lan966x, QSYS_TAS_LST);
+
+ return err;
+}
+
+int lan966x_taprio_del(struct lan966x_port *port)
+{
+ return lan966x_taprio_shutdown(port);
+}
+
+void lan966x_taprio_init(struct lan966x *lan966x)
+{
+ int num_taprio_lists;
+ int p;
+
+ lan_wr(QSYS_TAS_STM_CFG_REVISIT_DLY_SET((256 * 1000) /
+ lan966x_ptp_get_period_ps()),
+ lan966x, QSYS_TAS_STM_CFG);
+
+ num_taprio_lists = lan966x->num_phys_ports *
+ LAN966X_TAPRIO_ENTRIES_PER_PORT;
+
+ /* For now we always use guard band on all queues */
+ lan_rmw(QSYS_TAS_CFG_CTRL_LIST_NUM_MAX_SET(num_taprio_lists) |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q_SET(1),
+ QSYS_TAS_CFG_CTRL_LIST_NUM_MAX |
+ QSYS_TAS_CFG_CTRL_ALWAYS_GB_SCH_Q,
+ lan966x, QSYS_TAS_CFG_CTRL);
+
+ for (p = 0; p < lan966x->num_phys_ports; p++)
+ lan_rmw(QSYS_TAS_PROFILE_CFG_PORT_NUM_SET(p),
+ QSYS_TAS_PROFILE_CFG_PORT_NUM,
+ lan966x, QSYS_TAS_PROFILE_CFG(p));
+}
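For reference, the REVISIT_DLY value programmed above works out as follows when combined with the 15125 ps period from lan966x_ptp_get_period_ps() (illustrative arithmetic only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int period_ps = 15125;	/* lan966x_ptp_get_period_ps() */
	unsigned int dly = (256 * 1000) / period_ps;	/* integer division: 16 */

	/* 16 cycles of 15.125 ns is roughly 242 ns between list revisits. */
	printf("REVISIT_DLY = %u cycles (~%u ns)\n", dly, dly * period_ps / 1000);
	return 0;
}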
+
+void lan966x_taprio_deinit(struct lan966x *lan966x)
+{
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (!lan966x->ports[p])
+ continue;
+
+ lan966x_taprio_del(lan966x->ports[p]);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
new file mode 100644
index 000000000000..4555a35d0d28
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+int lan966x_tbf_add(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 cir, cbs;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ cir = div_u64(qopt->replace_params.rate.rate_bytes_ps, 1000) * 8;
+ cbs = qopt->replace_params.max_size;
+
+ /* Rate unit is 100 kbps */
+ cir = DIV_ROUND_UP(cir, 100);
+ /* Avoid using zero rate */
+ cir = cir ?: 1;
+ /* Burst unit is 4kB */
+ cbs = DIV_ROUND_UP(cbs, 4096);
+ /* Avoid using zero burst */
+ cbs = cbs ?: 1;
+
+ /* Check that the resulting values actually fit in the register fields */
+ if (cir > GENMASK(15, 0) ||
+ cbs > GENMASK(6, 0))
+ return -EINVAL;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(1),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(cir) |
+ QSYS_CIR_CFG_CIR_BURST_SET(cbs),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
+
+int lan966x_tbf_del(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool root = qopt->parent == TC_H_ROOT;
+ u32 queue = 0;
+ u32 se_idx;
+
+ if (!root) {
+ queue = TC_H_MIN(qopt->parent) - 1;
+ if (queue >= NUM_PRIO_QUEUES)
+ return -EOPNOTSUPP;
+ }
+
+ if (root)
+ se_idx = SE_IDX_PORT + port->chip_port;
+ else
+ se_idx = SE_IDX_QUEUE + port->chip_port * NUM_PRIO_QUEUES + queue;
+
+ lan_rmw(QSYS_SE_CFG_SE_AVB_ENA_SET(0) |
+ QSYS_SE_CFG_SE_FRM_MODE_SET(0),
+ QSYS_SE_CFG_SE_AVB_ENA |
+ QSYS_SE_CFG_SE_FRM_MODE,
+ lan966x, QSYS_SE_CFG(se_idx));
+
+ lan_wr(QSYS_CIR_CFG_CIR_RATE_SET(0) |
+ QSYS_CIR_CFG_CIR_BURST_SET(0),
+ lan966x, QSYS_CIR_CFG(se_idx));
+
+ return 0;
+}
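A worked example of the unit conversion performed in lan966x_tbf_add() above, using made-up offload parameters (100 Mbit/s with a 10000-byte burst); the snippet just repeats the arithmetic with a local DIV_ROUND_UP and is not driver code:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t rate_bytes_ps = 12500000ULL;	/* 100 Mbit/s */
	uint32_t max_size = 10000;		/* burst in bytes */
	uint32_t cir = (uint32_t)(rate_bytes_ps / 1000) * 8;	/* 100000 kbps */
	uint32_t cbs = max_size;

	cir = DIV_ROUND_UP(cir, 100);	/* 100 kbps units: 1000 */
	cbs = DIV_ROUND_UP(cbs, 4096);	/* 4 kB units: 3, i.e. 12 kB effective */

	/* Both values fit the register fields (cir <= 0xffff, cbs <= 0x7f). */
	printf("CIR_RATE=%u CIR_BURST=%u\n", cir, cbs);
	return 0;
}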
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
new file mode 100644
index 000000000000..651d5493ae55
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/pkt_cls.h>
+
+#include "lan966x_main.h"
+
+static LIST_HEAD(lan966x_tc_block_cb_list);
+
+static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port,
+ struct tc_mqprio_qopt_offload *mqprio)
+{
+ u8 num_tc = mqprio->qopt.num_tc;
+
+ mqprio->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return num_tc ? lan966x_mqprio_add(port, num_tc) :
+ lan966x_mqprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ return taprio->enable ? lan966x_taprio_add(port, taprio) :
+ lan966x_taprio_del(port);
+}
+
+static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return lan966x_tbf_add(port, qopt);
+ case TC_TBF_DESTROY:
+ return lan966x_tbf_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_setup_qdisc_cbs(struct lan966x_port *port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ return qopt->enable ? lan966x_cbs_add(port, qopt) :
+ lan966x_cbs_del(port, qopt);
+}
+
+static int lan966x_tc_setup_qdisc_ets(struct lan966x_port *port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ return lan966x_ets_add(port, qopt);
+ case TC_ETS_DESTROY:
+ return lan966x_ets_del(port, qopt);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct lan966x_port *port = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return lan966x_tc_matchall(port, type_data, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan966x_tc_block_cb_ingress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int lan966x_tc_block_cb_egress(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return lan966x_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int lan966x_tc_setup_block(struct lan966x_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb;
+ bool ingress;
+
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+ cb = lan966x_tc_block_cb_ingress;
+ port->tc.ingress_shared_block = f->block_shared;
+ ingress = true;
+ } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+ cb = lan966x_tc_block_cb_egress;
+ ingress = false;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return flow_block_cb_setup_simple(f, &lan966x_tc_block_cb_list,
+ cb, port, port, ingress);
+}
+
+int lan966x_tc_setup(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return lan966x_tc_setup_qdisc_mqprio(port, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return lan966x_tc_setup_qdisc_taprio(port, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return lan966x_tc_setup_qdisc_tbf(port, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return lan966x_tc_setup_qdisc_cbs(port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return lan966x_tc_setup_qdisc_ets(port, type_data);
+ case TC_SETUP_BLOCK:
+ return lan966x_tc_setup_block(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
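For context, lan966x_tc_setup() above is expected to be reached through the port netdevice's .ndo_setup_tc hook. The ops table below is a hypothetical sketch of that wiring; the name example_netdev_ops and the reduced field list are assumptions for illustration, not part of this patch.

#include <linux/netdevice.h>

#include "lan966x_main.h"	/* assumed to declare lan966x_tc_setup() */

static const struct net_device_ops example_netdev_ops = {
	.ndo_setup_tc	= lan966x_tc_setup,
};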
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
new file mode 100644
index 000000000000..7368433b9277
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+static int lan966x_tc_matchall_add(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ struct flow_action_entry *act;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only once action per filter is supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ return lan966x_police_port_add(port, &f->rule->action, act,
+ f->cookie, ingress,
+ f->common.extack);
+ case FLOW_ACTION_MIRRED:
+ return lan966x_mirror_port_add(port, act, f->cookie,
+ ingress, f->common.extack);
+ default:
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_matchall_del(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ return lan966x_police_port_del(port, f->cookie,
+ f->common.extack);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ return lan966x_mirror_port_del(port, ingress,
+ f->common.extack);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_tc_matchall_stats(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (f->cookie == port->tc.police_id) {
+ lan966x_police_port_stats(port, &f->stats);
+ } else if (f->cookie == port->tc.ingress_mirror_id ||
+ f->cookie == port->tc.egress_mirror_id) {
+ lan966x_mirror_port_stats(port, &f->stats, ingress);
+ } else {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int lan966x_tc_matchall(struct lan966x_port *port,
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
+{
+ if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only chain zero is supported");
+ return -EOPNOTSUPP;
+ }
+
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return lan966x_tc_matchall_add(port, f, ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ return lan966x_tc_matchall_del(port, f, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return lan966x_tc_matchall_stats(port, f, ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 4402c3ed1dc5..d1c6ad966747 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o
sparx5-switch-objs := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
- sparx5_ptp.o sparx5_pgid.o
+ sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index a5837dbe0c7e..4af285918ea2 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -186,8 +186,8 @@ bool sparx5_mact_getnext(struct sparx5 *sparx5,
return ret == 0;
}
-bool sparx5_mact_find(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2)
{
int ret;
u32 cfg2;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 01be7bd84181..62a325e96345 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -27,6 +27,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_qos.h"
#define QLIM_WM(fraction) \
((SPX5_BUFFER_MEMORY / SPX5_BUFFER_CELL_SZ - 100) * (fraction) / 100)
@@ -277,6 +278,7 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
+ spx5_port->is_mrouter = false;
sparx5->ports[config->portno] = spx5_port;
err = sparx5_port_init(sparx5, spx5_port, &config->conf);
@@ -661,6 +663,9 @@ static int sparx5_start(struct sparx5 *sparx5)
queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work,
SPX5_MACT_PULL_DELAY);
+ mutex_init(&sparx5->mdb_lock);
+ INIT_LIST_HEAD(&sparx5->mdb_entries);
+
err = sparx5_register_netdevs(sparx5);
if (err)
return err;
@@ -864,6 +869,12 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
goto cleanup_ports;
}
+ err = sparx5_qos_init(sparx5);
+ if (err) {
+ dev_err(sparx5->dev, "Failed to initialize QoS\n");
+ goto cleanup_ports;
+ }
+
err = sparx5_ptp_init(sparx5);
if (err) {
dev_err(sparx5->dev, "PTP failed\n");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index b197129044b5..7a83222caa73 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -190,6 +190,7 @@ struct sparx5_port {
u8 ptp_cmd;
u16 ts_id;
struct sk_buff_head tx_skbs;
+ bool is_mrouter;
};
enum sparx5_core_clockfreq {
@@ -215,6 +216,15 @@ struct sparx5_skb_cb {
unsigned long jiffies;
};
+struct sparx5_mdb_entry {
+ struct list_head list;
+ DECLARE_BITMAP(port_mask, SPX5_PORTS);
+ unsigned char addr[ETH_ALEN];
+ bool cpu_copy;
+ u16 vid;
+ u16 pgid_idx;
+};
+
#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
@@ -256,6 +266,10 @@ struct sparx5 {
struct list_head mact_entries;
/* mac table list (mact_entries) mutex */
struct mutex mact_lock;
+ /* SW MDB table */
+ struct list_head mdb_entries;
+ /* mdb list mutex */
+ struct mutex mdb_lock;
struct delayed_work mact_work;
struct workqueue_struct *mact_queue;
/* Board specifics */
@@ -291,7 +305,7 @@ struct frame_info {
void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp);
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info);
irqreturn_t sparx5_xtr_handler(int irq, void *_priv);
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev);
int sparx5_manual_injection_mode(struct sparx5 *sparx5);
void sparx5_port_inj_timer_setup(struct sparx5_port *port);
@@ -307,8 +321,8 @@ int sparx5_mact_learn(struct sparx5 *sparx5, int port,
const unsigned char mac[ETH_ALEN], u16 vid);
bool sparx5_mact_getnext(struct sparx5 *sparx5,
unsigned char mac[ETH_ALEN], u16 *vid, u32 *pcfg2);
-bool sparx5_mact_find(struct sparx5 *sparx5,
- const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
+int sparx5_mact_find(struct sparx5 *sparx5,
+ const unsigned char mac[ETH_ALEN], u16 vid, u32 *pcfg2);
int sparx5_mact_forget(struct sparx5 *sparx5,
const unsigned char mac[ETH_ALEN], u16 vid);
int sparx5_add_mact_entry(struct sparx5 *sparx5,
@@ -325,6 +339,7 @@ void sparx5_mact_init(struct sparx5 *sparx5);
/* sparx5_vlan.c */
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable);
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid);
void sparx5_pgid_read_mask(struct sparx5 *sparx5, int pgid, u32 portmask[3]);
void sparx5_update_fwd(struct sparx5 *sparx5);
void sparx5_vlan_init(struct sparx5 *sparx5);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index c94de436b281..fa2eb70f487a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -2993,6 +2993,147 @@ enum sparx5_target {
#define GCB_SIO_CLOCK_SYS_CLK_PERIOD_GET(x)\
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
+/* HSCH:HSCH_CFG:CIR_CFG */
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+
+#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
+#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_RATE, x)
+#define HSCH_CIR_CFG_CIR_RATE_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_RATE, x)
+
+#define HSCH_CIR_CFG_CIR_BURST GENMASK(5, 0)
+#define HSCH_CIR_CFG_CIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_CIR_CFG_CIR_BURST, x)
+#define HSCH_CIR_CFG_CIR_BURST_GET(x)\
+ FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
+
+/* HSCH:HSCH_CFG:EIR_CFG */
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+
+#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
+#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_RATE, x)
+#define HSCH_EIR_CFG_EIR_RATE_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_RATE, x)
+
+#define HSCH_EIR_CFG_EIR_BURST GENMASK(5, 0)
+#define HSCH_EIR_CFG_EIR_BURST_SET(x)\
+ FIELD_PREP(HSCH_EIR_CFG_EIR_BURST, x)
+#define HSCH_EIR_CFG_EIR_BURST_GET(x)\
+ FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
+
+/* HSCH:HSCH_CFG:SE_CFG */
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+
+#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
+#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_CNT, x)
+#define HSCH_SE_CFG_SE_DWRR_CNT_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_CNT, x)
+
+#define HSCH_SE_CFG_SE_AVB_ENA BIT(5)
+#define HSCH_SE_CFG_SE_AVB_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_AVB_ENA, x)
+#define HSCH_SE_CFG_SE_AVB_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_AVB_ENA, x)
+
+#define HSCH_SE_CFG_SE_FRM_MODE GENMASK(4, 3)
+#define HSCH_SE_CFG_SE_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE GENMASK(2, 1)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+#define HSCH_SE_CFG_SE_DWRR_FRM_MODE_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_DWRR_FRM_MODE, x)
+
+#define HSCH_SE_CFG_SE_STOP BIT(0)
+#define HSCH_SE_CFG_SE_STOP_SET(x)\
+ FIELD_PREP(HSCH_SE_CFG_SE_STOP, x)
+#define HSCH_SE_CFG_SE_STOP_GET(x)\
+ FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
+
+/* HSCH:HSCH_CFG:SE_CONNECT */
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+
+#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
+ FIELD_PREP(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+#define HSCH_SE_CONNECT_SE_LEAK_LINK_GET(x)\
+ FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
+
+/* HSCH:HSCH_CFG:SE_DLB_SENSE */
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT GENMASK(9, 3)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA BIT(2)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_SE_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(1)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_PRIO_ENA, x)
+
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_SET(x)\
+ FIELD_PREP(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+#define HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA_GET(x)\
+ FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
+
+/* HSCH:HSCH_DWRR:DWRR_ENTRY */
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+
+#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
+#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_COST, x)
+#define HSCH_DWRR_ENTRY_DWRR_COST_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_COST, x)
+
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE GENMASK(19, 0)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_SET(x)\
+ FIELD_PREP(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+#define HSCH_DWRR_ENTRY_DWRR_BALANCE_GET(x)\
+ FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
+
+/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CFG_SE_IDX, x)
+
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER GENMASK(13, 12)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+#define HSCH_HSCH_CFG_CFG_HSCH_LAYER_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_HSCH_LAYER, x)
+
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT GENMASK(11, 0)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_SET(x)\
+ FIELD_PREP(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+#define HSCH_HSCH_CFG_CFG_CSR_GRANT_GET(x)\
+ FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
+
/* HSCH:HSCH_MISC:SYS_CLK_PER */
#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
@@ -3002,6 +3143,30 @@ enum sparx5_target {
#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
+ FIELD_PREP(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(x)\
+ FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
+
+/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_FIRST, x)
+
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR BIT(0)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_SET(x)\
+ FIELD_PREP(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+#define HSCH_HSCH_LEAK_CFG_LEAK_ERR_GET(x)\
+ FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
+
/* HSCH:SYSTEM:FLUSH_CTRL */
#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index af4d3e1f1a6d..19516ccad533 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -7,6 +7,7 @@
#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"
+#include "sparx5_tc.h"
/* The IFH bit position of the first VSTAX bit. This is because the
 * VSTAX bit positions in the data sheet start from zero.
@@ -228,6 +229,7 @@ static const struct net_device_ops sparx5_port_netdev_ops = {
.ndo_get_stats64 = sparx5_get_stats64,
.ndo_get_port_parent_id = sparx5_get_port_parent_id,
.ndo_eth_ioctl = sparx5_port_ioctl,
+ .ndo_setup_tc = sparx5_port_setup_tc,
};
bool sparx5_netdevice_check(const struct net_device *dev)
@@ -240,10 +242,14 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
struct sparx5_port *spx5_port;
struct net_device *ndev;
- ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
+ ndev = devm_alloc_etherdev_mqs(sparx5->dev, sizeof(struct sparx5_port),
+ SPX5_PRIOS, 1);
if (!ndev)
return ERR_PTR(-ENOMEM);
+ ndev->hw_features |= NETIF_F_HW_TC;
+ ndev->features |= NETIF_F_HW_TC;
+
SET_NETDEV_DEV(ndev, sparx5->dev);
spx5_port = netdev_priv(ndev);
spx5_port->ndev = ndev;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 304f84aadc36..83c16ca5b30f 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -113,6 +113,8 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* This assumes STATUS_WORD_POS == 1, Status
* just after last data
*/
+ if (!byte_swap)
+ val = ntohl((__force __be32)val);
byte_cnt -= (4 - XTR_VALID_BYTES(val));
eof_flag = true;
break;
@@ -220,13 +222,13 @@ static int sparx5_inject(struct sparx5 *sparx5,
return NETDEV_TX_OK;
}
-int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
u32 ifh[IFH_LEN];
- int ret;
+ netdev_tx_t ret;
memset(ifh, 0, IFH_LEN * 4);
sparx5_set_port_ifh(ifh, port->portno);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
new file mode 100644
index 000000000000..1e79d0ef0cb8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
+/* Max rates for leak groups */
+static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
+ 1048568, /* 1.049 Gbps */
+ 2621420, /* 2.621 Gbps */
+ 10485680, /* 10.486 Gbps */
+ 26214200 /* 26.214 Gbps */
+};
+
+static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
+
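+/* Leak group (lg) accessors. Each layer has SPX5_HSCH_LEAK_GRP_CNT leak
+ * groups; a group is a linked list of scheduler elements (SEs) chained
+ * through HSCH_SE_CONNECT (the last SE links to itself), with the list head
+ * in HSCH_HSCH_LEAK_CFG and the common leak period in HSCH_HSCH_TIMER_CFG.
+ * A leak time of zero is used to mean that the group is disabled/empty.
+ */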
+static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
+ return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
+}
+
+static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
+ HSCH_HSCH_TIMER_CFG(layer, group));
+}
+
+static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
+ return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
+}
+
+static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+
+{
+ u32 value;
+
+ value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
+ return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
+}
+
+static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_lg_get_first(sparx5, layer, group);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
+}
+
+static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx)
+{
+ return idx == sparx5_lg_get_first(sparx5, layer, group);
+}
+
+static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
+}
+
+static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ if (sparx5_lg_is_empty(sparx5, layer, group))
+ return false;
+
+ return sparx5_lg_get_first(sparx5, layer, group) ==
+ sparx5_lg_get_last(sparx5, layer, group);
+}
+
+static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 leak_time)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
+}
+
+static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
+{
+ sparx5_lg_set_leak_time(sparx5, layer, group, 0);
+}
+
+static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
+ u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ if (sparx5_lg_is_empty(sparx5, layer, i))
+ continue;
+
+ itr = sparx5_lg_get_first(sparx5, layer, i);
+
+ for (;;) {
+ next = sparx5_lg_get_next(sparx5, layer, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -1;
+}
+
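+/* The leak groups are ordered by ascending max rate, so the first group that
+ * can hold the requested rate is also the one with the finest rate
+ * granularity for it.
+ */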
+static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
+{
+ struct sparx5_layer *l = &layers[layer];
+ struct sparx5_lg *lg;
+ u32 i;
+
+ for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
+ lg = &l->leak_groups[i];
+ if (rate <= lg->max_rate) {
+ *group = i;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 idx, u32 *prev, u32 *next, u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_lg_get_first(sparx5, layer, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_lg_get_next(sparx5, layer, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -1; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+
+ return -1;
+}
+
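+/* Rewrite a leak group list with leaking stopped: the SE link and the list
+ * head are updated first, and leaking is only re-enabled afterwards (or left
+ * disabled if the group becomes empty).
+ */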
+static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
+ u32 se_first, u32 idx, u32 idx_next, bool empty)
+{
+ u32 leak_time = layers[layer].leak_groups[group].leak_time;
+
+ /* Stop leaking */
+ sparx5_lg_disable(sparx5, layer, group);
+
+ if (empty)
+ return 0;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Link elements */
+ spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
+ HSCH_SE_CONNECT(idx));
+
+ /* Set the first element. */
+ spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
+ HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
+ HSCH_HSCH_LEAK_CFG(layer, group));
+
+ /* Start leaking */
+ sparx5_lg_enable(sparx5, layer, group, leak_time);
+
+ return 0;
+}
+
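+/* Unlink an SE from its leak group. Four cases: the SE is the only member
+ * (the group becomes empty), the last member (the previous SE becomes last
+ * by linking to itself), the first member (the list head moves to the next
+ * SE), or a middle member (the previous SE is linked past it).
+ */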
+static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ /* idx *must* be present in the leak group */
+ WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
+ &first) < 0);
+
+ if (sparx5_lg_is_singular(sparx5, layer, group)) {
+ empty = true;
+ } else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
+ empty);
+}
+
+static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
+ u32 idx)
+{
+ u32 first, next, old_group;
+
+ pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
+ idx);
+
+ /* Is this SE already shaping ? */
+ if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
+ if (old_group != new_group) {
+ /* Delete from old group */
+ sparx5_lg_del(sparx5, layer, old_group, idx);
+ } else {
+ /* Nothing to do here */
+ return 0;
+ }
+ }
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_lg_is_empty(sparx5, layer, new_group))
+ next = idx;
+ else
+ next = sparx5_lg_get_first(sparx5, layer, new_group);
+
+ return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
+ false);
+}
+
+static int sparx5_shaper_conf_set(struct sparx5_port *port,
+ const struct sparx5_shaper *sh, u32 layer,
+ u32 idx, u32 group)
+{
+ int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ if (!sh->rate && !sh->burst)
+ sparx5_lg_action = &sparx5_lg_del;
+ else
+ sparx5_lg_action = &sparx5_lg_add;
+
+ /* Select layer */
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Set frame mode */
+ spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
+ sparx5, HSCH_SE_CFG(idx));
+
+ /* Set committed rate and burst */
+ spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
+ HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
+ sparx5, HSCH_CIR_CFG(idx));
+
+ /* This has to be done after the shaper configuration has been set */
+ sparx5_lg_action(sparx5, layer, group, idx);
+
+ return 0;
+}
+
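+/* Convert a relative DWRR weight to a hardware cost: the cost scales with
+ * weight_min / weight, so larger weights get lower costs, and the smallest
+ * weight maps to SPX5_DWRR_COST_MAX - 1. The << 4 / + 8 / >> 4 sequence
+ * rounds to the nearest integer, e.g. weight_min = 10, weight = 20 gives
+ * ((((63 << 4) * 10 / 20) + 8) >> 4) - 1 = 31.
+ */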
+static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
+{
+ return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
+ 1;
+}
+
+static int sparx5_dwrr_conf_set(struct sparx5_port *port,
+ struct sparx5_dwrr *dwrr)
+{
+ int i;
+
+ spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
+ HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
+ HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
+ port->sparx5, HSCH_HSCH_CFG_CFG);
+
+ /* Number of *lower* indexes that are arbitrated dwrr */
+ spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
+ HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
+ HSCH_SE_CFG(port->portno));
+
+ for (i = 0; i < dwrr->count; i++) {
+ spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
+ HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
+ HSCH_DWRR_ENTRY(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_leak_groups_init(struct sparx5 *sparx5)
+{
+ struct sparx5_layer *layer;
+ u32 sys_clk_per_100ps;
+ struct sparx5_lg *lg;
+ u32 leak_time_us;
+ int i, ii;
+
+ sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
+
+ for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
+ layer = &layers[i];
+ for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
+ lg = &layer->leak_groups[ii];
+ lg->max_rate = spx5_hsch_max_group_rate[ii];
+
+ /* Calculate the leak time in us, to serve a maximum
+ * rate of 'max_rate' for this group
+ */
+ leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
+
+ /* Hardware wants leak time in ns */
+ lg->leak_time = 1000 * leak_time_us;
+
+ /* Calculate resolution */
+ lg->resolution = 1000 / leak_time_us;
+
+ /* Maximum number of shapers that can be served by
+ * this leak group
+ */
+ lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
+
+ /* Example:
+ * Wanted bandwidth is 100 Mbit/s:
+ *
+ * 100 Mbit/s can be served by leak group zero.
+ *
+ * leak_time is 125000 ns.
+ * resolution is: 8
+ *
+ * cir          = 100000 / 8    = 12500
+ * leaks_pr_sec = 10^9 / 125000 = 8000
+ * bw           = 12500 * 8000  = 10^8 (100 Mbit/s)
+ */
+
+ /* Disable by default - this also indicates an empty
+ * leak group
+ */
+ sparx5_lg_disable(sparx5, i, ii);
+ }
+ }
+
+ return 0;
+}
+
+int sparx5_qos_init(struct sparx5 *sparx5)
+{
+ int ret;
+
+ ret = sparx5_leak_groups_init(sparx5);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
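+/* mqprio offload: the netdev is created with SPX5_PRIOS TX queues (see
+ * sparx5_create_netdev()), so each traffic class is mapped 1:1 to its own
+ * TX queue below.
+ */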
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
+{
+ int i;
+
+ if (num_tc != SPX5_PRIOS) {
+ netdev_err(ndev, "Only %d traffic classes supported\n",
+ SPX5_PRIOS);
+ return -EINVAL;
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(ndev, i, 1, i);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
+int sparx5_tc_mqprio_del(struct net_device *ndev)
+{
+ netdev_reset_tc(ndev);
+
+ netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
+ ndev->num_tc, ndev->real_num_tx_queues);
+
+ return 0;
+}
+
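+/* tbf offload: tc hands over the rate in bytes/s and the burst in bytes.
+ * The shaper rate is programmed in kbit/s (hence the / 1000 * 8 below) and
+ * then scaled by the resolution of the selected leak group, while the burst
+ * is programmed in units of SPX5_SE_BURST_UNIT bytes.
+ */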
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {
+ .mode = SPX5_SE_MODE_DATARATE,
+ .rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
+ .burst = params->max_size,
+ };
+ struct sparx5_lg *lg;
+ u32 group;
+
+ /* Find a suitable leak group for this SE */
+ if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
+ pr_debug("Could not find a leak group for SE with rate: %u\n",
+ sh.rate);
+ return -EINVAL;
+ }
+
+ lg = &layers[layer].leak_groups[group];
+
+ pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
+
+ if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
+ return -EINVAL;
+
+ /* Calculate committed rate and burst */
+ sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
+ sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
+
+ if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
+ return -EINVAL;
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
+{
+ struct sparx5_shaper sh = {0};
+ u32 group;
+
+ sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
+
+ return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
+}
+
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params)
+{
+ struct sparx5_dwrr dwrr = {0};
+ /* Smallest weight among the dwrr bands */
+ unsigned int w_min = 100;
+ int i;
+
+ /* Find minimum weight for all dwrr bands */
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ if (params->quanta[i] == 0)
+ continue;
+ w_min = min(w_min, params->weights[i]);
+ }
+
+ for (i = 0; i < SPX5_PRIOS; i++) {
+ /* Strict band; skip */
+ if (params->quanta[i] == 0)
+ continue;
+
+ dwrr.count++;
+
+ /* On the sparx5, bands with higher indexes are preferred and
+ * arbitrated strictly. tc places the strict bands at the lower
+ * indexes, so we reverse the bands here.
+ *
+ * Also convert the weight to something the hardware
+ * understands.
+ */
+ dwrr.cost[SPX5_PRIOS - i - 1] =
+ sparx5_weight_to_hw_cost(w_min, params->weights[i]);
+ }
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
+
+int sparx5_tc_ets_del(struct sparx5_port *port)
+{
+ struct sparx5_dwrr dwrr = {0};
+
+ return sparx5_dwrr_conf_set(port, &dwrr);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
new file mode 100644
index 000000000000..ced35033a6c5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_QOS_H__
+#define __SPARX5_QOS_H__
+
+#include <linux/netdevice.h>
+
+/* Number of Layers */
+#define SPX5_HSCH_LAYER_CNT 3
+
+/* Scheduling elements per layer */
+#define SPX5_HSCH_L0_SE_CNT 5040
+#define SPX5_HSCH_L1_SE_CNT 64
+#define SPX5_HSCH_L2_SE_CNT 64
+
+/* Calculate Layer 0 Scheduler Element when using normal hierarchy */
+#define SPX5_HSCH_L0_GET_IDX(port, queue) ((64 * (port)) + (8 * (queue)))
+
+/* Number of leak groups */
+#define SPX5_HSCH_LEAK_GRP_CNT 4
+
+/* Scheduler modes */
+#define SPX5_SE_MODE_LINERATE 0
+#define SPX5_SE_MODE_DATARATE 1
+
+/* Rate and burst */
+#define SPX5_SE_RATE_MAX 262143
+#define SPX5_SE_BURST_MAX 127
+#define SPX5_SE_RATE_MIN 1
+#define SPX5_SE_BURST_MIN 1
+#define SPX5_SE_BURST_UNIT 4096
+
+/* Dwrr */
+#define SPX5_DWRR_COST_MAX 63
+
+struct sparx5_shaper {
+ u32 mode;
+ u32 rate;
+ u32 burst;
+};
+
+struct sparx5_lg {
+ u32 max_rate;
+ u32 resolution;
+ u32 leak_time;
+ u32 max_ses;
+};
+
+struct sparx5_layer {
+ struct sparx5_lg leak_groups[SPX5_HSCH_LEAK_GRP_CNT];
+};
+
+struct sparx5_dwrr {
+ u32 count; /* Number of inputs running dwrr */
+ u8 cost[SPX5_PRIOS];
+};
+
+int sparx5_qos_init(struct sparx5 *sparx5);
+
+/* Multi-Queue Priority */
+int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc);
+int sparx5_tc_mqprio_del(struct net_device *ndev);
+
+/* Token Bucket Filter */
+struct tc_tbf_qopt_offload_replace_params;
+int sparx5_tc_tbf_add(struct sparx5_port *port,
+ struct tc_tbf_qopt_offload_replace_params *params,
+ u32 layer, u32 idx);
+int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx);
+
+/* Enhanced Transmission Selection */
+struct tc_ets_qopt_offload_replace_params;
+int sparx5_tc_ets_add(struct sparx5_port *port,
+ struct tc_ets_qopt_offload_replace_params *params);
+
+int sparx5_tc_ets_del(struct sparx5_port *port);
+
+#endif /* __SPARX5_QOS_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index ec07f7d0528c..4af85d108a06 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -29,14 +29,23 @@ static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
return 0;
}
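+/* Recompute the IP multicast flood masks (PGID_IPV4_MC_DATA through
+ * PGID_IPV6_MC_CTRL) for a port: the port must be flooded to if either
+ * mcast flooding is enabled on the bridge port or the port is an mrouter
+ * port.
+ */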
+static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
+{
+ bool should_flood = flood_flag || port->is_mrouter;
+ int pgid;
+
+ for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
+ sparx5_pgid_update_mask(port, pgid, should_flood);
+}
+
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
struct switchdev_brport_flags flags)
{
- int pgid;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
+ sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
+ }
- if (flags.mask & BR_MCAST_FLOOD)
- for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
- sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
if (flags.mask & BR_FLOOD)
sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
if (flags.mask & BR_BCAST_FLOOD)
@@ -82,6 +91,37 @@ static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
sparx5_set_ageing(port->sparx5, ageing_time);
}
+static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
+ struct net_device *orig_dev,
+ bool enable)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mdb_entry *e;
+ bool flood_flag;
+
+ if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
+ return;
+
+ /* Add/del mrouter port on all active mdb entries in HW.
+ * Don't change entry port mask, since that represents
+ * ports that actually joined that group.
+ */
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (!test_bit(port->portno, e->port_mask) &&
+ ether_addr_is_ip_mcast(e->addr))
+ sparx5_pgid_update_mask(port, e->pgid_idx, enable);
+ }
+ mutex_unlock(&sparx5->mdb_lock);
+
+ /* Enable/disable flooding depending on whether the port is an
+ * mrouter port or mcast flooding is enabled.
+ */
+ port->is_mrouter = enable;
+ flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
+ sparx5_port_update_mcast_ip_flood(port, flood_flag);
+}
+
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -110,6 +150,11 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
port->vlan_aware = attr->u.vlan_filtering;
sparx5_vlan_port_apply(port->sparx5, port);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ sparx5_port_attr_mrouter_set(port,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -386,16 +431,95 @@ static int sparx5_handle_port_vlan_add(struct net_device *dev,
v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}
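+/* MDB entries are tracked in a software list (sparx5->mdb_entries, protected
+ * by mdb_lock) so that the joined port mask, CPU copy state and mrouter
+ * handling can be maintained per group across add/del operations.
+ */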
+static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid,
+ struct sparx5_mdb_entry **entry_out)
+{
+ struct sparx5_mdb_entry *entry;
+ u16 pgid_idx;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
+ if (err) {
+ kfree(entry);
+ return err;
+ }
+
+ memcpy(entry->addr, addr, ETH_ALEN);
+ entry->vid = vid;
+ entry->pgid_idx = pgid_idx;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_add_tail(&entry->list, &sparx5->mdb_entries);
+ mutex_unlock(&sparx5->mdb_lock);
+
+ *entry_out = entry;
+ return 0;
+}
+
+static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *entry, *tmp;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
+ if ((vid == 0 || entry->vid == vid) &&
+ ether_addr_equal(addr, entry->addr)) {
+ list_del(&entry->list);
+
+ sparx5_pgid_free(sparx5, entry->pgid_idx);
+ kfree(entry);
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+}
+
+static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
+ const unsigned char *addr,
+ u16 vid)
+{
+ struct sparx5_mdb_entry *e, *found = NULL;
+
+ mutex_lock(&sparx5->mdb_lock);
+ list_for_each_entry(e, &sparx5->mdb_entries, list) {
+ if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
+ found = e;
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&sparx5->mdb_lock);
+ return found;
+}
+
+static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
+{
+ spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
+ ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
+ ANA_AC_PGID_MISC_CFG(pgid));
+}
+
static int sparx5_handle_port_mdb_add(struct net_device *dev,
struct notifier_block *nb,
const struct switchdev_obj_port_mdb *v)
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
- u16 pgid_idx, vid;
- u32 mact_entry;
- bool is_host;
- int res, err;
+ struct sparx5_mdb_entry *entry;
+ bool is_host, is_new;
+ int err, i;
+ u16 vid;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
@@ -410,66 +534,36 @@ static int sparx5_handle_port_mdb_add(struct net_device *dev,
else
vid = v->vid;
- res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
-
- if (res == 0) {
- pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
-
- /* MC_IDX starts after the port masks in the PGID table */
- pgid_idx += SPX5_PORTS;
-
- if (is_host)
- spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
- ANA_AC_PGID_MISC_CFG(pgid_idx));
- else
- sparx5_pgid_update_mask(port, pgid_idx, true);
-
- } else {
- err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
- if (err) {
- netdev_warn(dev, "multicast pgid table full\n");
+ is_new = false;
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry) {
+ err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
+ is_new = true;
+ if (err)
return err;
- }
-
- if (is_host)
- spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
- ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
- ANA_AC_PGID_MISC_CFG(pgid_idx));
- else
- sparx5_pgid_update_mask(port, pgid_idx, true);
-
- err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
-
- if (err) {
- netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
- sparx5_pgid_free(spx5, pgid_idx);
- sparx5_pgid_update_mask(port, pgid_idx, false);
- return err;
- }
}
- return 0;
-}
+ mutex_lock(&spx5->mdb_lock);
+
+ /* Add any mrouter ports to the new entry */
+ if (is_new && ether_addr_is_ip_mcast(v->addr))
+ for (i = 0; i < SPX5_PORTS; i++)
+ if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
+ sparx5_pgid_update_mask(spx5->ports[i],
+ entry->pgid_idx,
+ true);
+
+ if (is_host && !entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
+ entry->cpu_copy = true;
+ } else if (!is_host) {
+ sparx5_pgid_update_mask(port, entry->pgid_idx, true);
+ set_bit(port->portno, entry->port_mask);
+ }
+ mutex_unlock(&spx5->mdb_lock);
-static int sparx5_mdb_del_entry(struct net_device *dev,
- struct sparx5 *spx5,
- const unsigned char mac[ETH_ALEN],
- const u16 vid,
- u16 pgid_idx)
-{
- int err;
+ sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);
- err = sparx5_mact_forget(spx5, mac, vid);
- if (err) {
- netdev_warn(dev, "could not forget mac address %pM", mac);
- return err;
- }
- err = sparx5_pgid_free(spx5, pgid_idx);
- if (err) {
- netdev_err(dev, "attempted to free already freed pgid\n");
- return err;
- }
return 0;
}
@@ -479,42 +573,45 @@ static int sparx5_handle_port_mdb_del(struct net_device *dev,
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *spx5 = port->sparx5;
- u16 pgid_idx, vid;
- u32 mact_entry, res, pgid_entry[3], misc_cfg;
- bool host_ena;
+ struct sparx5_mdb_entry *entry;
+ bool is_host;
+ u16 vid;
if (!sparx5_netdevice_check(dev))
return -EOPNOTSUPP;
+ is_host = netif_is_bridge_master(v->obj.orig_dev);
+
if (!br_vlan_enabled(spx5->hw_bridge_dev))
vid = 1;
else
vid = v->vid;
- res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
-
- if (res == 0) {
- pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
-
- /* MC_IDX starts after the port masks in the PGID table */
- pgid_idx += SPX5_PORTS;
-
- if (netif_is_bridge_master(v->obj.orig_dev))
- spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
- ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
- ANA_AC_PGID_MISC_CFG(pgid_idx));
- else
- sparx5_pgid_update_mask(port, pgid_idx, false);
+ entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
+ if (!entry)
+ return 0;
- misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
- host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);
+ mutex_lock(&spx5->mdb_lock);
+ if (is_host && entry->cpu_copy) {
+ sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
+ entry->cpu_copy = false;
+ } else if (!is_host) {
+ clear_bit(port->portno, entry->port_mask);
- sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
- if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
- /* No ports or CPU are in MC group. Remove entry */
- return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
+ /* If the port is not an mrouter port, or the address is L2
+ * (non-IP) mcast, remove the port from the pgid mask.
+ */
+ if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
+ sparx5_pgid_update_mask(port, entry->pgid_idx, false);
+ }
+ mutex_unlock(&spx5->mdb_lock);
+
+ if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
+ /* Clear the pgid in case mrouter ports exist
+ * that are not part of the group.
+ */
+ sparx5_pgid_clear(spx5, entry->pgid_idx);
+ sparx5_mact_forget(spx5, entry->addr, entry->vid);
+ sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
}
-
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
new file mode 100644
index 000000000000..e05429c751ee
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/pkt_cls.h>
+
+#include "sparx5_tc.h"
+#include "sparx5_main.h"
+#include "sparx5_qos.h"
+
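+/* Map a qdisc parent handle to a scheduler element: the root qdisc attaches
+ * to the layer 2 (per-port) SE, while a per-queue parent attaches to the
+ * layer 0 SE given by SPX5_HSCH_L0_GET_IDX(port, queue).
+ */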
+static void sparx5_tc_get_layer_and_idx(u32 parent, u32 portno, u32 *layer,
+ u32 *idx)
+{
+ if (parent == TC_H_ROOT) {
+ *layer = 2;
+ *idx = portno;
+ } else {
+ u32 queue = TC_H_MIN(parent) - 1;
+ *layer = 0;
+ *idx = SPX5_HSCH_L0_GET_IDX(portno, queue);
+ }
+}
+
+static int sparx5_tc_setup_qdisc_mqprio(struct net_device *ndev,
+ struct tc_mqprio_qopt_offload *m)
+{
+ m->qopt.hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ if (m->qopt.num_tc == 0)
+ return sparx5_tc_mqprio_del(ndev);
+ else
+ return sparx5_tc_mqprio_add(ndev, m->qopt.num_tc);
+}
+
+static int sparx5_tc_setup_qdisc_tbf(struct net_device *ndev,
+ struct tc_tbf_qopt_offload *qopt)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 layer, se_idx;
+
+ sparx5_tc_get_layer_and_idx(qopt->parent, port->portno, &layer,
+ &se_idx);
+
+ switch (qopt->command) {
+ case TC_TBF_REPLACE:
+ return sparx5_tc_tbf_add(port, &qopt->replace_params, layer,
+ se_idx);
+ case TC_TBF_DESTROY:
+ return sparx5_tc_tbf_del(port, layer, se_idx);
+ case TC_TBF_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int sparx5_tc_setup_qdisc_ets(struct net_device *ndev,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct tc_ets_qopt_offload_replace_params *params =
+ &qopt->replace_params;
+ struct sparx5_port *port = netdev_priv(ndev);
+ int i;
+
+ /* Only allow ets on ports */
+ if (qopt->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+
+ /* We support eight priorities */
+ if (params->bands != SPX5_PRIOS)
+ return -EOPNOTSUPP;
+
+ /* Sanity checks */
+ for (i = 0; i < SPX5_PRIOS; ++i) {
+ /* Priority map is *always* reverse, e.g.: 7 6 5 .. 0 */
+ if (params->priomap[i] != (7 - i))
+ return -EOPNOTSUPP;
+ /* Throw an error if we receive zero weights from tc */
+ if (params->quanta[i] && params->weights[i] == 0) {
+ pr_err("Invalid ets configuration; band %d has weight zero\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ sparx5_tc_ets_add(port, params);
+ break;
+ case TC_ETS_DESTROY:
+
+ sparx5_tc_ets_del(port);
+
+ break;
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return sparx5_tc_setup_qdisc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TBF:
+ return sparx5_tc_setup_qdisc_tbf(ndev, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return sparx5_tc_setup_qdisc_ets(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
new file mode 100644
index 000000000000..5b55e11b77e1
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#ifndef __SPARX5_TC_H__
+#define __SPARX5_TC_H__
+
+#include <linux/netdevice.h>
+
+int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+
+#endif /* __SPARX5_TC_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 37e4ac965849..34f954bbf815 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -138,6 +138,13 @@ void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
}
}
+void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
+{
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
+ spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
+}
+
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 5f9240182351..a6f99b4344d9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -397,6 +397,11 @@ static void mana_gd_process_eq_events(void *arg)
break;
}
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading eqe.
+ */
+ rmb();
+
mana_gd_process_eqe(eq);
eq->head++;
@@ -1134,6 +1139,11 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
if (WARN_ON_ONCE(owner_bits != new_bits))
return -1;
+ /* Per GDMA spec, rmb is necessary after checking owner_bits, before
+ * reading completion info
+ */
+ rmb();
+
comp->wq_num = cqe->cqe_info.wq_num;
comp->is_sq = cqe->cqe_info.is_sq;
memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
@@ -1465,10 +1475,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
static const struct pci_device_id mana_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 9e57d23e57bf..3da99b62797d 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -29,12 +29,12 @@
#include "moxart_ether.h"
-static inline void moxart_desc_write(u32 data, u32 *desc)
+static inline void moxart_desc_write(u32 data, __le32 *desc)
{
*desc = cpu_to_le32(data);
}
-static inline u32 moxart_desc_read(u32 *desc)
+static inline u32 moxart_desc_read(__le32 *desc)
{
return le32_to_cpu(*desc);
}
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 41b34a509308..5d435a565d4c 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -2,16 +2,17 @@
obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o
mscc_ocelot_switch_lib-y := \
ocelot.o \
+ ocelot_devlink.o \
+ ocelot_flower.o \
ocelot_io.o \
ocelot_police.o \
- ocelot_vcap.o \
- ocelot_flower.o \
ocelot_ptp.o \
- ocelot_devlink.o \
+ ocelot_stats.o \
+ ocelot_vcap.o \
vsc7514_regs.o
mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_fdma.o \
- ocelot_vsc7514.o \
- ocelot_net.o
+ ocelot_net.o \
+ ocelot_vsc7514.o
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 306026e6aa11..13b14110a060 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,7 +6,6 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
-#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
@@ -290,6 +289,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
if (!(vlan->portmask & BIT(port)))
continue;
+ /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+ * because this is never active in hardware at the same time as
+ * the bridge VLANs, which only matter in VLAN-aware mode.
+ */
+ if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+ continue;
+
if (vlan->untagged & BIT(port))
num_untagged++;
}
@@ -910,211 +916,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
-static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
- struct sk_buff *clone)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- unsigned long flags;
-
- spin_lock_irqsave(&ocelot->ts_id_lock, flags);
-
- if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
- ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
- return -EBUSY;
- }
-
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
- OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
-
- ocelot_port->ts_id++;
- if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
- ocelot_port->ts_id = 0;
-
- ocelot_port->ptp_skbs_in_flight++;
- ocelot->ptp_skbs_in_flight++;
-
- skb_queue_tail(&ocelot_port->tx_skbs, clone);
-
- spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
-
- return 0;
-}
-
-static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
- unsigned int ptp_class)
-{
- struct ptp_header *hdr;
- u8 msgtype, twostep;
-
- hdr = ptp_parse_header(skb, ptp_class);
- if (!hdr)
- return false;
-
- msgtype = ptp_get_msgtype(hdr, ptp_class);
- twostep = hdr->flag_field[0] & 0x2;
-
- if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
- return true;
-
- return false;
-}
-
-int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
- struct sk_buff *skb,
- struct sk_buff **clone)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u8 ptp_cmd = ocelot_port->ptp_cmd;
- unsigned int ptp_class;
- int err;
-
- /* Don't do anything if PTP timestamping not enabled */
- if (!ptp_cmd)
- return 0;
-
- ptp_class = ptp_classify_raw(skb);
- if (ptp_class == PTP_CLASS_NONE)
- return -EINVAL;
-
- /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
- if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
- if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
- OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
- return 0;
- }
-
- /* Fall back to two-step timestamping */
- ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- }
-
- if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- *clone = skb_clone_sk(skb);
- if (!(*clone))
- return -ENOMEM;
-
- err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
- if (err)
- return err;
-
- OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
- OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ocelot_port_txtstamp_request);
-
-static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
- struct timespec64 *ts)
-{
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-
- /* Read current PTP time to get seconds */
- val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
-
- val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
- val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
- ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
- ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
-
- /* Read packet HW timestamp from FIFO */
- val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
- ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
-
- /* Sec has incremented since the ts was registered */
- if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
- ts->tv_sec--;
-
- spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
-}
-
-static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
-{
- struct ptp_header *hdr;
-
- hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
- if (WARN_ON(!hdr))
- return false;
-
- return seqid == ntohs(hdr->sequence_id);
-}
-
-void ocelot_get_txtstamp(struct ocelot *ocelot)
-{
- int budget = OCELOT_PTP_QUEUE_SZ;
-
- while (budget--) {
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
- struct skb_shared_hwtstamps shhwtstamps;
- u32 val, id, seqid, txport;
- struct ocelot_port *port;
- struct timespec64 ts;
- unsigned long flags;
-
- val = ocelot_read(ocelot, SYS_PTP_STATUS);
-
- /* Check if a timestamp can be retrieved */
- if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
- break;
-
- WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
-
- /* Retrieve the ts ID and Tx port */
- id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
- txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
- seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
-
- port = ocelot->ports[txport];
-
- spin_lock(&ocelot->ts_id_lock);
- port->ptp_skbs_in_flight--;
- ocelot->ptp_skbs_in_flight--;
- spin_unlock(&ocelot->ts_id_lock);
-
- /* Retrieve its associated skb */
-try_again:
- spin_lock_irqsave(&port->tx_skbs.lock, flags);
-
- skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
- if (OCELOT_SKB_CB(skb)->ts_id != id)
- continue;
- __skb_unlink(skb, &port->tx_skbs);
- skb_match = skb;
- break;
- }
-
- spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
-
- if (WARN_ON(!skb_match))
- continue;
-
- if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
- dev_err_ratelimited(ocelot->dev,
- "port %d received stale TX timestamp for seqid %d, discarding\n",
- txport, seqid);
- dev_kfree_skb_any(skb);
- goto try_again;
- }
-
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
- /* Set the timestamp into the skb */
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_complete_tx_timestamp(skb_match, &shhwtstamps);
-
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
- }
-}
-EXPORT_SYMBOL(ocelot_get_txtstamp);
-
static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
u32 *rval)
{
@@ -1366,50 +1167,6 @@ int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
}
EXPORT_SYMBOL(ocelot_fdb_del);
-int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data)
-{
- struct ocelot_dump_ctx *dump = data;
- u32 portid = NETLINK_CB(dump->cb->skb).portid;
- u32 seq = dump->cb->nlh->nlmsg_seq;
- struct nlmsghdr *nlh;
- struct ndmsg *ndm;
-
- if (dump->idx < dump->cb->args[2])
- goto skip;
-
- nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
- sizeof(*ndm), NLM_F_MULTI);
- if (!nlh)
- return -EMSGSIZE;
-
- ndm = nlmsg_data(nlh);
- ndm->ndm_family = AF_BRIDGE;
- ndm->ndm_pad1 = 0;
- ndm->ndm_pad2 = 0;
- ndm->ndm_flags = NTF_SELF;
- ndm->ndm_type = 0;
- ndm->ndm_ifindex = dump->dev->ifindex;
- ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
- if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
- goto nla_put_failure;
-
- if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
- goto nla_put_failure;
-
- nlmsg_end(dump->skb, nlh);
-
-skip:
- dump->idx++;
- return 0;
-
-nla_put_failure:
- nlmsg_cancel(dump->skb, nlh);
- return -EMSGSIZE;
-}
-EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
-
/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry)
@@ -1541,53 +1298,6 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_fdb_dump);
-static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
-}
-
-static void
-ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV4;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv4.dport.value = PTP_EV_PORT;
- trap->key.ipv4.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv6.dport.value = PTP_EV_PORT;
- trap->key.ipv6.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV4;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv4.dport.value = PTP_GEN_PORT;
- trap->key.ipv4.dport.mask = 0xffff;
-}
-
-static void
-ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
-{
- trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
- trap->key.ipv6.dport.value = PTP_GEN_PORT;
- trap->key.ipv6.dport.mask = 0xffff;
-}
-
int ocelot_trap_add(struct ocelot *ocelot, int port,
unsigned long cookie, bool take_ts,
void (*populate)(struct ocelot_vcap_filter *f))
@@ -1656,381 +1366,6 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
return ocelot_vcap_filter_replace(ocelot, trap);
}
-static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
-
- return ocelot_trap_add(ocelot, port, l2_cookie, true,
- ocelot_populate_l2_ptp_trap_key);
-}
-
-static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
-
- return ocelot_trap_del(ocelot, port, l2_cookie);
-}
-
-static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
- unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
- ocelot_populate_ipv4_ptp_event_trap_key);
- if (err)
- return err;
-
- err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
- ocelot_populate_ipv4_ptp_general_trap_key);
- if (err)
- ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
-
- return err;
-}
-
-static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
- unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
- err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
- return err;
-}
-
-static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
-{
- unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
- unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
- ocelot_populate_ipv6_ptp_event_trap_key);
- if (err)
- return err;
-
- err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
- ocelot_populate_ipv6_ptp_general_trap_key);
- if (err)
- ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
-
- return err;
-}
-
-static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
-{
- unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
- unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
- int err;
-
- err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
- err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
- return err;
-}
-
-static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
- bool l2, bool l4)
-{
- int err;
-
- if (l2)
- err = ocelot_l2_ptp_trap_add(ocelot, port);
- else
- err = ocelot_l2_ptp_trap_del(ocelot, port);
- if (err)
- return err;
-
- if (l4) {
- err = ocelot_ipv4_ptp_trap_add(ocelot, port);
- if (err)
- goto err_ipv4;
-
- err = ocelot_ipv6_ptp_trap_add(ocelot, port);
- if (err)
- goto err_ipv6;
- } else {
- err = ocelot_ipv4_ptp_trap_del(ocelot, port);
-
- err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
- }
- if (err)
- return err;
-
- return 0;
-
-err_ipv6:
- ocelot_ipv4_ptp_trap_del(ocelot, port);
-err_ipv4:
- if (l2)
- ocelot_l2_ptp_trap_del(ocelot, port);
- return err;
-}
-
-int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
-{
- return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
- sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(ocelot_hwstamp_get);
-
-int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- bool l2 = false, l4 = false;
- struct hwtstamp_config cfg;
- int err;
-
- if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
- return -EFAULT;
-
- /* Tx type sanity check */
- switch (cfg.tx_type) {
- case HWTSTAMP_TX_ON:
- ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
- break;
- case HWTSTAMP_TX_ONESTEP_SYNC:
- /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we
- * need to update the origin time.
- */
- ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
- break;
- case HWTSTAMP_TX_OFF:
- ocelot_port->ptp_cmd = 0;
- break;
- default:
- return -ERANGE;
- }
-
- mutex_lock(&ocelot->ptp_lock);
-
- switch (cfg.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- l2 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- l2 = true;
- l4 = true;
- break;
- default:
- mutex_unlock(&ocelot->ptp_lock);
- return -ERANGE;
- }
-
- err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
- if (err) {
- mutex_unlock(&ocelot->ptp_lock);
- return err;
- }
-
- if (l2 && l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- else if (l2)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (l4)
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- else
- cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-
- /* Commit back the result & save it */
- memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
- mutex_unlock(&ocelot->ptp_lock);
-
- return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
-}
-EXPORT_SYMBOL(ocelot_hwstamp_set);
-
-void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
-{
- int i;
-
- if (sset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (ocelot->stats_layout[i].name[0] == '\0')
- continue;
-
- memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
- ETH_GSTRING_LEN);
- }
-}
-EXPORT_SYMBOL(ocelot_get_strings);
-
-/* Caller must hold &ocelot->stats_lock */
-static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
-{
- unsigned int idx = port * OCELOT_NUM_STATS;
- struct ocelot_stats_region *region;
- int err, j;
-
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
-
- list_for_each_entry(region, &ocelot->stats_regions, node) {
- err = ocelot_bulk_read(ocelot, region->base, region->buf,
- region->count);
- if (err)
- return err;
-
- for (j = 0; j < region->count; j++) {
- u64 *stat = &ocelot->stats[idx + j];
- u64 val = region->buf[j];
-
- if (val < (*stat & U32_MAX))
- *stat += (u64)1 << 32;
-
- *stat = (*stat & ~(u64)U32_MAX) + val;
- }
-
- idx += region->count;
- }
-
- return err;
-}
-
-static void ocelot_check_stats_work(struct work_struct *work)
-{
- struct delayed_work *del_work = to_delayed_work(work);
- struct ocelot *ocelot = container_of(del_work, struct ocelot,
- stats_work);
- int i, err;
-
- spin_lock(&ocelot->stats_lock);
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- err = ocelot_port_update_stats(ocelot, i);
- if (err)
- break;
- }
- spin_unlock(&ocelot->stats_lock);
-
- if (err)
- dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
-
- queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
- OCELOT_STATS_CHECK_DELAY);
-}
-
-void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
-{
- int i, err;
-
- spin_lock(&ocelot->stats_lock);
-
- /* check and update now */
- err = ocelot_port_update_stats(ocelot, port);
-
- /* Copy all supported counters */
- for (i = 0; i < OCELOT_NUM_STATS; i++) {
- int index = port * OCELOT_NUM_STATS + i;
-
- if (ocelot->stats_layout[i].name[0] == '\0')
- continue;
-
- *data++ = ocelot->stats[index];
- }
-
- spin_unlock(&ocelot->stats_lock);
-
- if (err)
- dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
-}
-EXPORT_SYMBOL(ocelot_get_ethtool_stats);
-
-int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
-{
- int i, num_stats = 0;
-
- if (sset != ETH_SS_STATS)
- return -EOPNOTSUPP;
-
- for (i = 0; i < OCELOT_NUM_STATS; i++)
- if (ocelot->stats_layout[i].name[0] != '\0')
- num_stats++;
-
- return num_stats;
-}
-EXPORT_SYMBOL(ocelot_get_sset_count);
-
-static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
-{
- struct ocelot_stats_region *region = NULL;
- unsigned int last;
- int i;
-
- INIT_LIST_HEAD(&ocelot->stats_regions);
-
- for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (ocelot->stats_layout[i].name[0] == '\0')
- continue;
-
- if (region && ocelot->stats_layout[i].reg == last + 4) {
- region->count++;
- } else {
- region = devm_kzalloc(ocelot->dev, sizeof(*region),
- GFP_KERNEL);
- if (!region)
- return -ENOMEM;
-
- region->base = ocelot->stats_layout[i].reg;
- region->count = 1;
- list_add_tail(&region->node, &ocelot->stats_regions);
- }
-
- last = ocelot->stats_layout[i].reg;
- }
-
- list_for_each_entry(region, &ocelot->stats_regions, node) {
- region->buf = devm_kcalloc(ocelot->dev, region->count,
- sizeof(*region->buf), GFP_KERNEL);
- if (!region->buf)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info)
-{
- info->phc_index = ocelot->ptp_clock ?
- ptp_clock_index(ocelot->ptp_clock) : -1;
- if (info->phc_index == -1) {
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- return 0;
- }
- info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
- BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
-
- return 0;
-}
-EXPORT_SYMBOL(ocelot_get_ts_info);
-
static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
{
u32 mask = 0;
@@ -2054,7 +1389,7 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
/* The logical port number of a LAG is equal to the lowest numbered physical
* port ID present in that LAG. It may change if that port ever leaves the LAG.
*/
-static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
+int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
{
int bond_mask = ocelot_get_bond_mask(ocelot, bond);
@@ -2063,7 +1398,18 @@ static int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
return __ffs(bond_mask);
}
+EXPORT_SYMBOL_GPL(ocelot_bond_get_id);
+/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port.
+ * Note that when CPU ports are in a LAG, the user ports are assigned to the
+ * 'primary' CPU port, the one whose physical port number gives the logical
+ * port number of the LAG.
+ *
+ * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG
+ * (to which no user port is assigned), but it appears that forwarding from
+ * this secondary CPU port looks at the PGID_SRC associated with the logical
+ * port ID that it's assigned to, which *is* configured properly.
+ */
static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
struct ocelot_port *cpu)
{
@@ -2080,9 +1426,15 @@ static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
mask |= BIT(port);
}
+ if (cpu->bond)
+ mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
+
return mask;
}
+/* Returns the DSA tag_8021q CPU port that the given port is assigned to,
+ * or the bit mask of CPU ports if said CPU port is in a LAG.
+ */
u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -2091,6 +1443,9 @@ u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
if (!cpu_port)
return 0;
+ if (cpu_port->bond)
+ return ocelot_get_bond_mask(ocelot, cpu_port->bond);
+
return BIT(cpu_port->index);
}
EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
@@ -2214,61 +1569,61 @@ static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
}
-void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
- int cpu)
+void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
-
- if (!cpu_port->is_dsa_8021q_cpu) {
- cpu_port->is_dsa_8021q_cpu = true;
+ cpu_port->is_dsa_8021q_cpu = true;
- for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_add(ocelot, cpu, vid, true);
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_add(ocelot, cpu, vid, true);
- ocelot_update_pgid_cpu(ocelot);
- }
-
- ocelot_apply_bridge_fwd_mask(ocelot, true);
+ ocelot_update_pgid_cpu(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
-EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu);
-void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
- struct ocelot_port *cpu_port = ocelot->ports[port]->dsa_8021q_cpu;
- bool keep = false;
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
- int p;
mutex_lock(&ocelot->fwd_domain_lock);
- ocelot->ports[port]->dsa_8021q_cpu = NULL;
+ cpu_port->is_dsa_8021q_cpu = false;
- for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (!ocelot->ports[p])
- continue;
+ for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
+ ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
- if (ocelot->ports[p]->dsa_8021q_cpu == cpu_port) {
- keep = true;
- break;
- }
- }
+ ocelot_update_pgid_cpu(ocelot);
- if (!keep) {
- cpu_port->is_dsa_8021q_cpu = false;
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu);
- for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
+void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
+ int cpu)
+{
+ struct ocelot_port *cpu_port = ocelot->ports[cpu];
- ocelot_update_pgid_cpu(ocelot);
- }
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
+ ocelot_apply_bridge_fwd_mask(ocelot, true);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
+
+void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
+{
+ mutex_lock(&ocelot->fwd_domain_lock);
+ ocelot->ports[port]->dsa_8021q_cpu = NULL;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
@@ -2785,10 +2140,14 @@ static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return -EOPNOTSUPP;
+ }
mutex_lock(&ocelot->fwd_domain_lock);
@@ -3358,7 +2717,6 @@ static void ocelot_detect_features(struct ocelot *ocelot)
int ocelot_init(struct ocelot *ocelot)
{
- char queue_name[32];
int i, ret;
u32 port;
@@ -3370,29 +2728,21 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->stats = devm_kcalloc(ocelot->dev,
- ocelot->num_phys_ports * OCELOT_NUM_STATS,
- sizeof(u64), GFP_KERNEL);
- if (!ocelot->stats)
- return -ENOMEM;
-
- spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
mutex_init(&ocelot->tas_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
- snprintf(queue_name, sizeof(queue_name), "%s-stats",
- dev_name(ocelot->dev));
- ocelot->stats_queue = create_singlethread_workqueue(queue_name);
- if (!ocelot->stats_queue)
- return -ENOMEM;
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
- if (!ocelot->owq) {
- destroy_workqueue(ocelot->stats_queue);
+ if (!ocelot->owq)
return -ENOMEM;
+
+ ret = ocelot_stats_init(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->owq);
+ return ret;
}
INIT_LIST_HEAD(&ocelot->multicast);
@@ -3504,25 +2854,13 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
- ret = ocelot_prepare_stats_regions(ocelot);
- if (ret) {
- destroy_workqueue(ocelot->stats_queue);
- destroy_workqueue(ocelot->owq);
- return ret;
- }
-
- INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
- queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
- OCELOT_STATS_CHECK_DELAY);
-
return 0;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- cancel_delayed_work(&ocelot->stats_work);
- destroy_workqueue(ocelot->stats_queue);
+ ocelot_stats_deinit(ocelot);
destroy_workqueue(ocelot->owq);
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 6d65cc87d757..70dbd9c4e512 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -51,13 +51,6 @@ struct ocelot_port_private {
struct ocelot_port_tc tc;
};
-struct ocelot_dump_ctx {
- struct net_device *dev;
- struct sk_buff *skb;
- struct netlink_callback *cb;
- int idx;
-};
-
/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
* possibilities of egress port masks for L2 multicast traffic.
* For a switch with 9 user ports, there are 512 possible port masks, but the
@@ -84,8 +77,6 @@ struct ocelot_multicast {
int ocelot_bridge_num_find(struct ocelot *ocelot,
const struct net_device *bridge);
-int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
- bool is_static, void *data);
int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type);
@@ -115,6 +106,9 @@ struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
struct netlink_ext_ack *extack);
void ocelot_mirror_put(struct ocelot *ocelot);
+int ocelot_stats_init(struct ocelot *ocelot);
+void ocelot_stats_deinit(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 330d30841cdc..50858cc10fef 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -20,6 +20,13 @@
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
+struct ocelot_dump_ctx {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx;
+};
+
static bool ocelot_netdevice_dev_check(const struct net_device *dev);
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
@@ -725,42 +732,8 @@ static void ocelot_get_stats64(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
- u64 *s;
-
- spin_lock(&ocelot->stats_lock);
-
- s = &ocelot->stats[port * OCELOT_NUM_STATS];
-
- /* Get Rx stats */
- stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
- stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
- s[OCELOT_STAT_RX_FRAGMENTS] +
- s[OCELOT_STAT_RX_JABBERS] +
- s[OCELOT_STAT_RX_LONGS] +
- s[OCELOT_STAT_RX_64] +
- s[OCELOT_STAT_RX_65_127] +
- s[OCELOT_STAT_RX_128_255] +
- s[OCELOT_STAT_RX_256_511] +
- s[OCELOT_STAT_RX_512_1023] +
- s[OCELOT_STAT_RX_1024_1526] +
- s[OCELOT_STAT_RX_1527_MAX];
- stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
- stats->rx_dropped = dev->stats.rx_dropped;
-
- /* Get Tx stats */
- stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
- stats->tx_packets = s[OCELOT_STAT_TX_64] +
- s[OCELOT_STAT_TX_65_127] +
- s[OCELOT_STAT_TX_128_255] +
- s[OCELOT_STAT_TX_256_511] +
- s[OCELOT_STAT_TX_512_1023] +
- s[OCELOT_STAT_TX_1024_1526] +
- s[OCELOT_STAT_TX_1527_MAX];
- stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
- s[OCELOT_STAT_TX_AGED];
- stats->collisions = s[OCELOT_STAT_TX_COLLISION];
-
- spin_unlock(&ocelot->stats_lock);
+
+ return ocelot_port_get_stats64(ocelot, port, stats);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -790,6 +763,49 @@ static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge);
}
+static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+ bool is_static, void *data)
+{
+ struct ocelot_dump_ctx *dump = data;
+ u32 portid = NETLINK_CB(dump->cb->skb).portid;
+ u32 seq = dump->cb->nlh->nlmsg_seq;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ if (dump->idx < dump->cb->args[2])
+ goto skip;
+
+ nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+ sizeof(*ndm), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = AF_BRIDGE;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = NTF_SELF;
+ ndm->ndm_type = 0;
+ ndm->ndm_ifindex = dump->dev->ifindex;
+ ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
+ goto nla_put_failure;
+
+ if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
+ goto nla_put_failure;
+
+ nlmsg_end(dump->skb, nlh);
+
+skip:
+ dump->idx++;
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(dump->skb, nlh);
+ return -EMSGSIZE;
+}
+
static int ocelot_port_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1396,11 +1412,10 @@ static int ocelot_netdevice_lag_join(struct net_device *dev,
int port = priv->port.index;
int err;
- err = ocelot_port_lag_join(ocelot, port, bond, info);
- if (err == -EOPNOTSUPP) {
- NL_SET_ERR_MSG_MOD(extack, "Offloading not supported");
+ err = ocelot_port_lag_join(ocelot, port, bond, info, extack);
+ if (err == -EOPNOTSUPP)
+ /* Offloading not supported, fall back to software LAG */
return 0;
- }
bridge_dev = netdev_master_upper_dev_get(bond);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 09c703efe946..1a82f10c8853 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -6,9 +6,13 @@
*/
#include <linux/time64.h>
+#include <linux/dsa/ocelot.h>
+#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
+#include "ocelot.h"
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
@@ -310,6 +314,483 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
}
EXPORT_SYMBOL(ocelot_ptp_enable);
+static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
+ *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
+}
+
+static void
+ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv4.dport.value = PTP_EV_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.dport.value = PTP_EV_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv4.dport.value = PTP_GEN_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.dport.value = PTP_GEN_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
+
+ return ocelot_trap_add(ocelot, port, l2_cookie, true,
+ ocelot_populate_l2_ptp_trap_key);
+}
+
+static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, l2_cookie);
+}
+
+static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
+ ocelot_populate_ipv4_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
+ ocelot_populate_ipv4_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
+ ocelot_populate_ipv6_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
+ ocelot_populate_ipv6_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
+ return err;
+}
+
+static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ bool l2, bool l4)
+{
+ int err;
+
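+	/* Install the trap rules for the requested PTP transports and remove
+	 * the rules for the transports that are no longer requested, so the
+	 * VCAP IS2 state tracks the current RX filter.
+	 */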
+ if (l2)
+ err = ocelot_l2_ptp_trap_add(ocelot, port);
+ else
+ err = ocelot_l2_ptp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ if (l4) {
+ err = ocelot_ipv4_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv4;
+
+ err = ocelot_ipv6_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv6;
+ } else {
+ err = ocelot_ipv4_ptp_trap_del(ocelot, port);
+
+ err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
+ }
+ if (err)
+ return err;
+
+ return 0;
+
+err_ipv6:
+ ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+ if (l2)
+ ocelot_l2_ptp_trap_del(ocelot, port);
+ return err;
+}
+
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_get);
+
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool l2 = false, l4 = false;
+ struct hwtstamp_config cfg;
+ int err;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+		/* IFH_REW_OP_ONE_STEP_PTP updates the correction field, but
+		 * here we need the origin timestamp to be updated instead.
+		 */
+ ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ ocelot_port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ l2 = true;
+ l4 = true;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
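+	/* Trap the requested PTP frame classes (L2 and/or UDP) to the CPU, as
+	 * required by the new RX filter configuration.
+	 */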
+ err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+ if (err) {
+ mutex_unlock(&ocelot->ptp_lock);
+ return err;
+ }
+
+ if (l2 && l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ else if (l2)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ else
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(ocelot_hwstamp_set);
+
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info)
+{
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_get_ts_info);
+
+static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ocelot->ts_id_lock, flags);
+
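+	/* Limit the number of two-step timestamp requests in flight, both per
+	 * port (range of timestamp IDs) and per switch (depth of the shared
+	 * timestamp FIFO).
+	 */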
+ if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
+ ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+ return -EBUSY;
+ }
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
+ OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
+
+ ocelot_port->ts_id++;
+ if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
+ ocelot_port->ts_id = 0;
+
+ ocelot_port->ptp_skbs_in_flight++;
+ ocelot->ptp_skbs_in_flight++;
+
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+ spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
+
+ return 0;
+}
+
+static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
+ unsigned int ptp_class)
+{
+ struct ptp_header *hdr;
+ u8 msgtype, twostep;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return false;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
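+	/* Bit 1 of the first flagField octet is the twoStepFlag */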
+ twostep = hdr->flag_field[0] & 0x2;
+
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
+ return true;
+
+ return false;
+}
+
+int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
+ struct sk_buff *skb,
+ struct sk_buff **clone)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u8 ptp_cmd = ocelot_port->ptp_cmd;
+ unsigned int ptp_class;
+ int err;
+
+ /* Don't do anything if PTP timestamping not enabled */
+ if (!ptp_cmd)
+ return 0;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
+ if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ return 0;
+ }
+
+ /* Fall back to two-step timestamping */
+ ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ }
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ *clone = skb_clone_sk(skb);
+ if (!(*clone))
+ return -ENOMEM;
+
+ err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
+ if (err)
+ return err;
+
+ OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
+ OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_txtstamp_request);
+
+static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
+ struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* Sec has incremented since the ts was registered */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+
+static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
+{
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
+ if (WARN_ON(!hdr))
+ return false;
+
+ return seqid == ntohs(hdr->sequence_id);
+}
+
+void ocelot_get_txtstamp(struct ocelot *ocelot)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+
+ while (budget--) {
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shhwtstamps;
+ u32 val, id, seqid, txport;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+ seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
+
+ port = ocelot->ports[txport];
+
+ spin_lock(&ocelot->ts_id_lock);
+ port->ptp_skbs_in_flight--;
+ ocelot->ptp_skbs_in_flight--;
+ spin_unlock(&ocelot->ts_id_lock);
+
+ /* Retrieve its associated skb */
+try_again:
+ spin_lock_irqsave(&port->tx_skbs.lock, flags);
+
+ skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
+ if (OCELOT_SKB_CB(skb)->ts_id != id)
+ continue;
+ __skb_unlink(skb, &port->tx_skbs);
+ skb_match = skb;
+ break;
+ }
+
+ spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
+
+ if (WARN_ON(!skb_match))
+ continue;
+
+ if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
+ dev_err_ratelimited(ocelot->dev,
+ "port %d received stale TX timestamp for seqid %d, discarding\n",
+ txport, seqid);
+ dev_kfree_skb_any(skb);
+ goto try_again;
+ }
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_txtstamp);
+
int ocelot_init_timestamp(struct ocelot *ocelot,
const struct ptp_clock_info *info)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
new file mode 100644
index 000000000000..dbd20b125cea
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Statistics for Ocelot switch family
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright 2022 NXP
+ */
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "ocelot.h"
+
+/* Read the counters from hardware and keep them in region->buf.
+ * Caller must hold &ocelot->stat_view_lock.
+ */
+static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
+{
+ struct ocelot_stats_region *region;
+ int err;
+
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ err = ocelot_bulk_read(ocelot, region->base, region->buf,
+ region->count);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Transfer the counters from region->buf to ocelot->stats.
+ * Caller must hold &ocelot->stat_view_lock and &ocelot->stats_lock.
+ */
+static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
+{
+ unsigned int idx = port * OCELOT_NUM_STATS;
+ struct ocelot_stats_region *region;
+ int j;
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ for (j = 0; j < region->count; j++) {
+ u64 *stat = &ocelot->stats[idx + j];
+ u64 val = region->buf[j];
+
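+			/* The hardware counters are 32 bits wide; if the new
+			 * reading is lower than the accumulated low word, the
+			 * counter has wrapped, so carry into the upper bits.
+			 */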
+ if (val < (*stat & U32_MAX))
+ *stat += (u64)1 << 32;
+
+ *stat = (*stat & ~(u64)U32_MAX) + val;
+ }
+
+ idx += region->count;
+ }
+}
+
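+/* Poll the hardware counters periodically, so that 32-bit wraparound is
+ * detected before a counter can wrap more than once between two readings.
+ */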
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+ int port, err;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ err = ocelot_port_update_stats(ocelot, port);
+ if (err)
+ break;
+
+ spin_lock(&ocelot->stats_lock);
+ ocelot_port_transfer_stats(ocelot, port);
+ spin_unlock(&ocelot->stats_lock);
+ }
+
+ if (!err && ocelot->ops->update_stats)
+ ocelot->ops->update_stats(ocelot);
+
+ mutex_unlock(&ocelot->stat_view_lock);
+
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
+
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+}
+
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
+{
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+ ETH_GSTRING_LEN);
+ }
+}
+EXPORT_SYMBOL(ocelot_get_strings);
+
+/* Update ocelot->stats for the given port and run the given callback */
+static void ocelot_port_stats_run(struct ocelot *ocelot, int port, void *priv,
+ void (*cb)(struct ocelot *ocelot, int port,
+ void *priv))
+{
+ int err;
+
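+	/* stat_view_lock serializes changes to the SYS_STAT_CFG port view and
+	 * the bulk reads, while stats_lock protects the 64-bit software
+	 * counters against concurrent readers.
+	 */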
+ mutex_lock(&ocelot->stat_view_lock);
+
+ err = ocelot_port_update_stats(ocelot, port);
+ if (err) {
+ dev_err(ocelot->dev, "Failed to update port %d stats: %pe\n",
+ port, ERR_PTR(err));
+ goto out_unlock;
+ }
+
+ spin_lock(&ocelot->stats_lock);
+
+ ocelot_port_transfer_stats(ocelot, port);
+ cb(ocelot, port, priv);
+
+ spin_unlock(&ocelot->stats_lock);
+
+out_unlock:
+ mutex_unlock(&ocelot->stat_view_lock);
+}
+
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
+{
+ int i, num_stats = 0;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < OCELOT_NUM_STATS; i++)
+ if (ocelot->stats_layout[i].name[0] != '\0')
+ num_stats++;
+
+ return num_stats;
+}
+EXPORT_SYMBOL(ocelot_get_sset_count);
+
+static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *data = priv;
+ int i;
+
+ /* Copy all supported counters */
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ int index = port * OCELOT_NUM_STATS + i;
+
+ if (ocelot->stats_layout[i].name[0] == '\0')
+ continue;
+
+ *data++ = ocelot->stats[index];
+ }
+}
+
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
+{
+ ocelot_port_stats_run(ocelot, port, data, ocelot_port_ethtool_stats_cb);
+}
+EXPORT_SYMBOL(ocelot_get_ethtool_stats);
+
+static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_pause_stats *pause_stats = priv;
+
+ pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PAUSE];
+ pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE];
+}
+
+void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pause_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats);
+
+static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1526 },
+ { 1527, 65535 },
+ {},
+};
+
+static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_rmon_stats *rmon_stats = priv;
+
+ rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_SHORTS];
+ rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_LONGS];
+ rmon_stats->fragments = s[OCELOT_STAT_RX_FRAGMENTS];
+ rmon_stats->jabbers = s[OCELOT_STAT_RX_JABBERS];
+
+ rmon_stats->hist[0] = s[OCELOT_STAT_RX_64];
+ rmon_stats->hist[1] = s[OCELOT_STAT_RX_65_127];
+ rmon_stats->hist[2] = s[OCELOT_STAT_RX_128_255];
+ rmon_stats->hist[3] = s[OCELOT_STAT_RX_256_511];
+ rmon_stats->hist[4] = s[OCELOT_STAT_RX_512_1023];
+ rmon_stats->hist[5] = s[OCELOT_STAT_RX_1024_1526];
+ rmon_stats->hist[6] = s[OCELOT_STAT_RX_1527_MAX];
+
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
+	rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
+	rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
+	rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
+	rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
+}
+
+void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ *ranges = ocelot_rmon_ranges;
+
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_rmon_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats);
+
+static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
+
+ ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL];
+}
+
+void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_ctrl_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats);
+
+static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_mac_stats *mac_stats = priv;
+
+ mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_OCTETS];
+ mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_OCTETS];
+ mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_GREEN_PRIO_0] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_1] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_2] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_3] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_4] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_5] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_6] +
+ s[OCELOT_STAT_RX_GREEN_PRIO_7] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_0] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_1] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_2] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_3] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_4] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_5] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_6] +
+ s[OCELOT_STAT_RX_YELLOW_PRIO_7];
+ mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_MULTICAST];
+ mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_BROADCAST];
+ mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_MULTICAST];
+ mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_BROADCAST];
+ mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_LONGS];
+	/* Sadly, C_RX_CRC is the sum of FCS and alignment errors; they are not
+	 * counted individually.
+	 */
+ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
+ mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
+}
+
+void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_mac_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats);
+
+static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_phy_stats *phy_stats = priv;
+
+ phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS];
+}
+
+void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_phy_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
+
+void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+
+ spin_lock(&ocelot->stats_lock);
+
+ /* Get Rx stats */
+ stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
+ stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
+ s[OCELOT_STAT_RX_FRAGMENTS] +
+ s[OCELOT_STAT_RX_JABBERS] +
+ s[OCELOT_STAT_RX_LONGS] +
+ s[OCELOT_STAT_RX_64] +
+ s[OCELOT_STAT_RX_65_127] +
+ s[OCELOT_STAT_RX_128_255] +
+ s[OCELOT_STAT_RX_256_511] +
+ s[OCELOT_STAT_RX_512_1023] +
+ s[OCELOT_STAT_RX_1024_1526] +
+ s[OCELOT_STAT_RX_1527_MAX];
+ stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
+ stats->rx_missed_errors = s[OCELOT_STAT_DROP_TAIL];
+ stats->rx_dropped = s[OCELOT_STAT_RX_RED_PRIO_0] +
+ s[OCELOT_STAT_RX_RED_PRIO_1] +
+ s[OCELOT_STAT_RX_RED_PRIO_2] +
+ s[OCELOT_STAT_RX_RED_PRIO_3] +
+ s[OCELOT_STAT_RX_RED_PRIO_4] +
+ s[OCELOT_STAT_RX_RED_PRIO_5] +
+ s[OCELOT_STAT_RX_RED_PRIO_6] +
+ s[OCELOT_STAT_RX_RED_PRIO_7] +
+ s[OCELOT_STAT_DROP_LOCAL] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_0] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_1] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_2] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_3] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_4] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_5] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_6] +
+ s[OCELOT_STAT_DROP_YELLOW_PRIO_7] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_0] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_1] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_2] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_3] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_4] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_5] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_6] +
+ s[OCELOT_STAT_DROP_GREEN_PRIO_7];
+
+ /* Get Tx stats */
+ stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
+ stats->tx_packets = s[OCELOT_STAT_TX_64] +
+ s[OCELOT_STAT_TX_65_127] +
+ s[OCELOT_STAT_TX_128_255] +
+ s[OCELOT_STAT_TX_256_511] +
+ s[OCELOT_STAT_TX_512_1023] +
+ s[OCELOT_STAT_TX_1024_1526] +
+ s[OCELOT_STAT_TX_1527_MAX];
+ stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
+ s[OCELOT_STAT_TX_AGED];
+ stats->collisions = s[OCELOT_STAT_TX_COLLISION];
+
+ spin_unlock(&ocelot->stats_lock);
+}
+EXPORT_SYMBOL(ocelot_port_get_stats64);
+
+static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+{
+ struct ocelot_stats_region *region = NULL;
+ unsigned int last;
+ int i;
+
+ INIT_LIST_HEAD(&ocelot->stats_regions);
+
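+	/* Group counters whose registers are contiguous (4 bytes apart) into
+	 * regions that can be retrieved with a single bulk read.
+	 */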
+ for (i = 0; i < OCELOT_NUM_STATS; i++) {
+ if (!ocelot->stats_layout[i].reg)
+ continue;
+
+ if (region && ocelot->stats_layout[i].reg == last + 4) {
+ region->count++;
+ } else {
+ region = devm_kzalloc(ocelot->dev, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->base = ocelot->stats_layout[i].reg;
+ region->count = 1;
+ list_add_tail(&region->node, &ocelot->stats_regions);
+ }
+
+ last = ocelot->stats_layout[i].reg;
+ }
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ region->buf = devm_kcalloc(ocelot->dev, region->count,
+ sizeof(*region->buf), GFP_KERNEL);
+ if (!region->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int ocelot_stats_init(struct ocelot *ocelot)
+{
+ char queue_name[32];
+ int ret;
+
+ ocelot->stats = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports * OCELOT_NUM_STATS,
+ sizeof(u64), GFP_KERNEL);
+ if (!ocelot->stats)
+ return -ENOMEM;
+
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(ocelot->dev));
+ ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!ocelot->stats_queue)
+ return -ENOMEM;
+
+ spin_lock_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->stat_view_lock);
+
+ ret = ocelot_prepare_stats_regions(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->stats_queue);
+ return ret;
+ }
+
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
+ queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+ OCELOT_STATS_CHECK_DELAY);
+
+ return 0;
+}
+
+void ocelot_stats_deinit(struct ocelot *ocelot)
+{
+ cancel_delayed_work(&ocelot->stats_work);
+ destroy_workqueue(ocelot->stats_queue);
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 9c488953f541..6f22aea08a64 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -6,6 +6,7 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
@@ -25,6 +26,9 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
+#define MEM_INIT_SLEEP_US 1000
+#define MEM_INIT_TIMEOUT_US 100000
+
static const u32 *ocelot_regmap[TARGET_MAX] = {
[ANA] = vsc7514_ana_regmap,
[QS] = vsc7514_qs_regmap,
@@ -97,378 +101,7 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
};
static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
- [OCELOT_STAT_RX_OCTETS] = {
- .name = "rx_octets",
- .reg = SYS_COUNT_RX_OCTETS,
- },
- [OCELOT_STAT_RX_UNICAST] = {
- .name = "rx_unicast",
- .reg = SYS_COUNT_RX_UNICAST,
- },
- [OCELOT_STAT_RX_MULTICAST] = {
- .name = "rx_multicast",
- .reg = SYS_COUNT_RX_MULTICAST,
- },
- [OCELOT_STAT_RX_BROADCAST] = {
- .name = "rx_broadcast",
- .reg = SYS_COUNT_RX_BROADCAST,
- },
- [OCELOT_STAT_RX_SHORTS] = {
- .name = "rx_shorts",
- .reg = SYS_COUNT_RX_SHORTS,
- },
- [OCELOT_STAT_RX_FRAGMENTS] = {
- .name = "rx_fragments",
- .reg = SYS_COUNT_RX_FRAGMENTS,
- },
- [OCELOT_STAT_RX_JABBERS] = {
- .name = "rx_jabbers",
- .reg = SYS_COUNT_RX_JABBERS,
- },
- [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
- .name = "rx_crc_align_errs",
- .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
- },
- [OCELOT_STAT_RX_SYM_ERRS] = {
- .name = "rx_sym_errs",
- .reg = SYS_COUNT_RX_SYM_ERRS,
- },
- [OCELOT_STAT_RX_64] = {
- .name = "rx_frames_below_65_octets",
- .reg = SYS_COUNT_RX_64,
- },
- [OCELOT_STAT_RX_65_127] = {
- .name = "rx_frames_65_to_127_octets",
- .reg = SYS_COUNT_RX_65_127,
- },
- [OCELOT_STAT_RX_128_255] = {
- .name = "rx_frames_128_to_255_octets",
- .reg = SYS_COUNT_RX_128_255,
- },
- [OCELOT_STAT_RX_256_511] = {
- .name = "rx_frames_256_to_511_octets",
- .reg = SYS_COUNT_RX_256_511,
- },
- [OCELOT_STAT_RX_512_1023] = {
- .name = "rx_frames_512_to_1023_octets",
- .reg = SYS_COUNT_RX_512_1023,
- },
- [OCELOT_STAT_RX_1024_1526] = {
- .name = "rx_frames_1024_to_1526_octets",
- .reg = SYS_COUNT_RX_1024_1526,
- },
- [OCELOT_STAT_RX_1527_MAX] = {
- .name = "rx_frames_over_1526_octets",
- .reg = SYS_COUNT_RX_1527_MAX,
- },
- [OCELOT_STAT_RX_PAUSE] = {
- .name = "rx_pause",
- .reg = SYS_COUNT_RX_PAUSE,
- },
- [OCELOT_STAT_RX_CONTROL] = {
- .name = "rx_control",
- .reg = SYS_COUNT_RX_CONTROL,
- },
- [OCELOT_STAT_RX_LONGS] = {
- .name = "rx_longs",
- .reg = SYS_COUNT_RX_LONGS,
- },
- [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
- .name = "rx_classified_drops",
- .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
- },
- [OCELOT_STAT_RX_RED_PRIO_0] = {
- .name = "rx_red_prio_0",
- .reg = SYS_COUNT_RX_RED_PRIO_0,
- },
- [OCELOT_STAT_RX_RED_PRIO_1] = {
- .name = "rx_red_prio_1",
- .reg = SYS_COUNT_RX_RED_PRIO_1,
- },
- [OCELOT_STAT_RX_RED_PRIO_2] = {
- .name = "rx_red_prio_2",
- .reg = SYS_COUNT_RX_RED_PRIO_2,
- },
- [OCELOT_STAT_RX_RED_PRIO_3] = {
- .name = "rx_red_prio_3",
- .reg = SYS_COUNT_RX_RED_PRIO_3,
- },
- [OCELOT_STAT_RX_RED_PRIO_4] = {
- .name = "rx_red_prio_4",
- .reg = SYS_COUNT_RX_RED_PRIO_4,
- },
- [OCELOT_STAT_RX_RED_PRIO_5] = {
- .name = "rx_red_prio_5",
- .reg = SYS_COUNT_RX_RED_PRIO_5,
- },
- [OCELOT_STAT_RX_RED_PRIO_6] = {
- .name = "rx_red_prio_6",
- .reg = SYS_COUNT_RX_RED_PRIO_6,
- },
- [OCELOT_STAT_RX_RED_PRIO_7] = {
- .name = "rx_red_prio_7",
- .reg = SYS_COUNT_RX_RED_PRIO_7,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
- .name = "rx_yellow_prio_0",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
- .name = "rx_yellow_prio_1",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
- .name = "rx_yellow_prio_2",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
- .name = "rx_yellow_prio_3",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
- .name = "rx_yellow_prio_4",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
- .name = "rx_yellow_prio_5",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
- .name = "rx_yellow_prio_6",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
- .name = "rx_yellow_prio_7",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_0] = {
- .name = "rx_green_prio_0",
- .reg = SYS_COUNT_RX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_1] = {
- .name = "rx_green_prio_1",
- .reg = SYS_COUNT_RX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_2] = {
- .name = "rx_green_prio_2",
- .reg = SYS_COUNT_RX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_3] = {
- .name = "rx_green_prio_3",
- .reg = SYS_COUNT_RX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_4] = {
- .name = "rx_green_prio_4",
- .reg = SYS_COUNT_RX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_5] = {
- .name = "rx_green_prio_5",
- .reg = SYS_COUNT_RX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_6] = {
- .name = "rx_green_prio_6",
- .reg = SYS_COUNT_RX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_7] = {
- .name = "rx_green_prio_7",
- .reg = SYS_COUNT_RX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_OCTETS] = {
- .name = "tx_octets",
- .reg = SYS_COUNT_TX_OCTETS,
- },
- [OCELOT_STAT_TX_UNICAST] = {
- .name = "tx_unicast",
- .reg = SYS_COUNT_TX_UNICAST,
- },
- [OCELOT_STAT_TX_MULTICAST] = {
- .name = "tx_multicast",
- .reg = SYS_COUNT_TX_MULTICAST,
- },
- [OCELOT_STAT_TX_BROADCAST] = {
- .name = "tx_broadcast",
- .reg = SYS_COUNT_TX_BROADCAST,
- },
- [OCELOT_STAT_TX_COLLISION] = {
- .name = "tx_collision",
- .reg = SYS_COUNT_TX_COLLISION,
- },
- [OCELOT_STAT_TX_DROPS] = {
- .name = "tx_drops",
- .reg = SYS_COUNT_TX_DROPS,
- },
- [OCELOT_STAT_TX_PAUSE] = {
- .name = "tx_pause",
- .reg = SYS_COUNT_TX_PAUSE,
- },
- [OCELOT_STAT_TX_64] = {
- .name = "tx_frames_below_65_octets",
- .reg = SYS_COUNT_TX_64,
- },
- [OCELOT_STAT_TX_65_127] = {
- .name = "tx_frames_65_to_127_octets",
- .reg = SYS_COUNT_TX_65_127,
- },
- [OCELOT_STAT_TX_128_255] = {
- .name = "tx_frames_128_255_octets",
- .reg = SYS_COUNT_TX_128_255,
- },
- [OCELOT_STAT_TX_256_511] = {
- .name = "tx_frames_256_511_octets",
- .reg = SYS_COUNT_TX_256_511,
- },
- [OCELOT_STAT_TX_512_1023] = {
- .name = "tx_frames_512_1023_octets",
- .reg = SYS_COUNT_TX_512_1023,
- },
- [OCELOT_STAT_TX_1024_1526] = {
- .name = "tx_frames_1024_1526_octets",
- .reg = SYS_COUNT_TX_1024_1526,
- },
- [OCELOT_STAT_TX_1527_MAX] = {
- .name = "tx_frames_over_1526_octets",
- .reg = SYS_COUNT_TX_1527_MAX,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
- .name = "tx_yellow_prio_0",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
- .name = "tx_yellow_prio_1",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
- .name = "tx_yellow_prio_2",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
- .name = "tx_yellow_prio_3",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
- .name = "tx_yellow_prio_4",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
- .name = "tx_yellow_prio_5",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
- .name = "tx_yellow_prio_6",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
- .name = "tx_yellow_prio_7",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_0] = {
- .name = "tx_green_prio_0",
- .reg = SYS_COUNT_TX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_1] = {
- .name = "tx_green_prio_1",
- .reg = SYS_COUNT_TX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_2] = {
- .name = "tx_green_prio_2",
- .reg = SYS_COUNT_TX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_3] = {
- .name = "tx_green_prio_3",
- .reg = SYS_COUNT_TX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_4] = {
- .name = "tx_green_prio_4",
- .reg = SYS_COUNT_TX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_5] = {
- .name = "tx_green_prio_5",
- .reg = SYS_COUNT_TX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_6] = {
- .name = "tx_green_prio_6",
- .reg = SYS_COUNT_TX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_7] = {
- .name = "tx_green_prio_7",
- .reg = SYS_COUNT_TX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_AGED] = {
- .name = "tx_aged",
- .reg = SYS_COUNT_TX_AGING,
- },
- [OCELOT_STAT_DROP_LOCAL] = {
- .name = "drop_local",
- .reg = SYS_COUNT_DROP_LOCAL,
- },
- [OCELOT_STAT_DROP_TAIL] = {
- .name = "drop_tail",
- .reg = SYS_COUNT_DROP_TAIL,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
- .name = "drop_yellow_prio_0",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
- .name = "drop_yellow_prio_1",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
- .name = "drop_yellow_prio_2",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
- .name = "drop_yellow_prio_3",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
- .name = "drop_yellow_prio_4",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
- .name = "drop_yellow_prio_5",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
- .name = "drop_yellow_prio_6",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
- .name = "drop_yellow_prio_7",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
- .name = "drop_green_prio_0",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
- .name = "drop_green_prio_1",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
- .name = "drop_green_prio_2",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
- .name = "drop_green_prio_3",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
- .name = "drop_green_prio_4",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
- .name = "drop_green_prio_5",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
- .name = "drop_green_prio_6",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
- .name = "drop_green_prio_7",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
- },
+ OCELOT_COMMON_STATS,
};
static void ocelot_pll5_init(struct ocelot *ocelot)
@@ -562,27 +195,43 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
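+/* Returns a negative error code, or the current value of the self-clearing
+ * MEM_INIT bit, so it can be used as a readx_poll_timeout() accessor.
+ */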
+static int ocelot_mem_init_status(struct ocelot *ocelot)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+
+ return err ?: val;
+}
+
static int ocelot_reset(struct ocelot *ocelot)
{
- int retries = 100;
+ int err;
u32 val;
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ if (err)
+ return err;
- do {
- msleep(1);
- regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
- } while (val && --retries);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
- if (!retries)
- return -ETIMEDOUT;
+ /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
+ * 100us) before enabling the switch core.
+ */
+ err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
+ MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
+ if (err)
+ return err;
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
- return 0;
+ return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
}
/* Watermark encode
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index 9cf82ecf191c..9d2d3e13cacf 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -242,7 +242,7 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
REG(SYS_COUNT_DROP_LOCAL, 0x000200),
REG(SYS_COUNT_DROP_TAIL, 0x000204),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
@@ -283,7 +283,6 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_MMGT_FAST, 0x0006a0),
REG(SYS_EVENTS_DIF, 0x0006a4),
REG(SYS_EVENTS_CORE, 0x0006b4),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x0006b8),
REG(SYS_PTP_TXSTAMP, 0x0006bc),
REG(SYS_PTP_NXT, 0x0006c0),
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 971dde8c3286..9063e2e22cd5 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1647,10 +1647,10 @@ myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- strlcpy(info->driver, "myri10ge", sizeof(info->driver));
- strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
- strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "myri10ge", sizeof(info->driver));
+ strscpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+ strscpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int myri10ge_get_coalesce(struct net_device *netdev,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 9aae7f1eb5d2..650a5a166070 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -869,7 +869,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
np = netdev_priv(dev);
np->ioaddr = ioaddr;
- netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+ netif_napi_add(dev, &np->napi, natsemi_poll);
np->dev = dev;
np->pci_dev = pdev;
@@ -2564,9 +2564,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 49ea130c9067..998586872599 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1351,9 +1351,9 @@ static int ns83820_set_link_ksettings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strlcpy(info->driver, "ns83820", sizeof(info->driver));
- strlcpy(info->version, VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, "ns83820", sizeof(info->driver));
+ strscpy(info->version, VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 30f955efa830..dcf8212119f9 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5348,9 +5348,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strlcpy(info->version, s2io_driver_version, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strscpy(info->version, s2io_driver_version, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}
/**
@@ -7359,10 +7359,9 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
int get_off = ring_data->rx_curr_get_info.offset;
int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
- unsigned char *buff = skb_push(skb, buf0_len);
struct buffAdd *ba = &ring_data->ba[get_block][get_off];
- memcpy(buff, ba->ba_0, buf0_len);
+ skb_put_data(skb, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
}
@@ -7905,10 +7904,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
for (i = 0; i < config->rx_ring_num ; i++) {
struct ring_info *ring = &mac_control->rings[i];
- netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
+ netif_napi_add(dev, &ring->napi, s2io_poll_msix);
}
} else {
- netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+ netif_napi_add(dev, &sp->napi, s2io_poll_inta);
}
/* Not needed for Herc */
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 78368e71ce83..f80f1a6953fa 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -474,6 +474,7 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_tls_offload_ctx *ntls;
+ struct net *net = dev_net(netdev);
struct ipv6hdr *ipv6h;
struct tcphdr *th;
struct iphdr *iph;
@@ -494,13 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
iph->saddr, th->source, iph->daddr,
th->dest, netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
&ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index b3b2a23b8d89..f693119541d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */
+#include <net/tc_act/tc_csum.h>
+#include <net/tc_act/tc_ct.h>
+
#include "conntrack.h"
#include "../nfp_port.h"
@@ -56,9 +59,17 @@ bool is_pre_ct_flow(struct flow_cls_offload *flow)
int i;
flow_action_for_each(i, act, &flow->rule->action) {
- if (act->id == FLOW_ACTION_CT && !act->ct.action)
- return true;
+ if (act->id == FLOW_ACTION_CT) {
+ /* The pre_ct rule can only have the ct or ct nat action and
+ * cannot contain other ct actions, e.g. ct commit and so on.
+ */
+ if ((!act->ct.action || act->ct.action == TCA_CT_ACT_NAT))
+ return true;
+ else
+ return false;
+ }
}
+
return false;
}
@@ -66,13 +77,37 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_action_entry *act;
+ bool exist_ct_clear = false;
struct flow_match_ct ct;
+ int i;
+
+ /* The post_ct entry cannot contain any ct action except ct_clear. */
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT) {
+ /* ignore ct clear action. */
+ if (act->ct.action == TCA_CT_ACT_CLEAR) {
+ exist_ct_clear = true;
+ continue;
+ }
+
+ return false;
+ }
+ }
if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
flow_rule_match_ct(rule, &ct);
if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
return true;
+ } else {
+ /* When NAT is done with ct, the post_ct entry ignores the ct status
+ * and matches the NAT fields (sip/dip) instead. In this situation,
+ * the flow chain index is not zero and a ct clear action is present.
+ */
+ if (flow->common.chain_index && exist_ct_clear)
+ return true;
}
+
return false;
}
@@ -168,6 +203,20 @@ static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
return buf;
}
+/* Note entry1 and entry2 are not swappable. The IP and tport merge
+ * checks are only skipped for pre_ct and post_ct when pre_ct does NAT.
+ */
+static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
+ struct nfp_fl_ct_flow_entry *entry2)
+{
+ /* Only pre_ct can have the NFP_FL_ACTION_DO_NAT flag. */
+ if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
+ entry2->type == CT_TYPE_POST_CT)
+ return false;
+
+ return true;
+}
+
/* Note entry1 and entry2 are not swappable, entry1 should be
* the former flow whose mangle action need be taken into account
* if existed, and entry2 should be the latter flow whose action
@@ -225,7 +274,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ /* If the pre_ct entry does NAT, the NAT IP exists in the nft entry
+ * and is merge-checked when the nft and post_ct entries are merged,
+ * so skip this IP merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv4_addrs match1, match2;
flow_rule_match_ipv4_addrs(entry1->rule, &match1);
@@ -242,7 +296,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ /* If the pre_ct entry does NAT, the NAT IP exists in the nft entry
+ * and is merge-checked when the nft and post_ct entries are merged,
+ * so skip this IP merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
struct flow_match_ipv6_addrs match1, match2;
flow_rule_match_ipv6_addrs(entry1->rule, &match1);
@@ -259,7 +318,12 @@ static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
goto check_failed;
}
- if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ /* If the pre_ct entry does NAT, the NAT tport exists in the nft entry
+ * and is merge-checked when the nft and post_ct entries are merged,
+ * so skip this tport merge check here.
+ */
+ if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) &&
+ nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
struct flow_match_ports match1, match2;
@@ -404,12 +468,55 @@ check_failed:
return -EINVAL;
}
+static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
+{
+ struct flow_match_vlan match;
+
+ if (unlikely(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)))
+ return -EOPNOTSUPP;
+
+ /* post_ct does not match the VLAN key, so it can be merged. */
+ if (likely(!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)))
+ return 0;
+
+ switch (a_in->id) {
+ /* pre_ct pops the VLAN, so a post_ct that matches the VLAN key cannot be merged. */
+ case FLOW_ACTION_VLAN_POP:
+ return -EOPNOTSUPP;
+
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_MANGLE:
+ flow_rule_match_vlan(rule, &match);
+ /* different vlan id, cannot be merged. */
+ if ((match.key->vlan_id & match.mask->vlan_id) ^
+ (a_in->vlan.vid & match.mask->vlan_id))
+ return -EOPNOTSUPP;
+
+ /* different tpid, cannot be merged. */
+ if ((match.key->vlan_tpid & match.mask->vlan_tpid) ^
+ (a_in->vlan.proto & match.mask->vlan_tpid))
+ return -EOPNOTSUPP;
+
+ /* different priority, cannot be merged. */
+ if ((match.key->vlan_priority & match.mask->vlan_priority) ^
+ (a_in->vlan.prio & match.mask->vlan_priority))
+ return -EOPNOTSUPP;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
struct nfp_fl_ct_flow_entry *post_ct_entry,
struct nfp_fl_ct_flow_entry *nft_entry)
{
struct flow_action_entry *act;
- int i;
+ int i, err;
/* Check for pre_ct->action conflicts */
flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
@@ -417,6 +524,10 @@ static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
case FLOW_ACTION_VLAN_MANGLE:
+ err = nfp_ct_check_vlan_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
case FLOW_ACTION_MPLS_PUSH:
case FLOW_ACTION_MPLS_POP:
case FLOW_ACTION_MPLS_MANGLE:
@@ -468,6 +579,12 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
return -EINVAL;
return 0;
+ } else {
+ /* A post_ct entry with a ct clear action does not match the
+ * ct status when the nft entry is a NAT entry.
+ */
+ if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
+ return 0;
}
return -EINVAL;
@@ -537,11 +654,37 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
return key_size;
}
+/* Get the csum flag according to the IP proto and mangle action. */
+static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
+{
+ if (a_in->id != FLOW_ACTION_MANGLE)
+ return;
+
+ switch (a_in->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ *csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
+ if (ip_proto == IPPROTO_TCP)
+ *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+ else if (ip_proto == IPPROTO_UDP)
+ *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ *csum |= TCA_CSUM_UPDATE_FLAG_TCP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ *csum |= TCA_CSUM_UPDATE_FLAG_UDP;
+ break;
+ default:
+ break;
+ }
+}
+
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
struct nfp_flower_priv *priv,
struct net_device *netdev,
struct nfp_fl_payload *flow_pay)
{
+ enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
struct flow_action_entry *a_in;
int i, j, num_actions, id;
struct flow_rule *a_rule;
@@ -551,15 +694,25 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
rules[CT_TYPE_NFT]->action.num_entries +
rules[CT_TYPE_POST_CT]->action.num_entries;
- a_rule = flow_rule_alloc(num_actions);
+ /* Add one action to make sure there is enough room to add a checksum
+ * action when doing NAT.
+ */
+ a_rule = flow_rule_alloc(num_actions + 1);
if (!a_rule)
return -ENOMEM;
/* Actions need a BASIC dissector. */
a_rule->match = rules[CT_TYPE_PRE_CT]->match;
+ /* The post_ct entry has at least one action. */
+ if (rules[CT_TYPE_POST_CT]->action.num_entries != 0) {
+ tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
+ }
/* Copy actions */
for (j = 0; j < _CT_TYPE_MAX; j++) {
+ u32 csum_updated = 0;
+ u8 ip_proto = 0;
+
if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -571,8 +724,10 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
* through the subflows and assign the proper subflow to a_rule
*/
flow_rule_match_basic(rules[j], &match);
- if (match.mask->ip_proto)
+ if (match.mask->ip_proto) {
a_rule->match = rules[j]->match;
+ ip_proto = match.key->ip_proto;
+ }
}
for (i = 0; i < rules[j]->action.num_entries; i++) {
@@ -589,11 +744,32 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
case FLOW_ACTION_CT_METADATA:
continue;
default:
+ /* The nft entry is generated by tc ct, and its mangle actions do not
+ * care about stats; inherit the post_ct entry stats to satisfy
+ * flow_action_hw_stats_check.
+ */
+ if (j == CT_TYPE_NFT) {
+ if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
+ a_in->hw_stats = tmp_stats;
+ nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
+ }
memcpy(&a_rule->action.entries[offset++],
a_in, sizeof(struct flow_action_entry));
break;
}
}
+ /* The nft entry has mangle actions but no checksum action when doing NAT;
+ * hardware will automatically fix the IPv4 and TCP/UDP checksums, so add a
+ * csum action to satisfy the csum action check.
+ */
+ if (csum_updated) {
+ struct flow_action_entry *csum_action;
+
+ csum_action = &a_rule->action.entries[offset++];
+ csum_action->id = FLOW_ACTION_CSUM;
+ csum_action->csum_flags = csum_updated;
+ csum_action->hw_stats = tmp_stats;
+ }
}
/* Some actions would have been ignored, so update the num_entries field */
@@ -1191,6 +1367,49 @@ static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
return NULL;
}
+static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
+{
+ if (mangle_action->id != FLOW_ACTION_MANGLE)
+ return;
+
+ switch (mangle_action->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
+ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
+ return;
+
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
+ mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
+ return;
+
+ default:
+ return;
+ }
+}
+
+static int nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
+ struct nfp_fl_ct_flow_entry *entry)
+{
+ switch (act->id) {
+ case FLOW_ACTION_CT:
+ if (act->ct.action == TCA_CT_ACT_NAT)
+ entry->flags |= NFP_FL_ACTION_DO_NAT;
+ break;
+
+ case FLOW_ACTION_MANGLE:
+ entry->flags |= NFP_FL_ACTION_DO_MANGLE;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
struct net_device *netdev,
@@ -1257,6 +1476,13 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
new_act = &entry->rule->action.entries[i];
memcpy(new_act, act, sizeof(struct flow_action_entry));
+ /* The nft entry mangle fields are in host byte order and need to be
+ * translated to network byte order.
+ */
+ if (is_nft)
+ nfp_nft_ct_translate_mangle_action(new_act);
+
+ nfp_nft_ct_set_flow_flag(new_act, entry);
/* Entunnel is a special case, need to allocate and copy
* tunnel info.
*/
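The nfp_nft_ct_translate_mangle_action() addition above converts the nft mangle values and masks from host to network byte order before they are merged with the tc-generated entries. A small standalone sketch of the same byte-order handling, assuming an IPv4 address rewrite; the value is made up for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* nft hands over 10.0.0.1 as a host-order u32; the offload path
         * needs the same value in network byte order.
         */
        uint32_t host_val = 0x0a000001;          /* 10.0.0.1 */
        uint32_t wire_val = htonl(host_val);     /* what gets stored for hardware */

        printf("host order: 0x%08x, network order: 0x%08x\n",
               host_val, wire_val);
        return 0;
}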
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
index beb6cceff9d8..762c0b36e269 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -103,6 +103,10 @@ enum nfp_nfp_layer_name {
_FLOW_PAY_LAYERS_MAX
};
+/* NFP flow entry flags. */
+#define NFP_FL_ACTION_DO_NAT BIT(0)
+#define NFP_FL_ACTION_DO_MANGLE BIT(1)
+
/**
* struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
* @cookie: Flow cookie, same as original TC flow, used as key
@@ -115,6 +119,7 @@ enum nfp_nfp_layer_name {
* @rule: Reference to the original TC flow rule
* @stats: Used to cache stats for updating
* @tun_offset: Used to indicate tunnel action offset in action list
+ * @flags: Used to indicate flow flags, e.g. NAT, which are used by merge.
*/
struct nfp_fl_ct_flow_entry {
unsigned long cookie;
@@ -127,6 +132,7 @@ struct nfp_fl_ct_flow_entry {
struct flow_rule *rule;
struct flow_stats stats;
u8 tun_offset; // Set to NFP_FL_CT_NO_TUN if no tun
+ u8 flags;
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 83c97154c0c7..3ab3e4536b99 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1301,9 +1301,14 @@ static bool offload_pre_check(struct flow_cls_offload *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_ct ct;
- if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
- return false;
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ /* Allow special case where CT match is all 0 */
+ if (memchr_inv(ct.key, 0, sizeof(*ct.key)))
+ return false;
+ }
if (flow->common.chain_index)
return false;
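The offload_pre_check() change above relies on memchr_inv(ct.key, 0, sizeof(*ct.key)): memchr_inv() returns a pointer to the first byte that differs from the given value, or NULL when every byte matches, so a NULL result means the CT match is all zeroes and may be allowed through. A standalone sketch of the same test; the struct below is a stand-in, not the real dissector key:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dummy_ct_key {            /* stand-in for the flow dissector CT key */
        unsigned char state[2];
        unsigned char zone[2];
        unsigned char labels[16];
};

/* Userspace equivalent of !memchr_inv(p, 0, len). */
static bool is_all_zero(const void *p, size_t len)
{
        static const unsigned char zeroes[sizeof(struct dummy_ct_key)];

        return len <= sizeof(zeroes) && memcmp(p, zeroes, len) == 0;
}

int main(void)
{
        struct dummy_ct_key key = { 0 };

        printf("all zero: %d\n", is_all_zero(&key, sizeof(key)));   /* 1 */
        key.state[0] = 1;
        printf("all zero: %d\n", is_all_zero(&key, sizeof(key)));   /* 0 */
        return 0;
}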
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 4e5df9f2c372..99052a925d9e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -119,7 +119,8 @@ int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
static int nfp_policer_validate(const struct flow_action *action,
const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ bool ingress)
{
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
@@ -127,11 +128,20 @@ static int nfp_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
- act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not pipe or ok");
- return -EOPNOTSUPP;
+ if (ingress) {
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue or ok");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
}
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
@@ -217,7 +227,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
- err = nfp_policer_validate(&flow->rule->action, action, extack);
+ err = nfp_policer_validate(&flow->rule->action, action, extack, true);
if (err)
return err;
@@ -686,6 +696,7 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
bool pps_support, pps;
bool add = false;
u64 rate;
+ int err;
pps_support = !!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_PPS);
@@ -697,6 +708,11 @@ nfp_act_install_actions(struct nfp_app *app, struct flow_offload_action *fl_act,
"unsupported offload: qos rate limit offload requires police action");
continue;
}
+
+ err = nfp_policer_validate(&fl_act->action, action, extack, false);
+ if (err)
+ return err;
+
if (action->police.rate_bytes_ps > 0) {
rate = action->police.rate_bytes_ps;
burst = action->police.burst;
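The nfp_policer_validate() change above splits the accepted conform (not-exceed) actions by direction: the ingress rate-limiter path accepts "continue" or "ok", while the action-offload path keeps accepting "pipe" or "ok". A minimal sketch of that decision table, using stand-in enum values rather than the real flow_offload types:

#include <stdbool.h>
#include <stdio.h>

enum conform_act { ACT_DROP, ACT_ACCEPT, ACT_PIPE, ACT_CONTINUE };   /* stand-ins */

static bool conform_action_supported(enum conform_act act, bool ingress)
{
        if (ingress)
                return act == ACT_CONTINUE || act == ACT_ACCEPT;
        return act == ACT_PIPE || act == ACT_ACCEPT;
}

int main(void)
{
        printf("ingress/pipe supported: %d\n",
               conform_action_supported(ACT_PIPE, true));    /* 0 */
        printf("egress/pipe supported:  %d\n",
               conform_action_supported(ACT_PIPE, false));   /* 1 */
        return 0;
}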
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 65e243168765..5d9db8c2a5b4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -84,7 +84,7 @@ static void nfp_nfd3_xsk_rx_skb(struct nfp_net_rx_ring *rx_ring,
nfp_net_xsk_rx_drop(r_vec, xrxbuf);
return;
}
- memcpy(skb_put(skb, pkt_len), xrxbuf->xdp->data, pkt_len);
+ skb_put_data(skb, xrxbuf->xdp->data, pkt_len);
skb->mark = meta->mark;
skb_set_hash(skb, meta->hash, meta->hash_type);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 873429f7a6da..e66e548919d4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -691,6 +691,71 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
return 0;
}
+int nfp_net_pf_get_app_id(struct nfp_pf *pf)
+{
+ return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
+ NFP_APP_CORE_NIC);
+}
+
+static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
+{
+ char name[32];
+ int err = 0;
+ u64 val;
+
+ snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp));
+
+ val = nfp_rtsym_read_le(pf->rtbl, name, &err);
+ if (err) {
+ if (err != -ENOENT)
+ nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
+
+ return 0;
+ }
+
+ return val;
+}
+
+static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff)
+{
+ struct nfp_nsp *nsp;
+ char hwinfo[32];
+ int err;
+
+ nsp = nfp_nsp_open(pf->cpp);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ /* Not a fatal error, no need to return error to stop driver from loading */
+ if (err) {
+ nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err);
+ } else {
+ /* Need to reinitialize eth_tbl since the eth table state may change
+ * after sp_indiff is configured.
+ */
+ kfree(pf->eth_tbl);
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ }
+
+ nfp_nsp_close(nsp);
+ return 0;
+}
+
+static int nfp_pf_nsp_cfg(struct nfp_pf *pf)
+{
+ bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
+ (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);
+
+ return nfp_pf_cfg_hwinfo(pf, sp_indiff);
+}
+
+static void nfp_pf_nsp_clean(struct nfp_pf *pf)
+{
+ nfp_pf_cfg_hwinfo(pf, false);
+}
+
static int nfp_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
@@ -791,10 +856,14 @@ static int nfp_pci_probe(struct pci_dev *pdev,
goto err_fw_unload;
}
- err = nfp_net_pci_probe(pf);
+ err = nfp_pf_nsp_cfg(pf);
if (err)
goto err_fw_unload;
+ err = nfp_net_pci_probe(pf);
+ if (err)
+ goto err_nsp_clean;
+
err = nfp_hwmon_register(pf);
if (err) {
dev_err(&pdev->dev, "Failed to register hwmon info\n");
@@ -805,6 +874,8 @@ static int nfp_pci_probe(struct pci_dev *pdev,
err_net_remove:
nfp_net_pci_remove(pf);
+err_nsp_clean:
+ nfp_pf_nsp_clean(pf);
err_fw_unload:
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
@@ -844,6 +915,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
nfp_net_pci_remove(pf);
+ nfp_pf_nsp_clean(pf);
vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
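The probe changes above slot nfp_pf_nsp_cfg() in before nfp_net_pci_probe() and add a matching err_nsp_clean label, following the usual kernel pattern in which each successful init step gets an unwind label and a failure jumps to the label of the last step that succeeded. A generic standalone sketch of that unwind pattern, with hypothetical step names:

#include <stdio.h>

static int step_a(void) { return 0; }      /* hypothetical init steps */
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }     /* pretend this one fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int probe(void)
{
        int err;

        err = step_a();
        if (err)
                return err;

        err = step_b();
        if (err)
                goto err_undo_a;

        err = step_c();
        if (err)
                goto err_undo_b;      /* unwind in reverse order of setup */

        return 0;

err_undo_b:
        undo_b();
err_undo_a:
        undo_a();
        return err;
}

int main(void)
{
        printf("probe: %d\n", probe());
        return 0;
}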
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index f56ca11de134..afd3edfa2428 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -161,6 +161,7 @@ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
unsigned int default_val);
+int nfp_net_pf_get_app_id(struct nfp_pf *pf);
u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area);
@@ -190,4 +191,7 @@ int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb,
int nfp_devlink_params_register(struct nfp_pf *pf);
void nfp_devlink_params_unregister(struct nfp_pf *pf);
+
+unsigned int nfp_net_lr2speed(unsigned int linkrate);
+unsigned int nfp_net_speed2lr(unsigned int speed);
#endif /* NFP_MAIN_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index cf4d6f1129fa..27f4786ace4f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -474,19 +474,22 @@ static void nfp_net_read_link_status(struct nfp_net *nn)
{
unsigned long flags;
bool link_up;
- u32 sts;
+ u16 sts;
spin_lock_irqsave(&nn->link_status_lock, flags);
- sts = nn_readl(nn, NFP_NET_CFG_STS);
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
link_up = !!(sts & NFP_NET_CFG_STS_LINK);
if (nn->link_up == link_up)
goto out;
nn->link_up = link_up;
- if (nn->port)
+ if (nn->port) {
set_bit(NFP_PORT_CHANGED, &nn->port->flags);
+ if (nn->port->link_cb)
+ nn->port->link_cb(nn->port);
+ }
if (nn->link_up) {
netif_carrier_on(nn->dp.netdev);
@@ -768,9 +771,7 @@ nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
if (dp->netdev)
netif_napi_add(dp->netdev, &r_vec->napi,
- nfp_net_has_xsk_pool_slow(dp, idx) ?
- dp->ops->xsk_poll : dp->ops->poll,
- NAPI_POLL_WEIGHT);
+ nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll);
else
tasklet_enable(&r_vec->tasklet);
}
@@ -1630,21 +1631,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
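The u64_stats_fetch_begin()/u64_stats_fetch_retry() calls above are switched to their _irq variants. On configurations where 64-bit counters cannot be read atomically, both forms implement the same seqcount-style reader: snapshot the counters, then retry if the writer's sequence changed underneath. A standalone sketch of that retry pattern, using C11 atomics as a stand-in for the kernel's u64_stats helpers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
        atomic_uint seq;      /* even = stable, odd = writer in progress */
        uint64_t packets;
        uint64_t bytes;
};

/* Reader: retry the snapshot until it sees a stable, unchanged sequence. */
static void read_stats(struct stats *s, uint64_t *packets, uint64_t *bytes)
{
        unsigned int start;

        do {
                start = atomic_load(&s->seq);
                *packets = s->packets;
                *bytes = s->bytes;
        } while ((start & 1) || atomic_load(&s->seq) != start);
}

int main(void)
{
        struct stats s = { .packets = 42, .bytes = 4200 };
        uint64_t p, b;

        read_stats(&s, &p, &b);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
}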
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ac05ec34d69e..6714d5e8fdab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -14,6 +14,9 @@
#include <linux/types.h>
+/* 64-bit per app capabilities */
+#define NFP_NET_APP_CAP_SP_INDIFF BIT_ULL(0) /* indifferent to port speed */
+
/* Configuration BAR size.
*
* The configuration BAR is 8K in size, but due to
@@ -193,6 +196,10 @@
#define NFP_NET_CFG_STS_LINK_RATE_40G 5
#define NFP_NET_CFG_STS_LINK_RATE_50G 6
#define NFP_NET_CFG_STS_LINK_RATE_100G 7
+/* NSP Link rate is a 16-bit word. It's determined by NSP and
+ * written to CFG BAR by NFP driver.
+ */
+#define NFP_NET_CFG_STS_NSP_LINK_RATE 0x0036
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index eeb1455a4e5d..22a5d2419084 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -205,7 +205,7 @@ nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
{
char nsp_version[ETHTOOL_FWVERS_LEN] = {};
- strlcpy(drvinfo->driver, dev_driver_string(&pdev->dev),
+ strscpy(drvinfo->driver, dev_driver_string(&pdev->dev),
sizeof(drvinfo->driver));
nfp_net_get_nspinfo(app, nsp_version);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -222,18 +222,49 @@ nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
nn->fw_ver.extend, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor);
- strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
+ strscpy(drvinfo->bus_info, pci_name(nn->pdev),
sizeof(drvinfo->bus_info));
nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}
+static int
+nfp_net_nway_reset(struct net_device *netdev)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ err = nfp_eth_set_configured(port->app->cpp, eth_port->index, false);
+ if (err) {
+ netdev_info(netdev, "Link down failed: %d\n", err);
+ return err;
+ }
+
+ err = nfp_eth_set_configured(port->app->cpp, eth_port->index, true);
+ if (err) {
+ netdev_info(netdev, "Link up failed: %d\n", err);
+ return err;
+ }
+
+ netdev_info(netdev, "Link reset succeeded\n");
+ return 0;
+}
+
static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct nfp_app *app = nfp_app_from_netdev(netdev);
- strlcpy(drvinfo->bus_info, pci_name(app->pdev),
+ strscpy(drvinfo->bus_info, pci_name(app->pdev),
sizeof(drvinfo->bus_info));
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
@@ -273,20 +304,11 @@ static int
nfp_net_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- static const u32 ls_to_ethtool[] = {
- [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
- [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
- [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
- [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
- [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
- [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
- [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
- [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
- };
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
struct nfp_net *nn;
- u32 sts, ls;
+ unsigned int speed;
+ u16 sts;
/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
@@ -299,8 +321,13 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
if (eth_port) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
- cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
- AUTONEG_ENABLE : AUTONEG_DISABLE;
+ if (eth_port->supp_aneg) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (eth_port->aneg == NFP_ANEG_AUTO) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ }
+ }
nfp_net_set_fec_link_mode(eth_port, cmd);
}
@@ -319,18 +346,15 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
nn = netdev_priv(netdev);
- sts = nn_readl(nn, NFP_NET_CFG_STS);
-
- ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
- if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
+ speed = nfp_net_lr2speed(FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts));
+ if (!speed)
return -EOPNOTSUPP;
- if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
- ls >= ARRAY_SIZE(ls_to_ethtool))
- return 0;
-
- cmd->base.speed = ls_to_ethtool[ls];
- cmd->base.duplex = DUPLEX_FULL;
+ if (speed != SPEED_UNKNOWN) {
+ cmd->base.speed = speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ }
return 0;
}
@@ -339,6 +363,7 @@ static int
nfp_net_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ bool req_aneg = (cmd->base.autoneg == AUTONEG_ENABLE);
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
struct nfp_nsp *nsp;
@@ -358,13 +383,25 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (IS_ERR(nsp))
return PTR_ERR(nsp);
- err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
- NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
+ if (req_aneg && !eth_port->supp_aneg) {
+ netdev_warn(netdev, "Autoneg is not supported.\n");
+ err = -EOPNOTSUPP;
+ goto err_bad_set;
+ }
+
+ err = __nfp_eth_set_aneg(nsp, req_aneg ? NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
if (err)
goto err_bad_set;
+
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ if (req_aneg) {
+ netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
+ err = -EINVAL;
+ goto err_bad_set;
+ }
+
err = __nfp_eth_set_speed(nsp, speed);
if (err)
goto err_bad_set;
@@ -649,7 +686,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -657,10 +694,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -670,7 +707,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
@@ -1008,7 +1045,7 @@ nfp_port_get_fecparam(struct net_device *netdev,
return 0;
param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
- param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
+ param->active_fec = nfp_port_fec_nsp_to_ethtool(BIT(eth_port->act_fec));
return 0;
}
@@ -1676,11 +1713,166 @@ static int nfp_net_set_phys_id(struct net_device *netdev,
return err;
}
+#define NFP_EEPROM_LEN ETH_ALEN
+
+static int
+nfp_net_get_eeprom_len(struct net_device *netdev)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return 0;
+
+ return NFP_EEPROM_LEN;
+}
+
+static int
+nfp_net_get_nsp_hwindex(struct net_device *netdev,
+ struct nfp_nsp **nspptr,
+ u32 *index)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
+ netdev_err(netdev, "NSP doesn't support PF MAC generation\n");
+ nfp_nsp_close(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ *nspptr = nsp;
+ *index = eth_port->eth_index;
+
+ return 0;
+}
+
+static int
+nfp_net_get_port_mac_by_hwinfo(struct net_device *netdev,
+ u8 *mac_addr)
+{
+ char hwinfo[32] = {};
+ struct nfp_nsp *nsp;
+ u32 index;
+ int err;
+
+ err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
+ if (err)
+ return err;
+
+ snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac", index);
+ err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ netdev_err(netdev, "Reading persistent MAC address failed: %d\n",
+ err);
+ return -EOPNOTSUPP;
+ }
+
+ if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &mac_addr[0], &mac_addr[1], &mac_addr[2],
+ &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
+ netdev_err(netdev, "Can't parse persistent MAC address (%s)\n",
+ hwinfo);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_set_port_mac_by_hwinfo(struct net_device *netdev,
+ u8 *mac_addr)
+{
+ char hwinfo[32] = {};
+ struct nfp_nsp *nsp;
+ u32 index;
+ int err;
+
+ err = nfp_net_get_nsp_hwindex(netdev, &nsp, &index);
+ if (err)
+ return err;
+
+ snprintf(hwinfo, sizeof(hwinfo),
+ "eth%u.mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ index, mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5]);
+
+ err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
+ nfp_nsp_close(nsp);
+ if (err) {
+ netdev_err(netdev, "HWinfo set failed: %d, hwinfo: %s\n",
+ err, hwinfo);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+nfp_net_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ u8 buf[NFP_EEPROM_LEN] = {};
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ eeprom->magic = nn->pdev->vendor | (nn->pdev->device << 16);
+ memcpy(bytes, buf + eeprom->offset, eeprom->len);
+
+ return 0;
+}
+
+static int
+nfp_net_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ u8 buf[NFP_EEPROM_LEN] = {};
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != (nn->pdev->vendor | nn->pdev->device << 16))
+ return -EINVAL;
+
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ memcpy(buf + eeprom->offset, bytes, eeprom->len);
+ if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static const struct ethtool_ops nfp_net_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = nfp_net_get_drvinfo,
+ .nway_reset = nfp_net_nway_reset,
.get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
@@ -1699,6 +1891,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_eeprom_len = nfp_net_get_eeprom_len,
+ .get_eeprom = nfp_net_get_eeprom,
+ .set_eeprom = nfp_net_set_eeprom,
.get_module_info = nfp_port_get_module_info,
.get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
@@ -1715,6 +1910,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
const struct ethtool_ops nfp_port_ethtool_ops = {
.get_drvinfo = nfp_app_get_drvinfo,
+ .nway_reset = nfp_net_nway_reset,
.get_link = ethtool_op_get_link,
.get_strings = nfp_port_get_strings,
.get_ethtool_stats = nfp_port_get_stats,
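The new EEPROM helpers above round-trip the persistent MAC through the NSP hwinfo as text, parsing it with sscanf("%02hhx:...") and writing it back with snprintf(). A standalone sketch of that parse/format round trip; the address and buffer names are illustrative:

#include <stdio.h>

int main(void)
{
        const char *hwinfo = "00:15:4d:12:34:56";   /* made-up example value */
        unsigned char mac[6];
        char out[32];

        if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
                   &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) != 6) {
                fprintf(stderr, "cannot parse MAC\n");
                return 1;
        }

        snprintf(out, sizeof(out),
                 "eth%u.mac=%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
                 0U, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        printf("%s\n", out);
        return 0;
}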
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index ca4e05650fe6..3bae92dc899e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -77,12 +77,6 @@ static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
-static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
-{
- return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
- NFP_APP_CORE_NIC);
-}
-
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
if (nfp_net_is_data_vnic(nn))
@@ -202,6 +196,9 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
goto err_free_prev;
}
+ if (nn->port)
+ nn->port->link_cb = nfp_net_refresh_port_table;
+
ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
/* Kill the vNIC if app init marked it as invalid */
@@ -523,6 +520,57 @@ err_unmap_ctrl:
return err;
}
+static const unsigned int lr_to_speed[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
+};
+
+unsigned int nfp_net_lr2speed(unsigned int linkrate)
+{
+ if (linkrate < ARRAY_SIZE(lr_to_speed))
+ return lr_to_speed[linkrate];
+
+ return SPEED_UNKNOWN;
+}
+
+unsigned int nfp_net_speed2lr(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
+ if (speed == lr_to_speed[i])
+ return i;
+ }
+
+ return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
+}
+
+static void nfp_net_notify_port_speed(struct nfp_port *port)
+{
+ struct net_device *netdev = port->netdev;
+ struct nfp_net *nn;
+ u16 sts;
+
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return;
+
+ nn = netdev_priv(netdev);
+ sts = nn_readw(nn, NFP_NET_CFG_STS);
+
+ if (!(sts & NFP_NET_CFG_STS_LINK)) {
+ nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
+ return;
+ }
+
+ nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
+}
+
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
struct nfp_eth_table *eth_table)
@@ -544,6 +592,7 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
}
memcpy(port->eth_port, eth_port, sizeof(*eth_port));
+ nfp_net_notify_port_speed(port);
return 0;
}
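nfp_net_lr2speed()/nfp_net_speed2lr() above centralize the link-rate table that nfp_net_get_link_ksettings() previously kept as a local array, so the mapping can now be used in both directions. A standalone sketch of the same table-based two-way lookup, with made-up enum values standing in for the NFP_NET_CFG_STS_LINK_RATE_* constants:

#include <stdio.h>

enum { LR_UNSUPPORTED, LR_UNKNOWN, LR_1G, LR_10G, LR_25G, LR_MAX };   /* stand-ins */
#define SPEED_UNKNOWN_VAL (-1)

static const int lr_to_speed[LR_MAX] = {
        [LR_UNSUPPORTED] = 0,
        [LR_UNKNOWN] = SPEED_UNKNOWN_VAL,
        [LR_1G] = 1000,
        [LR_10G] = 10000,
        [LR_25G] = 25000,
};

static int lr2speed(unsigned int lr)
{
        return lr < LR_MAX ? lr_to_speed[lr] : SPEED_UNKNOWN_VAL;
}

static unsigned int speed2lr(int speed)
{
        unsigned int i;

        for (i = 0; i < LR_MAX; i++)
                if (lr_to_speed[i] == speed)
                        return i;
        return LR_UNKNOWN;
}

int main(void)
{
        printf("lr2speed(LR_25G) = %d\n", lr2speed(LR_25G));
        printf("speed2lr(10000)  = %u\n", speed2lr(10000));
        return 0;
}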
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index d1ebe6c72f7f..6793cdf9ff11 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -46,6 +46,7 @@ enum nfp_port_flags {
* @tc_offload_cnt: number of active TC offloads, how offloads are counted
* is not defined, use as a boolean
* @app: backpointer to the app structure
+ * @link_cb: callback when the link status changes
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
@@ -66,6 +67,7 @@ struct nfp_port {
unsigned long tc_offload_cnt;
struct nfp_app *app;
+ void (*link_cb)(struct nfp_port *port);
struct devlink_port dl_port;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 77d66855be42..992d72ac98d3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -132,6 +132,7 @@ enum nfp_eth_fec {
* @ports.interface: interface (module) plugged in
* @ports.media: media type of the @interface
* @ports.fec: forward error correction mode
+ * @ports.act_fec: active forward error correction mode
* @ports.aneg: auto negotiation mode
* @ports.mac_addr: interface MAC address
* @ports.label_port: port id
@@ -162,6 +163,7 @@ struct nfp_eth_table {
enum nfp_eth_media media;
enum nfp_eth_fec fec;
+ enum nfp_eth_fec act_fec;
enum nfp_eth_aneg aneg;
u8 mac_addr[ETH_ALEN];
@@ -172,6 +174,7 @@ struct nfp_eth_table {
bool enabled;
bool tx_enabled;
bool rx_enabled;
+ bool supp_aneg;
bool override_changed;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index edd300033735..bb64efec4c46 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -27,6 +27,7 @@
#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
+#define NSP_ETH_PORT_SUPP_ANEG BIT_ULL(63)
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
@@ -40,6 +41,7 @@
#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
+#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -170,7 +172,14 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
if (dst->fec_modes_supported)
dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
- dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
+ dst->fec = FIELD_GET(NSP_ETH_STATE_FEC, state);
+ dst->act_fec = dst->fec;
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 33)
+ return;
+
+ dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state);
+ dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port);
}
static void
@@ -507,6 +516,7 @@ int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
nfp_err(nfp_nsp_cpp(nsp),
"set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
return -EOPNOTSUPP;
}
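The nfp_eth_port_translate() hunk above reads the new ACT_FEC and SUPP_ANEG fields with FIELD_GET(), which extracts the bits selected by a GENMASK()-style mask and shifts them down to bit 0. A standalone sketch of the same extraction done by hand; the register layout below is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK64(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define STATE_ACT_FEC    GENMASK64(29, 28)    /* illustrative field, bits 29:28 */

/* Hand-rolled equivalent of FIELD_GET(mask, reg). */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
        return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
        uint64_t state = 0x2ULL << 28;        /* pretend ACT_FEC == 2 */

        printf("act_fec = %llu\n",
               (unsigned long long)field_get(STATE_ACT_FEC, state));
        return 0;
}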
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 4b3482ce90a1..3db4a2431741 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -990,8 +990,8 @@ static const struct net_device_ops nixge_netdev_ops = {
static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, "nixge", sizeof(ed->driver));
- strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
+ strscpy(ed->driver, "nixge", sizeof(ed->driver));
+ strscpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}
static int
@@ -1294,7 +1294,7 @@ static int nixge_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->dev = &pdev->dev;
- netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, nixge_poll);
err = nixge_of_get_resources(pdev);
if (err)
goto free_netdev;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 5116badaf091..daa028729d44 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -4291,9 +4291,9 @@ static void nv_do_stats_poll(struct timer_list *t)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -5876,7 +5876,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
else
dev->netdev_ops = &nv_netdev_ops_optimized;
- netif_napi_add(dev, &np->napi, nv_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &np->napi, nv_napi_poll);
dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index f606d75b33b4..1a4a272f4c5c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1184,9 +1184,9 @@ static int lpc_eth_open(struct net_device *ndev)
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 84cc79e928c8..541b8bcd3223 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -169,9 +169,9 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 46da937ad27f..3f2c30184752 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2516,8 +2516,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->netdev_ops = &pch_gbe_netdev_ops;
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
- netif_napi_add(netdev, &adapter->napi,
- pch_gbe_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &adapter->napi, pch_gbe_napi_poll);
netdev->hw_features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features = netdev->hw_features;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 9c408328be0d..1cc001087193 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1819,9 +1819,9 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct hamachi_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 03650022d444..640ac01689fb 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1340,9 +1340,9 @@ static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
struct yellowfin_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index f0ace3a0e85c..aaab590ef548 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1697,7 +1697,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mac->pdev = pdev;
mac->netdev = dev;
- netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);
+ netif_napi_add(dev, &mac->napi, pasemi_mac_poll);
dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_GSO;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 0a7a757494bc..ce436e97324a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -320,16 +320,16 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
- err = ionic_lif_register(ionic->lif);
+ err = ionic_devlink_register(ionic);
if (err) {
- dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
+ dev_err(dev, "Cannot register devlink: %d\n", err);
goto err_out_deinit_lifs;
}
- err = ionic_devlink_register(ionic);
+ err = ionic_lif_register(ionic->lif);
if (err) {
- dev_err(dev, "Cannot register devlink: %d\n", err);
- goto err_out_deregister_lifs;
+ dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
+ goto err_out_deregister_devlink;
}
mod_timer(&ionic->watchdog_timer,
@@ -337,8 +337,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-err_out_deregister_lifs:
- ionic_lif_unregister(ionic->lif);
+err_out_deregister_devlink:
+ ionic_devlink_unregister(ionic);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
ionic_lif_deinit(ionic->lif);
@@ -380,8 +380,8 @@ static void ionic_remove(struct pci_dev *pdev)
del_timer_sync(&ionic->watchdog_timer);
if (ionic->lif) {
- ionic_devlink_unregister(ionic);
ionic_lif_unregister(ionic->lif);
+ ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
ionic_lif_free(ionic->lif);
ionic->lif = NULL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 0be79c516781..5d58fd99be3c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -774,8 +774,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -830,11 +829,9 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
- netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3165,8 +3162,7 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
- netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
napi_enable(&qcq->napi);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3c4a84ea6321..8c4cb910e09b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -65,9 +65,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ strscpy(drvinfo->driver, netxen_nic_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ strscpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
@@ -75,7 +75,7 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 4e6f00af17d9..de8d54b23f73 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -173,8 +173,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- netif_napi_add(netdev, &sds_ring->napi,
- netxen_nic_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &sds_ring->napi, netxen_nic_poll);
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index d701ecd3ba00..2661c483c67e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1119,7 +1119,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
snprintf(bit_name, 30,
p_aeu->bit_name, num);
else
- strlcpy(bit_name,
+ strscpy(bit_name,
p_aeu->bit_name, 30);
/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 97a7ab0826ed..8034d812d5a0 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -624,7 +624,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
struct qede_dev *edev = netdev_priv(ndev);
char mbi[ETHTOOL_FWVERS_LEN];
- strlcpy(info->driver, "qede", sizeof(info->driver));
+ strscpy(info->driver, "qede", sizeof(info->driver));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -661,7 +661,7 @@ static void qede_get_drvinfo(struct net_device *ndev,
"mfw %s", mfw);
}
- strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index f56b679adb4b..953f304b8588 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1214,7 +1214,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* Start the Slowpath-process */
memset(&sp_params, 0, sizeof(sp_params));
sp_params.int_mode = QED_INT_MODE_MSIX;
- strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
rc = qed_ops->common->slowpath_start(cdev, &sp_params);
if (rc) {
pr_notice("Cannot start slowpath\n");
@@ -1904,8 +1904,7 @@ static void qede_napi_add_enable(struct qede_dev *edev)
/* Add NAPI objects */
for_each_queue(i) {
- netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
- qede_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
napi_enable(&edev->fp_array[i].napi);
}
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 06f4d9a9e938..76072f8c3d2f 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,10 +1736,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, ql3xxx_driver_version,
+ strscpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ql3xxx_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
sizeof(drvinfo->bus_info));
}
@@ -3813,7 +3813,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
- netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
+ netif_napi_add(ndev, &qdev->napi, ql_poll);
ndev->irq = pdev->irq;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 54a2d653be63..1ee491f78c6b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -277,10 +277,10 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ strscpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
sizeof(drvinfo->version));
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 9da5e97f8a0a..92930a055cbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1586,17 +1586,15 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_check_multi_tx(adapter) &&
!adapter->ahw->diag_test) {
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll);
} else {
if (ring == (adapter->drv_sds_rings - 1))
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_poll);
else
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_rx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_rx_poll);
}
}
@@ -2115,17 +2113,14 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_rx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_rx_poll);
else
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_msix_sriov_vf_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_msix_sriov_vf_poll);
} else {
netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_poll,
- NAPI_POLL_WEIGHT);
+ qlcnic_83xx_poll);
}
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index a55c52696d49..3115b2c12898 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -684,8 +684,7 @@ static int emac_probe(struct platform_device *pdev)
/* Initialize queues */
emac_mac_rx_tx_ring_init_all(pdev, adpt);
- netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx);
ret = register_netdev(netdev);
if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 792ce9a323cd..f62c39544e08 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -164,10 +164,10 @@ qcaspi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *p)
{
struct qcaspi *qca = netdev_priv(dev);
- strlcpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
- strlcpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
- strlcpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
- strlcpy(p->bus_info, dev_name(&qca->spi_dev->dev),
+ strscpy(p->driver, QCASPI_DRV_NAME, sizeof(p->driver));
+ strscpy(p->version, QCASPI_DRV_VERSION, sizeof(p->version));
+ strscpy(p->fw_version, "QCA7000", sizeof(p->fw_version));
+ strscpy(p->bus_info, dev_name(&qca->spi_dev->dev),
sizeof(p->bus_info));
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index e5a0b38f7dbe..2b033060fc20 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -19,7 +19,7 @@ struct rmnet_map_control_command {
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
- u8 data[0];
+ DECLARE_FLEX_ARRAY(u8, data);
};
} __aligned(1);
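
The rmnet hunk above replaces a zero-length array inside a union with DECLARE_FLEX_ARRAY(). A sketch of the pattern with a hypothetical type: C does not allow a bare flexible array member directly in a union (or as the sole member of a struct), so the macro wraps it in an anonymous struct:

    #include <linux/stddef.h>
    #include <linux/types.h>

    struct example_map_command {
            union {
                    struct {
                            __be16 flow_id;
                            __be16 reserved;
                    } header;
                    /* expands to a one-member anonymous struct holding u8 payload[] */
                    DECLARE_FLEX_ARRAY(u8, payload);
            };
    };
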
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index a6bf7d505178..eecd52ed1ed2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -939,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct r6040_private *rp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1127,7 +1127,7 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, r6040_poll, 64);
+ netif_napi_add(dev, &lp->napi, r6040_poll);
lp->mii_bus = mdiobus_alloc();
if (!lp->mii_bus) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e0feeec13da6..f5786d78ed23 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1382,9 +1382,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 15b40fd93cd2..469e2e229c6e 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1002,7 +1002,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
dev->netdev_ops = &rtl8139_netdev_ops;
dev->ethtool_ops = &rtl8139_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
+ netif_napi_add(dev, &tp->napi, rtl8139_poll);
/* note: the hardware is not capable of sg/csum/highdma, however
* through the use of skb_copy_and_csum_dev we enable these
@@ -2380,9 +2380,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
}
static int rtl8139_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 8da4b66b71b5..55ef8251feb5 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -23,10 +23,10 @@ enum mac_version {
RTL_GIGA_MAC_VER_09,
RTL_GIGA_MAC_VER_10,
RTL_GIGA_MAC_VER_11,
- RTL_GIGA_MAC_VER_12,
- RTL_GIGA_MAC_VER_13,
+ /* RTL_GIGA_MAC_VER_12 was handled the same as VER_17 */
+ /* RTL_GIGA_MAC_VER_13 was merged with VER_10 */
RTL_GIGA_MAC_VER_14,
- RTL_GIGA_MAC_VER_16,
+ /* RTL_GIGA_MAC_VER_16 was merged with VER_10 */
RTL_GIGA_MAC_VER_17,
RTL_GIGA_MAC_VER_18,
RTL_GIGA_MAC_VER_19,
@@ -51,20 +51,20 @@ enum mac_version {
RTL_GIGA_MAC_VER_38,
RTL_GIGA_MAC_VER_39,
RTL_GIGA_MAC_VER_40,
- RTL_GIGA_MAC_VER_41,
+ /* support for RTL_GIGA_MAC_VER_41 has been removed */
RTL_GIGA_MAC_VER_42,
RTL_GIGA_MAC_VER_43,
RTL_GIGA_MAC_VER_44,
- RTL_GIGA_MAC_VER_45,
+ /* support for RTL_GIGA_MAC_VER_45 has been removed */
RTL_GIGA_MAC_VER_46,
- RTL_GIGA_MAC_VER_47,
+ /* support for RTL_GIGA_MAC_VER_47 has been removed */
RTL_GIGA_MAC_VER_48,
- RTL_GIGA_MAC_VER_49,
- RTL_GIGA_MAC_VER_50,
+ /* support for RTL_GIGA_MAC_VER_49 has been removed */
+ /* support for RTL_GIGA_MAC_VER_50 has been removed */
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
RTL_GIGA_MAC_VER_53,
- RTL_GIGA_MAC_VER_60,
+ /* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
RTL_GIGA_MAC_NONE
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 1b7fdb4f056b..a73d061d9fcb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -49,10 +49,8 @@
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
-#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw"
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw"
-#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
@@ -102,12 +100,9 @@ static const struct {
[RTL_GIGA_MAC_VER_07] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_08] = {"RTL8102e" },
[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e" },
- [RTL_GIGA_MAC_VER_10] = {"RTL8101e" },
+ [RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e" },
[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" },
- [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" },
[RTL_GIGA_MAC_VER_14] = {"RTL8401" },
- [RTL_GIGA_MAC_VER_16] = {"RTL8101e" },
[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" },
[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" },
[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c" },
@@ -131,20 +126,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] = {"RTL8411", FIRMWARE_8411_1 },
[RTL_GIGA_MAC_VER_39] = {"RTL8106e", FIRMWARE_8106E_1},
[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g", FIRMWARE_8168G_2},
- [RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g" },
[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu", FIRMWARE_8168G_3},
[RTL_GIGA_MAC_VER_43] = {"RTL8106eus", FIRMWARE_8106E_2},
[RTL_GIGA_MAC_VER_44] = {"RTL8411b", FIRMWARE_8411_2 },
- [RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h", FIRMWARE_8168H_1},
[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h", FIRMWARE_8168H_2},
- [RTL_GIGA_MAC_VER_47] = {"RTL8107e", FIRMWARE_8107E_1},
[RTL_GIGA_MAC_VER_48] = {"RTL8107e", FIRMWARE_8107E_2},
- [RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
- [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
[RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
- [RTL_GIGA_MAC_VER_60] = {"RTL8125A" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
@@ -658,10 +647,8 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
-MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8168FP_3);
-MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
@@ -689,7 +676,7 @@ static void rtl_pci_commit(struct rtl8169_private *tp)
static bool rtl_is_8125(struct rtl8169_private *tp)
{
- return tp->mac_version >= RTL_GIGA_MAC_VER_60;
+ return tp->mac_version >= RTL_GIGA_MAC_VER_61;
}
static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
@@ -892,8 +879,6 @@ static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
if (value & BMCR_RESET || !(value & BMCR_PDOWN))
rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
else
@@ -1207,7 +1192,7 @@ static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
return RTL_DASH_NONE;
@@ -1423,11 +1408,11 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
if (rtl_fw)
- strlcpy(info->fw_version, rtl_fw->version,
+ strscpy(info->fw_version, rtl_fw->version,
sizeof(info->fw_version));
}
@@ -2011,7 +1996,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168F family. */
{ 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
- { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ /* It seems this chip version never made it to
+ * the wild. Let's disable detection.
+ * { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ */
{ 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
@@ -2041,7 +2029,6 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
- { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
{ 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
@@ -2054,19 +2041,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
{ 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
{ 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
- { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
{ 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
- { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
- { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
- { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
- /* FIXME: where did these entries come from ? -- FR
- * Not even r8101 vendor driver knows these id's,
- * so let's disable detection for now. -- HK
- * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
- * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
- */
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_10 },
/* 8110 family. */
{ 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
@@ -2088,8 +2066,6 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
if (ver != RTL_GIGA_MAC_NONE && !gmii) {
if (ver == RTL_GIGA_MAC_VER_42)
ver = RTL_GIGA_MAC_VER_43;
- else if (ver == RTL_GIGA_MAC_VER_45)
- ver = RTL_GIGA_MAC_VER_47;
else if (ver == RTL_GIGA_MAC_VER_46)
ver = RTL_GIGA_MAC_VER_48;
}
@@ -2271,7 +2247,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
default:
@@ -2338,7 +2314,6 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
readrq = 512;
@@ -2455,7 +2430,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
case RTL_GIGA_MAC_VER_63:
@@ -2468,6 +2443,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
}
}
+static void rtl_disable_rxdvgate(struct rtl8169_private *tp)
+{
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+}
+
static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
{
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
@@ -2700,8 +2680,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2712,8 +2692,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
}
} else {
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_45 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
@@ -2985,7 +2965,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_reset_packet_filter(tp);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3223,7 +3203,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3274,7 +3254,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3288,45 +3268,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_pcie_state_l2l3_disable(tp);
}
-static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8168ep_1[] = {
- { 0x00, 0xffff, 0x10ab },
- { 0x06, 0xffff, 0xf030 },
- { 0x08, 0xffff, 0x2006 },
- { 0x0d, 0xffff, 0x1666 },
- { 0x0c, 0x3ff0, 0x0000 }
- };
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_1);
-
- rtl_hw_start_8168ep(tp);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
-}
-
-static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8168ep_2[] = {
- { 0x00, 0xffff, 0x10a3 },
- { 0x19, 0xffff, 0xfc00 },
- { 0x1e, 0xffff, 0x20ea }
- };
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_2);
-
- rtl_hw_start_8168ep(tp);
-
- RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
- RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
-
- rtl_hw_aspm_clkreq_enable(tp, true);
-}
-
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168ep_3[] = {
@@ -3377,7 +3318,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ rtl_disable_rxdvgate(tp);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
@@ -3621,48 +3562,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
else
rtl8125a_config_eee_mac(tp);
- RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
- udelay(10);
-}
-
-static void rtl_hw_start_8125a_1(struct rtl8169_private *tp)
-{
- static const struct ephy_info e_info_8125a_1[] = {
- { 0x01, 0xffff, 0xa812 },
- { 0x09, 0xffff, 0x520c },
- { 0x04, 0xffff, 0xd000 },
- { 0x0d, 0xffff, 0xf702 },
- { 0x0a, 0xffff, 0x8653 },
- { 0x06, 0xffff, 0x001e },
- { 0x08, 0xffff, 0x3595 },
- { 0x20, 0xffff, 0x9455 },
- { 0x21, 0xffff, 0x99ff },
- { 0x02, 0xffff, 0x6046 },
- { 0x29, 0xffff, 0xfe00 },
- { 0x23, 0xffff, 0xab62 },
-
- { 0x41, 0xffff, 0xa80c },
- { 0x49, 0xffff, 0x520c },
- { 0x44, 0xffff, 0xd000 },
- { 0x4d, 0xffff, 0xf702 },
- { 0x4a, 0xffff, 0x8653 },
- { 0x46, 0xffff, 0x001e },
- { 0x48, 0xffff, 0x3595 },
- { 0x60, 0xffff, 0x9455 },
- { 0x61, 0xffff, 0x99ff },
- { 0x42, 0xffff, 0x6046 },
- { 0x69, 0xffff, 0xfe00 },
- { 0x63, 0xffff, 0xab62 },
- };
-
- rtl_set_def_aspm_entry_latency(tp);
-
- /* disable aspm and clock request before access ephy */
- rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8125a_1);
-
- rtl_hw_start_8125_common(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
+ rtl_disable_rxdvgate(tp);
}
static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
@@ -3721,10 +3621,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
[RTL_GIGA_MAC_VER_10] = NULL,
[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
- [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
- [RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
- [RTL_GIGA_MAC_VER_16] = NULL,
[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
@@ -3748,20 +3645,14 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
- [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
- [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
- [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
- [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
- [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
- [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
};
@@ -4156,7 +4047,6 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
padto = max_t(unsigned int, padto, ETH_ZLEN);
@@ -4677,8 +4567,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
pm_runtime_idle(&tp->pci_dev->dev);
}
- if (net_ratelimit())
- phy_print_status(tp->phydev);
+ phy_print_status(tp->phydev);
}
static int r8169_phy_connect(struct rtl8169_private *tp)
@@ -4954,23 +4843,6 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
rtl8169_runtime_idle)
};
-static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
-{
- /* WoL fails with 8168b when the receiver is disabled. */
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- pci_clear_master(tp->pci_dev);
-
- RTL_W8(tp, ChipCmd, CmdRxEnb);
- rtl_pci_commit(tp);
- break;
- default:
- break;
- }
-}
-
static void rtl_shutdown(struct pci_dev *pdev)
{
struct rtl8169_private *tp = pci_get_drvdata(pdev);
@@ -4984,9 +4856,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF &&
tp->dash_type == RTL_DASH_NONE) {
- if (tp->saved_wolopts)
- rtl_wol_shutdown_quirk(tp);
-
pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
@@ -5194,13 +5063,13 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
rtl_hw_init_8125(tp);
break;
default:
@@ -5220,7 +5089,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
return JUMBO_7K;
/* RTL8168b */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
return JUMBO_4K;
/* RTL8168c */
@@ -5231,37 +5099,6 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
}
}
-static void rtl_disable_clk(void *data)
-{
- clk_disable_unprepare(data);
-}
-
-static int rtl_get_ether_clk(struct rtl8169_private *tp)
-{
- struct device *d = tp_to_dev(tp);
- struct clk *clk;
- int rc;
-
- clk = devm_clk_get(d, "ether_clk");
- if (IS_ERR(clk)) {
- rc = PTR_ERR(clk);
- if (rc == -ENOENT)
- /* clk-core allows NULL (for suspend / resume) */
- rc = 0;
- else
- dev_err_probe(d, rc, "failed to get clk\n");
- } else {
- tp->clk = clk;
- rc = clk_prepare_enable(clk);
- if (rc)
- dev_err(d, "failed to enable clk: %d\n", rc);
- else
- rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
- }
-
- return rc;
-}
-
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
u8 mac_addr[ETH_ALEN] __aligned(2) = {};
@@ -5291,7 +5128,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_60 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
@@ -5325,9 +5162,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
/* Get the *optional* external "ether_clk" used on some boards */
- rc = rtl_get_ether_clk(tp);
- if (rc)
- return rc;
+ tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
+ if (IS_ERR(tp->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n");
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
@@ -5346,12 +5183,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- /* check for weird/broken PCI region reporting */
- if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
- return -ENODEV;
- }
-
rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
if (rc < 0) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
@@ -5378,7 +5209,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (rtl_aspm_is_safe(tp))
rc = 0;
- else if (tp->mac_version >= RTL_GIGA_MAC_VER_45)
+ else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
@@ -5413,7 +5244,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &rtl8169_ethtool_ops;
- netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &tp->napi, rtl8169_poll);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
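
The probe-path hunks above replace the driver's hand-rolled rtl_get_ether_clk() with devm_clk_get_optional_enabled(). A sketch of the devres pattern with hypothetical names: the clock is optional (NULL when the board has none), comes back prepared and enabled, and is disabled, unprepared and put automatically when the device is unbound, so no explicit cleanup action is needed:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int example_get_clock(struct device *dev, struct clk **out)
    {
            struct clk *clk;

            clk = devm_clk_get_optional_enabled(dev, "ether_clk");
            if (IS_ERR(clk))
                    return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");

            *out = clk;     /* may be NULL; the clk API accepts NULL consumers */
            return 0;
    }
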
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 15c295f90196..930496cd34ed 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -793,71 +793,6 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_config_eee_phy(phydev);
}
-static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- u16 dout_tapbin;
- u32 data;
-
- r8169_apply_firmware(tp);
-
- /* CHN EST parameters adjust - giga master */
- r8168g_phy_param(phydev, 0x809b, 0xf800, 0x8000);
- r8168g_phy_param(phydev, 0x80a2, 0xff00, 0x8000);
- r8168g_phy_param(phydev, 0x80a4, 0xff00, 0x8500);
- r8168g_phy_param(phydev, 0x809c, 0xff00, 0xbd00);
-
- /* CHN EST parameters adjust - giga slave */
- r8168g_phy_param(phydev, 0x80ad, 0xf800, 0x7000);
- r8168g_phy_param(phydev, 0x80b4, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x80ac, 0xff00, 0x4000);
-
- /* CHN EST parameters adjust - fnet */
- r8168g_phy_param(phydev, 0x808e, 0xff00, 0x1200);
- r8168g_phy_param(phydev, 0x8090, 0xff00, 0xe500);
- r8168g_phy_param(phydev, 0x8092, 0xff00, 0x9f00);
-
- /* enable R-tune & PGA-retune function */
- dout_tapbin = 0;
- data = phy_read_paged(phydev, 0x0a46, 0x13);
- data &= 3;
- data <<= 2;
- dout_tapbin |= data;
- data = phy_read_paged(phydev, 0x0a46, 0x12);
- data &= 0xc000;
- data >>= 14;
- dout_tapbin |= data;
- dout_tapbin = ~(dout_tapbin ^ 0x08);
- dout_tapbin <<= 12;
- dout_tapbin &= 0xf000;
-
- r8168g_phy_param(phydev, 0x827a, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827b, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827c, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x827d, 0xf000, dout_tapbin);
- r8168g_phy_param(phydev, 0x0811, 0x0000, 0x0800);
- phy_modify_paged(phydev, 0x0a42, 0x16, 0x0000, 0x0002);
-
- rtl8168g_enable_gphy_10m(phydev);
-
- /* SAR ADC performance */
- phy_modify_paged(phydev, 0x0bca, 0x17, BIT(12) | BIT(13), BIT(14));
-
- r8168g_phy_param(phydev, 0x803f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8047, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x804f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8057, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x805f, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x8067, 0x3000, 0x0000);
- r8168g_phy_param(phydev, 0x806f, 0x3000, 0x0000);
-
- /* disable phy pfm mode */
- phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
-
- rtl8168g_disable_aldps(phydev);
- rtl8168h_config_eee_phy(phydev);
-}
-
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -895,27 +830,6 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168g_config_eee_phy(phydev);
}
-static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- /* Enable PHY auto speed down */
- phy_modify_paged(phydev, 0x0a44, 0x11, 0, BIT(3) | BIT(2));
-
- rtl8168g_phy_adjust_10m_aldps(phydev);
-
- /* Enable EEE auto-fallback function */
- phy_modify_paged(phydev, 0x0a4b, 0x11, 0, BIT(2));
-
- /* Enable UC LPF tune function */
- r8168g_phy_param(phydev, 0x8012, 0x0000, 0x8000);
-
- /* set rg_sel_sdm_rate */
- phy_modify_paged(phydev, 0x0c42, 0x11, BIT(13), BIT(14));
-
- rtl8168g_disable_aldps(phydev);
- rtl8168g_config_eee_phy(phydev);
-}
-
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1081,44 +995,6 @@ static void rtl8125_legacy_force_mode(struct phy_device *phydev)
phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0);
}
-static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
- phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
- phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
- phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
- phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
- phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
- phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
- phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
- phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
-
- r8168g_phy_param(phydev, 0x80ea, 0xff00, 0xc400);
- r8168g_phy_param(phydev, 0x80eb, 0x0700, 0x0300);
- r8168g_phy_param(phydev, 0x80f8, 0xff00, 0x1c00);
- r8168g_phy_param(phydev, 0x80f1, 0xff00, 0x3000);
- r8168g_phy_param(phydev, 0x80fe, 0xff00, 0xa500);
- r8168g_phy_param(phydev, 0x8102, 0xff00, 0x5000);
- r8168g_phy_param(phydev, 0x8105, 0xff00, 0x3300);
- r8168g_phy_param(phydev, 0x8100, 0xff00, 0x7000);
- r8168g_phy_param(phydev, 0x8104, 0xff00, 0xf000);
- r8168g_phy_param(phydev, 0x8106, 0xff00, 0x6500);
- r8168g_phy_param(phydev, 0x80dc, 0xff00, 0xed00);
- r8168g_phy_param(phydev, 0x80df, 0x0000, 0x0100);
- r8168g_phy_param(phydev, 0x80e1, 0x0100, 0x0000);
-
- phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
- r8168g_phy_param(phydev, 0x819f, 0xffff, 0xd0b6);
-
- phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
- phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
- phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
- rtl8168g_enable_gphy_10m(phydev);
-
- rtl8125a_config_eee_phy(phydev);
-}
-
static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1239,10 +1115,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
[RTL_GIGA_MAC_VER_10] = NULL,
[RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
- [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
- [RTL_GIGA_MAC_VER_13] = NULL,
[RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
- [RTL_GIGA_MAC_VER_16] = NULL,
[RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
[RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
[RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
@@ -1266,20 +1139,14 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
[RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
[RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_41] = NULL,
[RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
[RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
[RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
[RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
[RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
- [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
- [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
};
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b980bce763d3..e0f8276cffed 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -189,6 +189,7 @@ enum ravb_reg {
PSR = 0x0528,
PIPR = 0x052c,
CXR31 = 0x0530, /* RZ/G2L only */
+ CXR35 = 0x0540, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
@@ -965,6 +966,13 @@ enum CXR31_BIT {
CXR31_SEL_LINK1 = 0x00000008,
};
+enum CXR35_BIT {
+ CXR35_SEL_XMII = 0x00000003,
+ CXR35_SEL_XMII_RGMII = 0x00000000,
+ CXR35_SEL_XMII_MII = 0x00000002,
+ CXR35_HALFCYC_CLKSW = 0xffff0000,
+};
+
enum CSR0_BIT {
CSR0_TPE = 0x00000010,
CSR0_RPE = 0x00000020,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index b357ac4c56c5..36324126db6d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -540,7 +540,13 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+ } else {
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+ CXR31_SEL_LINK0);
+ }
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@@ -1449,6 +1455,8 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -2512,6 +2520,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
@@ -2832,9 +2841,9 @@ static int ravb_probe(struct platform_device *pdev)
goto out_dma_free;
}
- netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
/* Network device register */
error = register_netdev(ndev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 67ade78fb767..71a499113308 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2029,6 +2029,8 @@ static int sh_eth_phy_init(struct net_device *ndev)
if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
phy_set_max_speed(phydev, SPEED_100);
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
return 0;
@@ -3366,7 +3368,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
- netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
+ netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
/* network device register */
ret = register_netdev(ndev);
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fc83ec23bd1d..023682cd2768 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2226,8 +2226,8 @@ rocker_port_set_link_ksettings(struct net_device *dev,
static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
@@ -2574,8 +2574,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
netif_napi_add_tx(dev, &rocker_port->napi_tx, rocker_port_poll_tx);
- netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx);
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bc70c6abd6a5..58cf7cc54f40 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
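
The ofdpa hunk above switches the neighbour-entry allocation to GFP_ATOMIC because this path can be reached from atomic context (neighbour updates are delivered from timer/softirq context). A sketch of the general rule, with hypothetical types and names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_entry {
            struct list_head list;
            __be32 ip_addr;
    };

    /* Allocations made under a spinlock or in softirq context must not
     * sleep, so GFP_ATOMIC is used instead of GFP_KERNEL.
     */
    static struct example_entry *example_alloc_locked(spinlock_t *lock)
    {
            struct example_entry *entry;

            spin_lock_bh(lock);
            entry = kzalloc(sizeof(*entry), GFP_ATOMIC);    /* must not sleep here */
            spin_unlock_bh(lock);

            return entry;
    }
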
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 98edb01024f0..8ba017ec9849 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -175,8 +175,8 @@ static int sxgbe_set_eee(struct net_device *dev,
static void sxgbe_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 sxgbe_getmsglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index a1c10b61269b..9664f029fa16 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2143,7 +2143,7 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
}
- netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+ netif_napi_add(ndev, &priv->napi, sxgbe_poll);
spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index bb06fa228367..b5e45fc6337e 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -9,7 +9,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
- mae.o tc.o
+ mae.o tc.o tc_bindings.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ee734b69150f..d1e1aa19a68e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4213,7 +4213,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ev_test_generate = efx_ef10_ev_test_generate,
.filter_table_probe = efx_ef10_filter_table_probe,
.filter_table_restore = efx_mcdi_filter_table_restore,
- .filter_table_remove = efx_mcdi_filter_table_remove,
+ .filter_table_remove = efx_ef10_filter_table_remove,
.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
.filter_insert = efx_mcdi_filter_insert,
.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 702abbe59b76..135ece2f1375 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -43,6 +43,8 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
.get_sset_count = efx_ethtool_get_sset_count,
+ .get_priv_flags = efx_ethtool_get_priv_flags,
+ .set_priv_flags = efx_ethtool_set_priv_flags,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.get_link_ksettings = efx_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 17b9d37218cb..88fa29572e23 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -23,6 +23,7 @@
#include "mcdi_filters.h"
#include "rx_common.h"
#include "ef100_sriov.h"
+#include "tc_bindings.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -246,6 +247,9 @@ static const struct net_device_ops ef100_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
+#ifdef CONFIG_SFC_SRIOV
+ .ndo_setup_tc = efx_tc_setup,
+#endif
};
/* Netdev registration
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 8061efdaf82c..ad686c671ab8 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1137,6 +1137,9 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
*/
netif_warn(efx, probe, net_dev, "Failed to probe MAE rc %d\n",
rc);
+ } else {
+ net_dev->features |= NETIF_F_HW_TC;
+ efx->fixed_features |= NETIF_F_HW_TC;
}
#endif
return 0;
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 73ae4656a6e7..81ab22c74635 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -14,6 +14,7 @@
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
+#include "tc_bindings.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
@@ -42,8 +43,7 @@ static int efx_ef100_rep_open(struct net_device *net_dev)
{
struct efx_rep *efv = netdev_priv(net_dev);
- netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll);
napi_enable(&efv->napi);
return 0;
}
@@ -107,6 +107,20 @@ static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
return 0;
}
+static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct efx_rep *efv = netdev_priv(net_dev);
+ struct efx_nic *efx = efv->parent;
+
+ if (type == TC_SETUP_CLSFLOWER)
+ return efx_tc_flower(efx, net_dev, type_data, efv);
+ if (type == TC_SETUP_BLOCK)
+ return efx_tc_setup_block(net_dev, efx, type_data, efv);
+
+ return -EOPNOTSUPP;
+}
+
static void efx_ef100_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -120,13 +134,14 @@ static void efx_ef100_rep_get_stats64(struct net_device *dev,
stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}
-static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+const struct net_device_ops efx_ef100_rep_netdev_ops = {
.ndo_open = efx_ef100_rep_open,
.ndo_stop = efx_ef100_rep_close,
.ndo_start_xmit = efx_ef100_rep_xmit,
.ndo_get_port_parent_id = efx_ef100_rep_get_port_parent_id,
.ndo_get_phys_port_name = efx_ef100_rep_get_phys_port_name,
.ndo_get_stats64 = efx_ef100_rep_get_stats64,
+ .ndo_setup_tc = efx_ef100_rep_setup_tc,
};
static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
index 070f700893c1..c21bc716f847 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.h
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -66,4 +66,5 @@ void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
* Caller must hold rcu_read_lock().
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
+extern const struct net_device_ops efx_ef100_rep_netdev_ops;
#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 153d68e29b8b..054d5ce6029e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -778,7 +778,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
return;
if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
@@ -1175,6 +1175,17 @@ static int efx_pm_freeze(struct device *dev)
return 0;
}
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
static int efx_pm_thaw(struct device *dev)
{
int rc;
@@ -1279,6 +1290,7 @@ static struct pci_driver efx_pci_driver = {
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
.err_handler = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure,
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 032b8c0bd788..aaa381743bca 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -319,7 +319,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -1313,7 +1313,7 @@ void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}
void efx_init_napi(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index a929a1aaba92..c2224e41a694 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -996,7 +996,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->rx_prefix_size = efx->type->rx_prefix_size;
efx->rx_ip_align =
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bc840ede3053..6649a2327d03 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -101,15 +101,23 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+static const char efx_ethtool_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "log-tc-errors",
+};
+
+#define EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS BIT(0)
+
+#define EFX_ETHTOOL_PRIV_FLAGS_COUNT ARRAY_SIZE(efx_ethtool_priv_flags_strings)
+
void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
@@ -452,6 +460,8 @@ int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
efx_ptp_describe_stats(efx, NULL);
case ETH_SS_TEST:
return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+ case ETH_SS_PRIV_FLAGS:
+ return EFX_ETHTOOL_PRIV_FLAGS_COUNT;
default:
return -EINVAL;
}
@@ -468,7 +478,7 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (efx_describe_per_queue_stats(efx, strings) *
@@ -478,12 +488,39 @@ void efx_ethtool_get_strings(struct net_device *net_dev,
case ETH_SS_TEST:
efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < EFX_ETHTOOL_PRIV_FLAGS_COUNT; i++)
+ strscpy(strings + i * ETH_GSTRING_LEN,
+ efx_ethtool_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ break;
default:
/* No other string sets */
break;
}
}
+u32 efx_ethtool_get_priv_flags(struct net_device *net_dev)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ u32 ret_flags = 0;
+
+ if (efx->log_tc_errs)
+ ret_flags |= EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS;
+
+ return ret_flags;
+}
+
+int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ efx->log_tc_errs =
+ !!(flags & EFX_ETHTOOL_PRIV_FLAGS_LOG_TC_ERRS);
+
+ return 0;
+}
+
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
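
The "log-tc-errors" private flag registered above is driven from userspace through the standard ethtool private-flags interface, e.g. "ethtool --show-priv-flags <ifname>" to read it and "ethtool --set-priv-flags <ifname> log-tc-errors on" to set it (interface name hypothetical); judging by the efx->log_tc_errs field it toggles, it controls whether the TC offload code logs rule-translation errors.
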
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index 659491932101..0afc74021a5e 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -27,6 +27,8 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
u8 *strings);
+u32 efx_ethtool_get_priv_flags(struct net_device *net_dev);
+int efx_ethtool_set_priv_flags(struct net_device *net_dev, u32 flags);
void efx_ethtool_get_stats(struct net_device *net_dev,
struct ethtool_stats *stats __attribute__ ((unused)),
u64 *data);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index a63f40b09856..e151b0957751 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2012,7 +2012,7 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
struct ef4_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll, 64);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
}
static void ef4_init_napi(struct ef4_nic *efx)
@@ -2329,7 +2329,7 @@ static void ef4_unregister_netdev(struct ef4_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
if (ef4_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
@@ -2640,7 +2640,7 @@ static int ef4_init_struct(struct ef4_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 907254b36663..3976a333f7e3 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -162,9 +162,9 @@ static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct ef4_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
@@ -412,7 +412,7 @@ static void ef4_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (ef4_describe_per_queue_stats(efx, strings) *
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 3324a6219a09..7a1c9337081b 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -2387,7 +2387,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
board->i2c_data.data = efx;
board->i2c_adap.algo_data = &board->i2c_data;
board->i2c_adap.dev.parent = &efx->pci_dev->dev;
- strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
+ strscpy(board->i2c_adap.name, "SFC4000 GPIO",
sizeof(board->i2c_adap.name));
rc = i2c_bit_add_bus(&board->i2c_adap);
if (rc)
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index 156da315ec89..78c851b5a56f 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -452,7 +452,7 @@ size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 4d928839d292..be72e71da027 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/in6.h>
#include <asm/byteorder.h>
/**
@@ -224,6 +225,27 @@ efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
}
/**
+ * efx_filter_set_ipv6_local - specify IPv6 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv6_local(struct efx_filter_spec *spec, u8 proto,
+ const struct in6_addr *host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IPV6);
+ spec->ip_proto = proto;
+ memcpy(spec->loc_host, host, sizeof(spec->loc_host));
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
* @proto: Transport layer protocol number
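
efx_filter_set_ipv6_local(), added above, mirrors its IPv4 counterpart. A hypothetical caller, with priority, flags and queue number chosen purely for illustration:

    #include <linux/in.h>
    #include <linux/in6.h>

    #include "filter.h"     /* sfc driver-local header */

    /* Steer TCP traffic for one local IPv6 address and port to RX queue 0. */
    static void example_ipv6_filter(struct efx_filter_spec *spec,
                                    const struct in6_addr *addr, __be16 port)
    {
            efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, 0, 0);
            efx_filter_set_ipv6_local(spec, IPPROTO_TCP, addr, port);
    }
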
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 97627f5e3674..874c765b2465 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -112,6 +112,167 @@ int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
return 0;
}
+static int efx_mae_get_basic_caps(struct efx_nic *efx, struct mae_caps *caps)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_CAPS_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_MAE_GET_CAPS_IN_LEN);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_GET_CAPS, NULL, 0, outbuf,
+ sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ caps->match_field_count = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
+ caps->action_prios = MCDI_DWORD(outbuf, MAE_GET_CAPS_OUT_ACTION_PRIOS);
+ return 0;
+}
+
+static int efx_mae_get_rule_fields(struct efx_nic *efx, u32 cmd,
+ u8 *field_support)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_GET_AR_CAPS_OUT_LEN(MAE_NUM_FIELDS));
+ MCDI_DECLARE_STRUCT_PTR(caps);
+ unsigned int count;
+ size_t outlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_MAE_GET_AR_CAPS_IN_LEN);
+
+ rc = efx_mcdi_rpc(efx, cmd, NULL, 0, outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ count = MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_COUNT);
+ memset(field_support, MAE_FIELD_UNSUPPORTED, MAE_NUM_FIELDS);
+ caps = _MCDI_DWORD(outbuf, MAE_GET_AR_CAPS_OUT_FIELD_FLAGS);
+ /* We're only interested in the support status enum, not any other
+ * flags, so just extract that from each entry.
+ */
+ for (i = 0; i < count; i++)
+ if (i * sizeof(*outbuf) + MC_CMD_MAE_GET_AR_CAPS_OUT_FIELD_FLAGS_OFST < outlen)
+ field_support[i] = EFX_DWORD_FIELD(caps[i], MAE_FIELD_FLAGS_SUPPORT_STATUS);
+ return 0;
+}
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps)
+{
+ int rc;
+
+ rc = efx_mae_get_basic_caps(efx, caps);
+ if (rc)
+ return rc;
+ return efx_mae_get_rule_fields(efx, MC_CMD_MAE_GET_AR_CAPS,
+ caps->action_rule_fields);
+}
+
+/* Bit twiddling:
+ * Prefix: 1...110...0
+ * ~: 0...001...1
+ * + 1: 0...010...0 is power of two
+ * so (~x) & ((~x) + 1) == 0. Converse holds also.
+ */
+#define is_prefix_byte(_x) !(((_x) ^ 0xff) & (((_x) ^ 0xff) + 1))
+
+enum mask_type { MASK_ONES, MASK_ZEROES, MASK_PREFIX, MASK_OTHER };
+
+static const char *mask_type_name(enum mask_type typ)
+{
+ switch (typ) {
+ case MASK_ONES:
+ return "all-1s";
+ case MASK_ZEROES:
+ return "all-0s";
+ case MASK_PREFIX:
+ return "prefix";
+ case MASK_OTHER:
+ return "arbitrary";
+ default: /* can't happen */
+ return "unknown";
+ }
+}
+
+/* Checks a (big-endian) bytestring is a bit prefix */
+static enum mask_type classify_mask(const u8 *mask, size_t len)
+{
+ bool zeroes = true; /* All bits seen so far are zeroes */
+ bool ones = true; /* All bits seen so far are ones */
+ bool prefix = true; /* Valid prefix so far */
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (ones) {
+ if (!is_prefix_byte(mask[i]))
+ prefix = false;
+ } else if (mask[i]) {
+ prefix = false;
+ }
+ if (mask[i] != 0xff)
+ ones = false;
+ if (mask[i])
+ zeroes = false;
+ }
+ if (ones)
+ return MASK_ONES;
+ if (zeroes)
+ return MASK_ZEROES;
+ if (prefix)
+ return MASK_PREFIX;
+ return MASK_OTHER;
+}
+
+static int efx_mae_match_check_cap_typ(u8 support, enum mask_type typ)
+{
+ switch (support) {
+ case MAE_FIELD_UNSUPPORTED:
+ case MAE_FIELD_SUPPORTED_MATCH_NEVER:
+ if (typ == MASK_ZEROES)
+ return 0;
+ return -EOPNOTSUPP;
+ case MAE_FIELD_SUPPORTED_MATCH_OPTIONAL:
+ if (typ == MASK_ZEROES)
+ return 0;
+ fallthrough;
+ case MAE_FIELD_SUPPORTED_MATCH_ALWAYS:
+ if (typ == MASK_ONES)
+ return 0;
+ return -EINVAL;
+ case MAE_FIELD_SUPPORTED_MATCH_PREFIX:
+ if (typ == MASK_OTHER)
+ return -EOPNOTSUPP;
+ return 0;
+ case MAE_FIELD_SUPPORTED_MATCH_MASK:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mae_match_check_caps(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *supported_fields = efx->tc->caps->action_rule_fields;
+ __be32 ingress_port = cpu_to_be32(mask->ingress_port);
+ enum mask_type ingress_port_mask_type;
+ int rc;
+
+ /* Check for _PREFIX assumes big-endian, so we need to convert */
+ ingress_port_mask_type = classify_mask((const u8 *)&ingress_port,
+ sizeof(ingress_port));
+ rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
+ ingress_port_mask_type);
+ if (rc) {
+ efx_tc_err(efx, "No support for %s mask in field ingress_port\n",
+ mask_type_name(ingress_port_mask_type));
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mask type for ingress_port");
+ return rc;
+ }
+ return 0;
+}
+
static bool efx_mae_asl_id(u32 id)
{
return !!(id & BIT(31));
@@ -279,6 +440,10 @@ static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
}
MCDI_STRUCT_SET_DWORD(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_INGRESS_MPORT_SELECTOR_MASK,
match->mask.ingress_port);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID,
+ match->value.recirc_id);
+ MCDI_STRUCT_SET_BYTE(match_crit, MAE_FIELD_MASK_VALUE_PAIRS_V2_RECIRC_ID_MASK,
+ match->mask.recirc_id);
return 0;
}
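Aside on the mask classification added to mae.c above: it hinges on the trick described in the "Bit twiddling" comment — a byte of the form 1...10...0 is a prefix exactly when its complement y satisfies y & (y + 1) == 0, i.e. the complement is 2^k - 1. A small stand-alone, user-space sketch of the same test (function and variable names here are illustrative, not part of the driver):

    #include <stdio.h>
    #include <stdint.h>

    /* Same check as the driver's is_prefix_byte(): (~x) & ((~x) + 1) must be zero */
    static int byte_is_prefix(uint8_t x)
    {
    	unsigned int inv = x ^ 0xffu;

    	return !(inv & (inv + 1));
    }

    int main(void)
    {
    	/* 0xf0 = 11110000 and 0xff/0x00 pass; 0xaa = 10101010 does not */
    	printf("0xf0:%d 0xff:%d 0x00:%d 0xaa:%d\n",
    	       byte_is_prefix(0xf0), byte_is_prefix(0xff),
    	       byte_is_prefix(0x00), byte_is_prefix(0xaa));
    	return 0;
    }

classify_mask() then walks the mask bytes and returns MASK_PREFIX only when any leading 0xff bytes are followed by at most one partial prefix byte and then all-zero bytes; all-1s and all-0s masks are reported separately so efx_mae_match_check_cap_typ() can apply the per-field support level.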
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 0369be4d8983..3e0cd238d523 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -27,6 +27,20 @@ void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+#define MAE_NUM_FIELDS (MAE_FIELD_ENC_VNET_ID + 1)
+
+struct mae_caps {
+ u32 match_field_count;
+ u32 action_prios;
+ u8 action_rule_fields[MAE_NUM_FIELDS];
+};
+
+int efx_mae_get_caps(struct efx_nic *efx, struct mae_caps *caps);
+
+int efx_mae_match_check_caps(struct efx_nic *efx,
+ const struct efx_tc_match_fields *mask,
+ struct netlink_ext_ack *extack);
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 26bc69f76801..1f18e9dc62e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -201,6 +201,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
((u8 *)(_buf) + (_offset))
#define MCDI_PTR(_buf, _field) \
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+/* Use MCDI_STRUCT_ functions to access members of MCDI structuredefs.
+ * _buf should point to the start of the structure, typically obtained with
+ * MCDI_DECLARE_STRUCT_PTR(structure) = _MCDI_DWORD(mcdi_buf, FIELD_WHICH_IS_STRUCT);
+ */
+#define MCDI_STRUCT_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, _field ## _OFST)
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
#define _MCDI_DWORD(_buf, _field) \
@@ -208,6 +214,10 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define _MCDI_STRUCT_DWORD(_buf, _field) \
((_buf) + (_MCDI_CHECK_ALIGN(_field ## _OFST, 4) >> 2))
+#define MCDI_STRUCT_SET_BYTE(_buf, _field, _value) do { \
+ BUILD_BUG_ON(_field ## _LEN != 1); \
+ *(u8 *)MCDI_STRUCT_PTR(_buf, _field) = _value; \
+ } while (0)
#define MCDI_BYTE(_buf, _field) \
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
*MCDI_PTR(_buf, _field))
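Aside on the mcdi.h change: the new MCDI_STRUCT_PTR()/MCDI_STRUCT_SET_BYTE() accessors take a pointer to the start of a structuredef rather than a whole MCDI message, and field names without the MC_CMD_ prefix, as the comment above describes. A hedged sketch of the intended pattern, using made-up command/field names (everything prefixed FOO/BAR below is hypothetical; only the macros are from mcdi.h):

    /* Hypothetical command FOO whose request embeds a structuredef field BAR */
    MCDI_DECLARE_BUF(inbuf, MC_CMD_FOO_IN_LEN);
    MCDI_DECLARE_STRUCT_PTR(bar) = _MCDI_DWORD(inbuf, FOO_IN_BAR);

    /* Structuredef accessors drop the MC_CMD_ prefix; width is checked at build time */
    MCDI_STRUCT_SET_BYTE(bar, BAR_SOME_BYTE_FIELD, 0x5a);
    MCDI_STRUCT_SET_DWORD(bar, BAR_SOME_DWORD_FIELD, 12345);

The mae.c hunk above is the first user: match_crit is obtained this way and populated with MCDI_STRUCT_SET_DWORD()/MCDI_STRUCT_SET_BYTE() for the m-port selector and recirc_id fields.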
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 5954fcfee2b1..f5128db7c7e7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -285,7 +285,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
- strlcpy(attr->name, name, sizeof(attr->name));
+ strscpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 7ef823d7a89a..2e9ba0cfe848 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -855,6 +855,7 @@ enum efx_xdp_tx_queues_mode {
* @timer_max_ns: Interrupt timer maximum value, in nanoseconds
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irqs_hooked: Channel interrupts are hooked
+ * @log_tc_errs: Error logging for TC filter insertion is enabled
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
@@ -1017,6 +1018,7 @@ struct efx_nic {
unsigned int timer_max_ns;
bool irq_rx_adaptive;
bool irqs_hooked;
+ bool log_tc_errs;
unsigned int irq_mod_step_us;
unsigned int irq_rx_moderation_us;
u32 msg_enable;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 22fbb0ae77fb..63e2394382bb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -465,7 +465,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 10ad0b93d283..eaef4a15008a 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -118,9 +118,14 @@
#define PTP_MIN_LENGTH 63
-#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */
+#define PTP_RXFILTERS_LEN 5
+
+#define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */
+#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0x01, 0x81} /* ff0e::181 */
#define PTP_EVENT_PORT 319
#define PTP_GENERAL_PORT 320
+#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */
/* Annoyingly the format of the version numbers is different between
* versions 1 and 2 so it isn't possible to simply look for 1 or 2.
@@ -224,9 +229,8 @@ struct efx_ptp_timeset {
* @work: Work task
* @reset_required: A serious error has occurred and the PTP task needs to be
* reset (disable, enable).
- * @rxfilter_event: Receive filter when operating
- * @rxfilter_general: Receive filter when operating
- * @rxfilter_installed: Receive filter installed
+ * @rxfilters: Receive filters when operating
+ * @rxfilters_count: Num of installed rxfilters, should be == PTP_RXFILTERS_LEN
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
@@ -295,9 +299,8 @@ struct efx_ptp_data {
struct workqueue_struct *workwq;
struct work_struct work;
bool reset_required;
- u32 rxfilter_event;
- u32 rxfilter_general;
- bool rxfilter_installed;
+ u32 rxfilters[PTP_RXFILTERS_LEN];
+ size_t rxfilters_count;
struct hwtstamp_config config;
bool enabled;
unsigned int mode;
@@ -1290,61 +1293,108 @@ static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- if (ptp->rxfilter_installed) {
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_general);
+ while (ptp->rxfilters_count) {
+ ptp->rxfilters_count--;
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
- ptp->rxfilter_installed = false;
+ ptp->rxfilters[ptp->rxfilters_count]);
}
}
-static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+static void efx_ptp_init_filter(struct efx_nic *efx,
+ struct efx_filter_spec *rxfilter)
+{
+ struct efx_channel *channel = efx->ptp_data->channel;
+ struct efx_rx_queue *queue = efx_channel_get_rx_queue(channel);
+
+ efx_filter_init_rx(rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+ efx_rx_queue_index(queue));
+}
+
+static int efx_ptp_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *rxfilter)
{
struct efx_ptp_data *ptp = efx->ptp_data;
+
+ int rc = efx_filter_insert_filter(efx, rxfilter, true);
+ if (rc < 0)
+ return rc;
+ ptp->rxfilters[ptp->rxfilters_count] = rc;
+ ptp->rxfilters_count++;
+ return 0;
+}
+
+static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx, u16 port)
+{
struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, htonl(PTP_ADDR_IPV4),
+ htons(port));
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx, u16 port)
+{
+ const struct in6_addr addr = {{PTP_ADDR_IPV6}};
+ struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_ipv6_local(&rxfilter, IPPROTO_UDP, &addr, htons(port));
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_eth_filter(struct efx_nic *efx)
+{
+ const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER;
+ struct efx_filter_spec rxfilter;
+
+ efx_ptp_init_filter(efx, &rxfilter);
+ efx_filter_set_eth_local(&rxfilter, EFX_FILTER_VID_UNSPEC, addr);
+ rxfilter.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ rxfilter.ether_type = htons(ETH_P_1588);
+ return efx_ptp_insert_filter(efx, &rxfilter);
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+{
+ struct efx_ptp_data *ptp = efx->ptp_data;
int rc;
- if (!ptp->channel || ptp->rxfilter_installed)
+ if (!ptp->channel || ptp->rxfilters_count)
return 0;
/* Must filter on both event and general ports to ensure
* that there is no packet re-ordering.
*/
- efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
- efx_rx_queue_index(
- efx_channel_get_rx_queue(ptp->channel)));
- rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
- htonl(PTP_ADDRESS),
- htons(PTP_EVENT_PORT));
- if (rc != 0)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ rc = efx_ptp_insert_ipv4_filter(efx, PTP_EVENT_PORT);
if (rc < 0)
- return rc;
- ptp->rxfilter_event = rc;
-
- efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
- efx_rx_queue_index(
- efx_channel_get_rx_queue(ptp->channel)));
- rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
- htonl(PTP_ADDRESS),
- htons(PTP_GENERAL_PORT));
- if (rc != 0)
goto fail;
- rc = efx_filter_insert_filter(efx, &rxfilter, true);
+ rc = efx_ptp_insert_ipv4_filter(efx, PTP_GENERAL_PORT);
if (rc < 0)
goto fail;
- ptp->rxfilter_general = rc;
- ptp->rxfilter_installed = true;
+ /* if the NIC supports hw timestamps by the MAC, we can support
+ * PTP over IPv6 and Ethernet
+ */
+ if (efx_ptp_use_mac_tx_timestamps(efx)) {
+ rc = efx_ptp_insert_ipv6_filter(efx, PTP_EVENT_PORT);
+ if (rc < 0)
+ goto fail;
+
+ rc = efx_ptp_insert_ipv6_filter(efx, PTP_GENERAL_PORT);
+ if (rc < 0)
+ goto fail;
+
+ rc = efx_ptp_insert_eth_filter(efx);
+ if (rc < 0)
+ goto fail;
+ }
+
return 0;
fail:
- efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
- ptp->rxfilter_event);
+ efx_ptp_remove_multicast_filters(efx);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 63d999e63960..60e5b7c8ccf9 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -775,7 +775,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
if (efx_dev_registered(efx)) {
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_siena_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
@@ -1148,6 +1148,17 @@ static int efx_pm_freeze(struct device *dev)
return 0;
}
+static void efx_pci_shutdown(struct pci_dev *pci_dev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+ if (!efx)
+ return;
+
+ efx_pm_freeze(&pci_dev->dev);
+ pci_disable_device(pci_dev);
+}
+
static int efx_pm_thaw(struct device *dev)
{
int rc;
@@ -1252,6 +1263,7 @@ static struct pci_driver efx_pci_driver = {
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .shutdown = efx_pci_shutdown,
.err_handler = &efx_siena_err_handlers,
#ifdef CONFIG_SFC_SIENA_SRIOV
.sriov_configure = efx_pci_sriov_configure,
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 017212a40df3..06ed74994e36 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -320,7 +320,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
- efx->tx_channel_offset = 1;
+ efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;
@@ -1317,7 +1317,7 @@ static void efx_init_napi_channel(struct efx_channel *channel)
struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev;
- netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
+ netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}
void efx_siena_init_napi(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index 954daf464abb..1fd396b00bfb 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1006,7 +1006,7 @@ int efx_siena_init_struct(struct efx_nic *efx,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_UNINIT;
- strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+ strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
index 0207d07f54e3..f590e87e5a23 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -105,10 +105,10 @@ void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
efx_siena_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev)
@@ -467,7 +467,7 @@ void efx_siena_ethtool_get_strings(struct net_device *net_dev,
strings += (efx->type->describe_stats(efx, strings) *
ETH_GSTRING_LEN);
for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
- strlcpy(strings + i * ETH_GSTRING_LEN,
+ strscpy(strings + i * ETH_GSTRING_LEN,
efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
strings += (efx_describe_per_queue_stats(efx, strings) *
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
index c7ea703c5d7a..56a9c56ed9e3 100644
--- a/drivers/net/ethernet/sfc/siena/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
@@ -285,7 +285,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
- strlcpy(attr->name, name, sizeof(attr->name));
+ strscpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
index abf9a4adf139..0ea0433a6230 100644
--- a/drivers/net/ethernet/sfc/siena/nic.c
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -458,7 +458,7 @@ size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t coun
for_each_set_bit(index, mask, count) {
if (desc[index].name) {
if (names) {
- strlcpy(names, desc[index].name,
+ strscpy(names, desc[index].name,
ETH_GSTRING_LEN);
names += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
index e166dcb9b99c..91e87594ed1e 100644
--- a/drivers/net/ethernet/sfc/siena/tx.c
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -336,7 +336,7 @@ netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0c0aeb91f500..3478860d4023 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -9,11 +9,60 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <net/pkt_cls.h>
#include "tc.h"
+#include "tc_bindings.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"
+#define EFX_EFV_PF NULL
+/* Look up the representor information (efv) for a device.
+ * May return NULL for the PF (us), or an error pointer for a device that
+ * isn't supported as a TC offload endpoint
+ */
+static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
+ struct net_device *dev)
+{
+ struct efx_rep *efv;
+
+ if (!dev)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it us (the PF)? */
+ if (dev == efx->net_dev)
+ return EFX_EFV_PF;
+ /* Is it an efx vfrep at all? */
+ if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
+ return ERR_PTR(-EOPNOTSUPP);
+ /* Is it ours? We don't support TC rules that include another
+ * EF100's netdevices (not even on another port of the same NIC).
+ */
+ efv = netdev_priv(dev);
+ if (efv->parent != efx)
+ return ERR_PTR(-EOPNOTSUPP);
+ return efv;
+}
+
+/* Convert a driver-internal vport ID into an external device (wire or VF) */
+static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
+{
+ u32 mport;
+
+ if (IS_ERR(efv))
+ return PTR_ERR(efv);
+ if (!efv) /* device is PF (us) */
+ efx_mae_mport_wire(efx, &mport);
+ else /* device is repr */
+ efx_mae_mport_mport(efx, efv->mport, &mport);
+ return mport;
+}
+
+static const struct rhashtable_params efx_tc_match_action_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct efx_tc_flow_rule, cookie),
+ .head_offset = offsetof(struct efx_tc_flow_rule, linkage),
+};
+
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -58,6 +107,333 @@ static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rul
rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}
+static void efx_tc_flow_free(void *ptr, void *arg)
+{
+ struct efx_tc_flow_rule *rule = ptr;
+ struct efx_nic *efx = arg;
+
+ netif_err(efx, drv, efx->net_dev,
+ "tc rule %lx still present at teardown, removing\n",
+ rule->cookie);
+
+ efx_mae_delete_rule(efx, rule->fw_id);
+
+ /* Release entries in subsidiary tables */
+ efx_tc_free_action_set_list(efx, &rule->acts, true);
+
+ kfree(rule);
+}
+
+static int efx_tc_flower_parse_match(struct efx_nic *efx,
+ struct flow_rule *rule,
+ struct efx_tc_match *match,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control fm;
+
+ flow_rule_match_control(rule, &fm);
+
+ if (fm.mask->flags) {
+ efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
+ fm.mask->flags);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC))) {
+ efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic fm;
+
+ flow_rule_match_basic(rule, &fm);
+ if (fm.mask->n_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ if (fm.mask->ip_proto) {
+ EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_tc_flower_replace(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc,
+ struct efx_rep *efv)
+{
+ struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_action_set *act = NULL;
+ const struct flow_action_entry *fa;
+ struct efx_rep *from_efv, *to_efv;
+ struct efx_tc_match match;
+ s64 rc;
+ int i;
+
+ if (!tc_can_offload_extack(efx->net_dev, extack))
+ return -EOPNOTSUPP;
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+ if (WARN_ON(!efx->tc->up))
+ return -ENETDOWN;
+
+ from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
+ if (IS_ERR(from_efv)) {
+ /* Might be a tunnel decap rule from an indirect block.
+ * Support for those not implemented yet.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ if (efv != from_efv) {
+ /* can't happen */
+ efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
+ netdev_name(net_dev), efv ? "non-" : "",
+ from_efv ? "non-" : "");
+ if (efv)
+ NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
+ else
+ NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
+ return -EINVAL;
+ }
+
+ /* Parse match */
+ memset(&match, 0, sizeof(match));
+ rc = efx_tc_flower_external_mport(efx, from_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
+ return rc;
+ }
+ match.value.ingress_port = rc;
+ match.mask.ingress_port = ~0;
+ rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
+ if (rc)
+ return rc;
+
+ if (tc->common.chain_index) {
+ EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
+ return -EOPNOTSUPP;
+ }
+ match.mask.recirc_id = 0xff;
+
+ rc = efx_mae_match_check_caps(efx, &match.mask, extack);
+ if (rc)
+ return rc;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&rule->acts.list);
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+
+ flow_action_for_each(i, fa, &fr->action) {
+ struct efx_tc_action_set save;
+
+ if (!act) {
+ /* more actions after a non-pipe action */
+ EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
+ rc = -EINVAL;
+ goto release;
+ }
+
+ switch (fa->id) {
+ case FLOW_ACTION_DROP:
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* end of the line */
+ break;
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED:
+ save = *act;
+ to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
+ if (IS_ERR(to_efv)) {
+ EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
+ rc = PTR_ERR(to_efv);
+ goto release;
+ }
+ rc = efx_tc_flower_external_mport(efx, to_efv);
+ if (rc < 0) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
+ goto release;
+ }
+ act->dest_mport = rc;
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL;
+ if (fa->id == FLOW_ACTION_REDIRECT)
+ break; /* end of the line */
+ /* Mirror, so continue on with saved act */
+ act = kzalloc(sizeof(*act), GFP_USER);
+ if (!act) {
+ rc = -ENOMEM;
+ goto release;
+ }
+ *act = save;
+ break;
+ default:
+ efx_tc_err(efx, "Unhandled action %u\n", fa->id);
+ rc = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ goto release;
+ }
+ }
+
+ if (act) {
+ /* Not shot/redirected, so deliver to default dest */
+ if (from_efv == EFX_EFV_PF)
+ /* Rule applies to traffic from the wire,
+ * and default dest is thus the PF
+ */
+ efx_mae_mport_uplink(efx, &act->dest_mport);
+ else
+ /* Representor, so rule applies to traffic from
+ * representee, and default dest is thus the rep.
+ * All reps use the same mport for delivery
+ */
+ efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
+ &act->dest_mport);
+ act->deliver = 1;
+ rc = efx_mae_alloc_action_set(efx, act);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
+ goto release;
+ }
+ list_add_tail(&act->list, &rule->acts.list);
+ act = NULL; /* Prevent double-free in error path */
+ }
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed filter (cookie %lx)\n",
+ tc->cookie);
+
+ rule->match = match;
+
+ rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
+ goto release;
+ }
+ rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
+ rule->acts.fw_id, &rule->fw_id);
+ if (rc) {
+ EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
+ goto release_acts;
+ }
+ return 0;
+
+release_acts:
+ efx_mae_free_action_set_list(efx, &rule->acts);
+release:
+ /* We failed to insert the rule, so free up any entries we created in
+ * subsidiary tables.
+ */
+ if (act)
+ efx_tc_free_action_set(efx, act, false);
+ if (rule) {
+ rhashtable_remove_fast(&efx->tc->match_action_ht,
+ &rule->linkage,
+ efx_tc_match_action_ht_params);
+ efx_tc_free_action_set_list(efx, &rule->acts, false);
+ }
+ kfree(rule);
+ return rc;
+}
+
+static int efx_tc_flower_destroy(struct efx_nic *efx,
+ struct net_device *net_dev,
+ struct flow_cls_offload *tc)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_flow_rule *rule;
+
+ rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
+ efx_tc_match_action_ht_params);
+ if (!rule) {
+ /* Only log a message if we're the ingress device. Otherwise
+ * it's a foreign filter and we might just not have been
+ * interested (e.g. we might not have been the egress device
+ * either).
+ */
+ if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
+ netif_warn(efx, drv, efx->net_dev,
+ "Filter %lx not found to remove\n", tc->cookie);
+ NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
+ return -ENOENT;
+ }
+
+ /* Remove it from HW */
+ efx_tc_delete_rule(efx, rule);
+ /* Delete it from SW */
+ rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
+ efx_tc_match_action_ht_params);
+ netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
+ kfree(rule);
+ return 0;
+}
+
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv)
+{
+ int rc;
+
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&efx->tc->mutex);
+ switch (tc->command) {
+ case FLOW_CLS_REPLACE:
+ rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
+ break;
+ case FLOW_CLS_DESTROY:
+ rc = efx_tc_flower_destroy(efx, net_dev, tc);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&efx->tc->mutex);
+ return rc;
+}
+
static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
u32 eg_port, struct efx_tc_flow_rule *rule)
{
@@ -201,13 +577,37 @@ int efx_init_tc(struct efx_nic *efx)
{
int rc;
+ rc = efx_mae_get_caps(efx, efx->tc->caps);
+ if (rc)
+ return rc;
+ if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
+ /* Firmware supports some match fields the driver doesn't know
+ * about. Not fatal, unless any of those fields are required
+ * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
+ */
+ netif_warn(efx, probe, efx->net_dev,
+ "FW reports additional match fields %u\n",
+ efx->tc->caps->match_field_count);
+ if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
+ netif_err(efx, probe, efx->net_dev,
+ "Too few action prios supported (have %u, need %u)\n",
+ efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
+ return -EIO;
+ }
rc = efx_tc_configure_default_rule_pf(efx);
if (rc)
return rc;
rc = efx_tc_configure_default_rule_wire(efx);
if (rc)
return rc;
- return efx_tc_configure_rep_mport(efx);
+ rc = efx_tc_configure_rep_mport(efx);
+ if (rc)
+ return rc;
+ efx->tc->up = true;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ return rc;
+ return 0;
}
void efx_fini_tc(struct efx_nic *efx)
@@ -215,20 +615,35 @@ void efx_fini_tc(struct efx_nic *efx)
/* We can get called even if efx_init_struct_tc() failed */
if (!efx->tc)
return;
+ if (efx->tc->up)
+ flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
efx_tc_deconfigure_rep_mport(efx);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
+ efx->tc->up = false;
}
int efx_init_struct_tc(struct efx_nic *efx)
{
+ int rc;
+
if (efx->type->is_vf)
return 0;
efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
if (!efx->tc)
return -ENOMEM;
+ efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
+ if (!efx->tc->caps) {
+ rc = -ENOMEM;
+ goto fail_alloc_caps;
+ }
+ INIT_LIST_HEAD(&efx->tc->block_list);
+ mutex_init(&efx->tc->mutex);
+ rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
+ if (rc < 0)
+ goto fail_match_action_ht;
efx->tc->reps_filter_uc = -1;
efx->tc->reps_filter_mc = -1;
INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
@@ -236,6 +651,13 @@ int efx_init_struct_tc(struct efx_nic *efx)
INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
return 0;
+fail_match_action_ht:
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
+fail_alloc_caps:
+ kfree(efx->tc);
+ efx->tc = NULL;
+ return rc;
}
void efx_fini_struct_tc(struct efx_nic *efx)
@@ -243,10 +665,16 @@ void efx_fini_struct_tc(struct efx_nic *efx)
if (!efx->tc)
return;
+ mutex_lock(&efx->tc->mutex);
EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
+ efx);
+ mutex_unlock(&efx->tc->mutex);
+ mutex_destroy(&efx->tc->mutex);
+ kfree(efx->tc->caps);
kfree(efx->tc);
efx->tc = NULL;
}
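Aside on the tc.c changes: offloaded flower rules are tracked in match_action_ht keyed by the unsigned long TC cookie (see efx_tc_match_action_ht_params), so replace, destroy and final teardown all reduce to rhashtable operations. Stripped down from the hunks above (error handling and surrounding context omitted), the rule lifecycle is:

    /* Replace path: insertion doubles as the duplicate-cookie check */
    rule->cookie = tc->cookie;
    old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
    					    &rule->linkage,
    					    efx_tc_match_action_ht_params);
    if (old)			/* a rule with this cookie is already offloaded */
    	return -EEXIST;

    /* Destroy path: find by cookie, then unlink once the HW rule is deleted */
    rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
    				  efx_tc_match_action_ht_params);
    if (rule)
    	rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
    			       efx_tc_match_action_ht_params);

Anything still left in the table at efx_fini_struct_tc() time is flushed by rhashtable_free_and_destroy() via efx_tc_flow_free(), which also logs a warning since that should not normally happen.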
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 309123c6b386..196fd74ed973 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -11,8 +11,28 @@
#ifndef EFX_TC_H
#define EFX_TC_H
+#include <net/flow_offload.h>
+#include <linux/rhashtable.h>
#include "net_driver.h"
+/* Error reporting: convenience macros. For indicating why a given filter
+ * insertion is not supported; errors in internal operation or in the
+ * hardware should be netif_err()s instead.
+ */
+/* Used when error message is constant. */
+#define EFX_TC_ERR_MSG(efx, extack, message) do { \
+ NL_SET_ERR_MSG_MOD(extack, message); \
+ if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, "%s\n", message); \
+} while (0)
+/* Used when error message is not constant; caller should also supply a
+ * constant extack message with NL_SET_ERR_MSG_MOD().
+ */
+#define efx_tc_err(efx, fmt, args...) do { \
+if (efx->log_tc_errs) \
+ netif_info(efx, drv, efx->net_dev, fmt, ##args);\
+} while (0)
+
struct efx_tc_action_set {
u16 deliver:1;
u32 dest_mport;
@@ -23,6 +43,7 @@ struct efx_tc_action_set {
struct efx_tc_match_fields {
/* L1 */
u32 ingress_port;
+ u8 recirc_id;
};
struct efx_tc_match {
@@ -36,12 +57,15 @@ struct efx_tc_action_set_list {
};
struct efx_tc_flow_rule {
+ unsigned long cookie;
+ struct rhash_head linkage;
struct efx_tc_match match;
struct efx_tc_action_set_list acts;
u32 fw_id;
};
enum efx_tc_rule_prios {
+ EFX_TC_PRIO_TC, /* Rule inserted by TC */
EFX_TC_PRIO_DFLT, /* Default switch rule; one of efx_tc_default_rules */
EFX_TC_PRIO__NUM
};
@@ -49,6 +73,10 @@ enum efx_tc_rule_prios {
/**
* struct efx_tc_state - control plane data for TC offload
*
+ * @caps: MAE capabilities reported by MCDI
+ * @block_list: List of &struct efx_tc_block_binding
+ * @mutex: Used to serialise operations on TC hashtables
+ * @match_action_ht: Hashtable of TC match-action rules
* @reps_mport_id: MAE port allocated for representor RX
* @reps_filter_uc: VNIC filter for representor unicast RX (promisc)
* @reps_filter_mc: VNIC filter for representor multicast RX (allmulti)
@@ -57,14 +85,20 @@ enum efx_tc_rule_prios {
* %EFX_TC_PRIO_DFLT. Named by *ingress* port
* @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
* @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
+ * @up: have TC datastructures been set up?
*/
struct efx_tc_state {
+ struct mae_caps *caps;
+ struct list_head block_list;
+ struct mutex mutex;
+ struct rhashtable match_action_ht;
u32 reps_mport_id, reps_mport_vport_id;
s32 reps_filter_uc, reps_filter_mc;
struct {
struct efx_tc_flow_rule pf;
struct efx_tc_flow_rule wire;
} dflt;
+ bool up;
};
struct efx_rep;
@@ -72,6 +106,8 @@ struct efx_rep;
int efx_tc_configure_default_rule_rep(struct efx_rep *efv);
void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
struct efx_tc_flow_rule *rule);
+int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
+ struct flow_cls_offload *tc, struct efx_rep *efv);
int efx_tc_insert_rep_filters(struct efx_nic *efx);
void efx_tc_remove_rep_filters(struct efx_nic *efx);
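Aside on the tc.h error helpers: they encode a convention used throughout the new tc.c code. EFX_TC_ERR_MSG() is for constant strings and handles both the extack message and the optional netif_info() log, while efx_tc_err() only logs, so a formatted message must be paired with a constant NL_SET_ERR_MSG_MOD(). Both usages are quoted from the tc.c hunks above:

    /* Constant message: one macro sets extack and, if log_tc_errs, logs it */
    EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");

    /* Formatted message: log the detail, then give extack a constant string */
    efx_tc_err(efx, "Unhandled action %u\n", fa->id);
    NL_SET_ERR_MSG_MOD(extack, "Unsupported action");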
diff --git a/drivers/net/ethernet/sfc/tc_bindings.c b/drivers/net/ethernet/sfc/tc_bindings.c
new file mode 100644
index 000000000000..c18d64519c2d
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_bindings.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "tc_bindings.h"
+#include "tc.h"
+
+struct efx_tc_block_binding {
+ struct list_head list;
+ struct efx_nic *efx;
+ struct efx_rep *efv;
+ struct net_device *otherdev; /* may actually be us */
+ struct flow_block *block;
+};
+
+static struct efx_tc_block_binding *efx_tc_find_binding(struct efx_nic *efx,
+ struct net_device *otherdev)
+{
+ struct efx_tc_block_binding *binding;
+
+ ASSERT_RTNL();
+ list_for_each_entry(binding, &efx->tc->block_list, list)
+ if (binding->otherdev == otherdev)
+ return binding;
+ return NULL;
+}
+
+static int efx_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct efx_tc_block_binding *binding = cb_priv;
+ struct flow_cls_offload *tcf = type_data;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return efx_tc_flower(binding->efx, binding->otherdev,
+ tcf, binding->efv);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void efx_tc_block_unbind(void *cb_priv)
+{
+ struct efx_tc_block_binding *binding = cb_priv;
+
+ list_del(&binding->list);
+ kfree(binding);
+}
+
+static struct efx_tc_block_binding *efx_tc_create_binding(
+ struct efx_nic *efx, struct efx_rep *efv,
+ struct net_device *otherdev, struct flow_block *block)
+{
+ struct efx_tc_block_binding *binding = kmalloc(sizeof(*binding), GFP_KERNEL);
+
+ if (!binding)
+ return ERR_PTR(-ENOMEM);
+ binding->efx = efx;
+ binding->efv = efv;
+ binding->otherdev = otherdev;
+ binding->block = block;
+ list_add(&binding->list, &efx->tc->block_list);
+ return binding;
+}
+
+int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
+ struct flow_block_offload *tcb, struct efx_rep *efv)
+{
+ struct efx_tc_block_binding *binding;
+ struct flow_block_cb *block_cb;
+ int rc;
+
+ if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ if (WARN_ON(!efx->tc))
+ return -ENETDOWN;
+
+ switch (tcb->command) {
+ case FLOW_BLOCK_BIND:
+ binding = efx_tc_create_binding(efx, efv, net_dev, tcb->block);
+ if (IS_ERR(binding))
+ return PTR_ERR(binding);
+ block_cb = flow_block_cb_alloc(efx_tc_block_cb, binding,
+ binding, efx_tc_block_unbind);
+ rc = PTR_ERR_OR_ZERO(block_cb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "bind %sdirect block for device %s, rc %d\n",
+ net_dev == efx->net_dev ? "" :
+ efv ? "semi" : "in",
+ net_dev ? net_dev->name : NULL, rc);
+ if (rc) {
+ list_del(&binding->list);
+ kfree(binding);
+ } else {
+ flow_block_cb_add(block_cb, tcb);
+ }
+ return rc;
+ case FLOW_BLOCK_UNBIND:
+ binding = efx_tc_find_binding(efx, net_dev);
+ if (binding) {
+ block_cb = flow_block_cb_lookup(tcb->block,
+ efx_tc_block_cb,
+ binding);
+ if (block_cb) {
+ flow_block_cb_remove(block_cb, tcb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "unbound %sdirect block for device %s\n",
+ net_dev == efx->net_dev ? "" :
+ binding->efv ? "semi" : "in",
+ net_dev ? net_dev->name : NULL);
+ return 0;
+ }
+ }
+ /* If we're in driver teardown, then we expect to have
+ * already unbound all our blocks (we did it early while
+ * we still had MCDI to remove the filters), so getting
+ * unbind callbacks now isn't a problem.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ !efx->tc->up, warn,
+ "%sdirect block unbind for device %s, was never bound\n",
+ net_dev == efx->net_dev ? "" : "in",
+ net_dev ? net_dev->name : NULL);
+ return -ENOENT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type,
+ void *type_data, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct flow_block_offload *tcb = type_data;
+ struct efx_tc_block_binding *binding;
+ struct flow_block_cb *block_cb;
+ struct efx_nic *efx = cb_priv;
+ bool is_ovs_int_port;
+ int rc;
+
+ if (!net_dev)
+ return -EOPNOTSUPP;
+
+ if (tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ tcb->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ is_ovs_int_port = netif_is_ovs_master(net_dev);
+ if (tcb->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ !is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ if (is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ switch (tcb->command) {
+ case FLOW_BLOCK_BIND:
+ binding = efx_tc_create_binding(efx, NULL, net_dev, tcb->block);
+ if (IS_ERR(binding))
+ return PTR_ERR(binding);
+ block_cb = flow_indr_block_cb_alloc(efx_tc_block_cb, binding,
+ binding, efx_tc_block_unbind,
+ tcb, net_dev, sch, data, binding,
+ cleanup);
+ rc = PTR_ERR_OR_ZERO(block_cb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "bind indr block for device %s, rc %d\n",
+ net_dev ? net_dev->name : NULL, rc);
+ if (rc) {
+ list_del(&binding->list);
+ kfree(binding);
+ } else {
+ flow_block_cb_add(block_cb, tcb);
+ }
+ return rc;
+ case FLOW_BLOCK_UNBIND:
+ binding = efx_tc_find_binding(efx, net_dev);
+ if (!binding)
+ return -ENOENT;
+ block_cb = flow_block_cb_lookup(tcb->block,
+ efx_tc_block_cb,
+ binding);
+ if (!block_cb)
+ return -ENOENT;
+ flow_indr_block_cb_remove(block_cb, tcb);
+ netif_dbg(efx, drv, efx->net_dev,
+ "unbind indr block for device %s\n",
+ net_dev ? net_dev->name : NULL);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* .ndo_setup_tc implementation
+ * Entry point for flower block and filter management.
+ */
+int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+
+ if (efx->type->is_vf)
+ return -EOPNOTSUPP;
+ if (!efx->tc)
+ return -EOPNOTSUPP;
+
+ if (type == TC_SETUP_CLSFLOWER)
+ return efx_tc_flower(efx, net_dev, type_data, NULL);
+ if (type == TC_SETUP_BLOCK)
+ return efx_tc_setup_block(net_dev, efx, type_data, NULL);
+
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/sfc/tc_bindings.h b/drivers/net/ethernet/sfc/tc_bindings.h
new file mode 100644
index 000000000000..c210bb09150e
--- /dev/null
+++ b/drivers/net/ethernet/sfc/tc_bindings.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2022 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TC_BINDINGS_H
+#define EFX_TC_BINDINGS_H
+#include "net_driver.h"
+
+#include <net/sch_generic.h>
+
+struct efx_rep;
+
+void efx_tc_block_unbind(void *cb_priv);
+int efx_tc_setup_block(struct net_device *net_dev, struct efx_nic *efx,
+ struct flow_block_offload *tcb, struct efx_rep *efv);
+int efx_tc_setup(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data);
+
+int efx_tc_indr_setup_cb(struct net_device *net_dev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type,
+ void *type_data, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+#endif /* EFX_TC_BINDINGS_H */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index d12474042c84..c5f88f7a7a04 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -549,7 +549,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
* previous packets out.
*/
if (!netdev_xmit_more())
- efx_tx_send_pending(tx_queue->channel);
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index e2d009866a7b..8fc3f5272fa7 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1158,9 +1158,9 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
static void ioc3_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
- strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(info->driver, IOC3_NAME, sizeof(info->driver));
+ strscpy(info->version, IOC3_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 216bb2d34d7c..dda4e488c77a 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1769,9 +1769,9 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(tp->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 23a336c5096e..cb7fec226cab 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2027,9 +2027,9 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ strscpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sis_priv->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0329caf63279..013e90d69182 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -482,7 +482,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &epic_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &ep->napi, epic_poll, 64);
+ netif_napi_add(dev, &ep->napi, epic_poll);
ret = register_netdev(dev);
if (ret < 0)
@@ -1392,9 +1392,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 24d66af797d4..52ecfb461c41 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1509,9 +1509,9 @@ smc911x_ethtool_set_link_ksettings(struct net_device *dev,
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, version, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->version, version, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 37c822e27207..29bb19f42de9 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index a31c159e96ea..35e99bf0c401 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1588,9 +1588,9 @@ smc_ethtool_set_link_ksettings(struct net_device *dev,
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->version, version, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->version, version, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 3bf20211cceb..a2e511912e6a 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1037,6 +1037,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@@ -1953,9 +1955,9 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
- strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
+ strscpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -2587,6 +2589,8 @@ static int smsc911x_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ if (!device_may_wakeup(dev))
+ phy_stop(ndev->phydev);
}
/* enable wake on LAN, energy detection and the external PME
@@ -2628,6 +2632,8 @@ static int smsc911x_resume(struct device *dev)
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
+ if (!device_may_wakeup(dev))
+ phy_start(ndev->phydev);
}
return 0;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 0c68c7f8056d..71fbb358bb7d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -215,10 +215,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(pd->pdev),
sizeof(drvinfo->bus_info));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
@@ -1585,7 +1585,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->netdev_ops = &smsc9420_netdev_ops;
dev->ethtool_ops = &smsc9420_ethtool_ops;
- netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pd->napi, smsc9420_rx_poll);
result = register_netdev(dev);
if (result) {
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index b0c5a44785fa..2240f6d0b89b 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -526,8 +526,8 @@ static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
static void netsec_et_get_drvinfo(struct net_device *net_device,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "netsec", sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+ strscpy(info->driver, "netsec", sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(net_device->dev.parent),
sizeof(info->bus_info));
}
@@ -2093,7 +2093,7 @@ static int netsec_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision %d.%d\n",
hw_ver >> 16, hw_ver & 0xffff);
- netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, netsec_napi_poll);
ndev->netdev_ops = &netsec_netdev_ops;
ndev->ethtool_ops = &netsec_ethtool_ops;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f0c8de2c6075..1fa09b49ba7f 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -395,8 +395,8 @@ static void ave_ethtool_get_drvinfo(struct net_device *ndev,
{
struct device *dev = ndev->dev.parent;
- strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+ strscpy(info->driver, dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
}
@@ -1687,8 +1687,7 @@ static int ave_probe(struct platform_device *pdev)
pdev->name, pdev->id);
/* Register as a NAPI supported driver */
- netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 358fc26f8d1f..80efdeeb0b59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -445,9 +445,7 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to probe subdriver: %d\n",
- ret);
+ dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
goto remove_config;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 4f2b82a884b9..0a2afc1a3124 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -610,7 +610,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
plat->ext_snapshot_num = AUX_SNAPSHOT0;
- plat->has_crossts = true;
plat->crosststamp = intel_crosststamp;
plat->int_snapshot_en = 0;
@@ -1136,8 +1135,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
- pcim_iounmap_regions(pdev, BIT(0));
}
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index c469abc91fa1..f7269d79a385 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -32,6 +32,8 @@ struct rk_gmac_ops {
void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
+ bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
bool regs_valid;
u32 regs[];
@@ -66,6 +68,7 @@ struct rk_priv_data {
int rx_delay;
struct regmap *grf;
+ struct regmap *php_grf;
};
#define HIWORD_UPDATE(val, mask, shift) \
@@ -1101,6 +1104,147 @@ static const struct rk_gmac_ops rk3568_ops = {
},
};
+/* sys_grf */
+#define RK3588_GRF_GMAC_CON7 0X031c
+#define RK3588_GRF_GMAC_CON8 0X0320
+#define RK3588_GRF_GMAC_CON9 0X0324
+
+#define RK3588_GMAC_RXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 3)
+#define RK3588_GMAC_RXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 3)
+#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
+#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+/* php_grf */
+#define RK3588_GRF_GMAC_CON0 0X0008
+#define RK3588_GRF_CLK_CON1 0X0070
+
+#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
+ (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
+ (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+
+#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
+#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
+
+#define RK3588_GMAC_CLK_SELET_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELET_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+
+#define RK3588_GMA_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
+#define RK3588_GMA_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
+ (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+
+#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
+#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
+
+static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 offset_con, id = bsp_priv->id;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
+ RK3588_GRF_GMAC_CON8;
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RGMII_MODE(id));
+
+ regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
+ RK3588_GMAC_RXCLK_DLY_ENABLE(id) |
+ RK3588_GMAC_TXCLK_DLY_ENABLE(id));
+
+ regmap_write(bsp_priv->grf, offset_con,
+ RK3588_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3588_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
+}
+
+static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, id = bsp_priv->id;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMA_CLK_RMII_DIV20(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV50(id);
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMA_CLK_RMII_DIV2(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV5(id);
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMAC_CLK_RGMII_DIV1(id);
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+
+ return;
+err:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+}
+
+static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3588_GMAC_CLK_SELET_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELET_CRU(bsp_priv->id);
+
+ val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
+ RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+}
+
+static const struct rk_gmac_ops rk3588_ops = {
+ .set_to_rgmii = rk3588_set_to_rgmii,
+ .set_to_rmii = rk3588_set_to_rmii,
+ .set_rgmii_speed = rk3588_set_gmac_speed,
+ .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_clock_selection = rk3588_set_clock_selection,
+};
+
#define RV1108_GRF_GMAC_CON0 0X0900
/* RV1108_GRF_GMAC_CON0 */
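The RK3588 and RV1126 additions lean on the usual Rockchip GRF convention: the upper 16 bits of each 32-bit GRF register act as a write-enable mask, so a single regmap_write() can set or clear individual bits without a read-modify-write cycle. HIWORD_UPDATE() (defined earlier in this file) builds such values, and GRF_BIT()/GRF_CLR_BIT() are thin wrappers around it. A sketch of the pattern; the macro bodies below are reproduced only for illustration and SOME_GRF_CON is a made-up register offset:

    #define HIWORD_UPDATE(val, mask, shift) \
            (((val) << (shift)) | ((mask) << ((shift) + 16)))
    #define GRF_BIT(nr)         (BIT(nr) | BIT((nr) + 16))
    #define GRF_CLR_BIT(nr)     BIT((nr) + 16)

    /* Set bit 4 and clear bit 5 of one GRF register in a single write. */
    regmap_write(bsp_priv->grf, SOME_GRF_CON, GRF_BIT(4) | GRF_CLR_BIT(5));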
@@ -1153,6 +1297,130 @@ static const struct rk_gmac_ops rv1108_ops = {
.set_rmii_speed = rv1108_set_rmii_speed,
};
+#define RV1126_GRF_GMAC_CON0 0X0070
+#define RV1126_GRF_GMAC_CON1 0X0074
+#define RV1126_GRF_GMAC_CON2 0X0078
+
+/* RV1126_GRF_GMAC_CON0 */
+#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
+#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
+#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RV1126_GMAC_M0_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+#define RV1126_GMAC_M0_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RV1126_GMAC_M0_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RV1126_GMAC_M1_RXCLK_DLY_ENABLE GRF_BIT(3)
+#define RV1126_GMAC_M1_RXCLK_DLY_DISABLE GRF_CLR_BIT(3)
+#define RV1126_GMAC_M1_TXCLK_DLY_ENABLE GRF_BIT(2)
+#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
+
+/* RV1126_GRF_GMAC_CON1 */
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+/* RV1126_GRF_GMAC_CON2 */
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON1,
+ RV1126_GMAC_M0_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M0_CLK_TX_DL_CFG(tx_delay));
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON2,
+ RV1126_GMAC_M1_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M1_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ case 1000:
+ rate = 125000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static const struct rk_gmac_ops rv1126_ops = {
+ .set_to_rgmii = rv1126_set_to_rgmii,
+ .set_to_rmii = rv1126_set_to_rmii,
+ .set_rgmii_speed = rv1126_set_rgmii_speed,
+ .set_rmii_speed = rv1126_set_rmii_speed,
+};
+
#define RK_GRF_MACPHY_CON0 0xb00
#define RK_GRF_MACPHY_CON1 0xb04
#define RK_GRF_MACPHY_CON2 0xb08
@@ -1304,6 +1572,10 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
if (!IS_ERR(bsp_priv->clk_mac_speed))
clk_prepare_enable(bsp_priv->clk_mac_speed);
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, true);
+
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_prepare_enable(bsp_priv->clk_mac);
@@ -1330,6 +1602,10 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
clk_disable_unprepare(bsp_priv->mac_clk_tx);
clk_disable_unprepare(bsp_priv->clk_mac_speed);
+
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
/**
* if (!IS_ERR(bsp_priv->clk_mac))
* clk_disable_unprepare(bsp_priv->clk_mac);
@@ -1444,6 +1720,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
+ bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1680,7 +1958,9 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
{ .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
+ { .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
{ }
};
MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 35ab8d0bdce7..7ab791c8d355 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -56,7 +56,7 @@
#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
-#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+#define MAC_CORE_INIT (MAC_CONTROL_HBD)
/* MAC FLOW CTRL defines */
#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 3c73453725f9..4296ddda8aaa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -126,7 +126,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
-#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \
GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
/* GMAC Frame Filter defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 76edb9b72675..0e00dd83d027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -15,7 +15,6 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
@@ -24,7 +23,6 @@
static void dwmac1000_core_init(struct mac_device_info *hw,
struct net_device *dev)
{
- struct stmmac_priv *priv = netdev_priv(dev);
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
int mtu = dev->mtu;
@@ -32,13 +30,6 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
/* Configure GMAC core */
value |= GMAC_CORE_INIT;
- /* Clear ACS bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
- value &= ~GMAC_CONTROL_ACS;
-
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 75071a7d551a..a6e8d7bd9588 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -15,7 +15,6 @@
*******************************************************************************/
#include <linux/crc32.h>
-#include <net/dsa.h>
#include <asm/io.h>
#include "stmmac.h"
#include "dwmac100.h"
@@ -28,13 +27,6 @@ static void dwmac100_core_init(struct mac_device_info *hw,
value |= MAC_CORE_INIT;
- /* Clear ASTP bit because Ethernet switch tagging formats such as
- * Broadcom tags can look like invalid LLC/SNAP packets and cause the
- * hardware to truncate packets on reception.
- */
- if (netdev_uses_dsa(dev))
- value &= ~MAC_CONTROL_ASTP;
-
writel(value, ioaddr + MAC_CONTROL);
#ifdef STMMAC_VLAN_TAG_USED
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index d8f1fbc25bdd..c25bfecb4a2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
-#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d6a44d53fe08..f453b0d09366 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -287,15 +287,15 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac || priv->plat->has_gmac4)
- strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else if (priv->plat->has_xgmac)
- strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
+ strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ strscpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
if (priv->plat->pdev) {
- strlcpy(info->bus_info, pci_name(priv->plat->pdev),
+ strscpy(info->bus_info, pci_name(priv->plat->pdev),
sizeof(info->bus_info));
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 592d29abcb1c..65c96773c6d2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3801,6 +3801,15 @@ static int __stmmac_open(struct net_device *dev,
stmmac_reset_queues_param(priv);
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
+ }
+
ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3904,6 +3913,10 @@ static int stmmac_release(struct net_device *dev)
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+ /* Power down the SerDes if present */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
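Read together with the stmmac_dvr_probe() hunk further down, these two changes move SerDes power handling out of probe/remove and into the ndo_open/ndo_stop path, so the lanes are only powered while the interface is up. In outline the pairing looks as follows; serdes_powerup/serdes_powerdown are the platform hooks this driver already defines, while the error label is illustrative:

    /* ndo_open: power the SerDes up before the MAC and DMA are configured. */
    if (priv->plat->serdes_powerup) {
        ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
        if (ret < 0)
            goto init_error;
    }

    /* ndo_stop: power it back down once the MAC has been disabled. */
    if (priv->plat->serdes_powerdown)
        priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);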
@@ -5076,16 +5089,8 @@ read_again:
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
buf1_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
}
@@ -5262,16 +5267,8 @@ read_again:
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
len += buf2_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
if (buf2_len) {
buf2_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
@@ -6890,8 +6887,7 @@ static void stmmac_napi_add(struct net_device *dev)
spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
}
if (queue < priv->plat->tx_queues_to_use) {
netif_napi_add_tx(dev, &ch->tx_napi,
@@ -6900,8 +6896,7 @@ static void stmmac_napi_add(struct net_device *dev)
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_add(dev, &ch->rxtx_napi,
- stmmac_napi_poll_rxtx,
- NAPI_POLL_WEIGHT);
+ stmmac_napi_poll_rxtx);
}
}
}
@@ -7293,14 +7288,6 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
@@ -7315,8 +7302,6 @@ int stmmac_dvr_probe(struct device *device,
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_xpcs_setup:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 9f5cac4000da..50f6b4a14be4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -440,11 +440,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
/* Default to phy auto-detection */
plat->phy_addr = -1;
- /* Default to get clk_csr from stmmac_clk_crs_set(),
+ /* Default to get clk_csr from stmmac_clk_csr_set(),
* or get clk_csr from device tree.
*/
plat->clk_csr = -1;
- of_property_read_u32(np, "clk_csr", &plat->clk_csr);
+ if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
+ of_property_read_u32(np, "clk_csr", &plat->clk_csr);
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
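The clk_csr lookup now prefers the documented "snps,clk-csr" binding and only falls back to the bare legacy "clk_csr" name when the new property is absent; of_property_read_u32() returns non-zero for a missing property and leaves the output untouched, which is what makes the two-step read safe. The same preferred-then-legacy pattern, sketched with hypothetical property names and default:

    u32 val = 0;    /* retained if neither property exists */

    if (of_property_read_u32(np, "vendor,new-name", &val))
        of_property_read_u32(np, "legacy-name", &val);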
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0b08b0e085e8..0aca193d9550 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4484,9 +4484,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static int cas_get_link_ksettings(struct net_device *dev,
@@ -5050,7 +5050,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->watchdog_timeo = CAS_TX_TIMEOUT;
#ifdef USE_NAPI
- netif_napi_add(dev, &cp->napi, cas_poll, 64);
+ netif_napi_add(dev, &cp->napi, cas_poll);
#endif
dev->irq = pdev->irq;
dev->dma = 0;
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 0cd8493b810f..8addee6d04bd 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -63,8 +63,8 @@ static struct vio_version vsw_versions[] = {
static void vsw_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vsw_get_msglevel(struct net_device *dev)
@@ -354,8 +354,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
dev_set_drvdata(&vdev->dev, port);
- netif_napi_add(dev, &port->napi, sunvnet_poll_common,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, sunvnet_poll_common);
spin_lock_irqsave(&vp->lock, flags);
list_add_rcu(&port->list, &vp->port_list);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index df70df29deea..e6144d963eaa 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6798,12 +6798,12 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strlcpy(info->bus_info, pci_name(np->pdev),
+ strscpy(info->bus_info, pci_name(np->pdev),
sizeof(info->bus_info));
}
@@ -9115,7 +9115,7 @@ static int niu_ldg_init(struct niu *np)
for (i = 0; i < np->num_ldg; i++) {
struct niu_ldg *lp = &np->ldg[i];
- netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
+ netif_napi_add(np->dev, &lp->napi, niu_poll);
lp->np = np;
lp->ldg_num = ldg_num_map[i];
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 531a6f449afa..34b94153bf0c 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1038,8 +1038,8 @@ static void bigmac_set_multicast(struct net_device *dev)
/* Ethtool support... */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunbmac", sizeof(info->driver));
- strlcpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->driver, "sunbmac", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
}
static u32 bigmac_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index a14591b41acb..4154e68639ac 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2521,9 +2521,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_link_ksettings(struct net_device *dev,
@@ -2980,7 +2980,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_consistent;
dev->netdev_ops = &gem_netdev_ops;
- netif_napi_add(dev, &gp->napi, gem_poll, 64);
+ netif_napi_add(dev, &gp->napi, gem_poll);
dev->ethtool_ops = &gem_ethtool_ops;
dev->watchdog_timeo = 5 * HZ;
dev->dma = 0;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 8594ee839628..62deed210a95 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -61,15 +61,8 @@
#include "sunhme.h"
#define DRV_NAME "sunhme"
-#define DRV_VERSION "3.10"
-#define DRV_RELDATE "August 26, 2008"
-#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
-static char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
MODULE_LICENSE("GPL");
@@ -87,13 +80,17 @@ static struct quattro *qfe_sbus_list;
static struct quattro *qfe_pci_list;
#endif
-#undef HMEDEBUG
-#undef SXDEBUG
-#undef RXDEBUG
-#undef TXDEBUG
-#undef TXLOGGING
+#define hme_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
+#define HMD hme_debug
+
+/* "Auto Switch Debug" aka phy debug */
+#if 1
+#define ASD hme_debug
+#else
+#define ASD(...)
+#endif
-#ifdef TXLOGGING
+#if 0
struct hme_tx_logent {
unsigned int tstamp;
int tx_new, tx_old;
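The sunhme debug rework that starts here replaces the old compile-time HMD()/ASD() printk wrappers with pr_debug()-based macros (and, further down, the TX/RX ring chatter with netdev_vdbg()), so the messages cost nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG enables them at run time, and they pick up the function name automatically. A sketch of the resulting usage; the status read is illustrative:

    /* Enable at run time with dynamic debug, for example:
     *   echo 'file sunhme.c +p' > /sys/kernel/debug/dynamic_debug/control
     */
    u32 status = hme_read32(hp, hp->gregs + GREG_STAT);

    HMD("status=%08x\n", status);   /* expands to pr_debug("%s: ...", __func__, ...) */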
@@ -128,46 +125,16 @@ static __inline__ void tx_dump_log(void)
this = txlog_cur_entry;
for (i = 0; i < TX_LOG_LEN; i++) {
- printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
+ pr_err("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
tx_log[this].tstamp,
tx_log[this].tx_new, tx_log[this].tx_old,
tx_log[this].action, tx_log[this].status);
this = (this + 1) & (TX_LOG_LEN - 1);
}
}
-static __inline__ void tx_dump_ring(struct happy_meal *hp)
-{
- struct hmeal_init_block *hb = hp->happy_block;
- struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
- int i;
-
- for (i = 0; i < TX_RING_SIZE; i+=4) {
- printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
- i, i + 4,
- le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
- le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
- le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
- le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
- }
-}
-#else
-#define tx_add_log(hp, a, s) do { } while(0)
-#define tx_dump_log() do { } while(0)
-#define tx_dump_ring(hp) do { } while(0)
-#endif
-
-#ifdef HMEDEBUG
-#define HMD(x) printk x
-#else
-#define HMD(x)
-#endif
-
-/* #define AUTO_SWITCH_DEBUG */
-
-#ifdef AUTO_SWITCH_DEBUG
-#define ASD(x) printk x
#else
-#define ASD(x)
+#define tx_add_log(hp, a, s)
+#define tx_dump_log()
#endif
#define DEFAULT_IPG0 16 /* For lance-mode only */
@@ -343,8 +310,6 @@ static int happy_meal_bb_read(struct happy_meal *hp,
int retval = 0;
int i;
- ASD(("happy_meal_bb_read: reg=%d ", reg));
-
/* Enable the MIF BitBang outputs. */
hme_write32(hp, tregs + TCVR_BBOENAB, 1);
@@ -378,7 +343,7 @@ static int happy_meal_bb_read(struct happy_meal *hp,
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
- ASD(("value=%x\n", retval));
+ ASD("reg=%d value=%x\n", reg, retval);
return retval;
}
@@ -389,7 +354,7 @@ static void happy_meal_bb_write(struct happy_meal *hp,
u32 tmp;
int i;
- ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
+ ASD("reg=%d value=%x\n", reg, value);
/* Enable the MIF BitBang outputs. */
hme_write32(hp, tregs + TCVR_BBOENAB, 1);
@@ -433,14 +398,13 @@ static int happy_meal_tcvr_read(struct happy_meal *hp,
int tries = TCVR_READ_TRIES;
int retval;
- ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
if (hp->tcvr_type == none) {
- ASD(("no transceiver, value=TCVR_FAILURE\n"));
+ ASD("no transceiver, value=TCVR_FAILURE\n");
return TCVR_FAILURE;
}
if (!(hp->happy_flags & HFLAG_FENABLE)) {
- ASD(("doing bit bang\n"));
+ ASD("doing bit bang\n");
return happy_meal_bb_read(hp, tregs, reg);
}
@@ -449,11 +413,11 @@ static int happy_meal_tcvr_read(struct happy_meal *hp,
while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
udelay(20);
if (!tries) {
- printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
+ netdev_err(hp->dev, "Aieee, transceiver MIF read bolixed\n");
return TCVR_FAILURE;
}
retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
- ASD(("value=%04x\n", retval));
+ ASD("reg=0x%02x value=%04x\n", reg, retval);
return retval;
}
@@ -465,7 +429,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
{
int tries = TCVR_WRITE_TRIES;
- ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
+ ASD("reg=0x%02x value=%04x\n", reg, value);
/* Welcome to Sun Microsystems, can I take your order please? */
if (!(hp->happy_flags & HFLAG_FENABLE)) {
@@ -482,7 +446,7 @@ static void happy_meal_tcvr_write(struct happy_meal *hp,
/* Anything else? */
if (!tries)
- printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
+ netdev_err(hp->dev, "Aieee, transceiver MIF write bolixed\n");
/* Fifty-two cents is your change, have a nice day. */
}
@@ -660,8 +624,8 @@ static void happy_meal_timer(struct timer_list *t)
/* Enter force mode. */
do_force_mode:
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
- printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
- hp->dev->name);
+ netdev_notice(hp->dev,
+ "Auto-Negotiation unsuccessful, trying force link mode\n");
hp->sw_bmcr = BMCR_SPEED100;
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -720,8 +684,8 @@ static void happy_meal_timer(struct timer_list *t)
restart_timer = 0;
} else {
if (hp->timer_ticks >= 10) {
- printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
- "not completely up.\n", hp->dev->name);
+ netdev_notice(hp->dev,
+ "Auto negotiation successful, link still not completely up.\n");
hp->timer_ticks = 0;
restart_timer = 1;
} else {
@@ -776,14 +740,14 @@ static void happy_meal_timer(struct timer_list *t)
*/
/* Let the user know... */
- printk(KERN_NOTICE "%s: Link down, cable problem?\n",
- hp->dev->name);
+ netdev_notice(hp->dev,
+ "Link down, cable problem?\n");
ret = happy_meal_init(hp);
if (ret) {
/* ho hum... */
- printk(KERN_ERR "%s: Error, cannot re-init the "
- "Happy Meal.\n", hp->dev->name);
+ netdev_err(hp->dev,
+ "Error, cannot re-init the Happy Meal.\n");
}
goto out;
}
@@ -805,8 +769,8 @@ static void happy_meal_timer(struct timer_list *t)
case asleep:
default:
/* Can't happens.... */
- printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Aieee, link timer is asleep but we got one anyways!\n");
restart_timer = 0;
hp->timer_ticks = 0;
hp->timer_state = asleep; /* foo on you */
@@ -830,7 +794,7 @@ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
{
int tries = TX_RESET_TRIES;
- HMD(("happy_meal_tx_reset: reset, "));
+ HMD("reset...\n");
/* Would you like to try our SMCC Delux? */
hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
@@ -839,10 +803,10 @@ static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
/* Lettuce, tomato, buggy hardware (no extra charge)? */
if (!tries)
- printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
+ netdev_err(hp->dev, "Transceiver BigMac ATTACK!");
/* Take care. */
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -850,7 +814,7 @@ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
{
int tries = RX_RESET_TRIES;
- HMD(("happy_meal_rx_reset: reset, "));
+ HMD("reset...\n");
/* We have a special on GNU/Viking hardware bugs today. */
hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
@@ -859,10 +823,10 @@ static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
/* Will that be all? */
if (!tries)
- printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
+ netdev_err(hp->dev, "Receiver BigMac ATTACK!\n");
/* Don't forget your vik_1137125_wa. Have a nice day. */
- HMD(("done\n"));
+ HMD("done\n");
}
#define STOP_TRIES 16
@@ -872,7 +836,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
{
int tries = STOP_TRIES;
- HMD(("happy_meal_stop: reset, "));
+ HMD("reset...\n");
/* We're consolidating our STB products, it's your lucky day. */
hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
@@ -881,10 +845,10 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
/* Come back next week when we are "Sun Microelectronics". */
if (!tries)
- printk(KERN_ERR "happy meal: Fry guys.");
+ netdev_err(hp->dev, "Fry guys.\n");
/* Remember: "Different name, same old buggy as shit hardware." */
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -913,21 +877,18 @@ static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
/* hp->happy_lock must be held */
static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
{
- ASD(("happy_meal_poll_stop: "));
-
/* If polling disabled or not polling already, nothing to do. */
if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
(HFLAG_POLLENABLE | HFLAG_POLL)) {
- HMD(("not polling, return\n"));
+ ASD("not polling, return\n");
return;
}
/* Shut up the MIF. */
- ASD(("were polling, mif ints off, "));
+ ASD("were polling, mif ints off, polling off\n");
hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
/* Turn off polling. */
- ASD(("polling off, "));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
@@ -936,7 +897,7 @@ static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
/* Let the bits set. */
udelay(200);
- ASD(("done\n"));
+ ASD("done\n");
}
/* Only Sun can take such nice parts and fuck up the programming interface
@@ -952,44 +913,40 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
int result, tries = TCVR_RESET_TRIES;
tconfig = hme_read32(hp, tregs + TCVR_CFG);
- ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
+ ASD("tcfg=%08x\n", tconfig);
if (hp->tcvr_type == external) {
- ASD(("external<"));
hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
hp->tcvr_type = internal;
hp->paddr = TCV_PADDR_ITX;
- ASD(("ISOLATE,"));
happy_meal_tcvr_write(hp, tregs, MII_BMCR,
(BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
if (result == TCVR_FAILURE) {
- ASD(("phyread_fail>\n"));
+ ASD("phyread_fail\n");
return -1;
}
- ASD(("phyread_ok,PSELECT>"));
+ ASD("external: ISOLATE, phyread_ok, PSELECT\n");
hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
hp->tcvr_type = external;
hp->paddr = TCV_PADDR_ETX;
} else {
if (tconfig & TCV_CFG_MDIO1) {
- ASD(("internal<PSELECT,"));
hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
- ASD(("ISOLATE,"));
happy_meal_tcvr_write(hp, tregs, MII_BMCR,
(BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
if (result == TCVR_FAILURE) {
- ASD(("phyread_fail>\n"));
+ ASD("phyread_fail>\n");
return -1;
}
- ASD(("phyread_ok,~PSELECT>"));
+ ASD("internal: PSELECT, ISOLATE, phyread_ok, ~PSELECT\n");
hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
hp->tcvr_type = internal;
hp->paddr = TCV_PADDR_ITX;
}
}
- ASD(("BMCR_RESET "));
+ ASD("BMCR_RESET...\n");
happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
while (--tries) {
@@ -1002,10 +959,10 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
udelay(20);
}
if (!tries) {
- ASD(("BMCR RESET FAILED!\n"));
+ ASD("BMCR RESET FAILED!\n");
return -1;
}
- ASD(("RESET_OK\n"));
+ ASD("RESET_OK\n");
/* Get fresh copies of the PHY registers. */
hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
@@ -1013,7 +970,7 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
- ASD(("UNISOLATE"));
+ ASD("UNISOLATE...\n");
hp->sw_bmcr &= ~(BMCR_ISOLATE);
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1027,10 +984,10 @@ static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
udelay(20);
}
if (!tries) {
- ASD((" FAILED!\n"));
+ ASD("UNISOLATE FAILED!\n");
return -1;
}
- ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
+ ASD("SUCCESS and CSCONFIG_DFBYPASS\n");
if (!is_lucent_phy(hp)) {
result = happy_meal_tcvr_read(hp, tregs,
DP83840_CSCONFIG);
@@ -1048,60 +1005,55 @@ static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tr
{
unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
- ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
+ ASD("tcfg=%08lx\n", tconfig);
if (hp->happy_flags & HFLAG_POLL) {
/* If we are polling, we must stop to get the transceiver type. */
- ASD(("<polling> "));
if (hp->tcvr_type == internal) {
if (tconfig & TCV_CFG_MDIO1) {
- ASD(("<internal> <poll stop> "));
happy_meal_poll_stop(hp, tregs);
hp->paddr = TCV_PADDR_ETX;
hp->tcvr_type = external;
- ASD(("<external>\n"));
tconfig &= ~(TCV_CFG_PENABLE);
tconfig |= TCV_CFG_PSELECT;
hme_write32(hp, tregs + TCVR_CFG, tconfig);
+ ASD("poll stop, internal->external\n");
}
} else {
if (hp->tcvr_type == external) {
- ASD(("<external> "));
if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
- ASD(("<poll stop> "));
happy_meal_poll_stop(hp, tregs);
hp->paddr = TCV_PADDR_ITX;
hp->tcvr_type = internal;
- ASD(("<internal>\n"));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) &
~(TCV_CFG_PSELECT));
+ ASD("poll stop, external->internal\n");
}
- ASD(("\n"));
} else {
- ASD(("<none>\n"));
+ ASD("polling, none\n");
}
}
} else {
u32 reread = hme_read32(hp, tregs + TCVR_CFG);
/* Else we can just work off of the MDIO bits. */
- ASD(("<not polling> "));
if (reread & TCV_CFG_MDIO1) {
hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
hp->paddr = TCV_PADDR_ETX;
hp->tcvr_type = external;
- ASD(("<external>\n"));
+ ASD("not polling, external\n");
} else {
if (reread & TCV_CFG_MDIO0) {
hme_write32(hp, tregs + TCVR_CFG,
tconfig & ~(TCV_CFG_PSELECT));
hp->paddr = TCV_PADDR_ITX;
hp->tcvr_type = internal;
- ASD(("<internal>\n"));
+ ASD("not polling, internal\n");
} else {
- printk(KERN_ERR "happy meal: Transceiver and a coke please.");
+ netdev_err(hp->dev,
+ "Transceiver and a coke please.");
hp->tcvr_type = none; /* Grrr... */
- ASD(("<none>\n"));
+ ASD("not polling, none\n");
}
}
}
@@ -1208,15 +1160,14 @@ static void happy_meal_init_rings(struct happy_meal *hp)
struct hmeal_init_block *hb = hp->happy_block;
int i;
- HMD(("happy_meal_init_rings: counters to zero, "));
+ HMD("counters to zero\n");
hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
/* Free any skippy bufs left around in the rings. */
- HMD(("clean, "));
happy_meal_clean_rings(hp);
/* Now get new skippy bufs for the receive ring. */
- HMD(("init rxring, "));
+ HMD("init rxring\n");
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
u32 mapping;
@@ -1243,11 +1194,11 @@ static void happy_meal_init_rings(struct happy_meal *hp)
skb_reserve(skb, RX_OFFSET);
}
- HMD(("init txring, "));
+ HMD("init txring\n");
for (i = 0; i < TX_RING_SIZE; i++)
hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
- HMD(("done\n"));
+ HMD("done\n");
}
/* hp->happy_lock must be held */
@@ -1294,17 +1245,11 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp,
* XXX so I completely skip checking for it in the BMSR for now.
*/
-#ifdef AUTO_SWITCH_DEBUG
- ASD(("%s: Advertising [ ", hp->dev->name));
- if (hp->sw_advertise & ADVERTISE_10HALF)
- ASD(("10H "));
- if (hp->sw_advertise & ADVERTISE_10FULL)
- ASD(("10F "));
- if (hp->sw_advertise & ADVERTISE_100HALF)
- ASD(("100H "));
- if (hp->sw_advertise & ADVERTISE_100FULL)
- ASD(("100F "));
-#endif
+ ASD("Advertising [ %s%s%s%s]\n",
+ hp->sw_advertise & ADVERTISE_10HALF ? "10H " : "",
+ hp->sw_advertise & ADVERTISE_10FULL ? "10F " : "",
+ hp->sw_advertise & ADVERTISE_100HALF ? "100H " : "",
+ hp->sw_advertise & ADVERTISE_100FULL ? "100F " : "");
/* Enable Auto-Negotiation, this is usually on already... */
hp->sw_bmcr |= BMCR_ANENABLE;
@@ -1324,10 +1269,11 @@ happy_meal_begin_auto_negotiation(struct happy_meal *hp,
udelay(10);
}
if (!timeout) {
- printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
- "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
- printk(KERN_NOTICE "%s: Performing force link detection.\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Happy Meal would not start auto negotiation BMCR=0x%04x\n",
+ hp->sw_bmcr);
+ netdev_notice(hp->dev,
+ "Performing force link detection.\n");
goto force_link;
} else {
hp->timer_state = arbwait;
@@ -1382,70 +1328,69 @@ static int happy_meal_init(struct happy_meal *hp)
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
+ const char *bursts;
u32 regtmp, rxcfg;
/* If auto-negotiation timer is running, kill it. */
del_timer(&hp->happy_timer);
- HMD(("happy_meal_init: happy_flags[%08x] ",
- hp->happy_flags));
+ HMD("happy_flags[%08x]\n", hp->happy_flags);
if (!(hp->happy_flags & HFLAG_INIT)) {
- HMD(("set HFLAG_INIT, "));
+ HMD("set HFLAG_INIT\n");
hp->happy_flags |= HFLAG_INIT;
happy_meal_get_counters(hp, bregs);
}
/* Stop polling. */
- HMD(("to happy_meal_poll_stop\n"));
+ HMD("to happy_meal_poll_stop\n");
happy_meal_poll_stop(hp, tregs);
/* Stop transmitter and receiver. */
- HMD(("happy_meal_init: to happy_meal_stop\n"));
+ HMD("to happy_meal_stop\n");
happy_meal_stop(hp, gregs);
/* Alloc and reset the tx/rx descriptor chains. */
- HMD(("happy_meal_init: to happy_meal_init_rings\n"));
+ HMD("to happy_meal_init_rings\n");
happy_meal_init_rings(hp);
/* Shut up the MIF. */
- HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
- hme_read32(hp, tregs + TCVR_IMASK)));
+ HMD("Disable all MIF irqs (old[%08x])\n",
+ hme_read32(hp, tregs + TCVR_IMASK));
hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
/* See if we can enable the MIF frame on this card to speak to the DP83840. */
if (hp->happy_flags & HFLAG_FENABLE) {
- HMD(("use frame old[%08x], ",
- hme_read32(hp, tregs + TCVR_CFG)));
+ HMD("use frame old[%08x]\n",
+ hme_read32(hp, tregs + TCVR_CFG));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
} else {
- HMD(("use bitbang old[%08x], ",
- hme_read32(hp, tregs + TCVR_CFG)));
+ HMD("use bitbang old[%08x]\n",
+ hme_read32(hp, tregs + TCVR_CFG));
hme_write32(hp, tregs + TCVR_CFG,
hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
}
/* Check the state of the transceiver. */
- HMD(("to happy_meal_transceiver_check\n"));
+ HMD("to happy_meal_transceiver_check\n");
happy_meal_transceiver_check(hp, tregs);
/* Put the Big Mac into a sane state. */
- HMD(("happy_meal_init: "));
switch(hp->tcvr_type) {
case none:
/* Cannot operate if we don't know the transceiver type! */
- HMD(("AAIEEE no transceiver type, EAGAIN"));
+ HMD("AAIEEE no transceiver type, EAGAIN\n");
return -EAGAIN;
case internal:
/* Using the MII buffers. */
- HMD(("internal, using MII, "));
+ HMD("internal, using MII\n");
hme_write32(hp, bregs + BMAC_XIFCFG, 0);
break;
case external:
/* Not using the MII, disable it. */
- HMD(("external, disable MII, "));
+ HMD("external, disable MII\n");
hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
break;
}
@@ -1454,18 +1399,16 @@ static int happy_meal_init(struct happy_meal *hp)
return -EAGAIN;
/* Reset the Happy Meal Big Mac transceiver and the receiver. */
- HMD(("tx/rx reset, "));
+ HMD("tx/rx reset\n");
happy_meal_tx_reset(hp, bregs);
happy_meal_rx_reset(hp, bregs);
/* Set jam size and inter-packet gaps to reasonable defaults. */
- HMD(("jsize/ipg1/ipg2, "));
hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
/* Load up the MAC address and random seed. */
- HMD(("rseed/macaddr, "));
/* The docs recommend to use the 10LSB of our MAC here. */
hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
@@ -1474,7 +1417,6 @@ static int happy_meal_init(struct happy_meal *hp)
hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
- HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
@@ -1504,9 +1446,9 @@ static int happy_meal_init(struct happy_meal *hp)
}
/* Set the RX and TX ring ptrs. */
- HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
- ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
- ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
+ HMD("ring ptrs rxr[%08x] txr[%08x]\n",
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
+ ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
hme_write32(hp, erxregs + ERX_RING,
((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
hme_write32(hp, etxregs + ETX_RING,
@@ -1524,9 +1466,6 @@ static int happy_meal_init(struct happy_meal *hp)
| 0x4);
/* Set the supported burst sizes. */
- HMD(("happy_meal_init: old[%08x] bursts<",
- hme_read32(hp, gregs + GREG_CFG)));
-
#ifndef CONFIG_SPARC
/* It is always PCI and can handle 64byte bursts. */
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
@@ -1554,34 +1493,35 @@ static int happy_meal_init(struct happy_meal *hp)
}
#endif
- HMD(("64>"));
+ bursts = "64";
hme_write32(hp, gregs + GREG_CFG, gcfg);
} else if (hp->happy_bursts & DMA_BURST32) {
- HMD(("32>"));
+ bursts = "32";
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
} else if (hp->happy_bursts & DMA_BURST16) {
- HMD(("16>"));
+ bursts = "16";
hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
} else {
- HMD(("XXX>"));
+ bursts = "XXX";
hme_write32(hp, gregs + GREG_CFG, 0);
}
#endif /* CONFIG_SPARC */
+ HMD("old[%08x] bursts<%s>\n",
+ hme_read32(hp, gregs + GREG_CFG), bursts);
+
/* Turn off interrupts we do not want to hear. */
- HMD((", enable global interrupts, "));
hme_write32(hp, gregs + GREG_IMASK,
(GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
/* Set the transmit ring buffer size. */
- HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
- hme_read32(hp, etxregs + ETX_RSIZE)));
+ HMD("tx rsize=%d oreg[%08x]\n", (int)TX_RING_SIZE,
+ hme_read32(hp, etxregs + ETX_RSIZE));
hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
/* Enable transmitter DVMA. */
- HMD(("tx dma enable old[%08x], ",
- hme_read32(hp, etxregs + ETX_CFG)));
+ HMD("tx dma enable old[%08x]\n", hme_read32(hp, etxregs + ETX_CFG));
hme_write32(hp, etxregs + ETX_CFG,
hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
@@ -1590,21 +1530,23 @@ static int happy_meal_init(struct happy_meal *hp)
* properly. I cannot think of a sane way to provide complete
* coverage for this hardware bug yet.
*/
- HMD(("erx regs bug old[%08x]\n",
- hme_read32(hp, erxregs + ERX_CFG)));
+ HMD("erx regs bug old[%08x]\n",
+ hme_read32(hp, erxregs + ERX_CFG));
hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
regtmp = hme_read32(hp, erxregs + ERX_CFG);
hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
- printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
- printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
- ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
+ netdev_err(hp->dev,
+ "Eieee, rx config register gets greasy fries.\n");
+ netdev_err(hp->dev,
+ "Trying to set %08x, reread gives %08x\n",
+ ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
/* XXX Should return failure here... */
}
/* Enable Big Mac hash table filter. */
- HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
- hme_read32(hp, bregs + BMAC_RXCFG)));
+ HMD("enable hash rx_cfg_old[%08x]\n",
+ hme_read32(hp, bregs + BMAC_RXCFG));
rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
if (hp->dev->flags & IFF_PROMISC)
rxcfg |= BIGMAC_RXCFG_PMISC;
@@ -1614,7 +1556,7 @@ static int happy_meal_init(struct happy_meal *hp)
udelay(10);
/* Ok, configure the Big Mac transmitter. */
- HMD(("BIGMAC init, "));
+ HMD("BIGMAC init\n");
regtmp = 0;
if (hp->happy_flags & HFLAG_FULL)
regtmp |= BIGMAC_TXCFG_FULLDPLX;
@@ -1638,14 +1580,13 @@ static int happy_meal_init(struct happy_meal *hp)
if (hp->tcvr_type == external)
regtmp |= BIGMAC_XCFG_MIIDISAB;
- HMD(("XIF config old[%08x], ",
- hme_read32(hp, bregs + BMAC_XIFCFG)));
+ HMD("XIF config old[%08x]\n", hme_read32(hp, bregs + BMAC_XIFCFG));
hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
/* Start things up. */
- HMD(("tx old[%08x] and rx [%08x] ON!\n",
- hme_read32(hp, bregs + BMAC_TXCFG),
- hme_read32(hp, bregs + BMAC_RXCFG)));
+ HMD("tx old[%08x] and rx [%08x] ON!\n",
+ hme_read32(hp, bregs + BMAC_TXCFG),
+ hme_read32(hp, bregs + BMAC_RXCFG));
/* Set larger TX/RX size to allow for 802.1q */
hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
@@ -1735,25 +1676,26 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
GREG_STAT_SLVPERR))
- printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
- hp->dev->name, status);
+ netdev_err(hp->dev,
+ "Error interrupt for happy meal, status = %08x\n",
+ status);
if (status & GREG_STAT_RFIFOVF) {
/* Receive FIFO overflow is harmless and the hardware will take
care of it, just some packets are lost. Who cares. */
- printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
+ netdev_dbg(hp->dev, "Happy Meal receive FIFO overflow.\n");
}
if (status & GREG_STAT_STSTERR) {
/* BigMAC SQE link test failed. */
- printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal BigMAC SQE test failed.\n");
reset = 1;
}
if (status & GREG_STAT_TFIFO_UND) {
/* Transmit FIFO underrun, again DMA error likely. */
- printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "Happy Meal transmitter FIFO underrun, DMA error.\n");
reset = 1;
}
@@ -1761,7 +1703,7 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Driver error, tried to transmit something larger
* than ethernet max mtu.
*/
- printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal MAX Packet size error.\n");
reset = 1;
}
@@ -1771,21 +1713,16 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
* faster than the interrupt handler could keep up
* with.
*/
- printk(KERN_INFO "%s: Happy Meal out of receive "
- "descriptors, packet dropped.\n",
- hp->dev->name);
+ netdev_info(hp->dev,
+ "Happy Meal out of receive descriptors, packet dropped.\n");
}
if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
/* All sorts of DMA receive errors. */
- printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
- if (status & GREG_STAT_RXERR)
- printk("GenericError ");
- if (status & GREG_STAT_RXPERR)
- printk("ParityError ");
- if (status & GREG_STAT_RXTERR)
- printk("RxTagBotch ");
- printk("]\n");
+ netdev_err(hp->dev, "Happy Meal rx DMA errors [ %s%s%s]\n",
+ status & GREG_STAT_RXERR ? "GenericError " : "",
+ status & GREG_STAT_RXPERR ? "ParityError " : "",
+ status & GREG_STAT_RXTERR ? "RxTagBotch " : "");
reset = 1;
}
@@ -1793,29 +1730,24 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Driver bug, didn't set EOP bit in tx descriptor given
* to the happy meal.
*/
- printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
- hp->dev->name);
+ netdev_err(hp->dev,
+ "EOP not set in happy meal transmit descriptor!\n");
reset = 1;
}
if (status & GREG_STAT_MIFIRQ) {
/* MIF signalled an interrupt, were we polling it? */
- printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
+ netdev_err(hp->dev, "Happy Meal MIF interrupt.\n");
}
if (status &
(GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
/* All sorts of transmit DMA errors. */
- printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
- if (status & GREG_STAT_TXEACK)
- printk("GenericError ");
- if (status & GREG_STAT_TXLERR)
- printk("LateError ");
- if (status & GREG_STAT_TXPERR)
- printk("ParityError ");
- if (status & GREG_STAT_TXTERR)
- printk("TagBotch ");
- printk("]\n");
+ netdev_err(hp->dev, "Happy Meal tx DMA errors [ %s%s%s%s]\n",
+ status & GREG_STAT_TXEACK ? "GenericError " : "",
+ status & GREG_STAT_TXLERR ? "LateError " : "",
+ status & GREG_STAT_TXPERR ? "ParityError " : "",
+ status & GREG_STAT_TXTERR ? "TagBotch " : "");
reset = 1;
}
@@ -1823,14 +1755,14 @@ static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
/* Bus or parity error when cpu accessed happy meal registers
* or it's internal FIFO's. Should never see this.
*/
- printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
- hp->dev->name,
- (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
+ netdev_err(hp->dev,
+ "Happy Meal register access SBUS slave (%s) error.\n",
+ (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
reset = 1;
}
if (reset) {
- printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
+ netdev_notice(hp->dev, "Resetting...\n");
happy_meal_init(hp);
return 1;
}
@@ -1842,22 +1774,22 @@ static void happy_meal_mif_interrupt(struct happy_meal *hp)
{
void __iomem *tregs = hp->tcvregs;
- printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
+ netdev_info(hp->dev, "Link status change.\n");
hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
/* Use the fastest transmission protocol possible. */
if (hp->sw_lpa & LPA_100FULL) {
- printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 100Mbps at full duplex.\n");
hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
} else if (hp->sw_lpa & LPA_100HALF) {
- printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 100MBps at half duplex.\n");
hp->sw_bmcr |= BMCR_SPEED100;
} else if (hp->sw_lpa & LPA_10FULL) {
- printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Switching to 10MBps at full duplex.\n");
hp->sw_bmcr |= BMCR_FULLDPLX;
} else {
- printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
+ netdev_info(hp->dev, "Using 10Mbps at half duplex.\n");
}
happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
@@ -1865,12 +1797,6 @@ static void happy_meal_mif_interrupt(struct happy_meal *hp)
happy_meal_poll_stop(hp, tregs);
}
-#ifdef TXDEBUG
-#define TXD(x) printk x
-#else
-#define TXD(x)
-#endif
-
/* hp->happy_lock must be held */
static void happy_meal_tx(struct happy_meal *hp)
{
@@ -1880,13 +1806,12 @@ static void happy_meal_tx(struct happy_meal *hp)
int elem;
elem = hp->tx_old;
- TXD(("TX<"));
while (elem != hp->tx_new) {
struct sk_buff *skb;
u32 flags, dma_addr, dma_len;
int frag;
- TXD(("[%d]", elem));
+ netdev_vdbg(hp->dev, "TX[%d]\n", elem);
this = &txbase[elem];
flags = hme_read_desc32(hp, &this->tx_flags);
if (flags & TXFLAG_OWN)
@@ -1922,19 +1847,12 @@ static void happy_meal_tx(struct happy_meal *hp)
dev->stats.tx_packets++;
}
hp->tx_old = elem;
- TXD((">"));
if (netif_queue_stopped(dev) &&
TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
netif_wake_queue(dev);
}
-#ifdef RXDEBUG
-#define RXD(x) printk x
-#else
-#define RXD(x)
-#endif
-
/* Originally I used to handle the allocation failure by just giving back just
* that one ring buffer to the happy meal. Problem is that usually when that
* condition is triggered, the happy meal expects you to do something reasonable
@@ -1951,7 +1869,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
int elem = hp->rx_new, drops = 0;
u32 flags;
- RXD(("RX<"));
this = &rxbase[elem];
while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
struct sk_buff *skb;
@@ -1959,11 +1876,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
u16 csum = flags & RXFLAG_CSUM;
u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
- RXD(("[%d ", elem));
-
/* Check for errors. */
if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
- RXD(("ERR(%08x)]", flags));
+ netdev_vdbg(dev, "RX[%d ERR(%08x)]", elem, flags);
dev->stats.rx_errors++;
if (len < ETH_ZLEN)
dev->stats.rx_length_errors++;
@@ -2020,9 +1935,9 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
- dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, copy_skb->data, len);
- dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(hp->dma_dev, dma_addr, len + 2, DMA_FROM_DEVICE);
/* Reuse original ring buffer. */
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
@@ -2035,7 +1950,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb->csum = csum_unfold(~(__force __sum16)htons(csum));
skb->ip_summed = CHECKSUM_COMPLETE;
- RXD(("len=%d csum=%4x]", len, csum));
+ netdev_vdbg(dev, "RX[%d len=%d csum=%4x]", elem, len, csum);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -2047,8 +1962,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
hp->rx_new = elem;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
- RXD((">"));
+ netdev_info(hp->dev, "Memory squeeze, deferring packet.\n");
}
static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
@@ -2057,32 +1971,25 @@ static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
struct happy_meal *hp = netdev_priv(dev);
u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
- HMD(("happy_meal_interrupt: status=%08x ", happy_status));
+ HMD("status=%08x\n", happy_status);
spin_lock(&hp->happy_lock);
if (happy_status & GREG_STAT_ERRORS) {
- HMD(("ERRORS "));
if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
goto out;
}
- if (happy_status & GREG_STAT_MIFIRQ) {
- HMD(("MIFIRQ "));
+ if (happy_status & GREG_STAT_MIFIRQ)
happy_meal_mif_interrupt(hp);
- }
- if (happy_status & GREG_STAT_TXALL) {
- HMD(("TXALL "));
+ if (happy_status & GREG_STAT_TXALL)
happy_meal_tx(hp);
- }
- if (happy_status & GREG_STAT_RXTOHOST) {
- HMD(("RXTOHOST "));
+ if (happy_status & GREG_STAT_RXTOHOST)
happy_meal_rx(hp, dev);
- }
- HMD(("done\n"));
+ HMD("done\n");
out:
spin_unlock(&hp->happy_lock);
@@ -2100,7 +2007,7 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
struct happy_meal *hp = netdev_priv(dev);
u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
- HMD(("quattro_interrupt: status=%08x ", happy_status));
+ HMD("status=%08x\n", happy_status);
if (!(happy_status & (GREG_STAT_ERRORS |
GREG_STAT_MIFIRQ |
@@ -2110,31 +2017,23 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
spin_lock(&hp->happy_lock);
- if (happy_status & GREG_STAT_ERRORS) {
- HMD(("ERRORS "));
+ if (happy_status & GREG_STAT_ERRORS)
if (happy_meal_is_not_so_happy(hp, happy_status))
goto next;
- }
- if (happy_status & GREG_STAT_MIFIRQ) {
- HMD(("MIFIRQ "));
+ if (happy_status & GREG_STAT_MIFIRQ)
happy_meal_mif_interrupt(hp);
- }
- if (happy_status & GREG_STAT_TXALL) {
- HMD(("TXALL "));
+ if (happy_status & GREG_STAT_TXALL)
happy_meal_tx(hp);
- }
- if (happy_status & GREG_STAT_RXTOHOST) {
- HMD(("RXTOHOST "));
+ if (happy_status & GREG_STAT_RXTOHOST)
happy_meal_rx(hp, dev);
- }
next:
spin_unlock(&hp->happy_lock);
}
- HMD(("done\n"));
+ HMD("done\n");
return IRQ_HANDLED;
}
@@ -2145,8 +2044,6 @@ static int happy_meal_open(struct net_device *dev)
struct happy_meal *hp = netdev_priv(dev);
int res;
- HMD(("happy_meal_open: "));
-
/* On SBUS Quattro QFE cards, all hme interrupts are concentrated
* into a single source which we register handling at probe time.
*/
@@ -2154,15 +2051,14 @@ static int happy_meal_open(struct net_device *dev)
res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
dev->name, dev);
if (res) {
- HMD(("EAGAIN\n"));
- printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
- hp->irq);
+ HMD("EAGAIN\n");
+ netdev_err(dev, "Can't order irq %d to go.\n", hp->irq);
return -EAGAIN;
}
}
- HMD(("to happy_meal_init\n"));
+ HMD("to happy_meal_init\n");
spin_lock_irq(&hp->happy_lock);
res = happy_meal_init(hp);
@@ -2196,22 +2092,16 @@ static int happy_meal_close(struct net_device *dev)
return 0;
}
-#ifdef SXDEBUG
-#define SXD(x) printk x
-#else
-#define SXD(x)
-#endif
-
static void happy_meal_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct happy_meal *hp = netdev_priv(dev);
- printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
tx_dump_log();
- printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
- hme_read32(hp, hp->gregs + GREG_STAT),
- hme_read32(hp, hp->etxregs + ETX_CFG),
- hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
+ netdev_err(dev, "Happy Status %08x TX[%08x:%08x]\n",
+ hme_read32(hp, hp->gregs + GREG_STAT),
+ hme_read32(hp, hp->etxregs + ETX_CFG),
+ hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
spin_lock_irq(&hp->happy_lock);
happy_meal_init(hp);
@@ -2261,13 +2151,12 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irq(&hp->happy_lock);
- printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
entry = hp->tx_new;
- SXD(("SX<l[%d]e[%d]>", len, entry));
+ netdev_vdbg(dev, "SX<l[%d]e[%d]>\n", skb->len, entry);
hp->tx_skbs[entry] = skb;
if (skb_shinfo(skb)->nr_frags == 0) {
@@ -2467,11 +2356,10 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strlcpy(info->driver, "sunhme", sizeof(info->driver));
- strlcpy(info->version, "2.02", sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2504,8 +2392,6 @@ static const struct ethtool_ops hme_ethtool_ops = {
.set_link_ksettings = hme_set_link_ksettings,
};
-static int hme_version_printed;
-
#ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find its quattro parent.
* If none exist, allocate and return a new one.
@@ -2523,19 +2409,15 @@ static struct quattro *quattro_sbus_find(struct platform_device *child)
if (qp)
return qp;
- qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
- if (qp != NULL) {
- int i;
-
- for (i = 0; i < 4; i++)
- qp->happy_meals[i] = NULL;
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
- qp->quattro_dev = child;
- qp->next = qfe_sbus_list;
- qfe_sbus_list = qp;
+ qp->quattro_dev = child;
+ qp->next = qfe_sbus_list;
+ qfe_sbus_list = qp;
- platform_set_drvdata(op, qp);
- }
+ platform_set_drvdata(op, qp);
return qp;
}
@@ -2563,8 +2445,9 @@ static int __init quattro_sbus_register_irqs(void)
IRQF_SHARED, "Quattro",
qp);
if (err != 0) {
- printk(KERN_ERR "Quattro HME: IRQ registration "
- "error %d.\n", err);
+ dev_err(&op->dev,
+ "Quattro HME: IRQ registration error %d.\n",
+ err);
return err;
}
}
@@ -2595,30 +2478,33 @@ static void quattro_sbus_free_irqs(void)
#ifdef CONFIG_PCI
static struct quattro *quattro_pci_find(struct pci_dev *pdev)
{
+ int i;
struct pci_dev *bdev = pdev->bus->self;
struct quattro *qp;
- if (!bdev) return NULL;
+ if (!bdev)
+ return ERR_PTR(-ENODEV);
+
for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
struct pci_dev *qpdev = qp->quattro_dev;
if (qpdev == bdev)
return qp;
}
+
qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
- if (qp != NULL) {
- int i;
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
- for (i = 0; i < 4; i++)
- qp->happy_meals[i] = NULL;
+ for (i = 0; i < 4; i++)
+ qp->happy_meals[i] = NULL;
- qp->quattro_dev = bdev;
- qp->next = qfe_pci_list;
- qfe_pci_list = qp;
+ qp->quattro_dev = bdev;
+ qp->next = qfe_pci_list;
+ qfe_pci_list = qp;
- /* No range tricks necessary on PCI. */
- qp->nranges = 0;
- }
+ /* No range tricks necessary on PCI. */
+ qp->nranges = 0;
return qp;
}
#endif /* CONFIG_PCI */
@@ -2668,9 +2554,6 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out;
SET_NETDEV_DEV(dev, &op->dev);
- if (hme_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
/* If user did not specify a MAC address specifically, use
* the Quattro local-mac-address property...
*/
@@ -2712,35 +2595,35 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
hp->gregs = of_ioremap(&op->resource[0], 0,
GREG_REG_SIZE, "HME Global Regs");
if (!hp->gregs) {
- printk(KERN_ERR "happymeal: Cannot map global registers.\n");
+ dev_err(&op->dev, "Cannot map global registers.\n");
goto err_out_free_netdev;
}
hp->etxregs = of_ioremap(&op->resource[1], 0,
ETX_REG_SIZE, "HME TX Regs");
if (!hp->etxregs) {
- printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
+ dev_err(&op->dev, "Cannot map MAC TX registers.\n");
goto err_out_iounmap;
}
hp->erxregs = of_ioremap(&op->resource[2], 0,
ERX_REG_SIZE, "HME RX Regs");
if (!hp->erxregs) {
- printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
+ dev_err(&op->dev, "Cannot map MAC RX registers.\n");
goto err_out_iounmap;
}
hp->bigmacregs = of_ioremap(&op->resource[3], 0,
BMAC_REG_SIZE, "HME BIGMAC Regs");
if (!hp->bigmacregs) {
- printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
+ dev_err(&op->dev, "Cannot map BIGMAC registers.\n");
goto err_out_iounmap;
}
hp->tcvregs = of_ioremap(&op->resource[4], 0,
TCVR_REG_SIZE, "HME Tranceiver Regs");
if (!hp->tcvregs) {
- printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
+ dev_err(&op->dev, "Cannot map TCVR registers.\n");
goto err_out_iounmap;
}
@@ -2807,21 +2690,19 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
err = register_netdev(hp->dev);
if (err) {
- printk(KERN_ERR "happymeal: Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting.\n");
goto err_out_free_coherent;
}
platform_set_drvdata(op, hp);
if (qfe_slot != -1)
- printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
- dev->name, qfe_slot);
+ netdev_info(dev,
+ "Quattro HME slot %d (SBUS) 10/100baseT Ethernet %pM\n",
+ qfe_slot, dev->dev_addr);
else
- printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
- dev->name);
-
- printk("%pM\n", dev->dev_addr);
+ netdev_info(dev, "HAPPY MEAL (SBUS) 10/100baseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
@@ -2949,7 +2830,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
struct happy_meal *hp;
struct net_device *dev;
void __iomem *hpreg_base;
- unsigned long hpreg_res;
+ struct resource *hpreg_res;
int i, qfe_slot = -1;
char prom_name[64];
u8 addr[ETH_ALEN];
@@ -2966,32 +2847,33 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
strcpy(prom_name, "SUNW,hme");
#endif
- err = -ENODEV;
-
- if (pci_enable_device(pdev))
+ err = pcim_enable_device(pdev);
+ if (err)
goto err_out;
pci_set_master(pdev);
if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
qp = quattro_pci_find(pdev);
- if (qp == NULL)
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
goto err_out;
+ }
+
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
- if (qp->happy_meals[qfe_slot] == NULL)
+ if (!qp->happy_meals[qfe_slot])
break;
+
if (qfe_slot == 4)
goto err_out;
}
- dev = alloc_etherdev(sizeof(struct happy_meal));
- err = -ENOMEM;
- if (!dev)
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct happy_meal));
+ if (!dev) {
+ err = -ENOMEM;
goto err_out;
+ }
SET_NETDEV_DEV(dev, &pdev->dev);
- if (hme_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
hp = netdev_priv(dev);
hp->happy_dev = pdev;
@@ -3005,21 +2887,26 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
qp->happy_meals[qfe_slot] = dev;
}
- hpreg_res = pci_resource_start(pdev, 0);
- err = -ENODEV;
+ err = -EINVAL;
if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
- printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
+ dev_err(&pdev->dev,
+ "Cannot find proper PCI device base address.\n");
goto err_out_clear_quattro;
}
- if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
- "aborting.\n");
+
+ hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), DRV_NAME);
+ if (IS_ERR(hpreg_res)) {
+ err = PTR_ERR(hpreg_res);
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
goto err_out_clear_quattro;
}
- if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
- printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
- goto err_out_free_res;
+ hpreg_base = pcim_iomap(pdev, 0, 0x8000);
+ if (!hpreg_base) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Unable to remap card memory.\n");
+ goto err_out_clear_quattro;
}
for (i = 0; i < 6; i++) {
@@ -3085,11 +2972,12 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS;
#endif
- hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
- &hp->hblock_dvma, GFP_KERNEL);
- err = -ENODEV;
- if (!hp->happy_block)
- goto err_out_iounmap;
+ hp->happy_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &hp->hblock_dvma, GFP_KERNEL);
+ if (!hp->happy_block) {
+ err = -ENOMEM;
+ goto err_out_clear_quattro;
+ }
hp->linkcheck = 0;
hp->timer_state = asleep;
@@ -3123,11 +3011,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
happy_meal_set_initial_advertisement(hp);
spin_unlock_irq(&hp->happy_lock);
- err = register_netdev(hp->dev);
+ err = devm_register_netdev(&pdev->dev, dev);
if (err) {
- printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
- "aborting.\n");
- goto err_out_free_coherent;
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clear_quattro;
}
pci_set_drvdata(pdev, hp);
@@ -3140,61 +3027,30 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
int i = simple_strtoul(dev->name + 3, NULL, 10);
sprintf(prom_name, "-%d", i + 3);
}
- printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
- if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
- qpdev->device == PCI_DEVICE_ID_DEC_21153)
- printk("DEC 21153 PCI Bridge\n");
- else
- printk("unknown bridge %04x.%04x\n",
- qpdev->vendor, qpdev->device);
+ netdev_info(dev,
+ "%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet bridge %04x.%04x\n",
+ prom_name, qpdev->vendor, qpdev->device);
}
if (qfe_slot != -1)
- printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
- dev->name, qfe_slot);
+ netdev_info(dev,
+ "Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet %pM\n",
+ qfe_slot, dev->dev_addr);
else
- printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
- dev->name);
-
- printk("%pM\n", dev->dev_addr);
+ netdev_info(dev,
+ "HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
-err_out_free_coherent:
- dma_free_coherent(hp->dma_dev, PAGE_SIZE,
- hp->happy_block, hp->hblock_dvma);
-
-err_out_iounmap:
- iounmap(hp->gregs);
-
-err_out_free_res:
- pci_release_regions(pdev);
-
err_out_clear_quattro:
if (qp != NULL)
qp->happy_meals[qfe_slot] = NULL;
- free_netdev(dev);
-
err_out:
return err;
}
-static void happy_meal_pci_remove(struct pci_dev *pdev)
-{
- struct happy_meal *hp = pci_get_drvdata(pdev);
- struct net_device *net_dev = hp->dev;
-
- unregister_netdev(net_dev);
-
- dma_free_coherent(hp->dma_dev, PAGE_SIZE,
- hp->happy_block, hp->hblock_dvma);
- iounmap(hp->gregs);
- pci_release_regions(hp->happy_dev);
-
- free_netdev(net_dev);
-}
-
static const struct pci_device_id happymeal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
@@ -3206,7 +3062,6 @@ static struct pci_driver hme_pci_driver = {
.name = "hme",
.id_table = happymeal_pci_ids,
.probe = happy_meal_pci_probe,
- .remove = happy_meal_pci_remove,
};
static int __init happy_meal_pci_init(void)
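[Editor's note — illustrative only, not part of the patch above.] The sunhme PCI probe conversion leans entirely on managed (devm/pcim) resources, which is why the .remove callback and most of the error-unwind labels could be deleted. A minimal sketch of that pattern, with hypothetical names (example_probe, struct example_priv, example_netdev_ops); only the pcim_*/devm_*/dmam_* helpers themselves are real API:

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>

struct example_priv {
	void __iomem *regs;
	void *dma_block;
	dma_addr_t dma_handle;
};

static const struct net_device_ops example_netdev_ops = {
	/* real ndo_open/ndo_start_xmit/etc. omitted for brevity */
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct example_priv *priv;
	struct net_device *dev;
	int err;

	err = pcim_enable_device(pdev);		/* undone automatically on driver detach */
	if (err)
		return err;
	pci_set_master(pdev);

	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));	/* freed on detach */
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &example_netdev_ops;
	priv = netdev_priv(dev);

	err = pcim_iomap_regions(pdev, BIT(0), "example");	/* request + map BAR 0 */
	if (err)
		return err;
	priv->regs = pcim_iomap_table(pdev)[0];

	priv->dma_block = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE,
					      &priv->dma_handle, GFP_KERNEL);
	if (!priv->dma_block)
		return -ENOMEM;

	/* devm_register_netdev() queues unregister_netdev() for detach time,
	 * so no explicit .remove callback is needed for any of the above. */
	return devm_register_netdev(&pdev->dev, dev);
}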
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index efe0d33f6024..6418fcc3139f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -684,8 +684,8 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
- strlcpy(info->driver, "sunqe", sizeof(info->driver));
- strlcpy(info->version, "3.0", sizeof(info->version));
+ strscpy(info->driver, "sunqe", sizeof(info->driver));
+ strscpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index da8119625cf3..acda6cbd0238 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -60,8 +60,8 @@ static struct vio_version vnet_versions[] = {
static void vnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vnet_get_msglevel(struct net_device *dev)
@@ -467,8 +467,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index 546206640492..9be585237277 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -62,7 +62,8 @@ static int spl2sw_ethernet_stop(struct net_device *ndev)
return 0;
}
-static int spl2sw_ethernet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t spl2sw_ethernet_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
@@ -248,8 +249,8 @@ static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *
/* Check if mac address is valid */
if (!is_valid_ether_addr(mac)) {
- kfree(mac);
dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
+ kfree(mac);
return -EINVAL;
}
@@ -492,7 +493,7 @@ static int spl2sw_probe(struct platform_device *pdev)
}
/* Add and enable napi. */
- netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll);
napi_enable(&comm->rx_napi);
netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index 5c9b6c90942b..f8e133604146 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -54,8 +54,8 @@ static void xlgmac_default_config(struct xlgmac_pdata *pdata)
pdata->phy_speed = SPEED_25000;
pdata->sysclk_rate = XLGMAC_SYSCLOCK;
- strlcpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
- strlcpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
+ strscpy(pdata->drv_name, XLGMAC_DRV_NAME, sizeof(pdata->drv_name));
+ strscpy(pdata->drv_ver, XLGMAC_DRV_VERSION, sizeof(pdata->drv_ver));
}
static void xlgmac_init_all_ops(struct xlgmac_pdata *pdata)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
index 49f8c6be9459..e794da727fe0 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c
@@ -102,9 +102,9 @@ static void xlgmac_ethtool_get_drvinfo(struct net_device *netdev,
u32 ver = pdata->hw_feat.version;
u32 snpsver, devid, userver;
- strlcpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
/* S|SNPSVER: Synopsys-defined Version
* D|DEVID: Indicates the Device family
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index e54ce73396ee..36b948820c1e 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -419,15 +419,14 @@ static void xlgmac_napi_enable(struct xlgmac_pdata *pdata, unsigned int add)
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xlgmac_one_poll,
- NAPI_POLL_WEIGHT);
+ xlgmac_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xlgmac_all_poll, NAPI_POLL_WEIGHT);
+ xlgmac_all_poll);
napi_enable(&pdata->napi);
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 985073eba3bd..ca409515ead5 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1994,7 +1994,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->nic = nic;
priv->msg_enable = BDX_DEF_MSG_ENABLE;
- netif_napi_add(ndev, &priv->napi, bdx_poll, 64);
+ netif_napi_add(ndev, &priv->napi, bdx_poll);
if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) {
DBG("HW statistics not supported\n");
@@ -2133,10 +2133,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct bdx_priv *priv = netdev_priv(netdev);
- strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
+ strscpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
}
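[Editor's note — illustrative only.] Many hunks in this section simply drop the weight argument from netif_napi_add(): after the API change the default poll budget (NAPI_POLL_WEIGHT, 64) is implied, and netif_napi_add_tx() covers dedicated TX-completion instances. A hedged sketch with hypothetical my_* names:

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi_rx;
	struct napi_struct napi_tx;
};

/* Stub handlers; real ones process up to @budget packets and call
 * napi_complete_done() when they finish early. */
static int my_poll(struct napi_struct *napi, int budget) { return 0; }
static int my_tx_poll(struct napi_struct *napi, int budget) { return 0; }

static void my_register_napi(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* RX (or combined) NAPI: NAPI_POLL_WEIGHT is implied */
	netif_napi_add(ndev, &priv->napi_rx, my_poll);

	/* dedicated TX-completion NAPI */
	netif_napi_add_tx(ndev, &priv->napi_tx, my_tx_poll);
}

A driver that genuinely needs a non-default budget would call netif_napi_add_weight(ndev, napi, poll, weight) instead of the plain netif_napi_add().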
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index fb30bc5d56cb..fce06663e1e1 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -33,6 +33,7 @@ config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
select PHYLIB
+ select MDIO_BITBANG
help
This driver supports TI's DaVinci MDIO module.
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index abc1e4276cf0..c51e2af91f69 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -402,9 +402,9 @@ static void am65_cpsw_get_drvinfo(struct net_device *ndev,
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- strlcpy(info->driver, dev_driver_string(common->dev),
+ strscpy(info->driver, dev_driver_string(common->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
}
static u32 am65_cpsw_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index f4a6b590a1e3..3cbe4ec46234 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -74,6 +74,9 @@
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+#define AM65_CPSW_SGMII_CONTROL_REG 0x010
+#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
+
#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
@@ -360,8 +363,7 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
-static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
- netdev_features_t features)
+static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
int port_idx, i, ret;
@@ -574,7 +576,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
for (i = 0; i < common->tx_ch_num; i++)
netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
- ret = am65_cpsw_nuss_common_open(common, ndev->features);
+ ret = am65_cpsw_nuss_common_open(common);
if (ret)
return ret;
@@ -590,11 +592,6 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
- port->slave.phy_if);
- if (ret)
- goto error_cleanup;
-
ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
if (ret)
goto error_cleanup;
@@ -1409,7 +1406,14 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- /* Currently not used */
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+
+ if (common->pdata.extra_modes & BIT(state->interface))
+ writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
+ port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
}
static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
@@ -1847,6 +1851,8 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->common = common;
port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ if (common->pdata.extra_modes)
+ port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
(AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
port->name = of_get_property(port_np, "label", NULL);
@@ -1886,6 +1892,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
}
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ if (ret)
+ goto of_node_put;
+
ret = of_get_mac_address(port_np, port->slave.mac_addr);
if (ret) {
am65_cpsw_am654_get_efuse_macid(port_np,
@@ -1981,7 +1991,18 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
port->slave.phylink_config.type = PHYLINK_NETDEV;
port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
- phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ } else if (port->slave.phy_if == PHY_INTERFACE_MODE_RMII) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else {
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
phylink = phylink_create(&port->slave.phylink_config,
of_node_to_fwnode(port->slave.phy_node),
@@ -2023,7 +2044,7 @@ static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
}
netif_napi_add(common->dma_ndev, &common->napi_rx,
- am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
+ am65_cpsw_nuss_rx_poll);
return ret;
}
@@ -2611,10 +2632,18 @@ static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};
+static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
+ { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index ac945631bf2f..2c9850fdfcb6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -46,6 +46,7 @@ struct am65_cpsw_port {
const char *name;
u32 port_id;
void __iomem *port_base;
+ void __iomem *sgmii_base;
void __iomem *stat_base;
void __iomem *fetch_ram_base;
bool disabled;
@@ -88,6 +89,7 @@ struct am65_cpsw_rx_chn {
struct am65_cpsw_pdata {
u32 quirks;
+ u64 extra_modes;
enum k3_ring_mode fdqring_mode;
const char *ale_dev_id;
};
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c30a6e510aa3..e2f0fb286143 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -943,9 +943,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->irq = of_irq_get_byname(node, "cpts");
if (cpts->irq <= 0) {
ret = cpts->irq ?: -ENXIO;
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get IRQ number (err = %d)\n",
- ret);
+ dev_err_probe(dev, ret, "Failed to get IRQ number\n");
return ERR_PTR(ret);
}
@@ -965,8 +963,7 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
if (IS_ERR(cpts->refclk)) {
ret = PTR_ERR(cpts->refclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get refclk %d\n", ret);
+ dev_err_probe(dev, ret, "Failed to get refclk\n");
return ERR_PTR(ret);
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index bef5e68dac31..80eeeb463c4f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -851,8 +851,8 @@ static int cpmac_set_ringparam(struct net_device *dev,
static void cpmac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "cpmac", sizeof(info->driver));
- strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ strscpy(info->driver, "cpmac", sizeof(info->driver));
+ strscpy(info->version, CPMAC_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}
@@ -1109,7 +1109,7 @@ static int cpmac_probe(struct platform_device *pdev)
dev->netdev_ops = &cpmac_netdev_ops;
dev->ethtool_ops = &cpmac_ethtool_ops;
- netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+ netif_napi_add(dev, &priv->napi, cpmac_poll);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->rx_lock);
@@ -1169,7 +1169,7 @@ static struct platform_driver cpmac_driver = {
.remove = cpmac_remove,
};
-int cpmac_init(void)
+int __init cpmac_init(void)
{
u32 mask;
int i, res;
@@ -1239,7 +1239,7 @@ fail_alloc:
return res;
}
-void cpmac_exit(void)
+void __exit cpmac_exit(void)
{
platform_driver_unregister(&cpmac_driver);
mdiobus_unregister(cpmac_mii);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ed66c4d4d830..709ca6dd6ecb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1172,9 +1172,9 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
struct platform_device *pdev = to_platform_device(cpsw->dev);
- strlcpy(info->driver, "cpsw", sizeof(info->driver));
- strlcpy(info->version, "1.0", sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, "cpsw", sizeof(info->driver));
+ strscpy(info->version, "1.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int cpsw_set_pauseparam(struct net_device *ndev,
@@ -1319,8 +1319,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
*/
ret = of_phy_register_fixed_link(slave_node);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n");
goto err_node_put;
}
slave_data->phy_node = of_node_get(slave_node);
@@ -1638,8 +1637,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &cpsw->napi_rx,
- cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
- NAPI_POLL_WEIGHT);
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
netif_napi_add_tx(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 353e58b22c51..83596ec0c7cb 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1146,9 +1146,9 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
struct platform_device *pdev;
pdev = to_platform_device(cpsw->dev);
- strlcpy(info->driver, "cpsw-switch", sizeof(info->driver));
- strlcpy(info->version, "2.0", sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int cpsw_set_pauseparam(struct net_device *ndev,
@@ -1288,9 +1288,8 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
- port_np, ret);
+ dev_err_probe(dev, ret, "%pOF failed to register fixed-link phy\n",
+ port_np);
goto err_node_put;
}
slave_data->phy_node = of_node_get(port_np);
@@ -1417,9 +1416,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
* accordingly.
*/
netif_napi_add(ndev, &cpsw->napi_rx,
- cpsw->quirk_irq ?
- cpsw_rx_poll : cpsw_rx_mq_poll,
- NAPI_POLL_WEIGHT);
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
netif_napi_add_tx(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ?
cpsw_tx_poll : cpsw_tx_mq_poll);
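[Editor's note — illustrative only.] The dev_err_probe() conversions in the cpsw/cpts hunks above all have the same shape: the helper returns the error it is passed, logs it only when it is not -EPROBE_DEFER, and records the deferral reason otherwise, so the open-coded "if (ret != -EPROBE_DEFER) dev_err(...)" pattern disappears. A hedged sketch (example_get_resources and the clock name are hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_resources(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk))
		/* Returns PTR_ERR(clk); prints with dev_err() unless the error
		 * is -EPROBE_DEFER, in which case the reason is recorded for
		 * the deferred-probe debugfs report instead. */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get functional clock\n");

	return 0;
}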
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2a3e4e842fa5..2eb9d5a32588 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -374,8 +374,8 @@ static char *emac_rxhost_errcodes[16] = {
static void emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, emac_version_string, sizeof(info->driver));
- strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
+ strscpy(info->driver, emac_version_string, sizeof(info->driver));
+ strscpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
}
/**
@@ -949,7 +949,7 @@ static void emac_tx_handler(void *token, int len, int status)
*
* Returns success(NETDEV_TX_OK) or error code (typically out of desc's)
*/
-static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
@@ -1948,7 +1948,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &ethtool_ops;
- netif_napi_add(ndev, &priv->napi, emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, emac_poll);
pm_runtime_enable(&pdev->dev);
rc = pm_runtime_resume_and_get(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index ea3772618043..946b9753ccfb 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -26,6 +26,8 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/sys_soc.h>
/*
* This timeout definition is a worst-case ultra defensive measure against
@@ -41,6 +43,7 @@
struct davinci_mdio_of_param {
int autosuspend_delay_ms;
+ bool manual_mode;
};
struct davinci_mdio_regs {
@@ -49,6 +52,15 @@ struct davinci_mdio_regs {
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
#define CONTROL_MAX_DIV (0xffff)
+#define CONTROL_CLKDIV GENMASK(15, 0)
+
+#define MDIO_MAN_MDCLK_O BIT(2)
+#define MDIO_MAN_OE BIT(1)
+#define MDIO_MAN_PIN BIT(0)
+#define MDIO_MANUALMODE BIT(31)
+
+#define MDIO_PIN 0
+
u32 alive;
u32 link;
@@ -59,7 +71,9 @@ struct davinci_mdio_regs {
u32 userintmasked;
u32 userintmaskset;
u32 userintmaskclr;
- u32 __reserved_1[20];
+ u32 manualif;
+ u32 poll;
+ u32 __reserved_1[18];
struct {
u32 access;
@@ -79,6 +93,7 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
+ struct mdiobb_ctrl bb_ctrl;
struct davinci_mdio_regs __iomem *regs;
struct clk *clk;
struct device *dev;
@@ -90,6 +105,7 @@ struct davinci_mdio_data {
*/
bool skip_scan;
u32 clk_div;
+ bool manual_mode;
};
static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
@@ -128,9 +144,122 @@ static void davinci_mdio_enable(struct davinci_mdio_data *data)
writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
-static int davinci_mdio_reset(struct mii_bus *bus)
+static void davinci_mdio_disable(struct davinci_mdio_data *data)
+{
+ u32 reg;
+
+ /* Disable MDIO state machine */
+ reg = readl(&data->regs->control);
+
+ reg &= ~CONTROL_CLKDIV;
+ reg |= data->clk_div;
+
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &data->regs->control);
+}
+
+static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
+{
+ u32 reg;
+ /* set manual mode */
+ reg = readl(&data->regs->poll);
+ reg |= MDIO_MANUALMODE;
+ writel(reg, &data->regs->poll);
+}
+
+static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (level)
+ reg |= MDIO_MAN_MDCLK_O;
+ else
+ reg &= ~MDIO_MAN_MDCLK_O;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (output)
+ reg |= MDIO_MAN_OE;
+ else
+ reg &= ~MDIO_MAN_OE;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (value)
+ reg |= MDIO_MAN_PIN;
+ else
+ reg &= ~MDIO_MAN_PIN;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct davinci_mdio_data *data;
+ unsigned long reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+ return test_bit(MDIO_PIN, &reg);
+}
+
+static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_read(bus, phy, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_write(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
{
- struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
int ret;
@@ -138,6 +267,11 @@ static int davinci_mdio_reset(struct mii_bus *bus)
if (ret < 0)
return ret;
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ }
+
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -171,6 +305,23 @@ done:
return 0;
}
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+
+ return davinci_mdio_common_reset(data);
+}
+
+static int davinci_mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ struct davinci_mdio_data *data;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+
+ return davinci_mdio_common_reset(data);
+}
+
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
@@ -318,6 +469,28 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return 0;
}
+struct k3_mdio_soc_data {
+ bool manual_mode;
+};
+
+static const struct k3_mdio_soc_data am65_mdio_soc_data = {
+ .manual_mode = true,
+};
+
+static const struct soc_device_attribute k3_mdio_socinfo[] = {
+ { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { /* sentinel */ },
+};
+
#if IS_ENABLED(CONFIG_OF)
static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
.autosuspend_delay_ms = 100,
@@ -331,6 +504,14 @@ static const struct of_device_id davinci_mdio_of_mtable[] = {
MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
#endif
+static const struct mdiobb_ops davinci_mdiobb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = davinci_set_mdc,
+ .set_mdio_dir = davinci_set_mdio_dir,
+ .set_mdio_data = davinci_set_mdio_data,
+ .get_mdio_data = davinci_get_mdio_data,
+};
+
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -345,7 +526,26 @@ static int davinci_mdio_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- data->bus = devm_mdiobus_alloc(dev);
+ data->manual_mode = false;
+ data->bb_ctrl.ops = &davinci_mdiobb_ops;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ const struct soc_device_attribute *soc_match_data;
+
+ soc_match_data = soc_device_match(k3_mdio_socinfo);
+ if (soc_match_data && soc_match_data->data) {
+ const struct k3_mdio_soc_data *socdata =
+ soc_match_data->data;
+
+ data->manual_mode = socdata->manual_mode;
+ }
+ }
+
+ if (data->manual_mode)
+ data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
+ else
+ data->bus = devm_mdiobus_alloc(dev);
+
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
return -ENOMEM;
@@ -371,11 +571,20 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
data->bus->name = dev_name(dev);
- data->bus->read = davinci_mdio_read;
- data->bus->write = davinci_mdio_write;
- data->bus->reset = davinci_mdio_reset;
+
+ if (data->manual_mode) {
+ data->bus->read = davinci_mdiobb_read;
+ data->bus->write = davinci_mdiobb_write;
+ data->bus->reset = davinci_mdiobb_reset;
+
+ dev_info(dev, "Configuring MDIO in manual mode\n");
+ } else {
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->priv = data;
+ }
data->bus->parent = dev;
- data->bus->priv = data;
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
@@ -433,9 +642,13 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus)
+ if (data->bus) {
mdiobus_unregister(data->bus);
+ if (data->manual_mode)
+ free_mdio_bitbang(data->bus);
+ }
+
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -452,7 +665,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
writel(ctrl, &data->regs->control);
- wait_for_idle(data);
+
+ if (!data->manual_mode)
+ wait_for_idle(data);
return 0;
}
@@ -461,7 +676,12 @@ static int davinci_mdio_runtime_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- davinci_mdio_enable(data);
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ } else {
+ davinci_mdio_enable(data);
+ }
return 0;
}
#endif
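[Editor's note — illustrative only, not part of the patch.] The davinci_mdio manual-mode support above is built on the generic mdio-bitbang helper: the driver embeds a struct mdiobb_ctrl, points it at a struct mdiobb_ops carrying four pin-level callbacks, and lets alloc_mdio_bitbang() provide a struct mii_bus whose read/write clock MDIO frames through those callbacks. A minimal sketch with hypothetical example_* names:

#include <linux/mdio-bitbang.h>
#include <linux/phy.h>

struct example_mdio {
	struct mdiobb_ctrl ctrl;	/* reached from the callbacks via container_of() */
	void __iomem *manualif;		/* register driving the MDC/MDIO pins */
};

static void example_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	/* drive the MDC pin high or low through ->manualif */
}

static void example_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	/* switch the MDIO pin between output and input */
}

static void example_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	/* drive the MDIO pin while it is configured as an output */
}

static int example_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	return 0;	/* sample the MDIO pin while it is an input */
}

static const struct mdiobb_ops example_bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= example_set_mdc,
	.set_mdio_dir	= example_set_mdio_dir,
	.set_mdio_data	= example_set_mdio_data,
	.get_mdio_data	= example_get_mdio_data,
};

static struct mii_bus *example_alloc_bus(struct example_mdio *em)
{
	em->ctrl.ops = &example_bb_ops;
	/* The returned mii_bus already has ->read/->write wired to the
	 * bit-banging engine; the caller still does mdiobus_register() and,
	 * on teardown, mdiobus_unregister() plus free_mdio_bitbang(). */
	return alloc_mdio_bitbang(&em->ctrl);
}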
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index b15d44261e76..aba70bef4894 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2095,7 +2095,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
}
/* NAPI register */
- netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);
/* Register the network device */
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 741c42c6a417..b3da76efa8f5 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -762,12 +762,12 @@ static void tlan_get_drvinfo(struct net_device *dev,
{
struct tlan_priv *priv = netdev_priv(dev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
if (priv->pci_dev)
- strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
else
- strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
+ strscpy(info->bus_info, "EISA", sizeof(info->bus_info));
}
static int tlan_get_eeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3dbfb1b20649..cf8de8a7a8a1 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1187,8 +1187,8 @@ int gelic_net_open(struct net_device *netdev)
void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int gelic_ether_get_link_ksettings(struct net_device *netdev,
@@ -1441,7 +1441,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
{
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */
- netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, napi, gelic_net_poll);
netdev->ethtool_ops = &gelic_ether_ethtool_ops;
netdev->netdev_ops = &gelic_netdevice_ops;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index bc4914c758ad..50d7eacfec58 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2270,8 +2270,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->aneg_count = 0;
timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
- netif_napi_add(netdev, &card->napi,
- spider_net_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &card->napi, spider_net_poll);
spider_net_setup_netdev_ops(netdev);
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 93110dba0bfa..fef9fd127b5e 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -63,12 +63,12 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
card = netdev_priv(netdev);
/* clear and fill out info */
- strlcpy(drvinfo->driver, spider_net_driver_name,
+ strscpy(drvinfo->driver, spider_net_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "no information",
+ strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->fw_version, "no information",
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(card->pdev),
+ strscpy(drvinfo->bus_info, pci_name(card->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 47aab9c132c8..b50be67b398b 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1956,9 +1956,9 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
{
struct tc35815_local *lp = netdev_priv(dev);
- strlcpy(info->driver, MODNAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, MODNAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
static u32 tc35815_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 5251fc324221..2cd2afc3fff0 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -59,9 +59,6 @@
/* Check the phy status every half a second. */
#define CHECK_PHY_INTERVAL (HZ/2)
-static int tsi108_init_one(struct platform_device *pdev);
-static int tsi108_ether_remove(struct platform_device *pdev);
-
struct tsi108_prv_data {
void __iomem *regs; /* Base of normal regs */
void __iomem *phyregs; /* Base of register bank used for PHY access */
@@ -144,16 +141,6 @@ struct tsi108_prv_data {
struct platform_device *pdev;
};
-/* Structure for a device driver */
-
-static struct platform_driver tsi_eth_driver = {
- .probe = tsi108_init_one,
- .remove = tsi108_ether_remove,
- .driver = {
- .name = "tsi-ethernet",
- },
-};
-
static void tsi108_timed_checker(struct timer_list *t);
#ifdef DEBUG
@@ -1598,7 +1585,7 @@ tsi108_init_one(struct platform_device *pdev)
data->phy_type = einfo->phy_type;
data->irq_num = einfo->irq_num;
data->id = pdev->id;
- netif_napi_add(dev, &data->napi, tsi108_poll, 64);
+ netif_napi_add(dev, &data->napi, tsi108_poll);
dev->netdev_ops = &tsi108_netdev_ops;
dev->ethtool_ops = &tsi108_ethtool_ops;
@@ -1683,6 +1670,16 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+ .probe = tsi108_init_one,
+ .remove = tsi108_ether_remove,
+ .driver = {
+ .name = "tsi-ethernet",
+ },
+};
module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c
index eb39a45de012..aeed2a093e34 100644
--- a/drivers/net/ethernet/vertexcom/mse102x.c
+++ b/drivers/net/ethernet/vertexcom/mse102x.c
@@ -750,6 +750,13 @@ static const struct of_device_id mse102x_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mse102x_match_table);
+static const struct spi_device_id mse102x_ids[] = {
+ { "mse1021" },
+ { "mse1022" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mse102x_ids);
+
static struct spi_driver mse102x_driver = {
.driver = {
.name = DRV_NAME,
@@ -758,10 +765,11 @@ static struct spi_driver mse102x_driver = {
},
.probe = mse102x_probe_spi,
.remove = mse102x_remove_spi,
+ .id_table = mse102x_ids,
};
module_spi_driver(mse102x_driver);
MODULE_DESCRIPTION("MSE102x Network driver");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@in-tech.com>");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@chargebyte.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 509c5e9b29df..0fb15a17b547 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -965,7 +965,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
+ netif_napi_add(dev, &rp->napi, rhine_napipoll);
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
@@ -2281,8 +2281,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct device *hwdev = dev->dev.parent;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ff0c102cb578..a502812ac418 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2846,7 +2846,7 @@ static int velocity_probe(struct device *dev, int irq,
netdev->netdev_ops = &velocity_netdev_ops;
netdev->ethtool_ops = &velocity_ethtool_ops;
- netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &vptr->napi, velocity_poll);
netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX;
@@ -3419,13 +3419,13 @@ static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
{
struct velocity_info *vptr = netdev_priv(dev);
- strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
- strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strscpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strscpy(info->version, VELOCITY_VERSION, sizeof(info->version));
if (vptr->pdev)
- strlcpy(info->bus_info, pci_name(vptr->pdev),
+ strscpy(info->bus_info, pci_name(vptr->pdev),
sizeof(info->bus_info));
else
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index b4a4fa0a58f8..f5d43d8c9629 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -16,6 +16,19 @@ config NET_VENDOR_WANGXUN
if NET_VENDOR_WANGXUN
+config NGBE
+ tristate "Wangxun(R) GbE PCI Express adapters support"
+ depends on PCI
+ help
+ This driver supports Wangxun(R) GbE PCI Express family of
+ adapters.
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ngbe.
+
config TXGBE
tristate "Wangxun(R) 10GbE PCI Express adapters support"
depends on PCI
diff --git a/drivers/net/ethernet/wangxun/Makefile b/drivers/net/ethernet/wangxun/Makefile
index c34db1bead25..ac3fb06b233c 100644
--- a/drivers/net/ethernet/wangxun/Makefile
+++ b/drivers/net/ethernet/wangxun/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_TXGBE) += txgbe/
+obj-$(CONFIG_NGBE) += ngbe/
diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile
new file mode 100644
index 000000000000..0baf75907496
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+#
+# Makefile for the Wangxun(R) GbE PCI Express ethernet driver
+#
+
+obj-$(CONFIG_NGBE) += ngbe.o
+
+ngbe-objs := ngbe_main.o
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
new file mode 100644
index 000000000000..f5fa6e5238cc
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_H_
+#define _NGBE_H_
+
+#include "ngbe_type.h"
+
+#define NGBE_MAX_FDIR_INDICES 7
+
+#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+
+/* board specific private data structure */
+struct ngbe_adapter {
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+};
+
+extern char ngbe_driver_name[];
+
+#endif /* _NGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
new file mode 100644
index 000000000000..7674cb6e5700
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+
+#include "ngbe.h"
+char ngbe_driver_name[] = "ngbe";
+
+/* ngbe_pci_tbl - PCI Device ID Table
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id ngbe_pci_tbl[] = {
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860LC), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1), 0},
+ { PCI_VDEVICE(WANGXUN, NGBE_DEV_ID_EM_WX1860A1L), 0},
+ /* required last entry */
+ { .device = 0 }
+};
+
+static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+ struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static void ngbe_shutdown(struct pci_dev *pdev)
+{
+ bool wake;
+
+ ngbe_dev_shutdown(pdev, &wake);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+/**
+ * ngbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ngbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ngbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int ngbe_probe(struct pci_dev *pdev,
+ const struct pci_device_id __always_unused *ent)
+{
+ struct ngbe_adapter *adapter = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_pci_disable_dev;
+ }
+
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ ngbe_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_disable_dev;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct ngbe_adapter),
+ NGBE_MAX_TX_QUEUES,
+ NGBE_MAX_RX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_pci_release_regions;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->io_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr) {
+ err = -EIO;
+ goto err_pci_release_regions;
+ }
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ pci_set_drvdata(pdev, adapter);
+
+ return 0;
+
+err_pci_release_regions:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_disable_dev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ngbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ngbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void ngbe_remove(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ngbe_driver = {
+ .name = ngbe_driver_name,
+ .id_table = ngbe_pci_tbl,
+ .probe = ngbe_probe,
+ .remove = ngbe_remove,
+ .shutdown = ngbe_shutdown,
+};
+
+module_pci_driver(ngbe_driver);
+
+MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl);
+MODULE_AUTHOR("Beijing WangXun Technology Co., Ltd, <software@net-swift.com>");
+MODULE_DESCRIPTION("WangXun(R) Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
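
For reference, PCI_VDEVICE(WANGXUN, id) in the table above fills in the vendor/device pair and wildcards the subsystem IDs, and MODULE_DEVICE_TABLE(pci, ngbe_pci_tbl) exports the table so the module can be autoloaded when a matching device is enumerated. Roughly, one entry expands to the following (sketch only, not part of the patch):

	static const struct pci_device_id example_entry = {
		.vendor      = PCI_VENDOR_ID_WANGXUN,   /* 0x8088 */
		.device      = NGBE_DEV_ID_EM_WX1860A2, /* 0x0101 */
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = 0,
	};
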
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
new file mode 100644
index 000000000000..26e776c3539a
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_TYPE_H_
+#define _NGBE_TYPE_H_
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+/************ NGBE_register.h ************/
+/* Vendor ID */
+#ifndef PCI_VENDOR_ID_WANGXUN
+#define PCI_VENDOR_ID_WANGXUN 0x8088
+#endif
+
+/* Device IDs */
+#define NGBE_DEV_ID_EM_WX1860AL_W 0x0100
+#define NGBE_DEV_ID_EM_WX1860A2 0x0101
+#define NGBE_DEV_ID_EM_WX1860A2S 0x0102
+#define NGBE_DEV_ID_EM_WX1860A4 0x0103
+#define NGBE_DEV_ID_EM_WX1860A4S 0x0104
+#define NGBE_DEV_ID_EM_WX1860AL2 0x0105
+#define NGBE_DEV_ID_EM_WX1860AL2S 0x0106
+#define NGBE_DEV_ID_EM_WX1860AL4 0x0107
+#define NGBE_DEV_ID_EM_WX1860AL4S 0x0108
+#define NGBE_DEV_ID_EM_WX1860LC 0x0109
+#define NGBE_DEV_ID_EM_WX1860A1 0x010a
+#define NGBE_DEV_ID_EM_WX1860A1L 0x010b
+
+/* Subsystem ID */
+#define NGBE_SUBID_M88E1512_SFP 0x0003
+#define NGBE_SUBID_OCP_CARD 0x0040
+#define NGBE_SUBID_LY_M88E1512_SFP 0x0050
+#define NGBE_SUBID_M88E1512_RJ45 0x0051
+#define NGBE_SUBID_M88E1512_MIX 0x0052
+#define NGBE_SUBID_YT8521S_SFP 0x0060
+#define NGBE_SUBID_INTERNAL_YT8521S_SFP 0x0061
+#define NGBE_SUBID_YT8521S_SFP_GPIO 0x0062
+#define NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO 0x0064
+#define NGBE_SUBID_LY_YT8521S_SFP 0x0070
+#define NGBE_SUBID_RGMII_FPGA 0x0080
+
+#define NGBE_OEM_MASK 0x00FF
+
+#define NGBE_NCSI_SUP 0x8000
+#define NGBE_NCSI_MASK 0x8000
+#define NGBE_WOL_SUP 0x4000
+#define NGBE_WOL_MASK 0x4000
+
+#endif /* _NGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index acd78120e53c..634946e87e5f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -719,9 +719,9 @@ static void w5100_hw_close(struct w5100_priv *priv)
static void w5100_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 773f8c77909a..b0958fe8111e 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -282,9 +282,9 @@ static void w5300_hw_close(struct w5300_priv *priv)
static void w5300_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(ndev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index c6395c406418..6668d1b760d8 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -21,36 +21,45 @@
/* Configuration options */
/* Accept all incoming packets.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_PROMISC (1 << 0)
/* Jumbo frame support for Tx & Rx.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_JUMBO (1 << 1)
/* VLAN Rx & Tx frame support.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_VLAN (1 << 2)
/* Enable recognition of flow control frames on Rx
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames.
* Note: PAD from VLAN frames is not stripped.
- * This option defaults to disabled (set) */
+ * This option defaults to disabled (set)
+ */
#define XTE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
-set, the MAC will filter frames that have a mismatched type/length field
-and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
-types of frames are encountered. When this option is cleared, the MAC will
-allow these types of frames to be received.
-This option defaults to enabled (set) */
+ * set, the MAC will filter frames that have a mismatched type/length field
+ * and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
+ * types of frames are encountered. When this option is cleared, the MAC will
+ * allow these types of frames to be received.
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_TXEN (1 << 11)
/* Enable the receiver
-* This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_RXEN (1 << 12)
/* Default options set when device is initialized or reset */
@@ -68,18 +77,18 @@ This option defaults to enabled (set) */
#define TX_TAILDESC_PTR 0x04 /* rw */
#define TX_CHNL_CTRL 0x05 /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
-*/
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
+ */
#define CHNL_CTRL_IRQ_IOE (1 << 9)
#define CHNL_CTRL_IRQ_EN (1 << 7)
#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
@@ -87,35 +96,35 @@ This option defaults to enabled (set) */
#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
#define TX_IRQ_REG 0x06 /* rw */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
- 29 2 ErrIrq
- 30 1 DlyIrq
- 31 0 CoalIrq
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3::7 Reserved
+ * 29 2 ErrIrq
+ * 30 1 DlyIrq
+ * 31 0 CoalIrq
*/
#define TX_CHNL_STS 0x07 /* r */
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define RX_NXTDESC_PTR 0x08 /* r */
#define RX_CURBUF_ADDR 0x09 /* r */
@@ -124,17 +133,17 @@ This option defaults to enabled (set) */
#define RX_TAILDESC_PTR 0x0c /* rw */
#define RX_CHNL_CTRL 0x0d /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
*/
#define RX_IRQ_REG 0x0e /* rw */
#define IRQ_COAL (1 << 0)
@@ -142,13 +151,13 @@ This option defaults to enabled (set) */
#define IRQ_ERR (1 << 2)
#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
-*/
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3::7 Reserved
+ */
#define RX_CHNL_STS 0x0f /* r */
#define CHNL_STS_ENGBUSY (1 << 1)
#define CHNL_STS_EOP (1 << 2)
@@ -165,23 +174,23 @@ This option defaults to enabled (set) */
#define CHNL_STS_CMPERR (1 << 20)
#define CHNL_STS_TAILERR (1 << 21)
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define DMA_CONTROL_REG 0x10 /* rw */
#define DMA_CONTROL_RST (1 << 0)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 3f6b9dfca095..1066420d6a83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -117,8 +117,8 @@ int temac_indirect_busywait(struct temac_local *lp)
spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
if (WARN_ON(!hard_acs_rdy(lp)))
return -ETIMEDOUT;
- else
- return 0;
+
+ return 0;
}
/*
@@ -261,7 +261,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
* I/O functions
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
unsigned int dcrs;
@@ -286,7 +286,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
* such as with MicroBlaze and x86
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
return -1;
}
@@ -307,11 +307,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
- else {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(lp->rx_skb[i]);
- }
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skb[i]);
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
@@ -430,7 +428,8 @@ static void temac_do_set_mac_address(struct net_device *ndev)
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1
- * so don't affect them Set MAC bits [47:32] in EUAW1 */
+ * so don't affect them. Set MAC bits [47:32] in EUAW1
+ */
temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
@@ -530,66 +529,66 @@ static struct temac_option {
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXJMBO_MASK,
+ .m_or = XTE_RXC1_RXJMBO_MASK,
},
/* Turn on VLAN packet support for both Rx and Tx */
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXVLAN_MASK,
+ .m_or = XTE_TXC_TXVLAN_MASK,
},
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXVLAN_MASK,
+ .m_or = XTE_RXC1_RXVLAN_MASK,
},
/* Turn on FCS stripping on receive packets */
{
.opt = XTE_OPTION_FCS_STRIP,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXFCS_MASK,
+ .m_or = XTE_RXC1_RXFCS_MASK,
},
/* Turn on FCS insertion on transmit packets */
{
.opt = XTE_OPTION_FCS_INSERT,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXFCS_MASK,
+ .m_or = XTE_TXC_TXFCS_MASK,
},
/* Turn on length/type field checking on receive packets */
{
.opt = XTE_OPTION_LENTYPE_ERR,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXLT_MASK,
+ .m_or = XTE_RXC1_RXLT_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_RXFLO_MASK,
+ .m_or = XTE_FCC_RXFLO_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_TXFLO_MASK,
+ .m_or = XTE_FCC_TXFLO_MASK,
},
/* Turn on promiscuous frame filtering (all frames are received ) */
{
.opt = XTE_OPTION_PROMISC,
.reg = XTE_AFM_OFFSET,
- .m_or =XTE_AFM_EPPRM_MASK,
+ .m_or = XTE_AFM_EPPRM_MASK,
},
/* Enable transmitter if not already enabled */
{
.opt = XTE_OPTION_TXEN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXEN_MASK,
+ .m_or = XTE_TXC_TXEN_MASK,
},
/* Enable receiver? */
{
.opt = XTE_OPTION_RXEN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXEN_MASK,
+ .m_or = XTE_RXC1_RXEN_MASK,
},
{}
};
@@ -641,7 +640,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset RX reset timeout!!\n");
+ "%s RX reset timeout!!\n", __func__);
break;
}
}
@@ -653,7 +652,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset TX reset timeout!!\n");
+ "%s TX reset timeout!!\n", __func__);
break;
}
}
@@ -672,7 +671,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset DMA reset timeout!!\n");
+ "%s DMA reset timeout!!\n", __func__);
break;
}
}
@@ -680,7 +679,7 @@ static void temac_device_reset(struct net_device *ndev)
if (temac_dma_bd_init(ndev)) {
dev_err(&ndev->dev,
- "temac_device_reset descriptor allocation failed\n");
+ "%s descriptor allocation failed\n", __func__);
}
spin_lock_irqsave(lp->indirect_lock, flags);
@@ -691,7 +690,8 @@ static void temac_device_reset(struct net_device *ndev)
spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
- * but leave receiver and transmitter disabled. */
+ * but leave receiver and transmitter disabled.
+ */
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
@@ -723,9 +723,15 @@ static void temac_adjust_link(struct net_device *ndev)
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
- case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
- case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
- case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+ case SPEED_1000:
+ mii_speed |= XTE_EMCFG_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ mii_speed |= XTE_EMCFG_LINKSPD_100;
+ break;
+ case SPEED_10:
+ mii_speed |= XTE_EMCFG_LINKSPD_10;
+ break;
}
/* Write new speed setting out to TEMAC */
@@ -1007,7 +1013,6 @@ static void ll_temac_recv(struct net_device *ndev)
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) {
-
/* Convert from device endianness (be32) to cpu
* endianness, and if necessary swap the bytes
* (back) for proper IP checksum byte order
@@ -1563,16 +1568,12 @@ static int temac_probe(struct platform_device *pdev)
}
/* Error handle returned DMA RX and TX interrupts */
- if (lp->rx_irq < 0) {
- if (lp->rx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA RX irq\n");
- return lp->rx_irq;
- }
- if (lp->tx_irq < 0) {
- if (lp->tx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA TX irq\n");
- return lp->tx_irq;
- }
+ if (lp->rx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->rx_irq,
+ "could not get DMA RX irq\n");
+ if (lp->tx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->tx_irq,
+ "could not get DMA TX irq\n");
if (temac_np) {
/* Retrieve the MAC address */
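
The dev_err_probe() conversion above keeps the old behaviour while dropping the explicit -EPROBE_DEFER check: the helper logs the message for real errors, records a deferral reason instead of logging when the error is -EPROBE_DEFER, and returns the error code in both cases, so it can be used directly in a return statement. A minimal sketch of the pattern (the resource being fetched is hypothetical):

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "could not get IRQ\n");
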
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 6fd2dea4e60f..2371c072b53f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -29,7 +29,8 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
- * in the LSW0 register */
+ * in the LSW0 register
+ */
spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
@@ -88,7 +89,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
}
/* Enable the MDIO bus by asserting the enable bit and writing
- * in the clock config */
+ * in the clock config
+ */
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
bus = devm_mdiobus_alloc(&pdev->dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index f2e2261b4b7d..6370c447ac5c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -402,6 +402,9 @@ struct axidma_bd {
* @rx_bd_num: Size of RX buffer descriptor ring
* @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
* accessed currently.
+ * @rx_packets: RX packet count for statistics
+ * @rx_bytes: RX byte count for statistics
+ * @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
* @tx_dma_cr: Nominal content of TX DMA control register
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
@@ -411,6 +414,9 @@ struct axidma_bd {
* complete. Only updated at runtime by TX NAPI poll.
* @tx_bd_tail: Stores the index of the next Tx buffer descriptor in the ring
* to be populated.
+ * @tx_packets: TX packet count for statistics
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -458,6 +464,9 @@ struct axienet_local {
dma_addr_t rx_bd_p;
u32 rx_bd_num;
u32 rx_bd_ci;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
u32 tx_dma_cr;
@@ -466,6 +475,9 @@ struct axienet_local {
u32 tx_bd_num;
u32 tx_bd_ci;
u32 tx_bd_tail;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync tx_stat_sync;
struct work_struct dma_err_task;
@@ -591,7 +603,7 @@ static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
#else /* CONFIG_64BIT */
static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
- dma_addr_t addr)
+ dma_addr_t addr)
{
axienet_dma_out32(lp, reg, lower_32_bits(addr));
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1760930ec0c4..d1d772580da9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -597,7 +597,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -645,7 +645,7 @@ static int axienet_device_reset(struct net_device *ndev)
* @nr_bds: Max number of descriptors to clean up
* @force: Whether to clean descriptors even if not complete
* @sizep: Pointer to a u32 filled with the total sum of all bytes
- * in all cleaned-up descriptors. Ignored if NULL.
+ * in all cleaned-up descriptors. Ignored if NULL.
* @budget: NAPI budget (use 0 when not called from NAPI poll)
*
* Would either be called after a successful transmit operation, or after
@@ -752,8 +752,10 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
if (lp->tx_bd_ci >= lp->tx_bd_num)
lp->tx_bd_ci %= lp->tx_bd_num;
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+ u64_stats_update_end(&lp->tx_stat_sync);
/* Matches barrier in axienet_start_xmit */
smp_mb();
@@ -984,8 +986,10 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- lp->ndev->stats.rx_packets += packets;
- lp->ndev->stats.rx_bytes += size;
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, packets);
+ u64_stats_add(&lp->rx_bytes, size);
+ u64_stats_update_end(&lp->rx_stat_sync);
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
@@ -1292,10 +1296,32 @@ static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(lp->phylink, rq, cmd);
}
+static void
+axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
+ stats->rx_packets = u64_stats_read(&lp->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
+ stats->tx_packets = u64_stats_read(&lp->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
+}
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
.ndo_start_xmit = axienet_start_xmit,
+ .ndo_get_stats64 = axienet_get_stats64,
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
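
Taken together, the axienet statistics hunks follow the standard u64_stats pattern: the RX/TX poll paths bump u64_stats_t counters inside an update_begin()/update_end() pair, and the new .ndo_get_stats64 reader retries with fetch_begin/fetch_retry so a 32-bit CPU never observes a torn 64-bit value. Condensed into one place (sketch only, using the same names as the driver):

	/* writer side, e.g. in axienet_rx_poll() */
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_update_end(&lp->rx_stat_sync);

	/* reader side, e.g. in axienet_get_stats64() */
	do {
		start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
	} while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
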
@@ -1317,8 +1343,8 @@ static const struct net_device_ops axienet_netdev_ops = {
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
- strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
/**
@@ -1349,7 +1375,7 @@ static int axienet_ethtools_get_regs_len(struct net_device *ndev)
static void axienet_ethtools_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *ret)
{
- u32 *data = (u32 *) ret;
+ u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N;
struct axienet_local *lp = netdev_priv(ndev);
@@ -1850,8 +1876,11 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
- netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll, NAPI_POLL_WEIGHT);
- netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll, NAPI_POLL_WEIGHT);
+ u64_stats_init(&lp->rx_stat_sync);
+ u64_stats_init(&lp->tx_stat_sync);
+
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2772a79cd3ed..0b3b6935c558 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -126,7 +126,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
return ret;
}
- axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
XAE_MDIO_MCR_PHYAD_MASK) |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 016a9c4f2c6c..05848ff15fb5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1060,7 +1060,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
}
static const struct ethtool_ops xemaclite_ethtool_ops = {
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index f9587e55b842..894e92ef415b 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1402,7 +1402,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
+ strscpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3591b9edc9a1..3b0c5f177447 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -841,7 +841,7 @@ static void eth_txdone_irq(void *unused)
}
}
-static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = netdev_priv(dev);
unsigned int txreadyq = port->plat->txreadyq;
@@ -999,11 +999,11 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
{
struct port *port = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
- strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strscpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_ts_info(struct net_device *dev,
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 746736c83873..19c99529566b 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -151,11 +151,11 @@ static void fjes_get_drvinfo(struct net_device *netdev,
plat_dev = adapter->plat_dev;
- strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, fjes_driver_version,
+ strscpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, fjes_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
"platform:%s", plat_dev->name);
}
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 5805e4a56385..1eff202f6a1f 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -32,68 +32,12 @@ MODULE_VERSION(DRV_VERSION);
#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
-static int fjes_request_irq(struct fjes_adapter *);
-static void fjes_free_irq(struct fjes_adapter *);
-
-static int fjes_open(struct net_device *);
-static int fjes_close(struct net_device *);
-static int fjes_setup_resources(struct fjes_adapter *);
-static void fjes_free_resources(struct fjes_adapter *);
-static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
-static void fjes_raise_intr_rxdata_task(struct work_struct *);
-static void fjes_tx_stall_task(struct work_struct *);
-static void fjes_force_close_task(struct work_struct *);
-static irqreturn_t fjes_intr(int, void*);
-static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
-static int fjes_change_mtu(struct net_device *, int);
-static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
-static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
-static void fjes_tx_retry(struct net_device *, unsigned int txqueue);
-
-static int fjes_acpi_add(struct acpi_device *);
-static int fjes_acpi_remove(struct acpi_device *);
-static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
-
-static int fjes_probe(struct platform_device *);
-static int fjes_remove(struct platform_device *);
-
-static int fjes_sw_init(struct fjes_adapter *);
-static void fjes_netdev_setup(struct net_device *);
-static void fjes_irq_watch_task(struct work_struct *);
-static void fjes_watch_unshare_task(struct work_struct *);
-static void fjes_rx_irq(struct fjes_adapter *, int);
-static int fjes_poll(struct napi_struct *, int);
-
static const struct acpi_device_id fjes_acpi_ids[] = {
{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
-static struct acpi_driver fjes_acpi_driver = {
- .name = DRV_NAME,
- .class = DRV_NAME,
- .owner = THIS_MODULE,
- .ids = fjes_acpi_ids,
- .ops = {
- .add = fjes_acpi_add,
- .remove = fjes_acpi_remove,
- },
-};
-
-static struct platform_driver fjes_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = fjes_probe,
- .remove = fjes_remove,
-};
-
-static struct resource fjes_resource[] = {
- DEFINE_RES_MEM(0, 1),
- DEFINE_RES_IRQ(0)
-};
-
static bool is_extended_socket_device(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
@@ -139,43 +83,6 @@ static int acpi_check_extended_socket_status(struct acpi_device *device)
return 0;
}
-static int fjes_acpi_add(struct acpi_device *device)
-{
- struct platform_device *plat_dev;
- acpi_status status;
-
- if (!is_extended_socket_device(device))
- return -ENODEV;
-
- if (acpi_check_extended_socket_status(device))
- return -ENODEV;
-
- status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
- fjes_get_acpi_resource, fjes_resource);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- /* create platform_device */
- plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
- ARRAY_SIZE(fjes_resource));
- if (IS_ERR(plat_dev))
- return PTR_ERR(plat_dev);
-
- device->driver_data = plat_dev;
-
- return 0;
-}
-
-static int fjes_acpi_remove(struct acpi_device *device)
-{
- struct platform_device *plat_dev;
-
- plat_dev = (struct platform_device *)acpi_driver_data(device);
- platform_device_unregister(plat_dev);
-
- return 0;
-}
-
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -206,143 +113,59 @@ fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
-static int fjes_request_irq(struct fjes_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int result = -1;
-
- adapter->interrupt_watch_enable = true;
- if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
- queue_delayed_work(adapter->control_wq,
- &adapter->interrupt_watch_task,
- FJES_IRQ_WATCH_DELAY);
- }
-
- if (!adapter->irq_registered) {
- result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
- IRQF_SHARED, netdev->name, adapter);
- if (result)
- adapter->irq_registered = false;
- else
- adapter->irq_registered = true;
- }
-
- return result;
-}
-
-static void fjes_free_irq(struct fjes_adapter *adapter)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- adapter->interrupt_watch_enable = false;
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
-
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
-
- if (adapter->irq_registered) {
- free_irq(adapter->hw.hw_res.irq, adapter);
- adapter->irq_registered = false;
- }
-}
-
-static const struct net_device_ops fjes_netdev_ops = {
- .ndo_open = fjes_open,
- .ndo_stop = fjes_close,
- .ndo_start_xmit = fjes_xmit_frame,
- .ndo_get_stats64 = fjes_get_stats64,
- .ndo_change_mtu = fjes_change_mtu,
- .ndo_tx_timeout = fjes_tx_retry,
- .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+static struct resource fjes_resource[] = {
+ DEFINE_RES_MEM(0, 1),
+ DEFINE_RES_IRQ(0)
};
-/* fjes_open - Called when a network interface is made active */
-static int fjes_open(struct net_device *netdev)
+static int fjes_acpi_add(struct acpi_device *device)
{
- struct fjes_adapter *adapter = netdev_priv(netdev);
- struct fjes_hw *hw = &adapter->hw;
- int result;
-
- if (adapter->open_guard)
- return -ENXIO;
-
- result = fjes_setup_resources(adapter);
- if (result)
- goto err_setup_res;
-
- hw->txrx_stop_req_bit = 0;
- hw->epstop_req_bit = 0;
+ struct platform_device *plat_dev;
+ acpi_status status;
- napi_enable(&adapter->napi);
+ if (!is_extended_socket_device(device))
+ return -ENODEV;
- fjes_hw_capture_interrupt_status(hw);
+ if (acpi_check_extended_socket_status(device))
+ return -ENODEV;
- result = fjes_request_irq(adapter);
- if (result)
- goto err_req_irq;
+ status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ fjes_get_acpi_resource, fjes_resource);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
+ /* create platform_device */
+ plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+ ARRAY_SIZE(fjes_resource));
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
- netif_tx_start_all_queues(netdev);
- netif_carrier_on(netdev);
+ device->driver_data = plat_dev;
return 0;
-
-err_req_irq:
- fjes_free_irq(adapter);
- napi_disable(&adapter->napi);
-
-err_setup_res:
- fjes_free_resources(adapter);
- return result;
}
-/* fjes_close - Disables a network interface */
-static int fjes_close(struct net_device *netdev)
+static int fjes_acpi_remove(struct acpi_device *device)
{
- struct fjes_adapter *adapter = netdev_priv(netdev);
- struct fjes_hw *hw = &adapter->hw;
- unsigned long flags;
- int epidx;
-
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
-
- fjes_hw_raise_epstop(hw);
-
- napi_disable(&adapter->napi);
-
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- for (epidx = 0; epidx < hw->max_epid; epidx++) {
- if (epidx == hw->my_epid)
- continue;
-
- if (fjes_hw_get_partner_ep_status(hw, epidx) ==
- EP_PARTNER_SHARED)
- adapter->hw.ep_shm_info[epidx]
- .tx.info->v1i.rx_status &=
- ~FJES_RX_POLL_WORK;
- }
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
-
- fjes_free_irq(adapter);
-
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
- cancel_work_sync(&adapter->unshare_watch_task);
- adapter->unshare_watch_bitmask = 0;
- cancel_work_sync(&adapter->raise_intr_rxdata_task);
- cancel_work_sync(&adapter->tx_stall_task);
-
- cancel_work_sync(&hw->update_zone_task);
- cancel_work_sync(&hw->epstop_task);
-
- fjes_hw_wait_epstop(hw);
+ struct platform_device *plat_dev;
- fjes_free_resources(adapter);
+ plat_dev = (struct platform_device *)acpi_driver_data(device);
+ platform_device_unregister(plat_dev);
return 0;
}
+static struct acpi_driver fjes_acpi_driver = {
+ .name = DRV_NAME,
+ .class = DRV_NAME,
+ .owner = THIS_MODULE,
+ .ids = fjes_acpi_ids,
+ .ops = {
+ .add = fjes_acpi_add,
+ .remove = fjes_acpi_remove,
+ },
+};
+
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -421,6 +244,188 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
return 0;
}
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+ adapter->unset_rx_last = true;
+ napi_schedule(&adapter->napi);
+}
+
+static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+ unsigned long flags;
+
+ set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_stop_req_irq_pre(hw, src_epid, status);
+ switch (status) {
+ case EP_PARTNER_WAITING:
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ fallthrough;
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ break;
+ case EP_PARTNER_SHARED:
+ set_bit(src_epid, &hw->epstop_req_bit);
+
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq, &hw->epstop_task);
+ break;
+ }
+ trace_fjes_stop_req_irq_post(hw, src_epid);
+}
+
+static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status status;
+ unsigned long flags;
+
+ status = fjes_hw_get_partner_ep_status(hw, src_epid);
+ trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
+ switch (status) {
+ case EP_PARTNER_UNSHARE:
+ case EP_PARTNER_COMPLETE:
+ default:
+ break;
+ case EP_PARTNER_WAITING:
+ if (src_epid < hw->my_epid) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+ FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+ clear_bit(src_epid, &hw->txrx_stop_req_bit);
+ set_bit(src_epid, &adapter->unshare_watch_bitmask);
+
+ if (!work_pending(&adapter->unshare_watch_task))
+ queue_work(adapter->control_wq,
+ &adapter->unshare_watch_task);
+ }
+ break;
+ case EP_PARTNER_SHARED:
+ if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
+ FJES_RX_STOP_REQ_REQUEST) {
+ set_bit(src_epid, &hw->epstop_req_bit);
+ if (!work_pending(&hw->epstop_task))
+ queue_work(adapter->control_wq,
+ &hw->epstop_task);
+ }
+ break;
+ }
+ trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
+}
+
+static void fjes_update_zone_irq(struct fjes_adapter *adapter,
+ int src_epid)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ if (!work_pending(&hw->update_zone_task))
+ queue_work(adapter->control_wq, &hw->update_zone_task);
+}
+
+static irqreturn_t fjes_intr(int irq, void *data)
+{
+ struct fjes_adapter *adapter = data;
+ struct fjes_hw *hw = &adapter->hw;
+ irqreturn_t ret;
+ u32 icr;
+
+ icr = fjes_hw_capture_interrupt_status(hw);
+
+ if (icr & REG_IS_MASK_IS_ASSERT) {
+ if (icr & REG_ICTL_MASK_RX_DATA) {
+ fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_rx += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
+ fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_stop += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
+ fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_unshare += 1;
+ }
+
+ if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
+ fjes_hw_set_irqmask(hw,
+ REG_ICTL_MASK_TXRX_STOP_DONE, true);
+
+ if (icr & REG_ICTL_MASK_INFO_UPDATE) {
+ fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+ hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
+ .recv_intr_zoneupdate += 1;
+ }
+
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ return ret;
+}
+
+static int fjes_request_irq(struct fjes_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int result = -1;
+
+ adapter->interrupt_watch_enable = true;
+ if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+
+ if (!adapter->irq_registered) {
+ result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
+ IRQF_SHARED, netdev->name, adapter);
+ if (result)
+ adapter->irq_registered = false;
+ else
+ adapter->irq_registered = true;
+ }
+
+ return result;
+}
+
+static void fjes_free_irq(struct fjes_adapter *adapter)
+{
+ struct fjes_hw *hw = &adapter->hw;
+
+ adapter->interrupt_watch_enable = false;
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+ if (adapter->irq_registered) {
+ free_irq(adapter->hw.hw_res.irq, adapter);
+ adapter->irq_registered = false;
+ }
+}
+
static void fjes_free_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -477,121 +482,91 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
}
}
-static void fjes_tx_stall_task(struct work_struct *work)
+/* fjes_open - Called when a network interface is made active */
+static int fjes_open(struct net_device *netdev)
{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, tx_stall_task);
- struct net_device *netdev = adapter->netdev;
+ struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
- int all_queue_available, sendable;
- enum ep_partner_status pstatus;
- int max_epid, my_epid, epid;
- union ep_buffer_info *info;
- int i;
-
- if (((long)jiffies -
- dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
- netif_wake_queue(netdev);
- return;
- }
-
- my_epid = hw->my_epid;
- max_epid = hw->max_epid;
+ int result;
- for (i = 0; i < 5; i++) {
- all_queue_available = 1;
+ if (adapter->open_guard)
+ return -ENXIO;
- for (epid = 0; epid < max_epid; epid++) {
- if (my_epid == epid)
- continue;
+ result = fjes_setup_resources(adapter);
+ if (result)
+ goto err_setup_res;
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- sendable = (pstatus == EP_PARTNER_SHARED);
- if (!sendable)
- continue;
+ hw->txrx_stop_req_bit = 0;
+ hw->epstop_req_bit = 0;
- info = adapter->hw.ep_shm_info[epid].tx.info;
+ napi_enable(&adapter->napi);
- if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
- return;
+ fjes_hw_capture_interrupt_status(hw);
- if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
- info->v1i.count_max)) {
- all_queue_available = 0;
- break;
- }
- }
+ result = fjes_request_irq(adapter);
+ if (result)
+ goto err_req_irq;
- if (all_queue_available) {
- netif_wake_queue(netdev);
- return;
- }
- }
+ fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
- usleep_range(50, 100);
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
- queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
-}
+ return 0;
-static void fjes_force_close_task(struct work_struct *work)
-{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, force_close_task);
- struct net_device *netdev = adapter->netdev;
+err_req_irq:
+ fjes_free_irq(adapter);
+ napi_disable(&adapter->napi);
- rtnl_lock();
- dev_close(netdev);
- rtnl_unlock();
+err_setup_res:
+ fjes_free_resources(adapter);
+ return result;
}
-static void fjes_raise_intr_rxdata_task(struct work_struct *work)
+/* fjes_close - Disables a network interface */
+static int fjes_close(struct net_device *netdev)
{
- struct fjes_adapter *adapter = container_of(work,
- struct fjes_adapter, raise_intr_rxdata_task);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status pstatus;
- int max_epid, my_epid, epid;
+ unsigned long flags;
+ int epidx;
- my_epid = hw->my_epid;
- max_epid = hw->max_epid;
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
- for (epid = 0; epid < max_epid; epid++)
- hw->ep_shm_info[epid].tx_status_work = 0;
+ fjes_hw_raise_epstop(hw);
- for (epid = 0; epid < max_epid; epid++) {
- if (epid == my_epid)
- continue;
+ napi_disable(&adapter->napi);
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- if (pstatus == EP_PARTNER_SHARED) {
- hw->ep_shm_info[epid].tx_status_work =
- hw->ep_shm_info[epid].tx.info->v1i.tx_status;
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
- if (hw->ep_shm_info[epid].tx_status_work ==
- FJES_TX_DELAY_SEND_PENDING) {
- hw->ep_shm_info[epid].tx.info->v1i.tx_status =
- FJES_TX_DELAY_SEND_NONE;
- }
- }
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
- for (epid = 0; epid < max_epid; epid++) {
- if (epid == my_epid)
- continue;
+ fjes_free_irq(adapter);
- pstatus = fjes_hw_get_partner_ep_status(hw, epid);
- if ((hw->ep_shm_info[epid].tx_status_work ==
- FJES_TX_DELAY_SEND_PENDING) &&
- (pstatus == EP_PARTNER_SHARED) &&
- !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
- FJES_RX_POLL_WORK)) {
- fjes_hw_raise_interrupt(hw, epid,
- REG_ICTL_MASK_RX_DATA);
- hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
- }
- }
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ adapter->unshare_watch_bitmask = 0;
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
- usleep_range(500, 1000);
+ cancel_work_sync(&hw->update_zone_task);
+ cancel_work_sync(&hw->epstop_task);
+
+ fjes_hw_wait_epstop(hw);
+
+ fjes_free_resources(adapter);
+
+ return 0;
}
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
@@ -787,13 +762,6 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return ret;
}
-static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
-{
- struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
-
- netif_tx_wake_queue(queue);
-}
-
static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
@@ -871,6 +839,13 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
+{
+ struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
+
+ netif_tx_wake_queue(queue);
+}
+
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
@@ -907,137 +882,29 @@ static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
return 0;
}
-static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
- int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status status;
- unsigned long flags;
-
- status = fjes_hw_get_partner_ep_status(hw, src_epid);
- trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
- switch (status) {
- case EP_PARTNER_UNSHARE:
- case EP_PARTNER_COMPLETE:
- default:
- break;
- case EP_PARTNER_WAITING:
- if (src_epid < hw->my_epid) {
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
- FJES_RX_STOP_REQ_DONE;
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
-
- clear_bit(src_epid, &hw->txrx_stop_req_bit);
- set_bit(src_epid, &adapter->unshare_watch_bitmask);
-
- if (!work_pending(&adapter->unshare_watch_task))
- queue_work(adapter->control_wq,
- &adapter->unshare_watch_task);
- }
- break;
- case EP_PARTNER_SHARED:
- if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
- FJES_RX_STOP_REQ_REQUEST) {
- set_bit(src_epid, &hw->epstop_req_bit);
- if (!work_pending(&hw->epstop_task))
- queue_work(adapter->control_wq,
- &hw->epstop_task);
- }
- break;
- }
- trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
-}
-
-static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
- enum ep_partner_status status;
- unsigned long flags;
-
- set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
-
- status = fjes_hw_get_partner_ep_status(hw, src_epid);
- trace_fjes_stop_req_irq_pre(hw, src_epid, status);
- switch (status) {
- case EP_PARTNER_WAITING:
- spin_lock_irqsave(&hw->rx_status_lock, flags);
- hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
- FJES_RX_STOP_REQ_DONE;
- spin_unlock_irqrestore(&hw->rx_status_lock, flags);
- clear_bit(src_epid, &hw->txrx_stop_req_bit);
- fallthrough;
- case EP_PARTNER_UNSHARE:
- case EP_PARTNER_COMPLETE:
- default:
- set_bit(src_epid, &adapter->unshare_watch_bitmask);
- if (!work_pending(&adapter->unshare_watch_task))
- queue_work(adapter->control_wq,
- &adapter->unshare_watch_task);
- break;
- case EP_PARTNER_SHARED:
- set_bit(src_epid, &hw->epstop_req_bit);
-
- if (!work_pending(&hw->epstop_task))
- queue_work(adapter->control_wq, &hw->epstop_task);
- break;
- }
- trace_fjes_stop_req_irq_post(hw, src_epid);
-}
-
-static void fjes_update_zone_irq(struct fjes_adapter *adapter,
- int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- if (!work_pending(&hw->update_zone_task))
- queue_work(adapter->control_wq, &hw->update_zone_task);
-}
+static const struct net_device_ops fjes_netdev_ops = {
+ .ndo_open = fjes_open,
+ .ndo_stop = fjes_close,
+ .ndo_start_xmit = fjes_xmit_frame,
+ .ndo_get_stats64 = fjes_get_stats64,
+ .ndo_change_mtu = fjes_change_mtu,
+ .ndo_tx_timeout = fjes_tx_retry,
+ .ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+};
-static irqreturn_t fjes_intr(int irq, void *data)
+/* fjes_netdev_setup - netdevice initialization routine */
+static void fjes_netdev_setup(struct net_device *netdev)
{
- struct fjes_adapter *adapter = data;
- struct fjes_hw *hw = &adapter->hw;
- irqreturn_t ret;
- u32 icr;
-
- icr = fjes_hw_capture_interrupt_status(hw);
-
- if (icr & REG_IS_MASK_IS_ASSERT) {
- if (icr & REG_ICTL_MASK_RX_DATA) {
- fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_rx += 1;
- }
-
- if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
- fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_stop += 1;
- }
-
- if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
- fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_unshare += 1;
- }
-
- if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
- fjes_hw_set_irqmask(hw,
- REG_ICTL_MASK_TXRX_STOP_DONE, true);
-
- if (icr & REG_ICTL_MASK_INFO_UPDATE) {
- fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
- hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
- .recv_intr_zoneupdate += 1;
- }
-
- ret = IRQ_HANDLED;
- } else {
- ret = IRQ_NONE;
- }
+ ether_setup(netdev);
- return ret;
+ netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
+ netdev->netdev_ops = &fjes_netdev_ops;
+ fjes_set_ethtool_ops(netdev);
+ netdev->mtu = fjes_support_mtu[3];
+ netdev->min_mtu = fjes_support_mtu[0];
+ netdev->max_mtu = fjes_support_mtu[3];
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
@@ -1087,16 +954,6 @@ static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
-static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
-{
- struct fjes_hw *hw = &adapter->hw;
-
- fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
-
- adapter->unset_rx_last = true;
- napi_schedule(&adapter->napi);
-}
-
static int fjes_poll(struct napi_struct *napi, int budget)
{
struct fjes_adapter *adapter =
@@ -1196,182 +1053,130 @@ static int fjes_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* fjes_probe - Device Initialization Routine */
-static int fjes_probe(struct platform_device *plat_dev)
+static int fjes_sw_init(struct fjes_adapter *adapter)
{
- struct fjes_adapter *adapter;
- struct net_device *netdev;
- struct resource *res;
- struct fjes_hw *hw;
- u8 addr[ETH_ALEN];
- int err;
-
- err = -ENOMEM;
- netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
- NET_NAME_UNKNOWN, fjes_netdev_setup,
- FJES_MAX_QUEUES);
-
- if (!netdev)
- goto err_out;
+ struct net_device *netdev = adapter->netdev;
- SET_NETDEV_DEV(netdev, &plat_dev->dev);
+ netif_napi_add(netdev, &adapter->napi, fjes_poll);
- dev_set_drvdata(&plat_dev->dev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->plat_dev = plat_dev;
- hw = &adapter->hw;
- hw->back = adapter;
+ return 0;
+}
- /* setup the private structure */
- err = fjes_sw_init(adapter);
- if (err)
- goto err_free_netdev;
+static void fjes_force_close_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, force_close_task);
+ struct net_device *netdev = adapter->netdev;
- INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
- adapter->force_reset = false;
- adapter->open_guard = false;
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+}
- adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
- if (unlikely(!adapter->txrx_wq)) {
- err = -ENOMEM;
- goto err_free_netdev;
- }
+static void fjes_tx_stall_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, tx_stall_task);
+ struct net_device *netdev = adapter->netdev;
+ struct fjes_hw *hw = &adapter->hw;
+ int all_queue_available, sendable;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
+ union ep_buffer_info *info;
+ int i;
- adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
- WQ_MEM_RECLAIM, 0);
- if (unlikely(!adapter->control_wq)) {
- err = -ENOMEM;
- goto err_free_txrx_wq;
+ if (((long)jiffies -
+ dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
+ netif_wake_queue(netdev);
+ return;
}
- INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
- INIT_WORK(&adapter->raise_intr_rxdata_task,
- fjes_raise_intr_rxdata_task);
- INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
- adapter->unshare_watch_bitmask = 0;
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
- INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
- adapter->interrupt_watch_enable = false;
+ for (i = 0; i < 5; i++) {
+ all_queue_available = 1;
- res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -EINVAL;
- goto err_free_control_wq;
- }
- hw->hw_res.start = res->start;
- hw->hw_res.size = resource_size(res);
- hw->hw_res.irq = platform_get_irq(plat_dev, 0);
- if (hw->hw_res.irq < 0) {
- err = hw->hw_res.irq;
- goto err_free_control_wq;
- }
+ for (epid = 0; epid < max_epid; epid++) {
+ if (my_epid == epid)
+ continue;
- err = fjes_hw_init(&adapter->hw);
- if (err)
- goto err_free_control_wq;
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ sendable = (pstatus == EP_PARTNER_SHARED);
+ if (!sendable)
+ continue;
- /* setup MAC address (02:00:00:00:00:[epid])*/
- addr[0] = 2;
- addr[1] = 0;
- addr[2] = 0;
- addr[3] = 0;
- addr[4] = 0;
- addr[5] = hw->my_epid; /* EPID */
- eth_hw_addr_set(netdev, addr);
+ info = adapter->hw.ep_shm_info[epid].tx.info;
- err = register_netdev(netdev);
- if (err)
- goto err_hw_exit;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return;
- netif_carrier_off(netdev);
+ if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
+ info->v1i.count_max)) {
+ all_queue_available = 0;
+ break;
+ }
+ }
- fjes_dbg_adapter_init(adapter);
+ if (all_queue_available) {
+ netif_wake_queue(netdev);
+ return;
+ }
+ }
- return 0;
+ usleep_range(50, 100);
-err_hw_exit:
- fjes_hw_exit(&adapter->hw);
-err_free_control_wq:
- destroy_workqueue(adapter->control_wq);
-err_free_txrx_wq:
- destroy_workqueue(adapter->txrx_wq);
-err_free_netdev:
- free_netdev(netdev);
-err_out:
- return err;
+ queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
-/* fjes_remove - Device Removal Routine */
-static int fjes_remove(struct platform_device *plat_dev)
+static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
- struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
- struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_adapter *adapter = container_of(work,
+ struct fjes_adapter, raise_intr_rxdata_task);
struct fjes_hw *hw = &adapter->hw;
+ enum ep_partner_status pstatus;
+ int max_epid, my_epid, epid;
- fjes_dbg_adapter_exit(adapter);
-
- cancel_delayed_work_sync(&adapter->interrupt_watch_task);
- cancel_work_sync(&adapter->unshare_watch_task);
- cancel_work_sync(&adapter->raise_intr_rxdata_task);
- cancel_work_sync(&adapter->tx_stall_task);
- if (adapter->control_wq)
- destroy_workqueue(adapter->control_wq);
- if (adapter->txrx_wq)
- destroy_workqueue(adapter->txrx_wq);
-
- unregister_netdev(netdev);
-
- fjes_hw_exit(hw);
-
- netif_napi_del(&adapter->napi);
-
- free_netdev(netdev);
-
- return 0;
-}
-
-static int fjes_sw_init(struct fjes_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
-
- netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
-
- return 0;
-}
+ my_epid = hw->my_epid;
+ max_epid = hw->max_epid;
-/* fjes_netdev_setup - netdevice initialization routine */
-static void fjes_netdev_setup(struct net_device *netdev)
-{
- ether_setup(netdev);
+ for (epid = 0; epid < max_epid; epid++)
+ hw->ep_shm_info[epid].tx_status_work = 0;
- netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
- netdev->netdev_ops = &fjes_netdev_ops;
- fjes_set_ethtool_ops(netdev);
- netdev->mtu = fjes_support_mtu[3];
- netdev->min_mtu = fjes_support_mtu[0];
- netdev->max_mtu = fjes_support_mtu[3];
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-}
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
-static void fjes_irq_watch_task(struct work_struct *work)
-{
- struct fjes_adapter *adapter = container_of(to_delayed_work(work),
- struct fjes_adapter, interrupt_watch_task);
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if (pstatus == EP_PARTNER_SHARED) {
+ hw->ep_shm_info[epid].tx_status_work =
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status;
- local_irq_disable();
- fjes_intr(adapter->hw.hw_res.irq, adapter);
- local_irq_enable();
+ if (hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) {
+ hw->ep_shm_info[epid].tx.info->v1i.tx_status =
+ FJES_TX_DELAY_SEND_NONE;
+ }
+ }
+ }
- if (fjes_rxframe_search_exist(adapter, 0) >= 0)
- napi_schedule(&adapter->napi);
+ for (epid = 0; epid < max_epid; epid++) {
+ if (epid == my_epid)
+ continue;
- if (adapter->interrupt_watch_enable) {
- if (!delayed_work_pending(&adapter->interrupt_watch_task))
- queue_delayed_work(adapter->control_wq,
- &adapter->interrupt_watch_task,
- FJES_IRQ_WATCH_DELAY);
+ pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+ if ((hw->ep_shm_info[epid].tx_status_work ==
+ FJES_TX_DELAY_SEND_PENDING) &&
+ (pstatus == EP_PARTNER_SHARED) &&
+ !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+ FJES_RX_POLL_WORK)) {
+ fjes_hw_raise_interrupt(hw, epid,
+ REG_ICTL_MASK_RX_DATA);
+ hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
+ }
}
+
+ usleep_range(500, 1000);
}
static void fjes_watch_unshare_task(struct work_struct *work)
@@ -1508,6 +1313,169 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
}
+static void fjes_irq_watch_task(struct work_struct *work)
+{
+ struct fjes_adapter *adapter = container_of(to_delayed_work(work),
+ struct fjes_adapter, interrupt_watch_task);
+
+ local_irq_disable();
+ fjes_intr(adapter->hw.hw_res.irq, adapter);
+ local_irq_enable();
+
+ if (fjes_rxframe_search_exist(adapter, 0) >= 0)
+ napi_schedule(&adapter->napi);
+
+ if (adapter->interrupt_watch_enable) {
+ if (!delayed_work_pending(&adapter->interrupt_watch_task))
+ queue_delayed_work(adapter->control_wq,
+ &adapter->interrupt_watch_task,
+ FJES_IRQ_WATCH_DELAY);
+ }
+}
+
+/* fjes_probe - Device Initialization Routine */
+static int fjes_probe(struct platform_device *plat_dev)
+{
+ struct fjes_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *res;
+ struct fjes_hw *hw;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = -ENOMEM;
+ netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
+ NET_NAME_UNKNOWN, fjes_netdev_setup,
+ FJES_MAX_QUEUES);
+
+ if (!netdev)
+ goto err_out;
+
+ SET_NETDEV_DEV(netdev, &plat_dev->dev);
+
+ dev_set_drvdata(&plat_dev->dev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->plat_dev = plat_dev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+
+ /* setup the private structure */
+ err = fjes_sw_init(adapter);
+ if (err)
+ goto err_free_netdev;
+
+ INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
+ adapter->force_reset = false;
+ adapter->open_guard = false;
+
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->txrx_wq)) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
+ WQ_MEM_RECLAIM, 0);
+ if (unlikely(!adapter->control_wq)) {
+ err = -ENOMEM;
+ goto err_free_txrx_wq;
+ }
+
+ INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
+ INIT_WORK(&adapter->raise_intr_rxdata_task,
+ fjes_raise_intr_rxdata_task);
+ INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
+ adapter->unshare_watch_bitmask = 0;
+
+ INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
+ adapter->interrupt_watch_enable = false;
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_control_wq;
+ }
+ hw->hw_res.start = res->start;
+ hw->hw_res.size = resource_size(res);
+ hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+ if (hw->hw_res.irq < 0) {
+ err = hw->hw_res.irq;
+ goto err_free_control_wq;
+ }
+
+ err = fjes_hw_init(&adapter->hw);
+ if (err)
+ goto err_free_control_wq;
+
+ /* setup MAC address (02:00:00:00:00:[epid])*/
+ addr[0] = 2;
+ addr[1] = 0;
+ addr[2] = 0;
+ addr[3] = 0;
+ addr[4] = 0;
+ addr[5] = hw->my_epid; /* EPID */
+ eth_hw_addr_set(netdev, addr);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_hw_exit;
+
+ netif_carrier_off(netdev);
+
+ fjes_dbg_adapter_init(adapter);
+
+ return 0;
+
+err_hw_exit:
+ fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+ destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+ destroy_workqueue(adapter->txrx_wq);
+err_free_netdev:
+ free_netdev(netdev);
+err_out:
+ return err;
+}
+
+/* fjes_remove - Device Removal Routine */
+static int fjes_remove(struct platform_device *plat_dev)
+{
+ struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
+ struct fjes_adapter *adapter = netdev_priv(netdev);
+ struct fjes_hw *hw = &adapter->hw;
+
+ fjes_dbg_adapter_exit(adapter);
+
+ cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+ cancel_work_sync(&adapter->unshare_watch_task);
+ cancel_work_sync(&adapter->raise_intr_rxdata_task);
+ cancel_work_sync(&adapter->tx_stall_task);
+ if (adapter->control_wq)
+ destroy_workqueue(adapter->control_wq);
+ if (adapter->txrx_wq)
+ destroy_workqueue(adapter->txrx_wq);
+
+ unregister_netdev(netdev);
+
+ fjes_hw_exit(hw);
+
+ netif_napi_del(&adapter->napi);
+
+ free_netdev(netdev);
+
+ return 0;
+}
+
+static struct platform_driver fjes_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = fjes_probe,
+ .remove = fjes_remove,
+};
+
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
void *context, void **return_value)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 7962c37b3f14..f393e454f45c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -503,12 +503,9 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
off_gnv = skb_gro_offset(skb);
hlen = off_gnv + sizeof(*gh);
- gh = skb_gro_header_fast(skb, off_gnv);
- if (skb_gro_header_hard(skb, hlen)) {
- gh = skb_gro_header_slow(skb, hlen, off_gnv);
- if (unlikely(!gh))
- goto out;
- }
+ gh = skb_gro_header(skb, hlen, off_gnv);
+ if (unlikely(!gh))
+ goto out;
if (gh->ver != GENEVE_VER || gh->oam)
goto out;
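
For context on the geneve change above: skb_gro_header() folds the old skb_gro_header_fast()/skb_gro_header_slow() pair into a single call that returns NULL when the header cannot be made available. Below is a hedged sketch of how a GRO receive handler for a made-up protocol header would use it; all demo_* names are invented for illustration and are not part of this patch.

#include <net/gro.h>	/* skb_gro_offset(), skb_gro_header() */

struct demo_hdr { __be16 flags; __be16 proto; };

static struct sk_buff *demo_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct demo_hdr);
	struct demo_hdr *dh;

	/* One call replaces the fast/slow lookup pair; NULL means the
	 * header bytes are not available for this packet.
	 */
	dh = skb_gro_header(skb, hlen, off);
	if (unlikely(!dh))
		return NULL;

	/* ...protocol-specific flush/same-flow logic would follow here... */

	return NULL;
}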
@@ -1200,8 +1197,8 @@ static const struct net_device_ops geneve_netdev_ops = {
static void geneve_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
- strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
}
static const struct ethtool_ops geneve_ethtool_ops = {
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index a208e2b1a9af..15c7dc82107f 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1859,6 +1859,7 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = gtp_genl_ops,
.n_small_ops = ARRAY_SIZE(gtp_genl_ops),
+ .resv_start_op = GTP_CMD_ECHOREQ + 1,
.mcgrps = gtp_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8297411e87ea..a6184d6c7b15 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -600,7 +600,7 @@ static int hdlcdrv_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
case HDLCDRVCTL_DRIVERNAME:
if (s->ops && s->ops->drvname) {
- strlcpy(bi.data.drivername, s->ops->drvname,
+ strscpy(bi.data.drivername, s->ops->drvname,
sizeof(bi.data.drivername));
break;
}
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 74e845fa2e07..aa8f828a0ae7 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
pci_release_regions(pdev);
+ pci_disable_device(pdev);
out2:
free_netdev(dev);
out3:
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6e42cb03e226..f066de0da492 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1779,8 +1779,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
}
/* Enable NAPI handler before init callbacks */
- netif_napi_add(ndev, &net_device->chan_table[0].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 15ebd5426604..5f08482065ca 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -935,8 +935,8 @@ int netvsc_recv_callback(struct net_device *net,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static void netvsc_get_channels(struct net_device *net,
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 6da36cb8af80..11f767a20444 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1575,7 +1575,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
for (i = 1; i < net_device->num_chn; i++)
netif_napi_add(net, &net_device->chan_table[i].napi,
- netvsc_poll, NAPI_POLL_WEIGHT);
+ netvsc_poll);
return net_device;
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 6afdf1622944..5cf218c674a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1310,10 +1310,11 @@ static void adf7242_remove(struct spi_device *spi)
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 42c0b451088d..450b16ad40a4 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2293,7 +2293,7 @@ static int ca8210_set_csma_params(
* @retries: Number of retries
*
* Sets the number of times to retry a transmission if no acknowledgment was
- * was received from the other end when one was requested.
+ * received from the other end when one was requested.
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 1e1f40f628a0..c69b87d3837d 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 38c217bd7c82..2f0544dd7c2a 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -630,6 +630,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = hwsim_nl_ops,
.n_small_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .resv_start_op = MAC802154_HWSIM_CMD_NEW_EDGE + 1,
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 8b2220eb6b92..48255fc4b25c 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -13,4 +13,6 @@ ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
+ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
+
ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 1c1895aea811..e0d71f609272 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -526,7 +526,7 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
.version = IPA_VERSION_3_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 58b708d2fc75..383ef1890065 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -407,11 +407,11 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
- BCR_TX_NOT_USING_BRESP_FMASK |
- BCR_SUSPEND_L2_IRQ_FMASK |
- BCR_HOLB_DROP_L2_IRQ_FMASK |
- BCR_DUAL_TX_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 9e307eebd33f..bea2da1c4c51 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -56,9 +56,9 @@
* element can also contain an immediate command, requesting the IPA perform
* actions other than data transfer.
*
- * Each TRE refers to a block of data--also located DRAM. After writing one
- * or more TREs to a channel, the writer (either the IPA or an EE) writes a
- * doorbell register to inform the receiving side how many elements have
+ * Each TRE refers to a block of data--also located in DRAM. After writing
+ * one or more TREs to a channel, the writer (either the IPA or an EE) writes
+ * a doorbell register to inform the receiving side how many elements have
* been written.
*
* Each channel has a GSI "event ring" associated with it. An event ring
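
The producer/doorbell model described in the comment above can be pictured with a short, self-contained sketch; every name below is hypothetical and nothing here is taken from the driver itself. The essential ordering is the one the comment states: fill ring elements in DRAM first, then publish the new index so the consumer knows how far to process.

#include <stdint.h>

struct demo_tre { uint64_t addr; uint32_t len; uint32_t flags; };

struct demo_ring {
	struct demo_tre *tre;	/* ring array, resident in DRAM */
	uint32_t count;		/* number of elements in the ring */
	uint32_t index;		/* free-running producer index */
};

static void demo_ring_post(struct demo_ring *ring, volatile uint32_t *doorbell,
			   const struct demo_tre *tre, uint32_t n)
{
	uint32_t i;

	/* Write one or more TREs into the ring... */
	for (i = 0; i < n; i++)
		ring->tre[(ring->index + i) % ring->count] = tre[i];
	ring->index += n;

	/* ...then tell the other side how many elements are now valid */
	*doorbell = ring->index % ring->count;
}

In the hardware the doorbell is a memory-mapped register rather than a plain volatile pointer, but the fill-then-publish ordering is the point the comment is making.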
@@ -710,43 +710,32 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- const struct list_head *list;
+ u32 pending_id = trans_info->pending_id;
struct gsi_trans *trans;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* There is a small chance a TX transaction got allocated just
- * before we disabled transmits, so check for that.
- */
- if (channel->toward_ipa) {
- list = &trans_info->alloc;
- if (!list_empty(list))
- goto done;
- list = &trans_info->committed;
- if (!list_empty(list))
- goto done;
- list = &trans_info->pending;
- if (!list_empty(list))
- goto done;
+ u16 trans_id;
+
+ if (channel->toward_ipa && pending_id != trans_info->free_id) {
+ /* There is a small chance a TX transaction got allocated
+ * just before we disabled transmits, so check for that.
+ * The last allocated, committed, or pending transaction
+ * precedes the first free transaction.
+ */
+ trans_id = trans_info->free_id - 1;
+ } else if (trans_info->polled_id != pending_id) {
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ *
+ * The last completed or polled transaction precedes the
+ * first pending transaction.
+ */
+ trans_id = pending_id - 1;
+ } else {
+ return NULL;
}
- /* Otherwise (TX or RX) we want to wait for anything that
- * has completed, or has been polled but not released yet.
- */
- list = &trans_info->complete;
- if (!list_empty(list))
- goto done;
- list = &trans_info->polled;
- if (list_empty(list))
- list = NULL;
-done:
- trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
-
/* Caller will wait for this, so take a reference */
- if (trans)
- refcount_inc(&trans->refcount);
-
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ refcount_inc(&trans->refcount);
return trans;
}
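
A brief aside on the ID arithmetic used in the rewritten gsi_channel_trans_last(): the per-state IDs are free-running 16-bit counters, so "the transaction that precedes the first free one" is simply free_id - 1 even across a wrap, and the backing array slot is the ID modulo the ring size (a power of two). A toy, standalone demonstration of just that arithmetic, with made-up values:

#include <stdint.h>
#include <stdio.h>

static uint16_t last_id(uint16_t first_of_next_state)
{
	/* uint16_t subtraction wraps, so this works even across 65535 -> 0 */
	return first_of_next_state - 1;
}

int main(void)
{
	const uint16_t tre_count = 256;	/* ring sizes are powers of two */

	/* free_id == 0 here means the free-running counter just wrapped;
	 * the last allocated transaction is ID 65535, slot 255.
	 */
	printf("last allocated: id %d, slot %d\n",
	       last_id(0), last_id(0) % tre_count);

	/* pending_id == 3: the last completed (or polled) transaction
	 * precedes it, i.e. ID 2, slot 2.
	 */
	printf("last completed: id %d, slot %d\n",
	       last_id(3), last_id(3) % tre_count);

	return 0;
}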
@@ -1358,8 +1347,8 @@ gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
* we update transactions to record their actual received lengths.
*
* When an event for a TX channel arrives we use information in the
- * transaction to report the number of requests and bytes have been
- * transferred.
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
*
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
@@ -1485,8 +1474,8 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
-/* Consult hardware, move any newly completed transactions to completed list */
-static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
+/* Consult hardware, move newly completed transactions to completed state */
+void gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1505,12 +1494,12 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return NULL;
+ return;
/* Get the transaction for the latest completed event. */
trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
if (!trans)
- return NULL;
+ return;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
@@ -1518,8 +1507,6 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
* up the network stack.
*/
gsi_evt_ring_update(gsi, evt_ring_id, index);
-
- return gsi_channel_trans_complete(channel);
}
/**
@@ -1528,21 +1515,18 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
*
* Return: Transaction pointer, or null if none are available
*
- * This function returns the first entry on a channel's completed transaction
- * list. If that list is empty, the hardware is consulted to determine
- * whether any new transactions have completed. If so, they're moved to the
- * completed list and the new first entry is returned. If there are no more
- * completed transactions, a null pointer is returned.
+ * This function returns the first of a channel's completed transactions.
+ * If no transactions are in completed state, the hardware is consulted to
+ * determine whether any new transactions have completed. If so, they're
+ * moved to completed state and the first such transaction is returned.
+ * If there are no more completed transactions, a null pointer is returned.
*/
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
struct gsi_trans *trans;
- /* Get the first transaction from the completed list */
+ /* Get the first completed transaction */
trans = gsi_channel_trans_complete(channel);
- if (!trans) /* List is empty; see if there's more to do */
- trans = gsi_channel_update(channel);
-
if (trans)
gsi_trans_move_polled(trans);
@@ -1623,7 +1607,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_poll);
else
netif_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ gsi_channel_poll);
return 0;
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 23de5f67374c..49dcadba4e0b 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -31,14 +31,6 @@ struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;
-/* Execution environment IDs */
-enum gsi_ee_id {
- GSI_EE_AP = 0x0,
- GSI_EE_MODEM = 0x1,
- GSI_EE_UC = 0x2,
- GSI_EE_TZ = 0x3,
-};
-
struct gsi_ring {
void *virt; /* ring array base address */
dma_addr_t addr; /* primarily low 32 bits used */
@@ -82,18 +74,18 @@ struct gsi_trans_pool {
struct gsi_trans_info {
atomic_t tre_avail; /* TREs available for allocation */
- struct gsi_trans_pool pool; /* transaction pool */
+
+ u16 free_id; /* first free trans in array */
+ u16 allocated_id; /* first allocated transaction */
+ u16 committed_id; /* first committed transaction */
+ u16 pending_id; /* first pending transaction */
+ u16 completed_id; /* first completed transaction */
+ u16 polled_id; /* first polled transaction */
+ struct gsi_trans *trans; /* transaction array */
struct gsi_trans **map; /* TRE -> transaction map */
struct gsi_trans_pool sg_pool; /* scatterlist pool */
struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
-
- spinlock_t spinlock; /* protects updates to the lists */
- struct list_head alloc; /* allocated, not committed */
- struct list_head committed; /* committed, awaiting doorbell */
- struct list_head pending; /* pending, awaiting completion */
- struct list_head complete; /* completed, awaiting poll */
- struct list_head polled; /* returned by gsi_channel_poll_one() */
};
/* Hardware values signifying the state of a channel */
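
To see how the six IDs above replace the old per-state lists, consider this illustrative model (all names below are invented): each state occupies the half-open ID range between its own "first" ID and the next state's, so membership checks and counts reduce to wraparound-safe 16-bit subtraction, with no list manipulation or locking of a shared list required.

#include <stdint.h>
#include <stdio.h>

struct demo_trans_info {
	uint16_t free_id;	/* next transaction to allocate */
	uint16_t allocated_id;	/* oldest allocated transaction */
	uint16_t committed_id;	/* oldest committed transaction */
	uint16_t pending_id;	/* oldest pending transaction */
	uint16_t completed_id;	/* oldest completed transaction */
	uint16_t polled_id;	/* oldest polled transaction */
};

/* Number of transactions in a state is the distance between that state's
 * oldest ID and the next state's oldest ID; uint16_t math handles wrap.
 */
static uint16_t demo_count(uint16_t oldest, uint16_t next_state_oldest)
{
	return next_state_oldest - oldest;
}

int main(void)
{
	struct demo_trans_info ti = {
		.free_id = 7, .allocated_id = 6, .committed_id = 5,
		.pending_id = 3, .completed_id = 2, .polled_id = 2,
	};

	printf("allocated: %u\n", demo_count(ti.allocated_id, ti.free_id));	/* 1 */
	printf("pending:   %u\n", demo_count(ti.pending_id, ti.committed_id));	/* 2 */
	printf("polled:    %u\n", demo_count(ti.polled_id, ti.completed_id));	/* 0 */

	return 0;
}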
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index 0b2516fa21b5..c65f7c5cdc8d 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_PRIVATE_H_
#define _GSI_PRIVATE_H_
@@ -18,13 +18,13 @@ struct gsi_channel;
/**
* gsi_trans_move_complete() - Mark a GSI transaction completed
- * @trans: Transaction to commit
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_complete(struct gsi_trans *trans);
/**
* gsi_trans_move_polled() - Mark a transaction polled
- * @trans: Transaction to update
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_polled(struct gsi_trans *trans);
@@ -94,6 +94,14 @@ void gsi_channel_trans_exit(struct gsi_channel *channel);
*/
void gsi_channel_doorbell(struct gsi_channel *channel);
+/* gsi_channel_update() - Update knowledge of channel hardware state
+ * @channel: Channel to be updated
+ *
+ * Consult hardware, change the state of any newly-completed transactions
+ * on a channel.
+ */
+void gsi_channel_update(struct gsi_channel *channel);
+
/**
* gsi_ring_virt() - Return virtual address for a ring entry
* @ring: Ring whose address is to be translated
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 5bd8b31656d3..3763359f208f 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
@@ -55,14 +55,10 @@
/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c020 + 0x1000 * (ee))
+ (0x0000c020 + 0x1000 * GSI_EE_AP)
#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c024 + 0x1000 * (ee))
+ (0x0000c024 + 0x1000 * GSI_EE_AP)
/* All other register offsets are relative to gsi->virt */
@@ -81,9 +77,7 @@ enum gsi_channel_type {
};
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
- (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
@@ -112,9 +106,7 @@ chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
}
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
- (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
static inline u32 r_length_encoded(enum ipa_version version, u32 length)
@@ -125,19 +117,13 @@ static inline u32 r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
- (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
- (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_QOS_OFFSET(ch) \
- GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
- (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
@@ -158,29 +144,19 @@ enum gsi_prefetch_mode {
};
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
- (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
- (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
- (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
- (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
- (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define EV_CHTYPE_FMASK GENMASK(3, 0)
#define EV_EE_FMASK GENMASK(7, 4)
@@ -190,9 +166,7 @@ enum gsi_prefetch_mode {
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
- (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* Encoded value for EV_CH_E_CNTXT_1 register EV_R_LENGTH field */
static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
{
@@ -202,83 +176,53 @@ static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
- (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
- (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
- (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
- (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define MODT_FMASK GENMASK(15, 0)
#define MODC_FMASK GENMASK(23, 16)
#define MOD_CNT_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
- (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
- (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
- (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
- (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
- (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
- (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
- (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
- GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
- (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+ (0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
- (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+ (0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
#define GSI_GSI_STATUS_OFFSET \
- GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
- (0x0001f000 + 0x4000 * (ee))
+ (0x0001f000 + 0x4000 * GSI_EE_AP)
#define ENABLED_FMASK GENMASK(0, 0)
#define GSI_CH_CMD_OFFSET \
- GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CH_CMD_OFFSET(ee) \
- (0x0001f008 + 0x4000 * (ee))
+ (0x0001f008 + 0x4000 * GSI_EE_AP)
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
@@ -293,9 +237,7 @@ enum gsi_ch_cmd_opcode {
};
#define GSI_EV_CH_CMD_OFFSET \
- GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
- (0x0001f010 + 0x4000 * (ee))
+ (0x0001f010 + 0x4000 * GSI_EE_AP)
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
@@ -307,9 +249,7 @@ enum gsi_evt_cmd_opcode {
};
#define GSI_GENERIC_CMD_OFFSET \
- GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
- (0x0001f018 + 0x4000 * (ee))
+ (0x0001f018 + 0x4000 * GSI_EE_AP)
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
@@ -326,9 +266,7 @@ enum gsi_generic_cmd_opcode {
/* The next register is present for IPA v3.5.1 and above */
#define GSI_GSI_HW_PARAM_2_OFFSET \
- GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
- (0x0001f040 + 0x4000 * (ee))
+ (0x0001f040 + 0x4000 * GSI_EE_AP)
#define IRAM_SIZE_FMASK GENMASK(2, 0)
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
@@ -357,13 +295,9 @@ enum gsi_iram_size {
/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
- (0x0001f080 + 0x4000 * (ee))
+ (0x0001f080 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
- (0x0001f088 + 0x4000 * (ee))
+ (0x0001f088 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
enum gsi_irq_type_id {
@@ -377,62 +311,38 @@ enum gsi_irq_type_id {
};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
- (0x0001f090 + 0x4000 * (ee))
+ (0x0001f090 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
- (0x0001f094 + 0x4000 * (ee))
+ (0x0001f094 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f098 + 0x4000 * (ee))
+ (0x0001f098 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f09c + 0x4000 * (ee))
+ (0x0001f09c + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a0 + 0x4000 * (ee))
+ (0x0001f0a0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a4 + 0x4000 * (ee))
+ (0x0001f0a4 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
- (0x0001f0b0 + 0x4000 * (ee))
+ (0x0001f0b0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
- (0x0001f0b8 + 0x4000 * (ee))
+ (0x0001f0b8 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f0c0 + 0x4000 * (ee))
+ (0x0001f0c0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
- (0x0001f100 + 0x4000 * (ee))
+ (0x0001f100 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
- (0x0001f108 + 0x4000 * (ee))
+ (0x0001f108 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f110 + 0x4000 * (ee))
+ (0x0001f110 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the GLOB_IRQ_* registers */
enum gsi_global_irq_id {
ERROR_INT = 0x0,
@@ -442,17 +352,11 @@ enum gsi_global_irq_id {
};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
- (0x0001f118 + 0x4000 * (ee))
+ (0x0001f118 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
- (0x0001f120 + 0x4000 * (ee))
+ (0x0001f120 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
- (0x0001f128 + 0x4000 * (ee))
+ (0x0001f128 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the (general) GSI_IRQ_* registers */
enum gsi_general_id {
BREAK_POINT = 0x0,
@@ -462,15 +366,11 @@ enum gsi_general_id {
};
#define GSI_CNTXT_INTSET_OFFSET \
- GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
- (0x0001f180 + 0x4000 * (ee))
+ (0x0001f180 + 0x4000 * GSI_EE_AP)
#define INTYPE_FMASK GENMASK(0, 0)
#define GSI_ERROR_LOG_OFFSET \
- GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
- (0x0001f200 + 0x4000 * (ee))
+ (0x0001f200 + 0x4000 * GSI_EE_AP)
/* Fields below are present for IPA v3.5.1 and above */
#define ERR_ARG3_FMASK GENMASK(3, 0)
@@ -501,14 +401,10 @@ enum gsi_err_type {
};
#define GSI_ERROR_LOG_CLR_OFFSET \
- GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
- (0x0001f210 + 0x4000 * (ee))
+ (0x0001f210 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SCRATCH_0_OFFSET \
- GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
- (0x0001f400 + 0x4000 * (ee))
+ (0x0001f400 + 0x4000 * GSI_EE_AP)
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 18e7e8c405be..26b7f683a3e1 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -22,37 +22,36 @@
* DOC: GSI Transactions
*
* A GSI transaction abstracts the behavior of a GSI channel by representing
- * everything about a related group of IPA commands in a single structure.
- * (A "command" in this sense is either a data transfer or an IPA immediate
+ * everything about a related group of IPA operations in a single structure.
+ * (A "operation" in this sense is either a data transfer or an IPA immediate
* command.) Most details of interaction with the GSI hardware are managed
- * by the GSI transaction core, allowing users to simply describe commands
+ * by the GSI transaction core, allowing users to simply describe operations
* to be performed. When a transaction has completed, a callback function
* (dependent on the type of endpoint associated with the channel) allows
* cleanup of resources associated with the transaction.
*
- * To perform a command (or set of them), a user of the GSI transaction
+ * To perform an operation (or set of them), a user of the GSI transaction
* interface allocates a transaction, indicating the number of TREs required
- * (one per command). If sufficient TREs are available, they are reserved
+ * (one per operation). If sufficient TREs are available, they are reserved
* for use in the transaction and the allocation succeeds. This way
- * exhaustion of the available TREs in a channel ring is detected
- * as early as possible. All resources required to complete a transaction
- * are allocated at transaction allocation time.
+ * exhaustion of the available TREs in a channel ring is detected as early
+ * as possible. Any other resources that might be needed to complete a
+ * transaction are also allocated when the transaction is allocated.
*
- * Commands performed as part of a transaction are represented in an array
- * of Linux scatterlist structures. This array is allocated with the
- * transaction, and its entries are initialized using standard scatterlist
- * functions (such as sg_set_buf() or skb_to_sgvec()).
+ * Operations performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures, allocated with the transaction. These
+ * scatterlist structures are initialized by "adding" operations to the
+ * transaction. If a buffer in an operation must be mapped for DMA, this is
+ * done at the time it is added to the transaction. It is possible for a
+ * mapping error to occur when an operation is added. In this case the
+ * transaction should simply be freed; this correctly releases resources
+ * associated with the transaction.
*
- * Once a transaction's scatterlist structures have been initialized, the
- * transaction is committed. The caller is responsible for mapping buffers
- * for DMA if necessary, and this should be done *before* allocating
- * the transaction. Between a successful allocation and commit of a
- * transaction no errors should occur.
- *
- * Committing transfers ownership of the entire transaction to the GSI
- * transaction core. The GSI transaction code formats the content of
- * the scatterlist array into the channel ring buffer and informs the
- * hardware that new TREs are available to process.
+ * Once all operations have been successfully added to a transaction, the
+ * transaction is committed. Committing transfers ownership of the entire
+ * transaction to the GSI transaction core. The GSI transaction code
+ * formats the content of the scatterlist array into the channel ring
+ * buffer and informs the hardware that new TREs are available to process.
*
* The last TRE in each transaction is marked to interrupt the AP when the
* GSI hardware has completed it. Because transfers described by TREs are
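
Here is a sketch of the caller-side flow this comment describes, pieced together from the entry points visible in this patch (gsi_channel_trans_alloc(), gsi_trans_free(), gsi_trans_commit()); the gsi_trans_page_add() call, the RX direction, and the error values are assumptions about typical usage rather than code quoted from the driver.

#include <linux/types.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"

static int demo_queue_rx_page(struct gsi *gsi, u32 channel_id,
			      struct page *page, u32 size, u32 offset)
{
	struct gsi_trans *trans;
	int ret;

	/* Reserve one TRE up front; ring exhaustion is detected here, early */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_FROM_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Adding the operation maps the buffer for DMA; on failure, freeing
	 * the transaction releases everything allocated along with it.
	 */
	ret = gsi_trans_page_add(trans, page, size, offset);
	if (ret) {
		gsi_trans_free(trans);
		return ret;
	}

	/* Committing hands the whole transaction to the GSI core and
	 * (with ring_db true) rings the channel doorbell.
	 */
	gsi_trans_commit(trans, true);

	return 0;
}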
@@ -125,11 +124,10 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
memset(pool, 0, sizeof(*pool));
}
-/* Allocate the requested number of (zeroed) entries from the pool */
-/* Home-grown DMA pool. This way we can preallocate and use the tre_count
- * to guarantee allocations will succeed. Even though we specify max_alloc
- * (and it can be more than one), we only allow allocation of a single
- * element from a DMA pool.
+/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
+ * allocations will succeed. The immediate commands in a transaction can
+ * require up to max_alloc elements from the pool. But we only allow
+ * allocation of a single element from a DMA pool at a time.
*/
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
size_t size, u32 count, u32 max_alloc)
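
For readers unfamiliar with the "home-grown DMA pool" idea, here is a minimal sketch of the same principle: preallocate one coherent block sized for the worst case, then hand out elements round-robin so a single-element allocation can never fail. Everything below is hypothetical (demo_* names), not the driver's gsi_trans_pool_init_dma() implementation, and the never-fails property rests on the stated caller constraint.

#include <linux/dma-mapping.h>

struct demo_dma_pool {
	struct device *dev;
	void *base;		/* preallocated coherent memory */
	dma_addr_t addr;	/* DMA address corresponding to base */
	size_t size;		/* size of one element */
	u32 count;		/* total number of elements */
	u32 next;		/* next slot to hand out (free-running) */
};

static int demo_dma_pool_init(struct demo_dma_pool *pool, struct device *dev,
			      size_t size, u32 count)
{
	pool->base = dma_alloc_coherent(dev, count * size, &pool->addr,
					GFP_KERNEL);
	if (!pool->base)
		return -ENOMEM;

	pool->dev = dev;
	pool->size = size;
	pool->count = count;
	pool->next = 0;

	return 0;
}

/* Never fails: the caller guarantees at most @count elements are ever in
 * use at once, so round-robin reuse cannot hand out a live element.
 */
static void *demo_dma_pool_alloc_one(struct demo_dma_pool *pool,
				     dma_addr_t *addr)
{
	u32 slot = pool->next++ % pool->count;

	*addr = pool->addr + slot * pool->size;
	return pool->base + slot * pool->size;
}

static void demo_dma_pool_exit(struct demo_dma_pool *pool)
{
	dma_free_coherent(pool->dev, pool->count * pool->size,
			  pool->base, pool->addr);
}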
@@ -237,68 +235,63 @@ gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
- return list_first_entry_or_null(&channel->trans_info.complete,
- struct gsi_trans, links);
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_id = trans_info->completed_id;
+
+ if (trans_id == trans_info->pending_id) {
+ gsi_channel_update(channel);
+ if (trans_id == trans_info->pending_id)
+ return NULL;
+ }
+
+ return &trans_info->trans[trans_id %= channel->tre_count];
}
-/* Move a transaction from the allocated list to the committed list */
+/* Move a transaction from allocated to committed state */
static void gsi_trans_move_committed(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
-
- list_move_tail(&trans->links, &trans_info->committed);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* This allocated transaction is now committed */
+ trans_info->allocated_id++;
}
-/* Move transactions from the committed list to the pending list */
+/* Move committed transactions to pending state */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct list_head list;
-
- spin_lock_bh(&trans_info->spinlock);
+ u16 trans_index = trans - &trans_info->trans[0];
+ u16 delta;
- /* Move this transaction and all predecessors to the pending list */
- list_cut_position(&list, &trans_info->committed, &trans->links);
- list_splice_tail(&list, &trans_info->pending);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* These committed transactions are now pending */
+ delta = trans_index - trans_info->committed_id + 1;
+ trans_info->committed_id += delta % channel->tre_count;
}
-/* Move a transaction and all of its predecessors from the pending list
- * to the completed list.
- */
+/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct list_head list;
+ u16 trans_index = trans - trans_info->trans;
+ u16 delta;
- spin_lock_bh(&trans_info->spinlock);
-
- /* Move this transaction and all predecessors to completed list */
- list_cut_position(&list, &trans_info->pending, &trans->links);
- list_splice_tail(&list, &trans_info->complete);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* These pending transactions are now completed */
+ delta = trans_index - trans_info->pending_id + 1;
+ delta %= channel->tre_count;
+ trans_info->pending_id += delta;
}
-/* Move a transaction from the completed list to the polled list */
+/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
-
- list_move_tail(&trans->links, &trans_info->polled);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* This completed transaction is now polled */
+ trans_info->completed_id++;
}
/* Reserve some number of TREs on a channel. Returns true if successful */
@@ -343,20 +336,22 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_channel *channel = &gsi->channel[channel_id];
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
+ u16 trans_index;
if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
- /* We reserve the TREs now, but consume them at commit time.
- * If there aren't enough available, we're done.
- */
+ /* If we can't reserve the TREs for the transaction, we're done */
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the transaction */
- trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans_index = trans_info->free_id % channel->tre_count;
+ trans = &trans_info->trans[trans_index];
+ memset(trans, 0, sizeof(*trans));
+
+ /* Initialize non-zero fields in the transaction */
trans->gsi = gsi;
trans->channel_id = channel_id;
trans->rsvd_count = tre_count;
@@ -367,45 +362,37 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
sg_init_marker(trans->sgl, tre_count);
trans->direction = direction;
-
- spin_lock_bh(&trans_info->spinlock);
-
- list_add_tail(&trans->links, &trans_info->alloc);
-
- spin_unlock_bh(&trans_info->spinlock);
-
refcount_set(&trans->refcount, 1);
+ /* This free transaction is now allocated */
+ trans_info->free_id++;
+
return trans;
}
/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
- refcount_t *refcount = &trans->refcount;
struct gsi_trans_info *trans_info;
- bool last;
- /* We must hold the lock to release the last reference */
- if (refcount_dec_not_one(refcount))
+ if (!refcount_dec_and_test(&trans->refcount))
return;
+ /* Unused transactions are allocated but never committed, pending,
+ * completed, or polled.
+ */
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* Reference might have been added before we got the lock */
- last = refcount_dec_and_test(refcount);
- if (last)
- list_del(&trans->links);
-
- spin_unlock_bh(&trans_info->spinlock);
-
- if (!last)
- return;
-
- if (trans->used_count)
+ if (!trans->used_count) {
+ trans_info->allocated_id++;
+ trans_info->committed_id++;
+ trans_info->pending_id++;
+ trans_info->completed_id++;
+ } else {
ipa_gsi_trans_release(trans);
+ }
+
+ /* This transaction is now free */
+ trans_info->polled_id++;
/* Releasing the reserved TREs implicitly frees the sgl[] and
* (if present) info[] arrays, plus the transaction itself.
@@ -548,8 +535,8 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
*
* Formats channel ring TRE entries based on the content of the scatterlist.
* Maps a transaction pointer to the last ring entry used for the transaction,
- * so it can be recovered when it completes. Moves the transaction to the
- * pending list. Finally, updates the channel ring pointer and optionally
+ * so it can be recovered when it completes. Moves the transaction to
+ * pending state. Finally, updates the channel ring pointer and optionally
* rings the doorbell.
*/
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
@@ -654,23 +641,27 @@ void gsi_trans_complete(struct gsi_trans *trans)
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct gsi_trans *trans;
- bool cancelled;
+ u16 trans_id = trans_info->pending_id;
/* channel->gsi->mutex is held by caller */
- spin_lock_bh(&trans_info->spinlock);
- cancelled = !list_empty(&trans_info->pending);
- list_for_each_entry(trans, &trans_info->pending, links)
- trans->cancelled = true;
+ /* If there are no pending transactions, we're done */
+ if (trans_id == trans_info->committed_id)
+ return;
- list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+ /* Mark all pending transactions cancelled */
+ do {
+ struct gsi_trans *trans;
+
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ trans->cancelled = true;
+ } while (++trans_id != trans_info->committed_id);
- spin_unlock_bh(&trans_info->spinlock);
+ /* All pending transactions are now completed */
+ trans_info->pending_id = trans_info->committed_id;
/* Schedule NAPI polling to complete the cancelled transactions */
- if (cancelled)
- napi_schedule(&channel->napi);
+ napi_schedule(&channel->napi);
}
/* Issue a command to read a single byte from a channel */
@@ -736,10 +727,16 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
* modulo that number to determine the next one that's free.
* Transactions are allocated one at a time.
*/
- ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
- tre_max, 1);
- if (ret)
+ trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
+ GFP_KERNEL);
+ if (!trans_info->trans)
return -ENOMEM;
+ trans_info->free_id = 0; /* all modulo channel->tre_count */
+ trans_info->allocated_id = 0;
+ trans_info->committed_id = 0;
+ trans_info->pending_id = 0;
+ trans_info->completed_id = 0;
+ trans_info->polled_id = 0;
/* A completion event contains a pointer to the TRE that caused
* the event (which will be the last one used by the transaction).
@@ -765,19 +762,13 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
if (ret)
goto err_map_free;
- spin_lock_init(&trans_info->spinlock);
- INIT_LIST_HEAD(&trans_info->alloc);
- INIT_LIST_HEAD(&trans_info->committed);
- INIT_LIST_HEAD(&trans_info->pending);
- INIT_LIST_HEAD(&trans_info->complete);
- INIT_LIST_HEAD(&trans_info->polled);
return 0;
err_map_free:
kfree(trans_info->map);
err_trans_free:
- gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->trans);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
@@ -791,6 +782,6 @@ void gsi_channel_trans_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
gsi_trans_pool_exit(&trans_info->sg_pool);
- gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->trans);
kfree(trans_info->map);
}
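
The list-based bookkeeping removed above is replaced by monotonically increasing 16-bit transaction IDs: a transaction's slot in the trans[] array is its ID modulo the ring size, and the transactions in a given state are those whose IDs lie between two adjacent state counters (for example, the cancel loop above walks the pending set [pending_id, committed_id)). The following is a minimal standalone sketch of that idea only; the toy names, ring size, and exact state-boundary labels are assumptions inferred from the hunks shown, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define DEMO_TRE_COUNT 8    /* toy ring size */

struct demo_trans_info {
    uint16_t free_id;       /* monotonic; next ID to hand out */
    uint16_t allocated_id;  /* oldest allocated, not yet committed */
    uint16_t committed_id;  /* oldest committed, not yet pending */
    uint16_t pending_id;    /* oldest pending, not yet completed */
};

/* Slot in the fixed trans[] array that holds a given transaction ID */
static unsigned int demo_trans_index(uint16_t trans_id)
{
    return trans_id % DEMO_TRE_COUNT;
}

/* Number of transactions currently pending (committed to hardware) */
static uint16_t demo_pending_count(const struct demo_trans_info *ti)
{
    /* 16-bit subtraction handles wraparound of the monotonic IDs */
    return ti->committed_id - ti->pending_id;
}

int main(void)
{
    struct demo_trans_info ti = {
        .free_id = 5, .allocated_id = 5,
        .committed_id = 5, .pending_id = 2,
    };

    /* Prints "pending: 3, oldest pending slot: 2" */
    printf("pending: %u, oldest pending slot: %u\n",
           demo_pending_count(&ti), demo_trans_index(ti.pending_id));

    return 0;
}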
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 7084507830c2..30c1c2dc77c6 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _GSI_TRANS_H_
#define _GSI_TRANS_H_
@@ -29,7 +29,6 @@ struct gsi_trans_pool;
* struct gsi_trans - a GSI transaction
*
 * Most fields in this structure are for internal use by the transaction core code:
- * @links: Links for channel transaction lists by state
* @gsi: GSI pointer
* @channel_id: Channel number transaction is associated with
* @cancelled: If set by the core code, transaction was cancelled
@@ -50,8 +49,6 @@ struct gsi_trans_pool;
* received.
*/
struct gsi_trans {
- struct list_head links; /* gsi_channel lists */
-
struct gsi *gsi;
u8 channel_id;
@@ -77,7 +74,7 @@ struct gsi_trans {
/**
* gsi_trans_pool_init() - Initialize a pool of structures for transactions
- * @pool: GSI transaction poll pointer
+ * @pool: GSI transaction pool pointer
* @size: Size of elements in the pool
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 4fc3c72359f5..09ead433ec38 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_H_
#define _IPA_H_
@@ -44,6 +44,7 @@ struct ipa_interrupt;
* @uc_loaded: true after microcontroller has reported it's ready
* @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
+ * @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
@@ -90,6 +91,7 @@ struct ipa {
dma_addr_t reg_addr;
void __iomem *reg_virt;
+ const struct ipa_regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
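
The new @regs pointer, together with the ipa_reg(), ipa_reg_offset(), ipa_reg_n_offset(), ipa_reg_encode(), ipa_reg_decode(), ipa_reg_bit() and ipa_reg_field_max() calls used throughout the hunks below, replaces the per-version *_FMASK constants with register descriptors looked up at run time. Their real definitions live in ipa_reg.h/ipa_reg.c, which are not part of this excerpt; the following is only a rough sketch of how field-mask-driven helpers of this kind can work, under the assumption that each descriptor carries a base offset, a stride for per-endpoint (N) registers, and a table of field masks. All demo_* names and the example offsets are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct demo_reg {
    uint32_t offset;        /* byte offset of the register */
    uint32_t stride;        /* spacing of per-endpoint (N) instances */
    const uint32_t *fmask;  /* per-field bit masks, indexed by field ID */
};

/* Bit position of a field's lowest bit (fmask assumed non-zero) */
static uint32_t demo_field_shift(uint32_t fmask)
{
    return (uint32_t)__builtin_ctz(fmask);
}

/* Largest value a field can hold (its mask shifted down to bit 0) */
static uint32_t demo_field_max(const struct demo_reg *reg, uint32_t field)
{
    return reg->fmask[field] >> demo_field_shift(reg->fmask[field]);
}

/* Shift a value into position within its field */
static uint32_t demo_encode(const struct demo_reg *reg, uint32_t field, uint32_t val)
{
    uint32_t fmask = reg->fmask[field];

    return (val << demo_field_shift(fmask)) & fmask;
}

/* Extract a field value from a full register value */
static uint32_t demo_decode(const struct demo_reg *reg, uint32_t field, uint32_t val)
{
    uint32_t fmask = reg->fmask[field];

    return (val & fmask) >> demo_field_shift(fmask);
}

/* Byte offset of the Nth instance of a per-endpoint register */
static uint32_t demo_n_offset(const struct demo_reg *reg, uint32_t n)
{
    return reg->offset + n * reg->stride;
}

int main(void)
{
    static const uint32_t fmask[] = { 0x0000001f, 0x00003fe0 };
    struct demo_reg reg = { .offset = 0x840, .stride = 0x70, .fmask = fmask };
    uint32_t val = demo_encode(&reg, 0, 9) | demo_encode(&reg, 1, 100);

    printf("reg[3] @ 0x%x = 0x%08x, field1 = %u (max %u)\n",
           demo_n_offset(&reg, 3), val, demo_decode(&reg, 1, val),
           demo_field_max(&reg, 1));

    return 0;
}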
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 6dea40259b60..26c3db9f52b1 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -32,7 +32,7 @@
* immediate command's opcode. The payload for a command resides in AP
* memory and is described by a single scatterlist entry in its transaction.
* Commands do not require a transaction completion callback, and are
- * (currently) always issued using gsi_trans_commit_wait().
+ * always issued using gsi_trans_commit_wait().
*/
/* Some commands can wait until indicated pipeline stages are clear */
@@ -305,6 +305,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
const char *name;
u32 offset;
@@ -312,7 +313,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -325,7 +327,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 9215ddad1010..8e4243c1f0bb 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_CMD_H_
#define _IPA_CMD_H_
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index e15eb3cd3e33..e5a6ce75c7dd 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
@@ -31,7 +31,7 @@
* communication path between the IPA and a particular execution environment
* (EE), such as the AP or Modem. Each EE has a set of channels associated
* with it, and each channel has an ID unique for that EE. For the most part
- * the only GSI channels of concern to this driver belong to the AP
+ * the only GSI channels of concern to this driver belong to the AP.
*
* An endpoint is an IPA construct representing a single channel anywhere
* in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 66d2bfdf9e42..093e11ec7c2d 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -23,8 +23,6 @@
#include "ipa_gsi.h"
#include "ipa_power.h"
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-
/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
@@ -72,14 +70,6 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_5)
- return field_max(aggr_byte_limit_fmask(true));
-
- return field_max(aggr_byte_limit_fmask(false));
-}
-
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
@@ -111,6 +101,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
+ const struct ipa_reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
@@ -171,7 +162,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
*/
aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- limit = aggr_byte_limit_max(ipa->version);
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+
+ limit = ipa_reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
@@ -182,6 +175,15 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true; /* Nothing more to check for RX */
}
+ /* Starting with IPA v4.5 sequencer replication is obsolete */
+ if (ipa->version >= IPA_VERSION_4_5) {
+ if (data->endpoint.config.tx.seq_rep_type) {
+ dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+ }
+
if (data->endpoint.config.status_enable) {
other_name = data->endpoint.config.tx.status_endpoint;
if (other_name >= count) {
@@ -299,8 +301,10 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
- u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 field_id;
+ u32 offset;
bool state;
u32 mask;
u32 val;
@@ -310,9 +314,13 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
- mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
-
+ reg = ipa_reg(ipa, ENDP_INIT_CTRL);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
+
+ field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
+ mask = ipa_reg_bit(reg, field_id);
+
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
@@ -339,13 +347,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
WARN_ON(!(mask & ipa->available));
- offset = ipa_reg_state_aggr_active_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
return !!(val & mask);
}
@@ -354,10 +362,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
WARN_ON(!(mask & ipa->available));
- iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+ reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
+ iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -456,6 +466,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
while (initialized) {
u32 endpoint_id = __ffs(initialized);
struct ipa_endpoint *endpoint;
+ const struct ipa_reg *reg;
u32 offset;
initialized ^= BIT(endpoint_id);
@@ -465,7 +476,8 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -485,22 +497,23 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_CFG);
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->config.checksum) {
- enum ipa_version version = endpoint->ipa->version;
+ enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
- u32 checksum_offset;
+ u32 off;
/* Checksum header offset is in 4-byte units */
- checksum_offset = sizeof(struct rmnet_map_header);
- checksum_offset /= sizeof(u32);
- val |= u32_encode_bits(checksum_offset,
- CS_METADATA_HDR_OFFSET_FMASK);
+ off = sizeof(struct rmnet_map_header) / sizeof(u32);
+ val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -513,24 +526,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
+ val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
- u32 offset;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
- offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
- val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
+ reg = ipa_reg(ipa, ENDP_INIT_NAT);
+ val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static u32
@@ -554,6 +569,50 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
return header_size;
}
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static u32 ipa_header_size_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 header_size)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(header_size > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ header_size >>= hweight32(field_max);
+ WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
+ val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+
+ return val;
+}
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static u32 ipa_metadata_offset_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 offset)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset & field_max);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(offset > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(field_max);
+ WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+
+ return val;
+}
+
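
A small worked example of the low-bits/MSB split performed by the two helpers above: because field_max has the form 2^n - 1, hweight32(field_max) is the field width n, so the low n bits land in the base field and the remainder goes into the *_MSB field on IPA v4.5+. The 6-bit width below is an arbitrary assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t field_max = 0x3f;  /* assume a 6-bit HDR_LEN field: 2^6 - 1 */
    uint32_t width = (uint32_t)__builtin_popcount(field_max); /* hweight32() */
    uint32_t header_size = 100;

    uint32_t low = header_size & field_max;  /* goes in HDR_LEN */
    uint32_t msb = header_size >> width;     /* goes in HDR_LEN_MSB (v4.5+) */

    /* Recombining must give back the original value: 1 * 64 + 36 == 100 */
    printf("low=%u msb=%u recombined=%u\n", low, msb, (msb << width) | low);

    return 0;
}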
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -577,36 +636,38 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_HDR);
if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
header_size = ipa_qmap_header_size(version, endpoint);
- val = ipa_header_size_encoded(version, header_size);
+ val = ipa_header_size_encode(version, reg, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 offset; /* Field offset within header */
+ u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
- offset = offsetof(struct rmnet_map_header, mux_id);
- val |= ipa_metadata_offset_encoded(version, offset);
+ off = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encode(version, reg, off);
/* Where IPA will write the length */
- offset = offsetof(struct rmnet_map_header, pkt_len);
+ off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
+ off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
- val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
+ val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -614,19 +675,21 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->config.rx.pad_align;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
- val |= HDR_ENDIANNESS_FMASK; /* big endian */
+ val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
@@ -636,16 +699,16 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
- val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+ val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
+ val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+ val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
@@ -653,191 +716,170 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 offset;
+ u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 off; /* Field offset within header */
- offset = offsetof(struct rmnet_map_header, pkt_len);
- offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
- val |= u32_encode_bits(offset,
- HDR_OFST_PKT_SIZE_MSB_FMASK);
+ off = offsetof(struct rmnet_map_header, pkt_len);
+ /* Low bits are in the ENDP_INIT_HDR register */
+ off >>= hweight32(mask);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + offset);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
- offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_MODE);
if (endpoint->config.dma_mode) {
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
- u32 dma_endpoint_id;
+ u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
-
- val = u32_encode_bits(IPA_DMA, MODE_FMASK);
- val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
}
-/* Encoded values for AGGR endpoint register fields */
-static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
+/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
+ * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
+ * they're configured to have granularity 100 usec and 1 msec, respectively.
+ *
+ * The return value is the positive or negative Qtime value to use to
+ * express the (microsecond) time provided. A positive return value
+ * means pulse generator 0 can be used; otherwise use pulse generator 1.
+ */
+static int ipa_qtime_val(u32 microseconds, u32 max)
{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
+ u32 val;
- return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
+ /* Use 100 microsecond granularity if possible */
+ val = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (val <= max)
+ return (int)val;
+
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ val = DIV_ROUND_CLOSEST(microseconds, 1000);
+ WARN_ON(val > max);
+
+ return (int)-val;
}
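
A standalone illustration of the sign convention just described: with a hypothetical 5-bit limit field (max == 31), 3000 microseconds fits pulse generator 0 (30 ticks of 100 usec), while 20000 microseconds does not and comes back negated for pulse generator 1 (20 ticks of 1 msec). DEMO_DIV_ROUND_CLOSEST stands in for the kernel's DIV_ROUND_CLOSEST(); the field width is an assumption for illustration.

#include <stdio.h>

#define DEMO_DIV_ROUND_CLOSEST(x, d)    (((x) + (d) / 2) / (d))

static int demo_qtime_val(unsigned int microseconds, unsigned int max)
{
    unsigned int val;

    /* Prefer pulse generator 0 (100 usec granularity) */
    val = DEMO_DIV_ROUND_CLOSEST(microseconds, 100);
    if (val <= max)
        return (int)val;

    /* Fall back to pulse generator 1 (1 msec granularity) */
    return -(int)DEMO_DIV_ROUND_CLOSEST(microseconds, 1000);
}

int main(void)
{
    unsigned int max = 31;  /* assume a 5-bit TIME_LIMIT field */

    printf("%d\n", demo_qtime_val(3000, max));   /* 30: 100 usec ticks */
    printf("%d\n", demo_qtime_val(20000, max));  /* -20: 1 msec ticks */

    return 0;
}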
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
- u32 gran_sel;
- u32 fmask;
+ u32 max;
u32 val;
- if (version < IPA_VERSION_4_5) {
- /* We set aggregation granularity in ipa_hardware_config() */
- fmask = aggr_time_limit_fmask(true);
- val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
- WARN(val > field_max(fmask),
- "aggr_time_limit too large (%u > %u usec)\n",
- val, field_max(fmask) * IPA_AGGR_GRANULARITY);
-
- return u32_encode_bits(val, fmask);
- }
-
- /* IPA v4.5 expresses the time limit using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- fmask = aggr_time_limit_fmask(false);
- val = DIV_ROUND_CLOSEST(limit, 100);
- if (val > field_max(fmask)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = AGGR_GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(limit, 1000);
- WARN(val > field_max(fmask),
- "aggr_time_limit too large (%u > %u usec)\n",
- limit, field_max(fmask) * 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
- }
+ if (!microseconds)
+ return 0; /* Nothing to compute if time limit is 0 */
- return gran_sel | u32_encode_bits(val, fmask);
-}
+ max = ipa_reg_field_max(reg, TIME_LIMIT);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
-static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
-{
- u32 val = enabled ? 1 : 0;
+ return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
+ }
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
+ /* We program aggregation granularity in ipa_hardware_config() */
+ val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ microseconds, max * IPA_AGGR_GRANULARITY);
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
+ return ipa_reg_encode(reg, TIME_LIMIT, val);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
- enum ipa_version version = endpoint->ipa->version;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
- bool close_eof;
u32 limit;
rx_config = &endpoint->config.rx;
- val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- val |= aggr_byte_limit_encoded(version, limit);
+ val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
- val |= aggr_time_limit_encoded(version, limit);
+ val |= aggr_time_limit_encode(ipa, reg, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = rx_config->aggr_close_eof;
- val |= aggr_sw_eof_active_encoded(version, close_eof);
+ if (rx_config->aggr_close_eof)
+ val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
- AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
-}
-
-/* Return the Qtime-based head-of-line blocking timer value that
- * represents the given number of microseconds. The result
- * includes both the timer value and the selected timer granularity.
- */
-static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
-{
- u32 gran_sel;
- u32 val;
-
- /* IPA v4.5 expresses time limits using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val > field_max(TIME_LIMIT_FMASK)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
- }
-
- return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -845,12 +887,11 @@ static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
* derived from the 19.2 MHz SoC XO clock. For older IPA versions
* each tick represents 128 cycles of the IPA core clock.
*
- * Return the encoded value that should be written to that register
- * that represents the timeout period provided. For IPA v4.2 this
- * encodes a base and scale value, while for earlier versions the
- * value is a simple tick count.
+ * Return the encoded value representing the timeout period provided
+ * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
u32 width;
u32 scale;
@@ -862,18 +903,34 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
- if (ipa->version >= IPA_VERSION_4_5)
- return hol_block_timer_qtime_val(ipa, microseconds);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
- /* Use 64 bit arithmetic to avoid overflow... */
+ return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ }
+
+ /* Use 64 bit arithmetic to avoid overflow */
rate = ipa_core_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
- /* ...but we still need to fit into a 32-bit register */
- WARN_ON(ticks > U32_MAX);
+
+ /* We still need the result to fit into the field */
+ WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return (u32)ticks;
+ return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -883,8 +940,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
* count, and extract the number of bits in the base field
* such that high bit is included.
*/
- high = fls(ticks); /* 1..32 */
- width = HWEIGHT32(BASE_VALUE_FMASK);
+ high = fls(ticks); /* 1..32 (or warning above) */
+ width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -894,8 +951,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
scale++;
}
- val = u32_encode_bits(scale, SCALE_FMASK);
- val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
+ val = ipa_reg_encode(reg, TIMER_SCALE, scale);
+ val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
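
A worked example of the IPA v4.2 base/scale representation used at the end of the function above: the tick count is approximated as base << scale, with base confined to the TIMER_BASE_VALUE field width (the driver additionally rounds up before shifting to get a closer result, which this sketch omits). The 5-bit width is an assumption for illustration only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t width = 5;     /* assumed TIMER_BASE_VALUE field width */
    uint32_t ticks = 1000;
    uint32_t high = 32 - (uint32_t)__builtin_clz(ticks);  /* fls(ticks) == 10 */
    uint32_t scale = high > width ? high - width : 0;     /* 5 */
    uint32_t base = ticks >> scale;                        /* 31 */

    /* The hardware timer counts roughly base << scale ticks */
    printf("base=%u scale=%u approx=%u (wanted %u)\n",
           base, scale, base << scale, ticks);             /* 992 vs 1000 */

    return 0;
}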
@@ -906,28 +963,34 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
- val = hol_block_timer_val(ipa, microseconds);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
+ val = hol_block_timer_encode(ipa, reg, microseconds);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- val = enable ? HOL_BLOCK_EN_FMASK : 0;
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
+ val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+
+ iowrite32(val, ipa->reg_virt + offset);
+
/* When enabling, the register must be written twice for IPA v4.5+ */
- if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ if (enable && ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
@@ -960,46 +1023,58 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ u32 resource_group = endpoint->config.resource_group;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
- val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
+ val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
- u32 val = 0;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_SEQ);
+
/* Low-order byte configures primary packet processing */
- val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
+ val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
- /* Second byte configures replicated packet processing */
- val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
- SEQ_REP_TYPE_FMASK);
+ /* Second byte (if supported) configures replicated packet processing */
+ if (ipa->version < IPA_VERSION_4_5)
+ val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/**
@@ -1049,13 +1124,12 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
-
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
- val |= STATUS_EN_FMASK;
+ val |= ipa_reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
@@ -1063,16 +1137,16 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= u32_encode_bits(status_endpoint_id,
- STATUS_ENDP_FMASK);
+ val |= ipa_reg_encode(reg, STATUS_ENDP,
+ status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning status element precedes
- * packet (not present for IPA v4.5)
+ * packet (not present for IPA v4.5+)
*/
- /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
@@ -1412,16 +1486,18 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
+ const struct ipa_reg *reg;
u32 val;
+ reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_HDR_TABLE_FMASK;
- val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
- val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+ val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ /* ROUTE_DEF_HDR_OFST is 0 */
+ val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1765,6 +1841,7 @@ void ipa_endpoint_teardown(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
u32 initialized;
u32 rx_base;
u32 rx_mask;
@@ -1791,11 +1868,12 @@ int ipa_endpoint_config(struct ipa *ipa)
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
- val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+ reg = ipa_reg(ipa, FLAVOR_0);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
+ rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1804,7 +1882,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
+ max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
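
For illustration, how the FLAVOR_0 fields decoded above combine into the bitmap of available endpoints; the producer/consumer counts are made up, and DEMO_GENMASK mirrors the kernel's GENMASK().

#include <stdint.h>
#include <stdio.h>

/* DEMO_GENMASK(h, l): bits h..l set, as in include/linux/bits.h */
#define DEMO_GENMASK(h, l) \
    ((uint32_t)(((~0U) << (l)) & (~0U >> (31 - (h)))))

int main(void)
{
    uint32_t rx_base = 8;       /* PROD_LOWEST */
    uint32_t prod_pipes = 4;    /* MAX_PROD_PIPES */
    uint32_t cons_pipes = 8;    /* MAX_CONS_PIPES */

    /* RX endpoints occupy bits 8..11, TX endpoints bits 0..7 */
    uint32_t rx_mask = DEMO_GENMASK(rx_base + prod_pipes - 1, rx_base);
    uint32_t tx_mask = DEMO_GENMASK(cons_pipes - 1, 0);

    printf("available endpoints: 0x%08x\n", rx_mask | tx_mask); /* 0x00000fff */

    return 0;
}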
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 28e0a7386fd7..d8dfa24f5214 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 307bed2ee707..c269432f9c2e 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
/* DOC: IPA Interrupts
@@ -53,13 +53,15 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
+ const struct ipa_reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
if (uc_irq)
iowrite32(mask, ipa->reg_virt + offset);
@@ -80,6 +82,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ const struct ipa_reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -95,7 +98,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- offset = ipa_reg_irq_stts_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_STTS);
+ offset = ipa_reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -112,7 +116,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
iowrite32(pending, ipa->reg_virt + offset);
}
out_power_put:
@@ -128,6 +133,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id);
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
@@ -137,7 +143,8 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_en_offset(ipa->version);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
+ offset = ipa_reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
@@ -164,18 +171,18 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- offset = ipa_reg_irq_suspend_info_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
@@ -189,7 +196,7 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
return;
@@ -198,8 +205,9 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Remove the handler for an IPA interrupt type */
@@ -207,15 +215,16 @@ void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
return;
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
interrupt->handler[ipa_irq] = NULL;
}
@@ -225,8 +234,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
+ const struct ipa_reg *reg;
unsigned int irq;
- u32 offset;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
@@ -244,8 +253,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(0, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 231390cea52a..f31fd9965fdc 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_INTERRUPT_H_
#define _IPA_INTERRUPT_H_
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 32962d885acd..3461ad3029ab 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -183,31 +183,97 @@ static void ipa_teardown(struct ipa *ipa)
gsi_teardown(&ipa->gsi);
}
+static void
+ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
+{
+ const struct ipa_reg *reg;
+ u32 val;
+
+ /* IPA v4.5+ has no backward compatibility register */
+ if (ipa->version >= IPA_VERSION_4_5)
+ return;
+
+ reg = ipa_reg(ipa, IPA_BCR);
+ val = data->backward_compat;
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_tx(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
+ return;
+
+ /* Disable PA mask to allow HOLB drop */
+ reg = ipa_reg(ipa, IPA_TX_CFG);
+ offset = ipa_reg_offset(reg);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_hardware_config_clkon(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 val;
+
+ if (version >= IPA_VERSION_4_5)
+ return;
+
+ if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
+ return;
+
+ /* Implement some hardware workarounds */
+ reg = ipa_reg(ipa, CLKON_CFG);
+ if (version == IPA_VERSION_3_1) {
+ /* Disable MISC clock gating */
+ val = ipa_reg_bit(reg, CLKON_MISC);
+ } else { /* IPA v4.0+ */
+ /* Enable open global clocks in the CLKON configuration */
+ val = ipa_reg_bit(reg, CLKON_GLOBAL);
+ val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ }
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Nothing to configure prior to IPA v4.0 */
if (ipa->version < IPA_VERSION_4_0)
return;
- val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ reg = ipa_reg(ipa, COMP_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
- val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
- val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
- /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
+ /* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
- val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
- iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Configure DDR and (possibly) PCIe max read/write QSB values */
@@ -216,6 +282,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
+ const struct ipa_reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -224,25 +291,31 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
/* Max outstanding write accesses for QSB masters */
- val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_WRITES);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= u32_encode_bits(data1->max_writes,
- GEN_QMB_1_MAX_WRITES_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
+ data1->max_writes);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
- val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_READS);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data0->max_reads_beats,
- GEN_QMB_0_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val |= u32_encode_bits(data1->max_reads,
- GEN_QMB_1_MAX_READS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
+ data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data1->max_reads_beats,
- GEN_QMB_1_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -278,48 +351,96 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
- iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
+ val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+ val |= ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
- val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
+ val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
- val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+
+ iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ val |= ipa_reg_bit(reg, DIV_ENABLE);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Before IPA v4.5 timing is controlled by a counter register */
+static void ipa_hardware_config_counter(struct ipa *ipa)
+{
+ u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ const struct ipa_reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, COUNTER_CFG);
+ /* If defined, EOT_COAL_GRANULARITY is 0 */
+ val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_timing(struct ipa *ipa)
+{
+ if (ipa->version < IPA_VERSION_4_5)
+ ipa_hardware_config_counter(ipa);
+ else
+ ipa_qtime_config(ipa);
+}
+
+static void ipa_hardware_config_hashing(struct ipa *ipa)
+{
+ const struct ipa_reg *reg;
+
+ if (ipa->version != IPA_VERSION_4_2)
+ return;
+
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
+
+ /* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
+ * IPV4_FILTER_HASH are all zero.
+ */
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- val = u32_encode_bits(enter_idle_debounce_thresh,
- ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
+ val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= CONST_NON_IDLE_ENABLE_FMASK;
+ val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
- offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -349,55 +470,13 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
- enum ipa_version version = ipa->version;
- u32 granularity;
- u32 val;
-
- /* IPA v4.5+ has no backward compatibility register */
- if (version < IPA_VERSION_4_5) {
- val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
- }
-
- /* Implement some hardware workarounds */
- if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
- /* Disable PA mask to allow HOLB drop */
- val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
-
- /* Enable open global clocks in the CLKON configuration */
- val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
- } else if (version == IPA_VERSION_3_1) {
- val = MISC_FMASK; /* Disable MISC clock gating */
- } else {
- val = 0; /* No CLKON configuration needed */
- }
- if (val)
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
+ ipa_hardware_config_bcr(ipa, data);
+ ipa_hardware_config_tx(ipa);
+ ipa_hardware_config_clkon(ipa);
ipa_hardware_config_comp(ipa);
-
- /* Configure system bus limits */
ipa_hardware_config_qsb(ipa, data);
-
- if (version < IPA_VERSION_4_5) {
- /* Configure aggregation timer granularity */
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- } else {
- ipa_qtime_config(ipa);
- }
-
- /* IPA v4.2 does not support hashed tables, so disable them */
- if (version == IPA_VERSION_4_2) {
- u32 offset = ipa_reg_filt_rout_hash_en_offset(version);
-
- iowrite32(0, ipa->reg_virt + offset);
- }
-
- /* Enable dynamic clock division */
+ ipa_hardware_config_timing(ipa);
+ ipa_hardware_config_hashing(ipa);
ipa_hardware_dcd_config(ipa);
}
@@ -612,29 +691,6 @@ static void ipa_validate_build(void)
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
- BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY_FMASK));
-}
-
-static bool ipa_version_valid(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_0:
- case IPA_VERSION_3_1:
- case IPA_VERSION_3_5:
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- case IPA_VERSION_4_2:
- case IPA_VERSION_4_5:
- case IPA_VERSION_4_7:
- case IPA_VERSION_4_9:
- case IPA_VERSION_4_11:
- return true;
-
- default:
- return false;
- }
}
/**
@@ -678,8 +734,8 @@ static int ipa_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!ipa_version_valid(data->version)) {
- dev_err(dev, "invalid IPA version\n");
+ if (!ipa_version_supported(data->version)) {
+ dev_err(dev, "unsupported IPA version %u\n", data->version);
return -EINVAL;
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 53a1dbeaffa6..f84c6830495a 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -112,8 +113,10 @@ int ipa_mem_setup(struct ipa *ipa)
/* Tell the hardware where the processing context area is located */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
- val = proc_cntxt_base_addr_encoded(ipa->version, offset);
- iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
+
+ reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
+ val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
return 0;
}
@@ -306,6 +309,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
@@ -314,12 +318,14 @@ int ipa_mem_config(struct ipa *ipa)
u32 i;
/* Check the advertised location and size of the shared memory area */
- val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+ reg = ipa_reg(ipa, SHARED_MEM_SIZE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+ mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index c8b1c4d9c507..423422a2a445 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/errno.h>
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index e64ccc2402e9..d85718db9a57 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_MODEM_H_
#define _IPA_MODEM_H_
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index db5ac7552286..8420f93128a2 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/clk.h>
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 6f84f057a209..896f052e51a1 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_POWER_H_
#define _IPA_POWER_H_
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index ec010cf2e816..8295fd4b70d1 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 856ef629ccc8..1c236826c17a 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_H_
#define _IPA_QMI_H_
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 6838e8065072..97c0befe8d86 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/stddef.h>
#include <linux/soc/qcom/qmi.h>
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 495e85abe50b..e29663965f43 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_MSG_H_
#define _IPA_QMI_MSG_H_
@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes, but it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index e6147a1cd787..22f067741d9b 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/io.h>
@@ -9,11 +9,105 @@
#include "ipa.h"
#include "ipa_reg.h"
+/* Is this register valid and defined for the current IPA version? */
+static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ enum ipa_version version = ipa->version;
+ bool valid;
+
+ /* Check for bogus (out of range) register IDs */
+ if ((u32)reg_id >= ipa->regs->reg_count)
+ return false;
+
+ switch (reg_id) {
+ case IPA_BCR:
+ case COUNTER_CFG:
+ valid = version < IPA_VERSION_4_5;
+ break;
+
+ case IPA_TX_CFG:
+ case FLAVOR_0:
+ case IDLE_INDICATION_CFG:
+ valid = version >= IPA_VERSION_3_5;
+ break;
+
+ case QTIME_TIMESTAMP_CFG:
+ case TIMERS_XO_CLK_DIV_CFG:
+ case TIMERS_PULSE_GRAN_CFG:
+ valid = version >= IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_45_RSRC_TYPE:
+ case DST_RSRC_GRP_45_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_67_RSRC_TYPE:
+ case DST_RSRC_GRP_67_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1;
+ break;
+
+ case ENDP_FILTER_ROUTER_HSH_CFG:
+ valid = version != IPA_VERSION_4_2;
+ break;
+
+ case IRQ_SUSPEND_EN:
+ case IRQ_SUSPEND_CLR:
+ valid = version >= IPA_VERSION_3_1;
+ break;
+
+ default:
+ valid = true; /* Others should be defined for all versions */
+ break;
+ }
+
+ /* To be valid, it must be defined */
+
+ return valid && ipa->regs->reg[reg_id];
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ return NULL;
+
+ return ipa->regs->reg[reg_id];
+}
+
+static const struct ipa_regs *ipa_regs(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ return &ipa_regs_v3_1;
+ case IPA_VERSION_3_5_1:
+ return &ipa_regs_v3_5_1;
+ case IPA_VERSION_4_2:
+ return &ipa_regs_v4_2;
+ case IPA_VERSION_4_5:
+ return &ipa_regs_v4_5;
+ case IPA_VERSION_4_9:
+ return &ipa_regs_v4_9;
+ case IPA_VERSION_4_11:
+ return &ipa_regs_v4_11;
+ default:
+ return NULL;
+ }
+}
+
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_regs *regs;
struct resource *res;
+ regs = ipa_regs(ipa->version);
+ if (!regs)
+ return -EINVAL;
+
+ if (WARN_ON(regs->reg_count > IPA_REG_ID_COUNT))
+ return -EINVAL;
+
/* Setup IPA register memory */
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-reg");
@@ -28,6 +122,7 @@ int ipa_reg_init(struct ipa *ipa)
return -ENOMEM;
}
ipa->reg_addr = res->start;
+ ipa->regs = regs;
return 0;
}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 6f35438cda89..7bf70f70f63f 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
#include <linux/bitfield.h>
+#include <linux/bug.h>
#include "ipa_version.h"
@@ -16,304 +17,325 @@ struct ipa;
* DOC: IPA Registers
*
* IPA registers are located within the "ipa-reg" address space defined by
- * Device Tree. The offset of each register within that space is specified
- * by symbols defined below. The address space is mapped to virtual memory
- * space in ipa_mem_init(). All IPA registers are 32 bits wide.
+ * Device Tree. Each register has a specified offset within that space,
+ * which is mapped into virtual memory space in ipa_mem_init(). Each
+ * has a unique identifier, taken from the ipa_reg_id enumerated type.
+ * All IPA registers are 32 bits wide.
*
- * Certain register types are duplicated for a number of instances of
- * something. For example, each IPA endpoint has an set of registers
- * defining its configuration. The offset to an endpoint's set of registers
- * is computed based on an "base" offset, plus an endpoint's ID multiplied
- * and a "stride" value for the register. For such registers, the offset is
- * computed by a function-like macro that takes a parameter used in the
- * computation.
+ * Certain "parameterized" register types are duplicated for a number of
+ * instances of something. For example, each IPA endpoint has a set of
+ * registers defining its configuration. The offset to an endpoint's set
+ * of registers is computed based on a "base" offset, plus an endpoint's
+ * ID multiplied by a "stride" value for the register. Similarly, some
+ * registers have an offset that depends on execution environment. In
+ * this case, the stride is multiplied by a member of the gsi_ee_id
+ * enumerated type.
*
- * Some register offsets depend on execution environment. For these an "ee"
- * parameter is supplied to the offset macro. The "ee" value is a member of
- * the gsi_ee enumerated type.
+ * Each version of IPA implements an array of ipa_reg structures indexed
+ * by register ID. Each entry in the array specifies the base offset and
+ * (for parameterized registers) a non-zero stride value. Not all versions
+ * of IPA define all registers. The offset for a register is returned by
+ * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * zero is returned for an undefined register (this should never happen).
*
- * The offset of a register dependent on endpoint ID is computed by a macro
- * that is supplied a parameter "ep", "txep", or "rxep". A register with an
- * "ep" parameter is valid for any endpoint; a register with a "txep" or
- * "rxep" parameter is valid only for TX or RX endpoints, respectively. The
- * "*ep" value is assumed to be less than the maximum valid endpoint ID
- * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
- *
- * The offset of registers related to filter and route tables is computed
- * by a macro that is supplied a parameter "er". The "er" represents an
- * endpoint ID for filters, or a route ID for routes. For filters, the
- * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
- * because not all endpoints support filtering. For routes, the route ID
- * must be less than IPA_ROUTE_MAX.
- *
- * The offset of registers related to resource types is computed by a macro
- * that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is a member of the ipa_resource_type_src enumerated type for
- * source endpoint resources or the ipa_resource_type_dst enumerated type
- * for destination endpoint resources.
- *
- * Some registers encode multiple fields within them. For these, each field
- * has a symbol below defining a field mask that encodes both the position
- * and width of the field within its register.
- *
- * In some cases, different versions of IPA hardware use different offset or
- * field mask values. In such cases an inline_function(ipa) is used rather
- * than a MACRO to define the offset or field mask to use.
- *
- * Finally, some registers hold bitmasks representing endpoints. In such
- * cases the @available field in the @ipa structure defines the "full" set
- * of valid bits for the register.
+ * Some registers encode multiple fields within them. Each field in
+ * such a register has a unique identifier (from an enumerated type).
+ * The position and width of the fields in a register are defined by
+ * an array of field masks, indexed by field ID. Two functions are
+ * used to access register fields; both take an ipa_reg structure as
+ * argument. To encode a value to be represented in a register field,
+ * the value and field ID are passed to ipa_reg_encode(). To extract
+ * a value encoded in a register field, the field ID is passed to
+ * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * can be used to either encode the bit value, or to generate a mask
+ * used to extract the bit value.
*/
-#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
-/* The next field is not supported for IPA v4.0+, not present for IPA v4.5+ */
-#define ENABLE_FMASK GENMASK(0, 0)
-/* The next field is present for IPA v4.7+ */
-#define RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS_FMASK GENMASK(0, 0)
-#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
-#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
-#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
-/* The next field is not present for IPA v4.5+ */
-#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
-/* The next twelve fields are present for IPA v4.0+ */
-#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
-#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
-#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
-#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
-#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
-#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
-#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
-#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
-#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
-#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
-#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
-#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
-/* The next five fields are present for IPA v4.9+ */
-#define QMB_RAM_RD_CACHE_DISABLE_FMASK GENMASK(19, 19)
-#define GENQMB_AOOOWR_FMASK GENMASK(20, 20)
-#define IF_OUT_OF_BUF_STOP_RESET_MASK_EN_FMASK GENMASK(21, 21)
-#define GEN_QMB_1_DYNAMIC_ASIZE_FMASK GENMASK(30, 30)
-#define GEN_QMB_0_DYNAMIC_ASIZE_FMASK GENMASK(31, 31)
-
-/* Encoded value for COMP_CFG register ATOMIC_FETCHER_ARB_LOCK_DIS field */
-static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
- u32 mask)
-{
- WARN_ON(version < IPA_VERSION_4_0);
+/* enum ipa_reg_id - IPA register IDs */
+enum ipa_reg_id {
+ COMP_CFG,
+ CLKON_CFG,
+ ROUTE,
+ SHARED_MEM_SIZE,
+ QSB_MAX_WRITES,
+ QSB_MAX_READS,
+ FILT_ROUT_HASH_EN,
+ FILT_ROUT_HASH_FLUSH,
+ STATE_AGGR_ACTIVE,
+ IPA_BCR, /* Not IPA v4.5+ */
+ LOCAL_PKT_PROC_CNTXT,
+ AGGR_FORCE_CLOSE,
+ COUNTER_CFG, /* Not IPA v4.5+ */
+ IPA_TX_CFG, /* IPA v3.5+ */
+ FLAVOR_0, /* IPA v3.5+ */
+ IDLE_INDICATION_CFG, /* IPA v3.5+ */
+ QTIME_TIMESTAMP_CFG, /* IPA v4.5+ */
+ TIMERS_XO_CLK_DIV_CFG, /* IPA v4.5+ */
+ TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
+ SRC_RSRC_GRP_01_RSRC_TYPE,
+ SRC_RSRC_GRP_23_RSRC_TYPE,
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ DST_RSRC_GRP_01_RSRC_TYPE,
+ DST_RSRC_GRP_23_RSRC_TYPE,
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
+ ENDP_INIT_CFG,
+ ENDP_INIT_NAT, /* TX only */
+ ENDP_INIT_HDR,
+ ENDP_INIT_HDR_EXT,
+ ENDP_INIT_HDR_METADATA_MASK, /* RX only */
+ ENDP_INIT_MODE, /* TX only */
+ ENDP_INIT_AGGR,
+ ENDP_INIT_HOL_BLOCK_EN, /* RX only */
+ ENDP_INIT_HOL_BLOCK_TIMER, /* RX only */
+ ENDP_INIT_DEAGGR, /* TX only */
+ ENDP_INIT_RSRC_GRP,
+ ENDP_INIT_SEQ, /* TX only */
+ ENDP_STATUS,
+ ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
+ /* The IRQ registers are only used for GSI_EE_AP */
+ IPA_IRQ_STTS,
+ IPA_IRQ_EN,
+ IPA_IRQ_CLR,
+ IPA_IRQ_UC,
+ IRQ_SUSPEND_INFO,
+ IRQ_SUSPEND_EN, /* IPA v3.1+ */
+ IRQ_SUSPEND_CLR, /* IPA v3.1+ */
+ IPA_REG_ID_COUNT, /* Last; not an ID */
+};
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(20, 17));
+/**
+ * struct ipa_reg - An IPA register descriptor
+ * @offset: Register offset relative to base of the "ipa-reg" memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the IPA register
+ */
+struct ipa_reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
- if (version == IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(24, 22));
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define IPA_REG(__NAME, __reg_id, __offset) \
+ IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
- return u32_encode_bits(mask, GENMASK(23, 22));
-}
+/* Helper macro for defining parameterized registers, specifying stride */
+#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
-/* Encoded value for COMP_CFG register FULL_FLUSH_WAIT_RS_CLOSURE_EN field */
-static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
- bool enable)
-{
- u32 val = enable ? 1 : 0;
+#define IPA_REG_FIELDS(__NAME, __name, __offset) \
+ IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
- WARN_ON(version < IPA_VERSION_4_5);
+#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
+ .fmask = ipa_reg_ ## __name ## _fmask, \
+ }
- if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
- return u32_encode_bits(val, GENMASK(21, 21));
+/**
+ * struct ipa_regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct ipa_regs {
+ u32 reg_count;
+ const struct ipa_reg **reg;
+};
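The macros above only construct descriptors; to show how they fit together, here is a hypothetical per-version fragment (not part of this hunk; the real tables live in separate ipa_reg-vX.Y.c source files). The register offsets and field positions shown are the ones this patch removes from this header, and the array and table names are stand-ins.

static const u32 ipa_reg_qsb_max_writes_fmask[] = {
	[GEN_QMB_0_MAX_WRITES]	= GENMASK(3, 0),
	[GEN_QMB_1_MAX_WRITES]	= GENMASK(7, 4),
};

IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);

/* A register with no field definitions only needs its offset */
IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);

/* Each per-version file gathers its descriptors into a struct ipa_regs */
static const struct ipa_reg *ipa_reg_array[] = {
	[QSB_MAX_WRITES]	= &ipa_reg_qsb_max_writes,
	[AGGR_FORCE_CLOSE]	= &ipa_reg_aggr_force_close,
};

const struct ipa_regs ipa_regs_vX_Y = {	/* stand-in for e.g. ipa_regs_v4_5 */
	.reg_count	= ARRAY_SIZE(ipa_reg_array),
	.reg		= ipa_reg_array,
};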
- return u32_encode_bits(val, GENMASK(17, 17));
-}
+/* COMP_CFG register */
+enum ipa_reg_comp_cfg_field_id {
+ COMP_CFG_ENABLE, /* Not IPA v4.0+ */
+ RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS, /* IPA v4.7+ */
+ GSI_SNOC_BYPASS_DIS,
+ GEN_QMB_0_SNOC_BYPASS_DIS,
+ GEN_QMB_1_SNOC_BYPASS_DIS,
+ IPA_DCMP_FAST_CLK_EN, /* Not IPA v4.5+ */
+ IPA_QMB_SELECT_CONS_EN, /* IPA v4.0+ */
+ IPA_QMB_SELECT_PROD_EN, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS, /* IPA v4.0+ */
+ GSI_SNOC_CNOC_LOOP_PROT_DISABLE, /* IPA v4.0+ */
+ GSI_MULTI_AXI_MASTERS_DIS, /* IPA v4.0+ */
+ IPA_QMB_SELECT_GLOBAL_EN, /* IPA v4.0+ */
+ QMB_RAM_RD_CACHE_DISABLE, /* IPA v4.9+ */
+ GENQMB_AOOOWR, /* IPA v4.9+ */
+ IF_OUT_OF_BUF_STOP_RESET_MASK_EN, /* IPA v4.9+ */
+ GEN_QMB_1_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ GEN_QMB_0_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ ATOMIC_FETCHER_ARB_LOCK_DIS, /* IPA v4.0+ */
+ FULL_FLUSH_WAIT_RS_CLOSURE_EN, /* IPA v4.5+ */
+};
-#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
-#define RX_FMASK GENMASK(0, 0)
-#define PROC_FMASK GENMASK(1, 1)
-#define TX_WRAPPER_FMASK GENMASK(2, 2)
-#define MISC_FMASK GENMASK(3, 3)
-#define RAM_ARB_FMASK GENMASK(4, 4)
-#define FTCH_HPS_FMASK GENMASK(5, 5)
-#define FTCH_DPS_FMASK GENMASK(6, 6)
-#define HPS_FMASK GENMASK(7, 7)
-#define DPS_FMASK GENMASK(8, 8)
-#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
-#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
-#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
-#define RSRC_MNGR_FMASK GENMASK(12, 12)
-#define CTX_HANDLER_FMASK GENMASK(13, 13)
-#define ACK_MNGR_FMASK GENMASK(14, 14)
-#define D_DCPH_FMASK GENMASK(15, 15)
-#define H_DCPH_FMASK GENMASK(16, 16)
-/* The next field is not present for IPA v4.5+ */
-#define DCMP_FMASK GENMASK(17, 17)
-/* The next three fields are present for IPA v3.5+ */
-#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
-#define TX_0_FMASK GENMASK(19, 19)
-#define TX_1_FMASK GENMASK(20, 20)
-/* The next field is present for IPA v3.5.1+ */
-#define FNR_FMASK GENMASK(21, 21)
-/* The next eight fields are present for IPA v4.0+ */
-#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
-#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
-#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
-#define QMB_FMASK GENMASK(25, 25)
-#define WEIGHT_ARB_FMASK GENMASK(26, 26)
-#define GSI_IF_FMASK GENMASK(27, 27)
-#define GLOBAL_FMASK GENMASK(28, 28)
-#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
-/* The next field is present for IPA v4.5+ */
-#define DPL_FIFO_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.7+ */
-#define DRBIP_FMASK GENMASK(31, 31)
-
-#define IPA_REG_ROUTE_OFFSET 0x00000048
-#define ROUTE_DIS_FMASK GENMASK(0, 0)
-#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
-#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
-#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
-#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
-#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
-
-#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
-#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
-#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
-
-#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
-#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
-
-#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
-#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0+ */
-#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
-#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-
-static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x000008c;
+/* CLKON_CFG register */
+enum ipa_reg_clkon_cfg_field_id {
+ CLKON_RX,
+ CLKON_PROC,
+ TX_WRAPPER,
+ CLKON_MISC,
+ RAM_ARB,
+ FTCH_HPS,
+ FTCH_DPS,
+ CLKON_HPS,
+ CLKON_DPS,
+ RX_HPS_CMDQS,
+ HPS_DPS_CMDQS,
+ DPS_TX_CMDQS,
+ RSRC_MNGR,
+ CTX_HANDLER,
+ ACK_MNGR,
+ D_DCPH,
+ H_DCPH,
+ CLKON_DCMP, /* Not IPA v4.5+ */
+ NTF_TX_CMDQS, /* IPA v3.5+ */
+ CLKON_TX_0, /* IPA v3.5+ */
+ CLKON_TX_1, /* IPA v3.5+ */
+ CLKON_FNR, /* IPA v3.5.1+ */
+ QSB2AXI_CMDQ_L, /* IPA v4.0+ */
+ AGGR_WRAPPER, /* IPA v4.0+ */
+ RAM_SLAVEWAY, /* IPA v4.0+ */
+ CLKON_QMB, /* IPA v4.0+ */
+ WEIGHT_ARB, /* IPA v4.0+ */
+ GSI_IF, /* IPA v4.0+ */
+ CLKON_GLOBAL, /* IPA v4.0+ */
+ GLOBAL_2X_CLK, /* IPA v4.0+ */
+ DPL_FIFO, /* IPA v4.5+ */
+ DRBIP, /* IPA v4.7+ */
+};
- return 0x0000148;
-}
+/* ROUTE register */
+enum ipa_reg_route_field_id {
+ ROUTE_DIS,
+ ROUTE_DEF_PIPE,
+ ROUTE_DEF_HDR_TABLE,
+ ROUTE_DEF_HDR_OFST,
+ ROUTE_FRAG_DEF_PIPE,
+ ROUTE_DEF_RETAIN_HDR,
+};
-static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000090;
+/* SHARED_MEM_SIZE register */
+enum ipa_reg_shared_mem_size_field_id {
+ MEM_SIZE,
+ MEM_BADDR,
+};
- return 0x000014c;
-}
+/* QSB_MAX_WRITES register */
+enum ipa_reg_qsb_max_writes_field_id {
+ GEN_QMB_0_MAX_WRITES,
+ GEN_QMB_1_MAX_WRITES,
+};
-/* The next four fields are used for the hash enable and flush registers */
-#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+/* QSB_MAX_READS register */
+enum ipa_reg_qsb_max_reads_field_id {
+ GEN_QMB_0_MAX_READS,
+ GEN_QMB_1_MAX_READS,
+ GEN_QMB_0_MAX_READS_BEATS, /* IPA v4.0+ */
+ GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
+};
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000010c;
+/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
+enum ipa_reg_rout_hash_field_id {
+ IPV6_ROUTER_HASH,
+ IPV6_FILTER_HASH,
+ IPV4_ROUTER_HASH,
+ IPV4_FILTER_HASH,
+};
- return 0x000000b4;
-}
+/* BCR register */
+enum ipa_bcr_compat {
+ BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
+ BCR_TX_NOT_USING_BRESP = 0x1, /* Not IPA v4.2+ */
+ BCR_TX_SUSPEND_IRQ_ASSERT_ONCE = 0x2, /* Not IPA v4.0+ */
+ BCR_SUSPEND_L2_IRQ = 0x3, /* Not IPA v4.2+ */
+ BCR_HOLB_DROP_L2_IRQ = 0x4, /* Not IPA v4.2+ */
+ BCR_DUAL_TX = 0x5, /* IPA v3.5+ */
+ BCR_ENABLE_FILTER_DATA_CACHE = 0x6, /* IPA v3.5+ */
+ BCR_NOTIF_PRIORITY_OVER_ZLT = 0x7, /* IPA v3.5+ */
+ BCR_FILTER_PREFETCH_EN = 0x8, /* IPA v3.5+ */
+ BCR_ROUTER_PREFETCH_EN = 0x9, /* IPA v3.5+ */
+};
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_BCR_OFFSET 0x000001d0
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
-#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
-/* The next field is invalid for IPA v4.0+ */
-#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
-#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
-/* The next five fields are present for IPA v3.5+ */
-#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
-#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
-#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
-#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
-#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
-
-/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */
-#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8
-
-/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */
-static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version,
- u32 addr)
-{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(addr, GENMASK(16, 0));
+/* LOCAL_PKT_PROC_CNTXT register */
+enum ipa_reg_local_pkt_proc_cntxt_field_id {
+ IPA_BASE_ADDR,
+};
- return u32_encode_bits(addr, GENMASK(17, 0));
-}
+/* COUNTER_CFG register */
+enum ipa_reg_counter_cfg_field_id {
+ EOT_COAL_GRANULARITY, /* Not v3.5+ */
+ AGGR_GRANULARITY,
+};
-/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
-
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-/* The next field is not present for IPA v3.5+ */
-#define EOT_COAL_GRANULARITY GENMASK(3, 0)
-#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_TX_CFG_OFFSET 0x000001fc
-/* The next three fields are not present for IPA v4.0+ */
-#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
-/* The next six fields are present for IPA v4.0+ */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
-#define PA_MASK_EN_FMASK GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
-/* The next field is present for IPA v4.5+ */
-#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
-/* The next field is present for IPA v4.2+, but not IPA v4.5 */
-#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
-/* The next field is present for IPA v4.2 only */
-#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
-#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
-
-/* The next register is present for IPA v3.5+ */
-static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
-{
- if (version >= IPA_VERSION_4_2)
- return 0x00000240;
+/* IPA_TX_CFG register */
+enum ipa_reg_ipa_tx_cfg_field_id {
+ TX0_PREFETCH_DISABLE, /* Not v4.0+ */
+ TX1_PREFETCH_DISABLE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_THRESHOLD, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_EN, /* v4.0+ */
+ DMAW_MAX_BEATS_256_DIS, /* v4.0+ */
+ PA_MASK_EN, /* v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* v4.0+ */
+ DUAL_TX_ENABLE, /* v4.5+ */
+ SSPND_PA_NO_START_STATE, /* v4.2+, not v4.5 */
+ SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+};
- return 0x00000220;
-}
+/* FLAVOR_0 register */
+enum ipa_reg_flavor_0_field_id {
+ MAX_PIPES,
+ MAX_CONS_PIPES,
+ MAX_PROD_PIPES,
+ PROD_LOWEST,
+};
+
+/* IDLE_INDICATION_CFG register */
+enum ipa_reg_idle_indication_cfg_field_id {
+ ENTER_IDLE_DEBOUNCE_THRESH,
+ CONST_NON_IDLE_ENABLE,
+};
+
+/* QTIME_TIMESTAMP_CFG register */
+enum ipa_reg_qtime_timestamp_cfg_field_id {
+ DPL_TIMESTAMP_LSB,
+ DPL_TIMESTAMP_SEL,
+ TAG_TIMESTAMP_LSB,
+ NAT_TIMESTAMP_LSB,
+};
-#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
-#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
-#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
-#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
-#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
-#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
-#define DIV_VALUE_FMASK GENMASK(8, 0)
-#define DIV_ENABLE_FMASK GENMASK(31, 31)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
-#define GRAN_0_FMASK GENMASK(2, 0)
-#define GRAN_1_FMASK GENMASK(5, 3)
-#define GRAN_2_FMASK GENMASK(8, 6)
-/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+/* TIMERS_XO_CLK_DIV_CFG register */
+enum ipa_reg_timers_xo_clk_div_cfg_field_id {
+ DIV_VALUE,
+ DIV_ENABLE,
+};
+
+/* TIMERS_PULSE_GRAN_CFG register */
+enum ipa_reg_timers_pulse_gran_cfg_field_id {
+ PULSE_GRAN_0,
+ PULSE_GRAN_1,
+ PULSE_GRAN_2,
+};
+
+/* Values for IPA_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
enum ipa_pulse_gran {
IPA_GRAN_10_US = 0x0,
IPA_GRAN_20_US = 0x1,
@@ -325,267 +347,160 @@ enum ipa_pulse_gran {
IPA_GRAN_655350_US = 0x7,
};
-/* Not all of the following are present (depends on IPA version) */
-#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000400 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000404 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000408 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000040c + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000500 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000504 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000508 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000050c + 0x0020 * (rt))
-/* The next four fields are used for all resource group registers */
-#define X_MIN_LIM_FMASK GENMASK(5, 0)
-#define X_MAX_LIM_FMASK GENMASK(13, 8)
-/* The next two fields are not always present (if resource count is odd) */
-#define Y_MIN_LIM_FMASK GENMASK(21, 16)
-#define Y_MAX_LIM_FMASK GENMASK(29, 24)
-
-#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
- (0x00000800 + 0x0070 * (ep))
-/* Valid only for RX (IPA producer) endpoints (do not use for IPA v4.0+) */
-#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
-/* Valid only for TX (IPA consumer) endpoints */
-#define ENDP_DELAY_FMASK GENMASK(1, 1)
-
-#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
- (0x00000808 + 0x0070 * (ep))
-#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
-#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
-#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
-#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/* {SRC,DST}_RSRC_GRP_{01,23,45,67}_RSRC_TYPE registers */
+enum ipa_reg_rsrc_grp_rsrc_type_field_id {
+ X_MIN_LIM,
+ X_MAX_LIM,
+ Y_MIN_LIM,
+ Y_MAX_LIM,
+};
+
+/* ENDP_INIT_CTRL register */
+enum ipa_reg_endp_init_ctrl_field_id {
+ ENDP_SUSPEND, /* Not v4.0+ */
+ ENDP_DELAY, /* Not v4.2+ */
+};
+
+/* ENDP_INIT_CFG register */
+enum ipa_reg_endp_init_cfg_field_id {
+ FRAG_OFFLOAD_EN,
+ CS_OFFLOAD_EN,
+ CS_METADATA_HDR_OFFSET,
+ CS_GEN_QMB_MASTER_SEL,
+};
/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0x0,
- IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
- IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
- IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL /* TX */ = 0x1, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_DL /* RX */ = 0x2, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_INLINE /* TX and RX */ = 0x1, /* IPA v4.5+ */
};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \
- (0x0000080c + 0x0070 * (ep))
-#define NAT_EN_FMASK GENMASK(1, 0)
+/* ENDP_INIT_NAT register */
+enum ipa_reg_endp_init_nat_field_id {
+ NAT_EN,
+};
/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
enum ipa_nat_en {
- IPA_NAT_BYPASS = 0x0,
- IPA_NAT_SRC = 0x1,
- IPA_NAT_DST = 0x2,
-};
-
-#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
- (0x00000810 + 0x0070 * (ep))
-#define HDR_LEN_FMASK GENMASK(5, 0)
-#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
-#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
-#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
-#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
-#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
-/* The next field is not present for IPA v4.9+ */
-#define HDR_A5_MUX_FMASK GENMASK(26, 26)
-#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
-/* The next two fields are present for IPA v4.5+ */
-#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
-#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
-
-/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
-static inline u32 ipa_header_size_encoded(enum ipa_version version,
- u32 header_size)
-{
- u32 size = header_size & field_mask(HDR_LEN_FMASK);
- u32 val;
-
- val = u32_encode_bits(size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(header_size != size);
- return val;
- }
-
- /* IPA v4.5 adds a few more most-significant bits */
- size = header_size >> hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
-
- return val;
-}
-
-/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
-static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
- u32 offset)
-{
- u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
- u32 val;
-
- val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(offset != off);
- return val;
- }
+ IPA_NAT_BYPASS = 0x0,
+ IPA_NAT_SRC = 0x1,
+ IPA_NAT_DST = 0x2,
+};
- /* IPA v4.5 adds a few more most-significant bits */
- off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
+/* ENDP_INIT_HDR register */
+enum ipa_reg_endp_init_hdr_field_id {
+ HDR_LEN,
+ HDR_OFST_METADATA_VALID,
+ HDR_OFST_METADATA,
+ HDR_ADDITIONAL_CONST_LEN,
+ HDR_OFST_PKT_SIZE_VALID,
+ HDR_OFST_PKT_SIZE,
+ HDR_A5_MUX, /* Not v4.9+ */
+ HDR_LEN_INC_DEAGG_HDR,
+ HDR_METADATA_REG_VALID, /* Not v4.5+ */
+ HDR_LEN_MSB, /* v4.5+ */
+ HDR_OFST_METADATA_MSB, /* v4.5+ */
+};
- return val;
-}
+/* ENDP_INIT_HDR_EXT register */
+enum ipa_reg_endp_init_hdr_ext_field_id {
+ HDR_ENDIANNESS,
+ HDR_TOTAL_LEN_OR_PAD_VALID,
+ HDR_TOTAL_LEN_OR_PAD,
+ HDR_PAYLOAD_LEN_INC_PADDING,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET,
+ HDR_PAD_TO_ALIGNMENT,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
+ HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
+ HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+};
-#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
- (0x00000814 + 0x0070 * (ep))
-#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
-#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
-#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
-#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
-#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-/* The next three fields are present for IPA v4.5+ */
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
-#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
-#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
- (0x00000818 + 0x0070 * (rxep))
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
- (0x00000820 + 0x0070 * (txep))
-#define MODE_FMASK GENMASK(2, 0)
-/* The next field is present for IPA v4.5+ */
-#define DCPH_ENABLE_FMASK GENMASK(3, 3)
-#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
-#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
-#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
-#define PAD_EN_FMASK GENMASK(29, 29)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.9+ */
-#define DRBIP_ACL_ENABLE GENMASK(30, 30)
+/* ENDP_INIT_MODE register */
+enum ipa_reg_endp_init_mode_field_id {
+ ENDP_MODE,
+ DCPH_ENABLE, /* v4.5+ */
+ DEST_PIPE_INDEX,
+ BYTE_THRESHOLD,
+ PIPE_REPLICATION_EN,
+ PAD_EN,
+ HDR_FTCH_DISABLE, /* Not v4.5+ */
+ DRBIP_ACL_ENABLE, /* v4.9+ */
+};
/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
enum ipa_mode {
- IPA_BASIC = 0x0,
- IPA_ENABLE_FRAMING_HDLC = 0x1,
- IPA_ENABLE_DEFRAMING_HDLC = 0x2,
- IPA_DMA = 0x3,
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
};
-#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
- (0x00000824 + 0x0070 * (ep))
-#define AGGR_EN_FMASK GENMASK(1, 0)
-#define AGGR_TYPE_FMASK GENMASK(4, 2)
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_byte_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_time_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_pkt_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_sw_eof_active_fmask(bool legacy)
-{
- return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_force_close_fmask(bool legacy)
-{
- return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
-{
- return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
-}
-
-/* The next field is present for IPA v4.5+ */
-#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
+/* ENDP_INIT_AGGR register */
+enum ipa_reg_endp_init_aggr_field_id {
+ AGGR_EN,
+ AGGR_TYPE,
+ BYTE_LIMIT,
+ TIME_LIMIT,
+ PKT_LIMIT,
+ SW_EOF_ACTIVE,
+ FORCE_CLOSE,
+ HARD_BYTE_LIMIT_EN,
+ AGGR_GRAN_SEL,
+};
/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0x0, /* (TX, RX) */
- IPA_ENABLE_AGGR = 0x1, /* (RX) */
- IPA_ENABLE_DEAGGR = 0x2, /* (TX) */
+ IPA_BYPASS_AGGR /* TX and RX */ = 0x0,
+ IPA_ENABLE_AGGR /* RX */ = 0x1,
+ IPA_ENABLE_DEAGGR /* TX */ = 0x2,
};
/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */
enum ipa_aggr_type {
- IPA_MBIM_16 = 0x0,
- IPA_HDLC = 0x1,
- IPA_TLP = 0x2,
- IPA_RNDIS = 0x3,
- IPA_GENERIC = 0x4,
- IPA_COALESCE = 0x5,
- IPA_QCMAP = 0x6,
-};
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
- (0x0000082c + 0x0070 * (rxep))
-#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
- (0x00000830 + 0x0070 * (rxep))
-/* The next two fields are present for IPA v4.2 only */
-#define BASE_VALUE_FMASK GENMASK(4, 0)
-#define SCALE_FMASK GENMASK(12, 8)
-/* The next two fields are present for IPA v4.5 */
-#define TIME_LIMIT_FMASK GENMASK(4, 0)
-#define GRAN_SEL_FMASK GENMASK(8, 8)
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
- (0x00000834 + 0x0070 * (txep))
-#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
-#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
-#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
-#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
-#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
-#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
-
-#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
- (0x00000838 + 0x0070 * (ep))
-/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
-static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
-{
- if (version < IPA_VERSION_3_5 || version == IPA_VERSION_4_5)
- return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
- if (version == IPA_VERSION_4_2 || version == IPA_VERSION_4_7)
- return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+/* ENDP_INIT_HOL_BLOCK_EN register */
+enum ipa_reg_endp_init_hol_block_en_field_id {
+ HOL_BLOCK_EN,
+};
- return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
-}
+/* ENDP_INIT_HOL_BLOCK_TIMER register */
+enum ipa_reg_endp_init_hol_block_timer_field_id {
+ TIMER_BASE_VALUE, /* Not v4.5+ */
+ TIMER_SCALE, /* v4.2 only */
+ TIMER_LIMIT, /* v4.5+ */
+ TIMER_GRAN_SEL, /* v4.5+ */
+};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
- (0x0000083c + 0x0070 * (txep))
-#define SEQ_TYPE_FMASK GENMASK(7, 0)
-#define SEQ_REP_TYPE_FMASK GENMASK(15, 8)
+/* ENDP_INIT_DEAGGR register */
+enum ipa_reg_endp_deaggr_field_id {
+ DEAGGR_HDR_LEN,
+ SYSPIPE_ERR_DETECTION,
+ PACKET_OFFSET_VALID,
+ PACKET_OFFSET_LOCATION,
+ IGNORE_MIN_PKT_ERR,
+ MAX_PACKET_LEN,
+};
+
+/* ENDP_INIT_RSRC_GRP register */
+enum ipa_reg_endp_init_rsrc_grp_field_id {
+ ENDP_RSRC_GRP,
+};
+
+/* ENDP_INIT_SEQ register */
+enum ipa_reg_endp_init_seq_field_id {
+ SEQ_TYPE,
+ SEQ_REP_TYPE, /* Not v4.5+ */
+};
/**
* enum ipa_seq_type - HPS and DPS sequencer type
@@ -629,76 +544,36 @@ enum ipa_seq_rep_type {
IPA_SEQ_REP_DMA_PARSER = 0x08,
};
-#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
- (0x00000840 + 0x0070 * (ep))
-#define STATUS_EN_FMASK GENMASK(0, 0)
-#define STATUS_ENDP_FMASK GENMASK(5, 1)
-/* The next field is not present for IPA v4.5+ */
-#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0+ */
-#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-
-/* The next register is not present for IPA v4.2 (which no hashing support) */
-#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
- (0x0000085c + 0x0070 * (er))
-#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
-#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
-#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
-#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
-#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
-#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
-#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
-#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
-
-#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
-#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
-#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
-#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
-#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
-#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
-#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
-#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
-
-static inline u32 ipa_reg_irq_stts_ee_n_offset(enum ipa_version version,
- u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003008 + 0x1000 * ee;
-
- return 0x00004008 + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_stts_offset(enum ipa_version version)
-{
- return ipa_reg_irq_stts_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000300c + 0x1000 * ee;
-
- return 0x0000400c + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_en_offset(enum ipa_version version)
-{
- return ipa_reg_irq_en_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_clr_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003010 + 0x1000 * ee;
-
- return 0x00004010 + 0x1000 * ee;
-}
+/* ENDP_STATUS register */
+enum ipa_reg_endp_status_field_id {
+ STATUS_EN,
+ STATUS_ENDP,
+ STATUS_LOCATION, /* Not v4.5+ */
+ STATUS_PKT_SUPPRESS, /* v4.0+ */
+};
-static inline u32 ipa_reg_irq_clr_offset(enum ipa_version version)
-{
- return ipa_reg_irq_clr_ee_n_offset(version, GSI_EE_AP);
-}
+/* ENDP_FILTER_ROUTER_HSH_CFG register */
+enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
+ FILTER_HASH_MSK_SRC_ID,
+ FILTER_HASH_MSK_SRC_IP,
+ FILTER_HASH_MSK_DST_IP,
+ FILTER_HASH_MSK_SRC_PORT,
+ FILTER_HASH_MSK_DST_PORT,
+ FILTER_HASH_MSK_PROTOCOL,
+ FILTER_HASH_MSK_METADATA,
+ FILTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+
+ ROUTER_HASH_MSK_SRC_ID,
+ ROUTER_HASH_MSK_SRC_IP,
+ ROUTER_HASH_MSK_DST_IP,
+ ROUTER_HASH_MSK_SRC_PORT,
+ ROUTER_HASH_MSK_DST_PORT,
+ ROUTER_HASH_MSK_PROTOCOL,
+ ROUTER_HASH_MSK_METADATA,
+ ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 7 fields */
+};
+/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
* @IPA_IRQ_UC_0: Microcontroller event interrupt
@@ -774,74 +649,82 @@ enum ipa_irq_id {
IPA_IRQ_COUNT, /* Last; not an id */
};
-static inline u32 ipa_reg_irq_uc_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000301c + 0x1000 * ee;
+/* IPA_IRQ_UC register */
+enum ipa_reg_ipa_irq_uc_field_id {
+ UC_INTR,
+};
- return 0x0000401c + 0x1000 * ee;
-}
+extern const struct ipa_regs ipa_regs_v3_1;
+extern const struct ipa_regs ipa_regs_v3_5_1;
+extern const struct ipa_regs ipa_regs_v4_2;
+extern const struct ipa_regs ipa_regs_v4_5;
+extern const struct ipa_regs ipa_regs_v4_9;
+extern const struct ipa_regs ipa_regs_v4_11;
-static inline u32 ipa_reg_irq_uc_offset(enum ipa_version version)
+/* Return the field mask for a field in a register */
+static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
{
- return ipa_reg_irq_uc_ee_n_offset(version, GSI_EE_AP);
-}
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
-#define UC_INTR_FMASK GENMASK(0, 0)
+ return reg->fmask[field_id];
+}
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-static inline u32
-ipa_reg_irq_suspend_info_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the mask for a single-bit field in a register */
+static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
{
- if (version == IPA_VERSION_3_0)
- return 0x00003098 + 0x1000 * ee;
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003030 + 0x1000 * ee;
+ WARN_ON(!is_power_of_2(fmask));
- return 0x00004030 + 0x1000 * ee;
+ return fmask;
}
+/* Encode a value into the given field of a register */
static inline u32
-ipa_reg_irq_suspend_info_offset(enum ipa_version version)
+ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_info_ee_n_offset(version, GSI_EE_AP);
-}
+ u32 fmask = ipa_reg_fmask(reg, field_id);
-/* ipa->available defines the valid bits in the SUSPEND_EN register */
-static inline u32
-ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- WARN_ON(version == IPA_VERSION_3_0);
+ if (!fmask)
+ return 0;
- if (version < IPA_VERSION_4_9)
- return 0x00003034 + 0x1000 * ee;
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
- return 0x00004034 + 0x1000 * ee;
+ return val;
}
+/* Given a register value, decode (extract) the value in the given field */
static inline u32
-ipa_reg_irq_suspend_en_offset(enum ipa_version version)
+ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_en_ee_n_offset(version, GSI_EE_AP);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
}
-/* ipa->available defines the valid bits in the SUSPEND_CLR register */
-static inline u32
-ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
{
- WARN_ON(version == IPA_VERSION_3_0);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003038 + 0x1000 * ee;
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
- return 0x00004038 + 0x1000 * ee;
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
+{
+ return reg ? reg->offset : 0;
}
-static inline u32
-ipa_reg_irq_suspend_clr_offset(enum ipa_version version)
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
- return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP);
+ return reg ? reg->offset + n * reg->stride : 0;
}
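Finally, a short sketch of a parameterized register access (again illustrative, not part of the patch): instance N lives at the base offset plus N times the stride. ENDP_INIT_CFG, its fields, and the helpers used here are all defined above; the function name, the endpoint_id argument, and the values written are invented.

static void example_endp_cfg(struct ipa *ipa, u32 endpoint_id)
{
	const struct ipa_reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ENDP_INIT_CFG);

	/* Single-bit field: ipa_reg_bit() yields the bit's mask */
	val = ipa_reg_bit(reg, FRAG_OFFLOAD_EN);
	/* Multi-bit field: encode an enumerated value into position */
	val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, IPA_CS_OFFLOAD_NONE);

	/* Instance N of the register is at offset + N * stride */
	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}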
int ipa_reg_init(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 06cec7199382..a257f0e5e361 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -69,20 +69,21 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
}
static void
-ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
+ const struct ipa_reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
- val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -91,34 +92,35 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_src[resource_type];
- offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
@@ -127,34 +129,35 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_dst[resource_type];
- offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
/* Configure resources; there is no ipa_resource_deconfig() */
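As a cross-check on the conversion above: with the field layout defined later in this patch (X_MIN_LIM in bits 5:0, X_MAX_LIM in 13:8, Y_MIN_LIM in 21:16, Y_MAX_LIM in 29:24), the new ipa_reg_encode() calls produce the same value the old u32_encode_bits()/*_FMASK code did. A hand-rolled equivalent, for illustration only:

/* Illustrative only; the driver now fetches these masks from the
 * per-version table via ipa_reg_encode() rather than hard-coding them.
 */
static u32 rsrc_grp_limits_raw(u32 x_min, u32 x_max, u32 y_min, u32 y_max)
{
	return u32_encode_bits(x_min, GENMASK(5, 0)) |
	       u32_encode_bits(x_max, GENMASK(13, 8)) |
	       u32_encode_bits(y_min, GENMASK(21, 16)) |
	       u32_encode_bits(y_max, GENMASK(29, 24));
}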
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 211233612039..5620dc271fac 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 59cee31a7383..9b969b03d1a4 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SMP2P_H_
#define _IPA_SMP2P_H_
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index c0c8641cdd14..5cbc15a971f9 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2022 Linaro Ltd. */
#include <linux/kernel.h>
#include <linux/types.h>
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index 4a3ffd1e4e3f..58ba22810bab 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SYSFS_H_
#define _IPA_SYSFS_H_
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2f5a58bfc529..510ff2dc8999 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
@@ -386,8 +384,9 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ const struct ipa_reg *reg;
struct gsi_trans *trans;
+ u32 offset;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -399,8 +398,13 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
- val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
+
+ val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
ipa_cmd_register_write_add(trans, offset, val, val, false);
@@ -518,15 +522,18 @@ int ipa_table_setup(struct ipa *ipa)
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -567,13 +574,17 @@ static bool ipa_route_id_modem(u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, route_id);
+
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
iowrite32(val, ipa->reg_virt + offset);
}
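The field-clearing idiom above relies on the *_HASH_MSK_ALL entries being the union of the individual mask bits (filter bits 6:0, router bits 22:16 in the tables below), so ANDing with the inverted ipa_reg_fmask() zeroes one half of ENDP_FILTER_ROUTER_HSH_CFG while preserving the other. A hedged equivalent with the masks spelled out:

/* Equivalent by hand; the driver keeps these masks in the per-version
 * tables and reads them back with ipa_reg_fmask().
 */
static u32 hsh_cfg_clear_filter(u32 val)
{
	return val & ~GENMASK(6, 0);	/* FILTER_HASH_MSK_ALL */
}

static u32 hsh_cfg_clear_router(u32 val)
{
	return val & ~GENMASK(22, 16);	/* ROUTER_HASH_MSK_ALL */
}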
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index b6a9a0d79d68..395189f75d78 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index fe11910518d9..f0ee47281015 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -222,7 +222,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* Fill in the command data */
@@ -233,9 +233,10 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
shared->response_param = 0;
/* Use an interrupt to tell the microcontroller the command is ready */
- val = u32_encode_bits(1, UC_INTR_FMASK);
- offset = ipa_reg_irq_uc_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_UC);
+ val = ipa_reg_bit(reg, UC_INTR);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 23847f934d64..8514096e6f36 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_UC_H_
#define _IPA_UC_H_
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 6c16c895d842..7870e0cc3d7c 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_VERSION_H_
#define _IPA_VERSION_H_
@@ -19,10 +19,10 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
- * Please update ipa_version_valid() and ipa_version_string() whenever a
- * new version is added.
+ * Please update ipa_version_string() whenever a new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,
@@ -36,6 +36,30 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
+ IPA_VERSION_COUNT, /* Last; not a version */
+};
+
+static inline bool ipa_version_supported(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
#endif /* _IPA_VERSION_H_ */
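The new ipa_version_supported() helper takes over the role of the ipa_version_valid() check that the kernel-doc above no longer asks to be updated. A hypothetical call site, assuming the probe path already has the version from its match data (the actual caller is not shown in this excerpt):

/* Hypothetical helper; names other than ipa_version_supported() are
 * illustrative only.
 */
static int ipa_check_version(struct device *dev, enum ipa_version version)
{
	if (!ipa_version_supported(version)) {
		dev_err(dev, "unsupported IPA version %u\n", version);
		return -EINVAL;
	}

	return 0;
}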
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
new file mode 100644
index 000000000000..116b27717e3d
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ [EOT_COAL_GRANULARITY] = GENMASK(3, 0),
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+	/* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
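The IPA_REG()/IPA_REG_FIELDS()/IPA_REG_STRIDE_FIELDS() macros themselves live in ipa_reg.h and are not shown in this excerpt. Based on the members the accessors dereference (offset, stride, fcount, fmask[]) and the ipa_reg_<name>/ipa_reg_<name>_fmask naming visible above, the descriptor presumably looks roughly like the sketch below; treat the exact layout as an assumption.

/* Assumed shape only; the real struct ipa_reg is declared in ipa_reg.h,
 * which this hunk does not include.  These are the members the inline
 * accessors above dereference.
 */
struct ipa_reg_sketch {
	u32 offset;		/* register (or first instance) offset */
	u32 stride;		/* distance between instances; 0 if single */
	u32 fcount;		/* number of entries in fmask[] */
	const u32 *fmask;	/* BIT(n) or GENMASK(h, l), indexed by field ID */
};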
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
new file mode 100644
index 000000000000..6e2f939b18f1
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+	/* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ [TX0_PREFETCH_DISABLE] = BIT(0),
+ [TX1_PREFETCH_DISABLE] = BIT(1),
+ [PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
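One visible payoff of the per-version tables: the same field can be narrower or wider on different IPA versions without the caller caring. ENDP_RSRC_GRP is three bits wide on v3.1 (GENMASK(2, 0), maximum 7) but two bits wide on v3.5.1 (GENMASK(1, 0), maximum 3), and ipa_reg_field_max() returns the right bound either way. An illustrative (not actual) validation helper:

/* Illustrative only; not a function from this patch. */
static bool ipa_rsrc_grp_id_valid(struct ipa *ipa, u32 rsrc_grp)
{
	const struct ipa_reg *reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);

	return rsrc_grp <= ipa_reg_field_max(reg, ENDP_RSRC_GRP);
}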
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
new file mode 100644
index 000000000000..8fd36569bb9f
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(23, 22),
+ /* Bits 24-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
new file mode 100644
index 000000000000..f8e78e1907c8
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ /* Bit 17 reserved */
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ [SSPND_PA_NO_BQ_STATE] = BIT(19),
+ /* Bits 20-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_SCALE] = GENMASK(12, 8),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
new file mode 100644
index 000000000000..d32b805abb11
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ /* Bits 18-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
new file mode 100644
index 000000000000..eabbc5451937
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
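+ /* Bit 18 reserved */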
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(24, 22),
+ /* Bits 25-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
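+ /* Bit 26 reserved */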
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index dfeb5b392e64..bb1c298c1e78 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -495,7 +495,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
static int ipvlan_process_outbound(struct sk_buff *skb)
{
- struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
/* The ipvlan is a pseudo-L2 device, so the packets that we receive
@@ -505,6 +504,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
if (skb_mac_header_was_set(skb)) {
/* In this mode we dont care about
* multicast and broadcast traffic */
+ struct ethhdr *ethh = eth_hdr(skb);
+
if (is_multicast_ether_addr(ethh->h_dest)) {
pr_debug_ratelimited(
"Dropped {multi|broad}cast of type=[%x]\n",
@@ -589,7 +590,7 @@ out:
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ethhdr *eth = eth_hdr(skb);
+ struct ethhdr *eth = skb_eth_hdr(skb);
struct ipvl_addr *addr;
void *lyr3h;
int addr_type;
@@ -619,6 +620,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
return dev_forward_skb(ipvlan->phy_dev, skb);
} else if (is_multicast_ether_addr(eth->h_dest)) {
+ skb_reset_mac_header(skb);
ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 49ba8a50dfb1..54c94a69c2bb 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -408,8 +408,8 @@ static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c6d271e5687e..c891b60937a7 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -18,14 +18,13 @@
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
+#include <net/dst_metadata.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
#include <uapi/linux/if_macsec.h>
-#define MACSEC_SCI_LEN 8
-
/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6
@@ -46,20 +45,10 @@ struct macsec_eth_header {
u8 secure_channel_id[8]; /* optional */
} __packed;
-#define MACSEC_TCI_VERSION 0x80
-#define MACSEC_TCI_ES 0x40 /* end station */
-#define MACSEC_TCI_SC 0x20 /* SCI present */
-#define MACSEC_TCI_SCB 0x10 /* epon */
-#define MACSEC_TCI_E 0x08 /* encryption */
-#define MACSEC_TCI_C 0x04 /* changed text */
-#define MACSEC_AN_MASK 0x03 /* association number */
-#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
-
/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48
#define GCM_AES_IV_LEN 12
-#define DEFAULT_ICV_LEN 16
#define for_each_rxsc(secy, sc) \
for (sc = rcu_dereference_bh(secy->rx_sc); \
@@ -243,7 +232,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
return (struct macsec_cb *)skb->cb;
}
-#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
@@ -258,14 +246,6 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
-static bool send_sci(const struct macsec_secy *secy)
-{
- const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
-
- return tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
-}
-
static sci_t make_sci(const u8 *addr, __be16 port)
{
sci_t sci;
@@ -330,7 +310,7 @@ static void macsec_fill_sectag(struct macsec_eth_header *h,
/* with GCM, C/E clear for !encrypt, both set for encrypt */
if (tx_sc->encrypt)
h->tci_an |= MACSEC_TCI_CONFID;
- else if (secy->icv_len != DEFAULT_ICV_LEN)
+ else if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
h->tci_an |= MACSEC_TCI_C;
h->tci_an |= tx_sc->encoding_sa;
@@ -654,7 +634,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- sci_present = send_sci(secy);
+ sci_present = macsec_send_sci(secy);
hh = skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
@@ -1024,11 +1004,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
/* Deliver to the uncontrolled port by default */
enum rx_handler_result ret = RX_HANDLER_PASS;
struct ethhdr *hdr = eth_hdr(skb);
+ struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ md_dst = skb_metadata_dst(skb);
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1039,6 +1021,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
+ if (md_dst && md_dst->type == METADATA_MACSEC &&
+ (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
+ continue;
+
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1296,7 +1282,7 @@ nosci:
/* 10.6.1 if the SC is not found */
cbit = !!(hdr->tci_an & MACSEC_TCI_C);
if (!cbit)
- macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
+ macsec_finalize_skb(skb, MACSEC_DEFAULT_ICV_LEN,
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
@@ -1677,22 +1663,8 @@ static int macsec_offload(int (* const func)(struct macsec_context *),
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_lock(&ctx->phydev->lock);
- /* Phase I: prepare. The drive should fail here if there are going to be
- * issues in the commit phase.
- */
- ctx->prepare = true;
ret = (*func)(ctx);
- if (ret)
- goto phy_unlock;
- /* Phase II: commit. This step cannot fail. */
- ctx->prepare = false;
- ret = (*func)(ctx);
- /* This should never happen: commit is not allowed to fail */
- if (unlikely(ret))
- WARN(1, "MACsec offloading commit failed (%d)\n", ret);
-
-phy_unlock:
if (ctx->offload == MACSEC_OFFLOAD_PHY)
mutex_unlock(&ctx->phydev->lock);
@@ -1842,6 +1814,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rx_sa->sc = rx_sc;
+ if (secy->xpn) {
+ rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@@ -1864,12 +1842,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
- if (secy->xpn) {
- rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
- nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
- MACSEC_SALT_LEN);
- }
-
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@@ -2084,6 +2056,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
+ if (secy->xpn) {
+ tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
+ nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
+ MACSEC_SALT_LEN);
+ }
+
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@@ -2106,12 +2084,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
- if (secy->xpn) {
- tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
- nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
- MACSEC_SALT_LEN);
- }
-
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
@@ -3404,6 +3376,7 @@ static struct genl_family macsec_fam __ro_after_init = {
.module = THIS_MODULE,
.small_ops = macsec_genl_ops,
.n_small_ops = ARRAY_SIZE(macsec_genl_ops),
+ .resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
@@ -3415,6 +3388,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
int ret, len;
if (macsec_is_offloaded(netdev_priv(dev))) {
+ struct metadata_dst *md_dst = secy->tx_sc.md_dst;
+
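+		/* Attach this SecY's MACsec metadata dst so the offloading
+		 * device can recover the SCI the skb is transmitted on.
+		 */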
+ skb_dst_drop(skb);
+ dst_hold(&md_dst->dst);
+ skb_dst_set(skb, &md_dst->dst);
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
@@ -3742,6 +3720,8 @@ static void macsec_free_netdev(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ if (macsec->secy.tx_sc.md_dst)
+ metadata_dst_free(macsec->secy.tx_sc.md_dst);
free_percpu(macsec->stats);
free_percpu(macsec->secy.tx_sc.stats);
@@ -4014,6 +3994,13 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
return -ENOMEM;
}
+ secy->tx_sc.md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
+ if (!secy->tx_sc.md_dst) {
+ free_percpu(secy->tx_sc.stats);
+ free_percpu(macsec->stats);
+ return -ENOMEM;
+ }
+
if (sci == MACSEC_UNDEF_SCI)
sci = dev_to_sci(dev, MACSEC_PORT_ES);
@@ -4027,6 +4014,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
secy->xpn = DEFAULT_XPN;
secy->sci = sci;
+ secy->tx_sc.md_dst->u.macsec_info.sci = sci;
secy->tx_sc.active = true;
secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
@@ -4045,7 +4033,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
{
struct macsec_dev *macsec = macsec_priv(dev);
rx_handler_func_t *rx_handler;
- u8 icv_len = DEFAULT_ICV_LEN;
+ u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
int err, mtu;
sci_t sci;
@@ -4169,7 +4157,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
u64 csid = MACSEC_DEFAULT_CIPHER_ID;
- u8 icv_len = DEFAULT_ICV_LEN;
+ u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
int flag;
bool es, scb, sci;
@@ -4181,7 +4169,7 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
- if (icv_len != DEFAULT_ICV_LEN) {
+ if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
char dummy_key[DEFAULT_SAK_LEN] = { 0 };
struct crypto_aead *dummy_tfm;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1080d6ebff63..713e3354cb2e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1043,8 +1043,8 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index cecf8c63096c..d1f435788e90 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -207,7 +207,7 @@ static struct notifier_block macvtap_notifier_block __read_mostly = {
.notifier_call = macvtap_device_event,
};
-static int macvtap_init(void)
+static int __init macvtap_init(void)
{
int err;
@@ -241,7 +241,7 @@ out1:
}
module_init(macvtap_init);
-static void macvtap_exit(void)
+static void __exit macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
unregister_netdevice_notifier(&macvtap_notifier_block);
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index 53846c6b56ca..0762c735dd8a 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -986,7 +986,7 @@ out:
return rc;
}
-static int mctp_i2c_remove(struct i2c_client *client)
+static void mctp_i2c_remove(struct i2c_client *client)
{
struct mctp_i2c_client *mcli = i2c_get_clientdata(client);
struct mctp_i2c_dev *midev = NULL, *tmp = NULL;
@@ -999,8 +999,6 @@ static int mctp_i2c_remove(struct i2c_client *client)
mctp_i2c_free_client(mcli);
mutex_unlock(&driver_clients_lock);
- /* Callers ignore return code */
- return 0;
}
/* We look for a 'mctp-controller' property on I2C busses as they are
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 3e79c2c51929..689e728345ce 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -10,10 +10,31 @@
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/pse-pd/pse.h>
MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
MODULE_LICENSE("GPL");
+static struct pse_control *
+fwnode_find_pse_control(struct fwnode_handle *fwnode)
+{
+ struct pse_control *psec;
+ struct device_node *np;
+
+ if (!IS_ENABLED(CONFIG_PSE_CONTROLLER))
+ return NULL;
+
+ np = to_of_node(fwnode);
+ if (!np)
+ return NULL;
+
+ psec = of_pse_control_get(np);
+ if (PTR_ERR(psec) == -ENOENT)
+ return NULL;
+
+ return psec;
+}
+
static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
@@ -47,7 +68,9 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
* just fall back to poll mode
*/
if (rc == -EPROBE_DEFER)
- rc = -ENODEV;
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
+ if (rc == -EPROBE_DEFER)
+ return rc;
if (rc > 0) {
phy->irq = rc;
@@ -89,14 +112,21 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
struct fwnode_handle *child, u32 addr)
{
struct mii_timestamper *mii_ts = NULL;
+ struct pse_control *psec = NULL;
struct phy_device *phy;
bool is_c45 = false;
u32 phy_id;
int rc;
+ psec = fwnode_find_pse_control(child);
+ if (IS_ERR(psec))
+ return PTR_ERR(psec);
+
mii_ts = fwnode_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
+ if (IS_ERR(mii_ts)) {
+ rc = PTR_ERR(mii_ts);
+ goto clean_pse;
+ }
rc = fwnode_property_match_string(child, "compatible",
"ethernet-phy-ieee802.3-c45");
@@ -108,8 +138,8 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
else
phy = phy_device_create(bus, addr, phy_id, 0, NULL);
if (IS_ERR(phy)) {
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
+ rc = PTR_ERR(phy);
+ goto clean_mii_ts;
}
if (is_acpi_node(child)) {
@@ -123,25 +153,33 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
/* All data is now stored in the phy struct, so register it */
rc = phy_device_register(phy);
if (rc) {
- phy_device_free(phy);
fwnode_handle_put(phy->mdio.dev.fwnode);
- return rc;
+ goto clean_phy;
}
} else if (is_of_node(child)) {
rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr);
- if (rc) {
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
- return rc;
- }
+ if (rc)
+ goto clean_phy;
}
+ phy->psec = psec;
+
/* phy->mii_ts may already be defined by the PHY driver. A
* mii_timestamper probed via the device tree will still have
* precedence.
*/
if (mii_ts)
phy->mii_ts = mii_ts;
+
return 0;
+
+clean_phy:
+ phy_device_free(phy);
+clean_mii_ts:
+ unregister_mii_timestamper(mii_ts);
+clean_pse:
+ pse_control_put(psec);
+
+ return rc;
}
EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 09200a70b315..bf8bf5e20faf 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -3,6 +3,7 @@
* MDIO I2C bridge
*
* Copyright (C) 2015-2016 Russell King
+ * Copyright (C) 2021 Marek Behun
*
* Network PHYs can appear on I2C buses when they are part of SFP module.
* This driver exposes these PHYs to the networking PHY code, allowing
@@ -12,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
+#include <linux/sfp.h>
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
@@ -28,7 +30,7 @@ static unsigned int i2c_mii_phy_addr(int phy_id)
return phy_id + 0x40;
}
-static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
@@ -62,7 +64,8 @@ static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg)
return data[0] << 8 | data[1];
}
-static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
@@ -91,9 +94,288 @@ static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
return ret < 0 ? ret : 0;
}
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c)
+/* RollBall SFPs do not expose the internal PHY at I2C address 0x56; instead,
+ * the PHY is accessed via address 0x51, with the SFP page set to 0x03 and the
+ * password set to 0xffffffff.
+ *
+ * address size contents description
+ * ------- ---- -------- -----------
+ * 0x80 1 CMD 0x01/0x02/0x04 for write/read/done
+ * 0x81 1 DEV Clause 45 device
+ * 0x82 2 REG Clause 45 register
+ * 0x84 2 VAL Register value
+ */
+#define ROLLBALL_PHY_I2C_ADDR 0x51
+
+#define ROLLBALL_PASSWORD (SFP_VSL + 3)
+
+#define ROLLBALL_CMD_ADDR 0x80
+#define ROLLBALL_DATA_ADDR 0x81
+
+#define ROLLBALL_CMD_WRITE 0x01
+#define ROLLBALL_CMD_READ 0x02
+#define ROLLBALL_CMD_DONE 0x04
+
+#define SFP_PAGE_ROLLBALL_MDIO 3
+
+static int __i2c_transfer_err(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ int num)
+{
+ int ret;
+
+ ret = __i2c_transfer(i2c, msgs, num);
+ if (ret < 0)
+ return ret;
+ else if (ret != num)
+ return -EIO;
+ else
+ return 0;
+}
+
+static int __i2c_rollball_get_page(struct i2c_adapter *i2c, int bus_addr,
+ u8 *page)
+{
+ struct i2c_msg msgs[2];
+ u8 addr = SFP_PAGE;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &addr;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = page;
+
+ return __i2c_transfer_err(i2c, msgs, 2);
+}
+
+static int __i2c_rollball_set_page(struct i2c_adapter *i2c, int bus_addr,
+ u8 page)
+{
+ struct i2c_msg msg;
+ u8 buf[2];
+
+ buf[0] = SFP_PAGE;
+ buf[1] = page;
+
+ msg.addr = bus_addr;
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = buf;
+
+ return __i2c_transfer_err(i2c, &msg, 1);
+}
+
+/* In order not to interfere with other SFP code (which may manipulate
+ * SFP_PAGE), for every transfer we do this:
+ * 1. lock the bus
+ * 2. save content of SFP_PAGE
+ * 3. set SFP_PAGE to 3
+ * 4. do the transfer
+ * 5. restore original SFP_PAGE
+ * 6. unlock the bus
+ * Note that one might think steps 2 to 5 could in theory be done in a single
+ * call to i2c_transfer() (by constructing the msgs array accordingly), but
+ * unfortunately tests show that this does not work :-( The changed SFP_PAGE
+ * is not taken into account until i2c_transfer() is done.
+ */
+static int i2c_transfer_rollball(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ int ret, main_err = 0;
+ u8 saved_page;
+
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ /* save original page */
+ ret = __i2c_rollball_get_page(i2c, msgs->addr, &saved_page);
+ if (ret)
+ goto unlock;
+
+ /* change to RollBall MDIO page */
+ ret = __i2c_rollball_set_page(i2c, msgs->addr, SFP_PAGE_ROLLBALL_MDIO);
+ if (ret)
+ goto unlock;
+
+ /* do the transfer; we try to restore original page if this fails */
+ ret = __i2c_transfer_err(i2c, msgs, num);
+ if (ret)
+ main_err = ret;
+
+ /* restore original page */
+ ret = __i2c_rollball_set_page(i2c, msgs->addr, saved_page);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ return main_err ? : ret;
+}
+
+static int i2c_rollball_mii_poll(struct mii_bus *bus, int bus_addr, u8 *buf,
+ size_t len)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+ u8 cmd_addr, tmp, *res;
+ int i, ret;
+
+ cmd_addr = ROLLBALL_CMD_ADDR;
+
+ res = buf ? buf : &tmp;
+ len = buf ? len : 1;
+
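+	/* Reads always start at ROLLBALL_CMD_ADDR: res[0] is the status byte
+	 * polled for ROLLBALL_CMD_DONE, and a 6-byte read also covers the VAL
+	 * registers at 0x84/0x85 used by i2c_mii_read_rollball().
+	 */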
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1;
+ msgs[0].buf = &cmd_addr;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = res;
+
+ /* By experiment it takes up to 70 ms to access a register for these
+ * SFPs. Sleep 20ms between iterations and try 10 times.
+ */
+ i = 10;
+ do {
+ msleep(20);
+
+ ret = i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
+ if (ret)
+ return ret;
+
+ if (*res == ROLLBALL_CMD_DONE)
+ return 0;
+ } while (i-- > 0);
+
+ dev_dbg(&bus->dev, "poll timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
+ u8 *data, size_t len)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ struct i2c_msg msgs[2];
+ u8 cmdbuf[2];
+
+ cmdbuf[0] = ROLLBALL_CMD_ADDR;
+ cmdbuf[1] = cmd;
+
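+	/* msgs[0] writes the DEV/REG/VAL block at ROLLBALL_DATA_ADDR; msgs[1]
+	 * then writes the command byte at ROLLBALL_CMD_ADDR to kick it off.
+	 */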
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = len;
+ msgs[0].buf = data;
+
+ msgs[1].addr = bus_addr;
+ msgs[1].flags = 0;
+ msgs[1].len = sizeof(cmdbuf);
+ msgs[1].buf = cmdbuf;
+
+ return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
+}
+
+static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
+{
+ u8 buf[4], res[6];
+ int bus_addr, ret;
+ u16 val;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
+ return 0xffff;
+
+ buf[0] = ROLLBALL_DATA_ADDR;
+ buf[1] = (reg >> 16) & 0x1f;
+ buf[2] = (reg >> 8) & 0xff;
+ buf[3] = reg & 0xff;
+
+ ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_READ, buf,
+ sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_rollball_mii_poll(bus, bus_addr, res, sizeof(res));
+ if (ret == -ETIMEDOUT)
+ return 0xffff;
+ else if (ret < 0)
+ return ret;
+
+ val = res[4] << 8 | res[5];
+
+ return val;
+}
+
+static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ int bus_addr, ret;
+ u8 buf[6];
+
+ if (!(reg & MII_ADDR_C45))
+ return -EOPNOTSUPP;
+
+ bus_addr = i2c_mii_phy_addr(phy_id);
+ if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
+ return 0;
+
+ buf[0] = ROLLBALL_DATA_ADDR;
+ buf[1] = (reg >> 16) & 0x1f;
+ buf[2] = (reg >> 8) & 0xff;
+ buf[3] = reg & 0xff;
+ buf[4] = val >> 8;
+ buf[5] = val & 0xff;
+
+ ret = i2c_rollball_mii_cmd(bus, bus_addr, ROLLBALL_CMD_WRITE, buf,
+ sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_rollball_mii_poll(bus, bus_addr, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int i2c_mii_init_rollball(struct i2c_adapter *i2c)
+{
+ struct i2c_msg msg;
+ u8 pw[5];
+ int ret;
+
+ pw[0] = ROLLBALL_PASSWORD;
+ pw[1] = 0xff;
+ pw[2] = 0xff;
+ pw[3] = 0xff;
+ pw[4] = 0xff;
+
+ msg.addr = ROLLBALL_PHY_I2C_ADDR;
+ msg.flags = 0;
+ msg.len = sizeof(pw);
+ msg.buf = pw;
+
+ ret = i2c_transfer(i2c, &msg, 1);
+ if (ret < 0)
+ return ret;
+ else if (ret != 1)
+ return -EIO;
+ else
+ return 0;
+}
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol)
{
struct mii_bus *mii;
+ int ret;
if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
return ERR_PTR(-EINVAL);
@@ -104,10 +386,28 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c)
snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent));
mii->parent = parent;
- mii->read = i2c_mii_read;
- mii->write = i2c_mii_write;
mii->priv = i2c;
+ switch (protocol) {
+ case MDIO_I2C_ROLLBALL:
+ ret = i2c_mii_init_rollball(i2c);
+ if (ret < 0) {
+ dev_err(parent,
+ "Cannot initialize RollBall MDIO I2C protocol: %d\n",
+ ret);
+ mdiobus_free(mii);
+ return ERR_PTR(ret);
+ }
+
+ mii->read = i2c_mii_read_rollball;
+ mii->write = i2c_mii_write_rollball;
+ break;
+ default:
+ mii->read = i2c_mii_read_default;
+ mii->write = i2c_mii_write_default;
+ break;
+ }
+
return mii;
}
EXPORT_SYMBOL_GPL(mdio_i2c_alloc);
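/* A hedged usage sketch (not part of the patch): a caller that knows its
 * module speaks the RollBall protocol selects it via the new parameter.
 * The function name and error handling here are illustrative only.
 */
static struct mii_bus *example_rollball_mdio_attach(struct device *dev,
						    struct i2c_adapter *i2c)
{
	struct mii_bus *mii;
	int ret;

	mii = mdio_i2c_alloc(dev, i2c, MDIO_I2C_ROLLBALL);
	if (IS_ERR(mii))
		return mii;

	ret = mdiobus_register(mii);
	if (ret) {
		mdiobus_free(mii);
		return ERR_PTR(ret);
	}

	return mii;
}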
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 08541007b18a..51f68daac152 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/mfd/ocelot.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
@@ -270,44 +271,25 @@ static int mscc_miim_clk_set(struct mii_bus *bus)
static int mscc_miim_probe(struct platform_device *pdev)
{
- struct regmap *mii_regmap, *phy_regmap = NULL;
struct device_node *np = pdev->dev.of_node;
+ struct regmap *mii_regmap, *phy_regmap;
struct device *dev = &pdev->dev;
- void __iomem *regs, *phy_regs;
struct mscc_miim_dev *miim;
- struct resource *res;
struct mii_bus *bus;
int ret;
- regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(regs)) {
- dev_err(dev, "Unable to map MIIM registers\n");
- return PTR_ERR(regs);
- }
-
- mii_regmap = devm_regmap_init_mmio(dev, regs, &mscc_miim_regmap_config);
-
- if (IS_ERR(mii_regmap)) {
- dev_err(dev, "Unable to create MIIM regmap\n");
- return PTR_ERR(mii_regmap);
- }
+ mii_regmap = ocelot_regmap_from_resource(pdev, 0,
+ &mscc_miim_regmap_config);
+ if (IS_ERR(mii_regmap))
+ return dev_err_probe(dev, PTR_ERR(mii_regmap),
+ "Unable to create MIIM regmap\n");
/* This resource is optional */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- phy_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(phy_regs)) {
- dev_err(dev, "Unable to map internal phy registers\n");
- return PTR_ERR(phy_regs);
- }
-
- phy_regmap = devm_regmap_init_mmio(dev, phy_regs,
- &mscc_miim_phy_regmap_config);
- if (IS_ERR(phy_regmap)) {
- dev_err(dev, "Unable to create phy register regmap\n");
- return PTR_ERR(phy_regmap);
- }
- }
+ phy_regmap = ocelot_regmap_from_resource_optional(pdev, 1,
+ &mscc_miim_phy_regmap_config);
+ if (IS_ERR(phy_regmap))
+ return dev_err_probe(dev, PTR_ERR(phy_regmap),
+ "Unable to create phy register regmap\n");
ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
if (ret < 0) {
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index b8866bc3f2e8..4a2e94faf57e 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -233,11 +233,9 @@ static int g12a_ephy_glue_clk_register(struct device *dev)
snprintf(in_name, sizeof(in_name), "clkin%d", i);
clk = devm_clk_get(dev, in_name);
- if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- dev_err(dev, "Missing clock %s\n", in_name);
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Missing clock %s\n", in_name);
parent_names[i] = __clk_get_name(clk);
}
@@ -317,12 +315,9 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
return PTR_ERR(priv->regs);
priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk)) {
- ret = PTR_ERR(priv->pclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get peripheral clock\n");
- return ret;
- }
+ if (IS_ERR(priv->pclk))
+ return dev_err_probe(dev, PTR_ERR(priv->pclk),
+ "failed to get peripheral clock\n");
/* Make sure the device registers are clocked */
ret = clk_prepare_enable(priv->pclk);
@@ -339,8 +334,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
&priv->mux_handle, dev, NULL);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "mdio multiplexer init failed: %d", ret);
+ dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
goto err;
}
diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index c02fb2a067ee..c02c9c660016 100644
--- a/drivers/net/mdio/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
@@ -159,12 +159,9 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
mdio_mux_mmioreg_switch_fn,
&s->mux_handle, s, NULL);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to register mdio-mux bus %pOF\n", np);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register mdio-mux bus %pOF\n", np);
pdev->dev.platform_data = s;
diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index 527acfc3c045..bfa5af577b0a 100644
--- a/drivers/net/mdio/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
@@ -72,12 +72,9 @@ static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
return -ENOMEM;
s->muxc = devm_mux_control_get(dev, NULL);
- if (IS_ERR(s->muxc)) {
- ret = PTR_ERR(s->muxc);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get mux: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(s->muxc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(s->muxc),
+ "Failed to get mux\n");
platform_set_drvdata(pdev, s);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 9e3c815a070f..796e9c7857d0 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -231,6 +231,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
return 0;
unregister:
+ of_node_put(child);
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 21a0435c02de..7a28e082436e 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -324,8 +324,8 @@ static const struct net_device_ops failover_dev_ops = {
static void nfo_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version));
}
static int nfo_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ddac61d79145..bdff9ac5056d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(oops_only, "Only log oops messages");
#ifndef MODULE
static int __init option_setup(char *opt)
{
- strlcpy(config, opt, MAX_PARAM_LENGTH);
+ strscpy(config, opt, MAX_PARAM_LENGTH);
return 1;
}
__setup("netconsole=", option_setup);
@@ -178,7 +178,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
goto fail;
nt->np.name = "netconsole";
- strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
+ strscpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
@@ -414,7 +414,7 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
return -EINVAL;
}
- strlcpy(nt->np.dev_name, buf, IFNAMSIZ);
+ strscpy(nt->np.dev_name, buf, IFNAMSIZ);
/* Get rid of possible trailing newline from echo(1) */
len = strnlen(nt->np.dev_name, IFNAMSIZ);
@@ -630,7 +630,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
return ERR_PTR(-ENOMEM);
nt->np.name = "netconsole";
- strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
+ strscpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
eth_broadcast_addr(nt->np.remote_mac);
@@ -708,7 +708,7 @@ restart:
if (nt->np.dev == dev) {
switch (event) {
case NETDEV_CHANGENAME:
- strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
+ strscpy(nt->np.dev_name, dev->name, IFNAMSIZ);
break;
case NETDEV_RELEASE:
case NETDEV_JOIN:
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index e88f783c297e..794fc0cc73b8 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -965,7 +965,6 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
- int ret;
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
@@ -976,15 +975,25 @@ static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_actio
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- ret = nsim_dev_reload_create(nsim_dev, extack);
- return ret;
+
+ return nsim_dev_reload_create(nsim_dev, extack);
}
static int nsim_dev_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
- return devlink_info_driver_name_put(req, DRV_NAME);
+ int err;
+
+ err = devlink_info_driver_name_put(req, DRV_NAME);
+ if (err)
+ return err;
+ err = devlink_info_version_stored_put_ext(req, "fw.mgmt", "10.20.30",
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
+ if (err)
+ return err;
+ return devlink_info_version_running_put_ext(req, "fw.mgmt", "10.20.30",
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
#define NSIM_DEV_FLASH_SIZE 500000
@@ -1312,8 +1321,7 @@ nsim_dev_devlink_trap_drop_counter_get(struct devlink *devlink,
static const struct devlink_ops nsim_dev_devlink_ops = {
.eswitch_mode_set = nsim_devlink_eswitch_mode_set,
.eswitch_mode_get = nsim_devlink_eswitch_mode_get,
- .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
- DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
diff --git a/drivers/net/netdevsim/hwstats.c b/drivers/net/netdevsim/hwstats.c
index 605a38e16db0..0e58aa7f0374 100644
--- a/drivers/net/netdevsim/hwstats.c
+++ b/drivers/net/netdevsim/hwstats.c
@@ -433,11 +433,11 @@ int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
goto err_remove_hwstats_recursive;
}
- debugfs_create_file("enable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_enable_fops.fops);
- debugfs_create_file("disable_ifindex", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_disable_fops.fops);
- debugfs_create_file("fail_next_enable", 0600, hwstats->l3_ddir, hwstats,
+ debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_fail_fops.fops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e470e3398abc..9a1a5b203624 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&ns->syncp);
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
}
static int
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 80bdc07f2cd3..464d88ca8ab0 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -364,9 +364,9 @@ static void ntb_get_drvinfo(struct net_device *ndev,
{
struct ntb_netdev *dev = netdev_priv(ndev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}
static int ntb_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 6289b7c765f1..6e7e6c346a3e 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -26,4 +26,10 @@ config PCS_RZN1_MIIC
on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
pass-through mode for MII.
+config PCS_ALTERA_TSE
+ tristate
+ help
+	  This module provides helper functions for the Altera Triple Speed
+	  Ethernet SGMII PCS, which can be found on the Intel Socfpga family.
+
endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index 0ff5388fcdea..4c780d8f2e98 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -6,3 +6,4 @@ pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
obj-$(CONFIG_PCS_RZN1_MIIC) += pcs-rzn1-miic.o
+obj-$(CONFIG_PCS_ALTERA_TSE) += pcs-altera-tse.o
diff --git a/drivers/net/pcs/pcs-altera-tse.c b/drivers/net/pcs/pcs-altera-tse.c
new file mode 100644
index 000000000000..97a7cabff962
--- /dev/null
+++ b/drivers/net/pcs/pcs-altera-tse.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Bootlin
+ *
+ * Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/pcs-altera-tse.h>
+
+/* SGMII PCS register addresses
+ */
+#define SGMII_PCS_SCRATCH 0x10
+#define SGMII_PCS_REV 0x11
+#define SGMII_PCS_LINK_TIMER_0 0x12
+#define SGMII_PCS_LINK_TIMER_REG(x) (0x12 + (x))
+#define SGMII_PCS_LINK_TIMER_1 0x13
+#define SGMII_PCS_IF_MODE 0x14
+#define PCS_IF_MODE_SGMII_ENA BIT(0)
+#define PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define PCS_IF_MODE_SGMI_SPEED_MASK GENMASK(3, 2)
+#define PCS_IF_MODE_SGMI_SPEED_10 (0 << 2)
+#define PCS_IF_MODE_SGMI_SPEED_100 (1 << 2)
+#define PCS_IF_MODE_SGMI_SPEED_1000 (2 << 2)
+#define PCS_IF_MODE_SGMI_HALF_DUPLEX BIT(4)
+#define PCS_IF_MODE_SGMI_PHY_AN BIT(5)
+#define SGMII_PCS_DIS_READ_TO 0x15
+#define SGMII_PCS_READ_TO 0x16
+#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
+
+struct altera_tse_pcs {
+ struct phylink_pcs pcs;
+ void __iomem *base;
+ int reg_width;
+};
+
+static struct altera_tse_pcs *phylink_pcs_to_tse_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct altera_tse_pcs, pcs);
+}
+
+static u16 tse_pcs_read(struct altera_tse_pcs *tse_pcs, int regnum)
+{
+ if (tse_pcs->reg_width == 4)
+ return readl(tse_pcs->base + regnum * 4);
+ else
+ return readw(tse_pcs->base + regnum * 2);
+}
+
+static void tse_pcs_write(struct altera_tse_pcs *tse_pcs, int regnum,
+ u16 value)
+{
+ if (tse_pcs->reg_width == 4)
+ writel(value, tse_pcs->base + regnum * 4);
+ else
+ writew(value, tse_pcs->base + regnum * 2);
+}
+
+static int tse_pcs_reset(struct altera_tse_pcs *tse_pcs)
+{
+ int i = 0;
+ u16 bmcr;
+
+ /* Reset PCS block */
+ bmcr = tse_pcs_read(tse_pcs, MII_BMCR);
+ bmcr |= BMCR_RESET;
+ tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
+
+ for (i = 0; i < SGMII_PCS_SW_RESET_TIMEOUT; i++) {
+ if (!(tse_pcs_read(tse_pcs, MII_BMCR) & BMCR_RESET))
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int alt_tse_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ return 1;
+
+ return -EINVAL;
+}
+
+static int alt_tse_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u32 ctrl, if_mode;
+
+ ctrl = tse_pcs_read(tse_pcs, MII_BMCR);
+ if_mode = tse_pcs_read(tse_pcs, SGMII_PCS_IF_MODE);
+
+ /* Set link timer to 1.6ms, as per the MegaCore Function User Guide */
+ tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_0, 0x0D40);
+ tse_pcs_write(tse_pcs, SGMII_PCS_LINK_TIMER_1, 0x03);
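+	/* The two link timer writes concatenate to 0x30D40 = 200000 ticks;
+	 * assuming an 8 ns tick, that is 200000 * 8 ns = 1.6 ms, matching the
+	 * value quoted above from the MegaCore Function User Guide.
+	 */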
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ if_mode |= PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ if_mode &= ~(PCS_IF_MODE_USE_SGMII_AN | PCS_IF_MODE_SGMII_ENA);
+ if_mode |= PCS_IF_MODE_SGMI_SPEED_1000;
+ }
+
+ ctrl |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
+
+ tse_pcs_write(tse_pcs, MII_BMCR, ctrl);
+ tse_pcs_write(tse_pcs, SGMII_PCS_IF_MODE, if_mode);
+
+ return tse_pcs_reset(tse_pcs);
+}
+
+static void alt_tse_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u16 bmsr, lpa;
+
+ bmsr = tse_pcs_read(tse_pcs, MII_BMSR);
+ lpa = tse_pcs_read(tse_pcs, MII_LPA);
+
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lpa);
+}
+
+static void alt_tse_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct altera_tse_pcs *tse_pcs = phylink_pcs_to_tse_pcs(pcs);
+ u16 bmcr;
+
+ bmcr = tse_pcs_read(tse_pcs, MII_BMCR);
+ bmcr |= BMCR_ANRESTART;
+ tse_pcs_write(tse_pcs, MII_BMCR, bmcr);
+
+ /* This PCS seems to require a soft reset to re-sync the AN logic */
+ tse_pcs_reset(tse_pcs);
+}
+
+static const struct phylink_pcs_ops alt_tse_pcs_ops = {
+ .pcs_validate = alt_tse_pcs_validate,
+ .pcs_get_state = alt_tse_pcs_get_state,
+ .pcs_config = alt_tse_pcs_config,
+ .pcs_an_restart = alt_tse_pcs_an_restart,
+};
+
+struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
+ void __iomem *pcs_base, int reg_width)
+{
+ struct altera_tse_pcs *tse_pcs;
+
+ if (reg_width != 4 && reg_width != 2)
+ return ERR_PTR(-EINVAL);
+
+ tse_pcs = devm_kzalloc(&ndev->dev, sizeof(*tse_pcs), GFP_KERNEL);
+ if (!tse_pcs)
+ return ERR_PTR(-ENOMEM);
+
+ tse_pcs->pcs.ops = &alt_tse_pcs_ops;
+ tse_pcs->base = pcs_base;
+ tse_pcs->reg_width = reg_width;
+
+ return &tse_pcs->pcs;
+}
+EXPORT_SYMBOL_GPL(alt_tse_pcs_create);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Altera TSE PCS driver");
+MODULE_AUTHOR("Maxime Chevallier <maxime.chevallier@bootlin.com>");
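/* A hedged sketch (not part of the patch) of how a MAC driver might hand the
 * PCS returned by alt_tse_pcs_create() to phylink. The private structure and
 * the 16-bit register width passed here are assumptions for illustration.
 */
struct example_tse_priv {
	struct phylink_config phylink_config;
	struct phylink_pcs *pcs;
};

static struct phylink_pcs *
example_tse_select_pcs(struct phylink_config *config, phy_interface_t interface)
{
	struct example_tse_priv *priv =
		container_of(config, struct example_tse_priv, phylink_config);

	/* One PCS instance covers both SGMII and 1000BASE-X, see
	 * alt_tse_pcs_validate() above.
	 */
	return priv->pcs;
}

/* At probe time, with pcs_base already ioremapped:
 *	priv->pcs = alt_tse_pcs_create(ndev, pcs_base, 2);
 *	if (IS_ERR(priv->pcs))
 *		return PTR_ERR(priv->pcs);
 */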
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index ee374a85544a..134637584a83 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -749,7 +749,7 @@ static void adin_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) {
- strlcpy(&data[i * ETH_GSTRING_LEN],
+ strscpy(&data[i * ETH_GSTRING_LEN],
adin_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index b6d139501199..7619d6185801 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -15,6 +15,8 @@
#include <linux/property.h>
#define PHY_ID_ADIN1100 0x0283bc81
+#define PHY_ID_ADIN1110 0x0283bc91
+#define PHY_ID_ADIN2111 0x0283bca1
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
@@ -265,7 +267,8 @@ static int adin_probe(struct phy_device *phydev)
static struct phy_driver adin_driver[] = {
{
- PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100),
+ .phy_id = PHY_ID_ADIN1100,
+ .phy_id_mask = 0xffffffcf,
.name = "ADIN1100",
.get_features = adin_get_features,
.soft_reset = adin_soft_reset,
@@ -284,6 +287,8 @@ module_phy_driver(adin_driver);
static struct mdio_device_id __maybe_unused adin_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1100) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1110) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN2111) },
{ }
};
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 8b7a46db30e0..47a76df36b74 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -27,9 +27,12 @@
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX 1
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII 3
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI 4
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI 7
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
#define MDIO_AN_VEND_PROV 0xc400
@@ -91,6 +94,22 @@
#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+#define VEND1_GLOBAL_GEN_STAT2 0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG BIT(15)
+
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M 0x0310
+#define VEND1_GLOBAL_CFG_100M 0x031b
+#define VEND1_GLOBAL_CFG_1G 0x031c
+#define VEND1_GLOBAL_CFG_2_5G 0x031d
+#define VEND1_GLOBAL_CFG_5G 0x031e
+#define VEND1_GLOBAL_CFG_10G 0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE 2
+
#define VEND1_GLOBAL_RSVD_STAT1 0xc885
#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
@@ -125,6 +144,12 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+/* Sleep and timeout for checking if the Processor-Intensive
+ * MDIO operation is finished
+ */
+#define AQR107_OP_IN_PROG_SLEEP 1000
+#define AQR107_OP_IN_PROG_TIMEOUT 100000
+
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -335,40 +360,57 @@ static int aqr_read_status(struct phy_device *phydev)
static int aqr107_read_rate(struct phy_device *phydev)
{
+ u32 config_reg;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
if (val < 0)
return val;
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
case MDIO_AN_TX_VEND_STATUS1_10BASET:
phydev->speed = SPEED_10;
+ config_reg = VEND1_GLOBAL_CFG_10M;
break;
case MDIO_AN_TX_VEND_STATUS1_100BASETX:
phydev->speed = SPEED_100;
+ config_reg = VEND1_GLOBAL_CFG_100M;
break;
case MDIO_AN_TX_VEND_STATUS1_1000BASET:
phydev->speed = SPEED_1000;
+ config_reg = VEND1_GLOBAL_CFG_1G;
break;
case MDIO_AN_TX_VEND_STATUS1_2500BASET:
phydev->speed = SPEED_2500;
+ config_reg = VEND1_GLOBAL_CFG_2_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_5000BASET:
phydev->speed = SPEED_5000;
+ config_reg = VEND1_GLOBAL_CFG_5G;
break;
case MDIO_AN_TX_VEND_STATUS1_10GBASET:
phydev->speed = SPEED_10000;
+ config_reg = VEND1_GLOBAL_CFG_10G;
break;
default:
phydev->speed = SPEED_UNKNOWN;
- break;
+ return 0;
}
- if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
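+	/* Look up the rate adaptation mode configured for the negotiated speed */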
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
+ if (val < 0)
+ return val;
+
+ if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
+ VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
else
- phydev->duplex = DUPLEX_HALF;
+ phydev->rate_matching = RATE_MATCH_NONE;
return 0;
}
@@ -392,15 +434,24 @@ static int aqr107_read_status(struct phy_device *phydev)
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
phydev->interface = PHY_INTERFACE_MODE_10GKR;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+ phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
phydev->interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
phydev->interface = PHY_INTERFACE_MODE_USXGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+ phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
phydev->interface = PHY_INTERFACE_MODE_SGMII;
break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+ phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
break;
@@ -513,11 +564,14 @@ static int aqr107_config_init(struct phy_device *phydev)
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
- phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+ phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
+ phydev->interface != PHY_INTERFACE_MODE_XAUI &&
+ phydev->interface != PHY_INTERFACE_MODE_RXAUI)
return -ENODEV;
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
@@ -597,16 +651,62 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+{
+ int val, err;
+
+	/* The datasheet notes that we should wait at least 1 ms after issuing
+	 * a processor-intensive operation before checking for completion.
+ * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+ * because that just determines the maximum time slept, not the minimum.
+ */
+ usleep_range(1000, 5000);
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_GEN_STAT2, val,
+ !(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (err) {
+ phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int aqr107_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ if (iface == PHY_INTERFACE_MODE_10GBASER ||
+ iface == PHY_INTERFACE_MODE_2500BASEX ||
+ iface == PHY_INTERFACE_MODE_NA)
+ return RATE_MATCH_PAUSE;
+ return RATE_MATCH_NONE;
+}
+
static int aqr107_suspend(struct phy_device *phydev)
{
- return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_resume(struct phy_device *phydev)
{
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
- MDIO_CTRL1_LPOWER);
+ int err;
+
+ err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+ if (err)
+ return err;
+
+ return aqr107_wait_processor_intensive_op(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
@@ -658,6 +758,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
@@ -676,6 +777,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
@@ -702,6 +804,7 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 59fe356942b5..9e9adde335c8 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -115,6 +115,7 @@
#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
#define AT803X_DEBUG_REG_3C 0x3C
@@ -192,6 +193,9 @@
#define AT803X_KEEP_PLL_ENABLED BIT(0)
#define AT803X_DISABLE_SMARTEEE BIT(1)
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
/* ADC threshold */
#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
@@ -672,6 +676,7 @@ static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
phy_interface_t iface;
linkmode_zero(phy_support);
@@ -682,7 +687,7 @@ static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phylink_set(phy_support, Asym_Pause);
linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
/* Some modules support 10G modes as well as others we support.
* Mask out non-supported modes so the correct interface is picked.
*/
@@ -730,6 +735,9 @@ static int at803x_parse_dt(struct phy_device *phydev)
if (of_property_read_bool(node, "qca,disable-smarteee"))
priv->flags |= AT803X_DISABLE_SMARTEEE;
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
if (!tw || tw > 255) {
phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
@@ -999,6 +1007,20 @@ static int at8031_pll_config(struct phy_device *phydev)
AT803X_DEBUG_PLL_ON, 0);
}
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is hibernation mode enabled. After
+ * software reset, the value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
static int at803x_config_init(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
@@ -1051,6 +1073,10 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
+
/* Ar803x extended next page bit is enabled by default. Cisco
* multigig switches read this bit and attempt to negotiate 10Gbps
* rates even if the next page bit is disabled. This is incorrect
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 287cccf8f7f4..b2c0baa51f39 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -519,7 +519,7 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 31fbcdddc9ad..ad71c88c87e7 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -766,6 +766,41 @@ static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
+static int brcm_fet_suspend(struct phy_device *phydev)
+{
+ int reg, err, err2, brcmtest;
+
+	/* We cannot use a read/modify/write here, otherwise the PHY continues
+	 * to drive the LEDs, which defeats the purpose of low-power mode.
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_PDOWN);
+ if (err < 0)
+ return err;
+
+ /* Enable shadow register access */
+ brcmtest = phy_read(phydev, MII_BRCM_FET_BRCMTEST);
+ if (brcmtest < 0)
+ return brcmtest;
+
+ reg = brcmtest | MII_BRCM_FET_BT_SRE;
+
+ err = phy_write(phydev, MII_BRCM_FET_BRCMTEST, reg);
+ if (err < 0)
+ return err;
+
+ /* Set standby mode */
+ err = phy_modify(phydev, MII_BRCM_FET_SHDW_AUXMODE4,
+ MII_BRCM_FET_SHDW_AM4_STANDBY,
+ MII_BRCM_FET_SHDW_AM4_STANDBY);
+
+ /* Disable shadow register access */
+ err2 = phy_write(phydev, MII_BRCM_FET_BRCMTEST, brcmtest);
+ if (!err)
+ err = err2;
+
+ return err;
+}
+
static int bcm54xx_phy_probe(struct phy_device *phydev)
{
struct bcm54xx_phy_priv *priv;
@@ -1033,6 +1068,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = brcm_fet_config_init,
.config_intr = brcm_fet_config_intr,
.handle_interrupt = brcm_fet_handle_interrupt,
+ .suspend = brcm_fet_suspend,
+ .resume = brcm_fet_config_init,
}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
@@ -1041,6 +1078,8 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = brcm_fet_config_init,
.config_intr = brcm_fet_config_intr,
.handle_interrupt = brcm_fet_handle_interrupt,
+ .suspend = brcm_fet_suspend,
+ .resume = brcm_fet_config_init,
}, {
.phy_id = PHY_ID_BCM5395,
.phy_id_mask = 0xfffffff0,
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index f070776ca904..fd9ad4820192 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -478,6 +478,7 @@ static int mv2222_config_init(struct phy_device *phydev)
static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
phy_interface_t sfp_interface;
struct mv2222_data *priv;
@@ -489,7 +490,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
priv = (struct mv2222_data *)phydev->priv;
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a714150f5e8c..2810f4f9da0c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1952,7 +1952,7 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < count; i++) {
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -2845,6 +2845,7 @@ static int marvell_probe(struct phy_device *phydev)
static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
phy_interface_t interface;
struct device *dev;
@@ -2856,7 +2857,7 @@ static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, supported);
+ sfp_parse_support(phydev->sfp_bus, id, supported, interfaces);
interface = sfp_select_interface(phydev->sfp_bus, supported);
dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 2b7d0720720b..383a9c9f36e5 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -96,6 +96,11 @@ enum {
MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
+ /* SerDes reinitialization 88E21X0 */
+ MV_AN_21X0_SERDES_CTRL2 = 0x800f,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS = BIT(13),
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT = BIT(15),
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@@ -117,16 +122,16 @@ enum {
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_NO_SGMII_AN = 0x5,
MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH = 0x6,
MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII = 0x7,
- MV_V2_PORT_INTR_STS = 0xf040,
- MV_V2_PORT_INTR_MASK = 0xf043,
- MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
- MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
- MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
- MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
+ MV_V2_PORT_INTR_STS = 0xf040,
+ MV_V2_PORT_INTR_MASK = 0xf043,
+ MV_V2_PORT_INTR_STS_WOL_EN = BIT(8),
+ MV_V2_MAGIC_PKT_WORD0 = 0xf06b,
+ MV_V2_MAGIC_PKT_WORD1 = 0xf06c,
+ MV_V2_MAGIC_PKT_WORD2 = 0xf06d,
/* Wake on LAN registers */
- MV_V2_WOL_CTRL = 0xf06e,
- MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
- MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
+ MV_V2_WOL_CTRL = 0xf06e,
+ MV_V2_WOL_CTRL_CLEAR_STS = BIT(15),
+ MV_V2_WOL_CTRL_MAGIC_PKT_EN = BIT(0),
/* Temperature control/read registers (88X3310 only) */
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
@@ -140,6 +145,8 @@ struct mv3310_chip {
bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
+ int (*set_mactype)(struct phy_device *phydev, int mactype);
+ int (*select_mactype)(unsigned long *interfaces);
int (*init_interface)(struct phy_device *phydev, int mactype);
#ifdef CONFIG_HWMON
@@ -466,9 +473,10 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
phy_interface_t iface;
- sfp_parse_support(phydev->sfp_bus, id, support);
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
iface = sfp_select_interface(phydev->sfp_bus, support);
if (iface != PHY_INTERFACE_MODE_10GBASER) {
@@ -593,6 +601,49 @@ static int mv2110_get_mactype(struct phy_device *phydev)
return mactype & MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
}
+static int mv2110_set_mactype(struct phy_device *phydev, int mactype)
+{
+ int err, val;
+
+ mactype &= MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK;
+ err = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_21X0_PORT_CTRL,
+ MV_PMA_21X0_PORT_CTRL_SWRST |
+ MV_PMA_21X0_PORT_CTRL_MACTYPE_MASK,
+ MV_PMA_21X0_PORT_CTRL_SWRST | mactype);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS |
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT);
+ if (err)
+ return err;
+
+ err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_AN,
+ MV_AN_21X0_SERDES_CTRL2, val,
+ !(val &
+ MV_AN_21X0_SERDES_CTRL2_RUN_INIT),
+ 5000, 100000, true);
+ if (err)
+ return err;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MV_AN_21X0_SERDES_CTRL2,
+ MV_AN_21X0_SERDES_CTRL2_AUTO_INIT_DIS);
+}
+
+static int mv2110_select_mactype(unsigned long *interfaces)
+{
+ if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_USXGMII;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ !test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_5GBASER;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_PMA_21X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH;
+ else
+ return -1;
+}
+
static int mv3310_get_mactype(struct phy_device *phydev)
{
int mactype;
@@ -604,6 +655,46 @@ static int mv3310_get_mactype(struct phy_device *phydev)
return mactype & MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
}
+static int mv3310_set_mactype(struct phy_device *phydev, int mactype)
+{
+ int ret;
+
+ mactype &= MV_V2_33X0_PORT_CTRL_MACTYPE_MASK;
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_33X0_PORT_CTRL_MACTYPE_MASK,
+ mactype);
+ if (ret <= 0)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_33X0_PORT_CTRL_SWRST);
+}
+
+static int mv3310_select_mactype(unsigned long *interfaces)
+{
+ if (test_bit(PHY_INTERFACE_MODE_USXGMII, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_USXGMII;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces) &&
+ test_bit(PHY_INTERFACE_MODE_XAUI, interfaces))
+ return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI;
+ else if (test_bit(PHY_INTERFACE_MODE_10GBASER, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_RXAUI, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_RXAUI_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_XAUI, interfaces))
+ return MV_V2_3310_PORT_CTRL_MACTYPE_XAUI_RATE_MATCH;
+ else if (test_bit(PHY_INTERFACE_MODE_SGMII, interfaces))
+ return MV_V2_33X0_PORT_CTRL_MACTYPE_10GBASER;
+ else
+ return -1;
+}
+
static int mv2110_init_interface(struct phy_device *phydev, int mactype)
{
struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
@@ -687,6 +778,20 @@ static int mv3310_config_init(struct phy_device *phydev)
if (err)
return err;
+ /* If the host provided its supported interface modes, try to select
+ * the best one
+ */
+ if (!phy_interface_empty(phydev->host_interfaces)) {
+ mactype = chip->select_mactype(phydev->host_interfaces);
+ if (mactype >= 0) {
+ phydev_info(phydev, "Changing MACTYPE to %i\n",
+ mactype);
+ err = chip->set_mactype(phydev, mactype);
+ if (err)
+ return err;
+ }
+ }
+
mactype = chip->get_mactype(phydev);
if (mactype < 0)
return mactype;
@@ -1049,6 +1154,8 @@ static const struct mv3310_chip mv3310_type = {
.has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3310_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
+ .set_mactype = mv3310_set_mactype,
+ .select_mactype = mv3310_select_mactype,
.init_interface = mv3310_init_interface,
#ifdef CONFIG_HWMON
@@ -1060,6 +1167,8 @@ static const struct mv3310_chip mv3340_type = {
.has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3340_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
+ .set_mactype = mv3310_set_mactype,
+ .select_mactype = mv3310_select_mactype,
.init_interface = mv3340_init_interface,
#ifdef CONFIG_HWMON
@@ -1070,6 +1179,8 @@ static const struct mv3310_chip mv3340_type = {
static const struct mv3310_chip mv2110_type = {
.init_supported_interfaces = mv2110_init_supported_interfaces,
.get_mactype = mv2110_get_mactype,
+ .set_mactype = mv2110_set_mactype,
+ .select_mactype = mv2110_select_mactype,
.init_interface = mv2110_init_interface,
#ifdef CONFIG_HWMON
@@ -1080,6 +1191,8 @@ static const struct mv3310_chip mv2110_type = {
static const struct mv3310_chip mv2111_type = {
.init_supported_interfaces = mv2111_init_supported_interfaces,
.get_mactype = mv2110_get_mactype,
+ .set_mactype = mv2110_set_mactype,
+ .select_mactype = mv2110_select_mactype,
.init_interface = mv2110_init_interface,
#ifdef CONFIG_HWMON
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8a2dbe849866..f82090bdf7ab 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -232,7 +232,7 @@ static ssize_t mdio_bus_stat_field_show(struct device *dev,
val = mdio_bus_get_stat(&bus->stats[sattr->addr],
sattr->field_offset);
- return sprintf(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
@@ -251,7 +251,7 @@ static ssize_t mdio_bus_device_stat_field_show(struct device *dev,
val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset);
- return sprintf(buf, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
#define MDIO_BUS_STATS_ATTR_DECL(field, file) \
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 73f7962a37d3..c49062ad72c6 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
irq_status == INTSRC_ENERGY_DETECT)
return IRQ_HANDLED;
- /* Give PHY some time before MAC starts sending data. This works
- * around an issue where network doesn't come up properly.
- */
- if (!(irq_status & INTSRC_LINK_DOWN))
- phy_queue_state_machine(phydev, msecs_to_jiffies(100));
- else
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e78d0bf69bc3..3757e069c486 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -92,6 +92,15 @@
#define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0)
#define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0)
+#define KSZPHY_WIRE_PAIR_MASK 0x3
+
+#define LAN8814_CABLE_DIAG 0x12
+#define LAN8814_CABLE_DIAG_STAT_MASK GENMASK(9, 8)
+#define LAN8814_CABLE_DIAG_VCT_DATA_MASK GENMASK(7, 0)
+#define LAN8814_PAIR_BIT_SHIFT 12
+
+#define LAN8814_WIRE_PAIR_MASK 0xF
+
/* Lan8814 general Interrupt control/status reg in GPHY specific block. */
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
@@ -257,6 +266,8 @@ static struct kszphy_hw_stat kszphy_hw_stats[] = {
struct kszphy_type {
u32 led_mode_reg;
u16 interrupt_level_mask;
+ u16 cable_diag_reg;
+ unsigned long pair_mask;
bool has_broadcast_disable;
bool has_nand_tree_disable;
bool has_rmii_ref_clk_sel;
@@ -313,6 +324,13 @@ struct kszphy_priv {
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
+ .cable_diag_reg = LAN8814_CABLE_DIAG,
+ .pair_mask = LAN8814_WIRE_PAIR_MASK,
+};
+
+static const struct kszphy_type ksz886x_type = {
+ .cable_diag_reg = KSZ8081_LMD,
+ .pair_mask = KSZPHY_WIRE_PAIR_MASK,
};
static const struct kszphy_type ksz8021_type = {
@@ -1650,7 +1668,7 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
int i;
for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
}
}
@@ -1796,6 +1814,17 @@ static int kszphy_probe(struct phy_device *phydev)
return 0;
}
+static int lan8814_cable_test_start(struct phy_device *phydev)
+{
+ /* If autoneg is enabled, we won't be able to test cross-pair
+ * shorts. In this case, the PHY will "detect" a link and
+ * confuse the internal state machine - disable autoneg here.
+ * Set the speed to 1000 Mbit and full duplex.
+ */
+ return phy_modify(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100,
+ BMCR_SPEED1000 | BMCR_FULLDPLX);
+}
+
static int ksz886x_cable_test_start(struct phy_device *phydev)
{
if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA)
@@ -1809,9 +1838,9 @@ static int ksz886x_cable_test_start(struct phy_device *phydev)
return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
}
-static int ksz886x_cable_test_result_trans(u16 status)
+static int ksz886x_cable_test_result_trans(u16 status, u16 mask)
{
- switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_NORMAL:
return ETHTOOL_A_CABLE_RESULT_CODE_OK;
case KSZ8081_LMD_STAT_SHORT:
@@ -1825,15 +1854,15 @@ static int ksz886x_cable_test_result_trans(u16 status)
}
}
-static bool ksz886x_cable_test_failed(u16 status)
+static bool ksz886x_cable_test_failed(u16 status, u16 mask)
{
- return FIELD_GET(KSZ8081_LMD_STAT_MASK, status) ==
+ return FIELD_GET(mask, status) ==
KSZ8081_LMD_STAT_FAIL;
}
-static bool ksz886x_cable_test_fault_length_valid(u16 status)
+static bool ksz886x_cable_test_fault_length_valid(u16 status, u16 mask)
{
- switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ switch (FIELD_GET(mask, status)) {
case KSZ8081_LMD_STAT_OPEN:
fallthrough;
case KSZ8081_LMD_STAT_SHORT:
@@ -1842,29 +1871,79 @@ static bool ksz886x_cable_test_fault_length_valid(u16 status)
return false;
}
-static int ksz886x_cable_test_fault_length(u16 status)
+static int ksz886x_cable_test_fault_length(struct phy_device *phydev, u16 status, u16 data_mask)
{
int dt;
/* According to the data sheet the distance to the fault is
- * DELTA_TIME * 0.4 meters.
+ * DELTA_TIME * 0.4 meters for the KSZ PHYs, and
+ * (DELTA_TIME - 22) * 0.8 meters for the LAN8814 PHY.
*/
- dt = FIELD_GET(KSZ8081_LMD_DELTA_TIME_MASK, status);
+ dt = FIELD_GET(data_mask, status);
- return (dt * 400) / 10;
+ if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_LAN8814)
+ return ((dt - 22) * 800) / 10;
+ else
+ return (dt * 400) / 10;
}
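As an aside (not part of the patch): the two fault-distance formulas above are easy to sanity-check with a small standalone program. The delta-time value below is a made-up sample reading and the helper names are hypothetical; the integer arithmetic mirrors ksz886x_cable_test_fault_length() above, with the result in centimetres.

#include <stdio.h>

/* Sketch only: dt * 0.4 m for the KSZ PHYs, (dt - 22) * 0.8 m for the
 * LAN8814, both expressed in centimetres with integer maths as in the
 * driver above.
 */
static int ksz_fault_length_cm(int dt)
{
	return (dt * 400) / 10;
}

static int lan8814_fault_length_cm(int dt)
{
	return ((dt - 22) * 800) / 10;
}

int main(void)
{
	int dt = 50;	/* hypothetical delta-time reading */

	printf("KSZ:     dt=%d -> %d cm\n", dt, ksz_fault_length_cm(dt));
	printf("LAN8814: dt=%d -> %d cm\n", dt, lan8814_fault_length_cm(dt));
	return 0;
}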
static int ksz886x_cable_test_wait_for_completion(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
int val, ret;
- ret = phy_read_poll_timeout(phydev, KSZ8081_LMD, val,
+ ret = phy_read_poll_timeout(phydev, type->cable_diag_reg, val,
!(val & KSZ8081_LMD_ENABLE_TEST),
30000, 100000, true);
return ret < 0 ? ret : 0;
}
+static int lan8814_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = { ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ u32 fault_length;
+ int ret;
+ int val;
+
+ val = KSZ8081_LMD_ENABLE_TEST;
+ val = val | (pair << LAN8814_PAIR_BIT_SHIFT);
+
+ ret = phy_write(phydev, LAN8814_CABLE_DIAG, val);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz886x_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, LAN8814_CABLE_DIAG);
+ if (val < 0)
+ return val;
+
+ if (ksz886x_cable_test_failed(val, LAN8814_CABLE_DIAG_STAT_MASK))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_result_trans(val,
+ LAN8814_CABLE_DIAG_STAT_MASK
+ ));
+ if (ret)
+ return ret;
+
+ if (!ksz886x_cable_test_fault_length_valid(val, LAN8814_CABLE_DIAG_STAT_MASK))
+ return 0;
+
+ fault_length = ksz886x_cable_test_fault_length(phydev, val,
+ LAN8814_CABLE_DIAG_VCT_DATA_MASK);
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length);
+}
+
static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
{
static const int ethtool_pair[] = {
@@ -1872,6 +1951,7 @@ static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
ETHTOOL_A_CABLE_PAIR_B,
};
int ret, val, mdix;
+ u32 fault_length;
/* There is no way to choose the pair, like we do on the ksz9031.
 * We can work around this limitation by using the MDI-X functionality.
@@ -1910,25 +1990,27 @@ static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
if (val < 0)
return val;
- if (ksz886x_cable_test_failed(val))
+ if (ksz886x_cable_test_failed(val, KSZ8081_LMD_STAT_MASK))
return -EAGAIN;
ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
- ksz886x_cable_test_result_trans(val));
+ ksz886x_cable_test_result_trans(val, KSZ8081_LMD_STAT_MASK));
if (ret)
return ret;
- if (!ksz886x_cable_test_fault_length_valid(val))
+ if (!ksz886x_cable_test_fault_length_valid(val, KSZ8081_LMD_STAT_MASK))
return 0;
- return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- ksz886x_cable_test_fault_length(val));
+ fault_length = ksz886x_cable_test_fault_length(phydev, val, KSZ8081_LMD_DELTA_TIME_MASK);
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair], fault_length);
}
static int ksz886x_cable_test_get_status(struct phy_device *phydev,
bool *finished)
{
- unsigned long pair_mask = 0x3;
+ const struct kszphy_type *type = phydev->drv->driver_data;
+ unsigned long pair_mask = type->pair_mask;
int retries = 20;
int pair, ret;
@@ -1937,7 +2019,10 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
/* Try harder if link partner is active */
while (pair_mask && retries--) {
for_each_set_bit(pair, &pair_mask, 4) {
- ret = ksz886x_cable_test_one_pair(phydev, pair);
+ if (type->cable_diag_reg == LAN8814_CABLE_DIAG)
+ ret = lan8814_cable_test_one_pair(phydev, pair);
+ else
+ ret = ksz886x_cable_test_one_pair(phydev, pair);
if (ret == -EAGAIN)
continue;
if (ret < 0)
@@ -2676,19 +2761,82 @@ static int lan8804_config_init(struct phy_device *phydev)
return 0;
}
+static irqreturn_t lan8804_handle_interrupt(struct phy_device *phydev)
+{
+ int status;
+
+ status = phy_read(phydev, LAN8814_INTS);
+ if (status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (status > 0)
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+#define LAN8804_OUTPUT_CONTROL 25
+#define LAN8804_OUTPUT_CONTROL_INTR_BUFFER BIT(14)
+#define LAN8804_CONTROL 31
+#define LAN8804_CONTROL_INTR_POLARITY BIT(14)
+
+static int lan8804_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ /* This is an internal PHY of lan966x and it is not possible to change
+ * the interrupt polarity on the GIC found in lan966x, therefore change
+ * the polarity of the interrupt in the PHY from active low to active
+ * high.
+ */
+ phy_write(phydev, LAN8804_CONTROL, LAN8804_CONTROL_INTR_POLARITY);
+
+ /* By default the interrupt buffer is open-drain, in which case the
+ * interrupt can only be active low. Therefore change the interrupt
+ * buffer to push-pull so that the interrupt polarity can be changed.
+ */
+ phy_write(phydev, LAN8804_OUTPUT_CONTROL,
+ LAN8804_OUTPUT_CONTROL_INTR_BUFFER);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ if (err)
+ return err;
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
int irq_status, tsu_irq_status;
+ int ret = IRQ_NONE;
irq_status = phy_read(phydev, LAN8814_INTS);
- if (irq_status > 0 && (irq_status & LAN8814_INT_LINK))
- phy_trigger_machine(phydev);
-
if (irq_status < 0) {
phy_error(phydev);
return IRQ_NONE;
}
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
while (1) {
tsu_irq_status = lanphy_read_page_reg(phydev, 4,
LAN8814_INTR_STS_REG);
@@ -2697,12 +2845,15 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
(tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
LAN8814_INTR_STS_REG_1588_TSU1_ |
LAN8814_INTR_STS_REG_1588_TSU2_ |
- LAN8814_INTR_STS_REG_1588_TSU3_)))
+ LAN8814_INTR_STS_REG_1588_TSU3_))) {
lan8814_handle_ptp_interrupt(phydev);
- else
+ ret = IRQ_HANDLED;
+ } else {
break;
+ }
}
- return IRQ_HANDLED;
+
+ return ret;
}
static int lan8814_ack_interrupt(struct phy_device *phydev)
@@ -2729,9 +2880,9 @@ static int lan8814_config_intr(struct phy_device *phydev)
if (err)
return err;
- err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
+ err = phy_write(phydev, LAN8814_INTC, LAN8814_INT_LINK);
} else {
- err = phy_write(phydev, LAN8814_INTC, 0);
+ err = phy_write(phydev, LAN8814_INTC, 0);
if (err)
return err;
@@ -2873,12 +3024,18 @@ static int lan8814_config_init(struct phy_device *phydev)
return 0;
}
+/* It is expected that there will not be any 'lan8814_take_coma_mode'
+ * function called on suspend, because the GPIO line can be shared: if one
+ * of the PHYs went back into coma mode, all the other PHYs sharing the
+ * line would follow, which is wrong.
+ */
static int lan8814_release_coma_mode(struct phy_device *phydev)
{
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
+ GPIOD_OUT_HIGH_OPEN_DRAIN |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
@@ -3105,6 +3262,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip INDY Gigabit Quad PHY",
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = lan8814_config_init,
.driver_data = &lan8814_type,
.probe = lan8814_probe,
@@ -3117,6 +3275,8 @@ static struct phy_driver ksphy_driver[] = {
.resume = kszphy_resume,
.config_intr = lan8814_config_intr,
.handle_interrupt = lan8814_handle_interrupt,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.phy_id = PHY_ID_LAN8804,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -3131,6 +3291,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = kszphy_resume,
+ .config_intr = lan8804_config_intr,
+ .handle_interrupt = lan8804_handle_interrupt,
}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -3163,6 +3325,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
+ .driver_data = &ksz886x_type,
/* PHY_BASIC_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
.config_init = kszphy_config_init,
@@ -3185,6 +3348,8 @@ static struct phy_driver ksphy_driver[] = {
.name = "Microchip KSZ9477",
/* PHY_GBIT_FEATURES */
.config_init = kszphy_config_init,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index d4c93d59bc53..8569a545e0a3 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -28,12 +28,16 @@
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08)
/* Interrupt Mask Register */
#define LAN87XX_INTERRUPT_MASK (0x19)
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+#define LAN87XX_INTERRUPT_MASK_2 (0x09)
+#define LAN87XX_MASK_COMM_RDY BIT(10)
+
/* MISC Control 1 Register */
#define LAN87XX_CTRL_1 (0x11)
#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
int rc, val = 0;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
+ /* clear all interrupts */
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (rc < 0)
+ return rc;
+
+ /* enable link down and comm ready interrupts */
+ val = LAN87XX_MASK_LINK_DOWN;
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ val = LAN87XX_MASK_COMM_RDY;
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
} else {
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
- if (rc)
+ if (rc < 0)
return rc;
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
}
return rc < 0 ? rc : 0;
@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
if (irq_status < 0) {
phy_error(phydev);
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b7b2521c73fb..ee5b17edca39 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -706,14 +706,6 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
struct phy_device *phydev = ctx->phydev;
struct vsc8531_private *priv = phydev->priv;
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->rx_sa = ctx->sa.rx_sa;
@@ -730,24 +722,13 @@ static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
struct macsec_flow *flow, bool update)
{
- struct phy_device *phydev = ctx->phydev;
- struct vsc8531_private *priv = phydev->priv;
-
- if (!flow) {
- flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
- }
-
flow->assoc_num = ctx->sa.assoc_num;
flow->tx_sa = ctx->sa.tx_sa;
/* Always match untagged packets on egress */
flow->match.untagged = 1;
- return vsc8584_macsec_add_flow(phydev, flow, update);
+ return vsc8584_macsec_add_flow(ctx->phydev, flow, update);
}
static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
@@ -755,10 +736,6 @@ static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_enable(ctx->phydev, flow);
@@ -770,10 +747,6 @@ static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_flow_disable(ctx->phydev, flow);
@@ -785,12 +758,8 @@ static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_secy *secy = ctx->secy;
- if (ctx->prepare) {
- if (priv->secy)
- return -EEXIST;
-
- return 0;
- }
+ if (priv->secy)
+ return -EEXIST;
priv->secy = secy;
@@ -807,10 +776,6 @@ static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
vsc8584_macsec_del_flow(ctx->phydev, flow);
@@ -823,10 +788,6 @@ static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
{
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
vsc8584_macsec_del_secy(ctx);
return vsc8584_macsec_add_secy(ctx);
}
@@ -847,10 +808,6 @@ static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
struct vsc8531_private *priv = ctx->phydev->priv;
struct macsec_flow *flow, *tmp;
- /* No operation to perform before the commit step */
- if (ctx->prepare)
- return 0;
-
list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
if (flow->bank == MACSEC_INGR && flow->rx_sa &&
flow->rx_sa->sc->sci == ctx->rx_sc->sci)
@@ -862,33 +819,40 @@ static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_rxsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_rxsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
@@ -899,11 +863,8 @@ static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
@@ -911,33 +872,40 @@ static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
{
- struct macsec_flow *flow = NULL;
-
- if (ctx->prepare)
- return __vsc8584_macsec_add_txsa(ctx, flow, false);
+ struct phy_device *phydev = ctx->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct macsec_flow *flow;
+ int ret;
- flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
+ flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- vsc8584_macsec_flow_enable(ctx->phydev, flow);
+ memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
+
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, false);
+ if (ret)
+ return ret;
+
+ vsc8584_macsec_flow_enable(phydev, flow);
return 0;
}
static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
{
struct macsec_flow *flow;
+ int ret;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare) {
- /* Make sure the flow is disabled before updating it */
- vsc8584_macsec_flow_disable(ctx->phydev, flow);
+ /* Make sure the flow is disabled before updating it */
+ vsc8584_macsec_flow_disable(ctx->phydev, flow);
- return __vsc8584_macsec_add_txsa(ctx, flow, true);
- }
+ ret = __vsc8584_macsec_add_txsa(ctx, flow, true);
+ if (ret)
+ return ret;
vsc8584_macsec_flow_enable(ctx->phydev, flow);
return 0;
@@ -948,11 +916,8 @@ static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
struct macsec_flow *flow;
flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
-
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (ctx->prepare)
- return 0;
vsc8584_macsec_del_flow(ctx->phydev, flow);
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 7e3017e7a1c0..8a13b1ad9a33 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -136,7 +136,7 @@ static void vsc85xx_get_strings(struct phy_device *phydev, u8 *data)
return;
for (i = 0; i < priv->nstats; i++)
- strlcpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
+ strscpy(data + i * ETH_GSTRING_LEN, priv->hw_stats[i].string,
ETH_GSTRING_LEN);
}
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index 2a8195c50d14..ec91e671f8aa 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -10,6 +10,7 @@
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/hwmon.h>
#include <linux/bitfield.h>
@@ -34,6 +35,11 @@
#define MII_CFG1 18
#define MII_CFG1_MASTER_SLAVE BIT(15)
#define MII_CFG1_AUTO_OP BIT(14)
+#define MII_CFG1_INTERFACE_MODE_MASK GENMASK(9, 8)
+#define MII_CFG1_MII_MODE (0x0 << 8)
+#define MII_CFG1_RMII_MODE_REFCLK_IN BIT(8)
+#define MII_CFG1_RMII_MODE_REFCLK_OUT BIT(9)
+#define MII_CFG1_REVMII_MODE GENMASK(9, 8)
#define MII_CFG1_SLEEP_CONFIRM BIT(6)
#define MII_CFG1_LED_MODE_MASK GENMASK(5, 4)
#define MII_CFG1_LED_MODE_LINKUP 0
@@ -72,11 +78,15 @@
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
+/* Configure REF_CLK as input in RMII mode */
+#define TJA110X_RMII_MODE_REFCLK_IN BIT(0)
+
struct tja11xx_priv {
char *hwmon_name;
struct device *hwmon_dev;
struct phy_device *phydev;
struct work_struct phy_register_work;
+ u32 flags;
};
struct tja11xx_phy_stats {
@@ -251,8 +261,34 @@ do_test:
return __genphy_config_aneg(phydev, changed);
}
+static int tja11xx_get_interface_mode(struct phy_device *phydev)
+{
+ struct tja11xx_priv *priv = phydev->priv;
+ int mii_mode;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ mii_mode = MII_CFG1_MII_MODE;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ mii_mode = MII_CFG1_REVMII_MODE;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (priv->flags & TJA110X_RMII_MODE_REFCLK_IN)
+ mii_mode = MII_CFG1_RMII_MODE_REFCLK_IN;
+ else
+ mii_mode = MII_CFG1_RMII_MODE_REFCLK_OUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mii_mode;
+}
+
static int tja11xx_config_init(struct phy_device *phydev)
{
+ u16 reg_mask, reg_val;
int ret;
ret = tja11xx_enable_reg_write(phydev);
@@ -265,15 +301,32 @@ static int tja11xx_config_init(struct phy_device *phydev)
switch (phydev->phy_id & PHY_ID_MASK) {
case PHY_ID_TJA1100:
- ret = phy_modify(phydev, MII_CFG1,
- MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
- MII_CFG1_LED_ENABLE,
- MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
- MII_CFG1_LED_ENABLE);
+ reg_mask = MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_MASK |
+ MII_CFG1_LED_ENABLE;
+ reg_val = MII_CFG1_AUTO_OP | MII_CFG1_LED_MODE_LINKUP |
+ MII_CFG1_LED_ENABLE;
+
+ reg_mask |= MII_CFG1_INTERFACE_MODE_MASK;
+ ret = tja11xx_get_interface_mode(phydev);
+ if (ret < 0)
+ return ret;
+
+ reg_val |= (ret & 0xffff);
+ ret = phy_modify(phydev, MII_CFG1, reg_mask, reg_val);
if (ret)
return ret;
break;
case PHY_ID_TJA1101:
+ reg_mask = MII_CFG1_INTERFACE_MODE_MASK;
+ ret = tja11xx_get_interface_mode(phydev);
+ if (ret < 0)
+ return ret;
+
+ reg_val = ret & 0xffff;
+ ret = phy_modify(phydev, MII_CFG1, reg_mask, reg_val);
+ if (ret)
+ return ret;
+ fallthrough;
case PHY_ID_TJA1102:
ret = phy_set_bits(phydev, MII_COMMCFG, MII_COMMCFG_AUTO_OP);
if (ret)
@@ -458,16 +511,36 @@ static int tja11xx_hwmon_register(struct phy_device *phydev,
return PTR_ERR_OR_ZERO(priv->hwmon_dev);
}
+static int tja11xx_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct tja11xx_priv *priv = phydev->priv;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "nxp,rmii-refclk-in"))
+ priv->flags |= TJA110X_RMII_MODE_REFCLK_IN;
+
+ return 0;
+}
+
static int tja11xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct tja11xx_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->phydev = phydev;
+ phydev->priv = priv;
+
+ ret = tja11xx_parse_dt(phydev);
+ if (ret)
+ return ret;
return tja11xx_hwmon_register(phydev, priv);
}
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 1f2531a1a876..2c8bf438ea61 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -74,6 +74,80 @@ const char *phy_duplex_to_str(unsigned int duplex)
}
EXPORT_SYMBOL_GPL(phy_duplex_to_str);
+/**
+ * phy_rate_matching_to_str - Return a string describing the rate matching
+ *
+ * @rate_matching: Type of rate matching to describe
+ */
+const char *phy_rate_matching_to_str(int rate_matching)
+{
+ switch (rate_matching) {
+ case RATE_MATCH_NONE:
+ return "none";
+ case RATE_MATCH_PAUSE:
+ return "pause";
+ case RATE_MATCH_CRS:
+ return "crs";
+ case RATE_MATCH_OPEN_LOOP:
+ return "open-loop";
+ }
+ return "Unsupported (update phy-core.c)";
+}
+EXPORT_SYMBOL_GPL(phy_rate_matching_to_str);
+
+/**
+ * phy_interface_num_ports - Return the number of links that can be carried by
+ * a given MAC-PHY physical link. Returns 0 if this is
+ * unknown, otherwise the number of links.
+ *
+ * @interface: The interface mode for which we want the number of ports
+ */
+int phy_interface_num_ports(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XLGMII:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_5GBASER:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_25GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return 1;
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return 4;
+ case PHY_INTERFACE_MODE_MAX:
+ WARN_ONCE(1, "PHY_INTERFACE_MODE_MAX isn't a valid interface mode");
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(phy_interface_num_ports);
+
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed.
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8d3ee3a6495b..e741d8aebffe 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -115,6 +115,33 @@ void phy_print_status(struct phy_device *phydev)
EXPORT_SYMBOL(phy_print_status);
/**
+ * phy_get_rate_matching - determine if rate matching is supported
+ * @phydev: The phy device to return rate matching for
+ * @iface: The interface mode to use
+ *
+ * This determines the type of rate matching (if any) that @phydev supports
+ * using @iface. @iface may be %PHY_INTERFACE_MODE_NA to determine if any
+ * interface supports rate matching.
+ *
+ * Return: The type of rate matching @phydev supports for @iface, or
+ * %RATE_MATCH_NONE.
+ */
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int ret = RATE_MATCH_NONE;
+
+ if (phydev->drv->get_rate_matching) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_rate_matching(phydev, iface);
+ mutex_unlock(&phydev->lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_get_rate_matching);
+
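Purely as a usage sketch (not part of this diff): a caller with an attached PHY could combine the new phy_get_rate_matching() helper with phy_rate_matching_to_str() from phy-core.c to log what the PHY can do for a given interface mode; the wrapper name below is hypothetical.

/* Illustrative only: log the PHY's rate-matching mode for @iface */
static void example_log_rate_matching(struct phy_device *phydev,
				      phy_interface_t iface)
{
	int rm = phy_get_rate_matching(phydev, iface);

	phydev_info(phydev, "rate matching on %s: %s\n",
		    phy_modes(iface), phy_rate_matching_to_str(rm));
}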
+/**
* phy_config_interrupt - configure the PHY device for the requested interrupts
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
@@ -256,6 +283,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.duplex = phydev->duplex;
cmd->base.master_slave_cfg = phydev->master_slave_get;
cmd->base.master_slave_state = phydev->master_slave_state;
+ cmd->base.rate_matching = phydev->rate_matching;
if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
cmd->base.port = PORT_BNC;
else
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 12ff276b80ae..57849ac0384e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_led_triggers.h>
+#include <linux/pse-pd/pse.h>
#include <linux/property.h>
#include <linux/sfp.h>
#include <linux/skbuff.h>
@@ -316,11 +317,13 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
phydev->suspended_by_mdio_bus = 0;
- /* If we manged to get here with the PHY state machine in a state neither
- * PHY_HALTED nor PHY_READY this is an indication that something went wrong
- * and we should most likely be using MAC managed PM and we are not.
+ /* If we managed to get here with the PHY state machine in a state
+ * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication
+ * that something went wrong and we should most likely be using
+ * MAC managed PM, but we are not.
*/
- WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+ WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+ phydev->state != PHY_UP);
ret = phy_init_hw(phydev);
if (ret < 0)
@@ -370,7 +373,7 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
if (!fixup)
return -ENOMEM;
- strlcpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
+ strscpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
fixup->phy_uid = phy_uid;
fixup->phy_uid_mask = phy_uid_mask;
fixup->run = run;
@@ -520,7 +523,7 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
+ return sysfs_emit(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
}
static DEVICE_ATTR_RO(phy_id);
@@ -535,7 +538,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
else
mode = phy_modes(phydev->interface);
- return sprintf(buf, "%s\n", mode);
+ return sysfs_emit(buf, "%s\n", mode);
}
static DEVICE_ATTR_RO(phy_interface);
@@ -545,7 +548,7 @@ phy_has_fixups_show(struct device *dev, struct device_attribute *attr,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "%d\n", phydev->has_fixups);
+ return sysfs_emit(buf, "%d\n", phydev->has_fixups);
}
static DEVICE_ATTR_RO(phy_has_fixups);
@@ -555,7 +558,7 @@ static ssize_t phy_dev_flags_show(struct device *dev,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "0x%08x\n", phydev->dev_flags);
+ return sysfs_emit(buf, "0x%08x\n", phydev->dev_flags);
}
static DEVICE_ATTR_RO(phy_dev_flags);
@@ -989,6 +992,7 @@ EXPORT_SYMBOL(phy_device_register);
void phy_device_remove(struct phy_device *phydev)
{
unregister_mii_timestamper(phydev->mii_ts);
+ pse_control_put(phydev->psec);
device_del(&phydev->mdio.dev);
@@ -1310,7 +1314,7 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr,
{
struct phy_device *phydev = to_phy_device(dev);
- return sprintf(buf, "%d\n", !phydev->attached_dev);
+ return sysfs_emit(buf, "%d\n", !phydev->attached_dev);
}
static DEVICE_ATTR_RO(phy_standalone);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9bd69328dc4d..75464df191ef 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -77,6 +77,7 @@ struct phylink {
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
+ DECLARE_PHY_INTERFACE_MASK(sfp_interfaces);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
u8 sfp_port;
};
@@ -155,8 +156,84 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
-static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
- unsigned long caps)
+/**
+ * phylink_interface_max_speed() - get the maximum speed of a phy interface
+ * @interface: phy interface mode defined by &typedef phy_interface_t
+ *
+ * Determine the maximum speed of a phy interface. This is intended to help
+ * determine the correct speed to pass to the MAC when the phy is performing
+ * rate matching.
+ *
+ * Return: The maximum speed of @interface
+ */
+static int phylink_interface_max_speed(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ return SPEED_100;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ return SPEED_1000;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return SPEED_2500;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ return SPEED_5000;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return SPEED_10000;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ return SPEED_25000;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ return SPEED_40000;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ /* No idea! Garbage in, unknown out */
+ return SPEED_UNKNOWN;
+ }
+
+ /* If we get here, someone forgot to add an interface mode above */
+ WARN_ON_ONCE(1);
+ return SPEED_UNKNOWN;
+}
+
+/**
+ * phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
+ * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * @caps: bitmask of MAC capabilities
+ *
+ * Set all possible pause, speed and duplex linkmodes in @linkmodes that are
+ * supported by the @caps. @linkmodes must have been initialised previously.
+ */
+void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps)
{
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
@@ -295,21 +372,72 @@ static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
__set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
}
}
+EXPORT_SYMBOL_GPL(phylink_caps_to_linkmodes);
+
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF },
+};
/**
- * phylink_get_linkmodes() - get acceptable link modes
- * @linkmodes: ethtool linkmode mask (must be already initialised)
+ * phylink_cap_from_speed_duplex - Get mac capability from speed/duplex
+ * @speed: the speed to search for
+ * @duplex: the duplex to search for
+ *
+ * Find the mac capability for a given speed and duplex.
+ *
+ * Return: A mask with the mac capability matching @speed and @duplex, or 0 if
+ * there were no matches.
+ */
+static unsigned long phylink_cap_from_speed_duplex(int speed,
+ unsigned int duplex)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++) {
+ if (speed == phylink_caps_params[i].speed &&
+ duplex == phylink_caps_params[i].duplex)
+ return phylink_caps_params[i].mask;
+ }
+
+ return 0;
+}
+
+/**
+ * phylink_get_capabilities() - get capabilities for a given MAC
* @interface: phy interface mode defined by &typedef phy_interface_t
* @mac_capabilities: bitmask of MAC capabilities
+ * @rate_matching: type of rate matching being performed
*
- * Set all possible pause, speed and duplex linkmodes in @linkmodes that
- * are supported by the @interface mode and @mac_capabilities. @linkmodes
- * must have been initialised previously.
+ * Get the MAC capabilities that are supported by the @interface mode and
+ * @mac_capabilities.
*/
-void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
- unsigned long mac_capabilities)
+unsigned long phylink_get_capabilities(phy_interface_t interface,
+ unsigned long mac_capabilities,
+ int rate_matching)
{
+ int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ unsigned long matched_caps = 0;
switch (interface) {
case PHY_INTERFACE_MODE_USXGMII:
@@ -321,6 +449,7 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_GMII:
caps |= MAC_1000HD | MAC_1000FD;
@@ -344,6 +473,7 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
case PHY_INTERFACE_MODE_1000BASEX:
caps |= MAC_1000HD;
fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEKX:
case PHY_INTERFACE_MODE_TRGMII:
caps |= MAC_1000FD;
break;
@@ -381,9 +511,55 @@ void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface,
break;
}
- phylink_caps_to_linkmodes(linkmodes, caps & mac_capabilities);
+ switch (rate_matching) {
+ case RATE_MATCH_OPEN_LOOP:
+ /* TODO */
+ fallthrough;
+ case RATE_MATCH_NONE:
+ matched_caps = 0;
+ break;
+ case RATE_MATCH_PAUSE: {
+ /* The MAC must support asymmetric pause towards the local
+ * device for this. We could allow just symmetric pause, but
+ * then we might have to renegotiate if the link partner
+ * doesn't support pause. This is because there's no way to
+ * accept pause frames without transmitting them if we only
+ * support symmetric pause.
+ */
+ if (!(mac_capabilities & MAC_SYM_PAUSE) ||
+ !(mac_capabilities & MAC_ASYM_PAUSE))
+ break;
+
+ /* We can't adapt if the MAC doesn't support the interface's
+ * max speed at full duplex.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_FULL)) {
+ /* Although a duplex-matching phy might exist, we
+ * conservatively remove these modes because the MAC
+ * will not be aware of the half-duplex nature of the
+ * link.
+ */
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
+ }
+ break;
+ }
+ case RATE_MATCH_CRS:
+ /* The MAC must support half duplex at the interface's max
+ * speed.
+ */
+ if (mac_capabilities &
+ phylink_cap_from_speed_duplex(max_speed, DUPLEX_HALF)) {
+ matched_caps = GENMASK(__fls(caps), __fls(MAC_10HD));
+ matched_caps &= mac_capabilities;
+ }
+ break;
+ }
+
+ return (caps & mac_capabilities) | matched_caps;
}
-EXPORT_SYMBOL_GPL(phylink_get_linkmodes);
+EXPORT_SYMBOL_GPL(phylink_get_capabilities);
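To make the RATE_MATCH_PAUSE branch above more concrete, here is a small standalone sketch (not part of the patch) of the matched_caps computation. The MAC_* bit positions below are illustrative only - the real definitions live in linux/phylink.h - but the ordering assumption (capability bits ascend with speed, MAC_10HD being the lowest speed bit) is what the GENMASK trick relies on.

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

/* Hypothetical bit layout, for illustration only */
#define MAC_10HD	BIT(2)
#define MAC_10FD	BIT(3)
#define MAC_100HD	BIT(4)
#define MAC_100FD	BIT(5)
#define MAC_1000HD	BIT(6)
#define MAC_1000FD	BIT(7)
#define MAC_2500FD	BIT(8)

/* Position of the most significant set bit, like the kernel's __fls() */
static int msb(unsigned long x)
{
	int i = -1;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	/* Suppose the interface supports speeds up to 2.5G full duplex */
	unsigned long caps = MAC_2500FD | MAC_1000FD | MAC_1000HD;
	unsigned long matched;

	/* RATE_MATCH_PAUSE: open up every speed from 10M to the interface
	 * maximum, then drop the half-duplex modes, since the PHY hides the
	 * media rate from the MAC and flow-controls it with pause frames.
	 */
	matched = GENMASK(msb(caps), msb(MAC_10HD));
	matched &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);

	printf("matched caps = 0x%lx\n", matched);
	return 0;
}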
/**
* phylink_generic_validate() - generic validate() callback implementation
@@ -400,10 +576,14 @@ void phylink_generic_validate(struct phylink_config *config,
struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ unsigned long caps;
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
- phylink_get_linkmodes(mask, state->interface, config->mac_capabilities);
+ caps = phylink_get_capabilities(state->interface,
+ config->mac_capabilities,
+ state->rate_matching);
+ phylink_caps_to_linkmodes(mask, caps);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
@@ -458,8 +638,9 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl,
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
}
-static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
- struct phylink_link_state *state)
+static int phylink_validate_mask(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state,
+ const unsigned long *interfaces)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, };
@@ -468,7 +649,7 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
int intf;
for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
- if (test_bit(intf, pl->config->supported_interfaces)) {
+ if (test_bit(intf, interfaces)) {
linkmode_copy(s, supported);
t = *state;
@@ -489,12 +670,14 @@ static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
- if (!phy_interface_empty(pl->config->supported_interfaces)) {
+ const unsigned long *interfaces = pl->config->supported_interfaces;
+
+ if (!phy_interface_empty(interfaces)) {
if (state->interface == PHY_INTERFACE_MODE_NA)
- return phylink_validate_any(pl, supported, state);
+ return phylink_validate_mask(pl, supported, state,
+ interfaces);
- if (!test_bit(state->interface,
- pl->config->supported_interfaces))
+ if (!test_bit(state->interface, interfaces))
return -EINVAL;
}
@@ -632,6 +815,12 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RTBI:
phylink_set(pl->supported, 10baseT_Half);
phylink_set(pl->supported, 10baseT_Full);
phylink_set(pl->supported, 100baseT_Half);
@@ -774,11 +963,12 @@ static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
phylink_dbg(pl,
- "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
+ "%s: mode=%s/%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n",
__func__, phylink_an_mode_str(pl->cur_link_an_mode),
phy_modes(state->interface),
phy_speed_to_str(state->speed),
phy_duplex_to_str(state->duplex),
+ phy_rate_matching_to_str(state->rate_matching),
__ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising,
state->pause, state->link, state->an_enabled);
@@ -915,7 +1105,8 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- if (state->an_enabled) {
+ state->rate_matching = pl->link_config.rate_matching;
+ if (state->an_enabled) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
state->pause = MLO_PAUSE_NONE;
@@ -998,19 +1189,43 @@ static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
+ int speed, duplex;
+ bool rx_pause;
+
+ speed = link_state.speed;
+ duplex = link_state.duplex;
+ rx_pause = !!(link_state.pause & MLO_PAUSE_RX);
+
+ switch (link_state.rate_matching) {
+ case RATE_MATCH_PAUSE:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will send
+ * pause frames to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_FULL;
+ rx_pause = true;
+ break;
+
+ case RATE_MATCH_CRS:
+ /* The PHY is doing rate matching from the media rate (in
+ * the link_state) to the interface speed, and will cause
+ * collisions to the MAC to limit its transmission speed.
+ */
+ speed = phylink_interface_max_speed(link_state.interface);
+ duplex = DUPLEX_HALF;
+ break;
+ }
pl->cur_interface = link_state.interface;
if (pl->pcs && pl->pcs->ops->pcs_link_up)
pl->pcs->ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
- pl->cur_interface,
- link_state.speed, link_state.duplex);
+ pl->cur_interface, speed, duplex);
- pl->mac_ops->mac_link_up(pl->config, pl->phydev,
- pl->cur_link_an_mode, pl->cur_interface,
- link_state.speed, link_state.duplex,
- !!(link_state.pause & MLO_PAUSE_TX),
- !!(link_state.pause & MLO_PAUSE_RX));
+ pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
+ pl->cur_interface, speed, duplex,
+ !!(link_state.pause & MLO_PAUSE_TX), rx_pause);
if (ndev)
netif_carrier_on(ndev);
@@ -1102,6 +1317,17 @@ static void phylink_resolve(struct work_struct *w)
}
link_state.interface = pl->phy_state.interface;
+ /* If we are doing rate matching, then the
+ * link speed/duplex comes from the PHY
+ */
+ if (pl->phy_state.rate_matching) {
+ link_state.rate_matching =
+ pl->phy_state.rate_matching;
+ link_state.speed = pl->phy_state.speed;
+ link_state.duplex =
+ pl->phy_state.duplex;
+ }
+
/* If we have a PHY, we need to update with
* the PHY flow control bits.
*/
@@ -1336,6 +1562,7 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
mutex_lock(&pl->state_mutex);
pl->phy_state.speed = phydev->speed;
pl->phy_state.duplex = phydev->duplex;
+ pl->phy_state.rate_matching = phydev->rate_matching;
pl->phy_state.pause = MLO_PAUSE_NONE;
if (tx_pause)
pl->phy_state.pause |= MLO_PAUSE_TX;
@@ -1347,10 +1574,11 @@ static void phylink_phy_change(struct phy_device *phydev, bool up)
phylink_run_resolve(pl);
- phylink_dbg(pl, "phy link %s %s/%s/%s/%s\n", up ? "up" : "down",
+ phylink_dbg(pl, "phy link %s %s/%s/%s/%s/%s\n", up ? "up" : "down",
phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
+ phy_rate_matching_to_str(phydev->rate_matching),
phylink_pause_to_str(pl->phy_state.pause));
}
@@ -1387,6 +1615,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
config.interface = PHY_INTERFACE_MODE_NA;
else
config.interface = interface;
+ config.rate_matching = phy_get_rate_matching(phy, config.interface);
ret = phylink_validate(pl, supported, &config);
if (ret) {
@@ -1414,6 +1643,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
pl->phy_state.pause = MLO_PAUSE_NONE;
pl->phy_state.speed = SPEED_UNKNOWN;
pl->phy_state.duplex = DUPLEX_UNKNOWN;
+ pl->phy_state.rate_matching = RATE_MATCH_NONE;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
@@ -1439,7 +1669,7 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
{
if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(interface))))
+ phy_interface_mode_is_8023z(interface) && !pl->sfp_bus)))
return -EINVAL;
if (pl->phydev)
@@ -1856,8 +2086,10 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
{
phylink_merge_link_mode(kset->link_modes.advertising, state->advertising);
linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising);
- kset->base.speed = state->speed;
- kset->base.duplex = state->duplex;
+ if (kset->base.rate_matching == RATE_MATCH_NONE) {
+ kset->base.speed = state->speed;
+ kset->base.duplex = state->duplex;
+ }
kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE :
AUTONEG_DISABLE;
}
@@ -2571,21 +2803,85 @@ static void phylink_sfp_detach(void *upstream, struct sfp_bus *bus)
pl->netdev->sfp_bus = NULL;
}
-static int phylink_sfp_config(struct phylink *pl, u8 mode,
- const unsigned long *supported,
- const unsigned long *advertising)
+static const phy_interface_t phylink_sfp_interface_preference[] = {
+ PHY_INTERFACE_MODE_25GBASER,
+ PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_5GBASER,
+ PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_1000BASEX,
+ PHY_INTERFACE_MODE_100BASEX,
+};
+
+static DECLARE_PHY_INTERFACE_MASK(phylink_sfp_interfaces);
+
+static phy_interface_t phylink_choose_sfp_interface(struct phylink *pl,
+ const unsigned long *intf)
+{
+ phy_interface_t interface;
+ size_t i;
+
+ interface = PHY_INTERFACE_MODE_NA;
+ for (i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); i++)
+ if (test_bit(phylink_sfp_interface_preference[i], intf)) {
+ interface = phylink_sfp_interface_preference[i];
+ break;
+ }
+
+ return interface;
+}
+
+static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ bool changed = false;
+
+ phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
+ phylink_an_mode_str(mode), phy_modes(state->interface),
+ __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
+
+ if (!linkmode_equal(pl->supported, supported)) {
+ linkmode_copy(pl->supported, supported);
+ changed = true;
+ }
+
+ if (!linkmode_equal(pl->link_config.advertising, state->advertising)) {
+ linkmode_copy(pl->link_config.advertising, state->advertising);
+ changed = true;
+ }
+
+ if (pl->cur_link_an_mode != mode ||
+ pl->link_config.interface != state->interface) {
+ pl->cur_link_an_mode = mode;
+ pl->link_config.interface = state->interface;
+
+ changed = true;
+
+ phylink_info(pl, "switched to %s/%s link mode\n",
+ phylink_an_mode_str(mode),
+ phy_modes(state->interface));
+ }
+
+ if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
+ &pl->phylink_disable_state))
+ phylink_mac_initial_config(pl, false);
+}
+
+static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
+ struct phy_device *phy)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
phy_interface_t iface;
- bool changed;
int ret;
- linkmode_copy(support, supported);
+ linkmode_copy(support, phy->supported);
memset(&config, 0, sizeof(config));
- linkmode_copy(config.advertising, advertising);
+ linkmode_copy(config.advertising, phy->advertising);
config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
@@ -2622,60 +2918,100 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode,
return ret;
}
- phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
- phylink_an_mode_str(mode), phy_modes(config.interface),
- __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ pl->link_port = pl->sfp_port;
+
+ phylink_sfp_set_config(pl, mode, support, &config);
+
+ return 0;
+}
+
+static int phylink_sfp_config_optical(struct phylink *pl)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phylink_link_state config;
+ phy_interface_t interface;
+ int ret;
+
+ phylink_dbg(pl, "optical SFP: interfaces=[mac=%*pbl, sfp=%*pbl]\n",
+ (int)PHY_INTERFACE_MODE_MAX,
+ pl->config->supported_interfaces,
+ (int)PHY_INTERFACE_MODE_MAX,
+ pl->sfp_interfaces);
- if (phy_interface_mode_is_8023z(iface) && pl->phydev)
+ /* Find the set of interfaces supported by both the PCS/MAC and
+ * the SFP module (i.e. their intersection).
+ */
+ phy_interface_and(interfaces, pl->config->supported_interfaces,
+ pl->sfp_interfaces);
+ if (phy_interface_empty(interfaces)) {
+ phylink_err(pl, "unsupported SFP module: no common interface modes\n");
return -EINVAL;
+ }
- changed = !linkmode_equal(pl->supported, support) ||
- !linkmode_equal(pl->link_config.advertising,
- config.advertising);
- if (changed) {
- linkmode_copy(pl->supported, support);
- linkmode_copy(pl->link_config.advertising, config.advertising);
+ memset(&config, 0, sizeof(config));
+ linkmode_copy(support, pl->sfp_support);
+ linkmode_copy(config.advertising, pl->sfp_support);
+ config.speed = SPEED_UNKNOWN;
+ config.duplex = DUPLEX_UNKNOWN;
+ config.pause = MLO_PAUSE_AN;
+ config.an_enabled = true;
+
+ /* For all the interfaces that are supported, reduce the sfp_support
+ * mask to only those link modes that can be supported.
+ */
+ ret = phylink_validate_mask(pl, pl->sfp_support, &config, interfaces);
+ if (ret) {
+ phylink_err(pl, "unsupported SFP module: validation with support %*pb failed\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support);
+ return ret;
}
- if (pl->cur_link_an_mode != mode ||
- pl->link_config.interface != config.interface) {
- pl->link_config.interface = config.interface;
- pl->cur_link_an_mode = mode;
+ interface = phylink_choose_sfp_interface(pl, interfaces);
+ if (interface == PHY_INTERFACE_MODE_NA) {
+ phylink_err(pl, "failed to select SFP interface\n");
+ return -EINVAL;
+ }
- changed = true;
+ phylink_dbg(pl, "optical SFP: chosen %s interface\n",
+ phy_modes(interface));
- phylink_info(pl, "switched to %s/%s link mode\n",
- phylink_an_mode_str(mode),
- phy_modes(config.interface));
+ config.interface = interface;
+
+ /* Ignore errors if we're expecting a PHY to attach later */
+ ret = phylink_validate(pl, support, &config);
+ if (ret) {
+ phylink_err(pl, "validation with support %*pb failed: %pe\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support,
+ ERR_PTR(ret));
+ return ret;
}
pl->link_port = pl->sfp_port;
- if (changed && !test_bit(PHYLINK_DISABLE_STOPPED,
- &pl->phylink_disable_state))
- phylink_mac_initial_config(pl, false);
+ phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
- return ret;
+ return 0;
}
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
struct phylink *pl = upstream;
- unsigned long *support = pl->sfp_support;
ASSERT_RTNL();
- linkmode_zero(support);
- sfp_parse_support(pl->sfp_bus, id, support);
- pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, support);
+ linkmode_zero(pl->sfp_support);
+ phy_interface_zero(pl->sfp_interfaces);
+ sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces);
+ pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support);
/* If this module may have a PHY connecting later, defer until later */
pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
if (pl->sfp_may_have_phy)
return 0;
- return phylink_sfp_config(pl, MLO_AN_INBAND, support, support);
+ return phylink_sfp_config_optical(pl);
}
static int phylink_sfp_module_start(void *upstream)
@@ -2694,8 +3030,7 @@ static int phylink_sfp_module_start(void *upstream)
if (!pl->sfp_may_have_phy)
return 0;
- return phylink_sfp_config(pl, MLO_AN_INBAND,
- pl->sfp_support, pl->sfp_support);
+ return phylink_sfp_config_optical(pl);
}
static void phylink_sfp_module_stop(void *upstream)
@@ -2755,8 +3090,12 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
else
mode = MLO_AN_INBAND;
+ /* Set the PHY's host supported interfaces */
+ phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
+ pl->config->supported_interfaces);
+
/* Do the initial configuration */
- ret = phylink_sfp_config(pl, mode, phy->supported, phy->advertising);
+ ret = phylink_sfp_config_phy(pl, mode, phy);
if (ret < 0)
return ret;
@@ -2929,6 +3268,7 @@ void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
@@ -3107,4 +3447,15 @@ void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
}
EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
+static int __init phylink_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i)
+ __set_bit(phylink_sfp_interface_preference[i],
+ phylink_sfp_interfaces);
+
+ return 0;
+}
+
+module_init(phylink_init);
+
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a5671ab896b3..3d99fd6664d7 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -70,6 +70,7 @@
#define RTLGEN_SPEED_MASK 0x0630
#define RTL_GENERIC_PHYID 0x001cc800
+#define RTL_8211FVD_PHYID 0x001cc878
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -78,6 +79,7 @@ MODULE_LICENSE("GPL");
struct rtl821x_priv {
u16 phycr1;
u16 phycr2;
+ bool has_phycr2;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -94,6 +96,7 @@ static int rtl821x_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct rtl821x_priv *priv;
+ u32 phy_id = phydev->drv->phy_id;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -108,13 +111,16 @@ static int rtl821x_probe(struct phy_device *phydev)
if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
- ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
- if (ret < 0)
- return ret;
+ priv->has_phycr2 = !(phy_id == RTL_8211FVD_PHYID);
+ if (priv->has_phycr2) {
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ if (ret < 0)
+ return ret;
- priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
- if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
- priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
+ if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
+ priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+ }
phydev->priv = priv;
@@ -400,12 +406,14 @@ static int rtl8211f_config_init(struct phy_device *phydev)
val_rxdly ? "enabled" : "disabled");
}
- ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
- RTL8211F_CLKOUT_EN, priv->phycr2);
- if (ret < 0) {
- dev_err(dev, "clkout configuration failed: %pe\n",
- ERR_PTR(ret));
- return ret;
+ if (priv->has_phycr2) {
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_CLKOUT_EN, priv->phycr2);
+ if (ret < 0) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
return genphy_soft_reset(phydev);
@@ -924,6 +932,18 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID),
+ .name = "RTL8211F-VD Gigabit Ethernet",
+ .probe = rtl821x_probe,
+ .config_init = &rtl8211f_config_init,
+ .read_status = rtlgen_read_status,
+ .config_intr = &rtl8211f_config_intr,
+ .handle_interrupt = rtl8211f_handle_interrupt,
+ .suspend = genphy_suspend,
+ .resume = rtl821x_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
.name = "Generic FE-GE Realtek PHY",
.match_phy_device = rtlgen_match_phy_device,
.read_status = rtlgen_read_status,
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 15aa5ac1ff49..29e3fa86bac3 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -10,12 +10,6 @@
#include "sfp.h"
-struct sfp_quirk {
- const char *vendor;
- const char *part;
- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes);
-};
-
/**
* struct sfp_bus - internal representation of a sfp bus
*/
@@ -38,93 +32,6 @@ struct sfp_bus {
bool started;
};
-static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
- unsigned long *modes)
-{
- phylink_set(modes, 2500baseX_Full);
-}
-
-static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
- unsigned long *modes)
-{
- /* Ubiquiti U-Fiber Instant module claims that support all transceiver
- * types including 10G Ethernet which is not truth. So clear all claimed
- * modes and set only one mode which module supports: 1000baseX_Full.
- */
- phylink_zero(modes);
- phylink_set(modes, 1000baseX_Full);
-}
-
-static const struct sfp_quirk sfp_quirks[] = {
- {
- // Alcatel Lucent G-010S-P can operate at 2500base-X, but
- // incorrectly report 2500MBd NRZ in their EEPROM
- .vendor = "ALCATELLUCENT",
- .part = "G010SP",
- .modes = sfp_quirk_2500basex,
- }, {
- // Alcatel Lucent G-010S-A can operate at 2500base-X, but
- // report 3.2GBd NRZ in their EEPROM
- .vendor = "ALCATELLUCENT",
- .part = "3FE46541AA",
- .modes = sfp_quirk_2500basex,
- }, {
- // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd
- // NRZ in their EEPROM
- .vendor = "HUAWEI",
- .part = "MA5671A",
- .modes = sfp_quirk_2500basex,
- }, {
- // Lantech 8330-262D-E can operate at 2500base-X, but
- // incorrectly report 2500MBd NRZ in their EEPROM
- .vendor = "Lantech",
- .part = "8330-262D-E",
- .modes = sfp_quirk_2500basex,
- }, {
- .vendor = "UBNT",
- .part = "UF-INSTANT",
- .modes = sfp_quirk_ubnt_uf_instant,
- },
-};
-
-static size_t sfp_strlen(const char *str, size_t maxlen)
-{
- size_t size, i;
-
- /* Trailing characters should be filled with space chars */
- for (i = 0, size = 0; i < maxlen; i++)
- if (str[i] != ' ')
- size = i + 1;
-
- return size;
-}
-
-static bool sfp_match(const char *qs, const char *str, size_t len)
-{
- if (!qs)
- return true;
- if (strlen(qs) != len)
- return false;
- return !strncmp(qs, str, len);
-}
-
-static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
-{
- const struct sfp_quirk *q;
- unsigned int i;
- size_t vs, ps;
-
- vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
- ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
-
- for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
- if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
- sfp_match(q->part, id->base.vendor_pn, ps))
- return q;
-
- return NULL;
-}
-
/**
* sfp_parse_port() - Parse the EEPROM base ID, setting the port type
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
@@ -232,12 +139,14 @@ EXPORT_SYMBOL_GPL(sfp_may_have_phy);
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
* @support: pointer to an array of unsigned long for the ethtool support mask
+ * @interfaces: pointer to an array of unsigned long for phy interface modes
+ * mask
*
* Parse the EEPROM identification information and derive the supported
* ethtool link modes for the module.
*/
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support)
+ unsigned long *support, unsigned long *interfaces)
{
unsigned int br_min, br_nom, br_max;
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
@@ -264,54 +173,81 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
/* Set ethtool support from the compliance fields. */
- if (id->base.e10g_base_sr)
+ if (id->base.e10g_base_sr) {
phylink_set(modes, 10000baseSR_Full);
- if (id->base.e10g_base_lr)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_lr) {
phylink_set(modes, 10000baseLR_Full);
- if (id->base.e10g_base_lrm)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_lrm) {
phylink_set(modes, 10000baseLRM_Full);
- if (id->base.e10g_base_er)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (id->base.e10g_base_er) {
phylink_set(modes, 10000baseER_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
if (id->base.e1000_base_sx ||
id->base.e1000_base_lx ||
- id->base.e1000_base_cx)
+ id->base.e1000_base_cx) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
if (id->base.e1000_base_t) {
phylink_set(modes, 1000baseT_Half);
phylink_set(modes, 1000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
}
/* 1000Base-PX or 1000Base-BX10 */
if ((id->base.e_base_px || id->base.e_base_bx10) &&
- br_min <= 1300 && br_max >= 1200)
+ br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
/* 100Base-FX, 100Base-LX, 100Base-PX, 100Base-BX10 */
- if (id->base.e100_base_fx || id->base.e100_base_lx)
+ if (id->base.e100_base_fx || id->base.e100_base_lx) {
phylink_set(modes, 100baseFX_Full);
- if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100)
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces);
+ }
+ if ((id->base.e_base_px || id->base.e_base_bx10) && br_nom == 100) {
phylink_set(modes, 100baseFX_Full);
+ __set_bit(PHY_INTERFACE_MODE_100BASEX, interfaces);
+ }
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
*/
if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
/* This may look odd, but some manufacturers use 12000MBd */
- if (br_min <= 12000 && br_max >= 10300)
+ if (br_min <= 12000 && br_max >= 10300) {
phylink_set(modes, 10000baseCR_Full);
- if (br_min <= 3200 && br_max >= 3100)
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
+ if (br_min <= 3200 && br_max >= 3100) {
phylink_set(modes, 2500baseX_Full);
- if (br_min <= 1300 && br_max >= 1200)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
+ if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
}
if (id->base.sfp_ct_passive) {
- if (id->base.passive.sff8431_app_e)
+ if (id->base.passive.sff8431_app_e) {
phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
+ }
}
if (id->base.sfp_ct_active) {
if (id->base.active.sff8431_app_e ||
id->base.active.sff8431_lim) {
phylink_set(modes, 10000baseCR_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
}
}
@@ -336,12 +272,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case SFF8024_ECC_10GBASE_T_SFI:
case SFF8024_ECC_10GBASE_T_SR:
phylink_set(modes, 10000baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, interfaces);
break;
case SFF8024_ECC_5GBASE_T:
phylink_set(modes, 5000baseT_Full);
break;
case SFF8024_ECC_2_5GBASE_T:
phylink_set(modes, 2500baseT_Full);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
break;
default:
dev_warn(bus->sfp_dev,
@@ -354,10 +292,14 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (id->base.fc_speed_100 ||
id->base.fc_speed_200 ||
id->base.fc_speed_400) {
- if (id->base.br_nominal >= 31)
+ if (id->base.br_nominal >= 31) {
phylink_set(modes, 2500baseX_Full);
- if (id->base.br_nominal >= 12)
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
+ if (id->base.br_nominal >= 12) {
phylink_set(modes, 1000baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
}
/* If we haven't discovered any modes that this module supports, try
@@ -370,14 +312,18 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
* 2500BASE-X, so we allow some slack here.
*/
if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS) && br_nom) {
- if (br_min <= 1300 && br_max >= 1200)
+ if (br_min <= 1300 && br_max >= 1200) {
phylink_set(modes, 1000baseX_Full);
- if (br_min <= 3200 && br_max >= 2500)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+ }
+ if (br_min <= 3200 && br_max >= 2500) {
phylink_set(modes, 2500baseX_Full);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ }
}
- if (bus->sfp_quirk)
- bus->sfp_quirk->modes(id, modes);
+ if (bus->sfp_quirk && bus->sfp_quirk->modes)
+ bus->sfp_quirk->modes(id, modes, interfaces);
linkmode_or(support, support, modes);
@@ -786,12 +732,13 @@ void sfp_link_down(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_link_down);
-int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk)
{
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
- bus->sfp_quirk = sfp_lookup_quirk(id);
+ bus->sfp_quirk = quirk;
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 63f90fe9a4d2..40c9a64c5e30 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -166,6 +166,7 @@ static const enum gpiod_flags gpio_flags[] = {
* on board (for a copper SFP) time to initialise.
*/
#define T_WAIT msecs_to_jiffies(50)
+#define T_WAIT_ROLLBALL msecs_to_jiffies(25000)
#define T_START_UP msecs_to_jiffies(300)
#define T_START_UP_BAD_GPON msecs_to_jiffies(60000)
@@ -205,8 +206,11 @@ static const enum gpiod_flags gpio_flags[] = {
/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
+ * RollBall SFPs access the PHY through the SFP Enhanced Digital Diagnostic
+ * Interface at address 0x51 (mdio-i2c uses the RollBall protocol there).
*/
-#define SFP_PHY_ADDR 22
+#define SFP_PHY_ADDR 22
+#define SFP_PHY_ADDR_ROLLBALL 17
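A brief hedged sketch of the address relationship mentioned in the comment above; it assumes mdio-i2c's fixed 0x40 offset between MDIO PHY addresses and I2C bus addresses, and the helper name is invented:

	/* 22 + 0x40 = 0x56 (normal SFP PHY), 17 + 0x40 = 0x51 (RollBall DDI) */
	static u8 sfp_example_i2c_bus_addr(int phy_addr)
	{
		return 0x40 + phy_addr;
	}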
struct sff_data {
unsigned int gpios;
@@ -218,6 +222,7 @@ struct sfp {
struct i2c_adapter *i2c;
struct mii_bus *i2c_mii;
struct sfp_bus *sfp_bus;
+ enum mdio_i2c_proto mdio_protocol;
struct phy_device *mod_phy;
const struct sff_data *type;
size_t i2c_block_size;
@@ -234,6 +239,7 @@ struct sfp {
bool need_poll;
struct mutex st_mutex; /* Protects state */
+ unsigned int state_hw_mask;
unsigned int state_soft_mask;
unsigned int state;
struct delayed_work poll;
@@ -250,8 +256,11 @@ struct sfp {
struct sfp_eeprom_id id;
unsigned int module_power_mW;
unsigned int module_t_start_up;
+ unsigned int module_t_wait;
bool tx_fault_ignore;
+ const struct sfp_quirk *quirk;
+
#if IS_ENABLED(CONFIG_HWMON)
struct sfp_diag diag;
struct delayed_work hwmon_probe;
@@ -308,6 +317,136 @@ static const struct of_device_id sfp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sfp_of_match);
+static void sfp_fixup_long_startup(struct sfp *sfp)
+{
+ sfp->module_t_start_up = T_START_UP_BAD_GPON;
+}
+
+static void sfp_fixup_ignore_tx_fault(struct sfp *sfp)
+{
+ sfp->tx_fault_ignore = true;
+}
+
+static void sfp_fixup_halny_gsfp(struct sfp *sfp)
+{
+ /* Ignore the TX_FAULT and LOS signals on this module.
+ * these are possibly used for other purposes on this
+ * module, e.g. a serial port.
+ */
+ sfp->state_hw_mask &= ~(SFP_F_TX_FAULT | SFP_F_LOS);
+}
+
+static void sfp_fixup_rollball(struct sfp *sfp)
+{
+ sfp->mdio_protocol = MDIO_I2C_ROLLBALL;
+ sfp->module_t_wait = T_WAIT_ROLLBALL;
+}
+
+static void sfp_fixup_rollball_cc(struct sfp *sfp)
+{
+ sfp_fixup_rollball(sfp);
+
+ /* Some RollBall SFPs may have wrong (zero) extended compliance code
+ * burned in EEPROM. For PHY probing we need the correct one.
+ */
+ sfp->id.base.extended_cc = SFF8024_ECC_10GBASE_T_SFI;
+}
+
+static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
+ unsigned long *modes,
+ unsigned long *interfaces)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+}
+
+static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
+ unsigned long *modes,
+ unsigned long *interfaces)
+{
+ /* The Ubiquiti U-Fiber Instant module claims to support all transceiver
+ * types including 10G Ethernet, which is not true. So clear all claimed
+ * modes and set only the one mode the module supports: 1000baseX_Full.
+ */
+ linkmode_zero(modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
+}
+
+#define SFP_QUIRK(_v, _p, _m, _f) \
+ { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, }
+#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL)
+#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f)
+
+static const struct sfp_quirk sfp_quirks[] = {
+ // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly
+ // report 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex),
+
+ // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd
+ // NRZ in their EEPROM
+ SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
+ sfp_fixup_long_startup),
+
+ SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+
+ // Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
+ // their EEPROM
+ SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+ sfp_fixup_ignore_tx_fault),
+
+ // Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
+ // 2500MBd NRZ in their EEPROM
+ SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+
+ SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
+
+ SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball),
+ SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball),
+};
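As a hedged illustration of the helper macros above (not part of the patch; the vendor and part strings are invented), additional table entries would look like this:

	/* hypothetical module needing both a mode override and a fixup */
	SFP_QUIRK("EXAMPLEVEND", "EXAMPLE-2G5", sfp_quirk_2500basex,
		  sfp_fixup_ignore_tx_fault),

	/* hypothetical RollBall-style module with a broken compliance code */
	SFP_QUIRK_F("EXAMPLEVEND", "EXAMPLE-10GT", sfp_fixup_rollball_cc),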
+
+static size_t sfp_strlen(const char *str, size_t maxlen)
+{
+ size_t size, i;
+
+ /* Trailing characters should be filled with space chars, but
+ * some manufacturers can't read SFF-8472 and use NUL.
+ */
+ for (i = 0, size = 0; i < maxlen; i++)
+ if (str[i] != ' ' && str[i] != '\0')
+ size = i + 1;
+
+ return size;
+}
+
+static bool sfp_match(const char *qs, const char *str, size_t len)
+{
+ if (!qs)
+ return true;
+ if (strlen(qs) != len)
+ return false;
+ return !strncmp(qs, str, len);
+}
+
+static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
+{
+ const struct sfp_quirk *q;
+ unsigned int i;
+ size_t vs, ps;
+
+ vs = sfp_strlen(id->base.vendor_name, ARRAY_SIZE(id->base.vendor_name));
+ ps = sfp_strlen(id->base.vendor_pn, ARRAY_SIZE(id->base.vendor_pn));
+
+ for (i = 0, q = sfp_quirks; i < ARRAY_SIZE(sfp_quirks); i++, q++)
+ if (sfp_match(q->vendor, id->base.vendor_name, vs) &&
+ sfp_match(q->part, id->base.vendor_pn, ps))
+ return q;
+
+ return NULL;
+}
+
static unsigned long poll_jiffies;
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -419,9 +558,6 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
{
- struct mii_bus *i2c_mii;
- int ret;
-
if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
return -EINVAL;
@@ -429,7 +565,15 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
sfp->read = sfp_i2c_read;
sfp->write = sfp_i2c_write;
- i2c_mii = mdio_i2c_alloc(sfp->dev, i2c);
+ return 0;
+}
+
+static int sfp_i2c_mdiobus_create(struct sfp *sfp)
+{
+ struct mii_bus *i2c_mii;
+ int ret;
+
+ i2c_mii = mdio_i2c_alloc(sfp->dev, sfp->i2c, sfp->mdio_protocol);
if (IS_ERR(i2c_mii))
return PTR_ERR(i2c_mii);
@@ -447,6 +591,12 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
return 0;
}
+static void sfp_i2c_mdiobus_destroy(struct sfp *sfp)
+{
+ mdiobus_unregister(sfp->i2c_mii);
+ sfp->i2c_mii = NULL;
+}
+
/* Interface */
static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
{
@@ -499,17 +649,18 @@ static void sfp_soft_set_state(struct sfp *sfp, unsigned int state)
static void sfp_soft_start_poll(struct sfp *sfp)
{
const struct sfp_eeprom_id *id = &sfp->id;
+ unsigned int mask = 0;
sfp->state_soft_mask = 0;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE &&
- !sfp->gpio[GPIO_TX_DISABLE])
- sfp->state_soft_mask |= SFP_F_TX_DISABLE;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT &&
- !sfp->gpio[GPIO_TX_FAULT])
- sfp->state_soft_mask |= SFP_F_TX_FAULT;
- if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS &&
- !sfp->gpio[GPIO_LOS])
- sfp->state_soft_mask |= SFP_F_LOS;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_DISABLE)
+ mask |= SFP_F_TX_DISABLE;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_TX_FAULT)
+ mask |= SFP_F_TX_FAULT;
+ if (id->ext.enhopts & SFP_ENHOPTS_SOFT_RX_LOS)
+ mask |= SFP_F_LOS;
+
+ // Poll the soft state for hardware pins we want to ignore
+ sfp->state_soft_mask = ~sfp->state_hw_mask & mask;
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
!sfp->need_poll)
@@ -523,10 +674,11 @@ static void sfp_soft_stop_poll(struct sfp *sfp)
static unsigned int sfp_get_state(struct sfp *sfp)
{
- unsigned int state = sfp->get_state(sfp);
+ unsigned int soft = sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT);
+ unsigned int state;
- if (state & SFP_F_PRESENT &&
- sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT))
+ state = sfp->get_state(sfp) & sfp->state_hw_mask;
+ if (state & SFP_F_PRESENT && soft)
state |= sfp_soft_get_state(sfp);
return state;
@@ -1195,90 +1347,45 @@ static const struct hwmon_ops sfp_hwmon_ops = {
.read_string = sfp_hwmon_read_string,
};
-static u32 sfp_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_chip = {
- .type = hwmon_chip,
- .config = sfp_hwmon_chip_config,
-};
-
-static u32 sfp_hwmon_temp_config[] = {
- HWMON_T_INPUT |
- HWMON_T_MAX | HWMON_T_MIN |
- HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
- HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
- HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_temp_channel_info = {
- .type = hwmon_temp,
- .config = sfp_hwmon_temp_config,
-};
-
-static u32 sfp_hwmon_vcc_config[] = {
- HWMON_I_INPUT |
- HWMON_I_MAX | HWMON_I_MIN |
- HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
- HWMON_I_CRIT | HWMON_I_LCRIT |
- HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
- HWMON_I_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_vcc_channel_info = {
- .type = hwmon_in,
- .config = sfp_hwmon_vcc_config,
-};
-
-static u32 sfp_hwmon_bias_config[] = {
- HWMON_C_INPUT |
- HWMON_C_MAX | HWMON_C_MIN |
- HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
- HWMON_C_CRIT | HWMON_C_LCRIT |
- HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
- HWMON_C_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_bias_channel_info = {
- .type = hwmon_curr,
- .config = sfp_hwmon_bias_config,
-};
-
-static u32 sfp_hwmon_power_config[] = {
- /* Transmit power */
- HWMON_P_INPUT |
- HWMON_P_MAX | HWMON_P_MIN |
- HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
- HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
- HWMON_P_LABEL,
- /* Receive power */
- HWMON_P_INPUT |
- HWMON_P_MAX | HWMON_P_MIN |
- HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
- HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
- HWMON_P_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info sfp_hwmon_power_channel_info = {
- .type = hwmon_power,
- .config = sfp_hwmon_power_config,
-};
-
static const struct hwmon_channel_info *sfp_hwmon_info[] = {
- &sfp_hwmon_chip,
- &sfp_hwmon_vcc_channel_info,
- &sfp_hwmon_temp_channel_info,
- &sfp_hwmon_bias_channel_info,
- &sfp_hwmon_power_channel_info,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT |
+ HWMON_I_MAX | HWMON_I_MIN |
+ HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
+ HWMON_I_CRIT | HWMON_I_LCRIT |
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
+ HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
+ HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT |
+ HWMON_C_MAX | HWMON_C_MIN |
+ HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
+ HWMON_C_CRIT | HWMON_C_LCRIT |
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ /* Transmit power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
+ /* Receive power */
+ HWMON_P_INPUT |
+ HWMON_P_MAX | HWMON_P_MIN |
+ HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
+ HWMON_P_CRIT | HWMON_P_LCRIT |
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL),
NULL,
};
@@ -1505,12 +1612,12 @@ static void sfp_sm_phy_detach(struct sfp *sfp)
sfp->mod_phy = NULL;
}
-static int sfp_sm_probe_phy(struct sfp *sfp, bool is_c45)
+static int sfp_sm_probe_phy(struct sfp *sfp, int addr, bool is_c45)
{
struct phy_device *phy;
int err;
- phy = get_phy_device(sfp->i2c_mii, SFP_PHY_ADDR, is_c45);
+ phy = get_phy_device(sfp->i2c_mii, addr, is_c45);
if (phy == ERR_PTR(-ENODEV))
return PTR_ERR(phy);
if (IS_ERR(phy)) {
@@ -1606,6 +1713,14 @@ static void sfp_sm_fault(struct sfp *sfp, unsigned int next_state, bool warn)
}
}
+static int sfp_sm_add_mdio_bus(struct sfp *sfp)
+{
+ if (sfp->mdio_protocol != MDIO_I2C_NONE)
+ return sfp_i2c_mdiobus_create(sfp);
+
+ return 0;
+}
+
/* Probe a SFP for a PHY device if the module supports copper - the PHY
* normally sits at I2C bus address 0x56, and may either be a clause 22
* or clause 45 PHY.
@@ -1621,19 +1736,23 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp)
{
int err = 0;
- switch (sfp->id.base.extended_cc) {
- case SFF8024_ECC_10GBASE_T_SFI:
- case SFF8024_ECC_10GBASE_T_SR:
- case SFF8024_ECC_5GBASE_T:
- case SFF8024_ECC_2_5GBASE_T:
- err = sfp_sm_probe_phy(sfp, true);
+ switch (sfp->mdio_protocol) {
+ case MDIO_I2C_NONE:
break;
- default:
- if (sfp->id.base.e1000_base_t)
- err = sfp_sm_probe_phy(sfp, false);
+ case MDIO_I2C_MARVELL_C22:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, false);
+ break;
+
+ case MDIO_I2C_C45:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR, true);
+ break;
+
+ case MDIO_I2C_ROLLBALL:
+ err = sfp_sm_probe_phy(sfp, SFP_PHY_ADDR_ROLLBALL, true);
break;
}
+
return err;
}
@@ -1947,17 +2066,33 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
if (ret < 0)
return ret;
- if (!memcmp(id.base.vendor_name, "ALCATELLUCENT ", 16) &&
- !memcmp(id.base.vendor_pn, "3FE46541AA ", 16))
- sfp->module_t_start_up = T_START_UP_BAD_GPON;
+ /* Initialise state bits to use from hardware */
+ sfp->state_hw_mask = SFP_F_PRESENT;
+ if (sfp->gpio[GPIO_TX_DISABLE])
+ sfp->state_hw_mask |= SFP_F_TX_DISABLE;
+ if (sfp->gpio[GPIO_TX_FAULT])
+ sfp->state_hw_mask |= SFP_F_TX_FAULT;
+ if (sfp->gpio[GPIO_LOS])
+ sfp->state_hw_mask |= SFP_F_LOS;
+
+ sfp->module_t_start_up = T_START_UP;
+ sfp->module_t_wait = T_WAIT;
+
+ sfp->tx_fault_ignore = false;
+
+ if (sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SFI ||
+ sfp->id.base.extended_cc == SFF8024_ECC_10GBASE_T_SR ||
+ sfp->id.base.extended_cc == SFF8024_ECC_5GBASE_T ||
+ sfp->id.base.extended_cc == SFF8024_ECC_2_5GBASE_T)
+ sfp->mdio_protocol = MDIO_I2C_C45;
+ else if (sfp->id.base.e1000_base_t)
+ sfp->mdio_protocol = MDIO_I2C_MARVELL_C22;
else
- sfp->module_t_start_up = T_START_UP;
+ sfp->mdio_protocol = MDIO_I2C_NONE;
- if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) &&
- !memcmp(id.base.vendor_pn, "MA5671A ", 16))
- sfp->tx_fault_ignore = true;
- else
- sfp->tx_fault_ignore = false;
+ sfp->quirk = sfp_lookup_quirk(&id);
+ if (sfp->quirk && sfp->quirk->fixup)
+ sfp->quirk->fixup(sfp);
return 0;
}
@@ -2071,7 +2206,8 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
break;
/* Report the module insertion to the upstream device */
- err = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ err = sfp_module_insert(sfp->sfp_bus, &sfp->id,
+ sfp->quirk);
if (err < 0) {
sfp_sm_mod_next(sfp, SFP_MOD_ERROR, 0);
break;
@@ -2130,6 +2266,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp_module_stop(sfp->sfp_bus);
if (sfp->mod_phy)
sfp_sm_phy_detach(sfp);
+ if (sfp->i2c_mii)
+ sfp_i2c_mdiobus_destroy(sfp);
sfp_module_tx_disable(sfp);
sfp_soft_stop_poll(sfp);
sfp_sm_next(sfp, SFP_S_DOWN, 0);
@@ -2153,9 +2291,10 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
/* We need to check the TX_FAULT state, which is not defined
* while TX_DISABLE is asserted. The earliest we want to do
- * anything (such as probe for a PHY) is 50ms.
+ * anything (such as probe for a PHY) is 50ms (or more on
+ * specific modules).
*/
- sfp_sm_next(sfp, SFP_S_WAIT, T_WAIT);
+ sfp_sm_next(sfp, SFP_S_WAIT, sfp->module_t_wait);
break;
case SFP_S_WAIT:
@@ -2169,8 +2308,8 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
* deasserting.
*/
timeout = sfp->module_t_start_up;
- if (timeout > T_WAIT)
- timeout -= T_WAIT;
+ if (timeout > sfp->module_t_wait)
+ timeout -= sfp->module_t_wait;
else
timeout = 1;
@@ -2192,6 +2331,12 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
sfp->sm_fault_retries == N_FAULT_INIT);
} else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) {
init_done:
+ /* Create mdiobus and start trying for PHY */
+ ret = sfp_sm_add_mdio_bus(sfp);
+ if (ret < 0) {
+ sfp_sm_next(sfp, SFP_S_FAIL, 0);
+ break;
+ }
sfp->sm_phy_retries = R_PHY_RETRY;
goto phy_probe;
}
@@ -2573,6 +2718,8 @@ static int sfp_probe(struct platform_device *pdev)
return PTR_ERR(sfp->gpio[i]);
}
+ sfp->state_hw_mask = SFP_F_PRESENT;
+
sfp->get_state = sfp_gpio_get_state;
sfp->set_state = sfp_gpio_set_state;
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 27226535c72b..6cf1643214d3 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -6,6 +6,14 @@
struct sfp;
+struct sfp_quirk {
+ const char *vendor;
+ const char *part;
+ void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes,
+ unsigned long *interfaces);
+ void (*fixup)(struct sfp *sfp);
+};
+
struct sfp_socket_ops {
void (*attach)(struct sfp *sfp);
void (*detach)(struct sfp *sfp);
@@ -23,7 +31,8 @@ int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev);
void sfp_remove_phy(struct sfp_bus *bus);
void sfp_link_up(struct sfp_bus *bus);
void sfp_link_down(struct sfp_bus *bus);
-int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
+int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk);
void sfp_module_remove(struct sfp_bus *bus);
int sfp_module_start(struct sfp_bus *bus);
void sfp_module_stop(struct sfp_bus *bus);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 69423b8965b3..ac7481ce2fc1 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -46,7 +46,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
struct smsc_phy_priv {
u16 intmask;
bool energy_enable;
- struct clk *refclk;
};
static int smsc_phy_ack_interrupt(struct phy_device *phydev)
@@ -285,20 +284,12 @@ static void smsc_get_stats(struct phy_device *phydev,
data[i] = smsc_get_stat(phydev, i);
}
-static void smsc_phy_remove(struct phy_device *phydev)
-{
- struct smsc_phy_priv *priv = phydev->priv;
-
- clk_disable_unprepare(priv->refclk);
- clk_put(priv->refclk);
-}
-
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
struct smsc_phy_priv *priv;
- int ret;
+ struct clk *refclk;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -312,22 +303,12 @@ static int smsc_phy_probe(struct phy_device *phydev)
phydev->priv = priv;
/* Make clk optional to keep DTB backward compatibility. */
- priv->refclk = clk_get_optional(dev, NULL);
- if (IS_ERR(priv->refclk))
- return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ refclk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(refclk))
+ return dev_err_probe(dev, PTR_ERR(refclk),
"Failed to request clock\n");
- ret = clk_prepare_enable(priv->refclk);
- if (ret)
- return ret;
-
- ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
- if (ret) {
- clk_disable_unprepare(priv->refclk);
- return ret;
- }
-
- return 0;
+ return clk_set_rate(refclk, 50 * 1000 * 1000);
}
static struct phy_driver smsc_phy_driver[] = {
@@ -429,7 +410,6 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
- .remove = smsc_phy_remove,
/* basic functions */
.read_status = lan87xx_read_status,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index ff37f8ba6758..d4202d40d47a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -17,7 +17,6 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
@@ -137,15 +136,10 @@ static const struct ks8995_chip_params ks8995_chip[] = {
},
};
-struct ks8995_pdata {
- int reset_gpio;
- enum of_gpio_flags reset_gpio_flags;
-};
-
struct ks8995_switch {
struct spi_device *spi;
struct mutex lock;
- struct ks8995_pdata *pdata;
+ struct gpio_desc *reset_gpio;
struct bin_attribute regs_attr;
const struct ks8995_chip_params *chip;
int revision_id;
@@ -401,24 +395,6 @@ err_out:
return err;
}
-/* ks8995_parse_dt - setup platform data from devicetree
- * @ks: pointer to switch instance
- *
- * Parses supported DT properties and sets up platform data
- * accordingly.
- */
-static void ks8995_parse_dt(struct ks8995_switch *ks)
-{
- struct device_node *np = ks->spi->dev.of_node;
- struct ks8995_pdata *pdata = ks->pdata;
-
- if (!np)
- return;
-
- pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0,
- &pdata->reset_gpio_flags);
-}
-
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
@@ -449,38 +425,22 @@ static int ks8995_probe(struct spi_device *spi)
ks->spi = spi;
ks->chip = &ks8995_chip[variant];
- if (ks->spi->dev.of_node) {
- ks->pdata = devm_kzalloc(&spi->dev, sizeof(*ks->pdata),
- GFP_KERNEL);
- if (!ks->pdata)
- return -ENOMEM;
-
- ks->pdata->reset_gpio = -1;
-
- ks8995_parse_dt(ks);
+ ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(ks->reset_gpio);
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset gpio: %d\n", err);
+ return err;
}
- if (!ks->pdata)
- ks->pdata = spi->dev.platform_data;
+ err = gpiod_set_consumer_name(ks->reset_gpio, "switch-reset");
+ if (err)
+ return err;
/* de-assert switch reset */
- if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) {
- unsigned long flags;
-
- flags = (ks->pdata->reset_gpio_flags == OF_GPIO_ACTIVE_LOW ?
- GPIOF_ACTIVE_LOW : 0);
-
- err = devm_gpio_request_one(&spi->dev,
- ks->pdata->reset_gpio,
- flags, "switch-reset");
- if (err) {
- dev_err(&spi->dev,
- "failed to get reset-gpios: %d\n", err);
- return -EIO;
- }
-
- gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 0);
- }
+ /* FIXME: this likely requires a delay */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
spi_set_drvdata(spi, ks);
@@ -524,8 +484,7 @@ static void ks8995_remove(struct spi_device *spi)
sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
/* assert reset */
- if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio))
- gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1);
+ gpiod_set_value_cansleep(ks->reset_gpio, 1);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
new file mode 100644
index 000000000000..73d163704068
--- /dev/null
+++ b/drivers/net/pse-pd/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Ethernet Power Sourcing Equipment drivers
+#
+
+menuconfig PSE_CONTROLLER
+ bool "Ethernet Power Sourcing Equipment Support"
+ help
+ Generic Power Sourcing Equipment Controller support.
+
+ If unsure, say no.
+
+if PSE_CONTROLLER
+
+config PSE_REGULATOR
+ tristate "Regulator based PSE controller"
+ help
+ This module provides support for simple regulator based Ethernet Power
+ Sourcing Equipment without automatic classification support, for
+ example for a basic implementation of the PoDL (802.3bu) specification.
+
+endif
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
new file mode 100644
index 000000000000..1b8aa4c70f0b
--- /dev/null
+++ b/drivers/net/pse-pd/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for Linux PSE drivers
+
+obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
+
+obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
new file mode 100644
index 000000000000..146b81f08a89
--- /dev/null
+++ b/drivers/net/pse-pd/pse_core.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Framework for Ethernet Power Sourcing Equipment
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/pse-pd/pse.h>
+
+static DEFINE_MUTEX(pse_list_mutex);
+static LIST_HEAD(pse_controller_list);
+
+/**
+ * struct pse_control - a PSE control
+ * @pcdev: a pointer to the PSE controller device
+ * this PSE control belongs to
+ * @list: list entry for the pcdev's PSE controller list
+ * @id: ID of the PSE line in the PSE controller device
+ * @refcnt: Number of gets of this pse_control
+ */
+struct pse_control {
+ struct pse_controller_dev *pcdev;
+ struct list_head list;
+ unsigned int id;
+ struct kref refcnt;
+};
+
+/**
+ * of_pse_zero_xlate - dummy function for controllers with only one control
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
+ * controllers with #pse-cells = <0>.
+ */
+static int of_pse_zero_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ return 0;
+}
+
+/**
+ * of_pse_simple_xlate - translate pse_spec to the PSE line number
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * This static translation function is used by default if of_xlate in
+ * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
+ * controllers with 1:1 mapping, where PSE lines can be indexed by number
+ * without gaps.
+ */
+static int of_pse_simple_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ if (pse_spec->args[0] >= pcdev->nr_lines)
+ return -EINVAL;
+
+ return pse_spec->args[0];
+}
+
+/**
+ * pse_controller_register - register a PSE controller device
+ * @pcdev: a pointer to the initialized PSE controller device
+ */
+int pse_controller_register(struct pse_controller_dev *pcdev)
+{
+ if (!pcdev->of_xlate) {
+ if (pcdev->of_pse_n_cells == 0)
+ pcdev->of_xlate = of_pse_zero_xlate;
+ else if (pcdev->of_pse_n_cells == 1)
+ pcdev->of_xlate = of_pse_simple_xlate;
+ }
+
+ mutex_init(&pcdev->lock);
+ INIT_LIST_HEAD(&pcdev->pse_control_head);
+
+ mutex_lock(&pse_list_mutex);
+ list_add(&pcdev->list, &pse_controller_list);
+ mutex_unlock(&pse_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pse_controller_register);
+
+/**
+ * pse_controller_unregister - unregister a PSE controller device
+ * @pcdev: a pointer to the PSE controller device
+ */
+void pse_controller_unregister(struct pse_controller_dev *pcdev)
+{
+ mutex_lock(&pse_list_mutex);
+ list_del(&pcdev->list);
+ mutex_unlock(&pse_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pse_controller_unregister);
+
+static void devm_pse_controller_release(struct device *dev, void *res)
+{
+ pse_controller_unregister(*(struct pse_controller_dev **)res);
+}
+
+/**
+ * devm_pse_controller_register - resource managed pse_controller_register()
+ * @dev: device that is registering this PSE controller
+ * @pcdev: a pointer to the initialized PSE controller device
+ *
+ * Managed pse_controller_register(). For PSE controllers registered by
+ * this function, pse_controller_unregister() is automatically called on
+ * driver detach. See pse_controller_register() for more information.
+ */
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev)
+{
+ struct pse_controller_dev **pcdevp;
+ int ret;
+
+ pcdevp = devres_alloc(devm_pse_controller_release, sizeof(*pcdevp),
+ GFP_KERNEL);
+ if (!pcdevp)
+ return -ENOMEM;
+
+ ret = pse_controller_register(pcdev);
+ if (ret) {
+ devres_free(pcdevp);
+ return ret;
+ }
+
+ *pcdevp = pcdev;
+ devres_add(dev, pcdevp);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_pse_controller_register);
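A hedged sketch (not part of the patch) of how a hypothetical multi-line PSE controller driver might fill in struct pse_controller_dev and register it with the devm helper above; example_pse_ops, the probe function name and the line count are assumptions:

	#include <linux/platform_device.h>
	#include <linux/pse-pd/pse.h>

	static int example_pse_probe(struct platform_device *pdev)
	{
		struct pse_controller_dev *pcdev;

		pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
		if (!pcdev)
			return -ENOMEM;

		pcdev->dev = &pdev->dev;
		pcdev->owner = THIS_MODULE;
		pcdev->ops = &example_pse_ops;	/* hypothetical ops table */
		pcdev->nr_lines = 4;		/* four PSE lines */
		pcdev->of_pse_n_cells = 1;	/* #pse-cells = <1>, simple xlate */

		return devm_pse_controller_register(&pdev->dev, pcdev);
	}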
+
+/* PSE control section */
+
+static void __pse_control_release(struct kref *kref)
+{
+ struct pse_control *psec = container_of(kref, struct pse_control,
+ refcnt);
+
+ lockdep_assert_held(&pse_list_mutex);
+
+ module_put(psec->pcdev->owner);
+
+ list_del(&psec->list);
+ kfree(psec);
+}
+
+static void __pse_control_put_internal(struct pse_control *psec)
+{
+ lockdep_assert_held(&pse_list_mutex);
+
+ kref_put(&psec->refcnt, __pse_control_release);
+}
+
+/**
+ * pse_control_put - free the PSE control
+ * @psec: PSE control pointer
+ */
+void pse_control_put(struct pse_control *psec)
+{
+ if (IS_ERR_OR_NULL(psec))
+ return;
+
+ mutex_lock(&pse_list_mutex);
+ __pse_control_put_internal(psec);
+ mutex_unlock(&pse_list_mutex);
+}
+EXPORT_SYMBOL_GPL(pse_control_put);
+
+static struct pse_control *
+pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
+{
+ struct pse_control *psec;
+
+ lockdep_assert_held(&pse_list_mutex);
+
+ list_for_each_entry(psec, &pcdev->pse_control_head, list) {
+ if (psec->id == index) {
+ kref_get(&psec->refcnt);
+ return psec;
+ }
+ }
+
+ psec = kzalloc(sizeof(*psec), GFP_KERNEL);
+ if (!psec)
+ return ERR_PTR(-ENOMEM);
+
+ if (!try_module_get(pcdev->owner)) {
+ kfree(psec);
+ return ERR_PTR(-ENODEV);
+ }
+
+ psec->pcdev = pcdev;
+ list_add(&psec->list, &pcdev->pse_control_head);
+ psec->id = index;
+ kref_init(&psec->refcnt);
+
+ return psec;
+}
+
+struct pse_control *
+of_pse_control_get(struct device_node *node)
+{
+ struct pse_controller_dev *r, *pcdev;
+ struct of_phandle_args args;
+ struct pse_control *psec;
+ int psec_id;
+ int ret;
+
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
+ ret = of_parse_phandle_with_args(node, "pses", "#pse-cells", 0, &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_lock(&pse_list_mutex);
+ pcdev = NULL;
+ list_for_each_entry(r, &pse_controller_list, list) {
+ if (args.np == r->dev->of_node) {
+ pcdev = r;
+ break;
+ }
+ }
+
+ if (!pcdev) {
+ psec = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ if (WARN_ON(args.args_count != pcdev->of_pse_n_cells)) {
+ psec = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ psec_id = pcdev->of_xlate(pcdev, &args);
+ if (psec_id < 0) {
+ psec = ERR_PTR(psec_id);
+ goto out;
+ }
+
+ /* pse_list_mutex also protects the pcdev's pse_control list */
+ psec = pse_control_get_internal(pcdev, psec_id);
+
+out:
+ mutex_unlock(&pse_list_mutex);
+ of_node_put(args.np);
+
+ return psec;
+}
+EXPORT_SYMBOL_GPL(of_pse_control_get);
+
+/**
+ * pse_ethtool_get_status - get status of PSE control
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @status: struct to store PSE status
+ */
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ const struct pse_controller_ops *ops;
+ int err;
+
+ ops = psec->pcdev->ops;
+
+ if (!ops->ethtool_get_status) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not support status report");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&psec->pcdev->lock);
+ err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status);
+ mutex_unlock(&psec->pcdev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
+
+/**
+ * pse_ethtool_set_config - set PSE control configuration
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @config: Configuration of the test to run
+ */
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ const struct pse_controller_ops *ops;
+ int err;
+
+ ops = psec->pcdev->ops;
+
+ if (!ops->ethtool_set_config) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not configuration");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&psec->pcdev->lock);
+ err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config);
+ mutex_unlock(&psec->pcdev->lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
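A hedged sketch of the consumer side (not part of the patch): a network driver could resolve its "pses" phandle with of_pse_control_get(), query the PoDL status through pse_ethtool_get_status() and drop the reference with pse_control_put(). The function name and the surrounding context (np, extack) are assumptions:

	#include <linux/pse-pd/pse.h>

	static int example_query_pse(struct device_node *np,
				     struct netlink_ext_ack *extack)
	{
		struct pse_control_status status = {};
		struct pse_control *psec;
		int err;

		psec = of_pse_control_get(np);
		if (IS_ERR(psec))
			return PTR_ERR(psec);

		err = pse_ethtool_get_status(psec, extack, &status);
		if (!err)
			pr_info("PoDL PSE admin state: %d, power: %d\n",
				status.podl_admin_state, status.podl_pw_status);

		pse_control_put(psec);
		return err;
	}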
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
new file mode 100644
index 000000000000..e2bf8306ca90
--- /dev/null
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Driver for the regulator based Ethernet Power Sourcing Equipment, without
+// auto classification support.
+//
+// Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+#include <linux/regulator/consumer.h>
+
+struct pse_reg_priv {
+ struct pse_controller_dev pcdev;
+ struct regulator *ps; /* power source */
+ enum ethtool_podl_pse_admin_state admin_state;
+};
+
+static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct pse_reg_priv, pcdev);
+}
+
+static int
+pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+
+ if (priv->admin_state == config->admin_cotrol)
+ return 0;
+
+ switch (config->admin_cotrol) {
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
+ ret = regulator_enable(priv->ps);
+ break;
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
+ ret = regulator_disable(priv->ps);
+ break;
+ default:
+ dev_err(pcdev->dev, "Unknown admin state %i\n",
+ config->admin_cotrol);
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ priv->admin_state = config->admin_cotrol;
+
+ return 0;
+}
+
+static int
+pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+
+ ret = regulator_is_enabled(priv->ps);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
+ else
+ status->podl_pw_status =
+ ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
+
+ status->podl_admin_state = priv->admin_state;
+
+ return 0;
+}
+
+static const struct pse_controller_ops pse_reg_ops = {
+ .ethtool_get_status = pse_reg_ethtool_get_status,
+ .ethtool_set_config = pse_reg_ethtool_set_config,
+};
+
+static int
+pse_reg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pse_reg_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (!pdev->dev.of_node)
+ return -ENOENT;
+
+ priv->ps = devm_regulator_get_exclusive(dev, "pse");
+ if (IS_ERR(priv->ps))
+ return dev_err_probe(dev, PTR_ERR(priv->ps),
+ "failed to get PSE regulator.\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = regulator_is_enabled(priv->ps);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ else
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
+
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &pse_reg_ops;
+ priv->pcdev.dev = dev;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ dev_err(dev, "failed to register PSE controller (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+static const __maybe_unused struct of_device_id pse_reg_of_match[] = {
+ { .compatible = "podl-pse-regulator", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pse_reg_of_match);
+
+static struct platform_driver pse_reg_driver = {
+ .probe = pse_reg_probe,
+ .driver = {
+ .name = "PSE regulator",
+ .of_match_table = of_match_ptr(pse_reg_of_match),
+ },
+};
+module_platform_driver(pse_reg_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("regulator based Ethernet Power Sourcing Equipment");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pse-regulator");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 39e61e07e489..fbcb9d05da64 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -443,10 +443,10 @@ static void rionet_get_drvinfo(struct net_device *ndev,
{
struct rionet_private *rnet = netdev_priv(ndev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
- strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "n/a", sizeof(info->fw_version));
+ strscpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}
static u32 rionet_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index aac133a1e27a..62ade69295a9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1275,10 +1275,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
}
}
- netif_addr_lock_bh(dev);
- dev_uc_sync_multiple(port_dev, dev);
- dev_mc_sync_multiple(port_dev, dev);
- netif_addr_unlock_bh(dev);
+ if (dev->flags & IFF_UP) {
+ netif_addr_lock_bh(dev);
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
+ }
port->index = -1;
list_add_tail_rcu(&port->list, &team->port_list);
@@ -1349,8 +1351,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
- dev_uc_unsync(port_dev, dev);
- dev_mc_unsync(port_dev, dev);
+ if (dev->flags & IFF_UP) {
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
+ }
dev_close(port_dev);
team_port_leave(team, port);
@@ -1700,6 +1704,14 @@ static int team_open(struct net_device *dev)
static int team_close(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ dev_uc_unsync(port->dev, dev);
+ dev_mc_unsync(port->dev, dev);
+ }
+
return 0;
}
@@ -2070,8 +2082,8 @@ static const struct net_device_ops team_netdev_ops = {
static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
@@ -2840,6 +2852,7 @@ static struct genl_family team_nl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = team_nl_ops,
.n_small_ops = ARRAY_SIZE(team_nl_ops),
+ .resv_start_op = TEAM_CMD_PORT_LIST_GET + 1,
.mcgrps = team_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ff5d0e98a088..83fcaeb2ac5e 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
+ * Networking over Thunderbolt/USB4 cables using USB4NET protocol
+ * (formerly Apple ThunderboltIP).
*
* Copyright (C) 2017, Intel Corporation
* Authors: Amir Levy <amir.jer.levy@intel.com>
@@ -30,6 +31,7 @@
#define TBNET_RING_SIZE 256
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 10
+#define TBNET_E2E BIT(0)
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
@@ -209,6 +211,10 @@ static const uuid_t tbnet_svc_uuid =
static struct tb_property_dir *tbnet_dir;
+static bool tbnet_e2e = true;
+module_param_named(e2e, tbnet_e2e, bool, 0444);
+MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
+
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
enum thunderbolt_ip_type type, size_t size, u32 command_id)
@@ -612,18 +618,13 @@ static void tbnet_connected_work(struct work_struct *work)
return;
}
- /* Both logins successful so enable the high-speed DMA paths and
- * start the network device queue.
+ /* Both logins successful so enable the rings, high-speed DMA
+ * paths and start the network device queue.
+ *
+ * Note we enable the DMA paths last to make sure we have primed
+ * the Rx ring before any incoming packets are allowed to
+ * arrive.
*/
- ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
- net->rx_ring.ring->hop,
- net->remote_transmit_path,
- net->tx_ring.ring->hop);
- if (ret) {
- netdev_err(net->dev, "failed to enable DMA paths\n");
- return;
- }
-
tb_ring_start(net->tx_ring.ring);
tb_ring_start(net->rx_ring.ring);
@@ -635,10 +636,21 @@ static void tbnet_connected_work(struct work_struct *work)
if (ret)
goto err_free_rx_buffers;
+ ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
+ net->rx_ring.ring->hop,
+ net->remote_transmit_path,
+ net->tx_ring.ring->hop);
+ if (ret) {
+ netdev_err(net->dev, "failed to enable DMA paths\n");
+ goto err_free_tx_buffers;
+ }
+
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
return;
+err_free_tx_buffers:
+ tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
@@ -867,6 +879,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
+ unsigned int flags;
int hopid;
netif_carrier_off(dev);
@@ -891,9 +904,14 @@ static int tbnet_open(struct net_device *dev)
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
- ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
- RING_FLAG_FRAME, 0, sof_mask, eof_mask,
- tbnet_start_poll, net);
+ flags = RING_FLAG_FRAME;
+ /* Only enable full E2E if the other end supports it too */
+ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+ flags |= RING_FLAG_E2E;
+
+ ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+ net->tx_ring.ring->hop, sof_mask,
+ eof_mask, tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");
tb_ring_free(net->tx_ring.ring);
@@ -1264,7 +1282,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
dev->features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
- netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &net->napi, tbnet_poll);
/* MTU range: 68 - 65522 */
dev->min_mtu = ETH_MIN_MTU;
@@ -1356,6 +1374,7 @@ static struct tb_service_driver tbnet_driver = {
static int __init tbnet_init(void)
{
+ unsigned int flags;
int ret;
tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1365,12 +1384,11 @@ static int __init tbnet_init(void)
tb_property_add_immediate(tbnet_dir, "prtcid", 1);
tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
- /* Currently only announce support for match frags ID (bit 1). Bit 0
- * is reserved for full E2E flow control which we do not support at
- * the moment.
- */
- tb_property_add_immediate(tbnet_dir, "prtcstns",
- TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+ flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+ if (tbnet_e2e)
+ flags |= TBNET_E2E;
+ tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret) {
@@ -1393,5 +1411,5 @@ module_exit(tbnet_exit);
MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
-MODULE_DESCRIPTION("Thunderbolt network driver");
+MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");
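
Editor's note: the E2E hunks above only set RING_FLAG_E2E when the local e2e module parameter is enabled and the remote service advertises TBNET_E2E (bit 0) in its prtcstns property. An illustrative helper restating that negotiation (names are hypothetical, not from the driver):

/* Illustrative only: full end-to-end flow control is used when the local
 * module parameter is enabled AND the remote side advertises TBNET_E2E in
 * its prtcstns directory entry. */
static bool tbnet_negotiate_e2e(bool local_e2e, u32 remote_prtcstns)
{
	return local_e2e && (remote_prtcstns & TBNET_E2E);
}

Because tbnet_init() only adds TBNET_E2E to the announced prtcstns when the parameter is set, disabling it at load time also stops the local side from offering E2E.
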
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 259b2b84b2b3..27c6d235cbda 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2664,7 +2664,7 @@ static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
- return sprintf(buf, "0x%x\n", tun_flags(tun));
+ return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
}
static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
@@ -2672,9 +2672,9 @@ static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return uid_valid(tun->owner)?
- sprintf(buf, "%u\n",
- from_kuid_munged(current_user_ns(), tun->owner)):
- sprintf(buf, "-1\n");
+ sysfs_emit(buf, "%u\n",
+ from_kuid_munged(current_user_ns(), tun->owner)) :
+ sysfs_emit(buf, "-1\n");
}
static ssize_t group_show(struct device *dev, struct device_attribute *attr,
@@ -2682,9 +2682,9 @@ static ssize_t group_show(struct device *dev, struct device_attribute *attr,
{
struct tun_struct *tun = netdev_priv(to_net_dev(dev));
return gid_valid(tun->group) ?
- sprintf(buf, "%u\n",
- from_kgid_munged(current_user_ns(), tun->group)):
- sprintf(buf, "-1\n");
+ sysfs_emit(buf, "%u\n",
+ from_kgid_munged(current_user_ns(), tun->group)) :
+ sysfs_emit(buf, "-1\n");
}
static DEVICE_ATTR_RO(tun_flags);
@@ -2828,7 +2828,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
rcu_assign_pointer(tfile->tun, tun);
}
- netif_carrier_on(tun->dev);
+ if (ifr->ifr_flags & IFF_NO_CARRIER)
+ netif_carrier_off(tun->dev);
+ else
+ netif_carrier_on(tun->dev);
/* Make sure persistent devices do not get stuck in
* xoff state.
@@ -3056,8 +3059,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
- return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
- (unsigned int __user*)argp);
+ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+ TUN_FEATURES, (unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE) {
return tun_set_queue(file, &ifr);
} else if (cmd == SIOCGSKNS) {
@@ -3540,15 +3543,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
- strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
+ strscpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case IFF_TAP:
- strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
+ strscpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
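
Editor's note: the tun hunks above accept IFF_NO_CARRIER at TUNSETIFF and advertise it via TUNGETFEATURES. A hedged userspace sketch of how a caller might use it, assuming a uapi header that defines IFF_NO_CARRIER (error handling trimmed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Sketch only: create a tun interface whose carrier starts off, so userspace
 * can flip the link state once its backend is actually ready. */
static int tun_open_no_carrier(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_CARRIER;	/* flag added by this series */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
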
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 76659c1c525a..4402eedb3d1a 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -168,7 +168,7 @@ config USB_NET_AX8817X
tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
depends on USB_USBNET
select CRC32
- select PHYLIB
+ select PHYLINK
select AX88796B_PHY
imply NET_SELFTESTS
default y
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 3020e81159d0..a017e9de2119 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -201,7 +201,7 @@ static void aqc111_get_drvinfo(struct net_device *net,
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
aqc111_data->fw_ver.major,
aqc111_data->fw_ver.minor,
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 21c1ca275cc4..74162190bccc 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -27,6 +27,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <net/selftests.h>
+#include <linux/phylink.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -185,6 +186,8 @@ struct asix_common_private {
struct mii_bus *mdio;
struct phy_device *phydev;
struct phy_device *phydev_int;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
u16 phy_addr;
bool embd_phy;
u8 chipcode;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 9ea91c3ff045..72ffc89b477a 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -752,8 +752,8 @@ void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
int asix_set_mac_address(struct net_device *net, void *p)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 5b5eb630c4b7..11f60d32be82 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -303,6 +303,24 @@ static int ax88772_ethtool_get_sset_count(struct net_device *ndev, int sset)
}
}
+static void ax88772_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct usbnet *dev = netdev_priv(ndev);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+}
+
+static int ax88772_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct usbnet *dev = netdev_priv(ndev);
+ struct asix_common_private *priv = dev->driver_priv;
+
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+}
+
static const struct ethtool_ops ax88772_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = usbnet_get_link,
@@ -319,6 +337,8 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.self_test = net_selftest,
.get_strings = ax88772_ethtool_get_strings,
.get_sset_count = ax88772_ethtool_get_sset_count,
+ .get_pauseparam = ax88772_ethtool_get_pauseparam,
+ .set_pauseparam = ax88772_ethtool_set_pauseparam,
};
static int ax88772_reset(struct usbnet *dev)
@@ -343,7 +363,7 @@ static int ax88772_reset(struct usbnet *dev)
if (ret < 0)
goto out;
- phy_start(priv->phydev);
+ phylink_start(priv->phylink);
return 0;
@@ -590,8 +610,11 @@ static void ax88772_suspend(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
u16 medium;
- if (netif_running(dev->net))
- phy_stop(priv->phydev);
+ if (netif_running(dev->net)) {
+ rtnl_lock();
+ phylink_suspend(priv->phylink, false);
+ rtnl_unlock();
+ }
/* Stop MAC operation */
medium = asix_read_medium_status(dev, 1);
@@ -622,8 +645,11 @@ static void ax88772_resume(struct usbnet *dev)
if (!priv->reset(dev, 1))
break;
- if (netif_running(dev->net))
- phy_start(priv->phydev);
+ if (netif_running(dev->net)) {
+ rtnl_lock();
+ phylink_resume(priv->phylink);
+ rtnl_unlock();
+ }
}
static int asix_resume(struct usb_interface *intf)
@@ -667,8 +693,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return -ENODEV;
}
- ret = phy_connect_direct(dev->net, priv->phydev, &asix_adjust_link,
- PHY_INTERFACE_MODE_INTERNAL);
+ ret = phylink_connect_phy(priv->phylink, priv->phydev);
if (ret) {
netdev_err(dev->net, "Could not connect PHY\n");
return ret;
@@ -688,6 +713,9 @@ static int ax88772_init_phy(struct usbnet *dev)
*/
priv->phydev_int = mdiobus_get_phy(priv->mdio, AX_EMBD_PHY_ADDR);
if (!priv->phydev_int) {
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
netdev_err(dev->net, "Could not find internal PHY\n");
return -ENODEV;
}
@@ -698,6 +726,89 @@ static int ax88772_init_phy(struct usbnet *dev)
return 0;
}
+static void ax88772_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Nothing to do */
+}
+
+static void ax88772_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+
+ asix_write_medium_mode(dev, 0, 0);
+ usbnet_link_change(dev, false, false);
+}
+
+static void ax88772_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct usbnet *dev = netdev_priv(to_net_dev(config->dev));
+ u16 m = AX_MEDIUM_AC | AX_MEDIUM_RE;
+
+ m |= duplex ? AX_MEDIUM_FD : 0;
+
+ switch (speed) {
+ case SPEED_100:
+ m |= AX_MEDIUM_PS;
+ break;
+ case SPEED_10:
+ break;
+ default:
+ return;
+ }
+
+ if (tx_pause)
+ m |= AX_MEDIUM_TFC;
+
+ if (rx_pause)
+ m |= AX_MEDIUM_RFC;
+
+ asix_write_medium_mode(dev, m, 0);
+ usbnet_link_change(dev, true, false);
+}
+
+static const struct phylink_mac_ops ax88772_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = ax88772_mac_config,
+ .mac_link_down = ax88772_mac_link_down,
+ .mac_link_up = ax88772_mac_link_up,
+};
+
+static int ax88772_phylink_setup(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+ phy_interface_t phy_if_mode;
+ struct phylink *phylink;
+
+ priv->phylink_config.dev = &dev->net->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ priv->phylink_config.supported_interfaces);
+
+ if (priv->embd_phy)
+ phy_if_mode = PHY_INTERFACE_MODE_INTERNAL;
+ else
+ phy_if_mode = PHY_INTERFACE_MODE_RMII;
+
+ phylink = phylink_create(&priv->phylink_config, dev->net->dev.fwnode,
+ phy_if_mode, &ax88772_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phylink = phylink;
+ return 0;
+}
+
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct asix_common_private *priv;
@@ -788,14 +899,22 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
return ret;
- return ax88772_init_phy(dev);
+ ret = ax88772_phylink_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = ax88772_init_phy(dev);
+ if (ret)
+ phylink_destroy(priv->phylink);
+
+ return ret;
}
static int ax88772_stop(struct usbnet *dev)
{
struct asix_common_private *priv = dev->driver_priv;
- phy_stop(priv->phydev);
+ phylink_stop(priv->phylink);
return 0;
}
@@ -804,7 +923,10 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct asix_common_private *priv = dev->driver_priv;
- phy_disconnect(priv->phydev);
+ rtnl_lock();
+ phylink_disconnect_phy(priv->phylink);
+ rtnl_unlock();
+ phylink_destroy(priv->phylink);
asix_rx_fixup_common_free(dev->driver_priv);
}
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 843893482abd..ff439ef535ac 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -672,8 +672,8 @@ static void catc_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct catc *catc = netdev_priv(dev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2de09ad5bac0..e11f70911acc 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f8221a7acf62..ce1f6081d582 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1380,7 +1380,8 @@ static void hso_serial_cleanup(struct tty_struct *tty)
}
/* setup the term */
-static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void hso_serial_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct hso_serial *serial = tty->driver_data;
unsigned long flags;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3226ab33afae..f18ab8e220db 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -4374,7 +4374,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
- netif_napi_add(netdev, &dev->napi, lan78xx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &dev->napi, lan78xx_poll);
INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
init_usb_anchor(&dev->deferred);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index feb247e355f7..81ca64debc5b 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -894,7 +894,7 @@ static void pegasus_get_drvinfo(struct net_device *dev,
{
pegasus_t *pegasus = netdev_priv(dev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 709e3c59e340..26c34a7c21bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1087,6 +1087,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1401,6 +1402,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81c2, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d142ac8fcf6e..a481a1d831e2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -770,6 +770,8 @@ enum rtl8152_flags {
RX_EPROTO,
};
+#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
@@ -1873,7 +1875,9 @@ static void intr_callback(struct urb *urb)
"Stop submitting intr, status %d\n", status);
return;
case -EOVERFLOW:
- netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ if (net_ratelimit())
+ netif_info(tp, intr, tp->netdev,
+ "intr status -EOVERFLOW\n");
goto resubmit;
/* -EPIPE: should clear the halt */
default:
@@ -2726,22 +2730,26 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
ocp_data |= RCR_AM | RCR_AAP;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(netdev) > multicast_filter_limit) ||
- (netdev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev->flags & IFF_MULTICAST &&
+ netdev_mc_count(netdev) > multicast_filter_limit) ||
+ (netdev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
ocp_data |= RCR_AM;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
} else {
- struct netdev_hw_addr *ha;
-
mc_filter[1] = 0;
mc_filter[0] = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- ocp_data |= RCR_AM;
+ if (netdev->flags & IFF_MULTICAST) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ ocp_data |= RCR_AM;
+ }
}
}
@@ -8601,11 +8609,11 @@ static void rtl8152_get_drvinfo(struct net_device *netdev,
{
struct r8152 *tp = netdev_priv(netdev);
- strlcpy(info->driver, MODULENAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, MODULENAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info));
if (!IS_ERR_OR_NULL(tp->rtl_fw.fw))
- strlcpy(info->fw_version, tp->rtl_fw.version,
+ strscpy(info->fw_version, tp->rtl_fw.version,
sizeof(info->fw_version));
}
@@ -9581,6 +9589,8 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
if (vendor_id == VENDOR_ID_LENOVO) {
switch (product_id) {
+ case DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB:
+ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
@@ -9828,6 +9838,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
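
Editor's note: the _rtl8152_set_rx_mode() hunk above now builds the 64-bit multicast hash filter only when IFF_MULTICAST is set. For reference, a sketch of the per-address hashing it performs (illustrative helper, not from the driver): the top six bits of the Ethernet CRC select one of 64 filter bits split across two 32-bit words.

#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Illustrative only: mirrors the per-address hashing in
 * _rtl8152_set_rx_mode() above. */
static void rtl_mc_filter_set(u32 mc_filter[2], const u8 addr[ETH_ALEN])
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 CRC bits */

	mc_filter[bit_nr >> 5] |= 1U << (bit_nr & 31);	/* word, then bit */
}
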
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 3d2bf2acca94..97afd7335d86 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -769,8 +769,8 @@ static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinf
{
rtl8150_t *dev = netdev_priv(netdev);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index bb4cbe8fc846..b3ae949e6f1c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -612,8 +612,8 @@ static void sierra_net_get_drvinfo(struct net_device *net,
{
/* Inherit standard device info */
usbnet_get_drvinfo(net, info);
- strlcpy(info->driver, driver_name, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, driver_name, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
}
static u32 sierra_net_get_link(struct net_device *net)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aaa89b4cfd50..64a9a80b2309 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1050,9 +1050,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strlcpy (info->driver, dev->driver_name, sizeof info->driver);
- strlcpy (info->fw_version, dev->driver_info->description,
- sizeof info->fw_version);
+ strscpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strscpy(info->fw_version, dev->driver_info->description,
+ sizeof(info->fw_version));
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
@@ -1598,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
struct usbnet *dev;
struct usb_device *xdev;
struct net_device *net;
+ struct urb *urb;
dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
@@ -1614,7 +1615,11 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);
- usb_scuttle_anchored_urbs(&dev->deferred);
+ while ((urb = usb_get_from_anchor(&dev->deferred))) {
+ dev_kfree_skb(urb->context);
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
if (dev->driver_info->unbind)
dev->driver_info->unbind(dev, intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 466da01ba2e3..09682ea3354e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -128,8 +128,8 @@ static int veth_get_link_ksettings(struct net_device *dev,
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -1070,7 +1070,7 @@ static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
struct veth_rq *rq = &priv->rq[i];
if (!napi_already_on)
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll);
err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
if (err < 0)
goto err_rxq_reg;
@@ -1184,7 +1184,7 @@ static int veth_napi_enable_range(struct net_device *dev, int start, int end)
for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll);
}
err = __veth_napi_enable_range(dev, start, end);
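
Editor's note: several hunks in this series drop the explicit weight from netif_napi_add(). A sketch under the assumption behind these conversions: the three-argument form applies the default poll budget itself, and a driver that genuinely needs a non-default weight is expected to use the weighted variant instead.

#include <linux/netdevice.h>

/* Sketch only, assuming the three-argument netif_napi_add() of this series:
 * the default poll budget is applied internally, so no weight is passed. */
static void example_register_napi(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(dev, napi, poll);
}
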
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9cce7dec7366..7106932c6f88 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -225,6 +225,9 @@ struct virtnet_info {
/* I like... big packets and I cannot lie! */
bool big_packets;
+ /* number of sg entries allocated for big packets */
+ unsigned int big_packets_num_skbfrags;
+
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
@@ -1331,10 +1334,10 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
char *p;
int i, err, offset;
- sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+ sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
- /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
- for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
+ for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
first = get_a_page(rq, gfp);
if (!first) {
if (list)
@@ -1365,7 +1368,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
first, gfp);
if (err < 0)
give_pages(rq, first);
@@ -2594,9 +2597,9 @@ static void virtnet_get_drvinfo(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct virtio_device *vdev = vi->vdev;
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
@@ -3682,13 +3685,35 @@ static int virtnet_validate(struct virtio_device *vdev)
return 0;
}
+static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
+{
+ return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO);
+}
+
+static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
+{
+ bool guest_gso = virtnet_check_guest_gso(vi);
+
+ /* If device can receive ANY guest GSO packets, regardless of mtu,
+	 * allocate packets of maximum size, otherwise limit them to
+	 * what the mtu needs.
+ */
+ if (mtu > ETH_DATA_LEN || guest_gso) {
+ vi->big_packets = true;
+ vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
+ }
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
struct net_device *dev;
struct virtnet_info *vi;
u16 max_queue_pairs;
- int mtu;
+ int mtu = 0;
/* Find if host supports multiqueue/rss virtio_net device */
max_queue_pairs = 1;
@@ -3776,13 +3801,6 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
- /* If we can receive ANY GSO packets, we must allocate large ones. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
- vi->big_packets = true;
-
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
@@ -3848,12 +3866,10 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->mtu = mtu;
dev->max_mtu = mtu;
-
- /* TODO: size buffers correctly in this case. */
- if (dev->mtu > ETH_DATA_LEN)
- vi->big_packets = true;
}
+ virtnet_set_big_packets(vi, mtu);
+
if (vi->any_header_sg)
dev->needed_headroom = vi->hdr_len;
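
Editor's note: virtnet_set_big_packets() above sizes the big-packet receive chain from the negotiated MTU when no guest GSO feature is offered. A worked sketch with illustrative numbers (4 KiB pages and MTU 9000; neither value comes from the patch):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Mirrors the sizing rule introduced above. With 4 KiB pages, a non-GSO
 * device at MTU 9000 needs DIV_ROUND_UP(9000, 4096) == 3 page fragments per
 * receive buffer, versus MAX_SKB_FRAGS (typically 17) as soon as any guest
 * GSO feature is negotiated. */
static unsigned int example_big_packet_frags(bool guest_gso, int mtu)
{
	return guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
}
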
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 53b3b241e027..d3e7b27eb933 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3882,11 +3882,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
netif_napi_add(adapter->netdev,
&adapter->rx_queue[i].napi,
- vmxnet3_poll_rx_only, 64);
+ vmxnet3_poll_rx_only);
}
} else {
netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
- vmxnet3_poll, 64);
+ vmxnet3_poll);
}
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e2034adc3a1a..18cf7c723201 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -209,12 +209,12 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
+ strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 5df7a0abc39d..badf6f09ae51 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1541,8 +1541,8 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
static void vrf_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops vrf_ethtool_ops = {
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index c3285242f74f..6ab669dcd1c6 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -713,12 +713,9 @@ static struct sk_buff *vxlan_gro_receive(struct sock *sk,
off_vx = skb_gro_offset(skb);
hlen = off_vx + sizeof(*vh);
- vh = skb_gro_header_fast(skb, off_vx);
- if (skb_gro_header_hard(skb, hlen)) {
- vh = skb_gro_header_slow(skb, hlen, off_vx);
- if (unlikely(!vh))
- goto out;
- }
+ vh = skb_gro_header(skb, hlen, off_vx);
+ if (unlikely(!vh))
+ goto out;
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
@@ -3313,8 +3310,8 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
static void vxlan_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}
static int vxlan_get_link_ksettings(struct net_device *dev,
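
Editor's note: the GRO hunk above folds the fast/hard/slow header sequence into a single skb_gro_header() call. A sketch of the equivalence the call site relies on, with the signature assumed from its use here:

#include <net/gro.h>

/* Sketch only: a single helper that tries the fast path first and falls back
 * to pulling the header, returning NULL on failure so the caller can bail. */
static inline void *example_gro_header(struct sk_buff *skb,
				       unsigned int hlen, unsigned int offset)
{
	void *hdr = skb_gro_header_fast(skb, offset);

	if (skb_gro_header_hard(skb, hlen))
		hdr = skb_gro_header_slow(skb, hlen, offset);
	return hdr;
}
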
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index d0f3b6d7f408..43c8c84e7ea8 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -436,14 +436,13 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
+ struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) &&
- addr->sa_family == AF_INET) ||
- (len == sizeof(struct sockaddr_in6) &&
- addr->sa_family == AF_INET6)) {
- struct endpoint endpoint = { { { 0 } } };
-
- memcpy(&endpoint.addr, addr, len);
+ if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
+ endpoint.addr4 = *(struct sockaddr_in *)addr;
+ wg_socket_set_peer_endpoint(peer, &endpoint);
+ } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
+ endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
@@ -621,6 +620,7 @@ static const struct genl_ops genl_ops[] = {
static struct genl_family genl_family __ro_after_init = {
.ops = genl_ops,
.n_ops = ARRAY_SIZE(genl_ops),
+ .resv_start_op = WG_CMD_SET_DEVICE + 1,
.name = WG_GENL_NAME,
.version = WG_GENL_VERSION,
.maxattr = WGDEVICE_A_MAX,
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index 1acd00ab2fbc..1cb502a932e0 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -54,8 +54,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
skb_queue_head_init(&peer->staged_packet_queue);
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
- netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll);
napi_enable(&peer->napi);
list_add_tail(&peer->peer_list, &wg->peer_list);
INIT_LIST_HEAD(&peer->allowedips_list);
diff --git a/drivers/net/wireguard/selftest/ratelimiter.c b/drivers/net/wireguard/selftest/ratelimiter.c
index ba87d294604f..d4bb40a695ab 100644
--- a/drivers/net/wireguard/selftest/ratelimiter.c
+++ b/drivers/net/wireguard/selftest/ratelimiter.c
@@ -6,29 +6,28 @@
#ifdef DEBUG
#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
static const struct {
bool result;
- u64 nsec_to_sleep_before;
+ unsigned int msec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
- [PACKETS_BURSTABLE + 1] = { true, NSEC_PER_SEC / PACKETS_PER_SECOND },
+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
- [PACKETS_BURSTABLE + 3] = { true, (NSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
static __init unsigned int maximum_jiffies_at_index(int index)
{
- u64 total_nsecs = 2 * NSEC_PER_SEC / PACKETS_PER_SECOND / 3;
+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
- total_nsecs += expected_results[i].nsec_to_sleep_before;
- return nsecs_to_jiffies(total_nsecs);
+ total_msecs += expected_results[i].msec_to_sleep_before;
+ return msecs_to_jiffies(total_msecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
@@ -43,12 +42,8 @@ static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
- if (expected_results[i].nsec_to_sleep_before) {
- ktime_t timeout = ktime_add(ktime_add_ns(ktime_get_coarse_boottime(), TICK_NSEC * 4 / 3),
- ns_to_ktime(expected_results[i].nsec_to_sleep_before));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_ABS, CLOCK_BOOTTIME);
- }
+ if (expected_results[i].msec_to_sleep_before)
+ msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
@@ -132,7 +127,7 @@ bool __init wg_ratelimiter_selftest(void)
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
- BUILD_BUG_ON(NSEC_PER_SEC % PACKETS_PER_SECOND != 0);
+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
@@ -172,7 +167,7 @@ bool __init wg_ratelimiter_selftest(void)
++test;
#endif
- for (trials = TRIALS_BEFORE_GIVING_UP;;) {
+ for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 4481ed375f55..af6546572df2 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -101,7 +101,7 @@ int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
/* Step 1: Read 4 bytes of the target info and check if it is
- * the special sentinal version word or the first word in the
+ * the special sentinel version word or the first word in the
* version response.
*/
resplen = sizeof(u32);
@@ -111,7 +111,7 @@ int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
return ret;
}
- /* Some SDIO boards have a special sentinal byte before the real
+ /* Some SDIO boards have a special sentinel byte before the real
* version response.
*/
if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index c45c814fd122..59926227bd49 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1323,7 +1323,7 @@ EXPORT_SYMBOL(ath10k_ce_per_engine_service);
/*
* Handler for per-engine interrupts on ALL active CEs.
* This is used in cases where the system is sharing a
- * single interrput for all CEs
+ * single interrupt for all CEs
*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 276954b70d63..400f332a7ff0 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -98,6 +98,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA988X_HW_2_0_VERSION,
@@ -136,6 +137,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = true,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -175,6 +177,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -209,6 +212,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -247,6 +251,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -285,6 +290,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -323,6 +329,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -365,6 +372,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.supports_peer_stats_info = true,
.dynamic_sar_support = true,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -409,6 +417,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -460,6 +469,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -508,6 +518,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -546,6 +557,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -586,6 +598,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -617,6 +630,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = true,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -662,6 +676,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = false,
.hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@@ -693,6 +708,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false,
.dynamic_sar_support = true,
.hw_restart_disconnect = true,
+ .use_fw_tx_credits = false,
},
};
@@ -3080,7 +3096,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* enabled always.
*
* We can still enable BTCOEX if firmware has the support
- * eventhough btceox_support value is
+ * even though btceox_support value is
* ATH10K_DT_BTCOEX_NOT_FOUND
*/
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index d70d7d088a2b..f5de8ce8fb45 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -76,7 +76,7 @@
/* The magic used by QCA spec */
#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
-/* Default Airtime weight multipler (Tuned for multiclient performance) */
+/* Default Airtime weight multiplier (Tuned for multiclient performance) */
#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
#define ATH10K_MAX_RETRY_COUNT 30
@@ -857,7 +857,7 @@ enum ath10k_dev_flags {
/* Disable HW crypto engine */
ATH10K_FLAG_HW_CRYPTO_DISABLED,
- /* Bluetooth coexistance enabled */
+ /* Bluetooth coexistence enabled */
ATH10K_FLAG_BTCOEX,
/* Per Station statistics service */
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index fe6b6f97a916..2d1634a890dd 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -531,7 +531,7 @@ static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = {
{0x40000, 0x400A4},
- /* SI register is skiped here.
+ /* SI register is skipped here.
* Because it will cause bus hang
*
* {0x50000, 0x50018},
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 240d70515088..437b9759f05d 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -125,7 +125,7 @@ enum ath10k_mem_region_type {
* To minimize the size of the array, the list must obey the format:
* '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
* also obey to 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
- * we may encouter error in the dump processing.
+ * we may encounter error in the dump processing.
*/
struct ath10k_mem_section {
u32 start;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 39378e3f9b2b..c861e66ef6bc 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1081,7 +1081,7 @@ exit:
* struct available..
*/
-/* This generally cooresponds to the debugfs fw_stats file */
+/* This generally corresponds to the debugfs fw_stats file */
static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 367539f2c370..87a3365330ff 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -498,7 +498,7 @@ static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i)
{
switch (i) {
case ATH10K_AMPDU_SUBFRM_NUM_10:
- return "upto 10";
+ return "up to 10";
case ATH10K_AMPDU_SUBFRM_NUM_20:
return "11-20";
case ATH10K_AMPDU_SUBFRM_NUM_30:
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index fab398046a3f..6d1784f74bea 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -947,13 +947,18 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
return -ECOMM;
}
- htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ if (ar->hw_params.use_fw_tx_credits)
+ htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ else
+ htc->total_transmit_credits = 1;
+
htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
ath10k_dbg(ar, ATH10K_DBG_HTC,
- "Target ready! transmit resources: %d size:%d\n",
+ "Target ready! transmit resources: %d size:%d actual credits:%d\n",
htc->total_transmit_credits,
- htc->target_credit_size);
+ htc->target_credit_size,
+ msg->ready.credit_count);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
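
Editor's note: the hw_params additions and the htc.c hunk above introduce use_fw_tx_credits. Most targets keep trusting the credit count from the HTC READY message, while WCN3990 (the one entry setting the flag false) is pinned to a single outstanding credit. Restated as an illustrative helper, not from the driver:

/* Illustrative only: effective HTC transmit credits after the change above
 * -- firmware-reported for targets that implement credit accounting, a
 * fixed 1 otherwise. */
static u16 example_htc_tx_credits(bool use_fw_tx_credits, u16 fw_reported)
{
	return use_fw_tx_credits ? fw_reported : 1;
}
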
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 8a075a711b71..e76aab973320 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -301,12 +301,16 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
ath10k_htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
+ ath10k_htt_config_paddrs_ring(htt, NULL);
+
dma_free_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
htt->rx_ring.alloc_idx.vaddr,
htt->rx_ring.alloc_idx.paddr);
+ htt->rx_ring.alloc_idx.vaddr = NULL;
kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
@@ -846,8 +850,10 @@ err_dma_idx:
ath10k_htt_get_rx_ring_size(htt),
vaddr_ring,
htt->rx_ring.base_paddr);
+ ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
return -ENOMEM;
}
@@ -2496,7 +2502,7 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem handle that case either, so we introduce the
- * same limitiation here as well.
+ * same limitation here as well.
*/
if (num_mpdu_ranges > 1)
ath10k_warn(ar,
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a19b0795c86d..bd603feb7953 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1112,7 +1112,7 @@ int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
int len = 0;
int ret;
- /* Response IDs are echo-ed back only for host driver convienence
+ /* Response IDs are echo-ed back only for host driver convenience
* purposes. They aren't used for anything in the driver yet so use 0.
*/
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index e52e41a70321..6d32b43a4da6 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -84,7 +84,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
- /* Note: qca99x0 supports upto 12 Copy Engines. Other than address of
+ /* Note: qca99x0 supports up to 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
* It is not really necessary to assign address for newly supported
* CEs in this address table.
@@ -120,7 +120,7 @@ const struct ath10k_hw_regs qca4019_regs = {
.ce5_base_address = 0x0004b400,
.ce6_base_address = 0x0004b800,
.ce7_base_address = 0x0004bc00,
- /* qca4019 supports upto 12 copy engines. Since base address
+ /* qca4019 supports up to 12 copy engines. Since base address
* of ce8 to ce11 are not directly referred in the code,
* no need have them in separate members in this table.
* Copy Engine Address
@@ -924,7 +924,7 @@ static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
ath10k_hif_write32(ar, address, msb);
}
-/* 1. Write to memory region of target, such as IRAM adn DRAM.
+/* 1. Write to memory region of target, such as IRAM and DRAM.
* 2. Target address( 0 ~ 00100000 & 0x00400000~0x00500000)
* can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
* 3. In order to access the region other than the above,
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 93acf0dd580a..1b99f3a39a11 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -635,6 +635,8 @@ struct ath10k_hw_params {
bool dynamic_sar_support;
bool hw_restart_disconnect;
+
+ bool use_fw_tx_credits;
};
struct htt_resp;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 9dd3b8fba4b0..ec8d5b29bc72 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -864,11 +864,36 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
+{
+ int peer_id, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+}
+
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
- int peer_id;
- int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -880,25 +905,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
- for_each_set_bit(peer_id, peer->peer_ids,
- ATH10K_MAX_NUM_PEER_IDS) {
- ar->peer_map[peer_id] = NULL;
- }
-
- /* Double check that peer is properly un-referenced from
- * the peer_map
- */
- for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
- if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
- peer->addr, peer, i);
- ar->peer_map[i] = NULL;
- }
- }
-
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
spin_unlock_bh(&ar->data_lock);
}
@@ -4044,7 +4051,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
ath10k_tx_h_seq_no(vif, skb);
break;
case ATH10K_HW_TXRX_ETHERNET:
- /* Convert 802.11->802.3 header only if the frame was erlier
+ /* Convert 802.11->802.3 header only if the frame was earlier
* encapsulated to 802.11 by mac80211. Otherwise pass it as is.
*/
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
@@ -7621,10 +7628,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/* Clean up the peer object as well since we
* must have failed to do this above.
*/
- list_del(&peer->list);
- ar->peer_map[i] = NULL;
- kfree(peer);
- ar->num_peers--;
+ ath10k_peer_map_cleanup(ar, peer);
}
}
spin_unlock_bh(&ar->data_lock);
@@ -8093,7 +8097,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* TODO: Implement this function properly
* For now it is needed to reply to Probe Requests in IBSS mode.
- * Propably we need this information from FW.
+ * Probably we need this information from FW.
*/
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
{
@@ -8516,7 +8520,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->deflink.bandwidth,
sta->deflink.rx_nss,
- sta->smps_mode);
+ sta->deflink.smps_mode);
if (changed & IEEE80211_RC_BW_CHANGED) {
bw = WMI_PEER_CHWIDTH_20MHZ;
@@ -8550,7 +8554,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -8563,7 +8567,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
break;
case IEEE80211_SMPS_NUM_MODES:
ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
- sta->smps_mode, sta->addr);
+ sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -9682,7 +9686,7 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
},
};
-/* FIXME: This is not thouroughly tested. These combinations may over- or
+/* FIXME: This is not thoroughly tested. These combinations may over- or
* underestimate hw/fw capabilities.
*/
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
@@ -9922,7 +9926,7 @@ int ath10k_mac_register(struct ath10k *ar)
WLAN_CIPHER_SUITE_BIP_GMAC_128,
WLAN_CIPHER_SUITE_BIP_GMAC_256,
- /* Only QCA99x0 and QCA4019 varients support GCMP-128, GCMP-256
+ /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
* and CCMP-256 in hardware.
*/
WLAN_CIPHER_SUITE_GCMP,
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bf1c938be7d0..e56c6a6b1379 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1244,7 +1244,7 @@ static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
unsigned int nbytes, max_nbytes, nentries;
int orig_len;
- /* No need to aquire ce_lock for CE5, since this is the only place CE5
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
* is processed other than init and deinit. Before releasing CE5
* buffers, interrupts are disabled. Thus CE5 access is serialized.
*/
@@ -3215,8 +3215,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index cf64898b9447..480cd97ab739 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -81,7 +81,7 @@ struct ath10k_pci_pipe {
/* Handle of underlying Copy Engine */
struct ath10k_ce_pipe *ce_hdl;
- /* Our pipe number; facilitiates use of pipe_info ptrs. */
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
u8 pipe_num;
/* Convenience back pointer to hif_ce_state. */
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index d7e406916bc8..66cb7a1e628a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -792,7 +792,7 @@ static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
return;
/*
- * HACK: sleep for a while inbetween receiving the msa info response
+ * HACK: sleep for a while between receiving the msa info response
* and the XPU update to prevent SDM845 from crashing due to a security
* violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
*/
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 6ce2a8b1060d..777e53aa69dc 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -448,7 +448,7 @@ struct rx_mpdu_end {
* - 4 bytes for WEP
* - 8 bytes for TKIP, AES
* [padding to 4 bytes]
- * c) A-MSDU subframe header (14 bytes) if appliable
+ * c) A-MSDU subframe header (14 bytes) if applicable
* d) LLC/SNAP (RFC1042, 8 bytes)
*
* In case of A-MSDU only first frame in sequence contains (a) and (b).
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 24283c02a5ef..79e09c7a82b3 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1057,7 +1057,7 @@ static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
out:
/* An optimization to bypass reading the IRQ status registers
- * unecessarily which can re-wake the target, if upper layers
+ * unnecessarily which can re-wake the target, if upper layers
* determine that we are in a low-throughput mode, we can rely on
* taking another interrupt rather than re-checking the status
* registers which can re-wake the target.
@@ -2531,8 +2531,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 5576ad9fd116..cfcb759a87de 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1242,8 +1242,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
static int ath10k_snoc_request_irq(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 36c9a1364253..cefd97323dfe 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -98,7 +98,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
- /* display in millidegree celcius */
+ /* display in millidegree celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index 5fdb020f4da3..1f4de9fbf2b3 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -19,7 +19,7 @@ struct ath10k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
u32 quiet_period;
- /* temperature value in Celcius degree
+ /* temperature value in Celsius degree
* protected by data_lock
*/
int temperature;
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index ad6471b21796..b0067af685b1 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1014,8 +1014,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
index 34d683e8fc18..48e066ba8162 100644
--- a/drivers/net/wireless/ath/ath10k/usb.h
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -26,7 +26,7 @@
#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
-/* diagnostic command defnitions */
+/* diagnostic command definitions */
#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2
#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index b39c9b78b32b..dbb48d70f2e9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1813,7 +1813,7 @@ struct wmi_tlv_pdev_get_temp_cmd {
struct wmi_tlv_pdev_temperature_event {
__le32 tlv_hdr;
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
__le32 temperature;
__le32 pdev_id;
} __packed;
@@ -2548,7 +2548,7 @@ struct nlo_channel_prediction_cfg {
/* Preconfigured stationary threshold.
* Lesser value means more conservative. Bigger value means more aggressive.
- * Maximum is 100 and mininum is 0.
+ * Maximum is 100 and minimum is 0.
*/
__le32 stationary_threshold;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 074d8ba5072a..980d4124fa28 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3555,7 +3555,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
__le32 t;
u32 v, tim_len;
- /* When FW reports 0 in tim_len, ensure atleast first byte
+ /* When FW reports 0 in tim_len, ensure at least first byte
* in tim_bitmap is considered for pvm calculation.
*/
tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4abd12e78028..6de3cc4640a0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3170,7 +3170,7 @@ struct wmi_start_scan_common {
/* dwell time in msec on passive channels */
__le32 dwell_time_passive;
/*
- * min time in msec on the BSS channel,only valid if atleast one
+ * min time in msec on the BSS channel,only valid if at least one
* VDEV is active
*/
__le32 min_rest_time;
@@ -3196,7 +3196,7 @@ struct wmi_start_scan_common {
* and bssid_list
*/
__le32 repeat_probe_time;
- /* time in msec between 2 consequetive probe requests with in a set. */
+ /* time in msec between 2 consecutive probe requests with in a set. */
__le32 probe_spacing_time;
/*
* data inactivity time in msec on bss channel that will be used by
@@ -4397,7 +4397,7 @@ struct wmi_pdev_stats_tx {
/* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_tx_timeout;
/* wal pdev resets */
@@ -5240,7 +5240,7 @@ enum wmi_vdev_param {
* scheduler.
*/
WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
- /* enable/dsiable WDS for this VDEV */
+ /* enable/disable WDS for this VDEV */
WMI_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_VDEV_PARAM_ATIM_WINDOW,
@@ -5372,7 +5372,7 @@ enum wmi_10x_vdev_param {
* scheduler.
*/
WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
- /* enable/dsiable WDS for this VDEV */
+ /* enable/disable WDS for this VDEV */
WMI_10X_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_10X_VDEV_PARAM_ATIM_WINDOW,
@@ -5904,7 +5904,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
enum wmi_sta_ps_param_pspoll_count {
WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
/*
- * Values greater than 0 indicate the maximum numer of PS-Poll frames
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
@@ -6947,7 +6947,7 @@ struct wmi_echo_ev_arg {
};
struct wmi_pdev_temperature_event {
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
__le32 temperature;
} __packed;
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index c47414710138..d34a4d6325b2 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -16,6 +16,8 @@
#include "hif.h"
#include <linux/remoteproc.h>
#include "pcic.h"
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
static const struct of_device_id ath11k_ahb_of_match[] = {
/* TODO: Should we change the compatible string to something similar
@@ -359,6 +361,7 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
+ dev_set_threaded(&irq_grp->napi_ndev, true);
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
@@ -406,7 +409,8 @@ static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
int timeout;
if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
- ab->hw_params.cold_boot_calib == 0)
+ ab->hw_params.cold_boot_calib == 0 ||
+ ab->hw_params.cbcal_restart_fw == 0)
return 0;
ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
@@ -541,7 +545,7 @@ static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ ath11k_ahb_ext_grp_napi_poll);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
@@ -685,11 +689,90 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
return 0;
}
+static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = enable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
+ return ret;
+ }
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "ahb device suspended\n");
+
+ return ret;
+}
+
+static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = disable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "ahb device resumed\n");
+
+ return 0;
+}
+
static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
.start = ath11k_ahb_start,
.stop = ath11k_ahb_stop,
.read32 = ath11k_ahb_read32,
.write32 = ath11k_ahb_write32,
+ .read = NULL,
.irq_enable = ath11k_ahb_ext_irq_enable,
.irq_disable = ath11k_ahb_ext_irq_disable,
.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
@@ -702,6 +785,7 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
+ .read = NULL,
.irq_enable = ath11k_pcic_ext_irq_enable,
.irq_disable = ath11k_pcic_ext_irq_disable,
.get_msi_address = ath11k_pcic_get_msi_address,
@@ -709,6 +793,10 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.power_down = ath11k_ahb_power_down,
.power_up = ath11k_ahb_power_up,
+ .suspend = ath11k_ahb_hif_suspend,
+ .resume = ath11k_ahb_hif_resume,
+ .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
+ .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};
static int ath11k_core_get_rproc(struct ath11k_base *ab)
@@ -783,6 +871,34 @@ static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
+ &ab_ahb->smp2p_info.smem_bit);
+ if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
+ ath11k_err(ab, "failed to fetch smem state: %ld\n",
+ PTR_ERR(ab_ahb->smp2p_info.smem_state));
+ return PTR_ERR(ab_ahb->smp2p_info.smem_state);
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return;
+
+ qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
+}
+
static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
struct platform_device *pdev = ab->pdev;
@@ -1038,10 +1154,14 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
- ret = ath11k_hal_srng_init(ab);
+ ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
goto err_fw_deinit;
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_release_smp2p_handle;
+
ret = ath11k_ce_alloc_pipes(ab);
if (ret) {
ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
@@ -1078,6 +1198,9 @@ err_ce_free:
err_hal_srng_deinit:
ath11k_hal_srng_deinit(ab);
+err_release_smp2p_handle:
+ ath11k_ahb_release_smp2p_handle(ab);
+
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
@@ -1088,20 +1211,10 @@ err_core_free:
return ret;
}
-static int ath11k_ahb_remove(struct platform_device *pdev)
+static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
- struct ath11k_base *ab = platform_get_drvdata(pdev);
unsigned long left;
- if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
- ath11k_debugfs_soc_destroy(ab);
- ath11k_qmi_deinit_service(ab);
- goto qmi_fail;
- }
-
- reinit_completion(&ab->driver_recovery);
-
if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
left = wait_for_completion_timeout(&ab->driver_recovery,
ATH11K_AHB_RECOVERY_TIMEOUT);
@@ -1111,19 +1224,61 @@ static int ath11k_ahb_remove(struct platform_device *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath11k_ahb_free_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
- ath11k_core_deinit(ab);
-qmi_fail:
ath11k_ahb_free_irq(ab);
ath11k_hal_srng_deinit(ab);
+ ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_ahb_power_down(ab);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ ath11k_ahb_remove_prepare(ab);
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_ahb_free_resources(ab);
return 0;
}
+static void ath11k_ahb_shutdown(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ /* platform shutdown() & remove() are mutually exclusive.
+ * remove() is invoked during rmmod & shutdown() during
+ * system reboot/shutdown.
+ */
+ ath11k_ahb_remove_prepare(ab);
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
+ goto free_resources;
+
+ ath11k_core_deinit(ab);
+
+free_resources:
+ ath11k_ahb_free_resources(ab);
+}
+
static struct platform_driver ath11k_ahb_driver = {
.driver = {
.name = "ath11k",
@@ -1131,6 +1286,7 @@ static struct platform_driver ath11k_ahb_driver = {
},
.probe = ath11k_ahb_probe,
.remove = ath11k_ahb_remove,
+ .shutdown = ath11k_ahb_shutdown,
};
static int ath11k_ahb_init(void)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 58a945411c5b..415ddfd26654 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_AHB_H
#define ATH11K_AHB_H
@@ -8,6 +9,16 @@
#include "core.h"
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH11K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH11K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH11K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+
+enum ath11k_ahb_smp2p_msg_id {
+ ATH11K_AHB_POWER_SAVE_ENTER = 1,
+ ATH11K_AHB_POWER_SAVE_EXIT,
+};
+
struct ath11k_base;
struct ath11k_ahb {
@@ -21,6 +32,11 @@ struct ath11k_ahb {
u32 ce_size;
bool use_tz;
} fw;
+ struct {
+ unsigned short seq_no;
+ unsigned int smem_bit;
+ struct qcom_smem_state *smem_state;
+ } smp2p_info;
};
static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
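
The new suspend/resume hooks in ahb.c signal the WCN6750 firmware over SMP2P by packing a 32-bit SMEM value, with the message ID in bits 15:0 and a rolling sequence number in bits 31:16, then writing it via qcom_smem_state_update_bits(). A minimal sketch of that packing, using the masks introduced in ahb.h above; the helper name is illustrative only:

/* Minimal sketch of the SMP2P value layout used by the new
 * suspend/resume paths (masks as defined in ahb.h above).
 */
#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_SMP2P_SMEM_MSG		GENMASK(15, 0)
#define EXAMPLE_SMP2P_SMEM_SEQ_NO	GENMASK(31, 16)

static u32 example_smp2p_value(u16 seq_no, u16 msg_id)
{
	/* msg_id = ATH11K_AHB_POWER_SAVE_ENTER (1) on suspend,
	 * ATH11K_AHB_POWER_SAVE_EXIT (2) on resume.
	 */
	return u32_encode_bits(seq_no, EXAMPLE_SMP2P_SMEM_SEQ_NO) |
	       u32_encode_bits(msg_id, EXAMPLE_SMP2P_SMEM_MSG);
}
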
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index c14c51f38709..f2da95fd4253 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -250,7 +250,7 @@ const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
static bool ath11k_ce_need_shadow_fix(int ce_id)
{
- /* only ce4 needs shadow workaroud*/
+ /* only ce4 needs shadow workaround */
if (ce_id == 4)
return true;
return false;
@@ -1042,7 +1042,7 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
ret = ath11k_ce_alloc_pipe(ab, i);
if (ret) {
- /* Free any parial successful allocation */
+ /* Free any partial successful allocation */
ath11k_ce_free_pipes(ab);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c3e9e4f7bc24..b99180bc8172 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -70,6 +70,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
+ .fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -81,6 +82,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
+ .cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -106,6 +108,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -141,6 +150,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 16,
.max_fft_bins = 512,
+ .fragment_160mhz = true,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -152,6 +162,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = true,
+ .cbcal_restart_fw = true,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -177,6 +188,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "qca6390 hw2.0",
@@ -212,6 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -222,6 +241,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -247,6 +267,16 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0171ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "qcn9074 hw1.0",
@@ -281,6 +311,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 16,
.fft_hdr_len = 24,
.max_fft_bins = 1024,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -292,6 +323,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = false,
.supports_sta_ps = false,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 2,
.num_vdevs = 8,
.num_peers = 128,
@@ -317,6 +349,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6855 hw2.0",
@@ -352,6 +391,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -362,6 +402,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -387,6 +428,16 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6855 hw2.1",
@@ -422,6 +473,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -431,6 +483,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.idle_ps = true,
.supports_sta_ps = true,
.cold_boot_calib = false,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -456,6 +509,16 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hybrid_bus_type = false,
.fixed_fw_mem = false,
.support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
},
{
.name = "wcn6750 hw1.0",
@@ -468,7 +531,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.max_radios = 1,
.bdf_addr = 0x4B0C0000,
.hw_ops = &wcn6750_ops,
- .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .ring_mask = &ath11k_hw_ring_mask_wcn6750,
.internal_sleep_clock = false,
.regs = &wcn6750_regs,
.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
@@ -491,6 +554,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.summary_pad_sz = 0,
.fft_hdr_len = 0,
.max_fft_bins = 0,
+ .fragment_160mhz = false,
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
@@ -499,7 +563,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_shadow_regs = true,
.idle_ps = true,
.supports_sta_ps = true,
- .cold_boot_calib = false,
+ .cold_boot_calib = true,
+ .cbcal_restart_fw = false,
.fw_mem_mode = 0,
.num_vdevs = 16 + 1,
.num_peers = 512,
@@ -508,8 +573,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_regdb = true,
.fix_l1ss = false,
.credit_flow = true,
- .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
- .hal_params = &ath11k_hw_hal_params_qca6390,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_wcn6750,
.supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false,
.supports_rssi_stats = true,
@@ -524,7 +589,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.static_window_map = true,
.hybrid_bus_type = true,
.fixed_fw_mem = true,
- .support_off_channel_tx = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = false,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
+ .smp2p_wow_exit = true,
},
};
@@ -535,6 +607,52 @@ static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base
return &ab->pdevs[0];
}
+void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_init(struct ath11k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+
+ init_completion(&ar->fw_stats_complete);
+}
+
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
+{
+ ath11k_fw_stats_pdevs_free(&stats->pdevs);
+ ath11k_fw_stats_vdevs_free(&stats->vdevs);
+ ath11k_fw_stats_bcn_free(&stats->bcn);
+}
+
int ath11k_core_suspend(struct ath11k_base *ab)
{
int ret;
@@ -1544,7 +1662,7 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
complete(&ar->scan.started);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
@@ -1563,6 +1681,8 @@ static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq);
+
+ reinit_completion(&ab->driver_recovery);
}
static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index afad8f55e433..cf2f52cc4e30 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -498,6 +498,13 @@ struct ath11k_sta {
bool use_4addr_set;
u16 tcl_metadata;
+
+ /* Protected with ar->data_lock */
+ enum ath11k_wmi_peer_ps_state peer_ps_state;
+ u64 ps_start_time;
+ u64 ps_start_jiffies;
+ u64 ps_total_duration;
+ bool peer_current_ps_valid;
};
#define ATH11K_MIN_5G_FREQ 4150
@@ -545,9 +552,6 @@ struct ath11k_debug {
struct dentry *debugfs_pdev;
struct ath11k_dbg_htt_stats htt_stats;
u32 extd_tx_stats;
- struct ath11k_fw_stats fw_stats;
- struct completion fw_stats_complete;
- bool fw_stats_done;
u32 extd_rx_stats;
u32 pktlog_filter;
u32 pktlog_mode;
@@ -710,6 +714,13 @@ struct ath11k {
u8 twt_enabled;
bool nlo_enabled;
u8 alpha2[REG_ALPHA2_LEN + 1];
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ /* protected by conf_mutex */
+ bool ps_state_enable;
+ bool ps_timekeeper_enable;
};
struct ath11k_band_cap {
@@ -887,7 +898,7 @@ struct ath11k_base {
/* Below regd's are protected by ab->data_lock */
/* This is the regd set for every radio
- * by the firmware during initializatin
+ * by the firmware during initialization
*/
struct ieee80211_regdomain *default_regd[MAX_RADIOS];
/* This regd is set during dynamic country setting
@@ -1112,6 +1123,12 @@ struct ath11k_fw_stats_bcn {
u32 tx_bcn_outage_cnt;
};
+void ath11k_fw_stats_init(struct ath11k *ar);
+void ath11k_fw_stats_pdevs_free(struct list_head *head);
+void ath11k_fw_stats_vdevs_free(struct list_head *head);
+void ath11k_fw_stats_bcn_free(struct list_head *head);
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats);
+
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
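
The fw_stats state moves out of ath11k_debug (debugfs-only) into struct ath11k itself, and the list init/free helpers are exported from core.c so both the debugfs readers and the WMI event path can share them. A minimal sketch of the intended lifecycle, pieced together from the declarations above; the call sites shown are illustrative assumptions, not the exact upstream code:

/* Sketch of the fw_stats lifecycle after the move out of ath11k_debug
 * (illustrative; real call sites live in core.c, wmi.c and debugfs.c).
 */
static void example_fw_stats_lifecycle(struct ath11k *ar,
				       struct ath11k_fw_stats *event_stats)
{
	/* once, when the radio is allocated */
	ath11k_fw_stats_init(ar);

	/* per stats event: splice parsed lists into ar->fw_stats
	 * (done under ar->data_lock in the real code) and wake waiters
	 */
	list_splice_tail_init(&event_stats->pdevs, &ar->fw_stats.pdevs);
	ar->fw_stats_done = true;
	complete(&ar->fw_stats_complete);

	/* on reset or teardown, drop whatever is still queued */
	ath11k_fw_stats_free(&ar->fw_stats);
}
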
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 9648e0017393..ccdf3d5ba1ab 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -14,6 +14,7 @@
#include "dp_tx.h"
#include "debugfs_htt_stats.h"
#include "peer.h"
+#include "hif.h"
static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
"REO2SW1_RING",
@@ -91,91 +92,35 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
spin_unlock_bh(&dbr_data->lock);
}
-static void ath11k_fw_stats_pdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_pdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_vdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_vdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_bcn_free(struct list_head *head)
-{
- struct ath11k_fw_stats_bcn *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
{
spin_lock_bh(&ar->data_lock);
- ar->debug.fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
spin_unlock_bh(&ar->data_lock);
}
-void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
- struct ath11k_fw_stats stats = {};
- struct ath11k *ar;
+ struct ath11k_base *ab = ar->ab;
struct ath11k_pdev *pdev;
bool is_end;
static unsigned int num_vdev, num_bcn;
size_t total_vdevs_started = 0;
- int i, ret;
-
- INIT_LIST_HEAD(&stats.pdevs);
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
-
- ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
- if (ret) {
- ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
- goto free;
- }
-
- rcu_read_lock();
- ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
- if (!ar) {
- rcu_read_unlock();
- ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
- stats.pdev_id, ret);
- goto free;
- }
+ int i;
- spin_lock_bh(&ar->data_lock);
+ /* WMI_REQUEST_PDEV_STAT request has been already processed */
- if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
- ar->debug.fw_stats_done = true;
- goto complete;
- }
-
- if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->debug.fw_stats_done = true;
- goto complete;
+ if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ ar->fw_stats_done = true;
+ return;
}
- if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats.vdevs)) {
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
ath11k_warn(ab, "empty vdev stats");
- goto complete;
+ return;
}
/* FW sends all the active VDEV stats irrespective of PDEV,
* hence limit until the count of all VDEVs started
@@ -188,43 +133,34 @@ void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb
is_end = ((++num_vdev) == total_vdevs_started);
- list_splice_tail_init(&stats.vdevs,
- &ar->debug.fw_stats.vdevs);
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
if (is_end) {
- ar->debug.fw_stats_done = true;
+ ar->fw_stats_done = true;
num_vdev = 0;
}
- goto complete;
+ return;
}
- if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats.bcn)) {
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
- goto complete;
+ return;
}
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
is_end = ((++num_bcn) == ar->num_started_vdevs);
- list_splice_tail_init(&stats.bcn,
- &ar->debug.fw_stats.bcn);
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
if (is_end) {
- ar->debug.fw_stats_done = true;
+ ar->fw_stats_done = true;
num_bcn = 0;
}
}
-complete:
- complete(&ar->debug.fw_stats_complete);
- rcu_read_unlock();
- spin_unlock_bh(&ar->data_lock);
-
-free:
- ath11k_fw_stats_pdevs_free(&stats.pdevs);
- ath11k_fw_stats_vdevs_free(&stats.vdevs);
- ath11k_fw_stats_bcn_free(&stats.bcn);
}
static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
@@ -245,7 +181,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
ath11k_debugfs_fw_stats_reset(ar);
- reinit_completion(&ar->debug.fw_stats_complete);
+ reinit_completion(&ar->fw_stats_complete);
ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
@@ -255,9 +191,8 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
return ret;
}
- time_left =
- wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1 * HZ);
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
if (!time_left)
return -ETIMEDOUT;
@@ -266,7 +201,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
break;
spin_lock_bh(&ar->data_lock);
- if (ar->debug.fw_stats_done) {
+ if (ar->fw_stats_done) {
spin_unlock_bh(&ar->data_lock);
break;
}
@@ -338,8 +273,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
goto err_free;
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
@@ -410,8 +344,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
goto err_free;
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
file->private_data = buf;
@@ -488,14 +421,13 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
}
}
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
/* since beacon stats request is looped for all active VDEVs, saved fw
* stats is not freed for each request until done for all active VDEVs
*/
spin_lock_bh(&ar->data_lock);
- ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn);
spin_unlock_bh(&ar->data_lock);
file->private_data = buf;
@@ -982,6 +914,63 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
+static int ath11k_open_sram_dump(struct inode *inode, struct file *file)
+{
+ struct ath11k_base *ab = inode->i_private;
+ u8 *buf;
+ u32 start, end;
+ int ret;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+
+ buf = vmalloc(end - start + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath11k_hif_read(ab, buf, start, end);
+ if (ret) {
+ ath11k_warn(ab, "failed to dump sram: %d\n", ret);
+ vfree(buf);
+ return ret;
+ }
+
+ file->private_data = buf;
+ return 0;
+}
+
+static ssize_t ath11k_read_sram_dump(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->f_inode->i_private;
+ const char *buf = file->private_data;
+ int len;
+ u32 start, end;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+ len = end - start + 1;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath11k_release_sram_dump(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops_sram_dump = {
+ .open = ath11k_open_sram_dump,
+ .read = ath11k_read_sram_dump,
+ .release = ath11k_release_sram_dump,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
@@ -997,6 +986,10 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
+ if (ab->hw_params.sram_dump.start != 0)
+ debugfs_create_file("sram", 0400, ab->debugfs_soc, ab,
+ &fops_sram_dump);
+
return 0;
}
@@ -1025,7 +1018,7 @@ void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
ar->debug.debugfs_pdev);
- ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+ ar->fw_stats.debugfs_fwstats = fwstats_dir;
/* all stats debugfs files created are under "fw_stats" directory
* created per PDEV
@@ -1036,12 +1029,6 @@ void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
&fops_vdev_stats);
debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
&fops_bcn_stats);
-
- INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
-
- init_completion(&ar->debug.fw_stats_complete);
}
static ssize_t ath11k_write_pktlog_filter(struct file *file,
@@ -1382,6 +1369,193 @@ static const struct file_operations fops_dbr_debug = {
.llseek = default_llseek,
};
+static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ ssize_t ret;
+ u8 ps_timekeeper_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ar->ps_timekeeper_enable = !!ps_timekeeper_enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_timekeeper_enable = {
+ .read = ath11k_read_ps_timekeeper_enable,
+ .write = ath11k_write_ps_timekeeper_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_reset_peer_ps_duration(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_reset_ps_duration(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+ u8 reset_ps_duration;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &reset_ps_duration))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_reset_peer_ps_duration,
+ ar);
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_ps_duration = {
+ .write = ath11k_write_reset_ps_duration,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ arsta->ps_start_time = 0;
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ps_state_enable = !!ps_state_enable;
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
+ ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable) {
+ ar->ps_timekeeper_enable = false;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_peer_ps_state_disable,
+ ar);
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath11k_read_ps_state_enable,
+ .write = ath11k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath11k_debugfs_register(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -1428,6 +1602,20 @@ int ath11k_debugfs_register(struct ath11k *ar)
debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
ar, &fops_dbr_debug);
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar,
+ &fops_ps_state_enable);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("ps_timekeeper_enable", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_ps_timekeeper_enable);
+
+ debugfs_create_file("reset_ps_duration", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_reset_ps_duration);
+ }
+
return 0;
}
@@ -1456,11 +1644,13 @@ static ssize_t ath11k_write_twt_add_dialog(struct file *file,
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_add_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
u8 buf[128] = {0};
int ret;
- if (arvif->ar->twt_enabled == 0) {
- ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
@@ -1490,13 +1680,38 @@ static ssize_t ath11k_write_twt_add_dialog(struct file *file,
if (ret != 16)
return -EINVAL;
+ /* In the case of station vif, TWT is entirely handled by
+ * the firmware based on the input parameters in the TWT enable
+ * WMI command that is sent to the target during assoc.
+ * For manually testing the TWT feature, we need to first disable
+ * TWT and send enable command again with TWT input parameter
+ * sta_cong_timer_ms set to 0.
+ */
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ twt_params.sta_cong_timer_ms = 0;
+
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
params.vdev_id = arvif->vdev_id;
ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
if (ret)
- return ret;
+ goto err_twt_add_dialog;
return count;
+
+err_twt_add_dialog:
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return ret;
}
static ssize_t ath11k_write_twt_del_dialog(struct file *file,
@@ -1505,11 +1720,13 @@ static ssize_t ath11k_write_twt_del_dialog(struct file *file,
{
struct ath11k_vif *arvif = file->private_data;
struct wmi_twt_del_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
u8 buf[64] = {0};
int ret;
- if (arvif->ar->twt_enabled == 0) {
- ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
return -EOPNOTSUPP;
}
@@ -1535,6 +1752,12 @@ static ssize_t ath11k_write_twt_del_dialog(struct file *file,
if (ret)
return ret;
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
return count;
}
@@ -1638,36 +1861,35 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
.open = simple_open
};
-int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
- if (arvif->vif->type == NL80211_IFTYPE_AP && !arvif->debugfs_twt) {
- arvif->debugfs_twt = debugfs_create_dir("twt",
- arvif->vif->debugfs_dir);
- if (!arvif->debugfs_twt || IS_ERR(arvif->debugfs_twt)) {
- ath11k_warn(arvif->ar->ab,
- "failed to create directory %p\n",
- arvif->debugfs_twt);
- arvif->debugfs_twt = NULL;
- return -1;
- }
+ struct ath11k_base *ab = arvif->ar->ab;
- debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_add_dialog);
+ if (arvif->vif->type != NL80211_IFTYPE_AP &&
+ !(arvif->vif->type == NL80211_IFTYPE_STATION &&
+ test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
+ return;
- debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_del_dialog);
+ arvif->debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
- debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_pause_dialog);
+ debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
- debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
- arvif, &ath11k_fops_twt_resume_dialog);
- }
- return 0;
+ debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
}
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
+ if (!arvif->debugfs_twt)
+ return;
+
debugfs_remove_recursive(arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index 30c00cb28311..3af0169f6cf2 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -269,7 +269,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
int ath11k_debugfs_register(struct ath11k *ar);
void ath11k_debugfs_unregister(struct ath11k *ar);
-void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
@@ -306,7 +306,7 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
-int ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
@@ -341,8 +341,8 @@ static inline void ath11k_debugfs_unregister(struct ath11k *ar)
{
}
-static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k *ar,
+ struct ath11k_fw_stats *stats)
{
}
@@ -386,9 +386,8 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
-static inline int ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
- return 0;
}
static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 5d722b51b125..2b97cbbd28cb 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -630,7 +630,7 @@ struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
* completing the burst, we identify the txop used in the burst and
* incr the corresponding bin.
* Each bin represents 1ms & we have 10 bins in this histogram.
- * they are deined in FW using the following macros
+ * they are defined in FW using the following macros
* #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
* #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms )
*/
@@ -1897,7 +1897,7 @@ struct htt_phy_counters_tlv {
u32 phytx_abort_cnt;
/* number of times rx abort initiated by phy */
u32 phyrx_abort_cnt;
- /* number of rx defered count initiated by phy */
+ /* number of rx deferred count initiated by phy */
u32 phyrx_defer_abort_cnt;
/* number of sizing events generated at LSTF */
u32 rx_gain_adj_lstf_event_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 1b1acbdf837a..9cc4ef28e751 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -751,6 +751,102 @@ static const struct file_operations fops_htt_peer_stats_reset = {
.llseek = default_llseek,
};
+static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u64 time_since_station_in_power_save;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ time_since_station_in_power_save = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies);
+ else
+ time_since_station_in_power_save = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n",
+ time_since_station_in_power_save);
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_current_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_current_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ u64 power_save_duration;
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ power_save_duration = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies)
+ + arsta->ps_total_duration;
+ else
+ power_save_duration = arsta->ps_total_duration;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_total_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_total_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -778,4 +874,15 @@ void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vi
ar->ab->wmi_ab.svc_map))
debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
&fops_htt_peer_stats_reset);
+
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("current_ps_duration", 0440, dir, sta,
+ &fops_current_ps_duration);
+ debugfs_create_file("total_ps_duration", 0440, dir, sta,
+ &fops_total_ps_duration);
+ }
}
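
The new per-station debugfs files report power-save residency from the fields added to ath11k_sta: ps_start_jiffies is a snapshot taken when the peer enters PS, ps_total_duration accumulates completed PS periods, and peer_current_ps_valid guards the in-progress period. The readers above fold the running period into the total on the fly, under ar->data_lock. A short sketch of that arithmetic; the event-side accumulation is an assumption about the matching WMI peer-PS handler, which is not part of this excerpt:

/* Sketch of the PS-duration bookkeeping implied by the debugfs readers
 * above (the WMI event side shown here is an assumption).
 */
static u64 example_total_ps_ms(struct ath11k_sta *arsta)
{
	u64 total = arsta->ps_total_duration;

	/* fold in the period that is still running */
	if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
	    arsta->peer_current_ps_valid)
		total += jiffies_to_msecs(jiffies - arsta->ps_start_jiffies);

	return total;
}

/* assumed event-side update when the peer leaves power save */
static void example_on_ps_exit(struct ath11k_sta *arsta)
{
	arsta->ps_total_duration +=
		jiffies_to_msecs(jiffies - arsta->ps_start_jiffies);
}
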
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8b790ce72e5d..f5156a7fbdd7 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -131,13 +132,11 @@ static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
switch (type) {
case HAL_WBM2SW_RELEASE:
- if (ring_num < 3) {
- grp_mask = &ab->hw_params.ring_mask->tx[0];
- } else if (ring_num == 3) {
+ if (ring_num == DP_RX_RELEASE_RING_NUM) {
grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
- return -ENOENT;
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
}
break;
case HAL_REO_EXCEPTION:
@@ -371,6 +370,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
+ u8 tcl_num, wbm_num;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -396,9 +396,12 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
+ wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
+
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
- HAL_TCL_DATA, i, 0,
- DP_TCL_DATA_RING_SIZE);
+ HAL_TCL_DATA, tcl_num, 0,
+ ab->hw_params.tx_ring_size);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
i, ret);
@@ -406,7 +409,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
- HAL_WBM2SW_RELEASE, i, 0,
+ HAL_WBM2SW_RELEASE, wbm_num, 0,
DP_TX_COMP_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
@@ -431,7 +434,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
- 3, 0, DP_RX_RELEASE_RING_SIZE);
+ DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
if (ret) {
ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
goto err;
@@ -774,9 +777,10 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int i, j;
int tot_work_done = 0;
- if (ab->hw_params.ring_mask->tx[grp_id]) {
- i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
- ath11k_dp_tx_completion_handler(ab, i);
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
+ ab->hw_params.ring_mask->tx[grp_id])
+ ath11k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params.ring_mask->rx_err[grp_id]) {
@@ -963,7 +967,7 @@ static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
/* When v2_map_support is true:for STA mode, enable address
* search index, tcl uses ast_hash value in the descriptor.
- * When v2_map_support is false: for STA mode, dont' enable
+ * When v2_map_support is false: for STA mode, don't enable
* address search index.
*/
switch (arvif->vdev_type) {
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index e9dfa209098b..be9eafc872b3 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -203,6 +204,7 @@ struct ath11k_pdev_dp {
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TCL_DATA_RING_SIZE_WCN6750 2048
#define DP_TX_COMP_RING_SIZE 32768
#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
#define DP_TCL_CMD_RING_SIZE 32
@@ -222,6 +224,8 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+#define DP_RX_RELEASE_RING_NUM 3
+
#define DP_RX_BUFFER_SIZE 2048
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
@@ -299,7 +303,7 @@ struct ath11k_dp {
#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-/* HTT tx completion is overlayed in wbm_release_ring */
+/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
@@ -466,7 +470,7 @@ enum htt_srng_ring_id {
* 3'b010: 4 usec
* 3'b011: 8 usec (default)
* 3'b100: 16 usec
- * Others: Reserverd
+ * Others: Reserved
* b'19 - response_required:
* Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
* b'20:31 - reserved: reserved for future use
@@ -993,8 +997,7 @@ struct htt_rx_ring_tlv_filter {
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
-/**
- * Enumeration for full monitor mode destination ring select
+/* Enumeration for full monitor mode destination ring select
* 0 - REO destination ring select
* 1 - FW destination ring select
* 2 - SW destination ring select
@@ -1391,8 +1394,7 @@ struct htt_ppdu_stats_info {
struct list_head list;
};
-/**
- * @brief target -> host packet log message
+/* @brief target -> host packet log message
*
* @details
* The following field definitions describe the format of the packet log
@@ -1430,8 +1432,7 @@ struct htt_pktlog_msg {
u8 payload[];
};
-/**
- * @brief host -> target FW extended statistics retrieve
+/* @brief host -> target FW extended statistics retrieve
*
* @details
* The following field definitions describe the format of the HTT host
@@ -1566,8 +1567,7 @@ struct htt_ext_stats_cfg_params {
u32 cfg3;
};
-/**
- * @brief target -> host extended statistics upload
+/* @brief target -> host extended statistics upload
*
* @details
* The following field definitions describe the format of the HTT target
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 2148acf37071..c5a4c34d7749 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2499,7 +2499,7 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
/* PN for multicast packets are not validate in HW,
* so skip 802.3 rx path
- * Also, fast_rx expectes the STA to be authorized, hence
+ * Also, fast_rx expects the STA to be authorized, hence
* eapol packets are sent in slow path.
*/
if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
@@ -5197,7 +5197,8 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
- memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index c17a2620aad7..8afbba236935 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -93,7 +94,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
u8 pool_id;
u8 hal_ring_id;
int ret;
- u8 ring_selector = 0, ring_map = 0;
+ u32 ring_selector = 0;
+ u8 ring_map = 0;
bool tcl_ring_retry;
if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
@@ -105,19 +107,13 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
- /* Let the default ring selection be based on current processor
- * number, where one of the 3 tcl rings are selected based on
- * the smp_processor_id(). In case that ring
- * is full/busy, we resort to other available rings.
- * If all rings are full, we drop the packet.
- * //TODO Add throttling logic when all rings are full
- */
- ring_selector = smp_processor_id();
+ ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
tcl_ring_sel:
tcl_ring_retry = false;
ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+ ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
ring_map |= BIT(ti.ring_id);
@@ -129,7 +125,8 @@ tcl_ring_sel:
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (unlikely(ret < 0)) {
- if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
+ if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+ !ab->hw_params.tcl_ring_retry) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
@@ -247,7 +244,7 @@ tcl_ring_sel:
* Restart ring selection if some rings are not checked yet.
*/
if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
- ab->hw_params.max_tx_ring > 1) {
+ ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
}
@@ -755,7 +752,7 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
return 0;
/* Can this be optimized so that we keep the pending command list only
- * for tid delete command to free up the resoruce on the command status
+ * for tid delete command to free up the resource on the command status
* indication?
*/
dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
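
The dp_tx changes above replace the hard-coded smp_processor_id() selector with a per-chip hook and gate the retry walk on hw_params.tcl_ring_retry. The sketch below models just the selection/retry loop in userspace, assuming a simple "ring full" flag per ring; it illustrates the control flow only and is not driver code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_TX_RING 3

static int pick_ring(unsigned int selector, const bool *ring_full,
		     bool retry_allowed)
{
	unsigned int ring_map = 0;
	int ring_id;

	do {
		ring_id = selector % MAX_TX_RING;
		ring_map |= 1U << ring_id;   /* remember which rings were tried */

		if (!ring_full[ring_id])
			return ring_id;      /* descriptor available */

		if (!retry_allowed ||
		    ring_map == (1U << MAX_TX_RING) - 1)
			return -1;           /* all candidates exhausted: drop */

		selector++;
	} while (1);
}

int main(void)
{
	bool full[MAX_TX_RING] = { true, false, true };

	printf("selected ring %d\n", pick_ring(0, full, true));   /* prints 1 */
	printf("selected ring %d\n", pick_ring(0, full, false));  /* prints -1 */
	return 0;
}

With retry disabled, the first full ring is immediately treated as a drop, which corresponds to the extra !ab->hw_params.tcl_ring_retry check added to the -ENOSPC path above.
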
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index bda71ab5a1f2..2fd224480d45 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -126,7 +126,7 @@ static const struct hal_srng_config hw_srng_config_template[] = {
},
{ /* WBM2SW_RELEASE */
.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
- .max_rings = 4,
+ .max_rings = 5,
.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
@@ -1164,7 +1164,7 @@ void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
{
lockdep_assert_held(&srng->lock);
- /* check whether the ring is emptry. Update the shadow
+ /* check whether the ring is empty. Update the shadow
* HP only when then ring isn't empty.
*/
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 110c337ddf33..6a1f78ee6eb6 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -243,7 +243,7 @@ struct ath11k_base;
#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
-/* TCL ring feild mask and offset */
+/* TCL ring field mask and offset */
#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
@@ -268,7 +268,7 @@ struct ath11k_base;
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
-/* REO ring feild mask and offset */
+/* REO ring field mask and offset */
#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
@@ -389,6 +389,7 @@ enum hal_srng_ring_id {
HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
HAL_SRNG_RING_ID_UMAC_ID_END = 127,
HAL_SRNG_RING_ID_LMAC1_ID_START,
@@ -450,13 +451,13 @@ enum hal_ring_type {
/**
* enum hal_reo_cmd_type: Enum for REO command type
- * @CMD_GET_QUEUE_STATS: Get REO queue status/stats
- * @CMD_FLUSH_QUEUE: Flush all frames in REO queue
- * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache
- * @CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
* earlier with a 'REO_FLUSH_CACHE' command
- * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
- * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
*/
enum hal_reo_cmd_type {
HAL_REO_CMD_GET_QUEUE_STATS = 0,
@@ -635,7 +636,7 @@ struct hal_srng {
} u;
};
-/* Interrupt mitigation - Batch threshold in terms of numer of frames */
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
@@ -678,6 +679,7 @@ enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_SW1_BM,
HAL_RX_BUF_RBM_SW2_BM,
HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
};
#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
@@ -873,8 +875,7 @@ struct hal_reo_status {
} u;
};
-/**
- * HAL context to be used to access SRNG APIs (currently used by data path
+/* HAL context to be used to access SRNG APIs (currently used by data path
* and transport (CE) modules)
*/
struct ath11k_hal {
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index 24e72e75a8c7..d895ea878d9f 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -607,7 +607,7 @@ struct rx_msdu_desc {
*
* msdu_continuation
* When set, this MSDU buffer was not able to hold the entire MSDU.
- * The next buffer will therefor contain additional information
+ * The next buffer will therefore contain additional information
* related to this MSDU.
*
* msdu_length
@@ -643,7 +643,7 @@ struct rx_msdu_desc {
*
* da_idx_timeout
* Indicates, an unsuccessful MAC destination address search due
- * to the expiration of search timer fot this MSDU.
+ * to the expiration of search timer for this MSDU.
*/
enum hal_reo_dest_ring_buffer_type {
@@ -1678,7 +1678,7 @@ struct hal_wbm_release_ring {
* Producer: SW/TQM/RXDMA/REO/SWITCH
* Consumer: WBM/SW/FW
*
- * HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
* for software based completions.
*
* buf_addr_info
@@ -2159,7 +2159,7 @@ struct hal_reo_status_hdr {
* commands.
*
* execution_time (in us)
- * The amount of time REO took to excecute the command. Note that
+ * The amount of time REO took to execute the command. Note that
* this time does not include the duration of the command waiting
* in the command ring, before the execution started.
*
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index c8929de8ce6c..d1b0e36e04a9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hal_desc.h"
@@ -44,8 +45,7 @@ void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
tcl_cmd->buf_addr_info.info1 |=
- FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR,
- (ti->ring_id + HAL_RX_BUF_RBM_SW0_BM)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
tcl_cmd->info0 =
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index 36f4f6f6cbc2..c5e88364afe5 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@@ -35,6 +36,7 @@ struct hal_tx_info {
u8 lmac_id;
u8 dscp_tid_tbl_idx;
bool enable_mesh;
+ u8 rbm_id;
};
/* TODO: Check if the actual desc macros can be used instead */
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index e9366f786fbb..659b80d2abd4 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -11,6 +11,7 @@
struct ath11k_hif_ops {
u32 (*read32)(struct ath11k_base *sc, u32 address);
void (*write32)(struct ath11k_base *sc, u32 address, u32 data);
+ int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end);
void (*irq_enable)(struct ath11k_base *sc);
void (*irq_disable)(struct ath11k_base *sc);
int (*start)(struct ath11k_base *sc);
@@ -99,6 +100,15 @@ static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 d
sc->hif.ops->write32(sc, address, data);
}
+static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf,
+ u32 start, u32 end)
+{
+ if (!ab->hif.ops->read)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->read(ab, buf, start, end);
+}
+
static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
@@ -134,4 +144,5 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
else
*msi_data_idx = ce_id;
}
+
#endif /* _HIF_H_ */
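
The new ath11k_hif_read() wrapper follows the usual optional-op pattern: buses that cannot dump a register range simply leave .read unset and callers get -EOPNOTSUPP. A small userspace model of that pattern follows; the struct and function names here are made up for the example.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct bus_ops {
	int (*read)(void *buf, uint32_t start, uint32_t end);
};

static int wrapper_read(const struct bus_ops *ops, void *buf,
			uint32_t start, uint32_t end)
{
	if (!ops->read)
		return -EOPNOTSUPP;  /* backend has no register dump support */

	return ops->read(buf, start, end);
}

static int fake_read(void *buf, uint32_t start, uint32_t end)
{
	(void)buf;
	printf("reading 0x%x..0x%x\n", (unsigned int)start, (unsigned int)end);
	return 0;
}

int main(void)
{
	struct bus_ops pci = { .read = fake_read };
	struct bus_ops ahb = { .read = NULL };
	uint8_t buf[16];

	printf("pci: %d\n", wrapper_read(&pci, buf, 0x0, 0xc));
	printf("ahb: %d\n", wrapper_read(&ahb, buf, 0x0, 0xc));
	return 0;
}

In the patch, ath11k_pcic_read() is wired up as that hook for the PCI bus via ath11k_pci_hif_ops.
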
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 96db85c55585..dbcc0c4035b6 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -820,6 +820,30 @@ static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
__le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
}
+static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Let the default ring selection be based on current processor
+ * number, where one of the 3 tcl rings is selected based on
+ * the smp_processor_id(). In case that ring
+ * is full/busy, we resort to other available rings.
+ * If all rings are full, we drop the packet.
+ *
+ * TODO: Add throttling logic when all rings are full
+ */
+ return smp_processor_id();
+}
+
+static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Select the TCL ring based on the flow hash of the SKB instead
+ * of CPU ID. Since applications pumping the traffic can be scheduled
+ * on multiple CPUs, there is a chance that packets of the same flow
+ * could end up on different TCL rings, which could sometimes result in
+ * out-of-order arrival of the packets at the receiver.
+ */
+ return skb_get_hash(skb);
+}
+
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -857,6 +881,7 @@ const struct ath11k_hw_ops ipq8074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -896,6 +921,7 @@ const struct ath11k_hw_ops ipq6018_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -935,6 +961,7 @@ const struct ath11k_hw_ops qca6390_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -974,6 +1001,7 @@ const struct ath11k_hw_ops qcn9074_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6855_ops = {
@@ -1013,6 +1041,7 @@ const struct ath11k_hw_ops wcn6855_ops = {
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
};
const struct ath11k_hw_ops wcn6750_ops = {
@@ -1052,11 +1081,14 @@ const struct ath11k_hw_ops wcn6750_ops = {
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
.rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
.rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
};
-#define ATH11K_TX_RING_MASK_0 0x1
-#define ATH11K_TX_RING_MASK_1 0x2
-#define ATH11K_TX_RING_MASK_2 0x4
+#define ATH11K_TX_RING_MASK_0 BIT(0)
+#define ATH11K_TX_RING_MASK_1 BIT(1)
+#define ATH11K_TX_RING_MASK_2 BIT(2)
+#define ATH11K_TX_RING_MASK_3 BIT(3)
+#define ATH11K_TX_RING_MASK_4 BIT(4)
#define ATH11K_RX_RING_MASK_0 0x1
#define ATH11K_RX_RING_MASK_1 0x2
@@ -1903,6 +1935,43 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
},
};
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ 0,
+ ATH11K_TX_RING_MASK_2,
+ 0,
+ ATH11K_TX_RING_MASK_4,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
@@ -2332,12 +2401,55 @@ const struct ath11k_hw_regs wcn6750_regs = {
.hal_reo1_misc_ctl = 0x000005d8,
};
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
};
const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
};
static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
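
The tcl2wbm_rbm_map tables added above let the driver keep a contiguous logical TX ring index while the hardware ring numbers differ per chip; wcn6750, for instance, routes logical ring 1 to WBM ring 4. Below is a tiny standalone sketch of such a lookup, mirroring the wcn6750 values from the hunk; the RBM identifiers are shown as strings rather than the real HAL enum.

#include <stdio.h>

struct ring_map {
	unsigned int tcl_ring_num;  /* SW2TCL ring the frame is queued to */
	unsigned int wbm_ring_num;  /* WBM2SW ring completions come back on */
	const char *rbm_name;       /* stands in for the HAL_RX_BUF_RBM_* id */
};

/* Values mirror ath11k_hw_tcl2wbm_rbm_map_wcn6750[] from the hunk above. */
static const struct ring_map wcn6750_map[] = {
	{ 0, 0, "SW0_BM" },
	{ 1, 4, "SW4_BM" },  /* logical ring 1 completes on WBM ring 4 */
	{ 2, 2, "SW2_BM" },
};

int main(void)
{
	for (unsigned int i = 0;
	     i < sizeof(wcn6750_map) / sizeof(wcn6750_map[0]); i++)
		printf("tx ring %u -> tcl %u, wbm %u, rbm %s\n", i,
		       wcn6750_map[i].tcl_ring_num,
		       wcn6750_map[i].wbm_ring_num,
		       wcn6750_map[i].rbm_name);
	return 0;
}

The same indirection is what allows ath11k_dp_service_srng() to test BIT(wbm_ring_num) against the group's tx mask instead of assuming logical ring i completes on WBM ring i.
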
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index bb5ac940e470..8a3f24862edc 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -122,8 +122,15 @@ struct ath11k_hw_ring_mask {
u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
+struct ath11k_hw_tcl2wbm_rbm_map {
+ u8 tcl_ring_num;
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
struct ath11k_hw_hal_params {
enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
};
struct ath11k_hw_params {
@@ -166,6 +173,7 @@ struct ath11k_hw_params {
u8 summary_pad_sz;
u8 fft_hdr_len;
u16 max_fft_bins;
+ bool fragment_160mhz;
} spectral;
u16 interface_modes;
@@ -175,6 +183,7 @@ struct ath11k_hw_params {
bool idle_ps;
bool supports_sta_ps;
bool cold_boot_calib;
+ bool cbcal_restart_fw;
int fw_mem_mode;
u32 num_vdevs;
u32 num_peers;
@@ -200,6 +209,16 @@ struct ath11k_hw_params {
bool hybrid_bus_type;
bool fixed_fw_mem;
bool support_off_channel_tx;
+ bool supports_multi_bssid;
+
+ struct {
+ u32 start;
+ u32 end;
+ } sram_dump;
+
+ bool tcl_ring_retry;
+ u32 tx_ring_size;
+ bool smp2p_wow_exit;
};
struct ath11k_hw_ops {
@@ -242,6 +261,7 @@ struct ath11k_hw_ops {
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ u32 (*get_ring_selector)(struct sk_buff *skb);
};
extern const struct ath11k_hw_ops ipq8074_ops;
@@ -254,9 +274,11 @@ extern const struct ath11k_hw_ops wcn6750_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
@@ -397,4 +419,5 @@ static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
}
extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 7e91e347c9ff..84d956ad4093 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3059,7 +3059,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return ret;
}
- /* Enable all patial BSSID mask for SRG */
+ /* Enable all partial BSSID mask for SRG */
ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
@@ -3077,7 +3077,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return ret;
}
- /* Enable all patial BSSID mask for non-SRG */
+ /* Enable all partial BSSID mask for non-SRG */
ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
if (ret) {
ath11k_warn(ar->ab,
@@ -3350,10 +3350,15 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath11k_recalculate_mgmt_rate(ar, vif, &def);
if (changed & BSS_CHANGED_TWT) {
- if (info->twt_requester || info->twt_responder)
- ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
- else
+ struct wmi_twt_enable_params twt_params = {0};
+
+ if (info->twt_requester || info->twt_responder) {
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id,
+ &twt_params);
+ } else {
ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
}
if (changed & BSS_CHANGED_HE_OBSS_PD)
@@ -3451,7 +3456,7 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
cancel_delayed_work(&ar->scan.timeout);
- complete(&ar->scan.completed);
+ complete_all(&ar->scan.completed);
break;
}
}
@@ -4524,6 +4529,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
@@ -4701,7 +4707,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->deflink.bandwidth,
sta->deflink.rx_nss,
- sta->smps_mode);
+ sta->deflink.smps_mode);
spin_lock_bh(&ar->data_lock);
@@ -4737,7 +4743,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
if (changed & IEEE80211_RC_SMPS_CHANGED) {
smps = WMI_PEER_SMPS_PS_NONE;
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
smps = WMI_PEER_SMPS_PS_NONE;
@@ -4750,7 +4756,7 @@ static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
break;
default:
ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
- sta->smps_mode, sta->addr);
+ sta->deflink.smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
}
@@ -4954,6 +4960,8 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -4994,7 +5002,7 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
{
bool subfer, subfee;
- int sound_dim = 0;
+ int sound_dim = 0, nsts = 0;
subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
@@ -5004,6 +5012,11 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
subfer = false;
}
+ if (ar->num_rx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ subfee = false;
+ }
+
/* If SU Beaformer is not set, then disable MU Beamformer Capability */
if (!subfer)
*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
@@ -5016,7 +5029,9 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
*vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
- /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+ nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
/* Enable Sounding Dimension Field only if SU BF is enabled */
if (subfer) {
@@ -5028,9 +5043,15 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
*vht_cap |= sound_dim;
}
- /* Use the STS advertised by FW unless SU Beamformee is not supported*/
- if (!subfee)
- *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ /* Enable Beamformee STS Field only if SU BF is enabled */
+ if (subfee) {
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
+
+ nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ *vht_cap |= nsts;
+ }
}
static struct ieee80211_sta_vht_cap
@@ -6173,6 +6194,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
+ /* In the case of hardware recovery, debugfs files are
+ * not deleted since ieee80211_ops.remove_interface() is
+ * not invoked. In such cases, try to delete the files.
+ * These will be re-created later.
+ */
+ ath11k_debugfs_remove_interface(arvif);
+
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@@ -6354,9 +6382,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
}
- ret = ath11k_debugfs_add_interface(arvif);
- if (ret)
- goto err_peer_del;
+ ath11k_debugfs_add_interface(arvif);
mutex_unlock(&ar->conf_mutex);
@@ -8421,6 +8447,95 @@ exit:
return ret;
}
+static int ath11k_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ spin_unlock_bh(&ar->data_lock);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
+ 1 * HZ);
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param = {0};
+ struct ath11k_fw_stats_pdev *pdev;
+ int ret;
+
+ /* Final Tx power is the minimum of Target Power, CTL power, Regulatory
+ * Power and PSD EIRP Power. We only know the Regulatory power from the
+ * regulatory rules obtained. FW knows all these powers and sets the min
+ * of these. Hence, we request the FW pdev stats in which FW reports
+ * the minimum of all vdevs' channel Tx power.
+ */
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power is set as 2 units per dBm in FW. */
+ *dbm = pdev->chan_tx_power / 2;
+
+ spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
+ pdev->chan_tx_power, *dbm);
+ return 0;
+
+err_fallback:
+ mutex_unlock(&ar->conf_mutex);
+ /* We didn't get txpower from FW. Hence, fall back to vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.start = ath11k_mac_op_start,
@@ -8471,6 +8586,7 @@ static const struct ieee80211_ops ath11k_ops = {
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = ath11k_mac_op_ipv6_changed,
#endif
+ .get_txpower = ath11k_mac_op_get_txpower,
.set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
.remain_on_channel = ath11k_mac_op_remain_on_channel,
@@ -8777,6 +8893,11 @@ static int __ath11k_mac_register(struct ath11k *ar)
if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+ if (ab->hw_params.supports_multi_bssid) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
@@ -8967,6 +9088,7 @@ int ath11k_mac_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i;
int ret;
+ u8 mac_addr[ETH_ALEN] = {0};
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
@@ -8979,13 +9101,18 @@ int ath11k_mac_register(struct ath11k_base *ab)
if (ret)
return ret;
+ device_get_mac_address(ab->dev, mac_addr);
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ab->pdevs_macaddr_valid) {
ether_addr_copy(ar->mac_addr, pdev->mac_addr);
} else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ if (is_zero_ether_addr(mac_addr))
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, mac_addr);
ar->mac_addr[4] += i;
}
@@ -9079,6 +9206,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
init_completion(&ar->completed_11d_scan);
+
+ ath11k_fw_stats_init(ar);
}
return 0;
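
ath11k_fw_stats_request() above is a standard request/complete exchange: reset the completion, send the WMI stats request, and wait up to one second for the stats event to signal it. The userspace model below uses a pthread condition variable in place of the kernel completion API (compile with -pthread); it only shows the shape of the exchange, not the driver implementation.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static struct completion stats_done = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.done = false,
};

static void reinit_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = false;
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static int wait_for_completion_timeout(struct completion *c, int secs)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->lock);
	while (!c->done && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
	pthread_mutex_unlock(&c->lock);

	return err == ETIMEDOUT ? -ETIMEDOUT : 0;
}

static void *fake_stats_event(void *arg)
{
	usleep(100 * 1000);  /* pretend firmware latency */
	complete(arg);       /* what the stats event handler does */
	return NULL;
}

int main(void)
{
	pthread_t t;

	reinit_completion(&stats_done);
	pthread_create(&t, NULL, fake_stats_event, &stats_done);  /* "send request" */
	printf("wait returned %d\n", wait_for_completion_timeout(&stats_done, 1));
	pthread_join(t, NULL);
	return 0;
}
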
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index c44df17719f6..86995e8dc913 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -402,8 +402,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
ret = ath11k_mhi_get_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to get msi for mhi\n");
- mhi_free_controller(mhi_ctrl);
- return ret;
+ goto free_controller;
}
if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
@@ -412,7 +411,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
if (ret < 0)
- return ret;
+ goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
mhi_ctrl->iova_stop = 0xFFFFFFFF;
@@ -440,18 +439,22 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
default:
ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
ab->hw_rev);
- mhi_free_controller(mhi_ctrl);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_controller;
}
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
- mhi_free_controller(mhi_ctrl);
- return ret;
+ goto free_controller;
}
return 0;
+
+free_controller:
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+ return ret;
}
void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 5bd34a6273d9..99cf3357c66e 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -685,6 +685,7 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.stop = ath11k_pcic_stop,
.read32 = ath11k_pcic_read32,
.write32 = ath11k_pcic_write32,
+ .read = ath11k_pcic_read,
.power_down = ath11k_pci_power_down,
.power_up = ath11k_pci_power_up,
.suspend = ath11k_pci_hif_suspend,
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 1adf20ebef27..380f9d37b644 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -140,55 +140,100 @@ int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
+static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ if (offset < ATH11K_PCI_WINDOW_START)
+ iowrite32(value, ab->mem + offset);
+ else
+ ab->pci.ops->window_write32(ab, offset, value);
+}
+
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
int ret = 0;
+ bool wakeup_required;
/* for offset beyond BAR + 4K - 32, may
* need to wakeup the device to access.
*/
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START)
- iowrite32(value, ab->mem + offset);
- else
- ab->pci.ops->window_write32(ab, offset, value);
+ __ath11k_pcic_write32(ab, offset, value);
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
- !ret)
+ if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
+static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ u32 val;
+
+ if (offset < ATH11K_PCI_WINDOW_START)
+ val = ioread32(ab->mem + offset);
+ else
+ val = ab->pci.ops->window_read32(ab, offset);
+
+ return val;
+}
+
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
int ret = 0;
u32 val;
+ bool wakeup_required;
/* for offset beyond BAR + 4K - 32, may
* need to wakeup the device to access.
*/
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->wakeup)
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
ret = ab->pci.ops->wakeup(ab);
- if (offset < ATH11K_PCI_WINDOW_START)
- val = ioread32(ab->mem + offset);
- else
- val = ab->pci.ops->window_read32(ab, offset);
+ val = __ath11k_pcic_read32(ab, offset);
- if (test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
- offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF && ab->pci.ops->release &&
- !ret)
+ if (wakeup_required && !ret && ab->pci.ops->release)
ab->pci.ops->release(ab);
return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
+{
+ int ret = 0;
+ bool wakeup_required;
+ u32 *data = buf;
+ u32 i;
+
+ /* for offset beyond BAR + 4K - 32, may
+ * need to wakeup the device to access.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup) {
+ ret = ab->pci.ops->wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to wakeup for read from 0x%x: %d\n",
+ start, ret);
+ return ret;
+ }
+ }
+
+ for (i = start; i < end + 1; i += 4)
+ *data++ = __ath11k_pcic_read32(ab, i);
+
+ if (wakeup_required && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_read);
+
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
@@ -414,6 +459,7 @@ void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
+ dev_set_threaded(&irq_grp->napi_ndev, true);
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
@@ -517,7 +563,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
- ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+ ath11k_pcic_ext_grp_napi_poll);
if (ab->hw_params.ring_mask->tx[i] ||
ab->hw_params.ring_mask->rx[i] ||
@@ -731,3 +777,37 @@ int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
return 0;
}
EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
+
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
+
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+ struct ath11k_ce_pipe *ce_pipe;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ synchronize_irq(ab->irq_num[irq_idx]);
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
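
The read32/write32 rework above computes wakeup_required once, performs the MMIO access, and releases the wakeup reference only when taking it succeeded. Here is a compact userspace model of that bracketed access; the threshold constant is a placeholder, not the driver's ATH11K_PCI_ACCESS_ALWAYS_OFF value.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACCESS_ALWAYS_OFF 0x2000u  /* placeholder, not the driver constant */

static bool device_init_done = true;

static int wakeup_device(void)   { puts("wakeup");  return 0; }
static void release_device(void) { puts("release"); }
static uint32_t raw_read32(uint32_t off) { return off ^ 0xdeadbeefu; }

static uint32_t guarded_read32(uint32_t off)
{
	bool wakeup_required = device_init_done && off >= ACCESS_ALWAYS_OFF;
	int ret = 0;
	uint32_t val;

	if (wakeup_required)
		ret = wakeup_device();

	val = raw_read32(off);

	/* Release only if the wakeup reference was actually taken. */
	if (wakeup_required && !ret)
		release_device();

	return val;
}

int main(void)
{
	printf("0x%08x\n", (unsigned int)guarded_read32(0x3000));  /* wakes + releases */
	printf("0x%08x\n", (unsigned int)guarded_read32(0x0100));  /* direct access */
	return 0;
}

ath11k_pcic_read() above applies the same bracket once around a whole range of reads instead of per register, which is why it only checks the end offset against the threshold.
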
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
index 0afbb34510db..ac012e88bf6d 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.h
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -12,6 +12,8 @@
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_DP_OFFSET 14
+#define ATH11K_PCI_CE_WAKE_IRQ 2
+
#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
@@ -45,4 +47,8 @@ void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
const struct ath11k_pci_ops *pci_ops);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 9e22aaf34b88..1ae7af02c364 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -302,6 +302,21 @@ static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, addr);
+ /* Check if the found peer is what we want to remove.
+ * While the sta is transitioning to another band we may
+ * have 2 peers with the same addr assigned to different
+ * vdev_ids. Make sure we are deleting the correct peer.
+ */
+ if (peer && peer->vdev_id == vdev_id)
+ ath11k_peer_rhash_delete(ab, peer);
+
+ /* Fall back to the peer list search if the correct peer can't be found.
+ * Skip the deletion of the peer from the rhash since it has already
+ * been deleted in peer add.
+ */
+ if (!peer)
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+
if (!peer) {
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
@@ -312,8 +327,6 @@ static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
return -EINVAL;
}
- ath11k_peer_rhash_delete(ab, peer);
-
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
@@ -372,8 +385,17 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) {
- spin_unlock_bh(&ar->ab->base_lock);
- return -EINVAL;
+ if (peer->vdev_id == param->vdev_id) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ return -EINVAL;
+ }
+
+ /* Assume sta is transitioning to another band.
+ * Remove the peer from the rhash here.
+ */
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
}
spin_unlock_bh(&ar->ab->base_lock);
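
The peer changes above exist because an address-keyed rhash lookup can return the peer that was created on the other vdev while a station moves between bands. The standalone sketch below shows only the two lookup primitives involved and why a vdev_id check (or a (vdev, addr) search) is needed before acting on the result; a flat array stands in for both the rhash table and the peer list.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct peer {
	uint32_t vdev_id;
	uint8_t addr[6];
};

static struct peer peers[] = {
	{ .vdev_id = 0, .addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
	{ .vdev_id = 1, .addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
};

static struct peer *find_by_addr(const uint8_t *addr)
{
	for (size_t i = 0; i < sizeof(peers) / sizeof(peers[0]); i++)
		if (!memcmp(peers[i].addr, addr, 6))
			return &peers[i];  /* first MAC match wins */
	return NULL;
}

static struct peer *find_by_vdev_addr(uint32_t vdev_id, const uint8_t *addr)
{
	for (size_t i = 0; i < sizeof(peers) / sizeof(peers[0]); i++)
		if (peers[i].vdev_id == vdev_id &&
		    !memcmp(peers[i].addr, addr, 6))
			return &peers[i];
	return NULL;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct peer *by_addr = find_by_addr(mac);
	struct peer *exact = find_by_vdev_addr(1, mac);

	printf("by addr: vdev %u, by (vdev, addr): vdev %u\n",
	       (unsigned int)by_addr->vdev_id,
	       (unsigned int)exact->vdev_id);  /* 0 vs 1 */
	return 0;
}
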
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 00136601cb7d..51de2208b789 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1696,6 +1696,13 @@ static struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
},
};
+static struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
{
struct qmi_wlanfw_host_cap_req_msg_v01 req;
@@ -1872,7 +1879,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
/* For QCA6390 by default FW requests a block of ~4M contiguous
* DMA memory, it's hard to allocate from OS. So host returns
- * failure to FW and FW will then request mulitple blocks of small
+ * failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
if (!(ab->hw_params.fixed_mem_region ||
@@ -3006,6 +3013,12 @@ static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
struct ath11k_base *ab = qmi->ab;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware ready\n");
+
+ if (!ab->qmi.cal_done) {
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ }
+
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
}
@@ -3023,6 +3036,19 @@ static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cold boot calibration done\n");
}
+static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi firmware init done\n");
+}
+
static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
{
.type = QMI_INDICATION,
@@ -3053,6 +3079,14 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
.fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
},
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
+ .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_init_done_cb,
+ },
};
static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3145,7 +3179,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
}
break;
- case ATH11K_QMI_EVENT_FW_READY:
+ case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
ath11k_hal_dump_srng_stats(ab);
@@ -3169,6 +3203,22 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
}
break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ /* For targets requiring a FW restart upon cold
+ * boot completion, there is no need to process
+ * FW ready; such targets will receive the FW init
+ * done message after FW restart.
+ */
+ if (ab->hw_params.cbcal_restart_fw)
+ break;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ath11k_core_qmi_firmware_ready(ab);
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index c83cf822be81..2ec56a34fa81 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -31,8 +31,9 @@
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
-#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0021
-#define QMI_WLFW_FW_READY_IND_V01 0x0038
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
#define ATH11K_FIRMWARE_MODE_OFF 4
@@ -69,6 +70,7 @@ enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
ATH11K_QMI_EVENT_POWER_UP,
ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_FW_INIT_DONE,
ATH11K_QMI_EVENT_MAX,
};
@@ -291,6 +293,10 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
char placeholder;
};
+struct qmi_wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 26ecc1bcd9d5..786d5f36f5e5 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -877,7 +877,7 @@ struct rx_msdu_start_wcn6855 {
*
* l4_offset
* Depending upon mode bit, this field either indicates the
- * L4 offset nin bytes from the start of RX_HEADER (only valid
+ * L4 offset in bytes from the start of RX_HEADER (only valid
* if either ipv4_proto or ipv6_proto is set to 1) or indicates
* the offset in bytes to the start of TCP or UDP header from
* the start of the IP header after decapsulation (Only valid if
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 516a7b4cd180..705868198df4 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -30,6 +30,7 @@
#define ATH11K_SPECTRAL_20MHZ 20
#define ATH11K_SPECTRAL_40MHZ 40
#define ATH11K_SPECTRAL_80MHZ 80
+#define ATH11K_SPECTRAL_160MHZ 160
#define ATH11K_SPECTRAL_SIGNATURE 0xFA
@@ -183,6 +184,8 @@ static int ath11k_spectral_scan_trigger(struct ath11k *ar)
if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
return 0;
+ ar->spectral.is_primary = true;
+
ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
@@ -585,6 +588,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
u8 chan_width_mhz, bin_sz;
int ret;
u32 check_length;
+ bool fragment_sample = false;
lockdep_assert_held(&ar->spectral.lock);
@@ -639,6 +643,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
case ATH11K_SPECTRAL_80MHZ:
fft_sample->chan_width_mhz = chan_width_mhz;
break;
+ case ATH11K_SPECTRAL_160MHZ:
+ if (ab->hw_params.spectral.fragment_160mhz) {
+ chan_width_mhz /= 2;
+ fragment_sample = true;
+ }
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
default:
ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
return -EINVAL;
@@ -663,6 +674,17 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
+ /* If freq2 is available then the spectral scan results are fragmented
+ * as primary and secondary
+ */
+ if (fragment_sample && freq) {
+ if (!ar->spectral.is_primary)
+ fft_sample->freq1 = cpu_to_be16(freq);
+
+ /* We have to toggle the is_primary to handle the next report */
+ ar->spectral.is_primary = !ar->spectral.is_primary;
+ }
+
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
ab->hw_params.spectral.fft_sz);
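
For chips that report a 160 MHz scan as two 80 MHz fragments, the code above halves the reported width and toggles is_primary so that alternate FFT reports are attributed to the primary and secondary segment, with freq2 becoming the centre frequency of the secondary half. A minimal model of that toggling follows; the frequencies are example values, not driver data.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool is_primary = true;                 /* reset when a scan is triggered */
	unsigned int freq1 = 5250, freq2 = 5570;  /* example segment centres */

	for (int report = 0; report < 4; report++) {
		unsigned int centre = is_primary ? freq1 : freq2;

		printf("report %d: %s segment, centre %u MHz\n", report,
		       is_primary ? "primary" : "secondary", centre);
		is_primary = !is_primary;       /* next report is the other half */
	}
	return 0;
}
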
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
index 081744265f2a..96bfa16e18e9 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.h
+++ b/drivers/net/wireless/ath/ath11k/spectral.h
@@ -35,6 +35,7 @@ struct ath11k_spectral {
u16 count;
u8 fft_size;
bool enabled;
+ bool is_primary;
};
#ifdef CONFIG_ATH11K_SPECTRAL
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c96b26f39a25..23ed01bd44f9 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -99,7 +99,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
- /* display in millidegree celcius */
+ /* display in millidegree Celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
index f9af55f3682d..3e39675ef7f5 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.h
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -19,7 +19,7 @@ struct ath11k_thermal {
/* protected by conf_mutex */
u32 throttle_state;
- /* temperature value in Celcius degree
+ /* temperature value in Celsius degree
* protected by data_lock
*/
int temperature;
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 76560587bea0..9535745fe026 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -305,6 +305,34 @@ TRACE_EVENT(ath11k_wmi_diag,
)
);
+TRACE_EVENT(ath11k_ps_timekeeper,
+ TP_PROTO(struct ath11k *ar, const void *peer_addr,
+ u32 peer_ps_timestamp, u8 peer_ps_state),
+ TP_ARGS(ar, peer_addr, peer_ps_timestamp, peer_ps_state),
+
+ TP_STRUCT__entry(__string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __dynamic_array(u8, peer_addr, ETH_ALEN)
+ __field(u8, peer_ps_state)
+ __field(u32, peer_ps_timestamp)
+ ),
+
+ TP_fast_assign(__assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ memcpy(__get_dynamic_array(peer_addr), peer_addr,
+ ETH_ALEN);
+ __entry->peer_ps_state = peer_ps_state;
+ __entry->peer_ps_timestamp = peer_ps_timestamp;
+ ),
+
+ TP_printk("%s %s %u %u",
+ __get_str(driver),
+ __get_str(device),
+ __entry->peer_ps_state,
+ __entry->peer_ps_timestamp
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 88ee4f9d19da..fad9f8d308a2 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -416,7 +416,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
/* tx/rx chainmask reported from fw depends on the actual hw chains used,
* For example, for 4x4 capable macphys, first 4 chains can be used for first
- * mac and the remaing 4 chains can be used for the second mac or vice-versa.
+ * mac and the remaining 4 chains can be used for the second mac or vice-versa.
* In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
* will be advertised for second mac or vice-versa. Compute the shift value
* for tx/rx chainmask which will be used to advertise supported ht/vht rates to
@@ -991,9 +991,13 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath11k_vif *arvif;
struct sk_buff *skb;
int ret;
+ arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
if (!skb)
return -ENOMEM;
@@ -1007,6 +1011,17 @@ int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+ bss_conf = &arvif->vif->bss_conf;
+
+ if (bss_conf->nontransmitted) {
+ ether_addr_copy(cmd->trans_bssid.addr,
+ bss_conf->transmitter_bssid);
+ cmd->profile_idx = bss_conf->bssid_index;
+ cmd->profile_num = bss_conf->bssid_indicator;
+ }
+ }
+
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
@@ -3064,8 +3079,34 @@ int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
return ret;
}
-int
-ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
+{
+ twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ twt_params->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ twt_params->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ twt_params->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ twt_params->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ twt_params->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ twt_params->mbss_support = 0;
+}
+
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct ath11k_base *ab = wmi->wmi_ab->ab;
@@ -3083,28 +3124,22 @@ ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
cmd->pdev_id = pdev_id;
- cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
- cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
- cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
- cmd->congestion_thresh_teardown =
- ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
- cmd->congestion_thresh_critical =
- ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
- cmd->interference_thresh_teardown =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
- cmd->interference_thresh_setup =
- ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
- cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
- cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
- cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
- cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
- cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
- cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
- cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
- cmd->remove_sta_slot_interval =
- ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
- /* TODO add MBSSID support */
- cmd->mbss_support = 0;
+ cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
+ cmd->default_slot_size = params->default_slot_size;
+ cmd->congestion_thresh_setup = params->congestion_thresh_setup;
+ cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
+ cmd->congestion_thresh_critical = params->congestion_thresh_critical;
+ cmd->interference_thresh_teardown = params->interference_thresh_teardown;
+ cmd->interference_thresh_setup = params->interference_thresh_setup;
+ cmd->min_no_sta_setup = params->min_no_sta_setup;
+ cmd->min_no_sta_teardown = params->min_no_sta_teardown;
+ cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
+ cmd->min_no_twt_slots = params->min_no_twt_slots;
+ cmd->max_no_sta_twt = params->max_no_sta_twt;
+ cmd->mode_check_interval = params->mode_check_interval;
+ cmd->add_sta_slot_interval = params->add_sta_slot_interval;
+ cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
+ cmd->mbss_support = params->mbss_support;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
if (ret) {
@@ -6767,6 +6802,107 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
rcu_read_unlock();
}
+static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ struct ath11k_sta *arsta;
+ const void **tb;
+ enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch sta ps change ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "peer sta ps chnange ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ ev->peer_macaddr.addr, ev->peer_ps_state,
+ ev->ps_supported_bitmap, ev->peer_ps_valid,
+ ev->peer_ps_timestamp);
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+
+ if (!ar) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
+ peer->vdev_id);
+
+ goto exit;
+ }
+
+ sta = peer->sta;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!sta) {
+ ath11k_warn(ab, "failed to find station entry %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer_previous_ps_state = arsta->peer_ps_state;
+ arsta->peer_ps_state = ev->peer_ps_state;
+ arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
+ !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
+ !ev->peer_ps_valid)
+ goto out;
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_start_time = ev->peer_ps_timestamp;
+ arsta->ps_start_jiffies = jiffies;
+ } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
+ peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_total_duration = arsta->ps_total_duration +
+ (ev->peer_ps_timestamp - arsta->ps_start_time);
+ }
+
+ if (ar->ps_timekeeper_enable)
+ trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
+ ev->peer_ps_timestamp,
+ arsta->peer_ps_state);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
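
The handler above only adds to ps_total_duration when a peer moves from ON to OFF. As a rough sketch (not part of this patch; the helper name is made up, the arsta fields and millisecond timestamp units are assumed from the handler above), a hypothetical reader could also fold an in-progress ON interval into the total:

/* Sketch only: hypothetical helper, reusing the fields touched by
 * ath11k_wmi_event_peer_sta_ps_state_chg() above.
 */
static u32 ath11k_sta_ps_total_duration_msec(struct ath11k *ar,
					     struct ath11k_sta *arsta)
{
	u32 duration;

	spin_lock_bh(&ar->data_lock);
	duration = arsta->ps_total_duration;

	/* include an ON period that has not yet been closed by an OFF event */
	if (arsta->peer_current_ps_valid &&
	    arsta->peer_ps_state == WMI_PEER_PS_STATE_ON)
		duration += jiffies_to_msecs(jiffies - arsta->ps_start_jiffies);

	spin_unlock_bh(&ar->data_lock);

	return duration;
}
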
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k *ar;
@@ -7409,7 +7545,53 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- ath11k_debugfs_fw_stats_process(ab, skb);
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT can be requested via the .get_txpower mac ops or via
+ * debugfs fw stats, so process it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats_done = true;
+ goto complete;
+ }
+
+ /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
+ * are currently requested only via debugfs fw stats, so process them in the
+ * debugfs context.
+ */
+ ath11k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_free(&stats);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -7960,6 +8142,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb);
break;
+ case WMI_PEER_STA_PS_STATECHG_EVENTID:
+ ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+ break;
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath11k_wmi_gtk_offload_status_event(ab, skb);
break;
@@ -8962,12 +9147,13 @@ int ath11k_wmi_sta_keepalive(struct ath11k *ar,
cmd->interval = arg->interval;
cmd->method = arg->method;
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
- arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
- arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
- WMI_TAG_STA_KEEPALVE_ARP_RESPONSE) |
- FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
arp->src_ip4_addr = arg->src_ip4_addr;
arp->dest_ip4_addr = arg->dest_ip4_addr;
ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 4da248ffa318..8f2c07d70a4a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -17,7 +17,7 @@ struct ath11k_vif;
#define PSOC_HOST_MAX_NUM_SS (8)
-/* defines to set Packet extension values whic can be 0 us, 8 usec or 16 usec */
+/* defines to set Packet extension values which can be 0 us, 8 usec or 16 usec */
#define MAX_HE_NSS 8
#define MAX_HE_MODULATION 8
#define MAX_HE_RU 4
@@ -1214,7 +1214,7 @@ enum wmi_tlv_tag {
WMI_TAG_NS_OFFLOAD_TUPLE,
WMI_TAG_FTM_INTG_CMD,
WMI_TAG_STA_KEEPALIVE_CMD,
- WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE,
WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
WMI_TAG_AP_PS_PEER_CMD,
WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
@@ -2090,6 +2090,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
/* The second 128 bits */
@@ -4482,7 +4483,7 @@ struct wmi_pdev_radar_ev {
} __packed;
struct wmi_pdev_temperature_event {
- /* temperature value in Celcius degree */
+ /* temperature value in Celsius degree */
s32 temp;
u32 pdev_id;
} __packed;
@@ -4708,7 +4709,7 @@ enum wmi_sta_ps_param_tx_wake_threshold {
*/
enum wmi_sta_ps_param_pspoll_count {
WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
- /* Values greater than 0 indicate the maximum numer of PS-Poll frames
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
* FW will send before waking up.
*/
};
@@ -4820,9 +4821,9 @@ enum wmi_rate_preamble {
/**
* enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
- * @WMI_RTS_CTS_DISABLED : RTS/CTS protection is disabled.
- * @WMI_USE_RTS_CTS : RTS/CTS Enabled.
- * @WMI_USE_CTS2SELF : CTS to self protection Enabled.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
*/
enum wmi_rtscts_prot_mode {
WMI_RTS_CTS_DISABLED = 0,
@@ -4833,13 +4834,13 @@ enum wmi_rtscts_prot_mode {
/**
* enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
* protection mode.
- * @WMI_RTSCTS_FOR_NO_RATESERIES - Neither of rate-series should use RTS-CTS
- * @WMI_RTSCTS_FOR_SECOND_RATESERIES - Only second rate-series will use RTS-CTS
- * @WMI_RTSCTS_ACROSS_SW_RETRIES - Only the second rate-series will use RTS-CTS,
- * but if there's a sw retry, both the rate
- * series will use RTS-CTS.
- * @WMI_RTSCTS_ERP - RTS/CTS used for ERP protection for every PPDU.
- * @WMI_RTSCTS_FOR_ALL_RATESERIES - Enable RTS-CTS for all rate series.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
*/
enum wmi_rtscts_profile {
WMI_RTSCTS_FOR_NO_RATESERIES = 0,
@@ -4933,6 +4934,25 @@ struct wmi_wmm_params_all_arg {
#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+struct wmi_twt_enable_params {
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+};
+
struct wmi_twt_enable_params_cmd {
u32 tlv_header;
u32 pdev_id;
@@ -5350,6 +5370,26 @@ struct wmi_debug_log_config_cmd_fixed_param {
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+enum ath11k_wmi_peer_ps_state {
+ WMI_PEER_PS_STATE_OFF,
+ WMI_PEER_PS_STATE_ON,
+ WMI_PEER_PS_STATE_DISABLED,
+};
+
+enum wmi_peer_ps_supported_bitmap {
+ /* Used to indicate that power save state change is valid */
+ WMI_PEER_PS_VALID = 0x1,
+ WMI_PEER_PS_STATE_TIMESTAMP = 0x2,
+};
+
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_ps_state;
+ u32 ps_supported_bitmap;
+ u32 peer_ps_valid;
+ u32 peer_ps_timestamp;
+} __packed;
+
struct ath11k_wmi_base {
struct ath11k_base *ab;
struct ath11k_pdev_wmi wmi[MAX_RADIOS];
@@ -6039,7 +6079,9 @@ void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
struct ath11k_fw_stats *fw_stats, u32 stats_id,
char *buf);
int ath11k_wmi_simulate_radar(struct ath11k *ar);
-int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id);
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params);
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
struct wmi_twt_add_dialog_params *params);
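
With the TWT enable command now taking an explicit parameter block, a caller fills the defaults first and may override individual fields before sending. A minimal usage sketch, not part of the patch (the wrapper name is made up; ar and ar->pdev are assumed valid):

/* Sketch only: hypothetical caller of the refactored TWT enable API. */
static int ath11k_twt_enable_example(struct ath11k *ar)
{
	struct wmi_twt_enable_params params = {};

	ath11k_wmi_fill_default_twt_params(&params);
	/* a caller may tweak individual knobs here before sending */
	params.sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;

	return ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &params);
}
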
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
index b3e65cd13d83..1dec23b0699c 100644
--- a/drivers/net/wireless/ath/ath11k/wow.c
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -67,6 +68,13 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
int ret;
+ /* In the case of WCN6750, WoW wakeup is done
+ * by sending SMP2P power save exit message
+ * to the target processor.
+ */
+ if (ab->hw_params.smp2p_wow_exit)
+ return 0;
+
reinit_completion(&ab->wow.wakeup_completed);
ret = ath11k_wmi_wow_host_wakeup_ind(ar);
@@ -664,6 +672,12 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
int ret;
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
mutex_lock(&ar->conf_mutex);
ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
@@ -695,13 +709,6 @@ int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
- ath11k_mac_drain_tx(ar);
- ret = ath11k_mac_wait_tx_complete(ar);
- if (ret) {
- ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
- goto cleanup;
- }
-
ret = ath11k_wow_set_hw_filter(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e11c7e9accc0..a20e0aeae284 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1124,7 +1124,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
@@ -1249,7 +1249,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr)
{
struct ath6kl *ar = ath6kl_priv(ndev);
@@ -1279,7 +1279,7 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback) (void *cookie,
struct key_params *))
@@ -1314,7 +1314,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 9b5c7d8f2b95..201e45554070 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1014,7 +1014,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
switch (ie_id) {
case ATH6KL_FW_IE_FW_VERSION:
- strlcpy(ar->wiphy->fw_version, data,
+ strscpy(ar->wiphy->fw_version, data,
min(sizeof(ar->wiphy->fw_version), ie_len+1));
ath6kl_dbg(ATH6KL_DBG_BOOT,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index dc0e5ea25673..090ff0600c81 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1744,7 +1744,7 @@ static void ar9003_hw_spectral_scan_config(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA);
REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE);
- /* on AR93xx and newer, count = 0 will make the the chip send
+ /* on AR93xx and newer, count = 0 will make the chip send
* spectral samples endlessly. Check if this really was intended,
* and fix otherwise.
*/
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 6cf087522157..571062f2e82a 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
if (!avp->assoc)
return false;
- skb = ieee80211_nullfunc_get(sc->hw, vif, false);
+ skb = ieee80211_nullfunc_get(sc->hw, vif, -1, false);
if (!skb)
return false;
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 994ec48b2f66..ca05b07a45e6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -364,33 +364,27 @@ ret:
}
static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 len)
{
uint32_t *pattern = (uint32_t *)skb->data;
- switch (*pattern) {
- case 0x33221199:
- {
+ if (*pattern == 0x33221199 && len >= sizeof(struct htc_panic_bad_vaddr)) {
struct htc_panic_bad_vaddr *htc_panic;
htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
htc_panic->exccause, htc_panic->pc,
htc_panic->badvaddr);
- break;
- }
- case 0x33221299:
- {
+ return;
+ }
+ if (*pattern == 0x33221299) {
struct htc_panic_bad_epid *htc_panic;
htc_panic = (struct htc_panic_bad_epid *) skb->data;
dev_err(htc_handle->dev, "ath: firmware panic! "
"bad epid: 0x%08x\n", htc_panic->epid);
- break;
- }
- default:
- dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
- break;
+ return;
}
+ dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
}
/*
@@ -411,16 +405,26 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
if (!htc_handle || !skb)
return;
+ /* A valid message requires len >= 8.
+ *
+ * sizeof(struct htc_frame_hdr) == 8
+ * sizeof(struct htc_ready_msg) == 8
+ * sizeof(struct htc_panic_bad_vaddr) == 16
+ * sizeof(struct htc_panic_bad_epid) == 8
+ */
+ if (unlikely(len < sizeof(struct htc_frame_hdr)))
+ goto invalid;
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
if (epid == 0x99) {
- ath9k_htc_fw_panic_report(htc_handle, skb);
+ ath9k_htc_fw_panic_report(htc_handle, skb, len);
kfree_skb(skb);
return;
}
if (epid < 0 || epid >= ENDPOINT_MAX) {
+invalid:
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
else
@@ -432,21 +436,30 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
/* Handle trailer */
if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
- if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
+ if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000) {
/* Move past the Watchdog pattern */
htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
+ len -= 4;
+ }
}
/* Get the message ID */
+ if (unlikely(len < sizeof(struct htc_frame_hdr) + sizeof(__be16)))
+ goto invalid;
msg_id = (__be16 *) ((void *) htc_hdr +
sizeof(struct htc_frame_hdr));
/* Now process HTC messages */
switch (be16_to_cpu(*msg_id)) {
case HTC_MSG_READY_ID:
+ if (unlikely(len < sizeof(struct htc_ready_msg)))
+ goto invalid;
htc_process_target_rdy(htc_handle, htc_hdr);
break;
case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
+ if (unlikely(len < sizeof(struct htc_frame_hdr) +
+ sizeof(struct htc_conn_svc_rspmsg)))
+ goto invalid;
htc_process_conn_rsp(htc_handle, htc_hdr);
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 096a206f49ed..450ab19b1d4e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -710,7 +710,7 @@ struct ath_spec_scan {
/**
* struct ath_hw_ops - callbacks used by hardware code and driver code
*
- * This structure contains callbacks designed to to be used internally by
+ * This structure contains callbacks designed to be used internally by
* hardware code and also by the lower level driver.
*
* @config_pci_powersave:
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index cb5414265a9b..58c0ab01771b 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -83,7 +83,8 @@ static int ath9k_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
if (!wait || !max || likely(bytes_read) || fail_stats > 110)
break;
- msleep_interruptible(ath9k_rng_delay_get(++fail_stats));
+ if (hwrng_msleep(rng, ath9k_rng_delay_get(++fail_stats)))
+ break;
}
if (wait && !bytes_read && max)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index ba16a7f3e23d..ba271a10d4ab 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2160,7 +2160,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
fi->keyix = an->ps_key;
else
fi->keyix = ATH9K_TXKEYIX_INVALID;
- fi->dyn_smps = sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC;
+ fi->dyn_smps = sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC;
fi->keytype = keytype;
fi->framelen = framelen;
fi->tx_power = txpower;
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 1ab09e1c9ec5..4c1aecd1163c 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -105,7 +105,7 @@ static void carl9170_fw_info(struct ar9170 *ar)
CARL9170FW_GET_MONTH(fw_date),
CARL9170FW_GET_DAY(fw_date));
- strlcpy(ar->hw->wiphy->fw_version, motd_desc->release,
+ strscpy(ar->hw->wiphy->fw_version, motd_desc->release,
sizeof(ar->hw->wiphy->fw_version));
}
}
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index f1a43fd1d957..d3a9d00e65e1 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -2677,7 +2677,7 @@ struct ani_global_security_stats {
* management information base (MIB) object is enabled */
u32 rx_wep_unencrypted_frm_cnt;
- /* The number of received MSDU packets that that the 802.11 station
+ /* The number of received MSDU packets that the 802.11 station
* discarded because of MIC failures */
u32 rx_mic_fail_cnt;
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 8da3955995b6..0802ed728824 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/random.h>
#include "txrx.h"
static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
@@ -278,6 +279,7 @@ static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
struct ieee80211_supported_band *sband;
int idx;
int i;
+ u8 snr_sample = snr & 0xff;
idx = 0;
if (band == NL80211_BAND_5GHZ)
@@ -297,6 +299,8 @@ static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
wcn->chan_survey[idx].rssi = rssi;
wcn->chan_survey[idx].snr = snr;
spin_unlock(&wcn->survey_lock);
+
+ add_device_randomness(&snr_sample, sizeof(snr_sample));
}
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f93bdffa4d1d..40f9a7ef8980 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1620,7 +1620,7 @@ static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
@@ -1696,7 +1696,7 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
@@ -1723,7 +1723,7 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
/* Need to be present or wiphy_new() will WARN */
static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
+ struct net_device *ndev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -2072,8 +2072,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
key_params.key = vif->gtk;
key_params.key_len = vif->gtk_len;
key_params.seq_len = IEEE80211_GCMP_PN_LEN;
- rc = wil_cfg80211_add_key(wiphy, ndev, vif->gtk_index, false,
- NULL, &key_params);
+ rc = wil_cfg80211_add_key(wiphy, ndev, -1, vif->gtk_index,
+ false, NULL, &key_params);
if (rc)
wil_err(wil, "vif %d recovery add key failed (%d)\n",
i, rc);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 7da87c9f363f..94e61dbe94f8 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1305,7 +1305,7 @@ void wil_get_board_file(struct wil6210_priv *wil, char *buf, size_t len)
board_file = WIL_BOARD_FILE_NAME;
}
- strlcpy(buf, board_file, len);
+ strscpy(buf, board_file, len);
}
static int wil_get_bl_info(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 87a88f26233e..ee7d7e9c2718 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -445,7 +445,7 @@ int wil_if_add(struct wil6210_priv *wil)
wil_dbg_misc(wil, "entered");
- strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
rc = wiphy_register(wiphy);
if (rc < 0) {
@@ -456,14 +456,12 @@ int wil_if_add(struct wil6210_priv *wil)
init_dummy_netdev(&wil->napi_ndev);
if (wil->use_enhanced_dma_hw) {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
- wil6210_netdev_poll_rx_edma,
- NAPI_POLL_WEIGHT);
+ wil6210_netdev_poll_rx_edma);
netif_napi_add_tx(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
- wil6210_netdev_poll_rx,
- NAPI_POLL_WEIGHT);
+ wil6210_netdev_poll_rx);
netif_napi_add_tx(&wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx);
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ea7bd403e706..6a5976a2944c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -780,7 +780,7 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len)
return; /* FW load will fail after timeout */
}
/* ignore MAC address, we already have it from the boot loader */
- strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+ strscpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) {
wil_dbg_wmi(wil, "rfc calibration result %d\n",
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index 0361c8eb2008..45d079b93384 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1518,7 +1518,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->firmware = NULL;
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
- strlcpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
+ strscpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c
index 982a772a9d87..bfe1be345844 100644
--- a/drivers/net/wireless/broadcom/b43/leds.c
+++ b/drivers/net/wireless/broadcom/b43/leds.c
@@ -118,7 +118,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index aa5c99465674..2c0c019a815d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -2479,11 +2479,7 @@ static void b43_nphy_gain_ctl_workarounds_rev19(struct b43_wldev *dev)
static void b43_nphy_gain_ctl_workarounds_rev7(struct b43_wldev *dev)
{
- struct b43_phy *phy = &dev->phy;
-
- switch (phy->rev) {
- /* TODO */
- }
+ /* TODO - should depend on phy->rev */
}
static void b43_nphy_gain_ctl_workarounds_rev3(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.c b/drivers/net/wireless/broadcom/b43legacy/leds.c
index 38b5be3a84e2..79e6fd205bfb 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.c
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.c
@@ -88,7 +88,7 @@ static int b43legacy_register_led(struct b43legacy_wldev *dev,
led->dev = dev;
led->index = led_index;
led->activelow = activelow;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 2c95a08a5871..9ec0c60b6da1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -87,6 +87,8 @@ struct brcmf_proto_bcdc_header {
* plus any space that might be needed
* for bus alignment padding.
*/
+#define ROUND_UP_MARGIN 2048
+
struct brcmf_bcdc {
u16 reqid;
u8 bus_header[BUS_HEADER_LEN];
@@ -368,8 +370,7 @@ brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
/* await txstatus signal for firmware if active */
if (brcmf_fws_fc_active(bcdc->fws)) {
- if (!success)
- brcmf_fws_bustxfail(bcdc->fws, txp);
+ brcmf_fws_bustxcomplete(bcdc->fws, txp, success);
} else {
if (brcmf_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp))
brcmu_pkt_buf_free_skb(txp);
@@ -471,7 +472,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
- sizeof(struct brcmf_proto_bcdc_dcmd);
+ sizeof(struct brcmf_proto_bcdc_dcmd) + ROUND_UP_MARGIN;
return 0;
fail:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index d639bb8b51ae..d0daef674e72 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -983,6 +983,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index ae5af76e2568..2208ab3aa795 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -6,6 +6,8 @@
#ifndef BRCMFMAC_BUS_H
#define BRCMFMAC_BUS_H
+#include <linux/kernel.h>
+#include <linux/firmware.h>
#include "debug.h"
/* IDs of the 6 default common rings of msgbuf protocol */
@@ -34,6 +36,11 @@ enum brcmf_bus_protocol_type {
BRCMF_PROTO_MSGBUF
};
+/* Firmware blobs that may be available */
+enum brcmf_blob_type {
+ BRCMF_BLOB_CLM,
+};
+
struct brcmf_mp_device;
struct brcmf_bus_dcmd {
@@ -60,7 +67,7 @@ struct brcmf_bus_dcmd {
* @wowl_config: specify if dongle is configured for wowl when going to suspend
* @get_ramsize: obtain size of device memory.
* @get_memdump: obtain device memory dump in provided buffer.
- * @get_fwname: obtain firmware name.
+ * @get_blob: obtain a firmware blob.
*
* This structure provides an abstract interface towards the
* bus specific driver. For control messages to common driver
@@ -77,8 +84,8 @@ struct brcmf_bus_ops {
void (*wowl_config)(struct device *dev, bool enabled);
size_t (*get_ramsize)(struct device *dev);
int (*get_memdump)(struct device *dev, void *data, size_t len);
- int (*get_fwname)(struct device *dev, const char *ext,
- unsigned char *fw_name);
+ int (*get_blob)(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type);
void (*debugfs_create)(struct device *dev);
int (*reset)(struct device *dev);
};
@@ -220,10 +227,10 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
}
static inline
-int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext,
- unsigned char *fw_name)
+int brcmf_bus_get_blob(struct brcmf_bus *bus, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
- return bus->ops->get_fwname(bus->dev, ext, fw_name);
+ return bus->ops->get_blob(bus->dev, fw, type);
}
static inline
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index db45da33adfd..dfcfb3333369 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2361,7 +2361,8 @@ done:
static s32
brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool unicast, bool multicast)
+ int link_id, u8 key_idx, bool unicast,
+ bool multicast)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
@@ -2395,7 +2396,8 @@ done:
static s32
brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key *key;
@@ -2432,8 +2434,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -2457,8 +2459,8 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
if (params->key_len == 0)
- return brcmf_cfg80211_del_key(wiphy, ndev, key_idx, pairwise,
- mac_addr);
+ return brcmf_cfg80211_del_key(wiphy, ndev, -1, key_idx,
+ pairwise, mac_addr);
if (params->key_len > sizeof(key->data)) {
bphy_err(drvr, "Too long key length (%u)\n", params->key_len);
@@ -2553,8 +2555,9 @@ done:
}
static s32
-brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
- bool pairwise, const u8 *mac_addr, void *cookie,
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
+ int link_id, u8 key_idx, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie,
struct key_params *params))
{
@@ -2610,7 +2613,8 @@ done:
static s32
brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_idx)
+ struct net_device *ndev, int link_id,
+ u8 key_idx)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -3160,10 +3164,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = cfg->pub;
- struct brcmf_bss_info_le *bi;
- const struct brcmf_tlv *tim;
- size_t ie_len;
- u8 *ie;
+ struct brcmf_bss_info_le *bi = NULL;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -3177,29 +3178,8 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
bphy_err(drvr, "Could not get bss info %d\n", err);
goto update_bss_info_out;
}
-
bi = (struct brcmf_bss_info_le *)(cfg->extra_buf + 4);
err = brcmf_inform_single_bss(cfg, bi);
- if (err)
- goto update_bss_info_out;
-
- ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
- ie_len = le32_to_cpu(bi->ie_length);
-
- tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (!tim) {
- /*
- * active scan was done so we could not get dtim
- * information out of probe response.
- * so we speficially query dtim information to dongle.
- */
- u32 var;
- err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
- if (err) {
- bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err);
- goto update_bss_info_out;
- }
- }
update_bss_info_out:
brcmf_dbg(TRACE, "Exit");
@@ -3984,7 +3964,6 @@ brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
struct brcmf_pmk_list_le *pmk_list;
int i;
u32 npmk;
- s32 err;
pmk_list = &cfg->pmk_list;
npmk = le32_to_cpu(pmk_list->npmk);
@@ -3993,10 +3972,8 @@ brcmf_update_pmklist(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp)
for (i = 0; i < npmk; i++)
brcmf_dbg(CONN, "PMK[%d]: %pM\n", i, &pmk_list->pmk[i].bssid);
- err = brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
- sizeof(*pmk_list));
-
- return err;
+ return brcmf_fil_iovar_data_set(ifp, "pmkid_info", pmk_list,
+ sizeof(*pmk_list));
}
static s32
@@ -5042,13 +5019,10 @@ brcmf_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_beacon_data *info)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err;
brcmf_dbg(TRACE, "Enter\n");
- err = brcmf_config_ap_mgmt_ie(ifp->vif, info);
-
- return err;
+ return brcmf_config_ap_mgmt_ie(ifp->vif, info);
}
static int
@@ -6431,6 +6405,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
cfg->dongle_up = false; /* dongle down */
brcmf_abort_scanning(cfg);
brcmf_deinit_priv_mem(cfg);
+ brcmf_clear_assoc_ies(cfg);
}
static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
@@ -7485,6 +7460,7 @@ static bool brmcf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
return true;
switch (drvr->bus_if->chip) {
+ case BRCM_CC_43430_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
case BRCM_CC_43602_CHIP_ID:
return true;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 4ec7773b6906..121893bbaa1d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -641,6 +641,7 @@ static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
*srsize = (32 * 1024);
break;
case BRCM_CC_43430_CHIP_ID:
+ case CY_CC_43439_CHIP_ID:
/* assume sr for now as we can not check
* firmware sr capability at this point.
*/
@@ -732,6 +733,10 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
return 0x160000;
case CY_CC_43752_CHIP_ID:
return 0x170000;
+ case BRCM_CC_4378_CHIP_ID:
+ return 0x352000;
+ case CY_CC_89459_CHIP_ID:
+ return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
@@ -1258,7 +1263,8 @@ brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip)
brcmf_chip_resetcore(core, 0, 0, 0);
/* disable bank #3 remap for this device */
- if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) {
+ if (chip->pub.chip == BRCM_CC_43430_CHIP_ID ||
+ chip->pub.chip == CY_CC_43439_CHIP_ID) {
sr = container_of(core, struct brcmf_core_priv, pub);
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3);
brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0);
@@ -1416,10 +1422,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
reg = chip->ops->read32(chip->ctx, addr);
return (reg & pmu_cc3_mask) != 0;
case BRCM_CC_43430_CHIP_ID:
+ case CY_CC_43439_CHIP_ID:
addr = CORE_CC_REG(base, sr_control1);
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
case CY_CC_4373_CHIP_ID:
+ case CY_CC_89459_CHIP_ID:
/* explicitly check SR engine enable bit */
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 7485e784be2a..74020fa10065 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -123,7 +123,6 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
struct brcmf_bus *bus = drvr->bus_if;
struct brcmf_dload_data_le *chunk_buf;
const struct firmware *clm = NULL;
- u8 clm_name[BRCMF_FW_NAME_LEN];
u32 chunk_len;
u32 datalen;
u32 cumulative_len;
@@ -133,15 +132,8 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
brcmf_dbg(TRACE, "Enter\n");
- memset(clm_name, 0, sizeof(clm_name));
- err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name);
- if (err) {
- bphy_err(drvr, "get CLM blob file name failed (%d)\n", err);
- return err;
- }
-
- err = firmware_request_nowarn(&clm, clm_name, bus->dev);
- if (err) {
+ err = brcmf_bus_get_blob(bus, &clm, BRCMF_BLOB_CLM);
+ if (err || !clm) {
brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
err);
return 0;
@@ -261,7 +253,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
&revinfo, sizeof(revinfo));
if (err < 0) {
bphy_err(drvr, "retrieving revision info failed, %d\n", err);
- strlcpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
+ strscpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname));
} else {
ri->vendorid = le32_to_cpu(revinfo.vendorid);
ri->deviceid = le32_to_cpu(revinfo.deviceid);
@@ -314,7 +306,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* locate firmware version number for ethtool */
ptr = strrchr(buf, ' ') + 1;
- strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+ strscpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
/* Query for 'clmver' to get CLM version info from firmware */
memset(buf, 0, sizeof(buf));
@@ -424,11 +416,11 @@ static void brcmf_mp_attach(void)
* if not set then if available use the platform data version. To make
* sure it gets initialized at all, always copy the module param version
*/
- strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
+ strscpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
BRCMF_FW_ALTPATH_LEN);
if ((brcmfmac_pdata) && (brcmfmac_pdata->fw_alternative_path) &&
(brcmf_mp_global.firmware_path[0] == '\0')) {
- strlcpy(brcmf_mp_global.firmware_path,
+ strscpy(brcmf_mp_global.firmware_path,
brcmfmac_pdata->fw_alternative_path,
BRCMF_FW_ALTPATH_LEN);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 6c5a22a32a96..aa25abffcc7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -53,6 +53,7 @@ struct brcmf_mp_device {
struct brcmfmac_pd_cc *country_codes;
const char *board_type;
unsigned char mac[ETH_ALEN];
+ const char *antenna_sku;
union {
struct brcmfmac_sdio_pd sdio;
} bus;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index bd164a0821f9..595ae3ae561e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -292,6 +292,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
struct brcmf_pub *drvr = ifp->drvr;
struct ethhdr *eh;
int head_delta;
+ unsigned int tx_bytes = skb->len;
brcmf_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
@@ -366,7 +367,7 @@ done:
ndev->stats.tx_dropped++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += tx_bytes;
}
/* Return ok: we always eat the packet */
@@ -561,10 +562,10 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
if (drvr->revinfo.result == 0)
brcmu_dotrev_str(drvr->revinfo.driverrev, drev);
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, drev, sizeof(info->version));
- strlcpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
- strlcpy(info->bus_info, dev_name(drvr->bus_if->dev),
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->version, drev, sizeof(info->version));
+ strscpy(info->fw_version, drvr->fwver, sizeof(info->fw_version));
+ strscpy(info->bus_info, dev_name(drvr->bus_if->dev),
sizeof(info->bus_info));
}
@@ -1480,8 +1481,10 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp)
!brcmf_get_pend_8021x_cnt(ifp),
MAX_WAIT_FOR_8021X_TX);
- if (!err)
+ if (!err) {
bphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n");
+ atomic_set(&ifp->pend_8021x_cnt, 0);
+ }
return !err;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 0af452dca766..86ff174936a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -24,6 +24,13 @@ static const struct brcmf_dmi_data acepc_t8_data = {
BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
};
+/* The Chuwi Hi8 Pro uses the same Ampak AP6212 module as the Chuwi Vi8 Plus
+ * and the nvram for the Vi8 Plus is already in linux-firmware, so use that.
+ */
+static const struct brcmf_dmi_data chuwi_hi8_pro_data = {
+ BRCM_CC_43430_CHIP_ID, 0, "ilife-S806"
+};
+
static const struct brcmf_dmi_data gpd_win_pocket_data = {
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
};
@@ -76,6 +83,17 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "MRD"),
+ /* Above strings are too generic, also match on BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "05/10/2016"),
+ },
+ .driver_data = (void *)&chuwi_hi8_pro_data,
+ },
+ {
/* Cyberbook T116 rugged tablet */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index d2ac844e1e9f..2c2f3e026c13 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -249,7 +249,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
memset(&gscan_cfg, 0, sizeof(gscan_cfg));
if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID &&
drvr->bus_if->chip != BRCM_CC_4345_CHIP_ID &&
- drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID)
+ drvr->bus_if->chip != BRCM_CC_43454_CHIP_ID &&
+ drvr->bus_if->chip != CY_CC_43439_CHIP_ID)
brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
"pfn_gscan_cfg",
&gscan_cfg, sizeof(gscan_cfg));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index b8379e4034a4..f2207793f6e2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -21,6 +21,8 @@
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
#define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff"
+#define BRCMF_FW_MACADDR_FMT "macaddr=%pM"
+#define BRCMF_FW_MACADDR_LEN (7 + ETH_ALEN * 3)
enum nvram_parser_state {
IDLE,
@@ -44,6 +46,7 @@ enum nvram_parser_state {
* @multi_dev_v1: detect pcie multi device v1 (compressed).
* @multi_dev_v2: detect pcie multi device v2.
* @boardrev_found: nvram contains boardrev information.
+ * @strip_mac: strip the MAC address.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -57,6 +60,7 @@ struct nvram_parser {
bool multi_dev_v1;
bool multi_dev_v2;
bool boardrev_found;
+ bool strip_mac;
};
/*
@@ -121,6 +125,10 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
nvp->multi_dev_v2 = true;
if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
nvp->boardrev_found = true;
+ /* strip macaddr if platform MAC overrides */
+ if (nvp->strip_mac &&
+ strncmp(&nvp->data[nvp->entry], "macaddr", 7) == 0)
+ st = COMMENT;
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
@@ -209,6 +217,7 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
size = data_len;
/* Add space for properties we may add */
size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
+ size += BRCMF_FW_MACADDR_LEN + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
@@ -368,22 +377,37 @@ static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
nvp->nvram_len++;
}
+static void brcmf_fw_add_macaddr(struct nvram_parser *nvp, u8 *mac)
+{
+ int len;
+
+ len = scnprintf(&nvp->nvram[nvp->nvram_len], BRCMF_FW_MACADDR_LEN + 1,
+ BRCMF_FW_MACADDR_FMT, mac);
+ WARN_ON(len != BRCMF_FW_MACADDR_LEN);
+ nvp->nvram_len += len + 1;
+}
+
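
For reference, the length works out as: "macaddr=" is 8 characters and %pM prints 3 * ETH_ALEN - 1 = 17, giving 25 = 7 + ETH_ALEN * 3, which is exactly what the WARN_ON() above verifies at runtime. A compile-time restatement of the same arithmetic (sketch only, not in the patch):

/* Sketch only: "macaddr=" (8) + "xx:xx:xx:xx:xx:xx" (17) == BRCMF_FW_MACADDR_LEN */
static_assert(sizeof("macaddr=") - 1 + 3 * ETH_ALEN - 1 == BRCMF_FW_MACADDR_LEN);
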
/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
* End of buffer is completed with token identifying length of buffer.
*/
static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
- u32 *new_length, u16 domain_nr, u16 bus_nr)
+ u32 *new_length, u16 domain_nr, u16 bus_nr,
+ struct device *dev)
{
struct nvram_parser nvp;
u32 pad;
u32 token;
__le32 token_le;
+ u8 mac[ETH_ALEN];
if (brcmf_init_nvram_parser(&nvp, data, data_len) < 0)
return NULL;
+ if (eth_platform_get_mac_address(dev, mac) == 0)
+ nvp.strip_mac = true;
+
while (nvp.pos < data_len) {
nvp.state = nv_parser_states[nvp.state](&nvp);
if (nvp.state == END)
@@ -404,6 +428,9 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
brcmf_fw_add_defaults(&nvp);
+ if (nvp.strip_mac)
+ brcmf_fw_add_macaddr(&nvp, mac);
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
@@ -430,6 +457,7 @@ struct brcmf_fw {
struct device *dev;
struct brcmf_fw_request *req;
u32 curpos;
+ unsigned int board_index;
void (*done)(struct device *dev, int err, struct brcmf_fw_request *req);
};
@@ -537,7 +565,8 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (data)
nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
fwctx->req->domain_nr,
- fwctx->req->bus_nr);
+ fwctx->req->bus_nr,
+ fwctx->dev);
if (free_bcm47xx_nvram)
bcm47xx_nvram_release_contents(data);
@@ -587,39 +616,50 @@ static int brcmf_fw_complete_request(const struct firmware *fw,
static char *brcm_alt_fw_path(const char *path, const char *board_type)
{
- char alt_path[BRCMF_FW_NAME_LEN];
- char suffix[5];
+ char base[BRCMF_FW_NAME_LEN];
+ const char *suffix;
+ char *ret;
+
+ if (!board_type)
+ return NULL;
- strscpy(alt_path, path, BRCMF_FW_NAME_LEN);
- /* At least one character + suffix */
- if (strlen(alt_path) < 5)
+ suffix = strrchr(path, '.');
+ if (!suffix || suffix == path)
return NULL;
- /* strip .txt or .bin at the end */
- strscpy(suffix, alt_path + strlen(alt_path) - 4, 5);
- alt_path[strlen(alt_path) - 4] = 0;
- strlcat(alt_path, ".", BRCMF_FW_NAME_LEN);
- strlcat(alt_path, board_type, BRCMF_FW_NAME_LEN);
- strlcat(alt_path, suffix, BRCMF_FW_NAME_LEN);
+ /* strip extension at the end */
+ strscpy(base, path, BRCMF_FW_NAME_LEN);
+ base[suffix - path] = 0;
- return kstrdup(alt_path, GFP_KERNEL);
+ ret = kasprintf(GFP_KERNEL, "%s.%s%s", base, board_type, suffix);
+ if (!ret)
+ brcmf_err("out of memory allocating firmware path for '%s'\n",
+ path);
+
+ brcmf_dbg(TRACE, "FW alt path: %s\n", ret);
+
+ return ret;
}
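
To make the rewritten composition concrete: for path "brcm/brcmfmac43455-sdio.txt" and board type "raspberrypi,4-model-b", the suffix found by strrchr() is ".txt", so the kasprintf() above yields "brcm/brcmfmac43455-sdio.raspberrypi,4-model-b.txt". A small usage sketch, not part of the patch (the caller name and paths are made up):

/* Sketch only: hypothetical caller inside the same file. */
static void brcm_alt_fw_path_example(void)
{
	char *alt = brcm_alt_fw_path("brcm/brcmfmac43455-sdio.txt",
				     "raspberrypi,4-model-b");

	if (alt) {
		brcmf_dbg(TRACE, "alt path: %s\n", alt);
		kfree(alt);
	}
}
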
static int brcmf_fw_request_firmware(const struct firmware **fw,
struct brcmf_fw *fwctx)
{
struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+ unsigned int i;
int ret;
- /* Files can be board-specific, first try a board-specific path */
- if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
+ /* Files can be board-specific, first try board-specific paths */
+ for (i = 0; i < ARRAY_SIZE(fwctx->req->board_types); i++) {
char *alt_path;
- alt_path = brcm_alt_fw_path(cur->path, fwctx->req->board_type);
+ if (!fwctx->req->board_types[i])
+ goto fallback;
+ alt_path = brcm_alt_fw_path(cur->path,
+ fwctx->req->board_types[i]);
if (!alt_path)
goto fallback;
- ret = request_firmware(fw, alt_path, fwctx->dev);
+ ret = firmware_request_nowarn(fw, alt_path, fwctx->dev);
kfree(alt_path);
if (ret == 0)
return ret;
@@ -653,15 +693,40 @@ static void brcmf_fw_request_done_alt_path(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
struct brcmf_fw_item *first = &fwctx->req->items[0];
+ const char *board_type, *alt_path;
int ret = 0;
- /* Fall back to canonical path if board firmware not found */
- if (!fw)
- ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ if (fw) {
+ brcmf_fw_request_done(fw, ctx);
+ return;
+ }
+
+ /* Try next board firmware */
+ if (fwctx->board_index < ARRAY_SIZE(fwctx->req->board_types)) {
+ board_type = fwctx->req->board_types[fwctx->board_index++];
+ if (!board_type)
+ goto fallback;
+ alt_path = brcm_alt_fw_path(first->path, board_type);
+ if (!alt_path)
+ goto fallback;
+
+ ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
- brcmf_fw_request_done);
+ brcmf_fw_request_done_alt_path);
+ kfree(alt_path);
+
+ if (ret < 0)
+ brcmf_fw_request_done(fw, ctx);
+ return;
+ }
- if (fw || ret < 0)
+fallback:
+ /* Fall back to canonical path if board firmware not found */
+ ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_done);
+
+ if (ret < 0)
brcmf_fw_request_done(fw, ctx);
}
@@ -705,10 +770,11 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- if (fwctx->req->board_type)
+ if (fwctx->req->board_types[0])
alt_path = brcm_alt_fw_path(first->path,
- fwctx->req->board_type);
+ fwctx->req->board_types[0]);
if (alt_path) {
+ fwctx->board_index++;
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
brcmf_fw_request_done_alt_path);
@@ -769,7 +835,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
fwnames[j].path[0] = '\0';
/* check if firmware path is provided by module parameter */
if (brcmf_mp_global.firmware_path[0] != '\0') {
- strlcpy(fwnames[j].path, mp_path,
+ strscpy(fwnames[j].path, mp_path,
BRCMF_FW_NAME_LEN);
if (end != '/') {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index e290dec9c53d..1266cbaee072 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -11,6 +11,8 @@
#define BRCMF_FW_DEFAULT_PATH "brcm/"
+#define BRCMF_FW_MAX_BOARD_TYPES 8
+
/**
* struct brcmf_firmware_mapping - Used to map chipid/revmask to firmware
* filename and nvram filename. Each bus type implementation should create
@@ -66,7 +68,7 @@ struct brcmf_fw_request {
u16 domain_nr;
u16 bus_nr;
u32 n_items;
- const char *board_type;
+ const char *board_types[BRCMF_FW_MAX_BOARD_TYPES];
struct brcmf_fw_item items[];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 096f6b969dd8..e1127d7e086d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -419,7 +419,6 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
flowid = flow->hash[i].flowid;
if (flow->rings[flowid]->status != RING_OPEN)
continue;
- flow->rings[flowid]->status = RING_CLOSING;
brcmf_msgbuf_delete_flowring(drvr, flowid);
}
}
@@ -458,10 +457,8 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
(hash[i].ifidx == ifidx)) {
flowid = flow->hash[i].flowid;
- if (flow->rings[flowid]->status == RING_OPEN) {
- flow->rings[flowid]->status = RING_CLOSING;
+ if (flow->rings[flowid]->status == RING_OPEN)
brcmf_msgbuf_delete_flowring(drvr, flowid);
- }
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index c87b829adb0d..f518e025d6e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -135,7 +135,7 @@
/* Link Down indication in WoWL mode: */
#define BRCMF_WOWL_LINKDOWN (1 << 31)
-#define BRCMF_WOWL_MAXPATTERNS 8
+#define BRCMF_WOWL_MAXPATTERNS 16
#define BRCMF_WOWL_MAXPATTERNSIZE 128
#define BRCMF_COUNTRY_BUF_SZ 4
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index d58525ebe618..36af81975855 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -688,7 +688,7 @@ static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
struct brcmf_fws_mac_descriptor *desc)
{
if (desc == &fws->desc.other)
- strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
+ strscpy(desc->name, "MAC-OTHER", sizeof(desc->name));
else if (desc->mac_handle)
scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
desc->mac_handle, desc->interface_id);
@@ -2475,7 +2475,8 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}
-void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
+void brcmf_fws_bustxcomplete(struct brcmf_fws_info *fws, struct sk_buff *skb,
+ bool success)
{
u32 hslot;
@@ -2483,11 +2484,14 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
brcmu_pkt_buf_free_skb(skb);
return;
}
- brcmf_fws_lock(fws);
- hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
- brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0,
- 1);
- brcmf_fws_unlock(fws);
+
+ if (!success) {
+ brcmf_fws_lock(fws);
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot,
+ 0, 0, 1);
+ brcmf_fws_unlock(fws);
+ }
}
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index b16a9d1c0508..f9c36cd8f1de 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -40,7 +40,8 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_fws_reset_interface(struct brcmf_if *ifp);
void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
-void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bustxcomplete(struct brcmf_fws_info *fws, struct sk_buff *skb,
+ bool success);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index b2d0f7570aa9..cec53f934940 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -71,6 +71,7 @@
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS 48
+#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES 10
struct msgbuf_common_hdr {
u8 msgtype;
@@ -806,8 +807,12 @@ static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
if (flowid == BRCMF_FLOWRING_INVALID_ID) {
flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
- if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ if (flowid == BRCMF_FLOWRING_INVALID_ID) {
return -ENOMEM;
+ } else {
+ brcmf_flowring_enqueue(flow, flowid, skb);
+ return 0;
+ }
}
queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
@@ -1395,9 +1400,27 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct msgbuf_tx_flowring_delete_req *delete;
struct brcmf_commonring *commonring;
+ struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
+ struct brcmf_flowring *flow = msgbuf->flow;
void *ret_ptr;
u8 ifidx;
int err;
+ int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;
+
+ /* make sure it is not in txflow */
+ brcmf_commonring_lock(commonring_del);
+ flow->rings[flowid]->status = RING_CLOSING;
+ brcmf_commonring_unlock(commonring_del);
+
+ /* wait for commonring txflow to finish */
+ while (retry && atomic_read(&commonring_del->outstanding_tx)) {
+ usleep_range(5000, 10000);
+ retry--;
+ }
+ if (!retry) {
+ brcmf_err("timed out waiting for txstatus\n");
+ atomic_set(&commonring_del->outstanding_tx, 0);
+ }
/* no need to submit if firmware can not be reached */
if (drvr->bus_if->state != BRCMF_BUS_UP) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 2e322edbb907..6a849f4a94dd 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -8,10 +8,10 @@
#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64
-#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512
+#define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 1024
#define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64
#define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024
-#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512
+#define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 1024
#define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512
#define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 79388d49c256..a83699de01ec 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -70,14 +70,24 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
+ const char *prop;
int irq;
int err;
u32 irqf;
u32 val;
+ /* Apple ARM64 platforms have their own idea of board type, passed in
+ * via the device tree. They also have an antenna SKU parameter
+ */
+ if (!of_property_read_string(np, "brcm,board-type", &prop))
+ settings->board_type = prop;
+
+ if (!of_property_read_string(np, "apple,antenna-sku", &prop))
+ settings->antenna_sku = prop;
+
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
- if (root) {
+ if (root && !settings->board_type) {
char *board_type;
const char *tmp;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 97f0f13dfe50..80083f9ea311 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -59,6 +59,8 @@ BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
+BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
@@ -66,6 +68,7 @@ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.clm_blob");
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
@@ -87,6 +90,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
+ BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
+ BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
@@ -118,6 +123,12 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
+#define BRCMF_PCIE_64_PCIE2REG_INTMASK 0xC14
+#define BRCMF_PCIE_64_PCIE2REG_MAILBOXINT 0xC30
+#define BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK 0xC34
+#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0 0xA20
+#define BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1 0xA24
+
#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02
@@ -137,6 +148,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
+#define BRCMF_PCIE_MB_INT_FN0 (BRCMF_PCIE_MB_INT_FN0_0 | \
+ BRCMF_PCIE_MB_INT_FN0_1)
#define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
BRCMF_PCIE_MB_INT_D2H0_DB1 | \
BRCMF_PCIE_MB_INT_D2H1_DB0 | \
@@ -146,6 +159,40 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_PCIE_MB_INT_D2H3_DB0 | \
BRCMF_PCIE_MB_INT_D2H3_DB1)
+#define BRCMF_PCIE_64_MB_INT_D2H0_DB0 0x1
+#define BRCMF_PCIE_64_MB_INT_D2H0_DB1 0x2
+#define BRCMF_PCIE_64_MB_INT_D2H1_DB0 0x4
+#define BRCMF_PCIE_64_MB_INT_D2H1_DB1 0x8
+#define BRCMF_PCIE_64_MB_INT_D2H2_DB0 0x10
+#define BRCMF_PCIE_64_MB_INT_D2H2_DB1 0x20
+#define BRCMF_PCIE_64_MB_INT_D2H3_DB0 0x40
+#define BRCMF_PCIE_64_MB_INT_D2H3_DB1 0x80
+#define BRCMF_PCIE_64_MB_INT_D2H4_DB0 0x100
+#define BRCMF_PCIE_64_MB_INT_D2H4_DB1 0x200
+#define BRCMF_PCIE_64_MB_INT_D2H5_DB0 0x400
+#define BRCMF_PCIE_64_MB_INT_D2H5_DB1 0x800
+#define BRCMF_PCIE_64_MB_INT_D2H6_DB0 0x1000
+#define BRCMF_PCIE_64_MB_INT_D2H6_DB1 0x2000
+#define BRCMF_PCIE_64_MB_INT_D2H7_DB0 0x4000
+#define BRCMF_PCIE_64_MB_INT_D2H7_DB1 0x8000
+
+#define BRCMF_PCIE_64_MB_INT_D2H_DB (BRCMF_PCIE_64_MB_INT_D2H0_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H0_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H1_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H1_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H2_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H2_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H3_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H3_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H4_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H4_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H5_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H5_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H6_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H6_DB1 | \
+ BRCMF_PCIE_64_MB_INT_D2H7_DB0 | \
+ BRCMF_PCIE_64_MB_INT_D2H7_DB1)
+
#define BRCMF_PCIE_SHARED_VERSION_7 7
#define BRCMF_PCIE_MIN_SHARED_VERSION 5
#define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
@@ -255,12 +302,24 @@ struct brcmf_pcie_core_info {
u32 wrapbase;
};
+#define BRCMF_OTP_MAX_PARAM_LEN 16
+
+struct brcmf_otp_params {
+ char module[BRCMF_OTP_MAX_PARAM_LEN];
+ char vendor[BRCMF_OTP_MAX_PARAM_LEN];
+ char version[BRCMF_OTP_MAX_PARAM_LEN];
+ bool valid;
+};
+
struct brcmf_pciedev_info {
enum brcmf_pcie_state state;
bool in_irq;
struct pci_dev *pdev;
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
+ char clm_name[BRCMF_FW_NAME_LEN];
+ const struct firmware *clm_fw;
+ const struct brcmf_pcie_reginfo *reginfo;
void __iomem *regs;
void __iomem *tcm;
u32 ram_base;
@@ -280,6 +339,7 @@ struct brcmf_pciedev_info {
void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value);
struct brcmf_mp_device *settings;
+ struct brcmf_otp_params otp;
};
struct brcmf_pcie_ringbuf {
@@ -346,11 +406,49 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
+struct brcmf_pcie_reginfo {
+ u32 intmask;
+ u32 mailboxint;
+ u32 mailboxmask;
+ u32 h2d_mailbox_0;
+ u32 h2d_mailbox_1;
+ u32 int_d2h_db;
+ u32 int_fn0;
+};
+
+static const struct brcmf_pcie_reginfo brcmf_reginfo_default = {
+ .intmask = BRCMF_PCIE_PCIE2REG_INTMASK,
+ .mailboxint = BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ .mailboxmask = BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+ .h2d_mailbox_0 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0,
+ .h2d_mailbox_1 = BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1,
+ .int_d2h_db = BRCMF_PCIE_MB_INT_D2H_DB,
+ .int_fn0 = BRCMF_PCIE_MB_INT_FN0,
+};
+
+static const struct brcmf_pcie_reginfo brcmf_reginfo_64 = {
+ .intmask = BRCMF_PCIE_64_PCIE2REG_INTMASK,
+ .mailboxint = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT,
+ .mailboxmask = BRCMF_PCIE_64_PCIE2REG_MAILBOXMASK,
+ .h2d_mailbox_0 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_0,
+ .h2d_mailbox_1 = BRCMF_PCIE_64_PCIE2REG_H2D_MAILBOX_1,
+ .int_d2h_db = BRCMF_PCIE_64_MB_INT_D2H_DB,
+ .int_fn0 = 0,
+};
+
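The two tables above let the interrupt and mailbox paths look registers up through devinfo->reginfo instead of hard-coded constants, so PCIe cores at rev 64 or newer can use the relocated register block without touching the hot paths. A tiny userspace model of that indirection follows; the default-layout offsets are placeholders, and only the rev-64 mailbox offsets match the defines above.

#include <stdio.h>

/* One register-offset table per hardware generation */
struct reginfo {
    unsigned int mailboxint;
    unsigned int mailboxmask;
};

static const struct reginfo reginfo_default = {
    .mailboxint  = 0x48,    /* placeholder value */
    .mailboxmask = 0x4c,    /* placeholder value */
};

static const struct reginfo reginfo_64 = {
    .mailboxint  = 0xc30,
    .mailboxmask = 0xc34,
};

/* Picked once at probe time from the PCIe core revision; every register
 * access afterwards goes through the pointer, not a compile-time constant. */
static const struct reginfo *select_reginfo(unsigned int core_rev)
{
    return core_rev >= 64 ? &reginfo_64 : &reginfo_default;
}

int main(void)
{
    printf("rev 66 mailboxint @ 0x%x\n", select_reginfo(66)->mailboxint);
    printf("rev 23 mailboxint @ 0x%x\n", select_reginfo(23)->mailboxint);
    return 0;
}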
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq);
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
+static u16
+brcmf_pcie_read_reg16(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+{
+ void __iomem *address = devinfo->regs + reg_offset;
+
+ return ioread16(address);
+}
+
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
@@ -496,6 +594,8 @@ brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
}
+#define READCC32(devinfo, reg) brcmf_pcie_read_reg32(devinfo, \
+ CHIPCREGOFFS(reg))
#define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
CHIPCREGOFFS(reg), value)
@@ -779,30 +879,29 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
{
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0);
}
static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
{
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
- BRCMF_PCIE_MB_INT_D2H_DB |
- BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask,
+ devinfo->reginfo->int_d2h_db |
+ devinfo->reginfo->int_fn0);
}
static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
{
if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
brcmf_pcie_write_reg32(devinfo,
- BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
+ devinfo->reginfo->h2d_mailbox_1, 1);
}
static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
- if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
+ if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) {
brcmf_pcie_intr_disable(devinfo);
brcmf_dbg(PCIE, "Enter\n");
return IRQ_WAKE_THREAD;
@@ -817,15 +916,14 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
u32 status;
devinfo->in_irq = true;
- status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
brcmf_dbg(PCIE, "Enter %x\n", status);
if (status) {
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint,
status);
- if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1))
+ if (status & devinfo->reginfo->int_fn0)
brcmf_pcie_handle_mb_data(devinfo);
- if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
+ if (status & devinfo->reginfo->int_d2h_db) {
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
brcmf_proto_msgbuf_rx_trigger(
&devinfo->pdev->dev);
@@ -884,8 +982,8 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
if (devinfo->in_irq)
brcmf_err(bus, "Still in IRQ (processing) !!!\n");
- status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
+ status = brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status);
devinfo->irq_allocated = false;
}
@@ -937,7 +1035,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
brcmf_dbg(PCIE, "RING !\n");
 /* Any arbitrary value will do, let's use 1 */
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
+ brcmf_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1);
return 0;
}
@@ -1382,23 +1480,25 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
return 0;
}
-static
-int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_pcie_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
- fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
- brcmf_pcie_fwnames,
- ARRAY_SIZE(brcmf_pcie_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
+ switch (type) {
+ case BRCMF_BLOB_CLM:
+ *fw = devinfo->clm_fw;
+ devinfo->clm_fw = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!*fw)
+ return -ENOENT;
- kfree(fwreq);
return 0;
}
@@ -1445,7 +1545,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.wowl_config = brcmf_pcie_wowl_config,
.get_ramsize = brcmf_pcie_get_ramsize,
.get_memdump = brcmf_pcie_get_memdump,
- .get_fwname = brcmf_pcie_get_fwname,
+ .get_blob = brcmf_pcie_get_blob,
.reset = brcmf_pcie_reset,
};
@@ -1698,15 +1798,22 @@ static int brcmf_pcie_buscoreprep(void *ctx)
static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
- u32 val;
+ struct brcmf_core *core;
+ u32 val, reg;
devinfo->ci = chip;
brcmf_pcie_reset_device(devinfo);
- val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ /* reginfo is not ready yet */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_PCIE2);
+ if (core->rev >= 64)
+ reg = BRCMF_PCIE_64_PCIE2REG_MAILBOXINT;
+ else
+ reg = BRCMF_PCIE_PCIE2REG_MAILBOXINT;
+
+ val = brcmf_pcie_read_reg32(devinfo, reg);
if (val != 0xffffffff)
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
- val);
+ brcmf_pcie_write_reg32(devinfo, reg, val);
return 0;
}
@@ -1729,8 +1836,206 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
.write32 = brcmf_pcie_buscore_write32,
};
+#define BRCMF_OTP_SYS_VENDOR 0x15
+#define BRCMF_OTP_BRCM_CIS 0x80
+
+#define BRCMF_OTP_VENDOR_HDR 0x00000008
+
+static int
+brcmf_pcie_parse_otp_sys_vendor(struct brcmf_pciedev_info *devinfo,
+ u8 *data, size_t size)
+{
+ int idx = 4;
+ const char *chip_params;
+ const char *board_params;
+ const char *p;
+
+ /* 4-byte header and two empty strings */
+ if (size < 6)
+ return -EINVAL;
+
+ if (get_unaligned_le32(data) != BRCMF_OTP_VENDOR_HDR)
+ return -EINVAL;
+
+ chip_params = &data[idx];
+
+ /* Skip first string, including terminator */
+ idx += strnlen(chip_params, size - idx) + 1;
+ if (idx >= size)
+ return -EINVAL;
+
+ board_params = &data[idx];
+
+ /* Skip to terminator of second string */
+ idx += strnlen(board_params, size - idx);
+ if (idx >= size)
+ return -EINVAL;
+
+ /* At this point both strings are guaranteed NUL-terminated */
+ brcmf_dbg(PCIE, "OTP: chip_params='%s' board_params='%s'\n",
+ chip_params, board_params);
+
+ p = skip_spaces(board_params);
+ while (*p) {
+ char tag = *p++;
+ const char *end;
+ size_t len;
+
+ if (*p++ != '=') /* implicit NUL check */
+ return -EINVAL;
+
+ /* *p might be NUL here, if so end == p and len == 0 */
+ end = strchrnul(p, ' ');
+ len = end - p;
+
+ /* leave 1 byte for NUL in destination string */
+ if (len > (BRCMF_OTP_MAX_PARAM_LEN - 1))
+ return -EINVAL;
+
+ /* Copy len characters plus a NUL terminator */
+ switch (tag) {
+ case 'M':
+ strscpy(devinfo->otp.module, p, len + 1);
+ break;
+ case 'V':
+ strscpy(devinfo->otp.vendor, p, len + 1);
+ break;
+ case 'm':
+ strscpy(devinfo->otp.version, p, len + 1);
+ break;
+ }
+
+ /* Skip to next arg, if any */
+ p = skip_spaces(end);
+ }
+
+ brcmf_dbg(PCIE, "OTP: module=%s vendor=%s version=%s\n",
+ devinfo->otp.module, devinfo->otp.vendor,
+ devinfo->otp.version);
+
+ if (!devinfo->otp.module[0] ||
+ !devinfo->otp.vendor[0] ||
+ !devinfo->otp.version[0])
+ return -EINVAL;
+
+ devinfo->otp.valid = true;
+ return 0;
+}
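brcmf_pcie_parse_otp_sys_vendor() above pulls module, vendor and version strings out of a space-separated list of single-letter 'tag=value' pairs (M, V, m). A standalone sketch of that tokenizing loop, using only standard C string functions and the example values from the comment later in this patch (RASP, m, 6.11):

#include <stdio.h>
#include <string.h>

#define PARAM_LEN 16

struct otp_params {
    char module[PARAM_LEN];
    char vendor[PARAM_LEN];
    char version[PARAM_LEN];
};

/* Parse "M=RASP V=m m=6.11" style board parameters.
 * Returns 0 on success, -1 on a malformed token or oversized value. */
static int parse_board_params(const char *p, struct otp_params *out)
{
    memset(out, 0, sizeof(*out));

    p += strspn(p, " ");                /* skip leading spaces */
    while (*p) {
        char tag = *p++;
        const char *end;
        size_t len;

        if (*p++ != '=')                /* every token is tag=value */
            return -1;

        end = strchr(p, ' ');           /* value runs to space or NUL */
        if (!end)
            end = p + strlen(p);
        len = end - p;
        if (len >= PARAM_LEN)           /* leave room for the NUL */
            return -1;

        switch (tag) {
        case 'M': memcpy(out->module, p, len);  break;
        case 'V': memcpy(out->vendor, p, len);  break;
        case 'm': memcpy(out->version, p, len); break;
        default:  break;                /* unknown tags are ignored */
        }

        p = end + strspn(end, " ");     /* advance to the next token */
    }
    return 0;
}

int main(void)
{
    struct otp_params otp;

    if (!parse_board_params("M=RASP V=m m=6.11", &otp))
        printf("module=%s vendor=%s version=%s\n",
               otp.module, otp.vendor, otp.version);
    return 0;
}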
+
+static int
+brcmf_pcie_parse_otp(struct brcmf_pciedev_info *devinfo, u8 *otp, size_t size)
+{
+ int p = 0;
+ int ret = -EINVAL;
+
+ brcmf_dbg(PCIE, "parse_otp size=%zd\n", size);
+
+ while (p < (size - 1)) {
+ u8 type = otp[p];
+ u8 length = otp[p + 1];
+
+ if (type == 0)
+ break;
+
+ if ((p + 2 + length) > size)
+ break;
+
+ switch (type) {
+ case BRCMF_OTP_SYS_VENDOR:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): SYS_VENDOR\n",
+ p, length);
+ ret = brcmf_pcie_parse_otp_sys_vendor(devinfo,
+ &otp[p + 2],
+ length);
+ break;
+ case BRCMF_OTP_BRCM_CIS:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): BRCM_CIS\n",
+ p, length);
+ break;
+ default:
+ brcmf_dbg(PCIE, "OTP @ 0x%x (%d): Unknown type 0x%x\n",
+ p, length, type);
+ break;
+ }
+
+ p += 2 + length;
+ }
+
+ return ret;
+}
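One level up, brcmf_pcie_parse_otp() treats the OTP image as a flat type/length/value stream that ends at a zero type or at a record that would overrun the buffer. The same walk, reduced to a userspace sketch over a fabricated buffer:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define OTP_SYS_VENDOR 0x15
#define OTP_BRCM_CIS   0x80

/* Walk a type/length/value buffer: [type][len][len bytes of value]...,
 * stopping at type 0 or at a record that would run past the end. */
static void walk_otp(const uint8_t *otp, size_t size)
{
    size_t p = 0;

    while (p + 1 < size) {
        uint8_t type = otp[p];
        uint8_t len = otp[p + 1];

        if (type == 0)
            break;
        if (p + 2 + len > size)
            break;                      /* truncated record */

        printf("record @%zu type=0x%02x len=%u\n", p, type, len);
        p += 2 + len;
    }
}

int main(void)
{
    /* Fabricated example: one 4-byte SYS_VENDOR record, then end marker */
    const uint8_t buf[] = { OTP_SYS_VENDOR, 4, 'a', 'b', 'c', 'd', 0x00 };

    walk_otp(buf, sizeof(buf));
    return 0;
}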
+
+static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
+{
+ const struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
+ u32 coreid, base, words, idx, sromctl;
+ u16 *otp;
+ struct brcmf_core *core;
+ int ret;
+
+ switch (devinfo->ci->chip) {
+ case BRCM_CC_4378_CHIP_ID:
+ coreid = BCMA_CORE_GCI;
+ base = 0x1120;
+ words = 0x170;
+ break;
+ default:
+ /* OTP not supported on this chip */
+ return 0;
+ }
+
+ core = brcmf_chip_get_core(devinfo->ci, coreid);
+ if (!core) {
+ brcmf_err(bus, "No OTP core\n");
+ return -ENODEV;
+ }
+
+ if (coreid == BCMA_CORE_CHIPCOMMON) {
+ /* Chips with OTP accessed via ChipCommon need additional
+ * handling to access the OTP
+ */
+ brcmf_pcie_select_core(devinfo, coreid);
+ sromctl = READCC32(devinfo, sromcontrol);
+
+ if (!(sromctl & BCMA_CC_SROM_CONTROL_OTP_PRESENT)) {
+ /* Chip lacks OTP, try without it... */
+ brcmf_err(bus,
+ "OTP unavailable, using default firmware\n");
+ return 0;
+ }
+
+ /* Map OTP to shadow area */
+ WRITECC32(devinfo, sromcontrol,
+ sromctl | BCMA_CC_SROM_CONTROL_OTPSEL);
+ }
+
+ otp = kcalloc(words, sizeof(u16), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ /* Map bus window to SROM/OTP shadow area in core */
+ base = brcmf_pcie_buscore_prep_addr(devinfo->pdev, base + core->base);
+
+ brcmf_dbg(PCIE, "OTP data:\n");
+ for (idx = 0; idx < words; idx++) {
+ otp[idx] = brcmf_pcie_read_reg16(devinfo, base + 2 * idx);
+ brcmf_dbg(PCIE, "[%8x] 0x%04x\n", base + 2 * idx, otp[idx]);
+ }
+
+ if (coreid == BCMA_CORE_CHIPCOMMON) {
+ brcmf_pcie_select_core(devinfo, coreid);
+ WRITECC32(devinfo, sromcontrol, sromctl);
+ }
+
+ ret = brcmf_pcie_parse_otp(devinfo, (u8 *)otp, 2 * words);
+ kfree(otp);
+
+ return ret;
+}
+
#define BRCMF_PCIE_FW_CODE 0
#define BRCMF_PCIE_FW_NVRAM 1
+#define BRCMF_PCIE_FW_CLM 2
static void brcmf_pcie_setup(struct device *dev, int ret,
struct brcmf_fw_request *fwreq)
@@ -1755,6 +2060,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
+ devinfo->clm_fw = fwreq->items[BRCMF_PCIE_FW_CLM].binary;
kfree(fwreq);
ret = brcmf_chip_get_raminfo(devinfo->ci);
@@ -1830,6 +2136,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
struct brcmf_fw_name fwnames[] = {
{ ".bin", devinfo->fw_name },
{ ".txt", devinfo->nvram_name },
+ { ".clm_blob", devinfo->clm_name },
};
fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
@@ -1842,11 +2149,51 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
- fwreq->board_type = devinfo->settings->board_type;
+ fwreq->items[BRCMF_PCIE_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_PCIE_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
+ /* Apple platforms with fancy firmware/NVRAM selection */
+ if (devinfo->settings->board_type &&
+ devinfo->settings->antenna_sku &&
+ devinfo->otp.valid) {
+ const struct brcmf_otp_params *otp = &devinfo->otp;
+ struct device *dev = &devinfo->pdev->dev;
+ const char **bt = fwreq->board_types;
+
+ brcmf_dbg(PCIE, "Apple board: %s\n",
+ devinfo->settings->board_type);
+
+ /* Example: apple,shikoku-RASP-m-6.11-X3 */
+ bt[0] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor, otp->version,
+ devinfo->settings->antenna_sku);
+ bt[1] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor, otp->version);
+ bt[2] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s-%s",
+ devinfo->settings->board_type,
+ otp->module, otp->vendor);
+ bt[3] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ devinfo->settings->board_type,
+ otp->module);
+ bt[4] = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
+ devinfo->settings->board_type,
+ devinfo->settings->antenna_sku);
+ bt[5] = devinfo->settings->board_type;
+
+ if (!bt[0] || !bt[1] || !bt[2] || !bt[3] || !bt[4]) {
+ kfree(fwreq);
+ return NULL;
+ }
+ } else {
+ brcmf_dbg(PCIE, "Board: %s\n", devinfo->settings->board_type);
+ fwreq->board_types[0] = devinfo->settings->board_type;
+ }
+
return fwreq;
}
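To make the fallback order concrete: with the example identifiers from the comment above (board apple,shikoku, module RASP, vendor m, version 6.11, antenna SKU X3), the NVRAM item is probed under increasingly generic board names before the canonical path. The short sketch below only prints that candidate list; the base path is the 4378 one defined earlier in this patch, and none of the names are guaranteed to match a shipped file.

#include <stdio.h>

int main(void)
{
    /* Example identifiers taken from the comment in the patch above */
    const char *board = "apple,shikoku";
    const char *module = "RASP", *vendor = "m", *version = "6.11";
    const char *sku = "X3";
    char bt[6][64];
    int i;

    /* Most specific first, mirroring the board_types[] ordering */
    snprintf(bt[0], sizeof(bt[0]), "%s-%s-%s-%s-%s",
             board, module, vendor, version, sku);
    snprintf(bt[1], sizeof(bt[1]), "%s-%s-%s-%s",
             board, module, vendor, version);
    snprintf(bt[2], sizeof(bt[2]), "%s-%s-%s", board, module, vendor);
    snprintf(bt[3], sizeof(bt[3]), "%s-%s", board, module);
    snprintf(bt[4], sizeof(bt[4]), "%s-%s", board, sku);
    snprintf(bt[5], sizeof(bt[5]), "%s", board);

    /* Each name is tried in turn, falling back to the plain
     * "brcm/brcmfmac4378b1-pcie.txt" path last. */
    for (i = 0; i < 6; i++)
        printf("brcm/brcmfmac4378b1-pcie.%s.txt\n", bt[i]);
    return 0;
}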
@@ -1857,6 +2204,7 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct brcmf_fw_request *fwreq;
struct brcmf_pciedev_info *devinfo;
struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_core *core;
struct brcmf_bus *bus;
brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
@@ -1876,6 +2224,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
+ core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+ if (core->rev >= 64)
+ devinfo->reginfo = &brcmf_reginfo_64;
+ else
+ devinfo->reginfo = &brcmf_reginfo_default;
+
pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
if (pcie_bus_dev == NULL) {
ret = -ENOMEM;
@@ -1918,6 +2272,12 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto fail_bus;
+ ret = brcmf_pcie_read_otp(devinfo);
+ if (ret) {
+ brcmf_err(bus, "failed to parse OTP\n");
+ goto fail_brcmf;
+ }
+
fwreq = brcmf_pcie_prepare_fw_request(devinfo);
if (!fwreq) {
ret = -ENOMEM;
@@ -1981,6 +2341,7 @@ brcmf_pcie_remove(struct pci_dev *pdev)
brcmf_pcie_release_ringbuffers(devinfo);
brcmf_pcie_reset_device(devinfo);
brcmf_pcie_release_resource(devinfo);
+ release_firmware(devinfo->clm_fw);
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
@@ -2038,7 +2399,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev)
brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
/* Check if device is still up and running, if so we are ready */
- if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
+ if (brcmf_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) {
brcmf_dbg(PCIE, "Try to wakeup device....\n");
if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
goto cleanup;
@@ -2105,6 +2466,9 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID),
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
index fabfbb0b40b0..d0a7465be586 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c
@@ -158,12 +158,12 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
struct brcmf_pno_macaddr_le pfn_mac;
u8 *mac_addr = NULL;
u8 *mac_mask = NULL;
- int err, i;
+ int err, i, ri;
- for (i = 0; i < pi->n_reqs; i++)
- if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- mac_addr = pi->reqs[i]->mac_addr;
- mac_mask = pi->reqs[i]->mac_addr_mask;
+ for (ri = 0; ri < pi->n_reqs; ri++)
+ if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ mac_addr = pi->reqs[ri]->mac_addr;
+ mac_mask = pi->reqs[ri]->mac_addr_mask;
break;
}
@@ -185,7 +185,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
pfn_mac.mac[0] |= 0x02;
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
- pi->reqs[i]->reqid, pfn_mac.mac);
+ pi->reqs[ri]->reqid, pfn_mac.mac);
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
sizeof(pfn_mac));
if (err)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8968809399c7..465d95d83759 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -618,6 +618,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio");
BRCMF_FW_DEF(43430B0, "brcmfmac43430b0-sdio");
+BRCMF_FW_CLM_DEF(43439, "brcmfmac43439-sdio");
BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio");
@@ -657,6 +658,7 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
+ BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
@@ -4129,23 +4131,24 @@ brcmf_sdio_watchdog(struct timer_list *t)
}
}
-static
-int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_sdio_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
- fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
- brcmf_sdio_fwnames,
- ARRAY_SIZE(brcmf_sdio_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
+ switch (type) {
+ case BRCMF_BLOB_CLM:
+ *fw = sdiodev->clm_fw;
+ sdiodev->clm_fw = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!*fw)
+ return -ENOENT;
- kfree(fwreq);
return 0;
}
@@ -4180,13 +4183,14 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.wowl_config = brcmf_sdio_wowl_config,
.get_ramsize = brcmf_sdio_bus_get_ramsize,
.get_memdump = brcmf_sdio_bus_get_memdump,
- .get_fwname = brcmf_sdio_get_fwname,
+ .get_blob = brcmf_sdio_get_blob,
.debugfs_create = brcmf_sdio_debugfs_create,
.reset = brcmf_sdio_bus_reset
};
#define BRCMF_SDIO_FW_CODE 0
#define BRCMF_SDIO_FW_NVRAM 1
+#define BRCMF_SDIO_FW_CLM 2
static void brcmf_sdio_firmware_callback(struct device *dev, int err,
struct brcmf_fw_request *fwreq)
@@ -4209,6 +4213,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
code = fwreq->items[BRCMF_SDIO_FW_CODE].binary;
nvram = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.data;
nvram_len = fwreq->items[BRCMF_SDIO_FW_NVRAM].nv_data.len;
+ sdiod->clm_fw = fwreq->items[BRCMF_SDIO_FW_CLM].binary;
kfree(fwreq);
/* try to download image and nvram to the dongle */
@@ -4407,6 +4412,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
struct brcmf_fw_name fwnames[] = {
{ ".bin", bus->sdiodev->fw_name },
{ ".txt", bus->sdiodev->nvram_name },
+ { ".clm_blob", bus->sdiodev->clm_name },
};
fwreq = brcmf_fw_alloc_request(bus->ci->chip, bus->ci->chiprev,
@@ -4418,7 +4424,9 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
- fwreq->board_type = bus->sdiodev->settings->board_type;
+ fwreq->items[BRCMF_SDIO_FW_CLM].type = BRCMF_FW_TYPE_BINARY;
+ fwreq->items[BRCMF_SDIO_FW_CLM].flags = BRCMF_FW_REQF_OPTIONAL;
+ fwreq->board_types[0] = bus->sdiodev->settings->board_type;
return fwreq;
}
@@ -4574,6 +4582,8 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus->sdiodev->settings)
brcmf_release_module_param(bus->sdiodev->settings);
+ release_firmware(bus->sdiodev->clm_fw);
+ bus->sdiodev->clm_fw = NULL;
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
kfree(bus);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 47351ff458ca..b76d34d36bde 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -186,9 +186,11 @@ struct brcmf_sdio_dev {
struct sg_table sgtable;
char fw_name[BRCMF_FW_NAME_LEN];
char nvram_name[BRCMF_FW_NAME_LEN];
+ char clm_name[BRCMF_FW_NAME_LEN];
bool wowl_enabled;
enum brcmf_sdiod_state state;
struct brcmf_sdiod_freezer *freezer;
+ const struct firmware *clm_fw;
};
/* sdio core registers */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 9fb68c2dc7e3..85e18fb9c497 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1154,24 +1154,11 @@ error:
return NULL;
}
-static
-int brcmf_usb_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
+static int brcmf_usb_get_blob(struct device *dev, const struct firmware **fw,
+ enum brcmf_blob_type type)
{
- struct brcmf_bus *bus = dev_get_drvdata(dev);
- struct brcmf_fw_request *fwreq;
- struct brcmf_fw_name fwnames[] = {
- { ext, fw_name },
- };
-
- fwreq = brcmf_fw_alloc_request(bus->chip, bus->chiprev,
- brcmf_usb_fwnames,
- ARRAY_SIZE(brcmf_usb_fwnames),
- fwnames, ARRAY_SIZE(fwnames));
- if (!fwreq)
- return -ENOMEM;
-
- kfree(fwreq);
- return 0;
+ /* No blobs for USB devices... */
+ return -ENOENT;
}
static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
@@ -1180,7 +1167,7 @@ static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
- .get_fwname = brcmf_usb_get_fwname,
+ .get_blob = brcmf_usb_get_blob,
};
#define BRCMF_USB_FW_CODE 0
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
index ae1f3ad40d45..2b0df07ced74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h
@@ -123,7 +123,7 @@
*/
/********************************************************************
- * Phy/Core Configuration. Defines macros to to check core phy/rev *
+ * Phy/Core Configuration. Defines macros to check core phy/rev *
* compile-time configuration. Defines default core support. *
* ******************************************************************
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index ed0b707f0cdf..f4939cf62767 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -51,9 +51,12 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_4378_CHIP_ID 0x4378
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
+#define CY_CC_43439_CHIP_ID 43439
#define CY_CC_43752_CHIP_ID 43752
+#define CY_CC_89459_CHIP_ID 0x4355
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -87,7 +90,9 @@
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
#define BRCM_PCIE_4371_DEVICE_ID 0x440d
-
+#define BRCM_PCIE_4378_DEVICE_ID 0x4425
+#define CY_PCIE_89459_DEVICE_ID 0x4415
+#define CY_PCIE_89459_RAW_DEVICE_ID 0x4355
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 5234511dac78..b0f23cf1a621 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5907,8 +5907,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5916,7 +5916,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
- strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
}
@@ -6529,7 +6529,7 @@ static struct pci_driver ipw2100_pci_driver = {
.shutdown = ipw2100_shutdown,
};
-/**
+/*
* Initialize the ipw2100 driver/module
*
 * @returns 0 if ok, < 0 errno code on error.
@@ -6561,7 +6561,7 @@ out:
return ret;
}
-/**
+/*
* Cleanup ipw2100 driver registration
*/
static void __exit ipw2100_exit(void)
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 029dacebe751..5b483de18c81 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -10424,8 +10424,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10434,7 +10434,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
- strlcpy(info->bus_info, pci_name(p->pci_dev),
+ strscpy(info->bus_info, pci_name(p->pci_dev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index 55cac934f4ee..09ddd21608d4 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -651,7 +651,7 @@ struct ipw_rx_notification {
struct notif_link_deterioration link_deterioration;
struct notif_calibration calibration;
struct notif_noise noise;
- u8 raw[0];
+ DECLARE_FLEX_ARRAY(u8, raw);
} u;
} __packed;
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 7964ef7d15f0..bec7bc273748 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -405,7 +405,7 @@ struct libipw_auth {
__le16 transaction;
__le16 status;
/* challenge */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_channel_switch {
@@ -423,7 +423,6 @@ struct libipw_action {
union {
struct libipw_action_exchange {
u8 token;
- struct libipw_info_element info_element[0];
} exchange;
struct libipw_channel_switch channel_switch;
@@ -441,7 +440,7 @@ struct libipw_disassoc {
struct libipw_probe_request {
struct libipw_hdr_3addr header;
/* SSID, supported rates */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_probe_response {
@@ -451,7 +450,7 @@ struct libipw_probe_response {
__le16 capability;
/* SSID, supported rates, FH params, DS params,
* CF params, IBSS params, TIM (if beacon), RSN */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
/* Alias beacon for probe_response */
@@ -462,7 +461,7 @@ struct libipw_assoc_request {
__le16 capability;
__le16 listen_interval;
/* SSID, supported rates, RSN */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_reassoc_request {
@@ -470,7 +469,7 @@ struct libipw_reassoc_request {
__le16 capability;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_assoc_response {
@@ -479,7 +478,7 @@ struct libipw_assoc_response {
__le16 status;
__le16 aid;
/* supported rates */
- struct libipw_info_element info_element[];
+ u8 variable[];
} __packed;
struct libipw_txb {
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index 7a684b76f39b..48d6870bbf4e 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -1329,8 +1329,8 @@ static int libipw_handle_assoc_resp(struct libipw_device *ieee, struct libipw_as
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
- if (libipw_parse_info_param
- (frame->info_element, stats->len - sizeof(*frame), network))
+ if (libipw_parse_info_param((void *)frame->variable,
+ stats->len - sizeof(*frame), network))
return 1;
network->mode = 0;
@@ -1389,8 +1389,8 @@ static int libipw_network_init(struct libipw_device *ieee, struct libipw_probe_r
network->wpa_ie_len = 0;
network->rsn_ie_len = 0;
- if (libipw_parse_info_param
- (beacon->info_element, stats->len - sizeof(*beacon), network))
+ if (libipw_parse_info_param((void *)beacon->variable,
+ stats->len - sizeof(*beacon), network))
return 1;
network->mode = 0;
@@ -1510,7 +1510,7 @@ static void libipw_process_probe_response(struct libipw_device
struct libipw_network *target;
struct libipw_network *oldest = NULL;
#ifdef CONFIG_LIBIPW_DEBUG
- struct libipw_info_element *info_element = beacon->info_element;
+ struct libipw_info_element *info_element = (void *)beacon->variable;
#endif
unsigned long flags;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 846138d6e33d..7352d5b2095f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3254,7 +3254,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
if (count) {
char *p = buffer;
- strlcpy(buffer, buf, sizeof(buffer));
+ strscpy(buffer, buf, sizeof(buffer));
channel = simple_strtoul(p, NULL, 0);
if (channel)
params.channel = channel;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index c62f299b9e0a..718efb1aa1b0 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -1167,7 +1167,7 @@ il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
cpu_to_le32(new_rate);
repeat_rate--;
idx++;
- if (idx >= LINK_QUAL_MAX_RETRY_NUM)
- goto out;
}
il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
repeat_rate--;
}
-out:
lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 4a97310f8fee..28cf4e832152 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -1710,7 +1710,7 @@ struct il4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ DECLARE_FLEX_ARRAY(struct agg_tx_status, agg_status); /* for each agg frame */
} u;
} __packed;
@@ -3365,7 +3365,7 @@ struct il_rx_pkt {
struct il_compressed_ba_resp compressed_ba;
struct il_missed_beacon_notif missed_beacon;
__le32 status;
- u8 raw[0];
+ DECLARE_FLEX_ARRAY(u8, raw);
} u;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 04d27a26260b..341c17fe2af4 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -1870,15 +1870,15 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
goto done;
D_ASSOC("spatial multiplexing power save mode: %s\n",
- (sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
- (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
+ (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+ (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
"disabled");
sta_flags = il->stations[idx].sta.station_flags;
sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_STATIC:
sta_flags |= STA_FLG_MIMO_DIS_MSK;
break;
@@ -1888,7 +1888,7 @@ il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
case IEEE80211_SMPS_OFF:
break;
default:
- IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
+ IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index a647a406b87b..b20409f8c13a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -140,6 +140,7 @@ config IWLMEI
depends on INTEL_MEI
depends on PM
depends on CFG80211
+ depends on BROKEN
help
Enables the iwlmei kernel module.
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 8ff967edc8f0..110fda65bd21 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,13 +56,16 @@
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
+#define IWL_BZ_A_FM4_A_FW_PRE "iwlwifi-bz-a0-fm4-a0-"
#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
+#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0-"
#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
+#define IWL_BNJ_B_FM_B_FW_PRE "iwlwifi-BzBnj-b0-fm-b0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
@@ -119,8 +122,12 @@
IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_FM4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \
+ IWL_GL_B_FM_B_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
@@ -131,6 +138,8 @@
IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_B_FM_B_MODULE_FIRMWARE(api) \
+ IWL_BNJ_B_FM_B_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -240,7 +249,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
}, \
}
-#define IWL_DEVICE_BZ_COMMON \
+#define IWL_DEVICE_BZ \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.led_mode = IWL_LED_RF_STATE, \
@@ -276,16 +285,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.addr = LDBG_M2S_BUF_WRAP_CNT, \
.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
}, \
- }
-
-#define IWL_DEVICE_BZ \
- IWL_DEVICE_BZ_COMMON, \
+ }, \
.trans.umac_prph_offset = 0x300000, \
.trans.device_family = IWL_DEVICE_FAMILY_BZ, \
.trans.base_params = &iwl_ax210_base_params, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
- .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
+ .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -926,6 +932,13 @@ const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0 = {
+ .fw_name_pre = IWL_BZ_A_FM4_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.fw_name_pre = IWL_GL_A_FM_A_FW_PRE,
.uhb_supported = true,
@@ -933,6 +946,13 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_gl_b0_fm_b0 = {
+ .fw_name_pre = IWL_GL_B_FM_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
.fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
.uhb_supported = true,
@@ -974,6 +994,13 @@ const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
IWL_DEVICE_BZ,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+
+const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0 = {
+ .fw_name_pre = IWL_BNJ_B_FM_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -1007,3 +1034,6 @@ MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_B_FM_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 411a6f6638b4..fefaa414272b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -112,7 +112,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type ucode_type);
int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_priv *priv,
- const struct iwl_calib_hdr *cmd, int len);
+ const struct iwl_calib_cmd *cmd, size_t len);
void iwl_calib_free_results(struct iwl_priv *priv);
int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index a11884fa254b..f488620d2844 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -19,8 +19,7 @@
struct iwl_calib_result {
struct list_head list;
size_t cmd_len;
- struct iwl_calib_hdr hdr;
- /* data follows */
+ struct iwl_calib_cmd cmd;
};
struct statistics_general_data {
@@ -43,12 +42,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
int ret;
hcmd.len[0] = res->cmd_len;
- hcmd.data[0] = &res->hdr;
+ hcmd.data[0] = &res->cmd;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
ret = iwl_dvm_send_cmd(priv, &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d on calib cmd %d\n",
- ret, res->hdr.op_code);
+ ret, res->cmd.hdr.op_code);
return ret;
}
}
@@ -57,19 +56,22 @@ int iwl_send_calib_results(struct iwl_priv *priv)
}
int iwl_calib_set(struct iwl_priv *priv,
- const struct iwl_calib_hdr *cmd, int len)
+ const struct iwl_calib_cmd *cmd, size_t len)
{
struct iwl_calib_result *res, *tmp;
- res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
- GFP_ATOMIC);
+ if (check_sub_overflow(len, sizeof(*cmd), &len))
+ return -ENOMEM;
+
+ res = kmalloc(struct_size(res, cmd.data, len), GFP_ATOMIC);
if (!res)
return -ENOMEM;
- memcpy(&res->hdr, cmd, len);
- res->cmd_len = len;
+ res->cmd = *cmd;
+ memcpy(res->cmd.data, cmd->data, len);
+ res->cmd_len = struct_size(cmd, data, len);
list_for_each_entry(tmp, &priv->calib_results, list) {
- if (tmp->hdr.op_code == res->hdr.op_code) {
+ if (tmp->cmd.hdr.op_code == res->cmd.hdr.op_code) {
list_replace(&tmp->list, &res->list);
kfree(tmp);
return 0;
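The calib rework above replaces the open-coded "header plus trailing data" allocation with check_sub_overflow() and struct_size(), which keep the length arithmetic from wrapping. A userspace approximation of that pattern, using the compiler overflow builtin instead of the kernel helpers (struct and field names are invented for the sketch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Result record holding a copied command: fixed header + flexible payload */
struct calib_result {
    unsigned char op_code;
    size_t len;                 /* number of bytes in data[] */
    unsigned char data[];       /* flexible array member */
};

/* Allocate a result big enough for 'len' payload bytes, guarding the size
 * arithmetic against overflow (the role struct_size() plays in the hunk
 * above). Returns NULL on overflow or allocation failure. */
static struct calib_result *calib_result_alloc(unsigned char op_code,
                                               const unsigned char *payload,
                                               size_t len)
{
    struct calib_result *res;
    size_t total;

    if (__builtin_add_overflow(sizeof(*res), len, &total))
        return NULL;

    res = malloc(total);
    if (!res)
        return NULL;

    res->op_code = op_code;
    res->len = len;
    memcpy(res->data, payload, len);
    return res;
}

int main(void)
{
    const unsigned char payload[] = { 1, 2, 3, 4 };
    struct calib_result *res =
        calib_result_alloc(0x12, payload, sizeof(payload));

    if (res) {
        printf("op=0x%02x len=%zu first=%d\n",
               res->op_code, res->len, res->data[0]);
        free(res);
    }
    return 0;
}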
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index bbd574091201..1a9eadace188 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -696,6 +696,7 @@ struct iwl_priv {
/* Scan related variables */
unsigned long scan_start;
unsigned long scan_start_tsf;
+ size_t scan_cmd_size;
void *scan_cmd;
enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index baffa1cbe8fc..687c906a9d72 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2019 - 2020 Intel Corporation
+ * Copyright (C) 2019 - 2020, 2022 Intel Corporation
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -1242,7 +1242,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
@@ -1297,7 +1297,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
if (!conf_is_ht(conf) || !sta->deflink.ht_cap.ht_supported)
return -1;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 2d38227dfdd2..a7e85c5c8c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -626,7 +626,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 active_chains;
u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
int ret;
- int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
+ size_t scan_cmd_size = sizeof(struct iwl_scan_cmd) +
MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
priv->fw->ucode_capa.max_probe_length;
const u8 *ssid = NULL;
@@ -649,9 +649,15 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
"fail to allocate memory for scan\n");
return -ENOMEM;
}
+ priv->scan_cmd_size = scan_cmd_size;
+ }
+ if (priv->scan_cmd_size < scan_cmd_size) {
+ IWL_DEBUG_SCAN(priv,
+ "memory needed for scan grew unexpectedly\n");
+ return -ENOMEM;
}
scan = priv->scan_cmd;
- memset(scan, 0, scan_cmd_size);
+ memset(scan, 0, priv->scan_cmd_size);
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
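
The scan.c change above caches the size that was actually allocated for the scan command and refuses to reuse the buffer if a later request would need more, clearing the full cached size rather than the currently computed one. A minimal sketch of that reuse guard, with hypothetical demo_* names:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_ctx {
	void *scan_cmd;		/* allocated once, reused afterwards */
	size_t scan_cmd_size;	/* size recorded at allocation time */
};

static void *demo_get_scan_buf(struct demo_ctx *ctx, size_t needed)
{
	if (!ctx->scan_cmd) {
		ctx->scan_cmd = kmalloc(needed, GFP_KERNEL);
		if (!ctx->scan_cmd)
			return NULL;
		ctx->scan_cmd_size = needed;
	}

	/* the cached buffer must still cover the current request */
	if (ctx->scan_cmd_size < needed)
		return NULL;

	/* clear the whole allocation, not just the part needed now */
	memset(ctx->scan_cmd, 0, ctx->scan_cmd_size);
	return ctx->scan_cmd;
}
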
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 476068c0abb7..cef43cf80620 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2022 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -161,12 +161,12 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
sta->addr,
- (sta->smps_mode == IEEE80211_SMPS_STATIC) ?
+ (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ?
"static" :
- (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
+ (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ?
"dynamic" : "disabled");
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_STATIC:
*flags |= STA_FLG_MIMO_DIS_MSK;
break;
@@ -176,7 +176,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
case IEEE80211_SMPS_OFF:
break;
default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
+ IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index 4b27a53d0bb4..bb13ca5d666c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -356,18 +356,18 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt, void *data)
{
struct iwl_priv *priv = data;
- struct iwl_calib_hdr *hdr;
+ struct iwl_calib_cmd *cmd;
if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
return true;
}
- hdr = (struct iwl_calib_hdr *)pkt->data;
+ cmd = (struct iwl_calib_cmd *)pkt->data;
- if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
+ if (iwl_calib_set(priv, cmd, iwl_rx_packet_payload_len(pkt)))
IWL_ERR(priv, "Failed to record calibration data %d\n",
- hdr->op_code);
+ cmd->hdr.op_code);
return false;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index c78d2f1c722c..0b052c2e563a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#ifndef __iwl_fw_api_commands_h__
#define __iwl_fw_api_commands_h__
@@ -20,6 +20,8 @@
* &enum iwl_phy_ops_subcmd_ids
* @DATA_PATH_GROUP: data path group, uses command IDs from
* &enum iwl_data_path_subcmd_ids
+ * @SCAN_GROUP: scan group, uses command IDs from
+ * &enum iwl_scan_subcmd_ids
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
* @LOCATION_GROUP: location group, uses command IDs from
* &enum iwl_location_subcmd_ids
@@ -36,6 +38,7 @@ enum iwl_mvm_command_groups {
MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
+ SCAN_GROUP = 0x6,
NAN_GROUP = 0x7,
LOCATION_GROUP = 0x8,
PROT_OFFLOAD_GROUP = 0xb,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 4cd9ab23954e..df0833890e55 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -766,6 +766,65 @@ struct iwl_wowlan_status_v12 {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+/**
+ * struct iwl_wowlan_info_notif - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @reserved1: reserved
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @reserved2: reserved
+ */
+struct iwl_wowlan_info_notif {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 reserved2[2];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
+ * @wake_packet_length: wakeup packet length
+ * @station_id: station id
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_wake_pkt_notif {
+ __le32 wake_packet_length;
+ u8 station_id;
+ u8 reserved[3];
+ u8 wake_packet[1];
+} __packed; /* WOWLAN_WAKE_PKT_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_d3_end_notif - d3 end notification
+ * @flags: See &enum iwl_d0i3_flags
+ */
+struct iwl_mvm_d3_end_notif {
+ __le32 flags;
+} __packed;
+
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */
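
The notification layouts added above carry little-endian fields and, for the wake packet, a trailing buffer that the firmware pads out to the notification length. A minimal sketch of consuming such a notification safely (hypothetical demo_* names, not the driver's parser):

#include <asm/byteorder.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_wake_pkt_notif {
	__le32 wake_packet_length;	/* real packet length */
	u8 station_id;
	u8 reserved[3];
	u8 wake_packet[];		/* padded to the notification size */
} __packed;

static u8 *demo_copy_wake_packet(const void *data, u32 len, u32 *out_len)
{
	const struct demo_wake_pkt_notif *notif = data;
	u32 pkt_len, copy;

	if (len < sizeof(*notif))	/* fixed part must be present */
		return NULL;

	pkt_len = le32_to_cpu(notif->wake_packet_length);
	copy = len - sizeof(*notif);

	/* drop the padding the firmware appended */
	if (pkt_len < copy)
		copy = pkt_len;

	*out_len = copy;
	return kmemdup(notif->wake_packet, copy, GFP_ATOMIC);
}
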
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 5204aa94e72a..a0123f81f5d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@@ -13,6 +13,21 @@
*/
enum iwl_prot_offload_subcmd_ids {
/**
+ * @WOWLAN_WAKE_PKT_NOTIFICATION: Notification in &struct iwl_wowlan_wake_pkt_notif
+ */
+ WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC,
+
+ /**
+ * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif
+ */
+ WOWLAN_INFO_NOTIFICATION = 0xFD,
+
+ /**
+ * @D3_END_NOTIFICATION: End D3 state notification
+ */
+ D3_END_NOTIFICATION = 0xFE,
+
+ /**
* @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
*/
STORED_BEACON_NTF = 0xFF,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 1989b270862b..74a01888715b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -668,7 +668,7 @@ struct iwl_rx_no_data {
__le32 phy_info[2];
__le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
- TX_NO_DATA_NTFY_API_S_VER_2 */
+ RX_NO_DATA_NTFY_API_S_VER_2 */
struct iwl_frame_release {
u8 baid;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5543d9cb74c8..7ba0e3409199 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,6 +9,16 @@
/* Scan Commands, Responses, Notifications */
+/**
+ * enum iwl_scan_subcmd_ids - scan commands
+ */
+enum iwl_scan_subcmd_ids {
+ /**
+ * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
+ */
+ OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
+};
+
/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20
@@ -1188,7 +1198,7 @@ struct iwl_scan_offload_profile_match {
} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */
/**
- * struct iwl_scan_offload_profiles_query - match results query response
+ * struct iwl_scan_offload_match_info - match results information
* @matched_profiles: bitmap of matched profiles, referencing the
* matches passed in the scan offload request
* @last_scan_age: age of the last offloaded scan
@@ -1200,7 +1210,7 @@ struct iwl_scan_offload_profile_match {
* @reserved: reserved
* @matches: array of match information, one for each match
*/
-struct iwl_scan_offload_profiles_query {
+struct iwl_scan_offload_match_info {
__le32 matched_profiles;
__le32 last_scan_age;
__le32 n_scans_done;
@@ -1210,7 +1220,9 @@ struct iwl_scan_offload_profiles_query {
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match matches[];
-} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 and
+ * SCAN_OFFLOAD_MATCH_INFO_NOTIFICATION_S_VER_1
+ */
/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f5b556a103e8..cfa5e1b3c3f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -649,13 +649,16 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_gl_b0_fm_b0;
extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
+extern const struct iwl_cfg iwl_cfg_bnj_b0_fm_b0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index aeb0015b73d2..919b1f478b4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1427,7 +1427,7 @@ struct iwl_wowlan_status_data {
u8 flags;
} igtk;
- u8 wake_packet[];
+ u8 *wake_packet;
};
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
@@ -1480,7 +1480,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
- if (status->wake_packet_bufsize) {
+ if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
const u8 *pktdata = status->wake_packet;
@@ -1944,57 +1944,6 @@ out:
return true;
}
-/* Occasionally, templates would be nice. This is one of those times ... */
-#define iwl_mvm_parse_wowlan_status_common(_ver) \
-static struct iwl_wowlan_status_data * \
-iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
- struct iwl_wowlan_status_ ##_ver *data,\
- int len) \
-{ \
- struct iwl_wowlan_status_data *status; \
- int data_size, i; \
- \
- if (len < sizeof(*data)) { \
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return NULL; \
- } \
- \
- data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
- if (len != sizeof(*data) + data_size) { \
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
- return NULL; \
- } \
- \
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \
- if (!status) \
- return NULL; \
- \
- /* copy all the common fields */ \
- status->replay_ctr = le64_to_cpu(data->replay_ctr); \
- status->pattern_number = le16_to_cpu(data->pattern_number); \
- status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
- for (i = 0; i < 8; i++) \
- status->qos_seq_ctr[i] = \
- le16_to_cpu(data->qos_seq_ctr[i]); \
- status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
- status->num_of_gtk_rekeys = \
- le32_to_cpu(data->num_of_gtk_rekeys); \
- status->received_beacons = le32_to_cpu(data->received_beacons); \
- status->wake_packet_length = \
- le32_to_cpu(data->wake_packet_length); \
- status->wake_packet_bufsize = \
- le32_to_cpu(data->wake_packet_bufsize); \
- memcpy(status->wake_packet, data->wake_packet, \
- status->wake_packet_bufsize); \
- \
- return status; \
-}
-
-iwl_mvm_parse_wowlan_status_common(v6)
-iwl_mvm_parse_wowlan_status_common(v7)
-iwl_mvm_parse_wowlan_status_common(v9)
-iwl_mvm_parse_wowlan_status_common(v12)
-
static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_gtk_status_v2 *data)
{
@@ -2054,6 +2003,96 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
((u64)ipn[0] << 40);
}
+static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 i;
+
+ if (len < sizeof(*data)) {
+ IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ status = NULL;
+ return;
+ }
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, &data->gtk[0]);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
+
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ status->qos_seq_ctr[i] =
+ le16_to_cpu(data->qos_seq_ctr[i]);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+}
+
+/* Occasionally, templates would be nice. This is one of those times ... */
+#define iwl_mvm_parse_wowlan_status_common(_ver) \
+static struct iwl_wowlan_status_data * \
+iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
+ struct iwl_wowlan_status_ ##_ver *data,\
+ int len) \
+{ \
+ struct iwl_wowlan_status_data *status; \
+ int data_size, i; \
+ \
+ if (len < sizeof(*data)) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
+ if (len != sizeof(*data) + data_size) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return NULL; \
+ } \
+ \
+ status = kzalloc(sizeof(*status), GFP_KERNEL); \
+ if (!status) \
+ return NULL; \
+ \
+ /* copy all the common fields */ \
+ status->replay_ctr = le64_to_cpu(data->replay_ctr); \
+ status->pattern_number = le16_to_cpu(data->pattern_number); \
+ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
+ for (i = 0; i < 8; i++) \
+ status->qos_seq_ctr[i] = \
+ le16_to_cpu(data->qos_seq_ctr[i]); \
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
+ status->num_of_gtk_rekeys = \
+ le32_to_cpu(data->num_of_gtk_rekeys); \
+ status->received_beacons = le32_to_cpu(data->received_beacons); \
+ status->wake_packet_length = \
+ le32_to_cpu(data->wake_packet_length); \
+ status->wake_packet_bufsize = \
+ le32_to_cpu(data->wake_packet_bufsize); \
+ if (status->wake_packet_bufsize) { \
+ status->wake_packet = \
+ kmemdup(data->wake_packet, \
+ status->wake_packet_bufsize, \
+ GFP_KERNEL); \
+ if (!status->wake_packet) { \
+ kfree(status); \
+ return NULL; \
+ } \
+ } else { \
+ status->wake_packet = NULL; \
+ } \
+ \
+ return status; \
+}
+
+iwl_mvm_parse_wowlan_status_common(v6)
+iwl_mvm_parse_wowlan_status_common(v7)
+iwl_mvm_parse_wowlan_status_common(v9)
+iwl_mvm_parse_wowlan_status_common(v12)
+
static struct iwl_wowlan_status_data *
iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
{
@@ -2173,36 +2212,15 @@ out_free_resp:
return status;
}
-static struct iwl_wowlan_status_data *
-iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
-{
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD,
- IWL_FW_CMD_VER_UNKNOWN);
- __le32 station_id = cpu_to_le32(sta_id);
- u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
-
- if (!mvm->net_detect) {
- /* only for tracing for now */
- int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
- cmd_size, &station_id);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
- }
-
- return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
-}
-
/* releases the MVM mutex */
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status_data *status)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_wowlan_status_data *status;
int i;
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
if (!status)
goto out_unlock;
@@ -2212,7 +2230,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
/* still at hard-coded place 0 for D3 image */
mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
if (!mvm_ap_sta)
- goto out_free;
+ goto out_unlock;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status->qos_seq_ctr[i];
@@ -2235,11 +2253,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
- kfree(status);
return keep;
-out_free:
- kfree(status);
out_unlock:
mutex_unlock(&mvm->mutex);
return false;
@@ -2248,16 +2263,16 @@ out_unlock:
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
IWL_SCAN_MAX_PROFILES)
-struct iwl_mvm_nd_query_results {
+struct iwl_mvm_nd_results {
u32 matched_profiles;
u8 matches[ND_QUERY_BUF_LEN];
};
static int
iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *results)
+ struct iwl_mvm_nd_results *results)
{
- struct iwl_scan_offload_profiles_query *query;
+ struct iwl_scan_offload_match_info *query;
struct iwl_host_cmd cmd = {
.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
.flags = CMD_WANT_SKB,
@@ -2274,7 +2289,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
- query_len = sizeof(struct iwl_scan_offload_profiles_query);
+ query_len = sizeof(struct iwl_scan_offload_match_info);
matches_len = sizeof(struct iwl_scan_offload_profile_match) *
max_profiles;
} else {
@@ -2305,7 +2320,7 @@ out_free_resp:
}
static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *query,
+ struct iwl_mvm_nd_results *results,
int idx)
{
int n_chans = 0, i;
@@ -2313,13 +2328,13 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
- (struct iwl_scan_offload_profile_match *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
n_chans += hweight8(matches[idx].matching_channels[i]);
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
- (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
n_chans += hweight8(matches[idx].matching_channels[i]);
@@ -2329,7 +2344,7 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
}
static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
- struct iwl_mvm_nd_query_results *query,
+ struct iwl_mvm_nd_results *results,
struct cfg80211_wowlan_nd_match *match,
int idx)
{
@@ -2338,7 +2353,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
struct iwl_scan_offload_profile_match *matches =
- (struct iwl_scan_offload_profile_match *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
@@ -2346,7 +2361,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
mvm->nd_channels[i]->center_freq;
} else {
struct iwl_scan_offload_profile_match_v1 *matches =
- (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+ (void *)results->matches;
for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
@@ -2355,25 +2370,50 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
}
}
+/**
+ * enum iwl_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF was received
+ */
+enum iwl_d3_notif {
+ IWL_D3_NOTIF_WOWLAN_INFO = BIT(0),
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1),
+ IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2),
+ IWL_D3_ND_MATCH_INFO = BIT(3),
+ IWL_D3_NOTIF_D3_END_NOTIF = BIT(4)
+};
+
+/* manage d3 resume data */
+struct iwl_d3_data {
+ struct iwl_wowlan_status_data *status;
+ bool test;
+ u32 d3_end_flags;
+ u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
+ u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
+ struct iwl_mvm_nd_results *nd_results;
+ bool nd_results_valid;
+};
+
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
{
struct cfg80211_wowlan_nd_info *net_detect = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- struct iwl_wowlan_status_data *status;
- struct iwl_mvm_nd_query_results query;
unsigned long matched_profiles;
u32 reasons = 0;
int i, n_matches, ret;
- status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
- if (status) {
- reasons = status->wakeup_reasons;
- kfree(status);
- }
+ if (WARN_ON(!d3_data || !d3_data->status))
+ goto out;
+
+ reasons = d3_data->status->wakeup_reasons;
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
wakeup.rfkill_release = true;
@@ -2381,13 +2421,22 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
goto out;
- ret = iwl_mvm_netdetect_query_results(mvm, &query);
- if (ret || !query.matched_profiles) {
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0)) {
+ IWL_INFO(mvm, "Query FW for ND results\n");
+ ret = iwl_mvm_netdetect_query_results(mvm, d3_data->nd_results);
+
+ } else {
+ IWL_INFO(mvm, "Notification based ND results\n");
+ ret = d3_data->nd_results_valid ? 0 : -1;
+ }
+
+ if (ret || !d3_data->nd_results->matched_profiles) {
wakeup_report = NULL;
goto out;
}
- matched_profiles = query.matched_profiles;
+ matched_profiles = d3_data->nd_results->matched_profiles;
if (mvm->n_nd_match_sets) {
n_matches = hweight_long(matched_profiles);
} else {
@@ -2404,7 +2453,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct cfg80211_wowlan_nd_match *match;
int idx, n_channels = 0;
- n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
+ n_channels = iwl_mvm_query_num_match_chans(mvm,
+ d3_data->nd_results,
+ i);
match = kzalloc(struct_size(match, channels, n_channels),
GFP_KERNEL);
@@ -2424,7 +2475,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (mvm->n_nd_channels < n_channels)
continue;
- iwl_mvm_query_set_freqs(mvm, &query, match, i);
+ iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
}
out_report_nd:
@@ -2504,16 +2555,317 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
return false;
}
+/*
+ * This function assumes:
+ * 1. The mutex is already held.
+ * 2. The callee functions unlock the mutex.
+ */
+static bool
+iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_d3_data *d3_data)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ /* if FW uses status notification, status shouldn't be NULL here */
+ if (!d3_data->status) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : mvmvif->ap_sta_id;
+
+ d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
+ }
+
+ if (mvm->net_detect) {
+ iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
+ } else {
+ bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (keep)
+ mvm->keep_vif = vif;
+#endif
+
+ return keep;
+ }
+ return false;
+}
+
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+ IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
+static int iwl_mvm_wowlan_store_wake_pkt(struct iwl_mvm *mvm,
+ struct iwl_wowlan_wake_pkt_notif *notif,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 data_size, packet_len = le32_to_cpu(notif->wake_packet_length);
+
+ if (len < sizeof(*notif)) {
+ IWL_ERR(mvm, "Invalid WoWLAN wake packet notification!\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!status)) {
+ IWL_ERR(mvm, "Got wake packet notification but wowlan status data is NULL\n");
+ return -EIO;
+ }
+
+ if (WARN_ON(!(status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT))) {
+ IWL_ERR(mvm, "Got wakeup packet but wakeup reason is %x\n",
+ status->wakeup_reasons);
+ return -EIO;
+ }
+
+ data_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, wake_packet);
+
+ /* data_size got the padding from the notification, remove it. */
+ if (packet_len < data_size)
+ data_size = packet_len;
+
+ status->wake_packet = kmemdup(notif->wake_packet, data_size,
+ GFP_ATOMIC);
+
+ if (!status->wake_packet)
+ return -ENOMEM;
+
+ status->wake_packet_length = packet_len;
+ status->wake_packet_bufsize = data_size;
+
+ return 0;
+}
+
+static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data,
+ struct iwl_scan_offload_match_info *notif,
+ u32 len)
+{
+ struct iwl_wowlan_status_data *status = d3_data->status;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_nd_results *results = d3_data->nd_results;
+ size_t i, matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ iwl_umac_scan_get_max_profiles(mvm->fw);
+
+ if (IS_ERR_OR_NULL(vif))
+ return;
+
+ if (len < sizeof(struct iwl_scan_offload_match_info)) {
+ IWL_ERR(mvm, "Invalid scan match info notification\n");
+ return;
+ }
+
+ if (!mvm->net_detect) {
+ IWL_ERR(mvm, "Unexpected scan match info notification\n");
+ return;
+ }
+
+ if (!status || status->wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ IWL_ERR(mvm,
+ "Ignore scan match info notification: no reason\n");
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ mvm->last_netdetect_scans = le32_to_cpu(notif->n_scans_done);
+#endif
+
+ results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+ IWL_INFO(mvm, "number of matched profiles=%u\n",
+ results->matched_profiles);
+
+ if (results->matched_profiles) {
+ memcpy(results->matches, notif->matches, matches_len);
+ d3_data->nd_results_valid = true;
+ }
+
+ /* no scan should be active at this point */
+ mvm->scan_status = 0;
+ for (i = 0; i < mvm->max_scans; i++)
+ mvm->scan_uid_status[i] = 0;
+}
+
+static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_d3_data *d3_data = data;
+ u32 len;
+ int ret;
+
+ switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+ struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ /* We might get two notifications due to dual bss */
+ IWL_DEBUG_WOWLAN(mvm,
+ "Got additional wowlan info notification\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
+ len);
+ if (d3_data->status &&
+ d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+ /* We are also supposed to get a wake packet notif */
+ d3_data->notif_expected |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+ struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+ /* We shouldn't get two wake packet notifications */
+ IWL_ERR(mvm,
+ "Got additional wowlan wake packet notification\n");
+ } else {
+ d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ len = iwl_rx_packet_payload_len(pkt);
+ ret = iwl_mvm_wowlan_store_wake_pkt(mvm, notif,
+ d3_data->status,
+ len);
+ if (ret)
+ IWL_ERR(mvm,
+ "Can't parse WOWLAN_WAKE_PKT_NOTIFICATION\n");
+ }
+
+ break;
+ }
+ case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+ struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+
+ if (d3_data->notif_received & IWL_D3_ND_MATCH_INFO) {
+ IWL_ERR(mvm,
+ "Got additional netdetect match info\n");
+ break;
+ }
+
+ d3_data->notif_received |= IWL_D3_ND_MATCH_INFO;
+
+ /* explicitly set this in the 'expected' as well */
+ d3_data->notif_expected |= IWL_D3_ND_MATCH_INFO;
+
+ len = iwl_rx_packet_payload_len(pkt);
+ iwl_mvm_nd_match_info_handler(mvm, d3_data, notif, len);
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+ struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data;
+
+ d3_data->d3_end_flags = __le32_to_cpu(notif->flags);
+ d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return d3_data->notif_received == d3_data->notif_expected;
+}
+
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+{
+ int ret;
+ enum iwl_d3_status d3_status;
+ struct iwl_host_cmd cmd = {
+ .id = D0I3_END_CMD,
+ .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
+ };
+ bool reset = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ if (ret)
+ return ret;
+
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mvm, "Device was reset during suspend\n");
+ return -ENOENT;
+ }
+
+ /*
+ * We should trigger the resume flow using a command only for the 22000
+ * family. AX210 and above don't need the command since they have
+ * the doorbell interrupt.
+ */
+ if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 &&
+ fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) {
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
+
+static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
+ struct iwl_d3_data *d3_data)
+{
+ static const u16 d3_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+ WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ struct iwl_notification_wait wait_d3_notif;
+ int ret;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+
+ ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
+ return ret;
+ }
+
+ return iwl_wait_notification(&mvm->notif_wait, &wait_d3_notif,
+ IWL_MVM_D3_NOTIF_TIMEOUT);
+}
+
+static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
+{
+ return iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_WAKE_PKT_NOTIFICATION, 0) &&
+ iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0);
+}
+
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
int ret = 1;
- enum iwl_d3_status d3_status;
- bool keep = false;
+ struct iwl_mvm_nd_results results = {};
+ struct iwl_d3_data d3_data = {
+ .test = test,
+ .notif_expected =
+ IWL_D3_NOTIF_WOWLAN_INFO |
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ .nd_results_valid = false,
+ .nd_results = &results,
+ };
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+ bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ bool keep = false;
mutex_lock(&mvm->mutex);
@@ -2537,54 +2889,30 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
- if (ret)
- goto err;
-
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
- goto err;
- }
-
- if (d0i3_first) {
- struct iwl_host_cmd cmd = {
- .id = D0I3_END_CMD,
- .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
- };
- int len;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret < 0) {
- IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
- ret);
+ if (resume_notif_based) {
+ d3_data.status = kzalloc(sizeof(*d3_data.status), GFP_KERNEL);
+ if (!d3_data.status) {
+ IWL_ERR(mvm, "Failed to allocate wowlan status\n");
+ ret = -ENOMEM;
goto err;
}
- switch (mvm->cmd_ver.d0i3_resp) {
- case 0:
- break;
- case 1:
- len = iwl_rx_packet_payload_len(cmd.resp_pkt);
- if (len != sizeof(u32)) {
- IWL_ERR(mvm,
- "Error with D0I3_END_CMD response size (%d)\n",
- len);
- goto err;
- }
- if (IWL_D0I3_RESET_REQUIRE &
- le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
- iwl_write32(mvm->trans, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- iwl_free_resp(&cmd);
- }
- break;
- default:
- WARN_ON(1);
- }
+
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ if (ret)
+ goto err;
+ } else {
+ ret = iwl_mvm_resume_firmware(mvm, test);
+ if (ret < 0)
+ goto err;
}
/* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ /* when reset is required we can't send these following commands */
+ if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
+ goto query_wakeup_reasons;
+
/*
* Query the current location and source from the D3 firmware so we
* can play it back when we re-initialize the D0 firmware
@@ -2598,41 +2926,36 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* Re-configure default SAR profile */
iwl_mvm_sar_select_profile(mvm, 1, 1);
- if (mvm->net_detect) {
+ if (mvm->net_detect && unified_image) {
/* If this is a non-unified image, we restart the FW,
* so no need to stop the netdetect scan. If that
* fails, continue and try to get the wake-up reasons,
* but trigger a HW restart by keeping a failure code
* in ret.
*/
- if (unified_image)
- ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
- false);
-
- iwl_mvm_query_netdetect_reasons(mvm, vif);
- /* has unlocked the mutex, so skip that */
- goto out;
- } else {
- keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
-#endif
- /* has unlocked the mutex, so skip that */
- goto out_iterate;
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+ false);
}
+query_wakeup_reasons:
+ keep = iwl_mvm_choose_query_wakeup_reasons(mvm, vif, &d3_data);
+ /* has unlocked the mutex, so skip that */
+ goto out;
+
err:
- iwl_mvm_free_nd(mvm);
mutex_unlock(&mvm->mutex);
+out:
+ if (d3_data.status)
+ kfree(d3_data.status->wake_packet);
+ kfree(d3_data.status);
+ iwl_mvm_free_nd(mvm);
-out_iterate:
- if (!test)
+ if (!d3_data.test && !mvm->net_detect)
ieee80211_iterate_active_interfaces_mtx(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d3_disconnect_iter,
+ keep ? vif : NULL);
-out:
clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
/* no need to reset the device in unified images, if successful */
@@ -2641,9 +2964,14 @@ out:
if (d0i3_first)
return 0;
- ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
- if (!ret)
+ if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ D3_END_NOTIFICATION, 0)) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+ if (!ret)
+ return 0;
+ } else if (!(d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) {
return 0;
+ }
}
/*
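
The resume path reworked above collects firmware notifications into a bitmap and completes the wait only when everything expected has arrived; the expected set can grow while handling (a wake-packet notification becomes expected once the info notification reports a matching wakeup reason). A minimal sketch of that completion logic, with hypothetical demo_* names:

#include <linux/bits.h>
#include <linux/types.h>

enum demo_notif {
	DEMO_NOTIF_INFO		= BIT(0),
	DEMO_NOTIF_WAKE_PKT	= BIT(1),
	DEMO_NOTIF_END		= BIT(2),
};

struct demo_wait {
	u32 notif_expected;	/* must all arrive before the wait ends */
	u32 notif_received;	/* accumulated as notifications come in */
};

/* returns true once every expected notification has been seen */
static bool demo_handle_notif(struct demo_wait *w, enum demo_notif n,
			      bool wake_pkt_follows)
{
	w->notif_received |= n;

	/* some notifications announce that another one is coming */
	if (n == DEMO_NOTIF_INFO && wake_pkt_follows)
		w->notif_expected |= DEMO_NOTIF_WAKE_PKT;

	return w->notif_received == w->notif_expected;
}
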
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index c0bd697b080a..1e8123140973 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -430,14 +430,16 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta,
return -EBUSY;
if (amsdu_len) {
- mvmsta->orig_amsdu_len = sta->max_amsdu_len;
- sta->max_amsdu_len = amsdu_len;
- for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++)
- sta->max_tid_amsdu_len[i] = amsdu_len;
+ mvmsta->orig_amsdu_len = sta->cur->max_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = amsdu_len;
+ for (i = 0; i < ARRAY_SIZE(sta->deflink.agg.max_tid_amsdu_len); i++)
+ sta->deflink.agg.max_tid_amsdu_len[i] = amsdu_len;
} else {
- sta->max_amsdu_len = mvmsta->orig_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = mvmsta->orig_amsdu_len;
mvmsta->orig_amsdu_len = 0;
}
+
return count;
}
@@ -451,7 +453,7 @@ static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file,
char buf[32];
int pos;
- pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len);
+ pos = scnprintf(buf, sizeof(buf), "current %d ", sta->cur->max_amsdu_len);
pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n",
mvmsta->orig_amsdu_len);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5eb28f8ee87e..8464c9b7baf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1833,8 +1833,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
- IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
- MAX_HE_SUPP_NSS);
+ IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
@@ -3193,7 +3193,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
NL80211_TDLS_SETUP);
}
- sta->max_rc_amsdu_len = 1;
+ sta->deflink.agg.max_rc_amsdu_len = 1;
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
/*
@@ -4949,6 +4949,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
{
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 gi_ltf;
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
@@ -5019,9 +5020,12 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
RATE_HT_MCS_INDEX(rate_n_flags) :
u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
- if (format == RATE_MCS_HE_MSK) {
- u32 gi_ltf = u32_get_bits(rate_n_flags,
- RATE_MCS_HE_GI_LTF_MSK);
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (format) {
+ case RATE_MCS_HE_MSK:
+ gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -5060,19 +5064,14 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
- return;
- }
-
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-
- if (format == RATE_MCS_HT_MSK) {
+ break;
+ case RATE_MCS_HT_MSK:
rinfo->flags |= RATE_INFO_FLAGS_MCS;
-
- } else if (format == RATE_MCS_VHT_MSK) {
+ break;
+ case RATE_MCS_VHT_MSK:
rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ break;
}
-
}
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bf35e130c876..97cba526e465 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -860,6 +860,7 @@ struct iwl_mvm {
/* Scan status, cmd (pre-allocated) and auxiliary station */
unsigned int scan_status;
+ size_t scan_cmd_size;
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
/* For CDB this is low band scan type, for non-CDB - type. */
@@ -1079,7 +1080,6 @@ struct iwl_mvm {
struct list_head resp_pasn_list;
struct {
- u8 d0i3_resp;
u8 range_resp;
} cmd_ver;
@@ -1705,7 +1705,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_size(struct iwl_mvm *mvm);
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index db43c8a83a31..d2d42cd48af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -557,6 +557,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_CONFIG_CMD),
@@ -574,6 +581,9 @@ static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+ HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
+ HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
+ HCMD_NAME(D3_END_NOTIFICATION),
HCMD_NAME(STORED_BEACON_NTF),
};
@@ -593,6 +603,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
[LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
@@ -1065,7 +1076,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
static const u8 no_reclaim_cmds[] = {
TX_CMD,
};
- int scan_size;
+ size_t scan_size;
u32 min_backoff;
struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
@@ -1188,13 +1199,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
- mvm->cmd_ver.d0i3_resp =
- iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD,
- 0);
- /* we only support version 1 */
- if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
- goto out_free;
-
mvm->cmd_ver.range_resp =
iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_RESPONSE_NOTIF, 5);
@@ -1299,6 +1303,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
if (!mvm->scan_cmd)
goto out_free;
+ mvm->scan_cmd_size = scan_size;
/* invalidate ids to prevent accidental removal of sta_id 0 */
mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index d8c3d7ff4f44..2e9081cb6627 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -143,7 +143,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
};
/* the station support only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
max_nss = 1;
for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
@@ -205,7 +205,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
u8 nss = sta->deflink.rx_nss;
/* the station support only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
nss = 1;
for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
@@ -270,7 +270,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cpu_to_le16(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
0;
else
@@ -340,9 +340,9 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
- if (sta->max_amsdu_len < size) {
+ if (sta->deflink.agg.max_amsdu_len < size) {
/*
- * In debug sta->max_amsdu_len < size
+ * In debug sta->deflink.agg.max_amsdu_len < size
* so also check with orig_amsdu_len which holds the
* original data before debugfs changed the value
*/
@@ -352,18 +352,18 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
- sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+ sta->deflink.agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled & BIT(i))
- sta->max_tid_amsdu_len[i] =
+ sta->deflink.agg.max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
- sta->max_tid_amsdu_len[i] = 1;
+ sta->deflink.agg.max_tid_amsdu_len[i] = 1;
}
IWL_DEBUG_RATE(mvm,
@@ -450,7 +450,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* since TLC offload works with one mode we can assume
* that only vht/ht is used and also set it as station max amsdu
*/
- sta->max_amsdu_len = max_amsdu_len;
+ sta->deflink.agg.max_amsdu_len = max_amsdu_len;
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(DATA_PATH_GROUP,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index a79043f30775..0b50b816684a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -138,7 +138,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (!sta->deflink.ht_cap.ht_supported)
return false;
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+ if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC)
return false;
if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
@@ -1491,7 +1491,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;
- sta->max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
+ sta->deflink.agg.max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF
@@ -1506,22 +1506,23 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (mvmsta->vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ mvmsta->max_amsdu_len = sta->deflink.agg.max_amsdu_len;
else
- mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500);
+ mvmsta->max_amsdu_len =
+ min_t(int, sta->deflink.agg.max_amsdu_len, 8500);
- sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;
+ sta->deflink.agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled)
- sta->max_tid_amsdu_len[i] =
+ sta->deflink.agg.max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
- sta->max_tid_amsdu_len[i] = 1;
+ sta->deflink.agg.max_tid_amsdu_len[i] = 1;
}
}
@@ -2933,7 +2934,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = mvmsta->sta_id;
mvmsta->amsdu_enabled = 0;
- mvmsta->max_amsdu_len = sta->max_amsdu_len;
+ mvmsta->max_amsdu_len = sta->cur->max_amsdu_len;
for (j = 0; j < LQ_SIZE; j++)
rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 2c43a9989783..1aadccd8841f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -1191,16 +1191,22 @@ struct iwl_mvm_rx_phy_data {
enum iwl_rx_phy_info_type info_type;
__le32 d0, d1, d2, d3;
__le16 d4;
+
+ u32 rate_n_flags;
+ u32 gp2_on_air_rise;
+ u16 phy_info;
+ u8 energy_a, energy_b;
+ u8 channel;
};
static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags,
struct ieee80211_radiotap_he_mu *he_mu)
{
u32 phy_data2 = le32_to_cpu(phy_data->d2);
u32 phy_data3 = le32_to_cpu(phy_data->d3);
u16 phy_data4 = le16_to_cpu(phy_data->d4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
he_mu->flags1 |=
@@ -1246,7 +1252,6 @@ static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
static void
iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status)
@@ -1260,6 +1265,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
* the TSF/timers are not transmitted in HE-MU.
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
u8 offs = 0;
@@ -1331,7 +1337,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status,
- u32 rate_n_flags, int queue)
+ int queue)
{
switch (phy_data->info_type) {
case IWL_RX_PHY_INFO_TYPE_NONE:
@@ -1430,7 +1436,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
le16_encode_bits(le16_get_bits(phy_data->d4,
IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
- iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu);
+ iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu);
fallthrough;
case IWL_RX_PHY_INFO_TYPE_HE_MU:
he_mu->flags2 |=
@@ -1444,8 +1450,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
fallthrough;
case IWL_RX_PHY_INFO_TYPE_HE_TB:
case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
- iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags,
- he, he_mu, rx_status);
+ iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
break;
case IWL_RX_PHY_INFO_TYPE_HE_SU:
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
@@ -1461,13 +1466,14 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_rx_phy_data *phy_data,
- u32 rate_n_flags, u16 phy_info, int queue)
+ int queue)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 rate_n_flags = phy_data->rate_n_flags;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
- u8 stbc, ltf;
+ u8 ltf;
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
@@ -1484,6 +1490,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
};
+ u16 phy_info = phy_data->phy_info;
he = skb_put_data(skb, &known, sizeof(known));
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
@@ -1504,7 +1511,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
- rate_n_flags, queue);
+ queue);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
@@ -1531,19 +1538,6 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
- stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_MCS_NSS_MSK) >>
- RATE_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_HE;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
-
- rx_status->he_dcm =
- !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
-
#define CHECK_TYPE(F) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
(RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
@@ -1661,6 +1655,107 @@ static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
rx_sta_csa->all_sta_unblocked = false;
}
+/*
+ * Note: requires also rx_status->band to be prefilled, as well
+ * as phy_data (apart from phy_data->info_type)
+ */
+static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
+ struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ bool is_sgi;
+
+ phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
+ if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ phy_data->info_type =
+ le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ /* must be before L-SIG data */
+ if (format == RATE_MCS_HE_MSK)
+ iwl_mvm_rx_he(mvm, skb, phy_data, queue);
+
+ iwl_mvm_decode_lsig(skb, phy_data);
+
+ rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->band);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
+ phy_data->energy_a, phy_data->energy_b);
+
+ if (unlikely(mvm->monitor_on))
+ iwl_mvm_add_rtap_sniffer_config(mvm, skb);
+
+ is_sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate_n_flags) :
+ rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
+ rx_status->encoding = RX_ENC_VHT;
+ break;
+ case RATE_MCS_HE_MSK:
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+ break;
+ }
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ case RATE_MCS_VHT_MSK:
+ case RATE_MCS_HE_MSK:
+ rx_status->nss =
+ u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ default: {
+ int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ rx_status->rate_idx = rate;
+
+ if (WARN_ONCE(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band))
+ rx_status->rate_idx = 0;
+ break;
+ }
+ }
+}
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -1670,17 +1765,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_hdr *hdr;
u32 len;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
- u32 rate_n_flags, gp2_on_air_rise;
- u16 phy_info;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 crypt_len = 0, channel, energy_a, energy_b;
+ u8 crypt_len = 0;
size_t desc_size;
- struct iwl_mvm_rx_phy_data phy_data = {
- .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
- };
+ struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
- bool is_sgi;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1696,35 +1786,37 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
- channel = desc->v3.channel;
- gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
- energy_a = desc->v3.energy_a;
- energy_b = desc->v3.energy_b;
+ phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data.channel = desc->v3.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ phy_data.energy_a = desc->v3.energy_a;
+ phy_data.energy_b = desc->v3.energy_b;
phy_data.d0 = desc->v3.phy_data0;
phy_data.d1 = desc->v3.phy_data1;
phy_data.d2 = desc->v3.phy_data2;
phy_data.d3 = desc->v3.phy_data3;
} else {
- rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
- channel = desc->v1.channel;
- gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
- energy_a = desc->v1.energy_a;
- energy_b = desc->v1.energy_b;
+ phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+ phy_data.channel = desc->v1.channel;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+ phy_data.energy_a = desc->v1.energy_a;
+ phy_data.energy_b = desc->v1.energy_b;
phy_data.d0 = desc->v1.phy_data0;
phy_data.d1 = desc->v1.phy_data1;
phy_data.d2 = desc->v1.phy_data2;
phy_data.d3 = desc->v1.phy_data3;
}
+
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
REPLY_RX_MPDU_CMD, 0) < 4) {
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
- rate_n_flags);
+ phy_data.rate_n_flags);
}
- format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
len = le16_to_cpu(desc->mpdu_len);
@@ -1733,14 +1825,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
}
- phy_info = le16_to_cpu(desc->phy_info);
+ phy_data.phy_info = le16_to_cpu(desc->phy_info);
phy_data.d4 = desc->phy_data4;
- if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
- phy_data.info_type =
- le32_get_bits(phy_data.d1,
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
-
hdr = (void *)(pkt->data + desc_size);
/* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
@@ -1763,27 +1850,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
- /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (format == RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
- phy_info, queue);
-
- iwl_mvm_decode_lsig(skb, &phy_data);
-
/*
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
@@ -1794,12 +1860,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
le32_to_cpu(desc->status));
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
+
/* set the preamble flag if appropriate */
if (format == RATE_MCS_CCK_MSK &&
- phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
- if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
u64 tsf_on_air_rise;
if (mvm->trans->trans_cfg->device_family >=
@@ -1813,24 +1880,20 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
}
- rx_status->device_timestamp = gp2_on_air_rise;
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
} else {
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
}
- rx_status->freq = ieee80211_channel_to_frequency(channel,
- rx_status->band);
- iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
- energy_b);
/* update aggregation data for monitor sake on default queue */
- if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
- bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+ if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ bool toggle_bit;
+ toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
/*
* Toggle is switched whenever new aggregation starts. Make
@@ -1846,9 +1909,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->ampdu_reference = mvm->ampdu_ref;
}
- if (unlikely(mvm->monitor_on))
- iwl_mvm_add_rtap_sniffer_config(mvm, skb);
-
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
@@ -1867,13 +1927,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
}
- if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc,
+ if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
kfree_skb(skb);
goto out;
}
+ iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
+
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
@@ -1971,43 +2033,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- is_sgi = format == RATE_MCS_HE_MSK ?
- iwl_he_is_sgi(rate_n_flags) :
- rate_n_flags & RATE_MCS_SGI_MSK;
-
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
- rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (format == RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (format == RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >>
- RATE_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_VHT;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (!(format == RATE_MCS_HE_MSK)) {
- int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
- rx_status->band);
-
- if (WARN(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band)) {
- kfree_skb(skb);
- goto out;
- }
- rx_status->rate_idx = rate;
- }
-
/* management stuff on default queue */
if (!queue) {
if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
@@ -2039,32 +2064,32 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_no_data *desc = (void *)pkt->data;
- u32 rate_n_flags = le32_to_cpu(desc->rate);
- u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
u32 rssi = le32_to_cpu(desc->rssi);
u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
- u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
- u8 channel, energy_a, energy_b;
- u32 format;
struct iwl_mvm_rx_phy_data phy_data = {
- .info_type = le32_get_bits(desc->phy_info[1],
- IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
.d0 = desc->phy_info[0],
.d1 = desc->phy_info[1],
+ .phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD,
+ .gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time),
+ .rate_n_flags = le32_to_cpu(desc->rate),
+ .energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK),
+ .energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK),
+ .channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK),
};
- bool is_sgi;
+ u32 format;
if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
RX_NO_DATA_NOTIF, 0) < 2) {
IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
- rate_n_flags);
- rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ phy_data.rate_n_flags);
+ phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags);
IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
- rate_n_flags);
+ phy_data.rate_n_flags);
}
- format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
return;
@@ -2072,10 +2097,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
- energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
- channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
-
/* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -2106,86 +2127,31 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
- /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- rx_status->bw = RATE_INFO_BW_40;
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- rx_status->bw = RATE_INFO_BW_80;
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- rx_status->bw = RATE_INFO_BW_160;
- break;
- }
-
- if (format == RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
- phy_info, queue);
-
- iwl_mvm_decode_lsig(skb, &phy_data);
-
- rx_status->device_timestamp = gp2_on_air_rise;
- rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
- rx_status->freq = ieee80211_channel_to_frequency(channel,
- rx_status->band);
- iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
- energy_b);
- rcu_read_lock();
+ iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
- is_sgi = format == RATE_MCS_HE_MSK ?
- iwl_he_is_sgi(rate_n_flags) :
- rate_n_flags & RATE_MCS_SGI_MSK;
-
- if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
- rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (format == RATE_MCS_HT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (format == RATE_MCS_VHT_MSK) {
- u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
- rx_status->encoding = RX_ENC_VHT;
- rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- if (rate_n_flags & RATE_MCS_BF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_BF;
- /*
- * take the nss from the rx_vec since the rate_n_flags has
- * only 2 bits for the nss which gives a max of 4 ss but
- * there may be up to 8 spatial streams
- */
+ /*
+ * Override the nss from the rx_vec since the rate_n_flags has
+ * only 2 bits for the nss which gives a max of 4 ss but there
+ * may be up to 8 spatial streams.
+ */
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
- } else if (format == RATE_MCS_HE_MSK) {
+ break;
+ case RATE_MCS_HE_MSK:
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
- } else {
- int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
- rx_status->band);
-
- if (WARN(rate < 0 || rate > 0xFF,
- "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band)) {
- kfree_skb(skb);
- goto out;
- }
- rx_status->rate_idx = rate;
+ break;
}
+ rcu_read_lock();
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
-out:
rcu_read_unlock();
}
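
The rx.c hunks above fold all rate_n_flags decoding (bandwidth, encoding, NSS, STBC, rate index) into the new iwl_mvm_rx_fill_status() helper, switching on the modulation-format bits and using u32_get_bits() with a mask instead of hand-written shift/mask pairs. The standalone C sketch below only illustrates that extraction style; the mask values and the get_bits() helper are stand-ins, not the real iwlwifi register layout:

#include <stdint.h>
#include <stdio.h>

/* illustrative masks only -- not the actual iwlwifi bit layout */
#define EX_MOD_TYPE_MSK 0x00000700u
#define EX_NSS_MSK      0x00003000u
#define EX_STBC_MSK     0x00008000u

/* generic "get bits under mask" helper, in the spirit of u32_get_bits() */
static uint32_t get_bits(uint32_t val, uint32_t mask)
{
    return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
    uint32_t rate_n_flags = 0x0000b200; /* arbitrary example value */

    printf("format=%u nss=%u stbc=%u\n",
           (unsigned)get_bits(rate_n_flags, EX_MOD_TYPE_MSK),
           (unsigned)get_bits(rate_n_flags, EX_NSS_MSK) + 1,
           (unsigned)get_bits(rate_n_flags, EX_STBC_MSK));
    return 0;
}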
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 582a95ffc7ab..acd8803dbcdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2626,7 +2626,7 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
u8 scan_ver;
lockdep_assert_held(&mvm->mutex);
- memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd));
+ memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -3091,7 +3091,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
1 * HZ);
}
-static int iwl_scan_req_umac_get_size(u8 scan_ver)
+static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
{
switch (scan_ver) {
case 12:
@@ -3104,7 +3104,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver)
return 0;
}
-int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
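
The scan.c hunk stops clearing the scan command buffer with ksize() and uses a size the driver tracks itself (mvm->scan_cmd_size, sized via the now size_t-returning iwl_mvm_scan_size()). ksize() reports the slab object size, which can exceed what was actually requested, so remembering the requested size is the safer pattern. A minimal userspace sketch of that pattern, with hypothetical names:

#include <stdlib.h>
#include <string.h>

/* hypothetical container -- mirrors the "remember the size you asked for" pattern */
struct scan_buf {
    void *cmd;
    size_t cmd_size;
};

static int scan_buf_alloc(struct scan_buf *b, size_t size)
{
    b->cmd = malloc(size);
    if (!b->cmd)
        return -1;
    b->cmd_size = size;     /* track the requested size explicitly */
    return 0;
}

static void scan_buf_reset(struct scan_buf *b)
{
    /* clear only what was requested, not whatever the allocator rounded up to */
    memset(b->cmd, 0, b->cmd_size);
}

int main(void)
{
    struct scan_buf b;

    if (scan_buf_alloc(&b, 512) == 0) {
        scan_buf_reset(&b);
        free(b.cmd);
    }
    return 0;
}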
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ff0d3b3df140..cc92706b3d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -116,7 +116,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- switch (sta->smps_mode) {
+ switch (sta->deflink.smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index f9e08b339e0c..86d20e13bf47 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -926,7 +926,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
* Take the min of ieee80211 station and mvm station
*/
max_amsdu_len =
- min_t(unsigned int, sta->max_amsdu_len,
+ min_t(unsigned int, sta->cur->max_amsdu_len,
iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b16d4ae182d1..4f699862e7f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1155,10 +1155,20 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY,
iwl_cfg_bz_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_NO_JACKET,
+ iwl_cfg_bz_a0_fm4_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
iwl_cfg_gl_a0_fm_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET,
+ iwl_cfg_gl_b0_fm_b0, iwl_bz_name),
/* BZ Z step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1169,11 +1179,16 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
/* BNJ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_A_STEP,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
iwl_cfg_bnj_a0_fm_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, SILICON_B_STEP,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET,
+ iwl_cfg_bnj_b0_fm_b0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 68a4572cee53..9c9f87fe8377 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1110,7 +1110,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
poll = iwl_pcie_napi_poll_msix;
netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
- poll, NAPI_POLL_WEIGHT);
+ poll);
napi_enable(&rxq->napi);
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 0a376f112db9..4e0a0c881697 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3848,7 +3848,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
- strlcpy(info->driver, "hostap", sizeof(info->driver));
+ strscpy(info->driver, "hostap", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index b925e327e091..e127453ab51a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -635,7 +635,7 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx,
/*
* hw/fw has not accumulated enough sample sets.
* Wait for 100ms, this ought to be enough to
- * to get at least one non-null set of channel
+ * get at least one non-null set of channel
* usage statistics.
*/
msleep(100);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e55f153ff26..df51b5b1f171 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -229,6 +229,7 @@ static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
struct hwsim_sta_priv {
u32 magic;
unsigned int last_link;
+ u16 active_links_rx;
};
#define HWSIM_STA_MAGIC 0x6d537749
@@ -652,7 +653,6 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
- struct ieee80211_chanctx_conf *chanctx;
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -1299,6 +1299,8 @@ static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw,
struct sk_buff *skb;
void *msg_head;
+ WARN_ON(!is_valid_ether_addr(addr));
+
if (!_portid && !hwsim_virtio_enabled)
return;
@@ -1561,6 +1563,42 @@ static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb)
#endif
}
+static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ (ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_data(hdr->frame_control))) {
+ struct ieee80211_sta *sta;
+ unsigned int link_id;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta_by_link_addrs(data->hw, hdr->addr2,
+ hdr->addr1, &link_id);
+ if (sta) {
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
+ if (ieee80211_has_pm(hdr->frame_control))
+ sp->active_links_rx &= ~BIT(link_id);
+ else
+ sp->active_links_rx |= BIT(link_id);
+ }
+ rcu_read_unlock();
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
+
+ mac80211_hwsim_add_vendor_rtap(skb);
+
+ data->rx_pkts++;
+ data->rx_bytes += skb->len;
+ ieee80211_rx_irqsafe(data->hw, skb);
+}
+
static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct ieee80211_channel *chan)
@@ -1688,13 +1726,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
rx_status.mactime = now + data2->tsf_offset;
- memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
-
- mac80211_hwsim_add_vendor_rtap(nskb);
-
- data2->rx_pkts++;
- data2->rx_bytes += nskb->len;
- ieee80211_rx_irqsafe(data2->hw, nskb);
+ mac80211_hwsim_rx(data2, &rx_status, nskb);
}
spin_unlock(&hwsim_radio_lock);
@@ -1714,12 +1746,7 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
if (!vif->valid_links)
return &vif->bss_conf;
- /* FIXME: handle multicast TX properly */
- if (is_multicast_ether_addr(hdr->addr1) || WARN_ON_ONCE(!sta)) {
- unsigned int first_link = ffs(vif->valid_links) - 1;
-
- return rcu_dereference(vif->link_conf[first_link]);
- }
+ WARN_ON(is_multicast_ether_addr(hdr->addr1));
if (WARN_ON_ONCE(!sta->valid_links))
return &vif->bss_conf;
@@ -1731,6 +1758,12 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
/* round-robin the available link IDs */
link_id = (sp->last_link + i + 1) % ARRAY_SIZE(vif->link_conf);
+ if (!(vif->active_links & BIT(link_id)))
+ continue;
+
+ if (!(sp->active_links_rx & BIT(link_id)))
+ continue;
+
*link_sta = rcu_dereference(sta->link[link_id]);
if (!*link_sta)
continue;
@@ -1739,6 +1772,10 @@ mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data,
if (WARN_ON_ONCE(!bss_conf))
continue;
+ /* can happen while switching links */
+ if (!rcu_access_pointer(bss_conf->chanctx_conf))
+ continue;
+
sp->last_link = link_id;
return bss_conf;
}
@@ -2401,10 +2438,19 @@ static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
hwsim_check_magic(vif);
hwsim_set_sta_magic(sta);
mac80211_hwsim_sta_rc_update(hw, vif, sta, 0);
+ if (sta->valid_links) {
+ WARN(hweight16(sta->valid_links) > 1,
+ "expect to add STA with single link, have 0x%x\n",
+ sta->valid_links);
+ sp->active_links_rx = sta->valid_links;
+ }
+
return 0;
}
@@ -2430,6 +2476,14 @@ static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_NOTEXIST)
return mac80211_hwsim_sta_add(hw, vif, sta);
+ /*
+ * when client is authorized (AP station marked as such),
+ * enable all links
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
+ ieee80211_set_active_links_async(vif, vif->valid_links);
+
return 0;
}
@@ -2866,11 +2920,6 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = ctx;
- mutex_unlock(&hwsim->mutex);
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2882,11 +2931,6 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = NULL;
- mutex_unlock(&hwsim->mutex);
wiphy_dbg(hw->wiphy,
"remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
@@ -2899,11 +2943,6 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct mac80211_hwsim_data *hwsim = hw->priv;
-
- mutex_lock(&hwsim->mutex);
- hwsim->chanctx = ctx;
- mutex_unlock(&hwsim->mutex);
hwsim_check_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2919,6 +2958,18 @@ static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
hwsim_check_magic(vif);
hwsim_check_chanctx_magic(ctx);
+ /* if we activate a link while already associated wake it up */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true);
+ if (skb) {
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan);
+ local_bh_enable();
+ }
+ }
+
return 0;
}
@@ -2929,6 +2980,22 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
hwsim_check_magic(vif);
hwsim_check_chanctx_magic(ctx);
+
+ /* if we deactivate a link while associated suspend it first */
+ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true);
+ if (skb) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+ local_bh_disable();
+ mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan);
+ local_bh_enable();
+ }
+ }
}
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -2995,18 +3062,22 @@ static int mac80211_hwsim_change_vif_links(struct ieee80211_hw *hw,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
- unsigned long rem = old_links & ~new_links ?: BIT(0);
+ unsigned long rem = old_links & ~new_links;
unsigned long add = new_links & ~old_links;
int i;
+ if (!old_links)
+ rem |= BIT(0);
+ if (!new_links)
+ add |= BIT(0);
+
for_each_set_bit(i, &rem, IEEE80211_MLD_MAX_NUM_LINKS)
mac80211_hwsim_config_mac_nl(hw, old[i]->addr, false);
for_each_set_bit(i, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf;
- /* FIXME: figure out how to get the locking here */
- link_conf = rcu_dereference_protected(vif->link_conf[i], 1);
+ link_conf = link_conf_dereference_protected(vif, i);
if (WARN_ON(!link_conf))
continue;
@@ -3021,6 +3092,13 @@ static int mac80211_hwsim_change_sta_links(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links)
{
+ struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
+
+ hwsim_check_sta_magic(sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ sp->active_links_rx = new_links;
+
return 0;
}
@@ -3208,8 +3286,112 @@ out_err:
static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xffff),
+ .tx_mcs_160 = cpu_to_le16(0xffff),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * Since B0, B1, B2 and B3 are not set in
+ * the supported channel width set field in the
+ * HE PHY capabilities information field the
+ * device is a 20MHz only device on 2.4GHz band.
+ */
+ .only_20mhz = {
+ .rx_tx_mcs7_max_nss = 0x88,
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -3356,9 +3538,132 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P?*/
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field, include all
+ * the following MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -3529,9 +3834,153 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P?*/
- .types_mask = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_6ghz_capa = {
+ .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN |
+ IEEE80211_HE_6GHZ_CAP_SM_PS |
+ IEEE80211_HE_6GHZ_CAP_RD_RESPONDER |
+ IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS),
+ },
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ .eht_cap = {
+ .has_eht = true,
+ .eht_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1,
+ .phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE |
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK,
+ .phy_cap_info[1] =
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK,
+ .phy_cap_info[2] =
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK |
+ IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK,
+ .phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK,
+ .phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
+ IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP |
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI |
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK,
+ .phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT |
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK |
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK,
+ .phy_cap_info[6] =
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK |
+ IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
+ IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
+ .phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ,
+ },
+
+ /* For all MCS and bandwidth, set 8 NSS for both Tx and
+ * Rx
+ */
+ .eht_mcs_nss_supp = {
+ /*
+ * As B1 and B2 are set in the supported
+ * channel width set field in the HE PHY
+ * capabilities information field and 320MHz in
+ * 6GHz is supported, include all the following
+ * MCS/NSS.
+ */
+ .bw._80 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._160 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ .bw._320 = {
+ .rx_tx_mcs9_max_nss = 0x88,
+ .rx_tx_mcs11_max_nss = 0x88,
+ .rx_tx_mcs13_max_nss = 0x88,
+ },
+ },
+ /* PPE threshold information is not supported */
+ },
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -3896,7 +4345,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_remain_on_channel_duration = 1000;
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
- data->chanctx = NULL;
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
@@ -4471,13 +4919,9 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (data2->use_chanctx) {
if (data2->tmp_chan)
channel = data2->tmp_chan;
- else if (data2->chanctx)
- channel = data2->chanctx->def.chan;
} else {
channel = data2->channel;
}
- if (!channel)
- goto out;
if (!hwsim_virtio_enabled) {
if (hwsim_net_get_netgroup(genl_info_net(info)) !=
@@ -4508,6 +4952,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
rx_status.freq);
if (!iter_data.channel)
goto out;
+ rx_status.band = iter_data.channel->band;
mutex_lock(&data2->mutex);
if (!hwsim_chans_compat(iter_data.channel, channel)) {
@@ -4520,11 +4965,13 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
}
}
mutex_unlock(&data2->mutex);
+ } else if (!channel) {
+ goto out;
} else {
rx_status.freq = channel->center_freq;
+ rx_status.band = channel->band;
}
- rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
@@ -4534,10 +4981,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
ieee80211_is_probe_resp(hdr->frame_control))
rx_status.boottime_ns = ktime_get_boottime_ns();
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
- data2->rx_pkts++;
- data2->rx_bytes += skb->len;
- ieee80211_rx_irqsafe(data2->hw, skb);
+ mac80211_hwsim_rx(data2, &rx_status, skb);
return 0;
err:
@@ -4912,6 +5356,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = hwsim_ops,
.n_small_ops = ARRAY_SIZE(hwsim_ops),
+ .resv_start_op = HWSIM_CMD_DEL_MAC_ADDR + 1,
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
@@ -5060,6 +5505,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
nlh = nlmsg_hdr(skb);
gnlh = nlmsg_data(nlh);
+
+ if (skb->len < nlh->nlmsg_len)
+ return -EINVAL;
+
err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
hwsim_genl_policy, NULL);
if (err) {
@@ -5102,7 +5551,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
skb->data = skb->head;
- skb_set_tail_pointer(skb, len);
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, len);
hwsim_virtio_handle_cmd(skb);
spin_lock_irqsave(&hwsim_virtio_lock, flags);
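
Among other things, the mac80211_hwsim hunks above start tracking, per station, which links the peer is actually listening on: a received frame with the power-management bit set clears that link from active_links_rx, a frame with it clear sets it again, and the TX link selector skips links missing from the mask. A self-contained C sketch of that bookkeeping (the flag value and struct here are illustrative only, not the mac80211 definitions):

#include <stdint.h>
#include <stdio.h>

/* hypothetical frame-control PM bit, for illustration only */
#define EX_FCTL_PM 0x1000u

struct sta_links {
    uint16_t active_links_rx;   /* one bit per link the peer is listening on */
};

/* a frame with PM set marks the link as dozing; PM clear marks it awake again */
static void rx_update_link(struct sta_links *sp, unsigned int link_id,
                           uint16_t frame_control)
{
    if (frame_control & EX_FCTL_PM)
        sp->active_links_rx &= ~(1u << link_id);
    else
        sp->active_links_rx |= 1u << link_id;
}

int main(void)
{
    struct sta_links sp = { .active_links_rx = 0x3 };

    rx_update_link(&sp, 1, EX_FCTL_PM); /* link 1 goes to sleep */
    rx_update_link(&sp, 0, 0);          /* link 0 stays awake */
    printf("active links: 0x%x\n", (unsigned)sp.active_links_rx); /* 0x1 */
    return 0;
}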
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index b0b3f59dabc6..3e065cbb0af9 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -546,7 +546,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
pos = scanresp->bssdesc_and_tlvbuffer;
lbs_deb_hex(LBS_DEB_SCAN, "SCAN_RSP", scanresp->bssdesc_and_tlvbuffer,
- scanresp->bssdescriptsize);
+ bsssize);
tsfdesc = pos + bsssize;
tsfsize = 4 + 8 * scanresp->nr_sets;
@@ -1435,7 +1435,7 @@ static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
}
static int lbs_cfg_set_default_key(struct wiphy *wiphy,
- struct net_device *netdev,
+ struct net_device *netdev, int link_id,
u8 key_index, bool unicast,
bool multicast)
{
@@ -1455,8 +1455,8 @@ static int lbs_cfg_set_default_key(struct wiphy *wiphy,
static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 idx, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct lbs_private *priv = wiphy_priv(wiphy);
u16 key_info;
@@ -1516,7 +1516,8 @@ static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
lbs_deb_assoc("del_key: key_idx %d, mac_addr %pM\n",
diff --git a/drivers/net/wireless/marvell/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c
index d8e4f29b690d..9f53308a9935 100644
--- a/drivers/net/wireless/marvell/libertas/ethtool.c
+++ b/drivers/net/wireless/marvell/libertas/ethtool.c
@@ -20,8 +20,8 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
- strlcpy(info->driver, "libertas", sizeof(info->driver));
- strlcpy(info->version, lbs_driver_version, sizeof(info->version));
+ strscpy(info->driver, "libertas", sizeof(info->driver));
+ strscpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 5c9f295536ea..8f5220cee112 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -39,8 +39,7 @@ unsigned int lbs_debug;
EXPORT_SYMBOL_GPL(lbs_debug);
module_param_named(libertas_debug, lbs_debug, int, 0644);
-unsigned int lbs_disablemesh;
-EXPORT_SYMBOL_GPL(lbs_disablemesh);
+static unsigned int lbs_disablemesh;
module_param_named(libertas_disablemesh, lbs_disablemesh, int, 0644);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index bd835288ce57..a04b66284af4 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -335,7 +335,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
struct mwifiex_sta_node *node;
/*
- * If we get a TID, ta pair which is already present dispatch all the
+ * If we get a TID, ta pair which is already present dispatch all
* the packets and move the window size until the ssn
*/
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 134114ac1ac0..535995e8279f 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -142,7 +142,8 @@ static void *mwifiex_cfg80211_get_adapter(struct wiphy *wiphy)
*/
static int
mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
static const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -431,7 +432,7 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
*/
static int
mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast,
+ int link_id, u8 key_index, bool unicast,
bool multicast)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
@@ -456,8 +457,8 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
*/
static int
mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
struct mwifiex_wep_key *wep_key;
@@ -494,6 +495,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
struct net_device *netdev,
+ int link_id,
u8 key_index)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 26a48d8f49be..b4f945a549f7 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -2104,7 +2104,7 @@ struct mwifiex_fw_mef_entry {
struct host_cmd_ds_mef_cfg {
__le32 criteria;
__le16 num_entries;
- struct mwifiex_fw_mef_entry mef_entry[];
+ u8 mef_entry_data[];
} __packed;
#define CONNECTION_TYPE_INFRA 0
@@ -2254,7 +2254,7 @@ struct coalesce_receive_filt_rule {
struct host_cmd_ds_coalesce_cfg {
__le16 action;
__le16 num_of_rules;
- struct coalesce_receive_filt_rule rule[];
+ u8 rule_data[];
} __packed;
struct host_cmd_ds_multi_chan_policy {
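
The fw.h hunks replace the typed flexible-array members with plain byte arrays because each MEF/coalesce entry is followed by variable-length expression data, so successive entries do not sit at fixed struct strides; the command builders in sta_cmd.c advance a byte cursor instead. A small userspace sketch of packing such variable-length records (the struct layout and names are hypothetical):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* fixed header followed by a variable-length payload -- illustrative layout */
struct entry_hdr {
    uint8_t mode;
    uint8_t action;
};

/* append one variable-sized entry into a raw byte buffer and return the
 * number of bytes written; the caller advances its cursor by that amount */
static size_t pack_entry(uint8_t *pos, uint8_t mode, uint8_t action,
                         const uint8_t *expr, size_t expr_len)
{
    struct entry_hdr hdr = { .mode = mode, .action = action };

    memcpy(pos, &hdr, sizeof(hdr));
    memcpy(pos + sizeof(hdr), expr, expr_len);
    return sizeof(hdr) + expr_len;
}

int main(void)
{
    uint8_t buf[64];
    const uint8_t expr1[] = { 0x41, 0x42 };
    const uint8_t expr2[] = { 0x43 };
    size_t off = 0;

    off += pack_entry(buf + off, 1, 0, expr1, sizeof(expr1));
    off += pack_entry(buf + off, 2, 1, expr2, sizeof(expr2));
    printf("packed %zu bytes\n", off);
    return 0;
}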
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index fc77489cc511..7dddb4b5dea1 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -51,9 +51,10 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->if_ops.card_reset(adapter);
}
-static void fw_dump_timer_fn(struct timer_list *t)
+static void fw_dump_work(struct work_struct *work)
{
- struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
+ struct mwifiex_adapter *adapter =
+ container_of(work, struct mwifiex_adapter, devdump_work.work);
mwifiex_upload_device_dump(adapter);
}
@@ -309,7 +310,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->active_scan_triggered = false;
timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
adapter->devdump_len = 0;
- timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
+ INIT_DELAYED_WORK(&adapter->devdump_work, fw_dump_work);
}
/*
@@ -388,7 +389,7 @@ static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
del_timer(&adapter->wakeup_timer);
- del_timer_sync(&adapter->devdump_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 87729d251fed..63f861e6b28a 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -37,6 +37,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
+#include <linux/workqueue.h>
#include "decl.h"
#include "ioctl.h"
@@ -1043,7 +1044,7 @@ struct mwifiex_adapter {
/* Device dump data/length */
void *devdump_data;
int devdump_len;
- struct timer_list devdump_timer;
+ struct delayed_work devdump_work;
bool ignore_btcoex_events;
};
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index f7f9277602a5..5dcf61761a16 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -644,7 +644,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- int retval;
+ int retval __maybe_unused;
mwifiex_dbg(adapter, EVENT,
"event: Wakeup device...\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 512b5bb9cf6f..e2800a831c8e 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1435,7 +1435,7 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
mef_entry = (struct mwifiex_fw_mef_entry *)pos;
mef_entry->mode = mef->mef_entry[i].mode;
mef_entry->action = mef->mef_entry[i].action;
- pos += sizeof(*mef_cfg->mef_entry);
+ pos += sizeof(*mef_entry);
if (mwifiex_cmd_append_rpn_expression(priv,
&mef->mef_entry[i], &pos))
@@ -1631,7 +1631,7 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
coalesce_cfg->action = cpu_to_le16(cmd_action);
coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
- rule = coalesce_cfg->rule;
+ rule = (void *)coalesce_cfg->rule_data;
for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index b95e90a7d124..df9cdd10a494 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -611,8 +611,8 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
* transmission event gets lost, in this corner case,
* user would still get part of the dump.
*/
- mod_timer(&adapter->devdump_timer,
- jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
+ schedule_delayed_work(&adapter->devdump_work,
+ msecs_to_jiffies(MWIFIEX_TIMER_10S));
}
/* Overflow check */
@@ -623,7 +623,7 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
adapter->event_skb->data, event_skb->len);
adapter->devdump_len += event_skb->len;
- if (le16_to_cpu(fw_dump_hdr->type == FW_DUMP_INFO_ENDED)) {
+ if (le16_to_cpu(fw_dump_hdr->type) == FW_DUMP_INFO_ENDED) {
mwifiex_dbg(adapter, MSG,
"receive end of transmission flag event!\n");
goto upload_dump;
@@ -631,7 +631,7 @@ mwifiex_fw_dump_info_event(struct mwifiex_private *priv,
return;
upload_dump:
- del_timer_sync(&adapter->devdump_timer);
+ cancel_delayed_work_sync(&adapter->devdump_work);
mwifiex_upload_device_dump(adapter);
}
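
The last sta_event.c hunk fixes a misplaced parenthesis: le16_to_cpu(fw_dump_hdr->type == FW_DUMP_INFO_ENDED) byte-swaps the boolean result of the comparison instead of the field, which only happens to work on little-endian hosts. A standalone sketch of why the operator placement matters; the swap helper below merely stands in for how le16_to_cpu() behaves on a big-endian CPU, and the constant is an example value:

#include <stdint.h>
#include <stdio.h>

#define EX_DUMP_ENDED 0x0002    /* illustrative value */

/* byte-swapping conversion, as le16_to_cpu() would behave on a big-endian CPU */
static uint16_t fake_le16_to_cpu(uint16_t v)
{
    return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
    /* the on-the-wire little-endian field as a big-endian CPU reads it */
    uint16_t raw = 0x0200;

    /* buggy: converts the 0/1 result of the comparison */
    int buggy = fake_le16_to_cpu(raw == EX_DUMP_ENDED);
    /* fixed: converts the field, then compares */
    int fixed = fake_le16_to_cpu(raw) == EX_DUMP_ENDED;

    printf("buggy=%d fixed=%d\n", buggy, fixed);    /* prints buggy=0 fixed=1 */
    return 0;
}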
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index c2f2ce2a3f95..d3ab9572e711 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -911,14 +911,14 @@ static int mwifiex_usb_prepare_tx_aggr_skb(struct mwifiex_adapter *adapter,
memcpy(payload, skb_tmp->data, skb_tmp->len);
if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
/* do not pad the last packet */
- *(u16 *)payload = cpu_to_le16(skb_tmp->len);
- *(u16 *)&payload[2] =
+ *(__le16 *)payload = cpu_to_le16(skb_tmp->len);
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
skb_trim(skb_aggr, skb_aggr->len - pad);
} else {
/* add aggregation interface header */
- *(u16 *)payload = cpu_to_le16(skb_tmp->len + pad);
- *(u16 *)&payload[2] =
+ *(__le16 *)payload = cpu_to_le16(skb_tmp->len + pad);
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
}
@@ -1097,9 +1097,9 @@ send_aggr_buf:
}
payload = skb->data;
- *(u16 *)&payload[2] =
+ *(__le16 *)&payload[2] =
cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
- *(u16 *)payload = cpu_to_le16(skb->len);
+ *(__le16 *)payload = cpu_to_le16(skb->len);
skb_send = skb;
context = &port->tx_data_list[port->tx_data_ix++];
return mwifiex_usb_construct_send_urb(adapter, port, ep,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 40cb91097b2e..4901aa02b4fb 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -758,7 +758,7 @@ mt76_dma_init(struct mt76_dev *dev,
dev->napi_dev.threaded = 1;
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
+ netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
napi_enable(&dev->napi[i]);
}
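[editor's note] netif_napi_add() dropped its weight parameter in this kernel generation; the three-argument form uses the default NAPI budget. Sketch of the before/after call with placeholder device and poll names:

#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... process up to budget packets ... */
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}

static void my_napi_setup(struct net_device *ndev, struct napi_struct *napi)
{
	/* old: netif_napi_add(ndev, napi, my_poll, 64); */
	netif_napi_add(ndev, napi, my_poll);
}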
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 253cbc1956d1..6de13d641438 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -267,7 +267,8 @@ static void mt76_init_stream_cap(struct mt76_phy *phy,
}
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
- vht_cap->vht_mcs.tx_highest |=
+ if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 4da77d47b0a6..87db9498dea4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -252,6 +252,30 @@ struct mt76_queue_ops {
void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
+enum mt76_phy_type {
+ MT_PHY_TYPE_CCK,
+ MT_PHY_TYPE_OFDM,
+ MT_PHY_TYPE_HT,
+ MT_PHY_TYPE_HT_GF,
+ MT_PHY_TYPE_VHT,
+ MT_PHY_TYPE_HE_SU = 8,
+ MT_PHY_TYPE_HE_EXT_SU,
+ MT_PHY_TYPE_HE_TB,
+ MT_PHY_TYPE_HE_MU,
+ __MT_PHY_TYPE_HE_MAX,
+};
+
+struct mt76_sta_stats {
+ u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
+ u64 tx_bw[4]; /* 20, 40, 80, 160 */
+ u64 tx_nss[4]; /* 1, 2, 3, 4 */
+ u64 tx_mcs[16]; /* mcs idx */
+ u64 tx_bytes;
+ u32 tx_packets;
+ u32 tx_retries;
+ u32 tx_failed;
+};
+
enum mt76_wcid_flags {
MT_WCID_FLAG_CHECK_PS,
MT_WCID_FLAG_PS,
@@ -299,6 +323,8 @@ struct mt76_wcid {
struct list_head list;
struct idr pktid;
+
+ struct mt76_sta_stats stats;
};
struct mt76_txq {
@@ -342,7 +368,8 @@ struct mt76_rx_tid {
#define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
-#define MT_PACKET_ID_FIRST 2
+#define MT_PACKET_ID_WED 2
+#define MT_PACKET_ID_FIRST 3
#define MT_PACKET_ID_HAS_RATE BIT(7)
/* This is timer for when to give up when waiting for TXS callback,
* with starting time being the time at which the DMA_DONE callback
@@ -527,7 +554,6 @@ struct mt76_usb {
struct mt76_reg_pair *rp;
int rp_len;
u32 base;
- bool burst;
} mcu;
};
@@ -815,26 +841,6 @@ struct mt76_power_limits {
s8 ru[7][12];
};
-enum mt76_phy_type {
- MT_PHY_TYPE_CCK,
- MT_PHY_TYPE_OFDM,
- MT_PHY_TYPE_HT,
- MT_PHY_TYPE_HT_GF,
- MT_PHY_TYPE_VHT,
- MT_PHY_TYPE_HE_SU = 8,
- MT_PHY_TYPE_HE_EXT_SU,
- MT_PHY_TYPE_HE_TB,
- MT_PHY_TYPE_HE_MU,
- __MT_PHY_TYPE_HE_MAX,
-};
-
-struct mt76_sta_stats {
- u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
- u64 tx_bw[4]; /* 20, 40, 80, 160 */
- u64 tx_nss[4]; /* 1, 2, 3, 4 */
- u64 tx_mcs[16]; /* mcs idx */
-};
-
struct mt76_ethtool_worker_info {
u64 *data;
int idx;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 051715ed90dd..ca50feb0b3a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -658,7 +658,7 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
msta->rate_probe = false;
mt7603_wtbl_set_smps(dev, msta,
- sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
spin_unlock_bh(&dev->mt76.lock);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ad6c7d632eed..d6aae60c440d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1088,7 +1088,7 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
offset %= 32;
val = mt76_rr(dev, addr);
- val >>= (tid % 32);
+ val >>= offset;
if (offset > 20) {
addr += 4;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 9bf8545c8c17..8d4733f87cda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1195,12 +1195,16 @@ static void mt7615_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ mt7615_mutex_acquire(dev);
+
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
mt7615_mcu_set_sta_decap_offload(dev, vif, sta);
+
+ mt7615_mutex_release(dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 49ab3a1f3b9b..304212f5f8da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -83,6 +83,7 @@ static int mt7663s_probe(struct sdio_func *func,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
+ .rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
@@ -180,7 +181,6 @@ static void mt7663s_remove(struct sdio_func *func)
mt76_free_device(&dev->mt76);
}
-#ifdef CONFIG_PM
static int mt7663s_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
@@ -235,28 +235,20 @@ static int mt7663s_resume(struct device *dev)
return err;
}
-static const struct dev_pm_ops mt7663s_pm_ops = {
- .suspend = mt7663s_suspend,
- .resume = mt7663s_resume,
-};
-#endif
-
MODULE_DEVICE_TABLE(sdio, mt7663s_table);
MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
MODULE_FIRMWARE(MT7663_ROM_PATCH);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7663s_pm_ops, mt7663s_suspend, mt7663s_resume);
+
static struct sdio_driver mt7663s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7663s_probe,
.remove = mt7663s_remove,
.id_table = mt7663s_table,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &mt7663s_pm_ops,
- }
-#endif
+ .drv.pm = pm_sleep_ptr(&mt7663s_pm_ops),
};
module_sdio_driver(mt7663s_driver);
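[editor's note] The DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr() pairing used above lets the compiler always see the suspend/resume callbacks and discard them when CONFIG_PM_SLEEP is off, so the #ifdef CONFIG_PM block can go away. A minimal sketch with placeholder callbacks and driver:

#include <linux/pm.h>
#include <linux/device.h>

static int my_suspend(struct device *dev)
{
	return 0;	/* quiesce the device here */
}

static int my_resume(struct device *dev)
{
	return 0;	/* bring the device back up here */
}

/* defined unconditionally; dead-code eliminated when PM_SLEEP is disabled */
static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct device_driver my_driver = {
	.name = "my-driver",
	/* resolves to NULL when CONFIG_PM_SLEEP is disabled */
	.pm = pm_sleep_ptr(&my_pm_ops),
};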
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 967641aebf5f..f2d651d7adff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -119,6 +119,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
.rx_skb = mt7615_queue_rx_skb,
+ .rx_check = mt7615_rx_check,
.sta_ps = mt7615_sta_ps,
.sta_add = mt7615_mac_sta_add,
.sta_remove = mt7615_mac_sta_remove,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 75afcb469d3c..635192c878cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -63,6 +63,12 @@ enum {
REPEATER_BSSID_MAX = 0x3f,
};
+struct mt76_connac_reg_map {
+ u32 phys;
+ u32 maps;
+ u32 size;
+};
+
struct mt76_connac_pm {
bool enable:1;
bool enable_user:1;
@@ -348,9 +354,10 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
+bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ __le32 *txs_data);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
- int pid, __le32 *txs_data,
- struct mt76_sta_stats *stats);
+ int pid, __le32 *txs_data);
void mt76_connac2_mac_decode_he_radiotap(struct mt76_dev *dev,
struct sk_buff *skb,
__le32 *rxv, u32 mode);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index 67ce216fb564..f33171bcd343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -158,6 +158,14 @@ enum {
#define MT_TXS4_TIMESTAMP GENMASK(31, 0)
+/* PPDU based TXS */
+#define MT_TXS5_MPDU_TX_BYTE GENMASK(22, 0)
+#define MT_TXS5_MPDU_TX_CNT GENMASK(31, 23)
+
+#define MT_TXS6_MPDU_FAIL_CNT GENMASK(31, 23)
+
+#define MT_TXS7_MPDU_RETRY_CNT GENMASK(31, 23)
+
/* RXD DW1 */
#define MT_RXD1_NORMAL_WLAN_IDX GENMASK(9, 0)
#define MT_RXD1_NORMAL_GROUP_1 BIT(11)
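[editor's note] The PPDU-based TXS masks added above are consumed with the kernel bitfield helpers; a short sketch of decoding one TXS DW (the stats struct here stands in for struct mt76_sta_stats):

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

#define MY_TXS5_MPDU_TX_BYTE	GENMASK(22, 0)
#define MY_TXS5_MPDU_TX_CNT	GENMASK(31, 23)

struct ppdu_stats {
	u64 tx_bytes;
	u32 tx_packets;
};

static void decode_txs5(struct ppdu_stats *stats, __le32 txs5)
{
	/* le32_get_bits() byte-swaps and extracts the masked field */
	stats->tx_bytes   += le32_get_bits(txs5, MY_TXS5_MPDU_TX_BYTE);
	stats->tx_packets += le32_get_bits(txs5, MY_TXS5_MPDU_TX_CNT);
}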
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index 18dea8e1fb20..34ac3d81a510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -490,6 +490,10 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
+
+ /* counting non-offloading skbs */
+ wcid->stats.tx_bytes += skb->len;
+ wcid->stats.tx_packets++;
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
@@ -550,35 +554,29 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
-bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
- int pid, __le32 *txs_data,
- struct mt76_sta_stats *stats)
+bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ __le32 *txs_data)
{
+ struct mt76_sta_stats *stats = &wcid->stats;
struct ieee80211_supported_band *sband;
struct mt76_phy *mphy;
- struct ieee80211_tx_info *info;
- struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
bool cck = false;
u32 txrate, txs, mode;
- mt76_tx_status_lock(dev, &list);
- skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
- if (!skb)
- goto out;
-
txs = le32_to_cpu(txs_data[0]);
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
-
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len = !!(info->flags &
- IEEE80211_TX_STAT_ACK);
-
- info->status.rates[0].idx = -1;
+ /* PPDU based reporting */
+ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
+ stats->tx_bytes +=
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+ stats->tx_packets +=
+ le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
+ stats->tx_failed +=
+ le32_get_bits(txs_data[6], MT_TXS6_MPDU_FAIL_CNT);
+ stats->tx_retries +=
+ le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_CNT);
+ }
txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
@@ -613,7 +611,7 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
if (rate.mcs > 31)
- goto out;
+ return false;
rate.flags = RATE_INFO_FLAGS_MCS;
if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
@@ -621,7 +619,7 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
break;
case MT_PHY_TYPE_VHT:
if (rate.mcs > 9)
- goto out;
+ return false;
rate.flags = RATE_INFO_FLAGS_VHT_MCS;
break;
@@ -630,14 +628,14 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
case MT_PHY_TYPE_HE_TB:
case MT_PHY_TYPE_HE_MU:
if (rate.mcs > 11)
- goto out;
+ return false;
rate.he_gi = wcid->rate.he_gi;
rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
rate.flags = RATE_INFO_FLAGS_HE_MCS;
break;
default:
- goto out;
+ return false;
}
stats->tx_mode[mode]++;
@@ -662,10 +660,34 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
wcid->rate = rate;
-out:
- if (skb)
- mt76_tx_status_skb_done(dev, skb, &list);
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_txs);
+
+bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ int pid, __le32 *txs_data)
+{
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ mt76_tx_status_lock(dev, &list);
+ skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool noacked = !(info->flags & IEEE80211_TX_STAT_ACK);
+
+ if (!(le32_to_cpu(txs_data[0]) & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !noacked;
+ info->status.rates[0].idx = -1;
+ wcid->stats.tx_failed += noacked;
+
+ mt76_connac2_mac_fill_txs(dev, wcid, txs_data);
+ mt76_tx_status_skb_done(dev, skb, &list);
+ }
mt76_tx_status_unlock(dev, &list);
return !!skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 9b17bd97ec09..011fc9729b38 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -260,8 +260,10 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, len);
+ if (sta_hdr) {
+ len += le16_to_cpu(sta_hdr->len);
+ sta_hdr->len = cpu_to_le16(len);
+ }
return ptlv;
}
@@ -594,14 +596,14 @@ mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
vif->type != NL80211_IFTYPE_STATION)
return;
- if (!sta->max_amsdu_len)
+ if (!sta->deflink.agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ amsdu->max_mpdu_size = sta->deflink.agg.max_amsdu_len >=
IEEE80211_MAX_MPDU_LEN_VHT_7991;
wcid->amsdu = true;
@@ -896,7 +898,7 @@ void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
- smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
+ smps->smps = (sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
@@ -2648,7 +2650,7 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
+/* SIFS 20us + 512 byte beacon transmitted by 1Mbps (3906us) */
#define BCN_TX_ESTIMATE_TIME (4096 + 20)
void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
{
@@ -2886,6 +2888,10 @@ int mt76_connac2_load_ram(struct mt76_dev *dev, const char *fw_wm,
goto out;
}
+ snprintf(dev->hw->wiphy->fw_version,
+ sizeof(dev->hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
+
release_firmware(fw);
if (!fw_wa)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index f1d7c05bd794..718f427d8f6b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -10,6 +10,7 @@
#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
#define FW_FEATURE_ENCRY_MODE BIT(4)
#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+#define FW_FEATURE_NON_DL BIT(6)
#define DL_MODE_ENCRYPT BIT(0)
#define DL_MODE_KEY_IDX GENMASK(2, 1)
@@ -33,6 +34,12 @@
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
+enum {
+ FW_TYPE_DEFAULT = 0,
+ FW_TYPE_CLC = 2,
+ FW_TYPE_MAX_NUM = 255
+};
+
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
#define MCU_PKT_ID 0xa0
@@ -174,7 +181,8 @@ struct mt76_connac2_fw_region {
__le32 addr;
__le32 len;
u8 feature_set;
- u8 rsv1[15];
+ u8 type;
+ u8 rsv1[14];
} __packed;
struct tlv {
@@ -1172,6 +1180,7 @@ enum {
MCU_CE_CMD_SET_ROC = 0x1c,
MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
+ MCU_CE_CMD_SET_CLC = 0x5c,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index de30cf5e2d2f..93d96739f802 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -404,7 +404,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
- if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ if (nss > 1 && sta && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
txwi_flags |= MT_TXWI_FLAGS_MMPS;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index c6c16fe8ee85..02da543dfc5c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -21,29 +21,16 @@ static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- u32 reg, val;
int i;
- if (usb->mcu.burst) {
- WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);
-
- reg = usb->mcu.rp[0].reg - usb->mcu.base;
- for (i = 0; i < usb->mcu.rp_len; i++) {
- val = get_unaligned_le32(data + 4 * i);
- usb->mcu.rp[i].reg = reg++;
- usb->mcu.rp[i].value = val;
- }
- } else {
- WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
-
- for (i = 0; i < usb->mcu.rp_len; i++) {
- reg = get_unaligned_le32(data + 8 * i) -
- usb->mcu.base;
- val = get_unaligned_le32(data + 8 * i + 4);
-
- WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
- usb->mcu.rp[i].value = val;
- }
+ WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);
+
+ for (i = 0; i < usb->mcu.rp_len; i++) {
+ u32 reg = get_unaligned_le32(data + 8 * i) - usb->mcu.base;
+ u32 val = get_unaligned_le32(data + 8 * i + 4);
+
+ WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
+ usb->mcu.rp[i].value = val;
}
}
@@ -207,7 +194,6 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
usb->mcu.rp = data;
usb->mcu.rp_len = n;
usb->mcu.base = base;
- usb->mcu.burst = false;
ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index fd76db8f5269..6ef3431cad64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -23,9 +23,9 @@ mt7915_implicit_txbf_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
- return -EBUSY;
-
+ /* The existing connected stations shall reconnect to apply
+ * new implicit txbf configuration.
+ */
dev->ibf = !!val;
return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 60ae834d95a6..be97dede2634 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -176,7 +176,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
/*
* We don't support reading GI info from txs packets.
* For accurate tx status reporting and AQL improvement,
- we need to make sure that flags match so polling GI
+ * we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
rate = &msta->wcid.rate;
@@ -232,7 +232,7 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
bool unicast, insert_ccmp_hdr = false;
u8 remove_pad, amsdu_info;
u8 mode = 0, qos_ctl = 0;
- struct mt7915_sta *msta;
+ struct mt7915_sta *msta = NULL;
bool hdr_trans;
u16 hdr_gap;
u16 seq_ctrl = 0;
@@ -1001,7 +1001,7 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
- if (pid < MT_PACKET_ID_FIRST)
+ if (pid < MT_PACKET_ID_WED)
return;
if (wcidx >= mt7915_wtbl_size(dev))
@@ -1015,8 +1015,11 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
msta = container_of(wcid, struct mt7915_sta, wcid);
- mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
- &msta->stats);
+ if (pid == MT_PACKET_ID_WED)
+ mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
+ else
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
+
if (!wcid->sta)
goto out;
@@ -1047,7 +1050,7 @@ bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
return false;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
+ mt7915_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
mt7915_debugfs_rx_fw_monitor(dev, data, len);
@@ -1084,7 +1087,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
break;
case PKT_TYPE_TXS:
for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7915_mac_add_txs(dev, rxd);
+ mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_FW_MONITOR:
@@ -2071,8 +2074,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
}
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
@@ -2122,8 +2126,9 @@ void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
(twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
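[editor's note] The open-coded replacement of le16p_replace_bits() above is a clear-then-insert on a little-endian field; a minimal sketch with an illustrative mask name (not the real IEEE80211_TWT_REQTYPE_* value):

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

#define MY_REQ_TYPE_FLOWID	GENMASK(3, 1)	/* illustrative field mask */

static void set_flowid(__le16 *req_type, u8 flowid)
{
	/* clear the old field ... */
	*req_type &= ~cpu_to_le16(MY_REQ_TYPE_FLOWID);
	/* ... then OR in the new value, already encoded in LE layout */
	*req_type |= le16_encode_bits(flowid, MY_REQ_TYPE_FLOWID);
}

Functionally this is the same read-modify-write that le16p_replace_bits() performs; the explicit form just makes the endian handling visible at the call site.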
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index bd3386bf0f8a..89b519cfd14c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1010,6 +1010,23 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* offloading flows bypass networking stack, so driver counts and
+ * reports sta statistics via NL80211_STA_INFO when WED is active.
+ */
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+
+ sinfo->tx_packets = msta->wcid.stats.tx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+
+ sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
+ sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+ }
}
static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
@@ -1224,7 +1241,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index f83067961945..8d297e4aa7d4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -925,7 +925,7 @@ mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->max_amsdu_len)
+ if (!sta->deflink.agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
@@ -934,7 +934,7 @@ mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
amsdu->amsdu_en = true;
msta->wcid.amsdu = true;
- switch (sta->max_amsdu_len) {
+ switch (sta->deflink.agg.max_amsdu_len) {
case IEEE80211_MAX_MPDU_LEN_VHT_11454:
if (!is_mt7915(&dev->mt76)) {
amsdu->max_mpdu_size =
@@ -1304,7 +1304,7 @@ int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
ra->phy = *phy;
break;
case RATE_PARAM_MMPS_UPDATE:
- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
break;
default:
break;
@@ -1360,7 +1360,7 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
struct sta_phy phy = {};
int ret, nrates = 0;
-#define __sta_phy_bitrate_mask_check(_mcs, _gi, _he) \
+#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \
do { \
u8 i, gi = mask->control[band]._gi; \
gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
@@ -1373,15 +1373,17 @@ mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
continue; \
nrates += hweight16(mask->control[band]._mcs[i]); \
phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ if (_ht) \
+ phy.mcs += 8 * i; \
} \
} while (0)
if (sta->deflink.he_cap.has_he) {
- __sta_phy_bitrate_mask_check(he_mcs, he_gi, 1);
+ __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
} else if (sta->deflink.vht_cap.vht_supported) {
- __sta_phy_bitrate_mask_check(vht_mcs, gi, 0);
+ __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
} else if (sta->deflink.ht_cap.ht_supported) {
- __sta_phy_bitrate_mask_check(ht_mcs, gi, 0);
+ __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
phy.mcs = ffs(mask->control[band].legacy) - 1;
@@ -1459,7 +1461,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->channel = chandef->chan->hw_value;
ra->bw = sta->deflink.bandwidth;
ra->phy.bw = sta->deflink.bandwidth;
- ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->smps_mode);
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 4499a630e8f1..7bd5f6725d7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -75,6 +75,7 @@ static const u32 mt7915_offs[] = {
[AGG_AWSCR0] = 0x05c,
[AGG_PCR0] = 0x06c,
[AGG_ACR0] = 0x084,
+ [AGG_ACR4] = 0x08c,
[AGG_MRCR] = 0x098,
[AGG_ATCR1] = 0x0f0,
[AGG_ATCR3] = 0x0f4,
@@ -148,6 +149,7 @@ static const u32 mt7916_offs[] = {
[AGG_AWSCR0] = 0x030,
[AGG_PCR0] = 0x040,
[AGG_ACR0] = 0x054,
+ [AGG_ACR4] = 0x05c,
[AGG_MRCR] = 0x068,
[AGG_ATCR1] = 0x1a8,
[AGG_ATCR3] = 0x080,
@@ -204,147 +206,147 @@ static const u32 mt7916_offs[] = {
[ETBF_PAR_RPT0] = 0x100,
};
-static const struct __map mt7915_reg_map[] = {
+static const struct mt76_connac_reg_map mt7915_reg_map[] = {
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
-static const struct __map mt7916_reg_map[] = {
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
- { 0x56000000, 0x04000, 0x1000 }, /* WFDMA_2 (Reserved) */
- { 0x57000000, 0x05000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
- { 0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure cr) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- { 0x820c4000, 0xa8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
- { 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
- { 0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+static const struct mt76_connac_reg_map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
+ { 0x820b0000, 0xae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
-static const struct __map mt7986_reg_map[] = {
- { 0x54000000, 0x402000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
- { 0x55000000, 0x403000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
- { 0x56000000, 0x404000, 0x1000 }, /* WFDMA_2 (Reserved) */
- { 0x57000000, 0x405000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
- { 0x58000000, 0x406000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
- { 0x59000000, 0x407000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
- { 0x820c0000, 0x408000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x40c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x40e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820e0000, 0x420000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x420400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x420800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x420c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x421000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x421400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820ce000, 0x421c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820e7000, 0x421e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820cf000, 0x422000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e9000, 0x423400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x424000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x424200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x424600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x424800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820ca000, 0x426000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
- { 0x820d0000, 0x430000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x00400000, 0x480000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x490000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x820f0000, 0x4a0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0x4a0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0x4a0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0x4a0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0x4a1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0x4a1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0x4a1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0x4a3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0x4a4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0x4a4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0x4a4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0x4a4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- { 0x820c4000, 0x4a8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
- { 0x820b0000, 0x4ae000, 0x1000 }, /* [APB2] WFSYS_ON */
- { 0x80020000, 0x4b0000, 0x10000}, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0x4c0000, 0x10000}, /* WF_TOP_MISC_ON */
- { 0x89000000, 0x4d0000, 0x1000 }, /* WF_MCU_CFG_ON */
- { 0x89010000, 0x4d1000, 0x1000 }, /* WF_MCU_CIRQ */
- { 0x89020000, 0x4d2000, 0x1000 }, /* WF_MCU_GPT */
- { 0x89030000, 0x4d3000, 0x1000 }, /* WF_MCU_WDT */
- { 0x80010000, 0x4d4000, 0x1000 }, /* WF_AXIDMA */
+static const struct mt76_connac_reg_map mt7986_reg_map[] = {
+ { 0x54000000, 0x402000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x403000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x404000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x405000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x406000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x407000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x408000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x40c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x40e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x420000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x420400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x420800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x420c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x421000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x421400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x421c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x421e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x422000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x423400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x424000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x424200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x424600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x424800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x426000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x430000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x480000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x490000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x820f0000, 0x4a0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0x4a0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0x4a0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0x4a0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0x4a1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0x4a1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0x4a1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0x4a3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0x4a4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0x4a4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0x4a4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0x4a4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0x4a8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL ) */
+ { 0x820b0000, 0x4ae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0x4b0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0x4c0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x89000000, 0x4d0000, 0x01000 }, /* WF_MCU_CFG_ON */
+ { 0x89010000, 0x4d1000, 0x01000 }, /* WF_MCU_CIRQ */
+ { 0x89020000, 0x4d2000, 0x01000 }, /* WF_MCU_GPT */
+ { 0x89030000, 0x4d3000, 0x01000 }, /* WF_MCU_WDT */
+ { 0x80010000, 0x4d4000, 0x01000 }, /* WF_AXIDMA */
{ 0x0, 0x0, 0x0 }, /* imply end of search */
};
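[editor's note] The {phys, maps, size} tuples above are typically walked linearly until the all-zero terminator; the helper below is a hypothetical lookup, with field semantics inferred from the table comments rather than taken from the driver:

#include <linux/types.h>

struct my_reg_map {
	u32 phys;	/* bus-visible base of the window */
	u32 maps;	/* offset of the window inside the mapped BAR */
	u32 size;	/* window length; 0 terminates the table */
};

static u32 my_reg_map_lookup(const struct my_reg_map *map, u32 addr)
{
	int i;

	for (i = 0; map[i].size; i++) {
		if (addr >= map[i].phys && addr < map[i].phys + map[i].size)
			return map[i].maps + (addr - map[i].phys);
	}

	return 0;	/* address not covered by any window */
}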
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 54ef2a12a443..1eb11617a625 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -127,8 +127,6 @@ struct mt7915_sta {
unsigned long jiffies;
unsigned long ampdu_state;
- struct mt76_sta_stats stats;
-
struct mt76_connac_sta_key_conf bip;
struct {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index d74f609775d3..728a879c3b00 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -99,6 +99,7 @@ static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
int ret;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
@@ -112,18 +113,38 @@ static int mt7915_wed_offload_enable(struct mtk_wed_device *wed)
if (!ret)
return -EAGAIN;
+ phy = &dev->phy;
+ mt76_set(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_set(dev, MT_AGG_ACR4(phy->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
+
return 0;
}
static void mt7915_wed_offload_disable(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
spin_lock_bh(&dev->mt76.token_lock);
dev->mt76.token_size = MT7915_TOKEN_SIZE;
spin_unlock_bh(&dev->mt76.token_lock);
+
+ /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
+ * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
+ */
+ phy = &dev->phy;
+ mt76_clear(dev, MT_AGG_ACR4(phy->band_idx), MT_AGG_ACR_PPDU_TXS2H);
+
+ phy = dev->mt76.phys[MT_BAND1] ? dev->mt76.phys[MT_BAND1]->priv : NULL;
+ if (phy)
+ mt76_clear(dev, MT_AGG_ACR4(phy->band_idx),
+ MT_AGG_ACR_PPDU_TXS2H);
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index 2493c3ad3c56..5920e705835a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -4,17 +4,11 @@
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
-struct __map {
- u32 phys;
- u32 maps;
- u32 size;
-};
-
/* used to differentiate between generations */
struct mt7915_reg_desc {
const u32 *reg_rev;
const u32 *offs_rev;
- const struct __map *map;
+ const struct mt76_connac_reg_map *map;
u32 map_size;
};
@@ -52,6 +46,7 @@ enum offs_rev {
AGG_AWSCR0,
AGG_PCR0,
AGG_ACR0,
+ AGG_ACR4,
AGG_MRCR,
AGG_ATCR1,
AGG_ATCR3,
@@ -471,6 +466,9 @@ enum offs_rev {
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
+#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR4))
+#define MT_AGG_ACR_PPDU_TXS2H BIT(1)
+
#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
index be4f07ad3af9..47e034a9b003 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -13,6 +13,7 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
acpi_handle root, handle;
acpi_status status;
u32 i = 0;
+ int ret;
root = ACPI_HANDLE(mdev->dev);
if (!root)
@@ -52,9 +53,11 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
*(*tbl + i) = (u8)sar_unit->integer.value;
}
free:
+ ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+
kfree(sar_root);
- return (i == sar_root->package.count) ? 0 : -EINVAL;
+ return ret;
}
/* MTCL : Country List Table for 6G band */
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
index 54f30401343c..4b647278eb30 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h
@@ -11,12 +11,15 @@ enum mt7921_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_WIFI_CONF = 0x07c,
- __MT_EE_MAX = 0x3bf
+ MT_EE_HW_TYPE = 0x55b,
+ __MT_EE_MAX = 0x9ff
};
#define MT_EE_WIFI_CONF_TX_MASK BIT(0)
#define MT_EE_WIFI_CONF_BAND_SEL GENMASK(3, 2)
+#define MT_EE_HW_TYPE_ENCAP BIT(0)
+
enum mt7921_eeprom_band {
MT_EE_NA,
MT_EE_5GHZ,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index cd960e23770f..dcdb3cf04ac1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -39,6 +39,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
+ mt7921_mcu_set_clc(dev, request->alpha2, request->country_ie_env);
mt76_connac_mcu_set_channel_domain(hw->priv);
mt7921_set_tx_sar_pwr(hw, NULL);
mt7921_mutex_release(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 47f0aa81ab02..e4868c492bc0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -235,7 +235,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- struct mt7921_sta *msta;
+ struct mt7921_sta *msta = NULL;
u16 seq_ctrl = 0;
__le16 fc = 0;
u8 mode = 0;
@@ -486,7 +486,7 @@ mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
return 0;
}
-void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
struct mt7921_sta *msta;
u16 fc, tid;
@@ -509,7 +509,6 @@ void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
-EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
@@ -539,8 +538,7 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
msta = container_of(wcid, struct mt7921_sta, wcid);
- mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data,
- &msta->stats);
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
@@ -552,7 +550,134 @@ void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
out:
rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
+
+void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt76_connac_txp_skb_unmap(mdev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+ if (sta) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ wcid_idx = wcid->idx;
+ } else {
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+out:
+ t->skb = NULL;
+ mt76_put_txwi(mdev, t);
+}
+EXPORT_SYMBOL_GPL(mt7921_txwi_free);
+
+static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
+{
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
+ LIST_HEAD(free_list);
+ bool wake = false;
+ u8 i, count;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+
+ count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ if (WARN_ON_ONCE((void *)&tx_info[count] > end))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(tx_info[i]);
+ u8 stat;
+
+ /* 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7921_sta *msta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ continue;
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
+ }
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+
+ rcu_read_lock();
+ mt7921_mac_sta_poll(dev);
+ rcu_read_unlock();
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
+ mt7921_mac_tx_free(dev, data, len); /* mmio */
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7921_rx_check);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
@@ -570,6 +695,11 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
type = PKT_TYPE_NORMAL_MCU;
switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
+ mt7921_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
case PKT_TYPE_RX_EVENT:
mt7921_mcu_rx_event(dev, skb);
break;
@@ -780,6 +910,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
void mt7921_reset(struct mt76_dev *mdev)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct mt76_connac_pm *pm = &dev->pm;
if (!dev->hw_init_done)
return;
@@ -787,8 +918,12 @@ void mt7921_reset(struct mt76_dev *mdev)
if (dev->hw_full_reset)
return;
+ if (pm->suspended)
+ return;
+
queue_work(dev->mt76.wq, &dev->reset_work);
}
+EXPORT_SYMBOL_GPL(mt7921_reset);
void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 1438a9f8d1fd..7e409ac7d9a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -752,6 +752,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
@@ -1045,7 +1046,7 @@ mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats);
}
static
@@ -1404,6 +1405,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ mt7921_mutex_acquire(dev);
+
if (enabled)
set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
else
@@ -1411,6 +1414,8 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
MCU_UNI_CMD(STA_REC_UPDATE));
+
+ mt7921_mutex_release(dev);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1526,17 +1531,23 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7921_dev *dev = mt7921_hw_dev(hw);
int err;
+ mt7921_mutex_acquire(dev);
+
err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
true);
if (err)
- return err;
+ goto out;
err = mt7921_mcu_set_bss_pm(dev, vif, true);
if (err)
- return err;
+ goto out;
+
+ err = mt7921_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+out:
+ mt7921_mutex_release(dev);
- return mt7921_mcu_sta_update(dev, NULL, vif, true,
- MT76_STA_INFO_STATE_NONE);
+ return err;
}
static void
@@ -1548,11 +1559,16 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7921_dev *dev = mt7921_hw_dev(hw);
int err;
+ mt7921_mutex_acquire(dev);
+
err = mt7921_mcu_set_bss_pm(dev, vif, false);
if (err)
- return;
+ goto out;
mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false);
+
+out:
+ mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index da12d0ae0835..67bf92969a7b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -2,14 +2,20 @@
/* Copyright (C) 2020 MediaTek Inc. */
#include <linux/fs.h>
+#include <linux/firmware.h>
#include "mt7921.h"
#include "mt7921_trace.h"
+#include "eeprom.h"
#include "mcu.h"
#include "mac.h"
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
+static bool mt7921_disable_clc;
+module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
+MODULE_PARM_DESC(disable_clc, "disable CLC support");
+
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
@@ -84,6 +90,27 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
+static int mt7921_mcu_read_eeprom(struct mt7921_dev *dev, u32 offset, u8 *val)
+{
+ struct mt7921_mcu_eeprom_info *res, req = {
+ .addr = cpu_to_le32(round_down(offset,
+ MT7921_EEPROM_BLOCK_SIZE)),
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct mt7921_mcu_eeprom_info *)skb->data;
+ *val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int
@@ -354,6 +381,90 @@ static char *mt7921_ram_name(struct mt7921_dev *dev)
return ret;
}
+static int mt7921_load_clc(struct mt7921_dev *dev, const char *fw_name)
+{
+ const struct mt76_connac2_fw_trailer *hdr;
+ const struct mt76_connac2_fw_region *region;
+ const struct mt7921_clc *clc;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt7921_phy *phy = &dev->phy;
+ const struct firmware *fw;
+ int ret, i, len, offset = 0;
+ u8 *clc_base = NULL, hw_encap = 0;
+
+ if (mt7921_disable_clc ||
+ mt76_is_usb(&dev->mt76))
+ return 0;
+
+ if (mt76_is_mmio(&dev->mt76)) {
+ ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+ if (ret)
+ return ret;
+ hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
+ }
+
+ ret = request_firmware(&fw, fw_name, mdev->dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(mdev->dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
+ for (i = 0; i < hdr->n_region; i++) {
+ region = (const void *)((const u8 *)hdr -
+ (hdr->n_region - i) * sizeof(*region));
+ len = le32_to_cpu(region->len);
+
+ /* check if we have a valid buffer size */
+ if (offset + len > fw->size) {
+ dev_err(mdev->dev, "Invalid firmware region\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((region->feature_set & FW_FEATURE_NON_DL) &&
+ region->type == FW_TYPE_CLC) {
+ clc_base = (u8 *)(fw->data + offset);
+ break;
+ }
+ offset += len;
+ }
+
+ if (!clc_base)
+ goto out;
+
+ for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
+ clc = (const struct mt7921_clc *)(clc_base + offset);
+
+ /* do not init buf again if chip reset triggered */
+ if (phy->clc[clc->idx])
+ continue;
+
+ /* header content sanity */
+ if (clc->idx == MT7921_CLC_POWER &&
+ u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+ continue;
+
+ phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
+ le32_to_cpu(clc->len),
+ GFP_KERNEL);
+
+ if (!phy->clc[clc->idx]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
static int mt7921_load_firmware(struct mt7921_dev *dev)
{
int ret;
@@ -423,6 +534,10 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
return err;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ err = mt7921_load_clc(dev, mt7921_ram_name(dev));
+ if (err)
+ return err;
+
return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
@@ -930,3 +1045,86 @@ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
&req, sizeof(req), true);
}
+
+static
+int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap,
+ struct mt7921_clc *clc,
+ u8 idx)
+{
+ struct sk_buff *skb;
+ struct {
+ u8 ver;
+ u8 pad0;
+ __le16 len;
+ u8 idx;
+ u8 env;
+ u8 pad1[2];
+ u8 alpha2[2];
+ u8 type[2];
+ u8 rsvd[64];
+ } __packed req = {
+ .idx = idx,
+ .env = env_cap,
+ };
+ int ret, valid_cnt = 0;
+ u8 i, *pos;
+
+ if (!clc)
+ return 0;
+
+ pos = clc->data;
+ for (i = 0; i < clc->nr_country; i++) {
+ struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
+ u16 len = le16_to_cpu(rule->len);
+
+ pos += len + sizeof(*rule);
+ if (rule->alpha2[0] != alpha2[0] ||
+ rule->alpha2[1] != alpha2[1])
+ continue;
+
+ memcpy(req.alpha2, rule->alpha2, 2);
+ memcpy(req.type, rule->type, 2);
+
+ req.len = cpu_to_le16(sizeof(req) + len);
+ skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
+ le16_to_cpu(req.len),
+ sizeof(req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ skb_put_data(skb, rule->data, len);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_CE_CMD(SET_CLC), false);
+ if (ret < 0)
+ return ret;
+ valid_cnt++;
+ }
+
+ if (!valid_cnt)
+ return -ENOENT;
+
+ return 0;
+}
+
+int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap)
+{
+ struct mt7921_phy *phy = (struct mt7921_phy *)&dev->phy;
+ int i, ret;
+
+ /* submit all clc config */
+ for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
+ ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
+ phy->clc[i], i);
+
+ /* If no country found, set "00" as default */
+ if (ret == -ENOENT)
+ ret = __mt7921_mcu_set_clc(dev, "00",
+ ENVIRON_INDOOR,
+ phy->clc[i], i);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
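For orientation, the exported mt7921_mcu_set_clc() above is the entry point a regulatory-domain update would drive. A minimal sketch of such a caller follows; the wiring is hypothetical (it is not taken from this patch) and assumes the standard struct regulatory_request fields from include/net/regulatory.h together with the mt7921_hw_dev() and mt7921_mutex_acquire()/release() helpers seen elsewhere in this series.

#include <net/mac80211.h>
#include "mt7921.h"

/* Illustrative caller only -- not part of this diff. */
static void mt7921_clc_regd_notifier(struct wiphy *wiphy,
				     struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct mt7921_dev *dev = mt7921_hw_dev(hw);

	mt7921_mutex_acquire(dev);
	/* mt7921_mcu_set_clc() itself falls back to "00"/ENVIRON_INDOOR
	 * when the requested country is absent from the CLC tables.
	 */
	mt7921_mcu_set_clc(dev, (u8 *)request->alpha2,
			   request->country_ie_env);
	mt7921_mutex_release(dev);
}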
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index 0d20f7d8d474..96dc870fd35e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -41,7 +41,7 @@ enum {
struct mt7921_mcu_eeprom_info {
__le32 addr;
__le32 valid;
- u8 data[16];
+ u8 data[MT7921_EEPROM_BLOCK_SIZE];
} __packed;
#define MT_RA_RATE_NSS GENMASK(8, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index c161031ac62a..eaba114a9c7e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -41,6 +41,8 @@
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
+#define MT7921_EEPROM_BLOCK_SIZE 16
+
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
@@ -100,7 +102,6 @@ struct mt7921_sta {
unsigned long last_txs;
unsigned long ampdu_state;
- struct mt76_sta_stats stats;
struct mt76_connac_sta_key_conf bip;
};
@@ -149,6 +150,29 @@ struct mib_stats {
u32 tx_amsdu_cnt;
};
+enum {
+ MT7921_CLC_POWER,
+ MT7921_CLC_CHAN,
+ MT7921_CLC_MAX_NUM,
+};
+
+struct mt7921_clc_rule {
+ u8 alpha2[2];
+ u8 type[2];
+ __le16 len;
+ u8 data[];
+} __packed;
+
+struct mt7921_clc {
+ __le32 len;
+ u8 idx;
+ u8 ver;
+ u8 nr_country;
+ u8 type;
+ u8 rsv[8];
+ u8 data[];
+};
+
struct mt7921_phy {
struct mt76_phy *mt76;
struct mt7921_dev *dev;
@@ -174,6 +198,8 @@ struct mt7921_phy {
#ifdef CONFIG_ACPI
struct mt7921_acpi_sar *acpisar;
#endif
+
+ struct mt7921_clc *clc[MT7921_CLC_MAX_NUM];
};
#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
@@ -380,6 +406,7 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_token_put(struct mt7921_dev *dev);
+bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
@@ -410,14 +437,13 @@ int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
-void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
+void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list);
void mt7921_mac_sta_poll(struct mt7921_dev *dev);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
-bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
-void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
int mt7921e_mac_reset(struct mt7921_dev *dev);
int mt7921e_mcu_init(struct mt7921_dev *dev);
@@ -479,4 +505,7 @@ mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
#endif
int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
+
+int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
+ enum environment_cap env_cap);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index ea3069d18c35..8a53d8f286db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -123,54 +123,51 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
+ static const struct mt76_connac_reg_map fixed_map[] = {
{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x01000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x01000 }, /* WF_MDP_TOP */
+ { 0x74030000, 0x10000, 0x10000 }, /* PCIE_MAC_IREG */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
};
int i;
@@ -187,7 +184,7 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
if (ofs > fixed_map[i].size)
continue;
- return fixed_map[i].mapped + ofs;
+ return fixed_map[i].maps + ofs;
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
@@ -238,8 +235,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt76_connac_tx_complete_skb,
- .rx_check = mt7921e_rx_check,
- .rx_skb = mt7921e_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
+ .rx_skb = mt7921_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
@@ -288,6 +285,8 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
goto err_free_pci_vec;
}
+ pci_set_drvdata(pdev, mdev);
+
dev = container_of(mdev, struct mt7921_dev, mt76);
dev->hif_ops = &mt7921_pcie_ops;
@@ -367,6 +366,7 @@ static int mt7921_pci_suspend(struct device *device)
int i, err;
pm->suspended = true;
+ flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -409,9 +409,6 @@ static int mt7921_pci_suspend(struct device *device)
if (err)
goto restore_napi;
- if (err)
- goto restore_napi;
-
return 0;
restore_napi:
@@ -428,6 +425,9 @@ restore_napi:
restore_suspend:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
@@ -441,7 +441,7 @@ static int mt7921_pci_resume(struct device *device)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto failed;
mt7921_wpdma_reinit_cond(dev);
@@ -471,11 +471,12 @@ static int mt7921_pci_resume(struct device *device)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
- if (err)
- return err;
-
+failed:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index e1800674089a..8dd60408b117 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -53,154 +53,6 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
-static void
-mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
- struct list_head *free_list)
-{
- struct mt76_dev *mdev = &dev->mt76;
- __le32 *txwi;
- u16 wcid_idx;
-
- mt76_connac_txp_skb_unmap(mdev, t);
- if (!t->skb)
- goto out;
-
- txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
- if (sta) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- wcid_idx = wcid->idx;
- } else {
- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
- }
-
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-
-out:
- t->skb = NULL;
- mt76_put_txwi(mdev, t);
-}
-
-static void
-mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
-{
- struct mt76_connac_tx_free *free = data;
- __le32 *tx_info = (__le32 *)(data + sizeof(*free));
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
- struct sk_buff *skb, *tmp;
- void *end = data + len;
- LIST_HEAD(free_list);
- bool wake = false;
- u8 i, count;
-
- /* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
-
- count = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
- if (WARN_ON_ONCE((void *)&tx_info[count] > end))
- return;
-
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(tx_info[i]);
- u8 stat;
-
- /* 1'b1: new wcid pair.
- * 1'b0: msdu_id with the same 'wcid pair' as above.
- */
- if (info & MT_TX_FREE_PAIR) {
- struct mt7921_sta *msta;
- struct mt76_wcid *wcid;
- u16 idx;
-
- count++;
- idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
- sta = wcid_to_sta(wcid);
- if (!sta)
- continue;
-
- msta = container_of(wcid, struct mt7921_sta, wcid);
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
- continue;
- }
-
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- stat = FIELD_GET(MT_TX_FREE_STATUS, info);
-
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
- continue;
-
- mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
- }
-
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
-
- list_for_each_entry_safe(skb, tmp, &free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
- }
-
- rcu_read_lock();
- mt7921_mac_sta_poll(dev);
- rcu_read_unlock();
-
- mt76_worker_schedule(&dev->mt76.tx_worker);
-}
-
-bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- __le32 *rxd = (__le32 *)data;
- __le32 *end = (__le32 *)&rxd[len / 4];
- enum rx_pkt_type type;
-
- type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7921e_mac_tx_free(dev, data, len);
- return false;
- case PKT_TYPE_TXS:
- for (rxd += 2; rxd + 8 <= end; rxd += 8)
- mt7921_mac_add_txs(dev, rxd);
- return false;
- default:
- return true;
- }
-}
-
-void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- __le32 *rxd = (__le32 *)skb->data;
- enum rx_pkt_type type;
-
- type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7921e_mac_tx_free(dev, skb->data, skb->len);
- napi_consume_skb(skb, 1);
- break;
- default:
- mt7921_queue_rx_skb(mdev, q, skb);
- break;
- }
-}
-
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
@@ -261,7 +113,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
err = mt7921e_driver_own(dev);
if (err)
- return err;
+ goto out;
err = mt7921_run_firmware(dev);
if (err)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index 5efda694fb9d..86340d3205c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -30,12 +30,7 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ mdev->mcu.timeout = 3 * HZ;
if (cmd == MCU_CMD(FW_SCATTER))
txq = MT_MCUQ_FWDL;
@@ -59,6 +54,8 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
if (err)
return err;
+ mt76_rmw_field(dev, MT_PCIE_MAC_PM, MT_PCIE_MAC_PM_L0S_DIS, 1);
+
err = mt7921_run_firmware(dev);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index ea643260ceb6..c65582acfa55 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -440,6 +440,8 @@
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+#define MT_PCIE_MAC_PM MT_PCIE_MAC(0x194)
+#define MT_PCIE_MAC_PM_L0S_DIS BIT(8)
#define MT_DMA_SHDL(ofs) (0x7c026000 + (ofs))
#define MT_DMASHDL_SW_CONTROL MT_DMA_SHDL(0x004)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 487acd6e2be8..3b25a06fd946 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -96,6 +96,7 @@ static int mt7921s_probe(struct sdio_func *func,
.tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
.tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
@@ -194,7 +195,6 @@ static void mt7921s_remove(struct sdio_func *func)
mt7921s_unregister_device(dev);
}
-#ifdef CONFIG_PM
static int mt7921s_suspend(struct device *__dev)
{
struct sdio_func *func = dev_to_sdio_func(__dev);
@@ -206,6 +206,7 @@ static int mt7921s_suspend(struct device *__dev)
pm->suspended = true;
set_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
+ flush_work(&dev->reset_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
@@ -261,6 +262,9 @@ restore_suspend:
clear_bit(MT76_STATE_SUSPEND, &mdev->phy.state);
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
@@ -276,7 +280,7 @@ static int mt7921s_resume(struct device *__dev)
err = mt7921_mcu_drv_pmctrl(dev);
if (err < 0)
- return err;
+ goto failed;
mt76_worker_enable(&mdev->tx_worker);
mt76_worker_enable(&mdev->sdio.txrx_worker);
@@ -288,34 +292,27 @@ static int mt7921s_resume(struct device *__dev)
mt76_connac_mcu_set_deep_sleep(mdev, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
- if (err)
- return err;
-
+failed:
pm->suspended = false;
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
return err;
}
-static const struct dev_pm_ops mt7921s_pm_ops = {
- .suspend = mt7921s_suspend,
- .resume = mt7921s_resume,
-};
-#endif
-
MODULE_DEVICE_TABLE(sdio, mt7921s_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
+static DEFINE_SIMPLE_DEV_PM_OPS(mt7921s_pm_ops, mt7921s_suspend, mt7921s_resume);
+
static struct sdio_driver mt7921s_driver = {
.name = KBUILD_MODNAME,
.probe = mt7921s_probe,
.remove = mt7921s_remove,
.id_table = mt7921s_table,
-#ifdef CONFIG_PM
- .drv = {
- .pm = &mt7921s_pm_ops,
- }
-#endif
+ .drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
index e038d7404323..5c1489766d9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -33,12 +33,7 @@ mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ mdev->mcu.timeout = 3 * HZ;
if (cmd == MCU_CMD(FW_SCATTER))
type = MT7921_SDIO_FWDL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index dd3b8884e162..29c0ee330dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -106,12 +106,7 @@ mt7921u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
return ret;
- if (cmd == MCU_UNI_CMD(HIF_CTRL) ||
- cmd == MCU_UNI_CMD(SUSPEND) ||
- cmd == MCU_UNI_CMD(OFFLOAD))
- mdev->mcu.timeout = HZ;
- else
- mdev->mcu.timeout = 3 * HZ;
+ mdev->mcu.timeout = 3 * HZ;
if (cmd != MCU_CMD(FW_SCATTER))
ep = MT_EP_OUT_INBAND_CMD;
@@ -183,6 +178,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt7921_usb_sdio_tx_complete_skb,
.tx_status_data = mt7921_usb_sdio_tx_status_data,
.rx_skb = mt7921_queue_rx_skb,
+ .rx_check = mt7921_rx_check,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
.sta_assoc = mt7921_mac_sta_assoc,
@@ -300,23 +296,34 @@ static void mt7921u_disconnect(struct usb_interface *usb_intf)
static int mt7921u_suspend(struct usb_interface *intf, pm_message_t state)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
+ struct mt76_connac_pm *pm = &dev->pm;
int err;
+ pm->suspended = true;
+ flush_work(&dev->reset_work);
+
err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true);
if (err)
- return err;
+ goto failed;
mt76u_stop_rx(&dev->mt76);
mt76u_stop_tx(&dev->mt76);
- set_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
-
return 0;
+
+failed:
+ pm->suspended = false;
+
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
+
+ return err;
}
static int mt7921u_resume(struct usb_interface *intf)
{
struct mt7921_dev *dev = usb_get_intfdata(intf);
+ struct mt76_connac_pm *pm = &dev->pm;
bool reinit = true;
int err, i;
@@ -338,16 +345,21 @@ static int mt7921u_resume(struct usb_interface *intf)
if (reinit || mt7921_dma_need_reinit(dev)) {
err = mt7921u_dma_init(dev, true);
if (err)
- return err;
+ goto failed;
}
- clear_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
-
err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
- return err;
+ goto failed;
+
+ err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+failed:
+ pm->suspended = false;
+
+ if (err < 0)
+ mt7921_reset(&dev->mt76);
- return mt76_connac_mcu_set_hif_suspend(&dev->mt76, false);
+ return err;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index aba2a9865821..0ec308f99af5 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -478,14 +478,14 @@ static void mt76s_status_worker(struct mt76_worker *w)
if (ndata_frames > 0)
resched = true;
- if (dev->drv->tx_status_data &&
+ if (dev->drv->tx_status_data && ndata_frames > 0 &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
!test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
- queue_work(dev->wq, &dev->sdio.stat_work);
+ ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
} while (nframes > 0);
if (resched)
- mt76_worker_schedule(&dev->sdio.txrx_worker);
+ mt76_worker_schedule(&dev->tx_worker);
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -508,7 +508,7 @@ static void mt76s_tx_status_data(struct work_struct *work)
}
if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
- queue_work(dev->wq, &sdio->stat_work);
+ ieee80211_queue_work(dev->hw, &sdio->stat_work);
else
clear_bit(MT76_READING_STATS, &dev->phy.state);
}
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index a2601aa9e7b1..bfc4de50a4d2 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -85,7 +85,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i;
struct page *page;
- u8 *buf;
+ u8 *buf, *end;
for (i = 0; i < intr->rx.num[qid]; i++)
len += round_up(intr->rx.len[qid][i] + 4, 4);
@@ -112,20 +112,29 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
return err;
}
- for (i = 0; i < intr->rx.num[qid]; i++) {
+ end = buf + len;
+ i = 0;
+
+ while (i < intr->rx.num[qid] && buf < end) {
int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
__le32 *rxd = (__le32 *)buf;
/* parse rxd to get the actual packet length */
len = le32_get_bits(rxd[0], GENMASK(15, 0));
- e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
- if (!e->skb)
- break;
+ /* Optimized path for TXS */
+ if (!dev->drv->rx_check || dev->drv->rx_check(dev, buf, len)) {
+ e->skb = mt76s_build_rx_skb(buf, len,
+ round_up(len + 4, 4));
+ if (!e->skb)
+ break;
+
+ if (q->queued + i + 1 == q->ndesc)
+ break;
+ i++;
+ }
buf += round_up(len + 4, 4);
- if (q->queued + i + 1 == q->ndesc)
- break;
}
put_page(page);
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 71fd3fbfa7d2..0accc71a91c9 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/random.h>
#include "mt76.h"
const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
@@ -123,12 +125,14 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
if (!head)
return -ENOMEM;
- hdr = __skb_put_zero(head, head_len);
+ hdr = __skb_put_zero(head, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, td->addr[0], ETH_ALEN);
memcpy(hdr->addr2, td->addr[1], ETH_ALEN);
memcpy(hdr->addr3, td->addr[2], ETH_ALEN);
skb_set_queue_mapping(head, IEEE80211_AC_BE);
+ get_random_bytes(__skb_put(head, head_len - sizeof(*hdr)),
+ head_len - sizeof(*hdr));
info = IEEE80211_SKB_CB(head);
info->flags = IEEE80211_TX_CTL_INJECTED |
@@ -154,7 +158,7 @@ int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
return -ENOMEM;
}
- __skb_put_zero(frag, frag_len);
+ get_random_bytes(__skb_put(frag, frag_len), frag_len);
head->len += frag->len;
head->data_len += frag->len;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 6b8964c19f50..4c4033bb1bb3 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -528,6 +528,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
data_len = min_t(int, len, data_len - head_room);
+
+ if (len == data_len &&
+ dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
+ return 0;
+
skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 3ac373d29d93..b89047965e78 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -540,8 +540,9 @@ static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info,
return 0;
}
-static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise, const u8 *mac_addr, struct key_params *params)
+static int add_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
{
int ret = 0, keylen = params->key_len;
@@ -644,7 +645,7 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
return ret;
}
-static int del_key(struct wiphy *wiphy, struct net_device *netdev,
+static int del_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
u8 key_index,
bool pairwise,
const u8 *mac_addr)
@@ -685,8 +686,9 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
return 0;
}
-static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
- bool pairwise, const u8 *mac_addr, void *cookie,
+static int get_key(struct wiphy *wiphy, struct net_device *netdev, int link_id,
+ u8 key_index, bool pairwise, const u8 *mac_addr,
+ void *cookie,
void (*callback)(void *cookie, struct key_params *))
{
struct wilc_vif *vif = netdev_priv(netdev);
@@ -723,13 +725,14 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
/* wiphy_new_nm() will WARNON if not present */
static int set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
return 0;
}
static int set_default_mgmt_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index)
+ int link_id, u8 key_index)
{
struct wilc_vif *vif = netdev_priv(netdev);
@@ -994,12 +997,11 @@ bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size)
{
struct wilc *wl = vif->wilc;
struct wilc_priv *priv = &vif->priv;
- int freq, ret;
+ int freq;
freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ);
- ret = cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
- return ret;
+ return cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0);
}
void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size)
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index b5a1b65c087c..03b7229a0ff5 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -229,7 +229,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
return NULL;
wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP;
- strlcpy(wl->monitor_dev->name, name, IFNAMSIZ);
+ strscpy(wl->monitor_dev->name, name, IFNAMSIZ);
wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
wl->monitor_dev->needs_free_netdev = true;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 43c085c74b7a..bb1a315a7b7e 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -245,6 +245,7 @@ struct wilc {
u8 *rx_buffer;
u32 rx_buffer_offset;
u8 *tx_buffer;
+ u32 *vmm_table;
struct txq_handle txq[NQUEUES];
int txq_entries;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 600cc57e9da2..7390f94cd4ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -28,6 +28,7 @@ struct wilc_sdio {
u32 block_size;
bool isinit;
int has_thrpt_enh3;
+ u8 *cmd53_buf;
};
struct sdio_cmd52 {
@@ -47,6 +48,7 @@ struct sdio_cmd53 {
u32 count: 9;
u8 *buffer;
u32 block_size;
+ bool use_global_buf;
};
static const struct wilc_hif_func wilc_hif_sdio;
@@ -91,6 +93,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+ u8 *buf = cmd->buffer;
sdio_claim_host(func);
@@ -101,12 +105,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
else
size = cmd->count;
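+ /* Route small register-sized transfers through the driver's preallocated
+ * cmd53_buf bounce buffer, so the SDIO core is never asked to DMA from a
+ * caller-provided buffer (e.g. one living on the stack).
+ */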
+ if (cmd->use_global_buf) {
+ if (size > sizeof(u32)) {
+ sdio_release_host(func);
+ return -EINVAL;
+ }
+
+ buf = sdio_priv->cmd53_buf;
+ }
+
if (cmd->read_write) { /* write */
- ret = sdio_memcpy_toio(func, cmd->address,
- (void *)cmd->buffer, size);
+ if (cmd->use_global_buf)
+ memcpy(buf, cmd->buffer, size);
+
+ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
} else { /* read */
- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
- cmd->address, size);
+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+ if (cmd->use_global_buf)
+ memcpy(cmd->buffer, buf, size);
}
sdio_release_host(func);
@@ -128,6 +143,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
if (!sdio_priv)
return -ENOMEM;
+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdio_priv->cmd53_buf) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret)
@@ -161,6 +182,7 @@ dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
free:
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
return ret;
}
@@ -172,6 +194,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
@@ -375,8 +398,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)&data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret)
@@ -414,6 +438,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -492,8 +517,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -535,6 +561,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 947d9a0a494e..58bbf50081e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
int ret = 0;
int counter;
int timeout;
- u32 vmm_table[WILC_VMM_TBL_SIZE];
+ u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
@@ -1252,6 +1252,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
@@ -1489,6 +1491,14 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}
+ if (!wilc->vmm_table)
+ wilc->vmm_table = kcalloc(WILC_VMM_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
+ goto fail;
+ }
+
if (!wilc->tx_buffer)
wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
@@ -1513,7 +1523,8 @@ int wilc_wlan_init(struct net_device *dev)
return 0;
fail:
-
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 1593e810b3ca..bfdf03bfa6c5 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -532,8 +532,8 @@ qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -548,7 +548,8 @@ static int qtnf_add_key(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -569,7 +570,8 @@ static int qtnf_del_key(struct wiphy *wiphy, struct net_device *dev,
}
static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -585,7 +587,7 @@ static int qtnf_set_default_key(struct wiphy *wiphy, struct net_device *dev,
static int
qtnf_set_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index)
+ int link_id, u8 key_index)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -721,9 +723,8 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
return -EFAULT;
}
- if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
- }
ret = qtnf_cmd_send_disconnect(vif, reason_code);
if (ret)
@@ -750,7 +751,6 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan;
int ret;
-
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -1223,7 +1223,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
mac->macinfo.extended_capabilities_len;
}
- strlcpy(wiphy->fw_version, hw_info->fw_version,
+ strscpy(wiphy->fw_version, hw_info->fw_version,
sizeof(wiphy->fw_version));
wiphy->hw_version = hw_info->hw_version;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 0fad53693292..b1b73478d89b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -967,7 +967,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_rx_chain, hwinfo->total_tx_chain,
hwinfo->fw_ver);
- strlcpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
+ strscpy(hwinfo->fw_version, bld_label, sizeof(hwinfo->fw_version));
hwinfo->hw_version = hw_ver;
return 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index d758e8874457..de2ee5ffc34e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -1016,6 +1016,8 @@
*/
#define MAC_STATUS_CFG 0x1200
#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
+#define MAC_STATUS_CFG_BBP_RF_BUSY_TX FIELD32(0x00000001)
+#define MAC_STATUS_CFG_BBP_RF_BUSY_RX FIELD32(0x00000002)
/*
* PWR_PIN_CFG:
@@ -2739,6 +2741,7 @@ enum rt2800_eeprom_word {
#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_EXTERNAL_PA FIELD16(0x8000)
/*
* EEPROM LNA
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 18102fbe36d6..cbbb1a4849cf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -198,6 +198,26 @@ static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value);
}
+static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ rt2800_bbp_write(rt2x00dev, 159, value);
+}
+
+static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
+{
+ rt2800_bbp_write(rt2x00dev, 158, reg);
+ return rt2800_bbp_read(rt2x00dev, 159);
+}
+
+static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
+ const u8 reg, const u8 value)
+{
+ rt2800_bbp_write(rt2x00dev, 195, reg);
+ rt2800_bbp_write(rt2x00dev, 196, value);
+}
+
static u8 rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
const unsigned int word)
{
@@ -2143,6 +2163,48 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
+static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev,
+ const struct rt2x00_field32 mask)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
+ if (!rt2x00_get_field32(reg, mask))
+ return 0;
+
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
+ return -EACCES;
+}
+
+static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u8 value;
+
+ /*
+ * BBP was enabled after firmware was loaded,
+ * but we need to reactivate it now.
+ */
+ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ msleep(1);
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ value = rt2800_bbp_read(rt2x00dev, 0);
+ if ((value != 0xff) && (value != 0x00))
+ return 0;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
+ return -EACCES;
+}
+
static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -3793,16 +3855,23 @@ static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev,
rfcsr |= tx_agc_fc;
rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr);
}
+
+ if (conf_is_ht40(conf)) {
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x10);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2f);
+ } else {
+ rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1a);
+ rt2800_bbp_glrt_write(rt2x00dev, 157, 0x40);
+ }
}
static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
struct ieee80211_channel *chan,
int power_level) {
u16 eeprom, target_power, max_power;
- u32 mac_sys_ctrl, mac_status;
+ u32 mac_sys_ctrl;
u32 reg;
u8 bbp;
- int i;
/* hardware unit is 0.5dBm, limited to 23.5dBm */
power_level *= 2;
@@ -3838,16 +3907,8 @@ static void rt2800_config_alc(struct rt2x00_dev *rt2x00dev,
/* Disable Tx/Rx */
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
/* Check MAC Tx/Rx idle */
- for (i = 0; i < 10000; i++) {
- mac_status = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
- if (mac_status & 0x3)
- usleep_range(50, 200);
- else
- break;
- }
-
- if (i == 10000)
- rt2x00_warn(rt2x00dev, "Wait MAC Status to MAX !!!\n");
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
+ rt2x00_warn(rt2x00dev, "RF busy while configuring ALC\n");
if (chan->center_freq > 2457) {
bbp = rt2800_bbp_read(rt2x00dev, 30);
@@ -4164,7 +4225,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
- rt2800_bbp_write(rt2x00dev, 86, 0);
+ if (rt2x00_rt(rt2x00dev, RT6352))
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
}
if (rf->channel <= 14) {
@@ -4365,7 +4429,45 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2*rt2x00dev->lna_gain;
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
- rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ if (rt2x00_rt(rt2x00dev, RT5592))
+ rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ }
+
+ if (rt2x00_rt(rt2x00dev, RT6352)) {
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags)) {
+ reg = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ reg |= 0x00000101;
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, reg);
+
+ reg = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+ reg |= 0x00000101;
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, reg);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xC8);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xA4);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 05, 0x00);
+
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT,
+ 0x36303636);
+ rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN,
+ 0x6C6C6B6C);
+ rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN,
+ 0x6C6C6B6C);
+ }
}
bbp = rt2800_bbp_read(rt2x00dev, 4);
@@ -5644,7 +5746,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT3593) ||
- rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2x00_rt(rt2x00dev, RT3883) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5867,7 +5970,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
} else if (rt2x00_rt(rt2x00dev, RT6352)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401);
- rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000);
rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0);
@@ -6129,6 +6232,27 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_soc(rt2x00dev)) {
+ struct clk *clk = clk_get_sys("bus", NULL);
+ int rate;
+
+ if (IS_ERR(clk)) {
+ clk = clk_get_sys("cpu", NULL);
+
+ if (IS_ERR(clk)) {
+ rate = 125;
+ } else {
+ rate = clk_get_rate(clk) / 3000000;
+ clk_put(clk);
+ }
+ } else {
+ rate = clk_get_rate(clk) / 1000000;
+ clk_put(clk);
+ }
+
+ reg = rt2800_register_read(rt2x00dev, US_CYC_CNT);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0);
@@ -6212,46 +6336,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
return 0;
}
-static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG);
- if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
- return 0;
-
- udelay(REGISTER_BUSY_DELAY);
- }
-
- rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n");
- return -EACCES;
-}
-
-static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u8 value;
-
- /*
- * BBP was enabled after firmware was loaded,
- * but we need to reactivate it now.
- */
- rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
- rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
- msleep(1);
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- value = rt2800_bbp_read(rt2x00dev, 0);
- if ((value != 0xff) && (value != 0x00))
- return 0;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n");
- return -EACCES;
-}
static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
{
@@ -6916,26 +7000,6 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
-static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
- const u8 reg, const u8 value)
-{
- rt2800_bbp_write(rt2x00dev, 195, reg);
- rt2800_bbp_write(rt2x00dev, 196, value);
-}
-
-static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev,
- const u8 reg, const u8 value)
-{
- rt2800_bbp_write(rt2x00dev, 158, reg);
- rt2800_bbp_write(rt2x00dev, 159, value);
-}
-
-static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg)
-{
- rt2800_bbp_write(rt2x00dev, 158, reg);
- return rt2800_bbp_read(rt2x00dev, 159);
-}
-
static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev)
{
u8 bbp;
@@ -8398,6 +8462,1519 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
rt2800_led_open_drain_enable(rt2x00dev);
}
+static void rt2800_rf_self_txdc_cal(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfb5r1_org, rfb7r1_org, rfvalue;
+ u32 mac0518, mac051c, mac0528, mac052c;
+ u8 i;
+
+ mac0518 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ mac051c = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ mac0528 = rt2800_register_read(rt2x00dev, RF_CONTROL2);
+ mac052c = rt2800_register_read(rt2x00dev, RF_BYPASS2);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0xC);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x3306);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, 0x3330);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0xfffff);
+ rfb5r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ rfb7r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, 0x4);
+ for (i = 0; i < 100; ++i) {
+ usleep_range(50, 100);
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1);
+ if ((rfvalue & 0x04) != 0x4)
+ break;
+ }
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rfb5r1_org);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, 0x4);
+ for (i = 0; i < 100; ++i) {
+ usleep_range(50, 100);
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1);
+ if ((rfvalue & 0x04) != 0x4)
+ break;
+ }
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, rfb7r1_org);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, mac0518);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, mac051c);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, mac0528);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, mac052c);
+}
+
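+/*
+ * Convert the delta of the two BBP 49 readings (d2 - d1) into an R
+ * calibration code: scale it by 1000/43 and round to the nearest ten
+ * before dropping the last digit.
+ */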
+static int rt2800_calcrcalibrationcode(struct rt2x00_dev *rt2x00dev, int d1, int d2)
+{
+ int calcode = ((d2 - d1) * 1000) / 43;
+
+ if ((calcode % 10) >= 5)
+ calcode += 10;
+ calcode = (calcode / 10);
+
+ return calcode;
+}
+
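+/*
+ * R calibration: measure BBP register 49 with RF bank 0 register 35 set
+ * to 0x00 and then 0x01, convert the delta into a calibration code and
+ * program it into RF bank 0 register 7, restoring all touched registers
+ * afterwards.
+ */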
+static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u32 savemacsysctrl;
+ u8 saverfb0r1, saverfb0r34, saverfb0r35;
+ u8 saverfb5r4, saverfb5r17, saverfb5r18;
+ u8 saverfb5r19, saverfb5r20;
+ u8 savebbpr22, savebbpr47, savebbpr49;
+ u8 bytevalue = 0;
+ int rcalcode;
+ u8 r_cal_code = 0;
+ s8 d1 = 0, d2 = 0;
+ u8 rfvalue;
+ u32 MAC_RF_BYPASS0, MAC_RF_CONTROL0, MAC_PWR_PIN_CFG;
+ u32 maccfg;
+
+ saverfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ saverfb0r34 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 34);
+ saverfb0r35 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ saverfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ saverfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ saverfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+
+ savebbpr22 = rt2800_bbp_read(rt2x00dev, 22);
+ savebbpr47 = rt2800_bbp_read(rt2x00dev, 47);
+ savebbpr49 = rt2800_bbp_read(rt2x00dev, 49);
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ MAC_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ MAC_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ MAC_PWR_PIN_CFG = rt2800_register_read(rt2x00dev, PWR_PIN_CFG);
+
+ maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ maccfg &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in R calibration\n");
+
+ maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ maccfg &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in R calibration\n");
+
+ rfvalue = (MAC_RF_BYPASS0 | 0x3004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, rfvalue);
+ rfvalue = (MAC_RF_CONTROL0 | (~0x3002));
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, rfvalue);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x27);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0x83);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, 0x13);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x1);
+
+ rt2800_bbp_write(rt2x00dev, 47, 0x04);
+ rt2800_bbp_write(rt2x00dev, 22, 0x80);
+ usleep_range(100, 200);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 49);
+ if (bytevalue > 128)
+ d1 = bytevalue - 256;
+ else
+ d1 = (char)bytevalue;
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x01);
+
+ rt2800_bbp_write(rt2x00dev, 22, 0x80);
+ usleep_range(100, 200);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 49);
+ if (bytevalue > 128)
+ d2 = bytevalue - 256;
+ else
+ d2 = (char)bytevalue;
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+
+ rcalcode = rt2800_calcrcalibrationcode(rt2x00dev, d1, d2);
+ if (rcalcode < 0)
+ r_cal_code = 256 + rcalcode;
+ else
+ r_cal_code = (u8)rcalcode;
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 7, r_cal_code);
+
+ rt2800_bbp_write(rt2x00dev, 22, 0x0);
+
+ bytevalue = rt2800_bbp_read(rt2x00dev, 21);
+ bytevalue |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bytevalue);
+ bytevalue = rt2800_bbp_read(rt2x00dev, 21);
+ bytevalue &= (~0x1);
+ rt2800_bbp_write(rt2x00dev, 21, bytevalue);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, saverfb0r1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, saverfb0r34);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, saverfb0r35);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20);
+
+ rt2800_bbp_write(rt2x00dev, 22, savebbpr22);
+ rt2800_bbp_write(rt2x00dev, 47, savebbpr47);
+ rt2800_bbp_write(rt2x00dev, 49, savebbpr49);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, MAC_PWR_PIN_CFG);
+}
+
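+/*
+ * RX DC offset calibration: start the DC offset compensation engine
+ * through BBP 158/159 (register 141), poll until its busy bit (0x40)
+ * clears, then restore the saved RF and MAC state.
+ */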
+static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u8 bbpreg = 0;
+ u32 macvalue = 0;
+ u8 saverfb0r2, saverfb5r4, saverfb7r4, rfvalue;
+ int i;
+
+ saverfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rfvalue = saverfb0r2;
+ rfvalue |= 0x03;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfvalue);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg |= 0x10;
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x8);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in RX RXDCOC calibration\n");
+
+ saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ saverfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ saverfb5r4 = saverfb5r4 & (~0x40);
+ saverfb7r4 = saverfb7r4 & (~0x40);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x64);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+ bbpreg |= 0x48;
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ for (i = 0; i < 10000; i++) {
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ if ((bbpreg & 0x40) == 0)
+ break;
+ usleep_range(50, 100);
+ }
+
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg = bbpreg & (~0x40);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ rt2800_bbp_write(rt2x00dev, 158, 141);
+ bbpreg = rt2800_bbp_read(rt2x00dev, 159);
+ bbpreg &= (~0x10);
+ rt2800_bbp_write(rt2x00dev, 159, bbpreg);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, saverfb0r2);
+}
+
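+/* Integer square root of si, computed one bit at a time from bit 15 down. */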
+static u32 rt2800_do_sqrt_accumulation(u32 si)
+{
+ u32 root, root_pre, bit;
+ s8 i;
+
+ bit = 1 << 15;
+ root = 0;
+ for (i = 15; i >= 0; i = i - 1) {
+ root_pre = root + bit;
+ if ((root_pre * root_pre) <= si)
+ root = root_pre;
+ bit = bit >> 1;
+ }
+
+ return root;
+}
+
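+/*
+ * RX IQ imbalance calibration: for each RX chain, raise the RF VGA gain
+ * until the captured I/Q statistics are large enough, then derive the
+ * gain and phase imbalance and write the corrections to the BBP.
+ */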
+static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfb0r1, rfb0r2, rfb0r42;
+ u8 rfb4r0, rfb4r19;
+ u8 rfb5r3, rfb5r4, rfb5r17, rfb5r18, rfb5r19, rfb5r20;
+ u8 rfb6r0, rfb6r19;
+ u8 rfb7r3, rfb7r4, rfb7r17, rfb7r18, rfb7r19, rfb7r20;
+
+ u8 bbp1, bbp4;
+ u8 bbpr241, bbpr242;
+ u32 i;
+ u8 ch_idx;
+ u8 bbpval;
+ u8 rfval, vga_idx = 0;
+ int mi = 0, mq = 0, si = 0, sq = 0, riq = 0;
+ int sigma_i, sigma_q, r_iq, g_rx;
+ int g_imb;
+ int ph_rx;
+ u32 savemacsysctrl = 0;
+ u32 orig_RF_CONTROL0 = 0;
+ u32 orig_RF_BYPASS0 = 0;
+ u32 orig_RF_CONTROL1 = 0;
+ u32 orig_RF_BYPASS1 = 0;
+ u32 orig_RF_CONTROL3 = 0;
+ u32 orig_RF_BYPASS3 = 0;
+ u32 bbpval1 = 0;
+ static const u8 rf_vga_table[] = {0x20, 0x21, 0x22, 0x38, 0x39, 0x3a,
+ 0x3b, 0x3c, 0x3d, 0x3e, 0x3f};
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ orig_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ orig_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ orig_RF_CONTROL1 = rt2800_register_read(rt2x00dev, RF_CONTROL1);
+ orig_RF_BYPASS1 = rt2800_register_read(rt2x00dev, RF_BYPASS1);
+ orig_RF_CONTROL3 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ orig_RF_BYPASS3 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+
+ bbp1 = rt2800_bbp_read(rt2x00dev, 1);
+ bbp4 = rt2800_bbp_read(rt2x00dev, 4);
+
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x0);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
+ rt2x00_warn(rt2x00dev, "Timeout waiting for MAC status in RXIQ calibration\n");
+
+ bbpval = bbp4 & (~0x18);
+ bbpval = bbp4 | 0x00;
+ rt2800_bbp_write(rt2x00dev, 4, bbpval);
+
+ bbpval = rt2800_bbp_read(rt2x00dev, 21);
+ bbpval = bbpval | 1;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+ bbpval = bbpval & 0xfe;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL1, 0x00000202);
+ rt2800_register_write(rt2x00dev, RF_BYPASS1, 0x00000303);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0101);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0000);
+
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0xf1f1);
+
+ rfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rfb4r0 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0);
+ rfb4r19 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 19);
+ rfb5r3 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ rfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ rfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ rfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ rfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ rfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+
+ rfb6r0 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0);
+ rfb6r19 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 19);
+ rfb7r3 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3);
+ rfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ rfb7r17 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17);
+ rfb7r18 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18);
+ rfb7r19 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19);
+ rfb7r20 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20);
+
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x87);
+ rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0x27);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x38);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x38);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x80);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0xC1);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x60);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x0);
+ rt2800_bbp_write(rt2x00dev, 24, 0x0);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 5, 0x0);
+
+ bbpr241 = rt2800_bbp_read(rt2x00dev, 241);
+ bbpr242 = rt2800_bbp_read(rt2x00dev, 242);
+
+ rt2800_bbp_write(rt2x00dev, 241, 0x10);
+ rt2800_bbp_write(rt2x00dev, 242, 0x84);
+ rt2800_bbp_write(rt2x00dev, 244, 0x31);
+
+ bbpval = rt2800_bbp_dcoc_read(rt2x00dev, 3);
+ bbpval = bbpval & (~0x7);
+ rt2800_bbp_dcoc_write(rt2x00dev, 3, bbpval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006);
+ usleep_range(1, 200);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003376);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006);
+ udelay(1);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x06);
+ rt2800_bbp_write(rt2x00dev, 24, 0x06);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 23, 0x02);
+ rt2800_bbp_write(rt2x00dev, 24, 0x02);
+ }
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx = ch_idx + 1) {
+ if (ch_idx == 0) {
+ rfval = rfb0r1 & (~0x3);
+ rfval = rfb0r1 | 0x1;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
+ rfval = rfb0r2 & (~0x33);
+ rfval = rfb0r2 | 0x11;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
+ rfval = rfb0r42 & (~0x50);
+ rfval = rfb0r42 | 0x10;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006);
+ udelay(1);
+
+ bbpval = bbp1 & (~0x18);
+ bbpval = bbpval | 0x00;
+ rt2800_bbp_write(rt2x00dev, 1, bbpval);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x00);
+ } else {
+ rfval = rfb0r1 & (~0x3);
+ rfval = rfb0r1 | 0x2;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval);
+ rfval = rfb0r2 & (~0x33);
+ rfval = rfb0r2 | 0x22;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval);
+ rfval = rfb0r42 & (~0x50);
+ rfval = rfb0r42 | 0x40;
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002006);
+ udelay(1);
+
+ bbpval = bbp1 & (~0x18);
+ bbpval = bbpval | 0x08;
+ rt2800_bbp_write(rt2x00dev, 1, bbpval);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x01);
+ }
+ usleep_range(500, 1500);
+
+ vga_idx = 0;
+ while (vga_idx < 11) {
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rf_vga_table[vga_idx]);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rf_vga_table[vga_idx]);
+
+ rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x93);
+
+ for (i = 0; i < 10000; i++) {
+ bbpval = rt2800_bbp_read(rt2x00dev, 159);
+ if ((bbpval & 0xff) == 0x93)
+ usleep_range(50, 100);
+ else
+ break;
+ }
+
+ if ((bbpval & 0xff) == 0x93) {
+ rt2x00_warn(rt2x00dev, "Fatal Error: Calibration doesn't finish\n");
+ goto restore_value;
+ }
+ for (i = 0; i < 5; i++) {
+ u32 bbptemp = 0;
+ u8 value = 0;
+ int result = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x1e);
+ rt2800_bbp_write(rt2x00dev, 159, i);
+ rt2800_bbp_write(rt2x00dev, 158, 0x22);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 24);
+ rt2800_bbp_write(rt2x00dev, 158, 0x21);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 16);
+ rt2800_bbp_write(rt2x00dev, 158, 0x20);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + (value << 8);
+ rt2800_bbp_write(rt2x00dev, 158, 0x1f);
+ value = rt2800_bbp_read(rt2x00dev, 159);
+ bbptemp = bbptemp + value;
+
+ if (i < 2 && (bbptemp & 0x800000))
+ result = (bbptemp & 0xffffff) - 0x1000000;
+ else if (i == 4)
+ result = bbptemp;
+ else
+ result = bbptemp;
+
+ if (i == 0)
+ mi = result / 4096;
+ else if (i == 1)
+ mq = result / 4096;
+ else if (i == 2)
+ si = bbptemp / 4096;
+ else if (i == 3)
+ sq = bbptemp / 4096;
+ else
+ riq = result / 4096;
+ }
+
+ bbpval1 = si - mi * mi;
+ rt2x00_dbg(rt2x00dev,
+ "RXIQ si=%d, sq=%d, riq=%d, bbpval %d, vga_idx %d",
+ si, sq, riq, bbpval1, vga_idx);
+
+ if (bbpval1 >= (100 * 100))
+ break;
+
+ if (bbpval1 <= 100)
+ vga_idx = vga_idx + 9;
+ else if (bbpval1 <= 158)
+ vga_idx = vga_idx + 8;
+ else if (bbpval1 <= 251)
+ vga_idx = vga_idx + 7;
+ else if (bbpval1 <= 398)
+ vga_idx = vga_idx + 6;
+ else if (bbpval1 <= 630)
+ vga_idx = vga_idx + 5;
+ else if (bbpval1 <= 1000)
+ vga_idx = vga_idx + 4;
+ else if (bbpval1 <= 1584)
+ vga_idx = vga_idx + 3;
+ else if (bbpval1 <= 2511)
+ vga_idx = vga_idx + 2;
+ else
+ vga_idx = vga_idx + 1;
+ }
+
+ sigma_i = rt2800_do_sqrt_accumulation(100 * (si - mi * mi));
+ sigma_q = rt2800_do_sqrt_accumulation(100 * (sq - mq * mq));
+ r_iq = 10 * (riq - (mi * mq));
+
+ rt2x00_dbg(rt2x00dev, "Sigma_i=%d, Sigma_q=%d, R_iq=%d", sigma_i, sigma_q, r_iq);
+
+ if (sigma_i <= 1400 && sigma_i >= 1000 &&
+ (sigma_i - sigma_q) <= 112 &&
+ (sigma_i - sigma_q) >= -112 &&
+ mi <= 32 && mi >= -32 &&
+ mq <= 32 && mq >= -32) {
+ r_iq = 10 * (riq - (mi * mq));
+ rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n",
+ sigma_i, sigma_q, r_iq);
+
+ g_rx = (1000 * sigma_q) / sigma_i;
+ g_imb = ((-2) * 128 * (1000 - g_rx)) / (1000 + g_rx);
+ ph_rx = (r_iq * 2292) / (sigma_i * sigma_q);
+
+ if (ph_rx > 20 || ph_rx < -20) {
+ ph_rx = 0;
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+
+ if (g_imb > 12 || g_imb < -12) {
+ g_imb = 0;
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+ } else {
+ g_imb = 0;
+ ph_rx = 0;
+ rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n",
+ sigma_i, sigma_q, r_iq);
+ rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL");
+ }
+
+ if (ch_idx == 0) {
+ rt2800_bbp_write(rt2x00dev, 158, 0x37);
+ rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f);
+ rt2800_bbp_write(rt2x00dev, 158, 0x35);
+ rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 158, 0x55);
+ rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f);
+ rt2800_bbp_write(rt2x00dev, 158, 0x53);
+ rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f);
+ }
+ }
+
+restore_value:
+ rt2800_bbp_write(rt2x00dev, 158, 0x3);
+ bbpval = rt2800_bbp_read(rt2x00dev, 159);
+ rt2800_bbp_write(rt2x00dev, 159, (bbpval | 0x07));
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ rt2800_bbp_write(rt2x00dev, 1, bbp1);
+ rt2800_bbp_write(rt2x00dev, 4, bbp4);
+ rt2800_bbp_write(rt2x00dev, 241, bbpr241);
+ rt2800_bbp_write(rt2x00dev, 242, bbpr242);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+ bbpval = rt2800_bbp_read(rt2x00dev, 21);
+ bbpval |= 0x1;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+ usleep_range(10, 200);
+ bbpval &= 0xfe;
+ rt2800_bbp_write(rt2x00dev, 21, bbpval);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfb0r1);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfb0r2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, rfb4r0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 19, rfb4r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rfb5r3);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rfb5r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rfb5r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, rfb5r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, rfb5r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, rfb5r20);
+
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, rfb6r0);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 19, rfb6r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, rfb7r3);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, rfb7r4);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, rfb7r17);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, rfb7r18);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, rfb7r19);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, rfb7r20);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, orig_RF_CONTROL0);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, orig_RF_BYPASS0);
+ rt2800_register_write(rt2x00dev, RF_CONTROL1, orig_RF_CONTROL1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS1, orig_RF_BYPASS1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, orig_RF_CONTROL3);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, orig_RF_BYPASS3);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+}
+
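+/*
+ * Save the per-chain RF CSR registers that the LOFT/IQ calibration
+ * overwrites (banks 0/4/5 for chain 0, banks 0/6/7 for chain 1).
+ */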
+static void rt2800_rf_configstore(struct rt2x00_dev *rt2x00dev,
+ struct rf_reg_pair rf_reg_record[][13], u8 chain)
+{
+ u8 rfvalue = 0;
+
+ if (chain == CHAIN_0) {
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rf_reg_record[CHAIN_0][0].bank = 0;
+ rf_reg_record[CHAIN_0][0].reg = 1;
+ rf_reg_record[CHAIN_0][0].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rf_reg_record[CHAIN_0][1].bank = 0;
+ rf_reg_record[CHAIN_0][1].reg = 2;
+ rf_reg_record[CHAIN_0][1].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ rf_reg_record[CHAIN_0][2].bank = 0;
+ rf_reg_record[CHAIN_0][2].reg = 35;
+ rf_reg_record[CHAIN_0][2].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rf_reg_record[CHAIN_0][3].bank = 0;
+ rf_reg_record[CHAIN_0][3].reg = 42;
+ rf_reg_record[CHAIN_0][3].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0);
+ rf_reg_record[CHAIN_0][4].bank = 4;
+ rf_reg_record[CHAIN_0][4].reg = 0;
+ rf_reg_record[CHAIN_0][4].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 2);
+ rf_reg_record[CHAIN_0][5].bank = 4;
+ rf_reg_record[CHAIN_0][5].reg = 2;
+ rf_reg_record[CHAIN_0][5].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 34);
+ rf_reg_record[CHAIN_0][6].bank = 4;
+ rf_reg_record[CHAIN_0][6].reg = 34;
+ rf_reg_record[CHAIN_0][6].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3);
+ rf_reg_record[CHAIN_0][7].bank = 5;
+ rf_reg_record[CHAIN_0][7].reg = 3;
+ rf_reg_record[CHAIN_0][7].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4);
+ rf_reg_record[CHAIN_0][8].bank = 5;
+ rf_reg_record[CHAIN_0][8].reg = 4;
+ rf_reg_record[CHAIN_0][8].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17);
+ rf_reg_record[CHAIN_0][9].bank = 5;
+ rf_reg_record[CHAIN_0][9].reg = 17;
+ rf_reg_record[CHAIN_0][9].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18);
+ rf_reg_record[CHAIN_0][10].bank = 5;
+ rf_reg_record[CHAIN_0][10].reg = 18;
+ rf_reg_record[CHAIN_0][10].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19);
+ rf_reg_record[CHAIN_0][11].bank = 5;
+ rf_reg_record[CHAIN_0][11].reg = 19;
+ rf_reg_record[CHAIN_0][11].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20);
+ rf_reg_record[CHAIN_0][12].bank = 5;
+ rf_reg_record[CHAIN_0][12].reg = 20;
+ rf_reg_record[CHAIN_0][12].value = rfvalue;
+ } else if (chain == CHAIN_1) {
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1);
+ rf_reg_record[CHAIN_1][0].bank = 0;
+ rf_reg_record[CHAIN_1][0].reg = 1;
+ rf_reg_record[CHAIN_1][0].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2);
+ rf_reg_record[CHAIN_1][1].bank = 0;
+ rf_reg_record[CHAIN_1][1].reg = 2;
+ rf_reg_record[CHAIN_1][1].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35);
+ rf_reg_record[CHAIN_1][2].bank = 0;
+ rf_reg_record[CHAIN_1][2].reg = 35;
+ rf_reg_record[CHAIN_1][2].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+ rf_reg_record[CHAIN_1][3].bank = 0;
+ rf_reg_record[CHAIN_1][3].reg = 42;
+ rf_reg_record[CHAIN_1][3].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0);
+ rf_reg_record[CHAIN_1][4].bank = 6;
+ rf_reg_record[CHAIN_1][4].reg = 0;
+ rf_reg_record[CHAIN_1][4].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 2);
+ rf_reg_record[CHAIN_1][5].bank = 6;
+ rf_reg_record[CHAIN_1][5].reg = 2;
+ rf_reg_record[CHAIN_1][5].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 34);
+ rf_reg_record[CHAIN_1][6].bank = 6;
+ rf_reg_record[CHAIN_1][6].reg = 34;
+ rf_reg_record[CHAIN_1][6].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3);
+ rf_reg_record[CHAIN_1][7].bank = 7;
+ rf_reg_record[CHAIN_1][7].reg = 3;
+ rf_reg_record[CHAIN_1][7].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4);
+ rf_reg_record[CHAIN_1][8].bank = 7;
+ rf_reg_record[CHAIN_1][8].reg = 4;
+ rf_reg_record[CHAIN_1][8].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17);
+ rf_reg_record[CHAIN_1][9].bank = 7;
+ rf_reg_record[CHAIN_1][9].reg = 17;
+ rf_reg_record[CHAIN_1][9].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18);
+ rf_reg_record[CHAIN_1][10].bank = 7;
+ rf_reg_record[CHAIN_1][10].reg = 18;
+ rf_reg_record[CHAIN_1][10].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19);
+ rf_reg_record[CHAIN_1][11].bank = 7;
+ rf_reg_record[CHAIN_1][11].reg = 19;
+ rf_reg_record[CHAIN_1][11].value = rfvalue;
+ rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20);
+ rf_reg_record[CHAIN_1][12].bank = 7;
+ rf_reg_record[CHAIN_1][12].reg = 20;
+ rf_reg_record[CHAIN_1][12].value = rfvalue;
+ } else {
+ rt2x00_warn(rt2x00dev, "Unknown chain = %u\n", chain);
+ }
+}
+
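+/* Restore the RF registers saved by rt2800_rf_configstore() for both chains. */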
+static void rt2800_rf_configrecover(struct rt2x00_dev *rt2x00dev,
+ struct rf_reg_pair rf_record[][13])
+{
+ u8 chain_index = 0, record_index = 0;
+ u8 bank = 0, rf_register = 0, value = 0;
+
+ for (chain_index = 0; chain_index < 2; chain_index++) {
+ for (record_index = 0; record_index < 13; record_index++) {
+ bank = rf_record[chain_index][record_index].bank;
+ rf_register = rf_record[chain_index][record_index].reg;
+ value = rf_record[chain_index][record_index].value;
+ rt2800_rfcsr_write_bank(rt2x00dev, bank, rf_register, value);
+ rt2x00_dbg(rt2x00dev, "bank: %d, rf_register: %d, value: %x\n",
+ bank, rf_register, value);
+ }
+ }
+}
+
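+/* Program the BBP internal tone generator used as the calibration signal source. */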
+static void rt2800_setbbptonegenerator(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 158, 0xAA);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAB);
+ rt2800_bbp_write(rt2x00dev, 159, 0x0A);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAC);
+ rt2800_bbp_write(rt2x00dev, 159, 0x3F);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xAD);
+ rt2800_bbp_write(rt2x00dev, 159, 0x3F);
+
+ rt2800_bbp_write(rt2x00dev, 244, 0x40);
+}
+
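+/*
+ * Trigger an FFT capture, read the I/Q result for tone index tidx from
+ * MAC register 0x057C and return the accumulated power; with read_neg
+ * set, average it with the power at the mirrored index (0x40 - tidx).
+ */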
+static u32 rt2800_do_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx, u8 read_neg)
+{
+ u32 macvalue = 0;
+ int fftout_i = 0, fftout_q = 0;
+ u32 ptmp = 0, pint = 0;
+ u8 bbp = 0;
+ u8 tidxi;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x9b);
+
+ bbp = 0x9b;
+
+ while (bbp == 0x9b) {
+ usleep_range(10, 50);
+ bbp = rt2800_bbp_read(rt2x00dev, 159);
+ bbp = bbp & 0xff;
+ }
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xba);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ pint = ptmp;
+ rt2x00_dbg(rt2x00dev, "I = %d, Q = %d, power = %x\n", fftout_i, fftout_q, pint);
+ if (read_neg) {
+ pint = pint >> 1;
+ tidxi = 0x40 - tidx;
+ tidxi = tidxi & 0x3f;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xba);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+ rt2800_bbp_write(rt2x00dev, 159, tidxi);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ ptmp = ptmp >> 1;
+ pint = pint + ptmp;
+ }
+
+ return pint;
+}
+
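+/* Read the FFT power at tone index tidx without triggering a new capture. */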
+static u32 rt2800_read_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx)
+{
+ u32 macvalue = 0;
+ int fftout_i = 0, fftout_q = 0;
+ u32 ptmp = 0, pint = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xBA);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+ rt2800_bbp_write(rt2x00dev, 159, tidx);
+
+ macvalue = rt2800_register_read(rt2x00dev, 0x057C);
+
+ fftout_i = (macvalue >> 16);
+ fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i;
+ fftout_q = (macvalue & 0xffff);
+ fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q;
+ ptmp = (fftout_i * fftout_i);
+ ptmp = ptmp + (fftout_q * fftout_q);
+ pint = ptmp;
+
+ return pint;
+}
+
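+/*
+ * Write one LOFT DC compensation value: select the ALC index via BBP
+ * 0xb0, then write the I (iorq = 0) or Q value for the given chain.
+ */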
+static void rt2800_write_dc(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 alc, u8 iorq, u8 dc)
+{
+ u8 bbp = 0;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb0);
+ bbp = alc | 0x80;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ if (ch_idx == 0)
+ bbp = (iorq == 0) ? 0xb1 : 0xb2;
+ else
+ bbp = (iorq == 0) ? 0xb8 : 0xb9;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ bbp = dc;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+}
+
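+/*
+ * Successive-approximation search for the I and Q DC offsets that
+ * minimize the measured LO leakage power for one chain and ALC index.
+ */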
+static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx,
+ u8 alc_idx, u8 dc_result[][RF_ALC_NUM][2])
+{
+ u32 p0 = 0, p1 = 0, pf = 0;
+ char idx0 = 0, idx1 = 0;
+ u8 idxf[] = {0x00, 0x00};
+ u8 ibit = 0x20;
+ u8 iorq;
+ s8 bidx;
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x80);
+
+ for (bidx = 5; bidx >= 0; bidx--) {
+ for (iorq = 0; iorq <= 1; iorq++) {
+ if (idxf[iorq] == 0x20) {
+ idx0 = 0x20;
+ p0 = pf;
+ } else {
+ idx0 = idxf[iorq] - ibit;
+ idx0 = idx0 & 0x3F;
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx0);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ }
+
+ idx1 = idxf[iorq] + (bidx == 5 ? 0 : ibit);
+ idx1 = idx1 & 0x3F;
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx1);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+
+ rt2x00_dbg(rt2x00dev, "alc=%u, IorQ=%u, idx_final=%2x\n",
+ alc_idx, iorq, idxf[iorq]);
+ rt2x00_dbg(rt2x00dev, "p0=%x, p1=%x, pf=%x, idx_0=%x, idx_1=%x, ibit=%x\n",
+ p0, p1, pf, idx0, idx1, ibit);
+
+ if (bidx != 5 && pf <= p0 && pf < p1) {
+ idxf[iorq] = idxf[iorq];
+ } else if (p0 < p1) {
+ pf = p0;
+ idxf[iorq] = idx0 & 0x3F;
+ } else {
+ pf = p1;
+ idxf[iorq] = idx1 & 0x3F;
+ }
+ rt2x00_dbg(rt2x00dev, "IorQ=%u, idx_final[%u]:%x, pf:%8x\n",
+ iorq, iorq, idxf[iorq], pf);
+
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idxf[iorq]);
+ }
+ ibit = ibit >> 1;
+ }
+ dc_result[ch_idx][alc_idx][0] = idxf[0];
+ dc_result[ch_idx][alc_idx][1] = idxf[1];
+}
+
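+/*
+ * Search the TX gain (4-bit) and phase (6-bit) imbalance corrections that
+ * minimize the image power, then refine around the best point found.
+ */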
+static void rt2800_iq_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 *ges, u8 *pes)
+{
+ u32 p0 = 0, p1 = 0, pf = 0;
+ s8 perr = 0, gerr = 0, iq_err = 0;
+ s8 pef = 0, gef = 0;
+ s8 psta, pend;
+ s8 gsta, gend;
+
+ u8 ibit = 0x20;
+ u8 first_search = 0x00, touch_neg_max = 0x00;
+ char idx0 = 0, idx1 = 0;
+ u8 gop;
+ u8 bbp = 0;
+ char bidx;
+
+ for (bidx = 5; bidx >= 1; bidx--) {
+ for (gop = 0; gop < 2; gop++) {
+ if (gop == 1 || bidx < 4) {
+ if (gop == 0)
+ iq_err = gerr;
+ else
+ iq_err = perr;
+
+ first_search = (gop == 0) ? (bidx == 3) : (bidx == 5);
+ touch_neg_max = (gop) ? ((iq_err & 0x0F) == 0x08) :
+ ((iq_err & 0x3F) == 0x20);
+
+ if (touch_neg_max) {
+ p0 = pf;
+ idx0 = iq_err;
+ } else {
+ idx0 = iq_err - ibit;
+ bbp = (ch_idx == 0) ? ((gop == 0) ? 0x28 : 0x29) :
+ ((gop == 0) ? 0x46 : 0x47);
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, idx0);
+
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+ }
+
+ idx1 = iq_err + (first_search ? 0 : ibit);
+ idx1 = (gop == 0) ? (idx1 & 0x0F) : (idx1 & 0x3F);
+
+ bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 :
+ (gop == 0) ? 0x46 : 0x47;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, idx1);
+
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+
+ rt2x00_dbg(rt2x00dev,
+ "p0=%x, p1=%x, power_final=%x, idx0=%x, idx1=%x, iq_err=%x, gop=%d, ibit=%x\n",
+ p0, p1, pf, idx0, idx1, iq_err, gop, ibit);
+
+ if (!(!first_search && pf <= p0 && pf < p1)) {
+ if (p0 < p1) {
+ pf = p0;
+ iq_err = idx0;
+ } else {
+ pf = p1;
+ iq_err = idx1;
+ }
+ }
+
+ bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 :
+ (gop == 0) ? 0x46 : 0x47;
+
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, iq_err);
+
+ if (gop == 0)
+ gerr = iq_err;
+ else
+ perr = iq_err;
+
+ rt2x00_dbg(rt2x00dev, "IQCalibration pf=%8x (%2x, %2x) !\n",
+ pf, gerr & 0x0F, perr & 0x3F);
+ }
+ }
+
+ if (bidx > 0)
+ ibit = (ibit >> 1);
+ }
+ gerr = (gerr & 0x08) ? (gerr & 0x0F) - 0x10 : (gerr & 0x0F);
+ perr = (perr & 0x20) ? (perr & 0x3F) - 0x40 : (perr & 0x3F);
+
+ gerr = (gerr < -0x07) ? -0x07 : (gerr > 0x05) ? 0x05 : gerr;
+ gsta = gerr - 1;
+ gend = gerr + 2;
+
+ perr = (perr < -0x1f) ? -0x1f : (perr > 0x1d) ? 0x1d : perr;
+ psta = perr - 1;
+ pend = perr + 2;
+
+ for (gef = gsta; gef <= gend; gef = gef + 1)
+ for (pef = psta; pef <= pend; pef = pef + 1) {
+ bbp = (ch_idx == 0) ? 0x28 : 0x46;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, gef & 0x0F);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, pef & 0x3F);
+
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1);
+ if (gef == gsta && pef == psta) {
+ pf = p1;
+ gerr = gef;
+ perr = pef;
+ } else if (pf > p1) {
+ pf = p1;
+ gerr = gef;
+ perr = pef;
+ }
+ rt2x00_dbg(rt2x00dev, "Fine IQCalibration p1=%8x pf=%8x (%2x, %2x) !\n",
+ p1, pf, gef & 0x0F, pef & 0x3F);
+ }
+
+ ges[ch_idx] = gerr & 0x0F;
+ pes[ch_idx] = perr & 0x3F;
+}
+
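+/* Route TX0 through the auxiliary loopback path used for LOFT/IQ calibration. */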
+static void rt2800_rf_aux_tx0_loopback(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x21);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x10);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x1b);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 2, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 4, 34, 0xee);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xd7);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0xa2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20);
+}
+
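+/* Route TX1 through the auxiliary loopback path used for LOFT/IQ calibration. */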
+static void rt2800_rf_aux_tx1_loopback(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x22);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x20);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x4b);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 2, 0x81);
+ rt2800_rfcsr_write_bank(rt2x00dev, 6, 34, 0xee);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, 0x2d);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, 0xd7);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, 0xa2);
+ rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, 0x20);
+}
+
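+/*
+ * TX LO leakage (LOFT) and TX IQ imbalance calibration: feed the BBP tone
+ * generator through the internal TX loopback path and use the FFT engine
+ * to search per-chain DC offsets and gain/phase corrections.
+ */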
+static void rt2800_loft_iq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ struct rf_reg_pair rf_store[CHAIN_NUM][13];
+ u32 macorg1 = 0;
+ u32 macorg2 = 0;
+ u32 macorg3 = 0;
+ u32 macorg4 = 0;
+ u32 macorg5 = 0;
+ u32 orig528 = 0;
+ u32 orig52c = 0;
+
+ u32 savemacsysctrl = 0;
+ u32 macvalue = 0;
+ u32 mac13b8 = 0;
+ u32 p0 = 0, p1 = 0;
+ u32 p0_idx10 = 0, p1_idx10 = 0;
+
+ u8 rfvalue;
+ u8 loft_dc_search_result[CHAIN_NUM][RF_ALC_NUM][2];
+ u8 ger[CHAIN_NUM], per[CHAIN_NUM];
+
+ u8 vga_gain[] = {14, 14};
+ u8 bbp = 0, ch_idx = 0, rf_alc_idx = 0, idx = 0;
+ u8 bbpr30, rfb0r39, rfb0r42;
+ u8 bbpr1;
+ u8 bbpr4;
+ u8 bbpr241, bbpr242;
+ u8 count_step;
+
+ static const u8 rf_gain[] = {0x00, 0x01, 0x02, 0x04, 0x08, 0x0c};
+ static const u8 rfvga_gain_table[] = {0x24, 0x25, 0x26, 0x27, 0x28, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x31, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3F};
+ static const u8 bbp_2324gain[] = {0x16, 0x14, 0x12, 0x10, 0x0c, 0x08};
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+ mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8);
+ orig528 = rt2800_register_read(rt2x00dev, RF_CONTROL2);
+ orig52c = rt2800_register_read(rt2x00dev, RF_BYPASS2);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n");
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n");
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++)
+ rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx);
+
+ bbpr30 = rt2800_bbp_read(rt2x00dev, 30);
+ rfb0r39 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 39);
+ rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42);
+
+ rt2800_bbp_write(rt2x00dev, 30, 0x1F);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, 0x80);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x5B);
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ rt2800_setbbptonegenerator(rt2x00dev);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306);
+ rt2800_register_write(rt2x00dev, 0x13b8, 0x10);
+ udelay(1);
+
+ if (ch_idx == 0)
+ rt2800_rf_aux_tx0_loopback(rt2x00dev);
+ else
+ rt2800_rf_aux_tx1_loopback(rt2x00dev);
+
+ udelay(1);
+
+ if (ch_idx == 0)
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004);
+ else
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x05);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ if (ch_idx == 0)
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ else
+ rt2800_bbp_write(rt2x00dev, 159, 0x01);
+
+ vga_gain[ch_idx] = 18;
+ for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, bbp_2324gain[rf_alc_idx]);
+ rt2800_bbp_write(rt2x00dev, 24, bbp_2324gain[rf_alc_idx]);
+
+ macvalue = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macvalue &= (~0x0000F1F1);
+ macvalue |= (rf_gain[rf_alc_idx] << 4);
+ macvalue |= (rf_gain[rf_alc_idx] << 12);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macvalue);
+ macvalue = (0x0000F1F1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macvalue);
+
+ if (rf_alc_idx == 0) {
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x21);
+ for (; vga_gain[ch_idx] > 0;
+ vga_gain[ch_idx] = vga_gain[ch_idx] - 2) {
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x21);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0);
+ rt2x00_dbg(rt2x00dev, "LOFT AGC %d %d\n", p0, p1);
+ if ((p0 < 7000 * 7000) && (p1 < (7000 * 7000)))
+ break;
+ }
+
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00);
+ rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00);
+
+ rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx],
+ rfvga_gain_table[vga_gain[ch_idx]]);
+
+ if (vga_gain[ch_idx] < 0)
+ vga_gain[ch_idx] = 0;
+ }
+
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+
+ rt2800_loft_search(rt2x00dev, ch_idx, rf_alc_idx, loft_dc_search_result);
+ }
+ }
+
+ for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) {
+ for (idx = 0; idx < 4; idx++) {
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ bbp = (idx << 2) + rf_alc_idx;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " ALC %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb1);
+ bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x00];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " I0 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb2);
+ bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x01];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " Q0 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb8);
+ bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x00];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " I1 %2x,", bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0xb9);
+ bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x01];
+ bbp = bbp & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+ rt2x00_dbg(rt2x00dev, " Q1 %2x\n", bbp);
+ }
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ bbp = 0x00;
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_rf_configrecover(rt2x00dev, rf_store);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, RF_CONTROL2, orig528);
+ rt2800_register_write(rt2x00dev, RF_BYPASS2, orig52c);
+ rt2800_register_write(rt2x00dev, 0x13b8, mac13b8);
+
+ savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG);
+ macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0);
+ macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0);
+ macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3);
+ macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3);
+
+ bbpr1 = rt2800_bbp_read(rt2x00dev, 1);
+ bbpr4 = rt2800_bbp_read(rt2x00dev, 4);
+ bbpr241 = rt2800_bbp_read(rt2x00dev, 241);
+ bbpr242 = rt2800_bbp_read(rt2x00dev, 242);
+ mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8);
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x04);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX)))
+ rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n");
+
+ macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL);
+ macvalue &= (~0x08);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue);
+
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX)))
+ rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n");
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000101);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 4, bbpr4 & (~0x18));
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 241, 0x14);
+ rt2800_bbp_write(rt2x00dev, 242, 0x80);
+ rt2800_bbp_write(rt2x00dev, 244, 0x31);
+ } else {
+ rt2800_setbbptonegenerator(rt2x00dev);
+ }
+
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306);
+ udelay(1);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F);
+
+ if (!test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000000);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1);
+ }
+
+ rt2800_register_write(rt2x00dev, 0x13b8, 0x00000010);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++)
+ rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx);
+
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x3B);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x3B);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x03);
+ rt2800_bbp_write(rt2x00dev, 159, 0x60);
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x80);
+
+ for (ch_idx = 0; ch_idx < 2; ch_idx++) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+
+ if (ch_idx == 0) {
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ bbp = bbpr1 & (~0x18);
+ bbp = bbp | 0x00;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+ }
+ rt2800_rf_aux_tx0_loopback(rt2x00dev);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 158, 0x01);
+ rt2800_bbp_write(rt2x00dev, 159, 0x01);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags)) {
+ bbp = bbpr1 & (~0x18);
+ bbp = bbp | 0x08;
+ rt2800_bbp_write(rt2x00dev, 1, bbp);
+ }
+ rt2800_rf_aux_tx1_loopback(rt2x00dev);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x05);
+ rt2800_bbp_write(rt2x00dev, 159, 0x04);
+
+ bbp = (ch_idx == 0) ? 0x28 : 0x46;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 23, 0x06);
+ rt2800_bbp_write(rt2x00dev, 24, 0x06);
+ count_step = 1;
+ } else {
+ rt2800_bbp_write(rt2x00dev, 23, 0x1F);
+ rt2800_bbp_write(rt2x00dev, 24, 0x1F);
+ count_step = 2;
+ }
+
+ for (; vga_gain[ch_idx] < 19; vga_gain[ch_idx] = (vga_gain[ch_idx] + count_step)) {
+ rfvalue = rfvga_gain_table[vga_gain[ch_idx]];
+ rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue);
+ rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ p0_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x21);
+ p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0);
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags))
+ p1_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A);
+
+ rt2x00_dbg(rt2x00dev, "IQ AGC %d %d\n", p0, p1);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2x00_dbg(rt2x00dev, "IQ AGC IDX 10 %d %d\n", p0_idx10, p1_idx10);
+ if ((p0_idx10 > 7000 * 7000) || (p1_idx10 > 7000 * 7000)) {
+ if (vga_gain[ch_idx] != 0)
+ vga_gain[ch_idx] = vga_gain[ch_idx] - 1;
+ break;
+ }
+ }
+
+ if ((p0 > 2500 * 2500) || (p1 > 2500 * 2500))
+ break;
+ }
+
+ if (vga_gain[ch_idx] > 18)
+ vga_gain[ch_idx] = 18;
+ rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx],
+ rfvga_gain_table[vga_gain[ch_idx]]);
+
+ bbp = (ch_idx == 0) ? 0x29 : 0x47;
+ rt2800_bbp_write(rt2x00dev, 158, bbp);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_iq_search(rt2x00dev, ch_idx, ger, per);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 23, 0x00);
+ rt2800_bbp_write(rt2x00dev, 24, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x28);
+ bbp = ger[CHAIN_0] & 0x0F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x29);
+ bbp = per[CHAIN_0] & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x46);
+ bbp = ger[CHAIN_1] & 0x0F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x47);
+ bbp = per[CHAIN_1] & 0x3F;
+ rt2800_bbp_write(rt2x00dev, 159, bbp);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) {
+ rt2800_bbp_write(rt2x00dev, 1, bbpr1);
+ rt2800_bbp_write(rt2x00dev, 241, bbpr241);
+ rt2800_bbp_write(rt2x00dev, 242, bbpr242);
+ }
+ rt2800_bbp_write(rt2x00dev, 244, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 158, 0x00);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+ rt2800_bbp_write(rt2x00dev, 158, 0xB0);
+ rt2800_bbp_write(rt2x00dev, 159, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 30, bbpr30);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, rfb0r39);
+ rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42);
+
+ if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags))
+ rt2800_bbp_write(rt2x00dev, 4, bbpr4);
+
+ rt2800_bbp_write(rt2x00dev, 21, 0x01);
+ udelay(1);
+ rt2800_bbp_write(rt2x00dev, 21, 0x00);
+
+ rt2800_rf_configrecover(rt2x00dev, rf_store);
+
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00);
+ rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2);
+ udelay(1);
+ rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3);
+ rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4);
+ rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5);
+ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl);
+ rt2800_register_write(rt2x00dev, 0x13b8, mac13b8);
+}
+
static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev,
bool set_bw, bool is_ht40)
{
@@ -9005,8 +10582,13 @@ static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00);
rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C);
+ rt2800_r_calibration(rt2x00dev);
+ rt2800_rf_self_txdc_cal(rt2x00dev);
+ rt2800_rxdcoc_calibration(rt2x00dev);
rt2800_bw_filter_calibration(rt2x00dev, true);
rt2800_bw_filter_calibration(rt2x00dev, false);
+ rt2800_loft_iq_calibration(rt2x00dev);
+ rt2800_rxiq_calibration(rt2x00dev);
}
static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
@@ -9073,7 +10655,7 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Wait BBP/RF to wake up.
*/
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
+ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY)))
return -EIO;
/*
@@ -9435,6 +11017,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rf = RF3853;
else if (rt2x00_rt(rt2x00dev, RT5350))
rf = RF5350;
+ else if (rt2x00_rt(rt2x00dev, RT5592))
+ rf = RF5592;
else
rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -9564,7 +11148,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1);
- if (rt2x00_rt(rt2x00dev, RT3352)) {
+ if (rt2x00_rt(rt2x00dev, RT3352) ||
+ rt2x00_rt(rt2x00dev, RT6352)) {
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
__set_bit(CAPABILITY_EXTERNAL_PA_TX0,
@@ -9575,6 +11160,18 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
&rt2x00dev->cap_flags);
}
+ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF2);
+
+ if (rt2x00_rt(rt2x00dev, RT6352) && eeprom != 0 && eeprom != 0xffff) {
+ if (!rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF2_EXTERNAL_PA)) {
+ __clear_bit(CAPABILITY_EXTERNAL_PA_TX0,
+ &rt2x00dev->cap_flags);
+ __clear_bit(CAPABILITY_EXTERNAL_PA_TX1,
+ &rt2x00dev->cap_flags);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index e1761f467b94..3cbef77b4bd3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -17,6 +17,16 @@
#define WCID_START 33
#define WCID_END 222
#define STA_IDS_SIZE (WCID_END - WCID_START + 2)
+#define CHAIN_0 0x0
+#define CHAIN_1 0x1
+#define RF_ALC_NUM 6
+#define CHAIN_NUM 2
+
+struct rf_reg_pair {
+ u8 bank;
+ u8 reg;
+ u8 value;
+};
/* RT2800 driver data structure */
struct rt2800_drv_data {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 8f5772b98f58..07a6a5a9ce13 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -1309,8 +1309,11 @@ void rt2x00queue_unmap_skb(struct queue_entry *entry);
*/
static inline struct data_queue *
rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
+ enum data_queue_qid queue)
{
+ if (queue >= rt2x00dev->ops->tx_queues && queue < IEEE80211_NUM_ACS)
+ queue = rt2x00dev->ops->tx_queues - 1;
+
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index e95c101c2711..3a035afcf7f9 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1093,6 +1093,19 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
kfree(rt2x00dev->spec.channels_info);
}
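+/*
+ * Throughput-based blink table for the mac80211 TPT LED trigger; the
+ * blink interval shortens as throughput rises.
+ */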
+static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = {
+ { .throughput = 0 * 1024, .blink_time = 334 },
+ { .throughput = 1 * 1024, .blink_time = 260 },
+ { .throughput = 2 * 1024, .blink_time = 220 },
+ { .throughput = 5 * 1024, .blink_time = 190 },
+ { .throughput = 10 * 1024, .blink_time = 170 },
+ { .throughput = 25 * 1024, .blink_time = 150 },
+ { .throughput = 54 * 1024, .blink_time = 130 },
+ { .throughput = 120 * 1024, .blink_time = 110 },
+ { .throughput = 265 * 1024, .blink_time = 80 },
+ { .throughput = 586 * 1024, .blink_time = 50 },
+};
+
static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -1174,6 +1187,11 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
#undef RT2X00_TASKLET_INIT
+ ieee80211_create_tpt_led_trigger(rt2x00dev->hw,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO,
+ rt2x00_tpt_blink,
+ ARRAY_SIZE(rt2x00_tpt_blink));
+
/*
* Register HW.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 4d06038afd83..98df0aef8168 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -318,7 +318,7 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
* when using more then one tx stream (>MCS7).
*/
if (sta && txdesc->u.ht.mcs > 7 &&
- sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
index 49421d10e22b..f7d95c9624a0 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
@@ -143,7 +143,7 @@ static int rtl8187_register_led(struct ieee80211_hw *dev,
led->dev = dev;
led->ledpin = ledpin;
led->is_radio = is_radio;
- strlcpy(led->name, name, sizeof(led->name));
+ strscpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7ddce3c3f0c4..782b089a2e1b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1425,7 +1425,7 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1511,9 +1511,9 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index c66f0726b253..ac641a56efb0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1878,13 +1878,6 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
/* We have 8 bits to indicate validity */
map_addr = offset * 8;
- if (map_addr >= EFUSE_MAP_LEN) {
- dev_warn(dev, "%s: Illegal map_addr (%04x), "
- "efuse corrupt!\n",
- __func__, map_addr);
- ret = -EINVAL;
- goto exit;
- }
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (word_mask & BIT(i)) {
@@ -1895,6 +1888,13 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
if (ret)
goto exit;
+ if (map_addr >= EFUSE_MAP_LEN - 1) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
+ ret = -EINVAL;
+ goto exit;
+ }
priv->efuse_wifi.raw[map_addr++] = val8;
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
@@ -2929,12 +2929,12 @@ bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
}
if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
- /* path B RX OK */
+ /* path B TX OK */
for (i = 4; i < 6; i++)
result[3][i] = result[c1][i];
}
- if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
+ if (!(simubitmap & 0xc0) && priv->tx_paths > 1) {
/* path B RX OK */
for (i = 6; i < 8; i++)
result[3][i] = result[c1][i];
@@ -4320,7 +4320,7 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
}
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
{
struct h2c_cmd h2c;
@@ -4340,10 +4340,15 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
{
struct h2c_cmd h2c;
- u8 bw = RTL8XXXU_CHANNEL_WIDTH_20;
+ u8 bw;
+
+ if (txbw_40mhz)
+ bw = RTL8XXXU_CHANNEL_WIDTH_40;
+ else
+ bw = RTL8XXXU_CHANNEL_WIDTH_20;
memset(&h2c, 0, sizeof(struct h2c_cmd));
@@ -4353,15 +4358,14 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff;
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
- h2c.ramask.arg = 0x80;
h2c.b_macid_cfg.data1 = rateid;
if (sgi)
h2c.b_macid_cfg.data1 |= BIT(7);
h2c.b_macid_cfg.data2 = bw;
- dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
- __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, rateid %02x, sgi %d, size %zi\n",
+ __func__, ramask, rateid, sgi, sizeof(h2c.b_macid_cfg));
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
}
@@ -4556,6 +4560,53 @@ rtl8xxxu_wireless_mode(struct ieee80211_hw *hw, struct ieee80211_sta *sta)
return network_type;
}
+static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
+{
+ u32 reg_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
+ [IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
+ [IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
+ [IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
+ };
+ u32 val32;
+ u16 wireless_mode = 0;
+ u8 aifs, aifsn, sifs;
+ int i;
+
+ if (priv->vif) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ if (sta)
+ wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
+ rcu_read_unlock();
+ }
+
+ if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
+ (wireless_mode & WIRELESS_MODE_N_24G))
+ sifs = 16;
+ else
+ sifs = 10;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ val32 = rtl8xxxu_read32(priv, reg_edca_param[i]);
+
+ /* It was set in conf_tx. */
+ aifsn = val32 & 0xff;
+
+ /* aifsn not set yet or already fixed */
+ if (aifsn < 2 || aifsn > 15)
+ continue;
+
+ aifs = aifsn * slot_time + sifs;
+
+ val32 &= ~0xff;
+ val32 |= aifs;
+ rtl8xxxu_write32(priv, reg_edca_param[i], val32);
+ }
+}
+
static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changed)
@@ -4622,7 +4673,11 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
RATE_INFO_FLAGS_SHORT_GI;
}
- rarpt->txrate.bw |= RATE_INFO_BW_20;
+ if (rtl8xxxu_ht40_2g &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+ rarpt->txrate.bw = RATE_INFO_BW_40;
+ else
+ rarpt->txrate.bw = RATE_INFO_BW_20;
}
bit_rate = cfg80211_calculate_bitrate(&rarpt->txrate);
rarpt->bit_rate = bit_rate;
@@ -4631,7 +4686,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
priv->vif = vif;
priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
- priv->fops->update_rate_mask(priv, ramask, 0, sgi);
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi, rarpt->txrate.bw == RATE_INFO_BW_40);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -4671,6 +4726,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
else
val8 = 20;
rtl8xxxu_write8(priv, REG_SLOT, val8);
+
+ rtl8xxxu_set_aifs(priv, val8);
}
if (changed & BSS_CHANGED_BSSID) {
@@ -4710,9 +4767,8 @@ static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
return rtlqueue;
}
-static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+static u32 rtl8xxxu_queue_select(struct ieee80211_hdr *hdr, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u32 queue;
if (ieee80211_is_mgmt(hdr->frame_control))
@@ -5062,6 +5118,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (control && control->sta)
sta = control->sta;
+ queue = rtl8xxxu_queue_select(hdr, skb);
+
tx_desc = skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
@@ -5074,7 +5132,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
- queue = rtl8xxxu_queue_select(hw, skb);
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
if (tx_info->control.hw_key) {
@@ -6344,7 +6401,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
}
priv->rssi_level = rssi_level;
- priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi);
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz);
}
}
@@ -6657,7 +6714,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
if (!hw) {
ret = -ENOMEM;
- priv = NULL;
goto err_put_dev;
}
@@ -6768,11 +6824,9 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
err_set_intfdata:
usb_set_intfdata(interface, NULL);
- if (priv) {
- kfree(priv->fw_data);
- mutex_destroy(&priv->usb_buf_mutex);
- mutex_destroy(&priv->h2c_mutex);
- }
+ kfree(priv->fw_data);
+ mutex_destroy(&priv->usb_buf_mutex);
+ mutex_destroy(&priv->h2c_mutex);
ieee80211_free_hw(hw);
err_put_dev:
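Among the rtl8xxxu_core.c changes above, the new rtl8xxxu_set_aifs() recomputes the AIFS byte of each EDCA parameter register whenever the slot time changes, using the usual 802.11 relation AIFS = AIFSN * slot_time + SIFS, with SIFS taken as 16 us for 5 GHz or HT and 10 us otherwise. A standalone rendering of that per-register update, with the register reduced to the low byte the driver actually patches:

#include <stdint.h>
#include <stdio.h>

/* AIFS[us] = AIFSN * slot_time + SIFS, as computed by rtl8xxxu_set_aifs().
 * The low byte of the EDCA register initially holds the AIFSN written by
 * conf_tx; once converted it holds the AIFS time in microseconds.
 */
static uint32_t update_edca_aifs(uint32_t edca_reg, uint8_t slot_time,
				 uint8_t sifs)
{
	uint8_t aifsn = edca_reg & 0xff;
	uint8_t aifs;

	/* aifsn not set yet or already converted to a time value */
	if (aifsn < 2 || aifsn > 15)
		return edca_reg;

	aifs = aifsn * slot_time + sifs;
	return (edca_reg & ~0xffu) | aifs;
}

int main(void)
{
	uint32_t reg = 0x005e0002;	/* upper bits arbitrary, AIFSN = 2 */

	printf("0x%08x\n", update_edca_aifs(reg, 9, 10));	/* short slot: AIFS = 28 */
	printf("0x%08x\n", update_edca_aifs(reg, 20, 10));	/* long slot:  AIFS = 50 */
	return 0;
}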
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 15e6a6aded31..d18c092b6142 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -2386,11 +2386,10 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
"Just Read IQK Matrix reg for channel:%d....\n",
channel);
- _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
- rtlphy->iqk_matrix[
- indexforchannel].value, 0,
- (rtlphy->iqk_matrix[
- indexforchannel].value[0][2] == 0));
+ if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
+ _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
if (IS_92D_SINGLEPHY(rtlhal->version)) {
if ((rtlphy->iqk_matrix[
indexforchannel].value[0][4] != 0)
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index 76c7f3257dd3..038a30b170ef 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -30,11 +30,11 @@ void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
struct rtw_bfee *bfee = &rtwvif->bfee;
struct rtw_bf_info *bfinfo = &rtwdev->bf_info;
- struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_sta *sta;
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_sta_vht_cap *ic_vht_cap;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index cac053f485c3..6276ad624299 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -13,7 +13,7 @@
static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
u8 rssi, u8 rssi_thresh)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tol = chip->rssi_tolerance;
u8 next_state;
@@ -36,7 +36,7 @@ static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
static void rtw_coex_limited_tx(struct rtw_dev *rtwdev,
bool tx_limit_en, bool ampdu_limit_en)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 num_of_active_port = 1;
@@ -365,7 +365,7 @@ static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap,
void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
u16 val = 0x2;
@@ -400,7 +400,7 @@ EXPORT_SYMBOL(rtw_coex_write_scbd);
static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->scbd_support)
return 0;
@@ -410,7 +410,7 @@ static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
static void rtw_coex_check_rfk(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_rfe *coex_rfe = &coex->rfe;
@@ -489,7 +489,7 @@ static void rtw_coex_monitor_bt_ctr(struct rtw_dev *rtwdev)
static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
@@ -524,10 +524,10 @@ static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_traffic_stats *stats = &rtwdev->stats;
bool is_5G = false;
bool wl_busy = false;
@@ -706,10 +706,10 @@ static const char *rtw_coex_get_bt_status_string(u8 bt_status)
static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 i;
u8 rssi_state;
u8 rssi_step;
@@ -806,7 +806,7 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
@@ -933,7 +933,7 @@ EXPORT_SYMBOL(rtw_coex_write_indirect_reg);
static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_hw_reg *btg_reg = chip->btg_reg;
if (wifi_control) {
@@ -981,7 +981,7 @@ static void rtw_coex_mimo_ps(struct rtw_dev *rtwdev, bool force, bool state)
static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force,
u8 table_case)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 h2c_para[6] = {0};
u32 table_wl = 0x5a5a5a5a;
@@ -1065,9 +1065,9 @@ static void rtw_coex_set_table(struct rtw_dev *rtwdev, bool force, u32 table0,
static void rtw_coex_table(struct rtw_dev *rtwdev, bool force, u8 type)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_stat *coex_stat = &coex->stat;
@@ -1135,9 +1135,9 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
u8 byte3, u8 byte4, u8 byte5)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 ps_type = COEX_PS_WIFI_NATIVE;
bool ap_enable = false;
@@ -1193,10 +1193,10 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 n, type;
bool turn_on;
@@ -1526,8 +1526,8 @@ static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev)
static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1549,11 +1549,11 @@ static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 level = 0;
bool bt_afh_loss = true;
@@ -1594,8 +1594,8 @@ static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1619,8 +1619,8 @@ static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1644,10 +1644,10 @@ static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1684,11 +1684,11 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_rfe *coex_rfe = &coex->rfe;
u8 table_case = 0xff, tdma_case = 0xff;
@@ -1753,10 +1753,10 @@ exit:
static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
bool wl_hi_pri = false;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -1853,11 +1853,11 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
struct rtw_coex_dm *coex_dm = &coex->dm;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1901,10 +1901,10 @@ static void rtw_coex_action_bt_game_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -1932,10 +1932,10 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
bool bt_multi_link_remain = false, is_toggle_table = false;
@@ -2015,11 +2015,11 @@ static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -2057,10 +2057,10 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
bool ap_enable = false;
@@ -2096,10 +2096,10 @@ static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2133,11 +2133,11 @@ static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case, interval = 0;
u32 slot_type = 0;
bool is_toggle_table = false;
@@ -2190,10 +2190,10 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
bool wl_cpt_test = false, bt_cpt_test = false;
@@ -2247,10 +2247,10 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2282,10 +2282,10 @@ static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2316,9 +2316,9 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
@@ -2348,8 +2348,8 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2372,9 +2372,9 @@ static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
u8 table_case, tdma_case;
@@ -2411,10 +2411,10 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
u32 slot_type = 0;
@@ -2451,8 +2451,8 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 table_case, tdma_case;
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__);
@@ -2528,8 +2528,8 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
struct rtw_coex_stat *coex_stat = &coex->stat;
bool rf4ce_en = false;
@@ -3002,9 +3002,9 @@ void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type)
void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_dm *coex_dm = &coex->dm;
u32 bt_relink_time;
u8 i, rsp_source = 0, type;
@@ -3270,8 +3270,8 @@ static const u8 coex_bt_hidinfo_xb[] = {0x58, 0x62, 0x6f};
void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_hid *hidinfo;
struct rtw_coex_hid_info_a *hida;
@@ -3360,8 +3360,8 @@ void rtw_coex_bt_hid_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
void rtw_coex_query_bt_hid_list(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex_stat *coex_stat = &coex->stat;
struct rtw_coex_hid *hidinfo;
u8 i, handle;
@@ -3582,7 +3582,7 @@ static const char *rtw_coex_get_reason_string(u8 reason)
static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0,
u32 wl_reg_6c4)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u8 ans = 0xFF;
u8 n, i;
@@ -3618,8 +3618,8 @@ static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0,
static u8 rtw_coex_get_tdma_index(struct rtw_dev *rtwdev, u8 *tdma_para)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
u8 ans = 0xFF;
u8 n, i, j;
u8 load_cur_tab_val;
@@ -3736,7 +3736,7 @@ static int rtw_coex_val_info(struct rtw_dev *rtwdev,
static void rtw_coex_set_coexinfo_hw(struct rtw_dev *rtwdev, struct seq_file *m)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_reg_domain *reg;
char addr_info[INFO_SIZE];
int n_addr = 0;
@@ -3910,7 +3910,7 @@ static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode)
void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
struct rtw_coex *coex = &rtwdev->coex;
struct rtw_coex_stat *coex_stat = &coex->stat;
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
index 07fa7aa34d4b..57cf29da9ea4 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.h
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -327,7 +327,7 @@ struct coex_rf_para {
static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_init(rtwdev);
}
@@ -335,7 +335,7 @@ static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
static inline
void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->ops->coex_set_ant_switch)
return;
@@ -345,28 +345,28 @@ void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_gnt_fix(rtwdev);
}
static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_gnt_debug(rtwdev);
}
static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_rfe_type(rtwdev);
}
static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr);
}
@@ -374,7 +374,7 @@ static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
static inline
void rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain);
}
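The repeated `struct rtw_chip_info *chip` -> `const struct rtw_chip_info *chip` conversions in coex.c and coex.h (and throughout the rest of rtw88 below) are mechanical: once every user holds a const pointer, the per-chip descriptors themselves can be declared const and placed in read-only memory, and any stray write through the pointer becomes a compile-time error. A minimal illustration of the pattern, with toy fields rather than the real rtw_chip_info:

#include <stdio.h>

/* Toy stand-in for a per-chip descriptor; declaring the instance const
 * lets it live in .rodata and forbids writes through chip pointers.
 */
struct chip_info {
	const char *name;
	unsigned int page_size;
};

static const struct chip_info chip_8822c = {
	.name = "8822c",
	.page_size = 128,
};

struct dev {
	const struct chip_info *chip;	/* points at read-only data */
};

static void show(const struct dev *d)
{
	const struct chip_info *chip = d->chip;

	printf("%s: page_size=%u\n", chip->name, chip->page_size);
	/* chip->page_size = 256;	<- would no longer compile */
}

int main(void)
{
	struct dev d = { .chip = &chip_8822c };

	show(&d);
	return 0;
}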
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 7cde6bcf253b..9ebe544e51d0 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -621,11 +621,13 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_hal *hal = &rtwdev->hal;
- u8 path, rate;
+ u8 path, rate, bw, ch, regd;
struct rtw_power_params pwr_param = {0};
- u8 bw = hal->current_band_width;
- u8 ch = hal->current_channel;
- u8 regd = rtw_regd_get(rtwdev);
+
+ mutex_lock(&rtwdev->mutex);
+ bw = hal->current_band_width;
+ ch = hal->current_channel;
+ regd = rtw_regd_get(rtwdev);
seq_printf(m, "channel: %u\n", ch);
seq_printf(m, "bandwidth: %u\n", bw);
@@ -667,6 +669,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
}
mutex_unlock(&hal->tx_power_mutex);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
index c266c84ef233..b85075cd68d0 100644
--- a/drivers/net/wireless/realtek/rtw88/efuse.c
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -86,7 +86,7 @@ static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map,
static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 size = rtwdev->efuse.physical_size;
u32 efuse_ctl;
u32 addr;
@@ -145,7 +145,7 @@ EXPORT_SYMBOL(rtw_read8_physical_efuse);
int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
u32 phy_size = efuse->physical_size;
u32 log_size = efuse->logical_size;
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 4fdab0329695..0b5f903c0f36 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -14,6 +14,8 @@
#include "util.h"
#include "wow.h"
#include "ps.h"
+#include "phy.h"
+#include "mac.h"
static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
struct sk_buff *skb)
@@ -116,7 +118,7 @@ legacy:
si->ra_report.desc_rate = rate;
si->ra_report.bit_rate = bit_rate;
- sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
+ sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}
static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
@@ -904,7 +906,7 @@ void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
struct rtw_nlo_info_hdr *nlo_hdr;
struct cfg80211_ssid *ssid;
@@ -959,7 +961,7 @@ static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
struct ieee80211_channel *channels = pno_req->channels;
struct sk_buff *skb;
@@ -993,7 +995,7 @@ static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
struct rtw_lps_pg_dpk_hdr *dpk_hdr;
struct sk_buff *skb;
@@ -1018,7 +1020,7 @@ static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_lps_conf *conf = &rtwdev->lps_conf;
struct rtw_lps_pg_info_hdr *pg_info_hdr;
struct rtw_wow_param *rtw_wow = &rtwdev->wow;
@@ -1080,10 +1082,10 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
skb_new = ieee80211_proberesp_get(hw, vif);
break;
case RSVD_NULL:
- skb_new = ieee80211_nullfunc_get(hw, vif, false);
+ skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
break;
case RSVD_QOS_NULL:
- skb_new = ieee80211_nullfunc_get(hw, vif, true);
+ skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
break;
case RSVD_LPS_PG_DPK:
skb_new = rtw_lps_pg_dpk_get(hw);
@@ -1122,7 +1124,7 @@ static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
enum rtw_rsvd_packet_type type)
{
struct rtw_tx_pkt_info pkt_info = {0};
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 *pkt_desc;
rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
@@ -1433,7 +1435,7 @@ static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
struct ieee80211_hw *hw = rtwdev->hw;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *iter;
struct rtw_rsvd_page *rsvd_pkt;
u32 page = 0;
@@ -1647,7 +1649,7 @@ out:
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
u32 offset, u32 size, u32 *buf)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 start_pg, residue;
if (sel >= RTW_FW_FIFO_MAX) {
@@ -1706,7 +1708,7 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
u8 location)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;
@@ -1818,8 +1820,8 @@ static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
struct sk_buff_head *list, u8 *bands,
struct rtw_vif *rtwvif)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
- struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *new;
u8 idx;
@@ -1841,16 +1843,23 @@ static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
struct sk_buff_head *probe_req_list)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb, *tmp;
u8 page_offset = 1, *buf, page_size = chip->page_size;
- u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
u16 buf_offset = page_size * page_offset;
u8 tx_desc_sz = chip->tx_pkt_desc_sz;
+ u8 page_cnt, pages;
unsigned int pkt_len;
int ret;
+ if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
+ page_cnt = RTW_OLD_PROBE_PG_CNT;
+ else
+ page_cnt = RTW_PROBE_PG_CNT;
+
+ pages = page_offset + num_probes * page_cnt;
+
buf = kzalloc(page_size * pages, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1859,7 +1868,7 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
skb_queue_walk_safe(probe_req_list, skb, tmp) {
skb_unlink(skb, probe_req_list);
rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
- if (skb->len > page_size * RTW_PROBE_PG_CNT) {
+ if (skb->len > page_size * page_cnt) {
ret = -EINVAL;
goto out;
}
@@ -1869,8 +1878,8 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);
- buf_offset += RTW_PROBE_PG_CNT * page_size;
- page_offset += RTW_PROBE_PG_CNT;
+ buf_offset += page_cnt * page_size;
+ page_offset += page_cnt;
kfree_skb(skb);
}
@@ -2048,6 +2057,9 @@ void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw_leave_lps_deep(rtwdev);
+ rtw_hci_flush_all_queues(rtwdev, false);
+ rtw_mac_flush_all_queues(rtwdev, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
req->mac_addr_mask);
@@ -2080,10 +2092,9 @@ void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_core_scan_complete(rtwdev, vif, true);
rtwvif = (struct rtw_vif *)vif->drv_priv;
- if (rtwvif->net_type == RTW_NET_MGD_LINKED) {
- hal->current_channel = chan;
- hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
- }
+ if (chan)
+ rtw_store_op_chan(rtwdev, false);
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
@@ -2124,6 +2135,7 @@ int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw_ch_switch_option cs_option = {0};
struct rtw_chan_list chan_list = {0};
int ret = 0;
@@ -2132,7 +2144,7 @@ int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
return -EINVAL;
cs_option.switch_en = enable;
- cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED;
+ cs_option.back_op_en = scan_info->op_chan != 0;
if (enable) {
ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
if (ret)
@@ -2171,14 +2183,33 @@ void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}
-void rtw_store_op_chan(struct rtw_dev *rtwdev)
+void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
{
struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw_hal *hal = &rtwdev->hal;
+ u8 band;
+
+ if (backup) {
+ scan_info->op_chan = hal->current_channel;
+ scan_info->op_bw = hal->current_band_width;
+ scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+ scan_info->op_pri_ch = hal->primary_channel;
+ } else {
+ band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ rtw_update_channel(rtwdev, scan_info->op_chan,
+ scan_info->op_pri_ch,
+ band, scan_info->op_bw);
+ }
+}
- scan_info->op_chan = hal->current_channel;
- scan_info->op_bw = hal->current_band_width;
- scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
+void rtw_clear_op_chan(struct rtw_dev *rtwdev)
+{
+ struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
+
+ scan_info->op_chan = 0;
+ scan_info->op_bw = 0;
+ scan_info->op_pri_ch_idx = 0;
+ scan_info->op_pri_ch = 0;
}
static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
@@ -2193,7 +2224,7 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_c2h_cmd *c2h;
enum rtw_scan_notify_id id;
- u8 chan, status;
+ u8 chan, band, status;
if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
return;
@@ -2204,10 +2235,13 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
status = GET_CHAN_SWITCH_STATUS(c2h->payload);
if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
- if (rtw_is_op_chan(rtwdev, chan))
+ band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ rtw_update_channel(rtwdev, chan, chan, band,
+ RTW_CHANNEL_WIDTH_20);
+ if (rtw_is_op_chan(rtwdev, chan)) {
+ rtw_store_op_chan(rtwdev, false);
ieee80211_wake_queues(rtwdev->hw);
- hal->current_channel = chan;
- hal->current_band_type = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ }
} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
if (IS_CH_5G_BAND(chan)) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
@@ -2220,7 +2254,12 @@ void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
rtw_coex_switchband_notify(rtwdev, chan_type);
}
- if (rtw_is_op_chan(rtwdev, chan))
+ /* The channel of C2H RTW_SCAN_NOTIFY_ID_PRESWITCH is next
+ * channel that hardware will switch. We need to stop queue
+ * if next channel is non-op channel.
+ */
+ if (!rtw_is_op_chan(rtwdev, chan) &&
+ rtw_is_op_chan(rtwdev, hal->current_channel))
ieee80211_stop_queues(rtwdev->hw);
}
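The fw.c changes above rework the hardware-scan channel bookkeeping: rtw_store_op_chan() now either backs up the current operating channel into scan_info (backup == true, when a BSSID is configured) or restores it through rtw_update_channel() (backup == false, after scan completion or when a post-switch notification lands on the op channel), and the new rtw_clear_op_chan() zeroes the backup so back-op is disabled once the BSSID goes away. A reduced sketch of that save/restore flow, with the structures cut down to the fields used here and the restore path simplified (the real code goes through rtw_update_channel()):

#include <stdint.h>
#include <stdio.h>

struct hal { uint8_t chan, bw, pri_ch; };
struct scan_info { uint8_t op_chan, op_bw, op_pri_ch; };

static void store_op_chan(struct scan_info *si, struct hal *hal, int backup)
{
	if (backup) {
		si->op_chan = hal->chan;
		si->op_bw = hal->bw;
		si->op_pri_ch = hal->pri_ch;
	} else {
		/* restore: switch back to the backed-up operating channel */
		hal->chan = si->op_chan;
		hal->bw = si->op_bw;
		hal->pri_ch = si->op_pri_ch;
	}
}

static void clear_op_chan(struct scan_info *si)
{
	si->op_chan = si->op_bw = si->op_pri_ch = 0;
}

int main(void)
{
	struct hal hal = { .chan = 36, .bw = 2, .pri_ch = 36 };
	struct scan_info si = { 0 };

	store_op_chan(&si, &hal, 1);	/* BSSID set: back up op channel */
	hal.chan = 1;			/* scan hops to another channel  */
	store_op_chan(&si, &hal, 0);	/* post-switch: back to op chan  */
	printf("chan=%d\n", hal.chan);		/* 36 */
	clear_op_chan(&si);			/* BSSID cleared: back-op off */
	printf("op_chan=%d\n", si.op_chan);	/* 0 */
	return 0;
}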
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 7a37675c61e8..a5a965803a3c 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -41,7 +41,8 @@
#define RTW_EX_CH_INFO_HDR_SIZE 2
#define RTW_SCAN_WIDTH 0
#define RTW_PRI_CH_IDX 1
-#define RTW_PROBE_PG_CNT 2
+#define RTW_OLD_PROBE_PG_CNT 2
+#define RTW_PROBE_PG_CNT 4
enum rtw_c2h_cmd_id {
C2H_CCX_TX_RPT = 0x03,
@@ -120,6 +121,10 @@ enum rtw_fw_feature {
FW_FEATURE_MAX = BIT(31),
};
+enum rtw_fw_feature_ext {
+ FW_FEATURE_EXT_OLD_PAGE_NUM = BIT(0),
+};
+
enum rtw_beacon_filter_offload_mode {
BCN_FILTER_OFFLOAD_MODE_0 = 0,
BCN_FILTER_OFFLOAD_MODE_1,
@@ -323,6 +328,11 @@ struct rtw_fw_hdr_legacy {
__le32 rsvd5;
} __packed;
+#define RTW_FW_VER_CODE(ver, sub_ver, idx) \
+ (((ver) << 16) | ((sub_ver) << 8) | (idx))
+#define RTW_FW_SUIT_VER_CODE(s) \
+ RTW_FW_VER_CODE((s).version, (s).sub_version, (s).sub_index)
+
/* C2H */
#define GET_CCX_REPORT_SEQNUM_V0(c2h_payload) (c2h_payload[6] & 0xfc)
#define GET_CCX_REPORT_STATUS_V0(c2h_payload) (c2h_payload[0] & 0xc0)
@@ -770,6 +780,12 @@ static inline bool rtw_fw_feature_check(struct rtw_fw_state *fw,
return !!(fw->feature & feature);
}
+static inline bool rtw_fw_feature_ext_check(struct rtw_fw_state *fw,
+ enum rtw_fw_feature_ext feature)
+{
+ return !!(fw->feature_ext & feature);
+}
+
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
struct sk_buff *skb);
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
@@ -831,7 +847,8 @@ int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
-void rtw_store_op_chan(struct rtw_dev *rtwdev);
+void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup);
+void rtw_clear_op_chan(struct rtw_dev *rtwdev);
void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
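RTW_FW_VER_CODE() packs (version, sub_version, sub_index) into a single integer so firmware versions can be compared with an ordinary less-than; main.c below uses it to flag FW_FEATURE_EXT_OLD_PAGE_NUM on RTL8822C firmware older than 9.9.13. A quick check of the packing arithmetic (the example firmware versions are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define RTW_FW_VER_CODE(ver, sub_ver, idx) \
	(((ver) << 16) | ((sub_ver) << 8) | (idx))

int main(void)
{
	uint32_t cutoff = RTW_FW_VER_CODE(9, 9, 13);	/* 0x0009090d */
	uint32_t old_fw = RTW_FW_VER_CODE(9, 8, 33);	/* 0x00090821 */
	uint32_t new_fw = RTW_FW_VER_CODE(9, 9, 14);	/* 0x0009090e */

	printf("cutoff = 0x%08x\n", cutoff);
	printf("9.8.33 needs OLD_PAGE_NUM: %d\n", old_fw < cutoff);	/* 1 */
	printf("9.9.14 needs OLD_PAGE_NUM: %d\n", new_fw < cutoff);	/* 0 */
	return 0;
}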
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index caf2603da2d6..52076e89d59a 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -243,7 +243,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_pwr_seq_cmd **pwr_seq;
u8 rpwm;
bool cur_pwr;
@@ -587,7 +587,7 @@ static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
u32 src, u32 dst, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u32 desc_size = chip->tx_pkt_desc_sz;
u8 first_part;
u32 mem_offset;
@@ -934,7 +934,7 @@ static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
u32 prio_queue, bool drop)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_prioq_addr *addr;
bool wsize;
u16 avail_page, rsvd_page;
@@ -996,7 +996,7 @@ void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_rqpn *rqpn = NULL;
u16 txdma_pq_map = 0;
@@ -1037,8 +1037,8 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev)
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
u16 cur_pg_addr;
u8 csi_buf_pg_num = chip->csi_buf_pg_num;
@@ -1092,8 +1092,8 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev,
const struct rtw_page_table *pg_tbl,
u16 pubq_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
@@ -1123,8 +1123,8 @@ static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
const struct rtw_page_table *pg_tbl,
u16 pubq_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
u32 val32;
val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
@@ -1149,8 +1149,8 @@ static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_page_table *pg_tbl = NULL;
u16 pubq_num;
int ret;
@@ -1277,7 +1277,7 @@ static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
int rtw_mac_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int ret;
ret = rtw_init_trx_cfg(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index c7b98a0599d5..07578ccc4bab 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -101,7 +101,8 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
rtw_set_channel(rtwdev);
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (hw->conf.flags & IEEE80211_CONF_IDLE))
+ (hw->conf.flags & IEEE80211_CONF_IDLE) &&
+ !test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
rtw_enter_ips(rtwdev);
out:
@@ -377,7 +378,6 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
- rtw_store_op_chan(rtwdev);
} else {
rtw_leave_lps(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
@@ -395,6 +395,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
config |= PORT_SET_BSSID;
+ if (is_zero_ether_addr(rtwvif->bssid))
+ rtw_clear_op_chan(rtwdev);
+ else
+ rtw_store_op_chan(rtwdev, true);
}
if (changed & BSS_CHANGED_BEACON_INT) {
@@ -434,7 +438,7 @@ static int rtw_ops_start_ap(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
mutex_lock(&rtwdev->mutex);
chip->ops->phy_calibration(rtwdev);
@@ -752,7 +756,7 @@ static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
u32 rx_antenna)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int ret;
if (!chip->ops->set_antenna)
@@ -872,7 +876,9 @@ static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
+ mutex_lock(&rtwdev->mutex);
rtw_set_sar_specs(rtwdev, sar);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 76dc9da88f6c..67151dbf8384 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -353,7 +353,7 @@ struct rtw_fwcd_hdr {
static int rtw_fwcd_prep(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc;
const struct rtw_fwcd_segs *segs = chip->fwcd_segs;
u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr);
@@ -675,67 +675,126 @@ void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period)
rtw_write8(rtwdev, REG_DTIM_COUNTER_ROOT, dtim_period - 1);
}
+void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
+ u8 primary_channel, enum rtw_supported_band band,
+ enum rtw_bandwidth bandwidth)
+{
+ enum nl80211_band nl_band = rtw_hw_to_nl80211_band(band);
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 *cch_by_bw = hal->cch_by_bw;
+ u32 center_freq, primary_freq;
+ enum rtw_sar_bands sar_band;
+ u8 primary_channel_idx;
+
+ center_freq = ieee80211_channel_to_frequency(center_channel, nl_band);
+ primary_freq = ieee80211_channel_to_frequency(primary_channel, nl_band);
+
+ /* assign the center channel used while 20M bw is selected */
+ cch_by_bw[RTW_CHANNEL_WIDTH_20] = primary_channel;
+
+ /* assign the center channel used while current bw is selected */
+ cch_by_bw[bandwidth] = center_channel;
+
+ switch (bandwidth) {
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ primary_channel_idx = RTW_SC_DONT_CARE;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (primary_freq > center_freq)
+ primary_channel_idx = RTW_SC_20_UPPER;
+ else
+ primary_channel_idx = RTW_SC_20_LOWER;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (primary_freq > center_freq) {
+ if (primary_freq - center_freq == 10)
+ primary_channel_idx = RTW_SC_20_UPPER;
+ else
+ primary_channel_idx = RTW_SC_20_UPMOST;
+
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel + 4;
+ } else {
+ if (center_freq - primary_freq == 10)
+ primary_channel_idx = RTW_SC_20_LOWER;
+ else
+ primary_channel_idx = RTW_SC_20_LOWEST;
+
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_channel - 4;
+ }
+ break;
+ }
+
+ switch (center_channel) {
+ case 1 ... 14:
+ sar_band = RTW_SAR_BAND_0;
+ break;
+ case 36 ... 64:
+ sar_band = RTW_SAR_BAND_1;
+ break;
+ case 100 ... 144:
+ sar_band = RTW_SAR_BAND_3;
+ break;
+ case 149 ... 177:
+ sar_band = RTW_SAR_BAND_4;
+ break;
+ default:
+ WARN(1, "unknown ch(%u) to SAR band\n", center_channel);
+ sar_band = RTW_SAR_BAND_0;
+ break;
+ }
+
+ hal->current_primary_channel_index = primary_channel_idx;
+ hal->current_band_width = bandwidth;
+ hal->primary_channel = primary_channel;
+ hal->current_channel = center_channel;
+ hal->current_band_type = band;
+ hal->sar_band = sar_band;
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *chan_params)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
- u8 *cch_by_bw = chan_params->cch_by_bw;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW_CHANNEL_WIDTH_20;
- u8 primary_chan_idx = 0;
- u8 i;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
center_freq = chandef->center_freq1;
- /* assign the center channel used while 20M bw is selected */
- cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value;
-
switch (width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW_CHANNEL_WIDTH_20;
- primary_chan_idx = RTW_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW_CHANNEL_WIDTH_40;
- if (primary_freq > center_freq) {
- primary_chan_idx = RTW_SC_20_UPPER;
+ if (primary_freq > center_freq)
center_chan -= 2;
- } else {
- primary_chan_idx = RTW_SC_20_LOWER;
+ else
center_chan += 2;
- }
break;
case NL80211_CHAN_WIDTH_80:
bandwidth = RTW_CHANNEL_WIDTH_80;
if (primary_freq > center_freq) {
- if (primary_freq - center_freq == 10) {
- primary_chan_idx = RTW_SC_20_UPPER;
+ if (primary_freq - center_freq == 10)
center_chan -= 2;
- } else {
- primary_chan_idx = RTW_SC_20_UPMOST;
+ else
center_chan -= 6;
- }
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4;
} else {
- if (center_freq - primary_freq == 10) {
- primary_chan_idx = RTW_SC_20_LOWER;
+ if (center_freq - primary_freq == 10)
center_chan += 2;
- } else {
- primary_chan_idx = RTW_SC_20_LOWEST;
+ else
center_chan += 6;
- }
- /* assign the center channel used
- * while 40M bw is selected
- */
- cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4;
}
break;
default:
@@ -745,60 +804,30 @@ void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
chan_params->center_chan = center_chan;
chan_params->bandwidth = bandwidth;
- chan_params->primary_chan_idx = primary_chan_idx;
-
- /* assign the center channel used while current bw is selected */
- cch_by_bw[bandwidth] = center_chan;
-
- for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++)
- cch_by_bw[i] = 0;
+ chan_params->primary_chan = channel->hw_value;
}
void rtw_set_channel(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_channel_params ch_param;
- u8 center_chan, bandwidth, primary_chan_idx;
- u8 i;
+ u8 center_chan, primary_chan, bandwidth, band;
rtw_get_channel_params(&hw->conf.chandef, &ch_param);
if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
return;
center_chan = ch_param.center_chan;
+ primary_chan = ch_param.primary_chan;
bandwidth = ch_param.bandwidth;
- primary_chan_idx = ch_param.primary_chan_idx;
-
- hal->current_band_width = bandwidth;
- hal->current_channel = center_chan;
- hal->current_primary_channel_index = primary_chan_idx;
- hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->sar_band = RTW_SAR_BAND_0;
- break;
- case 36 ... 64:
- hal->sar_band = RTW_SAR_BAND_1;
- break;
- case 100 ... 144:
- hal->sar_band = RTW_SAR_BAND_3;
- break;
- case 149 ... 177:
- hal->sar_band = RTW_SAR_BAND_4;
- break;
- default:
- WARN(1, "unknown ch(%u) to SAR band\n", center_chan);
- hal->sar_band = RTW_SAR_BAND_0;
- break;
- }
+ band = ch_param.center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
- for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++)
- hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
+ rtw_update_channel(rtwdev, center_chan, primary_chan, band, bandwidth);
- chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+ chip->ops->set_channel(rtwdev, center_chan, bandwidth,
+ hal->current_primary_channel_index);
if (hal->current_band_type == RTW_BAND_5G) {
rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
@@ -821,7 +850,7 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
void rtw_chip_prepare_tx(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (rtwdev->need_rfk) {
rtwdev->need_rfk = false;
@@ -890,8 +919,8 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
hw_ant_num >= hal->rf_path_num)
@@ -1240,7 +1269,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw;
fw = &rtwdev->fw;
@@ -1261,7 +1290,7 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev)
static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
struct rtw_fw_state *fw)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported ||
!fw->feature)
@@ -1280,7 +1309,7 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev,
static int rtw_power_on(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw = &rtwdev->fw;
bool wifi_only;
int ret;
@@ -1469,8 +1498,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw_chip_info *chip = rtwdev->chip;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
@@ -1552,8 +1581,23 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
vht_cap->vht_mcs.tx_highest = highest;
}
+static u16 rtw_get_max_scan_ie_len(struct rtw_dev *rtwdev)
+{
+ u16 len;
+
+ len = rtwdev->chip->max_scan_ie_len;
+
+ if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD) &&
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822C)
+ len = IEEE80211_MAX_DATA_LEN;
+ else if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
+ len -= RTW_OLD_PROBE_PG_CNT * TX_PAGE_SIZE;
+
+ return len;
+}
+
static void rtw_set_supported_band(struct ieee80211_hw *hw,
- struct rtw_chip_info *chip)
+ const struct rtw_chip_info *chip)
{
struct rtw_dev *rtwdev = hw->priv;
struct ieee80211_supported_band *sband;
@@ -1585,7 +1629,7 @@ err_out:
}
static void rtw_unset_supported_band(struct ieee80211_hw *hw,
- struct rtw_chip_info *chip)
+ const struct rtw_chip_info *chip)
{
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
@@ -1607,7 +1651,7 @@ static void rtw_vif_smps_iter(void *data, u8 *mac,
void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool txrx_1ss)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
if (!chip->ops->config_txrx_mode || rtwdev->hal.txrx_1ss == txrx_1ss)
@@ -1631,6 +1675,10 @@ static void __update_firmware_feature(struct rtw_dev *rtwdev,
feature = le32_to_cpu(fw_hdr->feature);
fw->feature = feature & FW_FEATURE_SIG ? feature : 0;
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C &&
+ RTW_FW_SUIT_VER_CODE(rtwdev->fw) < RTW_FW_VER_CODE(9, 9, 13))
+ fw->feature_ext |= FW_FEATURE_EXT_OLD_PAGE_NUM;
}
static void __update_firmware_info(struct rtw_dev *rtwdev,
@@ -1724,7 +1772,7 @@ static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type)
static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
@@ -1982,7 +2030,7 @@ static void rtw_stats_init(struct rtw_dev *rtwdev)
int rtw_core_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_coex *coex = &rtwdev->coex;
int ret;
@@ -2045,7 +2093,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
if (ret) {
rtw_warn(rtwdev, "no firmware loaded\n");
- return ret;
+ goto out;
}
if (chip->wow_fw_name) {
@@ -2055,11 +2103,15 @@ int rtw_core_init(struct rtw_dev *rtwdev)
wait_for_completion(&rtwdev->fw.completion);
if (rtwdev->fw.firmware)
release_firmware(rtwdev->fw.firmware);
- return ret;
+ goto out;
}
}
return 0;
+
+out:
+ destroy_workqueue(rtwdev->tx_wq);
+ return ret;
}
EXPORT_SYMBOL(rtw_core_init);
@@ -2136,7 +2188,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
- hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN;
+ hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
@@ -2180,7 +2232,7 @@ EXPORT_SYMBOL(rtw_register_hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
ieee80211_unregister_hw(hw);
rtw_unset_supported_band(hw, chip);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 7db627fc26be..bccd7b28f60c 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -22,7 +22,6 @@
#define MAX_PG_CAM_BACKUP_NUM 8
#define RTW_SCAN_MAX_SSIDS 4
-#define RTW_SCAN_MAX_IE_LEN 128
#define RTW_MAX_PATTERN_NUM 12
#define RTW_MAX_PATTERN_MASK_SIZE 16
@@ -33,6 +32,7 @@
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
#define TX_PAGE_SIZE_SHIFT 7
+#define TX_PAGE_SIZE (1 << TX_PAGE_SIZE_SHIFT)
#define RTW_CHANNEL_WIDTH_MAX 3
#define RTW_RF_PATH_MAX 4
@@ -510,12 +510,8 @@ struct rtw_timer_list {
struct rtw_channel_params {
u8 center_chan;
+ u8 primary_chan;
u8 bandwidth;
- u8 primary_chan_idx;
- /* center channel by different available bandwidth,
- * val of (bw > current bandwidth) is invalid
- */
- u8 cch_by_bw[RTW_MAX_CHANNEL_WIDTH + 1];
};
struct rtw_hw_reg {
@@ -1232,6 +1228,7 @@ struct rtw_chip_info {
const char *wow_fw_name;
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
+ const u16 max_scan_ie_len;
/* coex paras */
u32 coex_para_ver;
@@ -1853,6 +1850,7 @@ struct rtw_fw_state {
u8 sub_index;
u16 h2c_version;
u32 feature;
+ u32 feature_ext;
};
enum rtw_sar_sources {
@@ -1896,6 +1894,7 @@ struct rtw_hal {
u8 current_primary_channel_index;
u8 current_band_width;
u8 current_band_type;
+ u8 primary_channel;
/* center channel for different available bandwidth,
* val of (bw > current_band_width) is invalid
@@ -1967,6 +1966,7 @@ struct rtw_hw_scan_info {
struct ieee80211_vif *scanning_vif;
u8 probe_pg_size;
u8 op_pri_ch_idx;
+ u8 op_pri_ch;
u8 op_chan;
u8 op_bw;
};
@@ -1978,7 +1978,7 @@ struct rtw_dev {
struct rtw_hci hci;
struct rtw_hw_scan_info scan_info;
- struct rtw_chip_info *chip;
+ const struct rtw_chip_info *chip;
struct rtw_hal hal;
struct rtw_fifo_conf fifo;
struct rtw_fw_state fw;
@@ -2132,6 +2132,20 @@ static inline int rtw_chip_dump_fw_crash(struct rtw_dev *rtwdev)
return 0;
}
+static inline
+enum nl80211_band rtw_hw_to_nl80211_band(enum rtw_supported_band hw_band)
+{
+ switch (hw_band) {
+ default:
+ case RTW_BAND_2G:
+ return NL80211_BAND_2GHZ;
+ case RTW_BAND_5G:
+ return NL80211_BAND_5GHZ;
+ case RTW_BAND_60G:
+ return NL80211_BAND_60GHZ;
+ }
+}
+
void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel);
void rtw_set_dtim_period(struct rtw_dev *rtwdev, int dtim_period);
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
@@ -2173,4 +2187,7 @@ int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size,
u32 fwcd_item);
int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size);
void rtw_set_txrx_1ss(struct rtw_dev *rtwdev, bool config_1ss);
+void rtw_update_channel(struct rtw_dev *rtwdev, u8 center_channel,
+ u8 primary_channel, enum rtw_supported_band band,
+ enum rtw_bandwidth bandwidth);
#endif
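
Most hunks in this diff are the same mechanical change: every user of rtwdev->chip now takes a const pointer, which lets the per-chip rtw_chip_info descriptors themselves be declared const and placed in read-only memory. A minimal userspace sketch of why this matters; the names below are illustrative, not the driver's structures.

#include <stdio.h>

struct chip_info {
	const char *name;
	unsigned int page_size;
};

/* With const everywhere, the descriptor can live in .rodata. */
static const struct chip_info demo_chip = {
	.name = "demo",
	.page_size = 128,
};

struct dev {
	const struct chip_info *chip;	/* read-only view, as in struct rtw_dev */
};

static void print_chip(const struct dev *d)
{
	/* d->chip->page_size = 256;  would now be a compile-time error */
	printf("%s: page_size=%u\n", d->chip->name, d->chip->page_size);
}

int main(void)
{
	struct dev d = { .chip = &demo_chip };

	print_chip(&d);
	return 0;
}
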
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 24d5695363d3..0975d27240e4 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -322,7 +322,7 @@ static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
struct rtw_pci_rx_ring *rx_ring;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
int tx_desc_size, rx_desc_size;
u32 len;
@@ -721,7 +721,7 @@ static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
u32 idx)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_rx_buffer_desc *buf_desc;
u32 desc_sz = chip->rx_buf_desc_sz;
u16 total_pkt_size;
@@ -834,7 +834,7 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
struct sk_buff *skb, u8 queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci_tx_ring *ring;
struct rtw_pci_tx_data *tx_data;
dma_addr_t dma;
@@ -1073,7 +1073,7 @@ static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u8 hw_queue, u32 limit)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct napi_struct *napi = &rtwpci->napi;
struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
struct rtw_rx_pkt_stat pkt_stat;
@@ -1425,7 +1425,7 @@ static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct pci_dev *pdev = rtwpci->pdev;
u16 link_ctrl;
@@ -1467,7 +1467,7 @@ static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
switch (chip->id) {
case RTW_CHIP_TYPE_8822C:
@@ -1483,7 +1483,7 @@ static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
@@ -1538,7 +1538,7 @@ static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
@@ -1550,7 +1550,7 @@ static int __maybe_unused rtw_pci_resume(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
@@ -1717,8 +1717,7 @@ static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
init_dummy_netdev(&rtwpci->netdev);
- netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
}
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
@@ -1848,7 +1847,7 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw_dev *rtwdev;
- struct rtw_chip_info *chip;
+ const struct rtw_chip_info *chip;
if (!hw)
return;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8982e0c98dac..bd7d05e08084 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(rtw_phy_set_edcca_th);
void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
/* turn off in debugfs for debug usage */
@@ -165,7 +165,7 @@ void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
rtw_phy_adaptivity_set_mode(rtwdev);
if (chip->ops->adaptivity_init)
@@ -180,7 +180,7 @@ static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->cfo_init)
chip->ops->cfo_init(rtwdev);
@@ -199,7 +199,7 @@ static void rtw_phy_tx_path_div_init(struct rtw_dev *rtwdev)
void rtw_phy_init(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 addr, mask;
@@ -226,7 +226,7 @@ EXPORT_SYMBOL(rtw_phy_init);
void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u32 addr, mask;
u8 path;
@@ -245,7 +245,7 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
chip->ops->false_alarm_statistics(rtwdev);
}
@@ -603,7 +603,7 @@ static void rtw_phy_rrsr_update(struct rtw_dev *rtwdev)
static void rtw_phy_dpk_track(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->dpk_track)
chip->ops->dpk_track(rtwdev);
@@ -659,7 +659,7 @@ EXPORT_SYMBOL(rtw_phy_parsing_cfo);
static void rtw_phy_cfo_track(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (chip->ops->cfo_track)
chip->ops->cfo_track(rtwdev);
@@ -720,8 +720,8 @@ static u8 rtw_phy_cck_pd_lv(struct rtw_dev *rtwdev)
static void rtw_phy_cck_pd(struct rtw_dev *rtwdev)
{
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- struct rtw_chip_info *chip = rtwdev->chip;
u32 cck_fa = dm_info->cck_fa_cnt;
u8 level;
@@ -816,23 +816,18 @@ static u8 rtw_phy_linear_2_db(u64 linear)
u8 j;
u32 dB;
- if (linear >= db_invert_table[11][7])
- return 96; /* maximum 96 dB */
-
for (i = 0; i < 12; i++) {
- if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
- break;
- else if (i > 2 && linear <= db_invert_table[i][7])
- break;
+ for (j = 0; j < 8; j++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
+ goto cnt;
+ else if (i > 2 && linear <= db_invert_table[i][j])
+ goto cnt;
+ }
}
- for (j = 0; j < 8; j++) {
- if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
- break;
- else if (i > 2 && linear <= db_invert_table[i][j])
- break;
- }
+ return 96; /* maximum 96 dB */
+cnt:
if (j == 0 && i == 0)
goto end;
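
The rework above folds the two separate index searches into a single nested scan with a goto, and only returns the 96 dB cap when no table entry matches. A self-contained userspace sketch of the same control flow; the monotonic table here is made up and stands in for db_invert_table, with the FRAC_BITS scaling omitted.

#include <stdio.h>

static const unsigned int table[3][4] = {
	{ 1, 2, 4, 8 },
	{ 16, 32, 64, 128 },
	{ 256, 512, 1024, 2048 },
};

static unsigned int linear_to_index(unsigned int linear)
{
	unsigned int i, j;

	/* Scan row-major and jump out at the first entry >= the input. */
	for (i = 0; i < 3; i++)
		for (j = 0; j < 4; j++)
			if (linear <= table[i][j])
				goto found;

	return 96;	/* cap, analogous to the 96 dB maximum */
found:
	return i * 4 + j;	/* both loop indices are still valid here */
}

int main(void)
{
	printf("%u %u %u\n", linear_to_index(3), linear_to_index(100),
	       linear_to_index(1u << 20));
	return 0;
}
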
@@ -900,7 +895,7 @@ u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 val, direct_addr;
@@ -923,7 +918,7 @@ u32 rtw_phy_read_rf_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_rf_sipi_addr *rf_sipi_addr;
const struct rtw_rf_sipi_addr *rf_sipi_addr_a;
u32 val32;
@@ -972,8 +967,8 @@ bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
- u32 *sipi_addr = chip->rf_sipi_addr;
+ const struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *sipi_addr = chip->rf_sipi_addr;
u32 data_and_addr;
u32 old_data = 0;
u32 shift;
@@ -1012,7 +1007,7 @@ bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
struct rtw_hal *hal = &rtwdev->hal;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const u32 *base_addr = chip->rf_base_addr;
u32 direct_addr;
@@ -1747,7 +1742,7 @@ EXPORT_SYMBOL(rtw_phy_cfg_rf);
static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
if (!chip->rfk_init_tbl)
@@ -1766,7 +1761,7 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
@@ -1875,7 +1870,7 @@ static u8 rtw_get_channel_group(u8 channel, u8 rate)
static s8 rtw_phy_get_dis_dpd_by_rate_diff(struct rtw_dev *rtwdev, u16 rate)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
s8 dpd_diff = 0;
if (!chip->en_dis_dpd)
@@ -1909,7 +1904,7 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
bool mcs_rate;
bool above_2ss;
@@ -1956,7 +1951,7 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
enum rtw_bandwidth bandwidth,
u8 rate, u8 group)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
u8 tx_power;
u8 upper, lower;
bool mcs_rate;
@@ -2209,7 +2204,7 @@ static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
u8 path;
@@ -2484,7 +2479,7 @@ static void rtw_phy_set_tx_path_by_reg(struct rtw_dev *rtwdev,
{
struct rtw_path_div *path_div = &rtwdev->dm_path_div;
enum rtw_bb_path tx_path_sel_cck = tx_path_sel_1ss;
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (tx_path_sel_1ss == path_div->current_tx_path)
return;
@@ -2539,7 +2534,7 @@ static void rtw_phy_tx_path_diversity_2ss(struct rtw_dev *rtwdev)
void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
if (!chip->path_div_supported)
return;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index b6c5ae60a462..ccfcbd3ced03 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -114,7 +114,7 @@ const struct rtw_table name ## _tbl = { \
static inline const struct rtw_rfe_def *rtw_get_rfe_def(struct rtw_dev *rtwdev)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
const struct rtw_rfe_def *rfe_def = NULL;
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index bfa64c038f5f..c93da743681f 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -19,14 +19,14 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "leave idle state failed\n");
rtw_set_channel(rtwdev);
- clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
return ret;
}
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags);
+ if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ return 0;
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
@@ -50,6 +50,9 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
+ if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ return 0;
+
rtw_hci_link_ps(rtwdev, false);
ret = rtw_ips_pwr_up(rtwdev);
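
The test_and_set_bit()/test_and_clear_bit() pair above makes IPS entry and exit idempotent: a second enter or leave call becomes a no-op instead of re-running the power sequence. A userspace sketch of the same pattern, using a C11 atomic exchange in place of the kernel bit helpers.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool inactive_ps;	/* stands in for RTW_FLAG_INACTIVE_PS */

static void enter_ips(void)
{
	/* previous value already true: we are in IPS, nothing to do */
	if (atomic_exchange(&inactive_ps, true))
		return;
	printf("power down\n");	/* the rtw_ips_pwr_down() step */
}

static void leave_ips(void)
{
	/* previous value already false: we never entered IPS */
	if (!atomic_exchange(&inactive_ps, false))
		return;
	printf("power up\n");	/* the rtw_ips_pwr_up() step */
}

int main(void)
{
	enter_ips();
	enter_ips();	/* no-op: bit already set */
	leave_ips();
	leave_ips();	/* no-op: bit already cleared */
	return 0;
}
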
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index 315c2b193e92..2f547cbcf6da 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -479,6 +479,7 @@ void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n",
rtwdev->regd.state, next_regd.state);
+ mutex_lock(&rtwdev->mutex);
rtwdev->regd = next_regd;
rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ",
request->alpha2[0],
@@ -487,6 +488,7 @@ void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
rtw_phy_adaptivity_set_mode(rtwdev);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+ mutex_unlock(&rtwdev->mutex);
}
u8 rtw_regd_get(struct rtw_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index 993bd6b1d723..0a4f770fcbb7 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -2720,7 +2720,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
.ht_supported = true,
.vht_supported = false,
@@ -2748,6 +2748,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl,
.iqk_threshold = 8,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x2007022f,
.bt_desired_ver = 0x2f,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 025262a8970e..9afdc5ce86b4 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1898,7 +1898,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
@@ -1926,6 +1926,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x19092746,
.bt_desired_ver = 0x46,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 321848870561..690e35c98f6e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2517,7 +2517,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.max_power_index = 0x3f,
.csi_buf_pg_num = 0,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
.ht_supported = true,
.vht_supported = true,
@@ -2549,6 +2549,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
.l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 09f9e4adcf34..fccb15dfb959 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5330,7 +5330,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.max_power_index = 0x7f,
.csi_buf_pg_num = 50,
.band = RTW_BAND_2G | RTW_BAND_5G,
- .page_size = 128,
+ .page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
.default_1ss_tx_path = BB_PATH_A,
.path_div_supported = true,
@@ -5375,6 +5375,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.wowlan_stub = &rtw_wowlan_stub_8822c,
.max_sched_scan_ssids = 4,
#endif
+ .max_scan_ie_len = (RTW_PROBE_PG_CNT - 1) * TX_PAGE_SIZE,
.coex_para_ver = 0x22020720,
.bt_desired_ver = 0x20,
.scbd_support = true,
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 60d40a5c2c6a..ab39245e9c2f 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -384,7 +384,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rtw_sta_info *si;
@@ -424,7 +424,7 @@ void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
struct sk_buff *skb,
enum rtw_rsvd_packet_type type)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool bmc;
@@ -475,7 +475,7 @@ rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
u32 tx_pkt_desc_sz;
u32 length;
@@ -501,7 +501,7 @@ rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
u32 tx_pkt_desc_sz;
u32 length;
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index 2c515af214e7..cdfd66a85075 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(check_hw_ready);
bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
@@ -37,7 +37,7 @@ bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
{
- struct rtw_chip_info *chip = rtwdev->chip;
+ const struct rtw_chip_info *chip = rtwdev->chip;
const struct rtw_ltecoex_addr *ltecoex = chip->ltecoex_addr;
if (!check_hw_ready(rtwdev, ltecoex->ctrl, LTECOEX_READY, 1))
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 3006482d25c7..a87f2aff4def 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -12,6 +12,7 @@ rtw89_core-y += core.o \
sar.o \
coex.o \
ps.o \
+ chan.o \
ser.o
obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
new file mode 100644
index 000000000000..a4f61c2f6512
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+
+static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
+ u8 center_chan)
+{
+ switch (band) {
+ default:
+ case RTW89_BAND_2G:
+ switch (center_chan) {
+ default:
+ case 1 ... 14:
+ return RTW89_CH_2G;
+ }
+ case RTW89_BAND_5G:
+ switch (center_chan) {
+ default:
+ case 36 ... 64:
+ return RTW89_CH_5G_BAND_1;
+ case 100 ... 144:
+ return RTW89_CH_5G_BAND_3;
+ case 149 ... 177:
+ return RTW89_CH_5G_BAND_4;
+ }
+ case RTW89_BAND_6G:
+ switch (center_chan) {
+ default:
+ case 1 ... 29:
+ return RTW89_CH_6G_BAND_IDX0;
+ case 33 ... 61:
+ return RTW89_CH_6G_BAND_IDX1;
+ case 65 ... 93:
+ return RTW89_CH_6G_BAND_IDX2;
+ case 97 ... 125:
+ return RTW89_CH_6G_BAND_IDX3;
+ case 129 ... 157:
+ return RTW89_CH_6G_BAND_IDX4;
+ case 161 ... 189:
+ return RTW89_CH_6G_BAND_IDX5;
+ case 193 ... 221:
+ return RTW89_CH_6G_BAND_IDX6;
+ case 225 ... 253:
+ return RTW89_CH_6G_BAND_IDX7;
+ }
+ }
+}
+
+static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw,
+ u32 center_freq,
+ u32 primary_freq)
+{
+ u8 primary_chan_idx;
+ u32 offset;
+
+ switch (bw) {
+ default:
+ case RTW89_CHANNEL_WIDTH_20:
+ primary_chan_idx = RTW89_SC_DONT_CARE;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ if (primary_freq > center_freq)
+ primary_chan_idx = RTW89_SC_20_UPPER;
+ else
+ primary_chan_idx = RTW89_SC_20_LOWER;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_160:
+ if (primary_freq > center_freq) {
+ offset = (primary_freq - center_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
+ } else {
+ offset = (center_freq - primary_freq - 10) / 20;
+ primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
+ }
+ break;
+ }
+
+ return primary_chan_idx;
+}
+
+void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
+ enum rtw89_band band, enum rtw89_bandwidth bandwidth)
+{
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ u32 center_freq, primary_freq;
+
+ memset(chan, 0, sizeof(*chan));
+ chan->channel = center_chan;
+ chan->primary_channel = primary_chan;
+ chan->band_type = band;
+ chan->band_width = bandwidth;
+
+ center_freq = ieee80211_channel_to_frequency(center_chan, nl_band);
+ primary_freq = ieee80211_channel_to_frequency(primary_chan, nl_band);
+
+ chan->freq = center_freq;
+ chan->subband_type = rtw89_get_subband_type(band, center_chan);
+ chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq,
+ primary_freq);
+}
+
+bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct rtw89_chan *new)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chan *chan = &hal->chan[idx];
+ struct rtw89_chan_rcd *rcd = &hal->chan_rcd[idx];
+ bool band_changed;
+
+ rcd->prev_primary_channel = chan->primary_channel;
+ rcd->prev_band_type = chan->band_type;
+ band_changed = new->band_type != chan->band_type;
+
+ *chan = *new;
+ return band_changed;
+}
+
+static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef,
+ bool from_stack)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ hal->chandef[idx] = *chandef;
+
+ if (from_stack)
+ set_bit(idx, hal->entity_map);
+}
+
+void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef)
+{
+ __rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
+}
+
+static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
+{
+ struct cfg80211_chan_def chandef = {0};
+
+ rtw89_get_default_chandef(&chandef);
+ __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false);
+}
+
+void rtw89_entity_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_config_default_chandef(rtwdev);
+}
+
+enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_entity_mode mode;
+ u8 weight;
+
+ weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ switch (weight) {
+ default:
+ rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
+ bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ fallthrough;
+ case 0:
+ rtw89_config_default_chandef(rtwdev);
+ fallthrough;
+ case 1:
+ mode = RTW89_ENTITY_MODE_SCC;
+ break;
+ }
+
+ rtw89_set_entity_mode(rtwdev, mode);
+ return mode;
+}
+
+int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 idx;
+
+ idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ if (idx >= chip->support_chanctx_num)
+ return -ENOENT;
+
+ rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
+ rtw89_set_channel(rtwdev);
+ cfg->idx = idx;
+ return 0;
+}
+
+void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+
+ clear_bit(cfg->idx, hal->entity_map);
+ rtw89_set_channel(rtwdev);
+}
+
+void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ u8 idx = cfg->idx;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+ rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
+ rtw89_set_channel(rtwdev);
+ }
+}
+
+int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return 0;
+}
+
+void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+}
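
rtw89_get_primary_chan_idx() above derives the 20 MHz subchannel slot from the distance between the primary and center frequencies. A worked userspace example of that arithmetic for an 80 MHz channel centered on 5210 MHz (channel 42); the SC_* values below are placeholders laid out like the driver's RTW89_SC_* enum.

#include <stdio.h>

enum sc_offset {
	SC_DONT_CARE = 0,
	SC_20_UPPER = 1,
	SC_20_LOWER = 2,
};

static unsigned int primary_chan_idx(unsigned int center_freq,
				     unsigned int primary_freq)
{
	unsigned int offset;

	if (primary_freq > center_freq) {
		offset = (primary_freq - center_freq - 10) / 20;
		return SC_20_UPPER + offset * 2;
	}
	offset = (center_freq - primary_freq - 10) / 20;
	return SC_20_LOWER + offset * 2;
}

int main(void)
{
	/* primaries 5180/5200/5220/5240 MHz map to LOWER+2, LOWER, UPPER, UPPER+2 */
	printf("%u %u %u %u\n",
	       primary_chan_idx(5210, 5180), primary_chan_idx(5210, 5200),
	       primary_chan_idx(5210, 5220), primary_chan_idx(5210, 5240));
	return 0;
}
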
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
new file mode 100644
index 000000000000..ecbd4503bead
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ * Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#ifndef __RTW89_CHAN_H__
+#define __RTW89_CHAN_H__
+
+#include "core.h"
+
+static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return READ_ONCE(hal->entity_active);
+}
+
+static inline void rtw89_set_entity_state(struct rtw89_dev *rtwdev, bool active)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ WRITE_ONCE(hal->entity_active, active);
+}
+
+static inline
+enum rtw89_entity_mode rtw89_get_entity_mode(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return READ_ONCE(hal->entity_mode);
+}
+
+static inline void rtw89_set_entity_mode(struct rtw89_dev *rtwdev,
+ enum rtw89_entity_mode mode)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ WRITE_ONCE(hal->entity_mode, mode);
+}
+
+void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
+ enum rtw89_band band, enum rtw89_bandwidth bandwidth);
+bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct rtw89_chan *new);
+void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx,
+ const struct cfg80211_chan_def *chandef);
+void rtw89_entity_init(struct rtw89_dev *rtwdev);
+enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
+int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed);
+int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx);
+void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct ieee80211_chanctx_conf *ctx);
+
+#endif
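
chan.c's rtw89_entity_recalc() keeps a bitmap of stack-configured channel contexts and falls back to the default chandef when none remain, always resolving to SCC mode at this stage. A compact userspace sketch of that bookkeeping, with a plain unsigned long standing in for hal->entity_map.

#include <stdio.h>

enum entity_mode { MODE_SCC };	/* only SCC exists at this point */

static enum entity_mode entity_recalc(unsigned long *entity_map)
{
	unsigned int weight = (unsigned int)__builtin_popcountl(*entity_map);

	switch (weight) {
	default:	/* more contexts than supported: reset the map */
		fprintf(stderr, "unknown entity weight %u\n", weight);
		*entity_map = 0;
		/* fall through */
	case 0:		/* nothing from the stack: use the default chandef */
		printf("configuring default chandef\n");
		/* fall through */
	case 1:
		return MODE_SCC;
	}
}

int main(void)
{
	unsigned long map = 0;

	printf("mode = %d\n", entity_recalc(&map));
	map = 0x1;	/* one chanctx added by the stack */
	printf("mode = %d\n", entity_recalc(&map));
	return 0;
}
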
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 683854bba217..bbdfa9ac203c 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -9,6 +9,7 @@
#include "ps.h"
#include "reg.h"
+#define RTW89_COEX_VERSION 0x06030013
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
enum btc_fbtc_tdma_template {
@@ -77,21 +78,21 @@ static const struct rtw89_btc_fbtc_tdma t_def[] = {
static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
- [CXST_B2W] = __DEF_FBTC_SLOT(5, 0x5a5a5a5a, SLOT_ISO),
- [CXST_W1] = __DEF_FBTC_SLOT(70, 0x5a5a5a5a, SLOT_ISO),
- [CXST_W2] = __DEF_FBTC_SLOT(70, 0x5a5a5aaa, SLOT_ISO),
- [CXST_W2B] = __DEF_FBTC_SLOT(15, 0x5a5a5a5a, SLOT_ISO),
- [CXST_B1] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
- [CXST_B2] = __DEF_FBTC_SLOT(7, 0x6a5a5a5a, SLOT_MIX),
- [CXST_B3] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
- [CXST_B4] = __DEF_FBTC_SLOT(50, 0x55555555, SLOT_MIX),
- [CXST_LK] = __DEF_FBTC_SLOT(20, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_B2W] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_ISO),
+ [CXST_W1] = __DEF_FBTC_SLOT(70, 0xea5a5a5a, SLOT_ISO),
+ [CXST_W2] = __DEF_FBTC_SLOT(70, 0xea5a5aaa, SLOT_ISO),
+ [CXST_W2B] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
+ [CXST_B1] = __DEF_FBTC_SLOT(100, 0xe5555555, SLOT_MIX),
+ [CXST_B2] = __DEF_FBTC_SLOT(7, 0xea5a5a5a, SLOT_MIX),
+ [CXST_B3] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
+ [CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
+ [CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
[CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(20, 0x6a5a5a5a, SLOT_MIX),
+ [CXST_E2G] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX),
- [CXST_EBT] = __DEF_FBTC_SLOT(20, 0x55555555, SLOT_MIX),
+ [CXST_EBT] = __DEF_FBTC_SLOT(20, 0xe5555555, SLOT_MIX),
[CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO),
- [CXST_WLK] = __DEF_FBTC_SLOT(250, 0x6a5a6a5a, SLOT_MIX),
+ [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX),
};
@@ -99,13 +100,13 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
static const u32 cxtbl[] = {
0xffffffff, /* 0 */
0xaaaaaaaa, /* 1 */
- 0x55555555, /* 2 */
- 0x66555555, /* 3 */
- 0x66556655, /* 4 */
+ 0xe5555555, /* 2 */
+ 0xee555555, /* 3 */
+ 0xd5555555, /* 4 */
0x5a5a5a5a, /* 5 */
- 0x5a5a5aaa, /* 6 */
- 0xaa5a5a5a, /* 7 */
- 0x6a5a5a5a, /* 8 */
+ 0xfa5a5a5a, /* 6 */
+ 0xda5a5a5a, /* 7 */
+ 0xea5a5a5a, /* 8 */
0x6a5a5aaa, /* 9 */
0x6a5a6a5a, /* 10 */
0x6a5a6aaa, /* 11 */
@@ -261,6 +262,12 @@ enum btc_cx_poicy_type {
/* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+ /* TDMA off + pri: WL_Hi-Tx > BT, BT_Hi > other-WL > BT_Lo */
+ BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 8,
+
+ /* TDMA off + pri: WL_Hi-Tx = BT */
+ BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 9,
+
/* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/
BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
@@ -270,6 +277,21 @@ enum btc_cx_poicy_type {
/* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
BTC_CXP_OFFE_DEF2 = (BTC_CXP_OFFE << 8) | 1,
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_2GBWISOB = (BTC_CXP_OFFE << 8) | 2,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
+ BTC_CXP_OFFE_2GISOB = (BTC_CXP_OFFE << 8) | 3,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot WL > BT */
+ BTC_CXP_OFFE_2GBWMIXB = (BTC_CXP_OFFE << 8) | 4,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G/EBT-slot WL > BT */
+ BTC_CXP_OFFE_WL = (BTC_CXP_OFFE << 8) | 5,
+
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_2GBWMIXB2 = (BTC_CXP_OFFE << 8) | 6,
+
/* TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_FIX_TD3030 = (BTC_CXP_FIX << 8) | 0,
@@ -300,6 +322,9 @@ enum btc_cx_poicy_type {
/* TDMA Fix slot-9: W1:B1 = 40:20 */
BTC_CXP_FIX_TD4020 = (BTC_CXP_FIX << 8) | 9,
+ /* TDMA Fix slot-10: W1:B1 = 40:10 */
+ BTC_CXP_FIX_TD4010ISO = (BTC_CXP_FIX << 8) | 10,
+
/* PS-TDMA Fix slot-0: W1:B1 = 30:30 */
BTC_CXP_PFIX_TD3030 = (BTC_CXP_PFIX << 8) | 0,
@@ -322,25 +347,25 @@ enum btc_cx_poicy_type {
BTC_CXP_PFIX_TDW1B1 = (BTC_CXP_PFIX << 8) | 6,
/* TDMA Auto slot-0: W1:B1 = 50:200 */
- BTC_CXP_AUTO_TD50200 = (BTC_CXP_AUTO << 8) | 0,
+ BTC_CXP_AUTO_TD50B1 = (BTC_CXP_AUTO << 8) | 0,
/* TDMA Auto slot-1: W1:B1 = 60:200 */
- BTC_CXP_AUTO_TD60200 = (BTC_CXP_AUTO << 8) | 1,
+ BTC_CXP_AUTO_TD60B1 = (BTC_CXP_AUTO << 8) | 1,
/* TDMA Auto slot-2: W1:B1 = 20:200 */
- BTC_CXP_AUTO_TD20200 = (BTC_CXP_AUTO << 8) | 2,
+ BTC_CXP_AUTO_TD20B1 = (BTC_CXP_AUTO << 8) | 2,
/* TDMA Auto slot-3: W1:B1 = user-define */
BTC_CXP_AUTO_TDW1B1 = (BTC_CXP_AUTO << 8) | 3,
/* PS-TDMA Auto slot-0: W1:B1 = 50:200 */
- BTC_CXP_PAUTO_TD50200 = (BTC_CXP_PAUTO << 8) | 0,
+ BTC_CXP_PAUTO_TD50B1 = (BTC_CXP_PAUTO << 8) | 0,
/* PS-TDMA Auto slot-1: W1:B1 = 60:200 */
- BTC_CXP_PAUTO_TD60200 = (BTC_CXP_PAUTO << 8) | 1,
+ BTC_CXP_PAUTO_TD60B1 = (BTC_CXP_PAUTO << 8) | 1,
/* PS-TDMA Auto slot-2: W1:B1 = 20:200 */
- BTC_CXP_PAUTO_TD20200 = (BTC_CXP_PAUTO << 8) | 2,
+ BTC_CXP_PAUTO_TD20B1 = (BTC_CXP_PAUTO << 8) | 2,
/* PS-TDMA Auto slot-3: W1:B1 = user-define */
BTC_CXP_PAUTO_TDW1B1 = (BTC_CXP_PAUTO << 8) | 3,
@@ -412,7 +437,7 @@ enum btc_w2b_scoreboard {
BTC_WSCB_TDMA = BIT(9),
BTC_WSCB_FIX2M = BIT(10),
BTC_WSCB_WLRFK = BIT(11),
- BTC_WSCB_BTRFK_GNT = BIT(12), /* not used, use mailbox to inform BT */
+ BTC_WSCB_RXSCAN_PRI = BIT(12),
BTC_WSCB_BT_HILNA = BIT(13),
BTC_WSCB_BTLOG = BIT(14),
BTC_WSCB_ALL = GENMASK(23, 0),
@@ -434,6 +459,16 @@ enum btc_wl_link_mode {
BTC_WLINK_MAX
};
+enum btc_wl_mrole_type {
+ BTC_WLMROLE_NONE = 0x0,
+ BTC_WLMROLE_STA_GC,
+ BTC_WLMROLE_STA_GC_NOA,
+ BTC_WLMROLE_STA_GO,
+ BTC_WLMROLE_STA_GO_NOA,
+ BTC_WLMROLE_STA_STA,
+ BTC_WLMROLE_MAX
+};
+
enum btc_bt_hid_type {
BTC_HID_218 = BIT(0),
BTC_HID_418 = BIT(1),
@@ -460,6 +495,11 @@ enum btc_gnt_state {
BTC_GNT_MAX
};
+enum btc_ctr_path {
+ BTC_CTRL_BY_BT = 0,
+ BTC_CTRL_BY_WL
+};
+
enum btc_wl_max_tx_time {
BTC_MAX_TX_TIME_L1 = 500,
BTC_MAX_TX_TIME_L2 = 1000,
@@ -531,6 +571,7 @@ enum btc_reason_and_action {
#define BTC_FREERUN_ANTISO_MIN 30
#define BTC_TDMA_BTHID_MAX 2
#define BTC_BLINK_NOCONNECT 0
+#define BTC_B1_MAX 250 /* unit ms */
static void _run_coex(struct rtw89_dev *rtwdev,
enum btc_reason_and_action reason);
@@ -551,8 +592,10 @@ static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
"[BTC], %s(): return by btc not init!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
return;
- } else if ((wl->status.map.rf_off_pre == 1 && wl->status.map.rf_off == 1) ||
- (wl->status.map.lps_pre == 1 && wl->status.map.lps == 1)) {
+ } else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
+ wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
+ (wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
+ wl->status.map.lps == BTC_LPS_RF_OFF)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by wl off!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
@@ -616,8 +659,6 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
memset(&btc->mdinfo, 0, sizeof(btc->mdinfo));
}
-#define BTC_FWINFO_BUF 1024
-
#define BTC_RPT_HDR_SIZE 3
#define BTC_CHK_WLSLOT_DRIFT_MAX 15
#define BTC_CHK_HANG_MAX 3
@@ -869,18 +910,24 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *prptbuf, u32 index)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- struct rtw89_btc_fbtc_rpt_ctrl *prpt = NULL;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_fbtc_rpt_ctrl *prpt;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prpt_v1;
struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1 = NULL;
struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
struct rtw89_btc_prpt *btc_prpt = NULL;
struct rtw89_btc_fbtc_slot *rtp_slot = NULL;
- u8 rpt_type = 0, *rpt_content = NULL, *pfinfo = NULL;
- u16 wl_slot_set = 0;
+ void *rpt_content = NULL, *pfinfo = NULL;
+ u8 rpt_type = 0;
+ u16 wl_slot_set = 0, wl_slot_real = 0;
u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t;
+ u32 cnt_leak_slot = 0, bt_slot_real = 0, cnt_rx_imr = 0;
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -904,100 +951,129 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_ctrl.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
- pcinfo->req_fver = BTCRPT_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxbtcrpt_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_TDMA:
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_tdma.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
- pcinfo->req_fver = FCXTDMA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxtdma_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_slots.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
- pcinfo->req_fver = FCXSLOTS_VER;
+ pcinfo->req_fver = chip->fcxslots_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_CYSTA:
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_cysta.finfo);
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
- pcinfo->req_fver = FCXCYSTA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo;
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
+ rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxcysta_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_step.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
- trace_step + 8;
- pcinfo->req_fver = FCXSTEP_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
+ trace_step +
+ offsetof(struct rtw89_btc_fbtc_steps, step);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo_v1.step[0]) *
+ trace_step +
+ offsetof(struct rtw89_btc_fbtc_steps_v1, step);
+ }
+ pcinfo->req_fver = chip->fcxstep_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_NULLSTA:
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_nullsta.finfo);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
- pcinfo->req_fver = FCXNULLSTA_VER;
+ if (chip->chip_id == RTL8852A) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo_v1);
+ }
+ pcinfo->req_fver = chip->fcxnullsta_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_MREG:
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_mregval.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo);
- pcinfo->req_fver = FCXMREG_VER;
+ pcinfo->req_fver = chip->fcxmreg_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
- pcinfo->req_fver = FCXGPIODBG_VER;
+ pcinfo->req_fver = chip->fcxgpiodbg_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btver.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
- pcinfo->req_fver = FCX_BTVER_VER;
+ pcinfo->req_fver = chip->fcxbtver_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_SCAN:
pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btscan.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo);
- pcinfo->req_fver = FCX_BTSCAN_VER;
+ pcinfo->req_fver = chip->fcxbtscan_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_AFH:
pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btafh.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo);
- pcinfo->req_fver = FCX_BTAFH_VER;
+ pcinfo->req_fver = chip->fcxbtafh_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
case BTC_RPT_TYPE_BT_DEVICE:
pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
- pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btdev.finfo);
+ pfinfo = &pfwinfo->rpt_fbtc_btdev.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
- pcinfo->req_fver = FCX_BTDEVINFO_VER;
+ pcinfo->req_fver = chip->fcxbtdevinfo_ver;
pcinfo->rx_len = rpt_len;
pcinfo->rx_cnt++;
break;
@@ -1026,7 +1102,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcpy(pfinfo, rpt_content, pcinfo->req_len);
pcinfo->valid = 1;
- if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ if (rpt_type == BTC_RPT_TYPE_TDMA && chip->chip_id == RTL8852A) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): check %d %zu\n", __func__,
BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
@@ -1039,7 +1115,8 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
dm->tdma_now.type, dm->tdma_now.rxflctrl,
dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rsvd0, dm->tdma_now.rsvd1);
+ dm->tdma_now.rxflctrl_role,
+ dm->tdma_now.option_ctrl);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
@@ -1050,14 +1127,46 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n,
pfwinfo->rpt_fbtc_tdma.finfo.leak_n,
pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.rsvd0,
- pfwinfo->rpt_fbtc_tdma.finfo.rsvd1);
+ pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl_role,
+ pfwinfo->rpt_fbtc_tdma.finfo.option_ctrl);
}
_chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo,
sizeof(dm->tdma_now)));
+ } else if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n", __func__,
+ BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
+
+ if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
+ sizeof(dm->tdma_now)) != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ dm->tdma_now.type, dm->tdma_now.rxflctrl,
+ dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
+ dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
+ dm->tdma_now.rxflctrl_role,
+ dm->tdma_now.option_ctrl);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.type,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.txpause,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.wtgle_n,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.leak_n,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.ext_ctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl_role,
+ pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.option_ctrl);
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
+ sizeof(dm->tdma_now)));
}
if (rpt_type == BTC_RPT_TYPE_SLOT) {
@@ -1097,7 +1206,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
sizeof(dm->slot_now)));
}
- if (rpt_type == BTC_RPT_TYPE_CYSTA &&
+ if (rpt_type == BTC_RPT_TYPE_CYSTA && chip->chip_id == RTL8852A &&
pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) {
/* Check Leak-AP */
if (pcysta->slot_cnt[CXST_LK] != 0 &&
@@ -1120,16 +1229,55 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
}
_chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_B1]);
_chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
+ } else if (rpt_type == BTC_RPT_TYPE_CYSTA && pcysta_v1 &&
+ le16_to_cpu(pcysta_v1->cycles) >= BTC_CYSTA_CHK_PERIOD) {
+ cnt_leak_slot = le32_to_cpu(pcysta_v1->slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta_v1->leak_slot.cnt_rximr);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
+ }
+
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_BT]);
+
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta_v1->slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le32_to_cpu(pcysta_v1->slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ (u32)le16_to_cpu(pcysta_v1->cycles));
}
- if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ if (rpt_type == BTC_RPT_TYPE_CTRL && chip->chip_id == RTL8852A) {
prpt = &pfwinfo->rpt_ctrl.finfo;
btc->fwinfo.rpt_en_map = prpt->rpt_enable;
wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
wl->ver_info.fw = prpt->wl_fw_ver;
- dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload);
+ dm->wl_fw_cx_offload = !!prpt->wl_fw_cx_offload;
_chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
pfwinfo->event[BTF_EVNT_RPT]);
@@ -1142,6 +1290,33 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
btc->cx.cnt_bt[BTC_BCNT_POLUT] =
rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
}
+ } else if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ prpt_v1 = &pfwinfo->rpt_ctrl.finfo_v1;
+ btc->fwinfo.rpt_en_map = le32_to_cpu(prpt_v1->rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt_v1->wl_fw_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt_v1->wl_fw_info.fw_ver);
+ dm->wl_fw_cx_offload = !!le32_to_cpu(prpt_v1->wl_fw_info.cx_offload);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt_v1->gnt_val[i],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_POLLUTED]);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+ if (le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
}
if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
@@ -1155,6 +1330,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *pbuf, u32 buf_len)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc_prpt *btc_prpt = NULL;
u32 index = 0, rpt_len = 0;
@@ -1164,7 +1340,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
while (pbuf) {
btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index];
- if (index + 2 >= BTC_FWINFO_BUF)
+ if (index + 2 >= chip->btc_fwinfo_buf)
break;
/* At least 3 bytes: type(1) & len(2) */
rpt_len = le16_to_cpu(btc_prpt->len);
@@ -1182,10 +1358,12 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
static void _append_tdma(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_btf_tlv *tlv = NULL;
- struct rtw89_btc_fbtc_tdma *v = NULL;
+ struct rtw89_btc_btf_tlv *tlv;
+ struct rtw89_btc_fbtc_tdma *v;
+ struct rtw89_btc_fbtc_tdma_v1 *v1;
u16 len = btc->policy_len;
if (!btc->update_policy_force &&
@@ -1197,12 +1375,19 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
}
tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
- v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
tlv->type = CXPOLICY_TDMA;
- tlv->len = sizeof(*v);
-
- memcpy(v, &dm->tdma, sizeof(*v));
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ if (chip->chip_id == RTL8852A) {
+ v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
+ tlv->len = sizeof(*v);
+ memcpy(v, &dm->tdma, sizeof(*v));
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ } else {
+ tlv->len = sizeof(*v1);
+ v1 = (struct rtw89_btc_fbtc_tdma_v1 *)&tlv->val[0];
+ v1->fver = chip->fcxtdma_ver;
+ v1->tdma = dm->tdma;
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v1);
+ }
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): type:%d, rxflctrl=%d, txpause=%d, wtgle_n=%d, leak_n=%d, ext_ctrl=%d\n",
@@ -1408,12 +1593,17 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
switch (type) {
case CXDRVINFO_INIT:
rtw89_fw_h2c_cxdrv_init(rtwdev);
break;
case CXDRVINFO_ROLE:
- rtw89_fw_h2c_cxdrv_role(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ rtw89_fw_h2c_cxdrv_role(rtwdev);
+ else
+ rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
break;
case CXDRVINFO_CTRL:
rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
@@ -1448,7 +1638,7 @@ void btc_fw_event(struct rtw89_dev *rtwdev, u8 evt_id, void *data, u32 len)
}
}
-static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
+static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -1462,7 +1652,7 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
if (!(phy_map & BIT(i)))
continue;
- switch (state) {
+ switch (wl_state) {
case BTC_GNT_HW:
g[i].gnt_wl_sw_en = 0;
g[i].gnt_wl = 0;
@@ -1476,6 +1666,21 @@ static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
g[i].gnt_wl = 1;
break;
}
+
+ switch (bt_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_bt_sw_en = 0;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ break;
+ }
}
rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
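/*
 * Usage sketch: call sites that previously needed two helpers, e.g.
 *     _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
 *     _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
 * now express the same WL-high/BT-low grant in a single call:
 *     _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
 * (see the _set_ant() conversions later in this patch).
 */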
@@ -1534,6 +1739,7 @@ static void _set_wl_tx_power(struct rtw89_dev *rtwdev, u32 level)
static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -1546,6 +1752,8 @@ static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): level = %d\n",
__func__, level);
+
+ chip->ops->btc_set_wl_rx_gain(rtwdev, level);
}
static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
@@ -1683,28 +1891,45 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *b = &bt->link_info;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ struct rtw89_btc_wl_active_role *r;
+ struct rtw89_btc_wl_active_role_v1 *r1;
u8 en = 0, i, ch = 0, bw = 0;
+ u8 mode, connect_cnt;
if (btc->ctrl.manual || wl->status.map.scan)
return;
- /* TODO if include module->ant.type == BTC_ANT_SHARED */
+ if (chip->chip_id == RTL8852A) {
+ mode = wl_rinfo->link_mode;
+ connect_cnt = wl_rinfo->connect_cnt;
+ } else {
+ mode = wl_rinfo_v1->link_mode;
+ connect_cnt = wl_rinfo_v1->connect_cnt;
+ }
+
if (wl->status.map.rf_off || bt->whql_test ||
- wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
- wl_rinfo->link_mode == BTC_WLINK_5G ||
- wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ mode == BTC_WLINK_NOLINK || mode == BTC_WLINK_5G ||
+ connect_cnt > BTC_TDMA_WLROLE_MAX) {
en = false;
- } else if (wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
- wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
+ } else if (mode == BTC_WLINK_2G_MCC || mode == BTC_WLINK_2G_SCC) {
en = true;
/* get p2p channel */
for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (wl_rinfo->active_role[i].role ==
- RTW89_WIFI_ROLE_P2P_GO ||
- wl_rinfo->active_role[i].role ==
- RTW89_WIFI_ROLE_P2P_CLIENT) {
- ch = wl_rinfo->active_role[i].ch;
- bw = wl_rinfo->active_role[i].bw;
+ r = &wl_rinfo->active_role[i];
+ r1 = &wl_rinfo_v1->active_role_v1[i];
+
+ if (chip->chip_id == RTL8852A &&
+ (r->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r->ch;
+ bw = r->bw;
+ break;
+ } else if (chip->chip_id != RTL8852A &&
+ (r1->role == RTW89_WIFI_ROLE_P2P_GO ||
+ r1->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = r1->ch;
+ bw = r1->bw;
break;
}
}
@@ -1712,10 +1937,18 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
en = true;
/* get 2g channel */
for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (wl_rinfo->active_role[i].connected &&
- wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
- ch = wl_rinfo->active_role[i].ch;
- bw = wl_rinfo->active_role[i].bw;
+ r = &wl_rinfo->active_role[i];
+ r1 = &wl_rinfo_v1->active_role_v1[i];
+
+ if (chip->chip_id == RTL8852A &&
+ r->connected && r->band == RTW89_BAND_2G) {
+ ch = r->ch;
+ bw = r->bw;
+ break;
+ } else if (chip->chip_id != RTL8852A &&
+ r1->connected && r1->band == RTW89_BAND_2G) {
+ ch = r1->ch;
+ bw = r1->bw;
break;
}
}
@@ -1768,6 +2001,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
@@ -1777,7 +2011,8 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
/* The below is dedicated antenna case */
- if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX ||
+ wl_rinfo_v1->connect_cnt > BTC_TDMA_WLROLE_MAX) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -1826,6 +2061,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
#define _tdma_set_flctrl(btc, flc) ({(btc)->dm.tdma.rxflctrl = flc; })
+#define _tdma_set_flctrl_role(btc, role) ({(btc)->dm.tdma.rxflctrl_role = role; })
#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; })
#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; })
@@ -1904,6 +2140,15 @@ union btc_btinfo {
static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
enum btc_reason_and_action action)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->btc_set_policy(rtwdev, policy_type);
+ _fw_set_policy(rtwdev, policy_type, action);
+}
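/*
 * _set_policy() is now a thin dispatcher: the per-chip btc_set_policy op
 * builds the TDMA/slot tables and _fw_set_policy() pushes them to firmware.
 * The exported rtw89_btc_set_policy()/rtw89_btc_set_policy_v1() below are
 * presumably the two implementations wired into each chip's ops table; the
 * ops assignment itself is not part of this hunk.
 */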
+
+#define BTC_B1_MAX 250 /* unit ms */
+void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
+{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
@@ -1964,6 +2209,9 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
case BTC_CXP_OFF_BWB1:
_slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
break;
+ case BTC_CXP_OFF_BWB3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[6]);
+ break;
}
break;
case BTC_CXP_OFFB: /* TDMA off + beacon protect */
@@ -2080,17 +2328,361 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_AUTO];
switch (policy_type) {
- case BTC_CXP_AUTO_TD50200:
+ case BTC_CXP_AUTO_TD50B1:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD60B1:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD20B1:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO_TD50B1:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD60B1:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD20B1:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TDW1B1:
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO2];
+ switch (policy_type) {
+ case BTC_CXP_AUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TDW1B4: /* W1:B4 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO2];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TDW1B4: /* W1:B4 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL(rtw89_btc_set_policy);
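/*
 * Note: the policy IDs that used to hard-code a 200 ms B1 slot
 * (*_TD50200/TD60200/TD20200) are renamed to *_TD50B1/TD60B1/TD20B1
 * throughout this patch, and the B1 duration now comes from BTC_B1_MAX
 * (250 ms, defined above) instead of a literal 200.
 */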
+
+void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
+ struct rtw89_btc_fbtc_slot *s = dm->slot;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
+ struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
+ struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
+ u8 type, null_role;
+ u32 tbl_w1, tbl_b1, tbl_b4;
+
+ type = FIELD_GET(BTC_CXP_MASK, policy_type);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ tbl_w1 = cxtbl[1];
+ else if (hid->exist && hid->type == BTC_HID_218)
+ tbl_w1 = cxtbl[7]; /* Ack/BA no break bt Hi-Pri-rx */
+ else
+ tbl_w1 = cxtbl[8];
+
+ if (dm->leak_ap &&
+ (type == BTC_CXP_PFIX || type == BTC_CXP_PAUTO2)) {
+ tbl_b1 = cxtbl[3];
+ tbl_b4 = cxtbl[3];
+ } else if (hid->exist && hid->type == BTC_HID_218) {
+ tbl_b1 = cxtbl[4]; /* Ack/BA no break bt Hi-Pri-rx */
+ tbl_b4 = cxtbl[4];
+ } else {
+ tbl_b1 = cxtbl[2];
+ tbl_b4 = cxtbl[2];
+ }
+ } else {
+ tbl_w1 = cxtbl[16];
+ tbl_b1 = cxtbl[17];
+ tbl_b4 = cxtbl[17];
+ }
+
+ btc->bt_req_en = false;
+
+ switch (type) {
+ case BTC_CXP_USERDEF0:
+ btc->update_policy_force = true;
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF: /* TDMA off */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFF_BT:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF_WL:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[1]);
+ break;
+ case BTC_CXP_OFF_EQ0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
+ break;
+ case BTC_CXP_OFF_EQ1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[16]);
+ break;
+ case BTC_CXP_OFF_EQ2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[17]);
+ break;
+ case BTC_CXP_OFF_EQ3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[18]);
+ break;
+ case BTC_CXP_OFF_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[5]);
+ break;
+ case BTC_CXP_OFF_BWB1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ case BTC_CXP_OFF_BWB2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[7]);
+ break;
+ case BTC_CXP_OFF_BWB3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[6]);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_OFFB: /* TDMA off + beacon protect */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF_B2];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFFB_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_OFFE: /* TDMA off + beacon protect + Ext_control */
+ btc->bt_req_en = true;
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_OFF_EXT];
+
+ /* To avoid wl-s0 tx break by hid/hfp tx */
+ if (hid->exist || hfp->exist)
+ tbl_w1 = cxtbl[16];
+
+ switch (policy_type) {
+ case BTC_CXP_OFFE_DEF:
+ s[CXST_E2G] = s_def[CXST_E2G];
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ case BTC_CXP_OFFE_DEF2:
+ _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_FIX: /* TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_FIX];
+
+ switch (policy_type) {
+ case BTC_CXP_FIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010:
+ _slot_set(btc, CXST_W1, 40, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010ISO:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD7010:
+ _slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD3060:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_PFIX: /* PS-TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PFIX];
+
+ switch (policy_type) {
+ case BTC_CXP_PFIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ default:
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO: /* TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO];
+
+ switch (policy_type) {
+ case BTC_CXP_AUTO_TD50B1:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_AUTO_TD60200:
+ case BTC_CXP_AUTO_TD60B1:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_AUTO_TD20200:
+ case BTC_CXP_AUTO_TD20B1:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
@@ -2098,23 +2690,26 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
tbl_b1, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_PAUTO];
+
switch (policy_type) {
- case BTC_CXP_PAUTO_TD50200:
+ case BTC_CXP_PAUTO_TD50B1:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_PAUTO_TD60200:
+ case BTC_CXP_PAUTO_TD60B1:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
- case BTC_CXP_PAUTO_TD20200:
+ case BTC_CXP_PAUTO_TD20B1:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
break;
case BTC_CXP_PAUTO_TDW1B1:
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
@@ -2122,119 +2717,112 @@ static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
_slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
tbl_b1, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_AUTO2];
+
switch (policy_type) {
case BTC_CXP_AUTO2_TD3050:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD3070:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD5050:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD6060:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TD2080:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_AUTO2_TDW1B4: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
tbl_b4, SLOT_MIX);
break;
+ default:
+ break;
}
break;
case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
*t = t_def[CXTD_PAUTO2];
+
switch (policy_type) {
case BTC_CXP_PAUTO2_TD3050:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD3070:
_slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD5050:
_slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD6060:
_slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TD2080:
_slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
- _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B1, BTC_B1_MAX, tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
break;
case BTC_CXP_PAUTO2_TDW1B4: /* W1:B1 = user-define */
_slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
_slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
tbl_b4, SLOT_MIX);
break;
+ default:
+ break;
}
break;
}
- _fw_set_policy(rtwdev, policy_type, action);
-}
-
-static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_mac_ax_gnt *g = dm->gnt.band;
- u8 i;
-
- if (phy_map > BTC_PHY_ALL)
- return;
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC && dm->tdma.rxflctrl) {
+ null_role = FIELD_PREP(0x0f, dm->wl_scc.null_role1) |
+ FIELD_PREP(0xf0, dm->wl_scc.null_role2);
+ _tdma_set_flctrl_role(btc, null_role);
+ }
- for (i = 0; i < RTW89_PHY_MAX; i++) {
- if (!(phy_map & BIT(i)))
- continue;
+ /* enter leak_slot after each null-1 */
+ if (dm->leak_ap && dm->tdma.leak_n > 1)
+ _tdma_set_lek(btc, 1);
- switch (state) {
- case BTC_GNT_HW:
- g[i].gnt_bt_sw_en = 0;
- g[i].gnt_bt = 0;
- break;
- case BTC_GNT_SW_LO:
- g[i].gnt_bt_sw_en = 1;
- g[i].gnt_bt = 0;
- break;
- case BTC_GNT_SW_HI:
- g[i].gnt_bt_sw_en = 1;
- g[i].gnt_bt = 1;
- break;
- }
+ if (dm->tdma_instant_excute) {
+ btc->dm.tdma.option_ctrl |= BIT(0);
+ btc->update_policy_force = true;
}
-
- rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
+EXPORT_SYMBOL(rtw89_btc_set_policy_v1);
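/*
 * v1-only tail above: when 2G SCC is active with RX flow control, the two
 * null-data role IDs chosen by _action_wl_2g_scc_v1() (dm->wl_scc.null_role1
 * in the low nibble, null_role2 in the high nibble) are packed into
 * tdma.rxflctrl_role; the leak-slot-after-null handling and the "instant
 * execute" option bit are likewise applied only in this path.
 */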
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
u8 tx_val, u8 rx_val)
@@ -2300,86 +2888,74 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
switch (type) {
case BTC_ANT_WPOWERON:
- rtw89_chip_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
break;
case BTC_ANT_WINIT:
- if (bt->enable.now) {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
- } else {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- }
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
+ if (bt->enable.now)
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI);
+ else
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
+
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
break;
case BTC_ANT_WONLY:
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WOFF:
- rtw89_chip_cfg_ctrl_path(rtwdev, false);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W2G:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
if (rtwdev->dbcc_en) {
for (i = 0; i < RTW89_PHY_MAX; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
- _set_gnt_wl(rtwdev, BIT(i), gnt_wl_ctrl);
-
gnt_bt_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
/* BT should control by GNT_BT if WL_2G at S0 */
if (i == 1 &&
wl_dinfo->real_band[0] == RTW89_BAND_2G &&
wl_dinfo->real_band[1] == RTW89_BAND_5G)
gnt_bt_ctrl = BTC_GNT_HW;
- _set_gnt_bt(rtwdev, BIT(i), gnt_bt_ctrl);
-
+ _set_gnt(rtwdev, BIT(i), gnt_wl_ctrl, gnt_bt_ctrl);
plt_ctrl = b2g ? BTC_PLT_BT : BTC_PLT_NONE;
_set_bt_plut(rtwdev, BIT(i),
plt_ctrl, plt_ctrl);
}
} else {
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_HW, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_BT, BTC_PLT_BT);
}
break;
case BTC_ANT_W5G:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_W25G:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_HW, BTC_GNT_HW);
_set_bt_plut(rtwdev, BTC_PHY_ALL,
BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
break;
case BTC_ANT_FREERUN:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_WRFK:
- rtw89_chip_cfg_ctrl_path(rtwdev, true);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
case BTC_ANT_BRFK:
- rtw89_chip_cfg_ctrl_path(rtwdev, false);
- _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
- _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_BT);
+ _set_gnt(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI);
_set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
break;
default:
@@ -2491,14 +3067,19 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
static void _action_bt_hfp(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
- if (btc->cx.wl.status.map._4way)
+ if (btc->cx.wl.status.map._4way) {
_set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HFP);
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ btc->cx.bt.scan_rx_low_pri = true;
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB2, BTC_ACT_BT_HFP);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
+ }
} else {
_set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
}
@@ -2506,17 +3087,37 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
static void _action_bt_hid(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_hid_desc *hid = &bt->link_info.hid_desc;
+ u16 policy_type = BTC_CXP_OFF_BT;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) /* shared-antenna */
- if (btc->cx.wl.status.map._4way)
- _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HID);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HID);
- else /* dedicated-antenna */
- _set_policy(rtwdev, BTC_CXP_OFF_EQ3, BTC_ACT_BT_HID);
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (wl->status.map._4way) {
+ policy_type = BTC_CXP_OFF_WL;
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ btc->cx.bt.scan_rx_low_pri = true;
+ if (hid->type & BTC_HID_BLE)
+ policy_type = BTC_CXP_OFF_BWB0;
+ else
+ policy_type = BTC_CXP_OFF_BWB2;
+ } else if (hid->type == BTC_HID_218) {
+ bt->scan_rx_low_pri = true;
+ policy_type = BTC_CXP_OFF_BWB2;
+ } else if (chip->para_ver == 0x1) {
+ policy_type = BTC_CXP_OFF_BWB3;
+ } else {
+ policy_type = BTC_CXP_OFF_BWB1;
+ }
+ } else { /* dedicated-antenna */
+ policy_type = BTC_CXP_OFF_EQ3;
+ }
+
+ _set_policy(rtwdev, policy_type, BTC_ACT_BT_HID);
}
static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
@@ -2537,7 +3138,7 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
} else {
_set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP);
+ BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP);
}
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
@@ -2554,12 +3155,12 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
BTC_ACT_BT_A2DP);
} else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
BTC_ACT_BT_A2DP);
}
break;
case BTC_WIDLE: /* wl-idle + bt-A2DP */
- _set_policy(rtwdev, BTC_CXP_AUTO_TD20200, BTC_ACT_BT_A2DP);
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP);
break;
}
}
@@ -2639,7 +3240,7 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
} else {
_set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP_HID);
+ BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID);
}
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
@@ -2657,7 +3258,7 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
BTC_ACT_BT_A2DP_HID);
} else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
BTC_ACT_BT_A2DP_HID);
}
break;
@@ -2792,19 +3393,27 @@ static void _action_wl_rfk(struct rtw89_dev *rtwdev)
static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- bool is_btg = false;
+ bool is_btg;
+ u8 mode;
if (btc->ctrl.manual)
return;
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
/* notify halbb ignore GNT_BT or not for WL BB Rx-AGC control */
- if (wl_rinfo->link_mode == BTC_WLINK_5G) /* always 0 if 5G */
+ if (mode == BTC_WLINK_5G) /* always 0 if 5G */
is_btg = false;
- else if (wl_rinfo->link_mode == BTC_WLINK_25G_DBCC &&
+ else if (mode == BTC_WLINK_25G_DBCC &&
wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
is_btg = false;
else
@@ -2816,7 +3425,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
btc->dm.wl_btg_rx = is_btg;
- if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ if (mode == BTC_WLINK_25G_MCC)
return;
rtw89_ctrl_btg(rtwdev, is_btg);
@@ -2889,6 +3498,7 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -2898,16 +3508,22 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc;
struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
- u8 mode = wl_rinfo->link_mode;
- u8 tx_retry = 0;
- u32 tx_time = 0;
- u16 enable = 0;
+ u8 mode;
+ u8 tx_retry;
+ u32 tx_time;
+ u16 enable;
bool reenable = false;
if (btc->ctrl.manual)
return;
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
enable = 0;
@@ -2951,13 +3567,21 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
+ u8 mode;
+
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
- if (wl_rinfo->link_mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
+ if (mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
bt_hi_lna_rx = true;
if (bt_hi_lna_rx == bt->hi_lna_rx)
@@ -2966,14 +3590,34 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
_write_scbd(rtwdev, BTC_WSCB_BT_HILNA, bt_hi_lna_rx);
}
+static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+
+ _write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, (bool)(!!bt->scan_rx_low_pri));
+}
+
/* TODO add these functions */
static void _action_common(struct rtw89_dev *rtwdev)
{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
_set_btg_ctrl(rtwdev);
_set_wl_tx_limit(rtwdev);
_set_bt_afh_info(rtwdev);
_set_bt_rx_agc(rtwdev);
_set_rf_trx_para(rtwdev);
+ _set_bt_rx_scan_pri(rtwdev);
+
+ if (wl->scbd_change) {
+ rtw89_mac_cfg_sb(rtwdev, wl->scbd);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
+ wl->scbd);
+ wl->scbd_change = false;
+ btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+ }
}
static void _action_by_bt(struct rtw89_dev *rtwdev)
@@ -3145,6 +3789,68 @@ static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
}
}
+static void _action_wl_2g_scc_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &wl->role_info_v1;
+ u16 policy_type = BTC_CXP_OFF_BT;
+ u32 dur;
+
+ if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ policy_type = BTC_CXP_OFF_EQ0;
+ } else {
+ /* shared-antenna */
+ switch (wl_rinfo->mrole_type) {
+ case BTC_WLMROLE_STA_GC:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_P2P_CLIENT;
+ dm->wl_scc.ebt_null = 0; /* no ext-slot-control */
+ _action_by_bt(rtwdev);
+ return;
+ case BTC_WLMROLE_STA_STA:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.ebt_null = 0; /* no ext-slot-control */
+ _action_by_bt(rtwdev);
+ return;
+ case BTC_WLMROLE_STA_GC_NOA:
+ case BTC_WLMROLE_STA_GO:
+ case BTC_WLMROLE_STA_GO_NOA:
+ dm->wl_scc.null_role1 = RTW89_WIFI_ROLE_STATION;
+ dm->wl_scc.null_role2 = RTW89_WIFI_ROLE_NONE;
+ dur = wl_rinfo->mrole_noa_duration;
+
+ if (wl->status.map._4way) {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_WL;
+ } else if (bt->link_info.status.map.connect == 0) {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ } else if (bt->link_info.a2dp_desc.exist &&
+ dur < btc->bt_req_len) {
+ dm->wl_scc.ebt_null = 1; /* tx null at EBT */
+ policy_type = BTC_CXP_OFFE_2GBWMIXB2;
+ } else if (bt->link_info.a2dp_desc.exist ||
+ bt->link_info.pan_desc.exist) {
+ dm->wl_scc.ebt_null = 1; /* tx null at EBT */
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else {
+ dm->wl_scc.ebt_null = 0;
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
+}
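/*
 * Cross-reference: the null_role1/null_role2 values recorded for the STA+GC
 * and STA+STA cases above are the ones rtw89_btc_set_policy_v1() packs into
 * tdma.rxflctrl_role when the 2G-SCC flow-control policy is programmed.
 */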
+
static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -3234,20 +3940,20 @@ static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 scbd_val = 0;
+ u8 force_exec = false;
if (!chip->scbd)
return;
scbd_val = state ? wl->scbd | val : wl->scbd & ~val;
- if (scbd_val == wl->scbd)
- return;
- rtw89_mac_cfg_sb(rtwdev, scbd_val);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
- scbd_val);
- wl->scbd = scbd_val;
+ if (val & BTC_WSCB_ACTIVE || val & BTC_WSCB_ON)
+ force_exec = true;
- btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+ if (scbd_val != wl->scbd || force_exec) {
+ wl->scbd = scbd_val;
+ wl->scbd_change = true;
+ }
}
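/*
 * Behavioural change: _write_scbd() no longer programs the scoreboard
 * register directly. It caches the new value and raises wl->scbd_change, and
 * _action_common() (earlier in this patch) flushes it via rtw89_mac_cfg_sb()
 * at the end of each coex run; BTC_WSCB_ACTIVE/BTC_WSCB_ON writes force the
 * update even when the cached value is unchanged.
 */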
static u8
@@ -3428,8 +4134,158 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], cnt_connect = %d, link_mode = %d\n",
- cnt_connect, wl_rinfo->link_mode);
+ "[BTC], cnt_connect = %d, connecting = %d, link_mode = %d\n",
+ cnt_connect, cnt_connecting, wl_rinfo->link_mode);
+
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+}
+
+static void _update_wl_info_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &wl->role_info_v1;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ u8 cnt_connect = 0, cnt_connecting = 0, cnt_active = 0;
+ u8 cnt_2g = 0, cnt_5g = 0, phy;
+ u32 wl_2g_ch[2] = {}, wl_5g_ch[2] = {};
+ bool b2g = false, b5g = false, client_joined = false;
+ u8 i;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (!wl_linfo[i].active)
+ continue;
+
+ cnt_active++;
+ wl_rinfo->active_role_v1[cnt_active - 1].role = wl_linfo[i].role;
+ wl_rinfo->active_role_v1[cnt_active - 1].pid = wl_linfo[i].pid;
+ wl_rinfo->active_role_v1[cnt_active - 1].phy = wl_linfo[i].phy;
+ wl_rinfo->active_role_v1[cnt_active - 1].band = wl_linfo[i].band;
+ wl_rinfo->active_role_v1[cnt_active - 1].noa = (u8)wl_linfo[i].noa;
+ wl_rinfo->active_role_v1[cnt_active - 1].connected = 0;
+
+ wl->port_id[wl_linfo[i].role] = wl_linfo[i].pid;
+
+ phy = wl_linfo[i].phy;
+
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ wl_dinfo->role[phy] = wl_linfo[i].role;
+ wl_dinfo->op_band[phy] = wl_linfo[i].band;
+ _update_dbcc_band(rtwdev, phy);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ cnt_connecting++;
+ } else {
+ cnt_connect++;
+ if ((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ }
+
+ wl_rinfo->role_map.val |= BIT(wl_linfo[i].role);
+ wl_rinfo->active_role_v1[cnt_active - 1].ch = wl_linfo[i].ch;
+ wl_rinfo->active_role_v1[cnt_active - 1].bw = wl_linfo[i].bw;
+ wl_rinfo->active_role_v1[cnt_active - 1].connected = 1;
+
+ /* only care 2 roles + BT coex */
+ if (wl_linfo[i].band != RTW89_BAND_2G) {
+ if (cnt_5g <= ARRAY_SIZE(wl_5g_ch) - 1)
+ wl_5g_ch[cnt_5g] = wl_linfo[i].ch;
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (cnt_2g <= ARRAY_SIZE(wl_2g_ch) - 1)
+ wl_2g_ch[cnt_2g] = wl_linfo[i].ch;
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+
+ wl_rinfo->connect_cnt = cnt_connect;
+
+ /* Be careful to change the following sequence!! */
+ if (cnt_connect == 0) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map.role.none = 1;
+ } else if (!b2g && b5g) {
+ wl_rinfo->link_mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map.role.nan) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_NAN;
+ } else if (cnt_connect > BTC_TDMA_WLROLE_MAX) {
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ } else if (b2g && b5g && cnt_connect == 2) {
+ if (rtwdev->dbcc_en) {
+ switch (wl_dinfo->role[RTW89_PHY_0]) {
+ case RTW89_WIFI_ROLE_STATION:
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ break;
+ case RTW89_WIFI_ROLE_P2P_GO:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ break;
+ case RTW89_WIFI_ROLE_P2P_CLIENT:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ break;
+ case RTW89_WIFI_ROLE_AP:
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ break;
+ default:
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ break;
+ }
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_25G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 2) {
+ if (wl_rinfo->role_map.role.station &&
+ (wl_rinfo->role_map.role.p2p_go ||
+ wl_rinfo->role_map.role.p2p_gc ||
+ wl_rinfo->role_map.role.ap)) {
+ if (wl_2g_ch[0] == wl_2g_ch[1])
+ wl_rinfo->link_mode = BTC_WLINK_2G_SCC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 1) {
+ if (wl_rinfo->role_map.role.station)
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ else if (wl_rinfo->role_map.role.ap)
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ else if (wl_rinfo->role_map.role.p2p_go)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ else if (wl_rinfo->role_map.role.p2p_gc)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ }
+
+ /* if no client_joined, don't care P2P-GO/AP role */
+ if (wl_rinfo->role_map.role.p2p_go || wl_rinfo->role_map.role.ap) {
+ if (!client_joined) {
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_MCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ wl_rinfo->connect_cnt = 1;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->connect_cnt = 0;
+ }
+ }
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], cnt_connect = %d, connecting = %d, link_mode = %d\n",
+ cnt_connect, cnt_connecting, wl_rinfo->link_mode);
_fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
@@ -3584,23 +4440,32 @@ static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
static
void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
- u8 mode = wl_rinfo->link_mode;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ u8 mode;
lockdep_assert_held(&rtwdev->mutex);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
- __func__, reason, mode);
- rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
- __func__, dm->wl_only, dm->bt_only);
dm->run_reason = reason;
_update_dm_step(rtwdev, reason);
_update_btc_state_map(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
+ __func__, reason, mode);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
+ __func__, dm->wl_only, dm->bt_only);
+
/* Be careful to change the following function sequence!! */
if (btc->ctrl.manual) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -3657,6 +4522,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
btc->ctrl.igno_bt = false;
dm->freerun = false;
+ bt->scan_rx_low_pri = false;
if (reason == BTC_RSN_NTFY_INIT) {
_action_wl_init(rtwdev);
@@ -3699,21 +4565,30 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_sta(rtwdev);
break;
case BTC_WLINK_2G_AP:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_ap(rtwdev);
break;
case BTC_WLINK_2G_GO:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_go(rtwdev);
break;
case BTC_WLINK_2G_GC:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_gc(rtwdev);
break;
case BTC_WLINK_2G_SCC:
- _action_wl_2g_scc(rtwdev);
+ bt->scan_rx_low_pri = true;
+ if (chip->chip_id == RTL8852A)
+ _action_wl_2g_scc(rtwdev);
+ else if (chip->chip_id == RTL8852C)
+ _action_wl_2g_scc_v1(rtwdev);
break;
case BTC_WLINK_2G_MCC:
+ bt->scan_rx_low_pri = true;
_action_wl_2g_mcc(rtwdev);
break;
case BTC_WLINK_25G_MCC:
+ bt->scan_rx_low_pri = true;
_action_wl_25g_mcc(rtwdev);
break;
case BTC_WLINK_5G:
@@ -3743,11 +4618,14 @@ void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev)
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
btc->dm.cnt_notify[BTC_NCNT_POWER_OFF]++;
btc->cx.wl.status.map.rf_off = 1;
+ btc->cx.wl.status.map.busy = 0;
+ wl->status.map.lps = BTC_LPS_OFF;
_write_scbd(rtwdev, BTC_WSCB_ALL, false);
_run_coex(rtwdev, BTC_RSN_NTFY_POWEROFF);
@@ -3807,7 +4685,7 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_write_scbd(rtwdev,
BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true);
_update_bt_scbd(rtwdev, true);
- if (rtw89_mac_get_ctrl_path(rtwdev)) {
+ if (rtw89_mac_get_ctrl_path(rtwdev) && chip->chip_id == RTL8852A) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): PTA owner warning!!\n",
__func__);
@@ -4150,7 +5028,8 @@ enum btc_wl_mode {
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4165,8 +5044,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
vif->type == NL80211_IFTYPE_STATION);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif->port);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], band=%d ch=%d bw=%d\n",
- hal->current_band_type, hal->current_channel,
- hal->current_band_width);
+ chan->band_type, chan->channel, chan->band_width);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], associated=%d\n",
state == BTC_ROLE_MSTS_STA_CONN_END);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -4205,9 +5083,9 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
r.connected = MLME_LINKED;
r.bcn_period = vif->bss_conf.beacon_int;
r.dtim_period = vif->bss_conf.dtim_period;
- r.band = hal->current_band_type;
- r.ch = hal->current_channel;
- r.bw = hal->current_band_width;
+ r.band = chan->band_type;
+ r.ch = chan->channel;
+ r.bw = chan->band_width;
ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
@@ -4218,7 +5096,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
memcpy(wlinfo, &r, sizeof(*wlinfo));
- _update_wl_info(rtwdev);
+ if (chip->chip_id == RTL8852A)
+ _update_wl_info(rtwdev);
+ else
+ _update_wl_info_v1(rtwdev);
if (wlinfo->role == RTW89_WIFI_ROLE_STATION &&
wlinfo->connected == MLME_NO_LINK)
@@ -4240,6 +5121,7 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ u32 val;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): rf_state = %d\n",
__func__, rf_state);
@@ -4249,10 +5131,12 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
case BTC_RFCTRL_WL_OFF:
wl->status.map.rf_off = 1;
wl->status.map.lps = BTC_LPS_OFF;
+ wl->status.map.busy = 0;
break;
case BTC_RFCTRL_FW_CTRL:
wl->status.map.rf_off = 0;
wl->status.map.lps = BTC_LPS_RF_OFF;
+ wl->status.map.busy = 0;
break;
case BTC_RFCTRL_WL_ON:
default:
@@ -4262,14 +5146,17 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
}
if (rf_state == BTC_RFCTRL_WL_ON) {
+ btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
rtw89_btc_fw_en_rpt(rtwdev,
RPT_EN_MREG | RPT_EN_BT_VER_INFO, true);
- _write_scbd(rtwdev, BTC_WSCB_ACTIVE, true);
+ val = BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG;
+ _write_scbd(rtwdev, val, true);
_update_bt_scbd(rtwdev, true);
chip->ops->btc_init_cfg(rtwdev);
} else {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, false);
- _write_scbd(rtwdev, BTC_WSCB_ACTIVE | BTC_WSCB_WLBUSY, false);
+ if (rf_state == BTC_RFCTRL_WL_OFF)
+ _write_scbd(rtwdev, BTC_WSCB_ALL, false);
}
_run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
@@ -4609,10 +5496,10 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n",
chip->chip_id);
- ver_main = FIELD_GET(GENMASK(31, 24), chip->para_ver);
- ver_sub = FIELD_GET(GENMASK(23, 16), chip->para_ver);
- ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->para_ver);
- id_branch = FIELD_GET(GENMASK(7, 0), chip->para_ver);
+ ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION);
+ ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), RTW89_COEX_VERSION);
+ id_branch = FIELD_GET(GENMASK(7, 0), RTW89_COEX_VERSION);
seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ",
"[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch);
@@ -4726,23 +5613,29 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
return;
seq_puts(m, "========== [WL Status] ==========\n");
- seq_printf(m, " %-15s : link_mode:%d, ",
- "[status]", (u32)wl_rinfo->link_mode);
+ if (chip->chip_id == RTL8852A)
+ mode = wl_rinfo->link_mode;
+ else
+ mode = wl_rinfo_v1->link_mode;
+
+ seq_printf(m, " %-15s : link_mode:%d, ", "[status]", mode);
seq_printf(m,
- "rf_off:%s, power_save:%s, scan:%s(band:%d/phy_map:0x%x), ",
- wl->status.map.rf_off ? "Y" : "N",
- wl->status.map.lps ? "Y" : "N",
+ "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
+ wl->status.map.rf_off, wl->status.map.lps,
wl->status.map.scan ? "Y" : "N",
wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
@@ -4908,6 +5801,7 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_ACT_STR(e) case BTC_ACT_ ## e | BTC_ACT_EXT_BIT: return #e
#define CASE_BTC_POLICY_STR(e) \
case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
+#define CASE_BTC_SLOT_STR(e) case CXST_ ## e: return #e
static const char *steps_to_str(u16 step)
{
@@ -4969,9 +5863,16 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(OFF_EQ3);
CASE_BTC_POLICY_STR(OFF_BWB0);
CASE_BTC_POLICY_STR(OFF_BWB1);
+ CASE_BTC_POLICY_STR(OFF_BWB2);
+ CASE_BTC_POLICY_STR(OFF_BWB3);
CASE_BTC_POLICY_STR(OFFB_BWB0);
CASE_BTC_POLICY_STR(OFFE_DEF);
CASE_BTC_POLICY_STR(OFFE_DEF2);
+ CASE_BTC_POLICY_STR(OFFE_2GBWISOB);
+ CASE_BTC_POLICY_STR(OFFE_2GISOB);
+ CASE_BTC_POLICY_STR(OFFE_2GBWMIXB);
+ CASE_BTC_POLICY_STR(OFFE_WL);
+ CASE_BTC_POLICY_STR(OFFE_2GBWMIXB2);
CASE_BTC_POLICY_STR(FIX_TD3030);
CASE_BTC_POLICY_STR(FIX_TD5050);
CASE_BTC_POLICY_STR(FIX_TD2030);
@@ -4982,6 +5883,7 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(FIX_TD2080);
CASE_BTC_POLICY_STR(FIX_TDW1B1);
CASE_BTC_POLICY_STR(FIX_TD4020);
+ CASE_BTC_POLICY_STR(FIX_TD4010ISO);
CASE_BTC_POLICY_STR(PFIX_TD3030);
CASE_BTC_POLICY_STR(PFIX_TD5050);
CASE_BTC_POLICY_STR(PFIX_TD2030);
@@ -4989,13 +5891,13 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(PFIX_TD3070);
CASE_BTC_POLICY_STR(PFIX_TD2080);
CASE_BTC_POLICY_STR(PFIX_TDW1B1);
- CASE_BTC_POLICY_STR(AUTO_TD50200);
- CASE_BTC_POLICY_STR(AUTO_TD60200);
- CASE_BTC_POLICY_STR(AUTO_TD20200);
+ CASE_BTC_POLICY_STR(AUTO_TD50B1);
+ CASE_BTC_POLICY_STR(AUTO_TD60B1);
+ CASE_BTC_POLICY_STR(AUTO_TD20B1);
CASE_BTC_POLICY_STR(AUTO_TDW1B1);
- CASE_BTC_POLICY_STR(PAUTO_TD50200);
- CASE_BTC_POLICY_STR(PAUTO_TD60200);
- CASE_BTC_POLICY_STR(PAUTO_TD20200);
+ CASE_BTC_POLICY_STR(PAUTO_TD50B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD60B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD20B1);
CASE_BTC_POLICY_STR(PAUTO_TDW1B1);
CASE_BTC_POLICY_STR(AUTO2_TD3050);
CASE_BTC_POLICY_STR(AUTO2_TD3070);
@@ -5014,6 +5916,32 @@ static const char *steps_to_str(u16 step)
}
}
+static const char *id_to_slot(u32 id)
+{
+ switch (id) {
+ CASE_BTC_SLOT_STR(OFF);
+ CASE_BTC_SLOT_STR(B2W);
+ CASE_BTC_SLOT_STR(W1);
+ CASE_BTC_SLOT_STR(W2);
+ CASE_BTC_SLOT_STR(W2B);
+ CASE_BTC_SLOT_STR(B1);
+ CASE_BTC_SLOT_STR(B2);
+ CASE_BTC_SLOT_STR(B3);
+ CASE_BTC_SLOT_STR(B4);
+ CASE_BTC_SLOT_STR(LK);
+ CASE_BTC_SLOT_STR(BLK);
+ CASE_BTC_SLOT_STR(E2G);
+ CASE_BTC_SLOT_STR(E5G);
+ CASE_BTC_SLOT_STR(EBT);
+ CASE_BTC_SLOT_STR(ENULL);
+ CASE_BTC_SLOT_STR(WLK);
+ CASE_BTC_SLOT_STR(W1FDD);
+ CASE_BTC_SLOT_STR(B1FDD);
+ default:
+ return "unknown";
+ }
+}
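/*
 * id_to_slot() is used by _show_fbtc_cysta_v1() below to print per-slot
 * counters by slot name instead of by raw index.
 */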
+
static
void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
@@ -5105,21 +6033,31 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
(bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
seq_printf(m,
- " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU\n",
+ " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
"[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time,
- dm->wl_tx_limit.tx_retry, btc->bt_req_len);
+ dm->wl_tx_limit.tx_retry, btc->bt_req_len, bt->scan_rx_low_pri);
}
static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_cysta *pcysta = NULL;
-
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ struct rtw89_btc_fbtc_cysta *pcysta;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1;
+ u32 except_cnt, exception_map;
+
+ if (chip->chip_id == RTL8852A) {
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ except_cnt = le32_to_cpu(pcysta->except_cnt);
+ exception_map = le32_to_cpu(pcysta->exception);
+ } else {
+ pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ except_cnt = le32_to_cpu(pcysta_v1->except_cnt);
+ exception_map = le32_to_cpu(pcysta_v1->except_map);
+ }
- if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 &&
- pcysta->except_cnt == 0 &&
+ if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
!pfwinfo->len_mismch && !pfwinfo->fver_mismch)
return;
@@ -5144,16 +6082,17 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
}
/* cycle statistics exceptions */
- if (pcysta->exception || pcysta->except_cnt) {
+ if (exception_map || except_cnt) {
seq_printf(m,
"exception-type: 0x%x, exception-cnt = %d",
- pcysta->exception, pcysta->except_cnt);
+ exception_map, except_cnt);
}
seq_puts(m, "\n");
}
static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
@@ -5166,7 +6105,10 @@ static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ if (chip->chip_id == RTL8852A)
+ t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ else
+ t = &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma;
seq_printf(m,
" %-15s : ", "[tdma_policy]");
@@ -5369,12 +6311,145 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
}
}
+static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_a2dp_trx_stat *a2dp_trx;
+ struct rtw89_btc_fbtc_cysta_v1 *pcysta;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u8 i, cnt = 0, slot_pair, divide_cnt;
+ u16 cycle, c_begin, c_end, store_index;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ seq_printf(m,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le32_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+
+ seq_printf(m, ", %s:%d", id_to_slot(i),
+ le32_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d", le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (le32_to_cpu(pcysta->collision_cnt))
+ seq_printf(m, ", collision:%d", le32_to_cpu(pcysta->collision_cnt));
+
+ if (le32_to_cpu(pcysta->skip_cnt))
+ seq_printf(m, ", skip:%d", le32_to_cpu(pcysta->skip_cnt));
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ seq_printf(m,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+
+ cycle = le16_to_cpu(pcysta->cycles);
+ if (cycle == 0)
+ return;
+
+ /* one cycle records one wl-slot and one bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (cycle <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = cycle - slot_pair + 1;
+
+ c_end = cycle;
+
+ if (a2dp->exist)
+ divide_cnt = 3;
+ else
+ divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ store_index = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1) {
+ seq_printf(m, "\n\r %-15s : ", "[cycle_step]");
+ } else {
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ }
+ if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
+ seq_puts(m, "\n");
+ }
+
+ if (a2dp->exist) {
+ seq_printf(m, "%-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+
+ seq_puts(m, "\n");
+ }
+}
+
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_cynullsta *ns = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_fbtc_cynullsta *ns;
+ struct rtw89_btc_fbtc_cynullsta_v1 *ns_v1;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
@@ -5384,25 +6459,58 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ if (chip->chip_id == RTL8852A) {
+ ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
- seq_printf(m, " %-15s : ", "[null_sta]");
+ seq_printf(m, " %-15s : ", "[null_sta]");
- for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[ok:%d/", le32_to_cpu(ns->result[i][1]));
- seq_printf(m, "fail:%d/", le32_to_cpu(ns->result[i][0]));
- seq_printf(m, "on_time:%d/", le32_to_cpu(ns->result[i][2]));
- seq_printf(m, "retry:%d/", le32_to_cpu(ns->result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->avg_t[i]) / 1000,
- le32_to_cpu(ns->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns->max_t[i]) / 1000,
- le32_to_cpu(ns->max_t[i]) % 1000);
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns->result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns->result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns->result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->avg_t[i]) / 1000,
+ le32_to_cpu(ns->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns->max_t[i]) / 1000,
+ le32_to_cpu(ns->max_t[i]) % 1000);
+ }
+ } else {
+ ns_v1 = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
+
+ seq_printf(m, " %-15s : ", "[null_sta]");
+
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[Tx:%d/",
+ le32_to_cpu(ns_v1->result[i][4]));
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns_v1->result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns_v1->result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns_v1->result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns_v1->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns_v1->avg_t[i]) / 1000,
+ le32_to_cpu(ns_v1->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns_v1->max_t[i]) / 1000,
+ le32_to_cpu(ns_v1->max_t[i]) % 1000);
+ }
}
seq_puts(m, "\n");
}
@@ -5478,6 +6586,7 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
@@ -5486,11 +6595,57 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_error(rtwdev, m);
_show_fbtc_tdma(rtwdev, m);
_show_fbtc_slots(rtwdev, m);
- _show_fbtc_cysta(rtwdev, m);
+
+ if (chip->chip_id == RTL8852A)
+ _show_fbtc_cysta(rtwdev, m);
+ else
+ _show_fbtc_cysta_v1(rtwdev, m);
+
_show_fbtc_nullsta(rtwdev, m);
_show_fbtc_step(rtwdev, m);
}
+static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_ax_gnt *gnt;
+ u32 val, status;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) {
+ rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
+ rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
+
+ gnt = &gnt_cfg->band[0];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S0_SW_CTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S0_STA);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S0_SW_CTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S0_STA);
+
+ gnt = &gnt_cfg->band[1];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S1_SW_CTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S1_STA);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S1_SW_CTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S1_STA);
+ } else if (chip->chip_id == RTL8852C) {
+ val = rtw89_read32(rtwdev, R_AX_GNT_SW_CTRL);
+ status = rtw89_read32(rtwdev, R_AX_GNT_VAL_V1);
+
+ gnt = &gnt_cfg->band[0];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S0_SWCTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S0);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S0_SWCTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S0);
+
+ gnt = &gnt_cfg->band[1];
+ gnt->gnt_bt_sw_en = !!(val & B_AX_GNT_BT_RFC_S1_SWCTRL);
+ gnt->gnt_bt = !!(status & B_AX_GNT_BT_RFC_S1);
+ gnt->gnt_wl_sw_en = !!(val & B_AX_GNT_WL_RFC_S1_SWCTRL);
+ gnt->gnt_wl = !!(status & B_AX_GNT_WL_RFC_S1);
+ } else {
+ return;
+ }
+}
+
static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5502,7 +6657,8 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_mac_ax_gnt gnt[2] = {0};
+ struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
+ struct rtw89_mac_ax_gnt gnt;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
@@ -5519,45 +6675,28 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
/* To avoid I/O if WL LPS or power-off */
if (!wl->status.map.lps && !wl->status.map.rf_off) {
- rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
- if (val & (B_AX_GNT_BT_RFC_S0_SW_VAL |
- B_AX_GNT_BT_BB_S0_SW_VAL))
- gnt[0].gnt_bt = true;
- if (val & (B_AX_GNT_BT_RFC_S0_SW_CTRL |
- B_AX_GNT_BT_BB_S0_SW_CTRL))
- gnt[0].gnt_bt_sw_en = true;
- if (val & (B_AX_GNT_WL_RFC_S0_SW_VAL |
- B_AX_GNT_WL_BB_S0_SW_VAL))
- gnt[0].gnt_wl = true;
- if (val & (B_AX_GNT_WL_RFC_S0_SW_CTRL |
- B_AX_GNT_WL_BB_S0_SW_CTRL))
- gnt[0].gnt_wl_sw_en = true;
-
- if (val & (B_AX_GNT_BT_RFC_S1_SW_VAL |
- B_AX_GNT_BT_BB_S1_SW_VAL))
- gnt[1].gnt_bt = true;
- if (val & (B_AX_GNT_BT_RFC_S1_SW_CTRL |
- B_AX_GNT_BT_BB_S1_SW_CTRL))
- gnt[1].gnt_bt_sw_en = true;
- if (val & (B_AX_GNT_WL_RFC_S1_SW_VAL |
- B_AX_GNT_WL_BB_S1_SW_VAL))
- gnt[1].gnt_wl = true;
- if (val & (B_AX_GNT_WL_RFC_S1_SW_CTRL |
- B_AX_GNT_WL_BB_S1_SW_CTRL))
- gnt[1].gnt_wl_sw_en = true;
+ if (chip->chip_id == RTL8852A)
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+ else if (chip->chip_id == RTL8852C)
+ btc->dm.pta_owner = 0;
+ _get_gnt(rtwdev, &gnt_cfg);
+ gnt = gnt_cfg.band[0];
seq_printf(m,
" %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
"[gnt_status]",
- (rtw89_mac_get_ctrl_path(rtwdev) ? "WL" : "BT"),
- (gnt[0].gnt_wl_sw_en ? "SW" : "HW"), gnt[0].gnt_wl,
- (gnt[0].gnt_bt_sw_en ? "SW" : "HW"), gnt[0].gnt_bt);
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ gnt = gnt_cfg.band[1];
seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- (gnt[1].gnt_wl_sw_en ? "SW" : "HW"), gnt[1].gnt_wl,
- (gnt[1].gnt_bt_sw_en ? "SW" : "HW"), gnt[1].gnt_bt);
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
}
-
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -5714,8 +6853,121 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_puts(m, "========== [Statistics] ==========\n");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo_v1;
+
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
+
+ seq_printf(m,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le32_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en),
+ dm->error.val);
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ seq_printf(m,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
+
+ if (le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
+ } else {
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ seq_puts(m, " (WL FW report invalid!!)\n");
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
@@ -5746,5 +6998,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_info(rtwdev, m);
_show_fw_dm_msg(rtwdev, m);
_show_mreg(rtwdev, m);
- _show_summary(rtwdev, m);
+ if (chip->chip_id == RTL8852A)
+ _show_summary(rtwdev, m);
+ else
+ _show_summary_v1(rtwdev, m);
}
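
The debugfs dump paths above all follow the same chip-versioned dispatch: the firmware report buffer is a union of the original layout (finfo) and the v1 layout (finfo_v1), and chip_id decides which member gets decoded. Below is a minimal, self-contained sketch of that pattern; the struct layouts, field names and CHIP_V0/CHIP_V1 constants are simplified stand-ins for illustration, not the driver's real definitions, and the le32_to_cpu() endian conversion is omitted.

#include <stdio.h>

enum chip_id { CHIP_V0, CHIP_V1 };

struct rpt_v0 { unsigned int except_cnt; unsigned int exception; };
struct rpt_v1 { unsigned int except_cnt; unsigned int except_map; unsigned int extra; };

union rpt_finfo {	/* one storage area, two firmware layouts */
	struct rpt_v0 v0;
	struct rpt_v1 v1;
};

static void show_error(enum chip_id id, const union rpt_finfo *finfo)
{
	unsigned int except_cnt, exception_map;

	if (id == CHIP_V0) {		/* original layout */
		except_cnt = finfo->v0.except_cnt;
		exception_map = finfo->v0.exception;
	} else {			/* v1 layout used by the newer chip */
		except_cnt = finfo->v1.except_cnt;
		exception_map = finfo->v1.except_map;
	}

	if (except_cnt || exception_map)
		printf("exception-type: 0x%x, exception-cnt = %u\n",
		       exception_map, except_cnt);
}

int main(void)
{
	union rpt_finfo f = { .v1 = { .except_cnt = 2, .except_map = 0x5 } };

	show_error(CHIP_V1, &f);
	return 0;
}
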
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index c3a722d259d7..ca16afa97ec0 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -162,17 +162,19 @@ void rtw89_coex_act1_work(struct work_struct *work);
void rtw89_coex_bt_devinfo_work(struct work_struct *work);
void rtw89_coex_rfk_chk_work(struct work_struct *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
+void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
+void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
enum rtw89_rf_path_bit paths)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 phy_map;
phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) |
FIELD_PREP(BTC_RFK_PHY_MAP, BIT(phy_idx)) |
- FIELD_PREP(BTC_RFK_BAND_MAP, hal->current_band_type);
+ FIELD_PREP(BTC_RFK_BAND_MAP, chan->band_type);
return phy_map;
}
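
rtw89_btc_phymap() above packs the RF paths, the PHY index and, after this change, the band type taken from rtw89_chan_get() into a single byte with FIELD_PREP(). The stand-alone sketch below illustrates that packing; the bit layout chosen here (paths in bits 0-3, phy in bits 4-5, band in bits 6-7) is an assumption made for illustration only, not the actual BTC_RFK_* mask definition.

#include <stdio.h>

#define PATH_MAP	0x0fu	/* assumed GENMASK(3, 0): RF path bits */
#define PHY_MAP		0x30u	/* assumed GENMASK(5, 4): PHY index bits */
#define BAND_MAP	0xc0u	/* assumed GENMASK(7, 6): band type */

/* shift a value into position under its mask, like the kernel's FIELD_PREP() */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

static unsigned int btc_phymap(unsigned int paths, unsigned int phy_idx,
			       unsigned int band_type)
{
	return field_prep(PATH_MAP, paths) |
	       field_prep(PHY_MAP, 1u << phy_idx) |
	       field_prep(BAND_MAP, band_type);
}

int main(void)
{
	/* paths A+B (0x3), phy 0, 2 GHz band (0) -> 0x13 */
	printf("phy_map = 0x%02x\n", btc_phymap(0x3, 0, 0));
	return 0;
}
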
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index a5880a54812e..bc2994865372 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -5,6 +5,7 @@
#include <linux/udp.h>
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "core.h"
#include "efuse.h"
@@ -224,18 +225,22 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
- struct rtw89_channel_params *chan_param)
+void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef)
+{
+ cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0],
+ NL80211_CHAN_NO_HT);
+}
+
+static void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
+ struct rtw89_chan *chan)
{
struct ieee80211_channel *channel = chandef->chan;
enum nl80211_chan_width width = chandef->width;
u32 primary_freq, center_freq;
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
- u8 primary_chan_idx = 0;
u32 offset;
u8 band;
- u8 subband;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
@@ -245,15 +250,12 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
bandwidth = RTW89_CHANNEL_WIDTH_20;
- primary_chan_idx = RTW89_SC_DONT_CARE;
break;
case NL80211_CHAN_WIDTH_40:
bandwidth = RTW89_CHANNEL_WIDTH_40;
if (primary_freq > center_freq) {
- primary_chan_idx = RTW89_SC_20_UPPER;
center_chan -= 2;
} else {
- primary_chan_idx = RTW89_SC_20_LOWER;
center_chan += 2;
}
break;
@@ -262,11 +264,9 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
bandwidth = nl_to_rtw89_bandwidth(width);
if (primary_freq > center_freq) {
offset = (primary_freq - center_freq - 10) / 20;
- primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
center_chan -= 2 + offset * 4;
} else {
offset = (center_freq - primary_freq - 10) / 20;
- primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
center_chan += 2 + offset * 4;
}
break;
@@ -288,110 +288,76 @@ static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
break;
}
- switch (band) {
- default:
- case RTW89_BAND_2G:
- switch (center_chan) {
- default:
- case 1 ... 14:
- subband = RTW89_CH_2G;
- break;
- }
- break;
- case RTW89_BAND_5G:
- switch (center_chan) {
- default:
- case 36 ... 64:
- subband = RTW89_CH_5G_BAND_1;
- break;
- case 100 ... 144:
- subband = RTW89_CH_5G_BAND_3;
- break;
- case 149 ... 177:
- subband = RTW89_CH_5G_BAND_4;
- break;
- }
- break;
- case RTW89_BAND_6G:
- switch (center_chan) {
- default:
- case 1 ... 29:
- subband = RTW89_CH_6G_BAND_IDX0;
- break;
- case 33 ... 61:
- subband = RTW89_CH_6G_BAND_IDX1;
- break;
- case 65 ... 93:
- subband = RTW89_CH_6G_BAND_IDX2;
- break;
- case 97 ... 125:
- subband = RTW89_CH_6G_BAND_IDX3;
- break;
- case 129 ... 157:
- subband = RTW89_CH_6G_BAND_IDX4;
- break;
- case 161 ... 189:
- subband = RTW89_CH_6G_BAND_IDX5;
- break;
- case 193 ... 221:
- subband = RTW89_CH_6G_BAND_IDX6;
- break;
- case 225 ... 253:
- subband = RTW89_CH_6G_BAND_IDX7;
- break;
- }
- break;
- }
+ rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth);
+}
- chan_param->center_chan = center_chan;
- chan_param->center_freq = center_freq;
- chan_param->primary_chan = channel->hw_value;
- chan_param->bandwidth = bandwidth;
- chan_param->pri_ch_idx = primary_chan_idx;
- chan_param->band_type = band;
- chan_param->subband_type = subband;
+void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_chan *chan;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_phy_idx phy_idx;
+ enum rtw89_entity_mode mode;
+ bool entity_active;
+
+ entity_active = rtw89_get_entity_state(rtwdev);
+ if (!entity_active)
+ return;
+
+ mode = rtw89_get_entity_mode(rtwdev);
+ if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
+ return;
+
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ phy_idx = RTW89_PHY_0;
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ if (chip->ops->set_txpwr)
+ chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
void rtw89_set_channel(struct rtw89_dev *rtwdev)
{
- struct ieee80211_hw *hw = rtwdev->hw;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_channel_params ch_param;
+ const struct cfg80211_chan_def *chandef;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ enum rtw89_mac_idx mac_idx;
+ enum rtw89_phy_idx phy_idx;
+ struct rtw89_chan chan;
struct rtw89_channel_help_params bak;
- u8 center_chan, bandwidth;
+ enum rtw89_entity_mode mode;
bool band_changed;
+ bool entity_active;
- rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
- if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
+ entity_active = rtw89_get_entity_state(rtwdev);
+
+ mode = rtw89_entity_recalc(rtwdev);
+ if (WARN(mode != RTW89_ENTITY_MODE_SCC, "Invalid ent mode: %d\n", mode))
return;
- center_chan = ch_param.center_chan;
- bandwidth = ch_param.bandwidth;
- band_changed = hal->current_band_type != ch_param.band_type ||
- hal->current_channel == 0;
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ mac_idx = RTW89_MAC_0;
+ phy_idx = RTW89_PHY_0;
+ chandef = rtw89_chandef_get(rtwdev, sub_entity_idx);
+ rtw89_get_channel_params(chandef, &chan);
+ if (WARN(chan.channel == 0, "Invalid channel\n"))
+ return;
- hal->current_band_width = bandwidth;
- hal->current_channel = center_chan;
- hal->current_freq = ch_param.center_freq;
- hal->prev_primary_channel = hal->current_primary_channel;
- hal->prev_band_type = hal->current_band_type;
- hal->current_primary_channel = ch_param.primary_chan;
- hal->current_band_type = ch_param.band_type;
- hal->current_subband = ch_param.subband_type;
+ band_changed = rtw89_assign_entity_chan(rtwdev, sub_entity_idx, &chan);
- rtw89_chip_set_channel_prepare(rtwdev, &bak);
+ rtw89_chip_set_channel_prepare(rtwdev, &bak, &chan, mac_idx, phy_idx);
- chip->ops->set_channel(rtwdev, &ch_param);
+ chip->ops->set_channel(rtwdev, &chan, mac_idx, phy_idx);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
- rtw89_chip_set_channel_done(rtwdev, &bak);
+ rtw89_chip_set_channel_done(rtwdev, &bak, &chan, mac_idx, phy_idx);
- if (band_changed) {
- rtw89_btc_ntfy_switch_band(rtwdev, RTW89_PHY_0, hal->current_band_type);
- rtw89_chip_rfk_band_changed(rtwdev);
+ if (!entity_active || band_changed) {
+ rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan.band_type);
+ rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
}
+
+ rtw89_set_entity_state(rtwdev, true);
}
static enum rtw89_core_tx_type
@@ -529,9 +495,15 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = tx_info->control.vif;
- struct rtw89_hal *hal = &rtwdev->hal;
- u16 lowest_rate = hal->current_band_type == RTW89_BAND_2G ?
- RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u16 lowest_rate;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)
return lowest_rate;
@@ -546,6 +518,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 qsel, ch_dma;
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
@@ -564,9 +537,9 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req);
rtw89_debug(rtwdev, RTW89_DBG_TXRX,
- "tx mgmt frame with rate 0x%x on channel %d (bw %d)\n",
- desc_info->data_rate, rtwdev->hal.current_channel,
- rtwdev->hal.current_band_width);
+ "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
+ desc_info->data_rate, chan->channel, chan->band_type,
+ chan->band_width);
}
static void
@@ -591,15 +564,16 @@ static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc
};
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 om_bandwidth;
if (!chip->dis_2g_40m_ul_ofdma ||
- hal->current_band_type != RTW89_BAND_2G ||
- hal->current_band_width != RTW89_CHANNEL_WIDTH_40)
+ chan->band_type != RTW89_BAND_2G ||
+ chan->band_width != RTW89_CHANNEL_WIDTH_40)
return;
- om_bandwidth = hal->current_band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
- rtw89_bandwidth_to_om[hal->current_band_width] : 0;
+ om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
+ rtw89_bandwidth_to_om[chan->band_width] : 0;
*htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) |
@@ -617,6 +591,7 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
enum btc_pkt_type pkt_type)
{
struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct sk_buff *skb = tx_req->skb;
struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
@@ -634,6 +609,9 @@ __rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
return false;
+ if (rtwsta && rtwsta->ra_report.might_fallback_legacy)
+ return false;
+
return true;
}
@@ -713,7 +691,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif = tx_req->vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 tid, tid_indicate;
@@ -736,9 +714,11 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
if (IEEE80211_SKB_CB(skb)->control.hw_key)
rtw89_core_tx_update_sec_key(rtwdev, tx_req);
- if (rate_pattern->enable)
+ if (vif->p2p)
+ desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (rate_pattern->enable)
desc_info->data_retry_lowest_rate = rate_pattern->rate;
- else if (hal->current_band_type == RTW89_BAND_2G)
+ else if (chan->band_type == RTW89_BAND_2G)
desc_info->data_retry_lowest_rate = RTW89_HW_RATE_CCK1;
else
desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
@@ -796,13 +776,16 @@ static void
rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
return;
if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
return;
- if (tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
+ if (chip->chip_id != RTL8852C &&
+ tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
return;
rtw89_mac_notify_wake(rtwdev);
@@ -872,6 +855,7 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_FW,
"ignore h2c due to power is off with firmware state=%d\n",
test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags));
+ dev_kfree_skb(skb);
return 0;
}
@@ -1021,7 +1005,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
}
@@ -1171,9 +1156,14 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ int i;
- if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self)
+ if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self) {
ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
+ }
}
#define VAR_LEN 0xff
@@ -1229,15 +1219,15 @@ static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, u8 *addr,
static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
{
- s8 *rssi = phy_ppdu->rssi;
+ u8 *rssi = phy_ppdu->rssi;
u8 *buf = phy_ppdu->buf;
phy_ppdu->ie = RTW89_GET_PHY_STS_IE_MAP(buf);
phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf);
- rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf));
- rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf));
- rssi[RF_PATH_C] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_C(buf));
- rssi[RF_PATH_D] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_D(buf));
+ rssi[RF_PATH_A] = RTW89_GET_PHY_STS_RSSI_A(buf);
+ rssi[RF_PATH_B] = RTW89_GET_PHY_STS_RSSI_B(buf);
+ rssi[RF_PATH_C] = RTW89_GET_PHY_STS_RSSI_C(buf);
+ rssi[RF_PATH_D] = RTW89_GET_PHY_STS_RSSI_D(buf);
}
static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
@@ -1448,8 +1438,11 @@ static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
- u16 chan = rtwdev->hal.prev_primary_channel;
- u8 band = chan <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ const struct rtw89_chan_rcd *rcd =
+ rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u16 chan = rcd->prev_primary_channel;
+ u8 band = rcd->prev_band_type == RTW89_BAND_2G ?
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
if (status->band != NL80211_BAND_2GHZ &&
status->encoding == RX_ENC_LEGACY &&
@@ -1661,19 +1654,20 @@ static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct ieee80211_rx_status *rx_status)
{
- struct ieee80211_hw *hw = rtwdev->hw;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct cfg80211_chan_def *chandef =
+ rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u16 data_rate;
u8 data_rate_mode;
/* currently using single PHY */
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = chandef->chan->center_freq;
+ rx_status->band = chandef->chan->band;
if (rtwdev->scanning &&
RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
- u8 chan = hal->current_primary_channel;
- u8 band = hal->current_band_type;
+ u8 chan = cur->primary_channel;
+ u8 band = cur->band_type;
enum nl80211_band nl_band;
nl_band = rtw89_hw_to_nl80211_band(band);
@@ -1727,7 +1721,8 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (rtw89_disable_ps_mode || !chip->ps_mode_supported)
+ if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
+ RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED))
@@ -1810,7 +1805,7 @@ void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
{
init_dummy_netdev(&rtwdev->netdev);
netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
- rtwdev->hci.ops->napi_poll, NAPI_POLL_WEIGHT);
+ rtwdev->hci.ops->napi_poll);
}
EXPORT_SYMBOL(rtw89_core_napi_init);
@@ -1907,21 +1902,14 @@ static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
return;
spin_lock_bh(&rtwdev->ba_lock);
- if (!list_empty(&rtwtxq->list)) {
- list_del_init(&rtwtxq->list);
- goto out;
- }
-
- set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ if (!test_and_set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
+ spin_unlock_bh(&rtwdev->ba_lock);
- list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
ieee80211_stop_tx_ba_session(sta, txq->tid);
cancel_delayed_work(&rtwdev->forbid_ba_work);
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
RTW89_FORBID_BA_TIMER);
-
-out:
- spin_unlock_bh(&rtwdev->ba_lock);
}
static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
@@ -1933,6 +1921,9 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta = txq->sta;
struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
+ if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
+ return;
+
if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
return;
@@ -1941,9 +1932,6 @@ static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
if (unlikely(!sta))
return;
- if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
- return;
-
if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))
return;
@@ -2179,12 +2167,13 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE)
- rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ rtw89_enter_lps(rtwdev, rtwvif);
}
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
@@ -2237,6 +2226,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_chip_rfk_track(rtwdev);
rtw89_phy_ra_update(rtwdev);
rtw89_phy_cfo_track(rtwdev);
+ rtw89_phy_tx_path_div_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
@@ -2266,45 +2256,69 @@ void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
bitmap_zero(addr, nbits);
}
-int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
{
- struct rtw89_ba_cam_entry *entry;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_ba_cam_entry *entry = NULL, *tmp;
u8 idx;
+ int i;
- idx = rtw89_core_acquire_bit_map(rtwsta->ba_cam_map, RTW89_BA_CAM_NUM);
- if (idx == RTW89_BA_CAM_NUM) {
- /* allocate a static BA CAM to tid=0, so replace the existing
+ lockdep_assert_held(&rtwdev->mutex);
+
+ idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
+ if (idx == chip->bacam_num) {
+ /* allocate a static BA CAM to tid=0/5, so replace the existing
* one if BA CAM is full. Hardware will process the original tid
* automatically.
*/
- if (tid != 0)
+ if (tid != 0 && tid != 5)
return -ENOSPC;
- idx = 0;
+ for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) {
+ tmp = &cam_info->ba_cam_entry[i];
+ if (tmp->tid == 0 || tmp->tid == 5)
+ continue;
+
+ idx = i;
+ entry = tmp;
+ list_del(&entry->list);
+ break;
+ }
+
+ if (!entry)
+ return -ENOSPC;
+ } else {
+ entry = &cam_info->ba_cam_entry[idx];
}
- entry = &rtwsta->ba_cam_entry[idx];
entry->tid = tid;
+ list_add_tail(&entry->list, &rtwsta->ba_cam_list);
+
*cam_idx = idx;
return 0;
}
-int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
{
- struct rtw89_ba_cam_entry *entry;
- int i;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_ba_cam_entry *entry = NULL, *tmp;
+ u8 idx;
- for (i = 0; i < RTW89_BA_CAM_NUM; i++) {
- if (!test_bit(i, rtwsta->ba_cam_map))
- continue;
+ lockdep_assert_held(&rtwdev->mutex);
- entry = &rtwsta->ba_cam_entry[i];
+ list_for_each_entry_safe(entry, tmp, &rtwsta->ba_cam_list, list) {
if (entry->tid != tid)
continue;
- rtw89_core_release_bit_map(rtwsta->ba_cam_map, i);
- *cam_idx = i;
+ idx = entry - cam_info->ba_cam_entry;
+ list_del(&entry->list);
+
+ rtw89_core_release_bit_map(cam_info->ba_cam_map, idx);
+ *cam_idx = idx;
return 0;
}
@@ -2320,9 +2334,19 @@ void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
+ else
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_STATION;
+ break;
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
+ else
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_AP;
+ break;
RTW89_TYPE_MAPPING(ADHOC);
- RTW89_TYPE_MAPPING(STATION);
- RTW89_TYPE_MAPPING(AP);
RTW89_TYPE_MAPPING(MONITOR);
RTW89_TYPE_MAPPING(MESH_POINT);
default:
@@ -2365,13 +2389,17 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int i;
+ rtwsta->rtwdev = rtwdev;
rtwsta->rtwvif = rtwvif;
rtwsta->prev_rssi = 0;
+ INIT_LIST_HEAD(&rtwsta->ba_cam_list);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
rtw89_core_txq_init(rtwdev, sta->txq[i]);
ewma_rssi_init(&rtwsta->avg_rssi);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_rssi_init(&rtwsta->rssi[i]);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
@@ -2541,6 +2569,60 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
return 0;
}
+static void _rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_cfg *tid_conf)
+{
+ struct ieee80211_txq *txq;
+ struct rtw89_txq *rtwtxq;
+ u32 mask = tid_conf->mask;
+ u8 tids = tid_conf->tids;
+ int tids_nbit = BITS_PER_BYTE;
+ int i;
+
+ for (i = 0; i < tids_nbit; i++, tids >>= 1) {
+ if (!tids)
+ break;
+
+ if (!(tids & BIT(0)))
+ continue;
+
+ txq = sta->txq[i];
+ rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+
+ if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (tid_conf->ampdu == NL80211_TID_CONFIG_ENABLE) {
+ clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ } else {
+ if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags))
+ ieee80211_stop_tx_ba_session(sta, txq->tid);
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_del_init(&rtwtxq->list);
+ set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
+ spin_unlock_bh(&rtwdev->ba_lock);
+ }
+ }
+
+ if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL) && tids == 0xff) {
+ if (tid_conf->amsdu == NL80211_TID_CONFIG_ENABLE)
+ sta->max_amsdu_subframes = 0;
+ else
+ sta->max_amsdu_subframes = 1;
+ }
+ }
+}
+
+void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ int i;
+
+ for (i = 0; i < tid_config->n_tid_conf; i++)
+ _rtw89_core_set_tid_config(rtwdev, sta,
+ &tid_config->tid_conf[i]);
+}
+
static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
struct ieee80211_sta_ht_cap *ht_cap)
{
@@ -2669,8 +2751,7 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
he_cap->has_he = true;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
if (i == NL80211_IFTYPE_STATION)
mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
@@ -2706,6 +2787,8 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bw160)
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
phy_cap_info[5] = no_ng16 ? 0 :
IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
@@ -2866,7 +2949,9 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
/* efuse process */
/* pre-config BB/RF, BB reset/RFC reset */
- rtw89_chip_disable_bb_rf(rtwdev);
+ ret = rtw89_chip_disable_bb_rf(rtwdev);
+ if (ret)
+ return ret;
ret = rtw89_chip_enable_bb_rf(rtwdev);
if (ret)
return ret;
@@ -2894,6 +2979,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable);
+ rtw89_fw_h2c_init_ba_cam(rtwdev);
return 0;
}
@@ -2987,6 +3073,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
return ret;
}
rtw89_ser_init(rtwdev);
+ rtw89_entity_init(rtwdev);
return 0;
}
@@ -3007,7 +3094,7 @@ EXPORT_SYMBOL(rtw89_core_deinit);
void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
const u8 *mac_addr, bool hw_scan)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
@@ -3015,7 +3102,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
rtw89_leave_ips(rtwdev);
ether_addr_copy(rtwvif->mac_addr, mac_addr);
- rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
+ rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
rtw89_chip_rfk_scan(rtwdev, true);
rtw89_hci_recalc_int_mit(rtwdev);
@@ -3141,6 +3228,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->vif_data_size = sizeof(struct rtw89_vif);
hw->sta_data_size = sizeof(struct rtw89_sta);
hw->txq_data_size = sizeof(struct rtw89_txq);
+ hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);
SET_IEEE80211_PERM_ADDR(hw, efuse->addr);
@@ -3148,6 +3236,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->queues = IEEE80211_NUM_ACS;
hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
+ hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -3164,17 +3253,26 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
- WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
+ WIPHY_FLAG_AP_UAPSD;
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;
+ hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
+ hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
+
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
ret = rtw89_core_set_supported_band(rtwdev);
@@ -3234,6 +3332,63 @@ void rtw89_core_unregister(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_unregister);
+struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ u32 bus_data_size,
+ const struct rtw89_chip_info *chip)
+{
+ struct ieee80211_hw *hw;
+ struct rtw89_dev *rtwdev;
+ struct ieee80211_ops *ops;
+ u32 driver_data_size;
+ u32 early_feat_map = 0;
+ bool no_chanctx;
+
+ rtw89_early_fw_feature_recognize(device, chip, &early_feat_map);
+
+ ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL);
+ if (!ops)
+ goto err;
+
+ no_chanctx = chip->support_chanctx_num == 0 ||
+ !(early_feat_map & BIT(RTW89_FW_FEATURE_SCAN_OFFLOAD));
+
+ if (no_chanctx) {
+ ops->add_chanctx = NULL;
+ ops->remove_chanctx = NULL;
+ ops->change_chanctx = NULL;
+ ops->assign_vif_chanctx = NULL;
+ ops->unassign_vif_chanctx = NULL;
+ }
+
+ driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
+ hw = ieee80211_alloc_hw(driver_data_size, ops);
+ if (!hw)
+ goto err;
+
+ rtwdev = hw->priv;
+ rtwdev->hw = hw;
+ rtwdev->dev = device;
+ rtwdev->ops = ops;
+ rtwdev->chip = chip;
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
+ no_chanctx ? "without" : "with");
+
+ return rtwdev;
+
+err:
+ kfree(ops);
+ return NULL;
+}
+EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);
+
+void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
+{
+ kfree(rtwdev->ops);
+ ieee80211_free_hw(rtwdev->hw);
+}
+EXPORT_SYMBOL(rtw89_free_ieee80211_hw);
+
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
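
One behavioral change in core.c above is the BA CAM bookkeeping: entries now live in a device-wide bitmap plus a per-station list, and when the CAM is full only the "static" TIDs 0 and 5 may take over an entry holding some other TID. The sketch below illustrates that allocation policy with plain arrays; the BACAM_NUM size, the ba_entry layout and the helper name are simplified stand-ins, not the driver's rtw89_core_acquire_sta_ba_entry() implementation.

#include <stdbool.h>
#include <stdio.h>

#define BACAM_NUM 8	/* assumed CAM depth, not the chip's bacam_num */

struct ba_entry {
	bool used;
	int tid;
};

static struct ba_entry ba_cam[BACAM_NUM];

/* returns 0 and fills *cam_idx on success, -1 if no entry can be used */
static int acquire_ba_entry(int tid, int *cam_idx)
{
	int i;

	for (i = 0; i < BACAM_NUM; i++) {	/* prefer a free slot */
		if (!ba_cam[i].used) {
			ba_cam[i].used = true;
			ba_cam[i].tid = tid;
			*cam_idx = i;
			return 0;
		}
	}

	if (tid != 0 && tid != 5)		/* only static TIDs may replace */
		return -1;

	for (i = 0; i < BACAM_NUM; i++) {	/* evict a non-static entry */
		if (ba_cam[i].tid == 0 || ba_cam[i].tid == 5)
			continue;
		ba_cam[i].tid = tid;
		*cam_idx = i;
		return 0;
	}

	return -1;				/* CAM holds only static TIDs */
}

int main(void)
{
	int idx, tid;

	for (tid = 1; tid <= BACAM_NUM; tid++)	/* fill the CAM completely */
		acquire_ba_entry(tid, &idx);

	if (!acquire_ba_entry(0, &idx))		/* TID 0 replaces a non-static slot */
		printf("tid 0 placed at index %d\n", idx);
	return 0;
}
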
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 7a9d6f5d8a51..db041b32a8c2 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -34,6 +34,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define MAX_RSSI 110
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
+#define RTW89_TX_DIV_RSSI_RAW_TH (2 << RSSI_FACTOR)
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
@@ -522,7 +523,7 @@ struct rtw89_rx_phy_ppdu {
u8 *buf;
u32 len;
u8 rssi_avg;
- s8 rssi[RF_PATH_MAX];
+ u8 rssi[RF_PATH_MAX];
u8 mac_id;
u8 chan_idx;
u8 ie;
@@ -542,6 +543,12 @@ enum rtw89_phy_idx {
RTW89_PHY_MAX
};
+enum rtw89_sub_entity_idx {
+ RTW89_SUB_ENTITY_0 = 0,
+
+ NUM_OF_RTW89_SUB_ENTITY,
+};
+
enum rtw89_rf_path {
RF_PATH_A = 0,
RF_PATH_B = 1,
@@ -624,14 +631,23 @@ enum rtw89_sc_offset {
RTW89_SC_40_LOWER = 10,
};
-struct rtw89_channel_params {
- u8 center_chan;
- u32 center_freq;
- u8 primary_chan;
- u8 bandwidth;
- u8 pri_ch_idx;
- u8 band_type;
- u8 subband_type;
+struct rtw89_chan {
+ u8 channel;
+ u8 primary_channel;
+ enum rtw89_band band_type;
+ enum rtw89_bandwidth band_width;
+
+ /* The following fields are derived from the above. Ensure they are
+ * assigned correctly in rtw89_chan_create() if a new field is added.
+ */
+ u32 freq;
+ enum rtw89_subband subband_type;
+ enum rtw89_sc_offset pri_ch_idx;
+};
+
+struct rtw89_chan_rcd {
+ u8 prev_primary_channel;
+ enum rtw89_band prev_band_type;
};
struct rtw89_channel_help_params {
@@ -793,7 +809,7 @@ struct rtw89_mac_ax_gnt {
u8 gnt_bt;
u8 gnt_wl_sw_en;
u8 gnt_wl;
-};
+} __packed;
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
@@ -848,6 +864,7 @@ enum rtw89_btc_dcnt {
BTC_DCNT_SLOT_NONSYNC,
BTC_DCNT_BTCNT_FREEZE,
BTC_DCNT_WL_SLOT_DRIFT,
+ BTC_DCNT_BT_SLOT_DRIFT,
BTC_DCNT_WL_STA_LAST,
BTC_DCNT_NUM,
};
@@ -920,12 +937,12 @@ struct rtw89_btc_wl_smap {
u32 roaming: 1;
u32 _4way: 1;
u32 rf_off: 1;
- u32 lps: 1;
+ u32 lps: 2;
u32 ips: 1;
u32 init_ok: 1;
u32 traffic_dir : 2;
u32 rf_off_pre: 1;
- u32 lps_pre: 1;
+ u32 lps_pre: 2;
};
enum rtw89_tfc_lv {
@@ -1108,6 +1125,27 @@ struct rtw89_btc_wl_active_role {
u16 rx_rate;
};
+struct rtw89_btc_wl_active_role_v1 {
+ u8 connected: 1;
+ u8 pid: 3;
+ u8 phy: 1;
+ u8 noa: 1;
+ u8 band: 2;
+
+ u8 client_ps: 1;
+ u8 bw: 7;
+
+ u8 role;
+ u8 ch;
+
+ u16 tx_lvl;
+ u16 rx_lvl;
+ u16 tx_rate;
+ u16 rx_rate;
+
+ u32 noa_duration; /* ms */
+};
+
struct rtw89_btc_wl_role_info_bpos {
u16 none: 1;
u16 station: 1;
@@ -1123,6 +1161,12 @@ struct rtw89_btc_wl_role_info_bpos {
u16 nan: 1;
};
+struct rtw89_btc_wl_scc_ctrl {
+ u8 null_role1;
+ u8 null_role2;
+ u8 ebt_null; /* if tx null at EBT slot */
+};
+
union rtw89_btc_wl_role_info_map {
u16 val;
struct rtw89_btc_wl_role_info_bpos role;
@@ -1135,6 +1179,21 @@ struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */
struct rtw89_btc_wl_active_role active_role[RTW89_PORT_NUM];
};
+struct rtw89_btc_wl_role_info_v1 { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ union rtw89_btc_wl_role_info_map role_map;
+ struct rtw89_btc_wl_active_role_v1 active_role_v1[RTW89_PORT_NUM];
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+
+ u32 dbcc_en: 1;
+ u32 dbcc_chg: 1;
+ u32 dbcc_2g_phy: 2; /* which phy operates in 2G, HW_PHY_0 or HW_PHY_1 */
+ u32 link_mode_chg: 1;
+ u32 rsvd: 27;
+};
+
struct rtw89_btc_wl_ver_info {
u32 fw_coex; /* match with which coex_ver */
u32 fw;
@@ -1240,6 +1299,7 @@ struct rtw89_btc_wl_info {
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
struct rtw89_btc_wl_role_info role_info;
+ struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
struct rtw89_btc_rf_para rf_para;
@@ -1248,6 +1308,7 @@ struct rtw89_btc_wl_info {
u8 port_id[RTW89_WIFI_ROLE_MLME_MAX];
u8 rssi_level;
+ bool scbd_change;
u32 scbd;
};
@@ -1333,7 +1394,8 @@ struct rtw89_btc_bt_info {
u32 pag: 1;
u32 run_patch_code: 1;
u32 hi_lna_rx: 1;
- u32 rsvd: 22;
+ u32 scan_rx_low_pri: 1;
+ u32 rsvd: 21;
};
struct rtw89_btc_cx {
@@ -1346,32 +1408,43 @@ struct rtw89_btc_cx {
};
struct rtw89_btc_fbtc_tdma {
- u8 type;
+ u8 type; /* chip_info::fcxtdma_ver */
u8 rxflctrl;
u8 txpause;
u8 wtgle_n;
u8 leak_n;
u8 ext_ctrl;
- u8 rsvd0;
- u8 rsvd1;
+ u8 rxflctrl_role;
+ u8 option_ctrl;
+} __packed;
+
+struct rtw89_btc_fbtc_tdma_v1 {
+ u8 fver; /* chip_info::fcxtdma_ver */
+ u8 rsvd;
+ __le16 rsvd1;
+ struct rtw89_btc_fbtc_tdma tdma;
} __packed;
#define CXMREG_MAX 30
#define FCXMAX_STEP 255 /*STEP trace record cnt, Max:65535, default:255*/
-#define BTCRPT_VER 1
#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */
-enum rtw89_btc_bt_rfk_counter {
+enum rtw89_btc_bt_sta_counter {
BTC_BCNT_RFK_REQ = 0,
BTC_BCNT_RFK_GO = 1,
BTC_BCNT_RFK_REJECT = 2,
BTC_BCNT_RFK_FAIL = 3,
BTC_BCNT_RFK_TIMEOUT = 4,
- BTC_BCNT_RFK_MAX
+ BTC_BCNT_HI_TX = 5,
+ BTC_BCNT_HI_RX = 6,
+ BTC_BCNT_LO_TX = 7,
+ BTC_BCNT_LO_RX = 8,
+ BTC_BCNT_POLLUTED = 9,
+ BTC_BCNT_STA_MAX
};
struct rtw89_btc_fbtc_rpt_ctrl {
- u16 fver;
+ u16 fver; /* chip_info::fcxbtcrpt_ver */
u16 rpt_cnt; /* tmr counters */
u32 wl_fw_coex_ver; /* match which driver's coex version */
u32 wl_fw_cx_offload;
@@ -1384,11 +1457,56 @@ struct rtw89_btc_fbtc_rpt_ctrl {
u32 mb_a2dp_empty_cnt; /* a2dp empty count */
u32 mb_a2dp_flct_cnt; /* a2dp empty flow control counter */
u32 mb_a2dp_full_cnt; /* a2dp empty full counter */
- u32 bt_rfk_cnt[BTC_BCNT_RFK_MAX];
+ u32 bt_rfk_cnt[BTC_BCNT_HI_TX];
u32 c2h_cnt; /* fw send c2h counter */
u32 h2c_cnt; /* fw recv h2c counter */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info {
+ __le32 cnt; /* fw report counter */
+ __le32 en; /* report map */
+ __le32 para; /* not used */
+
+ __le32 cnt_c2h; /* fw send c2h counter */
+ __le32 cnt_h2c; /* fw recv h2c counter */
+ __le32 len_c2h; /* The total length of the last C2H */
+
+ __le32 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le32 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 cx_offload;
+ __le32 fw_ver;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty {
+ __le32 cnt_empty; /* a2dp empty count */
+ __le32 cnt_flowctrl; /* a2dp empty flow control counter */
+ __le32 cnt_tx;
+ __le32 cnt_ack;
+ __le32 cnt_nack;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox {
+ __le32 cnt_send_ok; /* fw send mailbox ok counter */
+ __le32 cnt_send_fail; /* fw send mailbox fail counter */
+ __le32 cnt_recv; /* fw recv mailbox counter */
+ struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty a2dp;
+} __packed;
+
+struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd1;
+ struct rtw89_btc_fbtc_rpt_ctrl_info rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info wl_fw_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+ __le32 bt_cnt[BTC_BCNT_STA_MAX];
+ struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
+} __packed;
+
enum rtw89_fbtc_ext_ctrl_type {
CXECTL_OFF = 0x0, /* tdma off */
CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */
@@ -1457,10 +1575,9 @@ enum { /* STEP TYPE */
CXSTEP_MAX,
};
-#define FCXGPIODBG_VER 1
#define BTC_DBG_MAX1 32
struct rtw89_btc_fbtc_gpio_dbg {
- u8 fver;
+ u8 fver; /* chip_info::fcxgpiodbg_ver */
u8 rsvd;
u16 rsvd2;
u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */
@@ -1468,9 +1585,8 @@ struct rtw89_btc_fbtc_gpio_dbg {
u8 gpio_map[BTC_DBG_MAX1]; /*the debug signals to GPIO-Position */
} __packed;
-#define FCXMREG_VER 1
struct rtw89_btc_fbtc_mreg_val {
- u8 fver;
+ u8 fver; /* chip_info::fcxmreg_ver */
u8 reg_num;
__le16 rsvd;
__le32 mreg_val[CXMREG_MAX];
@@ -1492,16 +1608,14 @@ struct rtw89_btc_fbtc_slot {
__le16 cxtype;
} __packed;
-#define FCXSLOTS_VER 1
struct rtw89_btc_fbtc_slots {
- u8 fver;
+ u8 fver; /* chip_info::fcxslots_ver */
u8 tbl_num;
__le16 rsvd;
__le32 update_map;
struct rtw89_btc_fbtc_slot slot[CXST_MAX];
} __packed;
-#define FCXSTEP_VER 2
struct rtw89_btc_fbtc_step {
u8 type;
u8 val;
@@ -1509,7 +1623,7 @@ struct rtw89_btc_fbtc_step {
} __packed;
struct rtw89_btc_fbtc_steps {
- u8 fver;
+ u8 fver; /* chip_info::fcxstep_ver */
u8 rsvd;
__le16 cnt;
__le16 pos_old;
@@ -1517,9 +1631,16 @@ struct rtw89_btc_fbtc_steps {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-#define FCXCYSTA_VER 2
-struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+struct rtw89_btc_fbtc_steps_v1 {
u8 fver;
+ u8 en;
+ __le16 rsvd;
+ __le32 cnt;
+ struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
+} __packed;
+
+struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+ u8 fver; /* chip_info::fcxcysta_ver */
u8 rsvd;
__le16 cycles; /* total cycle number */
__le16 cycles_a2dp[CXT_FLCTRL_MAX];
@@ -1544,19 +1665,80 @@ struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
__le16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
} __packed;
-#define FCXNULLSTA_VER 1
-struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+struct rtw89_btc_fbtc_fdd_try_info {
+ __le16 cycles[CXT_FLCTRL_MAX];
+ __le16 tavg[CXT_FLCTRL_MAX]; /* avg try BT-Slot-TDD/BT-slot-FDD time */
+ __le16 tmax[CXT_FLCTRL_MAX]; /* max try BT-Slot-TDD/BT-slot-FDD time */
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_time_info {
+ __le16 tavg[CXT_MAX]; /* avg wl/bt cycle time */
+ __le16 tmax[CXT_MAX]; /* max wl/bt cycle time */
+ __le16 tmaxdiff[CXT_MAX]; /* max wl-wl bt-bt cycle diff time */
+} __packed;
+
+struct rtw89_btc_fbtc_a2dp_trx_stat {
+ u8 empty_cnt;
+ u8 retry_cnt;
+ u8 tx_rate;
+ u8 tx_cnt;
+ u8 ack_cnt;
+ u8 nack_cnt;
+ u8 rsvd1;
+ u8 rsvd2;
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_a2dp_empty_info {
+ __le16 cnt; /* a2dp empty cnt */
+ __le16 cnt_timeout; /* a2dp empty timeout cnt */
+ __le16 tavg; /* avg a2dp empty time */
+ __le16 tmax; /* max a2dp empty time */
+} __packed;
+
+struct rtw89_btc_fbtc_cycle_leak_info {
+ __le32 cnt_rximr; /* the rximr occur at leak slot */
+ __le16 tavg; /* avg leak-slot time */
+ __le16 tmax; /* max leak-slot time */
+} __packed;
+
+struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
u8 fver;
u8 rsvd;
+ __le16 cycles; /* total cycle number */
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_time_info cycle_time;
+ struct rtw89_btc_fbtc_fdd_try_info fdd_try;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_a2dp_trx_stat a2dp_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_leak_info leak_slot;
+ __le32 slot_cnt[CXST_MAX]; /* slot count */
+ __le32 bcn_cnt[CXBCN_MAX];
+ __le32 collision_cnt; /* counter for events/timers that occur at the same time */
+ __le32 skip_cnt;
+ __le32 except_cnt;
+ __le32 except_map;
+} __packed;
+
+struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+ u8 fver; /* chip_info::fcxnullsta_ver */
+ u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
__le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
__le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */
} __packed;
-#define FCX_BTVER_VER 1
+struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
+ u8 fver; /* chip_info::fcxnullsta_ver */
+ u8 rsvd;
+ __le16 rsvd2;
+ __le32 max_t[2]; /* max_t for 0:null0/1:null1 */
+ __le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
+ __le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
+} __packed;
+
struct rtw89_btc_fbtc_btver {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtver_ver */
u8 rsvd;
__le16 rsvd2;
__le32 coex_ver; /* bit[15:8]->shared, bit[7:0]->non-shared */
@@ -1564,17 +1746,15 @@ struct rtw89_btc_fbtc_btver {
__le32 feature;
} __packed;
-#define FCX_BTSCAN_VER 1
struct rtw89_btc_fbtc_btscan {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtscan_ver */
u8 rsvd;
__le16 rsvd2;
u8 scan[6];
} __packed;
-#define FCX_BTAFH_VER 1
struct rtw89_btc_fbtc_btafh {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtafh_ver */
u8 rsvd;
__le16 rsvd2;
u8 afh_l[4]; /* bit0:2402, bit1:2403 ... bit31:2433 */
@@ -1582,9 +1762,8 @@ struct rtw89_btc_fbtc_btafh {
u8 afh_h[4]; /* bit0:2466, bit1:2467 ... bit14:2480 */
} __packed;
-#define FCX_BTDEVINFO_VER 1
struct rtw89_btc_fbtc_btdevinfo {
- u8 fver;
+ u8 fver; /* chip_info::fcxbtdevinfo_ver */
u8 rsvd;
__le16 vendor_id;
__le32 dev_name; /* only 24 bits valid */
@@ -1609,6 +1788,7 @@ struct rtw89_btc_dm {
struct rtw89_btc_rf_trx_para rf_trx_para;
struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
struct rtw89_btc_dm_step dm_step;
+ struct rtw89_btc_wl_scc_ctrl wl_scc;
union rtw89_btc_dm_error_map error;
u32 cnt_dm[BTC_DCNT_NUM];
u32 cnt_notify[BTC_NCNT_NUM];
@@ -1628,7 +1808,9 @@ struct rtw89_btc_dm {
u32 wl_btg_rx: 1;
u32 trx_para_level: 8;
u32 wl_stb_chg: 1;
- u32 rsvd: 3;
+ u32 pta_owner: 1;
+ u32 tdma_instant_excute: 1;
+ u32 rsvd: 1;
u16 slot_dur[CXST_MAX];
@@ -1650,8 +1832,6 @@ struct rtw89_btc_dbg {
u32 rb_val;
};
-#define FCXTDMA_VER 1
-
enum rtw89_btc_btf_fw_event {
BTF_EVNT_RPT = 0,
BTF_EVNT_BT_INFO = 1,
@@ -1704,12 +1884,18 @@ struct rtw89_btc_rpt_cmn_info {
struct rtw89_btc_report_ctrl_state {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw */
+ union {
+	struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw for 52A */
+	struct rtw89_btc_fbtc_rpt_ctrl_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+	struct rtw89_btc_fbtc_tdma_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_slots {
@@ -1719,17 +1905,26 @@ struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_fbtc_cysta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_cysta finfo; /* info from fw */
+ union {
+	struct rtw89_btc_fbtc_cysta finfo; /* info from fw for 52A */
+	struct rtw89_btc_fbtc_cysta_v1 finfo_v1; /* info from fw for 52C */
+ };
};
struct rtw89_btc_rpt_fbtc_step {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+ struct rtw89_btc_fbtc_steps_v1 finfo_v1; /* info from fw */
+ };
};
struct rtw89_btc_rpt_fbtc_nullsta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+ union {
+ struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+ struct rtw89_btc_fbtc_cynullsta_v1 finfo_v1; /* info from fw */
+ };
};
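
The report containers above now carry both firmware layouts in a union, and the valid member is selected by the per-chip format version (the chip_info::fcx*_ver fields named in the fver comments). A minimal sketch of how a consumer might branch on that version when reading the null-frame statistics; the helper name is hypothetical, and using the value 1 for the v1 layout is an assumption inferred from the *_v1 suffix, not something this hunk states:

	static u32 example_nullsta_ok_cnt(const struct rtw89_dev *rtwdev,
					  const struct rtw89_btc_rpt_fbtc_nullsta *rpt)
	{
		const struct rtw89_chip_info *chip = rtwdev->chip;

		/* both layouts share the leading fields; only result[][N]
		 * grows in v1, so pick the union member by format version
		 */
		if (chip->fcxnullsta_ver == 1)
			return le32_to_cpu(rpt->finfo_v1.result[0][1]); /* 1: ok */

		return le32_to_cpu(rpt->finfo.result[0][1]);
	}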
struct rtw89_btc_rpt_fbtc_mreg {
@@ -1887,7 +2082,9 @@ struct rtw89_ra_info {
u8 ra_csi_rate_en:1;
u8 fixed_csi_rate_en:1;
u8 cr_tbl_sel:1;
- u8 rsvd2:5;
+ u8 fix_giltf_en:1;
+ u8 fix_giltf:3;
+ u8 rsvd2:1;
u8 csi_mcs_ss_idx;
u8 csi_mode:2;
u8 csi_gi_ltf:3;
@@ -1911,19 +2108,20 @@ struct rtw89_ra_report {
struct rate_info txrate;
u32 bit_rate;
u16 hw_rate;
+ bool might_fallback_legacy;
};
DECLARE_EWMA(rssi, 10, 16);
-#define RTW89_BA_CAM_NUM 2
-
struct rtw89_ba_cam_entry {
+ struct list_head list;
u8 tid;
};
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
+#define RTW89_MAX_BA_CAM_NUM 8
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -1967,18 +2165,21 @@ struct rtw89_sec_cam_entry {
struct rtw89_sta {
u8 mac_id;
bool disassoc;
+ struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
struct rtw89_ra_info ra;
struct rtw89_ra_report ra_report;
int max_agg_wait;
u8 prev_rssi;
struct ewma_rssi avg_rssi;
+ struct ewma_rssi rssi[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
struct rtw89_addr_cam_entry addr_cam; /* AP mode or TDLS peer only */
struct rtw89_bssid_cam_entry bssid_cam; /* TDLS peer only */
+ struct list_head ba_cam_list;
bool use_cfg_mask;
struct cfg80211_bitrate_mask mask;
@@ -1987,9 +2188,6 @@ struct rtw89_sta {
u32 ampdu_max_time:4;
bool cctl_tx_retry_limit;
u32 data_tx_cnt_lmt:6;
-
- DECLARE_BITMAP(ba_cam_map, RTW89_BA_CAM_NUM);
- struct rtw89_ba_cam_entry ba_cam_entry[RTW89_BA_CAM_NUM];
};
struct rtw89_efuse {
@@ -2007,6 +2205,8 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
+#define RTW89_P2P_MAX_NOA_NUM 2
+
struct rtw89_vif {
struct list_head list;
struct rtw89_dev *rtwdev;
@@ -2022,6 +2222,7 @@ struct rtw89_vif {
u8 wmm;
u8 bcn_hit_cond;
u8 hit_rule;
+ u8 last_noa_nr;
bool trigger;
bool lsig_txop;
u8 tgt_ind;
@@ -2091,7 +2292,7 @@ struct rtw89_hci_info {
struct rtw89_chip_ops {
int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
- void (*disable_bb_rf)(struct rtw89_dev *rtwdev);
+ int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -2100,20 +2301,29 @@ struct rtw89_chip_ops {
bool (*write_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void (*set_channel)(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param);
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx);
void (*set_channel_help)(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p);
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx);
int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map);
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
- void (*rfk_band_changed)(struct rtw89_dev *rtwdev);
+ void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start);
void (*rfk_track)(struct rtw89_dev *rtwdev);
void (*power_trim)(struct rtw89_dev *rtwdev);
- void (*set_txpwr)(struct rtw89_dev *rtwdev);
- void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg);
@@ -2150,6 +2360,8 @@ struct rtw89_chip_ops {
void (*btc_bt_aci_imp)(struct rtw89_dev *rtwdev);
void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
+ void (*btc_set_policy)(struct rtw89_dev *rtwdev, u16 policy_type);
+ void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level);
};
enum rtw89_dma_ch {
@@ -2351,6 +2563,7 @@ struct rtw89_imr_info {
u32 cpu_disp_imr_set;
u32 other_disp_imr_clr;
u32 other_disp_imr_set;
+ u32 bbrpt_com_err_imr_reg;
u32 bbrpt_chinfo_err_imr_reg;
u32 bbrpt_err_imr_set;
u32 bbrpt_dfs_err_imr_reg;
@@ -2373,17 +2586,40 @@ struct rtw89_imr_info {
u32 tmac_imr_set;
};
+struct rtw89_rrsr_cfgs {
+ struct rtw89_reg3_def ref_rate;
+ struct rtw89_reg3_def rsc;
+};
+
+struct rtw89_dig_regs {
+ u32 seg0_pd_reg;
+ u32 pd_lower_bound_mask;
+ u32 pd_spatial_reuse_en;
+ struct rtw89_reg_def p0_lna_init;
+ struct rtw89_reg_def p1_lna_init;
+ struct rtw89_reg_def p0_tia_init;
+ struct rtw89_reg_def p1_tia_init;
+ struct rtw89_reg_def p0_rxb_init;
+ struct rtw89_reg_def p1_rxb_init;
+ struct rtw89_reg_def p0_p20_pagcugc_en;
+ struct rtw89_reg_def p0_s20_pagcugc_en;
+ struct rtw89_reg_def p1_p20_pagcugc_en;
+ struct rtw89_reg_def p1_s20_pagcugc_en;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
const char *fw_name;
u32 fifo_size;
+ u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
bool dis_2g_40m_ul_ofdma;
u32 rsvd_ple_ofst;
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
+ u8 support_chanctx_num;
u8 support_bands;
bool support_bw160;
bool hw_sec_hdr;
@@ -2393,6 +2629,9 @@ struct rtw89_chip_info {
u8 acam_num;
u8 bcam_num;
u8 scam_num;
+ u8 bacam_num;
+ u8 bacam_dynamic_num;
+ bool bacam_v1;
u8 sec_ctrl_efuse_size;
u32 physical_efuse_size;
@@ -2411,6 +2650,7 @@ struct rtw89_chip_info {
const struct rtw89_phy_table *nctl_table;
const struct rtw89_txpwr_table *byr_table;
const struct rtw89_phy_dig_gain_table *dig_table;
+ const struct rtw89_dig_regs *dig_regs;
const struct rtw89_phy_tssi_dbw_table *tssi_dbw_table;
const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
@@ -2436,6 +2676,20 @@ struct rtw89_chip_info {
u8 btcx_desired;
u8 scbd;
u8 mailbox;
+ u16 btc_fwinfo_buf;
+
+ u8 fcxbtcrpt_ver;
+ u8 fcxtdma_ver;
+ u8 fcxslots_ver;
+ u8 fcxcysta_ver;
+ u8 fcxstep_ver;
+ u8 fcxnullsta_ver;
+ u8 fcxmreg_ver;
+ u8 fcxgpiodbg_ver;
+ u8 fcxbtver_ver;
+ u8 fcxbtscan_ver;
+ u8 fcxbtafh_ver;
+ u8 fcxbtdevinfo_ver;
u8 afh_guard_ch;
const u8 *wl_rssi_thres;
@@ -2463,6 +2717,8 @@ struct rtw89_chip_info {
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
const struct rtw89_imr_info *imr_info;
+ const struct rtw89_rrsr_cfgs *rrsr_cfgs;
+ u32 dma_ch_mask;
};
union rtw89_bus_info {
@@ -2514,6 +2770,8 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD,
RTW89_FW_FEATURE_TX_WAKE,
RTW89_FW_FEATURE_CRASH_TRIGGER,
+ RTW89_FW_FEATURE_PACKET_DROP,
+ RTW89_FW_FEATURE_NO_DEEP_PS,
};
struct rtw89_fw_suit {
@@ -2536,6 +2794,18 @@ struct rtw89_fw_suit {
#define RTW89_FW_SUIT_VER_CODE(s) \
RTW89_FW_VER_CODE((s)->major_ver, (s)->minor_ver, (s)->sub_ver, (s)->sub_idex)
+#define RTW89_MFW_HDR_VER_CODE(mfw_hdr) \
+ RTW89_FW_VER_CODE((mfw_hdr)->ver.major, \
+ (mfw_hdr)->ver.minor, \
+ (mfw_hdr)->ver.sub, \
+ (mfw_hdr)->ver.idx)
+
+#define RTW89_FW_HDR_VER_CODE(fw_hdr) \
+ RTW89_FW_VER_CODE(GET_FW_HDR_MAJOR_VERSION(fw_hdr), \
+ GET_FW_HDR_MINOR_VERSION(fw_hdr), \
+ GET_FW_HDR_SUBVERSION(fw_hdr), \
+ GET_FW_HDR_SUBINDEX(fw_hdr))
+
struct rtw89_fw_info {
const struct firmware *firmware;
struct rtw89_dev *rtwdev;
@@ -2558,6 +2828,8 @@ struct rtw89_cam_info {
DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM);
DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM);
DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+ DECLARE_BITMAP(ba_cam_map, RTW89_MAX_BA_CAM_NUM);
+ struct rtw89_ba_cam_entry ba_cam_entry[RTW89_MAX_BA_CAM_NUM];
};
enum rtw89_sar_sources {
@@ -2599,24 +2871,34 @@ struct rtw89_sar_info {
};
};
+struct rtw89_chanctx_cfg {
+ enum rtw89_sub_entity_idx idx;
+};
+
+enum rtw89_entity_mode {
+ RTW89_ENTITY_MODE_SCC,
+};
+
struct rtw89_hal {
u32 rx_fltr;
u8 cv;
- u8 current_channel;
- u32 current_freq;
- u8 prev_primary_channel;
- u8 current_primary_channel;
- enum rtw89_subband current_subband;
- u8 current_band_width;
- u8 prev_band_type;
- u8 current_band_type;
u32 sw_amsdu_max_size;
u32 antenna_tx;
u32 antenna_rx;
u8 tx_nss;
u8 rx_nss;
+ bool tx_path_diversity;
bool support_cckpd;
bool support_igi;
+
+ DECLARE_BITMAP(entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ struct cfg80211_chan_def chandef[NUM_OF_RTW89_SUB_ENTITY];
+
+ bool entity_active;
+ enum rtw89_entity_mode entity_mode;
+
+ struct rtw89_chan chan[NUM_OF_RTW89_SUB_ENTITY];
+ struct rtw89_chan_rcd chan_rcd[NUM_OF_RTW89_SUB_ENTITY];
};
#define RTW89_MAX_MAC_ID_NUM 128
@@ -2632,11 +2914,37 @@ enum rtw89_flags {
RTW89_FLAG_LEISURE_PS,
RTW89_FLAG_LOW_POWER_MODE,
RTW89_FLAG_INACTIVE_PS,
- RTW89_FLAG_RESTART_TRIGGER,
+ RTW89_FLAG_CRASH_SIMULATING,
NUM_OF_RTW89_FLAGS,
};
+enum rtw89_pkt_drop_sel {
+ RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_ALL,
+ RTW89_PKT_DROP_SEL_MG0_ONCE,
+ RTW89_PKT_DROP_SEL_HIQ_ONCE,
+ RTW89_PKT_DROP_SEL_HIQ_PORT,
+ RTW89_PKT_DROP_SEL_HIQ_MBSSID,
+ RTW89_PKT_DROP_SEL_BAND,
+ RTW89_PKT_DROP_SEL_BAND_ONCE,
+ RTW89_PKT_DROP_SEL_REL_MACID,
+ RTW89_PKT_DROP_SEL_REL_HIQ_PORT,
+ RTW89_PKT_DROP_SEL_REL_HIQ_MBSSID,
+};
+
+struct rtw89_pkt_drop_params {
+ enum rtw89_pkt_drop_sel sel;
+ enum rtw89_mac_idx mac_band;
+ u8 macid;
+ u8 port;
+ u8 mbssid;
+ bool tf_trs;
+};
+
struct rtw89_pkt_stat {
u16 beacon_nr;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
@@ -3073,6 +3381,7 @@ struct rtw89_hw_scan_info {
u8 op_chan;
u8 op_bw;
u8 op_band;
+ u32 last_chan_idx;
};
enum rtw89_phy_bb_gain_band {
@@ -3119,6 +3428,7 @@ struct rtw89_phy_efuse_gain {
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
+ const struct ieee80211_ops *ops;
bool dbcc_en;
struct rtw89_hw_scan_info scan_info;
@@ -3498,6 +3808,16 @@ static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw89_vif *rtwvif)
return container_of(p, struct ieee80211_vif, drv_priv);
}
+static inline struct ieee80211_vif *rtwvif_to_vif_safe(struct rtw89_vif *rtwvif)
+{
+ return rtwvif ? rtwvif_to_vif(rtwvif) : NULL;
+}
+
+static inline struct rtw89_vif *vif_to_rtwvif_safe(struct ieee80211_vif *vif)
+{
+ return vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
+}
+
static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta)
{
void *p = rtwsta;
@@ -3542,6 +3862,20 @@ enum nl80211_band rtw89_hw_to_nl80211_band(enum rtw89_band hw_band)
}
static inline
+enum rtw89_band rtw89_nl80211_to_hw_band(enum nl80211_band nl_band)
+{
+ switch (nl_band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ return RTW89_BAND_2G;
+ case NL80211_BAND_5GHZ:
+ return RTW89_BAND_5G;
+ case NL80211_BAND_6GHZ:
+ return RTW89_BAND_6G;
+ }
+}
+
+static inline
enum rtw89_bandwidth nl_to_rtw89_bandwidth(enum nl80211_chan_width width)
{
switch (width) {
@@ -3588,16 +3922,51 @@ struct rtw89_bssid_cam_entry *rtw89_get_bssid_cam_of(struct rtw89_vif *rtwvif,
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtwdev->chip->ops->set_channel_help(rtwdev, true, p);
+ rtwdev->chip->ops->set_channel_help(rtwdev, true, p, chan,
+ mac_idx, phy_idx);
}
static inline
void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtwdev->chip->ops->set_channel_help(rtwdev, false, p, chan,
+ mac_idx, phy_idx);
+}
+
+static inline
+const struct cfg80211_chan_def *rtw89_chandef_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
{
- rtwdev->chip->ops->set_channel_help(rtwdev, false, p);
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chandef[idx];
+}
+
+static inline
+const struct rtw89_chan *rtw89_chan_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chan[idx];
+}
+
+static inline
+const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ return &hal->chan_rcd[idx];
}
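
The accessors above replace the flat hal->current_* fields with per-sub-entity channel state. A small usage sketch, with a made-up function name for illustration; the real call sites converted by this patch (for example __print_regd() in debug.c further down) follow the same pattern:

	static bool example_on_2ghz(struct rtw89_dev *rtwdev)
	{
		const struct rtw89_chan *chan =
			rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);

		/* band/channel/bandwidth now live in the per-entity
		 * rtw89_chan instead of scattered rtw89_hal fields
		 */
		return chan->band_type == RTW89_BAND_2G;
	}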
static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
@@ -3632,12 +4001,13 @@ static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
chip->ops->rfk_channel(rtwdev);
}
-static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev)
+static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->rfk_band_changed)
- chip->ops->rfk_band_changed(rtwdev);
+ chip->ops->rfk_band_changed(rtwdev, phy_idx);
}
static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -3661,19 +4031,7 @@ static inline void rtw89_chip_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
if (chip->ops->set_txpwr_ctrl)
- chip->ops->set_txpwr_ctrl(rtwdev);
-}
-
-static inline void rtw89_chip_set_txpwr(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 ch = rtwdev->hal.current_channel;
-
- if (!ch)
- return;
-
- if (chip->ops->set_txpwr)
- chip->ops->set_txpwr(rtwdev);
+ chip->ops->set_txpwr_ctrl(rtwdev, RTW89_PHY_0);
}
static inline void rtw89_chip_power_trim(struct rtw89_dev *rtwdev)
@@ -3902,16 +4260,27 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_register(struct rtw89_dev *rtwdev);
void rtw89_core_unregister(struct rtw89_dev *rtwdev);
+struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
+ u32 bus_data_size,
+ const struct rtw89_chip_info *chip);
+void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
+void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
+void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_set_channel(struct rtw89_dev *rtwdev);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
-int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
-int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 829c61da99bb..730e83d54257 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -525,7 +525,8 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev)
{
- u8 band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
switch (regd) {
@@ -2189,6 +2190,37 @@ out:
return count;
}
+static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true);
+ switch (pkt_id) {
+ case 0xffff:
+ return -ETIMEDOUT;
+ case 0xfff:
+ return -ENOMEM;
+ default:
+ break;
+ }
+
+	/* intentionally enqueue two packets while holding only one packet ID */
+ ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
+ ctrl_para.start_pktid = pkt_id;
+ ctrl_para.end_pktid = pkt_id;
+	ctrl_para.pkt_num = 1; /* pkt_num counts from 0, so this requests two packets */
+ ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
+ ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
+
+ if (rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true))
+ return -EFAULT;
+
+ return 0;
+}
+
static int
rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
{
@@ -2196,10 +2228,15 @@ rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
seq_printf(m, "%d\n",
- test_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags));
+ test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
return 0;
}
+enum rtw89_dbg_crash_simulation_type {
+ RTW89_DBG_SIM_CPU_EXCEPTION = 1,
+ RTW89_DBG_SIM_CTRL_ERROR = 2,
+};
+
static ssize_t
rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
size_t count, loff_t *loff)
@@ -2207,22 +2244,30 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
struct seq_file *m = (struct seq_file *)filp->private_data;
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- bool fw_crash;
+ int (*sim)(struct rtw89_dev *rtwdev);
+ u8 crash_type;
int ret;
- if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
- return -EOPNOTSUPP;
-
- ret = kstrtobool_from_user(user_buf, count, &fw_crash);
+ ret = kstrtou8_from_user(user_buf, count, 0, &crash_type);
if (ret)
return -EINVAL;
- if (!fw_crash)
+ switch (crash_type) {
+ case RTW89_DBG_SIM_CPU_EXCEPTION:
+ if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw))
+ return -EOPNOTSUPP;
+ sim = rtw89_fw_h2c_trigger_cpu_exception;
+ break;
+ case RTW89_DBG_SIM_CTRL_ERROR:
+ sim = rtw89_dbg_trigger_ctrl_error;
+ break;
+ default:
return -EINVAL;
+ }
mutex_lock(&rtwdev->mutex);
- set_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
- ret = rtw89_fw_h2c_trigger_cpu_exception(rtwdev);
+ set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+ ret = sim(rtwdev);
mutex_unlock(&rtwdev->mutex);
if (ret)
@@ -2289,7 +2334,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
struct rate_info *rate = &rtwsta->ra_report.txrate;
struct ieee80211_rx_status *status = &rtwsta->rx_status;
struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_hal *hal = &rtwdev->hal;
u8 rssi;
+ int i;
seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
@@ -2305,9 +2353,10 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
he_gi_str[rate->he_gi] : "N/A");
else
seq_printf(m, "Legacy %d", rate->legacy);
+ seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? " FB_G" : "");
seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
- sta->max_rc_amsdu_len);
+ sta->deflink.agg.max_rc_amsdu_len);
seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id);
@@ -2333,8 +2382,15 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d)\n",
+ seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
+ rssi = ewma_rssi_read(&rtwsta->rssi[i]);
+ seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
+ hal->tx_path_diversity && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ i + 1 == rtwdev->chip->rf_path_num ? "" : ", ");
+ }
+ seq_puts(m, "]\n");
}
static void
@@ -2433,6 +2489,26 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
}
+static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
+{
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_ba_cam_entry *entry;
+ bool first = true;
+
+ list_for_each_entry(entry, &rtwsta->ba_cam_list, list) {
+ if (first) {
+ seq_puts(m, "\tba_cam ");
+ first = false;
+ } else {
+ seq_puts(m, ", ");
+ }
+ seq_printf(m, "tid[%u]=%d", entry->tid,
+ (int)(entry - rtwdev->cam_info.ba_cam_entry));
+ }
+ seq_puts(m, "\n");
+}
+
static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -2441,6 +2517,7 @@ static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
sta->tdls ? "(TDLS)" : "");
rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+ rtw89_dump_ba_cam(m, rtwsta);
}
static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
@@ -2449,6 +2526,8 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ mutex_lock(&rtwdev->mutex);
+
seq_puts(m, "map:\n");
seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
rtwdev->mac_id_map);
@@ -2458,12 +2537,16 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
cam_info->bssid_cam_map);
seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
cam_info->sec_cam_map);
+ seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
+ cam_info->ba_cam_map);
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+ mutex_unlock(&rtwdev->mutex);
+
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index 6176152dbf6b..ee243aadde87 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -25,6 +25,7 @@ enum rtw89_debug_mask {
RTW89_DBG_BF = BIT(14),
RTW89_DBG_HW_SCAN = BIT(15),
RTW89_DBG_SAR = BIT(16),
+ RTW89_DBG_STATE = BIT(17),
RTW89_DBG_UNEXP = BIT(31),
};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 6473015a6b2a..d57e3610fb88 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -224,6 +225,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
};
static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
@@ -247,6 +254,46 @@ static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
}
}
+void rtw89_early_fw_feature_recognize(struct device *device,
+ const struct rtw89_chip_info *chip,
+ u32 *early_feat_map)
+{
+ union {
+ struct rtw89_mfw_hdr mfw_hdr;
+ u8 fw_hdr[RTW89_FW_HDR_SIZE];
+ } buf = {};
+ const struct firmware *firmware;
+ u32 ver_code;
+ int ret;
+ int i;
+
+ ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
+ device, &buf, sizeof(buf), 0);
+ if (ret) {
+ dev_err(device, "failed to early request firmware: %d\n", ret);
+ return;
+ }
+
+ ver_code = buf.mfw_hdr.sig != RTW89_MFW_SIG ?
+ RTW89_FW_HDR_VER_CODE(&buf.fw_hdr) :
+ RTW89_MFW_HDR_VER_CODE(&buf.mfw_hdr);
+ if (!ver_code)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
+ const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
+
+ if (chip->chip_id != ent->chip_id)
+ continue;
+
+ if (ent->cond(ver_code, ent->ver_code))
+ *early_feat_map |= BIT(ent->feature);
+ }
+
+out:
+ release_firmware(firmware);
+}
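
rtw89_early_fw_feature_recognize() reads just enough of the firmware header to fill a feature bitmap before the full image is loaded. A hedged sketch of how a bus probe path might consume it; the wrapper function and the reaction to the feature bit are assumptions for illustration, only the recognize call itself comes from this patch:

	static void example_early_probe(struct device *dev,
					const struct rtw89_chip_info *chip)
	{
		u32 early_feat_map = 0;

		rtw89_early_fw_feature_recognize(dev, chip, &early_feat_map);

		/* bus code can adapt before core init, e.g. when the
		 * firmware is known to lack deep power-save support
		 */
		if (early_feat_map & BIT(RTW89_FW_FEATURE_NO_DEEP_PS))
			dev_info(dev, "firmware without deep PS detected\n");
	}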
+
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
int ret;
@@ -571,6 +618,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
if (!skb) {
@@ -587,7 +635,8 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
H2C_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -596,7 +645,7 @@ int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_DCTL_SEC_CAM_LEN 68
@@ -605,6 +654,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
if (!skb) {
@@ -621,7 +671,8 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
H2C_DCTL_SEC_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -630,7 +681,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
@@ -638,14 +689,16 @@ EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
u8 macid = rtwsta->mac_id;
struct sk_buff *skb;
u8 entry_idx;
int ret;
ret = valid ?
- rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) :
- rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx);
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
if (ret) {
/* it still works even if we don't have static BA CAM, because
* hardware can create dynamic BA CAM automatically.
@@ -663,7 +716,10 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
- SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
+ if (chip->bacam_v1)
+ SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ else
+ SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
if (!valid)
goto end;
SET_BA_CAM_VALID(skb->data, valid);
@@ -676,6 +732,11 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
+ if (chip->bacam_v1) {
+ SET_BA_CAM_STD_EN(skb->data, 1);
+ SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ }
+
end:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
@@ -683,7 +744,8 @@ end:
H2C_FUNC_MAC_BA_CAM, 0, 1,
H2C_BA_CAM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -692,7 +754,59 @@ end:
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
+ u8 entry_idx, u8 uid)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BA_CAM_LEN);
+
+ SET_BA_CAM_VALID(skb->data, 1);
+ SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ SET_BA_CAM_UID(skb->data, uid);
+ SET_BA_CAM_BAND(skb->data, 0);
+ SET_BA_CAM_STD_EN(skb->data, 0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM, 0, 1,
+ H2C_BA_CAM_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 entry_idx = chip->bacam_num;
+ u8 uid = 0;
+ int i;
+
+ for (i = 0; i < chip->bacam_dynamic_num; i++) {
+ rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
+ entry_idx++;
+ uid++;
+ }
}
#define H2C_LOG_CFG_LEN 12
@@ -701,6 +815,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
struct sk_buff *skb;
u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
@@ -720,7 +835,8 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
H2C_FUNC_LOG_CFG, 0, 0,
H2C_LOG_CFG_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -729,7 +845,7 @@ int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_GENERAL_PKT_LEN 6
@@ -737,6 +853,7 @@ fail:
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
if (!skb) {
@@ -757,7 +874,8 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
H2C_GENERAL_PKT_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -766,7 +884,7 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LPS_PARM_LEN 8
@@ -774,6 +892,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
if (!skb) {
@@ -799,7 +918,8 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_LPS_PARM, 0, 1,
H2C_LPS_PARM_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -808,7 +928,73 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+#define H2C_P2P_ACT_LEN 20
+int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_p2p_noa_desc *desc,
+ u8 act, u8 noa_id)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
+ u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
+ struct sk_buff *skb;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_P2P_ACT_LEN);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
+ RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
+ RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
+ RTW89_SET_FWCMD_P2P_ACT(cmd, act);
+ RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
+ RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
+ if (desc) {
+ RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
+ RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
+ RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
+ RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
+ RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_P2P_ACT, 0, 0,
+ H2C_P2P_ACT_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
+ u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+
+ SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
+ SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
+ SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}
#define H2C_CMC_TBL_LEN 68
@@ -816,11 +1002,9 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
struct sk_buff *skb;
- u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
- u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
u8 macid = rtwvif->mac_id;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
@@ -832,11 +1016,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
SET_CTRL_INFO_OPERATION(skb->data, 1);
if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
- SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
- SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
- SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
- SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
+ __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
SET_CMC_TBL_ANTSEL_A(skb->data, 0);
SET_CMC_TBL_ANTSEL_B(skb->data, 0);
SET_CMC_TBL_ANTSEL_C(skb->data, 0);
@@ -852,7 +1032,8 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -861,7 +1042,7 @@ int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
@@ -926,17 +1107,26 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u16 lowest_rate;
+ int ret;
memset(pads, 0, sizeof(pads));
if (sta)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
@@ -947,10 +1137,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
SET_CTRL_INFO_OPERATION(skb->data, 1);
SET_CMC_TBL_DISRTSFB(skb->data, 1);
SET_CMC_TBL_DISDATAFB(skb->data, 1);
- if (hal->current_band_type == RTW89_BAND_2G)
- SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1);
- else
- SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6);
+ SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
if (vif->type == NL80211_IFTYPE_STATION)
@@ -980,7 +1167,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -989,7 +1177,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
@@ -997,6 +1185,7 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
if (!skb) {
@@ -1020,7 +1209,47 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
chip->h2c_cctl_func_id, 0, 1,
H2C_CMC_TBL_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CMC_TBL_LEN);
+ SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_OPERATION(skb->data, 1);
+
+ __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ H2C_CMC_TBL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1029,19 +1258,28 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- struct rtw89_hal *hal = &rtwdev->hal;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct sk_buff *skb;
struct sk_buff *skb_beacon;
u16 tim_offset;
int bcn_total_len;
+ u16 beacon_rate;
+ int ret;
+
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
NULL, 0);
@@ -1066,8 +1304,7 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
- SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ?
- RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6);
+ SET_BCN_UPD_RATE(skb->data, beacon_rate);
skb_put_data(skb, skb_beacon->data, skb_beacon->len);
dev_kfree_skb_any(skb_beacon);
@@ -1077,10 +1314,11 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_BCN_UPD, 0, 1,
bcn_total_len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
return 0;
@@ -1095,6 +1333,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role;
+ int ret;
if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
if (rtwsta)
@@ -1121,7 +1360,8 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
H2C_ROLE_MAINTAIN_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1130,7 +1370,7 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_JOIN_INFO_LEN 4
@@ -1141,6 +1381,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role = rtwvif->self_role;
u8 net_type = rtwvif->net_type;
+ int ret;
if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
self_role = RTW89_SELF_ROLE_AP_CLIENT;
@@ -1172,7 +1413,8 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_MAC_JOININFO, 0, 1,
H2C_JOIN_INFO_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1181,7 +1423,7 @@ int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
@@ -1190,6 +1432,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
struct rtw89_fw_macid_pause_grp h2c = {{0}};
u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
if (!skb) {
@@ -1206,7 +1449,8 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1215,7 +1459,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_EDCA_LEN 12
@@ -1223,6 +1467,7 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u8 ac, u32 val)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
if (!skb) {
@@ -1241,7 +1486,8 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
H2C_FUNC_USR_EDCA, 0, 1,
H2C_EDCA_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1250,7 +1496,47 @@ int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+#define H2C_TSF32_TOGL_LEN 4
+int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool en)
+{
+ struct sk_buff *skb;
+ u16 early_us = en ? 2000 : 0;
+ u8 *cmd;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c tsf32 toggle\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_TSF32_TOGL_LEN);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
+ RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
+ RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
+ RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_TSF32_TOGL, 0, 0,
+ H2C_TSF32_TOGL_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
#define H2C_OFLD_CFG_LEN 8
@@ -1258,6 +1544,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
if (!skb) {
@@ -1271,7 +1558,8 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
H2C_FUNC_OFLD_CFG, 0, 1,
H2C_OFLD_CFG_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1280,7 +1568,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_RA_LEN 16
@@ -1288,6 +1576,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
{
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
if (!skb) {
@@ -1318,6 +1607,8 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
+ RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);
if (csi) {
RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
@@ -1336,7 +1627,8 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
H2C_RA_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1345,7 +1637,7 @@ int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_CXDRVHDR 2
@@ -1359,6 +1651,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
struct rtw89_btc_ant_info *ant = &module->ant;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
if (!skb) {
@@ -1395,7 +1688,8 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_INIT);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1404,10 +1698,15 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
+#define PORT_DATA_OFFSET 4
+#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
+ H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
+ H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1416,7 +1715,9 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role *active = role_info->active_role;
struct sk_buff *skb;
+ u8 offset = 0;
u8 *cmd;
+ int ret;
int i;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
@@ -1447,19 +1748,19 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
- RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
- RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
- RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
- RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i);
- RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i);
- RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i);
- RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i);
- RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i);
- RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i);
- RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i);
- RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i);
- RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i);
- RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1467,7 +1768,8 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_ROLE);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1476,16 +1778,101 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
+}
+
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
+ struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
+ struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
+ struct sk_buff *skb;
+ u8 *cmd, offset;
+ int ret;
+ int i;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
+
+ RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
+ RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
+ RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
+ RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
+
+ offset = PORT_DATA_OFFSET;
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
+ }
+
+ offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
+ RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
+ RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
+ RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_ROLE_V1);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
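
rtw89_fw_h2c_cxdrv_role_v1() mirrors the original role report but adds the per-port stride (PORT_DATA_OFFSET) and the trailing DBCC block. Which variant a chip sends is decided outside this hunk; the dispatch below is only an illustration keyed on the chip ID that appears elsewhere in this patch, not the driver's actual selection logic:

	static int example_send_cxdrv_role(struct rtw89_dev *rtwdev)
	{
		/* illustrative dispatch: RTL8852A keeps the original
		 * layout, newer chips use the v1 report
		 */
		if (rtwdev->chip->chip_id == RTL8852A)
			return rtw89_fw_h2c_cxdrv_role(rtwdev);

		return rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
	}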
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
if (!skb) {
@@ -1501,14 +1888,16 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
- RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
+ if (chip->chip_id == RTL8852A)
+ RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_CTRL);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1517,7 +1906,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
@@ -1528,6 +1917,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
if (!skb) {
@@ -1551,7 +1941,8 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
SET_DRV_INFO, 0, 0,
H2C_LEN_CXDRVINFO_RFK);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1560,7 +1951,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_PKT_OFLD 4
@@ -1568,6 +1959,7 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
{
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
if (!skb) {
@@ -1585,7 +1977,8 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
H2C_FUNC_PACKET_OFLD, 1, 1,
H2C_LEN_PKT_OFLD);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1594,7 +1987,7 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
@@ -1603,6 +1996,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb;
u8 *cmd;
u8 alloc_id;
+ int ret;
alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
RTW89_MAX_PKT_OFLD_NUM);
@@ -1629,7 +2023,8 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
H2C_FUNC_PACKET_OFLD, 1, 1,
H2C_LEN_PKT_OFLD + skb_ofld->len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1638,7 +2033,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
#define H2C_LEN_SCAN_LIST_OFFLOAD 4
@@ -1649,6 +2044,7 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
struct sk_buff *skb;
int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
@@ -1693,7 +2089,8 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1702,10 +2099,10 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
-#define H2C_LEN_SCAN_OFFLOAD 20
+#define H2C_LEN_SCAN_OFFLOAD 28
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
struct rtw89_vif *rtwvif)
@@ -1713,6 +2110,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct sk_buff *skb;
u8 *cmd;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
if (!skb) {
@@ -1736,6 +2134,8 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
scan_info->op_pri_ch);
RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
scan_info->op_chan);
+ RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
+ scan_info->op_band);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1743,7 +2143,8 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
H2C_LEN_SCAN_OFFLOAD);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1752,7 +2153,7 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
@@ -1762,6 +2163,7 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
u8 class = info->rf_path == RF_PATH_A ?
H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -1774,7 +2176,8 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
H2C_CAT_OUTSRC, class, page, 0, 0,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1783,14 +2186,16 @@ int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
struct rtw89_fw_h2c_rf_get_mccch *mccch;
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
if (!skb) {
@@ -1804,15 +2209,16 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
- mccch->current_channel = cpu_to_le32(rtwdev->hal.current_channel);
- mccch->current_band_type = cpu_to_le32(rtwdev->hal.current_band_type);
+ mccch->current_channel = cpu_to_le32(chan->channel);
+ mccch->current_band_type = cpu_to_le32(chan->band_type);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
sizeof(*mccch));
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1821,7 +2227,7 @@ int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
@@ -1830,6 +2236,7 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
bool rack, bool dack)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -1842,7 +2249,8 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1851,12 +2259,13 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
if (!skb) {
@@ -1865,7 +2274,8 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
}
skb_put_data(skb, buf, len);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -1874,7 +2284,7 @@ int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
@@ -2169,7 +2579,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
if (ssid_num) {
ch_info->num_pkt = ssid_num;
- band = ch_info->ch_band;
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
list_for_each_entry(info, &scan_info->pkt_list[band], list) {
ch_info->probe_id = info->id;
@@ -2211,13 +2621,16 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
struct ieee80211_channel *channel;
struct list_head chan_list;
bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
- int list_len = req->n_channels, off_chan_time = 0;
+ int list_len, off_chan_time = 0;
enum rtw89_chan_type type;
- int ret = 0, i;
+ int ret = 0;
+ u32 idx;
INIT_LIST_HEAD(&chan_list);
- for (i = 0; i < req->n_channels; i++) {
- channel = req->channels[i];
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
if (!ch_info) {
ret = -ENOMEM;
@@ -2226,7 +2639,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
ch_info->period = req->duration_mandatory ?
req->duration : RTW89_CHANNEL_TIME;
- ch_info->ch_band = channel->band;
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
ch_info->central_ch = channel->hw_value;
ch_info->pri_ch = channel->hw_value;
ch_info->rand_seq_num = random_seq;
@@ -2258,6 +2671,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
list_add_tail(&ch_info->list, &chan_list);
off_chan_time += ch_info->period;
}
+ rtwdev->scan_info.last_chan_idx = idx;
ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
out:
@@ -2289,9 +2703,11 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct cfg80211_scan_request *req = &scan_req->req;
+ u32 rx_fltr = rtwdev->hal.rx_fltr;
u8 mac_addr[ETH_ALEN];
rtwdev->scan_info.scanning_vif = vif;
+ rtwdev->scan_info.last_chan_idx = 0;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
@@ -2303,13 +2719,13 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
ether_addr_copy(mac_addr, vif->addr);
rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
- rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
- rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
- rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+ rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rx_fltr &= ~B_AX_A_BC;
+ rx_fltr &= ~B_AX_A_A1_MATCH;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
B_AX_RX_FLTR_CFG_MASK,
- rtwdev->hal.rx_fltr);
+ rx_fltr);
}
void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
@@ -2323,9 +2739,6 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (!vif)
return;
- rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
- rtwdev->hal.rx_fltr |= B_AX_A_BC;
- rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
B_AX_RX_FLTR_CFG_MASK,
@@ -2339,6 +2752,7 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
+ rtwdev->scan_info.last_chan_idx = 0;
rtwdev->scan_info.scanning_vif = NULL;
if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
@@ -2377,18 +2791,18 @@ out:
void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_chan new;
if (backup) {
- scan_info->op_pri_ch = hal->current_primary_channel;
- scan_info->op_chan = hal->current_channel;
- scan_info->op_bw = hal->current_band_width;
- scan_info->op_band = hal->current_band_type;
+ scan_info->op_pri_ch = cur->primary_channel;
+ scan_info->op_chan = cur->channel;
+ scan_info->op_bw = cur->band_width;
+ scan_info->op_band = cur->band_type;
} else {
- hal->current_primary_channel = scan_info->op_pri_ch;
- hal->current_channel = scan_info->op_chan;
- hal->current_band_width = scan_info->op_bw;
- hal->current_band_type = scan_info->op_band;
+ rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
+ scan_info->op_band, scan_info->op_bw);
+ rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
}
}
@@ -2397,6 +2811,7 @@ void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
struct sk_buff *skb;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
if (!skb) {
@@ -2415,7 +2830,62 @@ int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
H2C_FUNC_CPU_EXCEPTION, 0, 0,
H2C_FW_CPU_EXCEPTION_LEN);
- if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+#define H2C_PKT_DROP_LEN 24
+int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ const struct rtw89_pkt_drop_params *params)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev,
+ "failed to alloc skb for packet drop\n");
+ return -ENOMEM;
+ }
+
+ switch (params->sel) {
+ case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
+ case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "H2C of pkt drop might not fully support sel: %d yet\n",
+ params->sel);
+ break;
+ }
+
+ skb_put(skb, H2C_PKT_DROP_LEN);
+ RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
+ RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
+ RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
+ RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
+ RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
+ RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_PKT_DROP, 0, 0,
+ H2C_PKT_DROP_LEN);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
@@ -2424,5 +2894,5 @@ int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
fail:
dev_kfree_skb_any(skb);
- return -EBUSY;
+ return ret;
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index e75ad22aa85d..0047d5d0e9b1 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -63,21 +63,32 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
};
-struct rtw89_c2h_phy_cap {
- u32 func:7;
- u32 ack:1;
- u32 len:4;
- u32 seq:4;
- u32 rx_nss:8;
- u32 bw:8;
-
- u32 tx_nss:8;
- u32 prot:8;
- u32 nic:8;
- u32 wl_func:8;
-
- u32 hw_type:8;
-} __packed;
+#define RTW89_GET_C2H_PHYCAP_FUNC(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(6, 0))
+#define RTW89_GET_C2H_PHYCAP_ACK(info) \
+ u32_get_bits(*((const u32 *)(info)), BIT(7))
+#define RTW89_GET_C2H_PHYCAP_LEN(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(11, 8))
+#define RTW89_GET_C2H_PHYCAP_SEQ(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(15, 12))
+#define RTW89_GET_C2H_PHYCAP_RX_NSS(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(23, 16))
+#define RTW89_GET_C2H_PHYCAP_BW(info) \
+ u32_get_bits(*((const u32 *)(info)), GENMASK(31, 24))
+#define RTW89_GET_C2H_PHYCAP_TX_NSS(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(7, 0))
+#define RTW89_GET_C2H_PHYCAP_PROT(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(15, 8))
+#define RTW89_GET_C2H_PHYCAP_NIC(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(23, 16))
+#define RTW89_GET_C2H_PHYCAP_WL_FUNC(info) \
+ u32_get_bits(*((const u32 *)(info) + 1), GENMASK(31, 24))
+#define RTW89_GET_C2H_PHYCAP_HW_TYPE(info) \
+ u32_get_bits(*((const u32 *)(info) + 2), GENMASK(7, 0))
+#define RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(info) \
+ u32_get_bits(*((const u32 *)(info) + 3), GENMASK(15, 8))
+#define RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(info) \
+ u32_get_bits(*((const u32 *)(info) + 3), GENMASK(23, 16))
enum rtw89_fw_c2h_category {
RTW89_C2H_CAT_TEST,
@@ -144,6 +155,13 @@ enum rtw89_chan_type {
RTW89_CHAN_DFS,
};
+enum rtw89_p2pps_action {
+ RTW89_P2P_ACT_INIT = 0,
+ RTW89_P2P_ACT_UPDATE = 1,
+ RTW89_P2P_ACT_REMOVE = 2,
+ RTW89_P2P_ACT_TERMINATE = 3,
+};
+
#define FWDL_SECTION_MAX_NUM 10
#define FWDL_SECTION_CHKSUM_LEN 8
#define FWDL_SECTION_PER_PKT_LEN 2020
@@ -177,6 +195,7 @@ struct rtw89_h2creg_sch_tx_en {
u16 rsvd:15;
} __packed;
+#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_DFS_CHAN_TIME 105
#define RTW89_OFF_CHAN_TIME 100
@@ -186,7 +205,10 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_SCANOFLD_MAX_IE_LEN 512
#define RTW89_SCANOFLD_PKT_NONE 0xFF
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
-#define RTW89_MAC_CHINFO_SIZE 20
+#define RTW89_MAC_CHINFO_SIZE 24
+#define RTW89_SCAN_LIST_GUARD 4
+#define RTW89_SCAN_LIST_LIMIT \
+ ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD)
struct rtw89_mac_chinfo {
u8 period;
@@ -346,6 +368,16 @@ static inline void RTW89_SET_FWCMD_RA_CR_TBL_SEL(void *cmd, u32 val)
le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10));
}
+static inline void RTW89_SET_FWCMD_RA_FIX_GILTF_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(11));
+}
+
+static inline void RTW89_SET_FWCMD_RA_FIX_GILTF(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(14, 12));
+}
+
static inline void RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16));
@@ -1798,6 +1830,36 @@ static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
}
+static inline void RTW89_SET_FWCMD_PKT_DROP_SEL(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 8));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(23, 16));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_PORT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 24));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_MBSSID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd + 1, val, GENMASK(15, 8));
+}
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -2006,69 +2068,104 @@ static inline void RTW89_SET_FWCMD_CXROLE_ROLE_NAN(void *cmd, u16 val)
le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PID(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, GENMASK(3, 1));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_PHY(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(4));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, BIT(5));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BAND(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6));
+ u8p_replace_bits((u8 *)cmd + (6 + (12 + offset) * n), val, GENMASK(7, 6));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0));
+ u8p_replace_bits((u8 *)cmd + (7 + (12 + offset) * n), val, BIT(0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_BW(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1));
+ u8p_replace_bits((u8 *)cmd + (7 + (12 + offset) * n), val, GENMASK(7, 1));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_ROLE(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0));
+ u8p_replace_bits((u8 *)cmd + (8 + (12 + offset) * n), val, GENMASK(7, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_CH(void *cmd, u8 val, int n, u8 offset)
{
- u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0));
+ u8p_replace_bits((u8 *)cmd + (9 + (12 + offset) * n), val, GENMASK(7, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (10 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (12 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (14 + (12 + offset) * n)), val, GENMASK(15, 0));
}
-static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n)
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(void *cmd, u16 val, int n, u8 offset)
{
- le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0));
+ le16p_replace_bits((__le16 *)((u8 *)cmd + (16 + (12 + offset) * n)), val, GENMASK(15, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(void *cmd, u32 val, int n, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + (20 + (12 + offset) * n)), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_MROLE_NOA(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 4), val, GENMASK(31, 0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_EN(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_CHG(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, GENMASK(3, 2));
+}
+
+static inline void RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(void *cmd, u32 val, u8 offset)
+{
+ le32p_replace_bits((__le32 *)((u8 *)cmd + offset + 8), val, BIT(4));
}
static inline void RTW89_SET_FWCMD_CXCTRL_MANUAL(void *cmd, u32 val)
@@ -2352,6 +2449,86 @@ static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(31, 0));
}
+static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_P2PID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(11, 8));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_NOAID(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(15, 12));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_ACT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(19, 16));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_TYPE(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(20));
+}
+
+static inline void RTW89_SET_FWCMD_P2P_ALL_SLEP(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(21));
+}
+
+static inline void RTW89_SET_FWCMD_NOA_START_TIME(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 1) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_INTERVAL(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 2) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_DURATION(void *cmd, __le32 val)
+{
+ *((__le32 *)cmd + 3) = val;
+}
+
+static inline void RTW89_SET_FWCMD_NOA_COUNT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)(cmd) + 4, val, GENMASK(7, 0));
+}
+
+static inline void RTW89_SET_FWCMD_NOA_CTWINDOW(void *cmd, u32 val)
+{
+ u8 ctwnd;
+
+ if (!(val & IEEE80211_P2P_OPPPS_ENABLE_BIT))
+ return;
+ ctwnd = FIELD_GET(IEEE80211_P2P_OPPPS_CTWINDOW_MASK, val);
+ le32p_replace_bits((__le32 *)(cmd) + 4, ctwnd, GENMASK(23, 8));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_BAND(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(0));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_EN(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, BIT(1));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_PORT(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(4, 2));
+}
+
+static inline void RTW89_SET_FWCMD_TSF32_TOGL_EARLY(void *cmd, u32 val)
+{
+ le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 16));
+}
+
#define RTW89_C2H_HEADER_LEN 8
#define RTW89_GET_C2H_CATEGORY(c2h) \
@@ -2421,6 +2598,8 @@ static inline void RTW89_SET_FWCMD_SCANOFLD_TSF_SLOW(void *cmd, u32 val)
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
+#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
@@ -2446,7 +2625,14 @@ struct rtw89_mfw_info {
struct rtw89_mfw_hdr {
u8 sig; /* RTW89_MFW_SIG */
u8 fw_nr;
- u8 rsvd[14];
+ u8 rsvd0[2];
+ struct {
+ u8 major;
+ u8 minor;
+ u8 sub;
+ u8 idx;
+ } ver;
+ u8 rsvd1[8];
struct rtw89_mfw_info info[];
} __packed;
@@ -2493,6 +2679,7 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
#define H2C_FUNC_MAC_LPS_PARM 0x0
+#define H2C_FUNC_P2P_ACT 0x1
/* CLASS 3 - FW download */
#define H2C_CL_MAC_FWDL 0x3
@@ -2519,9 +2706,11 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_PACKET_OFLD 0x1
#define H2C_FUNC_MAC_MACID_PAUSE 0x8
#define H2C_FUNC_USR_EDCA 0xF
+#define H2C_FUNC_TSF32_TOGL 0x10
#define H2C_FUNC_OFLD_CFG 0x14
#define H2C_FUNC_ADD_SCANOFLD_CH 0x16
#define H2C_FUNC_SCANOFLD 0x17
+#define H2C_FUNC_PKT_DROP 0x1b
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
@@ -2552,7 +2741,7 @@ struct rtw89_fw_h2c_rf_get_mccch {
#define RTW89_FW_RSVD_PLE_SIZE 0x800
-#define RTW89_WCPU_BASE_ADDR 0xA0000000
+#define RTW89_WCPU_BASE_MASK GENMASK(27, 0)
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
#define RTW89_VALID_FW_BACKTRACE_SIZE(_size) \
@@ -2563,6 +2752,9 @@ struct rtw89_fw_h2c_rf_get_mccch {
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev);
int rtw89_fw_recognize(struct rtw89_dev *rtwdev);
+void rtw89_early_fw_feature_recognize(struct device *device,
+ const struct rtw89_chip_info *chip,
+ u32 *early_feat_map);
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type);
int rtw89_load_firmware(struct rtw89_dev *rtwdev);
void rtw89_unload_firmware(struct rtw89_dev *rtwdev);
@@ -2577,6 +2769,8 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
@@ -2600,6 +2794,7 @@ int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
@@ -2623,6 +2818,7 @@ void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
+void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
@@ -2642,5 +2838,20 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
+ const struct rtw89_pkt_drop_params *params);
+int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_p2p_noa_desc *desc,
+ u8 act, u8 noa_id);
+int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool en);
+
+static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->bacam_v1)
+ rtw89_fw_h2c_init_ba_cam_v1(rtwdev);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 93124b815825..0508dfca8edf 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
@@ -826,6 +827,8 @@ static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 dma_ch_mask = chip->dma_ch_mask;
u8 ch;
u32 ret = 0;
@@ -847,6 +850,8 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ if (dma_ch_mask & BIT(ch))
+ continue;
ret = hfc_ch_ctrl(rtwdev, ch);
if (ret)
return ret;
@@ -862,6 +867,8 @@ static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
udelay(10);
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ if (dma_ch_mask & BIT(ch))
+ continue;
ret = hfc_upd_ch_info(rtwdev, ch);
if (ret)
return ret;
@@ -1053,18 +1060,29 @@ void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
enum rtw89_rpwm_req_pwr_state state;
unsigned long delay = enter ? 10 : 150;
int ret;
+ int i;
if (enter)
state = rtw89_mac_get_req_pwr_state(rtwdev);
else
state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
- rtw89_mac_send_rpwm(rtwdev, state, false);
- ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
- delay, 15000, false, rtwdev, state);
- if (ret)
- rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
- enter ? "entering" : "leaving");
+ for (i = 0; i < RPWM_TRY_CNT; i++) {
+ rtw89_mac_send_rpwm(rtwdev, state, false);
+ ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret,
+ !ret, delay, 15000, false,
+ rtwdev, state);
+ if (!ret)
+ break;
+
+ if (i == RPWM_TRY_CNT - 1)
+ rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
+ enter ? "entering" : "leaving");
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "%d time firmware failed to ack for %s ps mode\n",
+ i + 1, enter ? "entering" : "leaving");
+ }
}
void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
@@ -1081,7 +1099,6 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_pwr_cfg * const *cfg_seq;
int (*cfg_func)(struct rtw89_dev *rtwdev);
- struct rtw89_hal *hal = &rtwdev->hal;
int ret;
u8 val;
@@ -1113,7 +1130,7 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
- hal->current_channel = 0;
+ rtw89_set_entity_state(rtwdev, false);
}
return 0;
@@ -1207,8 +1224,8 @@ static int chip_func_en(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id == RTL8852A)
- rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0,
+ if (chip_id == RTL8852A || chip_id == RTL8852B)
+ rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
B_AX_OCP_L1_MASK);
return 0;
@@ -1239,6 +1256,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
/* DLFW */
.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
+ /* PCIE 64 */
+ .wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+ /* DLFW */
+ .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
/* 8852C DLFW */
.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
/* 8852C PCIE SCC */
@@ -1247,6 +1268,10 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
+ /* PCIE 64 */
+ .ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
+ /* DLFW */
+ .ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
/* 8852C DLFW */
.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
/* 8852C PCIE SCC */
@@ -1255,6 +1280,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt0 = {3792, 196, 0, 107,},
/* DLFW */
.wde_qt4 = {0, 0, 0, 0,},
+ /* PCIE 64 */
+ .wde_qt6 = {448, 48, 0, 16,},
/* 8852C DLFW */
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
@@ -1265,6 +1292,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
/* DLFW */
.ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,},
+ /* PCIE 64 */
+ .ple_qt18 = {147, 0, 16, 20, 17, 13, 89, 0, 32, 14, 8, 0,},
/* DLFW 52C */
.ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,},
/* DLFW 52C */
@@ -1273,6 +1302,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,},
/* 8852C PCIE SCC */
.ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,},
+ /* PCIE 64 */
+ .ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,},
};
EXPORT_SYMBOL(rtw89_mac_size);
@@ -1307,6 +1338,17 @@ static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
}
+static u32 dle_expected_used_size(struct rtw89_dev *rtwdev,
+ enum rtw89_qta_mode mode)
+{
+ u32 size = rtwdev->chip->fifo_size;
+
+ if (mode == RTW89_QTA_SCC)
+ size -= rtwdev->chip->dle_scc_rsvd_size;
+
+ return size;
+}
+
static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
{
if (enable)
@@ -1474,7 +1516,8 @@ static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
+ dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
ret = -EINVAL;
goto error;
@@ -1734,7 +1777,7 @@ static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32(rtwdev, reg, val);
ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR),
- 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR);
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, reg);
if (ret) {
rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n");
return ret;
@@ -1747,13 +1790,19 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 ret;
u32 reg;
+ u32 val;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
return ret;
reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_1, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
+ SIFS_MACTXEN_T1_V1);
+ else
+ rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
+ SIFS_MACTXEN_T1);
if (rtwdev->chip->chip_id == RTL8852B) {
reg = rtw89_mac_reg_by_idx(R_AX_SCH_EXT_CTRL, mac_idx);
@@ -1764,7 +1813,16 @@ static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN);
reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US);
+ if (rtwdev->chip->chip_id == RTL8852C) {
+ val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
+ B_AX_TX_PARTIAL_MODE);
+ if (!val)
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+ } else {
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK,
+ SCH_PREBKF_24US);
+ }
return 0;
}
@@ -1910,7 +1968,7 @@ static int nav_ctrl_init(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN |
B_AX_WMAC_TF_UP_NAV_EN |
B_AX_WMAC_NAV_UPPER_EN);
- rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_12MS);
+ rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_25MS);
return 0;
}
@@ -1953,6 +2011,8 @@ static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs;
u32 reg, val, sifs;
int ret;
@@ -1983,6 +2043,11 @@ static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN);
+ reg = rtw89_mac_reg_by_idx(rrsr->ref_rate.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data);
+ reg = rtw89_mac_reg_by_idx(rrsr->rsc.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data);
+
return 0;
}
@@ -2061,6 +2126,7 @@ static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val, reg;
int ret;
@@ -2075,6 +2141,11 @@ static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
rtw89_write32(rtwdev, reg, val);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_RRSR1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
+ }
+
return 0;
}
@@ -2134,6 +2205,25 @@ static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return 0;
}
+static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg;
+ int ret;
+
+ if (chip_id != RTL8852A && chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXDMA_CTRL_0, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE);
+
+ return 0;
+}
+
static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
int ret;
@@ -2209,6 +2299,12 @@ static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
return ret;
}
+ ret = cmac_dma_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret);
+ return ret;
+ }
+
return ret;
}
@@ -2236,23 +2332,42 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_mac_c2h_info c2h_info = {0};
- struct rtw89_c2h_phy_cap *cap =
- (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0];
+ u8 tx_nss;
+ u8 rx_nss;
+ u8 tx_ant;
+ u8 rx_ant;
u32 ret;
ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
if (ret)
return ret;
- hal->tx_nss = cap->tx_nss ?
- min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss;
- hal->rx_nss = cap->rx_nss ?
- min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss;
+ tx_nss = RTW89_GET_C2H_PHYCAP_TX_NSS(c2h_info.c2hreg);
+ rx_nss = RTW89_GET_C2H_PHYCAP_RX_NSS(c2h_info.c2hreg);
+ tx_ant = RTW89_GET_C2H_PHYCAP_ANT_TX_NUM(c2h_info.c2hreg);
+ rx_ant = RTW89_GET_C2H_PHYCAP_ANT_RX_NUM(c2h_info.c2hreg);
+
+ hal->tx_nss = tx_nss ? min_t(u8, tx_nss, chip->tx_nss) : chip->tx_nss;
+ hal->rx_nss = rx_nss ? min_t(u8, rx_nss, chip->rx_nss) : chip->rx_nss;
+
+ if (tx_ant == 1)
+ hal->antenna_tx = RF_B;
+ if (rx_ant == 1)
+ hal->antenna_rx = RF_B;
+
+ if (tx_nss == 1 && tx_ant == 2 && rx_ant == 2) {
+ hal->antenna_tx = RF_B;
+ hal->tx_path_diversity = true;
+ }
rtw89_debug(rtwdev, RTW89_DBG_FW,
"phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n",
- hal->tx_nss, cap->tx_nss, chip->tx_nss,
- hal->rx_nss, cap->rx_nss, chip->rx_nss);
+ hal->tx_nss, tx_nss, chip->tx_nss,
+ hal->rx_nss, rx_nss, chip->rx_nss);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n",
+ tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx);
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity);
return 0;
}
@@ -2429,8 +2544,7 @@ int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
}
EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1);
-static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
- bool wd)
+u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd)
{
u32 val, reg;
int ret;
@@ -2450,9 +2564,8 @@ static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val);
}
-static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
- struct rtw89_cpuio_ctrl *ctrl_para,
- bool wd)
+int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd)
{
u32 val, cmd_type, reg;
int ret;
@@ -2517,7 +2630,8 @@ static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
return -EINVAL;
}
- if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
+ dle_expected_used_size(rtwdev, mode)) {
rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
return -EINVAL;
}
@@ -2766,7 +2880,7 @@ static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev)
{
const struct rtw89_imr_info *imr = rtwdev->chip->imr_info;
- rtw89_write32_set(rtwdev, R_AX_BBRPT_COM_ERR_IMR,
+ rtw89_write32_set(rtwdev, imr->bbrpt_com_err_imr_reg,
B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN);
rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg,
B_AX_BBRPT_CHINFO_IMR_CLR);
@@ -3026,6 +3140,8 @@ static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_H2C, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_C2H, 0);
rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
@@ -3103,14 +3219,6 @@ dle:
return ret;
}
-static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_chip_info *chip = rtwdev->chip;
-
- rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
- B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
-}
-
int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
@@ -3124,7 +3232,7 @@ int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_mac_enable_bb_rf);
-void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -3132,6 +3240,8 @@ void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+
+ return 0;
}
EXPORT_SYMBOL(rtw89_mac_disable_bb_rf);
@@ -3147,7 +3257,7 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
return ret;
}
- rtw89_mac_hci_func_en(rtwdev);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
ret = rtw89_mac_dmac_pre_init(rtwdev);
if (ret)
@@ -3524,6 +3634,26 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
BCN_ERLY_DEF);
}
+static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u16 val;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ return;
+
+ val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
+ B_AX_TBTT_SHIFT_OFST_SIGN;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_shift,
+ B_AX_TBTT_SHIFT_OFST_MASK, val);
+}
+
int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
@@ -3598,6 +3728,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
@@ -3607,6 +3738,50 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
}
+static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *data)
+{
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+ bool *tolerated = data;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+ if (!elem || elem->datalen < 10 ||
+ !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+ *tolerated = false;
+ rcu_read_unlock();
+}
+
+void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_hw *hw = rtwdev->hw;
+ bool tolerated = true;
+ u32 reg;
+
+ if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return;
+
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ rtw89_mac_check_he_obss_narrow_bw_ru_iter,
+ &tolerated);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ if (tolerated)
+ rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ else
+ rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+}
+
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
@@ -3655,22 +3830,26 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
u32 len)
{
struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
- struct rtw89_hal *hal = &rtwdev->hal;
- u8 reason, status, tx_fail, band;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ struct rtw89_chan new;
+ u8 reason, status, tx_fail, band, actual_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx;
u16 chan;
+ int ret;
tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
+ actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d\n",
- band, chan, reason, status, tx_fail);
+ "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ band, chan, reason, status, tx_fail, actual_period);
switch (reason) {
case RTW89_SCAN_LEAVE_CH_NOTIFY:
@@ -3678,15 +3857,20 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
ieee80211_stop_queues(rtwdev->hw);
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
- rtw89_hw_scan_complete(rtwdev, vif, false);
+ if (rtwvif && rtwvif->scan_req &&
+ last_chan < rtwvif->scan_req->n_channels) {
+ ret = rtw89_hw_scan_offload(rtwdev, vif, true);
+ if (ret) {
+ rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
+ }
+ } else {
+ rtw89_hw_scan_complete(rtwdev, vif, false);
+ }
break;
case RTW89_SCAN_ENTER_CH_NOTIFY:
- hal->prev_band_type = hal->current_band_type;
- hal->current_band_type = band;
- hal->prev_primary_channel = hal->current_primary_channel;
- hal->current_primary_channel = chan;
- hal->current_channel = chan;
- hal->current_band_width = RTW89_CHANNEL_WIDTH_20;
+ rtw89_chan_create(&new, chan, chan, band, RTW89_CHANNEL_WIDTH_20);
+ rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_store_op_chan(rtwdev, false);
ieee80211_wake_queues(rtwdev->hw);
@@ -3738,6 +3922,12 @@ rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
{
}
+static void
+rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3747,6 +3937,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
+ [RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt,
};
static
@@ -4628,3 +4819,48 @@ int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
return 0;
}
+
+static
+void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
+{
+ static const enum rtw89_pkt_drop_sel sels[] = {
+ RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
+ RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
+ };
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_pkt_drop_params params = {0};
+ int i;
+
+ params.mac_band = RTW89_MAC_0;
+ params.macid = rtwsta->mac_id;
+ params.port = rtwvif->port;
+ params.mbssid = 0;
+ params.tf_trs = rtwvif->trigger;
+
+ for (i = 0; i < ARRAY_SIZE(sels); i++) {
+ params.sel = sels[i];
+ rtw89_fw_h2c_pkt_drop(rtwdev, &params);
+ }
+}
+
+static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif *target = data;
+
+ if (rtwvif != target)
+ return;
+
+ rtw89_mac_pkt_drop_sta(rtwdev, rtwsta);
+}
+
+void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_mac_pkt_drop_vif_iter,
+ rtwvif);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index f66619354734..6f4ada1869a1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -6,11 +6,13 @@
#define __RTW89_MAC_H__
#include "core.h"
+#include "reg.h"
#define MAC_MEM_DUMP_PAGE_SIZE 0x40000
#define ADDR_CAM_ENT_SIZE 0x40
#define BSSID_CAM_ENT_SIZE 0x08
#define HFC_PAGE_UNIT 64
+#define RPWM_TRY_CNT 3
enum rtw89_mac_hwmod_sel {
RTW89_DMAC_SEL = 0,
@@ -304,6 +306,7 @@ enum rtw89_mac_c2h_ofld_func {
RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
RTW89_MAC_C2H_FUNC_BCN_RESEND,
RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+ RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT = 0x6,
RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9,
RTW89_MAC_C2H_FUNC_OFLD_MAX,
};
@@ -688,23 +691,30 @@ struct rtw89_mac_size_set {
const struct rtw89_hfc_prec_cfg hfc_preccfg_pcie;
const struct rtw89_dle_size wde_size0;
const struct rtw89_dle_size wde_size4;
+ const struct rtw89_dle_size wde_size6;
+ const struct rtw89_dle_size wde_size9;
const struct rtw89_dle_size wde_size18;
const struct rtw89_dle_size wde_size19;
const struct rtw89_dle_size ple_size0;
const struct rtw89_dle_size ple_size4;
+ const struct rtw89_dle_size ple_size6;
+ const struct rtw89_dle_size ple_size8;
const struct rtw89_dle_size ple_size18;
const struct rtw89_dle_size ple_size19;
const struct rtw89_wde_quota wde_qt0;
const struct rtw89_wde_quota wde_qt4;
+ const struct rtw89_wde_quota wde_qt6;
const struct rtw89_wde_quota wde_qt17;
const struct rtw89_wde_quota wde_qt18;
const struct rtw89_ple_quota ple_qt4;
const struct rtw89_ple_quota ple_qt5;
const struct rtw89_ple_quota ple_qt13;
+ const struct rtw89_ple_quota ple_qt18;
const struct rtw89_ple_quota ple_qt44;
const struct rtw89_ple_quota ple_qt45;
const struct rtw89_ple_quota ple_qt46;
const struct rtw89_ple_quota ple_qt47;
+ const struct rtw89_ple_quota ple_qt58;
};
extern const struct rtw89_mac_size_set rtw89_mac_size;
@@ -798,9 +808,11 @@ int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
-void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev)
{
@@ -809,11 +821,11 @@ static inline int rtw89_chip_enable_bb_rf(struct rtw89_dev *rtwdev)
return chip->ops->enable_bb_rf(rtwdev);
}
-static inline void rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
+static inline int rtw89_chip_disable_bb_rf(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- chip->ops->disable_bb_rf(rtwdev);
+ return chip->ops->disable_bb_rf(rtwdev);
}
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
@@ -911,6 +923,45 @@ static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev,
return 0;
}
+static inline void rtw89_mac_ctrl_hci_dma_tx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN);
+}
+
+static inline void rtw89_mac_ctrl_hci_dma_rx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_RXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_RXDMA_EN);
+}
+
+static inline void rtw89_mac_ctrl_hci_dma_trx(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (enable)
+ rtw89_write32_set(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+ else
+ rtw89_write32_clr(rtwdev, chip->hci_func_en_addr,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+}
+
int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool resume, u32 tx_time);
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
@@ -944,8 +995,10 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_HIGH_ADDR_MASK GENMASK(2, 0)
XTAL_SI_READ_VAL = 0x7A,
XTAL_SI_WL_RFC_S0 = 0x80,
+#define XTAL_SI_RF00S_EN GENMASK(2, 0)
#define XTAL_SI_RF00 BIT(0)
XTAL_SI_WL_RFC_S1 = 0x81,
+#define XTAL_SI_RF10S_EN GENMASK(2, 0)
#define XTAL_SI_RF10 BIT(0)
XTAL_SI_ANAPAR_WL = 0x90,
#define XTAL_SI_SRAM2RFC BIT(7)
@@ -962,5 +1015,9 @@ enum rtw89_mac_xtal_si_offset {
int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask);
int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val);
+void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd);
+int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index cef27e781ae2..a296bfa8188f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -3,6 +3,7 @@
*/
#include "cam.h"
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -12,6 +13,7 @@
#include "reg.h"
#include "sar.h"
#include "ser.h"
+#include "util.h"
static void rtw89_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
@@ -85,8 +87,11 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
+ &hw->conf.chandef);
rtw89_set_channel(rtwdev);
+ }
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
(hw->conf.flags & IEEE80211_CONF_IDLE))
@@ -104,6 +109,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
int ret = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n",
+ vif->addr, vif->type, vif->p2p);
+
mutex_lock(&rtwdev->mutex);
rtwvif->rtwdev = rtwdev;
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
@@ -146,6 +154,9 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n",
+ vif->addr, vif->type, vif->p2p);
+
cancel_work_sync(&rtwvif->update_beacon_work);
mutex_lock(&rtwdev->mutex);
@@ -157,6 +168,23 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype type, bool p2p)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_debug(rtwdev, RTW89_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+ vif->addr, vif->type, type, vif->p2p, p2p);
+
+ rtw89_ops_remove_interface(hw, vif);
+
+ vif->type = type;
+ vif->p2p = p2p;
+
+ return rtw89_ops_add_interface(hw, vif);
+}
+
static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
@@ -235,11 +263,12 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 aifsn)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 slot_time;
u8 sifs;
slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
- sifs = rtwdev->hal.current_band_type == RTW89_BAND_5G ? 16 : 10;
+ sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
return aifsn * slot_time + sifs;
}
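/*
 * Illustrative arithmetic, not part of the patch: aifs = aifsn * slot_time
 * + sifs, so aifsn = 2 on a 5 GHz channel with short slots gives
 * 2 * 9 + 16 = 34 us, while the same aifsn on 2.4 GHz with long slots gives
 * 2 * 20 + 10 = 50 us.
 */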
@@ -350,6 +379,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, vif);
rtw89_store_op_chan(rtwdev, true);
} else {
/* Abort ongoing scan if cancel_scan isn't issued
@@ -378,6 +408,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_MU_GROUPS)
rtw89_mac_bf_set_gid_table(rtwdev, vif, conf);
+ if (changed & BSS_CHANGED_P2P_PS)
+ rtw89_process_p2p_ps(rtwdev, vif);
+
mutex_unlock(&rtwdev->mutex);
}
@@ -605,6 +638,20 @@ static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
+static
+void __rtw89_drop_packets(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif;
+
+ if (vif) {
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ } else {
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_mac_pkt_drop_vif(rtwdev, rtwvif);
+ }
+}
+
static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
@@ -613,7 +660,12 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
rtw89_hci_flush_queues(rtwdev, queues, drop);
- rtw89_mac_flush_txq(rtwdev, queues, drop);
+
+ if (drop && RTW89_CHK_FW_FEATURE(PACKET_DROP, &rtwdev->fw))
+ __rtw89_drop_packets(rtwdev, vif);
+ else
+ rtw89_mac_flush_txq(rtwdev, queues, drop);
+
mutex_unlock(&rtwdev->mutex);
}
@@ -629,7 +681,7 @@ static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
- if (vif != br_data->vif)
+ if (vif != br_data->vif || vif->p2p)
return;
rtwsta->use_cfg_mask = true;
@@ -669,12 +721,13 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
- if (rx_ant != hw->wiphy->available_antennas_rx)
+ if (rx_ant != hw->wiphy->available_antennas_rx && rx_ant != hal->antenna_rx)
return -EINVAL;
mutex_lock(&rtwdev->mutex);
hal->antenna_tx = tx_ant;
hal->antenna_rx = rx_ant;
+ hal->tx_path_diversity = false;
mutex_unlock(&rtwdev->mutex);
return 0;
@@ -772,6 +825,97 @@ static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
rtw89_phy_ra_updata_sta(rtwdev, sta, changed);
}
+static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_chanctx_ops_add(rtwdev, ctx);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_remove(rtwdev, ctx);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_change(rtwdev, ctx, changed);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif, ctx);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif, ctx);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_set_tid_config_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct cfg80211_tid_config *tid_config = data;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwvif->rtwdev;
+
+ rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+}
+
+static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ if (sta)
+ rtw89_core_set_tid_config(rtwdev, sta, tid_config);
+ else
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_set_tid_config_iter,
+ tid_config);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
const struct ieee80211_ops rtw89_ops = {
.tx = rtw89_ops_tx,
.wake_tx_queue = rtw89_ops_wake_tx_queue,
@@ -779,6 +923,7 @@ const struct ieee80211_ops rtw89_ops = {
.stop = rtw89_ops_stop,
.config = rtw89_ops_config,
.add_interface = rtw89_ops_add_interface,
+ .change_interface = rtw89_ops_change_interface,
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
.bss_info_changed = rtw89_ops_bss_info_changed,
@@ -800,7 +945,13 @@ const struct ieee80211_ops rtw89_ops = {
.reconfig_complete = rtw89_ops_reconfig_complete,
.hw_scan = rtw89_ops_hw_scan,
.cancel_hw_scan = rtw89_ops_cancel_hw_scan,
+ .add_chanctx = rtw89_ops_add_chanctx,
+ .remove_chanctx = rtw89_ops_remove_chanctx,
+ .change_chanctx = rtw89_ops_change_chanctx,
+ .assign_vif_chanctx = rtw89_ops_assign_vif_chanctx,
+ .unassign_vif_chanctx = rtw89_ops_unassign_vif_chanctx,
.set_sar_specs = rtw89_ops_set_sar_specs,
.sta_rc_update = rtw89_ops_sta_rc_update,
+ .set_tid_config = rtw89_ops_set_tid_config,
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index c68fec9eb5a6..5f8e19639362 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -169,6 +169,23 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
+ const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
+
+ if (enable) {
+ rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
+ if (dma_stop2->addr)
+ rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
+ } else {
+ rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
+ if (dma_stop2->addr)
+ rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
+ }
+}
+
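/*
 * Illustrative usage, not part of the patch: during MAC pre-init this helper
 * first stops every TX DMA channel, and only the firmware-command channel is
 * then re-opened for firmware download (see rtw89_pci_ops_mac_pre_init()
 * further down in this file):
 *
 *	rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
 *	rtw89_write32_clr(rtwdev, info->dma_stop1.addr, B_AX_STOP_CH12);
 */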
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
struct sk_buff *new,
@@ -760,7 +777,8 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
enable_intr:
spin_lock_irqsave(&rtwpci->irq_lock, flags);
- rtw89_chip_enable_intr(rtwdev, rtwpci);
+ if (likely(rtwpci->running))
+ rtw89_chip_enable_intr(rtwdev, rtwpci);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
return IRQ_HANDLED;
}
@@ -925,10 +943,12 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ cnt = min(cnt, wd_ring->curr_num);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
@@ -1073,12 +1093,15 @@ static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
bool drop)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
u8 i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
/* It may be unnecessary to flush FWCMD queue. */
if (i == RTW89_TXCH_CH12)
continue;
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
if (txchs & BIT(i))
__pci_flush_txch(rtwdev, i, drop);
@@ -1357,6 +1380,7 @@ static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
@@ -1368,6 +1392,9 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
+
tx_ring = &rtwpci->tx_rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = &bd_ram_table[i];
@@ -1411,12 +1438,15 @@ static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
int txch;
rtw89_pci_reset_trx_rings(rtwdev);
spin_lock_bh(&rtwpci->trx_lock);
for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
+ if (info->tx_dma_ch_mask & BIT(txch))
+ continue;
if (txch == RTW89_TXCH_CH12) {
rtw89_pci_release_fwcmd(rtwdev, rtwpci,
skb_queue_len(&rtwpci->h2c_queue), true);
@@ -1604,33 +1634,41 @@ static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
writel(data, rtwpci->mmap + addr);
}
-static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
const struct rtw89_pci_info *info = rtwdev->pci_info;
- u32 txhci_en = info->txhci_en_bit;
- u32 rxhci_en = info->rxhci_en_bit;
- if (enable) {
- if (chip_id != RTL8852C)
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
- B_AX_STOP_PCIEIO);
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- txhci_en | rxhci_en);
- if (chip_id == RTL8852C)
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_STOP_AXI_MST);
+ if (enable)
+ rtw89_write32_set(rtwdev, info->init_cfg_reg,
+ info->rxhci_en_bit | info->txhci_en_bit);
+ else
+ rtw89_write32_clr(rtwdev, info->init_cfg_reg,
+ info->rxhci_en_bit | info->txhci_en_bit);
+}
+
+static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 reg, mask;
+
+ if (chip_id == RTL8852C) {
+ reg = R_AX_HAXI_INIT_CFG1;
+ mask = B_AX_STOP_AXI_MST;
} else {
- if (chip_id != RTL8852C)
- rtw89_write32_set(rtwdev, info->dma_stop1_reg,
- B_AX_STOP_PCIEIO);
- else
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_STOP_AXI_MST);
- if (chip_id == RTL8852C)
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_STOP_AXI_MST);
+ reg = R_AX_PCIE_DMA_STOP1;
+ mask = B_AX_STOP_PCIEIO;
}
+
+ if (enable)
+ rtw89_write32_clr(rtwdev, reg, mask);
+ else
+ rtw89_write32_set(rtwdev, reg, mask);
+}
+
+static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_pci_ctrl_dma_io(rtwdev, enable);
+ rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}
static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
@@ -1836,6 +1874,18 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate
return 0;
}
+static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852B)
+ return 0;
+
+ ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
+ PCIE_AUTOK_4, PCIE_PHY_GEN1);
+ return ret;
+}
+
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
enum rtw89_pcie_phy phy_rate;
@@ -2049,7 +2099,7 @@ static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
{
- if (rtwdev->chip->chip_id != RTL8852A)
+ if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B)
return;
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
@@ -2234,19 +2284,19 @@ static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
- u32 dma_busy1 = info->dma_busy1_reg;
+ u32 dma_busy1 = info->dma_busy1.addr;
u32 dma_busy2 = info->dma_busy2_reg;
- check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY |
- B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY |
- B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY |
- B_AX_CH9_BUSY | B_AX_CH12_BUSY;
+ check = info->dma_busy1.mask;
ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
10, 100, false, rtwdev, dma_busy1);
if (ret)
return ret;
+ if (!dma_busy2)
+ return 0;
+
check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
@@ -2414,6 +2464,12 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
rtw89_pci_hci_ldo(rtwdev);
rtw89_pci_dphy_delay(rtwdev);
+ ret = rtw89_pci_autok_x(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
+ return ret;
+ }
+
ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
@@ -2432,7 +2488,7 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
rtw89_pci_set_dbg(rtwdev);
rtw89_pci_set_keep_reg(rtwdev);
- rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA);
+ rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
/* stop DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, false);
@@ -2455,10 +2511,9 @@ static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
return ret;
}
- /* enable FW CMD queue to download firmware */
- rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12);
- rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
+	/* disable all channels except the FW CMD channel to download firmware */
+ rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
+ rtw89_write32_clr(rtwdev, info->dma_stop1.addr, B_AX_STOP_CH12);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -2486,15 +2541,15 @@ int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
if (rtw89_pci_ltr_is_err_reg_val(val))
return -EINVAL;
- rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
- rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
+ B_AX_LTR_WD_NOEMP_CHK);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
PCI_LTR_SPC_500US);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
- PCI_LTR_IDLE_TIMER_800US);
+ PCI_LTR_IDLE_TIMER_3_2MS);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
- rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
+ rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
return 0;
@@ -2571,11 +2626,10 @@ static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
}
/* enable DMA for all queues */
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL);
- rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL);
+ rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
/* Release PCI IO */
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
+ rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
return 0;
@@ -2696,10 +2750,13 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
tx_ring = &rtwpci->tx_rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
@@ -2887,6 +2944,7 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
u32 desc_size;
u32 len;
@@ -2894,6 +2952,8 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
int ret;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ if (info->tx_dma_ch_mask & BIT(i))
+ continue;
tx_ring = &rtwpci->tx_rings[i];
desc_size = sizeof(struct rtw89_pci_tx_bd_32);
len = RTW89_PCI_TXBD_NUM_MAX;
@@ -3219,8 +3279,79 @@ static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
pci_free_irq_vectors(pdev);
}
+static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
+{
+ u16 bin = 0, gray_bit;
+ u32 bit_idx;
+
+ for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
+ gray_bit = (gray_code >> bit_idx) & 0x1;
+ if (bit_num - bit_idx > 1)
+ gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
+ bin |= (gray_bit << bit_idx);
+ }
+
+ return bin;
+}
+
+static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 val16, filter_out_val;
+ u32 val, phy_offset;
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return 0;
+
+ val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
+ if (val == B_AX_ASPM_CTRL_L1)
+ return 0;
+
+ ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
+ if (val == RTW89_PCIE_GEN1_SPEED) {
+ phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ } else if (val == RTW89_PCIE_GEN2_SPEED) {
+ phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
+ val16 | B_PCIE_BIT_PINOUT_DIS);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
+ val16 & ~B_PCIE_BIT_RD_SEL);
+
+ val16 = rtw89_read16_mask(rtwdev,
+ phy_offset + RAC_ANA1F * RAC_MULT,
+ FILTER_OUT_EQ_MASK);
+ val16 = gray_code_to_bin(val16, hweight16(val16));
+ filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
+ RAC_MULT);
+ filter_out_val &= ~REG_FILTER_OUT_MASK;
+ filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
+
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
+ filter_out_val);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
+ B_BAC_EQ_SEL);
+ rtw89_write16_set(rtwdev,
+ R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
+ B_PCIE_BIT_PSAVE);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
+ B_PCIE_BIT_PSAVE);
+
+ return 0;
+}
+
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
if (rtw89_pci_disable_clkreq)
@@ -3231,19 +3362,33 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (enable)
- ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
- else
- ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
- if (ret)
- rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
- enable ? "set" : "unset", ret);
+ if (chip_id == RTL8852A) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
+ enable ? "set" : "unset", ret);
+ } else if (chip_id == RTL8852C) {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
+ B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
+ B_AX_CLK_REQ_N);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
+ B_AX_CLK_REQ_N);
+ }
}
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u8 value = 0;
int ret;
@@ -3262,12 +3407,23 @@ static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
- if (enable)
- ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
- else
- ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ } else if (chip_id == RTL8852C) {
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_ASPM_CTRL_L1);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_ASPM_CTRL_L1);
+ }
if (ret)
rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
enable ? "set" : "unset", ret);
@@ -3328,17 +3484,34 @@ static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (enable)
- ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
- else
- ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
- if (ret)
- rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
- enable ? "set" : "unset", ret);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (enable)
+ ret = rtw89_pci_config_byte_set(rtwdev,
+ RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ else
+ ret = rtw89_pci_config_byte_clr(rtwdev,
+ RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
+ enable ? "set" : "unset", ret);
+ } else if (chip_id == RTL8852C) {
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
+ RTW89_PCIE_BIT_ASPM_L11 |
+ RTW89_PCIE_BIT_PCI_L11);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
+ if (enable)
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_L1SUB_DISABLE);
+ else
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_AX_L1SUB_DISABLE);
+ }
}
static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
@@ -3360,26 +3533,6 @@ static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
rtw89_pci_l1ss_set(rtwdev, true);
}
-static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
-{
- const struct rtw89_pci_info *info = rtwdev->pci_info;
- u32 val32;
-
- if (en == MAC_AX_FUNC_EN) {
- val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32);
-
- val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
- } else {
- val32 = B_AX_STOP_PCIEIO;
- rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32);
-
- val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
- }
-}
-
static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
{
int ret = 0;
@@ -3399,10 +3552,13 @@ static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
{
- u32 val, dma_rst = 0;
+ u32 val;
int ret;
- rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ rtw89_pci_ctrl_dma_all(rtwdev, false);
ret = rtw89_pci_poll_io_idle(rtwdev);
if (ret) {
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
@@ -3410,12 +3566,10 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
"[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
R_AX_DBG_ERR_FLAG, val);
if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
- dma_rst |= B_AX_HCI_TXDMA_EN;
+ rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
if (val & B_AX_RX_STUCK)
- dma_rst |= B_AX_HCI_RXDMA_EN;
- val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
- rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
- rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
+ rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
ret = rtw89_pci_poll_io_idle(rtwdev);
val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
rtw89_debug(rtwdev, RTW89_DBG_HCI,
@@ -3426,18 +3580,7 @@ static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
return ret;
}
-static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
-{
- u32 val32;
- if (en == MAC_AX_FUNC_EN) {
- val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
- rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
- } else {
- val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
- rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
- }
-}
static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
{
@@ -3457,15 +3600,18 @@ static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
{
u32 ret;
- rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
- rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
+ rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
rtw89_pci_clr_idx_all(rtwdev);
ret = rtw89_pci_rst_bdram(rtwdev);
if (ret)
return ret;
- rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
+ rtw89_pci_ctrl_dma_all(rtwdev, true);
return ret;
}
@@ -3535,14 +3681,20 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw89_dev *rtwdev = hw->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ } else {
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
+ }
return 0;
}
@@ -3563,15 +3715,24 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtw89_dev *rtwdev = hw->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
- B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
- B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ } else {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
+ B_AX_SEL_REQ_ENTR_L1);
+ }
rtw89_pci_l2_hci_ldo(rtwdev);
+ rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -3614,27 +3775,23 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
const struct rtw89_driver_info *info;
const struct rtw89_pci_info *pci_info;
- int driver_data_size;
int ret;
- driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci);
- hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops);
- if (!hw) {
+ info = (const struct rtw89_driver_info *)id->driver_data;
+
+ rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
+ sizeof(struct rtw89_pci),
+ info->chip);
+ if (!rtwdev) {
dev_err(&pdev->dev, "failed to allocate hw\n");
return -ENOMEM;
}
- info = (const struct rtw89_driver_info *)id->driver_data;
pci_info = info->bus.pci;
- rtwdev = hw->priv;
- rtwdev->hw = hw;
- rtwdev->dev = &pdev->dev;
- rtwdev->chip = info->chip;
rtwdev->pci_info = info->bus.pci;
rtwdev->hci.ops = &rtw89_pci_ops;
rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
@@ -3667,6 +3824,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_clear_resource;
}
+ rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -3696,7 +3854,7 @@ err_declaim_pci:
err_core_deinit:
rtw89_core_deinit(rtwdev);
err_release_hw:
- ieee80211_free_hw(hw);
+ rtw89_free_ieee80211_hw(rtwdev);
return ret;
}
@@ -3715,7 +3873,7 @@ void rtw89_pci_remove(struct pci_dev *pdev)
rtw89_pci_clear_resource(rtwdev, pdev);
rtw89_pci_declaim_device(rtwdev, pdev);
rtw89_core_deinit(rtwdev);
- ieee80211_free_hw(hw);
+ rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index a118647213e3..179740607778 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -11,11 +11,21 @@
#define MDIO_PG1_G1 1
#define MDIO_PG0_G2 2
#define MDIO_PG1_G2 3
+#define RAC_CTRL_PPR 0x00
+#define RAC_ANA0A 0x0A
+#define B_BAC_EQ_SEL BIT(5)
+#define RAC_ANA0C 0x0C
+#define B_PCIE_BIT_PSAVE BIT(15)
#define RAC_ANA10 0x10
+#define B_PCIE_BIT_PINOUT_DIS BIT(3)
#define RAC_REG_REV2 0x1B
#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12)
#define PCIE_DPHY_DLY_25US 0x1
#define RAC_ANA19 0x19
+#define B_PCIE_BIT_RD_SEL BIT(2)
+#define RAC_REG_FLD_0 0x1D
+#define BAC_AUTOK_N_MASK GENMASK(3, 2)
+#define PCIE_AUTOK_4 0x3
#define RAC_ANA1F 0x1F
#define RAC_ANA24 0x24
#define B_AX_DEGLITCH GENMASK(11, 8)
@@ -45,9 +55,26 @@
#define B_AX_SEL_REQ_ENTR_L1 BIT(2)
#define B_AX_SEL_REQ_EXIT_L1 BIT(0)
+#define R_AX_PCIE_MIX_CFG_V1 0x300C
+#define B_AX_ASPM_CTRL_L1 BIT(17)
+#define B_AX_ASPM_CTRL_L0 BIT(16)
+#define B_AX_ASPM_CTRL_MASK GENMASK(17, 16)
+#define B_AX_XFER_PENDING_FW BIT(11)
+#define B_AX_XFER_PENDING BIT(10)
+#define B_AX_REQ_EXIT_L1 BIT(9)
+#define B_AX_REQ_ENTR_L1 BIT(8)
+#define B_AX_L1SUB_DISABLE BIT(0)
+
+#define R_AX_L1_CLK_CTRL 0x3010
+#define B_AX_CLK_REQ_N BIT(1)
+
#define R_AX_PCIE_BG_CLR 0x303C
#define B_AX_BG_CLR_ASYNC_M3 BIT(4)
+#define R_AX_PCIE_LAT_CTRL 0x3044
+#define B_AX_CLK_REQ_SEL_OPT BIT(1)
+#define B_AX_CLK_REQ_SEL BIT(0)
+
#define R_AX_PCIE_IO_RCY_M1 0x3100
#define B_AX_PCIE_IO_RCY_P_M1 BIT(5)
#define B_AX_PCIE_IO_RCY_WDT_P_M1 BIT(4)
@@ -88,7 +115,10 @@
#define B_AX_PCIE_WDT_TIMER_S1_MASK GENMASK(31, 0)
#define R_RAC_DIRECT_OFFSET_G1 0x3800
+#define FILTER_OUT_EQ_MASK GENMASK(14, 10)
#define R_RAC_DIRECT_OFFSET_G2 0x3880
+#define REG_FILTER_OUT_MASK GENMASK(6, 2)
+#define RAC_MULT 2
#define RTW89_PCI_WR_RETRY_CNT 20
@@ -383,6 +413,16 @@
#define B_AX_STOP_RPQ BIT(1)
#define B_AX_STOP_RXQ BIT(0)
#define B_AX_TX_STOP1_ALL GENMASK(18, 8)
+#define B_AX_TX_STOP1_MASK (B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | \
+ B_AX_STOP_ACH2 | B_AX_STOP_ACH3 | \
+ B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | \
+ B_AX_STOP_ACH6 | B_AX_STOP_ACH7 | \
+ B_AX_STOP_CH8 | B_AX_STOP_CH9 | \
+ B_AX_STOP_CH12)
+#define B_AX_TX_STOP1_MASK_V1 (B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | \
+ B_AX_STOP_ACH2 | B_AX_STOP_ACH3 | \
+ B_AX_STOP_CH8 | B_AX_STOP_CH9 | \
+ B_AX_STOP_CH12)
#define R_AX_PCIE_DMA_STOP2 0x1310
#define B_AX_STOP_CH11 BIT(1)
@@ -431,6 +471,13 @@
#define B_AX_ACH0_BUSY BIT(8)
#define B_AX_RPQ_BUSY BIT(1)
#define B_AX_RXQ_BUSY BIT(0)
+#define DMA_BUSY1_CHECK (B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | \
+ B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | \
+ B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | \
+ B_AX_CH9_BUSY | B_AX_CH12_BUSY)
+#define DMA_BUSY1_CHECK_V1 (B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | \
+ B_AX_ACH3_BUSY | B_AX_CH8_BUSY | B_AX_CH9_BUSY | \
+ B_AX_CH12_BUSY)
#define R_AX_PCIE_DMA_BUSY2 0x131C
#define B_AX_CH11_BUSY BIT(1)
@@ -505,6 +552,17 @@
#define RTW89_PCI_MULTITAG 8
/* PCIE CFG register */
+#define RTW89_PCIE_L1_STS_V1 0x80
+#define RTW89_BCFG_LINK_SPEED_MASK GENMASK(19, 16)
+#define RTW89_PCIE_GEN1_SPEED 0x01
+#define RTW89_PCIE_GEN2_SPEED 0x02
+#define RTW89_PCIE_PHY_RATE 0x82
+#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+#define RTW89_PCIE_L1SS_STS_V1 0x0168
+#define RTW89_PCIE_BIT_ASPM_L11 BIT(3)
+#define RTW89_PCIE_BIT_ASPM_L12 BIT(2)
+#define RTW89_PCIE_BIT_PCI_L11 BIT(1)
+#define RTW89_PCIE_BIT_PCI_L12 BIT(0)
#define RTW89_PCIE_ASPM_CTRL 0x070F
#define RTW89_L1DLY_MASK GENMASK(5, 3)
#define RTW89_L0DLY_MASK GENMASK(2, 0)
@@ -516,8 +574,7 @@
#define RTW89_PCIE_CLK_CTRL 0x0725
#define RTW89_PCIE_RST_MSTATE 0x0B48
#define RTW89_PCIE_BIT_CFG_RST_MSTATE BIT(0)
-#define RTW89_PCIE_PHY_RATE 0x82
-#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+
#define INTF_INTGRA_MINREF_V1 90
#define INTF_INTGRA_HOSTREF_V1 100
@@ -527,11 +584,6 @@ enum rtw89_pcie_phy {
PCIE_PHY_GEN1_UNDEFINE = 0x7F,
};
-enum mac_ax_func_sw {
- MAC_AX_FUNC_DIS,
- MAC_AX_FUNC_EN,
-};
-
enum rtw89_pcie_l0sdly {
PCIE_L0SDLY_1US = 0,
PCIE_L0SDLY_2US = 1,
@@ -710,14 +762,15 @@ struct rtw89_pci_info {
u32 max_tag_num_mask;
u32 rxbd_rwptr_clr_reg;
u32 txbd_rwptr_clr2_reg;
- u32 dma_stop1_reg;
- u32 dma_stop2_reg;
- u32 dma_busy1_reg;
+ struct rtw89_reg_def dma_stop1;
+ struct rtw89_reg_def dma_stop2;
+ struct rtw89_reg_def dma_busy1;
u32 dma_busy2_reg;
u32 dma_busy3_reg;
u32 rpwm_addr;
u32 cpwm_addr;
+ u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 1532c0a6bbc4..6a6bdc652e09 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -14,23 +14,14 @@
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
- const struct rate_info *txrate = &report->txrate;
u32 bit_rate = report->bit_rate;
- u8 mcs;
/* lower than ofdm, do not aggregate */
if (bit_rate < 550)
return 1;
- /* prevent hardware rate fallback to G mode rate */
- if (txrate->flags & RATE_INFO_FLAGS_MCS)
- mcs = txrate->mcs & 0x07;
- else if (txrate->flags & (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS))
- mcs = txrate->mcs;
- else
- mcs = 0;
-
- if (mcs <= 2)
+ /* avoid AMSDU for legacy rate */
+ if (report->might_fallback_legacy)
return 1;
/* lower than 20M vht 2ss mcs8, make it small */
@@ -142,8 +133,8 @@ static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
- struct rtw89_hal *hal = &rtwdev->hal;
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
enum nl80211_band band;
u64 cfg_mask;
@@ -151,7 +142,7 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
if (!rtwsta->use_cfg_mask)
return -1;
- switch (hal->current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
band = NL80211_BAND_2GHZ;
cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
@@ -168,7 +159,7 @@ static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtw
RA_MASK_OFDM_RATES);
break;
default:
- rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
+ rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
return -1;
}
@@ -202,6 +193,40 @@ static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
+static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ bool *fix_giltf_en, u8 *fix_giltf)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
+ u8 band = chan->band_type;
+ enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
+ u8 he_gi = mask->control[nl_band].he_gi;
+ u8 he_ltf = mask->control[nl_band].he_ltf;
+
+ if (!rtwsta->use_cfg_mask)
+ return;
+
+ if (he_ltf == 2 && he_gi == 2) {
+ *fix_giltf = RTW89_GILTF_LGI_4XHE32;
+ } else if (he_ltf == 2 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_SGI_4XHE08;
+ } else if (he_ltf == 1 && he_gi == 1) {
+ *fix_giltf = RTW89_GILTF_2XHE16;
+ } else if (he_ltf == 1 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_2XHE08;
+ } else if (he_ltf == 0 && he_gi == 1) {
+ *fix_giltf = RTW89_GILTF_1XHE16;
+ } else if (he_ltf == 0 && he_gi == 0) {
+ *fix_giltf = RTW89_GILTF_1XHE08;
+ } else {
+ *fix_giltf_en = false;
+ return;
+ }
+
+ *fix_giltf_en = true;
+}
+
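/*
 * Illustrative summary, not part of the patch: the accepted (he_ltf, he_gi)
 * pairs above map to fixed GI/LTF settings as follows; any other combination
 * leaves *fix_giltf_en false so rate adaptation keeps choosing GI/LTF itself:
 *   (2, 2) -> RTW89_GILTF_LGI_4XHE32    (2, 0) -> RTW89_GILTF_SGI_4XHE08
 *   (1, 1) -> RTW89_GILTF_2XHE16        (1, 0) -> RTW89_GILTF_2XHE08
 *   (0, 1) -> RTW89_GILTF_1XHE16        (0, 0) -> RTW89_GILTF_1XHE08
 */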
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, bool csi)
{
@@ -209,6 +234,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
struct rtw89_ra_info *ra = &rtwsta->ra;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
u64 ra_mask = 0;
@@ -218,8 +245,10 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
u8 bw_mode = 0;
u8 stbc_en = 0;
u8 ldpc_en = 0;
+ u8 fix_giltf = 0;
u8 i;
bool sgi = false;
+ bool fix_giltf_en = false;
memset(ra, 0, sizeof(*ra));
/* Set the ra mask from sta's capability */
@@ -234,6 +263,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] &
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
ldpc_en = 1;
+ rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, &fix_giltf_en, &fix_giltf);
} else if (sta->deflink.vht_cap.vht_supported) {
u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
@@ -260,13 +290,13 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ldpc_en = 1;
}
- switch (rtwdev->hal.current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ];
- if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] <= 0xf)
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf)
mode |= RTW89_RA_MODE_CCK;
- else
- mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
+ if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0)
+ mode |= RTW89_RA_MODE_OFDM;
break;
case RTW89_BAND_5G:
ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
@@ -329,7 +359,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
ra->dcm_cap = 1;
- if (rate_pattern->enable) {
+ if (rate_pattern->enable && !vif->p2p) {
ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
ra_mask &= rate_pattern->ra_mask;
mode = rate_pattern->ra_mode;
@@ -343,6 +373,8 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
ra->en_sgi = sgi;
ra->ra_mask = ra_mask;
+ ra->fix_giltf_en = fix_giltf_en;
+ ra->fix_giltf = fix_giltf;
if (!csi)
return;
@@ -416,6 +448,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
struct ieee80211_supported_band *sband;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_phy_rate_pattern next_pattern = {0};
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u16 hw_rate_he[] = {RTW89_HW_RATE_HE_NSS1_MCS0,
RTW89_HW_RATE_HE_NSS2_MCS0,
RTW89_HW_RATE_HE_NSS3_MCS0,
@@ -428,7 +461,7 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
RTW89_HW_RATE_MCS8,
RTW89_HW_RATE_MCS16,
RTW89_HW_RATE_MCS24};
- u8 band = rtwdev->hal.current_band_type;
+ u8 band = chan->band_type;
enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
u8 tx_nss = rtwdev->hal.tx_nss;
u8 i;
@@ -542,12 +575,12 @@ void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
}
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw)
{
- enum rtw89_bandwidth cbw = param->bandwidth;
- u8 pri_ch = param->primary_chan;
- u8 central_ch = param->center_chan;
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
u8 txsc_idx = 0;
u8 tmp = 0;
@@ -1468,10 +1501,9 @@ EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
(txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \
})
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
s8 *byr;
u8 idx;
@@ -1538,11 +1570,10 @@ static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
}
}
-s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 band = rtwdev->hal.current_band_type;
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt = 0, sar;
@@ -1578,11 +1609,12 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
-#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
+#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch) \
do { \
u8 __i; \
for (__i = 0; __i < RTW89_BF_NUM; __i++) \
ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \
+ band, \
bw, ntx, \
rs, __i, \
(ch)); \
@@ -1590,64 +1622,75 @@ EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch, u8 pri_ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
- __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_CCK, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_CCK, ch);
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, pri_ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch);
}
static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch, u8 pri_ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
u8 i;
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, pri_ch);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
+ RTW89_CHANNEL_WIDTH_80,
ntx, RTW89_RS_MCS, ch);
- __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
for (i = 0; i < RTW89_BF_NUM; i++)
@@ -1656,7 +1699,7 @@ static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit *lmt,
- u8 ntx, u8 ch, u8 pri_ch)
+ u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
s8 val_0p5_n[RTW89_BF_NUM];
s8 val_0p5_p[RTW89_BF_NUM];
@@ -1665,60 +1708,75 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
u8 i;
/* fill ofdm section */
- __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_OFDM, pri_ch);
/* fill mcs 20m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 14);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 10);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch - 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 2);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 6);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 10);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], RTW89_CHANNEL_WIDTH_20,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
+ RTW89_CHANNEL_WIDTH_20,
ntx, RTW89_RS_MCS, ch + 14);
/* fill mcs 40m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 12);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
+ RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 12);
/* fill mcs 80m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
+ RTW89_CHANNEL_WIDTH_80,
ntx, RTW89_RS_MCS, ch - 8);
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], RTW89_CHANNEL_WIDTH_80,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
+ RTW89_CHANNEL_WIDTH_80,
ntx, RTW89_RS_MCS, ch + 8);
/* fill mcs 160m section */
- __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, RTW89_CHANNEL_WIDTH_160,
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
+ RTW89_CHANNEL_WIDTH_160,
ntx, RTW89_RS_MCS, ch);
/* fill mcs 40m 0p5 section */
- __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 4);
- __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 4);
for (i = 0; i < RTW89_BF_NUM; i++)
lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
/* fill mcs 40m 2p5 section */
- __fill_txpwr_limit_nonbf_bf(val_2p5_n, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch - 8);
- __fill_txpwr_limit_nonbf_bf(val_2p5_p, RTW89_CHANNEL_WIDTH_40,
+ __fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
ntx, RTW89_RS_MCS, ch + 8);
for (i = 0; i < RTW89_BF_NUM; i++)
@@ -1726,37 +1784,41 @@ static void rtw89_phy_fill_txpwr_limit_160m(struct rtw89_dev *rtwdev,
}
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
u8 ntx)
{
- u8 pri_ch = rtwdev->hal.current_primary_channel;
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 band = chan->band_type;
+ u8 pri_ch = chan->primary_channel;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
memset(lmt, 0, sizeof(*lmt));
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, band, ntx, ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch, pri_ch);
+ rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch, pri_ch);
+ rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
case RTW89_CHANNEL_WIDTH_160:
- rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, ntx, ch, pri_ch);
+ rtw89_phy_fill_txpwr_limit_160m(rtwdev, lmt, band, ntx, ch,
+ pri_ch);
break;
}
}
EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
-static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- u8 band = rtwdev->hal.current_band_type;
u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
u8 regd = rtw89_regd_get(rtwdev, band);
s8 lmt_ru = 0, sar;
@@ -1794,85 +1856,106 @@ static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
static void
rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch);
}
static void
rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 2);
- lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 2);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 2);
- lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 2);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 2);
- lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 2);
}
static void
rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
- lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 6);
- lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch - 2);
- lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 2);
- lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU26,
ntx, ch + 6);
- lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 6);
- lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch - 2);
- lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 2);
- lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU52,
ntx, ch + 6);
- lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 6);
- lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch - 2);
- lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 2);
- lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
+ RTW89_RU106,
ntx, ch + 6);
}
static void
rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
struct rtw89_txpwr_limit_ru *lmt_ru,
- u8 ntx, u8 ch)
+ u8 band, u8 ntx, u8 ch)
{
static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
int i;
static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM);
for (i = 0; i < RTW89_RU_SEC_NUM; i++) {
- lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
RTW89_RU26,
ntx,
ch + ofst[i]);
- lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
RTW89_RU52,
ntx,
ch + ofst[i]);
- lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev,
+ lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
RTW89_RU106,
ntx,
ch + ofst[i]);
@@ -1880,26 +1963,32 @@ rtw89_phy_fill_txpwr_limit_ru_160m(struct rtw89_dev *rtwdev,
}
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx)
{
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
memset(lmt_ru, 0, sizeof(*lmt_ru));
switch (bw) {
case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
case RTW89_CHANNEL_WIDTH_160:
- rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, ntx, ch);
+ rtw89_phy_fill_txpwr_limit_ru_160m(rtwdev, lmt_ru, band, ntx,
+ ch);
break;
}
}
@@ -1920,6 +2009,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
u8 mode, rate, bw, giltf, mac_id;
u16 legacy_bitrate;
bool valid;
+ u8 mcs = 0;
mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data);
if (mac_id != rtwsta->mac_id)
@@ -1936,7 +2026,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
return;
}
- memset(ra_report, 0, sizeof(*ra_report));
+ memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));
switch (mode) {
case RTW89_RA_RPT_MODE_LEGACY:
@@ -1952,6 +2042,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.mcs = rate;
if (giltf)
ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ mcs = ra_report->txrate.mcs & 0x07;
break;
case RTW89_RA_RPT_MODE_VHT:
ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
@@ -1959,6 +2050,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1;
if (giltf)
ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ mcs = ra_report->txrate.mcs;
break;
case RTW89_RA_RPT_MODE_HE:
ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
@@ -1970,6 +2062,7 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
else
ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ mcs = ra_report->txrate.mcs;
break;
}
@@ -1977,8 +2070,9 @@ static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
- sta->max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
- rtwsta->max_agg_wait = sta->max_rc_amsdu_len / 1500 - 1;
+ ra_report->might_fallback_legacy = mcs <= 2;
+ sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
+ rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1;
}
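
A sketch of the per-mode MCS normalization feeding the new might_fallback_legacy flag above; example_report_mcs() is a hypothetical helper, and the threshold (fall back when the per-stream MCS is 2 or lower) is taken from the hunk itself.

static u8 example_report_mcs(const struct rate_info *txrate)
{
	/* HT rates encode the stream index in the upper MCS bits, so only
	 * MCS modulo 8 is meaningful for the legacy-fallback threshold.
	 */
	if (txrate->flags & RATE_INFO_FLAGS_MCS)
		return txrate->mcs & 0x07;

	return txrate->mcs;	/* VHT/HE MCS is already per stream */
}
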
static void
@@ -3247,10 +3341,11 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
struct rtw89_dig_info *dig = &rtwdev->dig;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
- switch (rtwdev->hal.current_band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
dig->lna_gain = dig->lna_gain_g;
dig->tia_gain = dig->tia_gain_g;
@@ -3410,26 +3505,32 @@ static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_LNA_INIT,
- B_PATH0_LNA_INIT_IDX_MSK, lna_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_LNA_INIT,
- B_PATH1_LNA_INIT_IDX_MSK, lna_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
+ dig_regs->p0_lna_init.mask, lna_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
+ dig_regs->p1_lna_init.mask, lna_idx);
}
static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_TIA_INIT,
- B_PATH0_TIA_INIT_IDX_MSK, tia_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_TIA_INIT,
- B_PATH1_TIA_INIT_IDX_MSK, tia_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
+ dig_regs->p0_tia_init.mask, tia_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
+ dig_regs->p1_tia_init.mask, tia_idx);
}
static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
- rtw89_phy_write32_mask(rtwdev, R_PATH0_RXB_INIT,
- B_PATH0_RXB_INIT_IDX_MSK, rxb_idx);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_RXB_INIT,
- B_PATH1_RXB_INIT_IDX_MSK, rxb_idx);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
+ dig_regs->p0_rxb_init.mask, rxb_idx);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
+ dig_regs->p1_rxb_init.mask, rxb_idx);
}
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
@@ -3443,21 +3544,19 @@ static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
set.lna_idx, set.tia_idx, set.rxb_idx);
}
-static const struct rtw89_reg_def sdagc_config[4] = {
- {R_PATH0_P20_FOLLOW_BY_PAGCUGC, B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH0_S20_FOLLOW_BY_PAGCUGC, B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH1_P20_FOLLOW_BY_PAGCUGC, B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
- {R_PATH1_S20_FOLLOW_BY_PAGCUGC, B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
-};
-
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
bool enable)
{
- u8 i = 0;
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- for (i = 0; i < ARRAY_SIZE(sdagc_config); i++)
- rtw89_phy_write32_mask(rtwdev, sdagc_config[i].addr,
- sdagc_config[i].mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
+ dig_regs->p0_p20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
+ dig_regs->p0_s20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
+ dig_regs->p1_p20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
+ dig_regs->p1_s20_pagcugc_en.mask, enable);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
@@ -3483,7 +3582,9 @@ static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
bool enable)
{
- enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
+ enum rtw89_bandwidth cbw = chan->band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
u8 ofdm_cca_th;
@@ -3525,10 +3626,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
- rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
- pd_val);
- rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
- B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_lower_bound_mask, pd_val);
+ rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_spatial_reuse_en, enable);
if (!rtwdev->hal.support_cckpd)
return;
@@ -3604,6 +3705,62 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}
+static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool *done = data;
+ u8 rssi_a, rssi_b;
+ u32 candidate;
+
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls)
+ return;
+
+ if (*done)
+ return;
+
+ *done = true;
+
+ rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]);
+ rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]);
+
+ if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_A;
+ else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_B;
+ else
+ return;
+
+ if (hal->antenna_tx == candidate)
+ return;
+
+ hal->antenna_tx = candidate;
+ rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta);
+
+ if (hal->antenna_tx == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
+ } else if (hal->antenna_tx == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
+ }
+}
+
+void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool done = false;
+
+ if (!hal->tx_path_diversity)
+ return;
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_tx_path_div_sta_iter,
+ &done);
+}
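
The decision rule in rtw89_phy_tx_path_div_sta_iter() boils down to a hysteresis comparison of the per-path averaged RSSI; a minimal sketch follows, with example_pick_tx_path() being illustrative only (the RTW89_TX_DIV_RSSI_RAW_TH value is not shown in this hunk).

static u32 example_pick_tx_path(u8 rssi_a, u8 rssi_b, u32 cur, u8 th)
{
	if (rssi_a > rssi_b + th)
		return RF_A;
	if (rssi_b > rssi_a + th)
		return RF_B;

	return cur;	/* inside the hysteresis window: keep the current path */
}
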
+
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
rtw89_phy_ccx_top_setting_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index e20636f54b55..ee3bc5e111e1 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -56,7 +56,7 @@
#define CFO_TRK_STOP_TH (2 << 2)
#define CFO_SW_COMP_FINE_TUNE (2 << 2)
#define CFO_PERIOD_CNT 15
-#define CFO_BOUND 32
+#define CFO_BOUND 64
#define CFO_TP_UPPER 100
#define CFO_TP_LOWER 50
#define CFO_COMP_PERIOD 250
@@ -439,7 +439,7 @@ rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl);
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
@@ -460,15 +460,17 @@ void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
-s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band,
const struct rtw89_rate_desc *rate_desc);
void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit *lmt,
u8 ntx);
void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
struct rtw89_txpwr_limit_ru *lmt_ru,
u8 ntx);
-s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch);
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
@@ -489,6 +491,7 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val);
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
+void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx mac_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index a90b33720588..bf41a1141679 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -59,8 +59,11 @@ static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
rtw89_mac_power_mode_change(rtwdev, enter);
}
-static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
+static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
+ if (rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
+ return;
+
if (!rtwdev->ps_mode)
return;
@@ -111,23 +114,23 @@ void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
__rtw89_leave_ps_mode(rtwdev);
}
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
lockdep_assert_held(&rtwdev->mutex);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, mac_id);
- __rtw89_enter_ps_mode(rtwdev);
+ __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_ps_mode(rtwdev, rtwvif);
}
static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
return;
- __rtw89_leave_ps_mode(rtwdev);
__rtw89_leave_lps(rtwdev, rtwvif->mac_id);
}
@@ -140,6 +143,8 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
+ __rtw89_leave_ps_mode(rtwdev);
+
rtw89_for_each_rtwvif(rtwdev, rtwvif)
rtw89_leave_lps_vif(rtwdev, rtwvif);
}
@@ -178,3 +183,64 @@ void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl)
if (btc_ctrl)
rtw89_leave_lps(rtwdev);
}
+
+static void rtw89_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ enum rtw89_p2pps_action act)
+{
+ if (act == RTW89_P2P_ACT_UPDATE || act == RTW89_P2P_ACT_REMOVE)
+ return;
+
+ if (act == RTW89_P2P_ACT_INIT)
+ rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, true);
+ else if (act == RTW89_P2P_ACT_TERMINATE)
+ rtw89_fw_h2c_tsf32_toggle(rtwdev, rtwvif, false);
+}
+
+static void rtw89_p2p_disable_all_noa(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ enum rtw89_p2pps_action act;
+ u8 noa_id;
+
+ if (rtwvif->last_noa_nr == 0)
+ return;
+
+ for (noa_id = 0; noa_id < rtwvif->last_noa_nr; noa_id++) {
+ if (noa_id == rtwvif->last_noa_nr - 1)
+ act = RTW89_P2P_ACT_TERMINATE;
+ else
+ act = RTW89_P2P_ACT_REMOVE;
+ rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+ rtw89_fw_h2c_p2p_act(rtwdev, vif, NULL, act, noa_id);
+ }
+}
+
+static void rtw89_p2p_update_noa(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_p2p_noa_desc *desc;
+ enum rtw89_p2pps_action act;
+ u8 noa_id;
+
+ for (noa_id = 0; noa_id < RTW89_P2P_MAX_NOA_NUM; noa_id++) {
+ desc = &vif->bss_conf.p2p_noa_attr.desc[noa_id];
+ if (!desc->count || !desc->duration)
+ break;
+
+ if (noa_id == 0)
+ act = RTW89_P2P_ACT_INIT;
+ else
+ act = RTW89_P2P_ACT_UPDATE;
+ rtw89_tsf32_toggle(rtwdev, rtwvif, act);
+ rtw89_fw_h2c_p2p_act(rtwdev, vif, desc, act, noa_id);
+ }
+ rtwvif->last_noa_nr = noa_id;
+}
+
+void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ rtw89_p2p_disable_all_noa(rtwdev, vif);
+ rtw89_p2p_update_noa(rtwdev, vif);
+}
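
The NoA programming above follows a fixed action sequence: tear down previously advertised descriptors (REMOVE for all but the last, TERMINATE for the last, which also stops the TSF32 toggle), then program the new set (INIT for the first, UPDATE for the rest). A compact restatement, with example_noa_act() being a hypothetical helper:

static enum rtw89_p2pps_action example_noa_act(bool setup, u8 noa_id, u8 last_nr)
{
	if (setup)
		return noa_id == 0 ? RTW89_P2P_ACT_INIT : RTW89_P2P_ACT_UPDATE;

	return noa_id == last_nr - 1 ? RTW89_P2P_ACT_TERMINATE :
				       RTW89_P2P_ACT_REMOVE;
}
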
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
index a184b68994aa..0feae3991623 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.h
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -5,12 +5,13 @@
#ifndef __RTW89_PS_H_
#define __RTW89_PS_H_
-void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id);
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_leave_lps(struct rtw89_dev *rtwdev);
void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
void rtw89_enter_ips(struct rtw89_dev *rtwdev);
void rtw89_leave_ips(struct rtw89_dev *rtwdev);
void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
+void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index ebf28719d935..ca20bb024b40 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -51,9 +51,6 @@
#define B_AX_EF_POR BIT(10)
#define B_AX_EF_CELL_SEL_MASK GENMASK(9, 8)
-#define R_AX_SPSLDO_ON_CTRL0 0x0200
-#define B_AX_OCP_L1_MASK GENMASK(15, 13)
-
#define R_AX_EFUSE_CTRL 0x0030
#define B_AX_EF_MODE_SEL_MASK GENMASK(31, 30)
#define B_AX_EF_RDY BIT(29)
@@ -143,6 +140,18 @@
#define R_AX_PMC_DBG_CTRL2 0x00CC
#define B_AX_SYSON_DIS_PMCR_AX_WRMSK BIT(2)
+#define R_AX_PCIE_MIO_INTF 0x00E4
+#define B_AX_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
+#define B_AX_PCIE_MIO_BYIOREG BIT(13)
+#define B_AX_PCIE_MIO_RE BIT(12)
+#define B_AX_PCIE_MIO_WE_MASK GENMASK(11, 8)
+#define MIO_WRITE_BYTE_ALL 0xF
+#define B_AX_PCIE_MIO_ADDR_MASK GENMASK(7, 0)
+#define MIO_ADDR_PAGE_MASK GENMASK(12, 8)
+
+#define R_AX_PCIE_MIO_INTD 0x00E8
+#define B_AX_PCIE_MIO_DATA_MASK GENMASK(31, 0)
+
#define R_AX_SYS_CFG1 0x00F0
#define B_AX_CHIP_VER_MASK GENMASK(15, 12)
@@ -191,6 +200,12 @@
#define R_AX_UDM2 0x01F8
#define R_AX_UDM3 0x01FC
+#define R_AX_SPS_DIG_ON_CTRL0 0x0200
+#define B_AX_VREFPFM_L_MASK GENMASK(25, 22)
+#define B_AX_REG_ZCDC_H_MASK GENMASK(18, 17)
+#define B_AX_OCP_L1_MASK GENMASK(15, 13)
+#define B_AX_VOL_L1_MASK GENMASK(3, 0)
+
#define R_AX_LDO_AON_CTRL0 0x0218
#define B_AX_PD_REGU_L BIT(16)
@@ -383,6 +398,7 @@
#define R_AX_PHYREG_SET 0x8040
#define PHYREG_SET_ALL_CYCLE 0x8
+#define PHYREG_SET_XYN_CYCLE 0xE
#define R_AX_HD0IMR 0x8110
#define B_AX_WDT_PTFM_INT_EN BIT(5)
@@ -467,6 +483,7 @@
#define R_AX_LTR_CTRL_0 0x8410
#define B_AX_LTR_SPACE_IDX_MASK GENMASK(13, 12)
#define B_AX_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8)
+#define B_AX_LTR_WD_NOEMP_CHK BIT(6)
#define B_AX_APP_LTR_ACT BIT(5)
#define B_AX_APP_LTR_IDLE BIT(4)
#define B_AX_LTR_EN BIT(1)
@@ -1024,15 +1041,13 @@
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
#define B_AX_WDE_IMR_SET (B_AX_WDE_BUFREQ_QTAID_ERR_INT_EN | \
- B_AX_WDE_BUFREQ_SIZE0_INT_EN | \
- B_AX_WDE_BUFREQ_SIZELMT_INT_EN | \
- B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN_V1 | \
- B_AX_WDE_GETNPG_STRPG_ERR_INT_EN_V1 | \
- B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN_V1 | \
- B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN_V1 | \
+ B_AX_WDE_BUFREQ_UNAVAL_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_INVLD_PKTID_ERR_INT_EN | \
+ B_AX_WDE_BUFRTN_SIZE_ERR_INT_EN | \
+ B_AX_WDE_BUFREQ_SRCHTAILPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_STRPG_ERR_INT_EN | \
+ B_AX_WDE_GETNPG_PGOFST_ERR_INT_EN | \
+ B_AX_WDE_BUFMGN_FRZTO_ERR_INT_EN | \
B_AX_WDE_QUE_CMDTYPE_ERR_INT_EN | \
B_AX_WDE_QUE_DSTQUEID_ERR_INT_EN | \
B_AX_WDE_QUE_SRCQUEID_ERR_INT_EN | \
@@ -1043,10 +1058,7 @@
B_AX_WDE_QUEMGN_FRZTO_ERR_INT_EN | \
B_AX_WDE_DATCHN_ARBT_ERR_INT_EN | \
B_AX_WDE_DATCHN_NULLPG_ERR_INT_EN | \
- B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN | \
- B_AX_WDE_DATCHN_RRDY_ERR_INT_EN | \
- B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN | \
- B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN)
+ B_AX_WDE_DATCHN_FRZTO_ERR_INT_EN)
#define B_AX_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_AX_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -1826,6 +1838,13 @@
#define B_AX_TXSC_40M_MASK GENMASK(7, 4)
#define B_AX_TXSC_20M_MASK GENMASK(3, 0)
+#define R_AX_PTCL_RRSR1 0xC090
+#define R_AX_PTCL_RRSR1_C1 0xE090
+#define B_AX_RRSR_RATE_EN_MASK GENMASK(11, 8)
+#define RRSR_OFDM_CCK_EN 3
+#define B_AX_RSC_MASK GENMASK(7, 6)
+#define B_AX_RRSR_CCK_MASK GENMASK(3, 0)
+
#define R_AX_CMAC_ERR_IMR 0xC160
#define R_AX_CMAC_ERR_IMR_C1 0xE160
#define B_AX_WMAC_TX_ERR_IND_EN BIT(7)
@@ -1882,6 +1901,7 @@
#define B_AX_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
#define B_AX_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
#define SIFS_MACTXEN_T1 0x47
+#define SIFS_MACTXEN_T1_V1 0x41
#define R_AX_CCA_CFG_0 0xC340
#define R_AX_CCA_CFG_0_C1 0xE340
@@ -2098,6 +2118,8 @@
#define R_AX_TBTT_SHIFT_P3 0xC4E8
#define R_AX_TBTT_SHIFT_P4 0xC528
#define B_AX_TBTT_SHIFT_OFST_MASK GENMASK(11, 0)
+#define B_AX_TBTT_SHIFT_OFST_SIGN BIT(11)
+#define B_AX_TBTT_SHIFT_OFST_MAG GENMASK(10, 0)
#define R_AX_BCN_CNT_TMR_P0 0xC434
#define R_AX_BCN_CNT_TMR_P1 0xC474
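
The new B_AX_TBTT_SHIFT_OFST_SIGN/_MAG masks split the 12-bit offset field into sign and magnitude. A hedged sketch of composing that field, assuming sign/magnitude encoding as the mask names suggest (example_tbtt_shift_val() is not a driver helper, and the offset units are not stated here):

static u32 example_tbtt_shift_val(s16 offset)
{
	u32 val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, abs(offset));

	if (offset < 0)
		val |= B_AX_TBTT_SHIFT_OFST_SIGN;

	return val;	/* written under B_AX_TBTT_SHIFT_OFST_MASK */
}
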
@@ -2258,6 +2280,7 @@
#define B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN BIT(8)
#define B_AX_FSM1_TIMEOUT_ERR_INT_EN BIT(1)
#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+#define B_AX_PTCL_IMR_CLR_ALL GENMASK(31, 0)
#define B_AX_PTCL_IMR_CLR (B_AX_FSM_TIMEOUT_ERR_INT_EN | \
B_AX_F2PCMDRPT_FULL_DROP_ERR_INT_EN | \
B_AX_TXPRT_FULL_DROP_ERR_INT_EN | \
@@ -2315,6 +2338,28 @@
#define B_AX_DLE_IMR_SET (B_AX_RXSTS_FSM_HANG_ERROR_IMR | \
B_AX_RXDATA_FSM_HANG_ERROR_IMR)
+#define R_AX_RXDMA_CTRL_0 0xC804
+#define R_AX_RXDMA_CTRL_0_C1 0xE804
+#define B_AX_RXDMA_DBGOUT_EN BIT(31)
+#define B_AX_RXDMA_DBG_SEL_MASK GENMASK(30, 29)
+#define B_AX_RXDMA_FIFO_DBG_SEL_MASK GENMASK(28, 25)
+#define B_AX_RXDMA_DEFAULT_PAGE_MASK GENMASK(22, 21)
+#define B_AX_RXDMA_BUFF_REQ_PRI_MASK GENMASK(20, 19)
+#define B_AX_RXDMA_TGT_QUEID_MASK GENMASK(18, 13)
+#define B_AX_RXDMA_TGT_PRID_MASK GENMASK(12, 10)
+#define B_AX_RXDMA_DIS_CSI_RELEASE BIT(9)
+#define B_AX_RXDMA_DIS_RXSTS_WAIT_PTR_CLR BIT(7)
+#define B_AX_RXDMA_DIS_CSI_WAIT_PTR_CLR BIT(6)
+#define B_AX_RXSTS_PTR_FULL_MODE BIT(5)
+#define B_AX_CSI_PTR_FULL_MODE BIT(4)
+#define B_AX_RU3_PTR_FULL_MODE BIT(3)
+#define B_AX_RU2_PTR_FULL_MODE BIT(2)
+#define B_AX_RU1_PTR_FULL_MODE BIT(1)
+#define B_AX_RU0_PTR_FULL_MODE BIT(0)
+#define RX_FULL_MODE (B_AX_RU0_PTR_FULL_MODE | B_AX_RU1_PTR_FULL_MODE | \
+ B_AX_RU2_PTR_FULL_MODE | B_AX_RU3_PTR_FULL_MODE | \
+ B_AX_CSI_PTR_FULL_MODE | B_AX_RXSTS_PTR_FULL_MODE)
+
#define R_AX_RXDMA_PKT_INFO_0 0xC814
#define R_AX_RXDMA_PKT_INFO_1 0xC818
#define R_AX_RXDMA_PKT_INFO_2 0xC81C
@@ -2553,6 +2598,20 @@
#define WMAC_SPEC_SIFS_OFDM_52C 0x11
#define WMAC_SPEC_SIFS_CCK 0xA
+#define R_AX_TRXPTCL_RRSR_CTL_0 0xCC08
+#define R_AX_TRXPTCL_RRSR_CTL_0_C1 0xEC08
+#define B_AX_RESP_TX_MACID_CCA_TH_EN BIT(31)
+#define B_AX_RESP_TX_PWRMODE_MASK GENMASK(30, 28)
+#define B_AX_FTM_RRSR_RATE_EN_MASK GENMASK(27, 24)
+#define B_AX_NESS_MASK GENMASK(23, 22)
+#define B_AX_WMAC_RESP_DOPPLEB_AX_EN BIT(21)
+#define B_AX_WMAC_RESP_DCM_EN BIT(20)
+#define B_AX_WMAC_RRSB_AX_CCK_MASK GENMASK(19, 16)
+#define B_AX_WMAC_RESP_RATE_EN_MASK GENMASK(15, 12)
+#define B_AX_WMAC_RESP_RSC_MASK GENMASK(11, 10)
+#define B_AX_WMAC_RESP_REF_RATE_SEL BIT(9)
+#define B_AX_WMAC_RESP_REF_RATE_MASK GENMASK(8, 0)
+
#define R_AX_MAC_LOOPBACK 0xCC20
#define R_AX_MAC_LOOPBACK_C1 0xEC20
#define B_AX_MACLBK_EN BIT(0)
@@ -2565,6 +2624,7 @@
#define B_AX_WMAC_TF_UP_NAV_EN BIT(16)
#define B_AX_WMAC_NAV_UPPER_MASK GENMASK(15, 8)
#define NAV_12MS 0xBC
+#define NAV_25MS 0xC4
#define B_AX_WMAC_RTS_RST_DUR_MASK GENMASK(7, 0)
#define R_AX_RXTRIG_TEST_USER_2 0xCCB0
@@ -2968,18 +3028,18 @@
#define R_AX_PATH_COM0 0xD800
#define AX_PATH_COM0_DFVAL 0x00000000
-#define AX_PATH_COM0_PATHA 0x08888880
-#define AX_PATH_COM0_PATHB 0x11111100
+#define AX_PATH_COM0_PATHA 0x08889880
+#define AX_PATH_COM0_PATHB 0x11111900
#define AX_PATH_COM0_PATHAB 0x19999980
#define R_AX_PATH_COM1 0xD804
#define AX_PATH_COM1_DFVAL 0x00000000
-#define AX_PATH_COM1_PATHA 0x11111111
-#define AX_PATH_COM1_PATHB 0x22222222
+#define AX_PATH_COM1_PATHA 0x13111111
+#define AX_PATH_COM1_PATHB 0x23222222
#define AX_PATH_COM1_PATHAB 0x33333333
#define R_AX_PATH_COM2 0xD808
#define AX_PATH_COM2_DFVAL 0x00000000
-#define AX_PATH_COM2_PATHA 0x01209111
-#define AX_PATH_COM2_PATHB 0x01209222
+#define AX_PATH_COM2_PATHA 0x01209313
+#define AX_PATH_COM2_PATHB 0x01209323
#define AX_PATH_COM2_PATHAB 0x01209333
#define R_AX_PATH_COM3 0xD80C
#define AX_PATH_COM3_DFVAL 0x49249249
@@ -3125,6 +3185,18 @@
#define B_AX_GNT_WL_BB_VAL BIT(1)
#define B_AX_GNT_WL_BB_SWCTRL BIT(0)
+#define R_AX_GNT_VAL 0x0054
+#define B_AX_GNT_BT_RFC_S1_STA BIT(5)
+#define B_AX_GNT_WL_RFC_S1_STA BIT(4)
+#define B_AX_GNT_BT_RFC_S0_STA BIT(3)
+#define B_AX_GNT_WL_RFC_S0_STA BIT(2)
+
+#define R_AX_GNT_VAL_V1 0xDA4C
+#define B_AX_GNT_BT_RFC_S1 BIT(4)
+#define B_AX_GNT_BT_RFC_S0 BIT(3)
+#define B_AX_GNT_WL_RFC_S1 BIT(2)
+#define B_AX_GNT_WL_RFC_S0 BIT(1)
+
#define R_AX_TDMA_MODE 0xDA4C
#define R_AX_TDMA_MODE_C1 0xFA4C
#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
@@ -3356,6 +3428,7 @@
#define RR_DCK_FINE BIT(1)
#define RR_DCK_LV BIT(0)
#define RR_DCK1 0x93
+#define RR_DCK1_DONE BIT(5)
#define RR_DCK1_CLR GENMASK(3, 0)
#define RR_DCK1_SEL BIT(3)
#define RR_DCK2 0x94
@@ -3431,8 +3504,9 @@
#define R_MAC_PIN_SEL 0x0734
#define B_CH_IDX_SEG0 GENMASK(23, 16)
#define R_PLCP_HISTOGRAM 0x0738
-#define B_STS_DIS_TRIG_BY_BRK BIT(2)
+#define B_STS_PARSING_TIME GENMASK(19, 16)
#define B_STS_DIS_TRIG_BY_FAIL BIT(3)
+#define B_STS_DIS_TRIG_BY_BRK BIT(2)
#define R_PHY_STS_BITMAP_ADDR_START R_PHY_STS_BITMAP_SEARCH_FAIL
#define B_PHY_STS_BITMAP_ADDR_MASK GENMASK(6, 2)
#define R_PHY_STS_BITMAP_SEARCH_FAIL 0x073C
@@ -3542,6 +3616,9 @@
#define B_P0_RXCK_VAL GENMASK(18, 16)
#define B_P0_TXCK_ON BIT(15)
#define B_P0_TXCK_VAL GENMASK(14, 12)
+#define R_P0_RFMODE 0x12AC
+#define B_P0_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
+#define B_P0_RFMODE_MUX GENMASK(11, 4)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
#define R_S0_RXDC 0x12D4
@@ -3648,6 +3725,9 @@
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_S1_HW_SI_DIS 0x3200
#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
+#define R_P1_RFMODE 0x32AC
+#define B_P1_RFMODE_ORI_TXRX_FTM_TX GENMASK(31, 4)
+#define B_P1_RFMODE_MUX GENMASK(11, 4)
#define R_P1_DBGMOD 0x32B8
#define B_P1_DBGMOD_ON BIT(30)
#define R_S1_RXDC 0x32D4
@@ -3663,6 +3743,8 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_MUIC 0x40F8
+#define B_MUIC_EN BIT(0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(1, 0)
#define R_SEG0CSI 0x42AC
@@ -3745,15 +3827,22 @@
#define R_PATH0_RXB_INIT 0x4658
#define B_PATH0_RXB_INIT_IDX_MSK GENMASK(9, 5)
#define R_PATH0_LNA_INIT 0x4668
+#define R_PATH0_LNA_INIT_V1 0x472C
#define B_PATH0_LNA_INIT_IDX_MSK GENMASK(26, 24)
#define R_PATH0_BTG 0x466C
#define B_PATH0_BTG_SHEN GENMASK(18, 17)
#define R_PATH0_TIA_INIT 0x4674
#define B_PATH0_TIA_INIT_IDX_MSK BIT(17)
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH0_RXB_INIT_V1 0x46A8
+#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
#define R_PATH0_G_LNA6_OP1DB_V1 0x4688
#define B_PATH0_G_LNA6_OP1DB_V1 GENMASK(31, 24)
#define R_PATH0_G_TIA0_LNA6_OP1DB_V1 0x4694
@@ -3780,7 +3869,10 @@
#define R_P0_AGC_CTL 0x4730
#define B_P0_AGC_EN BIT(31)
#define R_PATH1_LNA_INIT 0x473C
+#define R_PATH1_LNA_INIT_V1 0x4A80
#define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24)
+#define R_PATH0_TIA_INIT_V1 0x473C
+#define B_PATH0_TIA_INIT_IDX_MSK_V1 BIT(9)
#define R_PATH1_TIA_INIT 0x4748
#define B_PATH1_TIA_INIT_IDX_MSK BIT(17)
#define R_PATH1_BTG 0x4740
@@ -3790,8 +3882,12 @@
#define R_PATH1_G_LNA6_OP1DB_V1 0x476C
#define B_PATH1_G_LNA6_OP1DB_V1 GENMASK(31, 24)
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
@@ -3807,6 +3903,8 @@
#define B_P1_NBIIDX_VAL GENMASK(11, 0)
#define B_P1_NBIIDX_NOTCH_EN BIT(12)
#define R_SEG0R_PD 0x481C
+#define R_SEG0R_PD_V1 0x4860
+#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
#define R_2P4G_BAND 0x4970
@@ -3830,8 +3928,12 @@
#define B_BK_FC0_INV_MSK_V1 GENMASK(18, 0)
#define R_CCK_FC0_INV_V1 0x4A20
#define B_CCK_FC0_INV_MSK_V1 GENMASK(18, 0)
+#define R_PATH1_RXB_INIT_V1 0x4A5C
+#define B_PATH1_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
#define R_P1_AGC_CTL 0x4A9C
#define B_P1_AGC_EN BIT(31)
+#define R_PATH1_TIA_INIT_V1 0x4AA8
+#define B_PATH1_TIA_INIT_IDX_MSK_V1 BIT(9)
#define R_PATH0_RXBB_V1 0x4AD4
#define B_PATH0_RXBB_MSK_V1 GENMASK(31, 0)
#define R_PATH1_RXBB_V1 0x4AE0
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 20c7afd3e70f..6e5a740b128f 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -346,7 +346,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
rtw89_debug_regd(rtwdev, rtwdev->regd, "get from initiator %d, alpha2",
request->initiator);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
exit:
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 81bd0c4fe21b..784147680353 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -431,6 +431,7 @@ static const struct rtw89_imr_info rtw8852a_imr_info = {
.cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET,
.other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR,
.other_disp_imr_set = 0,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR_ISR,
.bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR_ISR,
.bbrpt_err_imr_set = 0,
.bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR_ISR,
@@ -453,6 +454,31 @@ static const struct rtw89_imr_info rtw8852a_imr_info = {
.tmac_imr_set = B_AX_TMAC_IMR_SET,
};
+static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK,
+ .p0_lna_init = {R_PATH0_LNA_INIT, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT, B_PATH0_TIA_INIT_IDX_MSK},
+ .p1_tia_init = {R_PATH1_TIA_INIT, B_PATH1_TIA_INIT_IDX_MSK},
+ .p0_rxb_init = {R_PATH0_RXB_INIT, B_PATH0_RXB_INIT_IDX_MSK},
+ .p1_rxb_init = {R_PATH1_RXB_INIT, B_PATH1_RXB_INIT_IDX_MSK},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
struct rtw8852a_efuse *map)
{
@@ -660,7 +686,7 @@ static void rtw8852a_power_trim(struct rtw89_dev *rtwdev)
}
static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 mac_idx)
{
u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
@@ -669,20 +695,20 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
u8 txsc20 = 0, txsc40 = 0;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_40);
fallthrough;
case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_20);
break;
default:
break;
}
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_80:
rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
@@ -699,7 +725,7 @@ static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
break;
}
- if (param->center_chan > 14)
+ if (chan->channel > 14)
rtw89_write8_set(rtwdev, chk_rate,
B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
else
@@ -1102,11 +1128,12 @@ static void rtw8852a_bb_sethw(struct rtw89_dev *rtwdev)
if (rtwdev->hal.cv <= CHIP_CCV) {
rtw89_phy_write32_set(rtwdev, R_RSTB_WATCH_DOG, B_P0_RSTB_WATCH_DOG);
rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_1, 0x864FA000);
- rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x3F);
+ rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x43F);
rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_3, 0x7FFF);
rtw89_phy_write32_set(rtwdev, R_SPOOF_ASYNC_RST, B_SPOOF_ASYNC_RST);
rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_PLCP_HISTOGRAM, B_STS_PARSING_TIME);
}
rtw89_phy_write32_mask(rtwdev, R_CFO_TRK0, B_CFO_TRK_MSK, 0x1f);
rtw89_phy_write32_mask(rtwdev, R_CFO_TRK1, B_CFO_TRK_MSK, 0x0c);
@@ -1130,35 +1157,38 @@ static void rtw8852a_bbrst_for_rfk(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- bool cck_en = param->center_chan <= 14;
- u8 pri_ch_idx = param->pri_ch_idx;
+ bool cck_en = chan->channel <= 14;
+ u8 pri_ch_idx = chan->pri_ch_idx;
if (cck_en)
- rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan,
- param->primary_chan, param->bandwidth);
+ rtw8852a_ctrl_sco_cck(rtwdev, chan->channel,
+ chan->primary_channel,
+ chan->band_width);
- rtw8852a_ctrl_ch(rtwdev, param->center_chan, phy_idx);
- rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ rtw8852a_ctrl_ch(rtwdev, chan->channel, phy_idx);
+ rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
if (cck_en) {
rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
} else {
rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
rtw8852a_bbrst_for_rfk(rtwdev, phy_idx);
}
- rtw8852a_spur_elimination(rtwdev, param->center_chan);
+ rtw8852a_spur_elimination(rtwdev, chan->channel);
rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0,
- param->primary_chan);
+ chan->primary_channel);
rtw8852a_bb_reset_all(rtwdev, phy_idx);
}
static void rtw8852a_set_channel(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *params)
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_channel_mac(rtwdev, params, RTW89_MAC_0);
- rtw8852a_set_channel_bb(rtwdev, params, RTW89_PHY_0);
+ rtw8852a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852a_set_channel_bb(rtwdev, chan, phy_idx);
}
static void rtw8852a_dfs_en(struct rtw89_dev *rtwdev, bool en)
@@ -1209,25 +1239,27 @@ static void rtw8852a_adc_en(struct rtw89_dev *rtwdev, bool en)
}
static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- u8 phy_idx = RTW89_PHY_0;
-
if (enter) {
- rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, &p->tx_en,
+ RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852a_dfs_en(rtwdev, false);
- rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
rtw8852a_adc_en(rtwdev, false);
fsleep(40);
rtw8852a_bb_reset_en(rtwdev, phy_idx, false);
} else {
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852a_adc_en(rtwdev, true);
rtw8852a_dfs_en(rtwdev, true);
- rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
}
@@ -1277,9 +1309,10 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev)
rtw8852a_dpk(rtwdev, phy_idx);
}
-static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev)
+static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_tssi_scan(rtwdev, RTW89_PHY_0);
+ rtw8852a_tssi_scan(rtwdev, phy_idx);
}
static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -1378,9 +1411,11 @@ static void rtw8852a_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 ch = rtwdev->hal.current_channel;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -1406,7 +1441,8 @@ static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
cur.idx = j;
shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
+ &cur);
val |= (tmp << shf);
if ((j + 1) % 4)
@@ -1421,8 +1457,10 @@ static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 band = chan->band_type;
struct rtw89_rate_desc desc = {
.nss = RTW89_NSS_1,
.rs = RTW89_RS_OFFSET,
@@ -1433,7 +1471,7 @@ static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
val |= ((v & 0xf) << (4 * desc.idx));
}
@@ -1442,29 +1480,31 @@ static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit lmt[NTX_NUM_8852A];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
ptr = (s8 *)&lmt[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
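
The switch from the open-coded shift loop to FIELD_PREP above matters because the limit entries are signed: a negative s8 promotes to a wider value with the upper bytes set, so "val |= ptr[k] << (8 * k)" smeared 0xff into the higher byte lanes, while FIELD_PREP masks each value to its own field first. A minimal illustration (example_pack_s8x4() is hypothetical):

static u32 example_pack_s8x4(const s8 *p)
{
	/* e.g. p[1] == -2: FIELD_PREP yields 0x0000fe00, whereas
	 * ((int)p[1]) << 8 would yield 0xfffffe00 and corrupt bytes 2-3.
	 */
	return FIELD_PREP(GENMASK(7, 0), p[0]) |
	       FIELD_PREP(GENMASK(15, 8), p[1]) |
	       FIELD_PREP(GENMASK(23, 16), p[2]) |
	       FIELD_PREP(GENMASK(31, 24), p[3]);
}
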
@@ -1473,30 +1513,32 @@ static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
}
static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852A];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852A; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_RU_LMT + j +
__MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
ptr = (s8 *)&lmt_ru[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -1505,17 +1547,20 @@ static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
}
-static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev)
+static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_limit(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw8852a_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
-static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852a_set_txpwr_ref(rtwdev, RTW89_PHY_0);
- rtw8852a_set_txpwr_offset(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_ref(rtwdev, phy_idx);
}
static int
@@ -1592,10 +1637,12 @@ void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
enum rtw89_phy_idx idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
if (!tx_info->en_pmac_tx) {
rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
- if (rtwdev->hal.current_band_type == RTW89_BAND_2G)
+ if (chan->band_type == RTW89_BAND_2G)
rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
return;
}
@@ -1797,6 +1844,9 @@ static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
} else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
@@ -2010,6 +2060,51 @@ void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
}
+static void rtw8852a_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ /* level=0 Default: TIA 1/0= (LNA2,TIAN6) = (7,1)/(5,1) = 21dB/12dB
+ * level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
+ * To improve BT ACI in co-rx
+ */
+
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void rtw8852a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ switch (level) {
+ case 0: /* original */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852a_set_wl_lna2(rtwdev, 0);
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, true);
+ rtw8852a_set_wl_lna2(rtwdev, 0);
+ break;
+ case 2: /* for BTG Co-Rx */
+ rtw8852a_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852a_set_wl_lna2(rtwdev, 1);
+ break;
+ }
+}
+
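
The new rtw8852a_btc_set_wl_rx_gain() handler is wired into rtw8852a_chip_ops further down; a minimal sketch of how common BTC code would reach it through the ops table. The wrapper name is assumed, and the callback signature is inferred from the assignment below rather than taken from a header.

static void example_btc_set_rx_gain(struct rtw89_dev *rtwdev, u32 level)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	if (chip->ops->btc_set_wl_rx_gain)
		chip->ops->btc_set_wl_rx_gain(rtwdev, level);
}
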
static void rtw8852a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -2030,12 +2125,12 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
u8 path;
- s8 *rx_power = phy_ppdu->rssi;
+ u8 *rx_power = phy_ppdu->rssi;
- status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
- status->chain_signal[path] = rx_power[path];
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
}
if (phy_ppdu->valid)
rtw8852a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
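
phy_ppdu->rssi entries are unsigned raw readings from the firmware, so the hunk above converts them before filling ieee80211_rx_status, which expects signed dBm. The driver does this through RTW89_RSSI_RAW_TO_DBM; the sketch below only illustrates the idea, and its scaling and reference constants are assumptions, not the driver's definition.

#define EXAMPLE_RSSI_REF_DBM	110	/* assumed reference, illustration only */

static s8 example_rssi_raw_to_dbm(u8 raw)
{
	/* assume half-dB raw steps referenced to -EXAMPLE_RSSI_REF_DBM dBm */
	return (s8)((raw >> 1) - EXAMPLE_RSSI_REF_DBM);
}
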
@@ -2086,6 +2181,8 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.btc_bt_aci_imp = rtw8852a_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852a_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852a_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852a_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy,
};
const struct rtw89_chip_info rtw8852a_chip_info = {
@@ -2093,6 +2190,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.ops = &rtw8852a_chip_ops,
.fw_name = "rtw89/rtw8852a_fw.bin",
.fifo_size = 458752,
+ .dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x6f800,
@@ -2114,7 +2212,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
+ .support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bw160 = false,
@@ -2125,6 +2225,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.acam_num = 128,
.bcam_num = 10,
.scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_v1 = false,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
@@ -2133,11 +2236,26 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dav_log_efuse_size = 0,
.phycap_addr = 0x580,
.phycap_size = 128,
- .para_ver = 0x05050864,
- .wlcx_desired = 0x05050000,
- .btcx_desired = 0x5,
+ .para_ver = 0x0,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
+ .btc_fwinfo_buf = 1024,
+
+ .fcxbtcrpt_ver = 1,
+ .fcxtdma_ver = 1,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 2,
+ .fcxstep_ver = 2,
+ .fcxnullsta_ver = 1,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852a_bt_rssi_thres,
@@ -2163,7 +2281,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.page_regs = &rtw8852a_page_regs,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 3,
- .imr_info = &rtw8852a_imr_info
+ .imr_info = &rtw8852a_imr_info,
+ .rrsr_cfgs = &rtw8852a_rrsr_cfgs,
+ .dma_ch_mask = 0,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index 3d60feb78312..582ff0d3a9ea 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -1359,7 +1359,7 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, u8 path)
{
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u32 reg_rf18 = 0x0, reg_35c = 0x0;
u8 idx = 0;
u8 get_empty_table = false;
@@ -1380,9 +1380,9 @@ static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);
- iqk_info->iqk_band[path] = hal->current_band_type;
- iqk_info->iqk_bw[path] = hal->current_band_width;
- iqk_info->iqk_ch[path] = hal->current_channel;
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
@@ -1879,13 +1879,12 @@ static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- struct rtw89_hal *hal = &rtwdev->hal;
-
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 kidx = dpk->cur_idx[path];
- dpk->bp[path][kidx].band = hal->current_band_type;
- dpk->bp[path][kidx].ch = hal->current_channel;
- dpk->bp[path][kidx].bw = hal->current_band_width;
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
@@ -2358,6 +2357,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
#define DPK_RXBB_UPPER 0x1f
#define DPK_RXBB_LOWER 0
#define DPK_GL_CRIT 7
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
u8 agc_cnt = 0;
bool limited_rxbb = false;
@@ -2404,7 +2404,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
"[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
tmp_rxbb);
if (offset != 0 || agc_cnt == 0) {
- if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80)
+ if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
_dpk_bypass_rxcfir(rtwdev, path, true);
else
_dpk_lbk_rxiqk(rtwdev, phy, path);
@@ -2548,11 +2548,12 @@ static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
bool is_reload = false;
u8 idx, cur_band, cur_ch;
- cur_band = rtwdev->hal.current_band_type;
- cur_ch = rtwdev->hal.current_channel;
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
if (cur_band != dpk->bp[path][idx].band ||
@@ -2681,12 +2682,13 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
return true;
@@ -2842,7 +2844,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (band == RTW89_BAND_2G)
rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
@@ -2852,7 +2855,8 @@ static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2863,7 +2867,8 @@ static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
&rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
@@ -2905,8 +2910,9 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
const s8 *thm_down_a = NULL;
const s8 *thm_up_b = NULL;
@@ -3099,7 +3105,8 @@ static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 subband = chan->subband_type;
switch (subband) {
default:
@@ -3275,7 +3282,8 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st = 0;
s8 de_2nd = 0;
@@ -3312,7 +3320,8 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
s8 tde_2nd = 0;
@@ -3350,6 +3359,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
{
#define __DE_MASK 0x003ff000
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
@@ -3358,7 +3368,7 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
- u8 ch = rtwdev->hal.current_channel;
+ u8 ch = chan->channel;
u8 i, gidx;
s8 ofdm_de;
s8 trim_de;
@@ -3478,9 +3488,11 @@ static void _tssi_track(struct rtw89_dev *rtwdev)
static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel, ch_tmp;
- u8 bw = rtwdev->hal.current_band_width;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel, ch_tmp;
+ u8 bw = chan->band_width;
+ u8 band = chan->band_type;
+ u8 subband = chan->subband_type;
s8 power;
s32 xdbm;
@@ -3491,7 +3503,7 @@ static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
else
ch_tmp = ch;
- power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX,
+ power = rtw89_phy_read_txpwr_limit(rtwdev, band, bw, RTW89_1TX,
RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
xdbm = power * 100 / 4;
@@ -3523,9 +3535,11 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
const struct rtw89_chip_info *mac_reg = rtwdev->chip;
- u8 ch = rtwdev->hal.current_channel, ch_tmp;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel, ch_tmp;
+ u8 bw = chan->band_width;
+ u8 band = chan->band_type;
u32 tx_en;
u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
s8 power;
@@ -3539,8 +3553,9 @@ static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
else
ch_tmp = ch;
- power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX,
- RTW89_RS_OFDM, RTW89_NONBF, ch_tmp);
+ power = rtw89_phy_read_txpwr_limit(rtwdev, band, RTW89_CHANNEL_WIDTH_20,
+ RTW89_1TX, RTW89_RS_OFDM,
+ RTW89_NONBF, ch_tmp);
xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 190c4aefb02e..0cd8c0c44d19 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -33,14 +33,15 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2,
- .dma_stop1_reg = R_AX_PCIE_DMA_STOP1,
- .dma_stop2_reg = R_AX_PCIE_DMA_STOP2,
- .dma_busy1_reg = R_AX_PCIE_DMA_BUSY1,
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK},
+ .dma_stop2 = {R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK},
.dma_busy2_reg = R_AX_PCIE_DMA_BUSY2,
.dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
+ .tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
new file mode 100644
index 000000000000..9f9908418ee4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2022 Realtek Corporation
+ */
+
+#include "core.h"
+#include "mac.h"
+#include "reg.h"
+
+static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
+ &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ &rtw89_mac_size.ple_qt58},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
+
+ return 0;
+}
+
+static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ u8 wl_rfc_s0;
+ u8 wl_rfc_s1;
+ int ret;
+
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
+ if (ret)
+ return ret;
+ wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
+ if (ret)
+ return ret;
+ wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
+ FULL_BIT_MASK);
+ return ret;
+}
+
+static const struct rtw89_chip_ops rtw8852b_chip_ops = {
+ .enable_bb_rf = rtw8852b_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852b_mac_disable_bb_rf,
+};
+
+const struct rtw89_chip_info rtw8852b_chip_info = {
+ .chip_id = RTL8852B,
+ .fifo_size = 196608,
+ .dle_scc_rsvd_size = 98304,
+ .dle_mem = rtw8852b_dle_mem_pcie,
+ .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
+ BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
+ BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
+};
+EXPORT_SYMBOL(rtw8852b_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852b_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
new file mode 100644
index 000000000000..7bf95c38d3eb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2022 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+
+static const struct rtw89_pci_info rtw8852b_pci_info = {
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
+ BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
+ BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+};
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852BE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index b697aef2faf2..67653b3e1a35 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -109,6 +109,7 @@ static const struct rtw89_imr_info rtw8852c_imr_info = {
.cpu_disp_imr_set = B_AX_CPU_DISP_IMR_SET_V1,
.other_disp_imr_clr = B_AX_OTHER_DISP_IMR_CLR_V1,
.other_disp_imr_set = B_AX_OTHER_DISP_IMR_SET_V1,
+ .bbrpt_com_err_imr_reg = R_AX_BBRPT_COM_ERR_IMR,
.bbrpt_chinfo_err_imr_reg = R_AX_BBRPT_CHINFO_ERR_IMR,
.bbrpt_err_imr_set = R_AX_BBRPT_CHINFO_IMR_SET_V1,
.bbrpt_dfs_err_imr_reg = R_AX_BBRPT_DFS_ERR_IMR,
@@ -131,7 +132,34 @@ static const struct rtw89_imr_info rtw8852c_imr_info = {
.tmac_imr_set = B_AX_TMAC_IMR_SET_V1,
};
+static const struct rtw89_rrsr_cfgs rtw8852c_rrsr_cfgs = {
+ .ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_AX_PTCL_RRSR1, B_AX_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8852c_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
static void rtw8852c_ctrl_btg(struct rtw89_dev *rtwdev, bool btg);
+static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path,
+ enum rtw89_mac_idx mac_idx);
static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
{
@@ -567,7 +595,7 @@ static void rtw8852c_power_trim(struct rtw89_dev *rtwdev)
}
static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 mac_idx)
{
u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
@@ -578,24 +606,24 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
u8 rf_mod_val = 0, chk_rate_mask = 0;
u32 txsc;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_160:
- txsc80 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc80 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_80);
fallthrough;
case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_40);
fallthrough;
case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan,
RTW89_CHANNEL_WIDTH_20);
break;
default:
break;
}
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_160:
rf_mod_val = AX_WMAC_RFMOD_160M;
txsc = FIELD_PREP(B_AX_TXSC_20M_MASK, txsc20) |
@@ -620,7 +648,7 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, rf_mod_val);
rtw89_write32(rtwdev, sub_carr, txsc);
- switch (param->band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
chk_rate_mask = B_AX_BAND_MODE;
break;
@@ -629,7 +657,7 @@ static void rtw8852c_set_channel_mac(struct rtw89_dev *rtwdev,
chk_rate_mask = B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6;
break;
default:
- rtw89_warn(rtwdev, "Invalid band_type:%d\n", param->band_type);
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
return;
}
rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE | B_AX_CHECK_CCK_EN |
@@ -920,7 +948,7 @@ static void rtw8852c_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
}
static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
- const struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx,
enum rtw89_rf_path path)
{
@@ -939,7 +967,7 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
if (rtwdev->dbcc_en && path == RF_PATH_B)
phy_idx = RTW89_PHY_1;
- if (param->band_type == RTW89_BAND_2G) {
+ if (chan->band_type == RTW89_BAND_2G) {
offset_q0 = efuse_gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK];
offset_base_q4 = efuse_gain->offset_base[phy_idx];
@@ -948,7 +976,7 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
rtw89_phy_write32_mask(rtwdev, R_RPL_OFST, B_RPL_OFST_MASK, tmp & 0x7f);
}
- switch (param->subband_type) {
+ switch (chan->subband_type) {
default:
case RTW89_CH_2G:
gain_band = RTW89_GAIN_OFFSET_2G_OFDM;
@@ -977,14 +1005,14 @@ static void rtw8852c_set_gain_offset(struct rtw89_dev *rtwdev,
}
static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
- const struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
u8 sco;
- u16 central_freq = param->center_freq;
- u8 central_ch = param->center_chan;
- u8 band = param->band_type;
- u8 subband = param->subband_type;
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ u8 subband = chan->subband_type;
bool is_2g = band == RTW89_BAND_2G;
u8 chan_idx;
@@ -996,7 +1024,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
if (phy_idx == RTW89_PHY_0) {
/* Path A */
rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_A);
- rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_A);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_A);
if (is_2g)
rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
@@ -1009,7 +1037,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
/* Path B */
if (!rtwdev->dbcc_en) {
rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
- rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_B);
if (is_2g)
rtw89_phy_write32_idx(rtwdev,
@@ -1038,7 +1066,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
} else {
/* Path B */
rtw8852c_set_gain_error(rtwdev, subband, RF_PATH_B);
- rtw8852c_set_gain_offset(rtwdev, param, phy_idx, RF_PATH_B);
+ rtw8852c_set_gain_offset(rtwdev, chan, phy_idx, RF_PATH_B);
if (is_2g)
rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
@@ -1095,7 +1123,7 @@ static void rtw8852c_ctrl_ch(struct rtw89_dev *rtwdev,
}
}
- chan_idx = rtw8852c_encode_chan_idx(rtwdev, param->primary_chan, band);
+ chan_idx = rtw8852c_encode_chan_idx(rtwdev, chan->primary_channel, band);
rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
}
@@ -1246,12 +1274,12 @@ rtw8852c_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
}
static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param)
+ const struct rtw89_chan *chan)
{
- u8 center_chan = param->center_chan;
- u8 bw = param->bandwidth;
+ u8 center_chan = chan->channel;
+ u8 bw = chan->band_width;
- switch (param->band_type) {
+ switch (chan->band_type) {
case RTW89_BAND_2G:
if (bw == RTW89_CHANNEL_WIDTH_20) {
if (center_chan >= 5 && center_chan <= 8)
@@ -1285,19 +1313,19 @@ static u32 rtw8852c_spur_freq(struct rtw89_dev *rtwdev,
#define MAX_TONE_NUM 2048
static void rtw8852c_set_csi_tone_idx(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
u32 spur_freq;
s32 freq_diff, csi_idx, csi_tone_idx;
- spur_freq = rtw8852c_spur_freq(rtwdev, param);
+ spur_freq = rtw8852c_spur_freq(rtwdev, chan);
if (spur_freq == 0) {
rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN, 0, phy_idx);
return;
}
- freq_diff = (spur_freq - param->center_freq) * 1000000;
+ freq_diff = (spur_freq - chan->freq) * 1000000;
csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
@@ -1325,7 +1353,7 @@ static const struct rtw89_nbi_reg_def rtw8852c_nbi_reg_def[] = {
};
static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_rf_path path)
{
const struct rtw89_nbi_reg_def *nbi = &rtw8852c_nbi_reg_def[path];
@@ -1335,34 +1363,37 @@ static void rtw8852c_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
s32 nbi_frac_idx, nbi_frac_tone_idx;
bool notch2_chk = false;
- spur_freq = rtw8852c_spur_freq(rtwdev, param);
+ spur_freq = rtw8852c_spur_freq(rtwdev, chan);
if (spur_freq == 0) {
rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
rtw89_phy_write32_mask(rtwdev, nbi->notch1_en.addr, nbi->notch1_en.mask, 0);
return;
}
- fc = param->center_freq;
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160) {
+ fc = chan->freq;
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
fc = (spur_freq > fc) ? fc + 40 : fc - 40;
- if ((fc > spur_freq && param->center_chan < param->primary_chan) ||
- (fc < spur_freq && param->center_chan > param->primary_chan))
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
notch2_chk = true;
}
freq_diff = (spur_freq - fc) * 1000000;
nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5, &nbi_frac_idx);
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_20) {
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
} else {
- u16 tone_para = (param->bandwidth == RTW89_CHANNEL_WIDTH_40) ? 128 : 256;
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
}
nbi_frac_tone_idx = s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
rtw89_phy_write32_mask(rtwdev, nbi->notch2_idx.addr,
nbi->notch2_idx.mask, nbi_tone_idx);
rtw89_phy_write32_mask(rtwdev, nbi->notch2_frac_idx.addr,
@@ -1404,42 +1435,42 @@ static void rtw8852c_spur_notch(struct rtw89_dev *rtwdev, u32 val,
}
static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
u8 pri_ch_idx,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_csi_tone_idx(rtwdev, param, phy_idx);
+ rtw8852c_set_csi_tone_idx(rtwdev, chan, phy_idx);
if (phy_idx == RTW89_PHY_0) {
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_LOWER ||
pri_ch_idx == RTW89_SC_20_UP3X)) {
rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_0);
if (!rtwdev->dbcc_en)
rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
- } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ } else if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_UPPER ||
pri_ch_idx == RTW89_SC_20_LOW3X)) {
rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_0);
if (!rtwdev->dbcc_en)
rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
} else {
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_A);
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A);
if (!rtwdev->dbcc_en)
- rtw8852c_set_nbi_tone_idx(rtwdev, param,
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan,
RF_PATH_B);
}
} else {
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_LOWER ||
pri_ch_idx == RTW89_SC_20_UP3X)) {
rtw8852c_spur_notch(rtwdev, 0xe7f, RTW89_PHY_1);
- } else if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ } else if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
(pri_ch_idx == RTW89_SC_20_UPPER ||
pri_ch_idx == RTW89_SC_20_LOW3X)) {
rtw8852c_spur_notch(rtwdev, 0x280, RTW89_PHY_1);
} else {
- rtw8852c_set_nbi_tone_idx(rtwdev, param, RF_PATH_B);
+ rtw8852c_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B);
}
}
@@ -1450,14 +1481,14 @@ static void rtw8852c_spur_elimination(struct rtw89_dev *rtwdev,
}
static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 pri_ch = param->primary_chan;
+ u8 pri_ch = chan->primary_channel;
bool mask_5m_low;
bool mask_5m_en;
- switch (param->bandwidth) {
+ switch (chan->band_width) {
case RTW89_CHANNEL_WIDTH_40:
mask_5m_en = true;
mask_5m_low = pri_ch == 2;
@@ -1526,11 +1557,9 @@ static void rtw8852c_bb_reset_all(struct rtw89_dev *rtwdev,
phy_idx);
}
-static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev,
+static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
enum rtw89_phy_idx phy_idx, bool en)
{
- struct rtw89_hal *hal = &rtwdev->hal;
-
if (en) {
rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS,
B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
@@ -1538,7 +1567,7 @@ static void rtw8852c_bb_reset_en(struct rtw89_dev *rtwdev,
B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
phy_idx);
- if (hal->current_band_type == RTW89_BAND_2G)
+ if (band == RTW89_BAND_2G)
rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0x0);
rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
} else {
@@ -1690,21 +1719,24 @@ static void rtw8852c_bb_sethw(struct rtw89_dev *rtwdev)
}
static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- bool cck_en = param->band_type == RTW89_BAND_2G;
- u8 pri_ch_idx = param->pri_ch_idx;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_ch_idx = chan->pri_ch_idx;
u32 mask, reg;
u32 ru_alloc_msk[2] = {B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0,
B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY1};
+ u8 ntx_path;
- if (param->band_type == RTW89_BAND_2G)
- rtw8852c_ctrl_sco_cck(rtwdev, param->center_chan,
- param->primary_chan, param->bandwidth);
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8852c_ctrl_sco_cck(rtwdev, chan->channel,
+ chan->primary_channel,
+ chan->band_width);
- rtw8852c_ctrl_ch(rtwdev, param, phy_idx);
- rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ rtw8852c_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8852c_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
if (cck_en) {
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
rtw89_phy_write32_mask(rtwdev, R_RXCCA_V1, B_RXCCA_DIS_V1, 0);
@@ -1717,17 +1749,17 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
B_PD_ARBITER_OFF, 0x1, phy_idx);
}
- rtw8852c_spur_elimination(rtwdev, param, pri_ch_idx, phy_idx);
- rtw8852c_ctrl_btg(rtwdev, param->band_type == RTW89_BAND_2G);
- rtw8852c_5m_mask(rtwdev, param, phy_idx);
+ rtw8852c_spur_elimination(rtwdev, chan, pri_ch_idx, phy_idx);
+ rtw8852c_ctrl_btg(rtwdev, chan->band_type == RTW89_BAND_2G);
+ rtw8852c_5m_mask(rtwdev, chan, phy_idx);
- if (param->bandwidth == RTW89_CHANNEL_WIDTH_160 &&
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
rtwdev->hal.cv != CHIP_CAV) {
rtw89_phy_write32_idx(rtwdev, R_P80_AT_HIGH_FREQ,
B_P80_AT_HIGH_FREQ, 0x0, phy_idx);
reg = rtw89_mac_reg_by_idx(R_P80_AT_HIGH_FREQ_BB_WRP,
phy_idx);
- if (param->primary_chan > param->center_chan) {
+ if (chan->primary_channel > chan->channel) {
rtw89_phy_write32_mask(rtwdev,
R_P80_AT_HIGH_FREQ_RU_ALLOC,
ru_alloc_msk[phy_idx], 1);
@@ -1742,8 +1774,8 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
}
}
- if (param->band_type == RTW89_BAND_6G &&
- param->bandwidth == RTW89_CHANNEL_WIDTH_160)
+ if (chan->band_type == RTW89_BAND_6G &&
+ chan->band_width == RTW89_CHANNEL_WIDTH_160)
rtw89_phy_write32_idx(rtwdev, R_CDD_EVM_CHK_EN,
B_CDD_EVM_CHK_EN, 0, phy_idx);
else
@@ -1769,15 +1801,29 @@ static void rtw8852c_set_channel_bb(struct rtw89_dev *rtwdev,
}
}
+ if (chan->band_type == RTW89_BAND_6G)
+ rtw89_phy_write32_set(rtwdev, R_MUIC, B_MUIC_EN);
+ else
+ rtw89_phy_write32_clr(rtwdev, R_MUIC, B_MUIC_EN);
+
+ if (hal->antenna_tx)
+ ntx_path = hal->antenna_tx;
+ else
+ ntx_path = chan->band_type == RTW89_BAND_6G ? RF_B : RF_AB;
+
+ rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, (enum rtw89_mac_idx)phy_idx);
+
rtw8852c_bb_reset_all(rtwdev, phy_idx);
}
static void rtw8852c_set_channel(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *params)
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_channel_mac(rtwdev, params, RTW89_MAC_0);
- rtw8852c_set_channel_bb(rtwdev, params, RTW89_PHY_0);
- rtw8852c_set_channel_rf(rtwdev, params, RTW89_PHY_0);
+ rtw8852c_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852c_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852c_set_channel_rf(rtwdev, chan, phy_idx);
}
static void rtw8852c_dfs_en(struct rtw89_dev *rtwdev, bool en)
@@ -1799,25 +1845,27 @@ static void rtw8852c_adc_en(struct rtw89_dev *rtwdev, bool en)
}
static void rtw8852c_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
- struct rtw89_channel_help_params *p)
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
{
- u8 phy_idx = RTW89_PHY_0;
-
if (enter) {
- rtw89_chip_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, &p->tx_en,
+ RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
rtw8852c_dfs_en(rtwdev, false);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
rtw8852c_adc_en(rtwdev, false);
fsleep(40);
- rtw8852c_bb_reset_en(rtwdev, phy_idx, false);
+ rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, false);
} else {
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
rtw8852c_adc_en(rtwdev, true);
rtw8852c_dfs_en(rtwdev, true);
- rtw8852c_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
- rtw8852c_bb_reset_en(rtwdev, phy_idx, true);
- rtw89_chip_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ rtw8852c_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8852c_bb_reset_en(rtwdev, chan->band_type, phy_idx, true);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, p->tx_en);
}
}
@@ -1847,9 +1895,10 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev)
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
-static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev)
+static void rtw8852c_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_tssi_scan(rtwdev, RTW89_PHY_0);
+ rtw8852c_tssi_scan(rtwdev, phy_idx);
}
static void rtw8852c_rfk_scan(struct rtw89_dev *rtwdev, bool start)
@@ -1958,9 +2007,11 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 ch = rtwdev->hal.current_channel;
+ u8 band = chan->band_type;
+ u8 ch = chan->channel;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -1986,7 +2037,8 @@ static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
cur.idx = j;
shf = (j % 4) * 8;
- tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, band,
+ &cur);
val |= (tmp << shf);
if ((j + 1) % 4)
@@ -2001,8 +2053,10 @@ static void rtw8852c_set_txpwr_byrate(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 band = chan->band_type;
struct rtw89_rate_desc desc = {
.nss = RTW89_NSS_1,
.rs = RTW89_RS_OFFSET,
@@ -2013,7 +2067,7 @@ static void rtw8852c_set_txpwr_offset(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
- v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, band, &desc);
val |= ((v & 0xf) << (4 * desc.idx));
}
@@ -2045,7 +2099,8 @@ static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
__DECL_DFIR_ADDR(filter,
0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0,
0x45C4, 0x45C8);
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
const u32 *param;
int i;
@@ -2076,9 +2131,10 @@ static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- u8 band = rtwdev->hal.current_band_type;
+ u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
u8 tx_shape_cck = rtw89_8852c_tx_shape[band][RTW89_RS_CCK][regd];
u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd];
@@ -2092,29 +2148,31 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_PAGE_SIZE 40
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit lmt[NTX_NUM_8852C];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852C; i++) {
- rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+ rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
ptr = (s8 *)&lmt[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -2123,30 +2181,32 @@ static void rtw8852c_set_txpwr_limit(struct rtw89_dev *rtwdev,
}
static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
- u8 ch = rtwdev->hal.current_channel;
- u8 bw = rtwdev->hal.current_band_width;
+ u8 ch = chan->channel;
+ u8 bw = chan->band_width;
struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852C];
u32 addr, val;
const s8 *ptr;
- u8 i, j, k;
+ u8 i, j;
rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
"[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
for (i = 0; i < NTX_NUM_8852C; i++) {
- rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru[i], i);
for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
addr = R_AX_PWR_RU_LMT + j +
__MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
ptr = (s8 *)&lmt_ru[i] + j;
- val = 0;
- for (k = 0; k < 4; k++)
- val |= (ptr[k] << (8 * k));
+ val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
+ FIELD_PREP(GENMASK(15, 8), ptr[1]) |
+ FIELD_PREP(GENMASK(23, 16), ptr[2]) |
+ FIELD_PREP(GENMASK(31, 24), ptr[3]);
rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
}
@@ -2155,18 +2215,21 @@ static void rtw8852c_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
}
-static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev)
+static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
- rtw8852c_set_txpwr_offset(rtwdev, RTW89_PHY_0);
- rtw8852c_set_tx_shape(rtwdev, RTW89_PHY_0);
- rtw8852c_set_txpwr_limit(rtwdev, RTW89_PHY_0);
- rtw8852c_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852c_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw8852c_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
}
-static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
{
- rtw8852c_set_txpwr_ref(rtwdev, RTW89_PHY_0);
+ rtw8852c_set_txpwr_ref(rtwdev, phy_idx);
}
static void
@@ -2222,7 +2285,8 @@ rtw8852c_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
{
- struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
u32 rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
u32 rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
@@ -2316,7 +2380,7 @@ static void rtw8852c_bb_cfg_rx_path(struct rtw89_dev *rtwdev, u8 rx_path)
1);
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS,
1);
- rtw8852c_ctrl_btg(rtwdev, hal->current_band_type == RTW89_BAND_2G);
+ rtw8852c_ctrl_btg(rtwdev, band == RTW89_BAND_2G);
rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
rst_mask0, 1);
rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB,
@@ -2458,7 +2522,6 @@ static void rtw8852c_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
- u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
rtw8852c_bb_cfg_rx_path(rtwdev, RF_PATH_AB);
@@ -2473,8 +2536,6 @@ static void rtw8852c_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
}
-
- rtw8852c_ctrl_tx_path_tmac(rtwdev, ntx_path, RTW89_MAC_0);
}
static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
@@ -2773,23 +2834,7 @@ void rtw8852c_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
static
void rtw8852c_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_cx *cx = &btc->cx;
- u32 val;
-
- val = rtw89_read32(rtwdev, R_BTC_BT_CNT_HIGH);
- cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val);
- cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val);
-
- val = rtw89_read32(rtwdev, R_BTC_BT_CNT_LOW);
- cx->cnt_bt[BTC_BCNT_LOPRI_TX] = FIELD_GET(B_AX_STATIS_BT_LO_TX_1_MASK, val);
- cx->cnt_bt[BTC_BCNT_LOPRI_RX] = FIELD_GET(B_AX_STATIS_BT_LO_RX_1_MASK, val);
-
- /* clock-gate off before reset counter*/
- rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
- rtw89_write32_clr(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST);
- rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_RST);
- rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
+ /* Feature move to firmware */
}
static
@@ -2810,6 +2855,59 @@ void rtw8852c_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
}
+static void rtw8852c_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ /* level=0 Default: TIA 1/0= (LNA2,TIAN6) = (7,1)/(5,1) = 21dB/12dB
+ * level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
+ * To improve BT ACI in co-rx
+ */
+
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void rtw8852c_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ switch (level) {
+ case 0: /* original */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852c_set_wl_lna2(rtwdev, 0);
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, true);
+ rtw8852c_set_wl_lna2(rtwdev, 0);
+ break;
+ case 2: /* for BTG Co-Rx*/
+ rtw8852c_bb_ctrl_btc_preagc(rtwdev, false);
+ rtw8852c_set_wl_lna2(rtwdev, 1);
+ break;
+ }
+}
+
static void rtw8852c_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -2831,12 +2929,12 @@ static void rtw8852c_query_ppdu(struct rtw89_dev *rtwdev,
struct ieee80211_rx_status *status)
{
u8 path;
- s8 *rx_power = phy_ppdu->rssi;
+ u8 *rx_power = phy_ppdu->rssi;
- status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
- status->chain_signal[path] = rx_power[path];
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
}
if (phy_ppdu->valid)
rtw8852c_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
@@ -2879,10 +2977,12 @@ static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
return 0;
}
-static void rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ return 0;
}
static const struct rtw89_chip_ops rtw8852c_chip_ops = {
@@ -2930,6 +3030,8 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.btc_bt_aci_imp = rtw8852c_btc_bt_aci_imp,
.btc_update_bt_cnt = rtw8852c_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852c_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852c_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
};
const struct rtw89_chip_info rtw8852c_chip_info = {
@@ -2937,6 +3039,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.ops = &rtw8852c_chip_ops,
.fw_name = "rtw89/rtw8852c_fw.bin",
.fifo_size = 458752,
+ .dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
.dis_2g_40m_ul_ofdma = false,
.rsvd_ple_ofst = 0x6f800,
@@ -2960,7 +3063,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
+ .dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
@@ -2972,6 +3077,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.acam_num = 128,
.bcam_num = 20,
.scam_num = 128,
+ .bacam_num = 8,
+ .bacam_dynamic_num = 8,
+ .bacam_v1 = true,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
@@ -2980,11 +3088,26 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dav_log_efuse_size = 16,
.phycap_addr = 0x590,
.phycap_size = 0x60,
- .para_ver = 0x05050764,
- .wlcx_desired = 0x05050000,
- .btcx_desired = 0x5,
+ .para_ver = 0x1,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
+ .btc_fwinfo_buf = 1280,
+
+ .fcxbtcrpt_ver = 4,
+ .fcxtdma_ver = 3,
+ .fcxslots_ver = 1,
+ .fcxcysta_ver = 3,
+ .fcxstep_ver = 3,
+ .fcxnullsta_ver = 2,
+ .fcxmreg_ver = 1,
+ .fcxgpiodbg_ver = 1,
+ .fcxbtver_ver = 1,
+ .fcxbtscan_ver = 1,
+ .fcxbtafh_ver = 1,
+ .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852c_bt_rssi_thres,
@@ -2995,7 +3118,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rf_para_ulink = rtw89_btc_8852c_rf_ul,
.rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852c_rf_dl),
.rf_para_dlink = rtw89_btc_8852c_rf_dl,
- .ps_mode_supported = 0,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
.low_power_hci_modes = BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_V1,
@@ -3009,7 +3134,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.page_regs = &rtw8852c_page_regs,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 5,
- .imr_info = &rtw8852c_imr_info
+ .imr_info = &rtw8852c_imr_info,
+ .rrsr_cfgs = &rtw8852c_rrsr_cfgs,
+ .dma_ch_mask = 0,
};
EXPORT_SYMBOL(rtw8852c_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 4186d825d19b..006c2cf93111 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -1294,14 +1294,14 @@ static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u
static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy, u8 path)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
- struct rtw89_hal *hal = &rtwdev->hal;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- iqk_info->iqk_band[path] = hal->current_band_type;
- iqk_info->iqk_bw[path] = hal->current_band_width;
- iqk_info->iqk_ch[path] = hal->current_channel;
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
@@ -1546,7 +1546,8 @@ static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
ret = read_poll_timeout_atomic(rtw89_read_rf, val, val,
- 2, 1000, false, rtwdev, path, 0x93, BIT(5));
+ 2, 2000, false, rtwdev, path,
+ RR_DCK1, RR_DCK1_DONE);
if (ret)
rtw89_warn(rtwdev, "[RX_DCK] S%d RXDCK timeout\n", path);
else
@@ -1691,14 +1692,14 @@ static void _dpk_information(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- struct rtw89_hal *hal = &rtwdev->hal;
u8 kidx = dpk->cur_idx[path];
- dpk->bp[path][kidx].band = hal->current_band_type;
- dpk->bp[path][kidx].ch = hal->current_channel;
- dpk->bp[path][kidx].bw = hal->current_band_width;
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
@@ -2272,12 +2273,13 @@ static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
bool is_reload = false;
u8 idx, cur_band, cur_ch;
- cur_band = rtwdev->hal.current_band_type;
- cur_ch = rtwdev->hal.current_channel;
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
if (cur_band != dpk->bp[path][idx].band ||
@@ -2530,17 +2532,19 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
{
struct rtw89_fem_info *fem = &rtwdev->fem;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 band = chan->band_type;
- if (rtwdev->hal.cv == CHIP_CAV && rtwdev->hal.current_band_type != RTW89_BAND_2G) {
+ if (rtwdev->hal.cv == CHIP_CAV && band != RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to CAV & not 2G!!\n");
return true;
- } else if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ } else if (fem->epa_2g && band == RTW89_BAND_2G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ } else if (fem->epa_5g && band == RTW89_BAND_5G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
return true;
- } else if (fem->epa_6g && rtwdev->hal.current_band_type == RTW89_BAND_6G) {
+ } else if (fem->epa_6g && band == RTW89_BAND_6G) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
return true;
}
@@ -2663,7 +2667,8 @@ static void _dpk_track(struct rtw89_dev *rtwdev)
static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_sys_defs_tbl);
@@ -2697,7 +2702,8 @@ static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
rtw89_rfk_parser(rtwdev, &rtw8852c_tssi_dck_defs_a_tbl);
@@ -2735,8 +2741,9 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
__val; \
})
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
- u8 subband = rtwdev->hal.current_subband;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
const s8 *thm_up_a = NULL;
const s8 *thm_down_a = NULL;
const s8 *thm_up_b = NULL;
@@ -2908,7 +2915,8 @@ static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx ph
static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
if (path == RF_PATH_A) {
rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
@@ -2924,7 +2932,8 @@ static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy
static void _tssi_set_aligk_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
- enum rtw89_band band = rtwdev->hal.current_band_type;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
const struct rtw89_rfk_tbl *tbl;
if (path == RF_PATH_A) {
@@ -3335,8 +3344,9 @@ static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- enum rtw89_band band = rtwdev->hal.current_band_type;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
u32 gidx, gidx_1st, gidx_2nd;
s8 de_1st;
s8 de_2nd;
@@ -3398,8 +3408,9 @@ static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- enum rtw89_band band = rtwdev->hal.current_band_type;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
u32 tgidx, tgidx_1st, tgidx_2nd;
s8 tde_1st = 0;
s8 tde_2nd = 0;
@@ -3462,7 +3473,8 @@ static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy)
{
struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
- u8 ch = rtwdev->hal.current_channel;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
u8 gidx;
s8 ofdm_de;
s8 trim_de;
@@ -3802,15 +3814,17 @@ void rtw8852c_ctrl_bw_ch(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
}
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
- rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, param->center_chan, param->band_type,
- param->bandwidth);
+ rtw8852c_ctrl_bw_ch(rtwdev, phy_idx, chan->channel,
+ chan->band_type,
+ chan->band_width);
}
void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
u8 idx = mcc_info->table_idx;
int i;
@@ -3823,8 +3837,8 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i
}
mcc_info->table_idx = idx;
- mcc_info->ch[idx] = rtwdev->hal.current_channel;
- mcc_info->band[idx] = rtwdev->hal.current_band_type;
+ mcc_info->ch[idx] = chan->channel;
+ mcc_info->band[idx] = chan->band_type;
}
void rtw8852c_rck(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
index 5118a49da8d3..928a587cdd05 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h
@@ -21,7 +21,7 @@ void rtw8852c_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
void rtw8852c_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
enum rtw89_phy_idx phy_idx);
void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,
- struct rtw89_channel_params *param,
+ const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx);
void rtw8852c_lck_init(struct rtw89_dev *rtwdev);
void rtw8852c_lck_track(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index feaa83b16171..11f35e7a7f0e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -1767,7 +1767,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_bb_reg_gain[] = {
{0x3070103, 0x34343C3C},
};
-static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
+static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0xF0010000, 0x00000000},
{0xF0020000, 0x00000001},
{0xF0320000, 0x00000002},
@@ -1777,13 +1777,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0xF0360000, 0x00000006},
{0xF0010001, 0x00000007},
{0xF0020001, 0x00000008},
- {0xF0320001, 0x00000009},
- {0xF0330001, 0x0000000A},
- {0xF0340001, 0x0000000B},
- {0xF0350001, 0x0000000C},
- {0xF0360001, 0x0000000D},
- {0xF03F0001, 0x0000000E},
- {0xF0400001, 0x0000000F},
+ {0xF0030001, 0x00000009},
+ {0xF0040001, 0x0000000A},
+ {0xF0050001, 0x0000000B},
+ {0xF0070001, 0x0000000C},
+ {0xF0320001, 0x0000000D},
+ {0xF0330001, 0x0000000E},
+ {0xF0340001, 0x0000000F},
+ {0xF0350001, 0x00000010},
+ {0xF0360001, 0x00000011},
+ {0xF03F0001, 0x00000012},
+ {0xF0400001, 0x00000013},
{0x005, 0x00000000},
{0x10005, 0x00000000},
{0x000, 0x00030001},
@@ -1795,7 +1799,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03E, 0x00000620},
{0x03F, 0x0000020C},
{0x0EF, 0x00000000},
- {0x05F, 0x00000032},
+ {0x05F, 0x00000038},
{0x097, 0x00043200},
{0x0A6, 0x00066DB7},
{0x0EF, 0x00004000},
@@ -1821,8 +1825,8 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x000, 0x00033C01},
{0x10000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x096, 0x00015200},
+ {0x10055, 0x00080080},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0004D000},
{0x0DA, 0x000D4009},
@@ -1850,6 +1854,18 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
@@ -1922,6 +1938,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000CC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000CC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000CC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -1958,6 +1982,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000C4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000C4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000C4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000C4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -1994,6 +2026,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000BC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2030,6 +2070,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000B4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2066,6 +2114,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000AC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2102,6 +2158,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2138,6 +2202,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000009C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2174,6 +2246,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000094},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2210,6 +2290,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000008C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2246,6 +2334,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000084},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2282,6 +2378,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000BC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000BC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000BC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2318,6 +2422,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000B4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000B4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000B4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2354,6 +2466,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000AC},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000AC},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000AC},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2390,6 +2510,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000000A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000000A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2426,6 +2554,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000009C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2462,6 +2598,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000094},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000094},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000094},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2498,6 +2642,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000008C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000008C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000008C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2534,6 +2686,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000084},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000084},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000084},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2570,6 +2730,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000003C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000003C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2606,6 +2774,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000034},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000034},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000034},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000034},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2642,6 +2818,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000002C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000002C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000002C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000002C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2678,6 +2862,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000024},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000024},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000024},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000024},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2714,6 +2906,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000001C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000001C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000001C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000001C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2750,6 +2950,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000014},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000014},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000014},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000014},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2786,6 +2994,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000000C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000000C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000000C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000000C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2822,6 +3038,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000004},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000004},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000004},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000004},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2871,6 +3095,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x08F, 0x000D1352},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -2905,6 +3137,52 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000015},
{0x033, 0x00000001},
{0x03F, 0x00000017},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x0EF, 0x00008000},
{0x033, 0x00000020},
@@ -3416,6 +3694,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000EFFF},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -3522,7 +3808,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000005},
{0x03F, 0x00004344},
{0x033, 0x00000006},
- {0x03F, 0x00004324},
+ {0x03F, 0x00004344},
{0x033, 0x00000007},
{0x03F, 0x00004344},
{0x033, 0x00000008},
@@ -3585,6 +3871,33 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000200},
{0x0EF, 0x00000000},
{0x0EF, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000084DC},
{0x030, 0x000103C9},
{0x030, 0x00018399},
@@ -3597,6 +3910,241 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x030, 0x00050011},
{0x030, 0x00058000},
{0x030, 0x00060000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0xB0000000, 0x00000000},
{0x030, 0x00068000},
{0x030, 0x00070000},
{0x0EF, 0x00000000},
@@ -3831,6 +4379,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x030, 0x000300FF},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000300FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000300FF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000300FF},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -3901,6 +4457,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x095, 0x00000008},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -3920,101 +4484,2033 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0xB0000000, 0x00000000},
{0x0EE, 0x00001000},
{0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000038},
{0x03F, 0x000002E7},
{0x033, 0x0000003C},
{0x03F, 0x000003E7},
{0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000063},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
@@ -4034,20 +6530,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x00000152},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0xA0000000, 0x00000000},
{0x03F, 0x00000052},
{0xB0000000, 0x00000000},
@@ -4070,20 +6574,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000015A},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
{0xB0000000, 0x00000000},
@@ -4106,20 +6618,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x0000019C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
{0xB0000000, 0x00000000},
@@ -4142,20 +6662,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000001A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
{0xB0000000, 0x00000000},
@@ -4178,20 +6706,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000001E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
{0xB0000000, 0x00000000},
@@ -4214,20 +6750,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x03F, 0x000002E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0xA0000000, 0x00000000},
{0x03F, 0x000001E6},
{0xB0000000, 0x00000000},
@@ -5271,131 +7815,131 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
{0x10030, 0x000781EF},
{0x10030, 0x000785E9},
{0x10030, 0x000789E3},
- {0x10030, 0x00078DA3},
- {0x10030, 0x00079161},
- {0x10030, 0x0007955B},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
{0x10030, 0x00079921},
{0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -5416,131 +7960,711 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
{0x10030, 0x000781EF},
{0x10030, 0x000785E9},
{0x10030, 0x000789E3},
- {0x10030, 0x00078DA3},
- {0x10030, 0x00079161},
- {0x10030, 0x0007955B},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
{0x10030, 0x00079921},
{0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -5561,1002 +8685,1002 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
{0x10030, 0x000781EF},
{0x10030, 0x000785E9},
{0x10030, 0x000789E3},
- {0x10030, 0x00078DA3},
- {0x10030, 0x00079161},
- {0x10030, 0x0007955B},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
{0x10030, 0x00079921},
{0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701EF},
+ {0x10030, 0x000705E7},
+ {0x10030, 0x000709A7},
+ {0x10030, 0x00070D61},
+ {0x10030, 0x0007115B},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071CE5},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728A1},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781EF},
+ {0x10030, 0x000785E9},
+ {0x10030, 0x000789E3},
+ {0x10030, 0x00078DA1},
+ {0x10030, 0x0007915F},
+ {0x10030, 0x00079559},
+ {0x10030, 0x00079921},
+ {0x10030, 0x00079D1B},
+ {0x10030, 0x0007A0E3},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B823},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0xA0000000, 0x00000000},
{0x10030, 0x000001EF},
{0x10030, 0x000005E9},
@@ -6724,6 +9848,1150 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00004017},
{0x100EE, 0x00000000},
{0x100EE, 0x00002000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x000600F6},
+ {0x10030, 0x000604F3},
+ {0x10030, 0x000608F0},
+ {0x10030, 0x00060CED},
+ {0x10030, 0x000610EA},
+ {0x10030, 0x000614E7},
+ {0x10030, 0x000618E4},
+ {0x10030, 0x00061CE1},
+ {0x10030, 0x000620DE},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628D8},
+ {0x10030, 0x00062CD5},
+ {0x10030, 0x000630D2},
+ {0x10030, 0x000634CF},
+ {0x10030, 0x000638CC},
+ {0x10030, 0x00063C09},
+ {0x10030, 0x00064006},
+ {0x10030, 0x000680F5},
+ {0x10030, 0x000684F2},
+ {0x10030, 0x000688EF},
+ {0x10030, 0x00068CEC},
+ {0x10030, 0x000690E9},
+ {0x10030, 0x000694E6},
+ {0x10030, 0x000698E3},
+ {0x10030, 0x00069CE0},
+ {0x10030, 0x0006A0DD},
+ {0x10030, 0x0006A4DA},
+ {0x10030, 0x0006A8D7},
+ {0x10030, 0x0006ACD4},
+ {0x10030, 0x0006B0D1},
+ {0x10030, 0x0006B4CE},
+ {0x10030, 0x0006B8CB},
+ {0x10030, 0x0006BC08},
+ {0x10030, 0x0006C005},
+ {0x10030, 0x000700F5},
+ {0x10030, 0x000704F2},
+ {0x10030, 0x000708EF},
+ {0x10030, 0x00070CEC},
+ {0x10030, 0x000710E9},
+ {0x10030, 0x000714E6},
+ {0x10030, 0x000718E3},
+ {0x10030, 0x00071CE0},
+ {0x10030, 0x000720DD},
+ {0x10030, 0x000724DA},
+ {0x10030, 0x000728D7},
+ {0x10030, 0x00072CD4},
+ {0x10030, 0x000730D1},
+ {0x10030, 0x000734CE},
+ {0x10030, 0x000738CB},
+ {0x10030, 0x00073C08},
+ {0x10030, 0x00074005},
{0x10030, 0x000780F4},
{0x10030, 0x000784F1},
{0x10030, 0x000788EE},
@@ -6777,9 +11045,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000025},
{0x03F, 0x00008002},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
{0x03F, 0x00050002},
{0x033, 0x00000029},
@@ -6793,9 +11145,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000002D},
{0x03F, 0x00008002},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x03F, 0x00050002},
{0x033, 0x00000031},
@@ -6809,9 +11245,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000035},
{0x03F, 0x00008002},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
{0x03F, 0x00050002},
{0x033, 0x00000061},
@@ -6825,9 +11345,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000065},
{0x03F, 0x00008002},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
{0x03F, 0x00050002},
{0x033, 0x00000069},
@@ -6841,9 +11445,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000006D},
{0x03F, 0x00008002},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
{0x03F, 0x00050002},
{0x033, 0x00000071},
@@ -6857,9 +11545,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000075},
{0x03F, 0x00008002},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
{0x03F, 0x00050002},
{0x033, 0x00000079},
@@ -6873,9 +11645,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000007D},
{0x03F, 0x00008002},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A0},
{0x03F, 0x00050002},
{0x033, 0x000000A1},
@@ -6889,9 +11745,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000A5},
{0x03F, 0x00008002},
{0x033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A8},
{0x03F, 0x00050002},
{0x033, 0x000000A9},
@@ -6905,9 +11845,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000AD},
{0x03F, 0x00008002},
{0x033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B0},
{0x03F, 0x00050002},
{0x033, 0x000000B1},
@@ -6921,9 +11945,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000B5},
{0x03F, 0x00008002},
{0x033, 0x000000B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E0},
{0x03F, 0x00050002},
{0x033, 0x000000E1},
@@ -6937,9 +12045,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000E5},
{0x03F, 0x00008002},
{0x033, 0x000000E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E8},
{0x03F, 0x00050002},
{0x033, 0x000000E9},
@@ -6953,9 +12145,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000ED},
{0x03F, 0x00008002},
{0x033, 0x000000EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F0},
{0x03F, 0x00050002},
{0x033, 0x000000F1},
@@ -6969,9 +12245,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000F5},
{0x03F, 0x00008002},
{0x033, 0x000000F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F8},
{0x03F, 0x00050002},
{0x033, 0x000000F9},
@@ -6985,9 +12345,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000000FD},
{0x03F, 0x00008002},
{0x033, 0x000000FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000120},
{0x03F, 0x00050002},
{0x033, 0x00000121},
@@ -7001,9 +12445,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000125},
{0x03F, 0x00008002},
{0x033, 0x00000126},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000127},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000128},
{0x03F, 0x00050002},
{0x033, 0x00000129},
@@ -7017,9 +12545,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000012D},
{0x03F, 0x00008002},
{0x033, 0x0000012E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000012F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000130},
{0x03F, 0x00050002},
{0x033, 0x00000131},
@@ -7033,9 +12645,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000135},
{0x03F, 0x00008002},
{0x033, 0x00000136},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000137},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000160},
{0x03F, 0x00050002},
{0x033, 0x00000161},
@@ -7049,9 +12745,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000165},
{0x03F, 0x00008002},
{0x033, 0x00000166},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000167},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000168},
{0x03F, 0x00050002},
{0x033, 0x00000169},
@@ -7065,9 +12845,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000016D},
{0x03F, 0x00008002},
{0x033, 0x0000016E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000016F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000170},
{0x03F, 0x00050002},
{0x033, 0x00000171},
@@ -7081,9 +12945,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x00000175},
{0x03F, 0x00008002},
{0x033, 0x00000176},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000177},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000178},
{0x03F, 0x00050002},
{0x033, 0x00000179},
@@ -7097,9 +13045,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x0000017D},
{0x03F, 0x00008002},
{0x033, 0x0000017E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000017F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A0},
{0x03F, 0x00050002},
{0x033, 0x000001A1},
@@ -7113,9 +13145,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001A5},
{0x03F, 0x00008002},
{0x033, 0x000001A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A8},
{0x03F, 0x00050002},
{0x033, 0x000001A9},
@@ -7129,9 +13245,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001AD},
{0x03F, 0x00008002},
{0x033, 0x000001AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B0},
{0x03F, 0x00050002},
{0x033, 0x000001B1},
@@ -7145,9 +13345,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001B5},
{0x03F, 0x00008002},
{0x033, 0x000001B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E0},
{0x03F, 0x00050002},
{0x033, 0x000001E1},
@@ -7161,9 +13445,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001E5},
{0x03F, 0x00008002},
{0x033, 0x000001E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E8},
{0x03F, 0x00050002},
{0x033, 0x000001E9},
@@ -7177,9 +13545,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001ED},
{0x03F, 0x00008002},
{0x033, 0x000001EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F0},
{0x03F, 0x00050002},
{0x033, 0x000001F1},
@@ -7193,9 +13645,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001F5},
{0x03F, 0x00008002},
{0x033, 0x000001F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F8},
{0x03F, 0x00050002},
{0x033, 0x000001F9},
@@ -7209,9 +13745,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x033, 0x000001FD},
{0x03F, 0x00008002},
{0x033, 0x000001FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x005, 0x00000001},
{0x10005, 0x00000001},
@@ -7253,7 +13873,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00022000},
{0x10030, 0x00023000},
{0x10030, 0x00024000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00025000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00026003},
{0x10030, 0x00027003},
{0x10030, 0x00028000},
@@ -7261,7 +13923,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x0002A000},
{0x10030, 0x0002B000},
{0x10030, 0x0002C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0002D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0002E003},
{0x10030, 0x0002F003},
{0x10030, 0x00030000},
@@ -7269,7 +13973,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00032000},
{0x10030, 0x00033000},
{0x10030, 0x00034000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00035000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00036003},
{0x10030, 0x00037003},
{0x10030, 0x00038000},
@@ -7277,7 +14023,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x0003A000},
{0x10030, 0x0003B000},
{0x10030, 0x0003C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0003D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0003E003},
{0x10030, 0x0003F003},
{0x10030, 0x00060000},
@@ -7285,35 +14073,283 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radioa_regs[] = {
{0x10030, 0x00062000},
{0x10030, 0x00063000},
{0x10030, 0x00064000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00065000},
{0x10030, 0x00066000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00067003},
{0x10030, 0x00068000},
{0x10030, 0x00069000},
{0x10030, 0x0006A000},
{0x10030, 0x0006B000},
{0x10030, 0x0006C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0006D000},
{0x10030, 0x0006E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0006F003},
{0x10030, 0x00070000},
{0x10030, 0x00071000},
{0x10030, 0x00072000},
{0x10030, 0x00073000},
{0x10030, 0x00074000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0xA0000000, 0x00000000},
{0x10030, 0x00075000},
{0x10030, 0x00076000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00077003},
{0x10030, 0x00078000},
{0x10030, 0x00079000},
{0x10030, 0x0007A000},
{0x10030, 0x0007B000},
{0x10030, 0x0007C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0007D000},
{0x10030, 0x0007E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0007F003},
{0x100EE, 0x00000000},
- {0x0FE, 0x00000031},
+ {0x0FE, 0x00000048},
};
static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
@@ -7326,13 +14362,17 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0xF0360000, 0x00000006},
{0xF0010001, 0x00000007},
{0xF0020001, 0x00000008},
- {0xF0320001, 0x00000009},
- {0xF0330001, 0x0000000A},
- {0xF0340001, 0x0000000B},
- {0xF0350001, 0x0000000C},
- {0xF0360001, 0x0000000D},
- {0xF03F0001, 0x0000000E},
- {0xF0400001, 0x0000000F},
+ {0xF0030001, 0x00000009},
+ {0xF0040001, 0x0000000A},
+ {0xF0050001, 0x0000000B},
+ {0xF0070001, 0x0000000C},
+ {0xF0320001, 0x0000000D},
+ {0xF0330001, 0x0000000E},
+ {0xF0340001, 0x0000000F},
+ {0xF0350001, 0x00000010},
+ {0xF0360001, 0x00000011},
+ {0xF03F0001, 0x00000012},
+ {0xF0400001, 0x00000013},
{0x005, 0x00000000},
{0x10005, 0x00000000},
{0x0B9, 0x00020440},
@@ -7340,42 +14380,69 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10000, 0x00030000},
{0x018, 0x00011124},
{0x10018, 0x00011124},
- {0x05F, 0x00000032},
+ {0x05F, 0x00000038},
{0x097, 0x00043200},
{0x0A6, 0x00066DB7},
{0x0EF, 0x00004000},
{0x033, 0x00000005},
{0x03E, 0x00000000},
{0x03F, 0x00010500},
+ {0x033, 0x00000004},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
{0x033, 0x00000003},
{0x03E, 0x00000000},
{0x03F, 0x00028B00},
{0x033, 0x00000002},
{0x03E, 0x00000000},
{0x03F, 0x0009AB00},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000000},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
{0x033, 0x0000000D},
{0x03E, 0x00000000},
{0x03F, 0x00010500},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
{0x033, 0x0000000B},
{0x03E, 0x00000000},
{0x03F, 0x00028B00},
{0x033, 0x0000000A},
{0x03E, 0x00000000},
{0x03F, 0x0009AB00},
+ {0x033, 0x00000009},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000008},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
{0x033, 0x00000015},
{0x03E, 0x00000000},
{0x03F, 0x00010500},
+ {0x033, 0x00000014},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000400},
{0x033, 0x00000013},
{0x03E, 0x00000000},
{0x03F, 0x00028B00},
{0x033, 0x00000012},
{0x03E, 0x00000000},
{0x03F, 0x0009AB00},
+ {0x033, 0x00000011},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x033, 0x00000010},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00002900},
{0x0EF, 0x00000000},
+ {0x10055, 0x00080080},
{0x000, 0x00033C01},
{0x10000, 0x00033C00},
{0x01A, 0x00040004},
- {0x0FE, 0x00000000},
{0x096, 0x00015200},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0004D000},
@@ -7404,6 +14471,18 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x067, 0x0000D300},
+ {0x0DA, 0x000D4000},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x067, 0x0000D300},
{0x0DA, 0x000D4000},
@@ -7430,7 +14509,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x0DA, 0x000D4009},
{0xB0000000, 0x00000000},
{0x057, 0x0000D589},
- {0x05A, 0x0007FFFF},
+ {0x05A, 0x0007F0F8},
{0x043, 0x00005000},
{0x018, 0x00001001},
{0x10018, 0x00001001},
@@ -7462,6 +14541,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x08F, 0x000D1352},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x08F, 0x000D1352},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x08F, 0x000D1352},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -7496,6 +14583,52 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x00000015},
{0x033, 0x00000001},
{0x03F, 0x00000017},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000017},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000007},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x0EF, 0x00008000},
{0x033, 0x00000020},
@@ -8007,6 +15140,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000EFFF},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000EFFF},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000EFFF},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -8113,7 +15254,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000005},
{0x03F, 0x00004344},
{0x033, 0x00000006},
- {0x03F, 0x00004324},
+ {0x03F, 0x00004344},
{0x033, 0x00000007},
{0x03F, 0x00004344},
{0x033, 0x00000008},
@@ -8176,6 +15317,85 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x00000200},
{0x0EF, 0x00000000},
{0x0EF, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x030, 0x000084DC},
{0x030, 0x000103C9},
{0x030, 0x00018399},
@@ -8188,6 +15408,189 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x030, 0x00050011},
{0x030, 0x00058000},
{0x030, 0x00060000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x030, 0x000085ED},
+ {0x030, 0x000105CC},
+ {0x030, 0x000184AA},
+ {0x030, 0x00020388},
+ {0x030, 0x00028377},
+ {0x030, 0x00030377},
+ {0x030, 0x00038255},
+ {0x030, 0x00040244},
+ {0x030, 0x00048133},
+ {0x030, 0x00050112},
+ {0x030, 0x00058101},
+ {0x030, 0x00060001},
+ {0xA0000000, 0x00000000},
+ {0x030, 0x000084DC},
+ {0x030, 0x000103C9},
+ {0x030, 0x00018399},
+ {0x030, 0x00020287},
+ {0x030, 0x00028277},
+ {0x030, 0x00030165},
+ {0x030, 0x00038144},
+ {0x030, 0x00040044},
+ {0x030, 0x00048022},
+ {0x030, 0x00050011},
+ {0x030, 0x00058000},
+ {0x030, 0x00060000},
+ {0xB0000000, 0x00000000},
{0x030, 0x00068000},
{0x030, 0x00070000},
{0x0EF, 0x00000000},
@@ -8458,6 +15861,14 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x095, 0x00000008},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x095, 0x00000008},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x095, 0x00000008},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
@@ -8477,101 +15888,2117 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0xB0000000, 0x00000000},
{0x0EE, 0x00001000},
{0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000052},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000009C},
+ {0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000003E6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000003E6},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000063},
{0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000052},
@@ -8591,20 +18018,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x00000152},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000152},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000152},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x00000152},
+ {0x03F, 0x00000052},
{0xA0000000, 0x00000000},
{0x03F, 0x00000052},
{0xB0000000, 0x00000000},
@@ -8627,20 +18062,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000015A},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000015A},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000015A},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000015A},
+ {0x03F, 0x0000005A},
{0xA0000000, 0x00000000},
{0x03F, 0x0000005A},
{0xB0000000, 0x00000000},
@@ -8663,20 +18106,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000019C},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000019C},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x0000019C},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x0000019C},
+ {0x03F, 0x0000009C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000009C},
{0xB0000000, 0x00000000},
@@ -8699,20 +18150,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x000001A4},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001A4},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001A4},
+ {0x03F, 0x0000019C},
{0xA0000000, 0x00000000},
{0x03F, 0x0000019C},
{0xB0000000, 0x00000000},
@@ -8735,20 +18194,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x000001E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000001E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000001E6},
+ {0x03F, 0x000001A4},
{0xA0000000, 0x00000000},
{0x03F, 0x000001A4},
{0xB0000000, 0x00000000},
@@ -8771,20 +18238,28 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x000002E6},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000002E6},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x000002E6},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x03F, 0x000002E6},
+ {0x03F, 0x000001E6},
{0xA0000000, 0x00000000},
{0x03F, 0x000001E6},
{0xB0000000, 0x00000000},
@@ -9828,131 +19303,131 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
- {0x10030, 0x000781EF},
- {0x10030, 0x000785E9},
- {0x10030, 0x000789E3},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
{0x10030, 0x00078DA3},
{0x10030, 0x00079161},
{0x10030, 0x0007955B},
- {0x10030, 0x00079921},
- {0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -9973,131 +19448,711 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
- {0x10030, 0x000781EF},
- {0x10030, 0x000785E9},
- {0x10030, 0x000789E3},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
{0x10030, 0x00078DA3},
{0x10030, 0x00079161},
{0x10030, 0x0007955B},
- {0x10030, 0x00079921},
- {0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
+ {0x10030, 0x000281EF},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
+ {0x10030, 0x000301EF},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x000001EF},
@@ -10118,1002 +20173,1002 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00003C5F},
{0x10030, 0x00004059},
{0x10030, 0x00004453},
- {0x10030, 0x000201ED},
- {0x10030, 0x000205AD},
- {0x10030, 0x000209A7},
- {0x10030, 0x00020DA1},
- {0x10030, 0x0002119B},
- {0x10030, 0x00021561},
- {0x10030, 0x0002195B},
- {0x10030, 0x00021D27},
- {0x10030, 0x00022121},
- {0x10030, 0x000224E9},
- {0x10030, 0x000228E3},
- {0x10030, 0x00022CA9},
- {0x10030, 0x000230A3},
- {0x10030, 0x00023469},
- {0x10030, 0x00023863},
- {0x10030, 0x00023C29},
- {0x10030, 0x00024023},
- {0x10030, 0x0002441D},
+ {0x10030, 0x000201EF},
+ {0x10030, 0x000205E9},
+ {0x10030, 0x000209E3},
+ {0x10030, 0x00020DA3},
+ {0x10030, 0x00021161},
+ {0x10030, 0x0002155B},
+ {0x10030, 0x0002191F},
+ {0x10030, 0x00021D19},
+ {0x10030, 0x000220E1},
+ {0x10030, 0x000224DB},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1B},
+ {0x10030, 0x00024015},
+ {0x10030, 0x0002440F},
{0x10030, 0x000281EF},
- {0x10030, 0x000285AF},
- {0x10030, 0x000289A9},
- {0x10030, 0x00028DA3},
- {0x10030, 0x0002919D},
- {0x10030, 0x00029563},
- {0x10030, 0x0002995D},
- {0x10030, 0x00029D25},
- {0x10030, 0x0002A11F},
- {0x10030, 0x0002A4E7},
- {0x10030, 0x0002A8E1},
- {0x10030, 0x0002ACA7},
- {0x10030, 0x0002B0A1},
- {0x10030, 0x0002B467},
- {0x10030, 0x0002B861},
- {0x10030, 0x0002BC27},
- {0x10030, 0x0002C021},
- {0x10030, 0x0002C41B},
+ {0x10030, 0x000285E7},
+ {0x10030, 0x000289A7},
+ {0x10030, 0x00028D65},
+ {0x10030, 0x0002915F},
+ {0x10030, 0x00029523},
+ {0x10030, 0x0002991D},
+ {0x10030, 0x00029CE5},
+ {0x10030, 0x0002A0DF},
+ {0x10030, 0x0002A4A7},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC67},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B427},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC19},
+ {0x10030, 0x0002C013},
+ {0x10030, 0x0002C40D},
{0x10030, 0x000301EF},
- {0x10030, 0x000305AF},
- {0x10030, 0x000309A9},
- {0x10030, 0x00030DA3},
- {0x10030, 0x0003119D},
- {0x10030, 0x00031563},
- {0x10030, 0x0003195D},
- {0x10030, 0x00031D25},
- {0x10030, 0x0003211F},
- {0x10030, 0x000324E7},
- {0x10030, 0x000328E1},
- {0x10030, 0x00032CA7},
- {0x10030, 0x000330A1},
- {0x10030, 0x00033467},
- {0x10030, 0x00033861},
- {0x10030, 0x00033C27},
- {0x10030, 0x00034021},
- {0x10030, 0x0003441B},
- {0x10030, 0x000601EB},
- {0x10030, 0x000605AB},
- {0x10030, 0x000609A5},
- {0x10030, 0x00060D9F},
- {0x10030, 0x00061199},
- {0x10030, 0x00061593},
- {0x10030, 0x00061959},
- {0x10030, 0x00061D53},
- {0x10030, 0x0006211B},
- {0x10030, 0x00062515},
- {0x10030, 0x000628DD},
- {0x10030, 0x00062CD7},
- {0x10030, 0x0006309D},
- {0x10030, 0x00063497},
- {0x10030, 0x0006385D},
- {0x10030, 0x00063C57},
- {0x10030, 0x0006401D},
- {0x10030, 0x00064417},
- {0x10030, 0x000681E7},
- {0x10030, 0x000685A7},
- {0x10030, 0x000689A1},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955F},
- {0x10030, 0x00069959},
- {0x10030, 0x00069D21},
- {0x10030, 0x0006A11B},
- {0x10030, 0x0006A4E3},
- {0x10030, 0x0006A8DD},
- {0x10030, 0x0006ACA5},
- {0x10030, 0x0006B09F},
- {0x10030, 0x0006B465},
- {0x10030, 0x0006B85F},
- {0x10030, 0x0006BC25},
- {0x10030, 0x0006C01F},
- {0x10030, 0x0006C419},
- {0x10030, 0x000701E7},
- {0x10030, 0x000705A7},
- {0x10030, 0x000709A1},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071955},
- {0x10030, 0x00071D1D},
- {0x10030, 0x00072117},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072CA1},
- {0x10030, 0x0007309B},
- {0x10030, 0x00073461},
- {0x10030, 0x0007385B},
- {0x10030, 0x00073C21},
- {0x10030, 0x0007401B},
- {0x10030, 0x0007441B},
- {0x10030, 0x000781EF},
- {0x10030, 0x000785E9},
- {0x10030, 0x000789E3},
+ {0x10030, 0x000305E7},
+ {0x10030, 0x000309A7},
+ {0x10030, 0x00030D65},
+ {0x10030, 0x0003115F},
+ {0x10030, 0x00031525},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031CE7},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324A9},
+ {0x10030, 0x000328A3},
+ {0x10030, 0x00032C69},
+ {0x10030, 0x00033063},
+ {0x10030, 0x00033429},
+ {0x10030, 0x00033823},
+ {0x10030, 0x00033C1D},
+ {0x10030, 0x00034013},
+ {0x10030, 0x0003440D},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x00072111},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
{0x10030, 0x00078DA3},
{0x10030, 0x00079161},
{0x10030, 0x0007955B},
- {0x10030, 0x00079921},
- {0x10030, 0x00079D1B},
- {0x10030, 0x0007A0E1},
- {0x10030, 0x0007A4DB},
- {0x10030, 0x0007A8A1},
- {0x10030, 0x0007AC9B},
- {0x10030, 0x0007B061},
- {0x10030, 0x0007B45B},
- {0x10030, 0x0007B821},
- {0x10030, 0x0007BC1B},
- {0x10030, 0x0007C015},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
{0x10030, 0x0007C40F},
{0x90330001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90340001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90350001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90360001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0x90400001, 0x00000000}, {0x40000000, 0x00000000},
- {0x10030, 0x000201DF},
- {0x10030, 0x000205D9},
- {0x10030, 0x000209D3},
- {0x10030, 0x00020D99},
- {0x10030, 0x00021193},
- {0x10030, 0x0002155F},
- {0x10030, 0x00021959},
- {0x10030, 0x00021D21},
- {0x10030, 0x00022119},
- {0x10030, 0x000224DF},
- {0x10030, 0x000228D9},
- {0x10030, 0x00022C9F},
- {0x10030, 0x00023099},
- {0x10030, 0x0002345F},
- {0x10030, 0x00023859},
- {0x10030, 0x00023C1F},
- {0x10030, 0x00024019},
- {0x10030, 0x00024413},
- {0x10030, 0x000281CD},
- {0x10030, 0x000285DB},
- {0x10030, 0x000289D5},
- {0x10030, 0x00028D9B},
- {0x10030, 0x0002918D},
- {0x10030, 0x00029555},
- {0x10030, 0x00029957},
- {0x10030, 0x00029D1F},
- {0x10030, 0x0002A119},
- {0x10030, 0x0002A4DF},
- {0x10030, 0x0002A8D9},
- {0x10030, 0x0002AC9F},
- {0x10030, 0x0002B099},
- {0x10030, 0x0002B45F},
- {0x10030, 0x0002B859},
- {0x10030, 0x0002BC1F},
- {0x10030, 0x0002C019},
- {0x10030, 0x0002C413},
- {0x10030, 0x000301D9},
- {0x10030, 0x000305DB},
- {0x10030, 0x000309D5},
- {0x10030, 0x00030D9B},
- {0x10030, 0x00031195},
- {0x10030, 0x0003155D},
- {0x10030, 0x00031955},
- {0x10030, 0x00031D1D},
- {0x10030, 0x00032119},
- {0x10030, 0x000324DF},
- {0x10030, 0x000328D9},
- {0x10030, 0x00032C9F},
- {0x10030, 0x00033099},
- {0x10030, 0x0003345F},
- {0x10030, 0x00033859},
- {0x10030, 0x00033C1F},
- {0x10030, 0x00034019},
- {0x10030, 0x00034413},
- {0x10030, 0x000601E1},
- {0x10030, 0x000605DB},
- {0x10030, 0x000609D5},
- {0x10030, 0x00060D9B},
- {0x10030, 0x00061195},
- {0x10030, 0x0006155B},
- {0x10030, 0x00061957},
- {0x10030, 0x00061D1F},
- {0x10030, 0x00062119},
- {0x10030, 0x000624DF},
- {0x10030, 0x000628D9},
- {0x10030, 0x00062C9F},
- {0x10030, 0x00063099},
- {0x10030, 0x0006345F},
- {0x10030, 0x00063859},
- {0x10030, 0x00063C1F},
- {0x10030, 0x00064019},
- {0x10030, 0x00064413},
- {0x10030, 0x000681E1},
- {0x10030, 0x000685DB},
- {0x10030, 0x000689D5},
- {0x10030, 0x00068D9B},
- {0x10030, 0x00069195},
- {0x10030, 0x0006955B},
- {0x10030, 0x00069957},
- {0x10030, 0x00069D1F},
- {0x10030, 0x0006A119},
- {0x10030, 0x0006A4DF},
- {0x10030, 0x0006A8D9},
- {0x10030, 0x0006AC9F},
- {0x10030, 0x0006B099},
- {0x10030, 0x0006B45F},
- {0x10030, 0x0006B859},
- {0x10030, 0x0006BC1F},
- {0x10030, 0x0006C019},
- {0x10030, 0x0006C413},
- {0x10030, 0x000701E1},
- {0x10030, 0x000705DB},
- {0x10030, 0x000709D5},
- {0x10030, 0x00070D9B},
- {0x10030, 0x00071195},
- {0x10030, 0x0007155B},
- {0x10030, 0x00071957},
- {0x10030, 0x00071D1F},
- {0x10030, 0x00072119},
- {0x10030, 0x000724DF},
- {0x10030, 0x000728D9},
- {0x10030, 0x00072C9F},
- {0x10030, 0x00073099},
- {0x10030, 0x0007345F},
- {0x10030, 0x00073859},
- {0x10030, 0x00073C1F},
- {0x10030, 0x00074019},
- {0x10030, 0x00074413},
- {0x10030, 0x000781DF},
- {0x10030, 0x000785D9},
- {0x10030, 0x000789D3},
- {0x10030, 0x00078D99},
- {0x10030, 0x00079193},
- {0x10030, 0x0007955F},
- {0x10030, 0x00079959},
- {0x10030, 0x00079D21},
- {0x10030, 0x0007A115},
- {0x10030, 0x0007A4DF},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007AC9F},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B45F},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC1F},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
- {0x10030, 0x00000000},
- {0x10030, 0x000785A9},
- {0x10030, 0x000789A3},
- {0x10030, 0x00078D9D},
- {0x10030, 0x00079197},
- {0x10030, 0x00079591},
- {0x10030, 0x00079957},
- {0x10030, 0x00079D51},
- {0x10030, 0x0007A119},
- {0x10030, 0x0007A513},
- {0x10030, 0x0007A8D9},
- {0x10030, 0x0007ACD3},
- {0x10030, 0x0007B099},
- {0x10030, 0x0007B493},
- {0x10030, 0x0007B859},
- {0x10030, 0x0007BC53},
- {0x10030, 0x0007C019},
- {0x10030, 0x0007C413},
+ {0x10030, 0x000001EF},
+ {0x10030, 0x000005E9},
+ {0x10030, 0x000009E3},
+ {0x10030, 0x00000DDD},
+ {0x10030, 0x000011D7},
+ {0x10030, 0x0000159F},
+ {0x10030, 0x00001999},
+ {0x10030, 0x00001D5F},
+ {0x10030, 0x00002159},
+ {0x10030, 0x0000251F},
+ {0x10030, 0x00002919},
+ {0x10030, 0x00002CDF},
+ {0x10030, 0x000030D9},
+ {0x10030, 0x0000349F},
+ {0x10030, 0x00003899},
+ {0x10030, 0x00003C5F},
+ {0x10030, 0x00004059},
+ {0x10030, 0x00004453},
+ {0x10030, 0x000201A7},
+ {0x10030, 0x000205A1},
+ {0x10030, 0x0002099B},
+ {0x10030, 0x00020D95},
+ {0x10030, 0x0002115B},
+ {0x10030, 0x00021555},
+ {0x10030, 0x00021921},
+ {0x10030, 0x00021D1B},
+ {0x10030, 0x000220E3},
+ {0x10030, 0x000224DD},
+ {0x10030, 0x000228A3},
+ {0x10030, 0x00022C9D},
+ {0x10030, 0x00023063},
+ {0x10030, 0x0002345D},
+ {0x10030, 0x00023823},
+ {0x10030, 0x00023C1D},
+ {0x10030, 0x00024017},
+ {0x10030, 0x00024411},
+ {0x10030, 0x000281A9},
+ {0x10030, 0x000285A3},
+ {0x10030, 0x0002899D},
+ {0x10030, 0x00028D97},
+ {0x10030, 0x0002915D},
+ {0x10030, 0x00029557},
+ {0x10030, 0x0002991F},
+ {0x10030, 0x00029D19},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DB},
+ {0x10030, 0x0002A8A1},
+ {0x10030, 0x0002AC9B},
+ {0x10030, 0x0002B061},
+ {0x10030, 0x0002B45B},
+ {0x10030, 0x0002B821},
+ {0x10030, 0x0002BC1B},
+ {0x10030, 0x0002C015},
+ {0x10030, 0x0002C40F},
+ {0x10030, 0x000301A9},
+ {0x10030, 0x000305A3},
+ {0x10030, 0x0003099D},
+ {0x10030, 0x00030D97},
+ {0x10030, 0x0003115D},
+ {0x10030, 0x00031557},
+ {0x10030, 0x0003191F},
+ {0x10030, 0x00031D19},
+ {0x10030, 0x000320E1},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328A1},
+ {0x10030, 0x00032C9B},
+ {0x10030, 0x00033061},
+ {0x10030, 0x0003345B},
+ {0x10030, 0x00033821},
+ {0x10030, 0x00033C1B},
+ {0x10030, 0x00034015},
+ {0x10030, 0x0003440F},
+ {0x10030, 0x000601F1},
+ {0x10030, 0x000605E9},
+ {0x10030, 0x000609A9},
+ {0x10030, 0x00060D65},
+ {0x10030, 0x0006115F},
+ {0x10030, 0x00061525},
+ {0x10030, 0x0006191F},
+ {0x10030, 0x00061CE7},
+ {0x10030, 0x000620E1},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628A3},
+ {0x10030, 0x00062C69},
+ {0x10030, 0x00063063},
+ {0x10030, 0x00063429},
+ {0x10030, 0x00063823},
+ {0x10030, 0x00063C1D},
+ {0x10030, 0x00064013},
+ {0x10030, 0x0006440D},
+ {0x10030, 0x000681EF},
+ {0x10030, 0x000685E7},
+ {0x10030, 0x000689A7},
+ {0x10030, 0x00068D61},
+ {0x10030, 0x0006915B},
+ {0x10030, 0x00069523},
+ {0x10030, 0x0006991D},
+ {0x10030, 0x00069CE5},
+ {0x10030, 0x0006A0DF},
+ {0x10030, 0x0006A4A7},
+ {0x10030, 0x0006A8A1},
+ {0x10030, 0x0006AC67},
+ {0x10030, 0x0006B061},
+ {0x10030, 0x0006B429},
+ {0x10030, 0x0006B823},
+ {0x10030, 0x0006BC1D},
+ {0x10030, 0x0006C017},
+ {0x10030, 0x0006C40D},
+ {0x10030, 0x000701F1},
+ {0x10030, 0x000705E9},
+ {0x10030, 0x000709A9},
+ {0x10030, 0x00070D63},
+ {0x10030, 0x0007115D},
+ {0x10030, 0x00071523},
+ {0x10030, 0x0007191D},
+ {0x10030, 0x00071D17},
+ {0x10030, 0x000720DF},
+ {0x10030, 0x000724D9},
+ {0x10030, 0x000728D3},
+ {0x10030, 0x00072C67},
+ {0x10030, 0x00073061},
+ {0x10030, 0x00073427},
+ {0x10030, 0x00073821},
+ {0x10030, 0x00073C1B},
+ {0x10030, 0x00074015},
+ {0x10030, 0x0007440D},
+ {0x10030, 0x000781F1},
+ {0x10030, 0x000785EB},
+ {0x10030, 0x000789E5},
+ {0x10030, 0x00078DA3},
+ {0x10030, 0x00079161},
+ {0x10030, 0x0007955B},
+ {0x10030, 0x00079923},
+ {0x10030, 0x00079D1D},
+ {0x10030, 0x0007A117},
+ {0x10030, 0x0007A4DD},
+ {0x10030, 0x0007A8D7},
+ {0x10030, 0x0007AC9D},
+ {0x10030, 0x0007B063},
+ {0x10030, 0x0007B45D},
+ {0x10030, 0x0007B857},
+ {0x10030, 0x0007BC1D},
+ {0x10030, 0x0007C017},
+ {0x10030, 0x0007C40F},
{0xA0000000, 0x00000000},
{0x10030, 0x000001EF},
{0x10030, 0x000005E9},
@@ -11281,6 +21336,1150 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00004017},
{0x100EE, 0x00000000},
{0x100EE, 0x00002000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x000200E8},
+ {0x10030, 0x000204E5},
+ {0x10030, 0x000208E2},
+ {0x10030, 0x00020CDF},
+ {0x10030, 0x000210DC},
+ {0x10030, 0x000214D9},
+ {0x10030, 0x000218D6},
+ {0x10030, 0x00021CD3},
+ {0x10030, 0x000220D0},
+ {0x10030, 0x000224CD},
+ {0x10030, 0x000228CD},
+ {0x10030, 0x00022CCD},
+ {0x10030, 0x000230CD},
+ {0x10030, 0x000234CD},
+ {0x10030, 0x000238CD},
+ {0x10030, 0x00023CCD},
+ {0x10030, 0x000240CD},
+ {0x10030, 0x000280E8},
+ {0x10030, 0x000284E5},
+ {0x10030, 0x000288E2},
+ {0x10030, 0x00028CDF},
+ {0x10030, 0x000290DC},
+ {0x10030, 0x000294D9},
+ {0x10030, 0x000298D6},
+ {0x10030, 0x00029CD3},
+ {0x10030, 0x0002A0D0},
+ {0x10030, 0x0002A4CD},
+ {0x10030, 0x0002A8CD},
+ {0x10030, 0x0002ACCD},
+ {0x10030, 0x0002B0CD},
+ {0x10030, 0x0002B4CD},
+ {0x10030, 0x0002B8CD},
+ {0x10030, 0x0002BCCD},
+ {0x10030, 0x0002C0CD},
+ {0x10030, 0x000300E8},
+ {0x10030, 0x000304E5},
+ {0x10030, 0x000308E2},
+ {0x10030, 0x00030CDF},
+ {0x10030, 0x000310DC},
+ {0x10030, 0x000314D9},
+ {0x10030, 0x000318D6},
+ {0x10030, 0x00031CD3},
+ {0x10030, 0x000320D0},
+ {0x10030, 0x000324CD},
+ {0x10030, 0x000328CD},
+ {0x10030, 0x00032CCD},
+ {0x10030, 0x000330CD},
+ {0x10030, 0x000334CD},
+ {0x10030, 0x000338CD},
+ {0x10030, 0x00033CCD},
+ {0x10030, 0x000340CD},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x000200FA},
+ {0x10030, 0x000204F7},
+ {0x10030, 0x000208F4},
+ {0x10030, 0x00020CF1},
+ {0x10030, 0x000210EE},
+ {0x10030, 0x000214EB},
+ {0x10030, 0x000218E8},
+ {0x10030, 0x00021CE5},
+ {0x10030, 0x000220E2},
+ {0x10030, 0x000224DF},
+ {0x10030, 0x000228DC},
+ {0x10030, 0x00022CD9},
+ {0x10030, 0x000230D6},
+ {0x10030, 0x000234D3},
+ {0x10030, 0x000238D0},
+ {0x10030, 0x00023C0D},
+ {0x10030, 0x0002400A},
+ {0x10030, 0x000280F9},
+ {0x10030, 0x000284F6},
+ {0x10030, 0x000288F3},
+ {0x10030, 0x00028CF0},
+ {0x10030, 0x000290ED},
+ {0x10030, 0x000294EA},
+ {0x10030, 0x000298E7},
+ {0x10030, 0x00029CE4},
+ {0x10030, 0x0002A0E1},
+ {0x10030, 0x0002A4DE},
+ {0x10030, 0x0002A8DB},
+ {0x10030, 0x0002ACD8},
+ {0x10030, 0x0002B0D5},
+ {0x10030, 0x0002B4D2},
+ {0x10030, 0x0002B8CF},
+ {0x10030, 0x0002BC0C},
+ {0x10030, 0x0002C009},
+ {0x10030, 0x000300F6},
+ {0x10030, 0x000304F3},
+ {0x10030, 0x000308F0},
+ {0x10030, 0x00030CED},
+ {0x10030, 0x000310EA},
+ {0x10030, 0x000314E7},
+ {0x10030, 0x000318E4},
+ {0x10030, 0x00031CE1},
+ {0x10030, 0x000320DE},
+ {0x10030, 0x000324DB},
+ {0x10030, 0x000328D8},
+ {0x10030, 0x00032CD5},
+ {0x10030, 0x000330D2},
+ {0x10030, 0x000334CF},
+ {0x10030, 0x000338CC},
+ {0x10030, 0x00033C09},
+ {0x10030, 0x00034006},
+ {0xB0000000, 0x00000000},
+ {0x10030, 0x000600F6},
+ {0x10030, 0x000604F3},
+ {0x10030, 0x000608F0},
+ {0x10030, 0x00060CED},
+ {0x10030, 0x000610EA},
+ {0x10030, 0x000614E7},
+ {0x10030, 0x000618E4},
+ {0x10030, 0x00061CE1},
+ {0x10030, 0x000620DE},
+ {0x10030, 0x000624DB},
+ {0x10030, 0x000628D8},
+ {0x10030, 0x00062CD5},
+ {0x10030, 0x000630D2},
+ {0x10030, 0x000634CF},
+ {0x10030, 0x000638CC},
+ {0x10030, 0x00063C09},
+ {0x10030, 0x00064006},
+ {0x10030, 0x000680F5},
+ {0x10030, 0x000684F2},
+ {0x10030, 0x000688EF},
+ {0x10030, 0x00068CEC},
+ {0x10030, 0x000690E9},
+ {0x10030, 0x000694E6},
+ {0x10030, 0x000698E3},
+ {0x10030, 0x00069CE0},
+ {0x10030, 0x0006A0DD},
+ {0x10030, 0x0006A4DA},
+ {0x10030, 0x0006A8D7},
+ {0x10030, 0x0006ACD4},
+ {0x10030, 0x0006B0D1},
+ {0x10030, 0x0006B4CE},
+ {0x10030, 0x0006B8CB},
+ {0x10030, 0x0006BC08},
+ {0x10030, 0x0006C005},
+ {0x10030, 0x000700F5},
+ {0x10030, 0x000704F2},
+ {0x10030, 0x000708EF},
+ {0x10030, 0x00070CEC},
+ {0x10030, 0x000710E9},
+ {0x10030, 0x000714E6},
+ {0x10030, 0x000718E3},
+ {0x10030, 0x00071CE0},
+ {0x10030, 0x000720DD},
+ {0x10030, 0x000724DA},
+ {0x10030, 0x000728D7},
+ {0x10030, 0x00072CD4},
+ {0x10030, 0x000730D1},
+ {0x10030, 0x000734CE},
+ {0x10030, 0x000738CB},
+ {0x10030, 0x00073C08},
+ {0x10030, 0x00074005},
{0x10030, 0x000780F4},
{0x10030, 0x000784F1},
{0x10030, 0x000788EE},
@@ -11334,9 +22533,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000025},
{0x03F, 0x00008002},
{0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000028},
{0x03F, 0x00050002},
{0x033, 0x00000029},
@@ -11350,9 +22633,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000002D},
{0x03F, 0x00008002},
{0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000030},
{0x03F, 0x00050002},
{0x033, 0x00000031},
@@ -11366,9 +22733,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000035},
{0x03F, 0x00008002},
{0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000060},
{0x03F, 0x00050002},
{0x033, 0x00000061},
@@ -11382,9 +22833,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000065},
{0x03F, 0x00008002},
{0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000068},
{0x03F, 0x00050002},
{0x033, 0x00000069},
@@ -11398,9 +22933,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000006D},
{0x03F, 0x00008002},
{0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000070},
{0x03F, 0x00050002},
{0x033, 0x00000071},
@@ -11414,9 +23033,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000075},
{0x03F, 0x00008002},
{0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000078},
{0x03F, 0x00050002},
{0x033, 0x00000079},
@@ -11430,9 +23133,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000007D},
{0x03F, 0x00008002},
{0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A0},
{0x03F, 0x00050002},
{0x033, 0x000000A1},
@@ -11446,9 +23233,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000A5},
{0x03F, 0x00008002},
{0x033, 0x000000A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000A8},
{0x03F, 0x00050002},
{0x033, 0x000000A9},
@@ -11462,9 +23333,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000AD},
{0x03F, 0x00008002},
{0x033, 0x000000AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B0},
{0x03F, 0x00050002},
{0x033, 0x000000B1},
@@ -11478,9 +23433,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000B5},
{0x03F, 0x00008002},
{0x033, 0x000000B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E0},
{0x03F, 0x00050002},
{0x033, 0x000000E1},
@@ -11494,9 +23533,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000E5},
{0x03F, 0x00008002},
{0x033, 0x000000E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000E8},
{0x03F, 0x00050002},
{0x033, 0x000000E9},
@@ -11510,9 +23633,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000ED},
{0x03F, 0x00008002},
{0x033, 0x000000EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F0},
{0x03F, 0x00050002},
{0x033, 0x000000F1},
@@ -11526,9 +23733,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000F5},
{0x03F, 0x00008002},
{0x033, 0x000000F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000F8},
{0x03F, 0x00050002},
{0x033, 0x000000F9},
@@ -11542,9 +23833,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000000FD},
{0x03F, 0x00008002},
{0x033, 0x000000FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000000FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000120},
{0x03F, 0x00050002},
{0x033, 0x00000121},
@@ -11558,9 +23933,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000125},
{0x03F, 0x00008002},
{0x033, 0x00000126},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000127},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000128},
{0x03F, 0x00050002},
{0x033, 0x00000129},
@@ -11574,9 +24033,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000012D},
{0x03F, 0x00008002},
{0x033, 0x0000012E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000012F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000130},
{0x03F, 0x00050002},
{0x033, 0x00000131},
@@ -11590,9 +24133,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000135},
{0x03F, 0x00008002},
{0x033, 0x00000136},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000137},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000160},
{0x03F, 0x00050002},
{0x033, 0x00000161},
@@ -11606,9 +24233,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000165},
{0x03F, 0x00008002},
{0x033, 0x00000166},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000167},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000168},
{0x03F, 0x00050002},
{0x033, 0x00000169},
@@ -11622,9 +24333,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000016D},
{0x03F, 0x00008002},
{0x033, 0x0000016E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000016F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000170},
{0x03F, 0x00050002},
{0x033, 0x00000171},
@@ -11638,9 +24433,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x00000175},
{0x03F, 0x00008002},
{0x033, 0x00000176},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000177},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x00000178},
{0x03F, 0x00050002},
{0x033, 0x00000179},
@@ -11654,9 +24533,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x0000017D},
{0x03F, 0x00008002},
{0x033, 0x0000017E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x0000017F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A0},
{0x03F, 0x00050002},
{0x033, 0x000001A1},
@@ -11670,9 +24633,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001A5},
{0x03F, 0x00008002},
{0x033, 0x000001A6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001A8},
{0x03F, 0x00050002},
{0x033, 0x000001A9},
@@ -11686,9 +24733,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001AD},
{0x03F, 0x00008002},
{0x033, 0x000001AE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001AF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B0},
{0x03F, 0x00050002},
{0x033, 0x000001B1},
@@ -11702,9 +24833,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001B5},
{0x03F, 0x00008002},
{0x033, 0x000001B6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001B7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E0},
{0x03F, 0x00050002},
{0x033, 0x000001E1},
@@ -11718,9 +24933,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001E5},
{0x03F, 0x00008002},
{0x033, 0x000001E6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001E8},
{0x03F, 0x00050002},
{0x033, 0x000001E9},
@@ -11734,9 +25033,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001ED},
{0x03F, 0x00008002},
{0x033, 0x000001EE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001EF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
{0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F0},
{0x03F, 0x00050002},
{0x033, 0x000001F1},
@@ -11750,9 +25133,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001F5},
{0x03F, 0x00008002},
{0x033, 0x000001F6},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F7},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001F8},
{0x03F, 0x00050002},
{0x033, 0x000001F9},
@@ -11766,9 +25233,93 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x033, 0x000001FD},
{0x03F, 0x00008002},
{0x033, 0x000001FE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x033, 0x000001FF},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
{0x03F, 0x00000003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008002},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000003},
+ {0xB0000000, 0x00000000},
{0x0EF, 0x00000000},
{0x005, 0x00000001},
{0x10005, 0x00000001},
@@ -11810,7 +25361,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00022000},
{0x10030, 0x00023000},
{0x10030, 0x00024000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00025000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00025003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00025000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00026003},
{0x10030, 0x00027003},
{0x10030, 0x00028000},
@@ -11818,7 +25411,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x0002A000},
{0x10030, 0x0002B000},
{0x10030, 0x0002C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0002D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0002D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0002D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0002E003},
{0x10030, 0x0002F003},
{0x10030, 0x00030000},
@@ -11826,7 +25461,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00032000},
{0x10030, 0x00033000},
{0x10030, 0x00034000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00035003},
+ {0xA0000000, 0x00000000},
{0x10030, 0x00035000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00036003},
{0x10030, 0x00037003},
{0x10030, 0x00038000},
@@ -11834,7 +25511,49 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x0003A000},
{0x10030, 0x0003B000},
{0x10030, 0x0003C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0003D000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0003D003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0003D000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0003E003},
{0x10030, 0x0003F003},
{0x10030, 0x00060000},
@@ -11842,32 +25561,280 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x10030, 0x00062000},
{0x10030, 0x00063000},
{0x10030, 0x00064000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00065000},
{0x10030, 0x00066000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00065003},
+ {0x10030, 0x00066003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00065000},
+ {0x10030, 0x00066000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00067003},
{0x10030, 0x00068000},
{0x10030, 0x00069000},
{0x10030, 0x0006A000},
{0x10030, 0x0006B000},
{0x10030, 0x0006C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0006D000},
{0x10030, 0x0006E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0006D003},
+ {0x10030, 0x0006E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0006D000},
+ {0x10030, 0x0006E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0006F003},
{0x10030, 0x00070000},
{0x10030, 0x00071000},
{0x10030, 0x00072000},
{0x10030, 0x00073000},
{0x10030, 0x00074000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x00075000},
{0x10030, 0x00076000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x00075003},
+ {0x10030, 0x00076003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x00075000},
+ {0x10030, 0x00076000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x00077003},
{0x10030, 0x00078000},
{0x10030, 0x00079000},
{0x10030, 0x0007A000},
{0x10030, 0x0007B000},
{0x10030, 0x0007C000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90020000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90320000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90330000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90340000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90350000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0x90360000, 0x00000000}, {0x40000000, 0x00000000},
{0x10030, 0x0007D000},
{0x10030, 0x0007E000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90040001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90050001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90070001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x903f0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0x90400001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x10030, 0x0007D003},
+ {0x10030, 0x0007E003},
+ {0xA0000000, 0x00000000},
+ {0x10030, 0x0007D000},
+ {0x10030, 0x0007E000},
+ {0xB0000000, 0x00000000},
{0x10030, 0x0007F003},
{0x0ED, 0x00000010},
{0x033, 0x00000001},
@@ -11884,7 +25851,7 @@ static const struct rtw89_reg2_def rtw89_8852c_phy_radiob_regs[] = {
{0x03F, 0x0000000A},
{0x0ED, 0x00000000},
{0x100EE, 0x00000000},
- {0x0FE, 0x00000031},
+ {0x0FE, 0x00000048},
};
static const struct rtw89_reg2_def rtw89_8852c_phy_nctl_regs[] = {
@@ -13825,1207 +27792,1722 @@ static const s8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
const u8 rtw89_8852c_tx_shape[RTW89_BAND_MAX][RTW89_RS_TX_SHAPE_NUM]
[RTW89_REGD_NUM] = {
[0][0][RTW89_ACMA] = 0,
+ [0][0][RTW89_CN] = 0,
[0][0][RTW89_ETSI] = 0,
[0][0][RTW89_FCC] = 1,
[0][0][RTW89_IC] = 1,
+ [0][0][RTW89_KCC] = 0,
[0][0][RTW89_MKK] = 0,
+ [0][0][RTW89_UK] = 0,
[0][1][RTW89_ACMA] = 0,
+ [0][1][RTW89_CN] = 0,
[0][1][RTW89_ETSI] = 0,
[0][1][RTW89_FCC] = 3,
[0][1][RTW89_IC] = 3,
+ [0][1][RTW89_KCC] = 0,
[0][1][RTW89_MKK] = 0,
+ [0][1][RTW89_UK] = 0,
[1][1][RTW89_ACMA] = 0,
+ [1][1][RTW89_CN] = 0,
[1][1][RTW89_ETSI] = 0,
[1][1][RTW89_FCC] = 3,
[1][1][RTW89_IC] = 3,
+ [1][1][RTW89_KCC] = 0,
[1][1][RTW89_MKK] = 0,
- [2][1][RTW89_FCC] = 1,
+ [1][1][RTW89_UK] = 0,
+ [2][1][RTW89_ETSI] = 0,
+ [2][1][RTW89_FCC] = 0,
+ [2][1][RTW89_KCC] = 0,
};
const s8 rtw89_8852c_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
- [0][0][0][0][RTW89_WW][0] = 60,
- [0][0][0][0][RTW89_WW][1] = 60,
- [0][0][0][0][RTW89_WW][2] = 60,
- [0][0][0][0][RTW89_WW][3] = 60,
- [0][0][0][0][RTW89_WW][4] = 60,
- [0][0][0][0][RTW89_WW][5] = 60,
- [0][0][0][0][RTW89_WW][6] = 60,
- [0][0][0][0][RTW89_WW][7] = 60,
- [0][0][0][0][RTW89_WW][8] = 60,
- [0][0][0][0][RTW89_WW][9] = 60,
- [0][0][0][0][RTW89_WW][10] = 60,
- [0][0][0][0][RTW89_WW][11] = 60,
- [0][0][0][0][RTW89_WW][12] = 48,
+ [0][0][0][0][RTW89_WW][0] = 58,
+ [0][0][0][0][RTW89_WW][1] = 58,
+ [0][0][0][0][RTW89_WW][2] = 58,
+ [0][0][0][0][RTW89_WW][3] = 58,
+ [0][0][0][0][RTW89_WW][4] = 58,
+ [0][0][0][0][RTW89_WW][5] = 58,
+ [0][0][0][0][RTW89_WW][6] = 58,
+ [0][0][0][0][RTW89_WW][7] = 58,
+ [0][0][0][0][RTW89_WW][8] = 58,
+ [0][0][0][0][RTW89_WW][9] = 58,
+ [0][0][0][0][RTW89_WW][10] = 58,
+ [0][0][0][0][RTW89_WW][11] = 58,
+ [0][0][0][0][RTW89_WW][12] = 46,
[0][0][0][0][RTW89_WW][13] = 72,
- [0][1][0][0][RTW89_WW][0] = 48,
- [0][1][0][0][RTW89_WW][1] = 48,
- [0][1][0][0][RTW89_WW][2] = 48,
- [0][1][0][0][RTW89_WW][3] = 48,
- [0][1][0][0][RTW89_WW][4] = 48,
- [0][1][0][0][RTW89_WW][5] = 48,
- [0][1][0][0][RTW89_WW][6] = 48,
- [0][1][0][0][RTW89_WW][7] = 48,
- [0][1][0][0][RTW89_WW][8] = 48,
- [0][1][0][0][RTW89_WW][9] = 48,
- [0][1][0][0][RTW89_WW][10] = 48,
- [0][1][0][0][RTW89_WW][11] = 46,
- [0][1][0][0][RTW89_WW][12] = 34,
+ [0][1][0][0][RTW89_WW][0] = 42,
+ [0][1][0][0][RTW89_WW][1] = 42,
+ [0][1][0][0][RTW89_WW][2] = 42,
+ [0][1][0][0][RTW89_WW][3] = 42,
+ [0][1][0][0][RTW89_WW][4] = 42,
+ [0][1][0][0][RTW89_WW][5] = 42,
+ [0][1][0][0][RTW89_WW][6] = 42,
+ [0][1][0][0][RTW89_WW][7] = 42,
+ [0][1][0][0][RTW89_WW][8] = 42,
+ [0][1][0][0][RTW89_WW][9] = 42,
+ [0][1][0][0][RTW89_WW][10] = 42,
+ [0][1][0][0][RTW89_WW][11] = 42,
+ [0][1][0][0][RTW89_WW][12] = 18,
[0][1][0][0][RTW89_WW][13] = 60,
[1][0][0][0][RTW89_WW][0] = 0,
[1][0][0][0][RTW89_WW][1] = 0,
- [1][0][0][0][RTW89_WW][2] = 42,
- [1][0][0][0][RTW89_WW][3] = 42,
- [1][0][0][0][RTW89_WW][4] = 42,
+ [1][0][0][0][RTW89_WW][2] = 44,
+ [1][0][0][0][RTW89_WW][3] = 58,
+ [1][0][0][0][RTW89_WW][4] = 58,
[1][0][0][0][RTW89_WW][5] = 58,
- [1][0][0][0][RTW89_WW][6] = 42,
- [1][0][0][0][RTW89_WW][7] = 42,
- [1][0][0][0][RTW89_WW][8] = 42,
- [1][0][0][0][RTW89_WW][9] = 34,
- [1][0][0][0][RTW89_WW][10] = 22,
+ [1][0][0][0][RTW89_WW][6] = 46,
+ [1][0][0][0][RTW89_WW][7] = 46,
+ [1][0][0][0][RTW89_WW][8] = 28,
+ [1][0][0][0][RTW89_WW][9] = 26,
+ [1][0][0][0][RTW89_WW][10] = 26,
[1][0][0][0][RTW89_WW][11] = 0,
[1][0][0][0][RTW89_WW][12] = 0,
[1][0][0][0][RTW89_WW][13] = 0,
[1][1][0][0][RTW89_WW][0] = 0,
[1][1][0][0][RTW89_WW][1] = 0,
- [1][1][0][0][RTW89_WW][2] = 38,
- [1][1][0][0][RTW89_WW][3] = 38,
- [1][1][0][0][RTW89_WW][4] = 38,
- [1][1][0][0][RTW89_WW][5] = 48,
- [1][1][0][0][RTW89_WW][6] = 26,
- [1][1][0][0][RTW89_WW][7] = 26,
- [1][1][0][0][RTW89_WW][8] = 26,
- [1][1][0][0][RTW89_WW][9] = 22,
- [1][1][0][0][RTW89_WW][10] = 22,
+ [1][1][0][0][RTW89_WW][2] = 46,
+ [1][1][0][0][RTW89_WW][3] = 46,
+ [1][1][0][0][RTW89_WW][4] = 46,
+ [1][1][0][0][RTW89_WW][5] = 46,
+ [1][1][0][0][RTW89_WW][6] = 40,
+ [1][1][0][0][RTW89_WW][7] = 40,
+ [1][1][0][0][RTW89_WW][8] = 14,
+ [1][1][0][0][RTW89_WW][9] = 14,
+ [1][1][0][0][RTW89_WW][10] = 12,
[1][1][0][0][RTW89_WW][11] = 0,
[1][1][0][0][RTW89_WW][12] = 0,
[1][1][0][0][RTW89_WW][13] = 0,
- [0][0][1][0][RTW89_WW][0] = 60,
- [0][0][1][0][RTW89_WW][1] = 60,
- [0][0][1][0][RTW89_WW][2] = 60,
- [0][0][1][0][RTW89_WW][3] = 60,
- [0][0][1][0][RTW89_WW][4] = 60,
- [0][0][1][0][RTW89_WW][5] = 60,
- [0][0][1][0][RTW89_WW][6] = 60,
- [0][0][1][0][RTW89_WW][7] = 60,
- [0][0][1][0][RTW89_WW][8] = 60,
- [0][0][1][0][RTW89_WW][9] = 60,
- [0][0][1][0][RTW89_WW][10] = 60,
- [0][0][1][0][RTW89_WW][11] = 46,
- [0][0][1][0][RTW89_WW][12] = 42,
+ [0][0][1][0][RTW89_WW][0] = 58,
+ [0][0][1][0][RTW89_WW][1] = 58,
+ [0][0][1][0][RTW89_WW][2] = 58,
+ [0][0][1][0][RTW89_WW][3] = 58,
+ [0][0][1][0][RTW89_WW][4] = 58,
+ [0][0][1][0][RTW89_WW][5] = 58,
+ [0][0][1][0][RTW89_WW][6] = 58,
+ [0][0][1][0][RTW89_WW][7] = 58,
+ [0][0][1][0][RTW89_WW][8] = 58,
+ [0][0][1][0][RTW89_WW][9] = 58,
+ [0][0][1][0][RTW89_WW][10] = 58,
+ [0][0][1][0][RTW89_WW][11] = 58,
+ [0][0][1][0][RTW89_WW][12] = 58,
[0][0][1][0][RTW89_WW][13] = 0,
- [0][1][1][0][RTW89_WW][0] = 48,
- [0][1][1][0][RTW89_WW][1] = 48,
- [0][1][1][0][RTW89_WW][2] = 48,
- [0][1][1][0][RTW89_WW][3] = 48,
- [0][1][1][0][RTW89_WW][4] = 48,
- [0][1][1][0][RTW89_WW][5] = 48,
- [0][1][1][0][RTW89_WW][6] = 48,
- [0][1][1][0][RTW89_WW][7] = 48,
- [0][1][1][0][RTW89_WW][8] = 48,
- [0][1][1][0][RTW89_WW][9] = 48,
- [0][1][1][0][RTW89_WW][10] = 48,
- [0][1][1][0][RTW89_WW][11] = 38,
- [0][1][1][0][RTW89_WW][12] = 34,
+ [0][1][1][0][RTW89_WW][0] = 46,
+ [0][1][1][0][RTW89_WW][1] = 46,
+ [0][1][1][0][RTW89_WW][2] = 46,
+ [0][1][1][0][RTW89_WW][3] = 46,
+ [0][1][1][0][RTW89_WW][4] = 46,
+ [0][1][1][0][RTW89_WW][5] = 46,
+ [0][1][1][0][RTW89_WW][6] = 46,
+ [0][1][1][0][RTW89_WW][7] = 46,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][9] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][11] = 46,
+ [0][1][1][0][RTW89_WW][12] = 36,
[0][1][1][0][RTW89_WW][13] = 0,
- [0][0][2][0][RTW89_WW][0] = 60,
- [0][0][2][0][RTW89_WW][1] = 60,
- [0][0][2][0][RTW89_WW][2] = 60,
- [0][0][2][0][RTW89_WW][3] = 60,
- [0][0][2][0][RTW89_WW][4] = 60,
- [0][0][2][0][RTW89_WW][5] = 60,
- [0][0][2][0][RTW89_WW][6] = 60,
- [0][0][2][0][RTW89_WW][7] = 60,
- [0][0][2][0][RTW89_WW][8] = 60,
- [0][0][2][0][RTW89_WW][9] = 60,
- [0][0][2][0][RTW89_WW][10] = 60,
- [0][0][2][0][RTW89_WW][11] = 46,
- [0][0][2][0][RTW89_WW][12] = 42,
+ [0][0][2][0][RTW89_WW][0] = 58,
+ [0][0][2][0][RTW89_WW][1] = 58,
+ [0][0][2][0][RTW89_WW][2] = 58,
+ [0][0][2][0][RTW89_WW][3] = 58,
+ [0][0][2][0][RTW89_WW][4] = 58,
+ [0][0][2][0][RTW89_WW][5] = 58,
+ [0][0][2][0][RTW89_WW][6] = 58,
+ [0][0][2][0][RTW89_WW][7] = 58,
+ [0][0][2][0][RTW89_WW][8] = 58,
+ [0][0][2][0][RTW89_WW][9] = 58,
+ [0][0][2][0][RTW89_WW][10] = 58,
+ [0][0][2][0][RTW89_WW][11] = 58,
+ [0][0][2][0][RTW89_WW][12] = 38,
[0][0][2][0][RTW89_WW][13] = 0,
- [0][1][2][0][RTW89_WW][0] = 48,
- [0][1][2][0][RTW89_WW][1] = 48,
- [0][1][2][0][RTW89_WW][2] = 48,
- [0][1][2][0][RTW89_WW][3] = 48,
- [0][1][2][0][RTW89_WW][4] = 48,
- [0][1][2][0][RTW89_WW][5] = 48,
- [0][1][2][0][RTW89_WW][6] = 48,
- [0][1][2][0][RTW89_WW][7] = 48,
- [0][1][2][0][RTW89_WW][8] = 48,
- [0][1][2][0][RTW89_WW][9] = 48,
- [0][1][2][0][RTW89_WW][10] = 48,
- [0][1][2][0][RTW89_WW][11] = 38,
- [0][1][2][0][RTW89_WW][12] = 34,
+ [0][1][2][0][RTW89_WW][0] = 46,
+ [0][1][2][0][RTW89_WW][1] = 46,
+ [0][1][2][0][RTW89_WW][2] = 46,
+ [0][1][2][0][RTW89_WW][3] = 46,
+ [0][1][2][0][RTW89_WW][4] = 46,
+ [0][1][2][0][RTW89_WW][5] = 46,
+ [0][1][2][0][RTW89_WW][6] = 46,
+ [0][1][2][0][RTW89_WW][7] = 46,
+ [0][1][2][0][RTW89_WW][8] = 46,
+ [0][1][2][0][RTW89_WW][9] = 46,
+ [0][1][2][0][RTW89_WW][10] = 46,
+ [0][1][2][0][RTW89_WW][11] = 46,
+ [0][1][2][0][RTW89_WW][12] = 16,
[0][1][2][0][RTW89_WW][13] = 0,
[0][1][2][1][RTW89_WW][0] = 36,
- [0][1][2][1][RTW89_WW][1] = 36,
- [0][1][2][1][RTW89_WW][2] = 36,
- [0][1][2][1][RTW89_WW][3] = 36,
- [0][1][2][1][RTW89_WW][4] = 36,
- [0][1][2][1][RTW89_WW][5] = 36,
- [0][1][2][1][RTW89_WW][6] = 36,
- [0][1][2][1][RTW89_WW][7] = 36,
- [0][1][2][1][RTW89_WW][8] = 36,
- [0][1][2][1][RTW89_WW][9] = 36,
- [0][1][2][1][RTW89_WW][10] = 36,
- [0][1][2][1][RTW89_WW][11] = 36,
- [0][1][2][1][RTW89_WW][12] = 34,
+ [0][1][2][1][RTW89_WW][1] = 34,
+ [0][1][2][1][RTW89_WW][2] = 34,
+ [0][1][2][1][RTW89_WW][3] = 34,
+ [0][1][2][1][RTW89_WW][4] = 34,
+ [0][1][2][1][RTW89_WW][5] = 34,
+ [0][1][2][1][RTW89_WW][6] = 34,
+ [0][1][2][1][RTW89_WW][7] = 34,
+ [0][1][2][1][RTW89_WW][8] = 34,
+ [0][1][2][1][RTW89_WW][9] = 34,
+ [0][1][2][1][RTW89_WW][10] = 34,
+ [0][1][2][1][RTW89_WW][11] = 34,
+ [0][1][2][1][RTW89_WW][12] = 16,
[0][1][2][1][RTW89_WW][13] = 0,
[1][0][2][0][RTW89_WW][0] = 0,
[1][0][2][0][RTW89_WW][1] = 0,
- [1][0][2][0][RTW89_WW][2] = 60,
- [1][0][2][0][RTW89_WW][3] = 60,
- [1][0][2][0][RTW89_WW][4] = 60,
- [1][0][2][0][RTW89_WW][5] = 60,
- [1][0][2][0][RTW89_WW][6] = 60,
- [1][0][2][0][RTW89_WW][7] = 60,
- [1][0][2][0][RTW89_WW][8] = 60,
- [1][0][2][0][RTW89_WW][9] = 60,
- [1][0][2][0][RTW89_WW][10] = 58,
+ [1][0][2][0][RTW89_WW][2] = 58,
+ [1][0][2][0][RTW89_WW][3] = 58,
+ [1][0][2][0][RTW89_WW][4] = 58,
+ [1][0][2][0][RTW89_WW][5] = 58,
+ [1][0][2][0][RTW89_WW][6] = 58,
+ [1][0][2][0][RTW89_WW][7] = 58,
+ [1][0][2][0][RTW89_WW][8] = 58,
+ [1][0][2][0][RTW89_WW][9] = 58,
+ [1][0][2][0][RTW89_WW][10] = 56,
[1][0][2][0][RTW89_WW][11] = 0,
[1][0][2][0][RTW89_WW][12] = 0,
[1][0][2][0][RTW89_WW][13] = 0,
[1][1][2][0][RTW89_WW][0] = 0,
[1][1][2][0][RTW89_WW][1] = 0,
- [1][1][2][0][RTW89_WW][2] = 46,
- [1][1][2][0][RTW89_WW][3] = 46,
- [1][1][2][0][RTW89_WW][4] = 48,
- [1][1][2][0][RTW89_WW][5] = 48,
- [1][1][2][0][RTW89_WW][6] = 48,
- [1][1][2][0][RTW89_WW][7] = 46,
- [1][1][2][0][RTW89_WW][8] = 46,
+ [1][1][2][0][RTW89_WW][2] = 34,
+ [1][1][2][0][RTW89_WW][3] = 34,
+ [1][1][2][0][RTW89_WW][4] = 34,
+ [1][1][2][0][RTW89_WW][5] = 34,
+ [1][1][2][0][RTW89_WW][6] = 34,
+ [1][1][2][0][RTW89_WW][7] = 34,
+ [1][1][2][0][RTW89_WW][8] = 34,
[1][1][2][0][RTW89_WW][9] = 34,
- [1][1][2][0][RTW89_WW][10] = 30,
+ [1][1][2][0][RTW89_WW][10] = 34,
[1][1][2][0][RTW89_WW][11] = 0,
[1][1][2][0][RTW89_WW][12] = 0,
[1][1][2][0][RTW89_WW][13] = 0,
[1][1][2][1][RTW89_WW][0] = 0,
[1][1][2][1][RTW89_WW][1] = 0,
- [1][1][2][1][RTW89_WW][2] = 36,
- [1][1][2][1][RTW89_WW][3] = 36,
- [1][1][2][1][RTW89_WW][4] = 36,
- [1][1][2][1][RTW89_WW][5] = 36,
- [1][1][2][1][RTW89_WW][6] = 36,
- [1][1][2][1][RTW89_WW][7] = 36,
- [1][1][2][1][RTW89_WW][8] = 36,
+ [1][1][2][1][RTW89_WW][2] = 34,
+ [1][1][2][1][RTW89_WW][3] = 34,
+ [1][1][2][1][RTW89_WW][4] = 34,
+ [1][1][2][1][RTW89_WW][5] = 34,
+ [1][1][2][1][RTW89_WW][6] = 34,
+ [1][1][2][1][RTW89_WW][7] = 34,
+ [1][1][2][1][RTW89_WW][8] = 34,
[1][1][2][1][RTW89_WW][9] = 34,
- [1][1][2][1][RTW89_WW][10] = 30,
+ [1][1][2][1][RTW89_WW][10] = 36,
[1][1][2][1][RTW89_WW][11] = 0,
[1][1][2][1][RTW89_WW][12] = 0,
[1][1][2][1][RTW89_WW][13] = 0,
- [0][0][0][0][RTW89_FCC][0] = 70,
+ [0][0][0][0][RTW89_FCC][0] = 76,
[0][0][0][0][RTW89_ETSI][0] = 60,
[0][0][0][0][RTW89_MKK][0] = 68,
- [0][0][0][0][RTW89_IC][0] = 74,
+ [0][0][0][0][RTW89_IC][0] = 76,
+ [0][0][0][0][RTW89_KCC][0] = 68,
[0][0][0][0][RTW89_ACMA][0] = 60,
- [0][0][0][0][RTW89_FCC][1] = 70,
+ [0][0][0][0][RTW89_CN][0] = 58,
+ [0][0][0][0][RTW89_UK][0] = 60,
+ [0][0][0][0][RTW89_FCC][1] = 76,
[0][0][0][0][RTW89_ETSI][1] = 60,
[0][0][0][0][RTW89_MKK][1] = 68,
- [0][0][0][0][RTW89_IC][1] = 74,
+ [0][0][0][0][RTW89_IC][1] = 76,
+ [0][0][0][0][RTW89_KCC][1] = 68,
[0][0][0][0][RTW89_ACMA][1] = 60,
- [0][0][0][0][RTW89_FCC][2] = 70,
+ [0][0][0][0][RTW89_CN][1] = 58,
+ [0][0][0][0][RTW89_UK][1] = 60,
+ [0][0][0][0][RTW89_FCC][2] = 76,
[0][0][0][0][RTW89_ETSI][2] = 60,
[0][0][0][0][RTW89_MKK][2] = 68,
- [0][0][0][0][RTW89_IC][2] = 74,
+ [0][0][0][0][RTW89_IC][2] = 76,
+ [0][0][0][0][RTW89_KCC][2] = 68,
[0][0][0][0][RTW89_ACMA][2] = 60,
- [0][0][0][0][RTW89_FCC][3] = 70,
+ [0][0][0][0][RTW89_CN][2] = 58,
+ [0][0][0][0][RTW89_UK][2] = 60,
+ [0][0][0][0][RTW89_FCC][3] = 76,
[0][0][0][0][RTW89_ETSI][3] = 60,
[0][0][0][0][RTW89_MKK][3] = 68,
- [0][0][0][0][RTW89_IC][3] = 74,
+ [0][0][0][0][RTW89_IC][3] = 76,
+ [0][0][0][0][RTW89_KCC][3] = 68,
[0][0][0][0][RTW89_ACMA][3] = 60,
- [0][0][0][0][RTW89_FCC][4] = 70,
+ [0][0][0][0][RTW89_CN][3] = 58,
+ [0][0][0][0][RTW89_UK][3] = 60,
+ [0][0][0][0][RTW89_FCC][4] = 76,
[0][0][0][0][RTW89_ETSI][4] = 60,
[0][0][0][0][RTW89_MKK][4] = 68,
- [0][0][0][0][RTW89_IC][4] = 74,
+ [0][0][0][0][RTW89_IC][4] = 76,
+ [0][0][0][0][RTW89_KCC][4] = 68,
[0][0][0][0][RTW89_ACMA][4] = 60,
- [0][0][0][0][RTW89_FCC][5] = 70,
+ [0][0][0][0][RTW89_CN][4] = 58,
+ [0][0][0][0][RTW89_UK][4] = 60,
+ [0][0][0][0][RTW89_FCC][5] = 76,
[0][0][0][0][RTW89_ETSI][5] = 60,
[0][0][0][0][RTW89_MKK][5] = 68,
- [0][0][0][0][RTW89_IC][5] = 74,
+ [0][0][0][0][RTW89_IC][5] = 76,
+ [0][0][0][0][RTW89_KCC][5] = 68,
[0][0][0][0][RTW89_ACMA][5] = 60,
- [0][0][0][0][RTW89_FCC][6] = 70,
+ [0][0][0][0][RTW89_CN][5] = 58,
+ [0][0][0][0][RTW89_UK][5] = 60,
+ [0][0][0][0][RTW89_FCC][6] = 76,
[0][0][0][0][RTW89_ETSI][6] = 60,
[0][0][0][0][RTW89_MKK][6] = 68,
- [0][0][0][0][RTW89_IC][6] = 74,
+ [0][0][0][0][RTW89_IC][6] = 76,
+ [0][0][0][0][RTW89_KCC][6] = 68,
[0][0][0][0][RTW89_ACMA][6] = 60,
- [0][0][0][0][RTW89_FCC][7] = 70,
+ [0][0][0][0][RTW89_CN][6] = 58,
+ [0][0][0][0][RTW89_UK][6] = 60,
+ [0][0][0][0][RTW89_FCC][7] = 76,
[0][0][0][0][RTW89_ETSI][7] = 60,
[0][0][0][0][RTW89_MKK][7] = 68,
- [0][0][0][0][RTW89_IC][7] = 74,
+ [0][0][0][0][RTW89_IC][7] = 76,
+ [0][0][0][0][RTW89_KCC][7] = 68,
[0][0][0][0][RTW89_ACMA][7] = 60,
- [0][0][0][0][RTW89_FCC][8] = 70,
+ [0][0][0][0][RTW89_CN][7] = 58,
+ [0][0][0][0][RTW89_UK][7] = 60,
+ [0][0][0][0][RTW89_FCC][8] = 76,
[0][0][0][0][RTW89_ETSI][8] = 60,
[0][0][0][0][RTW89_MKK][8] = 68,
- [0][0][0][0][RTW89_IC][8] = 74,
+ [0][0][0][0][RTW89_IC][8] = 76,
+ [0][0][0][0][RTW89_KCC][8] = 68,
[0][0][0][0][RTW89_ACMA][8] = 60,
- [0][0][0][0][RTW89_FCC][9] = 70,
+ [0][0][0][0][RTW89_CN][8] = 58,
+ [0][0][0][0][RTW89_UK][8] = 60,
+ [0][0][0][0][RTW89_FCC][9] = 76,
[0][0][0][0][RTW89_ETSI][9] = 60,
[0][0][0][0][RTW89_MKK][9] = 68,
- [0][0][0][0][RTW89_IC][9] = 74,
+ [0][0][0][0][RTW89_IC][9] = 76,
+ [0][0][0][0][RTW89_KCC][9] = 70,
[0][0][0][0][RTW89_ACMA][9] = 60,
- [0][0][0][0][RTW89_FCC][10] = 70,
+ [0][0][0][0][RTW89_CN][9] = 58,
+ [0][0][0][0][RTW89_UK][9] = 60,
+ [0][0][0][0][RTW89_FCC][10] = 76,
[0][0][0][0][RTW89_ETSI][10] = 60,
[0][0][0][0][RTW89_MKK][10] = 68,
- [0][0][0][0][RTW89_IC][10] = 74,
+ [0][0][0][0][RTW89_IC][10] = 76,
+ [0][0][0][0][RTW89_KCC][10] = 70,
[0][0][0][0][RTW89_ACMA][10] = 60,
- [0][0][0][0][RTW89_FCC][11] = 62,
+ [0][0][0][0][RTW89_CN][10] = 58,
+ [0][0][0][0][RTW89_UK][10] = 60,
+ [0][0][0][0][RTW89_FCC][11] = 58,
[0][0][0][0][RTW89_ETSI][11] = 60,
[0][0][0][0][RTW89_MKK][11] = 68,
- [0][0][0][0][RTW89_IC][11] = 72,
+ [0][0][0][0][RTW89_IC][11] = 58,
+ [0][0][0][0][RTW89_KCC][11] = 70,
[0][0][0][0][RTW89_ACMA][11] = 60,
- [0][0][0][0][RTW89_FCC][12] = 48,
+ [0][0][0][0][RTW89_CN][11] = 58,
+ [0][0][0][0][RTW89_UK][11] = 60,
+ [0][0][0][0][RTW89_FCC][12] = 46,
[0][0][0][0][RTW89_ETSI][12] = 60,
[0][0][0][0][RTW89_MKK][12] = 68,
- [0][0][0][0][RTW89_IC][12] = 58,
+ [0][0][0][0][RTW89_IC][12] = 46,
+ [0][0][0][0][RTW89_KCC][12] = 70,
[0][0][0][0][RTW89_ACMA][12] = 60,
+ [0][0][0][0][RTW89_CN][12] = 58,
+ [0][0][0][0][RTW89_UK][12] = 60,
[0][0][0][0][RTW89_FCC][13] = 127,
[0][0][0][0][RTW89_ETSI][13] = 127,
[0][0][0][0][RTW89_MKK][13] = 72,
[0][0][0][0][RTW89_IC][13] = 127,
+ [0][0][0][0][RTW89_KCC][13] = 127,
[0][0][0][0][RTW89_ACMA][13] = 127,
- [0][1][0][0][RTW89_FCC][0] = 66,
+ [0][0][0][0][RTW89_CN][13] = 127,
+ [0][0][0][0][RTW89_UK][13] = 127,
+ [0][1][0][0][RTW89_FCC][0] = 76,
[0][1][0][0][RTW89_ETSI][0] = 48,
[0][1][0][0][RTW89_MKK][0] = 58,
- [0][1][0][0][RTW89_IC][0] = 74,
+ [0][1][0][0][RTW89_IC][0] = 76,
+ [0][1][0][0][RTW89_KCC][0] = 56,
[0][1][0][0][RTW89_ACMA][0] = 48,
- [0][1][0][0][RTW89_FCC][1] = 66,
+ [0][1][0][0][RTW89_CN][0] = 42,
+ [0][1][0][0][RTW89_UK][0] = 48,
+ [0][1][0][0][RTW89_FCC][1] = 76,
[0][1][0][0][RTW89_ETSI][1] = 48,
[0][1][0][0][RTW89_MKK][1] = 58,
- [0][1][0][0][RTW89_IC][1] = 74,
+ [0][1][0][0][RTW89_IC][1] = 76,
+ [0][1][0][0][RTW89_KCC][1] = 56,
[0][1][0][0][RTW89_ACMA][1] = 48,
- [0][1][0][0][RTW89_FCC][2] = 66,
+ [0][1][0][0][RTW89_CN][1] = 42,
+ [0][1][0][0][RTW89_UK][1] = 48,
+ [0][1][0][0][RTW89_FCC][2] = 76,
[0][1][0][0][RTW89_ETSI][2] = 48,
[0][1][0][0][RTW89_MKK][2] = 58,
- [0][1][0][0][RTW89_IC][2] = 74,
+ [0][1][0][0][RTW89_IC][2] = 76,
+ [0][1][0][0][RTW89_KCC][2] = 56,
[0][1][0][0][RTW89_ACMA][2] = 48,
- [0][1][0][0][RTW89_FCC][3] = 66,
+ [0][1][0][0][RTW89_CN][2] = 42,
+ [0][1][0][0][RTW89_UK][2] = 48,
+ [0][1][0][0][RTW89_FCC][3] = 76,
[0][1][0][0][RTW89_ETSI][3] = 48,
[0][1][0][0][RTW89_MKK][3] = 58,
- [0][1][0][0][RTW89_IC][3] = 74,
+ [0][1][0][0][RTW89_IC][3] = 76,
+ [0][1][0][0][RTW89_KCC][3] = 56,
[0][1][0][0][RTW89_ACMA][3] = 48,
- [0][1][0][0][RTW89_FCC][4] = 66,
+ [0][1][0][0][RTW89_CN][3] = 42,
+ [0][1][0][0][RTW89_UK][3] = 48,
+ [0][1][0][0][RTW89_FCC][4] = 76,
[0][1][0][0][RTW89_ETSI][4] = 48,
[0][1][0][0][RTW89_MKK][4] = 58,
- [0][1][0][0][RTW89_IC][4] = 74,
+ [0][1][0][0][RTW89_IC][4] = 76,
+ [0][1][0][0][RTW89_KCC][4] = 56,
[0][1][0][0][RTW89_ACMA][4] = 48,
- [0][1][0][0][RTW89_FCC][5] = 66,
+ [0][1][0][0][RTW89_CN][4] = 42,
+ [0][1][0][0][RTW89_UK][4] = 48,
+ [0][1][0][0][RTW89_FCC][5] = 76,
[0][1][0][0][RTW89_ETSI][5] = 48,
[0][1][0][0][RTW89_MKK][5] = 58,
- [0][1][0][0][RTW89_IC][5] = 74,
+ [0][1][0][0][RTW89_IC][5] = 76,
+ [0][1][0][0][RTW89_KCC][5] = 56,
[0][1][0][0][RTW89_ACMA][5] = 48,
- [0][1][0][0][RTW89_FCC][6] = 66,
+ [0][1][0][0][RTW89_CN][5] = 42,
+ [0][1][0][0][RTW89_UK][5] = 48,
+ [0][1][0][0][RTW89_FCC][6] = 76,
[0][1][0][0][RTW89_ETSI][6] = 48,
[0][1][0][0][RTW89_MKK][6] = 58,
- [0][1][0][0][RTW89_IC][6] = 74,
+ [0][1][0][0][RTW89_IC][6] = 76,
+ [0][1][0][0][RTW89_KCC][6] = 56,
[0][1][0][0][RTW89_ACMA][6] = 48,
- [0][1][0][0][RTW89_FCC][7] = 66,
+ [0][1][0][0][RTW89_CN][6] = 42,
+ [0][1][0][0][RTW89_UK][6] = 48,
+ [0][1][0][0][RTW89_FCC][7] = 76,
[0][1][0][0][RTW89_ETSI][7] = 48,
[0][1][0][0][RTW89_MKK][7] = 58,
- [0][1][0][0][RTW89_IC][7] = 74,
+ [0][1][0][0][RTW89_IC][7] = 76,
+ [0][1][0][0][RTW89_KCC][7] = 56,
[0][1][0][0][RTW89_ACMA][7] = 48,
- [0][1][0][0][RTW89_FCC][8] = 66,
+ [0][1][0][0][RTW89_CN][7] = 42,
+ [0][1][0][0][RTW89_UK][7] = 48,
+ [0][1][0][0][RTW89_FCC][8] = 76,
[0][1][0][0][RTW89_ETSI][8] = 48,
[0][1][0][0][RTW89_MKK][8] = 58,
- [0][1][0][0][RTW89_IC][8] = 74,
+ [0][1][0][0][RTW89_IC][8] = 76,
+ [0][1][0][0][RTW89_KCC][8] = 56,
[0][1][0][0][RTW89_ACMA][8] = 48,
- [0][1][0][0][RTW89_FCC][9] = 66,
+ [0][1][0][0][RTW89_CN][8] = 42,
+ [0][1][0][0][RTW89_UK][8] = 48,
+ [0][1][0][0][RTW89_FCC][9] = 70,
[0][1][0][0][RTW89_ETSI][9] = 48,
[0][1][0][0][RTW89_MKK][9] = 58,
- [0][1][0][0][RTW89_IC][9] = 74,
+ [0][1][0][0][RTW89_IC][9] = 70,
+ [0][1][0][0][RTW89_KCC][9] = 56,
[0][1][0][0][RTW89_ACMA][9] = 48,
- [0][1][0][0][RTW89_FCC][10] = 66,
+ [0][1][0][0][RTW89_CN][9] = 42,
+ [0][1][0][0][RTW89_UK][9] = 48,
+ [0][1][0][0][RTW89_FCC][10] = 72,
[0][1][0][0][RTW89_ETSI][10] = 48,
[0][1][0][0][RTW89_MKK][10] = 58,
- [0][1][0][0][RTW89_IC][10] = 74,
+ [0][1][0][0][RTW89_IC][10] = 72,
+ [0][1][0][0][RTW89_KCC][10] = 56,
[0][1][0][0][RTW89_ACMA][10] = 48,
- [0][1][0][0][RTW89_FCC][11] = 46,
+ [0][1][0][0][RTW89_CN][10] = 42,
+ [0][1][0][0][RTW89_UK][10] = 48,
+ [0][1][0][0][RTW89_FCC][11] = 44,
[0][1][0][0][RTW89_ETSI][11] = 48,
[0][1][0][0][RTW89_MKK][11] = 58,
- [0][1][0][0][RTW89_IC][11] = 56,
+ [0][1][0][0][RTW89_IC][11] = 44,
+ [0][1][0][0][RTW89_KCC][11] = 56,
[0][1][0][0][RTW89_ACMA][11] = 48,
- [0][1][0][0][RTW89_FCC][12] = 34,
+ [0][1][0][0][RTW89_CN][11] = 42,
+ [0][1][0][0][RTW89_UK][11] = 48,
+ [0][1][0][0][RTW89_FCC][12] = 18,
[0][1][0][0][RTW89_ETSI][12] = 48,
[0][1][0][0][RTW89_MKK][12] = 58,
- [0][1][0][0][RTW89_IC][12] = 44,
+ [0][1][0][0][RTW89_IC][12] = 18,
+ [0][1][0][0][RTW89_KCC][12] = 56,
[0][1][0][0][RTW89_ACMA][12] = 48,
+ [0][1][0][0][RTW89_CN][12] = 42,
+ [0][1][0][0][RTW89_UK][12] = 48,
[0][1][0][0][RTW89_FCC][13] = 127,
[0][1][0][0][RTW89_ETSI][13] = 127,
[0][1][0][0][RTW89_MKK][13] = 60,
[0][1][0][0][RTW89_IC][13] = 127,
+ [0][1][0][0][RTW89_KCC][13] = 127,
[0][1][0][0][RTW89_ACMA][13] = 127,
+ [0][1][0][0][RTW89_CN][13] = 127,
+ [0][1][0][0][RTW89_UK][13] = 127,
[1][0][0][0][RTW89_FCC][0] = 127,
[1][0][0][0][RTW89_ETSI][0] = 127,
[1][0][0][0][RTW89_MKK][0] = 127,
[1][0][0][0][RTW89_IC][0] = 127,
+ [1][0][0][0][RTW89_KCC][0] = 127,
[1][0][0][0][RTW89_ACMA][0] = 127,
+ [1][0][0][0][RTW89_CN][0] = 127,
+ [1][0][0][0][RTW89_UK][0] = 127,
[1][0][0][0][RTW89_FCC][1] = 127,
[1][0][0][0][RTW89_ETSI][1] = 127,
[1][0][0][0][RTW89_MKK][1] = 127,
[1][0][0][0][RTW89_IC][1] = 127,
+ [1][0][0][0][RTW89_KCC][1] = 127,
[1][0][0][0][RTW89_ACMA][1] = 127,
- [1][0][0][0][RTW89_FCC][2] = 42,
+ [1][0][0][0][RTW89_CN][1] = 127,
+ [1][0][0][0][RTW89_UK][1] = 127,
+ [1][0][0][0][RTW89_FCC][2] = 44,
[1][0][0][0][RTW89_ETSI][2] = 60,
[1][0][0][0][RTW89_MKK][2] = 66,
- [1][0][0][0][RTW89_IC][2] = 52,
+ [1][0][0][0][RTW89_IC][2] = 44,
+ [1][0][0][0][RTW89_KCC][2] = 68,
[1][0][0][0][RTW89_ACMA][2] = 60,
- [1][0][0][0][RTW89_FCC][3] = 42,
+ [1][0][0][0][RTW89_CN][2] = 58,
+ [1][0][0][0][RTW89_UK][2] = 60,
+ [1][0][0][0][RTW89_FCC][3] = 60,
[1][0][0][0][RTW89_ETSI][3] = 60,
[1][0][0][0][RTW89_MKK][3] = 66,
- [1][0][0][0][RTW89_IC][3] = 52,
+ [1][0][0][0][RTW89_IC][3] = 60,
+ [1][0][0][0][RTW89_KCC][3] = 68,
[1][0][0][0][RTW89_ACMA][3] = 60,
- [1][0][0][0][RTW89_FCC][4] = 42,
+ [1][0][0][0][RTW89_CN][3] = 58,
+ [1][0][0][0][RTW89_UK][3] = 60,
+ [1][0][0][0][RTW89_FCC][4] = 60,
[1][0][0][0][RTW89_ETSI][4] = 60,
[1][0][0][0][RTW89_MKK][4] = 66,
- [1][0][0][0][RTW89_IC][4] = 52,
+ [1][0][0][0][RTW89_IC][4] = 60,
+ [1][0][0][0][RTW89_KCC][4] = 68,
[1][0][0][0][RTW89_ACMA][4] = 60,
- [1][0][0][0][RTW89_FCC][5] = 58,
+ [1][0][0][0][RTW89_CN][4] = 58,
+ [1][0][0][0][RTW89_UK][4] = 60,
+ [1][0][0][0][RTW89_FCC][5] = 62,
[1][0][0][0][RTW89_ETSI][5] = 60,
[1][0][0][0][RTW89_MKK][5] = 66,
- [1][0][0][0][RTW89_IC][5] = 68,
+ [1][0][0][0][RTW89_IC][5] = 62,
+ [1][0][0][0][RTW89_KCC][5] = 68,
[1][0][0][0][RTW89_ACMA][5] = 60,
- [1][0][0][0][RTW89_FCC][6] = 42,
+ [1][0][0][0][RTW89_CN][5] = 58,
+ [1][0][0][0][RTW89_UK][5] = 60,
+ [1][0][0][0][RTW89_FCC][6] = 46,
[1][0][0][0][RTW89_ETSI][6] = 60,
[1][0][0][0][RTW89_MKK][6] = 66,
- [1][0][0][0][RTW89_IC][6] = 52,
+ [1][0][0][0][RTW89_IC][6] = 46,
+ [1][0][0][0][RTW89_KCC][6] = 68,
[1][0][0][0][RTW89_ACMA][6] = 60,
- [1][0][0][0][RTW89_FCC][7] = 42,
+ [1][0][0][0][RTW89_CN][6] = 58,
+ [1][0][0][0][RTW89_UK][6] = 60,
+ [1][0][0][0][RTW89_FCC][7] = 46,
[1][0][0][0][RTW89_ETSI][7] = 60,
[1][0][0][0][RTW89_MKK][7] = 66,
- [1][0][0][0][RTW89_IC][7] = 52,
+ [1][0][0][0][RTW89_IC][7] = 46,
+ [1][0][0][0][RTW89_KCC][7] = 68,
[1][0][0][0][RTW89_ACMA][7] = 60,
- [1][0][0][0][RTW89_FCC][8] = 42,
+ [1][0][0][0][RTW89_CN][7] = 58,
+ [1][0][0][0][RTW89_UK][7] = 60,
+ [1][0][0][0][RTW89_FCC][8] = 28,
[1][0][0][0][RTW89_ETSI][8] = 60,
[1][0][0][0][RTW89_MKK][8] = 66,
- [1][0][0][0][RTW89_IC][8] = 52,
+ [1][0][0][0][RTW89_IC][8] = 28,
+ [1][0][0][0][RTW89_KCC][8] = 70,
[1][0][0][0][RTW89_ACMA][8] = 60,
- [1][0][0][0][RTW89_FCC][9] = 34,
+ [1][0][0][0][RTW89_CN][8] = 58,
+ [1][0][0][0][RTW89_UK][8] = 60,
+ [1][0][0][0][RTW89_FCC][9] = 26,
[1][0][0][0][RTW89_ETSI][9] = 60,
[1][0][0][0][RTW89_MKK][9] = 66,
- [1][0][0][0][RTW89_IC][9] = 44,
+ [1][0][0][0][RTW89_IC][9] = 26,
+ [1][0][0][0][RTW89_KCC][9] = 70,
[1][0][0][0][RTW89_ACMA][9] = 60,
- [1][0][0][0][RTW89_FCC][10] = 22,
+ [1][0][0][0][RTW89_CN][9] = 58,
+ [1][0][0][0][RTW89_UK][9] = 60,
+ [1][0][0][0][RTW89_FCC][10] = 26,
[1][0][0][0][RTW89_ETSI][10] = 60,
[1][0][0][0][RTW89_MKK][10] = 66,
- [1][0][0][0][RTW89_IC][10] = 32,
+ [1][0][0][0][RTW89_IC][10] = 26,
+ [1][0][0][0][RTW89_KCC][10] = 70,
[1][0][0][0][RTW89_ACMA][10] = 60,
+ [1][0][0][0][RTW89_CN][10] = 58,
+ [1][0][0][0][RTW89_UK][10] = 60,
[1][0][0][0][RTW89_FCC][11] = 127,
[1][0][0][0][RTW89_ETSI][11] = 127,
[1][0][0][0][RTW89_MKK][11] = 127,
[1][0][0][0][RTW89_IC][11] = 127,
+ [1][0][0][0][RTW89_KCC][11] = 127,
[1][0][0][0][RTW89_ACMA][11] = 127,
+ [1][0][0][0][RTW89_CN][11] = 127,
+ [1][0][0][0][RTW89_UK][11] = 127,
[1][0][0][0][RTW89_FCC][12] = 127,
[1][0][0][0][RTW89_ETSI][12] = 127,
[1][0][0][0][RTW89_MKK][12] = 127,
[1][0][0][0][RTW89_IC][12] = 127,
+ [1][0][0][0][RTW89_KCC][12] = 127,
[1][0][0][0][RTW89_ACMA][12] = 127,
+ [1][0][0][0][RTW89_CN][12] = 127,
+ [1][0][0][0][RTW89_UK][12] = 127,
[1][0][0][0][RTW89_FCC][13] = 127,
[1][0][0][0][RTW89_ETSI][13] = 127,
[1][0][0][0][RTW89_MKK][13] = 127,
[1][0][0][0][RTW89_IC][13] = 127,
+ [1][0][0][0][RTW89_KCC][13] = 127,
[1][0][0][0][RTW89_ACMA][13] = 127,
+ [1][0][0][0][RTW89_CN][13] = 127,
+ [1][0][0][0][RTW89_UK][13] = 127,
[1][1][0][0][RTW89_FCC][0] = 127,
[1][1][0][0][RTW89_ETSI][0] = 127,
[1][1][0][0][RTW89_MKK][0] = 127,
[1][1][0][0][RTW89_IC][0] = 127,
+ [1][1][0][0][RTW89_KCC][0] = 127,
[1][1][0][0][RTW89_ACMA][0] = 127,
+ [1][1][0][0][RTW89_CN][0] = 127,
+ [1][1][0][0][RTW89_UK][0] = 127,
[1][1][0][0][RTW89_FCC][1] = 127,
[1][1][0][0][RTW89_ETSI][1] = 127,
[1][1][0][0][RTW89_MKK][1] = 127,
[1][1][0][0][RTW89_IC][1] = 127,
+ [1][1][0][0][RTW89_KCC][1] = 127,
[1][1][0][0][RTW89_ACMA][1] = 127,
- [1][1][0][0][RTW89_FCC][2] = 38,
+ [1][1][0][0][RTW89_CN][1] = 127,
+ [1][1][0][0][RTW89_UK][1] = 127,
+ [1][1][0][0][RTW89_FCC][2] = 46,
[1][1][0][0][RTW89_ETSI][2] = 48,
[1][1][0][0][RTW89_MKK][2] = 58,
- [1][1][0][0][RTW89_IC][2] = 48,
+ [1][1][0][0][RTW89_IC][2] = 46,
+ [1][1][0][0][RTW89_KCC][2] = 56,
[1][1][0][0][RTW89_ACMA][2] = 48,
- [1][1][0][0][RTW89_FCC][3] = 38,
+ [1][1][0][0][RTW89_CN][2] = 46,
+ [1][1][0][0][RTW89_UK][2] = 48,
+ [1][1][0][0][RTW89_FCC][3] = 46,
[1][1][0][0][RTW89_ETSI][3] = 48,
[1][1][0][0][RTW89_MKK][3] = 58,
- [1][1][0][0][RTW89_IC][3] = 48,
+ [1][1][0][0][RTW89_IC][3] = 46,
+ [1][1][0][0][RTW89_KCC][3] = 56,
[1][1][0][0][RTW89_ACMA][3] = 48,
- [1][1][0][0][RTW89_FCC][4] = 38,
+ [1][1][0][0][RTW89_CN][3] = 46,
+ [1][1][0][0][RTW89_UK][3] = 48,
+ [1][1][0][0][RTW89_FCC][4] = 46,
[1][1][0][0][RTW89_ETSI][4] = 48,
[1][1][0][0][RTW89_MKK][4] = 58,
- [1][1][0][0][RTW89_IC][4] = 48,
+ [1][1][0][0][RTW89_IC][4] = 46,
+ [1][1][0][0][RTW89_KCC][4] = 56,
[1][1][0][0][RTW89_ACMA][4] = 48,
- [1][1][0][0][RTW89_FCC][5] = 54,
+ [1][1][0][0][RTW89_CN][4] = 46,
+ [1][1][0][0][RTW89_UK][4] = 48,
+ [1][1][0][0][RTW89_FCC][5] = 48,
[1][1][0][0][RTW89_ETSI][5] = 48,
[1][1][0][0][RTW89_MKK][5] = 58,
- [1][1][0][0][RTW89_IC][5] = 64,
+ [1][1][0][0][RTW89_IC][5] = 48,
+ [1][1][0][0][RTW89_KCC][5] = 56,
[1][1][0][0][RTW89_ACMA][5] = 48,
- [1][1][0][0][RTW89_FCC][6] = 26,
+ [1][1][0][0][RTW89_CN][5] = 46,
+ [1][1][0][0][RTW89_UK][5] = 48,
+ [1][1][0][0][RTW89_FCC][6] = 40,
[1][1][0][0][RTW89_ETSI][6] = 48,
[1][1][0][0][RTW89_MKK][6] = 58,
- [1][1][0][0][RTW89_IC][6] = 36,
+ [1][1][0][0][RTW89_IC][6] = 40,
+ [1][1][0][0][RTW89_KCC][6] = 56,
[1][1][0][0][RTW89_ACMA][6] = 48,
- [1][1][0][0][RTW89_FCC][7] = 26,
+ [1][1][0][0][RTW89_CN][6] = 46,
+ [1][1][0][0][RTW89_UK][6] = 48,
+ [1][1][0][0][RTW89_FCC][7] = 40,
[1][1][0][0][RTW89_ETSI][7] = 48,
[1][1][0][0][RTW89_MKK][7] = 58,
- [1][1][0][0][RTW89_IC][7] = 36,
+ [1][1][0][0][RTW89_IC][7] = 40,
+ [1][1][0][0][RTW89_KCC][7] = 56,
[1][1][0][0][RTW89_ACMA][7] = 48,
- [1][1][0][0][RTW89_FCC][8] = 26,
+ [1][1][0][0][RTW89_CN][7] = 46,
+ [1][1][0][0][RTW89_UK][7] = 48,
+ [1][1][0][0][RTW89_FCC][8] = 14,
[1][1][0][0][RTW89_ETSI][8] = 48,
[1][1][0][0][RTW89_MKK][8] = 58,
- [1][1][0][0][RTW89_IC][8] = 36,
+ [1][1][0][0][RTW89_IC][8] = 14,
+ [1][1][0][0][RTW89_KCC][8] = 58,
[1][1][0][0][RTW89_ACMA][8] = 48,
- [1][1][0][0][RTW89_FCC][9] = 22,
+ [1][1][0][0][RTW89_CN][8] = 46,
+ [1][1][0][0][RTW89_UK][8] = 48,
+ [1][1][0][0][RTW89_FCC][9] = 14,
[1][1][0][0][RTW89_ETSI][9] = 48,
[1][1][0][0][RTW89_MKK][9] = 58,
- [1][1][0][0][RTW89_IC][9] = 32,
+ [1][1][0][0][RTW89_IC][9] = 14,
+ [1][1][0][0][RTW89_KCC][9] = 58,
[1][1][0][0][RTW89_ACMA][9] = 48,
- [1][1][0][0][RTW89_FCC][10] = 22,
+ [1][1][0][0][RTW89_CN][9] = 46,
+ [1][1][0][0][RTW89_UK][9] = 48,
+ [1][1][0][0][RTW89_FCC][10] = 12,
[1][1][0][0][RTW89_ETSI][10] = 48,
[1][1][0][0][RTW89_MKK][10] = 56,
- [1][1][0][0][RTW89_IC][10] = 32,
+ [1][1][0][0][RTW89_IC][10] = 12,
+ [1][1][0][0][RTW89_KCC][10] = 58,
[1][1][0][0][RTW89_ACMA][10] = 48,
+ [1][1][0][0][RTW89_CN][10] = 46,
+ [1][1][0][0][RTW89_UK][10] = 48,
[1][1][0][0][RTW89_FCC][11] = 127,
[1][1][0][0][RTW89_ETSI][11] = 127,
[1][1][0][0][RTW89_MKK][11] = 127,
[1][1][0][0][RTW89_IC][11] = 127,
+ [1][1][0][0][RTW89_KCC][11] = 127,
[1][1][0][0][RTW89_ACMA][11] = 127,
+ [1][1][0][0][RTW89_CN][11] = 127,
+ [1][1][0][0][RTW89_UK][11] = 127,
[1][1][0][0][RTW89_FCC][12] = 127,
[1][1][0][0][RTW89_ETSI][12] = 127,
[1][1][0][0][RTW89_MKK][12] = 127,
[1][1][0][0][RTW89_IC][12] = 127,
+ [1][1][0][0][RTW89_KCC][12] = 127,
[1][1][0][0][RTW89_ACMA][12] = 127,
+ [1][1][0][0][RTW89_CN][12] = 127,
+ [1][1][0][0][RTW89_UK][12] = 127,
[1][1][0][0][RTW89_FCC][13] = 127,
[1][1][0][0][RTW89_ETSI][13] = 127,
[1][1][0][0][RTW89_MKK][13] = 127,
[1][1][0][0][RTW89_IC][13] = 127,
+ [1][1][0][0][RTW89_KCC][13] = 127,
[1][1][0][0][RTW89_ACMA][13] = 127,
- [0][0][1][0][RTW89_FCC][0] = 68,
+ [1][1][0][0][RTW89_CN][13] = 127,
+ [1][1][0][0][RTW89_UK][13] = 127,
+ [0][0][1][0][RTW89_FCC][0] = 66,
[0][0][1][0][RTW89_ETSI][0] = 60,
[0][0][1][0][RTW89_MKK][0] = 76,
- [0][0][1][0][RTW89_IC][0] = 78,
+ [0][0][1][0][RTW89_IC][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 68,
[0][0][1][0][RTW89_ACMA][0] = 60,
+ [0][0][1][0][RTW89_CN][0] = 58,
+ [0][0][1][0][RTW89_UK][0] = 60,
[0][0][1][0][RTW89_FCC][1] = 68,
[0][0][1][0][RTW89_ETSI][1] = 60,
[0][0][1][0][RTW89_MKK][1] = 78,
- [0][0][1][0][RTW89_IC][1] = 78,
+ [0][0][1][0][RTW89_IC][1] = 68,
+ [0][0][1][0][RTW89_KCC][1] = 68,
[0][0][1][0][RTW89_ACMA][1] = 60,
- [0][0][1][0][RTW89_FCC][2] = 70,
+ [0][0][1][0][RTW89_CN][1] = 58,
+ [0][0][1][0][RTW89_UK][1] = 60,
+ [0][0][1][0][RTW89_FCC][2] = 72,
[0][0][1][0][RTW89_ETSI][2] = 60,
[0][0][1][0][RTW89_MKK][2] = 78,
- [0][0][1][0][RTW89_IC][2] = 78,
+ [0][0][1][0][RTW89_IC][2] = 72,
+ [0][0][1][0][RTW89_KCC][2] = 68,
[0][0][1][0][RTW89_ACMA][2] = 60,
- [0][0][1][0][RTW89_FCC][3] = 70,
+ [0][0][1][0][RTW89_CN][2] = 58,
+ [0][0][1][0][RTW89_UK][2] = 60,
+ [0][0][1][0][RTW89_FCC][3] = 76,
[0][0][1][0][RTW89_ETSI][3] = 60,
[0][0][1][0][RTW89_MKK][3] = 78,
- [0][0][1][0][RTW89_IC][3] = 78,
+ [0][0][1][0][RTW89_IC][3] = 76,
+ [0][0][1][0][RTW89_KCC][3] = 68,
[0][0][1][0][RTW89_ACMA][3] = 60,
- [0][0][1][0][RTW89_FCC][4] = 70,
+ [0][0][1][0][RTW89_CN][3] = 58,
+ [0][0][1][0][RTW89_UK][3] = 60,
+ [0][0][1][0][RTW89_FCC][4] = 80,
[0][0][1][0][RTW89_ETSI][4] = 60,
[0][0][1][0][RTW89_MKK][4] = 78,
- [0][0][1][0][RTW89_IC][4] = 78,
+ [0][0][1][0][RTW89_IC][4] = 80,
+ [0][0][1][0][RTW89_KCC][4] = 76,
[0][0][1][0][RTW89_ACMA][4] = 60,
- [0][0][1][0][RTW89_FCC][5] = 70,
+ [0][0][1][0][RTW89_CN][4] = 58,
+ [0][0][1][0][RTW89_UK][4] = 60,
+ [0][0][1][0][RTW89_FCC][5] = 80,
[0][0][1][0][RTW89_ETSI][5] = 60,
[0][0][1][0][RTW89_MKK][5] = 78,
- [0][0][1][0][RTW89_IC][5] = 78,
+ [0][0][1][0][RTW89_IC][5] = 80,
+ [0][0][1][0][RTW89_KCC][5] = 76,
[0][0][1][0][RTW89_ACMA][5] = 60,
- [0][0][1][0][RTW89_FCC][6] = 70,
+ [0][0][1][0][RTW89_CN][5] = 58,
+ [0][0][1][0][RTW89_UK][5] = 60,
+ [0][0][1][0][RTW89_FCC][6] = 80,
[0][0][1][0][RTW89_ETSI][6] = 60,
[0][0][1][0][RTW89_MKK][6] = 76,
- [0][0][1][0][RTW89_IC][6] = 78,
+ [0][0][1][0][RTW89_IC][6] = 80,
+ [0][0][1][0][RTW89_KCC][6] = 76,
[0][0][1][0][RTW89_ACMA][6] = 60,
- [0][0][1][0][RTW89_FCC][7] = 70,
+ [0][0][1][0][RTW89_CN][6] = 58,
+ [0][0][1][0][RTW89_UK][6] = 60,
+ [0][0][1][0][RTW89_FCC][7] = 80,
[0][0][1][0][RTW89_ETSI][7] = 60,
[0][0][1][0][RTW89_MKK][7] = 78,
- [0][0][1][0][RTW89_IC][7] = 78,
+ [0][0][1][0][RTW89_IC][7] = 80,
+ [0][0][1][0][RTW89_KCC][7] = 76,
[0][0][1][0][RTW89_ACMA][7] = 60,
- [0][0][1][0][RTW89_FCC][8] = 70,
+ [0][0][1][0][RTW89_CN][7] = 58,
+ [0][0][1][0][RTW89_UK][7] = 60,
+ [0][0][1][0][RTW89_FCC][8] = 80,
[0][0][1][0][RTW89_ETSI][8] = 60,
[0][0][1][0][RTW89_MKK][8] = 78,
- [0][0][1][0][RTW89_IC][8] = 78,
+ [0][0][1][0][RTW89_IC][8] = 80,
+ [0][0][1][0][RTW89_KCC][8] = 76,
[0][0][1][0][RTW89_ACMA][8] = 60,
- [0][0][1][0][RTW89_FCC][9] = 66,
+ [0][0][1][0][RTW89_CN][8] = 58,
+ [0][0][1][0][RTW89_UK][8] = 60,
+ [0][0][1][0][RTW89_FCC][9] = 76,
[0][0][1][0][RTW89_ETSI][9] = 60,
[0][0][1][0][RTW89_MKK][9] = 78,
[0][0][1][0][RTW89_IC][9] = 76,
+ [0][0][1][0][RTW89_KCC][9] = 70,
[0][0][1][0][RTW89_ACMA][9] = 60,
+ [0][0][1][0][RTW89_CN][9] = 58,
+ [0][0][1][0][RTW89_UK][9] = 60,
[0][0][1][0][RTW89_FCC][10] = 66,
[0][0][1][0][RTW89_ETSI][10] = 60,
[0][0][1][0][RTW89_MKK][10] = 78,
- [0][0][1][0][RTW89_IC][10] = 76,
+ [0][0][1][0][RTW89_IC][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 70,
[0][0][1][0][RTW89_ACMA][10] = 60,
- [0][0][1][0][RTW89_FCC][11] = 46,
+ [0][0][1][0][RTW89_CN][10] = 58,
+ [0][0][1][0][RTW89_UK][10] = 60,
+ [0][0][1][0][RTW89_FCC][11] = 62,
[0][0][1][0][RTW89_ETSI][11] = 60,
[0][0][1][0][RTW89_MKK][11] = 78,
- [0][0][1][0][RTW89_IC][11] = 56,
+ [0][0][1][0][RTW89_IC][11] = 62,
+ [0][0][1][0][RTW89_KCC][11] = 70,
[0][0][1][0][RTW89_ACMA][11] = 60,
- [0][0][1][0][RTW89_FCC][12] = 42,
+ [0][0][1][0][RTW89_CN][11] = 58,
+ [0][0][1][0][RTW89_UK][11] = 60,
+ [0][0][1][0][RTW89_FCC][12] = 60,
[0][0][1][0][RTW89_ETSI][12] = 60,
[0][0][1][0][RTW89_MKK][12] = 78,
- [0][0][1][0][RTW89_IC][12] = 52,
+ [0][0][1][0][RTW89_IC][12] = 60,
+ [0][0][1][0][RTW89_KCC][12] = 70,
[0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_CN][12] = 58,
+ [0][0][1][0][RTW89_UK][12] = 60,
[0][0][1][0][RTW89_FCC][13] = 127,
[0][0][1][0][RTW89_ETSI][13] = 127,
[0][0][1][0][RTW89_MKK][13] = 127,
[0][0][1][0][RTW89_IC][13] = 127,
+ [0][0][1][0][RTW89_KCC][13] = 127,
[0][0][1][0][RTW89_ACMA][13] = 127,
- [0][1][1][0][RTW89_FCC][0] = 54,
+ [0][0][1][0][RTW89_CN][13] = 127,
+ [0][0][1][0][RTW89_UK][13] = 127,
+ [0][1][1][0][RTW89_FCC][0] = 66,
[0][1][1][0][RTW89_ETSI][0] = 48,
[0][1][1][0][RTW89_MKK][0] = 66,
- [0][1][1][0][RTW89_IC][0] = 64,
+ [0][1][1][0][RTW89_IC][0] = 66,
+ [0][1][1][0][RTW89_KCC][0] = 64,
[0][1][1][0][RTW89_ACMA][0] = 48,
- [0][1][1][0][RTW89_FCC][1] = 54,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 48,
+ [0][1][1][0][RTW89_FCC][1] = 68,
[0][1][1][0][RTW89_ETSI][1] = 48,
[0][1][1][0][RTW89_MKK][1] = 66,
- [0][1][1][0][RTW89_IC][1] = 64,
+ [0][1][1][0][RTW89_IC][1] = 68,
+ [0][1][1][0][RTW89_KCC][1] = 64,
[0][1][1][0][RTW89_ACMA][1] = 48,
- [0][1][1][0][RTW89_FCC][2] = 58,
+ [0][1][1][0][RTW89_CN][1] = 46,
+ [0][1][1][0][RTW89_UK][1] = 48,
+ [0][1][1][0][RTW89_FCC][2] = 72,
[0][1][1][0][RTW89_ETSI][2] = 48,
[0][1][1][0][RTW89_MKK][2] = 66,
- [0][1][1][0][RTW89_IC][2] = 68,
+ [0][1][1][0][RTW89_IC][2] = 72,
+ [0][1][1][0][RTW89_KCC][2] = 64,
[0][1][1][0][RTW89_ACMA][2] = 48,
- [0][1][1][0][RTW89_FCC][3] = 62,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 48,
+ [0][1][1][0][RTW89_FCC][3] = 76,
[0][1][1][0][RTW89_ETSI][3] = 48,
[0][1][1][0][RTW89_MKK][3] = 66,
- [0][1][1][0][RTW89_IC][3] = 72,
+ [0][1][1][0][RTW89_IC][3] = 76,
+ [0][1][1][0][RTW89_KCC][3] = 64,
[0][1][1][0][RTW89_ACMA][3] = 48,
- [0][1][1][0][RTW89_FCC][4] = 70,
+ [0][1][1][0][RTW89_CN][3] = 46,
+ [0][1][1][0][RTW89_UK][3] = 48,
+ [0][1][1][0][RTW89_FCC][4] = 80,
[0][1][1][0][RTW89_ETSI][4] = 48,
[0][1][1][0][RTW89_MKK][4] = 66,
- [0][1][1][0][RTW89_IC][4] = 78,
+ [0][1][1][0][RTW89_IC][4] = 80,
+ [0][1][1][0][RTW89_KCC][4] = 66,
[0][1][1][0][RTW89_ACMA][4] = 48,
- [0][1][1][0][RTW89_FCC][5] = 70,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 48,
+ [0][1][1][0][RTW89_FCC][5] = 80,
[0][1][1][0][RTW89_ETSI][5] = 48,
[0][1][1][0][RTW89_MKK][5] = 66,
- [0][1][1][0][RTW89_IC][5] = 78,
+ [0][1][1][0][RTW89_IC][5] = 80,
+ [0][1][1][0][RTW89_KCC][5] = 66,
[0][1][1][0][RTW89_ACMA][5] = 48,
- [0][1][1][0][RTW89_FCC][6] = 70,
+ [0][1][1][0][RTW89_CN][5] = 46,
+ [0][1][1][0][RTW89_UK][5] = 48,
+ [0][1][1][0][RTW89_FCC][6] = 80,
[0][1][1][0][RTW89_ETSI][6] = 48,
[0][1][1][0][RTW89_MKK][6] = 66,
- [0][1][1][0][RTW89_IC][6] = 78,
+ [0][1][1][0][RTW89_IC][6] = 80,
+ [0][1][1][0][RTW89_KCC][6] = 66,
[0][1][1][0][RTW89_ACMA][6] = 48,
- [0][1][1][0][RTW89_FCC][7] = 62,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 48,
+ [0][1][1][0][RTW89_FCC][7] = 78,
[0][1][1][0][RTW89_ETSI][7] = 48,
[0][1][1][0][RTW89_MKK][7] = 66,
- [0][1][1][0][RTW89_IC][7] = 72,
+ [0][1][1][0][RTW89_IC][7] = 78,
+ [0][1][1][0][RTW89_KCC][7] = 66,
[0][1][1][0][RTW89_ACMA][7] = 48,
- [0][1][1][0][RTW89_FCC][8] = 58,
+ [0][1][1][0][RTW89_CN][7] = 46,
+ [0][1][1][0][RTW89_UK][7] = 48,
+ [0][1][1][0][RTW89_FCC][8] = 74,
[0][1][1][0][RTW89_ETSI][8] = 48,
[0][1][1][0][RTW89_MKK][8] = 66,
- [0][1][1][0][RTW89_IC][8] = 68,
+ [0][1][1][0][RTW89_IC][8] = 74,
+ [0][1][1][0][RTW89_KCC][8] = 66,
[0][1][1][0][RTW89_ACMA][8] = 48,
- [0][1][1][0][RTW89_FCC][9] = 54,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 48,
+ [0][1][1][0][RTW89_FCC][9] = 70,
[0][1][1][0][RTW89_ETSI][9] = 48,
[0][1][1][0][RTW89_MKK][9] = 66,
- [0][1][1][0][RTW89_IC][9] = 64,
+ [0][1][1][0][RTW89_IC][9] = 70,
+ [0][1][1][0][RTW89_KCC][9] = 64,
[0][1][1][0][RTW89_ACMA][9] = 48,
- [0][1][1][0][RTW89_FCC][10] = 54,
+ [0][1][1][0][RTW89_CN][9] = 46,
+ [0][1][1][0][RTW89_UK][9] = 48,
+ [0][1][1][0][RTW89_FCC][10] = 62,
[0][1][1][0][RTW89_ETSI][10] = 48,
[0][1][1][0][RTW89_MKK][10] = 66,
- [0][1][1][0][RTW89_IC][10] = 64,
+ [0][1][1][0][RTW89_IC][10] = 62,
+ [0][1][1][0][RTW89_KCC][10] = 64,
[0][1][1][0][RTW89_ACMA][10] = 48,
- [0][1][1][0][RTW89_FCC][11] = 38,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 48,
+ [0][1][1][0][RTW89_FCC][11] = 60,
[0][1][1][0][RTW89_ETSI][11] = 48,
[0][1][1][0][RTW89_MKK][11] = 66,
- [0][1][1][0][RTW89_IC][11] = 48,
+ [0][1][1][0][RTW89_IC][11] = 60,
+ [0][1][1][0][RTW89_KCC][11] = 64,
[0][1][1][0][RTW89_ACMA][11] = 48,
- [0][1][1][0][RTW89_FCC][12] = 34,
+ [0][1][1][0][RTW89_CN][11] = 46,
+ [0][1][1][0][RTW89_UK][11] = 48,
+ [0][1][1][0][RTW89_FCC][12] = 36,
[0][1][1][0][RTW89_ETSI][12] = 48,
[0][1][1][0][RTW89_MKK][12] = 66,
- [0][1][1][0][RTW89_IC][12] = 44,
+ [0][1][1][0][RTW89_IC][12] = 36,
+ [0][1][1][0][RTW89_KCC][12] = 64,
[0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 48,
[0][1][1][0][RTW89_FCC][13] = 127,
[0][1][1][0][RTW89_ETSI][13] = 127,
[0][1][1][0][RTW89_MKK][13] = 127,
[0][1][1][0][RTW89_IC][13] = 127,
+ [0][1][1][0][RTW89_KCC][13] = 127,
[0][1][1][0][RTW89_ACMA][13] = 127,
- [0][0][2][0][RTW89_FCC][0] = 68,
+ [0][1][1][0][RTW89_CN][13] = 127,
+ [0][1][1][0][RTW89_UK][13] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 66,
[0][0][2][0][RTW89_ETSI][0] = 60,
[0][0][2][0][RTW89_MKK][0] = 78,
- [0][0][2][0][RTW89_IC][0] = 78,
+ [0][0][2][0][RTW89_IC][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 70,
[0][0][2][0][RTW89_ACMA][0] = 60,
- [0][0][2][0][RTW89_FCC][1] = 68,
+ [0][0][2][0][RTW89_CN][0] = 58,
+ [0][0][2][0][RTW89_UK][0] = 60,
+ [0][0][2][0][RTW89_FCC][1] = 70,
[0][0][2][0][RTW89_ETSI][1] = 60,
[0][0][2][0][RTW89_MKK][1] = 78,
- [0][0][2][0][RTW89_IC][1] = 78,
+ [0][0][2][0][RTW89_IC][1] = 70,
+ [0][0][2][0][RTW89_KCC][1] = 70,
[0][0][2][0][RTW89_ACMA][1] = 60,
- [0][0][2][0][RTW89_FCC][2] = 70,
+ [0][0][2][0][RTW89_CN][1] = 58,
+ [0][0][2][0][RTW89_UK][1] = 60,
+ [0][0][2][0][RTW89_FCC][2] = 74,
[0][0][2][0][RTW89_ETSI][2] = 60,
[0][0][2][0][RTW89_MKK][2] = 78,
- [0][0][2][0][RTW89_IC][2] = 78,
+ [0][0][2][0][RTW89_IC][2] = 74,
+ [0][0][2][0][RTW89_KCC][2] = 70,
[0][0][2][0][RTW89_ACMA][2] = 60,
- [0][0][2][0][RTW89_FCC][3] = 70,
+ [0][0][2][0][RTW89_CN][2] = 58,
+ [0][0][2][0][RTW89_UK][2] = 60,
+ [0][0][2][0][RTW89_FCC][3] = 78,
[0][0][2][0][RTW89_ETSI][3] = 60,
[0][0][2][0][RTW89_MKK][3] = 78,
[0][0][2][0][RTW89_IC][3] = 78,
+ [0][0][2][0][RTW89_KCC][3] = 70,
[0][0][2][0][RTW89_ACMA][3] = 60,
- [0][0][2][0][RTW89_FCC][4] = 70,
+ [0][0][2][0][RTW89_CN][3] = 58,
+ [0][0][2][0][RTW89_UK][3] = 60,
+ [0][0][2][0][RTW89_FCC][4] = 80,
[0][0][2][0][RTW89_ETSI][4] = 60,
[0][0][2][0][RTW89_MKK][4] = 78,
- [0][0][2][0][RTW89_IC][4] = 78,
+ [0][0][2][0][RTW89_IC][4] = 80,
+ [0][0][2][0][RTW89_KCC][4] = 78,
[0][0][2][0][RTW89_ACMA][4] = 60,
- [0][0][2][0][RTW89_FCC][5] = 70,
+ [0][0][2][0][RTW89_CN][4] = 58,
+ [0][0][2][0][RTW89_UK][4] = 60,
+ [0][0][2][0][RTW89_FCC][5] = 80,
[0][0][2][0][RTW89_ETSI][5] = 60,
[0][0][2][0][RTW89_MKK][5] = 78,
- [0][0][2][0][RTW89_IC][5] = 78,
+ [0][0][2][0][RTW89_IC][5] = 80,
+ [0][0][2][0][RTW89_KCC][5] = 78,
[0][0][2][0][RTW89_ACMA][5] = 60,
- [0][0][2][0][RTW89_FCC][6] = 70,
+ [0][0][2][0][RTW89_CN][5] = 58,
+ [0][0][2][0][RTW89_UK][5] = 60,
+ [0][0][2][0][RTW89_FCC][6] = 80,
[0][0][2][0][RTW89_ETSI][6] = 60,
[0][0][2][0][RTW89_MKK][6] = 78,
- [0][0][2][0][RTW89_IC][6] = 78,
+ [0][0][2][0][RTW89_IC][6] = 80,
+ [0][0][2][0][RTW89_KCC][6] = 78,
[0][0][2][0][RTW89_ACMA][6] = 60,
- [0][0][2][0][RTW89_FCC][7] = 70,
+ [0][0][2][0][RTW89_CN][6] = 58,
+ [0][0][2][0][RTW89_UK][6] = 60,
+ [0][0][2][0][RTW89_FCC][7] = 80,
[0][0][2][0][RTW89_ETSI][7] = 60,
[0][0][2][0][RTW89_MKK][7] = 78,
- [0][0][2][0][RTW89_IC][7] = 78,
+ [0][0][2][0][RTW89_IC][7] = 80,
+ [0][0][2][0][RTW89_KCC][7] = 78,
[0][0][2][0][RTW89_ACMA][7] = 60,
- [0][0][2][0][RTW89_FCC][8] = 68,
+ [0][0][2][0][RTW89_CN][7] = 58,
+ [0][0][2][0][RTW89_UK][7] = 60,
+ [0][0][2][0][RTW89_FCC][8] = 78,
[0][0][2][0][RTW89_ETSI][8] = 60,
[0][0][2][0][RTW89_MKK][8] = 78,
[0][0][2][0][RTW89_IC][8] = 78,
+ [0][0][2][0][RTW89_KCC][8] = 78,
[0][0][2][0][RTW89_ACMA][8] = 60,
- [0][0][2][0][RTW89_FCC][9] = 64,
+ [0][0][2][0][RTW89_CN][8] = 58,
+ [0][0][2][0][RTW89_UK][8] = 60,
+ [0][0][2][0][RTW89_FCC][9] = 74,
[0][0][2][0][RTW89_ETSI][9] = 60,
[0][0][2][0][RTW89_MKK][9] = 78,
[0][0][2][0][RTW89_IC][9] = 74,
+ [0][0][2][0][RTW89_KCC][9] = 66,
[0][0][2][0][RTW89_ACMA][9] = 60,
- [0][0][2][0][RTW89_FCC][10] = 64,
+ [0][0][2][0][RTW89_CN][9] = 58,
+ [0][0][2][0][RTW89_UK][9] = 60,
+ [0][0][2][0][RTW89_FCC][10] = 62,
[0][0][2][0][RTW89_ETSI][10] = 60,
[0][0][2][0][RTW89_MKK][10] = 78,
- [0][0][2][0][RTW89_IC][10] = 74,
+ [0][0][2][0][RTW89_IC][10] = 62,
+ [0][0][2][0][RTW89_KCC][10] = 66,
[0][0][2][0][RTW89_ACMA][10] = 60,
- [0][0][2][0][RTW89_FCC][11] = 46,
+ [0][0][2][0][RTW89_CN][10] = 58,
+ [0][0][2][0][RTW89_UK][10] = 60,
+ [0][0][2][0][RTW89_FCC][11] = 60,
[0][0][2][0][RTW89_ETSI][11] = 60,
[0][0][2][0][RTW89_MKK][11] = 78,
- [0][0][2][0][RTW89_IC][11] = 56,
+ [0][0][2][0][RTW89_IC][11] = 60,
+ [0][0][2][0][RTW89_KCC][11] = 66,
[0][0][2][0][RTW89_ACMA][11] = 60,
- [0][0][2][0][RTW89_FCC][12] = 42,
+ [0][0][2][0][RTW89_CN][11] = 58,
+ [0][0][2][0][RTW89_UK][11] = 60,
+ [0][0][2][0][RTW89_FCC][12] = 38,
[0][0][2][0][RTW89_ETSI][12] = 60,
[0][0][2][0][RTW89_MKK][12] = 78,
- [0][0][2][0][RTW89_IC][12] = 52,
+ [0][0][2][0][RTW89_IC][12] = 38,
+ [0][0][2][0][RTW89_KCC][12] = 66,
[0][0][2][0][RTW89_ACMA][12] = 60,
+ [0][0][2][0][RTW89_CN][12] = 58,
+ [0][0][2][0][RTW89_UK][12] = 60,
[0][0][2][0][RTW89_FCC][13] = 127,
[0][0][2][0][RTW89_ETSI][13] = 127,
[0][0][2][0][RTW89_MKK][13] = 127,
[0][0][2][0][RTW89_IC][13] = 127,
+ [0][0][2][0][RTW89_KCC][13] = 127,
[0][0][2][0][RTW89_ACMA][13] = 127,
- [0][1][2][0][RTW89_FCC][0] = 50,
+ [0][0][2][0][RTW89_CN][13] = 127,
+ [0][0][2][0][RTW89_UK][13] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 64,
[0][1][2][0][RTW89_ETSI][0] = 48,
[0][1][2][0][RTW89_MKK][0] = 68,
- [0][1][2][0][RTW89_IC][0] = 60,
+ [0][1][2][0][RTW89_IC][0] = 64,
+ [0][1][2][0][RTW89_KCC][0] = 66,
[0][1][2][0][RTW89_ACMA][0] = 48,
- [0][1][2][0][RTW89_FCC][1] = 50,
+ [0][1][2][0][RTW89_CN][0] = 46,
+ [0][1][2][0][RTW89_UK][0] = 48,
+ [0][1][2][0][RTW89_FCC][1] = 70,
[0][1][2][0][RTW89_ETSI][1] = 48,
[0][1][2][0][RTW89_MKK][1] = 68,
- [0][1][2][0][RTW89_IC][1] = 60,
+ [0][1][2][0][RTW89_IC][1] = 70,
+ [0][1][2][0][RTW89_KCC][1] = 66,
[0][1][2][0][RTW89_ACMA][1] = 48,
- [0][1][2][0][RTW89_FCC][2] = 54,
+ [0][1][2][0][RTW89_CN][1] = 46,
+ [0][1][2][0][RTW89_UK][1] = 48,
+ [0][1][2][0][RTW89_FCC][2] = 74,
[0][1][2][0][RTW89_ETSI][2] = 48,
[0][1][2][0][RTW89_MKK][2] = 68,
- [0][1][2][0][RTW89_IC][2] = 64,
+ [0][1][2][0][RTW89_IC][2] = 74,
+ [0][1][2][0][RTW89_KCC][2] = 66,
[0][1][2][0][RTW89_ACMA][2] = 48,
- [0][1][2][0][RTW89_FCC][3] = 58,
+ [0][1][2][0][RTW89_CN][2] = 46,
+ [0][1][2][0][RTW89_UK][2] = 48,
+ [0][1][2][0][RTW89_FCC][3] = 78,
[0][1][2][0][RTW89_ETSI][3] = 48,
[0][1][2][0][RTW89_MKK][3] = 68,
- [0][1][2][0][RTW89_IC][3] = 68,
+ [0][1][2][0][RTW89_IC][3] = 78,
+ [0][1][2][0][RTW89_KCC][3] = 66,
[0][1][2][0][RTW89_ACMA][3] = 48,
- [0][1][2][0][RTW89_FCC][4] = 64,
+ [0][1][2][0][RTW89_CN][3] = 46,
+ [0][1][2][0][RTW89_UK][3] = 48,
+ [0][1][2][0][RTW89_FCC][4] = 80,
[0][1][2][0][RTW89_ETSI][4] = 48,
[0][1][2][0][RTW89_MKK][4] = 68,
- [0][1][2][0][RTW89_IC][4] = 74,
+ [0][1][2][0][RTW89_IC][4] = 80,
+ [0][1][2][0][RTW89_KCC][4] = 66,
[0][1][2][0][RTW89_ACMA][4] = 48,
- [0][1][2][0][RTW89_FCC][5] = 70,
+ [0][1][2][0][RTW89_CN][4] = 46,
+ [0][1][2][0][RTW89_UK][4] = 48,
+ [0][1][2][0][RTW89_FCC][5] = 80,
[0][1][2][0][RTW89_ETSI][5] = 48,
[0][1][2][0][RTW89_MKK][5] = 68,
- [0][1][2][0][RTW89_IC][5] = 78,
+ [0][1][2][0][RTW89_IC][5] = 80,
+ [0][1][2][0][RTW89_KCC][5] = 66,
[0][1][2][0][RTW89_ACMA][5] = 48,
- [0][1][2][0][RTW89_FCC][6] = 66,
+ [0][1][2][0][RTW89_CN][5] = 46,
+ [0][1][2][0][RTW89_UK][5] = 48,
+ [0][1][2][0][RTW89_FCC][6] = 80,
[0][1][2][0][RTW89_ETSI][6] = 48,
[0][1][2][0][RTW89_MKK][6] = 68,
- [0][1][2][0][RTW89_IC][6] = 76,
+ [0][1][2][0][RTW89_IC][6] = 80,
+ [0][1][2][0][RTW89_KCC][6] = 66,
[0][1][2][0][RTW89_ACMA][6] = 48,
- [0][1][2][0][RTW89_FCC][7] = 58,
+ [0][1][2][0][RTW89_CN][6] = 46,
+ [0][1][2][0][RTW89_UK][6] = 48,
+ [0][1][2][0][RTW89_FCC][7] = 74,
[0][1][2][0][RTW89_ETSI][7] = 48,
[0][1][2][0][RTW89_MKK][7] = 68,
- [0][1][2][0][RTW89_IC][7] = 68,
+ [0][1][2][0][RTW89_IC][7] = 74,
+ [0][1][2][0][RTW89_KCC][7] = 66,
[0][1][2][0][RTW89_ACMA][7] = 48,
- [0][1][2][0][RTW89_FCC][8] = 54,
+ [0][1][2][0][RTW89_CN][7] = 46,
+ [0][1][2][0][RTW89_UK][7] = 48,
+ [0][1][2][0][RTW89_FCC][8] = 70,
[0][1][2][0][RTW89_ETSI][8] = 48,
[0][1][2][0][RTW89_MKK][8] = 68,
- [0][1][2][0][RTW89_IC][8] = 64,
+ [0][1][2][0][RTW89_IC][8] = 70,
+ [0][1][2][0][RTW89_KCC][8] = 66,
[0][1][2][0][RTW89_ACMA][8] = 48,
- [0][1][2][0][RTW89_FCC][9] = 50,
+ [0][1][2][0][RTW89_CN][8] = 46,
+ [0][1][2][0][RTW89_UK][8] = 48,
+ [0][1][2][0][RTW89_FCC][9] = 66,
[0][1][2][0][RTW89_ETSI][9] = 48,
[0][1][2][0][RTW89_MKK][9] = 68,
- [0][1][2][0][RTW89_IC][9] = 60,
+ [0][1][2][0][RTW89_IC][9] = 66,
+ [0][1][2][0][RTW89_KCC][9] = 64,
[0][1][2][0][RTW89_ACMA][9] = 48,
- [0][1][2][0][RTW89_FCC][10] = 50,
+ [0][1][2][0][RTW89_CN][9] = 46,
+ [0][1][2][0][RTW89_UK][9] = 48,
+ [0][1][2][0][RTW89_FCC][10] = 58,
[0][1][2][0][RTW89_ETSI][10] = 48,
[0][1][2][0][RTW89_MKK][10] = 68,
- [0][1][2][0][RTW89_IC][10] = 60,
+ [0][1][2][0][RTW89_IC][10] = 58,
+ [0][1][2][0][RTW89_KCC][10] = 64,
[0][1][2][0][RTW89_ACMA][10] = 48,
- [0][1][2][0][RTW89_FCC][11] = 38,
+ [0][1][2][0][RTW89_CN][10] = 46,
+ [0][1][2][0][RTW89_UK][10] = 48,
+ [0][1][2][0][RTW89_FCC][11] = 58,
[0][1][2][0][RTW89_ETSI][11] = 48,
[0][1][2][0][RTW89_MKK][11] = 68,
- [0][1][2][0][RTW89_IC][11] = 48,
+ [0][1][2][0][RTW89_IC][11] = 58,
+ [0][1][2][0][RTW89_KCC][11] = 64,
[0][1][2][0][RTW89_ACMA][11] = 48,
- [0][1][2][0][RTW89_FCC][12] = 34,
+ [0][1][2][0][RTW89_CN][11] = 46,
+ [0][1][2][0][RTW89_UK][11] = 48,
+ [0][1][2][0][RTW89_FCC][12] = 16,
[0][1][2][0][RTW89_ETSI][12] = 48,
[0][1][2][0][RTW89_MKK][12] = 68,
- [0][1][2][0][RTW89_IC][12] = 44,
+ [0][1][2][0][RTW89_IC][12] = 16,
+ [0][1][2][0][RTW89_KCC][12] = 64,
[0][1][2][0][RTW89_ACMA][12] = 48,
+ [0][1][2][0][RTW89_CN][12] = 46,
+ [0][1][2][0][RTW89_UK][12] = 48,
[0][1][2][0][RTW89_FCC][13] = 127,
[0][1][2][0][RTW89_ETSI][13] = 127,
[0][1][2][0][RTW89_MKK][13] = 127,
[0][1][2][0][RTW89_IC][13] = 127,
+ [0][1][2][0][RTW89_KCC][13] = 127,
[0][1][2][0][RTW89_ACMA][13] = 127,
- [0][1][2][1][RTW89_FCC][0] = 50,
+ [0][1][2][0][RTW89_CN][13] = 127,
+ [0][1][2][0][RTW89_UK][13] = 127,
+ [0][1][2][1][RTW89_FCC][0] = 64,
[0][1][2][1][RTW89_ETSI][0] = 36,
[0][1][2][1][RTW89_MKK][0] = 68,
- [0][1][2][1][RTW89_IC][0] = 60,
+ [0][1][2][1][RTW89_IC][0] = 64,
+ [0][1][2][1][RTW89_KCC][0] = 66,
[0][1][2][1][RTW89_ACMA][0] = 36,
- [0][1][2][1][RTW89_FCC][1] = 50,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 36,
+ [0][1][2][1][RTW89_FCC][1] = 70,
[0][1][2][1][RTW89_ETSI][1] = 36,
[0][1][2][1][RTW89_MKK][1] = 68,
- [0][1][2][1][RTW89_IC][1] = 60,
+ [0][1][2][1][RTW89_IC][1] = 70,
+ [0][1][2][1][RTW89_KCC][1] = 66,
[0][1][2][1][RTW89_ACMA][1] = 36,
- [0][1][2][1][RTW89_FCC][2] = 54,
+ [0][1][2][1][RTW89_CN][1] = 34,
+ [0][1][2][1][RTW89_UK][1] = 36,
+ [0][1][2][1][RTW89_FCC][2] = 74,
[0][1][2][1][RTW89_ETSI][2] = 36,
[0][1][2][1][RTW89_MKK][2] = 68,
- [0][1][2][1][RTW89_IC][2] = 64,
+ [0][1][2][1][RTW89_IC][2] = 74,
+ [0][1][2][1][RTW89_KCC][2] = 66,
[0][1][2][1][RTW89_ACMA][2] = 36,
- [0][1][2][1][RTW89_FCC][3] = 58,
+ [0][1][2][1][RTW89_CN][2] = 34,
+ [0][1][2][1][RTW89_UK][2] = 36,
+ [0][1][2][1][RTW89_FCC][3] = 78,
[0][1][2][1][RTW89_ETSI][3] = 36,
[0][1][2][1][RTW89_MKK][3] = 68,
- [0][1][2][1][RTW89_IC][3] = 68,
+ [0][1][2][1][RTW89_IC][3] = 78,
+ [0][1][2][1][RTW89_KCC][3] = 66,
[0][1][2][1][RTW89_ACMA][3] = 36,
- [0][1][2][1][RTW89_FCC][4] = 64,
+ [0][1][2][1][RTW89_CN][3] = 34,
+ [0][1][2][1][RTW89_UK][3] = 36,
+ [0][1][2][1][RTW89_FCC][4] = 80,
[0][1][2][1][RTW89_ETSI][4] = 36,
[0][1][2][1][RTW89_MKK][4] = 68,
- [0][1][2][1][RTW89_IC][4] = 74,
+ [0][1][2][1][RTW89_IC][4] = 80,
+ [0][1][2][1][RTW89_KCC][4] = 66,
[0][1][2][1][RTW89_ACMA][4] = 36,
- [0][1][2][1][RTW89_FCC][5] = 70,
+ [0][1][2][1][RTW89_CN][4] = 34,
+ [0][1][2][1][RTW89_UK][4] = 36,
+ [0][1][2][1][RTW89_FCC][5] = 80,
[0][1][2][1][RTW89_ETSI][5] = 36,
[0][1][2][1][RTW89_MKK][5] = 68,
- [0][1][2][1][RTW89_IC][5] = 78,
+ [0][1][2][1][RTW89_IC][5] = 80,
+ [0][1][2][1][RTW89_KCC][5] = 66,
[0][1][2][1][RTW89_ACMA][5] = 36,
- [0][1][2][1][RTW89_FCC][6] = 66,
+ [0][1][2][1][RTW89_CN][5] = 34,
+ [0][1][2][1][RTW89_UK][5] = 36,
+ [0][1][2][1][RTW89_FCC][6] = 80,
[0][1][2][1][RTW89_ETSI][6] = 36,
[0][1][2][1][RTW89_MKK][6] = 68,
- [0][1][2][1][RTW89_IC][6] = 76,
+ [0][1][2][1][RTW89_IC][6] = 80,
+ [0][1][2][1][RTW89_KCC][6] = 66,
[0][1][2][1][RTW89_ACMA][6] = 36,
- [0][1][2][1][RTW89_FCC][7] = 58,
+ [0][1][2][1][RTW89_CN][6] = 34,
+ [0][1][2][1][RTW89_UK][6] = 36,
+ [0][1][2][1][RTW89_FCC][7] = 74,
[0][1][2][1][RTW89_ETSI][7] = 36,
[0][1][2][1][RTW89_MKK][7] = 68,
- [0][1][2][1][RTW89_IC][7] = 68,
+ [0][1][2][1][RTW89_IC][7] = 74,
+ [0][1][2][1][RTW89_KCC][7] = 66,
[0][1][2][1][RTW89_ACMA][7] = 36,
- [0][1][2][1][RTW89_FCC][8] = 54,
+ [0][1][2][1][RTW89_CN][7] = 34,
+ [0][1][2][1][RTW89_UK][7] = 36,
+ [0][1][2][1][RTW89_FCC][8] = 70,
[0][1][2][1][RTW89_ETSI][8] = 36,
[0][1][2][1][RTW89_MKK][8] = 68,
- [0][1][2][1][RTW89_IC][8] = 64,
+ [0][1][2][1][RTW89_IC][8] = 70,
+ [0][1][2][1][RTW89_KCC][8] = 66,
[0][1][2][1][RTW89_ACMA][8] = 36,
- [0][1][2][1][RTW89_FCC][9] = 50,
+ [0][1][2][1][RTW89_CN][8] = 34,
+ [0][1][2][1][RTW89_UK][8] = 36,
+ [0][1][2][1][RTW89_FCC][9] = 66,
[0][1][2][1][RTW89_ETSI][9] = 36,
[0][1][2][1][RTW89_MKK][9] = 68,
- [0][1][2][1][RTW89_IC][9] = 60,
+ [0][1][2][1][RTW89_IC][9] = 66,
+ [0][1][2][1][RTW89_KCC][9] = 64,
[0][1][2][1][RTW89_ACMA][9] = 36,
- [0][1][2][1][RTW89_FCC][10] = 50,
+ [0][1][2][1][RTW89_CN][9] = 34,
+ [0][1][2][1][RTW89_UK][9] = 36,
+ [0][1][2][1][RTW89_FCC][10] = 58,
[0][1][2][1][RTW89_ETSI][10] = 36,
[0][1][2][1][RTW89_MKK][10] = 68,
- [0][1][2][1][RTW89_IC][10] = 60,
+ [0][1][2][1][RTW89_IC][10] = 58,
+ [0][1][2][1][RTW89_KCC][10] = 64,
[0][1][2][1][RTW89_ACMA][10] = 36,
- [0][1][2][1][RTW89_FCC][11] = 38,
+ [0][1][2][1][RTW89_CN][10] = 34,
+ [0][1][2][1][RTW89_UK][10] = 36,
+ [0][1][2][1][RTW89_FCC][11] = 58,
[0][1][2][1][RTW89_ETSI][11] = 36,
[0][1][2][1][RTW89_MKK][11] = 68,
- [0][1][2][1][RTW89_IC][11] = 48,
+ [0][1][2][1][RTW89_IC][11] = 58,
+ [0][1][2][1][RTW89_KCC][11] = 64,
[0][1][2][1][RTW89_ACMA][11] = 36,
- [0][1][2][1][RTW89_FCC][12] = 34,
+ [0][1][2][1][RTW89_CN][11] = 34,
+ [0][1][2][1][RTW89_UK][11] = 36,
+ [0][1][2][1][RTW89_FCC][12] = 16,
[0][1][2][1][RTW89_ETSI][12] = 36,
[0][1][2][1][RTW89_MKK][12] = 68,
- [0][1][2][1][RTW89_IC][12] = 44,
+ [0][1][2][1][RTW89_IC][12] = 16,
+ [0][1][2][1][RTW89_KCC][12] = 64,
[0][1][2][1][RTW89_ACMA][12] = 36,
+ [0][1][2][1][RTW89_CN][12] = 34,
+ [0][1][2][1][RTW89_UK][12] = 36,
[0][1][2][1][RTW89_FCC][13] = 127,
[0][1][2][1][RTW89_ETSI][13] = 127,
[0][1][2][1][RTW89_MKK][13] = 127,
[0][1][2][1][RTW89_IC][13] = 127,
+ [0][1][2][1][RTW89_KCC][13] = 127,
[0][1][2][1][RTW89_ACMA][13] = 127,
+ [0][1][2][1][RTW89_CN][13] = 127,
+ [0][1][2][1][RTW89_UK][13] = 127,
[1][0][2][0][RTW89_FCC][0] = 127,
[1][0][2][0][RTW89_ETSI][0] = 127,
[1][0][2][0][RTW89_MKK][0] = 127,
[1][0][2][0][RTW89_IC][0] = 127,
+ [1][0][2][0][RTW89_KCC][0] = 127,
[1][0][2][0][RTW89_ACMA][0] = 127,
+ [1][0][2][0][RTW89_CN][0] = 127,
+ [1][0][2][0][RTW89_UK][0] = 127,
[1][0][2][0][RTW89_FCC][1] = 127,
[1][0][2][0][RTW89_ETSI][1] = 127,
[1][0][2][0][RTW89_MKK][1] = 127,
[1][0][2][0][RTW89_IC][1] = 127,
+ [1][0][2][0][RTW89_KCC][1] = 127,
[1][0][2][0][RTW89_ACMA][1] = 127,
- [1][0][2][0][RTW89_FCC][2] = 62,
+ [1][0][2][0][RTW89_CN][1] = 127,
+ [1][0][2][0][RTW89_UK][1] = 127,
+ [1][0][2][0][RTW89_FCC][2] = 64,
[1][0][2][0][RTW89_ETSI][2] = 60,
[1][0][2][0][RTW89_MKK][2] = 74,
- [1][0][2][0][RTW89_IC][2] = 72,
+ [1][0][2][0][RTW89_IC][2] = 64,
+ [1][0][2][0][RTW89_KCC][2] = 68,
[1][0][2][0][RTW89_ACMA][2] = 60,
- [1][0][2][0][RTW89_FCC][3] = 62,
+ [1][0][2][0][RTW89_CN][2] = 58,
+ [1][0][2][0][RTW89_UK][2] = 60,
+ [1][0][2][0][RTW89_FCC][3] = 64,
[1][0][2][0][RTW89_ETSI][3] = 60,
[1][0][2][0][RTW89_MKK][3] = 74,
- [1][0][2][0][RTW89_IC][3] = 72,
+ [1][0][2][0][RTW89_IC][3] = 64,
+ [1][0][2][0][RTW89_KCC][3] = 68,
[1][0][2][0][RTW89_ACMA][3] = 60,
- [1][0][2][0][RTW89_FCC][4] = 64,
+ [1][0][2][0][RTW89_CN][3] = 58,
+ [1][0][2][0][RTW89_UK][3] = 60,
+ [1][0][2][0][RTW89_FCC][4] = 68,
[1][0][2][0][RTW89_ETSI][4] = 60,
[1][0][2][0][RTW89_MKK][4] = 74,
- [1][0][2][0][RTW89_IC][4] = 74,
+ [1][0][2][0][RTW89_IC][4] = 68,
+ [1][0][2][0][RTW89_KCC][4] = 68,
[1][0][2][0][RTW89_ACMA][4] = 60,
- [1][0][2][0][RTW89_FCC][5] = 64,
+ [1][0][2][0][RTW89_CN][4] = 58,
+ [1][0][2][0][RTW89_UK][4] = 60,
+ [1][0][2][0][RTW89_FCC][5] = 68,
[1][0][2][0][RTW89_ETSI][5] = 60,
[1][0][2][0][RTW89_MKK][5] = 74,
- [1][0][2][0][RTW89_IC][5] = 74,
+ [1][0][2][0][RTW89_IC][5] = 68,
+ [1][0][2][0][RTW89_KCC][5] = 74,
[1][0][2][0][RTW89_ACMA][5] = 60,
- [1][0][2][0][RTW89_FCC][6] = 64,
+ [1][0][2][0][RTW89_CN][5] = 58,
+ [1][0][2][0][RTW89_UK][5] = 60,
+ [1][0][2][0][RTW89_FCC][6] = 66,
[1][0][2][0][RTW89_ETSI][6] = 60,
[1][0][2][0][RTW89_MKK][6] = 74,
- [1][0][2][0][RTW89_IC][6] = 74,
+ [1][0][2][0][RTW89_IC][6] = 66,
+ [1][0][2][0][RTW89_KCC][6] = 74,
[1][0][2][0][RTW89_ACMA][6] = 60,
- [1][0][2][0][RTW89_FCC][7] = 60,
+ [1][0][2][0][RTW89_CN][6] = 58,
+ [1][0][2][0][RTW89_UK][6] = 60,
+ [1][0][2][0][RTW89_FCC][7] = 62,
[1][0][2][0][RTW89_ETSI][7] = 60,
[1][0][2][0][RTW89_MKK][7] = 74,
- [1][0][2][0][RTW89_IC][7] = 70,
+ [1][0][2][0][RTW89_IC][7] = 62,
+ [1][0][2][0][RTW89_KCC][7] = 74,
[1][0][2][0][RTW89_ACMA][7] = 60,
- [1][0][2][0][RTW89_FCC][8] = 60,
+ [1][0][2][0][RTW89_CN][7] = 58,
+ [1][0][2][0][RTW89_UK][7] = 60,
+ [1][0][2][0][RTW89_FCC][8] = 62,
[1][0][2][0][RTW89_ETSI][8] = 60,
[1][0][2][0][RTW89_MKK][8] = 74,
- [1][0][2][0][RTW89_IC][8] = 70,
+ [1][0][2][0][RTW89_IC][8] = 62,
+ [1][0][2][0][RTW89_KCC][8] = 68,
[1][0][2][0][RTW89_ACMA][8] = 60,
+ [1][0][2][0][RTW89_CN][8] = 58,
+ [1][0][2][0][RTW89_UK][8] = 60,
[1][0][2][0][RTW89_FCC][9] = 60,
[1][0][2][0][RTW89_ETSI][9] = 60,
[1][0][2][0][RTW89_MKK][9] = 74,
- [1][0][2][0][RTW89_IC][9] = 70,
+ [1][0][2][0][RTW89_IC][9] = 60,
+ [1][0][2][0][RTW89_KCC][9] = 68,
[1][0][2][0][RTW89_ACMA][9] = 60,
- [1][0][2][0][RTW89_FCC][10] = 58,
+ [1][0][2][0][RTW89_CN][9] = 58,
+ [1][0][2][0][RTW89_UK][9] = 60,
+ [1][0][2][0][RTW89_FCC][10] = 56,
[1][0][2][0][RTW89_ETSI][10] = 60,
[1][0][2][0][RTW89_MKK][10] = 74,
- [1][0][2][0][RTW89_IC][10] = 68,
+ [1][0][2][0][RTW89_IC][10] = 56,
+ [1][0][2][0][RTW89_KCC][10] = 68,
[1][0][2][0][RTW89_ACMA][10] = 60,
+ [1][0][2][0][RTW89_CN][10] = 58,
+ [1][0][2][0][RTW89_UK][10] = 60,
[1][0][2][0][RTW89_FCC][11] = 127,
[1][0][2][0][RTW89_ETSI][11] = 127,
[1][0][2][0][RTW89_MKK][11] = 127,
[1][0][2][0][RTW89_IC][11] = 127,
+ [1][0][2][0][RTW89_KCC][11] = 127,
[1][0][2][0][RTW89_ACMA][11] = 127,
+ [1][0][2][0][RTW89_CN][11] = 127,
+ [1][0][2][0][RTW89_UK][11] = 127,
[1][0][2][0][RTW89_FCC][12] = 127,
[1][0][2][0][RTW89_ETSI][12] = 127,
[1][0][2][0][RTW89_MKK][12] = 127,
[1][0][2][0][RTW89_IC][12] = 127,
+ [1][0][2][0][RTW89_KCC][12] = 127,
[1][0][2][0][RTW89_ACMA][12] = 127,
+ [1][0][2][0][RTW89_CN][12] = 127,
+ [1][0][2][0][RTW89_UK][12] = 127,
[1][0][2][0][RTW89_FCC][13] = 127,
[1][0][2][0][RTW89_ETSI][13] = 127,
[1][0][2][0][RTW89_MKK][13] = 127,
[1][0][2][0][RTW89_IC][13] = 127,
+ [1][0][2][0][RTW89_KCC][13] = 127,
[1][0][2][0][RTW89_ACMA][13] = 127,
+ [1][0][2][0][RTW89_CN][13] = 127,
+ [1][0][2][0][RTW89_UK][13] = 127,
[1][1][2][0][RTW89_FCC][0] = 127,
[1][1][2][0][RTW89_ETSI][0] = 127,
[1][1][2][0][RTW89_MKK][0] = 127,
[1][1][2][0][RTW89_IC][0] = 127,
+ [1][1][2][0][RTW89_KCC][0] = 127,
[1][1][2][0][RTW89_ACMA][0] = 127,
+ [1][1][2][0][RTW89_CN][0] = 127,
+ [1][1][2][0][RTW89_UK][0] = 127,
[1][1][2][0][RTW89_FCC][1] = 127,
[1][1][2][0][RTW89_ETSI][1] = 127,
[1][1][2][0][RTW89_MKK][1] = 127,
[1][1][2][0][RTW89_IC][1] = 127,
+ [1][1][2][0][RTW89_KCC][1] = 127,
[1][1][2][0][RTW89_ACMA][1] = 127,
- [1][1][2][0][RTW89_FCC][2] = 46,
+ [1][1][2][0][RTW89_CN][1] = 127,
+ [1][1][2][0][RTW89_UK][1] = 127,
+ [1][1][2][0][RTW89_FCC][2] = 60,
[1][1][2][0][RTW89_ETSI][2] = 48,
[1][1][2][0][RTW89_MKK][2] = 68,
- [1][1][2][0][RTW89_IC][2] = 56,
+ [1][1][2][0][RTW89_IC][2] = 60,
+ [1][1][2][0][RTW89_KCC][2] = 64,
[1][1][2][0][RTW89_ACMA][2] = 48,
- [1][1][2][0][RTW89_FCC][3] = 46,
+ [1][1][2][0][RTW89_CN][2] = 34,
+ [1][1][2][0][RTW89_UK][2] = 48,
+ [1][1][2][0][RTW89_FCC][3] = 60,
[1][1][2][0][RTW89_ETSI][3] = 48,
[1][1][2][0][RTW89_MKK][3] = 68,
- [1][1][2][0][RTW89_IC][3] = 56,
+ [1][1][2][0][RTW89_IC][3] = 60,
+ [1][1][2][0][RTW89_KCC][3] = 64,
[1][1][2][0][RTW89_ACMA][3] = 48,
- [1][1][2][0][RTW89_FCC][4] = 50,
+ [1][1][2][0][RTW89_CN][3] = 34,
+ [1][1][2][0][RTW89_UK][3] = 48,
+ [1][1][2][0][RTW89_FCC][4] = 60,
[1][1][2][0][RTW89_ETSI][4] = 48,
[1][1][2][0][RTW89_MKK][4] = 68,
[1][1][2][0][RTW89_IC][4] = 60,
+ [1][1][2][0][RTW89_KCC][4] = 64,
[1][1][2][0][RTW89_ACMA][4] = 48,
- [1][1][2][0][RTW89_FCC][5] = 58,
+ [1][1][2][0][RTW89_CN][4] = 34,
+ [1][1][2][0][RTW89_UK][4] = 48,
+ [1][1][2][0][RTW89_FCC][5] = 60,
[1][1][2][0][RTW89_ETSI][5] = 48,
[1][1][2][0][RTW89_MKK][5] = 68,
- [1][1][2][0][RTW89_IC][5] = 68,
+ [1][1][2][0][RTW89_IC][5] = 60,
+ [1][1][2][0][RTW89_KCC][5] = 66,
[1][1][2][0][RTW89_ACMA][5] = 48,
- [1][1][2][0][RTW89_FCC][6] = 50,
+ [1][1][2][0][RTW89_CN][5] = 34,
+ [1][1][2][0][RTW89_UK][5] = 48,
+ [1][1][2][0][RTW89_FCC][6] = 58,
[1][1][2][0][RTW89_ETSI][6] = 48,
[1][1][2][0][RTW89_MKK][6] = 68,
- [1][1][2][0][RTW89_IC][6] = 60,
+ [1][1][2][0][RTW89_IC][6] = 58,
+ [1][1][2][0][RTW89_KCC][6] = 66,
[1][1][2][0][RTW89_ACMA][6] = 48,
- [1][1][2][0][RTW89_FCC][7] = 46,
+ [1][1][2][0][RTW89_CN][6] = 34,
+ [1][1][2][0][RTW89_UK][6] = 48,
+ [1][1][2][0][RTW89_FCC][7] = 54,
[1][1][2][0][RTW89_ETSI][7] = 48,
[1][1][2][0][RTW89_MKK][7] = 68,
- [1][1][2][0][RTW89_IC][7] = 56,
+ [1][1][2][0][RTW89_IC][7] = 54,
+ [1][1][2][0][RTW89_KCC][7] = 66,
[1][1][2][0][RTW89_ACMA][7] = 48,
- [1][1][2][0][RTW89_FCC][8] = 46,
+ [1][1][2][0][RTW89_CN][7] = 34,
+ [1][1][2][0][RTW89_UK][7] = 48,
+ [1][1][2][0][RTW89_FCC][8] = 54,
[1][1][2][0][RTW89_ETSI][8] = 48,
[1][1][2][0][RTW89_MKK][8] = 68,
- [1][1][2][0][RTW89_IC][8] = 56,
+ [1][1][2][0][RTW89_IC][8] = 54,
+ [1][1][2][0][RTW89_KCC][8] = 64,
[1][1][2][0][RTW89_ACMA][8] = 48,
- [1][1][2][0][RTW89_FCC][9] = 34,
+ [1][1][2][0][RTW89_CN][8] = 34,
+ [1][1][2][0][RTW89_UK][8] = 48,
+ [1][1][2][0][RTW89_FCC][9] = 54,
[1][1][2][0][RTW89_ETSI][9] = 48,
[1][1][2][0][RTW89_MKK][9] = 68,
- [1][1][2][0][RTW89_IC][9] = 44,
+ [1][1][2][0][RTW89_IC][9] = 54,
+ [1][1][2][0][RTW89_KCC][9] = 64,
[1][1][2][0][RTW89_ACMA][9] = 48,
- [1][1][2][0][RTW89_FCC][10] = 30,
+ [1][1][2][0][RTW89_CN][9] = 34,
+ [1][1][2][0][RTW89_UK][9] = 48,
+ [1][1][2][0][RTW89_FCC][10] = 46,
[1][1][2][0][RTW89_ETSI][10] = 48,
[1][1][2][0][RTW89_MKK][10] = 68,
- [1][1][2][0][RTW89_IC][10] = 40,
+ [1][1][2][0][RTW89_IC][10] = 46,
+ [1][1][2][0][RTW89_KCC][10] = 64,
[1][1][2][0][RTW89_ACMA][10] = 48,
+ [1][1][2][0][RTW89_CN][10] = 34,
+ [1][1][2][0][RTW89_UK][10] = 48,
[1][1][2][0][RTW89_FCC][11] = 127,
[1][1][2][0][RTW89_ETSI][11] = 127,
[1][1][2][0][RTW89_MKK][11] = 127,
[1][1][2][0][RTW89_IC][11] = 127,
+ [1][1][2][0][RTW89_KCC][11] = 127,
[1][1][2][0][RTW89_ACMA][11] = 127,
+ [1][1][2][0][RTW89_CN][11] = 127,
+ [1][1][2][0][RTW89_UK][11] = 127,
[1][1][2][0][RTW89_FCC][12] = 127,
[1][1][2][0][RTW89_ETSI][12] = 127,
[1][1][2][0][RTW89_MKK][12] = 127,
[1][1][2][0][RTW89_IC][12] = 127,
+ [1][1][2][0][RTW89_KCC][12] = 127,
[1][1][2][0][RTW89_ACMA][12] = 127,
+ [1][1][2][0][RTW89_CN][12] = 127,
+ [1][1][2][0][RTW89_UK][12] = 127,
[1][1][2][0][RTW89_FCC][13] = 127,
[1][1][2][0][RTW89_ETSI][13] = 127,
[1][1][2][0][RTW89_MKK][13] = 127,
[1][1][2][0][RTW89_IC][13] = 127,
+ [1][1][2][0][RTW89_KCC][13] = 127,
[1][1][2][0][RTW89_ACMA][13] = 127,
+ [1][1][2][0][RTW89_CN][13] = 127,
+ [1][1][2][0][RTW89_UK][13] = 127,
[1][1][2][1][RTW89_FCC][0] = 127,
[1][1][2][1][RTW89_ETSI][0] = 127,
[1][1][2][1][RTW89_MKK][0] = 127,
[1][1][2][1][RTW89_IC][0] = 127,
+ [1][1][2][1][RTW89_KCC][0] = 127,
[1][1][2][1][RTW89_ACMA][0] = 127,
+ [1][1][2][1][RTW89_CN][0] = 127,
+ [1][1][2][1][RTW89_UK][0] = 127,
[1][1][2][1][RTW89_FCC][1] = 127,
[1][1][2][1][RTW89_ETSI][1] = 127,
[1][1][2][1][RTW89_MKK][1] = 127,
[1][1][2][1][RTW89_IC][1] = 127,
+ [1][1][2][1][RTW89_KCC][1] = 127,
[1][1][2][1][RTW89_ACMA][1] = 127,
- [1][1][2][1][RTW89_FCC][2] = 46,
+ [1][1][2][1][RTW89_CN][1] = 127,
+ [1][1][2][1][RTW89_UK][1] = 127,
+ [1][1][2][1][RTW89_FCC][2] = 60,
[1][1][2][1][RTW89_ETSI][2] = 36,
[1][1][2][1][RTW89_MKK][2] = 68,
- [1][1][2][1][RTW89_IC][2] = 56,
+ [1][1][2][1][RTW89_IC][2] = 60,
+ [1][1][2][1][RTW89_KCC][2] = 64,
[1][1][2][1][RTW89_ACMA][2] = 36,
- [1][1][2][1][RTW89_FCC][3] = 46,
+ [1][1][2][1][RTW89_CN][2] = 34,
+ [1][1][2][1][RTW89_UK][2] = 36,
+ [1][1][2][1][RTW89_FCC][3] = 60,
[1][1][2][1][RTW89_ETSI][3] = 36,
[1][1][2][1][RTW89_MKK][3] = 68,
- [1][1][2][1][RTW89_IC][3] = 56,
+ [1][1][2][1][RTW89_IC][3] = 60,
+ [1][1][2][1][RTW89_KCC][3] = 64,
[1][1][2][1][RTW89_ACMA][3] = 36,
- [1][1][2][1][RTW89_FCC][4] = 50,
+ [1][1][2][1][RTW89_CN][3] = 34,
+ [1][1][2][1][RTW89_UK][3] = 36,
+ [1][1][2][1][RTW89_FCC][4] = 60,
[1][1][2][1][RTW89_ETSI][4] = 36,
[1][1][2][1][RTW89_MKK][4] = 68,
[1][1][2][1][RTW89_IC][4] = 60,
+ [1][1][2][1][RTW89_KCC][4] = 64,
[1][1][2][1][RTW89_ACMA][4] = 36,
- [1][1][2][1][RTW89_FCC][5] = 58,
+ [1][1][2][1][RTW89_CN][4] = 34,
+ [1][1][2][1][RTW89_UK][4] = 36,
+ [1][1][2][1][RTW89_FCC][5] = 60,
[1][1][2][1][RTW89_ETSI][5] = 36,
[1][1][2][1][RTW89_MKK][5] = 68,
- [1][1][2][1][RTW89_IC][5] = 68,
+ [1][1][2][1][RTW89_IC][5] = 60,
+ [1][1][2][1][RTW89_KCC][5] = 66,
[1][1][2][1][RTW89_ACMA][5] = 36,
- [1][1][2][1][RTW89_FCC][6] = 50,
+ [1][1][2][1][RTW89_CN][5] = 34,
+ [1][1][2][1][RTW89_UK][5] = 36,
+ [1][1][2][1][RTW89_FCC][6] = 58,
[1][1][2][1][RTW89_ETSI][6] = 36,
[1][1][2][1][RTW89_MKK][6] = 68,
- [1][1][2][1][RTW89_IC][6] = 60,
+ [1][1][2][1][RTW89_IC][6] = 58,
+ [1][1][2][1][RTW89_KCC][6] = 66,
[1][1][2][1][RTW89_ACMA][6] = 36,
- [1][1][2][1][RTW89_FCC][7] = 46,
+ [1][1][2][1][RTW89_CN][6] = 34,
+ [1][1][2][1][RTW89_UK][6] = 36,
+ [1][1][2][1][RTW89_FCC][7] = 54,
[1][1][2][1][RTW89_ETSI][7] = 36,
[1][1][2][1][RTW89_MKK][7] = 68,
- [1][1][2][1][RTW89_IC][7] = 56,
+ [1][1][2][1][RTW89_IC][7] = 54,
+ [1][1][2][1][RTW89_KCC][7] = 66,
[1][1][2][1][RTW89_ACMA][7] = 36,
- [1][1][2][1][RTW89_FCC][8] = 46,
+ [1][1][2][1][RTW89_CN][7] = 34,
+ [1][1][2][1][RTW89_UK][7] = 36,
+ [1][1][2][1][RTW89_FCC][8] = 54,
[1][1][2][1][RTW89_ETSI][8] = 36,
[1][1][2][1][RTW89_MKK][8] = 68,
- [1][1][2][1][RTW89_IC][8] = 56,
+ [1][1][2][1][RTW89_IC][8] = 54,
+ [1][1][2][1][RTW89_KCC][8] = 64,
[1][1][2][1][RTW89_ACMA][8] = 36,
- [1][1][2][1][RTW89_FCC][9] = 34,
+ [1][1][2][1][RTW89_CN][8] = 34,
+ [1][1][2][1][RTW89_UK][8] = 36,
+ [1][1][2][1][RTW89_FCC][9] = 54,
[1][1][2][1][RTW89_ETSI][9] = 36,
[1][1][2][1][RTW89_MKK][9] = 68,
- [1][1][2][1][RTW89_IC][9] = 44,
+ [1][1][2][1][RTW89_IC][9] = 54,
+ [1][1][2][1][RTW89_KCC][9] = 64,
[1][1][2][1][RTW89_ACMA][9] = 36,
- [1][1][2][1][RTW89_FCC][10] = 30,
+ [1][1][2][1][RTW89_CN][9] = 34,
+ [1][1][2][1][RTW89_UK][9] = 36,
+ [1][1][2][1][RTW89_FCC][10] = 46,
[1][1][2][1][RTW89_ETSI][10] = 36,
[1][1][2][1][RTW89_MKK][10] = 68,
- [1][1][2][1][RTW89_IC][10] = 40,
+ [1][1][2][1][RTW89_IC][10] = 46,
+ [1][1][2][1][RTW89_KCC][10] = 64,
[1][1][2][1][RTW89_ACMA][10] = 36,
+ [1][1][2][1][RTW89_CN][10] = 36,
+ [1][1][2][1][RTW89_UK][10] = 36,
[1][1][2][1][RTW89_FCC][11] = 127,
[1][1][2][1][RTW89_ETSI][11] = 127,
[1][1][2][1][RTW89_MKK][11] = 127,
[1][1][2][1][RTW89_IC][11] = 127,
+ [1][1][2][1][RTW89_KCC][11] = 127,
[1][1][2][1][RTW89_ACMA][11] = 127,
+ [1][1][2][1][RTW89_CN][11] = 127,
+ [1][1][2][1][RTW89_UK][11] = 127,
[1][1][2][1][RTW89_FCC][12] = 127,
[1][1][2][1][RTW89_ETSI][12] = 127,
[1][1][2][1][RTW89_MKK][12] = 127,
[1][1][2][1][RTW89_IC][12] = 127,
+ [1][1][2][1][RTW89_KCC][12] = 127,
[1][1][2][1][RTW89_ACMA][12] = 127,
+ [1][1][2][1][RTW89_CN][12] = 127,
+ [1][1][2][1][RTW89_UK][12] = 127,
[1][1][2][1][RTW89_FCC][13] = 127,
[1][1][2][1][RTW89_ETSI][13] = 127,
[1][1][2][1][RTW89_MKK][13] = 127,
[1][1][2][1][RTW89_IC][13] = 127,
+ [1][1][2][1][RTW89_KCC][13] = 127,
[1][1][2][1][RTW89_ACMA][13] = 127,
+ [1][1][2][1][RTW89_CN][13] = 127,
+ [1][1][2][1][RTW89_UK][13] = 127,
};
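Judging by the index macro names in the declaration that follows (RTW89_5G_BW_NUM, RTW89_NTX_NUM, RTW89_RS_LMT_NUM, RTW89_BF_NUM, RTW89_REGD_NUM, RTW89_5G_CH_NUM), each initializer entry in these tables is indexed as [bandwidth][TX-path count][rate section][beamforming][regulatory domain][channel]. The short sketch below only mirrors that layout; it is a self-contained toy, not driver code. The tiny dimension sizes, the helper toy_lookup() and the reading of the repeated value 127 as an "entry not specified" sentinel are assumptions made purely for illustration.

/*
 * Self-contained sketch (not driver code) of how a 6-dimensional TX power
 * limit table such as rtw89_8852c_txpwr_lmt_5g is laid out and read.
 * Dimension sizes, toy_lookup() and the treatment of 127 as an
 * "entry not specified" sentinel are assumptions for illustration only.
 */
#include <stdio.h>

typedef signed char s8;

enum { REGD_FCC, REGD_ETSI, REGD_NUM };    /* toy regulatory domains */

#define BW_NUM   1                         /* bandwidth index        */
#define NTX_NUM  1                         /* number of TX paths     */
#define RS_NUM   2                         /* rate section           */
#define BF_NUM   1                         /* beamforming off/on     */
#define CH_NUM   2                         /* channel index          */

static const s8 toy_lmt[BW_NUM][NTX_NUM][RS_NUM][BF_NUM][REGD_NUM][CH_NUM] = {
	[0][0][1][0][REGD_FCC][0]  = 72,
	[0][0][1][0][REGD_ETSI][0] = 66,
	[0][0][1][0][REGD_FCC][1]  = 127,  /* assumed: not specified */
	[0][0][1][0][REGD_ETSI][1] = 127,
};

/* Plain indexed read of one limit value. */
static s8 toy_lookup(int bw, int ntx, int rs, int bf, int regd, int ch)
{
	return toy_lmt[bw][ntx][rs][bf][regd][ch];
}

int main(void)
{
	/* Same index order as the kernel tables in this diff. */
	printf("FCC  ch0: %d\n", toy_lookup(0, 0, 1, 0, REGD_FCC, 0));
	printf("ETSI ch1: %d\n", toy_lookup(0, 0, 1, 0, REGD_ETSI, 1));
	return 0;
}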
const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][1][0][RTW89_WW][0] = 60,
- [0][0][1][0][RTW89_WW][2] = 60,
- [0][0][1][0][RTW89_WW][4] = 60,
- [0][0][1][0][RTW89_WW][6] = 60,
- [0][0][1][0][RTW89_WW][8] = 60,
- [0][0][1][0][RTW89_WW][10] = 60,
- [0][0][1][0][RTW89_WW][12] = 60,
- [0][0][1][0][RTW89_WW][14] = 60,
- [0][0][1][0][RTW89_WW][15] = 60,
- [0][0][1][0][RTW89_WW][17] = 60,
- [0][0][1][0][RTW89_WW][19] = 60,
- [0][0][1][0][RTW89_WW][21] = 60,
- [0][0][1][0][RTW89_WW][23] = 60,
+ [0][0][1][0][RTW89_WW][0] = 50,
+ [0][0][1][0][RTW89_WW][2] = 50,
+ [0][0][1][0][RTW89_WW][4] = 50,
+ [0][0][1][0][RTW89_WW][6] = 50,
+ [0][0][1][0][RTW89_WW][8] = 50,
+ [0][0][1][0][RTW89_WW][10] = 50,
+ [0][0][1][0][RTW89_WW][12] = 50,
+ [0][0][1][0][RTW89_WW][14] = 50,
+ [0][0][1][0][RTW89_WW][15] = 66,
+ [0][0][1][0][RTW89_WW][17] = 66,
+ [0][0][1][0][RTW89_WW][19] = 66,
+ [0][0][1][0][RTW89_WW][21] = 66,
+ [0][0][1][0][RTW89_WW][23] = 66,
[0][0][1][0][RTW89_WW][25] = 66,
[0][0][1][0][RTW89_WW][27] = 66,
[0][0][1][0][RTW89_WW][29] = 66,
- [0][0][1][0][RTW89_WW][31] = 60,
- [0][0][1][0][RTW89_WW][33] = 60,
+ [0][0][1][0][RTW89_WW][31] = 66,
+ [0][0][1][0][RTW89_WW][33] = 66,
[0][0][1][0][RTW89_WW][35] = 60,
- [0][0][1][0][RTW89_WW][37] = 70,
+ [0][0][1][0][RTW89_WW][37] = 64,
[0][0][1][0][RTW89_WW][38] = 30,
[0][0][1][0][RTW89_WW][40] = 30,
[0][0][1][0][RTW89_WW][42] = 30,
[0][0][1][0][RTW89_WW][44] = 30,
[0][0][1][0][RTW89_WW][46] = 30,
- [0][0][1][0][RTW89_WW][48] = 70,
- [0][0][1][0][RTW89_WW][50] = 70,
- [0][0][1][0][RTW89_WW][52] = 70,
- [0][1][1][0][RTW89_WW][0] = 42,
- [0][1][1][0][RTW89_WW][2] = 42,
- [0][1][1][0][RTW89_WW][4] = 42,
- [0][1][1][0][RTW89_WW][6] = 42,
- [0][1][1][0][RTW89_WW][8] = 48,
- [0][1][1][0][RTW89_WW][10] = 48,
- [0][1][1][0][RTW89_WW][12] = 48,
- [0][1][1][0][RTW89_WW][14] = 48,
- [0][1][1][0][RTW89_WW][15] = 48,
- [0][1][1][0][RTW89_WW][17] = 48,
- [0][1][1][0][RTW89_WW][19] = 48,
- [0][1][1][0][RTW89_WW][21] = 48,
- [0][1][1][0][RTW89_WW][23] = 48,
+ [0][0][1][0][RTW89_WW][48] = 72,
+ [0][0][1][0][RTW89_WW][50] = 72,
+ [0][0][1][0][RTW89_WW][52] = 72,
+ [0][1][1][0][RTW89_WW][0] = 34,
+ [0][1][1][0][RTW89_WW][2] = 34,
+ [0][1][1][0][RTW89_WW][4] = 34,
+ [0][1][1][0][RTW89_WW][6] = 36,
+ [0][1][1][0][RTW89_WW][8] = 46,
+ [0][1][1][0][RTW89_WW][10] = 46,
+ [0][1][1][0][RTW89_WW][12] = 46,
+ [0][1][1][0][RTW89_WW][14] = 46,
+ [0][1][1][0][RTW89_WW][15] = 54,
+ [0][1][1][0][RTW89_WW][17] = 54,
+ [0][1][1][0][RTW89_WW][19] = 54,
+ [0][1][1][0][RTW89_WW][21] = 54,
+ [0][1][1][0][RTW89_WW][23] = 54,
[0][1][1][0][RTW89_WW][25] = 54,
[0][1][1][0][RTW89_WW][27] = 54,
[0][1][1][0][RTW89_WW][29] = 54,
- [0][1][1][0][RTW89_WW][31] = 48,
- [0][1][1][0][RTW89_WW][33] = 48,
- [0][1][1][0][RTW89_WW][35] = 48,
- [0][1][1][0][RTW89_WW][37] = 60,
+ [0][1][1][0][RTW89_WW][31] = 54,
+ [0][1][1][0][RTW89_WW][33] = 54,
+ [0][1][1][0][RTW89_WW][35] = 52,
+ [0][1][1][0][RTW89_WW][37] = 52,
[0][1][1][0][RTW89_WW][38] = 18,
- [0][1][1][0][RTW89_WW][40] = 16,
+ [0][1][1][0][RTW89_WW][40] = 18,
[0][1][1][0][RTW89_WW][42] = 18,
- [0][1][1][0][RTW89_WW][44] = 16,
+ [0][1][1][0][RTW89_WW][44] = 18,
[0][1][1][0][RTW89_WW][46] = 18,
[0][1][1][0][RTW89_WW][48] = 48,
[0][1][1][0][RTW89_WW][50] = 48,
[0][1][1][0][RTW89_WW][52] = 48,
- [0][0][2][0][RTW89_WW][0] = 62,
- [0][0][2][0][RTW89_WW][2] = 62,
- [0][0][2][0][RTW89_WW][4] = 62,
- [0][0][2][0][RTW89_WW][6] = 60,
- [0][0][2][0][RTW89_WW][8] = 58,
- [0][0][2][0][RTW89_WW][10] = 62,
- [0][0][2][0][RTW89_WW][12] = 62,
- [0][0][2][0][RTW89_WW][14] = 62,
- [0][0][2][0][RTW89_WW][15] = 62,
- [0][0][2][0][RTW89_WW][17] = 62,
- [0][0][2][0][RTW89_WW][19] = 62,
- [0][0][2][0][RTW89_WW][21] = 62,
- [0][0][2][0][RTW89_WW][23] = 62,
+ [0][0][2][0][RTW89_WW][0] = 52,
+ [0][0][2][0][RTW89_WW][2] = 52,
+ [0][0][2][0][RTW89_WW][4] = 52,
+ [0][0][2][0][RTW89_WW][6] = 52,
+ [0][0][2][0][RTW89_WW][8] = 52,
+ [0][0][2][0][RTW89_WW][10] = 52,
+ [0][0][2][0][RTW89_WW][12] = 52,
+ [0][0][2][0][RTW89_WW][14] = 52,
+ [0][0][2][0][RTW89_WW][15] = 66,
+ [0][0][2][0][RTW89_WW][17] = 66,
+ [0][0][2][0][RTW89_WW][19] = 66,
+ [0][0][2][0][RTW89_WW][21] = 66,
+ [0][0][2][0][RTW89_WW][23] = 66,
[0][0][2][0][RTW89_WW][25] = 66,
[0][0][2][0][RTW89_WW][27] = 66,
[0][0][2][0][RTW89_WW][29] = 66,
- [0][0][2][0][RTW89_WW][31] = 62,
- [0][0][2][0][RTW89_WW][33] = 62,
- [0][0][2][0][RTW89_WW][35] = 62,
- [0][0][2][0][RTW89_WW][37] = 70,
+ [0][0][2][0][RTW89_WW][31] = 66,
+ [0][0][2][0][RTW89_WW][33] = 66,
+ [0][0][2][0][RTW89_WW][35] = 56,
+ [0][0][2][0][RTW89_WW][37] = 64,
[0][0][2][0][RTW89_WW][38] = 30,
[0][0][2][0][RTW89_WW][40] = 30,
[0][0][2][0][RTW89_WW][42] = 30,
[0][0][2][0][RTW89_WW][44] = 30,
[0][0][2][0][RTW89_WW][46] = 30,
- [0][0][2][0][RTW89_WW][48] = 70,
- [0][0][2][0][RTW89_WW][50] = 70,
- [0][0][2][0][RTW89_WW][52] = 70,
- [0][1][2][0][RTW89_WW][0] = 44,
- [0][1][2][0][RTW89_WW][2] = 44,
- [0][1][2][0][RTW89_WW][4] = 44,
- [0][1][2][0][RTW89_WW][6] = 44,
- [0][1][2][0][RTW89_WW][8] = 42,
- [0][1][2][0][RTW89_WW][10] = 50,
- [0][1][2][0][RTW89_WW][12] = 50,
- [0][1][2][0][RTW89_WW][14] = 50,
- [0][1][2][0][RTW89_WW][15] = 50,
- [0][1][2][0][RTW89_WW][17] = 50,
- [0][1][2][0][RTW89_WW][19] = 50,
- [0][1][2][0][RTW89_WW][21] = 50,
- [0][1][2][0][RTW89_WW][23] = 50,
+ [0][0][2][0][RTW89_WW][48] = 72,
+ [0][0][2][0][RTW89_WW][50] = 72,
+ [0][0][2][0][RTW89_WW][52] = 72,
+ [0][1][2][0][RTW89_WW][0] = 36,
+ [0][1][2][0][RTW89_WW][2] = 36,
+ [0][1][2][0][RTW89_WW][4] = 36,
+ [0][1][2][0][RTW89_WW][6] = 38,
+ [0][1][2][0][RTW89_WW][8] = 40,
+ [0][1][2][0][RTW89_WW][10] = 40,
+ [0][1][2][0][RTW89_WW][12] = 40,
+ [0][1][2][0][RTW89_WW][14] = 40,
+ [0][1][2][0][RTW89_WW][15] = 54,
+ [0][1][2][0][RTW89_WW][17] = 54,
+ [0][1][2][0][RTW89_WW][19] = 54,
+ [0][1][2][0][RTW89_WW][21] = 54,
+ [0][1][2][0][RTW89_WW][23] = 54,
[0][1][2][0][RTW89_WW][25] = 54,
[0][1][2][0][RTW89_WW][27] = 54,
[0][1][2][0][RTW89_WW][29] = 54,
- [0][1][2][0][RTW89_WW][31] = 50,
- [0][1][2][0][RTW89_WW][33] = 50,
- [0][1][2][0][RTW89_WW][35] = 50,
- [0][1][2][0][RTW89_WW][37] = 62,
+ [0][1][2][0][RTW89_WW][31] = 54,
+ [0][1][2][0][RTW89_WW][33] = 54,
+ [0][1][2][0][RTW89_WW][35] = 46,
+ [0][1][2][0][RTW89_WW][37] = 52,
[0][1][2][0][RTW89_WW][38] = 18,
[0][1][2][0][RTW89_WW][40] = 18,
[0][1][2][0][RTW89_WW][42] = 18,
[0][1][2][0][RTW89_WW][44] = 18,
[0][1][2][0][RTW89_WW][46] = 18,
- [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][48] = 48,
[0][1][2][0][RTW89_WW][50] = 50,
- [0][1][2][0][RTW89_WW][52] = 50,
- [0][1][2][1][RTW89_WW][0] = 38,
- [0][1][2][1][RTW89_WW][2] = 38,
- [0][1][2][1][RTW89_WW][4] = 38,
- [0][1][2][1][RTW89_WW][6] = 38,
- [0][1][2][1][RTW89_WW][8] = 38,
- [0][1][2][1][RTW89_WW][10] = 38,
- [0][1][2][1][RTW89_WW][12] = 38,
- [0][1][2][1][RTW89_WW][14] = 38,
- [0][1][2][1][RTW89_WW][15] = 38,
- [0][1][2][1][RTW89_WW][17] = 38,
- [0][1][2][1][RTW89_WW][19] = 38,
- [0][1][2][1][RTW89_WW][21] = 38,
- [0][1][2][1][RTW89_WW][23] = 38,
+ [0][1][2][0][RTW89_WW][52] = 48,
+ [0][1][2][1][RTW89_WW][0] = 36,
+ [0][1][2][1][RTW89_WW][2] = 36,
+ [0][1][2][1][RTW89_WW][4] = 36,
+ [0][1][2][1][RTW89_WW][6] = 36,
+ [0][1][2][1][RTW89_WW][8] = 36,
+ [0][1][2][1][RTW89_WW][10] = 36,
+ [0][1][2][1][RTW89_WW][12] = 36,
+ [0][1][2][1][RTW89_WW][14] = 36,
+ [0][1][2][1][RTW89_WW][15] = 40,
+ [0][1][2][1][RTW89_WW][17] = 40,
+ [0][1][2][1][RTW89_WW][19] = 40,
+ [0][1][2][1][RTW89_WW][21] = 40,
+ [0][1][2][1][RTW89_WW][23] = 40,
[0][1][2][1][RTW89_WW][25] = 40,
[0][1][2][1][RTW89_WW][27] = 40,
[0][1][2][1][RTW89_WW][29] = 40,
- [0][1][2][1][RTW89_WW][31] = 38,
- [0][1][2][1][RTW89_WW][33] = 38,
- [0][1][2][1][RTW89_WW][35] = 38,
- [0][1][2][1][RTW89_WW][37] = 60,
+ [0][1][2][1][RTW89_WW][31] = 40,
+ [0][1][2][1][RTW89_WW][33] = 40,
+ [0][1][2][1][RTW89_WW][35] = 40,
+ [0][1][2][1][RTW89_WW][37] = 40,
[0][1][2][1][RTW89_WW][38] = 6,
[0][1][2][1][RTW89_WW][40] = 6,
[0][1][2][1][RTW89_WW][42] = 6,
[0][1][2][1][RTW89_WW][44] = 6,
[0][1][2][1][RTW89_WW][46] = 6,
- [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][48] = 48,
[0][1][2][1][RTW89_WW][50] = 50,
- [0][1][2][1][RTW89_WW][52] = 50,
- [1][0][2][0][RTW89_WW][1] = 58,
- [1][0][2][0][RTW89_WW][5] = 66,
- [1][0][2][0][RTW89_WW][9] = 66,
- [1][0][2][0][RTW89_WW][13] = 58,
+ [0][1][2][1][RTW89_WW][52] = 48,
+ [1][0][2][0][RTW89_WW][1] = 54,
+ [1][0][2][0][RTW89_WW][5] = 54,
+ [1][0][2][0][RTW89_WW][9] = 54,
+ [1][0][2][0][RTW89_WW][13] = 52,
[1][0][2][0][RTW89_WW][16] = 56,
- [1][0][2][0][RTW89_WW][20] = 66,
- [1][0][2][0][RTW89_WW][24] = 66,
+ [1][0][2][0][RTW89_WW][20] = 56,
+ [1][0][2][0][RTW89_WW][24] = 56,
[1][0][2][0][RTW89_WW][28] = 66,
- [1][0][2][0][RTW89_WW][32] = 66,
- [1][0][2][0][RTW89_WW][36] = 66,
+ [1][0][2][0][RTW89_WW][32] = 62,
+ [1][0][2][0][RTW89_WW][36] = 64,
[1][0][2][0][RTW89_WW][39] = 30,
[1][0][2][0][RTW89_WW][43] = 30,
[1][0][2][0][RTW89_WW][47] = 68,
[1][0][2][0][RTW89_WW][51] = 68,
- [1][1][2][0][RTW89_WW][1] = 48,
- [1][1][2][0][RTW89_WW][5] = 52,
- [1][1][2][0][RTW89_WW][9] = 52,
- [1][1][2][0][RTW89_WW][13] = 52,
- [1][1][2][0][RTW89_WW][16] = 48,
+ [1][1][2][0][RTW89_WW][1] = 42,
+ [1][1][2][0][RTW89_WW][5] = 42,
+ [1][1][2][0][RTW89_WW][9] = 42,
+ [1][1][2][0][RTW89_WW][13] = 42,
+ [1][1][2][0][RTW89_WW][16] = 54,
[1][1][2][0][RTW89_WW][20] = 54,
[1][1][2][0][RTW89_WW][24] = 54,
[1][1][2][0][RTW89_WW][28] = 54,
[1][1][2][0][RTW89_WW][32] = 54,
- [1][1][2][0][RTW89_WW][36] = 66,
+ [1][1][2][0][RTW89_WW][36] = 52,
[1][1][2][0][RTW89_WW][39] = 18,
[1][1][2][0][RTW89_WW][43] = 18,
- [1][1][2][0][RTW89_WW][47] = 60,
- [1][1][2][0][RTW89_WW][51] = 58,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 60,
[1][1][2][1][RTW89_WW][1] = 40,
[1][1][2][1][RTW89_WW][5] = 40,
[1][1][2][1][RTW89_WW][9] = 40,
@@ -15035,2082 +29517,3694 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][24] = 40,
[1][1][2][1][RTW89_WW][28] = 40,
[1][1][2][1][RTW89_WW][32] = 40,
- [1][1][2][1][RTW89_WW][36] = 60,
+ [1][1][2][1][RTW89_WW][36] = 40,
[1][1][2][1][RTW89_WW][39] = 6,
[1][1][2][1][RTW89_WW][43] = 6,
- [1][1][2][1][RTW89_WW][47] = 60,
- [1][1][2][1][RTW89_WW][51] = 58,
- [2][0][2][0][RTW89_WW][3] = 56,
- [2][0][2][0][RTW89_WW][11] = 58,
- [2][0][2][0][RTW89_WW][18] = 54,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 60,
+ [2][0][2][0][RTW89_WW][3] = 54,
+ [2][0][2][0][RTW89_WW][11] = 50,
+ [2][0][2][0][RTW89_WW][18] = 56,
[2][0][2][0][RTW89_WW][26] = 60,
[2][0][2][0][RTW89_WW][34] = 60,
[2][0][2][0][RTW89_WW][41] = 30,
- [2][0][2][0][RTW89_WW][49] = 56,
- [2][1][2][0][RTW89_WW][3] = 48,
- [2][1][2][0][RTW89_WW][11] = 52,
- [2][1][2][0][RTW89_WW][18] = 48,
- [2][1][2][0][RTW89_WW][26] = 54,
- [2][1][2][0][RTW89_WW][34] = 60,
+ [2][0][2][0][RTW89_WW][49] = 62,
+ [2][1][2][0][RTW89_WW][3] = 46,
+ [2][1][2][0][RTW89_WW][11] = 38,
+ [2][1][2][0][RTW89_WW][18] = 50,
+ [2][1][2][0][RTW89_WW][26] = 52,
+ [2][1][2][0][RTW89_WW][34] = 52,
[2][1][2][0][RTW89_WW][41] = 18,
- [2][1][2][0][RTW89_WW][49] = 50,
+ [2][1][2][0][RTW89_WW][49] = 62,
[2][1][2][1][RTW89_WW][3] = 40,
- [2][1][2][1][RTW89_WW][11] = 40,
+ [2][1][2][1][RTW89_WW][11] = 38,
[2][1][2][1][RTW89_WW][18] = 40,
[2][1][2][1][RTW89_WW][26] = 42,
- [2][1][2][1][RTW89_WW][34] = 60,
+ [2][1][2][1][RTW89_WW][34] = 40,
[2][1][2][1][RTW89_WW][41] = 6,
- [2][1][2][1][RTW89_WW][49] = 50,
- [3][0][2][0][RTW89_WW][7] = 38,
- [3][0][2][0][RTW89_WW][22] = 50,
- [3][0][2][0][RTW89_WW][45] = 0,
- [3][1][2][0][RTW89_WW][7] = 26,
- [3][1][2][0][RTW89_WW][22] = 42,
- [3][1][2][0][RTW89_WW][45] = 0,
- [3][1][2][1][RTW89_WW][7] = 14,
- [3][1][2][1][RTW89_WW][22] = 30,
- [3][1][2][1][RTW89_WW][45] = 0,
- [0][0][1][0][RTW89_FCC][0] = 70,
+ [2][1][2][1][RTW89_WW][49] = 62,
+ [3][0][2][0][RTW89_WW][7] = 40,
+ [3][0][2][0][RTW89_WW][22] = 42,
+ [3][0][2][0][RTW89_WW][45] = 52,
+ [3][1][2][0][RTW89_WW][7] = 32,
+ [3][1][2][0][RTW89_WW][22] = 36,
+ [3][1][2][0][RTW89_WW][45] = 46,
+ [3][1][2][1][RTW89_WW][7] = 32,
+ [3][1][2][1][RTW89_WW][22] = 36,
+ [3][1][2][1][RTW89_WW][45] = 46,
+ [0][0][1][0][RTW89_FCC][0] = 72,
[0][0][1][0][RTW89_ETSI][0] = 66,
[0][0][1][0][RTW89_MKK][0] = 66,
- [0][0][1][0][RTW89_IC][0] = 62,
- [0][0][1][0][RTW89_ACMA][0] = 60,
- [0][0][1][0][RTW89_FCC][2] = 70,
+ [0][0][1][0][RTW89_IC][0] = 60,
+ [0][0][1][0][RTW89_KCC][0] = 52,
+ [0][0][1][0][RTW89_ACMA][0] = 66,
+ [0][0][1][0][RTW89_CN][0] = 50,
+ [0][0][1][0][RTW89_UK][0] = 66,
+ [0][0][1][0][RTW89_FCC][2] = 72,
[0][0][1][0][RTW89_ETSI][2] = 66,
[0][0][1][0][RTW89_MKK][2] = 66,
- [0][0][1][0][RTW89_IC][2] = 62,
- [0][0][1][0][RTW89_ACMA][2] = 60,
- [0][0][1][0][RTW89_FCC][4] = 70,
+ [0][0][1][0][RTW89_IC][2] = 60,
+ [0][0][1][0][RTW89_KCC][2] = 52,
+ [0][0][1][0][RTW89_ACMA][2] = 66,
+ [0][0][1][0][RTW89_CN][2] = 50,
+ [0][0][1][0][RTW89_UK][2] = 66,
+ [0][0][1][0][RTW89_FCC][4] = 72,
[0][0][1][0][RTW89_ETSI][4] = 66,
[0][0][1][0][RTW89_MKK][4] = 66,
- [0][0][1][0][RTW89_IC][4] = 62,
- [0][0][1][0][RTW89_ACMA][4] = 60,
- [0][0][1][0][RTW89_FCC][6] = 70,
+ [0][0][1][0][RTW89_IC][4] = 60,
+ [0][0][1][0][RTW89_KCC][4] = 52,
+ [0][0][1][0][RTW89_ACMA][4] = 66,
+ [0][0][1][0][RTW89_CN][4] = 50,
+ [0][0][1][0][RTW89_UK][4] = 66,
+ [0][0][1][0][RTW89_FCC][6] = 72,
[0][0][1][0][RTW89_ETSI][6] = 66,
[0][0][1][0][RTW89_MKK][6] = 66,
- [0][0][1][0][RTW89_IC][6] = 62,
- [0][0][1][0][RTW89_ACMA][6] = 60,
- [0][0][1][0][RTW89_FCC][8] = 70,
+ [0][0][1][0][RTW89_IC][6] = 58,
+ [0][0][1][0][RTW89_KCC][6] = 62,
+ [0][0][1][0][RTW89_ACMA][6] = 66,
+ [0][0][1][0][RTW89_CN][6] = 50,
+ [0][0][1][0][RTW89_UK][6] = 66,
+ [0][0][1][0][RTW89_FCC][8] = 72,
[0][0][1][0][RTW89_ETSI][8] = 66,
[0][0][1][0][RTW89_MKK][8] = 66,
- [0][0][1][0][RTW89_IC][8] = 66,
- [0][0][1][0][RTW89_ACMA][8] = 60,
- [0][0][1][0][RTW89_FCC][10] = 70,
+ [0][0][1][0][RTW89_IC][8] = 64,
+ [0][0][1][0][RTW89_KCC][8] = 70,
+ [0][0][1][0][RTW89_ACMA][8] = 66,
+ [0][0][1][0][RTW89_CN][8] = 50,
+ [0][0][1][0][RTW89_UK][8] = 66,
+ [0][0][1][0][RTW89_FCC][10] = 72,
[0][0][1][0][RTW89_ETSI][10] = 66,
[0][0][1][0][RTW89_MKK][10] = 66,
- [0][0][1][0][RTW89_IC][10] = 66,
- [0][0][1][0][RTW89_ACMA][10] = 60,
- [0][0][1][0][RTW89_FCC][12] = 70,
+ [0][0][1][0][RTW89_IC][10] = 64,
+ [0][0][1][0][RTW89_KCC][10] = 70,
+ [0][0][1][0][RTW89_ACMA][10] = 66,
+ [0][0][1][0][RTW89_CN][10] = 50,
+ [0][0][1][0][RTW89_UK][10] = 66,
+ [0][0][1][0][RTW89_FCC][12] = 72,
[0][0][1][0][RTW89_ETSI][12] = 66,
[0][0][1][0][RTW89_MKK][12] = 66,
- [0][0][1][0][RTW89_IC][12] = 66,
- [0][0][1][0][RTW89_ACMA][12] = 60,
+ [0][0][1][0][RTW89_IC][12] = 64,
+ [0][0][1][0][RTW89_KCC][12] = 66,
+ [0][0][1][0][RTW89_ACMA][12] = 66,
+ [0][0][1][0][RTW89_CN][12] = 50,
+ [0][0][1][0][RTW89_UK][12] = 66,
[0][0][1][0][RTW89_FCC][14] = 70,
[0][0][1][0][RTW89_ETSI][14] = 66,
[0][0][1][0][RTW89_MKK][14] = 66,
- [0][0][1][0][RTW89_IC][14] = 66,
- [0][0][1][0][RTW89_ACMA][14] = 60,
- [0][0][1][0][RTW89_FCC][15] = 68,
+ [0][0][1][0][RTW89_IC][14] = 64,
+ [0][0][1][0][RTW89_KCC][14] = 66,
+ [0][0][1][0][RTW89_ACMA][14] = 66,
+ [0][0][1][0][RTW89_CN][14] = 50,
+ [0][0][1][0][RTW89_UK][14] = 66,
+ [0][0][1][0][RTW89_FCC][15] = 72,
[0][0][1][0][RTW89_ETSI][15] = 66,
[0][0][1][0][RTW89_MKK][15] = 70,
- [0][0][1][0][RTW89_IC][15] = 70,
- [0][0][1][0][RTW89_ACMA][15] = 60,
- [0][0][1][0][RTW89_FCC][17] = 70,
+ [0][0][1][0][RTW89_IC][15] = 72,
+ [0][0][1][0][RTW89_KCC][15] = 70,
+ [0][0][1][0][RTW89_ACMA][15] = 66,
+ [0][0][1][0][RTW89_CN][15] = 127,
+ [0][0][1][0][RTW89_UK][15] = 66,
+ [0][0][1][0][RTW89_FCC][17] = 72,
[0][0][1][0][RTW89_ETSI][17] = 66,
[0][0][1][0][RTW89_MKK][17] = 70,
- [0][0][1][0][RTW89_IC][17] = 70,
- [0][0][1][0][RTW89_ACMA][17] = 60,
- [0][0][1][0][RTW89_FCC][19] = 70,
+ [0][0][1][0][RTW89_IC][17] = 72,
+ [0][0][1][0][RTW89_KCC][17] = 70,
+ [0][0][1][0][RTW89_ACMA][17] = 66,
+ [0][0][1][0][RTW89_CN][17] = 127,
+ [0][0][1][0][RTW89_UK][17] = 66,
+ [0][0][1][0][RTW89_FCC][19] = 72,
[0][0][1][0][RTW89_ETSI][19] = 66,
[0][0][1][0][RTW89_MKK][19] = 70,
- [0][0][1][0][RTW89_IC][19] = 70,
- [0][0][1][0][RTW89_ACMA][19] = 60,
- [0][0][1][0][RTW89_FCC][21] = 70,
+ [0][0][1][0][RTW89_IC][19] = 72,
+ [0][0][1][0][RTW89_KCC][19] = 70,
+ [0][0][1][0][RTW89_ACMA][19] = 66,
+ [0][0][1][0][RTW89_CN][19] = 127,
+ [0][0][1][0][RTW89_UK][19] = 66,
+ [0][0][1][0][RTW89_FCC][21] = 72,
[0][0][1][0][RTW89_ETSI][21] = 66,
[0][0][1][0][RTW89_MKK][21] = 70,
- [0][0][1][0][RTW89_IC][21] = 70,
- [0][0][1][0][RTW89_ACMA][21] = 60,
- [0][0][1][0][RTW89_FCC][23] = 70,
+ [0][0][1][0][RTW89_IC][21] = 72,
+ [0][0][1][0][RTW89_KCC][21] = 70,
+ [0][0][1][0][RTW89_ACMA][21] = 66,
+ [0][0][1][0][RTW89_CN][21] = 127,
+ [0][0][1][0][RTW89_UK][21] = 66,
+ [0][0][1][0][RTW89_FCC][23] = 72,
[0][0][1][0][RTW89_ETSI][23] = 66,
[0][0][1][0][RTW89_MKK][23] = 70,
- [0][0][1][0][RTW89_IC][23] = 70,
- [0][0][1][0][RTW89_ACMA][23] = 60,
- [0][0][1][0][RTW89_FCC][25] = 70,
+ [0][0][1][0][RTW89_IC][23] = 72,
+ [0][0][1][0][RTW89_KCC][23] = 70,
+ [0][0][1][0][RTW89_ACMA][23] = 66,
+ [0][0][1][0][RTW89_CN][23] = 127,
+ [0][0][1][0][RTW89_UK][23] = 66,
+ [0][0][1][0][RTW89_FCC][25] = 72,
[0][0][1][0][RTW89_ETSI][25] = 66,
[0][0][1][0][RTW89_MKK][25] = 70,
[0][0][1][0][RTW89_IC][25] = 127,
+ [0][0][1][0][RTW89_KCC][25] = 70,
[0][0][1][0][RTW89_ACMA][25] = 127,
- [0][0][1][0][RTW89_FCC][27] = 70,
+ [0][0][1][0][RTW89_CN][25] = 127,
+ [0][0][1][0][RTW89_UK][25] = 66,
+ [0][0][1][0][RTW89_FCC][27] = 72,
[0][0][1][0][RTW89_ETSI][27] = 66,
[0][0][1][0][RTW89_MKK][27] = 70,
[0][0][1][0][RTW89_IC][27] = 127,
+ [0][0][1][0][RTW89_KCC][27] = 70,
[0][0][1][0][RTW89_ACMA][27] = 127,
- [0][0][1][0][RTW89_FCC][29] = 70,
+ [0][0][1][0][RTW89_CN][27] = 127,
+ [0][0][1][0][RTW89_UK][27] = 66,
+ [0][0][1][0][RTW89_FCC][29] = 72,
[0][0][1][0][RTW89_ETSI][29] = 66,
[0][0][1][0][RTW89_MKK][29] = 70,
[0][0][1][0][RTW89_IC][29] = 127,
+ [0][0][1][0][RTW89_KCC][29] = 70,
[0][0][1][0][RTW89_ACMA][29] = 127,
- [0][0][1][0][RTW89_FCC][31] = 70,
+ [0][0][1][0][RTW89_CN][29] = 127,
+ [0][0][1][0][RTW89_UK][29] = 66,
+ [0][0][1][0][RTW89_FCC][31] = 72,
[0][0][1][0][RTW89_ETSI][31] = 66,
[0][0][1][0][RTW89_MKK][31] = 70,
- [0][0][1][0][RTW89_IC][31] = 70,
- [0][0][1][0][RTW89_ACMA][31] = 60,
- [0][0][1][0][RTW89_FCC][33] = 70,
+ [0][0][1][0][RTW89_IC][31] = 72,
+ [0][0][1][0][RTW89_KCC][31] = 70,
+ [0][0][1][0][RTW89_ACMA][31] = 66,
+ [0][0][1][0][RTW89_CN][31] = 127,
+ [0][0][1][0][RTW89_UK][31] = 66,
+ [0][0][1][0][RTW89_FCC][33] = 72,
[0][0][1][0][RTW89_ETSI][33] = 66,
[0][0][1][0][RTW89_MKK][33] = 70,
- [0][0][1][0][RTW89_IC][33] = 70,
- [0][0][1][0][RTW89_ACMA][33] = 60,
- [0][0][1][0][RTW89_FCC][35] = 62,
+ [0][0][1][0][RTW89_IC][33] = 72,
+ [0][0][1][0][RTW89_KCC][33] = 70,
+ [0][0][1][0][RTW89_ACMA][33] = 66,
+ [0][0][1][0][RTW89_CN][33] = 127,
+ [0][0][1][0][RTW89_UK][33] = 66,
+ [0][0][1][0][RTW89_FCC][35] = 60,
[0][0][1][0][RTW89_ETSI][35] = 66,
[0][0][1][0][RTW89_MKK][35] = 70,
- [0][0][1][0][RTW89_IC][35] = 70,
- [0][0][1][0][RTW89_ACMA][35] = 60,
- [0][0][1][0][RTW89_FCC][37] = 70,
+ [0][0][1][0][RTW89_IC][35] = 60,
+ [0][0][1][0][RTW89_KCC][35] = 70,
+ [0][0][1][0][RTW89_ACMA][35] = 66,
+ [0][0][1][0][RTW89_CN][35] = 127,
+ [0][0][1][0][RTW89_UK][35] = 66,
+ [0][0][1][0][RTW89_FCC][37] = 72,
[0][0][1][0][RTW89_ETSI][37] = 127,
[0][0][1][0][RTW89_MKK][37] = 70,
- [0][0][1][0][RTW89_IC][37] = 70,
+ [0][0][1][0][RTW89_IC][37] = 72,
+ [0][0][1][0][RTW89_KCC][37] = 70,
[0][0][1][0][RTW89_ACMA][37] = 70,
- [0][0][1][0][RTW89_FCC][38] = 70,
+ [0][0][1][0][RTW89_CN][37] = 127,
+ [0][0][1][0][RTW89_UK][37] = 64,
+ [0][0][1][0][RTW89_FCC][38] = 72,
[0][0][1][0][RTW89_ETSI][38] = 30,
[0][0][1][0][RTW89_MKK][38] = 127,
- [0][0][1][0][RTW89_IC][38] = 70,
+ [0][0][1][0][RTW89_IC][38] = 72,
+ [0][0][1][0][RTW89_KCC][38] = 62,
[0][0][1][0][RTW89_ACMA][38] = 70,
- [0][0][1][0][RTW89_FCC][40] = 70,
+ [0][0][1][0][RTW89_CN][38] = 68,
+ [0][0][1][0][RTW89_UK][38] = 64,
+ [0][0][1][0][RTW89_FCC][40] = 72,
[0][0][1][0][RTW89_ETSI][40] = 30,
[0][0][1][0][RTW89_MKK][40] = 127,
- [0][0][1][0][RTW89_IC][40] = 70,
+ [0][0][1][0][RTW89_IC][40] = 72,
+ [0][0][1][0][RTW89_KCC][40] = 62,
[0][0][1][0][RTW89_ACMA][40] = 70,
- [0][0][1][0][RTW89_FCC][42] = 70,
+ [0][0][1][0][RTW89_CN][40] = 68,
+ [0][0][1][0][RTW89_UK][40] = 64,
+ [0][0][1][0][RTW89_FCC][42] = 72,
[0][0][1][0][RTW89_ETSI][42] = 30,
[0][0][1][0][RTW89_MKK][42] = 127,
- [0][0][1][0][RTW89_IC][42] = 70,
+ [0][0][1][0][RTW89_IC][42] = 72,
+ [0][0][1][0][RTW89_KCC][42] = 62,
[0][0][1][0][RTW89_ACMA][42] = 70,
- [0][0][1][0][RTW89_FCC][44] = 70,
+ [0][0][1][0][RTW89_CN][42] = 68,
+ [0][0][1][0][RTW89_UK][42] = 64,
+ [0][0][1][0][RTW89_FCC][44] = 72,
[0][0][1][0][RTW89_ETSI][44] = 30,
[0][0][1][0][RTW89_MKK][44] = 127,
- [0][0][1][0][RTW89_IC][44] = 70,
+ [0][0][1][0][RTW89_IC][44] = 72,
+ [0][0][1][0][RTW89_KCC][44] = 62,
[0][0][1][0][RTW89_ACMA][44] = 70,
- [0][0][1][0][RTW89_FCC][46] = 70,
+ [0][0][1][0][RTW89_CN][44] = 68,
+ [0][0][1][0][RTW89_UK][44] = 64,
+ [0][0][1][0][RTW89_FCC][46] = 72,
[0][0][1][0][RTW89_ETSI][46] = 30,
[0][0][1][0][RTW89_MKK][46] = 127,
- [0][0][1][0][RTW89_IC][46] = 70,
+ [0][0][1][0][RTW89_IC][46] = 72,
+ [0][0][1][0][RTW89_KCC][46] = 62,
[0][0][1][0][RTW89_ACMA][46] = 70,
- [0][0][1][0][RTW89_FCC][48] = 70,
+ [0][0][1][0][RTW89_CN][46] = 68,
+ [0][0][1][0][RTW89_UK][46] = 64,
+ [0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
[0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
- [0][0][1][0][RTW89_FCC][50] = 70,
+ [0][0][1][0][RTW89_CN][48] = 127,
+ [0][0][1][0][RTW89_UK][48] = 127,
+ [0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
[0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
- [0][0][1][0][RTW89_FCC][52] = 70,
+ [0][0][1][0][RTW89_CN][50] = 127,
+ [0][0][1][0][RTW89_UK][50] = 127,
+ [0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
[0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
+ [0][0][1][0][RTW89_CN][52] = 127,
+ [0][0][1][0][RTW89_UK][52] = 127,
[0][1][1][0][RTW89_FCC][0] = 60,
[0][1][1][0][RTW89_ETSI][0] = 54,
[0][1][1][0][RTW89_MKK][0] = 54,
- [0][1][1][0][RTW89_IC][0] = 42,
- [0][1][1][0][RTW89_ACMA][0] = 48,
+ [0][1][1][0][RTW89_IC][0] = 34,
+ [0][1][1][0][RTW89_KCC][0] = 40,
+ [0][1][1][0][RTW89_ACMA][0] = 54,
+ [0][1][1][0][RTW89_CN][0] = 46,
+ [0][1][1][0][RTW89_UK][0] = 54,
[0][1][1][0][RTW89_FCC][2] = 60,
[0][1][1][0][RTW89_ETSI][2] = 54,
[0][1][1][0][RTW89_MKK][2] = 54,
- [0][1][1][0][RTW89_IC][2] = 42,
- [0][1][1][0][RTW89_ACMA][2] = 48,
+ [0][1][1][0][RTW89_IC][2] = 34,
+ [0][1][1][0][RTW89_KCC][2] = 40,
+ [0][1][1][0][RTW89_ACMA][2] = 54,
+ [0][1][1][0][RTW89_CN][2] = 46,
+ [0][1][1][0][RTW89_UK][2] = 54,
[0][1][1][0][RTW89_FCC][4] = 60,
[0][1][1][0][RTW89_ETSI][4] = 54,
[0][1][1][0][RTW89_MKK][4] = 54,
- [0][1][1][0][RTW89_IC][4] = 42,
- [0][1][1][0][RTW89_ACMA][4] = 48,
+ [0][1][1][0][RTW89_IC][4] = 34,
+ [0][1][1][0][RTW89_KCC][4] = 40,
+ [0][1][1][0][RTW89_ACMA][4] = 54,
+ [0][1][1][0][RTW89_CN][4] = 46,
+ [0][1][1][0][RTW89_UK][4] = 54,
[0][1][1][0][RTW89_FCC][6] = 60,
[0][1][1][0][RTW89_ETSI][6] = 54,
[0][1][1][0][RTW89_MKK][6] = 54,
- [0][1][1][0][RTW89_IC][6] = 42,
- [0][1][1][0][RTW89_ACMA][6] = 48,
- [0][1][1][0][RTW89_FCC][8] = 60,
+ [0][1][1][0][RTW89_IC][6] = 36,
+ [0][1][1][0][RTW89_KCC][6] = 60,
+ [0][1][1][0][RTW89_ACMA][6] = 54,
+ [0][1][1][0][RTW89_CN][6] = 46,
+ [0][1][1][0][RTW89_UK][6] = 54,
+ [0][1][1][0][RTW89_FCC][8] = 62,
[0][1][1][0][RTW89_ETSI][8] = 54,
[0][1][1][0][RTW89_MKK][8] = 52,
- [0][1][1][0][RTW89_IC][8] = 54,
- [0][1][1][0][RTW89_ACMA][8] = 48,
- [0][1][1][0][RTW89_FCC][10] = 60,
+ [0][1][1][0][RTW89_IC][8] = 52,
+ [0][1][1][0][RTW89_KCC][8] = 60,
+ [0][1][1][0][RTW89_ACMA][8] = 54,
+ [0][1][1][0][RTW89_CN][8] = 46,
+ [0][1][1][0][RTW89_UK][8] = 54,
+ [0][1][1][0][RTW89_FCC][10] = 62,
[0][1][1][0][RTW89_ETSI][10] = 54,
[0][1][1][0][RTW89_MKK][10] = 54,
- [0][1][1][0][RTW89_IC][10] = 54,
- [0][1][1][0][RTW89_ACMA][10] = 48,
- [0][1][1][0][RTW89_FCC][12] = 60,
+ [0][1][1][0][RTW89_IC][10] = 52,
+ [0][1][1][0][RTW89_KCC][10] = 60,
+ [0][1][1][0][RTW89_ACMA][10] = 54,
+ [0][1][1][0][RTW89_CN][10] = 46,
+ [0][1][1][0][RTW89_UK][10] = 54,
+ [0][1][1][0][RTW89_FCC][12] = 62,
[0][1][1][0][RTW89_ETSI][12] = 54,
[0][1][1][0][RTW89_MKK][12] = 54,
- [0][1][1][0][RTW89_IC][12] = 54,
- [0][1][1][0][RTW89_ACMA][12] = 48,
+ [0][1][1][0][RTW89_IC][12] = 52,
+ [0][1][1][0][RTW89_KCC][12] = 60,
+ [0][1][1][0][RTW89_ACMA][12] = 54,
+ [0][1][1][0][RTW89_CN][12] = 46,
+ [0][1][1][0][RTW89_UK][12] = 54,
[0][1][1][0][RTW89_FCC][14] = 60,
[0][1][1][0][RTW89_ETSI][14] = 54,
[0][1][1][0][RTW89_MKK][14] = 54,
- [0][1][1][0][RTW89_IC][14] = 54,
- [0][1][1][0][RTW89_ACMA][14] = 48,
- [0][1][1][0][RTW89_FCC][15] = 58,
+ [0][1][1][0][RTW89_IC][14] = 52,
+ [0][1][1][0][RTW89_KCC][14] = 60,
+ [0][1][1][0][RTW89_ACMA][14] = 54,
+ [0][1][1][0][RTW89_CN][14] = 46,
+ [0][1][1][0][RTW89_UK][14] = 54,
+ [0][1][1][0][RTW89_FCC][15] = 60,
[0][1][1][0][RTW89_ETSI][15] = 54,
[0][1][1][0][RTW89_MKK][15] = 70,
- [0][1][1][0][RTW89_IC][15] = 68,
- [0][1][1][0][RTW89_ACMA][15] = 48,
+ [0][1][1][0][RTW89_IC][15] = 60,
+ [0][1][1][0][RTW89_KCC][15] = 60,
+ [0][1][1][0][RTW89_ACMA][15] = 54,
+ [0][1][1][0][RTW89_CN][15] = 127,
+ [0][1][1][0][RTW89_UK][15] = 54,
[0][1][1][0][RTW89_FCC][17] = 60,
[0][1][1][0][RTW89_ETSI][17] = 54,
[0][1][1][0][RTW89_MKK][17] = 70,
- [0][1][1][0][RTW89_IC][17] = 70,
- [0][1][1][0][RTW89_ACMA][17] = 48,
+ [0][1][1][0][RTW89_IC][17] = 60,
+ [0][1][1][0][RTW89_KCC][17] = 60,
+ [0][1][1][0][RTW89_ACMA][17] = 54,
+ [0][1][1][0][RTW89_CN][17] = 127,
+ [0][1][1][0][RTW89_UK][17] = 54,
[0][1][1][0][RTW89_FCC][19] = 60,
[0][1][1][0][RTW89_ETSI][19] = 54,
[0][1][1][0][RTW89_MKK][19] = 70,
- [0][1][1][0][RTW89_IC][19] = 70,
- [0][1][1][0][RTW89_ACMA][19] = 48,
+ [0][1][1][0][RTW89_IC][19] = 60,
+ [0][1][1][0][RTW89_KCC][19] = 60,
+ [0][1][1][0][RTW89_ACMA][19] = 54,
+ [0][1][1][0][RTW89_CN][19] = 127,
+ [0][1][1][0][RTW89_UK][19] = 54,
[0][1][1][0][RTW89_FCC][21] = 60,
[0][1][1][0][RTW89_ETSI][21] = 54,
[0][1][1][0][RTW89_MKK][21] = 70,
- [0][1][1][0][RTW89_IC][21] = 70,
- [0][1][1][0][RTW89_ACMA][21] = 48,
+ [0][1][1][0][RTW89_IC][21] = 60,
+ [0][1][1][0][RTW89_KCC][21] = 60,
+ [0][1][1][0][RTW89_ACMA][21] = 54,
+ [0][1][1][0][RTW89_CN][21] = 127,
+ [0][1][1][0][RTW89_UK][21] = 54,
[0][1][1][0][RTW89_FCC][23] = 60,
[0][1][1][0][RTW89_ETSI][23] = 54,
[0][1][1][0][RTW89_MKK][23] = 70,
- [0][1][1][0][RTW89_IC][23] = 70,
- [0][1][1][0][RTW89_ACMA][23] = 48,
+ [0][1][1][0][RTW89_IC][23] = 60,
+ [0][1][1][0][RTW89_KCC][23] = 60,
+ [0][1][1][0][RTW89_ACMA][23] = 54,
+ [0][1][1][0][RTW89_CN][23] = 127,
+ [0][1][1][0][RTW89_UK][23] = 54,
[0][1][1][0][RTW89_FCC][25] = 60,
[0][1][1][0][RTW89_ETSI][25] = 54,
[0][1][1][0][RTW89_MKK][25] = 70,
[0][1][1][0][RTW89_IC][25] = 127,
+ [0][1][1][0][RTW89_KCC][25] = 60,
[0][1][1][0][RTW89_ACMA][25] = 127,
+ [0][1][1][0][RTW89_CN][25] = 127,
+ [0][1][1][0][RTW89_UK][25] = 54,
[0][1][1][0][RTW89_FCC][27] = 60,
[0][1][1][0][RTW89_ETSI][27] = 54,
[0][1][1][0][RTW89_MKK][27] = 70,
[0][1][1][0][RTW89_IC][27] = 127,
+ [0][1][1][0][RTW89_KCC][27] = 60,
[0][1][1][0][RTW89_ACMA][27] = 127,
+ [0][1][1][0][RTW89_CN][27] = 127,
+ [0][1][1][0][RTW89_UK][27] = 54,
[0][1][1][0][RTW89_FCC][29] = 60,
[0][1][1][0][RTW89_ETSI][29] = 54,
[0][1][1][0][RTW89_MKK][29] = 70,
[0][1][1][0][RTW89_IC][29] = 127,
+ [0][1][1][0][RTW89_KCC][29] = 60,
[0][1][1][0][RTW89_ACMA][29] = 127,
+ [0][1][1][0][RTW89_CN][29] = 127,
+ [0][1][1][0][RTW89_UK][29] = 54,
[0][1][1][0][RTW89_FCC][31] = 60,
[0][1][1][0][RTW89_ETSI][31] = 54,
[0][1][1][0][RTW89_MKK][31] = 70,
- [0][1][1][0][RTW89_IC][31] = 70,
- [0][1][1][0][RTW89_ACMA][31] = 48,
+ [0][1][1][0][RTW89_IC][31] = 60,
+ [0][1][1][0][RTW89_KCC][31] = 58,
+ [0][1][1][0][RTW89_ACMA][31] = 54,
+ [0][1][1][0][RTW89_CN][31] = 127,
+ [0][1][1][0][RTW89_UK][31] = 54,
[0][1][1][0][RTW89_FCC][33] = 60,
[0][1][1][0][RTW89_ETSI][33] = 54,
[0][1][1][0][RTW89_MKK][33] = 70,
- [0][1][1][0][RTW89_IC][33] = 70,
- [0][1][1][0][RTW89_ACMA][33] = 48,
- [0][1][1][0][RTW89_FCC][35] = 58,
+ [0][1][1][0][RTW89_IC][33] = 60,
+ [0][1][1][0][RTW89_KCC][33] = 58,
+ [0][1][1][0][RTW89_ACMA][33] = 54,
+ [0][1][1][0][RTW89_CN][33] = 127,
+ [0][1][1][0][RTW89_UK][33] = 54,
+ [0][1][1][0][RTW89_FCC][35] = 52,
[0][1][1][0][RTW89_ETSI][35] = 54,
[0][1][1][0][RTW89_MKK][35] = 70,
- [0][1][1][0][RTW89_IC][35] = 68,
- [0][1][1][0][RTW89_ACMA][35] = 48,
- [0][1][1][0][RTW89_FCC][37] = 60,
+ [0][1][1][0][RTW89_IC][35] = 52,
+ [0][1][1][0][RTW89_KCC][35] = 58,
+ [0][1][1][0][RTW89_ACMA][35] = 54,
+ [0][1][1][0][RTW89_CN][35] = 127,
+ [0][1][1][0][RTW89_UK][35] = 54,
+ [0][1][1][0][RTW89_FCC][37] = 62,
[0][1][1][0][RTW89_ETSI][37] = 127,
[0][1][1][0][RTW89_MKK][37] = 70,
- [0][1][1][0][RTW89_IC][37] = 70,
- [0][1][1][0][RTW89_ACMA][37] = 70,
- [0][1][1][0][RTW89_FCC][38] = 70,
+ [0][1][1][0][RTW89_IC][37] = 62,
+ [0][1][1][0][RTW89_KCC][37] = 58,
+ [0][1][1][0][RTW89_ACMA][37] = 64,
+ [0][1][1][0][RTW89_CN][37] = 127,
+ [0][1][1][0][RTW89_UK][37] = 52,
+ [0][1][1][0][RTW89_FCC][38] = 72,
[0][1][1][0][RTW89_ETSI][38] = 18,
[0][1][1][0][RTW89_MKK][38] = 127,
- [0][1][1][0][RTW89_IC][38] = 70,
+ [0][1][1][0][RTW89_IC][38] = 72,
+ [0][1][1][0][RTW89_KCC][38] = 60,
[0][1][1][0][RTW89_ACMA][38] = 70,
- [0][1][1][0][RTW89_FCC][40] = 70,
+ [0][1][1][0][RTW89_CN][38] = 64,
+ [0][1][1][0][RTW89_UK][38] = 52,
+ [0][1][1][0][RTW89_FCC][40] = 72,
[0][1][1][0][RTW89_ETSI][40] = 18,
[0][1][1][0][RTW89_MKK][40] = 127,
- [0][1][1][0][RTW89_IC][40] = 70,
- [0][1][1][0][RTW89_ACMA][40] = 16,
- [0][1][1][0][RTW89_FCC][42] = 70,
+ [0][1][1][0][RTW89_IC][40] = 72,
+ [0][1][1][0][RTW89_KCC][40] = 60,
+ [0][1][1][0][RTW89_ACMA][40] = 70,
+ [0][1][1][0][RTW89_CN][40] = 64,
+ [0][1][1][0][RTW89_UK][40] = 52,
+ [0][1][1][0][RTW89_FCC][42] = 72,
[0][1][1][0][RTW89_ETSI][42] = 18,
[0][1][1][0][RTW89_MKK][42] = 127,
- [0][1][1][0][RTW89_IC][42] = 70,
+ [0][1][1][0][RTW89_IC][42] = 72,
+ [0][1][1][0][RTW89_KCC][42] = 60,
[0][1][1][0][RTW89_ACMA][42] = 70,
- [0][1][1][0][RTW89_FCC][44] = 70,
+ [0][1][1][0][RTW89_CN][42] = 64,
+ [0][1][1][0][RTW89_UK][42] = 52,
+ [0][1][1][0][RTW89_FCC][44] = 72,
[0][1][1][0][RTW89_ETSI][44] = 18,
[0][1][1][0][RTW89_MKK][44] = 127,
- [0][1][1][0][RTW89_IC][44] = 70,
- [0][1][1][0][RTW89_ACMA][44] = 16,
- [0][1][1][0][RTW89_FCC][46] = 70,
+ [0][1][1][0][RTW89_IC][44] = 72,
+ [0][1][1][0][RTW89_KCC][44] = 60,
+ [0][1][1][0][RTW89_ACMA][44] = 70,
+ [0][1][1][0][RTW89_CN][44] = 60,
+ [0][1][1][0][RTW89_UK][44] = 52,
+ [0][1][1][0][RTW89_FCC][46] = 72,
[0][1][1][0][RTW89_ETSI][46] = 18,
[0][1][1][0][RTW89_MKK][46] = 127,
- [0][1][1][0][RTW89_IC][46] = 70,
+ [0][1][1][0][RTW89_IC][46] = 72,
+ [0][1][1][0][RTW89_KCC][46] = 60,
[0][1][1][0][RTW89_ACMA][46] = 70,
+ [0][1][1][0][RTW89_CN][46] = 60,
+ [0][1][1][0][RTW89_UK][46] = 52,
[0][1][1][0][RTW89_FCC][48] = 48,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
[0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
+ [0][1][1][0][RTW89_CN][48] = 127,
+ [0][1][1][0][RTW89_UK][48] = 127,
[0][1][1][0][RTW89_FCC][50] = 48,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
[0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
+ [0][1][1][0][RTW89_CN][50] = 127,
+ [0][1][1][0][RTW89_UK][50] = 127,
[0][1][1][0][RTW89_FCC][52] = 48,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
[0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
+ [0][1][1][0][RTW89_CN][52] = 127,
+ [0][1][1][0][RTW89_UK][52] = 127,
[0][0][2][0][RTW89_FCC][0] = 70,
[0][0][2][0][RTW89_ETSI][0] = 66,
[0][0][2][0][RTW89_MKK][0] = 68,
- [0][0][2][0][RTW89_IC][0] = 66,
- [0][0][2][0][RTW89_ACMA][0] = 62,
- [0][0][2][0][RTW89_FCC][2] = 70,
+ [0][0][2][0][RTW89_IC][0] = 60,
+ [0][0][2][0][RTW89_KCC][0] = 54,
+ [0][0][2][0][RTW89_ACMA][0] = 66,
+ [0][0][2][0][RTW89_CN][0] = 52,
+ [0][0][2][0][RTW89_UK][0] = 66,
+ [0][0][2][0][RTW89_FCC][2] = 72,
[0][0][2][0][RTW89_ETSI][2] = 66,
[0][0][2][0][RTW89_MKK][2] = 68,
- [0][0][2][0][RTW89_IC][2] = 66,
- [0][0][2][0][RTW89_ACMA][2] = 62,
- [0][0][2][0][RTW89_FCC][4] = 70,
+ [0][0][2][0][RTW89_IC][2] = 60,
+ [0][0][2][0][RTW89_KCC][2] = 54,
+ [0][0][2][0][RTW89_ACMA][2] = 66,
+ [0][0][2][0][RTW89_CN][2] = 52,
+ [0][0][2][0][RTW89_UK][2] = 66,
+ [0][0][2][0][RTW89_FCC][4] = 72,
[0][0][2][0][RTW89_ETSI][4] = 66,
[0][0][2][0][RTW89_MKK][4] = 68,
- [0][0][2][0][RTW89_IC][4] = 66,
- [0][0][2][0][RTW89_ACMA][4] = 62,
- [0][0][2][0][RTW89_FCC][6] = 70,
+ [0][0][2][0][RTW89_IC][4] = 60,
+ [0][0][2][0][RTW89_KCC][4] = 54,
+ [0][0][2][0][RTW89_ACMA][4] = 66,
+ [0][0][2][0][RTW89_CN][4] = 52,
+ [0][0][2][0][RTW89_UK][4] = 66,
+ [0][0][2][0][RTW89_FCC][6] = 72,
[0][0][2][0][RTW89_ETSI][6] = 66,
[0][0][2][0][RTW89_MKK][6] = 60,
- [0][0][2][0][RTW89_IC][6] = 66,
- [0][0][2][0][RTW89_ACMA][6] = 62,
- [0][0][2][0][RTW89_FCC][8] = 70,
+ [0][0][2][0][RTW89_IC][6] = 60,
+ [0][0][2][0][RTW89_KCC][6] = 68,
+ [0][0][2][0][RTW89_ACMA][6] = 66,
+ [0][0][2][0][RTW89_CN][6] = 52,
+ [0][0][2][0][RTW89_UK][6] = 66,
+ [0][0][2][0][RTW89_FCC][8] = 72,
[0][0][2][0][RTW89_ETSI][8] = 66,
[0][0][2][0][RTW89_MKK][8] = 58,
- [0][0][2][0][RTW89_IC][8] = 66,
- [0][0][2][0][RTW89_ACMA][8] = 62,
- [0][0][2][0][RTW89_FCC][10] = 70,
+ [0][0][2][0][RTW89_IC][8] = 64,
+ [0][0][2][0][RTW89_KCC][8] = 70,
+ [0][0][2][0][RTW89_ACMA][8] = 66,
+ [0][0][2][0][RTW89_CN][8] = 52,
+ [0][0][2][0][RTW89_UK][8] = 66,
+ [0][0][2][0][RTW89_FCC][10] = 72,
[0][0][2][0][RTW89_ETSI][10] = 66,
[0][0][2][0][RTW89_MKK][10] = 70,
- [0][0][2][0][RTW89_IC][10] = 66,
- [0][0][2][0][RTW89_ACMA][10] = 62,
- [0][0][2][0][RTW89_FCC][12] = 70,
+ [0][0][2][0][RTW89_IC][10] = 64,
+ [0][0][2][0][RTW89_KCC][10] = 70,
+ [0][0][2][0][RTW89_ACMA][10] = 66,
+ [0][0][2][0][RTW89_CN][10] = 52,
+ [0][0][2][0][RTW89_UK][10] = 66,
+ [0][0][2][0][RTW89_FCC][12] = 72,
[0][0][2][0][RTW89_ETSI][12] = 66,
[0][0][2][0][RTW89_MKK][12] = 70,
- [0][0][2][0][RTW89_IC][12] = 66,
- [0][0][2][0][RTW89_ACMA][12] = 62,
- [0][0][2][0][RTW89_FCC][14] = 70,
+ [0][0][2][0][RTW89_IC][12] = 64,
+ [0][0][2][0][RTW89_KCC][12] = 66,
+ [0][0][2][0][RTW89_ACMA][12] = 66,
+ [0][0][2][0][RTW89_CN][12] = 52,
+ [0][0][2][0][RTW89_UK][12] = 66,
+ [0][0][2][0][RTW89_FCC][14] = 68,
[0][0][2][0][RTW89_ETSI][14] = 66,
[0][0][2][0][RTW89_MKK][14] = 70,
- [0][0][2][0][RTW89_IC][14] = 66,
- [0][0][2][0][RTW89_ACMA][14] = 62,
- [0][0][2][0][RTW89_FCC][15] = 66,
+ [0][0][2][0][RTW89_IC][14] = 64,
+ [0][0][2][0][RTW89_KCC][14] = 66,
+ [0][0][2][0][RTW89_ACMA][14] = 66,
+ [0][0][2][0][RTW89_CN][14] = 52,
+ [0][0][2][0][RTW89_UK][14] = 66,
+ [0][0][2][0][RTW89_FCC][15] = 70,
[0][0][2][0][RTW89_ETSI][15] = 66,
[0][0][2][0][RTW89_MKK][15] = 70,
[0][0][2][0][RTW89_IC][15] = 70,
- [0][0][2][0][RTW89_ACMA][15] = 62,
- [0][0][2][0][RTW89_FCC][17] = 70,
+ [0][0][2][0][RTW89_KCC][15] = 70,
+ [0][0][2][0][RTW89_ACMA][15] = 66,
+ [0][0][2][0][RTW89_CN][15] = 127,
+ [0][0][2][0][RTW89_UK][15] = 66,
+ [0][0][2][0][RTW89_FCC][17] = 72,
[0][0][2][0][RTW89_ETSI][17] = 66,
[0][0][2][0][RTW89_MKK][17] = 70,
- [0][0][2][0][RTW89_IC][17] = 70,
- [0][0][2][0][RTW89_ACMA][17] = 62,
- [0][0][2][0][RTW89_FCC][19] = 70,
+ [0][0][2][0][RTW89_IC][17] = 72,
+ [0][0][2][0][RTW89_KCC][17] = 70,
+ [0][0][2][0][RTW89_ACMA][17] = 66,
+ [0][0][2][0][RTW89_CN][17] = 127,
+ [0][0][2][0][RTW89_UK][17] = 66,
+ [0][0][2][0][RTW89_FCC][19] = 72,
[0][0][2][0][RTW89_ETSI][19] = 66,
[0][0][2][0][RTW89_MKK][19] = 70,
- [0][0][2][0][RTW89_IC][19] = 70,
- [0][0][2][0][RTW89_ACMA][19] = 62,
- [0][0][2][0][RTW89_FCC][21] = 70,
+ [0][0][2][0][RTW89_IC][19] = 72,
+ [0][0][2][0][RTW89_KCC][19] = 70,
+ [0][0][2][0][RTW89_ACMA][19] = 66,
+ [0][0][2][0][RTW89_CN][19] = 127,
+ [0][0][2][0][RTW89_UK][19] = 66,
+ [0][0][2][0][RTW89_FCC][21] = 72,
[0][0][2][0][RTW89_ETSI][21] = 66,
[0][0][2][0][RTW89_MKK][21] = 70,
- [0][0][2][0][RTW89_IC][21] = 70,
- [0][0][2][0][RTW89_ACMA][21] = 62,
- [0][0][2][0][RTW89_FCC][23] = 70,
+ [0][0][2][0][RTW89_IC][21] = 72,
+ [0][0][2][0][RTW89_KCC][21] = 70,
+ [0][0][2][0][RTW89_ACMA][21] = 66,
+ [0][0][2][0][RTW89_CN][21] = 127,
+ [0][0][2][0][RTW89_UK][21] = 66,
+ [0][0][2][0][RTW89_FCC][23] = 72,
[0][0][2][0][RTW89_ETSI][23] = 66,
[0][0][2][0][RTW89_MKK][23] = 70,
- [0][0][2][0][RTW89_IC][23] = 70,
- [0][0][2][0][RTW89_ACMA][23] = 62,
- [0][0][2][0][RTW89_FCC][25] = 70,
+ [0][0][2][0][RTW89_IC][23] = 72,
+ [0][0][2][0][RTW89_KCC][23] = 70,
+ [0][0][2][0][RTW89_ACMA][23] = 66,
+ [0][0][2][0][RTW89_CN][23] = 127,
+ [0][0][2][0][RTW89_UK][23] = 66,
+ [0][0][2][0][RTW89_FCC][25] = 72,
[0][0][2][0][RTW89_ETSI][25] = 66,
[0][0][2][0][RTW89_MKK][25] = 70,
[0][0][2][0][RTW89_IC][25] = 127,
+ [0][0][2][0][RTW89_KCC][25] = 70,
[0][0][2][0][RTW89_ACMA][25] = 127,
- [0][0][2][0][RTW89_FCC][27] = 70,
+ [0][0][2][0][RTW89_CN][25] = 127,
+ [0][0][2][0][RTW89_UK][25] = 66,
+ [0][0][2][0][RTW89_FCC][27] = 72,
[0][0][2][0][RTW89_ETSI][27] = 66,
[0][0][2][0][RTW89_MKK][27] = 70,
[0][0][2][0][RTW89_IC][27] = 127,
+ [0][0][2][0][RTW89_KCC][27] = 70,
[0][0][2][0][RTW89_ACMA][27] = 127,
- [0][0][2][0][RTW89_FCC][29] = 70,
+ [0][0][2][0][RTW89_CN][27] = 127,
+ [0][0][2][0][RTW89_UK][27] = 66,
+ [0][0][2][0][RTW89_FCC][29] = 72,
[0][0][2][0][RTW89_ETSI][29] = 66,
[0][0][2][0][RTW89_MKK][29] = 70,
[0][0][2][0][RTW89_IC][29] = 127,
+ [0][0][2][0][RTW89_KCC][29] = 70,
[0][0][2][0][RTW89_ACMA][29] = 127,
- [0][0][2][0][RTW89_FCC][31] = 70,
+ [0][0][2][0][RTW89_CN][29] = 127,
+ [0][0][2][0][RTW89_UK][29] = 66,
+ [0][0][2][0][RTW89_FCC][31] = 72,
[0][0][2][0][RTW89_ETSI][31] = 66,
[0][0][2][0][RTW89_MKK][31] = 70,
- [0][0][2][0][RTW89_IC][31] = 70,
- [0][0][2][0][RTW89_ACMA][31] = 62,
- [0][0][2][0][RTW89_FCC][33] = 70,
+ [0][0][2][0][RTW89_IC][31] = 72,
+ [0][0][2][0][RTW89_KCC][31] = 70,
+ [0][0][2][0][RTW89_ACMA][31] = 66,
+ [0][0][2][0][RTW89_CN][31] = 127,
+ [0][0][2][0][RTW89_UK][31] = 66,
+ [0][0][2][0][RTW89_FCC][33] = 72,
[0][0][2][0][RTW89_ETSI][33] = 66,
[0][0][2][0][RTW89_MKK][33] = 70,
- [0][0][2][0][RTW89_IC][33] = 70,
- [0][0][2][0][RTW89_ACMA][33] = 62,
- [0][0][2][0][RTW89_FCC][35] = 62,
+ [0][0][2][0][RTW89_IC][33] = 72,
+ [0][0][2][0][RTW89_KCC][33] = 70,
+ [0][0][2][0][RTW89_ACMA][33] = 66,
+ [0][0][2][0][RTW89_CN][33] = 127,
+ [0][0][2][0][RTW89_UK][33] = 66,
+ [0][0][2][0][RTW89_FCC][35] = 56,
[0][0][2][0][RTW89_ETSI][35] = 66,
[0][0][2][0][RTW89_MKK][35] = 70,
- [0][0][2][0][RTW89_IC][35] = 70,
- [0][0][2][0][RTW89_ACMA][35] = 62,
- [0][0][2][0][RTW89_FCC][37] = 70,
+ [0][0][2][0][RTW89_IC][35] = 56,
+ [0][0][2][0][RTW89_KCC][35] = 70,
+ [0][0][2][0][RTW89_ACMA][35] = 66,
+ [0][0][2][0][RTW89_CN][35] = 127,
+ [0][0][2][0][RTW89_UK][35] = 66,
+ [0][0][2][0][RTW89_FCC][37] = 72,
[0][0][2][0][RTW89_ETSI][37] = 127,
[0][0][2][0][RTW89_MKK][37] = 70,
- [0][0][2][0][RTW89_IC][37] = 70,
+ [0][0][2][0][RTW89_IC][37] = 72,
+ [0][0][2][0][RTW89_KCC][37] = 70,
[0][0][2][0][RTW89_ACMA][37] = 70,
- [0][0][2][0][RTW89_FCC][38] = 70,
+ [0][0][2][0][RTW89_CN][37] = 127,
+ [0][0][2][0][RTW89_UK][37] = 64,
+ [0][0][2][0][RTW89_FCC][38] = 72,
[0][0][2][0][RTW89_ETSI][38] = 30,
[0][0][2][0][RTW89_MKK][38] = 127,
- [0][0][2][0][RTW89_IC][38] = 70,
+ [0][0][2][0][RTW89_IC][38] = 72,
+ [0][0][2][0][RTW89_KCC][38] = 58,
[0][0][2][0][RTW89_ACMA][38] = 70,
- [0][0][2][0][RTW89_FCC][40] = 70,
+ [0][0][2][0][RTW89_CN][38] = 68,
+ [0][0][2][0][RTW89_UK][38] = 64,
+ [0][0][2][0][RTW89_FCC][40] = 72,
[0][0][2][0][RTW89_ETSI][40] = 30,
[0][0][2][0][RTW89_MKK][40] = 127,
- [0][0][2][0][RTW89_IC][40] = 70,
+ [0][0][2][0][RTW89_IC][40] = 72,
+ [0][0][2][0][RTW89_KCC][40] = 58,
[0][0][2][0][RTW89_ACMA][40] = 70,
- [0][0][2][0][RTW89_FCC][42] = 70,
+ [0][0][2][0][RTW89_CN][40] = 68,
+ [0][0][2][0][RTW89_UK][40] = 64,
+ [0][0][2][0][RTW89_FCC][42] = 72,
[0][0][2][0][RTW89_ETSI][42] = 30,
[0][0][2][0][RTW89_MKK][42] = 127,
- [0][0][2][0][RTW89_IC][42] = 70,
+ [0][0][2][0][RTW89_IC][42] = 72,
+ [0][0][2][0][RTW89_KCC][42] = 58,
[0][0][2][0][RTW89_ACMA][42] = 70,
- [0][0][2][0][RTW89_FCC][44] = 70,
+ [0][0][2][0][RTW89_CN][42] = 68,
+ [0][0][2][0][RTW89_UK][42] = 64,
+ [0][0][2][0][RTW89_FCC][44] = 72,
[0][0][2][0][RTW89_ETSI][44] = 30,
[0][0][2][0][RTW89_MKK][44] = 127,
- [0][0][2][0][RTW89_IC][44] = 70,
+ [0][0][2][0][RTW89_IC][44] = 72,
+ [0][0][2][0][RTW89_KCC][44] = 58,
[0][0][2][0][RTW89_ACMA][44] = 70,
- [0][0][2][0][RTW89_FCC][46] = 70,
+ [0][0][2][0][RTW89_CN][44] = 68,
+ [0][0][2][0][RTW89_UK][44] = 64,
+ [0][0][2][0][RTW89_FCC][46] = 72,
[0][0][2][0][RTW89_ETSI][46] = 30,
[0][0][2][0][RTW89_MKK][46] = 127,
- [0][0][2][0][RTW89_IC][46] = 70,
+ [0][0][2][0][RTW89_IC][46] = 72,
+ [0][0][2][0][RTW89_KCC][46] = 58,
[0][0][2][0][RTW89_ACMA][46] = 70,
- [0][0][2][0][RTW89_FCC][48] = 70,
+ [0][0][2][0][RTW89_CN][46] = 68,
+ [0][0][2][0][RTW89_UK][46] = 64,
+ [0][0][2][0][RTW89_FCC][48] = 72,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
[0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
- [0][0][2][0][RTW89_FCC][50] = 70,
+ [0][0][2][0][RTW89_CN][48] = 127,
+ [0][0][2][0][RTW89_UK][48] = 127,
+ [0][0][2][0][RTW89_FCC][50] = 72,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
[0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
- [0][0][2][0][RTW89_FCC][52] = 70,
+ [0][0][2][0][RTW89_CN][50] = 127,
+ [0][0][2][0][RTW89_UK][50] = 127,
+ [0][0][2][0][RTW89_FCC][52] = 72,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
[0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
- [0][1][2][0][RTW89_FCC][0] = 62,
+ [0][0][2][0][RTW89_CN][52] = 127,
+ [0][0][2][0][RTW89_UK][52] = 127,
+ [0][1][2][0][RTW89_FCC][0] = 60,
[0][1][2][0][RTW89_ETSI][0] = 54,
[0][1][2][0][RTW89_MKK][0] = 54,
- [0][1][2][0][RTW89_IC][0] = 44,
- [0][1][2][0][RTW89_ACMA][0] = 50,
+ [0][1][2][0][RTW89_IC][0] = 36,
+ [0][1][2][0][RTW89_KCC][0] = 40,
+ [0][1][2][0][RTW89_ACMA][0] = 54,
+ [0][1][2][0][RTW89_CN][0] = 40,
+ [0][1][2][0][RTW89_UK][0] = 54,
[0][1][2][0][RTW89_FCC][2] = 62,
[0][1][2][0][RTW89_ETSI][2] = 54,
[0][1][2][0][RTW89_MKK][2] = 54,
- [0][1][2][0][RTW89_IC][2] = 44,
- [0][1][2][0][RTW89_ACMA][2] = 50,
+ [0][1][2][0][RTW89_IC][2] = 36,
+ [0][1][2][0][RTW89_KCC][2] = 40,
+ [0][1][2][0][RTW89_ACMA][2] = 54,
+ [0][1][2][0][RTW89_CN][2] = 40,
+ [0][1][2][0][RTW89_UK][2] = 54,
[0][1][2][0][RTW89_FCC][4] = 62,
[0][1][2][0][RTW89_ETSI][4] = 54,
[0][1][2][0][RTW89_MKK][4] = 54,
- [0][1][2][0][RTW89_IC][4] = 44,
- [0][1][2][0][RTW89_ACMA][4] = 50,
+ [0][1][2][0][RTW89_IC][4] = 36,
+ [0][1][2][0][RTW89_KCC][4] = 40,
+ [0][1][2][0][RTW89_ACMA][4] = 54,
+ [0][1][2][0][RTW89_CN][4] = 40,
+ [0][1][2][0][RTW89_UK][4] = 54,
[0][1][2][0][RTW89_FCC][6] = 62,
[0][1][2][0][RTW89_ETSI][6] = 54,
[0][1][2][0][RTW89_MKK][6] = 50,
- [0][1][2][0][RTW89_IC][6] = 44,
- [0][1][2][0][RTW89_ACMA][6] = 50,
+ [0][1][2][0][RTW89_IC][6] = 38,
+ [0][1][2][0][RTW89_KCC][6] = 64,
+ [0][1][2][0][RTW89_ACMA][6] = 54,
+ [0][1][2][0][RTW89_CN][6] = 40,
+ [0][1][2][0][RTW89_UK][6] = 54,
[0][1][2][0][RTW89_FCC][8] = 62,
[0][1][2][0][RTW89_ETSI][8] = 54,
[0][1][2][0][RTW89_MKK][8] = 42,
- [0][1][2][0][RTW89_IC][8] = 54,
- [0][1][2][0][RTW89_ACMA][8] = 50,
+ [0][1][2][0][RTW89_IC][8] = 52,
+ [0][1][2][0][RTW89_KCC][8] = 62,
+ [0][1][2][0][RTW89_ACMA][8] = 54,
+ [0][1][2][0][RTW89_CN][8] = 40,
+ [0][1][2][0][RTW89_UK][8] = 54,
[0][1][2][0][RTW89_FCC][10] = 62,
[0][1][2][0][RTW89_ETSI][10] = 54,
[0][1][2][0][RTW89_MKK][10] = 54,
- [0][1][2][0][RTW89_IC][10] = 54,
- [0][1][2][0][RTW89_ACMA][10] = 50,
+ [0][1][2][0][RTW89_IC][10] = 52,
+ [0][1][2][0][RTW89_KCC][10] = 62,
+ [0][1][2][0][RTW89_ACMA][10] = 54,
+ [0][1][2][0][RTW89_CN][10] = 40,
+ [0][1][2][0][RTW89_UK][10] = 54,
[0][1][2][0][RTW89_FCC][12] = 62,
[0][1][2][0][RTW89_ETSI][12] = 54,
[0][1][2][0][RTW89_MKK][12] = 54,
- [0][1][2][0][RTW89_IC][12] = 54,
- [0][1][2][0][RTW89_ACMA][12] = 50,
+ [0][1][2][0][RTW89_IC][12] = 52,
+ [0][1][2][0][RTW89_KCC][12] = 62,
+ [0][1][2][0][RTW89_ACMA][12] = 54,
+ [0][1][2][0][RTW89_CN][12] = 40,
+ [0][1][2][0][RTW89_UK][12] = 54,
[0][1][2][0][RTW89_FCC][14] = 62,
[0][1][2][0][RTW89_ETSI][14] = 54,
[0][1][2][0][RTW89_MKK][14] = 54,
- [0][1][2][0][RTW89_IC][14] = 54,
- [0][1][2][0][RTW89_ACMA][14] = 50,
+ [0][1][2][0][RTW89_IC][14] = 52,
+ [0][1][2][0][RTW89_KCC][14] = 62,
+ [0][1][2][0][RTW89_ACMA][14] = 54,
+ [0][1][2][0][RTW89_CN][14] = 40,
+ [0][1][2][0][RTW89_UK][14] = 54,
[0][1][2][0][RTW89_FCC][15] = 60,
[0][1][2][0][RTW89_ETSI][15] = 54,
[0][1][2][0][RTW89_MKK][15] = 68,
- [0][1][2][0][RTW89_IC][15] = 70,
- [0][1][2][0][RTW89_ACMA][15] = 50,
+ [0][1][2][0][RTW89_IC][15] = 60,
+ [0][1][2][0][RTW89_KCC][15] = 64,
+ [0][1][2][0][RTW89_ACMA][15] = 54,
+ [0][1][2][0][RTW89_CN][15] = 127,
+ [0][1][2][0][RTW89_UK][15] = 54,
[0][1][2][0][RTW89_FCC][17] = 62,
[0][1][2][0][RTW89_ETSI][17] = 54,
[0][1][2][0][RTW89_MKK][17] = 68,
- [0][1][2][0][RTW89_IC][17] = 70,
- [0][1][2][0][RTW89_ACMA][17] = 50,
+ [0][1][2][0][RTW89_IC][17] = 62,
+ [0][1][2][0][RTW89_KCC][17] = 64,
+ [0][1][2][0][RTW89_ACMA][17] = 54,
+ [0][1][2][0][RTW89_CN][17] = 127,
+ [0][1][2][0][RTW89_UK][17] = 54,
[0][1][2][0][RTW89_FCC][19] = 62,
[0][1][2][0][RTW89_ETSI][19] = 54,
[0][1][2][0][RTW89_MKK][19] = 68,
- [0][1][2][0][RTW89_IC][19] = 70,
- [0][1][2][0][RTW89_ACMA][19] = 50,
+ [0][1][2][0][RTW89_IC][19] = 62,
+ [0][1][2][0][RTW89_KCC][19] = 64,
+ [0][1][2][0][RTW89_ACMA][19] = 54,
+ [0][1][2][0][RTW89_CN][19] = 127,
+ [0][1][2][0][RTW89_UK][19] = 54,
[0][1][2][0][RTW89_FCC][21] = 62,
[0][1][2][0][RTW89_ETSI][21] = 54,
[0][1][2][0][RTW89_MKK][21] = 68,
- [0][1][2][0][RTW89_IC][21] = 70,
- [0][1][2][0][RTW89_ACMA][21] = 50,
+ [0][1][2][0][RTW89_IC][21] = 62,
+ [0][1][2][0][RTW89_KCC][21] = 64,
+ [0][1][2][0][RTW89_ACMA][21] = 54,
+ [0][1][2][0][RTW89_CN][21] = 127,
+ [0][1][2][0][RTW89_UK][21] = 54,
[0][1][2][0][RTW89_FCC][23] = 62,
[0][1][2][0][RTW89_ETSI][23] = 54,
[0][1][2][0][RTW89_MKK][23] = 68,
- [0][1][2][0][RTW89_IC][23] = 70,
- [0][1][2][0][RTW89_ACMA][23] = 50,
+ [0][1][2][0][RTW89_IC][23] = 62,
+ [0][1][2][0][RTW89_KCC][23] = 64,
+ [0][1][2][0][RTW89_ACMA][23] = 54,
+ [0][1][2][0][RTW89_CN][23] = 127,
+ [0][1][2][0][RTW89_UK][23] = 54,
[0][1][2][0][RTW89_FCC][25] = 62,
[0][1][2][0][RTW89_ETSI][25] = 54,
[0][1][2][0][RTW89_MKK][25] = 68,
[0][1][2][0][RTW89_IC][25] = 127,
+ [0][1][2][0][RTW89_KCC][25] = 64,
[0][1][2][0][RTW89_ACMA][25] = 127,
+ [0][1][2][0][RTW89_CN][25] = 127,
+ [0][1][2][0][RTW89_UK][25] = 54,
[0][1][2][0][RTW89_FCC][27] = 62,
[0][1][2][0][RTW89_ETSI][27] = 54,
[0][1][2][0][RTW89_MKK][27] = 68,
[0][1][2][0][RTW89_IC][27] = 127,
+ [0][1][2][0][RTW89_KCC][27] = 64,
[0][1][2][0][RTW89_ACMA][27] = 127,
+ [0][1][2][0][RTW89_CN][27] = 127,
+ [0][1][2][0][RTW89_UK][27] = 54,
[0][1][2][0][RTW89_FCC][29] = 62,
[0][1][2][0][RTW89_ETSI][29] = 54,
[0][1][2][0][RTW89_MKK][29] = 68,
[0][1][2][0][RTW89_IC][29] = 127,
+ [0][1][2][0][RTW89_KCC][29] = 64,
[0][1][2][0][RTW89_ACMA][29] = 127,
+ [0][1][2][0][RTW89_CN][29] = 127,
+ [0][1][2][0][RTW89_UK][29] = 54,
[0][1][2][0][RTW89_FCC][31] = 62,
[0][1][2][0][RTW89_ETSI][31] = 54,
[0][1][2][0][RTW89_MKK][31] = 68,
- [0][1][2][0][RTW89_IC][31] = 70,
- [0][1][2][0][RTW89_ACMA][31] = 50,
+ [0][1][2][0][RTW89_IC][31] = 62,
+ [0][1][2][0][RTW89_KCC][31] = 62,
+ [0][1][2][0][RTW89_ACMA][31] = 54,
+ [0][1][2][0][RTW89_CN][31] = 127,
+ [0][1][2][0][RTW89_UK][31] = 54,
[0][1][2][0][RTW89_FCC][33] = 62,
[0][1][2][0][RTW89_ETSI][33] = 54,
[0][1][2][0][RTW89_MKK][33] = 68,
- [0][1][2][0][RTW89_IC][33] = 70,
- [0][1][2][0][RTW89_ACMA][33] = 50,
- [0][1][2][0][RTW89_FCC][35] = 58,
+ [0][1][2][0][RTW89_IC][33] = 62,
+ [0][1][2][0][RTW89_KCC][33] = 62,
+ [0][1][2][0][RTW89_ACMA][33] = 54,
+ [0][1][2][0][RTW89_CN][33] = 127,
+ [0][1][2][0][RTW89_UK][33] = 54,
+ [0][1][2][0][RTW89_FCC][35] = 46,
[0][1][2][0][RTW89_ETSI][35] = 54,
[0][1][2][0][RTW89_MKK][35] = 68,
- [0][1][2][0][RTW89_IC][35] = 68,
- [0][1][2][0][RTW89_ACMA][35] = 50,
- [0][1][2][0][RTW89_FCC][37] = 62,
+ [0][1][2][0][RTW89_IC][35] = 46,
+ [0][1][2][0][RTW89_KCC][35] = 62,
+ [0][1][2][0][RTW89_ACMA][35] = 54,
+ [0][1][2][0][RTW89_CN][35] = 127,
+ [0][1][2][0][RTW89_UK][35] = 54,
+ [0][1][2][0][RTW89_FCC][37] = 64,
[0][1][2][0][RTW89_ETSI][37] = 127,
[0][1][2][0][RTW89_MKK][37] = 68,
- [0][1][2][0][RTW89_IC][37] = 70,
- [0][1][2][0][RTW89_ACMA][37] = 70,
- [0][1][2][0][RTW89_FCC][38] = 70,
+ [0][1][2][0][RTW89_IC][37] = 64,
+ [0][1][2][0][RTW89_KCC][37] = 62,
+ [0][1][2][0][RTW89_ACMA][37] = 64,
+ [0][1][2][0][RTW89_CN][37] = 127,
+ [0][1][2][0][RTW89_UK][37] = 52,
+ [0][1][2][0][RTW89_FCC][38] = 72,
[0][1][2][0][RTW89_ETSI][38] = 18,
[0][1][2][0][RTW89_MKK][38] = 127,
- [0][1][2][0][RTW89_IC][38] = 70,
+ [0][1][2][0][RTW89_IC][38] = 72,
+ [0][1][2][0][RTW89_KCC][38] = 56,
[0][1][2][0][RTW89_ACMA][38] = 70,
- [0][1][2][0][RTW89_FCC][40] = 70,
+ [0][1][2][0][RTW89_CN][38] = 68,
+ [0][1][2][0][RTW89_UK][38] = 52,
+ [0][1][2][0][RTW89_FCC][40] = 72,
[0][1][2][0][RTW89_ETSI][40] = 18,
[0][1][2][0][RTW89_MKK][40] = 127,
- [0][1][2][0][RTW89_IC][40] = 70,
+ [0][1][2][0][RTW89_IC][40] = 72,
+ [0][1][2][0][RTW89_KCC][40] = 56,
[0][1][2][0][RTW89_ACMA][40] = 70,
- [0][1][2][0][RTW89_FCC][42] = 70,
+ [0][1][2][0][RTW89_CN][40] = 68,
+ [0][1][2][0][RTW89_UK][40] = 52,
+ [0][1][2][0][RTW89_FCC][42] = 72,
[0][1][2][0][RTW89_ETSI][42] = 18,
[0][1][2][0][RTW89_MKK][42] = 127,
- [0][1][2][0][RTW89_IC][42] = 70,
+ [0][1][2][0][RTW89_IC][42] = 72,
+ [0][1][2][0][RTW89_KCC][42] = 56,
[0][1][2][0][RTW89_ACMA][42] = 70,
- [0][1][2][0][RTW89_FCC][44] = 70,
+ [0][1][2][0][RTW89_CN][42] = 68,
+ [0][1][2][0][RTW89_UK][42] = 52,
+ [0][1][2][0][RTW89_FCC][44] = 72,
[0][1][2][0][RTW89_ETSI][44] = 18,
[0][1][2][0][RTW89_MKK][44] = 127,
- [0][1][2][0][RTW89_IC][44] = 70,
+ [0][1][2][0][RTW89_IC][44] = 72,
+ [0][1][2][0][RTW89_KCC][44] = 56,
[0][1][2][0][RTW89_ACMA][44] = 70,
- [0][1][2][0][RTW89_FCC][46] = 70,
+ [0][1][2][0][RTW89_CN][44] = 68,
+ [0][1][2][0][RTW89_UK][44] = 52,
+ [0][1][2][0][RTW89_FCC][46] = 72,
[0][1][2][0][RTW89_ETSI][46] = 18,
[0][1][2][0][RTW89_MKK][46] = 127,
- [0][1][2][0][RTW89_IC][46] = 70,
+ [0][1][2][0][RTW89_IC][46] = 72,
+ [0][1][2][0][RTW89_KCC][46] = 56,
[0][1][2][0][RTW89_ACMA][46] = 70,
- [0][1][2][0][RTW89_FCC][48] = 50,
+ [0][1][2][0][RTW89_CN][46] = 68,
+ [0][1][2][0][RTW89_UK][46] = 52,
+ [0][1][2][0][RTW89_FCC][48] = 48,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
[0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
+ [0][1][2][0][RTW89_CN][48] = 127,
+ [0][1][2][0][RTW89_UK][48] = 127,
[0][1][2][0][RTW89_FCC][50] = 50,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
[0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
- [0][1][2][0][RTW89_FCC][52] = 50,
+ [0][1][2][0][RTW89_CN][50] = 127,
+ [0][1][2][0][RTW89_UK][50] = 127,
+ [0][1][2][0][RTW89_FCC][52] = 48,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
[0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
+ [0][1][2][0][RTW89_CN][52] = 127,
+ [0][1][2][0][RTW89_UK][52] = 127,
[0][1][2][1][RTW89_FCC][0] = 60,
[0][1][2][1][RTW89_ETSI][0] = 40,
[0][1][2][1][RTW89_MKK][0] = 54,
- [0][1][2][1][RTW89_IC][0] = 42,
- [0][1][2][1][RTW89_ACMA][0] = 38,
- [0][1][2][1][RTW89_FCC][2] = 60,
+ [0][1][2][1][RTW89_IC][0] = 40,
+ [0][1][2][1][RTW89_KCC][0] = 40,
+ [0][1][2][1][RTW89_ACMA][0] = 40,
+ [0][1][2][1][RTW89_CN][0] = 36,
+ [0][1][2][1][RTW89_UK][0] = 40,
+ [0][1][2][1][RTW89_FCC][2] = 62,
[0][1][2][1][RTW89_ETSI][2] = 40,
[0][1][2][1][RTW89_MKK][2] = 54,
- [0][1][2][1][RTW89_IC][2] = 42,
- [0][1][2][1][RTW89_ACMA][2] = 38,
- [0][1][2][1][RTW89_FCC][4] = 60,
+ [0][1][2][1][RTW89_IC][2] = 40,
+ [0][1][2][1][RTW89_KCC][2] = 40,
+ [0][1][2][1][RTW89_ACMA][2] = 40,
+ [0][1][2][1][RTW89_CN][2] = 36,
+ [0][1][2][1][RTW89_UK][2] = 40,
+ [0][1][2][1][RTW89_FCC][4] = 62,
[0][1][2][1][RTW89_ETSI][4] = 40,
[0][1][2][1][RTW89_MKK][4] = 54,
- [0][1][2][1][RTW89_IC][4] = 42,
- [0][1][2][1][RTW89_ACMA][4] = 38,
- [0][1][2][1][RTW89_FCC][6] = 60,
+ [0][1][2][1][RTW89_IC][4] = 40,
+ [0][1][2][1][RTW89_KCC][4] = 40,
+ [0][1][2][1][RTW89_ACMA][4] = 40,
+ [0][1][2][1][RTW89_CN][4] = 36,
+ [0][1][2][1][RTW89_UK][4] = 40,
+ [0][1][2][1][RTW89_FCC][6] = 62,
[0][1][2][1][RTW89_ETSI][6] = 40,
[0][1][2][1][RTW89_MKK][6] = 50,
- [0][1][2][1][RTW89_IC][6] = 42,
- [0][1][2][1][RTW89_ACMA][6] = 38,
- [0][1][2][1][RTW89_FCC][8] = 60,
+ [0][1][2][1][RTW89_IC][6] = 40,
+ [0][1][2][1][RTW89_KCC][6] = 64,
+ [0][1][2][1][RTW89_ACMA][6] = 40,
+ [0][1][2][1][RTW89_CN][6] = 36,
+ [0][1][2][1][RTW89_UK][6] = 40,
+ [0][1][2][1][RTW89_FCC][8] = 62,
[0][1][2][1][RTW89_ETSI][8] = 40,
[0][1][2][1][RTW89_MKK][8] = 42,
- [0][1][2][1][RTW89_IC][8] = 42,
- [0][1][2][1][RTW89_ACMA][8] = 38,
- [0][1][2][1][RTW89_FCC][10] = 60,
+ [0][1][2][1][RTW89_IC][8] = 40,
+ [0][1][2][1][RTW89_KCC][8] = 62,
+ [0][1][2][1][RTW89_ACMA][8] = 40,
+ [0][1][2][1][RTW89_CN][8] = 36,
+ [0][1][2][1][RTW89_UK][8] = 40,
+ [0][1][2][1][RTW89_FCC][10] = 62,
[0][1][2][1][RTW89_ETSI][10] = 40,
- [0][1][2][1][RTW89_MKK][10] = 66,
- [0][1][2][1][RTW89_IC][10] = 42,
- [0][1][2][1][RTW89_ACMA][10] = 38,
- [0][1][2][1][RTW89_FCC][12] = 60,
+ [0][1][2][1][RTW89_MKK][10] = 54,
+ [0][1][2][1][RTW89_IC][10] = 40,
+ [0][1][2][1][RTW89_KCC][10] = 62,
+ [0][1][2][1][RTW89_ACMA][10] = 40,
+ [0][1][2][1][RTW89_CN][10] = 36,
+ [0][1][2][1][RTW89_UK][10] = 40,
+ [0][1][2][1][RTW89_FCC][12] = 62,
[0][1][2][1][RTW89_ETSI][12] = 40,
- [0][1][2][1][RTW89_MKK][12] = 66,
- [0][1][2][1][RTW89_IC][12] = 42,
- [0][1][2][1][RTW89_ACMA][12] = 38,
- [0][1][2][1][RTW89_FCC][14] = 60,
+ [0][1][2][1][RTW89_MKK][12] = 54,
+ [0][1][2][1][RTW89_IC][12] = 40,
+ [0][1][2][1][RTW89_KCC][12] = 62,
+ [0][1][2][1][RTW89_ACMA][12] = 40,
+ [0][1][2][1][RTW89_CN][12] = 36,
+ [0][1][2][1][RTW89_UK][12] = 40,
+ [0][1][2][1][RTW89_FCC][14] = 62,
[0][1][2][1][RTW89_ETSI][14] = 40,
- [0][1][2][1][RTW89_MKK][14] = 66,
- [0][1][2][1][RTW89_IC][14] = 42,
- [0][1][2][1][RTW89_ACMA][14] = 38,
+ [0][1][2][1][RTW89_MKK][14] = 54,
+ [0][1][2][1][RTW89_IC][14] = 40,
+ [0][1][2][1][RTW89_KCC][14] = 62,
+ [0][1][2][1][RTW89_ACMA][14] = 40,
+ [0][1][2][1][RTW89_CN][14] = 36,
+ [0][1][2][1][RTW89_UK][14] = 40,
[0][1][2][1][RTW89_FCC][15] = 60,
[0][1][2][1][RTW89_ETSI][15] = 40,
[0][1][2][1][RTW89_MKK][15] = 68,
- [0][1][2][1][RTW89_IC][15] = 70,
- [0][1][2][1][RTW89_ACMA][15] = 38,
- [0][1][2][1][RTW89_FCC][17] = 60,
+ [0][1][2][1][RTW89_IC][15] = 60,
+ [0][1][2][1][RTW89_KCC][15] = 64,
+ [0][1][2][1][RTW89_ACMA][15] = 40,
+ [0][1][2][1][RTW89_CN][15] = 127,
+ [0][1][2][1][RTW89_UK][15] = 40,
+ [0][1][2][1][RTW89_FCC][17] = 62,
[0][1][2][1][RTW89_ETSI][17] = 40,
[0][1][2][1][RTW89_MKK][17] = 68,
- [0][1][2][1][RTW89_IC][17] = 70,
- [0][1][2][1][RTW89_ACMA][17] = 38,
- [0][1][2][1][RTW89_FCC][19] = 60,
+ [0][1][2][1][RTW89_IC][17] = 62,
+ [0][1][2][1][RTW89_KCC][17] = 64,
+ [0][1][2][1][RTW89_ACMA][17] = 40,
+ [0][1][2][1][RTW89_CN][17] = 127,
+ [0][1][2][1][RTW89_UK][17] = 40,
+ [0][1][2][1][RTW89_FCC][19] = 62,
[0][1][2][1][RTW89_ETSI][19] = 40,
[0][1][2][1][RTW89_MKK][19] = 68,
- [0][1][2][1][RTW89_IC][19] = 70,
- [0][1][2][1][RTW89_ACMA][19] = 38,
- [0][1][2][1][RTW89_FCC][21] = 60,
+ [0][1][2][1][RTW89_IC][19] = 62,
+ [0][1][2][1][RTW89_KCC][19] = 64,
+ [0][1][2][1][RTW89_ACMA][19] = 40,
+ [0][1][2][1][RTW89_CN][19] = 127,
+ [0][1][2][1][RTW89_UK][19] = 40,
+ [0][1][2][1][RTW89_FCC][21] = 62,
[0][1][2][1][RTW89_ETSI][21] = 40,
[0][1][2][1][RTW89_MKK][21] = 68,
- [0][1][2][1][RTW89_IC][21] = 70,
- [0][1][2][1][RTW89_ACMA][21] = 38,
- [0][1][2][1][RTW89_FCC][23] = 60,
+ [0][1][2][1][RTW89_IC][21] = 62,
+ [0][1][2][1][RTW89_KCC][21] = 64,
+ [0][1][2][1][RTW89_ACMA][21] = 40,
+ [0][1][2][1][RTW89_CN][21] = 127,
+ [0][1][2][1][RTW89_UK][21] = 40,
+ [0][1][2][1][RTW89_FCC][23] = 62,
[0][1][2][1][RTW89_ETSI][23] = 40,
[0][1][2][1][RTW89_MKK][23] = 68,
- [0][1][2][1][RTW89_IC][23] = 70,
- [0][1][2][1][RTW89_ACMA][23] = 38,
- [0][1][2][1][RTW89_FCC][25] = 58,
+ [0][1][2][1][RTW89_IC][23] = 62,
+ [0][1][2][1][RTW89_KCC][23] = 64,
+ [0][1][2][1][RTW89_ACMA][23] = 40,
+ [0][1][2][1][RTW89_CN][23] = 127,
+ [0][1][2][1][RTW89_UK][23] = 40,
+ [0][1][2][1][RTW89_FCC][25] = 46,
[0][1][2][1][RTW89_ETSI][25] = 40,
[0][1][2][1][RTW89_MKK][25] = 68,
[0][1][2][1][RTW89_IC][25] = 127,
+ [0][1][2][1][RTW89_KCC][25] = 64,
[0][1][2][1][RTW89_ACMA][25] = 127,
- [0][1][2][1][RTW89_FCC][27] = 58,
+ [0][1][2][1][RTW89_CN][25] = 127,
+ [0][1][2][1][RTW89_UK][25] = 40,
+ [0][1][2][1][RTW89_FCC][27] = 46,
[0][1][2][1][RTW89_ETSI][27] = 40,
[0][1][2][1][RTW89_MKK][27] = 68,
[0][1][2][1][RTW89_IC][27] = 127,
+ [0][1][2][1][RTW89_KCC][27] = 64,
[0][1][2][1][RTW89_ACMA][27] = 127,
- [0][1][2][1][RTW89_FCC][29] = 58,
+ [0][1][2][1][RTW89_CN][27] = 127,
+ [0][1][2][1][RTW89_UK][27] = 40,
+ [0][1][2][1][RTW89_FCC][29] = 46,
[0][1][2][1][RTW89_ETSI][29] = 40,
[0][1][2][1][RTW89_MKK][29] = 68,
[0][1][2][1][RTW89_IC][29] = 127,
+ [0][1][2][1][RTW89_KCC][29] = 64,
[0][1][2][1][RTW89_ACMA][29] = 127,
- [0][1][2][1][RTW89_FCC][31] = 58,
+ [0][1][2][1][RTW89_CN][29] = 127,
+ [0][1][2][1][RTW89_UK][29] = 40,
+ [0][1][2][1][RTW89_FCC][31] = 46,
[0][1][2][1][RTW89_ETSI][31] = 40,
[0][1][2][1][RTW89_MKK][31] = 68,
- [0][1][2][1][RTW89_IC][31] = 68,
- [0][1][2][1][RTW89_ACMA][31] = 38,
- [0][1][2][1][RTW89_FCC][33] = 58,
+ [0][1][2][1][RTW89_IC][31] = 46,
+ [0][1][2][1][RTW89_KCC][31] = 62,
+ [0][1][2][1][RTW89_ACMA][31] = 40,
+ [0][1][2][1][RTW89_CN][31] = 127,
+ [0][1][2][1][RTW89_UK][31] = 40,
+ [0][1][2][1][RTW89_FCC][33] = 46,
[0][1][2][1][RTW89_ETSI][33] = 40,
[0][1][2][1][RTW89_MKK][33] = 68,
- [0][1][2][1][RTW89_IC][33] = 68,
- [0][1][2][1][RTW89_ACMA][33] = 38,
- [0][1][2][1][RTW89_FCC][35] = 58,
+ [0][1][2][1][RTW89_IC][33] = 46,
+ [0][1][2][1][RTW89_KCC][33] = 62,
+ [0][1][2][1][RTW89_ACMA][33] = 40,
+ [0][1][2][1][RTW89_CN][33] = 127,
+ [0][1][2][1][RTW89_UK][33] = 40,
+ [0][1][2][1][RTW89_FCC][35] = 46,
[0][1][2][1][RTW89_ETSI][35] = 40,
[0][1][2][1][RTW89_MKK][35] = 68,
- [0][1][2][1][RTW89_IC][35] = 68,
- [0][1][2][1][RTW89_ACMA][35] = 38,
- [0][1][2][1][RTW89_FCC][37] = 60,
+ [0][1][2][1][RTW89_IC][35] = 46,
+ [0][1][2][1][RTW89_KCC][35] = 62,
+ [0][1][2][1][RTW89_ACMA][35] = 40,
+ [0][1][2][1][RTW89_CN][35] = 127,
+ [0][1][2][1][RTW89_UK][35] = 40,
+ [0][1][2][1][RTW89_FCC][37] = 64,
[0][1][2][1][RTW89_ETSI][37] = 127,
[0][1][2][1][RTW89_MKK][37] = 68,
- [0][1][2][1][RTW89_IC][37] = 70,
- [0][1][2][1][RTW89_ACMA][37] = 70,
- [0][1][2][1][RTW89_FCC][38] = 70,
+ [0][1][2][1][RTW89_IC][37] = 64,
+ [0][1][2][1][RTW89_KCC][37] = 62,
+ [0][1][2][1][RTW89_ACMA][37] = 64,
+ [0][1][2][1][RTW89_CN][37] = 127,
+ [0][1][2][1][RTW89_UK][37] = 40,
+ [0][1][2][1][RTW89_FCC][38] = 72,
[0][1][2][1][RTW89_ETSI][38] = 6,
[0][1][2][1][RTW89_MKK][38] = 127,
- [0][1][2][1][RTW89_IC][38] = 70,
+ [0][1][2][1][RTW89_IC][38] = 72,
+ [0][1][2][1][RTW89_KCC][38] = 56,
[0][1][2][1][RTW89_ACMA][38] = 70,
- [0][1][2][1][RTW89_FCC][40] = 70,
+ [0][1][2][1][RTW89_CN][38] = 60,
+ [0][1][2][1][RTW89_UK][38] = 40,
+ [0][1][2][1][RTW89_FCC][40] = 72,
[0][1][2][1][RTW89_ETSI][40] = 6,
[0][1][2][1][RTW89_MKK][40] = 127,
- [0][1][2][1][RTW89_IC][40] = 70,
+ [0][1][2][1][RTW89_IC][40] = 72,
+ [0][1][2][1][RTW89_KCC][40] = 56,
[0][1][2][1][RTW89_ACMA][40] = 70,
- [0][1][2][1][RTW89_FCC][42] = 70,
+ [0][1][2][1][RTW89_CN][40] = 60,
+ [0][1][2][1][RTW89_UK][40] = 40,
+ [0][1][2][1][RTW89_FCC][42] = 72,
[0][1][2][1][RTW89_ETSI][42] = 6,
[0][1][2][1][RTW89_MKK][42] = 127,
- [0][1][2][1][RTW89_IC][42] = 70,
+ [0][1][2][1][RTW89_IC][42] = 72,
+ [0][1][2][1][RTW89_KCC][42] = 56,
[0][1][2][1][RTW89_ACMA][42] = 70,
- [0][1][2][1][RTW89_FCC][44] = 70,
+ [0][1][2][1][RTW89_CN][42] = 60,
+ [0][1][2][1][RTW89_UK][42] = 40,
+ [0][1][2][1][RTW89_FCC][44] = 72,
[0][1][2][1][RTW89_ETSI][44] = 6,
[0][1][2][1][RTW89_MKK][44] = 127,
- [0][1][2][1][RTW89_IC][44] = 70,
+ [0][1][2][1][RTW89_IC][44] = 72,
+ [0][1][2][1][RTW89_KCC][44] = 56,
[0][1][2][1][RTW89_ACMA][44] = 70,
- [0][1][2][1][RTW89_FCC][46] = 70,
+ [0][1][2][1][RTW89_CN][44] = 54,
+ [0][1][2][1][RTW89_UK][44] = 40,
+ [0][1][2][1][RTW89_FCC][46] = 72,
[0][1][2][1][RTW89_ETSI][46] = 6,
[0][1][2][1][RTW89_MKK][46] = 127,
- [0][1][2][1][RTW89_IC][46] = 70,
+ [0][1][2][1][RTW89_IC][46] = 72,
+ [0][1][2][1][RTW89_KCC][46] = 56,
[0][1][2][1][RTW89_ACMA][46] = 70,
- [0][1][2][1][RTW89_FCC][48] = 50,
+ [0][1][2][1][RTW89_CN][46] = 54,
+ [0][1][2][1][RTW89_UK][46] = 40,
+ [0][1][2][1][RTW89_FCC][48] = 48,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
[0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
+ [0][1][2][1][RTW89_CN][48] = 127,
+ [0][1][2][1][RTW89_UK][48] = 127,
[0][1][2][1][RTW89_FCC][50] = 50,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
[0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
- [0][1][2][1][RTW89_FCC][52] = 50,
+ [0][1][2][1][RTW89_CN][50] = 127,
+ [0][1][2][1][RTW89_UK][50] = 127,
+ [0][1][2][1][RTW89_FCC][52] = 48,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
[0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
- [1][0][2][0][RTW89_FCC][1] = 58,
+ [0][1][2][1][RTW89_CN][52] = 127,
+ [0][1][2][1][RTW89_UK][52] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 64,
[1][0][2][0][RTW89_ETSI][1] = 66,
[1][0][2][0][RTW89_MKK][1] = 66,
- [1][0][2][0][RTW89_IC][1] = 66,
+ [1][0][2][0][RTW89_IC][1] = 62,
+ [1][0][2][0][RTW89_KCC][1] = 66,
[1][0][2][0][RTW89_ACMA][1] = 66,
+ [1][0][2][0][RTW89_CN][1] = 54,
+ [1][0][2][0][RTW89_UK][1] = 66,
[1][0][2][0][RTW89_FCC][5] = 68,
[1][0][2][0][RTW89_ETSI][5] = 66,
[1][0][2][0][RTW89_MKK][5] = 66,
- [1][0][2][0][RTW89_IC][5] = 66,
+ [1][0][2][0][RTW89_IC][5] = 64,
+ [1][0][2][0][RTW89_KCC][5] = 54,
[1][0][2][0][RTW89_ACMA][5] = 66,
+ [1][0][2][0][RTW89_CN][5] = 54,
+ [1][0][2][0][RTW89_UK][5] = 66,
[1][0][2][0][RTW89_FCC][9] = 68,
[1][0][2][0][RTW89_ETSI][9] = 66,
[1][0][2][0][RTW89_MKK][9] = 66,
- [1][0][2][0][RTW89_IC][9] = 66,
+ [1][0][2][0][RTW89_IC][9] = 64,
+ [1][0][2][0][RTW89_KCC][9] = 66,
[1][0][2][0][RTW89_ACMA][9] = 66,
- [1][0][2][0][RTW89_FCC][13] = 58,
+ [1][0][2][0][RTW89_CN][9] = 54,
+ [1][0][2][0][RTW89_UK][9] = 66,
+ [1][0][2][0][RTW89_FCC][13] = 60,
[1][0][2][0][RTW89_ETSI][13] = 66,
[1][0][2][0][RTW89_MKK][13] = 66,
- [1][0][2][0][RTW89_IC][13] = 66,
+ [1][0][2][0][RTW89_IC][13] = 60,
+ [1][0][2][0][RTW89_KCC][13] = 52,
[1][0][2][0][RTW89_ACMA][13] = 66,
- [1][0][2][0][RTW89_FCC][16] = 56,
+ [1][0][2][0][RTW89_CN][13] = 54,
+ [1][0][2][0][RTW89_UK][13] = 66,
+ [1][0][2][0][RTW89_FCC][16] = 64,
[1][0][2][0][RTW89_ETSI][16] = 66,
[1][0][2][0][RTW89_MKK][16] = 66,
- [1][0][2][0][RTW89_IC][16] = 66,
+ [1][0][2][0][RTW89_IC][16] = 64,
+ [1][0][2][0][RTW89_KCC][16] = 56,
[1][0][2][0][RTW89_ACMA][16] = 66,
+ [1][0][2][0][RTW89_CN][16] = 127,
+ [1][0][2][0][RTW89_UK][16] = 66,
[1][0][2][0][RTW89_FCC][20] = 68,
[1][0][2][0][RTW89_ETSI][20] = 66,
[1][0][2][0][RTW89_MKK][20] = 66,
- [1][0][2][0][RTW89_IC][20] = 66,
+ [1][0][2][0][RTW89_IC][20] = 68,
+ [1][0][2][0][RTW89_KCC][20] = 56,
[1][0][2][0][RTW89_ACMA][20] = 66,
+ [1][0][2][0][RTW89_CN][20] = 127,
+ [1][0][2][0][RTW89_UK][20] = 66,
[1][0][2][0][RTW89_FCC][24] = 68,
[1][0][2][0][RTW89_ETSI][24] = 66,
[1][0][2][0][RTW89_MKK][24] = 66,
[1][0][2][0][RTW89_IC][24] = 127,
+ [1][0][2][0][RTW89_KCC][24] = 56,
[1][0][2][0][RTW89_ACMA][24] = 127,
+ [1][0][2][0][RTW89_CN][24] = 127,
+ [1][0][2][0][RTW89_UK][24] = 66,
[1][0][2][0][RTW89_FCC][28] = 68,
[1][0][2][0][RTW89_ETSI][28] = 66,
[1][0][2][0][RTW89_MKK][28] = 66,
[1][0][2][0][RTW89_IC][28] = 127,
+ [1][0][2][0][RTW89_KCC][28] = 66,
[1][0][2][0][RTW89_ACMA][28] = 127,
- [1][0][2][0][RTW89_FCC][32] = 68,
+ [1][0][2][0][RTW89_CN][28] = 127,
+ [1][0][2][0][RTW89_UK][28] = 66,
+ [1][0][2][0][RTW89_FCC][32] = 62,
[1][0][2][0][RTW89_ETSI][32] = 66,
[1][0][2][0][RTW89_MKK][32] = 66,
- [1][0][2][0][RTW89_IC][32] = 66,
+ [1][0][2][0][RTW89_IC][32] = 62,
+ [1][0][2][0][RTW89_KCC][32] = 66,
[1][0][2][0][RTW89_ACMA][32] = 66,
+ [1][0][2][0][RTW89_CN][32] = 127,
+ [1][0][2][0][RTW89_UK][32] = 66,
[1][0][2][0][RTW89_FCC][36] = 68,
[1][0][2][0][RTW89_ETSI][36] = 127,
[1][0][2][0][RTW89_MKK][36] = 66,
- [1][0][2][0][RTW89_IC][36] = 66,
+ [1][0][2][0][RTW89_IC][36] = 68,
+ [1][0][2][0][RTW89_KCC][36] = 66,
[1][0][2][0][RTW89_ACMA][36] = 66,
+ [1][0][2][0][RTW89_CN][36] = 127,
+ [1][0][2][0][RTW89_UK][36] = 64,
[1][0][2][0][RTW89_FCC][39] = 68,
[1][0][2][0][RTW89_ETSI][39] = 30,
[1][0][2][0][RTW89_MKK][39] = 127,
- [1][0][2][0][RTW89_IC][39] = 66,
+ [1][0][2][0][RTW89_IC][39] = 68,
+ [1][0][2][0][RTW89_KCC][39] = 66,
[1][0][2][0][RTW89_ACMA][39] = 66,
+ [1][0][2][0][RTW89_CN][39] = 62,
+ [1][0][2][0][RTW89_UK][39] = 64,
[1][0][2][0][RTW89_FCC][43] = 68,
[1][0][2][0][RTW89_ETSI][43] = 30,
[1][0][2][0][RTW89_MKK][43] = 127,
- [1][0][2][0][RTW89_IC][43] = 66,
+ [1][0][2][0][RTW89_IC][43] = 68,
+ [1][0][2][0][RTW89_KCC][43] = 66,
[1][0][2][0][RTW89_ACMA][43] = 66,
+ [1][0][2][0][RTW89_CN][43] = 66,
+ [1][0][2][0][RTW89_UK][43] = 64,
[1][0][2][0][RTW89_FCC][47] = 68,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
[1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
+ [1][0][2][0][RTW89_CN][47] = 127,
+ [1][0][2][0][RTW89_UK][47] = 127,
[1][0][2][0][RTW89_FCC][51] = 68,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
[1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
+ [1][0][2][0][RTW89_CN][51] = 127,
+ [1][0][2][0][RTW89_UK][51] = 127,
[1][1][2][0][RTW89_FCC][1] = 54,
[1][1][2][0][RTW89_ETSI][1] = 54,
[1][1][2][0][RTW89_MKK][1] = 48,
- [1][1][2][0][RTW89_IC][1] = 60,
- [1][1][2][0][RTW89_ACMA][1] = 60,
+ [1][1][2][0][RTW89_IC][1] = 48,
+ [1][1][2][0][RTW89_KCC][1] = 54,
+ [1][1][2][0][RTW89_ACMA][1] = 54,
+ [1][1][2][0][RTW89_CN][1] = 42,
+ [1][1][2][0][RTW89_UK][1] = 54,
[1][1][2][0][RTW89_FCC][5] = 68,
[1][1][2][0][RTW89_ETSI][5] = 54,
[1][1][2][0][RTW89_MKK][5] = 52,
- [1][1][2][0][RTW89_IC][5] = 60,
- [1][1][2][0][RTW89_ACMA][5] = 60,
+ [1][1][2][0][RTW89_IC][5] = 48,
+ [1][1][2][0][RTW89_KCC][5] = 54,
+ [1][1][2][0][RTW89_ACMA][5] = 54,
+ [1][1][2][0][RTW89_CN][5] = 42,
+ [1][1][2][0][RTW89_UK][5] = 54,
[1][1][2][0][RTW89_FCC][9] = 68,
[1][1][2][0][RTW89_ETSI][9] = 54,
[1][1][2][0][RTW89_MKK][9] = 52,
- [1][1][2][0][RTW89_IC][9] = 60,
- [1][1][2][0][RTW89_ACMA][9] = 60,
+ [1][1][2][0][RTW89_IC][9] = 52,
+ [1][1][2][0][RTW89_KCC][9] = 64,
+ [1][1][2][0][RTW89_ACMA][9] = 54,
+ [1][1][2][0][RTW89_CN][9] = 42,
+ [1][1][2][0][RTW89_UK][9] = 54,
[1][1][2][0][RTW89_FCC][13] = 54,
[1][1][2][0][RTW89_ETSI][13] = 54,
[1][1][2][0][RTW89_MKK][13] = 52,
- [1][1][2][0][RTW89_IC][13] = 60,
- [1][1][2][0][RTW89_ACMA][13] = 60,
- [1][1][2][0][RTW89_FCC][16] = 48,
+ [1][1][2][0][RTW89_IC][13] = 52,
+ [1][1][2][0][RTW89_KCC][13] = 52,
+ [1][1][2][0][RTW89_ACMA][13] = 54,
+ [1][1][2][0][RTW89_CN][13] = 42,
+ [1][1][2][0][RTW89_UK][13] = 54,
+ [1][1][2][0][RTW89_FCC][16] = 56,
[1][1][2][0][RTW89_ETSI][16] = 54,
[1][1][2][0][RTW89_MKK][16] = 66,
- [1][1][2][0][RTW89_IC][16] = 58,
- [1][1][2][0][RTW89_ACMA][16] = 60,
+ [1][1][2][0][RTW89_IC][16] = 56,
+ [1][1][2][0][RTW89_KCC][16] = 54,
+ [1][1][2][0][RTW89_ACMA][16] = 54,
+ [1][1][2][0][RTW89_CN][16] = 127,
+ [1][1][2][0][RTW89_UK][16] = 54,
[1][1][2][0][RTW89_FCC][20] = 68,
[1][1][2][0][RTW89_ETSI][20] = 54,
[1][1][2][0][RTW89_MKK][20] = 66,
- [1][1][2][0][RTW89_IC][20] = 66,
- [1][1][2][0][RTW89_ACMA][20] = 60,
+ [1][1][2][0][RTW89_IC][20] = 68,
+ [1][1][2][0][RTW89_KCC][20] = 54,
+ [1][1][2][0][RTW89_ACMA][20] = 54,
+ [1][1][2][0][RTW89_CN][20] = 127,
+ [1][1][2][0][RTW89_UK][20] = 54,
[1][1][2][0][RTW89_FCC][24] = 68,
[1][1][2][0][RTW89_ETSI][24] = 54,
[1][1][2][0][RTW89_MKK][24] = 66,
[1][1][2][0][RTW89_IC][24] = 127,
+ [1][1][2][0][RTW89_KCC][24] = 54,
[1][1][2][0][RTW89_ACMA][24] = 127,
+ [1][1][2][0][RTW89_CN][24] = 127,
+ [1][1][2][0][RTW89_UK][24] = 54,
[1][1][2][0][RTW89_FCC][28] = 68,
[1][1][2][0][RTW89_ETSI][28] = 54,
[1][1][2][0][RTW89_MKK][28] = 66,
[1][1][2][0][RTW89_IC][28] = 127,
+ [1][1][2][0][RTW89_KCC][28] = 66,
[1][1][2][0][RTW89_ACMA][28] = 127,
- [1][1][2][0][RTW89_FCC][32] = 60,
+ [1][1][2][0][RTW89_CN][28] = 127,
+ [1][1][2][0][RTW89_UK][28] = 54,
+ [1][1][2][0][RTW89_FCC][32] = 56,
[1][1][2][0][RTW89_ETSI][32] = 54,
[1][1][2][0][RTW89_MKK][32] = 66,
- [1][1][2][0][RTW89_IC][32] = 66,
+ [1][1][2][0][RTW89_IC][32] = 56,
+ [1][1][2][0][RTW89_KCC][32] = 66,
[1][1][2][0][RTW89_ACMA][32] = 54,
+ [1][1][2][0][RTW89_CN][32] = 127,
+ [1][1][2][0][RTW89_UK][32] = 54,
[1][1][2][0][RTW89_FCC][36] = 68,
[1][1][2][0][RTW89_ETSI][36] = 127,
[1][1][2][0][RTW89_MKK][36] = 66,
- [1][1][2][0][RTW89_IC][36] = 66,
+ [1][1][2][0][RTW89_IC][36] = 68,
+ [1][1][2][0][RTW89_KCC][36] = 66,
[1][1][2][0][RTW89_ACMA][36] = 66,
+ [1][1][2][0][RTW89_CN][36] = 127,
+ [1][1][2][0][RTW89_UK][36] = 52,
[1][1][2][0][RTW89_FCC][39] = 68,
[1][1][2][0][RTW89_ETSI][39] = 18,
[1][1][2][0][RTW89_MKK][39] = 127,
- [1][1][2][0][RTW89_IC][39] = 66,
+ [1][1][2][0][RTW89_IC][39] = 68,
+ [1][1][2][0][RTW89_KCC][39] = 56,
[1][1][2][0][RTW89_ACMA][39] = 66,
+ [1][1][2][0][RTW89_CN][39] = 62,
+ [1][1][2][0][RTW89_UK][39] = 52,
[1][1][2][0][RTW89_FCC][43] = 68,
[1][1][2][0][RTW89_ETSI][43] = 18,
[1][1][2][0][RTW89_MKK][43] = 127,
- [1][1][2][0][RTW89_IC][43] = 66,
+ [1][1][2][0][RTW89_IC][43] = 68,
+ [1][1][2][0][RTW89_KCC][43] = 56,
[1][1][2][0][RTW89_ACMA][43] = 66,
- [1][1][2][0][RTW89_FCC][47] = 60,
+ [1][1][2][0][RTW89_CN][43] = 66,
+ [1][1][2][0][RTW89_UK][43] = 52,
+ [1][1][2][0][RTW89_FCC][47] = 62,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
[1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
- [1][1][2][0][RTW89_FCC][51] = 58,
+ [1][1][2][0][RTW89_CN][47] = 127,
+ [1][1][2][0][RTW89_UK][47] = 127,
+ [1][1][2][0][RTW89_FCC][51] = 60,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
[1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
+ [1][1][2][0][RTW89_CN][51] = 127,
+ [1][1][2][0][RTW89_UK][51] = 127,
[1][1][2][1][RTW89_FCC][1] = 54,
[1][1][2][1][RTW89_ETSI][1] = 40,
[1][1][2][1][RTW89_MKK][1] = 48,
- [1][1][2][1][RTW89_IC][1] = 48,
- [1][1][2][1][RTW89_ACMA][1] = 48,
- [1][1][2][1][RTW89_FCC][5] = 60,
+ [1][1][2][1][RTW89_IC][1] = 40,
+ [1][1][2][1][RTW89_KCC][1] = 54,
+ [1][1][2][1][RTW89_ACMA][1] = 40,
+ [1][1][2][1][RTW89_CN][1] = 42,
+ [1][1][2][1][RTW89_UK][1] = 40,
+ [1][1][2][1][RTW89_FCC][5] = 68,
[1][1][2][1][RTW89_ETSI][5] = 40,
[1][1][2][1][RTW89_MKK][5] = 52,
- [1][1][2][1][RTW89_IC][5] = 48,
- [1][1][2][1][RTW89_ACMA][5] = 48,
- [1][1][2][1][RTW89_FCC][9] = 60,
+ [1][1][2][1][RTW89_IC][5] = 40,
+ [1][1][2][1][RTW89_KCC][5] = 54,
+ [1][1][2][1][RTW89_ACMA][5] = 40,
+ [1][1][2][1][RTW89_CN][5] = 42,
+ [1][1][2][1][RTW89_UK][5] = 40,
+ [1][1][2][1][RTW89_FCC][9] = 68,
[1][1][2][1][RTW89_ETSI][9] = 40,
[1][1][2][1][RTW89_MKK][9] = 52,
- [1][1][2][1][RTW89_IC][9] = 48,
- [1][1][2][1][RTW89_ACMA][9] = 48,
+ [1][1][2][1][RTW89_IC][9] = 40,
+ [1][1][2][1][RTW89_KCC][9] = 64,
+ [1][1][2][1][RTW89_ACMA][9] = 40,
+ [1][1][2][1][RTW89_CN][9] = 42,
+ [1][1][2][1][RTW89_UK][9] = 40,
[1][1][2][1][RTW89_FCC][13] = 54,
[1][1][2][1][RTW89_ETSI][13] = 40,
[1][1][2][1][RTW89_MKK][13] = 52,
- [1][1][2][1][RTW89_IC][13] = 48,
- [1][1][2][1][RTW89_ACMA][13] = 48,
- [1][1][2][1][RTW89_FCC][16] = 48,
+ [1][1][2][1][RTW89_IC][13] = 40,
+ [1][1][2][1][RTW89_KCC][13] = 52,
+ [1][1][2][1][RTW89_ACMA][13] = 40,
+ [1][1][2][1][RTW89_CN][13] = 42,
+ [1][1][2][1][RTW89_UK][13] = 40,
+ [1][1][2][1][RTW89_FCC][16] = 56,
[1][1][2][1][RTW89_ETSI][16] = 40,
[1][1][2][1][RTW89_MKK][16] = 66,
- [1][1][2][1][RTW89_IC][16] = 58,
- [1][1][2][1][RTW89_ACMA][16] = 48,
- [1][1][2][1][RTW89_FCC][20] = 60,
+ [1][1][2][1][RTW89_IC][16] = 56,
+ [1][1][2][1][RTW89_KCC][16] = 54,
+ [1][1][2][1][RTW89_ACMA][16] = 40,
+ [1][1][2][1][RTW89_CN][16] = 127,
+ [1][1][2][1][RTW89_UK][16] = 40,
+ [1][1][2][1][RTW89_FCC][20] = 68,
[1][1][2][1][RTW89_ETSI][20] = 40,
[1][1][2][1][RTW89_MKK][20] = 66,
- [1][1][2][1][RTW89_IC][20] = 66,
- [1][1][2][1][RTW89_ACMA][20] = 48,
- [1][1][2][1][RTW89_FCC][24] = 60,
+ [1][1][2][1][RTW89_IC][20] = 68,
+ [1][1][2][1][RTW89_KCC][20] = 54,
+ [1][1][2][1][RTW89_ACMA][20] = 40,
+ [1][1][2][1][RTW89_CN][20] = 127,
+ [1][1][2][1][RTW89_UK][20] = 40,
+ [1][1][2][1][RTW89_FCC][24] = 68,
[1][1][2][1][RTW89_ETSI][24] = 40,
[1][1][2][1][RTW89_MKK][24] = 66,
[1][1][2][1][RTW89_IC][24] = 127,
+ [1][1][2][1][RTW89_KCC][24] = 54,
[1][1][2][1][RTW89_ACMA][24] = 127,
- [1][1][2][1][RTW89_FCC][28] = 60,
+ [1][1][2][1][RTW89_CN][24] = 127,
+ [1][1][2][1][RTW89_UK][24] = 40,
+ [1][1][2][1][RTW89_FCC][28] = 68,
[1][1][2][1][RTW89_ETSI][28] = 40,
[1][1][2][1][RTW89_MKK][28] = 66,
[1][1][2][1][RTW89_IC][28] = 127,
+ [1][1][2][1][RTW89_KCC][28] = 66,
[1][1][2][1][RTW89_ACMA][28] = 127,
- [1][1][2][1][RTW89_FCC][32] = 60,
+ [1][1][2][1][RTW89_CN][28] = 127,
+ [1][1][2][1][RTW89_UK][28] = 40,
+ [1][1][2][1][RTW89_FCC][32] = 56,
[1][1][2][1][RTW89_ETSI][32] = 40,
[1][1][2][1][RTW89_MKK][32] = 66,
- [1][1][2][1][RTW89_IC][32] = 66,
- [1][1][2][1][RTW89_ACMA][32] = 42,
- [1][1][2][1][RTW89_FCC][36] = 60,
+ [1][1][2][1][RTW89_IC][32] = 56,
+ [1][1][2][1][RTW89_KCC][32] = 66,
+ [1][1][2][1][RTW89_ACMA][32] = 40,
+ [1][1][2][1][RTW89_CN][32] = 127,
+ [1][1][2][1][RTW89_UK][32] = 40,
+ [1][1][2][1][RTW89_FCC][36] = 68,
[1][1][2][1][RTW89_ETSI][36] = 127,
[1][1][2][1][RTW89_MKK][36] = 66,
- [1][1][2][1][RTW89_IC][36] = 66,
+ [1][1][2][1][RTW89_IC][36] = 68,
+ [1][1][2][1][RTW89_KCC][36] = 66,
[1][1][2][1][RTW89_ACMA][36] = 66,
+ [1][1][2][1][RTW89_CN][36] = 127,
+ [1][1][2][1][RTW89_UK][36] = 40,
[1][1][2][1][RTW89_FCC][39] = 68,
[1][1][2][1][RTW89_ETSI][39] = 6,
[1][1][2][1][RTW89_MKK][39] = 127,
- [1][1][2][1][RTW89_IC][39] = 66,
+ [1][1][2][1][RTW89_IC][39] = 68,
+ [1][1][2][1][RTW89_KCC][39] = 56,
[1][1][2][1][RTW89_ACMA][39] = 66,
+ [1][1][2][1][RTW89_CN][39] = 60,
+ [1][1][2][1][RTW89_UK][39] = 40,
[1][1][2][1][RTW89_FCC][43] = 68,
[1][1][2][1][RTW89_ETSI][43] = 6,
[1][1][2][1][RTW89_MKK][43] = 127,
- [1][1][2][1][RTW89_IC][43] = 66,
+ [1][1][2][1][RTW89_IC][43] = 68,
+ [1][1][2][1][RTW89_KCC][43] = 56,
[1][1][2][1][RTW89_ACMA][43] = 66,
- [1][1][2][1][RTW89_FCC][47] = 60,
+ [1][1][2][1][RTW89_CN][43] = 52,
+ [1][1][2][1][RTW89_UK][43] = 40,
+ [1][1][2][1][RTW89_FCC][47] = 62,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
[1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
- [1][1][2][1][RTW89_FCC][51] = 58,
+ [1][1][2][1][RTW89_CN][47] = 127,
+ [1][1][2][1][RTW89_UK][47] = 127,
+ [1][1][2][1][RTW89_FCC][51] = 60,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
[1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
- [2][0][2][0][RTW89_FCC][3] = 56,
+ [1][1][2][1][RTW89_CN][51] = 127,
+ [1][1][2][1][RTW89_UK][51] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 58,
[2][0][2][0][RTW89_ETSI][3] = 60,
[2][0][2][0][RTW89_MKK][3] = 60,
- [2][0][2][0][RTW89_IC][3] = 60,
+ [2][0][2][0][RTW89_IC][3] = 56,
+ [2][0][2][0][RTW89_KCC][3] = 60,
[2][0][2][0][RTW89_ACMA][3] = 60,
- [2][0][2][0][RTW89_FCC][11] = 58,
+ [2][0][2][0][RTW89_CN][3] = 54,
+ [2][0][2][0][RTW89_UK][3] = 60,
+ [2][0][2][0][RTW89_FCC][11] = 50,
[2][0][2][0][RTW89_ETSI][11] = 60,
[2][0][2][0][RTW89_MKK][11] = 60,
- [2][0][2][0][RTW89_IC][11] = 60,
+ [2][0][2][0][RTW89_IC][11] = 50,
+ [2][0][2][0][RTW89_KCC][11] = 58,
[2][0][2][0][RTW89_ACMA][11] = 60,
- [2][0][2][0][RTW89_FCC][18] = 54,
+ [2][0][2][0][RTW89_CN][11] = 54,
+ [2][0][2][0][RTW89_UK][11] = 60,
+ [2][0][2][0][RTW89_FCC][18] = 60,
[2][0][2][0][RTW89_ETSI][18] = 60,
[2][0][2][0][RTW89_MKK][18] = 60,
[2][0][2][0][RTW89_IC][18] = 60,
+ [2][0][2][0][RTW89_KCC][18] = 56,
[2][0][2][0][RTW89_ACMA][18] = 60,
+ [2][0][2][0][RTW89_CN][18] = 127,
+ [2][0][2][0][RTW89_UK][18] = 60,
[2][0][2][0][RTW89_FCC][26] = 62,
[2][0][2][0][RTW89_ETSI][26] = 60,
[2][0][2][0][RTW89_MKK][26] = 60,
[2][0][2][0][RTW89_IC][26] = 127,
+ [2][0][2][0][RTW89_KCC][26] = 60,
[2][0][2][0][RTW89_ACMA][26] = 127,
+ [2][0][2][0][RTW89_CN][26] = 127,
+ [2][0][2][0][RTW89_UK][26] = 60,
[2][0][2][0][RTW89_FCC][34] = 62,
[2][0][2][0][RTW89_ETSI][34] = 127,
[2][0][2][0][RTW89_MKK][34] = 60,
- [2][0][2][0][RTW89_IC][34] = 60,
+ [2][0][2][0][RTW89_IC][34] = 62,
+ [2][0][2][0][RTW89_KCC][34] = 60,
[2][0][2][0][RTW89_ACMA][34] = 60,
+ [2][0][2][0][RTW89_CN][34] = 127,
+ [2][0][2][0][RTW89_UK][34] = 60,
[2][0][2][0][RTW89_FCC][41] = 62,
[2][0][2][0][RTW89_ETSI][41] = 30,
[2][0][2][0][RTW89_MKK][41] = 127,
- [2][0][2][0][RTW89_IC][41] = 60,
+ [2][0][2][0][RTW89_IC][41] = 62,
+ [2][0][2][0][RTW89_KCC][41] = 58,
[2][0][2][0][RTW89_ACMA][41] = 60,
- [2][0][2][0][RTW89_FCC][49] = 56,
+ [2][0][2][0][RTW89_CN][41] = 62,
+ [2][0][2][0][RTW89_UK][41] = 60,
+ [2][0][2][0][RTW89_FCC][49] = 62,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
[2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
+ [2][0][2][0][RTW89_CN][49] = 127,
+ [2][0][2][0][RTW89_UK][49] = 127,
[2][1][2][0][RTW89_FCC][3] = 48,
[2][1][2][0][RTW89_ETSI][3] = 54,
[2][1][2][0][RTW89_MKK][3] = 56,
- [2][1][2][0][RTW89_IC][3] = 52,
- [2][1][2][0][RTW89_ACMA][3] = 52,
- [2][1][2][0][RTW89_FCC][11] = 54,
+ [2][1][2][0][RTW89_IC][3] = 46,
+ [2][1][2][0][RTW89_KCC][3] = 56,
+ [2][1][2][0][RTW89_ACMA][3] = 54,
+ [2][1][2][0][RTW89_CN][3] = 52,
+ [2][1][2][0][RTW89_UK][3] = 54,
+ [2][1][2][0][RTW89_FCC][11] = 38,
[2][1][2][0][RTW89_ETSI][11] = 54,
[2][1][2][0][RTW89_MKK][11] = 54,
- [2][1][2][0][RTW89_IC][11] = 52,
- [2][1][2][0][RTW89_ACMA][11] = 52,
- [2][1][2][0][RTW89_FCC][18] = 48,
+ [2][1][2][0][RTW89_IC][11] = 38,
+ [2][1][2][0][RTW89_KCC][11] = 52,
+ [2][1][2][0][RTW89_ACMA][11] = 54,
+ [2][1][2][0][RTW89_CN][11] = 52,
+ [2][1][2][0][RTW89_UK][11] = 54,
+ [2][1][2][0][RTW89_FCC][18] = 50,
[2][1][2][0][RTW89_ETSI][18] = 54,
[2][1][2][0][RTW89_MKK][18] = 60,
- [2][1][2][0][RTW89_IC][18] = 58,
- [2][1][2][0][RTW89_ACMA][18] = 52,
- [2][1][2][0][RTW89_FCC][26] = 62,
+ [2][1][2][0][RTW89_IC][18] = 50,
+ [2][1][2][0][RTW89_KCC][18] = 54,
+ [2][1][2][0][RTW89_ACMA][18] = 54,
+ [2][1][2][0][RTW89_CN][18] = 127,
+ [2][1][2][0][RTW89_UK][18] = 54,
+ [2][1][2][0][RTW89_FCC][26] = 52,
[2][1][2][0][RTW89_ETSI][26] = 54,
[2][1][2][0][RTW89_MKK][26] = 56,
[2][1][2][0][RTW89_IC][26] = 127,
+ [2][1][2][0][RTW89_KCC][26] = 60,
[2][1][2][0][RTW89_ACMA][26] = 127,
+ [2][1][2][0][RTW89_CN][26] = 127,
+ [2][1][2][0][RTW89_UK][26] = 54,
[2][1][2][0][RTW89_FCC][34] = 62,
[2][1][2][0][RTW89_ETSI][34] = 127,
[2][1][2][0][RTW89_MKK][34] = 60,
- [2][1][2][0][RTW89_IC][34] = 60,
+ [2][1][2][0][RTW89_IC][34] = 62,
+ [2][1][2][0][RTW89_KCC][34] = 60,
[2][1][2][0][RTW89_ACMA][34] = 60,
- [2][1][2][0][RTW89_FCC][41] = 62,
+ [2][1][2][0][RTW89_CN][34] = 127,
+ [2][1][2][0][RTW89_UK][34] = 52,
+ [2][1][2][0][RTW89_FCC][41] = 60,
[2][1][2][0][RTW89_ETSI][41] = 18,
[2][1][2][0][RTW89_MKK][41] = 127,
[2][1][2][0][RTW89_IC][41] = 60,
- [2][1][2][0][RTW89_ACMA][41] = 60,
- [2][1][2][0][RTW89_FCC][49] = 50,
+ [2][1][2][0][RTW89_KCC][41] = 50,
+ [2][1][2][0][RTW89_ACMA][41] = 58,
+ [2][1][2][0][RTW89_CN][41] = 62,
+ [2][1][2][0][RTW89_UK][41] = 52,
+ [2][1][2][0][RTW89_FCC][49] = 62,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
[2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
+ [2][1][2][0][RTW89_CN][49] = 127,
+ [2][1][2][0][RTW89_UK][49] = 127,
[2][1][2][1][RTW89_FCC][3] = 48,
[2][1][2][1][RTW89_ETSI][3] = 40,
[2][1][2][1][RTW89_MKK][3] = 56,
[2][1][2][1][RTW89_IC][3] = 40,
+ [2][1][2][1][RTW89_KCC][3] = 56,
[2][1][2][1][RTW89_ACMA][3] = 40,
- [2][1][2][1][RTW89_FCC][11] = 54,
+ [2][1][2][1][RTW89_CN][3] = 42,
+ [2][1][2][1][RTW89_UK][3] = 40,
+ [2][1][2][1][RTW89_FCC][11] = 38,
[2][1][2][1][RTW89_ETSI][11] = 40,
[2][1][2][1][RTW89_MKK][11] = 54,
- [2][1][2][1][RTW89_IC][11] = 40,
+ [2][1][2][1][RTW89_IC][11] = 38,
+ [2][1][2][1][RTW89_KCC][11] = 52,
[2][1][2][1][RTW89_ACMA][11] = 40,
- [2][1][2][1][RTW89_FCC][18] = 48,
+ [2][1][2][1][RTW89_CN][11] = 42,
+ [2][1][2][1][RTW89_UK][11] = 40,
+ [2][1][2][1][RTW89_FCC][18] = 50,
[2][1][2][1][RTW89_ETSI][18] = 40,
[2][1][2][1][RTW89_MKK][18] = 60,
- [2][1][2][1][RTW89_IC][18] = 58,
+ [2][1][2][1][RTW89_IC][18] = 50,
+ [2][1][2][1][RTW89_KCC][18] = 54,
[2][1][2][1][RTW89_ACMA][18] = 40,
- [2][1][2][1][RTW89_FCC][26] = 60,
+ [2][1][2][1][RTW89_CN][18] = 127,
+ [2][1][2][1][RTW89_UK][18] = 40,
+ [2][1][2][1][RTW89_FCC][26] = 52,
[2][1][2][1][RTW89_ETSI][26] = 42,
[2][1][2][1][RTW89_MKK][26] = 56,
[2][1][2][1][RTW89_IC][26] = 127,
+ [2][1][2][1][RTW89_KCC][26] = 60,
[2][1][2][1][RTW89_ACMA][26] = 127,
- [2][1][2][1][RTW89_FCC][34] = 60,
+ [2][1][2][1][RTW89_CN][26] = 127,
+ [2][1][2][1][RTW89_UK][26] = 42,
+ [2][1][2][1][RTW89_FCC][34] = 62,
[2][1][2][1][RTW89_ETSI][34] = 127,
[2][1][2][1][RTW89_MKK][34] = 60,
- [2][1][2][1][RTW89_IC][34] = 60,
+ [2][1][2][1][RTW89_IC][34] = 62,
+ [2][1][2][1][RTW89_KCC][34] = 60,
[2][1][2][1][RTW89_ACMA][34] = 60,
- [2][1][2][1][RTW89_FCC][41] = 62,
+ [2][1][2][1][RTW89_CN][34] = 127,
+ [2][1][2][1][RTW89_UK][34] = 40,
+ [2][1][2][1][RTW89_FCC][41] = 60,
[2][1][2][1][RTW89_ETSI][41] = 6,
[2][1][2][1][RTW89_MKK][41] = 127,
[2][1][2][1][RTW89_IC][41] = 60,
- [2][1][2][1][RTW89_ACMA][41] = 60,
- [2][1][2][1][RTW89_FCC][49] = 50,
+ [2][1][2][1][RTW89_KCC][41] = 50,
+ [2][1][2][1][RTW89_ACMA][41] = 58,
+ [2][1][2][1][RTW89_CN][41] = 40,
+ [2][1][2][1][RTW89_UK][41] = 40,
+ [2][1][2][1][RTW89_FCC][49] = 62,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
[2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
- [3][0][2][0][RTW89_FCC][7] = 38,
+ [2][1][2][1][RTW89_CN][49] = 127,
+ [2][1][2][1][RTW89_UK][49] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 40,
[3][0][2][0][RTW89_ETSI][7] = 50,
[3][0][2][0][RTW89_MKK][7] = 50,
- [3][0][2][0][RTW89_IC][7] = 50,
- [3][0][2][0][RTW89_ACMA][7] = 50,
- [3][0][2][0][RTW89_FCC][22] = 52,
+ [3][0][2][0][RTW89_IC][7] = 40,
+ [3][0][2][0][RTW89_KCC][7] = 44,
+ [3][0][2][0][RTW89_ACMA][7] = 127,
+ [3][0][2][0][RTW89_CN][7] = 66,
+ [3][0][2][0][RTW89_UK][7] = 127,
+ [3][0][2][0][RTW89_FCC][22] = 42,
[3][0][2][0][RTW89_ETSI][22] = 50,
[3][0][2][0][RTW89_MKK][22] = 50,
- [3][0][2][0][RTW89_IC][22] = 50,
- [3][0][2][0][RTW89_ACMA][22] = 50,
- [3][0][2][0][RTW89_FCC][45] = 127,
+ [3][0][2][0][RTW89_IC][22] = 127,
+ [3][0][2][0][RTW89_KCC][22] = 50,
+ [3][0][2][0][RTW89_ACMA][22] = 127,
+ [3][0][2][0][RTW89_CN][22] = 66,
+ [3][0][2][0][RTW89_UK][22] = 127,
+ [3][0][2][0][RTW89_FCC][45] = 52,
[3][0][2][0][RTW89_ETSI][45] = 127,
[3][0][2][0][RTW89_MKK][45] = 127,
[3][0][2][0][RTW89_IC][45] = 127,
+ [3][0][2][0][RTW89_KCC][45] = 127,
[3][0][2][0][RTW89_ACMA][45] = 127,
- [3][1][2][0][RTW89_FCC][7] = 26,
+ [3][0][2][0][RTW89_CN][45] = 127,
+ [3][0][2][0][RTW89_UK][45] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 32,
[3][1][2][0][RTW89_ETSI][7] = 50,
[3][1][2][0][RTW89_MKK][7] = 36,
[3][1][2][0][RTW89_IC][7] = 44,
- [3][1][2][0][RTW89_ACMA][7] = 44,
- [3][1][2][0][RTW89_FCC][22] = 42,
+ [3][1][2][0][RTW89_KCC][7] = 50,
+ [3][1][2][0][RTW89_ACMA][7] = 127,
+ [3][1][2][0][RTW89_CN][7] = 54,
+ [3][1][2][0][RTW89_UK][7] = 127,
+ [3][1][2][0][RTW89_FCC][22] = 36,
[3][1][2][0][RTW89_ETSI][22] = 50,
[3][1][2][0][RTW89_MKK][22] = 48,
- [3][1][2][0][RTW89_IC][22] = 44,
- [3][1][2][0][RTW89_ACMA][22] = 44,
- [3][1][2][0][RTW89_FCC][45] = 127,
+ [3][1][2][0][RTW89_IC][22] = 127,
+ [3][1][2][0][RTW89_KCC][22] = 50,
+ [3][1][2][0][RTW89_ACMA][22] = 127,
+ [3][1][2][0][RTW89_CN][22] = 54,
+ [3][1][2][0][RTW89_UK][22] = 127,
+ [3][1][2][0][RTW89_FCC][45] = 46,
[3][1][2][0][RTW89_ETSI][45] = 127,
[3][1][2][0][RTW89_MKK][45] = 127,
[3][1][2][0][RTW89_IC][45] = 127,
+ [3][1][2][0][RTW89_KCC][45] = 127,
[3][1][2][0][RTW89_ACMA][45] = 127,
- [3][1][2][1][RTW89_FCC][7] = 14,
+ [3][1][2][0][RTW89_CN][45] = 127,
+ [3][1][2][0][RTW89_UK][45] = 127,
+ [3][1][2][1][RTW89_FCC][7] = 32,
[3][1][2][1][RTW89_ETSI][7] = 42,
[3][1][2][1][RTW89_MKK][7] = 36,
- [3][1][2][1][RTW89_IC][7] = 32,
- [3][1][2][1][RTW89_ACMA][7] = 32,
- [3][1][2][1][RTW89_FCC][22] = 30,
+ [3][1][2][1][RTW89_IC][7] = 44,
+ [3][1][2][1][RTW89_KCC][7] = 50,
+ [3][1][2][1][RTW89_ACMA][7] = 127,
+ [3][1][2][1][RTW89_CN][7] = 42,
+ [3][1][2][1][RTW89_UK][7] = 127,
+ [3][1][2][1][RTW89_FCC][22] = 36,
[3][1][2][1][RTW89_ETSI][22] = 42,
[3][1][2][1][RTW89_MKK][22] = 48,
- [3][1][2][1][RTW89_IC][22] = 32,
- [3][1][2][1][RTW89_ACMA][22] = 32,
- [3][1][2][1][RTW89_FCC][45] = 127,
+ [3][1][2][1][RTW89_IC][22] = 127,
+ [3][1][2][1][RTW89_KCC][22] = 50,
+ [3][1][2][1][RTW89_ACMA][22] = 127,
+ [3][1][2][1][RTW89_CN][22] = 42,
+ [3][1][2][1][RTW89_UK][22] = 127,
+ [3][1][2][1][RTW89_FCC][45] = 46,
[3][1][2][1][RTW89_ETSI][45] = 127,
[3][1][2][1][RTW89_MKK][45] = 127,
[3][1][2][1][RTW89_IC][45] = 127,
+ [3][1][2][1][RTW89_KCC][45] = 127,
[3][1][2][1][RTW89_ACMA][45] = 127,
+ [3][1][2][1][RTW89_CN][45] = 127,
+ [3][1][2][1][RTW89_UK][45] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[RTW89_RS_LMT_NUM][RTW89_BF_NUM]
[RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
- [0][0][1][0][RTW89_WW][0] = 72,
- [0][0][1][0][RTW89_WW][2] = 72,
- [0][0][1][0][RTW89_WW][4] = 72,
- [0][0][1][0][RTW89_WW][6] = 72,
- [0][0][1][0][RTW89_WW][8] = 72,
- [0][0][1][0][RTW89_WW][10] = 72,
- [0][0][1][0][RTW89_WW][12] = 72,
- [0][0][1][0][RTW89_WW][14] = 72,
- [0][0][1][0][RTW89_WW][15] = 72,
- [0][0][1][0][RTW89_WW][17] = 72,
- [0][0][1][0][RTW89_WW][19] = 72,
- [0][0][1][0][RTW89_WW][21] = 72,
- [0][0][1][0][RTW89_WW][23] = 72,
- [0][0][1][0][RTW89_WW][25] = 72,
- [0][0][1][0][RTW89_WW][27] = 72,
- [0][0][1][0][RTW89_WW][29] = 72,
- [0][0][1][0][RTW89_WW][30] = 72,
- [0][0][1][0][RTW89_WW][32] = 72,
- [0][0][1][0][RTW89_WW][34] = 72,
- [0][0][1][0][RTW89_WW][36] = 72,
- [0][0][1][0][RTW89_WW][38] = 72,
- [0][0][1][0][RTW89_WW][40] = 72,
- [0][0][1][0][RTW89_WW][42] = 72,
- [0][0][1][0][RTW89_WW][44] = 72,
- [0][0][1][0][RTW89_WW][45] = 72,
- [0][0][1][0][RTW89_WW][47] = 72,
- [0][0][1][0][RTW89_WW][49] = 72,
- [0][0][1][0][RTW89_WW][51] = 72,
- [0][0][1][0][RTW89_WW][53] = 72,
- [0][0][1][0][RTW89_WW][55] = 72,
- [0][0][1][0][RTW89_WW][57] = 72,
- [0][0][1][0][RTW89_WW][59] = 72,
- [0][0][1][0][RTW89_WW][60] = 72,
- [0][0][1][0][RTW89_WW][62] = 72,
- [0][0][1][0][RTW89_WW][64] = 72,
- [0][0][1][0][RTW89_WW][66] = 72,
- [0][0][1][0][RTW89_WW][68] = 72,
- [0][0][1][0][RTW89_WW][70] = 72,
- [0][0][1][0][RTW89_WW][72] = 72,
- [0][0][1][0][RTW89_WW][74] = 72,
- [0][0][1][0][RTW89_WW][75] = 72,
- [0][0][1][0][RTW89_WW][77] = 72,
- [0][0][1][0][RTW89_WW][79] = 72,
- [0][0][1][0][RTW89_WW][81] = 72,
- [0][0][1][0][RTW89_WW][83] = 72,
- [0][0][1][0][RTW89_WW][85] = 72,
- [0][0][1][0][RTW89_WW][87] = 72,
- [0][0][1][0][RTW89_WW][89] = 72,
- [0][0][1][0][RTW89_WW][90] = 72,
- [0][0][1][0][RTW89_WW][92] = 72,
- [0][0][1][0][RTW89_WW][94] = 72,
- [0][0][1][0][RTW89_WW][96] = 72,
- [0][0][1][0][RTW89_WW][98] = 72,
- [0][0][1][0][RTW89_WW][100] = 72,
- [0][0][1][0][RTW89_WW][102] = 72,
- [0][0][1][0][RTW89_WW][104] = 72,
- [0][0][1][0][RTW89_WW][105] = 72,
- [0][0][1][0][RTW89_WW][107] = 72,
- [0][0][1][0][RTW89_WW][109] = 72,
+ [0][0][1][0][RTW89_WW][0] = 24,
+ [0][0][1][0][RTW89_WW][2] = 22,
+ [0][0][1][0][RTW89_WW][4] = 22,
+ [0][0][1][0][RTW89_WW][6] = 22,
+ [0][0][1][0][RTW89_WW][8] = 22,
+ [0][0][1][0][RTW89_WW][10] = 22,
+ [0][0][1][0][RTW89_WW][12] = 22,
+ [0][0][1][0][RTW89_WW][14] = 22,
+ [0][0][1][0][RTW89_WW][15] = 22,
+ [0][0][1][0][RTW89_WW][17] = 22,
+ [0][0][1][0][RTW89_WW][19] = 22,
+ [0][0][1][0][RTW89_WW][21] = 22,
+ [0][0][1][0][RTW89_WW][23] = 22,
+ [0][0][1][0][RTW89_WW][25] = 22,
+ [0][0][1][0][RTW89_WW][27] = 22,
+ [0][0][1][0][RTW89_WW][29] = 22,
+ [0][0][1][0][RTW89_WW][30] = 22,
+ [0][0][1][0][RTW89_WW][32] = 22,
+ [0][0][1][0][RTW89_WW][34] = 22,
+ [0][0][1][0][RTW89_WW][36] = 22,
+ [0][0][1][0][RTW89_WW][38] = 22,
+ [0][0][1][0][RTW89_WW][40] = 22,
+ [0][0][1][0][RTW89_WW][42] = 22,
+ [0][0][1][0][RTW89_WW][44] = 22,
+ [0][0][1][0][RTW89_WW][45] = 22,
+ [0][0][1][0][RTW89_WW][47] = 22,
+ [0][0][1][0][RTW89_WW][49] = 24,
+ [0][0][1][0][RTW89_WW][51] = 22,
+ [0][0][1][0][RTW89_WW][53] = 22,
+ [0][0][1][0][RTW89_WW][55] = 22,
+ [0][0][1][0][RTW89_WW][57] = 22,
+ [0][0][1][0][RTW89_WW][59] = 22,
+ [0][0][1][0][RTW89_WW][60] = 22,
+ [0][0][1][0][RTW89_WW][62] = 22,
+ [0][0][1][0][RTW89_WW][64] = 22,
+ [0][0][1][0][RTW89_WW][66] = 22,
+ [0][0][1][0][RTW89_WW][68] = 22,
+ [0][0][1][0][RTW89_WW][70] = 24,
+ [0][0][1][0][RTW89_WW][72] = 22,
+ [0][0][1][0][RTW89_WW][74] = 22,
+ [0][0][1][0][RTW89_WW][75] = 22,
+ [0][0][1][0][RTW89_WW][77] = 22,
+ [0][0][1][0][RTW89_WW][79] = 22,
+ [0][0][1][0][RTW89_WW][81] = 22,
+ [0][0][1][0][RTW89_WW][83] = 22,
+ [0][0][1][0][RTW89_WW][85] = 22,
+ [0][0][1][0][RTW89_WW][87] = 22,
+ [0][0][1][0][RTW89_WW][89] = 22,
+ [0][0][1][0][RTW89_WW][90] = 22,
+ [0][0][1][0][RTW89_WW][92] = 22,
+ [0][0][1][0][RTW89_WW][94] = 22,
+ [0][0][1][0][RTW89_WW][96] = 22,
+ [0][0][1][0][RTW89_WW][98] = 22,
+ [0][0][1][0][RTW89_WW][100] = 22,
+ [0][0][1][0][RTW89_WW][102] = 22,
+ [0][0][1][0][RTW89_WW][104] = 22,
+ [0][0][1][0][RTW89_WW][105] = 22,
+ [0][0][1][0][RTW89_WW][107] = 24,
+ [0][0][1][0][RTW89_WW][109] = 24,
[0][0][1][0][RTW89_WW][111] = 0,
[0][0][1][0][RTW89_WW][113] = 0,
[0][0][1][0][RTW89_WW][115] = 0,
[0][0][1][0][RTW89_WW][117] = 0,
[0][0][1][0][RTW89_WW][119] = 0,
- [0][1][1][0][RTW89_WW][0] = 60,
- [0][1][1][0][RTW89_WW][2] = 60,
- [0][1][1][0][RTW89_WW][4] = 60,
- [0][1][1][0][RTW89_WW][6] = 60,
- [0][1][1][0][RTW89_WW][8] = 60,
- [0][1][1][0][RTW89_WW][10] = 60,
- [0][1][1][0][RTW89_WW][12] = 60,
- [0][1][1][0][RTW89_WW][14] = 60,
- [0][1][1][0][RTW89_WW][15] = 60,
- [0][1][1][0][RTW89_WW][17] = 60,
- [0][1][1][0][RTW89_WW][19] = 60,
- [0][1][1][0][RTW89_WW][21] = 60,
- [0][1][1][0][RTW89_WW][23] = 60,
- [0][1][1][0][RTW89_WW][25] = 60,
- [0][1][1][0][RTW89_WW][27] = 60,
- [0][1][1][0][RTW89_WW][29] = 60,
- [0][1][1][0][RTW89_WW][30] = 60,
- [0][1][1][0][RTW89_WW][32] = 60,
- [0][1][1][0][RTW89_WW][34] = 60,
- [0][1][1][0][RTW89_WW][36] = 60,
- [0][1][1][0][RTW89_WW][38] = 60,
- [0][1][1][0][RTW89_WW][40] = 60,
- [0][1][1][0][RTW89_WW][42] = 60,
- [0][1][1][0][RTW89_WW][44] = 60,
- [0][1][1][0][RTW89_WW][45] = 60,
- [0][1][1][0][RTW89_WW][47] = 60,
- [0][1][1][0][RTW89_WW][49] = 60,
- [0][1][1][0][RTW89_WW][51] = 60,
- [0][1][1][0][RTW89_WW][53] = 60,
- [0][1][1][0][RTW89_WW][55] = 60,
- [0][1][1][0][RTW89_WW][57] = 60,
- [0][1][1][0][RTW89_WW][59] = 60,
- [0][1][1][0][RTW89_WW][60] = 60,
- [0][1][1][0][RTW89_WW][62] = 60,
- [0][1][1][0][RTW89_WW][64] = 60,
- [0][1][1][0][RTW89_WW][66] = 60,
- [0][1][1][0][RTW89_WW][68] = 60,
- [0][1][1][0][RTW89_WW][70] = 60,
- [0][1][1][0][RTW89_WW][72] = 60,
- [0][1][1][0][RTW89_WW][74] = 60,
- [0][1][1][0][RTW89_WW][75] = 60,
- [0][1][1][0][RTW89_WW][77] = 60,
- [0][1][1][0][RTW89_WW][79] = 60,
- [0][1][1][0][RTW89_WW][81] = 60,
- [0][1][1][0][RTW89_WW][83] = 60,
- [0][1][1][0][RTW89_WW][85] = 60,
- [0][1][1][0][RTW89_WW][87] = 60,
- [0][1][1][0][RTW89_WW][89] = 60,
- [0][1][1][0][RTW89_WW][90] = 60,
- [0][1][1][0][RTW89_WW][92] = 60,
- [0][1][1][0][RTW89_WW][94] = 60,
- [0][1][1][0][RTW89_WW][96] = 60,
- [0][1][1][0][RTW89_WW][98] = 60,
- [0][1][1][0][RTW89_WW][100] = 60,
- [0][1][1][0][RTW89_WW][102] = 60,
- [0][1][1][0][RTW89_WW][104] = 60,
- [0][1][1][0][RTW89_WW][105] = 60,
- [0][1][1][0][RTW89_WW][107] = 60,
- [0][1][1][0][RTW89_WW][109] = 60,
+ [0][1][1][0][RTW89_WW][0] = -2,
+ [0][1][1][0][RTW89_WW][2] = -4,
+ [0][1][1][0][RTW89_WW][4] = -4,
+ [0][1][1][0][RTW89_WW][6] = -4,
+ [0][1][1][0][RTW89_WW][8] = -4,
+ [0][1][1][0][RTW89_WW][10] = -4,
+ [0][1][1][0][RTW89_WW][12] = -4,
+ [0][1][1][0][RTW89_WW][14] = -4,
+ [0][1][1][0][RTW89_WW][15] = -4,
+ [0][1][1][0][RTW89_WW][17] = -4,
+ [0][1][1][0][RTW89_WW][19] = -4,
+ [0][1][1][0][RTW89_WW][21] = -4,
+ [0][1][1][0][RTW89_WW][23] = -4,
+ [0][1][1][0][RTW89_WW][25] = -4,
+ [0][1][1][0][RTW89_WW][27] = -4,
+ [0][1][1][0][RTW89_WW][29] = -4,
+ [0][1][1][0][RTW89_WW][30] = -4,
+ [0][1][1][0][RTW89_WW][32] = -4,
+ [0][1][1][0][RTW89_WW][34] = -4,
+ [0][1][1][0][RTW89_WW][36] = -4,
+ [0][1][1][0][RTW89_WW][38] = -4,
+ [0][1][1][0][RTW89_WW][40] = -4,
+ [0][1][1][0][RTW89_WW][42] = -4,
+ [0][1][1][0][RTW89_WW][44] = -2,
+ [0][1][1][0][RTW89_WW][45] = -2,
+ [0][1][1][0][RTW89_WW][47] = -2,
+ [0][1][1][0][RTW89_WW][49] = -2,
+ [0][1][1][0][RTW89_WW][51] = -2,
+ [0][1][1][0][RTW89_WW][53] = -2,
+ [0][1][1][0][RTW89_WW][55] = -2,
+ [0][1][1][0][RTW89_WW][57] = -2,
+ [0][1][1][0][RTW89_WW][59] = -2,
+ [0][1][1][0][RTW89_WW][60] = -2,
+ [0][1][1][0][RTW89_WW][62] = -2,
+ [0][1][1][0][RTW89_WW][64] = -2,
+ [0][1][1][0][RTW89_WW][66] = -2,
+ [0][1][1][0][RTW89_WW][68] = -2,
+ [0][1][1][0][RTW89_WW][70] = -2,
+ [0][1][1][0][RTW89_WW][72] = -2,
+ [0][1][1][0][RTW89_WW][74] = -2,
+ [0][1][1][0][RTW89_WW][75] = -2,
+ [0][1][1][0][RTW89_WW][77] = -2,
+ [0][1][1][0][RTW89_WW][79] = -2,
+ [0][1][1][0][RTW89_WW][81] = -2,
+ [0][1][1][0][RTW89_WW][83] = -2,
+ [0][1][1][0][RTW89_WW][85] = -2,
+ [0][1][1][0][RTW89_WW][87] = -2,
+ [0][1][1][0][RTW89_WW][89] = -2,
+ [0][1][1][0][RTW89_WW][90] = -2,
+ [0][1][1][0][RTW89_WW][92] = -2,
+ [0][1][1][0][RTW89_WW][94] = -2,
+ [0][1][1][0][RTW89_WW][96] = -2,
+ [0][1][1][0][RTW89_WW][98] = -2,
+ [0][1][1][0][RTW89_WW][100] = -2,
+ [0][1][1][0][RTW89_WW][102] = -2,
+ [0][1][1][0][RTW89_WW][104] = -2,
+ [0][1][1][0][RTW89_WW][105] = -2,
+ [0][1][1][0][RTW89_WW][107] = 1,
+ [0][1][1][0][RTW89_WW][109] = 1,
[0][1][1][0][RTW89_WW][111] = 0,
[0][1][1][0][RTW89_WW][113] = 0,
[0][1][1][0][RTW89_WW][115] = 0,
[0][1][1][0][RTW89_WW][117] = 0,
[0][1][1][0][RTW89_WW][119] = 0,
- [0][0][2][0][RTW89_WW][0] = 72,
- [0][0][2][0][RTW89_WW][2] = 72,
- [0][0][2][0][RTW89_WW][4] = 72,
- [0][0][2][0][RTW89_WW][6] = 72,
- [0][0][2][0][RTW89_WW][8] = 72,
- [0][0][2][0][RTW89_WW][10] = 72,
- [0][0][2][0][RTW89_WW][12] = 72,
- [0][0][2][0][RTW89_WW][14] = 72,
- [0][0][2][0][RTW89_WW][15] = 72,
- [0][0][2][0][RTW89_WW][17] = 72,
- [0][0][2][0][RTW89_WW][19] = 72,
- [0][0][2][0][RTW89_WW][21] = 72,
- [0][0][2][0][RTW89_WW][23] = 72,
- [0][0][2][0][RTW89_WW][25] = 72,
- [0][0][2][0][RTW89_WW][27] = 72,
- [0][0][2][0][RTW89_WW][29] = 72,
- [0][0][2][0][RTW89_WW][30] = 72,
- [0][0][2][0][RTW89_WW][32] = 72,
- [0][0][2][0][RTW89_WW][34] = 72,
- [0][0][2][0][RTW89_WW][36] = 72,
- [0][0][2][0][RTW89_WW][38] = 72,
- [0][0][2][0][RTW89_WW][40] = 72,
- [0][0][2][0][RTW89_WW][42] = 72,
- [0][0][2][0][RTW89_WW][44] = 72,
- [0][0][2][0][RTW89_WW][45] = 72,
- [0][0][2][0][RTW89_WW][47] = 72,
- [0][0][2][0][RTW89_WW][49] = 72,
- [0][0][2][0][RTW89_WW][51] = 72,
- [0][0][2][0][RTW89_WW][53] = 72,
- [0][0][2][0][RTW89_WW][55] = 72,
- [0][0][2][0][RTW89_WW][57] = 72,
- [0][0][2][0][RTW89_WW][59] = 72,
- [0][0][2][0][RTW89_WW][60] = 72,
- [0][0][2][0][RTW89_WW][62] = 72,
- [0][0][2][0][RTW89_WW][64] = 72,
- [0][0][2][0][RTW89_WW][66] = 72,
- [0][0][2][0][RTW89_WW][68] = 72,
- [0][0][2][0][RTW89_WW][70] = 72,
- [0][0][2][0][RTW89_WW][72] = 72,
- [0][0][2][0][RTW89_WW][74] = 72,
- [0][0][2][0][RTW89_WW][75] = 72,
- [0][0][2][0][RTW89_WW][77] = 72,
- [0][0][2][0][RTW89_WW][79] = 72,
- [0][0][2][0][RTW89_WW][81] = 72,
- [0][0][2][0][RTW89_WW][83] = 72,
- [0][0][2][0][RTW89_WW][85] = 72,
- [0][0][2][0][RTW89_WW][87] = 72,
- [0][0][2][0][RTW89_WW][89] = 72,
- [0][0][2][0][RTW89_WW][90] = 72,
- [0][0][2][0][RTW89_WW][92] = 72,
- [0][0][2][0][RTW89_WW][94] = 72,
- [0][0][2][0][RTW89_WW][96] = 72,
- [0][0][2][0][RTW89_WW][98] = 72,
- [0][0][2][0][RTW89_WW][100] = 72,
- [0][0][2][0][RTW89_WW][102] = 72,
- [0][0][2][0][RTW89_WW][104] = 72,
- [0][0][2][0][RTW89_WW][105] = 72,
- [0][0][2][0][RTW89_WW][107] = 72,
- [0][0][2][0][RTW89_WW][109] = 72,
+ [0][0][2][0][RTW89_WW][0] = 24,
+ [0][0][2][0][RTW89_WW][2] = 22,
+ [0][0][2][0][RTW89_WW][4] = 22,
+ [0][0][2][0][RTW89_WW][6] = 22,
+ [0][0][2][0][RTW89_WW][8] = 22,
+ [0][0][2][0][RTW89_WW][10] = 22,
+ [0][0][2][0][RTW89_WW][12] = 22,
+ [0][0][2][0][RTW89_WW][14] = 22,
+ [0][0][2][0][RTW89_WW][15] = 22,
+ [0][0][2][0][RTW89_WW][17] = 22,
+ [0][0][2][0][RTW89_WW][19] = 22,
+ [0][0][2][0][RTW89_WW][21] = 22,
+ [0][0][2][0][RTW89_WW][23] = 22,
+ [0][0][2][0][RTW89_WW][25] = 22,
+ [0][0][2][0][RTW89_WW][27] = 22,
+ [0][0][2][0][RTW89_WW][29] = 22,
+ [0][0][2][0][RTW89_WW][30] = 22,
+ [0][0][2][0][RTW89_WW][32] = 22,
+ [0][0][2][0][RTW89_WW][34] = 22,
+ [0][0][2][0][RTW89_WW][36] = 22,
+ [0][0][2][0][RTW89_WW][38] = 22,
+ [0][0][2][0][RTW89_WW][40] = 22,
+ [0][0][2][0][RTW89_WW][42] = 22,
+ [0][0][2][0][RTW89_WW][44] = 22,
+ [0][0][2][0][RTW89_WW][45] = 22,
+ [0][0][2][0][RTW89_WW][47] = 22,
+ [0][0][2][0][RTW89_WW][49] = 24,
+ [0][0][2][0][RTW89_WW][51] = 22,
+ [0][0][2][0][RTW89_WW][53] = 22,
+ [0][0][2][0][RTW89_WW][55] = 22,
+ [0][0][2][0][RTW89_WW][57] = 22,
+ [0][0][2][0][RTW89_WW][59] = 22,
+ [0][0][2][0][RTW89_WW][60] = 22,
+ [0][0][2][0][RTW89_WW][62] = 22,
+ [0][0][2][0][RTW89_WW][64] = 22,
+ [0][0][2][0][RTW89_WW][66] = 22,
+ [0][0][2][0][RTW89_WW][68] = 22,
+ [0][0][2][0][RTW89_WW][70] = 24,
+ [0][0][2][0][RTW89_WW][72] = 22,
+ [0][0][2][0][RTW89_WW][74] = 22,
+ [0][0][2][0][RTW89_WW][75] = 22,
+ [0][0][2][0][RTW89_WW][77] = 22,
+ [0][0][2][0][RTW89_WW][79] = 22,
+ [0][0][2][0][RTW89_WW][81] = 22,
+ [0][0][2][0][RTW89_WW][83] = 22,
+ [0][0][2][0][RTW89_WW][85] = 22,
+ [0][0][2][0][RTW89_WW][87] = 22,
+ [0][0][2][0][RTW89_WW][89] = 22,
+ [0][0][2][0][RTW89_WW][90] = 22,
+ [0][0][2][0][RTW89_WW][92] = 22,
+ [0][0][2][0][RTW89_WW][94] = 22,
+ [0][0][2][0][RTW89_WW][96] = 22,
+ [0][0][2][0][RTW89_WW][98] = 22,
+ [0][0][2][0][RTW89_WW][100] = 22,
+ [0][0][2][0][RTW89_WW][102] = 22,
+ [0][0][2][0][RTW89_WW][104] = 22,
+ [0][0][2][0][RTW89_WW][105] = 22,
+ [0][0][2][0][RTW89_WW][107] = 24,
+ [0][0][2][0][RTW89_WW][109] = 24,
[0][0][2][0][RTW89_WW][111] = 0,
[0][0][2][0][RTW89_WW][113] = 0,
[0][0][2][0][RTW89_WW][115] = 0,
[0][0][2][0][RTW89_WW][117] = 0,
[0][0][2][0][RTW89_WW][119] = 0,
- [0][1][2][0][RTW89_WW][0] = 60,
- [0][1][2][0][RTW89_WW][2] = 60,
- [0][1][2][0][RTW89_WW][4] = 60,
- [0][1][2][0][RTW89_WW][6] = 60,
- [0][1][2][0][RTW89_WW][8] = 60,
- [0][1][2][0][RTW89_WW][10] = 60,
- [0][1][2][0][RTW89_WW][12] = 60,
- [0][1][2][0][RTW89_WW][14] = 60,
- [0][1][2][0][RTW89_WW][15] = 60,
- [0][1][2][0][RTW89_WW][17] = 60,
- [0][1][2][0][RTW89_WW][19] = 60,
- [0][1][2][0][RTW89_WW][21] = 60,
- [0][1][2][0][RTW89_WW][23] = 60,
- [0][1][2][0][RTW89_WW][25] = 60,
- [0][1][2][0][RTW89_WW][27] = 60,
- [0][1][2][0][RTW89_WW][29] = 60,
- [0][1][2][0][RTW89_WW][30] = 60,
- [0][1][2][0][RTW89_WW][32] = 60,
- [0][1][2][0][RTW89_WW][34] = 60,
- [0][1][2][0][RTW89_WW][36] = 60,
- [0][1][2][0][RTW89_WW][38] = 60,
- [0][1][2][0][RTW89_WW][40] = 60,
- [0][1][2][0][RTW89_WW][42] = 60,
- [0][1][2][0][RTW89_WW][44] = 60,
- [0][1][2][0][RTW89_WW][45] = 60,
- [0][1][2][0][RTW89_WW][47] = 60,
- [0][1][2][0][RTW89_WW][49] = 60,
- [0][1][2][0][RTW89_WW][51] = 60,
- [0][1][2][0][RTW89_WW][53] = 60,
- [0][1][2][0][RTW89_WW][55] = 60,
- [0][1][2][0][RTW89_WW][57] = 60,
- [0][1][2][0][RTW89_WW][59] = 60,
- [0][1][2][0][RTW89_WW][60] = 60,
- [0][1][2][0][RTW89_WW][62] = 60,
- [0][1][2][0][RTW89_WW][64] = 60,
- [0][1][2][0][RTW89_WW][66] = 60,
- [0][1][2][0][RTW89_WW][68] = 60,
- [0][1][2][0][RTW89_WW][70] = 60,
- [0][1][2][0][RTW89_WW][72] = 60,
- [0][1][2][0][RTW89_WW][74] = 60,
- [0][1][2][0][RTW89_WW][75] = 60,
- [0][1][2][0][RTW89_WW][77] = 60,
- [0][1][2][0][RTW89_WW][79] = 60,
- [0][1][2][0][RTW89_WW][81] = 60,
- [0][1][2][0][RTW89_WW][83] = 60,
- [0][1][2][0][RTW89_WW][85] = 60,
- [0][1][2][0][RTW89_WW][87] = 60,
- [0][1][2][0][RTW89_WW][89] = 60,
- [0][1][2][0][RTW89_WW][90] = 60,
- [0][1][2][0][RTW89_WW][92] = 60,
- [0][1][2][0][RTW89_WW][94] = 60,
- [0][1][2][0][RTW89_WW][96] = 60,
- [0][1][2][0][RTW89_WW][98] = 60,
- [0][1][2][0][RTW89_WW][100] = 60,
- [0][1][2][0][RTW89_WW][102] = 60,
- [0][1][2][0][RTW89_WW][104] = 60,
- [0][1][2][0][RTW89_WW][105] = 60,
- [0][1][2][0][RTW89_WW][107] = 60,
- [0][1][2][0][RTW89_WW][109] = 60,
+ [0][1][2][0][RTW89_WW][0] = -2,
+ [0][1][2][0][RTW89_WW][2] = -4,
+ [0][1][2][0][RTW89_WW][4] = -4,
+ [0][1][2][0][RTW89_WW][6] = -4,
+ [0][1][2][0][RTW89_WW][8] = -4,
+ [0][1][2][0][RTW89_WW][10] = -4,
+ [0][1][2][0][RTW89_WW][12] = -4,
+ [0][1][2][0][RTW89_WW][14] = -4,
+ [0][1][2][0][RTW89_WW][15] = -4,
+ [0][1][2][0][RTW89_WW][17] = -4,
+ [0][1][2][0][RTW89_WW][19] = -4,
+ [0][1][2][0][RTW89_WW][21] = -4,
+ [0][1][2][0][RTW89_WW][23] = -4,
+ [0][1][2][0][RTW89_WW][25] = -4,
+ [0][1][2][0][RTW89_WW][27] = -4,
+ [0][1][2][0][RTW89_WW][29] = -4,
+ [0][1][2][0][RTW89_WW][30] = -4,
+ [0][1][2][0][RTW89_WW][32] = -4,
+ [0][1][2][0][RTW89_WW][34] = -4,
+ [0][1][2][0][RTW89_WW][36] = -4,
+ [0][1][2][0][RTW89_WW][38] = -4,
+ [0][1][2][0][RTW89_WW][40] = -4,
+ [0][1][2][0][RTW89_WW][42] = -4,
+ [0][1][2][0][RTW89_WW][44] = -2,
+ [0][1][2][0][RTW89_WW][45] = -2,
+ [0][1][2][0][RTW89_WW][47] = -2,
+ [0][1][2][0][RTW89_WW][49] = -2,
+ [0][1][2][0][RTW89_WW][51] = -2,
+ [0][1][2][0][RTW89_WW][53] = -2,
+ [0][1][2][0][RTW89_WW][55] = -2,
+ [0][1][2][0][RTW89_WW][57] = -2,
+ [0][1][2][0][RTW89_WW][59] = -2,
+ [0][1][2][0][RTW89_WW][60] = -2,
+ [0][1][2][0][RTW89_WW][62] = -2,
+ [0][1][2][0][RTW89_WW][64] = -2,
+ [0][1][2][0][RTW89_WW][66] = -2,
+ [0][1][2][0][RTW89_WW][68] = -2,
+ [0][1][2][0][RTW89_WW][70] = -2,
+ [0][1][2][0][RTW89_WW][72] = -2,
+ [0][1][2][0][RTW89_WW][74] = -2,
+ [0][1][2][0][RTW89_WW][75] = -2,
+ [0][1][2][0][RTW89_WW][77] = -2,
+ [0][1][2][0][RTW89_WW][79] = -2,
+ [0][1][2][0][RTW89_WW][81] = -2,
+ [0][1][2][0][RTW89_WW][83] = -2,
+ [0][1][2][0][RTW89_WW][85] = -2,
+ [0][1][2][0][RTW89_WW][87] = -2,
+ [0][1][2][0][RTW89_WW][89] = -2,
+ [0][1][2][0][RTW89_WW][90] = -2,
+ [0][1][2][0][RTW89_WW][92] = -2,
+ [0][1][2][0][RTW89_WW][94] = -2,
+ [0][1][2][0][RTW89_WW][96] = -2,
+ [0][1][2][0][RTW89_WW][98] = -2,
+ [0][1][2][0][RTW89_WW][100] = -2,
+ [0][1][2][0][RTW89_WW][102] = -2,
+ [0][1][2][0][RTW89_WW][104] = -2,
+ [0][1][2][0][RTW89_WW][105] = -2,
+ [0][1][2][0][RTW89_WW][107] = 1,
+ [0][1][2][0][RTW89_WW][109] = 1,
[0][1][2][0][RTW89_WW][111] = 0,
[0][1][2][0][RTW89_WW][113] = 0,
[0][1][2][0][RTW89_WW][115] = 0,
[0][1][2][0][RTW89_WW][117] = 0,
[0][1][2][0][RTW89_WW][119] = 0,
- [0][1][2][1][RTW89_WW][0] = 48,
- [0][1][2][1][RTW89_WW][2] = 48,
- [0][1][2][1][RTW89_WW][4] = 48,
- [0][1][2][1][RTW89_WW][6] = 48,
- [0][1][2][1][RTW89_WW][8] = 48,
- [0][1][2][1][RTW89_WW][10] = 48,
- [0][1][2][1][RTW89_WW][12] = 48,
- [0][1][2][1][RTW89_WW][14] = 48,
- [0][1][2][1][RTW89_WW][15] = 48,
- [0][1][2][1][RTW89_WW][17] = 48,
- [0][1][2][1][RTW89_WW][19] = 48,
- [0][1][2][1][RTW89_WW][21] = 48,
- [0][1][2][1][RTW89_WW][23] = 48,
- [0][1][2][1][RTW89_WW][25] = 48,
- [0][1][2][1][RTW89_WW][27] = 48,
- [0][1][2][1][RTW89_WW][29] = 48,
- [0][1][2][1][RTW89_WW][30] = 48,
- [0][1][2][1][RTW89_WW][32] = 48,
- [0][1][2][1][RTW89_WW][34] = 48,
- [0][1][2][1][RTW89_WW][36] = 48,
- [0][1][2][1][RTW89_WW][38] = 48,
- [0][1][2][1][RTW89_WW][40] = 48,
- [0][1][2][1][RTW89_WW][42] = 48,
- [0][1][2][1][RTW89_WW][44] = 48,
- [0][1][2][1][RTW89_WW][45] = 48,
- [0][1][2][1][RTW89_WW][47] = 48,
- [0][1][2][1][RTW89_WW][49] = 48,
- [0][1][2][1][RTW89_WW][51] = 48,
- [0][1][2][1][RTW89_WW][53] = 48,
- [0][1][2][1][RTW89_WW][55] = 48,
- [0][1][2][1][RTW89_WW][57] = 48,
- [0][1][2][1][RTW89_WW][59] = 48,
- [0][1][2][1][RTW89_WW][60] = 48,
- [0][1][2][1][RTW89_WW][62] = 48,
- [0][1][2][1][RTW89_WW][64] = 48,
- [0][1][2][1][RTW89_WW][66] = 48,
- [0][1][2][1][RTW89_WW][68] = 48,
- [0][1][2][1][RTW89_WW][70] = 48,
- [0][1][2][1][RTW89_WW][72] = 48,
- [0][1][2][1][RTW89_WW][74] = 48,
- [0][1][2][1][RTW89_WW][75] = 48,
- [0][1][2][1][RTW89_WW][77] = 48,
- [0][1][2][1][RTW89_WW][79] = 48,
- [0][1][2][1][RTW89_WW][81] = 48,
- [0][1][2][1][RTW89_WW][83] = 48,
- [0][1][2][1][RTW89_WW][85] = 48,
- [0][1][2][1][RTW89_WW][87] = 48,
- [0][1][2][1][RTW89_WW][89] = 48,
- [0][1][2][1][RTW89_WW][90] = 48,
- [0][1][2][1][RTW89_WW][92] = 48,
- [0][1][2][1][RTW89_WW][94] = 48,
- [0][1][2][1][RTW89_WW][96] = 48,
- [0][1][2][1][RTW89_WW][98] = 48,
- [0][1][2][1][RTW89_WW][100] = 48,
- [0][1][2][1][RTW89_WW][102] = 48,
- [0][1][2][1][RTW89_WW][104] = 48,
- [0][1][2][1][RTW89_WW][105] = 48,
- [0][1][2][1][RTW89_WW][107] = 48,
- [0][1][2][1][RTW89_WW][109] = 48,
+ [0][1][2][1][RTW89_WW][0] = -2,
+ [0][1][2][1][RTW89_WW][2] = -4,
+ [0][1][2][1][RTW89_WW][4] = -4,
+ [0][1][2][1][RTW89_WW][6] = -4,
+ [0][1][2][1][RTW89_WW][8] = -4,
+ [0][1][2][1][RTW89_WW][10] = -4,
+ [0][1][2][1][RTW89_WW][12] = -4,
+ [0][1][2][1][RTW89_WW][14] = -4,
+ [0][1][2][1][RTW89_WW][15] = -4,
+ [0][1][2][1][RTW89_WW][17] = -4,
+ [0][1][2][1][RTW89_WW][19] = -4,
+ [0][1][2][1][RTW89_WW][21] = -4,
+ [0][1][2][1][RTW89_WW][23] = -4,
+ [0][1][2][1][RTW89_WW][25] = -4,
+ [0][1][2][1][RTW89_WW][27] = -4,
+ [0][1][2][1][RTW89_WW][29] = -4,
+ [0][1][2][1][RTW89_WW][30] = -4,
+ [0][1][2][1][RTW89_WW][32] = -4,
+ [0][1][2][1][RTW89_WW][34] = -4,
+ [0][1][2][1][RTW89_WW][36] = -4,
+ [0][1][2][1][RTW89_WW][38] = -4,
+ [0][1][2][1][RTW89_WW][40] = -4,
+ [0][1][2][1][RTW89_WW][42] = -4,
+ [0][1][2][1][RTW89_WW][44] = -2,
+ [0][1][2][1][RTW89_WW][45] = -2,
+ [0][1][2][1][RTW89_WW][47] = -2,
+ [0][1][2][1][RTW89_WW][49] = -2,
+ [0][1][2][1][RTW89_WW][51] = -2,
+ [0][1][2][1][RTW89_WW][53] = -2,
+ [0][1][2][1][RTW89_WW][55] = -2,
+ [0][1][2][1][RTW89_WW][57] = -2,
+ [0][1][2][1][RTW89_WW][59] = -2,
+ [0][1][2][1][RTW89_WW][60] = -2,
+ [0][1][2][1][RTW89_WW][62] = -2,
+ [0][1][2][1][RTW89_WW][64] = -2,
+ [0][1][2][1][RTW89_WW][66] = -2,
+ [0][1][2][1][RTW89_WW][68] = -2,
+ [0][1][2][1][RTW89_WW][70] = -2,
+ [0][1][2][1][RTW89_WW][72] = -2,
+ [0][1][2][1][RTW89_WW][74] = -2,
+ [0][1][2][1][RTW89_WW][75] = -2,
+ [0][1][2][1][RTW89_WW][77] = -2,
+ [0][1][2][1][RTW89_WW][79] = -2,
+ [0][1][2][1][RTW89_WW][81] = -2,
+ [0][1][2][1][RTW89_WW][83] = -2,
+ [0][1][2][1][RTW89_WW][85] = -2,
+ [0][1][2][1][RTW89_WW][87] = -2,
+ [0][1][2][1][RTW89_WW][89] = -2,
+ [0][1][2][1][RTW89_WW][90] = -2,
+ [0][1][2][1][RTW89_WW][92] = -2,
+ [0][1][2][1][RTW89_WW][94] = -2,
+ [0][1][2][1][RTW89_WW][96] = -2,
+ [0][1][2][1][RTW89_WW][98] = -2,
+ [0][1][2][1][RTW89_WW][100] = -2,
+ [0][1][2][1][RTW89_WW][102] = -2,
+ [0][1][2][1][RTW89_WW][104] = -2,
+ [0][1][2][1][RTW89_WW][105] = -2,
+ [0][1][2][1][RTW89_WW][107] = 1,
+ [0][1][2][1][RTW89_WW][109] = 1,
[0][1][2][1][RTW89_WW][111] = 0,
[0][1][2][1][RTW89_WW][113] = 0,
[0][1][2][1][RTW89_WW][115] = 0,
[0][1][2][1][RTW89_WW][117] = 0,
[0][1][2][1][RTW89_WW][119] = 0,
- [1][0][2][0][RTW89_WW][1] = 72,
- [1][0][2][0][RTW89_WW][5] = 72,
- [1][0][2][0][RTW89_WW][9] = 72,
- [1][0][2][0][RTW89_WW][13] = 72,
- [1][0][2][0][RTW89_WW][16] = 72,
- [1][0][2][0][RTW89_WW][20] = 72,
- [1][0][2][0][RTW89_WW][24] = 72,
- [1][0][2][0][RTW89_WW][28] = 72,
- [1][0][2][0][RTW89_WW][31] = 72,
- [1][0][2][0][RTW89_WW][35] = 72,
- [1][0][2][0][RTW89_WW][39] = 72,
- [1][0][2][0][RTW89_WW][43] = 72,
- [1][0][2][0][RTW89_WW][46] = 72,
- [1][0][2][0][RTW89_WW][50] = 72,
- [1][0][2][0][RTW89_WW][54] = 72,
- [1][0][2][0][RTW89_WW][58] = 72,
- [1][0][2][0][RTW89_WW][61] = 72,
- [1][0][2][0][RTW89_WW][65] = 72,
- [1][0][2][0][RTW89_WW][69] = 72,
- [1][0][2][0][RTW89_WW][73] = 72,
- [1][0][2][0][RTW89_WW][76] = 72,
- [1][0][2][0][RTW89_WW][80] = 72,
- [1][0][2][0][RTW89_WW][84] = 72,
- [1][0][2][0][RTW89_WW][88] = 72,
- [1][0][2][0][RTW89_WW][91] = 72,
- [1][0][2][0][RTW89_WW][95] = 72,
- [1][0][2][0][RTW89_WW][99] = 72,
- [1][0][2][0][RTW89_WW][103] = 72,
- [1][0][2][0][RTW89_WW][106] = 72,
+ [1][0][2][0][RTW89_WW][1] = 34,
+ [1][0][2][0][RTW89_WW][5] = 34,
+ [1][0][2][0][RTW89_WW][9] = 34,
+ [1][0][2][0][RTW89_WW][13] = 34,
+ [1][0][2][0][RTW89_WW][16] = 34,
+ [1][0][2][0][RTW89_WW][20] = 34,
+ [1][0][2][0][RTW89_WW][24] = 36,
+ [1][0][2][0][RTW89_WW][28] = 34,
+ [1][0][2][0][RTW89_WW][31] = 34,
+ [1][0][2][0][RTW89_WW][35] = 34,
+ [1][0][2][0][RTW89_WW][39] = 34,
+ [1][0][2][0][RTW89_WW][43] = 34,
+ [1][0][2][0][RTW89_WW][46] = 34,
+ [1][0][2][0][RTW89_WW][50] = 34,
+ [1][0][2][0][RTW89_WW][54] = 36,
+ [1][0][2][0][RTW89_WW][58] = 36,
+ [1][0][2][0][RTW89_WW][61] = 34,
+ [1][0][2][0][RTW89_WW][65] = 34,
+ [1][0][2][0][RTW89_WW][69] = 34,
+ [1][0][2][0][RTW89_WW][73] = 34,
+ [1][0][2][0][RTW89_WW][76] = 34,
+ [1][0][2][0][RTW89_WW][80] = 34,
+ [1][0][2][0][RTW89_WW][84] = 34,
+ [1][0][2][0][RTW89_WW][88] = 34,
+ [1][0][2][0][RTW89_WW][91] = 36,
+ [1][0][2][0][RTW89_WW][95] = 34,
+ [1][0][2][0][RTW89_WW][99] = 34,
+ [1][0][2][0][RTW89_WW][103] = 34,
+ [1][0][2][0][RTW89_WW][106] = 36,
[1][0][2][0][RTW89_WW][110] = 0,
[1][0][2][0][RTW89_WW][114] = 0,
[1][0][2][0][RTW89_WW][118] = 0,
- [1][1][2][0][RTW89_WW][1] = 60,
- [1][1][2][0][RTW89_WW][5] = 60,
- [1][1][2][0][RTW89_WW][9] = 60,
- [1][1][2][0][RTW89_WW][13] = 60,
- [1][1][2][0][RTW89_WW][16] = 60,
- [1][1][2][0][RTW89_WW][20] = 60,
- [1][1][2][0][RTW89_WW][24] = 60,
- [1][1][2][0][RTW89_WW][28] = 60,
- [1][1][2][0][RTW89_WW][31] = 60,
- [1][1][2][0][RTW89_WW][35] = 60,
- [1][1][2][0][RTW89_WW][39] = 60,
- [1][1][2][0][RTW89_WW][43] = 60,
- [1][1][2][0][RTW89_WW][46] = 60,
- [1][1][2][0][RTW89_WW][50] = 60,
- [1][1][2][0][RTW89_WW][54] = 60,
- [1][1][2][0][RTW89_WW][58] = 60,
- [1][1][2][0][RTW89_WW][61] = 60,
- [1][1][2][0][RTW89_WW][65] = 60,
- [1][1][2][0][RTW89_WW][69] = 60,
- [1][1][2][0][RTW89_WW][73] = 60,
- [1][1][2][0][RTW89_WW][76] = 60,
- [1][1][2][0][RTW89_WW][80] = 60,
- [1][1][2][0][RTW89_WW][84] = 60,
- [1][1][2][0][RTW89_WW][88] = 60,
- [1][1][2][0][RTW89_WW][91] = 60,
- [1][1][2][0][RTW89_WW][95] = 60,
- [1][1][2][0][RTW89_WW][99] = 60,
- [1][1][2][0][RTW89_WW][103] = 60,
- [1][1][2][0][RTW89_WW][106] = 60,
+ [1][1][2][0][RTW89_WW][1] = 10,
+ [1][1][2][0][RTW89_WW][5] = 10,
+ [1][1][2][0][RTW89_WW][9] = 10,
+ [1][1][2][0][RTW89_WW][13] = 10,
+ [1][1][2][0][RTW89_WW][16] = 10,
+ [1][1][2][0][RTW89_WW][20] = 10,
+ [1][1][2][0][RTW89_WW][24] = 10,
+ [1][1][2][0][RTW89_WW][28] = 10,
+ [1][1][2][0][RTW89_WW][31] = 10,
+ [1][1][2][0][RTW89_WW][35] = 10,
+ [1][1][2][0][RTW89_WW][39] = 10,
+ [1][1][2][0][RTW89_WW][43] = 10,
+ [1][1][2][0][RTW89_WW][46] = 12,
+ [1][1][2][0][RTW89_WW][50] = 12,
+ [1][1][2][0][RTW89_WW][54] = 10,
+ [1][1][2][0][RTW89_WW][58] = 10,
+ [1][1][2][0][RTW89_WW][61] = 10,
+ [1][1][2][0][RTW89_WW][65] = 10,
+ [1][1][2][0][RTW89_WW][69] = 10,
+ [1][1][2][0][RTW89_WW][73] = 10,
+ [1][1][2][0][RTW89_WW][76] = 10,
+ [1][1][2][0][RTW89_WW][80] = 10,
+ [1][1][2][0][RTW89_WW][84] = 10,
+ [1][1][2][0][RTW89_WW][88] = 10,
+ [1][1][2][0][RTW89_WW][91] = 12,
+ [1][1][2][0][RTW89_WW][95] = 10,
+ [1][1][2][0][RTW89_WW][99] = 10,
+ [1][1][2][0][RTW89_WW][103] = 10,
+ [1][1][2][0][RTW89_WW][106] = 12,
[1][1][2][0][RTW89_WW][110] = 0,
[1][1][2][0][RTW89_WW][114] = 0,
[1][1][2][0][RTW89_WW][118] = 0,
- [1][1][2][1][RTW89_WW][1] = 48,
- [1][1][2][1][RTW89_WW][5] = 48,
- [1][1][2][1][RTW89_WW][9] = 48,
- [1][1][2][1][RTW89_WW][13] = 48,
- [1][1][2][1][RTW89_WW][16] = 48,
- [1][1][2][1][RTW89_WW][20] = 48,
- [1][1][2][1][RTW89_WW][24] = 48,
- [1][1][2][1][RTW89_WW][28] = 48,
- [1][1][2][1][RTW89_WW][31] = 48,
- [1][1][2][1][RTW89_WW][35] = 48,
- [1][1][2][1][RTW89_WW][39] = 48,
- [1][1][2][1][RTW89_WW][43] = 48,
- [1][1][2][1][RTW89_WW][46] = 48,
- [1][1][2][1][RTW89_WW][50] = 48,
- [1][1][2][1][RTW89_WW][54] = 48,
- [1][1][2][1][RTW89_WW][58] = 48,
- [1][1][2][1][RTW89_WW][61] = 48,
- [1][1][2][1][RTW89_WW][65] = 48,
- [1][1][2][1][RTW89_WW][69] = 48,
- [1][1][2][1][RTW89_WW][73] = 48,
- [1][1][2][1][RTW89_WW][76] = 48,
- [1][1][2][1][RTW89_WW][80] = 48,
- [1][1][2][1][RTW89_WW][84] = 48,
- [1][1][2][1][RTW89_WW][88] = 48,
- [1][1][2][1][RTW89_WW][91] = 48,
- [1][1][2][1][RTW89_WW][95] = 48,
- [1][1][2][1][RTW89_WW][99] = 48,
- [1][1][2][1][RTW89_WW][103] = 48,
- [1][1][2][1][RTW89_WW][106] = 48,
+ [1][1][2][1][RTW89_WW][1] = 10,
+ [1][1][2][1][RTW89_WW][5] = 10,
+ [1][1][2][1][RTW89_WW][9] = 10,
+ [1][1][2][1][RTW89_WW][13] = 10,
+ [1][1][2][1][RTW89_WW][16] = 10,
+ [1][1][2][1][RTW89_WW][20] = 10,
+ [1][1][2][1][RTW89_WW][24] = 10,
+ [1][1][2][1][RTW89_WW][28] = 10,
+ [1][1][2][1][RTW89_WW][31] = 10,
+ [1][1][2][1][RTW89_WW][35] = 10,
+ [1][1][2][1][RTW89_WW][39] = 10,
+ [1][1][2][1][RTW89_WW][43] = 10,
+ [1][1][2][1][RTW89_WW][46] = 12,
+ [1][1][2][1][RTW89_WW][50] = 12,
+ [1][1][2][1][RTW89_WW][54] = 10,
+ [1][1][2][1][RTW89_WW][58] = 10,
+ [1][1][2][1][RTW89_WW][61] = 10,
+ [1][1][2][1][RTW89_WW][65] = 10,
+ [1][1][2][1][RTW89_WW][69] = 10,
+ [1][1][2][1][RTW89_WW][73] = 10,
+ [1][1][2][1][RTW89_WW][76] = 10,
+ [1][1][2][1][RTW89_WW][80] = 10,
+ [1][1][2][1][RTW89_WW][84] = 10,
+ [1][1][2][1][RTW89_WW][88] = 10,
+ [1][1][2][1][RTW89_WW][91] = 12,
+ [1][1][2][1][RTW89_WW][95] = 10,
+ [1][1][2][1][RTW89_WW][99] = 10,
+ [1][1][2][1][RTW89_WW][103] = 10,
+ [1][1][2][1][RTW89_WW][106] = 12,
[1][1][2][1][RTW89_WW][110] = 0,
[1][1][2][1][RTW89_WW][114] = 0,
[1][1][2][1][RTW89_WW][118] = 0,
- [2][0][2][0][RTW89_WW][3] = 64,
- [2][0][2][0][RTW89_WW][11] = 64,
- [2][0][2][0][RTW89_WW][18] = 64,
- [2][0][2][0][RTW89_WW][26] = 64,
- [2][0][2][0][RTW89_WW][33] = 64,
- [2][0][2][0][RTW89_WW][41] = 64,
- [2][0][2][0][RTW89_WW][48] = 64,
- [2][0][2][0][RTW89_WW][56] = 64,
- [2][0][2][0][RTW89_WW][63] = 64,
- [2][0][2][0][RTW89_WW][71] = 64,
- [2][0][2][0][RTW89_WW][78] = 64,
- [2][0][2][0][RTW89_WW][86] = 64,
- [2][0][2][0][RTW89_WW][93] = 64,
- [2][0][2][0][RTW89_WW][101] = 64,
+ [2][0][2][0][RTW89_WW][3] = 46,
+ [2][0][2][0][RTW89_WW][11] = 46,
+ [2][0][2][0][RTW89_WW][18] = 46,
+ [2][0][2][0][RTW89_WW][26] = 46,
+ [2][0][2][0][RTW89_WW][33] = 46,
+ [2][0][2][0][RTW89_WW][41] = 46,
+ [2][0][2][0][RTW89_WW][48] = 46,
+ [2][0][2][0][RTW89_WW][56] = 46,
+ [2][0][2][0][RTW89_WW][63] = 46,
+ [2][0][2][0][RTW89_WW][71] = 46,
+ [2][0][2][0][RTW89_WW][78] = 46,
+ [2][0][2][0][RTW89_WW][86] = 46,
+ [2][0][2][0][RTW89_WW][93] = 46,
+ [2][0][2][0][RTW89_WW][101] = 44,
[2][0][2][0][RTW89_WW][108] = 0,
[2][0][2][0][RTW89_WW][116] = 0,
- [2][1][2][0][RTW89_WW][3] = 52,
- [2][1][2][0][RTW89_WW][11] = 52,
- [2][1][2][0][RTW89_WW][18] = 52,
- [2][1][2][0][RTW89_WW][26] = 52,
- [2][1][2][0][RTW89_WW][33] = 52,
- [2][1][2][0][RTW89_WW][41] = 52,
- [2][1][2][0][RTW89_WW][48] = 52,
- [2][1][2][0][RTW89_WW][56] = 52,
- [2][1][2][0][RTW89_WW][63] = 52,
- [2][1][2][0][RTW89_WW][71] = 52,
- [2][1][2][0][RTW89_WW][78] = 52,
- [2][1][2][0][RTW89_WW][86] = 52,
- [2][1][2][0][RTW89_WW][93] = 52,
- [2][1][2][0][RTW89_WW][101] = 52,
+ [2][1][2][0][RTW89_WW][3] = 22,
+ [2][1][2][0][RTW89_WW][11] = 20,
+ [2][1][2][0][RTW89_WW][18] = 20,
+ [2][1][2][0][RTW89_WW][26] = 20,
+ [2][1][2][0][RTW89_WW][33] = 20,
+ [2][1][2][0][RTW89_WW][41] = 22,
+ [2][1][2][0][RTW89_WW][48] = 22,
+ [2][1][2][0][RTW89_WW][56] = 20,
+ [2][1][2][0][RTW89_WW][63] = 22,
+ [2][1][2][0][RTW89_WW][71] = 20,
+ [2][1][2][0][RTW89_WW][78] = 20,
+ [2][1][2][0][RTW89_WW][86] = 20,
+ [2][1][2][0][RTW89_WW][93] = 22,
+ [2][1][2][0][RTW89_WW][101] = 22,
[2][1][2][0][RTW89_WW][108] = 0,
[2][1][2][0][RTW89_WW][116] = 0,
- [2][1][2][1][RTW89_WW][3] = 40,
- [2][1][2][1][RTW89_WW][11] = 40,
- [2][1][2][1][RTW89_WW][18] = 40,
- [2][1][2][1][RTW89_WW][26] = 40,
- [2][1][2][1][RTW89_WW][33] = 40,
- [2][1][2][1][RTW89_WW][41] = 40,
- [2][1][2][1][RTW89_WW][48] = 40,
- [2][1][2][1][RTW89_WW][56] = 40,
- [2][1][2][1][RTW89_WW][63] = 40,
- [2][1][2][1][RTW89_WW][71] = 40,
- [2][1][2][1][RTW89_WW][78] = 40,
- [2][1][2][1][RTW89_WW][86] = 40,
- [2][1][2][1][RTW89_WW][93] = 40,
- [2][1][2][1][RTW89_WW][101] = 40,
+ [2][1][2][1][RTW89_WW][3] = 22,
+ [2][1][2][1][RTW89_WW][11] = 20,
+ [2][1][2][1][RTW89_WW][18] = 20,
+ [2][1][2][1][RTW89_WW][26] = 20,
+ [2][1][2][1][RTW89_WW][33] = 20,
+ [2][1][2][1][RTW89_WW][41] = 22,
+ [2][1][2][1][RTW89_WW][48] = 22,
+ [2][1][2][1][RTW89_WW][56] = 20,
+ [2][1][2][1][RTW89_WW][63] = 22,
+ [2][1][2][1][RTW89_WW][71] = 20,
+ [2][1][2][1][RTW89_WW][78] = 20,
+ [2][1][2][1][RTW89_WW][86] = 20,
+ [2][1][2][1][RTW89_WW][93] = 22,
+ [2][1][2][1][RTW89_WW][101] = 22,
[2][1][2][1][RTW89_WW][108] = 0,
[2][1][2][1][RTW89_WW][116] = 0,
- [3][0][2][0][RTW89_WW][7] = 56,
- [3][0][2][0][RTW89_WW][22] = 56,
- [3][0][2][0][RTW89_WW][37] = 56,
- [3][0][2][0][RTW89_WW][52] = 56,
- [3][0][2][0][RTW89_WW][67] = 56,
- [3][0][2][0][RTW89_WW][82] = 56,
- [3][0][2][0][RTW89_WW][97] = 56,
+ [3][0][2][0][RTW89_WW][7] = 38,
+ [3][0][2][0][RTW89_WW][22] = 38,
+ [3][0][2][0][RTW89_WW][37] = 38,
+ [3][0][2][0][RTW89_WW][52] = 54,
+ [3][0][2][0][RTW89_WW][67] = 54,
+ [3][0][2][0][RTW89_WW][82] = 26,
+ [3][0][2][0][RTW89_WW][97] = 26,
[3][0][2][0][RTW89_WW][112] = 0,
- [3][1][2][0][RTW89_WW][7] = 44,
- [3][1][2][0][RTW89_WW][22] = 44,
- [3][1][2][0][RTW89_WW][37] = 44,
- [3][1][2][0][RTW89_WW][52] = 44,
- [3][1][2][0][RTW89_WW][67] = 44,
- [3][1][2][0][RTW89_WW][82] = 44,
- [3][1][2][0][RTW89_WW][97] = 44,
+ [3][1][2][0][RTW89_WW][7] = 32,
+ [3][1][2][0][RTW89_WW][22] = 30,
+ [3][1][2][0][RTW89_WW][37] = 30,
+ [3][1][2][0][RTW89_WW][52] = 30,
+ [3][1][2][0][RTW89_WW][67] = 32,
+ [3][1][2][0][RTW89_WW][82] = 24,
+ [3][1][2][0][RTW89_WW][97] = 14,
[3][1][2][0][RTW89_WW][112] = 0,
[3][1][2][1][RTW89_WW][7] = 32,
- [3][1][2][1][RTW89_WW][22] = 32,
- [3][1][2][1][RTW89_WW][37] = 32,
- [3][1][2][1][RTW89_WW][52] = 32,
+ [3][1][2][1][RTW89_WW][22] = 30,
+ [3][1][2][1][RTW89_WW][37] = 30,
+ [3][1][2][1][RTW89_WW][52] = 30,
[3][1][2][1][RTW89_WW][67] = 32,
- [3][1][2][1][RTW89_WW][82] = 32,
- [3][1][2][1][RTW89_WW][97] = 32,
+ [3][1][2][1][RTW89_WW][82] = 24,
+ [3][1][2][1][RTW89_WW][97] = 14,
[3][1][2][1][RTW89_WW][112] = 0,
- [0][0][1][0][RTW89_FCC][0] = 72,
- [0][0][1][0][RTW89_FCC][2] = 72,
- [0][0][1][0][RTW89_FCC][4] = 72,
- [0][0][1][0][RTW89_FCC][6] = 72,
- [0][0][1][0][RTW89_FCC][8] = 72,
- [0][0][1][0][RTW89_FCC][10] = 72,
- [0][0][1][0][RTW89_FCC][12] = 72,
- [0][0][1][0][RTW89_FCC][14] = 72,
- [0][0][1][0][RTW89_FCC][15] = 72,
- [0][0][1][0][RTW89_FCC][17] = 72,
- [0][0][1][0][RTW89_FCC][19] = 72,
- [0][0][1][0][RTW89_FCC][21] = 72,
- [0][0][1][0][RTW89_FCC][23] = 72,
- [0][0][1][0][RTW89_FCC][25] = 72,
- [0][0][1][0][RTW89_FCC][27] = 72,
- [0][0][1][0][RTW89_FCC][29] = 72,
- [0][0][1][0][RTW89_FCC][30] = 72,
- [0][0][1][0][RTW89_FCC][32] = 72,
- [0][0][1][0][RTW89_FCC][34] = 72,
- [0][0][1][0][RTW89_FCC][36] = 72,
- [0][0][1][0][RTW89_FCC][38] = 72,
- [0][0][1][0][RTW89_FCC][40] = 72,
- [0][0][1][0][RTW89_FCC][42] = 72,
- [0][0][1][0][RTW89_FCC][44] = 72,
- [0][0][1][0][RTW89_FCC][45] = 72,
- [0][0][1][0][RTW89_FCC][47] = 72,
- [0][0][1][0][RTW89_FCC][49] = 72,
- [0][0][1][0][RTW89_FCC][51] = 72,
- [0][0][1][0][RTW89_FCC][53] = 72,
- [0][0][1][0][RTW89_FCC][55] = 72,
- [0][0][1][0][RTW89_FCC][57] = 72,
- [0][0][1][0][RTW89_FCC][59] = 72,
- [0][0][1][0][RTW89_FCC][60] = 72,
- [0][0][1][0][RTW89_FCC][62] = 72,
- [0][0][1][0][RTW89_FCC][64] = 72,
- [0][0][1][0][RTW89_FCC][66] = 72,
- [0][0][1][0][RTW89_FCC][68] = 72,
- [0][0][1][0][RTW89_FCC][70] = 72,
- [0][0][1][0][RTW89_FCC][72] = 72,
- [0][0][1][0][RTW89_FCC][74] = 72,
- [0][0][1][0][RTW89_FCC][75] = 72,
- [0][0][1][0][RTW89_FCC][77] = 72,
- [0][0][1][0][RTW89_FCC][79] = 72,
- [0][0][1][0][RTW89_FCC][81] = 72,
- [0][0][1][0][RTW89_FCC][83] = 72,
- [0][0][1][0][RTW89_FCC][85] = 72,
- [0][0][1][0][RTW89_FCC][87] = 72,
- [0][0][1][0][RTW89_FCC][89] = 72,
- [0][0][1][0][RTW89_FCC][90] = 72,
- [0][0][1][0][RTW89_FCC][92] = 72,
- [0][0][1][0][RTW89_FCC][94] = 72,
- [0][0][1][0][RTW89_FCC][96] = 72,
- [0][0][1][0][RTW89_FCC][98] = 72,
- [0][0][1][0][RTW89_FCC][100] = 72,
- [0][0][1][0][RTW89_FCC][102] = 72,
- [0][0][1][0][RTW89_FCC][104] = 72,
- [0][0][1][0][RTW89_FCC][105] = 72,
- [0][0][1][0][RTW89_FCC][107] = 72,
- [0][0][1][0][RTW89_FCC][109] = 72,
+ [0][0][1][0][RTW89_FCC][0] = 24,
+ [0][0][1][0][RTW89_ETSI][0] = 66,
+ [0][0][1][0][RTW89_KCC][0] = 24,
+ [0][0][1][0][RTW89_FCC][2] = 22,
+ [0][0][1][0][RTW89_ETSI][2] = 66,
+ [0][0][1][0][RTW89_KCC][2] = 24,
+ [0][0][1][0][RTW89_FCC][4] = 22,
+ [0][0][1][0][RTW89_ETSI][4] = 66,
+ [0][0][1][0][RTW89_KCC][4] = 24,
+ [0][0][1][0][RTW89_FCC][6] = 22,
+ [0][0][1][0][RTW89_ETSI][6] = 66,
+ [0][0][1][0][RTW89_KCC][6] = 24,
+ [0][0][1][0][RTW89_FCC][8] = 22,
+ [0][0][1][0][RTW89_ETSI][8] = 66,
+ [0][0][1][0][RTW89_KCC][8] = 24,
+ [0][0][1][0][RTW89_FCC][10] = 22,
+ [0][0][1][0][RTW89_ETSI][10] = 66,
+ [0][0][1][0][RTW89_KCC][10] = 24,
+ [0][0][1][0][RTW89_FCC][12] = 22,
+ [0][0][1][0][RTW89_ETSI][12] = 66,
+ [0][0][1][0][RTW89_KCC][12] = 24,
+ [0][0][1][0][RTW89_FCC][14] = 22,
+ [0][0][1][0][RTW89_ETSI][14] = 66,
+ [0][0][1][0][RTW89_KCC][14] = 24,
+ [0][0][1][0][RTW89_FCC][15] = 22,
+ [0][0][1][0][RTW89_ETSI][15] = 66,
+ [0][0][1][0][RTW89_KCC][15] = 24,
+ [0][0][1][0][RTW89_FCC][17] = 22,
+ [0][0][1][0][RTW89_ETSI][17] = 66,
+ [0][0][1][0][RTW89_KCC][17] = 24,
+ [0][0][1][0][RTW89_FCC][19] = 22,
+ [0][0][1][0][RTW89_ETSI][19] = 66,
+ [0][0][1][0][RTW89_KCC][19] = 24,
+ [0][0][1][0][RTW89_FCC][21] = 22,
+ [0][0][1][0][RTW89_ETSI][21] = 66,
+ [0][0][1][0][RTW89_KCC][21] = 24,
+ [0][0][1][0][RTW89_FCC][23] = 22,
+ [0][0][1][0][RTW89_ETSI][23] = 66,
+ [0][0][1][0][RTW89_KCC][23] = 24,
+ [0][0][1][0][RTW89_FCC][25] = 22,
+ [0][0][1][0][RTW89_ETSI][25] = 66,
+ [0][0][1][0][RTW89_KCC][25] = 24,
+ [0][0][1][0][RTW89_FCC][27] = 22,
+ [0][0][1][0][RTW89_ETSI][27] = 66,
+ [0][0][1][0][RTW89_KCC][27] = 24,
+ [0][0][1][0][RTW89_FCC][29] = 22,
+ [0][0][1][0][RTW89_ETSI][29] = 66,
+ [0][0][1][0][RTW89_KCC][29] = 24,
+ [0][0][1][0][RTW89_FCC][30] = 22,
+ [0][0][1][0][RTW89_ETSI][30] = 66,
+ [0][0][1][0][RTW89_KCC][30] = 24,
+ [0][0][1][0][RTW89_FCC][32] = 22,
+ [0][0][1][0][RTW89_ETSI][32] = 66,
+ [0][0][1][0][RTW89_KCC][32] = 24,
+ [0][0][1][0][RTW89_FCC][34] = 22,
+ [0][0][1][0][RTW89_ETSI][34] = 66,
+ [0][0][1][0][RTW89_KCC][34] = 24,
+ [0][0][1][0][RTW89_FCC][36] = 22,
+ [0][0][1][0][RTW89_ETSI][36] = 66,
+ [0][0][1][0][RTW89_KCC][36] = 24,
+ [0][0][1][0][RTW89_FCC][38] = 22,
+ [0][0][1][0][RTW89_ETSI][38] = 66,
+ [0][0][1][0][RTW89_KCC][38] = 24,
+ [0][0][1][0][RTW89_FCC][40] = 22,
+ [0][0][1][0][RTW89_ETSI][40] = 66,
+ [0][0][1][0][RTW89_KCC][40] = 24,
+ [0][0][1][0][RTW89_FCC][42] = 22,
+ [0][0][1][0][RTW89_ETSI][42] = 66,
+ [0][0][1][0][RTW89_KCC][42] = 24,
+ [0][0][1][0][RTW89_FCC][44] = 22,
+ [0][0][1][0][RTW89_ETSI][44] = 66,
+ [0][0][1][0][RTW89_KCC][44] = 24,
+ [0][0][1][0][RTW89_FCC][45] = 22,
+ [0][0][1][0][RTW89_ETSI][45] = 127,
+ [0][0][1][0][RTW89_KCC][45] = 24,
+ [0][0][1][0][RTW89_FCC][47] = 22,
+ [0][0][1][0][RTW89_ETSI][47] = 127,
+ [0][0][1][0][RTW89_KCC][47] = 24,
+ [0][0][1][0][RTW89_FCC][49] = 24,
+ [0][0][1][0][RTW89_ETSI][49] = 127,
+ [0][0][1][0][RTW89_KCC][49] = 24,
+ [0][0][1][0][RTW89_FCC][51] = 22,
+ [0][0][1][0][RTW89_ETSI][51] = 127,
+ [0][0][1][0][RTW89_KCC][51] = 24,
+ [0][0][1][0][RTW89_FCC][53] = 22,
+ [0][0][1][0][RTW89_ETSI][53] = 127,
+ [0][0][1][0][RTW89_KCC][53] = 24,
+ [0][0][1][0][RTW89_FCC][55] = 22,
+ [0][0][1][0][RTW89_ETSI][55] = 127,
+ [0][0][1][0][RTW89_KCC][55] = 26,
+ [0][0][1][0][RTW89_FCC][57] = 22,
+ [0][0][1][0][RTW89_ETSI][57] = 127,
+ [0][0][1][0][RTW89_KCC][57] = 26,
+ [0][0][1][0][RTW89_FCC][59] = 22,
+ [0][0][1][0][RTW89_ETSI][59] = 127,
+ [0][0][1][0][RTW89_KCC][59] = 26,
+ [0][0][1][0][RTW89_FCC][60] = 22,
+ [0][0][1][0][RTW89_ETSI][60] = 127,
+ [0][0][1][0][RTW89_KCC][60] = 26,
+ [0][0][1][0][RTW89_FCC][62] = 22,
+ [0][0][1][0][RTW89_ETSI][62] = 127,
+ [0][0][1][0][RTW89_KCC][62] = 26,
+ [0][0][1][0][RTW89_FCC][64] = 22,
+ [0][0][1][0][RTW89_ETSI][64] = 127,
+ [0][0][1][0][RTW89_KCC][64] = 26,
+ [0][0][1][0][RTW89_FCC][66] = 22,
+ [0][0][1][0][RTW89_ETSI][66] = 127,
+ [0][0][1][0][RTW89_KCC][66] = 26,
+ [0][0][1][0][RTW89_FCC][68] = 22,
+ [0][0][1][0][RTW89_ETSI][68] = 127,
+ [0][0][1][0][RTW89_KCC][68] = 26,
+ [0][0][1][0][RTW89_FCC][70] = 24,
+ [0][0][1][0][RTW89_ETSI][70] = 127,
+ [0][0][1][0][RTW89_KCC][70] = 26,
+ [0][0][1][0][RTW89_FCC][72] = 22,
+ [0][0][1][0][RTW89_ETSI][72] = 127,
+ [0][0][1][0][RTW89_KCC][72] = 26,
+ [0][0][1][0][RTW89_FCC][74] = 22,
+ [0][0][1][0][RTW89_ETSI][74] = 127,
+ [0][0][1][0][RTW89_KCC][74] = 26,
+ [0][0][1][0][RTW89_FCC][75] = 22,
+ [0][0][1][0][RTW89_ETSI][75] = 127,
+ [0][0][1][0][RTW89_KCC][75] = 26,
+ [0][0][1][0][RTW89_FCC][77] = 22,
+ [0][0][1][0][RTW89_ETSI][77] = 127,
+ [0][0][1][0][RTW89_KCC][77] = 26,
+ [0][0][1][0][RTW89_FCC][79] = 22,
+ [0][0][1][0][RTW89_ETSI][79] = 127,
+ [0][0][1][0][RTW89_KCC][79] = 26,
+ [0][0][1][0][RTW89_FCC][81] = 22,
+ [0][0][1][0][RTW89_ETSI][81] = 127,
+ [0][0][1][0][RTW89_KCC][81] = 26,
+ [0][0][1][0][RTW89_FCC][83] = 22,
+ [0][0][1][0][RTW89_ETSI][83] = 127,
+ [0][0][1][0][RTW89_KCC][83] = 32,
+ [0][0][1][0][RTW89_FCC][85] = 22,
+ [0][0][1][0][RTW89_ETSI][85] = 127,
+ [0][0][1][0][RTW89_KCC][85] = 32,
+ [0][0][1][0][RTW89_FCC][87] = 22,
+ [0][0][1][0][RTW89_ETSI][87] = 127,
+ [0][0][1][0][RTW89_KCC][87] = 32,
+ [0][0][1][0][RTW89_FCC][89] = 22,
+ [0][0][1][0][RTW89_ETSI][89] = 127,
+ [0][0][1][0][RTW89_KCC][89] = 32,
+ [0][0][1][0][RTW89_FCC][90] = 22,
+ [0][0][1][0][RTW89_ETSI][90] = 127,
+ [0][0][1][0][RTW89_KCC][90] = 32,
+ [0][0][1][0][RTW89_FCC][92] = 22,
+ [0][0][1][0][RTW89_ETSI][92] = 127,
+ [0][0][1][0][RTW89_KCC][92] = 32,
+ [0][0][1][0][RTW89_FCC][94] = 22,
+ [0][0][1][0][RTW89_ETSI][94] = 127,
+ [0][0][1][0][RTW89_KCC][94] = 32,
+ [0][0][1][0][RTW89_FCC][96] = 22,
+ [0][0][1][0][RTW89_ETSI][96] = 127,
+ [0][0][1][0][RTW89_KCC][96] = 32,
+ [0][0][1][0][RTW89_FCC][98] = 22,
+ [0][0][1][0][RTW89_ETSI][98] = 127,
+ [0][0][1][0][RTW89_KCC][98] = 32,
+ [0][0][1][0][RTW89_FCC][100] = 22,
+ [0][0][1][0][RTW89_ETSI][100] = 127,
+ [0][0][1][0][RTW89_KCC][100] = 32,
+ [0][0][1][0][RTW89_FCC][102] = 22,
+ [0][0][1][0][RTW89_ETSI][102] = 127,
+ [0][0][1][0][RTW89_KCC][102] = 32,
+ [0][0][1][0][RTW89_FCC][104] = 22,
+ [0][0][1][0][RTW89_ETSI][104] = 127,
+ [0][0][1][0][RTW89_KCC][104] = 32,
+ [0][0][1][0][RTW89_FCC][105] = 22,
+ [0][0][1][0][RTW89_ETSI][105] = 127,
+ [0][0][1][0][RTW89_KCC][105] = 32,
+ [0][0][1][0][RTW89_FCC][107] = 24,
+ [0][0][1][0][RTW89_ETSI][107] = 127,
+ [0][0][1][0][RTW89_KCC][107] = 32,
+ [0][0][1][0][RTW89_FCC][109] = 24,
+ [0][0][1][0][RTW89_ETSI][109] = 127,
+ [0][0][1][0][RTW89_KCC][109] = 32,
[0][0][1][0][RTW89_FCC][111] = 127,
+ [0][0][1][0][RTW89_ETSI][111] = 127,
+ [0][0][1][0][RTW89_KCC][111] = 127,
[0][0][1][0][RTW89_FCC][113] = 127,
+ [0][0][1][0][RTW89_ETSI][113] = 127,
+ [0][0][1][0][RTW89_KCC][113] = 127,
[0][0][1][0][RTW89_FCC][115] = 127,
+ [0][0][1][0][RTW89_ETSI][115] = 127,
+ [0][0][1][0][RTW89_KCC][115] = 127,
[0][0][1][0][RTW89_FCC][117] = 127,
+ [0][0][1][0][RTW89_ETSI][117] = 127,
+ [0][0][1][0][RTW89_KCC][117] = 127,
[0][0][1][0][RTW89_FCC][119] = 127,
- [0][1][1][0][RTW89_FCC][0] = 60,
- [0][1][1][0][RTW89_FCC][2] = 60,
- [0][1][1][0][RTW89_FCC][4] = 60,
- [0][1][1][0][RTW89_FCC][6] = 60,
- [0][1][1][0][RTW89_FCC][8] = 60,
- [0][1][1][0][RTW89_FCC][10] = 60,
- [0][1][1][0][RTW89_FCC][12] = 60,
- [0][1][1][0][RTW89_FCC][14] = 60,
- [0][1][1][0][RTW89_FCC][15] = 60,
- [0][1][1][0][RTW89_FCC][17] = 60,
- [0][1][1][0][RTW89_FCC][19] = 60,
- [0][1][1][0][RTW89_FCC][21] = 60,
- [0][1][1][0][RTW89_FCC][23] = 60,
- [0][1][1][0][RTW89_FCC][25] = 60,
- [0][1][1][0][RTW89_FCC][27] = 60,
- [0][1][1][0][RTW89_FCC][29] = 60,
- [0][1][1][0][RTW89_FCC][30] = 60,
- [0][1][1][0][RTW89_FCC][32] = 60,
- [0][1][1][0][RTW89_FCC][34] = 60,
- [0][1][1][0][RTW89_FCC][36] = 60,
- [0][1][1][0][RTW89_FCC][38] = 60,
- [0][1][1][0][RTW89_FCC][40] = 60,
- [0][1][1][0][RTW89_FCC][42] = 60,
- [0][1][1][0][RTW89_FCC][44] = 60,
- [0][1][1][0][RTW89_FCC][45] = 60,
- [0][1][1][0][RTW89_FCC][47] = 60,
- [0][1][1][0][RTW89_FCC][49] = 60,
- [0][1][1][0][RTW89_FCC][51] = 60,
- [0][1][1][0][RTW89_FCC][53] = 60,
- [0][1][1][0][RTW89_FCC][55] = 60,
- [0][1][1][0][RTW89_FCC][57] = 60,
- [0][1][1][0][RTW89_FCC][59] = 60,
- [0][1][1][0][RTW89_FCC][60] = 60,
- [0][1][1][0][RTW89_FCC][62] = 60,
- [0][1][1][0][RTW89_FCC][64] = 60,
- [0][1][1][0][RTW89_FCC][66] = 60,
- [0][1][1][0][RTW89_FCC][68] = 60,
- [0][1][1][0][RTW89_FCC][70] = 60,
- [0][1][1][0][RTW89_FCC][72] = 60,
- [0][1][1][0][RTW89_FCC][74] = 60,
- [0][1][1][0][RTW89_FCC][75] = 60,
- [0][1][1][0][RTW89_FCC][77] = 60,
- [0][1][1][0][RTW89_FCC][79] = 60,
- [0][1][1][0][RTW89_FCC][81] = 60,
- [0][1][1][0][RTW89_FCC][83] = 60,
- [0][1][1][0][RTW89_FCC][85] = 60,
- [0][1][1][0][RTW89_FCC][87] = 60,
- [0][1][1][0][RTW89_FCC][89] = 60,
- [0][1][1][0][RTW89_FCC][90] = 60,
- [0][1][1][0][RTW89_FCC][92] = 60,
- [0][1][1][0][RTW89_FCC][94] = 60,
- [0][1][1][0][RTW89_FCC][96] = 60,
- [0][1][1][0][RTW89_FCC][98] = 60,
- [0][1][1][0][RTW89_FCC][100] = 60,
- [0][1][1][0][RTW89_FCC][102] = 60,
- [0][1][1][0][RTW89_FCC][104] = 60,
- [0][1][1][0][RTW89_FCC][105] = 60,
- [0][1][1][0][RTW89_FCC][107] = 60,
- [0][1][1][0][RTW89_FCC][109] = 60,
+ [0][0][1][0][RTW89_ETSI][119] = 127,
+ [0][0][1][0][RTW89_KCC][119] = 127,
+ [0][1][1][0][RTW89_FCC][0] = -2,
+ [0][1][1][0][RTW89_ETSI][0] = 54,
+ [0][1][1][0][RTW89_KCC][0] = 12,
+ [0][1][1][0][RTW89_FCC][2] = -4,
+ [0][1][1][0][RTW89_ETSI][2] = 54,
+ [0][1][1][0][RTW89_KCC][2] = 12,
+ [0][1][1][0][RTW89_FCC][4] = -4,
+ [0][1][1][0][RTW89_ETSI][4] = 54,
+ [0][1][1][0][RTW89_KCC][4] = 12,
+ [0][1][1][0][RTW89_FCC][6] = -4,
+ [0][1][1][0][RTW89_ETSI][6] = 54,
+ [0][1][1][0][RTW89_KCC][6] = 12,
+ [0][1][1][0][RTW89_FCC][8] = -4,
+ [0][1][1][0][RTW89_ETSI][8] = 54,
+ [0][1][1][0][RTW89_KCC][8] = 12,
+ [0][1][1][0][RTW89_FCC][10] = -4,
+ [0][1][1][0][RTW89_ETSI][10] = 54,
+ [0][1][1][0][RTW89_KCC][10] = 12,
+ [0][1][1][0][RTW89_FCC][12] = -4,
+ [0][1][1][0][RTW89_ETSI][12] = 54,
+ [0][1][1][0][RTW89_KCC][12] = 12,
+ [0][1][1][0][RTW89_FCC][14] = -4,
+ [0][1][1][0][RTW89_ETSI][14] = 54,
+ [0][1][1][0][RTW89_KCC][14] = 12,
+ [0][1][1][0][RTW89_FCC][15] = -4,
+ [0][1][1][0][RTW89_ETSI][15] = 54,
+ [0][1][1][0][RTW89_KCC][15] = 12,
+ [0][1][1][0][RTW89_FCC][17] = -4,
+ [0][1][1][0][RTW89_ETSI][17] = 54,
+ [0][1][1][0][RTW89_KCC][17] = 12,
+ [0][1][1][0][RTW89_FCC][19] = -4,
+ [0][1][1][0][RTW89_ETSI][19] = 54,
+ [0][1][1][0][RTW89_KCC][19] = 12,
+ [0][1][1][0][RTW89_FCC][21] = -4,
+ [0][1][1][0][RTW89_ETSI][21] = 54,
+ [0][1][1][0][RTW89_KCC][21] = 12,
+ [0][1][1][0][RTW89_FCC][23] = -4,
+ [0][1][1][0][RTW89_ETSI][23] = 54,
+ [0][1][1][0][RTW89_KCC][23] = 12,
+ [0][1][1][0][RTW89_FCC][25] = -4,
+ [0][1][1][0][RTW89_ETSI][25] = 54,
+ [0][1][1][0][RTW89_KCC][25] = 12,
+ [0][1][1][0][RTW89_FCC][27] = -4,
+ [0][1][1][0][RTW89_ETSI][27] = 54,
+ [0][1][1][0][RTW89_KCC][27] = 12,
+ [0][1][1][0][RTW89_FCC][29] = -4,
+ [0][1][1][0][RTW89_ETSI][29] = 54,
+ [0][1][1][0][RTW89_KCC][29] = 12,
+ [0][1][1][0][RTW89_FCC][30] = -4,
+ [0][1][1][0][RTW89_ETSI][30] = 54,
+ [0][1][1][0][RTW89_KCC][30] = 12,
+ [0][1][1][0][RTW89_FCC][32] = -4,
+ [0][1][1][0][RTW89_ETSI][32] = 54,
+ [0][1][1][0][RTW89_KCC][32] = 12,
+ [0][1][1][0][RTW89_FCC][34] = -4,
+ [0][1][1][0][RTW89_ETSI][34] = 54,
+ [0][1][1][0][RTW89_KCC][34] = 12,
+ [0][1][1][0][RTW89_FCC][36] = -4,
+ [0][1][1][0][RTW89_ETSI][36] = 54,
+ [0][1][1][0][RTW89_KCC][36] = 12,
+ [0][1][1][0][RTW89_FCC][38] = -4,
+ [0][1][1][0][RTW89_ETSI][38] = 54,
+ [0][1][1][0][RTW89_KCC][38] = 12,
+ [0][1][1][0][RTW89_FCC][40] = -4,
+ [0][1][1][0][RTW89_ETSI][40] = 54,
+ [0][1][1][0][RTW89_KCC][40] = 12,
+ [0][1][1][0][RTW89_FCC][42] = -4,
+ [0][1][1][0][RTW89_ETSI][42] = 54,
+ [0][1][1][0][RTW89_KCC][42] = 12,
+ [0][1][1][0][RTW89_FCC][44] = -2,
+ [0][1][1][0][RTW89_ETSI][44] = 54,
+ [0][1][1][0][RTW89_KCC][44] = 12,
+ [0][1][1][0][RTW89_FCC][45] = -2,
+ [0][1][1][0][RTW89_ETSI][45] = 127,
+ [0][1][1][0][RTW89_KCC][45] = 12,
+ [0][1][1][0][RTW89_FCC][47] = -2,
+ [0][1][1][0][RTW89_ETSI][47] = 127,
+ [0][1][1][0][RTW89_KCC][47] = 12,
+ [0][1][1][0][RTW89_FCC][49] = -2,
+ [0][1][1][0][RTW89_ETSI][49] = 127,
+ [0][1][1][0][RTW89_KCC][49] = 12,
+ [0][1][1][0][RTW89_FCC][51] = -2,
+ [0][1][1][0][RTW89_ETSI][51] = 127,
+ [0][1][1][0][RTW89_KCC][51] = 12,
+ [0][1][1][0][RTW89_FCC][53] = -2,
+ [0][1][1][0][RTW89_ETSI][53] = 127,
+ [0][1][1][0][RTW89_KCC][53] = 12,
+ [0][1][1][0][RTW89_FCC][55] = -2,
+ [0][1][1][0][RTW89_ETSI][55] = 127,
+ [0][1][1][0][RTW89_KCC][55] = 12,
+ [0][1][1][0][RTW89_FCC][57] = -2,
+ [0][1][1][0][RTW89_ETSI][57] = 127,
+ [0][1][1][0][RTW89_KCC][57] = 12,
+ [0][1][1][0][RTW89_FCC][59] = -2,
+ [0][1][1][0][RTW89_ETSI][59] = 127,
+ [0][1][1][0][RTW89_KCC][59] = 12,
+ [0][1][1][0][RTW89_FCC][60] = -2,
+ [0][1][1][0][RTW89_ETSI][60] = 127,
+ [0][1][1][0][RTW89_KCC][60] = 12,
+ [0][1][1][0][RTW89_FCC][62] = -2,
+ [0][1][1][0][RTW89_ETSI][62] = 127,
+ [0][1][1][0][RTW89_KCC][62] = 12,
+ [0][1][1][0][RTW89_FCC][64] = -2,
+ [0][1][1][0][RTW89_ETSI][64] = 127,
+ [0][1][1][0][RTW89_KCC][64] = 12,
+ [0][1][1][0][RTW89_FCC][66] = -2,
+ [0][1][1][0][RTW89_ETSI][66] = 127,
+ [0][1][1][0][RTW89_KCC][66] = 12,
+ [0][1][1][0][RTW89_FCC][68] = -2,
+ [0][1][1][0][RTW89_ETSI][68] = 127,
+ [0][1][1][0][RTW89_KCC][68] = 12,
+ [0][1][1][0][RTW89_FCC][70] = -2,
+ [0][1][1][0][RTW89_ETSI][70] = 127,
+ [0][1][1][0][RTW89_KCC][70] = 12,
+ [0][1][1][0][RTW89_FCC][72] = -2,
+ [0][1][1][0][RTW89_ETSI][72] = 127,
+ [0][1][1][0][RTW89_KCC][72] = 12,
+ [0][1][1][0][RTW89_FCC][74] = -2,
+ [0][1][1][0][RTW89_ETSI][74] = 127,
+ [0][1][1][0][RTW89_KCC][74] = 12,
+ [0][1][1][0][RTW89_FCC][75] = -2,
+ [0][1][1][0][RTW89_ETSI][75] = 127,
+ [0][1][1][0][RTW89_KCC][75] = 12,
+ [0][1][1][0][RTW89_FCC][77] = -2,
+ [0][1][1][0][RTW89_ETSI][77] = 127,
+ [0][1][1][0][RTW89_KCC][77] = 12,
+ [0][1][1][0][RTW89_FCC][79] = -2,
+ [0][1][1][0][RTW89_ETSI][79] = 127,
+ [0][1][1][0][RTW89_KCC][79] = 12,
+ [0][1][1][0][RTW89_FCC][81] = -2,
+ [0][1][1][0][RTW89_ETSI][81] = 127,
+ [0][1][1][0][RTW89_KCC][81] = 12,
+ [0][1][1][0][RTW89_FCC][83] = -2,
+ [0][1][1][0][RTW89_ETSI][83] = 127,
+ [0][1][1][0][RTW89_KCC][83] = 20,
+ [0][1][1][0][RTW89_FCC][85] = -2,
+ [0][1][1][0][RTW89_ETSI][85] = 127,
+ [0][1][1][0][RTW89_KCC][85] = 20,
+ [0][1][1][0][RTW89_FCC][87] = -2,
+ [0][1][1][0][RTW89_ETSI][87] = 127,
+ [0][1][1][0][RTW89_KCC][87] = 20,
+ [0][1][1][0][RTW89_FCC][89] = -2,
+ [0][1][1][0][RTW89_ETSI][89] = 127,
+ [0][1][1][0][RTW89_KCC][89] = 20,
+ [0][1][1][0][RTW89_FCC][90] = -2,
+ [0][1][1][0][RTW89_ETSI][90] = 127,
+ [0][1][1][0][RTW89_KCC][90] = 20,
+ [0][1][1][0][RTW89_FCC][92] = -2,
+ [0][1][1][0][RTW89_ETSI][92] = 127,
+ [0][1][1][0][RTW89_KCC][92] = 20,
+ [0][1][1][0][RTW89_FCC][94] = -2,
+ [0][1][1][0][RTW89_ETSI][94] = 127,
+ [0][1][1][0][RTW89_KCC][94] = 20,
+ [0][1][1][0][RTW89_FCC][96] = -2,
+ [0][1][1][0][RTW89_ETSI][96] = 127,
+ [0][1][1][0][RTW89_KCC][96] = 20,
+ [0][1][1][0][RTW89_FCC][98] = -2,
+ [0][1][1][0][RTW89_ETSI][98] = 127,
+ [0][1][1][0][RTW89_KCC][98] = 20,
+ [0][1][1][0][RTW89_FCC][100] = -2,
+ [0][1][1][0][RTW89_ETSI][100] = 127,
+ [0][1][1][0][RTW89_KCC][100] = 20,
+ [0][1][1][0][RTW89_FCC][102] = -2,
+ [0][1][1][0][RTW89_ETSI][102] = 127,
+ [0][1][1][0][RTW89_KCC][102] = 20,
+ [0][1][1][0][RTW89_FCC][104] = -2,
+ [0][1][1][0][RTW89_ETSI][104] = 127,
+ [0][1][1][0][RTW89_KCC][104] = 20,
+ [0][1][1][0][RTW89_FCC][105] = -2,
+ [0][1][1][0][RTW89_ETSI][105] = 127,
+ [0][1][1][0][RTW89_KCC][105] = 20,
+ [0][1][1][0][RTW89_FCC][107] = 0,
+ [0][1][1][0][RTW89_ETSI][107] = 127,
+ [0][1][1][0][RTW89_KCC][107] = 20,
+ [0][1][1][0][RTW89_FCC][109] = 0,
+ [0][1][1][0][RTW89_ETSI][109] = 127,
+ [0][1][1][0][RTW89_KCC][109] = 20,
[0][1][1][0][RTW89_FCC][111] = 127,
+ [0][1][1][0][RTW89_ETSI][111] = 127,
+ [0][1][1][0][RTW89_KCC][111] = 127,
[0][1][1][0][RTW89_FCC][113] = 127,
+ [0][1][1][0][RTW89_ETSI][113] = 127,
+ [0][1][1][0][RTW89_KCC][113] = 127,
[0][1][1][0][RTW89_FCC][115] = 127,
+ [0][1][1][0][RTW89_ETSI][115] = 127,
+ [0][1][1][0][RTW89_KCC][115] = 127,
[0][1][1][0][RTW89_FCC][117] = 127,
+ [0][1][1][0][RTW89_ETSI][117] = 127,
+ [0][1][1][0][RTW89_KCC][117] = 127,
[0][1][1][0][RTW89_FCC][119] = 127,
- [0][0][2][0][RTW89_FCC][0] = 72,
- [0][0][2][0][RTW89_FCC][2] = 72,
- [0][0][2][0][RTW89_FCC][4] = 72,
- [0][0][2][0][RTW89_FCC][6] = 72,
- [0][0][2][0][RTW89_FCC][8] = 72,
- [0][0][2][0][RTW89_FCC][10] = 72,
- [0][0][2][0][RTW89_FCC][12] = 72,
- [0][0][2][0][RTW89_FCC][14] = 72,
- [0][0][2][0][RTW89_FCC][15] = 72,
- [0][0][2][0][RTW89_FCC][17] = 72,
- [0][0][2][0][RTW89_FCC][19] = 72,
- [0][0][2][0][RTW89_FCC][21] = 72,
- [0][0][2][0][RTW89_FCC][23] = 72,
- [0][0][2][0][RTW89_FCC][25] = 72,
- [0][0][2][0][RTW89_FCC][27] = 72,
- [0][0][2][0][RTW89_FCC][29] = 72,
- [0][0][2][0][RTW89_FCC][30] = 72,
- [0][0][2][0][RTW89_FCC][32] = 72,
- [0][0][2][0][RTW89_FCC][34] = 72,
- [0][0][2][0][RTW89_FCC][36] = 72,
- [0][0][2][0][RTW89_FCC][38] = 72,
- [0][0][2][0][RTW89_FCC][40] = 72,
- [0][0][2][0][RTW89_FCC][42] = 72,
- [0][0][2][0][RTW89_FCC][44] = 72,
- [0][0][2][0][RTW89_FCC][45] = 72,
- [0][0][2][0][RTW89_FCC][47] = 72,
- [0][0][2][0][RTW89_FCC][49] = 72,
- [0][0][2][0][RTW89_FCC][51] = 72,
- [0][0][2][0][RTW89_FCC][53] = 72,
- [0][0][2][0][RTW89_FCC][55] = 72,
- [0][0][2][0][RTW89_FCC][57] = 72,
- [0][0][2][0][RTW89_FCC][59] = 72,
- [0][0][2][0][RTW89_FCC][60] = 72,
- [0][0][2][0][RTW89_FCC][62] = 72,
- [0][0][2][0][RTW89_FCC][64] = 72,
- [0][0][2][0][RTW89_FCC][66] = 72,
- [0][0][2][0][RTW89_FCC][68] = 72,
- [0][0][2][0][RTW89_FCC][70] = 72,
- [0][0][2][0][RTW89_FCC][72] = 72,
- [0][0][2][0][RTW89_FCC][74] = 72,
- [0][0][2][0][RTW89_FCC][75] = 72,
- [0][0][2][0][RTW89_FCC][77] = 72,
- [0][0][2][0][RTW89_FCC][79] = 72,
- [0][0][2][0][RTW89_FCC][81] = 72,
- [0][0][2][0][RTW89_FCC][83] = 72,
- [0][0][2][0][RTW89_FCC][85] = 72,
- [0][0][2][0][RTW89_FCC][87] = 72,
- [0][0][2][0][RTW89_FCC][89] = 72,
- [0][0][2][0][RTW89_FCC][90] = 72,
- [0][0][2][0][RTW89_FCC][92] = 72,
- [0][0][2][0][RTW89_FCC][94] = 72,
- [0][0][2][0][RTW89_FCC][96] = 72,
- [0][0][2][0][RTW89_FCC][98] = 72,
- [0][0][2][0][RTW89_FCC][100] = 72,
- [0][0][2][0][RTW89_FCC][102] = 72,
- [0][0][2][0][RTW89_FCC][104] = 72,
- [0][0][2][0][RTW89_FCC][105] = 72,
- [0][0][2][0][RTW89_FCC][107] = 72,
- [0][0][2][0][RTW89_FCC][109] = 72,
+ [0][1][1][0][RTW89_ETSI][119] = 127,
+ [0][1][1][0][RTW89_KCC][119] = 127,
+ [0][0][2][0][RTW89_FCC][0] = 24,
+ [0][0][2][0][RTW89_ETSI][0] = 66,
+ [0][0][2][0][RTW89_KCC][0] = 24,
+ [0][0][2][0][RTW89_FCC][2] = 22,
+ [0][0][2][0][RTW89_ETSI][2] = 66,
+ [0][0][2][0][RTW89_KCC][2] = 24,
+ [0][0][2][0][RTW89_FCC][4] = 22,
+ [0][0][2][0][RTW89_ETSI][4] = 66,
+ [0][0][2][0][RTW89_KCC][4] = 24,
+ [0][0][2][0][RTW89_FCC][6] = 22,
+ [0][0][2][0][RTW89_ETSI][6] = 66,
+ [0][0][2][0][RTW89_KCC][6] = 24,
+ [0][0][2][0][RTW89_FCC][8] = 22,
+ [0][0][2][0][RTW89_ETSI][8] = 66,
+ [0][0][2][0][RTW89_KCC][8] = 24,
+ [0][0][2][0][RTW89_FCC][10] = 22,
+ [0][0][2][0][RTW89_ETSI][10] = 66,
+ [0][0][2][0][RTW89_KCC][10] = 24,
+ [0][0][2][0][RTW89_FCC][12] = 22,
+ [0][0][2][0][RTW89_ETSI][12] = 66,
+ [0][0][2][0][RTW89_KCC][12] = 24,
+ [0][0][2][0][RTW89_FCC][14] = 22,
+ [0][0][2][0][RTW89_ETSI][14] = 66,
+ [0][0][2][0][RTW89_KCC][14] = 24,
+ [0][0][2][0][RTW89_FCC][15] = 22,
+ [0][0][2][0][RTW89_ETSI][15] = 66,
+ [0][0][2][0][RTW89_KCC][15] = 24,
+ [0][0][2][0][RTW89_FCC][17] = 22,
+ [0][0][2][0][RTW89_ETSI][17] = 66,
+ [0][0][2][0][RTW89_KCC][17] = 24,
+ [0][0][2][0][RTW89_FCC][19] = 22,
+ [0][0][2][0][RTW89_ETSI][19] = 66,
+ [0][0][2][0][RTW89_KCC][19] = 24,
+ [0][0][2][0][RTW89_FCC][21] = 22,
+ [0][0][2][0][RTW89_ETSI][21] = 66,
+ [0][0][2][0][RTW89_KCC][21] = 24,
+ [0][0][2][0][RTW89_FCC][23] = 22,
+ [0][0][2][0][RTW89_ETSI][23] = 66,
+ [0][0][2][0][RTW89_KCC][23] = 24,
+ [0][0][2][0][RTW89_FCC][25] = 22,
+ [0][0][2][0][RTW89_ETSI][25] = 66,
+ [0][0][2][0][RTW89_KCC][25] = 24,
+ [0][0][2][0][RTW89_FCC][27] = 22,
+ [0][0][2][0][RTW89_ETSI][27] = 66,
+ [0][0][2][0][RTW89_KCC][27] = 24,
+ [0][0][2][0][RTW89_FCC][29] = 22,
+ [0][0][2][0][RTW89_ETSI][29] = 66,
+ [0][0][2][0][RTW89_KCC][29] = 24,
+ [0][0][2][0][RTW89_FCC][30] = 22,
+ [0][0][2][0][RTW89_ETSI][30] = 66,
+ [0][0][2][0][RTW89_KCC][30] = 24,
+ [0][0][2][0][RTW89_FCC][32] = 22,
+ [0][0][2][0][RTW89_ETSI][32] = 66,
+ [0][0][2][0][RTW89_KCC][32] = 24,
+ [0][0][2][0][RTW89_FCC][34] = 22,
+ [0][0][2][0][RTW89_ETSI][34] = 66,
+ [0][0][2][0][RTW89_KCC][34] = 24,
+ [0][0][2][0][RTW89_FCC][36] = 22,
+ [0][0][2][0][RTW89_ETSI][36] = 66,
+ [0][0][2][0][RTW89_KCC][36] = 24,
+ [0][0][2][0][RTW89_FCC][38] = 22,
+ [0][0][2][0][RTW89_ETSI][38] = 66,
+ [0][0][2][0][RTW89_KCC][38] = 24,
+ [0][0][2][0][RTW89_FCC][40] = 22,
+ [0][0][2][0][RTW89_ETSI][40] = 66,
+ [0][0][2][0][RTW89_KCC][40] = 24,
+ [0][0][2][0][RTW89_FCC][42] = 22,
+ [0][0][2][0][RTW89_ETSI][42] = 66,
+ [0][0][2][0][RTW89_KCC][42] = 24,
+ [0][0][2][0][RTW89_FCC][44] = 22,
+ [0][0][2][0][RTW89_ETSI][44] = 66,
+ [0][0][2][0][RTW89_KCC][44] = 24,
+ [0][0][2][0][RTW89_FCC][45] = 22,
+ [0][0][2][0][RTW89_ETSI][45] = 127,
+ [0][0][2][0][RTW89_KCC][45] = 24,
+ [0][0][2][0][RTW89_FCC][47] = 22,
+ [0][0][2][0][RTW89_ETSI][47] = 127,
+ [0][0][2][0][RTW89_KCC][47] = 24,
+ [0][0][2][0][RTW89_FCC][49] = 24,
+ [0][0][2][0][RTW89_ETSI][49] = 127,
+ [0][0][2][0][RTW89_KCC][49] = 24,
+ [0][0][2][0][RTW89_FCC][51] = 22,
+ [0][0][2][0][RTW89_ETSI][51] = 127,
+ [0][0][2][0][RTW89_KCC][51] = 24,
+ [0][0][2][0][RTW89_FCC][53] = 22,
+ [0][0][2][0][RTW89_ETSI][53] = 127,
+ [0][0][2][0][RTW89_KCC][53] = 24,
+ [0][0][2][0][RTW89_FCC][55] = 22,
+ [0][0][2][0][RTW89_ETSI][55] = 127,
+ [0][0][2][0][RTW89_KCC][55] = 26,
+ [0][0][2][0][RTW89_FCC][57] = 22,
+ [0][0][2][0][RTW89_ETSI][57] = 127,
+ [0][0][2][0][RTW89_KCC][57] = 26,
+ [0][0][2][0][RTW89_FCC][59] = 22,
+ [0][0][2][0][RTW89_ETSI][59] = 127,
+ [0][0][2][0][RTW89_KCC][59] = 26,
+ [0][0][2][0][RTW89_FCC][60] = 22,
+ [0][0][2][0][RTW89_ETSI][60] = 127,
+ [0][0][2][0][RTW89_KCC][60] = 26,
+ [0][0][2][0][RTW89_FCC][62] = 22,
+ [0][0][2][0][RTW89_ETSI][62] = 127,
+ [0][0][2][0][RTW89_KCC][62] = 26,
+ [0][0][2][0][RTW89_FCC][64] = 22,
+ [0][0][2][0][RTW89_ETSI][64] = 127,
+ [0][0][2][0][RTW89_KCC][64] = 26,
+ [0][0][2][0][RTW89_FCC][66] = 22,
+ [0][0][2][0][RTW89_ETSI][66] = 127,
+ [0][0][2][0][RTW89_KCC][66] = 26,
+ [0][0][2][0][RTW89_FCC][68] = 22,
+ [0][0][2][0][RTW89_ETSI][68] = 127,
+ [0][0][2][0][RTW89_KCC][68] = 26,
+ [0][0][2][0][RTW89_FCC][70] = 24,
+ [0][0][2][0][RTW89_ETSI][70] = 127,
+ [0][0][2][0][RTW89_KCC][70] = 26,
+ [0][0][2][0][RTW89_FCC][72] = 22,
+ [0][0][2][0][RTW89_ETSI][72] = 127,
+ [0][0][2][0][RTW89_KCC][72] = 26,
+ [0][0][2][0][RTW89_FCC][74] = 22,
+ [0][0][2][0][RTW89_ETSI][74] = 127,
+ [0][0][2][0][RTW89_KCC][74] = 26,
+ [0][0][2][0][RTW89_FCC][75] = 22,
+ [0][0][2][0][RTW89_ETSI][75] = 127,
+ [0][0][2][0][RTW89_KCC][75] = 26,
+ [0][0][2][0][RTW89_FCC][77] = 22,
+ [0][0][2][0][RTW89_ETSI][77] = 127,
+ [0][0][2][0][RTW89_KCC][77] = 26,
+ [0][0][2][0][RTW89_FCC][79] = 22,
+ [0][0][2][0][RTW89_ETSI][79] = 127,
+ [0][0][2][0][RTW89_KCC][79] = 26,
+ [0][0][2][0][RTW89_FCC][81] = 22,
+ [0][0][2][0][RTW89_ETSI][81] = 127,
+ [0][0][2][0][RTW89_KCC][81] = 26,
+ [0][0][2][0][RTW89_FCC][83] = 22,
+ [0][0][2][0][RTW89_ETSI][83] = 127,
+ [0][0][2][0][RTW89_KCC][83] = 32,
+ [0][0][2][0][RTW89_FCC][85] = 22,
+ [0][0][2][0][RTW89_ETSI][85] = 127,
+ [0][0][2][0][RTW89_KCC][85] = 32,
+ [0][0][2][0][RTW89_FCC][87] = 22,
+ [0][0][2][0][RTW89_ETSI][87] = 127,
+ [0][0][2][0][RTW89_KCC][87] = 32,
+ [0][0][2][0][RTW89_FCC][89] = 22,
+ [0][0][2][0][RTW89_ETSI][89] = 127,
+ [0][0][2][0][RTW89_KCC][89] = 32,
+ [0][0][2][0][RTW89_FCC][90] = 22,
+ [0][0][2][0][RTW89_ETSI][90] = 127,
+ [0][0][2][0][RTW89_KCC][90] = 32,
+ [0][0][2][0][RTW89_FCC][92] = 22,
+ [0][0][2][0][RTW89_ETSI][92] = 127,
+ [0][0][2][0][RTW89_KCC][92] = 32,
+ [0][0][2][0][RTW89_FCC][94] = 22,
+ [0][0][2][0][RTW89_ETSI][94] = 127,
+ [0][0][2][0][RTW89_KCC][94] = 32,
+ [0][0][2][0][RTW89_FCC][96] = 22,
+ [0][0][2][0][RTW89_ETSI][96] = 127,
+ [0][0][2][0][RTW89_KCC][96] = 32,
+ [0][0][2][0][RTW89_FCC][98] = 22,
+ [0][0][2][0][RTW89_ETSI][98] = 127,
+ [0][0][2][0][RTW89_KCC][98] = 32,
+ [0][0][2][0][RTW89_FCC][100] = 22,
+ [0][0][2][0][RTW89_ETSI][100] = 127,
+ [0][0][2][0][RTW89_KCC][100] = 32,
+ [0][0][2][0][RTW89_FCC][102] = 22,
+ [0][0][2][0][RTW89_ETSI][102] = 127,
+ [0][0][2][0][RTW89_KCC][102] = 32,
+ [0][0][2][0][RTW89_FCC][104] = 22,
+ [0][0][2][0][RTW89_ETSI][104] = 127,
+ [0][0][2][0][RTW89_KCC][104] = 32,
+ [0][0][2][0][RTW89_FCC][105] = 22,
+ [0][0][2][0][RTW89_ETSI][105] = 127,
+ [0][0][2][0][RTW89_KCC][105] = 32,
+ [0][0][2][0][RTW89_FCC][107] = 24,
+ [0][0][2][0][RTW89_ETSI][107] = 127,
+ [0][0][2][0][RTW89_KCC][107] = 32,
+ [0][0][2][0][RTW89_FCC][109] = 24,
+ [0][0][2][0][RTW89_ETSI][109] = 127,
+ [0][0][2][0][RTW89_KCC][109] = 32,
[0][0][2][0][RTW89_FCC][111] = 127,
+ [0][0][2][0][RTW89_ETSI][111] = 127,
+ [0][0][2][0][RTW89_KCC][111] = 127,
[0][0][2][0][RTW89_FCC][113] = 127,
+ [0][0][2][0][RTW89_ETSI][113] = 127,
+ [0][0][2][0][RTW89_KCC][113] = 127,
[0][0][2][0][RTW89_FCC][115] = 127,
+ [0][0][2][0][RTW89_ETSI][115] = 127,
+ [0][0][2][0][RTW89_KCC][115] = 127,
[0][0][2][0][RTW89_FCC][117] = 127,
+ [0][0][2][0][RTW89_ETSI][117] = 127,
+ [0][0][2][0][RTW89_KCC][117] = 127,
[0][0][2][0][RTW89_FCC][119] = 127,
- [0][1][2][0][RTW89_FCC][0] = 60,
- [0][1][2][0][RTW89_FCC][2] = 60,
- [0][1][2][0][RTW89_FCC][4] = 60,
- [0][1][2][0][RTW89_FCC][6] = 60,
- [0][1][2][0][RTW89_FCC][8] = 60,
- [0][1][2][0][RTW89_FCC][10] = 60,
- [0][1][2][0][RTW89_FCC][12] = 60,
- [0][1][2][0][RTW89_FCC][14] = 60,
- [0][1][2][0][RTW89_FCC][15] = 60,
- [0][1][2][0][RTW89_FCC][17] = 60,
- [0][1][2][0][RTW89_FCC][19] = 60,
- [0][1][2][0][RTW89_FCC][21] = 60,
- [0][1][2][0][RTW89_FCC][23] = 60,
- [0][1][2][0][RTW89_FCC][25] = 60,
- [0][1][2][0][RTW89_FCC][27] = 60,
- [0][1][2][0][RTW89_FCC][29] = 60,
- [0][1][2][0][RTW89_FCC][30] = 60,
- [0][1][2][0][RTW89_FCC][32] = 60,
- [0][1][2][0][RTW89_FCC][34] = 60,
- [0][1][2][0][RTW89_FCC][36] = 60,
- [0][1][2][0][RTW89_FCC][38] = 60,
- [0][1][2][0][RTW89_FCC][40] = 60,
- [0][1][2][0][RTW89_FCC][42] = 60,
- [0][1][2][0][RTW89_FCC][44] = 60,
- [0][1][2][0][RTW89_FCC][45] = 60,
- [0][1][2][0][RTW89_FCC][47] = 60,
- [0][1][2][0][RTW89_FCC][49] = 60,
- [0][1][2][0][RTW89_FCC][51] = 60,
- [0][1][2][0][RTW89_FCC][53] = 60,
- [0][1][2][0][RTW89_FCC][55] = 60,
- [0][1][2][0][RTW89_FCC][57] = 60,
- [0][1][2][0][RTW89_FCC][59] = 60,
- [0][1][2][0][RTW89_FCC][60] = 60,
- [0][1][2][0][RTW89_FCC][62] = 60,
- [0][1][2][0][RTW89_FCC][64] = 60,
- [0][1][2][0][RTW89_FCC][66] = 60,
- [0][1][2][0][RTW89_FCC][68] = 60,
- [0][1][2][0][RTW89_FCC][70] = 60,
- [0][1][2][0][RTW89_FCC][72] = 60,
- [0][1][2][0][RTW89_FCC][74] = 60,
- [0][1][2][0][RTW89_FCC][75] = 60,
- [0][1][2][0][RTW89_FCC][77] = 60,
- [0][1][2][0][RTW89_FCC][79] = 60,
- [0][1][2][0][RTW89_FCC][81] = 60,
- [0][1][2][0][RTW89_FCC][83] = 60,
- [0][1][2][0][RTW89_FCC][85] = 60,
- [0][1][2][0][RTW89_FCC][87] = 60,
- [0][1][2][0][RTW89_FCC][89] = 60,
- [0][1][2][0][RTW89_FCC][90] = 60,
- [0][1][2][0][RTW89_FCC][92] = 60,
- [0][1][2][0][RTW89_FCC][94] = 60,
- [0][1][2][0][RTW89_FCC][96] = 60,
- [0][1][2][0][RTW89_FCC][98] = 60,
- [0][1][2][0][RTW89_FCC][100] = 60,
- [0][1][2][0][RTW89_FCC][102] = 60,
- [0][1][2][0][RTW89_FCC][104] = 60,
- [0][1][2][0][RTW89_FCC][105] = 60,
- [0][1][2][0][RTW89_FCC][107] = 60,
- [0][1][2][0][RTW89_FCC][109] = 60,
+ [0][0][2][0][RTW89_ETSI][119] = 127,
+ [0][0][2][0][RTW89_KCC][119] = 127,
+ [0][1][2][0][RTW89_FCC][0] = -2,
+ [0][1][2][0][RTW89_ETSI][0] = 54,
+ [0][1][2][0][RTW89_KCC][0] = 12,
+ [0][1][2][0][RTW89_FCC][2] = -4,
+ [0][1][2][0][RTW89_ETSI][2] = 54,
+ [0][1][2][0][RTW89_KCC][2] = 12,
+ [0][1][2][0][RTW89_FCC][4] = -4,
+ [0][1][2][0][RTW89_ETSI][4] = 54,
+ [0][1][2][0][RTW89_KCC][4] = 12,
+ [0][1][2][0][RTW89_FCC][6] = -4,
+ [0][1][2][0][RTW89_ETSI][6] = 54,
+ [0][1][2][0][RTW89_KCC][6] = 12,
+ [0][1][2][0][RTW89_FCC][8] = -4,
+ [0][1][2][0][RTW89_ETSI][8] = 54,
+ [0][1][2][0][RTW89_KCC][8] = 12,
+ [0][1][2][0][RTW89_FCC][10] = -4,
+ [0][1][2][0][RTW89_ETSI][10] = 54,
+ [0][1][2][0][RTW89_KCC][10] = 12,
+ [0][1][2][0][RTW89_FCC][12] = -4,
+ [0][1][2][0][RTW89_ETSI][12] = 54,
+ [0][1][2][0][RTW89_KCC][12] = 12,
+ [0][1][2][0][RTW89_FCC][14] = -4,
+ [0][1][2][0][RTW89_ETSI][14] = 54,
+ [0][1][2][0][RTW89_KCC][14] = 12,
+ [0][1][2][0][RTW89_FCC][15] = -4,
+ [0][1][2][0][RTW89_ETSI][15] = 54,
+ [0][1][2][0][RTW89_KCC][15] = 12,
+ [0][1][2][0][RTW89_FCC][17] = -4,
+ [0][1][2][0][RTW89_ETSI][17] = 54,
+ [0][1][2][0][RTW89_KCC][17] = 12,
+ [0][1][2][0][RTW89_FCC][19] = -4,
+ [0][1][2][0][RTW89_ETSI][19] = 54,
+ [0][1][2][0][RTW89_KCC][19] = 12,
+ [0][1][2][0][RTW89_FCC][21] = -4,
+ [0][1][2][0][RTW89_ETSI][21] = 54,
+ [0][1][2][0][RTW89_KCC][21] = 12,
+ [0][1][2][0][RTW89_FCC][23] = -4,
+ [0][1][2][0][RTW89_ETSI][23] = 54,
+ [0][1][2][0][RTW89_KCC][23] = 12,
+ [0][1][2][0][RTW89_FCC][25] = -4,
+ [0][1][2][0][RTW89_ETSI][25] = 54,
+ [0][1][2][0][RTW89_KCC][25] = 12,
+ [0][1][2][0][RTW89_FCC][27] = -4,
+ [0][1][2][0][RTW89_ETSI][27] = 54,
+ [0][1][2][0][RTW89_KCC][27] = 12,
+ [0][1][2][0][RTW89_FCC][29] = -4,
+ [0][1][2][0][RTW89_ETSI][29] = 54,
+ [0][1][2][0][RTW89_KCC][29] = 12,
+ [0][1][2][0][RTW89_FCC][30] = -4,
+ [0][1][2][0][RTW89_ETSI][30] = 54,
+ [0][1][2][0][RTW89_KCC][30] = 12,
+ [0][1][2][0][RTW89_FCC][32] = -4,
+ [0][1][2][0][RTW89_ETSI][32] = 54,
+ [0][1][2][0][RTW89_KCC][32] = 12,
+ [0][1][2][0][RTW89_FCC][34] = -4,
+ [0][1][2][0][RTW89_ETSI][34] = 54,
+ [0][1][2][0][RTW89_KCC][34] = 12,
+ [0][1][2][0][RTW89_FCC][36] = -4,
+ [0][1][2][0][RTW89_ETSI][36] = 54,
+ [0][1][2][0][RTW89_KCC][36] = 12,
+ [0][1][2][0][RTW89_FCC][38] = -4,
+ [0][1][2][0][RTW89_ETSI][38] = 54,
+ [0][1][2][0][RTW89_KCC][38] = 12,
+ [0][1][2][0][RTW89_FCC][40] = -4,
+ [0][1][2][0][RTW89_ETSI][40] = 54,
+ [0][1][2][0][RTW89_KCC][40] = 12,
+ [0][1][2][0][RTW89_FCC][42] = -4,
+ [0][1][2][0][RTW89_ETSI][42] = 54,
+ [0][1][2][0][RTW89_KCC][42] = 12,
+ [0][1][2][0][RTW89_FCC][44] = -2,
+ [0][1][2][0][RTW89_ETSI][44] = 54,
+ [0][1][2][0][RTW89_KCC][44] = 12,
+ [0][1][2][0][RTW89_FCC][45] = -2,
+ [0][1][2][0][RTW89_ETSI][45] = 127,
+ [0][1][2][0][RTW89_KCC][45] = 12,
+ [0][1][2][0][RTW89_FCC][47] = -2,
+ [0][1][2][0][RTW89_ETSI][47] = 127,
+ [0][1][2][0][RTW89_KCC][47] = 12,
+ [0][1][2][0][RTW89_FCC][49] = -2,
+ [0][1][2][0][RTW89_ETSI][49] = 127,
+ [0][1][2][0][RTW89_KCC][49] = 12,
+ [0][1][2][0][RTW89_FCC][51] = -2,
+ [0][1][2][0][RTW89_ETSI][51] = 127,
+ [0][1][2][0][RTW89_KCC][51] = 12,
+ [0][1][2][0][RTW89_FCC][53] = -2,
+ [0][1][2][0][RTW89_ETSI][53] = 127,
+ [0][1][2][0][RTW89_KCC][53] = 12,
+ [0][1][2][0][RTW89_FCC][55] = -2,
+ [0][1][2][0][RTW89_ETSI][55] = 127,
+ [0][1][2][0][RTW89_KCC][55] = 12,
+ [0][1][2][0][RTW89_FCC][57] = -2,
+ [0][1][2][0][RTW89_ETSI][57] = 127,
+ [0][1][2][0][RTW89_KCC][57] = 12,
+ [0][1][2][0][RTW89_FCC][59] = -2,
+ [0][1][2][0][RTW89_ETSI][59] = 127,
+ [0][1][2][0][RTW89_KCC][59] = 12,
+ [0][1][2][0][RTW89_FCC][60] = -2,
+ [0][1][2][0][RTW89_ETSI][60] = 127,
+ [0][1][2][0][RTW89_KCC][60] = 12,
+ [0][1][2][0][RTW89_FCC][62] = -2,
+ [0][1][2][0][RTW89_ETSI][62] = 127,
+ [0][1][2][0][RTW89_KCC][62] = 12,
+ [0][1][2][0][RTW89_FCC][64] = -2,
+ [0][1][2][0][RTW89_ETSI][64] = 127,
+ [0][1][2][0][RTW89_KCC][64] = 12,
+ [0][1][2][0][RTW89_FCC][66] = -2,
+ [0][1][2][0][RTW89_ETSI][66] = 127,
+ [0][1][2][0][RTW89_KCC][66] = 12,
+ [0][1][2][0][RTW89_FCC][68] = -2,
+ [0][1][2][0][RTW89_ETSI][68] = 127,
+ [0][1][2][0][RTW89_KCC][68] = 12,
+ [0][1][2][0][RTW89_FCC][70] = -2,
+ [0][1][2][0][RTW89_ETSI][70] = 127,
+ [0][1][2][0][RTW89_KCC][70] = 12,
+ [0][1][2][0][RTW89_FCC][72] = -2,
+ [0][1][2][0][RTW89_ETSI][72] = 127,
+ [0][1][2][0][RTW89_KCC][72] = 12,
+ [0][1][2][0][RTW89_FCC][74] = -2,
+ [0][1][2][0][RTW89_ETSI][74] = 127,
+ [0][1][2][0][RTW89_KCC][74] = 12,
+ [0][1][2][0][RTW89_FCC][75] = -2,
+ [0][1][2][0][RTW89_ETSI][75] = 127,
+ [0][1][2][0][RTW89_KCC][75] = 12,
+ [0][1][2][0][RTW89_FCC][77] = -2,
+ [0][1][2][0][RTW89_ETSI][77] = 127,
+ [0][1][2][0][RTW89_KCC][77] = 12,
+ [0][1][2][0][RTW89_FCC][79] = -2,
+ [0][1][2][0][RTW89_ETSI][79] = 127,
+ [0][1][2][0][RTW89_KCC][79] = 12,
+ [0][1][2][0][RTW89_FCC][81] = -2,
+ [0][1][2][0][RTW89_ETSI][81] = 127,
+ [0][1][2][0][RTW89_KCC][81] = 12,
+ [0][1][2][0][RTW89_FCC][83] = -2,
+ [0][1][2][0][RTW89_ETSI][83] = 127,
+ [0][1][2][0][RTW89_KCC][83] = 20,
+ [0][1][2][0][RTW89_FCC][85] = -2,
+ [0][1][2][0][RTW89_ETSI][85] = 127,
+ [0][1][2][0][RTW89_KCC][85] = 20,
+ [0][1][2][0][RTW89_FCC][87] = -2,
+ [0][1][2][0][RTW89_ETSI][87] = 127,
+ [0][1][2][0][RTW89_KCC][87] = 20,
+ [0][1][2][0][RTW89_FCC][89] = -2,
+ [0][1][2][0][RTW89_ETSI][89] = 127,
+ [0][1][2][0][RTW89_KCC][89] = 20,
+ [0][1][2][0][RTW89_FCC][90] = -2,
+ [0][1][2][0][RTW89_ETSI][90] = 127,
+ [0][1][2][0][RTW89_KCC][90] = 20,
+ [0][1][2][0][RTW89_FCC][92] = -2,
+ [0][1][2][0][RTW89_ETSI][92] = 127,
+ [0][1][2][0][RTW89_KCC][92] = 20,
+ [0][1][2][0][RTW89_FCC][94] = -2,
+ [0][1][2][0][RTW89_ETSI][94] = 127,
+ [0][1][2][0][RTW89_KCC][94] = 20,
+ [0][1][2][0][RTW89_FCC][96] = -2,
+ [0][1][2][0][RTW89_ETSI][96] = 127,
+ [0][1][2][0][RTW89_KCC][96] = 20,
+ [0][1][2][0][RTW89_FCC][98] = -2,
+ [0][1][2][0][RTW89_ETSI][98] = 127,
+ [0][1][2][0][RTW89_KCC][98] = 20,
+ [0][1][2][0][RTW89_FCC][100] = -2,
+ [0][1][2][0][RTW89_ETSI][100] = 127,
+ [0][1][2][0][RTW89_KCC][100] = 20,
+ [0][1][2][0][RTW89_FCC][102] = -2,
+ [0][1][2][0][RTW89_ETSI][102] = 127,
+ [0][1][2][0][RTW89_KCC][102] = 20,
+ [0][1][2][0][RTW89_FCC][104] = -2,
+ [0][1][2][0][RTW89_ETSI][104] = 127,
+ [0][1][2][0][RTW89_KCC][104] = 20,
+ [0][1][2][0][RTW89_FCC][105] = -2,
+ [0][1][2][0][RTW89_ETSI][105] = 127,
+ [0][1][2][0][RTW89_KCC][105] = 20,
+ [0][1][2][0][RTW89_FCC][107] = 0,
+ [0][1][2][0][RTW89_ETSI][107] = 127,
+ [0][1][2][0][RTW89_KCC][107] = 20,
+ [0][1][2][0][RTW89_FCC][109] = 0,
+ [0][1][2][0][RTW89_ETSI][109] = 127,
+ [0][1][2][0][RTW89_KCC][109] = 20,
[0][1][2][0][RTW89_FCC][111] = 127,
+ [0][1][2][0][RTW89_ETSI][111] = 127,
+ [0][1][2][0][RTW89_KCC][111] = 127,
[0][1][2][0][RTW89_FCC][113] = 127,
+ [0][1][2][0][RTW89_ETSI][113] = 127,
+ [0][1][2][0][RTW89_KCC][113] = 127,
[0][1][2][0][RTW89_FCC][115] = 127,
+ [0][1][2][0][RTW89_ETSI][115] = 127,
+ [0][1][2][0][RTW89_KCC][115] = 127,
[0][1][2][0][RTW89_FCC][117] = 127,
+ [0][1][2][0][RTW89_ETSI][117] = 127,
+ [0][1][2][0][RTW89_KCC][117] = 127,
[0][1][2][0][RTW89_FCC][119] = 127,
- [0][1][2][1][RTW89_FCC][0] = 48,
- [0][1][2][1][RTW89_FCC][2] = 48,
- [0][1][2][1][RTW89_FCC][4] = 48,
- [0][1][2][1][RTW89_FCC][6] = 48,
- [0][1][2][1][RTW89_FCC][8] = 48,
- [0][1][2][1][RTW89_FCC][10] = 48,
- [0][1][2][1][RTW89_FCC][12] = 48,
- [0][1][2][1][RTW89_FCC][14] = 48,
- [0][1][2][1][RTW89_FCC][15] = 48,
- [0][1][2][1][RTW89_FCC][17] = 48,
- [0][1][2][1][RTW89_FCC][19] = 48,
- [0][1][2][1][RTW89_FCC][21] = 48,
- [0][1][2][1][RTW89_FCC][23] = 48,
- [0][1][2][1][RTW89_FCC][25] = 48,
- [0][1][2][1][RTW89_FCC][27] = 48,
- [0][1][2][1][RTW89_FCC][29] = 48,
- [0][1][2][1][RTW89_FCC][30] = 48,
- [0][1][2][1][RTW89_FCC][32] = 48,
- [0][1][2][1][RTW89_FCC][34] = 48,
- [0][1][2][1][RTW89_FCC][36] = 48,
- [0][1][2][1][RTW89_FCC][38] = 48,
- [0][1][2][1][RTW89_FCC][40] = 48,
- [0][1][2][1][RTW89_FCC][42] = 48,
- [0][1][2][1][RTW89_FCC][44] = 48,
- [0][1][2][1][RTW89_FCC][45] = 48,
- [0][1][2][1][RTW89_FCC][47] = 48,
- [0][1][2][1][RTW89_FCC][49] = 48,
- [0][1][2][1][RTW89_FCC][51] = 48,
- [0][1][2][1][RTW89_FCC][53] = 48,
- [0][1][2][1][RTW89_FCC][55] = 48,
- [0][1][2][1][RTW89_FCC][57] = 48,
- [0][1][2][1][RTW89_FCC][59] = 48,
- [0][1][2][1][RTW89_FCC][60] = 48,
- [0][1][2][1][RTW89_FCC][62] = 48,
- [0][1][2][1][RTW89_FCC][64] = 48,
- [0][1][2][1][RTW89_FCC][66] = 48,
- [0][1][2][1][RTW89_FCC][68] = 48,
- [0][1][2][1][RTW89_FCC][70] = 48,
- [0][1][2][1][RTW89_FCC][72] = 48,
- [0][1][2][1][RTW89_FCC][74] = 48,
- [0][1][2][1][RTW89_FCC][75] = 48,
- [0][1][2][1][RTW89_FCC][77] = 48,
- [0][1][2][1][RTW89_FCC][79] = 48,
- [0][1][2][1][RTW89_FCC][81] = 48,
- [0][1][2][1][RTW89_FCC][83] = 48,
- [0][1][2][1][RTW89_FCC][85] = 48,
- [0][1][2][1][RTW89_FCC][87] = 48,
- [0][1][2][1][RTW89_FCC][89] = 48,
- [0][1][2][1][RTW89_FCC][90] = 48,
- [0][1][2][1][RTW89_FCC][92] = 48,
- [0][1][2][1][RTW89_FCC][94] = 48,
- [0][1][2][1][RTW89_FCC][96] = 48,
- [0][1][2][1][RTW89_FCC][98] = 48,
- [0][1][2][1][RTW89_FCC][100] = 48,
- [0][1][2][1][RTW89_FCC][102] = 48,
- [0][1][2][1][RTW89_FCC][104] = 48,
- [0][1][2][1][RTW89_FCC][105] = 48,
- [0][1][2][1][RTW89_FCC][107] = 48,
- [0][1][2][1][RTW89_FCC][109] = 48,
+ [0][1][2][0][RTW89_ETSI][119] = 127,
+ [0][1][2][0][RTW89_KCC][119] = 127,
+ [0][1][2][1][RTW89_FCC][0] = -2,
+ [0][1][2][1][RTW89_ETSI][0] = 42,
+ [0][1][2][1][RTW89_KCC][0] = 12,
+ [0][1][2][1][RTW89_FCC][2] = -4,
+ [0][1][2][1][RTW89_ETSI][2] = 42,
+ [0][1][2][1][RTW89_KCC][2] = 12,
+ [0][1][2][1][RTW89_FCC][4] = -4,
+ [0][1][2][1][RTW89_ETSI][4] = 42,
+ [0][1][2][1][RTW89_KCC][4] = 12,
+ [0][1][2][1][RTW89_FCC][6] = -4,
+ [0][1][2][1][RTW89_ETSI][6] = 42,
+ [0][1][2][1][RTW89_KCC][6] = 12,
+ [0][1][2][1][RTW89_FCC][8] = -4,
+ [0][1][2][1][RTW89_ETSI][8] = 42,
+ [0][1][2][1][RTW89_KCC][8] = 12,
+ [0][1][2][1][RTW89_FCC][10] = -4,
+ [0][1][2][1][RTW89_ETSI][10] = 42,
+ [0][1][2][1][RTW89_KCC][10] = 12,
+ [0][1][2][1][RTW89_FCC][12] = -4,
+ [0][1][2][1][RTW89_ETSI][12] = 42,
+ [0][1][2][1][RTW89_KCC][12] = 12,
+ [0][1][2][1][RTW89_FCC][14] = -4,
+ [0][1][2][1][RTW89_ETSI][14] = 42,
+ [0][1][2][1][RTW89_KCC][14] = 12,
+ [0][1][2][1][RTW89_FCC][15] = -4,
+ [0][1][2][1][RTW89_ETSI][15] = 42,
+ [0][1][2][1][RTW89_KCC][15] = 12,
+ [0][1][2][1][RTW89_FCC][17] = -4,
+ [0][1][2][1][RTW89_ETSI][17] = 42,
+ [0][1][2][1][RTW89_KCC][17] = 12,
+ [0][1][2][1][RTW89_FCC][19] = -4,
+ [0][1][2][1][RTW89_ETSI][19] = 42,
+ [0][1][2][1][RTW89_KCC][19] = 12,
+ [0][1][2][1][RTW89_FCC][21] = -4,
+ [0][1][2][1][RTW89_ETSI][21] = 42,
+ [0][1][2][1][RTW89_KCC][21] = 12,
+ [0][1][2][1][RTW89_FCC][23] = -4,
+ [0][1][2][1][RTW89_ETSI][23] = 42,
+ [0][1][2][1][RTW89_KCC][23] = 12,
+ [0][1][2][1][RTW89_FCC][25] = -4,
+ [0][1][2][1][RTW89_ETSI][25] = 42,
+ [0][1][2][1][RTW89_KCC][25] = 12,
+ [0][1][2][1][RTW89_FCC][27] = -4,
+ [0][1][2][1][RTW89_ETSI][27] = 42,
+ [0][1][2][1][RTW89_KCC][27] = 12,
+ [0][1][2][1][RTW89_FCC][29] = -4,
+ [0][1][2][1][RTW89_ETSI][29] = 42,
+ [0][1][2][1][RTW89_KCC][29] = 12,
+ [0][1][2][1][RTW89_FCC][30] = -4,
+ [0][1][2][1][RTW89_ETSI][30] = 42,
+ [0][1][2][1][RTW89_KCC][30] = 12,
+ [0][1][2][1][RTW89_FCC][32] = -4,
+ [0][1][2][1][RTW89_ETSI][32] = 42,
+ [0][1][2][1][RTW89_KCC][32] = 12,
+ [0][1][2][1][RTW89_FCC][34] = -4,
+ [0][1][2][1][RTW89_ETSI][34] = 42,
+ [0][1][2][1][RTW89_KCC][34] = 12,
+ [0][1][2][1][RTW89_FCC][36] = -4,
+ [0][1][2][1][RTW89_ETSI][36] = 42,
+ [0][1][2][1][RTW89_KCC][36] = 12,
+ [0][1][2][1][RTW89_FCC][38] = -4,
+ [0][1][2][1][RTW89_ETSI][38] = 42,
+ [0][1][2][1][RTW89_KCC][38] = 12,
+ [0][1][2][1][RTW89_FCC][40] = -4,
+ [0][1][2][1][RTW89_ETSI][40] = 42,
+ [0][1][2][1][RTW89_KCC][40] = 12,
+ [0][1][2][1][RTW89_FCC][42] = -4,
+ [0][1][2][1][RTW89_ETSI][42] = 42,
+ [0][1][2][1][RTW89_KCC][42] = 12,
+ [0][1][2][1][RTW89_FCC][44] = -2,
+ [0][1][2][1][RTW89_ETSI][44] = 42,
+ [0][1][2][1][RTW89_KCC][44] = 12,
+ [0][1][2][1][RTW89_FCC][45] = -2,
+ [0][1][2][1][RTW89_ETSI][45] = 127,
+ [0][1][2][1][RTW89_KCC][45] = 12,
+ [0][1][2][1][RTW89_FCC][47] = -2,
+ [0][1][2][1][RTW89_ETSI][47] = 127,
+ [0][1][2][1][RTW89_KCC][47] = 12,
+ [0][1][2][1][RTW89_FCC][49] = -2,
+ [0][1][2][1][RTW89_ETSI][49] = 127,
+ [0][1][2][1][RTW89_KCC][49] = 12,
+ [0][1][2][1][RTW89_FCC][51] = -2,
+ [0][1][2][1][RTW89_ETSI][51] = 127,
+ [0][1][2][1][RTW89_KCC][51] = 12,
+ [0][1][2][1][RTW89_FCC][53] = -2,
+ [0][1][2][1][RTW89_ETSI][53] = 127,
+ [0][1][2][1][RTW89_KCC][53] = 12,
+ [0][1][2][1][RTW89_FCC][55] = -2,
+ [0][1][2][1][RTW89_ETSI][55] = 127,
+ [0][1][2][1][RTW89_KCC][55] = 12,
+ [0][1][2][1][RTW89_FCC][57] = -2,
+ [0][1][2][1][RTW89_ETSI][57] = 127,
+ [0][1][2][1][RTW89_KCC][57] = 12,
+ [0][1][2][1][RTW89_FCC][59] = -2,
+ [0][1][2][1][RTW89_ETSI][59] = 127,
+ [0][1][2][1][RTW89_KCC][59] = 12,
+ [0][1][2][1][RTW89_FCC][60] = -2,
+ [0][1][2][1][RTW89_ETSI][60] = 127,
+ [0][1][2][1][RTW89_KCC][60] = 12,
+ [0][1][2][1][RTW89_FCC][62] = -2,
+ [0][1][2][1][RTW89_ETSI][62] = 127,
+ [0][1][2][1][RTW89_KCC][62] = 12,
+ [0][1][2][1][RTW89_FCC][64] = -2,
+ [0][1][2][1][RTW89_ETSI][64] = 127,
+ [0][1][2][1][RTW89_KCC][64] = 12,
+ [0][1][2][1][RTW89_FCC][66] = -2,
+ [0][1][2][1][RTW89_ETSI][66] = 127,
+ [0][1][2][1][RTW89_KCC][66] = 12,
+ [0][1][2][1][RTW89_FCC][68] = -2,
+ [0][1][2][1][RTW89_ETSI][68] = 127,
+ [0][1][2][1][RTW89_KCC][68] = 12,
+ [0][1][2][1][RTW89_FCC][70] = -2,
+ [0][1][2][1][RTW89_ETSI][70] = 127,
+ [0][1][2][1][RTW89_KCC][70] = 12,
+ [0][1][2][1][RTW89_FCC][72] = -2,
+ [0][1][2][1][RTW89_ETSI][72] = 127,
+ [0][1][2][1][RTW89_KCC][72] = 12,
+ [0][1][2][1][RTW89_FCC][74] = -2,
+ [0][1][2][1][RTW89_ETSI][74] = 127,
+ [0][1][2][1][RTW89_KCC][74] = 12,
+ [0][1][2][1][RTW89_FCC][75] = -2,
+ [0][1][2][1][RTW89_ETSI][75] = 127,
+ [0][1][2][1][RTW89_KCC][75] = 12,
+ [0][1][2][1][RTW89_FCC][77] = -2,
+ [0][1][2][1][RTW89_ETSI][77] = 127,
+ [0][1][2][1][RTW89_KCC][77] = 12,
+ [0][1][2][1][RTW89_FCC][79] = -2,
+ [0][1][2][1][RTW89_ETSI][79] = 127,
+ [0][1][2][1][RTW89_KCC][79] = 12,
+ [0][1][2][1][RTW89_FCC][81] = -2,
+ [0][1][2][1][RTW89_ETSI][81] = 127,
+ [0][1][2][1][RTW89_KCC][81] = 12,
+ [0][1][2][1][RTW89_FCC][83] = -2,
+ [0][1][2][1][RTW89_ETSI][83] = 127,
+ [0][1][2][1][RTW89_KCC][83] = 20,
+ [0][1][2][1][RTW89_FCC][85] = -2,
+ [0][1][2][1][RTW89_ETSI][85] = 127,
+ [0][1][2][1][RTW89_KCC][85] = 20,
+ [0][1][2][1][RTW89_FCC][87] = -2,
+ [0][1][2][1][RTW89_ETSI][87] = 127,
+ [0][1][2][1][RTW89_KCC][87] = 20,
+ [0][1][2][1][RTW89_FCC][89] = -2,
+ [0][1][2][1][RTW89_ETSI][89] = 127,
+ [0][1][2][1][RTW89_KCC][89] = 20,
+ [0][1][2][1][RTW89_FCC][90] = -2,
+ [0][1][2][1][RTW89_ETSI][90] = 127,
+ [0][1][2][1][RTW89_KCC][90] = 20,
+ [0][1][2][1][RTW89_FCC][92] = -2,
+ [0][1][2][1][RTW89_ETSI][92] = 127,
+ [0][1][2][1][RTW89_KCC][92] = 20,
+ [0][1][2][1][RTW89_FCC][94] = -2,
+ [0][1][2][1][RTW89_ETSI][94] = 127,
+ [0][1][2][1][RTW89_KCC][94] = 20,
+ [0][1][2][1][RTW89_FCC][96] = -2,
+ [0][1][2][1][RTW89_ETSI][96] = 127,
+ [0][1][2][1][RTW89_KCC][96] = 20,
+ [0][1][2][1][RTW89_FCC][98] = -2,
+ [0][1][2][1][RTW89_ETSI][98] = 127,
+ [0][1][2][1][RTW89_KCC][98] = 20,
+ [0][1][2][1][RTW89_FCC][100] = -2,
+ [0][1][2][1][RTW89_ETSI][100] = 127,
+ [0][1][2][1][RTW89_KCC][100] = 20,
+ [0][1][2][1][RTW89_FCC][102] = -2,
+ [0][1][2][1][RTW89_ETSI][102] = 127,
+ [0][1][2][1][RTW89_KCC][102] = 20,
+ [0][1][2][1][RTW89_FCC][104] = -2,
+ [0][1][2][1][RTW89_ETSI][104] = 127,
+ [0][1][2][1][RTW89_KCC][104] = 20,
+ [0][1][2][1][RTW89_FCC][105] = -2,
+ [0][1][2][1][RTW89_ETSI][105] = 127,
+ [0][1][2][1][RTW89_KCC][105] = 20,
+ [0][1][2][1][RTW89_FCC][107] = 0,
+ [0][1][2][1][RTW89_ETSI][107] = 127,
+ [0][1][2][1][RTW89_KCC][107] = 20,
+ [0][1][2][1][RTW89_FCC][109] = 0,
+ [0][1][2][1][RTW89_ETSI][109] = 127,
+ [0][1][2][1][RTW89_KCC][109] = 20,
[0][1][2][1][RTW89_FCC][111] = 127,
+ [0][1][2][1][RTW89_ETSI][111] = 127,
+ [0][1][2][1][RTW89_KCC][111] = 127,
[0][1][2][1][RTW89_FCC][113] = 127,
+ [0][1][2][1][RTW89_ETSI][113] = 127,
+ [0][1][2][1][RTW89_KCC][113] = 127,
[0][1][2][1][RTW89_FCC][115] = 127,
+ [0][1][2][1][RTW89_ETSI][115] = 127,
+ [0][1][2][1][RTW89_KCC][115] = 127,
[0][1][2][1][RTW89_FCC][117] = 127,
+ [0][1][2][1][RTW89_ETSI][117] = 127,
+ [0][1][2][1][RTW89_KCC][117] = 127,
[0][1][2][1][RTW89_FCC][119] = 127,
- [1][0][2][0][RTW89_FCC][1] = 72,
- [1][0][2][0][RTW89_FCC][5] = 72,
- [1][0][2][0][RTW89_FCC][9] = 72,
- [1][0][2][0][RTW89_FCC][13] = 72,
- [1][0][2][0][RTW89_FCC][16] = 72,
- [1][0][2][0][RTW89_FCC][20] = 72,
- [1][0][2][0][RTW89_FCC][24] = 72,
- [1][0][2][0][RTW89_FCC][28] = 72,
- [1][0][2][0][RTW89_FCC][31] = 72,
- [1][0][2][0][RTW89_FCC][35] = 72,
- [1][0][2][0][RTW89_FCC][39] = 72,
- [1][0][2][0][RTW89_FCC][43] = 72,
- [1][0][2][0][RTW89_FCC][46] = 72,
- [1][0][2][0][RTW89_FCC][50] = 72,
- [1][0][2][0][RTW89_FCC][54] = 72,
- [1][0][2][0][RTW89_FCC][58] = 72,
- [1][0][2][0][RTW89_FCC][61] = 72,
- [1][0][2][0][RTW89_FCC][65] = 72,
- [1][0][2][0][RTW89_FCC][69] = 72,
- [1][0][2][0][RTW89_FCC][73] = 72,
- [1][0][2][0][RTW89_FCC][76] = 72,
- [1][0][2][0][RTW89_FCC][80] = 72,
- [1][0][2][0][RTW89_FCC][84] = 72,
- [1][0][2][0][RTW89_FCC][88] = 72,
- [1][0][2][0][RTW89_FCC][91] = 72,
- [1][0][2][0][RTW89_FCC][95] = 72,
- [1][0][2][0][RTW89_FCC][99] = 72,
- [1][0][2][0][RTW89_FCC][103] = 72,
- [1][0][2][0][RTW89_FCC][106] = 72,
+ [0][1][2][1][RTW89_ETSI][119] = 127,
+ [0][1][2][1][RTW89_KCC][119] = 127,
+ [1][0][2][0][RTW89_FCC][1] = 34,
+ [1][0][2][0][RTW89_ETSI][1] = 66,
+ [1][0][2][0][RTW89_KCC][1] = 40,
+ [1][0][2][0][RTW89_FCC][5] = 34,
+ [1][0][2][0][RTW89_ETSI][5] = 66,
+ [1][0][2][0][RTW89_KCC][5] = 40,
+ [1][0][2][0][RTW89_FCC][9] = 34,
+ [1][0][2][0][RTW89_ETSI][9] = 66,
+ [1][0][2][0][RTW89_KCC][9] = 40,
+ [1][0][2][0][RTW89_FCC][13] = 34,
+ [1][0][2][0][RTW89_ETSI][13] = 66,
+ [1][0][2][0][RTW89_KCC][13] = 40,
+ [1][0][2][0][RTW89_FCC][16] = 34,
+ [1][0][2][0][RTW89_ETSI][16] = 66,
+ [1][0][2][0][RTW89_KCC][16] = 40,
+ [1][0][2][0][RTW89_FCC][20] = 34,
+ [1][0][2][0][RTW89_ETSI][20] = 66,
+ [1][0][2][0][RTW89_KCC][20] = 40,
+ [1][0][2][0][RTW89_FCC][24] = 36,
+ [1][0][2][0][RTW89_ETSI][24] = 66,
+ [1][0][2][0][RTW89_KCC][24] = 40,
+ [1][0][2][0][RTW89_FCC][28] = 34,
+ [1][0][2][0][RTW89_ETSI][28] = 66,
+ [1][0][2][0][RTW89_KCC][28] = 40,
+ [1][0][2][0][RTW89_FCC][31] = 34,
+ [1][0][2][0][RTW89_ETSI][31] = 66,
+ [1][0][2][0][RTW89_KCC][31] = 40,
+ [1][0][2][0][RTW89_FCC][35] = 34,
+ [1][0][2][0][RTW89_ETSI][35] = 66,
+ [1][0][2][0][RTW89_KCC][35] = 40,
+ [1][0][2][0][RTW89_FCC][39] = 34,
+ [1][0][2][0][RTW89_ETSI][39] = 66,
+ [1][0][2][0][RTW89_KCC][39] = 40,
+ [1][0][2][0][RTW89_FCC][43] = 34,
+ [1][0][2][0][RTW89_ETSI][43] = 66,
+ [1][0][2][0][RTW89_KCC][43] = 40,
+ [1][0][2][0][RTW89_FCC][46] = 34,
+ [1][0][2][0][RTW89_ETSI][46] = 127,
+ [1][0][2][0][RTW89_KCC][46] = 40,
+ [1][0][2][0][RTW89_FCC][50] = 34,
+ [1][0][2][0][RTW89_ETSI][50] = 127,
+ [1][0][2][0][RTW89_KCC][50] = 40,
+ [1][0][2][0][RTW89_FCC][54] = 36,
+ [1][0][2][0][RTW89_ETSI][54] = 127,
+ [1][0][2][0][RTW89_KCC][54] = 40,
+ [1][0][2][0][RTW89_FCC][58] = 36,
+ [1][0][2][0][RTW89_ETSI][58] = 127,
+ [1][0][2][0][RTW89_KCC][58] = 40,
+ [1][0][2][0][RTW89_FCC][61] = 34,
+ [1][0][2][0][RTW89_ETSI][61] = 127,
+ [1][0][2][0][RTW89_KCC][61] = 40,
+ [1][0][2][0][RTW89_FCC][65] = 34,
+ [1][0][2][0][RTW89_ETSI][65] = 127,
+ [1][0][2][0][RTW89_KCC][65] = 40,
+ [1][0][2][0][RTW89_FCC][69] = 34,
+ [1][0][2][0][RTW89_ETSI][69] = 127,
+ [1][0][2][0][RTW89_KCC][69] = 40,
+ [1][0][2][0][RTW89_FCC][73] = 34,
+ [1][0][2][0][RTW89_ETSI][73] = 127,
+ [1][0][2][0][RTW89_KCC][73] = 40,
+ [1][0][2][0][RTW89_FCC][76] = 34,
+ [1][0][2][0][RTW89_ETSI][76] = 127,
+ [1][0][2][0][RTW89_KCC][76] = 40,
+ [1][0][2][0][RTW89_FCC][80] = 34,
+ [1][0][2][0][RTW89_ETSI][80] = 127,
+ [1][0][2][0][RTW89_KCC][80] = 42,
+ [1][0][2][0][RTW89_FCC][84] = 34,
+ [1][0][2][0][RTW89_ETSI][84] = 127,
+ [1][0][2][0][RTW89_KCC][84] = 42,
+ [1][0][2][0][RTW89_FCC][88] = 34,
+ [1][0][2][0][RTW89_ETSI][88] = 127,
+ [1][0][2][0][RTW89_KCC][88] = 42,
+ [1][0][2][0][RTW89_FCC][91] = 36,
+ [1][0][2][0][RTW89_ETSI][91] = 127,
+ [1][0][2][0][RTW89_KCC][91] = 42,
+ [1][0][2][0][RTW89_FCC][95] = 34,
+ [1][0][2][0][RTW89_ETSI][95] = 127,
+ [1][0][2][0][RTW89_KCC][95] = 42,
+ [1][0][2][0][RTW89_FCC][99] = 34,
+ [1][0][2][0][RTW89_ETSI][99] = 127,
+ [1][0][2][0][RTW89_KCC][99] = 42,
+ [1][0][2][0][RTW89_FCC][103] = 34,
+ [1][0][2][0][RTW89_ETSI][103] = 127,
+ [1][0][2][0][RTW89_KCC][103] = 42,
+ [1][0][2][0][RTW89_FCC][106] = 36,
+ [1][0][2][0][RTW89_ETSI][106] = 127,
+ [1][0][2][0][RTW89_KCC][106] = 42,
[1][0][2][0][RTW89_FCC][110] = 127,
+ [1][0][2][0][RTW89_ETSI][110] = 127,
+ [1][0][2][0][RTW89_KCC][110] = 127,
[1][0][2][0][RTW89_FCC][114] = 127,
+ [1][0][2][0][RTW89_ETSI][114] = 127,
+ [1][0][2][0][RTW89_KCC][114] = 127,
[1][0][2][0][RTW89_FCC][118] = 127,
- [1][1][2][0][RTW89_FCC][1] = 60,
- [1][1][2][0][RTW89_FCC][5] = 60,
- [1][1][2][0][RTW89_FCC][9] = 60,
- [1][1][2][0][RTW89_FCC][13] = 60,
- [1][1][2][0][RTW89_FCC][16] = 60,
- [1][1][2][0][RTW89_FCC][20] = 60,
- [1][1][2][0][RTW89_FCC][24] = 60,
- [1][1][2][0][RTW89_FCC][28] = 60,
- [1][1][2][0][RTW89_FCC][31] = 60,
- [1][1][2][0][RTW89_FCC][35] = 60,
- [1][1][2][0][RTW89_FCC][39] = 60,
- [1][1][2][0][RTW89_FCC][43] = 60,
- [1][1][2][0][RTW89_FCC][46] = 60,
- [1][1][2][0][RTW89_FCC][50] = 60,
- [1][1][2][0][RTW89_FCC][54] = 60,
- [1][1][2][0][RTW89_FCC][58] = 60,
- [1][1][2][0][RTW89_FCC][61] = 60,
- [1][1][2][0][RTW89_FCC][65] = 60,
- [1][1][2][0][RTW89_FCC][69] = 60,
- [1][1][2][0][RTW89_FCC][73] = 60,
- [1][1][2][0][RTW89_FCC][76] = 60,
- [1][1][2][0][RTW89_FCC][80] = 60,
- [1][1][2][0][RTW89_FCC][84] = 60,
- [1][1][2][0][RTW89_FCC][88] = 60,
- [1][1][2][0][RTW89_FCC][91] = 60,
- [1][1][2][0][RTW89_FCC][95] = 60,
- [1][1][2][0][RTW89_FCC][99] = 60,
- [1][1][2][0][RTW89_FCC][103] = 60,
- [1][1][2][0][RTW89_FCC][106] = 60,
+ [1][0][2][0][RTW89_ETSI][118] = 127,
+ [1][0][2][0][RTW89_KCC][118] = 127,
+ [1][1][2][0][RTW89_FCC][1] = 10,
+ [1][1][2][0][RTW89_ETSI][1] = 54,
+ [1][1][2][0][RTW89_KCC][1] = 28,
+ [1][1][2][0][RTW89_FCC][5] = 10,
+ [1][1][2][0][RTW89_ETSI][5] = 54,
+ [1][1][2][0][RTW89_KCC][5] = 28,
+ [1][1][2][0][RTW89_FCC][9] = 10,
+ [1][1][2][0][RTW89_ETSI][9] = 54,
+ [1][1][2][0][RTW89_KCC][9] = 28,
+ [1][1][2][0][RTW89_FCC][13] = 10,
+ [1][1][2][0][RTW89_ETSI][13] = 54,
+ [1][1][2][0][RTW89_KCC][13] = 28,
+ [1][1][2][0][RTW89_FCC][16] = 10,
+ [1][1][2][0][RTW89_ETSI][16] = 54,
+ [1][1][2][0][RTW89_KCC][16] = 28,
+ [1][1][2][0][RTW89_FCC][20] = 10,
+ [1][1][2][0][RTW89_ETSI][20] = 54,
+ [1][1][2][0][RTW89_KCC][20] = 28,
+ [1][1][2][0][RTW89_FCC][24] = 10,
+ [1][1][2][0][RTW89_ETSI][24] = 54,
+ [1][1][2][0][RTW89_KCC][24] = 28,
+ [1][1][2][0][RTW89_FCC][28] = 10,
+ [1][1][2][0][RTW89_ETSI][28] = 54,
+ [1][1][2][0][RTW89_KCC][28] = 28,
+ [1][1][2][0][RTW89_FCC][31] = 10,
+ [1][1][2][0][RTW89_ETSI][31] = 54,
+ [1][1][2][0][RTW89_KCC][31] = 28,
+ [1][1][2][0][RTW89_FCC][35] = 10,
+ [1][1][2][0][RTW89_ETSI][35] = 54,
+ [1][1][2][0][RTW89_KCC][35] = 28,
+ [1][1][2][0][RTW89_FCC][39] = 10,
+ [1][1][2][0][RTW89_ETSI][39] = 54,
+ [1][1][2][0][RTW89_KCC][39] = 28,
+ [1][1][2][0][RTW89_FCC][43] = 10,
+ [1][1][2][0][RTW89_ETSI][43] = 54,
+ [1][1][2][0][RTW89_KCC][43] = 28,
+ [1][1][2][0][RTW89_FCC][46] = 12,
+ [1][1][2][0][RTW89_ETSI][46] = 127,
+ [1][1][2][0][RTW89_KCC][46] = 28,
+ [1][1][2][0][RTW89_FCC][50] = 12,
+ [1][1][2][0][RTW89_ETSI][50] = 127,
+ [1][1][2][0][RTW89_KCC][50] = 28,
+ [1][1][2][0][RTW89_FCC][54] = 10,
+ [1][1][2][0][RTW89_ETSI][54] = 127,
+ [1][1][2][0][RTW89_KCC][54] = 28,
+ [1][1][2][0][RTW89_FCC][58] = 10,
+ [1][1][2][0][RTW89_ETSI][58] = 127,
+ [1][1][2][0][RTW89_KCC][58] = 28,
+ [1][1][2][0][RTW89_FCC][61] = 10,
+ [1][1][2][0][RTW89_ETSI][61] = 127,
+ [1][1][2][0][RTW89_KCC][61] = 28,
+ [1][1][2][0][RTW89_FCC][65] = 10,
+ [1][1][2][0][RTW89_ETSI][65] = 127,
+ [1][1][2][0][RTW89_KCC][65] = 28,
+ [1][1][2][0][RTW89_FCC][69] = 10,
+ [1][1][2][0][RTW89_ETSI][69] = 127,
+ [1][1][2][0][RTW89_KCC][69] = 28,
+ [1][1][2][0][RTW89_FCC][73] = 10,
+ [1][1][2][0][RTW89_ETSI][73] = 127,
+ [1][1][2][0][RTW89_KCC][73] = 28,
+ [1][1][2][0][RTW89_FCC][76] = 10,
+ [1][1][2][0][RTW89_ETSI][76] = 127,
+ [1][1][2][0][RTW89_KCC][76] = 28,
+ [1][1][2][0][RTW89_FCC][80] = 10,
+ [1][1][2][0][RTW89_ETSI][80] = 127,
+ [1][1][2][0][RTW89_KCC][80] = 32,
+ [1][1][2][0][RTW89_FCC][84] = 10,
+ [1][1][2][0][RTW89_ETSI][84] = 127,
+ [1][1][2][0][RTW89_KCC][84] = 32,
+ [1][1][2][0][RTW89_FCC][88] = 10,
+ [1][1][2][0][RTW89_ETSI][88] = 127,
+ [1][1][2][0][RTW89_KCC][88] = 32,
+ [1][1][2][0][RTW89_FCC][91] = 12,
+ [1][1][2][0][RTW89_ETSI][91] = 127,
+ [1][1][2][0][RTW89_KCC][91] = 32,
+ [1][1][2][0][RTW89_FCC][95] = 10,
+ [1][1][2][0][RTW89_ETSI][95] = 127,
+ [1][1][2][0][RTW89_KCC][95] = 32,
+ [1][1][2][0][RTW89_FCC][99] = 10,
+ [1][1][2][0][RTW89_ETSI][99] = 127,
+ [1][1][2][0][RTW89_KCC][99] = 32,
+ [1][1][2][0][RTW89_FCC][103] = 10,
+ [1][1][2][0][RTW89_ETSI][103] = 127,
+ [1][1][2][0][RTW89_KCC][103] = 32,
+ [1][1][2][0][RTW89_FCC][106] = 12,
+ [1][1][2][0][RTW89_ETSI][106] = 127,
+ [1][1][2][0][RTW89_KCC][106] = 32,
[1][1][2][0][RTW89_FCC][110] = 127,
+ [1][1][2][0][RTW89_ETSI][110] = 127,
+ [1][1][2][0][RTW89_KCC][110] = 127,
[1][1][2][0][RTW89_FCC][114] = 127,
+ [1][1][2][0][RTW89_ETSI][114] = 127,
+ [1][1][2][0][RTW89_KCC][114] = 127,
[1][1][2][0][RTW89_FCC][118] = 127,
- [1][1][2][1][RTW89_FCC][1] = 48,
- [1][1][2][1][RTW89_FCC][5] = 48,
- [1][1][2][1][RTW89_FCC][9] = 48,
- [1][1][2][1][RTW89_FCC][13] = 48,
- [1][1][2][1][RTW89_FCC][16] = 48,
- [1][1][2][1][RTW89_FCC][20] = 48,
- [1][1][2][1][RTW89_FCC][24] = 48,
- [1][1][2][1][RTW89_FCC][28] = 48,
- [1][1][2][1][RTW89_FCC][31] = 48,
- [1][1][2][1][RTW89_FCC][35] = 48,
- [1][1][2][1][RTW89_FCC][39] = 48,
- [1][1][2][1][RTW89_FCC][43] = 48,
- [1][1][2][1][RTW89_FCC][46] = 48,
- [1][1][2][1][RTW89_FCC][50] = 48,
- [1][1][2][1][RTW89_FCC][54] = 48,
- [1][1][2][1][RTW89_FCC][58] = 48,
- [1][1][2][1][RTW89_FCC][61] = 48,
- [1][1][2][1][RTW89_FCC][65] = 48,
- [1][1][2][1][RTW89_FCC][69] = 48,
- [1][1][2][1][RTW89_FCC][73] = 48,
- [1][1][2][1][RTW89_FCC][76] = 48,
- [1][1][2][1][RTW89_FCC][80] = 48,
- [1][1][2][1][RTW89_FCC][84] = 48,
- [1][1][2][1][RTW89_FCC][88] = 48,
- [1][1][2][1][RTW89_FCC][91] = 48,
- [1][1][2][1][RTW89_FCC][95] = 48,
- [1][1][2][1][RTW89_FCC][99] = 48,
- [1][1][2][1][RTW89_FCC][103] = 48,
- [1][1][2][1][RTW89_FCC][106] = 48,
+ [1][1][2][0][RTW89_ETSI][118] = 127,
+ [1][1][2][0][RTW89_KCC][118] = 127,
+ [1][1][2][1][RTW89_FCC][1] = 10,
+ [1][1][2][1][RTW89_ETSI][1] = 42,
+ [1][1][2][1][RTW89_KCC][1] = 28,
+ [1][1][2][1][RTW89_FCC][5] = 10,
+ [1][1][2][1][RTW89_ETSI][5] = 42,
+ [1][1][2][1][RTW89_KCC][5] = 28,
+ [1][1][2][1][RTW89_FCC][9] = 10,
+ [1][1][2][1][RTW89_ETSI][9] = 42,
+ [1][1][2][1][RTW89_KCC][9] = 28,
+ [1][1][2][1][RTW89_FCC][13] = 10,
+ [1][1][2][1][RTW89_ETSI][13] = 42,
+ [1][1][2][1][RTW89_KCC][13] = 28,
+ [1][1][2][1][RTW89_FCC][16] = 10,
+ [1][1][2][1][RTW89_ETSI][16] = 42,
+ [1][1][2][1][RTW89_KCC][16] = 28,
+ [1][1][2][1][RTW89_FCC][20] = 10,
+ [1][1][2][1][RTW89_ETSI][20] = 42,
+ [1][1][2][1][RTW89_KCC][20] = 28,
+ [1][1][2][1][RTW89_FCC][24] = 10,
+ [1][1][2][1][RTW89_ETSI][24] = 42,
+ [1][1][2][1][RTW89_KCC][24] = 28,
+ [1][1][2][1][RTW89_FCC][28] = 10,
+ [1][1][2][1][RTW89_ETSI][28] = 42,
+ [1][1][2][1][RTW89_KCC][28] = 28,
+ [1][1][2][1][RTW89_FCC][31] = 10,
+ [1][1][2][1][RTW89_ETSI][31] = 42,
+ [1][1][2][1][RTW89_KCC][31] = 28,
+ [1][1][2][1][RTW89_FCC][35] = 10,
+ [1][1][2][1][RTW89_ETSI][35] = 42,
+ [1][1][2][1][RTW89_KCC][35] = 28,
+ [1][1][2][1][RTW89_FCC][39] = 10,
+ [1][1][2][1][RTW89_ETSI][39] = 42,
+ [1][1][2][1][RTW89_KCC][39] = 28,
+ [1][1][2][1][RTW89_FCC][43] = 10,
+ [1][1][2][1][RTW89_ETSI][43] = 42,
+ [1][1][2][1][RTW89_KCC][43] = 28,
+ [1][1][2][1][RTW89_FCC][46] = 12,
+ [1][1][2][1][RTW89_ETSI][46] = 127,
+ [1][1][2][1][RTW89_KCC][46] = 28,
+ [1][1][2][1][RTW89_FCC][50] = 12,
+ [1][1][2][1][RTW89_ETSI][50] = 127,
+ [1][1][2][1][RTW89_KCC][50] = 28,
+ [1][1][2][1][RTW89_FCC][54] = 10,
+ [1][1][2][1][RTW89_ETSI][54] = 127,
+ [1][1][2][1][RTW89_KCC][54] = 28,
+ [1][1][2][1][RTW89_FCC][58] = 10,
+ [1][1][2][1][RTW89_ETSI][58] = 127,
+ [1][1][2][1][RTW89_KCC][58] = 28,
+ [1][1][2][1][RTW89_FCC][61] = 10,
+ [1][1][2][1][RTW89_ETSI][61] = 127,
+ [1][1][2][1][RTW89_KCC][61] = 28,
+ [1][1][2][1][RTW89_FCC][65] = 10,
+ [1][1][2][1][RTW89_ETSI][65] = 127,
+ [1][1][2][1][RTW89_KCC][65] = 28,
+ [1][1][2][1][RTW89_FCC][69] = 10,
+ [1][1][2][1][RTW89_ETSI][69] = 127,
+ [1][1][2][1][RTW89_KCC][69] = 28,
+ [1][1][2][1][RTW89_FCC][73] = 10,
+ [1][1][2][1][RTW89_ETSI][73] = 127,
+ [1][1][2][1][RTW89_KCC][73] = 28,
+ [1][1][2][1][RTW89_FCC][76] = 10,
+ [1][1][2][1][RTW89_ETSI][76] = 127,
+ [1][1][2][1][RTW89_KCC][76] = 28,
+ [1][1][2][1][RTW89_FCC][80] = 10,
+ [1][1][2][1][RTW89_ETSI][80] = 127,
+ [1][1][2][1][RTW89_KCC][80] = 32,
+ [1][1][2][1][RTW89_FCC][84] = 10,
+ [1][1][2][1][RTW89_ETSI][84] = 127,
+ [1][1][2][1][RTW89_KCC][84] = 32,
+ [1][1][2][1][RTW89_FCC][88] = 10,
+ [1][1][2][1][RTW89_ETSI][88] = 127,
+ [1][1][2][1][RTW89_KCC][88] = 32,
+ [1][1][2][1][RTW89_FCC][91] = 12,
+ [1][1][2][1][RTW89_ETSI][91] = 127,
+ [1][1][2][1][RTW89_KCC][91] = 32,
+ [1][1][2][1][RTW89_FCC][95] = 10,
+ [1][1][2][1][RTW89_ETSI][95] = 127,
+ [1][1][2][1][RTW89_KCC][95] = 32,
+ [1][1][2][1][RTW89_FCC][99] = 10,
+ [1][1][2][1][RTW89_ETSI][99] = 127,
+ [1][1][2][1][RTW89_KCC][99] = 32,
+ [1][1][2][1][RTW89_FCC][103] = 10,
+ [1][1][2][1][RTW89_ETSI][103] = 127,
+ [1][1][2][1][RTW89_KCC][103] = 32,
+ [1][1][2][1][RTW89_FCC][106] = 12,
+ [1][1][2][1][RTW89_ETSI][106] = 127,
+ [1][1][2][1][RTW89_KCC][106] = 32,
[1][1][2][1][RTW89_FCC][110] = 127,
+ [1][1][2][1][RTW89_ETSI][110] = 127,
+ [1][1][2][1][RTW89_KCC][110] = 127,
[1][1][2][1][RTW89_FCC][114] = 127,
+ [1][1][2][1][RTW89_ETSI][114] = 127,
+ [1][1][2][1][RTW89_KCC][114] = 127,
[1][1][2][1][RTW89_FCC][118] = 127,
- [2][0][2][0][RTW89_FCC][3] = 64,
- [2][0][2][0][RTW89_FCC][11] = 64,
- [2][0][2][0][RTW89_FCC][18] = 64,
- [2][0][2][0][RTW89_FCC][26] = 64,
- [2][0][2][0][RTW89_FCC][33] = 64,
- [2][0][2][0][RTW89_FCC][41] = 64,
- [2][0][2][0][RTW89_FCC][48] = 64,
- [2][0][2][0][RTW89_FCC][56] = 64,
- [2][0][2][0][RTW89_FCC][63] = 64,
- [2][0][2][0][RTW89_FCC][71] = 64,
- [2][0][2][0][RTW89_FCC][78] = 64,
- [2][0][2][0][RTW89_FCC][86] = 64,
- [2][0][2][0][RTW89_FCC][93] = 64,
- [2][0][2][0][RTW89_FCC][101] = 64,
+ [1][1][2][1][RTW89_ETSI][118] = 127,
+ [1][1][2][1][RTW89_KCC][118] = 127,
+ [2][0][2][0][RTW89_FCC][3] = 46,
+ [2][0][2][0][RTW89_ETSI][3] = 48,
+ [2][0][2][0][RTW89_KCC][3] = 50,
+ [2][0][2][0][RTW89_FCC][11] = 46,
+ [2][0][2][0][RTW89_ETSI][11] = 48,
+ [2][0][2][0][RTW89_KCC][11] = 50,
+ [2][0][2][0][RTW89_FCC][18] = 46,
+ [2][0][2][0][RTW89_ETSI][18] = 48,
+ [2][0][2][0][RTW89_KCC][18] = 50,
+ [2][0][2][0][RTW89_FCC][26] = 46,
+ [2][0][2][0][RTW89_ETSI][26] = 48,
+ [2][0][2][0][RTW89_KCC][26] = 50,
+ [2][0][2][0][RTW89_FCC][33] = 46,
+ [2][0][2][0][RTW89_ETSI][33] = 48,
+ [2][0][2][0][RTW89_KCC][33] = 50,
+ [2][0][2][0][RTW89_FCC][41] = 46,
+ [2][0][2][0][RTW89_ETSI][41] = 48,
+ [2][0][2][0][RTW89_KCC][41] = 50,
+ [2][0][2][0][RTW89_FCC][48] = 46,
+ [2][0][2][0][RTW89_ETSI][48] = 127,
+ [2][0][2][0][RTW89_KCC][48] = 48,
+ [2][0][2][0][RTW89_FCC][56] = 46,
+ [2][0][2][0][RTW89_ETSI][56] = 127,
+ [2][0][2][0][RTW89_KCC][56] = 48,
+ [2][0][2][0][RTW89_FCC][63] = 46,
+ [2][0][2][0][RTW89_ETSI][63] = 127,
+ [2][0][2][0][RTW89_KCC][63] = 48,
+ [2][0][2][0][RTW89_FCC][71] = 46,
+ [2][0][2][0][RTW89_ETSI][71] = 127,
+ [2][0][2][0][RTW89_KCC][71] = 48,
+ [2][0][2][0][RTW89_FCC][78] = 46,
+ [2][0][2][0][RTW89_ETSI][78] = 127,
+ [2][0][2][0][RTW89_KCC][78] = 52,
+ [2][0][2][0][RTW89_FCC][86] = 46,
+ [2][0][2][0][RTW89_ETSI][86] = 127,
+ [2][0][2][0][RTW89_KCC][86] = 52,
+ [2][0][2][0][RTW89_FCC][93] = 46,
+ [2][0][2][0][RTW89_ETSI][93] = 127,
+ [2][0][2][0][RTW89_KCC][93] = 50,
+ [2][0][2][0][RTW89_FCC][101] = 44,
+ [2][0][2][0][RTW89_ETSI][101] = 127,
+ [2][0][2][0][RTW89_KCC][101] = 50,
[2][0][2][0][RTW89_FCC][108] = 127,
+ [2][0][2][0][RTW89_ETSI][108] = 127,
+ [2][0][2][0][RTW89_KCC][108] = 127,
[2][0][2][0][RTW89_FCC][116] = 127,
- [2][1][2][0][RTW89_FCC][3] = 52,
- [2][1][2][0][RTW89_FCC][11] = 52,
- [2][1][2][0][RTW89_FCC][18] = 52,
- [2][1][2][0][RTW89_FCC][26] = 52,
- [2][1][2][0][RTW89_FCC][33] = 52,
- [2][1][2][0][RTW89_FCC][41] = 52,
- [2][1][2][0][RTW89_FCC][48] = 52,
- [2][1][2][0][RTW89_FCC][56] = 52,
- [2][1][2][0][RTW89_FCC][63] = 52,
- [2][1][2][0][RTW89_FCC][71] = 52,
- [2][1][2][0][RTW89_FCC][78] = 52,
- [2][1][2][0][RTW89_FCC][86] = 52,
- [2][1][2][0][RTW89_FCC][93] = 52,
- [2][1][2][0][RTW89_FCC][101] = 52,
+ [2][0][2][0][RTW89_ETSI][116] = 127,
+ [2][0][2][0][RTW89_KCC][116] = 127,
+ [2][1][2][0][RTW89_FCC][3] = 22,
+ [2][1][2][0][RTW89_ETSI][3] = 48,
+ [2][1][2][0][RTW89_KCC][3] = 38,
+ [2][1][2][0][RTW89_FCC][11] = 20,
+ [2][1][2][0][RTW89_ETSI][11] = 48,
+ [2][1][2][0][RTW89_KCC][11] = 38,
+ [2][1][2][0][RTW89_FCC][18] = 20,
+ [2][1][2][0][RTW89_ETSI][18] = 48,
+ [2][1][2][0][RTW89_KCC][18] = 38,
+ [2][1][2][0][RTW89_FCC][26] = 20,
+ [2][1][2][0][RTW89_ETSI][26] = 48,
+ [2][1][2][0][RTW89_KCC][26] = 38,
+ [2][1][2][0][RTW89_FCC][33] = 20,
+ [2][1][2][0][RTW89_ETSI][33] = 48,
+ [2][1][2][0][RTW89_KCC][33] = 38,
+ [2][1][2][0][RTW89_FCC][41] = 22,
+ [2][1][2][0][RTW89_ETSI][41] = 48,
+ [2][1][2][0][RTW89_KCC][41] = 38,
+ [2][1][2][0][RTW89_FCC][48] = 22,
+ [2][1][2][0][RTW89_ETSI][48] = 127,
+ [2][1][2][0][RTW89_KCC][48] = 38,
+ [2][1][2][0][RTW89_FCC][56] = 20,
+ [2][1][2][0][RTW89_ETSI][56] = 127,
+ [2][1][2][0][RTW89_KCC][56] = 38,
+ [2][1][2][0][RTW89_FCC][63] = 22,
+ [2][1][2][0][RTW89_ETSI][63] = 127,
+ [2][1][2][0][RTW89_KCC][63] = 38,
+ [2][1][2][0][RTW89_FCC][71] = 20,
+ [2][1][2][0][RTW89_ETSI][71] = 127,
+ [2][1][2][0][RTW89_KCC][71] = 38,
+ [2][1][2][0][RTW89_FCC][78] = 20,
+ [2][1][2][0][RTW89_ETSI][78] = 127,
+ [2][1][2][0][RTW89_KCC][78] = 38,
+ [2][1][2][0][RTW89_FCC][86] = 20,
+ [2][1][2][0][RTW89_ETSI][86] = 127,
+ [2][1][2][0][RTW89_KCC][86] = 38,
+ [2][1][2][0][RTW89_FCC][93] = 22,
+ [2][1][2][0][RTW89_ETSI][93] = 127,
+ [2][1][2][0][RTW89_KCC][93] = 38,
+ [2][1][2][0][RTW89_FCC][101] = 22,
+ [2][1][2][0][RTW89_ETSI][101] = 127,
+ [2][1][2][0][RTW89_KCC][101] = 38,
[2][1][2][0][RTW89_FCC][108] = 127,
+ [2][1][2][0][RTW89_ETSI][108] = 127,
+ [2][1][2][0][RTW89_KCC][108] = 127,
[2][1][2][0][RTW89_FCC][116] = 127,
- [2][1][2][1][RTW89_FCC][3] = 40,
- [2][1][2][1][RTW89_FCC][11] = 40,
- [2][1][2][1][RTW89_FCC][18] = 40,
- [2][1][2][1][RTW89_FCC][26] = 40,
- [2][1][2][1][RTW89_FCC][33] = 40,
- [2][1][2][1][RTW89_FCC][41] = 40,
- [2][1][2][1][RTW89_FCC][48] = 40,
- [2][1][2][1][RTW89_FCC][56] = 40,
- [2][1][2][1][RTW89_FCC][63] = 40,
- [2][1][2][1][RTW89_FCC][71] = 40,
- [2][1][2][1][RTW89_FCC][78] = 40,
- [2][1][2][1][RTW89_FCC][86] = 40,
- [2][1][2][1][RTW89_FCC][93] = 40,
- [2][1][2][1][RTW89_FCC][101] = 40,
+ [2][1][2][0][RTW89_ETSI][116] = 127,
+ [2][1][2][0][RTW89_KCC][116] = 127,
+ [2][1][2][1][RTW89_FCC][3] = 22,
+ [2][1][2][1][RTW89_ETSI][3] = 42,
+ [2][1][2][1][RTW89_KCC][3] = 38,
+ [2][1][2][1][RTW89_FCC][11] = 20,
+ [2][1][2][1][RTW89_ETSI][11] = 42,
+ [2][1][2][1][RTW89_KCC][11] = 38,
+ [2][1][2][1][RTW89_FCC][18] = 20,
+ [2][1][2][1][RTW89_ETSI][18] = 42,
+ [2][1][2][1][RTW89_KCC][18] = 38,
+ [2][1][2][1][RTW89_FCC][26] = 20,
+ [2][1][2][1][RTW89_ETSI][26] = 42,
+ [2][1][2][1][RTW89_KCC][26] = 38,
+ [2][1][2][1][RTW89_FCC][33] = 20,
+ [2][1][2][1][RTW89_ETSI][33] = 42,
+ [2][1][2][1][RTW89_KCC][33] = 38,
+ [2][1][2][1][RTW89_FCC][41] = 22,
+ [2][1][2][1][RTW89_ETSI][41] = 42,
+ [2][1][2][1][RTW89_KCC][41] = 38,
+ [2][1][2][1][RTW89_FCC][48] = 22,
+ [2][1][2][1][RTW89_ETSI][48] = 127,
+ [2][1][2][1][RTW89_KCC][48] = 38,
+ [2][1][2][1][RTW89_FCC][56] = 20,
+ [2][1][2][1][RTW89_ETSI][56] = 127,
+ [2][1][2][1][RTW89_KCC][56] = 38,
+ [2][1][2][1][RTW89_FCC][63] = 22,
+ [2][1][2][1][RTW89_ETSI][63] = 127,
+ [2][1][2][1][RTW89_KCC][63] = 38,
+ [2][1][2][1][RTW89_FCC][71] = 20,
+ [2][1][2][1][RTW89_ETSI][71] = 127,
+ [2][1][2][1][RTW89_KCC][71] = 38,
+ [2][1][2][1][RTW89_FCC][78] = 20,
+ [2][1][2][1][RTW89_ETSI][78] = 127,
+ [2][1][2][1][RTW89_KCC][78] = 38,
+ [2][1][2][1][RTW89_FCC][86] = 20,
+ [2][1][2][1][RTW89_ETSI][86] = 127,
+ [2][1][2][1][RTW89_KCC][86] = 38,
+ [2][1][2][1][RTW89_FCC][93] = 22,
+ [2][1][2][1][RTW89_ETSI][93] = 127,
+ [2][1][2][1][RTW89_KCC][93] = 38,
+ [2][1][2][1][RTW89_FCC][101] = 22,
+ [2][1][2][1][RTW89_ETSI][101] = 127,
+ [2][1][2][1][RTW89_KCC][101] = 38,
[2][1][2][1][RTW89_FCC][108] = 127,
+ [2][1][2][1][RTW89_ETSI][108] = 127,
+ [2][1][2][1][RTW89_KCC][108] = 127,
[2][1][2][1][RTW89_FCC][116] = 127,
- [3][0][2][0][RTW89_FCC][7] = 56,
- [3][0][2][0][RTW89_FCC][22] = 56,
- [3][0][2][0][RTW89_FCC][37] = 56,
- [3][0][2][0][RTW89_FCC][52] = 56,
- [3][0][2][0][RTW89_FCC][67] = 56,
- [3][0][2][0][RTW89_FCC][82] = 56,
- [3][0][2][0][RTW89_FCC][97] = 56,
+ [2][1][2][1][RTW89_ETSI][116] = 127,
+ [2][1][2][1][RTW89_KCC][116] = 127,
+ [3][0][2][0][RTW89_FCC][7] = 52,
+ [3][0][2][0][RTW89_ETSI][7] = 38,
+ [3][0][2][0][RTW89_KCC][7] = 42,
+ [3][0][2][0][RTW89_FCC][22] = 52,
+ [3][0][2][0][RTW89_ETSI][22] = 38,
+ [3][0][2][0][RTW89_KCC][22] = 42,
+ [3][0][2][0][RTW89_FCC][37] = 52,
+ [3][0][2][0][RTW89_ETSI][37] = 38,
+ [3][0][2][0][RTW89_KCC][37] = 42,
+ [3][0][2][0][RTW89_FCC][52] = 54,
+ [3][0][2][0][RTW89_ETSI][52] = 127,
+ [3][0][2][0][RTW89_KCC][52] = 56,
+ [3][0][2][0][RTW89_FCC][67] = 54,
+ [3][0][2][0][RTW89_ETSI][67] = 127,
+ [3][0][2][0][RTW89_KCC][67] = 54,
+ [3][0][2][0][RTW89_FCC][82] = 54,
+ [3][0][2][0][RTW89_ETSI][82] = 127,
+ [3][0][2][0][RTW89_KCC][82] = 26,
+ [3][0][2][0][RTW89_FCC][97] = 40,
+ [3][0][2][0][RTW89_ETSI][97] = 127,
+ [3][0][2][0][RTW89_KCC][97] = 26,
[3][0][2][0][RTW89_FCC][112] = 127,
- [3][1][2][0][RTW89_FCC][7] = 44,
- [3][1][2][0][RTW89_FCC][22] = 44,
- [3][1][2][0][RTW89_FCC][37] = 44,
- [3][1][2][0][RTW89_FCC][52] = 44,
- [3][1][2][0][RTW89_FCC][67] = 44,
- [3][1][2][0][RTW89_FCC][82] = 44,
- [3][1][2][0][RTW89_FCC][97] = 44,
+ [3][0][2][0][RTW89_ETSI][112] = 127,
+ [3][0][2][0][RTW89_KCC][112] = 127,
+ [3][1][2][0][RTW89_FCC][7] = 32,
+ [3][1][2][0][RTW89_ETSI][7] = 38,
+ [3][1][2][0][RTW89_KCC][7] = 40,
+ [3][1][2][0][RTW89_FCC][22] = 30,
+ [3][1][2][0][RTW89_ETSI][22] = 38,
+ [3][1][2][0][RTW89_KCC][22] = 40,
+ [3][1][2][0][RTW89_FCC][37] = 30,
+ [3][1][2][0][RTW89_ETSI][37] = 38,
+ [3][1][2][0][RTW89_KCC][37] = 40,
+ [3][1][2][0][RTW89_FCC][52] = 30,
+ [3][1][2][0][RTW89_ETSI][52] = 127,
+ [3][1][2][0][RTW89_KCC][52] = 48,
+ [3][1][2][0][RTW89_FCC][67] = 32,
+ [3][1][2][0][RTW89_ETSI][67] = 127,
+ [3][1][2][0][RTW89_KCC][67] = 48,
+ [3][1][2][0][RTW89_FCC][82] = 32,
+ [3][1][2][0][RTW89_ETSI][82] = 127,
+ [3][1][2][0][RTW89_KCC][82] = 24,
+ [3][1][2][0][RTW89_FCC][97] = 14,
+ [3][1][2][0][RTW89_ETSI][97] = 127,
+ [3][1][2][0][RTW89_KCC][97] = 24,
[3][1][2][0][RTW89_FCC][112] = 127,
+ [3][1][2][0][RTW89_ETSI][112] = 127,
+ [3][1][2][0][RTW89_KCC][112] = 127,
[3][1][2][1][RTW89_FCC][7] = 32,
- [3][1][2][1][RTW89_FCC][22] = 32,
- [3][1][2][1][RTW89_FCC][37] = 32,
- [3][1][2][1][RTW89_FCC][52] = 32,
+ [3][1][2][1][RTW89_ETSI][7] = 38,
+ [3][1][2][1][RTW89_KCC][7] = 40,
+ [3][1][2][1][RTW89_FCC][22] = 30,
+ [3][1][2][1][RTW89_ETSI][22] = 38,
+ [3][1][2][1][RTW89_KCC][22] = 40,
+ [3][1][2][1][RTW89_FCC][37] = 30,
+ [3][1][2][1][RTW89_ETSI][37] = 38,
+ [3][1][2][1][RTW89_KCC][37] = 40,
+ [3][1][2][1][RTW89_FCC][52] = 30,
+ [3][1][2][1][RTW89_ETSI][52] = 127,
+ [3][1][2][1][RTW89_KCC][52] = 48,
[3][1][2][1][RTW89_FCC][67] = 32,
+ [3][1][2][1][RTW89_ETSI][67] = 127,
+ [3][1][2][1][RTW89_KCC][67] = 48,
[3][1][2][1][RTW89_FCC][82] = 32,
- [3][1][2][1][RTW89_FCC][97] = 32,
+ [3][1][2][1][RTW89_ETSI][82] = 127,
+ [3][1][2][1][RTW89_KCC][82] = 24,
+ [3][1][2][1][RTW89_FCC][97] = 14,
+ [3][1][2][1][RTW89_ETSI][97] = 127,
+ [3][1][2][1][RTW89_KCC][97] = 24,
[3][1][2][1][RTW89_FCC][112] = 127,
+ [3][1][2][1][RTW89_ETSI][112] = 127,
+ [3][1][2][1][RTW89_KCC][112] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
@@ -17126,8 +33220,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_WW][8] = 32,
[0][0][RTW89_WW][9] = 32,
[0][0][RTW89_WW][10] = 32,
- [0][0][RTW89_WW][11] = 32,
- [0][0][RTW89_WW][12] = 24,
+ [0][0][RTW89_WW][11] = 26,
+ [0][0][RTW89_WW][12] = -20,
[0][0][RTW89_WW][13] = 0,
[0][1][RTW89_WW][0] = 20,
[0][1][RTW89_WW][1] = 22,
@@ -17141,7 +33235,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][9] = 22,
[0][1][RTW89_WW][10] = 22,
[0][1][RTW89_WW][11] = 22,
- [0][1][RTW89_WW][12] = 20,
+ [0][1][RTW89_WW][12] = -30,
[0][1][RTW89_WW][13] = 0,
[1][0][RTW89_WW][0] = 42,
[1][0][RTW89_WW][1] = 44,
@@ -17154,8 +33248,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_WW][8] = 44,
[1][0][RTW89_WW][9] = 44,
[1][0][RTW89_WW][10] = 44,
- [1][0][RTW89_WW][11] = 42,
- [1][0][RTW89_WW][12] = 30,
+ [1][0][RTW89_WW][11] = 36,
+ [1][0][RTW89_WW][12] = 4,
[1][0][RTW89_WW][13] = 0,
[1][1][RTW89_WW][0] = 32,
[1][1][RTW89_WW][1] = 32,
@@ -17169,7 +33263,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][9] = 32,
[1][1][RTW89_WW][10] = 32,
[1][1][RTW89_WW][11] = 30,
- [1][1][RTW89_WW][12] = 24,
+ [1][1][RTW89_WW][12] = -6,
[1][1][RTW89_WW][13] = 0,
[2][0][RTW89_WW][0] = 56,
[2][0][RTW89_WW][1] = 56,
@@ -17182,8 +33276,8 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_WW][8] = 56,
[2][0][RTW89_WW][9] = 56,
[2][0][RTW89_WW][10] = 56,
- [2][0][RTW89_WW][11] = 42,
- [2][0][RTW89_WW][12] = 38,
+ [2][0][RTW89_WW][11] = 48,
+ [2][0][RTW89_WW][12] = 16,
[2][0][RTW89_WW][13] = 0,
[2][1][RTW89_WW][0] = 44,
[2][1][RTW89_WW][1] = 44,
@@ -17196,2213 +33290,3353 @@ const s8 rtw89_8852c_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][8] = 44,
[2][1][RTW89_WW][9] = 44,
[2][1][RTW89_WW][10] = 44,
- [2][1][RTW89_WW][11] = 30,
- [2][1][RTW89_WW][12] = 26,
+ [2][1][RTW89_WW][11] = 44,
+ [2][1][RTW89_WW][12] = 6,
[2][1][RTW89_WW][13] = 0,
[0][0][RTW89_FCC][0] = 60,
[0][0][RTW89_ETSI][0] = 34,
[0][0][RTW89_MKK][0] = 36,
- [0][0][RTW89_IC][0] = 68,
- [0][0][RTW89_ACMA][0] = 32,
+ [0][0][RTW89_IC][0] = 60,
+ [0][0][RTW89_KCC][0] = 42,
+ [0][0][RTW89_ACMA][0] = 34,
+ [0][0][RTW89_CN][0] = 32,
+ [0][0][RTW89_UK][0] = 34,
[0][0][RTW89_FCC][1] = 60,
[0][0][RTW89_ETSI][1] = 38,
[0][0][RTW89_MKK][1] = 40,
- [0][0][RTW89_IC][1] = 68,
- [0][0][RTW89_ACMA][1] = 32,
+ [0][0][RTW89_IC][1] = 60,
+ [0][0][RTW89_KCC][1] = 42,
+ [0][0][RTW89_ACMA][1] = 38,
+ [0][0][RTW89_CN][1] = 32,
+ [0][0][RTW89_UK][1] = 38,
[0][0][RTW89_FCC][2] = 64,
[0][0][RTW89_ETSI][2] = 38,
[0][0][RTW89_MKK][2] = 40,
- [0][0][RTW89_IC][2] = 72,
- [0][0][RTW89_ACMA][2] = 32,
+ [0][0][RTW89_IC][2] = 64,
+ [0][0][RTW89_KCC][2] = 42,
+ [0][0][RTW89_ACMA][2] = 38,
+ [0][0][RTW89_CN][2] = 32,
+ [0][0][RTW89_UK][2] = 38,
[0][0][RTW89_FCC][3] = 68,
[0][0][RTW89_ETSI][3] = 38,
[0][0][RTW89_MKK][3] = 40,
- [0][0][RTW89_IC][3] = 76,
- [0][0][RTW89_ACMA][3] = 32,
+ [0][0][RTW89_IC][3] = 68,
+ [0][0][RTW89_KCC][3] = 42,
+ [0][0][RTW89_ACMA][3] = 38,
+ [0][0][RTW89_CN][3] = 32,
+ [0][0][RTW89_UK][3] = 38,
[0][0][RTW89_FCC][4] = 68,
[0][0][RTW89_ETSI][4] = 38,
[0][0][RTW89_MKK][4] = 40,
- [0][0][RTW89_IC][4] = 76,
- [0][0][RTW89_ACMA][4] = 32,
- [0][0][RTW89_FCC][5] = 76,
+ [0][0][RTW89_IC][4] = 68,
+ [0][0][RTW89_KCC][4] = 42,
+ [0][0][RTW89_ACMA][4] = 38,
+ [0][0][RTW89_CN][4] = 32,
+ [0][0][RTW89_UK][4] = 38,
+ [0][0][RTW89_FCC][5] = 78,
[0][0][RTW89_ETSI][5] = 38,
[0][0][RTW89_MKK][5] = 40,
- [0][0][RTW89_IC][5] = 84,
- [0][0][RTW89_ACMA][5] = 32,
- [0][0][RTW89_FCC][6] = 66,
+ [0][0][RTW89_IC][5] = 78,
+ [0][0][RTW89_KCC][5] = 42,
+ [0][0][RTW89_ACMA][5] = 38,
+ [0][0][RTW89_CN][5] = 32,
+ [0][0][RTW89_UK][5] = 38,
+ [0][0][RTW89_FCC][6] = 54,
[0][0][RTW89_ETSI][6] = 38,
[0][0][RTW89_MKK][6] = 40,
- [0][0][RTW89_IC][6] = 74,
- [0][0][RTW89_ACMA][6] = 32,
- [0][0][RTW89_FCC][7] = 66,
+ [0][0][RTW89_IC][6] = 54,
+ [0][0][RTW89_KCC][6] = 42,
+ [0][0][RTW89_ACMA][6] = 38,
+ [0][0][RTW89_CN][6] = 32,
+ [0][0][RTW89_UK][6] = 38,
+ [0][0][RTW89_FCC][7] = 54,
[0][0][RTW89_ETSI][7] = 38,
[0][0][RTW89_MKK][7] = 40,
- [0][0][RTW89_IC][7] = 74,
- [0][0][RTW89_ACMA][7] = 32,
- [0][0][RTW89_FCC][8] = 62,
+ [0][0][RTW89_IC][7] = 54,
+ [0][0][RTW89_KCC][7] = 42,
+ [0][0][RTW89_ACMA][7] = 38,
+ [0][0][RTW89_CN][7] = 32,
+ [0][0][RTW89_UK][7] = 38,
+ [0][0][RTW89_FCC][8] = 50,
[0][0][RTW89_ETSI][8] = 38,
[0][0][RTW89_MKK][8] = 40,
- [0][0][RTW89_IC][8] = 70,
- [0][0][RTW89_ACMA][8] = 32,
- [0][0][RTW89_FCC][9] = 58,
+ [0][0][RTW89_IC][8] = 50,
+ [0][0][RTW89_KCC][8] = 42,
+ [0][0][RTW89_ACMA][8] = 38,
+ [0][0][RTW89_CN][8] = 32,
+ [0][0][RTW89_UK][8] = 38,
+ [0][0][RTW89_FCC][9] = 46,
[0][0][RTW89_ETSI][9] = 38,
[0][0][RTW89_MKK][9] = 40,
- [0][0][RTW89_IC][9] = 66,
- [0][0][RTW89_ACMA][9] = 32,
- [0][0][RTW89_FCC][10] = 58,
+ [0][0][RTW89_IC][9] = 46,
+ [0][0][RTW89_KCC][9] = 40,
+ [0][0][RTW89_ACMA][9] = 38,
+ [0][0][RTW89_CN][9] = 32,
+ [0][0][RTW89_UK][9] = 38,
+ [0][0][RTW89_FCC][10] = 46,
[0][0][RTW89_ETSI][10] = 38,
[0][0][RTW89_MKK][10] = 40,
- [0][0][RTW89_IC][10] = 66,
- [0][0][RTW89_ACMA][10] = 32,
- [0][0][RTW89_FCC][11] = 42,
+ [0][0][RTW89_IC][10] = 46,
+ [0][0][RTW89_KCC][10] = 40,
+ [0][0][RTW89_ACMA][10] = 38,
+ [0][0][RTW89_CN][10] = 32,
+ [0][0][RTW89_UK][10] = 38,
+ [0][0][RTW89_FCC][11] = 26,
[0][0][RTW89_ETSI][11] = 38,
[0][0][RTW89_MKK][11] = 40,
- [0][0][RTW89_IC][11] = 56,
- [0][0][RTW89_ACMA][11] = 32,
- [0][0][RTW89_FCC][12] = 24,
+ [0][0][RTW89_IC][11] = 26,
+ [0][0][RTW89_KCC][11] = 40,
+ [0][0][RTW89_ACMA][11] = 38,
+ [0][0][RTW89_CN][11] = 32,
+ [0][0][RTW89_UK][11] = 38,
+ [0][0][RTW89_FCC][12] = -20,
[0][0][RTW89_ETSI][12] = 34,
[0][0][RTW89_MKK][12] = 36,
- [0][0][RTW89_IC][12] = 32,
- [0][0][RTW89_ACMA][12] = 32,
+ [0][0][RTW89_IC][12] = -20,
+ [0][0][RTW89_KCC][12] = 40,
+ [0][0][RTW89_ACMA][12] = 34,
+ [0][0][RTW89_CN][12] = 32,
+ [0][0][RTW89_UK][12] = 34,
[0][0][RTW89_FCC][13] = 127,
[0][0][RTW89_ETSI][13] = 127,
[0][0][RTW89_MKK][13] = 127,
[0][0][RTW89_IC][13] = 127,
+ [0][0][RTW89_KCC][13] = 127,
[0][0][RTW89_ACMA][13] = 127,
- [0][1][RTW89_FCC][0] = 46,
+ [0][0][RTW89_CN][13] = 127,
+ [0][0][RTW89_UK][13] = 127,
+ [0][1][RTW89_FCC][0] = 56,
[0][1][RTW89_ETSI][0] = 22,
[0][1][RTW89_MKK][0] = 24,
- [0][1][RTW89_IC][0] = 62,
- [0][1][RTW89_ACMA][0] = 20,
- [0][1][RTW89_FCC][1] = 46,
+ [0][1][RTW89_IC][0] = 56,
+ [0][1][RTW89_KCC][0] = 30,
+ [0][1][RTW89_ACMA][0] = 22,
+ [0][1][RTW89_CN][0] = 20,
+ [0][1][RTW89_UK][0] = 22,
+ [0][1][RTW89_FCC][1] = 56,
[0][1][RTW89_ETSI][1] = 24,
[0][1][RTW89_MKK][1] = 30,
- [0][1][RTW89_IC][1] = 62,
- [0][1][RTW89_ACMA][1] = 22,
- [0][1][RTW89_FCC][2] = 50,
+ [0][1][RTW89_IC][1] = 56,
+ [0][1][RTW89_KCC][1] = 30,
+ [0][1][RTW89_ACMA][1] = 24,
+ [0][1][RTW89_CN][1] = 22,
+ [0][1][RTW89_UK][1] = 24,
+ [0][1][RTW89_FCC][2] = 60,
[0][1][RTW89_ETSI][2] = 24,
[0][1][RTW89_MKK][2] = 30,
- [0][1][RTW89_IC][2] = 66,
- [0][1][RTW89_ACMA][2] = 22,
- [0][1][RTW89_FCC][3] = 54,
+ [0][1][RTW89_IC][2] = 60,
+ [0][1][RTW89_KCC][2] = 30,
+ [0][1][RTW89_ACMA][2] = 24,
+ [0][1][RTW89_CN][2] = 22,
+ [0][1][RTW89_UK][2] = 24,
+ [0][1][RTW89_FCC][3] = 64,
[0][1][RTW89_ETSI][3] = 24,
[0][1][RTW89_MKK][3] = 30,
- [0][1][RTW89_IC][3] = 70,
- [0][1][RTW89_ACMA][3] = 22,
- [0][1][RTW89_FCC][4] = 58,
+ [0][1][RTW89_IC][3] = 64,
+ [0][1][RTW89_KCC][3] = 30,
+ [0][1][RTW89_ACMA][3] = 24,
+ [0][1][RTW89_CN][3] = 22,
+ [0][1][RTW89_UK][3] = 24,
+ [0][1][RTW89_FCC][4] = 68,
[0][1][RTW89_ETSI][4] = 24,
[0][1][RTW89_MKK][4] = 30,
- [0][1][RTW89_IC][4] = 74,
- [0][1][RTW89_ACMA][4] = 22,
- [0][1][RTW89_FCC][5] = 66,
+ [0][1][RTW89_IC][4] = 68,
+ [0][1][RTW89_KCC][4] = 28,
+ [0][1][RTW89_ACMA][4] = 24,
+ [0][1][RTW89_CN][4] = 22,
+ [0][1][RTW89_UK][4] = 24,
+ [0][1][RTW89_FCC][5] = 76,
[0][1][RTW89_ETSI][5] = 24,
[0][1][RTW89_MKK][5] = 30,
- [0][1][RTW89_IC][5] = 74,
- [0][1][RTW89_ACMA][5] = 22,
- [0][1][RTW89_FCC][6] = 58,
+ [0][1][RTW89_IC][5] = 76,
+ [0][1][RTW89_KCC][5] = 28,
+ [0][1][RTW89_ACMA][5] = 24,
+ [0][1][RTW89_CN][5] = 22,
+ [0][1][RTW89_UK][5] = 24,
+ [0][1][RTW89_FCC][6] = 54,
[0][1][RTW89_ETSI][6] = 24,
[0][1][RTW89_MKK][6] = 30,
- [0][1][RTW89_IC][6] = 72,
- [0][1][RTW89_ACMA][6] = 22,
- [0][1][RTW89_FCC][7] = 54,
+ [0][1][RTW89_IC][6] = 54,
+ [0][1][RTW89_KCC][6] = 28,
+ [0][1][RTW89_ACMA][6] = 24,
+ [0][1][RTW89_CN][6] = 22,
+ [0][1][RTW89_UK][6] = 24,
+ [0][1][RTW89_FCC][7] = 50,
[0][1][RTW89_ETSI][7] = 24,
[0][1][RTW89_MKK][7] = 30,
- [0][1][RTW89_IC][7] = 68,
- [0][1][RTW89_ACMA][7] = 22,
- [0][1][RTW89_FCC][8] = 50,
+ [0][1][RTW89_IC][7] = 50,
+ [0][1][RTW89_KCC][7] = 28,
+ [0][1][RTW89_ACMA][7] = 24,
+ [0][1][RTW89_CN][7] = 22,
+ [0][1][RTW89_UK][7] = 24,
+ [0][1][RTW89_FCC][8] = 46,
[0][1][RTW89_ETSI][8] = 24,
[0][1][RTW89_MKK][8] = 30,
- [0][1][RTW89_IC][8] = 64,
- [0][1][RTW89_ACMA][8] = 22,
- [0][1][RTW89_FCC][9] = 46,
+ [0][1][RTW89_IC][8] = 46,
+ [0][1][RTW89_KCC][8] = 28,
+ [0][1][RTW89_ACMA][8] = 24,
+ [0][1][RTW89_CN][8] = 22,
+ [0][1][RTW89_UK][8] = 24,
+ [0][1][RTW89_FCC][9] = 42,
[0][1][RTW89_ETSI][9] = 24,
[0][1][RTW89_MKK][9] = 30,
- [0][1][RTW89_IC][9] = 60,
- [0][1][RTW89_ACMA][9] = 22,
- [0][1][RTW89_FCC][10] = 46,
+ [0][1][RTW89_IC][9] = 42,
+ [0][1][RTW89_KCC][9] = 28,
+ [0][1][RTW89_ACMA][9] = 24,
+ [0][1][RTW89_CN][9] = 22,
+ [0][1][RTW89_UK][9] = 24,
+ [0][1][RTW89_FCC][10] = 42,
[0][1][RTW89_ETSI][10] = 24,
[0][1][RTW89_MKK][10] = 30,
- [0][1][RTW89_IC][10] = 60,
- [0][1][RTW89_ACMA][10] = 22,
- [0][1][RTW89_FCC][11] = 30,
+ [0][1][RTW89_IC][10] = 42,
+ [0][1][RTW89_KCC][10] = 28,
+ [0][1][RTW89_ACMA][10] = 24,
+ [0][1][RTW89_CN][10] = 22,
+ [0][1][RTW89_UK][10] = 24,
+ [0][1][RTW89_FCC][11] = 22,
[0][1][RTW89_ETSI][11] = 24,
[0][1][RTW89_MKK][11] = 30,
- [0][1][RTW89_IC][11] = 52,
- [0][1][RTW89_ACMA][11] = 22,
- [0][1][RTW89_FCC][12] = 22,
+ [0][1][RTW89_IC][11] = 22,
+ [0][1][RTW89_KCC][11] = 28,
+ [0][1][RTW89_ACMA][11] = 24,
+ [0][1][RTW89_CN][11] = 22,
+ [0][1][RTW89_UK][11] = 24,
+ [0][1][RTW89_FCC][12] = -30,
[0][1][RTW89_ETSI][12] = 20,
[0][1][RTW89_MKK][12] = 24,
- [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_IC][12] = -30,
+ [0][1][RTW89_KCC][12] = 28,
[0][1][RTW89_ACMA][12] = 20,
+ [0][1][RTW89_CN][12] = 20,
+ [0][1][RTW89_UK][12] = 20,
[0][1][RTW89_FCC][13] = 127,
[0][1][RTW89_ETSI][13] = 127,
[0][1][RTW89_MKK][13] = 127,
[0][1][RTW89_IC][13] = 127,
+ [0][1][RTW89_KCC][13] = 127,
[0][1][RTW89_ACMA][13] = 127,
- [1][0][RTW89_FCC][0] = 64,
+ [0][1][RTW89_CN][13] = 127,
+ [0][1][RTW89_UK][13] = 127,
+ [1][0][RTW89_FCC][0] = 66,
[1][0][RTW89_ETSI][0] = 46,
[1][0][RTW89_MKK][0] = 48,
- [1][0][RTW89_IC][0] = 78,
- [1][0][RTW89_ACMA][0] = 42,
- [1][0][RTW89_FCC][1] = 64,
+ [1][0][RTW89_IC][0] = 66,
+ [1][0][RTW89_KCC][0] = 50,
+ [1][0][RTW89_ACMA][0] = 46,
+ [1][0][RTW89_CN][0] = 42,
+ [1][0][RTW89_UK][0] = 46,
+ [1][0][RTW89_FCC][1] = 66,
[1][0][RTW89_ETSI][1] = 46,
[1][0][RTW89_MKK][1] = 48,
- [1][0][RTW89_IC][1] = 78,
- [1][0][RTW89_ACMA][1] = 44,
- [1][0][RTW89_FCC][2] = 68,
+ [1][0][RTW89_IC][1] = 66,
+ [1][0][RTW89_KCC][1] = 50,
+ [1][0][RTW89_ACMA][1] = 46,
+ [1][0][RTW89_CN][1] = 44,
+ [1][0][RTW89_UK][1] = 46,
+ [1][0][RTW89_FCC][2] = 70,
[1][0][RTW89_ETSI][2] = 46,
[1][0][RTW89_MKK][2] = 48,
- [1][0][RTW89_IC][2] = 82,
- [1][0][RTW89_ACMA][2] = 44,
- [1][0][RTW89_FCC][3] = 70,
+ [1][0][RTW89_IC][2] = 70,
+ [1][0][RTW89_KCC][2] = 50,
+ [1][0][RTW89_ACMA][2] = 46,
+ [1][0][RTW89_CN][2] = 44,
+ [1][0][RTW89_UK][2] = 46,
+ [1][0][RTW89_FCC][3] = 72,
[1][0][RTW89_ETSI][3] = 46,
[1][0][RTW89_MKK][3] = 48,
- [1][0][RTW89_IC][3] = 84,
- [1][0][RTW89_ACMA][3] = 44,
- [1][0][RTW89_FCC][4] = 70,
+ [1][0][RTW89_IC][3] = 72,
+ [1][0][RTW89_KCC][3] = 50,
+ [1][0][RTW89_ACMA][3] = 46,
+ [1][0][RTW89_CN][3] = 44,
+ [1][0][RTW89_UK][3] = 46,
+ [1][0][RTW89_FCC][4] = 72,
[1][0][RTW89_ETSI][4] = 46,
[1][0][RTW89_MKK][4] = 48,
- [1][0][RTW89_IC][4] = 84,
- [1][0][RTW89_ACMA][4] = 44,
- [1][0][RTW89_FCC][5] = 76,
+ [1][0][RTW89_IC][4] = 72,
+ [1][0][RTW89_KCC][4] = 50,
+ [1][0][RTW89_ACMA][4] = 46,
+ [1][0][RTW89_CN][4] = 44,
+ [1][0][RTW89_UK][4] = 46,
+ [1][0][RTW89_FCC][5] = 82,
[1][0][RTW89_ETSI][5] = 46,
[1][0][RTW89_MKK][5] = 48,
- [1][0][RTW89_IC][5] = 84,
- [1][0][RTW89_ACMA][5] = 44,
- [1][0][RTW89_FCC][6] = 64,
+ [1][0][RTW89_IC][5] = 82,
+ [1][0][RTW89_KCC][5] = 50,
+ [1][0][RTW89_ACMA][5] = 46,
+ [1][0][RTW89_CN][5] = 44,
+ [1][0][RTW89_UK][5] = 46,
+ [1][0][RTW89_FCC][6] = 58,
[1][0][RTW89_ETSI][6] = 44,
[1][0][RTW89_MKK][6] = 48,
- [1][0][RTW89_IC][6] = 78,
+ [1][0][RTW89_IC][6] = 58,
+ [1][0][RTW89_KCC][6] = 50,
[1][0][RTW89_ACMA][6] = 44,
- [1][0][RTW89_FCC][7] = 64,
+ [1][0][RTW89_CN][6] = 44,
+ [1][0][RTW89_UK][6] = 44,
+ [1][0][RTW89_FCC][7] = 58,
[1][0][RTW89_ETSI][7] = 46,
[1][0][RTW89_MKK][7] = 48,
- [1][0][RTW89_IC][7] = 78,
- [1][0][RTW89_ACMA][7] = 44,
- [1][0][RTW89_FCC][8] = 64,
+ [1][0][RTW89_IC][7] = 58,
+ [1][0][RTW89_KCC][7] = 50,
+ [1][0][RTW89_ACMA][7] = 46,
+ [1][0][RTW89_CN][7] = 44,
+ [1][0][RTW89_UK][7] = 46,
+ [1][0][RTW89_FCC][8] = 58,
[1][0][RTW89_ETSI][8] = 46,
[1][0][RTW89_MKK][8] = 48,
- [1][0][RTW89_IC][8] = 78,
- [1][0][RTW89_ACMA][8] = 44,
- [1][0][RTW89_FCC][9] = 60,
+ [1][0][RTW89_IC][8] = 58,
+ [1][0][RTW89_KCC][8] = 50,
+ [1][0][RTW89_ACMA][8] = 46,
+ [1][0][RTW89_CN][8] = 44,
+ [1][0][RTW89_UK][8] = 46,
+ [1][0][RTW89_FCC][9] = 54,
[1][0][RTW89_ETSI][9] = 46,
[1][0][RTW89_MKK][9] = 48,
- [1][0][RTW89_IC][9] = 74,
- [1][0][RTW89_ACMA][9] = 44,
- [1][0][RTW89_FCC][10] = 60,
+ [1][0][RTW89_IC][9] = 54,
+ [1][0][RTW89_KCC][9] = 50,
+ [1][0][RTW89_ACMA][9] = 46,
+ [1][0][RTW89_CN][9] = 44,
+ [1][0][RTW89_UK][9] = 46,
+ [1][0][RTW89_FCC][10] = 54,
[1][0][RTW89_ETSI][10] = 46,
[1][0][RTW89_MKK][10] = 48,
- [1][0][RTW89_IC][10] = 74,
- [1][0][RTW89_ACMA][10] = 44,
- [1][0][RTW89_FCC][11] = 42,
+ [1][0][RTW89_IC][10] = 54,
+ [1][0][RTW89_KCC][10] = 50,
+ [1][0][RTW89_ACMA][10] = 46,
+ [1][0][RTW89_CN][10] = 44,
+ [1][0][RTW89_UK][10] = 46,
+ [1][0][RTW89_FCC][11] = 36,
[1][0][RTW89_ETSI][11] = 46,
[1][0][RTW89_MKK][11] = 48,
- [1][0][RTW89_IC][11] = 72,
- [1][0][RTW89_ACMA][11] = 44,
- [1][0][RTW89_FCC][12] = 30,
+ [1][0][RTW89_IC][11] = 36,
+ [1][0][RTW89_KCC][11] = 50,
+ [1][0][RTW89_ACMA][11] = 46,
+ [1][0][RTW89_CN][11] = 44,
+ [1][0][RTW89_UK][11] = 46,
+ [1][0][RTW89_FCC][12] = 4,
[1][0][RTW89_ETSI][12] = 46,
[1][0][RTW89_MKK][12] = 46,
- [1][0][RTW89_IC][12] = 38,
- [1][0][RTW89_ACMA][12] = 42,
+ [1][0][RTW89_IC][12] = 4,
+ [1][0][RTW89_KCC][12] = 50,
+ [1][0][RTW89_ACMA][12] = 46,
+ [1][0][RTW89_CN][12] = 42,
+ [1][0][RTW89_UK][12] = 46,
[1][0][RTW89_FCC][13] = 127,
[1][0][RTW89_ETSI][13] = 127,
[1][0][RTW89_MKK][13] = 127,
[1][0][RTW89_IC][13] = 127,
+ [1][0][RTW89_KCC][13] = 127,
[1][0][RTW89_ACMA][13] = 127,
- [1][1][RTW89_FCC][0] = 46,
+ [1][0][RTW89_CN][13] = 127,
+ [1][0][RTW89_UK][13] = 127,
+ [1][1][RTW89_FCC][0] = 58,
[1][1][RTW89_ETSI][0] = 32,
[1][1][RTW89_MKK][0] = 34,
- [1][1][RTW89_IC][0] = 66,
+ [1][1][RTW89_IC][0] = 58,
+ [1][1][RTW89_KCC][0] = 38,
[1][1][RTW89_ACMA][0] = 32,
- [1][1][RTW89_FCC][1] = 46,
+ [1][1][RTW89_CN][0] = 32,
+ [1][1][RTW89_UK][0] = 32,
+ [1][1][RTW89_FCC][1] = 58,
[1][1][RTW89_ETSI][1] = 34,
[1][1][RTW89_MKK][1] = 34,
- [1][1][RTW89_IC][1] = 66,
- [1][1][RTW89_ACMA][1] = 32,
- [1][1][RTW89_FCC][2] = 50,
+ [1][1][RTW89_IC][1] = 58,
+ [1][1][RTW89_KCC][1] = 38,
+ [1][1][RTW89_ACMA][1] = 34,
+ [1][1][RTW89_CN][1] = 32,
+ [1][1][RTW89_UK][1] = 34,
+ [1][1][RTW89_FCC][2] = 62,
[1][1][RTW89_ETSI][2] = 34,
[1][1][RTW89_MKK][2] = 34,
- [1][1][RTW89_IC][2] = 70,
- [1][1][RTW89_ACMA][2] = 32,
- [1][1][RTW89_FCC][3] = 54,
+ [1][1][RTW89_IC][2] = 62,
+ [1][1][RTW89_KCC][2] = 38,
+ [1][1][RTW89_ACMA][2] = 34,
+ [1][1][RTW89_CN][2] = 32,
+ [1][1][RTW89_UK][2] = 34,
+ [1][1][RTW89_FCC][3] = 66,
[1][1][RTW89_ETSI][3] = 34,
[1][1][RTW89_MKK][3] = 34,
- [1][1][RTW89_IC][3] = 74,
- [1][1][RTW89_ACMA][3] = 32,
- [1][1][RTW89_FCC][4] = 58,
+ [1][1][RTW89_IC][3] = 66,
+ [1][1][RTW89_KCC][3] = 38,
+ [1][1][RTW89_ACMA][3] = 34,
+ [1][1][RTW89_CN][3] = 32,
+ [1][1][RTW89_UK][3] = 34,
+ [1][1][RTW89_FCC][4] = 70,
[1][1][RTW89_ETSI][4] = 34,
[1][1][RTW89_MKK][4] = 34,
- [1][1][RTW89_IC][4] = 74,
- [1][1][RTW89_ACMA][4] = 32,
- [1][1][RTW89_FCC][5] = 66,
+ [1][1][RTW89_IC][4] = 70,
+ [1][1][RTW89_KCC][4] = 38,
+ [1][1][RTW89_ACMA][4] = 34,
+ [1][1][RTW89_CN][4] = 32,
+ [1][1][RTW89_UK][4] = 34,
+ [1][1][RTW89_FCC][5] = 82,
[1][1][RTW89_ETSI][5] = 34,
[1][1][RTW89_MKK][5] = 34,
- [1][1][RTW89_IC][5] = 74,
- [1][1][RTW89_ACMA][5] = 32,
- [1][1][RTW89_FCC][6] = 58,
+ [1][1][RTW89_IC][5] = 82,
+ [1][1][RTW89_KCC][5] = 38,
+ [1][1][RTW89_ACMA][5] = 34,
+ [1][1][RTW89_CN][5] = 32,
+ [1][1][RTW89_UK][5] = 34,
+ [1][1][RTW89_FCC][6] = 60,
[1][1][RTW89_ETSI][6] = 34,
[1][1][RTW89_MKK][6] = 34,
- [1][1][RTW89_IC][6] = 74,
- [1][1][RTW89_ACMA][6] = 32,
- [1][1][RTW89_FCC][7] = 54,
+ [1][1][RTW89_IC][6] = 60,
+ [1][1][RTW89_KCC][6] = 38,
+ [1][1][RTW89_ACMA][6] = 34,
+ [1][1][RTW89_CN][6] = 32,
+ [1][1][RTW89_UK][6] = 34,
+ [1][1][RTW89_FCC][7] = 56,
[1][1][RTW89_ETSI][7] = 34,
[1][1][RTW89_MKK][7] = 34,
- [1][1][RTW89_IC][7] = 74,
- [1][1][RTW89_ACMA][7] = 32,
- [1][1][RTW89_FCC][8] = 50,
+ [1][1][RTW89_IC][7] = 56,
+ [1][1][RTW89_KCC][7] = 38,
+ [1][1][RTW89_ACMA][7] = 34,
+ [1][1][RTW89_CN][7] = 32,
+ [1][1][RTW89_UK][7] = 34,
+ [1][1][RTW89_FCC][8] = 52,
[1][1][RTW89_ETSI][8] = 34,
[1][1][RTW89_MKK][8] = 34,
- [1][1][RTW89_IC][8] = 70,
- [1][1][RTW89_ACMA][8] = 32,
- [1][1][RTW89_FCC][9] = 46,
+ [1][1][RTW89_IC][8] = 52,
+ [1][1][RTW89_KCC][8] = 38,
+ [1][1][RTW89_ACMA][8] = 34,
+ [1][1][RTW89_CN][8] = 32,
+ [1][1][RTW89_UK][8] = 34,
+ [1][1][RTW89_FCC][9] = 48,
[1][1][RTW89_ETSI][9] = 34,
[1][1][RTW89_MKK][9] = 34,
- [1][1][RTW89_IC][9] = 66,
- [1][1][RTW89_ACMA][9] = 32,
- [1][1][RTW89_FCC][10] = 46,
+ [1][1][RTW89_IC][9] = 48,
+ [1][1][RTW89_KCC][9] = 38,
+ [1][1][RTW89_ACMA][9] = 34,
+ [1][1][RTW89_CN][9] = 32,
+ [1][1][RTW89_UK][9] = 34,
+ [1][1][RTW89_FCC][10] = 48,
[1][1][RTW89_ETSI][10] = 34,
[1][1][RTW89_MKK][10] = 34,
- [1][1][RTW89_IC][10] = 66,
- [1][1][RTW89_ACMA][10] = 32,
+ [1][1][RTW89_IC][10] = 48,
+ [1][1][RTW89_KCC][10] = 38,
+ [1][1][RTW89_ACMA][10] = 34,
+ [1][1][RTW89_CN][10] = 32,
+ [1][1][RTW89_UK][10] = 34,
[1][1][RTW89_FCC][11] = 30,
[1][1][RTW89_ETSI][11] = 34,
[1][1][RTW89_MKK][11] = 34,
- [1][1][RTW89_IC][11] = 48,
- [1][1][RTW89_ACMA][11] = 32,
- [1][1][RTW89_FCC][12] = 24,
+ [1][1][RTW89_IC][11] = 30,
+ [1][1][RTW89_KCC][11] = 38,
+ [1][1][RTW89_ACMA][11] = 34,
+ [1][1][RTW89_CN][11] = 32,
+ [1][1][RTW89_UK][11] = 34,
+ [1][1][RTW89_FCC][12] = -6,
[1][1][RTW89_ETSI][12] = 34,
[1][1][RTW89_MKK][12] = 34,
- [1][1][RTW89_IC][12] = 32,
- [1][1][RTW89_ACMA][12] = 32,
+ [1][1][RTW89_IC][12] = -6,
+ [1][1][RTW89_KCC][12] = 38,
+ [1][1][RTW89_ACMA][12] = 34,
+ [1][1][RTW89_CN][12] = 32,
+ [1][1][RTW89_UK][12] = 34,
[1][1][RTW89_FCC][13] = 127,
[1][1][RTW89_ETSI][13] = 127,
[1][1][RTW89_MKK][13] = 127,
[1][1][RTW89_IC][13] = 127,
+ [1][1][RTW89_KCC][13] = 127,
[1][1][RTW89_ACMA][13] = 127,
- [2][0][RTW89_FCC][0] = 64,
+ [1][1][RTW89_CN][13] = 127,
+ [1][1][RTW89_UK][13] = 127,
+ [2][0][RTW89_FCC][0] = 70,
[2][0][RTW89_ETSI][0] = 58,
[2][0][RTW89_MKK][0] = 58,
- [2][0][RTW89_IC][0] = 78,
- [2][0][RTW89_ACMA][0] = 56,
- [2][0][RTW89_FCC][1] = 64,
+ [2][0][RTW89_IC][0] = 70,
+ [2][0][RTW89_KCC][0] = 64,
+ [2][0][RTW89_ACMA][0] = 58,
+ [2][0][RTW89_CN][0] = 56,
+ [2][0][RTW89_UK][0] = 58,
+ [2][0][RTW89_FCC][1] = 70,
[2][0][RTW89_ETSI][1] = 58,
[2][0][RTW89_MKK][1] = 58,
- [2][0][RTW89_IC][1] = 78,
- [2][0][RTW89_ACMA][1] = 56,
- [2][0][RTW89_FCC][2] = 66,
+ [2][0][RTW89_IC][1] = 70,
+ [2][0][RTW89_KCC][1] = 64,
+ [2][0][RTW89_ACMA][1] = 58,
+ [2][0][RTW89_CN][1] = 56,
+ [2][0][RTW89_UK][1] = 58,
+ [2][0][RTW89_FCC][2] = 72,
[2][0][RTW89_ETSI][2] = 58,
[2][0][RTW89_MKK][2] = 58,
- [2][0][RTW89_IC][2] = 80,
- [2][0][RTW89_ACMA][2] = 56,
- [2][0][RTW89_FCC][3] = 66,
+ [2][0][RTW89_IC][2] = 72,
+ [2][0][RTW89_KCC][2] = 64,
+ [2][0][RTW89_ACMA][2] = 58,
+ [2][0][RTW89_CN][2] = 56,
+ [2][0][RTW89_UK][2] = 58,
+ [2][0][RTW89_FCC][3] = 72,
[2][0][RTW89_ETSI][3] = 58,
[2][0][RTW89_MKK][3] = 58,
- [2][0][RTW89_IC][3] = 80,
- [2][0][RTW89_ACMA][3] = 56,
- [2][0][RTW89_FCC][4] = 66,
+ [2][0][RTW89_IC][3] = 72,
+ [2][0][RTW89_KCC][3] = 64,
+ [2][0][RTW89_ACMA][3] = 58,
+ [2][0][RTW89_CN][3] = 56,
+ [2][0][RTW89_UK][3] = 58,
+ [2][0][RTW89_FCC][4] = 72,
[2][0][RTW89_ETSI][4] = 58,
[2][0][RTW89_MKK][4] = 58,
- [2][0][RTW89_IC][4] = 80,
- [2][0][RTW89_ACMA][4] = 56,
- [2][0][RTW89_FCC][5] = 76,
+ [2][0][RTW89_IC][4] = 72,
+ [2][0][RTW89_KCC][4] = 64,
+ [2][0][RTW89_ACMA][4] = 58,
+ [2][0][RTW89_CN][4] = 56,
+ [2][0][RTW89_UK][4] = 58,
+ [2][0][RTW89_FCC][5] = 82,
[2][0][RTW89_ETSI][5] = 58,
[2][0][RTW89_MKK][5] = 58,
- [2][0][RTW89_IC][5] = 84,
- [2][0][RTW89_ACMA][5] = 56,
- [2][0][RTW89_FCC][6] = 62,
+ [2][0][RTW89_IC][5] = 82,
+ [2][0][RTW89_KCC][5] = 64,
+ [2][0][RTW89_ACMA][5] = 58,
+ [2][0][RTW89_CN][5] = 56,
+ [2][0][RTW89_UK][5] = 58,
+ [2][0][RTW89_FCC][6] = 66,
[2][0][RTW89_ETSI][6] = 56,
[2][0][RTW89_MKK][6] = 58,
- [2][0][RTW89_IC][6] = 76,
+ [2][0][RTW89_IC][6] = 66,
+ [2][0][RTW89_KCC][6] = 64,
[2][0][RTW89_ACMA][6] = 56,
- [2][0][RTW89_FCC][7] = 62,
+ [2][0][RTW89_CN][6] = 56,
+ [2][0][RTW89_UK][6] = 56,
+ [2][0][RTW89_FCC][7] = 66,
[2][0][RTW89_ETSI][7] = 58,
[2][0][RTW89_MKK][7] = 58,
- [2][0][RTW89_IC][7] = 76,
- [2][0][RTW89_ACMA][7] = 56,
- [2][0][RTW89_FCC][8] = 62,
+ [2][0][RTW89_IC][7] = 66,
+ [2][0][RTW89_KCC][7] = 64,
+ [2][0][RTW89_ACMA][7] = 58,
+ [2][0][RTW89_CN][7] = 56,
+ [2][0][RTW89_UK][7] = 58,
+ [2][0][RTW89_FCC][8] = 66,
[2][0][RTW89_ETSI][8] = 58,
[2][0][RTW89_MKK][8] = 58,
- [2][0][RTW89_IC][8] = 76,
- [2][0][RTW89_ACMA][8] = 56,
- [2][0][RTW89_FCC][9] = 60,
+ [2][0][RTW89_IC][8] = 66,
+ [2][0][RTW89_KCC][8] = 64,
+ [2][0][RTW89_ACMA][8] = 58,
+ [2][0][RTW89_CN][8] = 56,
+ [2][0][RTW89_UK][8] = 58,
+ [2][0][RTW89_FCC][9] = 64,
[2][0][RTW89_ETSI][9] = 58,
[2][0][RTW89_MKK][9] = 58,
- [2][0][RTW89_IC][9] = 74,
- [2][0][RTW89_ACMA][9] = 56,
- [2][0][RTW89_FCC][10] = 60,
+ [2][0][RTW89_IC][9] = 64,
+ [2][0][RTW89_KCC][9] = 64,
+ [2][0][RTW89_ACMA][9] = 58,
+ [2][0][RTW89_CN][9] = 56,
+ [2][0][RTW89_UK][9] = 58,
+ [2][0][RTW89_FCC][10] = 64,
[2][0][RTW89_ETSI][10] = 58,
[2][0][RTW89_MKK][10] = 58,
- [2][0][RTW89_IC][10] = 74,
- [2][0][RTW89_ACMA][10] = 56,
- [2][0][RTW89_FCC][11] = 42,
+ [2][0][RTW89_IC][10] = 64,
+ [2][0][RTW89_KCC][10] = 64,
+ [2][0][RTW89_ACMA][10] = 58,
+ [2][0][RTW89_CN][10] = 56,
+ [2][0][RTW89_UK][10] = 58,
+ [2][0][RTW89_FCC][11] = 48,
[2][0][RTW89_ETSI][11] = 58,
[2][0][RTW89_MKK][11] = 58,
- [2][0][RTW89_IC][11] = 66,
- [2][0][RTW89_ACMA][11] = 56,
- [2][0][RTW89_FCC][12] = 38,
+ [2][0][RTW89_IC][11] = 48,
+ [2][0][RTW89_KCC][11] = 64,
+ [2][0][RTW89_ACMA][11] = 58,
+ [2][0][RTW89_CN][11] = 56,
+ [2][0][RTW89_UK][11] = 58,
+ [2][0][RTW89_FCC][12] = 16,
[2][0][RTW89_ETSI][12] = 58,
[2][0][RTW89_MKK][12] = 58,
- [2][0][RTW89_IC][12] = 56,
- [2][0][RTW89_ACMA][12] = 56,
+ [2][0][RTW89_IC][12] = 16,
+ [2][0][RTW89_KCC][12] = 64,
+ [2][0][RTW89_ACMA][12] = 58,
+ [2][0][RTW89_CN][12] = 56,
+ [2][0][RTW89_UK][12] = 58,
[2][0][RTW89_FCC][13] = 127,
[2][0][RTW89_ETSI][13] = 127,
[2][0][RTW89_MKK][13] = 127,
[2][0][RTW89_IC][13] = 127,
+ [2][0][RTW89_KCC][13] = 127,
[2][0][RTW89_ACMA][13] = 127,
- [2][1][RTW89_FCC][0] = 46,
+ [2][0][RTW89_CN][13] = 127,
+ [2][0][RTW89_UK][13] = 127,
+ [2][1][RTW89_FCC][0] = 64,
[2][1][RTW89_ETSI][0] = 46,
[2][1][RTW89_MKK][0] = 46,
- [2][1][RTW89_IC][0] = 70,
- [2][1][RTW89_ACMA][0] = 44,
- [2][1][RTW89_FCC][1] = 46,
+ [2][1][RTW89_IC][0] = 64,
+ [2][1][RTW89_KCC][0] = 52,
+ [2][1][RTW89_ACMA][0] = 46,
+ [2][1][RTW89_CN][0] = 44,
+ [2][1][RTW89_UK][0] = 46,
+ [2][1][RTW89_FCC][1] = 64,
[2][1][RTW89_ETSI][1] = 46,
[2][1][RTW89_MKK][1] = 46,
- [2][1][RTW89_IC][1] = 70,
- [2][1][RTW89_ACMA][1] = 44,
- [2][1][RTW89_FCC][2] = 50,
+ [2][1][RTW89_IC][1] = 64,
+ [2][1][RTW89_KCC][1] = 52,
+ [2][1][RTW89_ACMA][1] = 46,
+ [2][1][RTW89_CN][1] = 44,
+ [2][1][RTW89_UK][1] = 46,
+ [2][1][RTW89_FCC][2] = 68,
[2][1][RTW89_ETSI][2] = 46,
[2][1][RTW89_MKK][2] = 46,
- [2][1][RTW89_IC][2] = 74,
- [2][1][RTW89_ACMA][2] = 44,
- [2][1][RTW89_FCC][3] = 54,
+ [2][1][RTW89_IC][2] = 68,
+ [2][1][RTW89_KCC][2] = 52,
+ [2][1][RTW89_ACMA][2] = 46,
+ [2][1][RTW89_CN][2] = 44,
+ [2][1][RTW89_UK][2] = 46,
+ [2][1][RTW89_FCC][3] = 72,
[2][1][RTW89_ETSI][3] = 46,
[2][1][RTW89_MKK][3] = 46,
- [2][1][RTW89_IC][3] = 78,
- [2][1][RTW89_ACMA][3] = 44,
- [2][1][RTW89_FCC][4] = 56,
+ [2][1][RTW89_IC][3] = 72,
+ [2][1][RTW89_KCC][3] = 52,
+ [2][1][RTW89_ACMA][3] = 46,
+ [2][1][RTW89_CN][3] = 44,
+ [2][1][RTW89_UK][3] = 46,
+ [2][1][RTW89_FCC][4] = 74,
[2][1][RTW89_ETSI][4] = 46,
[2][1][RTW89_MKK][4] = 46,
- [2][1][RTW89_IC][4] = 80,
- [2][1][RTW89_ACMA][4] = 44,
- [2][1][RTW89_FCC][5] = 72,
+ [2][1][RTW89_IC][4] = 74,
+ [2][1][RTW89_KCC][4] = 50,
+ [2][1][RTW89_ACMA][4] = 46,
+ [2][1][RTW89_CN][4] = 44,
+ [2][1][RTW89_UK][4] = 46,
+ [2][1][RTW89_FCC][5] = 82,
[2][1][RTW89_ETSI][5] = 46,
[2][1][RTW89_MKK][5] = 46,
- [2][1][RTW89_IC][5] = 80,
- [2][1][RTW89_ACMA][5] = 44,
- [2][1][RTW89_FCC][6] = 54,
+ [2][1][RTW89_IC][5] = 82,
+ [2][1][RTW89_KCC][5] = 50,
+ [2][1][RTW89_ACMA][5] = 46,
+ [2][1][RTW89_CN][5] = 44,
+ [2][1][RTW89_UK][5] = 46,
+ [2][1][RTW89_FCC][6] = 72,
[2][1][RTW89_ETSI][6] = 44,
[2][1][RTW89_MKK][6] = 46,
- [2][1][RTW89_IC][6] = 78,
+ [2][1][RTW89_IC][6] = 72,
+ [2][1][RTW89_KCC][6] = 50,
[2][1][RTW89_ACMA][6] = 44,
- [2][1][RTW89_FCC][7] = 54,
+ [2][1][RTW89_CN][6] = 44,
+ [2][1][RTW89_UK][6] = 44,
+ [2][1][RTW89_FCC][7] = 72,
[2][1][RTW89_ETSI][7] = 46,
[2][1][RTW89_MKK][7] = 46,
- [2][1][RTW89_IC][7] = 78,
- [2][1][RTW89_ACMA][7] = 44,
- [2][1][RTW89_FCC][8] = 50,
+ [2][1][RTW89_IC][7] = 72,
+ [2][1][RTW89_KCC][7] = 50,
+ [2][1][RTW89_ACMA][7] = 46,
+ [2][1][RTW89_CN][7] = 44,
+ [2][1][RTW89_UK][7] = 46,
+ [2][1][RTW89_FCC][8] = 68,
[2][1][RTW89_ETSI][8] = 46,
[2][1][RTW89_MKK][8] = 46,
- [2][1][RTW89_IC][8] = 74,
- [2][1][RTW89_ACMA][8] = 44,
- [2][1][RTW89_FCC][9] = 46,
+ [2][1][RTW89_IC][8] = 68,
+ [2][1][RTW89_KCC][8] = 50,
+ [2][1][RTW89_ACMA][8] = 46,
+ [2][1][RTW89_CN][8] = 44,
+ [2][1][RTW89_UK][8] = 46,
+ [2][1][RTW89_FCC][9] = 64,
[2][1][RTW89_ETSI][9] = 46,
[2][1][RTW89_MKK][9] = 46,
- [2][1][RTW89_IC][9] = 70,
- [2][1][RTW89_ACMA][9] = 44,
- [2][1][RTW89_FCC][10] = 46,
+ [2][1][RTW89_IC][9] = 64,
+ [2][1][RTW89_KCC][9] = 52,
+ [2][1][RTW89_ACMA][9] = 46,
+ [2][1][RTW89_CN][9] = 44,
+ [2][1][RTW89_UK][9] = 46,
+ [2][1][RTW89_FCC][10] = 64,
[2][1][RTW89_ETSI][10] = 46,
[2][1][RTW89_MKK][10] = 46,
- [2][1][RTW89_IC][10] = 70,
- [2][1][RTW89_ACMA][10] = 44,
- [2][1][RTW89_FCC][11] = 30,
+ [2][1][RTW89_IC][10] = 64,
+ [2][1][RTW89_KCC][10] = 52,
+ [2][1][RTW89_ACMA][10] = 46,
+ [2][1][RTW89_CN][10] = 44,
+ [2][1][RTW89_UK][10] = 46,
+ [2][1][RTW89_FCC][11] = 46,
[2][1][RTW89_ETSI][11] = 46,
[2][1][RTW89_MKK][11] = 46,
- [2][1][RTW89_IC][11] = 60,
- [2][1][RTW89_ACMA][11] = 44,
- [2][1][RTW89_FCC][12] = 26,
+ [2][1][RTW89_IC][11] = 46,
+ [2][1][RTW89_KCC][11] = 52,
+ [2][1][RTW89_ACMA][11] = 46,
+ [2][1][RTW89_CN][11] = 44,
+ [2][1][RTW89_UK][11] = 46,
+ [2][1][RTW89_FCC][12] = 6,
[2][1][RTW89_ETSI][12] = 44,
[2][1][RTW89_MKK][12] = 46,
- [2][1][RTW89_IC][12] = 44,
- [2][1][RTW89_ACMA][12] = 42,
+ [2][1][RTW89_IC][12] = 6,
+ [2][1][RTW89_KCC][12] = 52,
+ [2][1][RTW89_ACMA][12] = 44,
+ [2][1][RTW89_CN][12] = 42,
+ [2][1][RTW89_UK][12] = 44,
[2][1][RTW89_FCC][13] = 127,
[2][1][RTW89_ETSI][13] = 127,
[2][1][RTW89_MKK][13] = 127,
[2][1][RTW89_IC][13] = 127,
+ [2][1][RTW89_KCC][13] = 127,
[2][1][RTW89_ACMA][13] = 127,
+ [2][1][RTW89_CN][13] = 127,
+ [2][1][RTW89_UK][13] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
- [0][0][RTW89_WW][0] = 24,
- [0][0][RTW89_WW][2] = 24,
- [0][0][RTW89_WW][4] = 22,
- [0][0][RTW89_WW][6] = 22,
- [0][0][RTW89_WW][8] = 18,
- [0][0][RTW89_WW][10] = 18,
- [0][0][RTW89_WW][12] = 24,
- [0][0][RTW89_WW][14] = 24,
- [0][0][RTW89_WW][15] = 24,
- [0][0][RTW89_WW][17] = 24,
- [0][0][RTW89_WW][19] = 24,
- [0][0][RTW89_WW][21] = 24,
- [0][0][RTW89_WW][23] = 24,
+ [0][0][RTW89_WW][0] = 16,
+ [0][0][RTW89_WW][2] = 16,
+ [0][0][RTW89_WW][4] = 16,
+ [0][0][RTW89_WW][6] = 10,
+ [0][0][RTW89_WW][8] = 16,
+ [0][0][RTW89_WW][10] = 16,
+ [0][0][RTW89_WW][12] = 16,
+ [0][0][RTW89_WW][14] = 16,
+ [0][0][RTW89_WW][15] = 30,
+ [0][0][RTW89_WW][17] = 30,
+ [0][0][RTW89_WW][19] = 30,
+ [0][0][RTW89_WW][21] = 30,
+ [0][0][RTW89_WW][23] = 30,
[0][0][RTW89_WW][25] = 30,
[0][0][RTW89_WW][27] = 30,
[0][0][RTW89_WW][29] = 30,
- [0][0][RTW89_WW][31] = 24,
- [0][0][RTW89_WW][33] = 24,
- [0][0][RTW89_WW][35] = 24,
- [0][0][RTW89_WW][37] = 44,
+ [0][0][RTW89_WW][31] = 30,
+ [0][0][RTW89_WW][33] = 30,
+ [0][0][RTW89_WW][35] = 30,
+ [0][0][RTW89_WW][37] = 30,
[0][0][RTW89_WW][38] = 28,
[0][0][RTW89_WW][40] = 28,
[0][0][RTW89_WW][42] = 28,
[0][0][RTW89_WW][44] = 28,
[0][0][RTW89_WW][46] = 28,
- [0][0][RTW89_WW][48] = 24,
- [0][0][RTW89_WW][50] = 24,
- [0][0][RTW89_WW][52] = 24,
- [0][1][RTW89_WW][0] = 0,
+ [0][0][RTW89_WW][48] = 46,
+ [0][0][RTW89_WW][50] = 44,
+ [0][0][RTW89_WW][52] = 34,
+ [0][1][RTW89_WW][0] = 4,
[0][1][RTW89_WW][2] = 4,
- [0][1][RTW89_WW][4] = 0,
- [0][1][RTW89_WW][6] = 0,
- [0][1][RTW89_WW][8] = 12,
- [0][1][RTW89_WW][10] = 12,
- [0][1][RTW89_WW][12] = 12,
- [0][1][RTW89_WW][14] = 12,
- [0][1][RTW89_WW][15] = 12,
- [0][1][RTW89_WW][17] = 12,
- [0][1][RTW89_WW][19] = 12,
- [0][1][RTW89_WW][21] = 12,
- [0][1][RTW89_WW][23] = 12,
+ [0][1][RTW89_WW][4] = 4,
+ [0][1][RTW89_WW][6] = 1,
+ [0][1][RTW89_WW][8] = 4,
+ [0][1][RTW89_WW][10] = 4,
+ [0][1][RTW89_WW][12] = 4,
+ [0][1][RTW89_WW][14] = 4,
+ [0][1][RTW89_WW][15] = 18,
+ [0][1][RTW89_WW][17] = 18,
+ [0][1][RTW89_WW][19] = 18,
+ [0][1][RTW89_WW][21] = 18,
+ [0][1][RTW89_WW][23] = 18,
[0][1][RTW89_WW][25] = 18,
[0][1][RTW89_WW][27] = 16,
[0][1][RTW89_WW][29] = 16,
- [0][1][RTW89_WW][31] = 12,
- [0][1][RTW89_WW][33] = 12,
- [0][1][RTW89_WW][35] = 12,
- [0][1][RTW89_WW][37] = 30,
+ [0][1][RTW89_WW][31] = 16,
+ [0][1][RTW89_WW][33] = 16,
+ [0][1][RTW89_WW][35] = 16,
+ [0][1][RTW89_WW][37] = 18,
[0][1][RTW89_WW][38] = 16,
[0][1][RTW89_WW][40] = 16,
[0][1][RTW89_WW][42] = 16,
[0][1][RTW89_WW][44] = 16,
[0][1][RTW89_WW][46] = 16,
- [0][1][RTW89_WW][48] = 12,
- [0][1][RTW89_WW][50] = 12,
- [0][1][RTW89_WW][52] = 12,
- [1][0][RTW89_WW][0] = 34,
- [1][0][RTW89_WW][2] = 34,
- [1][0][RTW89_WW][4] = 34,
- [1][0][RTW89_WW][6] = 34,
- [1][0][RTW89_WW][8] = 34,
- [1][0][RTW89_WW][10] = 34,
- [1][0][RTW89_WW][12] = 34,
- [1][0][RTW89_WW][14] = 34,
- [1][0][RTW89_WW][15] = 34,
- [1][0][RTW89_WW][17] = 34,
- [1][0][RTW89_WW][19] = 34,
- [1][0][RTW89_WW][21] = 34,
- [1][0][RTW89_WW][23] = 34,
+ [0][1][RTW89_WW][48] = 20,
+ [0][1][RTW89_WW][50] = 20,
+ [0][1][RTW89_WW][52] = 8,
+ [1][0][RTW89_WW][0] = 26,
+ [1][0][RTW89_WW][2] = 26,
+ [1][0][RTW89_WW][4] = 26,
+ [1][0][RTW89_WW][6] = 24,
+ [1][0][RTW89_WW][8] = 26,
+ [1][0][RTW89_WW][10] = 26,
+ [1][0][RTW89_WW][12] = 26,
+ [1][0][RTW89_WW][14] = 26,
+ [1][0][RTW89_WW][15] = 40,
+ [1][0][RTW89_WW][17] = 40,
+ [1][0][RTW89_WW][19] = 40,
+ [1][0][RTW89_WW][21] = 40,
+ [1][0][RTW89_WW][23] = 40,
[1][0][RTW89_WW][25] = 40,
[1][0][RTW89_WW][27] = 42,
[1][0][RTW89_WW][29] = 42,
- [1][0][RTW89_WW][31] = 34,
- [1][0][RTW89_WW][33] = 34,
- [1][0][RTW89_WW][35] = 34,
- [1][0][RTW89_WW][37] = 56,
+ [1][0][RTW89_WW][31] = 42,
+ [1][0][RTW89_WW][33] = 42,
+ [1][0][RTW89_WW][35] = 42,
+ [1][0][RTW89_WW][37] = 42,
[1][0][RTW89_WW][38] = 28,
[1][0][RTW89_WW][40] = 28,
[1][0][RTW89_WW][42] = 28,
[1][0][RTW89_WW][44] = 28,
[1][0][RTW89_WW][46] = 28,
- [1][0][RTW89_WW][48] = 36,
- [1][0][RTW89_WW][50] = 36,
- [1][0][RTW89_WW][52] = 36,
- [1][1][RTW89_WW][0] = 10,
+ [1][0][RTW89_WW][48] = 56,
+ [1][0][RTW89_WW][50] = 58,
+ [1][0][RTW89_WW][52] = 56,
+ [1][1][RTW89_WW][0] = 14,
[1][1][RTW89_WW][2] = 14,
- [1][1][RTW89_WW][4] = 10,
- [1][1][RTW89_WW][6] = 10,
- [1][1][RTW89_WW][8] = 20,
- [1][1][RTW89_WW][10] = 20,
- [1][1][RTW89_WW][12] = 22,
- [1][1][RTW89_WW][14] = 22,
- [1][1][RTW89_WW][15] = 22,
- [1][1][RTW89_WW][17] = 22,
- [1][1][RTW89_WW][19] = 22,
- [1][1][RTW89_WW][21] = 22,
- [1][1][RTW89_WW][23] = 22,
+ [1][1][RTW89_WW][4] = 14,
+ [1][1][RTW89_WW][6] = 8,
+ [1][1][RTW89_WW][8] = 14,
+ [1][1][RTW89_WW][10] = 14,
+ [1][1][RTW89_WW][12] = 14,
+ [1][1][RTW89_WW][14] = 14,
+ [1][1][RTW89_WW][15] = 28,
+ [1][1][RTW89_WW][17] = 28,
+ [1][1][RTW89_WW][19] = 28,
+ [1][1][RTW89_WW][21] = 28,
+ [1][1][RTW89_WW][23] = 28,
[1][1][RTW89_WW][25] = 28,
[1][1][RTW89_WW][27] = 30,
[1][1][RTW89_WW][29] = 30,
- [1][1][RTW89_WW][31] = 22,
- [1][1][RTW89_WW][33] = 22,
- [1][1][RTW89_WW][35] = 22,
- [1][1][RTW89_WW][37] = 40,
+ [1][1][RTW89_WW][31] = 30,
+ [1][1][RTW89_WW][33] = 30,
+ [1][1][RTW89_WW][35] = 30,
+ [1][1][RTW89_WW][37] = 32,
[1][1][RTW89_WW][38] = 16,
[1][1][RTW89_WW][40] = 16,
[1][1][RTW89_WW][42] = 16,
[1][1][RTW89_WW][44] = 16,
[1][1][RTW89_WW][46] = 16,
- [1][1][RTW89_WW][48] = 24,
- [1][1][RTW89_WW][50] = 24,
- [1][1][RTW89_WW][52] = 24,
- [2][0][RTW89_WW][0] = 46,
- [2][0][RTW89_WW][2] = 46,
- [2][0][RTW89_WW][4] = 46,
- [2][0][RTW89_WW][6] = 46,
- [2][0][RTW89_WW][8] = 44,
- [2][0][RTW89_WW][10] = 44,
- [2][0][RTW89_WW][12] = 48,
- [2][0][RTW89_WW][14] = 48,
- [2][0][RTW89_WW][15] = 48,
- [2][0][RTW89_WW][17] = 48,
- [2][0][RTW89_WW][19] = 48,
- [2][0][RTW89_WW][21] = 48,
- [2][0][RTW89_WW][23] = 48,
+ [1][1][RTW89_WW][48] = 34,
+ [1][1][RTW89_WW][50] = 34,
+ [1][1][RTW89_WW][52] = 30,
+ [2][0][RTW89_WW][0] = 40,
+ [2][0][RTW89_WW][2] = 40,
+ [2][0][RTW89_WW][4] = 40,
+ [2][0][RTW89_WW][6] = 36,
+ [2][0][RTW89_WW][8] = 40,
+ [2][0][RTW89_WW][10] = 40,
+ [2][0][RTW89_WW][12] = 40,
+ [2][0][RTW89_WW][14] = 40,
+ [2][0][RTW89_WW][15] = 52,
+ [2][0][RTW89_WW][17] = 52,
+ [2][0][RTW89_WW][19] = 52,
+ [2][0][RTW89_WW][21] = 52,
+ [2][0][RTW89_WW][23] = 52,
[2][0][RTW89_WW][25] = 52,
[2][0][RTW89_WW][27] = 52,
[2][0][RTW89_WW][29] = 52,
- [2][0][RTW89_WW][31] = 48,
- [2][0][RTW89_WW][33] = 48,
- [2][0][RTW89_WW][35] = 48,
- [2][0][RTW89_WW][37] = 62,
+ [2][0][RTW89_WW][31] = 52,
+ [2][0][RTW89_WW][33] = 52,
+ [2][0][RTW89_WW][35] = 52,
+ [2][0][RTW89_WW][37] = 52,
[2][0][RTW89_WW][38] = 28,
[2][0][RTW89_WW][40] = 28,
[2][0][RTW89_WW][42] = 28,
[2][0][RTW89_WW][44] = 28,
[2][0][RTW89_WW][46] = 28,
- [2][0][RTW89_WW][48] = 48,
- [2][0][RTW89_WW][50] = 48,
- [2][0][RTW89_WW][52] = 48,
- [2][1][RTW89_WW][0] = 20,
- [2][1][RTW89_WW][2] = 18,
- [2][1][RTW89_WW][4] = 22,
- [2][1][RTW89_WW][6] = 22,
- [2][1][RTW89_WW][8] = 32,
- [2][1][RTW89_WW][10] = 32,
- [2][1][RTW89_WW][12] = 36,
- [2][1][RTW89_WW][14] = 36,
- [2][1][RTW89_WW][15] = 36,
- [2][1][RTW89_WW][17] = 36,
- [2][1][RTW89_WW][19] = 36,
- [2][1][RTW89_WW][21] = 36,
- [2][1][RTW89_WW][23] = 36,
+ [2][0][RTW89_WW][48] = 64,
+ [2][0][RTW89_WW][50] = 64,
+ [2][0][RTW89_WW][52] = 64,
+ [2][1][RTW89_WW][0] = 26,
+ [2][1][RTW89_WW][2] = 26,
+ [2][1][RTW89_WW][4] = 26,
+ [2][1][RTW89_WW][6] = 20,
+ [2][1][RTW89_WW][8] = 28,
+ [2][1][RTW89_WW][10] = 28,
+ [2][1][RTW89_WW][12] = 28,
+ [2][1][RTW89_WW][14] = 28,
+ [2][1][RTW89_WW][15] = 40,
+ [2][1][RTW89_WW][17] = 40,
+ [2][1][RTW89_WW][19] = 40,
+ [2][1][RTW89_WW][21] = 40,
+ [2][1][RTW89_WW][23] = 40,
[2][1][RTW89_WW][25] = 40,
[2][1][RTW89_WW][27] = 40,
[2][1][RTW89_WW][29] = 40,
- [2][1][RTW89_WW][31] = 36,
- [2][1][RTW89_WW][33] = 36,
- [2][1][RTW89_WW][35] = 36,
+ [2][1][RTW89_WW][31] = 40,
+ [2][1][RTW89_WW][33] = 40,
+ [2][1][RTW89_WW][35] = 40,
[2][1][RTW89_WW][37] = 42,
[2][1][RTW89_WW][38] = 16,
[2][1][RTW89_WW][40] = 16,
[2][1][RTW89_WW][42] = 16,
[2][1][RTW89_WW][44] = 16,
[2][1][RTW89_WW][46] = 16,
- [2][1][RTW89_WW][48] = 36,
- [2][1][RTW89_WW][50] = 36,
- [2][1][RTW89_WW][52] = 36,
- [0][0][RTW89_FCC][0] = 44,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
+ [0][0][RTW89_FCC][0] = 50,
[0][0][RTW89_ETSI][0] = 30,
[0][0][RTW89_MKK][0] = 36,
- [0][0][RTW89_IC][0] = 24,
- [0][0][RTW89_ACMA][0] = 24,
- [0][0][RTW89_FCC][2] = 44,
+ [0][0][RTW89_IC][0] = 32,
+ [0][0][RTW89_KCC][0] = 42,
+ [0][0][RTW89_ACMA][0] = 30,
+ [0][0][RTW89_CN][0] = 16,
+ [0][0][RTW89_UK][0] = 30,
+ [0][0][RTW89_FCC][2] = 50,
[0][0][RTW89_ETSI][2] = 30,
[0][0][RTW89_MKK][2] = 36,
- [0][0][RTW89_IC][2] = 24,
- [0][0][RTW89_ACMA][2] = 24,
- [0][0][RTW89_FCC][4] = 44,
+ [0][0][RTW89_IC][2] = 32,
+ [0][0][RTW89_KCC][2] = 42,
+ [0][0][RTW89_ACMA][2] = 30,
+ [0][0][RTW89_CN][2] = 16,
+ [0][0][RTW89_UK][2] = 30,
+ [0][0][RTW89_FCC][4] = 50,
[0][0][RTW89_ETSI][4] = 30,
[0][0][RTW89_MKK][4] = 22,
- [0][0][RTW89_IC][4] = 24,
- [0][0][RTW89_ACMA][4] = 24,
- [0][0][RTW89_FCC][6] = 44,
+ [0][0][RTW89_IC][4] = 32,
+ [0][0][RTW89_KCC][4] = 42,
+ [0][0][RTW89_ACMA][4] = 30,
+ [0][0][RTW89_CN][4] = 16,
+ [0][0][RTW89_UK][4] = 30,
+ [0][0][RTW89_FCC][6] = 50,
[0][0][RTW89_ETSI][6] = 30,
[0][0][RTW89_MKK][6] = 22,
- [0][0][RTW89_IC][6] = 24,
- [0][0][RTW89_ACMA][6] = 24,
- [0][0][RTW89_FCC][8] = 44,
+ [0][0][RTW89_IC][6] = 32,
+ [0][0][RTW89_KCC][6] = 10,
+ [0][0][RTW89_ACMA][6] = 30,
+ [0][0][RTW89_CN][6] = 16,
+ [0][0][RTW89_UK][6] = 30,
+ [0][0][RTW89_FCC][8] = 52,
[0][0][RTW89_ETSI][8] = 28,
[0][0][RTW89_MKK][8] = 18,
[0][0][RTW89_IC][8] = 52,
- [0][0][RTW89_ACMA][8] = 24,
- [0][0][RTW89_FCC][10] = 44,
+ [0][0][RTW89_KCC][8] = 44,
+ [0][0][RTW89_ACMA][8] = 28,
+ [0][0][RTW89_CN][8] = 16,
+ [0][0][RTW89_UK][8] = 28,
+ [0][0][RTW89_FCC][10] = 52,
[0][0][RTW89_ETSI][10] = 28,
[0][0][RTW89_MKK][10] = 18,
[0][0][RTW89_IC][10] = 52,
- [0][0][RTW89_ACMA][10] = 24,
- [0][0][RTW89_FCC][12] = 44,
+ [0][0][RTW89_KCC][10] = 44,
+ [0][0][RTW89_ACMA][10] = 28,
+ [0][0][RTW89_CN][10] = 16,
+ [0][0][RTW89_UK][10] = 28,
+ [0][0][RTW89_FCC][12] = 52,
[0][0][RTW89_ETSI][12] = 28,
[0][0][RTW89_MKK][12] = 34,
[0][0][RTW89_IC][12] = 52,
- [0][0][RTW89_ACMA][12] = 24,
- [0][0][RTW89_FCC][14] = 44,
+ [0][0][RTW89_KCC][12] = 40,
+ [0][0][RTW89_ACMA][12] = 28,
+ [0][0][RTW89_CN][12] = 16,
+ [0][0][RTW89_UK][12] = 28,
+ [0][0][RTW89_FCC][14] = 52,
[0][0][RTW89_ETSI][14] = 28,
[0][0][RTW89_MKK][14] = 34,
[0][0][RTW89_IC][14] = 52,
- [0][0][RTW89_ACMA][14] = 24,
- [0][0][RTW89_FCC][15] = 44,
+ [0][0][RTW89_KCC][14] = 40,
+ [0][0][RTW89_ACMA][14] = 28,
+ [0][0][RTW89_CN][14] = 16,
+ [0][0][RTW89_UK][14] = 28,
+ [0][0][RTW89_FCC][15] = 52,
[0][0][RTW89_ETSI][15] = 30,
[0][0][RTW89_MKK][15] = 56,
[0][0][RTW89_IC][15] = 52,
- [0][0][RTW89_ACMA][15] = 24,
- [0][0][RTW89_FCC][17] = 44,
+ [0][0][RTW89_KCC][15] = 42,
+ [0][0][RTW89_ACMA][15] = 30,
+ [0][0][RTW89_CN][15] = 127,
+ [0][0][RTW89_UK][15] = 30,
+ [0][0][RTW89_FCC][17] = 52,
[0][0][RTW89_ETSI][17] = 30,
[0][0][RTW89_MKK][17] = 58,
[0][0][RTW89_IC][17] = 52,
- [0][0][RTW89_ACMA][17] = 24,
- [0][0][RTW89_FCC][19] = 44,
+ [0][0][RTW89_KCC][17] = 42,
+ [0][0][RTW89_ACMA][17] = 30,
+ [0][0][RTW89_CN][17] = 127,
+ [0][0][RTW89_UK][17] = 30,
+ [0][0][RTW89_FCC][19] = 52,
[0][0][RTW89_ETSI][19] = 30,
[0][0][RTW89_MKK][19] = 58,
[0][0][RTW89_IC][19] = 52,
- [0][0][RTW89_ACMA][19] = 24,
- [0][0][RTW89_FCC][21] = 44,
+ [0][0][RTW89_KCC][19] = 42,
+ [0][0][RTW89_ACMA][19] = 30,
+ [0][0][RTW89_CN][19] = 127,
+ [0][0][RTW89_UK][19] = 30,
+ [0][0][RTW89_FCC][21] = 52,
[0][0][RTW89_ETSI][21] = 30,
[0][0][RTW89_MKK][21] = 58,
[0][0][RTW89_IC][21] = 52,
- [0][0][RTW89_ACMA][21] = 24,
- [0][0][RTW89_FCC][23] = 44,
+ [0][0][RTW89_KCC][21] = 42,
+ [0][0][RTW89_ACMA][21] = 30,
+ [0][0][RTW89_CN][21] = 127,
+ [0][0][RTW89_UK][21] = 30,
+ [0][0][RTW89_FCC][23] = 52,
[0][0][RTW89_ETSI][23] = 30,
[0][0][RTW89_MKK][23] = 58,
[0][0][RTW89_IC][23] = 52,
- [0][0][RTW89_ACMA][23] = 24,
- [0][0][RTW89_FCC][25] = 44,
+ [0][0][RTW89_KCC][23] = 42,
+ [0][0][RTW89_ACMA][23] = 30,
+ [0][0][RTW89_CN][23] = 127,
+ [0][0][RTW89_UK][23] = 30,
+ [0][0][RTW89_FCC][25] = 52,
[0][0][RTW89_ETSI][25] = 30,
[0][0][RTW89_MKK][25] = 58,
[0][0][RTW89_IC][25] = 127,
+ [0][0][RTW89_KCC][25] = 42,
[0][0][RTW89_ACMA][25] = 127,
- [0][0][RTW89_FCC][27] = 44,
+ [0][0][RTW89_CN][25] = 127,
+ [0][0][RTW89_UK][25] = 30,
+ [0][0][RTW89_FCC][27] = 52,
[0][0][RTW89_ETSI][27] = 30,
[0][0][RTW89_MKK][27] = 58,
[0][0][RTW89_IC][27] = 127,
+ [0][0][RTW89_KCC][27] = 42,
[0][0][RTW89_ACMA][27] = 127,
- [0][0][RTW89_FCC][29] = 44,
+ [0][0][RTW89_CN][27] = 127,
+ [0][0][RTW89_UK][27] = 30,
+ [0][0][RTW89_FCC][29] = 52,
[0][0][RTW89_ETSI][29] = 30,
[0][0][RTW89_MKK][29] = 58,
[0][0][RTW89_IC][29] = 127,
+ [0][0][RTW89_KCC][29] = 42,
[0][0][RTW89_ACMA][29] = 127,
- [0][0][RTW89_FCC][31] = 44,
+ [0][0][RTW89_CN][29] = 127,
+ [0][0][RTW89_UK][29] = 30,
+ [0][0][RTW89_FCC][31] = 52,
[0][0][RTW89_ETSI][31] = 30,
[0][0][RTW89_MKK][31] = 58,
- [0][0][RTW89_IC][31] = 52,
- [0][0][RTW89_ACMA][31] = 24,
+ [0][0][RTW89_IC][31] = 44,
+ [0][0][RTW89_KCC][31] = 42,
+ [0][0][RTW89_ACMA][31] = 30,
+ [0][0][RTW89_CN][31] = 127,
+ [0][0][RTW89_UK][31] = 30,
[0][0][RTW89_FCC][33] = 44,
[0][0][RTW89_ETSI][33] = 30,
[0][0][RTW89_MKK][33] = 58,
- [0][0][RTW89_IC][33] = 52,
- [0][0][RTW89_ACMA][33] = 24,
+ [0][0][RTW89_IC][33] = 44,
+ [0][0][RTW89_KCC][33] = 42,
+ [0][0][RTW89_ACMA][33] = 30,
+ [0][0][RTW89_CN][33] = 127,
+ [0][0][RTW89_UK][33] = 30,
[0][0][RTW89_FCC][35] = 44,
[0][0][RTW89_ETSI][35] = 30,
[0][0][RTW89_MKK][35] = 58,
- [0][0][RTW89_IC][35] = 52,
- [0][0][RTW89_ACMA][35] = 24,
- [0][0][RTW89_FCC][37] = 44,
+ [0][0][RTW89_IC][35] = 44,
+ [0][0][RTW89_KCC][35] = 42,
+ [0][0][RTW89_ACMA][35] = 30,
+ [0][0][RTW89_CN][35] = 127,
+ [0][0][RTW89_UK][35] = 30,
+ [0][0][RTW89_FCC][37] = 52,
[0][0][RTW89_ETSI][37] = 127,
[0][0][RTW89_MKK][37] = 58,
[0][0][RTW89_IC][37] = 52,
+ [0][0][RTW89_KCC][37] = 42,
[0][0][RTW89_ACMA][37] = 52,
- [0][0][RTW89_FCC][38] = 76,
+ [0][0][RTW89_CN][37] = 127,
+ [0][0][RTW89_UK][37] = 30,
+ [0][0][RTW89_FCC][38] = 64,
[0][0][RTW89_ETSI][38] = 28,
[0][0][RTW89_MKK][38] = 127,
- [0][0][RTW89_IC][38] = 84,
- [0][0][RTW89_ACMA][38] = 84,
- [0][0][RTW89_FCC][40] = 76,
+ [0][0][RTW89_IC][38] = 64,
+ [0][0][RTW89_KCC][38] = 42,
+ [0][0][RTW89_ACMA][38] = 64,
+ [0][0][RTW89_CN][38] = 54,
+ [0][0][RTW89_UK][38] = 30,
+ [0][0][RTW89_FCC][40] = 64,
[0][0][RTW89_ETSI][40] = 28,
[0][0][RTW89_MKK][40] = 127,
- [0][0][RTW89_IC][40] = 84,
- [0][0][RTW89_ACMA][40] = 84,
- [0][0][RTW89_FCC][42] = 76,
+ [0][0][RTW89_IC][40] = 64,
+ [0][0][RTW89_KCC][40] = 42,
+ [0][0][RTW89_ACMA][40] = 64,
+ [0][0][RTW89_CN][40] = 54,
+ [0][0][RTW89_UK][40] = 30,
+ [0][0][RTW89_FCC][42] = 60,
[0][0][RTW89_ETSI][42] = 28,
[0][0][RTW89_MKK][42] = 127,
- [0][0][RTW89_IC][42] = 84,
- [0][0][RTW89_ACMA][42] = 84,
- [0][0][RTW89_FCC][44] = 76,
+ [0][0][RTW89_IC][42] = 60,
+ [0][0][RTW89_KCC][42] = 42,
+ [0][0][RTW89_ACMA][42] = 60,
+ [0][0][RTW89_CN][42] = 54,
+ [0][0][RTW89_UK][42] = 30,
+ [0][0][RTW89_FCC][44] = 60,
[0][0][RTW89_ETSI][44] = 28,
[0][0][RTW89_MKK][44] = 127,
- [0][0][RTW89_IC][44] = 84,
- [0][0][RTW89_ACMA][44] = 84,
- [0][0][RTW89_FCC][46] = 76,
+ [0][0][RTW89_IC][44] = 60,
+ [0][0][RTW89_KCC][44] = 42,
+ [0][0][RTW89_ACMA][44] = 60,
+ [0][0][RTW89_CN][44] = 54,
+ [0][0][RTW89_UK][44] = 30,
+ [0][0][RTW89_FCC][46] = 60,
[0][0][RTW89_ETSI][46] = 28,
[0][0][RTW89_MKK][46] = 127,
- [0][0][RTW89_IC][46] = 84,
- [0][0][RTW89_ACMA][46] = 84,
- [0][0][RTW89_FCC][48] = 24,
+ [0][0][RTW89_IC][46] = 60,
+ [0][0][RTW89_KCC][46] = 42,
+ [0][0][RTW89_ACMA][46] = 60,
+ [0][0][RTW89_CN][46] = 54,
+ [0][0][RTW89_UK][46] = 30,
+ [0][0][RTW89_FCC][48] = 46,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
[0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
- [0][0][RTW89_FCC][50] = 24,
+ [0][0][RTW89_CN][48] = 127,
+ [0][0][RTW89_UK][48] = 127,
+ [0][0][RTW89_FCC][50] = 44,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
[0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
- [0][0][RTW89_FCC][52] = 24,
+ [0][0][RTW89_CN][50] = 127,
+ [0][0][RTW89_UK][50] = 127,
+ [0][0][RTW89_FCC][52] = 34,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
[0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
- [0][1][RTW89_FCC][0] = 26,
+ [0][0][RTW89_CN][52] = 127,
+ [0][0][RTW89_UK][52] = 127,
+ [0][1][RTW89_FCC][0] = 30,
[0][1][RTW89_ETSI][0] = 18,
[0][1][RTW89_MKK][0] = 20,
- [0][1][RTW89_IC][0] = 0,
- [0][1][RTW89_ACMA][0] = 12,
- [0][1][RTW89_FCC][2] = 30,
+ [0][1][RTW89_IC][0] = 8,
+ [0][1][RTW89_KCC][0] = 26,
+ [0][1][RTW89_ACMA][0] = 18,
+ [0][1][RTW89_CN][0] = 4,
+ [0][1][RTW89_UK][0] = 18,
+ [0][1][RTW89_FCC][2] = 32,
[0][1][RTW89_ETSI][2] = 18,
[0][1][RTW89_MKK][2] = 20,
- [0][1][RTW89_IC][2] = 4,
- [0][1][RTW89_ACMA][2] = 12,
- [0][1][RTW89_FCC][4] = 26,
+ [0][1][RTW89_IC][2] = 8,
+ [0][1][RTW89_KCC][2] = 26,
+ [0][1][RTW89_ACMA][2] = 18,
+ [0][1][RTW89_CN][2] = 4,
+ [0][1][RTW89_UK][2] = 18,
+ [0][1][RTW89_FCC][4] = 30,
[0][1][RTW89_ETSI][4] = 18,
[0][1][RTW89_MKK][4] = 8,
- [0][1][RTW89_IC][4] = 0,
- [0][1][RTW89_ACMA][4] = 12,
- [0][1][RTW89_FCC][6] = 26,
+ [0][1][RTW89_IC][4] = 8,
+ [0][1][RTW89_KCC][4] = 26,
+ [0][1][RTW89_ACMA][4] = 18,
+ [0][1][RTW89_CN][4] = 4,
+ [0][1][RTW89_UK][4] = 18,
+ [0][1][RTW89_FCC][6] = 30,
[0][1][RTW89_ETSI][6] = 18,
[0][1][RTW89_MKK][6] = 8,
- [0][1][RTW89_IC][6] = 0,
- [0][1][RTW89_ACMA][6] = 12,
- [0][1][RTW89_FCC][8] = 26,
+ [0][1][RTW89_IC][6] = 8,
+ [0][1][RTW89_KCC][6] = 0,
+ [0][1][RTW89_ACMA][6] = 18,
+ [0][1][RTW89_CN][6] = 4,
+ [0][1][RTW89_UK][6] = 18,
+ [0][1][RTW89_FCC][8] = 30,
[0][1][RTW89_ETSI][8] = 16,
[0][1][RTW89_MKK][8] = 20,
- [0][1][RTW89_IC][8] = 34,
- [0][1][RTW89_ACMA][8] = 12,
- [0][1][RTW89_FCC][10] = 26,
+ [0][1][RTW89_IC][8] = 30,
+ [0][1][RTW89_KCC][8] = 28,
+ [0][1][RTW89_ACMA][8] = 16,
+ [0][1][RTW89_CN][8] = 4,
+ [0][1][RTW89_UK][8] = 16,
+ [0][1][RTW89_FCC][10] = 30,
[0][1][RTW89_ETSI][10] = 16,
[0][1][RTW89_MKK][10] = 20,
- [0][1][RTW89_IC][10] = 34,
- [0][1][RTW89_ACMA][10] = 12,
+ [0][1][RTW89_IC][10] = 30,
+ [0][1][RTW89_KCC][10] = 28,
+ [0][1][RTW89_ACMA][10] = 16,
+ [0][1][RTW89_CN][10] = 4,
+ [0][1][RTW89_UK][10] = 16,
[0][1][RTW89_FCC][12] = 30,
[0][1][RTW89_ETSI][12] = 16,
[0][1][RTW89_MKK][12] = 34,
- [0][1][RTW89_IC][12] = 38,
- [0][1][RTW89_ACMA][12] = 12,
- [0][1][RTW89_FCC][14] = 26,
+ [0][1][RTW89_IC][12] = 30,
+ [0][1][RTW89_KCC][12] = 28,
+ [0][1][RTW89_ACMA][12] = 16,
+ [0][1][RTW89_CN][12] = 4,
+ [0][1][RTW89_UK][12] = 16,
+ [0][1][RTW89_FCC][14] = 30,
[0][1][RTW89_ETSI][14] = 16,
[0][1][RTW89_MKK][14] = 34,
- [0][1][RTW89_IC][14] = 34,
- [0][1][RTW89_ACMA][14] = 12,
- [0][1][RTW89_FCC][15] = 26,
+ [0][1][RTW89_IC][14] = 30,
+ [0][1][RTW89_KCC][14] = 28,
+ [0][1][RTW89_ACMA][14] = 16,
+ [0][1][RTW89_CN][14] = 4,
+ [0][1][RTW89_UK][14] = 16,
+ [0][1][RTW89_FCC][15] = 32,
[0][1][RTW89_ETSI][15] = 18,
[0][1][RTW89_MKK][15] = 44,
- [0][1][RTW89_IC][15] = 34,
- [0][1][RTW89_ACMA][15] = 12,
- [0][1][RTW89_FCC][17] = 26,
+ [0][1][RTW89_IC][15] = 32,
+ [0][1][RTW89_KCC][15] = 28,
+ [0][1][RTW89_ACMA][15] = 18,
+ [0][1][RTW89_CN][15] = 127,
+ [0][1][RTW89_UK][15] = 18,
+ [0][1][RTW89_FCC][17] = 32,
[0][1][RTW89_ETSI][17] = 18,
[0][1][RTW89_MKK][17] = 44,
- [0][1][RTW89_IC][17] = 34,
- [0][1][RTW89_ACMA][17] = 12,
- [0][1][RTW89_FCC][19] = 30,
+ [0][1][RTW89_IC][17] = 32,
+ [0][1][RTW89_KCC][17] = 28,
+ [0][1][RTW89_ACMA][17] = 18,
+ [0][1][RTW89_CN][17] = 127,
+ [0][1][RTW89_UK][17] = 18,
+ [0][1][RTW89_FCC][19] = 32,
[0][1][RTW89_ETSI][19] = 18,
[0][1][RTW89_MKK][19] = 44,
- [0][1][RTW89_IC][19] = 38,
- [0][1][RTW89_ACMA][19] = 12,
- [0][1][RTW89_FCC][21] = 30,
+ [0][1][RTW89_IC][19] = 32,
+ [0][1][RTW89_KCC][19] = 28,
+ [0][1][RTW89_ACMA][19] = 18,
+ [0][1][RTW89_CN][19] = 127,
+ [0][1][RTW89_UK][19] = 18,
+ [0][1][RTW89_FCC][21] = 32,
[0][1][RTW89_ETSI][21] = 18,
[0][1][RTW89_MKK][21] = 44,
- [0][1][RTW89_IC][21] = 38,
- [0][1][RTW89_ACMA][21] = 12,
- [0][1][RTW89_FCC][23] = 30,
+ [0][1][RTW89_IC][21] = 32,
+ [0][1][RTW89_KCC][21] = 28,
+ [0][1][RTW89_ACMA][21] = 18,
+ [0][1][RTW89_CN][21] = 127,
+ [0][1][RTW89_UK][21] = 18,
+ [0][1][RTW89_FCC][23] = 32,
[0][1][RTW89_ETSI][23] = 18,
[0][1][RTW89_MKK][23] = 44,
- [0][1][RTW89_IC][23] = 38,
- [0][1][RTW89_ACMA][23] = 12,
- [0][1][RTW89_FCC][25] = 30,
+ [0][1][RTW89_IC][23] = 32,
+ [0][1][RTW89_KCC][23] = 28,
+ [0][1][RTW89_ACMA][23] = 18,
+ [0][1][RTW89_CN][23] = 127,
+ [0][1][RTW89_UK][23] = 18,
+ [0][1][RTW89_FCC][25] = 32,
[0][1][RTW89_ETSI][25] = 18,
[0][1][RTW89_MKK][25] = 44,
[0][1][RTW89_IC][25] = 127,
+ [0][1][RTW89_KCC][25] = 28,
[0][1][RTW89_ACMA][25] = 127,
- [0][1][RTW89_FCC][27] = 30,
+ [0][1][RTW89_CN][25] = 127,
+ [0][1][RTW89_UK][25] = 18,
+ [0][1][RTW89_FCC][27] = 32,
[0][1][RTW89_ETSI][27] = 16,
[0][1][RTW89_MKK][27] = 44,
[0][1][RTW89_IC][27] = 127,
+ [0][1][RTW89_KCC][27] = 28,
[0][1][RTW89_ACMA][27] = 127,
- [0][1][RTW89_FCC][29] = 30,
+ [0][1][RTW89_CN][27] = 127,
+ [0][1][RTW89_UK][27] = 16,
+ [0][1][RTW89_FCC][29] = 32,
[0][1][RTW89_ETSI][29] = 16,
[0][1][RTW89_MKK][29] = 44,
[0][1][RTW89_IC][29] = 127,
+ [0][1][RTW89_KCC][29] = 28,
[0][1][RTW89_ACMA][29] = 127,
- [0][1][RTW89_FCC][31] = 30,
+ [0][1][RTW89_CN][29] = 127,
+ [0][1][RTW89_UK][29] = 16,
+ [0][1][RTW89_FCC][31] = 32,
[0][1][RTW89_ETSI][31] = 16,
[0][1][RTW89_MKK][31] = 44,
- [0][1][RTW89_IC][31] = 34,
- [0][1][RTW89_ACMA][31] = 12,
- [0][1][RTW89_FCC][33] = 26,
+ [0][1][RTW89_IC][31] = 30,
+ [0][1][RTW89_KCC][31] = 28,
+ [0][1][RTW89_ACMA][31] = 16,
+ [0][1][RTW89_CN][31] = 127,
+ [0][1][RTW89_UK][31] = 16,
+ [0][1][RTW89_FCC][33] = 30,
[0][1][RTW89_ETSI][33] = 16,
[0][1][RTW89_MKK][33] = 44,
- [0][1][RTW89_IC][33] = 34,
- [0][1][RTW89_ACMA][33] = 12,
- [0][1][RTW89_FCC][35] = 26,
+ [0][1][RTW89_IC][33] = 30,
+ [0][1][RTW89_KCC][33] = 28,
+ [0][1][RTW89_ACMA][33] = 16,
+ [0][1][RTW89_CN][33] = 127,
+ [0][1][RTW89_UK][33] = 16,
+ [0][1][RTW89_FCC][35] = 30,
[0][1][RTW89_ETSI][35] = 16,
[0][1][RTW89_MKK][35] = 44,
- [0][1][RTW89_IC][35] = 34,
- [0][1][RTW89_ACMA][35] = 12,
- [0][1][RTW89_FCC][37] = 30,
+ [0][1][RTW89_IC][35] = 30,
+ [0][1][RTW89_KCC][35] = 28,
+ [0][1][RTW89_ACMA][35] = 16,
+ [0][1][RTW89_CN][35] = 127,
+ [0][1][RTW89_UK][35] = 16,
+ [0][1][RTW89_FCC][37] = 34,
[0][1][RTW89_ETSI][37] = 127,
[0][1][RTW89_MKK][37] = 44,
- [0][1][RTW89_IC][37] = 38,
- [0][1][RTW89_ACMA][37] = 38,
- [0][1][RTW89_FCC][38] = 74,
+ [0][1][RTW89_IC][37] = 34,
+ [0][1][RTW89_KCC][37] = 28,
+ [0][1][RTW89_ACMA][37] = 34,
+ [0][1][RTW89_CN][37] = 127,
+ [0][1][RTW89_UK][37] = 18,
+ [0][1][RTW89_FCC][38] = 62,
[0][1][RTW89_ETSI][38] = 16,
[0][1][RTW89_MKK][38] = 127,
- [0][1][RTW89_IC][38] = 82,
- [0][1][RTW89_ACMA][38] = 84,
- [0][1][RTW89_FCC][40] = 74,
+ [0][1][RTW89_IC][38] = 62,
+ [0][1][RTW89_KCC][38] = 28,
+ [0][1][RTW89_ACMA][38] = 62,
+ [0][1][RTW89_CN][38] = 42,
+ [0][1][RTW89_UK][38] = 18,
+ [0][1][RTW89_FCC][40] = 62,
[0][1][RTW89_ETSI][40] = 16,
[0][1][RTW89_MKK][40] = 127,
- [0][1][RTW89_IC][40] = 82,
- [0][1][RTW89_ACMA][40] = 84,
- [0][1][RTW89_FCC][42] = 74,
+ [0][1][RTW89_IC][40] = 62,
+ [0][1][RTW89_KCC][40] = 28,
+ [0][1][RTW89_ACMA][40] = 62,
+ [0][1][RTW89_CN][40] = 42,
+ [0][1][RTW89_UK][40] = 18,
+ [0][1][RTW89_FCC][42] = 58,
[0][1][RTW89_ETSI][42] = 16,
[0][1][RTW89_MKK][42] = 127,
- [0][1][RTW89_IC][42] = 82,
- [0][1][RTW89_ACMA][42] = 84,
- [0][1][RTW89_FCC][44] = 74,
+ [0][1][RTW89_IC][42] = 58,
+ [0][1][RTW89_KCC][42] = 28,
+ [0][1][RTW89_ACMA][42] = 58,
+ [0][1][RTW89_CN][42] = 42,
+ [0][1][RTW89_UK][42] = 18,
+ [0][1][RTW89_FCC][44] = 56,
[0][1][RTW89_ETSI][44] = 16,
[0][1][RTW89_MKK][44] = 127,
- [0][1][RTW89_IC][44] = 82,
- [0][1][RTW89_ACMA][44] = 84,
- [0][1][RTW89_FCC][46] = 74,
+ [0][1][RTW89_IC][44] = 56,
+ [0][1][RTW89_KCC][44] = 28,
+ [0][1][RTW89_ACMA][44] = 56,
+ [0][1][RTW89_CN][44] = 42,
+ [0][1][RTW89_UK][44] = 18,
+ [0][1][RTW89_FCC][46] = 56,
[0][1][RTW89_ETSI][46] = 16,
[0][1][RTW89_MKK][46] = 127,
- [0][1][RTW89_IC][46] = 82,
- [0][1][RTW89_ACMA][46] = 84,
- [0][1][RTW89_FCC][48] = 12,
+ [0][1][RTW89_IC][46] = 56,
+ [0][1][RTW89_KCC][46] = 28,
+ [0][1][RTW89_ACMA][46] = 56,
+ [0][1][RTW89_CN][46] = 42,
+ [0][1][RTW89_UK][46] = 18,
+ [0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
[0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
- [0][1][RTW89_FCC][50] = 12,
+ [0][1][RTW89_CN][48] = 127,
+ [0][1][RTW89_UK][48] = 127,
+ [0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
[0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
- [0][1][RTW89_FCC][52] = 12,
+ [0][1][RTW89_CN][50] = 127,
+ [0][1][RTW89_UK][50] = 127,
+ [0][1][RTW89_FCC][52] = 8,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
[0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
- [1][0][RTW89_FCC][0] = 54,
+ [0][1][RTW89_CN][52] = 127,
+ [0][1][RTW89_UK][52] = 127,
+ [1][0][RTW89_FCC][0] = 62,
[1][0][RTW89_ETSI][0] = 40,
[1][0][RTW89_MKK][0] = 48,
- [1][0][RTW89_IC][0] = 36,
- [1][0][RTW89_ACMA][0] = 34,
- [1][0][RTW89_FCC][2] = 54,
+ [1][0][RTW89_IC][0] = 42,
+ [1][0][RTW89_KCC][0] = 50,
+ [1][0][RTW89_ACMA][0] = 40,
+ [1][0][RTW89_CN][0] = 26,
+ [1][0][RTW89_UK][0] = 40,
+ [1][0][RTW89_FCC][2] = 62,
[1][0][RTW89_ETSI][2] = 40,
[1][0][RTW89_MKK][2] = 48,
- [1][0][RTW89_IC][2] = 36,
- [1][0][RTW89_ACMA][2] = 34,
- [1][0][RTW89_FCC][4] = 54,
+ [1][0][RTW89_IC][2] = 42,
+ [1][0][RTW89_KCC][2] = 50,
+ [1][0][RTW89_ACMA][2] = 40,
+ [1][0][RTW89_CN][2] = 26,
+ [1][0][RTW89_UK][2] = 40,
+ [1][0][RTW89_FCC][4] = 64,
[1][0][RTW89_ETSI][4] = 40,
[1][0][RTW89_MKK][4] = 40,
- [1][0][RTW89_IC][4] = 36,
- [1][0][RTW89_ACMA][4] = 34,
- [1][0][RTW89_FCC][6] = 54,
+ [1][0][RTW89_IC][4] = 42,
+ [1][0][RTW89_KCC][4] = 50,
+ [1][0][RTW89_ACMA][4] = 40,
+ [1][0][RTW89_CN][4] = 26,
+ [1][0][RTW89_UK][4] = 40,
+ [1][0][RTW89_FCC][6] = 64,
[1][0][RTW89_ETSI][6] = 40,
[1][0][RTW89_MKK][6] = 40,
- [1][0][RTW89_IC][6] = 36,
- [1][0][RTW89_ACMA][6] = 34,
- [1][0][RTW89_FCC][8] = 54,
+ [1][0][RTW89_IC][6] = 42,
+ [1][0][RTW89_KCC][6] = 24,
+ [1][0][RTW89_ACMA][6] = 40,
+ [1][0][RTW89_CN][6] = 26,
+ [1][0][RTW89_UK][6] = 40,
+ [1][0][RTW89_FCC][8] = 62,
[1][0][RTW89_ETSI][8] = 40,
[1][0][RTW89_MKK][8] = 34,
[1][0][RTW89_IC][8] = 62,
- [1][0][RTW89_ACMA][8] = 34,
- [1][0][RTW89_FCC][10] = 54,
+ [1][0][RTW89_KCC][8] = 52,
+ [1][0][RTW89_ACMA][8] = 40,
+ [1][0][RTW89_CN][8] = 26,
+ [1][0][RTW89_UK][8] = 40,
+ [1][0][RTW89_FCC][10] = 62,
[1][0][RTW89_ETSI][10] = 40,
[1][0][RTW89_MKK][10] = 34,
[1][0][RTW89_IC][10] = 62,
- [1][0][RTW89_ACMA][10] = 34,
- [1][0][RTW89_FCC][12] = 56,
+ [1][0][RTW89_KCC][10] = 52,
+ [1][0][RTW89_ACMA][10] = 40,
+ [1][0][RTW89_CN][10] = 26,
+ [1][0][RTW89_UK][10] = 40,
+ [1][0][RTW89_FCC][12] = 62,
[1][0][RTW89_ETSI][12] = 40,
[1][0][RTW89_MKK][12] = 46,
- [1][0][RTW89_IC][12] = 64,
- [1][0][RTW89_ACMA][12] = 34,
- [1][0][RTW89_FCC][14] = 54,
+ [1][0][RTW89_IC][12] = 62,
+ [1][0][RTW89_KCC][12] = 52,
+ [1][0][RTW89_ACMA][12] = 40,
+ [1][0][RTW89_CN][12] = 26,
+ [1][0][RTW89_UK][12] = 40,
+ [1][0][RTW89_FCC][14] = 62,
[1][0][RTW89_ETSI][14] = 40,
[1][0][RTW89_MKK][14] = 46,
[1][0][RTW89_IC][14] = 62,
- [1][0][RTW89_ACMA][14] = 34,
- [1][0][RTW89_FCC][15] = 54,
+ [1][0][RTW89_KCC][14] = 52,
+ [1][0][RTW89_ACMA][14] = 40,
+ [1][0][RTW89_CN][14] = 26,
+ [1][0][RTW89_UK][14] = 40,
+ [1][0][RTW89_FCC][15] = 62,
[1][0][RTW89_ETSI][15] = 40,
[1][0][RTW89_MKK][15] = 62,
[1][0][RTW89_IC][15] = 62,
- [1][0][RTW89_ACMA][15] = 34,
- [1][0][RTW89_FCC][17] = 54,
+ [1][0][RTW89_KCC][15] = 52,
+ [1][0][RTW89_ACMA][15] = 40,
+ [1][0][RTW89_CN][15] = 127,
+ [1][0][RTW89_UK][15] = 40,
+ [1][0][RTW89_FCC][17] = 62,
[1][0][RTW89_ETSI][17] = 40,
[1][0][RTW89_MKK][17] = 68,
[1][0][RTW89_IC][17] = 62,
- [1][0][RTW89_ACMA][17] = 34,
- [1][0][RTW89_FCC][19] = 54,
+ [1][0][RTW89_KCC][17] = 52,
+ [1][0][RTW89_ACMA][17] = 40,
+ [1][0][RTW89_CN][17] = 127,
+ [1][0][RTW89_UK][17] = 40,
+ [1][0][RTW89_FCC][19] = 64,
[1][0][RTW89_ETSI][19] = 40,
[1][0][RTW89_MKK][19] = 68,
- [1][0][RTW89_IC][19] = 62,
- [1][0][RTW89_ACMA][19] = 34,
- [1][0][RTW89_FCC][21] = 54,
+ [1][0][RTW89_IC][19] = 64,
+ [1][0][RTW89_KCC][19] = 52,
+ [1][0][RTW89_ACMA][19] = 40,
+ [1][0][RTW89_CN][19] = 127,
+ [1][0][RTW89_UK][19] = 40,
+ [1][0][RTW89_FCC][21] = 64,
[1][0][RTW89_ETSI][21] = 40,
[1][0][RTW89_MKK][21] = 68,
- [1][0][RTW89_IC][21] = 62,
- [1][0][RTW89_ACMA][21] = 34,
- [1][0][RTW89_FCC][23] = 54,
+ [1][0][RTW89_IC][21] = 64,
+ [1][0][RTW89_KCC][21] = 52,
+ [1][0][RTW89_ACMA][21] = 40,
+ [1][0][RTW89_CN][21] = 127,
+ [1][0][RTW89_UK][21] = 40,
+ [1][0][RTW89_FCC][23] = 64,
[1][0][RTW89_ETSI][23] = 40,
[1][0][RTW89_MKK][23] = 68,
- [1][0][RTW89_IC][23] = 62,
- [1][0][RTW89_ACMA][23] = 34,
- [1][0][RTW89_FCC][25] = 54,
+ [1][0][RTW89_IC][23] = 64,
+ [1][0][RTW89_KCC][23] = 52,
+ [1][0][RTW89_ACMA][23] = 40,
+ [1][0][RTW89_CN][23] = 127,
+ [1][0][RTW89_UK][23] = 40,
+ [1][0][RTW89_FCC][25] = 64,
[1][0][RTW89_ETSI][25] = 40,
[1][0][RTW89_MKK][25] = 68,
[1][0][RTW89_IC][25] = 127,
+ [1][0][RTW89_KCC][25] = 52,
[1][0][RTW89_ACMA][25] = 127,
- [1][0][RTW89_FCC][27] = 54,
+ [1][0][RTW89_CN][25] = 127,
+ [1][0][RTW89_UK][25] = 40,
+ [1][0][RTW89_FCC][27] = 64,
[1][0][RTW89_ETSI][27] = 42,
[1][0][RTW89_MKK][27] = 68,
[1][0][RTW89_IC][27] = 127,
+ [1][0][RTW89_KCC][27] = 52,
[1][0][RTW89_ACMA][27] = 127,
- [1][0][RTW89_FCC][29] = 54,
+ [1][0][RTW89_CN][27] = 127,
+ [1][0][RTW89_UK][27] = 42,
+ [1][0][RTW89_FCC][29] = 64,
[1][0][RTW89_ETSI][29] = 42,
[1][0][RTW89_MKK][29] = 68,
[1][0][RTW89_IC][29] = 127,
+ [1][0][RTW89_KCC][29] = 52,
[1][0][RTW89_ACMA][29] = 127,
- [1][0][RTW89_FCC][31] = 54,
+ [1][0][RTW89_CN][29] = 127,
+ [1][0][RTW89_UK][29] = 42,
+ [1][0][RTW89_FCC][31] = 64,
[1][0][RTW89_ETSI][31] = 42,
[1][0][RTW89_MKK][31] = 68,
- [1][0][RTW89_IC][31] = 62,
- [1][0][RTW89_ACMA][31] = 34,
- [1][0][RTW89_FCC][33] = 54,
+ [1][0][RTW89_IC][31] = 56,
+ [1][0][RTW89_KCC][31] = 52,
+ [1][0][RTW89_ACMA][31] = 42,
+ [1][0][RTW89_CN][31] = 127,
+ [1][0][RTW89_UK][31] = 42,
+ [1][0][RTW89_FCC][33] = 56,
[1][0][RTW89_ETSI][33] = 42,
[1][0][RTW89_MKK][33] = 68,
- [1][0][RTW89_IC][33] = 62,
- [1][0][RTW89_ACMA][33] = 34,
- [1][0][RTW89_FCC][35] = 54,
+ [1][0][RTW89_IC][33] = 56,
+ [1][0][RTW89_KCC][33] = 52,
+ [1][0][RTW89_ACMA][33] = 42,
+ [1][0][RTW89_CN][33] = 127,
+ [1][0][RTW89_UK][33] = 42,
+ [1][0][RTW89_FCC][35] = 56,
[1][0][RTW89_ETSI][35] = 42,
[1][0][RTW89_MKK][35] = 68,
- [1][0][RTW89_IC][35] = 62,
- [1][0][RTW89_ACMA][35] = 34,
- [1][0][RTW89_FCC][37] = 56,
+ [1][0][RTW89_IC][35] = 56,
+ [1][0][RTW89_KCC][35] = 52,
+ [1][0][RTW89_ACMA][35] = 42,
+ [1][0][RTW89_CN][35] = 127,
+ [1][0][RTW89_UK][35] = 42,
+ [1][0][RTW89_FCC][37] = 66,
[1][0][RTW89_ETSI][37] = 127,
[1][0][RTW89_MKK][37] = 68,
- [1][0][RTW89_IC][37] = 64,
- [1][0][RTW89_ACMA][37] = 64,
+ [1][0][RTW89_IC][37] = 66,
+ [1][0][RTW89_KCC][37] = 52,
+ [1][0][RTW89_ACMA][37] = 66,
+ [1][0][RTW89_CN][37] = 127,
+ [1][0][RTW89_UK][37] = 42,
[1][0][RTW89_FCC][38] = 76,
[1][0][RTW89_ETSI][38] = 28,
[1][0][RTW89_MKK][38] = 127,
- [1][0][RTW89_IC][38] = 84,
- [1][0][RTW89_ACMA][38] = 84,
+ [1][0][RTW89_IC][38] = 76,
+ [1][0][RTW89_KCC][38] = 54,
+ [1][0][RTW89_ACMA][38] = 76,
+ [1][0][RTW89_CN][38] = 66,
+ [1][0][RTW89_UK][38] = 44,
[1][0][RTW89_FCC][40] = 76,
[1][0][RTW89_ETSI][40] = 28,
[1][0][RTW89_MKK][40] = 127,
- [1][0][RTW89_IC][40] = 84,
- [1][0][RTW89_ACMA][40] = 84,
- [1][0][RTW89_FCC][42] = 76,
+ [1][0][RTW89_IC][40] = 76,
+ [1][0][RTW89_KCC][40] = 54,
+ [1][0][RTW89_ACMA][40] = 76,
+ [1][0][RTW89_CN][40] = 66,
+ [1][0][RTW89_UK][40] = 44,
+ [1][0][RTW89_FCC][42] = 68,
[1][0][RTW89_ETSI][42] = 28,
[1][0][RTW89_MKK][42] = 127,
- [1][0][RTW89_IC][42] = 84,
- [1][0][RTW89_ACMA][42] = 84,
- [1][0][RTW89_FCC][44] = 76,
+ [1][0][RTW89_IC][42] = 68,
+ [1][0][RTW89_KCC][42] = 54,
+ [1][0][RTW89_ACMA][42] = 68,
+ [1][0][RTW89_CN][42] = 66,
+ [1][0][RTW89_UK][42] = 44,
+ [1][0][RTW89_FCC][44] = 70,
[1][0][RTW89_ETSI][44] = 28,
[1][0][RTW89_MKK][44] = 127,
- [1][0][RTW89_IC][44] = 84,
- [1][0][RTW89_ACMA][44] = 84,
- [1][0][RTW89_FCC][46] = 76,
+ [1][0][RTW89_IC][44] = 70,
+ [1][0][RTW89_KCC][44] = 54,
+ [1][0][RTW89_ACMA][44] = 70,
+ [1][0][RTW89_CN][44] = 66,
+ [1][0][RTW89_UK][44] = 42,
+ [1][0][RTW89_FCC][46] = 70,
[1][0][RTW89_ETSI][46] = 28,
[1][0][RTW89_MKK][46] = 127,
- [1][0][RTW89_IC][46] = 84,
- [1][0][RTW89_ACMA][46] = 84,
- [1][0][RTW89_FCC][48] = 36,
+ [1][0][RTW89_IC][46] = 70,
+ [1][0][RTW89_KCC][46] = 54,
+ [1][0][RTW89_ACMA][46] = 70,
+ [1][0][RTW89_CN][46] = 66,
+ [1][0][RTW89_UK][46] = 42,
+ [1][0][RTW89_FCC][48] = 56,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
[1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
- [1][0][RTW89_FCC][50] = 36,
+ [1][0][RTW89_CN][48] = 127,
+ [1][0][RTW89_UK][48] = 127,
+ [1][0][RTW89_FCC][50] = 58,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
[1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
- [1][0][RTW89_FCC][52] = 36,
+ [1][0][RTW89_CN][50] = 127,
+ [1][0][RTW89_UK][50] = 127,
+ [1][0][RTW89_FCC][52] = 56,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
[1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
- [1][1][RTW89_FCC][0] = 34,
+ [1][0][RTW89_CN][52] = 127,
+ [1][0][RTW89_UK][52] = 127,
+ [1][1][RTW89_FCC][0] = 44,
[1][1][RTW89_ETSI][0] = 30,
[1][1][RTW89_MKK][0] = 34,
- [1][1][RTW89_IC][0] = 10,
- [1][1][RTW89_ACMA][0] = 22,
- [1][1][RTW89_FCC][2] = 36,
+ [1][1][RTW89_IC][0] = 20,
+ [1][1][RTW89_KCC][0] = 34,
+ [1][1][RTW89_ACMA][0] = 30,
+ [1][1][RTW89_CN][0] = 14,
+ [1][1][RTW89_UK][0] = 30,
+ [1][1][RTW89_FCC][2] = 44,
[1][1][RTW89_ETSI][2] = 30,
[1][1][RTW89_MKK][2] = 34,
- [1][1][RTW89_IC][2] = 14,
- [1][1][RTW89_ACMA][2] = 22,
- [1][1][RTW89_FCC][4] = 34,
+ [1][1][RTW89_IC][2] = 18,
+ [1][1][RTW89_KCC][2] = 34,
+ [1][1][RTW89_ACMA][2] = 30,
+ [1][1][RTW89_CN][2] = 14,
+ [1][1][RTW89_UK][2] = 30,
+ [1][1][RTW89_FCC][4] = 46,
[1][1][RTW89_ETSI][4] = 30,
[1][1][RTW89_MKK][4] = 26,
- [1][1][RTW89_IC][4] = 10,
- [1][1][RTW89_ACMA][4] = 22,
- [1][1][RTW89_FCC][6] = 34,
+ [1][1][RTW89_IC][4] = 20,
+ [1][1][RTW89_KCC][4] = 34,
+ [1][1][RTW89_ACMA][4] = 30,
+ [1][1][RTW89_CN][4] = 14,
+ [1][1][RTW89_UK][4] = 30,
+ [1][1][RTW89_FCC][6] = 46,
[1][1][RTW89_ETSI][6] = 30,
[1][1][RTW89_MKK][6] = 26,
- [1][1][RTW89_IC][6] = 10,
- [1][1][RTW89_ACMA][6] = 22,
- [1][1][RTW89_FCC][8] = 36,
+ [1][1][RTW89_IC][6] = 20,
+ [1][1][RTW89_KCC][6] = 8,
+ [1][1][RTW89_ACMA][6] = 30,
+ [1][1][RTW89_CN][6] = 14,
+ [1][1][RTW89_UK][6] = 30,
+ [1][1][RTW89_FCC][8] = 44,
[1][1][RTW89_ETSI][8] = 30,
[1][1][RTW89_MKK][8] = 20,
[1][1][RTW89_IC][8] = 44,
- [1][1][RTW89_ACMA][8] = 22,
- [1][1][RTW89_FCC][10] = 36,
+ [1][1][RTW89_KCC][8] = 34,
+ [1][1][RTW89_ACMA][8] = 30,
+ [1][1][RTW89_CN][8] = 14,
+ [1][1][RTW89_UK][8] = 30,
+ [1][1][RTW89_FCC][10] = 44,
[1][1][RTW89_ETSI][10] = 30,
[1][1][RTW89_MKK][10] = 20,
[1][1][RTW89_IC][10] = 44,
- [1][1][RTW89_ACMA][10] = 22,
- [1][1][RTW89_FCC][12] = 38,
+ [1][1][RTW89_KCC][10] = 34,
+ [1][1][RTW89_ACMA][10] = 30,
+ [1][1][RTW89_CN][10] = 14,
+ [1][1][RTW89_UK][10] = 30,
+ [1][1][RTW89_FCC][12] = 44,
[1][1][RTW89_ETSI][12] = 30,
[1][1][RTW89_MKK][12] = 34,
- [1][1][RTW89_IC][12] = 46,
- [1][1][RTW89_ACMA][12] = 22,
- [1][1][RTW89_FCC][14] = 34,
+ [1][1][RTW89_IC][12] = 44,
+ [1][1][RTW89_KCC][12] = 38,
+ [1][1][RTW89_ACMA][12] = 30,
+ [1][1][RTW89_CN][12] = 14,
+ [1][1][RTW89_UK][12] = 30,
+ [1][1][RTW89_FCC][14] = 44,
[1][1][RTW89_ETSI][14] = 30,
[1][1][RTW89_MKK][14] = 34,
- [1][1][RTW89_IC][14] = 40,
- [1][1][RTW89_ACMA][14] = 22,
- [1][1][RTW89_FCC][15] = 34,
+ [1][1][RTW89_IC][14] = 44,
+ [1][1][RTW89_KCC][14] = 38,
+ [1][1][RTW89_ACMA][14] = 30,
+ [1][1][RTW89_CN][14] = 14,
+ [1][1][RTW89_UK][14] = 30,
+ [1][1][RTW89_FCC][15] = 44,
[1][1][RTW89_ETSI][15] = 28,
[1][1][RTW89_MKK][15] = 56,
- [1][1][RTW89_IC][15] = 42,
- [1][1][RTW89_ACMA][15] = 22,
- [1][1][RTW89_FCC][17] = 34,
+ [1][1][RTW89_IC][15] = 44,
+ [1][1][RTW89_KCC][15] = 36,
+ [1][1][RTW89_ACMA][15] = 28,
+ [1][1][RTW89_CN][15] = 127,
+ [1][1][RTW89_UK][15] = 28,
+ [1][1][RTW89_FCC][17] = 44,
[1][1][RTW89_ETSI][17] = 28,
[1][1][RTW89_MKK][17] = 58,
- [1][1][RTW89_IC][17] = 42,
- [1][1][RTW89_ACMA][17] = 22,
- [1][1][RTW89_FCC][19] = 34,
+ [1][1][RTW89_IC][17] = 44,
+ [1][1][RTW89_KCC][17] = 36,
+ [1][1][RTW89_ACMA][17] = 28,
+ [1][1][RTW89_CN][17] = 127,
+ [1][1][RTW89_UK][17] = 28,
+ [1][1][RTW89_FCC][19] = 44,
[1][1][RTW89_ETSI][19] = 28,
[1][1][RTW89_MKK][19] = 58,
- [1][1][RTW89_IC][19] = 42,
- [1][1][RTW89_ACMA][19] = 22,
- [1][1][RTW89_FCC][21] = 34,
+ [1][1][RTW89_IC][19] = 44,
+ [1][1][RTW89_KCC][19] = 36,
+ [1][1][RTW89_ACMA][19] = 28,
+ [1][1][RTW89_CN][19] = 127,
+ [1][1][RTW89_UK][19] = 28,
+ [1][1][RTW89_FCC][21] = 44,
[1][1][RTW89_ETSI][21] = 28,
[1][1][RTW89_MKK][21] = 58,
- [1][1][RTW89_IC][21] = 42,
- [1][1][RTW89_ACMA][21] = 22,
- [1][1][RTW89_FCC][23] = 34,
+ [1][1][RTW89_IC][21] = 44,
+ [1][1][RTW89_KCC][21] = 36,
+ [1][1][RTW89_ACMA][21] = 28,
+ [1][1][RTW89_CN][21] = 127,
+ [1][1][RTW89_UK][21] = 28,
+ [1][1][RTW89_FCC][23] = 44,
[1][1][RTW89_ETSI][23] = 28,
[1][1][RTW89_MKK][23] = 58,
- [1][1][RTW89_IC][23] = 42,
- [1][1][RTW89_ACMA][23] = 22,
- [1][1][RTW89_FCC][25] = 34,
+ [1][1][RTW89_IC][23] = 44,
+ [1][1][RTW89_KCC][23] = 36,
+ [1][1][RTW89_ACMA][23] = 28,
+ [1][1][RTW89_CN][23] = 127,
+ [1][1][RTW89_UK][23] = 28,
+ [1][1][RTW89_FCC][25] = 44,
[1][1][RTW89_ETSI][25] = 28,
[1][1][RTW89_MKK][25] = 58,
[1][1][RTW89_IC][25] = 127,
+ [1][1][RTW89_KCC][25] = 36,
[1][1][RTW89_ACMA][25] = 127,
- [1][1][RTW89_FCC][27] = 34,
+ [1][1][RTW89_CN][25] = 127,
+ [1][1][RTW89_UK][25] = 28,
+ [1][1][RTW89_FCC][27] = 44,
[1][1][RTW89_ETSI][27] = 30,
[1][1][RTW89_MKK][27] = 58,
[1][1][RTW89_IC][27] = 127,
+ [1][1][RTW89_KCC][27] = 36,
[1][1][RTW89_ACMA][27] = 127,
- [1][1][RTW89_FCC][29] = 34,
+ [1][1][RTW89_CN][27] = 127,
+ [1][1][RTW89_UK][27] = 30,
+ [1][1][RTW89_FCC][29] = 44,
[1][1][RTW89_ETSI][29] = 30,
[1][1][RTW89_MKK][29] = 58,
[1][1][RTW89_IC][29] = 127,
+ [1][1][RTW89_KCC][29] = 36,
[1][1][RTW89_ACMA][29] = 127,
- [1][1][RTW89_FCC][31] = 34,
+ [1][1][RTW89_CN][29] = 127,
+ [1][1][RTW89_UK][29] = 30,
+ [1][1][RTW89_FCC][31] = 44,
[1][1][RTW89_ETSI][31] = 30,
[1][1][RTW89_MKK][31] = 58,
[1][1][RTW89_IC][31] = 38,
- [1][1][RTW89_ACMA][31] = 22,
- [1][1][RTW89_FCC][33] = 32,
+ [1][1][RTW89_KCC][31] = 36,
+ [1][1][RTW89_ACMA][31] = 30,
+ [1][1][RTW89_CN][31] = 127,
+ [1][1][RTW89_UK][31] = 30,
+ [1][1][RTW89_FCC][33] = 38,
[1][1][RTW89_ETSI][33] = 30,
[1][1][RTW89_MKK][33] = 58,
[1][1][RTW89_IC][33] = 38,
- [1][1][RTW89_ACMA][33] = 22,
- [1][1][RTW89_FCC][35] = 32,
+ [1][1][RTW89_KCC][33] = 36,
+ [1][1][RTW89_ACMA][33] = 30,
+ [1][1][RTW89_CN][33] = 127,
+ [1][1][RTW89_UK][33] = 30,
+ [1][1][RTW89_FCC][35] = 38,
[1][1][RTW89_ETSI][35] = 30,
[1][1][RTW89_MKK][35] = 58,
[1][1][RTW89_IC][35] = 38,
- [1][1][RTW89_ACMA][35] = 22,
- [1][1][RTW89_FCC][37] = 40,
+ [1][1][RTW89_KCC][35] = 36,
+ [1][1][RTW89_ACMA][35] = 30,
+ [1][1][RTW89_CN][35] = 127,
+ [1][1][RTW89_UK][35] = 30,
+ [1][1][RTW89_FCC][37] = 46,
[1][1][RTW89_ETSI][37] = 127,
[1][1][RTW89_MKK][37] = 58,
- [1][1][RTW89_IC][37] = 48,
- [1][1][RTW89_ACMA][37] = 48,
- [1][1][RTW89_FCC][38] = 76,
+ [1][1][RTW89_IC][37] = 46,
+ [1][1][RTW89_KCC][37] = 36,
+ [1][1][RTW89_ACMA][37] = 46,
+ [1][1][RTW89_CN][37] = 127,
+ [1][1][RTW89_UK][37] = 32,
+ [1][1][RTW89_FCC][38] = 74,
[1][1][RTW89_ETSI][38] = 16,
[1][1][RTW89_MKK][38] = 127,
- [1][1][RTW89_IC][38] = 84,
- [1][1][RTW89_ACMA][38] = 82,
- [1][1][RTW89_FCC][40] = 76,
+ [1][1][RTW89_IC][38] = 74,
+ [1][1][RTW89_KCC][38] = 36,
+ [1][1][RTW89_ACMA][38] = 74,
+ [1][1][RTW89_CN][38] = 54,
+ [1][1][RTW89_UK][38] = 30,
+ [1][1][RTW89_FCC][40] = 74,
[1][1][RTW89_ETSI][40] = 16,
[1][1][RTW89_MKK][40] = 127,
- [1][1][RTW89_IC][40] = 84,
- [1][1][RTW89_ACMA][40] = 82,
- [1][1][RTW89_FCC][42] = 76,
+ [1][1][RTW89_IC][40] = 74,
+ [1][1][RTW89_KCC][40] = 36,
+ [1][1][RTW89_ACMA][40] = 74,
+ [1][1][RTW89_CN][40] = 54,
+ [1][1][RTW89_UK][40] = 30,
+ [1][1][RTW89_FCC][42] = 74,
[1][1][RTW89_ETSI][42] = 16,
[1][1][RTW89_MKK][42] = 127,
- [1][1][RTW89_IC][42] = 84,
- [1][1][RTW89_ACMA][42] = 84,
- [1][1][RTW89_FCC][44] = 76,
+ [1][1][RTW89_IC][42] = 74,
+ [1][1][RTW89_KCC][42] = 36,
+ [1][1][RTW89_ACMA][42] = 74,
+ [1][1][RTW89_CN][42] = 54,
+ [1][1][RTW89_UK][42] = 30,
+ [1][1][RTW89_FCC][44] = 74,
[1][1][RTW89_ETSI][44] = 16,
[1][1][RTW89_MKK][44] = 127,
- [1][1][RTW89_IC][44] = 84,
- [1][1][RTW89_ACMA][44] = 84,
- [1][1][RTW89_FCC][46] = 76,
+ [1][1][RTW89_IC][44] = 74,
+ [1][1][RTW89_KCC][44] = 36,
+ [1][1][RTW89_ACMA][44] = 74,
+ [1][1][RTW89_CN][44] = 54,
+ [1][1][RTW89_UK][44] = 30,
+ [1][1][RTW89_FCC][46] = 74,
[1][1][RTW89_ETSI][46] = 16,
[1][1][RTW89_MKK][46] = 127,
- [1][1][RTW89_IC][46] = 84,
- [1][1][RTW89_ACMA][46] = 84,
- [1][1][RTW89_FCC][48] = 24,
+ [1][1][RTW89_IC][46] = 74,
+ [1][1][RTW89_KCC][46] = 36,
+ [1][1][RTW89_ACMA][46] = 74,
+ [1][1][RTW89_CN][46] = 54,
+ [1][1][RTW89_UK][46] = 30,
+ [1][1][RTW89_FCC][48] = 34,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
[1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
- [1][1][RTW89_FCC][50] = 24,
+ [1][1][RTW89_CN][48] = 127,
+ [1][1][RTW89_UK][48] = 127,
+ [1][1][RTW89_FCC][50] = 34,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
[1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
- [1][1][RTW89_FCC][52] = 24,
+ [1][1][RTW89_CN][50] = 127,
+ [1][1][RTW89_UK][50] = 127,
+ [1][1][RTW89_FCC][52] = 30,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
[1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
- [2][0][RTW89_FCC][0] = 62,
+ [1][1][RTW89_CN][52] = 127,
+ [1][1][RTW89_UK][52] = 127,
+ [2][0][RTW89_FCC][0] = 68,
[2][0][RTW89_ETSI][0] = 52,
[2][0][RTW89_MKK][0] = 60,
- [2][0][RTW89_IC][0] = 46,
- [2][0][RTW89_ACMA][0] = 48,
- [2][0][RTW89_FCC][2] = 62,
+ [2][0][RTW89_IC][0] = 52,
+ [2][0][RTW89_KCC][0] = 64,
+ [2][0][RTW89_ACMA][0] = 52,
+ [2][0][RTW89_CN][0] = 40,
+ [2][0][RTW89_UK][0] = 52,
+ [2][0][RTW89_FCC][2] = 64,
[2][0][RTW89_ETSI][2] = 52,
[2][0][RTW89_MKK][2] = 60,
- [2][0][RTW89_IC][2] = 46,
- [2][0][RTW89_ACMA][2] = 48,
- [2][0][RTW89_FCC][4] = 62,
+ [2][0][RTW89_IC][2] = 50,
+ [2][0][RTW89_KCC][2] = 64,
+ [2][0][RTW89_ACMA][2] = 52,
+ [2][0][RTW89_CN][2] = 40,
+ [2][0][RTW89_UK][2] = 52,
+ [2][0][RTW89_FCC][4] = 68,
[2][0][RTW89_ETSI][4] = 52,
[2][0][RTW89_MKK][4] = 50,
- [2][0][RTW89_IC][4] = 46,
- [2][0][RTW89_ACMA][4] = 48,
- [2][0][RTW89_FCC][6] = 62,
+ [2][0][RTW89_IC][4] = 50,
+ [2][0][RTW89_KCC][4] = 64,
+ [2][0][RTW89_ACMA][4] = 52,
+ [2][0][RTW89_CN][4] = 40,
+ [2][0][RTW89_UK][4] = 52,
+ [2][0][RTW89_FCC][6] = 68,
[2][0][RTW89_ETSI][6] = 52,
[2][0][RTW89_MKK][6] = 50,
- [2][0][RTW89_IC][6] = 46,
- [2][0][RTW89_ACMA][6] = 48,
- [2][0][RTW89_FCC][8] = 62,
+ [2][0][RTW89_IC][6] = 50,
+ [2][0][RTW89_KCC][6] = 36,
+ [2][0][RTW89_ACMA][6] = 52,
+ [2][0][RTW89_CN][6] = 40,
+ [2][0][RTW89_UK][6] = 52,
+ [2][0][RTW89_FCC][8] = 68,
[2][0][RTW89_ETSI][8] = 52,
[2][0][RTW89_MKK][8] = 44,
- [2][0][RTW89_IC][8] = 66,
- [2][0][RTW89_ACMA][8] = 48,
- [2][0][RTW89_FCC][10] = 62,
+ [2][0][RTW89_IC][8] = 64,
+ [2][0][RTW89_KCC][8] = 62,
+ [2][0][RTW89_ACMA][8] = 52,
+ [2][0][RTW89_CN][8] = 40,
+ [2][0][RTW89_UK][8] = 52,
+ [2][0][RTW89_FCC][10] = 68,
[2][0][RTW89_ETSI][10] = 52,
[2][0][RTW89_MKK][10] = 44,
- [2][0][RTW89_IC][10] = 66,
- [2][0][RTW89_ACMA][10] = 48,
- [2][0][RTW89_FCC][12] = 62,
+ [2][0][RTW89_IC][10] = 64,
+ [2][0][RTW89_KCC][10] = 62,
+ [2][0][RTW89_ACMA][10] = 52,
+ [2][0][RTW89_CN][10] = 40,
+ [2][0][RTW89_UK][10] = 52,
+ [2][0][RTW89_FCC][12] = 68,
[2][0][RTW89_ETSI][12] = 52,
[2][0][RTW89_MKK][12] = 58,
- [2][0][RTW89_IC][12] = 66,
- [2][0][RTW89_ACMA][12] = 48,
- [2][0][RTW89_FCC][14] = 62,
+ [2][0][RTW89_IC][12] = 64,
+ [2][0][RTW89_KCC][12] = 62,
+ [2][0][RTW89_ACMA][12] = 52,
+ [2][0][RTW89_CN][12] = 40,
+ [2][0][RTW89_UK][12] = 52,
+ [2][0][RTW89_FCC][14] = 68,
[2][0][RTW89_ETSI][14] = 52,
[2][0][RTW89_MKK][14] = 58,
- [2][0][RTW89_IC][14] = 66,
- [2][0][RTW89_ACMA][14] = 48,
- [2][0][RTW89_FCC][15] = 62,
+ [2][0][RTW89_IC][14] = 64,
+ [2][0][RTW89_KCC][14] = 62,
+ [2][0][RTW89_ACMA][14] = 52,
+ [2][0][RTW89_CN][14] = 40,
+ [2][0][RTW89_UK][14] = 52,
+ [2][0][RTW89_FCC][15] = 68,
[2][0][RTW89_ETSI][15] = 52,
[2][0][RTW89_MKK][15] = 68,
- [2][0][RTW89_IC][15] = 70,
- [2][0][RTW89_ACMA][15] = 48,
- [2][0][RTW89_FCC][17] = 62,
+ [2][0][RTW89_IC][15] = 68,
+ [2][0][RTW89_KCC][15] = 62,
+ [2][0][RTW89_ACMA][15] = 52,
+ [2][0][RTW89_CN][15] = 127,
+ [2][0][RTW89_UK][15] = 52,
+ [2][0][RTW89_FCC][17] = 68,
[2][0][RTW89_ETSI][17] = 52,
[2][0][RTW89_MKK][17] = 74,
- [2][0][RTW89_IC][17] = 70,
- [2][0][RTW89_ACMA][17] = 48,
- [2][0][RTW89_FCC][19] = 62,
+ [2][0][RTW89_IC][17] = 68,
+ [2][0][RTW89_KCC][17] = 62,
+ [2][0][RTW89_ACMA][17] = 52,
+ [2][0][RTW89_CN][17] = 127,
+ [2][0][RTW89_UK][17] = 52,
+ [2][0][RTW89_FCC][19] = 70,
[2][0][RTW89_ETSI][19] = 52,
[2][0][RTW89_MKK][19] = 74,
[2][0][RTW89_IC][19] = 70,
- [2][0][RTW89_ACMA][19] = 48,
- [2][0][RTW89_FCC][21] = 62,
+ [2][0][RTW89_KCC][19] = 62,
+ [2][0][RTW89_ACMA][19] = 52,
+ [2][0][RTW89_CN][19] = 127,
+ [2][0][RTW89_UK][19] = 52,
+ [2][0][RTW89_FCC][21] = 70,
[2][0][RTW89_ETSI][21] = 52,
[2][0][RTW89_MKK][21] = 74,
[2][0][RTW89_IC][21] = 70,
- [2][0][RTW89_ACMA][21] = 48,
- [2][0][RTW89_FCC][23] = 62,
+ [2][0][RTW89_KCC][21] = 62,
+ [2][0][RTW89_ACMA][21] = 52,
+ [2][0][RTW89_CN][21] = 127,
+ [2][0][RTW89_UK][21] = 52,
+ [2][0][RTW89_FCC][23] = 70,
[2][0][RTW89_ETSI][23] = 52,
[2][0][RTW89_MKK][23] = 74,
[2][0][RTW89_IC][23] = 70,
- [2][0][RTW89_ACMA][23] = 48,
- [2][0][RTW89_FCC][25] = 62,
+ [2][0][RTW89_KCC][23] = 62,
+ [2][0][RTW89_ACMA][23] = 52,
+ [2][0][RTW89_CN][23] = 127,
+ [2][0][RTW89_UK][23] = 52,
+ [2][0][RTW89_FCC][25] = 70,
[2][0][RTW89_ETSI][25] = 52,
[2][0][RTW89_MKK][25] = 74,
[2][0][RTW89_IC][25] = 127,
+ [2][0][RTW89_KCC][25] = 62,
[2][0][RTW89_ACMA][25] = 127,
- [2][0][RTW89_FCC][27] = 62,
+ [2][0][RTW89_CN][25] = 127,
+ [2][0][RTW89_UK][25] = 52,
+ [2][0][RTW89_FCC][27] = 70,
[2][0][RTW89_ETSI][27] = 52,
[2][0][RTW89_MKK][27] = 74,
[2][0][RTW89_IC][27] = 127,
+ [2][0][RTW89_KCC][27] = 62,
[2][0][RTW89_ACMA][27] = 127,
- [2][0][RTW89_FCC][29] = 62,
+ [2][0][RTW89_CN][27] = 127,
+ [2][0][RTW89_UK][27] = 52,
+ [2][0][RTW89_FCC][29] = 70,
[2][0][RTW89_ETSI][29] = 52,
[2][0][RTW89_MKK][29] = 74,
[2][0][RTW89_IC][29] = 127,
+ [2][0][RTW89_KCC][29] = 62,
[2][0][RTW89_ACMA][29] = 127,
- [2][0][RTW89_FCC][31] = 62,
+ [2][0][RTW89_CN][29] = 127,
+ [2][0][RTW89_UK][29] = 52,
+ [2][0][RTW89_FCC][31] = 70,
[2][0][RTW89_ETSI][31] = 52,
[2][0][RTW89_MKK][31] = 74,
- [2][0][RTW89_IC][31] = 72,
- [2][0][RTW89_ACMA][31] = 48,
- [2][0][RTW89_FCC][33] = 64,
+ [2][0][RTW89_IC][31] = 62,
+ [2][0][RTW89_KCC][31] = 62,
+ [2][0][RTW89_ACMA][31] = 52,
+ [2][0][RTW89_CN][31] = 127,
+ [2][0][RTW89_UK][31] = 52,
+ [2][0][RTW89_FCC][33] = 62,
[2][0][RTW89_ETSI][33] = 52,
[2][0][RTW89_MKK][33] = 74,
- [2][0][RTW89_IC][33] = 72,
- [2][0][RTW89_ACMA][33] = 48,
- [2][0][RTW89_FCC][35] = 64,
+ [2][0][RTW89_IC][33] = 62,
+ [2][0][RTW89_KCC][33] = 62,
+ [2][0][RTW89_ACMA][33] = 52,
+ [2][0][RTW89_CN][33] = 127,
+ [2][0][RTW89_UK][33] = 52,
+ [2][0][RTW89_FCC][35] = 62,
[2][0][RTW89_ETSI][35] = 52,
[2][0][RTW89_MKK][35] = 74,
- [2][0][RTW89_IC][35] = 72,
- [2][0][RTW89_ACMA][35] = 48,
- [2][0][RTW89_FCC][37] = 62,
+ [2][0][RTW89_IC][35] = 62,
+ [2][0][RTW89_KCC][35] = 62,
+ [2][0][RTW89_ACMA][35] = 52,
+ [2][0][RTW89_CN][35] = 127,
+ [2][0][RTW89_UK][35] = 52,
+ [2][0][RTW89_FCC][37] = 70,
[2][0][RTW89_ETSI][37] = 127,
[2][0][RTW89_MKK][37] = 74,
[2][0][RTW89_IC][37] = 70,
- [2][0][RTW89_ACMA][37] = 76,
- [2][0][RTW89_FCC][38] = 76,
+ [2][0][RTW89_KCC][37] = 62,
+ [2][0][RTW89_ACMA][37] = 70,
+ [2][0][RTW89_CN][37] = 127,
+ [2][0][RTW89_UK][37] = 52,
+ [2][0][RTW89_FCC][38] = 82,
[2][0][RTW89_ETSI][38] = 28,
[2][0][RTW89_MKK][38] = 127,
- [2][0][RTW89_IC][38] = 84,
- [2][0][RTW89_ACMA][38] = 84,
- [2][0][RTW89_FCC][40] = 76,
+ [2][0][RTW89_IC][38] = 82,
+ [2][0][RTW89_KCC][38] = 64,
+ [2][0][RTW89_ACMA][38] = 82,
+ [2][0][RTW89_CN][38] = 68,
+ [2][0][RTW89_UK][38] = 54,
+ [2][0][RTW89_FCC][40] = 82,
[2][0][RTW89_ETSI][40] = 28,
[2][0][RTW89_MKK][40] = 127,
- [2][0][RTW89_IC][40] = 84,
- [2][0][RTW89_ACMA][40] = 84,
+ [2][0][RTW89_IC][40] = 82,
+ [2][0][RTW89_KCC][40] = 64,
+ [2][0][RTW89_ACMA][40] = 82,
+ [2][0][RTW89_CN][40] = 68,
+ [2][0][RTW89_UK][40] = 54,
[2][0][RTW89_FCC][42] = 76,
[2][0][RTW89_ETSI][42] = 28,
[2][0][RTW89_MKK][42] = 127,
- [2][0][RTW89_IC][42] = 84,
- [2][0][RTW89_ACMA][42] = 84,
- [2][0][RTW89_FCC][44] = 76,
+ [2][0][RTW89_IC][42] = 76,
+ [2][0][RTW89_KCC][42] = 64,
+ [2][0][RTW89_ACMA][42] = 76,
+ [2][0][RTW89_CN][42] = 68,
+ [2][0][RTW89_UK][42] = 54,
+ [2][0][RTW89_FCC][44] = 80,
[2][0][RTW89_ETSI][44] = 28,
[2][0][RTW89_MKK][44] = 127,
- [2][0][RTW89_IC][44] = 84,
- [2][0][RTW89_ACMA][44] = 84,
- [2][0][RTW89_FCC][46] = 76,
+ [2][0][RTW89_IC][44] = 80,
+ [2][0][RTW89_KCC][44] = 64,
+ [2][0][RTW89_ACMA][44] = 80,
+ [2][0][RTW89_CN][44] = 68,
+ [2][0][RTW89_UK][44] = 54,
+ [2][0][RTW89_FCC][46] = 80,
[2][0][RTW89_ETSI][46] = 28,
[2][0][RTW89_MKK][46] = 127,
- [2][0][RTW89_IC][46] = 84,
- [2][0][RTW89_ACMA][46] = 84,
- [2][0][RTW89_FCC][48] = 48,
+ [2][0][RTW89_IC][46] = 80,
+ [2][0][RTW89_KCC][46] = 64,
+ [2][0][RTW89_ACMA][46] = 80,
+ [2][0][RTW89_CN][46] = 68,
+ [2][0][RTW89_UK][46] = 54,
+ [2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
- [2][0][RTW89_FCC][50] = 48,
+ [2][0][RTW89_CN][48] = 127,
+ [2][0][RTW89_UK][48] = 127,
+ [2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
- [2][0][RTW89_FCC][52] = 48,
+ [2][0][RTW89_CN][50] = 127,
+ [2][0][RTW89_UK][50] = 127,
+ [2][0][RTW89_FCC][52] = 64,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
- [2][1][RTW89_FCC][0] = 42,
+ [2][0][RTW89_CN][52] = 127,
+ [2][0][RTW89_UK][52] = 127,
+ [2][1][RTW89_FCC][0] = 50,
[2][1][RTW89_ETSI][0] = 40,
[2][1][RTW89_MKK][0] = 44,
- [2][1][RTW89_IC][0] = 20,
- [2][1][RTW89_ACMA][0] = 36,
- [2][1][RTW89_FCC][2] = 42,
+ [2][1][RTW89_IC][0] = 26,
+ [2][1][RTW89_KCC][0] = 44,
+ [2][1][RTW89_ACMA][0] = 40,
+ [2][1][RTW89_CN][0] = 28,
+ [2][1][RTW89_UK][0] = 40,
+ [2][1][RTW89_FCC][2] = 50,
[2][1][RTW89_ETSI][2] = 40,
[2][1][RTW89_MKK][2] = 44,
- [2][1][RTW89_IC][2] = 18,
- [2][1][RTW89_ACMA][2] = 36,
- [2][1][RTW89_FCC][4] = 42,
+ [2][1][RTW89_IC][2] = 26,
+ [2][1][RTW89_KCC][2] = 44,
+ [2][1][RTW89_ACMA][2] = 40,
+ [2][1][RTW89_CN][2] = 28,
+ [2][1][RTW89_UK][2] = 40,
+ [2][1][RTW89_FCC][4] = 50,
[2][1][RTW89_ETSI][4] = 40,
[2][1][RTW89_MKK][4] = 36,
- [2][1][RTW89_IC][4] = 22,
- [2][1][RTW89_ACMA][4] = 36,
- [2][1][RTW89_FCC][6] = 42,
+ [2][1][RTW89_IC][4] = 26,
+ [2][1][RTW89_KCC][4] = 44,
+ [2][1][RTW89_ACMA][4] = 40,
+ [2][1][RTW89_CN][4] = 28,
+ [2][1][RTW89_UK][4] = 40,
+ [2][1][RTW89_FCC][6] = 50,
[2][1][RTW89_ETSI][6] = 40,
[2][1][RTW89_MKK][6] = 36,
- [2][1][RTW89_IC][6] = 22,
- [2][1][RTW89_ACMA][6] = 36,
- [2][1][RTW89_FCC][8] = 42,
+ [2][1][RTW89_IC][6] = 26,
+ [2][1][RTW89_KCC][6] = 20,
+ [2][1][RTW89_ACMA][6] = 40,
+ [2][1][RTW89_CN][6] = 28,
+ [2][1][RTW89_UK][6] = 40,
+ [2][1][RTW89_FCC][8] = 50,
[2][1][RTW89_ETSI][8] = 40,
[2][1][RTW89_MKK][8] = 32,
[2][1][RTW89_IC][8] = 50,
- [2][1][RTW89_ACMA][8] = 36,
- [2][1][RTW89_FCC][10] = 42,
+ [2][1][RTW89_KCC][8] = 46,
+ [2][1][RTW89_ACMA][8] = 40,
+ [2][1][RTW89_CN][8] = 28,
+ [2][1][RTW89_UK][8] = 40,
+ [2][1][RTW89_FCC][10] = 50,
[2][1][RTW89_ETSI][10] = 40,
[2][1][RTW89_MKK][10] = 32,
[2][1][RTW89_IC][10] = 50,
- [2][1][RTW89_ACMA][10] = 36,
- [2][1][RTW89_FCC][12] = 44,
+ [2][1][RTW89_KCC][10] = 46,
+ [2][1][RTW89_ACMA][10] = 40,
+ [2][1][RTW89_CN][10] = 28,
+ [2][1][RTW89_UK][10] = 40,
+ [2][1][RTW89_FCC][12] = 48,
[2][1][RTW89_ETSI][12] = 40,
[2][1][RTW89_MKK][12] = 44,
- [2][1][RTW89_IC][12] = 52,
- [2][1][RTW89_ACMA][12] = 36,
- [2][1][RTW89_FCC][14] = 44,
+ [2][1][RTW89_IC][12] = 48,
+ [2][1][RTW89_KCC][12] = 46,
+ [2][1][RTW89_ACMA][12] = 40,
+ [2][1][RTW89_CN][12] = 28,
+ [2][1][RTW89_UK][12] = 40,
+ [2][1][RTW89_FCC][14] = 48,
[2][1][RTW89_ETSI][14] = 40,
[2][1][RTW89_MKK][14] = 44,
- [2][1][RTW89_IC][14] = 52,
- [2][1][RTW89_ACMA][14] = 36,
- [2][1][RTW89_FCC][15] = 42,
+ [2][1][RTW89_IC][14] = 48,
+ [2][1][RTW89_KCC][14] = 46,
+ [2][1][RTW89_ACMA][14] = 40,
+ [2][1][RTW89_CN][14] = 28,
+ [2][1][RTW89_UK][14] = 40,
+ [2][1][RTW89_FCC][15] = 50,
[2][1][RTW89_ETSI][15] = 40,
[2][1][RTW89_MKK][15] = 66,
[2][1][RTW89_IC][15] = 50,
- [2][1][RTW89_ACMA][15] = 36,
- [2][1][RTW89_FCC][17] = 42,
+ [2][1][RTW89_KCC][15] = 46,
+ [2][1][RTW89_ACMA][15] = 40,
+ [2][1][RTW89_CN][15] = 127,
+ [2][1][RTW89_UK][15] = 40,
+ [2][1][RTW89_FCC][17] = 50,
[2][1][RTW89_ETSI][17] = 40,
[2][1][RTW89_MKK][17] = 66,
[2][1][RTW89_IC][17] = 50,
- [2][1][RTW89_ACMA][17] = 36,
- [2][1][RTW89_FCC][19] = 42,
+ [2][1][RTW89_KCC][17] = 46,
+ [2][1][RTW89_ACMA][17] = 40,
+ [2][1][RTW89_CN][17] = 127,
+ [2][1][RTW89_UK][17] = 40,
+ [2][1][RTW89_FCC][19] = 50,
[2][1][RTW89_ETSI][19] = 40,
[2][1][RTW89_MKK][19] = 66,
[2][1][RTW89_IC][19] = 50,
- [2][1][RTW89_ACMA][19] = 36,
- [2][1][RTW89_FCC][21] = 42,
+ [2][1][RTW89_KCC][19] = 46,
+ [2][1][RTW89_ACMA][19] = 40,
+ [2][1][RTW89_CN][19] = 127,
+ [2][1][RTW89_UK][19] = 40,
+ [2][1][RTW89_FCC][21] = 50,
[2][1][RTW89_ETSI][21] = 40,
[2][1][RTW89_MKK][21] = 66,
[2][1][RTW89_IC][21] = 50,
- [2][1][RTW89_ACMA][21] = 36,
- [2][1][RTW89_FCC][23] = 42,
+ [2][1][RTW89_KCC][21] = 46,
+ [2][1][RTW89_ACMA][21] = 40,
+ [2][1][RTW89_CN][21] = 127,
+ [2][1][RTW89_UK][21] = 40,
+ [2][1][RTW89_FCC][23] = 50,
[2][1][RTW89_ETSI][23] = 40,
[2][1][RTW89_MKK][23] = 66,
[2][1][RTW89_IC][23] = 50,
- [2][1][RTW89_ACMA][23] = 36,
- [2][1][RTW89_FCC][25] = 42,
+ [2][1][RTW89_KCC][23] = 46,
+ [2][1][RTW89_ACMA][23] = 40,
+ [2][1][RTW89_CN][23] = 127,
+ [2][1][RTW89_UK][23] = 40,
+ [2][1][RTW89_FCC][25] = 50,
[2][1][RTW89_ETSI][25] = 40,
[2][1][RTW89_MKK][25] = 66,
[2][1][RTW89_IC][25] = 127,
+ [2][1][RTW89_KCC][25] = 46,
[2][1][RTW89_ACMA][25] = 127,
- [2][1][RTW89_FCC][27] = 42,
+ [2][1][RTW89_CN][25] = 127,
+ [2][1][RTW89_UK][25] = 40,
+ [2][1][RTW89_FCC][27] = 50,
[2][1][RTW89_ETSI][27] = 40,
[2][1][RTW89_MKK][27] = 66,
[2][1][RTW89_IC][27] = 127,
+ [2][1][RTW89_KCC][27] = 46,
[2][1][RTW89_ACMA][27] = 127,
- [2][1][RTW89_FCC][29] = 42,
+ [2][1][RTW89_CN][27] = 127,
+ [2][1][RTW89_UK][27] = 40,
+ [2][1][RTW89_FCC][29] = 50,
[2][1][RTW89_ETSI][29] = 40,
[2][1][RTW89_MKK][29] = 66,
[2][1][RTW89_IC][29] = 127,
+ [2][1][RTW89_KCC][29] = 46,
[2][1][RTW89_ACMA][29] = 127,
- [2][1][RTW89_FCC][31] = 42,
+ [2][1][RTW89_CN][29] = 127,
+ [2][1][RTW89_UK][29] = 40,
+ [2][1][RTW89_FCC][31] = 50,
[2][1][RTW89_ETSI][31] = 40,
[2][1][RTW89_MKK][31] = 66,
- [2][1][RTW89_IC][31] = 50,
- [2][1][RTW89_ACMA][31] = 36,
- [2][1][RTW89_FCC][33] = 42,
+ [2][1][RTW89_IC][31] = 48,
+ [2][1][RTW89_KCC][31] = 46,
+ [2][1][RTW89_ACMA][31] = 40,
+ [2][1][RTW89_CN][31] = 127,
+ [2][1][RTW89_UK][31] = 40,
+ [2][1][RTW89_FCC][33] = 48,
[2][1][RTW89_ETSI][33] = 40,
[2][1][RTW89_MKK][33] = 66,
- [2][1][RTW89_IC][33] = 50,
- [2][1][RTW89_ACMA][33] = 36,
- [2][1][RTW89_FCC][35] = 42,
+ [2][1][RTW89_IC][33] = 48,
+ [2][1][RTW89_KCC][33] = 46,
+ [2][1][RTW89_ACMA][33] = 40,
+ [2][1][RTW89_CN][33] = 127,
+ [2][1][RTW89_UK][33] = 40,
+ [2][1][RTW89_FCC][35] = 48,
[2][1][RTW89_ETSI][35] = 40,
[2][1][RTW89_MKK][35] = 66,
- [2][1][RTW89_IC][35] = 50,
- [2][1][RTW89_ACMA][35] = 36,
- [2][1][RTW89_FCC][37] = 42,
+ [2][1][RTW89_IC][35] = 48,
+ [2][1][RTW89_KCC][35] = 46,
+ [2][1][RTW89_ACMA][35] = 40,
+ [2][1][RTW89_CN][35] = 127,
+ [2][1][RTW89_UK][35] = 40,
+ [2][1][RTW89_FCC][37] = 52,
[2][1][RTW89_ETSI][37] = 127,
[2][1][RTW89_MKK][37] = 66,
- [2][1][RTW89_IC][37] = 50,
- [2][1][RTW89_ACMA][37] = 60,
- [2][1][RTW89_FCC][38] = 76,
+ [2][1][RTW89_IC][37] = 52,
+ [2][1][RTW89_KCC][37] = 46,
+ [2][1][RTW89_ACMA][37] = 52,
+ [2][1][RTW89_CN][37] = 127,
+ [2][1][RTW89_UK][37] = 42,
+ [2][1][RTW89_FCC][38] = 78,
[2][1][RTW89_ETSI][38] = 16,
[2][1][RTW89_MKK][38] = 127,
- [2][1][RTW89_IC][38] = 84,
- [2][1][RTW89_ACMA][38] = 84,
- [2][1][RTW89_FCC][40] = 76,
+ [2][1][RTW89_IC][38] = 78,
+ [2][1][RTW89_KCC][38] = 46,
+ [2][1][RTW89_ACMA][38] = 78,
+ [2][1][RTW89_CN][38] = 56,
+ [2][1][RTW89_UK][38] = 42,
+ [2][1][RTW89_FCC][40] = 78,
[2][1][RTW89_ETSI][40] = 16,
[2][1][RTW89_MKK][40] = 127,
- [2][1][RTW89_IC][40] = 84,
- [2][1][RTW89_ACMA][40] = 84,
- [2][1][RTW89_FCC][42] = 76,
+ [2][1][RTW89_IC][40] = 78,
+ [2][1][RTW89_KCC][40] = 46,
+ [2][1][RTW89_ACMA][40] = 78,
+ [2][1][RTW89_CN][40] = 56,
+ [2][1][RTW89_UK][40] = 42,
+ [2][1][RTW89_FCC][42] = 78,
[2][1][RTW89_ETSI][42] = 16,
[2][1][RTW89_MKK][42] = 127,
- [2][1][RTW89_IC][42] = 84,
- [2][1][RTW89_ACMA][42] = 84,
- [2][1][RTW89_FCC][44] = 76,
+ [2][1][RTW89_IC][42] = 78,
+ [2][1][RTW89_KCC][42] = 46,
+ [2][1][RTW89_ACMA][42] = 78,
+ [2][1][RTW89_CN][42] = 56,
+ [2][1][RTW89_UK][42] = 42,
+ [2][1][RTW89_FCC][44] = 74,
[2][1][RTW89_ETSI][44] = 16,
[2][1][RTW89_MKK][44] = 127,
- [2][1][RTW89_IC][44] = 84,
- [2][1][RTW89_ACMA][44] = 84,
- [2][1][RTW89_FCC][46] = 76,
+ [2][1][RTW89_IC][44] = 74,
+ [2][1][RTW89_KCC][44] = 46,
+ [2][1][RTW89_ACMA][44] = 74,
+ [2][1][RTW89_CN][44] = 56,
+ [2][1][RTW89_UK][44] = 42,
+ [2][1][RTW89_FCC][46] = 74,
[2][1][RTW89_ETSI][46] = 16,
[2][1][RTW89_MKK][46] = 127,
- [2][1][RTW89_IC][46] = 84,
- [2][1][RTW89_ACMA][46] = 84,
- [2][1][RTW89_FCC][48] = 36,
+ [2][1][RTW89_IC][46] = 74,
+ [2][1][RTW89_KCC][46] = 46,
+ [2][1][RTW89_ACMA][46] = 74,
+ [2][1][RTW89_CN][46] = 56,
+ [2][1][RTW89_UK][46] = 42,
+ [2][1][RTW89_FCC][48] = 40,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
[2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
- [2][1][RTW89_FCC][50] = 36,
+ [2][1][RTW89_CN][48] = 127,
+ [2][1][RTW89_UK][48] = 127,
+ [2][1][RTW89_FCC][50] = 40,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
[2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
- [2][1][RTW89_FCC][52] = 36,
+ [2][1][RTW89_CN][50] = 127,
+ [2][1][RTW89_UK][50] = 127,
+ [2][1][RTW89_FCC][52] = 40,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
[2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
+ [2][1][RTW89_CN][52] = 127,
+ [2][1][RTW89_UK][52] = 127,
};
const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_REGD_NUM][RTW89_6G_CH_NUM] = {
- [0][0][RTW89_WW][0] = 76,
- [0][0][RTW89_WW][2] = 76,
- [0][0][RTW89_WW][4] = 76,
- [0][0][RTW89_WW][6] = 76,
- [0][0][RTW89_WW][8] = 76,
- [0][0][RTW89_WW][10] = 76,
- [0][0][RTW89_WW][12] = 76,
- [0][0][RTW89_WW][14] = 76,
- [0][0][RTW89_WW][15] = 76,
- [0][0][RTW89_WW][17] = 76,
- [0][0][RTW89_WW][19] = 76,
- [0][0][RTW89_WW][21] = 76,
- [0][0][RTW89_WW][23] = 76,
- [0][0][RTW89_WW][25] = 76,
- [0][0][RTW89_WW][27] = 76,
- [0][0][RTW89_WW][29] = 76,
- [0][0][RTW89_WW][30] = 76,
- [0][0][RTW89_WW][32] = 76,
- [0][0][RTW89_WW][34] = 76,
- [0][0][RTW89_WW][36] = 76,
- [0][0][RTW89_WW][38] = 76,
- [0][0][RTW89_WW][40] = 76,
- [0][0][RTW89_WW][42] = 76,
- [0][0][RTW89_WW][44] = 76,
- [0][0][RTW89_WW][45] = 76,
- [0][0][RTW89_WW][47] = 76,
- [0][0][RTW89_WW][49] = 76,
- [0][0][RTW89_WW][51] = 76,
- [0][0][RTW89_WW][53] = 76,
- [0][0][RTW89_WW][55] = 76,
- [0][0][RTW89_WW][57] = 76,
- [0][0][RTW89_WW][59] = 76,
- [0][0][RTW89_WW][60] = 76,
- [0][0][RTW89_WW][62] = 76,
- [0][0][RTW89_WW][64] = 76,
- [0][0][RTW89_WW][66] = 76,
- [0][0][RTW89_WW][68] = 76,
- [0][0][RTW89_WW][70] = 76,
- [0][0][RTW89_WW][72] = 76,
- [0][0][RTW89_WW][74] = 76,
- [0][0][RTW89_WW][75] = 76,
- [0][0][RTW89_WW][77] = 76,
- [0][0][RTW89_WW][79] = 76,
- [0][0][RTW89_WW][81] = 76,
- [0][0][RTW89_WW][83] = 76,
- [0][0][RTW89_WW][85] = 76,
- [0][0][RTW89_WW][87] = 76,
- [0][0][RTW89_WW][89] = 76,
- [0][0][RTW89_WW][90] = 76,
- [0][0][RTW89_WW][92] = 76,
- [0][0][RTW89_WW][94] = 76,
- [0][0][RTW89_WW][96] = 76,
- [0][0][RTW89_WW][98] = 76,
- [0][0][RTW89_WW][100] = 76,
- [0][0][RTW89_WW][102] = 76,
- [0][0][RTW89_WW][104] = 76,
- [0][0][RTW89_WW][105] = 76,
- [0][0][RTW89_WW][107] = 76,
- [0][0][RTW89_WW][109] = 76,
+ [0][0][RTW89_WW][0] = -16,
+ [0][0][RTW89_WW][2] = -18,
+ [0][0][RTW89_WW][4] = -18,
+ [0][0][RTW89_WW][6] = -18,
+ [0][0][RTW89_WW][8] = -18,
+ [0][0][RTW89_WW][10] = -18,
+ [0][0][RTW89_WW][12] = -18,
+ [0][0][RTW89_WW][14] = -18,
+ [0][0][RTW89_WW][15] = -18,
+ [0][0][RTW89_WW][17] = -18,
+ [0][0][RTW89_WW][19] = -18,
+ [0][0][RTW89_WW][21] = -18,
+ [0][0][RTW89_WW][23] = -18,
+ [0][0][RTW89_WW][25] = -18,
+ [0][0][RTW89_WW][27] = -18,
+ [0][0][RTW89_WW][29] = -18,
+ [0][0][RTW89_WW][30] = -18,
+ [0][0][RTW89_WW][32] = -18,
+ [0][0][RTW89_WW][34] = -18,
+ [0][0][RTW89_WW][36] = -18,
+ [0][0][RTW89_WW][38] = -18,
+ [0][0][RTW89_WW][40] = -18,
+ [0][0][RTW89_WW][42] = -18,
+ [0][0][RTW89_WW][44] = -16,
+ [0][0][RTW89_WW][45] = -16,
+ [0][0][RTW89_WW][47] = -18,
+ [0][0][RTW89_WW][49] = -18,
+ [0][0][RTW89_WW][51] = -18,
+ [0][0][RTW89_WW][53] = -16,
+ [0][0][RTW89_WW][55] = -18,
+ [0][0][RTW89_WW][57] = -18,
+ [0][0][RTW89_WW][59] = -18,
+ [0][0][RTW89_WW][60] = -18,
+ [0][0][RTW89_WW][62] = -18,
+ [0][0][RTW89_WW][64] = -18,
+ [0][0][RTW89_WW][66] = -18,
+ [0][0][RTW89_WW][68] = -18,
+ [0][0][RTW89_WW][70] = -16,
+ [0][0][RTW89_WW][72] = -18,
+ [0][0][RTW89_WW][74] = -18,
+ [0][0][RTW89_WW][75] = -18,
+ [0][0][RTW89_WW][77] = -18,
+ [0][0][RTW89_WW][79] = -18,
+ [0][0][RTW89_WW][81] = -18,
+ [0][0][RTW89_WW][83] = -18,
+ [0][0][RTW89_WW][85] = -18,
+ [0][0][RTW89_WW][87] = -16,
+ [0][0][RTW89_WW][89] = -16,
+ [0][0][RTW89_WW][90] = -16,
+ [0][0][RTW89_WW][92] = -16,
+ [0][0][RTW89_WW][94] = -16,
+ [0][0][RTW89_WW][96] = -16,
+ [0][0][RTW89_WW][98] = -16,
+ [0][0][RTW89_WW][100] = -16,
+ [0][0][RTW89_WW][102] = -16,
+ [0][0][RTW89_WW][104] = -16,
+ [0][0][RTW89_WW][105] = -16,
+ [0][0][RTW89_WW][107] = -12,
+ [0][0][RTW89_WW][109] = -12,
[0][0][RTW89_WW][111] = 0,
[0][0][RTW89_WW][113] = 0,
[0][0][RTW89_WW][115] = 0,
[0][0][RTW89_WW][117] = 0,
[0][0][RTW89_WW][119] = 0,
- [0][1][RTW89_WW][0] = 76,
- [0][1][RTW89_WW][2] = 76,
- [0][1][RTW89_WW][4] = 76,
- [0][1][RTW89_WW][6] = 76,
- [0][1][RTW89_WW][8] = 76,
- [0][1][RTW89_WW][10] = 76,
- [0][1][RTW89_WW][12] = 76,
- [0][1][RTW89_WW][14] = 76,
- [0][1][RTW89_WW][15] = 76,
- [0][1][RTW89_WW][17] = 76,
- [0][1][RTW89_WW][19] = 76,
- [0][1][RTW89_WW][21] = 76,
- [0][1][RTW89_WW][23] = 76,
- [0][1][RTW89_WW][25] = 76,
- [0][1][RTW89_WW][27] = 76,
- [0][1][RTW89_WW][29] = 76,
- [0][1][RTW89_WW][30] = 76,
- [0][1][RTW89_WW][32] = 76,
- [0][1][RTW89_WW][34] = 76,
- [0][1][RTW89_WW][36] = 76,
- [0][1][RTW89_WW][38] = 76,
- [0][1][RTW89_WW][40] = 76,
- [0][1][RTW89_WW][42] = 76,
- [0][1][RTW89_WW][44] = 76,
- [0][1][RTW89_WW][45] = 76,
- [0][1][RTW89_WW][47] = 76,
- [0][1][RTW89_WW][49] = 76,
- [0][1][RTW89_WW][51] = 76,
- [0][1][RTW89_WW][53] = 76,
- [0][1][RTW89_WW][55] = 76,
- [0][1][RTW89_WW][57] = 76,
- [0][1][RTW89_WW][59] = 76,
- [0][1][RTW89_WW][60] = 76,
- [0][1][RTW89_WW][62] = 76,
- [0][1][RTW89_WW][64] = 76,
- [0][1][RTW89_WW][66] = 76,
- [0][1][RTW89_WW][68] = 76,
- [0][1][RTW89_WW][70] = 76,
- [0][1][RTW89_WW][72] = 76,
- [0][1][RTW89_WW][74] = 76,
- [0][1][RTW89_WW][75] = 76,
- [0][1][RTW89_WW][77] = 76,
- [0][1][RTW89_WW][79] = 76,
- [0][1][RTW89_WW][81] = 76,
- [0][1][RTW89_WW][83] = 76,
- [0][1][RTW89_WW][85] = 76,
- [0][1][RTW89_WW][87] = 76,
- [0][1][RTW89_WW][89] = 76,
- [0][1][RTW89_WW][90] = 76,
- [0][1][RTW89_WW][92] = 76,
- [0][1][RTW89_WW][94] = 76,
- [0][1][RTW89_WW][96] = 76,
- [0][1][RTW89_WW][98] = 76,
- [0][1][RTW89_WW][100] = 76,
- [0][1][RTW89_WW][102] = 76,
- [0][1][RTW89_WW][104] = 76,
- [0][1][RTW89_WW][105] = 76,
- [0][1][RTW89_WW][107] = 76,
- [0][1][RTW89_WW][109] = 76,
+ [0][1][RTW89_WW][0] = -40,
+ [0][1][RTW89_WW][2] = -40,
+ [0][1][RTW89_WW][4] = -40,
+ [0][1][RTW89_WW][6] = -40,
+ [0][1][RTW89_WW][8] = -40,
+ [0][1][RTW89_WW][10] = -40,
+ [0][1][RTW89_WW][12] = -40,
+ [0][1][RTW89_WW][14] = -40,
+ [0][1][RTW89_WW][15] = -40,
+ [0][1][RTW89_WW][17] = -40,
+ [0][1][RTW89_WW][19] = -40,
+ [0][1][RTW89_WW][21] = -40,
+ [0][1][RTW89_WW][23] = -40,
+ [0][1][RTW89_WW][25] = -40,
+ [0][1][RTW89_WW][27] = -40,
+ [0][1][RTW89_WW][29] = -40,
+ [0][1][RTW89_WW][30] = -40,
+ [0][1][RTW89_WW][32] = -40,
+ [0][1][RTW89_WW][34] = -40,
+ [0][1][RTW89_WW][36] = -40,
+ [0][1][RTW89_WW][38] = -40,
+ [0][1][RTW89_WW][40] = -40,
+ [0][1][RTW89_WW][42] = -40,
+ [0][1][RTW89_WW][44] = -40,
+ [0][1][RTW89_WW][45] = -40,
+ [0][1][RTW89_WW][47] = -40,
+ [0][1][RTW89_WW][49] = -40,
+ [0][1][RTW89_WW][51] = -40,
+ [0][1][RTW89_WW][53] = -40,
+ [0][1][RTW89_WW][55] = -40,
+ [0][1][RTW89_WW][57] = -40,
+ [0][1][RTW89_WW][59] = -40,
+ [0][1][RTW89_WW][60] = -40,
+ [0][1][RTW89_WW][62] = -40,
+ [0][1][RTW89_WW][64] = -40,
+ [0][1][RTW89_WW][66] = -40,
+ [0][1][RTW89_WW][68] = -40,
+ [0][1][RTW89_WW][70] = -38,
+ [0][1][RTW89_WW][72] = -38,
+ [0][1][RTW89_WW][74] = -38,
+ [0][1][RTW89_WW][75] = -38,
+ [0][1][RTW89_WW][77] = -38,
+ [0][1][RTW89_WW][79] = -38,
+ [0][1][RTW89_WW][81] = -38,
+ [0][1][RTW89_WW][83] = -38,
+ [0][1][RTW89_WW][85] = -38,
+ [0][1][RTW89_WW][87] = -40,
+ [0][1][RTW89_WW][89] = -38,
+ [0][1][RTW89_WW][90] = -38,
+ [0][1][RTW89_WW][92] = -38,
+ [0][1][RTW89_WW][94] = -38,
+ [0][1][RTW89_WW][96] = -38,
+ [0][1][RTW89_WW][98] = -38,
+ [0][1][RTW89_WW][100] = -38,
+ [0][1][RTW89_WW][102] = -38,
+ [0][1][RTW89_WW][104] = -38,
+ [0][1][RTW89_WW][105] = -38,
+ [0][1][RTW89_WW][107] = -34,
+ [0][1][RTW89_WW][109] = -34,
[0][1][RTW89_WW][111] = 0,
[0][1][RTW89_WW][113] = 0,
[0][1][RTW89_WW][115] = 0,
[0][1][RTW89_WW][117] = 0,
[0][1][RTW89_WW][119] = 0,
- [1][0][RTW89_WW][0] = 76,
- [1][0][RTW89_WW][2] = 76,
- [1][0][RTW89_WW][4] = 76,
- [1][0][RTW89_WW][6] = 76,
- [1][0][RTW89_WW][8] = 76,
- [1][0][RTW89_WW][10] = 76,
- [1][0][RTW89_WW][12] = 76,
- [1][0][RTW89_WW][14] = 76,
- [1][0][RTW89_WW][15] = 76,
- [1][0][RTW89_WW][17] = 76,
- [1][0][RTW89_WW][19] = 76,
- [1][0][RTW89_WW][21] = 76,
- [1][0][RTW89_WW][23] = 76,
- [1][0][RTW89_WW][25] = 76,
- [1][0][RTW89_WW][27] = 76,
- [1][0][RTW89_WW][29] = 76,
- [1][0][RTW89_WW][30] = 76,
- [1][0][RTW89_WW][32] = 76,
- [1][0][RTW89_WW][34] = 76,
- [1][0][RTW89_WW][36] = 76,
- [1][0][RTW89_WW][38] = 76,
- [1][0][RTW89_WW][40] = 76,
- [1][0][RTW89_WW][42] = 76,
- [1][0][RTW89_WW][44] = 76,
- [1][0][RTW89_WW][45] = 76,
- [1][0][RTW89_WW][47] = 76,
- [1][0][RTW89_WW][49] = 76,
- [1][0][RTW89_WW][51] = 76,
- [1][0][RTW89_WW][53] = 76,
- [1][0][RTW89_WW][55] = 76,
- [1][0][RTW89_WW][57] = 76,
- [1][0][RTW89_WW][59] = 76,
- [1][0][RTW89_WW][60] = 76,
- [1][0][RTW89_WW][62] = 76,
- [1][0][RTW89_WW][64] = 76,
- [1][0][RTW89_WW][66] = 76,
- [1][0][RTW89_WW][68] = 76,
- [1][0][RTW89_WW][70] = 76,
- [1][0][RTW89_WW][72] = 76,
- [1][0][RTW89_WW][74] = 76,
- [1][0][RTW89_WW][75] = 76,
- [1][0][RTW89_WW][77] = 76,
- [1][0][RTW89_WW][79] = 76,
- [1][0][RTW89_WW][81] = 76,
- [1][0][RTW89_WW][83] = 76,
- [1][0][RTW89_WW][85] = 76,
- [1][0][RTW89_WW][87] = 76,
- [1][0][RTW89_WW][89] = 76,
- [1][0][RTW89_WW][90] = 76,
- [1][0][RTW89_WW][92] = 76,
- [1][0][RTW89_WW][94] = 76,
- [1][0][RTW89_WW][96] = 76,
- [1][0][RTW89_WW][98] = 76,
- [1][0][RTW89_WW][100] = 76,
- [1][0][RTW89_WW][102] = 76,
- [1][0][RTW89_WW][104] = 76,
- [1][0][RTW89_WW][105] = 76,
- [1][0][RTW89_WW][107] = 76,
- [1][0][RTW89_WW][109] = 76,
+ [1][0][RTW89_WW][0] = -4,
+ [1][0][RTW89_WW][2] = -4,
+ [1][0][RTW89_WW][4] = -4,
+ [1][0][RTW89_WW][6] = -4,
+ [1][0][RTW89_WW][8] = -4,
+ [1][0][RTW89_WW][10] = -4,
+ [1][0][RTW89_WW][12] = -4,
+ [1][0][RTW89_WW][14] = -4,
+ [1][0][RTW89_WW][15] = -4,
+ [1][0][RTW89_WW][17] = -4,
+ [1][0][RTW89_WW][19] = -4,
+ [1][0][RTW89_WW][21] = -4,
+ [1][0][RTW89_WW][23] = -4,
+ [1][0][RTW89_WW][25] = -4,
+ [1][0][RTW89_WW][27] = -4,
+ [1][0][RTW89_WW][29] = -4,
+ [1][0][RTW89_WW][30] = -4,
+ [1][0][RTW89_WW][32] = -4,
+ [1][0][RTW89_WW][34] = -4,
+ [1][0][RTW89_WW][36] = -4,
+ [1][0][RTW89_WW][38] = -4,
+ [1][0][RTW89_WW][40] = -4,
+ [1][0][RTW89_WW][42] = -4,
+ [1][0][RTW89_WW][44] = -4,
+ [1][0][RTW89_WW][45] = -4,
+ [1][0][RTW89_WW][47] = -4,
+ [1][0][RTW89_WW][49] = -4,
+ [1][0][RTW89_WW][51] = -4,
+ [1][0][RTW89_WW][53] = -4,
+ [1][0][RTW89_WW][55] = -4,
+ [1][0][RTW89_WW][57] = -4,
+ [1][0][RTW89_WW][59] = -4,
+ [1][0][RTW89_WW][60] = -4,
+ [1][0][RTW89_WW][62] = -4,
+ [1][0][RTW89_WW][64] = -4,
+ [1][0][RTW89_WW][66] = -4,
+ [1][0][RTW89_WW][68] = -4,
+ [1][0][RTW89_WW][70] = -4,
+ [1][0][RTW89_WW][72] = -4,
+ [1][0][RTW89_WW][74] = -4,
+ [1][0][RTW89_WW][75] = -4,
+ [1][0][RTW89_WW][77] = -4,
+ [1][0][RTW89_WW][79] = -4,
+ [1][0][RTW89_WW][81] = -4,
+ [1][0][RTW89_WW][83] = -4,
+ [1][0][RTW89_WW][85] = -4,
+ [1][0][RTW89_WW][87] = -4,
+ [1][0][RTW89_WW][89] = -4,
+ [1][0][RTW89_WW][90] = -4,
+ [1][0][RTW89_WW][92] = -4,
+ [1][0][RTW89_WW][94] = -4,
+ [1][0][RTW89_WW][96] = -4,
+ [1][0][RTW89_WW][98] = -4,
+ [1][0][RTW89_WW][100] = -4,
+ [1][0][RTW89_WW][102] = -4,
+ [1][0][RTW89_WW][104] = -4,
+ [1][0][RTW89_WW][105] = -4,
+ [1][0][RTW89_WW][107] = 1,
+ [1][0][RTW89_WW][109] = 2,
[1][0][RTW89_WW][111] = 0,
[1][0][RTW89_WW][113] = 0,
[1][0][RTW89_WW][115] = 0,
[1][0][RTW89_WW][117] = 0,
[1][0][RTW89_WW][119] = 0,
- [1][1][RTW89_WW][0] = 76,
- [1][1][RTW89_WW][2] = 76,
- [1][1][RTW89_WW][4] = 76,
- [1][1][RTW89_WW][6] = 76,
- [1][1][RTW89_WW][8] = 76,
- [1][1][RTW89_WW][10] = 76,
- [1][1][RTW89_WW][12] = 76,
- [1][1][RTW89_WW][14] = 76,
- [1][1][RTW89_WW][15] = 76,
- [1][1][RTW89_WW][17] = 76,
- [1][1][RTW89_WW][19] = 76,
- [1][1][RTW89_WW][21] = 76,
- [1][1][RTW89_WW][23] = 76,
- [1][1][RTW89_WW][25] = 76,
- [1][1][RTW89_WW][27] = 76,
- [1][1][RTW89_WW][29] = 76,
- [1][1][RTW89_WW][30] = 76,
- [1][1][RTW89_WW][32] = 76,
- [1][1][RTW89_WW][34] = 76,
- [1][1][RTW89_WW][36] = 76,
- [1][1][RTW89_WW][38] = 76,
- [1][1][RTW89_WW][40] = 76,
- [1][1][RTW89_WW][42] = 76,
- [1][1][RTW89_WW][44] = 76,
- [1][1][RTW89_WW][45] = 76,
- [1][1][RTW89_WW][47] = 76,
- [1][1][RTW89_WW][49] = 76,
- [1][1][RTW89_WW][51] = 76,
- [1][1][RTW89_WW][53] = 76,
- [1][1][RTW89_WW][55] = 76,
- [1][1][RTW89_WW][57] = 76,
- [1][1][RTW89_WW][59] = 76,
- [1][1][RTW89_WW][60] = 76,
- [1][1][RTW89_WW][62] = 76,
- [1][1][RTW89_WW][64] = 76,
- [1][1][RTW89_WW][66] = 76,
- [1][1][RTW89_WW][68] = 76,
- [1][1][RTW89_WW][70] = 76,
- [1][1][RTW89_WW][72] = 76,
- [1][1][RTW89_WW][74] = 76,
- [1][1][RTW89_WW][75] = 76,
- [1][1][RTW89_WW][77] = 76,
- [1][1][RTW89_WW][79] = 76,
- [1][1][RTW89_WW][81] = 76,
- [1][1][RTW89_WW][83] = 76,
- [1][1][RTW89_WW][85] = 76,
- [1][1][RTW89_WW][87] = 76,
- [1][1][RTW89_WW][89] = 76,
- [1][1][RTW89_WW][90] = 76,
- [1][1][RTW89_WW][92] = 76,
- [1][1][RTW89_WW][94] = 76,
- [1][1][RTW89_WW][96] = 76,
- [1][1][RTW89_WW][98] = 76,
- [1][1][RTW89_WW][100] = 76,
- [1][1][RTW89_WW][102] = 76,
- [1][1][RTW89_WW][104] = 76,
- [1][1][RTW89_WW][105] = 76,
- [1][1][RTW89_WW][107] = 76,
- [1][1][RTW89_WW][109] = 76,
+ [1][1][RTW89_WW][0] = -26,
+ [1][1][RTW89_WW][2] = -28,
+ [1][1][RTW89_WW][4] = -28,
+ [1][1][RTW89_WW][6] = -28,
+ [1][1][RTW89_WW][8] = -28,
+ [1][1][RTW89_WW][10] = -28,
+ [1][1][RTW89_WW][12] = -28,
+ [1][1][RTW89_WW][14] = -28,
+ [1][1][RTW89_WW][15] = -28,
+ [1][1][RTW89_WW][17] = -28,
+ [1][1][RTW89_WW][19] = -28,
+ [1][1][RTW89_WW][21] = -28,
+ [1][1][RTW89_WW][23] = -28,
+ [1][1][RTW89_WW][25] = -28,
+ [1][1][RTW89_WW][27] = -28,
+ [1][1][RTW89_WW][29] = -28,
+ [1][1][RTW89_WW][30] = -28,
+ [1][1][RTW89_WW][32] = -28,
+ [1][1][RTW89_WW][34] = -28,
+ [1][1][RTW89_WW][36] = -28,
+ [1][1][RTW89_WW][38] = -28,
+ [1][1][RTW89_WW][40] = -28,
+ [1][1][RTW89_WW][42] = -28,
+ [1][1][RTW89_WW][44] = -28,
+ [1][1][RTW89_WW][45] = -26,
+ [1][1][RTW89_WW][47] = -28,
+ [1][1][RTW89_WW][49] = -28,
+ [1][1][RTW89_WW][51] = -28,
+ [1][1][RTW89_WW][53] = -26,
+ [1][1][RTW89_WW][55] = -28,
+ [1][1][RTW89_WW][57] = -28,
+ [1][1][RTW89_WW][59] = -28,
+ [1][1][RTW89_WW][60] = -28,
+ [1][1][RTW89_WW][62] = -28,
+ [1][1][RTW89_WW][64] = -28,
+ [1][1][RTW89_WW][66] = -28,
+ [1][1][RTW89_WW][68] = -28,
+ [1][1][RTW89_WW][70] = -26,
+ [1][1][RTW89_WW][72] = -28,
+ [1][1][RTW89_WW][74] = -28,
+ [1][1][RTW89_WW][75] = -28,
+ [1][1][RTW89_WW][77] = -28,
+ [1][1][RTW89_WW][79] = -28,
+ [1][1][RTW89_WW][81] = -28,
+ [1][1][RTW89_WW][83] = -28,
+ [1][1][RTW89_WW][85] = -28,
+ [1][1][RTW89_WW][87] = -28,
+ [1][1][RTW89_WW][89] = -26,
+ [1][1][RTW89_WW][90] = -26,
+ [1][1][RTW89_WW][92] = -26,
+ [1][1][RTW89_WW][94] = -26,
+ [1][1][RTW89_WW][96] = -26,
+ [1][1][RTW89_WW][98] = -26,
+ [1][1][RTW89_WW][100] = -26,
+ [1][1][RTW89_WW][102] = -26,
+ [1][1][RTW89_WW][104] = -26,
+ [1][1][RTW89_WW][105] = -26,
+ [1][1][RTW89_WW][107] = -22,
+ [1][1][RTW89_WW][109] = -22,
[1][1][RTW89_WW][111] = 0,
[1][1][RTW89_WW][113] = 0,
[1][1][RTW89_WW][115] = 0,
[1][1][RTW89_WW][117] = 0,
[1][1][RTW89_WW][119] = 0,
- [2][0][RTW89_WW][0] = 76,
- [2][0][RTW89_WW][2] = 76,
- [2][0][RTW89_WW][4] = 76,
- [2][0][RTW89_WW][6] = 76,
- [2][0][RTW89_WW][8] = 76,
- [2][0][RTW89_WW][10] = 76,
- [2][0][RTW89_WW][12] = 76,
- [2][0][RTW89_WW][14] = 76,
- [2][0][RTW89_WW][15] = 76,
- [2][0][RTW89_WW][17] = 76,
- [2][0][RTW89_WW][19] = 76,
- [2][0][RTW89_WW][21] = 76,
- [2][0][RTW89_WW][23] = 76,
- [2][0][RTW89_WW][25] = 76,
- [2][0][RTW89_WW][27] = 76,
- [2][0][RTW89_WW][29] = 76,
- [2][0][RTW89_WW][30] = 76,
- [2][0][RTW89_WW][32] = 76,
- [2][0][RTW89_WW][34] = 76,
- [2][0][RTW89_WW][36] = 76,
- [2][0][RTW89_WW][38] = 76,
- [2][0][RTW89_WW][40] = 76,
- [2][0][RTW89_WW][42] = 76,
- [2][0][RTW89_WW][44] = 76,
- [2][0][RTW89_WW][45] = 76,
- [2][0][RTW89_WW][47] = 76,
- [2][0][RTW89_WW][49] = 76,
- [2][0][RTW89_WW][51] = 76,
- [2][0][RTW89_WW][53] = 76,
- [2][0][RTW89_WW][55] = 76,
- [2][0][RTW89_WW][57] = 76,
- [2][0][RTW89_WW][59] = 76,
- [2][0][RTW89_WW][60] = 76,
- [2][0][RTW89_WW][62] = 76,
- [2][0][RTW89_WW][64] = 76,
- [2][0][RTW89_WW][66] = 76,
- [2][0][RTW89_WW][68] = 76,
- [2][0][RTW89_WW][70] = 76,
- [2][0][RTW89_WW][72] = 76,
- [2][0][RTW89_WW][74] = 76,
- [2][0][RTW89_WW][75] = 76,
- [2][0][RTW89_WW][77] = 76,
- [2][0][RTW89_WW][79] = 76,
- [2][0][RTW89_WW][81] = 76,
- [2][0][RTW89_WW][83] = 76,
- [2][0][RTW89_WW][85] = 76,
- [2][0][RTW89_WW][87] = 76,
- [2][0][RTW89_WW][89] = 76,
- [2][0][RTW89_WW][90] = 76,
- [2][0][RTW89_WW][92] = 76,
- [2][0][RTW89_WW][94] = 76,
- [2][0][RTW89_WW][96] = 76,
- [2][0][RTW89_WW][98] = 76,
- [2][0][RTW89_WW][100] = 76,
- [2][0][RTW89_WW][102] = 76,
- [2][0][RTW89_WW][104] = 76,
- [2][0][RTW89_WW][105] = 76,
- [2][0][RTW89_WW][107] = 76,
- [2][0][RTW89_WW][109] = 76,
+ [2][0][RTW89_WW][0] = 8,
+ [2][0][RTW89_WW][2] = 8,
+ [2][0][RTW89_WW][4] = 8,
+ [2][0][RTW89_WW][6] = 8,
+ [2][0][RTW89_WW][8] = 8,
+ [2][0][RTW89_WW][10] = 8,
+ [2][0][RTW89_WW][12] = 8,
+ [2][0][RTW89_WW][14] = 8,
+ [2][0][RTW89_WW][15] = 8,
+ [2][0][RTW89_WW][17] = 8,
+ [2][0][RTW89_WW][19] = 8,
+ [2][0][RTW89_WW][21] = 8,
+ [2][0][RTW89_WW][23] = 8,
+ [2][0][RTW89_WW][25] = 8,
+ [2][0][RTW89_WW][27] = 8,
+ [2][0][RTW89_WW][29] = 8,
+ [2][0][RTW89_WW][30] = 8,
+ [2][0][RTW89_WW][32] = 8,
+ [2][0][RTW89_WW][34] = 8,
+ [2][0][RTW89_WW][36] = 8,
+ [2][0][RTW89_WW][38] = 8,
+ [2][0][RTW89_WW][40] = 8,
+ [2][0][RTW89_WW][42] = 8,
+ [2][0][RTW89_WW][44] = 8,
+ [2][0][RTW89_WW][45] = 8,
+ [2][0][RTW89_WW][47] = 8,
+ [2][0][RTW89_WW][49] = 8,
+ [2][0][RTW89_WW][51] = 8,
+ [2][0][RTW89_WW][53] = 8,
+ [2][0][RTW89_WW][55] = 8,
+ [2][0][RTW89_WW][57] = 8,
+ [2][0][RTW89_WW][59] = 8,
+ [2][0][RTW89_WW][60] = 8,
+ [2][0][RTW89_WW][62] = 8,
+ [2][0][RTW89_WW][64] = 8,
+ [2][0][RTW89_WW][66] = 8,
+ [2][0][RTW89_WW][68] = 8,
+ [2][0][RTW89_WW][70] = 8,
+ [2][0][RTW89_WW][72] = 8,
+ [2][0][RTW89_WW][74] = 8,
+ [2][0][RTW89_WW][75] = 8,
+ [2][0][RTW89_WW][77] = 8,
+ [2][0][RTW89_WW][79] = 8,
+ [2][0][RTW89_WW][81] = 8,
+ [2][0][RTW89_WW][83] = 8,
+ [2][0][RTW89_WW][85] = 8,
+ [2][0][RTW89_WW][87] = 8,
+ [2][0][RTW89_WW][89] = 8,
+ [2][0][RTW89_WW][90] = 8,
+ [2][0][RTW89_WW][92] = 8,
+ [2][0][RTW89_WW][94] = 8,
+ [2][0][RTW89_WW][96] = 8,
+ [2][0][RTW89_WW][98] = 8,
+ [2][0][RTW89_WW][100] = 8,
+ [2][0][RTW89_WW][102] = 8,
+ [2][0][RTW89_WW][104] = 8,
+ [2][0][RTW89_WW][105] = 8,
+ [2][0][RTW89_WW][107] = 10,
+ [2][0][RTW89_WW][109] = 12,
[2][0][RTW89_WW][111] = 0,
[2][0][RTW89_WW][113] = 0,
[2][0][RTW89_WW][115] = 0,
[2][0][RTW89_WW][117] = 0,
[2][0][RTW89_WW][119] = 0,
- [2][1][RTW89_WW][0] = 76,
- [2][1][RTW89_WW][2] = 76,
- [2][1][RTW89_WW][4] = 76,
- [2][1][RTW89_WW][6] = 76,
- [2][1][RTW89_WW][8] = 76,
- [2][1][RTW89_WW][10] = 76,
- [2][1][RTW89_WW][12] = 76,
- [2][1][RTW89_WW][14] = 76,
- [2][1][RTW89_WW][15] = 76,
- [2][1][RTW89_WW][17] = 76,
- [2][1][RTW89_WW][19] = 76,
- [2][1][RTW89_WW][21] = 76,
- [2][1][RTW89_WW][23] = 76,
- [2][1][RTW89_WW][25] = 76,
- [2][1][RTW89_WW][27] = 76,
- [2][1][RTW89_WW][29] = 76,
- [2][1][RTW89_WW][30] = 76,
- [2][1][RTW89_WW][32] = 76,
- [2][1][RTW89_WW][34] = 76,
- [2][1][RTW89_WW][36] = 76,
- [2][1][RTW89_WW][38] = 76,
- [2][1][RTW89_WW][40] = 76,
- [2][1][RTW89_WW][42] = 76,
- [2][1][RTW89_WW][44] = 76,
- [2][1][RTW89_WW][45] = 76,
- [2][1][RTW89_WW][47] = 76,
- [2][1][RTW89_WW][49] = 76,
- [2][1][RTW89_WW][51] = 76,
- [2][1][RTW89_WW][53] = 76,
- [2][1][RTW89_WW][55] = 76,
- [2][1][RTW89_WW][57] = 76,
- [2][1][RTW89_WW][59] = 76,
- [2][1][RTW89_WW][60] = 76,
- [2][1][RTW89_WW][62] = 76,
- [2][1][RTW89_WW][64] = 76,
- [2][1][RTW89_WW][66] = 76,
- [2][1][RTW89_WW][68] = 76,
- [2][1][RTW89_WW][70] = 76,
- [2][1][RTW89_WW][72] = 76,
- [2][1][RTW89_WW][74] = 76,
- [2][1][RTW89_WW][75] = 76,
- [2][1][RTW89_WW][77] = 76,
- [2][1][RTW89_WW][79] = 76,
- [2][1][RTW89_WW][81] = 76,
- [2][1][RTW89_WW][83] = 76,
- [2][1][RTW89_WW][85] = 76,
- [2][1][RTW89_WW][87] = 76,
- [2][1][RTW89_WW][89] = 76,
- [2][1][RTW89_WW][90] = 76,
- [2][1][RTW89_WW][92] = 76,
- [2][1][RTW89_WW][94] = 76,
- [2][1][RTW89_WW][96] = 76,
- [2][1][RTW89_WW][98] = 76,
- [2][1][RTW89_WW][100] = 76,
- [2][1][RTW89_WW][102] = 76,
- [2][1][RTW89_WW][104] = 76,
- [2][1][RTW89_WW][105] = 76,
- [2][1][RTW89_WW][107] = 76,
- [2][1][RTW89_WW][109] = 76,
+ [2][1][RTW89_WW][0] = -16,
+ [2][1][RTW89_WW][2] = -16,
+ [2][1][RTW89_WW][4] = -16,
+ [2][1][RTW89_WW][6] = -16,
+ [2][1][RTW89_WW][8] = -16,
+ [2][1][RTW89_WW][10] = -16,
+ [2][1][RTW89_WW][12] = -16,
+ [2][1][RTW89_WW][14] = -16,
+ [2][1][RTW89_WW][15] = -16,
+ [2][1][RTW89_WW][17] = -16,
+ [2][1][RTW89_WW][19] = -16,
+ [2][1][RTW89_WW][21] = -16,
+ [2][1][RTW89_WW][23] = -16,
+ [2][1][RTW89_WW][25] = -16,
+ [2][1][RTW89_WW][27] = -16,
+ [2][1][RTW89_WW][29] = -16,
+ [2][1][RTW89_WW][30] = -16,
+ [2][1][RTW89_WW][32] = -16,
+ [2][1][RTW89_WW][34] = -16,
+ [2][1][RTW89_WW][36] = -16,
+ [2][1][RTW89_WW][38] = -16,
+ [2][1][RTW89_WW][40] = -16,
+ [2][1][RTW89_WW][42] = -16,
+ [2][1][RTW89_WW][44] = -16,
+ [2][1][RTW89_WW][45] = -16,
+ [2][1][RTW89_WW][47] = -16,
+ [2][1][RTW89_WW][49] = -16,
+ [2][1][RTW89_WW][51] = -16,
+ [2][1][RTW89_WW][53] = -16,
+ [2][1][RTW89_WW][55] = -16,
+ [2][1][RTW89_WW][57] = -16,
+ [2][1][RTW89_WW][59] = -16,
+ [2][1][RTW89_WW][60] = -16,
+ [2][1][RTW89_WW][62] = -16,
+ [2][1][RTW89_WW][64] = -16,
+ [2][1][RTW89_WW][66] = -16,
+ [2][1][RTW89_WW][68] = -16,
+ [2][1][RTW89_WW][70] = -16,
+ [2][1][RTW89_WW][72] = -16,
+ [2][1][RTW89_WW][74] = -16,
+ [2][1][RTW89_WW][75] = -16,
+ [2][1][RTW89_WW][77] = -16,
+ [2][1][RTW89_WW][79] = -16,
+ [2][1][RTW89_WW][81] = -16,
+ [2][1][RTW89_WW][83] = -16,
+ [2][1][RTW89_WW][85] = -18,
+ [2][1][RTW89_WW][87] = -16,
+ [2][1][RTW89_WW][89] = -16,
+ [2][1][RTW89_WW][90] = -16,
+ [2][1][RTW89_WW][92] = -16,
+ [2][1][RTW89_WW][94] = -16,
+ [2][1][RTW89_WW][96] = -16,
+ [2][1][RTW89_WW][98] = -16,
+ [2][1][RTW89_WW][100] = -16,
+ [2][1][RTW89_WW][102] = -16,
+ [2][1][RTW89_WW][104] = -16,
+ [2][1][RTW89_WW][105] = -16,
+ [2][1][RTW89_WW][107] = -12,
+ [2][1][RTW89_WW][109] = -10,
[2][1][RTW89_WW][111] = 0,
[2][1][RTW89_WW][113] = 0,
[2][1][RTW89_WW][115] = 0,
[2][1][RTW89_WW][117] = 0,
[2][1][RTW89_WW][119] = 0,
- [0][0][RTW89_FCC][0] = 76,
- [0][0][RTW89_FCC][2] = 76,
- [0][0][RTW89_FCC][4] = 76,
- [0][0][RTW89_FCC][6] = 76,
- [0][0][RTW89_FCC][8] = 76,
- [0][0][RTW89_FCC][10] = 76,
- [0][0][RTW89_FCC][12] = 76,
- [0][0][RTW89_FCC][14] = 76,
- [0][0][RTW89_FCC][15] = 76,
- [0][0][RTW89_FCC][17] = 76,
- [0][0][RTW89_FCC][19] = 76,
- [0][0][RTW89_FCC][21] = 76,
- [0][0][RTW89_FCC][23] = 76,
- [0][0][RTW89_FCC][25] = 76,
- [0][0][RTW89_FCC][27] = 76,
- [0][0][RTW89_FCC][29] = 76,
- [0][0][RTW89_FCC][30] = 76,
- [0][0][RTW89_FCC][32] = 76,
- [0][0][RTW89_FCC][34] = 76,
- [0][0][RTW89_FCC][36] = 76,
- [0][0][RTW89_FCC][38] = 76,
- [0][0][RTW89_FCC][40] = 76,
- [0][0][RTW89_FCC][42] = 76,
- [0][0][RTW89_FCC][44] = 76,
- [0][0][RTW89_FCC][45] = 76,
- [0][0][RTW89_FCC][47] = 76,
- [0][0][RTW89_FCC][49] = 76,
- [0][0][RTW89_FCC][51] = 76,
- [0][0][RTW89_FCC][53] = 76,
- [0][0][RTW89_FCC][55] = 76,
- [0][0][RTW89_FCC][57] = 76,
- [0][0][RTW89_FCC][59] = 76,
- [0][0][RTW89_FCC][60] = 76,
- [0][0][RTW89_FCC][62] = 76,
- [0][0][RTW89_FCC][64] = 76,
- [0][0][RTW89_FCC][66] = 76,
- [0][0][RTW89_FCC][68] = 76,
- [0][0][RTW89_FCC][70] = 76,
- [0][0][RTW89_FCC][72] = 76,
- [0][0][RTW89_FCC][74] = 76,
- [0][0][RTW89_FCC][75] = 76,
- [0][0][RTW89_FCC][77] = 76,
- [0][0][RTW89_FCC][79] = 76,
- [0][0][RTW89_FCC][81] = 76,
- [0][0][RTW89_FCC][83] = 76,
- [0][0][RTW89_FCC][85] = 76,
- [0][0][RTW89_FCC][87] = 76,
- [0][0][RTW89_FCC][89] = 76,
- [0][0][RTW89_FCC][90] = 76,
- [0][0][RTW89_FCC][92] = 76,
- [0][0][RTW89_FCC][94] = 76,
- [0][0][RTW89_FCC][96] = 76,
- [0][0][RTW89_FCC][98] = 76,
- [0][0][RTW89_FCC][100] = 76,
- [0][0][RTW89_FCC][102] = 76,
- [0][0][RTW89_FCC][104] = 76,
- [0][0][RTW89_FCC][105] = 76,
- [0][0][RTW89_FCC][107] = 76,
- [0][0][RTW89_FCC][109] = 76,
+ [0][0][RTW89_FCC][0] = -16,
+ [0][0][RTW89_ETSI][0] = 32,
+ [0][0][RTW89_FCC][2] = -18,
+ [0][0][RTW89_ETSI][2] = 32,
+ [0][0][RTW89_FCC][4] = -18,
+ [0][0][RTW89_ETSI][4] = 32,
+ [0][0][RTW89_FCC][6] = -18,
+ [0][0][RTW89_ETSI][6] = 32,
+ [0][0][RTW89_FCC][8] = -18,
+ [0][0][RTW89_ETSI][8] = 32,
+ [0][0][RTW89_FCC][10] = -18,
+ [0][0][RTW89_ETSI][10] = 32,
+ [0][0][RTW89_FCC][12] = -18,
+ [0][0][RTW89_ETSI][12] = 32,
+ [0][0][RTW89_FCC][14] = -18,
+ [0][0][RTW89_ETSI][14] = 32,
+ [0][0][RTW89_FCC][15] = -18,
+ [0][0][RTW89_ETSI][15] = 32,
+ [0][0][RTW89_FCC][17] = -18,
+ [0][0][RTW89_ETSI][17] = 32,
+ [0][0][RTW89_FCC][19] = -18,
+ [0][0][RTW89_ETSI][19] = 32,
+ [0][0][RTW89_FCC][21] = -18,
+ [0][0][RTW89_ETSI][21] = 32,
+ [0][0][RTW89_FCC][23] = -18,
+ [0][0][RTW89_ETSI][23] = 32,
+ [0][0][RTW89_FCC][25] = -18,
+ [0][0][RTW89_ETSI][25] = 32,
+ [0][0][RTW89_FCC][27] = -18,
+ [0][0][RTW89_ETSI][27] = 32,
+ [0][0][RTW89_FCC][29] = -18,
+ [0][0][RTW89_ETSI][29] = 32,
+ [0][0][RTW89_FCC][30] = -18,
+ [0][0][RTW89_ETSI][30] = 32,
+ [0][0][RTW89_FCC][32] = -18,
+ [0][0][RTW89_ETSI][32] = 32,
+ [0][0][RTW89_FCC][34] = -18,
+ [0][0][RTW89_ETSI][34] = 32,
+ [0][0][RTW89_FCC][36] = -18,
+ [0][0][RTW89_ETSI][36] = 32,
+ [0][0][RTW89_FCC][38] = -18,
+ [0][0][RTW89_ETSI][38] = 32,
+ [0][0][RTW89_FCC][40] = -18,
+ [0][0][RTW89_ETSI][40] = 32,
+ [0][0][RTW89_FCC][42] = -18,
+ [0][0][RTW89_ETSI][42] = 32,
+ [0][0][RTW89_FCC][44] = -16,
+ [0][0][RTW89_ETSI][44] = 32,
+ [0][0][RTW89_FCC][45] = -16,
+ [0][0][RTW89_ETSI][45] = 127,
+ [0][0][RTW89_FCC][47] = -18,
+ [0][0][RTW89_ETSI][47] = 127,
+ [0][0][RTW89_FCC][49] = -18,
+ [0][0][RTW89_ETSI][49] = 127,
+ [0][0][RTW89_FCC][51] = -18,
+ [0][0][RTW89_ETSI][51] = 127,
+ [0][0][RTW89_FCC][53] = -16,
+ [0][0][RTW89_ETSI][53] = 127,
+ [0][0][RTW89_FCC][55] = -18,
+ [0][0][RTW89_ETSI][55] = 127,
+ [0][0][RTW89_FCC][57] = -18,
+ [0][0][RTW89_ETSI][57] = 127,
+ [0][0][RTW89_FCC][59] = -18,
+ [0][0][RTW89_ETSI][59] = 127,
+ [0][0][RTW89_FCC][60] = -18,
+ [0][0][RTW89_ETSI][60] = 127,
+ [0][0][RTW89_FCC][62] = -18,
+ [0][0][RTW89_ETSI][62] = 127,
+ [0][0][RTW89_FCC][64] = -18,
+ [0][0][RTW89_ETSI][64] = 127,
+ [0][0][RTW89_FCC][66] = -18,
+ [0][0][RTW89_ETSI][66] = 127,
+ [0][0][RTW89_FCC][68] = -18,
+ [0][0][RTW89_ETSI][68] = 127,
+ [0][0][RTW89_FCC][70] = -16,
+ [0][0][RTW89_ETSI][70] = 127,
+ [0][0][RTW89_FCC][72] = -18,
+ [0][0][RTW89_ETSI][72] = 127,
+ [0][0][RTW89_FCC][74] = -18,
+ [0][0][RTW89_ETSI][74] = 127,
+ [0][0][RTW89_FCC][75] = -18,
+ [0][0][RTW89_ETSI][75] = 127,
+ [0][0][RTW89_FCC][77] = -18,
+ [0][0][RTW89_ETSI][77] = 127,
+ [0][0][RTW89_FCC][79] = -18,
+ [0][0][RTW89_ETSI][79] = 127,
+ [0][0][RTW89_FCC][81] = -18,
+ [0][0][RTW89_ETSI][81] = 127,
+ [0][0][RTW89_FCC][83] = -18,
+ [0][0][RTW89_ETSI][83] = 127,
+ [0][0][RTW89_FCC][85] = -18,
+ [0][0][RTW89_ETSI][85] = 127,
+ [0][0][RTW89_FCC][87] = -16,
+ [0][0][RTW89_ETSI][87] = 127,
+ [0][0][RTW89_FCC][89] = -16,
+ [0][0][RTW89_ETSI][89] = 127,
+ [0][0][RTW89_FCC][90] = -16,
+ [0][0][RTW89_ETSI][90] = 127,
+ [0][0][RTW89_FCC][92] = -16,
+ [0][0][RTW89_ETSI][92] = 127,
+ [0][0][RTW89_FCC][94] = -16,
+ [0][0][RTW89_ETSI][94] = 127,
+ [0][0][RTW89_FCC][96] = -16,
+ [0][0][RTW89_ETSI][96] = 127,
+ [0][0][RTW89_FCC][98] = -16,
+ [0][0][RTW89_ETSI][98] = 127,
+ [0][0][RTW89_FCC][100] = -16,
+ [0][0][RTW89_ETSI][100] = 127,
+ [0][0][RTW89_FCC][102] = -16,
+ [0][0][RTW89_ETSI][102] = 127,
+ [0][0][RTW89_FCC][104] = -16,
+ [0][0][RTW89_ETSI][104] = 127,
+ [0][0][RTW89_FCC][105] = -16,
+ [0][0][RTW89_ETSI][105] = 127,
+ [0][0][RTW89_FCC][107] = -12,
+ [0][0][RTW89_ETSI][107] = 127,
+ [0][0][RTW89_FCC][109] = -12,
+ [0][0][RTW89_ETSI][109] = 127,
[0][0][RTW89_FCC][111] = 127,
+ [0][0][RTW89_ETSI][111] = 127,
[0][0][RTW89_FCC][113] = 127,
+ [0][0][RTW89_ETSI][113] = 127,
[0][0][RTW89_FCC][115] = 127,
+ [0][0][RTW89_ETSI][115] = 127,
[0][0][RTW89_FCC][117] = 127,
+ [0][0][RTW89_ETSI][117] = 127,
[0][0][RTW89_FCC][119] = 127,
- [0][1][RTW89_FCC][0] = 76,
- [0][1][RTW89_FCC][2] = 76,
- [0][1][RTW89_FCC][4] = 76,
- [0][1][RTW89_FCC][6] = 76,
- [0][1][RTW89_FCC][8] = 76,
- [0][1][RTW89_FCC][10] = 76,
- [0][1][RTW89_FCC][12] = 76,
- [0][1][RTW89_FCC][14] = 76,
- [0][1][RTW89_FCC][15] = 76,
- [0][1][RTW89_FCC][17] = 76,
- [0][1][RTW89_FCC][19] = 76,
- [0][1][RTW89_FCC][21] = 76,
- [0][1][RTW89_FCC][23] = 76,
- [0][1][RTW89_FCC][25] = 76,
- [0][1][RTW89_FCC][27] = 76,
- [0][1][RTW89_FCC][29] = 76,
- [0][1][RTW89_FCC][30] = 76,
- [0][1][RTW89_FCC][32] = 76,
- [0][1][RTW89_FCC][34] = 76,
- [0][1][RTW89_FCC][36] = 76,
- [0][1][RTW89_FCC][38] = 76,
- [0][1][RTW89_FCC][40] = 76,
- [0][1][RTW89_FCC][42] = 76,
- [0][1][RTW89_FCC][44] = 76,
- [0][1][RTW89_FCC][45] = 76,
- [0][1][RTW89_FCC][47] = 76,
- [0][1][RTW89_FCC][49] = 76,
- [0][1][RTW89_FCC][51] = 76,
- [0][1][RTW89_FCC][53] = 76,
- [0][1][RTW89_FCC][55] = 76,
- [0][1][RTW89_FCC][57] = 76,
- [0][1][RTW89_FCC][59] = 76,
- [0][1][RTW89_FCC][60] = 76,
- [0][1][RTW89_FCC][62] = 76,
- [0][1][RTW89_FCC][64] = 76,
- [0][1][RTW89_FCC][66] = 76,
- [0][1][RTW89_FCC][68] = 76,
- [0][1][RTW89_FCC][70] = 76,
- [0][1][RTW89_FCC][72] = 76,
- [0][1][RTW89_FCC][74] = 76,
- [0][1][RTW89_FCC][75] = 76,
- [0][1][RTW89_FCC][77] = 76,
- [0][1][RTW89_FCC][79] = 76,
- [0][1][RTW89_FCC][81] = 76,
- [0][1][RTW89_FCC][83] = 76,
- [0][1][RTW89_FCC][85] = 76,
- [0][1][RTW89_FCC][87] = 76,
- [0][1][RTW89_FCC][89] = 76,
- [0][1][RTW89_FCC][90] = 76,
- [0][1][RTW89_FCC][92] = 76,
- [0][1][RTW89_FCC][94] = 76,
- [0][1][RTW89_FCC][96] = 76,
- [0][1][RTW89_FCC][98] = 76,
- [0][1][RTW89_FCC][100] = 76,
- [0][1][RTW89_FCC][102] = 76,
- [0][1][RTW89_FCC][104] = 76,
- [0][1][RTW89_FCC][105] = 76,
- [0][1][RTW89_FCC][107] = 76,
- [0][1][RTW89_FCC][109] = 76,
+ [0][0][RTW89_ETSI][119] = 127,
+ [0][1][RTW89_FCC][0] = -40,
+ [0][1][RTW89_ETSI][0] = 20,
+ [0][1][RTW89_FCC][2] = -40,
+ [0][1][RTW89_ETSI][2] = 20,
+ [0][1][RTW89_FCC][4] = -40,
+ [0][1][RTW89_ETSI][4] = 20,
+ [0][1][RTW89_FCC][6] = -40,
+ [0][1][RTW89_ETSI][6] = 20,
+ [0][1][RTW89_FCC][8] = -40,
+ [0][1][RTW89_ETSI][8] = 20,
+ [0][1][RTW89_FCC][10] = -40,
+ [0][1][RTW89_ETSI][10] = 20,
+ [0][1][RTW89_FCC][12] = -40,
+ [0][1][RTW89_ETSI][12] = 20,
+ [0][1][RTW89_FCC][14] = -40,
+ [0][1][RTW89_ETSI][14] = 20,
+ [0][1][RTW89_FCC][15] = -40,
+ [0][1][RTW89_ETSI][15] = 20,
+ [0][1][RTW89_FCC][17] = -40,
+ [0][1][RTW89_ETSI][17] = 20,
+ [0][1][RTW89_FCC][19] = -40,
+ [0][1][RTW89_ETSI][19] = 20,
+ [0][1][RTW89_FCC][21] = -40,
+ [0][1][RTW89_ETSI][21] = 20,
+ [0][1][RTW89_FCC][23] = -40,
+ [0][1][RTW89_ETSI][23] = 20,
+ [0][1][RTW89_FCC][25] = -40,
+ [0][1][RTW89_ETSI][25] = 20,
+ [0][1][RTW89_FCC][27] = -40,
+ [0][1][RTW89_ETSI][27] = 20,
+ [0][1][RTW89_FCC][29] = -40,
+ [0][1][RTW89_ETSI][29] = 20,
+ [0][1][RTW89_FCC][30] = -40,
+ [0][1][RTW89_ETSI][30] = 20,
+ [0][1][RTW89_FCC][32] = -40,
+ [0][1][RTW89_ETSI][32] = 20,
+ [0][1][RTW89_FCC][34] = -40,
+ [0][1][RTW89_ETSI][34] = 20,
+ [0][1][RTW89_FCC][36] = -40,
+ [0][1][RTW89_ETSI][36] = 20,
+ [0][1][RTW89_FCC][38] = -40,
+ [0][1][RTW89_ETSI][38] = 20,
+ [0][1][RTW89_FCC][40] = -40,
+ [0][1][RTW89_ETSI][40] = 20,
+ [0][1][RTW89_FCC][42] = -40,
+ [0][1][RTW89_ETSI][42] = 20,
+ [0][1][RTW89_FCC][44] = -40,
+ [0][1][RTW89_ETSI][44] = 20,
+ [0][1][RTW89_FCC][45] = -40,
+ [0][1][RTW89_ETSI][45] = 127,
+ [0][1][RTW89_FCC][47] = -40,
+ [0][1][RTW89_ETSI][47] = 127,
+ [0][1][RTW89_FCC][49] = -40,
+ [0][1][RTW89_ETSI][49] = 127,
+ [0][1][RTW89_FCC][51] = -40,
+ [0][1][RTW89_ETSI][51] = 127,
+ [0][1][RTW89_FCC][53] = -40,
+ [0][1][RTW89_ETSI][53] = 127,
+ [0][1][RTW89_FCC][55] = -40,
+ [0][1][RTW89_ETSI][55] = 127,
+ [0][1][RTW89_FCC][57] = -40,
+ [0][1][RTW89_ETSI][57] = 127,
+ [0][1][RTW89_FCC][59] = -40,
+ [0][1][RTW89_ETSI][59] = 127,
+ [0][1][RTW89_FCC][60] = -40,
+ [0][1][RTW89_ETSI][60] = 127,
+ [0][1][RTW89_FCC][62] = -40,
+ [0][1][RTW89_ETSI][62] = 127,
+ [0][1][RTW89_FCC][64] = -40,
+ [0][1][RTW89_ETSI][64] = 127,
+ [0][1][RTW89_FCC][66] = -40,
+ [0][1][RTW89_ETSI][66] = 127,
+ [0][1][RTW89_FCC][68] = -40,
+ [0][1][RTW89_ETSI][68] = 127,
+ [0][1][RTW89_FCC][70] = -38,
+ [0][1][RTW89_ETSI][70] = 127,
+ [0][1][RTW89_FCC][72] = -38,
+ [0][1][RTW89_ETSI][72] = 127,
+ [0][1][RTW89_FCC][74] = -38,
+ [0][1][RTW89_ETSI][74] = 127,
+ [0][1][RTW89_FCC][75] = -38,
+ [0][1][RTW89_ETSI][75] = 127,
+ [0][1][RTW89_FCC][77] = -38,
+ [0][1][RTW89_ETSI][77] = 127,
+ [0][1][RTW89_FCC][79] = -38,
+ [0][1][RTW89_ETSI][79] = 127,
+ [0][1][RTW89_FCC][81] = -38,
+ [0][1][RTW89_ETSI][81] = 127,
+ [0][1][RTW89_FCC][83] = -38,
+ [0][1][RTW89_ETSI][83] = 127,
+ [0][1][RTW89_FCC][85] = -38,
+ [0][1][RTW89_ETSI][85] = 127,
+ [0][1][RTW89_FCC][87] = -40,
+ [0][1][RTW89_ETSI][87] = 127,
+ [0][1][RTW89_FCC][89] = -38,
+ [0][1][RTW89_ETSI][89] = 127,
+ [0][1][RTW89_FCC][90] = -38,
+ [0][1][RTW89_ETSI][90] = 127,
+ [0][1][RTW89_FCC][92] = -38,
+ [0][1][RTW89_ETSI][92] = 127,
+ [0][1][RTW89_FCC][94] = -38,
+ [0][1][RTW89_ETSI][94] = 127,
+ [0][1][RTW89_FCC][96] = -38,
+ [0][1][RTW89_ETSI][96] = 127,
+ [0][1][RTW89_FCC][98] = -38,
+ [0][1][RTW89_ETSI][98] = 127,
+ [0][1][RTW89_FCC][100] = -38,
+ [0][1][RTW89_ETSI][100] = 127,
+ [0][1][RTW89_FCC][102] = -38,
+ [0][1][RTW89_ETSI][102] = 127,
+ [0][1][RTW89_FCC][104] = -38,
+ [0][1][RTW89_ETSI][104] = 127,
+ [0][1][RTW89_FCC][105] = -38,
+ [0][1][RTW89_ETSI][105] = 127,
+ [0][1][RTW89_FCC][107] = -34,
+ [0][1][RTW89_ETSI][107] = 127,
+ [0][1][RTW89_FCC][109] = -34,
+ [0][1][RTW89_ETSI][109] = 127,
[0][1][RTW89_FCC][111] = 127,
+ [0][1][RTW89_ETSI][111] = 127,
[0][1][RTW89_FCC][113] = 127,
+ [0][1][RTW89_ETSI][113] = 127,
[0][1][RTW89_FCC][115] = 127,
+ [0][1][RTW89_ETSI][115] = 127,
[0][1][RTW89_FCC][117] = 127,
+ [0][1][RTW89_ETSI][117] = 127,
[0][1][RTW89_FCC][119] = 127,
- [1][0][RTW89_FCC][0] = 76,
- [1][0][RTW89_FCC][2] = 76,
- [1][0][RTW89_FCC][4] = 76,
- [1][0][RTW89_FCC][6] = 76,
- [1][0][RTW89_FCC][8] = 76,
- [1][0][RTW89_FCC][10] = 76,
- [1][0][RTW89_FCC][12] = 76,
- [1][0][RTW89_FCC][14] = 76,
- [1][0][RTW89_FCC][15] = 76,
- [1][0][RTW89_FCC][17] = 76,
- [1][0][RTW89_FCC][19] = 76,
- [1][0][RTW89_FCC][21] = 76,
- [1][0][RTW89_FCC][23] = 76,
- [1][0][RTW89_FCC][25] = 76,
- [1][0][RTW89_FCC][27] = 76,
- [1][0][RTW89_FCC][29] = 76,
- [1][0][RTW89_FCC][30] = 76,
- [1][0][RTW89_FCC][32] = 76,
- [1][0][RTW89_FCC][34] = 76,
- [1][0][RTW89_FCC][36] = 76,
- [1][0][RTW89_FCC][38] = 76,
- [1][0][RTW89_FCC][40] = 76,
- [1][0][RTW89_FCC][42] = 76,
- [1][0][RTW89_FCC][44] = 76,
- [1][0][RTW89_FCC][45] = 76,
- [1][0][RTW89_FCC][47] = 76,
- [1][0][RTW89_FCC][49] = 76,
- [1][0][RTW89_FCC][51] = 76,
- [1][0][RTW89_FCC][53] = 76,
- [1][0][RTW89_FCC][55] = 76,
- [1][0][RTW89_FCC][57] = 76,
- [1][0][RTW89_FCC][59] = 76,
- [1][0][RTW89_FCC][60] = 76,
- [1][0][RTW89_FCC][62] = 76,
- [1][0][RTW89_FCC][64] = 76,
- [1][0][RTW89_FCC][66] = 76,
- [1][0][RTW89_FCC][68] = 76,
- [1][0][RTW89_FCC][70] = 76,
- [1][0][RTW89_FCC][72] = 76,
- [1][0][RTW89_FCC][74] = 76,
- [1][0][RTW89_FCC][75] = 76,
- [1][0][RTW89_FCC][77] = 76,
- [1][0][RTW89_FCC][79] = 76,
- [1][0][RTW89_FCC][81] = 76,
- [1][0][RTW89_FCC][83] = 76,
- [1][0][RTW89_FCC][85] = 76,
- [1][0][RTW89_FCC][87] = 76,
- [1][0][RTW89_FCC][89] = 76,
- [1][0][RTW89_FCC][90] = 76,
- [1][0][RTW89_FCC][92] = 76,
- [1][0][RTW89_FCC][94] = 76,
- [1][0][RTW89_FCC][96] = 76,
- [1][0][RTW89_FCC][98] = 76,
- [1][0][RTW89_FCC][100] = 76,
- [1][0][RTW89_FCC][102] = 76,
- [1][0][RTW89_FCC][104] = 76,
- [1][0][RTW89_FCC][105] = 76,
- [1][0][RTW89_FCC][107] = 76,
- [1][0][RTW89_FCC][109] = 76,
+ [0][1][RTW89_ETSI][119] = 127,
+ [1][0][RTW89_FCC][0] = -4,
+ [1][0][RTW89_ETSI][0] = 46,
+ [1][0][RTW89_FCC][2] = -4,
+ [1][0][RTW89_ETSI][2] = 46,
+ [1][0][RTW89_FCC][4] = -4,
+ [1][0][RTW89_ETSI][4] = 46,
+ [1][0][RTW89_FCC][6] = -4,
+ [1][0][RTW89_ETSI][6] = 46,
+ [1][0][RTW89_FCC][8] = -4,
+ [1][0][RTW89_ETSI][8] = 46,
+ [1][0][RTW89_FCC][10] = -4,
+ [1][0][RTW89_ETSI][10] = 46,
+ [1][0][RTW89_FCC][12] = -4,
+ [1][0][RTW89_ETSI][12] = 46,
+ [1][0][RTW89_FCC][14] = -4,
+ [1][0][RTW89_ETSI][14] = 46,
+ [1][0][RTW89_FCC][15] = -4,
+ [1][0][RTW89_ETSI][15] = 46,
+ [1][0][RTW89_FCC][17] = -4,
+ [1][0][RTW89_ETSI][17] = 46,
+ [1][0][RTW89_FCC][19] = -4,
+ [1][0][RTW89_ETSI][19] = 46,
+ [1][0][RTW89_FCC][21] = -4,
+ [1][0][RTW89_ETSI][21] = 46,
+ [1][0][RTW89_FCC][23] = -4,
+ [1][0][RTW89_ETSI][23] = 46,
+ [1][0][RTW89_FCC][25] = -4,
+ [1][0][RTW89_ETSI][25] = 46,
+ [1][0][RTW89_FCC][27] = -4,
+ [1][0][RTW89_ETSI][27] = 46,
+ [1][0][RTW89_FCC][29] = -4,
+ [1][0][RTW89_ETSI][29] = 46,
+ [1][0][RTW89_FCC][30] = -4,
+ [1][0][RTW89_ETSI][30] = 46,
+ [1][0][RTW89_FCC][32] = -4,
+ [1][0][RTW89_ETSI][32] = 46,
+ [1][0][RTW89_FCC][34] = -4,
+ [1][0][RTW89_ETSI][34] = 46,
+ [1][0][RTW89_FCC][36] = -4,
+ [1][0][RTW89_ETSI][36] = 46,
+ [1][0][RTW89_FCC][38] = -4,
+ [1][0][RTW89_ETSI][38] = 46,
+ [1][0][RTW89_FCC][40] = -4,
+ [1][0][RTW89_ETSI][40] = 46,
+ [1][0][RTW89_FCC][42] = -4,
+ [1][0][RTW89_ETSI][42] = 46,
+ [1][0][RTW89_FCC][44] = -4,
+ [1][0][RTW89_ETSI][44] = 46,
+ [1][0][RTW89_FCC][45] = -4,
+ [1][0][RTW89_ETSI][45] = 127,
+ [1][0][RTW89_FCC][47] = -4,
+ [1][0][RTW89_ETSI][47] = 127,
+ [1][0][RTW89_FCC][49] = -4,
+ [1][0][RTW89_ETSI][49] = 127,
+ [1][0][RTW89_FCC][51] = -4,
+ [1][0][RTW89_ETSI][51] = 127,
+ [1][0][RTW89_FCC][53] = -4,
+ [1][0][RTW89_ETSI][53] = 127,
+ [1][0][RTW89_FCC][55] = -4,
+ [1][0][RTW89_ETSI][55] = 127,
+ [1][0][RTW89_FCC][57] = -4,
+ [1][0][RTW89_ETSI][57] = 127,
+ [1][0][RTW89_FCC][59] = -4,
+ [1][0][RTW89_ETSI][59] = 127,
+ [1][0][RTW89_FCC][60] = -4,
+ [1][0][RTW89_ETSI][60] = 127,
+ [1][0][RTW89_FCC][62] = -4,
+ [1][0][RTW89_ETSI][62] = 127,
+ [1][0][RTW89_FCC][64] = -4,
+ [1][0][RTW89_ETSI][64] = 127,
+ [1][0][RTW89_FCC][66] = -4,
+ [1][0][RTW89_ETSI][66] = 127,
+ [1][0][RTW89_FCC][68] = -4,
+ [1][0][RTW89_ETSI][68] = 127,
+ [1][0][RTW89_FCC][70] = -4,
+ [1][0][RTW89_ETSI][70] = 127,
+ [1][0][RTW89_FCC][72] = -4,
+ [1][0][RTW89_ETSI][72] = 127,
+ [1][0][RTW89_FCC][74] = -4,
+ [1][0][RTW89_ETSI][74] = 127,
+ [1][0][RTW89_FCC][75] = -4,
+ [1][0][RTW89_ETSI][75] = 127,
+ [1][0][RTW89_FCC][77] = -4,
+ [1][0][RTW89_ETSI][77] = 127,
+ [1][0][RTW89_FCC][79] = -4,
+ [1][0][RTW89_ETSI][79] = 127,
+ [1][0][RTW89_FCC][81] = -4,
+ [1][0][RTW89_ETSI][81] = 127,
+ [1][0][RTW89_FCC][83] = -4,
+ [1][0][RTW89_ETSI][83] = 127,
+ [1][0][RTW89_FCC][85] = -4,
+ [1][0][RTW89_ETSI][85] = 127,
+ [1][0][RTW89_FCC][87] = -4,
+ [1][0][RTW89_ETSI][87] = 127,
+ [1][0][RTW89_FCC][89] = -4,
+ [1][0][RTW89_ETSI][89] = 127,
+ [1][0][RTW89_FCC][90] = -4,
+ [1][0][RTW89_ETSI][90] = 127,
+ [1][0][RTW89_FCC][92] = -4,
+ [1][0][RTW89_ETSI][92] = 127,
+ [1][0][RTW89_FCC][94] = -4,
+ [1][0][RTW89_ETSI][94] = 127,
+ [1][0][RTW89_FCC][96] = -4,
+ [1][0][RTW89_ETSI][96] = 127,
+ [1][0][RTW89_FCC][98] = -4,
+ [1][0][RTW89_ETSI][98] = 127,
+ [1][0][RTW89_FCC][100] = -4,
+ [1][0][RTW89_ETSI][100] = 127,
+ [1][0][RTW89_FCC][102] = -4,
+ [1][0][RTW89_ETSI][102] = 127,
+ [1][0][RTW89_FCC][104] = -4,
+ [1][0][RTW89_ETSI][104] = 127,
+ [1][0][RTW89_FCC][105] = -4,
+ [1][0][RTW89_ETSI][105] = 127,
+ [1][0][RTW89_FCC][107] = 0,
+ [1][0][RTW89_ETSI][107] = 127,
+ [1][0][RTW89_FCC][109] = 2,
+ [1][0][RTW89_ETSI][109] = 127,
[1][0][RTW89_FCC][111] = 127,
+ [1][0][RTW89_ETSI][111] = 127,
[1][0][RTW89_FCC][113] = 127,
+ [1][0][RTW89_ETSI][113] = 127,
[1][0][RTW89_FCC][115] = 127,
+ [1][0][RTW89_ETSI][115] = 127,
[1][0][RTW89_FCC][117] = 127,
+ [1][0][RTW89_ETSI][117] = 127,
[1][0][RTW89_FCC][119] = 127,
- [1][1][RTW89_FCC][0] = 76,
- [1][1][RTW89_FCC][2] = 76,
- [1][1][RTW89_FCC][4] = 76,
- [1][1][RTW89_FCC][6] = 76,
- [1][1][RTW89_FCC][8] = 76,
- [1][1][RTW89_FCC][10] = 76,
- [1][1][RTW89_FCC][12] = 76,
- [1][1][RTW89_FCC][14] = 76,
- [1][1][RTW89_FCC][15] = 76,
- [1][1][RTW89_FCC][17] = 76,
- [1][1][RTW89_FCC][19] = 76,
- [1][1][RTW89_FCC][21] = 76,
- [1][1][RTW89_FCC][23] = 76,
- [1][1][RTW89_FCC][25] = 76,
- [1][1][RTW89_FCC][27] = 76,
- [1][1][RTW89_FCC][29] = 76,
- [1][1][RTW89_FCC][30] = 76,
- [1][1][RTW89_FCC][32] = 76,
- [1][1][RTW89_FCC][34] = 76,
- [1][1][RTW89_FCC][36] = 76,
- [1][1][RTW89_FCC][38] = 76,
- [1][1][RTW89_FCC][40] = 76,
- [1][1][RTW89_FCC][42] = 76,
- [1][1][RTW89_FCC][44] = 76,
- [1][1][RTW89_FCC][45] = 76,
- [1][1][RTW89_FCC][47] = 76,
- [1][1][RTW89_FCC][49] = 76,
- [1][1][RTW89_FCC][51] = 76,
- [1][1][RTW89_FCC][53] = 76,
- [1][1][RTW89_FCC][55] = 76,
- [1][1][RTW89_FCC][57] = 76,
- [1][1][RTW89_FCC][59] = 76,
- [1][1][RTW89_FCC][60] = 76,
- [1][1][RTW89_FCC][62] = 76,
- [1][1][RTW89_FCC][64] = 76,
- [1][1][RTW89_FCC][66] = 76,
- [1][1][RTW89_FCC][68] = 76,
- [1][1][RTW89_FCC][70] = 76,
- [1][1][RTW89_FCC][72] = 76,
- [1][1][RTW89_FCC][74] = 76,
- [1][1][RTW89_FCC][75] = 76,
- [1][1][RTW89_FCC][77] = 76,
- [1][1][RTW89_FCC][79] = 76,
- [1][1][RTW89_FCC][81] = 76,
- [1][1][RTW89_FCC][83] = 76,
- [1][1][RTW89_FCC][85] = 76,
- [1][1][RTW89_FCC][87] = 76,
- [1][1][RTW89_FCC][89] = 76,
- [1][1][RTW89_FCC][90] = 76,
- [1][1][RTW89_FCC][92] = 76,
- [1][1][RTW89_FCC][94] = 76,
- [1][1][RTW89_FCC][96] = 76,
- [1][1][RTW89_FCC][98] = 76,
- [1][1][RTW89_FCC][100] = 76,
- [1][1][RTW89_FCC][102] = 76,
- [1][1][RTW89_FCC][104] = 76,
- [1][1][RTW89_FCC][105] = 76,
- [1][1][RTW89_FCC][107] = 76,
- [1][1][RTW89_FCC][109] = 76,
+ [1][0][RTW89_ETSI][119] = 127,
+ [1][1][RTW89_FCC][0] = -26,
+ [1][1][RTW89_ETSI][0] = 32,
+ [1][1][RTW89_FCC][2] = -28,
+ [1][1][RTW89_ETSI][2] = 32,
+ [1][1][RTW89_FCC][4] = -28,
+ [1][1][RTW89_ETSI][4] = 32,
+ [1][1][RTW89_FCC][6] = -28,
+ [1][1][RTW89_ETSI][6] = 32,
+ [1][1][RTW89_FCC][8] = -28,
+ [1][1][RTW89_ETSI][8] = 32,
+ [1][1][RTW89_FCC][10] = -28,
+ [1][1][RTW89_ETSI][10] = 32,
+ [1][1][RTW89_FCC][12] = -28,
+ [1][1][RTW89_ETSI][12] = 32,
+ [1][1][RTW89_FCC][14] = -28,
+ [1][1][RTW89_ETSI][14] = 32,
+ [1][1][RTW89_FCC][15] = -28,
+ [1][1][RTW89_ETSI][15] = 32,
+ [1][1][RTW89_FCC][17] = -28,
+ [1][1][RTW89_ETSI][17] = 32,
+ [1][1][RTW89_FCC][19] = -28,
+ [1][1][RTW89_ETSI][19] = 32,
+ [1][1][RTW89_FCC][21] = -28,
+ [1][1][RTW89_ETSI][21] = 32,
+ [1][1][RTW89_FCC][23] = -28,
+ [1][1][RTW89_ETSI][23] = 32,
+ [1][1][RTW89_FCC][25] = -28,
+ [1][1][RTW89_ETSI][25] = 32,
+ [1][1][RTW89_FCC][27] = -28,
+ [1][1][RTW89_ETSI][27] = 32,
+ [1][1][RTW89_FCC][29] = -28,
+ [1][1][RTW89_ETSI][29] = 32,
+ [1][1][RTW89_FCC][30] = -28,
+ [1][1][RTW89_ETSI][30] = 32,
+ [1][1][RTW89_FCC][32] = -28,
+ [1][1][RTW89_ETSI][32] = 32,
+ [1][1][RTW89_FCC][34] = -28,
+ [1][1][RTW89_ETSI][34] = 32,
+ [1][1][RTW89_FCC][36] = -28,
+ [1][1][RTW89_ETSI][36] = 32,
+ [1][1][RTW89_FCC][38] = -28,
+ [1][1][RTW89_ETSI][38] = 32,
+ [1][1][RTW89_FCC][40] = -28,
+ [1][1][RTW89_ETSI][40] = 32,
+ [1][1][RTW89_FCC][42] = -28,
+ [1][1][RTW89_ETSI][42] = 32,
+ [1][1][RTW89_FCC][44] = -28,
+ [1][1][RTW89_ETSI][44] = 34,
+ [1][1][RTW89_FCC][45] = -26,
+ [1][1][RTW89_ETSI][45] = 127,
+ [1][1][RTW89_FCC][47] = -28,
+ [1][1][RTW89_ETSI][47] = 127,
+ [1][1][RTW89_FCC][49] = -28,
+ [1][1][RTW89_ETSI][49] = 127,
+ [1][1][RTW89_FCC][51] = -28,
+ [1][1][RTW89_ETSI][51] = 127,
+ [1][1][RTW89_FCC][53] = -26,
+ [1][1][RTW89_ETSI][53] = 127,
+ [1][1][RTW89_FCC][55] = -28,
+ [1][1][RTW89_ETSI][55] = 127,
+ [1][1][RTW89_FCC][57] = -28,
+ [1][1][RTW89_ETSI][57] = 127,
+ [1][1][RTW89_FCC][59] = -28,
+ [1][1][RTW89_ETSI][59] = 127,
+ [1][1][RTW89_FCC][60] = -28,
+ [1][1][RTW89_ETSI][60] = 127,
+ [1][1][RTW89_FCC][62] = -28,
+ [1][1][RTW89_ETSI][62] = 127,
+ [1][1][RTW89_FCC][64] = -28,
+ [1][1][RTW89_ETSI][64] = 127,
+ [1][1][RTW89_FCC][66] = -28,
+ [1][1][RTW89_ETSI][66] = 127,
+ [1][1][RTW89_FCC][68] = -28,
+ [1][1][RTW89_ETSI][68] = 127,
+ [1][1][RTW89_FCC][70] = -26,
+ [1][1][RTW89_ETSI][70] = 127,
+ [1][1][RTW89_FCC][72] = -28,
+ [1][1][RTW89_ETSI][72] = 127,
+ [1][1][RTW89_FCC][74] = -28,
+ [1][1][RTW89_ETSI][74] = 127,
+ [1][1][RTW89_FCC][75] = -28,
+ [1][1][RTW89_ETSI][75] = 127,
+ [1][1][RTW89_FCC][77] = -28,
+ [1][1][RTW89_ETSI][77] = 127,
+ [1][1][RTW89_FCC][79] = -28,
+ [1][1][RTW89_ETSI][79] = 127,
+ [1][1][RTW89_FCC][81] = -28,
+ [1][1][RTW89_ETSI][81] = 127,
+ [1][1][RTW89_FCC][83] = -28,
+ [1][1][RTW89_ETSI][83] = 127,
+ [1][1][RTW89_FCC][85] = -28,
+ [1][1][RTW89_ETSI][85] = 127,
+ [1][1][RTW89_FCC][87] = -28,
+ [1][1][RTW89_ETSI][87] = 127,
+ [1][1][RTW89_FCC][89] = -26,
+ [1][1][RTW89_ETSI][89] = 127,
+ [1][1][RTW89_FCC][90] = -26,
+ [1][1][RTW89_ETSI][90] = 127,
+ [1][1][RTW89_FCC][92] = -26,
+ [1][1][RTW89_ETSI][92] = 127,
+ [1][1][RTW89_FCC][94] = -26,
+ [1][1][RTW89_ETSI][94] = 127,
+ [1][1][RTW89_FCC][96] = -26,
+ [1][1][RTW89_ETSI][96] = 127,
+ [1][1][RTW89_FCC][98] = -26,
+ [1][1][RTW89_ETSI][98] = 127,
+ [1][1][RTW89_FCC][100] = -26,
+ [1][1][RTW89_ETSI][100] = 127,
+ [1][1][RTW89_FCC][102] = -26,
+ [1][1][RTW89_ETSI][102] = 127,
+ [1][1][RTW89_FCC][104] = -26,
+ [1][1][RTW89_ETSI][104] = 127,
+ [1][1][RTW89_FCC][105] = -26,
+ [1][1][RTW89_ETSI][105] = 127,
+ [1][1][RTW89_FCC][107] = -22,
+ [1][1][RTW89_ETSI][107] = 127,
+ [1][1][RTW89_FCC][109] = -22,
+ [1][1][RTW89_ETSI][109] = 127,
[1][1][RTW89_FCC][111] = 127,
+ [1][1][RTW89_ETSI][111] = 127,
[1][1][RTW89_FCC][113] = 127,
+ [1][1][RTW89_ETSI][113] = 127,
[1][1][RTW89_FCC][115] = 127,
+ [1][1][RTW89_ETSI][115] = 127,
[1][1][RTW89_FCC][117] = 127,
+ [1][1][RTW89_ETSI][117] = 127,
[1][1][RTW89_FCC][119] = 127,
- [2][0][RTW89_FCC][0] = 76,
- [2][0][RTW89_FCC][2] = 76,
- [2][0][RTW89_FCC][4] = 76,
- [2][0][RTW89_FCC][6] = 76,
- [2][0][RTW89_FCC][8] = 76,
- [2][0][RTW89_FCC][10] = 76,
- [2][0][RTW89_FCC][12] = 76,
- [2][0][RTW89_FCC][14] = 76,
- [2][0][RTW89_FCC][15] = 76,
- [2][0][RTW89_FCC][17] = 76,
- [2][0][RTW89_FCC][19] = 76,
- [2][0][RTW89_FCC][21] = 76,
- [2][0][RTW89_FCC][23] = 76,
- [2][0][RTW89_FCC][25] = 76,
- [2][0][RTW89_FCC][27] = 76,
- [2][0][RTW89_FCC][29] = 76,
- [2][0][RTW89_FCC][30] = 76,
- [2][0][RTW89_FCC][32] = 76,
- [2][0][RTW89_FCC][34] = 76,
- [2][0][RTW89_FCC][36] = 76,
- [2][0][RTW89_FCC][38] = 76,
- [2][0][RTW89_FCC][40] = 76,
- [2][0][RTW89_FCC][42] = 76,
- [2][0][RTW89_FCC][44] = 76,
- [2][0][RTW89_FCC][45] = 76,
- [2][0][RTW89_FCC][47] = 76,
- [2][0][RTW89_FCC][49] = 76,
- [2][0][RTW89_FCC][51] = 76,
- [2][0][RTW89_FCC][53] = 76,
- [2][0][RTW89_FCC][55] = 76,
- [2][0][RTW89_FCC][57] = 76,
- [2][0][RTW89_FCC][59] = 76,
- [2][0][RTW89_FCC][60] = 76,
- [2][0][RTW89_FCC][62] = 76,
- [2][0][RTW89_FCC][64] = 76,
- [2][0][RTW89_FCC][66] = 76,
- [2][0][RTW89_FCC][68] = 76,
- [2][0][RTW89_FCC][70] = 76,
- [2][0][RTW89_FCC][72] = 76,
- [2][0][RTW89_FCC][74] = 76,
- [2][0][RTW89_FCC][75] = 76,
- [2][0][RTW89_FCC][77] = 76,
- [2][0][RTW89_FCC][79] = 76,
- [2][0][RTW89_FCC][81] = 76,
- [2][0][RTW89_FCC][83] = 76,
- [2][0][RTW89_FCC][85] = 76,
- [2][0][RTW89_FCC][87] = 76,
- [2][0][RTW89_FCC][89] = 76,
- [2][0][RTW89_FCC][90] = 76,
- [2][0][RTW89_FCC][92] = 76,
- [2][0][RTW89_FCC][94] = 76,
- [2][0][RTW89_FCC][96] = 76,
- [2][0][RTW89_FCC][98] = 76,
- [2][0][RTW89_FCC][100] = 76,
- [2][0][RTW89_FCC][102] = 76,
- [2][0][RTW89_FCC][104] = 76,
- [2][0][RTW89_FCC][105] = 76,
- [2][0][RTW89_FCC][107] = 76,
- [2][0][RTW89_FCC][109] = 76,
+ [1][1][RTW89_ETSI][119] = 127,
+ [2][0][RTW89_FCC][0] = 8,
+ [2][0][RTW89_ETSI][0] = 56,
+ [2][0][RTW89_FCC][2] = 8,
+ [2][0][RTW89_ETSI][2] = 56,
+ [2][0][RTW89_FCC][4] = 8,
+ [2][0][RTW89_ETSI][4] = 56,
+ [2][0][RTW89_FCC][6] = 8,
+ [2][0][RTW89_ETSI][6] = 56,
+ [2][0][RTW89_FCC][8] = 8,
+ [2][0][RTW89_ETSI][8] = 56,
+ [2][0][RTW89_FCC][10] = 8,
+ [2][0][RTW89_ETSI][10] = 56,
+ [2][0][RTW89_FCC][12] = 8,
+ [2][0][RTW89_ETSI][12] = 56,
+ [2][0][RTW89_FCC][14] = 8,
+ [2][0][RTW89_ETSI][14] = 56,
+ [2][0][RTW89_FCC][15] = 8,
+ [2][0][RTW89_ETSI][15] = 56,
+ [2][0][RTW89_FCC][17] = 8,
+ [2][0][RTW89_ETSI][17] = 56,
+ [2][0][RTW89_FCC][19] = 8,
+ [2][0][RTW89_ETSI][19] = 56,
+ [2][0][RTW89_FCC][21] = 8,
+ [2][0][RTW89_ETSI][21] = 56,
+ [2][0][RTW89_FCC][23] = 8,
+ [2][0][RTW89_ETSI][23] = 56,
+ [2][0][RTW89_FCC][25] = 8,
+ [2][0][RTW89_ETSI][25] = 56,
+ [2][0][RTW89_FCC][27] = 8,
+ [2][0][RTW89_ETSI][27] = 56,
+ [2][0][RTW89_FCC][29] = 8,
+ [2][0][RTW89_ETSI][29] = 56,
+ [2][0][RTW89_FCC][30] = 8,
+ [2][0][RTW89_ETSI][30] = 56,
+ [2][0][RTW89_FCC][32] = 8,
+ [2][0][RTW89_ETSI][32] = 56,
+ [2][0][RTW89_FCC][34] = 8,
+ [2][0][RTW89_ETSI][34] = 56,
+ [2][0][RTW89_FCC][36] = 8,
+ [2][0][RTW89_ETSI][36] = 56,
+ [2][0][RTW89_FCC][38] = 8,
+ [2][0][RTW89_ETSI][38] = 56,
+ [2][0][RTW89_FCC][40] = 8,
+ [2][0][RTW89_ETSI][40] = 56,
+ [2][0][RTW89_FCC][42] = 8,
+ [2][0][RTW89_ETSI][42] = 56,
+ [2][0][RTW89_FCC][44] = 8,
+ [2][0][RTW89_ETSI][44] = 56,
+ [2][0][RTW89_FCC][45] = 8,
+ [2][0][RTW89_ETSI][45] = 127,
+ [2][0][RTW89_FCC][47] = 8,
+ [2][0][RTW89_ETSI][47] = 127,
+ [2][0][RTW89_FCC][49] = 8,
+ [2][0][RTW89_ETSI][49] = 127,
+ [2][0][RTW89_FCC][51] = 8,
+ [2][0][RTW89_ETSI][51] = 127,
+ [2][0][RTW89_FCC][53] = 8,
+ [2][0][RTW89_ETSI][53] = 127,
+ [2][0][RTW89_FCC][55] = 8,
+ [2][0][RTW89_ETSI][55] = 127,
+ [2][0][RTW89_FCC][57] = 8,
+ [2][0][RTW89_ETSI][57] = 127,
+ [2][0][RTW89_FCC][59] = 8,
+ [2][0][RTW89_ETSI][59] = 127,
+ [2][0][RTW89_FCC][60] = 8,
+ [2][0][RTW89_ETSI][60] = 127,
+ [2][0][RTW89_FCC][62] = 8,
+ [2][0][RTW89_ETSI][62] = 127,
+ [2][0][RTW89_FCC][64] = 8,
+ [2][0][RTW89_ETSI][64] = 127,
+ [2][0][RTW89_FCC][66] = 8,
+ [2][0][RTW89_ETSI][66] = 127,
+ [2][0][RTW89_FCC][68] = 8,
+ [2][0][RTW89_ETSI][68] = 127,
+ [2][0][RTW89_FCC][70] = 8,
+ [2][0][RTW89_ETSI][70] = 127,
+ [2][0][RTW89_FCC][72] = 8,
+ [2][0][RTW89_ETSI][72] = 127,
+ [2][0][RTW89_FCC][74] = 8,
+ [2][0][RTW89_ETSI][74] = 127,
+ [2][0][RTW89_FCC][75] = 8,
+ [2][0][RTW89_ETSI][75] = 127,
+ [2][0][RTW89_FCC][77] = 8,
+ [2][0][RTW89_ETSI][77] = 127,
+ [2][0][RTW89_FCC][79] = 8,
+ [2][0][RTW89_ETSI][79] = 127,
+ [2][0][RTW89_FCC][81] = 8,
+ [2][0][RTW89_ETSI][81] = 127,
+ [2][0][RTW89_FCC][83] = 8,
+ [2][0][RTW89_ETSI][83] = 127,
+ [2][0][RTW89_FCC][85] = 8,
+ [2][0][RTW89_ETSI][85] = 127,
+ [2][0][RTW89_FCC][87] = 8,
+ [2][0][RTW89_ETSI][87] = 127,
+ [2][0][RTW89_FCC][89] = 8,
+ [2][0][RTW89_ETSI][89] = 127,
+ [2][0][RTW89_FCC][90] = 8,
+ [2][0][RTW89_ETSI][90] = 127,
+ [2][0][RTW89_FCC][92] = 8,
+ [2][0][RTW89_ETSI][92] = 127,
+ [2][0][RTW89_FCC][94] = 8,
+ [2][0][RTW89_ETSI][94] = 127,
+ [2][0][RTW89_FCC][96] = 8,
+ [2][0][RTW89_ETSI][96] = 127,
+ [2][0][RTW89_FCC][98] = 8,
+ [2][0][RTW89_ETSI][98] = 127,
+ [2][0][RTW89_FCC][100] = 8,
+ [2][0][RTW89_ETSI][100] = 127,
+ [2][0][RTW89_FCC][102] = 8,
+ [2][0][RTW89_ETSI][102] = 127,
+ [2][0][RTW89_FCC][104] = 8,
+ [2][0][RTW89_ETSI][104] = 127,
+ [2][0][RTW89_FCC][105] = 8,
+ [2][0][RTW89_ETSI][105] = 127,
+ [2][0][RTW89_FCC][107] = 10,
+ [2][0][RTW89_ETSI][107] = 127,
+ [2][0][RTW89_FCC][109] = 12,
+ [2][0][RTW89_ETSI][109] = 127,
[2][0][RTW89_FCC][111] = 127,
+ [2][0][RTW89_ETSI][111] = 127,
[2][0][RTW89_FCC][113] = 127,
+ [2][0][RTW89_ETSI][113] = 127,
[2][0][RTW89_FCC][115] = 127,
+ [2][0][RTW89_ETSI][115] = 127,
[2][0][RTW89_FCC][117] = 127,
+ [2][0][RTW89_ETSI][117] = 127,
[2][0][RTW89_FCC][119] = 127,
- [2][1][RTW89_FCC][0] = 76,
- [2][1][RTW89_FCC][2] = 76,
- [2][1][RTW89_FCC][4] = 76,
- [2][1][RTW89_FCC][6] = 76,
- [2][1][RTW89_FCC][8] = 76,
- [2][1][RTW89_FCC][10] = 76,
- [2][1][RTW89_FCC][12] = 76,
- [2][1][RTW89_FCC][14] = 76,
- [2][1][RTW89_FCC][15] = 76,
- [2][1][RTW89_FCC][17] = 76,
- [2][1][RTW89_FCC][19] = 76,
- [2][1][RTW89_FCC][21] = 76,
- [2][1][RTW89_FCC][23] = 76,
- [2][1][RTW89_FCC][25] = 76,
- [2][1][RTW89_FCC][27] = 76,
- [2][1][RTW89_FCC][29] = 76,
- [2][1][RTW89_FCC][30] = 76,
- [2][1][RTW89_FCC][32] = 76,
- [2][1][RTW89_FCC][34] = 76,
- [2][1][RTW89_FCC][36] = 76,
- [2][1][RTW89_FCC][38] = 76,
- [2][1][RTW89_FCC][40] = 76,
- [2][1][RTW89_FCC][42] = 76,
- [2][1][RTW89_FCC][44] = 76,
- [2][1][RTW89_FCC][45] = 76,
- [2][1][RTW89_FCC][47] = 76,
- [2][1][RTW89_FCC][49] = 76,
- [2][1][RTW89_FCC][51] = 76,
- [2][1][RTW89_FCC][53] = 76,
- [2][1][RTW89_FCC][55] = 76,
- [2][1][RTW89_FCC][57] = 76,
- [2][1][RTW89_FCC][59] = 76,
- [2][1][RTW89_FCC][60] = 76,
- [2][1][RTW89_FCC][62] = 76,
- [2][1][RTW89_FCC][64] = 76,
- [2][1][RTW89_FCC][66] = 76,
- [2][1][RTW89_FCC][68] = 76,
- [2][1][RTW89_FCC][70] = 76,
- [2][1][RTW89_FCC][72] = 76,
- [2][1][RTW89_FCC][74] = 76,
- [2][1][RTW89_FCC][75] = 76,
- [2][1][RTW89_FCC][77] = 76,
- [2][1][RTW89_FCC][79] = 76,
- [2][1][RTW89_FCC][81] = 76,
- [2][1][RTW89_FCC][83] = 76,
- [2][1][RTW89_FCC][85] = 76,
- [2][1][RTW89_FCC][87] = 76,
- [2][1][RTW89_FCC][89] = 76,
- [2][1][RTW89_FCC][90] = 76,
- [2][1][RTW89_FCC][92] = 76,
- [2][1][RTW89_FCC][94] = 76,
- [2][1][RTW89_FCC][96] = 76,
- [2][1][RTW89_FCC][98] = 76,
- [2][1][RTW89_FCC][100] = 76,
- [2][1][RTW89_FCC][102] = 76,
- [2][1][RTW89_FCC][104] = 76,
- [2][1][RTW89_FCC][105] = 76,
- [2][1][RTW89_FCC][107] = 76,
- [2][1][RTW89_FCC][109] = 76,
+ [2][0][RTW89_ETSI][119] = 127,
+ [2][1][RTW89_FCC][0] = -16,
+ [2][1][RTW89_ETSI][0] = 44,
+ [2][1][RTW89_FCC][2] = -16,
+ [2][1][RTW89_ETSI][2] = 44,
+ [2][1][RTW89_FCC][4] = -16,
+ [2][1][RTW89_ETSI][4] = 44,
+ [2][1][RTW89_FCC][6] = -16,
+ [2][1][RTW89_ETSI][6] = 44,
+ [2][1][RTW89_FCC][8] = -16,
+ [2][1][RTW89_ETSI][8] = 44,
+ [2][1][RTW89_FCC][10] = -16,
+ [2][1][RTW89_ETSI][10] = 44,
+ [2][1][RTW89_FCC][12] = -16,
+ [2][1][RTW89_ETSI][12] = 44,
+ [2][1][RTW89_FCC][14] = -16,
+ [2][1][RTW89_ETSI][14] = 44,
+ [2][1][RTW89_FCC][15] = -16,
+ [2][1][RTW89_ETSI][15] = 44,
+ [2][1][RTW89_FCC][17] = -16,
+ [2][1][RTW89_ETSI][17] = 44,
+ [2][1][RTW89_FCC][19] = -16,
+ [2][1][RTW89_ETSI][19] = 44,
+ [2][1][RTW89_FCC][21] = -16,
+ [2][1][RTW89_ETSI][21] = 44,
+ [2][1][RTW89_FCC][23] = -16,
+ [2][1][RTW89_ETSI][23] = 44,
+ [2][1][RTW89_FCC][25] = -16,
+ [2][1][RTW89_ETSI][25] = 44,
+ [2][1][RTW89_FCC][27] = -16,
+ [2][1][RTW89_ETSI][27] = 44,
+ [2][1][RTW89_FCC][29] = -16,
+ [2][1][RTW89_ETSI][29] = 44,
+ [2][1][RTW89_FCC][30] = -16,
+ [2][1][RTW89_ETSI][30] = 44,
+ [2][1][RTW89_FCC][32] = -16,
+ [2][1][RTW89_ETSI][32] = 44,
+ [2][1][RTW89_FCC][34] = -16,
+ [2][1][RTW89_ETSI][34] = 44,
+ [2][1][RTW89_FCC][36] = -16,
+ [2][1][RTW89_ETSI][36] = 44,
+ [2][1][RTW89_FCC][38] = -16,
+ [2][1][RTW89_ETSI][38] = 44,
+ [2][1][RTW89_FCC][40] = -16,
+ [2][1][RTW89_ETSI][40] = 44,
+ [2][1][RTW89_FCC][42] = -16,
+ [2][1][RTW89_ETSI][42] = 44,
+ [2][1][RTW89_FCC][44] = -16,
+ [2][1][RTW89_ETSI][44] = 44,
+ [2][1][RTW89_FCC][45] = -16,
+ [2][1][RTW89_ETSI][45] = 127,
+ [2][1][RTW89_FCC][47] = -16,
+ [2][1][RTW89_ETSI][47] = 127,
+ [2][1][RTW89_FCC][49] = -16,
+ [2][1][RTW89_ETSI][49] = 127,
+ [2][1][RTW89_FCC][51] = -16,
+ [2][1][RTW89_ETSI][51] = 127,
+ [2][1][RTW89_FCC][53] = -16,
+ [2][1][RTW89_ETSI][53] = 127,
+ [2][1][RTW89_FCC][55] = -16,
+ [2][1][RTW89_ETSI][55] = 127,
+ [2][1][RTW89_FCC][57] = -16,
+ [2][1][RTW89_ETSI][57] = 127,
+ [2][1][RTW89_FCC][59] = -16,
+ [2][1][RTW89_ETSI][59] = 127,
+ [2][1][RTW89_FCC][60] = -16,
+ [2][1][RTW89_ETSI][60] = 127,
+ [2][1][RTW89_FCC][62] = -16,
+ [2][1][RTW89_ETSI][62] = 127,
+ [2][1][RTW89_FCC][64] = -16,
+ [2][1][RTW89_ETSI][64] = 127,
+ [2][1][RTW89_FCC][66] = -16,
+ [2][1][RTW89_ETSI][66] = 127,
+ [2][1][RTW89_FCC][68] = -16,
+ [2][1][RTW89_ETSI][68] = 127,
+ [2][1][RTW89_FCC][70] = -16,
+ [2][1][RTW89_ETSI][70] = 127,
+ [2][1][RTW89_FCC][72] = -16,
+ [2][1][RTW89_ETSI][72] = 127,
+ [2][1][RTW89_FCC][74] = -16,
+ [2][1][RTW89_ETSI][74] = 127,
+ [2][1][RTW89_FCC][75] = -16,
+ [2][1][RTW89_ETSI][75] = 127,
+ [2][1][RTW89_FCC][77] = -16,
+ [2][1][RTW89_ETSI][77] = 127,
+ [2][1][RTW89_FCC][79] = -16,
+ [2][1][RTW89_ETSI][79] = 127,
+ [2][1][RTW89_FCC][81] = -16,
+ [2][1][RTW89_ETSI][81] = 127,
+ [2][1][RTW89_FCC][83] = -16,
+ [2][1][RTW89_ETSI][83] = 127,
+ [2][1][RTW89_FCC][85] = -18,
+ [2][1][RTW89_ETSI][85] = 127,
+ [2][1][RTW89_FCC][87] = -16,
+ [2][1][RTW89_ETSI][87] = 127,
+ [2][1][RTW89_FCC][89] = -16,
+ [2][1][RTW89_ETSI][89] = 127,
+ [2][1][RTW89_FCC][90] = -16,
+ [2][1][RTW89_ETSI][90] = 127,
+ [2][1][RTW89_FCC][92] = -16,
+ [2][1][RTW89_ETSI][92] = 127,
+ [2][1][RTW89_FCC][94] = -16,
+ [2][1][RTW89_ETSI][94] = 127,
+ [2][1][RTW89_FCC][96] = -16,
+ [2][1][RTW89_ETSI][96] = 127,
+ [2][1][RTW89_FCC][98] = -16,
+ [2][1][RTW89_ETSI][98] = 127,
+ [2][1][RTW89_FCC][100] = -16,
+ [2][1][RTW89_ETSI][100] = 127,
+ [2][1][RTW89_FCC][102] = -16,
+ [2][1][RTW89_ETSI][102] = 127,
+ [2][1][RTW89_FCC][104] = -16,
+ [2][1][RTW89_ETSI][104] = 127,
+ [2][1][RTW89_FCC][105] = -16,
+ [2][1][RTW89_ETSI][105] = 127,
+ [2][1][RTW89_FCC][107] = -12,
+ [2][1][RTW89_ETSI][107] = 127,
+ [2][1][RTW89_FCC][109] = -10,
+ [2][1][RTW89_ETSI][109] = 127,
[2][1][RTW89_FCC][111] = 127,
+ [2][1][RTW89_ETSI][111] = 127,
[2][1][RTW89_FCC][113] = 127,
+ [2][1][RTW89_ETSI][113] = 127,
[2][1][RTW89_FCC][115] = 127,
+ [2][1][RTW89_ETSI][115] = 127,
[2][1][RTW89_FCC][117] = 127,
+ [2][1][RTW89_ETSI][117] = 127,
[2][1][RTW89_FCC][119] = 127,
+ [2][1][RTW89_ETSI][119] = 127,
};
const struct rtw89_phy_table rtw89_8852c_phy_bb_table = {
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index fc0394494013..35901f64d17d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -42,14 +42,15 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.max_tag_num_mask = B_AX_MAX_TAG_NUM_V1_MASK,
.rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR_V1,
.txbd_rwptr_clr2_reg = R_AX_TXBD_RWPTR_CLR2_V1,
- .dma_stop1_reg = R_AX_HAXI_DMA_STOP1,
- .dma_stop2_reg = R_AX_HAXI_DMA_STOP2,
- .dma_busy1_reg = R_AX_HAXI_DMA_BUSY1,
+ .dma_stop1 = {R_AX_HAXI_DMA_STOP1, B_AX_TX_STOP1_MASK},
+ .dma_stop2 = {R_AX_HAXI_DMA_STOP2, B_AX_TX_STOP2_ALL},
+ .dma_busy1 = {R_AX_HAXI_DMA_BUSY1, DMA_BUSY1_CHECK},
.dma_busy2_reg = R_AX_HAXI_DMA_BUSY2,
.dma_busy3_reg = R_AX_HAXI_DMA_BUSY3,
.rpwm_addr = R_AX_PCIE_HRPWM_V1,
.cpwm_addr = R_AX_PCIE_CRPWM,
+ .tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index eb2d3ec28775..dfccae81c380 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -81,9 +81,9 @@ static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = {
static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg)
{
struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
- struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_band band = hal->current_band_type;
- u32 center_freq = hal->current_freq;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ u32 center_freq = chan->freq;
const struct rtw89_sar_span *span = NULL;
enum rtw89_sar_subband subband_l, subband_h;
int idx;
@@ -228,7 +228,7 @@ static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
}
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
- rtw89_chip_set_txpwr(rtwdev);
+ rtw89_core_set_chip_txpwr(rtwdev);
exit:
mutex_unlock(&rtwdev->mutex);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 726223f25dc6..c1a4bc1c64d1 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -5,6 +5,7 @@
#include <linux/devcoredump.h>
#include "cam.h"
+#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
@@ -152,7 +153,10 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
ser_st_name(ser), ser_ev_name(ser, evt));
+ mutex_lock(&rtwdev->mutex);
rtw89_leave_lps(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
ser->st_tbl[ser->state].st_func(ser, evt);
}
@@ -298,7 +302,7 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtwvif->trigger = false;
}
-static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
+static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
@@ -308,15 +312,19 @@ static void ser_sta_deinit_addr_cam_iter(void *data, struct ieee80211_sta *sta)
rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
if (sta->tdls)
rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
+
+ INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}
static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
ieee80211_iterate_stations_atomic(rtwdev->hw,
- ser_sta_deinit_addr_cam_iter,
+ ser_sta_deinit_cam_iter,
rtwvif);
rtw89_cam_deinit(rtwdev, rtwvif);
+
+ bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}
static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
@@ -388,6 +396,7 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
rtw89_hci_recovery_complete(rtwdev);
+ clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
break;
case SER_EV_L1_RESET:
ser_state_goto(ser, SER_RESET_TRX_ST);
@@ -531,7 +540,7 @@ static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
const struct __fw_backtrace_entry *ent)
{
struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
- u32 fwbt_addr = ent->wcpu_addr - RTW89_WCPU_BASE_ADDR;
+ u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
u32 fwbt_size = ent->size;
u32 fwbt_key = ent->key;
u32 i;
@@ -601,6 +610,7 @@ bottom:
ser_reset_mac_binding(rtwdev);
rtw89_core_stop(rtwdev);
+ rtw89_entity_init(rtwdev);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
@@ -623,7 +633,6 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
fallthrough;
case SER_EV_L2_RECFG_DONE:
ser_state_goto(ser, SER_IDLE_ST);
- clear_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags);
break;
case SER_EV_STATE_OUT:
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 05524291d60c..82a7458e01ae 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -251,7 +251,7 @@ struct ndis_80211_bssid_ex {
struct ndis_80211_bssid_list_ex {
__le32 num_items;
- struct ndis_80211_bssid_ex bssid[];
+ u8 bssid_data[];
} __packed;
struct ndis_80211_fixed_ies {
@@ -489,14 +489,16 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params);
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr);
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr);
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast);
+ int link_id, u8 key_index, bool unicast,
+ bool multicast);
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
const u8 *mac, struct station_info *sinfo);
@@ -2082,7 +2084,8 @@ resize_buf:
netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len);
bssid_len = 0;
- bssid = next_bssid_list_item(bssid_list->bssid, &bssid_len, buf, len);
+ bssid = next_bssid_list_item((void *)bssid_list->bssid_data,
+ &bssid_len, buf, len);
/* Device returns incorrect 'num_items'. Workaround by ignoring the
* received 'num_items' and walking through full bssid buffer instead.
@@ -2377,8 +2380,8 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
}
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2413,7 +2416,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
@@ -2424,7 +2428,8 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
}
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index bf39c4bda26f..2fbec51c8f94 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -889,6 +889,7 @@ static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
* for a hardware TX queue.
* @hw: Pointer to the ieee80211_hw structure
* @vif: Pointer to the ieee80211_vif structure.
+ * @link_id: the link ID if MLO is used, otherwise 0
* @queue: Queue number.
* @params: Pointer to ieee80211_tx_queue_params structure.
*
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index e015bfb8d221..84d82ddded56 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -181,7 +181,7 @@ int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
while (len > 0) {
chunk_type = get_unaligned_le16(buf + 0);
chunk_len = get_unaligned_le16(buf + 2);
- if (chunk_len > len) {
+ if (chunk_len < 4 || chunk_len > len) {
dev_err(wdev->dev, "PDS:%d: corrupted file\n", chunk_num);
return -EINVAL;
}
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index e06da4b3b0d4..805a3c1bf8fe 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -91,23 +91,25 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
bool unlock)
{
struct cw1200_queue_stats *stats = queue->stats;
- struct cw1200_queue_item *item = NULL, *tmp;
+ struct cw1200_queue_item *item = NULL, *iter, *tmp;
bool wakeup_stats = false;
- list_for_each_entry_safe(item, tmp, &queue->queue, head) {
- if (time_is_after_jiffies(item->queue_timestamp + queue->ttl))
+ list_for_each_entry_safe(iter, tmp, &queue->queue, head) {
+ if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) {
+ item = iter;
break;
+ }
--queue->num_queued;
- --queue->link_map_cache[item->txpriv.link_id];
+ --queue->link_map_cache[iter->txpriv.link_id];
spin_lock_bh(&stats->lock);
--stats->num_queued;
- if (!--stats->link_map_cache[item->txpriv.link_id])
+ if (!--stats->link_map_cache[iter->txpriv.link_id])
wakeup_stats = true;
spin_unlock_bh(&stats->lock);
cw1200_debug_tx_ttl(stats->priv);
- cw1200_queue_register_post_gc(head, item);
- item->skb = NULL;
- list_move_tail(&item->head, &queue->free_pool);
+ cw1200_queue_register_post_gc(head, iter);
+ iter->skb = NULL;
+ list_move_tail(&iter->head, &queue->free_pool);
}
if (wakeup_stats)
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 26d3614519b1..8ef1d06b9bbd 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -195,7 +195,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
priv->bss_loss_state++;
- skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false);
WARN_ON(!skb);
if (skb)
cw1200_tx(priv->hw, NULL, skb);
@@ -2263,7 +2263,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
.rate = 0xFF,
};
- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false);
if (!frame.skb)
return -ENOMEM;
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index fde21fca6c5e..6894b919ff94 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -762,8 +762,7 @@ void cw1200_tx(struct ieee80211_hw *dev,
if (ret)
goto drop;
- rcu_read_lock();
- sta = rcu_dereference(t.sta);
+ sta = t.sta;
spin_lock_bh(&priv->ps_state_lock);
{
@@ -776,8 +775,6 @@ void cw1200_tx(struct ieee80211_hw *dev,
if (tid_update && sta)
ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);
- rcu_read_unlock();
-
cw1200_bh_wakeup(priv);
return;
@@ -1145,8 +1142,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
/* Remove TSF from the end of frame */
if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
- memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
- hdr->mactime = le64_to_cpu(hdr->mactime);
+ hdr->mactime = get_unaligned_le64(skb->data + skb->len - 8);
if (skb->len >= 8)
skb_trim(skb, skb->len - 8);
} else {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9144ef5538a8..289371689a8d 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -546,7 +546,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, -1, false);
if (!skb)
goto out;
size = skb->len;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 13d78ada4bb6..34d95f458e1a 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -131,10 +131,10 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
if (vector & TIME_SYNC_EVENT_ID)
wlcore_event_time_sync(wl,
- mbox->time_sync_tsf_high_msb,
- mbox->time_sync_tsf_high_lsb,
- mbox->time_sync_tsf_low_msb,
- mbox->time_sync_tsf_low_lsb);
+ le16_to_cpu(mbox->time_sync_tsf_high_msb),
+ le16_to_cpu(mbox->time_sync_tsf_high_lsb),
+ le16_to_cpu(mbox->time_sync_tsf_low_msb),
+ le16_to_cpu(mbox->time_sync_tsf_low_lsb));
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 138edd28b0de..a939fd89a7f5 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1065,7 +1065,7 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
} else {
skb = ieee80211_nullfunc_get(wl->hw,
wl12xx_wlvif_to_vif(wlvif),
- false);
+ -1, false);
if (!skb)
goto out;
size = skb->len;
@@ -1092,7 +1092,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, vif, false);
+ skb = ieee80211_nullfunc_get(wl->hw, vif, -1, false);
if (!skb)
goto out;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index dad38fc04243..1b532e00a56f 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1441,7 +1441,7 @@ static void wl3501_detach(struct pcmcia_device *link)
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- strlcpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
+ strscpy(wrqu->name, "IEEE 802.11-DS", sizeof(wrqu->name));
return 0;
}
@@ -1652,7 +1652,7 @@ static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info,
if (wrqu->data.length > sizeof(this->nick))
return -E2BIG;
- strlcpy(this->nick, extra, wrqu->data.length);
+ strscpy(this->nick, extra, wrqu->data.length);
return 0;
}
@@ -1661,7 +1661,7 @@ static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info,
{
struct wl3501_card *this = netdev_priv(dev);
- strlcpy(extra, this->nick, 32);
+ strscpy(extra, this->nick, 32);
wrqu->data.length = strlen(extra);
return 0;
}
@@ -1965,7 +1965,7 @@ static int wl3501_config(struct pcmcia_device *link)
this->firmware_date[0] = '\0';
this->rssi = 255;
this->chan = iw_default_channel(this->reg_domain);
- strlcpy(this->nick, "Planet WL3501", sizeof(this->nick));
+ strscpy(this->nick, "Planet WL3501", sizeof(this->nick));
spin_lock_init(&this->lock);
init_waitqueue_head(&this->wait);
netif_start_queue(dev);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 57304a5adf68..b7f9237dedf7 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -590,7 +590,7 @@ int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
goto out;
}
- memcpy(skb_put(skb, count), buf, count);
+ skb_put_data(skb, buf, count);
IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index 27151148c782..2f1f8b5d5b59 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -103,8 +103,8 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
}
/* Transmit a packet */
-static int ipc_wwan_link_transmit(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
@@ -323,15 +323,16 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
ipc_wwan->dev = dev;
ipc_wwan->ipc_imem = ipc_imem;
+ mutex_init(&ipc_wwan->if_mutex);
+
/* WWAN core will create a netdev for the default IP MUX channel */
if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
IP_MUX_SESSION_DEFAULT)) {
+ mutex_destroy(&ipc_wwan->if_mutex);
kfree(ipc_wwan);
return NULL;
}
- mutex_init(&ipc_wwan->if_mutex);
-
return ipc_wwan;
}
diff --git a/drivers/net/wwan/mhi_wwan_ctrl.c b/drivers/net/wwan/mhi_wwan_ctrl.c
index e4d0f696687f..f7ca52353f40 100644
--- a/drivers/net/wwan/mhi_wwan_ctrl.c
+++ b/drivers/net/wwan/mhi_wwan_ctrl.c
@@ -258,6 +258,7 @@ static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
{ .chan = "DUN", .driver_data = WWAN_PORT_AT },
+ { .chan = "DUN2", .driver_data = WWAN_PORT_AT },
{ .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
{ .chan = "QMI", .driver_data = WWAN_PORT_QMI },
{ .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index c6b6547f2c6f..f71d3bc3b237 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -74,7 +74,7 @@ static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
return 0;
}
-static int t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
int skb_len = skb->len;
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index b8c7843730ed..62e9f7d6c9fe 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/wwan.h>
#include <net/rtnetlink.h>
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index fad642f9ffd8..ff09a8cedf93 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -157,8 +157,8 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
if ((i + 1) < in->len && in->data[i + 1] == '\n')
i++;
n = i - s + 1;
- memcpy(skb_put(out, n), &in->data[s], n);/* Echo */
- memcpy(skb_put(out, 6), "\r\nOK\r\n", 6);
+ skb_put_data(out, &in->data[s], n);/* Echo */
+ skb_put_data(out, "\r\nOK\r\n", 6);
s = i + 1;
port->pstate = AT_PARSER_WAIT_A;
} else if (port->pstate == AT_PARSER_SKIP_LINE) {
@@ -171,7 +171,7 @@ static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
if (i > s) {
/* Echo the processed portion of a not yet completed command */
n = i - s;
- memcpy(skb_put(out, n), &in->data[s], n);
+ skb_put_data(out, &in->data[s], n);
}
consume_skb(in);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8174d7b2966c..1545cbee77a4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -62,7 +62,7 @@ struct pending_tx_info {
* ubuf_to_vif is a helper which finds the struct xenvif from a pointer
* to this field.
*/
- struct ubuf_info callback_struct;
+ struct ubuf_info_msgzc callback_struct;
};
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index fb32ae82d9b0..650fa180220f 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -591,8 +591,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
}
for (i = 0; i < MAX_PENDING_REQS; i++) {
- queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
- { .callback = xenvif_zerocopy_callback,
+ queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
+ { { .callback = xenvif_zerocopy_callback },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
@@ -723,8 +723,7 @@ int xenvif_connect_data(struct xenvif_queue *queue,
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);
- netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);
queue->stalled = true;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a256695fc89e..3d2081bbbc86 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -133,7 +133,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
+static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
@@ -1228,11 +1228,12 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
+void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct xenvif_queue *queue = ubuf_to_queue(ubuf);
/* This is the only place where we grab this lock, to protect callbacks
@@ -1241,7 +1242,7 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
- ubuf = (struct ubuf_info *) ubuf->ctx;
+ ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
index = pending_index(queue->dealloc_prod);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 990360d75cb6..c1ba4294f364 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -866,13 +865,12 @@ static int connect_data_rings(struct backend_info *be,
* queue-N.
*/
if (num_queues == 1) {
- xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
+ xspath = kstrdup(dev->otherend, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM,
"reading ring references");
return -ENOMEM;
}
- strcpy(xspath, dev->otherend);
} else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kzalloc(xspathsize, GFP_KERNEL);
@@ -984,6 +982,7 @@ static int netback_remove(struct xenbus_device *dev)
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 27a11cc08c61..9af2b027c19c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -673,7 +673,7 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
return nxmit;
}
-struct sk_buff *bounce_skb(const struct sk_buff *skb)
+static struct sk_buff *bounce_skb(const struct sk_buff *skb)
{
unsigned int headerlen = skb_headroom(skb);
/* Align size to allocate full pages and avoid contiguous data leaks */
@@ -2224,8 +2224,7 @@ static int xennet_create_queues(struct netfront_info *info,
return ret;
}
- netif_napi_add(queue->info->netdev, &queue->napi,
- xennet_poll, 64);
+ netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
if (netif_running(info->netdev))
napi_enable(&queue->napi);
}
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 28a9e1eb9bcf..2d53e0f88d2f 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -336,14 +336,12 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
return 0;
}
-static int fdp_nci_i2c_remove(struct i2c_client *client)
+static void fdp_nci_i2c_remove(struct i2c_client *client)
{
struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
fdp_nci_remove(phy->ndev);
fdp_nci_i2c_disable(phy);
-
- return 0;
}
static const struct acpi_device_id fdp_nci_i2c_acpi_match[] = {
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 067295124eb9..5eaa18f81355 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -268,15 +268,13 @@ err_irq:
return r;
}
-static int microread_i2c_remove(struct i2c_client *client)
+static void microread_i2c_remove(struct i2c_client *client)
{
struct microread_i2c_phy *phy = i2c_get_clientdata(client);
microread_remove(phy->hdev);
free_irq(client->irq, phy);
-
- return 0;
}
static const struct i2c_device_id microread_i2c_id[] = {
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 01329b91d59d..acef0cfd76af 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -231,13 +231,11 @@ static int nfcmrvl_i2c_probe(struct i2c_client *client,
return 0;
}
-static int nfcmrvl_i2c_remove(struct i2c_client *client)
+static void nfcmrvl_i2c_remove(struct i2c_client *client)
{
struct nfcmrvl_i2c_drv_data *drv_data = i2c_get_clientdata(client);
nfcmrvl_nci_unregister_dev(drv_data->priv);
-
- return 0;
}
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index ae2ba08d8ac3..ec6446511984 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -314,14 +314,12 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
return r;
}
-static int nxp_nci_i2c_remove(struct i2c_client *client)
+static void nxp_nci_i2c_remove(struct i2c_client *client)
{
struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
nxp_nci_remove(phy->ndev);
free_irq(client->irq, phy);
-
- return 0;
}
static const struct i2c_device_id nxp_nci_i2c_id_table[] = {
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 673eb5e9b887..ddf3db286bad 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -227,7 +227,7 @@ nfc_alloc_err:
return r;
}
-static int pn533_i2c_remove(struct i2c_client *client)
+static void pn533_i2c_remove(struct i2c_client *client)
{
struct pn533_i2c_phy *phy = i2c_get_clientdata(client);
@@ -235,8 +235,6 @@ static int pn533_i2c_remove(struct i2c_client *client)
pn53x_unregister_nfc(phy->priv);
pn53x_common_clean(phy->priv);
-
- return 0;
}
static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = {
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 62a0f1a010cb..9e754abcfa2a 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -928,7 +928,7 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
return 0;
}
-static int pn544_hci_i2c_remove(struct i2c_client *client)
+static void pn544_hci_i2c_remove(struct i2c_client *client)
{
struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
@@ -940,8 +940,6 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
pn544_hci_i2c_disable(phy);
-
- return 0;
}
static const struct of_device_id of_pn544_i2c_match[] __maybe_unused = {
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 4d1cf1bb55b0..f824dc7099ce 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -246,14 +246,12 @@ disable_clk:
return ret;
}
-static int s3fwrn5_i2c_remove(struct i2c_client *client)
+static void s3fwrn5_i2c_remove(struct i2c_client *client)
{
struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
s3fwrn5_remove(phy->common.ndev);
clk_disable_unprepare(phy->clk);
-
- return 0;
}
static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index cbd968f013c7..89fa24d71bef 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -250,13 +250,11 @@ static int st_nci_i2c_probe(struct i2c_client *client,
return r;
}
-static int st_nci_i2c_remove(struct i2c_client *client)
+static void st_nci_i2c_remove(struct i2c_client *client)
{
struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
ndlc_remove(phy->ndlc);
-
- return 0;
}
static const struct i2c_device_id st_nci_i2c_id_table[] = {
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 42dc0e5eb161..76b55986bcf8 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -562,7 +562,7 @@ out_free:
return r;
}
-static int st21nfca_hci_i2c_remove(struct i2c_client *client)
+static void st21nfca_hci_i2c_remove(struct i2c_client *client)
{
struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
@@ -571,8 +571,6 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
st21nfca_hci_i2c_disable(phy);
kfree_skb(phy->pending_skb);
-
- return 0;
}
static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index bf4f5c09d9b1..bbe5099c836d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1712,8 +1712,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
res->flags = IORESOURCE_MEM;
for (i = 0; i < nd_region->ndr_mappings; i++) {
- uuid_t uuid;
-
nsl_get_uuid(ndd, nd_label, &uuid);
if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
continue;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7e88cd242380..96e6e9a5f235 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset)
+static phys_addr_t pmem_to_phys(struct pmem_device *pmem, phys_addr_t offset)
{
return pmem->phys_addr + offset;
}
@@ -63,7 +63,7 @@ static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector)
static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
- phys_addr_t phys = to_phys(pmem, offset);
+ phys_addr_t phys = pmem_to_phys(pmem, offset);
unsigned long pfn_start, pfn_end, pfn;
/* only pmem in the linear map supports HWPoison */
@@ -97,7 +97,7 @@ static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks)
static long __pmem_clear_poison(struct pmem_device *pmem,
phys_addr_t offset, unsigned int len)
{
- phys_addr_t phys = to_phys(pmem, offset);
+ phys_addr_t phys = pmem_to_phys(pmem, offset);
long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
if (cleared > 0) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index af367b22871b..059737c1a2c1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1111,8 +1111,8 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return effects;
}
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
- struct nvme_command *cmd, int status)
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status)
{
if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
nvme_unfreeze(ctrl);
@@ -1148,21 +1148,16 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
break;
}
}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
-int nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
{
struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
- u32 effects;
- int ret;
- effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- ret = nvme_execute_rq(rq, false);
- if (effects) /* nothing to be done for zero cmd effects */
- nvme_passthru_end(ctrl, effects, cmd, ret);
-
- return ret;
+ *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ return nvme_execute_rq(rq, false);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1177,7 +1172,8 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
}
-static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+ blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
unsigned long flags;
@@ -1189,7 +1185,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
dev_err(ctrl->device,
"failed nvme_keep_alive_end_io error=%d\n",
status);
- return;
+ return RQ_END_IO_NONE;
}
ctrl->comp_seen = false;
@@ -1200,6 +1196,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
nvme_queue_keep_alive_work(ctrl);
+ return RQ_END_IO_NONE;
}
static void nvme_keep_alive_work(struct work_struct *work)
@@ -2162,14 +2159,14 @@ static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
- u32 cdw10 = 1 | (key ? 1 << 3 : 0);
+ u32 cdw10 = 1 | (key ? 0 : 1 << 3);
- return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
+ return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
- u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);
+ u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}
@@ -2696,7 +2693,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -2704,7 +2701,11 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
}
- /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
+ /*
+ * Generate a "fake" NQN similar to the one in Section 4.5 of the NVMe
+ * Base Specification 2.0. It is slightly different from the format
+ * specified there due to historic reasons, and we can't change it now.
+ */
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
"nqn.2014.08.org.nvmexpress:%04x%04x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
@@ -2894,7 +2895,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
nvme_init_subnqn(subsys, ctrl, id);
memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
memcpy(subsys->model, id->mn, sizeof(subsys->model));
- memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
@@ -3113,6 +3113,8 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->quirks |= core_quirks[i].quirks;
}
}
+ memcpy(ctrl->subsys->firmware_rev, id->fr,
+ sizeof(ctrl->subsys->firmware_rev));
if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -3976,6 +3978,7 @@ static const struct file_operations nvme_ns_chr_fops = {
.unlocked_ioctl = nvme_ns_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_cdev(struct nvme_ns *ns)
@@ -4703,6 +4706,8 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_start_queues(ctrl);
/* read FW slot information to clear the AER */
nvme_get_fw_slot_info(ctrl);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
static u32 nvme_aer_type(u32 result)
@@ -4715,9 +4720,10 @@ static u32 nvme_aer_subtype(u32 result)
return (result & 0xff00) >> 8;
}
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
u32 aer_notice_type = nvme_aer_subtype(result);
+ bool requeue = true;
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4734,6 +4740,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
nvme_auth_stop(ctrl);
+ requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
}
break;
@@ -4750,6 +4757,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
+ return requeue;
}
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@ -4765,13 +4773,14 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
u32 result = le32_to_cpu(res->u32);
u32 aer_type = nvme_aer_type(result);
u32 aer_subtype = nvme_aer_subtype(result);
+ bool requeue = true;
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
switch (aer_type) {
case NVME_AER_NOTICE:
- nvme_handle_aen_notice(ctrl, result);
+ requeue = nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
/*
@@ -4792,10 +4801,114 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
break;
}
- queue_work(nvme_wq, &ctrl->async_event_work);
+
+ if (requeue)
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = flags;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = NVME_ADMIN_TIMEOUT;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ ctrl->admin_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+ goto out_free_tagset;
+ }
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->fabrics_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->fabrics_q)) {
+ ret = PTR_ERR(ctrl->fabrics_q);
+ goto out_cleanup_admin_q;
+ }
+ }
+
+ ctrl->admin_tagset = set;
+ return 0;
+
+out_cleanup_admin_q:
+ blk_mq_destroy_queue(ctrl->fabrics_q);
+out_free_tagset:
+ blk_mq_free_tag_set(set);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+ blk_mq_destroy_queue(ctrl->admin_q);
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_mq_destroy_queue(ctrl->fabrics_q);
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size)
+{
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->queue_depth = ctrl->sqsize + 1;
+ set->reserved_tags = NVMF_RESERVED_TAGS;
+ set->numa_node = ctrl->numa_node;
+ set->flags = flags;
+ set->cmd_size = cmd_size;
+ set->driver_data = ctrl;
+ set->nr_hw_queues = ctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ if (ops->map_queues)
+ set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ret;
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->connect_q = blk_mq_init_queue(set);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
+ }
+ }
+
+ ctrl->tagset = set;
+ return 0;
+
+out_free_tag_set:
+ blk_mq_free_tag_set(set);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_mq_destroy_queue(ctrl->connect_q);
+ blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
nvme_mpath_stop(ctrl);
@@ -4815,6 +4928,16 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_enable_aen(ctrl);
+ /*
+ * Persistent discovery controllers need to send an indication to
+ * userspace to re-read the discovery log page to learn about possible
+ * changes that were missed. We identify them by checking that they
+ * started once before, hence are reconnecting back.
+ */
+ if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+ nvme_discovery_ctrl(ctrl))
+ nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
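
Editor's note on the nvme_pr_clear()/nvme_pr_release() hunk above: the change flips the sense of bit 3 in CDW10 so it is set only when no reservation key is supplied, and the clear path now issues a Reservation Release instead of a Reservation Register. Below is a minimal user-space sketch of the corrected encoding; the assumption that bit 3 is the spec's Ignore Existing Key (IEKEY) flag is mine, not stated in the diff.

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed Reservation Release CDW10 layout: bits 2:0 = release action
 * (1 == clear), bit 3 = IEKEY, bits 15:8 = reservation type.
 */
static uint32_t pr_clear_cdw10(uint64_t key)
{
    /* set IEKEY only when the caller did NOT pass a key */
    return 1 | (key ? 0 : 1 << 3);
}

static uint32_t pr_release_cdw10(uint8_t rtype, uint64_t key)
{
    return (uint32_t)rtype << 8 | (key ? 0 : 1 << 3);
}

int main(void)
{
    printf("clear, key given:   cdw10=0x%x\n", pr_clear_cdw10(0xabcd));
    printf("clear, no key:      cdw10=0x%x\n", pr_clear_cdw10(0));
    printf("release, key given: cdw10=0x%x\n", pr_release_cdw10(2, 0xabcd));
    return 0;
}
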
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 10cc4a814602..ce27276f552d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -49,7 +49,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
goto out_unlock;
kref_init(&host->ref);
- strlcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
+ strscpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
@@ -971,13 +971,17 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
return false;
/*
- * Checking the local address is rough. In most cases, none is specified
- * and the host port is selected by the stack.
+ * Checking the local address or host interfaces is rough.
+ *
+ * In most cases, none is specified and the host port or
+ * host interface is selected by the stack.
*
* Assume no match if:
- * - local address is specified and address is not the same
- * - local address is not specified but remote is, or vice versa
- * (admin using specific host_traddr when it matters).
+ * - local address or host interface is specified and address
+ * or host interface is not the same
+ * - local address or host interface is not specified but
+ * remote is, or vice versa (admin using specific
+ * host_traddr/host_iface when it matters).
*/
if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
(ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
@@ -988,6 +992,15 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
return false;
}
+ if ((opts->mask & NVMF_OPT_HOST_IFACE) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ if (strcmp(opts->host_iface, ctrl->opts->host_iface))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_IFACE) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_IFACE)) {
+ return false;
+ }
+
return true;
}
EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
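
Editor's note: the nvmf_ip_options_match() change treats host_iface the same way host_traddr was already treated: two option sets only match if both specify the same interface, or neither specifies one. A small stand-alone model of that rule follows (the function and argument names are illustrative, not the driver's structures).

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* NULL means the option was not set (mask bit clear in the driver). */
static bool host_iface_matches(const char *new_iface, const char *existing_iface)
{
    if (new_iface && existing_iface)
        return strcmp(new_iface, existing_iface) == 0;
    /* one side specified an interface, the other did not: no match */
    return !new_iface && !existing_iface;
}

int main(void)
{
    printf("%d\n", host_iface_matches("eth0", "eth0"));  /* 1 */
    printf("%d\n", host_iface_matches("eth0", "eth1"));  /* 0 */
    printf("%d\n", host_iface_matches("eth0", NULL));    /* 0 */
    printf("%d\n", host_iface_matches(NULL, NULL));      /* 1 */
    return 0;
}
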
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 127abaf9ba5d..5d57a042dbca 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1829,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- return __nvme_fc_exit_request(set->driver_data, op);
+ return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
}
static int
@@ -2135,7 +2135,7 @@ static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
- struct nvme_fc_ctrl *ctrl = set->driver_data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -2206,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
}
}
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
- unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
{
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
struct nvme_fc_queue *queue = &ctrl->queues[qidx];
hctx->driver_data = queue;
queue->hctx = hctx;
+ return 0;
}
static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
{
- struct nvme_fc_ctrl *ctrl = data;
-
- __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
- return 0;
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
}
static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_fc_ctrl *ctrl = data;
-
- __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
- return 0;
+ return __nvme_fc_init_hctx(hctx, data, hctx_idx);
}
static void
@@ -2391,10 +2383,8 @@ nvme_fc_ctrl_free(struct kref *ref)
container_of(ref, struct nvme_fc_ctrl, ref);
unsigned long flags;
- if (ctrl->ctrl.tagset) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (ctrl->ctrl.tagset)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
/* remove from rport list */
spin_lock_irqsave(&ctrl->rport->lock, flags);
@@ -2402,9 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
nvme_start_admin_queue(&ctrl->ctrl);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
kfree(ctrl->queues);
@@ -2860,9 +2848,9 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
-static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_fc_ctrl *ctrl = set->driver_data;
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
int i;
for (i = 0; i < set->nr_maps; i++) {
@@ -2880,7 +2868,6 @@ static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
else
blk_mq_map_queues(map);
}
- return 0;
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
@@ -2915,32 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
nvme_fc_init_io_queues(ctrl);
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_fc_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size =
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
return ret;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tag_set;
-
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
- goto out_cleanup_blk_queue;
+ goto out_cleanup_tagset;
ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
@@ -2952,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
out_delete_hw_queues:
nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
nvme_fc_free_io_queues(ctrl);
/* force put free routine to ignore io queues */
@@ -3166,15 +3135,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
"to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
- }
-
- if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- /* warn if sqsize is lower than queue_size */
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl sqsize %u, reducing "
- "to sqsize\n",
- opts->queue_size, ctrl->ctrl.sqsize + 1);
- opts->queue_size = ctrl->ctrl.sqsize + 1;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
}
ret = nvme_fc_init_aen_ops(ctrl);
@@ -3547,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
nvme_fc_init_queue(ctrl, 0);
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size =
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
- ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
goto out_free_queues;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- ret = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_admin_tag_set;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- ret = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/*
* Would have been nice to init io queues tag set as well.
@@ -3586,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
if (ret)
- goto out_cleanup_admin_q;
+ goto out_cleanup_tagset;
/* at this point, teardown path changes to ref counting on nvme ctrl */
@@ -3641,12 +3579,8 @@ fail_ctrl:
return ERR_PTR(-EIO);
-out_cleanup_admin_q:
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_queues:
kfree(ctrl->queues);
out_free_ida:
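
Editor's note: the FC conversion above shows the pattern every transport now follows: one call to nvme_alloc_admin_tag_set()/nvme_alloc_io_tag_set() replaces the open-coded blk_mq_tag_set setup plus admin/fabrics/connect queue creation, and the matching nvme_remove_*_tag_set() replaces the multi-step teardown. The outline below is a sketch only; the "xport" names are placeholders for a hypothetical transport, while the helper signatures are the ones declared in the nvme.h hunk further down.

/* sketch: "xport_*" identifiers are placeholders, not real driver symbols */
static int xport_setup_queues(struct xport_ctrl *ctrl)
{
    int ret;

    ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
            &xport_admin_mq_ops, BLK_MQ_F_NO_SCHED,
            sizeof(struct xport_request));
    if (ret)
        return ret;

    ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
            &xport_mq_ops, BLK_MQ_F_SHOULD_MERGE,
            sizeof(struct xport_request));
    if (ret)
        goto out_remove_admin;

    return 0;

out_remove_admin:
    nvme_remove_admin_tag_set(&ctrl->ctrl);
    return ret;
}
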
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 27614bee7380..81f5550b670d 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -20,19 +20,20 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
- unsigned len, u32 seed, bool write)
+static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
+ unsigned len, u32 seed)
{
struct bio_integrity_payload *bip;
int ret = -ENOMEM;
void *buf;
+ struct bio *bio = req->bio;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
goto out;
ret = -EFAULT;
- if (write && copy_from_user(buf, ubuf, len))
+ if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
goto out_free_meta;
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
@@ -45,9 +46,13 @@ static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
bip->bip_iter.bi_sector = seed;
ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
offset_in_page(buf));
- if (ret == len)
- return buf;
- ret = -ENOMEM;
+ if (ret != len) {
+ ret = -ENOMEM;
+ goto out_free_meta;
+ }
+
+ req->cmd_flags |= REQ_INTEGRITY;
+ return buf;
out_free_meta:
kfree(buf);
out:
@@ -65,90 +70,102 @@ static int nvme_finish_user_metadata(struct request *req, void __user *ubuf,
}
static struct request *nvme_alloc_user_request(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
- unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, void **metap, unsigned timeout, bool vec,
- blk_opf_t rq_flags, blk_mq_req_flags_t blk_flags)
+ struct nvme_command *cmd, blk_opf_t rq_flags,
+ blk_mq_req_flags_t blk_flags)
{
- bool write = nvme_is_write(cmd);
- struct nvme_ns *ns = q->queuedata;
- struct block_device *bdev = ns ? ns->disk->part0 : NULL;
struct request *req;
- struct bio *bio = NULL;
- void *meta = NULL;
- int ret;
req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
if (IS_ERR(req))
return req;
nvme_init_request(req, cmd);
-
- if (timeout)
- req->timeout = timeout;
nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ return req;
+}
- if (ubuffer && bufflen) {
- if (!vec)
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
- GFP_KERNEL);
- else {
- struct iovec fast_iov[UIO_FASTIOV];
- struct iovec *iov = fast_iov;
- struct iov_iter iter;
-
- ret = import_iovec(rq_data_dir(req), ubuffer, bufflen,
- UIO_FASTIOV, &iov, &iter);
- if (ret < 0)
- goto out;
- ret = blk_rq_map_user_iov(q, req, NULL, &iter,
- GFP_KERNEL);
- kfree(iov);
- }
- if (ret)
+static int nvme_map_user_request(struct request *req, u64 ubuffer,
+ unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
+ u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
+ bool vec)
+{
+ struct request_queue *q = req->q;
+ struct nvme_ns *ns = q->queuedata;
+ struct block_device *bdev = ns ? ns->disk->part0 : NULL;
+ struct bio *bio = NULL;
+ void *meta = NULL;
+ int ret;
+
+ if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
+ struct iov_iter iter;
+
+ /* fixedbufs is only for non-vectored io */
+ if (WARN_ON_ONCE(vec))
+ return -EINVAL;
+ ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ rq_data_dir(req), &iter, ioucmd);
+ if (ret < 0)
goto out;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
- if (bdev && meta_buffer && meta_len) {
- meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
- meta_seed, write);
- if (IS_ERR(meta)) {
- ret = PTR_ERR(meta);
- goto out_unmap;
- }
- req->cmd_flags |= REQ_INTEGRITY;
- *metap = meta;
+ ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
+ } else {
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
+ bufflen, GFP_KERNEL, vec, 0, 0,
+ rq_data_dir(req));
+ }
+
+ if (ret)
+ goto out;
+ bio = req->bio;
+ if (bdev)
+ bio_set_dev(bio, bdev);
+
+ if (bdev && meta_buffer && meta_len) {
+ meta = nvme_add_user_metadata(req, meta_buffer, meta_len,
+ meta_seed);
+ if (IS_ERR(meta)) {
+ ret = PTR_ERR(meta);
+ goto out_unmap;
}
+ *metap = meta;
}
- return req;
+ return ret;
out_unmap:
if (bio)
blk_rq_unmap_user(bio);
out:
blk_mq_free_request(req);
- return ERR_PTR(ret);
+ return ret;
}
static int nvme_submit_user_cmd(struct request_queue *q,
- struct nvme_command *cmd, void __user *ubuffer,
+ struct nvme_command *cmd, u64 ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
+ struct nvme_ctrl *ctrl;
struct request *req;
void *meta = NULL;
struct bio *bio;
+ u32 effects;
int ret;
- req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
- meta_len, meta_seed, &meta, timeout, vec, 0, 0);
+ req = nvme_alloc_user_request(q, cmd, 0, 0);
if (IS_ERR(req))
return PTR_ERR(req);
+ req->timeout = timeout;
+ if (ubuffer && bufflen) {
+ ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
+ meta_len, meta_seed, &meta, NULL, vec);
+ if (ret)
+ return ret;
+ }
+
bio = req->bio;
+ ctrl = nvme_req(req)->ctrl;
- ret = nvme_execute_passthru_rq(req);
+ ret = nvme_execute_passthru_rq(req, &effects);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -158,6 +175,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
if (bio)
blk_rq_unmap_user(bio);
blk_mq_free_request(req);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, cmd, ret);
+
return ret;
}
@@ -220,7 +241,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
- nvme_to_user_ptr(io.addr), length,
+ io.addr, length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
false);
}
@@ -274,7 +295,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &result, timeout, false);
@@ -320,7 +341,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
timeout = msecs_to_jiffies(cmd.timeout_ms);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ cmd.addr, cmd.data_len,
nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout, vec);
@@ -349,9 +370,15 @@ struct nvme_uring_cmd_pdu {
struct bio *bio;
struct request *req;
};
- void *meta; /* kernel-resident buffer */
- void __user *meta_buffer;
u32 meta_len;
+ u32 nvme_status;
+ union {
+ struct {
+ void *meta; /* kernel-resident buffer */
+ void __user *meta_buffer;
+ };
+ u64 result;
+ } u;
};
static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
@@ -360,11 +387,10 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
}
-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
{
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
struct request *req = pdu->req;
- struct bio *bio = req->bio;
int status;
u64 result;
@@ -375,27 +401,72 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
result = le64_to_cpu(nvme_req(req)->result.u64);
- if (pdu->meta)
- status = nvme_finish_user_metadata(req, pdu->meta_buffer,
- pdu->meta, pdu->meta_len, status);
- if (bio)
- blk_rq_unmap_user(bio);
+ if (pdu->meta_len)
+ status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
+ pdu->u.meta, pdu->meta_len, status);
+ if (req->bio)
+ blk_rq_unmap_user(req->bio);
blk_mq_free_request(req);
io_uring_cmd_done(ioucmd, status, result);
}
-static void nvme_uring_cmd_end_io(struct request *req, blk_status_t err)
+static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
+{
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+
+ if (pdu->bio)
+ blk_rq_unmap_user(pdu->bio);
+
+ io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ blk_status_t err)
{
struct io_uring_cmd *ioucmd = req->end_io_data;
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
- /* extract bio before reusing the same field for request */
- struct bio *bio = pdu->bio;
+ void *cookie = READ_ONCE(ioucmd->cookie);
+ req->bio = pdu->bio;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ pdu->nvme_status = -EINTR;
+ else
+ pdu->nvme_status = nvme_req(req)->status;
+ pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ return RQ_END_IO_FREE;
+}
+
+static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ blk_status_t err)
+{
+ struct io_uring_cmd *ioucmd = req->end_io_data;
+ struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ void *cookie = READ_ONCE(ioucmd->cookie);
+
+ req->bio = pdu->bio;
pdu->req = req;
- req->bio = bio;
- /* this takes care of moving rest of completion-work to task context */
- io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+
+ /*
+ * For iopoll, complete it directly.
+ * Otherwise, move the completion to task work.
+ */
+ if (cookie != NULL && blk_rq_is_poll(req))
+ nvme_uring_task_meta_cb(ioucmd);
+ else
+ io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+
+ return RQ_END_IO_NONE;
}
static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
@@ -410,6 +481,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
blk_opf_t rq_flags = 0;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -445,23 +517,45 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
rq_flags = REQ_NOWAIT;
blk_flags = BLK_MQ_REQ_NOWAIT;
}
+ if (issue_flags & IO_URING_F_IOPOLL)
+ rq_flags |= REQ_POLLED;
- req = nvme_alloc_user_request(q, &c, nvme_to_user_ptr(d.addr),
- d.data_len, nvme_to_user_ptr(d.metadata),
- d.metadata_len, 0, &meta, d.timeout_ms ?
- msecs_to_jiffies(d.timeout_ms) : 0, vec, rq_flags,
- blk_flags);
+retry:
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags);
if (IS_ERR(req))
return PTR_ERR(req);
- req->end_io = nvme_uring_cmd_end_io;
- req->end_io_data = ioucmd;
+ req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
+
+ if (d.addr && d.data_len) {
+ ret = nvme_map_user_request(req, d.addr,
+ d.data_len, nvme_to_user_ptr(d.metadata),
+ d.metadata_len, 0, &meta, ioucmd, vec);
+ if (ret)
+ return ret;
+ }
+ if (issue_flags & IO_URING_F_IOPOLL && rq_flags & REQ_POLLED) {
+ if (unlikely(!req->bio)) {
+ /* we can't poll this, so alloc regular req instead */
+ blk_mq_free_request(req);
+ rq_flags &= ~REQ_POLLED;
+ goto retry;
+ } else {
+ WRITE_ONCE(ioucmd->cookie, req->bio);
+ req->bio->bi_opf |= REQ_POLLED;
+ }
+ }
/* to free bio on completion, as req->bio will be null at that time */
pdu->bio = req->bio;
- pdu->meta = meta;
- pdu->meta_buffer = nvme_to_user_ptr(d.metadata);
pdu->meta_len = d.metadata_len;
-
+ req->end_io_data = ioucmd;
+ if (pdu->meta_len) {
+ pdu->u.meta = meta;
+ pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
+ req->end_io = nvme_uring_cmd_end_io_meta;
+ } else {
+ req->end_io = nvme_uring_cmd_end_io;
+ }
blk_execute_rq_nowait(req, false);
return -EIOCBQUEUED;
}
@@ -559,9 +653,6 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static int nvme_uring_cmd_checks(unsigned int issue_flags)
{
- /* IOPOLL not supported yet */
- if (issue_flags & IO_URING_F_IOPOLL)
- return -EOPNOTSUPP;
/* NVMe passthrough requires big SQE/CQE support */
if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) !=
@@ -604,6 +695,25 @@ int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
return nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
}
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct bio *bio;
+ int ret = 0;
+ struct nvme_ns *ns;
+ struct request_queue *q;
+
+ rcu_read_lock();
+ bio = READ_ONCE(ioucmd->cookie);
+ ns = container_of(file_inode(ioucmd->file)->i_cdev,
+ struct nvme_ns, cdev);
+ q = ns->queue;
+ if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
+ ret = bio_poll(bio, iob, poll_flags);
+ rcu_read_unlock();
+ return ret;
+}
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -685,6 +795,31 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
+
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ struct bio *bio;
+ int ret = 0;
+ struct request_queue *q;
+
+ if (ns) {
+ rcu_read_lock();
+ bio = READ_ONCE(ioucmd->cookie);
+ q = ns->queue;
+ if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio
+ && bio->bi_bdev)
+ ret = bio_poll(bio, iob, poll_flags);
+ rcu_read_unlock();
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
@@ -692,6 +827,10 @@ int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
struct nvme_ctrl *ctrl = ioucmd->file->private_data;
int ret;
+ /* IOPOLL not supported yet */
+ if (issue_flags & IO_URING_F_IOPOLL)
+ return -EOPNOTSUPP;
+
ret = nvme_uring_cmd_checks(issue_flags);
if (ret)
return ret;
@@ -757,11 +896,17 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
dev_warn(ctrl->device, "resetting controller\n");
return nvme_reset_ctrl_sync(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
return nvme_reset_subsystem(ctrl);
case NVME_IOCTL_RESCAN:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
nvme_queue_scan(ctrl);
return 0;
default:
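
Editor's note: with the hunk above, NVME_IOCTL_RESET, NVME_IOCTL_SUBSYS_RESET and NVME_IOCTL_RESCAN on the controller character device fail with EACCES unless the caller has CAP_SYS_ADMIN. A minimal user-space check, assuming a controller node at /dev/nvme0:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
    int fd = open("/dev/nvme0", O_RDONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* triggers a namespace rescan; now requires CAP_SYS_ADMIN */
    if (ioctl(fd, NVME_IOCTL_RESCAN) < 0)
        fprintf(stderr, "rescan: %s\n", strerror(errno));
    close(fd);
    return 0;
}
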
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6ef497c75a16..00f2f81e20fa 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -439,6 +439,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
.unlocked_ioctl = nvme_ns_head_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.uring_cmd = nvme_ns_head_chr_uring_cmd,
+ .uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
};
static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1bdf714dcd9e..a29877217ee6 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -233,6 +233,12 @@ struct nvme_fault_inject {
#endif
};
+enum nvme_ctrl_flags {
+ NVME_CTRL_FAILFAST_EXPIRED = 0,
+ NVME_CTRL_ADMIN_Q_STOPPED = 1,
+ NVME_CTRL_STARTED_ONCE = 2,
+};
+
struct nvme_ctrl {
bool comp_seen;
enum nvme_ctrl_state state;
@@ -354,8 +360,6 @@ struct nvme_ctrl {
u16 maxcmd;
int nr_reconnects;
unsigned long flags;
-#define NVME_CTRL_FAILFAST_EXPIRED 0
-#define NVME_CTRL_ADMIN_Q_STOPPED 1
struct nvmf_ctrl_options *opts;
struct page *discard_page;
@@ -602,11 +606,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
static inline void nvme_should_fail(struct request *req) {}
#endif
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
+ int ret;
+
if (!ctrl->subsystem)
return -ENOTTY;
- return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (!nvme_wait_reset(ctrl))
+ return -EBUSY;
+
+ ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+ if (ret)
+ return ret;
+
+ return nvme_try_sched_reset(ctrl);
}
/*
@@ -712,7 +728,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -722,6 +737,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int flags,
+ unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
@@ -802,7 +825,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
@@ -821,6 +843,10 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
+int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
+int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
+ struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
@@ -968,14 +994,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
}
#endif
-static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
-{
- ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
- if (IS_ERR(ctrl->connect_q))
- return PTR_ERR(ctrl->connect_q);
- return 0;
-}
-
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
return dev_to_disk(dev)->private_data;
@@ -1023,7 +1041,9 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+ struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
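
Editor's note: nvme.h now splits the passthrough path. nvme_execute_passthru_rq() only starts the command and reports the command effects; the caller invokes nvme_passthru_end() after the request has been completed and freed, exactly as nvme_submit_user_cmd() does in the ioctl.c hunk above. A sketch of the expected calling pattern; request/command setup and result handling are omitted.

/* sketch of the new contract for an in-kernel passthrough user */
static int submit_passthru(struct request *req, struct nvme_command *cmd)
{
    struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
    u32 effects;
    int ret;

    ret = nvme_execute_passthru_rq(req, &effects);
    blk_mq_free_request(req);

    /* command side effects (e.g. namespace changes) are handled afterwards */
    if (effects)
        nvme_passthru_end(ctrl, effects, cmd, ret);
    return ret;
}
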
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3a1c37f32f30..5b796efa325b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -226,12 +226,12 @@ struct nvme_queue {
struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
- struct nvme_queue *nvmeq;
bool use_sgl;
- int aborted;
- int npages; /* In the PRP list. 0 means small pool in use */
- dma_addr_t first_dma;
+ bool aborted;
+ s8 nr_allocations; /* PRP list pool allocations. 0 means small
+ pool in use */
unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t first_dma;
dma_addr_t meta_dma;
struct sg_table sgt;
};
@@ -430,11 +430,6 @@ static int nvme_pci_init_request(struct blk_mq_tag_set *set,
{
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = &dev->queues[queue_idx];
-
- BUG_ON(!nvmeq);
- iod->nvmeq = nvmeq;
nvme_req(req)->ctrl = &dev->ctrl;
nvme_req(req)->cmd = &iod->cmd;
@@ -450,7 +445,7 @@ static int queue_irq_offset(struct nvme_dev *dev)
return 0;
}
-static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_dev *dev = set->driver_data;
int i, qoff, offset;
@@ -477,8 +472,6 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
qoff += map->nr_queues;
offset += map->nr_queues;
}
-
- return 0;
}
/*
@@ -528,7 +521,7 @@ static void **nvme_pci_iod_list(struct request *req)
static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
@@ -536,7 +529,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
if (!nvme_ctrl_sgl_supported(&dev->ctrl))
return false;
- if (!iod->nvmeq->qid)
+ if (!nvmeq->qid)
return false;
if (!sgl_threshold || avg_seg_size < sgl_threshold)
return false;
@@ -550,7 +543,7 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
dma_addr_t dma_addr = iod->first_dma;
int i;
- for (i = 0; i < iod->npages; i++) {
+ for (i = 0; i < iod->nr_allocations; i++) {
__le64 *prp_list = nvme_pci_iod_list(req)[i];
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
@@ -566,7 +559,7 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
dma_addr_t dma_addr = iod->first_dma;
int i;
- for (i = 0; i < iod->npages; i++) {
+ for (i = 0; i < iod->nr_allocations; i++) {
struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
@@ -589,7 +582,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- if (iod->npages == 0)
+ if (iod->nr_allocations == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
iod->first_dma);
else if (iod->use_sgl)
@@ -651,15 +644,15 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
- iod->npages = 0;
+ iod->nr_allocations = 0;
} else {
pool = dev->prp_page_pool;
- iod->npages = 1;
+ iod->nr_allocations = 1;
}
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
- iod->npages = -1;
+ iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
list[0] = prp_list;
@@ -671,7 +664,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
goto free_prps;
- list[iod->npages++] = prp_list;
+ list[iod->nr_allocations++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
@@ -746,15 +739,15 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
pool = dev->prp_small_pool;
- iod->npages = 0;
+ iod->nr_allocations = 0;
} else {
pool = dev->prp_page_pool;
- iod->npages = 1;
+ iod->nr_allocations = 1;
}
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list) {
- iod->npages = -1;
+ iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
@@ -773,7 +766,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
goto free_sgls;
i = 0;
- nvme_pci_iod_list(req)[iod->npages++] = sg_list;
+ nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;
sg_list[i++] = *link;
nvme_pci_sgl_set_seg(link, sgl_dma, entries);
}
@@ -833,6 +826,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
int rc;
if (blk_rq_nr_phys_segments(req) == 1) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -840,7 +834,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
- if (iod->nvmeq->qid && sgl_threshold &&
+ if (nvmeq->qid && sgl_threshold &&
nvme_ctrl_sgl_supported(&dev->ctrl))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
@@ -898,8 +892,8 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
- iod->aborted = 0;
- iod->npages = -1;
+ iod->aborted = false;
+ iod->nr_allocations = -1;
iod->sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
@@ -1019,12 +1013,16 @@ static void nvme_queue_rqs(struct request **rqlist)
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_dev *dev = iod->nvmeq->dev;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+
+ if (blk_integrity_rq(req)) {
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- if (blk_integrity_rq(req))
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+ }
+
if (blk_rq_nr_phys_segments(req))
nvme_unmap_data(dev, req);
}
@@ -1270,15 +1268,15 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}
-static void abort_endio(struct request *req, blk_status_t error)
+static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = iod->nvmeq;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
dev_warn(nvmeq->dev->ctrl.device,
"Abort status: 0x%x", nvme_req(req)->status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1335,7 +1333,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = iod->nvmeq;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
@@ -1416,7 +1414,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
atomic_inc(&dev->ctrl.abort_limit);
return BLK_EH_RESET_TIMER;
}
- iod->aborted = 1;
+ iod->aborted = true;
cmd.abort.opcode = nvme_admin_abort_cmd;
cmd.abort.cid = nvme_cid(req);
@@ -2450,22 +2448,25 @@ out_unlock:
return result;
}
-static void nvme_del_queue_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
blk_mq_free_request(req);
complete(&nvmeq->delete_done);
+ return RQ_END_IO_NONE;
}
-static void nvme_del_cq_end(struct request *req, blk_status_t error)
+static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
+ blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
if (error)
set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
- nvme_del_queue_end(req, error);
+ return nvme_del_queue_end(req, error);
}
static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
@@ -2529,9 +2530,11 @@ static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
set->ops = &nvme_mq_ops;
set->nr_hw_queues = dev->online_queues - 1;
- set->nr_maps = 2; /* default + read */
+ set->nr_maps = 1;
+ if (dev->io_queues[HCTX_TYPE_READ])
+ set->nr_maps = 2;
if (dev->io_queues[HCTX_TYPE_POLL])
- set->nr_maps++;
+ set->nr_maps = 3;
set->timeout = NVME_IO_TIMEOUT;
set->numa_node = dev->ctrl.numa_node;
set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
@@ -2834,6 +2837,8 @@ static void nvme_reset_work(struct work_struct *work)
nvme_start_admin_queue(&dev->ctrl);
}
+ dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
+
/*
* Limit the max command size to prevent iod->sg allocations going
* over a single page.
@@ -2846,7 +2851,6 @@ static void nvme_reset_work(struct work_struct *work)
* Don't limit the IOMMU merged segment size.
*/
dma_set_max_seg_size(dev->dev, 0xffffffff);
- dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
mutex_unlock(&dev->shutdown_lock);
@@ -3470,6 +3474,10 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1987, 0x5019), /* phison E19 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
+ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
@@ -3517,6 +3525,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
@@ -3563,6 +3573,8 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+ BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
+ S8_MAX);
return pci_register_driver(&nvme_driver);
}
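
Editor's note: the nvme_pci_alloc_tag_set() hunk stops advertising a separate read queue map when no dedicated read queues were allocated: nr_maps is 1 by default, 2 when read queues exist, and 3 when poll queues exist as well. The selection logic, modelled stand-alone (the HCTX_TYPE_* indices mirror the block layer's enum):

#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

static unsigned int pci_nr_maps(const unsigned int io_queues[HCTX_MAX_TYPES])
{
    unsigned int nr_maps = 1;           /* default map only */

    if (io_queues[HCTX_TYPE_READ])
        nr_maps = 2;                    /* default + read */
    if (io_queues[HCTX_TYPE_POLL])
        nr_maps = 3;                    /* default + read + poll */
    return nr_maps;
}

int main(void)
{
    unsigned int only_default[HCTX_MAX_TYPES] = { 8, 0, 0 };
    unsigned int with_poll[HCTX_MAX_TYPES] = { 8, 0, 2 };

    printf("%u %u\n", pci_nr_maps(only_default), pci_nr_maps(with_poll));
    return 0;
}
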
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3100643be299..5ad0ab2853a4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -295,7 +295,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
@@ -320,7 +320,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -332,7 +332,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
struct nvme_rdma_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -696,11 +696,12 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
return ret;
}
-static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
+static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
+ int first, int last)
{
int i, ret = 0;
- for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_rdma_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -709,7 +710,7 @@ static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_rdma_stop_queue(&ctrl->queues[i]);
return ret;
}
@@ -787,64 +788,21 @@ out_free_queues:
return ret;
}
-static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
- int ret;
+ unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- set->flags = BLK_MQ_F_NO_SCHED;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- ctrl->ctrl.admin_tagset = set;
- return ret;
-}
-
-static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->tag_set;
- int ret;
+ if (ctrl->max_integrity_segments)
+ cmd_size += sizeof(struct nvme_rdma_sgl) +
+ NVME_RDMA_METADATA_SGL_SIZE;
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE;
- set->cmd_size = sizeof(struct nvme_rdma_request) +
- NVME_RDMA_DATA_SGL_SIZE;
- if (nctrl->max_integrity_segments)
- set->cmd_size += sizeof(struct nvme_rdma_sgl) +
- NVME_RDMA_METADATA_SGL_SIZE;
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- ctrl->ctrl.tagset = set;
- return ret;
+ return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+ &nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
}
-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
- bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
- if (remove) {
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
- }
if (ctrl->async_event_sqe.data) {
cancel_work_sync(&ctrl->ctrl.async_event_work);
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -886,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue;
if (new) {
- error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+ &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+ BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_rdma_request) +
+ NVME_RDMA_DATA_SGL_SIZE);
if (error)
goto out_free_async_qe;
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
}
error = nvme_rdma_start_queue(ctrl, 0);
if (error)
- goto out_cleanup_queue;
+ goto out_remove_admin_tag_set;
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
@@ -932,15 +883,9 @@ out_quiesce_queue:
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_cleanup_queue:
- if (new)
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
+out_remove_admin_tag_set:
if (new)
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- if (new)
- blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_async_qe:
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -952,19 +897,9 @@ out_free_queue:
return error;
}
-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
- bool remove)
-{
- if (remove) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(ctrl->ctrl.tagset);
- }
- nvme_rdma_free_io_queues(ctrl);
-}
-
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_rdma_alloc_io_queues(ctrl);
if (ret)
@@ -974,15 +909,17 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
if (ret)
goto out_free_io_queues;
-
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tag_set;
}
- ret = nvme_rdma_start_io_queues(ctrl);
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the

+ * queue number might have changed.
+ */
+ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
+ ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
if (!new) {
nvme_start_queues(&ctrl->ctrl);
@@ -1000,19 +937,25 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
nvme_unfreeze(&ctrl->ctrl);
}
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
+ ctrl->tag_set.nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
return 0;
out_wait_freeze_timed_out:
nvme_stop_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
-out_cleanup_connect_q:
+out_cleanup_tagset:
nvme_cancel_tagset(&ctrl->ctrl);
if (new)
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
- if (new)
- blk_mq_free_tag_set(ctrl->ctrl.tagset);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
@@ -1025,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
- if (remove)
+ if (remove) {
nvme_start_admin_queue(&ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl, remove);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_destroy_admin_queue(ctrl);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1039,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
- if (remove)
+ if (remove) {
nvme_start_queues(&ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, remove);
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ }
+ nvme_rdma_free_io_queues(ctrl);
}
}
@@ -1163,14 +1110,18 @@ destroy_io:
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
nvme_cancel_tagset(&ctrl->ctrl);
- nvme_rdma_destroy_io_queues(ctrl, new);
+ if (new)
+ nvme_remove_io_tag_set(&ctrl->ctrl);
+ nvme_rdma_free_io_queues(ctrl);
}
destroy_admin:
nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
- nvme_rdma_destroy_admin_queue(ctrl, new);
+ if (new)
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl);
return ret;
}
@@ -2188,9 +2139,9 @@ static void nvme_rdma_complete_rq(struct request *rq)
nvme_complete_rq(rq);
}
-static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
@@ -2231,8 +2182,6 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT],
ctrl->io_queues[HCTX_TYPE_READ],
ctrl->io_queues[HCTX_TYPE_POLL]);
-
- return 0;
}
static const struct blk_mq_ops nvme_rdma_mq_ops = {
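The hunks above split I/O queue startup into two passes keyed on the tag set size. A minimal standalone C sketch of that first/last range logic (userspace only; the queue counts and helper below are invented for illustration):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void start_queue_range(int first, int last)
{
	for (int i = first; i < last; i++)
		printf("starting io queue %d\n", i);
}

int main(void)
{
	int tagset_hw_queues = 4;	/* queues the existing tag set knows about */
	int queue_count = 7;		/* 1 admin + 6 io queues after a reconnect */

	/* pass 1: only the queues the current tag set can service */
	int nr_queues = MIN(tagset_hw_queues + 1, queue_count);
	start_queue_range(1, nr_queues);

	/* ... tag set updated to the new queue count, queues unfrozen ... */
	tagset_hw_queues = queue_count - 1;

	/* pass 2: any queues gained by the reconnect */
	start_queue_range(nr_queues, tagset_hw_queues + 1);
	return 0;
}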
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 044da18c06f5..93e2e313fa70 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
- bool more_requests;
/* recv state */
void *pdu;
@@ -134,7 +133,6 @@ struct nvme_tcp_queue {
/* send state */
struct nvme_tcp_request *request;
- int queue_size;
u32 maxh2cdata;
size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl;
@@ -320,7 +318,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
+ !llist_empty(&queue->req_list);
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -339,9 +337,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
*/
if (queue->io_cpu == raw_smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
- queue->more_requests = !last;
nvme_tcp_send_all(queue);
- queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
}
@@ -466,7 +462,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_tcp_cmd_pdu *pdu;
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
@@ -490,7 +486,7 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
hctx->driver_data = queue;
@@ -500,7 +496,7 @@ static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
struct nvme_tcp_queue *queue = &ctrl->queues[0];
hctx->driver_data = queue;
@@ -1229,7 +1225,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
else if (unlikely(result < 0))
return;
- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
@@ -1479,8 +1475,7 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
-static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
- int qid, size_t queue_size)
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1492,7 +1487,6 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
INIT_LIST_HEAD(&queue->send_list);
mutex_init(&queue->send_mutex);
INIT_WORK(&queue->io_work, nvme_tcp_io_work);
- queue->queue_size = queue_size;
if (qid > 0)
queue->cmnd_capsule_len = nctrl->ioccsz * 16;
@@ -1690,51 +1684,6 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
return ret;
}
-static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
- int ret;
-
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_admin_mq_ops;
- set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = 1;
- set->timeout = NVME_ADMIN_TIMEOUT;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- nctrl->admin_tagset = set;
- return ret;
-}
-
-static int nvme_tcp_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
- struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
- struct blk_mq_tag_set *set = &ctrl->tag_set;
- int ret;
-
- memset(set, 0, sizeof(*set));
- set->ops = &nvme_tcp_mq_ops;
- set->queue_depth = nctrl->sqsize + 1;
- set->reserved_tags = NVMF_RESERVED_TAGS;
- set->numa_node = nctrl->numa_node;
- set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- set->cmd_size = sizeof(struct nvme_tcp_request);
- set->driver_data = ctrl;
- set->nr_hw_queues = nctrl->queue_count - 1;
- set->timeout = NVME_IO_TIMEOUT;
- set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
- ret = blk_mq_alloc_tag_set(set);
- if (!ret)
- nctrl->tagset = set;
- return ret;
-}
-
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
if (to_tcp_ctrl(ctrl)->async_req.pdu) {
@@ -1762,11 +1711,12 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
nvme_tcp_stop_queue(ctrl, i);
}
-static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
+ int first, int last)
{
int i, ret;
- for (i = 1; i < ctrl->queue_count; i++) {
+ for (i = first; i < last; i++) {
ret = nvme_tcp_start_queue(ctrl, i);
if (ret)
goto out_stop_queues;
@@ -1775,7 +1725,7 @@ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
return 0;
out_stop_queues:
- for (i--; i >= 1; i--)
+ for (i--; i >= first; i--)
nvme_tcp_stop_queue(ctrl, i);
return ret;
}
@@ -1784,7 +1734,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
{
int ret;
- ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+ ret = nvme_tcp_alloc_queue(ctrl, 0);
if (ret)
return ret;
@@ -1804,7 +1754,7 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_tcp_alloc_queue(ctrl, i, ctrl->sqsize + 1);
+ ret = nvme_tcp_alloc_queue(ctrl, i);
if (ret)
goto out_free_queues;
}
@@ -1892,32 +1842,35 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_io_queues(ctrl);
- if (remove) {
- blk_mq_destroy_queue(ctrl->connect_q);
- blk_mq_free_tag_set(ctrl->tagset);
- }
+ if (remove)
+ nvme_remove_io_tag_set(ctrl);
nvme_tcp_free_io_queues(ctrl);
}
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
{
- int ret;
+ int ret, nr_queues;
ret = nvme_tcp_alloc_io_queues(ctrl);
if (ret)
return ret;
if (new) {
- ret = nvme_tcp_alloc_tag_set(ctrl);
+ ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
+ &nvme_tcp_mq_ops,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
+ sizeof(struct nvme_tcp_request));
if (ret)
goto out_free_io_queues;
-
- ret = nvme_ctrl_init_connect_q(ctrl);
- if (ret)
- goto out_free_tag_set;
}
- ret = nvme_tcp_start_io_queues(ctrl);
+ /*
+ * Only start IO queues for which we have allocated the tagset
+ * and limited it to the available queues. On reconnects, the
+ * queue number might have changed.
+ */
+ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
+ ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
if (ret)
goto out_cleanup_connect_q;
@@ -1937,6 +1890,15 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
nvme_unfreeze(ctrl);
}
+ /*
+ * If the number of queues has increased (reconnect case)
+ * start all new queues now.
+ */
+ ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
+ ctrl->tagset->nr_hw_queues + 1);
+ if (ret)
+ goto out_wait_freeze_timed_out;
+
return 0;
out_wait_freeze_timed_out:
@@ -1946,10 +1908,7 @@ out_wait_freeze_timed_out:
out_cleanup_connect_q:
nvme_cancel_tagset(ctrl);
if (new)
- blk_mq_destroy_queue(ctrl->connect_q);
-out_free_tag_set:
- if (new)
- blk_mq_free_tag_set(ctrl->tagset);
+ nvme_remove_io_tag_set(ctrl);
out_free_io_queues:
nvme_tcp_free_io_queues(ctrl);
return ret;
@@ -1958,11 +1917,8 @@ out_free_io_queues:
static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
{
nvme_tcp_stop_queue(ctrl, 0);
- if (remove) {
- blk_mq_destroy_queue(ctrl->admin_q);
- blk_mq_destroy_queue(ctrl->fabrics_q);
- blk_mq_free_tag_set(ctrl->admin_tagset);
- }
+ if (remove)
+ nvme_remove_admin_tag_set(ctrl);
nvme_tcp_free_admin_queue(ctrl);
}
@@ -1975,26 +1931,17 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return error;
if (new) {
- error = nvme_tcp_alloc_admin_tag_set(ctrl);
+ error = nvme_alloc_admin_tag_set(ctrl,
+ &to_tcp_ctrl(ctrl)->admin_tag_set,
+ &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
+ sizeof(struct nvme_tcp_request));
if (error)
goto out_free_queue;
-
- ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
- if (IS_ERR(ctrl->fabrics_q)) {
- error = PTR_ERR(ctrl->fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
- if (IS_ERR(ctrl->admin_q)) {
- error = PTR_ERR(ctrl->admin_q);
- goto out_cleanup_fabrics_q;
- }
}
error = nvme_tcp_start_queue(ctrl, 0);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
error = nvme_enable_ctrl(ctrl);
if (error)
@@ -2014,15 +1961,9 @@ out_quiesce_queue:
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
-out_cleanup_queue:
- if (new)
- blk_mq_destroy_queue(ctrl->admin_q);
-out_cleanup_fabrics_q:
+out_cleanup_tagset:
if (new)
- blk_mq_destroy_queue(ctrl->fabrics_q);
-out_free_tagset:
- if (new)
- blk_mq_free_tag_set(ctrl->admin_tagset);
+ nvme_remove_admin_tag_set(ctrl);
out_free_queue:
nvme_tcp_free_admin_queue(ctrl);
return error;
@@ -2471,9 +2412,9 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
- struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
@@ -2512,8 +2453,6 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT],
ctrl->io_queues[HCTX_TYPE_READ],
ctrl->io_queues[HCTX_TYPE_POLL]);
-
- return 0;
}
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
@@ -2532,6 +2471,25 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
return queue->nr_cqe;
}
+static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+ struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
+ struct sockaddr_storage src_addr;
+ int ret, len;
+
+ len = nvmf_get_address(ctrl, buf, size);
+
+ ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+ if (ret > 0) {
+ if (len > 0)
+ len--; /* strip trailing newline */
+ len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+ (len) ? "," : "", &src_addr);
+ }
+
+ return len;
+}
+
static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.commit_rqs = nvme_tcp_commit_rqs,
@@ -2563,7 +2521,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
.free_ctrl = nvme_tcp_free_ctrl,
.submit_async_event = nvme_tcp_submit_async_event,
.delete_ctrl = nvme_tcp_delete_ctrl,
- .get_address = nvmf_get_address,
+ .get_address = nvme_tcp_get_address,
.stop_ctrl = nvme_tcp_stop_ctrl,
};
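nvme_tcp_get_address() above extends the generic fabrics address string with the local source address. A small userspace sketch of the append-after-stripping-the-newline pattern (plain %s instead of the kernel's %pISc; buffer contents are invented):

#include <stdio.h>
#include <string.h>

static int append_src_addr(char *buf, int size, const char *src_addr)
{
	int len = strlen(buf);		/* stands in for nvmf_get_address() */

	if (len > 0)
		len--;			/* strip trailing newline */
	len += snprintf(buf + len, size - len, "%ssrc_addr=%s\n",
			len ? "," : "", src_addr);
	return len;
}

int main(void)
{
	char buf[128] = "traddr=192.168.0.10,trsvcid=4420\n";

	append_src_addr(buf, sizeof(buf), "192.168.0.2");
	fputs(buf, stdout);	/* traddr=...,trsvcid=4420,src_addr=192.168.0.2 */
	return 0;
}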
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index fc8a957fad0a..c8a061ce3ee5 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -449,7 +449,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/*
* Max command capsule size is sqe + in-capsule data size.
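The strlcpy() to strscpy() switch above (and the matching one in discovery.c further down) changes only how truncation is reported. A hedged userspace stand-in, not the kernel implementation:

#include <stdio.h>
#include <string.h>

#define E2BIG 7		/* illustrative; matches the usual errno value */

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return -E2BIG;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;		/* truncated, unlike strlcpy's strlen(src) */
	}
	memcpy(dst, src, len + 1);
	return (long)len;		/* bytes copied, excluding the NUL */
}

int main(void)
{
	char subnqn[16];

	printf("%ld\n", my_strscpy(subnqn, "nqn.2014-08.org.nvmexpress.discovery",
				   sizeof(subnqn)));	/* -7: did not fit */
	printf("%s\n", subnqn);
	return 0;
}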
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index cf690df34775..c4113b43dbfe 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -196,6 +196,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
if (IS_ERR(ctrl->ctrl_key)) {
ret = PTR_ERR(ctrl->ctrl_key);
ctrl->ctrl_key = NULL;
+ goto out_free_hash;
}
pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
ctrl->ctrl_key->hash > 0 ?
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2bcd60758919..e34a2896fedb 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1281,6 +1281,34 @@ static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
+static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
+}
+
+static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
+ const char *page, size_t cnt)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ u16 qid_max;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+
+ if (sscanf(page, "%hu\n", &qid_max) != 1)
+ return -EINVAL;
+
+ if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
+ return -EINVAL;
+
+ down_write(&nvmet_config_sem);
+ to_subsys(item)->max_qid = qid_max;
+ up_write(&nvmet_config_sem);
+ return cnt;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
+
static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
@@ -1288,6 +1316,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_qid_max,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
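The new qid_max attribute above follows the usual configfs text parse/validate pattern. A standalone sketch of just that parsing and range check (locking and struct plumbing left out; NVMET_NR_QUEUES is redefined here purely for illustration):

#include <stdio.h>
#include <errno.h>

#define NVMET_NR_QUEUES 128

static unsigned short max_qid = NVMET_NR_QUEUES;

static int qid_max_store(const char *page)
{
	unsigned short qid_max;

	if (sscanf(page, "%hu", &qid_max) != 1)
		return -EINVAL;
	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;
	max_qid = qid_max;	/* the kernel takes nvmet_config_sem around this */
	return 0;
}

int main(void)
{
	char out[16];

	qid_max_store("8\n");
	snprintf(out, sizeof(out), "%u\n", max_qid);	/* the _show side */
	fputs(out, stdout);
	if (qid_max_store("0\n"))
		fputs("out-of-range value rejected\n", stderr);
	return 0;
}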
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a1345790005f..14677145bbba 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
@@ -830,6 +832,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
}
init_completion(&sq->free_done);
init_completion(&sq->confirm_done);
+ nvmet_auth_sq_init(sq);
return 0;
}
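The __nvmet_req_complete() change above caches the namespace pointer because ->queue_response() may free the request. A compact userspace model of that ordering (types and helpers are invented):

#include <stdio.h>
#include <stdlib.h>

struct ns  { int refs; };
struct req { struct ns *ns; };

static void ns_put(struct ns *ns)		{ ns->refs--; }
static void queue_response(struct req *req)	{ free(req); /* req is gone */ }

static void req_complete(struct req *req)
{
	struct ns *ns = req->ns;	/* cache before req can be freed */

	queue_response(req);		/* may free req */
	if (ns)
		ns_put(ns);		/* safe: no longer touches req */
}

int main(void)
{
	struct ns ns = { .refs = 1 };
	struct req *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->ns = &ns;
	req_complete(req);
	printf("ns refs after completion: %d\n", ns.refs);
	return 0;
}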
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c2162eef8ce1..668d257fa986 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -292,7 +292,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
- strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
+ strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index ebdf9aa81041..7970a7640e58 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -23,17 +23,12 @@ static void nvmet_auth_expired_work(struct work_struct *work)
sq->dhchap_tid = -1;
}
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
+void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
- u32 result = le32_to_cpu(req->cqe->result.u32);
-
/* Initialize in-band authentication */
- INIT_DELAYED_WORK(&req->sq->auth_expired_work,
- nvmet_auth_expired_work);
- req->sq->authenticated = false;
- req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
- result |= (u32)NVME_CONNECT_AUTHREQ_ATR << 16;
- req->cqe->result.u32 = cpu_to_le32(result);
+ INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
+ sq->authenticated = false;
+ sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}
static u16 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
@@ -177,7 +172,7 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
return 0;
}
-static u16 nvmet_auth_failure2(struct nvmet_req *req, void *d)
+static u16 nvmet_auth_failure2(void *d)
{
struct nvmf_auth_dhchap_failure_data *data = d;
@@ -229,10 +224,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
}
status = nvmet_copy_from_sgl(req, 0, d, tl);
- if (status) {
- kfree(d);
- goto done;
- }
+ if (status)
+ goto done_kfree;
data = d;
pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
@@ -310,7 +303,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
goto done_kfree;
break;
case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
- status = nvmet_auth_failure2(req, d);
+ status = nvmet_auth_failure2(d);
if (status) {
pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
ctrl->cntlid, req->sq->qid, status);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index f91a56180d3d..43b5bd8bb6a5 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -198,6 +198,12 @@ err:
return ret;
}
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+{
+ return (u32)ctrl->cntlid |
+ (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+}
+
static void nvmet_execute_admin_connect(struct nvmet_req *req)
{
struct nvmf_connect_command *c = &req->cmd->connect;
@@ -269,10 +275,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "",
nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
- if (nvmet_has_auth(ctrl))
- nvmet_init_auth(ctrl, req);
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
@@ -328,14 +331,8 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out_ctrl_put;
- /* pass back cntlid for successful completion */
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
-
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
- if (nvmet_has_auth(ctrl))
- nvmet_init_auth(ctrl, req);
-
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
out:
kfree(d);
complete:
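nvmet_connect_result() above packs the controller ID into the low 16 bits of the Connect response and ORs in the authentication-required flag. A tiny sketch of that encoding (the flag bit below is a stand-in, not the real NVME_CONNECT_AUTHREQ_ATR value):

#include <stdio.h>

#define AUTHREQ_ATR (1u << 18)	/* illustrative bit above the 16-bit cntlid field */

static unsigned int connect_result(unsigned short cntlid, int has_auth)
{
	return (unsigned int)cntlid | (has_auth ? AUTHREQ_ATR : 0);
}

int main(void)
{
	unsigned int res = connect_result(0x4d, 1);

	printf("cntlid=%u authreq=%d\n", res & 0xffffu, !!(res & AUTHREQ_ATR));
	return 0;
}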
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 2dc1c1035626..c2d6cea0236b 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -12,11 +12,9 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
- const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of logical blocks per physical block. */
- const u32 lpp = ql->physical_block_size / ql->logical_block_size;
/* Logical blocks per physical block, 0's based. */
- const __le16 lpp0b = to0based(lpp);
+ const __le16 lpp0b = to0based(bdev_physical_block_size(bdev) /
+ bdev_logical_block_size(bdev));
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -42,11 +40,12 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
- id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+ id->npdg = to0based(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
/* NPDG = Namespace Preferred Deallocate Alignment */
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
- id->nows = to0based(ql->io_opt / ql->logical_block_size);
+ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
}
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -334,6 +333,11 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
struct bio *bio = &req->b.inline_bio;
+ if (!bdev_write_cache(req->ns->bdev)) {
+ nvmet_req_complete(req, NVME_SC_SUCCESS);
+ return;
+ }
+
if (!nvmet_check_transfer_len(req, 0))
return;
@@ -347,6 +351,9 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
+ if (!bdev_write_cache(req->ns->bdev))
+ return 0;
+
if (blkdev_issue_flush(req->ns->bdev))
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
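The identify-namespace hunk above converts block-device ratios into the 0's-based fields NVMe expects. A quick worked example with a stand-in for the kernel's to0based() helper (the real helper also clamps the value into the 16-bit field, which is omitted here):

#include <stdio.h>

static unsigned int to0based(unsigned int a)
{
	return (a ? a : 1) - 1;		/* 0 and 1 both encode as 0 */
}

int main(void)
{
	unsigned int logical = 512, physical = 4096, discard_gran = 4096, io_opt = 0;

	printf("lpp0b = %u\n", to0based(physical / logical));		/* 8 blocks -> 7 */
	printf("npdg  = %u\n", to0based(discard_gran / logical));	/* 8 blocks -> 7 */
	printf("nows  = %u\n", to0based(io_opt / logical));		/* no hint  -> 0 */
	return 0;
}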
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9750a7fca268..b45fe3adf015 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -204,7 +204,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
struct request *req, unsigned int hctx_idx,
unsigned int numa_node)
{
- struct nvme_loop_ctrl *ctrl = set->driver_data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
nvme_req(req)->ctrl = &ctrl->ctrl;
@@ -218,7 +218,7 @@ static struct lock_class_key loop_hctx_fq_lock_key;
static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
@@ -238,7 +238,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
- struct nvme_loop_ctrl *ctrl = data;
+ struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
struct nvme_loop_queue *queue = &ctrl->queues[0];
BUG_ON(hctx_idx != 0);
@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list);
mutex_unlock(&nvme_loop_ctrl_mutex);
- if (nctrl->tagset) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (nctrl->tagset)
+ nvme_remove_io_tag_set(nctrl);
kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->admin_tag_set.driver_data = ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
if (error)
return error;
ctrl->ctrl.queue_count = 1;
- error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error)
goto out_free_sq;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
-
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/* reset stopped state for the fresh admin queue */
clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_queue:
+out_cleanup_tagset:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
return error;
@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
return ret;
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_loop_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->tag_set.driver_data = ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)
goto out_destroy_queues;
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tagset;
-
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_connect_q:
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
nvme_loop_destroy_io_queues(ctrl);
return ret;
@@ -601,7 +556,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = -ENOMEM;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
@@ -621,6 +575,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
}
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
if (opts->nr_io_queues) {
ret = nvme_loop_create_io_queues(ctrl);
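Moving the sqsize assignment above means it is derived from queue_size only after the MAXCMD clamp. The arithmetic in isolation (numbers invented):

#include <stdio.h>

int main(void)
{
	unsigned int queue_size = 1024, maxcmd = 128, sqsize;

	if (queue_size > maxcmd)	/* clamp first, as the patch now does */
		queue_size = maxcmd;
	sqsize = queue_size - 1;	/* 0's based queue depth */

	printf("queue_size=%u sqsize=%u\n", queue_size, sqsize);
	return 0;
}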
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ffeeb0a1c49..dfe3894205aa 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -704,7 +704,7 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
-void nvmet_init_auth(struct nvmet_ctrl *ctrl, struct nvmet_req *req);
+void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
@@ -726,8 +726,9 @@ static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
return 0;
}
-static inline void nvmet_init_auth(struct nvmet_ctrl *ctrl,
- struct nvmet_req *req) {};
+static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
+{
+}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {};
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {};
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 6f39a29828b1..79af5140af8b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -215,9 +215,11 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
+ struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ u32 effects;
int status;
- status = nvme_execute_passthru_rq(rq);
+ status = nvme_execute_passthru_rq(rq, &effects);
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
@@ -238,16 +240,20 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
blk_mq_free_request(rq);
+
+ if (effects)
+ nvme_passthru_end(ctrl, effects, req->cmd, status);
}
-static void nvmet_passthru_req_done(struct request *rq,
- blk_status_t blk_status)
+static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
+ blk_status_t blk_status)
{
struct nvmet_req *req = rq->end_io_data;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, nvme_req(rq)->status);
blk_mq_free_request(rq);
+ return RQ_END_IO_NONE;
}
static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
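nvmet_passthru_req_done() above now returns a value telling the block layer whether it already freed the request. A hedged userspace model of that contract (enum names copied from the hunk, everything else invented):

#include <stdio.h>
#include <stdlib.h>

enum rq_end_io_ret { RQ_END_IO_NONE, RQ_END_IO_FREE };

struct request { int status; };

static enum rq_end_io_ret passthru_done(struct request *rq)
{
	printf("completing request, status %d\n", rq->status);
	free(rq);			/* the callback frees the request itself... */
	return RQ_END_IO_NONE;		/* ...so the caller must not free it again */
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (!rq)
		return 1;
	if (passthru_done(rq) == RQ_END_IO_FREE)
		free(rq);		/* only when the callback left the request alone */
	return 0;
}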
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc3b4dc8fe08..6c1476e086ef 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -77,9 +77,8 @@ struct nvmet_tcp_cmd {
u32 pdu_len;
u32 pdu_recv;
int sg_idx;
- int nr_mapped;
struct msghdr recv_msg;
- struct kvec *iov;
+ struct bio_vec *iov;
u32 flags;
struct list_head entry;
@@ -165,9 +164,7 @@ static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -301,35 +298,21 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
- WARN_ON(unlikely(cmd->nr_mapped > 0));
-
kfree(cmd->iov);
sgl_free(cmd->req.sg);
cmd->iov = NULL;
cmd->req.sg = NULL;
}
-static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
-{
- struct scatterlist *sg;
- int i;
-
- sg = &cmd->req.sg[cmd->sg_idx];
-
- for (i = 0; i < cmd->nr_mapped; i++)
- kunmap(sg_page(&sg[i]));
-
- cmd->nr_mapped = 0;
-}
-
-static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
- struct kvec *iov = cmd->iov;
+ struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
u32 length, offset, sg_offset;
+ int nr_pages;
length = cmd->pdu_len;
- cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
+ nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
@@ -338,8 +321,9 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
- iov->iov_len = iov_len;
+ iov->bv_page = sg_page(sg);
+ iov->bv_len = sg->length;
+ iov->bv_offset = sg->offset + sg_offset;
length -= iov_len;
sg = sg_next(sg);
@@ -347,8 +331,8 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
sg_offset = 0;
}
- iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
- cmd->nr_mapped, cmd->pdu_len);
+ iov_iter_bvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
+ nr_pages, cmd->pdu_len);
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
@@ -926,7 +910,7 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
}
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
@@ -935,10 +919,17 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- if (likely(queue->nr_cmds))
+ if (likely(queue->nr_cmds)) {
+ if (unlikely(data->ttag >= queue->nr_cmds)) {
+ pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
+ queue->idx, data->ttag, queue->nr_cmds);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
cmd = &queue->cmds[data->ttag];
- else
+ } else {
cmd = &queue->connect;
+ }
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -952,7 +943,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len = le32_to_cpu(data->data_length);
cmd->pdu_recv = 0;
- nvmet_tcp_map_pdu_iovec(cmd);
+ nvmet_tcp_build_pdu_iovec(cmd);
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
@@ -976,6 +967,13 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
return nvmet_tcp_handle_icreq(queue);
}
+ if (unlikely(hdr->type == nvme_tcp_icreq)) {
+ pr_err("queue %d: received icreq pdu in state %d\n",
+ queue->idx, queue->state);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
if (hdr->type == nvme_tcp_h2c_data) {
ret = nvmet_tcp_handle_h2c_data_pdu(queue);
if (unlikely(ret))
@@ -1021,7 +1019,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (nvmet_tcp_need_data_in(queue->cmd)) {
if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_map_pdu_iovec(queue->cmd);
+ nvmet_tcp_build_pdu_iovec(queue->cmd);
return 0;
}
/* send back R2T */
@@ -1141,7 +1139,6 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
- nvmet_tcp_unmap_pdu_iovec(cmd);
if (queue->data_digest) {
nvmet_tcp_prep_recv_ddgst(cmd);
return 0;
@@ -1179,7 +1176,8 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
queue->idx, cmd->req.cmd->common.command_id,
queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
le32_to_cpu(cmd->exp_ddgst));
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
+ nvmet_tcp_free_cmd_buffers(cmd);
nvmet_tcp_fatal_error(queue);
ret = -EPROTO;
goto out;
@@ -1408,13 +1406,6 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
write_unlock_bh(&sock->sk->sk_callback_lock);
}
-static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
-{
- nvmet_req_uninit(&cmd->req);
- nvmet_tcp_unmap_pdu_iovec(cmd);
- nvmet_tcp_free_cmd_buffers(cmd);
-}
-
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
struct nvmet_tcp_cmd *cmd = queue->cmds;
@@ -1423,17 +1414,28 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
for (i = 0; i < queue->nr_cmds; i++, cmd++) {
if (nvmet_tcp_need_data_in(cmd))
nvmet_req_uninit(&cmd->req);
-
- nvmet_tcp_unmap_pdu_iovec(cmd);
- nvmet_tcp_free_cmd_buffers(cmd);
}
if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
/* failed in connect */
- nvmet_tcp_finish_cmd(&queue->connect);
+ nvmet_req_uninit(&queue->connect.req);
}
}
+static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_tcp_free_cmd_buffers(cmd);
+ }
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
+ nvmet_tcp_free_cmd_buffers(&queue->connect);
+}
+
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
struct page *page;
@@ -1452,6 +1454,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
+ nvmet_tcp_free_cmd_data_in_buffers(queue);
sock_release(queue->sock);
nvmet_tcp_free_cmds(queue);
if (queue->hdr_digest || queue->data_digest)
@@ -1506,6 +1509,9 @@ static void nvmet_tcp_state_change(struct sock *sk)
goto done;
switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
case TCP_FIN_WAIT1:
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
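The h2c data handling above now validates the wire-supplied ttag before indexing the command array. A minimal sketch of that bounds check (array size and types are illustrative):

#include <stdio.h>

#define NR_CMDS 16

struct cmd { int busy; };
static struct cmd cmds[NR_CMDS];
static struct cmd connect_cmd;

static struct cmd *lookup_cmd(unsigned int nr_cmds, unsigned int ttag)
{
	if (nr_cmds) {
		if (ttag >= nr_cmds) {
			fprintf(stderr, "out of bound ttag %u, nr_cmds %u\n",
				ttag, nr_cmds);
			return NULL;	/* the kernel tears the queue down here */
		}
		return &cmds[ttag];
	}
	return &connect_cmd;		/* queue not fully allocated yet */
}

int main(void)
{
	printf("%p\n", (void *)lookup_cmd(NR_CMDS, 3));		/* valid slot */
	printf("%p\n", (void *)lookup_cmd(NR_CMDS, 99));	/* rejected */
	return 0;
}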
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index c7ef69f29fe4..1254cf57e008 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
struct nvme_id_ns_zns *id_zns;
u64 zsze;
u16 status;
+ u32 mar, mor;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+ mor = bdev_max_open_zones(req->ns->bdev);
+ if (!mor)
+ mor = U32_MAX;
+ else
+ mor--;
+ id_zns->mor = cpu_to_le32(mor);
+
+ mar = bdev_max_active_zones(req->ns->bdev);
+ if (!mar)
+ mar = U32_MAX;
+ else
+ mar--;
+ id_zns->mar = cpu_to_le32(mar);
done:
status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
@@ -387,7 +400,6 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
{
struct block_device *bdev = req->ns->bdev;
unsigned int nr_zones = bdev_nr_zones(bdev);
- struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = NULL;
sector_t sector = 0;
int ret;
@@ -396,7 +408,7 @@ static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req)
};
d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)),
- GFP_NOIO, q->node);
+ GFP_NOIO, bdev->bd_disk->node_id);
if (!d.zbitmap) {
ret = -ENOMEM;
goto out;
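The MOR/MAR hunk above translates the block layer's "0 means unlimited" convention into the ZNS convention of 0xFFFFFFFF for unlimited and a 0's-based count otherwise. In isolation:

#include <stdio.h>

static unsigned int zns_limit(unsigned int bdev_limit)
{
	if (!bdev_limit)
		return 0xffffffffu;	/* no limit reported by the block device */
	return bdev_limit - 1;		/* 0's based count */
}

int main(void)
{
	printf("max_open=128 -> mor=%u\n", zns_limit(128));	/* 127 */
	printf("max_open=0   -> mor=%#x\n", zns_limit(0));	/* 0xffffffff */
	return 0;
}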
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index d72d879a6d34..ec8a49c04003 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -21,6 +21,40 @@ config NVMEM_SYSFS
This interface is mostly used by userspace applications to
read/write directly into nvmem.
+# Devices
+
+config NVMEM_APPLE_EFUSES
+ tristate "Apple eFuse support"
+ depends on ARCH_APPLE || COMPILE_TEST
+ default ARCH_APPLE
+ help
+ Say y here to enable support for reading eFuses on Apple SoCs
+ such as the M1. These are e.g. used to store factory programmed
+ calibration data required for the PCIe or the USB-C PHY.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-apple-efuses.
+
+config NVMEM_BCM_OCOTP
+ tristate "Broadcom On-Chip OTP Controller support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM
+ default ARCH_BCM_IPROC
+ help
+ Say y here to enable read/write access to the Broadcom OTP
+ controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-bcm-ocotp.
+
+config NVMEM_BRCM_NVRAM
+ tristate "Broadcom's NVRAM support"
+ depends on ARCH_BCM_5301X || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver provides support for Broadcom's NVRAM that can be accessed
+ using I/O mapping.
+
config NVMEM_IMX_IIM
tristate "i.MX IC Identification Module support"
depends on ARCH_MXC || COMPILE_TEST
@@ -52,7 +86,7 @@ config NVMEM_IMX_OCOTP_SCU
This is a driver for the SCU On-Chip OTP Controller (OCOTP)
available on i.MX8 SoCs.
-config JZ4780_EFUSE
+config NVMEM_JZ4780_EFUSE
tristate "JZ4780 EFUSE Memory Support"
depends on MACH_INGENIC || COMPILE_TEST
depends on HAS_IOMEM
@@ -64,6 +98,27 @@ config JZ4780_EFUSE
To compile this driver as a module, choose M here: the module
will be called nvmem_jz4780_efuse.
+config NVMEM_LAN9662_OTPC
+ tristate "Microchip LAN9662 OTP controller support"
+ depends on SOC_LAN966 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver enables the OTP controller available on Microchip LAN9662
+ SoCs. It controls the access to the OTP memory connected to it.
+
+config NVMEM_LAYERSCAPE_SFP
+ tristate "Layerscape SFP (Security Fuse Processor) support"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+ This driver provides support to read the eFuses on Freescale
+ Layerscape SoCs. For example, the vendor provides a per-part
+ unique ID there.
+
+ This driver can also be built as a module. If so, the module
+ will be called layerscape-sfp.
+
config NVMEM_LPC18XX_EEPROM
tristate "NXP LPC18XX EEPROM Memory Support"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -84,19 +139,34 @@ config NVMEM_LPC18XX_OTP
To compile this driver as a module, choose M here: the module
will be called nvmem_lpc18xx_otp.
-config NVMEM_MXS_OCOTP
- tristate "Freescale MXS On-Chip OTP Memory Support"
- depends on ARCH_MXS || COMPILE_TEST
- depends on HAS_IOMEM
+config NVMEM_MESON_EFUSE
+ tristate "Amlogic Meson GX eFuse Support"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
help
- If you say Y here, you will get readonly access to the
- One Time Programmable memory pages that are stored
- on the Freescale i.MX23/i.MX28 processor.
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson GX SoCs.
This driver can also be built as a module. If so, the module
- will be called nvmem-mxs-ocotp.
+ will be called nvmem_meson_efuse.
+
+config NVMEM_MESON_MX_EFUSE
+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson6, Meson8 and Meson8b SoCs.
-config MTK_EFUSE
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_mx_efuse.
+
+config NVMEM_MICROCHIP_OTPC
+ tristate "Microchip OTPC support"
+ depends on ARCH_AT91 || COMPILE_TEST
+ help
+ This driver enables the OTP controller available on Microchip SAMA7G5
+ SoCs. It controls the access to the OTP memory connected to it.
+
+config NVMEM_MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on HAS_IOMEM
@@ -107,12 +177,17 @@ config MTK_EFUSE
This driver can also be built as a module. If so, the module
will be called efuse-mtk.
-config MICROCHIP_OTPC
- tristate "Microchip OTPC support"
- depends on ARCH_AT91 || COMPILE_TEST
+config NVMEM_MXS_OCOTP
+ tristate "Freescale MXS On-Chip OTP Memory Support"
+ depends on ARCH_MXS || COMPILE_TEST
+ depends on HAS_IOMEM
help
- This driver enable the OTP controller available on Microchip SAMA7G5
- SoCs. It controlls the access to the OTP memory connected to it.
+ If you say Y here, you will get readonly access to the
+ One Time Programmable memory pages that are stored
+ on the Freescale i.MX23/i.MX28 processor.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-mxs-ocotp.
config NVMEM_NINTENDO_OTP
tristate "Nintendo Wii and Wii U OTP Support"
@@ -126,7 +201,7 @@ config NVMEM_NINTENDO_OTP
This driver can also be built as a module. If so, the module
will be called nvmem-nintendo-otp.
-config QCOM_QFPROM
+config NVMEM_QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
@@ -137,15 +212,23 @@ config QCOM_QFPROM
This driver can also be built as a module. If so, the module
will be called nvmem_qfprom.
-config NVMEM_SPMI_SDAM
- tristate "SPMI SDAM Support"
- depends on SPMI
+config NVMEM_RAVE_SP_EEPROM
+ tristate "Rave SP EEPROM Support"
+ depends on RAVE_SP_CORE
help
- This driver supports the Shared Direct Access Memory Module on
- Qualcomm Technologies, Inc. PMICs. It provides the clients
- an interface to read/write to the SDAM module's shared memory.
+ Say y here to enable Rave SP EEPROM support.
+
+config NVMEM_RMEM
+ tristate "Reserved Memory Based Driver Support"
+ depends on HAS_IOMEM
+ help
+ This driver maps reserved memory into an nvmem device. It might be
+ useful to expose information left by firmware in memory.
-config ROCKCHIP_EFUSE
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-rmem.
+
+config NVMEM_ROCKCHIP_EFUSE
tristate "Rockchip eFuse Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM
@@ -156,7 +239,7 @@ config ROCKCHIP_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_efuse.
-config ROCKCHIP_OTP
+config NVMEM_ROCKCHIP_OTP
tristate "Rockchip OTP controller support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM
@@ -167,17 +250,45 @@ config ROCKCHIP_OTP
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_otp.
-config NVMEM_BCM_OCOTP
- tristate "Broadcom On-Chip OTP Controller support"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
+config NVMEM_SC27XX_EFUSE
+ tristate "Spreadtrum SC27XX eFuse Support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
depends on HAS_IOMEM
- default ARCH_BCM_IPROC
help
- Say y here to enable read/write access to the Broadcom OTP
- controller.
+ This is a simple driver to dump specified values of Spreadtrum
+ SC27XX PMICs from eFuse.
This driver can also be built as a module. If so, the module
- will be called nvmem-bcm-ocotp.
+ will be called nvmem-sc27xx-efuse.
+
+config NVMEM_SNVS_LPGPR
+ tristate "Support for Low Power General Purpose Register"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ This is a driver for Low Power General Purpose Register (LPGPR) available on
+ i.MX6 and i.MX7 SoCs in Secure Non-Volatile Storage (SNVS) of this chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-snvs-lpgpr.
+
+config NVMEM_SPMI_SDAM
+ tristate "SPMI SDAM Support"
+ depends on SPMI
+ help
+ This driver supports the Shared Direct Access Memory Module on
+ Qualcomm Technologies, Inc. PMICs. It provides the clients
+ an interface to read/write to the SDAM module's shared memory.
+
+config NVMEM_SPRD_EFUSE
+ tristate "Spreadtrum SoC eFuse Support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a simple driver to dump specified values of Spreadtrum
+ SoCs from eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sprd-efuse.
config NVMEM_STM32_ROMEM
tristate "STMicroelectronics STM32 factory-programmed memory support"
@@ -189,6 +300,18 @@ config NVMEM_STM32_ROMEM
This driver can also be built as a module. If so, the module
will be called nvmem-stm32-romem.
+config NVMEM_SUNPLUS_OCOTP
+ tristate "Sunplus SoC OTP support"
+ depends on SOC_SP7021 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This is a driver for the On-chip OTP controller (OCOTP) available
+ on Sunplus SoCs. It provides access to 128 bytes of one-time
+ programmable eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-sunplus-ocotp.
+
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
@@ -199,7 +322,20 @@ config NVMEM_SUNXI_SID
This driver can also be built as a module. If so, the module
will be called nvmem_sunxi_sid.
-config UNIPHIER_EFUSE
+config NVMEM_U_BOOT_ENV
+ tristate "U-Boot environment variables support"
+ depends on OF && MTD
+ select CRC32
+ help
+ U-Boot stores its setup as environment variables. This driver adds
+ support for verifying & exporting such data. It also exposes variables
+ as NVMEM cells so they can be referenced by other drivers.
+
+ Currently this driver works only with env variables on top of MTD.
+
+ If compiled as a module, it will be called nvmem_u-boot-env.
+
+config NVMEM_UNIPHIER_EFUSE
tristate "UniPhier SoCs eFuse support"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on HAS_IOMEM
@@ -221,53 +357,6 @@ config NVMEM_VF610_OCOTP
This driver can also be build as a module. If so, the module will
be called nvmem-vf610-ocotp.
-config MESON_EFUSE
- tristate "Amlogic Meson GX eFuse Support"
- depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
- help
- This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson GX SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem_meson_efuse.
-
-config MESON_MX_EFUSE
- tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
- depends on ARCH_MESON || COMPILE_TEST
- help
- This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson6, Meson8 and Meson8b SoCs.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem_meson_mx_efuse.
-
-config NVMEM_SNVS_LPGPR
- tristate "Support for Low Power General Purpose Register"
- depends on ARCH_MXC || COMPILE_TEST
- help
- This is a driver for Low Power General Purpose Register (LPGPR) available on
- i.MX6 and i.MX7 SoCs in Secure Non-Volatile Storage (SNVS) of this chip.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-snvs-lpgpr.
-
-config RAVE_SP_EEPROM
- tristate "Rave SP EEPROM Support"
- depends on RAVE_SP_CORE
- help
- Say y here to enable Rave SP EEPROM support.
-
-config SC27XX_EFUSE
- tristate "Spreadtrum SC27XX eFuse Support"
- depends on MFD_SC27XX_PMIC || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This is a simple driver to dump specified values of Spreadtrum
- SC27XX PMICs from eFuse.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-sc27xx-efuse.
-
config NVMEM_ZYNQMP
bool "Xilinx ZYNQMP SoC nvmem firmware support"
depends on ARCH_ZYNQMP
@@ -278,70 +367,4 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
-config SPRD_EFUSE
- tristate "Spreadtrum SoC eFuse Support"
- depends on ARCH_SPRD || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This is a simple driver to dump specified values of Spreadtrum
- SoCs from eFuse.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-sprd-efuse.
-
-config NVMEM_RMEM
- tristate "Reserved Memory Based Driver Support"
- depends on HAS_IOMEM
- help
- This driver maps reserved memory into an nvmem device. It might be
- useful to expose information left by firmware in memory.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-rmem.
-
-config NVMEM_BRCM_NVRAM
- tristate "Broadcom's NVRAM support"
- depends on ARCH_BCM_5301X || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This driver provides support for Broadcom's NVRAM that can be accessed
- using I/O mapping.
-
-config NVMEM_LAYERSCAPE_SFP
- tristate "Layerscape SFP (Security Fuse Processor) support"
- depends on ARCH_LAYERSCAPE || COMPILE_TEST
- depends on HAS_IOMEM
- select REGMAP_MMIO
- help
- This driver provides support to read the eFuses on Freescale
- Layerscape SoC's. For example, the vendor provides a per part
- unique ID there.
-
- This driver can also be built as a module. If so, the module
- will be called layerscape-sfp.
-
-config NVMEM_SUNPLUS_OCOTP
- tristate "Sunplus SoC OTP support"
- depends on SOC_SP7021 || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This is a driver for the On-chip OTP controller (OCOTP) available
- on Sunplus SoCs. It provides access to 128 bytes of one-time
- programmable eFuse.
-
- This driver can also be built as a module. If so, the module
- will be called nvmem-sunplus-ocotp.
-
-config NVMEM_APPLE_EFUSES
- tristate "Apple eFuse support"
- depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
- help
- Say y here to enable support for reading eFuses on Apple SoCs
- such as the M1. These are e.g. used to store factory programmed
- calibration data required for the PCIe or the USB-C PHY.
-
- This driver can also be built as a module. If so, the module will
- be called nvmem-apple-efuses.
-
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index c710b64f9fe4..fa80fe17e567 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -7,65 +7,69 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o
nvmem_core-y := core.o
# Devices
-obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
-nvmem-bcm-ocotp-y := bcm-ocotp.o
-obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
-nvmem-imx-iim-y := imx-iim.o
-obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
-nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
+nvmem-apple-efuses-y := apple-efuses.o
+obj-$(CONFIG_NVMEM_BCM_OCOTP) += nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y := bcm-ocotp.o
+obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
+nvmem_brcm_nvram-y := brcm_nvram.o
+obj-$(CONFIG_NVMEM_IMX_IIM) += nvmem-imx-iim.o
+nvmem-imx-iim-y := imx-iim.o
+obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
+nvmem-imx-ocotp-y := imx-ocotp.o
obj-$(CONFIG_NVMEM_IMX_OCOTP_SCU) += nvmem-imx-ocotp-scu.o
-nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
-obj-$(CONFIG_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
-nvmem_jz4780_efuse-y := jz4780-efuse.o
+nvmem-imx-ocotp-scu-y := imx-ocotp-scu.o
+obj-$(CONFIG_NVMEM_JZ4780_EFUSE) += nvmem_jz4780_efuse.o
+nvmem_jz4780_efuse-y := jz4780-efuse.o
+obj-$(CONFIG_NVMEM_LAN9662_OTPC) += nvmem-lan9662-otpc.o
+nvmem-lan9662-otpc-y := lan9662-otpc.o
+obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
+nvmem-layerscape-sfp-y := layerscape-sfp.o
obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
-nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
-obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
-nvmem_lpc18xx_otp-y := lpc18xx_otp.o
-obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
-nvmem-mxs-ocotp-y := mxs-ocotp.o
+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP) += nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y := lpc18xx_otp.o
+obj-$(CONFIG_NVMEM_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_NVMEM_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
+nvmem-microchip-otpc-y := microchip-otpc.o
+obj-$(CONFIG_NVMEM_MTK_EFUSE) += nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y := mtk-efuse.o
+obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
+nvmem-mxs-ocotp-y := mxs-ocotp.o
obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o
-nvmem-nintendo-otp-y := nintendo-otp.o
-obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
-nvmem_mtk-efuse-y := mtk-efuse.o
-obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
-nvmem_qfprom-y := qfprom.o
-obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
-nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
-obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
-nvmem_rockchip_efuse-y := rockchip-efuse.o
-obj-$(CONFIG_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
-nvmem-rockchip-otp-y := rockchip-otp.o
-obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
-nvmem_stm32_romem-y := stm32-romem.o
-obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
-nvmem_sunxi_sid-y := sunxi_sid.o
-obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
-nvmem-uniphier-efuse-y := uniphier-efuse.o
-obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
-nvmem-vf610-ocotp-y := vf610-ocotp.o
-obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
-nvmem_meson_efuse-y := meson-efuse.o
-obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
-nvmem_meson_mx_efuse-y := meson-mx-efuse.o
-obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
-nvmem_snvs_lpgpr-y := snvs_lpgpr.o
-obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
-nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
-obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
-nvmem-sc27xx-efuse-y := sc27xx-efuse.o
-obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
-nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
-obj-$(CONFIG_SPRD_EFUSE) += nvmem_sprd_efuse.o
-nvmem_sprd_efuse-y := sprd-efuse.o
-obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
-nvmem-rmem-y := rmem.o
-obj-$(CONFIG_NVMEM_BRCM_NVRAM) += nvmem_brcm_nvram.o
-nvmem_brcm_nvram-y := brcm_nvram.o
-obj-$(CONFIG_NVMEM_LAYERSCAPE_SFP) += nvmem-layerscape-sfp.o
-nvmem-layerscape-sfp-y := layerscape-sfp.o
+nvmem-nintendo-otp-y := nintendo-otp.o
+obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o
+nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
+nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
+obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
+nvmem-rmem-y := rmem.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
+nvmem_rockchip_efuse-y := rockchip-efuse.o
+obj-$(CONFIG_NVMEM_ROCKCHIP_OTP) += nvmem-rockchip-otp.o
+nvmem-rockchip-otp-y := rockchip-otp.o
+obj-$(CONFIG_NVMEM_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
+nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
+obj-$(CONFIG_NVMEM_SPMI_SDAM) += nvmem_qcom-spmi-sdam.o
+nvmem_qcom-spmi-sdam-y += qcom-spmi-sdam.o
+obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o
+nvmem_sprd_efuse-y := sprd-efuse.o
+obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
+nvmem_stm32_romem-y := stm32-romem.o
obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
-nvmem_sunplus_ocotp-y := sunplus-ocotp.o
-obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
-nvmem-apple-efuses-y := apple-efuses.o
-obj-$(CONFIG_MICROCHIP_OTPC) += nvmem-microchip-otpc.o
-nvmem-microchip-otpc-y := microchip-otpc.o
+nvmem_sunplus_ocotp-y := sunplus-ocotp.o
+obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o
+nvmem_u-boot-env-y := u-boot-env.o
+obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
+obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
+nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 450b927691c3..4441daa20965 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -96,7 +96,7 @@ static int brcm_nvram_parse(struct brcm_nvram *priv)
len = le32_to_cpu(header.len);
- data = kcalloc(1, len, GFP_KERNEL);
+ data = kzalloc(len, GFP_KERNEL);
memcpy_fromio(data, priv->base, len);
data[len - 1] = '\0';
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 1e3c754efd0d..321d7d63e068 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -810,18 +810,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
switch (config->id) {
case NVMEM_DEVID_NONE:
- dev_set_name(&nvmem->dev, "%s", config->name);
+ rval = dev_set_name(&nvmem->dev, "%s", config->name);
break;
case NVMEM_DEVID_AUTO:
- dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
+ rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
break;
default:
- dev_set_name(&nvmem->dev, "%s%d",
+ rval = dev_set_name(&nvmem->dev, "%s%d",
config->name ? : "nvmem",
config->name ? config->id : nvmem->id);
break;
}
+ if (rval) {
+ ida_free(&nvmem_ida, nvmem->id);
+ kfree(nvmem);
+ return ERR_PTR(rval);
+ }
+
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
@@ -829,21 +835,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.groups = nvmem_dev_groups;
#endif
- if (nvmem->nkeepout) {
- rval = nvmem_validate_keepouts(nvmem);
- if (rval) {
- ida_free(&nvmem_ida, nvmem->id);
- kfree(nvmem);
- return ERR_PTR(rval);
- }
- }
-
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
rval = device_register(&nvmem->dev);
if (rval)
goto err_put_device;
+ if (nvmem->nkeepout) {
+ rval = nvmem_validate_keepouts(nvmem);
+ if (rval)
+ goto err_device_del;
+ }
+
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
diff --git a/drivers/nvmem/lan9662-otpc.c b/drivers/nvmem/lan9662-otpc.c
new file mode 100644
index 000000000000..f6732fd216d8
--- /dev/null
+++ b/drivers/nvmem/lan9662-otpc.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define OTP_OTP_PWR_DN(t) (t + 0x00)
+#define OTP_OTP_PWR_DN_OTP_PWRDN_N BIT(0)
+#define OTP_OTP_ADDR_HI(t) (t + 0x04)
+#define OTP_OTP_ADDR_LO(t) (t + 0x08)
+#define OTP_OTP_PRGM_DATA(t) (t + 0x10)
+#define OTP_OTP_PRGM_MODE(t) (t + 0x14)
+#define OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE BIT(0)
+#define OTP_OTP_RD_DATA(t) (t + 0x18)
+#define OTP_OTP_FUNC_CMD(t) (t + 0x20)
+#define OTP_OTP_FUNC_CMD_OTP_PROGRAM BIT(1)
+#define OTP_OTP_FUNC_CMD_OTP_READ BIT(0)
+#define OTP_OTP_CMD_GO(t) (t + 0x28)
+#define OTP_OTP_CMD_GO_OTP_GO BIT(0)
+#define OTP_OTP_PASS_FAIL(t) (t + 0x2c)
+#define OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED BIT(3)
+#define OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED BIT(2)
+#define OTP_OTP_PASS_FAIL_OTP_FAIL BIT(0)
+#define OTP_OTP_STATUS(t) (t + 0x30)
+#define OTP_OTP_STATUS_OTP_CPUMPEN BIT(1)
+#define OTP_OTP_STATUS_OTP_BUSY BIT(0)
+
+#define OTP_MEM_SIZE 8192
+#define OTP_SLEEP_US 10
+#define OTP_TIMEOUT_US 500000
+
+struct lan9662_otp {
+ struct device *dev;
+ void __iomem *base;
+};
+
+static bool lan9662_otp_wait_flag_clear(void __iomem *reg, u32 flag)
+{
+ u32 val;
+
+ return readl_poll_timeout(reg, val, !(val & flag),
+ OTP_SLEEP_US, OTP_TIMEOUT_US);
+}
+
+static int lan9662_otp_power(struct lan9662_otp *otp, bool up)
+{
+ void __iomem *pwrdn = OTP_OTP_PWR_DN(otp->base);
+
+ if (up) {
+ writel(readl(pwrdn) & ~OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+ OTP_OTP_STATUS_OTP_CPUMPEN))
+ return -ETIMEDOUT;
+ } else {
+ writel(readl(pwrdn) | OTP_OTP_PWR_DN_OTP_PWRDN_N, pwrdn);
+ }
+
+ return 0;
+}
+
+static int lan9662_otp_execute(struct lan9662_otp *otp)
+{
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_CMD_GO(otp->base),
+ OTP_OTP_CMD_GO_OTP_GO))
+ return -ETIMEDOUT;
+
+ if (lan9662_otp_wait_flag_clear(OTP_OTP_STATUS(otp->base),
+ OTP_OTP_STATUS_OTP_BUSY))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void lan9662_otp_set_address(struct lan9662_otp *otp, u32 offset)
+{
+ writel(0xff & (offset >> 8), OTP_OTP_ADDR_HI(otp->base));
+ writel(0xff & offset, OTP_OTP_ADDR_LO(otp->base));
+}
+
+static int lan9662_otp_read_byte(struct lan9662_otp *otp, u32 offset, u8 *dst)
+{
+ u32 pass;
+ int rc;
+
+ lan9662_otp_set_address(otp, offset);
+ writel(OTP_OTP_FUNC_CMD_OTP_READ, OTP_OTP_FUNC_CMD(otp->base));
+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
+ rc = lan9662_otp_execute(otp);
+ if (!rc) {
+ pass = readl(OTP_OTP_PASS_FAIL(otp->base));
+ if (pass & OTP_OTP_PASS_FAIL_OTP_READ_PROHIBITED)
+ return -EACCES;
+ *dst = (u8) readl(OTP_OTP_RD_DATA(otp->base));
+ }
+ return rc;
+}
+
+static int lan9662_otp_write_byte(struct lan9662_otp *otp, u32 offset, u8 data)
+{
+ u32 pass;
+ int rc;
+
+ lan9662_otp_set_address(otp, offset);
+ writel(OTP_OTP_PRGM_MODE_OTP_PGM_MODE_BYTE, OTP_OTP_PRGM_MODE(otp->base));
+ writel(data, OTP_OTP_PRGM_DATA(otp->base));
+ writel(OTP_OTP_FUNC_CMD_OTP_PROGRAM, OTP_OTP_FUNC_CMD(otp->base));
+ writel(OTP_OTP_CMD_GO_OTP_GO, OTP_OTP_CMD_GO(otp->base));
+
+ rc = lan9662_otp_execute(otp);
+ if (!rc) {
+ pass = readl(OTP_OTP_PASS_FAIL(otp->base));
+ if (pass & OTP_OTP_PASS_FAIL_OTP_WRITE_PROHIBITED)
+ return -EACCES;
+ if (pass & OTP_OTP_PASS_FAIL_OTP_FAIL)
+ return -EIO;
+ }
+ return rc;
+}
+
+static int lan9662_otp_read(void *context, unsigned int offset,
+ void *_val, size_t bytes)
+{
+ struct lan9662_otp *otp = context;
+ u8 *val = _val;
+ uint8_t data;
+ int i, rc = 0;
+
+ lan9662_otp_power(otp, true);
+ for (i = 0; i < bytes; i++) {
+ rc = lan9662_otp_read_byte(otp, offset + i, &data);
+ if (rc < 0)
+ break;
+ *val++ = data;
+ }
+ lan9662_otp_power(otp, false);
+
+ return rc;
+}
+
+static int lan9662_otp_write(void *context, unsigned int offset,
+ void *_val, size_t bytes)
+{
+ struct lan9662_otp *otp = context;
+ u8 *val = _val;
+ u8 data, newdata;
+ int i, rc = 0;
+
+ lan9662_otp_power(otp, true);
+ for (i = 0; i < bytes; i++) {
+ /* Skip zero bytes */
+ if (val[i]) {
+ rc = lan9662_otp_read_byte(otp, offset + i, &data);
+ if (rc < 0)
+ break;
+
+ newdata = data | val[i];
+ if (newdata == data)
+ continue;
+
+ rc = lan9662_otp_write_byte(otp, offset + i,
+ newdata);
+ if (rc < 0)
+ break;
+ }
+ }
+ lan9662_otp_power(otp, false);
+
+ return rc;
+}
+
+static struct nvmem_config otp_config = {
+ .name = "lan9662-otp",
+ .stride = 1,
+ .word_size = 1,
+ .reg_read = lan9662_otp_read,
+ .reg_write = lan9662_otp_write,
+ .size = OTP_MEM_SIZE,
+};
+
+static int lan9662_otp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct lan9662_otp *otp;
+
+ otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+ if (!otp)
+ return -ENOMEM;
+
+ otp->dev = dev;
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(otp->base))
+ return PTR_ERR(otp->base);
+
+ otp_config.priv = otp;
+ otp_config.dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &otp_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id lan9662_otp_match[] = {
+ { .compatible = "microchip,lan9662-otp", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lan9662_otp_match);
+
+static struct platform_driver lan9662_otp_driver = {
+ .probe = lan9662_otp_probe,
+ .driver = {
+ .name = "lan9662-otp",
+ .of_match_table = lan9662_otp_match,
+ },
+};
+module_platform_driver(lan9662_otp_driver);
+
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_DESCRIPTION("lan9662 OTP driver");
+MODULE_LICENSE("GPL");
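Side note on the new provider above: it registers a byte-addressable nvmem device (word_size and stride of 1) backed by the lan9662_otp_read()/lan9662_otp_write() callbacks. A minimal, hedged sketch of how a hypothetical consumer driver could read a few bytes through the generic nvmem consumer API — the consumer device, its devicetree link to the provider, and every name below are assumptions for illustration, not part of this patch:

/* Hypothetical consumer sketch; assumes the consumer's DT node
 * references the OTP provider through the usual "nvmem" phandle.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int example_read_otp(struct device *dev)
{
	struct nvmem_device *nvmem;
	u8 buf[8];
	int ret;

	nvmem = devm_nvmem_device_get(dev, NULL);
	if (IS_ERR(nvmem))
		return PTR_ERR(nvmem);

	/* Read 8 bytes at offset 0; returns bytes read or a -errno. */
	ret = nvmem_device_read(nvmem, 0, sizeof(buf), buf);
	if (ret < 0)
		return ret;

	dev_info(dev, "read %d bytes from OTP\n", ret);
	return 0;
}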
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
new file mode 100644
index 000000000000..8e72d1bbd649
--- /dev/null
+++ b/drivers/nvmem/u-boot-env.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+enum u_boot_env_format {
+ U_BOOT_FORMAT_SINGLE,
+ U_BOOT_FORMAT_REDUNDANT,
+};
+
+struct u_boot_env {
+ struct device *dev;
+ enum u_boot_env_format format;
+
+ struct mtd_info *mtd;
+
+ /* Cells */
+ struct nvmem_cell_info *cells;
+ int ncells;
+};
+
+struct u_boot_env_image_single {
+ __le32 crc32;
+ uint8_t data[];
+} __packed;
+
+struct u_boot_env_image_redundant {
+ __le32 crc32;
+ u8 mark;
+ uint8_t data[];
+} __packed;
+
+static int u_boot_env_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct u_boot_env *priv = context;
+ struct device *dev = priv->dev;
+ size_t bytes_read;
+ int err;
+
+ err = mtd_read(priv->mtd, offset, bytes, &bytes_read, val);
+ if (err && !mtd_is_bitflip(err)) {
+ dev_err(dev, "Failed to read from mtd: %d\n", err);
+ return err;
+ }
+
+ if (bytes_read != bytes) {
+ dev_err(dev, "Failed to read %zu bytes\n", bytes);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+{
+ struct device *dev = priv->dev;
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+ int idx;
+
+ priv->ncells = 0;
+ for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
+ priv->ncells++;
+
+ priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+ if (!priv->cells)
+ return -ENOMEM;
+
+ for (var = data, idx = 0;
+ var < data + data_len && *var;
+ var = value + strlen(value) + 1, idx++) {
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+ priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+ if (!priv->cells[idx].name)
+ return -ENOMEM;
+ priv->cells[idx].offset = data_offset + value - data;
+ priv->cells[idx].bytes = strlen(value);
+ priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
+ }
+
+ if (WARN_ON(idx != priv->ncells))
+ priv->ncells = idx;
+
+ return 0;
+}
+
+static int u_boot_env_parse(struct u_boot_env *priv)
+{
+ struct device *dev = priv->dev;
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
+ size_t data_offset;
+ size_t data_len;
+ uint32_t crc32;
+ uint32_t calc;
+ size_t bytes;
+ uint8_t *buf;
+ int err;
+
+ buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
+ if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
+ dev_err(dev, "Failed to read from mtd: %d\n", err);
+ goto err_kfree;
+ }
+
+ switch (priv->format) {
+ case U_BOOT_FORMAT_SINGLE:
+ crc32_offset = offsetof(struct u_boot_env_image_single, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_single, data);
+ data_offset = offsetof(struct u_boot_env_image_single, data);
+ break;
+ case U_BOOT_FORMAT_REDUNDANT:
+ crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32);
+ crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark);
+ data_offset = offsetof(struct u_boot_env_image_redundant, data);
+ break;
+ }
+ crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
+ crc32_data_len = priv->mtd->size - crc32_data_offset;
+ data_len = priv->mtd->size - data_offset;
+
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+ dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
+ err = -EINVAL;
+ goto err_kfree;
+ }
+
+ buf[priv->mtd->size - 1] = '\0';
+ err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
+ if (err)
+ dev_err(dev, "Failed to add cells: %d\n", err);
+
+err_kfree:
+ kfree(buf);
+err_out:
+ return err;
+}
+
+static int u_boot_env_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .name = "u-boot-env",
+ .reg_read = u_boot_env_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct u_boot_env *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ priv->format = (uintptr_t)of_device_get_match_data(dev);
+
+ priv->mtd = of_get_mtd_device_by_node(np);
+ if (IS_ERR(priv->mtd)) {
+ dev_err_probe(dev, PTR_ERR(priv->mtd), "Failed to get %pOF MTD\n", np);
+ return PTR_ERR(priv->mtd);
+ }
+
+ err = u_boot_env_parse(priv);
+ if (err)
+ return err;
+
+ config.dev = dev;
+ config.cells = priv->cells;
+ config.ncells = priv->ncells;
+ config.priv = priv;
+ config.size = priv->mtd->size;
+
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
+}
+
+static const struct of_device_id u_boot_env_of_match_table[] = {
+ { .compatible = "u-boot,env", .data = (void *)U_BOOT_FORMAT_SINGLE, },
+ { .compatible = "u-boot,env-redundant-bool", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ { .compatible = "u-boot,env-redundant-count", .data = (void *)U_BOOT_FORMAT_REDUNDANT, },
+ {},
+};
+
+static struct platform_driver u_boot_env_driver = {
+ .probe = u_boot_env_probe,
+ .driver = {
+ .name = "u_boot_env",
+ .of_match_table = u_boot_env_of_match_table,
+ },
+};
+module_platform_driver(u_boot_env_driver);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, u_boot_env_of_match_table);
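For reference, u_boot_env_parse() above accepts the image only when crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0 matches the stored little-endian CRC, i.e. a plain CRC-32 over everything after the CRC field. A rough userspace sketch of the same check on a dumped single-format environment partition; zlib's crc32() applies the same init/final inversion internally, and the file name and layout handling here are assumptions, not part of the driver:

/* Hypothetical userspace check of a dumped single-format U-Boot env. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <zlib.h>

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "env.bin", "rb");
	uint8_t *buf;
	long size;
	uint32_t stored, calc;

	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	buf = malloc(size);
	if (!buf || fread(buf, 1, size, f) != (size_t)size)
		return 1;

	/* Single format: a little-endian CRC32 header, then the data. */
	stored = buf[0] | buf[1] << 8 | (uint32_t)buf[2] << 16 | (uint32_t)buf[3] << 24;
	calc = crc32(0L, buf + 4, size - 4);

	printf("stored 0x%08x calc 0x%08x -> %s\n", stored, calc,
	       stored == calc ? "OK" : "BAD");
	free(buf);
	fclose(f);
	return stored == calc ? 0 : 1;
}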
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 96f0a12e507c..c34ac33b7338 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -579,7 +579,8 @@ u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_address);
-static struct device_node *__of_get_dma_parent(const struct device_node *np)
+#ifdef CONFIG_HAS_DMA
+struct device_node *__of_get_dma_parent(const struct device_node *np)
{
struct of_phandle_args args;
int ret, index;
@@ -596,6 +597,7 @@ static struct device_node *__of_get_dma_parent(const struct device_node *np)
return of_node_get(args.np);
}
+#endif
static struct device_node *of_get_next_dma_parent(struct device_node *np)
{
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7fa960bd3df1..d5a5c35eba72 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -561,7 +561,7 @@ EXPORT_SYMBOL(of_device_is_compatible);
* a NULL terminated array of strings. Returns the best match
* score or 0.
*/
-int of_device_compatible_match(struct device_node *device,
+int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
unsigned int tmp, score = 0;
@@ -578,6 +578,7 @@ int of_device_compatible_match(struct device_node *device,
return score;
}
+EXPORT_SYMBOL_GPL(of_device_compatible_match);
/**
* of_machine_is_compatible - Test root of device tree for a given compatible value
@@ -1228,7 +1229,7 @@ int of_modalias_node(struct device_node *node, char *modalias, int len)
if (!compatible || strlen(compatible) > cplen)
return -ENODEV;
p = strchr(compatible, ',');
- strlcpy(modalias, p ? p + 1 : compatible, len);
+ strscpy(modalias, p ? p + 1 : compatible, len);
return 0;
}
EXPORT_SYMBOL_GPL(of_modalias_node);
@@ -2088,12 +2089,13 @@ int of_find_last_cache_level(unsigned int cpu)
struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
while (np) {
+ of_node_put(prev);
prev = np;
- of_node_put(np);
np = of_find_next_cache_node(np);
}
of_property_read_u32(prev, "cache-level", &cache_level);
+ of_node_put(prev);
return cache_level;
}
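To make the refcounting in the of_find_last_cache_level() hunk easier to follow, here is the fixed loop written out in full with comments on where each reference is taken and dropped (reconstructed from the context lines above; the declarations outside the hunk are assumed to be unchanged):

int of_find_last_cache_level(unsigned int cpu)
{
	u32 cache_level = 0;
	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);

	while (np) {
		of_node_put(prev);	/* drop the node we are done with */
		prev = np;		/* keep a reference on the current one */
		np = of_find_next_cache_node(np);
	}

	of_property_read_u32(prev, "cache-level", &cache_level);
	of_node_put(prev);		/* release the last node only now */

	return cache_level;
}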
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 75b6cbffa755..8cefe5a7d04e 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -116,12 +116,19 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
{
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
+ struct device_node *bus_np;
u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
int ret;
- ret = of_dma_get_range(np, &map);
+ if (np == dev->of_node)
+ bus_np = __of_get_dma_parent(np);
+ else
+ bus_np = of_node_get(np);
+
+ ret = of_dma_get_range(bus_np, &map);
+ of_node_put(bus_np);
if (ret < 0) {
/*
* For legacy reasons, we have to assume some devices need
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7bc92923104c..7b571a631639 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
@@ -828,15 +828,6 @@ uint32_t __init of_get_flat_dt_phandle(unsigned long node)
return fdt_get_phandle(initial_boot_params, node);
}
-struct fdt_scan_status {
- const char *name;
- int namelen;
- int depth;
- int found;
- int (*iterator)(unsigned long node, const char *uname, int depth, void *data);
- void *data;
-};
-
const char * __init of_flat_dt_get_machine_name(void)
{
const char *name;
@@ -936,6 +927,8 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
if (!prop)
return;
end = of_read_number(prop, len/4);
+ if (start > end)
+ return;
__early_init_dt_declare_initrd(start, end);
phys_initrd_start = start;
@@ -1178,7 +1171,7 @@ int __init early_init_dt_scan_chosen(char *cmdline)
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+ strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
@@ -1190,11 +1183,11 @@ int __init early_init_dt_scan_chosen(char *cmdline)
strlcat(cmdline, " ", COMMAND_LINE_SIZE);
strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
- strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
/* No arguments from boot loader, use kernel's cmdl*/
if (!((char *)cmdline)[0])
- strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index d22f605fa7ee..2bac44f09554 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -592,6 +592,9 @@ void __init of_irq_init(const struct of_device_id *matches)
ret = desc->irq_init_cb(desc->dev,
desc->interrupt_parent);
if (ret) {
+ pr_err("%s: Failed to init %pOF (%p), parent %p\n",
+ __func__, desc->dev, desc->dev,
+ desc->interrupt_parent);
of_node_clear_flag(desc->dev, OF_POPULATED);
kfree(desc);
continue;
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 9324483397f6..fb6792d381a6 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -155,12 +155,17 @@ struct bus_dma_region;
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA)
int of_dma_get_range(struct device_node *np,
const struct bus_dma_region **map);
+struct device_node *__of_get_dma_parent(const struct device_node *np);
#else
static inline int of_dma_get_range(struct device_node *np,
const struct bus_dma_region **map)
{
return -ENODEV;
}
+static inline struct device_node *__of_get_dma_parent(const struct device_node *np)
+{
+ return of_get_parent(np);
+}
#endif
void fdt_init_reserved_mem(void);
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index eafa8ffefbd0..b89ab5d9fea5 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -2465,7 +2465,7 @@ static int unittest_i2c_bus_probe(struct platform_device *pdev)
adap = &std->adap;
i2c_set_adapdata(adap, std);
adap->nr = -1;
- strlcpy(adap->name, pdev->name, sizeof(adap->name));
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &unittest_i2c_algo;
adap->dev.parent = dev;
@@ -2524,13 +2524,12 @@ static int unittest_i2c_dev_probe(struct i2c_client *client,
return 0;
};
-static int unittest_i2c_dev_remove(struct i2c_client *client)
+static void unittest_i2c_dev_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
- return 0;
}
static const struct i2c_device_id unittest_i2c_dev_id[] = {
@@ -2601,7 +2600,7 @@ static int unittest_i2c_mux_probe(struct i2c_client *client,
return 0;
};
-static int unittest_i2c_mux_remove(struct i2c_client *client)
+static void unittest_i2c_mux_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
@@ -2609,7 +2608,6 @@ static int unittest_i2c_mux_remove(struct i2c_client *client)
dev_dbg(dev, "%s for node @%pOF\n", __func__, np);
i2c_mux_del_adapters(muxc);
- return 0;
}
static const struct i2c_device_id unittest_i2c_mux_id[] = {
@@ -3467,6 +3465,9 @@ static int __init of_unittest(void)
pr_info("start of unittest - you will see error messages\n");
+ /* Taint the kernel so we know we've run tests. */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
/* adding data for unittest */
if (IS_ENABLED(CONFIG_UML))
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 77d1ba3a4154..e87567dbe99f 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -873,7 +873,7 @@ int dev_pm_opp_config_clks_simple(struct device *dev,
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_config_clks_simple);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index f223afe47d10..a66386043aa6 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev)
}
ccio_ioc_init(ioc);
if (ccio_init_resources(ioc)) {
+ iounmap(ioc->ioc_regs);
kfree(ioc);
return -ENOMEM;
}
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 3a8c98615634..bdef7a8d6ab8 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -221,16 +221,7 @@ static size_t irt_num_entry;
static struct irt_entry *iosapic_alloc_irt(int num_entries)
{
- unsigned long a;
-
- /* The IRT needs to be 8-byte aligned for the PDC call.
- * Normally kmalloc would guarantee larger alignment, but
- * if CONFIG_DEBUG_SLAB is enabled, then we can get only
- * 4-byte alignment on 32-bit kernels
- */
- a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
- a = (a + 7UL) & ~7UL;
- return (struct irt_entry *)a;
+ return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);
}
/**
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 374b9199878d..ecd870087a3d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -28,6 +28,12 @@
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
+/*
+ * The semantics of 64 register access on 32bit systems can't be guaranteed
+ * by the C standard, we hope the _lo_hi() macros defining readq and writeq
+ * here will behave as expected.
+ */
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/byteorder.h>
#include <asm/io.h>
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index eda4ded4d5e5..7c45927e2131 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2604,6 +2604,7 @@ enum parport_pc_pci_cards {
oxsemi_pcie_pport,
aks_0100,
mobility_pp,
+ netmos_9900,
netmos_9705,
netmos_9715,
netmos_9755,
@@ -2665,6 +2666,7 @@ static struct parport_pc_pci {
/* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
+ /* netmos_9900 */ { 1, { { 0, -1 }, } },
/* The netmos entries below are untested */
/* netmos_9705 */ { 1, { { 0, -1 }, } },
@@ -2746,6 +2748,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
{ 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
/* NetMos communication controllers */
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x2000, 0, 0, netmos_9900 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9715,
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index d1c5fcf00a8a..bfd9bac37e24 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -274,7 +274,7 @@ config VMD
config PCIE_BRCMSTB
tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCM4908 || \
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || ARCH_BCMBCA || \
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index bf495bf0f48a..1525023e49b6 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -240,10 +240,6 @@ config PCMCIA_PROBE
bool
default y if ISA && !ARCH_SA1100 && !PARISC
-config PCMCIA_VRC4171
- tristate "NEC VRC4171 Card Controllers support"
- depends on CPU_VR41XX && ISA && PCMCIA
-
config OMAP_CF
tristate "OMAP CompactFlash Controller"
depends on PCMCIA
@@ -252,15 +248,6 @@ config OMAP_CF
Say Y here to support the CompactFlash controller on OMAP.
Note that this doesn't support "True IDE" mode.
-config AT91_CF
- tristate "AT91 CompactFlash Controller"
- depends on PCI
- depends on OF
- depends on PCMCIA && ARCH_AT91
- help
- Say Y here to support the CompactFlash controller on AT91 chips.
- Or choose M to compile the driver as a module named "at91_cf".
-
config ELECTRA_CF
tristate "Electra CompactFlash Controller"
depends on PCMCIA && PPC_PASEMI
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index c59ddde42007..b3a2accf47af 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -29,9 +29,7 @@ obj-$(CONFIG_PCMCIA_SA11XX_BASE) += sa11xx_base.o
obj-$(CONFIG_PCMCIA_SA1100) += sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa1111_cs.o
obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
-obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
obj-$(CONFIG_OMAP_CF) += omap_cf.o
-obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
obj-$(CONFIG_PCMCIA_MAX1600) += max1600.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
deleted file mode 100644
index 92df2c2c5d07..000000000000
--- a/drivers/pcmcia/at91_cf.c
+++ /dev/null
@@ -1,407 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * at91_cf.c -- AT91 CompactFlash controller driver
- *
- * Copyright (C) 2005 David Brownell
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/sizes.h>
-#include <linux/mfd/syscon.h>
-#include <linux/mfd/syscon/atmel-mc.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/pci.h>
-#include <linux/regmap.h>
-
-#include <pcmcia/ss.h>
-
-/*
- * A0..A10 work in each range; A23 indicates I/O space; A25 is CFRNW;
- * some other bit in {A24,A22..A11} is nREG to flag memory access
- * (vs attributes). So more than 2KB/region would just be waste.
- * Note: These are offsets from the physical base address.
- */
-#define CF_ATTR_PHYS (0)
-#define CF_IO_PHYS (1 << 23)
-#define CF_MEM_PHYS (0x017ff800)
-
-struct at91_cf_data {
- int irq_pin; /* I/O IRQ */
- int det_pin; /* Card detect */
- int vcc_pin; /* power switching */
- int rst_pin; /* card reset */
- u8 chipselect; /* EBI Chip Select number */
- u8 flags;
-#define AT91_CF_TRUE_IDE 0x01
-#define AT91_IDE_SWAP_A0_A2 0x02
-};
-
-struct regmap *mc;
-
-/*--------------------------------------------------------------------------*/
-
-struct at91_cf_socket {
- struct pcmcia_socket socket;
-
- unsigned present:1;
-
- struct platform_device *pdev;
- struct at91_cf_data *board;
-
- unsigned long phys_baseaddr;
-};
-
-static inline int at91_cf_present(struct at91_cf_socket *cf)
-{
- return !gpio_get_value(cf->board->det_pin);
-}
-
-/*--------------------------------------------------------------------------*/
-
-static int at91_cf_ss_init(struct pcmcia_socket *s)
-{
- return 0;
-}
-
-static irqreturn_t at91_cf_irq(int irq, void *_cf)
-{
- struct at91_cf_socket *cf = _cf;
-
- if (irq == gpio_to_irq(cf->board->det_pin)) {
- unsigned present = at91_cf_present(cf);
-
- /* kick pccard as needed */
- if (present != cf->present) {
- cf->present = present;
- dev_dbg(&cf->pdev->dev, "card %s\n",
- present ? "present" : "gone");
- pcmcia_parse_events(&cf->socket, SS_DETECT);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
-{
- struct at91_cf_socket *cf;
-
- if (!sp)
- return -EINVAL;
-
- cf = container_of(s, struct at91_cf_socket, socket);
-
- /* NOTE: CF is always 3VCARD */
- if (at91_cf_present(cf)) {
- int rdy = gpio_is_valid(cf->board->irq_pin); /* RDY/nIRQ */
- int vcc = gpio_is_valid(cf->board->vcc_pin);
-
- *sp = SS_DETECT | SS_3VCARD;
- if (!rdy || gpio_get_value(cf->board->irq_pin))
- *sp |= SS_READY;
- if (!vcc || gpio_get_value(cf->board->vcc_pin))
- *sp |= SS_POWERON;
- } else
- *sp = 0;
-
- return 0;
-}
-
-static int
-at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
-{
- struct at91_cf_socket *cf;
-
- cf = container_of(sock, struct at91_cf_socket, socket);
-
- /* switch Vcc if needed and possible */
- if (gpio_is_valid(cf->board->vcc_pin)) {
- switch (s->Vcc) {
- case 0:
- gpio_set_value(cf->board->vcc_pin, 0);
- break;
- case 33:
- gpio_set_value(cf->board->vcc_pin, 1);
- break;
- default:
- return -EINVAL;
- }
- }
-
- /* toggle reset if needed */
- gpio_set_value(cf->board->rst_pin, s->flags & SS_RESET);
-
- dev_dbg(&cf->pdev->dev, "Vcc %d, io_irq %d, flags %04x csc %04x\n",
- s->Vcc, s->io_irq, s->flags, s->csc_mask);
-
- return 0;
-}
-
-static int at91_cf_ss_suspend(struct pcmcia_socket *s)
-{
- return at91_cf_set_socket(s, &dead_socket);
-}
-
-/* we already mapped the I/O region */
-static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
-{
- struct at91_cf_socket *cf;
- u32 csr;
-
- cf = container_of(s, struct at91_cf_socket, socket);
- io->flags &= (MAP_ACTIVE | MAP_16BIT | MAP_AUTOSZ);
-
- /*
- * Use 16 bit accesses unless/until we need 8-bit i/o space.
- *
- * NOTE: this CF controller ignores IOIS16, so we can't really do
- * MAP_AUTOSZ. The 16bit mode allows single byte access on either
- * D0-D7 (even addr) or D8-D15 (odd), so it's close enough for many
- * purposes (and handles ide-cs).
- *
- * The 8bit mode is needed for odd byte access on D0-D7. It seems
- * some cards only like that way to get at the odd byte, despite
- * CF 3.0 spec table 35 also giving the D8-D15 option.
- */
- if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) {
- csr = AT91_MC_SMC_DBW_8;
- dev_dbg(&cf->pdev->dev, "8bit i/o bus\n");
- } else {
- csr = AT91_MC_SMC_DBW_16;
- dev_dbg(&cf->pdev->dev, "16bit i/o bus\n");
- }
- regmap_update_bits(mc, AT91_MC_SMC_CSR(cf->board->chipselect),
- AT91_MC_SMC_DBW, csr);
-
- io->start = cf->socket.io_offset;
- io->stop = io->start + SZ_2K - 1;
-
- return 0;
-}
-
-/* pcmcia layer maps/unmaps mem regions */
-static int
-at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
-{
- struct at91_cf_socket *cf;
-
- if (map->card_start)
- return -EINVAL;
-
- cf = container_of(s, struct at91_cf_socket, socket);
-
- map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT);
- if (map->flags & MAP_ATTRIB)
- map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS;
- else
- map->static_start = cf->phys_baseaddr + CF_MEM_PHYS;
-
- return 0;
-}
-
-static struct pccard_operations at91_cf_ops = {
- .init = at91_cf_ss_init,
- .suspend = at91_cf_ss_suspend,
- .get_status = at91_cf_get_status,
- .set_socket = at91_cf_set_socket,
- .set_io_map = at91_cf_set_io_map,
- .set_mem_map = at91_cf_set_mem_map,
-};
-
-/*--------------------------------------------------------------------------*/
-
-static const struct of_device_id at91_cf_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-cf" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91_cf_dt_ids);
-
-static int at91_cf_probe(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf;
- struct at91_cf_data *board;
- struct resource *io;
- struct resource realio;
- int status;
-
- board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
- if (!board)
- return -ENOMEM;
-
- board->irq_pin = of_get_gpio(pdev->dev.of_node, 0);
- board->det_pin = of_get_gpio(pdev->dev.of_node, 1);
- board->vcc_pin = of_get_gpio(pdev->dev.of_node, 2);
- board->rst_pin = of_get_gpio(pdev->dev.of_node, 3);
-
- mc = syscon_regmap_lookup_by_compatible("atmel,at91rm9200-sdramc");
- if (IS_ERR(mc))
- return PTR_ERR(mc);
-
- if (!gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
- return -ENODEV;
-
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -ENODEV;
-
- cf = devm_kzalloc(&pdev->dev, sizeof(*cf), GFP_KERNEL);
- if (!cf)
- return -ENOMEM;
-
- cf->board = board;
- cf->pdev = pdev;
- cf->phys_baseaddr = io->start;
- platform_set_drvdata(pdev, cf);
-
- /* must be a GPIO; ergo must trigger on both edges */
- status = devm_gpio_request(&pdev->dev, board->det_pin, "cf_det");
- if (status < 0)
- return status;
-
- status = devm_request_irq(&pdev->dev, gpio_to_irq(board->det_pin),
- at91_cf_irq, 0, "at91_cf detect", cf);
- if (status < 0)
- return status;
-
- device_init_wakeup(&pdev->dev, 1);
-
- status = devm_gpio_request(&pdev->dev, board->rst_pin, "cf_rst");
- if (status < 0)
- goto fail0a;
-
- if (gpio_is_valid(board->vcc_pin)) {
- status = devm_gpio_request(&pdev->dev, board->vcc_pin, "cf_vcc");
- if (status < 0)
- goto fail0a;
- }
-
- /*
- * The card driver will request this irq later as needed.
- * but it causes lots of "irqNN: nobody cared" messages
- * unless we report that we handle everything (sigh).
- * (Note: DK board doesn't wire the IRQ pin...)
- */
- if (gpio_is_valid(board->irq_pin)) {
- status = devm_gpio_request(&pdev->dev, board->irq_pin, "cf_irq");
- if (status < 0)
- goto fail0a;
-
- status = devm_request_irq(&pdev->dev, gpio_to_irq(board->irq_pin),
- at91_cf_irq, IRQF_SHARED, "at91_cf", cf);
- if (status < 0)
- goto fail0a;
- cf->socket.pci_irq = gpio_to_irq(board->irq_pin);
- } else
- cf->socket.pci_irq = nr_irqs + 1;
-
- /*
- * pcmcia layer only remaps "real" memory not iospace
- * io_offset is set to 0x10000 to avoid the check in static_find_io().
- * */
- cf->socket.io_offset = 0x10000;
- realio.start = cf->socket.io_offset;
- realio.end = realio.start + SZ_64K - 1;
- status = pci_remap_iospace(&realio, cf->phys_baseaddr + CF_IO_PHYS);
- if (status)
- goto fail0a;
-
- /* reserve chip-select regions */
- if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), "at91_cf")) {
- status = -ENXIO;
- goto fail0a;
- }
-
- dev_info(&pdev->dev, "irqs det #%d, io #%d\n",
- gpio_to_irq(board->det_pin), gpio_to_irq(board->irq_pin));
-
- cf->socket.owner = THIS_MODULE;
- cf->socket.dev.parent = &pdev->dev;
- cf->socket.ops = &at91_cf_ops;
- cf->socket.resource_ops = &pccard_static_ops;
- cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP
- | SS_CAP_MEM_ALIGN;
- cf->socket.map_size = SZ_2K;
- cf->socket.io[0].res = io;
-
- status = pcmcia_register_socket(&cf->socket);
- if (status < 0)
- goto fail0a;
-
- return 0;
-
-fail0a:
- device_init_wakeup(&pdev->dev, 0);
- return status;
-}
-
-static int at91_cf_remove(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
-
- pcmcia_unregister_socket(&cf->socket);
- device_init_wakeup(&pdev->dev, 0);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
- struct at91_cf_data *board = cf->board;
-
- if (device_may_wakeup(&pdev->dev)) {
- enable_irq_wake(gpio_to_irq(board->det_pin));
- if (gpio_is_valid(board->irq_pin))
- enable_irq_wake(gpio_to_irq(board->irq_pin));
- }
- return 0;
-}
-
-static int at91_cf_resume(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf = platform_get_drvdata(pdev);
- struct at91_cf_data *board = cf->board;
-
- if (device_may_wakeup(&pdev->dev)) {
- disable_irq_wake(gpio_to_irq(board->det_pin));
- if (gpio_is_valid(board->irq_pin))
- disable_irq_wake(gpio_to_irq(board->irq_pin));
- }
-
- return 0;
-}
-
-#else
-#define at91_cf_suspend NULL
-#define at91_cf_resume NULL
-#endif
-
-static struct platform_driver at91_cf_driver = {
- .driver = {
- .name = "at91_cf",
- .of_match_table = at91_cf_dt_ids,
- },
- .probe = at91_cf_probe,
- .remove = at91_cf_remove,
- .suspend = at91_cf_suspend,
- .resume = at91_cf_resume,
-};
-
-module_platform_driver(at91_cf_driver);
-
-MODULE_DESCRIPTION("AT91 Compact Flash Driver");
-MODULE_AUTHOR("David Brownell");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:at91_cf");
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 192c9049d654..a335748bdef5 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -661,12 +661,12 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket,
return 0;
}
-static int i82092aa_module_init(void)
+static int __init i82092aa_module_init(void)
{
return pci_register_driver(&i82092aa_pci_driver);
}
-static void i82092aa_module_exit(void)
+static void __exit i82092aa_module_exit(void)
{
pci_unregister_driver(&i82092aa_pci_driver);
if (sockets[0].io_base > 0)
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 1972a8f6fa8e..d3f827d4224a 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -124,8 +124,6 @@ static int omap_cf_get_status(struct pcmcia_socket *s, u_int *sp)
static int
omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
{
- u16 control;
-
/* REVISIT some non-OSK boards may support power switching */
switch (s->Vcc) {
case 0:
@@ -135,7 +133,7 @@ omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
return -EINVAL;
}
- control = omap_readw(CF_CONTROL);
+ omap_readw(CF_CONTROL);
if (s->flags & SS_RESET)
omap_writew(CF_CONTROL_RESET, CF_CONTROL);
else
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 47b060c57418..c2b6e828c2c6 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -125,7 +125,7 @@ static int sa11x0_drv_pcmcia_legacy_probe(struct platform_device *dev)
return ret;
}
-static int sa11x0_drv_pcmcia_legacy_remove(struct platform_device *dev)
+static void sa11x0_drv_pcmcia_legacy_remove(struct platform_device *dev)
{
struct skt_dev_info *sinfo = platform_get_drvdata(dev);
int i;
@@ -134,8 +134,6 @@ static int sa11x0_drv_pcmcia_legacy_remove(struct platform_device *dev)
for (i = 0; i < sinfo->nskt; i++)
soc_pcmcia_remove_one(&sinfo->skt[i]);
-
- return 0;
}
static int sa11x0_drv_pcmcia_probe(struct platform_device *pdev)
@@ -167,8 +165,10 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
{
struct soc_pcmcia_socket *skt;
- if (dev->id == -1)
- return sa11x0_drv_pcmcia_legacy_remove(dev);
+ if (dev->id == -1) {
+ sa11x0_drv_pcmcia_legacy_remove(dev);
+ return 0;
+ }
skt = platform_get_drvdata(dev);
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
deleted file mode 100644
index 177d77892144..000000000000
--- a/drivers/pcmcia/vrc4171_card.c
+++ /dev/null
@@ -1,745 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services.
- *
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-
-#include <pcmcia/ss.h>
-
-#include "i82365.h"
-
-MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_LICENSE("GPL");
-
-#define CARD_MAX_SLOTS 2
-#define CARD_SLOTA 0
-#define CARD_SLOTB 1
-#define CARD_SLOTB_OFFSET 0x40
-
-#define CARD_MEM_START 0x10000000
-#define CARD_MEM_END 0x13ffffff
-#define CARD_MAX_MEM_OFFSET 0x3ffffff
-#define CARD_MAX_MEM_SPEED 1000
-
-#define CARD_CONTROLLER_INDEX 0x03e0
-#define CARD_CONTROLLER_DATA 0x03e1
- /* Power register */
- #define VPP_GET_VCC 0x01
- #define POWER_ENABLE 0x10
- #define CARD_VOLTAGE_SENSE 0x1f
- #define VCC_3VORXV_CAPABLE 0x00
- #define VCC_XV_ONLY 0x01
- #define VCC_3V_CAPABLE 0x02
- #define VCC_5V_ONLY 0x03
- #define CARD_VOLTAGE_SELECT 0x2f
- #define VCC_3V 0x01
- #define VCC_5V 0x00
- #define VCC_XV 0x02
- #define VCC_STATUS_3V 0x02
- #define VCC_STATUS_5V 0x01
- #define VCC_STATUS_XV 0x03
- #define GLOBAL_CONTROL 0x1e
- #define EXWRBK 0x04
- #define IRQPM_EN 0x08
- #define CLRPMIRQ 0x10
-
-#define INTERRUPT_STATUS 0x05fa
- #define IRQ_A 0x02
- #define IRQ_B 0x04
-
-#define CONFIGURATION1 0x05fe
- #define SLOTB_CONFIG 0xc000
- #define SLOTB_NONE 0x0000
- #define SLOTB_PCCARD 0x4000
- #define SLOTB_CF 0x8000
- #define SLOTB_FLASHROM 0xc000
-
-#define CARD_CONTROLLER_START CARD_CONTROLLER_INDEX
-#define CARD_CONTROLLER_END CARD_CONTROLLER_DATA
-
-#define IO_MAX_MAPS 2
-#define MEM_MAX_MAPS 5
-
-enum vrc4171_slot {
- SLOT_PROBE = 0,
- SLOT_NOPROBE_IO,
- SLOT_NOPROBE_MEM,
- SLOT_NOPROBE_ALL,
- SLOT_INITIALIZED,
-};
-
-enum vrc4171_slotb {
- SLOTB_IS_NONE,
- SLOTB_IS_PCCARD,
- SLOTB_IS_CF,
- SLOTB_IS_FLASHROM,
-};
-
-struct vrc4171_socket {
- enum vrc4171_slot slot;
- struct pcmcia_socket pcmcia_socket;
- char name[24];
- int csc_irq;
- int io_irq;
- spinlock_t lock;
-};
-
-static struct vrc4171_socket vrc4171_sockets[CARD_MAX_SLOTS];
-static enum vrc4171_slotb vrc4171_slotb = SLOTB_IS_NONE;
-static char vrc4171_card_name[] = "NEC VRC4171 Card Controller";
-static unsigned int vrc4171_irq;
-static uint16_t vrc4171_irq_mask = 0xdeb8;
-
-static struct resource vrc4171_card_resource[3] = {
- { .name = vrc4171_card_name,
- .start = CARD_CONTROLLER_START,
- .end = CARD_CONTROLLER_END,
- .flags = IORESOURCE_IO, },
- { .name = vrc4171_card_name,
- .start = INTERRUPT_STATUS,
- .end = INTERRUPT_STATUS,
- .flags = IORESOURCE_IO, },
- { .name = vrc4171_card_name,
- .start = CONFIGURATION1,
- .end = CONFIGURATION1,
- .flags = IORESOURCE_IO, },
-};
-
-static struct platform_device vrc4171_card_device = {
- .name = vrc4171_card_name,
- .id = 0,
- .num_resources = 3,
- .resource = vrc4171_card_resource,
-};
-
-static inline uint16_t vrc4171_get_irq_status(void)
-{
- return inw(INTERRUPT_STATUS);
-}
-
-static inline void vrc4171_set_multifunction_pin(enum vrc4171_slotb config)
-{
- uint16_t config1;
-
- config1 = inw(CONFIGURATION1);
- config1 &= ~SLOTB_CONFIG;
-
- switch (config) {
- case SLOTB_IS_NONE:
- config1 |= SLOTB_NONE;
- break;
- case SLOTB_IS_PCCARD:
- config1 |= SLOTB_PCCARD;
- break;
- case SLOTB_IS_CF:
- config1 |= SLOTB_CF;
- break;
- case SLOTB_IS_FLASHROM:
- config1 |= SLOTB_FLASHROM;
- break;
- default:
- break;
- }
-
- outw(config1, CONFIGURATION1);
-}
-
-static inline uint8_t exca_read_byte(int slot, uint8_t index)
-{
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index, CARD_CONTROLLER_INDEX);
- return inb(CARD_CONTROLLER_DATA);
-}
-
-static inline uint16_t exca_read_word(int slot, uint8_t index)
-{
- uint16_t data;
-
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index++, CARD_CONTROLLER_INDEX);
- data = inb(CARD_CONTROLLER_DATA);
-
- outb(index, CARD_CONTROLLER_INDEX);
- data |= ((uint16_t)inb(CARD_CONTROLLER_DATA)) << 8;
-
- return data;
-}
-
-static inline uint8_t exca_write_byte(int slot, uint8_t index, uint8_t data)
-{
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index, CARD_CONTROLLER_INDEX);
- outb(data, CARD_CONTROLLER_DATA);
-
- return data;
-}
-
-static inline uint16_t exca_write_word(int slot, uint8_t index, uint16_t data)
-{
- if (slot == CARD_SLOTB)
- index += CARD_SLOTB_OFFSET;
-
- outb(index++, CARD_CONTROLLER_INDEX);
- outb(data, CARD_CONTROLLER_DATA);
-
- outb(index, CARD_CONTROLLER_INDEX);
- outb((uint8_t)(data >> 8), CARD_CONTROLLER_DATA);
-
- return data;
-}
-
-static inline int search_nonuse_irq(void)
-{
- int i;
-
- for (i = 0; i < 16; i++) {
- if (vrc4171_irq_mask & (1 << i)) {
- vrc4171_irq_mask &= ~(1 << i);
- return i;
- }
- }
-
- return -1;
-}
-
-static int pccard_init(struct pcmcia_socket *sock)
-{
- struct vrc4171_socket *socket;
- unsigned int slot;
-
- sock->features |= SS_CAP_PCCARD | SS_CAP_PAGE_REGS;
- sock->irq_mask = 0;
- sock->map_size = 0x1000;
- sock->pci_irq = vrc4171_irq;
-
- slot = sock->sock;
- socket = &vrc4171_sockets[slot];
- socket->csc_irq = search_nonuse_irq();
- socket->io_irq = search_nonuse_irq();
- spin_lock_init(&socket->lock);
-
- return 0;
-}
-
-static int pccard_get_status(struct pcmcia_socket *sock, u_int *value)
-{
- unsigned int slot;
- uint8_t status, sense;
- u_int val = 0;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS || value == NULL)
- return -EINVAL;
-
- slot = sock->sock;
-
- status = exca_read_byte(slot, I365_STATUS);
- if (exca_read_byte(slot, I365_INTCTL) & I365_PC_IOCARD) {
- if (status & I365_CS_STSCHG)
- val |= SS_STSCHG;
- } else {
- if (!(status & I365_CS_BVD1))
- val |= SS_BATDEAD;
- else if ((status & (I365_CS_BVD1 | I365_CS_BVD2)) == I365_CS_BVD1)
- val |= SS_BATWARN;
- }
- if ((status & I365_CS_DETECT) == I365_CS_DETECT)
- val |= SS_DETECT;
- if (status & I365_CS_WRPROT)
- val |= SS_WRPROT;
- if (status & I365_CS_READY)
- val |= SS_READY;
- if (status & I365_CS_POWERON)
- val |= SS_POWERON;
-
- sense = exca_read_byte(slot, CARD_VOLTAGE_SENSE);
- switch (sense) {
- case VCC_3VORXV_CAPABLE:
- val |= SS_3VCARD | SS_XVCARD;
- break;
- case VCC_XV_ONLY:
- val |= SS_XVCARD;
- break;
- case VCC_3V_CAPABLE:
- val |= SS_3VCARD;
- break;
- default:
- /* 5V only */
- break;
- }
-
- *value = val;
-
- return 0;
-}
-
-static inline uint8_t set_Vcc_value(u_char Vcc)
-{
- switch (Vcc) {
- case 33:
- return VCC_3V;
- case 50:
- return VCC_5V;
- }
-
- /* Small voltage is chosen for safety. */
- return VCC_3V;
-}
-
-static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
-{
- struct vrc4171_socket *socket;
- unsigned int slot;
- uint8_t voltage, power, control, cscint;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS ||
- (state->Vpp != state->Vcc && state->Vpp != 0) ||
- (state->Vcc != 50 && state->Vcc != 33 && state->Vcc != 0))
- return -EINVAL;
-
- slot = sock->sock;
- socket = &vrc4171_sockets[slot];
-
- spin_lock_irq(&socket->lock);
-
- voltage = set_Vcc_value(state->Vcc);
- exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage);
-
- power = POWER_ENABLE;
- if (state->Vpp == state->Vcc)
- power |= VPP_GET_VCC;
- if (state->flags & SS_OUTPUT_ENA)
- power |= I365_PWR_OUT;
- exca_write_byte(slot, I365_POWER, power);
-
- control = 0;
- if (state->io_irq != 0)
- control |= socket->io_irq;
- if (state->flags & SS_IOCARD)
- control |= I365_PC_IOCARD;
- if (state->flags & SS_RESET)
- control &= ~I365_PC_RESET;
- else
- control |= I365_PC_RESET;
- exca_write_byte(slot, I365_INTCTL, control);
-
- cscint = 0;
- exca_write_byte(slot, I365_CSCINT, cscint);
- exca_read_byte(slot, I365_CSC); /* clear CardStatus change */
- if (state->csc_mask != 0)
- cscint |= socket->csc_irq << 8;
- if (state->flags & SS_IOCARD) {
- if (state->csc_mask & SS_STSCHG)
- cscint |= I365_CSC_STSCHG;
- } else {
- if (state->csc_mask & SS_BATDEAD)
- cscint |= I365_CSC_BVD1;
- if (state->csc_mask & SS_BATWARN)
- cscint |= I365_CSC_BVD2;
- }
- if (state->csc_mask & SS_READY)
- cscint |= I365_CSC_READY;
- if (state->csc_mask & SS_DETECT)
- cscint |= I365_CSC_DETECT;
- exca_write_byte(slot, I365_CSCINT, cscint);
-
- spin_unlock_irq(&socket->lock);
-
- return 0;
-}
-
-static int pccard_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
-{
- unsigned int slot;
- uint8_t ioctl, addrwin;
- u_char map;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS ||
- io == NULL || io->map >= IO_MAX_MAPS ||
- io->start > 0xffff || io->stop > 0xffff || io->start > io->stop)
- return -EINVAL;
-
- slot = sock->sock;
- map = io->map;
-
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- if (addrwin & I365_ENA_IO(map)) {
- addrwin &= ~I365_ENA_IO(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- exca_write_word(slot, I365_IO(map)+I365_W_START, io->start);
- exca_write_word(slot, I365_IO(map)+I365_W_STOP, io->stop);
-
- ioctl = 0;
- if (io->speed > 0)
- ioctl |= I365_IOCTL_WAIT(map);
- if (io->flags & MAP_16BIT)
- ioctl |= I365_IOCTL_16BIT(map);
- if (io->flags & MAP_AUTOSZ)
- ioctl |= I365_IOCTL_IOCS16(map);
- if (io->flags & MAP_0WS)
- ioctl |= I365_IOCTL_0WS(map);
- exca_write_byte(slot, I365_IOCTL, ioctl);
-
- if (io->flags & MAP_ACTIVE) {
- addrwin |= I365_ENA_IO(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- return 0;
-}
-
-static int pccard_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem)
-{
- unsigned int slot;
- uint16_t start, stop, offset;
- uint8_t addrwin;
- u_char map;
-
- if (sock == NULL || sock->sock >= CARD_MAX_SLOTS ||
- mem == NULL || mem->map >= MEM_MAX_MAPS ||
- mem->res->start < CARD_MEM_START || mem->res->start > CARD_MEM_END ||
- mem->res->end < CARD_MEM_START || mem->res->end > CARD_MEM_END ||
- mem->res->start > mem->res->end ||
- mem->card_start > CARD_MAX_MEM_OFFSET ||
- mem->speed > CARD_MAX_MEM_SPEED)
- return -EINVAL;
-
- slot = sock->sock;
- map = mem->map;
-
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- if (addrwin & I365_ENA_MEM(map)) {
- addrwin &= ~I365_ENA_MEM(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- start = (mem->res->start >> 12) & 0x3fff;
- if (mem->flags & MAP_16BIT)
- start |= I365_MEM_16BIT;
- exca_write_word(slot, I365_MEM(map)+I365_W_START, start);
-
- stop = (mem->res->end >> 12) & 0x3fff;
- switch (mem->speed) {
- case 0:
- break;
- case 1:
- stop |= I365_MEM_WS0;
- break;
- case 2:
- stop |= I365_MEM_WS1;
- break;
- default:
- stop |= I365_MEM_WS0 | I365_MEM_WS1;
- break;
- }
- exca_write_word(slot, I365_MEM(map)+I365_W_STOP, stop);
-
- offset = (mem->card_start >> 12) & 0x3fff;
- if (mem->flags & MAP_ATTRIB)
- offset |= I365_MEM_REG;
- if (mem->flags & MAP_WRPROT)
- offset |= I365_MEM_WRPROT;
- exca_write_word(slot, I365_MEM(map)+I365_W_OFF, offset);
-
- if (mem->flags & MAP_ACTIVE) {
- addrwin |= I365_ENA_MEM(map);
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- }
-
- return 0;
-}
-
-static struct pccard_operations vrc4171_pccard_operations = {
- .init = pccard_init,
- .get_status = pccard_get_status,
- .set_socket = pccard_set_socket,
- .set_io_map = pccard_set_io_map,
- .set_mem_map = pccard_set_mem_map,
-};
-
-static inline unsigned int get_events(int slot)
-{
- unsigned int events = 0;
- uint8_t status, csc;
-
- status = exca_read_byte(slot, I365_STATUS);
- csc = exca_read_byte(slot, I365_CSC);
-
- if (exca_read_byte(slot, I365_INTCTL) & I365_PC_IOCARD) {
- if ((csc & I365_CSC_STSCHG) && (status & I365_CS_STSCHG))
- events |= SS_STSCHG;
- } else {
- if (csc & (I365_CSC_BVD1 | I365_CSC_BVD2)) {
- if (!(status & I365_CS_BVD1))
- events |= SS_BATDEAD;
- else if ((status & (I365_CS_BVD1 | I365_CS_BVD2)) == I365_CS_BVD1)
- events |= SS_BATWARN;
- }
- }
- if ((csc & I365_CSC_READY) && (status & I365_CS_READY))
- events |= SS_READY;
- if ((csc & I365_CSC_DETECT) && ((status & I365_CS_DETECT) == I365_CS_DETECT))
- events |= SS_DETECT;
-
- return events;
-}
-
-static irqreturn_t pccard_interrupt(int irq, void *dev_id)
-{
- struct vrc4171_socket *socket;
- unsigned int events;
- irqreturn_t retval = IRQ_NONE;
- uint16_t status;
-
- status = vrc4171_get_irq_status();
- if (status & IRQ_A) {
- socket = &vrc4171_sockets[CARD_SLOTA];
- if (socket->slot == SLOT_INITIALIZED) {
- if (status & (1 << socket->csc_irq)) {
- events = get_events(CARD_SLOTA);
- if (events != 0) {
- pcmcia_parse_events(&socket->pcmcia_socket, events);
- retval = IRQ_HANDLED;
- }
- }
- }
- }
-
- if (status & IRQ_B) {
- socket = &vrc4171_sockets[CARD_SLOTB];
- if (socket->slot == SLOT_INITIALIZED) {
- if (status & (1 << socket->csc_irq)) {
- events = get_events(CARD_SLOTB);
- if (events != 0) {
- pcmcia_parse_events(&socket->pcmcia_socket, events);
- retval = IRQ_HANDLED;
- }
- }
- }
- }
-
- return retval;
-}
-
-static inline void reserve_using_irq(int slot)
-{
- unsigned int irq;
-
- irq = exca_read_byte(slot, I365_INTCTL);
- irq &= 0x0f;
- vrc4171_irq_mask &= ~(1 << irq);
-
- irq = exca_read_byte(slot, I365_CSCINT);
- irq = (irq & 0xf0) >> 4;
- vrc4171_irq_mask &= ~(1 << irq);
-}
-
-static int vrc4171_add_sockets(void)
-{
- struct vrc4171_socket *socket;
- int slot, retval;
-
- for (slot = 0; slot < CARD_MAX_SLOTS; slot++) {
- if (slot == CARD_SLOTB && vrc4171_slotb == SLOTB_IS_NONE)
- continue;
-
- socket = &vrc4171_sockets[slot];
- if (socket->slot != SLOT_PROBE) {
- uint8_t addrwin;
-
- switch (socket->slot) {
- case SLOT_NOPROBE_MEM:
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- addrwin &= 0x1f;
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- break;
- case SLOT_NOPROBE_IO:
- addrwin = exca_read_byte(slot, I365_ADDRWIN);
- addrwin &= 0xc0;
- exca_write_byte(slot, I365_ADDRWIN, addrwin);
- break;
- default:
- break;
- }
-
- reserve_using_irq(slot);
- continue;
- }
-
- sprintf(socket->name, "NEC VRC4171 Card Slot %1c", 'A' + slot);
- socket->pcmcia_socket.dev.parent = &vrc4171_card_device.dev;
- socket->pcmcia_socket.ops = &vrc4171_pccard_operations;
- socket->pcmcia_socket.owner = THIS_MODULE;
-
- retval = pcmcia_register_socket(&socket->pcmcia_socket);
- if (retval < 0)
- return retval;
-
- exca_write_byte(slot, I365_ADDRWIN, 0);
- exca_write_byte(slot, GLOBAL_CONTROL, 0);
-
- socket->slot = SLOT_INITIALIZED;
- }
-
- return 0;
-}
-
-static void vrc4171_remove_sockets(void)
-{
- struct vrc4171_socket *socket;
- int slot;
-
- for (slot = 0; slot < CARD_MAX_SLOTS; slot++) {
- if (slot == CARD_SLOTB && vrc4171_slotb == SLOTB_IS_NONE)
- continue;
-
- socket = &vrc4171_sockets[slot];
- if (socket->slot == SLOT_INITIALIZED)
- pcmcia_unregister_socket(&socket->pcmcia_socket);
-
- socket->slot = SLOT_PROBE;
- }
-}
-
-static int vrc4171_card_setup(char *options)
-{
- if (options == NULL || *options == '\0')
- return 1;
-
- if (strncmp(options, "irq:", 4) == 0) {
- int irq;
- options += 4;
- irq = simple_strtoul(options, &options, 0);
- if (irq >= 0 && irq < nr_irqs)
- vrc4171_irq = irq;
-
- if (*options != ',')
- return 1;
- options++;
- }
-
- if (strncmp(options, "slota:", 6) == 0) {
- options += 6;
- if (*options != '\0') {
- if (strncmp(options, "memnoprobe", 10) == 0) {
- vrc4171_sockets[CARD_SLOTA].slot = SLOT_NOPROBE_MEM;
- options += 10;
- } else if (strncmp(options, "ionoprobe", 9) == 0) {
- vrc4171_sockets[CARD_SLOTA].slot = SLOT_NOPROBE_IO;
- options += 9;
- } else if ( strncmp(options, "noprobe", 7) == 0) {
- vrc4171_sockets[CARD_SLOTA].slot = SLOT_NOPROBE_ALL;
- options += 7;
- }
-
- if (*options != ',')
- return 1;
- options++;
- } else
- return 1;
-
- }
-
- if (strncmp(options, "slotb:", 6) == 0) {
- options += 6;
- if (*options != '\0') {
- if (strncmp(options, "pccard", 6) == 0) {
- vrc4171_slotb = SLOTB_IS_PCCARD;
- options += 6;
- } else if (strncmp(options, "cf", 2) == 0) {
- vrc4171_slotb = SLOTB_IS_CF;
- options += 2;
- } else if (strncmp(options, "flashrom", 8) == 0) {
- vrc4171_slotb = SLOTB_IS_FLASHROM;
- options += 8;
- } else if (strncmp(options, "none", 4) == 0) {
- vrc4171_slotb = SLOTB_IS_NONE;
- options += 4;
- }
-
- if (*options != ',')
- return 1;
- options++;
-
- if (strncmp(options, "memnoprobe", 10) == 0)
- vrc4171_sockets[CARD_SLOTB].slot = SLOT_NOPROBE_MEM;
- if (strncmp(options, "ionoprobe", 9) == 0)
- vrc4171_sockets[CARD_SLOTB].slot = SLOT_NOPROBE_IO;
- if (strncmp(options, "noprobe", 7) == 0)
- vrc4171_sockets[CARD_SLOTB].slot = SLOT_NOPROBE_ALL;
- }
- }
-
- return 1;
-}
-
-__setup("vrc4171_card=", vrc4171_card_setup);
-
-static struct platform_driver vrc4171_card_driver = {
- .driver = {
- .name = vrc4171_card_name,
- },
-};
-
-static int vrc4171_card_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&vrc4171_card_driver);
- if (retval < 0)
- return retval;
-
- retval = platform_device_register(&vrc4171_card_device);
- if (retval < 0) {
- platform_driver_unregister(&vrc4171_card_driver);
- return retval;
- }
-
- vrc4171_set_multifunction_pin(vrc4171_slotb);
-
- retval = vrc4171_add_sockets();
- if (retval == 0)
- retval = request_irq(vrc4171_irq, pccard_interrupt, IRQF_SHARED,
- vrc4171_card_name, vrc4171_sockets);
-
- if (retval < 0) {
- vrc4171_remove_sockets();
- platform_device_unregister(&vrc4171_card_device);
- platform_driver_unregister(&vrc4171_card_driver);
- return retval;
- }
-
- printk(KERN_INFO "%s, connected to IRQ %d\n",
- vrc4171_card_driver.driver.name, vrc4171_irq);
-
- return 0;
-}
-
-static void vrc4171_card_exit(void)
-{
- free_irq(vrc4171_irq, vrc4171_sockets);
- vrc4171_remove_sockets();
- platform_device_unregister(&vrc4171_card_device);
- platform_driver_unregister(&vrc4171_card_driver);
-}
-
-module_init(vrc4171_card_init);
-module_exit(vrc4171_card_exit);
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
index 1925ddc13f00..731c5d8f75c6 100644
--- a/drivers/peci/controller/peci-aspeed.c
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -523,7 +523,7 @@ static int aspeed_peci_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (!priv->irq)
+ if (priv->irq < 0)
return priv->irq;
ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
index 68eb61c65d34..de4a7b3e5966 100644
--- a/drivers/peci/cpu.c
+++ b/drivers/peci/cpu.c
@@ -188,8 +188,6 @@ static void adev_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
- auxiliary_device_uninit(adev);
-
kfree(adev->name);
kfree(adev);
}
@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev)
struct auxiliary_device *adev = _adev;
auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
static int devm_adev_add(struct device *dev, int idx)
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 1e2d69453771..44c07ea487f4 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -183,6 +183,13 @@ config APPLE_M1_CPU_PMU
Provides support for the non-architectural CPU PMUs present on
the Apple M1 SoCs and derivatives.
+config ALIBABA_UNCORE_DRW_PMU
+ tristate "Alibaba T-Head Yitian 710 DDR Sub-system Driveway PMU driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+	  Support for monitoring Driveway PMU events on the Yitian 710 DDR
+	  Sub-system.
+
source "drivers/perf/hisilicon/Kconfig"
config MARVELL_CN10K_DDR_PMU
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 57a279c61df5..050d04ee19dd 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
+obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
new file mode 100644
index 000000000000..82729b874f09
--- /dev/null
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -0,0 +1,810 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alibaba DDR Sub-System Driveway PMU driver
+ *
+ * Copyright (C) 2022 Alibaba Inc
+ */
+
+#define ALI_DRW_PMUNAME "ali_drw"
+#define ALI_DRW_DRVNAME ALI_DRW_PMUNAME "_pmu"
+#define pr_fmt(fmt) ALI_DRW_DRVNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/refcount.h>
+
+
+#define ALI_DRW_PMU_COMMON_MAX_COUNTERS 16
+#define ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE 19
+
+#define ALI_DRW_PMU_PA_SHIFT 12
+#define ALI_DRW_PMU_CNT_INIT 0x00000000
+#define ALI_DRW_CNT_MAX_PERIOD 0xffffffff
+#define ALI_DRW_PMU_CYCLE_EVT_ID 0x80
+
+#define ALI_DRW_PMU_CNT_CTRL 0xC00
+#define ALI_DRW_PMU_CNT_RST BIT(2)
+#define ALI_DRW_PMU_CNT_STOP BIT(1)
+#define ALI_DRW_PMU_CNT_START BIT(0)
+
+#define ALI_DRW_PMU_CNT_STATE 0xC04
+#define ALI_DRW_PMU_TEST_CTRL 0xC08
+#define ALI_DRW_PMU_CNT_PRELOAD 0xC0C
+
+#define ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK GENMASK(23, 0)
+#define ALI_DRW_PMU_CYCLE_CNT_LOW_MASK GENMASK(31, 0)
+#define ALI_DRW_PMU_CYCLE_CNT_HIGH 0xC10
+#define ALI_DRW_PMU_CYCLE_CNT_LOW 0xC14
+
+/* PMU EVENT SEL 0-3 are paired in 32-bit registers on a 4-byte stride */
+#define ALI_DRW_PMU_EVENT_SEL0 0xC68
+/* counters 0-3 use sel0, counters 4-7 use sel1, ... */
+#define ALI_DRW_PMU_EVENT_SELn(n) \
+ (ALI_DRW_PMU_EVENT_SEL0 + (n / 4) * 0x4)
+#define ALI_DRW_PMCOM_CNT_EN BIT(7)
+#define ALI_DRW_PMCOM_CNT_EVENT_MASK GENMASK(5, 0)
+#define ALI_DRW_PMCOM_CNT_EVENT_OFFSET(n) \
+ (8 * (n % 4))
+
+/* PMU COMMON COUNTER 0-15 are paired in 32-bit registers on a 4-byte stride */
+#define ALI_DRW_PMU_COMMON_COUNTER0 0xC78
+#define ALI_DRW_PMU_COMMON_COUNTERn(n) \
+ (ALI_DRW_PMU_COMMON_COUNTER0 + 0x4 * (n))
+
+#define ALI_DRW_PMU_OV_INTR_ENABLE_CTL 0xCB8
+#define ALI_DRW_PMU_OV_INTR_DISABLE_CTL 0xCBC
+#define ALI_DRW_PMU_OV_INTR_ENABLE_STATUS 0xCC0
+#define ALI_DRW_PMU_OV_INTR_CLR 0xCC4
+#define ALI_DRW_PMU_OV_INTR_STATUS 0xCC8
+#define ALI_DRW_PMCOM_CNT_OV_INTR_MASK GENMASK(23, 8)
+#define ALI_DRW_PMBW_CNT_OV_INTR_MASK GENMASK(7, 0)
+#define ALI_DRW_PMU_OV_INTR_MASK GENMASK_ULL(63, 0)
+
+static int ali_drw_cpuhp_state_num;
+
+static LIST_HEAD(ali_drw_pmu_irqs);
+static DEFINE_MUTEX(ali_drw_pmu_irqs_lock);
+
+struct ali_drw_pmu_irq {
+ struct hlist_node node;
+ struct list_head irqs_node;
+ struct list_head pmus_node;
+ int irq_num;
+ int cpu;
+ refcount_t refcount;
+};
+
+struct ali_drw_pmu {
+ void __iomem *cfg_base;
+ struct device *dev;
+
+ struct list_head pmus_node;
+ struct ali_drw_pmu_irq *irq;
+ int irq_num;
+ int cpu;
+ DECLARE_BITMAP(used_mask, ALI_DRW_PMU_COMMON_MAX_COUNTERS);
+ struct perf_event *events[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
+ int evtids[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
+
+ struct pmu pmu;
+};
+
+#define to_ali_drw_pmu(p) (container_of(p, struct ali_drw_pmu, pmu))
+
+#define DRW_CONFIG_EVENTID GENMASK(7, 0)
+#define GET_DRW_EVENTID(event) FIELD_GET(DRW_CONFIG_EVENTID, (event)->attr.config)
+
+static ssize_t ali_drw_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+/*
+ * PMU event attributes
+ */
+static ssize_t ali_drw_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+#define ALI_DRW_PMU_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
+ })[0].attr.attr)
+
+#define ALI_DRW_PMU_FORMAT_ATTR(_name, _config) \
+ ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_format_show, (void *)_config)
+#define ALI_DRW_PMU_EVENT_ATTR(_name, _config) \
+ ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_event_show, (unsigned long)_config)
+
+static struct attribute *ali_drw_pmu_events_attrs[] = {
+ ALI_DRW_PMU_EVENT_ATTR(hif_rd_or_wr, 0x0),
+ ALI_DRW_PMU_EVENT_ATTR(hif_wr, 0x1),
+ ALI_DRW_PMU_EVENT_ATTR(hif_rd, 0x2),
+ ALI_DRW_PMU_EVENT_ATTR(hif_rmw, 0x3),
+ ALI_DRW_PMU_EVENT_ATTR(hif_hi_pri_rd, 0x4),
+ ALI_DRW_PMU_EVENT_ATTR(dfi_wr_data_cycles, 0x7),
+ ALI_DRW_PMU_EVENT_ATTR(dfi_rd_data_cycles, 0x8),
+ ALI_DRW_PMU_EVENT_ATTR(hpr_xact_when_critical, 0x9),
+ ALI_DRW_PMU_EVENT_ATTR(lpr_xact_when_critical, 0xA),
+ ALI_DRW_PMU_EVENT_ATTR(wr_xact_when_critical, 0xB),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_activate, 0xC),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_rd_or_wr, 0xD),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_rd_activate, 0xE),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_rd, 0xF),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_wr, 0x10),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_mwr, 0x11),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_precharge, 0x12),
+ ALI_DRW_PMU_EVENT_ATTR(precharge_for_rdwr, 0x13),
+ ALI_DRW_PMU_EVENT_ATTR(precharge_for_other, 0x14),
+ ALI_DRW_PMU_EVENT_ATTR(rdwr_transitions, 0x15),
+ ALI_DRW_PMU_EVENT_ATTR(write_combine, 0x16),
+ ALI_DRW_PMU_EVENT_ATTR(war_hazard, 0x17),
+ ALI_DRW_PMU_EVENT_ATTR(raw_hazard, 0x18),
+ ALI_DRW_PMU_EVENT_ATTR(waw_hazard, 0x19),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk0, 0x1A),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk1, 0x1B),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk2, 0x1C),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk3, 0x1D),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk0, 0x1E),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk1, 0x1F),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk2, 0x20),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk3, 0x21),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk0, 0x26),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk1, 0x27),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk2, 0x28),
+ ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk3, 0x29),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_refresh, 0x2A),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_crit_ref, 0x2B),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_load_mode, 0x2D),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_zqcl, 0x2E),
+ ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_rd, 0x30),
+ ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_wr, 0x31),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mpc, 0x34),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mrr, 0x35),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_tcr_mrr, 0x36),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_zqstart, 0x37),
+ ALI_DRW_PMU_EVENT_ATTR(op_is_zqlatch, 0x38),
+ ALI_DRW_PMU_EVENT_ATTR(chi_txreq, 0x39),
+ ALI_DRW_PMU_EVENT_ATTR(chi_txdat, 0x3A),
+ ALI_DRW_PMU_EVENT_ATTR(chi_rxdat, 0x3B),
+ ALI_DRW_PMU_EVENT_ATTR(chi_rxrsp, 0x3C),
+ ALI_DRW_PMU_EVENT_ATTR(tsz_vio, 0x3D),
+ ALI_DRW_PMU_EVENT_ATTR(cycle, 0x80),
+ NULL,
+};
+
+static struct attribute_group ali_drw_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ali_drw_pmu_events_attrs,
+};
+
+static struct attribute *ali_drw_pmu_format_attr[] = {
+ ALI_DRW_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL,
+};
+
+static const struct attribute_group ali_drw_pmu_format_group = {
+ .name = "format",
+ .attrs = ali_drw_pmu_format_attr,
+};
+
+static ssize_t ali_drw_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(drw_pmu->cpu));
+}
+
+static struct device_attribute ali_drw_pmu_cpumask_attr =
+ __ATTR(cpumask, 0444, ali_drw_pmu_cpumask_show, NULL);
+
+static struct attribute *ali_drw_pmu_cpumask_attrs[] = {
+ &ali_drw_pmu_cpumask_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ali_drw_pmu_cpumask_attr_group = {
+ .attrs = ali_drw_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *ali_drw_pmu_attr_groups[] = {
+ &ali_drw_pmu_events_attr_group,
+ &ali_drw_pmu_cpumask_attr_group,
+ &ali_drw_pmu_format_group,
+ NULL,
+};
+
+/* Find a counter for the event; in the add callback, hw.idx will be set to this counter index. */
+static int ali_drw_get_counter_idx(struct perf_event *event)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ int idx;
+
+ for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; ++idx) {
+ if (!test_and_set_bit(idx, drw_pmu->used_mask))
+ return idx;
+ }
+
+ /* The counters are all in use. */
+ return -EBUSY;
+}
+
+static u64 ali_drw_pmu_read_counter(struct perf_event *event)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ u64 cycle_high, cycle_low;
+
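+	/*
+	 * The cycle counter is read as two registers: bits [55:32] come from
+	 * CYCLE_CNT_HIGH and bits [31:0] from CYCLE_CNT_LOW.
+	 */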
+ if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
+ cycle_high = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_HIGH);
+ cycle_high &= ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK;
+ cycle_low = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_LOW);
+ cycle_low &= ALI_DRW_PMU_CYCLE_CNT_LOW_MASK;
+ return (cycle_high << 32 | cycle_low);
+ }
+
+ return readl(drw_pmu->cfg_base +
+ ALI_DRW_PMU_COMMON_COUNTERn(event->hw.idx));
+}
+
+static void ali_drw_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev, now;
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+ now = ali_drw_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ /* handle overflow. */
+ delta = now - prev;
+ if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID)
+ delta &= ALI_DRW_PMU_OV_INTR_MASK;
+ else
+ delta &= ALI_DRW_CNT_MAX_PERIOD;
+ local64_add(delta, &event->count);
+}
+
+static void ali_drw_pmu_event_set_period(struct perf_event *event)
+{
+ u64 pre_val;
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+	/* set a preload counter for test purposes */
+ writel(ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE + event->hw.idx,
+ drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
+
+	/* set counter initial value */
+ pre_val = ALI_DRW_PMU_CNT_INIT;
+ writel(pre_val, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
+ local64_set(&event->hw.prev_count, pre_val);
+
+ /* set sel mode to zero to start test */
+ writel(0x0, drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
+}
+
+static void ali_drw_pmu_enable_counter(struct perf_event *event)
+{
+ u32 val, subval, reg, shift;
+ int counter = event->hw.idx;
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
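+	/*
+	 * Each 32-bit EVENT_SEL register packs four 8-bit per-counter fields
+	 * (an enable bit plus the event id); only this counter's byte is
+	 * rewritten below.
+	 */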
+ reg = ALI_DRW_PMU_EVENT_SELn(counter);
+ val = readl(drw_pmu->cfg_base + reg);
+ subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 1) |
+ FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, drw_pmu->evtids[counter]);
+
+ shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
+ val &= ~(GENMASK(7, 0) << shift);
+ val |= subval << shift;
+
+ writel(val, drw_pmu->cfg_base + reg);
+}
+
+static void ali_drw_pmu_disable_counter(struct perf_event *event)
+{
+ u32 val, reg, subval, shift;
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ int counter = event->hw.idx;
+
+ reg = ALI_DRW_PMU_EVENT_SELn(counter);
+ val = readl(drw_pmu->cfg_base + reg);
+ subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 0) |
+ FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, 0);
+
+ shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
+ val &= ~(GENMASK(7, 0) << shift);
+ val |= subval << shift;
+
+ writel(val, drw_pmu->cfg_base + reg);
+}
+
+static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
+{
+ struct ali_drw_pmu_irq *irq = data;
+ struct ali_drw_pmu *drw_pmu;
+ irqreturn_t ret = IRQ_NONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(drw_pmu, &irq->pmus_node, pmus_node) {
+ unsigned long status, clr_status;
+ struct perf_event *event;
+ unsigned int idx;
+
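+		/*
+		 * Briefly disable all active counters while the overflow
+		 * status is read, handled and cleared; they are re-enabled
+		 * further down.
+		 */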
+ for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
+ event = drw_pmu->events[idx];
+ if (!event)
+ continue;
+ ali_drw_pmu_disable_counter(event);
+ }
+
+ /* common counter intr status */
+ status = readl(drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_STATUS);
+ status = FIELD_GET(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
+ if (status) {
+ for_each_set_bit(idx, &status,
+ ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
+ event = drw_pmu->events[idx];
+ if (WARN_ON_ONCE(!event))
+ continue;
+ ali_drw_pmu_event_update(event);
+ ali_drw_pmu_event_set_period(event);
+ }
+
+ /* clear common counter intr status */
+ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
+ writel(clr_status,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
+ }
+
+ for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
+ event = drw_pmu->events[idx];
+ if (!event)
+ continue;
+ if (!(event->hw.state & PERF_HES_STOPPED))
+ ali_drw_pmu_enable_counter(event);
+ }
+ if (status)
+ ret = IRQ_HANDLED;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct ali_drw_pmu_irq *__ali_drw_pmu_init_irq(struct platform_device
+ *pdev, int irq_num)
+{
+ int ret;
+ struct ali_drw_pmu_irq *irq;
+
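+	/* Reuse an existing handler if this irq number is already registered. */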
+ list_for_each_entry(irq, &ali_drw_pmu_irqs, irqs_node) {
+ if (irq->irq_num == irq_num
+ && refcount_inc_not_zero(&irq->refcount))
+ return irq;
+ }
+
+ irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&irq->pmus_node);
+
+ /* Pick one CPU to be the preferred one to use */
+ irq->cpu = smp_processor_id();
+ refcount_set(&irq->refcount, 1);
+
+ /*
+	 * FIXME: one of the DDRSS Driveway PMU overflow interrupts shares the
+	 * same irq number with the MPAM ERR_IRQ. To register the DDRSS PMU and
+	 * MPAM drivers successfully, add the IRQF_SHARED flag. However, a PMU
+	 * interrupt should not be shared with other components.
+ */
+ ret = devm_request_irq(&pdev->dev, irq_num, ali_drw_pmu_isr,
+ IRQF_SHARED, dev_name(&pdev->dev), irq);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq_num, ret);
+ goto out_free;
+ }
+
+ ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
+ if (ret)
+ goto out_free;
+
+ ret = cpuhp_state_add_instance_nocalls(ali_drw_cpuhp_state_num,
+ &irq->node);
+ if (ret)
+ goto out_free;
+
+ irq->irq_num = irq_num;
+ list_add(&irq->irqs_node, &ali_drw_pmu_irqs);
+
+ return irq;
+
+out_free:
+ kfree(irq);
+ return ERR_PTR(ret);
+}
+
+static int ali_drw_pmu_init_irq(struct ali_drw_pmu *drw_pmu,
+ struct platform_device *pdev)
+{
+ int irq_num;
+ struct ali_drw_pmu_irq *irq;
+
+ /* Read and init IRQ */
+ irq_num = platform_get_irq(pdev, 0);
+ if (irq_num < 0)
+ return irq_num;
+
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ irq = __ali_drw_pmu_init_irq(pdev, irq_num);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ drw_pmu->irq = irq;
+
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ list_add_rcu(&drw_pmu->pmus_node, &irq->pmus_node);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ return 0;
+}
+
+static void ali_drw_pmu_uninit_irq(struct ali_drw_pmu *drw_pmu)
+{
+ struct ali_drw_pmu_irq *irq = drw_pmu->irq;
+
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ list_del_rcu(&drw_pmu->pmus_node);
+
+ if (!refcount_dec_and_test(&irq->refcount)) {
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+ return;
+ }
+
+ list_del(&irq->irqs_node);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
+ cpuhp_state_remove_instance_nocalls(ali_drw_cpuhp_state_num,
+ &irq->node);
+ kfree(irq);
+}
+
+static int ali_drw_pmu_event_init(struct perf_event *event)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+ struct device *dev = drw_pmu->pmu.dev;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event)) {
+ dev_err(dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->attach_state & PERF_ATTACH_TASK) {
+ dev_err(dev, "Per-task counter cannot allocate!\n");
+ return -EOPNOTSUPP;
+ }
+
+ event->cpu = drw_pmu->cpu;
+ if (event->cpu < 0) {
+ dev_err(dev, "Per-task mode not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->group_leader != event &&
+ !is_software_event(event->group_leader)) {
+ dev_err(dev, "driveway only allow one event!\n");
+ return -EINVAL;
+ }
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling != event && !is_software_event(sibling)) {
+ dev_err(dev, "driveway event not allowed!\n");
+ return -EINVAL;
+ }
+ }
+
+ /* reset all the pmu counters */
+ writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+
+ hwc->idx = -1;
+
+ return 0;
+}
+
+static void ali_drw_pmu_start(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+ event->hw.state = 0;
+
+ if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
+ writel(ALI_DRW_PMU_CNT_START,
+ drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+ return;
+ }
+
+ ali_drw_pmu_event_set_period(event);
+ if (flags & PERF_EF_RELOAD) {
+ unsigned long prev_raw_count =
+ local64_read(&event->hw.prev_count);
+ writel(prev_raw_count,
+ drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
+ }
+
+ ali_drw_pmu_enable_counter(event);
+
+ writel(ALI_DRW_PMU_CNT_START, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+}
+
+static void ali_drw_pmu_stop(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ if (GET_DRW_EVENTID(event) != ALI_DRW_PMU_CYCLE_EVT_ID)
+ ali_drw_pmu_disable_counter(event);
+
+ writel(ALI_DRW_PMU_CNT_STOP, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+
+ ali_drw_pmu_event_update(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int ali_drw_pmu_add(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = -1;
+ int evtid;
+
+ evtid = GET_DRW_EVENTID(event);
+
+ if (evtid != ALI_DRW_PMU_CYCLE_EVT_ID) {
+ idx = ali_drw_get_counter_idx(event);
+ if (idx < 0)
+ return idx;
+ drw_pmu->events[idx] = event;
+ drw_pmu->evtids[idx] = evtid;
+ }
+ hwc->idx = idx;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ ali_drw_pmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void ali_drw_pmu_del(struct perf_event *event, int flags)
+{
+ struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ ali_drw_pmu_stop(event, PERF_EF_UPDATE);
+
+ if (idx >= 0 && idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
+ drw_pmu->events[idx] = NULL;
+ drw_pmu->evtids[idx] = 0;
+ clear_bit(idx, drw_pmu->used_mask);
+ }
+
+ perf_event_update_userpage(event);
+}
+
+static void ali_drw_pmu_read(struct perf_event *event)
+{
+ ali_drw_pmu_event_update(event);
+}
+
+static int ali_drw_pmu_probe(struct platform_device *pdev)
+{
+ struct ali_drw_pmu *drw_pmu;
+ struct resource *res;
+ char *name;
+ int ret;
+
+ drw_pmu = devm_kzalloc(&pdev->dev, sizeof(*drw_pmu), GFP_KERNEL);
+ if (!drw_pmu)
+ return -ENOMEM;
+
+ drw_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drw_pmu);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!drw_pmu->cfg_base)
+ return -ENOMEM;
+
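+	/* Derive a unique PMU name from the MMIO base, dropping the low 12 bits. */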
+ name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx",
+ (u64) (res->start >> ALI_DRW_PMU_PA_SHIFT));
+ if (!name)
+ return -ENOMEM;
+
+ writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
+
+ /* enable the generation of interrupt by all common counters */
+ writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_ENABLE_CTL);
+
+ /* clearing interrupt status */
+ writel(0xffffff, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
+
+ drw_pmu->cpu = smp_processor_id();
+
+ ret = ali_drw_pmu_init_irq(drw_pmu, pdev);
+ if (ret)
+ return ret;
+
+ drw_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = ali_drw_pmu_event_init,
+ .add = ali_drw_pmu_add,
+ .del = ali_drw_pmu_del,
+ .start = ali_drw_pmu_start,
+ .stop = ali_drw_pmu_stop,
+ .read = ali_drw_pmu_read,
+ .attr_groups = ali_drw_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ ret = perf_pmu_register(&drw_pmu->pmu, name, -1);
+ if (ret) {
+		dev_err(drw_pmu->dev, "DRW Driveway PMU registration failed!\n");
+ ali_drw_pmu_uninit_irq(drw_pmu);
+ }
+
+ return ret;
+}
+
+static int ali_drw_pmu_remove(struct platform_device *pdev)
+{
+ struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
+
+ /* disable the generation of interrupt by all common counters */
+ writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
+ drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_DISABLE_CTL);
+
+ ali_drw_pmu_uninit_irq(drw_pmu);
+ perf_pmu_unregister(&drw_pmu->pmu);
+
+ return 0;
+}
+
+static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct ali_drw_pmu_irq *irq;
+ struct ali_drw_pmu *drw_pmu;
+ unsigned int target;
+ int ret;
+ cpumask_t node_online_cpus;
+
+ irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
+ if (cpu != irq->cpu)
+ return 0;
+
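+	/* Prefer a new owning CPU on the same NUMA node, else any online CPU. */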
+ ret = cpumask_and(&node_online_cpus,
+ cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
+ if (ret)
+ target = cpumask_any_but(&node_online_cpus, cpu);
+ else
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ /* We're only reading, but this isn't the place to be involving RCU */
+ mutex_lock(&ali_drw_pmu_irqs_lock);
+ list_for_each_entry(drw_pmu, &irq->pmus_node, pmus_node)
+ perf_pmu_migrate_context(&drw_pmu->pmu, irq->cpu, target);
+ mutex_unlock(&ali_drw_pmu_irqs_lock);
+
+ WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
+ irq->cpu = target;
+
+ return 0;
+}
+
+/*
+ * Due to historical reasons, the HID used in the production environment is
+ * ARMHD700, so we leave ARMHD700 as Compatible ID.
+ */
+static const struct acpi_device_id ali_drw_acpi_match[] = {
+ {"BABA5000", 0},
+ {"ARMHD700", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ali_drw_acpi_match);
+
+static struct platform_driver ali_drw_pmu_driver = {
+ .driver = {
+ .name = "ali_drw_pmu",
+ .acpi_match_table = ali_drw_acpi_match,
+ },
+ .probe = ali_drw_pmu_probe,
+ .remove = ali_drw_pmu_remove,
+};
+
+static int __init ali_drw_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "ali_drw_pmu:online",
+ NULL, ali_drw_pmu_offline_cpu);
+
+ if (ret < 0) {
+ pr_err("DRW Driveway PMU: setup hotplug failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+ ali_drw_cpuhp_state_num = ret;
+
+ ret = platform_driver_register(&ali_drw_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
+
+ return ret;
+}
+
+static void __exit ali_drw_pmu_exit(void)
+{
+ platform_driver_unregister(&ali_drw_pmu_driver);
+ cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
+}
+
+module_init(ali_drw_pmu_init);
+module_exit(ali_drw_pmu_exit);
+
+MODULE_AUTHOR("Hongbo Yao <yaohongbo@linux.alibaba.com>");
+MODULE_AUTHOR("Neng Chen <nengchen@linux.alibaba.com>");
+MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba DDR Sub-System Driveway PMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 80d8309652a4..b80a9b74662b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -36,7 +36,7 @@
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
-#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
+#define CMN_CHILD_NODE_ADDR GENMASK(29, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
#define CMN_MAX_DIMENSION 12
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index a36698a90d2f..4a15c86f45ef 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -639,6 +639,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
{
#ifdef CONFIG_ACPI
+ struct acpi_device *parent_adev = acpi_dev_parent(ACPI_COMPANION(dev));
int cpu;
/*
@@ -653,8 +654,7 @@ static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask)
continue;
acpi_dev = ACPI_COMPANION(cpu_dev);
- if (acpi_dev &&
- acpi_dev->parent == ACPI_COMPANION(dev)->parent)
+ if (acpi_dev && acpi_dev_parent(acpi_dev) == parent_adev)
cpumask_set_cpu(cpu, mask);
}
#endif
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 59d3980b8ca2..3f07df5a7e95 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -894,7 +894,7 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
* pmu::filter_match callback and pmu::event_init group
* validation).
*/
- .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
+ .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
};
pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 513de1f54e2d..933b96e243b8 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu_devid(irq))
+ if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index b65a7d9640e1..00e3a637f7b6 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -44,7 +44,9 @@
* This allows us to perform the check, i.e, perfmon_capable(),
* in the context of the event owner, once, during the event_init().
*/
-#define SPE_PMU_HW_FLAGS_CX BIT(0)
+#define SPE_PMU_HW_FLAGS_CX 0x00001
+
+static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_CX);
static void set_spe_event_has_cx(struct perf_event *event)
{
@@ -674,9 +676,9 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
- case ID_AA64DFR0_PMSVER_8_2:
+ case ID_AA64DFR0_EL1_PMSVer_IMP:
return SYS_PMSEVFR_EL1_RES0_8_2;
- case ID_AA64DFR0_PMSVER_8_3:
+ case ID_AA64DFR0_EL1_PMSVer_V1P1:
/* Return the highest version we support in default */
default:
return SYS_PMSEVFR_EL1_RES0_8_3;
@@ -958,7 +960,7 @@ static void __arm_spe_pmu_dev_probe(void *info)
struct device *dev = &spe_pmu->pdev->dev;
fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
- ID_AA64DFR0_PMSVER_SHIFT);
+ ID_AA64DFR0_EL1_PMSVer_SHIFT);
if (!fld) {
dev_err(dev,
"unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 30234c261b05..aaca6db7d8f6 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -840,16 +840,16 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev->parent);
struct platform_device *sdev = to_platform_device(dev);
- struct acpi_device *adev = ACPI_COMPANION(dev);
struct l2cache_pmu *l2cache_pmu = data;
struct cluster_pmu *cluster;
- unsigned long fw_cluster_id;
+ u64 fw_cluster_id;
int err;
int irq;
- if (!adev || kstrtoul(adev->pnp.unique_id, 10, &fw_cluster_id) < 0) {
+ err = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &fw_cluster_id);
+ if (err) {
dev_err(&pdev->dev, "unable to read ACPI uid\n");
- return -ENODEV;
+ return err;
}
cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
@@ -879,7 +879,7 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
}
dev_info(&pdev->dev,
- "Registered L2 cache PMU cluster %ld\n", fw_cluster_id);
+ "Registered L2 cache PMU cluster %lld\n", fw_cluster_id);
spin_lock_init(&cluster->pmu_lock);
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 1ff2ff6582bf..346311a05460 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -742,7 +742,8 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
- acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id);
+ acpi_dev_parent(acpi_dev)->pnp.unique_id,
+ acpi_dev->pnp.unique_id);
if (!l3pmu || !name)
return -ENOMEM;
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 2c20b0de8cb0..ca9e20bfc7ac 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -14,7 +14,6 @@
#define RISCV_PMU_LEGACY_CYCLE 0
#define RISCV_PMU_LEGACY_INSTRET 1
-#define RISCV_PMU_LEGACY_NUM_CTR 2
static bool pmu_init_done;
@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
{
pr_info("Legacy PMU implementation is available\n");
- pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
+ pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
+ BIT(RISCV_PMU_LEGACY_INSTRET);
pmu->ctr_start = pmu_legacy_ctr_start;
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 6f6681bbfd36..15e5a47be7d5 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -18,6 +18,7 @@
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/cpu_pm.h>
+#include <linux/sched/clock.h>
#include <asm/sbi.h>
#include <asm/hwcap.h>
@@ -271,7 +272,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
struct sbiret ret;
int idx;
uint64_t cbase = 0;
- uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
unsigned long cflags = 0;
if (event->attr.exclude_kernel)
@@ -281,11 +281,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
- ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
- cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+ rvpmu->cmask, cflags, hwc->event_base, hwc->config,
+ hwc->config >> 32);
#else
- ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
- cflags, hwc->event_base, hwc->config, 0);
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+ rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -294,7 +295,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
}
idx = ret.value;
- if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
+ if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
return -ENOENT;
/* Additional sanity check for the counter id */
@@ -463,7 +464,7 @@ static int pmu_sbi_find_num_ctrs(void)
return sbi_err_map_linux_errno(ret.error);
}
-static int pmu_sbi_get_ctrinfo(int nctr)
+static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
{
struct sbiret ret;
int i, num_hw_ctr = 0, num_fw_ctr = 0;
@@ -473,11 +474,14 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (!pmu_ctr_list)
return -ENOMEM;
- for (i = 0; i <= nctr; i++) {
+ for (i = 0; i < nctr; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
continue;
+
+ *mask |= BIT(i);
+
cinfo.value = ret.value;
if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
num_fw_ctr++;
@@ -498,7 +502,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
* which may include counters that are not enabled yet.
*/
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
- 0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
+ 0, pmu->cmask, 0, 0, 0, 0);
}
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -567,6 +571,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
unsigned long overflow;
unsigned long overflowed_ctrs = 0;
struct cpu_hw_events *cpu_hw_evt = dev;
+ u64 start_clock = sched_clock();
if (WARN_ON_ONCE(!cpu_hw_evt))
return IRQ_NONE;
@@ -635,7 +640,9 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
perf_event_overflow(event, &data, regs);
}
}
+
pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
+ perf_sample_event_took(sched_clock() - start_clock);
return IRQ_HANDLED;
}
@@ -788,8 +795,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
- int num_counters;
+ unsigned long cmask = 0;
int ret = -ENODEV;
+ int num_counters;
pr_info("SBI PMU extension is available\n");
pmu = riscv_pmu_alloc();
@@ -803,7 +811,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
}
/* cache all the information about counters now */
- if (pmu_sbi_get_ctrinfo(num_counters))
+ if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
goto out_free;
ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -812,8 +820,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
}
+
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
- pmu->num_counters = num_counters;
+ pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
pmu->event_map = pmu_sbi_event_map;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 300b0f2b5f84..7bd00a11d074 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -91,6 +91,7 @@ source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+source "drivers/phy/sunplus/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
source "drivers/phy/intel/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 01e9efffc726..54f312c10a40 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -31,6 +31,7 @@ obj-y += allwinner/ \
samsung/ \
socionext/ \
st/ \
+ sunplus/ \
tegra/ \
ti/ \
xilinx/
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index d5f3b42eb8ce..3a3831f6059a 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -768,7 +768,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
if (data->cfg->dedicated_clocks)
snprintf(name, sizeof(name), "usb%d_phy", i);
else
- strlcpy(name, "usb_phy", sizeof(name));
+ strscpy(name, "usb_phy", sizeof(name));
phy->clk = devm_clk_get(dev, name);
if (IS_ERR(phy->clk)) {
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
index 1027ece6ca12..a3e1108b736d 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -197,7 +197,7 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
struct phy_provider *phy;
struct device *dev = &pdev->dev;
struct phy_axg_mipi_pcie_analog_priv *priv;
- struct device_node *np = dev->of_node;
+ struct device_node *np = dev->of_node, *parent_np;
struct regmap *map;
int ret;
@@ -206,7 +206,9 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev)
return -ENOMEM;
/* Get the hhi system controller node */
- map = syscon_node_to_regmap(of_get_parent(dev->of_node));
+ parent_np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(map)) {
dev_err(dev,
"failed to get HHI regmap\n");
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index 54d65a6f0fcc..d2a1da8d9e58 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -388,7 +388,6 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
struct phy_g12a_usb3_pcie_priv *priv;
struct phy_provider *phy_provider;
void __iomem *base;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -408,43 +407,24 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap_cr))
return PTR_ERR(priv->regmap_cr);
- priv->clk_ref = devm_clk_get(dev, "ref_clk");
+ priv->clk_ref = devm_clk_get_enabled(dev, "ref_clk");
if (IS_ERR(priv->clk_ref))
return PTR_ERR(priv->clk_ref);
- ret = clk_prepare_enable(priv->clk_ref);
- if (ret)
- return ret;
-
priv->reset = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(priv->reset)) {
- ret = PTR_ERR(priv->reset);
- goto err_disable_clk_ref;
- }
+ if (IS_ERR(priv->reset))
+ return PTR_ERR(priv->reset);
priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops);
- if (IS_ERR(priv->phy)) {
- ret = PTR_ERR(priv->phy);
- dev_err_probe(dev, ret, "failed to create PHY\n");
- goto err_disable_clk_ref;
- }
+ if (IS_ERR(priv->phy))
+ return dev_err_probe(dev, PTR_ERR(priv->phy), "failed to create PHY\n");
phy_set_drvdata(priv->phy, priv);
dev_set_drvdata(dev, priv);
phy_provider = devm_of_phy_provider_register(dev,
phy_g12a_usb3_pcie_xlate);
- if (IS_ERR(phy_provider)) {
- ret = PTR_ERR(phy_provider);
- goto err_disable_clk_ref;
- }
-
- return 0;
-
-err_disable_clk_ref:
- clk_disable_unprepare(priv->clk_ref);
-
- return ret;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
static const struct of_device_id phy_g12a_usb3_pcie_of_match[] = {
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 93a6a8ee4716..1d89a2fd9b79 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -93,11 +93,11 @@ config PHY_BRCM_SATA
config PHY_BRCM_USB
tristate "Broadcom STB USB PHY driver"
- depends on ARCH_BCM4908 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB if ARCH_BRCMSTB
- default ARCH_BCM4908 || ARCH_BRCMSTB
+ default ARCH_BCMBCA || ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
This driver is required by the USB XHCI, EHCI and OHCI
diff --git a/drivers/phy/intel/phy-intel-lgm-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c
index 157683d10367..6010e246d52e 100644
--- a/drivers/phy/intel/phy-intel-lgm-combo.c
+++ b/drivers/phy/intel/phy-intel-lgm-combo.c
@@ -413,44 +413,29 @@ static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
u32 val;
cbphy->core_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(cbphy->core_clk)) {
- ret = PTR_ERR(cbphy->core_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get clk failed:%d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->core_clk))
+ return dev_err_probe(dev, PTR_ERR(cbphy->core_clk),
+ "Get clk failed!\n");
cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
- if (IS_ERR(cbphy->core_rst)) {
- ret = PTR_ERR(cbphy->core_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get core reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->core_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->core_rst),
+ "Get core reset control err!\n");
cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
- if (IS_ERR(cbphy->phy_rst)) {
- ret = PTR_ERR(cbphy->phy_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get PHY reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->phy_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->phy_rst),
+ "Get PHY reset control err!\n");
cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
- if (IS_ERR(cbphy->iphy[0].app_rst)) {
- ret = PTR_ERR(cbphy->iphy[0].app_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->iphy[0].app_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->iphy[0].app_rst),
+ "Get phy0 reset control err!\n");
cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
- if (IS_ERR(cbphy->iphy[1].app_rst)) {
- ret = PTR_ERR(cbphy->iphy[1].app_rst);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
- return ret;
- }
+ if (IS_ERR(cbphy->iphy[1].app_rst))
+ return dev_err_probe(dev, PTR_ERR(cbphy->iphy[1].app_rst),
+ "Get phy1 reset control err!\n");
cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(cbphy->app_base))
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index a4d7d9bd100d..67712c77d806 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -274,7 +274,6 @@ struct mvebu_a3700_comphy_lane {
int submode;
bool invert_tx;
bool invert_rx;
- bool needs_reset;
};
struct gbe_phy_init_data_fix {
@@ -1097,40 +1096,12 @@ mvebu_a3700_comphy_pcie_power_off(struct mvebu_a3700_comphy_lane *lane)
0x0, PU_PLL_BIT | PU_RX_BIT | PU_TX_BIT);
}
-static int mvebu_a3700_comphy_reset(struct phy *phy)
+static void mvebu_a3700_comphy_usb3_power_off(struct mvebu_a3700_comphy_lane *lane)
{
- struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- u16 mask, data;
-
- dev_dbg(lane->dev, "resetting lane %d\n", lane->id);
-
- /* COMPHY reset for internal logic */
- comphy_lane_reg_set(lane, COMPHY_SFT_RESET,
- SFT_RST_NO_REG, SFT_RST_NO_REG);
-
- /* COMPHY register reset (cleared automatically) */
- comphy_lane_reg_set(lane, COMPHY_SFT_RESET, SFT_RST, SFT_RST);
-
- /* PIPE soft and register reset */
- data = PIPE_SOFT_RESET | PIPE_REG_RESET;
- mask = data;
- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL, data, mask);
-
- /* Release PIPE register reset */
- comphy_lane_reg_set(lane, COMPHY_PIPE_RST_CLK_CTRL,
- 0x0, PIPE_REG_RESET);
-
- /* Reset SB configuration register (only for lanes 0 and 1) */
- if (lane->id == 0 || lane->id == 1) {
- u32 mask, data;
-
- data = PIN_RESET_CORE_BIT | PIN_RESET_COMPHY_BIT |
- PIN_PU_PLL_BIT | PIN_PU_RX_BIT | PIN_PU_TX_BIT;
- mask = data | PIN_PU_IVREF_BIT | PIN_TX_IDLE_BIT;
- comphy_periph_reg_set(lane, COMPHY_PHY_CFG1, data, mask);
- }
-
- return 0;
+ /*
+ * The USB3 MAC sets the USB3 PHY to low state, so we do not
+ * need to power off USB3 PHY again.
+ */
}
static bool mvebu_a3700_comphy_check_mode(int lane,
@@ -1171,10 +1142,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
(lane->mode != mode || lane->submode != submode))
return -EBUSY;
- /* If changing mode, ensure reset is called */
- if (lane->mode != PHY_MODE_INVALID && lane->mode != mode)
- lane->needs_reset = true;
-
/* Just remember the mode, ->power_on() will do the real setup */
lane->mode = mode;
lane->submode = submode;
@@ -1185,7 +1152,6 @@ static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode,
static int mvebu_a3700_comphy_power_on(struct phy *phy)
{
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- int ret;
if (!mvebu_a3700_comphy_check_mode(lane->id, lane->mode,
lane->submode)) {
@@ -1193,14 +1159,6 @@ static int mvebu_a3700_comphy_power_on(struct phy *phy)
return -EINVAL;
}
- if (lane->needs_reset) {
- ret = mvebu_a3700_comphy_reset(phy);
- if (ret)
- return ret;
-
- lane->needs_reset = false;
- }
-
switch (lane->mode) {
case PHY_MODE_USB_HOST_SS:
dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id);
@@ -1224,38 +1182,28 @@ static int mvebu_a3700_comphy_power_off(struct phy *phy)
{
struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy);
- switch (lane->mode) {
- case PHY_MODE_USB_HOST_SS:
- /*
- * The USB3 MAC sets the USB3 PHY to low state, so we do not
- * need to power off USB3 PHY again.
- */
- break;
-
- case PHY_MODE_SATA:
- mvebu_a3700_comphy_sata_power_off(lane);
- break;
-
- case PHY_MODE_ETHERNET:
+ switch (lane->id) {
+ case 0:
+ mvebu_a3700_comphy_usb3_power_off(lane);
mvebu_a3700_comphy_ethernet_power_off(lane);
- break;
-
- case PHY_MODE_PCIE:
+ return 0;
+ case 1:
mvebu_a3700_comphy_pcie_power_off(lane);
- break;
-
+ mvebu_a3700_comphy_ethernet_power_off(lane);
+ return 0;
+ case 2:
+ mvebu_a3700_comphy_usb3_power_off(lane);
+ mvebu_a3700_comphy_sata_power_off(lane);
+ return 0;
default:
dev_err(lane->dev, "invalid COMPHY mode\n");
return -EINVAL;
}
-
- return 0;
}
static const struct phy_ops mvebu_a3700_comphy_ops = {
.power_on = mvebu_a3700_comphy_power_on,
.power_off = mvebu_a3700_comphy_power_off,
- .reset = mvebu_a3700_comphy_reset,
.set_mode = mvebu_a3700_comphy_set_mode,
.owner = THIS_MODULE,
};
@@ -1393,8 +1341,7 @@ static int mvebu_a3700_comphy_probe(struct platform_device *pdev)
* To avoid relying on the bootloader/firmware configuration,
* power off all comphys.
*/
- mvebu_a3700_comphy_reset(phy);
- lane->needs_reset = false;
+ mvebu_a3700_comphy_power_off(phy);
}
provider = devm_of_phy_provider_register(&pdev->dev,
diff --git a/drivers/phy/mediatek/phy-mtk-dp.c b/drivers/phy/mediatek/phy-mtk-dp.c
index 31266e7ca324..232fd3f1ff1b 100644
--- a/drivers/phy/mediatek/phy-mtk-dp.c
+++ b/drivers/phy/mediatek/phy-mtk-dp.c
@@ -85,7 +85,7 @@ struct mtk_dp_phy {
static int mtk_dp_phy_init(struct phy *phy)
{
struct mtk_dp_phy *dp_phy = phy_get_drvdata(phy);
- u32 driving_params[] = {
+ static const u32 driving_params[] = {
DRIVING_PARAM_3_DEFAULT,
DRIVING_PARAM_4_DEFAULT,
DRIVING_PARAM_5_DEFAULT,
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
index b74c65a1762c..e51b2d13eab4 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
@@ -5,83 +5,66 @@
*/
#include "phy-mtk-hdmi.h"
+#include "phy-mtk-io.h"
#define HDMI_CON0 0x00
-#define RG_HDMITX_DRV_IBIAS 0
-#define RG_HDMITX_DRV_IBIAS_MASK (0x3f << 0)
-#define RG_HDMITX_EN_SER 12
-#define RG_HDMITX_EN_SER_MASK (0x0f << 12)
-#define RG_HDMITX_EN_SLDO 16
-#define RG_HDMITX_EN_SLDO_MASK (0x0f << 16)
-#define RG_HDMITX_EN_PRED 20
-#define RG_HDMITX_EN_PRED_MASK (0x0f << 20)
-#define RG_HDMITX_EN_IMP 24
-#define RG_HDMITX_EN_IMP_MASK (0x0f << 24)
-#define RG_HDMITX_EN_DRV 28
-#define RG_HDMITX_EN_DRV_MASK (0x0f << 28)
+#define RG_HDMITX_DRV_IBIAS_MASK GENMASK(5, 0)
+#define RG_HDMITX_EN_SER_MASK GENMASK(15, 12)
+#define RG_HDMITX_EN_SLDO_MASK GENMASK(19, 16)
+#define RG_HDMITX_EN_PRED_MASK GENMASK(23, 20)
+#define RG_HDMITX_EN_IMP_MASK GENMASK(27, 24)
+#define RG_HDMITX_EN_DRV_MASK GENMASK(31, 28)
#define HDMI_CON1 0x04
-#define RG_HDMITX_PRED_IBIAS 18
-#define RG_HDMITX_PRED_IBIAS_MASK (0x0f << 18)
-#define RG_HDMITX_PRED_IMP (0x01 << 22)
-#define RG_HDMITX_DRV_IMP 26
-#define RG_HDMITX_DRV_IMP_MASK (0x3f << 26)
+#define RG_HDMITX_PRED_IBIAS_MASK GENMASK(21, 18)
+#define RG_HDMITX_PRED_IMP BIT(22)
+#define RG_HDMITX_DRV_IMP_MASK GENMASK(31, 26)
#define HDMI_CON2 0x08
-#define RG_HDMITX_EN_TX_CKLDO (0x01 << 0)
-#define RG_HDMITX_EN_TX_POSDIV (0x01 << 1)
-#define RG_HDMITX_TX_POSDIV 3
-#define RG_HDMITX_TX_POSDIV_MASK (0x03 << 3)
-#define RG_HDMITX_EN_MBIAS (0x01 << 6)
-#define RG_HDMITX_MBIAS_LPF_EN (0x01 << 7)
+#define RG_HDMITX_EN_TX_CKLDO BIT(0)
+#define RG_HDMITX_EN_TX_POSDIV BIT(1)
+#define RG_HDMITX_TX_POSDIV_MASK GENMASK(4, 3)
+#define RG_HDMITX_EN_MBIAS BIT(6)
+#define RG_HDMITX_MBIAS_LPF_EN BIT(7)
#define HDMI_CON4 0x10
-#define RG_HDMITX_RESERVE_MASK (0xffffffff << 0)
+#define RG_HDMITX_RESERVE_MASK GENMASK(31, 0)
#define HDMI_CON6 0x18
-#define RG_HTPLL_BR 0
-#define RG_HTPLL_BR_MASK (0x03 << 0)
-#define RG_HTPLL_BC 2
-#define RG_HTPLL_BC_MASK (0x03 << 2)
-#define RG_HTPLL_BP 4
-#define RG_HTPLL_BP_MASK (0x0f << 4)
-#define RG_HTPLL_IR 8
-#define RG_HTPLL_IR_MASK (0x0f << 8)
-#define RG_HTPLL_IC 12
-#define RG_HTPLL_IC_MASK (0x0f << 12)
-#define RG_HTPLL_POSDIV 16
-#define RG_HTPLL_POSDIV_MASK (0x03 << 16)
-#define RG_HTPLL_PREDIV 18
-#define RG_HTPLL_PREDIV_MASK (0x03 << 18)
-#define RG_HTPLL_FBKSEL 20
-#define RG_HTPLL_FBKSEL_MASK (0x03 << 20)
-#define RG_HTPLL_RLH_EN (0x01 << 22)
-#define RG_HTPLL_FBKDIV 24
-#define RG_HTPLL_FBKDIV_MASK (0x7f << 24)
-#define RG_HTPLL_EN (0x01 << 31)
+#define RG_HTPLL_BR_MASK GENMASK(1, 0)
+#define RG_HTPLL_BC_MASK GENMASK(3, 2)
+#define RG_HTPLL_BP_MASK GENMASK(7, 4)
+#define RG_HTPLL_IR_MASK GENMASK(11, 8)
+#define RG_HTPLL_IC_MASK GENMASK(15, 12)
+#define RG_HTPLL_POSDIV_MASK GENMASK(17, 16)
+#define RG_HTPLL_PREDIV_MASK GENMASK(19, 18)
+#define RG_HTPLL_FBKSEL_MASK GENMASK(21, 20)
+#define RG_HTPLL_RLH_EN BIT(22)
+#define RG_HTPLL_FBKDIV_MASK GENMASK(30, 24)
+#define RG_HTPLL_EN BIT(31)
#define HDMI_CON7 0x1c
-#define RG_HTPLL_AUTOK_EN (0x01 << 23)
-#define RG_HTPLL_DIVEN 28
-#define RG_HTPLL_DIVEN_MASK (0x07 << 28)
+#define RG_HTPLL_AUTOK_EN BIT(23)
+#define RG_HTPLL_DIVEN_MASK GENMASK(30, 28)
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_phy_set_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
usleep_range(80, 100);
return 0;
}
@@ -89,20 +72,21 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_clear_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
usleep_range(80, 100);
}
@@ -116,6 +100,7 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
u32 pos_div;
if (rate <= 64000000)
@@ -125,37 +110,25 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
else
pos_div = 1;
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
- RG_HTPLL_IC_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
- RG_HTPLL_IR_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
- RG_HDMITX_TX_POSDIV_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
- RG_HTPLL_FBKSEL_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
- RG_HTPLL_FBKDIV_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
- RG_HTPLL_DIVEN_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
- RG_HTPLL_BP_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
- RG_HTPLL_BC_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
- RG_HTPLL_BR_MASK);
-
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
- RG_HDMITX_PRED_IBIAS_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
- RG_HDMITX_DRV_IMP_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
- RG_HDMITX_DRV_IBIAS_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_PREDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_IC_MASK, 0x1);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_IR_MASK, 0x1);
+ mtk_phy_update_field(base + HDMI_CON2, RG_HDMITX_TX_POSDIV_MASK, pos_div);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_FBKSEL_MASK, 1);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_FBKDIV_MASK, 19);
+ mtk_phy_update_field(base + HDMI_CON7, RG_HTPLL_DIVEN_MASK, 0x2);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BP_MASK, 0xc);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BC_MASK, 0x2);
+ mtk_phy_update_field(base + HDMI_CON6, RG_HTPLL_BR_MASK, 0x1);
+
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PRED_IMP);
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PRED_IBIAS_MASK, 0x3);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_DRV_IMP_MASK, 0x28);
+ mtk_phy_update_field(base + HDMI_CON4, RG_HDMITX_RESERVE_MASK, 0x28);
+ mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_DRV_IBIAS_MASK, 0xa);
return 0;
}
@@ -164,9 +137,10 @@ static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
unsigned long out_rate, val;
+ u32 tmp;
- val = (readl(hdmi_phy->regs + HDMI_CON6)
- & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+ tmp = readl(hdmi_phy->regs + HDMI_CON6);
+ val = FIELD_GET(RG_HTPLL_PREDIV_MASK, tmp);
switch (val) {
case 0x00:
out_rate = parent_rate;
@@ -179,14 +153,14 @@ static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
break;
}
- val = (readl(hdmi_phy->regs + HDMI_CON6)
- & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+ val = FIELD_GET(RG_HTPLL_FBKDIV_MASK, tmp);
out_rate *= (val + 1) * 2;
- val = (readl(hdmi_phy->regs + HDMI_CON2)
- & RG_HDMITX_TX_POSDIV_MASK);
- out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
- if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+ tmp = readl(hdmi_phy->regs + HDMI_CON2);
+ val = FIELD_GET(RG_HDMITX_TX_POSDIV_MASK, tmp);
+ out_rate >>= val;
+
+ if (tmp & RG_HDMITX_EN_TX_POSDIV)
out_rate /= 5;
return out_rate;
@@ -202,37 +176,41 @@ static const struct clk_ops mtk_hdmi_phy_pll_ops = {
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ void __iomem *base = hdmi_phy->regs;
+
+ mtk_phy_set_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_set_bits(base + HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
usleep_range(80, 100);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_set_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
usleep_range(80, 100);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+ void __iomem *base = hdmi_phy->regs;
+
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_EN);
usleep_range(80, 100);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON2, RG_HDMITX_EN_MBIAS);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+ mtk_phy_clear_bits(base + HDMI_CON6, RG_HTPLL_RLH_EN);
+ mtk_phy_clear_bits(base + HDMI_CON7, RG_HTPLL_AUTOK_EN);
usleep_range(80, 100);
}
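
The mt2701 recalc_rate rework above reads HDMI_CON6 once into tmp and then decodes it with FIELD_GET() instead of repeating readl() plus an open-coded shift-and-mask per field. A minimal host-side sketch of that decode pattern follows; the two mask layouts are placeholders chosen only for illustration (the real RG_HTPLL_* widths live in the mt2701 header, which is not part of this hunk), and the GENMASK/FIELD_GET definitions are local stand-ins for the kernel macros, not the <linux/bitfield.h> versions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)       (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_GET(mask, r)  (((r) & (mask)) >> __builtin_ctz(mask))

/* Placeholder layouts, only to show the pattern; not the real RG_HTPLL_* bits. */
#define HTPLL_PREDIV_MASK   GENMASK(13, 12)
#define HTPLL_FBKDIV_MASK   GENMASK(11, 5)

int main(void)
{
	uint32_t con6 = 0x00001273;	/* pretend this came from one readl() */

	/* old style: (reg & MASK) >> SHIFT, with a readl() per field */
	uint32_t prediv_old = (con6 & HTPLL_PREDIV_MASK) >> 12;
	uint32_t fbkdiv_old = (con6 & HTPLL_FBKDIV_MASK) >> 5;

	/* new style: one read cached in tmp, fields pulled out with FIELD_GET() */
	uint32_t prediv = FIELD_GET(HTPLL_PREDIV_MASK, con6);
	uint32_t fbkdiv = FIELD_GET(HTPLL_FBKDIV_MASK, con6);

	assert(prediv == prediv_old && fbkdiv == fbkdiv_old);
	printf("prediv=%u fbkdiv=%u\n", prediv, fbkdiv);
	return 0;
}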
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
index 6cdfdf5a698a..d04758396046 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
@@ -5,121 +5,99 @@
*/
#include "phy-mtk-hdmi.h"
+#include "phy-mtk-io.h"
#define HDMI_CON0 0x00
#define RG_HDMITX_PLL_EN BIT(31)
-#define RG_HDMITX_PLL_FBKDIV (0x7f << 24)
-#define PLL_FBKDIV_SHIFT 24
-#define RG_HDMITX_PLL_FBKSEL (0x3 << 22)
-#define PLL_FBKSEL_SHIFT 22
-#define RG_HDMITX_PLL_PREDIV (0x3 << 20)
-#define PREDIV_SHIFT 20
-#define RG_HDMITX_PLL_POSDIV (0x3 << 18)
-#define POSDIV_SHIFT 18
-#define RG_HDMITX_PLL_RST_DLY (0x3 << 16)
-#define RG_HDMITX_PLL_IR (0xf << 12)
-#define PLL_IR_SHIFT 12
-#define RG_HDMITX_PLL_IC (0xf << 8)
-#define PLL_IC_SHIFT 8
-#define RG_HDMITX_PLL_BP (0xf << 4)
-#define PLL_BP_SHIFT 4
-#define RG_HDMITX_PLL_BR (0x3 << 2)
-#define PLL_BR_SHIFT 2
-#define RG_HDMITX_PLL_BC (0x3 << 0)
-#define PLL_BC_SHIFT 0
+#define RG_HDMITX_PLL_FBKDIV GENMASK(30, 24)
+#define RG_HDMITX_PLL_FBKSEL GENMASK(23, 22)
+#define RG_HDMITX_PLL_PREDIV GENMASK(21, 20)
+#define RG_HDMITX_PLL_POSDIV GENMASK(19, 18)
+#define RG_HDMITX_PLL_RST_DLY GENMASK(17, 16)
+#define RG_HDMITX_PLL_IR GENMASK(15, 12)
+#define RG_HDMITX_PLL_IC GENMASK(11, 8)
+#define RG_HDMITX_PLL_BP GENMASK(7, 4)
+#define RG_HDMITX_PLL_BR GENMASK(3, 2)
+#define RG_HDMITX_PLL_BC GENMASK(1, 0)
#define HDMI_CON1 0x04
-#define RG_HDMITX_PLL_DIVEN (0x7 << 29)
-#define PLL_DIVEN_SHIFT 29
+#define RG_HDMITX_PLL_DIVEN GENMASK(31, 29)
#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
-#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26)
-#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24)
+#define RG_HDMITX_PLL_AUTOK_KF GENMASK(27, 26)
+#define RG_HDMITX_PLL_AUTOK_KS GENMASK(25, 24)
#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
-#define RG_HDMITX_PLL_BAND (0x3f << 16)
+#define RG_HDMITX_PLL_BAND GENMASK(21, 16)
#define RG_HDMITX_PLL_REF_SEL BIT(15)
#define RG_HDMITX_PLL_BIAS_EN BIT(14)
#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
-#define RG_HDMITX_PLL_TXDIV (0x3 << 10)
-#define PLL_TXDIV_SHIFT 10
+#define RG_HDMITX_PLL_TXDIV GENMASK(11, 10)
#define RG_HDMITX_PLL_LVROD_EN BIT(9)
#define RG_HDMITX_PLL_MONVC_EN BIT(8)
#define RG_HDMITX_PLL_MONCK_EN BIT(7)
#define RG_HDMITX_PLL_MONREF_EN BIT(6)
#define RG_HDMITX_PLL_TST_EN BIT(5)
#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
-#define RG_HDMITX_PLL_TST_SEL (0xf << 0)
+#define RG_HDMITX_PLL_TST_SEL GENMASK(3, 0)
#define HDMI_CON2 0x08
-#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8)
+#define RGS_HDMITX_PLL_AUTOK_BAND GENMASK(14, 8)
#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
#define RG_HDMITX_EN_TX_CKLDO BIT(0)
#define HDMI_CON3 0x0c
-#define RG_HDMITX_SER_EN (0xf << 28)
-#define RG_HDMITX_PRD_EN (0xf << 24)
-#define RG_HDMITX_PRD_IMP_EN (0xf << 20)
-#define RG_HDMITX_DRV_EN (0xf << 16)
-#define RG_HDMITX_DRV_IMP_EN (0xf << 12)
-#define DRV_IMP_EN_SHIFT 12
+#define RG_HDMITX_SER_EN GENMASK(31, 28)
+#define RG_HDMITX_PRD_EN GENMASK(27, 24)
+#define RG_HDMITX_PRD_IMP_EN GENMASK(23, 20)
+#define RG_HDMITX_DRV_EN GENMASK(19, 16)
+#define RG_HDMITX_DRV_IMP_EN GENMASK(15, 12)
#define RG_HDMITX_MHLCK_FORCE BIT(10)
#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
#define RG_HDMITX_MHLCK_EN BIT(8)
-#define RG_HDMITX_SER_DIN_SEL (0xf << 4)
+#define RG_HDMITX_SER_DIN_SEL GENMASK(7, 4)
#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
#define RG_HDMITX_SER_BIST_TOG BIT(2)
#define RG_HDMITX_SER_DIN_TOG BIT(1)
#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
#define HDMI_CON4 0x10
-#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24)
-#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16)
-#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8)
-#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0)
-#define PRD_IBIAS_CLK_SHIFT 24
-#define PRD_IBIAS_D2_SHIFT 16
-#define PRD_IBIAS_D1_SHIFT 8
-#define PRD_IBIAS_D0_SHIFT 0
+#define RG_HDMITX_PRD_IBIAS_CLK GENMASK(27, 24)
+#define RG_HDMITX_PRD_IBIAS_D2 GENMASK(19, 16)
+#define RG_HDMITX_PRD_IBIAS_D1 GENMASK(11, 8)
+#define RG_HDMITX_PRD_IBIAS_D0 GENMASK(3, 0)
#define HDMI_CON5 0x14
-#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24)
-#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16)
-#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8)
-#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0)
-#define DRV_IBIAS_CLK_SHIFT 24
-#define DRV_IBIAS_D2_SHIFT 16
-#define DRV_IBIAS_D1_SHIFT 8
-#define DRV_IBIAS_D0_SHIFT 0
+#define RG_HDMITX_DRV_IBIAS_CLK GENMASK(29, 24)
+#define RG_HDMITX_DRV_IBIAS_D2 GENMASK(21, 16)
+#define RG_HDMITX_DRV_IBIAS_D1 GENMASK(13, 8)
+#define RG_HDMITX_DRV_IBIAS_D0 GENMASK(5, 0)
#define HDMI_CON6 0x18
-#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24)
-#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16)
-#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8)
-#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0)
-#define DRV_IMP_CLK_SHIFT 24
-#define DRV_IMP_D2_SHIFT 16
-#define DRV_IMP_D1_SHIFT 8
-#define DRV_IMP_D0_SHIFT 0
+#define RG_HDMITX_DRV_IMP_CLK GENMASK(29, 24)
+#define RG_HDMITX_DRV_IMP_D2 GENMASK(21, 16)
+#define RG_HDMITX_DRV_IMP_D1 GENMASK(13, 8)
+#define RG_HDMITX_DRV_IMP_D0 GENMASK(5, 0)
#define HDMI_CON7 0x1c
-#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27)
-#define RG_HDMITX_SER_DIN (0x3ff << 16)
-#define RG_HDMITX_CHLDC_TST (0xf << 12)
-#define RG_HDMITX_CHLCK_TST (0xf << 8)
-#define RG_HDMITX_RESERVE (0xff << 0)
+#define RG_HDMITX_MHLCK_DRV_IBIAS GENMASK(31, 27)
+#define RG_HDMITX_SER_DIN GENMASK(25, 16)
+#define RG_HDMITX_CHLDC_TST GENMASK(15, 12)
+#define RG_HDMITX_CHLCK_TST GENMASK(11, 8)
+#define RG_HDMITX_RESERVE GENMASK(7, 0)
#define HDMI_CON8 0x20
-#define RGS_HDMITX_2T1_LEV (0xf << 16)
-#define RGS_HDMITX_2T1_EDG (0xf << 12)
-#define RGS_HDMITX_5T1_LEV (0xf << 8)
-#define RGS_HDMITX_5T1_EDG (0xf << 4)
+#define RGS_HDMITX_2T1_LEV GENMASK(19, 16)
+#define RGS_HDMITX_2T1_EDG GENMASK(15, 12)
+#define RGS_HDMITX_5T1_LEV GENMASK(11, 8)
+#define RGS_HDMITX_5T1_EDG GENMASK(7, 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_phy_clear_bits(base + HDMI_CON3, RG_HDMITX_MHLCK_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ mtk_phy_set_bits(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
return 0;
}
@@ -127,15 +105,16 @@ static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_PLL_EN);
usleep_range(100, 150);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+ mtk_phy_clear_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_phy_clear_bits(base + HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
usleep_range(100, 150);
}
@@ -157,6 +136,7 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+ void __iomem *base = hdmi_phy->regs;
unsigned int pre_div;
unsigned int div;
unsigned int pre_ibias;
@@ -177,65 +157,57 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
div = 1;
}
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
- RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
- (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
- RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
- (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
- (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
- (0x1 << PLL_BR_SHIFT),
- RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
- RG_HDMITX_PLL_BR);
+ mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_PLL_PREDIV, pre_div);
+ mtk_phy_set_bits(base + HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+ mtk_phy_update_bits(base + HDMI_CON0,
+ RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR,
+ FIELD_PREP(RG_HDMITX_PLL_IC, 0x1) |
+ FIELD_PREP(RG_HDMITX_PLL_IR, 0x1));
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PLL_TXDIV, div);
+ mtk_phy_update_bits(base + HDMI_CON0,
+ RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV,
+ FIELD_PREP(RG_HDMITX_PLL_FBKSEL, 0x1) |
+ FIELD_PREP(RG_HDMITX_PLL_FBKDIV, 19));
+ mtk_phy_update_field(base + HDMI_CON1, RG_HDMITX_PLL_DIVEN, 0x2);
+ mtk_phy_update_bits(base + HDMI_CON0,
+ RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
+ RG_HDMITX_PLL_BR,
+ FIELD_PREP(RG_HDMITX_PLL_BP, 0xc) |
+ FIELD_PREP(RG_HDMITX_PLL_BC, 0x2) |
+ FIELD_PREP(RG_HDMITX_PLL_BR, 0x1));
if (rate < 165000000) {
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_PRD_IMP_EN);
+ mtk_phy_clear_bits(base + HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
pre_ibias = 0x3;
imp_en = 0x0;
hdmi_ibias = hdmi_phy->ibias;
} else {
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_PRD_IMP_EN);
+ mtk_phy_set_bits(base + HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
pre_ibias = 0x6;
imp_en = 0xf;
hdmi_ibias = hdmi_phy->ibias_up;
}
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
- (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
- (pre_ibias << PRD_IBIAS_D2_SHIFT) |
- (pre_ibias << PRD_IBIAS_D1_SHIFT) |
- (pre_ibias << PRD_IBIAS_D0_SHIFT),
- RG_HDMITX_PRD_IBIAS_CLK |
- RG_HDMITX_PRD_IBIAS_D2 |
- RG_HDMITX_PRD_IBIAS_D1 |
- RG_HDMITX_PRD_IBIAS_D0);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
- (imp_en << DRV_IMP_EN_SHIFT),
- RG_HDMITX_DRV_IMP_EN);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
- (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
- (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
- (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
- (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
- RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
- RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
- mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
- (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
- (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
- (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
- (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
- RG_HDMITX_DRV_IBIAS_CLK |
- RG_HDMITX_DRV_IBIAS_D2 |
- RG_HDMITX_DRV_IBIAS_D1 |
- RG_HDMITX_DRV_IBIAS_D0);
+ mtk_phy_update_bits(base + HDMI_CON4,
+ RG_HDMITX_PRD_IBIAS_CLK | RG_HDMITX_PRD_IBIAS_D2 |
+ RG_HDMITX_PRD_IBIAS_D1 | RG_HDMITX_PRD_IBIAS_D0,
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_CLK, pre_ibias) |
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_D2, pre_ibias) |
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_D1, pre_ibias) |
+ FIELD_PREP(RG_HDMITX_PRD_IBIAS_D0, pre_ibias));
+ mtk_phy_update_field(base + HDMI_CON3, RG_HDMITX_DRV_IMP_EN, imp_en);
+ mtk_phy_update_bits(base + HDMI_CON6,
+ RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
+ RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0,
+ FIELD_PREP(RG_HDMITX_DRV_IMP_CLK, hdmi_phy->drv_imp_clk) |
+ FIELD_PREP(RG_HDMITX_DRV_IMP_D2, hdmi_phy->drv_imp_d2) |
+ FIELD_PREP(RG_HDMITX_DRV_IMP_D1, hdmi_phy->drv_imp_d1) |
+ FIELD_PREP(RG_HDMITX_DRV_IMP_D0, hdmi_phy->drv_imp_d0));
+ mtk_phy_update_bits(base + HDMI_CON5,
+ RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
+ RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0,
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_CLK, hdmi_ibias) |
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_D2, hdmi_ibias) |
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_D1, hdmi_ibias) |
+ FIELD_PREP(RG_HDMITX_DRV_IBIAS_D0, hdmi_ibias));
return 0;
}
@@ -257,17 +229,17 @@ static const struct clk_ops mtk_hdmi_phy_pll_ops = {
static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
- RG_HDMITX_DRV_EN);
+ mtk_phy_set_bits(hdmi_phy->regs + HDMI_CON3,
+ RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_DRV_EN);
usleep_range(100, 150);
}
static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
- mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
- RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
- RG_HDMITX_SER_EN);
+ mtk_phy_clear_bits(hdmi_phy->regs + HDMI_CON3,
+ RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
+ RG_HDMITX_SER_EN);
}
struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
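
The mt8173 hunk above swaps the hand-written (value << *_SHIFT) pairs for GENMASK() fields programmed through FIELD_PREP(); the register contents are unchanged, only the encoding of the shift moves from the call site into the macro. A small host-side check of that claim for the FBKSEL/FBKDIV write in mtk_hdmi_pll_set_rate(), again using local stand-ins for the kernel macros rather than the real headers.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	/* old encoding: explicit shift constants from the removed defines */
	const int PLL_FBKSEL_SHIFT = 22, PLL_FBKDIV_SHIFT = 24;
	uint32_t old_enc = (0x1u << PLL_FBKSEL_SHIFT) | (19u << PLL_FBKDIV_SHIFT);

	/* new encoding: GENMASK layouts from the rewritten header above */
	uint32_t RG_HDMITX_PLL_FBKSEL = GENMASK(23, 22);
	uint32_t RG_HDMITX_PLL_FBKDIV = GENMASK(30, 24);
	uint32_t new_enc = FIELD_PREP(RG_HDMITX_PLL_FBKSEL, 0x1) |
			   FIELD_PREP(RG_HDMITX_PLL_FBKDIV, 19);

	assert(old_enc == new_enc);		/* both are 0x13400000 */
	printf("FBKSEL|FBKDIV bits = %#010x\n", new_enc);
	return 0;
}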
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
index d4bd419abc3c..b16d437d6721 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.c
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.c
@@ -15,39 +15,6 @@ static const struct phy_ops mtk_hdmi_phy_dev_ops = {
.owner = THIS_MODULE,
};
-void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp &= ~bits;
- writel(tmp, reg);
-}
-
-void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp |= bits;
- writel(tmp, reg);
-}
-
-void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 val, u32 mask)
-{
- void __iomem *reg = hdmi_phy->regs + offset;
- u32 tmp;
-
- tmp = readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- writel(tmp, reg);
-}
-
inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
{
return container_of(hw, struct mtk_hdmi_phy, pll_hw);
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index dcf9bb13699b..c7fa65cff989 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -42,12 +41,6 @@ struct mtk_hdmi_phy {
unsigned int ibias_up;
};
-void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits);
-void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 bits);
-void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
- u32 val, u32 mask);
struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
index 500fcdab165d..d20ad5e5be81 100644
--- a/drivers/phy/mediatek/phy-mtk-io.h
+++ b/drivers/phy/mediatek/phy-mtk-io.h
@@ -8,6 +8,7 @@
#ifndef __PHY_MTK_H__
#define __PHY_MTK_H__
+#include <linux/bitfield.h>
#include <linux/io.h>
static inline void mtk_phy_clear_bits(void __iomem *reg, u32 bits)
@@ -35,4 +36,11 @@ static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
writel(tmp, reg);
}
+/* field @mask shall be constant and continuous */
+#define mtk_phy_update_field(reg, mask, val) \
+({ \
+ typeof(mask) mask_ = (mask); \
+ mtk_phy_update_bits(reg, mask_, FIELD_PREP(mask_, val)); \
+})
+
#endif
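
The new mtk_phy_update_field() wrapper only forwards a FIELD_PREP()-shifted value into mtk_phy_update_bits(), which is why the comment above requires @mask to be a constant, contiguous bitfield. A minimal host-side sketch of the same read-modify-write using the RG_HDMITX_PLL_FBKDIV layout from the mt8173 header above; the GENMASK/FIELD_PREP definitions below are stand-ins that mirror the kernel macros, and the register is a plain variable instead of MMIO.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Host-side stand-ins for the kernel bit helpers (assuming 32-bit registers). */
#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define RG_HDMITX_PLL_FBKDIV GENMASK(30, 24)	/* same layout as above */

/* update_bits/update_field over a plain variable instead of readl/writel */
static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}
#define update_field(reg, mask, v) update_bits(reg, mask, FIELD_PREP(mask, v))

int main(void)
{
	uint32_t con0 = 0xffffffff;

	/* mirrors mtk_phy_update_field(base + HDMI_CON0, RG_HDMITX_PLL_FBKDIV, 19) */
	update_field(&con0, RG_HDMITX_PLL_FBKDIV, 19);
	assert((con0 & RG_HDMITX_PLL_FBKDIV) == 0x13000000u);
	printf("HDMI_CON0 = %#010x\n", con0);
	return 0;
}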
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c
index 7a847954594f..673cb0f08959 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8173.c
@@ -4,14 +4,15 @@
* Author: jitao.shi <jitao.shi@mediatek.com>
*/
+#include "phy-mtk-io.h"
#include "phy-mtk-mipi-dsi.h"
#define MIPITX_DSI_CON 0x00
#define RG_DSI_LDOCORE_EN BIT(0)
#define RG_DSI_CKG_LDOOUT_EN BIT(1)
-#define RG_DSI_BCLK_SEL (3 << 2)
-#define RG_DSI_LD_IDX_SEL (7 << 4)
-#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_BCLK_SEL GENMASK(3, 2)
+#define RG_DSI_LD_IDX_SEL GENMASK(6, 4)
+#define RG_DSI_PHYCLK_SEL GENMASK(9, 8)
#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
#define RG_DSI_LPTX_CLMP_EN BIT(11)
@@ -27,41 +28,46 @@
#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
-#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+#define RG_DSI_LNTx_RT_CODE GENMASK(11, 8)
#define MIPITX_DSI_TOP_CON 0x40
#define RG_DSI_LNT_INTR_EN BIT(0)
#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
#define RG_DSI_LNT_TESTMODE_EN BIT(3)
-#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
-#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_LNT_IMP_CAL_CODE GENMASK(7, 4)
+#define RG_DSI_LNT_AIO_SEL GENMASK(10, 8)
#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
#define RG_DSI_DEBUG_INPUT_EN BIT(12)
-#define RG_DSI_PRESERVE (7 << 13)
+#define RG_DSI_PRESERVE GENMASK(15, 13)
#define MIPITX_DSI_BG_CON 0x44
#define RG_DSI_BG_CORE_EN BIT(0)
#define RG_DSI_BG_CKEN BIT(1)
-#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_DIV GENMASK(3, 2)
#define RG_DSI_BG_FAST_CHARGE BIT(4)
-#define RG_DSI_VOUT_MSK (0x3ffff << 5)
-#define RG_DSI_V12_SEL (7 << 5)
-#define RG_DSI_V10_SEL (7 << 8)
-#define RG_DSI_V072_SEL (7 << 11)
-#define RG_DSI_V04_SEL (7 << 14)
-#define RG_DSI_V032_SEL (7 << 17)
-#define RG_DSI_V02_SEL (7 << 20)
-#define RG_DSI_BG_R1_TRIM (0xf << 24)
-#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define RG_DSI_V12_SEL GENMASK(7, 5)
+#define RG_DSI_V10_SEL GENMASK(10, 8)
+#define RG_DSI_V072_SEL GENMASK(13, 11)
+#define RG_DSI_V04_SEL GENMASK(16, 14)
+#define RG_DSI_V032_SEL GENMASK(19, 17)
+#define RG_DSI_V02_SEL GENMASK(22, 20)
+#define RG_DSI_VOUT_MSK \
+ (RG_DSI_V12_SEL | RG_DSI_V10_SEL | RG_DSI_V072_SEL | \
+ RG_DSI_V04_SEL | RG_DSI_V032_SEL | RG_DSI_V02_SEL)
+#define RG_DSI_BG_R1_TRIM GENMASK(27, 24)
+#define RG_DSI_BG_R2_TRIM GENMASK(31, 28)
#define MIPITX_DSI_PLL_CON0 0x50
#define RG_DSI_MPPLL_PLL_EN BIT(0)
-#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
-#define RG_DSI_MPPLL_PREDIV (3 << 1)
-#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
-#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
-#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_PREDIV GENMASK(2, 1)
+#define RG_DSI_MPPLL_TXDIV0 GENMASK(4, 3)
+#define RG_DSI_MPPLL_TXDIV1 GENMASK(6, 5)
+#define RG_DSI_MPPLL_POSDIV GENMASK(9, 7)
+#define RG_DSI_MPPLL_DIV_MSK \
+ (RG_DSI_MPPLL_PREDIV | RG_DSI_MPPLL_TXDIV0 | \
+ RG_DSI_MPPLL_TXDIV1 | RG_DSI_MPPLL_POSDIV)
#define RG_DSI_MPPLL_MONVC_EN BIT(10)
#define RG_DSI_MPPLL_MONREF_EN BIT(11)
#define RG_DSI_MPPLL_VOD_EN BIT(12)
@@ -70,12 +76,12 @@
#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
-#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+#define RG_DSI_MPPLL_SDM_SSC_PRD GENMASK(31, 16)
#define MIPITX_DSI_PLL_CON2 0x58
#define MIPITX_DSI_PLL_TOP 0x64
-#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+#define RG_DSI_MPPLL_PRESERVE GENMASK(15, 8)
#define MIPITX_DSI_PLL_PWR 0x68
#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
@@ -116,6 +122,7 @@
static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
u8 txdiv, txdiv0, txdiv1;
u64 pcw;
@@ -145,34 +152,38 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
return -EINVAL;
}
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_VOUT_MSK |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
- (4 << 20) | (4 << 17) | (4 << 14) |
- (4 << 11) | (4 << 8) | (4 << 5) |
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+ mtk_phy_update_bits(base + MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK | RG_DSI_BG_CKEN |
+ RG_DSI_BG_CORE_EN,
+ FIELD_PREP(RG_DSI_V02_SEL, 4) |
+ FIELD_PREP(RG_DSI_V032_SEL, 4) |
+ FIELD_PREP(RG_DSI_V04_SEL, 4) |
+ FIELD_PREP(RG_DSI_V072_SEL, 4) |
+ FIELD_PREP(RG_DSI_V10_SEL, 4) |
+ FIELD_PREP(RG_DSI_V12_SEL, 4) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
usleep_range(30, 100);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
- (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+ mtk_phy_update_bits(base + MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ FIELD_PREP(RG_DSI_LNT_IMP_CAL_CODE, 8) |
+ RG_DSI_LNT_HS_BIAS_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+ mtk_phy_set_bits(base + MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_PWR_ON |
- RG_DSI_MPPLL_SDM_ISO_EN,
- RG_DSI_MPPLL_SDM_PWR_ON);
+ mtk_phy_update_bits(base + MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON | RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
- RG_DSI_MPPLL_PREDIV,
- (txdiv0 << 3) | (txdiv1 << 5));
+ mtk_phy_update_bits(base + MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ FIELD_PREP(RG_DSI_MPPLL_TXDIV0, txdiv0) |
+ FIELD_PREP(RG_DSI_MPPLL_TXDIV1, txdiv1));
/*
* PLL PCW config
@@ -182,23 +193,20 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
* Post DIV =4, so need data_Rate*4
* Ref_clk is 26MHz
*/
- pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
- 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, 26000000);
+ writel(pcw, base + MIPITX_DSI_PLL_CON2);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_FRA_EN);
+ mtk_phy_set_bits(base + MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_FRA_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+ mtk_phy_set_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
usleep_range(20, 100);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
- RG_DSI_MPPLL_SDM_SSC_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_SSC_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE,
- mipi_tx->driver_data->mppll_preserve);
+ mtk_phy_update_field(base + MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
return 0;
}
@@ -206,31 +214,27 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
dev_dbg(mipi_tx->dev, "unprepare\n");
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_PLL_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
- RG_DSI_MPPLL_PRESERVE, 0);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_TOP, RG_DSI_MPPLL_PRESERVE);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
- RG_DSI_MPPLL_SDM_ISO_EN |
- RG_DSI_MPPLL_SDM_PWR_ON,
- RG_DSI_MPPLL_SDM_ISO_EN);
+ mtk_phy_update_bits(base + MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN | RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_LNT_HS_BIAS_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_TOP_CON, RG_DSI_LNT_HS_BIAS_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
- RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
- RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+ mtk_phy_clear_bits(base + MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
- RG_DSI_MPPLL_DIV_MSK);
+ mtk_phy_clear_bits(base + MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_DIV_MSK);
}
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -254,10 +258,10 @@ static void mtk_mipi_tx_power_on_signal(struct phy *phy)
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+ mtk_phy_set_bits(mipi_tx->regs + reg, RG_DSI_LNTx_LDOOUT_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
+ mtk_phy_clear_bits(mipi_tx->regs + MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
}
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
@@ -265,23 +269,23 @@ static void mtk_mipi_tx_power_off_signal(struct phy *phy)
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
u32 reg;
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
- RG_DSI_PAD_TIE_LOW_EN);
+ mtk_phy_set_bits(mipi_tx->regs + MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
- mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+ mtk_phy_clear_bits(mipi_tx->regs + reg, RG_DSI_LNTx_LDOOUT_EN);
}
const struct mtk_mipitx_data mt2701_mipitx_data = {
- .mppll_preserve = (3 << 8),
+ .mppll_preserve = 3,
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
};
const struct mtk_mipitx_data mt8173_mipitx_data = {
- .mppll_preserve = (0 << 8),
+ .mppll_preserve = 0,
.mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops,
.mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal,
.mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal,
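
Because MIPITX_DSI_PLL_TOP is now programmed through mtk_phy_update_field(), which applies FIELD_PREP() against RG_DSI_MPPLL_PRESERVE (GENMASK(15, 8)), the per-SoC mppll_preserve values drop their pre-shifted form: (3 << 8) becomes 3 and (0 << 8) becomes 0. A short host-side check, with local stand-ins for the kernel macros, that the two encodings land on the same register bits.

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define RG_DSI_MPPLL_PRESERVE GENMASK(15, 8)	/* as in the header above */

int main(void)
{
	/* mt2701: old driver data carried (3 << 8), new data carries plain 3 */
	uint32_t old_bits = (3u << 8) & RG_DSI_MPPLL_PRESERVE;
	uint32_t new_bits = FIELD_PREP(RG_DSI_MPPLL_PRESERVE, 3);

	assert(old_bits == new_bits && new_bits == 0x300u);
	return 0;
}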
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
index 99108426d57c..f021ec5a70e5 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi-mt8183.c
@@ -4,6 +4,7 @@
* Author: jitao.shi <jitao.shi@mediatek.com>
*/
+#include "phy-mtk-io.h"
#include "phy-mtk-mipi-dsi.h"
#define MIPITX_LANE_CON 0x000c
@@ -18,7 +19,7 @@
#define RG_DSI_PAD_TIEL_SEL BIT(8)
#define MIPITX_VOLTAGE_SEL 0x0010
-#define RG_DSI_HSTX_LDO_REF_SEL (0xf << 6)
+#define RG_DSI_HSTX_LDO_REF_SEL GENMASK(9, 6)
#define MIPITX_PLL_PWR 0x0028
#define MIPITX_PLL_CON0 0x002c
@@ -26,7 +27,7 @@
#define MIPITX_PLL_CON2 0x0034
#define MIPITX_PLL_CON3 0x0038
#define MIPITX_PLL_CON4 0x003c
-#define RG_DSI_PLL_IBIAS (3 << 10)
+#define RG_DSI_PLL_IBIAS GENMASK(11, 10)
#define MIPITX_D2P_RTCODE 0x0100
#define MIPITX_D2_SW_CTL_EN 0x0144
@@ -41,11 +42,12 @@
#define AD_DSI_PLL_SDM_ISO_EN BIT(1)
#define RG_DSI_PLL_EN BIT(4)
-#define RG_DSI_PLL_POSDIV (0x7 << 8)
+#define RG_DSI_PLL_POSDIV GENMASK(10, 8)
static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
unsigned int txdiv, txdiv0;
u64 pcw;
@@ -70,17 +72,16 @@ static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
return -EINVAL;
}
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
+ mtk_phy_clear_bits(base + MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ mtk_phy_set_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_phy_clear_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
udelay(1);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_phy_clear_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000);
- writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV,
- txdiv0 << 8);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ writel(pcw, base + MIPITX_PLL_CON0);
+ mtk_phy_update_field(base + MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV, txdiv0);
+ mtk_phy_set_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
return 0;
}
@@ -88,11 +89,12 @@ static int mtk_mipi_tx_pll_enable(struct clk_hw *hw)
static void mtk_mipi_tx_pll_disable(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ void __iomem *base = mipi_tx->regs;
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN);
+ mtk_phy_clear_bits(base + MIPITX_PLL_CON1, RG_DSI_PLL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
+ mtk_phy_set_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN);
+ mtk_phy_clear_bits(base + MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON);
}
static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -121,7 +123,7 @@ static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
mipi_tx->rt_code[i] |= 0x10 << 5;
for (j = 0; j < 10; j++)
- mtk_mipi_tx_update_bits(mipi_tx,
+ mtk_phy_update_bits(mipi_tx->regs +
MIPITX_D2P_RTCODE * (i + 1) + j * 4,
1, mipi_tx->rt_code[i] >> j & 1);
}
@@ -130,44 +132,42 @@ static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ void __iomem *base = mipi_tx->regs;
/* BG_LPF_EN / BG_CORE_EN */
- writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
- mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, base + MIPITX_LANE_CON);
usleep_range(30, 100);
- writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN,
- mipi_tx->regs + MIPITX_LANE_CON);
+ writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN, base + MIPITX_LANE_CON);
/* Switch OFF each Lane */
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_clear_bits(base + MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL,
- RG_DSI_HSTX_LDO_REF_SEL,
- (mipi_tx->mipitx_drive - 3000) / 200 << 6);
+ mtk_phy_update_field(base + MIPITX_VOLTAGE_SEL, RG_DSI_HSTX_LDO_REF_SEL,
+ (mipi_tx->mipitx_drive - 3000) / 200);
mtk_mipi_tx_config_calibration_data(mipi_tx);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
+ mtk_phy_set_bits(base + MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
}
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ void __iomem *base = mipi_tx->regs;
/* Switch ON each Lane */
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
- mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
-
- writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN,
- mipi_tx->regs + MIPITX_LANE_CON);
- writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON);
+ mtk_phy_set_bits(base + MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_phy_set_bits(base + MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+
+ writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, base + MIPITX_LANE_CON);
+ writel(RG_DSI_PAD_TIEL_SEL, base + MIPITX_LANE_CON);
}
const struct mtk_mipitx_data mt8183_mipitx_data = {
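
mtk_mipi_tx_pll_enable() above programs MIPITX_PLL_CON0 with pcw = div_u64(((u64)data_rate * txdiv) << 24, 26000000): the feedback word as a fixed-point ratio of the target rate to the 26 MHz reference, with 24 fractional bits. A tiny host-side arithmetic sketch of that computation; the 1 GHz data rate and txdiv = 1 below are assumptions for illustration, not values taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t data_rate = 1000000000ULL;	/* assumed, not from the patch */
	uint64_t txdiv = 1;			/* assumed, not from the patch */

	/* integer-plus-24-bit-fraction multiplier against the 26 MHz reference */
	uint64_t pcw = ((data_rate * txdiv) << 24) / 26000000ULL;

	printf("pcw = %llu (0x%llx), integer part %llu\n",
	       (unsigned long long)pcw, (unsigned long long)pcw,
	       (unsigned long long)(pcw >> 24));
	return 0;
}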
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 28506932bd91..cf9c386385bb 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -10,30 +10,6 @@ inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
return container_of(hw, struct mtk_mipi_tx, pll_hw);
}
-void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
-{
- u32 temp = readl(mipi_tx->regs + offset);
-
- writel(temp & ~bits, mipi_tx->regs + offset);
-}
-
-void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 bits)
-{
- u32 temp = readl(mipi_tx->regs + offset);
-
- writel(temp | bits, mipi_tx->regs + offset);
-}
-
-void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
- u32 mask, u32 data)
-{
- u32 temp = readl(mipi_tx->regs + offset);
-
- writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
-}
-
int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
index c76f07c3fdeb..47b60b1a7226 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
@@ -10,7 +10,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
@@ -37,10 +36,6 @@ struct mtk_mipi_tx {
};
struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
-void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
-void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits);
-void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask,
- u32 data);
int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/phy/mediatek/phy-mtk-pcie.c b/drivers/phy/mediatek/phy-mtk-pcie.c
index 7f29d43442bf..25dbd6e35722 100644
--- a/drivers/phy/mediatek/phy-mtk-pcie.c
+++ b/drivers/phy/mediatek/phy-mtk-pcie.c
@@ -89,14 +89,14 @@ static void mtk_pcie_efuse_set_lane(struct mtk_pcie_phy *pcie_phy,
addr = pcie_phy->sif_base + PEXTP_ANA_LN0_TRX_REG +
lane * PEXTP_ANA_LANE_OFFSET;
- mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL,
- FIELD_PREP(EFUSE_LN_TX_PMOS_SEL, data->tx_pmos));
+ mtk_phy_update_field(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_PMOS_SEL,
+ data->tx_pmos);
- mtk_phy_update_bits(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL,
- FIELD_PREP(EFUSE_LN_TX_NMOS_SEL, data->tx_nmos));
+ mtk_phy_update_field(addr + PEXTP_ANA_TX_REG, EFUSE_LN_TX_NMOS_SEL,
+ data->tx_nmos);
- mtk_phy_update_bits(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL,
- FIELD_PREP(EFUSE_LN_RX_SEL, data->rx_data));
+ mtk_phy_update_field(addr + PEXTP_ANA_RX_REG, EFUSE_LN_RX_SEL,
+ data->rx_data);
}
/**
@@ -116,9 +116,8 @@ static int mtk_pcie_phy_init(struct phy *phy)
return 0;
/* Set global data */
- mtk_phy_update_bits(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG,
- EFUSE_GLB_INTR_SEL,
- FIELD_PREP(EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr));
+ mtk_phy_update_field(pcie_phy->sif_base + PEXTP_ANA_GLB_00_REG,
+ EFUSE_GLB_INTR_SEL, pcie_phy->efuse_glb_intr);
for (i = 0; i < pcie_phy->data->num_lanes; i++)
mtk_pcie_efuse_set_lane(pcie_phy, i);
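
In the tphy conversion that follows, writes touching several fields of one register keep a single mtk_phy_update_bits() call: the masks are OR-ed together and each value goes through its own FIELD_PREP(), as in the U3P_U3_PHYD_CDR1 update below. A host-side sketch of that composition using the P3D_RG_CDR_BIR_LTD0/LTD1 layouts and the 0xc/0x3 values from that hunk; the macros are local stand-ins for the kernel ones.

#include <assert.h>
#include <stdint.h>

#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

#define P3D_RG_CDR_BIR_LTD1  GENMASK(28, 24)	/* layouts as in the tphy header */
#define P3D_RG_CDR_BIR_LTD0  GENMASK(12, 8)

static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t cdr1 = 0;

	/* one read-modify-write covering both fields, as in the CDR1 update */
	update_bits(&cdr1,
		    P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1,
		    FIELD_PREP(P3D_RG_CDR_BIR_LTD0, 0xc) |
		    FIELD_PREP(P3D_RG_CDR_BIR_LTD1, 0x3));

	assert(cdr1 == ((0xcu << 8) | (0x3u << 24)));
	return 0;
}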
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 8ee7682b8e93..e906a82791bd 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -49,35 +49,28 @@
#define U3P_USBPHYACR0 0x000
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
#define PA0_USB20_PLL_PREDIV GENMASK(7, 6)
-#define PA0_USB20_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6)
#define PA0_RG_USB20_INTR_EN BIT(5)
#define U3P_USBPHYACR1 0x004
#define PA1_RG_INTR_CAL GENMASK(23, 19)
-#define PA1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19)
#define PA1_RG_VRT_SEL GENMASK(14, 12)
-#define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
#define PA1_RG_TERM_SEL GENMASK(10, 8)
-#define PA1_RG_TERM_SEL_VAL(x) ((0x7 & (x)) << 8)
#define U3P_USBPHYACR2 0x008
#define PA2_RG_U2PLL_BW GENMASK(21, 19)
-#define PA2_RG_U2PLL_BW_VAL(x) ((0x7 & (x)) << 19)
#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18)
#define U3P_USBPHYACR5 0x014
#define PA5_RG_U2_HSTX_SRCAL_EN BIT(15)
#define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12)
-#define PA5_RG_U2_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12)
#define PA5_RG_U2_HS_100U_U3_EN BIT(11)
#define U3P_USBPHYACR6 0x018
+#define PA6_RG_U2_PRE_EMP GENMASK(31, 30)
#define PA6_RG_U2_BC11_SW_EN BIT(23)
#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20)
#define PA6_RG_U2_DISCTH GENMASK(7, 4)
-#define PA6_RG_U2_DISCTH_VAL(x) ((0xf & (x)) << 4)
#define PA6_RG_U2_SQTH GENMASK(3, 0)
-#define PA6_RG_U2_SQTH_VAL(x) (0xf & (x))
#define U3P_U2PHYACR4 0x020
#define P2C_RG_USB20_GPIO_CTL BIT(9)
@@ -104,11 +97,9 @@
#define P2C_FORCE_SUSPENDM BIT(18)
#define P2C_FORCE_TERMSEL BIT(17)
#define P2C_RG_DATAIN GENMASK(13, 10)
-#define P2C_RG_DATAIN_VAL(x) ((0xf & (x)) << 10)
#define P2C_RG_DMPULLDOWN BIT(7)
#define P2C_RG_DPPULLDOWN BIT(6)
#define P2C_RG_XCVRSEL GENMASK(5, 4)
-#define P2C_RG_XCVRSEL_VAL(x) ((0x3 & (x)) << 4)
#define P2C_RG_SUSPENDM BIT(3)
#define P2C_RG_TERMSEL BIT(2)
#define P2C_DTM0_PART_MASK \
@@ -139,87 +130,65 @@
#define U3P_U3_PHYA_REG0 0x000
#define P3A_RG_IEXT_INTR GENMASK(15, 10)
-#define P3A_RG_IEXT_INTR_VAL(x) ((0x3f & (x)) << 10)
#define P3A_RG_CLKDRV_OFF GENMASK(3, 2)
-#define P3A_RG_CLKDRV_OFF_VAL(x) ((0x3 & (x)) << 2)
#define U3P_U3_PHYA_REG1 0x004
#define P3A_RG_CLKDRV_AMP GENMASK(31, 29)
-#define P3A_RG_CLKDRV_AMP_VAL(x) ((0x7 & (x)) << 29)
#define U3P_U3_PHYA_REG6 0x018
#define P3A_RG_TX_EIDLE_CM GENMASK(31, 28)
-#define P3A_RG_TX_EIDLE_CM_VAL(x) ((0xf & (x)) << 28)
#define U3P_U3_PHYA_REG9 0x024
#define P3A_RG_RX_DAC_MUX GENMASK(5, 1)
-#define P3A_RG_RX_DAC_MUX_VAL(x) ((0x1f & (x)) << 1)
#define U3P_U3_PHYA_DA_REG0 0x100
#define P3A_RG_XTAL_EXT_PE2H GENMASK(17, 16)
-#define P3A_RG_XTAL_EXT_PE2H_VAL(x) ((0x3 & (x)) << 16)
#define P3A_RG_XTAL_EXT_PE1H GENMASK(13, 12)
-#define P3A_RG_XTAL_EXT_PE1H_VAL(x) ((0x3 & (x)) << 12)
#define P3A_RG_XTAL_EXT_EN_U3 GENMASK(11, 10)
-#define P3A_RG_XTAL_EXT_EN_U3_VAL(x) ((0x3 & (x)) << 10)
#define U3P_U3_PHYA_DA_REG4 0x108
#define P3A_RG_PLL_DIVEN_PE2H GENMASK(21, 19)
#define P3A_RG_PLL_BC_PE2H GENMASK(7, 6)
-#define P3A_RG_PLL_BC_PE2H_VAL(x) ((0x3 & (x)) << 6)
#define U3P_U3_PHYA_DA_REG5 0x10c
#define P3A_RG_PLL_BR_PE2H GENMASK(29, 28)
-#define P3A_RG_PLL_BR_PE2H_VAL(x) ((0x3 & (x)) << 28)
#define P3A_RG_PLL_IC_PE2H GENMASK(15, 12)
-#define P3A_RG_PLL_IC_PE2H_VAL(x) ((0xf & (x)) << 12)
#define U3P_U3_PHYA_DA_REG6 0x110
#define P3A_RG_PLL_IR_PE2H GENMASK(19, 16)
-#define P3A_RG_PLL_IR_PE2H_VAL(x) ((0xf & (x)) << 16)
#define U3P_U3_PHYA_DA_REG7 0x114
#define P3A_RG_PLL_BP_PE2H GENMASK(19, 16)
-#define P3A_RG_PLL_BP_PE2H_VAL(x) ((0xf & (x)) << 16)
#define U3P_U3_PHYA_DA_REG20 0x13c
#define P3A_RG_PLL_DELTA1_PE2H GENMASK(31, 16)
-#define P3A_RG_PLL_DELTA1_PE2H_VAL(x) ((0xffff & (x)) << 16)
#define U3P_U3_PHYA_DA_REG25 0x148
#define P3A_RG_PLL_DELTA_PE2H GENMASK(15, 0)
-#define P3A_RG_PLL_DELTA_PE2H_VAL(x) (0xffff & (x))
#define U3P_U3_PHYD_LFPS1 0x00c
#define P3D_RG_FWAKE_TH GENMASK(21, 16)
-#define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16)
#define U3P_U3_PHYD_IMPCAL0 0x010
#define P3D_RG_FORCE_TX_IMPEL BIT(31)
#define P3D_RG_TX_IMPEL GENMASK(28, 24)
-#define P3D_RG_TX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
#define U3P_U3_PHYD_IMPCAL1 0x014
#define P3D_RG_FORCE_RX_IMPEL BIT(31)
#define P3D_RG_RX_IMPEL GENMASK(28, 24)
-#define P3D_RG_RX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
#define U3P_U3_PHYD_RSV 0x054
#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12)
#define U3P_U3_PHYD_CDR1 0x05c
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
-#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8)
-#define P3D_RG_CDR_BIR_LTD0_VAL(x) ((0x1f & (x)) << 8)
#define U3P_U3_PHYD_RXDET1 0x128
#define P3D_RG_RXDET_STB2_SET GENMASK(17, 9)
-#define P3D_RG_RXDET_STB2_SET_VAL(x) ((0x1ff & (x)) << 9)
#define U3P_U3_PHYD_RXDET2 0x12c
#define P3D_RG_RXDET_STB2_SET_P3 GENMASK(8, 0)
-#define P3D_RG_RXDET_STB2_SET_P3_VAL(x) (0x1ff & (x))
#define U3P_SPLLC_XTALCTL3 0x018
#define XC3_RG_U3_XTAL_RX_PWD BIT(9)
@@ -227,10 +196,8 @@
#define U3P_U2FREQ_FMCR0 0x00
#define P2F_RG_MONCLK_SEL GENMASK(27, 26)
-#define P2F_RG_MONCLK_SEL_VAL(x) ((0x3 & (x)) << 26)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
-#define P2F_RG_CYCLECNT_VAL(x) ((P2F_RG_CYCLECNT) & (x))
#define U3P_U2FREQ_VALUE 0x0c
@@ -247,60 +214,45 @@
#define PHYD_CTRL_SIGNAL_MODE4 0x1c
/* CDR Charge Pump P-path current adjustment */
#define RG_CDR_BICLTD1_GEN1_MSK GENMASK(23, 20)
-#define RG_CDR_BICLTD1_GEN1_VAL(x) ((0xf & (x)) << 20)
#define RG_CDR_BICLTD0_GEN1_MSK GENMASK(11, 8)
-#define RG_CDR_BICLTD0_GEN1_VAL(x) ((0xf & (x)) << 8)
#define PHYD_DESIGN_OPTION2 0x24
/* Symbol lock count selection */
#define RG_LOCK_CNT_SEL_MSK GENMASK(5, 4)
-#define RG_LOCK_CNT_SEL_VAL(x) ((0x3 & (x)) << 4)
#define PHYD_DESIGN_OPTION9 0x40
/* COMWAK GAP width window */
#define RG_TG_MAX_MSK GENMASK(20, 16)
-#define RG_TG_MAX_VAL(x) ((0x1f & (x)) << 16)
/* COMINIT GAP width window */
#define RG_T2_MAX_MSK GENMASK(13, 8)
-#define RG_T2_MAX_VAL(x) ((0x3f & (x)) << 8)
/* COMWAK GAP width window */
#define RG_TG_MIN_MSK GENMASK(7, 5)
-#define RG_TG_MIN_VAL(x) ((0x7 & (x)) << 5)
/* COMINIT GAP width window */
#define RG_T2_MIN_MSK GENMASK(4, 0)
-#define RG_T2_MIN_VAL(x) (0x1f & (x))
#define ANA_RG_CTRL_SIGNAL1 0x4c
/* TX driver tail current control for 0dB de-emphasis mode for Gen1 speed */
#define RG_IDRV_0DB_GEN1_MSK GENMASK(13, 8)
-#define RG_IDRV_0DB_GEN1_VAL(x) ((0x3f & (x)) << 8)
#define ANA_RG_CTRL_SIGNAL4 0x58
#define RG_CDR_BICLTR_GEN1_MSK GENMASK(23, 20)
-#define RG_CDR_BICLTR_GEN1_VAL(x) ((0xf & (x)) << 20)
/* Loop filter R1 resistance adjustment for Gen1 speed */
#define RG_CDR_BR_GEN2_MSK GENMASK(10, 8)
-#define RG_CDR_BR_GEN2_VAL(x) ((0x7 & (x)) << 8)
#define ANA_RG_CTRL_SIGNAL6 0x60
/* I-path capacitance adjustment for Gen1 */
#define RG_CDR_BC_GEN1_MSK GENMASK(28, 24)
-#define RG_CDR_BC_GEN1_VAL(x) ((0x1f & (x)) << 24)
#define RG_CDR_BIRLTR_GEN1_MSK GENMASK(4, 0)
-#define RG_CDR_BIRLTR_GEN1_VAL(x) (0x1f & (x))
#define ANA_EQ_EYE_CTRL_SIGNAL1 0x6c
/* RX Gen1 LEQ tuning step */
#define RG_EQ_DLEQ_LFI_GEN1_MSK GENMASK(11, 8)
-#define RG_EQ_DLEQ_LFI_GEN1_VAL(x) ((0xf & (x)) << 8)
#define ANA_EQ_EYE_CTRL_SIGNAL4 0xd8
#define RG_CDR_BIRLTD0_GEN1_MSK GENMASK(20, 16)
-#define RG_CDR_BIRLTD0_GEN1_VAL(x) ((0x1f & (x)) << 16)
#define ANA_EQ_EYE_CTRL_SIGNAL5 0xdc
#define RG_CDR_BIRLTD0_GEN3_MSK GENMASK(4, 0)
-#define RG_CDR_BIRLTD0_GEN3_VAL(x) (0x1f & (x))
/* PHY switch between pcie/usb3/sgmii/sata */
#define USB_PHY_SWITCH_CTRL 0x0
@@ -370,6 +322,7 @@ struct mtk_phy_instance {
int eye_term;
int intr;
int discth;
+ int pre_emphasis;
bool bc12_en;
};
@@ -411,9 +364,9 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
/* set cycle count as 1024, and select u2 channel */
tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
tmp &= ~(P2F_RG_CYCLECNT | P2F_RG_MONCLK_SEL);
- tmp |= P2F_RG_CYCLECNT_VAL(U3P_FM_DET_CYCLE_CNT);
+ tmp |= FIELD_PREP(P2F_RG_CYCLECNT, U3P_FM_DET_CYCLE_CNT);
if (tphy->pdata->version == MTK_PHY_V1)
- tmp |= P2F_RG_MONCLK_SEL_VAL(instance->index >> 1);
+ tmp |= FIELD_PREP(P2F_RG_MONCLK_SEL, instance->index >> 1);
writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
@@ -446,8 +399,8 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
tphy->src_ref_clk, tphy->src_coef);
/* set HS slew rate */
- mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
- PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val));
+ mtk_phy_update_field(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ calibration_val);
/* disable USB ring oscillator */
mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
@@ -457,33 +410,30 @@ static void u3_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
+ void __iomem *phya = u3_banks->phya;
+ void __iomem *phyd = u3_banks->phyd;
/* gating PCIe Analog XTAL clock */
mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
/* gating XSQ */
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_DA_REG0,
- P3A_RG_XTAL_EXT_EN_U3, P3A_RG_XTAL_EXT_EN_U3_VAL(2));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG0, P3A_RG_XTAL_EXT_EN_U3, 2);
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG9,
- P3A_RG_RX_DAC_MUX, P3A_RG_RX_DAC_MUX_VAL(4));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG9, P3A_RG_RX_DAC_MUX, 4);
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG6,
- P3A_RG_TX_EIDLE_CM, P3A_RG_TX_EIDLE_CM_VAL(0xe));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG6, P3A_RG_TX_EIDLE_CM, 0xe);
mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_CDR1,
P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1,
- P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3));
+ FIELD_PREP(P3D_RG_CDR_BIR_LTD0, 0xc) |
+ FIELD_PREP(P3D_RG_CDR_BIR_LTD1, 0x3));
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_LFPS1,
- P3D_RG_FWAKE_TH, P3D_RG_FWAKE_TH_VAL(0x34));
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_LFPS1, P3D_RG_FWAKE_TH, 0x34);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
- P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_RXDET1, P3D_RG_RXDET_STB2_SET, 0x10);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
- P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
+ mtk_phy_update_field(phyd + U3P_U3_PHYD_RXDET2, P3D_RG_RXDET_STB2_SET_P3, 0x10);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -497,11 +447,9 @@ static void u2_phy_pll_26m_set(struct mtk_tphy *tphy,
if (!tphy->pdata->sw_pll_48m_to_26m)
return;
- mtk_phy_update_bits(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV,
- PA0_USB20_PLL_PREDIV_VAL(0));
+ mtk_phy_update_field(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV, 0);
- mtk_phy_update_bits(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW,
- PA2_RG_U2PLL_BW_VAL(3));
+ mtk_phy_update_field(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW, 3);
writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV);
@@ -519,8 +467,8 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
/* switch to USB function, and enable usb pll */
mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
- mtk_phy_update_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN,
- P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0));
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
+ P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_UART_EN);
@@ -529,8 +477,7 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
/* disable switch 100uA current to SSUSB */
mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HS_100U_U3_EN);
- if (!index)
- mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
+ mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
if (tphy->pdata->avoid_rx_sen_degradation) {
if (!index) {
@@ -548,7 +495,7 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
/* DP/DM BC1.1 path Disable */
mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_BC11_SW_EN);
- mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, PA6_RG_U2_SQTH_VAL(2));
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, 2);
/* Workaround only for mt8195, HW fix it for others (V3) */
u2_phy_pll_26m_set(tphy, instance);
@@ -563,9 +510,6 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy,
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
- P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
-
/* OTG Enable */
mtk_phy_set_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
@@ -588,8 +532,6 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN);
-
/* OTG Disable */
mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
@@ -656,43 +598,39 @@ static void pcie_phy_instance_init(struct mtk_tphy *tphy,
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG0,
P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H,
- P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2));
+ FIELD_PREP(P3A_RG_XTAL_EXT_PE1H, 0x2) |
+ FIELD_PREP(P3A_RG_XTAL_EXT_PE2H, 0x2));
/* ref clk drive */
- mtk_phy_update_bits(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP,
- P3A_RG_CLKDRV_AMP_VAL(0x4));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP, 0x4);
- mtk_phy_update_bits(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF,
- P3A_RG_CLKDRV_OFF_VAL(0x1));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF, 0x1);
/* SSC delta -5000ppm */
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H,
- P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H, 0x3c);
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H,
- P3A_RG_PLL_DELTA_PE2H_VAL(0x36));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H, 0x36);
/* change pll BW 0.6M */
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG5,
P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H,
- P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1));
+ FIELD_PREP(P3A_RG_PLL_BR_PE2H, 0x1) |
+ FIELD_PREP(P3A_RG_PLL_IC_PE2H, 0x1));
mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG4,
P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H,
- P3A_RG_PLL_BC_PE2H_VAL(0x3));
+ FIELD_PREP(P3A_RG_PLL_BC_PE2H, 0x3));
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H,
- P3A_RG_PLL_IR_PE2H_VAL(0x2));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H, 0x2);
- mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H,
- P3A_RG_PLL_BP_PE2H_VAL(0xa));
+ mtk_phy_update_field(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H, 0xa);
/* Tx Detect Rx Timing: 10us -> 5us */
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
- P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
+ P3D_RG_RXDET_STB2_SET, 0x10);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
- P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
+ P3D_RG_RXDET_STB2_SET_P3, 0x10);
/* wait for PCIe subsys register to active */
usleep_range(2500, 3000);
@@ -733,38 +671,38 @@ static void sata_phy_instance_init(struct mtk_tphy *tphy,
/* charge current adjustment */
mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL6,
RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK,
- RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a));
+ FIELD_PREP(RG_CDR_BIRLTR_GEN1_MSK, 0x6) |
+ FIELD_PREP(RG_CDR_BC_GEN1_MSK, 0x1a));
- mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK,
- RG_CDR_BIRLTD0_GEN1_VAL(0x18));
+ mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK, 0x18);
- mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK,
- RG_CDR_BIRLTD0_GEN3_VAL(0x06));
+ mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK, 0x06);
mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL4,
RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK,
- RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07));
+ FIELD_PREP(RG_CDR_BICLTR_GEN1_MSK, 0x0c) |
+ FIELD_PREP(RG_CDR_BR_GEN2_MSK, 0x07));
mtk_phy_update_bits(phyd + PHYD_CTRL_SIGNAL_MODE4,
RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK,
- RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02));
+ FIELD_PREP(RG_CDR_BICLTD0_GEN1_MSK, 0x08) |
+ FIELD_PREP(RG_CDR_BICLTD1_GEN1_MSK, 0x02));
- mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK,
- RG_LOCK_CNT_SEL_VAL(0x02));
+ mtk_phy_update_field(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK, 0x02);
mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
RG_T2_MIN_MSK | RG_TG_MIN_MSK,
- RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04));
+ FIELD_PREP(RG_T2_MIN_MSK, 0x12) |
+ FIELD_PREP(RG_TG_MIN_MSK, 0x04));
mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
RG_T2_MAX_MSK | RG_TG_MAX_MSK,
- RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e));
+ FIELD_PREP(RG_T2_MAX_MSK, 0x31) |
+ FIELD_PREP(RG_TG_MAX_MSK, 0x0e));
- mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK,
- RG_IDRV_0DB_GEN1_VAL(0x20));
+ mtk_phy_update_field(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK, 0x20);
- mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK,
- RG_EQ_DLEQ_LFI_GEN1_VAL(0x03));
+ mtk_phy_update_field(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK, 0x03);
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -841,10 +779,13 @@ static void phy_parse_property(struct mtk_tphy *tphy,
&instance->intr);
device_property_read_u32(dev, "mediatek,discth",
&instance->discth);
+ device_property_read_u32(dev, "mediatek,pre-emphasis",
+ &instance->pre_emphasis);
dev_dbg(dev, "bc12:%d, src:%d, vrt:%d, term:%d, intr:%d, disc:%d\n",
instance->bc12_en, instance->eye_src,
instance->eye_vrt, instance->eye_term,
instance->intr, instance->discth);
+ dev_dbg(dev, "pre-emp:%d\n", instance->pre_emphasis);
}
static void u2_phy_props_set(struct mtk_tphy *tphy,
@@ -857,24 +798,33 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
mtk_phy_set_bits(com + U3P_U2PHYBC12C, P2C_RG_CHGDT_EN);
if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src)
- mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
- PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src));
+ mtk_phy_update_field(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ instance->eye_src);
if (instance->eye_vrt)
- mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
- PA1_RG_VRT_SEL_VAL(instance->eye_vrt));
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
+ instance->eye_vrt);
if (instance->eye_term)
- mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
- PA1_RG_TERM_SEL_VAL(instance->eye_term));
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
+ instance->eye_term);
- if (instance->intr)
- mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
- PA1_RG_INTR_CAL_VAL(instance->intr));
+ if (instance->intr) {
+ if (u2_banks->misc)
+ mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1,
+ MR1_EFUSE_AUTO_LOAD_DIS);
+
+ mtk_phy_update_field(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ instance->intr);
+ }
if (instance->discth)
- mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
- PA6_RG_U2_DISCTH_VAL(instance->discth));
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
+ instance->discth);
+
+ if (instance->pre_emphasis)
+ mtk_phy_update_field(com + U3P_USBPHYACR6, PA6_RG_U2_PRE_EMP,
+ instance->pre_emphasis);
}
/* type switch for usb3/pcie/sgmii/sata */
@@ -906,7 +856,7 @@ static int phy_type_syscon_get(struct mtk_phy_instance *instance,
static int phy_type_set(struct mtk_phy_instance *instance)
{
int type;
- u32 mask;
+ u32 offset;
if (!instance->type_sw)
return 0;
@@ -929,8 +879,9 @@ static int phy_type_set(struct mtk_phy_instance *instance)
return 0;
}
- mask = RG_PHY_SW_TYPE << (instance->type_sw_index * BITS_PER_BYTE);
- regmap_update_bits(instance->type_sw, instance->type_sw_reg, mask, type);
+ offset = instance->type_sw_index * BITS_PER_BYTE;
+ regmap_update_bits(instance->type_sw, instance->type_sw_reg,
+ RG_PHY_SW_TYPE << offset, type << offset);
return 0;
}
@@ -1022,23 +973,23 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
case PHY_TYPE_USB2:
mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1, MR1_EFUSE_AUTO_LOAD_DIS);
- mtk_phy_update_bits(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
- PA1_RG_INTR_CAL_VAL(instance->efuse_intr));
+ mtk_phy_update_field(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ instance->efuse_intr);
break;
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_RSV, P3D_RG_EFUSE_AUTO_LOAD_DIS);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
- P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
+ instance->efuse_tx_imp);
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
- mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
- P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp));
+ mtk_phy_update_field(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
+ instance->efuse_rx_imp);
mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
- mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
- P3A_RG_IEXT_INTR_VAL(instance->efuse_intr));
+ mtk_phy_update_field(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
+ instance->efuse_intr);
break;
default:
dev_warn(dev, "no sw efuse for type %d\n", instance->type);
diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c
index a6af06941203..fc19e0fa8ed5 100644
--- a/drivers/phy/mediatek/phy-mtk-ufs.c
+++ b/drivers/phy/mediatek/phy-mtk-ufs.c
@@ -11,6 +11,8 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include "phy-mtk-io.h"
+
/* mphy register and offsets */
#define MP_GLB_DIG_8C 0x008C
#define FRC_PLL_ISO_EN BIT(8)
@@ -39,34 +41,6 @@ struct ufs_mtk_phy {
struct clk_bulk_data clks[UFSPHY_CLKS_CNT];
};
-static inline u32 mphy_readl(struct ufs_mtk_phy *phy, u32 reg)
-{
- return readl(phy->mmio + reg);
-}
-
-static inline void mphy_writel(struct ufs_mtk_phy *phy, u32 val, u32 reg)
-{
- writel(val, phy->mmio + reg);
-}
-
-static void mphy_set_bit(struct ufs_mtk_phy *phy, u32 reg, u32 bit)
-{
- u32 val;
-
- val = mphy_readl(phy, reg);
- val |= bit;
- mphy_writel(phy, val, reg);
-}
-
-static void mphy_clr_bit(struct ufs_mtk_phy *phy, u32 reg, u32 bit)
-{
- u32 val;
-
- val = mphy_readl(phy, reg);
- val &= ~bit;
- mphy_writel(phy, val, reg);
-}
-
static struct ufs_mtk_phy *get_ufs_mtk_phy(struct phy *generic_phy)
{
return (struct ufs_mtk_phy *)phy_get_drvdata(generic_phy);
@@ -84,57 +58,61 @@ static int ufs_mtk_phy_clk_init(struct ufs_mtk_phy *phy)
static void ufs_mtk_phy_set_active(struct ufs_mtk_phy *phy)
{
+ void __iomem *mmio = phy->mmio;
+
/* release DA_MP_PLL_PWR_ON */
- mphy_set_bit(phy, MP_GLB_DIG_8C, PLL_PWR_ON);
- mphy_clr_bit(phy, MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, PLL_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
/* release DA_MP_PLL_ISO_EN */
- mphy_clr_bit(phy, MP_GLB_DIG_8C, PLL_ISO_EN);
- mphy_clr_bit(phy, MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, PLL_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
/* release DA_MP_CDR_PWR_ON */
- mphy_set_bit(phy, MP_LN_RX_44, CDR_PWR_ON);
- mphy_clr_bit(phy, MP_LN_RX_44, FRC_CDR_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, CDR_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, FRC_CDR_PWR_ON);
/* release DA_MP_CDR_ISO_EN */
- mphy_clr_bit(phy, MP_LN_RX_44, CDR_ISO_EN);
- mphy_clr_bit(phy, MP_LN_RX_44, FRC_CDR_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, CDR_ISO_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, FRC_CDR_ISO_EN);
/* release DA_MP_RX0_SQ_EN */
- mphy_set_bit(phy, MP_LN_DIG_RX_AC, RX_SQ_EN);
- mphy_clr_bit(phy, MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
+ mtk_phy_set_bits(mmio + MP_LN_DIG_RX_AC, RX_SQ_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
/* delay 1us to wait DIFZ stable */
udelay(1);
/* release DIFZ */
- mphy_clr_bit(phy, MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
+ mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
}
static void ufs_mtk_phy_set_deep_hibern(struct ufs_mtk_phy *phy)
{
+ void __iomem *mmio = phy->mmio;
+
/* force DIFZ */
- mphy_set_bit(phy, MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
+ mtk_phy_set_bits(mmio + MP_LN_DIG_RX_9C, FSM_DIFZ_FRC);
/* force DA_MP_RX0_SQ_EN */
- mphy_set_bit(phy, MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
- mphy_clr_bit(phy, MP_LN_DIG_RX_AC, RX_SQ_EN);
+ mtk_phy_set_bits(mmio + MP_LN_DIG_RX_AC, FRC_RX_SQ_EN);
+ mtk_phy_clear_bits(mmio + MP_LN_DIG_RX_AC, RX_SQ_EN);
/* force DA_MP_CDR_ISO_EN */
- mphy_set_bit(phy, MP_LN_RX_44, FRC_CDR_ISO_EN);
- mphy_set_bit(phy, MP_LN_RX_44, CDR_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, FRC_CDR_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, CDR_ISO_EN);
/* force DA_MP_CDR_PWR_ON */
- mphy_set_bit(phy, MP_LN_RX_44, FRC_CDR_PWR_ON);
- mphy_clr_bit(phy, MP_LN_RX_44, CDR_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_LN_RX_44, FRC_CDR_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_LN_RX_44, CDR_PWR_ON);
/* force DA_MP_PLL_ISO_EN */
- mphy_set_bit(phy, MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
- mphy_set_bit(phy, MP_GLB_DIG_8C, PLL_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, FRC_PLL_ISO_EN);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, PLL_ISO_EN);
/* force DA_MP_PLL_PWR_ON */
- mphy_set_bit(phy, MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
- mphy_clr_bit(phy, MP_GLB_DIG_8C, PLL_PWR_ON);
+ mtk_phy_set_bits(mmio + MP_GLB_DIG_8C, FRC_FRC_PWR_ON);
+ mtk_phy_clear_bits(mmio + MP_GLB_DIG_8C, PLL_PWR_ON);
}
static int ufs_mtk_phy_power_on(struct phy *generic_phy)
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index c0cdb78f77fa..b222fbbd71d1 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -37,7 +37,6 @@
#define XSP_U2FREQ_FMCR0 ((SSUSB_SIFSLV_U2FREQ) + 0x00)
#define P2F_RG_FREQDET_EN BIT(24)
#define P2F_RG_CYCLECNT GENMASK(23, 0)
-#define P2F_RG_CYCLECNT_VAL(x) ((P2F_RG_CYCLECNT) & (x))
#define XSP_U2FREQ_MMONR0 ((SSUSB_SIFSLV_U2FREQ) + 0x0c)
@@ -50,16 +49,12 @@
#define XSP_USBPHYACR1 ((SSUSB_SIFSLV_U2PHY_COM) + 0x04)
#define P2A1_RG_INTR_CAL GENMASK(23, 19)
-#define P2A1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19)
#define P2A1_RG_VRT_SEL GENMASK(14, 12)
-#define P2A1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
#define P2A1_RG_TERM_SEL GENMASK(10, 8)
-#define P2A1_RG_TERM_SEL_VAL(x) ((0x7 & (x)) << 8)
#define XSP_USBPHYACR5 ((SSUSB_SIFSLV_U2PHY_COM) + 0x014)
#define P2A5_RG_HSTX_SRCAL_EN BIT(15)
#define P2A5_RG_HSTX_SRCTRL GENMASK(14, 12)
-#define P2A5_RG_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12)
#define XSP_USBPHYACR6 ((SSUSB_SIFSLV_U2PHY_COM) + 0x018)
#define P2A6_RG_BC11_SW_EN BIT(23)
@@ -74,15 +69,12 @@
#define SSPXTP_PHYA_GLB_00 ((SSPXTP_SIFSLV_PHYA_GLB) + 0x00)
#define RG_XTP_GLB_BIAS_INTR_CTRL GENMASK(21, 16)
-#define RG_XTP_GLB_BIAS_INTR_CTRL_VAL(x) ((0x3f & (x)) << 16)
#define SSPXTP_PHYA_LN_04 ((SSPXTP_SIFSLV_PHYA_LN) + 0x04)
#define RG_XTP_LN0_TX_IMPSEL GENMASK(4, 0)
-#define RG_XTP_LN0_TX_IMPSEL_VAL(x) (0x1f & (x))
#define SSPXTP_PHYA_LN_14 ((SSPXTP_SIFSLV_PHYA_LN) + 0x014)
#define RG_XTP_LN0_RX_IMPSEL GENMASK(4, 0)
-#define RG_XTP_LN0_RX_IMPSEL_VAL(x) (0x1f & (x))
#define XSP_REF_CLK 26 /* MHZ */
#define XSP_SLEW_RATE_COEF 17
@@ -134,8 +126,8 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
mtk_phy_set_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024 */
- mtk_phy_update_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
- P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT));
+ mtk_phy_update_field(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
+ XSP_FM_DET_CYCLE_CNT);
/* enable frequency meter */
mtk_phy_set_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
@@ -166,8 +158,7 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
xsphy->src_ref_clk, xsphy->src_coef);
/* set HS slew rate */
- mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
- P2A5_RG_HSTX_SRCTRL_VAL(calib_val));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL, calib_val);
/* disable USB ring oscillator */
mtk_phy_clear_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
@@ -280,20 +271,20 @@ static void u2_phy_props_set(struct mtk_xsphy *xsphy,
void __iomem *pbase = inst->port_base;
if (inst->efuse_intr)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
- P2A1_RG_INTR_CAL_VAL(inst->efuse_intr));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
+ inst->efuse_intr);
if (inst->eye_src)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
- P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
+ inst->eye_src);
if (inst->eye_vrt)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
- P2A1_RG_VRT_SEL_VAL(inst->eye_vrt));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
+ inst->eye_vrt);
if (inst->eye_term)
- mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
- P2A1_RG_TERM_SEL_VAL(inst->eye_term));
+ mtk_phy_update_field(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
+ inst->eye_term);
}
static void u3_phy_props_set(struct mtk_xsphy *xsphy,
@@ -302,19 +293,16 @@ static void u3_phy_props_set(struct mtk_xsphy *xsphy,
void __iomem *pbase = inst->port_base;
if (inst->efuse_intr)
- mtk_phy_update_bits(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
- RG_XTP_GLB_BIAS_INTR_CTRL,
- RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr));
+ mtk_phy_update_field(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
+ RG_XTP_GLB_BIAS_INTR_CTRL, inst->efuse_intr);
if (inst->efuse_tx_imp)
- mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_04,
- RG_XTP_LN0_TX_IMPSEL,
- RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp));
+ mtk_phy_update_field(pbase + SSPXTP_PHYA_LN_04,
+ RG_XTP_LN0_TX_IMPSEL, inst->efuse_tx_imp);
if (inst->efuse_rx_imp)
- mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_14,
- RG_XTP_LN0_RX_IMPSEL,
- RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp));
+ mtk_phy_update_field(pbase + SSPXTP_PHYA_LN_14,
+ RG_XTP_LN0_RX_IMPSEL, inst->efuse_rx_imp);
}
static int mtk_phy_init(struct phy *phy)
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
index e86a879b92b5..c1a41b6cd29b 100644
--- a/drivers/phy/microchip/lan966x_serdes.c
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -42,7 +42,10 @@
#define SERDES_MUX_QSGMII(i, p, m, c) \
SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
#define SERDES_MUX_RGMII(i, p, m, c) \
- SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c)
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c), \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_TXID, m, c), \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_RXID, m, c), \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII_ID, m, c)
static void lan_rmw_(u32 val, u32 mask, void __iomem *mem, u32 offset)
{
@@ -94,21 +97,29 @@ static const struct serdes_mux lan966x_serdes_muxes[] = {
HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
- HSIO_HW_CFG_RGMII_ENA,
- HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_RGMII_0_CFG_SET(0) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(2))),
SERDES_MUX_RGMII(RGMII(1), 3, HSIO_HW_CFG_RGMII_1_CFG |
- HSIO_HW_CFG_RGMII_ENA,
- HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_RGMII_1_CFG_SET(0) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(3))),
SERDES_MUX_RGMII(RGMII(0), 5, HSIO_HW_CFG_RGMII_0_CFG |
- HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(5))),
SERDES_MUX_RGMII(RGMII(1), 6, HSIO_HW_CFG_RGMII_1_CFG |
- HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_ENA |
+ HSIO_HW_CFG_GMII_ENA,
HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
- HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1)) |
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(6))),
};
struct serdes_ctrl {
@@ -382,6 +393,67 @@ static int lan966x_sd6g40_setup(struct serdes_macro *macro, u32 idx, int mode)
return lan966x_sd6g40_setup_lane(macro, conf, idx);
}
+static int lan966x_rgmii_setup(struct serdes_macro *macro, u32 idx, int mode)
+{
+ bool tx_delay = false;
+ bool rx_delay = false;
+
+ /* Configure RGMII */
+ lan_rmw(HSIO_RGMII_CFG_RGMII_RX_RST_SET(0) |
+ HSIO_RGMII_CFG_RGMII_TX_RST_SET(0) |
+ HSIO_RGMII_CFG_TX_CLK_CFG_SET(macro->speed == SPEED_1000 ? 1 :
+ macro->speed == SPEED_100 ? 2 :
+ macro->speed == SPEED_10 ? 3 : 0),
+ HSIO_RGMII_CFG_RGMII_RX_RST |
+ HSIO_RGMII_CFG_RGMII_TX_RST |
+ HSIO_RGMII_CFG_TX_CLK_CFG,
+ macro->ctrl->regs, HSIO_RGMII_CFG(idx));
+
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ rx_delay = true;
+
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ tx_delay = true;
+
+ /* Setup DLL configuration */
+ lan_rmw(HSIO_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_DLL_CFG_DLL_ENA_SET(rx_delay),
+ HSIO_DLL_CFG_DLL_RST |
+ HSIO_DLL_CFG_DLL_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x0 : 0x2));
+
+ lan_rmw(HSIO_DLL_CFG_DELAY_ENA_SET(rx_delay),
+ HSIO_DLL_CFG_DELAY_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x0 : 0x2));
+
+ lan_rmw(HSIO_DLL_CFG_DLL_RST_SET(0) |
+ HSIO_DLL_CFG_DLL_ENA_SET(tx_delay),
+ HSIO_DLL_CFG_DLL_RST |
+ HSIO_DLL_CFG_DLL_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x1 : 0x3));
+
+ lan_rmw(HSIO_DLL_CFG_DELAY_ENA_SET(tx_delay),
+ HSIO_DLL_CFG_DELAY_ENA,
+ macro->ctrl->regs, HSIO_DLL_CFG(idx == 0 ? 0x1 : 0x3));
+
+ return 0;
+}
+
+static int serdes_set_speed(struct phy *phy, int speed)
+{
+ struct serdes_macro *macro = phy_get_drvdata(phy);
+
+ if (!phy_interface_mode_is_rgmii(macro->mode))
+ return 0;
+
+ macro->speed = speed;
+ lan966x_rgmii_setup(macro, macro->idx - (SERDES6G_MAX + 1), macro->mode);
+
+ return 0;
+}
+
static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct serdes_macro *macro = phy_get_drvdata(phy);
@@ -401,6 +473,9 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
submode == PHY_INTERFACE_MODE_2500BASEX)
submode = PHY_INTERFACE_MODE_SGMII;
+ if (submode == PHY_INTERFACE_MODE_QUSGMII)
+ submode = PHY_INTERFACE_MODE_QSGMII;
+
for (i = 0; i < ARRAY_SIZE(lan966x_serdes_muxes); i++) {
if (macro->idx != lan966x_serdes_muxes[i].idx ||
mode != lan966x_serdes_muxes[i].mode ||
@@ -424,7 +499,9 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
macro->mode);
if (macro->idx < RGMII_MAX)
- return 0;
+ return lan966x_rgmii_setup(macro,
+ macro->idx - (SERDES6G_MAX + 1),
+ macro->mode);
return -EOPNOTSUPP;
}
@@ -434,6 +511,7 @@ static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
static const struct phy_ops serdes_ops = {
.set_mode = serdes_set_mode,
+ .set_speed = serdes_set_speed,
.owner = THIS_MODULE,
};
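The new lan966x_rgmii_setup() above makes two independent decisions that are easy to restate on their own: the TX clock configuration follows the negotiated link speed, and the internal DLL delay on each direction is enabled only when the RGMII variant says the attached PHY is not already inserting that delay. The two helpers below use hypothetical names and simply restate the hunk's logic (the TX_CLK_CFG values presumably select the standard 125/25/2.5 MHz RGMII TX clocks):

#include <linux/phy.h>

static u32 rgmii_tx_clk_cfg(int speed)
{
	/* values as programmed by lan966x_rgmii_setup() above */
	switch (speed) {
	case SPEED_1000:
		return 1;
	case SPEED_100:
		return 2;
	case SPEED_10:
		return 3;
	default:
		return 0;
	}
}

static void rgmii_delays(phy_interface_t mode, bool *rx_delay, bool *tx_delay)
{
	/* MAC adds the RX delay unless the PHY mode already provides it */
	*rx_delay = (mode == PHY_INTERFACE_MODE_RGMII ||
		     mode == PHY_INTERFACE_MODE_RGMII_TXID);

	/* MAC adds the TX delay unless the PHY mode already provides it */
	*tx_delay = (mode == PHY_INTERFACE_MODE_RGMII ||
		     mode == PHY_INTERFACE_MODE_RGMII_RXID);
}

This is also why the SERDES_MUX_RGMII() macro now expands to all four RGMII variants: the mux entry is identical for each, only the delay configuration differs.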
diff --git a/drivers/phy/microchip/lan966x_serdes_regs.h b/drivers/phy/microchip/lan966x_serdes_regs.h
index ea30f64ffd5c..ac54cd01fea6 100644
--- a/drivers/phy/microchip/lan966x_serdes_regs.h
+++ b/drivers/phy/microchip/lan966x_serdes_regs.h
@@ -206,4 +206,46 @@ enum lan966x_target {
#define HSIO_HW_CFG_QSGMII_ENA_GET(x)\
FIELD_GET(HSIO_HW_CFG_QSGMII_ENA, x)
+/* HSIO:HW_CFGSTAT:RGMII_CFG */
+#define HSIO_RGMII_CFG(r) __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 20, r, 2, 4)
+
+#define HSIO_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2)
+#define HSIO_RGMII_CFG_TX_CLK_CFG_SET(x)\
+ FIELD_PREP(HSIO_RGMII_CFG_TX_CLK_CFG, x)
+#define HSIO_RGMII_CFG_TX_CLK_CFG_GET(x)\
+ FIELD_GET(HSIO_RGMII_CFG_TX_CLK_CFG, x)
+
+#define HSIO_RGMII_CFG_RGMII_TX_RST BIT(1)
+#define HSIO_RGMII_CFG_RGMII_TX_RST_SET(x)\
+ FIELD_PREP(HSIO_RGMII_CFG_RGMII_TX_RST, x)
+#define HSIO_RGMII_CFG_RGMII_TX_RST_GET(x)\
+ FIELD_GET(HSIO_RGMII_CFG_RGMII_TX_RST, x)
+
+#define HSIO_RGMII_CFG_RGMII_RX_RST BIT(0)
+#define HSIO_RGMII_CFG_RGMII_RX_RST_SET(x)\
+ FIELD_PREP(HSIO_RGMII_CFG_RGMII_RX_RST, x)
+#define HSIO_RGMII_CFG_RGMII_RX_RST_GET(x)\
+ FIELD_GET(HSIO_RGMII_CFG_RGMII_RX_RST, x)
+
+/* HSIO:HW_CFGSTAT:DLL_CFG */
+#define HSIO_DLL_CFG(r) __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 36, r, 4, 4)
+
+#define HSIO_DLL_CFG_DELAY_ENA BIT(2)
+#define HSIO_DLL_CFG_DELAY_ENA_SET(x)\
+ FIELD_PREP(HSIO_DLL_CFG_DELAY_ENA, x)
+#define HSIO_DLL_CFG_DELAY_ENA_GET(x)\
+ FIELD_GET(HSIO_DLL_CFG_DELAY_ENA, x)
+
+#define HSIO_DLL_CFG_DLL_ENA BIT(1)
+#define HSIO_DLL_CFG_DLL_ENA_SET(x)\
+ FIELD_PREP(HSIO_DLL_CFG_DLL_ENA, x)
+#define HSIO_DLL_CFG_DLL_ENA_GET(x)\
+ FIELD_GET(HSIO_DLL_CFG_DLL_ENA, x)
+
+#define HSIO_DLL_CFG_DLL_RST BIT(0)
+#define HSIO_DLL_CFG_DLL_RST_SET(x)\
+ FIELD_PREP(HSIO_DLL_CFG_DLL_RST, x)
+#define HSIO_DLL_CFG_DLL_RST_GET(x)\
+ FIELD_GET(HSIO_DLL_CFG_DLL_RST, x)
+
#endif /* _LAN966X_HSIO_REGS_H_ */
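The added RGMII_CFG and DLL_CFG accessors follow this header's existing convention: a GENMASK()/BIT() field definition plus _SET()/_GET() wrappers around FIELD_PREP()/FIELD_GET(), so several fields can be OR-ed into one value/mask pair for lan_rmw(). A short, purely illustrative use of the new DLL_CFG bits (both helper names are hypothetical):

static u32 dll_cfg_pack(bool reset, bool dll_ena, bool delay_ena)
{
	/* each _SET() places its value inside the corresponding field */
	return HSIO_DLL_CFG_DLL_RST_SET(reset) |
	       HSIO_DLL_CFG_DLL_ENA_SET(dll_ena) |
	       HSIO_DLL_CFG_DELAY_ENA_SET(delay_ena);
}

static bool dll_cfg_delay_enabled(u32 reg_val)
{
	/* _GET() extracts the field back out of a register image */
	return HSIO_DLL_CFG_DELAY_ENA_GET(reg_val);
}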
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index 7e3570789845..fc8ca0f3018d 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -70,8 +70,19 @@
#define TXn_TRAN_DRVR_EMP_EN 0x0078
+struct qcom_edp_cfg {
+ bool is_dp;
+
+ /* DP PHY swing and pre_emphasis tables */
+ const u8 (*swing_hbr_rbr)[4][4];
+ const u8 (*swing_hbr3_hbr2)[4][4];
+ const u8 (*pre_emphasis_hbr_rbr)[4][4];
+ const u8 (*pre_emphasis_hbr3_hbr2)[4][4];
+};
+
struct qcom_edp {
struct device *dev;
+ const struct qcom_edp_cfg *cfg;
struct phy *phy;
@@ -89,10 +100,84 @@ struct qcom_edp {
struct regulator_bulk_data supplies[2];
};
+static const u8 dp_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x16, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 dp_pre_emp_hbr_rbr[4][4] = {
+ { 0x00, 0x0d, 0x14, 0x1a },
+ { 0x00, 0x0e, 0x15, 0xff },
+ { 0x00, 0x0e, 0xff, 0xff },
+ { 0x03, 0xff, 0xff, 0xff }
+};
+
+static const u8 dp_swing_hbr2_hbr3[4][4] = {
+ { 0x02, 0x12, 0x16, 0x1a },
+ { 0x09, 0x19, 0x1f, 0xff },
+ { 0x10, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 dp_pre_emp_hbr2_hbr3[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1b },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const struct qcom_edp_cfg dp_phy_cfg = {
+ .is_dp = true,
+ .swing_hbr_rbr = &dp_swing_hbr_rbr,
+ .swing_hbr3_hbr2 = &dp_swing_hbr2_hbr3,
+ .pre_emphasis_hbr_rbr = &dp_pre_emp_hbr_rbr,
+ .pre_emphasis_hbr3_hbr2 = &dp_pre_emp_hbr2_hbr3,
+};
+
+static const u8 edp_swing_hbr_rbr[4][4] = {
+ { 0x07, 0x0f, 0x16, 0x1f },
+ { 0x0d, 0x16, 0x1e, 0xff },
+ { 0x11, 0x1b, 0xff, 0xff },
+ { 0x16, 0xff, 0xff, 0xff }
+};
+
+static const u8 edp_pre_emp_hbr_rbr[4][4] = {
+ { 0x05, 0x12, 0x17, 0x1d },
+ { 0x05, 0x11, 0x18, 0xff },
+ { 0x06, 0x11, 0xff, 0xff },
+ { 0x00, 0xff, 0xff, 0xff }
+};
+
+static const u8 edp_swing_hbr2_hbr3[4][4] = {
+ { 0x0b, 0x11, 0x17, 0x1c },
+ { 0x10, 0x19, 0x1f, 0xff },
+ { 0x19, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 edp_pre_emp_hbr2_hbr3[4][4] = {
+ { 0x08, 0x11, 0x17, 0x1b },
+ { 0x00, 0x0c, 0x13, 0xff },
+ { 0x05, 0x10, 0xff, 0xff },
+ { 0x00, 0xff, 0xff, 0xff }
+};
+
+static const struct qcom_edp_cfg edp_phy_cfg = {
+ .is_dp = false,
+ .swing_hbr_rbr = &edp_swing_hbr_rbr,
+ .swing_hbr3_hbr2 = &edp_swing_hbr2_hbr3,
+ .pre_emphasis_hbr_rbr = &edp_pre_emp_hbr_rbr,
+ .pre_emphasis_hbr3_hbr2 = &edp_pre_emp_hbr2_hbr3,
+};
+
static int qcom_edp_phy_init(struct phy *phy)
{
struct qcom_edp *edp = phy_get_drvdata(phy);
+ const struct qcom_edp_cfg *cfg = edp->cfg;
int ret;
+ u8 cfg8;
ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies);
if (ret)
@@ -117,6 +202,13 @@ static int qcom_edp_phy_init(struct phy *phy)
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
edp->edp + DP_PHY_PD_CTL);
+ if (cfg && cfg->is_dp)
+ cfg8 = 0xb7;
+ else
+ cfg8 = 0x37;
+
+ writel(0xfc, edp->edp + DP_PHY_MODE);
+
writel(0x00, edp->edp + DP_PHY_AUX_CFG0);
writel(0x13, edp->edp + DP_PHY_AUX_CFG1);
writel(0x24, edp->edp + DP_PHY_AUX_CFG2);
@@ -125,7 +217,7 @@ static int qcom_edp_phy_init(struct phy *phy)
writel(0x26, edp->edp + DP_PHY_AUX_CFG5);
writel(0x0a, edp->edp + DP_PHY_AUX_CFG6);
writel(0x03, edp->edp + DP_PHY_AUX_CFG7);
- writel(0x37, edp->edp + DP_PHY_AUX_CFG8);
+ writel(cfg8, edp->edp + DP_PHY_AUX_CFG8);
writel(0x03, edp->edp + DP_PHY_AUX_CFG9);
writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
@@ -142,14 +234,60 @@ out_disable_supplies:
return ret;
}
+static int qcom_edp_set_voltages(struct qcom_edp *edp, const struct phy_configure_opts_dp *dp_opts)
+{
+ const struct qcom_edp_cfg *cfg = edp->cfg;
+ unsigned int v_level = 0;
+ unsigned int p_level = 0;
+ u8 ldo_config;
+ u8 swing;
+ u8 emph;
+ int i;
+
+ if (!cfg)
+ return 0;
+
+ for (i = 0; i < dp_opts->lanes; i++) {
+ v_level = max(v_level, dp_opts->voltage[i]);
+ p_level = max(p_level, dp_opts->pre[i]);
+ }
+
+ if (dp_opts->link_rate <= 2700) {
+ swing = (*cfg->swing_hbr_rbr)[v_level][p_level];
+ emph = (*cfg->pre_emphasis_hbr_rbr)[v_level][p_level];
+ } else {
+ swing = (*cfg->swing_hbr3_hbr2)[v_level][p_level];
+ emph = (*cfg->pre_emphasis_hbr3_hbr2)[v_level][p_level];
+ }
+
+ if (swing == 0xff || emph == 0xff)
+ return -EINVAL;
+
+ ldo_config = (cfg && cfg->is_dp) ? 0x1 : 0x0;
+
+ writel(ldo_config, edp->tx0 + TXn_LDO_CONFIG);
+ writel(swing, edp->tx0 + TXn_TX_DRV_LVL);
+ writel(emph, edp->tx0 + TXn_TX_EMP_POST1_LVL);
+
+ writel(ldo_config, edp->tx1 + TXn_LDO_CONFIG);
+ writel(swing, edp->tx1 + TXn_TX_DRV_LVL);
+ writel(emph, edp->tx1 + TXn_TX_EMP_POST1_LVL);
+
+ return 0;
+}
+
static int qcom_edp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
{
const struct phy_configure_opts_dp *dp_opts = &opts->dp;
struct qcom_edp *edp = phy_get_drvdata(phy);
+ int ret = 0;
memcpy(&edp->dp_opts, dp_opts, sizeof(*dp_opts));
- return 0;
+ if (dp_opts->set_voltages)
+ ret = qcom_edp_set_voltages(edp, dp_opts);
+
+ return ret;
}
static int qcom_edp_configure_ssc(const struct qcom_edp *edp)
@@ -272,31 +410,30 @@ static int qcom_edp_configure_pll(const struct qcom_edp *edp)
return 0;
}
-static int qcom_edp_set_vco_div(const struct qcom_edp *edp)
+static int qcom_edp_set_vco_div(const struct qcom_edp *edp, unsigned long *pixel_freq)
{
const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
- unsigned long pixel_freq;
u32 vco_div;
switch (dp_opts->link_rate) {
case 1620:
vco_div = 0x1;
- pixel_freq = 1620000000UL / 2;
+ *pixel_freq = 1620000000UL / 2;
break;
case 2700:
vco_div = 0x1;
- pixel_freq = 2700000000UL / 2;
+ *pixel_freq = 2700000000UL / 2;
break;
case 5400:
vco_div = 0x2;
- pixel_freq = 5400000000UL / 4;
+ *pixel_freq = 5400000000UL / 4;
break;
case 8100:
vco_div = 0x0;
- pixel_freq = 8100000000UL / 6;
+ *pixel_freq = 8100000000UL / 6;
break;
default:
@@ -306,18 +443,20 @@ static int qcom_edp_set_vco_div(const struct qcom_edp *edp)
writel(vco_div, edp->edp + DP_PHY_VCO_DIV);
- clk_set_rate(edp->dp_link_hw.clk, dp_opts->link_rate * 100000);
- clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq);
-
return 0;
}
static int qcom_edp_phy_power_on(struct phy *phy)
{
const struct qcom_edp *edp = phy_get_drvdata(phy);
+ const struct qcom_edp_cfg *cfg = edp->cfg;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ unsigned long pixel_freq;
+ u8 ldo_config;
int timeout;
int ret;
u32 val;
+ u8 cfg1;
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
@@ -330,8 +469,11 @@ static int qcom_edp_phy_power_on(struct phy *phy)
if (timeout)
return timeout;
- writel(0x01, edp->tx0 + TXn_LDO_CONFIG);
- writel(0x01, edp->tx1 + TXn_LDO_CONFIG);
+
+ ldo_config = (cfg && cfg->is_dp) ? 0x1 : 0x0;
+
+ writel(ldo_config, edp->tx0 + TXn_LDO_CONFIG);
+ writel(ldo_config, edp->tx1 + TXn_LDO_CONFIG);
writel(0x00, edp->tx0 + TXn_LANE_MODE_1);
writel(0x00, edp->tx1 + TXn_LANE_MODE_1);
@@ -363,7 +505,7 @@ static int qcom_edp_phy_power_on(struct phy *phy)
writel(0x01, edp->tx1 + TXn_TRAN_DRVR_EMP_EN);
writel(0x04, edp->tx1 + TXn_TX_BAND);
- ret = qcom_edp_set_vco_div(edp);
+ ret = qcom_edp_set_vco_div(edp, &pixel_freq);
if (ret)
return ret;
@@ -398,19 +540,46 @@ static int qcom_edp_phy_power_on(struct phy *phy)
writel(0x1f, edp->tx0 + TXn_TX_DRV_LVL);
writel(0x1f, edp->tx1 + TXn_TX_DRV_LVL);
- writel(0x4, edp->tx0 + TXn_HIGHZ_DRVR_EN);
- writel(0x3, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
- writel(0x4, edp->tx1 + TXn_HIGHZ_DRVR_EN);
- writel(0x0, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
- writel(0x3, edp->edp + DP_PHY_CFG_1);
+ if (edp->dp_opts.lanes == 1) {
+ bias0_en = 0x01;
+ bias1_en = 0x00;
+ drvr0_en = 0x06;
+ drvr1_en = 0x07;
+ cfg1 = 0x1;
+ } else if (edp->dp_opts.lanes == 2) {
+ bias0_en = 0x03;
+ bias1_en = 0x00;
+ drvr0_en = 0x04;
+ drvr1_en = 0x07;
+ cfg1 = 0x3;
+ } else {
+ bias0_en = 0x03;
+ bias1_en = 0x03;
+ drvr0_en = 0x04;
+ drvr1_en = 0x04;
+ cfg1 = 0xf;
+ }
+
+ writel(drvr0_en, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(bias0_en, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(bias1_en, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(cfg1, edp->edp + DP_PHY_CFG_1);
writel(0x18, edp->edp + DP_PHY_CFG);
usleep_range(100, 1000);
writel(0x19, edp->edp + DP_PHY_CFG);
- return readl_poll_timeout(edp->edp + DP_PHY_STATUS,
- val, val & BIT(1), 500, 10000);
+ ret = readl_poll_timeout(edp->edp + DP_PHY_STATUS,
+ val, val & BIT(1), 500, 10000);
+ if (ret)
+ return ret;
+
+ clk_set_rate(edp->dp_link_hw.clk, edp->dp_opts.link_rate * 100000);
+ clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq);
+
+ return 0;
}
static int qcom_edp_phy_power_off(struct phy *phy)
@@ -571,21 +740,24 @@ static int qcom_edp_clks_register(struct qcom_edp *edp, struct device_node *np)
{
struct clk_hw_onecell_data *data;
struct clk_init_data init = { };
+ char name[64];
int ret;
data = devm_kzalloc(edp->dev, struct_size(data, hws, 2), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ snprintf(name, sizeof(name), "%s::link_clk", dev_name(edp->dev));
init.ops = &qcom_edp_dp_link_clk_ops;
- init.name = "edp_phy_pll_link_clk";
+ init.name = name;
edp->dp_link_hw.init = &init;
ret = devm_clk_hw_register(edp->dev, &edp->dp_link_hw);
if (ret)
return ret;
+ snprintf(name, sizeof(name), "%s::vco_div_clk", dev_name(edp->dev));
init.ops = &qcom_edp_dp_pixel_clk_ops;
- init.name = "edp_phy_pll_vco_div_clk";
+ init.name = name;
edp->dp_pixel_hw.init = &init;
ret = devm_clk_hw_register(edp->dev, &edp->dp_pixel_hw);
if (ret)
@@ -610,6 +782,7 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
return -ENOMEM;
edp->dev = dev;
+ edp->cfg = of_device_get_match_data(&pdev->dev);
edp->edp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(edp->edp))
@@ -670,6 +843,8 @@ static int qcom_edp_phy_probe(struct platform_device *pdev)
static const struct of_device_id qcom_edp_phy_match_table[] = {
{ .compatible = "qcom,sc7280-edp-phy" },
{ .compatible = "qcom,sc8180x-edp-phy" },
+ { .compatible = "qcom,sc8280xp-dp-phy", .data = &dp_phy_cfg },
+ { .compatible = "qcom,sc8280xp-edp-phy", .data = &edp_phy_cfg },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_edp_phy_match_table);
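The drive-strength programming introduced above boils down to a table lookup: the highest voltage-swing and pre-emphasis levels requested on any active lane index a 4x4 table, 0xff flags combinations the hardware does not support, and link rates up to 2700 (RBR/HBR) use one pair of tables while HBR2/HBR3 use the other. A condensed, illustration-only restatement of qcom_edp_set_voltages() (the helper name is hypothetical):

static int edp_lookup_drive(const struct qcom_edp_cfg *cfg,
			    const struct phy_configure_opts_dp *dp_opts,
			    u8 *swing, u8 *emph)
{
	unsigned int v_level = 0, p_level = 0;
	int i;

	/* program for the worst case requested across the active lanes */
	for (i = 0; i < dp_opts->lanes; i++) {
		v_level = max(v_level, dp_opts->voltage[i]);
		p_level = max(p_level, dp_opts->pre[i]);
	}

	if (dp_opts->link_rate <= 2700) {	/* RBR or HBR */
		*swing = (*cfg->swing_hbr_rbr)[v_level][p_level];
		*emph = (*cfg->pre_emphasis_hbr_rbr)[v_level][p_level];
	} else {				/* HBR2 or HBR3 */
		*swing = (*cfg->swing_hbr3_hbr2)[v_level][p_level];
		*emph = (*cfg->pre_emphasis_hbr3_hbr2)[v_level][p_level];
	}

	/* 0xff marks an unsupported swing/pre-emphasis combination */
	return (*swing == 0xff || *emph == 0xff) ? -EINVAL : 0;
}

qcom_edp_phy_configure() only takes this path when dp_opts->set_voltages is set, so the existing link-rate and pixel-clock handling is left untouched.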
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 4b1828976104..9807c4d935cd 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -28,16 +28,11 @@
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
/* DP PHY soft reset */
@@ -71,11 +66,6 @@
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
@@ -115,22 +105,14 @@ struct qmp_phy_init_tbl {
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -606,6 +588,160 @@ static const struct qmp_phy_init_tbl qmp_v4_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_TX_TX_EMP_POST1_LVL, 0x20),
};
+static const struct qmp_phy_init_tbl qmp_v5_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_CONFIG, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl qmp_v5_5nm_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_3, 0x51),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_PRE_STALL_LDO_BOOST_EN, 0x0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_INTERFACE_SELECT, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TRAN_DRVR_EMP_EN, 0xf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TX_BAND, 0x01),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xfd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0xfd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MSB_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MSB_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xd4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x76),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE0, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_INTEGLOOP_GAIN0_MODE1, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAXVAL2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SVS_MODE_CLK_SEL, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_2, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_3, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_SIGDET_ENABLES, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B0, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B1, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B2, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B3, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B5, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B6, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B7, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B0, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B1, 0x63),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B2, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B3, 0x23),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B4, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B5, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B6, 0x8e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_MODE_RATE2_B7, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_IVCM_CAL_CODE_OVERRIDE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_IVCM_CAL_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_SUMMER_CAL_SPD_MODE, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_PI_CONTROLS, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_RX_IVCM_POSTCAL_OFFSET, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_VGA_CAL_CNTRL1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_VGA_CAL_MAN_VAL, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_DFE_3, 0x45),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_GM_CAL, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE2, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_5NM_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x3f),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb43dp_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_CONFIG, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
/* list of regulators */
struct qmp_regulator_data {
@@ -618,14 +754,69 @@ static struct qmp_regulator_data qmp_phy_vreg_l[] = {
{ .name = "vdda-pll", .enable_load = 36000 },
};
+static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1a },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
+ { 0x02, 0x12, 0x16, 0x1a },
+ { 0x09, 0x19, 0x1f, 0xff },
+ { 0x10, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x00, 0x0c, 0x14, 0x19 },
+ { 0x00, 0x0b, 0x12, 0xff },
+ { 0x00, 0x0b, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x19, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x20, 0x2c, 0x35, 0x3b },
+ { 0x22, 0x2e, 0x36, 0xff },
+ { 0x22, 0x31, 0xff, 0xff },
+ { 0x24, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_voltage_swing_hbr3_hbr2[4][4] = {
+ { 0x22, 0x32, 0x36, 0x3a },
+ { 0x29, 0x39, 0x3f, 0xff },
+ { 0x30, 0x3f, 0xff, 0xff },
+ { 0x3f, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x20, 0x2d, 0x34, 0x3a },
+ { 0x20, 0x2e, 0x35, 0xff },
+ { 0x20, 0x2e, 0xff, 0xff },
+ { 0x24, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = {
+ { 0x28, 0x2f, 0x36, 0x3f },
+ { 0x31, 0x3e, 0x3f, 0xff },
+ { 0x36, 0x3f, 0xff, 0xff },
+ { 0x3f, 0xff, 0xff, 0xff }
+};
+
struct qmp_phy;
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
/* phy-type - PCIE/UFS/USB */
unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -649,6 +840,12 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *serdes_tbl_hbr3;
int serdes_tbl_hbr3_num;
+ /* DP PHY swing and pre_emphasis tables */
+ const u8 (*swing_hbr_rbr)[4][4];
+ const u8 (*swing_hbr3_hbr2)[4][4];
+ const u8 (*pre_emphasis_hbr_rbr)[4][4];
+ const u8 (*pre_emphasis_hbr3_hbr2)[4][4];
+
/* DP PHY callbacks */
int (*configure_dp_phy)(struct qmp_phy *qphy);
void (*configure_dp_tx)(struct qmp_phy *qphy);
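The table pointers this hunk adds to struct qmp_phy_cfg (swing_hbr_rbr, swing_hbr3_hbr2 and their pre-emphasis counterparts) are pointers to whole 4x4 arrays rather than plain u8 pointers, so the dimensions travel with the configuration and a lookup reads (*cfg->swing_hbr_rbr)[v_level][p_level], as qmp_combo_configure_dp_swing() does further down. A self-contained illustration of the idiom (the names and the helper here are made up for the illustration):

#include <linux/types.h>

struct drive_tables {
	const u8 (*swing)[4][4];	/* pointer to an entire 4x4 table */
};

static const u8 example_swing[4][4] = {
	{ 0x08, 0x0f, 0x16, 0x1f },
	{ 0x11, 0x1e, 0x1f, 0xff },
	{ 0x16, 0x1f, 0xff, 0xff },
	{ 0x1f, 0xff, 0xff, 0xff },
};

static const struct drive_tables tables = {
	.swing = &example_swing,	/* note the &: address of the array itself */
};

static u8 swing_for(unsigned int v_level, unsigned int p_level)
{
	return (*tables.swing)[v_level][p_level];
}

Compared with a flat const u8 *, the row length stays part of the type, so the natural two-dimensional indexing works and per-generation tables (v3 vs v5) can be swapped by pointing the cfg at a different array.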
@@ -679,11 +876,6 @@ struct qmp_phy_cfg {
int pwrdn_delay_min;
int pwrdn_delay_max;
- /* true, if PHY has a separate DP_COM control block */
- bool has_phy_dp_com_ctrl;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
/* Offset from PCS to PCS_USB region */
unsigned int pcs_usb_offset;
@@ -708,9 +900,7 @@ struct qmp_phy_combo_cfg {
* @pcs_misc: iomapped memory space for lane's pcs_misc
* @pcs_usb: iomapped memory space for lane's pcs_usb
* @pipe_clk: pipe clock
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
- * @lane_rst: lane's reset controller
* @mode: current PHY mode
* @dp_aux_cfg: Display port aux config
* @dp_opts: Display port optional config
@@ -728,9 +918,7 @@ struct qmp_phy {
void __iomem *pcs_misc;
void __iomem *pcs_usb;
struct clk *pipe_clk;
- unsigned int index;
struct qcom_qmp *qmp;
- struct reset_control *lane_rst;
enum phy_mode mode;
unsigned int dp_aux_cfg;
struct phy_configure_opts_dp dp_opts;
@@ -784,6 +972,8 @@ static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy);
static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy);
static int qcom_qmp_v4_dp_phy_calibrate(struct qmp_phy *qphy);
+static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy);
+
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -833,7 +1023,7 @@ static const char * const sc7180_usb3phy_reset_l[] = {
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -858,14 +1048,11 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.type = PHY_TYPE_DP,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_dp_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
@@ -881,6 +1068,11 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3,
.serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
.clk_list = qmp_v3_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = sc7180_usb3phy_reset_l,
@@ -889,9 +1081,6 @@ static const struct qmp_phy_cfg sc7180_dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
.dp_aux_init = qcom_qmp_v3_phy_dp_aux_init,
.configure_dp_tx = qcom_qmp_v3_phy_configure_dp_tx,
.configure_dp_phy = qcom_qmp_v3_phy_configure_dp_phy,
@@ -903,9 +1092,43 @@ static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = {
.dp_cfg = &sc7180_dpphy_cfg,
};
+static const struct qmp_phy_cfg sdm845_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_combo_cfg sdm845_usb3dpphy_cfg = {
+ .usb_cfg = &sdm845_usb3phy_cfg,
+ .dp_cfg = &sc7180_dpphy_cfg,
+};
+
static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -934,14 +1157,11 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
.type = PHY_TYPE_DP,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v4_dp_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
@@ -957,6 +1177,11 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
.serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
.serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
.clk_list = qmp_v3_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = sc7180_usb3phy_reset_l,
@@ -965,9 +1190,6 @@ static const struct qmp_phy_cfg sc8180x_dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
.dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
.configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
.configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
@@ -979,9 +1201,81 @@ static const struct qmp_phy_combo_cfg sc8180x_usb3dpphy_cfg = {
.dp_cfg = &sc8180x_dpphy_cfg,
};
+static const struct qmp_phy_cfg sc8280xp_usb43dp_usb_cfg = {
+ .type = PHY_TYPE_USB3,
+ .lanes = 2,
+
+ .serdes_tbl = sc8280xp_usb43dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_serdes_tbl),
+ .tx_tbl = sc8280xp_usb43dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_tx_tbl),
+ .rx_tbl = sc8280xp_usb43dp_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_rx_tbl),
+ .pcs_tbl = sc8280xp_usb43dp_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb43dp_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+};
+
+static const struct qmp_phy_cfg sc8280xp_usb43dp_dp_cfg = {
+ .type = PHY_TYPE_DP,
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v5_dp_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v5_dp_serdes_tbl),
+ .tx_tbl = qmp_v5_5nm_dp_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v5_5nm_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
+ .configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
+ .configure_dp_phy = qcom_qmp_v5_phy_configure_dp_phy,
+ .calibrate_dp_phy = qcom_qmp_v4_dp_phy_calibrate,
+};
+
+static const struct qmp_phy_combo_cfg sc8280xp_usb43dpphy_combo_cfg = {
+ .usb_cfg = &sc8280xp_usb43dp_usb_cfg,
+ .dp_cfg = &sc8280xp_usb43dp_dp_cfg,
+};
+
static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1009,14 +1303,11 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.has_pwrdn_delay = true,
.pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
-
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
.type = PHY_TYPE_DP,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v4_dp_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
@@ -1032,6 +1323,11 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
.serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
.serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
@@ -1040,9 +1336,6 @@ static const struct qmp_phy_cfg sm8250_dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v4_usb3phy_regs_layout,
- .has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
-
.dp_aux_init = qcom_qmp_v4_phy_dp_aux_init,
.configure_dp_tx = qcom_qmp_v4_phy_configure_dp_tx,
.configure_dp_phy = qcom_qmp_v4_phy_configure_dp_phy,
@@ -1054,7 +1347,7 @@ static const struct qmp_phy_combo_cfg sm8250_usb3dpphy_cfg = {
.dp_cfg = &sm8250_dpphy_cfg,
};
-static void qcom_qmp_phy_combo_configure_lane(void __iomem *base,
+static void qmp_combo_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -1077,15 +1370,15 @@ static void qcom_qmp_phy_combo_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_combo_configure(void __iomem *base,
+static void qmp_combo_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_combo_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_combo_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_combo_serdes_init(struct qmp_phy *qphy)
+static int qmp_combo_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
@@ -1093,27 +1386,27 @@ static int qcom_qmp_phy_combo_serdes_init(struct qmp_phy *qphy)
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_combo_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
if (cfg->type == PHY_TYPE_DP) {
switch (dp_opts->link_rate) {
case 1620:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_rbr,
cfg->serdes_tbl_rbr_num);
break;
case 2700:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_hbr,
cfg->serdes_tbl_hbr_num);
break;
case 5400:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_hbr2,
cfg->serdes_tbl_hbr2_num);
break;
case 8100:
- qcom_qmp_phy_combo_configure(serdes, cfg->regs,
+ qmp_combo_configure(serdes, cfg->regs,
cfg->serdes_tbl_hbr3,
cfg->serdes_tbl_hbr3_num);
break;
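
The link-rate switch above selects the serdes table matching the rate requested through the generic PHY configure interface. For reference, this is roughly how a DP controller driver hands the negotiated parameters to the PHY (example caller, not part of this patch):

#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>

static int example_set_dp_link(struct phy *dp_phy, unsigned int rate_mbps,
			       unsigned int lanes)
{
	union phy_configure_opts opts = { };

	opts.dp.link_rate = rate_mbps;	/* 1620, 2700, 5400 or 8100 Mb/s */
	opts.dp.lanes = lanes;		/* 1, 2 or 4 */
	opts.dp.set_rate = 1;
	opts.dp.set_lanes = 1;

	return phy_configure(dp_phy, &opts);
}
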
@@ -1169,38 +1462,11 @@ static void qcom_qmp_v3_phy_dp_aux_init(struct qmp_phy *qphy)
qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
}
-static const u8 qmp_dp_v3_pre_emphasis_hbr3_hbr2[4][4] = {
- { 0x00, 0x0c, 0x15, 0x1a },
- { 0x02, 0x0e, 0x16, 0xff },
- { 0x02, 0x11, 0xff, 0xff },
- { 0x04, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_voltage_swing_hbr3_hbr2[4][4] = {
- { 0x02, 0x12, 0x16, 0x1a },
- { 0x09, 0x19, 0x1f, 0xff },
- { 0x10, 0x1f, 0xff, 0xff },
- { 0x1f, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = {
- { 0x00, 0x0c, 0x14, 0x19 },
- { 0x00, 0x0b, 0x12, 0xff },
- { 0x00, 0x0b, 0xff, 0xff },
- { 0x04, 0xff, 0xff, 0xff }
-};
-
-static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
- { 0x08, 0x0f, 0x16, 0x1f },
- { 0x11, 0x1e, 0x1f, 0xff },
- { 0x19, 0x1f, 0xff, 0xff },
- { 0x1f, 0xff, 0xff, 0xff }
-};
-
-static int qcom_qmp_phy_combo_configure_dp_swing(struct qmp_phy *qphy,
+static int qmp_combo_configure_dp_swing(struct qmp_phy *qphy,
unsigned int drv_lvl_reg, unsigned int emp_post_reg)
{
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ const struct qmp_phy_cfg *cfg = qphy->cfg;
unsigned int v_level = 0, p_level = 0;
u8 voltage_swing_cfg, pre_emphasis_cfg;
int i;
@@ -1211,11 +1477,11 @@ static int qcom_qmp_phy_combo_configure_dp_swing(struct qmp_phy *qphy,
}
if (dp_opts->link_rate <= 2700) {
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level];
+ voltage_swing_cfg = (*cfg->swing_hbr_rbr)[v_level][p_level];
+ pre_emphasis_cfg = (*cfg->pre_emphasis_hbr_rbr)[v_level][p_level];
} else {
- voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr3_hbr2[v_level][p_level];
- pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr3_hbr2[v_level][p_level];
+ voltage_swing_cfg = (*cfg->swing_hbr3_hbr2)[v_level][p_level];
+ pre_emphasis_cfg = (*cfg->pre_emphasis_hbr3_hbr2)[v_level][p_level];
}
/* TODO: Move check to config check */
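
The swing/pre-emphasis lookup now goes through per-cfg table pointers instead of the hard-coded v3 arrays, so v4/v5 PHYs can supply their own values. The indices come from the highest per-lane request; a sketch of that selection (the loop itself sits in unchanged context above):

#include <linux/minmax.h>

static void example_pick_levels(const struct phy_configure_opts_dp *dp_opts,
				unsigned int *v_level, unsigned int *p_level)
{
	int i;

	/* collapse per-lane requests to the highest swing/pre-emphasis level */
	for (i = 0; i < dp_opts->lanes; i++) {
		*v_level = max(*v_level, (unsigned int)dp_opts->voltage[i]);
		*p_level = max(*p_level, (unsigned int)dp_opts->pre[i]);
	}
}

The 0xff entries in the 4x4 tables appear to mark invalid swing/pre-emphasis combinations, which is what the TODO above guards against.
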
@@ -1239,8 +1505,7 @@ static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
u32 bias_en, drvr_en;
- if (qcom_qmp_phy_combo_configure_dp_swing(qphy,
- QSERDES_V3_TX_TX_DRV_LVL,
+ if (qmp_combo_configure_dp_swing(qphy, QSERDES_V3_TX_TX_DRV_LVL,
QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
return;
@@ -1258,7 +1523,7 @@ static void qcom_qmp_v3_phy_configure_dp_tx(struct qmp_phy *qphy)
writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN);
}
-static bool qcom_qmp_phy_combo_configure_dp_mode(struct qmp_phy *qphy)
+static bool qmp_combo_configure_dp_mode(struct qmp_phy *qphy)
{
u32 val;
bool reverse = false;
@@ -1295,7 +1560,7 @@ static int qcom_qmp_v3_phy_configure_dp_phy(struct qmp_phy *qphy)
u32 phy_vco_div, status;
unsigned long pixel_freq;
- qcom_qmp_phy_combo_configure_dp_mode(qphy);
+ qmp_combo_configure_dp_mode(qphy);
writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
@@ -1415,23 +1680,20 @@ static void qcom_qmp_v4_phy_configure_dp_tx(struct qmp_phy *qphy)
writel(0x20, qphy->tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
writel(0x20, qphy->tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- qcom_qmp_phy_combo_configure_dp_swing(qphy,
- QSERDES_V4_TX_TX_DRV_LVL,
+ qmp_combo_configure_dp_swing(qphy, QSERDES_V4_TX_TX_DRV_LVL,
QSERDES_V4_TX_TX_EMP_POST1_LVL);
}
-static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+static int qcom_qmp_v45_phy_configure_dp_phy(struct qmp_phy *qphy)
{
const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks;
const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
u32 phy_vco_div, status;
unsigned long pixel_freq;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- bool reverse;
writel(0x0f, qphy->pcs + QSERDES_V4_DP_PHY_CFG_1);
- reverse = qcom_qmp_phy_combo_configure_dp_mode(qphy);
+ qmp_combo_configure_dp_mode(qphy);
writel(0x13, qphy->pcs + QSERDES_DP_PHY_AUX_CFG1);
writel(0xa4, qphy->pcs + QSERDES_DP_PHY_AUX_CFG2);
@@ -1509,6 +1771,21 @@ static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
10000))
return -ETIMEDOUT;
+ return 0;
+}
+
+static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse = false;
+ u32 status;
+ int ret;
+
+ ret = qcom_qmp_v45_phy_configure_dp_phy(qphy);
+ if (ret < 0)
+ return ret;
+
/*
* At least for 7nm DP PHY this has to be done after enabling link
* clock.
@@ -1559,6 +1836,63 @@ static int qcom_qmp_v4_phy_configure_dp_phy(struct qmp_phy *qphy)
return 0;
}
+static int qcom_qmp_v5_phy_configure_dp_phy(struct qmp_phy *qphy)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse = false;
+ u32 status;
+ int ret;
+
+ ret = qcom_qmp_v45_phy_configure_dp_phy(qphy);
+ if (ret < 0)
+ return ret;
+
+ if (dp_opts->lanes == 1) {
+ bias0_en = reverse ? 0x3e : 0x1a;
+ drvr0_en = reverse ? 0x13 : 0x10;
+ bias1_en = reverse ? 0x15 : 0x3e;
+ drvr1_en = reverse ? 0x10 : 0x13;
+ } else if (dp_opts->lanes == 2) {
+ bias0_en = reverse ? 0x3f : 0x15;
+ drvr0_en = 0x10;
+ bias1_en = reverse ? 0x15 : 0x3f;
+ drvr1_en = 0x10;
+ } else {
+ bias0_en = 0x3f;
+ bias1_en = 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ }
+
+ writel(drvr0_en, qphy->tx + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
+ writel(bias0_en, qphy->tx + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, qphy->tx2 + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
+ writel(bias1_en, qphy->tx2 + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
+
+ writel(0x18, qphy->pcs + QSERDES_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qphy->pcs + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qphy->pcs + QSERDES_V4_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x0a, qphy->tx + QSERDES_V5_5NM_TX_TX_POL_INV);
+ writel(0x0a, qphy->tx2 + QSERDES_V5_5NM_TX_TX_POL_INV);
+
+ writel(0x27, qphy->tx + QSERDES_V5_5NM_TX_TX_DRV_LVL);
+ writel(0x27, qphy->tx2 + QSERDES_V5_5NM_TX_TX_DRV_LVL);
+
+ writel(0x20, qphy->tx + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qphy->tx2 + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
+
+ return 0;
+}
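
Like the v3/v4 paths, the v5 sequence waits for the PHY ready bit with the generic iopoll helper. The pattern in isolation:

#include <linux/iopoll.h>

static int example_wait_dp_phy_ready(void __iomem *pcs)
{
	u32 status;

	/* poll every 500 us, give up after 10 ms, succeed once BIT(1) is set */
	return readl_poll_timeout(pcs + QSERDES_V4_DP_PHY_STATUS, status,
				  status & BIT(1), 500, 10000);
}
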
+
/*
* We need to calibrate the aux setting here as many times
* as the caller tries
@@ -1603,7 +1937,7 @@ static int qcom_qmp_dp_phy_calibrate(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_combo_com_init(struct qmp_phy *qphy)
+static int qmp_combo_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1640,28 +1974,25 @@ static int qcom_qmp_phy_combo_com_init(struct qmp_phy *qphy)
if (ret)
goto err_assert_reset;
- if (cfg->has_phy_dp_com_ctrl) {
- qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
- SW_PWRDN);
- /* override hardware control for reset of qmp phy */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL, SW_PWRDN);
- /* Default type-c orientation, i.e CC1 */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+ /* override hardware control for reset of qmp phy */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
- qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
- USB3_MODE | DP_MODE);
+ /* Default type-c orientation, i.e CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL, USB3_MODE | DP_MODE);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
- }
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
qphy_setbits(pcs,
@@ -1685,7 +2016,7 @@ err_unlock:
return ret;
}
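
The dp_com sequence now runs unconditionally, since every PHY handled by this driver apparently has a DP_COM block. The qphy_setbits()/qphy_clrbits() helpers used here are plain read-modify-write accessors; roughly (the actual helpers live in an unchanged part of the file):

static void example_setbits(void __iomem *base, u32 offset, u32 val)
{
	u32 reg;

	reg = readl(base + offset);
	reg |= val;
	writel(reg, base + offset);

	/* flush the posted write */
	readl(base + offset);
}
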
-static int qcom_qmp_phy_combo_com_exit(struct qmp_phy *qphy)
+static int qmp_combo_com_exit(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1709,7 +2040,7 @@ static int qcom_qmp_phy_combo_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_combo_init(struct phy *phy)
+static int qmp_combo_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1717,7 +2048,7 @@ static int qcom_qmp_phy_combo_init(struct phy *phy)
int ret;
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_combo_com_init(qphy);
+ ret = qmp_combo_com_init(qphy);
if (ret)
return ret;
@@ -1727,7 +2058,7 @@ static int qcom_qmp_phy_combo_init(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_combo_power_on(struct phy *phy)
+static int qmp_combo_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1739,7 +2070,7 @@ static int qcom_qmp_phy_combo_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_combo_serdes_init(qphy);
+ qmp_combo_serdes_init(qphy);
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -1748,33 +2079,29 @@ static int qcom_qmp_phy_combo_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_combo_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_combo_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_combo_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_combo_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl,
+ cfg->tx_tbl_num, 2);
}
/* Configure special DP tx tunings */
if (cfg->type == PHY_TYPE_DP)
cfg->configure_dp_tx(qphy);
- qcom_qmp_phy_combo_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_combo_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_combo_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_combo_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl,
+ cfg->rx_tbl_num, 2);
}
/* Configure link rate, swing, etc. */
- if (cfg->type == PHY_TYPE_DP) {
+ if (cfg->type == PHY_TYPE_DP)
cfg->configure_dp_phy(qphy);
- } else {
- qcom_qmp_phy_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- }
+ else
+ qmp_combo_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
@@ -1808,7 +2135,7 @@ err_disable_pipe_clk:
return ret;
}
-static int qcom_qmp_phy_combo_power_off(struct phy *phy)
+static int qmp_combo_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1838,42 +2165,41 @@ static int qcom_qmp_phy_combo_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_combo_exit(struct phy *phy)
+static int qmp_combo_exit(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- qcom_qmp_phy_combo_com_exit(qphy);
+ qmp_combo_com_exit(qphy);
return 0;
}
-static int qcom_qmp_phy_combo_enable(struct phy *phy)
+static int qmp_combo_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_combo_init(phy);
+ ret = qmp_combo_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_combo_power_on(phy);
+ ret = qmp_combo_power_on(phy);
if (ret)
- qcom_qmp_phy_combo_exit(phy);
+ qmp_combo_exit(phy);
return ret;
}
-static int qcom_qmp_phy_combo_disable(struct phy *phy)
+static int qmp_combo_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_combo_power_off(phy);
+ ret = qmp_combo_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_combo_exit(phy);
+ return qmp_combo_exit(phy);
}
-static int qcom_qmp_phy_combo_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
+static int qmp_combo_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -1882,7 +2208,7 @@ static int qcom_qmp_phy_combo_set_mode(struct phy *phy,
return 0;
}
-static void qcom_qmp_phy_combo_enable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_combo_enable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -1911,7 +2237,7 @@ static void qcom_qmp_phy_combo_enable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
}
-static void qcom_qmp_phy_combo_disable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs_usb;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -1929,7 +2255,7 @@ static void qcom_qmp_phy_combo_disable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
}
-static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
+static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -1946,7 +2272,7 @@ static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
return 0;
}
- qcom_qmp_phy_combo_enable_autonomous_mode(qphy);
+ qmp_combo_enable_autonomous_mode(qphy);
clk_disable_unprepare(qphy->pipe_clk);
clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
@@ -1954,7 +2280,7 @@ static int __maybe_unused qcom_qmp_phy_combo_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused qcom_qmp_phy_combo_runtime_resume(struct device *dev)
+static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -1983,12 +2309,12 @@ static int __maybe_unused qcom_qmp_phy_combo_runtime_resume(struct device *dev)
return ret;
}
- qcom_qmp_phy_combo_disable_autonomous_mode(qphy);
+ qmp_combo_disable_autonomous_mode(qphy);
return 0;
}
-static int qcom_qmp_phy_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_combo_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -2020,7 +2346,7 @@ static int qcom_qmp_phy_combo_vreg_init(struct device *dev, const struct qmp_phy
return 0;
}
-static int qcom_qmp_phy_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_combo_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -2041,7 +2367,7 @@ static int qcom_qmp_phy_combo_reset_init(struct device *dev, const struct qmp_ph
return 0;
}
-static int qcom_qmp_phy_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_combo_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -2309,33 +2635,31 @@ static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy,
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_combo_usb_ops = {
- .init = qcom_qmp_phy_combo_enable,
- .exit = qcom_qmp_phy_combo_disable,
- .set_mode = qcom_qmp_phy_combo_set_mode,
+static const struct phy_ops qmp_combo_usb_ops = {
+ .init = qmp_combo_enable,
+ .exit = qmp_combo_disable,
+ .set_mode = qmp_combo_set_mode,
.owner = THIS_MODULE,
};
-static const struct phy_ops qcom_qmp_phy_combo_dp_ops = {
- .init = qcom_qmp_phy_combo_init,
+static const struct phy_ops qmp_combo_dp_ops = {
+ .init = qmp_combo_init,
.configure = qcom_qmp_dp_phy_configure,
- .power_on = qcom_qmp_phy_combo_power_on,
+ .power_on = qmp_combo_power_on,
.calibrate = qcom_qmp_dp_phy_calibrate,
- .power_off = qcom_qmp_phy_combo_power_off,
- .exit = qcom_qmp_phy_combo_exit,
- .set_mode = qcom_qmp_phy_combo_set_mode,
+ .power_off = qmp_combo_power_off,
+ .exit = qmp_combo_exit,
+ .set_mode = qmp_combo_set_mode,
.owner = THIS_MODULE,
};
-static
-int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id,
+static int qmp_combo_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
const struct phy_ops *ops;
- char prop_name[MAX_PROP_NAME];
int ret;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
@@ -2350,49 +2674,39 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
if (cfg->pcs_usb_offset)
qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset;
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc)
+ if (IS_ERR(qphy->pcs_misc)) {
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qphy->pcs_misc = NULL;
+ }
/*
* Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
@@ -2401,24 +2715,19 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
* Otherwise, we initialize pipe clock to NULL for
* all phys that don't need this.
*/
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
- if (cfg->type == PHY_TYPE_USB3) {
- ret = PTR_ERR(qphy->pipe_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev,
- "failed to get lane%d pipe_clk, %d\n",
- id, ret);
- return ret;
- }
+ if (cfg->type == PHY_TYPE_USB3)
+ return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
+ "failed to get lane%d pipe_clk\n",
+ id);
qphy->pipe_clk = NULL;
}
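
dev_err_probe() returns the error it is given, logs it unless it is -EPROBE_DEFER, and records a deferral reason in that case, which is what makes the old "if (ret != -EPROBE_DEFER) dev_err(...)" dance here and in the regulator/probe paths below unnecessary. Generic shape of the idiom (the "pipe" clock name is only illustrative):

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "pipe");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get pipe clock\n");

	return 0;
}
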
if (cfg->type == PHY_TYPE_DP)
- ops = &qcom_qmp_phy_combo_dp_ops;
+ ops = &qmp_combo_dp_ops;
else
- ops = &qcom_qmp_phy_combo_usb_ops;
+ ops = &qmp_combo_usb_ops;
generic_phy = devm_phy_create(dev, np, ops);
if (IS_ERR(generic_phy)) {
@@ -2428,7 +2737,6 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -2436,12 +2744,16 @@ int qcom_qmp_phy_combo_create(struct device *dev, struct device_node *np, int id
return 0;
}
-static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
+static const struct of_device_id qmp_combo_of_match_table[] = {
{
.compatible = "qcom,sc7180-qmp-usb3-dp-phy",
.data = &sc7180_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sdm845-qmp-usb3-dp-phy",
+ .data = &sdm845_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sm8250-qmp-usb3-dp-phy",
.data = &sm8250_usb3dpphy_cfg,
},
@@ -2449,16 +2761,20 @@ static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = {
.compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
.data = &sc8180x_usb3dpphy_cfg,
},
+ {
+ .compatible = "qcom,sc8280xp-qmp-usb43dp-phy",
+ .data = &sc8280xp_usb43dpphy_combo_cfg,
+ },
{ }
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_combo_phy_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table);
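
The new sdm845 and sc8280xp entries reuse the existing match-data mechanism; presumably (the lookup sits in an unchanged hunk) probe resolves the per-SoC combo cfg with of_device_get_match_data():

#include <linux/of_device.h>

static const struct qmp_phy_combo_cfg *example_get_combo_cfg(struct device *dev)
{
	return of_device_get_match_data(dev);
}
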
-static const struct dev_pm_ops qcom_qmp_phy_combo_pm_ops = {
- SET_RUNTIME_PM_OPS(qcom_qmp_phy_combo_runtime_suspend,
- qcom_qmp_phy_combo_runtime_resume, NULL)
+static const struct dev_pm_ops qmp_combo_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_combo_runtime_suspend,
+ qmp_combo_runtime_resume, NULL)
};
-static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
+static int qmp_combo_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -2494,12 +2810,9 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- /* per PHY dp_com; if PHY has dp_com control block */
- if (cfg->has_phy_dp_com_ctrl) {
- qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(qmp->dp_com))
- return PTR_ERR(qmp->dp_com);
- }
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
/* Only two serdes for combo PHY */
dp_serdes = devm_platform_ioremap_resource(pdev, 2);
@@ -2511,21 +2824,18 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
mutex_init(&qmp->phy_mutex);
- ret = qcom_qmp_phy_combo_clk_init(dev, cfg);
+ ret = qmp_combo_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_combo_reset_init(dev, cfg);
+ ret = qmp_combo_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_combo_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_combo_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -2537,7 +2847,9 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* Prevent runtime pm from being ON by default. Users can enable
* it using power/control in sysfs.
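
devm_pm_runtime_enable() above registers a devres action that disables runtime PM again when probe fails or the device is unbound, which is why the explicit pm_runtime_disable() calls in the error paths further down can be dropped. Minimal shape of the pairing:

#include <linux/pm_runtime.h>

static int example_enable_rpm(struct device *dev)
{
	pm_runtime_set_active(dev);

	/* paired with an automatic pm_runtime_disable() on driver detach */
	return devm_pm_runtime_enable(dev);
}
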
@@ -2551,7 +2863,7 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
serdes = dp_serdes;
/* Create per-lane phy */
- ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg);
+ ret = qmp_combo_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2569,7 +2881,7 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
serdes = usb_serdes;
/* Create per-lane phy */
- ret = qcom_qmp_phy_combo_create(dev, child, id, serdes, cfg);
+ ret = qmp_combo_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2592,29 +2904,24 @@ static int qcom_qmp_phy_combo_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_combo_driver = {
- .probe = qcom_qmp_phy_combo_probe,
+static struct platform_driver qmp_combo_driver = {
+ .probe = qmp_combo_probe,
.driver = {
.name = "qcom-qmp-combo-phy",
- .pm = &qcom_qmp_phy_combo_pm_ops,
- .of_match_table = qcom_qmp_combo_phy_of_match_table,
+ .pm = &qmp_combo_pm_ops,
+ .of_match_table = qmp_combo_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_combo_driver);
+module_platform_driver(qmp_combo_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP USB+DP combo PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index be6a94439b6c..461f0b5d464a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -36,46 +36,13 @@
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_COM_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
@@ -123,14 +90,8 @@ enum qphy_reg_layout {
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -223,36 +184,20 @@ static const struct qmp_phy_init_tbl msm8996_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0, 0x0e),
};
-struct qmp_phy;
-
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ /* number of PHYs provided by this block */
+ int num_phys;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
int serdes_tbl_num;
- const struct qmp_phy_init_tbl *serdes_tbl_sec;
- int serdes_tbl_num_sec;
const struct qmp_phy_init_tbl *tx_tbl;
int tx_tbl_num;
- const struct qmp_phy_init_tbl *tx_tbl_sec;
- int tx_tbl_num_sec;
const struct qmp_phy_init_tbl *rx_tbl;
int rx_tbl_num;
- const struct qmp_phy_init_tbl *rx_tbl_sec;
- int rx_tbl_num_sec;
const struct qmp_phy_init_tbl *pcs_tbl;
int pcs_tbl_num;
- const struct qmp_phy_init_tbl *pcs_tbl_sec;
- int pcs_tbl_num_sec;
- const struct qmp_phy_init_tbl *pcs_misc_tbl;
- int pcs_misc_tbl_num;
- const struct qmp_phy_init_tbl *pcs_misc_tbl_sec;
- int pcs_misc_tbl_num_sec;
/* clock ids to be requested */
const char * const *clk_list;
@@ -289,12 +234,10 @@ struct qmp_phy_cfg {
* @tx: iomapped memory space for lane's tx
* @rx: iomapped memory space for lane's rx
* @pcs: iomapped memory space for lane's pcs
- * @pcs_misc: iomapped memory space for lane's pcs_misc
* @pipe_clk: pipe clock
* @index: lane index
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
- * @mode: current PHY mode
*/
struct qmp_phy {
struct phy *phy;
@@ -303,12 +246,10 @@ struct qmp_phy {
void __iomem *tx;
void __iomem *rx;
void __iomem *pcs;
- void __iomem *pcs_misc;
struct clk *pipe_clk;
unsigned int index;
struct qcom_qmp *qmp;
struct reset_control *lane_rst;
- enum phy_mode mode;
};
/**
@@ -377,8 +318,7 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 3,
+ .num_phys = 3,
.serdes_tbl = msm8996_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_pcie_serdes_tbl),
@@ -406,7 +346,7 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
-static void qcom_qmp_phy_pcie_msm8996_configure_lane(void __iomem *base,
+static void qmp_pcie_msm8996_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -429,15 +369,15 @@ static void qcom_qmp_phy_pcie_msm8996_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_pcie_msm8996_configure(void __iomem *base,
+static void qmp_pcie_msm8996_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_pcie_msm8996_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
+static int qmp_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -448,11 +388,7 @@ static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
unsigned int mask, val;
int ret;
- qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
- if (cfg->serdes_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
- cfg->serdes_tbl_num_sec);
-
+ qmp_pcie_msm8996_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET);
qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL],
@@ -472,7 +408,7 @@ static int qcom_qmp_phy_pcie_msm8996_serdes_init(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_com_init(struct qmp_phy *qphy)
+static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -525,7 +461,7 @@ err_unlock:
return ret;
}
-static int qcom_qmp_phy_pcie_msm8996_com_exit(struct qmp_phy *qphy)
+static int qmp_pcie_msm8996_com_exit(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -555,21 +491,21 @@ static int qcom_qmp_phy_pcie_msm8996_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_init(struct phy *phy)
+static int qmp_pcie_msm8996_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
int ret;
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_pcie_msm8996_com_init(qphy);
+ ret = qmp_pcie_msm8996_com_init(qphy);
if (ret)
return ret;
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
+static int qmp_pcie_msm8996_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -577,12 +513,11 @@ static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
void __iomem *tx = qphy->tx;
void __iomem *rx = qphy->rx;
void __iomem *pcs = qphy->pcs;
- void __iomem *pcs_misc = qphy->pcs_misc;
void __iomem *status;
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_pcie_msm8996_serdes_init(qphy);
+ qmp_pcie_msm8996_serdes_init(qphy);
ret = reset_control_deassert(qphy->lane_rst);
if (ret) {
@@ -598,28 +533,13 @@ static int qcom_qmp_phy_pcie_msm8996_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 1);
-
- qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure_lane(rx, cfg->regs,
- cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
-
- qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- if (cfg->pcs_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
- cfg->pcs_tbl_num_sec);
-
- qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
- cfg->pcs_misc_tbl_num);
- if (cfg->pcs_misc_tbl_sec)
- qcom_qmp_phy_pcie_msm8996_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
- cfg->pcs_misc_tbl_num_sec);
+ qmp_pcie_msm8996_configure_lane(tx, cfg->regs, cfg->tx_tbl,
+ cfg->tx_tbl_num, 1);
+
+ qmp_pcie_msm8996_configure_lane(rx, cfg->regs, cfg->rx_tbl,
+ cfg->rx_tbl_num, 1);
+
+ qmp_pcie_msm8996_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
* Pull out PHY from POWER DOWN state.
@@ -657,7 +577,7 @@ err_reset_lane:
return ret;
}
-static int qcom_qmp_phy_pcie_msm8996_power_off(struct phy *phy)
+static int qmp_pcie_msm8996_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -682,53 +602,43 @@ static int qcom_qmp_phy_pcie_msm8996_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_exit(struct phy *phy)
+static int qmp_pcie_msm8996_exit(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
reset_control_assert(qphy->lane_rst);
- qcom_qmp_phy_pcie_msm8996_com_exit(qphy);
+ qmp_pcie_msm8996_com_exit(qphy);
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_enable(struct phy *phy)
+static int qmp_pcie_msm8996_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_msm8996_init(phy);
+ ret = qmp_pcie_msm8996_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_msm8996_power_on(phy);
+ ret = qmp_pcie_msm8996_power_on(phy);
if (ret)
- qcom_qmp_phy_pcie_msm8996_exit(phy);
+ qmp_pcie_msm8996_exit(phy);
return ret;
}
-static int qcom_qmp_phy_pcie_msm8996_disable(struct phy *phy)
+static int qmp_pcie_msm8996_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_msm8996_power_off(phy);
+ ret = qmp_pcie_msm8996_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_pcie_msm8996_exit(phy);
+ return qmp_pcie_msm8996_exit(phy);
}
-static int qcom_qmp_phy_pcie_msm8996_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qphy->mode = mode;
-
- return 0;
-}
-
-static int qcom_qmp_phy_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_msm8996_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -744,7 +654,7 @@ static int qcom_qmp_phy_pcie_msm8996_vreg_init(struct device *dev, const struct
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_msm8996_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -765,7 +675,7 @@ static int qcom_qmp_phy_pcie_msm8996_reset_init(struct device *dev, const struct
return 0;
}
-static int qcom_qmp_phy_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_msm8996_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -841,10 +751,9 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_pcie_msm8996_ops = {
- .power_on = qcom_qmp_phy_pcie_msm8996_enable,
- .power_off = qcom_qmp_phy_pcie_msm8996_disable,
- .set_mode = qcom_qmp_phy_pcie_msm8996_set_mode,
+static const struct phy_ops qmp_pcie_msm8996_ops = {
+ .power_on = qmp_pcie_msm8996_enable,
+ .power_off = qmp_pcie_msm8996_disable,
.owner = THIS_MODULE,
};
@@ -853,14 +762,12 @@ static void qcom_qmp_reset_control_put(void *data)
reset_control_put(data);
}
-static
-int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
+static int qmp_pcie_msm8996_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- char prop_name[MAX_PROP_NAME];
int ret;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
@@ -872,36 +779,26 @@ int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np,
/*
* Get memory resources for each phy lane:
* Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
- * For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
- * For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
-
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
-
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- if (!qphy->pcs_misc)
- dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
"failed to get lane%d pipe clock\n", id);
}
- /* Get lane reset, if any */
- snprintf(prop_name, sizeof(prop_name), "lane%d", id);
- qphy->lane_rst = of_reset_control_get_exclusive(np, prop_name);
+ qphy->lane_rst = of_reset_control_get_exclusive_by_index(np, 0);
if (IS_ERR(qphy->lane_rst)) {
dev_err(dev, "failed to get lane%d reset\n", id);
return PTR_ERR(qphy->lane_rst);
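
Fetching the lane reset by index rather than by the "lane%d" name removes the remaining need for the prop_name buffer (and the MAX_PROP_NAME define dropped above). The same lookup in isolation:

#include <linux/reset.h>

static struct reset_control *example_get_lane_reset(struct device_node *np)
{
	/* first (and only) entry of the child node's "resets" property */
	return of_reset_control_get_exclusive_by_index(np, 0);
}
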
@@ -911,7 +808,7 @@ int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np,
if (ret)
return ret;
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_msm8996_ops);
+ generic_phy = devm_phy_create(dev, np, &qmp_pcie_msm8996_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -927,16 +824,16 @@ int qcom_qmp_phy_pcie_msm8996_create(struct device *dev, struct device_node *np,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_pcie_msm8996_of_match_table[] = {
+static const struct of_device_id qmp_pcie_msm8996_of_match_table[] = {
{
.compatible = "qcom,msm8996-qmp-pcie-phy",
.data = &msm8996_pciephy_cfg,
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_msm8996_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_pcie_msm8996_of_match_table);
-static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
+static int qmp_pcie_msm8996_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -964,25 +861,22 @@ static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- expected_phys = cfg->nlanes;
+ expected_phys = cfg->num_phys;
mutex_init(&qmp->phy_mutex);
- ret = qcom_qmp_phy_pcie_msm8996_clk_init(dev, cfg);
+ ret = qmp_pcie_msm8996_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_msm8996_reset_init(dev, cfg);
+ ret = qmp_pcie_msm8996_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_msm8996_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_pcie_msm8996_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -993,18 +887,10 @@ static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
if (!qmp->phys)
return -ENOMEM;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_pcie_msm8996_create(dev, child, id, serdes, cfg);
+ ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -1026,28 +912,23 @@ static int qcom_qmp_phy_pcie_msm8996_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_pcie_msm8996_driver = {
- .probe = qcom_qmp_phy_pcie_msm8996_probe,
+static struct platform_driver qmp_pcie_msm8996_driver = {
+ .probe = qmp_pcie_msm8996_probe,
.driver = {
.name = "qcom-qmp-msm8996-pcie-phy",
- .of_match_table = qcom_qmp_phy_pcie_msm8996_of_match_table,
+ .of_match_table = qmp_pcie_msm8996_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_pcie_msm8996_driver);
+module_platform_driver(qmp_pcie_msm8996_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP MSM8996 PCIe PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 2d65e1f56bfc..5be5348fbb26 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -32,49 +32,11 @@
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
-
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-#define POWER_DOWN_DELAY_US_MIN 10
-#define POWER_DOWN_DELAY_US_MAX 11
-
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
struct qmp_phy_init_tbl {
unsigned int offset;
@@ -123,14 +85,8 @@ enum qphy_reg_layout {
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -1344,14 +1300,9 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
};
-struct qmp_phy;
-
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -1390,7 +1341,6 @@ struct qmp_phy_cfg {
unsigned int start_ctrl;
unsigned int pwrdn_ctrl;
- unsigned int mask_com_pcs_ready;
/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
unsigned int phy_status;
@@ -1400,9 +1350,6 @@ struct qmp_phy_cfg {
int pwrdn_delay_min;
int pwrdn_delay_max;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
/* QMP PHY pipe clock interface rate */
unsigned long pipe_clock_rate;
};
@@ -1420,9 +1367,7 @@ struct qmp_phy_cfg {
* @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
* @pcs_misc: iomapped memory space for lane's pcs_misc
* @pipe_clk: pipe clock
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
- * @mode: current PHY mode
*/
struct qmp_phy {
struct phy *phy;
@@ -1435,9 +1380,7 @@ struct qmp_phy {
void __iomem *rx2;
void __iomem *pcs_misc;
struct clk *pipe_clk;
- unsigned int index;
struct qcom_qmp *qmp;
- enum phy_mode mode;
};
/**
@@ -1514,8 +1457,7 @@ static const char * const sdm845_pciephy_reset_l[] = {
};
static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq8074_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
@@ -1543,8 +1485,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
};
static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq8074_pcie_gen3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
@@ -1573,8 +1514,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
};
static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq6018_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
@@ -1603,8 +1543,7 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
};
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sdm845_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
@@ -1634,8 +1573,7 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
};
static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sdm845_qhp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
@@ -1663,8 +1601,7 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
};
static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -1702,8 +1639,7 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
};
static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8250_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -1735,15 +1671,13 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
- .is_dual_lane_phy = true,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
.pwrdn_delay_max = 1005, /* us */
};
static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = msm8998_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
@@ -1767,8 +1701,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
};
static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sc8180x_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
@@ -1797,8 +1730,7 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
};
static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sdx55_qmp_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
@@ -1822,15 +1754,13 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS_4_20,
- .is_dual_lane_phy = true,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
.pwrdn_delay_max = 1005, /* us */
};
static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
@@ -1860,8 +1790,7 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
};
static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
- .type = PHY_TYPE_PCIE,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
@@ -1885,13 +1814,12 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
- .is_dual_lane_phy = true,
.has_pwrdn_delay = true,
.pwrdn_delay_min = 995, /* us */
.pwrdn_delay_max = 1005, /* us */
};
-static void qcom_qmp_phy_pcie_configure_lane(void __iomem *base,
+static void qmp_pcie_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -1914,31 +1842,30 @@ static void qcom_qmp_phy_pcie_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_pcie_configure(void __iomem *base,
- const unsigned int *regs,
- const struct qmp_phy_init_tbl tbl[],
- int num)
+static void qmp_pcie_configure(void __iomem *base,
+ const unsigned int *regs,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
{
- qcom_qmp_phy_pcie_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_pcie_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_pcie_serdes_init(struct qmp_phy *qphy)
+static int qmp_pcie_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
- if (cfg->serdes_tbl_sec)
- qcom_qmp_phy_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
- cfg->serdes_tbl_num_sec);
+ qmp_pcie_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_pcie_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, cfg->serdes_tbl_num_sec);
return 0;
}
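
Dropping the serdes_tbl_sec NULL check relies on the configure helpers being safe no-ops for an empty table: with num == 0 the walk never dereferences the (possibly NULL) table pointer. Illustration of that property:

static void example_configure(void __iomem *base,
			      const struct qmp_phy_init_tbl tbl[], int num)
{
	int i;

	/* a zero count means nothing is written, so a NULL tbl is never read */
	for (i = 0; i < num; i++)
		writel(tbl[i].val, base + tbl[i].offset);
}
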
-static int qcom_qmp_phy_pcie_com_init(struct qmp_phy *qphy)
+static int qmp_pcie_init(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs = qphy->pcs;
@@ -1985,8 +1912,9 @@ err_disable_regulators:
return ret;
}
-static int qcom_qmp_phy_pcie_com_exit(struct qmp_phy *qphy)
+static int qmp_pcie_exit(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1999,21 +1927,7 @@ static int qcom_qmp_phy_pcie_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_pcie_init(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- int ret;
- dev_vdbg(qmp->dev, "Initializing QMP phy\n");
-
- ret = qcom_qmp_phy_pcie_com_init(qphy);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
+static int qmp_pcie_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -2026,7 +1940,7 @@ static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_pcie_serdes_init(qphy);
+ qmp_pcie_serdes_init(qphy);
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -2035,47 +1949,31 @@ static int qcom_qmp_phy_pcie_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 1);
-
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
- if (cfg->tx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl_sec,
- cfg->tx_tbl_num_sec, 2);
+ qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_pcie_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, cfg->tx_tbl_num_sec, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl,
+ cfg->tx_tbl_num, 2);
+ qmp_pcie_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl_sec,
+ cfg->tx_tbl_num_sec, 2);
}
- qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(rx, cfg->regs,
- cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
-
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
- if (cfg->rx_tbl_sec)
- qcom_qmp_phy_pcie_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl_sec,
- cfg->rx_tbl_num_sec, 2);
+ qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_pcie_configure_lane(rx, cfg->regs, cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl,
+ cfg->rx_tbl_num, 2);
+ qmp_pcie_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl_sec,
+ cfg->rx_tbl_num_sec, 2);
}
- qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
- if (cfg->pcs_tbl_sec)
- qcom_qmp_phy_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec,
- cfg->pcs_tbl_num_sec);
+ qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_pcie_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, cfg->pcs_tbl_num_sec);
- qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
- cfg->pcs_misc_tbl_num);
- if (cfg->pcs_misc_tbl_sec)
- qcom_qmp_phy_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec,
- cfg->pcs_misc_tbl_num_sec);
+ qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num);
+ qmp_pcie_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, cfg->pcs_misc_tbl_num_sec);
/*
* Pull out PHY from POWER DOWN state.
@@ -2111,7 +2009,7 @@ err_disable_pipe_clk:
return ret;
}
-static int qcom_qmp_phy_pcie_power_off(struct phy *phy)
+static int qmp_pcie_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -2136,51 +2034,33 @@ static int qcom_qmp_phy_pcie_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_pcie_exit(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qcom_qmp_phy_pcie_com_exit(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_pcie_enable(struct phy *phy)
+static int qmp_pcie_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_init(phy);
+ ret = qmp_pcie_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_power_on(phy);
+ ret = qmp_pcie_power_on(phy);
if (ret)
- qcom_qmp_phy_pcie_exit(phy);
+ qmp_pcie_exit(phy);
return ret;
}
-static int qcom_qmp_phy_pcie_disable(struct phy *phy)
+static int qmp_pcie_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_pcie_power_off(phy);
+ ret = qmp_pcie_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_pcie_exit(phy);
-}
-
-static int qcom_qmp_phy_pcie_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- qphy->mode = mode;
-
- return 0;
+ return qmp_pcie_exit(phy);
}
-static int qcom_qmp_phy_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -2196,7 +2076,7 @@ static int qcom_qmp_phy_pcie_vreg_init(struct device *dev, const struct qmp_phy_
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -2217,7 +2097,7 @@ static int qcom_qmp_phy_pcie_reset_init(struct device *dev, const struct qmp_phy
return 0;
}
-static int qcom_qmp_phy_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_pcie_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -2300,21 +2180,18 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_pcie_ops = {
- .power_on = qcom_qmp_phy_pcie_enable,
- .power_off = qcom_qmp_phy_pcie_disable,
- .set_mode = qcom_qmp_phy_pcie_set_mode,
+static const struct phy_ops qmp_pcie_ops = {
+ .power_on = qmp_pcie_enable,
+ .power_off = qmp_pcie_disable,
.owner = THIS_MODULE,
};
-static
-int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
+static int qmp_pcie_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- char prop_name[MAX_PROP_NAME];
int ret;
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
@@ -2329,59 +2206,51 @@ int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ if (of_device_is_compatible(dev->of_node, "qcom,sdm845-qhp-pcie-phy"))
+ qphy->rx = qphy->tx;
+ else
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc &&
+ if (IS_ERR(qphy->pcs_misc) &&
of_device_is_compatible(dev->of_node, "qcom,ipq6018-qmp-pcie-phy"))
qphy->pcs_misc = qphy->pcs + 0x400;
- if (!qphy->pcs_misc)
- dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ if (IS_ERR(qphy->pcs_misc)) {
+ if (cfg->pcs_misc_tbl || cfg->pcs_misc_tbl_sec)
+ return PTR_ERR(qphy->pcs_misc);
+ }
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
"failed to get lane%d pipe clock\n", id);
}
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_pcie_ops);
+ generic_phy = devm_phy_create(dev, np, &qmp_pcie_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -2389,7 +2258,6 @@ int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -2397,7 +2265,7 @@ int qcom_qmp_phy_pcie_create(struct device *dev, struct device_node *np, int id,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_pcie_of_match_table[] = {
+static const struct of_device_id qmp_pcie_of_match_table[] = {
{
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
@@ -2440,9 +2308,9 @@ static const struct of_device_id qcom_qmp_phy_pcie_of_match_table[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_pcie_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_pcie_of_match_table);
-static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
+static int qmp_pcie_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -2470,21 +2338,18 @@ static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- ret = qcom_qmp_phy_pcie_clk_init(dev, cfg);
+ ret = qmp_pcie_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_reset_init(dev, cfg);
+ ret = qmp_pcie_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_pcie_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_pcie_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -2495,18 +2360,10 @@ static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
if (!qmp->phys)
return -ENOMEM;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_pcie_create(dev, child, id, serdes, cfg);
+ ret = qmp_pcie_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2528,28 +2385,23 @@ static int qcom_qmp_phy_pcie_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_pcie_driver = {
- .probe = qcom_qmp_phy_pcie_probe,
+static struct platform_driver qmp_pcie_driver = {
+ .probe = qmp_pcie_probe,
.driver = {
.name = "qcom-qmp-pcie-phy",
- .of_match_table = qcom_qmp_phy_pcie_of_match_table,
+ .of_match_table = qmp_pcie_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_pcie_driver);
+module_platform_driver(qmp_pcie_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP PCIe PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
index 61a44519f969..04f260711ea1 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
@@ -7,11 +7,24 @@
#define QCOM_PHY_QMP_PCS_V5_H_
/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V5_PCS_LOCK_DETECT_CONFIG6 0x0d8
#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V5_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V5_PCS_RX_CONFIG 0x1b0
+#define QPHY_V5_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V5_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V5_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V5_PCS_EQ_CONFIG1 0x1dc
#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
+#define QPHY_V5_PCS_EQ_CONFIG5 0x1ec
#endif
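The PCS offsets added above are consumed by init tables such as the sc8280xp_usb3_uniphy_pcs_tbl[] introduced further down in this diff. For orientation, a QMP init-table entry pairs one of these offsets with the value to program, built with a QMP_PHY_INIT_CFG()-style macro along the lines sketched here; the struct shape mirrors the earlier sketch, and the values shown are taken from the sc8280xp table below rather than being new tuning data.

#include <linux/types.h>

struct example_pcs_init_entry {
	unsigned int offset;
	unsigned int val;
	u8 lane_mask;
};

/* Pair a register offset with the value to program (all lanes by default). */
#define EXAMPLE_PHY_INIT_CFG(o, v)	\
	{				\
		.offset = o,		\
		.val = v,		\
		.lane_mask = 0xff,	\
	}

static const struct example_pcs_init_entry example_v5_pcs_tbl[] = {
	EXAMPLE_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xd0),
	EXAMPLE_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
	EXAMPLE_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
};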
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
new file mode 100644
index 000000000000..a1c088bd5158
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V5_5NM_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_V5_5NM_H_
+
+/* Only for QMP V5 5NM PHY - TX registers */
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_5NM_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_5NM_TX_LANE_MODE_2 0x7c
+#define QSERDES_V5_5NM_TX_LANE_MODE_3 0x80
+#define QSERDES_V5_5NM_TX_BIST_MODE_LANENO 0x00
+#define QSERDES_V5_5NM_TX_BIST_INVERT 0x04
+#define QSERDES_V5_5NM_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V5_5NM_TX_TX_IDLE_LVL_LARGE_AMP 0x10
+#define QSERDES_V5_5NM_TX_TX_DRV_LVL 0x14
+#define QSERDES_V5_5NM_TX_TX_DRV_LVL_OFFSET 0x18
+#define QSERDES_V5_5NM_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V5_5NM_TX_PRE_STALL_LDO_BOOST_EN 0x20
+#define QSERDES_V5_5NM_TX_LPB_EN 0x24
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_TX 0x28
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_RX 0x2c
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_5NM_TX_PERL_LENGTH1 0x38
+#define QSERDES_V5_5NM_TX_PERL_LENGTH2 0x3c
+#define QSERDES_V5_5NM_TX_SERDES_BYP_EN_OUT 0x40
+#define QSERDES_V5_5NM_TX_DEBUG_BUS_SEL 0x44
+#define QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN 0x48
+#define QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN 0x4c
+#define QSERDES_V5_5NM_TX_TX_POL_INV 0x50
+#define QSERDES_V5_5NM_TX_PARRATE_REC_DETECT_IDLE_EN 0x54
+#define QSERDES_V5_5NM_TX_BIST_PATTERN1 0x58
+#define QSERDES_V5_5NM_TX_BIST_PATTERN2 0x5c
+#define QSERDES_V5_5NM_TX_BIST_PATTERN3 0x60
+#define QSERDES_V5_5NM_TX_BIST_PATTERN4 0x64
+#define QSERDES_V5_5NM_TX_BIST_PATTERN5 0x68
+#define QSERDES_V5_5NM_TX_BIST_PATTERN6 0x6c
+#define QSERDES_V5_5NM_TX_BIST_PATTERN7 0x70
+#define QSERDES_V5_5NM_TX_BIST_PATTERN8 0x74
+#define QSERDES_V5_5NM_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_5NM_TX_LANE_MODE_2 0x7c
+#define QSERDES_V5_5NM_TX_LANE_MODE_3 0x80
+#define QSERDES_V5_5NM_TX_ATB_SEL1 0x84
+#define QSERDES_V5_5NM_TX_ATB_SEL2 0x88
+#define QSERDES_V5_5NM_TX_RCV_DETECT_LVL 0x8c
+#define QSERDES_V5_5NM_TX_RCV_DETECT_LVL_2 0x90
+#define QSERDES_V5_5NM_TX_PRBS_SEED1 0x94
+#define QSERDES_V5_5NM_TX_PRBS_SEED2 0x98
+#define QSERDES_V5_5NM_TX_PRBS_SEED3 0x9c
+#define QSERDES_V5_5NM_TX_PRBS_SEED4 0xa0
+#define QSERDES_V5_5NM_TX_RESET_GEN 0xa4
+#define QSERDES_V5_5NM_TX_RESET_GEN_MUXES 0xa8
+#define QSERDES_V5_5NM_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V5_5NM_TX_VMODE_CTRL1 0xb0
+#define QSERDES_V5_5NM_TX_ALOG_OBSV_BUS_CTRL_1 0xb4
+#define QSERDES_V5_5NM_TX_BIST_STATUS 0xb8
+#define QSERDES_V5_5NM_TX_BIST_ERROR_COUNT1 0xbc
+#define QSERDES_V5_5NM_TX_BIST_ERROR_COUNT2 0xc0
+#define QSERDES_V5_5NM_TX_ALOG_OBSV_BUS_STATUS_1 0xc4
+#define QSERDES_V5_5NM_TX_LANE_DIG_CONFIG 0xc8
+#define QSERDES_V5_5NM_TX_PI_QEC_CTRL 0xcc
+#define QSERDES_V5_5NM_TX_PRE_EMPH 0xd0
+#define QSERDES_V5_5NM_TX_SW_RESET 0xd4
+#define QSERDES_V5_5NM_TX_TX_BAND 0xd8
+#define QSERDES_V5_5NM_TX_SLEW_CNTL0 0xdc
+#define QSERDES_V5_5NM_TX_SLEW_CNTL1 0xe0
+#define QSERDES_V5_5NM_TX_INTERFACE_SELECT 0xe4
+#define QSERDES_V5_5NM_TX_DIG_BKUP_CTRL 0xe8
+#define QSERDES_V5_5NM_TX_DEBUG_BUS0 0xec
+#define QSERDES_V5_5NM_TX_DEBUG_BUS1 0xf0
+#define QSERDES_V5_5NM_TX_DEBUG_BUS2 0xf4
+#define QSERDES_V5_5NM_TX_DEBUG_BUS3 0xf8
+#define QSERDES_V5_5NM_TX_TX_BKUP_RO_BUS 0xfc
+
+/* Only for QMP V5 5NM PHY - RX registers */
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE0 0x000
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE1 0x004
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE2 0x008
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE0 0x010
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE1 0x014
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE2 0x018
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_SO_GAIN_RATE3 0x01c
+#define QSERDES_V5_5NM_RX_UCDR_SO_SATURATION 0x020
+#define QSERDES_V5_5NM_RX_UCDR_FO_TO_SO_DELAY 0x024
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE0 0x028
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE0 0x02c
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE1 0x030
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE1 0x034
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE2 0x038
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE2 0x03c
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_LOW_RATE3 0x040
+#define QSERDES_V5_5NM_RX_UCDR_FASTLOCK_COUNT_HIGH_RATE3 0x044
+#define QSERDES_V5_5NM_RX_UCDR_PI_CTRL1 0x048
+#define QSERDES_V5_5NM_RX_UCDR_PI_CTRL2 0x04c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE0 0x050
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE1 0x054
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE2 0x058
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH1_RATE3 0x05c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE0 0x060
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE1 0x064
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE2 0x068
+#define QSERDES_V5_5NM_RX_UCDR_SB2_THRESH2_RATE3 0x06c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE0 0x070
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE1 0x074
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE2 0x078
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN1_RATE3 0x07c
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE0 0x080
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE1 0x084
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE2 0x088
+#define QSERDES_V5_5NM_RX_UCDR_SB2_GAIN2_RATE3 0x08c
+#define QSERDES_V5_5NM_RX_RXCLK_DIV2_CTRL 0x090
+#define QSERDES_V5_5NM_RX_RX_BAND 0x094
+#define QSERDES_V5_5NM_RX_RX_TERM_BW 0x098
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE0 0x09c
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE1 0x0a0
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE2 0x0a4
+#define QSERDES_V5_5NM_RX_UCDR_FO_GAIN_RATE3 0x0a8
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE0 0x0ac
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE1 0x0b0
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE2 0x0b4
+#define QSERDES_V5_5NM_RX_UCDR_SO_GAIN_RATE3 0x0b8
+#define QSERDES_V5_5NM_RX_UCDR_PI_CONTROLS 0x0bc
+#define QSERDES_V5_5NM_RX_UCDR_PD_DATA_FILTER_ENABLES 0x0c0
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE0 0x0c4
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE1 0x0c8
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE2 0x0cc
+#define QSERDES_V5_5NM_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3 0x0d0
+#define QSERDES_V5_5NM_RX_AUX_CONTROL 0x0d4
+#define QSERDES_V5_5NM_RX_AUXDATA_TB 0x0d8
+#define QSERDES_V5_5NM_RX_RCLK_AUXDATA_SEL 0x0dc
+#define QSERDES_V5_5NM_RX_EOM_CTRL 0x0e0
+#define QSERDES_V5_5NM_RX_AC_JTAG_ENABLE 0x0e4
+#define QSERDES_V5_5NM_RX_AC_JTAG_INITP 0x0e8
+#define QSERDES_V5_5NM_RX_AC_JTAG_INITN 0x0ec
+#define QSERDES_V5_5NM_RX_AC_JTAG_LVL 0x0f0
+#define QSERDES_V5_5NM_RX_AC_JTAG_MODE 0x0f4
+#define QSERDES_V5_5NM_RX_AC_JTAG_RESET 0x0f8
+#define QSERDES_V5_5NM_RX_RX_RCVR_IQ_EN 0x0fc
+#define QSERDES_V5_5NM_RX_RX_Q_EN_RATES 0x100
+#define QSERDES_V5_5NM_RX_RX_IDAC_I0_DC_OFFSETS 0x104
+#define QSERDES_V5_5NM_RX_RX_IDAC_I0BAR_DC_OFFSETS 0x108
+#define QSERDES_V5_5NM_RX_RX_IDAC_I1_DC_OFFSETS 0x10c
+#define QSERDES_V5_5NM_RX_RX_IDAC_I1BAR_DC_OFFSETS 0x110
+#define QSERDES_V5_5NM_RX_RX_IDAC_Q_DC_OFFSETS 0x114
+#define QSERDES_V5_5NM_RX_RX_IDAC_QBAR_DC_OFFSETS 0x118
+#define QSERDES_V5_5NM_RX_RX_IDAC_A_DC_OFFSETS 0x11c
+#define QSERDES_V5_5NM_RX_RX_IDAC_ABAR_DC_OFFSETS 0x120
+#define QSERDES_V5_5NM_RX_RX_IDAC_EN 0x124
+#define QSERDES_V5_5NM_RX_RX_IDAC_ENABLES 0x128
+#define QSERDES_V5_5NM_RX_RX_IDAC_SIGN 0x12c
+#define QSERDES_V5_5NM_RX_RX_IVCM_CAL_CODE_OVERRIDE 0x130
+#define QSERDES_V5_5NM_RX_RX_IVCM_CAL_CTRL1 0x134
+#define QSERDES_V5_5NM_RX_RX_IVCM_CAL_CTRL2 0x138
+#define QSERDES_V5_5NM_RX_RX_IVCM_POSTCAL_OFFSET 0x13c
+#define QSERDES_V5_5NM_RX_RX_SUMMER_CAL_SPD_MODE 0x140
+#define QSERDES_V5_5NM_RX_RX_HIGHZ_PARRATE 0x144
+#define QSERDES_V5_5NM_RX_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x148
+#define QSERDES_V5_5NM_RX_DFE_1 0x14c
+#define QSERDES_V5_5NM_RX_DFE_2 0x150
+#define QSERDES_V5_5NM_RX_DFE_3 0x154
+#define QSERDES_V5_5NM_RX_DFE_4 0x158
+#define QSERDES_V5_5NM_RX_DFE_TAP3_CTRL 0x15c
+#define QSERDES_V5_5NM_RX_DFE_TAP3_MANVAL_KTAP 0x160
+#define QSERDES_V5_5NM_RX_DFE_TAP4_CTRL 0x164
+#define QSERDES_V5_5NM_RX_DFE_TAP4_MANVAL_KTAP 0x168
+#define QSERDES_V5_5NM_RX_DFE_TAP5_CTRL 0x16c
+#define QSERDES_V5_5NM_RX_DFE_TAP5_MANVAL_KTAP 0x170
+#define QSERDES_V5_5NM_RX_TX_ADPT_CTRL 0x174
+#define QSERDES_V5_5NM_RX_DFE_DAC_ENABLE1 0x178
+#define QSERDES_V5_5NM_RX_DFE_DAC_ENABLE2 0x17c
+#define QSERDES_V5_5NM_RX_TX_ADAPT_PRE_THRESH1 0x180
+#define QSERDES_V5_5NM_RX_TX_ADAPT_PRE_THRESH2 0x184
+#define QSERDES_V5_5NM_RX_TX_ADAPT_POST_THRESH1 0x188
+#define QSERDES_V5_5NM_RX_TX_ADAPT_POST_THRESH2 0x18c
+#define QSERDES_V5_5NM_RX_TX_ADAPT_MAIN_THRESH1 0x190
+#define QSERDES_V5_5NM_RX_TX_ADAPT_MAIN_THRESH2 0x194
+#define QSERDES_V5_5NM_RX_VGA_CAL_CNTRL1 0x198
+#define QSERDES_V5_5NM_RX_VGA_CAL_CNTRL2 0x19c
+#define QSERDES_V5_5NM_RX_VGA_CAL_MAN_VAL 0x1a0
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_CNTRL1 0x1a4
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_CNTRL2 0x1a8
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE0 0x1ac
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE1 0x1b0
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE2 0x1b4
+#define QSERDES_V5_5NM_RX_VTHRESH_CAL_MAN_VAL_RATE3 0x1b8
+#define QSERDES_V5_5NM_RX_GM_CAL 0x1bc
+#define QSERDES_V5_5NM_RX_RX_VGA_GAIN2_BLK1 0x1c0
+#define QSERDES_V5_5NM_RX_RX_VGA_GAIN2_BLK2 0x1c4
+#define QSERDES_V5_5NM_RX_RX_EQU_ADAPTOR_CNTRL2 0x1c8
+#define QSERDES_V5_5NM_RX_RX_EQU_ADAPTOR_CNTRL3 0x1cc
+#define QSERDES_V5_5NM_RX_RX_EQU_ADAPTOR_CNTRL4 0x1d0
+#define QSERDES_V5_5NM_RX_RX_IDAC_TSETTLE_LOW 0x1d4
+#define QSERDES_V5_5NM_RX_RX_EQ_OFFSET_LSB 0x1d8
+#define QSERDES_V5_5NM_RX_RX_EQ_OFFSET_MSB 0x1dc
+#define QSERDES_V5_5NM_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1e0
+#define QSERDES_V5_5NM_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x1e4
+#define QSERDES_V5_5NM_RX_SIGDET_ENABLES 0x1e8
+#define QSERDES_V5_5NM_RX_SIGDET_CNTRL 0x1ec
+#define QSERDES_V5_5NM_RX_SIGDET_LVL 0x1f0
+#define QSERDES_V5_5NM_RX_SIGDET_DEGLITCH_CNTRL 0x1f4
+#define QSERDES_V5_5NM_RX_CDR_FREEZE_UP_DN 0x1f8
+#define QSERDES_V5_5NM_RX_CDR_RESET_OVERRIDE 0x1fc
+#define QSERDES_V5_5NM_RX_RX_INTERFACE_MODE 0x200
+#define QSERDES_V5_5NM_RX_JITTER_GEN_MODE 0x204
+#define QSERDES_V5_5NM_RX_SJ_AMP1 0x208
+#define QSERDES_V5_5NM_RX_SJ_AMP2 0x20c
+#define QSERDES_V5_5NM_RX_SJ_PER1 0x210
+#define QSERDES_V5_5NM_RX_SJ_PER2 0x214
+#define QSERDES_V5_5NM_RX_PPM_OFFSET1 0x218
+#define QSERDES_V5_5NM_RX_PPM_OFFSET2 0x21c
+#define QSERDES_V5_5NM_RX_SIGN_PPM_PERIOD1 0x220
+#define QSERDES_V5_5NM_RX_SIGN_PPM_PERIOD2 0x224
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B0 0x228
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B1 0x22c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B2 0x230
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B3 0x234
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B4 0x238
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B5 0x23c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B6 0x240
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE_0_1_B7 0x244
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B0 0x248
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B1 0x24c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B2 0x250
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B3 0x254
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B4 0x258
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B5 0x25c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B6 0x260
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE2_B7 0x264
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B0 0x268
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B1 0x26c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B2 0x270
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B3 0x274
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B4 0x278
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B5 0x27c
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B6 0x280
+#define QSERDES_V5_5NM_RX_RX_MODE_RATE3_B7 0x284
+#define QSERDES_V5_5NM_RX_PHPRE_CTRL 0x288
+#define QSERDES_V5_5NM_RX_PHPRE_INITVAL 0x28c
+#define QSERDES_V5_5NM_RX_DFE_EN_TIMER 0x290
+#define QSERDES_V5_5NM_RX_DFE_CTLE_POST_CAL_OFFSET 0x294
+#define QSERDES_V5_5NM_RX_DCC_CTRL1 0x298
+#define QSERDES_V5_5NM_RX_DCC_CTRL2 0x29c
+#define QSERDES_V5_5NM_RX_DCC_OFFSET 0x2a0
+#define QSERDES_V5_5NM_RX_DCC_CMUX_POSTCAL_OFFSET 0x2a4
+#define QSERDES_V5_5NM_RX_DCC_CMUX_CAL_CTRL1 0x2a8
+#define QSERDES_V5_5NM_RX_DCC_CMUX_CAL_CTRL2 0x2ac
+#define QSERDES_V5_5NM_RX_ALOG_OBSV_BUS_CTRL_1 0x2b0
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL1 0x2b4
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL2 0x2b8
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL3 0x2bc
+#define QSERDES_V5_5NM_RX_RX_MARG_CTRL_4 0x2c0
+#define QSERDES_V5_5NM_RX_RX_MARG_CFG_RATE_0_1 0x2c4
+#define QSERDES_V5_5NM_RX_RX_MARG_CFG_RATE_2_3 0x2c8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_CTRL1 0x2cc
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_CTRL2 0x2d0
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH1_RATE210 0x2d4
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH1_RATE3 0x2d8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH2_RATE210 0x2dc
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH2_RATE3 0x2e0
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH3_RATE210 0x2e4
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH3_RATE3 0x2e8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH4_RATE210 0x2ec
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH4_RATE3 0x2f0
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH5_RATE210 0x2f4
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH5_RATE3 0x2f8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH6_RATE210 0x2fc
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH6_RATE3 0x300
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH7_RATE210 0x304
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_THRESH7_RATE3 0x308
+#define QSERDES_V5_5NM_RX_Q_PI_INTRINSIC_BIAS_RATE10 0x30c
+#define QSERDES_V5_5NM_RX_Q_PI_INTRINSIC_BIAS_RATE32 0x310
+#define QSERDES_V5_5NM_RX_RX_MARG_VERTICAL_CTRL 0x314
+#define QSERDES_V5_5NM_RX_RX_MARG_VERTICAL_CODE 0x318
+#define QSERDES_V5_5NM_RX_RES_CODE_THRESH_HIGH_AND_BYP 0x31c
+#define QSERDES_V5_5NM_RX_RES_CODE_THRESH_LOW 0x320
+#define QSERDES_V5_5NM_RX_RX_BKUP_CTRL1 0x324
+#define QSERDES_V5_5NM_RX_RX_BKUP_CTRL2 0x328
+#define QSERDES_V5_5NM_RX_RX_BKUP_CTRL3 0x32c
+#define QSERDES_V5_5NM_RX_PI_CTRL1 0x330
+#define QSERDES_V5_5NM_RX_PI_CTRL2 0x334
+#define QSERDES_V5_5NM_RX_PI_QUAD 0x338
+#define QSERDES_V5_5NM_RX_QPI_CTRL1 0x33c
+#define QSERDES_V5_5NM_RX_QPI_CTRL2 0x340
+#define QSERDES_V5_5NM_RX_QPI_QUAD 0x344
+#define QSERDES_V5_5NM_RX_IDATA1 0x348
+#define QSERDES_V5_5NM_RX_IDATA2 0x34c
+#define QSERDES_V5_5NM_RX_IDATA3 0x350
+#define QSERDES_V5_5NM_RX_AC_JTAG_OUTP 0x354
+#define QSERDES_V5_5NM_RX_AC_JTAG_OUTN 0x358
+#define QSERDES_V5_5NM_RX_RX_SIGDET 0x35c
+#define QSERDES_V5_5NM_RX_ALOG_OBSV_BUS_STATUS_1 0x360
+#define QSERDES_V5_5NM_RX_READ_EQCODE 0x364
+#define QSERDES_V5_5NM_RX_READ_OFFSETCODE 0x368
+#define QSERDES_V5_5NM_RX_IA_ERROR_COUNTER_LOW 0x36c
+#define QSERDES_V5_5NM_RX_IA_ERROR_COUNTER_HIGH 0x370
+#define QSERDES_V5_5NM_RX_VGA_READ_CODE 0x374
+#define QSERDES_V5_5NM_RX_VTHRESH_READ_CODE 0x378
+#define QSERDES_V5_5NM_RX_DFE_TAP1_READ_CODE 0x37c
+#define QSERDES_V5_5NM_RX_DFE_TAP2_READ_CODE 0x380
+#define QSERDES_V5_5NM_RX_DFE_TAP3_READ_CODE 0x384
+#define QSERDES_V5_5NM_RX_DFE_TAP4_READ_CODE 0x388
+#define QSERDES_V5_5NM_RX_DFE_TAP5_READ_CODE 0x38c
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I0 0x390
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I0BAR 0x394
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I1 0x398
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_I1BAR 0x39c
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_Q 0x3a0
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_QBAR 0x3a4
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_A 0x3a8
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_ABAR 0x3ac
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_SM_ON 0x3b0
+#define QSERDES_V5_5NM_RX_IDAC_STATUS_SIGNERROR 0x3b4
+#define QSERDES_V5_5NM_RX_IVCM_CAL_STATUS 0x3b8
+#define QSERDES_V5_5NM_RX_IVCM_CAL_DEBUG_STATUS 0x3bc
+#define QSERDES_V5_5NM_RX_DCC_CAL_STATUS 0x3c0
+#define QSERDES_V5_5NM_RX_DCC_READ_CODE_STATUS 0x3c4
+#define QSERDES_V5_5NM_RX_RX_MARG_DEBUG1_STATUS 0x3c8
+#define QSERDES_V5_5NM_RX_RX_MARG_DEBUG2_STATUS 0x3cc
+#define QSERDES_V5_5NM_RX_RX_MARG_READ_CODE_STATUS 0x3d0
+#define QSERDES_V5_5NM_RX_EOM_ERR_CNT_LSB_STATUS 0x3d4
+#define QSERDES_V5_5NM_RX_EOM_ERR_CNT_MSB_STATUS 0x3d8
+#define QSERDES_V5_5NM_RX_RX_MARG_COARSE_TUNE_STATUS 0x3dc
+#define QSERDES_V5_5NM_RX_RX_BKUP_READ_BUS1_STATUS 0x3e0
+#define QSERDES_V5_5NM_RX_RX_BKUP_READ_BUS2_STATUS 0x3e4
+#define QSERDES_V5_5NM_RX_RX_BKUP_READ_BUS3_STATUS 0x3e8
+
+#endif
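These V5 5nm TX/RX offsets are relative to a single lane's register block. Drivers that support dual-lane PHYs hold separate iomapped pointers per lane (tx/tx2 and rx/rx2 in the QMP code elsewhere in this diff) and program the second block only when cfg->lanes >= 2. A minimal sketch under that assumption follows; the 0x3f value is a placeholder, not tuning from this patch.

#include <linux/io.h>

#include "phy-qcom-qmp-qserdes-txrx-v5_5nm.h"

/* Program one TX register on every available lane. */
static void example_program_tx_lane_mode(void __iomem *tx, void __iomem *tx2,
					 int lanes)
{
	writel(0x3f, tx + QSERDES_V5_5NM_TX_LANE_MODE_3);

	/* The second lane block only exists on dual-lane PHYs. */
	if (lanes >= 2)
		writel(0x3f, tx2 + QSERDES_V5_5NM_TX_LANE_MODE_3);
}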
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index c8583f5a54bd..c08d34ad1313 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -28,53 +28,15 @@
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+/* QPHY_PCS_READY_STATUS bit */
#define PCS_READY BIT(0)
-/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
-/* DP PHY soft reset */
-#define SW_DPPHY_RESET BIT(0)
-/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
-#define SW_DPPHY_RESET_MUX BIT(1)
-/* USB3 PHY soft reset */
-#define SW_USB3PHY_RESET BIT(2)
-/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
-#define SW_USB3PHY_RESET_MUX BIT(3)
-
-/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
-#define USB3_MODE BIT(0) /* enables USB3 mode */
-#define DP_MODE BIT(1) /* enables DP mode */
-
-/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
-#define ARCVR_DTCT_EN BIT(0)
-#define ALFPS_DTCT_EN BIT(1)
-#define ARCVR_DTCT_EVENT_SEL BIT(4)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
-#define IRQ_CLEAR BIT(0)
-
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
-/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
-#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
-
#define PHY_INIT_COMPLETE_TIMEOUT 10000
-#define POWER_DOWN_DELAY_US_MIN 10
-#define POWER_DOWN_DELAY_US_MAX 11
-
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
struct qmp_phy_init_tbl {
unsigned int offset;
@@ -115,22 +77,11 @@ struct qmp_phy_init_tbl {
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
QPHY_PCS_READY_STATUS,
- QPHY_PCS_STATUS,
- QPHY_PCS_AUTONOMOUS_MODE_CTRL,
- QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -580,14 +531,9 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
};
-struct qmp_phy;
-
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -614,9 +560,6 @@ struct qmp_phy_cfg {
/* bit offset of PHYSTATUS in QPHY_PCS_STATUS register */
unsigned int phy_status;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
-
/* true, if PCS block has no separate SW_RESET register */
bool no_pcs_sw_reset;
};
@@ -633,9 +576,7 @@ struct qmp_phy_cfg {
* @tx2: iomapped memory space for second lane's tx (in dual lane PHYs)
* @rx2: iomapped memory space for second lane's rx (in dual lane PHYs)
* @pcs_misc: iomapped memory space for lane's pcs_misc
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
- * @mode: current PHY mode
*/
struct qmp_phy {
struct phy *phy;
@@ -647,9 +588,7 @@ struct qmp_phy {
void __iomem *tx2;
void __iomem *rx2;
void __iomem *pcs_misc;
- unsigned int index;
struct qcom_qmp *qmp;
- enum phy_mode mode;
};
/**
@@ -719,8 +658,7 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg msm8996_ufs_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = msm8996_ufs_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
@@ -745,8 +683,7 @@ static const struct qmp_phy_cfg msm8996_ufs_cfg = {
};
static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sdm845_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
@@ -766,13 +703,11 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
- .is_dual_lane_phy = true,
.no_pcs_sw_reset = true,
};
static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm6115_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl),
@@ -795,8 +730,7 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
};
static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8150_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
@@ -815,13 +749,10 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8350_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
@@ -840,13 +771,10 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
- .type = PHY_TYPE_UFS,
- .nlanes = 2,
+ .lanes = 2,
.serdes_tbl = sm8350_ufsphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
@@ -865,11 +793,9 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.start_ctrl = SERDES_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
-static void qcom_qmp_phy_ufs_configure_lane(void __iomem *base,
+static void qmp_ufs_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -892,27 +818,27 @@ static void qcom_qmp_phy_ufs_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_ufs_configure(void __iomem *base,
+static void qmp_ufs_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_ufs_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_ufs_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_ufs_serdes_init(struct qmp_phy *qphy)
+static int qmp_ufs_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_ufs_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
return 0;
}
-static int qcom_qmp_phy_ufs_com_init(struct qmp_phy *qphy)
+static int qmp_ufs_com_init(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -946,7 +872,7 @@ err_disable_regulators:
return ret;
}
-static int qcom_qmp_phy_ufs_com_exit(struct qmp_phy *qphy)
+static int qmp_ufs_com_exit(struct qmp_phy *qphy)
{
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -960,7 +886,7 @@ static int qcom_qmp_phy_ufs_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_ufs_init(struct phy *phy)
+static int qmp_ufs_init(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -995,14 +921,14 @@ static int qcom_qmp_phy_ufs_init(struct phy *phy)
return ret;
}
- ret = qcom_qmp_phy_ufs_com_init(qphy);
+ ret = qmp_ufs_com_init(qphy);
if (ret)
return ret;
return 0;
}
-static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
+static int qmp_ufs_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -1014,27 +940,24 @@ static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_ufs_serdes_init(qphy);
+ qmp_ufs_serdes_init(qphy);
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_ufs_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_ufs_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_ufs_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_ufs_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
}
- qcom_qmp_phy_ufs_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_ufs_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_ufs_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_ufs_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
}
- qcom_qmp_phy_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_ufs_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
@@ -1060,7 +983,7 @@ static int qcom_qmp_phy_ufs_power_on(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_ufs_power_off(struct phy *phy)
+static int qmp_ufs_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -1084,51 +1007,41 @@ static int qcom_qmp_phy_ufs_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_ufs_exit(struct phy *phy)
+static int qmp_ufs_exit(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
- qcom_qmp_phy_ufs_com_exit(qphy);
+ qmp_ufs_com_exit(qphy);
return 0;
}
-static int qcom_qmp_phy_ufs_enable(struct phy *phy)
+static int qmp_ufs_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_ufs_init(phy);
+ ret = qmp_ufs_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_ufs_power_on(phy);
+ ret = qmp_ufs_power_on(phy);
if (ret)
- qcom_qmp_phy_ufs_exit(phy);
+ qmp_ufs_exit(phy);
return ret;
}
-static int qcom_qmp_phy_ufs_disable(struct phy *phy)
+static int qmp_ufs_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_ufs_power_off(phy);
+ ret = qmp_ufs_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_ufs_exit(phy);
+ return qmp_ufs_exit(phy);
}
-static int qcom_qmp_phy_ufs_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qphy->mode = mode;
-
- return 0;
-}
-
-static int qcom_qmp_phy_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_ufs_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -1144,7 +1057,7 @@ static int qcom_qmp_phy_ufs_vreg_init(struct device *dev, const struct qmp_phy_c
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_ufs_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -1161,14 +1074,12 @@ static int qcom_qmp_phy_ufs_clk_init(struct device *dev, const struct qmp_phy_cf
}
static const struct phy_ops qcom_qmp_ufs_ops = {
- .power_on = qcom_qmp_phy_ufs_enable,
- .power_off = qcom_qmp_phy_ufs_disable,
- .set_mode = qcom_qmp_phy_ufs_set_mode,
+ .power_on = qmp_ufs_enable,
+ .power_off = qmp_ufs_disable,
.owner = THIS_MODULE,
};
-static
-int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
+static int qmp_ufs_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
@@ -1188,45 +1099,33 @@ int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = devm_of_iomap(dev, np, 2, NULL);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
+
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc)
+ if (IS_ERR(qphy->pcs_misc))
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
generic_phy = devm_phy_create(dev, np, &qcom_qmp_ufs_ops);
@@ -1237,7 +1136,6 @@ int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -1245,7 +1143,7 @@ int qcom_qmp_phy_ufs_create(struct device *dev, struct device_node *np, int id,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_ufs_of_match_table[] = {
+static const struct of_device_id qmp_ufs_of_match_table[] = {
{
.compatible = "qcom,msm8996-qmp-ufs-phy",
.data = &msm8996_ufs_cfg,
@@ -1282,9 +1180,9 @@ static const struct of_device_id qcom_qmp_phy_ufs_of_match_table[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_ufs_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_ufs_of_match_table);
-static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
+static int qmp_ufs_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -1312,17 +1210,14 @@ static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
if (IS_ERR(serdes))
return PTR_ERR(serdes);
- ret = qcom_qmp_phy_ufs_clk_init(dev, cfg);
+ ret = qmp_ufs_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_ufs_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_ufs_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -1333,18 +1228,10 @@ static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
if (!qmp->phys)
return -ENOMEM;
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- /*
- * Prevent runtime pm from being ON by default. Users can enable
- * it using power/control in sysfs.
- */
- pm_runtime_forbid(dev);
-
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_ufs_create(dev, child, id, serdes, cfg);
+ ret = qmp_ufs_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -1355,28 +1242,23 @@ static int qcom_qmp_phy_ufs_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_ufs_driver = {
- .probe = qcom_qmp_phy_ufs_probe,
+static struct platform_driver qmp_ufs_driver = {
+ .probe = qmp_ufs_probe,
.driver = {
.name = "qcom-qmp-ufs-phy",
- .of_match_table = qcom_qmp_phy_ufs_of_match_table,
+ .of_match_table = qmp_ufs_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_ufs_driver);
+module_platform_driver(qmp_ufs_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP UFS PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 1d270356a97f..b84c0d4b5754 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -28,16 +28,11 @@
#define SW_RESET BIT(0)
/* QPHY_POWER_DOWN_CONTROL */
#define SW_PWRDN BIT(0)
-#define REFCLK_DRV_DSBL BIT(1)
/* QPHY_START_CONTROL bits */
#define SERDES_START BIT(0)
#define PCS_START BIT(1)
-#define PLL_READY_GATE_EN BIT(3)
/* QPHY_PCS_STATUS bit */
#define PHYSTATUS BIT(6)
-#define PHYSTATUS_4_20 BIT(7)
-/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
-#define PCS_READY BIT(0)
/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
/* DP PHY soft reset */
@@ -71,11 +66,6 @@
#define POWER_DOWN_DELAY_US_MIN 10
#define POWER_DOWN_DELAY_US_MAX 11
-#define MAX_PROP_NAME 32
-
-/* Define the assumed distance between lanes for underspecified device trees. */
-#define QMP_PHY_LEGACY_LANE_STRIDE 0x400
-
struct qmp_phy_init_tbl {
unsigned int offset;
unsigned int val;
@@ -115,15 +105,9 @@ struct qmp_phy_init_tbl {
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
- /* Common block control registers */
- QPHY_COM_SW_RESET,
- QPHY_COM_POWER_DOWN_CONTROL,
- QPHY_COM_START_CONTROL,
- QPHY_COM_PCS_READY_STATUS,
/* PCS registers */
QPHY_SW_RESET,
QPHY_START_CTRL,
- QPHY_PCS_READY_STATUS,
QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
@@ -1338,14 +1322,114 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
};
-struct qmp_phy;
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0xbd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xe4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
- /* phy-type - PCIE/UFS/USB */
- unsigned int type;
- /* number of lanes provided by phy */
- int nlanes;
+ int lanes;
/* Init sequence for PHY blocks - serdes, tx, rx, pcs */
const struct qmp_phy_init_tbl *serdes_tbl;
@@ -1385,8 +1469,6 @@ struct qmp_phy_cfg {
/* true, if PHY has a separate DP_COM control block */
bool has_phy_dp_com_ctrl;
- /* true, if PHY has secondary tx/rx lanes to be configured */
- bool is_dual_lane_phy;
/* Offset from PCS to PCS_USB region */
unsigned int pcs_usb_offset;
@@ -1406,7 +1488,6 @@ struct qmp_phy_cfg {
* @pcs_misc: iomapped memory space for lane's pcs_misc
* @pcs_usb: iomapped memory space for lane's pcs_usb
* @pipe_clk: pipe clock
- * @index: lane index
* @qmp: QMP phy to which this lane belongs
* @mode: current PHY mode
*/
@@ -1422,7 +1503,6 @@ struct qmp_phy {
void __iomem *pcs_misc;
void __iomem *pcs_usb;
struct clk *pipe_clk;
- unsigned int index;
struct qcom_qmp *qmp;
enum phy_mode mode;
};
@@ -1520,8 +1600,7 @@ static const char * const qmp_phy_vreg_l[] = {
};
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = ipq8074_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
@@ -1545,8 +1624,7 @@ static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
};
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
@@ -1570,8 +1648,7 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
};
static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -1598,12 +1675,10 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -1630,12 +1705,38 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
+ .lanes = 1,
+
+ .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sc8280xp_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_tx_tbl),
+ .rx_tbl = sc8280xp_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sc8280xp_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = qmp_v3_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_uniphy_serdes_tbl),
@@ -1663,8 +1764,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = msm8998_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl),
@@ -1685,13 +1785,10 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1722,12 +1819,10 @@ static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1758,8 +1853,7 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1789,12 +1883,10 @@ static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1825,8 +1917,7 @@ static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1857,8 +1948,7 @@ static const struct qmp_phy_cfg sdx55_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1889,8 +1979,7 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1920,12 +2009,10 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
- .is_dual_lane_phy = true,
};
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 1,
.serdes_tbl = sm8150_usb3_uniphy_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_uniphy_serdes_tbl),
@@ -1956,8 +2043,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
};
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
- .type = PHY_TYPE_USB3,
- .nlanes = 1,
+ .lanes = 2,
.serdes_tbl = qcm2290_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qcm2290_usb3_serdes_tbl),
@@ -1978,11 +2064,9 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.start_ctrl = SERDES_START | PCS_START,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS,
-
- .is_dual_lane_phy = true,
};
-static void qcom_qmp_phy_usb_configure_lane(void __iomem *base,
+static void qmp_usb_configure_lane(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -2005,28 +2089,29 @@ static void qcom_qmp_phy_usb_configure_lane(void __iomem *base,
}
}
-static void qcom_qmp_phy_usb_configure(void __iomem *base,
+static void qmp_usb_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
int num)
{
- qcom_qmp_phy_usb_configure_lane(base, regs, tbl, num, 0xff);
+ qmp_usb_configure_lane(base, regs, tbl, num, 0xff);
}
-static int qcom_qmp_phy_usb_serdes_init(struct qmp_phy *qphy)
+static int qmp_usb_serdes_init(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *serdes = qphy->serdes;
const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
int serdes_tbl_num = cfg->serdes_tbl_num;
- qcom_qmp_phy_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
+ qmp_usb_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num);
return 0;
}
-static int qcom_qmp_phy_usb_com_init(struct qmp_phy *qphy)
+static int qmp_usb_init(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs = qphy->pcs;
@@ -2097,8 +2182,9 @@ err_disable_regulators:
return ret;
}
-static int qcom_qmp_phy_usb_com_exit(struct qmp_phy *qphy)
+static int qmp_usb_exit(struct phy *phy)
{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -2111,21 +2197,7 @@ static int qcom_qmp_phy_usb_com_exit(struct qmp_phy *qphy)
return 0;
}
-static int qcom_qmp_phy_usb_init(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
- struct qcom_qmp *qmp = qphy->qmp;
- int ret;
- dev_vdbg(qmp->dev, "Initializing QMP phy\n");
-
- ret = qcom_qmp_phy_usb_com_init(qphy);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int qcom_qmp_phy_usb_power_on(struct phy *phy)
+static int qmp_usb_power_on(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
struct qcom_qmp *qmp = qphy->qmp;
@@ -2137,7 +2209,7 @@ static int qcom_qmp_phy_usb_power_on(struct phy *phy)
unsigned int mask, val, ready;
int ret;
- qcom_qmp_phy_usb_serdes_init(qphy);
+ qmp_usb_serdes_init(qphy);
ret = clk_prepare_enable(qphy->pipe_clk);
if (ret) {
@@ -2146,25 +2218,22 @@ static int qcom_qmp_phy_usb_power_on(struct phy *phy)
}
/* Tx, Rx, and PCS configurations */
- qcom_qmp_phy_usb_configure_lane(tx, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_usb_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- /* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_usb_configure_lane(qphy->tx2, cfg->regs,
- cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_usb_configure_lane(qphy->tx2, cfg->regs,
+ cfg->tx_tbl, cfg->tx_tbl_num, 2);
}
- qcom_qmp_phy_usb_configure_lane(rx, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 1);
+ qmp_usb_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1);
- if (cfg->is_dual_lane_phy) {
- qcom_qmp_phy_usb_configure_lane(qphy->rx2, cfg->regs,
- cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ if (cfg->lanes >= 2) {
+ qmp_usb_configure_lane(qphy->rx2, cfg->regs,
+ cfg->rx_tbl, cfg->rx_tbl_num, 2);
}
/* Configure link rate, swing, etc. */
- qcom_qmp_phy_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_usb_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
if (cfg->has_pwrdn_delay)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
@@ -2194,7 +2263,7 @@ err_disable_pipe_clk:
return ret;
}
-static int qcom_qmp_phy_usb_power_off(struct phy *phy)
+static int qmp_usb_power_off(struct phy *phy)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qphy->cfg;
@@ -2219,42 +2288,32 @@ static int qcom_qmp_phy_usb_power_off(struct phy *phy)
return 0;
}
-static int qcom_qmp_phy_usb_exit(struct phy *phy)
-{
- struct qmp_phy *qphy = phy_get_drvdata(phy);
-
- qcom_qmp_phy_usb_com_exit(qphy);
-
- return 0;
-}
-
-static int qcom_qmp_phy_usb_enable(struct phy *phy)
+static int qmp_usb_enable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_usb_init(phy);
+ ret = qmp_usb_init(phy);
if (ret)
return ret;
- ret = qcom_qmp_phy_usb_power_on(phy);
+ ret = qmp_usb_power_on(phy);
if (ret)
- qcom_qmp_phy_usb_exit(phy);
+ qmp_usb_exit(phy);
return ret;
}
-static int qcom_qmp_phy_usb_disable(struct phy *phy)
+static int qmp_usb_disable(struct phy *phy)
{
int ret;
- ret = qcom_qmp_phy_usb_power_off(phy);
+ ret = qmp_usb_power_off(phy);
if (ret)
return ret;
- return qcom_qmp_phy_usb_exit(phy);
+ return qmp_usb_exit(phy);
}
-static int qcom_qmp_phy_usb_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
+static int qmp_usb_set_mode(struct phy *phy, enum phy_mode mode, int submode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -2263,7 +2322,7 @@ static int qcom_qmp_phy_usb_set_mode(struct phy *phy,
return 0;
}
-static void qcom_qmp_phy_usb_enable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_usb_enable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -2292,7 +2351,7 @@ static void qcom_qmp_phy_usb_enable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
}
-static void qcom_qmp_phy_usb_disable_autonomous_mode(struct qmp_phy *qphy)
+static void qmp_usb_disable_autonomous_mode(struct qmp_phy *qphy)
{
const struct qmp_phy_cfg *cfg = qphy->cfg;
void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs;
@@ -2310,7 +2369,7 @@ static void qcom_qmp_phy_usb_disable_autonomous_mode(struct qmp_phy *qphy)
qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
}
-static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
+static int __maybe_unused qmp_usb_runtime_suspend(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -2318,16 +2377,12 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode);
- /* Supported only for USB3 PHY and luckily USB3 is the first phy */
- if (cfg->type != PHY_TYPE_USB3)
- return 0;
-
if (!qphy->phy->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
return 0;
}
- qcom_qmp_phy_usb_enable_autonomous_mode(qphy);
+ qmp_usb_enable_autonomous_mode(qphy);
clk_disable_unprepare(qphy->pipe_clk);
clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
@@ -2335,7 +2390,7 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
+static int __maybe_unused qmp_usb_runtime_resume(struct device *dev)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct qmp_phy *qphy = qmp->phys[0];
@@ -2344,10 +2399,6 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode);
- /* Supported only for USB3 PHY and luckily USB3 is the first phy */
- if (cfg->type != PHY_TYPE_USB3)
- return 0;
-
if (!qphy->phy->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
return 0;
@@ -2364,12 +2415,12 @@ static int __maybe_unused qcom_qmp_phy_usb_runtime_resume(struct device *dev)
return ret;
}
- qcom_qmp_phy_usb_disable_autonomous_mode(qphy);
+ qmp_usb_disable_autonomous_mode(qphy);
return 0;
}
-static int qcom_qmp_phy_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_usb_vreg_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_vregs;
@@ -2385,7 +2436,7 @@ static int qcom_qmp_phy_usb_vreg_init(struct device *dev, const struct qmp_phy_c
return devm_regulator_bulk_get(dev, num, qmp->vregs);
}
-static int qcom_qmp_phy_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_usb_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int i;
@@ -2406,7 +2457,7 @@ static int qcom_qmp_phy_usb_reset_init(struct device *dev, const struct qmp_phy_
return 0;
}
-static int qcom_qmp_phy_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
+static int qmp_usb_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
int num = cfg->num_clks;
@@ -2482,23 +2533,47 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
}
-static const struct phy_ops qcom_qmp_phy_usb_ops = {
- .init = qcom_qmp_phy_usb_enable,
- .exit = qcom_qmp_phy_usb_disable,
- .set_mode = qcom_qmp_phy_usb_set_mode,
+static const struct phy_ops qmp_usb_ops = {
+ .init = qmp_usb_enable,
+ .exit = qmp_usb_disable,
+ .set_mode = qmp_usb_set_mode,
.owner = THIS_MODULE,
};
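+
+/*
+ * Map a register region, optionally without claiming it exclusively: some
+ * legacy bindings (see the FIXME in qmp_usb_create()) describe overlapping
+ * PCS regions, so an exclusive request would fail for them.
+ */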
+static void __iomem *qmp_usb_iomap(struct device *dev, struct device_node *np,
+ int index, bool exclusive)
+{
+ struct resource res;
+
+ if (!exclusive) {
+ if (of_address_to_resource(np, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ return devm_ioremap(dev, res.start, resource_size(&res));
+ }
+
+ return devm_of_iomap(dev, np, index, NULL);
+}
+
static
-int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
+int qmp_usb_create(struct device *dev, struct device_node *np, int id,
void __iomem *serdes, const struct qmp_phy_cfg *cfg)
{
struct qcom_qmp *qmp = dev_get_drvdata(dev);
struct phy *generic_phy;
struct qmp_phy *qphy;
- char prop_name[MAX_PROP_NAME];
+ bool exclusive = true;
int ret;
+ /*
+ * FIXME: These bindings should be fixed to not rely on overlapping
+ * mappings for PCS.
+ */
+ if (of_device_is_compatible(dev->of_node, "qcom,sdx65-qmp-usb3-uni-phy"))
+ exclusive = false;
+ if (of_device_is_compatible(dev->of_node, "qcom,sm8350-qmp-usb3-uni-phy"))
+ exclusive = false;
+
qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
if (!qphy)
return -ENOMEM;
@@ -2511,58 +2586,47 @@ int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
* For dual lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5
* For single lane PHYs: pcs_misc (optional) -> 3.
*/
- qphy->tx = of_iomap(np, 0);
- if (!qphy->tx)
- return -ENOMEM;
+ qphy->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qphy->tx))
+ return PTR_ERR(qphy->tx);
- qphy->rx = of_iomap(np, 1);
- if (!qphy->rx)
- return -ENOMEM;
+ qphy->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qphy->rx))
+ return PTR_ERR(qphy->rx);
- qphy->pcs = of_iomap(np, 2);
- if (!qphy->pcs)
- return -ENOMEM;
+ qphy->pcs = qmp_usb_iomap(dev, np, 2, exclusive);
+ if (IS_ERR(qphy->pcs))
+ return PTR_ERR(qphy->pcs);
if (cfg->pcs_usb_offset)
qphy->pcs_usb = qphy->pcs + cfg->pcs_usb_offset;
- /*
- * If this is a dual-lane PHY, then there should be registers for the
- * second lane. Some old device trees did not specify this, so fall
- * back to old legacy behavior of assuming they can be reached at an
- * offset from the first lane.
- */
- if (cfg->is_dual_lane_phy) {
- qphy->tx2 = of_iomap(np, 3);
- qphy->rx2 = of_iomap(np, 4);
- if (!qphy->tx2 || !qphy->rx2) {
- dev_warn(dev,
- "Underspecified device tree, falling back to legacy register regions\n");
-
- /* In the old version, pcs_misc is at index 3. */
- qphy->pcs_misc = qphy->tx2;
- qphy->tx2 = qphy->tx + QMP_PHY_LEGACY_LANE_STRIDE;
- qphy->rx2 = qphy->rx + QMP_PHY_LEGACY_LANE_STRIDE;
-
- } else {
- qphy->pcs_misc = of_iomap(np, 5);
- }
+ if (cfg->lanes >= 2) {
+ qphy->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qphy->tx2))
+ return PTR_ERR(qphy->tx2);
+ qphy->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qphy->rx2))
+ return PTR_ERR(qphy->rx2);
+
+ qphy->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
} else {
- qphy->pcs_misc = of_iomap(np, 3);
+ qphy->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
}
- if (!qphy->pcs_misc)
+ if (IS_ERR(qphy->pcs_misc)) {
dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qphy->pcs_misc = NULL;
+ }
- snprintf(prop_name, sizeof(prop_name), "pipe%d", id);
- qphy->pipe_clk = devm_get_clk_from_child(dev, np, prop_name);
+ qphy->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
if (IS_ERR(qphy->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qphy->pipe_clk),
"failed to get lane%d pipe clock\n", id);
}
- generic_phy = devm_phy_create(dev, np, &qcom_qmp_phy_usb_ops);
+ generic_phy = devm_phy_create(dev, np, &qmp_usb_ops);
if (IS_ERR(generic_phy)) {
ret = PTR_ERR(generic_phy);
dev_err(dev, "failed to create qphy %d\n", ret);
@@ -2570,7 +2634,6 @@ int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
}
qphy->phy = generic_phy;
- qphy->index = id;
qphy->qmp = qmp;
qmp->phys[id] = qphy;
phy_set_drvdata(generic_phy, qphy);
@@ -2578,7 +2641,7 @@ int qcom_qmp_phy_usb_create(struct device *dev, struct device_node *np, int id,
return 0;
}
-static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
+static const struct of_device_id qmp_usb_of_match_table[] = {
{
.compatible = "qcom,ipq8074-qmp-usb3-phy",
.data = &ipq8074_usb3phy_cfg,
@@ -2595,6 +2658,9 @@ static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
.compatible = "qcom,sc8180x-qmp-usb3-phy",
.data = &sm8150_usb3phy_cfg,
}, {
+ .compatible = "qcom,sc8280xp-qmp-usb3-uni-phy",
+ .data = &sc8280xp_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sdm845-qmp-usb3-phy",
.data = &qmp_v3_usb3phy_cfg,
}, {
@@ -2636,14 +2702,14 @@ static const struct of_device_id qcom_qmp_phy_usb_of_match_table[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, qcom_qmp_phy_usb_of_match_table);
+MODULE_DEVICE_TABLE(of, qmp_usb_of_match_table);
-static const struct dev_pm_ops qcom_qmp_phy_usb_pm_ops = {
- SET_RUNTIME_PM_OPS(qcom_qmp_phy_usb_runtime_suspend,
- qcom_qmp_phy_usb_runtime_resume, NULL)
+static const struct dev_pm_ops qmp_usb_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_usb_runtime_suspend,
+ qmp_usb_runtime_resume, NULL)
};
-static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
+static int qmp_usb_probe(struct platform_device *pdev)
{
struct qcom_qmp *qmp;
struct device *dev = &pdev->dev;
@@ -2678,21 +2744,18 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
return PTR_ERR(qmp->dp_com);
}
- ret = qcom_qmp_phy_usb_clk_init(dev, cfg);
+ ret = qmp_usb_clk_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_usb_reset_init(dev, cfg);
+ ret = qmp_usb_reset_init(dev, cfg);
if (ret)
return ret;
- ret = qcom_qmp_phy_usb_vreg_init(dev, cfg);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ ret = qmp_usb_vreg_init(dev, cfg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
num = of_get_available_child_count(dev->of_node);
/* do we have a rogue child node ? */
@@ -2704,7 +2767,9 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
/*
* Prevent runtime pm from being ON by default. Users can enable
* it using power/control in sysfs.
@@ -2714,7 +2779,7 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
id = 0;
for_each_available_child_of_node(dev->of_node, child) {
/* Create per-lane phy */
- ret = qcom_qmp_phy_usb_create(dev, child, id, serdes, cfg);
+ ret = qmp_usb_create(dev, child, id, serdes, cfg);
if (ret) {
dev_err(dev, "failed to create lane%d phy, %d\n",
id, ret);
@@ -2736,29 +2801,24 @@ static int qcom_qmp_phy_usb_probe(struct platform_device *pdev)
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (!IS_ERR(phy_provider))
- dev_info(dev, "Registered Qcom-QMP phy\n");
- else
- pm_runtime_disable(dev);
return PTR_ERR_OR_ZERO(phy_provider);
err_node_put:
- pm_runtime_disable(dev);
of_node_put(child);
return ret;
}
-static struct platform_driver qcom_qmp_phy_usb_driver = {
- .probe = qcom_qmp_phy_usb_probe,
+static struct platform_driver qmp_usb_driver = {
+ .probe = qmp_usb_probe,
.driver = {
.name = "qcom-qmp-usb-phy",
- .pm = &qcom_qmp_phy_usb_pm_ops,
- .of_match_table = qcom_qmp_phy_usb_of_match_table,
+ .pm = &qmp_usb_pm_ops,
+ .of_match_table = qmp_usb_of_match_table,
},
};
-module_platform_driver(qcom_qmp_phy_usb_driver);
+module_platform_driver(qmp_usb_driver);
MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm QMP USB PHY driver");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index b139c8af5e8b..26274e3c0cf9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -19,6 +19,7 @@
#include "phy-qcom-qmp-qserdes-com-v5.h"
#include "phy-qcom-qmp-qserdes-txrx-v5.h"
#include "phy-qcom-qmp-qserdes-txrx-v5_20.h"
+#include "phy-qcom-qmp-qserdes-txrx-v5_5nm.h"
#include "phy-qcom-qmp-qserdes-pll.h"
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 7529a7e6e5df..2ef638b32e8f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -973,20 +973,14 @@ static int qusb2_phy_probe(struct platform_device *pdev)
return PTR_ERR(qphy->base);
qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb");
- if (IS_ERR(qphy->cfg_ahb_clk)) {
- ret = PTR_ERR(qphy->cfg_ahb_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get cfg ahb clk, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(qphy->cfg_ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(qphy->cfg_ahb_clk),
+ "failed to get cfg ahb clk\n");
qphy->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(qphy->ref_clk)) {
- ret = PTR_ERR(qphy->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get ref clk, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(qphy->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(qphy->ref_clk),
+ "failed to get ref clk\n");
qphy->iface_clk = devm_clk_get_optional(dev, "iface");
if (IS_ERR(qphy->iface_clk))
@@ -1003,12 +997,9 @@ static int qusb2_phy_probe(struct platform_device *pdev)
qphy->vregs[i].supply = qusb2_phy_vreg_names[i];
ret = devm_regulator_bulk_get(dev, num, qphy->vregs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
/* Get the specific init parameters of QMP phy */
qphy->cfg = of_device_get_match_data(dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index 5d203784f75d..a59063596214 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -52,6 +52,12 @@
#define USB2_SUSPEND_N BIT(2)
#define USB2_SUSPEND_N_SEL BIT(3)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X0 (0x6c)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1 (0x70)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2 (0x74)
+#define USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X3 (0x78)
+#define PARAM_OVRD_MASK 0xFF
+
#define USB2_PHY_USB_PHY_CFG0 (0x94)
#define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN BIT(0)
#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1)
@@ -60,12 +66,47 @@
#define REFCLK_SEL_MASK GENMASK(1, 0)
#define REFCLK_SEL_DEFAULT (0x2 << 0)
+#define HS_DISCONNECT_MASK GENMASK(2, 0)
+#define SQUELCH_DETECTOR_MASK GENMASK(7, 5)
+
+#define HS_AMPLITUDE_MASK GENMASK(3, 0)
+#define PREEMPHASIS_DURATION_MASK BIT(5)
+#define PREEMPHASIS_AMPLITUDE_MASK GENMASK(7, 6)
+
+#define HS_RISE_FALL_MASK GENMASK(1, 0)
+#define HS_CROSSOVER_VOLTAGE_MASK GENMASK(3, 2)
+#define HS_OUTPUT_IMPEDANCE_MASK GENMASK(5, 4)
+
+#define LS_FS_OUTPUT_IMPEDANCE_MASK GENMASK(3, 0)
+
static const char * const qcom_snps_hsphy_vreg_names[] = {
"vdda-pll", "vdda33", "vdda18",
};
#define SNPS_HS_NUM_VREGS ARRAY_SIZE(qcom_snps_hsphy_vreg_names)
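+
+/* Maps one device-tree tuning value to the register field value to program */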
+struct override_param {
+ s32 value;
+ u8 reg_val;
+};
+
+struct override_param_map {
+ const char *prop_name;
+ const struct override_param *param_table;
+ u8 table_size;
+ u8 reg_offset;
+ u8 param_mask;
+};
+
+struct phy_override_seq {
+ bool need_update;
+ u8 offset;
+ u8 value;
+ u8 mask;
+};
+
+#define NUM_HSPHY_TUNING_PARAMS (9)
+
/**
* struct qcom_snps_hsphy - snps hs phy attributes
*
@@ -91,6 +132,7 @@ struct qcom_snps_hsphy {
bool phy_initialized;
enum phy_mode mode;
+ struct phy_override_seq update_seq_cfg[NUM_HSPHY_TUNING_PARAMS];
};
static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
@@ -173,10 +215,158 @@ static int qcom_snps_hsphy_set_mode(struct phy *phy, enum phy_mode mode,
return 0;
}
+static const struct override_param hs_disconnect_sc7280[] = {
+ { -272, 0 },
+ { 0, 1 },
+ { 317, 2 },
+ { 630, 3 },
+ { 973, 4 },
+ { 1332, 5 },
+ { 1743, 6 },
+ { 2156, 7 },
+};
+
+static const struct override_param squelch_det_threshold_sc7280[] = {
+ { -2090, 7 },
+ { -1560, 6 },
+ { -1030, 5 },
+ { -530, 4 },
+ { 0, 3 },
+ { 530, 2 },
+ { 1060, 1 },
+ { 1590, 0 },
+};
+
+static const struct override_param hs_amplitude_sc7280[] = {
+ { -660, 0 },
+ { -440, 1 },
+ { -220, 2 },
+ { 0, 3 },
+ { 230, 4 },
+ { 440, 5 },
+ { 650, 6 },
+ { 890, 7 },
+ { 1110, 8 },
+ { 1330, 9 },
+ { 1560, 10 },
+ { 1780, 11 },
+ { 2000, 12 },
+ { 2220, 13 },
+ { 2430, 14 },
+ { 2670, 15 },
+};
+
+static const struct override_param preemphasis_duration_sc7280[] = {
+ { 10000, 1 },
+ { 20000, 0 },
+};
+
+static const struct override_param preemphasis_amplitude_sc7280[] = {
+ { 10000, 1 },
+ { 20000, 2 },
+ { 30000, 3 },
+ { 40000, 0 },
+};
+
+static const struct override_param hs_rise_fall_time_sc7280[] = {
+ { -4100, 3 },
+ { 0, 2 },
+ { 2810, 1 },
+ { 5430, 0 },
+};
+
+static const struct override_param hs_crossover_voltage_sc7280[] = {
+ { -31000, 1 },
+ { 0, 3 },
+ { 28000, 2 },
+};
+
+static const struct override_param hs_output_impedance_sc7280[] = {
+ { -2300000, 3 },
+ { 0, 2 },
+ { 2600000, 1 },
+ { 6100000, 0 },
+};
+
+static const struct override_param ls_fs_output_impedance_sc7280[] = {
+ { -1053, 15 },
+ { -557, 7 },
+ { 0, 3 },
+ { 612, 1 },
+ { 1310, 0 },
+};
+
+static const struct override_param_map sc7280_snps_7nm_phy[] = {
+ {
+ "qcom,hs-disconnect-bp",
+ hs_disconnect_sc7280,
+ ARRAY_SIZE(hs_disconnect_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X0,
+ HS_DISCONNECT_MASK
+ },
+ {
+ "qcom,squelch-detector-bp",
+ squelch_det_threshold_sc7280,
+ ARRAY_SIZE(squelch_det_threshold_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X0,
+ SQUELCH_DETECTOR_MASK
+ },
+ {
+ "qcom,hs-amplitude-bp",
+ hs_amplitude_sc7280,
+ ARRAY_SIZE(hs_amplitude_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1,
+ HS_AMPLITUDE_MASK
+ },
+ {
+ "qcom,pre-emphasis-duration-bp",
+ preemphasis_duration_sc7280,
+ ARRAY_SIZE(preemphasis_duration_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1,
+ PREEMPHASIS_DURATION_MASK,
+ },
+ {
+ "qcom,pre-emphasis-amplitude-bp",
+ preemphasis_amplitude_sc7280,
+ ARRAY_SIZE(preemphasis_amplitude_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X1,
+ PREEMPHASIS_AMPLITUDE_MASK,
+ },
+ {
+ "qcom,hs-rise-fall-time-bp",
+ hs_rise_fall_time_sc7280,
+ ARRAY_SIZE(hs_rise_fall_time_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2,
+ HS_RISE_FALL_MASK
+ },
+ {
+ "qcom,hs-crossover-voltage-microvolt",
+ hs_crossover_voltage_sc7280,
+ ARRAY_SIZE(hs_crossover_voltage_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2,
+ HS_CROSSOVER_VOLTAGE_MASK
+ },
+ {
+ "qcom,hs-output-impedance-micro-ohms",
+ hs_output_impedance_sc7280,
+ ARRAY_SIZE(hs_output_impedance_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X2,
+ HS_OUTPUT_IMPEDANCE_MASK,
+ },
+ {
+ "qcom,ls-fs-output-impedance-bp",
+ ls_fs_output_impedance_sc7280,
+ ARRAY_SIZE(ls_fs_output_impedance_sc7280),
+ USB2_PHY_USB_PHY_HS_PHY_OVERRIDE_X3,
+ LS_FS_OUTPUT_IMPEDANCE_MASK,
+ },
+ {},
+};
+
static int qcom_snps_hsphy_init(struct phy *phy)
{
struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
- int ret;
+ int ret, i;
dev_vdbg(&phy->dev, "%s(): Initializing SNPS HS phy\n", __func__);
@@ -223,6 +413,14 @@ static int qcom_snps_hsphy_init(struct phy *phy)
qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL1,
VBUSVLDEXT0, VBUSVLDEXT0);
+ for (i = 0; i < ARRAY_SIZE(hsphy->update_seq_cfg); i++) {
+ if (hsphy->update_seq_cfg[i].need_update)
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ hsphy->update_seq_cfg[i].offset,
+ hsphy->update_seq_cfg[i].mask,
+ hsphy->update_seq_cfg[i].value);
+ }
+
qcom_snps_hsphy_write_mask(hsphy->base,
USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
VREGBYPASS, VREGBYPASS);
@@ -280,7 +478,10 @@ static const struct phy_ops qcom_snps_hsphy_gen_ops = {
static const struct of_device_id qcom_snps_hsphy_of_match_table[] = {
{ .compatible = "qcom,sm8150-usb-hs-phy", },
{ .compatible = "qcom,usb-snps-hs-5nm-phy", },
- { .compatible = "qcom,usb-snps-hs-7nm-phy", },
+ {
+ .compatible = "qcom,usb-snps-hs-7nm-phy",
+ .data = &sc7280_snps_7nm_phy,
+ },
{ .compatible = "qcom,usb-snps-femto-v2-phy", },
{ }
};
@@ -291,6 +492,55 @@ static const struct dev_pm_ops qcom_snps_hsphy_pm_ops = {
qcom_snps_hsphy_runtime_resume, NULL)
};
+static void qcom_snps_hsphy_override_param_update_val(
+ const struct override_param_map map,
+ s32 dt_val, struct phy_override_seq *seq_entry)
+{
+ int i;
+
+ /*
+ * Each parameter's table is sorted in increasing order of DT values.
+ * Iterate over the table to find the entry matching the DT value and
+ * pick up the corresponding register value.
+ */
+ for (i = 0; i < map.table_size - 1; i++) {
+ if (map.param_table[i].value == dt_val)
+ break;
+ }
+
+ seq_entry->need_update = true;
+ seq_entry->offset = map.reg_offset;
+ seq_entry->mask = map.param_mask;
+ seq_entry->value = map.param_table[i].reg_val << __ffs(map.param_mask);
+}
+
+static void qcom_snps_hsphy_read_override_param_seq(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ s32 val;
+ int ret, i;
+ struct qcom_snps_hsphy *hsphy;
+ const struct override_param_map *cfg = of_device_get_match_data(dev);
+
+ if (!cfg)
+ return;
+
+ hsphy = dev_get_drvdata(dev);
+
+ for (i = 0; cfg[i].prop_name != NULL; i++) {
+ ret = of_property_read_s32(node, cfg[i].prop_name, &val);
+ if (ret)
+ continue;
+
+ qcom_snps_hsphy_override_param_update_val(cfg[i], val,
+ &hsphy->update_seq_cfg[i]);
+ dev_dbg(&hsphy->phy->dev, "Read param: %s dt_val: %d reg_val: 0x%x\n",
+ cfg[i].prop_name, val, hsphy->update_seq_cfg[i].value);
+
+ }
+}
+
static int qcom_snps_hsphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -309,12 +559,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
return PTR_ERR(hsphy->base);
hsphy->ref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(hsphy->ref_clk)) {
- ret = PTR_ERR(hsphy->ref_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get ref clk, %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hsphy->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk),
+ "failed to get ref clk\n");
hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(hsphy->phy_reset)) {
@@ -327,12 +574,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
hsphy->vregs[i].supply = qcom_snps_hsphy_vreg_names[i];
ret = devm_regulator_bulk_get(dev, num, hsphy->vregs);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator supplies: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -352,6 +596,7 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, hsphy);
phy_set_drvdata(generic_phy, hsphy);
+ qcom_snps_hsphy_read_override_param_seq(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
if (!IS_ERR(phy_provider))
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 716a77748ed8..20f6dd37c7c1 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -54,8 +54,10 @@ static int qcom_usb_hsic_phy_power_on(struct phy *phy)
/* Configure pins for HSIC functionality */
pins_default = pinctrl_lookup_state(uphy->pctl, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(pins_default))
- return PTR_ERR(pins_default);
+ if (IS_ERR(pins_default)) {
+ ret = PTR_ERR(pins_default);
+ goto err_ulpi;
+ }
ret = pinctrl_select_state(uphy->pctl, pins_default);
if (ret)
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 9022e395c056..94360fc96a6f 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -83,6 +83,15 @@ config PHY_ROCKCHIP_PCIE
help
Enable this to support the Rockchip PCIe PHY.
+config PHY_ROCKCHIP_SNPS_PCIE3
+ tristate "Rockchip Snps PCIe3 PHY Driver"
+ depends on (ARCH_ROCKCHIP && OF) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the Rockchip Snps PCIe3 PHY.
+
config PHY_ROCKCHIP_TYPEC
tristate "Rockchip TYPEC PHY Driver"
depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index a5041efb5b8f..7eab129230d1 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_NANENG_COMBO_PHY) += phy-rockchip-naneng-combphy.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
+obj-$(CONFIG_PHY_ROCKCHIP_SNPS_PCIE3) += phy-rockchip-snps-pcie3.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
index ca13a604ab4f..75f948bdea6a 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
@@ -27,6 +27,9 @@
#define RK3368_GRF_SOC_CON6_OFFSET 0x0418
+#define RK3568_GRF_VI_CON0 0x0340
+#define RK3568_GRF_VI_CON1 0x0344
+
/* PHY */
#define CSIDPHY_CTRL_LANE_ENABLE 0x00
#define CSIDPHY_CTRL_LANE_ENABLE_CK BIT(6)
@@ -58,9 +61,11 @@
#define RK1808_CSIDPHY_CLK_WR_THS_SETTLE 0x160
#define RK3326_CSIDPHY_CLK_WR_THS_SETTLE 0x100
#define RK3368_CSIDPHY_CLK_WR_THS_SETTLE 0x100
+#define RK3568_CSIDPHY_CLK_WR_THS_SETTLE 0x160
/* Calibration reception enable */
#define RK1808_CSIDPHY_CLK_CALIB_EN 0x168
+#define RK3568_CSIDPHY_CLK_CALIB_EN 0x168
/*
* The higher 16-bit of this register is used for write protection
@@ -103,6 +108,12 @@ static const struct dphy_reg rk3368_grf_dphy_regs[] = {
[GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3368_GRF_SOC_CON6_OFFSET, 4, 8),
};
+static const struct dphy_reg rk3568_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3568_GRF_VI_CON0, 4, 0),
+ [GRF_DPHY_CSIPHY_DATALANE_EN] = PHY_REG(RK3568_GRF_VI_CON0, 4, 4),
+ [GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3568_GRF_VI_CON0, 1, 8),
+};
+
struct hsfreq_range {
u32 range_h;
u8 cfg_bit;
@@ -352,6 +363,15 @@ static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
.grf_regs = rk3368_grf_dphy_regs,
};
+static const struct dphy_drv_data rk3568_mipidphy_drv_data = {
+ .pwrctl_offset = -1,
+ .ths_settle_offset = RK3568_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = RK3568_CSIDPHY_CLK_CALIB_EN,
+ .hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
+ .grf_regs = rk3568_grf_dphy_regs,
+};
+
static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
{
.compatible = "rockchip,px30-csi-dphy",
@@ -369,6 +389,10 @@ static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
.compatible = "rockchip,rk3368-csi-dphy",
.data = &rk3368_mipidphy_drv_data,
},
+ {
+ .compatible = "rockchip,rk3568-csi-dphy",
+ .data = &rk3568_mipidphy_drv_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_inno_csidphy_match_id);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 630e01b5c19b..2c5847faff63 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -84,9 +84,25 @@
#define DATA_LANE_0_SKEW_PHASE_MASK GENMASK(2, 0)
#define DATA_LANE_0_SKEW_PHASE(x) UPDATE(x, 2, 0)
/* Analog Register Part: reg08 */
+#define PLL_POST_DIV_ENABLE_MASK BIT(5)
+#define PLL_POST_DIV_ENABLE BIT(5)
#define SAMPLE_CLOCK_DIRECTION_MASK BIT(4)
#define SAMPLE_CLOCK_DIRECTION_REVERSE BIT(4)
#define SAMPLE_CLOCK_DIRECTION_FORWARD 0
+#define LOWFRE_EN_MASK BIT(5)
+#define PLL_OUTPUT_FREQUENCY_DIV_BY_1 0
+#define PLL_OUTPUT_FREQUENCY_DIV_BY_2 1
+/* Analog Register Part: reg0b */
+#define CLOCK_LANE_VOD_RANGE_SET_MASK GENMASK(3, 0)
+#define CLOCK_LANE_VOD_RANGE_SET(x) UPDATE(x, 3, 0)
+#define VOD_MIN_RANGE 0x1
+#define VOD_MID_RANGE 0x3
+#define VOD_BIG_RANGE 0x7
+#define VOD_MAX_RANGE 0xf
+/* Analog Register Part: reg1E */
+#define PLL_MODE_SEL_MASK GENMASK(6, 5)
+#define PLL_MODE_SEL_LVDS_MODE 0
+#define PLL_MODE_SEL_MIPI_MODE BIT(5)
/* Digital Register Part: reg00 */
#define REG_DIG_RSTN_MASK BIT(0)
#define REG_DIG_RSTN_NORMAL BIT(0)
@@ -102,20 +118,22 @@
#define T_LPX_CNT_MASK GENMASK(5, 0)
#define T_LPX_CNT(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg06 */
+#define T_HS_ZERO_CNT_HI_MASK BIT(7)
+#define T_HS_ZERO_CNT_HI(x) UPDATE(x, 7, 7)
#define T_HS_PREPARE_CNT_MASK GENMASK(6, 0)
#define T_HS_PREPARE_CNT(x) UPDATE(x, 6, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg07 */
-#define T_HS_ZERO_CNT_MASK GENMASK(5, 0)
-#define T_HS_ZERO_CNT(x) UPDATE(x, 5, 0)
+#define T_HS_ZERO_CNT_LO_MASK GENMASK(5, 0)
+#define T_HS_ZERO_CNT_LO(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg08 */
#define T_HS_TRAIL_CNT_MASK GENMASK(6, 0)
#define T_HS_TRAIL_CNT(x) UPDATE(x, 6, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg09 */
-#define T_HS_EXIT_CNT_MASK GENMASK(4, 0)
-#define T_HS_EXIT_CNT(x) UPDATE(x, 4, 0)
+#define T_HS_EXIT_CNT_LO_MASK GENMASK(4, 0)
+#define T_HS_EXIT_CNT_LO(x) UPDATE(x, 4, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0a */
-#define T_CLK_POST_CNT_MASK GENMASK(3, 0)
-#define T_CLK_POST_CNT(x) UPDATE(x, 3, 0)
+#define T_CLK_POST_CNT_LO_MASK GENMASK(3, 0)
+#define T_CLK_POST_CNT_LO(x) UPDATE(x, 3, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg0c */
#define LPDT_TX_PPI_SYNC_MASK BIT(2)
#define LPDT_TX_PPI_SYNC_ENABLE BIT(2)
@@ -129,9 +147,13 @@
#define T_CLK_PRE_CNT_MASK GENMASK(3, 0)
#define T_CLK_PRE_CNT(x) UPDATE(x, 3, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg10 */
+#define T_CLK_POST_CNT_HI_MASK GENMASK(7, 6)
+#define T_CLK_POST_CNT_HI(x) UPDATE(x, 7, 6)
#define T_TA_GO_CNT_MASK GENMASK(5, 0)
#define T_TA_GO_CNT(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg11 */
+#define T_HS_EXIT_CNT_HI_MASK BIT(6)
+#define T_HS_EXIT_CNT_HI(x) UPDATE(x, 6, 6)
#define T_TA_SURE_CNT_MASK GENMASK(5, 0)
#define T_TA_SURE_CNT(x) UPDATE(x, 5, 0)
/* Clock/Data0/Data1/Data2/Data3 Lane Register Part: reg12 */
@@ -169,11 +191,23 @@
#define DSI_PHY_STATUS 0xb0
#define PHY_LOCK BIT(0)
+enum phy_max_rate {
+ MAX_1GHZ,
+ MAX_2_5GHZ,
+};
+
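+/* Per-SoC data: MIPI D-PHY timing table and the maximum supported rate */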
+struct inno_video_phy_plat_data {
+ const struct inno_mipi_dphy_timing *inno_mipi_dphy_timing_table;
+ const unsigned int num_timings;
+ enum phy_max_rate max_rate;
+};
+
struct inno_dsidphy {
struct device *dev;
struct clk *ref_clk;
struct clk *pclk_phy;
struct clk *pclk_host;
+ const struct inno_video_phy_plat_data *pdata;
void __iomem *phy_base;
void __iomem *host_base;
struct reset_control *rst;
@@ -200,6 +234,53 @@ enum {
REGISTER_PART_LVDS,
};
+struct inno_mipi_dphy_timing {
+ unsigned long rate;
+ u8 lpx;
+ u8 hs_prepare;
+ u8 clk_lane_hs_zero;
+ u8 data_lane_hs_zero;
+ u8 hs_trail;
+};
+
+static const
+struct inno_mipi_dphy_timing inno_mipi_dphy_timing_table_max_1ghz[] = {
+ { 110000000, 0x0, 0x20, 0x16, 0x02, 0x22},
+ { 150000000, 0x0, 0x06, 0x16, 0x03, 0x45},
+ { 200000000, 0x0, 0x18, 0x17, 0x04, 0x0b},
+ { 250000000, 0x0, 0x05, 0x17, 0x05, 0x16},
+ { 300000000, 0x0, 0x51, 0x18, 0x06, 0x2c},
+ { 400000000, 0x0, 0x64, 0x19, 0x07, 0x33},
+ { 500000000, 0x0, 0x20, 0x1b, 0x07, 0x4e},
+ { 600000000, 0x0, 0x6a, 0x1d, 0x08, 0x3a},
+ { 700000000, 0x0, 0x3e, 0x1e, 0x08, 0x6a},
+ { 800000000, 0x0, 0x21, 0x1f, 0x09, 0x29},
+ {1000000000, 0x0, 0x09, 0x20, 0x09, 0x27},
+};
+
+static const
+struct inno_mipi_dphy_timing inno_mipi_dphy_timing_table_max_2_5ghz[] = {
+ { 110000000, 0x02, 0x7f, 0x16, 0x02, 0x02},
+ { 150000000, 0x02, 0x7f, 0x16, 0x03, 0x02},
+ { 200000000, 0x02, 0x7f, 0x17, 0x04, 0x02},
+ { 250000000, 0x02, 0x7f, 0x17, 0x05, 0x04},
+ { 300000000, 0x02, 0x7f, 0x18, 0x06, 0x04},
+ { 400000000, 0x03, 0x7e, 0x19, 0x07, 0x04},
+ { 500000000, 0x03, 0x7c, 0x1b, 0x07, 0x08},
+ { 600000000, 0x03, 0x70, 0x1d, 0x08, 0x10},
+ { 700000000, 0x05, 0x40, 0x1e, 0x08, 0x30},
+ { 800000000, 0x05, 0x02, 0x1f, 0x09, 0x30},
+ {1000000000, 0x05, 0x08, 0x20, 0x09, 0x30},
+ {1200000000, 0x06, 0x03, 0x32, 0x14, 0x0f},
+ {1400000000, 0x09, 0x03, 0x32, 0x14, 0x0f},
+ {1600000000, 0x0d, 0x42, 0x36, 0x0e, 0x0f},
+ {1800000000, 0x0e, 0x47, 0x7a, 0x0e, 0x0f},
+ {2000000000, 0x11, 0x64, 0x7a, 0x0e, 0x0b},
+ {2200000000, 0x13, 0x64, 0x7e, 0x15, 0x0b},
+ {2400000000, 0x13, 0x33, 0x7f, 0x15, 0x6a},
+ {2500000000, 0x15, 0x54, 0x7f, 0x15, 0x6a},
+};
+
static inline struct inno_dsidphy *hw_to_inno(struct clk_hw *hw)
{
return container_of(hw, struct inno_dsidphy, pll.hw);
@@ -290,31 +371,15 @@ static unsigned long inno_dsidphy_pll_calc_rate(struct inno_dsidphy *inno,
static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
{
struct phy_configure_opts_mipi_dphy *cfg = &inno->dphy_cfg;
- const struct {
- unsigned long rate;
- u8 hs_prepare;
- u8 clk_lane_hs_zero;
- u8 data_lane_hs_zero;
- u8 hs_trail;
- } timings[] = {
- { 110000000, 0x20, 0x16, 0x02, 0x22},
- { 150000000, 0x06, 0x16, 0x03, 0x45},
- { 200000000, 0x18, 0x17, 0x04, 0x0b},
- { 250000000, 0x05, 0x17, 0x05, 0x16},
- { 300000000, 0x51, 0x18, 0x06, 0x2c},
- { 400000000, 0x64, 0x19, 0x07, 0x33},
- { 500000000, 0x20, 0x1b, 0x07, 0x4e},
- { 600000000, 0x6a, 0x1d, 0x08, 0x3a},
- { 700000000, 0x3e, 0x1e, 0x08, 0x6a},
- { 800000000, 0x21, 0x1f, 0x09, 0x29},
- {1000000000, 0x09, 0x20, 0x09, 0x27},
- };
+ const struct inno_mipi_dphy_timing *timings;
u32 t_txbyteclkhs, t_txclkesc;
u32 txbyteclkhs, txclkesc, esc_clk_div;
u32 hs_exit, clk_post, clk_pre, wakeup, lpx, ta_go, ta_sure, ta_wait;
u32 hs_prepare, hs_trail, hs_zero, clk_lane_hs_zero, data_lane_hs_zero;
unsigned int i;
+ timings = inno->pdata->inno_mipi_dphy_timing_table;
+
inno_dsidphy_pll_calc_rate(inno, cfg->hs_clk_rate);
/* Select MIPI mode */
@@ -327,6 +392,13 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
REG_FBDIV_HI_MASK, REG_FBDIV_HI(inno->pll.fbdiv));
phy_update_bits(inno, REGISTER_PART_ANALOG, 0x04,
REG_FBDIV_LO_MASK, REG_FBDIV_LO(inno->pll.fbdiv));
+ if (inno->pdata->max_rate == MAX_2_5GHZ) {
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
+ PLL_POST_DIV_ENABLE_MASK, PLL_POST_DIV_ENABLE);
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x0b,
+ CLOCK_LANE_VOD_RANGE_SET_MASK,
+ CLOCK_LANE_VOD_RANGE_SET(VOD_MAX_RANGE));
+ }
/* Enable PLL and LDO */
phy_update_bits(inno, REGISTER_PART_ANALOG, 0x01,
REG_LDOPD_MASK | REG_PLLPD_MASK,
@@ -368,14 +440,6 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
/*
- * The value of counter for HS Tlpx Time
- * Tlpx = Tpin_txbyteclkhs * (2 + value)
- */
- lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
- if (lpx >= 2)
- lpx -= 2;
-
- /*
* The value of counter for HS Tta-go
* Tta-go for turnaround
* Tta-go = Ttxclkesc * value
@@ -394,13 +458,24 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
*/
ta_wait = DIV_ROUND_UP(cfg->ta_get, t_txclkesc);
- for (i = 0; i < ARRAY_SIZE(timings); i++)
+ for (i = 0; i < inno->pdata->num_timings; i++)
if (inno->pll.rate <= timings[i].rate)
break;
- if (i == ARRAY_SIZE(timings))
+ if (i == inno->pdata->num_timings)
--i;
+ /*
+ * The value of counter for HS Tlpx Time
+ * Tlpx = Tpin_txbyteclkhs * (2 + value)
+ */
+ if (inno->pdata->max_rate == MAX_1GHZ) {
+ lpx = DIV_ROUND_UP(cfg->lpx, t_txbyteclkhs);
+ if (lpx >= 2)
+ lpx -= 2;
+ } else
+ lpx = timings[i].lpx;
+
hs_prepare = timings[i].hs_prepare;
hs_trail = timings[i].hs_trail;
clk_lane_hs_zero = timings[i].clk_lane_hs_zero;
@@ -417,14 +492,23 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
T_LPX_CNT(lpx));
phy_update_bits(inno, i, 0x06, T_HS_PREPARE_CNT_MASK,
T_HS_PREPARE_CNT(hs_prepare));
- phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_MASK,
- T_HS_ZERO_CNT(hs_zero));
+ if (inno->pdata->max_rate == MAX_2_5GHZ)
+ phy_update_bits(inno, i, 0x06, T_HS_ZERO_CNT_HI_MASK,
+ T_HS_ZERO_CNT_HI(hs_zero >> 6));
+ phy_update_bits(inno, i, 0x07, T_HS_ZERO_CNT_LO_MASK,
+ T_HS_ZERO_CNT_LO(hs_zero));
phy_update_bits(inno, i, 0x08, T_HS_TRAIL_CNT_MASK,
T_HS_TRAIL_CNT(hs_trail));
- phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_MASK,
- T_HS_EXIT_CNT(hs_exit));
- phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_MASK,
- T_CLK_POST_CNT(clk_post));
+ if (inno->pdata->max_rate == MAX_2_5GHZ)
+ phy_update_bits(inno, i, 0x11, T_HS_EXIT_CNT_HI_MASK,
+ T_HS_EXIT_CNT_HI(hs_exit >> 5));
+ phy_update_bits(inno, i, 0x09, T_HS_EXIT_CNT_LO_MASK,
+ T_HS_EXIT_CNT_LO(hs_exit));
+ if (inno->pdata->max_rate == MAX_2_5GHZ)
+ phy_update_bits(inno, i, 0x10, T_CLK_POST_CNT_HI_MASK,
+ T_CLK_POST_CNT_HI(clk_post >> 4));
+ phy_update_bits(inno, i, 0x0a, T_CLK_POST_CNT_LO_MASK,
+ T_CLK_POST_CNT_LO(clk_post));
phy_update_bits(inno, i, 0x0e, T_CLK_PRE_CNT_MASK,
T_CLK_PRE_CNT(clk_pre));
phy_update_bits(inno, i, 0x0c, T_WAKEUP_CNT_HI_MASK,
@@ -452,8 +536,9 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
/* Sample clock reverse direction */
phy_update_bits(inno, REGISTER_PART_ANALOG, 0x08,
- SAMPLE_CLOCK_DIRECTION_MASK,
- SAMPLE_CLOCK_DIRECTION_REVERSE);
+ SAMPLE_CLOCK_DIRECTION_MASK | LOWFRE_EN_MASK,
+ SAMPLE_CLOCK_DIRECTION_REVERSE |
+ PLL_OUTPUT_FREQUENCY_DIV_BY_1);
/* Select LVDS mode */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x03,
@@ -473,6 +558,10 @@ static void inno_dsidphy_lvds_mode_enable(struct inno_dsidphy *inno)
msleep(20);
+ /* Select PLL mode */
+ phy_update_bits(inno, REGISTER_PART_ANALOG, 0x1e,
+ PLL_MODE_SEL_MASK, PLL_MODE_SEL_LVDS_MODE);
+
/* Reset LVDS digital logic */
phy_update_bits(inno, REGISTER_PART_LVDS, 0x00,
LVDS_DIGITAL_INTERNAL_RESET_MASK,
@@ -592,6 +681,18 @@ static const struct phy_ops inno_dsidphy_ops = {
.owner = THIS_MODULE,
};
+static const struct inno_video_phy_plat_data max_1ghz_video_phy_plat_data = {
+ .inno_mipi_dphy_timing_table = inno_mipi_dphy_timing_table_max_1ghz,
+ .num_timings = ARRAY_SIZE(inno_mipi_dphy_timing_table_max_1ghz),
+ .max_rate = MAX_1GHZ,
+};
+
+static const struct inno_video_phy_plat_data max_2_5ghz_video_phy_plat_data = {
+ .inno_mipi_dphy_timing_table = inno_mipi_dphy_timing_table_max_2_5ghz,
+ .num_timings = ARRAY_SIZE(inno_mipi_dphy_timing_table_max_2_5ghz),
+ .max_rate = MAX_2_5GHZ,
+};
+
static int inno_dsidphy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -605,6 +706,7 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
return -ENOMEM;
inno->dev = dev;
+ inno->pdata = of_device_get_match_data(inno->dev);
platform_set_drvdata(pdev, inno);
inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
@@ -663,9 +765,19 @@ static int inno_dsidphy_remove(struct platform_device *pdev)
}
static const struct of_device_id inno_dsidphy_of_match[] = {
- { .compatible = "rockchip,px30-dsi-dphy", },
- { .compatible = "rockchip,rk3128-dsi-dphy", },
- { .compatible = "rockchip,rk3368-dsi-dphy", },
+ {
+ .compatible = "rockchip,px30-dsi-dphy",
+ .data = &max_1ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3128-dsi-dphy",
+ .data = &max_1ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3368-dsi-dphy",
+ .data = &max_1ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rk3568-dsi-dphy",
+ .data = &max_2_5ghz_video_phy_plat_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, inno_dsidphy_of_match);
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 0b1e9337ee8e..e6ededc51523 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1124,7 +1124,7 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
struct rockchip_usb2phy_port *rport,
struct device_node *child_np)
{
- int ret;
+ int ret, id;
rport->port_id = USB2PHY_PORT_OTG;
rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
@@ -1162,13 +1162,15 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
ret = devm_extcon_register_notifier(rphy->dev, rphy->edev,
EXTCON_USB_HOST, &rport->event_nb);
- if (ret)
+ if (ret) {
dev_err(rphy->dev, "register USB HOST notifier failed\n");
+ goto out;
+ }
if (!of_property_read_bool(rphy->dev->of_node, "extcon")) {
/* do initial sync of usb state */
- ret = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
- extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !ret);
+ id = property_enabled(rphy->grf, &rport->port_cfg->utmi_id);
+ extcon_set_state_sync(rphy->edev, EXTCON_USB_HOST, !id);
}
}
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
new file mode 100644
index 000000000000..1d355b32ba55
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip PCIe 3.0 PHY driver
+ *
+ * Copyright (C) 2022 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
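+/*
+ * GRF registers follow the usual Rockchip convention: the upper 16 bits of
+ * a write are per-bit write-enables for the lower 16 value bits.
+ */
+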
+/* Register for RK3568 */
+#define GRF_PCIE30PHY_CON1 0x4
+#define GRF_PCIE30PHY_CON6 0x18
+#define GRF_PCIE30PHY_CON9 0x24
+#define GRF_PCIE30PHY_DA_OCM (BIT(15) | BIT(31))
+#define GRF_PCIE30PHY_STATUS0 0x80
+#define GRF_PCIE30PHY_WR_EN (0xf << 16)
+#define SRAM_INIT_DONE(reg) (reg & BIT(14))
+
+#define RK3568_BIFURCATION_LANE_0_1 BIT(0)
+
+/* Register for RK3588 */
+#define PHP_GRF_PCIESEL_CON 0x100
+#define RK3588_PCIE3PHY_GRF_CMN_CON0 0x0
+#define RK3588_PCIE3PHY_GRF_PHY0_STATUS1 0x904
+#define RK3588_PCIE3PHY_GRF_PHY1_STATUS1 0xa04
+#define RK3588_SRAM_INIT_DONE(reg) (reg & BIT(0))
+
+#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
+#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
+#define RK3588_LANE_AGGREGATION BIT(2)
+
+struct rockchip_p3phy_ops;
+
+struct rockchip_p3phy_priv {
+ const struct rockchip_p3phy_ops *ops;
+ void __iomem *mmio;
+ /* mode: RC, EP */
+ int mode;
+ /* pcie30_phymode: Aggregation, Bifurcation */
+ int pcie30_phymode;
+ struct regmap *phy_grf;
+ struct regmap *pipe_grf;
+ struct reset_control *p30phy;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int num_lanes;
+ u32 lanes[4];
+};
+
+struct rockchip_p3phy_ops {
+ int (*phy_init)(struct rockchip_p3phy_priv *priv);
+};
+
+static int rockchip_p3phy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
+
+ /* We don't actually care about EP/RC mode here; just record it */
+ switch (submode) {
+ case PHY_MODE_PCIE_RC:
+ priv->mode = PHY_MODE_PCIE_RC;
+ break;
+ case PHY_MODE_PCIE_EP:
+ priv->mode = PHY_MODE_PCIE_EP;
+ break;
+ default:
+ dev_err(&phy->dev, "%s, invalid mode\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rockchip_p3phy_rk3568_init(struct rockchip_p3phy_priv *priv)
+{
+ struct phy *phy = priv->phy;
+ bool bifurcation = false;
+ int ret;
+ u32 reg;
+
+ /* Deassert PCIe PMA output clamp mode */
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON9, GRF_PCIE30PHY_DA_OCM);
+
+ for (int i = 0; i < priv->num_lanes; i++) {
+ dev_info(&phy->dev, "lane number %d, val %d\n", i, priv->lanes[i]);
+ if (priv->lanes[i] > 1)
+ bifurcation = true;
+ }
+
+ /* Set bifurcation if needed; this does not depend on RC/EP mode */
+ if (bifurcation) {
+ dev_info(&phy->dev, "bifurcation enabled\n");
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON6,
+ GRF_PCIE30PHY_WR_EN | RK3568_BIFURCATION_LANE_0_1);
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON1,
+ GRF_PCIE30PHY_DA_OCM);
+ } else {
+ dev_dbg(&phy->dev, "bifurcation disabled\n");
+ regmap_write(priv->phy_grf, GRF_PCIE30PHY_CON6,
+ GRF_PCIE30PHY_WR_EN & ~RK3568_BIFURCATION_LANE_0_1);
+ }
+
+ reset_control_deassert(priv->p30phy);
+
+ ret = regmap_read_poll_timeout(priv->phy_grf,
+ GRF_PCIE30PHY_STATUS0,
+ reg, SRAM_INIT_DONE(reg),
+ 0, 500);
+ if (ret)
+ dev_err(&priv->phy->dev, "%s: lock failed 0x%x, check input refclk and power supply\n",
+ __func__, reg);
+ return ret;
+}
+
+static const struct rockchip_p3phy_ops rk3568_ops = {
+ .phy_init = rockchip_p3phy_rk3568_init,
+};
+
+static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
+{
+ u32 reg = 0;
+ u8 mode = 0;
+ int ret;
+
+ /* Deassert PCIe PMA output clamp mode */
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, BIT(8) | BIT(24));
+
+ /* Set bifurcation if needed */
+ for (int i = 0; i < priv->num_lanes; i++) {
+ if (!priv->lanes[i])
+ mode |= (BIT(i) << 3);
+
+ if (priv->lanes[i] > 1)
+ mode |= (BIT(i) >> 1);
+ }
+
+ if (!mode)
+ reg = RK3588_LANE_AGGREGATION;
+ else {
+ if (mode & (BIT(0) | BIT(1)))
+ reg |= RK3588_BIFURCATION_LANE_0_1;
+
+ if (mode & (BIT(2) | BIT(3)))
+ reg |= RK3588_BIFURCATION_LANE_2_3;
+ }
+
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+
+ /* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
+ if (!IS_ERR(priv->pipe_grf)) {
+ reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ if (reg)
+ regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
+ (reg << 16) | reg);
+ }
+
+ reset_control_deassert(priv->p30phy);
+
+ ret = regmap_read_poll_timeout(priv->phy_grf,
+ RK3588_PCIE3PHY_GRF_PHY0_STATUS1,
+ reg, RK3588_SRAM_INIT_DONE(reg),
+ 0, 500);
+ ret |= regmap_read_poll_timeout(priv->phy_grf,
+ RK3588_PCIE3PHY_GRF_PHY1_STATUS1,
+ reg, RK3588_SRAM_INIT_DONE(reg),
+ 0, 500);
+ if (ret)
+ dev_err(&priv->phy->dev, "lock failed 0x%x, check input refclk and power supply\n",
+ reg);
+ return ret;
+}
+
+static const struct rockchip_p3phy_ops rk3588_ops = {
+ .phy_init = rockchip_p3phy_rk3588_init,
+};
+
+static int rochchip_p3phy_init(struct phy *phy)
+{
+ struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
+ if (ret) {
+ dev_err(&priv->phy->dev, "failed to enable PCIe bulk clks %d\n", ret);
+ return ret;
+ }
+
+ reset_control_assert(priv->p30phy);
+ udelay(1);
+
+ if (priv->ops->phy_init) {
+ ret = priv->ops->phy_init(priv);
+ if (ret)
+ clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+ }
+
+ return ret;
+}
+
+static int rochchip_p3phy_exit(struct phy *phy)
+{
+ struct rockchip_p3phy_priv *priv = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+ reset_control_assert(priv->p30phy);
+ return 0;
+}
+
+static const struct phy_ops rochchip_p3phy_ops = {
+ .init = rochchip_p3phy_init,
+ .exit = rochchip_p3phy_exit,
+ .set_mode = rockchip_p3phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int rockchip_p3phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct rockchip_p3phy_priv *priv;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(priv->mmio)) {
+ ret = PTR_ERR(priv->mmio);
+ return ret;
+ }
+
+ priv->ops = of_device_get_match_data(&pdev->dev);
+ if (!priv->ops) {
+ dev_err(dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ priv->phy_grf = syscon_regmap_lookup_by_phandle(np, "rockchip,phy-grf");
+ if (IS_ERR(priv->phy_grf)) {
+ dev_err(dev, "failed to find rockchip,phy_grf regmap\n");
+ return PTR_ERR(priv->phy_grf);
+ }
+
+ if (of_device_is_compatible(np, "rockchip,rk3588-pcie3-phy")) {
+ priv->pipe_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,pipe-grf");
+ if (IS_ERR(priv->pipe_grf))
+ dev_info(dev, "failed to find rockchip,pipe_grf regmap\n");
+ } else {
+ priv->pipe_grf = NULL;
+ }
+
+ priv->num_lanes = of_property_read_variable_u32_array(dev->of_node, "data-lanes",
+ priv->lanes, 2,
+ ARRAY_SIZE(priv->lanes));
+
+ /* if no data-lanes assume aggregation */
+ if (priv->num_lanes == -EINVAL) {
+ dev_dbg(dev, "no data-lanes property found\n");
+ priv->num_lanes = 1;
+ priv->lanes[0] = 1;
+ } else if (priv->num_lanes < 0) {
+ dev_err(dev, "failed to read data-lanes property %d\n", priv->num_lanes);
+ return priv->num_lanes;
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &rochchip_p3phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create combphy\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ priv->p30phy = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(priv->p30phy)) {
+ return dev_err_probe(dev, PTR_ERR(priv->p30phy),
+ "failed to get phy reset control\n");
+ }
+ if (!priv->p30phy)
+ dev_info(dev, "no phy reset control specified\n");
+
+ priv->num_clks = devm_clk_bulk_get_all(dev, &priv->clks);
+ if (priv->num_clks < 1)
+ return -ENODEV;
+
+ dev_set_drvdata(dev, priv);
+ phy_set_drvdata(priv->phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id rockchip_p3phy_of_match[] = {
+ { .compatible = "rockchip,rk3568-pcie3-phy", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3588-pcie3-phy", .data = &rk3588_ops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_p3phy_of_match);
+
+static struct platform_driver rockchip_p3phy_driver = {
+ .probe = rockchip_p3phy_probe,
+ .driver = {
+ .name = "rockchip-snps-pcie3-phy",
+ .of_match_table = rockchip_p3phy_of_match,
+ },
+};
+module_platform_driver(rockchip_p3phy_driver);
+MODULE_DESCRIPTION("Rockchip Synopsys PCIe 3.0 PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/sunplus/Kconfig b/drivers/phy/sunplus/Kconfig
new file mode 100644
index 000000000000..3bd3cfb53a63
--- /dev/null
+++ b/drivers/phy/sunplus/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config PHY_SUNPLUS_USB
+ tristate "Sunplus SP7021 USB 2.0 PHY driver"
+ depends on OF && (SOC_SP7021 || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support the USB 2.0 PHY on the Sunplus SP7021
+ SoC. The USB 2.0 PHY controller supports battery charger and
+ synchronous signals, various power-down modes (operating,
+ partial and suspend), and high-speed, full-speed and
+ low-speed data transfer.
diff --git a/drivers/phy/sunplus/Makefile b/drivers/phy/sunplus/Makefile
new file mode 100644
index 000000000000..71754d5cb545
--- /dev/null
+++ b/drivers/phy/sunplus/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PHY_SUNPLUS_USB) += phy-sunplus-usb2.o
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
new file mode 100644
index 000000000000..b932087c55b2
--- /dev/null
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Sunplus SP7021 USB 2.0 phy driver
+ *
+ * Copyright (C) 2022 Sunplus Technology Inc., All rights reserved.
+ *
+ * Note 1: use non-posted write commands for the register accesses of
+ * Sunplus SP7021.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#define HIGH_MASK_BITS GENMASK(31, 16)
+#define LOW_MASK_BITS GENMASK(15, 0)
+#define OTP_DISC_LEVEL_DEFAULT 0xd
+
+/* GROUP UPHY */
+#define CONFIG1 0x4
+#define J_HS_TX_PWRSAV BIT(5)
+#define CONFIG3 0xc
+#define J_FORCE_DISC_ON BIT(5)
+#define J_DEBUG_CTRL_ADDR_MACRO BIT(0)
+#define CONFIG7 0x1c
+#define J_DISC 0x1f
+#define CONFIG9 0x24
+#define J_ECO_PATH BIT(6)
+#define CONFIG16 0x40
+#define J_TBCWAIT_MASK GENMASK(6, 5)
+#define J_TBCWAIT_1P1_MS FIELD_PREP(J_TBCWAIT_MASK, 0)
+#define J_TVDM_SRC_DIS_MASK GENMASK(4, 3)
+#define J_TVDM_SRC_DIS_8P2_MS FIELD_PREP(J_TVDM_SRC_DIS_MASK, 3)
+#define J_TVDM_SRC_EN_MASK GENMASK(2, 1)
+#define J_TVDM_SRC_EN_1P6_MS FIELD_PREP(J_TVDM_SRC_EN_MASK, 0)
+#define J_BC_EN BIT(0)
+#define CONFIG17 0x44
+#define IBG_TRIM0_MASK GENMASK(7, 5)
+#define IBG_TRIM0_SSLVHT FIELD_PREP(IBG_TRIM0_MASK, 4)
+#define J_VDATREE_TRIM_MASK GENMASK(4, 1)
+#define J_VDATREE_TRIM_DEFAULT FIELD_PREP(J_VDATREE_TRIM_MASK, 9)
+#define CONFIG23 0x5c
+#define PROB_MASK GENMASK(5, 3)
+#define PROB FIELD_PREP(PROB_MASK, 7)
+
+/* GROUP MOON4 */
+#define UPHY_CONTROL0 0x0
+#define UPHY_CONTROL1 0x4
+#define UPHY_CONTROL2 0x8
+#define MO1_UPHY_RX_CLK_SEL BIT(6)
+#define MASK_MO1_UPHY_RX_CLK_SEL BIT(6 + 16)
+#define UPHY_CONTROL3 0xc
+#define MO1_UPHY_PLL_POWER_OFF_SEL BIT(7)
+#define MASK_MO1_UPHY_PLL_POWER_OFF_SEL BIT(7 + 16)
+#define MO1_UPHY_PLL_POWER_OFF BIT(3)
+#define MASK_UPHY_PLL_POWER_OFF BIT(3 + 16)
+
+struct sp_usbphy {
+ struct device *dev;
+ struct resource *phy_res_mem;
+ struct resource *moon4_res_mem;
+ struct reset_control *rstc;
+ struct clk *phy_clk;
+ void __iomem *phy_regs;
+ void __iomem *moon4_regs;
+ u32 disc_vol_addr_off;
+};
+
+static int update_disc_vol(struct sp_usbphy *usbphy)
+{
+ struct nvmem_cell *cell;
+ char *disc_name = "disc_vol";
+ ssize_t otp_l = 0;
+ char *otp_v;
+ u32 val, set;
+
+ cell = nvmem_cell_get(usbphy->dev, disc_name);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /* reuse the error so the default disconnect level is applied below */
+ otp_v = ERR_CAST(cell);
+ } else {
+ otp_v = nvmem_cell_read(cell, &otp_l);
+ nvmem_cell_put(cell);
+ }
+
+ if (!IS_ERR(otp_v)) {
+ set = *(otp_v + 1);
+ set = (set << (sizeof(char) * 8)) | *otp_v;
+ set = (set >> usbphy->disc_vol_addr_off) & J_DISC;
+ }
+
+ if (IS_ERR(otp_v) || set == 0)
+ set = OTP_DISC_LEVEL_DEFAULT;
+
+ val = readl(usbphy->phy_regs + CONFIG7);
+ val = (val & ~J_DISC) | set;
+ writel(val, usbphy->phy_regs + CONFIG7);
+
+ return 0;
+}
+
+static int sp_uphy_init(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+ u32 val;
+ int ret;
+
+ ret = clk_prepare_enable(usbphy->phy_clk);
+ if (ret)
+ goto err_clk;
+
+ ret = reset_control_deassert(usbphy->rstc);
+ if (ret)
+ goto err_reset;
+
+ /* Default value modification */
+ writel(HIGH_MASK_BITS | 0x4002, usbphy->moon4_regs + UPHY_CONTROL0);
+ writel(HIGH_MASK_BITS | 0x8747, usbphy->moon4_regs + UPHY_CONTROL1);
+
+ /* disconnect voltage */
+ ret = update_disc_vol(usbphy);
+ if (ret < 0)
+ return ret;
+
+ /* board uphy 0 internal register modification for tid certification */
+ val = readl(usbphy->phy_regs + CONFIG9);
+ val &= ~(J_ECO_PATH);
+ writel(val, usbphy->phy_regs + CONFIG9);
+
+ val = readl(usbphy->phy_regs + CONFIG1);
+ val &= ~(J_HS_TX_PWRSAV);
+ writel(val, usbphy->phy_regs + CONFIG1);
+
+ val = readl(usbphy->phy_regs + CONFIG23);
+ val = (val & ~PROB) | PROB;
+ writel(val, usbphy->phy_regs + CONFIG23);
+
+ /* port 0 uphy clk fix */
+ writel(MASK_MO1_UPHY_RX_CLK_SEL | MO1_UPHY_RX_CLK_SEL,
+ usbphy->moon4_regs + UPHY_CONTROL2);
+
+ /* battery charger */
+ writel(J_TBCWAIT_1P1_MS | J_TVDM_SRC_DIS_8P2_MS | J_TVDM_SRC_EN_1P6_MS | J_BC_EN,
+ usbphy->phy_regs + CONFIG16);
+ writel(IBG_TRIM0_SSLVHT | J_VDATREE_TRIM_DEFAULT, usbphy->phy_regs + CONFIG17);
+
+ /* chirp mode */
+ writel(J_FORCE_DISC_ON | J_DEBUG_CTRL_ADDR_MACRO, usbphy->phy_regs + CONFIG3);
+
+ return 0;
+
+err_reset:
+ reset_control_assert(usbphy->rstc);
+err_clk:
+ clk_disable_unprepare(usbphy->phy_clk);
+
+ return ret;
+}
+
+static int sp_uphy_power_on(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+ u32 pll_pwr_on, pll_pwr_off;
+
+ /* PLL power off/on twice */
+ pll_pwr_off = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
+ | MO1_UPHY_PLL_POWER_OFF_SEL | MO1_UPHY_PLL_POWER_OFF;
+ pll_pwr_on = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
+ | MO1_UPHY_PLL_POWER_OFF_SEL;
+
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_on,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_on,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | 0x0,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+
+ return 0;
+}
+
+static int sp_uphy_power_off(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+ u32 pll_pwr_off;
+
+ pll_pwr_off = (readl(usbphy->moon4_regs + UPHY_CONTROL3) & ~LOW_MASK_BITS)
+ | MO1_UPHY_PLL_POWER_OFF_SEL | MO1_UPHY_PLL_POWER_OFF;
+
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | pll_pwr_off,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+ mdelay(1);
+ writel(MASK_MO1_UPHY_PLL_POWER_OFF_SEL | MASK_UPHY_PLL_POWER_OFF | 0x0,
+ usbphy->moon4_regs + UPHY_CONTROL3);
+
+ return 0;
+}
+
+static int sp_uphy_exit(struct phy *phy)
+{
+ struct sp_usbphy *usbphy = phy_get_drvdata(phy);
+
+ reset_control_assert(usbphy->rstc);
+ clk_disable_unprepare(usbphy->phy_clk);
+
+ return 0;
+}
+
+static const struct phy_ops sp_uphy_ops = {
+ .init = sp_uphy_init,
+ .power_on = sp_uphy_power_on,
+ .power_off = sp_uphy_power_off,
+ .exit = sp_uphy_exit,
+};
+
+static const struct of_device_id sp_uphy_dt_ids[] = {
+ {.compatible = "sunplus,sp7021-usb2-phy", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sp_uphy_dt_ids);
+
+static int sp_usb_phy_probe(struct platform_device *pdev)
+{
+ struct sp_usbphy *usbphy;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int ret;
+
+ usbphy = devm_kzalloc(&pdev->dev, sizeof(*usbphy), GFP_KERNEL);
+ if (!usbphy)
+ return -ENOMEM;
+
+ usbphy->dev = &pdev->dev;
+
+ usbphy->phy_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ usbphy->phy_regs = devm_ioremap_resource(&pdev->dev, usbphy->phy_res_mem);
+ if (IS_ERR(usbphy->phy_regs))
+ return PTR_ERR(usbphy->phy_regs);
+
+ usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
+ usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
+ resource_size(usbphy->moon4_res_mem));
+ if (IS_ERR(usbphy->moon4_regs))
+ return PTR_ERR(usbphy->moon4_regs);
+
+ usbphy->phy_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usbphy->phy_clk))
+ return PTR_ERR(usbphy->phy_clk);
+
+ usbphy->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(usbphy->rstc))
+ return PTR_ERR(usbphy->rstc);
+
+ of_property_read_u32(pdev->dev.of_node, "sunplus,disc-vol-addr-off",
+ &usbphy->disc_vol_addr_off);
+
+ phy = devm_phy_create(&pdev->dev, NULL, &sp_uphy_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ return ret;
+ }
+
+ phy_set_drvdata(phy, usbphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver sunplus_usb_phy_driver = {
+ .probe = sp_usb_phy_probe,
+ .driver = {
+ .name = "sunplus-usb2-phy",
+ .of_match_table = sp_uphy_dt_ids,
+ },
+};
+module_platform_driver(sunplus_usb_phy_driver);
+
+MODULE_AUTHOR("Vincent Shih <vincent.shih@sunplus.com>");
+MODULE_DESCRIPTION("Sunplus USB 2.0 phy driver");
+MODULE_LICENSE("GPL");
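The MOON4 accesses in sp_uphy_init() and the PLL power sequencing above appear to follow a masked-write convention, suggested by the MASK_* definitions (BIT(n + 16) paired with BIT(n)) and the HIGH_MASK_BITS writes: the upper 16 bits of the word select which of the lower 16 bits take effect. A minimal sketch of that convention, assuming a hypothetical helper that is not part of the driver:

    /* Upper half = write-enable mask, lower half = new bit values. */
    static void example_moon4_masked_write(void __iomem *reg, u16 mask, u16 val)
    {
            writel(((u32)mask << 16) | (val & mask), reg);
    }

Under that reading, the PLL power-off step above amounts to asserting MO1_UPHY_PLL_POWER_OFF_SEL and MO1_UPHY_PLL_POWER_OFF with their corresponding write-enable bits set, and the final write clears both with only the write-enable bits present.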
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index ae3915ed9fef..0996ede63387 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -638,7 +638,7 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
mutex_unlock(&padctl->lock);
}
-static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
+static void tegra186_utmi_pad_power_on(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
@@ -656,6 +656,8 @@ static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
return;
}
+ dev_dbg(dev, "power on UTMI pad %u\n", index);
+
tegra186_utmi_bias_pad_power_on(padctl);
udelay(2);
@@ -669,7 +671,7 @@ static void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
}
-static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
+static void tegra186_utmi_pad_power_down(struct phy *phy)
{
struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
struct tegra_xusb_padctl *padctl = lane->pad->padctl;
@@ -679,6 +681,8 @@ static void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
if (!phy)
return;
+ dev_dbg(padctl->dev, "power down UTMI pad %u\n", index);
+
value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
value |= USB2_OTG_PD;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
@@ -849,15 +853,14 @@ static int tegra186_utmi_phy_power_on(struct phy *phy)
value |= RPD_CTRL(priv->calib.rpd_ctrl);
padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
- /* TODO: pad power saving */
- tegra_phy_xusb_utmi_pad_power_on(phy);
+ tegra186_utmi_pad_power_on(phy);
+
return 0;
}
static int tegra186_utmi_phy_power_off(struct phy *phy)
{
- /* TODO: pad power saving */
- tegra_phy_xusb_utmi_pad_power_down(phy);
+ tegra186_utmi_pad_power_down(phy);
return 0;
}
@@ -1381,12 +1384,9 @@ tegra186_xusb_read_fuse_calibration(struct tegra186_xusb_padctl *padctl)
return -ENOMEM;
err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to read calibration fuse: %d\n",
- err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to read calibration fuse\n");
dev_dbg(dev, "FUSE_USB_CALIB_0 %#x\n", value);
@@ -1486,6 +1486,8 @@ static const struct tegra_xusb_padctl_ops tegra186_xusb_padctl_ops = {
.suspend_noirq = tegra186_xusb_padctl_suspend_noirq,
.resume_noirq = tegra186_xusb_padctl_resume_noirq,
.vbus_override = tegra186_xusb_padctl_vbus_override,
+ .utmi_pad_power_on = tegra186_utmi_pad_power_on,
+ .utmi_pad_power_down = tegra186_utmi_pad_power_down,
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index aa5237eacd29..95091876c422 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <linux/delay.h>
@@ -656,6 +656,7 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
struct usb_role_switch_desc role_sx_desc = {
.fwnode = dev_fwnode(&port->dev),
.set = tegra_xusb_role_sw_set,
+ .allow_userspace_control = true,
};
int err = 0;
@@ -1270,7 +1271,7 @@ static int tegra_xusb_padctl_remove(struct platform_device *pdev)
padctl->soc->ops->remove(padctl);
- return err;
+ return 0;
}
static __maybe_unused int tegra_xusb_padctl_suspend_noirq(struct device *dev)
@@ -1458,6 +1459,26 @@ int tegra_phy_xusb_utmi_port_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_pad_power_on)
+ padctl->soc->ops->utmi_pad_power_on(phy);
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_on);
+
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+
+ if (padctl->soc->ops->utmi_pad_power_down)
+ padctl->soc->ops->utmi_pad_power_down(phy);
+}
+EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_down);
+
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port)
{
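tegra_phy_xusb_utmi_pad_power_on() and tegra_phy_xusb_utmi_pad_power_down() are exported above so that a consumer can toggle UTMI pad power without a full PHY power cycle; each call simply dispatches to the SoC-specific padctl op when one is implemented. A caller-side sketch (the wrapper names are hypothetical; the exported helpers and a UTMI struct phy handle are assumed to be available to the caller):

    static void example_utmi_port_suspend(struct phy *utmi_phy)
    {
            /* Drop UTMI pad power while the port is idle. */
            tegra_phy_xusb_utmi_pad_power_down(utmi_phy);
    }

    static void example_utmi_port_resume(struct phy *utmi_phy)
    {
            tegra_phy_xusb_utmi_pad_power_on(utmi_phy);
    }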
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 034f7a2c28d6..8cfbbdbd6e0c 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, Google Inc.
*/
@@ -412,6 +412,8 @@ struct tegra_xusb_padctl_ops {
unsigned int index, bool enable);
int (*vbus_override)(struct tegra_xusb_padctl *padctl, bool set);
int (*utmi_port_reset)(struct phy *phy);
+ void (*utmi_pad_power_on)(struct phy *phy);
+ void (*utmi_pad_power_down)(struct phy *phy);
};
struct tegra_xusb_padctl_soc {
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index d0ab69750c6b..0bcfd6d96b4d 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -22,6 +22,12 @@
#define AM33XX_GMII_SEL_MODE_RMII 1
#define AM33XX_GMII_SEL_MODE_RGMII 2
+/* J72xx SoC specific definitions for the CONTROL port */
+#define J72XX_GMII_SEL_MODE_QSGMII 4
+#define J72XX_GMII_SEL_MODE_QSGMII_SUB 6
+
+#define PHY_GMII_PORT(n) BIT((n) - 1)
+
enum {
PHY_GMII_SEL_PORT_MODE = 0,
PHY_GMII_SEL_RGMII_ID_MODE,
@@ -43,6 +49,7 @@ struct phy_gmii_sel_soc_data {
u32 features;
const struct reg_field (*regfields)[PHY_GMII_SEL_LAST];
bool use_of_data;
+ u64 extra_modes;
};
struct phy_gmii_sel_priv {
@@ -53,6 +60,7 @@ struct phy_gmii_sel_priv {
struct phy_gmii_sel_phy_priv *if_phys;
u32 num_ports;
u32 reg_offset;
+ u32 qsgmii_main_ports;
};
static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
@@ -88,10 +96,17 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ if (!(soc_data->extra_modes & BIT(PHY_INTERFACE_MODE_QSGMII)))
+ goto unsupported;
+ if (if_phy->priv->qsgmii_main_ports & BIT(if_phy->id - 1))
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII;
+ else
+ gmii_sel_mode = J72XX_GMII_SEL_MODE_QSGMII_SUB;
+ break;
+
default:
- dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
- if_phy->id, phy_modes(submode));
- return -EINVAL;
+ goto unsupported;
}
if_phy->phy_if_mode = submode;
@@ -123,6 +138,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
}
return 0;
+
+unsupported:
+ dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
+ if_phy->id, phy_modes(submode));
+ return -EINVAL;
}
static const
@@ -188,6 +208,13 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
.regfields = phy_gmii_sel_fields_am654,
};
+static const
+struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
+ .use_of_data = true,
+ .regfields = phy_gmii_sel_fields_am654,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id phy_gmii_sel_id_table[] = {
{
.compatible = "ti,am3352-phy-gmii-sel",
@@ -209,6 +236,10 @@ static const struct of_device_id phy_gmii_sel_id_table[] = {
.compatible = "ti,am654-phy-gmii-sel",
.data = &phy_gmii_sel_soc_am654,
},
+ {
+ .compatible = "ti,j7200-cpsw5g-phy-gmii-sel",
+ .data = &phy_gmii_sel_cpsw5g_soc_j7200,
+ },
{}
};
MODULE_DEVICE_TABLE(of, phy_gmii_sel_id_table);
@@ -350,6 +381,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
const struct of_device_id *of_id;
struct phy_gmii_sel_priv *priv;
+ u32 main_ports = 1;
int ret;
of_id = of_match_node(phy_gmii_sel_id_table, pdev->dev.of_node);
@@ -363,6 +395,15 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->soc_data = of_id->data;
priv->num_ports = priv->soc_data->num_ports;
+ of_property_read_u32(node, "ti,qsgmii-main-ports", &main_ports);
+ /*
+ * Ensure that main_ports is within bounds. If the property
+ * ti,qsgmii-main-ports is not mentioned, or the value mentioned
+ * is out of bounds, default to 1.
+ */
+ if (main_ports < 1 || main_ports > 4)
+ main_ports = 1;
+ priv->qsgmii_main_ports = PHY_GMII_PORT(main_ports);
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
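The QSGMII handling added above selects J72XX_GMII_SEL_MODE_QSGMII for the single main port named in ti,qsgmii-main-ports and J72XX_GMII_SEL_MODE_QSGMII_SUB for the remaining ports. A short worked sketch of the bit mapping (illustrative only; the function is hypothetical):

    static u32 example_qsgmii_mode(u32 qsgmii_main_ports, u32 port_id)
    {
            /* Mirrors the check in phy_gmii_sel_mode(). */
            if (qsgmii_main_ports & BIT(port_id - 1))
                    return J72XX_GMII_SEL_MODE_QSGMII;

            return J72XX_GMII_SEL_MODE_QSGMII_SUB;
    }

    /*
     * For example, with ti,qsgmii-main-ports = <2>, qsgmii_main_ports is
     * PHY_GMII_PORT(2) == BIT(1), so port 2 is programmed as the QSGMII
     * main port and ports 1, 3 and 4 as QSGMII sub ports.
     */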
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 70bac931f99a..41725c6bcdf6 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/mux/consumer.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -23,6 +24,15 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
+#define REF_CLK_19_2MHZ 19200000
+#define REF_CLK_25MHZ 25000000
+#define REF_CLK_100MHZ 100000000
+#define REF_CLK_156_25MHZ 156250000
+
+/* SCM offsets */
+#define SERDES_SUP_CTRL 0x4400
+
+/* SERDES offsets */
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
#define WIZ_SERDES_RST 0x40c
@@ -85,6 +95,18 @@ static const struct reg_field pma_cmn_refclk_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 26, 27);
static const struct reg_field pma_cmn_refclk1_dig_div =
REG_FIELD(WIZ_SERDES_TOP_CTRL, 24, 25);
+
+static const struct reg_field sup_pll0_refclk_mux_sel =
+ REG_FIELD(SERDES_SUP_CTRL, 0, 1);
+static const struct reg_field sup_pll1_refclk_mux_sel =
+ REG_FIELD(SERDES_SUP_CTRL, 2, 3);
+static const struct reg_field sup_pma_cmn_refclk1_int_mode =
+ REG_FIELD(SERDES_SUP_CTRL, 4, 5);
+static const struct reg_field sup_refclk_dig_sel_10g =
+ REG_FIELD(SERDES_SUP_CTRL, 6, 7);
+static const struct reg_field sup_legacy_clk_override =
+ REG_FIELD(SERDES_SUP_CTRL, 8, 8);
+
static const char * const output_clk_names[] = {
[TI_WIZ_PLL0_REFCLK] = "pll0-refclk",
[TI_WIZ_PLL1_REFCLK] = "pll1-refclk",
@@ -129,6 +151,26 @@ static const struct reg_field p0_fullrt_div[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(3), 22, 23),
};
+static const struct reg_field p0_mac_src_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 20, 21),
+ REG_FIELD(WIZ_LANECTL(1), 20, 21),
+ REG_FIELD(WIZ_LANECTL(2), 20, 21),
+ REG_FIELD(WIZ_LANECTL(3), 20, 21),
+};
+
+static const struct reg_field p0_rxfclk_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 6, 7),
+ REG_FIELD(WIZ_LANECTL(1), 6, 7),
+ REG_FIELD(WIZ_LANECTL(2), 6, 7),
+ REG_FIELD(WIZ_LANECTL(3), 6, 7),
+};
+
+static const struct reg_field p0_refclk_sel[WIZ_MAX_LANES] = {
+ REG_FIELD(WIZ_LANECTL(0), 18, 19),
+ REG_FIELD(WIZ_LANECTL(1), 18, 19),
+ REG_FIELD(WIZ_LANECTL(2), 18, 19),
+ REG_FIELD(WIZ_LANECTL(3), 18, 19),
+};
static const struct reg_field p_mac_div_sel0[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANEDIV(0), 16, 22),
REG_FIELD(WIZ_LANEDIV(1), 16, 22),
@@ -228,6 +270,27 @@ static const struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
},
};
+static const struct wiz_clk_mux_sel clk_mux_sel_10g_2_refclk[] = {
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "pll0-refclk",
+ },
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "pll1-refclk",
+ },
+ {
+ .num_parents = 3,
+ .parents = { WIZ_CORE_REFCLK, WIZ_CORE_REFCLK1, WIZ_EXT_REFCLK },
+ .table = { 2, 3, 0 },
+ .node_name = "refclk-dig",
+ },
+};
+
static const struct clk_div_table clk_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
@@ -249,14 +312,18 @@ static const struct wiz_clk_div_sel clk_div_sel[] = {
enum wiz_type {
J721E_WIZ_16G,
- J721E_WIZ_10G,
+ J721E_WIZ_10G, /* Also for J7200 SR1.0 */
AM64_WIZ_10G,
+ J7200_WIZ_10G, /* J7200 SR2.0 */
};
struct wiz_data {
enum wiz_type type;
+ const struct reg_field *pll0_refclk_mux_sel;
+ const struct reg_field *pll1_refclk_mux_sel;
const struct reg_field *refclk_dig_sel;
const struct reg_field *pma_cmn_refclk1_dig_div;
+ const struct reg_field *pma_cmn_refclk1_int_mode;
const struct wiz_clk_mux_sel *clk_mux_sel;
unsigned int clk_div_sel_num;
};
@@ -266,6 +333,7 @@ struct wiz_data {
struct wiz {
struct regmap *regmap;
+ struct regmap *scm_regmap;
enum wiz_type type;
const struct wiz_clk_mux_sel *clk_mux_sel;
const struct wiz_clk_div_sel *clk_div_sel;
@@ -280,13 +348,18 @@ struct wiz {
struct regmap_field *p_mac_div_sel0[WIZ_MAX_LANES];
struct regmap_field *p_mac_div_sel1[WIZ_MAX_LANES];
struct regmap_field *p0_fullrt_div[WIZ_MAX_LANES];
+ struct regmap_field *p0_mac_src_sel[WIZ_MAX_LANES];
+ struct regmap_field *p0_rxfclk_sel[WIZ_MAX_LANES];
+ struct regmap_field *p0_refclk_sel[WIZ_MAX_LANES];
struct regmap_field *pma_cmn_refclk_int_mode;
+ struct regmap_field *pma_cmn_refclk1_int_mode;
struct regmap_field *pma_cmn_refclk_mode;
struct regmap_field *pma_cmn_refclk_dig_div;
struct regmap_field *pma_cmn_refclk1_dig_div;
struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
struct regmap_field *typec_ln10_swap;
+ struct regmap_field *sup_legacy_clk_override;
struct device *dev;
u32 num_lanes;
@@ -325,7 +398,9 @@ static int wiz_p_mac_div_sel(struct wiz *wiz)
int i;
for (i = 0; i < num_lanes; i++) {
- if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
+ if (wiz->lane_phy_type[i] == PHY_TYPE_SGMII ||
+ wiz->lane_phy_type[i] == PHY_TYPE_QSGMII ||
+ wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
ret = regmap_field_write(wiz->p_mac_div_sel0[i], 1);
if (ret)
return ret;
@@ -354,6 +429,13 @@ static int wiz_mode_select(struct wiz *wiz)
else
continue;
+ if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+ ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
+ ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
+ ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
+ mode = LANE_MODE_GEN1;
+ }
+
ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
return ret;
@@ -416,6 +498,7 @@ static int wiz_init(struct wiz *wiz)
static int wiz_regfield_init(struct wiz *wiz)
{
struct regmap *regmap = wiz->regmap;
+ struct regmap *scm_regmap = wiz->regmap; /* switched to wiz->scm_regmap below when one is provided */
int num_lanes = wiz->num_lanes;
struct device *dev = wiz->dev;
const struct wiz_data *data = wiz->data;
@@ -465,27 +548,46 @@ static int wiz_regfield_init(struct wiz *wiz)
}
}
+ if (wiz->scm_regmap) {
+ scm_regmap = wiz->scm_regmap;
+ wiz->sup_legacy_clk_override =
+ devm_regmap_field_alloc(dev, scm_regmap, sup_legacy_clk_override);
+ if (IS_ERR(wiz->sup_legacy_clk_override)) {
+ dev_err(dev, "SUP_LEGACY_CLK_OVERRIDE reg field init failed\n");
+ return PTR_ERR(wiz->sup_legacy_clk_override);
+ }
+ }
+
wiz->mux_sel_field[PLL0_REFCLK] =
- devm_regmap_field_alloc(dev, regmap, pll0_refclk_mux_sel);
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pll0_refclk_mux_sel);
if (IS_ERR(wiz->mux_sel_field[PLL0_REFCLK])) {
dev_err(dev, "PLL0_REFCLK_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[PLL0_REFCLK]);
}
wiz->mux_sel_field[PLL1_REFCLK] =
- devm_regmap_field_alloc(dev, regmap, pll1_refclk_mux_sel);
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pll1_refclk_mux_sel);
if (IS_ERR(wiz->mux_sel_field[PLL1_REFCLK])) {
dev_err(dev, "PLL1_REFCLK_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[PLL1_REFCLK]);
}
- wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, regmap,
+ wiz->mux_sel_field[REFCLK_DIG] = devm_regmap_field_alloc(dev, scm_regmap,
*data->refclk_dig_sel);
if (IS_ERR(wiz->mux_sel_field[REFCLK_DIG])) {
dev_err(dev, "REFCLK_DIG_SEL reg field init failed\n");
return PTR_ERR(wiz->mux_sel_field[REFCLK_DIG]);
}
+ if (data->pma_cmn_refclk1_int_mode) {
+ wiz->pma_cmn_refclk1_int_mode =
+ devm_regmap_field_alloc(dev, scm_regmap, *data->pma_cmn_refclk1_int_mode);
+ if (IS_ERR(wiz->pma_cmn_refclk1_int_mode)) {
+ dev_err(dev, "PMA_CMN_REFCLK1_INT_MODE reg field init failed\n");
+ return PTR_ERR(wiz->pma_cmn_refclk1_int_mode);
+ }
+ }
+
for (i = 0; i < num_lanes; i++) {
wiz->p_enable[i] = devm_regmap_field_alloc(dev, regmap,
p_enable[i]);
@@ -523,6 +625,24 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->p0_fullrt_div[i]);
}
+ wiz->p0_mac_src_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_mac_src_sel[i]);
+ if (IS_ERR(wiz->p0_mac_src_sel[i])) {
+ dev_err(dev, "P%d_MAC_SRC_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_mac_src_sel[i]);
+ }
+
+ wiz->p0_rxfclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_rxfclk_sel[i]);
+ if (IS_ERR(wiz->p0_rxfclk_sel[i])) {
+ dev_err(dev, "P%d_RXFCLK_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_rxfclk_sel[i]);
+ }
+
+ wiz->p0_refclk_sel[i] = devm_regmap_field_alloc(dev, regmap, p0_refclk_sel[i]);
+ if (IS_ERR(wiz->p0_refclk_sel[i])) {
+ dev_err(dev, "P%d_REFCLK_SEL reg field init failed\n", i);
+ return PTR_ERR(wiz->p0_refclk_sel[i]);
+ }
+
wiz->p_mac_div_sel0[i] =
devm_regmap_field_alloc(dev, regmap, p_mac_div_sel0[i]);
if (IS_ERR(wiz->p_mac_div_sel0[i])) {
@@ -597,6 +717,8 @@ static int wiz_phy_en_refclk_register(struct wiz *wiz)
struct device *dev = wiz->dev;
struct clk_init_data *init;
struct clk *clk;
+ char *clk_name;
+ unsigned int sz;
wiz_phy_en_refclk = devm_kzalloc(dev, sizeof(*wiz_phy_en_refclk), GFP_KERNEL);
if (!wiz_phy_en_refclk)
@@ -606,12 +728,23 @@ static int wiz_phy_en_refclk_register(struct wiz *wiz)
init->ops = &wiz_phy_en_refclk_ops;
init->flags = 0;
- init->name = output_clk_names[TI_WIZ_PHY_EN_REFCLK];
+
+ sz = strlen(dev_name(dev)) + strlen(output_clk_names[TI_WIZ_PHY_EN_REFCLK]) + 2;
+
+ clk_name = kzalloc(sz, GFP_KERNEL);
+ if (!clk_name)
+ return -ENOMEM;
+
+ snprintf(clk_name, sz, "%s_%s", dev_name(dev), output_clk_names[TI_WIZ_PHY_EN_REFCLK]);
+ init->name = clk_name;
wiz_phy_en_refclk->phy_en_refclk = wiz->phy_en_refclk;
wiz_phy_en_refclk->hw.init = init;
clk = devm_clk_register(dev, &wiz_phy_en_refclk->hw);
+
+ kfree(clk_name);
+
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -856,9 +989,13 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
struct device_node *clk_node;
int i;
- if (wiz->type == AM64_WIZ_10G) {
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
of_clk_del_provider(dev->of_node);
return;
+ default:
+ break;
}
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
@@ -885,9 +1022,6 @@ static int wiz_clock_register(struct wiz *wiz)
int ret;
int i;
- if (wiz->type != AM64_WIZ_10G)
- return 0;
-
clk_index = TI_WIZ_PLL0_REFCLK;
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++, clk_index++) {
ret = wiz_mux_clk_register(wiz, wiz->mux_sel_field[i], &clk_mux_sel[i], clk_index);
@@ -937,6 +1071,41 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3);
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
+ switch (rate) {
+ case REF_CLK_100MHZ:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x2);
+ break;
+ case REF_CLK_156_25MHZ:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0x3);
+ break;
+ default:
+ regmap_field_write(wiz->div_sel_field[CMN_REFCLK_DIG_DIV], 0);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (wiz->data->pma_cmn_refclk1_int_mode) {
+ clk = devm_clk_get(dev, "core_ref1_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "core_ref1_clk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ wiz->input_clks[WIZ_CORE_REFCLK1] = clk;
+
+ rate = clk_get_rate(clk);
+ if (rate >= 100000000)
+ regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1);
+ else
+ regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3);
+ }
+
clk = devm_clk_get(dev, "ext_ref_clk");
if (IS_ERR(clk)) {
dev_err(dev, "ext_ref_clk clock not found\n");
@@ -951,11 +1120,15 @@ static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
else
regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2);
- if (wiz->type == AM64_WIZ_10G) {
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ case J7200_WIZ_10G:
ret = wiz_clock_register(wiz);
if (ret)
dev_err(dev, "Failed to register wiz clocks\n");
return ret;
+ default:
+ break;
}
for (i = 0; i < WIZ_MUX_NUM_CLOCKS; i++) {
@@ -1025,12 +1198,19 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
static int wiz_phy_fullrt_div(struct wiz *wiz, int lane)
{
- if (wiz->type != AM64_WIZ_10G)
+ switch (wiz->type) {
+ case AM64_WIZ_10G:
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
+ break;
+ case J721E_WIZ_10G:
+ case J7200_WIZ_10G:
+ if (wiz->lane_phy_type[lane] == PHY_TYPE_SGMII)
+ return regmap_field_write(wiz->p0_fullrt_div[lane], 0x2);
+ break;
+ default:
return 0;
-
- if (wiz->lane_phy_type[lane] == PHY_TYPE_PCIE)
- return regmap_field_write(wiz->p0_fullrt_div[lane], 0x1);
-
+ }
return 0;
}
@@ -1083,6 +1263,8 @@ static const struct regmap_config wiz_regmap_config = {
static struct wiz_data j721e_16g_data = {
.type = J721E_WIZ_16G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_16g,
.pma_cmn_refclk1_dig_div = &pma_cmn_refclk1_dig_div,
.clk_mux_sel = clk_mux_sel_16g,
@@ -1091,6 +1273,8 @@ static struct wiz_data j721e_16g_data = {
static struct wiz_data j721e_10g_data = {
.type = J721E_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
@@ -1098,11 +1282,23 @@ static struct wiz_data j721e_10g_data = {
static struct wiz_data am64_10g_data = {
.type = AM64_WIZ_10G,
+ .pll0_refclk_mux_sel = &pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &pll1_refclk_mux_sel,
.refclk_dig_sel = &refclk_dig_sel_10g,
.clk_mux_sel = clk_mux_sel_10g,
.clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
};
+static struct wiz_data j7200_pg2_10g_data = {
+ .type = J7200_WIZ_10G,
+ .pll0_refclk_mux_sel = &sup_pll0_refclk_mux_sel,
+ .pll1_refclk_mux_sel = &sup_pll1_refclk_mux_sel,
+ .refclk_dig_sel = &sup_refclk_dig_sel_10g,
+ .pma_cmn_refclk1_int_mode = &sup_pma_cmn_refclk1_int_mode,
+ .clk_mux_sel = clk_mux_sel_10g_2_refclk,
+ .clk_div_sel_num = WIZ_DIV_NUM_CLOCKS_10G,
+};
+
static const struct of_device_id wiz_id_table[] = {
{
.compatible = "ti,j721e-wiz-16g", .data = &j721e_16g_data,
@@ -1113,6 +1309,9 @@ static const struct of_device_id wiz_id_table[] = {
{
.compatible = "ti,am64-wiz-10g", .data = &am64_10g_data,
},
+ {
+ .compatible = "ti,j7200-wiz-10g", .data = &j7200_pg2_10g_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, wiz_id_table);
@@ -1210,6 +1409,17 @@ static int wiz_probe(struct platform_device *pdev)
goto err_addr_to_resource;
}
+ wiz->scm_regmap = syscon_regmap_lookup_by_phandle(node, "ti,scm");
+ if (IS_ERR(wiz->scm_regmap)) {
+ if (wiz->type == J7200_WIZ_10G) {
+ dev_err(dev, "Couldn't get ti,scm regmap\n");
+ ret = -ENODEV;
+ goto err_addr_to_resource;
+ }
+
+ wiz->scm_regmap = NULL;
+ }
+
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
if (ret) {
dev_err(dev, "Failed to read num-lanes property\n");
@@ -1254,7 +1464,7 @@ static int wiz_probe(struct platform_device *pdev)
ret = wiz_get_lane_phy_types(dev, wiz);
if (ret)
- return ret;
+ goto err_addr_to_resource;
wiz->dev = dev;
wiz->regmap = regmap;
@@ -1271,6 +1481,10 @@ static int wiz_probe(struct platform_device *pdev)
goto err_addr_to_resource;
}
+ /* Enable supplemental Control override if available */
+ if (wiz->scm_regmap)
+ regmap_field_write(wiz->sup_legacy_clk_override, 1);
+
phy_reset_dev = &wiz->wiz_phy_reset_dev;
phy_reset_dev->dev = dev;
phy_reset_dev->ops = &wiz_phy_reset_ops,
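The J7200 support above relocates the refclk mux fields into the SCM syscon (SERDES_SUP_CTRL) and drives them through the usual regmap_field machinery. A minimal sketch of that pattern, with a hypothetical field and setter purely for illustration:

    /* SERDES_SUP_CTRL[1:0] selects the PLL0 reference clock, per sup_pll0_refclk_mux_sel. */
    static const struct reg_field example_pll0_sel = REG_FIELD(0x4400, 0, 1);

    static int example_set_pll0_refclk(struct device *dev, struct regmap *scm_regmap,
                                       unsigned int sel)
    {
            struct regmap_field *field;

            field = devm_regmap_field_alloc(dev, scm_regmap, example_pll0_sel);
            if (IS_ERR(field))
                    return PTR_ERR(field);

            /* Read-modify-write touching only bits 1:0 of SERDES_SUP_CTRL. */
            return regmap_field_write(field, sel);
    }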
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1cf74b0c42e5..d768dcf75cf1 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -292,7 +292,7 @@ config PINCTRL_MCP23S08
corresponding interrupt-controller.
config PINCTRL_MICROCHIP_SGPIO
- bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
+ tristate "Pinctrl driver for Microsemi/Microchip Serial GPIO"
depends on OF
depends on HAS_IOMEM
select GPIOLIB
@@ -310,6 +310,9 @@ config PINCTRL_MICROCHIP_SGPIO
connect control signals from SFP modules and to act as an
LED controller.
+ If compiled as a module, the module name will be
+ pinctrl-microchip-sgpio.
+
config PINCTRL_OCELOT
tristate "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
depends on OF
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 8f4d89806fcb..35b51ce4298e 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -31,13 +31,13 @@ config PINCTRL_BCM2835
config PINCTRL_BCM4908
tristate "Broadcom BCM4908 pinmux driver"
- depends on OF && (ARCH_BCM4908 || COMPILE_TEST)
+ depends on OF && (ARCH_BCMBCA || COMPILE_TEST)
select PINMUX
select PINCONF
select GENERIC_PINCONF
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
- default ARCH_BCM4908
+ default ARCH_BCMBCA
help
Driver for BCM4908 family SoCs with integrated pin controller.
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 6f55bf7d5e05..2b4167a09b3b 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
+#include <linux/mfd/ocelot.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pinctrl/pinmux.h>
@@ -904,7 +905,6 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
struct reset_control *reset;
struct sgpio_priv *priv;
struct clk *clk;
- u32 __iomem *regs;
u32 val;
struct regmap_config regmap_config = {
.reg_bits = 32,
@@ -937,11 +937,7 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
-
- priv->regs = devm_regmap_init_mmio(dev, regs, &regmap_config);
+ priv->regs = ocelot_regmap_from_resource(pdev, 0, &regmap_config);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
@@ -999,6 +995,7 @@ static const struct of_device_id microchip_sgpio_gpio_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, microchip_sgpio_gpio_of_match);
static struct platform_driver microchip_sgpio_pinctrl_driver = {
.driver = {
@@ -1008,4 +1005,7 @@ static struct platform_driver microchip_sgpio_pinctrl_driver = {
},
.probe = microchip_sgpio_probe,
};
-builtin_platform_driver(microchip_sgpio_pinctrl_driver);
+module_platform_driver(microchip_sgpio_pinctrl_driver);
+
+MODULE_DESCRIPTION("Microchip SGPIO Pinctrl Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index c5fd154990c8..83464e0bf4e6 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -10,6 +10,7 @@
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mfd/ocelot.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -331,6 +332,7 @@ struct ocelot_pinctrl {
const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
+ struct workqueue_struct *wq;
};
struct ocelot_match_data {
@@ -338,6 +340,11 @@ struct ocelot_match_data {
struct ocelot_pincfg_data pincfg_data;
};
+struct ocelot_irq_work {
+ struct work_struct irq_work;
+ struct irq_desc *irq_desc;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1813,6 +1820,75 @@ static void ocelot_irq_mask(struct irq_data *data)
gpiochip_disable_irq(chip, gpio);
}
+static void ocelot_irq_work(struct work_struct *work)
+{
+ struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
+ struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc);
+ struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc);
+ struct irq_data *data = irq_desc_get_irq_data(w->irq_desc);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ local_irq_disable();
+ chained_irq_enter(parent_chip, w->irq_desc);
+ generic_handle_domain_irq(chip->irq.domain, gpio);
+ chained_irq_exit(parent_chip, w->irq_desc);
+ local_irq_enable();
+
+ kfree(w);
+}
+
+static void ocelot_irq_unmask_level(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ struct irq_desc *desc = irq_data_to_desc(data);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bit = BIT(gpio % 32);
+ bool ack = false, active = false;
+ u8 trigger_level;
+ int val;
+
+ trigger_level = irqd_get_trigger_type(data);
+
+ /* Check if the interrupt line is still active. */
+ regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+ if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+ (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+ active = true;
+
+ /*
+ * Check if the interrupt controller has seen any changes in the
+ * interrupt line.
+ */
+ regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
+ if (val & bit)
+ ack = true;
+
+ /* Enable the interrupt now */
+ gpiochip_enable_irq(chip, gpio);
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
+ bit, bit);
+
+ /*
+ * If the interrupt line is still active but the interrupt controller
+ * has not seen any change on the line, another interrupt occurred
+ * while the line was already active. That interrupt was missed, so
+ * kick the interrupt handler again from the ordered workqueue.
+ */
+ if (active && !ack) {
+ struct ocelot_irq_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->irq_desc = desc;
+ INIT_WORK(&work->irq_work, ocelot_irq_work);
+ queue_work(info->wq, &work->irq_work);
+ }
+}
+
static void ocelot_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
@@ -1836,13 +1912,12 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_eoi_irqchip = {
+static struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
- .irq_eoi = ocelot_irq_ack,
- .irq_unmask = ocelot_irq_unmask,
- .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
- IRQCHIP_IMMUTABLE,
+ .irq_ack = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask_level,
+ .flags = IRQCHIP_IMMUTABLE,
.irq_set_type = ocelot_irq_set_type,
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
@@ -1859,14 +1934,9 @@ static struct irq_chip ocelot_irqchip = {
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
{
- type &= IRQ_TYPE_SENSE_MASK;
-
- if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
- if (type & IRQ_TYPE_LEVEL_HIGH)
- irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
- handle_fasteoi_irq, NULL);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip,
+ handle_level_irq, NULL);
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
handle_edge_irq, NULL);
@@ -1975,7 +2045,6 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
struct ocelot_pinctrl *info;
struct reset_control *reset;
struct regmap *pincfg;
- void __iomem *base;
int ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
@@ -1996,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info->desc)
return -ENOMEM;
+ info->wq = alloc_ordered_workqueue("ocelot_ordered", 0);
+ if (!info->wq)
+ return -ENOMEM;
+
info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
@@ -2004,21 +2077,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
"Failed to get reset\n");
reset_control_reset(reset);
- base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
- if (IS_ERR(base))
- return PTR_ERR(base);
-
info->stride = 1 + (info->desc->npins - 1) / 32;
regmap_config.max_register = OCELOT_GPIO_SD_MAP * info->stride + 15 * 4;
- info->map = devm_regmap_init_mmio(dev, base, &regmap_config);
- if (IS_ERR(info->map)) {
- dev_err(dev, "Failed to create regmap\n");
- return PTR_ERR(info->map);
- }
- dev_set_drvdata(dev, info->map);
+ info->map = ocelot_regmap_from_resource(pdev, 0, &regmap_config);
+ if (IS_ERR(info->map))
+ return dev_err_probe(dev, PTR_ERR(info->map),
+ "Failed to create regmap\n");
+ dev_set_drvdata(dev, info);
info->dev = dev;
/* Pinconf registers */
@@ -2043,6 +2110,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static int ocelot_pinctrl_remove(struct platform_device *pdev)
+{
+ struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+
+ destroy_workqueue(info->wq);
+
+ return 0;
+}
+
static struct platform_driver ocelot_pinctrl_driver = {
.driver = {
.name = "pinctrl-ocelot",
@@ -2050,6 +2126,7 @@ static struct platform_driver ocelot_pinctrl_driver = {
.suppress_bind_attrs = true,
},
.probe = ocelot_pinctrl_probe,
+ .remove = ocelot_pinctrl_remove,
};
module_platform_driver(ocelot_pinctrl_driver);
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 6bec7f143134..704a99d2f93c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
DECLARE_MSM_GPIO_PINS(188);
DECLARE_MSM_GPIO_PINS(189);
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
enum sc8180x_functions {
msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
- { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+ { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
{ 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
{ 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
{ 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index afc1f5df7545..b82ad135bf2a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
static struct platform_driver a100_r_pinctrl_driver = {
.probe = a100_r_pinctrl_probe,
.driver = {
- .name = "sun50iw10p1-r-pinctrl",
+ .name = "sun50i-a100-r-pinctrl",
.of_match_table = a100_r_pinctrl_match,
},
};
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index c45fb376d653..6b954c5acadb 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -265,6 +265,17 @@ config CHROMEOS_PRIVACY_SCREEN
this should probably always be built into the kernel to avoid or
minimize drm probe deferral.
+config CROS_TYPEC_SWITCH
+ tristate "ChromeOS EC Type-C Switch Control"
+ depends on MFD_CROS_EC_DEV && TYPEC && ACPI
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for configuring the ChromeOS EC Type-C
+ muxes and retimers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cros_typec_switch.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
# Kunit test cases
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index f7e74a845afc..2950610101f1 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o
obj-$(CONFIG_CROS_EC) += cros_ec.o
obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o
+obj-$(CONFIG_CROS_TYPEC_SWITCH) += cros_typec_switch.o
obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 4e14b4d6635d..a2cdbfbaeae6 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -740,6 +740,7 @@ static int __init
chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
const struct chromeos_laptop *src)
{
+ struct i2c_peripheral *i2c_peripherals;
struct i2c_peripheral *i2c_dev;
struct i2c_board_info *info;
int i;
@@ -748,17 +749,15 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
if (!src->num_i2c_peripherals)
return 0;
- cros_laptop->i2c_peripherals = kmemdup(src->i2c_peripherals,
- src->num_i2c_peripherals *
- sizeof(*src->i2c_peripherals),
- GFP_KERNEL);
- if (!cros_laptop->i2c_peripherals)
+ i2c_peripherals = kmemdup(src->i2c_peripherals,
+ src->num_i2c_peripherals *
+ sizeof(*src->i2c_peripherals),
+ GFP_KERNEL);
+ if (!i2c_peripherals)
return -ENOMEM;
- cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
-
- for (i = 0; i < cros_laptop->num_i2c_peripherals; i++) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ for (i = 0; i < src->num_i2c_peripherals; i++) {
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
error = chromeos_laptop_setup_irq(i2c_dev);
@@ -775,16 +774,19 @@ chromeos_laptop_prepare_i2c_peripherals(struct chromeos_laptop *cros_laptop,
}
}
+ cros_laptop->i2c_peripherals = i2c_peripherals;
+ cros_laptop->num_i2c_peripherals = src->num_i2c_peripherals;
+
return 0;
err_out:
while (--i >= 0) {
- i2c_dev = &cros_laptop->i2c_peripherals[i];
+ i2c_dev = &i2c_peripherals[i];
info = &i2c_dev->board_info;
if (!IS_ERR_OR_NULL(info->fwnode))
fwnode_remove_software_node(info->fwnode);
}
- kfree(cros_laptop->i2c_peripherals);
+ kfree(i2c_peripherals);
return error;
}
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 8aace50d446d..ec733f683f34 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -115,7 +115,7 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
if (ec_dev->host_sleep_v1) {
buf.u.req1.sleep_event = sleep_event;
buf.u.req1.suspend_params.sleep_timeout_ms =
- EC_HOST_SLEEP_TIMEOUT_DEFAULT;
+ ec_dev->suspend_timeout_ms;
buf.msg.outsize = sizeof(buf.u.req1);
if ((sleep_event == HOST_SLEEP_EVENT_S3_RESUME) ||
@@ -188,6 +188,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
ec_dev->max_passthru = 0;
ec_dev->ec = NULL;
ec_dev->pd = NULL;
+ ec_dev->suspend_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;
ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
if (!ec_dev->din)
@@ -349,10 +350,16 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
+ bool wake_event;
+
while (ec_dev->mkbp_event_supported &&
- cros_ec_get_next_event(ec_dev, NULL, NULL) > 0)
+ cros_ec_get_next_event(ec_dev, &wake_event, NULL) > 0) {
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
+
+ if (wake_event && device_may_wakeup(ec_dev->dev))
+ pm_wakeup_event(ec_dev->dev, 0);
+ }
}
/**
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index fd33de546aee..0de7c255254e 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -327,6 +327,9 @@ static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
return -EFAULT;
+ if (s_mem.bytes > sizeof(s_mem.buffer))
+ return -EINVAL;
+
num = ec_dev->cmd_readmem(ec_dev, s_mem.offset, s_mem.bytes,
s_mem.buffer);
if (num <= 0)
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 0dbceee87a4b..4e63adf083ea 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -470,6 +470,9 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debugfs_create_x32("last_resume_result", 0444, debug_info->dir,
&ec->ec_dev->last_resume_result);
+ debugfs_create_u16("suspend_timeout_ms", 0664, debug_info->dir,
+ &ec->ec_dev->suspend_timeout_ms);
+
ec->debug_info = debug_info;
dev_set_drvdata(&pd->dev, ec);
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 9f5b95763173..b6823c654c3f 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -317,13 +317,11 @@ static int cros_ec_i2c_probe(struct i2c_client *client,
return 0;
}
-static int cros_ec_i2c_remove(struct i2c_client *client)
+static void cros_ec_i2c_remove(struct i2c_client *client)
{
struct cros_ec_device *ec_dev = i2c_get_clientdata(client);
cros_ec_unregister(ec_dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 05d2e8765a66..475a6dd72db6 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -773,6 +773,7 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
u8 event_type;
u32 host_event;
int ret;
+ u32 ver_mask;
/*
* Default value for wake_event.
@@ -794,6 +795,37 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
return get_keyboard_state_event(ec_dev);
ret = get_next_event(ec_dev);
+ /*
+ * -ENOPROTOOPT is returned when the EC replies with
+ * EC_RES_INVALID_VERSION. This can occur when an EC-based device
+ * (e.g. a Fingerprint MCU) jumps to its RO image, which does not
+ * support the newer version of the command. In that case, try to
+ * update the maximum supported version of EC_CMD_GET_NEXT_EVENT.
+ */
+ if (ret == -ENOPROTOOPT) {
+ dev_dbg(ec_dev->dev,
+ "GET_NEXT_EVENT returned invalid version error.\n");
+ ret = cros_ec_get_host_command_version_mask(ec_dev,
+ EC_CMD_GET_NEXT_EVENT,
+ &ver_mask);
+ if (ret < 0 || ver_mask == 0)
+ /*
+ * Do not change the supported MKBP version if the
+ * version mask cannot be obtained correctly. Note
+ * that EC_CMD_GET_NEXT_EVENT returned
+ * EC_RES_INVALID_VERSION, which means the command
+ * itself is present.
+ */
+ return -ENOPROTOOPT;
+
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+ dev_dbg(ec_dev->dev, "MKBP support version changed to %u\n",
+ ec_dev->mkbp_event_supported - 1);
+
+ /* Try to get next event with new MKBP support version set. */
+ ret = get_next_event(ec_dev);
+ }
+
if (ret <= 0)
return ret;
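The version fallback above derives the new MKBP command version from the mask returned by cros_ec_get_host_command_version_mask(): fls() yields the index of the highest set bit counting from one, so mkbp_event_supported ends up holding "highest supported version + 1", and the debug message prints mkbp_event_supported - 1. A short worked sketch (the helper is hypothetical and the mask value is assumed for the example):

    static unsigned int example_mkbp_version(u32 ver_mask)
    {
            /* e.g. ver_mask == BIT(0) | BIT(1) == 0x3 -> fls() == 2 */
            unsigned int supported = fls(ver_mask);

            return supported - 1;   /* highest usable command version, here 1 */
    }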
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index de6ee0f926a6..2a7ff14dc37e 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -20,12 +20,14 @@
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
#include <linux/usb/typec_tbt.h>
#include <linux/usb/role.h>
#define DRV_NAME "cros-ec-typec"
-#define DP_PORT_VDO (BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D) | DP_CAP_DFP_D)
+#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
+ DP_CAP_DFP_D)
/* Supported alt modes. */
enum {
@@ -55,6 +57,7 @@ struct cros_typec_port {
struct usb_pd_identity c_identity;
struct typec_switch *ori_sw;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
struct usb_role_switch *role_sw;
/* Variables keeping track of switch state. */
@@ -70,6 +73,11 @@ struct cros_typec_port {
struct ec_response_typec_discovery *disc_data;
struct list_head partner_mode_list;
struct list_head plug_mode_list;
+
+ /* PDO-related structs */
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_src_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -143,6 +151,12 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
goto mux_err;
}
+ port->retimer = fwnode_typec_retimer_get(fwnode);
+ if (IS_ERR(port->retimer)) {
+ dev_dbg(dev, "Retimer handle not found.\n");
+ goto retimer_sw_err;
+ }
+
port->ori_sw = fwnode_typec_switch_get(fwnode);
if (IS_ERR(port->ori_sw)) {
dev_dbg(dev, "Orientation switch handle not found.\n");
@@ -158,12 +172,12 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
return 0;
role_sw_err:
- usb_role_switch_put(port->role_sw);
-ori_sw_err:
typec_switch_put(port->ori_sw);
-mux_err:
+ori_sw_err:
+ typec_retimer_put(port->retimer);
+retimer_sw_err:
typec_mux_put(port->mux);
-
+mux_err:
return -ENODEV;
}
@@ -206,6 +220,21 @@ static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int po
}
}
+/*
+ * Map the Type-C Mux state to retimer state and call the retimer set function. We need this
+ * because we re-use the Type-C mux state for retimers.
+ */
+static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_mux_state state)
+{
+ struct typec_retimer_state rstate = {
+ .alt = state.alt,
+ .mode = state.mode,
+ .data = state.data,
+ };
+
+ return typec_retimer_set(retimer, &rstate);
+}
+
static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
{
port->state.alt = NULL;
@@ -214,6 +243,7 @@ static int cros_typec_usb_disconnect_state(struct cros_typec_port *port)
usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE);
typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE);
+ cros_typec_retimer_set(port->retimer, port->state);
return typec_mux_set(port->mux, &port->state);
}
@@ -228,6 +258,14 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
cros_typec_unregister_altmodes(typec, port_num, true);
+ typec_partner_set_usb_power_delivery(port->partner, NULL);
+ usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
+ port->partner_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(port->partner_src_caps);
+ port->partner_src_caps = NULL;
+ usb_power_delivery_unregister(port->partner_pd);
+ port->partner_pd = NULL;
+
cros_typec_usb_disconnect_state(port);
port->mux_flags = USB_PD_MUX_NONE;
@@ -411,9 +449,14 @@ unregister_ports:
static int cros_typec_usb_safe_state(struct cros_typec_port *port)
{
+ int ret;
port->state.mode = TYPEC_STATE_SAFE;
- return typec_mux_set(port->mux, &port->state);
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+
+ return ret;
}
/*
@@ -510,7 +553,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
port->state.data = &dp_data;
port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode));
- return typec_mux_set(port->mux, &port->state);
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
+
+ return ret;
}
static int cros_typec_enable_usb4(struct cros_typec_data *typec,
@@ -599,7 +646,10 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
} else if (port->mux_flags & USB_PD_MUX_USB_ENABLED) {
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
- ret = typec_mux_set(port->mux, &port->state);
+
+ ret = cros_typec_retimer_set(port->retimer, port->state);
+ if (!ret)
+ ret = typec_mux_set(port->mux, &port->state);
} else {
dev_dbg(typec->dev,
"Unrecognized mode requested, mux flags: %x\n",
@@ -697,7 +747,7 @@ static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_
for (j = 0; j < sop_disc->svids[i].mode_count; j++) {
memset(&desc, 0, sizeof(desc));
desc.svid = sop_disc->svids[i].svid;
- desc.mode = j;
+ desc.mode = j + 1;
desc.vdo = sop_disc->svids[i].mode_vdo[j];
if (is_partner)
@@ -902,6 +952,46 @@ static int cros_typec_send_clear_event(struct cros_typec_data *typec, int port_n
sizeof(req), NULL, 0);
}
+static void cros_typec_register_partner_pdos(struct cros_typec_data *typec,
+ struct ec_response_typec_status *resp, int port_num)
+{
+ struct usb_power_delivery_capabilities_desc caps_desc = {};
+ struct usb_power_delivery_desc desc = {
+ .revision = (le16_to_cpu(resp->sop_revision) & 0xff00) >> 4,
+ };
+ struct cros_typec_port *port = typec->ports[port_num];
+
+ if (!port->partner || port->partner_pd)
+ return;
+
+ /* If no caps are available, don't bother creating a device. */
+ if (!resp->source_cap_count && !resp->sink_cap_count)
+ return;
+
+ port->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(port->partner_pd)) {
+ dev_warn(typec->dev, "Failed to register partner PD device, port: %d\n", port_num);
+ return;
+ }
+
+ typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
+
+ memcpy(caps_desc.pdo, resp->source_cap_pdos, sizeof(u32) * resp->source_cap_count);
+ caps_desc.role = TYPEC_SOURCE;
+ port->partner_src_caps = usb_power_delivery_register_capabilities(port->partner_pd,
+ &caps_desc);
+ if (IS_ERR(port->partner_src_caps))
+ dev_warn(typec->dev, "Failed to register source caps, port: %d\n", port_num);
+
+ memset(&caps_desc, 0, sizeof(caps_desc));
+ memcpy(caps_desc.pdo, resp->sink_cap_pdos, sizeof(u32) * resp->sink_cap_count);
+ caps_desc.role = TYPEC_SINK;
+ port->partner_sink_caps = usb_power_delivery_register_capabilities(port->partner_pd,
+ &caps_desc);
+ if (IS_ERR(port->partner_sink_caps))
+ dev_warn(typec->dev, "Failed to register sink caps, port: %d\n", port_num);
+}
+
static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
{
struct ec_response_typec_status resp;
@@ -949,6 +1039,8 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
}
if (resp.sop_connected)
typec_set_pwr_opmode(typec->ports[port_num]->port, TYPEC_PWR_MODE_PD);
+
+ cros_typec_register_partner_pdos(typec, &resp, port_num);
}
if (resp.events & PD_STATUS_EVENT_SOP_PRIME_DISC_DONE &&
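Editor's note: the relabelled unwind path in cros_typec_get_switch_handles() above follows the usual kernel idiom of acquiring handles in order and releasing them in reverse, with each label named after the step that failed so a failing step never puts the handle it did not get. A minimal sketch of that idiom; the struct and the get_a()/put_a() style helpers are hypothetical, only the unwind shape matters:

#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical handles and helpers; only the unwind shape matters here. */
struct ctx { void *a, *b, *c; };
void *get_a(void); void put_a(void *p);
void *get_b(void); void put_b(void *p);
void *get_c(void); void put_c(void *p);

static int acquire_all(struct ctx *ctx)
{
	ctx->a = get_a();
	if (IS_ERR(ctx->a))
		goto a_err;

	ctx->b = get_b();
	if (IS_ERR(ctx->b))
		goto b_err;

	ctx->c = get_c();
	if (IS_ERR(ctx->c))
		goto c_err;

	return 0;

c_err:			/* step c failed: release b, then a */
	put_b(ctx->b);
b_err:			/* step b failed: release a only */
	put_a(ctx->a);
a_err:			/* step a failed: nothing to release */
	return -ENODEV;
}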
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
new file mode 100644
index 000000000000..a26219e97c93
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC
+ *
+ * This driver provides the ability to configure Type-C muxes and retimers which are controlled by
+ * the ChromeOS EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+
+/* Handles and other relevant data required for each port's switches. */
+struct cros_typec_port {
+ int port_num;
+ struct typec_mux_dev *mode_switch;
+ struct typec_retimer *retimer;
+ struct cros_typec_switch_data *sdata;
+};
+
+/* Driver-specific data. */
+struct cros_typec_switch_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+};
+
+static int cros_typec_cmd_mux_set(struct cros_typec_switch_data *sdata, int port_num, u8 index,
+ u8 state)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+ .mux_params = {
+ .mux_index = index,
+ .mux_flags = state,
+ },
+ };
+
+ return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
+}
+
+static int cros_typec_get_mux_state(unsigned long mode, struct typec_altmode *alt)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (mode == TYPEC_STATE_SAFE)
+ ret = USB_PD_MUX_SAFE_MODE;
+ else if (mode == TYPEC_STATE_USB)
+ ret = USB_PD_MUX_USB_ENABLED;
+ else if (alt && alt->svid == USB_TYPEC_DP_SID)
+ ret = USB_PD_MUX_DP_ENABLED;
+
+ return ret;
+}
+
+static int cros_typec_send_clear_event(struct cros_typec_switch_data *sdata, int port_num,
+ u32 events_mask)
+{
+ struct ec_params_typec_control req = {
+ .port = port_num,
+ .command = TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ .clear_events_mask = events_mask,
+ };
+
+ return cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_CONTROL, &req, sizeof(req), NULL, 0);
+}
+
+static bool cros_typec_check_event(struct cros_typec_switch_data *sdata, int port_num, u32 mask)
+{
+ struct ec_response_typec_status resp;
+ struct ec_params_typec_status req = {
+ .port = port_num,
+ };
+ int ret;
+
+ ret = cros_ec_cmd(sdata->ec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(sdata->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
+ return false;
+ }
+
+ if (resp.events & mask)
+ return true;
+
+ return false;
+}
+
+/*
+ * The ChromeOS EC treats both mode-switches and retimers as "muxes" for the purposes of the
+ * host command API. This common function configures and verifies the retimer/mode-switch
+ * according to the provided setting.
+ */
+static int cros_typec_configure_mux(struct cros_typec_switch_data *sdata, int port_num, int index,
+ unsigned long mode, struct typec_altmode *alt)
+{
+ unsigned long end;
+ u32 event_mask;
+ u8 mux_state;
+ int ret;
+
+ ret = cros_typec_get_mux_state(mode, alt);
+ if (ret < 0)
+ return ret;
+ mux_state = (u8)ret;
+
+ /* Clear any old mux set done event. */
+ if (index == 0)
+ event_mask = PD_STATUS_EVENT_MUX_0_SET_DONE;
+ else
+ event_mask = PD_STATUS_EVENT_MUX_1_SET_DONE;
+
+ ret = cros_typec_send_clear_event(sdata, port_num, event_mask);
+ if (ret < 0)
+ return ret;
+
+ /* Send the set command. */
+ ret = cros_typec_cmd_mux_set(sdata, port_num, index, mux_state);
+ if (ret < 0)
+ return ret;
+
+ /* Check for the mux set done event. */
+ end = jiffies + msecs_to_jiffies(1000);
+ do {
+ if (cros_typec_check_event(sdata, port_num, event_mask))
+ return 0;
+
+ usleep_range(500, 1000);
+ } while (time_before(jiffies, end));
+
+ dev_err(sdata->dev, "Timed out waiting for mux set done on index: %d, state: %d\n",
+ index, mux_state);
+
+ return -ETIMEDOUT;
+}
+
+static int cros_typec_mode_switch_set(struct typec_mux_dev *mode_switch,
+ struct typec_mux_state *state)
+{
+ struct cros_typec_port *port = typec_mux_get_drvdata(mode_switch);
+
+ /* Mode switches have index 0. */
+ return cros_typec_configure_mux(port->sdata, port->port_num, 0, state->mode, state->alt);
+}
+
+static int cros_typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state)
+{
+ struct cros_typec_port *port = typec_retimer_get_drvdata(retimer);
+
+ /* Retimers have index 1. */
+ return cros_typec_configure_mux(port->sdata, port->port_num, 1, state->mode, state->alt);
+}
+
+static void cros_typec_unregister_switches(struct cros_typec_switch_data *sdata)
+{
+ int i;
+
+ for (i = 0; i < EC_USB_PD_MAX_PORTS; i++) {
+ if (!sdata->ports[i])
+ continue;
+ typec_retimer_unregister(sdata->ports[i]->retimer);
+ typec_mux_unregister(sdata->ports[i]->mode_switch);
+ }
+}
+
+static int cros_typec_register_mode_switch(struct cros_typec_port *port,
+ struct fwnode_handle *fwnode)
+{
+ struct typec_mux_desc mode_switch_desc = {
+ .fwnode = fwnode,
+ .drvdata = port,
+ .name = fwnode_get_name(fwnode),
+ .set = cros_typec_mode_switch_set,
+ };
+
+ port->mode_switch = typec_mux_register(port->sdata->dev, &mode_switch_desc);
+
+ return PTR_ERR_OR_ZERO(port->mode_switch);
+}
+
+static int cros_typec_register_retimer(struct cros_typec_port *port, struct fwnode_handle *fwnode)
+{
+ struct typec_retimer_desc retimer_desc = {
+ .fwnode = fwnode,
+ .drvdata = port,
+ .name = fwnode_get_name(fwnode),
+ .set = cros_typec_retimer_set,
+ };
+
+ port->retimer = typec_retimer_register(port->sdata->dev, &retimer_desc);
+
+ return PTR_ERR_OR_ZERO(port->retimer);
+}
+
+static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
+{
+ struct cros_typec_port *port;
+ struct device *dev = sdata->dev;
+ struct fwnode_handle *fwnode;
+ struct acpi_device *adev;
+ unsigned long long index;
+ int nports, ret;
+
+ nports = device_get_child_node_count(dev);
+ if (nports == 0) {
+ dev_err(dev, "No switch devices found.\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(dev, fwnode) {
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ ret = -ENOMEM;
+ goto err_switch;
+ }
+
+ adev = to_acpi_device_node(fwnode);
+ if (!adev) {
+ dev_err(fwnode->dev, "Couldn't get ACPI device handle\n");
+ ret = -ENODEV;
+ goto err_switch;
+ }
+
+ ret = acpi_evaluate_integer(adev->handle, "_ADR", NULL, &index);
+ if (ACPI_FAILURE(ret)) {
+ dev_err(fwnode->dev, "_ADR wasn't evaluated\n");
+ ret = -ENODATA;
+ goto err_switch;
+ }
+
+ if (index >= EC_USB_PD_MAX_PORTS) {
+ dev_err(fwnode->dev, "Invalid port index number: %llu\n", index);
+ ret = -EINVAL;
+ goto err_switch;
+ }
+ port->sdata = sdata;
+ port->port_num = index;
+ sdata->ports[index] = port;
+
+ ret = cros_typec_register_retimer(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Retimer switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Retimer switch registered for index %llu\n", index);
+
+ if (!device_property_present(fwnode->dev, "mode-switch"))
+ continue;
+
+ ret = cros_typec_register_mode_switch(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Mode switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Mode switch registered for index %llu\n", index);
+ }
+
+ return 0;
+err_switch:
+ cros_typec_unregister_switches(sdata);
+ return ret;
+}
+
+static int cros_typec_switch_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_typec_switch_data *sdata;
+
+ sdata = devm_kzalloc(dev, sizeof(*sdata), GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ sdata->dev = dev;
+ sdata->ec = dev_get_drvdata(pdev->dev.parent);
+
+ platform_set_drvdata(pdev, sdata);
+
+ return cros_typec_register_switches(sdata);
+}
+
+static int cros_typec_switch_remove(struct platform_device *pdev)
+{
+ struct cros_typec_switch_data *sdata = platform_get_drvdata(pdev);
+
+ cros_typec_unregister_switches(sdata);
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_typec_switch_acpi_id[] = {
+ { "GOOG001A", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cros_typec_switch_acpi_id);
+#endif
+
+static struct platform_driver cros_typec_switch_driver = {
+ .driver = {
+ .name = "cros-typec-switch",
+ .acpi_match_table = ACPI_PTR(cros_typec_switch_acpi_id),
+ },
+ .probe = cros_typec_switch_probe,
+ .remove = cros_typec_switch_remove,
+};
+
+module_platform_driver(cros_typec_switch_driver);
+
+MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS EC Type-C Switch control");
+MODULE_LICENSE("GPL");
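Editor's note: the most distinctive piece of the new cros_typec_switch driver above is cros_typec_configure_mux(): it clears any stale "mux set done" event, sends the set command, then polls the EC status with a deadline. A hedged sketch of that poll-until-done pattern; check_done() stands in for the EC_CMD_TYPEC_STATUS event check and is hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int wait_for_done(bool (*check_done)(void *ctx), void *ctx,
			 unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (check_done(ctx))
			return 0;
		usleep_range(500, 1000);	/* give the EC a little time */
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

The driver above uses a 1000 ms deadline and reports -ETIMEDOUT if the EC never raises the corresponding PD_STATUS_EVENT_MUX_*_SET_DONE bit.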
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index 55834ccb4ac7..8d833836a6d3 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -460,8 +460,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
u32 regval;
int err;
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
if (err)
goto regmap_read_fail;
@@ -474,7 +472,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -491,8 +488,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
* line card which has already been enabled. Disabling does not affect the disabled line
* card.
*/
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
if (err)
goto regmap_read_fail;
@@ -505,7 +500,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -538,6 +532,15 @@ mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
static void
mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
{
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+}
+
+static void
+mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
mutex_lock(&mlxreg_lc->lock);
if (action)
@@ -560,8 +563,9 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
+ mutex_lock(&mlxreg_lc->lock);
if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
- return 0;
+ goto mlxreg_lc_non_initialized_exit;
switch (kind) {
case MLXREG_HOTPLUG_LC_SYNCED:
@@ -574,7 +578,7 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
- return err;
+ goto mlxreg_lc_power_on_off_fail;
}
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
@@ -588,12 +592,13 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
- return err;
+
+ goto mlxreg_lc_enable_disable_exit;
}
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
if (err)
- return err;
+ goto mlxreg_lc_create_static_devices_fail;
/* In case line card is already in ready state - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
@@ -620,6 +625,12 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
break;
}
+mlxreg_lc_enable_disable_exit:
+mlxreg_lc_power_on_off_fail:
+mlxreg_lc_create_static_devices_fail:
+mlxreg_lc_non_initialized_exit:
+ mutex_unlock(&mlxreg_lc->lock);
+
return err;
}
@@ -665,7 +676,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
if (err)
goto mlxreg_lc_create_static_devices_failed;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
}
/* Verify if line card is synchronized. */
@@ -676,7 +687,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
/* Power on line card if necessary. */
if (regval & mlxreg_lc->data->mask) {
mlxreg_lc->state |= MLXREG_LC_SYNCED;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
@@ -684,7 +695,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
}
}
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
return 0;
@@ -814,10 +825,9 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
mutex_init(&mlxreg_lc->lock);
/* Set event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = mlxreg_lc_event_handler;
- data->notifier->handle = mlxreg_lc;
- }
+ data->notifier->user_handler = mlxreg_lc_event_handler;
+ data->notifier->handle = mlxreg_lc;
+
data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
@@ -863,7 +873,6 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
- err = PTR_ERR(regmap);
goto regcache_sync_fail;
}
@@ -878,16 +887,14 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err)
goto mlxreg_lc_config_init_fail;
- return err;
+ return 0;
mlxreg_lc_config_init_fail:
regcache_sync_fail:
regmap_write_fail:
devm_regmap_init_i2c_fail:
- if (data->hpdev.client) {
- i2c_unregister_device(data->hpdev.client);
- data->hpdev.client = NULL;
- }
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
@@ -905,6 +912,8 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
+
/*
* Probing and removing are invoked by hotplug events raised upon line card insertion and
* removing. If probing procedure fails all data is cleared. However, hotplug event still
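Editor's note: the mlxreg-lc hunks above drop the mutex from mlxreg_lc_power_on_off(), mlxreg_lc_enable_disable() and mlxreg_lc_state_update(), push locking up into mlxreg_lc_event_handler(), and add a *_locked wrapper for callers that do not already hold the lock; this lets the event handler hold the mutex across the whole sequence without self-deadlocking. A minimal sketch of that lockless-helper-plus-locked-wrapper split, with hypothetical names:

#include <linux/mutex.h>
#include <linux/types.h>

struct lc { struct mutex lock; u32 state; };

/* Caller must hold lc->lock. */
static void lc_state_update(struct lc *lc, u32 bit, bool set)
{
	if (set)
		lc->state |= bit;
	else
		lc->state &= ~bit;
}

/* Convenience wrapper for paths that do not already hold the lock. */
static void lc_state_update_locked(struct lc *lc, u32 bit, bool set)
{
	mutex_lock(&lc->lock);
	lc_state_update(lc, bit, set);
	mutex_unlock(&lc->lock);
}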
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index 444ec81ba02d..73961a24c849 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -519,7 +519,7 @@ static int mshw0011_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
+ strscpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
bat0 = i2c_acpi_new_device(dev, 1, &board_info);
if (IS_ERR(bat0))
@@ -554,7 +554,7 @@ out_err:
return error;
}
-static int mshw0011_remove(struct i2c_client *client)
+static void mshw0011_remove(struct i2c_client *client)
{
struct mshw0011_data *cdata = i2c_get_clientdata(client);
@@ -564,8 +564,6 @@ static int mshw0011_remove(struct i2c_client *client)
kthread_stop(cdata->poll_task);
i2c_unregister_device(cdata->bat0);
-
- return 0;
}
static const struct acpi_device_id mshw0011_acpi_match[] = {
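Editor's note: two tree-wide API transitions show up in the surface3_power hunks above: strlcpy() is replaced by strscpy(), which reports truncation with -E2BIG instead of returning the would-be length, and the I2C remove() callback now returns void, so drivers no longer return a status the core ignored anyway. A hedged sketch of the resulting driver shape for this kernel generation; the mydrv_* names are illustrative:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/string.h>

static int mydrv_probe(struct i2c_client *client)
{
	char name[I2C_NAME_SIZE];

	/* strscpy() reports truncation via -E2BIG rather than silently. */
	if (strscpy(name, "MSHW0011-bat0", sizeof(name)) < 0)
		return -EINVAL;

	return 0;
}

static void mydrv_remove(struct i2c_client *client)
{
	/* Nothing to report: remove() cannot fail any more. */
}

static struct i2c_driver mydrv = {
	.driver = { .name = "mydrv" },
	.probe_new = mydrv_probe,	/* single-argument probe variant */
	.remove = mydrv_remove,
};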
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 44e317970557..50500e562963 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -355,7 +355,8 @@ static u32 san_evt_bat_nf(struct ssam_event_notifier *nf,
INIT_DELAYED_WORK(&work->work, san_evt_bat_workfn);
work->dev = d->dev;
- memcpy(&work->event, event, sizeof(struct ssam_event) + event->length);
+ work->event = *event;
+ memcpy(work->event.data, event->data, event->length);
queue_delayed_work(san_wq, &work->work, delay);
return SSAM_NOTIF_HANDLED;
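Editor's note: the san_evt_bat_nf() change above replaces one memcpy() over the whole event (fixed header plus flexible-array payload) with a struct assignment for the header and a separate, explicitly sized memcpy() for the trailing data; copying past the declared size of an embedded struct is what the fortified memcpy() checks flag. A minimal sketch of the pattern with a hypothetical event type:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical event with a trailing flexible-array payload. */
struct evt {
	u16 len;
	u8 data[];	/* len bytes follow the header */
};

static struct evt *evt_clone(const struct evt *src)
{
	struct evt *dst;

	dst = kzalloc(struct_size(dst, data, src->len), GFP_KERNEL);
	if (!dst)
		return NULL;

	*dst = *src;				/* fixed-size header only */
	memcpy(dst->data, src->data, src->len);	/* payload, sized explicitly */
	return dst;
}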
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d5655f6a4a41..585911020cea 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -86,38 +86,38 @@ static const struct software_node ssam_node_bas_dtx = {
.parent = &ssam_node_root,
};
-/* HID keyboard (TID1). */
-static const struct software_node ssam_node_hid_tid1_keyboard = {
+/* HID keyboard (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_keyboard = {
.name = "ssam:01:15:01:01:00",
.parent = &ssam_node_root,
};
-/* HID pen stash (TID1; pen taken / stashed away evens). */
-static const struct software_node ssam_node_hid_tid1_penstash = {
+/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_sam_penstash = {
.name = "ssam:01:15:01:02:00",
.parent = &ssam_node_root,
};
-/* HID touchpad (TID1). */
-static const struct software_node ssam_node_hid_tid1_touchpad = {
+/* HID touchpad (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_touchpad = {
.name = "ssam:01:15:01:03:00",
.parent = &ssam_node_root,
};
-/* HID device instance 6 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid6 = {
+/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
+static const struct software_node ssam_node_hid_sam_sensors = {
.name = "ssam:01:15:01:06:00",
.parent = &ssam_node_root,
};
-/* HID device instance 7 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid7 = {
+/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
+static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
.name = "ssam:01:15:01:07:00",
.parent = &ssam_node_root,
};
-/* HID system controls (TID1). */
-static const struct software_node ssam_node_hid_tid1_sysctrl = {
+/* HID system controls (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_sysctrl = {
.name = "ssam:01:15:01:08:00",
.parent = &ssam_node_root,
};
@@ -182,8 +182,8 @@ static const struct software_node ssam_node_hid_kip_touchpad = {
.parent = &ssam_node_hub_kip,
};
-/* HID device instance 5 (KIP hub, unknown HID device). */
-static const struct software_node ssam_node_hid_kip_iid5 = {
+/* HID device instance 5 (KIP hub, type-cover firmware update). */
+static const struct software_node ssam_node_hid_kip_fwupd = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_hub_kip,
};
@@ -241,12 +241,12 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_pos_tablet_switch,
- &ssam_node_hid_tid1_keyboard,
- &ssam_node_hid_tid1_penstash,
- &ssam_node_hid_tid1_touchpad,
- &ssam_node_hid_tid1_iid6,
- &ssam_node_hid_tid1_iid7,
- &ssam_node_hid_tid1_sysctrl,
+ &ssam_node_hid_sam_keyboard,
+ &ssam_node_hid_sam_penstash,
+ &ssam_node_hid_sam_touchpad,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ &ssam_node_hid_sam_sysctrl,
NULL,
};
@@ -278,7 +278,9 @@ static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
&ssam_node_hid_kip_touchpad,
- &ssam_node_hid_kip_iid5,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
NULL,
};
@@ -325,6 +327,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
/* Surface Laptop Studio */
{ "MSHW0123", (unsigned long)ssam_node_group_sls },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index f2f98e942cf2..f5312f51de19 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -93,6 +93,7 @@ config PEAQ_WMI
config NVIDIA_WMI_EC_BACKLIGHT
tristate "EC Backlight Driver for Hybrid Graphics Notebook Systems"
+ depends on ACPI_VIDEO
depends on ACPI_WMI
depends on BACKLIGHT_CLASS_DEVICE
help
@@ -790,6 +791,7 @@ config SAMSUNG_Q10
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on ACPI
+ depends on ACPI_BATTERY
depends on ACPI_WMI
select LEDS_CLASS
select NEW_LEDS
@@ -797,6 +799,7 @@ config ACPI_TOSHIBA
depends on INPUT
depends on SERIO_I8042 || SERIO_I8042 = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on HWMON || HWMON = n
depends on RFKILL || RFKILL = n
depends on IIO
select INPUT_SPARSEKMAP
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e0230ea0cb7e..18224f9a5bc0 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
{KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
{KE_IGNORE, 0x41, {KEY_MUTE} },
{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ /*
+ * 0x61 is KEY_SWITCHVIDEOMODE. Usually this is a duplicate input event
+ * with the "Video Bus" input device events. But sometimes it is not
+ * a dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
@@ -643,69 +650,6 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = {
{}
};
-static int __init
-video_set_backlight_video_vendor(const struct dmi_system_id *d)
-{
- interface->capability &= ~ACER_CAP_BRIGHTNESS;
- pr_info("Brightness must be controlled by generic video driver\n");
- return 0;
-}
-
-static const struct dmi_system_id video_vendor_dmi_table[] __initconst = {
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer TravelMate 4750",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer Extensa 5235",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer TravelMate 5760",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer Aspire 5750",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
- },
- },
- {
- .callback = video_set_backlight_video_vendor,
- .ident = "Acer Aspire 5741",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
- },
- },
- {
- /*
- * Note no video_set_backlight_video_vendor, we must use the
- * acer interface, as there is no native backlight interface.
- */
- .ident = "Acer KAV80",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
- },
- },
- {}
-};
-
/* Find which quirks are needed for a particular vendor/ model pair */
static void __init find_quirks(void)
{
@@ -2477,9 +2421,6 @@ static int __init acer_wmi_init(void)
set_quirks();
- if (dmi_check_system(video_vendor_dmi_table))
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
interface->capability &= ~ACER_CAP_BRIGHTNESS;
@@ -2522,7 +2463,7 @@ static int __init acer_wmi_init(void)
goto error_platform_register;
}
- acer_platform_device = platform_device_alloc("acer-wmi", -1);
+ acer_platform_device = platform_device_alloc("acer-wmi", PLATFORM_DEVID_NONE);
if (!acer_platform_device) {
err = -ENOMEM;
goto error_device_alloc;
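Editor's note: several hunks in this series (acer-wmi, acerhdf, amd_hsmp) replace the bare -1 id passed to platform_device_alloc() with PLATFORM_DEVID_NONE, the named constant meaning "single instance, do not append an id to the device name". A hedged sketch of the usual alloc/add pair; the mydrv name is illustrative:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device *mydrv_pdev;

static int __init mydrv_init(void)
{
	int err;

	/* PLATFORM_DEVID_NONE (-1): device name stays plain "mydrv". */
	mydrv_pdev = platform_device_alloc("mydrv", PLATFORM_DEVID_NONE);
	if (!mydrv_pdev)
		return -ENOMEM;

	err = platform_device_add(mydrv_pdev);
	if (err)
		platform_device_put(mydrv_pdev);	/* drop the ref on failure */

	return err;
}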
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 3463629f8764..d2c0fc38c201 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -676,7 +676,7 @@ static int __init acerhdf_register_platform(void)
if (err)
return err;
- acerhdf_dev = platform_device_alloc("acerhdf", -1);
+ acerhdf_dev = platform_device_alloc("acerhdf", PLATFORM_DEVID_NONE);
if (!acerhdf_dev) {
err = -ENOMEM;
goto err_device_alloc;
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index c0d0a3c5170c..a825af8126c8 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -3,6 +3,8 @@
# AMD x86 Platform Specific Drivers
#
+source "drivers/platform/x86/amd/pmf/Kconfig"
+
config AMD_PMC
tristate "AMD SoC PMC driver"
depends on ACPI && PCI && RTC_CLASS
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index a03fbb08e808..2c229198e24c 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -8,3 +8,4 @@ amd-pmc-y := pmc.o
obj-$(CONFIG_AMD_PMC) += amd-pmc.o
amd_hsmp-y := hsmp.o
obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
+obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c
index a0c54b838c11..521c6a229362 100644
--- a/drivers/platform/x86/amd/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp.c
@@ -392,7 +392,7 @@ static int __init hsmp_plt_init(void)
if (ret)
return ret;
- amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, -1);
+ amd_hsmp_platdev = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
if (!amd_hsmp_platdev) {
ret = -ENOMEM;
goto drv_unregister;
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index 700eb19e8450..ce859b300712 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -39,7 +39,9 @@
#define AMD_PMC_STB_INDEX_ADDRESS 0xF8
#define AMD_PMC_STB_INDEX_DATA 0xFC
#define AMD_PMC_STB_PMI_0 0x03E30600
-#define AMD_PMC_STB_PREDEF 0xC6000001
+#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
/* STB S2D(Spill to DRAM) has different message port offset */
#define STB_SPILL_TO_DRAM 0xBE
@@ -151,9 +153,7 @@ struct amd_pmc_dev {
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
-#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
};
static bool enable_stb;
@@ -369,7 +369,64 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
}
#endif
-#ifdef CONFIG_DEBUG_FS
+static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+{
+ int rc;
+ u32 val;
+
+ rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
+ if (rc)
+ return rc;
+
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
+ dev->minor = (val >> 8) & GENMASK(7, 0);
+ dev->rev = (val >> 0) & GENMASK(7, 0);
+
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
+
+ return 0;
+}
+
+static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u.%u.%u\n", dev->major, dev->minor, dev->rev);
+}
+
+static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmc_dev *dev = dev_get_drvdata(d);
+
+ if (!dev->major) {
+ int rc = amd_pmc_get_smu_version(dev);
+
+ if (rc)
+ return rc;
+ }
+ return sysfs_emit(buf, "%u\n", dev->smu_program);
+}
+
+static DEVICE_ATTR_RO(smu_fw_version);
+static DEVICE_ATTR_RO(smu_program);
+
+static struct attribute *pmc_attrs[] = {
+ &dev_attr_smu_fw_version.attr,
+ &dev_attr_smu_program.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(pmc);
+
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
@@ -435,26 +492,6 @@ static int s0ix_stats_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
-static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
-{
- int rc;
- u32 val;
-
- rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
- if (rc)
- return rc;
-
- dev->smu_program = (val >> 24) & GENMASK(7, 0);
- dev->major = (val >> 16) & GENMASK(7, 0);
- dev->minor = (val >> 8) & GENMASK(7, 0);
- dev->rev = (val >> 0) & GENMASK(7, 0);
-
- dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
- dev->smu_program, dev->major, dev->minor, dev->rev);
-
- return 0;
-}
-
static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
{
struct amd_pmc_dev *dev = s->private;
@@ -504,15 +541,6 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&amd_pmc_stb_debugfs_fops);
}
}
-#else
-static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
-{
-}
-
-static inline void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
{
@@ -691,8 +719,6 @@ static void amd_pmc_s2idle_prepare(void)
}
}
- /* Dump the IdleMask before we send hint to SMU */
- amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
if (rc) {
@@ -700,11 +726,22 @@ static void amd_pmc_s2idle_prepare(void)
return;
}
- if (enable_stb) {
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
- if (rc)
- dev_err(pdev->dev, "error writing to STB: %d\n", rc);
- }
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_PREPARE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+}
+
+static void amd_pmc_s2idle_check(void)
+{
+ struct amd_pmc_dev *pdev = &pmc;
+ int rc;
+
+ /* Dump the IdleMask before we add to the STB */
+ amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
+
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_CHECK);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
}
static void amd_pmc_s2idle_restore(void)
@@ -721,15 +758,9 @@ static void amd_pmc_s2idle_restore(void)
/* Let SMU know that we are looking for stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
- /* Dump the IdleMask to see the blockers */
- amd_pmc_idlemask_read(pdev, pdev->dev, NULL);
-
- /* Write data incremented by 1 to distinguish in stb_read */
- if (enable_stb) {
- rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
- if (rc)
- dev_err(pdev->dev, "error writing to STB: %d\n", rc);
- }
+ rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ if (rc)
+ dev_err(pdev->dev, "error writing to STB: %d\n", rc);
/* Notify on failed entry */
amd_pmc_validate_deepest(pdev);
@@ -737,6 +768,7 @@ static void amd_pmc_s2idle_restore(void)
static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
.prepare = amd_pmc_s2idle_prepare,
+ .check = amd_pmc_s2idle_check,
.restore = amd_pmc_s2idle_restore,
};
#endif
@@ -935,6 +967,7 @@ static struct platform_driver amd_pmc_driver = {
.driver = {
.name = "amd_pmc",
.acpi_match_table = amd_pmc_acpi_ids,
+ .dev_groups = pmc_groups,
},
.probe = amd_pmc_probe,
.remove = amd_pmc_remove,
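Editor's note: the amd_pmc hunks above add a .check callback alongside .prepare and .restore so the idle mask is sampled (and an STB marker written) as s2idle is entered rather than only at prepare time. For context, a hedged sketch of how such ops are hooked up on x86 s2idle platforms; acpi_register_lps0_dev()/acpi_unregister_lps0_dev() are the registration helpers this driver uses elsewhere in the file (not shown in the hunks), and the mydrv_* names are illustrative:

#include <linux/acpi.h>

static void mydrv_s2idle_prepare(void) { /* one-time work before s2idle */ }
static void mydrv_s2idle_check(void)   { /* runs between prepare and restore */ }
static void mydrv_s2idle_restore(void) { /* undo prepare after resume */ }

static struct acpi_s2idle_dev_ops mydrv_s2idle_ops = {
	.prepare = mydrv_s2idle_prepare,
	.check   = mydrv_s2idle_check,
	.restore = mydrv_s2idle_restore,
};

/* Typically called from probe() and remove(). */
static int mydrv_register_s2idle(void)
{
	return acpi_register_lps0_dev(&mydrv_s2idle_ops);
}

static void mydrv_unregister_s2idle(void)
{
	acpi_unregister_lps0_dev(&mydrv_s2idle_ops);
}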
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
new file mode 100644
index 000000000000..c375498c4071
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMF Driver
+#
+
+config AMD_PMF
+ tristate "AMD Platform Management Framework"
+ depends on ACPI && PCI
+ select ACPI_PLATFORM_PROFILE
+ help
+ This driver provides support for the AMD Platform Management Framework.
+ The goal is to enhance end user experience by making AMD PCs smarter,
+ quieter, and more power efficient by adapting to user behavior and environment.
+
+ To compile this driver as a module, choose M here: the module will
+ be called amd_pmf.
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
new file mode 100644
index 000000000000..fdededf54392
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmf
+# AMD Platform Management Framework
+#
+
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-objs := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
new file mode 100644
index 000000000000..081e84e116e7
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include "pmf.h"
+
+#define APMF_CQL_NOTIFICATION 2
+#define APMF_AMT_NOTIFICATION 3
+
+static union acpi_object *apmf_if_call(struct amd_pmf_dev *pdev, int fn, struct acpi_buffer *param)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_handle ahandle = ACPI_HANDLE(pdev->dev);
+ struct acpi_object_list apmf_if_arg_list;
+ union acpi_object apmf_if_args[2];
+ acpi_status status;
+
+ apmf_if_arg_list.count = 2;
+ apmf_if_arg_list.pointer = &apmf_if_args[0];
+
+ apmf_if_args[0].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[0].integer.value = fn;
+
+ if (param) {
+ apmf_if_args[1].type = ACPI_TYPE_BUFFER;
+ apmf_if_args[1].buffer.length = param->length;
+ apmf_if_args[1].buffer.pointer = param->pointer;
+ } else {
+ apmf_if_args[1].type = ACPI_TYPE_INTEGER;
+ apmf_if_args[1].integer.value = 0;
+ }
+
+ status = acpi_evaluate_object(ahandle, "APMF", &apmf_if_arg_list, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pdev->dev, "APMF method:%d call failed\n", fn);
+ kfree(buffer.pointer);
+ return NULL;
+ }
+
+ return buffer.pointer;
+}
+
+static int apmf_if_call_store_buffer(struct amd_pmf_dev *pdev, int fn, void *dest, size_t out_sz)
+{
+ union acpi_object *info;
+ size_t size;
+ int err = 0;
+
+ info = apmf_if_call(pdev, fn, NULL);
+ if (!info)
+ return -EIO;
+
+ if (info->type != ACPI_TYPE_BUFFER) {
+ dev_err(pdev->dev, "object is not a buffer\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (info->buffer.length < 2) {
+ dev_err(pdev->dev, "buffer too small\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ size = *(u16 *)info->buffer.pointer;
+ if (info->buffer.length < size) {
+ dev_err(pdev->dev, "buffer smaller then headersize %u < %zu\n",
+ info->buffer.length, size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (size < out_sz) {
+ dev_err(pdev->dev, "buffer too small %zu\n", size);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(dest, info->buffer.pointer, out_sz);
+
+out:
+ kfree(info);
+ return err;
+}
+
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
+{
+ /* If bit-n is set, that indicates function n+1 is supported */
+ return !!(pdev->supported_func & BIT(index - 1));
+}
+
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *data)
+{
+ if (!is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ return -EINVAL;
+
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR,
+ data, sizeof(*data));
+}
+
+static void apmf_sbios_heartbeat_notify(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, heart_beat.work);
+ union acpi_object *info;
+
+ dev_dbg(dev->dev, "Sending heartbeat to SBIOS\n");
+ info = apmf_if_call(dev, APMF_FUNC_SBIOS_HEARTBEAT, NULL);
+ if (!info)
+ goto out;
+
+ schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+
+out:
+ kfree(info);
+}
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx)
+{
+ union acpi_object *info;
+ struct apmf_fan_idx args;
+ struct acpi_buffer params;
+ int err = 0;
+
+ args.size = sizeof(args);
+ args.fan_ctl_mode = manual;
+ args.fan_ctl_idx = idx;
+
+ params.length = sizeof(args);
+ params.pointer = (void *)&args;
+
+ info = apmf_if_call(pdev, APMF_FUNC_SET_FAN_IDX, &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(info);
+ return err;
+}
+
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_AUTO_MODE, data, sizeof(*data));
+}
+
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
+ req, sizeof(*req));
+}
+
+static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ struct apmf_sbios_req req;
+ int ret;
+
+ mutex_lock(&pmf_dev->update_mutex);
+ ret = apmf_get_sbios_requests(pmf_dev, &req);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get SBIOS requests:%d\n", ret);
+ goto out;
+ }
+
+ if (req.pending_req & BIT(APMF_AMT_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "AMT is supported and notifications %s\n",
+ req.amt_event ? "Enabled" : "Disabled");
+ pmf_dev->amt_enabled = !!req.amt_event;
+
+ if (pmf_dev->amt_enabled)
+ amd_pmf_handle_amt(pmf_dev);
+ else
+ amd_pmf_reset_amt(pmf_dev);
+ }
+
+ if (req.pending_req & BIT(APMF_CQL_NOTIFICATION)) {
+ dev_dbg(pmf_dev->dev, "CQL is supported and notifications %s\n",
+ req.cql_event ? "Enabled" : "Disabled");
+
+ /* update the target mode information */
+ if (pmf_dev->amt_enabled)
+ amd_pmf_update_2_cql(pmf_dev, req.cql_event);
+ }
+out:
+ mutex_unlock(&pmf_dev->update_mutex);
+}
+
+static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
+{
+ struct apmf_verify_interface output;
+ int err;
+
+ err = apmf_if_call_store_buffer(pdev, APMF_FUNC_VERIFY_INTERFACE, &output, sizeof(output));
+ if (err)
+ return err;
+
+ pdev->supported_func = output.supported_functions;
+ dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x\n",
+ output.supported_functions, output.notification_mask);
+
+ return 0;
+}
+
+static int apmf_get_system_params(struct amd_pmf_dev *dev)
+{
+ struct apmf_system_params params;
+ int err;
+
+ if (!is_apmf_func_supported(dev, APMF_FUNC_GET_SYS_PARAMS))
+ return -EINVAL;
+
+ err = apmf_if_call_store_buffer(dev, APMF_FUNC_GET_SYS_PARAMS, &params, sizeof(params));
+ if (err)
+ return err;
+
+ dev_dbg(dev->dev, "system params mask:0x%x flags:0x%x cmd_code:0x%x heartbeat:%d\n",
+ params.valid_mask,
+ params.flags,
+ params.command_code,
+ params.heartbeat_int);
+ params.flags = params.flags & params.valid_mask;
+ dev->hb_interval = params.heartbeat_int;
+
+ return 0;
+}
+
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_AC, data, sizeof(*data));
+}
+
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));
+}
+
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+ acpi_status status;
+
+ /* Install the APMF Notify handler */
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS)) {
+ status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handler, pmf_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(pmf_dev->dev, "failed to install notify handler\n");
+ return -ENODEV;
+ }
+
+ /* Call the handler once manually to catch up with possibly missed notifies. */
+ apmf_event_handler(ahandle, 0, pmf_dev);
+ }
+
+ return 0;
+}
+
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
+{
+ acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
+
+ if (pmf_dev->hb_interval)
+ cancel_delayed_work_sync(&pmf_dev->heart_beat);
+
+ if (is_apmf_func_supported(pmf_dev, APMF_FUNC_AUTO_MODE) &&
+ is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
+}
+
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
+{
+ int ret;
+
+ ret = apmf_if_verify_interface(pmf_dev);
+ if (ret) {
+ dev_err(pmf_dev->dev, "APMF verify interface failed :%d\n", ret);
+ goto out;
+ }
+
+ ret = apmf_get_system_params(pmf_dev);
+ if (ret) {
+ dev_err(pmf_dev->dev, "APMF apmf_get_system_params failed :%d\n", ret);
+ goto out;
+ }
+
+ if (pmf_dev->hb_interval) {
+ /* send heartbeats only if the interval is not zero */
+ INIT_DELAYED_WORK(&pmf_dev->heart_beat, apmf_sbios_heartbeat_notify);
+ schedule_delayed_work(&pmf_dev->heart_beat, 0);
+ }
+
+out:
+ return ret;
+}
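Editor's note: apmf_sbios_heartbeat_notify() above is a self-rescheduling delayed work: it pings the SBIOS and re-queues itself at the interval the BIOS advertised, and apmf_acpi_deinit() cancels it synchronously on teardown. A minimal sketch of that pattern with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct hb { struct delayed_work work; unsigned int interval_ms; };

static void hb_work_fn(struct work_struct *work)
{
	struct hb *hb = container_of(work, struct hb, work.work);

	/* ... send the heartbeat here ... */

	/* Re-arm: the work keeps itself alive until it is cancelled. */
	schedule_delayed_work(&hb->work, msecs_to_jiffies(hb->interval_ms));
}

static void hb_start(struct hb *hb)
{
	INIT_DELAYED_WORK(&hb->work, hb_work_fn);
	schedule_delayed_work(&hb->work, 0);	/* first ping right away */
}

static void hb_stop(struct hb *hb)
{
	cancel_delayed_work_sync(&hb->work);	/* waits for a running ping too */
}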
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
new file mode 100644
index 000000000000..644af42e07cf
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct auto_mode_mode_config config_store;
+static const char *state_as_str(unsigned int state);
+
+static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
+ struct auto_mode_mode_config *table)
+{
+ struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2], NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev, config_store.mode_set[idx].fan_control.manual,
+ config_store.mode_set[idx].fan_control.fan_id);
+}
+
+static int amd_pmf_get_moving_avg(struct amd_pmf_dev *pdev, int socket_power)
+{
+ int i, total = 0;
+
+ if (pdev->socket_power_history_idx == -1) {
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ pdev->socket_power_history[i] = socket_power;
+ }
+
+ pdev->socket_power_history_idx = (pdev->socket_power_history_idx + 1) % AVG_SAMPLE_SIZE;
+ pdev->socket_power_history[pdev->socket_power_history_idx] = socket_power;
+
+ for (i = 0; i < AVG_SAMPLE_SIZE; i++)
+ total += pdev->socket_power_history[i];
+
+ return total / AVG_SAMPLE_SIZE;
+}
+
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms)
+{
+ int avg_power = 0;
+ bool update = false;
+ int i, j;
+
+ /* Get the moving average computed by the auto mode algorithm */
+ avg_power = amd_pmf_get_moving_avg(dev, socket_power);
+
+ for (i = 0; i < AUTO_TRANSITION_MAX; i++) {
+ if ((config_store.transition[i].shifting_up && avg_power >=
+ config_store.transition[i].power_threshold) ||
+ (!config_store.transition[i].shifting_up && avg_power <=
+ config_store.transition[i].power_threshold)) {
+ if (config_store.transition[i].timer <
+ config_store.transition[i].time_constant)
+ config_store.transition[i].timer += time_elapsed_ms;
+ } else {
+ config_store.transition[i].timer = 0;
+ }
+
+ if (config_store.transition[i].timer >=
+ config_store.transition[i].time_constant &&
+ !config_store.transition[i].applied) {
+ config_store.transition[i].applied = true;
+ update = true;
+ } else if (config_store.transition[i].timer <=
+ config_store.transition[i].time_constant &&
+ config_store.transition[i].applied) {
+ config_store.transition[i].applied = false;
+ update = true;
+ }
+ }
+
+ dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power,
+ state_as_str(config_store.current_mode));
+
+ if (update) {
+ for (j = 0; j < AUTO_TRANSITION_MAX; j++) {
+ /* Apply the mode with the highest priority identified */
+ if (config_store.transition[j].applied) {
+ if (config_store.current_mode !=
+ config_store.transition[j].target_mode) {
+ config_store.current_mode =
+ config_store.transition[j].target_mode;
+ dev_dbg(dev->dev, "[AUTO_MODE] moving to mode:%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ }
+}
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event)
+{
+ int mode = config_store.current_mode;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ is_cql_event ? AUTO_PERFORMANCE_ON_LAP : AUTO_PERFORMANCE;
+
+ if ((mode == AUTO_PERFORMANCE || mode == AUTO_PERFORMANCE_ON_LAP) &&
+ mode != config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode) {
+ mode = config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode;
+ amd_pmf_set_automode(dev, mode, NULL);
+ }
+ dev_dbg(dev->dev, "updated CQL thermals\n");
+}
+
+static void amd_pmf_get_power_threshold(void)
+{
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold =
+ config_store.mode_set[AUTO_BALANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_QUIET].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta;
+
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold =
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor -
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case AUTO_QUIET:
+ return "QUIET";
+ case AUTO_BALANCE:
+ return "BALANCED";
+ case AUTO_PERFORMANCE_ON_LAP:
+ return "ON_LAP";
+ case AUTO_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown Auto Mode State";
+ }
+}
+
+static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev)
+{
+ struct apmf_auto_mode output;
+ struct power_table_control *pwr_ctrl;
+ int i;
+
+ apmf_get_auto_mode_def(dev, &output);
+ /* time constant */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].time_constant =
+ output.balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant =
+ output.balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant =
+ output.quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant =
+ output.perf_to_balanced;
+
+ /* power floor */
+ config_store.mode_set[AUTO_QUIET].power_floor = output.pfloor_quiet;
+ config_store.mode_set[AUTO_BALANCE].power_floor = output.pfloor_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].power_floor = output.pfloor_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_floor = output.pfloor_perf;
+
+ /* Power delta for mode change */
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta =
+ output.pd_balanced_to_quiet;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta =
+ output.pd_balanced_to_perf;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta =
+ output.pd_quiet_to_balanced;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta =
+ output.pd_perf_to_balanced;
+
+ /* Power threshold */
+ amd_pmf_get_power_threshold();
+
+ /* skin temperature limits */
+ pwr_ctrl = &config_store.mode_set[AUTO_QUIET].power_control;
+ pwr_ctrl->spl = output.spl_quiet;
+ pwr_ctrl->sppt = output.sppt_quiet;
+ pwr_ctrl->fppt = output.fppt_quiet;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_quiet;
+ pwr_ctrl->stt_min = output.stt_min_limit_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_quiet;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_quiet;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_BALANCE].power_control;
+ pwr_ctrl->spl = output.spl_balanced;
+ pwr_ctrl->sppt = output.sppt_balanced;
+ pwr_ctrl->fppt = output.fppt_balanced;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_balanced;
+ pwr_ctrl->stt_min = output.stt_min_limit_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_balanced;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_balanced;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE].power_control;
+ pwr_ctrl->spl = output.spl_perf;
+ pwr_ctrl->sppt = output.sppt_perf;
+ pwr_ctrl->fppt = output.fppt_perf;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf;
+
+ pwr_ctrl = &config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].power_control;
+ pwr_ctrl->spl = output.spl_perf_on_lap;
+ pwr_ctrl->sppt = output.sppt_perf_on_lap;
+ pwr_ctrl->fppt = output.fppt_perf_on_lap;
+ pwr_ctrl->sppt_apu_only = output.sppt_apu_only_perf_on_lap;
+ pwr_ctrl->stt_min = output.stt_min_limit_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_APU] = output.stt_apu_perf_on_lap;
+ pwr_ctrl->stt_skin_temp[STT_TEMP_HS2] = output.stt_hs2_perf_on_lap;
+
+ /* Fan ID */
+ config_store.mode_set[AUTO_QUIET].fan_control.fan_id = output.fan_id_quiet;
+ config_store.mode_set[AUTO_BALANCE].fan_control.fan_id = output.fan_id_balanced;
+ config_store.mode_set[AUTO_PERFORMANCE].fan_control.fan_id = output.fan_id_perf;
+ config_store.mode_set[AUTO_PERFORMANCE_ON_LAP].fan_control.fan_id =
+ output.fan_id_perf;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].target_mode = AUTO_QUIET;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].target_mode =
+ AUTO_PERFORMANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].target_mode =
+ AUTO_BALANCE;
+
+ config_store.transition[AUTO_TRANSITION_TO_QUIET].shifting_up = false;
+ config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].shifting_up = true;
+ config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].shifting_up =
+ false;
+
+ for (i = 0 ; i < AUTO_MODE_MAX ; i++) {
+ if (config_store.mode_set[i].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i].fan_control.manual = false;
+ else
+ config_store.mode_set[i].fan_control.manual = true;
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = AUTO_BALANCE;
+ dev->socket_power_history_idx = -1;
+}
+
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
+{
+ /*
+ * OEM BIOS implementation guide says that if the auto mode is enabled
+ * the platform_profile registration shall be done by the OEM driver.
+ * There could be cases where both static slider and auto mode BIOS
+ * functions are enabled, in that case enable static slider updates
+ * only if it advertised as supported.
+ */
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ int mode = amd_pmf_get_pprof_modes(dev);
+
+ if (mode < 0)
+ return mode;
+
+ dev_dbg(dev->dev, "resetting AMT thermals\n");
+ amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
+ }
+ return 0;
+}
+
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev)
+{
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+}
+
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
+{
+ amd_pmf_load_defaults_auto_mode(dev);
+ /* update the thermal limits for Automode */
+ amd_pmf_set_automode(dev, config_store.current_mode, NULL);
+ amd_pmf_init_metrics_table(dev);
+}
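Editor's note: amd_pmf_get_moving_avg() above keeps a small ring of the last AVG_SAMPLE_SIZE socket-power samples, seeding every slot with the first sample so the average is meaningful from the start. A small worked sketch of the same scheme outside the driver; N and the struct are illustrative:

#define N 10	/* stands in for AVG_SAMPLE_SIZE */

struct mavg { int hist[N]; int idx; };	/* idx must start at -1 */

static int mavg_update(struct mavg *m, int sample)
{
	int i, total = 0;

	if (m->idx == -1)			/* first sample seeds the ring */
		for (i = 0; i < N; i++)
			m->hist[i] = sample;

	m->idx = (m->idx + 1) % N;		/* overwrite the oldest slot */
	m->hist[m->idx] = sample;

	for (i = 0; i < N; i++)
		total += m->hist[i];

	return total / N;			/* e.g. a steady 5000 mW stays 5000 */
}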
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
new file mode 100644
index 000000000000..668c7c0fea83
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/workqueue.h>
+#include "pmf.h"
+
+static struct cnqf_config config_store;
+
+static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
+ struct cnqf_config *table)
+{
+ struct power_table_control *pc;
+
+ pc = &config_store.mode_set[src][idx].power_control;
+
+ amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, pc->stt_skin_temp[STT_TEMP_APU],
+ NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, pc->stt_skin_temp[STT_TEMP_HS2],
+ NULL);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
+ apmf_update_fan_idx(dev,
+ config_store.mode_set[src][idx].fan_control.manual,
+ config_store.mode_set[src][idx].fan_control.fan_id);
+
+ return 0;
+}
+
+static void amd_pmf_update_power_threshold(int src)
+{
+ struct cnqf_mode_settings *ts;
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_QUIET];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_TO_TURBO];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_BALANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_PERFORMANCE];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_QUIET];
+ tp->power_threshold = ts->power_floor;
+
+ tp = &config_store.trans_param[src][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ ts = &config_store.mode_set[src][CNQF_MODE_TURBO];
+ tp->power_threshold = ts->power_floor;
+}
+
+static const char *state_as_str(unsigned int state)
+{
+ switch (state) {
+ case CNQF_MODE_QUIET:
+ return "QUIET";
+ case CNQF_MODE_BALANCE:
+ return "BALANCED";
+ case CNQF_MODE_TURBO:
+ return "TURBO";
+ case CNQF_MODE_PERFORMANCE:
+ return "PERFORMANCE";
+ default:
+ return "Unknown CnQF mode";
+ }
+}
+
+static int amd_pmf_cnqf_get_power_source(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) &&
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return amd_pmf_get_power_source();
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ return POWER_SOURCE_DC;
+ else
+ return POWER_SOURCE_AC;
+}
+
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms)
+{
+ struct cnqf_tran_params *tp;
+ int src, i, j;
+ u32 avg_power = 0;
+
+ src = amd_pmf_cnqf_get_power_source(dev);
+
+ if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ } else {
+ /*
+ * Return from here if the platform_profile is not balanced,
+ * so that preference is given to the user's mode selection rather
+ * than forcing CnQF to run all the time (if enabled).
+ */
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CNQF_TRANSITION_MAX; i++) {
+ config_store.trans_param[src][i].timer += time_lapsed_ms;
+ config_store.trans_param[src][i].total_power += socket_power;
+ config_store.trans_param[src][i].count++;
+
+ tp = &config_store.trans_param[src][i];
+ if (tp->timer >= tp->time_constant && tp->count) {
+ avg_power = tp->total_power / tp->count;
+
+ /* Reset the indices */
+ tp->timer = 0;
+ tp->total_power = 0;
+ tp->count = 0;
+
+ if ((tp->shifting_up && avg_power >= tp->power_threshold) ||
+ (!tp->shifting_up && avg_power <= tp->power_threshold)) {
+ tp->priority = true;
+ } else {
+ tp->priority = false;
+ }
+ }
+ }
+
+ dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n",
+ avg_power, socket_power, state_as_str(config_store.current_mode));
+
+ for (j = 0; j < CNQF_TRANSITION_MAX; j++) {
+ /* apply the highest priority */
+ if (config_store.trans_param[src][j].priority) {
+ if (config_store.current_mode !=
+ config_store.trans_param[src][j].target_mode) {
+ config_store.current_mode =
+ config_store.trans_param[src][j].target_mode;
+ dev_dbg(dev->dev, "Moving to Mode :%s\n",
+ state_as_str(config_store.current_mode));
+ amd_pmf_set_cnqf(dev, src,
+ config_store.current_mode, NULL);
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static void amd_pmf_update_trans_data(int idx, struct apmf_dyn_slider_output out)
+{
+ struct cnqf_tran_params *tp;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_QUIET];
+ tp->time_constant = out.t_balanced_to_quiet;
+ tp->target_mode = CNQF_MODE_QUIET;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE];
+ tp->time_constant = out.t_balanced_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_QUIET_TO_BALANCE];
+ tp->time_constant = out.t_quiet_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = true;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE];
+ tp->time_constant = out.t_perf_to_balanced;
+ tp->target_mode = CNQF_MODE_BALANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE];
+ tp->time_constant = out.t_turbo_to_perf;
+ tp->target_mode = CNQF_MODE_PERFORMANCE;
+ tp->shifting_up = false;
+
+ tp = &config_store.trans_param[idx][CNQF_TRANSITION_TO_TURBO];
+ tp->time_constant = out.t_perf_to_turbo;
+ tp->target_mode = CNQF_MODE_TURBO;
+ tp->shifting_up = true;
+}
+
+static void amd_pmf_update_mode_set(int idx, struct apmf_dyn_slider_output out)
+{
+ struct cnqf_mode_settings *ms;
+
+ /* Quiet Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_QUIET];
+ ms->power_floor = out.ps[APMF_CNQF_QUIET].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_QUIET].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_QUIET].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_QUIET].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_QUIET].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_QUIET].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_QUIET].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_QUIET].fan_id;
+
+ /* Balance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_BALANCE];
+ ms->power_floor = out.ps[APMF_CNQF_BALANCE].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_BALANCE].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_BALANCE].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_BALANCE].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_BALANCE].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_BALANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_BALANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_BALANCE].fan_id;
+
+ /* Performance Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_PERFORMANCE];
+ ms->power_floor = out.ps[APMF_CNQF_PERFORMANCE].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_PERFORMANCE].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_PERFORMANCE].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_PERFORMANCE].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_PERFORMANCE].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_PERFORMANCE].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_PERFORMANCE].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_PERFORMANCE].fan_id;
+
+ /* Turbo Mode */
+ ms = &config_store.mode_set[idx][CNQF_MODE_TURBO];
+ ms->power_floor = out.ps[APMF_CNQF_TURBO].pfloor;
+ ms->power_control.fppt = out.ps[APMF_CNQF_TURBO].fppt;
+ ms->power_control.sppt = out.ps[APMF_CNQF_TURBO].sppt;
+ ms->power_control.sppt_apu_only = out.ps[APMF_CNQF_TURBO].sppt_apu_only;
+ ms->power_control.spl = out.ps[APMF_CNQF_TURBO].spl;
+ ms->power_control.stt_min = out.ps[APMF_CNQF_TURBO].stt_min_limit;
+ ms->power_control.stt_skin_temp[STT_TEMP_APU] =
+ out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_APU];
+ ms->power_control.stt_skin_temp[STT_TEMP_HS2] =
+ out.ps[APMF_CNQF_TURBO].stt_skintemp[STT_TEMP_HS2];
+ ms->fan_control.fan_id = out.ps[APMF_CNQF_TURBO].fan_id;
+}
+
+static int amd_pmf_check_flags(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out = {};
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC))
+ apmf_get_dyn_slider_def_ac(dev, &out);
+ else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC))
+ apmf_get_dyn_slider_def_dc(dev, &out);
+
+ return out.flags;
+}
+
+static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev)
+{
+ struct apmf_dyn_slider_output out;
+ int i, j, ret;
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ if (!is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC + i))
+ continue;
+
+ if (i == POWER_SOURCE_AC)
+ ret = apmf_get_dyn_slider_def_ac(dev, &out);
+ else
+ ret = apmf_get_dyn_slider_def_dc(dev, &out);
+ if (ret) {
+ dev_err(dev->dev, "APMF apmf_get_dyn_slider_def_dc failed :%d\n", ret);
+ return ret;
+ }
+
+ amd_pmf_update_mode_set(i, out);
+ amd_pmf_update_trans_data(i, out);
+ amd_pmf_update_power_threshold(i);
+
+ for (j = 0; j < CNQF_MODE_MAX; j++) {
+ if (config_store.mode_set[i][j].fan_control.fan_id == FAN_INDEX_AUTO)
+ config_store.mode_set[i][j].fan_control.manual = false;
+ else
+ config_store.mode_set[i][j].fan_control.manual = true;
+ }
+ }
+
+ /* set to initial default values */
+ config_store.current_mode = CNQF_MODE_BALANCE;
+
+ return 0;
+}
+
+static ssize_t cnqf_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+ int mode, result, src;
+ bool input;
+
+ mode = amd_pmf_get_pprof_modes(pdev);
+ if (mode < 0)
+ return mode;
+
+ result = kstrtobool(buf, &input);
+ if (result)
+ return result;
+
+ src = amd_pmf_cnqf_get_power_source(pdev);
+ pdev->cnqf_enabled = input;
+
+ if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
+ } else {
+ if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
+ }
+
+ dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
+ return count;
+}
+
+static ssize_t cnqf_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", pdev->cnqf_enabled ? "on" : "off");
+}
+
+static DEVICE_ATTR_RW(cnqf_enable);
+
+static umode_t cnqf_feature_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+ return pdev->cnqf_supported ? attr->mode : 0;
+}
+
+static struct attribute *cnqf_feature_attrs[] = {
+ &dev_attr_cnqf_enable.attr,
+ NULL
+};
+
+const struct attribute_group cnqf_feature_attribute_group = {
+ .is_visible = cnqf_feature_is_visible,
+ .attrs = cnqf_feature_attrs,
+};
+
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev)
+{
+ cancel_delayed_work_sync(&dev->work_buffer);
+}
+
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
+{
+ int ret, src;
+
+ /*
+ * Note the caller of this function has already checked that both
+ * APMF_FUNC_DYN_SLIDER_AC and APMF_FUNC_DYN_SLIDER_DC are supported.
+ */
+
+ ret = amd_pmf_load_defaults_cnqf(dev);
+ if (ret < 0)
+ return ret;
+
+ amd_pmf_init_metrics_table(dev);
+
+ dev->cnqf_supported = true;
+ dev->cnqf_enabled = amd_pmf_check_flags(dev);
+
+ /* update the thermal limits for CnQF */
+ if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ src = amd_pmf_cnqf_get_power_source(dev);
+ amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
+ }
+
+ return 0;
+}
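
amd_pmf_trans_cnqf() above accumulates socket power per transition until the window given by time_constant elapses, then compares the average against the threshold in the direction selected by shifting_up. A stand-alone sketch of that windowed-average test, with arbitrary values and hypothetical names:

/*
 * Sketch of the per-transition time-window averaging: samples are
 * accumulated until the window elapses, then the average is checked
 * against the threshold in the configured direction. Values are
 * arbitrary, names are not the driver's.
 */
#include <stdbool.h>
#include <stdio.h>

struct window {
	unsigned int time_constant_ms;	/* window length */
	unsigned int threshold_mw;
	bool shifting_up;
	/* accumulators, reset each time the window expires */
	unsigned int timer_ms;
	unsigned int total_power_mw;
	unsigned int count;
};

/* Returns true when the window expired and the threshold test passed */
static bool feed_sample(struct window *w, unsigned int power_mw,
			unsigned int elapsed_ms)
{
	bool hit = false;

	w->timer_ms += elapsed_ms;
	w->total_power_mw += power_mw;
	w->count++;

	if (w->timer_ms >= w->time_constant_ms && w->count) {
		unsigned int avg = w->total_power_mw / w->count;

		hit = w->shifting_up ? avg >= w->threshold_mw
				     : avg <= w->threshold_mw;
		w->timer_ms = 0;
		w->total_power_mw = 0;
		w->count = 0;
	}
	return hit;
}

int main(void)
{
	struct window to_perf = { .time_constant_ms = 3000,
				  .threshold_mw = 15000, .shifting_up = true };

	/* three 1-second samples of ~16 W close the window and trigger */
	for (int i = 0; i < 3; i++)
		if (feed_sample(&to_perf, 16000, 1000))
			printf("window expired: shift up\n");
	return 0;
}
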
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
new file mode 100644
index 000000000000..a5f5a4bcff6d
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include "pmf.h"
+
+/* PMF-SMU communication registers */
+#define AMD_PMF_REGISTER_MESSAGE 0xA18
+#define AMD_PMF_REGISTER_RESPONSE 0xA78
+#define AMD_PMF_REGISTER_ARGUMENT 0xA58
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMF_SMU_INDEX_ADDRESS 0xB8
+#define AMD_PMF_SMU_INDEX_DATA 0xBC
+#define AMD_PMF_MAPPING_SIZE 0x01000
+#define AMD_PMF_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMF_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMF_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMF_RESULT_OK 0x01
+#define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMF_RESULT_FAILED 0xFF
+
+/* List of supported CPU ids */
+#define AMD_CPU_ID_RMB 0x14b5
+#define AMD_CPU_ID_PS 0x14e8
+
+#define PMF_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+
+/* Metrics table sampling interval (in ms), overridable via module parameter */
+static int metrics_table_loop_ms = 1000;
+module_param(metrics_table_loop_ms, int, 0644);
+MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics table sampling interval in ms (default = 1000)");
+
+/* Force load on supported older platforms */
+static bool force_load;
+module_param(force_load, bool, 0444);
+MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+
+static int current_power_limits_show(struct seq_file *seq, void *unused)
+{
+ struct amd_pmf_dev *dev = seq->private;
+ struct amd_pmf_static_slider_granular table;
+ int mode, src = 0;
+
+ mode = amd_pmf_get_pprof_modes(dev);
+ if (mode < 0)
+ return mode;
+
+ src = amd_pmf_get_power_source();
+ amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
+ seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
+ table.prop[src][mode].spl,
+ table.prop[src][mode].fppt,
+ table.prop[src][mode].sppt,
+ table.prop[src][mode].sppt_apu_only,
+ table.prop[src][mode].stt_min,
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
+ table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(current_power_limits);
+
+static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
+{
+ debugfs_remove_recursive(dev->dbgfs_dir);
+}
+
+static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
+{
+ dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
+ debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
+ &current_power_limits_fops);
+}
+
+int amd_pmf_get_power_source(void)
+{
+ if (power_supply_is_system_supplied() > 0)
+ return POWER_SOURCE_AC;
+ else
+ return POWER_SOURCE_DC;
+}
+
+static void amd_pmf_get_metrics(struct work_struct *work)
+{
+ struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
+ ktime_t time_elapsed_ms;
+ int socket_power;
+
+ mutex_lock(&dev->update_mutex);
+ /* Transfer table contents */
+ memset(dev->buf, 0, sizeof(dev->m_table));
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
+
+ time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
+ /* Calculate the total socket power consumption (APU + dGPU) */
+ socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;
+
+ if (dev->amt_enabled) {
+ /* Apply the Auto Mode transition */
+ amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
+ }
+
+ if (dev->cnqf_enabled) {
+ /* Apply the CnQF transition */
+ amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
+ }
+
+ dev->start_time = ktime_to_ms(ktime_get());
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
+ mutex_unlock(&dev->update_mutex);
+}
+
+static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
+{
+ return ioread32(dev->regbase + reg_offset);
+}
+
+static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
+{
+ iowrite32(val, dev->regbase + reg_offset);
+}
+
+static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
+{
+ u32 value;
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
+
+ value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
+ dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
+}
+
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
+{
+ int rc;
+ u32 val;
+
+ mutex_lock(&dev->lock);
+
+ /* Wait for the previous command to complete (response register non-zero) */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "failed to talk to SMU\n");
+ goto out_unlock;
+ }
+
+ /* Write zero to response register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);
+
+ /* Write argument into argument register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);
+
+ /* Write message ID to message ID register */
+ amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);
+
+ /* Wait until we get a valid response */
+ rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
+ val, val != 0, PMF_MSG_DELAY_MIN_US,
+ PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
+ if (rc) {
+ dev_err(dev->dev, "SMU response timed out\n");
+ goto out_unlock;
+ }
+
+ switch (val) {
+ case AMD_PMF_RESULT_OK:
+ if (get) {
+ /* The PMFW may take longer to return the data */
+ usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
+ *data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
+ }
+ break;
+ case AMD_PMF_RESULT_CMD_REJECT_BUSY:
+ dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
+ rc = -EBUSY;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_UNKNOWN:
+ dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
+ rc = -EINVAL;
+ goto out_unlock;
+ case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
+ case AMD_PMF_RESULT_FAILED:
+ default:
+ dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
+ rc = -EIO;
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&dev->lock);
+ amd_pmf_dump_registers(dev);
+ return rc;
+}
+
+static const struct pci_device_id pmf_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
+ { }
+};
+
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
+ u64 phys_addr;
+ u32 hi, low;
+
+ INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+
+ /* Get Metrics Table Address */
+ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
+ if (!dev->buf)
+ return -ENOMEM;
+
+ phys_addr = virt_to_phys(dev->buf);
+ hi = phys_addr >> 32;
+ low = phys_addr & GENMASK(31, 0);
+
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+
+ /*
+ * Start collecting the metrics data after a small delay;
+ * otherwise we might end up reading stale values from the PMFW.
+ */
+ schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));
+
+ return 0;
+}
+
+static void amd_pmf_init_features(struct amd_pmf_dev *dev)
+{
+ int ret;
+
+ /* Enable Static Slider */
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ amd_pmf_init_sps(dev);
+ dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
+ }
+
+ /* Enable Auto Mode */
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_init_auto_mode(dev);
+ dev_dbg(dev->dev, "Auto Mode Init done\n");
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ /* Enable Cool n Quiet Framework (CnQF) */
+ ret = amd_pmf_init_cnqf(dev);
+ if (ret)
+ dev_warn(dev->dev, "CnQF Init failed\n");
+ }
+}
+
+static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
+{
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
+ amd_pmf_deinit_sps(dev);
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ amd_pmf_deinit_auto_mode(dev);
+ } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
+ is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
+ amd_pmf_deinit_cnqf(dev);
+ }
+}
+
+static const struct acpi_device_id amd_pmf_acpi_ids[] = {
+ {"AMDI0100", 0x100},
+ {"AMDI0102", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
+
+static int amd_pmf_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *id;
+ struct amd_pmf_dev *dev;
+ struct pci_dev *rdev;
+ u32 base_addr_lo;
+ u32 base_addr_hi;
+ u64 base_addr;
+ u32 val;
+ int err;
+
+ id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ if (id->driver_data == 0x100 && !force_load)
+ return -ENODEV;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->dev = &pdev->dev;
+
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
+ pci_dev_put(rdev);
+ return -ENODEV;
+ }
+
+ dev->cpu_id = rdev->device;
+ err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
+ if (err) {
+ dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ if (err) {
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
+
+ err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
+ if (err) {
+ dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
+ if (err) {
+ pci_dev_put(rdev);
+ return pcibios_err_to_errno(err);
+ }
+
+ base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
+ pci_dev_put(rdev);
+ base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
+
+ dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
+ AMD_PMF_MAPPING_SIZE);
+ if (!dev->regbase)
+ return -ENOMEM;
+
+ mutex_init(&dev->lock);
+ mutex_init(&dev->update_mutex);
+
+ apmf_acpi_init(dev);
+ platform_set_drvdata(pdev, dev);
+ amd_pmf_init_features(dev);
+ apmf_install_handler(dev);
+ amd_pmf_dbgfs_register(dev);
+ dev_info(dev->dev, "registered PMF device successfully\n");
+
+ return 0;
+}
+
+static int amd_pmf_remove(struct platform_device *pdev)
+{
+ struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
+
+ amd_pmf_deinit_features(dev);
+ apmf_acpi_deinit(dev);
+ amd_pmf_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
+ mutex_destroy(&dev->update_mutex);
+ kfree(dev->buf);
+ return 0;
+}
+
+static const struct attribute_group *amd_pmf_driver_groups[] = {
+ &cnqf_feature_attribute_group,
+ NULL,
+};
+
+static struct platform_driver amd_pmf_driver = {
+ .driver = {
+ .name = "amd-pmf",
+ .acpi_match_table = amd_pmf_acpi_ids,
+ .dev_groups = amd_pmf_driver_groups,
+ },
+ .probe = amd_pmf_probe,
+ .remove = amd_pmf_remove,
+};
+module_platform_driver(amd_pmf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
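
amd_pmf_send_cmd() above follows the usual SMU mailbox ordering: wait for the response register to go non-zero, clear it, write the argument, write the message ID, then poll the response again and decode it. A small user-space sketch of that ordering against fake registers (the register variables and the fake firmware are illustrative only):

/*
 * Stand-alone illustration of the response/argument/message mailbox
 * handshake. The "registers" are plain variables driven by a fake
 * firmware routine, purely to show the order of operations.
 */
#include <stdint.h>
#include <stdio.h>

#define RESULT_OK 0x01

static uint32_t reg_response = RESULT_OK;	/* non-zero: mailbox idle */
static uint32_t reg_argument;
static uint32_t reg_message;

/* Fake firmware: bumps the argument if a message was posted */
static void fake_smu_process(void)
{
	reg_argument += reg_message ? 1 : 0;
	reg_response = RESULT_OK;
}

static int send_cmd(uint8_t message, uint32_t arg, uint32_t *data)
{
	/* 1. wait for the previous response, i.e. mailbox idle */
	while (!reg_response)
		;
	/* 2. clear response, 3. write argument, 4. write message ID */
	reg_response = 0;
	reg_argument = arg;
	reg_message = message;

	fake_smu_process();		/* firmware acts on the message */

	/* 5. wait for and decode the new response */
	while (!reg_response)
		;
	if (reg_response != RESULT_OK)
		return -1;
	if (data)
		*data = reg_argument;	/* "get" commands read the argument back */
	return 0;
}

int main(void)
{
	uint32_t val;

	if (!send_cmd(0x0B /* e.g. a GET-style message */, 0, &val))
		printf("response argument: %u\n", val);
	return 0;
}
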
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
new file mode 100644
index 000000000000..84bbe2c6ea61
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -0,0 +1,417 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#ifndef PMF_H
+#define PMF_H
+
+#include <linux/acpi.h>
+#include <linux/platform_profile.h>
+
+/* APMF Functions */
+#define APMF_FUNC_VERIFY_INTERFACE 0
+#define APMF_FUNC_GET_SYS_PARAMS 1
+#define APMF_FUNC_SBIOS_REQUESTS 2
+#define APMF_FUNC_SBIOS_HEARTBEAT 4
+#define APMF_FUNC_AUTO_MODE 5
+#define APMF_FUNC_SET_FAN_IDX 7
+#define APMF_FUNC_STATIC_SLIDER_GRANULAR 9
+#define APMF_FUNC_DYN_SLIDER_AC 11
+#define APMF_FUNC_DYN_SLIDER_DC 12
+
+/* Message Definitions */
+#define SET_SPL 0x03 /* SPL: Sustained Power Limit */
+#define SET_SPPT 0x05 /* SPPT: Slow Package Power Tracking */
+#define SET_FPPT 0x07 /* FPPT: Fast Package Power Tracking */
+#define GET_SPL 0x0B
+#define GET_SPPT 0x0D
+#define GET_FPPT 0x0F
+#define SET_DRAM_ADDR_HIGH 0x14
+#define SET_DRAM_ADDR_LOW 0x15
+#define SET_TRANSFER_TABLE 0x16
+#define SET_STT_MIN_LIMIT 0x18 /* STT: Skin Temperature Tracking */
+#define SET_STT_LIMIT_APU 0x19
+#define SET_STT_LIMIT_HS2 0x1A
+#define SET_SPPT_APU_ONLY 0x1D
+#define GET_SPPT_APU_ONLY 0x1E
+#define GET_STT_MIN_LIMIT 0x1F
+#define GET_STT_LIMIT_APU 0x20
+#define GET_STT_LIMIT_HS2 0x21
+
+/* Fan Index for Auto Mode */
+#define FAN_INDEX_AUTO 0xFFFFFFFF
+
+#define ARG_NONE 0
+#define AVG_SAMPLE_SIZE 3
+
+/* AMD PMF BIOS interfaces */
+struct apmf_verify_interface {
+ u16 size;
+ u16 version;
+ u32 notification_mask;
+ u32 supported_functions;
+} __packed;
+
+struct apmf_system_params {
+ u16 size;
+ u32 valid_mask;
+ u32 flags;
+ u8 command_code;
+ u32 heartbeat_int;
+} __packed;
+
+struct apmf_sbios_req {
+ u16 size;
+ u32 pending_req;
+ u8 rsd;
+ u8 cql_event;
+ u8 amt_event;
+ u32 fppt;
+ u32 sppt;
+ u32 fppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+} __packed;
+
+struct apmf_fan_idx {
+ u16 size;
+ u8 fan_ctl_mode;
+ u32 fan_ctl_idx;
+} __packed;
+
+struct smu_pmf_metrics {
+ u16 gfxclk_freq; /* in MHz */
+ u16 socclk_freq; /* in MHz */
+ u16 vclk_freq; /* in MHz */
+ u16 dclk_freq; /* in MHz */
+ u16 memclk_freq; /* in MHz */
+ u16 spare;
+ u16 gfx_activity; /* in Centi */
+ u16 uvd_activity; /* in Centi */
+ u16 voltage[2]; /* in mV */
+ u16 currents[2]; /* in mA */
+ u16 power[2]; /* in mW */
+ u16 core_freq[8]; /* in MHz */
+ u16 core_power[8]; /* in mW */
+ u16 core_temp[8]; /* in centi-Celsius */
+ u16 l3_freq; /* in MHz */
+ u16 l3_temp; /* in centi-Celsius */
+ u16 gfx_temp; /* in centi-Celsius */
+ u16 soc_temp; /* in centi-Celsius */
+ u16 throttler_status;
+ u16 current_socketpower; /* in mW */
+ u16 stapm_orig_limit; /* in W */
+ u16 stapm_cur_limit; /* in W */
+ u32 apu_power; /* in mW */
+ u32 dgpu_power; /* in mW */
+ u16 vdd_tdc_val; /* in mA */
+ u16 soc_tdc_val; /* in mA */
+ u16 vdd_edc_val; /* in mA */
+ u16 soc_edcv_al; /* in mA */
+ u16 infra_cpu_maxfreq; /* in MHz */
+ u16 infra_gfx_maxfreq; /* in MHz */
+ u16 skin_temp; /* in centi-Celsius */
+ u16 device_state;
+} __packed;
+
+enum amd_stt_skin_temp {
+ STT_TEMP_APU,
+ STT_TEMP_HS2,
+ STT_TEMP_COUNT,
+};
+
+enum amd_slider_op {
+ SLIDER_OP_GET,
+ SLIDER_OP_SET,
+};
+
+enum power_source {
+ POWER_SOURCE_AC,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_MAX,
+};
+
+enum power_modes {
+ POWER_MODE_PERFORMANCE,
+ POWER_MODE_BALANCED_POWER,
+ POWER_MODE_POWER_SAVER,
+ POWER_MODE_MAX,
+};
+
+struct amd_pmf_dev {
+ void __iomem *regbase;
+ void __iomem *smu_virt_addr;
+ void *buf;
+ u32 base_addr;
+ u32 cpu_id;
+ struct device *dev;
+ struct mutex lock; /* protects the PMF interface */
+ u32 supported_func;
+ enum platform_profile_option current_profile;
+ struct platform_profile_handler pprof;
+ struct dentry *dbgfs_dir;
+ int hb_interval; /* SBIOS heartbeat interval */
+ struct delayed_work heart_beat;
+ struct smu_pmf_metrics m_table;
+ struct delayed_work work_buffer;
+ ktime_t start_time;
+ int socket_power_history[AVG_SAMPLE_SIZE];
+ int socket_power_history_idx;
+ bool amt_enabled;
+ struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
+ bool cnqf_enabled;
+ bool cnqf_supported;
+};
+
+struct apmf_sps_prop_granular {
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min;
+ u8 stt_skin_temp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+/* Static Slider */
+struct apmf_static_slider_granular_output {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX * POWER_MODE_MAX];
+} __packed;
+
+struct amd_pmf_static_slider_granular {
+ u16 size;
+ struct apmf_sps_prop_granular prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
+};
+
+struct fan_table_control {
+ bool manual;
+ unsigned long fan_id;
+};
+
+struct power_table_control {
+ u32 spl;
+ u32 sppt;
+ u32 fppt;
+ u32 sppt_apu_only;
+ u32 stt_min;
+ u32 stt_skin_temp[STT_TEMP_COUNT];
+ u32 reserved[16];
+};
+
+/* Auto Mode Layer */
+enum auto_mode_transition_priority {
+ AUTO_TRANSITION_TO_PERFORMANCE, /* Any other mode to Performance Mode */
+ AUTO_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ AUTO_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance Mode to Balance Mode */
+ AUTO_TRANSITION_MAX,
+};
+
+enum auto_mode_mode {
+ AUTO_QUIET,
+ AUTO_BALANCE,
+ AUTO_PERFORMANCE_ON_LAP,
+ AUTO_PERFORMANCE,
+ AUTO_MODE_MAX,
+};
+
+struct auto_mode_trans_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_delta; /* delta power to shift mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time; if timer > time_constant, move to the next mode */
+ u32 applied;
+ enum auto_mode_mode target_mode;
+ u32 shifting_up;
+};
+
+struct auto_mode_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct auto_mode_mode_config {
+ struct auto_mode_trans_params transition[AUTO_TRANSITION_MAX];
+ struct auto_mode_mode_settings mode_set[AUTO_MODE_MAX];
+ enum auto_mode_mode current_mode;
+};
+
+struct apmf_auto_mode {
+ u16 size;
+ /* time constant */
+ u32 balanced_to_perf;
+ u32 perf_to_balanced;
+ u32 quiet_to_balanced;
+ u32 balanced_to_quiet;
+ /* power floor */
+ u32 pfloor_perf;
+ u32 pfloor_balanced;
+ u32 pfloor_quiet;
+ /* Power delta for mode change */
+ u32 pd_balanced_to_perf;
+ u32 pd_perf_to_balanced;
+ u32 pd_quiet_to_balanced;
+ u32 pd_balanced_to_quiet;
+ /* skin temperature limits */
+ u8 stt_apu_perf_on_lap; /* CQL ON */
+ u8 stt_hs2_perf_on_lap; /* CQL ON */
+ u8 stt_apu_perf;
+ u8 stt_hs2_perf;
+ u8 stt_apu_balanced;
+ u8 stt_hs2_balanced;
+ u8 stt_apu_quiet;
+ u8 stt_hs2_quiet;
+ u32 stt_min_limit_perf_on_lap; /* CQL ON */
+ u32 stt_min_limit_perf;
+ u32 stt_min_limit_balanced;
+ u32 stt_min_limit_quiet;
+ /* SPL based */
+ u32 fppt_perf_on_lap; /* CQL ON */
+ u32 sppt_perf_on_lap; /* CQL ON */
+ u32 spl_perf_on_lap; /* CQL ON */
+ u32 sppt_apu_only_perf_on_lap; /* CQL ON */
+ u32 fppt_perf;
+ u32 sppt_perf;
+ u32 spl_perf;
+ u32 sppt_apu_only_perf;
+ u32 fppt_balanced;
+ u32 sppt_balanced;
+ u32 spl_balanced;
+ u32 sppt_apu_only_balanced;
+ u32 fppt_quiet;
+ u32 sppt_quiet;
+ u32 spl_quiet;
+ u32 sppt_apu_only_quiet;
+ /* Fan ID */
+ u32 fan_id_perf;
+ u32 fan_id_balanced;
+ u32 fan_id_quiet;
+} __packed;
+
+/* CnQF Layer */
+enum cnqf_trans_priority {
+ CNQF_TRANSITION_TO_TURBO, /* Any other mode to Turbo Mode */
+ CNQF_TRANSITION_FROM_BALANCE_TO_PERFORMANCE, /* quiet/balance to Performance Mode */
+ CNQF_TRANSITION_FROM_QUIET_TO_BALANCE, /* Quiet Mode to Balance Mode */
+ CNQF_TRANSITION_TO_QUIET, /* Any other mode to Quiet Mode */
+ CNQF_TRANSITION_FROM_PERFORMANCE_TO_BALANCE, /* Performance/Turbo to Balance Mode */
+ CNQF_TRANSITION_FROM_TURBO_TO_PERFORMANCE, /* Turbo mode to Performance Mode */
+ CNQF_TRANSITION_MAX,
+};
+
+enum cnqf_mode {
+ CNQF_MODE_QUIET,
+ CNQF_MODE_BALANCE,
+ CNQF_MODE_PERFORMANCE,
+ CNQF_MODE_TURBO,
+ CNQF_MODE_MAX,
+};
+
+enum apmf_cnqf_pos {
+ APMF_CNQF_TURBO,
+ APMF_CNQF_PERFORMANCE,
+ APMF_CNQF_BALANCE,
+ APMF_CNQF_QUIET,
+ APMF_CNQF_MAX,
+};
+
+struct cnqf_mode_settings {
+ struct power_table_control power_control;
+ struct fan_table_control fan_control;
+ u32 power_floor;
+};
+
+struct cnqf_tran_params {
+ u32 time_constant; /* minimum time required to switch to next mode */
+ u32 power_threshold;
+ u32 timer; /* elapsed time; if timer > time_constant, move to the next mode */
+ u32 total_power;
+ u32 count;
+ bool priority;
+ bool shifting_up;
+ enum cnqf_mode target_mode;
+};
+
+struct cnqf_config {
+ struct cnqf_tran_params trans_param[POWER_SOURCE_MAX][CNQF_TRANSITION_MAX];
+ struct cnqf_mode_settings mode_set[POWER_SOURCE_MAX][CNQF_MODE_MAX];
+ struct power_table_control defaults;
+ enum cnqf_mode current_mode;
+ u32 power_src;
+ u32 avg_power;
+};
+
+struct apmf_cnqf_power_set {
+ u32 pfloor;
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 stt_skintemp[STT_TEMP_COUNT];
+ u32 fan_id;
+} __packed;
+
+struct apmf_dyn_slider_output {
+ u16 size;
+ u16 flags;
+ u32 t_perf_to_turbo;
+ u32 t_balanced_to_perf;
+ u32 t_quiet_to_balanced;
+ u32 t_balanced_to_quiet;
+ u32 t_perf_to_balanced;
+ u32 t_turbo_to_perf;
+ struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
+} __packed;
+
+/* Core Layer */
+int apmf_acpi_init(struct amd_pmf_dev *pmf_dev);
+void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev);
+int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index);
+int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data);
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev);
+int amd_pmf_get_power_source(void);
+int apmf_install_handler(struct amd_pmf_dev *pmf_dev);
+
+/* SPS Layer */
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table);
+int amd_pmf_init_sps(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
+int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
+ struct apmf_static_slider_granular_output *output);
+
+int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+
+/* Auto Mode Layer */
+int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
+void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
+void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
+int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+
+void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
+int amd_pmf_reset_amt(struct amd_pmf_dev *dev);
+void amd_pmf_handle_amt(struct amd_pmf_dev *dev);
+
+/* CnQF Layer */
+int apmf_get_dyn_slider_def_ac(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_output *data);
+int amd_pmf_init_cnqf(struct amd_pmf_dev *dev);
+void amd_pmf_deinit_cnqf(struct amd_pmf_dev *dev);
+int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_lapsed_ms);
+extern const struct attribute_group cnqf_feature_attribute_group;
+
+#endif /* PMF_H */
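
The packed apmf_static_slider_granular_output above carries POWER_SOURCE_MAX * POWER_MODE_MAX entries in one flat array, which the driver copies into the two-dimensional [source][mode] store. A tiny sketch of the source-major index mapping that copy assumes (array contents are stand-ins):

/* Illustrative only: flat BIOS layout versus the 2-D driver store. */
#include <stdio.h>

#define POWER_SOURCE_MAX 2	/* AC, DC */
#define POWER_MODE_MAX   3	/* performance, balanced, power saver */

int main(void)
{
	int flat[POWER_SOURCE_MAX * POWER_MODE_MAX];
	int prop[POWER_SOURCE_MAX][POWER_MODE_MAX];
	int idx = 0;

	for (int i = 0; i < POWER_SOURCE_MAX * POWER_MODE_MAX; i++)
		flat[i] = 100 + i;	/* stand-in for one granular entry */

	/* same traversal order as the defaults loader */
	for (int src = 0; src < POWER_SOURCE_MAX; src++)
		for (int mode = 0; mode < POWER_MODE_MAX; mode++)
			prop[src][mode] = flat[idx++];

	/* equivalently: flat[src * POWER_MODE_MAX + mode] */
	printf("DC/balanced entry: %d\n", prop[1][1]);
	return 0;
}
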
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
new file mode 100644
index 000000000000..dba7e36962dc
--- /dev/null
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Management Framework (PMF) Driver
+ *
+ * Copyright (c) 2022, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ */
+
+#include "pmf.h"
+
+static struct amd_pmf_static_slider_granular config_store;
+
+static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
+{
+ struct apmf_static_slider_granular_output output;
+ int i, j, idx = 0;
+
+ memset(&config_store, 0, sizeof(config_store));
+ apmf_get_static_slider_granular(dev, &output);
+
+ for (i = 0; i < POWER_SOURCE_MAX; i++) {
+ for (j = 0; j < POWER_MODE_MAX; j++) {
+ config_store.prop[i][j].spl = output.prop[idx].spl;
+ config_store.prop[i][j].sppt = output.prop[idx].sppt;
+ config_store.prop[i][j].sppt_apu_only =
+ output.prop[idx].sppt_apu_only;
+ config_store.prop[i][j].fppt = output.prop[idx].fppt;
+ config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_APU];
+ config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
+ output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
+ config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
+ idx++;
+ }
+ }
+}
+
+void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
+ struct amd_pmf_static_slider_granular *table)
+{
+ int src = amd_pmf_get_power_source();
+
+ if (op == SLIDER_OP_SET) {
+ amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ config_store.prop[src][idx].sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ config_store.prop[src][idx].stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
+ } else if (op == SLIDER_OP_GET) {
+ amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ &table->prop[src][idx].sppt_apu_only);
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ &table->prop[src][idx].stt_min);
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+ (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+ (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
+ }
+}
+
+static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+
+ *profile = pmf->current_profile;
+ return 0;
+}
+
+int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ switch (pmf->current_profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ mode = POWER_MODE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ mode = POWER_MODE_BALANCED_POWER;
+ break;
+ case PLATFORM_PROFILE_LOW_POWER:
+ mode = POWER_MODE_POWER_SAVER;
+ break;
+ default:
+ dev_err(pmf->dev, "Unknown Platform Profile.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return mode;
+}
+
+static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
+ int mode;
+
+ pmf->current_profile = profile;
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+ return 0;
+}
+
+int amd_pmf_init_sps(struct amd_pmf_dev *dev)
+{
+ int err;
+
+ dev->current_profile = PLATFORM_PROFILE_BALANCED;
+ amd_pmf_load_defaults_sps(dev);
+
+ dev->pprof.profile_get = amd_pmf_profile_get;
+ dev->pprof.profile_set = amd_pmf_profile_set;
+
+ /* Setup supported modes */
+ set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);
+
+ /* Create platform_profile structure and register */
+ err = platform_profile_register(&dev->pprof);
+ if (err)
+ dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
+ err);
+
+ return err;
+}
+
+void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
+{
+ platform_profile_remove();
+}
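
With amd_pmf_init_sps() registering a platform_profile handler, the active profile can be read and changed from user space through the generic sysfs interface. A short sketch, assuming the usual /sys/firmware/acpi/platform_profile location:

/* Sketch: query the current profile, then request "balanced". */
#include <stdio.h>

#define PROFILE_PATH "/sys/firmware/acpi/platform_profile"

int main(void)
{
	char cur[64] = "";
	FILE *f = fopen(PROFILE_PATH, "r");

	if (f) {
		if (fgets(cur, sizeof(cur), f))
			printf("current profile: %s", cur);
		fclose(f);
	}

	f = fopen(PROFILE_PATH, "w");
	if (!f) {
		perror("open for write");
		return 1;
	}
	/* one of the choices set in amd_pmf_init_sps() */
	fputs("balanced\n", f);
	fclose(f);
	return 0;
}
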
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
index 493e169c8f61..3e313c4d538d 100644
--- a/drivers/platform/x86/amilo-rfkill.c
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -150,7 +150,8 @@ static int __init amilo_rfkill_init(void)
if (rc)
return rc;
- amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
+ amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME,
+ PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(amilo_rfkill_pdev)) {
rc = PTR_ERR(amilo_rfkill_pdev);
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index ffe98a18440b..ca33df7ea550 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -21,7 +21,6 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
-#include <acpi/video.h>
#include <asm/io.h>
/**
@@ -694,7 +693,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
* backlight control and supports more levels than other options.
* Disable the other backlight choices.
*/
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
apple_bl_unregister();
gmux_data->power_state = VGA_SWITCHEROO_ON;
@@ -804,7 +802,6 @@ static void gmux_remove(struct pnp_dev *pnp)
apple_gmux_data = NULL;
kfree(gmux_data);
- acpi_video_register();
apple_bl_register();
}
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 4d2d32bfbe2a..47b2f8bb6fb5 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1633,7 +1633,7 @@ static int asus_platform_init(struct asus_laptop *asus)
{
int result;
- asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
+ asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, PLATFORM_DEVID_NONE);
if (!asus->platform_device)
return -ENOMEM;
platform_set_drvdata(asus->platform_device, asus);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 478dd300b9c9..613c45c9fbe3 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(wapf, "WAPF value");
static int tablet_mode_sw = -1;
module_param(tablet_mode_sw, uint, 0444);
-MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip");
+MODULE_PARM_DESC(tablet_mode_sw, "Tablet mode detect: -1:auto 0:disable 1:kbd-dock 2:lid-flip 3:lid-flip-rog");
static struct quirk_entry *quirks;
@@ -79,12 +79,10 @@ static struct quirk_entry quirk_asus_q500a = {
/*
* For those machines that need software to control bt/wifi status
- * and can't adjust brightness through ACPI interface
* and have duplicate events(ACPI and WMI) for display toggle
*/
static struct quirk_entry quirk_asus_x55u = {
.wapf = 4,
- .wmi_backlight_power = true,
.wmi_backlight_set_devstate = true,
.no_display_toggle = true,
};
@@ -99,11 +97,6 @@ static struct quirk_entry quirk_asus_x200ca = {
.wmi_backlight_set_devstate = true,
};
-static struct quirk_entry quirk_asus_ux303ub = {
- .wmi_backlight_native = true,
- .wmi_backlight_set_devstate = true,
-};
-
static struct quirk_entry quirk_asus_x550lb = {
.wmi_backlight_set_devstate = true,
.xusb2pr = 0x01D9,
@@ -115,12 +108,17 @@ static struct quirk_entry quirk_asus_forceals = {
};
static struct quirk_entry quirk_asus_use_kbd_dock_devid = {
- .use_kbd_dock_devid = true,
+ .tablet_switch_mode = asus_wmi_kbd_dock_devid,
};
static struct quirk_entry quirk_asus_use_lid_flip_devid = {
.wmi_backlight_set_devstate = true,
- .use_lid_flip_devid = true,
+ .tablet_switch_mode = asus_wmi_lid_flip_devid,
+};
+
+static struct quirk_entry quirk_asus_tablet_mode = {
+ .wmi_backlight_set_devstate = true,
+ .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
};
static int dmi_matched(const struct dmi_system_id *dmi)
@@ -147,11 +145,6 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "U32U"),
},
- /*
- * Note this machine has a Brazos APU, and most Brazos Asus
- * machines need quirk_asus_x55u / wmi_backlight_power but
- * here acpi-video seems to work fine for backlight control.
- */
.driver_data = &quirk_asus_wapf4,
},
{
@@ -381,15 +374,6 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. UX303UB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"),
- },
- .driver_data = &quirk_asus_ux303ub,
- },
- {
- .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. UX330UAK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -471,6 +455,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_use_lid_flip_devid,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG FLOW X13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
+ },
+ .driver_data = &quirk_asus_tablet_mode,
+ },
{},
};
@@ -490,20 +483,8 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
else
wapf = quirks->wapf;
- switch (tablet_mode_sw) {
- case 0:
- quirks->use_kbd_dock_devid = false;
- quirks->use_lid_flip_devid = false;
- break;
- case 1:
- quirks->use_kbd_dock_devid = true;
- quirks->use_lid_flip_devid = false;
- break;
- case 2:
- quirks->use_kbd_dock_devid = false;
- quirks->use_lid_flip_devid = true;
- break;
- }
+ if (tablet_mode_sw != -1)
+ quirks->tablet_switch_mode = tablet_mode_sw;
if (quirks->i8042_filter) {
ret = i8042_install_filter(quirks->i8042_filter);
@@ -575,12 +556,14 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
{ KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
{ KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
{ KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
{ KE_KEY, 0xB5, { KEY_CALC } },
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
+ { KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
{ KE_END, 0},
};
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index 6fd0c9fea82d..62310e06282b 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -878,14 +878,12 @@ static int tf103c_dock_probe(struct i2c_client *client)
return 0;
}
-static int tf103c_dock_remove(struct i2c_client *client)
+static void tf103c_dock_remove(struct i2c_client *client)
{
struct tf103c_dock_data *dock = i2c_get_clientdata(client);
tf103c_dock_stop_hpd(dock);
tf103c_dock_disable(dock);
-
- return 0;
}
static int __maybe_unused tf103c_dock_suspend(struct device *dev)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 89b604e04d7f..6e8e093f96b3 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -68,9 +68,11 @@ module_param(fnlock_default, bool, 0444);
#define NOTIFY_KBD_FBM 0x99
#define NOTIFY_KBD_TTP 0xae
#define NOTIFY_LID_FLIP 0xfa
+#define NOTIFY_LID_FLIP_ROG 0xbd
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
+#define ASUS_GPU_FAN_DESC "gpu_fan"
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
#define ASUS_FAN_SFUN_READ 0x06
@@ -107,7 +109,7 @@ module_param(fnlock_default, bool, 0444);
#define WMI_EVENT_MASK 0xFFFF
#define FAN_CURVE_POINTS 8
-#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2)
+#define FAN_CURVE_BUF_LEN 32
#define FAN_CURVE_DEV_CPU 0x00
#define FAN_CURVE_DEV_GPU 0x01
/* Mask to determine if setting temperature or percentage */
@@ -221,19 +223,25 @@ struct asus_wmi {
struct asus_rfkill gps;
struct asus_rfkill uwb;
+ int tablet_switch_event_code;
+ u32 tablet_switch_dev_id;
+
enum fan_type fan_type;
+ enum fan_type gpu_fan_type;
int fan_pwm_mode;
+ int gpu_fan_pwm_mode;
int agfn_pwm;
bool fan_boost_mode_available;
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
- bool egpu_enable_available; // 0 = enable
- bool egpu_enable;
-
+ bool egpu_enable_available;
bool dgpu_disable_available;
- bool dgpu_disable;
+ bool gpu_mux_mode_available;
+
+ bool kbd_rgb_mode_available;
+ bool kbd_rgb_state_available;
bool throttle_thermal_policy_available;
u8 throttle_thermal_policy_mode;
@@ -249,7 +257,6 @@ struct asus_wmi {
bool battery_rsoc_available;
bool panel_overdrive_available;
- bool panel_overdrive;
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
@@ -486,10 +493,28 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id)
}
/* Input **********************************************************************/
+static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code)
+{
+ struct device *dev = &asus->platform_device->dev;
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, dev_id);
+ if (result >= 0) {
+ input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
+ input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
+ asus->tablet_switch_dev_id = dev_id;
+ asus->tablet_switch_event_code = event_code;
+ } else if (result == -ENODEV) {
+ dev_err(dev, "This device has tablet-mode-switch quirk but got ENODEV checking it. This is a bug.");
+ } else {
+ dev_err(dev, "Error checking for tablet-mode-switch: %d\n", result);
+ }
+}
static int asus_wmi_input_init(struct asus_wmi *asus)
{
- int err, result;
+ struct device *dev = &asus->platform_device->dev;
+ int err;
asus->inputdev = input_allocate_device();
if (!asus->inputdev)
@@ -498,35 +523,25 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
asus->inputdev->name = asus->driver->input_name;
asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
- asus->inputdev->dev.parent = &asus->platform_device->dev;
+ asus->inputdev->dev.parent = dev;
set_bit(EV_REP, asus->inputdev->evbit);
err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
if (err)
goto err_free_dev;
- if (asus->driver->quirks->use_kbd_dock_devid) {
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_KBD_DOCK);
- if (result >= 0) {
- input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
- input_report_switch(asus->inputdev, SW_TABLET_MODE, !result);
- } else if (result != -ENODEV) {
- pr_err("Error checking for keyboard-dock: %d\n", result);
- }
- }
-
- if (asus->driver->quirks->use_lid_flip_devid) {
- result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_LID_FLIP);
- if (result < 0)
- asus->driver->quirks->use_lid_flip_devid = 0;
- if (result >= 0) {
- input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE);
- input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
- } else if (result == -ENODEV) {
- pr_err("This device has lid_flip quirk but got ENODEV checking it. This is a bug.");
- } else {
- pr_err("Error checking for lid-flip: %d\n", result);
- }
+ switch (asus->driver->quirks->tablet_switch_mode) {
+ case asus_wmi_no_tablet_switch:
+ break;
+ case asus_wmi_kbd_dock_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE);
+ break;
+ case asus_wmi_lid_flip_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP, NOTIFY_LID_FLIP);
+ break;
+ case asus_wmi_lid_flip_rog_devid:
+ asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_LID_FLIP_ROG, NOTIFY_LID_FLIP_ROG);
+ break;
}
err = input_register_device(asus->inputdev);
@@ -550,10 +565,14 @@ static void asus_wmi_input_exit(struct asus_wmi *asus)
/* Tablet mode ****************************************************************/
-static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
+static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
{
- int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_LID_FLIP);
+ int result;
+
+ if (!asus->tablet_switch_dev_id)
+ return;
+ result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id);
if (result >= 0) {
input_report_switch(asus->inputdev, SW_TABLET_MODE, result);
input_sync(asus->inputdev);
@@ -561,179 +580,267 @@ static void lid_flip_tablet_mode_get_state(struct asus_wmi *asus)
}
/* dGPU ********************************************************************/
-static int dgpu_disable_check_present(struct asus_wmi *asus)
+static ssize_t dgpu_disable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u32 result;
- int err;
-
- asus->dgpu_disable_available = false;
-
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_DGPU, &result);
- if (err) {
- if (err == -ENODEV)
- return 0;
- return err;
- }
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
- asus->dgpu_disable_available = true;
- asus->dgpu_disable = result & ASUS_WMI_DSTS_STATUS_BIT;
- }
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
+ if (result < 0)
+ return result;
- return 0;
+ return sysfs_emit(buf, "%d\n", result);
}
-static int dgpu_disable_write(struct asus_wmi *asus)
+/*
+ * A user may be required to store the value twice: a typical first store, then
+ * a PCI bus rescan to activate power, then a second store to save correctly.
+ * The reason for this is that an extra code path in the ACPI is enabled when
+ * the device and bus are powered.
+ */
+static ssize_t dgpu_disable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- u32 retval;
- u8 value;
- int err;
+ int result, err;
+ u32 disable;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
- /* Don't rely on type conversion */
- value = asus->dgpu_disable ? 1 : 0;
+ result = kstrtou32(buf, 10, &disable);
+ if (result)
+ return result;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, value, &retval);
+ if (disable > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
if (err) {
pr_warn("Failed to set dgpu disable: %d\n", err);
return err;
}
- if (retval > 1) {
- pr_warn("Failed to set dgpu disable (retval): 0x%x\n", retval);
+ if (result > 1) {
+ pr_warn("Failed to set dgpu disable (result): 0x%x\n", result);
return -EIO;
}
sysfs_notify(&asus->platform_device->dev.kobj, NULL, "dgpu_disable");
- return 0;
+ return count;
}
+static DEVICE_ATTR_RW(dgpu_disable);
-static ssize_t dgpu_disable_show(struct device *dev,
+/* eGPU ********************************************************************/
+static ssize_t egpu_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
- u8 mode = asus->dgpu_disable;
+ int result;
- return sysfs_emit(buf, "%d\n", mode);
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
}
-/*
- * A user may be required to store the value twice, typcial store first, then
- * rescan PCI bus to activate power, then store a second time to save correctly.
- * The reason for this is that an extra code path in the ACPI is enabled when
- * the device and bus are powered.
- */
-static ssize_t dgpu_disable_store(struct device *dev,
+/* The ACPI call to enable the eGPU also disables the internal dGPU */
+static ssize_t egpu_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- bool disable;
- int result;
+ int result, err;
+ u32 enable;
struct asus_wmi *asus = dev_get_drvdata(dev);
- result = kstrtobool(buf, &disable);
- if (result)
- return result;
+ err = kstrtou32(buf, 10, &enable);
+ if (err)
+ return err;
- asus->dgpu_disable = disable;
+ if (enable > 1)
+ return -EINVAL;
- result = dgpu_disable_write(asus);
- if (result)
- return result;
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
+ if (err) {
+ pr_warn("Failed to set egpu disable: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set egpu disable (retval): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
return count;
}
+static DEVICE_ATTR_RW(egpu_enable);
-static DEVICE_ATTR_RW(dgpu_disable);
+/* gpu mux switch *************************************************************/
+static ssize_t gpu_mux_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
-/* eGPU ********************************************************************/
-static int egpu_enable_check_present(struct asus_wmi *asus)
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static ssize_t gpu_mux_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- u32 result;
- int err;
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, err;
+ u32 optimus;
+
+ err = kstrtou32(buf, 10, &optimus);
+ if (err)
+ return err;
- asus->egpu_enable_available = false;
+ if (optimus > 1)
+ return -EINVAL;
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_EGPU, &result);
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
if (err) {
- if (err == -ENODEV)
- return 0;
+ dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
return err;
}
-
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
- asus->egpu_enable_available = true;
- asus->egpu_enable = result & ASUS_WMI_DSTS_STATUS_BIT;
+ /* Any value other than 1 is considered a failure by ASUS */
+ if (result != 1) {
+ dev_warn(dev, "Failed to set GPU MUX mode (result): 0x%x\n", result);
+ return -EIO;
}
- return 0;
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "gpu_mux_mode");
+
+ return count;
}
+static DEVICE_ATTR_RW(gpu_mux_mode);
-static int egpu_enable_write(struct asus_wmi *asus)
+/* TUF Laptop Keyboard RGB Modes **********************************************/
+static ssize_t kbd_rgb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- u32 retval;
- u8 value;
+ u32 cmd, mode, r, g, b, speed;
int err;
- /* Don't rely on type conversion */
- value = asus->egpu_enable ? 1 : 0;
+ if (sscanf(buf, "%d %d %d %d %d %d", &cmd, &mode, &r, &g, &b, &speed) != 6)
+ return -EINVAL;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, value, &retval);
+ cmd = !!cmd;
- if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
- return err;
- }
+ /* These are the known usable modes across all TUF/ROG */
+ if (mode >= 12 || mode == 9)
+ mode = 10;
- if (retval > 1) {
- pr_warn("Failed to set egpu disable (retval): 0x%x\n", retval);
- return -EIO;
+ switch (speed) {
+ case 0:
+ speed = 0xe1;
+ break;
+ case 1:
+ speed = 0xeb;
+ break;
+ case 2:
+ speed = 0xf5;
+ break;
+ default:
+ speed = 0xeb;
}
- sysfs_notify(&asus->platform_device->dev.kobj, NULL, "egpu_enable");
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE,
+ cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL);
+ if (err)
+ return err;
- return 0;
+ return count;
}
+static DEVICE_ATTR_WO(kbd_rgb_mode);
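For reference, the store above packs the six space-separated fields into the two 32-bit words passed after the device ID to asus_wmi_evaluate_method3(). A write of "1 0 255 0 0 1" (cmd=1, mode=0, red=255, green=0, blue=0, speed=1, where speed 1 is mapped to 0xeb by the switch) works out to:

	word 1: 1 | (0 << 8) | (255 << 16) | (0 << 24) = 0x00ff0001
	word 2: 0 | (0xeb << 8)                        = 0x0000eb00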
-static ssize_t egpu_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t kbd_rgb_mode_index_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
{
- struct asus_wmi *asus = dev_get_drvdata(dev);
- bool mode = asus->egpu_enable;
-
- return sysfs_emit(buf, "%d\n", mode);
+ return sysfs_emit(buf, "%s\n", "cmd mode red green blue speed");
}
+static DEVICE_ATTR_RO(kbd_rgb_mode_index);
-/* The ACPI call to enable the eGPU also disables the internal dGPU */
-static ssize_t egpu_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- bool enable;
- int result;
-
- struct asus_wmi *asus = dev_get_drvdata(dev);
+static struct attribute *kbd_rgb_mode_attrs[] = {
+ &dev_attr_kbd_rgb_mode.attr,
+ &dev_attr_kbd_rgb_mode_index.attr,
+ NULL,
+};
- result = kstrtobool(buf, &enable);
- if (result)
- return result;
+static const struct attribute_group kbd_rgb_mode_group = {
+ .attrs = kbd_rgb_mode_attrs,
+};
- asus->egpu_enable = enable;
+/* TUF Laptop Keyboard RGB State **********************************************/
+static ssize_t kbd_rgb_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 flags, cmd, boot, awake, sleep, keyboard;
+ int err;
- result = egpu_enable_write(asus);
- if (result)
- return result;
+ if (sscanf(buf, "%d %d %d %d %d", &cmd, &boot, &awake, &sleep, &keyboard) != 5)
+ return -EINVAL;
- /* Ensure that the kernel status of dgpu is updated */
- result = dgpu_disable_check_present(asus);
- if (result)
- return result;
+ if (cmd)
+ cmd = BIT(2);
+
+ flags = 0;
+ if (boot)
+ flags |= BIT(1);
+ if (awake)
+ flags |= BIT(3);
+ if (sleep)
+ flags |= BIT(5);
+ if (keyboard)
+ flags |= BIT(7);
+
+ /* 0xbd is the required default arg0 for the method. Nothing happens otherwise */
+ err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS,
+ ASUS_WMI_DEVID_TUF_RGB_STATE, 0xbd | cmd << 8 | (flags << 16), 0, NULL);
+ if (err)
+ return err;
return count;
}
+static DEVICE_ATTR_WO(kbd_rgb_state);
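Similarly, the flag bits above combine into the single packed word handed to the WMI method. A write of "1 1 1 0 1" (cmd, boot, awake and keyboard set, sleep clear) gives cmd = BIT(2) = 0x04 and flags = BIT(1) | BIT(3) | BIT(7) = 0x8a, so the argument after the device ID becomes:

	0xbd | (0x04 << 8) | (0x8a << 16) = 0x008a04bd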
-static DEVICE_ATTR_RW(egpu_enable);
+static ssize_t kbd_rgb_state_index_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", "cmd boot awake sleep keyboard");
+}
+static DEVICE_ATTR_RO(kbd_rgb_state_index);
+
+static struct attribute *kbd_rgb_state_attrs[] = {
+ &dev_attr_kbd_rgb_state.attr,
+ &dev_attr_kbd_rgb_state_index.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_rgb_state_group = {
+ .attrs = kbd_rgb_state_attrs,
+};
+
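+/*
+ * Slots are filled in by asus_wmi_led_init() depending on which TUF RGB
+ * interfaces are present: at most one entry for the mode group, one for the
+ * state group, plus the terminating NULL.
+ */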
+static const struct attribute_group *kbd_rgb_mode_groups[] = {
+ NULL,
+ NULL,
+ NULL,
+};
/* Battery ********************************************************************/
@@ -771,7 +878,7 @@ static ssize_t charge_control_end_threshold_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", charge_end_threshold);
+ return sysfs_emit(buf, "%d\n", charge_end_threshold);
}
static DEVICE_ATTR_RW(charge_control_end_threshold);
@@ -1053,7 +1160,12 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
static int asus_wmi_led_init(struct asus_wmi *asus)
{
- int rv = 0, led_val;
+ int rv = 0, num_rgb_groups = 0, led_val;
+
+ if (asus->kbd_rgb_mode_available)
+ kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_mode_group;
+ if (asus->kbd_rgb_state_available)
+ kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_state_group;
asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
if (!asus->led_workqueue)
@@ -1081,6 +1193,9 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
asus->kbd_led.brightness_get = kbd_led_get;
asus->kbd_led.max_brightness = 3;
+ if (num_rgb_groups != 0)
+ asus->kbd_led.groups = kbd_rgb_mode_groups;
+
rv = led_classdev_register(&asus->platform_device->dev,
&asus->kbd_led);
if (rv)
@@ -1118,7 +1233,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
- asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
@@ -1555,84 +1670,51 @@ exit:
}
/* Panel Overdrive ************************************************************/
-static int panel_od_check_present(struct asus_wmi *asus)
-{
- u32 result;
- int err;
-
- asus->panel_overdrive_available = false;
-
- err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_PANEL_OD, &result);
- if (err) {
- if (err == -ENODEV)
- return 0;
- return err;
- }
-
- if (result & ASUS_WMI_DSTS_PRESENCE_BIT) {
- asus->panel_overdrive_available = true;
- asus->panel_overdrive = result & ASUS_WMI_DSTS_STATUS_BIT;
- }
-
- return 0;
-}
-
-static int panel_od_write(struct asus_wmi *asus)
-{
- u32 retval;
- u8 value;
- int err;
-
- /* Don't rely on type conversion */
- value = asus->panel_overdrive ? 1 : 0;
-
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, value, &retval);
-
- if (err) {
- pr_warn("Failed to set panel overdrive: %d\n", err);
- return err;
- }
-
- if (retval > 1) {
- pr_warn("Failed to set panel overdrive (retval): 0x%x\n", retval);
- return -EIO;
- }
-
- sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
-
- return 0;
-}
-
static ssize_t panel_od_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
- return sysfs_emit(buf, "%d\n", asus->panel_overdrive);
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_PANEL_OD);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
}
static ssize_t panel_od_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- bool overdrive;
- int result;
+ int result, err;
+ u32 overdrive;
struct asus_wmi *asus = dev_get_drvdata(dev);
- result = kstrtobool(buf, &overdrive);
+ result = kstrtou32(buf, 10, &overdrive);
if (result)
return result;
- asus->panel_overdrive = overdrive;
- result = panel_od_write(asus);
+ if (overdrive > 1)
+ return -EINVAL;
- if (result)
- return result;
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PANEL_OD, overdrive, &result);
+
+ if (err) {
+ pr_warn("Failed to set panel overdrive: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set panel overdrive (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "panel_od");
return count;
}
-
static DEVICE_ATTR_RW(panel_od);
/* Quirks *********************************************************************/
@@ -1782,6 +1864,18 @@ static int asus_fan_set_auto(struct asus_wmi *asus)
return -ENXIO;
}
+ /*
+ * Modern models like the G713 also have GPU fan control (this is not AGFN)
+ */
+ if (asus->gpu_fan_type == FAN_TYPE_SPEC83) {
+ status = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
+ 0, &retval);
+ if (status)
+ return status;
+
+ if (retval != 1)
+ return -EIO;
+ }
return 0;
}
@@ -1819,7 +1913,7 @@ static ssize_t pwm1_show(struct device *dev,
value = -1;
}
- return sprintf(buf, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t pwm1_store(struct device *dev,
@@ -1879,7 +1973,7 @@ static ssize_t fan1_input_show(struct device *dev,
return -ENXIO;
}
- return sprintf(buf, "%d\n", value < 0 ? -1 : value*100);
+ return sysfs_emit(buf, "%d\n", value < 0 ? -1 : value * 100);
}
static ssize_t pwm1_enable_show(struct device *dev,
@@ -1897,7 +1991,7 @@ static ssize_t pwm1_enable_show(struct device *dev,
* in practice on X532FL at least (the bit is always 0) and there's
* also nothing in the DSDT to indicate that this behaviour exists.
*/
- return sprintf(buf, "%d\n", asus->fan_pwm_mode);
+ return sysfs_emit(buf, "%d\n", asus->fan_pwm_mode);
}
static ssize_t pwm1_enable_store(struct device *dev,
@@ -1965,7 +2059,7 @@ static ssize_t fan1_label_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", ASUS_FAN_DESC);
+ return sysfs_emit(buf, "%s\n", ASUS_FAN_DESC);
}
static ssize_t asus_hwmon_temp1(struct device *dev,
@@ -1984,11 +2078,86 @@ static ssize_t asus_hwmon_temp1(struct device *dev,
deci_kelvin_to_millicelsius(value & 0xFFFF));
}
+/* GPU fan on modern ROG laptops */
+static ssize_t fan2_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int ret;
+
+ ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= 0xffff;
+
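+ /* The reported value appears to be in units of 100 RPM, hence the scaling */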
+ return sysfs_emit(buf, "%d\n", value * 100);
+}
+
+static ssize_t fan2_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", ASUS_GPU_FAN_DESC);
+}
+
+static ssize_t pwm2_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", asus->gpu_fan_pwm_mode);
+}
+
+static ssize_t pwm2_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int state;
+ int value;
+ int ret;
+ u32 retval;
+
+ ret = kstrtouint(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ switch (state) { /* standard documented hwmon values */
+ case ASUS_FAN_CTRL_FULLSPEED:
+ value = 1;
+ break;
+ case ASUS_FAN_CTRL_AUTO:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_FAN_CTRL,
+ value, &retval);
+ if (ret)
+ return ret;
+
+ if (retval != 1)
+ return -EIO;
+
+ asus->gpu_fan_pwm_mode = state;
+ return count;
+}
+
/* Fan1 */
static DEVICE_ATTR_RW(pwm1);
static DEVICE_ATTR_RW(pwm1_enable);
static DEVICE_ATTR_RO(fan1_input);
static DEVICE_ATTR_RO(fan1_label);
+/* Fan2 - GPU fan */
+static DEVICE_ATTR_RW(pwm2_enable);
+static DEVICE_ATTR_RO(fan2_input);
+static DEVICE_ATTR_RO(fan2_label);
/* Temperature */
static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
@@ -1996,8 +2165,11 @@ static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
static struct attribute *hwmon_attributes[] = {
&dev_attr_pwm1.attr,
&dev_attr_pwm1_enable.attr,
+ &dev_attr_pwm2_enable.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_label.attr,
+ &dev_attr_fan2_input.attr,
+ &dev_attr_fan2_label.attr,
&dev_attr_temp1_input.attr,
NULL
@@ -2006,7 +2178,7 @@ static struct attribute *hwmon_attributes[] = {
static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev->parent);
u32 value = ASUS_WMI_UNSUPPORTED_METHOD;
@@ -2018,6 +2190,11 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
|| attr == &dev_attr_pwm1_enable.attr) {
if (asus->fan_type == FAN_TYPE_NONE)
return 0;
+ } else if (attr == &dev_attr_fan2_input.attr
+ || attr == &dev_attr_fan2_label.attr
+ || attr == &dev_attr_pwm2_enable.attr) {
+ if (asus->gpu_fan_type == FAN_TYPE_NONE)
+ return 0;
} else if (attr == &dev_attr_temp1_input.attr) {
int err = asus_wmi_get_devstate(asus,
ASUS_WMI_DEVID_THERMAL_CTRL,
@@ -2060,6 +2237,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
static int asus_wmi_fan_init(struct asus_wmi *asus)
{
+ asus->gpu_fan_type = FAN_TYPE_NONE;
asus->fan_type = FAN_TYPE_NONE;
asus->agfn_pwm = -1;
@@ -2068,6 +2246,10 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
else if (asus_wmi_has_agfn_fan(asus))
asus->fan_type = FAN_TYPE_AGFN;
+ /* Modern models like G713 also have GPU fan control */
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL))
+ asus->gpu_fan_type = FAN_TYPE_SPEC83;
+
if (asus->fan_type == FAN_TYPE_NONE)
return -ENODEV;
@@ -2158,7 +2340,7 @@ static ssize_t fan_boost_mode_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", asus->fan_boost_mode);
+ return sysfs_emit(buf, "%d\n", asus->fan_boost_mode);
}
static ssize_t fan_boost_mode_store(struct device *dev,
@@ -2233,8 +2415,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
curves = &asus->custom_fan_curves[fan_idx];
err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
FAN_CURVE_BUF_LEN);
- if (err)
+ if (err) {
+ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
return err;
+ }
fan_curve_copy_from_buf(curves, buf);
curves->device_id = fan_dev;
@@ -2252,9 +2436,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
- fan_dev, err);
- /* Don't cause probe to fail on devices without fan-curves */
return 0;
}
@@ -2711,7 +2892,7 @@ static ssize_t throttle_thermal_policy_show(struct device *dev,
struct asus_wmi *asus = dev_get_drvdata(dev);
u8 mode = asus->throttle_thermal_policy_mode;
- return scnprintf(buf, PAGE_SIZE, "%d\n", mode);
+ return sysfs_emit(buf, "%d\n", mode);
}
static ssize_t throttle_thermal_policy_store(struct device *dev,
@@ -3063,9 +3244,7 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
unsigned int key_value = 1;
bool autorelease = 1;
- int result, orig_code;
-
- orig_code = code;
+ int orig_code = code;
if (asus->driver->key_filter) {
asus->driver->key_filter(asus->driver, &code, &key_value,
@@ -3108,30 +3287,18 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
- if (asus->driver->quirks->use_kbd_dock_devid && code == NOTIFY_KBD_DOCK_CHANGE) {
- result = asus_wmi_get_devstate_simple(asus,
- ASUS_WMI_DEVID_KBD_DOCK);
- if (result >= 0) {
- input_report_switch(asus->inputdev, SW_TABLET_MODE,
- !result);
- input_sync(asus->inputdev);
- }
- return;
- }
-
- if (asus->driver->quirks->use_lid_flip_devid && code == NOTIFY_LID_FLIP) {
- lid_flip_tablet_mode_get_state(asus);
+ if (code == asus->tablet_switch_event_code) {
+ asus_wmi_tablet_mode_get_state(asus);
return;
}
- if (asus->fan_boost_mode_available && code == NOTIFY_KBD_FBM) {
- fan_boost_mode_switch_next(asus);
+ if (code == NOTIFY_KBD_FBM || code == NOTIFY_KBD_TTP) {
+ if (asus->fan_boost_mode_available)
+ fan_boost_mode_switch_next(asus);
+ if (asus->throttle_thermal_policy_available)
+ throttle_thermal_policy_switch_next(asus);
return;
- }
- if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) {
- throttle_thermal_policy_switch_next(asus);
- return;
}
if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle)
@@ -3283,6 +3450,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_touchpad.attr,
&dev_attr_egpu_enable.attr,
&dev_attr_dgpu_disable.attr,
+ &dev_attr_gpu_mux_mode.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
@@ -3294,7 +3462,7 @@ static struct attribute *platform_attributes[] = {
static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct asus_wmi *asus = dev_get_drvdata(dev);
bool ok = true;
int devid = -1;
@@ -3313,6 +3481,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
ok = asus->egpu_enable_available;
else if (attr == &dev_attr_dgpu_disable.attr)
ok = asus->dgpu_disable_available;
+ else if (attr == &dev_attr_gpu_mux_mode.attr)
+ ok = asus->gpu_mux_mode_available;
else if (attr == &dev_attr_fan_boost_mode.attr)
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
@@ -3553,7 +3723,6 @@ static int asus_wmi_add(struct platform_device *pdev)
struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver);
struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv);
struct asus_wmi *asus;
- const char *chassis_type;
acpi_status status;
int err;
u32 result;
@@ -3574,13 +3743,12 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform;
- err = egpu_enable_check_present(asus);
- if (err)
- goto fail_egpu_enable;
-
- err = dgpu_disable_check_present(asus);
- if (err)
- goto fail_dgpu_disable;
+ asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
+ asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
+ asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
+ asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
err = fan_boost_mode_check_present(asus);
if (err)
@@ -3596,10 +3764,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform_profile_setup;
- err = panel_od_check_present(asus);
- if (err)
- goto fail_panel_od;
-
err = asus_wmi_sysfs_init(asus->platform_device);
if (err)
goto fail_sysfs;
@@ -3635,18 +3799,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (asus->driver->quirks->wmi_force_als_set)
asus_wmi_set_als();
- /* Some Asus desktop boards export an acpi-video backlight interface,
- stop this from showing up */
- chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
- if (chassis_type && !strcmp(chassis_type, "3"))
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
- if (asus->driver->quirks->wmi_backlight_power)
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
- if (asus->driver->quirks->wmi_backlight_native)
- acpi_video_set_dmi_backlight_type(acpi_backlight_native);
-
if (asus->driver->quirks->xusb2pr)
asus_wmi_set_xusb2pr(asus);
@@ -3694,10 +3846,7 @@ fail_platform_profile_setup:
if (asus->platform_profile_support)
platform_profile_remove();
fail_fan_boost_mode:
-fail_egpu_enable:
-fail_dgpu_disable:
fail_platform:
-fail_panel_od:
kfree(asus);
return err;
}
@@ -3756,9 +3905,7 @@ static int asus_hotk_resume(struct device *device)
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
- if (asus->driver->quirks->use_lid_flip_devid)
- lid_flip_tablet_mode_get_state(asus);
-
+ asus_wmi_tablet_mode_get_state(asus);
return 0;
}
@@ -3798,9 +3945,7 @@ static int asus_hotk_restore(struct device *device)
if (asus_wmi_has_fnlock_key(asus))
asus_wmi_fnlock_update(asus);
- if (asus->driver->quirks->use_lid_flip_devid)
- lid_flip_tablet_mode_get_state(asus);
-
+ asus_wmi_tablet_mode_get_state(asus);
return 0;
}
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index b302415bf1d9..65316998b898 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -25,16 +25,20 @@ struct module;
struct key_entry;
struct asus_wmi;
+enum asus_wmi_tablet_switch_mode {
+ asus_wmi_no_tablet_switch,
+ asus_wmi_kbd_dock_devid,
+ asus_wmi_lid_flip_devid,
+ asus_wmi_lid_flip_rog_devid,
+};
+
struct quirk_entry {
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
- bool wmi_backlight_power;
- bool wmi_backlight_native;
bool wmi_backlight_set_devstate;
bool wmi_force_als_set;
- bool use_kbd_dock_devid;
- bool use_lid_flip_devid;
+ enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
* For machines with AMD graphic chips, it will send out WMI event
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 0942f50bd793..e10d2f64dfad 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -721,16 +721,6 @@ static struct attribute *compal_hwmon_attrs[] = {
};
ATTRIBUTE_GROUPS(compal_hwmon);
-static int compal_probe(struct platform_device *);
-static int compal_remove(struct platform_device *);
-static struct platform_driver compal_driver = {
- .driver = {
- .name = DRIVER_NAME,
- },
- .probe = compal_probe,
- .remove = compal_remove,
-};
-
static enum power_supply_property compal_bat_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
@@ -965,6 +955,80 @@ err_wifi:
return ret;
}
+static int compal_probe(struct platform_device *pdev)
+{
+ int err;
+ struct compal_data *data;
+ struct device *hwmon_dev;
+ struct power_supply_config psy_cfg = {};
+
+ if (!extra_features)
+ return 0;
+
+ /* Fan control */
+ data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ initialize_fan_control_data(data);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "compal", data,
+ compal_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto remove;
+ }
+
+ /* Power supply */
+ initialize_power_supply_data(data);
+ psy_cfg.drv_data = data;
+ data->psy = power_supply_register(&compal_device->dev, &psy_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(data->psy)) {
+ err = PTR_ERR(data->psy);
+ goto remove;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+remove:
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+ return err;
+}
+
+static int compal_remove(struct platform_device *pdev)
+{
+ struct compal_data *data;
+
+ if (!extra_features)
+ return 0;
+
+ pr_info("Unloading: resetting fan control to motherboard\n");
+ pwm_disable_control();
+
+ data = platform_get_drvdata(pdev);
+ power_supply_unregister(data->psy);
+
+ sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver compal_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = compal_probe,
+ .remove = compal_remove,
+};
+
static int __init compal_init(void)
{
int ret;
@@ -996,7 +1060,7 @@ static int __init compal_init(void)
if (ret)
goto err_backlight;
- compal_device = platform_device_alloc(DRIVER_NAME, -1);
+ compal_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
if (!compal_device) {
ret = -ENOMEM;
goto err_platform_driver;
@@ -1028,54 +1092,6 @@ err_backlight:
return ret;
}
-static int compal_probe(struct platform_device *pdev)
-{
- int err;
- struct compal_data *data;
- struct device *hwmon_dev;
- struct power_supply_config psy_cfg = {};
-
- if (!extra_features)
- return 0;
-
- /* Fan control */
- data = devm_kzalloc(&pdev->dev, sizeof(struct compal_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- initialize_fan_control_data(data);
-
- err = sysfs_create_group(&pdev->dev.kobj, &compal_platform_attr_group);
- if (err)
- return err;
-
- hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
- "compal", data,
- compal_hwmon_groups);
- if (IS_ERR(hwmon_dev)) {
- err = PTR_ERR(hwmon_dev);
- goto remove;
- }
-
- /* Power supply */
- initialize_power_supply_data(data);
- psy_cfg.drv_data = data;
- data->psy = power_supply_register(&compal_device->dev, &psy_bat_desc,
- &psy_cfg);
- if (IS_ERR(data->psy)) {
- err = PTR_ERR(data->psy);
- goto remove;
- }
-
- platform_set_drvdata(pdev, data);
-
- return 0;
-
-remove:
- sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
- return err;
-}
-
static void __exit compal_cleanup(void)
{
platform_device_unregister(compal_device);
@@ -1089,25 +1105,6 @@ static void __exit compal_cleanup(void)
pr_info("Driver unloaded\n");
}
-static int compal_remove(struct platform_device *pdev)
-{
- struct compal_data *data;
-
- if (!extra_features)
- return 0;
-
- pr_info("Unloading: resetting fan control to motherboard\n");
- pwm_disable_control();
-
- data = platform_get_drvdata(pdev);
- power_supply_unregister(data->psy);
-
- sysfs_remove_group(&pdev->dev.kobj, &compal_platform_attr_group);
-
- return 0;
-}
-
-
module_init(compal_init);
module_exit(compal_cleanup);
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
index f21248255529..a34e07ef2c79 100644
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ b/drivers/platform/x86/dell/alienware-wmi.c
@@ -791,7 +791,7 @@ static int __init alienware_wmi_init(void)
ret = platform_driver_register(&platform_driver);
if (ret)
goto fail_platform_driver;
- platform_device = platform_device_alloc("alienware-wmi", -1);
+ platform_device = platform_device_alloc("alienware-wmi", PLATFORM_DEVID_NONE);
if (!platform_device) {
ret = -ENOMEM;
goto fail_platform_device1;
diff --git a/drivers/platform/x86/dell/dcdbas.c b/drivers/platform/x86/dell/dcdbas.c
index 42beafbc54b2..0ecb7b164750 100644
--- a/drivers/platform/x86/dell/dcdbas.c
+++ b/drivers/platform/x86/dell/dcdbas.c
@@ -716,7 +716,7 @@ static struct platform_driver dcdbas_driver = {
static const struct platform_device_info dcdbas_dev_info __initconst = {
.name = DRIVER_NAME,
- .id = -1,
+ .id = PLATFORM_DEVID_NONE,
.dma_mask = DMA_BIT_MASK(32),
};
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 1321687d923e..e92c3ad06d69 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -2193,7 +2193,7 @@ static int __init dell_init(void)
ret = platform_driver_register(&platform_driver);
if (ret)
goto fail_platform_driver;
- platform_device = platform_device_alloc("dell-laptop", -1);
+ platform_device = platform_device_alloc("dell-laptop", PLATFORM_DEVID_NONE);
if (!platform_device) {
ret = -ENOMEM;
goto fail_platform_device1;
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index fc086b66f70b..e61bfaf8b5c4 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -441,7 +441,7 @@ static ssize_t location_show(struct device *dev,
i = match_attribute(dev, attr);
if (i > 0)
- return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].location);
+ return sysfs_emit(buf, "%08x", da_tokens[i].location);
return 0;
}
@@ -455,7 +455,7 @@ static ssize_t value_show(struct device *dev,
i = match_attribute(dev, attr);
if (i > 0)
- return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].value);
+ return sysfs_emit(buf, "%08x", da_tokens[i].value);
return 0;
}
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index e07d3ba85a3f..0a259a27459f 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -344,6 +344,9 @@ static const struct key_entry dell_wmi_keymap_type_0011[] = {
* They are events with extended data
*/
static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* Backlight brightness change event */
+ { KE_IGNORE, 0x0003, { KEY_RESERVED } },
+
/* Ultra-performance mode switch request */
{ KE_IGNORE, 0x000d, { KEY_RESERVED } },
diff --git a/drivers/platform/x86/dell/dell-wmi-privacy.c b/drivers/platform/x86/dell/dell-wmi-privacy.c
index 074b7e68c227..c82b3d6867c5 100644
--- a/drivers/platform/x86/dell/dell-wmi-privacy.c
+++ b/drivers/platform/x86/dell/dell-wmi-privacy.c
@@ -174,15 +174,12 @@ static ssize_t dell_privacy_current_state_show(struct device *dev,
static DEVICE_ATTR_RO(dell_privacy_supported_type);
static DEVICE_ATTR_RO(dell_privacy_current_state);
-static struct attribute *privacy_attributes[] = {
+static struct attribute *privacy_attrs[] = {
&dev_attr_dell_privacy_supported_type.attr,
&dev_attr_dell_privacy_current_state.attr,
NULL,
};
-
-static const struct attribute_group privacy_attribute_group = {
- .attrs = privacy_attributes
-};
+ATTRIBUTE_GROUPS(privacy);
/*
* Describes the Device State class exposed by BIOS which can be consumed by
@@ -342,10 +339,6 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
if (ret)
return ret;
- ret = devm_device_add_group(&wdev->dev, &privacy_attribute_group);
- if (ret)
- return ret;
-
if (priv->features_present & BIT(DELL_PRIVACY_TYPE_AUDIO)) {
ret = dell_privacy_leds_setup(&priv->wdev->dev);
if (ret)
@@ -374,6 +367,7 @@ static const struct wmi_device_id dell_wmi_privacy_wmi_id_table[] = {
static struct wmi_driver dell_privacy_wmi_driver = {
.driver = {
.name = "dell-privacy",
+ .dev_groups = privacy_groups,
},
.probe = dell_privacy_wmi_probe,
.remove = dell_privacy_wmi_remove,
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 636bdfa83284..0a6411a8a104 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -270,7 +270,7 @@ void strlcpy_attr(char *dest, char *src)
size_t len = strlen(src) + 1;
if (len > 1 && len <= MAX_BUFF)
- strlcpy(dest, src, len);
+ strscpy(dest, src, len);
/*len can be zero because any property not-applicable to attribute can
* be empty so check only for too long buffers and log error
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index e9f4b30dcafa..9f51e0fcab04 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -645,7 +645,7 @@ static int __init dcdrbu_init(void)
spin_lock_init(&rbu_data.lock);
init_packet_head();
- rbu_device = platform_device_register_simple("dell_rbu", -1, NULL, 0);
+ rbu_device = platform_device_register_simple("dell_rbu", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(rbu_device)) {
pr_err("platform_device_register_simple failed\n");
return PTR_ERR(rbu_device);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index ba08c9235f76..a388a28b6f2a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -444,7 +444,7 @@ static int eeepc_platform_init(struct eeepc_laptop *eeepc)
{
int result;
- eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, -1);
+ eeepc->platform_device = platform_device_alloc(EEEPC_LAPTOP_FILE, PLATFORM_DEVID_NONE);
if (!eeepc->platform_device)
return -ENOMEM;
platform_set_drvdata(eeepc->platform_device, eeepc);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index ce86d84ee796..32d9f0ba6be3 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -96,11 +96,6 @@ static struct quirk_entry quirk_asus_et2012_type3 = {
.store_backlight_power = true,
};
-static struct quirk_entry quirk_asus_x101ch = {
- /* We need this when ACPI function doesn't do this well */
- .wmi_backlight_power = true,
-};
-
static struct quirk_entry *quirks;
static void et2012_quirks(void)
@@ -151,25 +146,7 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_unknown,
},
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK Computer INC. X101CH",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
- },
- .driver_data = &quirk_asus_x101ch,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK Computer INC. 1015CX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
- },
- .driver_data = &quirk_asus_x101ch,
- },
- {},
+ {}
};
static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 80929380ec7e..b543d117b12c 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -543,7 +543,7 @@ static int fujitsu_laptop_platform_add(struct acpi_device *device)
struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
- priv->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+ priv->pf_device = platform_device_alloc("fujitsu-laptop", PLATFORM_DEVID_NONE);
if (!priv->pf_device)
return -ENOMEM;
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 9996485f5295..f11f726d2062 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -547,7 +547,7 @@ static int __init hdaps_init(void)
if (ret)
goto out_region;
- pdev = platform_device_register_simple("hdaps", -1, NULL, 0);
+ pdev = platform_device_register_simple("hdaps", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
goto out_driver;
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index bc7020e9df9e..627a6d0eaf83 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -177,7 +177,8 @@ enum hp_thermal_profile_omen_v1 {
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
- HP_THERMAL_PROFILE_COOL = 0x02
+ HP_THERMAL_PROFILE_COOL = 0x02,
+ HP_THERMAL_PROFILE_QUIET = 0x03,
};
#define IS_HWBLOCKED(x) ((x & HPWMI_POWER_FW_OR_HW) != HPWMI_POWER_FW_OR_HW)
@@ -206,15 +207,17 @@ struct bios_rfkill2_state {
};
static const struct key_entry hp_wmi_keymap[] = {
- { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
- { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
- { KE_KEY, 0x20e6, { KEY_PROG1 } },
- { KE_KEY, 0x20e8, { KEY_MEDIA } },
- { KE_KEY, 0x2142, { KEY_MEDIA } },
- { KE_KEY, 0x213b, { KEY_INFO } },
- { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
- { KE_KEY, 0x216a, { KEY_SETUP } },
- { KE_KEY, 0x231b, { KEY_HELP } },
+ { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x20e6, { KEY_PROG1 } },
+ { KE_KEY, 0x20e8, { KEY_MEDIA } },
+ { KE_KEY, 0x2142, { KEY_MEDIA } },
+ { KE_KEY, 0x213b, { KEY_INFO } },
+ { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
+ { KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } },
+ { KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -1194,6 +1197,9 @@ static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
case HP_THERMAL_PROFILE_COOL:
*profile = PLATFORM_PROFILE_COOL;
break;
+ case HP_THERMAL_PROFILE_QUIET:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
default:
return -EINVAL;
}
@@ -1216,6 +1222,9 @@ static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
case PLATFORM_PROFILE_COOL:
tp = HP_THERMAL_PROFILE_COOL;
break;
+ case PLATFORM_PROFILE_QUIET:
+ tp = HP_THERMAL_PROFILE_QUIET;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -1263,6 +1272,8 @@ static int thermal_profile_setup(void)
platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
+
+ set_bit(PLATFORM_PROFILE_QUIET, platform_profile_handler.choices);
}
set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
@@ -1508,7 +1519,7 @@ static int __init hp_wmi_init(void)
if (bios_capable) {
hp_wmi_platform_dev =
- platform_device_register_simple("hp-wmi", -1, NULL, 0);
+ platform_device_register_simple("hp-wmi", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(hp_wmi_platform_dev)) {
err = PTR_ERR(hp_wmi_platform_dev);
goto err_destroy_input;
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
index eac3e6b4ea11..5873c2663a65 100644
--- a/drivers/platform/x86/huawei-wmi.c
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -871,7 +871,7 @@ static __init int huawei_wmi_init(void)
if (err)
goto pdrv_err;
- pdev = platform_device_register_simple("huawei-wmi", -1, NULL, 0);
+ pdev = platform_device_register_simple("huawei-wmi", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
goto pdev_err;
diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c
index c52ac23e2331..2c9a7d52be07 100644
--- a/drivers/platform/x86/intel/chtwc_int33fe.c
+++ b/drivers/platform/x86/intel/chtwc_int33fe.c
@@ -219,7 +219,7 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data)
/*
* Update node used in "usb-role-switch" property. Note that we
- * rely on software_node_register_nodes() to use the original
+ * rely on software_node_register_node_group() to use the original
* instance of properties instead of copying them.
*/
fusb302_mux_refs[0].node = mux_ref_node;
@@ -270,7 +270,7 @@ cht_int33fe_register_max17047(struct device *dev, struct cht_int33fe_data *data)
}
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ strscpy(board_info.type, "max17047", I2C_NAME_SIZE);
board_info.dev_name = "max17047";
board_info.fwnode = fwnode;
data->battery_fg = i2c_acpi_new_device(dev, 1, &board_info);
@@ -361,7 +361,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
}
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ strscpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
board_info.dev_name = "fusb302";
board_info.fwnode = fwnode;
board_info.irq = fusb302_irq;
@@ -381,7 +381,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev)
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
board_info.fwnode = fwnode;
- strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
+ strscpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
if (IS_ERR(data->pi3usb30532)) {
diff --git a/drivers/platform/x86/intel/int3472/common.c b/drivers/platform/x86/intel/int3472/common.c
index 77cf058e4168..9db2bb0bbba4 100644
--- a/drivers/platform/x86/intel/int3472/common.c
+++ b/drivers/platform/x86/intel/int3472/common.c
@@ -62,7 +62,7 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
struct acpi_device *sensor;
int ret = 0;
- sensor = acpi_dev_get_first_consumer_dev(adev);
+ sensor = acpi_dev_get_next_consumer_dev(adev, NULL);
if (!sensor) {
dev_err(dev, "INT3472 seems to have no dependents.\n");
return -ENODEV;
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index ed4c9d760757..974a132db651 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -331,7 +331,22 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
return 0;
}
-static int skl_int3472_discrete_remove(struct platform_device *pdev);
+static int skl_int3472_discrete_remove(struct platform_device *pdev)
+{
+ struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
+
+ gpiod_remove_lookup_table(&int3472->gpios);
+
+ if (int3472->clock.cl)
+ skl_int3472_unregister_clock(int3472);
+
+ gpiod_put(int3472->clock.ena_gpio);
+ gpiod_put(int3472->clock.led_gpio);
+
+ skl_int3472_unregister_regulator(int3472);
+
+ return 0;
+}
static int skl_int3472_discrete_probe(struct platform_device *pdev)
{
@@ -383,23 +398,6 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev)
return 0;
}
-static int skl_int3472_discrete_remove(struct platform_device *pdev)
-{
- struct int3472_discrete_device *int3472 = platform_get_drvdata(pdev);
-
- gpiod_remove_lookup_table(&int3472->gpios);
-
- if (int3472->clock.cl)
- skl_int3472_unregister_clock(int3472);
-
- gpiod_put(int3472->clock.ena_gpio);
- gpiod_put(int3472->clock.led_gpio);
-
- skl_int3472_unregister_regulator(int3472);
-
- return 0;
-}
-
static const struct acpi_device_id int3472_device_id[] = {
{ "INT3472", 0 },
{ }
diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c
index 22f61b47f9e5..5b8d1a9620a5 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.c
+++ b/drivers/platform/x86/intel/int3472/tps68470.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Author: Dan Scally <djrscally@gmail.com> */
+#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
@@ -95,20 +96,65 @@ static int skl_int3472_tps68470_calc_type(struct acpi_device *adev)
return DESIGNED_FOR_WINDOWS;
}
+/*
+ * Return the number of consumers found (the length of the flexible array
+ * member), because we'll need it later on to compute the .pdata_size passed
+ * to the MFD cells.
+ */
+static int
+skl_int3472_fill_clk_pdata(struct device *dev, struct tps68470_clk_platform_data **clk_pdata)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *consumer;
+ unsigned int n_consumers = 0;
+ const char *sensor_name;
+ unsigned int i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer)
+ n_consumers++;
+
+ if (!n_consumers) {
+ dev_err(dev, "INT3472 seems to have no dependents\n");
+ return -ENODEV;
+ }
+
+ *clk_pdata = devm_kzalloc(dev, struct_size(*clk_pdata, consumers, n_consumers),
+ GFP_KERNEL);
+ if (!*clk_pdata)
+ return -ENOMEM;
+
+ (*clk_pdata)->n_consumers = n_consumers;
+ i = 0;
+
+ for_each_acpi_consumer_dev(adev, consumer) {
+ sensor_name = devm_kasprintf(dev, GFP_KERNEL, I2C_DEV_NAME_FORMAT,
+ acpi_dev_name(consumer));
+ if (!sensor_name) {
+ acpi_dev_put(consumer);
+ return -ENOMEM;
+ }
+
+ (*clk_pdata)->consumers[i].consumer_dev_name = sensor_name;
+ i++;
+ }
+
+ return n_consumers;
+}
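The helper above sizes platform data that ends in a flexible array member via struct_size(). A minimal standalone sketch of the same allocation pattern (illustrative struct and names, plain kzalloc() instead of the devm_ variant used here):

#include <linux/overflow.h>
#include <linux/slab.h>

struct clk_consumers_pdata {
	unsigned int n_consumers;
	struct {
		const char *consumer_dev_name;
	} consumers[];			/* flexible array member */
};

static struct clk_consumers_pdata *alloc_clk_pdata(unsigned int n)
{
	struct clk_consumers_pdata *pdata;

	/* struct_size() = sizeof(*pdata) + n * sizeof(pdata->consumers[0]), overflow-checked */
	pdata = kzalloc(struct_size(pdata, consumers, n), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->n_consumers = n;
	return pdata;
}

The probe path below reuses the same struct_size() expression for the MFD cell's .pdata_size, so the allocation and the payload size handed to the cell always agree.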
+
static int skl_int3472_tps68470_probe(struct i2c_client *client)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
const struct int3472_tps68470_board_data *board_data;
- struct tps68470_clk_platform_data clk_pdata = {};
+ struct tps68470_clk_platform_data *clk_pdata;
struct mfd_cell *cells;
struct regmap *regmap;
+ int n_consumers;
int device_type;
int ret;
+ int i;
- ret = skl_int3472_get_sensor_adev_and_name(&client->dev, NULL,
- &clk_pdata.consumer_dev_name);
- if (ret)
- return ret;
+ n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata);
+ if (n_consumers < 0)
+ return n_consumers;
regmap = devm_regmap_init_i2c(client, &tps68470_regmap_config);
if (IS_ERR(regmap)) {
@@ -142,22 +188,25 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
* the clk + regulators must be ready when this happens.
*/
cells[0].name = "tps68470-clk";
- cells[0].platform_data = &clk_pdata;
- cells[0].pdata_size = sizeof(clk_pdata);
+ cells[0].platform_data = clk_pdata;
+ cells[0].pdata_size = struct_size(clk_pdata, consumers, n_consumers);
cells[1].name = "tps68470-regulator";
cells[1].platform_data = (void *)board_data->tps68470_regulator_pdata;
cells[1].pdata_size = sizeof(struct tps68470_regulator_platform_data);
cells[2].name = "tps68470-gpio";
- gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_table);
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_add_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
cells, TPS68470_WIN_MFD_CELL_COUNT,
NULL, 0, NULL);
kfree(cells);
- if (ret)
- gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table);
+ if (ret) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
break;
case DESIGNED_FOR_CHROMEOS:
@@ -178,15 +227,16 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client)
return ret;
}
-static int skl_int3472_tps68470_remove(struct i2c_client *client)
+static void skl_int3472_tps68470_remove(struct i2c_client *client)
{
const struct int3472_tps68470_board_data *board_data;
+ int i;
board_data = int3472_tps68470_get_board_data(dev_name(&client->dev));
- if (board_data)
- gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_table);
-
- return 0;
+ if (board_data) {
+ for (i = 0; i < board_data->n_gpiod_lookups; i++)
+ gpiod_remove_lookup_table(board_data->tps68470_gpio_lookup_tables[i]);
+ }
}
static const struct acpi_device_id int3472_device_id[] = {
diff --git a/drivers/platform/x86/intel/int3472/tps68470.h b/drivers/platform/x86/intel/int3472/tps68470.h
index cfd33eb62740..35915e701593 100644
--- a/drivers/platform/x86/intel/int3472/tps68470.h
+++ b/drivers/platform/x86/intel/int3472/tps68470.h
@@ -16,8 +16,9 @@ struct tps68470_regulator_platform_data;
struct int3472_tps68470_board_data {
const char *dev_name;
- struct gpiod_lookup_table *tps68470_gpio_lookup_table;
const struct tps68470_regulator_platform_data *tps68470_regulator_pdata;
+ unsigned int n_gpiod_lookups;
+ struct gpiod_lookup_table *tps68470_gpio_lookup_tables[];
};
const struct int3472_tps68470_board_data *int3472_tps68470_get_board_data(const char *dev_name);
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index 525f09a3b5ff..309eab9c0558 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -30,6 +30,15 @@ static struct regulator_consumer_supply int347a_vcm_consumer_supplies[] = {
static struct regulator_consumer_supply int347a_vsio_consumer_supplies[] = {
REGULATOR_SUPPLY("dovdd", "i2c-INT347A:00"),
REGULATOR_SUPPLY("vsio", "i2c-INT347A:00-VCM"),
+ REGULATOR_SUPPLY("vddd", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux1_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdda", "i2c-INT347E:00"),
+};
+
+static struct regulator_consumer_supply int347a_aux2_consumer_supplies[] = {
+ REGULATOR_SUPPLY("vdddo", "i2c-INT347E:00"),
};
static const struct regulator_init_data surface_go_tps68470_core_reg_init_data = {
@@ -86,6 +95,28 @@ static const struct regulator_init_data surface_go_tps68470_vsio_reg_init_data =
.consumer_supplies = int347a_vsio_consumer_supplies,
};
+static const struct regulator_init_data surface_go_tps68470_aux1_reg_init_data = {
+ .constraints = {
+ .min_uV = 2815200,
+ .max_uV = 2815200,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux1_consumer_supplies),
+ .consumer_supplies = int347a_aux1_consumer_supplies,
+};
+
+static const struct regulator_init_data surface_go_tps68470_aux2_reg_init_data = {
+ .constraints = {
+ .min_uV = 1800600,
+ .max_uV = 1800600,
+ .apply_uV = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(int347a_aux2_consumer_supplies),
+ .consumer_supplies = int347a_aux2_consumer_supplies,
+};
+
static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata = {
.reg_init_data = {
[TPS68470_CORE] = &surface_go_tps68470_core_reg_init_data,
@@ -93,10 +124,12 @@ static const struct tps68470_regulator_platform_data surface_go_tps68470_pdata =
[TPS68470_VCM] = &surface_go_tps68470_vcm_reg_init_data,
[TPS68470_VIO] = &surface_go_tps68470_vio_reg_init_data,
[TPS68470_VSIO] = &surface_go_tps68470_vsio_reg_init_data,
+ [TPS68470_AUX1] = &surface_go_tps68470_aux1_reg_init_data,
+ [TPS68470_AUX2] = &surface_go_tps68470_aux2_reg_init_data,
},
};
-static struct gpiod_lookup_table surface_go_tps68470_gpios = {
+static struct gpiod_lookup_table surface_go_int347a_gpios = {
.dev_id = "i2c-INT347A:00",
.table = {
GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
@@ -105,16 +138,31 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
}
};
+static struct gpiod_lookup_table surface_go_int347e_gpios = {
+ .dev_id = "i2c-INT347E:00",
+ .table = {
+ GPIO_LOOKUP("tps68470-gpio", 5, "enable", GPIO_ACTIVE_HIGH),
+ { }
+ }
+};
+
static const struct int3472_tps68470_board_data surface_go_tps68470_board_data = {
.dev_name = "i2c-INT3472:05",
- .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 2,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios,
+ &surface_go_int347e_gpios,
+ },
};
static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
.dev_name = "i2c-INT3472:01",
- .tps68470_gpio_lookup_table = &surface_go_tps68470_gpios,
.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+ .n_gpiod_lookups = 1,
+ .tps68470_gpio_lookup_tables = {
+ &surface_go_int347a_gpios
+ },
};
static const struct dmi_system_id int3472_tps68470_board_data_table[] = {
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index 1a09a75bd16d..7c5c623630c1 100644
--- a/drivers/platform/x86/intel/oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -317,7 +317,7 @@ static int __init oaktrail_init(void)
goto err_driver_reg;
}
- oaktrail_device = platform_device_alloc(DRIVER_NAME, -1);
+ oaktrail_device = platform_device_alloc(DRIVER_NAME, PLATFORM_DEVID_NONE);
if (!oaktrail_device) {
pr_warn("Unable to allocate platform device\n");
ret = -ENOMEM;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 84eabd6156bb..cb24de9e97dc 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -113,7 +113,7 @@ show_uncore_perf_status(current_freq_khz);
struct uncore_data *data = container_of(attr, struct uncore_data,\
member_name##_dev_attr);\
\
- return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ return sysfs_emit(buf, "%u\n", \
data->member_name); \
} \
diff --git a/drivers/platform/x86/intel/wmi/thunderbolt.c b/drivers/platform/x86/intel/wmi/thunderbolt.c
index 4ae87060d18b..fc333ff82d1e 100644
--- a/drivers/platform/x86/intel/wmi/thunderbolt.c
+++ b/drivers/platform/x86/intel/wmi/thunderbolt.c
@@ -51,26 +51,7 @@ static struct attribute *tbt_attrs[] = {
&dev_attr_force_power.attr,
NULL
};
-
-static const struct attribute_group tbt_attribute_group = {
- .attrs = tbt_attrs,
-};
-
-static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev,
- const void *context)
-{
- int ret;
-
- ret = sysfs_create_group(&wdev->dev.kobj, &tbt_attribute_group);
- kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
- return ret;
-}
-
-static void intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
-{
- sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
- kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
-}
+ATTRIBUTE_GROUPS(tbt);
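The conversion above drops the open-coded sysfs group handling in probe()/remove() in favour of the driver core managing the attributes. ATTRIBUTE_GROUPS(tbt) roughly expands to the following (a sketch of the helper from <linux/sysfs.h>, not the literal expansion):

static const struct attribute_group tbt_group = {
	.attrs = tbt_attrs,
};

static const struct attribute_group *tbt_groups[] = {
	&tbt_group,
	NULL,
};

With .dev_groups = tbt_groups set on the driver below, the device core creates the group when the WMI device is bound and removes it on unbind, which is why the probe and remove callbacks can be dropped entirely.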
static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
{ .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
@@ -80,9 +61,8 @@ static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
static struct wmi_driver intel_wmi_thunderbolt_driver = {
.driver = {
.name = "intel-wmi-thunderbolt",
+ .dev_groups = tbt_groups,
},
- .probe = intel_wmi_thunderbolt_probe,
- .remove = intel_wmi_thunderbolt_remove,
.id_table = intel_wmi_thunderbolt_id_table,
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 5e072a0666f4..2fac05a17a5c 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -5181,7 +5181,7 @@ static int __init mlxplat_init(void)
if (!dmi_check_system(mlxplat_dmi_table))
return -ENODEV;
- mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
+ mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, PLATFORM_DEVID_NONE,
mlxplat_lpc_resources,
ARRAY_SIZE(mlxplat_lpc_resources));
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 24ffc8e2d2d1..6b18ec543ac3 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -53,8 +53,6 @@
#include <linux/input/sparse-keymap.h>
#include <acpi/video.h>
-#define MSI_DRIVER_VERSION "0.5"
-
#define MSI_LCD_LEVEL_MAX 9
#define MSI_EC_COMMAND_WIRELESS 0x10
@@ -592,15 +590,22 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
return 1;
}
+static unsigned long msi_work_delay(int msecs)
+{
+ if (quirks->ec_delay)
+ return msecs_to_jiffies(msecs);
+
+ return 0;
+}
+
static const struct dmi_system_id msi_dmi_table[] __initconst = {
{
.ident = "MSI S270",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -633,8 +638,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
DMI_MATCH(DMI_PRODUCT_VERSION, "0131"),
- DMI_MATCH(DMI_CHASSIS_VENDOR,
- "MICRO-STAR INT'L CO.,LTD")
+ DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR INT")
},
.driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
@@ -705,6 +709,7 @@ static const struct dmi_system_id msi_dmi_table[] __initconst = {
},
{ }
};
+MODULE_DEVICE_TABLE(dmi, msi_dmi_table);
static int rfkill_bluetooth_set(void *data, bool blocked)
{
@@ -785,7 +790,6 @@ static void msi_update_rfkill(struct work_struct *ignored)
msi_rfkill_set_state(rfk_threeg, !threeg_s);
}
static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
-static DECLARE_WORK(msi_rfkill_work, msi_update_rfkill);
static void msi_send_touchpad_key(struct work_struct *ignored)
{
@@ -801,7 +805,6 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
}
static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
-static DECLARE_WORK(msi_touchpad_work, msi_send_touchpad_key);
static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
@@ -819,20 +822,12 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
extended = false;
switch (data) {
case 0xE4:
- if (quirks->ec_delay) {
- schedule_delayed_work(&msi_touchpad_dwork,
- round_jiffies_relative(0.5 * HZ));
- } else
- schedule_work(&msi_touchpad_work);
+ schedule_delayed_work(&msi_touchpad_dwork, msi_work_delay(500));
break;
case 0x54:
case 0x62:
case 0x76:
- if (quirks->ec_delay) {
- schedule_delayed_work(&msi_rfkill_dwork,
- round_jiffies_relative(0.5 * HZ));
- } else
- schedule_work(&msi_rfkill_work);
+ schedule_delayed_work(&msi_rfkill_dwork, msi_work_delay(500));
break;
}
}
@@ -899,12 +894,7 @@ static int rfkill_init(struct platform_device *sdev)
}
/* schedule to run rfkill state initial */
- if (quirks->ec_delay) {
- schedule_delayed_work(&msi_rfkill_init,
- round_jiffies_relative(1 * HZ));
- } else
- schedule_work(&msi_rfkill_work);
-
+ schedule_delayed_work(&msi_rfkill_init, msi_work_delay(1000));
return 0;
err_threeg:
@@ -921,8 +911,7 @@ err_bluetooth:
return retval;
}
-#ifdef CONFIG_PM_SLEEP
-static int msi_laptop_resume(struct device *device)
+static int msi_scm_disable_hw_fn_handling(void)
{
u8 data;
int result;
@@ -942,6 +931,12 @@ static int msi_laptop_resume(struct device *device)
return 0;
}
+
+#ifdef CONFIG_PM_SLEEP
+static int msi_laptop_resume(struct device *device)
+{
+ return msi_scm_disable_hw_fn_handling();
+}
#endif
static int __init msi_laptop_input_setup(void)
@@ -974,7 +969,6 @@ err_free_dev:
static int __init load_scm_model_init(struct platform_device *sdev)
{
- u8 data;
int result;
if (!quirks->ec_read_only) {
@@ -988,12 +982,7 @@ static int __init load_scm_model_init(struct platform_device *sdev)
}
/* disable hardware control by fn key */
- result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
- if (result < 0)
- return result;
-
- result = ec_write(MSI_STANDARD_EC_SCM_LOAD_ADDRESS,
- data | MSI_STANDARD_EC_SCM_LOAD_MASK);
+ result = msi_scm_disable_hw_fn_handling();
if (result < 0)
return result;
@@ -1022,9 +1011,19 @@ fail_input:
rfkill_cleanup();
fail_rfkill:
-
return result;
+}
+
+static void msi_scm_model_exit(void)
+{
+ if (!quirks->load_scm_model)
+ return;
+ i8042_remove_filter(msi_laptop_i8042_filter);
+ cancel_delayed_work_sync(&msi_touchpad_dwork);
+ input_unregister_device(msi_laptop_input_dev);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ rfkill_cleanup();
}
static int __init msi_init(void)
@@ -1048,8 +1047,7 @@ static int __init msi_init(void)
return -EINVAL;
/* Register backlight stuff */
-
- if (quirks->old_ec_model ||
+ if (quirks->old_ec_model &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -1068,7 +1066,7 @@ static int __init msi_init(void)
/* Register platform stuff */
- msipf_device = platform_device_alloc("msi-laptop-pf", -1);
+ msipf_device = platform_device_alloc("msi-laptop-pf", PLATFORM_DEVID_NONE);
if (!msipf_device) {
ret = -ENOMEM;
goto fail_platform_driver;
@@ -1108,19 +1106,12 @@ static int __init msi_init(void)
set_auto_brightness(auto_brightness);
}
- pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
-
return 0;
fail_create_attr:
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
fail_create_group:
- if (quirks->load_scm_model) {
- i8042_remove_filter(msi_laptop_i8042_filter);
- cancel_delayed_work_sync(&msi_rfkill_dwork);
- cancel_work_sync(&msi_rfkill_work);
- rfkill_cleanup();
- }
+ msi_scm_model_exit();
fail_scm_model_init:
platform_device_del(msipf_device);
fail_device_add:
@@ -1135,14 +1126,7 @@ fail_backlight:
static void __exit msi_cleanup(void)
{
- if (quirks->load_scm_model) {
- i8042_remove_filter(msi_laptop_i8042_filter);
- input_unregister_device(msi_laptop_input_dev);
- cancel_delayed_work_sync(&msi_rfkill_dwork);
- cancel_work_sync(&msi_rfkill_work);
- rfkill_cleanup();
- }
-
+ msi_scm_model_exit();
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
if (!quirks->old_ec_model && threeg_exists)
device_remove_file(&msipf_device->dev, &dev_attr_threeg);
@@ -1155,8 +1139,6 @@ static void __exit msi_cleanup(void)
if (auto_brightness != 2)
set_auto_brightness(1);
}
-
- pr_info("driver unloaded\n");
}
module_init(msi_init);
@@ -1164,16 +1146,4 @@ module_exit(msi_cleanup);
MODULE_AUTHOR("Lennart Poettering");
MODULE_DESCRIPTION("MSI Laptop Support");
-MODULE_VERSION(MSI_DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("dmi:*:svnMICRO-STARINT'LCO.,LTD:pnMS-1013:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1058:pvr0581:rvnMSI:rnMS-1058:*:ct10:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational:pnMS-1412:*:rvnMSI:rnMS-1412:*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
-MODULE_ALIAS("dmi:*:svnNOTEBOOK:pnSAM2000:pvr0131*:cvnMICRO-STARINT'LCO.,LTD:ct10:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
-MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
-MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnU90/U100:*");
diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
index 61e37194df70..baccdf658538 100644
--- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
@@ -7,73 +7,10 @@
#include <linux/backlight.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
#include <linux/types.h>
#include <linux/wmi.h>
-
-/**
- * enum wmi_brightness_method - WMI method IDs
- * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
- * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
- */
-enum wmi_brightness_method {
- WMI_BRIGHTNESS_METHOD_LEVEL = 1,
- WMI_BRIGHTNESS_METHOD_SOURCE = 2,
- WMI_BRIGHTNESS_METHOD_MAX
-};
-
-/**
- * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
- * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
- * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
- * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
- * is only valid when the WMI method is
- * %WMI_BRIGHTNESS_METHOD_LEVEL.
- */
-enum wmi_brightness_mode {
- WMI_BRIGHTNESS_MODE_GET = 0,
- WMI_BRIGHTNESS_MODE_SET = 1,
- WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
- WMI_BRIGHTNESS_MODE_MAX
-};
-
-/**
- * enum wmi_brightness_source - Backlight brightness control source selection
- * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
- * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
- * system's Embedded Controller (EC).
- * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
- * DisplayPort AUX channel.
- */
-enum wmi_brightness_source {
- WMI_BRIGHTNESS_SOURCE_GPU = 1,
- WMI_BRIGHTNESS_SOURCE_EC = 2,
- WMI_BRIGHTNESS_SOURCE_AUX = 3,
- WMI_BRIGHTNESS_SOURCE_MAX
-};
-
-/**
- * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
- * @mode: Pass in an &enum wmi_brightness_mode value to select between
- * getting or setting a value.
- * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
- * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
- * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
- * @ret: Out parameter returning retrieved value when operating in
- * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
- * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
- * @ignored: Padding; not used. The ACPI method expects a 24 byte params struct.
- *
- * This is the parameters structure for the WmiBrightnessNotify ACPI method as
- * wrapped by WMI. The value passed in to @val or returned by @ret will be a
- * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
- * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
- */
-struct wmi_brightness_args {
- u32 mode;
- u32 val;
- u32 ret;
- u32 ignored[3];
-};
+#include <acpi/video.h>
/**
* wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
@@ -151,19 +88,10 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
{
struct backlight_properties props = {};
struct backlight_device *bdev;
- u32 source;
int ret;
- ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_SOURCE,
- WMI_BRIGHTNESS_MODE_GET, &source);
- if (ret)
- return ret;
-
- /*
- * This driver is only to be used when brightness control is handled
- * by the EC; otherwise, the GPU driver(s) should control brightness.
- */
- if (source != WMI_BRIGHTNESS_SOURCE_EC)
+ /* drivers/acpi/video_detect.c also checks that SOURCE == EC */
+ if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
return -ENODEV;
/*
@@ -191,8 +119,6 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
return PTR_ERR_OR_ZERO(bdev);
}
-#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
-
static const struct wmi_device_id nvidia_wmi_ec_backlight_id_table[] = {
{ .guid_string = WMI_BRIGHTNESS_GUID },
{ }
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index fb2e141f3eb8..384d0962ae93 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -42,10 +42,24 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
+/* Copy resource from the first BAR of the device in question */
static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- /* Copy resource from the first BAR of the device in question */
- *mem = pdev->resource[0];
+ struct resource *bar0 = &pdev->resource[0];
+
+ /* Make sure we have no dangling pointers in the output */
+ memset(mem, 0, sizeof(*mem));
+
+ /*
+ * We copy only selected fields from the original resource.
+ * Because a PCI device will be removed soon, we may not use
+ * any allocated data, hence we may not copy any pointers.
+ */
+ mem->start = bar0->start;
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+
return 0;
}
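
The p2sb change replaces the previous whole-struct assignment with a field-by-field copy because struct resource also carries name, parent, sibling and child pointers owned by the PCI device, which is about to be removed; copying those would hand the caller dangling references. A short sketch of the same idea for a generic resource (the helper name is illustrative, not part of the driver):

#include <linux/ioport.h>
#include <linux/string.h>

/* Sketch: copy only the value-type fields of a struct resource. */
static void copy_resource_window(struct resource *dst, const struct resource *src)
{
	memset(dst, 0, sizeof(*dst));	/* no stale name/parent/sibling/child */
	dst->start = src->start;
	dst->end   = src->end;
	dst->flags = src->flags;
	dst->desc  = src->desc;
}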
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index d9a095d2c0eb..ad3083f9946d 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -1034,7 +1034,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
/* optical drive initialization */
if (ACPI_SUCCESS(check_optd_present())) {
pcc->platform = platform_device_register_simple("panasonic",
- -1, NULL, 0);
+ PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pcc->platform)) {
result = PTR_ERR(pcc->platform);
goto out_backlight;
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 154317e9910d..93a6414c6611 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Intel Atom SOC Power Management Controller Driver
- * Copyright (c) 2014, Intel Corporation.
+ * Intel Atom SoC Power Management Controller Driver
+ * Copyright (c) 2014-2015,2017,2022 Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -60,7 +60,7 @@ static const struct pmc_clk byt_clks[] = {
.freq = 19200000,
.parent_name = "xtal",
},
- {},
+ {}
};
static const struct pmc_clk cht_clks[] = {
@@ -69,7 +69,7 @@ static const struct pmc_clk cht_clks[] = {
.freq = 19200000,
.parent_name = NULL,
},
- {},
+ {}
};
static const struct pmc_bit_map d3_sts_0_map[] = {
@@ -105,7 +105,7 @@ static const struct pmc_bit_map d3_sts_0_map[] = {
{"LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5},
{"LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6},
{"LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7},
- {},
+ {}
};
static struct pmc_bit_map byt_d3_sts_1_map[] = {
@@ -113,21 +113,21 @@ static struct pmc_bit_map byt_d3_sts_1_map[] = {
{"OTG_SS_PHY", BIT_OTG_SS_PHY},
{"USH_SS_PHY", BIT_USH_SS_PHY},
{"DFX", BIT_DFX},
- {},
+ {}
};
static struct pmc_bit_map cht_d3_sts_1_map[] = {
{"SMB", BIT_SMB},
{"GMM", BIT_STS_GMM},
{"ISH", BIT_STS_ISH},
- {},
+ {}
};
static struct pmc_bit_map cht_func_dis_2_map[] = {
{"SMB", BIT_SMB},
{"GMM", BIT_FD_GMM},
{"ISH", BIT_FD_ISH},
- {},
+ {}
};
static const struct pmc_bit_map byt_pss_map[] = {
@@ -149,7 +149,7 @@ static const struct pmc_bit_map byt_pss_map[] = {
{"OTG_VCCA", PMC_PSS_BIT_OTG_VCCA},
{"USB", PMC_PSS_BIT_USB},
{"USB_SUS", PMC_PSS_BIT_USB_SUS},
- {},
+ {}
};
static const struct pmc_bit_map cht_pss_map[] = {
@@ -172,7 +172,7 @@ static const struct pmc_bit_map cht_pss_map[] = {
{"DFX_CLUSTER3", PMC_PSS_BIT_CHT_DFX_CLUSTER3},
{"DFX_CLUSTER4", PMC_PSS_BIT_CHT_DFX_CLUSTER4},
{"DFX_CLUSTER5", PMC_PSS_BIT_CHT_DFX_CLUSTER5},
- {},
+ {}
};
static const struct pmc_reg_map byt_reg_map = {
@@ -232,7 +232,7 @@ static void pmc_power_off(void)
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
- pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
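
The pmc_power_off() fix is a classic read-modify-write correction: the old "&= SLEEP_TYPE_MASK" kept only the sleep-type bits and discarded the rest of the register, while the intent is to clear that field with the inverted mask before OR-ing in S5 and the enable bit. A generic, self-contained illustration with made-up constants (the DEMO_* values are not the PMC's actual register layout):

#include <stdint.h>
#include <stdio.h>

#define DEMO_SLEEP_TYPE_MASK	0x1c00u		/* illustrative field mask, bits 10-12 */
#define DEMO_SLEEP_TYPE_S5	0x1c00u		/* illustrative S5 encoding */
#define DEMO_SLEEP_ENABLE	0x2000u

static uint32_t set_sleep_s5(uint32_t reg)
{
	reg &= ~DEMO_SLEEP_TYPE_MASK;	/* clear the field, preserve the other bits */
	reg |= DEMO_SLEEP_TYPE_S5;	/* write the new field value */
	reg |= DEMO_SLEEP_ENABLE;
	return reg;
}

int main(void)
{
	/* Non-field bits (0x5) survive; the old "&= MASK" would have dropped them. */
	printf("0x%x\n", set_sleep_s5(0x0405));
	return 0;
}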
@@ -354,7 +354,7 @@ static bool pmc_clk_is_critical = true;
static int dmi_callback(const struct dmi_system_id *d)
{
- pr_info("%s critclks quirk enabled\n", d->ident);
+ pr_info("%s: PMC critical clocks quirk enabled\n", d->ident);
return 1;
}
@@ -417,8 +417,7 @@ static const struct dmi_system_id critclk_systems[] = {
DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
},
},
-
- { /*sentinel*/ }
+ {}
};
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
@@ -490,15 +489,11 @@ static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
}
-/*
- * Data for PCI driver interface
- *
- * used by pci_match_id() call below.
- */
+/* Data for PCI driver interface used by pci_match_id() call below */
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_data },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_data },
- { 0, },
+ {}
};
static int __init pmc_atom_init(void)
@@ -506,8 +501,9 @@ static int __init pmc_atom_init(void)
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
- /* We look for our device - PCU PMC
- * we assume that there is max. one device.
+ /*
+ * We look for our device - PCU PMC.
+	 * We assume that there is at most one device.
*
* We can't use plain pci_driver mechanism,
* as the device is really a multiple function device,
@@ -519,7 +515,7 @@ static int __init pmc_atom_init(void)
if (ent)
return pmc_setup_dev(pdev, ent);
}
- /* Device not found. */
+ /* Device not found */
return -ENODEV;
}
@@ -527,6 +523,6 @@ device_initcall(pmc_atom_init);
/*
MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface");
+MODULE_DESCRIPTION("Intel Atom SoC Power Management Controller Interface");
MODULE_LICENSE("GPL v2");
*/
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index c187dcdf82f0..b4aa8ba35d2d 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -356,23 +356,13 @@ struct samsung_laptop {
};
struct samsung_quirks {
- bool broken_acpi_video;
bool four_kbd_backlight_levels;
bool enable_kbd_backlight;
- bool use_native_backlight;
bool lid_handling;
};
static struct samsung_quirks samsung_unknown = {};
-static struct samsung_quirks samsung_broken_acpi_video = {
- .broken_acpi_video = true,
-};
-
-static struct samsung_quirks samsung_use_native_backlight = {
- .use_native_backlight = true,
-};
-
static struct samsung_quirks samsung_np740u3e = {
.four_kbd_backlight_levels = true,
.enable_kbd_backlight = true,
@@ -1484,7 +1474,7 @@ static int __init samsung_platform_init(struct samsung_laptop *samsung)
{
struct platform_device *pdev;
- pdev = platform_device_register_simple("samsung", -1, NULL, 0);
+ pdev = platform_device_register_simple("samsung", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -1542,76 +1532,6 @@ static const struct dmi_system_id samsung_dmi_table[] __initconst = {
/* Specific DMI ids for laptop with quirks */
{
.callback = samsung_dmi_matched,
- .ident = "N150P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
- DMI_MATCH(DMI_BOARD_NAME, "N150P"),
- },
- .driver_data = &samsung_use_native_backlight,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "N145P/N250P/N260P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
- },
- .driver_data = &samsung_use_native_backlight,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "N150/N210/N220",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "NF110/NF210/NF310",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "X360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
- DMI_MATCH(DMI_BOARD_NAME, "X360"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "N250P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
- DMI_MATCH(DMI_BOARD_NAME, "N250P"),
- },
- .driver_data = &samsung_use_native_backlight,
- },
- {
- .callback = samsung_dmi_matched,
- .ident = "NC210",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
- DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
- },
- .driver_data = &samsung_broken_acpi_video,
- },
- {
- .callback = samsung_dmi_matched,
.ident = "730U3E/740U3E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
@@ -1654,15 +1574,8 @@ static int __init samsung_init(void)
samsung->handle_backlight = true;
samsung->quirks = quirks;
-#ifdef CONFIG_ACPI
- if (samsung->quirks->broken_acpi_video)
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
- if (samsung->quirks->use_native_backlight)
- acpi_video_set_dmi_backlight_type(acpi_backlight_native);
-
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
samsung->handle_backlight = false;
-#endif
ret = samsung_platform_init(samsung);
if (ret)
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
index ca3647b751d5..ca76076fc706 100644
--- a/drivers/platform/x86/simatic-ipc.c
+++ b/drivers/platform/x86/simatic-ipc.c
@@ -41,10 +41,12 @@ static struct {
{SIMATIC_IPC_IPC127E, SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE},
{SIMATIC_IPC_IPC227D, SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE},
{SIMATIC_IPC_IPC227E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E},
+ {SIMATIC_IPC_IPC227G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
{SIMATIC_IPC_IPC277E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E},
{SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
{SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
{SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
+ {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
};
static int register_platform_devices(u32 station_id)
@@ -65,7 +67,8 @@ static int register_platform_devices(u32 station_id)
}
if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
- if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ if (ledmode == SIMATIC_IPC_DEVICE_127E ||
+ ledmode == SIMATIC_IPC_DEVICE_227G)
pdevname = KBUILD_MODNAME "_leds_gpio";
platform_data.devmode = ledmode;
ipc_led_platform_device =
@@ -80,6 +83,11 @@ static int register_platform_devices(u32 station_id)
ipc_led_platform_device->name);
}
+ if (wdtmode == SIMATIC_IPC_DEVICE_227G) {
+ request_module("w83627hf_wdt");
+ return 0;
+ }
+
if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
platform_data.devmode = wdtmode;
ipc_wdt_platform_device =
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 07ef05f727a2..765fcaba4d12 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -584,7 +584,7 @@ static int sony_pf_add(void)
if (ret)
goto out;
- sony_pf_device = platform_device_alloc("sony-laptop", -1);
+ sony_pf_device = platform_device_alloc("sony-laptop", PLATFORM_DEVID_NONE);
if (!sony_pf_device) {
ret = -ENOMEM;
goto out_platform_registered;
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 9072eb302618..ded26213c420 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -233,7 +233,7 @@ static int __init tc1100_init(void)
if (!wmi_has_guid(GUID))
return -ENODEV;
- tc1100_device = platform_device_alloc("tc1100-wmi", -1);
+ tc1100_device = platform_device_alloc("tc1100-wmi", PLATFORM_DEVID_NONE);
if (!tc1100_device)
return -ENOMEM;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 22d4e8633e30..6a823b850a77 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7623,9 +7623,9 @@ static int __init volume_create_alsa_mixer(void)
data = card->private_data;
data->card = card;
- strlcpy(card->driver, TPACPI_ALSA_DRVNAME,
+ strscpy(card->driver, TPACPI_ALSA_DRVNAME,
sizeof(card->driver));
- strlcpy(card->shortname, TPACPI_ALSA_SHRTNAME,
+ strscpy(card->shortname, TPACPI_ALSA_SHRTNAME,
sizeof(card->shortname));
snprintf(card->mixername, sizeof(card->mixername), "ThinkPad EC %s",
(thinkpad_id.ec_version_str) ?
@@ -10592,10 +10592,9 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
- /* Set AMT correctly now we know current profile */
- if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
- (dytc_capabilities & BIT(DYTC_FC_AMT)))
- dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+ /* Workaround for https://bugzilla.kernel.org/show_bug.cgi?id=216347 */
+ if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
return 0;
}
@@ -11716,7 +11715,7 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.quirks = dmi_id->driver_data;
/* Device initialization */
- tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
@@ -11727,7 +11726,7 @@ static int __init thinkpad_acpi_module_init(void)
}
tpacpi_sensors_pdev = platform_device_register_simple(
TPACPI_HWMON_DRVR_NAME,
- -1, NULL, 0);
+ PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index f7761d98c0fd..6d18fbf8762b 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -192,7 +192,7 @@ static int topstar_platform_init(struct topstar_laptop *topstar)
{
int err;
- topstar->platform = platform_device_alloc(TOPSTAR_LAPTOP_CLASS, -1);
+ topstar->platform = platform_device_alloc(TOPSTAR_LAPTOP_CLASS, PLATFORM_DEVID_NONE);
if (!topstar->platform)
return -ENOMEM;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 0fc9e8b8827b..160abd3b3af8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -23,6 +23,7 @@
#define PROC_INTERFACE_VERSION 1
#include <linux/compiler.h>
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -38,18 +39,24 @@
#include <linux/workqueue.h>
#include <linux/i8042.h>
#include <linux/acpi.h>
-#include <linux/dmi.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/rfkill.h>
+#include <linux/hwmon.h>
#include <linux/iio/iio.h>
#include <linux/toshiba.h>
+#include <acpi/battery.h>
#include <acpi/video.h>
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
MODULE_LICENSE("GPL");
+static int turn_on_panel_on_resume = -1;
+module_param(turn_on_panel_on_resume, int, 0644);
+MODULE_PARM_DESC(turn_on_panel_on_resume,
+	"Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes)");
+
#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
/* Scan code for Fn key on TOS1900 models */
@@ -100,18 +107,21 @@ MODULE_LICENSE("GPL");
#define TOS_NOT_INSTALLED 0x8e00
/* Registers */
+#define HCI_PANEL_POWER_ON 0x0002
#define HCI_FAN 0x0004
#define HCI_TR_BACKLIGHT 0x0005
#define HCI_SYSTEM_EVENT 0x0016
#define HCI_VIDEO_OUT 0x001c
#define HCI_HOTKEY_EVENT 0x001e
#define HCI_LCD_BRIGHTNESS 0x002a
+#define HCI_FAN_RPM 0x0045
#define HCI_WIRELESS 0x0056
#define HCI_ACCELEROMETER 0x006d
#define HCI_COOLING_METHOD 0x007f
#define HCI_KBD_ILLUMINATION 0x0095
#define HCI_ECO_MODE 0x0097
#define HCI_ACCELEROMETER2 0x00a6
+#define HCI_BATTERY_CHARGE_MODE 0x00ba
#define HCI_SYSTEM_INFO 0xc000
#define SCI_PANEL_POWER_ON 0x010d
#define SCI_ILLUMINATION 0x014e
@@ -170,6 +180,9 @@ struct toshiba_acpi_dev {
struct miscdevice miscdev;
struct rfkill *wwan_rfk;
struct iio_dev *indio_dev;
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_device;
+#endif
int force_fan;
int last_key_event;
@@ -185,6 +198,7 @@ struct toshiba_acpi_dev {
unsigned int illumination_supported:1;
unsigned int video_supported:1;
unsigned int fan_supported:1;
+ unsigned int fan_rpm_supported:1;
unsigned int system_event_supported:1;
unsigned int ntfy_supported:1;
unsigned int info_supported:1;
@@ -201,6 +215,7 @@ struct toshiba_acpi_dev {
unsigned int usb_three_supported:1;
unsigned int wwan_supported:1;
unsigned int cooling_method_supported:1;
+ unsigned int battery_charge_mode_supported:1;
unsigned int sysfs_created:1;
unsigned int special_functions;
@@ -272,14 +287,6 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
};
/*
- * List of models which have a broken acpi-video backlight interface and thus
- * need to use the toshiba (vendor) interface instead.
- */
-static const struct dmi_system_id toshiba_vendor_backlight_dmi[] = {
- {}
-};
-
-/*
* Utility
*/
@@ -675,12 +682,15 @@ static void toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
return;
}
- if (out[0] == TOS_INPUT_DATA_ERROR) {
+ if (out[0] == TOS_INPUT_DATA_ERROR || out[0] == TOS_NOT_SUPPORTED) {
/*
* If we receive 0x8300 (Input Data Error), it means that the
* LED device is present, but that we just screwed the input
* parameters.
*
+ * On some laptops 0x8000 (Not supported) is also returned in
+ * this case, so we need to allow for that as well.
+ *
* Let's query the status of the LED to see if we really have a
	 * success response, indicating the actual presence of the LED;
	 * bail out otherwise.
@@ -1282,6 +1292,69 @@ static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state)
return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
}
+/* Battery charge control */
+static void toshiba_battery_charge_mode_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_BATTERY_CHARGE_MODE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->battery_charge_mode_supported = 0;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Battery Charge Mode failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return;
+
+ dev->battery_charge_mode_supported = 1;
+}
+
+static int toshiba_battery_charge_mode_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_BATTERY_CHARGE_MODE, 0, 0, 0, 0x1 };
+ u32 out[TCI_WORDS];
+ int retries = 3;
+
+ do {
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status))
+ pr_err("ACPI call to get Battery Charge Mode failed\n");
+ switch (out[0]) {
+ case TOS_SUCCESS:
+ case TOS_SUCCESS2:
+ *state = out[2];
+ return 0;
+ case TOS_NOT_SUPPORTED:
+ return -ENODEV;
+ case TOS_DATA_NOT_AVAILABLE:
+ retries--;
+ break;
+ default:
+ return -EIO;
+ }
+ } while (retries);
+
+ return -EIO;
+}
+
+static int toshiba_battery_charge_mode_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result = hci_write(dev, HCI_BATTERY_CHARGE_MODE, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to set Battery Charge Mode failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
/* Transflective Backlight */
static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
{
@@ -1616,6 +1689,29 @@ static const struct proc_ops fan_proc_ops = {
.proc_write = fan_proc_write,
};
+/* Fan RPM */
+static int get_fan_rpm(struct toshiba_acpi_dev *dev, u32 *rpm)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_FAN_RPM, 0, 1, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status = tci_raw(dev, in, out);
+
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Fan speed failed\n");
+ return -EIO;
+ }
+
+ if (out[0] == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ if (out[0] == TOS_SUCCESS) {
+ *rpm = out[2];
+ return 0;
+ }
+
+ return -EIO;
+}
+
static int keys_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
@@ -2786,6 +2882,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
dev->hotkey_dev->name = "Toshiba input device";
dev->hotkey_dev->phys = "toshiba_acpi/input0";
dev->hotkey_dev->id.bustype = BUS_HOST;
+ dev->hotkey_dev->dev.parent = &dev->acpi_dev->dev;
if (dev->hotkey_event_type == HCI_SYSTEM_TYPE1 ||
!dev->kbd_function_keys_supported)
@@ -2881,14 +2978,6 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
- /*
- * Tell acpi-video-detect code to prefer vendor backlight on all
- * systems with transflective backlight and on dmi matched systems.
- */
- if (dev->tr_backlight_supported ||
- dmi_check_system(toshiba_vendor_backlight_dmi))
- acpi_video_set_dmi_backlight_type(acpi_backlight_vendor);
-
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
@@ -2916,6 +3005,139 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
return 0;
}
+/* HWMON support for fan */
+#if IS_ENABLED(CONFIG_HWMON)
+static umode_t toshiba_acpi_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return 0444;
+}
+
+static int toshiba_acpi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ /*
+ * There is only a single channel and single attribute (for the
+ * fan) at this point.
+ * This can be replaced with more advanced logic in the future,
+ * should the need arise.
+ */
+ if (type == hwmon_fan && channel == 0 && attr == hwmon_fan_input) {
+ u32 value;
+ int ret;
+
+ ret = get_fan_rpm(toshiba_acpi, &value);
+ if (ret)
+ return ret;
+
+ *val = value;
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_channel_info *toshiba_acpi_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops toshiba_acpi_hwmon_ops = {
+ .is_visible = toshiba_acpi_hwmon_is_visible,
+ .read = toshiba_acpi_hwmon_read,
+};
+
+static const struct hwmon_chip_info toshiba_acpi_hwmon_chip_info = {
+ .ops = &toshiba_acpi_hwmon_ops,
+ .info = toshiba_acpi_hwmon_info,
+};
+#endif
+
+/* ACPI battery hooking */
+static ssize_t charge_control_end_threshold_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 state;
+ int status;
+
+ if (toshiba_acpi == NULL) {
+ pr_err("Toshiba ACPI object invalid\n");
+ return -ENODEV;
+ }
+
+ status = toshiba_battery_charge_mode_get(toshiba_acpi, &state);
+
+ if (status != 0)
+ return status;
+
+ if (state == 1)
+ return sprintf(buf, "80\n");
+ else
+ return sprintf(buf, "100\n");
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ u32 value;
+ int rval;
+
+ if (toshiba_acpi == NULL) {
+ pr_err("Toshiba ACPI object invalid\n");
+ return -ENODEV;
+ }
+
+ rval = kstrtou32(buf, 10, &value);
+ if (rval)
+ return rval;
+
+ if (value < 1 || value > 100)
+ return -EINVAL;
+ rval = toshiba_battery_charge_mode_set(toshiba_acpi,
+ (value < 90) ? 1 : 0);
+ if (rval < 0)
+ return rval;
+ else
+ return count;
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+static struct attribute *toshiba_acpi_battery_attrs[] = {
+ &dev_attr_charge_control_end_threshold.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(toshiba_acpi_battery);
+
+static int toshiba_acpi_battery_add(struct power_supply *battery)
+{
+ if (toshiba_acpi == NULL) {
+ pr_err("Init order issue\n");
+ return -ENODEV;
+ }
+ if (!toshiba_acpi->battery_charge_mode_supported)
+ return -ENODEV;
+ if (device_add_groups(&battery->dev, toshiba_acpi_battery_groups))
+ return -ENODEV;
+ return 0;
+}
+
+static int toshiba_acpi_battery_remove(struct power_supply *battery)
+{
+ device_remove_groups(&battery->dev, toshiba_acpi_battery_groups);
+ return 0;
+}
+
+static struct acpi_battery_hook battery_hook = {
+ .add_battery = toshiba_acpi_battery_add,
+ .remove_battery = toshiba_acpi_battery_remove,
+ .name = "Toshiba Battery Extension",
+};
+
static void print_supported_features(struct toshiba_acpi_dev *dev)
{
pr_info("Supported laptop features:");
@@ -2928,6 +3150,8 @@ static void print_supported_features(struct toshiba_acpi_dev *dev)
pr_cont(" video-out");
if (dev->fan_supported)
pr_cont(" fan");
+ if (dev->fan_rpm_supported)
+ pr_cont(" fan-rpm");
if (dev->tr_backlight_supported)
pr_cont(" transflective-backlight");
if (dev->illumination_supported)
@@ -2956,6 +3180,8 @@ static void print_supported_features(struct toshiba_acpi_dev *dev)
pr_cont(" wwan");
if (dev->cooling_method_supported)
pr_cont(" cooling-method");
+ if (dev->battery_charge_mode_supported)
+ pr_cont(" battery-charge-mode");
pr_cont("\n");
}
@@ -2968,6 +3194,11 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
remove_toshiba_proc_entries(dev);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon_device)
+ hwmon_device_unregister(dev->hwmon_device);
+#endif
+
if (dev->accelerometer_supported && dev->indio_dev) {
iio_device_unregister(dev->indio_dev);
iio_device_free(dev->indio_dev);
@@ -2996,6 +3227,9 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
rfkill_destroy(dev->wwan_rfk);
}
+ if (dev->battery_charge_mode_supported)
+ battery_hook_unregister(&battery_hook);
+
if (toshiba_acpi)
toshiba_acpi = NULL;
@@ -3015,6 +3249,43 @@ static const char *find_hci_method(acpi_handle handle)
return NULL;
}
+/*
+ * Some Toshibas have a broken acpi-video interface for brightness control;
+ * these are quirked in drivers/acpi/video_detect.c to use the GPU's native
+ * backlight device (/sys/class/backlight/intel_backlight) instead.
+ * But these need a HCI_SET call to actually turn the panel back on at resume;
+ * without this call the screen stays black at resume.
+ * Either HCI_LCD_BRIGHTNESS (used by acpi_video's _BCM) or HCI_PANEL_POWER_ON
+ * works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
+ * the configured brightness level.
+ */
+static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+ {
+ /* Toshiba Portégé R700 */
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
+ },
+ },
+ {
+ /* Toshiba Satellite/Portégé R830 */
+ /* Portégé: https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
+ /* Satellite: https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
+ },
+ },
+ {
+ /* Toshiba Satellite/Portégé Z830 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
+ },
+ },
+	{ }
+};
+
static int toshiba_acpi_add(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev;
@@ -3157,12 +3428,32 @@ iio_error:
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
+ ret = get_fan_rpm(dev, &dummy);
+ dev->fan_rpm_supported = !ret;
+
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->fan_rpm_supported) {
+ dev->hwmon_device = hwmon_device_register_with_info(
+ &dev->acpi_dev->dev, "toshiba_acpi_sensors", NULL,
+ &toshiba_acpi_hwmon_chip_info, NULL);
+ if (IS_ERR(dev->hwmon_device)) {
+ dev->hwmon_device = NULL;
+ pr_warn("unable to register hwmon device, skipping\n");
+ }
+ }
+#endif
+
+ if (turn_on_panel_on_resume == -1)
+ turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
+
toshiba_wwan_available(dev);
if (dev->wwan_supported)
toshiba_acpi_setup_wwan_rfkill(dev);
toshiba_cooling_method_available(dev);
+ toshiba_battery_charge_mode_available(dev);
+
print_supported_features(dev);
ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
@@ -3177,6 +3468,13 @@ iio_error:
toshiba_acpi = dev;
+ /*
+ * As the battery hook relies on the static variable toshiba_acpi being
+ * set, this must be done after toshiba_acpi is assigned.
+ */
+ if (dev->battery_charge_mode_supported)
+ battery_hook_register(&battery_hook);
+
return 0;
error:
@@ -3273,6 +3571,9 @@ static int toshiba_acpi_resume(struct device *device)
rfkill_set_hw_state(dev->wwan_rfk, !dev->killswitch);
}
+ if (turn_on_panel_on_resume)
+ hci_write(dev, HCI_PANEL_POWER_ON, 1);
+
return 0;
}
#endif
diff --git a/drivers/platform/x86/winmate-fm07-keys.c b/drivers/platform/x86/winmate-fm07-keys.c
index 2c90c5c7eca2..465ffad81a65 100644
--- a/drivers/platform/x86/winmate-fm07-keys.c
+++ b/drivers/platform/x86/winmate-fm07-keys.c
@@ -161,7 +161,7 @@ static int __init fm07keys_init(void)
return ret;
}
- dev = platform_device_register_simple(DRV_NAME, -1, NULL, 0);
+ dev = platform_device_register_simple(DRV_NAME, PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
pr_err("fm07keys: failed to allocate device, err = %d\n", ret);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aed293b5af81..223550a10d4d 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -95,9 +95,6 @@ module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
-static int acpi_wmi_remove(struct platform_device *device);
-static int acpi_wmi_probe(struct platform_device *device);
-
static const struct acpi_device_id wmi_device_ids[] = {
{"PNP0C14", 0},
{"pnp0c14", 0},
@@ -105,13 +102,10 @@ static const struct acpi_device_id wmi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
-static struct platform_driver acpi_wmi_driver = {
- .driver = {
- .name = "acpi-wmi",
- .acpi_match_table = wmi_device_ids,
- },
- .probe = acpi_wmi_probe,
- .remove = acpi_wmi_remove,
+/* allow duplicate GUIDs as these device drivers use struct wmi_driver */
+static const char * const allow_duplicates[] = {
+ "05901221-D566-11D1-B2F0-00A0C9062910", /* wmi-bmof */
+ NULL
};
/*
@@ -1073,6 +1067,23 @@ static const struct device_type wmi_type_data = {
.release = wmi_dev_release,
};
+/*
+ * _WDG is a static list that is only parsed at startup,
+ * so it's safe to count entries without extra protection.
+ */
+static int guid_count(const guid_t *guid)
+{
+ struct wmi_block *wblock;
+ int count = 0;
+
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+ if (guid_equal(&wblock->gblock.guid, guid))
+ count++;
+ }
+
+ return count;
+}
+
static int wmi_create_device(struct device *wmi_bus_dev,
struct wmi_block *wblock,
struct acpi_device *device)
@@ -1080,6 +1091,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
struct acpi_device_info *info;
char method[WMI_ACPI_METHOD_NAME_SIZE];
int result;
+ uint count;
if (wblock->gblock.flags & ACPI_WMI_EVENT) {
wblock->dev.dev.type = &wmi_type_event;
@@ -1134,7 +1146,11 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
- dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
+ count = guid_count(&wblock->gblock.guid);
+ if (count)
+ dev_set_name(&wblock->dev.dev, "%pUL-%d", &wblock->gblock.guid, count);
+ else
+ dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
device_initialize(&wblock->dev.dev);
@@ -1154,11 +1170,20 @@ static void wmi_free_devices(struct acpi_device *device)
}
}
-static bool guid_already_parsed(struct acpi_device *device, const guid_t *guid)
+static bool guid_already_parsed_for_legacy(struct acpi_device *device, const guid_t *guid)
{
struct wmi_block *wblock;
list_for_each_entry(wblock, &wmi_block_list, list) {
+ /* skip warning and register if we know the driver will use struct wmi_driver */
+ for (int i = 0; allow_duplicates[i] != NULL; i++) {
+ guid_t tmp;
+
+ if (guid_parse(allow_duplicates[i], &tmp))
+ continue;
+ if (guid_equal(&tmp, guid))
+ return false;
+ }
if (guid_equal(&wblock->gblock.guid, guid)) {
/*
* Because we historically didn't track the relationship
@@ -1208,13 +1233,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
if (debug_dump_wdg)
wmi_dump_wdg(&gblock[i]);
- /*
- * Some WMI devices, like those for nVidia hooks, have a
- * duplicate GUID. It's not clear what we should do in this
- * case yet, so for now, we'll just ignore the duplicate
- * for device creation.
- */
- if (guid_already_parsed(device, &gblock[i].guid))
+ if (guid_already_parsed_for_legacy(device, &gblock[i].guid))
continue;
wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
@@ -1449,6 +1468,15 @@ void wmi_driver_unregister(struct wmi_driver *driver)
}
EXPORT_SYMBOL(wmi_driver_unregister);
+static struct platform_driver acpi_wmi_driver = {
+ .driver = {
+ .name = "acpi-wmi",
+ .acpi_match_table = wmi_device_ids,
+ },
+ .probe = acpi_wmi_probe,
+ .remove = acpi_wmi_remove,
+};
+
static int __init acpi_wmi_init(void)
{
int error;
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 480375977435..4acd6fa8d43b 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -663,9 +663,23 @@ static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
},
};
+static int __init chuwi_hi8_init(void)
+{
+ /*
+ * Avoid the acpi_unregister_gsi() call in x86_acpi_irq_helper_get()
+ * breaking the touchscreen + logging various errors when the Windows
+ * BIOS is used.
+ */
+ if (acpi_dev_present("MSSL0001", NULL, 1))
+ return -ENODEV;
+
+ return 0;
+}
+
static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_info = chuwi_hi8_i2c_clients,
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
+ .init = chuwi_hi8_init,
};
#define CZC_EC_EXTRA_PORT 0x68
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index da78dc77aed3..4f05f610391b 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -206,7 +206,8 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
if (i >= 0) {
flags = acpi_dev_irq_flags(gpio->triggering,
gpio->polarity,
- gpio->shareable);
+ gpio->shareable,
+ gpio->wake_capable);
} else {
flags = IORESOURCE_DISABLED;
}
@@ -315,7 +316,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -339,7 +340,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable);
+ flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable, p->wake_capable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h
index 2ce739ff9c1a..f3302006842e 100644
--- a/drivers/pnp/pnpbios/pnpbios.h
+++ b/drivers/pnp/pnpbios/pnpbios.h
@@ -153,7 +153,6 @@ extern int pnpbios_dont_use_current_config;
extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
extern int pnpbios_read_resources_from_node(struct pnp_dev *dev, struct pnp_bios_node *node);
extern int pnpbios_write_resources_to_node(struct pnp_dev *dev, struct pnp_bios_node *node);
-extern void pnpid32_to_pnpid(u32 id, char *str);
extern void pnpbios_print_status(const char * module, u16 status);
extern void pnpbios_calls_init(union pnp_bios_install_struct * header);
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 4a688741a88a..16bc01738be9 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -82,6 +82,7 @@ static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon", .data = (void *)GEN1_REASON_SHIFT },
{ .compatible = "qcom,pms405-pon", .data = (void *)GEN1_REASON_SHIFT },
{ .compatible = "qcom,pm8998-pon", .data = (void *)GEN2_REASON_SHIFT },
+ { .compatible = "qcom,pmk8350-pon", .data = (void *)GEN2_REASON_SHIFT },
{ }
};
MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 1aa8323ad9f6..0bbfe6a7ce4d 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -619,6 +619,21 @@ config CHARGER_MT6360
Average Input Current Regulation, Battery Temperature Sensing,
Over-Temperature Protection, DPDM Detection for BC1.2.
+config CHARGER_MT6370
+ tristate "MediaTek MT6370 Charger Driver"
+ depends on MFD_MT6370
+ depends on REGULATOR
+ depends on IIO
+ select LINEAR_RANGES
+ help
+ Say Y here to enable MT6370 Charger Part.
+ The device supports High-Accuracy Voltage/Current Regulation,
+ Average Input Current Regulation, Battery Temperature Sensing,
+ Over-Temperature Protection, DPDM Detection for BC1.2.
+
+ This driver can also be built as a module. If so, the module
+ will be called "mt6370-charger".
+
config CHARGER_QCOM_SMBB
tristate "Qualcomm Switch-Mode Battery Charger and Boost"
depends on MFD_SPMI_PMIC || COMPILE_TEST
@@ -708,6 +723,12 @@ config CHARGER_BQ256XX
charge management and system power path management devices for single
cell Li-ion and Li-polymer batteries.
+config CHARGER_RK817
+ tristate "Rockchip RK817 PMIC Battery Charger"
+ depends on MFD_RK808
+ help
+ Say Y to include support for Rockchip RK817 Battery Charger.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB3XX Battery Charger"
depends on I2C
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 7f02f36aea55..0ee8653e882e 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -82,6 +82,7 @@ obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
obj-$(CONFIG_CHARGER_MP2629) += mp2629_charger.o
obj-$(CONFIG_CHARGER_MT6360) += mt6360_charger.o
+obj-$(CONFIG_CHARGER_MT6370) += mt6370-charger.o
obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
@@ -91,6 +92,7 @@ obj-$(CONFIG_CHARGER_BQ2515X) += bq2515x_charger.o
obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
obj-$(CONFIG_CHARGER_BQ25980) += bq25980_charger.o
obj-$(CONFIG_CHARGER_BQ256XX) += bq256xx_charger.o
+obj-$(CONFIG_CHARGER_RK817) += rk817_charger.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index ae4be553f424..ea4ad61d4c7e 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -252,12 +252,6 @@ static enum power_supply_property ab8500_chargalg_props[] = {
POWER_SUPPLY_PROP_HEALTH,
};
-struct ab8500_chargalg_sysfs_entry {
- struct attribute attr;
- ssize_t (*show)(struct ab8500_chargalg *di, char *buf);
- ssize_t (*store)(struct ab8500_chargalg *di, const char *buf, size_t length);
-};
-
/**
* ab8500_chargalg_safety_timer_expired() - Expiration of the safety timer
* @timer: pointer to the hrtimer structure
@@ -490,8 +484,6 @@ static int ab8500_chargalg_kick_watchdog(struct ab8500_chargalg *di)
static int ab8500_chargalg_ac_en(struct ab8500_chargalg *di, int enable,
int vset_uv, int iset_ua)
{
- static int ab8500_chargalg_ex_ac_enable_toggle;
-
if (!di->ac_chg || !di->ac_chg->ops.enable)
return -ENXIO;
diff --git a/drivers/power/supply/adp5061.c b/drivers/power/supply/adp5061.c
index 003557043ab3..fcf8ff0bc974 100644
--- a/drivers/power/supply/adp5061.c
+++ b/drivers/power/supply/adp5061.c
@@ -427,11 +427,11 @@ static int adp5061_get_chg_type(struct adp5061_state *st,
if (ret < 0)
return ret;
- chg_type = adp5061_chg_type[ADP5061_CHG_STATUS_1_CHG_STATUS(status1)];
- if (chg_type > ADP5061_CHG_FAST_CV)
+ chg_type = ADP5061_CHG_STATUS_1_CHG_STATUS(status1);
+ if (chg_type >= ARRAY_SIZE(adp5061_chg_type))
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else
- val->intval = chg_type;
+ val->intval = adp5061_chg_type[chg_type];
return ret;
}
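
The adp5061 fix moves the range check before the table lookup: the old code indexed adp5061_chg_type[] with the raw status and only compared the result afterwards, so an unexpected status value read past the array. A small self-contained example of the check-then-index pattern (table and values are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int demo_table[] = { 10, 20, 30, 40 };

/* Return the mapped value, or a fallback when the raw index is out of range. */
static int demo_lookup(unsigned int raw, int fallback)
{
	if (raw >= ARRAY_SIZE(demo_table))
		return fallback;
	return demo_table[raw];
}

int main(void)
{
	printf("%d %d\n", demo_lookup(2, -1), demo_lookup(9, -1));
	return 0;
}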
@@ -493,6 +493,9 @@ static int adp5061_get_battery_status(struct adp5061_state *st,
case 0x4: /* VBAT_SNS > VWEAK */
val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
break;
+ default:
+ val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ break;
}
return ret;
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 5724001e66b9..6b99e1c675b8 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1696,7 +1696,7 @@ error_1:
/* main bq2415x remove function */
-static int bq2415x_remove(struct i2c_client *client)
+static void bq2415x_remove(struct i2c_client *client)
{
struct bq2415x_device *bq = i2c_get_clientdata(client);
@@ -1715,8 +1715,6 @@ static int bq2415x_remove(struct i2c_client *client)
dev_info(bq->dev, "driver unregistered\n");
kfree(bq->name);
-
- return 0;
}
static const struct i2c_device_id bq2415x_i2c_id_table[] = {
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 27f5c7648617..2274679c5ddd 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1901,7 +1901,7 @@ out_pmrt:
return ret;
}
-static int bq24190_remove(struct i2c_client *client)
+static void bq24190_remove(struct i2c_client *client)
{
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;
@@ -1918,8 +1918,6 @@ static int bq24190_remove(struct i2c_client *client)
pm_runtime_put_sync(bdi->dev);
pm_runtime_dont_use_autosuspend(bdi->dev);
pm_runtime_disable(bdi->dev);
-
- return 0;
}
static void bq24190_shutdown(struct i2c_client *client)
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index ecba9ab86faf..a309bbedfe52 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -1077,7 +1077,7 @@ static int bq24257_probe(struct i2c_client *client,
return 0;
}
-static int bq24257_remove(struct i2c_client *client)
+static void bq24257_remove(struct i2c_client *client)
{
struct bq24257_device *bq = i2c_get_clientdata(client);
@@ -1085,8 +1085,6 @@ static int bq24257_remove(struct i2c_client *client)
cancel_delayed_work_sync(&bq->iilimit_setup_work);
bq24257_field_write(bq, F_RESET, 1); /* reset to defaults */
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 852a6fec4339..6020b58c641d 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -613,6 +613,33 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
return 0;
}
+static int bq25890_power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq25890_device *bq = power_supply_get_drvdata(psy);
+ u8 lval;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ lval = bq25890_find_idx(val->intval, TBL_IINLIM);
+ return bq25890_field_write(bq, F_IINLIM, lval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bq25890_power_supply_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
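
With set_property() and property_is_writeable() hooked up, the input current limit becomes settable through the power-supply core instead of being read-only. A hedged sketch of a kernel-side consumer, assuming it already holds a struct power_supply pointer for this charger; the 500 mA figure is only an example (the property uses microamp units):

#include <linux/power_supply.h>

/* Sketch: cap the charger's input current to 500 mA. */
static int demo_limit_input_current(struct power_supply *psy)
{
	const union power_supply_propval val = { .intval = 500000 };

	return power_supply_set_property(psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
}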
/* On the BQ25892 try to get charger-type info from our supplier */
static void bq25890_charger_external_power_changed(struct power_supply *psy)
{
@@ -874,6 +901,8 @@ static const struct power_supply_desc bq25890_power_supply_desc = {
.properties = bq25890_power_supply_props,
.num_properties = ARRAY_SIZE(bq25890_power_supply_props),
.get_property = bq25890_power_supply_get_property,
+ .set_property = bq25890_power_supply_set_property,
+ .property_is_writeable = bq25890_power_supply_property_is_writeable,
.external_power_changed = bq25890_charger_external_power_changed,
};
@@ -946,6 +975,7 @@ static void bq25890_pump_express_work(struct work_struct *data)
return;
error_print:
+ bq25890_field_write(bq, F_PUMPX_EN, 0);
dev_err(bq->dev, "Failed to request hi-voltage charging\n");
}
@@ -1258,7 +1288,7 @@ err_unregister_usb_notifier:
return ret;
}
-static int bq25890_remove(struct i2c_client *client)
+static void bq25890_remove(struct i2c_client *client)
{
struct bq25890_device *bq = i2c_get_clientdata(client);
@@ -1269,8 +1299,6 @@ static int bq25890_remove(struct i2c_client *client)
/* reset all registers to default values */
bq25890_chip_reset(bq);
}
-
- return 0;
}
static void bq25890_shutdown(struct i2c_client *client)
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 35e6a394c0df..8bf048fbd36a 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -868,11 +868,11 @@ enum bq27xxx_dm_reg_id {
BQ27XXX_DM_TERMINATE_VOLTAGE,
};
-#define bq27000_dm_regs 0
-#define bq27010_dm_regs 0
-#define bq2750x_dm_regs 0
-#define bq2751x_dm_regs 0
-#define bq2752x_dm_regs 0
+#define bq27000_dm_regs NULL
+#define bq27010_dm_regs NULL
+#define bq2750x_dm_regs NULL
+#define bq2751x_dm_regs NULL
+#define bq2752x_dm_regs NULL
#if 0 /* not yet tested */
static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
@@ -881,24 +881,24 @@ static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 80, 48, 2, 1000, 32767 },
};
#else
-#define bq27500_dm_regs 0
+#define bq27500_dm_regs NULL
#endif
/* todo create data memory definitions from datasheets and test on chips */
-#define bq27510g1_dm_regs 0
-#define bq27510g2_dm_regs 0
-#define bq27510g3_dm_regs 0
-#define bq27520g1_dm_regs 0
-#define bq27520g2_dm_regs 0
-#define bq27520g3_dm_regs 0
-#define bq27520g4_dm_regs 0
-#define bq27521_dm_regs 0
-#define bq27530_dm_regs 0
-#define bq27531_dm_regs 0
-#define bq27541_dm_regs 0
-#define bq27542_dm_regs 0
-#define bq27546_dm_regs 0
-#define bq27742_dm_regs 0
+#define bq27510g1_dm_regs NULL
+#define bq27510g2_dm_regs NULL
+#define bq27510g3_dm_regs NULL
+#define bq27520g1_dm_regs NULL
+#define bq27520g2_dm_regs NULL
+#define bq27520g3_dm_regs NULL
+#define bq27520g4_dm_regs NULL
+#define bq27521_dm_regs NULL
+#define bq27530_dm_regs NULL
+#define bq27531_dm_regs NULL
+#define bq27541_dm_regs NULL
+#define bq27542_dm_regs NULL
+#define bq27546_dm_regs NULL
+#define bq27742_dm_regs NULL
#if 0 /* not yet tested */
static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
@@ -907,7 +907,7 @@ static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 80, 67, 2, 2800, 3700 },
};
#else
-#define bq27545_dm_regs 0
+#define bq27545_dm_regs NULL
#endif
static struct bq27xxx_dm_reg bq27411_dm_regs[] = {
@@ -937,7 +937,7 @@ static struct bq27xxx_dm_reg bq27426_dm_regs[] = {
#if 0 /* not yet tested */
#define bq27441_dm_regs bq27421_dm_regs
#else
-#define bq27441_dm_regs 0
+#define bq27441_dm_regs NULL
#endif
#if 0 /* not yet tested */
@@ -947,13 +947,13 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 9, 2, 2500, 3700 },
};
#else
-#define bq27621_dm_regs 0
+#define bq27621_dm_regs NULL
#endif
-#define bq27z561_dm_regs 0
-#define bq28z610_dm_regs 0
-#define bq34z100_dm_regs 0
-#define bq78z100_dm_regs 0
+#define bq27z561_dm_regs NULL
+#define bq28z610_dm_regs NULL
+#define bq34z100_dm_regs NULL
+#define bq78z100_dm_regs NULL
#define BQ27XXX_O_ZERO BIT(0)
#define BQ27XXX_O_OTDC BIT(1) /* has OTC/OTD overtemperature flags */
@@ -1044,12 +1044,12 @@ struct bq27xxx_dm_buf {
.block = (di)->dm_regs[i].offset / BQ27XXX_DM_SZ, \
}
-static inline u16 *bq27xxx_dm_reg_ptr(struct bq27xxx_dm_buf *buf,
+static inline __be16 *bq27xxx_dm_reg_ptr(struct bq27xxx_dm_buf *buf,
struct bq27xxx_dm_reg *reg)
{
if (buf->class == reg->subclass_id &&
buf->block == reg->offset / BQ27XXX_DM_SZ)
- return (u16 *) (buf->data + reg->offset % BQ27XXX_DM_SZ);
+ return (__be16 *) (buf->data + reg->offset % BQ27XXX_DM_SZ);
return NULL;
}
@@ -1275,7 +1275,7 @@ static void bq27xxx_battery_update_dm_block(struct bq27xxx_device_info *di,
{
struct bq27xxx_dm_reg *reg = &di->dm_regs[reg_id];
const char *str = bq27xxx_dm_reg_name[reg_id];
- u16 *prev = bq27xxx_dm_reg_ptr(buf, reg);
+ __be16 *prev = bq27xxx_dm_reg_ptr(buf, reg);
if (prev == NULL) {
dev_warn(di->dev, "buffer does not match %s dm spec\n", str);
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index cf38cbfe13e9..94b00bb89c17 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -205,7 +205,7 @@ err_failed:
return ret;
}
-static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
+static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
@@ -214,8 +214,6 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
mutex_lock(&battery_mutex);
idr_remove(&battery_id, di->id);
mutex_unlock(&battery_mutex);
-
- return 0;
}
static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 60e0ce105a29..be9764541d52 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -5,7 +5,7 @@
* Copyright (C) 2017 Tony Lindgren <tony@atomide.com>
*
* Rewritten for Linux power framework with some parts based on
- * on earlier driver found in the Motorola Linux kernel:
+ * earlier driver found in the Motorola Linux kernel:
*
* Copyright (C) 2009-2010 Motorola, Inc.
*/
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 728e2a6cc9c3..6d52641151d9 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/devm-helpers.h>
#define CW2015_SIZE_BATINFO 64
@@ -698,7 +699,8 @@ static int cw_bat_probe(struct i2c_client *client)
}
cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery");
- INIT_DELAYED_WORK(&cw_bat->battery_delay_work, cw_bat_work);
+ devm_delayed_work_autocancel(&client->dev,
+ &cw_bat->battery_delay_work, cw_bat_work);
queue_delayed_work(cw_bat->battery_workqueue,
&cw_bat->battery_delay_work, msecs_to_jiffies(10));
return 0;
@@ -725,15 +727,6 @@ static int __maybe_unused cw_bat_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume);
-static int cw_bat_remove(struct i2c_client *client)
-{
- struct cw_battery *cw_bat = i2c_get_clientdata(client);
-
- cancel_delayed_work_sync(&cw_bat->battery_delay_work);
- power_supply_put_battery_info(cw_bat->rk_bat, cw_bat->battery);
- return 0;
-}
-
static const struct i2c_device_id cw_bat_id_table[] = {
{ "cw2015", 0 },
{ }
@@ -752,7 +745,6 @@ static struct i2c_driver cw_bat_driver = {
.pm = &cw_bat_pm_ops,
},
.probe_new = cw_bat_probe,
- .remove = cw_bat_remove,
.id_table = cw_bat_id_table,
};
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index 9ae273fde7a2..d78cd05402f6 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -312,7 +312,7 @@ static void ds278x_power_supply_init(struct power_supply_desc *battery)
battery->external_power_changed = NULL;
}
-static int ds278x_battery_remove(struct i2c_client *client)
+static void ds278x_battery_remove(struct i2c_client *client)
{
struct ds278x_info *info = i2c_get_clientdata(client);
int id = info->id;
@@ -325,8 +325,6 @@ static int ds278x_battery_remove(struct i2c_client *client)
mutex_lock(&battery_lock);
idr_remove(&battery_id, id);
mutex_unlock(&battery_lock);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/power/supply/lp8727_charger.c b/drivers/power/supply/lp8727_charger.c
index 9ee54e397754..384a374b52c1 100644
--- a/drivers/power/supply/lp8727_charger.c
+++ b/drivers/power/supply/lp8727_charger.c
@@ -590,13 +590,12 @@ static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
return 0;
}
-static int lp8727_remove(struct i2c_client *cl)
+static void lp8727_remove(struct i2c_client *cl)
{
struct lp8727_chg *pchg = i2c_get_clientdata(cl);
lp8727_release_irq(pchg);
lp8727_unregister_psy(pchg);
- return 0;
}
static const struct of_device_id lp8727_dt_ids[] = {
diff --git a/drivers/power/supply/max1721x_battery.c b/drivers/power/supply/max1721x_battery.c
index 473e53cd2801..d8d52e09da7b 100644
--- a/drivers/power/supply/max1721x_battery.c
+++ b/drivers/power/supply/max1721x_battery.c
@@ -444,5 +444,5 @@ module_w1_family(w1_max1721x_family);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex A. Mihaylov <minimumlaw@rambler.ru>");
-MODULE_DESCRIPTION("Maxim MAX17211/MAX17215 Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim MAX17211/MAX17215 Fuel Gauge IC driver");
MODULE_ALIAS("w1-family-" __stringify(W1_MAX1721X_FAMILY_ID));
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
new file mode 100644
index 000000000000..f27dae5043f5
--- /dev/null
+++ b/drivers/power/supply/mt6370-charger.c
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiaEn Wu <chiaen_wu@richtek.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/devm-helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/workqueue.h>
+
+#define MT6370_REG_CHG_CTRL1 0x111
+#define MT6370_REG_CHG_CTRL2 0x112
+#define MT6370_REG_CHG_CTRL3 0x113
+#define MT6370_REG_CHG_CTRL4 0x114
+#define MT6370_REG_CHG_CTRL5 0x115
+#define MT6370_REG_CHG_CTRL6 0x116
+#define MT6370_REG_CHG_CTRL7 0x117
+#define MT6370_REG_CHG_CTRL8 0x118
+#define MT6370_REG_CHG_CTRL9 0x119
+#define MT6370_REG_CHG_CTRL10 0x11A
+#define MT6370_REG_DEVICE_TYPE 0x122
+#define MT6370_REG_USB_STATUS1 0x127
+#define MT6370_REG_CHG_STAT 0x14A
+#define MT6370_REG_FLED_EN 0x17E
+#define MT6370_REG_CHG_STAT1 0x1D0
+#define MT6370_REG_OVPCTRL_STAT 0x1D8
+
+#define MT6370_VOBST_MASK GENMASK(7, 2)
+#define MT6370_OTG_PIN_EN_MASK BIT(1)
+#define MT6370_OPA_MODE_MASK BIT(0)
+#define MT6370_OTG_OC_MASK GENMASK(2, 0)
+
+#define MT6370_MIVR_IBUS_TH_100_mA 100000
+#define MT6370_ADC_CHAN_IBUS 5
+#define MT6370_ADC_CHAN_MAX 9
+
+enum mt6370_chg_reg_field {
+ /* MT6370_REG_CHG_CTRL2 */
+ F_IINLMTSEL, F_CFO_EN, F_CHG_EN,
+ /* MT6370_REG_CHG_CTRL3 */
+ F_IAICR, F_AICR_EN, F_ILIM_EN,
+ /* MT6370_REG_CHG_CTRL4 */
+ F_VOREG,
+ /* MT6370_REG_CHG_CTRL6 */
+ F_VMIVR,
+ /* MT6370_REG_CHG_CTRL7 */
+ F_ICHG,
+ /* MT6370_REG_CHG_CTRL8 */
+ F_IPREC,
+ /* MT6370_REG_CHG_CTRL9 */
+ F_IEOC,
+ /* MT6370_REG_DEVICE_TYPE */
+ F_USBCHGEN,
+ /* MT6370_REG_USB_STATUS1 */
+ F_USB_STAT, F_CHGDET,
+ /* MT6370_REG_CHG_STAT */
+ F_CHG_STAT, F_BOOST_STAT, F_VBAT_LVL,
+ /* MT6370_REG_FLED_EN */
+ F_FL_STROBE,
+ /* MT6370_REG_CHG_STAT1 */
+ F_CHG_MIVR_STAT,
+ /* MT6370_REG_OVPCTRL_STAT */
+ F_UVP_D_STAT,
+ F_MAX
+};
+
+enum mt6370_irq {
+ MT6370_IRQ_ATTACH_I = 0,
+ MT6370_IRQ_UVP_D_EVT,
+ MT6370_IRQ_MIVR,
+ MT6370_IRQ_MAX
+};
+
+struct mt6370_priv {
+ struct device *dev;
+ struct iio_channel *iio_adcs;
+ struct mutex attach_lock;
+ struct power_supply *psy;
+ struct regmap *regmap;
+ struct regmap_field *rmap_fields[F_MAX];
+ struct regulator_dev *rdev;
+ struct workqueue_struct *wq;
+ struct work_struct bc12_work;
+ struct delayed_work mivr_dwork;
+ unsigned int irq_nums[MT6370_IRQ_MAX];
+ int attach;
+ int psy_usb_type;
+ bool pwr_rdy;
+};
+
+enum mt6370_usb_status {
+ MT6370_USB_STAT_NO_VBUS = 0,
+ MT6370_USB_STAT_VBUS_FLOW_IS_UNDER_GOING,
+ MT6370_USB_STAT_SDP,
+ MT6370_USB_STAT_SDP_NSTD,
+ MT6370_USB_STAT_DCP,
+ MT6370_USB_STAT_CDP,
+ MT6370_USB_STAT_MAX
+};
+
+struct mt6370_chg_field {
+ const char *name;
+ const struct linear_range *range;
+ struct reg_field field;
+};
+
+enum {
+ MT6370_RANGE_F_IAICR = 0,
+ MT6370_RANGE_F_VOREG,
+ MT6370_RANGE_F_VMIVR,
+ MT6370_RANGE_F_ICHG,
+ MT6370_RANGE_F_IPREC,
+ MT6370_RANGE_F_IEOC,
+ MT6370_RANGE_F_MAX
+};
+
+static const struct linear_range mt6370_chg_ranges[MT6370_RANGE_F_MAX] = {
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_IAICR, 100000, 0x0, 0x3F, 50000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_VOREG, 3900000, 0x0, 0x51, 10000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_VMIVR, 3900000, 0x0, 0x5F, 100000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_ICHG, 900000, 0x08, 0x31, 100000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_IPREC, 100000, 0x0, 0x0F, 50000),
+ LINEAR_RANGE_IDX(MT6370_RANGE_F_IEOC, 100000, 0x0, 0x0F, 50000),
+};
+
+#define MT6370_CHG_FIELD(_fd, _reg, _lsb, _msb) \
+[_fd] = { \
+ .name = #_fd, \
+ .range = NULL, \
+ .field = REG_FIELD(_reg, _lsb, _msb), \
+}
+
+#define MT6370_CHG_FIELD_RANGE(_fd, _reg, _lsb, _msb) \
+[_fd] = { \
+ .name = #_fd, \
+ .range = &mt6370_chg_ranges[MT6370_RANGE_##_fd], \
+ .field = REG_FIELD(_reg, _lsb, _msb), \
+}
+
+static const struct mt6370_chg_field mt6370_chg_fields[F_MAX] = {
+ MT6370_CHG_FIELD(F_IINLMTSEL, MT6370_REG_CHG_CTRL2, 2, 3),
+ MT6370_CHG_FIELD(F_CFO_EN, MT6370_REG_CHG_CTRL2, 1, 1),
+ MT6370_CHG_FIELD(F_CHG_EN, MT6370_REG_CHG_CTRL2, 0, 0),
+ MT6370_CHG_FIELD_RANGE(F_IAICR, MT6370_REG_CHG_CTRL3, 2, 7),
+ MT6370_CHG_FIELD(F_AICR_EN, MT6370_REG_CHG_CTRL3, 1, 1),
+ MT6370_CHG_FIELD(F_ILIM_EN, MT6370_REG_CHG_CTRL3, 0, 0),
+ MT6370_CHG_FIELD_RANGE(F_VOREG, MT6370_REG_CHG_CTRL4, 1, 7),
+ MT6370_CHG_FIELD_RANGE(F_VMIVR, MT6370_REG_CHG_CTRL6, 1, 7),
+ MT6370_CHG_FIELD_RANGE(F_ICHG, MT6370_REG_CHG_CTRL7, 2, 7),
+ MT6370_CHG_FIELD_RANGE(F_IPREC, MT6370_REG_CHG_CTRL8, 0, 3),
+ MT6370_CHG_FIELD_RANGE(F_IEOC, MT6370_REG_CHG_CTRL9, 4, 7),
+ MT6370_CHG_FIELD(F_USBCHGEN, MT6370_REG_DEVICE_TYPE, 7, 7),
+ MT6370_CHG_FIELD(F_USB_STAT, MT6370_REG_USB_STATUS1, 4, 6),
+ MT6370_CHG_FIELD(F_CHGDET, MT6370_REG_USB_STATUS1, 3, 3),
+ MT6370_CHG_FIELD(F_CHG_STAT, MT6370_REG_CHG_STAT, 6, 7),
+ MT6370_CHG_FIELD(F_BOOST_STAT, MT6370_REG_CHG_STAT, 3, 3),
+ MT6370_CHG_FIELD(F_VBAT_LVL, MT6370_REG_CHG_STAT, 5, 5),
+ MT6370_CHG_FIELD(F_FL_STROBE, MT6370_REG_FLED_EN, 2, 2),
+ MT6370_CHG_FIELD(F_CHG_MIVR_STAT, MT6370_REG_CHG_STAT1, 6, 6),
+ MT6370_CHG_FIELD(F_UVP_D_STAT, MT6370_REG_OVPCTRL_STAT, 4, 4),
+};
+
+static inline int mt6370_chg_field_get(struct mt6370_priv *priv,
+ enum mt6370_chg_reg_field fd,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_field_read(priv->rmap_fields[fd], &reg_val);
+ if (ret)
+ return ret;
+
+ if (mt6370_chg_fields[fd].range)
+ return linear_range_get_value(mt6370_chg_fields[fd].range,
+ reg_val, val);
+
+ *val = reg_val;
+ return 0;
+}
+
+static inline int mt6370_chg_field_set(struct mt6370_priv *priv,
+ enum mt6370_chg_reg_field fd,
+ unsigned int val)
+{
+ int ret;
+ bool f;
+ const struct linear_range *r;
+
+ if (mt6370_chg_fields[fd].range) {
+ r = mt6370_chg_fields[fd].range;
+
+ if (fd == F_VMIVR) {
+ ret = linear_range_get_selector_high(r, val, &val, &f);
+ if (ret)
+ val = r->max_sel;
+ } else {
+ linear_range_get_selector_within(r, val, &val);
+ }
+ }
+
+ return regmap_field_write(priv->rmap_fields[fd], val);
+}
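+
+/*
+ * Illustrative sketch (the helper name is made up, not part of this driver):
+ * how the linear ranges above turn a physical request into a register
+ * selector. For F_ICHG the range starts at 900000 uA at selector 0x08 with
+ * 100000 uA steps, so a request of 1500000 uA maps to 0x08 + 6 = 0x0E.
+ */
+static __maybe_unused unsigned int mt6370_chg_example_ichg_sel(unsigned int ichg_ua)
+{
+	unsigned int sel;
+
+	linear_range_get_selector_within(&mt6370_chg_ranges[MT6370_RANGE_F_ICHG],
+					 ichg_ua, &sel);
+	return sel;	/* 0x0E for ichg_ua == 1500000 */
+}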
+
+enum {
+ MT6370_CHG_STAT_READY = 0,
+ MT6370_CHG_STAT_CHARGE_IN_PROGRESS,
+ MT6370_CHG_STAT_DONE,
+ MT6370_CHG_STAT_FAULT,
+ MT6370_CHG_STAT_MAX
+};
+
+enum {
+ MT6370_ATTACH_STAT_DETACH = 0,
+ MT6370_ATTACH_STAT_ATTACH_WAIT_FOR_BC12,
+ MT6370_ATTACH_STAT_ATTACH_BC12_DONE,
+ MT6370_ATTACH_STAT_ATTACH_MAX
+};
+
+static int mt6370_chg_otg_of_parse_cb(struct device_node *of,
+ const struct regulator_desc *rdesc,
+ struct regulator_config *rcfg)
+{
+ struct mt6370_priv *priv = rcfg->driver_data;
+
+ rcfg->ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(of),
+ "enable", 0, GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ rdesc->name);
+ if (IS_ERR(rcfg->ena_gpiod)) {
+ rcfg->ena_gpiod = NULL;
+ return 0;
+ }
+
+ return regmap_update_bits(priv->regmap, MT6370_REG_CHG_CTRL1,
+ MT6370_OTG_PIN_EN_MASK,
+ MT6370_OTG_PIN_EN_MASK);
+}
+
+static void mt6370_chg_bc12_work_func(struct work_struct *work)
+{
+ struct mt6370_priv *priv = container_of(work, struct mt6370_priv,
+ bc12_work);
+ int ret;
+ bool rpt_psy = false;
+ unsigned int attach, usb_stat;
+
+ mutex_lock(&priv->attach_lock);
+ attach = priv->attach;
+
+ switch (attach) {
+ case MT6370_ATTACH_STAT_DETACH:
+ usb_stat = 0;
+ break;
+ case MT6370_ATTACH_STAT_ATTACH_WAIT_FOR_BC12:
+ ret = mt6370_chg_field_set(priv, F_USBCHGEN, attach);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable USB CHG EN\n");
+ goto bc12_work_func_out;
+ case MT6370_ATTACH_STAT_ATTACH_BC12_DONE:
+ ret = mt6370_chg_field_get(priv, F_USB_STAT, &usb_stat);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get USB status\n");
+ goto bc12_work_func_out;
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Invalid attach state\n");
+ goto bc12_work_func_out;
+ }
+
+ rpt_psy = true;
+
+ switch (usb_stat) {
+ case MT6370_USB_STAT_SDP:
+ case MT6370_USB_STAT_SDP_NSTD:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case MT6370_USB_STAT_DCP:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case MT6370_USB_STAT_CDP:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case MT6370_USB_STAT_NO_VBUS:
+ case MT6370_USB_STAT_VBUS_FLOW_IS_UNDER_GOING:
+ default:
+ priv->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+
+bc12_work_func_out:
+ mutex_unlock(&priv->attach_lock);
+
+ if (rpt_psy)
+ power_supply_changed(priv->psy);
+}
+
+static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv)
+{
+ int ret;
+ unsigned int fl_strobe;
+
+	/* Check if the flash LED is in strobe mode */
+ ret = mt6370_chg_field_get(priv, F_FL_STROBE, &fl_strobe);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get FL_STROBE_EN\n");
+ return ret;
+ }
+
+ if (fl_strobe) {
+ dev_err(priv->dev, "Flash led is still in strobe mode\n");
+ return ret;
+ }
+
+ /* cfo off */
+ ret = mt6370_chg_field_set(priv, F_CFO_EN, 0);
+ if (ret) {
+ dev_err(priv->dev, "Failed to disable CFO_EN\n");
+ return ret;
+ }
+
+ /* cfo on */
+ ret = mt6370_chg_field_set(priv, F_CFO_EN, 1);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable CFO_EN\n");
+
+ return ret;
+}
+
+static int mt6370_chg_read_adc_chan(struct mt6370_priv *priv, unsigned int chan,
+ int *val)
+{
+ int ret;
+
+ if (chan >= MT6370_ADC_CHAN_MAX)
+ return -EINVAL;
+
+ ret = iio_read_channel_processed(&priv->iio_adcs[chan], val);
+ if (ret)
+ dev_err(priv->dev, "Failed to read ADC\n");
+
+ return ret;
+}
+
+static void mt6370_chg_mivr_dwork_func(struct work_struct *work)
+{
+ struct mt6370_priv *priv = container_of(work, struct mt6370_priv,
+ mivr_dwork.work);
+ int ret;
+ unsigned int mivr_stat, ibus;
+
+ ret = mt6370_chg_field_get(priv, F_CHG_MIVR_STAT, &mivr_stat);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get mivr state\n");
+ goto mivr_handler_out;
+ }
+
+ if (!mivr_stat)
+ goto mivr_handler_out;
+
+ ret = mt6370_chg_read_adc_chan(priv, MT6370_ADC_CHAN_IBUS, &ibus);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get ibus\n");
+ goto mivr_handler_out;
+ }
+
+ if (ibus < MT6370_MIVR_IBUS_TH_100_mA) {
+ ret = mt6370_chg_toggle_cfo(priv);
+ if (ret)
+ dev_err(priv->dev, "Failed to toggle cfo\n");
+ }
+
+mivr_handler_out:
+ enable_irq(priv->irq_nums[MT6370_IRQ_MIVR]);
+ pm_relax(priv->dev);
+}
+
+static void mt6370_chg_pwr_rdy_check(struct mt6370_priv *priv)
+{
+ int ret;
+ unsigned int opposite_pwr_rdy, otg_en;
+ union power_supply_propval val;
+
+	/* Check whether we are in OTG mode or not */
+ ret = mt6370_chg_field_get(priv, F_BOOST_STAT, &otg_en);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get OTG state\n");
+ return;
+ }
+
+ if (otg_en)
+ return;
+
+ ret = mt6370_chg_field_get(priv, F_UVP_D_STAT, &opposite_pwr_rdy);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get opposite power ready state\n");
+ return;
+ }
+
+ val.intval = opposite_pwr_rdy ?
+ MT6370_ATTACH_STAT_DETACH :
+ MT6370_ATTACH_STAT_ATTACH_WAIT_FOR_BC12;
+
+ ret = power_supply_set_property(priv->psy, POWER_SUPPLY_PROP_ONLINE,
+ &val);
+ if (ret)
+ dev_err(priv->dev, "Failed to start attach/detach flow\n");
+}
+
+static int mt6370_chg_get_online(struct mt6370_priv *priv,
+ union power_supply_propval *val)
+{
+ mutex_lock(&priv->attach_lock);
+ val->intval = !!priv->attach;
+ mutex_unlock(&priv->attach_lock);
+
+ return 0;
+}
+
+static int mt6370_chg_get_status(struct mt6370_priv *priv,
+ union power_supply_propval *val)
+{
+ int ret;
+ unsigned int chg_stat;
+ union power_supply_propval online;
+
+ ret = power_supply_get_property(priv->psy, POWER_SUPPLY_PROP_ONLINE,
+ &online);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get online status\n");
+ return ret;
+ }
+
+ if (!online.intval) {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ return 0;
+ }
+
+ ret = mt6370_chg_field_get(priv, F_CHG_STAT, &chg_stat);
+ if (ret)
+ return ret;
+
+ switch (chg_stat) {
+ case MT6370_CHG_STAT_READY:
+ case MT6370_CHG_STAT_FAULT:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return ret;
+ case MT6370_CHG_STAT_CHARGE_IN_PROGRESS:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ return ret;
+ case MT6370_CHG_STAT_DONE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ return ret;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ return ret;
+ }
+}
+
+static int mt6370_chg_get_charge_type(struct mt6370_priv *priv,
+ union power_supply_propval *val)
+{
+ int type, ret;
+ unsigned int chg_stat, vbat_lvl;
+
+ ret = mt6370_chg_field_get(priv, F_CHG_STAT, &chg_stat);
+ if (ret)
+ return ret;
+
+ ret = mt6370_chg_field_get(priv, F_VBAT_LVL, &vbat_lvl);
+ if (ret)
+ return ret;
+
+ switch (chg_stat) {
+ case MT6370_CHG_STAT_CHARGE_IN_PROGRESS:
+ if (vbat_lvl)
+ type = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else
+ type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case MT6370_CHG_STAT_READY:
+ case MT6370_CHG_STAT_DONE:
+ case MT6370_CHG_STAT_FAULT:
+ default:
+ type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ }
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int mt6370_chg_set_online(struct mt6370_priv *priv,
+ const union power_supply_propval *val)
+{
+ bool pwr_rdy = !!val->intval;
+
+ mutex_lock(&priv->attach_lock);
+ if (pwr_rdy == !!priv->attach) {
+ dev_err(priv->dev, "pwr_rdy is same(%d)\n", pwr_rdy);
+ mutex_unlock(&priv->attach_lock);
+ return 0;
+ }
+
+ priv->attach = pwr_rdy;
+ mutex_unlock(&priv->attach_lock);
+
+ if (!queue_work(priv->wq, &priv->bc12_work))
+		dev_err(priv->dev, "bc12 work is already queued\n");
+
+ return 0;
+}
+
+static int mt6370_chg_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mt6370_priv *priv = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return mt6370_chg_get_online(priv, val);
+ case POWER_SUPPLY_PROP_STATUS:
+ return mt6370_chg_get_status(priv, val);
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ return mt6370_chg_get_charge_type(priv, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return mt6370_chg_field_get(priv, F_ICHG, &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = linear_range_get_max_value(&mt6370_chg_ranges[MT6370_RANGE_F_ICHG]);
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return mt6370_chg_field_get(priv, F_VOREG, &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = linear_range_get_max_value(&mt6370_chg_ranges[MT6370_RANGE_F_VOREG]);
+ return 0;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return mt6370_chg_field_get(priv, F_IAICR, &val->intval);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return mt6370_chg_field_get(priv, F_VMIVR, &val->intval);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return mt6370_chg_field_get(priv, F_IPREC, &val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return mt6370_chg_field_get(priv, F_IEOC, &val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = priv->psy_usb_type;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt6370_chg_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mt6370_priv *priv = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return mt6370_chg_set_online(priv, val);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return mt6370_chg_field_set(priv, F_ICHG, val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return mt6370_chg_field_set(priv, F_VOREG, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return mt6370_chg_field_set(priv, F_IAICR, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return mt6370_chg_field_set(priv, F_VMIVR, val->intval);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return mt6370_chg_field_set(priv, F_IPREC, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return mt6370_chg_field_set(priv, F_IEOC, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mt6370_chg_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static enum power_supply_property mt6370_chg_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static enum power_supply_usb_type mt6370_chg_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+};
+
+static const struct power_supply_desc mt6370_chg_psy_desc = {
+ .name = "mt6370-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = mt6370_chg_properties,
+ .num_properties = ARRAY_SIZE(mt6370_chg_properties),
+ .get_property = mt6370_chg_get_property,
+ .set_property = mt6370_chg_set_property,
+ .property_is_writeable = mt6370_chg_property_is_writeable,
+ .usb_types = mt6370_chg_usb_types,
+ .num_usb_types = ARRAY_SIZE(mt6370_chg_usb_types),
+};
+
+static const struct regulator_ops mt6370_chg_otg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+};
+
+static const u32 mt6370_chg_otg_oc_ma[] = {
+ 500000, 700000, 1100000, 1300000, 1800000, 2100000, 2400000,
+};
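+
+/*
+ * Note on units: despite the "_ma" suffix, the entries above are in
+ * microamps; the regulator core's current-limit helpers expect curr_table
+ * values in uA.
+ */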
+
+static const struct regulator_desc mt6370_chg_otg_rdesc = {
+ .of_match = "usb-otg-vbus-regulator",
+ .of_parse_cb = mt6370_chg_otg_of_parse_cb,
+ .name = "mt6370-usb-otg-vbus",
+ .ops = &mt6370_chg_otg_ops,
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .min_uV = 4425000,
+ .uV_step = 25000,
+ .n_voltages = 57,
+ .vsel_reg = MT6370_REG_CHG_CTRL5,
+ .vsel_mask = MT6370_VOBST_MASK,
+ .enable_reg = MT6370_REG_CHG_CTRL1,
+ .enable_mask = MT6370_OPA_MODE_MASK,
+ .curr_table = mt6370_chg_otg_oc_ma,
+ .n_current_limits = ARRAY_SIZE(mt6370_chg_otg_oc_ma),
+ .csel_reg = MT6370_REG_CHG_CTRL10,
+ .csel_mask = MT6370_OTG_OC_MASK,
+};
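+
+/*
+ * Quick check of the descriptor above: .list_voltage resolves to the linear
+ * mapping uV = 4425000 + sel * 25000, so the 57 selectors span
+ * 4425000 .. 4425000 + 56 * 25000 = 5825000 uV (4.425 V to 5.825 V) on the
+ * OTG VBUS output.
+ */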
+
+static int mt6370_chg_init_rmap_fields(struct mt6370_priv *priv)
+{
+ int i;
+ const struct mt6370_chg_field *fds = mt6370_chg_fields;
+
+ for (i = 0; i < F_MAX; i++) {
+ priv->rmap_fields[i] = devm_regmap_field_alloc(priv->dev,
+ priv->regmap,
+ fds[i].field);
+ if (IS_ERR(priv->rmap_fields[i]))
+ return dev_err_probe(priv->dev,
+ PTR_ERR(priv->rmap_fields[i]),
+ "Failed to allocate regmapfield[%s]\n",
+ fds[i].name);
+ }
+
+ return 0;
+}
+
+static int mt6370_chg_init_setting(struct mt6370_priv *priv)
+{
+ int ret;
+
+ /* Disable usb_chg_en */
+ ret = mt6370_chg_field_set(priv, F_USBCHGEN, 0);
+ if (ret) {
+ dev_err(priv->dev, "Failed to disable usb_chg_en\n");
+ return ret;
+ }
+
+ /* Disable input current limit */
+ ret = mt6370_chg_field_set(priv, F_ILIM_EN, 0);
+ if (ret) {
+ dev_err(priv->dev, "Failed to disable input current limit\n");
+ return ret;
+ }
+
+	/* ICHG/IEOC workaround: ICHG cannot be set to less than 900mA */
+ ret = mt6370_chg_field_set(priv, F_ICHG, 900000);
+ if (ret) {
+		dev_err(priv->dev, "Failed to set ICHG to 900mA\n");
+ return ret;
+ }
+
+	/* Change the input current limit selection to use IAICR results */
+ ret = mt6370_chg_field_set(priv, F_IINLMTSEL, 2);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set IINLMTSEL\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#define MT6370_CHG_DT_PROP_DECL(_name, _type, _field) \
+{ \
+ .name = "mediatek,chg-" #_name, \
+ .type = MT6370_PARSE_TYPE_##_type, \
+ .fd = _field, \
+}
+
+static int mt6370_chg_init_otg_regulator(struct mt6370_priv *priv)
+{
+ struct regulator_config rcfg = {
+ .dev = priv->dev,
+ .regmap = priv->regmap,
+ .driver_data = priv,
+ };
+
+ priv->rdev = devm_regulator_register(priv->dev, &mt6370_chg_otg_rdesc,
+ &rcfg);
+
+ return PTR_ERR_OR_ZERO(priv->rdev);
+}
+
+static int mt6370_chg_init_psy(struct mt6370_priv *priv)
+{
+ struct power_supply_config cfg = {
+ .drv_data = priv,
+ .of_node = dev_of_node(priv->dev),
+ };
+
+ priv->psy = devm_power_supply_register(priv->dev, &mt6370_chg_psy_desc,
+ &cfg);
+
+ return PTR_ERR_OR_ZERO(priv->psy);
+}
+
+static void mt6370_chg_destroy_attach_lock(void *data)
+{
+ struct mutex *attach_lock = data;
+
+ mutex_destroy(attach_lock);
+}
+
+static void mt6370_chg_destroy_wq(void *data)
+{
+ struct workqueue_struct *wq = data;
+
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+}
+
+static irqreturn_t mt6370_attach_i_handler(int irq, void *data)
+{
+ struct mt6370_priv *priv = data;
+ unsigned int otg_en;
+ int ret;
+
+	/* Check whether we are in OTG mode or not */
+ ret = mt6370_chg_field_get(priv, F_BOOST_STAT, &otg_en);
+ if (ret) {
+ dev_err(priv->dev, "Failed to get OTG state\n");
+ return IRQ_NONE;
+ }
+
+ if (otg_en)
+ return IRQ_HANDLED;
+
+ mutex_lock(&priv->attach_lock);
+ priv->attach = MT6370_ATTACH_STAT_ATTACH_BC12_DONE;
+ mutex_unlock(&priv->attach_lock);
+
+ if (!queue_work(priv->wq, &priv->bc12_work))
+		dev_err(priv->dev, "bc12 work is already queued\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6370_uvp_d_evt_handler(int irq, void *data)
+{
+ struct mt6370_priv *priv = data;
+
+ mt6370_chg_pwr_rdy_check(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mt6370_mivr_handler(int irq, void *data)
+{
+ struct mt6370_priv *priv = data;
+
+ pm_stay_awake(priv->dev);
+ disable_irq_nosync(priv->irq_nums[MT6370_IRQ_MIVR]);
+ schedule_delayed_work(&priv->mivr_dwork, msecs_to_jiffies(200));
+
+ return IRQ_HANDLED;
+}
+
+#define MT6370_CHG_IRQ(_name) \
+{ \
+ .name = #_name, \
+ .handler = mt6370_##_name##_handler, \
+}
+
+static int mt6370_chg_init_irq(struct mt6370_priv *priv)
+{
+ int i, ret;
+ const struct {
+ char *name;
+ irq_handler_t handler;
+ } mt6370_chg_irqs[] = {
+ MT6370_CHG_IRQ(attach_i),
+ MT6370_CHG_IRQ(uvp_d_evt),
+ MT6370_CHG_IRQ(mivr),
+ };
+
+ for (i = 0; i < ARRAY_SIZE(mt6370_chg_irqs); i++) {
+ ret = platform_get_irq_byname(to_platform_device(priv->dev),
+ mt6370_chg_irqs[i].name);
+ if (ret < 0)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to get irq %s\n",
+ mt6370_chg_irqs[i].name);
+
+ priv->irq_nums[i] = ret;
+ ret = devm_request_threaded_irq(priv->dev, ret, NULL,
+ mt6370_chg_irqs[i].handler,
+ IRQF_TRIGGER_FALLING,
+ dev_name(priv->dev), priv);
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to request irq %s\n",
+ mt6370_chg_irqs[i].name);
+ }
+
+ return 0;
+}
+
+static int mt6370_chg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mt6370_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+ ret = mt6370_chg_init_rmap_fields(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regmap fields\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->iio_adcs = devm_iio_channel_get_all(priv->dev);
+ if (IS_ERR(priv->iio_adcs))
+ return dev_err_probe(dev, PTR_ERR(priv->iio_adcs),
+ "Failed to get iio adc\n");
+
+ ret = mt6370_chg_init_otg_regulator(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init OTG regulator\n");
+
+ ret = mt6370_chg_init_psy(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init psy\n");
+
+ mutex_init(&priv->attach_lock);
+ ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_attach_lock,
+ &priv->attach_lock);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+
+ priv->attach = MT6370_ATTACH_STAT_DETACH;
+
+ priv->wq = create_singlethread_workqueue(dev_name(priv->dev));
+ if (!priv->wq)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to create workqueue\n");
+
+ ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_wq, priv->wq);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init wq\n");
+
+ ret = devm_work_autocancel(dev, &priv->bc12_work, mt6370_chg_bc12_work_func);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init bc12 work\n");
+
+ ret = devm_delayed_work_autocancel(dev, &priv->mivr_dwork, mt6370_chg_mivr_dwork_func);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init mivr delayed work\n");
+
+ ret = mt6370_chg_init_setting(priv);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to init mt6370 charger setting\n");
+
+ ret = mt6370_chg_init_irq(priv);
+ if (ret)
+ return ret;
+
+ mt6370_chg_pwr_rdy_check(priv);
+
+ return 0;
+}
+
+static const struct of_device_id mt6370_chg_of_match[] = {
+ { .compatible = "mediatek,mt6370-charger", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_chg_of_match);
+
+static struct platform_driver mt6370_chg_driver = {
+ .probe = mt6370_chg_probe,
+ .driver = {
+ .name = "mt6370-charger",
+ .of_match_table = mt6370_chg_of_match,
+ },
+};
+module_platform_driver(mt6370_chg_driver);
+
+MODULE_AUTHOR("ChiaEn Wu <chiaen_wu@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 4239591e1522..5369abaceb5c 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -442,7 +442,7 @@ static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env,
if (ret == -ENODEV || ret == -ENODATA) {
/*
* When a battery is absent, we expect -ENODEV. Don't abort;
- * send the uevent with at least the the PRESENT=0 property
+ * send the uevent with at least the PRESENT=0 property
*/
return 0;
}
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
new file mode 100644
index 000000000000..635f051b0821
--- /dev/null
+++ b/drivers/power/supply/rk817_charger.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Charger Driver for Rockchip rk817
+ *
+ * Copyright (c) 2021 Maya Matuszczyk <maccraft123mc@gmail.com>
+ *
+ * Authors: Maya Matuszczyk <maccraft123mc@gmail.com>
+ * Chris Morgan <macromorgan@hotmail.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/devm-helpers.h>
+#include <linux/mfd/rk808.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+/* Charging statuses reported by hardware register */
+enum rk817_charge_status {
+ CHRG_OFF,
+ DEAD_CHRG,
+ TRICKLE_CHRG,
+ CC_OR_CV_CHRG,
+ CHARGE_FINISH,
+ USB_OVER_VOL,
+ BAT_TMP_ERR,
+ BAT_TIM_ERR,
+};
+
+/*
+ * Max charging current read from/written to the hardware register.
+ * Note how the highest value, corresponding to 0x7, is the lowest
+ * current; this is per the datasheet.
+ */
+enum rk817_chg_cur {
+ CHG_1A,
+ CHG_1_5A,
+ CHG_2A,
+ CHG_2_5A,
+ CHG_2_75A,
+ CHG_3A,
+ CHG_3_5A,
+ CHG_0_5A,
+};
+
+struct rk817_charger {
+ struct device *dev;
+ struct rk808 *rk808;
+
+ struct power_supply *bat_ps;
+ struct power_supply *chg_ps;
+ bool plugged_in;
+ bool battery_present;
+
+ /*
+ * voltage_k and voltage_b values are used to calibrate the ADC
+ * voltage readings. While they are documented in the BSP kernel and
+ * datasheet as voltage_k and voltage_b, there is no further
+ * information explaining them in more detail.
+ */
+
+ uint32_t voltage_k;
+ uint32_t voltage_b;
+
+ /*
+ * soc - state of charge - like the BSP this is stored as a percentage,
+ * to the thousandth. BSP has a display state of charge (dsoc) and a
+ * remaining state of charge (rsoc). This value will be used for both
+ * purposes here so we don't do any fancy math to try and "smooth" the
+ * charge and just report it as it is. Note for example an soc of 100
+ * is stored as 100000, an soc of 50 is stored as 50000, etc.
+ */
+ int soc;
+
+ /*
+ * Capacity of battery when fully charged, equal or less than design
+ * capacity depending upon wear. BSP kernel saves to nvram in mAh,
+ * so this value is in mAh not the standard uAh.
+ */
+ int fcc_mah;
+
+ /*
+	 * Calibrate the SOC on a fully charged battery; this way we can use
+	 * the calibrated SOC value to correct for coulomb counter drift.
+ */
+ bool soc_cal;
+
+ /* Implementation specific immutable properties from device tree */
+ int res_div;
+ int sleep_enter_current_ua;
+ int sleep_filter_current_ua;
+ int bat_charge_full_design_uah;
+ int bat_voltage_min_design_uv;
+ int bat_voltage_max_design_uv;
+
+ /* Values updated periodically by driver for display. */
+ int charge_now_uah;
+ int volt_avg_uv;
+ int cur_avg_ua;
+ int max_chg_cur_ua;
+ int max_chg_volt_uv;
+ int charge_status;
+ int charger_input_volt_avg_uv;
+
+ /* Work queue to periodically update values. */
+ struct delayed_work work;
+};
+
+/* ADC coefficients extracted from BSP kernel */
+#define ADC_TO_CURRENT(adc_value, res_div) \
+ (adc_value * 172 / res_div)
+
+#define CURRENT_TO_ADC(current, samp_res) \
+ (current * samp_res / 172)
+
+#define CHARGE_TO_ADC(capacity, res_div) \
+ (capacity * res_div * 3600 / 172 * 1000)
+
+#define ADC_TO_CHARGE_UAH(adc_value, res_div) \
+ (adc_value / 3600 * 172 / res_div)
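+
+/*
+ * Worked example of the units implied by the macros above (res_div is the
+ * sense-resistor divider taken from the device tree, see the "Implementation
+ * specific immutable properties" comment above): with res_div = 1, a raw
+ * current sample of 1000 gives ADC_TO_CURRENT(1000, 1) = 172000, which the
+ * driver treats as uA. Likewise CHARGE_TO_ADC() and ADC_TO_CHARGE_UAH() are
+ * inverses up to integer rounding: a capacity of m mAh goes in and
+ * m * 1000 uAh comes back out.
+ */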
+
+static u8 rk817_chg_cur_to_reg(u32 chg_cur_ma)
+{
+ if (chg_cur_ma >= 3500)
+ return CHG_3_5A;
+ else if (chg_cur_ma >= 3000)
+ return CHG_3A;
+ else if (chg_cur_ma >= 2750)
+ return CHG_2_75A;
+ else if (chg_cur_ma >= 2500)
+ return CHG_2_5A;
+ else if (chg_cur_ma >= 2000)
+ return CHG_2A;
+ else if (chg_cur_ma >= 1500)
+ return CHG_1_5A;
+ else if (chg_cur_ma >= 1000)
+ return CHG_1A;
+ else if (chg_cur_ma >= 500)
+ return CHG_0_5A;
+ else
+ return -EINVAL;
+}
+
+static int rk817_chg_cur_from_reg(u8 reg)
+{
+ switch (reg) {
+ case CHG_0_5A:
+ return 500000;
+ case CHG_1A:
+ return 1000000;
+ case CHG_1_5A:
+ return 1500000;
+ case CHG_2A:
+ return 2000000;
+ case CHG_2_5A:
+ return 2500000;
+ case CHG_2_75A:
+ return 2750000;
+ case CHG_3A:
+ return 3000000;
+ case CHG_3_5A:
+ return 3500000;
+ default:
+ return -EINVAL;
+ }
+}
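+
+/*
+ * Note the asymmetry between the two helpers above: rk817_chg_cur_to_reg()
+ * takes milliamps while rk817_chg_cur_from_reg() returns microamps, e.g.
+ * rk817_chg_cur_to_reg(1500) == CHG_1_5A and
+ * rk817_chg_cur_from_reg(CHG_1_5A) == 1500000.
+ */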
+
+static void rk817_bat_calib_vol(struct rk817_charger *charger)
+{
+ uint32_t vcalib0 = 0;
+ uint32_t vcalib1 = 0;
+ u8 bulk_reg[2];
+
+ /* calibrate voltage */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_VCALIB0_H,
+ bulk_reg, 2);
+ vcalib0 = get_unaligned_be16(bulk_reg);
+
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_VCALIB1_H,
+ bulk_reg, 2);
+ vcalib1 = get_unaligned_be16(bulk_reg);
+
+ /* values were taken from BSP kernel */
+ charger->voltage_k = (4025 - 2300) * 1000 /
+ ((vcalib1 - vcalib0) ? (vcalib1 - vcalib0) : 1);
+ charger->voltage_b = 4025 - (charger->voltage_k * vcalib1) / 1000;
+}
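+
+/*
+ * Spelled out, the fit above is a line through the two BSP calibration
+ * points: VCALIB0 holds the raw code for 2300 mV and VCALIB1 the raw code
+ * for 4025 mV, which leaves voltage_k in uV per LSB and voltage_b in mV.
+ * Later reads therefore recover the voltage as
+ * uV = voltage_k * adc_code + 1000 * voltage_b.
+ */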
+
+static void rk817_bat_calib_cur(struct rk817_charger *charger)
+{
+ u8 bulk_reg[2];
+
+ /* calibrate current */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_IOFFSET_H,
+ bulk_reg, 2);
+ regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_CAL_OFFSET_H,
+ bulk_reg, 2);
+}
+
+/*
+ * Note that only the fcc_mah is really used by this driver; the other values
+ * are written to remain backwards compatible with the BSP kernel.
+ */
+static int rk817_record_battery_nvram_values(struct rk817_charger *charger)
+{
+ u8 bulk_reg[3];
+ int ret, rsoc;
+
+ /*
+ * write the soc value to the nvram location used by the BSP kernel
+ * for the dsoc value.
+ */
+ put_unaligned_le24(charger->soc, bulk_reg);
+ ret = regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_R1,
+ bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ /*
+ * write the remaining capacity in mah to the nvram location used by
+ * the BSP kernel for the rsoc value.
+ */
+ rsoc = (charger->soc * charger->fcc_mah) / 100000;
+ put_unaligned_le24(rsoc, bulk_reg);
+ ret = regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_DATA0,
+ bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ /* write the fcc_mah in mAh, just as the BSP kernel does. */
+ put_unaligned_le24(charger->fcc_mah, bulk_reg);
+ ret = regmap_bulk_write(charger->rk808->regmap, RK817_GAS_GAUGE_DATA3,
+ bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rk817_bat_calib_cap(struct rk817_charger *charger)
+{
+ struct rk808 *rk808 = charger->rk808;
+ int tmp, charge_now, charge_now_adc, volt_avg;
+ u8 bulk_reg[4];
+
+ /* Calibrate the soc and fcc on a fully charged battery */
+
+ if (charger->charge_status == CHARGE_FINISH && (!charger->soc_cal)) {
+ /*
+		 * soc should be 100000 and the coulomb counter should show the full
+		 * charge capacity. Note that if the device is unplugged for a
+		 * period of several days the coulomb counter will have a large
+ * margin of error, so setting it back to the full charge on
+ * a completed charge cycle should correct this (my device was
+ * showing 33% battery after 3 days unplugged when it should
+ * have been closer to 95% based on voltage and charge
+ * current).
+ */
+
+ charger->soc = 100000;
+ charge_now_adc = CHARGE_TO_ADC(charger->fcc_mah,
+ charger->res_div);
+ put_unaligned_be32(charge_now_adc, bulk_reg);
+ regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_Q_INIT_H3,
+ bulk_reg, 4);
+
+ charger->soc_cal = 1;
+ dev_dbg(charger->dev,
+ "Fully charged. SOC is %d, full capacity is %d\n",
+ charger->soc, charger->fcc_mah * 1000);
+ }
+
+ /*
+	 * The coulomb counter can drift up slightly, so we should correct for
+ * it. But don't correct it until we're at 100% soc.
+ */
+ if (charger->charge_status == CHARGE_FINISH && charger->soc_cal) {
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ charge_now_adc = get_unaligned_be32(bulk_reg);
+ if (charge_now_adc < 0)
+ return charge_now_adc;
+ charge_now = ADC_TO_CHARGE_UAH(charge_now_adc,
+ charger->res_div);
+
+ /*
+		 * Re-init the coulomb counter with updated values to correct drift.
+ */
+ if (charge_now / 1000 > charger->fcc_mah) {
+ dev_dbg(charger->dev,
+				"Recalibrating coulomb counter to %d uah\n",
+ charge_now);
+ /*
+ * Order of operations matters here to ensure we keep
+ * enough precision until the last step to keep from
+				 * making needless updates to the coulomb counter.
+ */
+ charge_now_adc = CHARGE_TO_ADC(charger->fcc_mah,
+ charger->res_div);
+ put_unaligned_be32(charge_now_adc, bulk_reg);
+ regmap_bulk_write(rk808->regmap,
+ RK817_GAS_GAUGE_Q_INIT_H3,
+ bulk_reg, 4);
+ }
+ }
+
+ /*
+ * Calibrate the fully charged capacity when we previously had a full
+ * battery (soc_cal = 1) and are now empty (at or below minimum design
+	 * voltage). If our coulomb counter is still positive, subtract that
+	 * from our fcc value to get a calibrated fcc, and if our coulomb
+	 * counter is negative, add that to our fcc (but not to exceed our
+ * design capacity).
+ */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ volt_avg = (charger->voltage_k * tmp) + 1000 * charger->voltage_b;
+ if (volt_avg <= charger->bat_voltage_min_design_uv &&
+ charger->soc_cal) {
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ charge_now_adc = get_unaligned_be32(bulk_reg);
+ charge_now = ADC_TO_CHARGE_UAH(charge_now_adc,
+ charger->res_div);
+ /*
+ * Note, if charge_now is negative this will add it (what we
+ * want) and if it's positive this will subtract (also what
+ * we want).
+ */
+ charger->fcc_mah = charger->fcc_mah - (charge_now / 1000);
+
+ dev_dbg(charger->dev,
+ "Recalibrating full charge capacity to %d uah\n",
+ charger->fcc_mah * 1000);
+ }
+
+ rk817_record_battery_nvram_values(charger);
+
+ return 0;
+}
+
+static void rk817_read_props(struct rk817_charger *charger)
+{
+ int tmp, reg;
+ u8 bulk_reg[4];
+
+ /*
+	 * Recalibrate voltage and current readings if we need to. The BSP does
+	 * both on CUR_CALIB_UPD, ignoring VOL_CALIB_UPD. Curiously enough, both
+ * documentation and the BSP show that you perform an update if bit 7
+ * is 1, but you clear the status by writing a 1 to bit 7.
+ */
+ regmap_read(charger->rk808->regmap, RK817_GAS_GAUGE_ADC_CONFIG1, &reg);
+ if (reg & RK817_VOL_CUR_CALIB_UPD) {
+ rk817_bat_calib_cur(charger);
+ rk817_bat_calib_vol(charger);
+ regmap_write_bits(charger->rk808->regmap,
+ RK817_GAS_GAUGE_ADC_CONFIG1,
+ RK817_VOL_CUR_CALIB_UPD,
+ RK817_VOL_CUR_CALIB_UPD);
+ }
+
+ /* Update reported charge. */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ tmp = get_unaligned_be32(bulk_reg);
+ charger->charge_now_uah = ADC_TO_CHARGE_UAH(tmp, charger->res_div);
+ if (charger->charge_now_uah < 0)
+ charger->charge_now_uah = 0;
+ if (charger->charge_now_uah > charger->fcc_mah * 1000)
+ charger->charge_now_uah = charger->fcc_mah * 1000;
+
+ /* Update soc based on reported charge. */
+ charger->soc = charger->charge_now_uah * 100 / charger->fcc_mah;
+
+ /* Update reported voltage. */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ charger->volt_avg_uv = (charger->voltage_k * tmp) + 1000 *
+ charger->voltage_b;
+
+ /*
+ * Update reported current. Note value from registers is a signed 16
+ * bit int.
+ */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_BAT_CUR_H,
+ bulk_reg, 2);
+ tmp = (short int)get_unaligned_be16(bulk_reg);
+ charger->cur_avg_ua = ADC_TO_CURRENT(tmp, charger->res_div);
+
+ /*
+ * Update the max charge current. This value shouldn't change, but we
+ * can read it to report what the PMIC says it is instead of simply
+ * returning the default value.
+ */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_OUT, &reg);
+ charger->max_chg_cur_ua =
+ rk817_chg_cur_from_reg(reg & RK817_CHRG_CUR_SEL);
+
+ /*
+ * Update max charge voltage. Like the max charge current this value
+ * shouldn't change, but we can report what the PMIC says.
+ */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_OUT, &reg);
+ charger->max_chg_volt_uv = ((((reg & RK817_CHRG_VOL_SEL) >> 4) *
+ 50000) + 4100000);
+
+ /* Check if battery still present. */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_STS, &reg);
+ charger->battery_present = (reg & RK817_BAT_EXS);
+
+ /* Get which type of charge we are using (if any). */
+ regmap_read(charger->rk808->regmap, RK817_PMIC_CHRG_STS, &reg);
+ charger->charge_status = (reg >> 4) & 0x07;
+
+ /*
+ * Get charger input voltage. Note that on my example hardware (an
+ * Odroid Go Advance) the voltage of the power connector is measured
+ * on the register labelled USB in the datasheet; I don't know if this
+ * is how it is designed or just a quirk of the implementation. I
+ * believe this will also measure the voltage of the USB output when in
+	 * OTG mode; if that is the case we may need to change this in the
+	 * future to return 0 if the power supply status is offline (I can't
+	 * test this with my current implementation). Also, when the voltage
+	 * should be zero, sometimes the ADC still shows a single bit (which
+	 * would register as 20000uv). When this happens, set it to 0.
+ */
+ regmap_bulk_read(charger->rk808->regmap, RK817_GAS_GAUGE_USB_VOL_H,
+ bulk_reg, 2);
+ reg = get_unaligned_be16(bulk_reg);
+ if (reg > 1) {
+ tmp = ((charger->voltage_k * reg / 1000 + charger->voltage_b) *
+ 60 / 46);
+ charger->charger_input_volt_avg_uv = tmp * 1000;
+ } else {
+ charger->charger_input_volt_avg_uv = 0;
+ }
+
+ /* Calibrate battery capacity and soc. */
+ rk817_bat_calib_cap(charger);
+}
+
+static int rk817_bat_get_prop(struct power_supply *ps,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct rk817_charger *charger = power_supply_get_drvdata(ps);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = charger->battery_present;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (charger->cur_avg_ua < 0) {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ }
+ switch (charger->charge_status) {
+ case CHRG_OFF:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ /*
+ * Dead charge is documented, but not explained. I never
+ * observed it but assume it's a pre-charge for a dead
+ * battery.
+ */
+ case DEAD_CHRG:
+ case TRICKLE_CHRG:
+ case CC_OR_CV_CHRG:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case CHARGE_FINISH:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ return -EINVAL;
+
+ }
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ switch (charger->charge_status) {
+ case CHRG_OFF:
+ case CHARGE_FINISH:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case TRICKLE_CHRG:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case DEAD_CHRG:
+ case CC_OR_CV_CHRG:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ break;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = charger->fcc_mah * 1000;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = charger->bat_charge_full_design_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = charger->charge_now_uah;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = charger->bat_voltage_min_design_uv;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ /* Add 500 so that values like 99999 are 100% not 99%. */
+ val->intval = (charger->soc + 500) / 1000;
+ if (val->intval > 100)
+ val->intval = 100;
+ if (val->intval < 0)
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ val->intval = charger->volt_avg_uv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ val->intval = charger->cur_avg_ua;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = charger->max_chg_cur_ua;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = charger->max_chg_volt_uv;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = charger->bat_voltage_max_design_uv;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
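+
+/*
+ * Capacity math used above, spelled out: soc is kept in thousandths of a
+ * percent (100000 == 100%), so (soc + 500) / 1000 rounds to the nearest
+ * whole percent; e.g. soc == 99999 reports 100 rather than 99.
+ */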
+
+static int rk817_chg_get_prop(struct power_supply *ps,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct rk817_charger *charger = power_supply_get_drvdata(ps);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = charger->plugged_in;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ /* max voltage from datasheet at 5.5v (default 5.0v) */
+ val->intval = 5500000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ /* min voltage from datasheet at 3.8v (default 5.0v) */
+ val->intval = 3800000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ val->intval = charger->charger_input_volt_avg_uv;
+ break;
+ /*
+ * While it's possible that other implementations could use different
+ * USB types, the current implementation for this PMIC (the Odroid Go
+ * Advance) only uses a dedicated charging port with no rx/tx lines.
+ */
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+static irqreturn_t rk817_plug_in_isr(int irq, void *cg)
+{
+ struct rk817_charger *charger;
+
+ charger = (struct rk817_charger *)cg;
+ charger->plugged_in = 1;
+ power_supply_changed(charger->chg_ps);
+ power_supply_changed(charger->bat_ps);
+ /* try to recalibrate capacity if we hit full charge. */
+ charger->soc_cal = 0;
+
+ rk817_read_props(charger);
+
+ dev_dbg(charger->dev, "Power Cord Inserted\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rk817_plug_out_isr(int irq, void *cg)
+{
+ struct rk817_charger *charger;
+ struct rk808 *rk808;
+
+ charger = (struct rk817_charger *)cg;
+ rk808 = charger->rk808;
+ charger->plugged_in = 0;
+ power_supply_changed(charger->bat_ps);
+ power_supply_changed(charger->chg_ps);
+
+ /*
+ * For some reason the bits of RK817_PMIC_CHRG_IN reset whenever the
+ * power cord is unplugged. This was not documented in the BSP kernel
+ * or the datasheet and only discovered by trial and error. Set minimum
+ * USB input voltage to 4.5v and enable USB voltage input limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_VLIM_SEL, (0x05 << 4));
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_VLIM_EN,
+ (0x01 << 7));
+
+ /*
+ * Set average USB input current limit to 1.5A and enable USB current
+ * input limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_ILIM_SEL, 0x03);
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_ILIM_EN,
+ (0x01 << 3));
+
+ rk817_read_props(charger);
+
+ dev_dbg(charger->dev, "Power Cord Removed\n");
+
+ return IRQ_HANDLED;
+}
+
+static enum power_supply_property rk817_bat_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+};
+
+static enum power_supply_property rk817_chg_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+};
+
+static enum power_supply_usb_type rk817_usb_type[] = {
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+};
+
+static const struct power_supply_desc rk817_bat_desc = {
+ .name = "rk817-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = rk817_bat_props,
+ .num_properties = ARRAY_SIZE(rk817_bat_props),
+ .get_property = rk817_bat_get_prop,
+};
+
+static const struct power_supply_desc rk817_chg_desc = {
+ .name = "rk817-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = rk817_usb_type,
+ .num_usb_types = ARRAY_SIZE(rk817_usb_type),
+ .properties = rk817_chg_props,
+ .num_properties = ARRAY_SIZE(rk817_chg_props),
+ .get_property = rk817_chg_get_prop,
+};
+
+static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
+{
+ u8 bulk_reg[3];
+ int ret;
+
+ /* Read the nvram data for full charge capacity. */
+ ret = regmap_bulk_read(charger->rk808->regmap,
+ RK817_GAS_GAUGE_DATA3, bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ charger->fcc_mah = get_unaligned_le24(bulk_reg);
+
+ /*
+	 * Sanity check the value: reject anything below what would be
+	 * practical for this device (the BSP kernel assumes 500mAh as the
+	 * lower bound). Also check if the value is too large and correct
+	 * it.
+ */
+ if ((charger->fcc_mah < 500) ||
+ ((charger->fcc_mah * 1000) > charger->bat_charge_full_design_uah)) {
+ dev_info(charger->dev,
+ "Invalid NVRAM max charge, setting to %u uAH\n",
+ charger->bat_charge_full_design_uah);
+ charger->fcc_mah = charger->bat_charge_full_design_uah / 1000;
+ }
+
+ /*
+ * Read the nvram for state of charge. Sanity check for values greater
+ * than 100 (10000). If the value is off it should get corrected
+ * automatically when the voltage drops to the min (soc is 0) or when
+ * the battery is full (soc is 100).
+ */
+ ret = regmap_bulk_read(charger->rk808->regmap,
+ RK817_GAS_GAUGE_BAT_R1, bulk_reg, 3);
+ if (ret < 0)
+ return ret;
+ charger->soc = get_unaligned_le24(bulk_reg);
+ if (charger->soc > 10000)
+ charger->soc = 10000;
+
+ return 0;
+}
+
+static int
+rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ struct power_supply_battery_info *bat_info)
+{
+ struct rk808 *rk808 = charger->rk808;
+ u8 bulk_reg[4];
+ u32 boot_voltage, boot_charge_mah, tmp;
+ int ret, reg, off_time;
+ bool first_boot;
+
+ /*
+	 * Check if the battery is uninitialized. If it is, the coulomb counter
+ * needs to be set up.
+ */
+ ret = regmap_read(rk808->regmap, RK817_GAS_GAUGE_GG_STS, &reg);
+ if (ret < 0)
+ return ret;
+ first_boot = reg & RK817_BAT_CON;
+ /*
+ * If the battery is uninitialized, use the poweron voltage and an ocv
+ * lookup to guess our charge. The number won't be very accurate until
+ * we hit either our minimum voltage (0%) or full charge (100%).
+ */
+ if (first_boot) {
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_PWRON_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) +
+ 1000 * charger->voltage_b;
+ /*
+		 * Since the only implementation has no working thermistor, assume
+ * 20C for OCV lookup. If lookup fails, report error with OCV
+ * table.
+ */
+ charger->soc = power_supply_batinfo_ocv2cap(bat_info,
+ boot_voltage,
+ 20) * 1000;
+ if (charger->soc < 0)
+ charger->soc = 0;
+
+ /* Guess that full charge capacity is the design capacity */
+ charger->fcc_mah = charger->bat_charge_full_design_uah / 1000;
+ /*
+ * Set battery as "set up". BSP driver uses this value even
+ * though datasheet claims it's a read-only value.
+ */
+ regmap_write_bits(rk808->regmap, RK817_GAS_GAUGE_GG_STS,
+ RK817_BAT_CON, 0);
+ /* Save nvram values */
+ ret = rk817_record_battery_nvram_values(charger);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = rk817_read_battery_nvram_values(charger);
+ if (ret < 0)
+ return ret;
+
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ tmp = get_unaligned_be32(bulk_reg);
+ if (tmp < 0)
+ tmp = 0;
+ boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
+ charger->res_div) / 1000;
+ /*
+		 * Check if the coulomb counter has been off for more than 300
+		 * minutes as it tends to drift downward. If so, re-init soc
+		 * with the boot voltage instead. Note the unit values for the
+		 * OFF_CNT register appear to be in decaminutes and stop
+		 * counting at 2550 (0xFF) minutes. The BSP kernel used OCV, but
+ * for me occasionally that would show invalid values. Boot
+ * voltage is only accurate for me on first poweron (not
+ * reboots), but we shouldn't ever encounter an OFF_CNT more
+ * than 0 on a reboot anyway.
+ */
+ regmap_read(rk808->regmap, RK817_GAS_GAUGE_OFF_CNT, &off_time);
+ if (off_time >= 30) {
+ regmap_bulk_read(rk808->regmap,
+ RK817_GAS_GAUGE_PWRON_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) +
+ 1000 * charger->voltage_b;
+ charger->soc =
+ power_supply_batinfo_ocv2cap(bat_info,
+ boot_voltage,
+ 20) * 1000;
+ } else {
+ charger->soc = (boot_charge_mah * 1000 * 100 /
+ charger->fcc_mah);
+ }
+ }
+
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_PWRON_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) + 1000 * charger->voltage_b;
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ bulk_reg, 4);
+ tmp = get_unaligned_be32(bulk_reg);
+ if (tmp < 0)
+ tmp = 0;
+ boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
+ regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
+ bulk_reg, 2);
+ tmp = get_unaligned_be16(bulk_reg);
+ boot_voltage = (charger->voltage_k * tmp) + 1000 * charger->voltage_b;
+
+ /*
+	 * Now that we have our full charge capacity and soc, init the
+	 * coulomb counter.
+ */
+ boot_charge_mah = charger->soc * charger->fcc_mah / 100 / 1000;
+ if (boot_charge_mah > charger->fcc_mah)
+ boot_charge_mah = charger->fcc_mah;
+ tmp = CHARGE_TO_ADC(boot_charge_mah, charger->res_div);
+ put_unaligned_be32(tmp, bulk_reg);
+ ret = regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_Q_INIT_H3,
+ bulk_reg, 4);
+ if (ret < 0)
+ return ret;
+
+ /* Set QMAX value to max design capacity. */
+ tmp = CHARGE_TO_ADC((charger->bat_charge_full_design_uah / 1000),
+ charger->res_div);
+ put_unaligned_be32(tmp, bulk_reg);
+ ret = regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_Q_MAX_H3,
+ bulk_reg, 4);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int rk817_battery_init(struct rk817_charger *charger,
+ struct power_supply_battery_info *bat_info)
+{
+ struct rk808 *rk808 = charger->rk808;
+ u32 tmp, max_chg_vol_mv, max_chg_cur_ma;
+ u8 max_chg_vol_reg, chg_term_i_reg, max_chg_cur_reg;
+ int ret, chg_term_ma;
+ u8 bulk_reg[2];
+
+ /* Get initial plug state */
+ regmap_read(rk808->regmap, RK817_SYS_STS, &tmp);
+ charger->plugged_in = (tmp & RK817_PLUG_IN_STS);
+
+ /*
+ * Turn on all ADC functions to measure battery, USB, and sys voltage,
+	 * as well as batt temp. Note that the only tested implementation so
+	 * far does not use a battery with a thermistor.
+ */
+ regmap_write(rk808->regmap, RK817_GAS_GAUGE_ADC_CONFIG0, 0xfc);
+
+ /*
+ * Set relax mode voltage sampling interval and ADC offset calibration
+ * interval to 8 minutes to mirror BSP kernel. Set voltage and current
+ * modes to average to mirror BSP kernel.
+ */
+ regmap_write(rk808->regmap, RK817_GAS_GAUGE_GG_CON, 0x04);
+
+ /* Calibrate voltage like the BSP does here. */
+ rk817_bat_calib_vol(charger);
+
+ /* Write relax threshold, derived from sleep enter current. */
+ tmp = CURRENT_TO_ADC(charger->sleep_enter_current_ua,
+ charger->res_div);
+ put_unaligned_be16(tmp, bulk_reg);
+ regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_RELAX_THRE_H,
+ bulk_reg, 2);
+
+ /* Write sleep sample current, derived from sleep filter current. */
+ tmp = CURRENT_TO_ADC(charger->sleep_filter_current_ua,
+ charger->res_div);
+ put_unaligned_be16(tmp, bulk_reg);
+ regmap_bulk_write(rk808->regmap, RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H,
+ bulk_reg, 2);
+
+ /* Restart battery relax voltage */
+ regmap_write_bits(rk808->regmap, RK817_GAS_GAUGE_GG_STS,
+ RK817_RELAX_VOL_UPD, (0x0 << 2));
+
+ /*
+ * Set OCV Threshold Voltage to 127.5mV. This was hard coded like this
+ * in the BSP.
+ */
+ regmap_write(rk808->regmap, RK817_GAS_GAUGE_OCV_THRE_VOL, 0xff);
+
+ /*
+ * Set maximum charging voltage to battery max voltage. Trying to be
+	 * incredibly safe with these values, as setting them wrong could
+ * overcharge the battery, which would be very bad.
+ */
+ max_chg_vol_mv = bat_info->constant_charge_voltage_max_uv / 1000;
+ max_chg_cur_ma = bat_info->constant_charge_current_max_ua / 1000;
+
+ if (max_chg_vol_mv < 4100) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger voltage, value %u unsupported\n",
+ max_chg_vol_mv * 1000);
+ }
+ if (max_chg_vol_mv > 4450) {
+ dev_info(charger->dev,
+ "Setting max charge voltage to 4450000uv\n");
+ max_chg_vol_mv = 4450;
+ }
+
+ if (max_chg_cur_ma < 500) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger current, value %u unsupported\n",
+ max_chg_cur_ma * 1000);
+ }
+ if (max_chg_cur_ma > 3500)
+ dev_info(charger->dev,
+ "Setting max charge current to 3500000ua\n");
+
+ /*
+ * Now that the values are sanity checked, if we subtract 4100 from the
+	 * max voltage and divide by 50, we conveniently get the exact value for
+ * the registers, which are 4.1v, 4.15v, 4.2v, 4.25v, 4.3v, 4.35v,
+ * 4.4v, and 4.45v; these correspond to values 0x00 through 0x07.
+ */
+ max_chg_vol_reg = (max_chg_vol_mv - 4100) / 50;
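+	/* e.g. a 4350 mV limit gives (4350 - 4100) / 50 = 5, the 4.35v setting */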
+
+ max_chg_cur_reg = rk817_chg_cur_to_reg(max_chg_cur_ma);
+
+ if (max_chg_vol_reg < 0 || max_chg_vol_reg > 7) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger voltage, value %u unsupported\n",
+ max_chg_vol_mv * 1000);
+ }
+ if (max_chg_cur_reg < 0 || max_chg_cur_reg > 7) {
+ return dev_err_probe(charger->dev, -EINVAL,
+ "invalid max charger current, value %u unsupported\n",
+ max_chg_cur_ma * 1000);
+ }
+
+ /*
+ * Write the values to the registers, and deliver an emergency warning
+ * in the event they are not written correctly.
+ */
+ ret = regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_OUT,
+ RK817_CHRG_VOL_SEL, (max_chg_vol_reg << 4));
+ if (ret) {
+ dev_emerg(charger->dev,
+ "Danger, unable to set max charger voltage: %u\n",
+ ret);
+ }
+
+ ret = regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_OUT,
+ RK817_CHRG_CUR_SEL, max_chg_cur_reg);
+ if (ret) {
+ dev_emerg(charger->dev,
+ "Danger, unable to set max charger current: %u\n",
+ ret);
+ }
+
+ /* Set charge finishing mode to analog */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_TERM,
+ RK817_CHRG_TERM_ANA_DIG, (0x0 << 2));
+
+ /*
+ * Set charge finish current, warn if value not in range and keep
+ * default.
+ */
+ chg_term_ma = bat_info->charge_term_current_ua / 1000;
+ if (chg_term_ma < 150 || chg_term_ma > 400) {
+ dev_warn(charger->dev,
+ "Invalid charge termination %u, keeping default\n",
+ chg_term_ma * 1000);
+ chg_term_ma = 200;
+ }
+
+ /*
+ * Values of 150ma, 200ma, 300ma, and 400ma correspond to 00, 01, 10,
+ * and 11.
+ */
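+	/* e.g. 150 mA -> (150 - 100) / 100 = 0; 300 mA -> 2 (integer division) */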
+ chg_term_i_reg = (chg_term_ma - 100) / 100;
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_TERM,
+ RK817_CHRG_TERM_ANA_SEL, chg_term_i_reg);
+
+ ret = rk817_read_or_set_full_charge_on_boot(charger, bat_info);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set minimum USB input voltage to 4.5v and enable USB voltage input
+ * limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_VLIM_SEL, (0x05 << 4));
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_VLIM_EN,
+ (0x01 << 7));
+
+ /*
+ * Set average USB input current limit to 1.5A and enable USB current
+ * input limit.
+ */
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN,
+ RK817_USB_ILIM_SEL, 0x03);
+ regmap_write_bits(rk808->regmap, RK817_PMIC_CHRG_IN, RK817_USB_ILIM_EN,
+ (0x01 << 3));
+
+ return 0;
+}
+
+static void rk817_charging_monitor(struct work_struct *work)
+{
+ struct rk817_charger *charger;
+
+ charger = container_of(work, struct rk817_charger, work.work);
+
+ rk817_read_props(charger);
+
+ /* Run every 8 seconds like the BSP driver did. */
+ queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
+}
+
+static int rk817_charger_probe(struct platform_device *pdev)
+{
+ struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
+ struct rk817_charger *charger;
+ struct device_node *node;
+ struct power_supply_battery_info *bat_info;
+ struct device *dev = &pdev->dev;
+ struct power_supply_config pscfg = {};
+ int plugin_irq, plugout_irq;
+ int of_value;
+ int ret;
+
+ node = of_get_child_by_name(dev->parent->of_node, "charger");
+ if (!node)
+ return -ENODEV;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->rk808 = rk808;
+
+ charger->dev = &pdev->dev;
+ platform_set_drvdata(pdev, charger);
+
+ rk817_bat_calib_vol(charger);
+
+ pscfg.drv_data = charger;
+ pscfg.of_node = node;
+
+ /*
+	 * Get sample resistor value. Note that only values of 10000 or 20000
+ * microohms are allowed. Schematic for my test implementation (an
+ * Odroid Go Advance) shows a 10 milliohm resistor for reference.
+ */
+ ret = of_property_read_u32(node, "rockchip,resistor-sense-micro-ohms",
+ &of_value);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Error reading sample resistor value\n");
+ }
+ /*
+ * Store as a 1 or a 2, since all we really use the value for is as a
+ * divisor in some calculations.
+ */
+ charger->res_div = (of_value == 20000) ? 2 : 1;
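+	/* e.g. 20000 microohms (20 mOhm) gives res_div = 2, 10000 gives 1 */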
+
+ /*
+ * Get sleep enter current value. Not sure what this value is for
+ * other than to help calibrate the relax threshold.
+ */
+ ret = of_property_read_u32(node,
+ "rockchip,sleep-enter-current-microamp",
+ &of_value);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Error reading sleep enter cur value\n");
+ }
+ charger->sleep_enter_current_ua = of_value;
+
+ /* Get sleep filter current value */
+ ret = of_property_read_u32(node,
+ "rockchip,sleep-filter-current-microamp",
+ &of_value);
+ if (ret < 0) {
+ return dev_err_probe(dev, ret,
+ "Error reading sleep filter cur value\n");
+ }
+
+ charger->sleep_filter_current_ua = of_value;
+
+ charger->bat_ps = devm_power_supply_register(&pdev->dev,
+ &rk817_bat_desc, &pscfg);
+
+ charger->chg_ps = devm_power_supply_register(&pdev->dev,
+ &rk817_chg_desc, &pscfg);
+
+	if (IS_ERR(charger->bat_ps))
+ return dev_err_probe(dev, -EINVAL,
+ "Battery failed to probe\n");
+
+ if (IS_ERR(charger->chg_ps))
+ return dev_err_probe(dev, -EINVAL,
+ "Charger failed to probe\n");
+
+ ret = power_supply_get_battery_info(charger->bat_ps,
+ &bat_info);
+ if (ret) {
+ return dev_err_probe(dev, ret,
+ "Unable to get battery info: %d\n", ret);
+ }
+
+ if ((bat_info->charge_full_design_uah <= 0) ||
+ (bat_info->voltage_min_design_uv <= 0) ||
+ (bat_info->voltage_max_design_uv <= 0) ||
+ (bat_info->constant_charge_voltage_max_uv <= 0) ||
+ (bat_info->constant_charge_current_max_ua <= 0) ||
+ (bat_info->charge_term_current_ua <= 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "Required bat info missing or invalid\n");
+ }
+
+ charger->bat_charge_full_design_uah = bat_info->charge_full_design_uah;
+ charger->bat_voltage_min_design_uv = bat_info->voltage_min_design_uv;
+ charger->bat_voltage_max_design_uv = bat_info->voltage_max_design_uv;
+
+ /*
+ * Has to run after power_supply_get_battery_info as it depends on some
+ * values discovered from that routine.
+ */
+ ret = rk817_battery_init(charger, bat_info);
+ if (ret)
+ return ret;
+
+ power_supply_put_battery_info(charger->bat_ps, bat_info);
+
+ plugin_irq = platform_get_irq(pdev, 0);
+ if (plugin_irq < 0)
+ return plugin_irq;
+
+ plugout_irq = platform_get_irq(pdev, 1);
+ if (plugout_irq < 0)
+ return plugout_irq;
+
+ ret = devm_request_threaded_irq(charger->dev, plugin_irq, NULL,
+ rk817_plug_in_isr,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "rk817_plug_in", charger);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "plug_in_irq request failed!\n");
+ }
+
+ ret = devm_request_threaded_irq(charger->dev, plugout_irq, NULL,
+ rk817_plug_out_isr,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "rk817_plug_out", charger);
+ if (ret) {
+ return dev_err_probe(&pdev->dev, ret,
+ "plug_out_irq request failed!\n");
+ }
+
+ ret = devm_delayed_work_autocancel(&pdev->dev, &charger->work,
+ rk817_charging_monitor);
+ if (ret)
+ return ret;
+
+ /* Force the first update immediately. */
+ mod_delayed_work(system_wq, &charger->work, 0);
+
+ return 0;
+}
+
+static struct platform_driver rk817_charger_driver = {
+ .probe = rk817_charger_probe,
+ .driver = {
+ .name = "rk817-charger",
+ },
+};
+module_platform_driver(rk817_charger_driver);
+
+MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC");
+MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c
index 7a23c70f4879..736dec608ff6 100644
--- a/drivers/power/supply/rt5033_battery.c
+++ b/drivers/power/supply/rt5033_battery.c
@@ -149,13 +149,11 @@ static int rt5033_battery_probe(struct i2c_client *client,
return 0;
}
-static int rt5033_battery_remove(struct i2c_client *client)
+static void rt5033_battery_remove(struct i2c_client *client)
{
struct rt5033_battery *battery = i2c_get_clientdata(client);
power_supply_unregister(battery->psy);
-
- return 0;
}
static const struct i2c_device_id rt5033_battery_id[] = {
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index 74ee54320e6a..72962286d704 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -1698,7 +1698,7 @@ put_usb_notifier:
return ret;
}
-static int rt9455_remove(struct i2c_client *client)
+static void rt9455_remove(struct i2c_client *client)
{
int ret;
struct rt9455_info *info = i2c_get_clientdata(client);
@@ -1715,8 +1715,6 @@ static int rt9455_remove(struct i2c_client *client)
cancel_delayed_work_sync(&info->pwr_rdy_work);
cancel_delayed_work_sync(&info->max_charging_time_work);
cancel_delayed_work_sync(&info->batt_presence_work);
-
- return 0;
}
static const struct i2c_device_id rt9455_i2c_id_table[] = {
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index 1511f71f937c..996a82f8a2a1 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -1595,14 +1595,12 @@ static int smb347_probe(struct i2c_client *client,
return 0;
}
-static int smb347_remove(struct i2c_client *client)
+static void smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
smb347_usb_vbus_regulator_disable(smb->usb_rdev);
smb347_irq_disable(smb);
-
- return 0;
}
static void smb347_shutdown(struct i2c_client *client)
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index ba33d1617e0b..a4bc9f2a10bc 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -50,7 +50,7 @@ static int tps65217_config_charger(struct tps65217_charger *charger)
* tps65217 rev. G, p. 31 (see p. 32 for NTC schematic)
*
* The device can be configured to support a 100k NTC (B = 3960) by
- * setting the the NTC_TYPE bit in register CHGCONFIG1 to 1. However it
+ * setting the NTC_TYPE bit in register CHGCONFIG1 to 1. However it
* is not recommended to do so. In sleep mode, the charger continues
* charging the battery, but all register values are reset to default
* values. Therefore, the charger would get the wrong temperature
diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c
index 7ed4e4bb26ec..1897c2984860 100644
--- a/drivers/power/supply/z2_battery.c
+++ b/drivers/power/supply/z2_battery.c
@@ -251,7 +251,7 @@ err:
return ret;
}
-static int z2_batt_remove(struct i2c_client *client)
+static void z2_batt_remove(struct i2c_client *client)
{
struct z2_charger *charger = i2c_get_clientdata(client);
@@ -263,8 +263,6 @@ static int z2_batt_remove(struct i2c_client *client)
free_irq(gpiod_to_irq(charger->charge_gpiod), charger);
kfree(charger);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index a20bf12f3ce3..999e218d7793 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -254,7 +254,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
iit = per_cpu_ptr(&idle_inject_thread, cpu);
iit->should_run = 0;
- wait_task_inactive(iit->tsk, 0);
+ wait_task_inactive(iit->tsk, TASK_ANY);
}
cpu_hotplug_enable();
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 21d624f9f5fb..26d00b1853b4 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -994,6 +994,9 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
y = value & 0x1f;
value = (1 << y) * (4 + f) * rp->time_unit / 4;
} else {
+ if (value < rp->time_unit)
+ return 0;
+
do_div(value, rp->time_unit);
y = ilog2(value);
f = div64_u64(4 * (value - (1 << y)), 1 << y);
@@ -1035,7 +1038,6 @@ static const struct rapl_defaults rapl_defaults_spr_server = {
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
- .dram_domain_energy_unit = 15300,
.psys_domain_energy_unit = 1000000000,
.spr_psys_bits = true,
};
@@ -1110,6 +1112,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 688cde320bb0..51cae72bb6db 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -174,7 +174,7 @@ static void ptp_clock_release(struct device *dev)
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
- ida_simple_remove(&ptp_clocks_map, ptp->index);
+ ida_free(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -217,7 +217,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (ptp == NULL)
goto no_memory;
- index = ida_simple_get(&ptp_clocks_map, 0, MINORMASK + 1, GFP_KERNEL);
+ index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
if (index < 0) {
err = index;
goto no_slot;
@@ -332,7 +332,7 @@ kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
- ida_simple_remove(&ptp_clocks_map, index);
+ ida_free(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index e59ea2173aac..d36c3f597f77 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1311,12 +1311,6 @@ fail:
goto out;
}
-static int
-ptp_ocp_firstchild(struct device *dev, void *data)
-{
- return 1;
-}
-
static struct device *
ptp_ocp_find_flash(struct ptp_ocp *bp)
{
@@ -1325,7 +1319,7 @@ ptp_ocp_find_flash(struct ptp_ocp *bp)
last = NULL;
dev = &bp->spi_flash->dev;
- while ((dev = device_find_child(dev, NULL, ptp_ocp_firstchild))) {
+ while ((dev = device_find_any_child(dev))) {
if (!strcmp("mtd", dev_bus_name(dev)))
break;
put_device(last);
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0e042410f6b9..d333e7422f4a 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -678,7 +678,7 @@ static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
mutex_lock(&pwm_lock);
list_for_each_entry(chip, &pwm_chips, list)
- if (chip->dev && dev_fwnode(chip->dev) == fwnode) {
+ if (chip->dev && device_match_fwnode(chip->dev, fwnode)) {
mutex_unlock(&pwm_lock);
return chip;
}
@@ -734,8 +734,8 @@ static struct device_link *pwm_device_link_add(struct device *dev,
* Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
* error code on failure.
*/
-struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id)
+static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id)
{
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
@@ -797,7 +797,6 @@ put:
return pwm;
}
-EXPORT_SYMBOL_GPL(of_pwm_get);
/**
* acpi_pwm_get() - request a PWM via parsing "pwms" property in ACPI
@@ -1071,36 +1070,6 @@ struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
EXPORT_SYMBOL_GPL(devm_pwm_get);
/**
- * devm_of_pwm_get() - resource managed of_pwm_get()
- * @dev: device for PWM consumer
- * @np: device node to get the PWM from
- * @con_id: consumer name
- *
- * This function performs like of_pwm_get() but the acquired PWM device will
- * automatically be released on driver detach.
- *
- * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
- * error code on failure.
- */
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id)
-{
- struct pwm_device *pwm;
- int ret;
-
- pwm = of_pwm_get(dev, np, con_id);
- if (IS_ERR(pwm))
- return pwm;
-
- ret = devm_add_action_or_reset(dev, devm_pwm_release, pwm);
- if (ret)
- return ERR_PTR(ret);
-
- return pwm;
-}
-EXPORT_SYMBOL_GPL(devm_of_pwm_get);
-
-/**
* devm_fwnode_pwm_get() - request a resource managed PWM from firmware node
* @dev: device for PWM consumer
* @fwnode: firmware node to get the PWM from
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index c893ec3d2fb4..98413d364338 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -14,35 +14,6 @@
#include "pwm-lpss.h"
-/* BayTrail */
-static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
- .clk_rate = 25000000,
- .npwm = 1,
- .base_unit_bits = 16,
-};
-
-/* Braswell */
-static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
- .clk_rate = 19200000,
- .npwm = 1,
- .base_unit_bits = 16,
-};
-
-/* Broxton */
-static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
- .clk_rate = 19200000,
- .npwm = 4,
- .base_unit_bits = 22,
- .bypass = true,
-};
-
-/* Tangier */
-static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
- .clk_rate = 19200000,
- .npwm = 4,
- .base_unit_bits = 22,
-};
-
static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -54,8 +25,12 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
if (err < 0)
return err;
+ err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (err)
+ return err;
+
info = (struct pwm_lpss_boardinfo *)id->driver_data;
- lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
+ lpwm = pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
@@ -73,7 +48,6 @@ static void pwm_lpss_remove_pci(struct pci_dev *pdev)
pm_runtime_get_sync(&pdev->dev);
}
-#ifdef CONFIG_PM
static int pwm_lpss_runtime_suspend_pci(struct device *dev)
{
/*
@@ -87,12 +61,11 @@ static int pwm_lpss_runtime_resume_pci(struct device *dev)
{
return 0;
}
-#endif
-static const struct dev_pm_ops pwm_lpss_pci_pm = {
- SET_RUNTIME_PM_OPS(pwm_lpss_runtime_suspend_pci,
- pwm_lpss_runtime_resume_pci, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(pwm_lpss_pci_pm,
+ pwm_lpss_runtime_suspend_pci,
+ pwm_lpss_runtime_resume_pci,
+ NULL);
static const struct pci_device_id pwm_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
@@ -114,10 +87,11 @@ static struct pci_driver pwm_lpss_driver_pci = {
.probe = pwm_lpss_probe_pci,
.remove = pwm_lpss_remove_pci,
.driver = {
- .pm = &pwm_lpss_pci_pm,
+ .pm = pm_ptr(&pwm_lpss_pci_pm),
},
};
module_pci_driver(pwm_lpss_driver_pci);
MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PWM_LPSS);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 928570430cef..c48c6f2b2cd8 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -7,52 +7,31 @@
* Derived from the original pwm-lpss.c
*/
-#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include "pwm-lpss.h"
-/* BayTrail */
-static const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
- .clk_rate = 25000000,
- .npwm = 1,
- .base_unit_bits = 16,
-};
-
-/* Braswell */
-static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
- .clk_rate = 19200000,
- .npwm = 1,
- .base_unit_bits = 16,
- .other_devices_aml_touches_pwm_regs = true,
-};
-
-/* Broxton */
-static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
- .clk_rate = 19200000,
- .npwm = 4,
- .base_unit_bits = 22,
- .bypass = true,
-};
static int pwm_lpss_probe_platform(struct platform_device *pdev)
{
const struct pwm_lpss_boardinfo *info;
- const struct acpi_device_id *id;
struct pwm_lpss_chip *lpwm;
- struct resource *r;
+ void __iomem *base;
- id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
- if (!id)
+ info = device_get_match_data(&pdev->dev);
+ if (!info)
return -ENODEV;
- info = (const struct pwm_lpss_boardinfo *)id->driver_data;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- lpwm = pwm_lpss_probe(&pdev->dev, r, info);
+ lpwm = pwm_lpss_probe(&pdev->dev, base, info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
@@ -110,4 +89,5 @@ module_platform_driver(pwm_lpss_driver_platform);
MODULE_DESCRIPTION("PWM platform driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(PWM_LPSS);
MODULE_ALIAS("platform:pwm-lpss");
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 36d4e83e6b79..accdef5dd58e 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -10,6 +10,7 @@
* Author: Alan Cox <alan@linux.intel.com>
*/
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -18,17 +19,53 @@
#include <linux/pm_runtime.h>
#include <linux/time.h>
+#define DEFAULT_SYMBOL_NAMESPACE PWM_LPSS
+
#include "pwm-lpss.h"
#define PWM 0x00000000
#define PWM_ENABLE BIT(31)
#define PWM_SW_UPDATE BIT(30)
#define PWM_BASE_UNIT_SHIFT 8
-#define PWM_ON_TIME_DIV_MASK 0x000000ff
+#define PWM_ON_TIME_DIV_MASK GENMASK(7, 0)
/* Size of each PWM register space if multiple */
#define PWM_SIZE 0x400
+/* BayTrail */
+const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
+ .clk_rate = 25000000,
+ .npwm = 1,
+ .base_unit_bits = 16,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_byt_info);
+
+/* Braswell */
+const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
+ .clk_rate = 19200000,
+ .npwm = 1,
+ .base_unit_bits = 16,
+ .other_devices_aml_touches_pwm_regs = true,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info);
+
+/* Broxton */
+const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
+ .clk_rate = 19200000,
+ .npwm = 4,
+ .base_unit_bits = 22,
+ .bypass = true,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_bxt_info);
+
+/* Tangier */
+const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
+ .clk_rate = 19200000,
+ .npwm = 4,
+ .base_unit_bits = 22,
+};
+EXPORT_SYMBOL_GPL(pwm_lpss_tng_info);
+
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
{
return container_of(chip, struct pwm_lpss_chip, chip);
@@ -207,7 +244,7 @@ static const struct pwm_ops pwm_lpss_ops = {
.owner = THIS_MODULE,
};
-struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
+struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, void __iomem *base,
const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
@@ -222,10 +259,7 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
if (!lpwm)
return ERR_PTR(-ENOMEM);
- lpwm->regs = devm_ioremap_resource(dev, r);
- if (IS_ERR(lpwm->regs))
- return ERR_CAST(lpwm->regs);
-
+ lpwm->regs = base;
lpwm->info = info;
c = lpwm->info->clk_rate;
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index 8b3476f25e06..8e82eb5a7e00 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -25,6 +25,11 @@ struct pwm_lpss_boardinfo {
unsigned long clk_rate;
unsigned int npwm;
unsigned long base_unit_bits;
+ /*
+	 * Some versions of the IP may get stuck in the state machine if the
+	 * enable bit is not set, and hence the update bit will show busy
+	 * status until reset. Other versions may behave differently.
+ */
bool bypass;
/*
* On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
@@ -33,7 +38,12 @@ struct pwm_lpss_boardinfo {
bool other_devices_aml_touches_pwm_regs;
};
-struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
+extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info;
+extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info;
+extern const struct pwm_lpss_boardinfo pwm_lpss_bxt_info;
+extern const struct pwm_lpss_boardinfo pwm_lpss_tng_info;
+
+struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, void __iomem *base,
const struct pwm_lpss_boardinfo *info);
#endif /* __PWM_LPSS_H */
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index c91fa7f9e33d..f230c10d28bb 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -598,7 +598,7 @@ static int pca9685_pwm_probe(struct i2c_client *client,
return 0;
}
-static int pca9685_pwm_remove(struct i2c_client *client)
+static void pca9685_pwm_remove(struct i2c_client *client)
{
struct pca9685 *pca = i2c_get_clientdata(client);
@@ -610,8 +610,6 @@ static int pca9685_pwm_remove(struct i2c_client *client)
}
pm_runtime_disable(&client->dev);
-
- return 0;
}
static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index f3647b317152..a5af859217c1 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -328,22 +328,16 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
else
pc->pclk = pc->clk;
- if (IS_ERR(pc->pclk)) {
- ret = PTR_ERR(pc->pclk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Can't get APB clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(pc->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->pclk), "Can't get APB clk\n");
ret = clk_prepare_enable(pc->clk);
- if (ret) {
- dev_err(&pdev->dev, "Can't prepare enable PWM clk: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't prepare enable PWM clk\n");
ret = clk_prepare_enable(pc->pclk);
if (ret) {
- dev_err(&pdev->dev, "Can't prepare enable APB clk: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "Can't prepare enable APB clk\n");
goto err_clk;
}
@@ -360,7 +354,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
goto err_pclk;
}
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 9903c3a7eced..e7db8e45001c 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%llu\n", state.period);
+ return sysfs_emit(buf, "%llu\n", state.period);
}
static ssize_t period_store(struct device *child,
@@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%llu\n", state.duty_cycle);
+ return sysfs_emit(buf, "%llu\n", state.duty_cycle);
}
static ssize_t duty_cycle_store(struct device *child,
@@ -112,7 +112,7 @@ static ssize_t enable_show(struct device *child,
pwm_get_state(pwm, &state);
- return sprintf(buf, "%d\n", state.enabled);
+ return sysfs_emit(buf, "%d\n", state.enabled);
}
static ssize_t enable_store(struct device *child,
@@ -171,7 +171,7 @@ static ssize_t polarity_show(struct device *child,
break;
}
- return sprintf(buf, "%s\n", polarity);
+ return sysfs_emit(buf, "%s\n", polarity);
}
static ssize_t polarity_store(struct device *child,
@@ -212,7 +212,7 @@ static ssize_t capture_show(struct device *child,
if (ret)
return ret;
- return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
+ return sysfs_emit(buf, "%u %u\n", result.period, result.duty_cycle);
}
static DEVICE_ATTR_RW(period);
@@ -361,7 +361,7 @@ static ssize_t npwm_show(struct device *parent, struct device_attribute *attr,
{
const struct pwm_chip *chip = dev_get_drvdata(parent);
- return sprintf(buf, "%u\n", chip->npwm);
+ return sysfs_emit(buf, "%u\n", chip->npwm);
}
static DEVICE_ATTR_RO(npwm);
@@ -433,7 +433,7 @@ static int pwm_class_resume_npwm(struct device *parent, unsigned int npwm)
return ret;
}
-static int __maybe_unused pwm_class_suspend(struct device *parent)
+static int pwm_class_suspend(struct device *parent)
{
struct pwm_chip *chip = dev_get_drvdata(parent);
unsigned int i;
@@ -464,20 +464,20 @@ static int __maybe_unused pwm_class_suspend(struct device *parent)
return ret;
}
-static int __maybe_unused pwm_class_resume(struct device *parent)
+static int pwm_class_resume(struct device *parent)
{
struct pwm_chip *chip = dev_get_drvdata(parent);
return pwm_class_resume_npwm(parent, chip->npwm);
}
-static SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume);
static struct class pwm_class = {
.name = "pwm",
.owner = THIS_MODULE,
.dev_groups = pwm_chip_groups,
- .pm = &pwm_class_pm_ops,
+ .pm = pm_sleep_ptr(&pwm_class_pm_ops),
};
static int pwmchip_sysfs_match(struct device *parent, const void *data)
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 42f2fc0bc8a9..321af498ee11 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -556,6 +556,14 @@ static int __init cec_init(void)
if (ce_arr.disabled)
return -ENODEV;
+ /*
+ * Intel systems may avoid uncorrectable errors
+ * if pages with corrected errors are aggressively
+ * taken offline.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ action_threshold = 2;
+
ce_arr.array = (void *)get_zeroed_page(GFP_KERNEL);
if (!ce_arr.array) {
pr_err("Error allocating CE array page!\n");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 23e3e4a35cc9..070e4403c6c2 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -787,6 +787,24 @@ config REGULATOR_MT6323
This driver supports the control of different power rails of device
through regulator interface.
+config REGULATOR_MT6331
+ tristate "MediaTek MT6331 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6331 PMIC.
+ This driver supports the control of different power rails of device
+	  through regulator interface.
+
+config REGULATOR_MT6332
+ tristate "MediaTek MT6332 PMIC"
+ depends on MFD_MT6397
+ help
+ Say y here to select this option to enable the power regulator of
+ MediaTek MT6332 PMIC.
+ This driver supports the control of different power rails of device
+	  through regulator interface.
+
config REGULATOR_MT6358
tristate "MediaTek MT6358 PMIC"
depends on MFD_MT6397
@@ -1264,6 +1282,7 @@ config REGULATOR_STW481X_VMMC
config REGULATOR_SY7636A
tristate "Silergy SY7636A voltage regulator"
+ depends on MFD_SY7636A
help
This driver supports Silergy SY3686A voltage regulator.
@@ -1384,6 +1403,15 @@ config REGULATOR_TPS65218
voltage regulators. It supports software based voltage control
for different voltage domains
+config REGULATOR_TPS65219
+ tristate "TI TPS65219 Power regulators"
+ depends on MFD_TPS65219 && OF
+ help
+ This driver supports TPS65219 voltage regulator chips.
+ TPS65219 series of PMICs have 3 single phase BUCKs & 4 LDOs
+ voltage regulators. It supports software based voltage control
+ for different voltage domains.
+
config REGULATOR_TPS6524X
tristate "TI TPS6524X Power regulators"
depends on SPI
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index fa49bb6cc544..5962307e1130 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,8 @@ obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6315) += mt6315-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
+obj-$(CONFIG_REGULATOR_MT6331) += mt6331-regulator.o
+obj-$(CONFIG_REGULATOR_MT6332) += mt6332-regulator.o
obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o
obj-$(CONFIG_REGULATOR_MT6359) += mt6359-regulator.o
obj-$(CONFIG_REGULATOR_MT6360) += mt6360-regulator.o
@@ -162,6 +164,7 @@ obj-$(CONFIG_REGULATOR_TPS65086) += tps65086-regulator.o
obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65219) += tps65219-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index acaa6607898e..c2b8b8be7824 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -571,11 +571,10 @@ static int bd7181x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No parent regmap\n");
return -ENODEV;
}
- ldo4_en = devm_gpiod_get_from_of_node(&pdev->dev,
- pdev->dev.parent->of_node,
- "rohm,vsel-gpios", 0,
- GPIOD_ASIS, "ldo4-en");
+ ldo4_en = devm_fwnode_gpiod_get(&pdev->dev,
+ dev_fwnode(pdev->dev.parent),
+ "rohm,vsel", GPIOD_ASIS, "ldo4-en");
if (IS_ERR(ldo4_en)) {
ret = PTR_ERR(ldo4_en);
if (ret != -ENOENT)
diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
index aa42da4d141e..393c8693b327 100644
--- a/drivers/regulator/bd9576-regulator.c
+++ b/drivers/regulator/bd9576-regulator.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
@@ -939,8 +940,8 @@ static int bd957x_probe(struct platform_device *pdev)
}
ic_data->regmap = regmap;
- vout_mode = of_property_read_bool(pdev->dev.parent->of_node,
- "rohm,vout1-en-low");
+ vout_mode = device_property_read_bool(pdev->dev.parent,
+ "rohm,vout1-en-low");
if (vout_mode) {
struct gpio_desc *en;
@@ -948,10 +949,10 @@ static int bd957x_probe(struct platform_device *pdev)
/* VOUT1 enable state judged by VOUT1_EN pin */
/* See if we have GPIO defined */
- en = devm_gpiod_get_from_of_node(&pdev->dev,
- pdev->dev.parent->of_node,
- "rohm,vout1-en-gpios", 0,
- GPIOD_OUT_LOW, "vout1-en");
+ en = devm_fwnode_gpiod_get(&pdev->dev,
+ dev_fwnode(pdev->dev.parent),
+ "rohm,vout1-en", GPIOD_OUT_LOW,
+ "vout1-en");
if (!IS_ERR(en)) {
/* VOUT1_OPS gpio ctrl */
/*
@@ -986,8 +987,8 @@ static int bd957x_probe(struct platform_device *pdev)
* like DDR voltage selection.
*/
platform_set_drvdata(pdev, ic_data);
- ddr_sel = of_property_read_bool(pdev->dev.parent->of_node,
- "rohm,ddr-sel-low");
+ ddr_sel = device_property_read_bool(pdev->dev.parent,
+ "rohm,ddr-sel-low");
if (ddr_sel)
ic_data->regulator_data[2].desc.fixed_uV = 1350000;
else
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d8373cb04f90..bcccad8f7516 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -977,12 +977,27 @@ static int drms_uA_update(struct regulator_dev *rdev)
rdev_err(rdev, "failed to set load %d: %pe\n",
current_uA, ERR_PTR(err));
} else {
+ /*
+ * Unfortunately in some cases the constraints->valid_ops has
+ * REGULATOR_CHANGE_DRMS but there are no valid modes listed.
+ * That's not really legit but we won't consider it a fatal
+ * error here. We'll treat it as if REGULATOR_CHANGE_DRMS
+ * wasn't set.
+ */
+ if (!rdev->constraints->valid_modes_mask) {
+ rdev_dbg(rdev, "Can change modes; but no valid mode\n");
+ return 0;
+ }
+
/* get output voltage */
output_uV = regulator_get_voltage_rdev(rdev);
- if (output_uV <= 0) {
- rdev_err(rdev, "invalid output voltage found\n");
- return -EINVAL;
- }
+
+ /*
+ * Don't return an error; if regulator driver cares about
+ * output_uV then it's up to the driver to validate.
+ */
+ if (output_uV <= 0)
+ rdev_dbg(rdev, "invalid output voltage found\n");
/* get input voltage */
input_uV = 0;
@@ -990,10 +1005,13 @@ static int drms_uA_update(struct regulator_dev *rdev)
input_uV = regulator_get_voltage(rdev->supply);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
- if (input_uV <= 0) {
- rdev_err(rdev, "invalid input voltage found\n");
- return -EINVAL;
- }
+
+ /*
+ * Don't return an error; if regulator driver cares about
+ * input_uV then it's up to the driver to validate.
+ */
+ if (input_uV <= 0)
+ rdev_dbg(rdev, "invalid input voltage found\n");
/* now get the optimum mode for our new total regulator load */
mode = rdev->desc->ops->get_optimum_mode(rdev, input_uV,
@@ -2681,7 +2699,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
* return -ETIMEDOUT.
*/
if (rdev->desc->poll_enabled_time) {
- unsigned int time_remaining = delay;
+ int time_remaining = delay;
while (time_remaining > 0) {
_regulator_delay_helper(rdev->desc->poll_enabled_time);
@@ -2733,13 +2751,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;
lockdep_assert_held_once(&rdev->mutex.base);
regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }
return 0;
}
@@ -3497,10 +3520,8 @@ static int _regulator_set_voltage_time(struct regulator_dev *rdev,
(new_uV < old_uV))
return rdev->constraints->settling_time_down;
- if (ramp_delay == 0) {
- rdev_dbg(rdev, "ramp_delay not set\n");
+ if (ramp_delay == 0)
return 0;
- }
return DIV_ROUND_UP(abs(new_uV - old_uV), ramp_delay);
}
@@ -5393,6 +5414,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
bool dangling_of_gpiod = false;
struct device *dev;
int ret, i;
+ bool resolved_early = false;
if (cfg == NULL)
return ERR_PTR(-EINVAL);
@@ -5496,24 +5518,10 @@ regulator_register(const struct regulator_desc *regulator_desc,
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
- /* preform any regulator specific init */
- if (init_data && init_data->regulator_init) {
- ret = init_data->regulator_init(rdev->reg_data);
- if (ret < 0)
- goto clean;
- }
-
- if (config->ena_gpiod) {
- ret = regulator_ena_gpio_request(rdev, config);
- if (ret != 0) {
- rdev_err(rdev, "Failed to request enable GPIO: %pe\n",
- ERR_PTR(ret));
- goto clean;
- }
- /* The regulator core took over the GPIO descriptor */
- dangling_cfg_gpiod = false;
- dangling_of_gpiod = false;
- }
+ if (init_data && init_data->supply_regulator)
+ rdev->supply_name = init_data->supply_regulator;
+ else if (regulator_desc->supply_name)
+ rdev->supply_name = regulator_desc->supply_name;
/* register with sysfs */
rdev->dev.class = &regulator_class;
@@ -5535,13 +5543,38 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto wash;
}
- if (init_data && init_data->supply_regulator)
- rdev->supply_name = init_data->supply_regulator;
- else if (regulator_desc->supply_name)
- rdev->supply_name = regulator_desc->supply_name;
+ if ((rdev->supply_name && !rdev->supply) &&
+ (rdev->constraints->always_on ||
+ rdev->constraints->boot_on)) {
+ ret = regulator_resolve_supply(rdev);
+ if (ret)
+ rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
+ ERR_PTR(ret));
+
+ resolved_early = true;
+ }
+
+ /* perform any regulator specific init */
+ if (init_data && init_data->regulator_init) {
+ ret = init_data->regulator_init(rdev->reg_data);
+ if (ret < 0)
+ goto wash;
+ }
+
+ if (config->ena_gpiod) {
+ ret = regulator_ena_gpio_request(rdev, config);
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO: %pe\n",
+ ERR_PTR(ret));
+ goto wash;
+ }
+ /* The regulator core took over the GPIO descriptor */
+ dangling_cfg_gpiod = false;
+ dangling_of_gpiod = false;
+ }
ret = set_machine_constraints(rdev);
- if (ret == -EPROBE_DEFER) {
+ if (ret == -EPROBE_DEFER && !resolved_early) {
/* Regulator might be in bypass mode and so needs its supply
* to set the constraints
*/
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 76e0e23bf598..e4c753b83088 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -1164,7 +1164,7 @@ error:
return ret;
}
-static int da9121_i2c_remove(struct i2c_client *i2c)
+static void da9121_i2c_remove(struct i2c_client *i2c)
{
struct da9121 *chip = i2c_get_clientdata(i2c);
const int mask_all[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
@@ -1176,7 +1176,6 @@ static int da9121_i2c_remove(struct i2c_client *i2c)
ret = regmap_bulk_write(chip->regmap, DA9121_REG_SYS_MASK_0, mask_all, 4);
if (ret != 0)
dev_err(chip->dev, "Failed to set IRQ masks: %d\n", ret);
- return 0;
}
static const struct i2c_device_id da9121_i2c_id[] = {
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 32823a87fd40..3265e75e97ab 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -70,6 +70,65 @@ struct regulator *devm_regulator_get_exclusive(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
+static void regulator_action_disable(void *d)
+{
+ struct regulator *r = (struct regulator *)d;
+
+ regulator_disable(r);
+}
+
+static int _devm_regulator_get_enable(struct device *dev, const char *id,
+ int get_type)
+{
+ struct regulator *r;
+ int ret;
+
+ r = _devm_regulator_get(dev, id, get_type);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ret = regulator_enable(r);
+ if (!ret)
+ ret = devm_add_action_or_reset(dev, &regulator_action_disable, r);
+
+ if (ret)
+ devm_regulator_put(r);
+
+ return ret;
+}
+
+/**
+ * devm_regulator_get_enable_optional - Resource managed regulator get and enable
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
+ *
+ * Get and enable regulator for duration of the device life-time.
+ * regulator_disable() and regulator_put() are automatically called on driver
+ * detach. See regulator_get_optional() and regulator_enable() for more
+ * information.
+ */
+int devm_regulator_get_enable_optional(struct device *dev, const char *id)
+{
+ return _devm_regulator_get_enable(dev, id, OPTIONAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_enable_optional);
+
+/**
+ * devm_regulator_get_enable - Resource managed regulator get and enable
+ * @dev: device to supply
+ * @id: supply name or regulator ID.
+ *
+ * Get and enable regulator for duration of the device life-time.
+ * regulator_disable() and regulator_put() are automatically called on driver
+ * detach. See regulator_get() and regulator_enable() for more
+ * information.
+ */
+int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return _devm_regulator_get_enable(dev, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_enable);
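+
+/*
+ * A minimal usage sketch (hypothetical consumer; the "vdd" supply name is
+ * only an example):
+ *
+ *	ret = devm_regulator_get_enable(&pdev->dev, "vdd");
+ *	if (ret)
+ *		return dev_err_probe(&pdev->dev, ret, "can't enable vdd\n");
+ *
+ * The supply then stays enabled until the device is unbound; no explicit
+ * regulator_disable() or regulator_put() call is needed.
+ */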
+
/**
* devm_regulator_get_optional - Resource managed regulator_get_optional()
* @dev: device to supply
@@ -194,6 +253,111 @@ int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
}
EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_const);
+static int devm_regulator_bulk_match(struct device *dev, void *res,
+ void *data)
+{
+ struct regulator_bulk_devres *match = res;
+ struct regulator_bulk_data *target = data;
+
+ /*
+	 * We check that the put uses the same consumer list as the get did.
+	 * We _could_ scan all entries in the consumer array and check that
+	 * the regulators match, but ATM I don't see the need. We can change
+	 * this later if needed.
+ */
+ return match->consumers == target;
+}
+
+/**
+ * devm_regulator_bulk_put - Resource managed regulator_bulk_put()
+ * @consumers: consumers to free
+ *
+ * Deallocate regulators allocated with devm_regulator_bulk_get(). Normally
+ * this function will not need to be called and the resource management
+ * code will ensure that the resource is freed.
+ */
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+ int rc;
+ struct regulator *regulator = consumers[0].consumer;
+
+ rc = devres_release(regulator->dev, devm_regulator_bulk_release,
+ devm_regulator_bulk_match, consumers);
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_put);
+
+static void devm_regulator_bulk_disable(void *res)
+{
+ struct regulator_bulk_devres *devres = res;
+ int i;
+
+ for (i = 0; i < devres->num_consumers; i++)
+ regulator_disable(devres->consumers[i].consumer);
+}
+
+/**
+ * devm_regulator_bulk_get_enable - managed get'n enable multiple regulators
+ *
+ * @dev: device to supply
+ * @num_consumers: number of consumers to register
+ * @id: list of supply names or regulator IDs
+ *
+ * @return 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to get and enable several regulator
+ * consumers in one operation with management; the regulators will
+ * automatically be disabled and freed when the device is unbound. If any
+ * of the regulators cannot be acquired, then any regulators that were
+ * allocated will be freed before returning to the caller.
+ */
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id)
+{
+ struct regulator_bulk_devres *devres;
+ struct regulator_bulk_data *consumers;
+ int i, ret;
+
+ devres = devm_kmalloc(dev, sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->consumers = devm_kcalloc(dev, num_consumers, sizeof(*consumers),
+ GFP_KERNEL);
+ consumers = devres->consumers;
+ if (!consumers)
+ return -ENOMEM;
+
+ devres->num_consumers = num_consumers;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].supply = id[i];
+
+ ret = devm_regulator_bulk_get(dev, num_consumers, consumers);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_consumers; i++) {
+ ret = regulator_enable(consumers[i].consumer);
+ if (ret)
+ goto unwind;
+ }
+
+ ret = devm_add_action(dev, devm_regulator_bulk_disable, devres);
+ if (!ret)
+ return 0;
+
+unwind:
+ while (--i >= 0)
+ regulator_disable(consumers[i].consumer);
+
+ devm_regulator_bulk_put(consumers);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_bulk_get_enable);
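+
+/*
+ * A minimal usage sketch (hypothetical consumer; the supply names are only
+ * examples):
+ *
+ *	static const char * const supplies[] = { "vdd", "vddio" };
+ *	int ret;
+ *
+ *	ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies),
+ *					     supplies);
+ *	if (ret)
+ *		return dev_err_probe(dev, ret, "can't enable supplies\n");
+ *
+ * Both supplies remain enabled until the device is unbound.
+ */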
+
static void devm_rdev_release(struct device *dev, void *res)
{
regulator_unregister(*(struct regulator_dev **)res);
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 5927d4f3eabd..95e61a2f43f5 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -220,6 +220,9 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
regtype);
}
+ if (of_find_property(np, "vin-supply", NULL))
+ config->input_supply = "vin";
+
return config;
}
@@ -259,6 +262,18 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *),
GFP_KERNEL);
+
+ if (config->input_supply) {
+ drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
+ config->input_supply,
+ GFP_KERNEL);
+ if (!drvdata->desc.supply_name) {
+ dev_err(&pdev->dev,
+ "Failed to allocate input supply\n");
+ return -ENOMEM;
+ }
+ }
+
if (!drvdata->gpiods)
return -ENOMEM;
for (i = 0; i < config->ngpios; i++) {
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 321bec6e3f8d..31b43426d47c 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -422,15 +422,13 @@ err:
return ret;
}
-static int lp8755_remove(struct i2c_client *client)
+static void lp8755_remove(struct i2c_client *client)
{
int icnt;
struct lp8755_chip *pchip = i2c_get_clientdata(client);
for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
regmap_write(pchip->regmap, icnt, 0x00);
-
- return 0;
}
static const struct i2c_device_id lp8755_id[] = {
diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c
index 03c6027682d8..39f803ff0a90 100644
--- a/drivers/regulator/max597x-regulator.c
+++ b/drivers/regulator/max597x-regulator.c
@@ -137,7 +137,7 @@ static int max597x_set_ovp(struct regulator_dev *rdev, int lim_uV, int severity,
static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
int severity, bool enable)
{
- int ret, val, reg;
+ int val, reg;
unsigned int vthst, vthfst;
struct max597x_regulator *data = rdev_get_drvdata(rdev);
@@ -183,9 +183,8 @@ static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
val = 0xFF;
reg = MAX5970_REG_DAC_FAST(rdev_id);
- ret = regmap_write(rdev->regmap, reg, val);
- return ret;
+ return regmap_write(rdev->regmap, reg, val);
}
static int max597x_get_status(struct regulator_dev *rdev)
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index fdcb0f508984..596cc36aaff6 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -434,9 +434,9 @@ static int max8973_init_dcdc(struct max8973_chip *max,
return ret;
}
-static int max8973_thermal_read_temp(void *data, int *temp)
+static int max8973_thermal_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct max8973_chip *mchip = data;
+ struct max8973_chip *mchip = tz->devdata;
unsigned int val;
int ret;
@@ -465,7 +465,7 @@ static irqreturn_t max8973_thermal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct thermal_zone_of_device_ops max77621_tz_ops = {
+static const struct thermal_zone_device_ops max77621_tz_ops = {
.get_temp = max8973_thermal_read_temp,
};
@@ -479,8 +479,8 @@ static int max8973_thermal_init(struct max8973_chip *mchip)
if (mchip->id != MAX77621)
return 0;
- tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip,
- &max77621_tz_ops);
+ tzd = devm_thermal_of_zone_register(mchip->dev, 0, mchip,
+ &max77621_tz_ops);
if (IS_ERR(tzd)) {
ret = PTR_ERR(tzd);
dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
diff --git a/drivers/regulator/mt6331-regulator.c b/drivers/regulator/mt6331-regulator.c
new file mode 100644
index 000000000000..56be9a3a84ab
--- /dev/null
+++ b/drivers/regulator/mt6331-regulator.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2022 Collabora Ltd.
+// Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+//
+// Based on mt6323-regulator.c,
+// Copyright (c) 2016 MediaTek Inc.
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6331/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6331-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6331_LDO_MODE_NORMAL 0
+#define MT6331_LDO_MODE_LP 1
+
+/*
+ * MT6331 regulators information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for query enable signal status of regulators
+ * @vselon_reg: Register sections for hardware control mode of bucks
+ * @vselctrl_reg: Register for controlling the buck control mode.
+ * @vselctrl_mask: Mask for query buck's voltage control mode.
+ * @status_reg: Register for regulator enable status where qi unavailable
+ * @status_mask: Mask for querying regulator enable status
+ */
+struct mt6331_regulator_info {
+ struct regulator_desc desc;
+ u32 qi;
+ u32 vselon_reg;
+ u32 vselctrl_reg;
+ u32 vselctrl_mask;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 status_reg;
+ u32 status_mask;
+};
+
+#define MT6331_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
+ vosel, vosel_mask, voselon, vosel_ctrl) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6331_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(13), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+ .status_mask = 0, \
+}
+
+#define MT6331_LDO_AO(match, vreg, ldo_volt_table, vosel, vosel_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6331_volt_table_ao_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ }, \
+}
+
+#define MT6331_LDO_S(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask, \
+ _status_reg, _status_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6331_volt_table_no_qi_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+ .status_reg = _status_reg, \
+ .status_mask = _status_mask, \
+}
+
+#define MT6331_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = (_modeset_reg ? \
+ &mt6331_volt_table_ops : \
+ &mt6331_volt_table_no_ms_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .qi = BIT(15), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+}
+
+#define MT6331_REG_FIXED(match, vreg, enreg, enbit, qibit, volt, \
+ _modeset_reg, _modeset_mask) \
+[MT6331_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = (_modeset_reg ? \
+ &mt6331_volt_fixed_ops : \
+ &mt6331_volt_fixed_no_ms_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6331_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .qi = BIT(qibit), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+}
+
+static const struct linear_range buck_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const unsigned int ldo_volt_table1[] = {
+ 2800000, 3000000, 0, 3200000
+};
+
+static const unsigned int ldo_volt_table2[] = {
+ 1500000, 1800000, 2500000, 2800000,
+};
+
+static const unsigned int ldo_volt_table3[] = {
+ 1200000, 1300000, 1500000, 1800000, 2000000, 2800000, 3000000, 3300000,
+};
+
+static const unsigned int ldo_volt_table4[] = {
+ 0, 0, 1700000, 1800000, 1860000, 2760000, 3000000, 3100000,
+};
+
+static const unsigned int ldo_volt_table5[] = {
+ 1800000, 3300000, 1800000, 3300000,
+};
+
+static const unsigned int ldo_volt_table6[] = {
+ 3000000, 3300000,
+};
+
+static const unsigned int ldo_volt_table7[] = {
+ 1200000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
+};
+
+static const unsigned int ldo_volt_table8[] = {
+ 900000, 1000000, 1100000, 1220000, 1300000, 1500000, 1500000, 1500000,
+};
+
+static const unsigned int ldo_volt_table9[] = {
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1300000,
+};
+
+static const unsigned int ldo_volt_table10[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const unsigned int ldo_volt_table11[] = {
+ 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
+};
+
+static int mt6331_get_status(struct regulator_dev *rdev)
+{
+ struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 regval;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
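+ /* The QI bit reflects the actual hardware enable state */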
+ return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6331_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ val = MT6331_LDO_MODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6331_LDO_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
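+ /* Shift the mode value to the lowest bit of the modeset field */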
+ val <<= ffs(info->modeset_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+}
+
+static unsigned int mt6331_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6331_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+ if (ret < 0)
+ return ret;
+
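+ /* Isolate the modeset field and shift it down to bit 0 */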
+ val &= info->modeset_mask;
+ val >>= ffs(info->modeset_mask) - 1;
+
+ return (val & BIT(0)) ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops mt6331_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+};
+
+static const struct regulator_ops mt6331_volt_table_no_ms_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+};
+
+static const struct regulator_ops mt6331_volt_table_no_qi_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = mt6331_ldo_set_mode,
+ .get_mode = mt6331_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6331_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+ .set_mode = mt6331_ldo_set_mode,
+ .get_mode = mt6331_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6331_volt_table_ao_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops mt6331_volt_fixed_no_ms_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+};
+
+static const struct regulator_ops mt6331_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6331_get_status,
+ .set_mode = mt6331_ldo_set_mode,
+ .get_mode = mt6331_ldo_get_mode,
+};
+
+/* The array is indexed by id (MT6331_ID_XXX) */
+static struct mt6331_regulator_info mt6331_regulators[] = {
+ MT6331_BUCK("buck-vdvfs11", VDVFS11, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS11_CON9,
+ MT6331_VDVFS11_CON11, GENMASK(6, 0),
+ MT6331_VDVFS11_CON12, MT6331_VDVFS11_CON7),
+ MT6331_BUCK("buck-vdvfs12", VDVFS12, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS12_CON9,
+ MT6331_VDVFS12_CON11, GENMASK(6, 0),
+ MT6331_VDVFS12_CON12, MT6331_VDVFS12_CON7),
+ MT6331_BUCK("buck-vdvfs13", VDVFS13, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS13_CON9,
+ MT6331_VDVFS13_CON11, GENMASK(6, 0),
+ MT6331_VDVFS13_CON12, MT6331_VDVFS13_CON7),
+ MT6331_BUCK("buck-vdvfs14", VDVFS14, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VDVFS14_CON9,
+ MT6331_VDVFS14_CON11, GENMASK(6, 0),
+ MT6331_VDVFS14_CON12, MT6331_VDVFS14_CON7),
+ MT6331_BUCK("buck-vcore2", VCORE2, 700000, 1493750, 6250,
+ buck_volt_range, MT6331_VCORE2_CON9,
+ MT6331_VCORE2_CON11, GENMASK(6, 0),
+ MT6331_VCORE2_CON12, MT6331_VCORE2_CON7),
+ MT6331_REG_FIXED("buck-vio18", VIO18, MT6331_VIO18_CON9, 0, 13, 1800000, 0, 0),
+ MT6331_REG_FIXED("ldo-vrtc", VRTC, MT6331_DIGLDO_CON11, 8, 15, 2800000, 0, 0),
+ MT6331_REG_FIXED("ldo-vtcxo1", VTCXO1, MT6331_ANALDO_CON1, 10, 15, 2800000,
+ MT6331_ANALDO_CON1, GENMASK(1, 0)),
+ MT6331_REG_FIXED("ldo-vtcxo2", VTCXO2, MT6331_ANALDO_CON2, 10, 15, 2800000,
+ MT6331_ANALDO_CON2, GENMASK(1, 0)),
+ MT6331_REG_FIXED("ldo-vsram", VSRAM_DVFS1, MT6331_SYSLDO_CON4, 10, 15, 1012500,
+ MT6331_SYSLDO_CON4, GENMASK(1, 0)),
+ MT6331_REG_FIXED("ldo-vio28", VIO28, MT6331_DIGLDO_CON1, 10, 15, 2800000,
+ MT6331_DIGLDO_CON1, GENMASK(1, 0)),
+ MT6331_LDO("ldo-avdd32aud", AVDD32_AUD, ldo_volt_table1, MT6331_ANALDO_CON3, 10,
+ MT6331_ANALDO_CON10, GENMASK(6, 5), MT6331_ANALDO_CON3, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vauxa32", VAUXA32, ldo_volt_table1, MT6331_ANALDO_CON4, 10,
+ MT6331_ANALDO_CON6, GENMASK(6, 5), MT6331_ANALDO_CON4, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vemc33", VEMC33, ldo_volt_table6, MT6331_DIGLDO_CON5, 10,
+ MT6331_DIGLDO_CON17, BIT(6), MT6331_DIGLDO_CON5, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vibr", VIBR, ldo_volt_table3, MT6331_DIGLDO_CON12, 10,
+ MT6331_DIGLDO_CON20, GENMASK(6, 4), MT6331_DIGLDO_CON12, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vmc", VMC, ldo_volt_table5, MT6331_DIGLDO_CON3, 10,
+ MT6331_DIGLDO_CON15, GENMASK(5, 4), MT6331_DIGLDO_CON3, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vmch", VMCH, ldo_volt_table6, MT6331_DIGLDO_CON4, 10,
+ MT6331_DIGLDO_CON16, BIT(6), MT6331_DIGLDO_CON4, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vmipi", VMIPI, ldo_volt_table3, MT6331_SYSLDO_CON5, 10,
+ MT6331_SYSLDO_CON13, GENMASK(5, 3), MT6331_SYSLDO_CON5, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vsim1", VSIM1, ldo_volt_table4, MT6331_DIGLDO_CON8, 10,
+ MT6331_DIGLDO_CON21, GENMASK(6, 4), MT6331_DIGLDO_CON8, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vsim2", VSIM2, ldo_volt_table4, MT6331_DIGLDO_CON9, 10,
+ MT6331_DIGLDO_CON22, GENMASK(6, 4), MT6331_DIGLDO_CON9, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vusb10", VUSB10, ldo_volt_table9, MT6331_SYSLDO_CON2, 10,
+ MT6331_SYSLDO_CON10, GENMASK(5, 3), MT6331_SYSLDO_CON2, GENMASK(1, 0)),
+ MT6331_LDO("ldo-vcama", VCAMA, ldo_volt_table2, MT6331_ANALDO_CON5, 15,
+ MT6331_ANALDO_CON9, GENMASK(5, 4), 0, 0),
+ MT6331_LDO_S("ldo-vcamaf", VCAM_AF, ldo_volt_table3, MT6331_DIGLDO_CON2, 10,
+ MT6331_DIGLDO_CON14, GENMASK(6, 4), MT6331_DIGLDO_CON2, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(0)),
+ MT6331_LDO_S("ldo-vcamd", VCAMD, ldo_volt_table8, MT6331_SYSLDO_CON1, 15,
+ MT6331_SYSLDO_CON9, GENMASK(6, 4), MT6331_SYSLDO_CON1, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(11)),
+ MT6331_LDO_S("ldo-vcamio", VCAM_IO, ldo_volt_table10, MT6331_SYSLDO_CON3, 10,
+ MT6331_SYSLDO_CON11, GENMASK(4, 3), MT6331_SYSLDO_CON3, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(13)),
+ MT6331_LDO_S("ldo-vgp1", VGP1, ldo_volt_table3, MT6331_DIGLDO_CON6, 10,
+ MT6331_DIGLDO_CON19, GENMASK(6, 4), MT6331_DIGLDO_CON6, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(4)),
+ MT6331_LDO_S("ldo-vgp2", VGP2, ldo_volt_table10, MT6331_SYSLDO_CON6, 10,
+ MT6331_SYSLDO_CON14, GENMASK(4, 3), MT6331_SYSLDO_CON6, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(15)),
+ MT6331_LDO_S("ldo-vgp3", VGP3, ldo_volt_table10, MT6331_SYSLDO_CON7, 10,
+ MT6331_SYSLDO_CON15, GENMASK(4, 3), MT6331_SYSLDO_CON7, GENMASK(1, 0),
+ MT6331_EN_STATUS2, BIT(0)),
+ MT6331_LDO_S("ldo-vgp4", VGP4, ldo_volt_table7, MT6331_DIGLDO_CON7, 10,
+ MT6331_DIGLDO_CON18, GENMASK(6, 4), MT6331_DIGLDO_CON7, GENMASK(1, 0),
+ MT6331_EN_STATUS1, BIT(5)),
+ MT6331_LDO_AO("ldo-vdig18", VDIG18, ldo_volt_table11,
+ MT6331_DIGLDO_CON28, GENMASK(14, 12)),
+};
+
+static int mt6331_set_buck_vosel_reg(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6331 = dev_get_drvdata(pdev->dev.parent);
+ int i;
+ u32 regval;
+
+ for (i = 0; i < MT6331_ID_VREG_MAX; i++) {
+ if (mt6331_regulators[i].vselctrl_reg) {
+ if (regmap_read(mt6331->regmap,
+ mt6331_regulators[i].vselctrl_reg,
+ &regval) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to read buck ctrl\n");
+ return -EIO;
+ }
+
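+ /* HW-controlled buck: use the ON-state voltage selector register */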
+ if (regval & mt6331_regulators[i].vselctrl_mask) {
+ mt6331_regulators[i].desc.vsel_reg =
+ mt6331_regulators[i].vselon_reg;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt6331_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6331 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+ u32 reg_value;
+
+ /* Query the buck controller to select the active voltage selection register */
+ if (mt6331_set_buck_vosel_reg(pdev))
+ return -EIO;
+
+ /* Read the PMIC chip revision to check whether this version is supported */
+ if (regmap_read(mt6331->regmap, MT6331_HWCID, &reg_value) < 0) {
+ dev_err(&pdev->dev, "Failed to read Chip ID\n");
+ return -EIO;
+ }
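+ /* Only the lowest byte of HWCID carries the chip identification */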
+ reg_value &= GENMASK(7, 0);
+
+ dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+ /*
+ * ChipID 0x10 is "MT6331 E1", which has a different voltage table
+ * that is currently not supported in this driver. Upon detecting
+ * this ID, refuse to register the regulators, as we would wrongly
+ * interpret the VSEL for this revision and potentially overvolt
+ * some devices.
+ */
+ if (reg_value == 0x10) {
+ dev_err(&pdev->dev, "Chip version not supported. Bailing out.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MT6331_ID_VREG_MAX; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6331_regulators[i];
+ config.regmap = mt6331->regmap;
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6331_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6331_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct platform_device_id mt6331_platform_ids[] = {
+ {"mt6331-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6331_platform_ids);
+
+static struct platform_driver mt6331_regulator_driver = {
+ .driver = {
+ .name = "mt6331-regulator",
+ },
+ .probe = mt6331_regulator_probe,
+ .id_table = mt6331_platform_ids,
+};
+
+module_platform_driver(mt6331_regulator_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6331 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/mt6332-regulator.c b/drivers/regulator/mt6332-regulator.c
new file mode 100644
index 000000000000..77a27d8127a3
--- /dev/null
+++ b/drivers/regulator/mt6332-regulator.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2022 Collabora Ltd.
+// Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+//
+// Based on mt6323-regulator.c,
+// Copyright (c) 2016 MediaTek Inc.
+//
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6332/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6332-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MT6332_LDO_MODE_NORMAL 0
+#define MT6332_LDO_MODE_LP 1
+
+/*
+ * MT6332 regulators information
+ *
+ * @desc: standard fields of regulator description.
+ * @qi: Mask for querying the enable signal status of a regulator
+ * @vselon_reg: Register for the buck voltage selector used in hardware control mode
+ * @vselctrl_reg: Register for controlling the buck control mode
+ * @vselctrl_mask: Mask for querying the buck's voltage control mode
+ * @modeset_reg: Register for setting the regulator operating mode (normal/low-power)
+ * @modeset_mask: Mask for the operating mode bits in @modeset_reg
+ * @status_reg: Register for the regulator enable status where qi is unavailable
+ * @status_mask: Mask for querying the regulator enable status
+ */
+struct mt6332_regulator_info {
+ struct regulator_desc desc;
+ u32 qi;
+ u32 vselon_reg;
+ u32 vselctrl_reg;
+ u32 vselctrl_mask;
+ u32 modeset_reg;
+ u32 modeset_mask;
+ u32 status_reg;
+ u32 status_mask;
+};
+
+#define MT6332_BUCK(match, vreg, min, max, step, volt_ranges, enreg, \
+ vosel, vosel_mask, voselon, vosel_ctrl) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_buck_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(13), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+ .status_mask = 0, \
+}
+
+#define MT6332_LDO_LINEAR(match, vreg, min, max, step, volt_ranges, \
+ enreg, vosel, vosel_mask, voselon, \
+ vosel_ctrl, _modeset_reg, _modeset_mask) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_ldo_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = (max - min)/step + 1, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(0), \
+ }, \
+ .qi = BIT(15), \
+ .vselon_reg = voselon, \
+ .vselctrl_reg = vosel_ctrl, \
+ .vselctrl_mask = BIT(1), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+ .status_mask = 0, \
+}
+
+#define MT6332_LDO_AO(match, vreg, ldo_volt_table, vosel, vosel_mask) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_volt_table_ao_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ }, \
+}
+
+#define MT6332_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel, \
+ vosel_mask, _modeset_reg, _modeset_mask) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .volt_table = ldo_volt_table, \
+ .vsel_reg = vosel, \
+ .vsel_mask = vosel_mask, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ }, \
+ .qi = BIT(15), \
+ .modeset_reg = _modeset_reg, \
+ .modeset_mask = _modeset_mask, \
+ .status_mask = 0, \
+}
+
+#define MT6332_REG_FIXED(match, vreg, enreg, enbit, qibit, volt, stbit) \
+[MT6332_ID_##vreg] = { \
+ .desc = { \
+ .name = #vreg, \
+ .of_match = of_match_ptr(match), \
+ .ops = &mt6332_volt_fixed_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MT6332_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = 1, \
+ .enable_reg = enreg, \
+ .enable_mask = BIT(enbit), \
+ .min_uV = volt, \
+ }, \
+ .qi = BIT(qibit), \
+ .status_reg = MT6332_EN_STATUS0, \
+ .status_mask = BIT(stbit), \
+}
+
+static const struct linear_range boost_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(3500000, 0, 0x7f, 31250),
+};
+
+static const struct linear_range buck_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
+
+static const struct linear_range buck_pa_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
+};
+
+static const struct linear_range buck_rf_volt_range[] = {
+ REGULATOR_LINEAR_RANGE(1050000, 0, 0x7f, 9375),
+};
+
+static const unsigned int ldo_volt_table1[] = {
+ 2800000, 3000000, 0, 3200000
+};
+
+static const unsigned int ldo_volt_table2[] = {
+ 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
+};
+
+static int mt6332_get_status(struct regulator_dev *rdev)
+{
+ struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
+ u32 reg, en_mask, regval;
+ int ret;
+
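+ /* Without a QI bit, the enable state comes from a dedicated status register */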
+ if (info->qi > 0) {
+ reg = info->desc.enable_reg;
+ en_mask = info->qi;
+ } else {
+ reg = info->status_reg;
+ en_mask = info->status_mask;
+ }
+
+ ret = regmap_read(rdev->regmap, reg, &regval);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+ return ret;
+ }
+
+ return (regval & en_mask) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static int mt6332_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
+ int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ val = MT6332_LDO_MODE_LP;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = MT6332_LDO_MODE_NORMAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val <<= ffs(info->modeset_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, info->modeset_reg,
+ info->modeset_mask, val);
+}
+
+static unsigned int mt6332_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct mt6332_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->modeset_reg, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= info->modeset_mask;
+ val >>= ffs(info->modeset_mask) - 1;
+
+ return (val & BIT(0)) ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+}
+
+static const struct regulator_ops mt6332_buck_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+};
+
+static const struct regulator_ops mt6332_ldo_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+ .set_mode = mt6332_ldo_set_mode,
+ .get_mode = mt6332_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6332_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+ .set_mode = mt6332_ldo_set_mode,
+ .get_mode = mt6332_ldo_get_mode,
+};
+
+static const struct regulator_ops mt6332_volt_table_ao_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops mt6332_volt_fixed_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6332_get_status,
+};
+
+/* The array is indexed by id (MT6332_ID_XXX) */
+static struct mt6332_regulator_info mt6332_regulators[] = {
+ MT6332_BUCK("buck-vdram", VDRAM, 700000, 1493750, 6250, buck_volt_range,
+ MT6332_EN_STATUS0, MT6332_VDRAM_CON11, GENMASK(6, 0),
+ MT6332_VDRAM_CON12, MT6332_VDRAM_CON7),
+ MT6332_BUCK("buck-vdvfs2", VDVFS2, 700000, 1312500, 6250, buck_volt_range,
+ MT6332_VDVFS2_CON9, MT6332_VDVFS2_CON11, GENMASK(6, 0),
+ MT6332_VDVFS2_CON12, MT6332_VDVFS2_CON7),
+ MT6332_BUCK("buck-vpa", VPA, 500000, 3400000, 50000, buck_pa_volt_range,
+ MT6332_VPA_CON9, MT6332_VPA_CON11, GENMASK(5, 0),
+ MT6332_VPA_CON12, MT6332_VPA_CON7),
+ MT6332_BUCK("buck-vrf18a", VRF1, 1050000, 2240625, 9375, buck_rf_volt_range,
+ MT6332_VRF1_CON9, MT6332_VRF1_CON11, GENMASK(6, 0),
+ MT6332_VRF1_CON12, MT6332_VRF1_CON7),
+ MT6332_BUCK("buck-vrf18b", VRF2, 1050000, 2240625, 9375, buck_rf_volt_range,
+ MT6332_VRF2_CON9, MT6332_VRF2_CON11, GENMASK(6, 0),
+ MT6332_VRF2_CON12, MT6332_VRF2_CON7),
+ MT6332_BUCK("buck-vsbst", VSBST, 3500000, 7468750, 31250, boost_volt_range,
+ MT6332_VSBST_CON8, MT6332_VSBST_CON12, GENMASK(6, 0),
+ MT6332_VSBST_CON13, MT6332_VSBST_CON8),
+ MT6332_LDO("ldo-vauxb32", VAUXB32, ldo_volt_table1, MT6332_LDO_CON1, 10,
+ MT6332_LDO_CON9, GENMASK(6, 5), MT6332_LDO_CON1, GENMASK(1, 0)),
+ MT6332_REG_FIXED("ldo-vbif28", VBIF28, MT6332_LDO_CON2, 10, 0, 2800000, 1),
+ MT6332_REG_FIXED("ldo-vusb33", VUSB33, MT6332_LDO_CON3, 10, 0, 3300000, 2),
+ MT6332_LDO_LINEAR("ldo-vsram", VSRAM_DVFS2, 700000, 1493750, 6250, buck_volt_range,
+ MT6332_EN_STATUS0, MT6332_LDO_CON8, GENMASK(15, 9),
+ MT6332_VDVFS2_CON23, MT6332_VDVFS2_CON22,
+ MT6332_LDO_CON5, GENMASK(1, 0)),
+ MT6332_LDO_AO("ldo-vdig18", VDIG18, ldo_volt_table2, MT6332_LDO_CON12, GENMASK(11, 9)),
+};
+
+static int mt6332_set_buck_vosel_reg(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6332 = dev_get_drvdata(pdev->dev.parent);
+ int i;
+ u32 regval;
+
+ for (i = 0; i < MT6332_ID_VREG_MAX; i++) {
+ if (mt6332_regulators[i].vselctrl_reg) {
+ if (regmap_read(mt6332->regmap,
+ mt6332_regulators[i].vselctrl_reg,
+ &regval) < 0) {
+ dev_err(&pdev->dev,
+ "Failed to read buck ctrl\n");
+ return -EIO;
+ }
+
+ if (regval & mt6332_regulators[i].vselctrl_mask) {
+ mt6332_regulators[i].desc.vsel_reg =
+ mt6332_regulators[i].vselon_reg;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mt6332_regulator_probe(struct platform_device *pdev)
+{
+ struct mt6397_chip *mt6332 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ int i;
+ u32 reg_value;
+
+ /* Query the buck controller to select the active voltage selection register */
+ if (mt6332_set_buck_vosel_reg(pdev))
+ return -EIO;
+
+ /* Read the PMIC chip revision to check whether this version is supported */
+ if (regmap_read(mt6332->regmap, MT6332_HWCID, &reg_value) < 0) {
+ dev_err(&pdev->dev, "Failed to read Chip ID\n");
+ return -EIO;
+ }
+ reg_value &= GENMASK(7, 0);
+
+ dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+ /*
+ * ChipID 0x10 is "MT6332 E1", which has a different voltage table
+ * that is currently not supported in this driver. Upon detecting
+ * this ID, refuse to register the regulators, as we would wrongly
+ * interpret the VSEL for this revision and potentially overvolt
+ * some devices.
+ */
+ if (reg_value == 0x10) {
+ dev_err(&pdev->dev, "Chip version not supported. Bailing out.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MT6332_ID_VREG_MAX; i++) {
+ config.dev = &pdev->dev;
+ config.driver_data = &mt6332_regulators[i];
+ config.regmap = mt6332->regmap;
+ rdev = devm_regulator_register(&pdev->dev,
+ &mt6332_regulators[i].desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register %s\n",
+ mt6332_regulators[i].desc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static const struct platform_device_id mt6332_platform_ids[] = {
+ {"mt6332-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6332_platform_ids);
+
+static struct platform_driver mt6332_regulator_driver = {
+ .driver = {
+ .name = "mt6332-regulator",
+ },
+ .probe = mt6332_regulator_probe,
+ .id_table = mt6332_platform_ids,
+};
+
+module_platform_driver(mt6332_regulator_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6332 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index e12b681c72e5..0aff1c2886b5 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -676,7 +676,7 @@ clean:
}
/**
- * of_parse_coupled regulator - Get regulator_dev pointer from rdev's property
+ * of_parse_coupled_regulator() - Get regulator_dev pointer from rdev's property
* @rdev: Pointer to regulator_dev, whose DTS is used as a source to parse
* "regulator-coupled-with" property
* @index: Index in phandles array
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 6b617024a67d..d899d6e98fb8 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -766,7 +766,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ regulator_num * sizeof(struct pfuze_regulator));
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 561de6b2e6e3..4158ff126a67 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -306,9 +306,10 @@ static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
}
/**
- * rpmh_regulator_vrm_set_load() - set the regulator mode based upon the load
- * current requested
+ * rpmh_regulator_vrm_get_optimum_mode() - get the mode based on the load
* @rdev: Regulator device pointer for the rpmh-regulator
+ * @input_uV: Input voltage
+ * @output_uV: Output voltage
* @load_uA: Aggregated load current in microamps
*
* This function is used in the regulator_ops for VRM type RPMh regulator
@@ -316,17 +317,15 @@ static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
*
* Return: 0 on success, errno on failure
*/
-static int rpmh_regulator_vrm_set_load(struct regulator_dev *rdev, int load_uA)
+static unsigned int rpmh_regulator_vrm_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA)
{
struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
- unsigned int mode;
if (load_uA >= vreg->hw_data->hpm_min_load_uA)
- mode = REGULATOR_MODE_NORMAL;
+ return REGULATOR_MODE_NORMAL;
else
- mode = REGULATOR_MODE_IDLE;
-
- return rpmh_regulator_vrm_set_mode(rdev, mode);
+ return REGULATOR_MODE_IDLE;
}
static int rpmh_regulator_vrm_set_bypass(struct regulator_dev *rdev,
@@ -375,7 +374,7 @@ static const struct regulator_ops rpmh_regulator_vrm_drms_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.set_mode = rpmh_regulator_vrm_set_mode,
.get_mode = rpmh_regulator_vrm_get_mode,
- .set_load = rpmh_regulator_vrm_set_load,
+ .get_optimum_mode = rpmh_regulator_vrm_get_optimum_mode,
};
static const struct regulator_ops rpmh_regulator_vrm_bypass_ops = {
@@ -1199,6 +1198,52 @@ static const struct rpmh_vreg_init_data pmr735a_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pm660_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic4_hfsmps3, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic4_hfsmps3, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic4_hfsmps3, "vdd-s6"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic4_nldo, "vdd-l1-l6-l7"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic4_nldo, "vdd-l2-l3"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic4_nldo, "vdd-l2-l3"),
+ /* ldo4 is inaccessible on PM660 */
+ RPMH_VREG("ldo5", "ldo%s5", &pmic4_nldo, "vdd-l5"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic4_nldo, "vdd-l1-l6-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic4_nldo, "vdd-l1-l6-l7"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic4_pldo_lv, "vdd-l8-l9-l10-l11-l12-l13-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo18", "ldo%s18", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic4_pldo, "vdd-l15-l16-l17-l18-l19"),
+ {}
+};
+
+static const struct rpmh_vreg_init_data pm660l_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic4_ftsmps426, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic4_ftsmps426, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic4_ftsmps426, "vdd-s3-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic4_ftsmps426, "vdd-s5"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic4_nldo, "vdd-l1-l9-l10"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic4_pldo, "vdd-l2"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic4_pldo, "vdd-l4-l6"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic4_pldo, "vdd-l4-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic4_pldo, "vdd-l3-l5-l7-l8"),
+ RPMH_VREG("bob", "bob%s1", &pmic4_bob, "vdd-bob"),
+ {}
+};
+
static int rpmh_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1321,6 +1366,14 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.compatible = "qcom,pmr735a-rpmh-regulators",
.data = pmr735a_vreg_data,
},
+ {
+ .compatible = "qcom,pm660-rpmh-regulators",
+ .data = pm660_vreg_data,
+ },
+ {
+ .compatible = "qcom,pm660l-rpmh-regulators",
+ .data = pm660l_vreg_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rpmh_regulator_match_table);
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index 7f9d66ac37ff..3c41b71a1f52 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -802,6 +802,12 @@ static const struct rpm_regulator_data rpm_pm8018_regulators[] = {
};
static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
+ { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8058_LDO0, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l1", QCOM_RPM_PM8058_LDO1, &pm8058_nldo, "vdd_l0_l1_lvs" },
{ "l2", QCOM_RPM_PM8058_LDO2, &pm8058_pldo, "vdd_l2_l11_l12" },
@@ -829,12 +835,6 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
{ "l24", QCOM_RPM_PM8058_LDO24, &pm8058_nldo, "vdd_l23_l24_l25" },
{ "l25", QCOM_RPM_PM8058_LDO25, &pm8058_nldo, "vdd_l23_l24_l25" },
- { "s0", QCOM_RPM_PM8058_SMPS0, &pm8058_smps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8058_SMPS1, &pm8058_smps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8058_SMPS2, &pm8058_smps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8058_SMPS3, &pm8058_smps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8058_SMPS4, &pm8058_smps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8058_LVS0, &pm8058_switch, "vdd_l0_l1_lvs" },
{ "lvs1", QCOM_RPM_PM8058_LVS1, &pm8058_switch, "vdd_l0_l1_lvs" },
@@ -843,6 +843,12 @@ static const struct rpm_regulator_data rpm_pm8058_regulators[] = {
};
static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
+ { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
+ { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
+ { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
+ { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
+ { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
+
{ "l0", QCOM_RPM_PM8901_LDO0, &pm8901_nldo, "vdd_l0" },
{ "l1", QCOM_RPM_PM8901_LDO1, &pm8901_pldo, "vdd_l1" },
{ "l2", QCOM_RPM_PM8901_LDO2, &pm8901_pldo, "vdd_l2" },
@@ -851,12 +857,6 @@ static const struct rpm_regulator_data rpm_pm8901_regulators[] = {
{ "l5", QCOM_RPM_PM8901_LDO5, &pm8901_pldo, "vdd_l5" },
{ "l6", QCOM_RPM_PM8901_LDO6, &pm8901_pldo, "vdd_l6" },
- { "s0", QCOM_RPM_PM8901_SMPS0, &pm8901_ftsmps, "vdd_s0" },
- { "s1", QCOM_RPM_PM8901_SMPS1, &pm8901_ftsmps, "vdd_s1" },
- { "s2", QCOM_RPM_PM8901_SMPS2, &pm8901_ftsmps, "vdd_s2" },
- { "s3", QCOM_RPM_PM8901_SMPS3, &pm8901_ftsmps, "vdd_s3" },
- { "s4", QCOM_RPM_PM8901_SMPS4, &pm8901_ftsmps, "vdd_s4" },
-
{ "lvs0", QCOM_RPM_PM8901_LVS0, &pm8901_switch, "lvs0_in" },
{ "lvs1", QCOM_RPM_PM8901_LVS1, &pm8901_switch, "lvs1_in" },
{ "lvs2", QCOM_RPM_PM8901_LVS2, &pm8901_switch, "lvs2_in" },
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 59024c639141..f98168d58dce 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -668,6 +668,15 @@ static const struct regulator_desc pm660l_bob = {
.ops = &rpm_bob_ops,
};
+static const struct regulator_desc pm6125_ftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(300000, 0, 268, 4000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 269,
+ .ops = &rpm_smps_ldo_ops,
+};
+
static const struct regulator_desc pms405_hfsmps3 = {
.linear_ranges = (struct linear_range[]) {
REGULATOR_LINEAR_RANGE(320000, 0, 215, 8000),
@@ -772,6 +781,158 @@ static const struct rpm_regulator_data rpm_mp5496_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm2250_lvftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm2250_lvftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm2250_lvftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm2250_ftsmps, "vdd_s4" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ {}
+};
+
+static const struct rpm_regulator_data rpm_pm6125_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm6125_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm6125_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm6125_ftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm6125_ftsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8998_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8998_hfsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8998_hfsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPA, 8, &pm6125_ftsmps, "vdd_s8" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l2_l3_l4" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3_l4" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_nldo660, "vdd_l2_l3_l4" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l6_l8" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l6_l8" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l9_l11" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l9_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l12_l16" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l10_l13_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l12_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_nldo660, "vdd_l1_l7_l17_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm660_pldo660, "vdd_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm660_pldo660, "vdd_l23_l24" },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm660_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm660_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm660_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm660_ftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm660_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm660_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm660_hfsmps, "vdd_s6" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l6_l7" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_ht_nldo, "vdd_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3" },
+ /* l4 is inaccessible on PM660 */
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_ht_nldo, "vdd_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_ht_nldo, "vdd_l1_l6_l7" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_ht_nldo, "vdd_l1_l6_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm660l_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pm660_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pm660_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_RWCX, 0, &pm660_ftsmps, "vdd_s3_s4" },
+ { "s5", QCOM_SMD_RPM_RWMX, 0, &pm660_ftsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOB, 1, &pm660_nldo660, "vdd_l1_l9_l10" },
+ { "l2", QCOM_SMD_RPM_LDOB, 2, &pm660_pldo660, "vdd_l2" },
+ { "l3", QCOM_SMD_RPM_LDOB, 3, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l4", QCOM_SMD_RPM_LDOB, 4, &pm660_pldo660, "vdd_l4_l6" },
+ { "l5", QCOM_SMD_RPM_LDOB, 5, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l6", QCOM_SMD_RPM_LDOB, 6, &pm660_pldo660, "vdd_l4_l6" },
+ { "l7", QCOM_SMD_RPM_LDOB, 7, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l8", QCOM_SMD_RPM_LDOB, 8, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
+ { "l9", QCOM_SMD_RPM_RWLC, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
+ { "l10", QCOM_SMD_RPM_RWLM, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
+ { "bob", QCOM_SMD_RPM_BOBB, 1, &pm660l_bob, "vdd_bob", },
+ { }
+};
+
+static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
+ { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
+ {}
+};
+
static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
@@ -833,44 +994,6 @@ static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pm8226_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8226_hfsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8226_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8226_hfsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8226_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8226_hfsmps, "vdd_s5" },
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8226_nldo, "vdd_l3_l24_l26" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8226_nldo, "vdd_l1_l2_l4_l5" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8226_pldo, "vdd_l10_l11_l13" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8226_pldo, "vdd_l10_l11_l13" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8226_pldo, "vdd_l12_l14" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8226_pldo, "vdd_l10_l11_l13" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8226_pldo, "vdd_l12_l14" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8226_pldo, "vdd_l15_l16_l17_l18" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8226_nldo, "vdd_l3_l24_l26" },
- { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8226_pldo, "vdd_l25" },
- { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8226_nldo, "vdd_l3_l24_l26" },
- { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8226_pldo, "vdd_l6_l7_l8_l9_l27" },
- { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8226_pldo, "vdd_l19_l20_l21_l22_l23_l28" },
- { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8226_switch, "vdd_lvs1" },
- {}
-};
-
static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
@@ -912,57 +1035,6 @@ static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pma8084_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pma8084_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pma8084_hfsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pma8084_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pma8084_hfsmps, "vdd_s5" },
- { "s6", QCOM_SMD_RPM_SMPA, 6, &pma8084_ftsmps, "vdd_s6" },
- { "s7", QCOM_SMD_RPM_SMPA, 7, &pma8084_ftsmps, "vdd_s7" },
- { "s8", QCOM_SMD_RPM_SMPA, 8, &pma8084_ftsmps, "vdd_s8" },
- { "s9", QCOM_SMD_RPM_SMPA, 9, &pma8084_ftsmps, "vdd_s9" },
- { "s10", QCOM_SMD_RPM_SMPA, 10, &pma8084_ftsmps, "vdd_s10" },
- { "s11", QCOM_SMD_RPM_SMPA, 11, &pma8084_ftsmps, "vdd_s11" },
- { "s12", QCOM_SMD_RPM_SMPA, 12, &pma8084_ftsmps, "vdd_s12" },
-
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pma8084_nldo, "vdd_l1_l11" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pma8084_pldo, "vdd_l5_l7" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pma8084_pldo, "vdd_l5_l7" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pma8084_pldo, "vdd_l8" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pma8084_nldo, "vdd_l1_l11" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pma8084_pldo, "vdd_l16_l25" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pma8084_pldo, "vdd_l17" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pma8084_pldo, "vdd_l18" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pma8084_pldo, "vdd_l19" },
- { "l20", QCOM_SMD_RPM_LDOA, 20, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l21", QCOM_SMD_RPM_LDOA, 21, &pma8084_pldo, "vdd_l21" },
- { "l22", QCOM_SMD_RPM_LDOA, 22, &pma8084_pldo, "vdd_l22" },
- { "l23", QCOM_SMD_RPM_LDOA, 23, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l24", QCOM_SMD_RPM_LDOA, 24, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
- { "l25", QCOM_SMD_RPM_LDOA, 25, &pma8084_pldo, "vdd_l16_l25" },
- { "l26", QCOM_SMD_RPM_LDOA, 26, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
- { "l27", QCOM_SMD_RPM_LDOA, 27, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
-
- { "lvs1", QCOM_SMD_RPM_VSA, 1, &pma8084_switch },
- { "lvs2", QCOM_SMD_RPM_VSA, 2, &pma8084_switch },
- { "lvs3", QCOM_SMD_RPM_VSA, 3, &pma8084_switch },
- { "lvs4", QCOM_SMD_RPM_VSA, 4, &pma8084_switch },
- { "5vs1", QCOM_SMD_RPM_VSA, 5, &pma8084_switch },
-
- {}
-};
-
static const struct rpm_regulator_data rpm_pm8950_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8950_hfsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8950_hfsmps, "vdd_s2" },
@@ -1082,14 +1154,6 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
- { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
- {}
-};
-
static const struct rpm_regulator_data rpm_pm8998_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8998_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8998_ftsmps, "vdd_s2" },
@@ -1137,57 +1201,68 @@ static const struct rpm_regulator_data rpm_pm8998_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
- { "bob", QCOM_SMD_RPM_BOBB, 1, &pmi8998_bob, "vdd_bob" },
+static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pma8084_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pma8084_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pma8084_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pma8084_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pma8084_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pma8084_ftsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pma8084_ftsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPA, 8, &pma8084_ftsmps, "vdd_s8" },
+ { "s9", QCOM_SMD_RPM_SMPA, 9, &pma8084_ftsmps, "vdd_s9" },
+ { "s10", QCOM_SMD_RPM_SMPA, 10, &pma8084_ftsmps, "vdd_s10" },
+ { "s11", QCOM_SMD_RPM_SMPA, 11, &pma8084_ftsmps, "vdd_s11" },
+ { "s12", QCOM_SMD_RPM_SMPA, 12, &pma8084_ftsmps, "vdd_s12" },
+
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pma8084_nldo, "vdd_l1_l11" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pma8084_pldo, "vdd_l5_l7" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pma8084_pldo, "vdd_l5_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pma8084_pldo, "vdd_l8" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pma8084_nldo, "vdd_l1_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pma8084_pldo, "vdd_l16_l25" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pma8084_pldo, "vdd_l17" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pma8084_pldo, "vdd_l18" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pma8084_pldo, "vdd_l19" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pma8084_pldo, "vdd_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pma8084_pldo, "vdd_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pma8084_pldo, "vdd_l9_l10_l13_l20_l23_l24" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pma8084_pldo, "vdd_l16_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pma8084_pldo, "vdd_l6_l12_l14_l15_l26" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pma8084_nldo, "vdd_l2_l3_l4_l27" },
+
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pma8084_switch },
+ { "lvs2", QCOM_SMD_RPM_VSA, 2, &pma8084_switch },
+ { "lvs3", QCOM_SMD_RPM_VSA, 3, &pma8084_switch },
+ { "lvs4", QCOM_SMD_RPM_VSA, 4, &pma8084_switch },
+ { "5vs1", QCOM_SMD_RPM_VSA, 5, &pma8084_switch },
+
{}
};
-static const struct rpm_regulator_data rpm_pm660_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pm660_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pm660_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pm660_ftsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pm660_hfsmps, "vdd_s4" },
- { "s5", QCOM_SMD_RPM_SMPA, 5, &pm660_hfsmps, "vdd_s5" },
- { "s6", QCOM_SMD_RPM_SMPA, 6, &pm660_hfsmps, "vdd_s6" },
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l6_l7" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_ht_nldo, "vdd_l2_l3" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l2_l3" },
- /* l4 is unaccessible on PM660 */
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_ht_nldo, "vdd_l5" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_ht_nldo, "vdd_l1_l6_l7" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_ht_nldo, "vdd_l1_l6_l7" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l8_l9_l10_l11_l12_l13_l14" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l15_l16_l17_l18_l19" },
- { }
+static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
+ { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
+ {}
};
-static const struct rpm_regulator_data rpm_pm660l_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPB, 1, &pm660_ftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPB, 2, &pm660_ftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_RWCX, 0, &pm660_ftsmps, "vdd_s3_s4" },
- { "s5", QCOM_SMD_RPM_RWMX, 0, &pm660_ftsmps, "vdd_s5" },
- { "l1", QCOM_SMD_RPM_LDOB, 1, &pm660_nldo660, "vdd_l1_l9_l10" },
- { "l2", QCOM_SMD_RPM_LDOB, 2, &pm660_pldo660, "vdd_l2" },
- { "l3", QCOM_SMD_RPM_LDOB, 3, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l4", QCOM_SMD_RPM_LDOB, 4, &pm660_pldo660, "vdd_l4_l6" },
- { "l5", QCOM_SMD_RPM_LDOB, 5, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l6", QCOM_SMD_RPM_LDOB, 6, &pm660_pldo660, "vdd_l4_l6" },
- { "l7", QCOM_SMD_RPM_LDOB, 7, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l8", QCOM_SMD_RPM_LDOB, 8, &pm660_pldo660, "vdd_l3_l5_l7_l8" },
- { "l9", QCOM_SMD_RPM_RWLC, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
- { "l10", QCOM_SMD_RPM_RWLM, 0, &pm660_ht_nldo, "vdd_l1_l9_l10" },
- { "bob", QCOM_SMD_RPM_BOBB, 1, &pm660l_bob, "vdd_bob", },
- { }
+static const struct rpm_regulator_data rpm_pmi8998_regulators[] = {
+ { "bob", QCOM_SMD_RPM_BOBB, 1, &pmi8998_bob, "vdd_bob" },
+ {}
};
static const struct rpm_regulator_data rpm_pms405_regulators[] = {
@@ -1212,54 +1287,25 @@ static const struct rpm_regulator_data rpm_pms405_regulators[] = {
{}
};
-static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
- { "s1", QCOM_SMD_RPM_SMPA, 1, &pm2250_lvftsmps, "vdd_s1" },
- { "s2", QCOM_SMD_RPM_SMPA, 2, &pm2250_lvftsmps, "vdd_s2" },
- { "s3", QCOM_SMD_RPM_SMPA, 3, &pm2250_lvftsmps, "vdd_s3" },
- { "s4", QCOM_SMD_RPM_SMPA, 4, &pm2250_ftsmps, "vdd_s4" },
- { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
- { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
- { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
- {}
-};
-
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
+ { .compatible = "qcom,rpm-pm2250-regulators", .data = &rpm_pm2250_regulators },
+ { .compatible = "qcom,rpm-pm6125-regulators", .data = &rpm_pm6125_regulators },
+ { .compatible = "qcom,rpm-pm660-regulators", .data = &rpm_pm660_regulators },
+ { .compatible = "qcom,rpm-pm660l-regulators", .data = &rpm_pm660l_regulators },
+ { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8909-regulators", .data = &rpm_pm8909_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
- { .compatible = "qcom,rpm-pm8226-regulators", .data = &rpm_pm8226_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
{ .compatible = "qcom,rpm-pm8950-regulators", .data = &rpm_pm8950_regulators },
{ .compatible = "qcom,rpm-pm8953-regulators", .data = &rpm_pm8953_regulators },
{ .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pm8998-regulators", .data = &rpm_pm8998_regulators },
- { .compatible = "qcom,rpm-pm660-regulators", .data = &rpm_pm660_regulators },
- { .compatible = "qcom,rpm-pm660l-regulators", .data = &rpm_pm660l_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
{ .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators },
{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
{ .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators },
- { .compatible = "qcom,rpm-pm2250-regulators", .data = &rpm_pm2250_regulators },
{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index a2d0292a92fd..3e312729741e 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -99,6 +99,9 @@ enum spmi_regulator_logical_type {
SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO,
SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS426,
SPMI_REGULATOR_LOGICAL_TYPE_HFS430,
+ SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3,
+ SPMI_REGULATOR_LOGICAL_TYPE_LDO_510,
+ SPMI_REGULATOR_LOGICAL_TYPE_HFSMPS,
};
enum spmi_regulator_type {
@@ -166,6 +169,17 @@ enum spmi_regulator_subtype {
SPMI_REGULATOR_SUBTYPE_HFS430 = 0x0a,
SPMI_REGULATOR_SUBTYPE_HT_P150 = 0x35,
SPMI_REGULATOR_SUBTYPE_HT_P600 = 0x3d,
+ SPMI_REGULATOR_SUBTYPE_HFSMPS_510 = 0x0a,
+ SPMI_REGULATOR_SUBTYPE_FTSMPS_510 = 0x0b,
+ SPMI_REGULATOR_SUBTYPE_LV_P150_510 = 0x71,
+ SPMI_REGULATOR_SUBTYPE_LV_P300_510 = 0x72,
+ SPMI_REGULATOR_SUBTYPE_LV_P600_510 = 0x73,
+ SPMI_REGULATOR_SUBTYPE_N300_510 = 0x6a,
+ SPMI_REGULATOR_SUBTYPE_N600_510 = 0x6b,
+ SPMI_REGULATOR_SUBTYPE_N1200_510 = 0x6c,
+ SPMI_REGULATOR_SUBTYPE_MV_P50_510 = 0x7a,
+ SPMI_REGULATOR_SUBTYPE_MV_P150_510 = 0x7b,
+ SPMI_REGULATOR_SUBTYPE_MV_P600_510 = 0x7d,
};
enum spmi_common_regulator_registers {
@@ -193,6 +207,14 @@ enum spmi_ftsmps426_regulator_registers {
SPMI_FTSMPS426_REG_VOLTAGE_ULS_MSB = 0x69,
};
+/*
+ * Third common register layout
+ */
+enum spmi_hfsmps_regulator_registers {
+ SPMI_HFSMPS_REG_STEP_CTRL = 0x3c,
+ SPMI_HFSMPS_REG_PULL_DOWN = 0xa0,
+};
+
enum spmi_vs_registers {
SPMI_VS_REG_OCP = 0x4a,
SPMI_VS_REG_SOFT_START = 0x4c,
@@ -260,6 +282,15 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS426_MODE_MASK 0x07
+/* Third common regulator mode register values */
+#define SPMI_HFSMPS_MODE_BYPASS_MASK 2
+#define SPMI_HFSMPS_MODE_RETENTION_MASK 3
+#define SPMI_HFSMPS_MODE_LPM_MASK 4
+#define SPMI_HFSMPS_MODE_AUTO_MASK 6
+#define SPMI_HFSMPS_MODE_HPM_MASK 7
+
+#define SPMI_HFSMPS_MODE_MASK 0x07
+
/* Common regulator pull down control register layout */
#define SPMI_COMMON_PULL_DOWN_ENABLE_MASK 0x80
@@ -305,6 +336,9 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS_STEP_MARGIN_NUM 4
#define SPMI_FTSMPS_STEP_MARGIN_DEN 5
+/* slew_rate has units of uV/us. */
+#define SPMI_HFSMPS_SLEW_RATE_38p4 38400
+
#define SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK 0x03
#define SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT 0
@@ -554,6 +588,14 @@ static struct spmi_voltage_range ht_p600_ranges[] = {
SPMI_VOLTAGE_RANGE(0, 1704000, 1704000, 1896000, 1896000, 8000),
};
+static struct spmi_voltage_range nldo_510_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 320000, 320000, 1304000, 1304000, 8000),
+};
+
+static struct spmi_voltage_range ftsmps510_ranges[] = {
+ SPMI_VOLTAGE_RANGE(0, 300000, 300000, 1372000, 1372000, 4000),
+};
+
static DEFINE_SPMI_SET_POINTS(pldo);
static DEFINE_SPMI_SET_POINTS(nldo1);
static DEFINE_SPMI_SET_POINTS(nldo2);
@@ -576,6 +618,8 @@ static DEFINE_SPMI_SET_POINTS(ht_nldo);
static DEFINE_SPMI_SET_POINTS(hfs430);
static DEFINE_SPMI_SET_POINTS(ht_p150);
static DEFINE_SPMI_SET_POINTS(ht_p600);
+static DEFINE_SPMI_SET_POINTS(nldo_510);
+static DEFINE_SPMI_SET_POINTS(ftsmps510);
static inline int spmi_vreg_read(struct spmi_regulator *vreg, u16 addr, u8 *buf,
int len)
@@ -1062,6 +1106,23 @@ static unsigned int spmi_regulator_ftsmps426_get_mode(struct regulator_dev *rdev
}
}
+static unsigned int spmi_regulator_hfsmps_get_mode(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 reg;
+
+ spmi_vreg_read(vreg, SPMI_COMMON_REG_MODE, &reg, 1);
+
+ switch (reg) {
+ case SPMI_HFSMPS_MODE_HPM_MASK:
+ return REGULATOR_MODE_NORMAL;
+ case SPMI_HFSMPS_MODE_AUTO_MASK:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_IDLE;
+ }
+}
+
static int
spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
@@ -1109,6 +1170,33 @@ spmi_regulator_ftsmps426_set_mode(struct regulator_dev *rdev, unsigned int mode)
}
static int
+spmi_regulator_hfsmps_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 mask = SPMI_HFSMPS_MODE_MASK;
+ u8 val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = SPMI_HFSMPS_MODE_HPM_MASK;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = SPMI_HFSMPS_MODE_AUTO_MASK;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = vreg->logical_type ==
+ SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3 ?
+ SPMI_HFSMPS_MODE_RETENTION_MASK :
+ SPMI_HFSMPS_MODE_LPM_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
+}
+
+static int
spmi_regulator_common_set_load(struct regulator_dev *rdev, int load_uA)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -1131,6 +1219,15 @@ static int spmi_regulator_common_set_pull_down(struct regulator_dev *rdev)
mask, mask);
}
+static int spmi_regulator_hfsmps_set_pull_down(struct regulator_dev *rdev)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ unsigned int mask = SPMI_COMMON_PULL_DOWN_ENABLE_MASK;
+
+ return spmi_vreg_update_bits(vreg, SPMI_HFSMPS_REG_PULL_DOWN,
+ mask, mask);
+}
+
static int spmi_regulator_common_set_soft_start(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -1465,6 +1562,21 @@ static const struct regulator_ops spmi_hfs430_ops = {
.get_mode = spmi_regulator_ftsmps426_get_mode,
};
+static const struct regulator_ops spmi_hfsmps_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = spmi_regulator_ftsmps426_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ftsmps426_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
+ .list_voltage = spmi_regulator_common_list_voltage,
+ .set_mode = spmi_regulator_hfsmps_set_mode,
+ .get_mode = spmi_regulator_hfsmps_get_mode,
+ .set_load = spmi_regulator_common_set_load,
+ .set_pull_down = spmi_regulator_hfsmps_set_pull_down,
+};
+
/* Maximum possible digital major revision value */
#define INF 0xFF
@@ -1473,7 +1585,8 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
SPMI_VREG(LDO, HT_P600, 0, INF, HFS430, hfs430, ht_p600, 10000),
SPMI_VREG(LDO, HT_P150, 0, INF, HFS430, hfs430, ht_p150, 10000),
SPMI_VREG(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000),
- SPMI_VREG(BUCK, HFS430, 0, INF, HFS430, hfs430, hfs430, 10000),
+ SPMI_VREG(BUCK, HFS430, 0, 3, HFS430, hfs430, hfs430, 10000),
+ SPMI_VREG(BUCK, HFSMPS_510, 4, INF, HFSMPS, hfsmps, hfs430, 100000),
SPMI_VREG(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
SPMI_VREG(LDO, N600, 0, 0, LDO, ldo, nldo2, 10000),
SPMI_VREG(LDO, N1200, 0, 0, LDO, ldo, nldo2, 10000),
@@ -1549,6 +1662,16 @@ static const struct spmi_regulator_mapping supported_regulators[] = {
SPMI_VREG(ULT_LDO, P300, 0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
SPMI_VREG(ULT_LDO, P150, 0, INF, ULT_LDO, ult_ldo, ult_pldo, 10000),
SPMI_VREG(ULT_LDO, P50, 0, INF, ULT_LDO, ult_ldo, ult_pldo, 5000),
+ SPMI_VREG(LDO, LV_P150_510, 0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
+ SPMI_VREG(LDO, LV_P300_510, 0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
+ SPMI_VREG(LDO, LV_P600_510, 0, INF, LDO_510, hfsmps, ht_lvpldo, 10000),
+ SPMI_VREG(LDO, MV_P50_510, 0, INF, LDO_510, hfsmps, pldo660, 10000),
+ SPMI_VREG(LDO, MV_P150_510, 0, INF, LDO_510, hfsmps, pldo660, 10000),
+ SPMI_VREG(LDO, MV_P600_510, 0, INF, LDO_510, hfsmps, pldo660, 10000),
+ SPMI_VREG(LDO, N300_510, 0, INF, LDO_510, hfsmps, nldo_510, 10000),
+ SPMI_VREG(LDO, N600_510, 0, INF, LDO_510, hfsmps, nldo_510, 10000),
+ SPMI_VREG(LDO, N1200_510, 0, INF, LDO_510, hfsmps, nldo_510, 10000),
+ SPMI_VREG(FTS, FTSMPS_510, 0, INF, FTSMPS3, hfsmps, ftsmps510, 100000),
};
static void spmi_calculate_num_voltages(struct spmi_voltage_set_points *points)
@@ -1696,6 +1819,26 @@ static int spmi_regulator_init_slew_rate_ftsmps426(struct spmi_regulator *vreg,
return ret;
}
+static int spmi_regulator_init_slew_rate_hfsmps(struct spmi_regulator *vreg)
+{
+ int ret;
+ u8 reg = 0;
+ int delay;
+
+ ret = spmi_vreg_read(vreg, SPMI_HFSMPS_REG_STEP_CTRL, &reg, 1);
+ if (ret) {
+ dev_err(vreg->dev, "spmi read failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ delay = reg & SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK;
+ delay >>= SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT;
+
+ vreg->slew_rate = SPMI_HFSMPS_SLEW_RATE_38p4 >> delay;
+
+ return ret;
+}
+
static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
const struct spmi_regulator_init_data *data)
{
@@ -1846,6 +1989,12 @@ static int spmi_regulator_of_parse(struct device_node *node,
if (ret)
return ret;
break;
+ case SPMI_REGULATOR_LOGICAL_TYPE_HFSMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS3:
+ ret = spmi_regulator_init_slew_rate_hfsmps(vreg);
+ if (ret)
+ return ret;
+ break;
default:
break;
}
@@ -1872,40 +2021,100 @@ static int spmi_regulator_of_parse(struct device_node *node,
return 0;
}
-static const struct spmi_regulator_data pm8941_regulators[] = {
+static const struct spmi_regulator_data pm6125_regulators[] = {
+ { "s1", 0x1400, "vdd_s1" },
+ { "s2", 0x1700, "vdd_s2" },
+ { "s3", 0x1a00, "vdd_s3" },
+ { "s4", 0x1d00, "vdd_s4" },
+ { "s5", 0x2000, "vdd_s5" },
+ { "s6", 0x2300, "vdd_s6" },
+ { "s7", 0x2600, "vdd_s7" },
+ { "s8", 0x2900, "vdd_s8" },
+ { "l1", 0x4000, "vdd_l1_l7_l17_l18" },
+ { "l2", 0x4100, "vdd_l2_l3_l4" },
+ { "l3", 0x4200, "vdd_l2_l3_l4" },
+ { "l4", 0x4300, "vdd_l2_l3_l4" },
+ { "l5", 0x4400, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l6", 0x4500, "vdd_l6_l8" },
+ { "l7", 0x4600, "vdd_l1_l7_l17_l18" },
+ { "l8", 0x4700, "vdd_l6_l8" },
+ { "l9", 0x4800, "vdd_l9_l11" },
+ { "l10", 0x4900, "vdd_l10_l13_l14" },
+ { "l11", 0x4a00, "vdd_l9_l11" },
+ { "l12", 0x4b00, "vdd_l12_l16" },
+ { "l13", 0x4c00, "vdd_l10_l13_l14" },
+ { "l14", 0x4d00, "vdd_l10_l13_l14" },
+ { "l15", 0x4e00, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l16", 0x4f00, "vdd_l12_l16" },
+ { "l17", 0x5000, "vdd_l1_l7_l17_l18" },
+ { "l18", 0x5100, "vdd_l1_l7_l17_l18" },
+ { "l19", 0x5200, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l20", 0x5300, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l21", 0x5400, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l22", 0x5500, "vdd_l5_l15_l19_l20_l21_l22" },
+ { "l23", 0x5600, "vdd_l23_l24" },
+ { "l24", 0x5700, "vdd_l23_l24" },
+};
+
+static const struct spmi_regulator_data pm660_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
{ "s3", 0x1a00, "vdd_s3", },
- { "s4", 0xa000, },
- { "l1", 0x4000, "vdd_l1_l3", },
- { "l2", 0x4100, "vdd_l2_lvs_1_2_3", },
- { "l3", 0x4200, "vdd_l1_l3", },
- { "l4", 0x4300, "vdd_l4_l11", },
- { "l5", 0x4400, "vdd_l5_l7", NULL, 0x0410 },
- { "l6", 0x4500, "vdd_l6_l12_l14_l15", },
- { "l7", 0x4600, "vdd_l5_l7", NULL, 0x0410 },
- { "l8", 0x4700, "vdd_l8_l16_l18_19", },
- { "l9", 0x4800, "vdd_l9_l10_l17_l22", },
- { "l10", 0x4900, "vdd_l9_l10_l17_l22", },
- { "l11", 0x4a00, "vdd_l4_l11", },
- { "l12", 0x4b00, "vdd_l6_l12_l14_l15", },
- { "l13", 0x4c00, "vdd_l13_l20_l23_l24", },
- { "l14", 0x4d00, "vdd_l6_l12_l14_l15", },
- { "l15", 0x4e00, "vdd_l6_l12_l14_l15", },
- { "l16", 0x4f00, "vdd_l8_l16_l18_19", },
- { "l17", 0x5000, "vdd_l9_l10_l17_l22", },
- { "l18", 0x5100, "vdd_l8_l16_l18_19", },
- { "l19", 0x5200, "vdd_l8_l16_l18_19", },
- { "l20", 0x5300, "vdd_l13_l20_l23_l24", },
- { "l21", 0x5400, "vdd_l21", },
- { "l22", 0x5500, "vdd_l9_l10_l17_l22", },
- { "l23", 0x5600, "vdd_l13_l20_l23_l24", },
- { "l24", 0x5700, "vdd_l13_l20_l23_l24", },
- { "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", },
- { "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", },
- { "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", },
- { "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", },
- { "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", },
+ { "s4", 0x1d00, "vdd_s3", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "l1", 0x4000, "vdd_l1_l6_l7", },
+ { "l2", 0x4100, "vdd_l2_l3", },
+ { "l3", 0x4200, "vdd_l2_l3", },
+	/* l4 is inaccessible on PM660 */
+ { "l5", 0x4400, "vdd_l5", },
+ { "l6", 0x4500, "vdd_l1_l6_l7", },
+ { "l7", 0x4600, "vdd_l1_l6_l7", },
+ { "l8", 0x4700, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l9", 0x4800, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l10", 0x4900, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l11", 0x4a00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l12", 0x4b00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l13", 0x4c00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l14", 0x4d00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
+ { "l15", 0x4e00, "vdd_l15_l16_l17_l18_l19", },
+ { "l16", 0x4f00, "vdd_l15_l16_l17_l18_l19", },
+ { "l17", 0x5000, "vdd_l15_l16_l17_l18_l19", },
+ { "l18", 0x5100, "vdd_l15_l16_l17_l18_l19", },
+ { "l19", 0x5200, "vdd_l15_l16_l17_l18_l19", },
+ { }
+};
+
+static const struct spmi_regulator_data pm660l_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "l1", 0x4000, "vdd_l1_l9_l10", },
+ { "l2", 0x4100, "vdd_l2", },
+ { "l3", 0x4200, "vdd_l3_l5_l7_l8", },
+ { "l4", 0x4300, "vdd_l4_l6", },
+ { "l5", 0x4400, "vdd_l3_l5_l7_l8", },
+ { "l6", 0x4500, "vdd_l4_l6", },
+ { "l7", 0x4600, "vdd_l3_l5_l7_l8", },
+ { "l8", 0x4700, "vdd_l3_l5_l7_l8", },
+ { "l9", 0x4800, "vdd_l1_l9_l10", },
+ { "l10", 0x4900, "vdd_l1_l9_l10", },
+ { }
+};
+
+static const struct spmi_regulator_data pm8004_regulators[] = {
+ { "s2", 0x1700, "vdd_s2", },
+ { "s5", 0x2000, "vdd_s5", },
+ { }
+};
+
+static const struct spmi_regulator_data pm8005_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
{ }
};
@@ -1985,6 +2194,43 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8941_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0xa000, },
+ { "l1", 0x4000, "vdd_l1_l3", },
+ { "l2", 0x4100, "vdd_l2_lvs_1_2_3", },
+ { "l3", 0x4200, "vdd_l1_l3", },
+ { "l4", 0x4300, "vdd_l4_l11", },
+ { "l5", 0x4400, "vdd_l5_l7", NULL, 0x0410 },
+ { "l6", 0x4500, "vdd_l6_l12_l14_l15", },
+ { "l7", 0x4600, "vdd_l5_l7", NULL, 0x0410 },
+ { "l8", 0x4700, "vdd_l8_l16_l18_19", },
+ { "l9", 0x4800, "vdd_l9_l10_l17_l22", },
+ { "l10", 0x4900, "vdd_l9_l10_l17_l22", },
+ { "l11", 0x4a00, "vdd_l4_l11", },
+ { "l12", 0x4b00, "vdd_l6_l12_l14_l15", },
+ { "l13", 0x4c00, "vdd_l13_l20_l23_l24", },
+ { "l14", 0x4d00, "vdd_l6_l12_l14_l15", },
+ { "l15", 0x4e00, "vdd_l6_l12_l14_l15", },
+ { "l16", 0x4f00, "vdd_l8_l16_l18_19", },
+ { "l17", 0x5000, "vdd_l9_l10_l17_l22", },
+ { "l18", 0x5100, "vdd_l8_l16_l18_19", },
+ { "l19", 0x5200, "vdd_l8_l16_l18_19", },
+ { "l20", 0x5300, "vdd_l13_l20_l23_l24", },
+ { "l21", 0x5400, "vdd_l21", },
+ { "l22", 0x5500, "vdd_l9_l10_l17_l22", },
+ { "l23", 0x5600, "vdd_l13_l20_l23_l24", },
+ { "l24", 0x5700, "vdd_l13_l20_l23_l24", },
+ { "lvs1", 0x8000, "vdd_l2_lvs_1_2_3", },
+ { "lvs2", 0x8100, "vdd_l2_lvs_1_2_3", },
+ { "lvs3", 0x8200, "vdd_l2_lvs_1_2_3", },
+ { "5vs1", 0x8300, "vin_5vs", "ocp-5vs1", },
+ { "5vs2", 0x8400, "vin_5vs", "ocp-5vs2", },
+ { }
+};
+
static const struct spmi_regulator_data pm8950_regulators[] = {
{ "s1", 0x1400, "vdd_s1", },
{ "s2", 0x1700, "vdd_s2", },
@@ -2076,69 +2322,6 @@ static const struct spmi_regulator_data pmi8994_regulators[] = {
{ }
};
-static const struct spmi_regulator_data pm660_regulators[] = {
- { "s1", 0x1400, "vdd_s1", },
- { "s2", 0x1700, "vdd_s2", },
- { "s3", 0x1a00, "vdd_s3", },
- { "s4", 0x1d00, "vdd_s3", },
- { "s5", 0x2000, "vdd_s5", },
- { "s6", 0x2300, "vdd_s6", },
- { "l1", 0x4000, "vdd_l1_l6_l7", },
- { "l2", 0x4100, "vdd_l2_l3", },
- { "l3", 0x4200, "vdd_l2_l3", },
- /* l4 is unaccessible on PM660 */
- { "l5", 0x4400, "vdd_l5", },
- { "l6", 0x4500, "vdd_l1_l6_l7", },
- { "l7", 0x4600, "vdd_l1_l6_l7", },
- { "l8", 0x4700, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l9", 0x4800, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l10", 0x4900, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l11", 0x4a00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l12", 0x4b00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l13", 0x4c00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l14", 0x4d00, "vdd_l8_l9_l10_l11_l12_l13_l14", },
- { "l15", 0x4e00, "vdd_l15_l16_l17_l18_l19", },
- { "l16", 0x4f00, "vdd_l15_l16_l17_l18_l19", },
- { "l17", 0x5000, "vdd_l15_l16_l17_l18_l19", },
- { "l18", 0x5100, "vdd_l15_l16_l17_l18_l19", },
- { "l19", 0x5200, "vdd_l15_l16_l17_l18_l19", },
- { }
-};
-
-static const struct spmi_regulator_data pm660l_regulators[] = {
- { "s1", 0x1400, "vdd_s1", },
- { "s2", 0x1700, "vdd_s2", },
- { "s3", 0x1a00, "vdd_s3", },
- { "s4", 0x1d00, "vdd_s4", },
- { "s5", 0x2000, "vdd_s5", },
- { "l1", 0x4000, "vdd_l1_l9_l10", },
- { "l2", 0x4100, "vdd_l2", },
- { "l3", 0x4200, "vdd_l3_l5_l7_l8", },
- { "l4", 0x4300, "vdd_l4_l6", },
- { "l5", 0x4400, "vdd_l3_l5_l7_l8", },
- { "l6", 0x4500, "vdd_l4_l6", },
- { "l7", 0x4600, "vdd_l3_l5_l7_l8", },
- { "l8", 0x4700, "vdd_l3_l5_l7_l8", },
- { "l9", 0x4800, "vdd_l1_l9_l10", },
- { "l10", 0x4900, "vdd_l1_l9_l10", },
- { }
-};
-
-
-static const struct spmi_regulator_data pm8004_regulators[] = {
- { "s2", 0x1700, "vdd_s2", },
- { "s5", 0x2000, "vdd_s5", },
- { }
-};
-
-static const struct spmi_regulator_data pm8005_regulators[] = {
- { "s1", 0x1400, "vdd_s1", },
- { "s2", 0x1700, "vdd_s2", },
- { "s3", 0x1a00, "vdd_s3", },
- { "s4", 0x1d00, "vdd_s4", },
- { }
-};
-
static const struct spmi_regulator_data pmp8074_regulators[] = {
{ "s1", 0x1400, "vdd_s1"},
{ "s2", 0x1700, "vdd_s2"},
@@ -2167,6 +2350,9 @@ static const struct spmi_regulator_data pms405_regulators[] = {
};
static const struct of_device_id qcom_spmi_regulator_match[] = {
+ { .compatible = "qcom,pm6125-regulators", .data = &pm6125_regulators },
+ { .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
+ { .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
{ .compatible = "qcom,pm8004-regulators", .data = &pm8004_regulators },
{ .compatible = "qcom,pm8005-regulators", .data = &pm8005_regulators },
{ .compatible = "qcom,pm8226-regulators", .data = &pm8226_regulators },
@@ -2176,8 +2362,6 @@ static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8950-regulators", .data = &pm8950_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
- { .compatible = "qcom,pm660-regulators", .data = &pm660_regulators },
- { .compatible = "qcom,pm660l-regulators", .data = &pm660l_regulators },
{ .compatible = "qcom,pmp8074-regulators", .data = &pmp8074_regulators },
{ .compatible = "qcom,pms405-regulators", .data = &pms405_regulators },
{ }
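
A quick, self-contained sketch of the slew-rate derivation used by the new spmi_regulator_init_slew_rate_hfsmps() above; the 0x03 delay mask, the zero shift and the 38400 uV/us base value are taken from this hunk, while the helper name and the standalone harness are illustrative only, not driver code.

#include <stdio.h>

#define STEP_CTRL_DELAY_MASK	0x03	/* SPMI_FTSMPS426_STEP_CTRL_DELAY_MASK */
#define STEP_CTRL_DELAY_SHIFT	0	/* SPMI_FTSMPS426_STEP_CTRL_DELAY_SHIFT */
#define SLEW_RATE_38p4		38400	/* SPMI_HFSMPS_SLEW_RATE_38p4, in uV/us */

/* Mirrors the shift-based computation in spmi_regulator_init_slew_rate_hfsmps(). */
static unsigned int hfsmps_slew_rate(unsigned char step_ctrl)
{
	unsigned int delay = (step_ctrl & STEP_CTRL_DELAY_MASK) >> STEP_CTRL_DELAY_SHIFT;

	/* Each delay step halves the 38.4 mV/us base slew rate. */
	return SLEW_RATE_38p4 >> delay;
}

int main(void)
{
	unsigned int reg;

	for (reg = 0; reg <= 3; reg++)
		printf("STEP_CTRL delay %u -> %u uV/us\n", reg, hfsmps_slew_rate(reg));
	return 0;	/* prints 38400, 19200, 9600, 4800 */
}
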
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index 105f694a67e6..308f7972941b 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -381,13 +381,11 @@ error:
return ret;
}
-static int attiny_i2c_remove(struct i2c_client *client)
+static void attiny_i2c_remove(struct i2c_client *client)
{
struct attiny_lcd *state = i2c_get_clientdata(client);
mutex_destroy(&state->lock);
-
- return 0;
}
static const struct of_device_id attiny_dt_ids[] = {
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index ce00db27589a..115345e9fded 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -151,7 +151,7 @@ static inline void ti_abb_clear_txdone(const struct ti_abb *abb)
};
/**
- * ti_abb_wait_tranx() - waits for ABB tranxdone event
+ * ti_abb_wait_txdone() - waits for ABB tranxdone event
* @dev: device
* @abb: pointer to the abb instance
*
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
new file mode 100644
index 000000000000..c484c943e467
--- /dev/null
+++ b/drivers/regulator/tps65219-regulator.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// tps65219-regulator.c
+//
+// Regulator driver for TPS65219 PMIC
+//
+// Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+//
+// This implementation derived from tps65218 authored by
+// "J Keerthy <j-keerthy@ti.com>"
+//
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65219.h>
+
+struct tps65219_regulator_irq_type {
+ const char *irq_name;
+ const char *regulator_name;
+ const char *event_name;
+ unsigned long event;
+};
+
+static struct tps65219_regulator_irq_type tps65219_regulator_irq_types[] = {
+ { "LDO3_SCG", "LDO3", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO3_OC", "LDO3", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO3_UV", "LDO3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO4_SCG", "LDO4", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO4_OC", "LDO4", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO4_UV", "LDO4", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO1_SCG", "LDO1", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO1_OC", "LDO1", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO1_UV", "LDO1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "LDO2_SCG", "LDO2", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "LDO2_OC", "LDO2", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "LDO2_UV", "LDO2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK3_SCG", "BUCK3", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "BUCK3_OC", "BUCK3", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK3_NEG_OC", "BUCK3", "negative overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK3_UV", "BUCK3", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK1_SCG", "BUCK1", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "BUCK1_OC", "BUCK1", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK1_NEG_OC", "BUCK1", "negative overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK1_UV", "BUCK1", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK2_SCG", "BUCK2", "short circuit to ground", REGULATOR_EVENT_REGULATION_OUT },
+ { "BUCK2_OC", "BUCK2", "overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK2_NEG_OC", "BUCK2", "negative overcurrent", REGULATOR_EVENT_OVER_CURRENT },
+ { "BUCK2_UV", "BUCK2", "undervoltage", REGULATOR_EVENT_UNDER_VOLTAGE },
+ { "BUCK1_RV", "BUCK1", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK2_RV", "BUCK2", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK3_RV", "BUCK3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO1_RV", "LDO1", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO2_RV", "LDO2", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO3_RV", "LDO3", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO4_RV", "LDO4", "residual voltage", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK1_RV_SD", "BUCK1", "residual voltage on shutdown",
+ REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK2_RV_SD", "BUCK2", "residual voltage on shutdown",
+ REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "BUCK3_RV_SD", "BUCK3", "residual voltage on shutdown",
+ REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO1_RV_SD", "LDO1", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO2_RV_SD", "LDO2", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO3_RV_SD", "LDO3", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "LDO4_RV_SD", "LDO4", "residual voltage on shutdown", REGULATOR_EVENT_OVER_VOLTAGE_WARN },
+ { "SENSOR_3_WARM", "SENSOR3", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN},
+ { "SENSOR_2_WARM", "SENSOR2", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
+ { "SENSOR_1_WARM", "SENSOR1", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
+ { "SENSOR_0_WARM", "SENSOR0", "warm temperature", REGULATOR_EVENT_OVER_TEMP_WARN },
+ { "SENSOR_3_HOT", "SENSOR3", "hot temperature", REGULATOR_EVENT_OVER_TEMP},
+ { "SENSOR_2_HOT", "SENSOR2", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
+ { "SENSOR_1_HOT", "SENSOR1", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
+ { "SENSOR_0_HOT", "SENSOR0", "hot temperature", REGULATOR_EVENT_OVER_TEMP },
+ { "TIMEOUT", "", "", REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE },
+};
+
+struct tps65219_regulator_irq_data {
+ struct device *dev;
+ struct tps65219_regulator_irq_type *type;
+ struct regulator_dev *rdev;
+};
+
+#define TPS65219_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
+ _em, _cr, _cm, _lr, _nlr, _delay, _fuv, \
+ _ct, _ncl, _bpm) \
+ { \
+ .name = _name, \
+ .of_match = _of, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .supply_name = _of, \
+ .id = _id, \
+ .ops = &(_ops), \
+ .n_voltages = _n, \
+ .type = _type, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .csel_reg = _cr, \
+ .csel_mask = _cm, \
+ .curr_table = _ct, \
+ .n_current_limits = _ncl, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .volt_table = NULL, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ .ramp_delay = _delay, \
+ .fixed_uV = _fuv, \
+ .bypass_reg = _vr, \
+ .bypass_mask = _bpm, \
+ } \
+
+static const struct linear_range bucks_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x1f, 25000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x20, 0x33, 100000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x34, 0x3f, 0),
+};
+
+static const struct linear_range ldos_1_2_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x0, 0x37, 50000),
+ REGULATOR_LINEAR_RANGE(3400000, 0x38, 0x3f, 0),
+};
+
+static const struct linear_range ldos_3_4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x0, 0xC, 0),
+ REGULATOR_LINEAR_RANGE(1250000, 0xD, 0x35, 50000),
+ REGULATOR_LINEAR_RANGE(3300000, 0x36, 0x3F, 0),
+};
+
+static int tps65219_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ return regmap_set_bits(tps->regmap, TPS65219_REG_STBY_1_CONFIG,
+ dev->desc->enable_mask);
+
+ case REGULATOR_MODE_STANDBY:
+ return regmap_clear_bits(tps->regmap,
+ TPS65219_REG_STBY_1_CONFIG,
+ dev->desc->enable_mask);
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned int tps65219_get_mode(struct regulator_dev *dev)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+ int ret, value = 0;
+
+ ret = regmap_read(tps->regmap, TPS65219_REG_STBY_1_CONFIG, &value);
+ if (ret) {
+ dev_dbg(tps->dev, "%s failed for regulator %s: %d ",
+ __func__, dev->desc->name, ret);
+ return ret;
+ }
+ value = (value & BIT(rid)) >> rid;
+ if (value)
+ return REGULATOR_MODE_STANDBY;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+/*
+ * The generic regulator_set_bypass_regmap() does not fully match the
+ * TPS65219 requirements: the regulator must be explicitly disabled
+ * before switching to bypass.
+ */
+static int tps65219_set_bypass(struct regulator_dev *dev, bool enable)
+{
+ struct tps65219 *tps = rdev_get_drvdata(dev);
+ unsigned int rid = rdev_get_id(dev);
+
+ if (dev->desc->ops->is_enabled(dev)) {
+ dev_err(tps->dev,
+ "%s LDO%d enabled, must be shut down to set bypass ",
+ __func__, rid);
+ return -EBUSY;
+ }
+ return regulator_set_bypass_regmap(dev, enable);
+}
+
+/* Operations permitted on BUCK1/2/3 */
+static const struct regulator_ops tps65219_bucks_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+
+};
+
+/* Operations permitted on LDO1/2 */
+static const struct regulator_ops tps65219_ldos_1_2_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_bypass = tps65219_set_bypass,
+ .get_bypass = regulator_get_bypass_regmap,
+};
+
+/* Operations permitted on LDO3/4 */
+static const struct regulator_ops tps65219_ldos_3_4_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .set_mode = tps65219_set_mode,
+ .get_mode = tps65219_get_mode,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS65219_REGULATOR("BUCK1", "buck1", TPS65219_BUCK_1,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK1_EN_MASK, 0, 0, bucks_ranges,
+ 3, 4000, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("BUCK2", "buck2", TPS65219_BUCK_2,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK2_EN_MASK, 0, 0, bucks_ranges,
+ 3, 4000, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("BUCK3", "buck3", TPS65219_BUCK_3,
+ REGULATOR_VOLTAGE, tps65219_bucks_ops, 64,
+ TPS65219_REG_BUCK3_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_BUCK3_EN_MASK, 0, 0, bucks_ranges,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO1", "ldo1", TPS65219_LDO_1,
+ REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ TPS65219_REG_LDO1_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO1_EN_MASK, 0, 0, ldos_1_2_ranges,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO2", "ldo2", TPS65219_LDO_2,
+ REGULATOR_VOLTAGE, tps65219_ldos_1_2_ops, 64,
+ TPS65219_REG_LDO2_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO2_EN_MASK, 0, 0, ldos_1_2_ranges,
+ 2, 0, 0, NULL, 0, TPS65219_LDOS_BYP_CONFIG_MASK),
+ TPS65219_REGULATOR("LDO3", "ldo3", TPS65219_LDO_3,
+ REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ TPS65219_REG_LDO3_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO3_EN_MASK, 0, 0, ldos_3_4_ranges,
+ 3, 0, 0, NULL, 0, 0),
+ TPS65219_REGULATOR("LDO4", "ldo4", TPS65219_LDO_4,
+ REGULATOR_VOLTAGE, tps65219_ldos_3_4_ops, 64,
+ TPS65219_REG_LDO4_VOUT,
+ TPS65219_BUCKS_LDOS_VOUT_VSET_MASK,
+ TPS65219_REG_ENABLE_CTRL,
+ TPS65219_ENABLE_LDO4_EN_MASK, 0, 0, ldos_3_4_ranges,
+ 3, 0, 0, NULL, 0, 0),
+};
+
+static irqreturn_t tps65219_regulator_irq_handler(int irq, void *data)
+{
+ struct tps65219_regulator_irq_data *irq_data = data;
+
+ if (irq_data->type->event_name[0] == '\0') {
+		/* This is the timeout interrupt; no specific regulator is involved */
+ dev_err(irq_data->dev,
+ "System was put in shutdown due to timeout during an active or standby transition.\n");
+ return IRQ_HANDLED;
+ }
+
+ regulator_notifier_call_chain(irq_data->rdev,
+ irq_data->type->event, NULL);
+
+ dev_err(irq_data->dev, "Error IRQ trap %s for %s\n",
+ irq_data->type->event_name, irq_data->type->regulator_name);
+ return IRQ_HANDLED;
+}
+
+static int tps65219_get_rdev_by_name(const char *regulator_name,
+ struct regulator_dev *rdevtbl[7],
+ struct regulator_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ if (strcmp(regulator_name, regulators[i].name) == 0) {
+ dev = rdevtbl[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int tps65219_regulator_probe(struct platform_device *pdev)
+{
+ struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_dev *rdev;
+ struct regulator_config config = { };
+ int i;
+ int error;
+ int irq;
+ struct tps65219_regulator_irq_data *irq_data;
+ struct tps65219_regulator_irq_type *irq_type;
+ struct regulator_dev *rdevtbl[7];
+
+ config.dev = tps->dev;
+ config.driver_data = tps;
+ config.regmap = tps->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ dev_dbg(tps->dev, "%s regul i= %d START", __func__, i);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(tps->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+ rdevtbl[i] = rdev;
+ dev_dbg(tps->dev, "%s regul i= %d COMPLETED", __func__, i);
+ }
+
+ irq_data = devm_kmalloc(tps->dev,
+ ARRAY_SIZE(tps65219_regulator_irq_types) *
+ sizeof(struct tps65219_regulator_irq_data),
+ GFP_KERNEL);
+ if (!irq_data)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(tps65219_regulator_irq_types); ++i) {
+ irq_type = &tps65219_regulator_irq_types[i];
+
+ irq = platform_get_irq_byname(pdev, irq_type->irq_name);
+ if (irq < 0)
+ return -EINVAL;
+
+ irq_data[i].dev = tps->dev;
+ irq_data[i].type = irq_type;
+
+ tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, rdev);
+ if (rdev < 0) {
+ dev_err(tps->dev, "Failed to get rdev for %s\n",
+ irq_type->regulator_name);
+ return -EINVAL;
+ }
+ irq_data[i].rdev = rdev;
+
+ error = devm_request_threaded_irq(tps->dev, irq, NULL,
+ tps65219_regulator_irq_handler,
+ IRQF_ONESHOT,
+ irq_type->irq_name,
+ &irq_data[i]);
+ if (error) {
+ dev_err(tps->dev, "failed to request %s IRQ %d: %d\n",
+ irq_type->irq_name, irq, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id tps65219_regulator_id_table[] = {
+ { "tps65219-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65219_regulator_id_table);
+
+static struct platform_driver tps65219_regulator_driver = {
+ .driver = {
+ .name = "tps65219-pmic",
+ },
+ .probe = tps65219_regulator_probe,
+ .id_table = tps65219_regulator_id_table,
+};
+
+module_platform_driver(tps65219_regulator_driver);
+
+MODULE_AUTHOR("Jerome Neanne <j-neanne@baylibre.com>");
+MODULE_DESCRIPTION("TPS65219 voltage regulator driver");
+MODULE_ALIAS("platform:tps65219-pmic");
+MODULE_LICENSE("GPL");
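
As a rough illustration of the TPS65219 buck voltage tables added above, the selector-to-voltage mapping implied by bucks_ranges[] can be reproduced standalone as below; the regulator core's regulator_list_voltage_linear_range() does the real work in the driver, and only the range values come from this file, the helper and structure names are made up for the sketch.

#include <stdio.h>

struct lin_range { unsigned int min_uV, min_sel, max_sel, step_uV; };

/* Same numbers as the bucks_ranges[] REGULATOR_LINEAR_RANGE() entries above. */
static const struct lin_range buck_ranges[] = {
	{  600000, 0x00, 0x1f,  25000 },
	{ 1400000, 0x20, 0x33, 100000 },
	{ 3400000, 0x34, 0x3f,      0 },
};

static int buck_sel_to_uV(unsigned int sel)
{
	unsigned int i;

	for (i = 0; i < sizeof(buck_ranges) / sizeof(buck_ranges[0]); i++) {
		const struct lin_range *r = &buck_ranges[i];

		if (sel >= r->min_sel && sel <= r->max_sel)
			return r->min_uV + (sel - r->min_sel) * r->step_uV;
	}
	return -1;	/* selector outside the 6-bit VSET field */
}

int main(void)
{
	printf("0x00 -> %d uV\n", buck_sel_to_uV(0x00));	/* 600000 */
	printf("0x20 -> %d uV\n", buck_sel_to_uV(0x20));	/* 1400000 */
	printf("0x3f -> %d uV\n", buck_sel_to_uV(0x3f));	/* 3400000 */
	return 0;
}
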
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index ca0817f8e41e..899aa8dd12f0 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -599,7 +599,7 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
}
/* Register memory region */
- mem = rproc_mem_entry_init(dev, cpu_addr, (dma_addr_t)att->sa,
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)att->sa,
att->size, da, NULL, NULL, "dsp_mem");
if (mem)
@@ -635,7 +635,7 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
}
/* Register memory region */
- mem = rproc_mem_entry_init(dev, cpu_addr, (dma_addr_t)rmem->base,
+ mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
rmem->size, da, NULL, NULL, it.node->name);
if (mem)
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 38383e7de3c1..7cc4fd207e2d 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -646,7 +646,6 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
struct imx_rproc *priv = rproc->priv;
struct device *dev = priv->dev;
struct mbox_client *cl;
- int ret;
if (!of_get_property(dev->of_node, "mbox-names", NULL))
return 0;
@@ -659,18 +658,15 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
cl->rx_callback = imx_rproc_rx_callback;
priv->tx_ch = mbox_request_channel_byname(cl, "tx");
- if (IS_ERR(priv->tx_ch)) {
- ret = PTR_ERR(priv->tx_ch);
- return dev_err_probe(cl->dev, ret,
- "failed to request tx mailbox channel: %d\n", ret);
- }
+ if (IS_ERR(priv->tx_ch))
+ return dev_err_probe(cl->dev, PTR_ERR(priv->tx_ch),
+ "failed to request tx mailbox channel\n");
priv->rx_ch = mbox_request_channel_byname(cl, "rx");
if (IS_ERR(priv->rx_ch)) {
mbox_free_channel(priv->tx_ch);
- ret = PTR_ERR(priv->rx_ch);
- return dev_err_probe(cl->dev, ret,
- "failed to request rx mailbox channel: %d\n", ret);
+ return dev_err_probe(cl->dev, PTR_ERR(priv->rx_ch),
+ "failed to request rx mailbox channel\n");
}
return 0;
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 594a9b43b7ae..95b39741925d 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -14,7 +14,7 @@
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/remoteproc.h>
@@ -59,10 +59,10 @@ struct keystone_rproc {
int num_mems;
struct regmap *dev_ctrl;
struct reset_control *reset;
+ struct gpio_desc *kick_gpio;
u32 boot_offset;
int irq_ring;
int irq_fault;
- int kick_gpio;
struct work_struct workqueue;
};
@@ -232,10 +232,10 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
{
struct keystone_rproc *ksproc = rproc->priv;
- if (WARN_ON(ksproc->kick_gpio < 0))
+ if (!ksproc->kick_gpio)
return;
- gpio_set_value(ksproc->kick_gpio, 1);
+ gpiod_set_value(ksproc->kick_gpio, 1);
}
/*
@@ -432,9 +432,9 @@ static int keystone_rproc_probe(struct platform_device *pdev)
goto disable_clk;
}
- ksproc->kick_gpio = of_get_named_gpio_flags(np, "kick-gpios", 0, NULL);
- if (ksproc->kick_gpio < 0) {
- ret = ksproc->kick_gpio;
+ ksproc->kick_gpio = gpiod_get(dev, "kick", GPIOD_ASIS);
+ ret = PTR_ERR_OR_ZERO(ksproc->kick_gpio);
+ if (ret) {
dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n",
ret);
goto disable_clk;
@@ -466,6 +466,7 @@ static int keystone_rproc_probe(struct platform_device *pdev)
release_mem:
of_reserved_mem_device_release(dev);
+ gpiod_put(ksproc->kick_gpio);
disable_clk:
pm_runtime_put_sync(dev);
disable_rpm:
@@ -480,6 +481,7 @@ static int keystone_rproc_remove(struct platform_device *pdev)
struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
rproc_del(ksproc->rproc);
+ gpiod_put(ksproc->kick_gpio);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
rproc_free(ksproc->rproc);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index e5279ed9a8d7..8768cb64f560 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -23,9 +23,7 @@
#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
@@ -346,7 +344,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
return -ENOMEM;
} else {
- /* Register carveout in in list */
+ /* Register carveout in list */
mem = rproc_mem_entry_init(dev, NULL, 0,
size, rsc->vring[i].da,
rproc_alloc_carveout,
@@ -384,7 +382,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
return 0;
}
-static int
+int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
struct rproc *rproc = rvdev->rproc;
@@ -435,57 +433,17 @@ void rproc_free_vring(struct rproc_vring *rvring)
}
}
-static int rproc_vdev_do_start(struct rproc_subdev *subdev)
+void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev)
{
- struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
-
- return rproc_add_virtio_dev(rvdev, rvdev->id);
-}
-
-static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
-{
- struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
- int ret;
-
- ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
- if (ret)
- dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
-}
-
-/**
- * rproc_rvdev_release() - release the existence of a rvdev
- *
- * @dev: the subdevice's dev
- */
-static void rproc_rvdev_release(struct device *dev)
-{
- struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);
-
- of_reserved_mem_device_release(dev);
- dma_release_coherent_memory(dev);
-
- kfree(rvdev);
+ if (rvdev && rproc)
+ list_add_tail(&rvdev->node, &rproc->rvdevs);
}
-static int copy_dma_range_map(struct device *to, struct device *from)
+void rproc_remove_rvdev(struct rproc_vdev *rvdev)
{
- const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
- int num_ranges = 0;
-
- if (!map)
- return 0;
-
- for (r = map; r->size; r++)
- num_ranges++;
-
- new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
- GFP_KERNEL);
- if (!new_map)
- return -ENOMEM;
- to->dma_range_map = new_map;
- return 0;
+ if (rvdev)
+ list_del(&rvdev->node);
}
-
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
@@ -520,12 +478,13 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
- int i, ret;
- char name[16];
+ size_t rsc_size;
+ struct rproc_vdev_data rvdev_data;
+ struct platform_device *pdev;
/* make sure resource isn't truncated */
- if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
- avail) {
+ rsc_size = struct_size(rsc, vring, rsc->num_of_vrings);
+ if (size_add(rsc_size, rsc->config_len) > avail) {
dev_err(dev, "vdev rsc is truncated\n");
return -EINVAL;
}
@@ -545,93 +504,19 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
return -EINVAL;
}
- rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
- if (!rvdev)
- return -ENOMEM;
-
- kref_init(&rvdev->refcount);
-
- rvdev->id = rsc->id;
- rvdev->rproc = rproc;
- rvdev->index = rproc->nb_vdev++;
+ rvdev_data.id = rsc->id;
+ rvdev_data.index = rproc->nb_vdev++;
+ rvdev_data.rsc_offset = offset;
+ rvdev_data.rsc = rsc;
- /* Initialise vdev subdevice */
- snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
- rvdev->dev.parent = &rproc->dev;
- rvdev->dev.release = rproc_rvdev_release;
- dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
- dev_set_drvdata(&rvdev->dev, rvdev);
-
- ret = device_register(&rvdev->dev);
- if (ret) {
- put_device(&rvdev->dev);
- return ret;
+ pdev = platform_device_register_data(dev, "rproc-virtio", rvdev_data.index, &rvdev_data,
+ sizeof(rvdev_data));
+ if (IS_ERR(pdev)) {
+ dev_err(dev, "failed to create rproc-virtio device\n");
+ return PTR_ERR(pdev);
}
- ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent);
- if (ret)
- goto free_rvdev;
-
- /* Make device dma capable by inheriting from parent's capabilities */
- set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));
-
- ret = dma_coerce_mask_and_coherent(&rvdev->dev,
- dma_get_mask(rproc->dev.parent));
- if (ret) {
- dev_warn(dev,
- "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
- dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
- }
-
- /* parse the vrings */
- for (i = 0; i < rsc->num_of_vrings; i++) {
- ret = rproc_parse_vring(rvdev, rsc, i);
- if (ret)
- goto free_rvdev;
- }
-
- /* remember the resource offset*/
- rvdev->rsc_offset = offset;
-
- /* allocate the vring resources */
- for (i = 0; i < rsc->num_of_vrings; i++) {
- ret = rproc_alloc_vring(rvdev, i);
- if (ret)
- goto unwind_vring_allocations;
- }
-
- list_add_tail(&rvdev->node, &rproc->rvdevs);
-
- rvdev->subdev.start = rproc_vdev_do_start;
- rvdev->subdev.stop = rproc_vdev_do_stop;
-
- rproc_add_subdev(rproc, &rvdev->subdev);
-
return 0;
-
-unwind_vring_allocations:
- for (i--; i >= 0; i--)
- rproc_free_vring(&rvdev->vring[i]);
-free_rvdev:
- device_unregister(&rvdev->dev);
- return ret;
-}
-
-void rproc_vdev_release(struct kref *ref)
-{
- struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount);
- struct rproc_vring *rvring;
- struct rproc *rproc = rvdev->rproc;
- int id;
-
- for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
- rvring = &rvdev->vring[id];
- rproc_free_vring(rvring);
- }
-
- rproc_remove_subdev(rproc, &rvdev->subdev);
- list_del(&rvdev->node);
- device_unregister(&rvdev->dev);
}
/**
@@ -1365,7 +1250,7 @@ void rproc_resource_cleanup(struct rproc *rproc)
/* clean up remote vdev entries */
list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
- kref_put(&rvdev->refcount, rproc_vdev_release);
+ platform_device_unregister(rvdev->pdev);
rproc_coredump_cleanup(rproc);
}
@@ -1885,6 +1770,45 @@ static int __rproc_detach(struct rproc *rproc)
return 0;
}
+static int rproc_attach_recovery(struct rproc *rproc)
+{
+ int ret;
+
+ ret = __rproc_detach(rproc);
+ if (ret)
+ return ret;
+
+ return __rproc_attach(rproc);
+}
+
+static int rproc_boot_recovery(struct rproc *rproc)
+{
+ const struct firmware *firmware_p;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_stop(rproc, true);
+ if (ret)
+ return ret;
+
+ /* generate coredump */
+ rproc->ops->coredump(rproc);
+
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ return ret;
+ }
+
+ /* boot the remote processor up again */
+ ret = rproc_start(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+
+ return ret;
+}
+
/**
* rproc_trigger_recovery() - recover a remoteproc
* @rproc: the remote processor
@@ -1899,7 +1823,6 @@ static int __rproc_detach(struct rproc *rproc)
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
- const struct firmware *firmware_p;
struct device *dev = &rproc->dev;
int ret;
@@ -1913,24 +1836,10 @@ int rproc_trigger_recovery(struct rproc *rproc)
dev_err(dev, "recovering %s\n", rproc->name);
- ret = rproc_stop(rproc, true);
- if (ret)
- goto unlock_mutex;
-
- /* generate coredump */
- rproc->ops->coredump(rproc);
-
- /* load firmware */
- ret = request_firmware(&firmware_p, rproc->firmware, dev);
- if (ret < 0) {
- dev_err(dev, "request_firmware failed: %d\n", ret);
- goto unlock_mutex;
- }
-
- /* boot the remote processor up again */
- ret = rproc_start(rproc, firmware_p);
-
- release_firmware(firmware_p);
+ if (rproc_has_feature(rproc, RPROC_FEAT_ATTACH_ON_RECOVERY))
+ ret = rproc_attach_recovery(rproc);
+ else
+ ret = rproc_boot_recovery(rproc);
unlock_mutex:
mutex_unlock(&rproc->lock);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 72d4d3d7d94d..d4dbb8d1d80c 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -24,16 +24,43 @@ struct rproc_debug_trace {
struct rproc_mem_entry trace_mem;
};
+/**
+ * struct rproc_vdev_data - remoteproc virtio device data
+ * @rsc_offset: offset of the vdev's resource entry
+ * @id: virtio device id (as in virtio_ids.h)
+ * @index: vdev position versus other vdev declared in resource table
+ * @rsc: pointer to the vdev resource entry. Valid only during vdev init as
+ * the resource can be cached by rproc.
+ */
+struct rproc_vdev_data {
+ u32 rsc_offset;
+ unsigned int id;
+ u32 index;
+ struct fw_rsc_vdev *rsc;
+};
+
+static inline bool rproc_has_feature(struct rproc *rproc, unsigned int feature)
+{
+ return test_bit(feature, rproc->features);
+}
+
+static inline int rproc_set_feature(struct rproc *rproc, unsigned int feature)
+{
+ if (feature >= RPROC_MAX_FEATURES)
+ return -EINVAL;
+
+ set_bit(feature, rproc->features);
+
+ return 0;
+}
+
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
-irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
-void rproc_vdev_release(struct kref *ref);
int rproc_of_parse_firmware(struct device *dev, int index,
const char **fw_name);
/* from remoteproc_virtio.c */
-int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
-int rproc_remove_virtio_dev(struct device *dev, void *data);
+irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
/* from remoteproc_debugfs.c */
void rproc_remove_trace_file(struct dentry *tfile);
@@ -83,6 +110,7 @@ static inline void rproc_char_device_remove(struct rproc *rproc)
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
+int rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
@@ -95,6 +123,8 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw);
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
+void rproc_add_rvdev(struct rproc *rproc, struct rproc_vdev *rvdev);
+void rproc_remove_rvdev(struct rproc_vdev *rvdev);
static inline int rproc_prepare_device(struct rproc *rproc)
{
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 0f7706e23eb9..0e95525c1158 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -9,9 +9,12 @@
* Brian Swetland <swetland@google.com>
*/
+#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
+#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
@@ -23,9 +26,32 @@
#include "remoteproc_internal.h"
+static int copy_dma_range_map(struct device *to, struct device *from)
+{
+ const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
+ int num_ranges = 0;
+
+ if (!map)
+ return 0;
+
+ for (r = map; r->size; r++)
+ num_ranges++;
+
+ new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+ to->dma_range_map = new_map;
+ return 0;
+}
+
static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
- return container_of(vdev->dev.parent, struct rproc_vdev, dev);
+ struct platform_device *pdev;
+
+ pdev = container_of(vdev->dev.parent, struct platform_device, dev);
+
+ return platform_get_drvdata(pdev);
}
static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
@@ -322,13 +348,10 @@ static void rproc_virtio_dev_release(struct device *dev)
{
struct virtio_device *vdev = dev_to_virtio(dev);
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
- struct rproc *rproc = vdev_to_rproc(vdev);
kfree(vdev);
- kref_put(&rvdev->refcount, rproc_vdev_release);
-
- put_device(&rproc->dev);
+ put_device(&rvdev->pdev->dev);
}
/**
@@ -341,10 +364,10 @@ static void rproc_virtio_dev_release(struct device *dev)
*
* Return: 0 on success or an appropriate error value otherwise
*/
-int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
+static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
struct rproc *rproc = rvdev->rproc;
- struct device *dev = &rvdev->dev;
+ struct device *dev = &rvdev->pdev->dev;
struct virtio_device *vdev;
struct rproc_mem_entry *mem;
int ret;
@@ -414,18 +437,8 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
vdev->dev.parent = dev;
vdev->dev.release = rproc_virtio_dev_release;
- /*
- * We're indirectly making a non-temporary copy of the rproc pointer
- * here, because drivers probed with this vdev will indirectly
- * access the wrapping rproc.
- *
- * Therefore we must increment the rproc refcount here, and decrement
- * it _only_ when the vdev is released.
- */
- get_device(&rproc->dev);
-
/* Reference the vdev and vring allocations */
- kref_get(&rvdev->refcount);
+ get_device(dev);
ret = register_virtio_device(vdev);
if (ret) {
@@ -449,10 +462,142 @@ out:
*
* Return: 0
*/
-int rproc_remove_virtio_dev(struct device *dev, void *data)
+static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
struct virtio_device *vdev = dev_to_virtio(dev);
unregister_virtio_device(vdev);
return 0;
}
+
+static int rproc_vdev_do_start(struct rproc_subdev *subdev)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+
+ return rproc_add_virtio_dev(rvdev, rvdev->id);
+}
+
+static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
+ struct device *dev = &rvdev->pdev->dev;
+ int ret;
+
+ ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
+ if (ret)
+ dev_warn(dev, "can't remove vdev child device: %d\n", ret);
+}
+
+static int rproc_virtio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc_vdev_data *rvdev_data = dev->platform_data;
+ struct rproc_vdev *rvdev;
+ struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
+ struct fw_rsc_vdev *rsc;
+ int i, ret;
+
+ if (!rvdev_data)
+ return -EINVAL;
+
+ rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
+ if (!rvdev)
+ return -ENOMEM;
+
+ rvdev->id = rvdev_data->id;
+ rvdev->rproc = rproc;
+ rvdev->index = rvdev_data->index;
+
+ ret = copy_dma_range_map(dev, rproc->dev.parent);
+ if (ret)
+ return ret;
+
+ /* Make device dma capable by inheriting from parent's capabilities */
+ set_dma_ops(dev, get_dma_ops(rproc->dev.parent));
+
+ ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
+ if (ret) {
+ dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
+ dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
+ }
+
+ platform_set_drvdata(pdev, rvdev);
+ rvdev->pdev = pdev;
+
+ rsc = rvdev_data->rsc;
+
+ /* parse the vrings */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_parse_vring(rvdev, rsc, i);
+ if (ret)
+ return ret;
+ }
+
+ /* remember the resource offset */
+ rvdev->rsc_offset = rvdev_data->rsc_offset;
+
+ /* allocate the vring resources */
+ for (i = 0; i < rsc->num_of_vrings; i++) {
+ ret = rproc_alloc_vring(rvdev, i);
+ if (ret)
+ goto unwind_vring_allocations;
+ }
+
+ rproc_add_rvdev(rproc, rvdev);
+
+ rvdev->subdev.start = rproc_vdev_do_start;
+ rvdev->subdev.stop = rproc_vdev_do_stop;
+
+ rproc_add_subdev(rproc, &rvdev->subdev);
+
+ /*
+ * We're indirectly making a non-temporary copy of the rproc pointer
+ * here, because the platform device or the vdev device will indirectly
+ * access the wrapping rproc.
+ *
+ * Therefore we must increment the rproc refcount here, and decrement
+ * it _only_ on platform remove.
+ */
+ get_device(&rproc->dev);
+
+ return 0;
+
+unwind_vring_allocations:
+ for (i--; i >= 0; i--)
+ rproc_free_vring(&rvdev->vring[i]);
+
+ return ret;
+}
+
+static int rproc_virtio_remove(struct platform_device *pdev)
+{
+ struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
+ struct rproc *rproc = rvdev->rproc;
+ struct rproc_vring *rvring;
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
+ rvring = &rvdev->vring[id];
+ rproc_free_vring(rvring);
+ }
+
+ rproc_remove_subdev(rproc, &rvdev->subdev);
+ rproc_remove_rvdev(rvdev);
+
+ of_reserved_mem_device_release(&pdev->dev);
+ dma_release_coherent_memory(&pdev->dev);
+
+ put_device(&rproc->dev);
+
+ return 0;
+}
+
+/* Platform driver */
+static struct platform_driver rproc_virtio_driver = {
+ .probe = rproc_virtio_probe,
+ .remove = rproc_virtio_remove,
+ .driver = {
+ .name = "rproc-virtio",
+ },
+};
+builtin_platform_driver(rproc_virtio_driver);
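
copy_dma_range_map() above duplicates the parent's zero-size-terminated bus_dma_region table so the new vdev platform device gets its own copy. A small userspace sketch of the same count-then-duplicate idiom follows; struct range and dup_range_map are invented stand-ins, and calloc/memcpy replace kmemdup.

/* Userspace sketch of duplicating a zero-size-terminated table. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct range {                    /* stand-in for struct bus_dma_region */
    unsigned long offset;
    unsigned long size;           /* size == 0 terminates the table */
};

static struct range *dup_range_map(const struct range *map)
{
    size_t n = 0;
    struct range *copy;

    if (!map)
        return NULL;
    for (const struct range *r = map; r->size; r++)
        n++;                                   /* count real entries */
    copy = calloc(n + 1, sizeof(*copy));       /* +1 keeps the terminator */
    if (!copy)
        return NULL;
    memcpy(copy, map, n * sizeof(*copy));      /* terminator stays zeroed */
    return copy;
}

int main(void)
{
    struct range parent[] = { { 0x1000, 0x100 }, { 0x4000, 0x200 }, { 0, 0 } };
    struct range *child = dup_range_map(parent);

    for (struct range *r = child; r && r->size; r++)
        printf("offset=%#lx size=%#lx\n", r->offset, r->size);
    free(child);
    return 0;
}

The kernel version does the copy in one kmemdup() call over num_ranges + 1 entries, which includes the terminator directly.
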
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 806773e88832..de176c2fbad9 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -152,6 +152,13 @@ config RESET_PISTACHIO
help
This enables the reset driver for ImgTec Pistachio SoCs.
+config RESET_POLARFIRE_SOC
+ bool "Microchip PolarFire SoC (MPFS) Reset Driver"
+ depends on AUXILIARY_BUS && MCHP_CLK_MPFS
+ default MCHP_CLK_MPFS
+ help
+ This driver supports peripheral reset for the Microchip PolarFire SoC.
+
config RESET_QCOM_AOSS
tristate "Qcom AOSS Reset Driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -201,7 +208,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
- default ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index cd5cf8e7c6a7..3e7e5fd633a8 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
obj-$(CONFIG_RESET_NPCM) += reset-npcm.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_RESET_POLARFIRE_SOC) += reset-mpfs.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
@@ -40,4 +41,3 @@ obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
-
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 185a333df66c..d2408725eb2c 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -329,6 +329,7 @@ static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MP_RESET_PCIEPHY_PERST:
value = assert ? 0 : bit;
break;
}
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index 00b612a0effa..f3528dd1d084 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -33,11 +33,8 @@ static struct regmap_config sparx5_reset_regmap_config = {
.reg_stride = 4,
};
-static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int sparx5_switch_reset(struct mchp_reset_context *ctx)
{
- struct mchp_reset_context *ctx =
- container_of(rcdev, struct mchp_reset_context, rcdev);
u32 val;
/* Make sure the core is PROTECTED from reset */
@@ -54,8 +51,14 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
1, 100);
}
+static int sparx5_reset_noop(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
static const struct reset_control_ops sparx5_reset_ops = {
- .reset = sparx5_switch_reset,
+ .reset = sparx5_reset_noop,
};
static int mchp_sparx5_map_syscon(struct platform_device *pdev, char *name,
@@ -122,6 +125,11 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
ctx->rcdev.of_node = dn;
ctx->props = device_get_match_data(&pdev->dev);
+ /* Issue the reset very early; our actual reset callback is a noop. */
+ err = sparx5_switch_reset(ctx);
+ if (err)
+ return err;
+
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
@@ -163,6 +171,10 @@ static int __init mchp_sparx5_reset_init(void)
return platform_driver_register(&mchp_sparx5_reset_driver);
}
+/*
+ * Because this is a global reset, keep this postcore_initcall() to issue the
+ * reset as early as possible during the kernel startup.
+ */
postcore_initcall(mchp_sparx5_reset_init);
MODULE_DESCRIPTION("Microchip Sparx5 switch reset driver");
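
The Sparx5 change turns the switch reset into a one-shot action issued at probe time, while the registered reset op becomes a no-op so consumers cannot re-trigger the global reset later. A rough sketch of that pattern, with all names (reset_ops, probe, ...) invented and no claim about the real reset framework API:

/* Sketch of "do the real reset once, register a no-op afterwards". */
#include <stdio.h>

struct reset_ops {
    int (*reset)(unsigned long id);
};

static int real_global_reset(void)
{
    puts("issuing one-shot global switch reset");
    return 0;
}

static int noop_reset(unsigned long id)
{
    (void)id;                        /* consumers get a harmless no-op */
    return 0;
}

static const struct reset_ops registered_ops = { .reset = noop_reset };

static int probe(void)
{
    int err = real_global_reset();   /* done exactly once, very early */

    if (err)
        return err;
    /* from here on, only registered_ops.reset() is reachable */
    return 0;
}

int main(void)
{
    if (probe())
        return 1;
    registered_ops.reset(0);         /* later consumer call: no effect */
    return 0;
}
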
diff --git a/drivers/reset/reset-mpfs.c b/drivers/reset/reset-mpfs.c
new file mode 100644
index 000000000000..e003e50590ec
--- /dev/null
+++ b/drivers/reset/reset-mpfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PolarFire SoC (MPFS) Peripheral Clock Reset Controller
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
+ *
+ */
+#include <linux/auxiliary_bus.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/microchip,mpfs-clock.h>
+#include <soc/microchip/mpfs.h>
+
+/*
+ * The ENVM reset is the lowest bit in the register. The CLK_FOO defines
+ * from the dt-bindings are used to make configuration easier, so this
+ * accounts for the offset of 3 between them.
+ */
+#define MPFS_PERIPH_OFFSET CLK_ENVM
+#define MPFS_NUM_RESETS 30u
+#define MPFS_SLEEP_MIN_US 100
+#define MPFS_SLEEP_MAX_US 200
+
+/* block concurrent access to the soft reset register */
+static DEFINE_SPINLOCK(mpfs_reset_lock);
+
+/*
+ * Peripheral clock resets
+ */
+
+static int mpfs_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mpfs_reset_lock, flags);
+
+ reg = mpfs_reset_read(rcdev->dev);
+ reg |= BIT(id);
+ mpfs_reset_write(rcdev->dev, reg);
+
+ spin_unlock_irqrestore(&mpfs_reset_lock, flags);
+
+ return 0;
+}
+
+static int mpfs_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mpfs_reset_lock, flags);
+
+ reg = mpfs_reset_read(rcdev->dev);
+ reg &= ~BIT(id);
+ mpfs_reset_write(rcdev->dev, reg);
+
+ spin_unlock_irqrestore(&mpfs_reset_lock, flags);
+
+ return 0;
+}
+
+static int mpfs_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ u32 reg = mpfs_reset_read(rcdev->dev);
+
+ /*
+ * It is safe to return here as MPFS_NUM_RESETS makes sure the sign bit
+ * is never hit.
+ */
+ return (reg & BIT(id));
+}
+
+static int mpfs_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ mpfs_assert(rcdev, id);
+
+ usleep_range(MPFS_SLEEP_MIN_US, MPFS_SLEEP_MAX_US);
+
+ mpfs_deassert(rcdev, id);
+
+ return 0;
+}
+
+static const struct reset_control_ops mpfs_reset_ops = {
+ .reset = mpfs_reset,
+ .assert = mpfs_assert,
+ .deassert = mpfs_deassert,
+ .status = mpfs_status,
+};
+
+static int mpfs_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned int index = reset_spec->args[0];
+
+ /*
+ * CLK_RESERVED does not map to a clock, but it does map to a reset,
+ * so it has to be accounted for here. It is the reset for the fabric,
+ * so if this reset is requested, do not perform it.
+ */
+ if (index == CLK_RESERVED) {
+ dev_err(rcdev->dev, "Resetting the fabric is not supported\n");
+ return -EINVAL;
+ }
+
+ if (index < MPFS_PERIPH_OFFSET || index >= (MPFS_PERIPH_OFFSET + rcdev->nr_resets)) {
+ dev_err(rcdev->dev, "Invalid reset index %u\n", index);
+ return -EINVAL;
+ }
+
+ return index - MPFS_PERIPH_OFFSET;
+}
+
+static int mpfs_reset_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct reset_controller_dev *rcdev;
+
+ rcdev = devm_kzalloc(dev, sizeof(*rcdev), GFP_KERNEL);
+ if (!rcdev)
+ return -ENOMEM;
+
+ rcdev->dev = dev;
+ rcdev->dev->parent = dev->parent;
+ rcdev->ops = &mpfs_reset_ops;
+ rcdev->of_node = dev->parent->of_node;
+ rcdev->of_reset_n_cells = 1;
+ rcdev->of_xlate = mpfs_reset_xlate;
+ rcdev->nr_resets = MPFS_NUM_RESETS;
+
+ return devm_reset_controller_register(dev, rcdev);
+}
+
+static const struct auxiliary_device_id mpfs_reset_ids[] = {
+ {
+ .name = "clk_mpfs.reset-mpfs",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, mpfs_reset_ids);
+
+static struct auxiliary_driver mpfs_reset_driver = {
+ .probe = mpfs_reset_probe,
+ .id_table = mpfs_reset_ids,
+};
+
+module_auxiliary_driver(mpfs_reset_driver);
+
+MODULE_DESCRIPTION("Microchip PolarFire SoC Reset Driver");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MCHP_CLK_MPFS);
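
mpfs_reset_xlate() maps the devicetree cell, which reuses the CLK_* numbering from the binding header, onto a zero-based reset index: it rejects the reserved fabric entry, rejects anything outside the peripheral window, and then subtracts the CLK_ENVM offset. A self-contained sketch of that arithmetic; the constant values here are illustrative placeholders, the real ones come from dt-bindings/clock/microchip,mpfs-clock.h.

/* Userspace sketch of the MPFS reset index translation. */
#include <stdio.h>

#define PERIPH_OFFSET   3    /* stands in for CLK_ENVM */
#define NUM_RESETS      30u
#define RESERVED_INDEX  18   /* stands in for CLK_RESERVED (fabric); illustrative value */

static int xlate(unsigned int index)
{
    if (index == RESERVED_INDEX)
        return -1;           /* resetting the fabric is not supported */
    if (index < PERIPH_OFFSET || index >= PERIPH_OFFSET + NUM_RESETS)
        return -1;           /* outside the peripheral reset window */
    return (int)(index - PERIPH_OFFSET);
}

int main(void)
{
    printf("CLK index 3  -> reset bit %d\n", xlate(3));    /* 0: ENVM */
    printf("CLK index 10 -> reset bit %d\n", xlate(10));   /* 7 */
    printf("CLK index 18 -> %d (rejected)\n", xlate(18));
    return 0;
}
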
diff --git a/drivers/reset/reset-npcm.c b/drivers/reset/reset-npcm.c
index 24c55efa98e5..f2333506b0a6 100644
--- a/drivers/reset/reset-npcm.c
+++ b/drivers/reset/reset-npcm.c
@@ -291,7 +291,7 @@ static void npcm_usb_reset_npcm8xx(struct npcm_rc_data *rc)
iprst2 |= ipsrst2_bits;
iprst3 |= (ipsrst3_bits | NPCM_IPSRST3_USBPHY1 |
NPCM_IPSRST3_USBPHY2);
- iprst2 |= ipsrst4_bits;
+ iprst4 |= ipsrst4_bits;
writel(iprst1, rc->base + NPCM_IPSRST1);
writel(iprst2, rc->base + NPCM_IPSRST2);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 4f2189111494..3e0b8f3496ed 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -76,7 +76,9 @@ int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
mutex_lock(&eptdev->ept_lock);
if (eptdev->ept) {
- rpmsg_destroy_ept(eptdev->ept);
+ /* The default endpoint is released by the rpmsg core */
+ if (!eptdev->default_ept)
+ rpmsg_destroy_ept(eptdev->ept);
eptdev->ept = NULL;
}
mutex_unlock(&eptdev->ept_lock);
@@ -424,15 +426,12 @@ int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent
struct rpmsg_channel_info chinfo)
{
struct rpmsg_eptdev *eptdev;
- int ret;
eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
if (IS_ERR(eptdev))
return PTR_ERR(eptdev);
- ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);
-
- return ret;
+ return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index e0bbb11d912e..6d6a55efb9cc 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -297,11 +297,9 @@ static int bq32k_probe(struct i2c_client *client)
return 0;
}
-static int bq32k_remove(struct i2c_client *client)
+static void bq32k_remove(struct i2c_client *client)
{
bq32k_sysfs_unregister(&client->dev);
-
- return 0;
}
static const struct i2c_device_id bq32k_id[] = {
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index b19de5100b1a..7f089f066163 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -530,7 +530,7 @@ static int ds1374_probe(struct i2c_client *client)
return 0;
}
-static int ds1374_remove(struct i2c_client *client)
+static void ds1374_remove(struct i2c_client *client)
{
struct ds1374 *ds1374 = i2c_get_clientdata(client);
@@ -542,8 +542,6 @@ static int ds1374_remove(struct i2c_client *client)
devm_free_irq(&client->dev, client->irq, client);
cancel_work_sync(&ds1374->work);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 1fc6627d854d..1bfca39079d4 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -472,12 +472,11 @@ static int isl12026_probe_new(struct i2c_client *client)
return devm_rtc_register_device(priv->rtc);
}
-static int isl12026_remove(struct i2c_client *client)
+static void isl12026_remove(struct i2c_client *client)
{
struct isl12026 *priv = i2c_get_clientdata(client);
i2c_unregister_device(priv->nvm_client);
- return 0;
}
static const struct of_device_id isl12026_dt_match[] = {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index d868458cd40e..e0b4d3794320 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -989,7 +989,7 @@ static int m41t80_probe(struct i2c_client *client,
return 0;
}
-static int m41t80_remove(struct i2c_client *client)
+static void m41t80_remove(struct i2c_client *client)
{
#ifdef CONFIG_RTC_DRV_M41T80_WDT
struct m41t80_data *clientdata = i2c_get_clientdata(client);
@@ -999,8 +999,6 @@ static int m41t80_remove(struct i2c_client *client)
unregister_reboot_notifier(&wdt_notifier);
}
#endif
-
- return 0;
}
static struct i2c_driver m41t80_driver = {
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index cb15983383f5..9562c477e1c9 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -910,10 +910,9 @@ exit:
return err;
}
-static int rs5c372_remove(struct i2c_client *client)
+static void rs5c372_remove(struct i2c_client *client)
{
rs5c_sysfs_unregister(&client->dev);
- return 0;
}
static struct i2c_driver rs5c372_driver = {
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index ba0d22a5b421..f587afa84357 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -657,10 +657,9 @@ static int x1205_probe(struct i2c_client *client)
return 0;
}
-static int x1205_remove(struct i2c_client *client)
+static void x1205_remove(struct i2c_client *client)
{
x1205_sysfs_unregister(&client->dev);
- return 0;
}
static const struct i2c_device_id x1205_id[] = {
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ea82821599f6..5a6d9c15395f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -41,15 +41,6 @@
#define DASD_DIAG_MOD "dasd_diag_mod"
-static unsigned int queue_depth = 32;
-static unsigned int nr_hw_queues = 4;
-
-module_param(queue_depth, uint, 0444);
-MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
-
-module_param(nr_hw_queues, uint, 0444);
-MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
-
/*
* SECTION: exported variables of dasd.c
*/
@@ -68,8 +59,6 @@ MODULE_LICENSE("GPL");
/*
* SECTION: prototypes for static functions of dasd.c
*/
-static int dasd_alloc_queue(struct dasd_block *);
-static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
@@ -198,21 +187,11 @@ EXPORT_SYMBOL_GPL(dasd_free_block);
*/
static int dasd_state_new_to_known(struct dasd_device *device)
{
- int rc;
-
/*
* As long as the device is not in state DASD_STATE_NEW we want to
* keep the reference count > 0.
*/
dasd_get_device(device);
-
- if (device->block) {
- rc = dasd_alloc_queue(device->block);
- if (rc) {
- dasd_put_device(device);
- return rc;
- }
- }
device->state = DASD_STATE_KNOWN;
return 0;
}
@@ -226,9 +205,6 @@ static int dasd_state_known_to_new(struct dasd_device *device)
dasd_eer_disable(device);
device->state = DASD_STATE_NEW;
- if (device->block)
- dasd_free_queue(device->block);
-
/* Give up reference we took in dasd_state_new_to_known. */
dasd_put_device(device);
return 0;
@@ -1591,9 +1567,8 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- if (device->block->request_queue)
- blk_mq_run_hw_queues(device->block->request_queue,
- true);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -2691,7 +2666,7 @@ static void dasd_block_timeout(struct timer_list *t)
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
dasd_schedule_block_bh(block);
- blk_mq_run_hw_queues(block->request_queue, true);
+ blk_mq_run_hw_queues(block->gdp->queue, true);
}
/*
@@ -3239,7 +3214,7 @@ static void dasd_request_done(struct request *req)
blk_mq_run_hw_queues(req->q, true);
}
-static struct blk_mq_ops dasd_mq_ops = {
+struct blk_mq_ops dasd_mq_ops = {
.queue_rq = do_dasd_request,
.complete = dasd_request_done,
.timeout = dasd_times_out,
@@ -3247,45 +3222,6 @@ static struct blk_mq_ops dasd_mq_ops = {
.exit_hctx = dasd_exit_hctx,
};
-/*
- * Allocate and initialize request queue and default I/O scheduler.
- */
-static int dasd_alloc_queue(struct dasd_block *block)
-{
- int rc;
-
- block->tag_set.ops = &dasd_mq_ops;
- block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
- block->tag_set.nr_hw_queues = nr_hw_queues;
- block->tag_set.queue_depth = queue_depth;
- block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- block->tag_set.numa_node = NUMA_NO_NODE;
-
- rc = blk_mq_alloc_tag_set(&block->tag_set);
- if (rc)
- return rc;
-
- block->request_queue = blk_mq_init_queue(&block->tag_set);
- if (IS_ERR(block->request_queue))
- return PTR_ERR(block->request_queue);
-
- block->request_queue->queuedata = block;
-
- return 0;
-}
-
-/*
- * Deactivate and free request queue.
- */
-static void dasd_free_queue(struct dasd_block *block)
-{
- if (block->request_queue) {
- blk_mq_destroy_queue(block->request_queue);
- blk_mq_free_tag_set(&block->tag_set);
- block->request_queue = NULL;
- }
-}
-
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
struct dasd_device *base;
@@ -3762,10 +3698,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- if (device->block->request_queue)
- blk_mq_run_hw_queues(device->block->request_queue,
- true);
- }
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
+ }
if (!device->stopped)
wake_up(&generic_waitq);
@@ -3916,8 +3851,8 @@ void dasd_generic_space_avail(struct dasd_device *device)
if (device->block) {
dasd_schedule_block_bh(device->block);
- if (device->block->request_queue)
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->gdp)
+ blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
if (!device->stopped)
wake_up(&generic_waitq);
@@ -3927,7 +3862,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
/*
* clear active requests and requeue them to block layer if possible
*/
-static int dasd_generic_requeue_all_requests(struct dasd_device *device)
+int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
@@ -4001,6 +3936,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
dasd_schedule_device_bh(device);
return rc;
}
+EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
static void do_requeue_requests(struct work_struct *work)
{
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 299001ad9a32..81d283b3cd3b 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1050,6 +1050,11 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
dev_err(&device->cdev->dev, "An I/O request was rejected"
" because writing is inhibited\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ } else if (sense[7] & SNS7_INVALID_ON_SEC) {
+ dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
+ /* suppress dump of sense data for this error */
+ set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
+ erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else {
/* fatal error - set status to FAILED
internal error 09 - Command Reject */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index dc78a523a69f..b6b938aa6615 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -675,12 +675,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
- struct alias_pav_group *group = private->pavgroup;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
+ struct alias_pav_group *group;
unsigned long flags;
- if (!group || !lcu)
+ if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
@@ -697,6 +697,11 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
}
spin_lock_irqsave(&lcu->lock, flags);
+ group = private->pavgroup;
+ if (!group) {
+ spin_unlock_irqrestore(&lcu->lock, flags);
+ return NULL;
+ }
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
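
The dasd_alias hunk fixes a locking problem: private->pavgroup used to be read before lcu->lock was taken, so the group could change or disappear underneath dasd_alias_get_start_dev(); the pointer is now sampled and checked only under the lock. A tiny pthread sketch of that read-under-lock discipline, with all names invented:

/* Sketch: read a mutable shared pointer only under its lock. */
#include <pthread.h>
#include <stdio.h>

struct group { int next; };

static pthread_mutex_t lcu_lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *pavgroup;        /* may be cleared or replaced concurrently */

static int get_start_dev(void)
{
    struct group *grp;
    int ret = -1;

    pthread_mutex_lock(&lcu_lock);
    grp = pavgroup;                   /* sample the pointer under the lock */
    if (grp)
        ret = grp->next;              /* safe: cannot change while locked */
    pthread_mutex_unlock(&lcu_lock);
    return ret;
}

int main(void)
{
    static struct group g = { .next = 42 };

    pavgroup = &g;
    printf("start dev: %d\n", get_start_dev());
    pavgroup = NULL;
    printf("start dev: %d\n", get_start_dev());   /* -1, no group */
    return 0;
}
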
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 811e79c9f59c..cb83f81da416 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -26,7 +26,6 @@
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
-#define DASD_BUS_ID_SIZE 20
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@@ -50,6 +49,7 @@ struct dasd_devmap {
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
+ struct dasd_copy_relation *copy;
};
/*
@@ -130,7 +130,7 @@ __setup ("dasd=", dasd_call_setup);
/*
* Read a device busid/devno from a string.
*/
-static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
+static int dasd_busid(char *str, int *id0, int *id1, int *devno)
{
unsigned int val;
char *tok;
@@ -426,7 +426,7 @@ dasd_add_busid(const char *bus_id, int features)
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
- strlcpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
+ strscpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
@@ -438,16 +438,12 @@ dasd_add_busid(const char *bus_id, int features)
return devmap;
}
-/*
- * Find devmap for device with given bus_id.
- */
static struct dasd_devmap *
-dasd_find_busid(const char *bus_id)
+dasd_find_busid_locked(const char *bus_id)
{
struct dasd_devmap *devmap, *tmp;
int hash;
- spin_lock(&dasd_devmap_lock);
devmap = ERR_PTR(-ENODEV);
hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
@@ -456,6 +452,19 @@ dasd_find_busid(const char *bus_id)
break;
}
}
+ return devmap;
+}
+
+/*
+ * Find devmap for device with given bus_id.
+ */
+static struct dasd_devmap *
+dasd_find_busid(const char *bus_id)
+{
+ struct dasd_devmap *devmap;
+
+ spin_lock(&dasd_devmap_lock);
+ devmap = dasd_find_busid_locked(bus_id);
spin_unlock(&dasd_devmap_lock);
return devmap;
}
@@ -585,6 +594,238 @@ dasd_create_device(struct ccw_device *cdev)
}
/*
+ * allocate a PPRC data structure and call the discipline function to fill it
+ */
+static int dasd_devmap_get_pprc_status(struct dasd_device *device,
+ struct dasd_pprc_data_sc4 **data)
+{
+ struct dasd_pprc_data_sc4 *temp;
+
+ if (!device->discipline || !device->discipline->pprc_status) {
+ dev_warn(&device->cdev->dev, "Unable to query copy relation status\n");
+ return -EOPNOTSUPP;
+ }
+ temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ /* get PPRC information from storage */
+ if (device->discipline->pprc_status(device, temp)) {
+ dev_warn(&device->cdev->dev, "Error during copy relation status query\n");
+ kfree(temp);
+ return -EINVAL;
+ }
+ *data = temp;
+
+ return 0;
+}
+
+/*
+ * find an entry in a PPRC device_info array by a given UID;
+ * depending on the primary/secondary state of the device, the UID has to
+ * be matched against the respective primary or secondary fields
+ */
+static int dasd_devmap_entry_from_pprc_data(struct dasd_pprc_data_sc4 *data,
+ struct dasd_uid uid,
+ bool primary)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (primary) {
+ if (data->dev_info[i].prim_cu_ssid == uid.ssid &&
+ data->dev_info[i].primary == uid.real_unit_addr)
+ return i;
+ } else {
+ if (data->dev_info[i].sec_cu_ssid == uid.ssid &&
+ data->dev_info[i].secondary == uid.real_unit_addr)
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
+ * check the consistency of a specified copy relation by checking
+ * the following things:
+ *
+ * - is the given device part of a copy pair setup
+ * - does the state of the device match the state in the PPRC status data
+ * - does the device UID match with the UID in the PPRC status data
+ * - to prevent misrouted IO check if the given device is present in all
+ * related PPRC status data
+ */
+static int dasd_devmap_check_copy_relation(struct dasd_device *device,
+ struct dasd_copy_entry *entry,
+ struct dasd_pprc_data_sc4 *data,
+ struct dasd_copy_relation *copy)
+{
+ struct dasd_pprc_data_sc4 *tmp_dat;
+ struct dasd_device *tmp_dev;
+ struct dasd_uid uid;
+ int i, j;
+
+ if (!device->discipline || !device->discipline->get_uid ||
+ device->discipline->get_uid(device, &uid))
+ return 1;
+
+ i = dasd_devmap_entry_from_pprc_data(data, uid, entry->primary);
+ if (i < 0) {
+ dev_warn(&device->cdev->dev, "Device not part of a copy relation\n");
+ return 1;
+ }
+
+ /* double check which role the current device has */
+ if (entry->primary) {
+ if (data->dev_info[i].flags & 0x80) {
+ dev_warn(&device->cdev->dev, "Copy pair secondary is setup as primary\n");
+ return 1;
+ }
+ if (data->dev_info[i].prim_cu_ssid != uid.ssid ||
+ data->dev_info[i].primary != uid.real_unit_addr) {
+ dev_warn(&device->cdev->dev,
+ "Primary device %s does not match copy pair status primary device %04x\n",
+ dev_name(&device->cdev->dev),
+ data->dev_info[i].prim_cu_ssid |
+ data->dev_info[i].primary);
+ return 1;
+ }
+ } else {
+ if (!(data->dev_info[i].flags & 0x80)) {
+ dev_warn(&device->cdev->dev, "Copy pair primary is setup as secondary\n");
+ return 1;
+ }
+ if (data->dev_info[i].sec_cu_ssid != uid.ssid ||
+ data->dev_info[i].secondary != uid.real_unit_addr) {
+ dev_warn(&device->cdev->dev,
+ "Secondary device %s does not match copy pair status secondary device %04x\n",
+ dev_name(&device->cdev->dev),
+ data->dev_info[i].sec_cu_ssid |
+ data->dev_info[i].secondary);
+ return 1;
+ }
+ }
+
+ /*
+ * the current device has to be part of the copy relation of all
+ * entries to prevent misrouted IO to another copy pair
+ */
+ for (j = 0; j < DASD_CP_ENTRIES; j++) {
+ if (entry == &copy->entry[j])
+ tmp_dev = device;
+ else
+ tmp_dev = copy->entry[j].device;
+
+ if (!tmp_dev)
+ continue;
+
+ if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat))
+ return 1;
+
+ if (dasd_devmap_entry_from_pprc_data(tmp_dat, uid, entry->primary) < 0) {
+ dev_warn(&tmp_dev->cdev->dev,
+ "Copy pair relation does not contain device: %s\n",
+ dev_name(&device->cdev->dev));
+ kfree(tmp_dat);
+ return 1;
+ }
+ kfree(tmp_dat);
+ }
+ return 0;
+}
+
+/* delete device from copy relation entry */
+static void dasd_devmap_delete_copy_relation_device(struct dasd_device *device)
+{
+ struct dasd_copy_relation *copy;
+ int i;
+
+ if (!device->copy)
+ return;
+
+ copy = device->copy;
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].device == device)
+ copy->entry[i].device = NULL;
+ }
+ dasd_put_device(device);
+ device->copy = NULL;
+}
+
+/*
+ * read all required information for a copy relation setup and set up the device
+ * accordingly
+ */
+int dasd_devmap_set_device_copy_relation(struct ccw_device *cdev,
+ bool pprc_enabled)
+{
+ struct dasd_pprc_data_sc4 *data = NULL;
+ struct dasd_copy_entry *entry = NULL;
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ struct dasd_device *device;
+ int i, rc = 0;
+
+ devmap = dasd_devmap_from_cdev(cdev);
+ if (IS_ERR(devmap))
+ return PTR_ERR(devmap);
+
+ device = devmap->device;
+ if (!device)
+ return -ENODEV;
+
+ copy = devmap->copy;
+ /* no copy pair setup for this device */
+ if (!copy)
+ goto out;
+
+ rc = dasd_devmap_get_pprc_status(device, &data);
+ if (rc)
+ return rc;
+
+ /* print error if PPRC is requested but not enabled on storage server */
+ if (!pprc_enabled) {
+ dev_err(&cdev->dev, "Copy relation not enabled on storage server\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (!data->dev_info[0].state) {
+ dev_warn(&device->cdev->dev, "Copy pair setup requested for device not in copy relation\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* find entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(dev_name(&cdev->dev),
+ copy->entry[i].busid, DASD_BUS_ID_SIZE) == 0) {
+ entry = &copy->entry[i];
+ break;
+ }
+ }
+ if (!entry) {
+ dev_warn(&device->cdev->dev, "Copy relation entry not found\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* check if the copy relation is valid */
+ if (dasd_devmap_check_copy_relation(device, entry, data, copy)) {
+ dev_warn(&device->cdev->dev, "Copy relation faulty\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dasd_get_device(device);
+ copy->entry[i].device = device;
+ device->copy = copy;
+out:
+ kfree(data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dasd_devmap_set_device_copy_relation);
+
+/*
* Wait queue for dasd_delete_device waits.
*/
static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
@@ -617,6 +858,8 @@ dasd_delete_device(struct dasd_device *device)
dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ /* Remove copy relation */
+ dasd_devmap_delete_copy_relation_device(device);
/*
* Drop ref_count by 3, one for the devmap reference, one for
* the cdev reference and one for the passed reference.
@@ -694,6 +937,7 @@ void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
gdp->private_data = devmap;
spin_unlock(&dasd_devmap_lock);
}
+EXPORT_SYMBOL(dasd_add_link_to_gendisk);
struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
{
@@ -1334,7 +1578,6 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
- struct request_queue *q;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
@@ -1346,15 +1589,13 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
dasd_put_device(device);
return -EINVAL;
}
- q = device->block->request_queue;
- if (!q) {
+ if (!device->block->gdp) {
dasd_put_device(device);
return -ENODEV;
}
device->blk_timeout = val;
-
- blk_queue_rq_timeout(q, device->blk_timeout * HZ);
+ blk_queue_rq_timeout(device->block->gdp->queue, val * HZ);
dasd_put_device(device);
return count;
@@ -1683,6 +1924,347 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
static struct kobj_attribute path_fcs_attribute =
__ATTR(fc_security, 0444, dasd_path_fcs_show, NULL);
+/*
+ * print copy relation in the form
+ * primary,secondary[1] primary,secondary[2], ...
+ */
+static ssize_t
+dasd_copy_pair_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char prim_busid[DASD_BUS_ID_SIZE];
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ int len = 0;
+ int i;
+
+ devmap = dasd_find_busid(dev_name(dev));
+ if (IS_ERR(devmap))
+ return -ENODEV;
+
+ if (!devmap->copy)
+ return -ENODEV;
+
+ copy = devmap->copy;
+ /* find primary */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured && copy->entry[i].primary) {
+ strscpy(prim_busid, copy->entry[i].busid,
+ DASD_BUS_ID_SIZE);
+ break;
+ }
+ }
+ if (!copy->entry[i].primary)
+ goto out;
+
+ /* print all secondary */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured && !copy->entry[i].primary)
+ len += sysfs_emit_at(buf, len, "%s,%s ", prim_busid,
+ copy->entry[i].busid);
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+out:
+ return len;
+}
+
+static int dasd_devmap_set_copy_relation(struct dasd_devmap *devmap,
+ struct dasd_copy_relation *copy,
+ char *busid, bool primary)
+{
+ int i;
+
+ /* find free entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ /* current bus_id already included, nothing to do */
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ return 0;
+
+ if (!copy->entry[i].configured)
+ break;
+ }
+ if (i == DASD_CP_ENTRIES)
+ return -EINVAL;
+
+ copy->entry[i].configured = true;
+ strscpy(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE);
+ if (primary) {
+ copy->active = &copy->entry[i];
+ copy->entry[i].primary = true;
+ }
+ if (!devmap->copy)
+ devmap->copy = copy;
+
+ return 0;
+}
+
+static void dasd_devmap_del_copy_relation(struct dasd_copy_relation *copy,
+ char *busid)
+{
+ int i;
+
+ spin_lock(&dasd_devmap_lock);
+ /* find entry */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ break;
+ }
+ if (i == DASD_CP_ENTRIES || !copy->entry[i].configured) {
+ spin_unlock(&dasd_devmap_lock);
+ return;
+ }
+
+ copy->entry[i].configured = false;
+ memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
+ if (copy->active == &copy->entry[i]) {
+ copy->active = NULL;
+ copy->entry[i].primary = false;
+ }
+ spin_unlock(&dasd_devmap_lock);
+}
+
+static int dasd_devmap_clear_copy_relation(struct device *dev)
+{
+ struct dasd_copy_relation *copy;
+ struct dasd_devmap *devmap;
+ int i, rc = 1;
+
+ devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(devmap))
+ return 1;
+
+ spin_lock(&dasd_devmap_lock);
+ if (!devmap->copy)
+ goto out;
+
+ copy = devmap->copy;
+ /* first check if all secondary devices are offline */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (!copy->entry[i].configured)
+ continue;
+
+ if (copy->entry[i].device == copy->active->device)
+ continue;
+
+ if (copy->entry[i].device)
+ goto out;
+ }
+ /* clear all devmap entries */
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (strlen(copy->entry[i].busid) == 0)
+ continue;
+ if (copy->entry[i].device) {
+ dasd_put_device(copy->entry[i].device);
+ copy->entry[i].device->copy = NULL;
+ copy->entry[i].device = NULL;
+ }
+ devmap = dasd_find_busid_locked(copy->entry[i].busid);
+ devmap->copy = NULL;
+ memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
+ }
+ kfree(copy);
+ rc = 0;
+out:
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+}
+
+/*
+ * parse BUSIDs from a copy pair
+ */
+static int dasd_devmap_parse_busid(const char *buf, char *prim_busid,
+ char *sec_busid)
+{
+ char *primary, *secondary, *tmp, *pt;
+ int id0, id1, id2;
+
+ pt = kstrdup(buf, GFP_KERNEL);
+ tmp = pt;
+ if (!tmp)
+ return -ENOMEM;
+
+ primary = strsep(&tmp, ",");
+ if (!primary) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ secondary = strsep(&tmp, ",");
+ if (!secondary) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ if (dasd_busid(primary, &id0, &id1, &id2)) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ sprintf(prim_busid, "%01x.%01x.%04x", id0, id1, id2);
+ if (dasd_busid(secondary, &id0, &id1, &id2)) {
+ kfree(pt);
+ return -EINVAL;
+ }
+ sprintf(sec_busid, "%01x.%01x.%04x", id0, id1, id2);
+ kfree(pt);
+
+ return 0;
+}
+
+static ssize_t dasd_copy_pair_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_devmap *prim_devmap, *sec_devmap;
+ char prim_busid[DASD_BUS_ID_SIZE];
+ char sec_busid[DASD_BUS_ID_SIZE];
+ struct dasd_copy_relation *copy;
+ struct dasd_device *device;
+ bool pprc_enabled;
+ int rc;
+
+ if (strncmp(buf, "clear", strlen("clear")) == 0) {
+ if (dasd_devmap_clear_copy_relation(dev))
+ return -EINVAL;
+ return count;
+ }
+
+ rc = dasd_devmap_parse_busid(buf, prim_busid, sec_busid);
+ if (rc)
+ return rc;
+
+ if (strncmp(dev_name(dev), prim_busid, DASD_BUS_ID_SIZE) != 0 &&
+ strncmp(dev_name(dev), sec_busid, DASD_BUS_ID_SIZE) != 0)
+ return -EINVAL;
+
+ /* allocate primary devmap if needed */
+ prim_devmap = dasd_find_busid(prim_busid);
+ if (IS_ERR(prim_devmap))
+ prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
+
+ /* allocate secondary devmap if needed */
+ sec_devmap = dasd_find_busid(sec_busid);
+ if (IS_ERR(sec_devmap))
+ sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
+
+ /* setting copy relation is only allowed for offline secondary */
+ if (sec_devmap->device)
+ return -EINVAL;
+
+ if (prim_devmap->copy) {
+ copy = prim_devmap->copy;
+ } else if (sec_devmap->copy) {
+ copy = sec_devmap->copy;
+ } else {
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ }
+ spin_lock(&dasd_devmap_lock);
+ rc = dasd_devmap_set_copy_relation(prim_devmap, copy, prim_busid, true);
+ if (rc) {
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+ }
+ rc = dasd_devmap_set_copy_relation(sec_devmap, copy, sec_busid, false);
+ if (rc) {
+ spin_unlock(&dasd_devmap_lock);
+ return rc;
+ }
+ spin_unlock(&dasd_devmap_lock);
+
+ /* if the primary device is already online, call device setup directly */
+ if (prim_devmap->device && !prim_devmap->device->copy) {
+ device = prim_devmap->device;
+ if (device->discipline->pprc_enabled) {
+ pprc_enabled = device->discipline->pprc_enabled(device);
+ rc = dasd_devmap_set_device_copy_relation(device->cdev,
+ pprc_enabled);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+ }
+ if (rc) {
+ dasd_devmap_del_copy_relation(copy, prim_busid);
+ dasd_devmap_del_copy_relation(copy, sec_busid);
+ count = rc;
+ }
+
+ return count;
+}
+static DEVICE_ATTR(copy_pair, 0644, dasd_copy_pair_show,
+ dasd_copy_pair_store);
+
+static ssize_t
+dasd_copy_role_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dasd_copy_relation *copy;
+ struct dasd_device *device;
+ int len, i;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ if (!device->copy) {
+ len = sysfs_emit(buf, "none\n");
+ goto out;
+ }
+ copy = device->copy;
+ /* only the active device is primary */
+ if (copy->active->device == device) {
+ len = sysfs_emit(buf, "primary\n");
+ goto out;
+ }
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].device == device) {
+ len = sysfs_emit(buf, "secondary\n");
+ goto out;
+ }
+ }
+ /* not in the list, no COPY role */
+ len = sysfs_emit(buf, "none\n");
+out:
+ dasd_put_device(device);
+ return len;
+}
+static DEVICE_ATTR(copy_role, 0444, dasd_copy_role_show, NULL);
+
+static ssize_t dasd_device_ping(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dasd_device *device;
+ size_t rc;
+
+ device = dasd_device_from_cdev(to_ccwdev(dev));
+ if (IS_ERR(device))
+ return -ENODEV;
+
+ /*
+ * do not try during offline processing;
+ * this is an early check only,
+ * the sleep_on function itself checks for offline
+ * processing again
+ */
+ if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+ rc = -EBUSY;
+ goto out;
+ }
+ if (!device->discipline || !device->discipline->device_ping) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ rc = device->discipline->device_ping(device);
+ if (!rc)
+ rc = count;
+out:
+ dasd_put_device(device);
+ return rc;
+}
+static DEVICE_ATTR(ping, 0200, NULL, dasd_device_ping);
+
#define DASD_DEFINE_ATTR(_name, _func) \
static ssize_t dasd_##_name##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -1739,6 +2321,9 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_hpf.attr,
&dev_attr_ese.attr,
&dev_attr_fc_security.attr,
+ &dev_attr_copy_pair.attr,
+ &dev_attr_copy_role.attr,
+ &dev_attr_ping.attr,
NULL,
};
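
dasd_copy_pair_store() accepts the pair as "<primary busid>,<secondary busid>" and dasd_devmap_parse_busid() splits the string and re-normalizes both halves into the x.y.zzzz form. A userspace sketch of that parsing step, assuming sscanf in place of the driver's dasd_busid() helper and with parse_copy_pair invented for illustration:

/* Sketch: parse "0.0.1234,0.0.4321" into two normalized bus IDs. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUS_ID_SIZE 20

static int parse_copy_pair(const char *buf, char *prim, char *sec)
{
    char *dup = strdup(buf), *rest = dup, *tok;
    unsigned int id0, id1, devno;
    char *out[2] = { prim, sec };

    if (!dup)
        return -1;
    for (int i = 0; i < 2; i++) {
        tok = strsep(&rest, ",");
        if (!tok || sscanf(tok, "%x.%x.%x", &id0, &id1, &devno) != 3) {
            free(dup);
            return -1;
        }
        snprintf(out[i], BUS_ID_SIZE, "%01x.%01x.%04x", id0, id1, devno);
    }
    free(dup);
    return 0;
}

int main(void)
{
    char prim[BUS_ID_SIZE], sec[BUS_ID_SIZE];

    if (parse_copy_pair("0.0.1234,0.0.4321", prim, sec) == 0)
        printf("primary=%s secondary=%s\n", prim, sec);
    return 0;
}
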
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 94ee59864971..f956a4ac9881 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -627,7 +627,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
static void dasd_diag_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
+ struct request_queue *q = block->gdp->queue;
int max;
max = DIAG_MAX_BLOCKS << block->s2b_shift;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 3cc93e2e4e15..662730f3b027 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2013,6 +2013,49 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
}
/*
+ * return whether the device is the copy relation primary if a copy relation is active
+ */
+static int dasd_device_is_primary(struct dasd_device *device)
+{
+ if (!device->copy)
+ return 1;
+
+ if (device->copy->active->device == device)
+ return 1;
+
+ return 0;
+}
+
+static int dasd_eckd_alloc_block(struct dasd_device *device)
+{
+ struct dasd_block *block;
+ struct dasd_uid temp_uid;
+
+ if (!dasd_device_is_primary(device))
+ return 0;
+
+ dasd_eckd_get_uid(device, &temp_uid);
+ if (temp_uid.type == UA_BASE_DEVICE) {
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "could not allocate dasd block structure");
+ return PTR_ERR(block);
+ }
+ device->block = block;
+ block->base = device;
+ }
+ return 0;
+}
+
+static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private = device->private;
+
+ return private->rdc_data.facilities.PPRC_enabled;
+}
+
+/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
*/
@@ -2020,8 +2063,6 @@ static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
- struct dasd_block *block;
- struct dasd_uid temp_uid;
int rc, i;
int readonly;
unsigned long value;
@@ -2079,20 +2120,29 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->default_expires = value;
}
- dasd_eckd_get_uid(device, &temp_uid);
- if (temp_uid.type == UA_BASE_DEVICE) {
- block = dasd_alloc_block();
- if (IS_ERR(block)) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "could not allocate dasd "
- "block structure");
- rc = PTR_ERR(block);
- goto out_err1;
- }
- device->block = block;
- block->base = device;
+ /* Read Device Characteristics */
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &private->rdc_data, 64);
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read device characteristic failed, rc=%d", rc);
+ goto out_err1;
+ }
+
+ /* setup PPRC for device from devmap */
+ rc = dasd_devmap_set_device_copy_relation(device->cdev,
+ dasd_eckd_pprc_enabled(device));
+ if (rc) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "copy relation setup failed, rc=%d", rc);
+ goto out_err1;
}
+ /* check if block device is needed and allocate in case */
+ rc = dasd_eckd_alloc_block(device);
+ if (rc)
+ goto out_err1;
+
/* register lcu with alias handling, enable PAV */
rc = dasd_alias_make_device_known_to_lcu(device);
if (rc)
@@ -2117,15 +2167,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
/* Read Extent Pool Information */
dasd_eckd_read_ext_pool_info(device);
- /* Read Device Characteristics */
- rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
- &private->rdc_data, 64);
- if (rc) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read device characteristic failed, rc=%d", rc);
- goto out_err3;
- }
-
if ((device->features & DASD_FEATURE_USERAW) &&
!(private->rdc_data.facilities.RT_in_LR)) {
dev_err(&device->cdev->dev, "The storage server does not "
@@ -6078,6 +6119,207 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
return 0;
}
+static struct dasd_device
+*copy_relation_find_device(struct dasd_copy_relation *copy,
+ char *busid)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
+ return copy->entry[i].device;
+ }
+ return NULL;
+}
+
+/*
+ * set the new active/primary device
+ */
+static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
+ char *old_busid)
+{
+ int i;
+
+ for (i = 0; i < DASD_CP_ENTRIES; i++) {
+ if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, new_busid,
+ DASD_BUS_ID_SIZE) == 0) {
+ copy->active = &copy->entry[i];
+ copy->entry[i].primary = true;
+ } else if (copy->entry[i].configured &&
+ strncmp(copy->entry[i].busid, old_busid,
+ DASD_BUS_ID_SIZE) == 0) {
+ copy->entry[i].primary = false;
+ }
+ }
+}
+
+/*
+ * The function will swap the role of a given copy pair.
+ * During the swap operation the block device relation is disconnected
+ * from the old primary and connected to the new one.
+ *
+ * IO is paused on the block queue before swap and may be resumed afterwards.
+ */
+static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
+ char *sec_busid)
+{
+ struct dasd_device *primary, *secondary;
+ struct dasd_copy_relation *copy;
+ struct dasd_block *block;
+ struct gendisk *gdp;
+
+ copy = device->copy;
+ if (!copy)
+ return DASD_COPYPAIRSWAP_INVALID;
+ primary = copy->active->device;
+ if (!primary)
+ return DASD_COPYPAIRSWAP_INVALID;
+ /* double check if swap has correct primary */
+ if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
+ return DASD_COPYPAIRSWAP_PRIMARY;
+
+ secondary = copy_relation_find_device(copy, sec_busid);
+ if (!secondary)
+ return DASD_COPYPAIRSWAP_SECONDARY;
+
+ /*
+ * the device should usually already be quiesced for the swap;
+ * out of paranoia, stop the device and requeue all requests again
+ */
+ dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
+ dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
+ dasd_generic_requeue_all_requests(primary);
+
+ /* swap DASD internal device <> block assignment */
+ block = primary->block;
+ primary->block = NULL;
+ secondary->block = block;
+ block->base = secondary;
+ /* set new primary device in COPY relation */
+ copy_pair_set_active(copy, sec_busid, prim_busid);
+
+ /* swap blocklayer device link */
+ gdp = block->gdp;
+ dasd_add_link_to_gendisk(gdp, secondary);
+
+ /* re-enable device */
+ dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
+ dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
+ dasd_schedule_device_bh(secondary);
+
+ return DASD_COPYPAIRSWAP_SUCCESS;
+}
+
+/*
+ * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
+ */
+static int dasd_eckd_query_pprc_status(struct dasd_device *device,
+ struct dasd_pprc_data_sc4 *data)
+{
+ struct dasd_pprc_data_sc4 *pprc_data;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ sizeof(*prssdp) + sizeof(*pprc_data) + 1,
+ device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate query PPRC status request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *)cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_PPRCEQ;
+ prssdp->varies[0] = PPRCEQ_SCOPE_4;
+ pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)prssdp;
+
+ /* Read Subsystem Data - PPRC Extended Query */
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(*pprc_data);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t)pprc_data;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc == 0) {
+ *data = *pprc_data;
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "PPRC Extended Query failed with rc=%d\n",
+ rc);
+ rc = -EOPNOTSUPP;
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+/*
+ * ECKD NOP - no operation
+ */
+static int dasd_eckd_nop(struct dasd_device *device)
+{
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate NOP request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 1;
+ cqr->expires = 10 * HZ;
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_NOP;
+ ccw->flags |= CCW_FLAG_SLI;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+
+ rc = dasd_sleep_on_interruptible(cqr);
+ if (rc != 0) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "NOP failed with rc=%d\n", rc);
+ rc = -EOPNOTSUPP;
+ }
+ dasd_sfree_request(cqr, cqr->memdev);
+ return rc;
+}
+
+static int dasd_eckd_device_ping(struct dasd_device *device)
+{
+ return dasd_eckd_nop(device);
+}
+
/*
* Perform Subsystem Function - CUIR response
*/
@@ -6602,7 +6844,7 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
+ struct request_queue *q = block->gdp->queue;
struct dasd_device *device = block->base;
int max;
@@ -6697,6 +6939,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
.ese_format = dasd_eckd_ese_format,
.ese_read = dasd_eckd_ese_read,
+ .pprc_status = dasd_eckd_query_pprc_status,
+ .pprc_enabled = dasd_eckd_pprc_enabled,
+ .copy_pair_swap = dasd_eckd_copy_pair_swap,
+ .device_ping = dasd_eckd_device_ping,
};
static int __init
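
dasd_eckd_copy_pair_swap() re-points the block device from the old primary to the new one while I/O is stopped: the old primary drops its block, the secondary takes it over, block->base and the gendisk link follow, and only then are the stop bits cleared. A simplified sketch of just that ownership hand-over, with toy structs standing in for dasd_device and dasd_block:

/* Sketch of the copy-pair swap: move block ownership between devices. */
#include <stdio.h>

struct blockdev;

struct device {
    const char *busid;
    struct blockdev *block;          /* owned only by the active device */
    int stopped;                     /* stand-in for DASD_STOPPED_PPRC */
};

struct blockdev {
    struct device *base;             /* device currently backing the block */
};

static void swap_pair(struct device *primary, struct device *secondary)
{
    struct blockdev *block = primary->block;

    primary->stopped = secondary->stopped = 1;   /* quiesce both sides */

    primary->block = NULL;                       /* detach old primary */
    secondary->block = block;                    /* attach new primary */
    block->base = secondary;

    primary->stopped = secondary->stopped = 0;   /* resume I/O */
}

int main(void)
{
    struct blockdev blk;
    struct device prim = { "0.0.1234", &blk, 0 };
    struct device sec = { "0.0.4321", NULL, 0 };

    blk.base = &prim;
    swap_pair(&prim, &sec);
    printf("block now served by %s\n", blk.base->busid);
    return 0;
}
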
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index a91b265441cc..f9299bd184ba 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -13,6 +13,7 @@
/*****************************************************************************
* SECTION: CCW Definitions
****************************************************************************/
+#define DASD_ECKD_CCW_NOP 0x03
#define DASD_ECKD_CCW_WRITE 0x05
#define DASD_ECKD_CCW_READ 0x06
#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
@@ -66,10 +67,16 @@
* Perform Subsystem Function / Sub-Orders
*/
#define PSF_SUBORDER_QHA 0x1C /* Query Host Access */
+#define PSF_SUBORDER_PPRCEQ 0x50 /* PPRC Extended Query */
#define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */
#define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */
/*
+ * PPRC Extended Query Scopes
+ */
+#define PPRCEQ_SCOPE_4 0x04 /* Scope 4 for PPRC Extended Query */
+
+/*
* CUIR response condition codes
*/
#define PSF_CUIR_INVALID 0x00
@@ -261,7 +268,7 @@ struct dasd_eckd_characteristics {
unsigned char reserved3:8;
unsigned char defect_wr:1;
unsigned char XRC_supported:1;
- unsigned char reserved4:1;
+ unsigned char PPRC_enabled:1;
unsigned char striping:1;
unsigned char reserved5:4;
unsigned char cfw:1;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 5ae64af9ccea..d4d31cd11d26 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -313,7 +313,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strlcpy(header.busid, dev_name(&device->cdev->dev),
+ strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
@@ -356,7 +356,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
- strlcpy(header.busid, dev_name(&device->cdev->dev),
+ strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 60be7f7bf2d1..cddfb01a3dca 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -767,7 +767,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
static void dasd_fba_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->request_queue;
+ struct request_queue *q = block->gdp->queue;
unsigned int max_bytes, max_discard_sectors;
int max;
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5a83f0a39901..998a961e1704 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -25,7 +25,14 @@
#include "dasd_int.h"
-static struct lock_class_key dasd_bio_compl_lkclass;
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
/*
* Allocate and register gendisk structure for device.
@@ -41,10 +48,21 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
- gdp = blk_mq_alloc_disk_for_queue(block->request_queue,
- &dasd_bio_compl_lkclass);
- if (!gdp)
- return -ENOMEM;
+ block->tag_set.ops = &dasd_mq_ops;
+ block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+ block->tag_set.nr_hw_queues = nr_hw_queues;
+ block->tag_set.queue_depth = queue_depth;
+ block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ block->tag_set.numa_node = NUMA_NO_NODE;
+ rc = blk_mq_alloc_tag_set(&block->tag_set);
+ if (rc)
+ return rc;
+
+ gdp = blk_mq_alloc_disk(&block->tag_set, block);
+ if (IS_ERR(gdp)) {
+ blk_mq_free_tag_set(&block->tag_set);
+ return PTR_ERR(gdp);
+ }
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
@@ -100,6 +118,7 @@ void dasd_gendisk_free(struct dasd_block *block)
block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
+ blk_mq_free_tag_set(&block->tag_set);
}
}
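With the externally supplied request_queue gone, the DASD block layer now owns a blk_mq_tag_set per block device, fed by the new queue_depth and nr_hw_queues module parameters, and frees it only after the gendisk reference is dropped. The generic shape of that lifecycle, mirroring the calls above (a sketch, not additional driver code):

    struct blk_mq_tag_set *set = &block->tag_set;
    struct gendisk *gdp;
    int rc;

    set->ops = &dasd_mq_ops;                       /* queue_rq etc. */
    set->cmd_size = sizeof(struct dasd_ccw_req);   /* per-request driver data */
    set->nr_hw_queues = nr_hw_queues;
    set->queue_depth = queue_depth;
    set->flags = BLK_MQ_F_SHOULD_MERGE;
    set->numa_node = NUMA_NO_NODE;

    rc = blk_mq_alloc_tag_set(set);
    if (rc)
            return rc;

    gdp = blk_mq_alloc_disk(set, block);           /* block becomes queuedata */
    if (IS_ERR(gdp)) {
            blk_mq_free_tag_set(set);
            return PTR_ERR(gdp);
    }

    /* teardown order matters: put_disk(gdp) first, then blk_mq_free_tag_set(set) */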
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 333a399f754e..97adc8a7ae6b 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -260,6 +260,55 @@ struct dasd_uid {
};
/*
+ * PPRC Status data
+ */
+struct dasd_pprc_header {
+ __u8 entries; /* 0 Number of device entries */
+ __u8 unused; /* 1 unused */
+ __u16 entry_length; /* 2-3 Length of device entry */
+ __u32 unused2; /* 4-7 unused */
+} __packed;
+
+struct dasd_pprc_dev_info {
+ __u8 state; /* 0 Copy State */
+ __u8 flags; /* 1 Flags */
+ __u8 reserved1[2]; /* 2-3 reserved */
+ __u8 prim_lss; /* 4 Primary device LSS */
+ __u8 primary; /* 5 Primary device address */
+ __u8 sec_lss; /* 6 Secondary device LSS */
+ __u8 secondary; /* 7 Secondary device address */
+ __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */
+ __u8 reserved2[12]; /* 10-21 reserved */
+ __u16 prim_cu_ssid; /* 22-23 Primary Control Unit SSID */
+ __u8 reserved3[12]; /* 24-35 reserved */
+ __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */
+ __u8 reserved4[90]; /* 38-127 reserved */
+} __packed;
+
+struct dasd_pprc_data_sc4 {
+ struct dasd_pprc_header header;
+ struct dasd_pprc_dev_info dev_info[5];
+} __packed;
+
+#define DASD_BUS_ID_SIZE 20
+#define DASD_CP_ENTRIES 5
+
+struct dasd_copy_entry {
+ char busid[DASD_BUS_ID_SIZE];
+ struct dasd_device *device;
+ bool primary;
+ bool configured;
+};
+
+struct dasd_copy_relation {
+ struct dasd_copy_entry entry[DASD_CP_ENTRIES];
+ struct dasd_copy_entry *active;
+};
+
+int dasd_devmap_set_device_copy_relation(struct ccw_device *,
+ bool pprc_enabled);
+
+/*
* the struct dasd_discipline is
* sth like a table of virtual functions, if you think of dasd_eckd
* inheriting dasd...
@@ -387,6 +436,10 @@ struct dasd_discipline {
struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
struct dasd_ccw_req *, struct irb *);
int (*ese_read)(struct dasd_ccw_req *, struct irb *);
+ int (*pprc_status)(struct dasd_device *, struct dasd_pprc_data_sc4 *);
+ bool (*pprc_enabled)(struct dasd_device *);
+ int (*copy_pair_swap)(struct dasd_device *, char *, char *);
+ int (*device_ping)(struct dasd_device *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -583,12 +636,12 @@ struct dasd_device {
struct dasd_profile profile;
struct dasd_format_entry format_entry;
struct kset *paths_info;
+ struct dasd_copy_relation *copy;
};
struct dasd_block {
/* Block device stuff. */
struct gendisk *gdp;
- struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
struct block_device *bdev;
@@ -629,6 +682,7 @@ struct dasd_queue {
#define DASD_STOPPED_PENDING 4 /* long busy */
#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
#define DASD_STOPPED_SU 16 /* summary unit check handling */
+#define DASD_STOPPED_PPRC 32 /* PPRC swap */
#define DASD_STOPPED_NOSPC 128 /* no space left */
/* per device flags */
@@ -654,6 +708,22 @@ struct dasd_queue {
void dasd_put_device_wake(struct dasd_device *);
/*
+ * return values of the copy pair swap function
+ * 0x00: swap successful
+ * 0x01: swap data invalid
+ * 0x02: no active device found
+ * 0x03: wrong primary specified
+ * 0x04: secondary device not found
+ * 0x05: swap already running
+ */
+#define DASD_COPYPAIRSWAP_SUCCESS 0
+#define DASD_COPYPAIRSWAP_INVALID 1
+#define DASD_COPYPAIRSWAP_NOACTIVE 2
+#define DASD_COPYPAIRSWAP_PRIMARY 3
+#define DASD_COPYPAIRSWAP_SECONDARY 4
+#define DASD_COPYPAIRSWAP_MULTIPLE 5
+
+/*
* Reference count inliners
*/
static inline void
@@ -779,6 +849,7 @@ extern debug_info_t *dasd_debug_area;
extern struct dasd_profile dasd_global_profile;
extern unsigned int dasd_global_profile_level;
extern const struct block_device_operations dasd_device_operations;
+extern struct blk_mq_ops dasd_mq_ops;
extern struct kmem_cache *dasd_page_cache;
@@ -837,6 +908,8 @@ int dasd_generic_verify_path(struct dasd_device *, __u8);
void dasd_generic_space_exhaust(struct dasd_device *, struct dasd_ccw_req *);
void dasd_generic_space_avail(struct dasd_device *);
+int dasd_generic_requeue_all_requests(struct dasd_device *);
+
int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 95349f95758c..d0ddf2cc9786 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -379,6 +379,56 @@ out_err:
return rc;
}
+/*
+ * Swap driver internal copy relation.
+ */
+static int
+dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
+{
+ struct dasd_copypair_swap_data_t data;
+ struct dasd_device *device;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ device = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!device)
+ return -ENODEV;
+
+ if (copy_from_user(&data, argp, sizeof(struct dasd_copypair_swap_data_t))) {
+ dasd_put_device(device);
+ return -EFAULT;
+ }
+ if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
+ pr_warn("%s: Ivalid swap data specified.\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return DASD_COPYPAIRSWAP_INVALID;
+ }
+ if (bdev_is_partition(bdev)) {
+ pr_warn("%s: The specified DASD is a partition and cannot be swapped\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return DASD_COPYPAIRSWAP_INVALID;
+ }
+ if (!device->copy) {
+ pr_warn("%s: The specified DASD has no copy pair set up\n",
+ dev_name(&device->cdev->dev));
+ dasd_put_device(device);
+ return -ENODEV;
+ }
+ if (!device->discipline->copy_pair_swap) {
+ dasd_put_device(device);
+ return -EOPNOTSUPP;
+ }
+ rc = device->discipline->copy_pair_swap(device, data.primary,
+ data.secondary);
+ dasd_put_device(device);
+
+ return rc;
+}
+
#ifdef CONFIG_DASD_PROFILE
/*
* Reset device profile information
@@ -637,6 +687,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDRAS:
rc = dasd_ioctl_release_space(bdev, argp);
break;
+ case BIODASDCOPYPAIRSWAP:
+ rc = dasd_ioctl_copy_pair_swap(bdev, argp);
+ break;
default:
/* if the discipline has an ioctl method try it. */
rc = -ENOTTY;
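From userspace the swap is a single ioctl on the whole-disk node. A hedged sketch: it assumes BIODASDCOPYPAIRSWAP and struct dasd_copypair_swap_data_t (primary/secondary bus-ID strings plus a reserved field that must stay zero) are exported through the DASD uapi header, which is not part of the hunks shown here, and the device node and bus IDs below are made up:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/dasd.h>           /* assumed location of the DASD uapi definitions */

    int main(void)
    {
            struct dasd_copypair_swap_data_t data;
            int fd, rc;

            memset(&data, 0, sizeof(data));         /* reserved bytes must stay zero */
            strcpy(data.primary, "0.0.1234");       /* hypothetical bus IDs */
            strcpy(data.secondary, "0.0.5678");

            fd = open("/dev/dasda", O_RDONLY);      /* whole disk, not a partition */
            if (fd < 0)
                    return 1;

            rc = ioctl(fd, BIODASDCOPYPAIRSWAP, &data);
            if (rc)
                    fprintf(stderr, "copy pair swap failed: %d\n", rc);

            close(fd);
            return rc ? 1 : 0;
    }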
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5187705bd0f3..93b80da60277 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -614,7 +614,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = -ENAMETOOLONG;
goto seg_list_del;
}
- strlcpy(local_buf, buf, i + 1);
+ strscpy(local_buf, buf, i + 1);
dev_info->num_of_segments = num_of_segments;
rc = dcssblk_is_continuous(dev_info);
if (rc < 0)
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
index 1f5bdb237862..43df27ceec11 100644
--- a/drivers/s390/char/hmcdrv_cache.c
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -154,7 +154,7 @@ static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
/* cache some file info (FTP command, file name and file
* size) unconditionally
*/
- strlcpy(hmcdrv_cache_file.fname, ftp->fname,
+ strscpy(hmcdrv_cache_file.fname, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
hmcdrv_cache_file.id = ftp->id;
pr_debug("caching cmd %d, file size %zu for '%s'\n",
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index b58df0dd0039..c21dc68e05a0 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -54,10 +54,10 @@ struct tape_class_device *register_tape_dev(
if (!tcd)
return ERR_PTR(-ENOMEM);
- strlcpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
+ strscpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
*s = '!';
- strlcpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
+ strscpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
*s = '!';
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 5c83f71c1d0e..26e3995ac062 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1760,7 +1760,7 @@ tty3270_flush_chars(struct tty_struct *tty)
* Check for visible/invisible input switches
*/
static void
-tty3270_set_termios(struct tty_struct *tty, struct ktermios *old)
+tty3270_set_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct tty3270 *tp;
int new;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index f6da215ccf9f..6165e6aae762 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -30,6 +30,7 @@
#include <asm/checksum.h>
#include <asm/os_info.h>
#include <asm/switch_to.h>
+#include <asm/maccess.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 4bb7965daa0f..1a9714af51e4 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -87,7 +87,7 @@ int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
debug_unregister(irq_ptr->debug_area);
return -ENOMEM;
}
- strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+ strscpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
new_entry->dbf_info = irq_ptr->debug_area;
mutex_lock(&qdio_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qdio_dbf_list);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 6c8c41fac4e1..ee82207b4e60 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -984,6 +984,11 @@ static ssize_t assign_adapter_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
+
set_bit_inv(apid, matrix_mdev->matrix.apm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1109,6 +1114,11 @@ static ssize_t unassign_adapter_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
ret = count;
@@ -1183,6 +1193,11 @@ static ssize_t assign_domain_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
+
set_bit_inv(apqi, matrix_mdev->matrix.aqm);
ret = vfio_ap_mdev_validate_masks(matrix_mdev);
@@ -1286,6 +1301,11 @@ static ssize_t unassign_domain_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
ret = count;
@@ -1329,6 +1349,11 @@ static ssize_t assign_control_domain_store(struct device *dev,
goto done;
}
+ if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
+
/* Set the bit in the ADM (bitmask) corresponding to the AP control
* domain number (id). The bits in the mask, from most significant to
* least significant, correspond to IDs 0 up to the one less than the
@@ -1378,6 +1403,11 @@ static ssize_t unassign_control_domain_store(struct device *dev,
goto done;
}
+ if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
+
clear_bit_inv(domid, matrix_mdev->matrix.adm);
if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index e0fdd54bfeb7..37b551bd43bf 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1566,7 +1566,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
goto out_dev;
}
- strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+ strscpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n",
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 98c4864932d2..0ff61d00feb1 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -28,7 +28,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
return NULL;
}
- strlcpy(this->name, name, sizeof(this->name));
+ strscpy(this->name, name, sizeof(this->name));
init_waitqueue_head(&this->wait_q);
f = kzalloc(sizeof(fsm), order);
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 9eba0a32e9f9..e250f49535fa 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -188,9 +188,9 @@ static void qeth_get_drvinfo(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
- strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
+ strscpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
- strlcpy(info->fw_version, card->info.mcl_level,
+ strscpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2d4436cbcb47..9dc935886e9f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1133,7 +1133,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(card->dev, &card->napi, qeth_poll);
return register_netdev(card->dev);
}
@@ -1530,8 +1530,8 @@ static void qeth_addr_change_event(struct qeth_card *card,
else
INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
data->card = card;
- memcpy(&data->ac_event, hostevs,
- sizeof(struct qeth_ipacmd_addr_change) + extrasize);
+ data->ac_event = *hostevs;
+ memcpy(data->ac_event.entry, hostevs->entry, extrasize);
queue_delayed_work(card->event_wq, &data->dwork, 0);
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8d44bce0477a..d8487a10cd55 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1910,7 +1910,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_set_tso_max_size(card->dev,
PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1));
- netif_napi_add(card->dev, &card->napi, qeth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(card->dev, &card->napi, qeth_poll);
return register_netdev(card->dev);
}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index fd2f1c31bd21..df782646e856 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -103,7 +103,7 @@ static void __init zfcp_init_device_setup(char *devstr)
token = strsep(&str, ",");
if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE)
goto err_out;
- strlcpy(busid, token, ZFCP_BUS_ID_SIZE);
+ strscpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index b61acbb09be3..77917b339870 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -885,7 +885,7 @@ static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
dev_name(&adapter->ccw_device->dev),
init_utsname()->nodename);
else
- strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ strscpy(fc_host_symbolic_name(adapter->scsi_host),
gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
return 0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index cd823ff5deab..6cb9cca9565b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2006,7 +2006,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
retval = pci_enable_device(pdev);
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
- goto out_disable_device;
+ return -ENODEV;
}
pci_set_master(pdev);
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a853c5497af6..ffdecb12d654 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -912,7 +912,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
data_buffer_length_adjusted = (data_buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
retval = -ENOMEM;
goto out;
@@ -921,7 +921,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
tw_ioctl = (TW_New_Ioctl *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl) - 1))
+ if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl)))
goto out2;
passthru = (TW_Passthru *)&tw_ioctl->firmware_command;
@@ -966,15 +966,15 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
/* Load the sg list */
switch (TW_SGL_OUT(tw_ioctl->firmware_command.opcode__sgloffset)) {
case 2:
- tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.param.sgl[0].length = data_buffer_length_adjusted;
break;
case 3:
- tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.io.sgl[0].length = data_buffer_length_adjusted;
break;
case 5:
- passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl);
passthru->sg_list[0].length = data_buffer_length_adjusted;
break;
}
@@ -1017,12 +1017,12 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
}
/* Now copy the response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length - 1))
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length))
goto out2;
retval = 0;
out2:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), cpu_addr, dma_handle);
out:
mutex_unlock(&tw_dev->ioctl_lock);
mutex_unlock(&tw_mutex);
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index e8f3f081b7d8..120a087bdf3c 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -348,7 +348,7 @@ typedef struct TAG_TW_New_Ioctl {
unsigned int data_buffer_length;
unsigned char padding [508];
TW_Command firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_New_Ioctl;
/* GetParam descriptor */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 955cb69a5418..03e71e3d5e5b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -2,9 +2,10 @@
menu "SCSI device support"
config SCSI_MOD
- tristate
- default y if SCSI=n || SCSI=y
- default m if SCSI=m
+ tristate
+ default y if SCSI=n || SCSI=y
+ default m if SCSI=m
+ depends on BLOCK
config RAID_ATTRS
tristate "RAID Transport Class"
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 928099163f0f..f2f3405cdec5 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -194,7 +194,7 @@ struct ahd_linux_iocell_opts
#define AIC79XX_PRECOMP_INDEX 0
#define AIC79XX_SLEWRATE_INDEX 1
#define AIC79XX_AMPLITUDE_INDEX 2
-static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static struct ahd_linux_iocell_opts aic79xx_iocell_info[] __ro_after_init =
{
AIC79XX_DEFAULT_IOOPTS,
AIC79XX_DEFAULT_IOOPTS,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 9aafe0002ab1..05e1a63e00c3 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1366,9 +1366,9 @@ csio_show_hw_state(struct device *dev,
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (csio_is_hw_ready(hw))
- return snprintf(buf, PAGE_SIZE, "ready\n");
- else
- return snprintf(buf, PAGE_SIZE, "not ready\n");
+ return sysfs_emit(buf, "ready\n");
+
+ return sysfs_emit(buf, "not ready\n");
}
/* Device reset */
@@ -1430,7 +1430,7 @@ csio_show_dbg_level(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+ return sysfs_emit(buf, "%x\n", ln->params.log_level);
}
/* Store debug level */
@@ -1476,7 +1476,7 @@ csio_show_num_reg_rnodes(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+ return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes);
}
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
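sysfs_emit() encodes the sysfs contract (a page-sized buffer, output starting at offset 0), so show callbacks no longer need to pass PAGE_SIZE around. A minimal sketch with a hypothetical attribute:

    static ssize_t hw_state_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
    {
            /* sysfs_emit() clamps to PAGE_SIZE and warns on a misaligned buffer */
            return sysfs_emit(buf, "%s\n", "ready");
    }
    static DEVICE_ATTR_RO(hw_state);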
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index e7be95ee7d64..cd1324ec742d 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -132,7 +132,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
break;
case SISL_AFU_RC_OUT_OF_DATA_BUFS:
/* Retry */
- scp->result = (DID_ALLOC_FAILURE << 16);
+ scp->result = (DID_ERROR << 16);
break;
default:
scp->result = (DID_ERROR << 16);
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
index ff2ad9b38575..dd3437412ffc 100644
--- a/drivers/scsi/esas2r/atioctl.h
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -831,6 +831,7 @@ struct __packed atto_hba_trace {
u32 total_length;
u32 trace_mask;
u8 reserved2[48];
+ u8 contents[];
};
#define ATTO_FUNC_SCSI_PASS_THRU 0x04
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 08f4e43c7d9e..e003d923acbf 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -947,10 +947,9 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
break;
}
- memcpy(trc + 1,
+ memcpy(trc->contents,
a->fw_coredump_buff + offset,
len);
-
hi->data_length = len;
} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
memset(a->fw_coredump_buff, 0,
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 24c83bc4f5dc..9aebf4a26b13 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -649,6 +649,7 @@ extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
int enable);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
gfp_t gfp_flags);
+extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 33af5b8dede2..699b07abb6b0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1341,6 +1341,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct asd_sas_port *_sas_port = NULL;
int phy_no;
@@ -1369,6 +1370,12 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
}
}
+ /*
+ * Ensure any bcast events are processed prior to the async nexus
+ * reset calls issued from hisi_sas_clear_nexus_ha() ->
+ * hisi_sas_async_I_T_nexus_reset()
+ */
+ sas_drain_work(sas_ha);
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
@@ -1527,9 +1534,9 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
+ clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_done(hisi_hba);
- clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
dev_info(dev, "controller reset complete\n");
return 0;
@@ -1816,12 +1823,14 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
ASYNC_DOMAIN_EXCLUSIVE(async);
- int i;
+ int i, ret;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (!r.done)
- return TMF_RESP_FUNC_FAILED;
+ if (!r.done) {
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
@@ -1838,7 +1847,9 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
async_synchronize_full_domain(&async);
hisi_sas_release_tasks(hisi_hba);
- return TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_COMPLETE;
+out:
+ return ret;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1982,6 +1993,22 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
+{
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct sas_ha_struct *sha = &hisi_hba->sha;
+
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ return;
+
+ if (test_bit(SAS_HA_FROZEN, &sha->state))
+ return;
+
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
+
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
int i;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 349546bacb2b..d643c5a49aa9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1412,9 +1412,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ hisi_sas_phy_bcast(phy);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 70e401fd432a..cded42f4ca44 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2811,15 +2811,12 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -3537,7 +3534,7 @@ static struct attribute *host_v2_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v2_hw);
-static int map_queues_v2_hw(struct Scsi_Host *shost)
+static void map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
@@ -3552,9 +3549,6 @@ static int map_queues_v2_hw(struct Scsi_Host *shost)
for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
-
- return 0;
-
}
static struct scsi_host_template sht_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index efe8c5be5870..d56b4bfd2767 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1626,15 +1626,12 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2786,7 +2783,6 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
struct hisi_hba *hisi_hba = shost_priv(shost);
int ret = hisi_sas_slave_configure(sdev);
struct device *dev = hisi_hba->dev;
- unsigned int max_sectors;
if (ret)
return ret;
@@ -2802,12 +2798,6 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
}
}
- /* Set according to IOMMU IOVA caching limit */
- max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
- (PAGE_SIZE * 32) >> SECTOR_SHIFT);
-
- blk_queue_max_hw_sectors(sdev->request_queue, max_sectors);
-
return 0;
}
@@ -3171,13 +3161,12 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
return 0;
}
-static int hisi_sas_map_queues(struct Scsi_Host *shost)
+static void hisi_sas_map_queues(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
}
static struct scsi_host_template sht_v3_hw = {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 0738238ed6cc..9857dba09c95 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ /*
+ * New SCSI devices cannot be attached anymore because of the SCSI host
+ * state so drop the tag set refcnt. Wait until the tag set refcnt drops
+ * to zero because .exit_cmd_priv implementations may need the host
+ * pointer.
+ */
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ wait_for_completion(&shost->tagset_freed);
+
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -190,15 +199,6 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
-
- /*
- * After scsi_remove_host() has returned the scsi LLD module can be
- * unloaded and/or the host resources can be released. Hence wait until
- * the dependent SCSI targets and devices are gone before returning.
- */
- wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
-
- scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -254,6 +254,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto fail;
+ kref_init(&shost->tagset_refcnt);
+ init_completion(&shost->tagset_freed);
+
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -309,8 +312,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
- * Any resources associated with the SCSI host in this function except
- * the tag set will be freed by scsi_host_dev_release().
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -326,7 +329,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail:
return error;
}
@@ -406,7 +409,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- init_waitqueue_head(&shost->targets_wq);
index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a47bcce3c9c7..f8e832b1bc46 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -6233,8 +6233,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
offset = (i + 1) % HPSA_NRESERVED_CMDS;
continue;
}
- set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ set_bit(i, h->cmd_pool_bits);
break; /* it's ours now. */
}
hpsa_cmd_partial_init(h, i, c);
@@ -6261,8 +6260,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
}
}
@@ -8030,7 +8028,7 @@ out_disable:
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
- kfree(h->cmd_pool_bits);
+ bitmap_free(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
dma_free_coherent(&h->pdev->dev,
@@ -8052,9 +8050,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
- h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
- sizeof(unsigned long),
- GFP_KERNEL);
+ h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL);
h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
&h->cmd_pool_dhandle, GFP_KERNEL);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index f18b770626e6..7e8903718245 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1044,10 +1044,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
req->channel = scp->device->channel;
req->target = scp->device->id;
req->lun = scp->device->lun;
- req->header.size = cpu_to_le32(
- sizeof(struct hpt_iop_request_scsi_command)
- - sizeof(struct hpt_iopsg)
- + sg_count * sizeof(struct hpt_iopsg));
+ req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));
memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
hba->ops->post_req(hba, _req);
@@ -1397,8 +1394,8 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
host->max_cmd_len = 16;
- req_size = sizeof(struct hpt_iop_request_scsi_command)
- + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
+ req_size = struct_size((struct hpt_iop_request_scsi_command *)0,
+ sg_list, hba->max_sg_descriptors);
if ((req_size & 0x1f) != 0)
req_size = (req_size + 0x1f) & ~0x1f;
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 363d5a16243f..394ef6aa469e 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -228,7 +228,7 @@ struct hpt_iop_request_scsi_command {
u8 pad1;
u8 cdb[16];
__le32 dataxfer_length;
- struct hpt_iopsg sg_list[1];
+ struct hpt_iopsg sg_list[];
};
struct hpt_iop_request_ioctl_command {
@@ -237,7 +237,7 @@ struct hpt_iop_request_ioctl_command {
__le32 inbuf_size;
__le32 outbuf_size;
__le32 bytes_returned;
- u8 buf[1];
+ u8 buf[];
/* out data should be put at buf[(inbuf_size+3)&~3] */
};
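Both hptiop and 3w-xxxx trade the old one-element-array idiom (sg_list[1], buf[1], data_buffer[1]) for C99 flexible array members, so the size arithmetic stops subtracting the dummy element and can use struct_size(), which adds the header and n trailing elements with overflow checking. A condensed sketch of that pattern (generic, not driver code):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct demo_req {
            u32 len;
            u8 payload[];           /* flexible array member, no dummy element */
    };

    static struct demo_req *demo_req_alloc(const u8 *src, u32 n)
    {
            /* struct_size() == sizeof(struct demo_req) + n * sizeof(u8), overflow-checked */
            struct demo_req *req = kzalloc(struct_size(req, payload, n), GFP_KERNEL);

            if (!req)
                    return NULL;
            req->len = n;
            memcpy(req->payload, src, n);   /* copy into the trailing storage */
            return req;
    }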
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index eee1a24f7e15..e8770310a64b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -444,7 +444,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
break;
/*
- * Can transition from this state to to unconfiguring
+ * Can transition from this state to unconfiguring
* or err disconnect.
*/
case ERR_DISCONNECT_RECONNECT:
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index f585d6e5fab9..375261d67619 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1166,7 +1166,7 @@ static void tulip_scsi(struct initio_host * host)
return;
}
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
- if ((scb = host->active) != NULL)
+ if (host->active)
initio_next_state(host);
return;
}
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 29b1bd755afe..5fb1f364e815 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -595,6 +595,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
+ mutex_init(&tcp_sw_conn->sock_lock);
+
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto free_conn;
@@ -629,11 +631,15 @@ free_conn:
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct socket *sock = tcp_sw_conn->sock;
+ /*
+ * The iscsi transport class will make sure we are not called in
+ * parallel with start, stop, bind and destroys. However, this can be
+ * called twice if userspace does a stop then a destroy.
+ */
if (!sock)
return;
@@ -649,9 +655,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_suspend_rx(conn);
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
sockfd_put(sock);
}
@@ -703,7 +709,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -723,10 +728,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -763,8 +768,15 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ if (!tcp_sw_conn->sock) {
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ return -ENOTCONN;
+ }
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ mutex_unlock(&tcp_sw_conn->sock_lock);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -779,8 +791,8 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
struct sockaddr_in6 addr;
struct socket *sock;
int rc;
@@ -790,21 +802,36 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_LOCAL_PORT:
spin_lock_bh(&conn->session->frwd_lock);
- if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ if (!conn->session->leadconn) {
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
- sock = tcp_sw_conn->sock;
- sock_hold(sock->sk);
+ /*
+ * The conn has been set up and bound, so just grab a ref
+ * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&conn->session->frwd_lock);
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock) {
+ rc = -ENOTCONN;
+ goto sock_unlock;
+ }
+
if (param == ISCSI_PARAM_LOCAL_PORT)
rc = kernel_getsockname(sock,
(struct sockaddr *)&addr);
else
rc = kernel_getpeername(sock,
(struct sockaddr *)&addr);
- sock_put(sock->sk);
+sock_unlock:
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -842,17 +869,21 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
- sock = tcp_sw_conn->sock;
- if (!sock) {
- spin_unlock_bh(&session->frwd_lock);
- return -ENOTCONN;
- }
- sock_hold(sock->sk);
+ /*
+ * The conn has been set up and bound, so just grab a ref
+ * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- rc = kernel_getsockname(sock,
- (struct sockaddr *)&addr);
- sock_put(sock->sk);
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock)
+ rc = -ENOTCONN;
+ else
+ rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 850a018aefb9..68e14a344904 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,9 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ /* Taken when accessing the sock from the netlink/sysfs interface */
+ struct mutex sock_lock;
+
struct work_struct recvwork;
bool queue_recv;
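Accessors that used to piggyback on the session's frwd_lock now take this dedicated mutex and must tolerate a NULL socket, because the connection may already have been released. Roughly, the access pattern used above (the helper name is made up):

    static int demo_get_peer(struct iscsi_sw_tcp_conn *tcp_sw_conn,
                             struct sockaddr_storage *addr)
    {
            int rc = -ENOTCONN;

            mutex_lock(&tcp_sw_conn->sock_lock);
            if (tcp_sw_conn->sock)          /* may have been cleared by release */
                    rc = kernel_getpeername(tcp_sw_conn->sock,
                                            (struct sockaddr *)addr);
            mutex_unlock(&tcp_sw_conn->sock_lock);
            return rc;
    }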
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fa2209080cc2..5ce251830104 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -67,7 +67,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9c82e5dc4fcc..a36fa1c128a8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -872,7 +872,8 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
- return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+ return ata_change_queue_depth(dev->sata_dev.ap,
+ sas_to_ata_dev(dev), sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e6a083d098a1..9ad233b40a9e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -68,8 +68,6 @@ struct lpfc_sli2_slim;
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
-#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
- collection. */
/*
* Following time intervals are used of adjusting SCSI device
* queue depths when there are driver resource error or Firmware
@@ -405,6 +403,7 @@ struct lpfc_trunk_link {
link1,
link2,
link3;
+ u32 phy_lnk_speed;
};
/* Format of congestion module parameters */
@@ -732,8 +731,6 @@ struct lpfc_vport {
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
- uint8_t stat_data_enabled;
- uint8_t stat_data_blocked;
struct list_head rcv_buffer_list;
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
@@ -1436,13 +1433,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- /* data structure used for latency data collection */
-#define LPFC_NO_BUCKET 0
-#define LPFC_LINEAR_BUCKET 1
-#define LPFC_POWER2_BUCKET 2
- uint8_t bucket_type;
- uint32_t bucket_base;
- uint32_t bucket_step;
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
@@ -1564,16 +1554,13 @@ struct lpfc_hba {
/* cgn_reg_signal and cgn_init_reg_signal use
* enum fc_edc_cg_signal_cap_types
*/
- u16 cgn_fpin_frequency;
+ u16 cgn_fpin_frequency; /* In units of msecs */
#define LPFC_FPIN_INIT_FREQ 0xffff
u32 cgn_sig_freq;
u32 cgn_acqe_cnt;
/* RX monitor handling for CMF */
- struct rxtable_entry *rxtable; /* RX_monitor information */
- atomic_t rxtable_idx_head;
-#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
- atomic_t rxtable_idx_tail;
+ struct lpfc_rx_info_monitor *rx_monitor;
atomic_t rx_max_read_cnt; /* Maximum read bytes */
uint64_t rx_block_cnt;
@@ -1610,10 +1597,11 @@ struct lpfc_hba {
char os_host_name[MAXHOSTNAMELEN];
- /* SCSI host template information - for physical port */
- struct scsi_host_template port_template;
- /* SCSI host template information - for all vports */
- struct scsi_host_template vport_template;
+ /* LD Signaling */
+ u32 degrade_activate_threshold;
+ u32 degrade_deactivate_threshold;
+ u32 fec_degrade_interval;
+
atomic_t dbg_log_idx;
atomic_t dbg_log_cnt;
atomic_t dbg_log_dmping;
@@ -1622,7 +1610,7 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
-struct rxtable_entry {
+struct rx_info_entry {
uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
@@ -1637,6 +1625,13 @@ struct rxtable_entry {
uint32_t timer_interval;
};
+struct lpfc_rx_info_monitor {
+ struct rx_info_entry *ring; /* info organized in a circular buffer */
+ u32 head_idx, tail_idx; /* index to head/tail of ring */
+ spinlock_t lock; /* spinlock for ring */
+ u32 entries; /* number of entries, i.e. the size of the ring */
+};
+
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
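The fixed rxtable plus two atomic indices becomes a single ring whose head and tail move only under the ring's spinlock. A generic sketch of that kind of locked circular buffer (not the lpfc implementation itself; the index naming is illustrative):

    struct demo_ring {
            struct rx_info_entry *slots;
            u32 write_idx, read_idx;        /* producer / consumer positions */
            u32 entries;                    /* ring size, fixed at allocation */
            spinlock_t lock;
    };

    static void demo_ring_push(struct demo_ring *r, const struct rx_info_entry *e)
    {
            unsigned long flags;

            spin_lock_irqsave(&r->lock, flags);
            r->slots[r->write_idx] = *e;                    /* store newest sample */
            r->write_idx = (r->write_idx + 1) % r->entries;
            if (r->write_idx == r->read_idx)                /* full: drop the oldest */
                    r->read_idx = (r->read_idx + 1) % r->entries;
            spin_unlock_irqrestore(&r->lock, flags);
    }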
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 09cf2cd0ae60..ef1481326fd7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4093,333 +4093,6 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
*/
static DEVICE_ATTR_RO(lpfc_static_vport);
-/**
- * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- * @count: Size of the data buffer.
- *
- * This function get called when a user write to the lpfc_stat_data_ctrl
- * sysfs file. This function parse the command written to the sysfs file
- * and take appropriate action. These commands are used for controlling
- * driver statistical data collection.
- * Following are the command this function handles.
- *
- * setbucket <bucket_type> <base> <step>
- * = Set the latency buckets.
- * destroybucket = destroy all the buckets.
- * start = start data collection
- * stop = stop data collection
- * reset = reset the collected data
- **/
-static ssize_t
-lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-#define LPFC_MAX_DATA_CTRL_LEN 1024
- static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
- unsigned long i;
- char *str_ptr, *token;
- struct lpfc_vport **vports;
- struct Scsi_Host *v_shost;
- char *bucket_type_str, *base_str, *step_str;
- unsigned long base, step, bucket_type;
-
- if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
- if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
- return -EINVAL;
-
- strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
- str_ptr = &bucket_data[0];
- /* Ignore this token - this is command token */
- token = strsep(&str_ptr, "\t ");
- if (!token)
- return -EINVAL;
-
- bucket_type_str = strsep(&str_ptr, "\t ");
- if (!bucket_type_str)
- return -EINVAL;
-
- if (!strncmp(bucket_type_str, "linear", strlen("linear")))
- bucket_type = LPFC_LINEAR_BUCKET;
- else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
- bucket_type = LPFC_POWER2_BUCKET;
- else
- return -EINVAL;
-
- base_str = strsep(&str_ptr, "\t ");
- if (!base_str)
- return -EINVAL;
- base = simple_strtoul(base_str, NULL, 0);
-
- step_str = strsep(&str_ptr, "\t ");
- if (!step_str)
- return -EINVAL;
- step = simple_strtoul(step_str, NULL, 0);
- if (!step)
- return -EINVAL;
-
- /* Block the data collection for every vport */
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(v_shost->host_lock);
- /* Block and reset data collection */
- vports[i]->stat_data_blocked = 1;
- if (vports[i]->stat_data_enabled)
- lpfc_vport_reset_stat_data(vports[i]);
- spin_unlock_irq(v_shost->host_lock);
- }
-
- /* Set the bucket attributes */
- phba->bucket_type = bucket_type;
- phba->bucket_base = base;
- phba->bucket_step = step;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
-
- /* Unblock data collection */
- spin_lock_irq(v_shost->host_lock);
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(v_shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->stat_data_blocked = 1;
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- phba->bucket_type = LPFC_NO_BUCKET;
- phba->bucket_base = 0;
- phba->bucket_step = 0;
- return strlen(buf);
- }
-
- if (!strncmp(buf, "start", strlen("start"))) {
- /* If no buckets configured return error */
- if (phba->bucket_type == LPFC_NO_BUCKET)
- return -EINVAL;
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_alloc_bucket(vport);
- vport->stat_data_enabled = 1;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "stop", strlen("stop"))) {
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled == 0) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "reset", strlen("reset"))) {
- if ((phba->bucket_type == LPFC_NO_BUCKET)
- || !vport->stat_data_enabled)
- return strlen(buf);
- spin_lock_irq(shost->host_lock);
- vport->stat_data_blocked = 1;
- lpfc_vport_reset_stat_data(vport);
- vport->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- return -EINVAL;
-}
-
-
-/**
- * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- *
- * This function is the read call back function for
- * lpfc_stat_data_ctrl sysfs file. This function report the
- * current statistical data collection state.
- **/
-static ssize_t
-lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int index = 0;
- int i;
- char *bucket_type;
- unsigned long bucket_value;
-
- switch (phba->bucket_type) {
- case LPFC_LINEAR_BUCKET:
- bucket_type = "linear";
- break;
- case LPFC_POWER2_BUCKET:
- bucket_type = "power2";
- break;
- default:
- bucket_type = "No Bucket";
- break;
- }
-
- sprintf(&buf[index], "Statistical Data enabled :%d, "
- "blocked :%d, Bucket type :%s, Bucket base :%d,"
- " Bucket step :%d\nLatency Ranges :",
- vport->stat_data_enabled, vport->stat_data_blocked,
- bucket_type, phba->bucket_base, phba->bucket_step);
- index = strlen(buf);
- if (phba->bucket_type != LPFC_NO_BUCKET) {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- if (phba->bucket_type == LPFC_LINEAR_BUCKET)
- bucket_value = phba->bucket_base +
- phba->bucket_step * i;
- else
- bucket_value = phba->bucket_base +
- (1 << i) * phba->bucket_step;
-
- if (index + 10 > PAGE_SIZE)
- break;
- sprintf(&buf[index], "%08ld ", bucket_value);
- index = strlen(buf);
- }
- }
- sprintf(&buf[index], "\n");
- return strlen(buf);
-}
-
-/*
- * Sysfs attribute to control the statistical data collection.
- */
-static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
-
-/*
- * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
- */
-
-/*
- * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
- * for each target.
- */
-#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
-#define MAX_STAT_DATA_SIZE_PER_TARGET \
- STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
-
-
-/**
- * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
- * @filp: sysfs file
- * @kobj: Pointer to the kernel object
- * @bin_attr: Attribute object
- * @buf: Buffer pointer
- * @off: File offset
- * @count: Buffer size
- *
- * This function is the read call back function for lpfc_drvr_stat_data
- * sysfs file. This function export the statistical data to user
- * applications.
- **/
-static ssize_t
-sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev = container_of(kobj, struct device,
- kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int i = 0, index = 0;
- unsigned long nport_index;
- struct lpfc_nodelist *ndlp = NULL;
- nport_index = (unsigned long)off /
- MAX_STAT_DATA_SIZE_PER_TARGET;
-
- if (!vport->stat_data_enabled || vport->stat_data_blocked
- || (phba->bucket_type == LPFC_NO_BUCKET))
- return 0;
-
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!ndlp->lat_data)
- continue;
-
- if (nport_index > 0) {
- nport_index--;
- continue;
- }
-
- if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
- > count)
- break;
-
- if (!ndlp->lat_data)
- continue;
-
- /* Print the WWN */
- sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
-
- index = strlen(buf);
-
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- sprintf(&buf[index], "%010u,",
- ndlp->lat_data[i].cmd_count);
- index = strlen(buf);
- }
- sprintf(&buf[index], "\n");
- index = strlen(buf);
- }
- spin_unlock_irq(shost->host_lock);
- return index;
-}
-
-static struct bin_attribute sysfs_drvr_stat_data_attr = {
- .attr = {
- .name = "lpfc_drvr_stat_data",
- .mode = S_IRUSR,
- },
- .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
- .read = sysfs_drvr_stat_data_read,
- .write = NULL,
-};
-
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
@@ -6273,7 +5946,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_xlane_priority.attr,
&dev_attr_lpfc_sg_seg_cnt.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_aer_support.attr,
&dev_attr_lpfc_aer_state_cleanup.attr,
&dev_attr_lpfc_sriov_nr_virtfn.attr,
@@ -6332,7 +6004,6 @@ static struct attribute *lpfc_vport_attrs[] = {
&dev_attr_npiv_info.attr,
&dev_attr_lpfc_enable_da_id.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_static_vport.attr,
&dev_attr_cmf_info.attr,
NULL,
@@ -6545,17 +6216,14 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
-
/* Virtual ports do not need ctrl_reg and mbox */
- if (error || vport->port_type == LPFC_NPIV_PORT)
- goto out;
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return 0;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
- goto out_remove_stat_attr;
+ goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
@@ -6565,9 +6233,6 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
-out_remove_stat_attr:
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
out:
return error;
}
@@ -6580,8 +6245,7 @@ void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
+
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
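
The simplified lpfc_alloc_sysfs_attr()/lpfc_free_sysfs_attr() pair above keeps the usual sysfs register-then-unwind idiom once the stat-data attribute is gone. A minimal sketch of that idiom, with hypothetical attribute names (example_attr_a/example_attr_b) rather than the driver's real ones:

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* Hypothetical attributes; real ones would also set .read/.write and .size. */
static struct bin_attribute example_attr_a = {
	.attr = { .name = "example_a", .mode = 0444 },
};
static struct bin_attribute example_attr_b = {
	.attr = { .name = "example_b", .mode = 0444 },
};

static int example_alloc_sysfs_attr(struct kobject *kobj)
{
	int error;

	error = sysfs_create_bin_file(kobj, &example_attr_a);
	if (error)
		goto out;

	error = sysfs_create_bin_file(kobj, &example_attr_b);
	if (error)
		goto out_remove_a;

	return 0;

out_remove_a:
	sysfs_remove_bin_file(kobj, &example_attr_a);
out:
	return error;
}

Each later registration unwinds only the files that were already created, which is why dropping the first attribute in the hunk above also removes its out_remove_stat_attr label.
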
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 9be3bb01a8ec..ac0c7ccf2eae 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1977,8 +1977,6 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- int rc;
-
if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
@@ -1988,8 +1986,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
- rc = lpfc_issue_reg_vfi(phba->pport);
- return rc;
+ return lpfc_issue_reg_vfi(phba->pport);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bcad91204328..d2d207791056 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -78,6 +78,7 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx);
+int lpfc_read_lds_params(struct lpfc_hba *phba);
uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
void lpfc_cmf_signal_init(struct lpfc_hba *phba);
void lpfc_cmf_start(struct lpfc_hba *phba);
@@ -92,6 +93,14 @@ void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
void lpfc_unblock_requests(struct lpfc_hba *phba);
void lpfc_block_requests(struct lpfc_hba *phba);
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries);
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry);
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -454,6 +463,7 @@ extern const struct attribute_group *lpfc_hba_groups[];
extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_nvme;
+extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
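
The new lpfc_rx_monitor_* prototypes added above describe a small ring API: create a ring of rx_info_entry records, append entries as they are produced, render the ring as text on demand, and free it on teardown. A hedged sketch of that call order — the allocation context, error value, and ring size below are assumptions drawn from the prototypes, not from this hunk:

/* Fragment; assumes the driver's usual includes (lpfc.h, lpfc_crtn.h). */
static int example_rx_monitor_setup(struct lpfc_hba *phba)
{
	int rc;

	/* phba->rx_monitor is the pointer tested by the debugfs code below */
	phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor), GFP_KERNEL);
	if (!phba->rx_monitor)
		return -ENOMEM;

	rc = lpfc_rx_monitor_create_ring(phba->rx_monitor,
					 LPFC_MAX_RXMONITOR_ENTRY);
	if (rc) {
		kfree(phba->rx_monitor);
		phba->rx_monitor = NULL;
	}
	return rc;
}

Producers would then call lpfc_rx_monitor_record() with a filled-in rx_info_entry, readers call lpfc_rx_monitor_report() to format up to max_read_entries entries into a text buffer (as the debugfs change later in this patch does), and teardown calls lpfc_rx_monitor_destroy_ring() before freeing the monitor.
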
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 13dfe285493d..75fd2bfc212b 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1509,7 +1509,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_sli_ct_request *CTrsp;
int did;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_nodelist *ns_ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
uint32_t fc4_data_0, fc4_data_1;
u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
u32 ulp_word4 = get_job_word4(phba, rspiocb);
@@ -1522,15 +1522,12 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
- if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
+ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"9046 Event tag mismatch. Ignoring NS rsp\n");
goto out;
}
- /* Preserve the nameserver node to release the reference. */
- ns_ndlp = cmdiocb->ndlp;
-
if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
@@ -2504,420 +2501,298 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
}
}
-/* Routines for all individual HBA attributes */
-static int
-lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_fdmi_attr_u32 *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ ae->value_u32 = cpu_to_be32(attrval);
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NODENAME);
return size;
}
-static int
-lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+
+static inline int
+lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_wwn *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWN's assumed to be bytestreams - Big Endian presentation */
+ memcpy(ae->name, wwn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- /* This string MUST be consistent with other FC platforms
- * supported by Broadcom.
- */
- strncpy(ae->un.AttrString,
- "Emulex Corporation",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
+ struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fullwwn *ae = attr;
+ u8 *nname = ae->nname;
+ u8 *pname = ae->pname;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWN's assumed to be bytestreams - Big Endian presentation */
+ memcpy(nname, wwnn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
+ memcpy(pname, wwpn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- strncpy(ae->un.AttrString, phba->SerialNumber,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_string *ae = attr;
+ int len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ /*
+ * We are trusting the caller that if a fdmi string field
+ * is capped at 64 bytes, the caller passes in a string of
+ * 64 bytes or less.
+ */
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ len = strnlen(ae->value_string, sizeof(ae->value_string));
+ /* round string length to a 32bit boundary. Ensure there's a NULL */
len += (len & 3) ? (4 - (len & 3)) : 4;
+ /* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL);
+
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
return size;
}
-static int
-lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+/* Bitfields for FC4 Types that can be reported */
+#define ATTR_FC4_CT 0x00000001
+#define ATTR_FC4_FCP 0x00000002
+#define ATTR_FC4_NVME 0x00000004
+
+static inline int
+lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fc4types *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
+ if (typemask & ATTR_FC4_FCP)
+ ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
+
+ if (typemask & ATTR_FC4_CT)
+ ae->value_types[7] = 0x01; /* Type 0x20 - CT */
+
+ if (typemask & ATTR_FC4_NVME)
+ ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
- strncpy(ae->un.AttrString, phba->ModelDesc,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
return size;
}
+/* Routines for all individual HBA attributes */
static int
-lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- lpfc_vpd_t *vp = &phba->vpd;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t i, j, incr, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- /* Convert JEDEC ID to ascii for hardware version */
- incr = vp->rev.biuRev;
- for (i = 0; i < 8; i++) {
- j = (incr & 0xf);
- if (j <= 9)
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
- else
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
- incr = (incr >> 4);
- }
- size = FOURBYTES + 8;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
+ "Emulex Corporation");
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
+{
+ struct lpfc_hba *phba = vport->phba;
- strncpy(ae->un.AttrString, lpfc_release_version,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
+ phba->SerialNumber);
}
static int
-lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- else
- strncpy(ae->un.AttrString, phba->OptionROMVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
+ phba->ModelDesc);
}
static int
-lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ lpfc_vpd_t *vp = &phba->vpd;
+ char buf[16] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
+ return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
+}
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
+ lpfc_release_version);
}
static int
-lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_decode_firmware_rev(phba, buf, 1);
- ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ buf);
+ }
+
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ phba->OptionROMVersion);
}
static int
-lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_decode_firmware_rev(phba, buf, 1);
- len = lpfc_vport_symbolic_node_name(vport,
- ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
+ snprintf(buf, sizeof(buf), "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
- /* Nothing is defined for this currently */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
- /* Each driver instance corresponds to a single port */
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
+ LPFC_MAX_CT_SIZE);
}
static int
-lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
- memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}
static int
-lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
+{
+ /* Each driver instance corresponds to a single port */
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
+}
- strlcat(ae->un.AttrString, phba->BIOSVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
+ &vport->fabric_nodename);
}
static int
-lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
- ae = &ad->AttrValue;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
+ phba->BIOSVersion);
+}
+static int
+lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
+{
/* Driver doesn't have access to this information */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}
static int
-lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "EMULEX",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}
-/* Routines for all individual PORT attributes */
+/*
+ * Routines for all individual PORT attributes
+ */
+
static int
-lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ u32 fc4types;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if Firmware supports NVME and on physical port */
if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
phba->sli4_hba.pc_sli4_params.nvme)
- ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ u32 speeds = 0;
u32 tcfg;
u8 i, cnt;
- ae = &ad->AttrValue;
-
- ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
cnt = 0;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -2929,539 +2804,314 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
if (cnt > 2) { /* 4 lane trunk group */
if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+ speeds |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+ speeds |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+ speeds |= HBA_PORTSPEED_64GFC;
} else if (cnt) { /* 2 lane trunk group */
if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+ speeds |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+ speeds |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+ speeds |= HBA_PORTSPEED_64GFC;
if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+ speeds |= HBA_PORTSPEED_32GFC;
} else {
if (phba->lmt & LMT_256Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
+ speeds |= HBA_PORTSPEED_256GFC;
if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
+ speeds |= HBA_PORTSPEED_128GFC;
if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
+ speeds |= HBA_PORTSPEED_64GFC;
if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
+ speeds |= HBA_PORTSPEED_32GFC;
if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
+ speeds |= HBA_PORTSPEED_16GFC;
if (phba->lmt & LMT_10Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
+ speeds |= HBA_PORTSPEED_10GFC;
if (phba->lmt & LMT_8Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
+ speeds |= HBA_PORTSPEED_8GFC;
if (phba->lmt & LMT_4Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
+ speeds |= HBA_PORTSPEED_4GFC;
if (phba->lmt & LMT_2Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
+ speeds |= HBA_PORTSPEED_2GFC;
if (phba->lmt & LMT_1Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+ speeds |= HBA_PORTSPEED_1GFC;
}
} else {
/* FCoE links support only one speed */
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ u32 speeds = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_1GFC;
+ speeds = HBA_PORTSPEED_1GFC;
break;
case LPFC_LINK_SPEED_2GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_2GFC;
+ speeds = HBA_PORTSPEED_2GFC;
break;
case LPFC_LINK_SPEED_4GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_4GFC;
+ speeds = HBA_PORTSPEED_4GFC;
break;
case LPFC_LINK_SPEED_8GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_8GFC;
+ speeds = HBA_PORTSPEED_8GFC;
break;
case LPFC_LINK_SPEED_10GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_10GFC;
+ speeds = HBA_PORTSPEED_10GFC;
break;
case LPFC_LINK_SPEED_16GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_16GFC;
+ speeds = HBA_PORTSPEED_16GFC;
break;
case LPFC_LINK_SPEED_32GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_32GFC;
+ speeds = HBA_PORTSPEED_32GFC;
break;
case LPFC_LINK_SPEED_64GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_64GFC;
+ speeds = HBA_PORTSPEED_64GFC;
break;
case LPFC_LINK_SPEED_128GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+ speeds = HBA_PORTSPEED_128GFC;
break;
case LPFC_LINK_SPEED_256GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+ speeds = HBA_PORTSPEED_256GFC;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
} else {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
- struct serv_parm *hsp;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
- hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) hsp->cmn.bbRcvSizeLsb;
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
+ (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t)hsp->cmn.bbRcvSizeLsb);
}
static int
-lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
+ shost->host_no);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
- "/sys/class/scsi_host/host%d", shost->host_no);
- len = strnlen((char *)ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
- scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- vport->phba->os_host_name);
-
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
- len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}
static int
-lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
- ae = &ad->AttrValue;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
- else
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
+ LPFC_FDMI_PORTTYPE_NLPORT :
+ LPFC_FDMI_PORTTYPE_NPORT);
}
static int
-lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
+ FC_COS_CLASS2 | FC_COS_CLASS3);
}
static int
-lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
+ &vport->fabric_portname);
}
static int
-lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ u32 fc4types;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if NVME is configured or not */
if (vport == phba->pport &&
phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- /* Link Up - operational */
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
+ LPFC_FDMI_PORTSTATE_ONLINE);
}
static int
-lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
- ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
+ vport->fdmi_num_disc);
}
static int
-lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}
static int
-lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Initiator",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
+ "Smart SAN Initiator");
}
static int
-lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy((((uint8_t *)&ae->un.AttrString) +
- sizeof(struct lpfc_name)),
- &vport->fc_sparam.portName, sizeof(struct lpfc_name));
- size = FOURBYTES + (2 * sizeof(struct lpfc_name));
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_GUID);
- return size;
+ return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
+ &vport->fc_sparam.nodeName,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
+ "Smart SAN Version 2.0");
}
static int
-lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
/* SRIOV (type 3) is not supported */
- if (vport->vpi)
- ae->un.AttrInt = cpu_to_be32(2); /* NPIV */
- else
- ae->un.AttrInt = cpu_to_be32(1); /* Physical */
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
+ (vport->vpi) ? 2 /* NPIV */ : 1 /* Physical */);
}
static int
-lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_QOS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
}
static int
-lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}
static int
-lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- char mibrevision[16];
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
- sprintf(mibrevision, "ELXE2EM:%04d",
- phba->sli4_hba.pc_sli4_params.mi_ver);
- strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_VENDOR_MI);
- return size;
+ char buf[32] = { 0 };
+
+ sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */
lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */
@@ -3485,7 +3135,7 @@ int (*lpfc_fdmi_hba_action[])
/* RPA / RPRT attribute jump table */
int (*lpfc_fdmi_port_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */
lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */
@@ -3527,20 +3177,20 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int cmdcode, uint32_t new_mask)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_dmabuf *rq, *rsp;
struct lpfc_sli_ct_request *CtReq;
- struct ulp_bde64 *bpl;
+ struct ulp_bde64_le *bde;
uint32_t bit_pos;
- uint32_t size;
+ uint32_t size, addsz;
uint32_t rsp_size;
uint32_t mask;
struct lpfc_fdmi_reg_hba *rh;
struct lpfc_fdmi_port_entry *pe;
- struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
struct lpfc_fdmi_attr_block *ab = NULL;
- int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad);
- void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
+ int (*func)(struct lpfc_vport *vport, void *attrbuf);
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
if (!ndlp)
return 0;
@@ -3549,25 +3199,29 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ rq = kmalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
goto fdmi_cmd_exit;
- mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
- if (!mp->virt)
- goto fdmi_cmd_free_mp;
+ rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
+ if (!rq->virt)
+ goto fdmi_cmd_free_rq;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
- goto fdmi_cmd_free_mpvirt;
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ goto fdmi_cmd_free_rqvirt;
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
- if (!bmp->virt)
- goto fdmi_cmd_free_bmp;
+ rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
+ if (!rsp->virt)
+ goto fdmi_cmd_free_rsp;
- INIT_LIST_HEAD(&mp->list);
- INIT_LIST_HEAD(&bmp->list);
+ INIT_LIST_HEAD(&rq->list);
+ INIT_LIST_HEAD(&rsp->list);
+
+ /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
+ memset(rq->virt, 0, LPFC_BPL_SIZE);
+ rsp_size = LPFC_BPL_SIZE;
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3575,10 +3229,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
cmdcode, new_mask, vport->fdmi_port_mask,
vport->fc_flag, vport->port_state);
- CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ CtReq = (struct lpfc_sli_ct_request *)rq->virt;
/* First populate the CT_IU preamble */
- memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
@@ -3586,17 +3239,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
- rsp_size = LPFC_BPL_SIZE;
+
size = 0;
/* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
- rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
/* HBA Identifier */
memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_fdmi_hba_ident);
if (cmdcode == SLI_MGMT_RHBA) {
/* Registered Port List */
@@ -3605,16 +3259,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&rh->rpl.pe.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof(struct lpfc_name) +
- FOURBYTES;
- } else {
- size = sizeof(struct lpfc_name);
+ size += sizeof(struct lpfc_fdmi_reg_port_list);
}
+
ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
ab->EntryCnt = 0;
- size += FOURBYTES;
+ size += FOURBYTES; /* add length of EntryCnt field */
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3625,11 +3276,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_hba_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)rh + size));
- ab->EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)rh + size));
+ if (addsz) {
+ ab->EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto hba_out;
}
@@ -3639,7 +3292,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hba_out:
ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_RPRT:
@@ -3650,22 +3303,29 @@ hba_out:
}
fallthrough;
case SLI_MGMT_RPA:
- pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
+ /* Store base ptr right after preamble */
+ base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
+
if (cmdcode == SLI_MGMT_RPRT) {
- rh = (struct lpfc_fdmi_reg_hba *)pab;
+ rh = (struct lpfc_fdmi_reg_hba *)base;
/* HBA Identifier */
memcpy(&rh->hi.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
pab = (struct lpfc_fdmi_reg_portattr *)
- ((uint8_t *)pab + sizeof(struct lpfc_name));
+ ((uint8_t *)base + sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_name);
+ } else {
+ pab = base;
}
memcpy((uint8_t *)&pab->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
- size += sizeof(struct lpfc_name) + FOURBYTES;
pab->ab.EntryCnt = 0;
+ /* add length of name and EntryCnt field */
+ size += sizeof(struct lpfc_name) + FOURBYTES;
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3676,11 +3336,13 @@ hba_out:
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_port_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)pab + size));
- pab->ab.EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)base + size));
+ if (addsz) {
+ pab->ab.EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto port_out;
}
@@ -3689,10 +3351,7 @@ hba_out:
}
port_out:
pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
- /* Total size */
- if (cmdcode == SLI_MGMT_RPRT)
- size += sizeof(struct lpfc_name);
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_GHAT:
@@ -3701,7 +3360,7 @@ port_out:
fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3720,7 +3379,7 @@ port_out:
}
fallthrough;
case SLI_MGMT_DPA:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3733,31 +3392,32 @@ port_out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0298 FDMI cmdcode x%x not supported\n",
cmdcode);
- goto fdmi_cmd_free_bmpvirt;
+ goto fdmi_cmd_free_rspvirt;
}
CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *)bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = size;
+ bde = (struct ulp_bde64_le *)rsp->virt;
+ bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
+ bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
+ bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
+ ULP_BDE64_TYPE_SHIFT);
+ bde->type_size |= cpu_to_le32(size);
/*
* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
* to hold ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+ if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
return 0;
-fdmi_cmd_free_bmpvirt:
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-fdmi_cmd_free_bmp:
- kfree(bmp);
-fdmi_cmd_free_mpvirt:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-fdmi_cmd_free_mp:
- kfree(mp);
+fdmi_cmd_free_rspvirt:
+ lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
+fdmi_cmd_free_rsp:
+ kfree(rsp);
+fdmi_cmd_free_rqvirt:
+ lpfc_mbuf_free(phba, rq->virt, rq->phys);
+fdmi_cmd_free_rq:
+ kfree(rq);
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3912,6 +3572,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_sli_ct_request *ctrsp = outp->virt;
u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
struct app_id_object *app;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
u32 cmd, hash, bucket;
struct lpfc_vmid *vmp, *cur;
u8 *data = outp->virt;
@@ -3923,7 +3584,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
if (cmd != SLI_CTAS_DALLAPP_ID)
- return;
+ goto free_res;
}
/* Check for a CT LS_RJT response */
if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
@@ -3938,7 +3599,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* If DALLAPP_ID failed retry later */
if (cmd == SLI_CTAS_DALLAPP_ID)
vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
- return;
+ goto free_res;
}
}
@@ -3952,7 +3613,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
app->obj.entity_id_len);
if (app->obj.entity_id_len == 0 || app->port_id == 0)
- return;
+ goto free_res;
hash = lpfc_vmid_hash_fn(app->obj.entity_id,
app->obj.entity_id_len);
@@ -3999,6 +3660,9 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
"8857 Invalid command code\n");
}
+free_res:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
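
All of the rewritten attribute builders above funnel through the lpfc_fdmi_set_attr_*() helpers, which emit a simple TLV: a big-endian 16-bit type, a big-endian 16-bit length that counts the 4-byte header, and a value padded out to a 32-bit boundary (strings always gain at least one NUL byte of padding). A standalone user-space sketch of just that length rule — only FOURBYTES, the 64-byte string cap, and the rounding expression come from the code above; the function name is illustrative:

#include <stdio.h>
#include <string.h>

#define FOURBYTES 4

/* Length rule used by lpfc_fdmi_set_attr_string(): pad the string to a
 * 32-bit boundary (an exact multiple still gains 4 bytes for the NUL),
 * then add the 4-byte type/length header.
 */
static size_t fdmi_string_attr_size(const char *s, size_t cap)
{
	size_t len = strnlen(s, cap);

	len += (len & 3) ? (4 - (len & 3)) : 4;
	return FOURBYTES + len;
}

int main(void)
{
	/* "Emulex Corporation" is 18 bytes -> padded to 20 -> 24 on the wire */
	printf("%zu\n", fdmi_string_attr_size("Emulex Corporation", 64));
	return 0;
}
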
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5037ea09a810..f5252e45a48a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5156,7 +5156,7 @@ error_out:
static int
lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
- uint16_t ext_cnt, ext_size;
+ uint16_t ext_cnt = 0, ext_size = 0;
len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAvailable Extents Information:\n");
@@ -5531,7 +5531,7 @@ lpfc_rx_monitor_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
if (!debug->buffer) {
kfree(debug);
goto out;
@@ -5552,57 +5552,18 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
struct lpfc_rx_monitor_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
char *buffer = debug->buffer;
- struct rxtable_entry *entry;
- int i, len = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
- /* Table is getting updated */
- msleep(20);
- head = atomic_read(&phba->rxtable_idx_head);
- }
- tail = atomic_xchg(&phba->rxtable_idx_tail, head);
- if (!phba->rxtable || head == tail) {
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "Rxtable is empty\n");
- goto out;
- }
- last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
- start = tail;
-
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- " MaxBPI Tot_Data_CMF Tot_Data_Cmd "
- "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
- "Bsy IO_cnt Info BWutil(ms)\n");
-get_table:
- for (i = start; i < last; i++) {
- entry = &phba->rxtable[i];
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "%3d:%12lld %12lld %12lld %12lld "
- "%7lldus %8lld %7lld "
- "%2d %4d %2d %2d(%2d)\n",
- i, entry->max_bytes_per_interval,
- entry->cmf_bytes,
- entry->total_bytes,
- entry->rcv_bytes,
- entry->avg_io_latency,
- entry->avg_io_size,
- entry->max_read_cnt,
- entry->cmf_busy,
- entry->io_cnt,
- entry->cmf_info,
- entry->timer_utilization,
- entry->timer_interval);
+ if (!phba->rx_monitor) {
+ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
+ "Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
+ MAX_DEBUGFS_RX_INFO_SIZE,
+ LPFC_MAX_RXMONITOR_ENTRY);
}
- if (head != last) {
- start = 0;
- last = head;
- goto get_table;
- }
-out:
- return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
+ strlen(buffer));
}
static int
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6dd361c1fd31..8d2e8d05bbc0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
-#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
+#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 37a4b79010bf..f82615d87c4b 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -149,7 +149,6 @@ struct lpfc_nodelist {
uint32_t cmd_qdepth;
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
- struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
@@ -188,7 +187,6 @@ struct lpfc_node_rrq {
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
-#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 9e69de9eb992..863b2125fed6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2200,10 +2200,6 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
-
pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PLOGI request, remainder of payload is service parameters */
@@ -3992,7 +3988,8 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* ELS cmd tag <ulpIoTag> completes */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"4676 Fabric EDC Rsp: "
"0x%02x, 0x%08x\n",
edc_rsp->acc_hdr.la_cmd,
@@ -4029,18 +4026,18 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6462 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
- lpfc_printf_log(
- phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
"4617 Link Fault Desc Data: 0x%08x 0x%08x "
"0x%08x 0x%08x 0x%08x\n",
be32_to_cpu(plnkflt->desc_tag),
@@ -4120,8 +4117,26 @@ out:
}
static void
-lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
+lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
{
+ struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
+
+ lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
+ lft->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
+
+ lft->degrade_activate_threshold =
+ cpu_to_be32(phba->degrade_activate_threshold);
+ lft->degrade_deactivate_threshold =
+ cpu_to_be32(phba->degrade_deactivate_threshold);
+ lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
+
/* We are assuming cgd was zero'ed before calling this routine */
/* Configure the congestion detection capability */
@@ -4165,6 +4180,23 @@ lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
}
+static bool
+lpfc_link_is_lds_capable(struct lpfc_hba *phba)
+{
+ if (!(phba->lmt & LMT_64Gb))
+ return false;
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return false;
+
+ if (phba->sli4_hba.conf_trunk) {
+ if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
+ return true;
+ } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
+ return true;
+ }
+ return false;
+}
+
/**
* lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
* @vport: pointer to a host virtual N_Port data structure.
@@ -4192,12 +4224,12 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_els_edc_req *edc_req;
- struct fc_diag_cg_sig_desc *cgn_desc;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
u16 cmdsize;
struct lpfc_nodelist *ndlp;
u8 *pcmd = NULL;
- u32 edc_req_size, cgn_desc_size;
+ u32 cgn_desc_size, lft_desc_size;
int rc;
if (vport->port_type == LPFC_NPIV_PORT)
@@ -4207,13 +4239,17 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
return -ENODEV;
- /* If HBA doesn't support signals, drop into RDF */
- if (!phba->cgn_init_reg_signal)
+ cgn_desc_size = (phba->cgn_init_reg_signal) ?
+ sizeof(struct fc_diag_cg_sig_desc) : 0;
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize = cgn_desc_size + lft_desc_size;
+
+ /* Skip EDC if no applicable descriptors */
+ if (!cmdsize)
goto try_rdf;
- edc_req_size = sizeof(struct fc_els_edc);
- cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
- cmdsize = edc_req_size + cgn_desc_size;
+ cmdsize += sizeof(struct fc_els_edc);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_EDC);
if (!elsiocb)
@@ -4222,15 +4258,19 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
/* Configure the payload for the supported Diagnostics capabilities. */
pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_req = (struct lpfc_els_edc_req *)pcmd;
- edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
- edc_req->edc.edc_cmd = ELS_EDC;
-
- cgn_desc = &edc_req->cgn_desc;
+ edc_req = (struct fc_els_edc *)pcmd;
+ edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
+ edc_req->edc_cmd = ELS_EDC;
+ tlv = edc_req->desc;
- lpfc_format_edc_cgn_desc(phba, cgn_desc);
+ if (cgn_desc_size) {
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ tlv = fc_tlv_next_desc(tlv);
+ }
- phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4623 Xmit EDC to remote "
@@ -4676,47 +4716,52 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
- /* The driver has a VALID PLOGI but the rport has
- * rejected the PRLI - can't do it now. Delay
- * for 1 second and try again.
- *
- * However, if explanation is REQ_UNSUPPORTED there's
- * no point to retry PRLI.
+ /* Special case for PRLI LS_RJTs. Recall that lpfc
+ * uses a single routine to issue both PRLI FC4 types.
+ * If the PRLI is rejected because that FC4 type
+			 * isn't supported, don't retry; retrying would cause
+			 * duplicate transport registrations. Otherwise, parse
+ * the reason code/reason code explanation and take the
+ * appropriate action.
*/
- if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
- stat.un.b.lsRjtRsnCodeExp !=
- LSEXP_REQ_UNSUPPORTED) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
- break;
- }
-
- /* Legacy bug fix code for targets with PLOGI delays. */
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CMD_IN_PROGRESS) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS | LOG_NODE,
+ "0153 ELS cmd x%x LS_RJT by x%x. "
+ "RsnCode x%x RsnCodeExp x%x\n",
+ cmd, did, stat.un.b.lsRjtRsnCode,
+ stat.un.b.lsRjtRsnCodeExp);
+
+ switch (stat.un.b.lsRjtRsnCodeExp) {
+ case LSEXP_CANT_GIVE_DATA:
+ case LSEXP_CMD_IN_PROGRESS:
if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = 48;
}
retry = 1;
break;
- }
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CANT_GIVE_DATA) {
- if (cmd == ELS_CMD_PLOGI) {
+ case LSEXP_REQ_UNSUPPORTED:
+ case LSEXP_NO_RSRC_ASSIGN:
+ /* These explanation codes get no retry. */
+ if (cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI)
+ break;
+ fallthrough;
+ default:
+				/* Restrict the delay and retry action to a
+				 * specific cmd set. Other ELS commands are
+				 * not expected to be retried.
+ */
+ if (cmd == ELS_CMD_PLOGI ||
+ cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI) {
delay = 1000;
- maxretry = 48;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
}
- retry = 1;
- break;
- }
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
break;
}
+
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
@@ -4797,13 +4842,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_REQ_UNSUPPORTED) {
- if (cmd == ELS_CMD_PRLI) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
- retry = 0;
+ if (cmd == ELS_CMD_PRLI)
goto out_retry;
- }
}
break;
}
@@ -5784,14 +5824,21 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_els_edc_rsp *edc_rsp;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
struct lpfc_iocbq *elsiocb;
IOCB_t *icmd, *cmd;
union lpfc_wqe128 *wqe;
+ u32 cgn_desc_size, lft_desc_size;
+ u16 cmdsize;
uint8_t *pcmd;
- int cmdsize, rc;
+ int rc;
- cmdsize = sizeof(struct lpfc_els_edc_rsp);
+ cmdsize = sizeof(struct fc_els_edc_resp);
+ cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize += cgn_desc_size + lft_desc_size;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
@@ -5813,15 +5860,19 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd = elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
- edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
- edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
- FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
- edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
- edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
+ edc_rsp = (struct fc_els_edc_resp *)pcmd;
+ edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
+ edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
+ cgn_desc_size + lft_desc_size);
+ edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+ edc_rsp->lsri.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
- edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
- lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
+ edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
+ tlv = edc_rsp->desc;
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue EDC ACC: did:x%x flg:x%x refcnt %d",
@@ -6006,7 +6057,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (prli_fc4_req == PRLI_FCP_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
} else {
@@ -6069,7 +6120,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
@@ -9086,7 +9137,7 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *ptr, dtag;
const char *dtag_nm;
int desc_cnt = 0, bytes_remain;
- bool rcv_cap_desc = false;
+ struct fc_diag_lnkflt_desc *plnkflt;
payload = cmdiocb->cmd_dmabuf->virt;
@@ -9094,7 +9145,8 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
bytes_remain = be32_to_cpu(edc_req->desc_len);
ptr = (uint32_t *)payload;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"3319 Rcv EDC payload len %d: x%x x%x x%x\n",
bytes_remain, be32_to_cpu(*ptr),
be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
@@ -9113,9 +9165,10 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* cycle through EDC diagnostic descriptors to find the
* congestion signaling capability descriptor
*/
- while (bytes_remain && !rcv_cap_desc) {
+ while (bytes_remain) {
if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6464 Truncated TLV hdr on "
"Diagnostic descriptor[%d]\n",
desc_cnt);
@@ -9128,16 +9181,27 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6465 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
- /* No action for Link Fault descriptor for now */
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
+ "4626 Link Fault Desc Data: x%08x len x%x "
+ "da x%x dd x%x interval x%x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
break;
case ELS_DTAG_CG_SIGNAL_CAP:
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
@@ -9164,11 +9228,11 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_least_capable_settings(
phba, (struct fc_diag_cg_sig_desc *)tlv);
- rcv_cap_desc = true;
break;
default:
dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6467 unknown Diagnostic "
"Descriptor[%d]: tag x%x (%s)\n",
desc_cnt, dtag, dtag_nm);
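
Both lpfc_cmpl_els_edc() and lpfc_els_rcv_edc() above treat the EDC payload as a list of TLV descriptors. A stripped-down sketch of that walk, using the generic helpers from include/uapi/scsi/fc/fc_els.h (demo_walk_tlvs() is hypothetical, and the real callers also validate each descriptor's length against the expected structure size before touching its body):

static void demo_walk_tlvs(struct fc_tlv_desc *tlv, int bytes_remain)
{
	/* Each descriptor carries its own length; stop as soon as the
	 * remaining bytes cannot hold a header or the advertised size.
	 */
	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
		switch (be32_to_cpu(tlv->desc_tag)) {
		case ELS_DTAG_LNK_FAULT_CAP:
		case ELS_DTAG_CG_SIGNAL_CAP:
			/* handle the descriptor body here */
			break;
		default:
			break;
		}
		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
	}
}
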
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2645def612e6..c7f834ba8edb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1242,6 +1242,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->trunk_link.phy_lnk_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
@@ -1353,8 +1355,13 @@ lpfc_linkup_port(struct lpfc_vport *vport)
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ if (phba->defer_flogi_acc_flag)
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ else
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
+ FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
@@ -1392,7 +1399,6 @@ lpfc_linkup(struct lpfc_hba *phba)
/* reinitialize initial HBA flag */
phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
- phba->defer_flogi_acc_flag = false;
return 0;
}
@@ -2964,7 +2970,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, stop the roundrobin failover process */
@@ -3069,7 +3075,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, no need to proceed */
@@ -3790,6 +3796,9 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->cmf_active_mode != LPFC_CFG_OFF)
lpfc_cmf_signal_init(phba);
+ if (phba->lmt & LMT_64Gb)
+ lpfc_read_lds_params(phba);
+
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
@@ -4389,8 +4398,11 @@ out:
rc = lpfc_issue_els_edc(vport, 0);
lpfc_printf_log(phba, KERN_INFO,
LOG_INIT | LOG_ELS | LOG_DISCOVERY,
- "4220 EDC issue error x%x, Data: x%x\n",
+ "4220 Issue EDC status x%x Data x%x\n",
rc, phba->cgn_init_reg_signal);
+ } else if (phba->lmt & LMT_64Gb) {
+		/* May send a Link Fault capability descriptor */
+ lpfc_issue_els_edc(vport, 0);
} else {
lpfc_issue_els_rdf(vport, 0);
}
@@ -4788,22 +4800,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
new_state == NLP_STE_UNMAPPED_NODE)
lpfc_nlp_reg_node(vport, ndlp);
- if ((new_state == NLP_STE_MAPPED_NODE) &&
- (vport->stat_data_enabled)) {
- /*
- * A new target is discovered, if there is no buffer for
- * statistical data collection allocate buffer.
- */
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_KERNEL);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0286 lpfc_nlp_state_cleanup failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
/*
* If the node just added to Mapped list was an FCP target,
* but the remote port registration failed or assigned a target
@@ -6648,7 +6644,6 @@ lpfc_nlp_release(struct kref *kref)
ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- kfree(ndlp->lat_data);
if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
mempool_free(ndlp->active_rrqs_xri_bitmap,
ndlp->phba->active_rrq_pool);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 071983e2cdfe..5c283936ff08 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -703,6 +703,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
} un;
@@ -1441,30 +1442,56 @@ struct lpfc_vmid_gallapp_ident_list {
/* Definitions for HBA / Port attribute entries */
-/* Attribute Entry */
-struct lpfc_fdmi_attr_entry {
- union {
- uint32_t AttrInt;
- uint8_t AttrTypes[32];
- uint8_t AttrString[256];
- struct lpfc_name AttrWWN;
- } un;
+/* Attribute Entry Structures */
+
+struct lpfc_fdmi_attr_u32 {
+ __be16 type;
+ __be16 len;
+ __be32 value_u32;
};
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- /* Marks start of Value (ATTRIBUTE_ENTRY) */
- struct lpfc_fdmi_attr_entry AttrValue;
-} __packed;
+struct lpfc_fdmi_attr_wwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 name[8];
+};
+
+struct lpfc_fdmi_attr_fullwwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 nname[8];
+ u8 pname[8];
+};
+
+struct lpfc_fdmi_attr_fc4types {
+ __be16 type;
+ __be16 len;
+ u8 value_types[32];
+};
+
+struct lpfc_fdmi_attr_string {
+ __be16 type;
+ __be16 len;
+ char value_string[256];
+};
+
+/* Maximum FDMI attribute length is Type+Len (4 bytes) + 256-byte string */
+#define FDMI_MAX_ATTRLEN sizeof(struct lpfc_fdmi_attr_string)
/*
* HBA Attribute Block
*/
struct lpfc_fdmi_attr_block {
uint32_t EntryCnt; /* Number of HBA attribute entries */
- struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+ /* Variable Length Attribute Entry TLV's follow */
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4527fef23ae7..5288fc69908a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -738,6 +738,7 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
#define LPFC_SEC_TO_MSEC 1000
+#define LPFC_MSECS_TO_SECS(msecs) ((msecs) / 1000)
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -3483,9 +3484,10 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x12
-#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_DUAL_DUMP 0x1e
+#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_ENABLE_MI 0x21
+#define LPFC_SET_LD_SIGNAL 0x23
#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
struct mbox_header header;
@@ -3516,13 +3518,17 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_cmf_SHIFT 0
#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
#define lpfc_mbx_set_feature_cmf_WORD word6
+#define lpfc_mbx_set_feature_lds_qry_SHIFT 0
+#define lpfc_mbx_set_feature_lds_qry_MASK 0x00000001
+#define lpfc_mbx_set_feature_lds_qry_WORD word6
+#define LPFC_QUERY_LDS_OP 1
#define lpfc_mbx_set_feature_mi_SHIFT 0
#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
#define lpfc_mbx_set_feature_mi_WORD word6
#define lpfc_mbx_set_feature_milunq_SHIFT 16
#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_milunq_WORD word6
- uint32_t word7;
+ u32 word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UERP_WORD word7
@@ -3536,6 +3542,8 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
+ u32 word9;
+ u32 word10;
};
@@ -4313,7 +4321,7 @@ struct lpfc_acqe_cgn_signal {
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
- uint32_t reserved;
+ uint32_t event_data3;
uint32_t trailer;
#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
@@ -4326,6 +4334,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
+#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
};
/*
@@ -4798,6 +4807,9 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
+#define cmf_sync_period_SHIFT 16
+#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
};
@@ -5046,22 +5058,6 @@ struct lpfc_grp_hdr {
{ FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
}
-/* EDC supports two descriptors. When allocated, it is the
- * size of this structure plus each supported descriptor.
- */
-struct lpfc_els_edc_req {
- struct fc_els_edc edc; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
-/* Minimum structure defines for the EDC response.
- * Balance is in buffer.
- */
-struct lpfc_els_edc_rsp {
- struct fc_els_edc_resp edc_rsp; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
/* Used for logging FPIN messages */
#define LPFC_FPIN_WWPN_LINE_SZ 128
#define LPFC_FPIN_WWPN_LINE_CNT 6
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c69c5a0979ec..b49c39569386 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -325,8 +325,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
prog_id_word = pmboxq->u.mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
- if (prg->dist < 4)
- dist = dist_char[prg->dist];
+ dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
@@ -2258,6 +2257,101 @@ lpfc_handle_latt_err_exit:
return;
}
+static void
+lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
+{
+ int i, j;
+
+ while (length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->SerialNumber[j++] = vpd[(*pindex)++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelDesc[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelName[j++] = vpd[(*pindex)++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ProgramType[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3 + i);
+ while (i--) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ (*pindex)++;
+ } else
+ phba->Port[j++] = vpd[(*pindex)++];
+ if (j == 19)
+ break;
+ }
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
+ continue;
+ } else {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ *pindex += i;
+ length -= (3 + i);
+ }
+ }
+}
+
/**
* lpfc_parse_vpd - Parse VPD (Vital Product Data)
* @phba: pointer to lpfc hba data structure.
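
The new lpfc_fill_vpd() helper above assumes the usual VPD record layout: a two-character keyword ('SN', 'V1'..'V4'), a one-byte data length, then that many data bytes; recognized keywords are copied into the hba fields and anything else is skipped. A minimal sketch of just the skip step (demo_skip_vpd_record() is hypothetical):

static void demo_skip_vpd_record(const uint8_t *vpd, int *pindex, int *length)
{
	int dlen;

	*pindex += 2;		/* step over the two keyword characters */
	dlen = vpd[*pindex];	/* one-byte data length follows the keyword */
	*pindex += 1 + dlen;	/* skip the length byte and the data bytes */
	*length -= (3 + dlen);	/* keyword(2) + length(1) + data(dlen) consumed */
}
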
@@ -2277,7 +2371,7 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
- int i, j;
+ int i;
int finished = 0;
int index = 0;
@@ -2310,101 +2404,10 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
Length = ((((unsigned short)lenhi) << 8) + lenlo);
if (Length > len - index)
Length = len - index;
- while (Length > 0) {
- /* Look for Serial Number */
- if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->SerialNumber[j++] = vpd[index++];
- if (j == 31)
- break;
- }
- phba->SerialNumber[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
- phba->vpd_flag |= VPD_MODEL_DESC;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelDesc[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ModelDesc[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
- phba->vpd_flag |= VPD_MODEL_NAME;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelName[j++] = vpd[index++];
- if (j == 79)
- break;
- }
- phba->ModelName[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
- phba->vpd_flag |= VPD_PROGRAM_TYPE;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ProgramType[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ProgramType[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
- phba->vpd_flag |= VPD_PORT;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_GET)) {
- j++;
- index++;
- } else
- phba->Port[j++] = vpd[index++];
- if (j == 19)
- break;
- }
- if ((phba->sli_rev != LPFC_SLI_REV4) ||
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_NON))
- phba->Port[j] = 0;
- continue;
- }
- else {
- index += 2;
- i = vpd[index];
- index += 1;
- index += i;
- Length -= (3 + i);
- }
- }
- finished = 0;
- break;
+
+ lpfc_fill_vpd(phba, vpd, Length, &index);
+ finished = 0;
+ break;
case 0x78:
finished = 1;
break;
@@ -4614,6 +4617,17 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
return rol64(wwn, 32);
}
+static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ return LPFC_MAX_SG_TABLESIZE;
+ else
+ return phba->cfg_scsi_seg_cnt;
+ else
+ return phba->cfg_sg_seg_cnt;
+}
+
/**
* lpfc_vmid_res_alloc - Allocates resources for VMID
* @phba: pointer to lpfc hba data structure.
@@ -4716,42 +4730,26 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Seed template for SCSI host registration */
if (dev == &phba->pcidev->dev) {
- template = &phba->port_template;
-
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* Seed physical port template */
- memcpy(template, &lpfc_template, sizeof(*template));
+ template = &lpfc_template;
if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
template->eh_host_reset_handler = NULL;
- /* Template for all vports this physical port creates */
- memcpy(&phba->vport_template, &lpfc_template,
- sizeof(*template));
- phba->vport_template.shost_groups = lpfc_vport_groups;
- phba->vport_template.eh_bus_reset_handler = NULL;
- phba->vport_template.eh_host_reset_handler = NULL;
- phba->vport_template.vendor_id = 0;
-
- /* Initialize the host templates with updated value */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- template->sg_tablesize = phba->cfg_scsi_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_scsi_seg_cnt;
- } else {
- template->sg_tablesize = phba->cfg_sg_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_sg_seg_cnt;
- }
-
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
} else {
/* NVMET is for physical port only */
- memcpy(template, &lpfc_template_nvme,
- sizeof(*template));
+ template = &lpfc_template_nvme;
}
} else {
- template = &phba->vport_template;
+ /* Seed vport template */
+ template = &lpfc_vport_template;
+
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
}
shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
@@ -4784,11 +4782,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-
- if (phba->cfg_xpsgl && !phba->nvmet_support)
- shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
- else
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -5569,38 +5562,12 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
- struct rxtable_entry *entry;
- int cnt = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (!phba->rxtable || head == tail) {
- lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "4411 Rxtable is empty\n");
- return;
- }
- last = tail;
- start = head;
-
- /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
- while (start != last) {
- if (start)
- start--;
- else
- start = LPFC_MAX_RXMONITOR_ENTRY - 1;
- entry = &phba->rxtable[start];
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
- "Lat %lld ASz %lld Info %02d BWUtil %d "
- "Int %d slot %d\n",
- cnt, entry->max_bytes_per_interval,
- entry->total_bytes, entry->rcv_bytes,
- entry->avg_io_latency, entry->avg_io_size,
- entry->cmf_info, entry->timer_utilization,
- entry->timer_interval, start);
- cnt++;
- if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
- return;
+ "4411 Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
+ LPFC_MAX_RXMONITOR_DUMP);
}
}
@@ -6007,9 +5974,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
{
struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
cmf_timer);
- struct rxtable_entry *entry;
+ struct rx_info_entry entry;
uint32_t io_cnt;
- uint32_t head, tail;
uint32_t busy, max_read;
uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
@@ -6129,40 +6095,30 @@ lpfc_cmf_timer(struct hrtimer *timer)
}
/* Save rxmonitor information for debug */
- if (phba->rxtable) {
- head = atomic_xchg(&phba->rxtable_idx_head,
- LPFC_RXMONITOR_TABLE_IN_USE);
- entry = &phba->rxtable[head];
- entry->total_bytes = total;
- entry->cmf_bytes = total + extra;
- entry->rcv_bytes = rcv;
- entry->cmf_busy = busy;
- entry->cmf_info = phba->cmf_active_info;
+ if (phba->rx_monitor) {
+ entry.total_bytes = total;
+ entry.cmf_bytes = total + extra;
+ entry.rcv_bytes = rcv;
+ entry.cmf_busy = busy;
+ entry.cmf_info = phba->cmf_active_info;
if (io_cnt) {
- entry->avg_io_latency = div_u64(lat, io_cnt);
- entry->avg_io_size = div_u64(rcv, io_cnt);
+ entry.avg_io_latency = div_u64(lat, io_cnt);
+ entry.avg_io_size = div_u64(rcv, io_cnt);
} else {
- entry->avg_io_latency = 0;
- entry->avg_io_size = 0;
+ entry.avg_io_latency = 0;
+ entry.avg_io_size = 0;
}
- entry->max_read_cnt = max_read;
- entry->io_cnt = io_cnt;
- entry->max_bytes_per_interval = mbpi;
+ entry.max_read_cnt = max_read;
+ entry.io_cnt = io_cnt;
+ entry.max_bytes_per_interval = mbpi;
if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
- entry->timer_utilization = phba->cmf_last_ts;
+ entry.timer_utilization = phba->cmf_last_ts;
else
- entry->timer_utilization = ms;
- entry->timer_interval = ms;
+ entry.timer_utilization = ms;
+ entry.timer_interval = ms;
phba->cmf_last_ts = 0;
- /* Increment rxtable index */
- head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (head == tail) {
- tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- atomic_set(&phba->rxtable_idx_tail, tail);
- }
- atomic_set(&phba->rxtable_idx_head, head);
+ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
}
if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
@@ -6232,6 +6188,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
{
uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+ u8 cnt = 0;
phba->sli4_hba.link_state.speed =
lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -6250,26 +6207,36 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
phba->trunk_link.link1.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
phba->trunk_link.link2.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
phba->trunk_link.link3.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+ cnt++;
}
+ if (cnt)
+ phba->trunk_link.phy_lnk_speed =
+ phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
+ else
+ phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
+
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2910 Async FC Trunking Event - Speed:%d\n"
"\tLogical speed:%d "
@@ -6347,7 +6314,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
LPFC_FC_LA_TYPE_LINK_DOWN)
phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
+ else if (!phba->sli4_hba.conf_trunk)
phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
@@ -6465,7 +6432,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"2901 Async SLI event - Type:%d, Event Data: x%08x "
"x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- acqe_sli->reserved, acqe_sli->trailer);
+ acqe_sli->event_data3, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -6494,7 +6461,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
temp_event_data.event_code = LPFC_NORMAL_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
"3191 Normal Temperature:%d Celsius - Port Name %c\n",
acqe_sli->event_data1, port_name);
@@ -6672,6 +6639,15 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
}
}
break;
+ case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
+ /* May be accompanied by a temperature event */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
+ "2902 Remote Degrade Signaling: x%08x x%08x "
+ "x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ acqe_sli->event_data3);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -7085,6 +7061,12 @@ lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
spin_unlock_irq(&phba->hbalock);
}
+static const char * const lpfc_cmf_mode_to_str[] = {
+ "OFF",
+ "MANAGED",
+ "MONITOR",
+};
+
/**
* lpfc_cgn_params_parse - Process a FW cong parm change event
* @phba: pointer to lpfc hba data structure.
@@ -7104,6 +7086,7 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
{
struct lpfc_cgn_info *cp;
uint32_t crc, oldmode;
+ char acr_string[4] = {0};
/* Make sure the FW has encoded the correct magic number to
* validate the congestion parameter in FW memory.
@@ -7180,9 +7163,6 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MONITOR:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4661 Switch from MANAGED to "
- "`MONITOR mode\n");
phba->cmf_max_bytes_per_interval =
phba->cmf_link_byte_count;
@@ -7201,14 +7181,26 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MANAGED:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4662 Switch from MONITOR to "
- "MANAGED mode\n");
lpfc_cmf_signal_init(phba);
break;
}
break;
}
+ if (oldmode != LPFC_CFG_OFF ||
+ oldmode != phba->cgn_p.cgn_param_mode) {
+ if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
+ scnprintf(acr_string, sizeof(acr_string), "%u",
+ phba->cgn_p.cgn_param_level0);
+ else
+ scnprintf(acr_string, sizeof(acr_string), "NA");
+
+ dev_info(&phba->pcidev->dev, "%d: "
+ "4663 CMF: Mode %s acr %s\n",
+ phba->brd_no,
+ lpfc_cmf_mode_to_str
+ [phba->cgn_p.cgn_param_mode],
+ acr_string);
+ }
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
"4669 FW cgn parm buf wrong magic 0x%x "
@@ -8053,7 +8045,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -8315,8 +8307,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
&phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
+ if (!phba->lpfc_sg_dma_buf_pool) {
+ rc = -ENOMEM;
goto out_free_bsmbx;
+ }
phba->lpfc_cmd_rsp_buf_pool =
dma_pool_create("lpfc_cmd_rsp_buf_pool",
@@ -8324,8 +8318,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp),
i, 0);
- if (!phba->lpfc_cmd_rsp_buf_pool)
+ if (!phba->lpfc_cmd_rsp_buf_pool) {
+ rc = -ENOMEM;
goto out_free_sg_dma_buf;
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -8481,6 +8477,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
@@ -12413,7 +12412,7 @@ lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_irq_chann; i++) {
eqhdl = lpfc_get_eq_hdl(i);
- eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->irq = LPFC_IRQ_EMPTY;
eqhdl->phba = phba;
}
}
@@ -12786,7 +12785,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
__lpfc_cpuhp_remove(phba);
@@ -13050,9 +13049,17 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_DRIVER_HANDLER_NAME"%d", index);
eqhdl->idx = index;
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = pci_irq_vector(phba->pcidev, index);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0489 MSI-X fast-path (%d) "
+ "pci_irq_vec failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ eqhdl->irq = rc;
+
+ rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -13060,8 +13067,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
goto cfg_fail_out;
}
- eqhdl->irq = pci_irq_vector(phba->pcidev, index);
-
if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
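
The MSI-X, MSI and INTx changes above all adopt the same defensive pattern: resolve the Linux IRQ number with pci_irq_vector(), bail out on a negative errno, and only then cache it and pass it to request_irq(). A generic sketch of that pattern (demo_setup_vector() and its parameters are illustrative, headers omitted):

static int demo_setup_vector(struct pci_dev *pdev, unsigned int index,
			     irq_handler_t handler, const char *name,
			     void *data)
{
	int irq;

	irq = pci_irq_vector(pdev, index);	/* may return a negative errno */
	if (irq < 0)
		return irq;

	return request_irq(irq, handler, 0, name, data);
}
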
@@ -13178,7 +13183,14 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
}
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ rc = pci_irq_vector(phba->pcidev, 0);
+ if (rc < 0) {
+ pci_free_irq_vectors(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0496 MSI pci_irq_vec failed (%d)\n", rc);
+ return rc;
+ }
+ eqhdl->irq = rc;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
@@ -13205,8 +13217,8 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
* MSI-X -> MSI -> IRQ.
*
* Return codes
- * 0 - successful
- * other values - error
+ * Interrupt mode (2, 1, 0) - successful
+ * LPFC_INTR_ERROR - error
**/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
@@ -13251,7 +13263,14 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
intr_mode = 0;
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ retval = pci_irq_vector(phba->pcidev, 0);
+ if (retval < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0502 INTR pci_irq_vec failed (%d)\n",
+ retval);
+ return LPFC_INTR_ERROR;
+ }
+ eqhdl->irq = retval;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 4d455da9cd69..b39cefcd8703 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,7 +35,7 @@
#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
#define LOG_LIBDFC 0x00002000 /* Libdfc events */
#define LOG_VPORT 0x00004000 /* NPIV events */
-#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_LDS_EVENT 0x00008000 /* Link Degrade Signaling events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 870e53b8f81d..89cbeba06aea 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -344,9 +344,12 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
phba->cgn_i = NULL;
}
- /* Free RX table */
- kfree(phba->rxtable);
- phba->rxtable = NULL;
+ /* Free RX Monitor */
+ if (phba->rx_monitor) {
+ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ }
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 084c0f9fdc3a..7a1563564df7 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -112,62 +112,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
#define LPFC_INVALID_REFTAG ((u32)-1)
/**
- * lpfc_update_stats - Update statistical data for the command completion
- * @vport: The virtual port on which this call is executing.
- * @lpfc_cmd: lpfc scsi command object pointer.
- *
- * This function is called when there is a command completion and this
- * function updates the statistical data for the command completion.
- **/
-static void
-lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- unsigned long flags;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long latency;
- int i;
-
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- (cmd->result))
- return;
-
- latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
- rdata = lpfc_cmd->rdata;
- pnode = rdata->pnode;
-
- spin_lock_irqsave(shost->host_lock, flags);
- if (!pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return;
- }
-
- if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
- i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
- phba->bucket_step;
- /* check array subscript bounds */
- if (i < 0)
- i = 0;
- else if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT - 1;
- } else {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
- if (latency <= (phba->bucket_base +
- ((1<<i)*phba->bucket_step)))
- break;
- }
-
- pnode->lat_data[i].cmd_count++;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
-/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -4272,7 +4216,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4335,8 +4279,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
cmd->retries, scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
-
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4562,7 +4504,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4617,7 +4559,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -6853,3 +6794,30 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
+
+struct scsi_host_template lpfc_vport_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = NULL,
+ .eh_host_reset_handler = NULL,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .shost_groups = lpfc_vport_groups,
+ .max_sectors = 0xFFFFFFFF,
+ .vendor_id = 0,
+ .change_queue_depth = scsi_change_queue_depth,
+ .track_queue_depth = 1,
+};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 3836d7f6a575..eae56944f31b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -126,10 +126,6 @@ struct fcp_cmnd {
};
-struct lpfc_scsicmd_bkt {
- uint32_t cmd_count;
-};
-
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 608016725db9..99d06dc7ddf6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1916,6 +1916,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
+ u16 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -1970,10 +1971,14 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
lpfc_acqe_cgn_frequency;
bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+ warn_sync_period = lpfc_acqe_cgn_frequency;
} else {
/* We hit a FPIN warning condition */
bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+ if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
+ warn_sync_period =
+ LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
}
}
@@ -1989,6 +1994,7 @@ initpath:
bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
@@ -2850,6 +2856,7 @@ void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
+ struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
uint16_t rpi, vpi;
@@ -2862,6 +2869,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!(phba->pport->load_flag & FC_UNLOADING) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ if (mp) {
+ pmb->ctx_buf = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -6202,6 +6215,9 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
LPFC_MBOXQ_t *mbox;
+ *extnt_count = 0;
+ *extnt_size = 0;
+
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -6817,8 +6833,13 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
phba->sli4_hba.pc_sli4_params.mi_ver);
break;
+ case LPFC_SET_LD_SIGNAL:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 16;
+ bf_set(lpfc_mbx_set_feature_lds_qry,
+ &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
+ break;
case LPFC_SET_ENABLE_CMF:
- bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
mbox->u.mqe.un.set_feature.param_len = 4;
bf_set(lpfc_mbx_set_feature_cmf,
@@ -7814,6 +7835,62 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
static void
+lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
+ "4622 SET_FEATURE (x%x) mbox failed, "
+ "status x%x add_status x%x, mbx status x%x\n",
+ LPFC_SET_LD_SIGNAL, shdr_status,
+ shdr_add_status, pmb->u.mb.mbxStatus);
+ phba->degrade_activate_threshold = 0;
+ phba->degrade_deactivate_threshold = 0;
+ phba->fec_degrade_interval = 0;
+ goto out;
+ }
+
+ phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
+ phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
+ phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
+ "4624 Success: da x%x dd x%x interval x%x\n",
+ phba->degrade_activate_threshold,
+ phba->degrade_deactivate_threshold,
+ phba->fec_degrade_interval);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+int
+lpfc_read_lds_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
+
+static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -7960,6 +8037,172 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
}
/**
+ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entries: Number of rx_info_entry objects to allocate in ring
+ *
+ * Return:
+ * 0 - Success
+ *   -ENOMEM - Failure to allocate the ring buffer
+ **/
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries)
+{
+ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
+ GFP_KERNEL);
+ if (!rx_monitor->ring)
+ return -ENOMEM;
+
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_lock_init(&rx_monitor->lock);
+ rx_monitor->entries = entries;
+
+ return 0;
+}
+
+/**
+ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ **/
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
+{
+ spin_lock(&rx_monitor->lock);
+ kfree(rx_monitor->ring);
+ rx_monitor->ring = NULL;
+ rx_monitor->entries = 0;
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_unlock(&rx_monitor->lock);
+}
+
+/**
+ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entry: Pointer to rx_info_entry
+ *
+ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this
+ * performs a deep copy of the rx_info_entry, not a shallow copy of the pointer.
+ *
+ * This is called from lpfc_cmf_timer, which runs in timer/softirq context.
+ *
+ * If the ring overflows, the oldest entry is overwritten, preserving FIFO
+ * order on a best-effort basis.
+ **/
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+
+ spin_lock(ring_lock);
+ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
+ *tail_idx = (*tail_idx + 1) % ring_size;
+
+ /* Best effort of FIFO saved data */
+ if (*tail_idx == *head_idx)
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ spin_unlock(ring_lock);
+}
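
For readers unfamiliar with the idiom, the record path above is the classic overwrite-oldest ring: advance the tail after each deep copy and, when the tail catches up with the head, push the head forward so the stalest entry is dropped. A minimal stand-alone sketch of the same pattern, using hypothetical ring_buf/ring_entry types that are not part of lpfc:

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct ring_entry { u64 payload; };

struct ring_buf {
        struct ring_entry *slots;
        u32 head;
        u32 tail;
        u32 size;
        spinlock_t lock;
};

/* Deep-copy an entry into the ring; overwrite the oldest slot when full. */
static void ring_record(struct ring_buf *rb, const struct ring_entry *e)
{
        spin_lock(&rb->lock);
        memcpy(&rb->slots[rb->tail], e, sizeof(*e));
        rb->tail = (rb->tail + 1) % rb->size;
        if (rb->tail == rb->head)       /* caught up: drop the oldest entry */
                rb->head = (rb->head + 1) % rb->size;
        spin_unlock(&rb->lock);
}
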
+
+/**
+ * lpfc_rx_monitor_report - Read out rx_monitor's ring
+ * @phba: Pointer to lpfc_hba object
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @buf: Pointer to char buffer that will contain rx monitor info data
+ * @buf_len: Length of buf, including the terminating null character
+ * @max_read_entries: Maximum number of entries to read out of ring
+ *
+ * Used to dump/read what's in rx_monitor's ring buffer.
+ *
+ * If buf is NULL or buf_len is 0, the information is logged to kmsg instead
+ * of being written to buf.
+ *
+ * Return:
+ * Number of entries read out of the ring
+ **/
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ struct rx_info_entry *entry;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+ u32 cnt = 0;
+ char tmp[DBG_LOG_STR_SZ] = {0};
+ bool log_to_kmsg = (!buf || !buf_len) ? true : false;
+
+ if (!log_to_kmsg) {
+ /* clear the buffer to be sure */
+ memset(buf, 0, buf_len);
+
+ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
+ "%-8s%-8s%-8s%-16s\n",
+ "MaxBPI", "Tot_Data_CMF",
+ "Tot_Data_Cmd", "Tot_Data_Cmpl",
+ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
+ "IO_cnt", "Info", "BWutil(ms)");
+ }
+
+ /* Needs to be _bh because record is called from timer interrupt
+ * context
+ */
+ spin_lock_bh(ring_lock);
+ while (*head_idx != *tail_idx) {
+ entry = &ring[*head_idx];
+
+ /* Read out this entry's data. */
+ if (!log_to_kmsg) {
+ /* If !log_to_kmsg, then store to buf. */
+ scnprintf(tmp, sizeof(tmp),
+ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
+ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
+ *head_idx, entry->max_bytes_per_interval,
+ entry->cmf_bytes, entry->total_bytes,
+ entry->rcv_bytes, entry->avg_io_latency,
+ entry->avg_io_size, entry->max_read_cnt,
+ entry->cmf_busy, entry->io_cnt,
+ entry->cmf_info, entry->timer_utilization,
+ entry->timer_interval);
+
+ /* Check for buffer overflow */
+ if ((strlen(buf) + strlen(tmp)) >= buf_len)
+ break;
+
+ /* Append entry's data to buffer */
+ strlcat(buf, tmp, buf_len);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4410 %02u: MBPI %llu Xmit %llu "
+ "Cmpl %llu Lat %llu ASz %llu Info %02u "
+ "BWUtil %u Int %u slot %u\n",
+ cnt, entry->max_bytes_per_interval,
+ entry->total_bytes, entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size, entry->cmf_info,
+ entry->timer_utilization,
+ entry->timer_interval, *head_idx);
+ }
+
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ /* Don't feed more than max_read_entries */
+ cnt++;
+ if (cnt >= max_read_entries)
+ break;
+ }
+ spin_unlock_bh(ring_lock);
+
+ return cnt;
+}
+
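A hypothetical caller of lpfc_rx_monitor_report() might look like the sketch below (the buffer size and entry count are illustrative, not values used by the driver); passing a NULL buffer routes the same entries to the kernel log instead:

/* Illustrative only: dump up to 64 ring entries into a temporary buffer. */
static void example_dump_rx_monitor(struct lpfc_hba *phba)
{
        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        u32 cnt;

        if (buf) {
                cnt = lpfc_rx_monitor_report(phba, phba->rx_monitor, buf,
                                             PAGE_SIZE, 64);
                /* cnt entries are now formatted in buf */
                kfree(buf);
        } else {
                /* NULL buf / zero length => log entries via lpfc_printf_log */
                lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, 64);
        }
}
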
+/**
* lpfc_cmf_setup - Initialize idle_stat tracking
* @phba: Pointer to HBA context object.
*
@@ -8133,19 +8376,29 @@ no_cmf:
phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
/* Allocate RX Monitor Buffer */
- if (!phba->rxtable) {
- phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
- sizeof(struct rxtable_entry),
- GFP_KERNEL);
- if (!phba->rxtable) {
+ if (!phba->rx_monitor) {
+ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
+ GFP_KERNEL);
+
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2644 Failed to alloc memory "
"for RX Monitor Buffer\n");
return -ENOMEM;
}
+
+ /* Instruct the rx_monitor object to instantiate its ring */
+ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
+ LPFC_MAX_RXMONITOR_ENTRY)) {
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2645 Failed to alloc memory "
+ "for RX Monitor's Ring\n");
+ return -ENOMEM;
+ }
}
- atomic_set(&phba->rxtable_idx_head, 0);
- atomic_set(&phba->rxtable_idx_tail, 0);
+
return 0;
}
@@ -10322,12 +10575,10 @@ static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
- int rc;
struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
lpfc_prep_embed_io(phba, lpfc_cmd);
- rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
- return rc;
+ return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
}
void
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 1ddad5b170a6..cbb1aa1cf025 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -489,7 +489,7 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
- uint16_t irq;
+ int irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
@@ -611,6 +611,8 @@ struct lpfc_vector_map_info {
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
+#define LPFC_IRQ_EMPTY 0xffffffff
+
/* Multi-XRI pool */
#define XRI_BATCH 8
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 63eba9928e4b..192d5630a44d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.5"
+#define LPFC_DRIVER_VERSION "14.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index f64ced04b912..ed1d7f7b88a3 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -245,9 +245,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
/* allocate the per cpu variable for holding */
/* the last access time stamp only if VMID is enabled */
if (!vmp->last_io_time)
- vmp->last_io_time = __alloc_percpu(sizeof(u64),
- __alignof__(struct
- lpfc_vmid));
+ vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC);
if (!vmp->last_io_time) {
hash_del(&vmp->hnode);
vmp->flag = LPFC_VMID_SLOT_FREE;
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e7efb025ed50..4d171f5c213f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -809,74 +809,3 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
kfree(vports);
}
-
-/**
- * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
- * @vport: Pointer to vport object.
- *
- * This function resets the statistical data for the vport. This function
- * is called with the host_lock held
- **/
-void
-lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->lat_data)
- memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
- sizeof(struct lpfc_scsicmd_bkt));
- }
-}
-
-
-/**
- * lpfc_alloc_bucket - Allocate data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * This function allocates data buffer required for all the FC
- * nodes of the vport to collect statistical data.
- **/
-void
-lpfc_alloc_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
-
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_ATOMIC);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0287 lpfc_alloc_bucket failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
- }
-}
-
-/**
- * lpfc_free_bucket - Free data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * Th function frees statistical data buffer of all the FC
- * nodes of the vport.
- **/
-void
-lpfc_free_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
- }
-}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index f4b8528dd2e7..fa60c146c169 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -115,8 +115,4 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
-void lpfc_vport_reset_stat_data(struct lpfc_vport *);
-void lpfc_alloc_bucket(struct lpfc_vport *);
-void lpfc_free_bucket(struct lpfc_vport *);
-
#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 157c3bdb50be..132de68c14e9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3979,7 +3979,7 @@ megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, c
app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
- return snprintf(buf, 8, "%u\n", app_hndl);
+ return sysfs_emit(buf, "%u\n", app_hndl);
}
@@ -4048,7 +4048,7 @@ megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *b
}
}
- return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+ return sysfs_emit(buf, "%d %d %d %d\n", scsi_id, logical_drv,
ldid_map, app_hndl);
}
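
The two hunks above are part of the tree-wide move from raw snprintf() in sysfs show() callbacks to sysfs_emit(), which assumes the standard PAGE_SIZE sysfs buffer and clamps the output accordingly. A generic sketch of the pattern with a made-up attribute (not taken from megaraid):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only device attribute reporting a counter. */
static ssize_t example_count_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        unsigned int count = 42;        /* placeholder for real driver state */

        /* sysfs_emit() bounds output to PAGE_SIZE and returns bytes written */
        return sysfs_emit(buf, "%u\n", count);
}
static DEVICE_ATTR_RO(example_count);
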
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f6c37a97544e..9be4ba61a076 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3174,7 +3174,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
-static int megasas_map_queues(struct Scsi_Host *shost)
+static void megasas_map_queues(struct Scsi_Host *shost)
{
struct megasas_instance *instance;
int qoff = 0, offset;
@@ -3183,7 +3183,7 @@ static int megasas_map_queues(struct Scsi_Host *shost)
instance = (struct megasas_instance *)shost->hostdata;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
offset = instance->low_latency_index_start;
@@ -3209,8 +3209,6 @@ static int megasas_map_queues(struct Scsi_Host *shost)
map->queue_offset = qoff;
blk_mq_map_queues(map);
}
-
- return 0;
}
static void megasas_aen_polling(struct work_struct *work);
@@ -4023,10 +4021,8 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
u32 mfiStatus;
u32 fw_state;
- if ((mfiStatus = instance->instancet->check_reset(instance,
- instance->reg_set)) == 1) {
+ if (instance->instancet->check_reset(instance, instance->reg_set) == 1)
return IRQ_HANDLED;
- }
mfiStatus = instance->instancet->clear_intr(instance);
if (mfiStatus == 0) {
@@ -5157,9 +5153,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
fusion->current_map_sz = ventura_map_sz;
fusion->max_map_sz = ventura_map_sz;
} else {
- fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- (instance->fw_supported_vd_count - 1));
+ fusion->old_map_sz =
+ struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap,
+ instance->fw_supported_vd_count);
fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
fusion->max_map_sz =
@@ -5792,10 +5788,10 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
{
int i;
struct fusion_context *fusion = instance->ctrl_context;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq,
+ MAX_PHYSICAL_DEVICES);
instance->use_seqnum_jbod_fp =
instance->support_seqnum_jbod_fp;
@@ -7970,7 +7966,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
struct Scsi_Host *host;
struct megasas_instance *instance;
struct fusion_context *fusion;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
@@ -8042,9 +8038,9 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) *
- (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz =
+ struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0,
+ seq, MAX_PHYSICAL_DEVICES);
for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 83f69c33b01a..da1cad1ee123 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -326,9 +326,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
else if (instance->supportmax256vd)
expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
else
- expected_size =
- (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+ expected_size = struct_size((struct MR_FW_RAID_MAP *)0,
+ ldSpanMap,
+ le16_to_cpu(pDrvRaidMap->ldCount));
if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 09c5fe37754c..6650f8c8e9b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1310,7 +1310,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
- pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
+ pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES);
cmd = megasas_get_cmd(instance);
if (!cmd) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index ce84f811e5e1..49e9a9048ee7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -942,7 +942,7 @@ struct MR_FW_RAID_MAP {
u8 reserved2[7];
struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
struct IO_REQUEST_INFO {
@@ -1053,7 +1053,7 @@ struct MR_FW_RAID_MAP_DYNAMIC {
struct MR_RAID_MAP_DESC_TABLE
raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
/* Variable Size buffer containing all data */
- u32 raid_map_desc_data[1];
+ u32 raid_map_desc_data[];
}; /* Dynamically sized RAID Map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -1148,7 +1148,7 @@ typedef struct LOG_BLOCK_SPAN_INFO {
struct MR_FW_RAID_MAP_ALL {
struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
@@ -1182,7 +1182,7 @@ struct MR_DRV_RAID_MAP {
devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
@@ -1193,7 +1193,7 @@ struct MR_DRV_RAID_MAP {
struct MR_DRV_RAID_MAP_ALL {
struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
} __packed;
@@ -1249,7 +1249,7 @@ struct MR_PD_CFG_SEQ {
struct MR_PD_CFG_SEQ_NUM_SYNC {
__le32 size;
__le32 count;
- struct MR_PD_CFG_SEQ seq[1];
+ struct MR_PD_CFG_SEQ seq[];
} __packed;
/* stream detection */
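
The megaraid changes above replace the old one-element-array sizing idiom, sizeof(struct X) + (n - 1) * sizeof(elem), with a C99 flexible array member plus struct_size(), which computes the same size while saturating instead of wrapping on integer overflow. A minimal sketch of that conversion on a made-up structure:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical variable-length record using a flexible array member. */
struct seq_table {
        u32 size;
        u32 count;
        u64 seq[];              /* was: u64 seq[1]; */
};

static struct seq_table *seq_table_alloc(u32 nr)
{
        struct seq_table *t;

        /* struct_size(t, seq, nr) == sizeof(*t) + nr * sizeof(t->seq[0]),
         * saturating to SIZE_MAX rather than wrapping on overflow.
         */
        t = kzalloc(struct_size(t, seq, nr), GFP_KERNEL);
        if (t)
                t->count = nr;
        return t;
}
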
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
index f5cdbe48c150..ef86ca46646b 100644
--- a/drivers/scsi/mpi3mr/Makefile
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -3,3 +3,4 @@ obj-m += mpi3mr.o
mpi3mr-y += mpi3mr_os.o \
mpi3mr_fw.o \
mpi3mr_app.o \
+ mpi3mr_transport.o
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 4cd9f24e544c..0a2af48915a5 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2017-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2017-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_CNFG_H
#define MPI30_CNFG_H 1
@@ -100,6 +99,7 @@ struct mpi3_config_page_header {
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
@@ -135,6 +135,16 @@ struct mpi3_config_page_header {
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_SHIFT (25)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_MASK (0x01000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_SHIFT (24)
+#define MPI3_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_WITHIN (0x00200000)
+#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
@@ -210,7 +220,7 @@ struct mpi3_man_page0 {
u8 board_rework_day;
u8 board_rework_month;
__le16 board_rework_year;
- __le64 board_revision;
+ u8 board_revision[8];
u8 e_pack_fru[16];
u8 product_name[256];
};
@@ -226,6 +236,15 @@ struct mpi3_man_page1 {
};
#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man_page2 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 reserved0c[3];
+ u8 oem_board_tracer_number[32];
+};
+#define MPI3_MAN2_PAGEVERSION (0x00)
+#define MPI3_MAN2_FLAGS_TRACER_PRESENT (0x01)
struct mpi3_man5_phy_entry {
__le64 ioc_wwid;
__le64 device_name;
@@ -338,6 +357,8 @@ struct mpi3_man7_receptacle_info {
#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_LOCATION_HOST (0x04)
+#define MPI3_MAN7_CONNECTOR_TYPE_NO_INFO (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
@@ -369,7 +390,8 @@ struct mpi3_man8_phy_info {
__le32 reserved0c;
};
-#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_HOST_PHY (0xff)
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_NOT_ASSOCIATED (0xff)
+#define MPI3_MAN8_PHY_INFO_CONNECTOR_LANE_NOT_ASSOCIATED (0xff)
#ifndef MPI3_MAN8_PHY_INFO_MAX
#define MPI3_MAN8_PHY_INFO_MAX (1)
#endif
@@ -536,6 +558,10 @@ struct mpi3_man11_bkplane_spec_non_ubm_format {
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_MASK (0x00c0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_4 (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_2 (0x0040)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_1 (0x0080)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
@@ -825,19 +851,16 @@ struct mpi3_man_page21 {
};
#define MPI3_MAN21_PAGEVERSION (0x00)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_MASK (0x80)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_ENABLED (0x80)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_DISABLED (0x00)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x60)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x20)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x40)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x08)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x08)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x01)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x01)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001)
#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
#endif
@@ -995,7 +1018,12 @@ struct mpi3_io_unit_page5 {
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
-#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAA_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_APP_CONTROLLED (0x08)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_BLOCKED (0x0c)
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
@@ -1027,7 +1055,8 @@ struct mpi3_io_unit_page8 {
u8 slots_available;
u8 current_key_encryption_algo;
u8 key_digest_hash_algo;
- __le32 reserved10[2];
+ union mpi3_version_union current_svn;
+ __le32 reserved14;
__le32 current_key[128];
union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
};
@@ -1036,6 +1065,7 @@ struct mpi3_io_unit_page8 {
#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
struct mpi3_io_unit_page9 {
@@ -1045,9 +1075,14 @@ struct mpi3_io_unit_page9 {
__le16 reserved0e;
};
-#define MPI3_IOUNIT9_PAGEVERSION (0x00)
-#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x01)
-#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_MASK (0x00000006)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_SHIFT (1)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_NONE (0x00000000)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_RECEPTACLE (0x00000002)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
struct mpi3_io_unit_page10 {
struct mpi3_config_page_header header;
u8 flags;
@@ -1090,6 +1125,57 @@ struct mpi3_io_unit_page11 {
struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX];
};
#define MPI3_IOUNIT11_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT12_BUCKET_MAX
+#define MPI3_IOUNIT12_BUCKET_MAX (1)
+#endif
+struct mpi3_iounit12_bucket {
+ u8 coalescing_depth;
+ u8 coalescing_timeout;
+ __le16 io_count_low_boundary;
+ __le32 reserved04;
+};
+struct mpi3_io_unit_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c[4];
+ u8 num_buckets;
+ u8 reserved1d[3];
+ struct mpi3_iounit12_bucket bucket[MPI3_IOUNIT12_BUCKET_MAX];
+};
+#define MPI3_IOUNIT12_PAGEVERSION (0x00)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT (8)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_8 (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_16 (0x00000100)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS (0x00000003)
+#ifndef MPI3_IOUNIT13_FUNC_MAX
+#define MPI3_IOUNIT13_FUNC_MAX (1)
+#endif
+struct mpi3_iounit13_allowed_function {
+ __le16 sub_function;
+ u8 function_code;
+ u8 fuction_flags;
+};
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_CHECK_SUBFUNCTION_ENABLED (0x01)
+struct mpi3_io_unit_page13 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_functions;
+ u8 reserved0d[3];
+ struct mpi3_iounit13_allowed_function allowed_function[MPI3_IOUNIT13_FUNC_MAX];
+};
+#define MPI3_IOUNIT13_PAGEVERSION (0x00)
+#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002)
+#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -1182,6 +1268,7 @@ struct mpi3_driver_page0 {
__le32 reserved18;
};
#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
@@ -1906,19 +1993,30 @@ struct mpi3_pcie_io_unit_page1 {
};
#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x80)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x40)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x30)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xe0000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x00000040)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x00000030)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x10)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x20)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0f)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x02)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x03)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x04)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x05)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x06)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x00000004)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x00000005)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x00000006)
#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
@@ -2169,10 +2267,7 @@ struct mpi3_device0_vd_format {
#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_MASK (0x0003)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_NONE (0x0000)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_HOST (0x0001)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_IOC (0x0002)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
union mpi3_device0_dev_spec_format {
struct mpi3_device0_sas_sata_format sas_sata_format;
struct mpi3_device0_pcie_format pcie_format;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index c29b87de8e18..64c58815988a 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2018-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2018-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IMAGE_H
#define MPI30_IMAGE_H 1
@@ -63,6 +62,9 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434d52)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index aac11c58cca9..3c03610ecfa6 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -1,13 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_INIT_H
#define MPI30_INIT_H 1
struct mpi3_scsi_io_cdb_eedp32 {
u8 cdb[20];
- __be32 primary_reference_tag;
+ __be32 primary_reference_tag;
__le16 primary_application_tag;
__le16 primary_application_tag_mask;
__le32 transfer_length;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 214e4c65e576..1c6c6730df5c 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IOC_H
#define MPI30_IOC_H 1
@@ -158,6 +157,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
struct mpi3_mgmt_passthrough_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -637,6 +637,23 @@ struct mpi3_event_data_diag_buffer_status_change {
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
+#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
+#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
+#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
+#define MPI3_PEL_LOCALE_FLAGS_CONFIGURATION (0x0040)
+#define MPI3_PEL_LOCALE_FLAGS_CONTROLER (0x0020)
+#define MPI3_PEL_LOCALE_FLAGS_SAS (0x0010)
+#define MPI3_PEL_LOCALE_FLAGS_EPACK (0x0008)
+#define MPI3_PEL_LOCALE_FLAGS_ENCLOSURE (0x0004)
+#define MPI3_PEL_LOCALE_FLAGS_PD (0x0002)
+#define MPI3_PEL_LOCALE_FLAGS_VD (0x0001)
+#define MPI3_PEL_CLASS_DEBUG (0x00)
+#define MPI3_PEL_CLASS_PROGRESS (0x01)
+#define MPI3_PEL_CLASS_INFORMATIONAL (0x02)
+#define MPI3_PEL_CLASS_WARNING (0x03)
+#define MPI3_PEL_CLASS_CRITICAL (0x04)
+#define MPI3_PEL_CLASS_FATAL (0x05)
+#define MPI3_PEL_CLASS_FAULT (0x06)
#define MPI3_PEL_CLEARTYPE_CLEAR (0x00)
#define MPI3_PEL_WAITTIME_INFINITE_WAIT (0x00)
#define MPI3_PEL_ACTION_GET_SEQNUM (0x01)
@@ -924,6 +941,7 @@ struct mpi3_ci_download_reply {
};
#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_FAILURE (0x40)
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
index 901dbd788940..b7a5df01120d 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*
*/
#ifndef MPI30_PCI_H
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index 298d895e374b..e587f77ccd68 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_SAS_H
#define MPI30_SAS_H 1
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index ba05ea57af25..9b76b9632751 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_TRANSPORT_H
#define MPI30_TRANSPORT_H 1
@@ -19,8 +18,8 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (23)
-#define MPI3_VERSION_DEV (1)
+#define MPI3_VERSION_UNIT (26)
+#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
@@ -212,6 +211,7 @@ struct mpi3_default_reply_descriptor {
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+#define MPI3_REPLY_DESCRIPT_REQUEST_QUEUE_ID_INVALID (0xffff)
struct mpi3_address_reply_descriptor {
__le64 reply_frame_address;
__le16 request_queue_ci;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0935b2e80662..def4c5e15cd8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -39,6 +39,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+#include <scsi/scsi_transport_sas.h>
#include "mpi/mpi30_transport.h"
#include "mpi/mpi30_cnfg.h"
@@ -55,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.0.0.69.0"
-#define MPI3MR_DRIVER_RELDATE "16-March-2022"
+#define MPI3MR_DRIVER_VERSION "8.2.0.3.0"
+#define MPI3MR_DRIVER_RELDATE "08-September-2022"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -97,9 +98,11 @@ extern atomic64_t event_counter;
#define MPI3MR_HOSTTAG_PEL_ABORT 3
#define MPI3MR_HOSTTAG_PEL_WAIT 4
#define MPI3MR_HOSTTAG_BLK_TMS 5
+#define MPI3MR_HOSTTAG_CFG_CMDS 6
+#define MPI3MR_HOSTTAG_TRANSPORT_CMDS 7
#define MPI3MR_NUM_DEVRMCMD 16
-#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TRANSPORT_CMDS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
@@ -115,6 +118,7 @@ extern atomic64_t event_counter;
/* command/controller interaction timeout definitions in seconds */
#define MPI3MR_INTADMCMD_TIMEOUT 60
#define MPI3MR_PORTENABLE_TIMEOUT 300
+#define MPI3MR_PORTENABLE_POLL_INTERVAL 5
#define MPI3MR_ABORTTM_TIMEOUT 60
#define MPI3MR_RESETTM_TIMEOUT 60
#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
@@ -126,6 +130,10 @@ extern atomic64_t event_counter;
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
+#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */
+
+#define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10
+
#define MPI3MR_SCMD_TIMEOUT (60 * HZ)
#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)
@@ -274,6 +282,8 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_SYSFS = 23,
MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
MPI3MR_RESET_FROM_FIRMWARE = 27,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
};
/* Queue type definitions */
@@ -421,12 +431,14 @@ struct op_reply_qinfo {
* struct mpi3mr_intr_info - Interrupt cookie information
*
* @mrioc: Adapter instance reference
+ * @os_irq: irq number
* @msix_index: MSIx index
* @op_reply_q: Associated operational reply queue
* @name: Dev name for the irq claiming device
*/
struct mpi3mr_intr_info {
struct mpi3mr_ioc *mrioc;
+ int os_irq;
u16 msix_index;
struct op_reply_qinfo *op_reply_q;
char name[MPI3MR_NAME_LENGTH];
@@ -457,16 +469,138 @@ struct mpi3mr_throttle_group_info {
atomic_t pend_large_data_sz;
};
+/* HBA port flags */
+#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
+
+/**
+ * struct mpi3mr_hba_port - HBA's port information
+ * @port_id: Port number
+ * @flags: HBA port flags
+ */
+struct mpi3mr_hba_port {
+ struct list_head list;
+ u8 port_id;
+ u8 flags;
+};
+
+/**
+ * struct mpi3mr_sas_port - Internal SAS port information
+ * @port_list: List of ports belonging to a SAS node
+ * @num_phys: Number of phys associated with port
+ * @marked_responding: used while refreshing the sas ports
+ * @lowest_phy: lowest phy ID of current sas port
+ * @phy_mask: phy_mask of current sas port
+ * @hba_port: HBA port entry
+ * @remote_identify: Attached device identification
+ * @rphy: SAS transport layer rphy object
+ * @port: SAS transport layer port object
+ * @phy_list: mpi3mr_sas_phy objects belonging to this port
+ */
+struct mpi3mr_sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ u8 marked_responding;
+ int lowest_phy;
+ u32 phy_mask;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct mpi3mr_sas_phy - Internal SAS Phy information
+ * @port_siblings: List of phys belonging to a port
+ * @identify: Phy identification
+ * @remote_identify: Attached device identification
+ * @phy: SAS transport layer Phy object
+ * @phy_id: Unique phy id within a port
+ * @handle: Firmware device handle for this phy
+ * @attached_handle: Firmware device handle for attached device
+ * @phy_belongs_to_port: Flag to indicate phy belongs to port
+ * @hba_port: HBA port entry
+ */
+struct mpi3mr_sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+ struct mpi3mr_hba_port *hba_port;
+};
+
+/**
+ * struct mpi3mr_sas_node - SAS host/expander information
+ * @list: List of sas nodes in a controller
+ * @parent_dev: Parent device class
+ * @num_phys: Number of phys belonging to sas_node
+ * @sas_address: SAS address of sas_node
+ * @handle: Firmware device handle for this sas_host/expander
+ * @sas_address_parent: SAS address of parent expander or host
+ * @enclosure_handle: Firmware handle of enclosure of this node
+ * @device_info: Capabilities of this sas_host/expander
+ * @non_responding: used to refresh the expander devices during reset
+ * @host_node: Flag to indicate this is a host_node
+ * @hba_port: HBA port entry
+ * @phy: A list of phys that make up this sas_host/expander
+ * @sas_port_list: List of internal ports of this node
+ * @rphy: sas_rphy object of this expander node
+ */
+struct mpi3mr_sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 non_responding;
+ u8 host_node;
+ struct mpi3mr_hba_port *hba_port;
+ struct mpi3mr_sas_phy *phy;
+ struct list_head sas_port_list;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct mpi3mr_enclosure_node - enclosure information
+ * @list: List of enclosures
+ * @pg0: Enclosure page 0;
+ */
+struct mpi3mr_enclosure_node {
+ struct list_head list;
+ struct mpi3_enclosure_page0 pg0;
+};
+
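As an illustration of how these node lists are meant to be used, a hypothetical lookup helper (not part of the driver) could walk sas_expander_list under sas_node_lock like this:

/* Illustrative only: find an expander node by SAS address. */
static struct mpi3mr_sas_node *
example_find_expander(struct mpi3mr_ioc *mrioc, u64 sas_address)
{
        struct mpi3mr_sas_node *node, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&mrioc->sas_node_lock, flags);
        list_for_each_entry(node, &mrioc->sas_expander_list, list) {
                if (node->sas_address == sas_address) {
                        found = node;
                        break;
                }
        }
        spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);

        return found;
}
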
/**
* struct tgt_dev_sas_sata - SAS/SATA device specific
* information cached from firmware given data
*
* @sas_address: World wide unique SAS address
+ * @sas_address_parent: Sas address of parent expander or host
* @dev_info: Device information bits
+ * @phy_id: Phy identifier provided in device page 0
+ * @attached_phy_id: Attached phy identifier provided in device page 0
+ * @sas_transport_attached: Is this device exposed to transport
+ * @pend_sas_rphy_add: Flag indicating the device is in the process of being added
+ * @hba_port: HBA port entry
+ * @rphy: SAS transport layer rphy object
*/
struct tgt_dev_sas_sata {
u64 sas_address;
+ u64 sas_address_parent;
u16 dev_info;
+ u8 phy_id;
+ u8 attached_phy_id;
+ u8 sas_transport_attached;
+ u8 pend_sas_rphy_add;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_rphy *rphy;
};
/**
@@ -531,12 +665,16 @@ union _form_spec_inf {
* @slot: Slot number
* @encl_handle: FW enclosure handle
* @perst_id: FW assigned Persistent ID
+ * @devpg0_flag: Device Page0 flag
* @dev_type: SAS/SATA/PCIE device type
* @is_hidden: Should be exposed to upper layers or not
* @host_exposed: Already exposed to host or not
+ * @io_unit_port: IO Unit port ID
+ * @non_stl: Is this device not to be attached with SAS TL
* @io_throttle_enabled: I/O throttling needed or not
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
+ * @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
* @ref_count: Reference count
*/
@@ -548,12 +686,16 @@ struct mpi3mr_tgt_dev {
u16 slot;
u16 encl_handle;
u16 perst_id;
+ u16 devpg0_flag;
u8 dev_type;
u8 is_hidden;
u8 host_exposed;
+ u8 io_unit_port;
+ u8 non_stl;
u8 io_throttle_enabled;
u16 q_depth;
u64 wwid;
+ u64 enclosure_logical_id;
union _form_spec_inf dev_spec;
struct kref ref_count;
};
@@ -679,6 +821,21 @@ struct mpi3mr_drv_cmd {
struct mpi3mr_drv_cmd *drv_cmd);
};
+/**
+ * struct dma_memory_desc - memory descriptor structure to store
+ * virtual address, dma address and size for any generic dma
+ * memory allocations in the driver.
+ *
+ * @size: buffer size
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct dma_memory_desc {
+ u32 size;
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
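A descriptor like dma_memory_desc is normally filled from a single coherent allocation and torn down symmetrically; a minimal sketch, assuming a valid struct device pointer and with error handling simplified for illustration:

#include <linux/dma-mapping.h>

/* Allocate a coherent buffer and record it in the descriptor. */
static int example_desc_alloc(struct device *dev, struct dma_memory_desc *desc,
                              u32 size)
{
        desc->size = size;
        desc->addr = dma_alloc_coherent(dev, desc->size, &desc->dma_addr,
                                        GFP_KERNEL);
        return desc->addr ? 0 : -ENOMEM;
}

static void example_desc_free(struct device *dev, struct dma_memory_desc *desc)
{
        if (desc->addr) {
                dma_free_coherent(dev, desc->size, desc->addr, desc->dma_addr);
                desc->addr = NULL;
        }
}
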
/**
* struct chain_element - memory descriptor structure to store
@@ -756,6 +913,7 @@ struct scmd_priv {
* @num_op_reply_q: Number of operational reply queues
* @op_reply_qinfo: Operational reply queue info pointer
* @init_cmds: Command tracker for initialization commands
+ * @cfg_cmds: Command tracker for configuration requests
* @facts: Cached IOC facts data
* @op_reply_desc_sz: Operational reply descriptor size
* @num_reply_bufs: Number of reply buffers allocated
@@ -792,6 +950,7 @@ struct scmd_priv {
* @scan_started: Async scan started
 * @scan_failed: Async scan failed
* @stop_drv_processing: Stop all command processing
+ * @device_refresh_on: Don't process the events until devices are refreshed
* @max_host_ios: Maximum host I/O count
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
@@ -854,6 +1013,17 @@ struct scmd_priv {
* @io_throttle_low: I/O size to stop throttle in 512b blocks
* @num_io_throttle_group: Maximum number of throttle groups
* @throttle_groups: Pointer to throttle group info structures
+ * @cfg_page: Default memory for configuration pages
+ * @cfg_page_dma: Configuration page DMA address
+ * @cfg_page_sz: Default configuration page memory size
+ * @sas_transport_enabled: SAS transport enabled or not
+ * @scsi_device_channel: Channel ID for SCSI devices
+ * @transport_cmds: Command tracker for SAS transport commands
+ * @sas_hba: SAS node for the controller
+ * @sas_expander_list: SAS node list of expanders
+ * @sas_node_lock: Lock to protect SAS node list
+ * @hba_port_table_list: List of HBA Ports
+ * @enclosure_list: List of Enclosure objects
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -904,6 +1074,7 @@ struct mpi3mr_ioc {
struct op_reply_qinfo *op_reply_qinfo;
struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_drv_cmd cfg_cmds;
struct mpi3mr_ioc_facts facts;
u16 op_reply_desc_sz;
@@ -948,6 +1119,7 @@ struct mpi3mr_ioc {
u8 scan_started;
u16 scan_failed;
u8 stop_drv_processing;
+ u8 device_refresh_on;
u16 max_host_ios;
spinlock_t tgtdev_lock;
@@ -1025,6 +1197,19 @@ struct mpi3mr_ioc {
u32 io_throttle_low;
u16 num_io_throttle_group;
struct mpi3mr_throttle_group_info *throttle_groups;
+
+ void *cfg_page;
+ dma_addr_t cfg_page_dma;
+ u16 cfg_page_sz;
+
+ u8 sas_transport_enabled;
+ u8 scsi_device_channel;
+ struct mpi3mr_drv_cmd transport_cmds;
+ struct mpi3mr_sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head hba_port_table_list;
+ struct list_head enclosure_list;
};
/**
@@ -1149,6 +1334,67 @@ int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
u16 event_data_size);
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
extern const struct attribute_group *mpi3mr_host_groups[];
extern const struct attribute_group *mpi3mr_dev_groups[];
+
+extern struct sas_function_template mpi3mr_transport_functions;
+extern struct scsi_transport_template *mpi3mr_transport_template;
+
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz);
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
+
+u8 mpi3mr_is_expander_device(u16 device_info);
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port);
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle);
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id);
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc);
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port);
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy);
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add);
+void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc);
+void mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc);
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
index 2464c400a5a4..ee6edd8322e6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_debug.h
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -23,9 +23,13 @@
#define MPI3_DEBUG_RESET 0x00000020
#define MPI3_DEBUG_SCSI_ERROR 0x00000040
#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_CFG_ERROR 0x00000100
+#define MPI3_DEBUG_TRANSPORT_ERROR 0x00000200
#define MPI3_DEBUG_BSG_ERROR 0x00008000
#define MPI3_DEBUG_BSG_INFO 0x00010000
#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG_CFG_INFO 0x00040000
+#define MPI3_DEBUG_TRANSPORT_INFO 0x00080000
#define MPI3_DEBUG 0x01000000
#define MPI3_DEBUG_SG 0x02000000
@@ -122,6 +126,29 @@
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
+#define dprint_cfg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+#define dprint_transport_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_transport_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
#endif /* MPT3SAS_DEBUG_H_INCLUDED */
/**
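The new dprint_cfg_*/dprint_transport_* macros gate on bits in the adapter's logging_level and print with the adapter's name prefix, so call sites just pass the adapter instance and a printf-style format. Hypothetical call sites (handle and sas_address are placeholders, not taken from the driver):

/* Assumes the corresponding MPI3_DEBUG_* bits are set in mrioc->logging_level. */
static void example_log(struct mpi3mr_ioc *mrioc, u16 handle, u64 sas_address)
{
        dprint_cfg_err(mrioc, "config page read failed, handle(0x%04x)\n",
                       handle);
        dprint_transport_info(mrioc, "expander 0x%016llx discovered\n",
                              (unsigned long long)sas_address);
}
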
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0866dfd43318..0c4aabaefdcc 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -244,6 +244,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
desc = "Enclosure Device Status Change";
break;
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ desc = "Enclosure Added";
+ break;
case MPI3_EVENT_HARD_RESET_RECEIVED:
desc = "Hard Reset Received";
break;
@@ -299,6 +302,8 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
switch (host_tag) {
case MPI3MR_HOSTTAG_INITCMDS:
return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_CFG_CMDS:
+ return &mrioc->cfg_cmds;
case MPI3MR_HOSTTAG_BSG_CMDS:
return &mrioc->bsg_cmds;
case MPI3MR_HOSTTAG_BLK_TMS:
@@ -307,6 +312,8 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
return &mrioc->pel_abort_cmd;
case MPI3MR_HOSTTAG_PEL_WAIT:
return &mrioc->pel_cmds;
+ case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
+ return &mrioc->transport_cmds;
case MPI3MR_HOSTTAG_INVALID:
if (def_reply && def_reply->function ==
MPI3_FUNCTION_EVENT_NOTIFICATION)
@@ -424,6 +431,9 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
return 0;
do {
+ if (mrioc->unrecoverable)
+ break;
+
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
if (reply_dma)
@@ -509,6 +519,9 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
+ if (mrioc->unrecoverable)
+ break;
+
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
op_req_q = &mrioc->req_qinfo[req_q_idx];
@@ -530,6 +543,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+#ifndef CONFIG_PREEMPT_RT
/*
* Exit completion loop to avoid CPU lockup
* Ensure remaining completion happens from threaded ISR.
@@ -538,7 +552,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
op_reply_q->enable_irq_poll = true;
break;
}
-
+#endif
} while (1);
writel(reply_ci,
@@ -569,7 +583,8 @@ int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
mrioc = (struct mpi3mr_ioc *)shost->hostdata;
- if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
+ mrioc->unrecoverable))
return 0;
num_entries = mpi3mr_process_op_reply_q(mrioc,
@@ -607,18 +622,16 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
return IRQ_NONE;
}
+#ifndef CONFIG_PREEMPT_RT
+
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
- struct mpi3mr_ioc *mrioc;
- u16 midx;
int ret;
if (!intr_info)
return IRQ_NONE;
- mrioc = intr_info->mrioc;
- midx = intr_info->msix_index;
/* Call primary ISR routine */
ret = mpi3mr_isr_primary(irq, privdata);
@@ -633,7 +646,7 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
!atomic_read(&intr_info->op_reply_q->pend_ios))
return ret;
- disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+ disable_irq_nosync(intr_info->os_irq);
return IRQ_WAKE_THREAD;
}
@@ -663,7 +676,7 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
/* Poll for pending IOs completions */
do {
- if (!mrioc->intr_enabled)
+ if (!mrioc->intr_enabled || mrioc->unrecoverable)
break;
if (!midx)
@@ -679,11 +692,13 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
(num_op_reply < mrioc->max_host_ios));
intr_info->op_reply_q->enable_irq_poll = false;
- enable_irq(pci_irq_vector(mrioc->pdev, midx));
+ enable_irq(intr_info->os_irq);
return IRQ_HANDLED;
}
+#endif
+
/**
* mpi3mr_request_irq - Request IRQ and register ISR
* @mrioc: Adapter instance reference
@@ -706,14 +721,20 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
mrioc->driver_name, mrioc->id, index);
+#ifndef CONFIG_PREEMPT_RT
retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+#else
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
+ NULL, IRQF_SHARED, intr_info->name, intr_info);
+#endif
if (retval) {
ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
intr_info->name, pci_irq_vector(pdev, index));
return retval;
}
+ intr_info->os_irq = pci_irq_vector(pdev, index);
return retval;
}
@@ -907,6 +928,8 @@ static const struct {
{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
+ { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
+ { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
};
/**
@@ -1130,6 +1153,13 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
return -EPERM;
}
+ if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
+ ioc_err(mrioc,
+ "critical error: multipath capability is enabled at the\n"
+ "\tcontroller while sas transport support is enabled at the\n"
+ "\tdriver, please reboot the system or reload the driver\n");
+
dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
if (mrioc->facts.max_devhandle % 8)
dev_handle_bitmap_sz++;
@@ -1194,6 +1224,14 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
msleep(100);
} while (--timeout);
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present while waiting to reset\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+
ioc_state = mpi3mr_get_iocstate(mrioc);
ioc_info(mrioc,
"controller is in %s state after waiting to reset\n",
@@ -1251,6 +1289,13 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
mpi3mr_iocstate_name(ioc_state));
return 0;
}
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present at the bringup\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
msleep(100);
} while (--timeout);
@@ -1259,6 +1304,7 @@ out_failed:
ioc_err(mrioc,
"failed to bring to ready state, current state: %s\n",
mpi3mr_iocstate_name(ioc_state));
+out_device_not_present:
return retval;
}
@@ -2163,9 +2209,13 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
pi = 0;
op_req_q->pi = pi;
+#ifndef CONFIG_PREEMPT_RT
if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
> MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+#else
+ atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
+#endif
writel(op_req_q->pi,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
@@ -2193,6 +2243,17 @@ void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
{
u32 ioc_status, host_diagnostic, timeout;
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "controller is unrecoverable\n");
+ return;
+ }
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present\n");
+ return;
+ }
+
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
@@ -2384,9 +2445,21 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
u32 fault, host_diagnostic, ioc_status;
u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
- if (mrioc->reset_in_progress || mrioc->unrecoverable)
+ if (mrioc->reset_in_progress)
return;
+ if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
+ ioc_err(mrioc, "watchdog could not detect the controller\n");
+ mrioc->unrecoverable = 1;
+ }
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc,
+ "flush pending commands for unrecoverable controller\n");
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ return;
+ }
+
if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
@@ -2426,11 +2499,12 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
mrioc->diagsave_timeout = 0;
switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
- ioc_info(mrioc,
+ ioc_warn(mrioc,
"controller requires system power cycle, marking controller as unrecoverable\n");
mrioc->unrecoverable = 1;
- return;
+ goto schedule_work;
case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
return;
case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
@@ -2853,6 +2927,10 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->bsg_cmds.reply)
goto out_failed;
+ mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->transport_cmds.reply)
+ goto out_failed;
+
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
GFP_KERNEL);
@@ -3362,10 +3440,13 @@ out_failed:
static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd)
{
- drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
- mrioc->scan_failed = drv_cmd->ioc_status;
mrioc->scan_started = 0;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ else
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
}
/**
@@ -3447,6 +3528,7 @@ static const struct {
char *name;
} mpi3mr_capabilities[] = {
{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+ { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
};
/**
@@ -3657,6 +3739,7 @@ static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
@@ -3727,6 +3810,14 @@ retry_init:
mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
MPI3MR_HOST_IOS_KDUMP);
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
+ mrioc->sas_transport_enabled = 1;
+ mrioc->scsi_device_channel = 1;
+ mrioc->shost->max_channel = 1;
+ mrioc->shost->transportt = mpi3mr_transport_template;
+ }
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -3738,6 +3829,14 @@ retry_init:
mpi3mr_print_ioc_info(mrioc);
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page)
+ goto out_failed_noretry;
+
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+
retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
if (retval) {
ioc_err(mrioc,
@@ -3795,8 +3894,7 @@ retry_init:
if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
dprint_init(mrioc, "allocating memory for throttle groups\n");
sz = sizeof(struct mpi3mr_throttle_group_info);
- mrioc->throttle_groups = (struct mpi3mr_throttle_group_info *)
- kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
if (!mrioc->throttle_groups)
goto out_failed_noretry;
}
@@ -3845,8 +3943,12 @@ int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
int retval = 0;
u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
+ u32 pe_timeout, ioc_status;
retry_init:
+ pe_timeout =
+ (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
+
dprint_reset(mrioc, "bringing up the controller to ready state\n");
retval = mpi3mr_bring_ioc_ready(mrioc);
if (retval) {
@@ -3936,12 +4038,50 @@ retry_init:
goto out_failed;
}
+ mrioc->device_refresh_on = 1;
+ mpi3mr_add_event_wait_for_device_refresh(mrioc);
+
ioc_info(mrioc, "sending port enable\n");
- retval = mpi3mr_issue_port_enable(mrioc, 0);
+ retval = mpi3mr_issue_port_enable(mrioc, 1);
if (retval) {
ioc_err(mrioc, "failed to issue port enable\n");
goto out_failed;
}
+ do {
+ ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
+ if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
+ break;
+ if (!pci_device_is_present(mrioc->pdev))
+ mrioc->unrecoverable = 1;
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ }
+ } while (--pe_timeout);
+
+ if (!pe_timeout) {
+ ioc_err(mrioc, "port enable timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ } else if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable completed successfully\n");
ioc_info(mrioc, "controller %s completed successfully\n",
(is_resume)?"resume":"re-initialization");
@@ -4042,6 +4182,8 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
sizeof(*mrioc->pel_cmds.reply));
memset(mrioc->pel_abort_cmd.reply, 0,
sizeof(*mrioc->pel_abort_cmd.reply));
+ memset(mrioc->transport_cmds.reply, 0,
+ sizeof(*mrioc->transport_cmds.reply));
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
memset(mrioc->dev_rmhs_cmds[i].reply, 0,
sizeof(*mrioc->dev_rmhs_cmds[i].reply));
@@ -4102,6 +4244,8 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
u16 i;
struct mpi3mr_intr_info *intr_info;
+ mpi3mr_free_enclosure_list(mrioc);
+
if (mrioc->sense_buf_pool) {
if (mrioc->sense_buf)
dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
@@ -4187,6 +4331,9 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
kfree(mrioc->chain_bitmap);
mrioc->chain_bitmap = NULL;
+ kfree(mrioc->transport_cmds.reply);
+ mrioc->transport_cmds.reply = NULL;
+
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
kfree(mrioc->dev_rmhs_cmds[i].reply);
mrioc->dev_rmhs_cmds[i].reply = NULL;
@@ -4355,13 +4502,17 @@ static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
*
* Return: Nothing.
*/
-static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_drv_cmd *cmdptr;
u8 i;
cmdptr = &mrioc->init_cmds;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->cfg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
cmdptr = &mrioc->bsg_cmds;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
cmdptr = &mrioc->host_tm_cmds;
@@ -4383,6 +4534,8 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
cmdptr = &mrioc->pel_abort_cmd;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ cmdptr = &mrioc->transport_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
/**
@@ -4681,6 +4834,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "controller reset is triggered by %s\n",
mpi3mr_reset_rc_name(reset_reason));
+ mrioc->device_refresh_on = 0;
mrioc->reset_in_progress = 1;
mrioc->stop_bsgs = 1;
mrioc->prev_reset_result = -1;
@@ -4739,6 +4893,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_flush_host_io(mrioc);
mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+
if (mrioc->prepare_for_reset) {
mrioc->prepare_for_reset = 0;
mrioc->prepare_for_reset_timeout_counter = 0;
@@ -4750,7 +4906,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mrioc->name, reset_reason);
goto out;
}
- ssleep(10);
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
out:
if (!retval) {
@@ -4762,7 +4918,8 @@ out:
mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
}
- mpi3mr_rfresh_tgtdevs(mrioc);
+ mrioc->device_refresh_on = 0;
+
mrioc->ts_update_counter = 0;
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
if (mrioc->watchdog_work_q)
@@ -4776,9 +4933,11 @@ out:
} else {
mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
retval = -1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
}
mrioc->prev_reset_result = retval;
mutex_unlock(&mrioc->reset_mutex);
@@ -4786,3 +4945,836 @@ out:
((retval == 0) ? "successful" : "failed"));
return retval;
}
+
+
+/**
+ * mpi3mr_free_config_dma_memory - free memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: memory descriptor structure
+ *
+ * Check whether the size of the buffer specified by the memory
+ * descriptor is greater than the default page size; if so, free
+ * the memory pointed to by the descriptor.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+}
+
+/**
+ * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: Memory descriptor to hold dma memory info
+ *
+ * This function allocates new dmaable memory or provides the
+ * default config page dmaable memory based on the memory size
+ * described by the descriptor.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if (mem_desc->size > mrioc->cfg_page_sz) {
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
+ if (!mem_desc->addr)
+ return -ENOMEM;
+ } else {
+ mem_desc->addr = mrioc->cfg_page;
+ mem_desc->dma_addr = mrioc->cfg_page_dma;
+ memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
+ }
+ return 0;
+}
+
+/**
+ * mpi3mr_post_cfg_req - Issue config requests and wait
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting an MPI3 configuration request to
+ * the firmware. This blocks for the completion of the request for
+ * timeout seconds and, if the request times out, faults the
+ * controller with the proper reason code.
+ *
+ * On successful completion of the request this function returns the
+ * ioc_status reported by the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->cfg_cmds.mutex);
+ if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending config request failed due to command in use\n");
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+ goto out;
+ }
+ mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->cfg_cmds.is_waiting = 1;
+ mrioc->cfg_cmds.callback = NULL;
+ mrioc->cfg_cmds.ioc_status = 0;
+ mrioc->cfg_cmds.ioc_loginfo = 0;
+
+ cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
+ cfg_req->function = MPI3_FUNCTION_CONFIG;
+
+ init_completion(&mrioc->cfg_cmds.done);
+ dprint_cfg_info(mrioc, "posting config request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
+ "mpi3_cfg_req");
+ retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting config request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
+ if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
+ ioc_err(mrioc, "config request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_cfg_err(mrioc,
+ "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
+
+out_unlock:
+ mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_cfg_req - config page request processor
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @cfg_hdr: Configuration page header
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ * @cfg_buf: Memory pointer to copy config page or header
+ * @cfg_buf_sz: Size of the memory to get config page or header
+ *
+ * This is the handler for config page read, write and config page
+ * header read operations.
+ *
+ * This function expects the cfg_req to be populated with the page
+ * type, page number and action for the header read, and with the
+ * page address for all other operations.
+ *
+ * The cfg_hdr can be passed as NULL when reading the required header
+ * details; for read/write pages the cfg_hdr should point to a valid
+ * configuration page header.
+ *
+ * This allocates dmaable memory based on the size of the config
+ * buffer and sets the SGE of the cfg_req.
+ *
+ * For write actions, the config page data has to be passed in
+ * cfg_buf and the size of the data has to be given in cfg_buf_sz.
+ *
+ * For read/header actions, on successful completion of the request
+ * with a successful ioc_status, the data is copied into cfg_buf,
+ * limited to the minimum of the actual page size and cfg_buf_sz.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req,
+ struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
+ void *cfg_buf, u32 cfg_buf_sz)
+{
+ struct dma_memory_desc mem_desc;
+ int retval = -1;
+ u8 invalid_action = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
+
+ if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
+ mem_desc.size = sizeof(struct mpi3_config_page_header);
+ else {
+ if (!cfg_hdr) {
+ ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number);
+ goto out;
+ }
+ switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
+ case MPI3_CONFIG_PAGEATTR_READ_ONLY:
+ if (cfg_req->action
+ != MPI3_CONFIG_ACTION_READ_CURRENT)
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
+ if ((cfg_req->action ==
+ MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
+ (cfg_req->action ==
+ MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_PERSISTENT:
+ default:
+ break;
+ }
+ if (invalid_action) {
+ ioc_err(mrioc,
+ "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number, cfg_hdr->page_attribute);
+ goto out;
+ }
+ mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
+ cfg_req->page_length = cfg_hdr->page_length;
+ cfg_req->page_version = cfg_hdr->page_version;
+ }
+ if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
+ goto out;
+
+ mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
+ mem_desc.dma_addr);
+
+ if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
+ (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer to be written\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+ if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
+ goto out;
+
+ retval = 0;
+ if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer read\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+out:
+ mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
+ return retval;
+}
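
The page accessors that follow all wrap mpi3mr_process_cfg_req() in the same two-step shape: a PAGE_HEADER read to learn the page length and version, then a READ_CURRENT (or a write action) of the full page. As a rough illustration only, and not part of the patch, a caller could reuse the same helper just to discover a page's size before allocating a buffer for it; the function name below is hypothetical and the constants are the ones used by the helpers in this file.

static int example_get_cfg_page_size(struct mpi3mr_ioc *mrioc,
	u8 page_type, u8 page_number, u16 *bytes)
{
	struct mpi3_config_page_header cfg_hdr;
	struct mpi3_config_request cfg_req;
	u16 ioc_status = 0;

	memset(&cfg_hdr, 0, sizeof(cfg_hdr));
	memset(&cfg_req, 0, sizeof(cfg_req));

	/* header read: page_address stays zero, as in the accessors below */
	cfg_req.function = MPI3_FUNCTION_CONFIG;
	cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
	cfg_req.page_type = page_type;
	cfg_req.page_number = page_number;

	if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
	    MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr)))
		return -1;
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS)
		return -1;

	/* page_length is in dwords, as used by mpi3mr_process_cfg_req() */
	*bytes = le16_to_cpu(cfg_hdr.page_length) * 4;
	return 0;
}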
+
+/**
+ * mpi3mr_cfg_get_dev_pg0 - Read current device page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @dev_pg0: Pointer to return device page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for the config page read of a specific device
+ * page0. The ioc_status has the controller-returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the page
+ * read succeeded; that is the caller's responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(dev_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "device page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
+ (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
+ ioc_err(mrioc, "device page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
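
A minimal usage sketch, not part of the patch: read device page0 for a known device handle and pick up its WWID. The form constant MPI3_DEVICE_PGAD_FORM_HANDLE is assumed from the MPI3 headers and is not defined in this hunk; the function name is hypothetical.

static int example_read_dev_wwid(struct mpi3mr_ioc *mrioc, u16 handle,
	u64 *wwid)
{
	struct mpi3_device_page0 dev_pg0;
	u16 ioc_status = 0;

	if (mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
	    sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))
		return -1;

	/* the helper leaves the ioc_status check to the caller */
	if (ioc_status != MPI3_IOCSTATUS_SUCCESS)
		return -1;

	*wwid = le64_to_cpu(dev_pg0.wwid);
	return 0;
}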
+
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg0: Pointer to return SAS Phy page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for the config page read of a specific SAS Phy
+ * page0. The ioc_status has the controller-returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the page
+ * read succeeded; that is the caller's responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas phy page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg1: Pointer to return SAS Phy page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for the config page read of a specific SAS Phy
+ * page1. The ioc_status has the controller-returned ioc_status.
+ * This routine doesn't check ioc_status to decide whether the page
+ * read succeeded; that is the caller's responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas phy page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg0: Pointer to return SAS Expander page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for the config page read of a specific SAS
+ * Expander page0. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
+ ioc_err(mrioc, "expander page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg1: Pointer to return SAS Expander page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for the config page read of a specific SAS
+ * Expander page1. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
+ ioc_err(mrioc, "expander page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @encl_pg0: Pointer to return Enclosure page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for the config page read of a specific
+ * Enclosure page0. The ioc_status has the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(encl_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "enclosure page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
+ (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
+ ioc_err(mrioc, "enclosure page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page read of the SAS IO Unit
+ * page0. This routine checks ioc_status to decide whether the page
+ * read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page0 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page0 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page read of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the page
+ * read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page write of the SAS IO Unit
+ * page1. This routine checks ioc_status to decide whether the page
+ * write succeeded or not. This modifies both the current and the
+ * persistent page.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write current failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
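
As an illustrative sketch only, not part of the patch: a read-modify-write of SAS IO Unit page1 through the get/set helpers above. The page size is assumed to have been computed by the caller (the page carries per-phy data), and the modification step is left as a placeholder; the function name is hypothetical.

static int example_update_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
	u16 pg_sz)
{
	struct mpi3_sas_io_unit_page1 *pg1;
	int rc = -1;

	pg1 = kzalloc(pg_sz, GFP_KERNEL);
	if (!pg1)
		return -ENOMEM;

	if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, pg1, pg_sz))
		goto out;

	/* ... modify the fields of interest in *pg1 here ... */

	/* writes both the current and the persistent copy of the page */
	rc = mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, pg1, pg_sz);
out:
	kfree(pg1);
	return rc;
}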
+
+/**
+ * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
+ * @mrioc: Adapter instance reference
+ * @driver_pg1: Pointer to return Driver page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page read of the Driver page1.
+ * This routine checks ioc_status to decide whether the page read
+ * succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(driver_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "driver page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
+ ioc_err(mrioc, "driver page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index bfa1165e23b6..f77ee4051b00 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -40,6 +40,8 @@ static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE)
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -422,6 +424,8 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
tgt_priv->io_throttle_enabled = 0;
tgt_priv->io_divert = 0;
tgt_priv->throttle_group = NULL;
+ if (tgtdev->host_exposed)
+ atomic_set(&tgt_priv->block_io, 1);
}
}
}
@@ -579,6 +583,39 @@ void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
+ * @mrioc: Adapter instance reference
+ *
+ * This function waits for the currently running IO poll threads to
+ * exit and then flushes all host I/Os and any internal pending
+ * commands. This is executed after the controller is marked as
+ * unrecoverable.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+ int i;
+
+ if (!mrioc->unrecoverable)
+ return;
+
+ if (mrioc->op_reply_qinfo) {
+ for (i = 0; i < mrioc->num_queues; i++) {
+ while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
+ udelay(500);
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ }
+ }
+ mrioc->flush_io_count = 0;
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+}
+
+/**
* mpi3mr_alloc_tgtdev - target device allocator
*
* Allocate target device instance and initialize the reference
@@ -796,7 +833,7 @@ static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
*
* Return: None.
*/
-static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
bool device_add)
{
ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
@@ -816,7 +853,7 @@ static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
*
* Return: 0 on success, non zero on failure.
*/
-static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
struct mpi3mr_tgt_dev *tgtdev)
{
struct mpi3mr_stgt_priv_data *tgt_priv;
@@ -825,22 +862,29 @@ static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
+ atomic_set(&tgt_priv->block_io, 0);
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
}
- if (tgtdev->starget) {
- if (mrioc->current_event)
- mrioc->current_event->pending_at_sml = 1;
- scsi_remove_target(&tgtdev->starget->dev);
- tgtdev->host_exposed = 0;
- if (mrioc->current_event) {
- mrioc->current_event->pending_at_sml = 0;
- if (mrioc->current_event->discard) {
- mpi3mr_print_device_event_notice(mrioc, false);
- return;
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ if (tgtdev->starget) {
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc,
+ false);
+ return;
+ }
}
}
- }
+ } else
+ mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
+
ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
@@ -862,21 +906,25 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev;
+ if (mrioc->reset_in_progress)
+ return -1;
+
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (!tgtdev) {
retval = -1;
goto out;
}
- if (tgtdev->is_hidden) {
+ if (tgtdev->is_hidden || tgtdev->host_exposed) {
retval = -1;
goto out;
}
- if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl){
tgtdev->host_exposed = 1;
if (mrioc->current_event)
mrioc->current_event->pending_at_sml = 1;
- scsi_scan_target(&mrioc->shost->shost_gendev, 0,
- tgtdev->perst_id,
+ scsi_scan_target(&mrioc->shost->shost_gendev,
+ mrioc->scsi_device_channel, tgtdev->perst_id,
SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (!tgtdev->starget)
tgtdev->host_exposed = 0;
@@ -887,7 +935,8 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
goto out;
}
}
- }
+ } else
+ mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
@@ -1018,18 +1067,29 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
{
u16 flags = 0;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
u8 prot_mask = 0;
tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->io_unit_port = dev_pg0->io_unit_port;
tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
tgtdev->slot = le16_to_cpu(dev_pg0->slot);
tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+ tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);
+
+ if (tgtdev->encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ tgtdev->encl_handle);
+ if (enclosure_dev)
+ tgtdev->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+
+ flags = tgtdev->devpg0_flag;
- flags = le16_to_cpu(dev_pg0->flags);
tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
if (is_added == true)
@@ -1045,6 +1105,8 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
scsi_tgt_priv_data->io_throttle_enabled =
tgtdev->io_throttle_enabled;
+ if (is_added == true)
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
}
switch (dev_pg0->access_status) {
@@ -1068,12 +1130,25 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
tgtdev->dev_spec.sas_sata_inf.sas_address =
le64_to_cpu(sasinf->sas_address);
+ tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
+ tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
+ sasinf->attached_phy_identifier;
if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
tgtdev->is_hidden = 1;
else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
tgtdev->is_hidden = 1;
+
+ if (((tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
+ && (tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
+ (tgtdev->parent_handle == 0xFFFF))
+ tgtdev->non_stl = 1;
+ if (tgtdev->dev_spec.sas_sata_inf.hba_port)
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
+ dev_pg0->io_unit_port;
break;
}
case MPI3_DEVICE_DEVFORM_PCIE:
@@ -1106,6 +1181,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
if (!mrioc->shost)
break;
prot_mask = scsi_host_get_prot(mrioc->shost);
@@ -1129,6 +1205,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
tgtdev->dev_spec.vd_inf.tg_high =
le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
@@ -1258,6 +1335,135 @@ out:
}
/**
+ * mpi3mr_free_enclosure_list - release enclosures
+ * @mrioc: Adapter instance reference
+ *
+ * Free the memory allocated during enclosure add.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
+
+ list_for_each_entry_safe(enclosure_dev,
+ enclosure_dev_next, &mrioc->enclosure_list, list) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+}
+
+/**
+ * mpi3mr_enclosure_find_by_handle - enclosure search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the enclosure
+ *
+ * This searches for the enclosure device based on the handle and
+ * returns the enclosure object.
+ *
+ * Return: Enclosure object reference or NULL
+ */
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
+
+ list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
+ * @mrioc: Adapter instance reference
+ * @encl_pg0: Enclosure page 0.
+ * @is_added: Added event or not
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
+{
+ char *reason_str = NULL;
+
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ if (is_added)
+ reason_str = "enclosure added";
+ else
+ reason_str = "enclosure dev status changed";
+
+ ioc_info(mrioc,
+ "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
+ reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
+ (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
+ ioc_info(mrioc,
+ "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
+ le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
+ le16_to_cpu(encl_pg0->flags),
+ ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the enclosure device status or enclosure
+ * add event if logging is enabled, and adds or removes the enclosure
+ * from the controller's internal list of enclosures.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ struct mpi3_enclosure_page0 *encl_pg0;
+ u16 encl_handle;
+ u8 added, present;
+
+ encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
+ added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
+ mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
+
+ encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
+ present = ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
+
+ if (encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ encl_handle);
+ if (!enclosure_dev && present) {
+ enclosure_dev =
+ kzalloc(sizeof(struct mpi3mr_enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev)
+ return;
+ list_add_tail(&enclosure_dev->list,
+ &mrioc->enclosure_list);
+ }
+ if (enclosure_dev) {
+ if (!present) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ } else
+ memcpy(&enclosure_dev->pg0, encl_pg0,
+ sizeof(enclosure_dev->pg0));
+
+ }
+}
+
+/**
* mpi3mr_sastopochg_evt_debug - SASTopoChange details
* @mrioc: Adapter instance reference
* @event_data: SAS topology change list event data
@@ -1296,8 +1502,9 @@ mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "%s :sas topology change: (%s)\n",
__func__, status_str);
ioc_info(mrioc,
- "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
__func__, le16_to_cpu(event_data->expander_dev_handle),
+ event_data->io_unit_port,
le16_to_cpu(event_data->enclosure_handle),
event_data->start_phy_num, event_data->num_entries);
for (i = 0; i < event_data->num_entries; i++) {
@@ -1355,9 +1562,30 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
int i;
u16 handle;
u8 reason_code;
+ u64 exp_sas_address = 0, parent_sas_address = 0;
+ struct mpi3mr_hba_port *hba_port = NULL;
struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_sas_node *sas_expander = NULL;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate, parent_phy_number;
mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+ if (mrioc->sas_transport_enabled) {
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc,
+ event_data->io_unit_port);
+ if (le16_to_cpu(event_data->expander_dev_handle)) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
+ le16_to_cpu(event_data->expander_dev_handle));
+ if (sas_expander) {
+ exp_sas_address = sas_expander->sas_address;
+ hba_port = sas_expander->hba_port;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ parent_sas_address = exp_sas_address;
+ } else
+ parent_sas_address = mrioc->sas_hba.sas_address;
+ }
for (i = 0; i < event_data->num_entries; i++) {
if (fwevt->discard)
@@ -1379,12 +1607,37 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ {
+ if (!mrioc->sas_transport_enabled || tgtdev->non_stl
+ || tgtdev->is_hidden)
+ break;
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ if (link_rate == prev_link_rate)
+ break;
+ if (!parent_sas_address)
+ break;
+ parent_phy_number = event_data->start_phy_num + i;
+ mpi3mr_update_links(mrioc, parent_sas_address, handle,
+ parent_phy_number, link_rate, hba_port);
+ break;
+ }
default:
break;
}
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
}
+
+ if (mrioc->sas_transport_enabled && (event_data->exp_status ==
+ MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
+ if (sas_expander)
+ mpi3mr_expander_remove(mrioc, exp_sas_address,
+ hba_port);
+ }
}
/**
@@ -1604,28 +1857,54 @@ static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
struct mpi3mr_fwevt *fwevt)
{
+ struct mpi3_device_page0 *dev_pg0 = NULL;
+ u16 perst_id, handle, dev_info;
+ struct mpi3_device0_sas_sata_format *sasinf = NULL;
+
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
mrioc->current_event = fwevt;
if (mrioc->stop_drv_processing)
goto out;
+ if (mrioc->unrecoverable) {
+ dprint_event_bh(mrioc,
+ "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
+ fwevt->event_id);
+ goto out;
+ }
+
if (!fwevt->process_evt)
goto evt_ack;
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
{
- struct mpi3_device_page0 *dev_pg0 =
- (struct mpi3_device_page0 *)fwevt->event_data;
- mpi3mr_report_tgtdev_to_host(mrioc,
- le16_to_cpu(dev_pg0->persistent_id));
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ handle = le16_to_cpu(dev_pg0->dev_handle);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ else if (mrioc->sas_transport_enabled &&
+ (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ sasinf = &dev_pg0->device_specific.sas_sata_format;
+ dev_info = le16_to_cpu(sasinf->device_info);
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_is_expander_device(dev_info))
+ mpi3mr_expander_add(mrioc, handle);
+ }
break;
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
{
- mpi3mr_devinfochg_evt_bh(mrioc,
- (struct mpi3_device_page0 *)fwevt->event_data);
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
break;
}
case MPI3_EVENT_DEVICE_STATUS_CHANGE:
@@ -1633,6 +1912,13 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
+ break;
+ }
+
case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
{
mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
@@ -1662,6 +1948,22 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
}
break;
}
+ case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
+ {
+ while (mrioc->device_refresh_on)
+ msleep(500);
+
+ dprint_event_bh(mrioc,
+ "scan for non responding and newly added devices after soft reset started\n");
+ if (mrioc->sas_transport_enabled) {
+ mpi3mr_refresh_sas_ports(mrioc);
+ mpi3mr_refresh_expanders(mrioc);
+ }
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ ioc_info(mrioc,
+ "scan for non responding and newly added devices after soft reset completed\n");
+ break;
+ }
default:
break;
}
@@ -1716,6 +2018,9 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
u16 perst_id = 0;
perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
+ return retval;
+
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (tgtdev) {
mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
@@ -2429,6 +2734,35 @@ static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
+ * @mrioc: Adapter instance reference
+ *
+ * Adds a driver-specific event so that the driver will not process
+ * further events until all the devices are refreshed during soft reset.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ fwevt = mpi3mr_alloc_fwevt(0);
+ if (!fwevt) {
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for event(0x%02x)\n",
+ MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
+ return;
+ }
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = 0;
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
+
+/**
* mpi3mr_os_handle_events - Firmware event handler
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -2494,6 +2828,8 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
case MPI3_EVENT_LOG_DATA:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
{
process_evt_bh = 1;
break;
@@ -2508,7 +2844,6 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
break;
}
- case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
case MPI3_EVENT_SAS_DISCOVERY:
case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
@@ -3464,7 +3799,7 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
*
* Return: return zero.
*/
-static int mpi3mr_map_queues(struct Scsi_Host *shost)
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
int i, qoff, offset;
@@ -3500,9 +3835,6 @@ static int mpi3mr_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
offset += map->nr_queues;
}
-
- return 0;
-
}
/**
@@ -3852,9 +4184,10 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
unsigned long flags;
struct scsi_target *starget;
+ struct sas_rphy *rphy = NULL;
if (!sdev->hostdata)
return;
@@ -3867,7 +4200,14 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev)
scsi_tgt_priv_data->num_luns--;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
tgt_dev->starget = NULL;
if (tgt_dev)
@@ -3932,16 +4272,23 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
struct scsi_target *starget;
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
unsigned long flags;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
starget = scsi_target(sdev);
shost = dev_to_shost(&starget->dev);
mrioc = shost_priv(shost);
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
if (!tgt_dev)
return -ENXIO;
@@ -3989,11 +4336,12 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
unsigned long flags;
struct scsi_target *starget;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
starget = scsi_target(sdev);
shost = dev_to_shost(&starget->dev);
@@ -4001,7 +4349,14 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev)
scsi_tgt_priv_data = starget->hostdata;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
if (tgt_dev) {
if (tgt_dev->starget == NULL)
@@ -4044,6 +4399,8 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
struct mpi3mr_tgt_dev *tgt_dev;
unsigned long flags;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
+ bool update_stgt_priv_data = false;
scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
if (!scsi_tgt_priv_data)
@@ -4052,8 +4409,25 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
starget->hostdata = scsi_tgt_priv_data;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden) {
+
+ if (starget->channel == mrioc->scsi_device_channel) {
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden)
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ } else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ }
+
+ if (update_stgt_priv_data) {
scsi_tgt_priv_data->starget = starget;
scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
@@ -4067,8 +4441,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
scsi_tgt_priv_data->throttle_group =
tgt_dev->dev_spec.vd_inf.tg;
- } else
- retval = -ENXIO;
+ }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
return retval;
@@ -4257,6 +4630,16 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
dev_handle = stgt_priv_data->dev_handle;
if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4269,16 +4652,6 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
- if (atomic_read(&stgt_priv_data->block_io)) {
- if (mrioc->stop_drv_processing) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- }
- retval = SCSI_MLQUEUE_DEVICE_BUSY;
- goto out;
- }
-
if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
is_pcie_dev = 1;
if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
@@ -4556,16 +4929,23 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&mrioc->tgtdev_lock);
spin_lock_init(&mrioc->watchdog_lock);
spin_lock_init(&mrioc->chain_buf_lock);
+ spin_lock_init(&mrioc->sas_node_lock);
INIT_LIST_HEAD(&mrioc->fwevt_list);
INIT_LIST_HEAD(&mrioc->tgtdev_list);
INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
+ INIT_LIST_HEAD(&mrioc->sas_expander_list);
+ INIT_LIST_HEAD(&mrioc->hba_port_table_list);
+ INIT_LIST_HEAD(&mrioc->enclosure_list);
mutex_init(&mrioc->reset_mutex);
mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
+ MPI3MR_HOSTTAG_TRANSPORT_CMDS);
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
@@ -4700,6 +5080,11 @@ static void mpi3mr_remove(struct pci_dev *pdev)
while (mrioc->reset_in_progress || mrioc->is_driver_loading)
ssleep(1);
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+
mpi3mr_bsg_exit(mrioc);
mrioc->stop_drv_processing = 1;
mpi3mr_cleanup_fwevt_list(mrioc);
@@ -4709,7 +5094,11 @@ static void mpi3mr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- scsi_remove_host(shost);
+
+ if (mrioc->sas_transport_enabled)
+ sas_remove_host(shost);
+ else
+ scsi_remove_host(shost);
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
@@ -4766,22 +5155,21 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
mpi3mr_cleanup_resources(mrioc);
}
-#ifdef CONFIG_PM
/**
* mpi3mr_suspend - PCI power management suspend callback
- * @pdev: PCI device instance
- * @state: New power state
+ * @dev: Device struct
*
* Change the power state to the given value and cleanup the IOC
* by issuing MUR and shutdown notification
*
* Return: 0 always.
*/
-static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+mpi3mr_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct mpi3mr_ioc *mrioc;
- pci_power_t device_state;
if (!shost)
return 0;
@@ -4795,27 +5183,26 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
mpi3mr_stop_watchdog(mrioc);
mpi3mr_cleanup_ioc(mrioc);
- device_state = pci_choose_state(pdev, state);
- ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- pdev, pci_name(pdev), device_state);
- pci_save_state(pdev);
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
mpi3mr_cleanup_resources(mrioc);
- pci_set_power_state(pdev, device_state);
return 0;
}
/**
* mpi3mr_resume - PCI power management resume callback
- * @pdev: PCI device instance
+ * @dev: Device struct
*
* Restore the power state to D0 and reinitialize the controller
* and resume I/O operations to the target devices
*
* Return: 0 on success, non-zero on failure
*/
-static int mpi3mr_resume(struct pci_dev *pdev)
+static int __maybe_unused
+mpi3mr_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct mpi3mr_ioc *mrioc;
pci_power_t device_state = pdev->current_state;
@@ -4828,9 +5215,6 @@ static int mpi3mr_resume(struct pci_dev *pdev)
ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
mrioc->pdev = pdev;
mrioc->cpu_count = num_online_cpus();
r = mpi3mr_setup_resources(mrioc);
@@ -4841,18 +5225,21 @@ static int mpi3mr_resume(struct pci_dev *pdev)
}
mrioc->stop_drv_processing = 0;
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
mpi3mr_memset_buffers(mrioc);
r = mpi3mr_reinit_ioc(mrioc, 1);
if (r) {
ioc_err(mrioc, "resuming controller failed[%d]\n", r);
return r;
}
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
scsi_unblock_requests(shost);
+ mrioc->device_refresh_on = 0;
mpi3mr_start_watchdog(mrioc);
return 0;
}
-#endif
static const struct pci_device_id mpi3mr_pci_id_table[] = {
{
@@ -4863,16 +5250,15 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
+
static struct pci_driver mpi3mr_pci_driver = {
.name = MPI3MR_DRIVER_NAME,
.id_table = mpi3mr_pci_id_table,
.probe = mpi3mr_probe,
.remove = mpi3mr_remove,
.shutdown = mpi3mr_shutdown,
-#ifdef CONFIG_PM
- .suspend = mpi3mr_suspend,
- .resume = mpi3mr_resume,
-#endif
+ .driver.pm = &mpi3mr_pm_ops,
};
static ssize_t event_counter_show(struct device_driver *dd, char *buf)
@@ -4888,18 +5274,33 @@ static int __init mpi3mr_init(void)
pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
MPI3MR_DRIVER_VERSION);
+ mpi3mr_transport_template =
+ sas_attach_transport(&mpi3mr_transport_functions);
+ if (!mpi3mr_transport_template) {
+ pr_err("%s failed to load due to sas transport attach failure\n",
+ MPI3MR_DRIVER_NAME);
+ return -ENODEV;
+ }
+
ret_val = pci_register_driver(&mpi3mr_pci_driver);
if (ret_val) {
pr_err("%s failed to load due to pci register driver failure\n",
MPI3MR_DRIVER_NAME);
- return ret_val;
+ goto err_pci_reg_fail;
}
ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
&driver_attr_event_counter);
if (ret_val)
- pci_unregister_driver(&mpi3mr_pci_driver);
+ goto err_event_counter;
+
+ return ret_val;
+
+err_event_counter:
+ pci_unregister_driver(&mpi3mr_pci_driver);
+err_pci_reg_fail:
+ sas_release_transport(mpi3mr_transport_template);
return ret_val;
}
@@ -4916,6 +5317,7 @@ static void __exit mpi3mr_exit(void)
driver_remove_file(&mpi3mr_pci_driver.driver,
&driver_attr_event_counter);
pci_unregister_driver(&mpi3mr_pci_driver);
+ sas_release_transport(mpi3mr_transport_template);
}
module_init(mpi3mr_init);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
new file mode 100644
index 000000000000..3fc897336b5e
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -0,0 +1,3291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
+
+/**
+ * mpi3mr_post_transport_req - Issue transport requests and wait
+ * @mrioc: Adapter instance reference
+ * @request: Properly populated MPI3 request
+ * @request_sz: Size of the MPI3 request
+ * @reply: Pointer to return MPI3 reply
+ * @reply_sz: Size of the MPI3 reply buffer
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting MPI3 requests from the SAS
+ * transport layer using the transport command infrastructure.
+ * This blocks until the request completes or the timeout (in
+ * seconds) expires; on a timeout it faults the controller with
+ * the proper reason code.
+ *
+ * On successful completion of the request this function returns
+ * the ioc status reported by the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_transport_req(struct mpi3mr_ioc *mrioc, void *request,
+ u16 request_sz, void *reply, u16 reply_sz, int timeout,
+ u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->transport_cmds.mutex);
+ if (mrioc->transport_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending transport request failed due to command in use\n");
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+ goto out;
+ }
+ mrioc->transport_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->transport_cmds.is_waiting = 1;
+ mrioc->transport_cmds.callback = NULL;
+ mrioc->transport_cmds.ioc_status = 0;
+ mrioc->transport_cmds.ioc_loginfo = 0;
+
+ init_completion(&mrioc->transport_cmds.done);
+ dprint_cfg_info(mrioc, "posting transport request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)
+ dprint_dump(request, request_sz, "transport_req");
+ retval = mpi3mr_admin_request_post(mrioc, request, request_sz, 1);
+ if (retval) {
+ ioc_err(mrioc, "posting transport request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->transport_cmds.done,
+ (timeout * HZ));
+ if (!(mrioc->transport_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT);
+ ioc_err(mrioc, "transport request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->transport_cmds.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_transport_err(mrioc,
+ "transport request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->transport_cmds.ioc_loginfo);
+
+ if ((reply) && (mrioc->transport_cmds.state & MPI3MR_CMD_REPLY_VALID))
+ memcpy((u8 *)reply, mrioc->transport_cmds.reply, reply_sz);
+
+out_unlock:
+ mrioc->transport_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+
+out:
+ return retval;
+}
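The kernel-doc above describes a blocking helper; mpi3mr_report_manufacture() just below is the in-tree caller, but a stripped-down sketch of the calling pattern (illustrative only, mirroring that caller) looks like this:

	struct mpi3_smp_passthrough_request mpi_request;
	struct mpi3_smp_passthrough_reply mpi_reply;
	u16 ioc_status;
	int rc;

	memset(&mpi_request, 0, sizeof(mpi_request));
	mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
	mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
	/* ... fill io_unit_port, sas_address and the request/response SGEs ... */

	rc = mpi3mr_post_transport_req(mrioc, &mpi_request, sizeof(mpi_request),
				       &mpi_reply, sizeof(mpi_reply),
				       MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
	if (!rc && ioc_status == MPI3_IOCSTATUS_SUCCESS) {
		/* mpi_reply is valid here */
	}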
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mpi3mr_report_manufacture - obtain SMP report_manufacture
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the expander device
+ * @edev: SAS transport layer sas_expander_device object
+ * @port_id: ID of the HBA port
+ *
+ * Fills in the sas_expander_device with manufacturing info.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc = 0;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u8 *tmp;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev,
+ data_out_sz + data_in_sz, &data_out_dma, GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ manufacture_reply = data_out + data_out_sz;
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) port_id;
+ mpi_request.sas_address = cpu_to_le64(sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending report manufacturer SMP request to sas_address(0x%016llx), port(%d)\n",
+ (unsigned long long)sas_address, port_id);
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "report manufacturer SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "report manufacturer - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct rep_manu_reply)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strscpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strscpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strscpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ return rc;
+}
+
+/**
+ * __mpi3mr_expander_find_by_handle - expander search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the expander
+ *
+ * Context: The caller should acquire sas_node_lock
+ *
+ * This searches for an expander device based on its firmware
+ * handle and returns the corresponding sas_node object.
+ *
+ * Return: Expander sas_node object reference or NULL
+ */
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
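Because this lookup walks sas_expander_list without taking any lock itself, callers are expected to hold sas_node_lock around it, as the topology-change bottom half earlier in this patch does. A minimal sketch of that calling convention (editor's sketch, not part of the patch):

	struct mpi3mr_sas_node *sas_expander;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
	sas_expander = __mpi3mr_expander_find_by_handle(mrioc, handle);
	if (sas_expander) {
		/* copy out whatever is needed while the lock is held */
	}
	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);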
+
+/**
+ * mpi3mr_is_expander_device - check whether a device is an expander
+ * @device_info: Bitfield providing information about the device
+ *
+ * Return: 1 if the device is an expander, else 0.
+ */
+u8 mpi3mr_is_expander_device(u16 device_info)
+{
+ if ((device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) ==
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * mpi3mr_get_sas_address - retrieve sas_address for handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @sas_address: Pointer used to return the SAS address
+ *
+ * This function issues a device page0 read for the given device
+ * handle and returns the SAS address read from it.
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int mpi3mr_get_sas_address(struct mpi3mr_ioc *mrioc, u16 handle,
+ u64 *sas_address)
+{
+ struct mpi3_device_page0 dev_pg0;
+ u16 ioc_status;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ *sas_address = 0;
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (le16_to_cpu(dev_pg0.flags) &
+ MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE)
+ *sas_address = mrioc->sas_hba.sas_address;
+ else if (dev_pg0.device_form == MPI3_DEVICE_DEVFORM_SAS_SATA) {
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ *sas_address = le64_to_cpu(sasinf->sas_address);
+ } else {
+ ioc_err(mrioc, "%s: device_form(%d) is not SAS_SATA\n",
+ __func__, dev_pg0.device_form);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for a target device by SAS address and hba port
+ * pointer and returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.hba_port == hba_port))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for a target device by SAS address and hba port
+ * pointer and returns the mpi3mr_tgt_dev object.
+ *
+ * Context: This function acquires tgtdev_lock and releases it
+ * before returning the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+
+ if (!hba_port)
+ goto out;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+out:
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_device_by_sas_address - remove the device
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device by SAS address and hba
+ * port pointer and then removes it from the OS.
+ *
+ * Return: None
+ */
+static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ u8 was_on_tgtdev_list = 0;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc,
+ sas_address, hba_port);
+ if (tgtdev) {
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ was_on_tgtdev_list = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (was_on_tgtdev_list) {
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr_and_rphy - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @rphy: SAS transport layer rphy object
+ *
+ * This searches for a target device by SAS address and rphy
+ * pointer and returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.rphy == rphy))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
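The SCSI host template hooks changed earlier in this patch (slave_alloc, slave_configure, slave_destroy and target_alloc) use this rphy-based lookup for devices exposed through the SAS transport channel; the pattern there, under tgtdev_lock, is roughly (editor's sketch):

	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
			rphy->identify.sas_address, rphy);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	/* the lookup takes a reference; drop it with mpi3mr_tgtdev_put() */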
+
+/**
+ * mpi3mr_expander_find_by_sas_address - sas expander search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander
+ * @hba_port: HBA port entry
+ *
+ * Return: A valid SAS expander node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *mpi3mr_expander_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander, *r = NULL;
+
+ if (!hba_port)
+ goto out;
+
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if ((sas_expander->sas_address != sas_address) ||
+ (sas_expander->hba_port != hba_port))
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * __mpi3mr_sas_node_find_by_sas_address - sas node search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander or sas host
+ * @hba_port: HBA port entry
+ * Context: Caller should acquire mrioc->sas_node_lock.
+ *
+ * If the SAS address matches the controller's own SAS address
+ * (i.e. the device is directly attached), the SAS node associated
+ * with the controller is returned; otherwise the SAS address and
+ * hba port are used to identify the exact expander and its
+ * sas_node object is returned. If there is no match, NULL is
+ * returned.
+ *
+ * Return: A valid SAS node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *__mpi3mr_sas_node_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+
+ if (mrioc->sas_hba.sas_address == sas_address)
+ return &mrioc->sas_hba;
+ return mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+}
+
+/**
+ * mpi3mr_parent_present - Is parent present for a phy
+ * @mrioc: Adapter instance reference
+ * @phy: SAS transport layer phy object
+ *
+ * Return: 0 if parent is present else non-zero
+ */
+static int mpi3mr_parent_present(struct mpi3mr_ioc *mrioc, struct sas_phy *phy)
+{
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ if (__mpi3mr_sas_node_find_by_sas_address(mrioc,
+ phy->identify.sas_address,
+ hba_port) == NULL) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return 0;
+}
+
+/**
+ * mpi3mr_convert_phy_link_rate - Convert MPI to SAS transport link rate
+ * @link_rate: link rate as defined in the MPI header
+ *
+ * Convert link_rate from mpi format into sas_transport layer
+ * form.
+ *
+ * Return: A valid SAS transport layer defined link rate
+ */
+static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI3_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ default:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * mpi3mr_delete_sas_phy - Remove a single phy from port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mr_sas_phy->phy_id);
+
+ list_del(&mr_sas_phy->port_siblings);
+ mr_sas_port->num_phys--;
+ mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
+ if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * mpi3mr_add_sas_phy - Adding a single phy to a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "add: sas_address(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mr_sas_phy->phy_id);
+
+ list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
+ if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+}
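Both helpers above keep mr_sas_port->phy_mask as a bitmap of member phy IDs and derive lowest_phy with ffs(); a small worked example (editor's sketch, values hypothetical):

	u32 phy_mask = (1 << 2) | (1 << 3);	/* port made up of phys 2 and 3 */
	u8 lowest_phy = ffs(phy_mask) - 1;	/* ffs() is 1-based, so 2 */

	phy_mask &= ~(1 << 2);			/* phy 2 deleted from the port */
	lowest_phy = ffs(phy_mask) - 1;		/* now 3 */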
+
+/**
+ * mpi3mr_add_phy_to_an_existing_port - add phy to existing port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ * @sas_address: SAS address of the device/expander to which the
+ * phy needs to be added
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_port *mr_sas_port;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ if (!hba_port)
+ return;
+
+ list_for_each_entry(mr_sas_port, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy == mr_sas_phy)
+ return;
+ }
+ mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy);
+ return;
+ }
+}
+
+/**
+ * mpi3mr_delete_sas_port - helper function to remove a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ struct mpi3mr_hba_port *hba_port = mr_sas_port->hba_port;
+ enum sas_device_type device_type =
+ mr_sas_port->remote_identify.device_type;
+
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ if (device_type == SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc, sas_address,
+ hba_port);
+
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc, sas_address, hba_port);
+}
+
+/**
+ * mpi3mr_del_phy_from_an_existing_port - del phy from a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy != mr_sas_phy)
+ continue;
+ if ((mr_sas_port->num_phys == 1) &&
+ !mrioc->reset_in_progress)
+ mpi3mr_delete_sas_port(mrioc, mr_sas_port);
+ else
+ mpi3mr_delete_sas_phy(mrioc, mr_sas_port,
+ mr_sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * mpi3mr_sas_port_sanity_check - sanity check while adding port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @sas_address: SAS address of device/expander
+ * @hba_port: HBA port entry
+ *
+ * Verifies whether the phys attached to a device with the given
+ * SAS address already belong to an existing sas port and, if so,
+ * removes those phys from that sas port.
+ *
+ * Return: None.
+ */
+static void mpi3mr_sas_port_sanity_check(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ sas_address) || (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ if (mr_sas_node->phy[i].phy_belongs_to_port == 1)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ mr_sas_node, &mr_sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpi3mr_set_identify - set identify for phys and end devices
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @identify: SAS transport layer's identify info
+ *
+ * Populates sas identify info for a specific device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct sas_identify *identify)
+{
+
+ struct mpi3_device_page0 device_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ u16 device_info;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0,
+ sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ sasinf = &device_pg0.device_specific.sas_sata_format;
+ device_info = le16_to_cpu(sasinf->device_info);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sasinf->sas_address);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sasinf->phy_num;
+
+ /* device_type */
+ switch (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) {
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ /* MPI3.0 doesn't have a define for SATA initiator, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+
+ /* target_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ /* MPI3.0 doesn't have a define for STP target, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET)
+ identify->target_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_host_phy - report sas_host phy to SAS transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @phy_pg0: SAS phy page 0
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_host_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy, struct mpi3_sas_phy_page0 phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle = le16_to_cpu(phy_pg0.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_expander_phy - report expander phy to transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @expander_pg1: SAS Expander page 1
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_expander_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy,
+ struct mpi3_sas_expander_page1 expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_hba_port - alloc hba port object
+ * @mrioc: Adapter instance reference
+ * @port_id: Port number
+ *
+ * Allocate memory for an hba port object and add it to the
+ * hba_port_table_list.
+ *
+ * Return: Pointer to the allocated hba_port or NULL on failure.
+ */
+static struct mpi3mr_hba_port *
+mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
+{
+ struct mpi3mr_hba_port *hba_port;
+
+ hba_port = kzalloc(sizeof(struct mpi3mr_hba_port),
+ GFP_KERNEL);
+ if (!hba_port)
+ return NULL;
+ hba_port->port_id = port_id;
+ ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_add_tail(&hba_port->list, &mrioc->hba_port_table_list);
+ return hba_port;
+}
+
+/**
+ * mpi3mr_get_hba_port_by_id - find hba port by id
+ * @mrioc: Adapter instance reference
+ * @port_id: Port ID to search
+ *
+ * Return: mpi3mr_hba_port reference for the matched port, or NULL
+ * if no matching non-dirty port is found
+ */
+
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id)
+{
+ struct mpi3mr_hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
+ if (port->flags & MPI3MR_HBA_PORT_FLAG_DIRTY)
+ continue;
+ return port;
+ }
+
+ return NULL;
+}
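This lookup intentionally skips ports flagged MPI3MR_HBA_PORT_FLAG_DIRTY, and it is how the event handlers earlier in this patch map the io_unit_port byte reported by firmware to a driver port object; a sketch of that mapping (the NULL handling here is illustrative):

	struct mpi3mr_hba_port *hba_port;

	hba_port = mpi3mr_get_hba_port_by_id(mrioc, event_data->io_unit_port);
	if (!hba_port)
		return;		/* no matching (non-dirty) port known yet */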
+
+/**
+ * mpi3mr_update_links - refreshing SAS phy link changes
+ * @mrioc: Adapter instance reference
+ * @sas_address_parent: SAS address of parent expander or host
+ * @handle: Firmware device handle of attached device
+ * @phy_number: Phy number
+ * @link_rate: New link rate
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port)
+{
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct mpi3mr_sas_phy *mr_sas_phy;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ mr_sas_phy = &mr_sas_node->phy[phy_number];
+ mr_sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI3_SAS_NEG_LINK_RATE_1_5)) {
+ mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_phy->remote_identify);
+ mpi3mr_add_phy_to_an_existing_port(mrioc, mr_sas_node,
+ mr_sas_phy, mr_sas_phy->remote_identify.sas_address,
+ hba_port);
+ } else
+ memset(&mr_sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mr_sas_phy->phy)
+ mr_sas_phy->phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(link_rate);
+
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_phy->phy->dev,
+ "refresh: parent sas_address(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ (unsigned long long)sas_address_parent,
+ link_rate, phy_number, handle, (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+}
+
+/**
+ * mpi3mr_sas_host_refresh - refreshing sas host object contents
+ * @mrioc: Adapter instance reference
+ *
+ * This function refreshes the controller's phy information and
+ * updates the SAS transport layer with it; it is executed for
+ * each device addition or device info change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u8 link_rate;
+ u16 sz, port_id, attached_handle;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+
+ dprint_transport_info(mrioc,
+ "updating handles for sas_host(0x%016llx)\n",
+ (unsigned long long)mrioc->sas_hba.sas_address);
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ link_rate =
+ sas_io_unit_pg0->phy_data[i].negotiated_link_rate >> 4;
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (attached_handle && link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_update_links(mrioc, mrioc->sas_hba.sas_address,
+ attached_handle, i, link_rate,
+ mrioc->sas_hba.phy[i].hba_port);
+ }
+ out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_host_add - create sas host object
+ * @mrioc: Adapter instance reference
+ *
+ * This function creates the controller's phy information and
+ * updates the SAS transport layer with it; it is executed for
+ * the first device addition or device info change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u16 sz, num_phys = 1, port_id, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_enclosure_page0 encl_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ num_phys = sas_io_unit_pg0->num_phys;
+ kfree(sas_io_unit_pg0);
+
+ mrioc->sas_hba.host_node = 1;
+ INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list);
+ mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev;
+ mrioc->sas_hba.phy = kcalloc(num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!mrioc->sas_hba.phy)
+ return;
+
+ mrioc->sas_hba.num_phys = num_phys;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, i)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ mrioc->sas_hba.phy[i].phy_id = i;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_add_host_phy(mrioc, &mrioc->sas_hba.phy[i],
+ phy_pg0, mrioc->sas_hba.parent_dev);
+ }
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ mrioc->sas_hba.handle, ioc_status, __FILE__, __LINE__,
+ __func__);
+ goto out;
+ }
+ mrioc->sas_hba.enclosure_handle =
+ le16_to_cpu(dev_pg0.enclosure_handle);
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ mrioc->sas_hba.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ ioc_info(mrioc,
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ mrioc->sas_hba.handle,
+ (unsigned long long) mrioc->sas_hba.sas_address,
+ mrioc->sas_hba.num_phys);
+
+ if (mrioc->sas_hba.enclosure_handle) {
+ if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+ &encl_pg0, sizeof(dev_pg0),
+ MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.enclosure_handle)) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ mrioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(encl_pg0.enclosure_logical_id);
+ }
+
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_port_add - Expose the SAS device to the SAS TL
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the attached device
+ * @sas_address_parent: sas address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * This function creates a new sas port object for the end device
+ * matching the given sas address and hba_port, adds it to the
+ * sas_node's sas_port_list and exposes the attached sas device
+ * to the SAS transport layer through sas_rphy_add.
+ *
+ * Return: A valid mpi3mr_sas_port reference or NULL.
+ */
+static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ u16 handle, u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy, *next;
+ struct mpi3mr_sas_port *mr_sas_port;
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct sas_rphy *rphy;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ int i;
+ struct sas_port *port;
+
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ mr_sas_port = kzalloc(sizeof(struct mpi3mr_sas_port), GFP_KERNEL);
+ if (!mr_sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&mr_sas_port->port_list);
+ INIT_LIST_HEAD(&mr_sas_port->phy_list);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!mr_sas_node) {
+ ioc_err(mrioc, "%s:could not find parent sas_address(0x%016llx)!\n",
+ __func__, (unsigned long long)sas_address_parent);
+ goto out_fail;
+ }
+
+ if ((mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_port->remote_identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mr_sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->hba_port = hba_port;
+ mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ mr_sas_port->remote_identify.sas_address, hba_port);
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ mr_sas_port->remote_identify.sas_address) ||
+ (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << i);
+ }
+
+ if (!mr_sas_port->num_phys) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+
+ if (!tgtdev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 1;
+ }
+
+ if (!mr_sas_node->parent_dev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(mr_sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mr_sas_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&port->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ sas_port_add_phy(port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+ mr_sas_phy->hba_port = hba_port;
+ }
+
+ mr_sas_port->port = port;
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ rphy = sas_end_device_alloc(port);
+ tgtdev->dev_spec.sas_sata_inf.rphy = rphy;
+ } else {
+ rphy = sas_expander_alloc(port,
+ mr_sas_port->remote_identify.device_type);
+ }
+ rphy->identify = mr_sas_port->remote_identify;
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ }
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 0;
+ tgtdev->dev_spec.sas_sata_inf.sas_transport_attached = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ dev_info(&rphy->dev,
+ "%s: added: handle(0x%04x), sas_address(0x%016llx)\n",
+ __func__, handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address);
+
+ mr_sas_port->rphy = rphy;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&mr_sas_port->port_list, &mr_sas_node->sas_port_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, true);
+ }
+
+ /* fill in report manufacturer information for expander devices */
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_report_manufacture(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy), hba_port->port_id);
+
+ return mr_sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mr_sas_phy, next, &mr_sas_port->phy_list,
+ port_siblings)
+ list_del(&mr_sas_phy->port_siblings);
+ kfree(mr_sas_port);
+ return NULL;
+}
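+
+/*
+ * Illustrative note (not part of the driver logic): the port's phy_mask is
+ * built with one bit per matching phy and lowest_phy is derived from it via
+ * ffs(). For a hypothetical narrow port on phys 2 and 3:
+ *
+ *	phy_mask   = (1 << 2) | (1 << 3) = 0x0c
+ *	lowest_phy = ffs(0x0c) - 1       = 2
+ */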
+
+/**
+ * mpi3mr_sas_port_remove - remove port from the list
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of attached device
+ * @sas_address_parent: SAS address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * Removes the matching port object from the parent sas node's
+ * sas_port_list and frees the associated memory.
+ *
+ * Return: None
+ */
+static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+ unsigned long flags;
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_node *mr_sas_node;
+ u8 found = 0;
+ struct mpi3mr_sas_phy *mr_sas_phy, *next_phy;
+ struct mpi3mr_hba_port *srch_port, *hba_port_next = NULL;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ found = 1;
+ list_del(&mr_sas_port->port_list);
+ goto out;
+ }
+
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ if (mr_sas_node->host_node) {
+ list_for_each_entry_safe(srch_port, hba_port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (srch_port != hba_port)
+ continue;
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ srch_port, srch_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ break;
+ }
+ }
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if (mr_sas_node->phy[i].remote_identify.sas_address ==
+ sas_address)
+ memset(&mr_sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ list_for_each_entry_safe(mr_sas_phy, next_phy,
+ &mr_sas_port->phy_list, port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ mr_sas_phy->phy_belongs_to_port = 0;
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete_phy(mr_sas_port->port,
+ mr_sas_phy->phy);
+ list_del(&mr_sas_phy->port_siblings);
+ }
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete(mr_sas_port->port);
+ ioc_info(mrioc, "%s: removed sas_address(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, false);
+ }
+
+ kfree(mr_sas_port);
+}
+
+/**
+ * struct host_port - host port details
+ * @sas_address: SAS Address of the attached device
+ * @phy_mask: phy mask of host port
+ * @handle: Device Handle of attached device
+ * @iounit_port_id: port ID
+ * @used: set when the host port has been matched with a sas port from sas_port_list
+ * @lowest_phy: lowest phy ID of host port
+ */
+struct host_port {
+ u64 sas_address;
+ u32 phy_mask;
+ u16 handle;
+ u8 iounit_port_id;
+ u8 used;
+ u8 lowest_phy;
+};
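+
+/*
+ * Illustrative example (hypothetical values): a wide port made of HBA phys
+ * 0-3 on IO unit port 0, attached to an expander at handle 0x0009, would be
+ * captured during the refresh below as:
+ *
+ *	{ .sas_address = 0x500062b2000a1b3f, .phy_mask = 0x0f,
+ *	  .handle = 0x0009, .iounit_port_id = 0, .used = 0, .lowest_phy = 0 }
+ */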
+
+/**
+ * mpi3mr_update_mr_sas_port - update sas port objects during reset
+ * @mrioc: Adapter instance reference
+ * @h_port: host_port object
+ * @mr_sas_port: sas_port object which needs to be updated
+ *
+ * Update the port ID of the sas port object. Also add any phys that were
+ * newly added to the current sas port and remove any phys that have moved
+ * out of it.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy;
+ u32 phy_mask_xor;
+ u64 phys_to_be_added, phys_to_be_removed;
+ int i;
+
+ h_port->used = 1;
+ mr_sas_port->marked_responding = 1;
+
+ dev_info(&mr_sas_port->port->dev,
+ "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n",
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask,
+ h_port->iounit_port_id, h_port->phy_mask);
+
+ mr_sas_port->hba_port->port_id = h_port->iounit_port_id;
+ mr_sas_port->hba_port->flags &= ~MPI3MR_HBA_PORT_FLAG_DIRTY;
+
+ /* Get the newly added phys bit map & removed phys bit map */
+ phy_mask_xor = mr_sas_port->phy_mask ^ h_port->phy_mask;
+ phys_to_be_added = h_port->phy_mask & phy_mask_xor;
+ phys_to_be_removed = mr_sas_port->phy_mask & phy_mask_xor;
+
+ /*
+ * Register these new phys with the current mr_sas_port's port.
+ * If any of these phys were previously registered with another
+ * port, delete them from that port first.
+ */
+ for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ mpi3mr_add_phy_to_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ /* Delete the phys which are not part of current mr_sas_port's port. */
+ for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ }
+}
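+
+/*
+ * Worked example (illustrative only) of the phy mask arithmetic above:
+ * if the old port used phys 0-3 (phy_mask 0x0f) and the refreshed host
+ * port reports phys 2-5 (phy_mask 0x3c), then
+ *
+ *	phy_mask_xor       = 0x0f ^ 0x3c = 0x33
+ *	phys_to_be_added   = 0x3c & 0x33 = 0x30   (phys 4 and 5)
+ *	phys_to_be_removed = 0x0f & 0x33 = 0x03   (phys 0 and 1)
+ */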
+
+/**
+ * mpi3mr_refresh_sas_ports - update host's sas ports during reset
+ * @mrioc: Adapter instance reference
+ *
+ * Update the host's sas ports after a reset by checking whether
+ * the sas ports are still intact. Add/remove phys if any HBA phys
+ * moved into or out of a sas port, and update the io_unit_port if
+ * it changed across the reset.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
+{
+ struct host_port h_port[32];
+ int i, j, found, host_port_count = 0, port_idx;
+ u16 sz, attached_handle, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ struct mpi3mr_sas_port *mr_sas_port;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* Create a new expander port table */
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (!attached_handle)
+ continue;
+ found = 0;
+ for (j = 0; j < host_port_count; j++) {
+ if (h_port[j].handle == attached_handle) {
+ h_port[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ continue;
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ attached_handle))) {
+ dprint_reset(mrioc,
+ "failed to read dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ attached_handle, __FILE__, __LINE__, __func__);
+ continue;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ ioc_status, attached_handle,
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+
+ port_idx = host_port_count;
+ h_port[port_idx].sas_address = le64_to_cpu(sasinf->sas_address);
+ h_port[port_idx].handle = attached_handle;
+ h_port[port_idx].phy_mask = (1 << i);
+ h_port[port_idx].iounit_port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ h_port[port_idx].lowest_phy = sasinf->phy_num;
+ h_port[port_idx].used = 0;
+ host_port_count++;
+ }
+
+ if (!host_port_count)
+ goto out;
+
+ if (mrioc->logging_level & MPI3_DEBUG_RESET) {
+ ioc_info(mrioc, "Host port details before reset\n");
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ mr_sas_port->hba_port->port_id,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->phy_mask, mr_sas_port->lowest_phy);
+ }
+ mr_sas_port = NULL;
+ ioc_info(mrioc, "Host port details after reset\n");
+ for (i = 0; i < host_port_count; i++) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ h_port[i].iounit_port_id, h_port[i].sas_address,
+ h_port[i].phy_mask, h_port[i].lowest_phy);
+ }
+ }
+
+ /* mark all host sas port entries as dirty */
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ mr_sas_port->marked_responding = 0;
+ mr_sas_port->hba_port->flags |= MPI3MR_HBA_PORT_FLAG_DIRTY;
+ }
+
+ /* First check for matching lowest phy */
+ for (i = 0; i < host_port_count; i++) {
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].lowest_phy == mr_sas_port->lowest_phy) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+ /* In case the lowest phy got enabled or disabled during the reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].phy_mask & mr_sas_port->phy_mask) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+ /* In case the expander cable was removed and reconnected to another HBA port during the reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_refresh_expanders - Refresh expander device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed after a controller reset to identify expander
+ * devices that went missing during the reset and remove them from the
+ * upper layers, or to expose any newly detected expander devices to
+ * the upper layers.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ u16 ioc_status, handle;
+ u64 sas_address;
+ int i;
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ sas_expander->non_responding = 1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ sas_expander = NULL;
+
+ handle = 0xffff;
+
+ /* Search for responding expander devices and add any that are newly discovered */
+ while (true) {
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(struct mpi3_sas_expander_page0),
+ MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ dprint_reset(mrioc,
+ "failed to read exp pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading exp pg0 for handle:(0x%04x), %s:%d/%s()!\n",
+ ioc_status, handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ handle = le16_to_cpu(expander_pg0.dev_handle);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, expander_pg0.io_unit_port);
+
+ if (!hba_port) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!sas_expander) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ sas_expander->non_responding = 0;
+ if (sas_expander->handle == handle)
+ continue;
+
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ }
+
+ /*
+ * Delete non responding expander devices and the corresponding
+ * hba_port if the non responding expander device's parent device
+ * is a host node.
+ */
+ sas_expander = NULL;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ if (sas_expander->non_responding) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_node_add - insert an expander into the list.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander sas node
+ * Context: This function will acquire sas_node_lock.
+ *
+ * Adds the new object to mrioc->sas_expander_list.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_add(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &mrioc->sas_expander_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_add - Create expander object
+ * @mrioc: Adapter instance reference
+ * @handle: Expander firmware device handle
+ *
+ * This function creates an expander object, stores it in the
+ * sas_expander_list, and exposes it to the SAS transport
+ * layer.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_enclosure_node *enclosure_dev;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ u16 ioc_status, parent_handle, temp_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ u8 port_id, link_rate;
+ struct mpi3mr_sas_port *mr_sas_port = NULL;
+ struct mpi3mr_hba_port *hba_port;
+ u32 phynum_handle;
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(expander_pg0), MPI3_SAS_EXPAND_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ parent_handle = le16_to_cpu(expander_pg0.parent_dev_handle);
+ if (mpi3mr_get_sas_address(mrioc, parent_handle, &sas_address_parent)
+ != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ port_id = expander_pg0.io_unit_port;
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (sas_address_parent != mrioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = mpi3mr_expander_add(mrioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ } else {
+ /*
+ * When a parent expander is present, update its phys to
+ * which the child expander is connected with the link
+ * speed, attached dev handle and sas address.
+ */
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle =
+ (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ parent_handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc,
+ &ioc_status, &expander_pg1,
+ sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ temp_handle = le16_to_cpu(
+ expander_pg1.attached_dev_handle);
+ if (temp_handle != handle)
+ continue;
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ mpi3mr_update_links(mrioc, sas_address_parent,
+ handle, i, link_rate, hba_port);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
+ GFP_KERNEL);
+ if (!sas_expander)
+ return -1;
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.num_phys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+ sas_expander->hba_port = hba_port;
+
+ ioc_info(mrioc,
+ "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys) {
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mr_sas_port = mpi3mr_sas_port_add(mrioc, handle, sas_address_parent,
+ sas_expander->hba_port);
+ if (!mr_sas_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mr_sas_port->rphy->dev;
+ sas_expander->rphy = mr_sas_port->rphy;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle = (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].hba_port = hba_port;
+
+ if ((mpi3mr_add_expander_phy(mrioc, &sas_expander->phy[i],
+ expander_pg1, sas_expander->parent_dev))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ enclosure_dev =
+ mpi3mr_enclosure_find_by_handle(mrioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
+ sas_expander->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+ }
+
+ mpi3mr_expander_node_add(mrioc, sas_expander);
+ return 0;
+
+out_fail:
+
+ if (mr_sas_port)
+ mpi3mr_sas_port_remove(mrioc,
+ sas_expander->sas_address,
+ sas_address_parent, sas_expander->hba_port);
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+ return rc;
+}
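+
+/*
+ * Illustrative note (not part of the driver logic): the page address used
+ * with MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM above packs the 16-bit
+ * device handle into the low bits and the phy number above it, e.g. for
+ * phy 5 of the expander at handle 0x000a:
+ *
+ *	phynum_handle = (5 << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | 0x000a;
+ */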
+
+/**
+ * mpi3mr_expander_node_remove - recursive removal of expander.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander device object
+ *
+ * Removes the expander object from the sas_expander_list, frees the
+ * associated memory, and removes it from the SAS transport layer. If
+ * one of the attached devices is an expander, that expander is
+ * removed recursively as well.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ unsigned long flags;
+ u8 port_id;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mr_sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (mrioc->reset_in_progress)
+ return;
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ else if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ port_id = sas_expander->hba_port->port_id;
+ mpi3mr_sas_port_remove(mrioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent, sas_expander->hba_port);
+
+ ioc_info(mrioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address, port_id);
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * mpi3mr_expander_remove - Remove expander object
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the expander to be removed
+ * @hba_port: HBA port reference
+ *
+ * This function removes the expander object stored in
+ * mrioc->sas_expander_list from the SAS transport layer by
+ * calling mpi3mr_expander_node_remove().
+ *
+ * Return: None
+ */
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ unsigned long flags;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (sas_expander)
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+
+}
+
+/**
+ * mpi3mr_get_sas_negotiated_logical_linkrate - get linkrate
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function identifies whether the target device is attached
+ * directly or through an expander, issues sas phy page0 or expander
+ * phy page1 accordingly, and gets the link rate. If reading the
+ * pages fails, a link rate of 1.5 Gbps is returned.
+ *
+ * Return: logical link rate.
+ */
+static u8 mpi3mr_get_sas_negotiated_logical_linkrate(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u8 link_rate = MPI3_SAS_NEG_LINK_RATE_1_5, phy_number;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u32 phynum_handle;
+ u16 ioc_status;
+
+ phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) {
+ phynum_handle = ((phy_number<<MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT)
+ | tgtdev->parent_handle);
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy_number)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+out:
+ return link_rate;
+}
+
+/**
+ * mpi3mr_report_tgtdev_to_sas_transport - expose dev to SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function exposes the target device to the SAS transport layer
+ * after preparing the host phys, setting up the link rate, etc.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ int retval = 0;
+ u8 link_rate, parent_phy_number;
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+ u8 port_id;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return -1;
+
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_get_sas_address(mrioc, tgtdev->parent_handle,
+ &sas_address_parent) != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.sas_address_parent = sas_address_parent;
+
+ parent_phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ port_id = tgtdev->io_unit_port;
+
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.hba_port = hba_port;
+
+ link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+
+ mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
+ parent_phy_number, link_rate, hba_port);
+
+ tgtdev->host_exposed = 1;
+ if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ sas_address_parent, hba_port)) {
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ } else if ((!tgtdev->starget)) {
+ if (!mrioc->is_driver_loading)
+ mpi3mr_sas_port_remove(mrioc, sas_address,
+ sas_address_parent, hba_port);
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_sas_transport - remove from SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function removes the target device from the SAS transport layer.
+ *
+ * Return: None.
+ */
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return;
+
+ hba_port = tgtdev->dev_spec.sas_sata_inf.hba_port;
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ sas_address_parent = tgtdev->dev_spec.sas_sata_inf.sas_address_parent;
+ mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ hba_port);
+ tgtdev->host_exposed = 0;
+}
+
+/**
+ * mpi3mr_get_port_id_by_sas_phy - Get port ID of the given phy
+ * @phy: SAS transport layer phy object
+ *
+ * Return: Port number of the hba_port if valid, else 0xFF.
+ */
+static inline u8 mpi3mr_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ if (hba_port)
+ port_id = hba_port->port_id;
+
+ return port_id;
+}
+
+/**
+ * mpi3mr_get_port_id_by_rphy - Get Port number from SAS rphy
+ *
+ * @mrioc: Adapter instance reference
+ * @rphy: SAS transport layer remote phy object
+ *
+ * Retrieves the HBA port number to which the device pointed to by
+ * the rphy object is attached.
+ *
+ * Return: Valid port number on success, else 0xFF.
+ */
+static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *rphy)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list,
+ list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->hba_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ port_id =
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ }
+ return port_id;
+}
+
+static inline struct mpi3mr_ioc *phy_to_mrioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+
+ return shost_priv(shost);
+}
+
+static inline struct mpi3mr_ioc *rphy_to_mrioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
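+
+/*
+ * Illustrative note (not part of the driver logic): the request built below
+ * is a fixed-size SMP REPORT PHY ERROR LOG frame. For phy 3 of an expander
+ * the payload is essentially:
+ *
+ *	struct phy_error_log_request req = {
+ *		.smp_frame_type = 0x40,
+ *		.function = 0x11,
+ *		.request_length = 2,
+ *		.phy_identifier = 3,
+ *	};
+ *
+ * and the counters are returned in the phy_error_log_reply fields above.
+ */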
+
+/**
+ * mpi3mr_get_expander_phy_error_log - return expander phy error counters
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ *
+ * Return: 0 for success, non-zero for failure.
+ *
+ */
+static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma, data_in_dma;
+ u32 data_out_sz, data_in_sz, sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_error_log_request);
+ data_in_sz = sizeof(struct phy_error_log_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_error_log_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy error log SMP request to sas_address(0x%016llx), phy_id(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy error log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log - function_result(%d)\n",
+ phy_error_log_reply->function_result);
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_linkerrors - return phy error counters
+ * @phy: The SAS transport layer phy object
+ *
+ * This function retrieves the phy error log information of the
+ * HBA or expander to which the given phy belongs.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_phy_page1 phy_pg1;
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_get_expander_phy_error_log(mrioc, phy);
+
+ memset(&phy_pg1, 0, sizeof(struct mpi3_sas_phy_page1));
+ /* get hba phy error logs */
+ if ((mpi3mr_cfg_get_sas_phy_pg1(mrioc, &ioc_status, &phy_pg1,
+ sizeof(struct mpi3_sas_phy_page1),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.invalid_dword_count);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.running_disparity_error_count);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.loss_dword_synch_count);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.phy_reset_problem_count);
+ return 0;
+}
+
+/**
+ * mpi3mr_transport_get_enclosure_identifier - Get Enclosure ID
+ * @rphy: The SAS transport layer remote phy object
+ * @identifier: Enclosure identifier to be returned
+ *
+ * Returns the enclosure logical ID of the device pointed to by the
+ * remote phy object.
+ *
+ * Return: 0 on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_enclosure_identifier(struct sas_rphy *rphy,
+ u64 *identifier)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ *identifier =
+ tgtdev->enclosure_logical_id;
+ rc = 0;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_bay_identifier - Get bay ID
+ * @rphy: The SAS transport layer remote phy object
+ *
+ * Returns the slot ID of the device pointed to by the remote phy
+ * object.
+ *
+ * Return: Valid slot ID on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ rc = tgtdev->slot;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
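+
+/*
+ * Illustrative note (not part of the driver logic): a link reset requested
+ * on an expander phy translates below into an SMP PHY CONTROL frame such as
+ * (hypothetical phy 2, rates left at the phy's current programmed values):
+ *
+ *	struct phy_control_request req = {
+ *		.smp_frame_type = 0x40,
+ *		.function = 0x91,
+ *		.request_length = 9,
+ *		.phy_identifier = 2,
+ *		.phy_operation = SMP_PHY_CONTROL_LINK_RESET,
+ *		.programmed_min_physical_link_rate = phy->minimum_linkrate << 4,
+ *		.programmed_max_physical_link_rate = phy->maximum_linkrate << 4,
+ *	};
+ */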
+
+/**
+ * mpi3mr_expander_phy_control - expander phy control
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ * @phy_operation: The phy operation to be executed
+ *
+ * Issues SMP passthru phy control request to execute a specific
+ * phy operation for a given expander device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u16 sz;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_control_request);
+ data_in_sz = sizeof(struct phy_control_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_control_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy control SMP request to sas_address(0x%016llx), phy_id(%d) opcode(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ phy_operation);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy control SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+ dprint_transport_info(mrioc,
+ "phy control - function_result(%d)\n",
+ phy_control_reply->function_result);
+ rc = 0;
+ }
+ out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_reset - Reset a given phy
+ * @phy: The SAS transport layer phy object
+ * @hard_reset: Flag to indicate the type of reset
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_iounit_control_request mpi_request;
+ struct mpi3_iounit_control_reply mpi_reply;
+ u16 request_sz = sizeof(struct mpi3_iounit_control_request);
+ u16 reply_sz = sizeof(struct mpi3_iounit_control_reply);
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, request_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request.operation = MPI3_CTRL_OP_SAS_PHY_CONTROL;
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX] =
+ (hard_reset ? MPI3_CTRL_ACTION_HARD_RESET :
+ MPI3_CTRL_ACTION_LINK_RESET);
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX] =
+ phy->number;
+
+ dprint_transport_info(mrioc,
+ "sending phy reset request to sas_address(0x%016llx), phy_id(%d) hard_reset(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ hard_reset);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "phy reset request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+out:
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_enable - enable/disable phys
+ * @phy: The SAS transport layer phy object
+ * @enable: flag to enable/disable, enable phy when true
+ *
+ * This function enables/disables a given phy by applying the required
+ * configuration page changes or issuing the expander phy control command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ u16 sz;
+ int rc = 0;
+ int i, discovery_active;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < mrioc->sas_hba.num_phys ; i++) {
+ if (sas_io_unit_pg0->phy_data[i].port_flags &
+ MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS) {
+ ioc_err(mrioc,
+ "discovery is active on port = %d, phy = %d\n"
+ "\tunable to enable/disable phys, try again later!\n",
+ sas_io_unit_pg0->phy_data[i].io_unit_port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ((sas_io_unit_pg0->phy_data[phy->number].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (enable)
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ &= ~MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ |= MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_io_unit_pg1);
+ kfree(sas_io_unit_pg0);
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_speed - set phy min/max speed
+ * @phy: The SAS transport layer phy object
+ * @rates: Rates defined as in sas_phy_linkrates
+ *
+ * This function sets the link rates given in the rates argument
+ * for the given phy by applying the required configuration page
+ * changes or issuing the expander phy control command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u16 sz, ioc_status;
+ int rc = 0;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ sas_io_unit_pg1->phy_data[phy->number].max_min_link_rate =
+ (rates->minimum_linkrate + (rates->maximum_linkrate << 4));
+
+ if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS)) {
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate &
+ MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK)
+ >> MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ }
+
+out:
+ kfree(sas_io_unit_pg1);
+ return rc;
+}
+
+/**
+ * mpi3mr_map_smp_buffer - map BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address holder
+ * @dma_len: Mapped DMA buffer length.
+ * @p: Virtual address holder
+ *
+ * This function maps the BSG buffer for DMA. A multi-segment buffer is
+ * bounced through a coherent allocation, while a single-segment buffer
+ * is DMA-mapped directly.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+mpi3mr_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+ /* Check if the request is split across multiple segments */
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+ *dma_len = buf->payload_len;
+ } else {
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+
+ return 0;
+}
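+
+/*
+ * Illustrative usage sketch (mirrors the SMP handler further below, not an
+ * additional code path): every successful map must be paired with an unmap
+ * of the same payload using the returned addresses, e.g.
+ *
+ *	if (!mpi3mr_map_smp_buffer(dev, &job->request_payload,
+ *				   &dma_addr, &dma_len, &addr)) {
+ *		(issue the SMP passthrough request)
+ *		mpi3mr_unmap_smp_buffer(dev, &job->request_payload,
+ *					dma_addr, addr);
+ *	}
+ */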
+
+/**
+ * mpi3mr_unmap_smp_buffer - unmap BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address to be unmapped
+ * @p: Virtual address
+ *
+ * This function unmaps the DMAable buffer
+ */
+static void
+mpi3mr_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t dma_addr, void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * mpi3mr_transport_smp_handler - handler for smp passthru
+ * @job: BSG job reference
+ * @shost: SCSI host object reference
+ * @rphy: SAS transport rphy object pointing the expander
+ *
+ * This is used primarily by smp_utils for sending SMP
+ * commands to the expanders attached to the controller.
+ */
+static void
+mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ int rc;
+ void *psge;
+ dma_addr_t dma_addr_in;
+ dma_addr_t dma_addr_out;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t dma_len_in;
+ size_t dma_len_out;
+ unsigned int reslen = 0;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ &dma_addr_out, &dma_len_out, &addr_out);
+ if (rc)
+ goto out;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ &dma_addr_in, &dma_len_in, &addr_in);
+ if (rc)
+ goto unmap_out;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_rphy(mrioc, rphy);
+ mpi_request.sas_address = ((rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(mrioc->sas_hba.sas_address));
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_out - 4, dma_addr_out);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_in - 4, dma_addr_in);
+
+ dprint_transport_info(mrioc, "sending SMP request\n");
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto unmap_in;
+
+ dprint_transport_info(mrioc,
+ "SMP request completed with ioc_status(0x%04x)\n", ioc_status);
+
+ dprint_transport_info(mrioc,
+ "SMP request - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ memcpy(job->reply, &mpi_reply, reply_sz);
+ job->reply_len = reply_sz;
+ reslen = le16_to_cpu(mpi_reply.response_data_length);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+
+ rc = 0;
+unmap_in:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ dma_addr_in, addr_in);
+unmap_out:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ dma_addr_out, addr_out);
+out:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template mpi3mr_transport_functions = {
+ .get_linkerrors = mpi3mr_transport_get_linkerrors,
+ .get_enclosure_identifier = mpi3mr_transport_get_enclosure_identifier,
+ .get_bay_identifier = mpi3mr_transport_get_bay_identifier,
+ .phy_reset = mpi3mr_transport_phy_reset,
+ .phy_enable = mpi3mr_transport_phy_enable,
+ .set_phy_speed = mpi3mr_transport_phy_speed,
+ .smp_handler = mpi3mr_transport_smp_handler,
+};
+
+struct scsi_transport_template *mpi3mr_transport_template;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index d00431f553e1..4d0be5ab98c1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -534,6 +534,7 @@ typedef struct _MPI2_CONFIG_REPLY {
****************************************************************************/
#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+#define MPI2_MFGPAGE_VENDORID_ATTO (0x117C)
/*MPI v2.0 SAS products */
#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 565339a0811d..8b22df8c1792 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2990,19 +2990,26 @@ static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
+ u64 coherent_dma_mask, dma_mask;
- if (ioc->is_mcpu_endpoint ||
- sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= 32)
+ if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
+ dma_get_required_mask(&pdev->dev) <= 32) {
ioc->dma_mask = 32;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+ } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
ioc->dma_mask = 63;
- else
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
+ } else {
ioc->dma_mask = 64;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ }
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
+ if (ioc->use_32bit_dma)
+ coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (dma_set_mask(&pdev->dev, dma_mask) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
return -ENODEV;
if (ioc->dma_mask > 32) {
@@ -4313,7 +4320,7 @@ _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -4335,7 +4342,7 @@ _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -4358,7 +4365,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
descriptor.MSIxIndex = msix_task;
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -4379,7 +4386,7 @@ _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
descriptor.SMID = cpu_to_le16(smid);
- writel(*request, &ioc->chip->AtomicRequestDescriptorPost);
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
@@ -5425,6 +5432,151 @@ out:
}
/**
+ * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
+ *
+ * @ioc: per adapter object
+ * @n: pointer to the ATTO NVRAM structure
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
+ struct ATTO_SAS_NVRAM *n)
+{
+ int r = -EINVAL;
+ union ATTO_SAS_ADDRESS *s1;
+ u32 len;
+ u8 *pb;
+ u8 ckSum;
+
+ /* validate nvram checksum */
+ pb = (u8 *) n;
+ ckSum = ATTO_SASNVR_CKSUM_SEED;
+ len = sizeof(struct ATTO_SAS_NVRAM);
+
+ while (len--)
+ ckSum = ckSum + pb[len];
+
+ if (ckSum) {
+ ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
+ return r;
+ }
+
+ s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
+
+ if (n->Signature[0] != 'E'
+ || n->Signature[1] != 'S'
+ || n->Signature[2] != 'A'
+ || n->Signature[3] != 'S')
+ ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
+ else if (n->Version > ATTO_SASNVR_VERSION)
+		ioc_info(ioc, "Invalid ATTO NVRAM version\n");
+ else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
+ || s1->b[0] != 0x50
+ || s1->b[1] != 0x01
+ || s1->b[2] != 0x08
+ || (s1->b[3] & 0xF0) != 0x60
+ || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
+ ioc_err(ioc, "Invalid ATTO SAS address\n");
+ } else
+ r = 0;
+ return r;
+}
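+
+/*
+ * Illustrative sketch only, not taken from ATTO documentation: the Checksum
+ * byte is presumably chosen so that the seeded byte-wise sum computed above
+ * wraps to zero. A hypothetical helper producing such a checksum could look
+ * like this:
+ *
+ *	static u8 atto_nvram_gen_checksum(struct ATTO_SAS_NVRAM *n)
+ *	{
+ *		u8 *pb = (u8 *)n;
+ *		u32 len = sizeof(*n);
+ *		u8 sum = ATTO_SASNVR_CKSUM_SEED;
+ *
+ *		n->Checksum = 0;
+ *		while (len--)
+ *			sum += pb[len];
+ *		return (u8)(0 - sum);
+ *	}
+ */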
+
+/**
+ * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
+ *
+ * @ioc: per adapter object
+ * @sas_addr: returned SAS address
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
+{
+ Mpi2ManufacturingPage1_t mfg_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ struct ATTO_SAS_NVRAM *nvram;
+ int r;
+ __be64 addr;
+
+ r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
+ if (r) {
+ ioc_err(ioc, "Failed to read manufacturing page 1\n");
+ return r;
+ }
+
+ /* validate nvram */
+ nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
+ r = mpt3sas_atto_validate_nvram(ioc, nvram);
+ if (r)
+ return r;
+
+ addr = *((__be64 *) nvram->SasAddr);
+ sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
+ return r;
+}
+
+/**
+ * mpt3sas_atto_init - perform initialization for ATTO branded
+ *			adapter.
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
+{
+ int sz = 0;
+ Mpi2BiosPage4_t *bios_pg4 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ int ix;
+ union ATTO_SAS_ADDRESS sas_addr;
+ union ATTO_SAS_ADDRESS temp;
+ union ATTO_SAS_ADDRESS bias;
+
+ r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
+ if (r)
+ return r;
+
+ /* get header first to get size */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
+ return r;
+ }
+
+ sz = mpi_reply.Header.PageLength * sizeof(u32);
+ bios_pg4 = kzalloc(sz, GFP_KERNEL);
+ if (!bios_pg4) {
+ ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
+ return -ENOMEM;
+ }
+
+ /* read bios page 4 */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4\n");
+ goto out;
+ }
+
+ /* Update bios page 4 with the ATTO WWID */
+ bias.q = sas_addr.q;
+ bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;
+
+ for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
+ temp.q = sas_addr.q;
+ temp.b[7] += ix;
+ bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
+ bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
+ }
+ r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+
+out:
+ kfree(bios_pg4);
+ return r;
+}
+
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -5447,6 +5599,13 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
}
+
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ rc = mpt3sas_atto_init(ioc);
+ if (rc)
+ return rc;
+ }
+
/*
* Ensure correct T10 PI operation if vendor left EEDPTagMode
* flag unset in NVDATA.
@@ -5496,12 +5655,21 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
rc = _base_assign_fw_reported_qd(ioc);
if (rc)
return rc;
- rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- if (rc)
- return rc;
- rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- if (rc)
- return rc;
+
+ /*
+ * ATTO doesn't use bios page 2 and 3 for bios settings.
+ */
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
+ ioc->bios_pg3.BiosVersion = 0;
+ else {
+ rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ if (rc)
+ return rc;
+ }
+
rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
if (rc)
return rc;
@@ -6895,7 +7063,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* send message 32-bits at a time */
for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
- writel(request[i], &ioc->chip->Doorbell);
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
if ((_base_wait_for_doorbell_ack(ioc, 5)))
failed = 1;
}
@@ -6914,16 +7082,16 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
/* read the first two 16-bits, it gives the total length of the reply */
- reply[0] = ioc->base_readl(&ioc->chip->Doorbell)
- & MPI2_DOORBELL_DATA_MASK;
+ reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
__LINE__);
return -EFAULT;
}
- reply[1] = ioc->base_readl(&ioc->chip->Doorbell)
- & MPI2_DOORBELL_DATA_MASK;
+ reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
for (i = 2; i < default_reply->MsgLength * 2; i++) {
@@ -6935,8 +7103,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
if (i >= reply_bytes/2) /* overflow case */
ioc->base_readl(&ioc->chip->Doorbell);
else
- reply[i] = ioc->base_readl(&ioc->chip->Doorbell)
- & MPI2_DOORBELL_DATA_MASK;
+ reply[i] = le16_to_cpu(
+ ioc->base_readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e584cf0ffc23..05364aa15ecd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "42.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 42
+#define MPT3SAS_DRIVER_VERSION "43.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 43
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1652,6 +1652,32 @@ struct mpt3sas_debugfs_buffer {
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
+/*
+ * struct ATTO_SAS_NVRAM - ATTO NVRAM settings stored
+ *			    in Manufacturing page 1, used to
+ *			    retrieve the ATTO SAS address.
+ */
+struct ATTO_SAS_NVRAM {
+ u8 Signature[4];
+ u8 Version;
+#define ATTO_SASNVR_VERSION 0
+
+ u8 Checksum;
+#define ATTO_SASNVR_CKSUM_SEED 0x5A
+ u8 Pad[10];
+ u8 SasAddr[8];
+#define ATTO_SAS_ADDR_ALIGN 64
+ u8 Reserved[232];
+};
+
+#define ATTO_SAS_ADDR_DEVNAME_BIAS 63
+
+union ATTO_SAS_ADDRESS {
+ U8 b[8];
+ U16 w[4];
+ U32 d[2];
+ U64 q;
+};
/* base shared API */
extern struct list_head mpt3sas_ioc_list;
@@ -1828,6 +1854,9 @@ int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
u8 *num_phys);
int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
+
int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
u16 sz);
@@ -1846,6 +1875,12 @@ int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage2_t *config_page);
int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
+int mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage0_t *config_page);
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index a8dd14c91efd..d114ef381c44 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -541,6 +541,42 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_manufacturing_pg1 - obtain manufacturing page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -757,11 +793,99 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
sizeof(*config_page));
+
out:
return r;
}
/**
+ * mpt3sas_config_set_bios_pg4 - write out bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg4 - read bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ /*
+	 * The size of the page is variable. Allow for just the
+	 * size to be returned.
+ */
+ if (config_page && sz_config_pg) {
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ }
+
+out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 84c87c2c3e7e..0d8b1e942ded 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -948,6 +948,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
break;
}
case MPI2_FUNCTION_FW_DOWNLOAD:
+ {
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
+ ret = -EPERM;
+ break;
+ }
+ fallthrough;
+ }
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
@@ -1686,6 +1694,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
request_data = ioc->diag_buffer[buffer_type];
@@ -1787,6 +1796,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
}
@@ -2163,6 +2173,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
@@ -2417,6 +2428,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index def37a7e5980..8e24ebcebfe5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
@@ -5156,6 +5156,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* invalid device handle */
handle = sas_target_priv_data->handle;
+
+ /*
+	 * Avoid error handling escalation when the device is disconnected
+ */
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
+ if (scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
+ scsi_done(scmd);
+ return 0;
+ }
+ }
+
if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scsi_done(scmd);
@@ -11872,7 +11885,7 @@ out:
* scsih_map_queues - map reply queues with request queues
* @shost: SCSI host pointer
*/
-static int scsih_map_queues(struct Scsi_Host *shost)
+static void scsih_map_queues(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc =
(struct MPT3SAS_ADAPTER *)shost->hostdata;
@@ -11882,7 +11895,7 @@ static int scsih_map_queues(struct Scsi_Host *shost)
int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
map = &shost->tag_set.map[i];
@@ -11910,7 +11923,6 @@ static int scsih_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
}
- return 0;
}
/* shost template for SAS 2.0 HBA devices */
@@ -11975,7 +11987,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
.max_segment_size = 0xffffffff,
- .cmd_per_lun = 7,
+ .cmd_per_lun = 128,
.shost_groups = mpt3sas_host_groups,
.sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
@@ -12733,6 +12745,12 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
/*
+ * ATTO Branded ExpressSAS H12xx GT
+ */
+ { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
* Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
*/
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 91d78d0a38fe..628b08ba6770 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3612,6 +3612,10 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
return -1;
}
+
+ if (t->task_proto == SAS_PROTOCOL_INTERNAL_ABORT)
+ atomic_dec(&pm8001_dev->running_req);
+
ts = &t->task_status;
if (status != 0)
pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n",
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a0028e130a7e..2ff2fac1e403 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -81,7 +81,7 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
-static int pm8001_map_queues(struct Scsi_Host *shost)
+static void pm8001_map_queues(struct Scsi_Host *shost)
{
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index c5e3f380a01c..b08f52673889 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -612,7 +612,7 @@ struct fw_control_info {
operations.*/
u32 reserved;/* padding required for 64 bit
alignment */
- u8 buffer[1];/* Start of buffer */
+ u8 buffer[];/* Start of buffer */
};
struct fw_control_ex {
struct fw_control_info *fw_control;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3d6b137314f3..e045c6e25090 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1921,6 +1921,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fc_vport_setlink(vn_port);
}
+ /* Set symbolic node name */
+ if (base_qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (base_qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+ /* Set supported speed */
+ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+ /* Set speed */
+ vn_port->link_speed = n_port->link_speed;
+
+ /* Set port type */
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+ /* Set maxframe size */
+ fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
vn_port);
@@ -3686,11 +3707,6 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 5db9bf69dcff..cd75b179410d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2519,19 +2519,23 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
qla27xx_get_active_image(vha, &active_regions);
regions.global_image = active_regions.global;
+ if (IS_QLA27XX(ha))
+ regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
+
if (IS_QLA28XX(ha)) {
qla28xx_get_aux_images(vha, &active_regions);
regions.board_config = active_regions.aux.board_config;
regions.vpd_nvram = active_regions.aux.vpd_nvram;
regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+ regions.nvme_params = active_regions.aux.nvme_params;
}
ql_dbg(ql_dbg_user, vha, 0x70e1,
- "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+ "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
__func__, vha->host_no, regions.global_image,
regions.board_config, regions.vpd_nvram,
- regions.npiv_config_0_1, regions.npiv_config_2_3);
+ regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index bb64b9c5a74b..d38dab0a07e8 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -314,7 +314,8 @@ struct qla_active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
- uint8_t reserved[32];
+ uint8_t nvme_params;
+ uint8_t reserved[31];
} __packed;
#include "qla_edif_bsg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 7cf1f78cbaee..d7e8454304ce 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2455,7 +2455,7 @@ qla83xx_fw_dump_failed_0:
/****************************************************************************/
/* Write the debug message prefix into @pbuf. */
-static void ql_dbg_prefix(char *pbuf, int pbuf_size,
+static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev,
const scsi_qla_host_t *vha, uint msg_id)
{
if (vha) {
@@ -2464,6 +2464,9 @@ static void ql_dbg_prefix(char *pbuf, int pbuf_size,
/* <module-name> [<dev-name>]-<msg-id>:<host>: */
snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
dev_name(&(pdev->dev)), msg_id, vha->host_no);
+ } else if (pdev) {
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&pdev->dev), msg_id);
} else {
/* <module-name> [<dev-name>]-<msg-id>: : */
snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
@@ -2491,20 +2494,20 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
struct va_format vaf;
char pbuf[64];
- if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+ ql_ktrace(1, level, pbuf, NULL, vha, id, fmt);
+
+ if (!ql_mask_match(level))
return;
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
+
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
-
- if (!ql_mask_match(level))
- trace_ql_dbg_log(pbuf, &vaf);
- else
- pr_warn("%s%pV", pbuf, &vaf);
+ pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
@@ -2533,6 +2536,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (pdev == NULL)
return;
+
+ ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2541,7 +2547,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL,
+ id + ql_dbg_offset);
pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
@@ -2570,7 +2578,10 @@ ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
+ ql_ktrace(0, level, pbuf, NULL, vha, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
va_start(va, fmt);
@@ -2621,7 +2632,10 @@ ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id);
+ ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id);
va_start(va, fmt);
@@ -2716,7 +2730,11 @@ ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id);
+ ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id);
va_start(va, fmt);
@@ -2762,6 +2780,8 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
struct va_format vaf;
char pbuf[128];
+ ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2770,8 +2790,10 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL,
- id + ql_dbg_offset);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id + ql_dbg_offset);
+
pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index feeb1666227f..70482b55d240 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -385,3 +385,46 @@ ql_mask_match(uint level)
return level && ((level & ql2xextended_error_logging) == level);
}
+
+static inline int
+ql_mask_match_ext(uint level, int *log_tunable)
+{
+ if (*log_tunable == 1)
+ *log_tunable = QL_DBG_DEFAULT1_MASK;
+
+ return (level & *log_tunable) == level;
+}
+
+/* Assumes local variables pbuf and pbuf_ready are present. */
+#define ql_ktrace(dbg_msg, level, pbuf, pdev, vha, id, fmt) do { \
+ struct va_format _vaf; \
+ va_list _va; \
+ u32 dbg_off = dbg_msg ? ql_dbg_offset : 0; \
+ \
+ pbuf[0] = 0; \
+ if (!trace_ql_dbg_log_enabled()) \
+ break; \
+ \
+ if (dbg_msg && !ql_mask_match_ext(level, \
+ &ql2xextended_error_logging_ktrace)) \
+ break; \
+ \
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, vha, id + dbg_off); \
+ \
+ va_start(_va, fmt); \
+ _vaf.fmt = fmt; \
+ _vaf.va = &_va; \
+ \
+ trace_ql_dbg_log(pbuf, &_vaf); \
+ \
+ va_end(_va); \
+} while (0)
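+
+/*
+ * Usage sketch, based on the callers patched in qla_dbg.c rather than on a
+ * separate API: the caller declares a local pbuf[], invokes ql_ktrace() first
+ * so the trace event is emitted independently of ql2xextended_error_logging,
+ * and then builds the prefix with ql_dbg_prefix() only if ql_ktrace() left
+ * pbuf[0] == 0 (tracing disabled or the level filtered out).
+ */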
+
+#define QLA_ENABLE_KERNEL_TRACING
+
+#ifdef QLA_ENABLE_KERNEL_TRACING
+#define QLA_TRACE_ENABLE(_tr) \
+ trace_array_set_clr_event(_tr, "qla", NULL, true)
+#else /* QLA_ENABLE_KERNEL_TRACING */
+#define QLA_TRACE_ENABLE(_tr)
+#endif /* QLA_ENABLE_KERNEL_TRACING */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3ec6a200942e..802eec6407d9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,11 @@
#include <uapi/scsi/fc/fc_els.h>
+#define QLA_DFS_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *dfs_##_debugfs_file_name
+#define QLA_DFS_ROOT_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *qla_dfs_##_debugfs_file_name
+
/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
typedef struct {
uint8_t domain;
@@ -4768,6 +4773,7 @@ struct active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
+ uint8_t nvme_params;
} aux;
};
@@ -5052,6 +5058,7 @@ struct qla27xx_image_status {
#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1
#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2
#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3
+#define QLA28XX_AUX_IMG_NVME_PARAMS BIT_4
#define SET_VP_IDX 1
#define SET_AL_PA 2
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 85bd0e468d43..777808af5634 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -489,6 +489,99 @@ qla_dfs_naqp_show(struct seq_file *s, void *unused)
return 0;
}
+/*
+ * Helper macros for setting up debugfs entries.
+ * _name: The name of the debugfs entry
+ * _ctx_struct: The context that was passed when creating the debugfs file
+ *
+ * QLA_DFS_SETUP_RD can be used when there is only a show function.
+ * - the show function takes the name qla_dfs_<sysfs-name>_show
+ *
+ * QLA_DFS_SETUP_RW can be used when there are both show and write functions.
+ * - the show function takes the name qla_dfs_<sysfs-name>_show
+ * - the write function takes the name qla_dfs_<sysfs-name>_write
+ *
+ * To have a new debugfs entry, do:
+ * 1. Create a "struct dentry *" in the appropriate structure in the format
+ * dfs_<sysfs-name>
+ * 2. Set up debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
+ * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
+ * or QLA_DFS_ROOT_CREATE_FILE
+ * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
+ * or QLA_DFS_ROOT_REMOVE_FILE
+ *
+ * Example for creating a "TEST" debugfs file:
+ * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
+ * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
+ * 3. In qla2x00_dfs_setup():
+ * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
+ * 4. In qla2x00_dfs_remove():
+ * QLA_DFS_REMOVE_FILE(ha, TEST);
+ */
+#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = qla_dfs_##_name##_write, \
+};
+
+#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
+ do { \
+ if (!qla_dfs_##_name) \
+ qla_dfs_##_name = debugfs_create_file(#_name, \
+ _perm, qla2x00_dfs_root, _ctx, \
+ &qla_dfs_##_name##_ops); \
+ } while (0)
+
+#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
+ do { \
+ if (qla_dfs_##_name) { \
+ debugfs_remove(qla_dfs_##_name); \
+ qla_dfs_##_name = NULL; \
+ } \
+ } while (0)
+
+#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \
+ do { \
+ (_struct)->dfs_##_name = debugfs_create_file(#_name, \
+ _perm, _parent, _ctx, \
+	    &qla_dfs_##_name##_ops);				\
+ } while (0)
+
+#define QLA_DFS_REMOVE_FILE(_struct, _name) \
+ do { \
+ if ((_struct)->dfs_##_name) { \
+ debugfs_remove((_struct)->dfs_##_name); \
+ (_struct)->dfs_##_name = NULL; \
+ } \
+ } while (0)
+
static int
qla_dfs_naqp_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 400a8b6f3982..00ccc41cef14 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1551,7 +1551,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
sa_frame.port_id.b24);
rval = -EINVAL;
- SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
goto done;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 361015b5763e..f307beed9d29 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1675,6 +1675,7 @@ struct qla_flt_location {
#define FLT_REG_VPD_SEC_27XX_1 0x52
#define FLT_REG_VPD_SEC_27XX_2 0xD8
#define FLT_REG_VPD_SEC_27XX_3 0xDA
+#define FLT_REG_NVME_PARAMS_27XX 0x21
/* 28xx */
#define FLT_REG_AUX_IMG_PRI_28XX 0x125
@@ -1691,6 +1692,8 @@ struct qla_flt_location {
#define FLT_REG_MPI_SEC_28XX 0xF0
#define FLT_REG_PEP_PRI_28XX 0xD1
#define FLT_REG_PEP_SEC_28XX 0xF1
+#define FLT_REG_NVME_PARAMS_PRI_28XX 0x14E
+#define FLT_REG_NVME_PARAMS_SEC_28XX 0x179
struct qla_flt_region {
__le16 code;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5dd2932382ee..e3256e721be1 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -70,8 +70,6 @@ extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
-extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -163,6 +161,7 @@ extern int ql2xrdpenable;
extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
+extern int ql2xextended_error_logging_ktrace;
extern int ql2xiidmaenable;
extern int ql2xmqsupport;
extern int ql2xfwloadbin;
@@ -193,8 +192,6 @@ extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
extern u32 ql2xnvme_queues;
-extern int ql2xrspq_follow_inptr;
-extern int ql2xrspq_follow_inptr_legacy;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -279,7 +276,6 @@ extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(srb_t *sp);
-extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
@@ -616,7 +612,6 @@ void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **
/*
* Global Function Prototypes in qla_sup.c source file.
*/
-extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
@@ -788,12 +783,6 @@ extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
-extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
@@ -878,8 +867,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
-extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
-extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
/* ISP 8021 IDC */
extern void qla82xx_clear_drv_active(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7fe0e52c11d..e12db95de688 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7933,6 +7933,9 @@ qla28xx_component_status(
active_regions->aux.npiv_config_2_3 =
qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+
+ active_regions->aux.nvme_params =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}
static int
@@ -8041,11 +8044,12 @@ check_valid_image:
}
ql_dbg(ql_dbg_init, vha, 0x018f,
- "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+ "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
active_regions->aux.board_config,
active_regions->aux.vpd_nvram,
active_regions->aux.npiv_config_0_1,
- active_regions->aux.npiv_config_2_3);
+ active_regions->aux.npiv_config_2_3,
+ active_regions->aux.nvme_params);
}
void
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 76e79f350a22..e19fde304e5c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3764,7 +3764,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
u16 rsp_in = 0, cur_ring_index;
- int follow_inptr, is_shadow_hba;
+ int is_shadow_hba;
if (!ha->flags.fw_started)
return;
@@ -3774,25 +3774,18 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla_cpu_update(rsp->qpair, smp_processor_id());
}
-#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
+#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
do { \
- if (_update) { \
- _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
- } \
} while (0)
is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
- follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
- ql2xrspq_follow_inptr_legacy;
- __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
- while ((likely(follow_inptr &&
- rsp->ring_index != rsp_in &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
- (!follow_inptr &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
+ while (rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
cur_ring_index = rsp->ring_index;
@@ -3906,8 +3899,7 @@ process_err:
}
pure_item = qla27xx_copy_fpin_pkt(vha,
(void **)&pkt, &rsp);
- __update_rsp_in(follow_inptr, is_shadow_hba,
- rsp, rsp_in);
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
if (!pure_item)
break;
qla24xx_queue_purex_item(vha, pure_item,
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 7450c3458be7..02fdeb0d31ec 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -684,12 +684,8 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
struct blk_mq_queue_map *map)
{
struct scsi_qla_host *vha = lport->private;
- int rc;
- rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
- if (rc)
- ql_log(ql_log_warn, vha, 0x21de,
- "pci map queue failed 0x%x", rc);
+ blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 0bd0fd1042df..2c85f3cce726 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -15,6 +15,8 @@
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
+#include <linux/trace_events.h>
+#include <linux/trace.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -35,6 +37,8 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+static struct trace_array *qla_trc_array;
+
int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
@@ -117,6 +121,11 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
+int ql2xextended_error_logging_ktrace = 1;
+module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
+ "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
+
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
@@ -333,24 +342,14 @@ MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
-u32 ql2xdelay_before_pci_error_handling = 5;
+static u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
"Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
-int ql2xrspq_follow_inptr = 1;
-module_param(ql2xrspq_follow_inptr, int, 0644);
-MODULE_PARM_DESC(ql2xrspq_follow_inptr,
- "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");
-
-int ql2xrspq_follow_inptr_legacy = 1;
-module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
-MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
- "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1).");
-
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
@@ -2849,6 +2848,27 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&vha->work_lock, flags);
}
+static void
+qla_trace_init(void)
+{
+ qla_trc_array = trace_array_get_by_name("qla2xxx");
+ if (!qla_trc_array) {
+ ql_log(ql_log_fatal, NULL, 0x0001,
+ "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
+ return;
+ }
+
+ QLA_TRACE_ENABLE(qla_trc_array);
+}
+
+static void
+qla_trace_uninit(void)
+{
+ if (!qla_trc_array)
+ return;
+ trace_array_put(qla_trc_array);
+}
+
/*
* PCI driver interface
*/
@@ -3530,7 +3550,7 @@ skip_dpc:
qla_dual_mode_enabled(base_vha))
scsi_scan_host(host);
else
- ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ ql_log(ql_log_info, base_vha, 0x0122,
"skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -7994,17 +8014,15 @@ qla_pci_reset_done(struct pci_dev *pdev)
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
-static int qla2xxx_map_queues(struct Scsi_Host *shost)
+static void qla2xxx_map_queues(struct Scsi_Host *shost)
{
- int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
- rc = blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
- return rc;
+ blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
@@ -8191,6 +8209,8 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(sw_info_t) != 32);
BUILD_BUG_ON(sizeof(target_id_t) != 2);
+ qla_trace_init();
+
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
SLAB_HWCACHE_ALIGN, NULL);
@@ -8269,6 +8289,8 @@ qlt_exit:
destroy_cache:
kmem_cache_destroy(srb_cachep);
+
+ qla_trace_uninit();
return ret;
}
@@ -8287,6 +8309,7 @@ qla2x00_module_exit(void)
fc_release_transport(qla2xxx_transport_template);
qlt_exit();
kmem_cache_destroy(srb_cachep);
+ qla_trace_uninit();
}
module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 62666df1a59e..bb754a950802 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1557,11 +1557,11 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
"Waiting for sess works (tgt %p)", tgt);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- while (!list_empty(&tgt->sess_works_list)) {
+ do {
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
- flush_scheduled_work();
+ flush_work(&tgt->sess_work);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- }
+ } while (!list_empty(&tgt->sess_works_list));
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
@@ -2151,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
- if (!abort_cmd)
+ if (!abort_cmd) {
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EIO;
+ }
mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
if (abort_cmd->qpair) {
@@ -6334,69 +6336,6 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void qlt_tmr_work(struct qla_tgt *tgt,
- struct qla_tgt_sess_work_param *prm)
-{
- struct atio_from_isp *a = &prm->tm_iocb2;
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- unsigned long flags;
- be_id_t s_id;
- int rc;
- u64 unpacked_lun;
- int fn;
- void *iocb;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term2;
-
- s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
- if (!sess) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has got an extra creation ref */
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- if (!sess)
- goto out_term2;
- } else {
- if (sess->deleted) {
- goto out_term2;
- }
-
- if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
- "%s: kref_get fail %8phC\n",
- __func__, sess->port_name);
- goto out_term2;
- }
- }
-
- iocb = a;
- fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
- unpacked_lun =
- scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
-
- rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- ha->tgt.tgt_ops->put_sess(sess);
-
- if (rc != 0)
- goto out_term;
- return;
-
-out_term2:
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-out_term:
- qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
-}
-
static void qlt_sess_work_fn(struct work_struct *work)
{
struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
@@ -6423,9 +6362,6 @@ static void qlt_sess_work_fn(struct work_struct *work)
case QLA_TGT_SESS_WORK_ABORT:
qlt_abort_work(tgt, prm);
break;
- case QLA_TGT_SESS_WORK_TM:
- qlt_tmr_work(tgt, prm);
- break;
default:
BUG_ON(1);
break;
@@ -6512,7 +6448,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
- INIT_LIST_HEAD(&tgt->del_sess_list);
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index de3942b8efc4..7df86578214f 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -813,9 +813,6 @@ struct qla_tgt {
/* Count of sessions refering qla_tgt. Protected by hardware_lock. */
int sess_count;
- /* Protected by hardware_lock */
- struct list_head del_sess_list;
-
spinlock_t sess_work_lock;
struct list_head sess_works_list;
struct work_struct sess_work;
@@ -945,7 +942,6 @@ struct qla_tgt_sess_work_param {
struct list_head sess_works_list_entry;
#define QLA_TGT_SESS_WORK_ABORT 1
-#define QLA_TGT_SESS_WORK_TM 2
int type;
union {
@@ -1079,8 +1075,6 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
-extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
- struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index f3257d46b6d2..03f3e2cd62b5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.800-k"
+#define QLA2XXX_VERSION "10.02.07.900-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 800
+#define QLA_DRIVER_BETA_VER 900
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 57f2f4135a06..8c961ff03fcd 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -909,7 +909,8 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
sg_count = dma_map_sg(&qpti->op->dev, sg,
scsi_sg_count(Cmnd),
Cmnd->sc_data_direction);
-
+ if (!sg_count)
+ return -1;
ds = cmd->dataseg;
cmd->segment_cnt = sg_count;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 086ec5b5862d..c59eac7a32f2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,13 +586,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- /*
- * Decreasing the module reference count before the device reference
- * count is safe since scsi_remove_host() only returns after all
- * devices have been removed.
- */
- module_put(sdev->host->hostt->module);
+ struct module *mod = sdev->host->hostt->module;
+
put_device(&sdev->sdev_gendev);
+ module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b8a76b89f85a..697fc57bc711 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7474,12 +7474,12 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
-static int sdebug_map_queues(struct Scsi_Host *shost)
+static void sdebug_map_queues(struct Scsi_Host *shost)
{
int i, qoff;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -7501,9 +7501,6 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
}
-
- return 0;
-
}
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 448748e3fba5..6995c8979230 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -334,6 +334,7 @@ enum blk_eh_timer_return scsi_timeout(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
+ atomic_inc(&scmd->device->iotmo_cnt);
if (host->eh_deadline != -1 && !host->last_reset)
host->last_reset = jiffies;
@@ -514,6 +515,11 @@ static void scsi_report_sense(struct scsi_device *sdev,
}
}
+static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status)
+{
+ cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+}
+
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
@@ -644,7 +650,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case DATA_PROTECT:
if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
/* Thin provisioning hard threshold reached */
- set_host_byte(scmd, DID_ALLOC_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC);
return SUCCESS;
}
fallthrough;
@@ -652,14 +658,14 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case VOLUME_OVERFLOW:
case MISCOMPARE:
case BLANK_CHECK:
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
return SUCCESS;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- set_host_byte(scmd, DID_MEDIUM_ERROR);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR);
return SUCCESS;
}
return NEEDS_RETRY;
@@ -668,7 +674,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
fallthrough;
case ILLEGAL_REQUEST:
@@ -678,7 +684,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26 || /* Parameter value invalid */
sshdr.asc == 0x27) { /* Write protected */
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
}
return SUCCESS;
@@ -1983,7 +1989,7 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
case SAM_STAT_RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
- set_host_byte(scmd, DID_NEXUS_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT);
return SUCCESS; /* causes immediate i/o error */
}
return FAILED;
@@ -2004,9 +2010,11 @@ maybe_retry:
}
}
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+ blk_status_t status)
{
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
/**
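eh_lock_door_done() above picks up another interface change that recurs through this diff (sg.c and st.c below follow the same pattern): request end_io callbacks now return enum rq_end_io_ret. RQ_END_IO_NONE tells the block layer not to free the request, either because the callback already freed it or because it keeps ownership, while RQ_END_IO_FREE asks the block layer to free it. A minimal illustrative callback under the new signature, with example_end_io as a placeholder name:

static enum rq_end_io_ret example_end_io(struct request *rq, blk_status_t status)
{
	/* this callback owns the request, so release it here */
	blk_mq_free_request(rq);
	return RQ_END_IO_NONE;
}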
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 729e309e6034..2d20da55fb64 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -449,25 +449,9 @@ static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
if (ret < 0)
goto out_put_request;
- ret = 0;
- if (hdr->iovec_count && hdr->dxfer_len) {
- struct iov_iter i;
- struct iovec *iov = NULL;
-
- ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
- hdr->iovec_count, 0, &iov, &i);
- if (ret < 0)
- goto out_put_request;
-
- /* SG_IO howto says that the shorter of the two wins */
- iov_iter_truncate(&i, hdr->dxfer_len);
-
- ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
- hdr->dxfer_len, GFP_KERNEL);
-
+ ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL, hdr->iovec_count && hdr->dxfer_len,
+ hdr->iovec_count, 0, rq_data_dir(rq));
if (ret)
goto out_put_request;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ef08029a0079..8b89fab7c420 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -581,16 +581,36 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
return false;
}
+static inline u8 get_scsi_ml_byte(int result)
+{
+ return (result >> 8) & 0xff;
+}
+
/**
* scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
- * @cmd: SCSI command
* @result: scsi error code
*
- * Translate a SCSI result code into a blk_status_t value. May reset the host
- * byte of @cmd->result.
+ * Translate a SCSI result code into a blk_status_t value.
*/
-static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
+static blk_status_t scsi_result_to_blk_status(int result)
{
+ /*
+ * Check the scsi-ml byte first in case we converted a host or status
+ * byte.
+ */
+ switch (get_scsi_ml_byte(result)) {
+ case SCSIML_STAT_OK:
+ break;
+ case SCSIML_STAT_RESV_CONFLICT:
+ return BLK_STS_NEXUS;
+ case SCSIML_STAT_NOSPC:
+ return BLK_STS_NOSPC;
+ case SCSIML_STAT_MED_ERROR:
+ return BLK_STS_MEDIUM;
+ case SCSIML_STAT_TGT_FAILURE:
+ return BLK_STS_TARGET;
+ }
+
switch (host_byte(result)) {
case DID_OK:
if (scsi_status_is_good(result))
@@ -599,18 +619,6 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
case DID_TRANSPORT_FAILFAST:
case DID_TRANSPORT_MARGINAL:
return BLK_STS_TRANSPORT;
- case DID_TARGET_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_TARGET;
- case DID_NEXUS_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NEXUS;
- case DID_ALLOC_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NOSPC;
- case DID_MEDIUM_ERROR:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_MEDIUM;
default:
return BLK_STS_IOERR;
}
@@ -697,7 +705,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
if (sense_valid)
sense_current = !scsi_sense_is_deferred(&sshdr);
- blk_stat = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -878,14 +886,14 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
SCSI_SENSE_BUFFERSIZE);
}
if (sense_current)
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
} else if (blk_rq_bytes(req) == 0 && sense_current) {
/*
* Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets *blk_statp explicitly for the problem case.
*/
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
}
/*
* Recovered errors need reporting, but they're always treated as
@@ -1856,13 +1864,13 @@ static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int scsi_map_queues(struct blk_mq_tag_set *set)
+static void scsi_map_queues(struct blk_mq_tag_set *set)
{
struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -1983,9 +1991,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
return blk_mq_alloc_tag_set(tag_set);
}
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
{
+ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+ tagset_refcnt);
+
blk_mq_free_tag_set(&shost->tag_set);
+ complete(&shost->tagset_freed);
}
/**
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 429663bd78ec..c52de9a973e4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -19,6 +19,17 @@ struct scsi_nl_hdr;
#define SCSI_CMD_RETRIES_NO_LIMIT -1
/*
+ * Error codes used by scsi-ml internally. These must not be used by drivers.
+ */
+enum scsi_ml_status {
+ SCSIML_STAT_OK = 0x00,
+ SCSIML_STAT_RESV_CONFLICT = 0x01, /* Reservation conflict */
+ SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */
+ SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */
+ SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */
+};
+
+/*
* Scsi Error Handler Flags
*/
#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */
@@ -94,7 +105,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern void scsi_mq_free_tags(struct kref *kref);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
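The enum above pairs with the set_scsi_ml_byte()/get_scsi_ml_byte() helpers added in scsi_error.c and scsi_lib.c: the scsi-ml status is carried in bits 8-15 of cmd->result, so the host byte no longer has to be rewritten (and then reset) to smuggle midlayer-internal conditions to scsi_result_to_blk_status(). A standalone sketch of the packing those helpers imply; pack_/unpack_ are illustrative names, not kernel API:

static inline int pack_scsi_ml_byte(int result, u8 ml_status)
{
	/* keep the status, host and driver bytes; replace bits 8-15 */
	return (result & 0xffff00ff) | (ml_status << 8);
}

static inline u8 unpack_scsi_ml_byte(int result)
{
	return (result >> 8) & 0xff;
}

For example, a result packed with SCSIML_STAT_NOSPC is translated by scsi_result_to_blk_status() to BLK_STS_NOSPC without touching the host byte.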
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ac6059702d13..5d27f5196de6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
+ kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
@@ -406,14 +407,9 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
- struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
-
- if (atomic_dec_return(&shost->target_count) == 0)
- wake_up(&shost->targets_wq);
-
put_device(parent);
}
@@ -526,10 +522,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
- init_waitqueue_head(&starget->sdev_wq);
-
- atomic_inc(&shost->target_count);
-
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9dad2fd5297f..c95177ca6ed2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -443,15 +443,18 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct scsi_device *sdev = container_of(work, struct scsi_device,
- ew.work);
- struct scsi_target *starget = sdev->sdev_target;
+ struct scsi_device *sdev;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ struct module *mod;
+
+ sdev = container_of(work, struct scsi_device, ew.work);
+
+ mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -513,16 +516,19 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
- if (starget && atomic_dec_return(&starget->sdev_count) == 0)
- wake_up(&starget->sdev_wq);
-
if (parent)
put_device(parent);
+ module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+
+ /* Set module pointer as NULL in case of module unloading */
+ if (!try_module_get(sdp->host->hostt->module))
+ sdp->host->hostt->module = NULL;
+
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -970,6 +976,7 @@ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
+show_sdev_iostat(iotmo_cnt);
static ssize_t
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1289,6 +1296,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_iorequest_cnt.attr,
&dev_attr_iodone_cnt.attr,
&dev_attr_ioerr_cnt.attr,
+ &dev_attr_iotmo_cnt.attr,
&dev_attr_modalias.attr,
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
@@ -1470,6 +1478,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
mutex_unlock(&sdev->state_mutex);
blk_mq_destroy_queue(sdev->request_queue);
+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
@@ -1529,14 +1538,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
-
- /*
- * After scsi_remove_target() returns its caller can remove resources
- * associated with @starget, e.g. an rport or session. Wait until all
- * devices associated with @starget have been removed to prevent that
- * a SCSI error handling callback function triggers a use-after-free.
- */
- wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
/**
@@ -1647,9 +1648,6 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
-
- atomic_inc(&starget->sdev_count);
-
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a2524106206d..8934160c4a33 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -543,7 +543,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
struct nlmsghdr *nlh;
struct fc_nl_event *event;
const char *name;
- u32 len;
+ size_t len, padding;
int err;
if (!data_buf || data_len < 4)
@@ -554,7 +554,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
goto send_fail;
}
- len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
+ len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
@@ -578,7 +578,9 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
event->event_num = event_number;
event->event_code = event_code;
if (data_len)
- memcpy(&event->event_data, data_buf, data_len);
+ memcpy(event->event_data_flex, data_buf, data_len);
+ padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
+ memset(event->event_data_flex + data_len, 0, padding);
nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
GFP_KERNEL);
@@ -1170,7 +1172,7 @@ static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
return 0;
}
-fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
+fc_rport_show_function(dev_loss_tmo, "%u\n", 20, )
static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bd72c38d7bfc..f569cf0095c2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -998,8 +998,9 @@ void
spi_dv_device(struct scsi_device *sdev)
{
struct scsi_target *starget = sdev->sdev_target;
- u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ unsigned int sleep_flags;
+ u8 *buffer;
/*
* Because this function and the power management code both call
@@ -1007,7 +1008,7 @@ spi_dv_device(struct scsi_device *sdev)
* while suspend or resume is in progress. Hence the
* lock/unlock_system_sleep() calls.
*/
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
if (scsi_autopm_get_device(sdev))
goto unlock_system_sleep;
@@ -1058,7 +1059,7 @@ put_autopm:
scsi_autopm_put_device(sdev);
unlock_system_sleep:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL(spi_dv_device);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 340b050ad28d..ce34a8ad53b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
} Sg_device;
/* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -1311,7 +1311,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void
+static enum rq_end_io_ret
sg_rq_end_io(struct request *rq, blk_status_t status)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -1324,11 +1324,11 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
- return;
+ return RQ_END_IO_NONE;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
- return;
+ return RQ_END_IO_NONE;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
@@ -1406,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
+ return RQ_END_IO_NONE;
}
static const struct file_operations sg_fops = {
@@ -1803,26 +1804,8 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md->from_user = 0;
}
- if (iov_count) {
- struct iovec *iov = NULL;
- struct iov_iter i;
-
- res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
- if (res < 0)
- return res;
-
- iov_iter_truncate(&i, hp->dxfer_len);
- if (!iov_iter_count(&i)) {
- kfree(iov);
- return -EINVAL;
- }
-
- res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
- kfree(iov);
- } else
- res = blk_rq_map_user(q, rq, md, hp->dxferp,
- hp->dxfer_len, GFP_ATOMIC);
-
+ res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
+ GFP_ATOMIC, iov_count, iov_count, 1, rw);
if (!res) {
srp->bio = rq->bio;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 7a8c2c75acba..b971fbe3b3a1 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6436,12 +6436,12 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 850172a2b8f1..b90a440e135d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -512,7 +512,8 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_dec(&STp->stats->in_flight);
}
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+ blk_status_t status)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
@@ -532,6 +533,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
blk_rq_unmap_user(tmp);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
@@ -4246,11 +4248,10 @@ static int st_probe(struct device *dev)
struct st_partstat *STps;
struct st_buffer *buffer;
int i, error;
- char *stp;
if (SDp->type != TYPE_TAPE)
return -ENODEV;
- if ((stp = st_incompatible(SDp))) {
+ if (st_incompatible(SDp)) {
sdev_printk(KERN_INFO, SDp,
"OnStream tapes are no longer supported;\n");
sdev_printk(KERN_INFO, SDp,
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index e6420f2127ce..8def242675ef 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -665,16 +665,17 @@ static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
- struct st_drvver ver;
+ const struct st_drvver ver = {
+ .major = ST_VER_MAJOR,
+ .minor = ST_VER_MINOR,
+ .oem = ST_OEM,
+ .build = ST_BUILD_VER,
+ .signature[0] = PASSTHRU_SIGNATURE,
+ .console_id = host->max_id - 1,
+ .host_no = hba->host->host_no,
+ };
size_t cp_len = sizeof(ver);
- ver.major = ST_VER_MAJOR;
- ver.minor = ST_VER_MINOR;
- ver.oem = ST_OEM;
- ver.build = ST_BUILD_VER;
- ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = host->max_id - 1;
- ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
if (sizeof(ver) == cp_len)
cmd->result = DID_OK << 16;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ced292c4b96..bc46721aa01c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -60,6 +60,9 @@
#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
+/* channel callback timeout in ms */
+#define CALLBACK_TIMEOUT 2
+
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
VSTOR_OPERATION_COMPLETE_IO = 1,
@@ -1029,7 +1032,7 @@ do_work:
*/
wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
if (!wrk) {
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_BAD_TARGET);
return;
}
@@ -1204,6 +1207,7 @@ static void storvsc_on_channel_callback(void *context)
struct hv_device *device;
struct storvsc_device *stor_device;
struct Scsi_Host *shost;
+ unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1224,6 +1228,11 @@ static void storvsc_on_channel_callback(void *context)
u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
sizeof(enum vstor_packet_operation);
+ if (unlikely(time_after(jiffies, time_limit))) {
+ hv_pkt_iter_close(channel);
+ return;
+ }
+
if (pktlen < minlen) {
dev_err(&device->device,
"Invalid pkt: id=%llu, len=%u, minlen=%u\n",
@@ -2059,7 +2068,7 @@ err_out3:
err_out2:
/*
* Once we have connected with the host, we would need to
- * to invoke storvsc_dev_remove() to rollback this state and
+ * invoke storvsc_dev_remove() to rollback this state and
* this call also frees up the stor_device; hence the jump around
* err_out1 label.
*/
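The CALLBACK_TIMEOUT hunks above bound how long storvsc_on_channel_callback() keeps draining the VMBus channel before handing the ring back. The underlying idiom is a plain jiffies deadline check; a rough standalone sketch, where CALLBACK_TIMEOUT mirrors the 2 ms budget defined above and more_work()/process_one_item() are placeholders for the ring-buffer iteration:

static void bounded_drain(void)
{
	unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);

	while (more_work()) {
		if (unlikely(time_after(jiffies, time_limit)))
			break;	/* stop early; remaining work is picked up later */
		process_one_item();
	}
}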
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 578c4b6d0f7d..2a79ab16134b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -141,10 +141,10 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
break;
case VIRTIO_SCSI_S_TARGET_FAILURE:
- set_host_byte(sc, DID_TARGET_FAILURE);
+ set_host_byte(sc, DID_BAD_TARGET);
break;
case VIRTIO_SCSI_S_NEXUS_FAILURE:
- set_host_byte(sc, DID_NEXUS_FAILURE);
+ set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
break;
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
@@ -711,12 +711,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+ blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 3fe562047d85..e4fafc77bd20 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -162,65 +162,6 @@ module_param(setup_strings, charp, 0);
static void wd33c93_execute(struct Scsi_Host *instance);
-#ifdef CONFIG_WD33C93_PIO
-static inline uchar
-read_wd33c93(const wd33c93_regs regs, uchar reg_num)
-{
- uchar data;
-
- outb(reg_num, regs.SASR);
- data = inb(regs.SCMD);
- return data;
-}
-
-static inline unsigned long
-read_wd33c93_count(const wd33c93_regs regs)
-{
- unsigned long value;
-
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- value = inb(regs.SCMD) << 16;
- value |= inb(regs.SCMD) << 8;
- value |= inb(regs.SCMD);
- return value;
-}
-
-static inline uchar
-read_aux_stat(const wd33c93_regs regs)
-{
- return inb(regs.SASR);
-}
-
-static inline void
-write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value)
-{
- outb(reg_num, regs.SASR);
- outb(value, regs.SCMD);
-}
-
-static inline void
-write_wd33c93_count(const wd33c93_regs regs, unsigned long value)
-{
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- outb((value >> 16) & 0xff, regs.SCMD);
- outb((value >> 8) & 0xff, regs.SCMD);
- outb( value & 0xff, regs.SCMD);
-}
-
-#define write_wd33c93_cmd(regs, cmd) \
- write_wd33c93((regs), WD_COMMAND, (cmd))
-
-static inline void
-write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
-{
- int i;
-
- outb(WD_CDB_1, regs.SASR);
- for (i=0; i<len; i++)
- outb(cmnd[i], regs.SCMD);
-}
-
-#else /* CONFIG_WD33C93_PIO */
static inline uchar
read_wd33c93(const wd33c93_regs regs, uchar reg_num)
{
@@ -287,7 +228,6 @@ write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
for (i = 0; i < len; i++)
*regs.SCMD = cmnd[i];
}
-#endif /* CONFIG_WD33C93_PIO */
static inline uchar
read_1_byte(const wd33c93_regs regs)
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index b3800baccd2c..e5e4254b1477 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -180,13 +180,8 @@
/* This is what the 3393 chip looks like to us */
typedef struct {
-#ifdef CONFIG_WD33C93_PIO
- unsigned int SASR;
- unsigned int SCMD;
-#else
volatile unsigned char *SASR;
volatile unsigned char *SCMD;
-#endif
} wd33c93_regs;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 51afc66e839d..66b316d173b0 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -289,14 +289,6 @@ static unsigned int scsifront_host_byte(int32_t rslt)
return DID_TRANSPORT_DISRUPTED;
case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
return DID_TRANSPORT_FAILFAST;
- case XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE:
- return DID_TARGET_FAILURE;
- case XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE:
- return DID_NEXUS_FAILURE;
- case XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE:
- return DID_ALLOC_FAILURE;
- case XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR:
- return DID_MEDIUM_ERROR;
case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
return DID_TRANSPORT_MARGINAL;
default:
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index 1235b7dc8496..2ed821f75816 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -22,7 +22,8 @@ config SLIM_QCOM_CTRL
config SLIM_QCOM_NGD_CTRL
tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
- depends on HAS_IOMEM && DMA_ENGINE && NET && QCOM_RPROC_COMMON
+ depends on HAS_IOMEM && DMA_ENGINE && NET
+ depends on QCOM_RPROC_COMMON || COMPILE_TEST
depends on ARCH_QCOM || COMPILE_TEST
select QCOM_QMI_HELPERS
select QCOM_PDR_HELPERS
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 0aa8408464ad..76c5e446d243 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1470,7 +1470,13 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
- platform_device_add(ngd->pdev);
+ ret = platform_device_add(ngd->pdev);
+ if (ret) {
+ platform_device_put(ngd->pdev);
+ kfree(ngd);
+ of_node_put(node);
+ return ret;
+ }
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
@@ -1543,10 +1549,8 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
- if (ret) {
- dev_err(&pdev->dev, "request IRQ failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "request IRQ failed\n");
ctrl->nb.notifier_call = qcom_slim_ngd_ssr_notify;
ctrl->notifier = qcom_register_ssr_notifier("lpass", &ctrl->nb);
@@ -1575,18 +1579,27 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
ctrl->pdr = pdr_handle_alloc(slim_pd_status, ctrl);
if (IS_ERR(ctrl->pdr)) {
- dev_err(dev, "Failed to init PDR handle\n");
- return PTR_ERR(ctrl->pdr);
+ ret = dev_err_probe(dev, PTR_ERR(ctrl->pdr),
+ "Failed to init PDR handle\n");
+ goto err_pdr_alloc;
}
pds = pdr_add_lookup(ctrl->pdr, "avs/audio", "msm/adsp/audio_pd");
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
- dev_err(dev, "pdr add lookup failed: %d\n", ret);
- return PTR_ERR(pds);
+ ret = dev_err_probe(dev, PTR_ERR(pds), "pdr add lookup failed\n");
+ goto err_pdr_lookup;
}
platform_driver_register(&qcom_slim_ngd_driver);
return of_qcom_slim_ngd_register(dev, ctrl);
+
+err_pdr_alloc:
+ qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);
+
+err_pdr_lookup:
+ pdr_handle_release(ctrl->pdr);
+
+ return ret;
}
static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
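The of_qcom_slim_ngd_register() fix above applies the standard platform-device ownership rule: once platform_device_alloc() has succeeded, a failed platform_device_add() must be unwound with platform_device_put(), which drops the reference taken by the allocation. In isolation, and with a placeholder device name, the pattern looks like this:

static int register_example_child(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-dev", PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret) {
		/* drops the reference from platform_device_alloc() */
		platform_device_put(pdev);
		return ret;
	}

	return 0;
}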
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 2be3afe6c2e3..dd5f2a13ceb5 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -469,6 +469,7 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
{
const struct meson_ee_pwrc_domain_data *match;
struct regmap *regmap_ao, *regmap_hhi;
+ struct device_node *parent_np;
struct meson_ee_pwrc *pwrc;
int i, ret;
@@ -495,7 +496,9 @@ static int meson_ee_pwrc_probe(struct platform_device *pdev)
pwrc->xlate.num_domains = match->count;
- regmap_hhi = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ parent_np = of_get_parent(pdev->dev.of_node);
+ regmap_hhi = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_hhi)) {
dev_err(&pdev->dev, "failed to get HHI regmap\n");
return PTR_ERR(regmap_hhi);
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index b4615b288625..312fd9afccb0 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -273,6 +273,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
struct meson_gx_pwrc_vpu *vpu_pd;
+ struct device_node *parent_np;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
@@ -291,7 +292,9 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
- regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ parent_np = of_get_parent(pdev->dev.of_node);
+ regmap_ao = syscon_node_to_regmap(parent_np);
+ of_node_put(parent_np);
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
return PTR_ERR(regmap_ao);
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index cf1129e9f76b..031ec4aa06d5 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -660,6 +660,12 @@ int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
}
EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
+int apple_rtkit_poll(struct apple_rtkit *rtk)
+{
+ return mbox_client_peek_data(rtk->mbox_chan);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_poll);
+
int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
{
u64 msg;
diff --git a/drivers/soc/bcm/bcm63xx/Kconfig b/drivers/soc/bcm/bcm63xx/Kconfig
index 9e501c8ac5ce..355c34482076 100644
--- a/drivers/soc/bcm/bcm63xx/Kconfig
+++ b/drivers/soc/bcm/bcm63xx/Kconfig
@@ -13,8 +13,8 @@ endif # SOC_BCM63XX
config BCM_PMB
bool "Broadcom PMB (Power Management Bus) driver"
- depends on ARCH_BCM4908 || (COMPILE_TEST && OF)
- default ARCH_BCM4908
+ depends on ARCH_BCMBCA || (COMPILE_TEST && OF)
+ default ARCH_BCMBCA
select PM_GENERIC_DOMAINS if PM
help
This enables support for Broadcom's PMB (Power Management Bus) that
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 1467bbd59690..e1d7b4543248 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -288,7 +288,6 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
out:
- of_node_put(np);
return ret;
}
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index d6b30d521307..d681cd24c6e1 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -25,7 +25,6 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
-#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/panic_notifier.h>
@@ -664,7 +663,20 @@ static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
return of_io_request_and_map(dn, index, dn->full_name);
}
-
+/*
+ * The AON is a small domain in the SoC that can retain its state across
+ * various system wide sleep states and specific reset conditions; the
+ * AON DATA RAM is a small RAM of a few words (< 1KB) which can store
+ * persistent information across such events.
+ *
+ * The purpose of the below panic notifier is to help with notifying
+ * the bootloader that a panic occurred and so that it should try its
+ * best to preserve the DRAM contents holding that buffer for recovery
+ * by the kernel as opposed to wiping out DRAM clean again.
+ *
+ * Reference: comment from Florian Fainelli, at
+ * https://lore.kernel.org/lkml/781cafb0-8d06-8b56-907a-5175c2da196a@gmail.com
+ */
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -684,13 +696,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;
/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;
@@ -700,8 +713,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}
writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -711,7 +726,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -731,17 +747,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}
base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;
pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -752,14 +771,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}
of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ddr_seq_data = of_id->data;
@@ -779,21 +802,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ret = brcmstb_init_sram(dn);
of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}
ctrl.pdev = pdev;
ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -813,7 +839,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
out:
kfree(ctrl.s3_params);
-
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);
return ret;
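The new comment block above documents why the driver hooks the panic path at all: a marker written into the AON data RAM survives the event, so the bootloader knows to preserve DRAM contents for post-mortem recovery (AON_REG_PANIC is the same register cleared at probe time above). Reduced to its essentials, the pattern is an atomic panic notifier plus a single register write; in this hedged sketch PANIC_MAGIC and the aon_sram mapping are placeholders standing in for the driver's real state:

static void __iomem *aon_sram;		/* mapped AON data RAM */

static int example_panic_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	/* leave a breadcrumb for the bootloader before the system goes down */
	writel_relaxed(PANIC_MAGIC, aon_sram + AON_REG_PANIC);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_notify,
};

static void example_register_panic_notifier(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &example_panic_nb);
}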
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 07d52cafbb31..fcec6ed83d5e 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index fde4edd83c14..739e4eee6b75 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2483,13 +2483,8 @@ out:
}
EXPORT_SYMBOL(qman_create_cgr);
-int qman_delete_cgr(struct qman_cgr *cgr)
+static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
{
- unsigned long irqflags;
- struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts;
- int ret = 0;
- struct qman_cgr *i;
struct qman_portal *p = get_affine_portal();
if (cgr->chan != p->config->channel) {
@@ -2497,10 +2492,25 @@ int qman_delete_cgr(struct qman_cgr *cgr)
dev_err(p->config->dev, "CGR not owned by current portal");
dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
cgr->chan, p->config->channel);
-
- ret = -EINVAL;
- goto put_portal;
+ put_affine_portal();
+ return NULL;
}
+
+ return p;
+}
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
@@ -2528,7 +2538,6 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs);
release_lock:
spin_unlock_irqrestore(&p->cgr_lock, irqflags);
-put_portal:
put_affine_portal();
return ret;
}
@@ -2559,6 +2568,54 @@ void qman_delete_cgr_safe(struct qman_cgr *cgr)
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
+static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ int ret;
+ unsigned long irqflags;
+ struct qman_portal *p = qman_cgr_get_affine_portal(cgr);
+
+ if (!p)
+ return -EINVAL;
+
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ ret = qm_modify_cgr(cgr, 0, opts);
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+
+struct update_cgr_params {
+ struct qman_cgr *cgr;
+ struct qm_mcc_initcgr *opts;
+ int ret;
+};
+
+static void qman_update_cgr_smp_call(void *p)
+{
+ struct update_cgr_params *params = p;
+
+ params->ret = qman_update_cgr(params->cgr, params->opts);
+}
+
+int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
+{
+ struct update_cgr_params params = {
+ .cgr = cgr,
+ .opts = opts,
+ };
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id())
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_update_cgr_smp_call, &params,
+ true);
+ else
+ params.ret = qman_update_cgr(cgr, opts);
+ preempt_enable();
+ return params.ret;
+}
+EXPORT_SYMBOL(qman_update_cgr_safe);
+
/* Cleanup FQs */
static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
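qman_update_cgr_safe() above uses a common "run it on the owning CPU" idiom: disable preemption, and either call the helper directly when the current CPU owns the affine portal or bounce it through smp_call_function_single() with wait set, so the return value can be collected from the parameter block. Stripped of the QMan specifics, the idiom looks roughly like this, where owner_cpu and do_work() are placeholders:

struct remote_call {
	int arg;
	int ret;
};

static void remote_call_fn(void *info)
{
	struct remote_call *rc = info;

	rc->ret = do_work(rc->arg);	/* do_work() is a placeholder */
}

static int run_on_owner(int owner_cpu, int arg)
{
	struct remote_call rc = { .arg = arg };

	preempt_disable();
	if (owner_cpu != smp_processor_id())
		smp_call_function_single(owner_cpu, remote_call_fn, &rc, true);
	else
		rc.ret = do_work(arg);
	preempt_enable();

	return rc.ret;
}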
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index a840494e849a..4b906791d6c7 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -20,4 +20,12 @@ config SOC_IMX8M
support, it will provide the SoC info like SoC family,
ID and revision etc.
+config SOC_IMX9
+ tristate "i.MX9 SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ select SOC_BUS
+ help
+ If you say yes here, you get support for the NXP i.MX9 family
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 63cd29f6d4d2..7b4099ceafd6 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
obj-$(CONFIG_SOC_IMX8M) += imx8mp-blk-ctrl.o
+obj-$(CONFIG_SOC_IMX9) += imx93-src.o imx93-pd.o
+obj-$(CONFIG_SOC_IMX9) += imx93-blk-ctrl.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 6383a4edc360..88aee59730e3 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -335,6 +335,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
}
+ reset_control_assert(domain->reset);
+
/* Enable reset clocks for all devices in the domain */
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
@@ -342,7 +344,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}
- reset_control_assert(domain->reset);
+ /* delays for reset to propagate */
+ udelay(5);
if (domain->bits.pxx) {
/* request the domain to power up */
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index dff7529268e4..00879615a701 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -5,6 +5,7 @@
*/
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -37,6 +38,8 @@ struct imx8m_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
int num_clks;
+ const char * const *path_names;
+ int num_paths;
const char *gpc_name;
u32 rst_mask;
u32 clk_mask;
@@ -52,13 +55,16 @@ struct imx8m_blk_ctrl_domain_data {
};
#define DOMAIN_MAX_CLKS 4
+#define DOMAIN_MAX_PATHS 4
struct imx8m_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx8m_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct icc_bulk_data paths[DOMAIN_MAX_PATHS];
struct device *power_dev;
struct imx8m_blk_ctrl *bc;
+ int num_paths;
};
struct imx8m_blk_ctrl_data {
@@ -117,6 +123,10 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
if (data->mipi_phy_rst_mask)
regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
+ ret = icc_bulk_set_bw(domain->num_paths, domain->paths);
+ if (ret)
+ dev_err(bc->dev, "failed to set icc bw\n");
+
/* disable upstream clocks */
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
@@ -152,19 +162,6 @@ static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
return 0;
}
-static struct generic_pm_domain *
-imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
-{
- struct genpd_onecell_data *onecell_data = data;
- unsigned int index = args->args[0];
-
- if (args->args_count != 1 ||
- index >= onecell_data->num_domains)
- return ERR_PTR(-EINVAL);
-
- return onecell_data->domains[index];
-}
-
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
@@ -206,7 +203,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
return -ENOMEM;
bc->onecell_data.num_domains = bc_data->num_domains;
- bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
bc->onecell_data.domains =
devm_kcalloc(dev, bc_data->num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
@@ -224,10 +220,29 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
int j;
domain->data = data;
+ domain->num_paths = data->num_paths;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
+ for (j = 0; j < data->num_paths; j++) {
+ domain->paths[j].name = data->path_names[j];
+ /* Fake value for now; just let the ICC configure NoC mode/priority */
+ domain->paths[j].avg_bw = 1;
+ domain->paths[j].peak_bw = 1;
+ }
+
+ ret = devm_of_icc_bulk_get(dev, data->num_paths, domain->paths);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_warn_once(dev, "Could not get interconnect paths, NoC will stay unconfigured!\n");
+ domain->num_paths = 0;
+ } else {
+ dev_err_probe(dev, ret, "failed to get noc entries\n");
+ goto cleanup_pds;
+ }
+ }
+
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
@@ -243,7 +258,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
- dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
@@ -455,6 +469,46 @@ static const struct imx8m_blk_ctrl_data imx8mm_vpu_blk_ctl_dev_data = {
.num_domains = ARRAY_SIZE(imx8mm_vpu_blk_ctl_domain_data),
};
+static const struct imx8m_blk_ctrl_domain_data imx8mp_vpu_blk_ctl_domain_data[] = {
+ [IMX8MP_VPUBLK_PD_G1] = {
+ .name = "vpublk-g1",
+ .clk_names = (const char *[]){ "g1", },
+ .num_clks = 1,
+ .gpc_name = "g1",
+ .rst_mask = BIT(1),
+ .clk_mask = BIT(1),
+ .path_names = (const char *[]){"g1"},
+ .num_paths = 1,
+ },
+ [IMX8MP_VPUBLK_PD_G2] = {
+ .name = "vpublk-g2",
+ .clk_names = (const char *[]){ "g2", },
+ .num_clks = 1,
+ .gpc_name = "g2",
+ .rst_mask = BIT(0),
+ .clk_mask = BIT(0),
+ .path_names = (const char *[]){"g2"},
+ .num_paths = 1,
+ },
+ [IMX8MP_VPUBLK_PD_VC8000E] = {
+ .name = "vpublk-vc8000e",
+ .clk_names = (const char *[]){ "vc8000e", },
+ .num_clks = 1,
+ .gpc_name = "vc8000e",
+ .rst_mask = BIT(2),
+ .clk_mask = BIT(2),
+ .path_names = (const char *[]){"vc8000e"},
+ .num_paths = 1,
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mp_vpu_blk_ctl_dev_data = {
+ .max_reg = 0x18,
+ .power_notifier_fn = imx8mm_vpu_power_notifier,
+ .domains = imx8mp_vpu_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mp_vpu_blk_ctl_domain_data),
+};
+
static int imx8mm_disp_power_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -650,6 +704,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "lcdif1",
.rst_mask = BIT(4) | BIT(5) | BIT(23),
.clk_mask = BIT(4) | BIT(5) | BIT(23),
+ .path_names = (const char *[]){"lcdif-rd", "lcdif-wr"},
+ .num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_ISI] = {
.name = "mediablk-isi",
@@ -658,6 +714,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "isi",
.rst_mask = BIT(6) | BIT(7),
.clk_mask = BIT(6) | BIT(7),
+ .path_names = (const char *[]){"isi0", "isi1", "isi2"},
+ .num_paths = 3,
},
[IMX8MP_MEDIABLK_PD_MIPI_CSI2_2] = {
.name = "mediablk-mipi-csi2-2",
@@ -675,6 +733,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "lcdif2",
.rst_mask = BIT(11) | BIT(12) | BIT(24),
.clk_mask = BIT(11) | BIT(12) | BIT(24),
+ .path_names = (const char *[]){"lcdif-rd", "lcdif-wr"},
+ .num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_ISP] = {
.name = "mediablk-isp",
@@ -683,6 +743,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "isp",
.rst_mask = BIT(16) | BIT(17) | BIT(18),
.clk_mask = BIT(16) | BIT(17) | BIT(18),
+ .path_names = (const char *[]){"isp0", "isp1"},
+ .num_paths = 2,
},
[IMX8MP_MEDIABLK_PD_DWE] = {
.name = "mediablk-dwe",
@@ -691,6 +753,8 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[
.gpc_name = "dwe",
.rst_mask = BIT(19) | BIT(20) | BIT(21),
.clk_mask = BIT(19) | BIT(20) | BIT(21),
+ .path_names = (const char *[]){"dwe"},
+ .num_paths = 1,
},
[IMX8MP_MEDIABLK_PD_MIPI_DSI_2] = {
.name = "mediablk-mipi-dsi-2",
@@ -789,6 +853,9 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
.compatible = "fsl,imx8mq-vpu-blk-ctrl",
.data = &imx8mq_vpu_blk_ctl_dev_data
}, {
+ .compatible = "fsl,imx8mp-vpu-blk-ctrl",
+ .data = &imx8mp_vpu_blk_ctl_dev_data
+ }, {
/* Sentinel */
}
};
diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c
index 4ca2ede6871b..0e3b6ba22f94 100644
--- a/drivers/soc/imx/imx8mp-blk-ctrl.c
+++ b/drivers/soc/imx/imx8mp-blk-ctrl.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -18,6 +19,8 @@
#define GPR_REG0 0x0
#define PCIE_CLOCK_MODULE_EN BIT(0)
#define USB_CLOCK_MODULE_EN BIT(1)
+#define PCIE_PHY_APB_RST BIT(4)
+#define PCIE_PHY_INIT_RST BIT(5)
struct imx8mp_blk_ctrl_domain;
@@ -36,17 +39,22 @@ struct imx8mp_blk_ctrl_domain_data {
const char *name;
const char * const *clk_names;
int num_clks;
+ const char * const *path_names;
+ int num_paths;
const char *gpc_name;
};
#define DOMAIN_MAX_CLKS 2
+#define DOMAIN_MAX_PATHS 3
struct imx8mp_blk_ctrl_domain {
struct generic_pm_domain genpd;
const struct imx8mp_blk_ctrl_domain_data *data;
struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct icc_bulk_data paths[DOMAIN_MAX_PATHS];
struct device *power_dev;
struct imx8mp_blk_ctrl *bc;
+ int num_paths;
int id;
};
@@ -75,6 +83,10 @@ static void imx8mp_hsio_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
case IMX8MP_HSIOBLK_PD_PCIE:
regmap_set_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
break;
+ case IMX8MP_HSIOBLK_PD_PCIE_PHY:
+ regmap_set_bits(bc->regmap, GPR_REG0,
+ PCIE_PHY_APB_RST | PCIE_PHY_INIT_RST);
+ break;
default:
break;
}
@@ -90,6 +102,10 @@ static void imx8mp_hsio_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
case IMX8MP_HSIOBLK_PD_PCIE:
regmap_clear_bits(bc->regmap, GPR_REG0, PCIE_CLOCK_MODULE_EN);
break;
+ case IMX8MP_HSIOBLK_PD_PCIE_PHY:
+ regmap_clear_bits(bc->regmap, GPR_REG0,
+ PCIE_PHY_APB_RST | PCIE_PHY_INIT_RST);
+ break;
default:
break;
}
@@ -144,6 +160,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
.clk_names = (const char *[]){ "usb" },
.num_clks = 1,
.gpc_name = "usb",
+ .path_names = (const char *[]){"usb1", "usb2"},
+ .num_paths = 2,
},
[IMX8MP_HSIOBLK_PD_USB_PHY1] = {
.name = "hsioblk-usb-phy1",
@@ -158,6 +176,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hsio_domain_data[] = {
.clk_names = (const char *[]){ "pcie" },
.num_clks = 1,
.gpc_name = "pcie",
+ .path_names = (const char *[]){"noc-pcie", "pcie"},
+ .num_paths = 2,
},
[IMX8MP_HSIOBLK_PD_PCIE_PHY] = {
.name = "hsioblk-pcie-phy",
@@ -225,6 +245,13 @@ static void imx8mp_hdmi_blk_ctrl_power_on(struct imx8mp_blk_ctrl *bc,
regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3));
break;
+ case IMX8MP_HDMIBLK_PD_HDCP:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(11));
+ break;
+ case IMX8MP_HDMIBLK_PD_HRV:
+ regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(3) | BIT(4) | BIT(5));
+ regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(15));
+ break;
default:
break;
}
@@ -273,6 +300,13 @@ static void imx8mp_hdmi_blk_ctrl_power_off(struct imx8mp_blk_ctrl *bc,
regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12));
regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24));
break;
+ case IMX8MP_HDMIBLK_PD_HDCP:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(11));
+ break;
+ case IMX8MP_HDMIBLK_PD_HRV:
+ regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(15));
+ regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(3) | BIT(4) | BIT(5));
+ break;
default:
break;
}
@@ -322,6 +356,8 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
.clk_names = (const char *[]){ "axi", "apb" },
.num_clks = 2,
.gpc_name = "lcdif",
+ .path_names = (const char *[]){"lcdif-hdmi"},
+ .num_paths = 1,
},
[IMX8MP_HDMIBLK_PD_PAI] = {
.name = "hdmiblk-pai",
@@ -353,6 +389,22 @@ static const struct imx8mp_blk_ctrl_domain_data imx8mp_hdmi_domain_data[] = {
.num_clks = 2,
.gpc_name = "hdmi-tx-phy",
},
+ [IMX8MP_HDMIBLK_PD_HRV] = {
+ .name = "hdmiblk-hrv",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "hrv",
+ .path_names = (const char *[]){"hrv"},
+ .num_paths = 1,
+ },
+ [IMX8MP_HDMIBLK_PD_HDCP] = {
+ .name = "hdmiblk-hdcp",
+ .clk_names = (const char *[]){ "axi", "apb" },
+ .num_clks = 2,
+ .gpc_name = "hdcp",
+ .path_names = (const char *[]){"hdcp"},
+ .num_paths = 1,
+ },
};
static const struct imx8mp_blk_ctrl_data imx8mp_hdmi_blk_ctl_dev_data = {
@@ -395,6 +447,10 @@ static int imx8mp_blk_ctrl_power_on(struct generic_pm_domain *genpd)
goto clk_disable;
}
+ ret = icc_bulk_set_bw(domain->num_paths, domain->paths);
+ if (ret)
+ dev_err(bc->dev, "failed to set icc bw\n");
+
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
return 0;
@@ -434,19 +490,6 @@ static int imx8mp_blk_ctrl_power_off(struct generic_pm_domain *genpd)
return 0;
}
-static struct generic_pm_domain *
-imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
-{
- struct genpd_onecell_data *onecell_data = data;
- unsigned int index = args->args[0];
-
- if (args->args_count != 1 ||
- index >= onecell_data->num_domains)
- return ERR_PTR(-EINVAL);
-
- return onecell_data->domains[index];
-}
-
static struct lock_class_key blk_ctrl_genpd_lock_class;
static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
@@ -489,7 +532,6 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
return -ENOMEM;
bc->onecell_data.num_domains = num_domains;
- bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
bc->onecell_data.domains =
devm_kcalloc(dev, num_domains,
sizeof(struct generic_pm_domain *), GFP_KERNEL);
@@ -510,10 +552,29 @@ static int imx8mp_blk_ctrl_probe(struct platform_device *pdev)
int j;
domain->data = data;
+ domain->num_paths = data->num_paths;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
+ for (j = 0; j < data->num_paths; j++) {
+ domain->paths[j].name = data->path_names[j];
+ /* Fake value for now; just let the ICC configure NoC mode/priority */
+ domain->paths[j].avg_bw = 1;
+ domain->paths[j].peak_bw = 1;
+ }
+
+ ret = devm_of_icc_bulk_get(dev, data->num_paths, domain->paths);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_warn_once(dev, "Could not get interconnect paths, NoC will stay unconfigured!\n");
+ domain->num_paths = 0;
+ } else {
+ dev_err_probe(dev, ret, "failed to get noc entries\n");
+ goto cleanup_pds;
+ }
+ }
+
ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
if (ret) {
dev_err_probe(dev, ret, "failed to get clock\n");
diff --git a/drivers/soc/imx/imx93-blk-ctrl.c b/drivers/soc/imx/imx93-blk-ctrl.c
new file mode 100644
index 000000000000..2c600329436c
--- /dev/null
+++ b/drivers/soc/imx/imx93-blk-ctrl.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+#include <dt-bindings/power/fsl,imx93-power.h>
+
+#define BLK_SFT_RSTN 0x0
+#define BLK_CLK_EN 0x4
+#define BLK_MAX_CLKS 4
+
+#define DOMAIN_MAX_CLKS 4
+
+#define LCDIF_QOS_REG 0xC
+#define LCDIF_DEFAULT_QOS_OFF 12
+#define LCDIF_CFG_QOS_OFF 8
+
+#define PXP_QOS_REG 0x10
+#define PXP_R_DEFAULT_QOS_OFF 28
+#define PXP_R_CFG_QOS_OFF 24
+#define PXP_W_DEFAULT_QOS_OFF 20
+#define PXP_W_CFG_QOS_OFF 16
+
+#define ISI_CACHE_REG 0x14
+
+#define ISI_QOS_REG 0x1C
+#define ISI_V_DEFAULT_QOS_OFF 28
+#define ISI_V_CFG_QOS_OFF 24
+#define ISI_U_DEFAULT_QOS_OFF 20
+#define ISI_U_CFG_QOS_OFF 16
+#define ISI_Y_R_DEFAULT_QOS_OFF 12
+#define ISI_Y_R_CFG_QOS_OFF 8
+#define ISI_Y_W_DEFAULT_QOS_OFF 4
+#define ISI_Y_W_CFG_QOS_OFF 0
+
+#define PRIO_MASK 0xF
+
+#define PRIO(X) (X)
+
+struct imx93_blk_ctrl_domain;
+
+struct imx93_blk_ctrl {
+ struct device *dev;
+ struct regmap *regmap;
+ int num_clks;
+ struct clk_bulk_data clks[BLK_MAX_CLKS];
+ struct imx93_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+};
+
+#define DOMAIN_MAX_QOS 4
+
+struct imx93_blk_ctrl_qos {
+ u32 reg;
+ u32 cfg_off;
+ u32 default_prio;
+ u32 cfg_prio;
+};
+
+struct imx93_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ u32 rst_mask;
+ u32 clk_mask;
+ int num_qos;
+ struct imx93_blk_ctrl_qos qos[DOMAIN_MAX_QOS];
+};
+
+struct imx93_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx93_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct imx93_blk_ctrl *bc;
+};
+
+struct imx93_blk_ctrl_data {
+ const struct imx93_blk_ctrl_domain_data *domains;
+ int num_domains;
+ const char * const *clk_names;
+ int num_clks;
+ const struct regmap_access_table *reg_access_table;
+};
+
+static inline struct imx93_blk_ctrl_domain *
+to_imx93_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx93_blk_ctrl_domain, genpd);
+}
+
+static int imx93_blk_ctrl_set_qos(struct imx93_blk_ctrl_domain *domain)
+{
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+ const struct imx93_blk_ctrl_qos *qos;
+ u32 val, mask;
+ int i;
+
+ for (i = 0; i < data->num_qos; i++) {
+ qos = &data->qos[i];
+
+ mask = PRIO_MASK << qos->cfg_off;
+ mask |= PRIO_MASK << (qos->cfg_off + 4);
+ val = qos->cfg_prio << qos->cfg_off;
+ val |= qos->default_prio << (qos->cfg_off + 4);
+
+ regmap_write_bits(bc->regmap, qos->reg, mask, val);
+
+ dev_dbg(bc->dev, "QoS reg 0x%x set to 0x%x\n", qos->reg, val);
+ }
+
+ return 0;
+}
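
Worked example (illustration only, not part of the patch): expanding the mask/val computation above for the "mediablk-lcdif" entry defined further down, where cfg_off = LCDIF_CFG_QOS_OFF = 8, cfg_prio = 7 and default_prio = 3.

	/*
	 * mask = 0xF << 8 | 0xF << 12 = 0xff00
	 * val  = 7   << 8 | 3   << 12 = 0x3700
	 *
	 * so regmap_write_bits() updates bits 15:8 of LCDIF_QOS_REG (0xC) to
	 * 0x37: configured priority 7 in the low nibble of the pair, default
	 * priority 3 in the high nibble.
	 */
	static u32 example_lcdif_qos_val(void)
	{
		return (7 << LCDIF_CFG_QOS_OFF) |
		       (3 << (LCDIF_CFG_QOS_OFF + 4));	/* 0x3700 */
	}
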
+
+static int imx93_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(bc->num_clks, bc->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable bus clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+ dev_err(bc->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(bc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->dev);
+ dev_err(bc->dev, "failed to power up domain\n");
+ goto disable_clk;
+ }
+
+ /* ungate clk */
+ regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* release reset */
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ dev_dbg(bc->dev, "pd_on: name: %s\n", genpd->name);
+
+ return imx93_blk_ctrl_set_qos(domain);
+
+disable_clk:
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+
+ return ret;
+}
+
+static int imx93_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+
+ dev_dbg(bc->dev, "pd_off: name: %s\n", genpd->name);
+
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ pm_runtime_put(bc->dev);
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+
+ return 0;
+}
+
+static int imx93_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct imx93_blk_ctrl_data *bc_data = of_device_get_match_data(dev);
+ struct imx93_blk_ctrl *bc;
+ void __iomem *base;
+ int i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = bc_data->reg_access_table,
+ .wr_table = bc_data->reg_access_table,
+ .max_register = SZ_4K,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct imx93_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains)
+ return -ENOMEM;
+
+ bc->onecell_data.num_domains = bc_data->num_domains;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains)
+ return -ENOMEM;
+
+ for (i = 0; i < bc_data->num_clks; i++)
+ bc->clks[i].id = bc_data->clk_names[i];
+ bc->num_clks = bc_data->num_clks;
+
+ ret = devm_clk_bulk_get(dev, bc->num_clks, bc->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get bus clock\n");
+ return ret;
+ }
+
+ for (i = 0; i < bc_data->num_domains; i++) {
+ const struct imx93_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx93_blk_ctrl_power_on;
+ domain->genpd.power_off = imx93_blk_ctrl_power_off;
+ domain->bc = bc;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ goto cleanup_pds;
+ }
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_pds:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(&bc->domains[i].genpd);
+
+ return ret;
+}
+
+static int imx93_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx93_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ }
+
+ return 0;
+}
+
+static const struct imx93_blk_ctrl_domain_data imx93_media_blk_ctl_domain_data[] = {
+ [IMX93_MEDIABLK_PD_MIPI_DSI] = {
+ .name = "mediablk-mipi-dsi",
+ .clk_names = (const char *[]){ "dsi" },
+ .num_clks = 1,
+ .rst_mask = BIT(11) | BIT(12),
+ .clk_mask = BIT(11) | BIT(12),
+ },
+ [IMX93_MEDIABLK_PD_MIPI_CSI] = {
+ .name = "mediablk-mipi-csi",
+ .clk_names = (const char *[]){ "cam", "csi" },
+ .num_clks = 2,
+ .rst_mask = BIT(9) | BIT(10),
+ .clk_mask = BIT(9) | BIT(10),
+ },
+ [IMX93_MEDIABLK_PD_PXP] = {
+ .name = "mediablk-pxp",
+ .clk_names = (const char *[]){ "pxp" },
+ .num_clks = 1,
+ .rst_mask = BIT(7) | BIT(8),
+ .clk_mask = BIT(7) | BIT(8),
+ .num_qos = 2,
+ .qos = {
+ {
+ .reg = PXP_QOS_REG,
+ .cfg_off = PXP_R_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(6),
+ }, {
+ .reg = PXP_QOS_REG,
+ .cfg_off = PXP_W_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(6),
+ }
+ }
+ },
+ [IMX93_MEDIABLK_PD_LCDIF] = {
+ .name = "mediablk-lcdif",
+ .clk_names = (const char *[]){ "disp", "lcdif" },
+ .num_clks = 2,
+ .rst_mask = BIT(4) | BIT(5) | BIT(6),
+ .clk_mask = BIT(4) | BIT(5) | BIT(6),
+ .num_qos = 1,
+ .qos = {
+ {
+ .reg = LCDIF_QOS_REG,
+ .cfg_off = LCDIF_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }
+ }
+ },
+ [IMX93_MEDIABLK_PD_ISI] = {
+ .name = "mediablk-isi",
+ .clk_names = (const char *[]){ "isi" },
+ .num_clks = 1,
+ .rst_mask = BIT(2) | BIT(3),
+ .clk_mask = BIT(2) | BIT(3),
+ .num_qos = 4,
+ .qos = {
+ {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_Y_W_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_Y_R_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_U_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_V_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }
+ }
+ },
+};
+
+static const struct regmap_range imx93_media_blk_ctl_yes_ranges[] = {
+ regmap_reg_range(BLK_SFT_RSTN, BLK_CLK_EN),
+ regmap_reg_range(LCDIF_QOS_REG, ISI_CACHE_REG),
+ regmap_reg_range(ISI_QOS_REG, ISI_QOS_REG),
+};
+
+static const struct regmap_access_table imx93_media_blk_ctl_access_table = {
+ .yes_ranges = imx93_media_blk_ctl_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx93_media_blk_ctl_yes_ranges),
+};
+
+static const struct imx93_blk_ctrl_data imx93_media_blk_ctl_dev_data = {
+ .domains = imx93_media_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx93_media_blk_ctl_domain_data),
+ .clk_names = (const char *[]){ "axi", "apb", "nic", },
+ .num_clks = 3,
+ .reg_access_table = &imx93_media_blk_ctl_access_table,
+};
+
+static const struct of_device_id imx93_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx93-media-blk-ctrl",
+ .data = &imx93_media_blk_ctl_dev_data
+ }, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx93_blk_ctrl_of_match);
+
+static struct platform_driver imx93_blk_ctrl_driver = {
+ .probe = imx93_blk_ctrl_probe,
+ .remove = imx93_blk_ctrl_remove,
+ .driver = {
+ .name = "imx93-blk-ctrl",
+ .of_match_table = imx93_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx93_blk_ctrl_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("i.MX93 BLK CTRL driver");
+MODULE_LICENSE("GPL");
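
A hypothetical single-domain consumer (illustration only): with a "power-domains" property pointing at this provider, e.g. IMX93_MEDIABLK_PD_LCDIF, the genpd core attaches the domain to the device and runtime PM ends up calling imx93_blk_ctrl_power_on()/_off() above. Function and variable names here are assumed.

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_lcdif_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);
		/* Resuming powers the attached mediablk domain on. */
		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret < 0) {
			pm_runtime_disable(&pdev->dev);
			return ret;
		}

		/* ... device setup would go here ... */

		pm_runtime_put(&pdev->dev);	/* domain may power off again */
		pm_runtime_disable(&pdev->dev);
		return 0;
	}
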
diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c
new file mode 100644
index 000000000000..1f3d7039c1de
--- /dev/null
+++ b/drivers/soc/imx/imx93-pd.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+#define MIX_SLICE_SW_CTRL_OFF 0x20
+#define SLICE_SW_CTRL_PSW_CTRL_OFF_MASK BIT(4)
+#define SLICE_SW_CTRL_PDN_SOFT_MASK BIT(31)
+
+#define MIX_FUNC_STAT_OFF 0xB4
+
+#define FUNC_STAT_PSW_STAT_MASK BIT(0)
+#define FUNC_STAT_RST_STAT_MASK BIT(2)
+#define FUNC_STAT_ISO_STAT_MASK BIT(4)
+
+struct imx93_power_domain {
+ struct generic_pm_domain genpd;
+ struct device *dev;
+ void __iomem *addr;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ bool init_off;
+};
+
+#define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd)
+
+static int imx93_pd_on(struct generic_pm_domain *genpd)
+{
+ struct imx93_power_domain *domain = to_imx93_pd(genpd);
+ void __iomem *addr = domain->addr;
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable clocks for domain: %s\n", genpd->name);
+ return ret;
+ }
+
+ val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
+ val &= ~SLICE_SW_CTRL_PDN_SOFT_MASK;
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+ !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000);
+ if (ret) {
+ dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+ }
+
+ return 0;
+}
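
A rough, hand-rolled equivalent of the readl_poll_timeout() call above (illustration only, the real macro uses ktime and re-checks once after the timeout): poll MIX_FUNC_STAT until the isolation status bit clears, sleeping about 1us per iteration, for at most 10ms.

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	static int example_wait_iso_cleared(void __iomem *addr)
	{
		unsigned int waited_us = 0;
		u32 val;

		for (;;) {
			val = readl(addr + MIX_FUNC_STAT_OFF);
			if (!(val & FUNC_STAT_ISO_STAT_MASK))
				return 0;
			if (waited_us >= 10000)
				return -ETIMEDOUT;
			usleep_range(1, 2);
			waited_us++;
		}
	}
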
+
+static int imx93_pd_off(struct generic_pm_domain *genpd)
+{
+ struct imx93_power_domain *domain = to_imx93_pd(genpd);
+ void __iomem *addr = domain->addr;
+ int ret;
+ u32 val;
+
+ /* Power off MIX */
+ val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
+ val |= SLICE_SW_CTRL_PDN_SOFT_MASK;
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+ val & FUNC_STAT_PSW_STAT_MASK, 1, 1000);
+ if (ret) {
+ dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+ }
+
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ return 0;
+}
+
+static int imx93_pd_remove(struct platform_device *pdev)
+{
+ struct imx93_power_domain *domain = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!domain->init_off)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ of_genpd_del_provider(np);
+ pm_genpd_remove(&domain->genpd);
+
+ return 0;
+}
+
+static int imx93_pd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx93_power_domain *domain;
+ int ret;
+
+ domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return -ENOMEM;
+
+ domain->addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(domain->addr))
+ return PTR_ERR(domain->addr);
+
+ domain->num_clks = devm_clk_bulk_get_all(dev, &domain->clks);
+ if (domain->num_clks < 0)
+ return dev_err_probe(dev, domain->num_clks, "Failed to get domain's clocks\n");
+
+ domain->genpd.name = dev_name(dev);
+ domain->genpd.power_off = imx93_pd_off;
+ domain->genpd.power_on = imx93_pd_on;
+ domain->dev = dev;
+
+ domain->init_off = readl(domain->addr + MIX_FUNC_STAT_OFF) & FUNC_STAT_ISO_STAT_MASK;
+ /* If the domain is already powered on, enable its clocks so the software state matches the hardware */
+ if (!domain->init_off) {
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable clocks for domain: %s\n",
+ domain->genpd.name);
+ return ret;
+ }
+ }
+
+ ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, domain);
+
+ return of_genpd_add_provider_simple(np, &domain->genpd);
+}
+
+static const struct of_device_id imx93_pd_ids[] = {
+ { .compatible = "fsl,imx93-src-slice" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx93_pd_ids);
+
+static struct platform_driver imx93_power_domain_driver = {
+ .driver = {
+ .name = "imx93_power_domain",
+ .owner = THIS_MODULE,
+ .of_match_table = imx93_pd_ids,
+ },
+ .probe = imx93_pd_probe,
+ .remove = imx93_pd_remove,
+};
+module_platform_driver(imx93_power_domain_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX93 power domain driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/imx93-src.c b/drivers/soc/imx/imx93-src.c
new file mode 100644
index 000000000000..4d74921cae0f
--- /dev/null
+++ b/drivers/soc/imx/imx93-src.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+static int imx93_src_probe(struct platform_device *pdev)
+{
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id imx93_src_ids[] = {
+ { .compatible = "fsl,imx93-src" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx93_src_ids);
+
+static struct platform_driver imx93_src_driver = {
+ .driver = {
+ .name = "imx93_src",
+ .owner = THIS_MODULE,
+ .of_match_table = imx93_src_ids,
+ },
+ .probe = imx93_src_probe,
+};
+module_platform_driver(imx93_src_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX93 src driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 3c3eedea35f7..40d0cc600cae 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -37,6 +37,7 @@ config MTK_INFRACFG
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
depends on RESET_CONTROLLER
+ depends on OF
select REGMAP
help
Say yes here to add support for MediaTek PMIC Wrapper found
@@ -46,6 +47,7 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
default ARCH_MEDIATEK
+ depends on OF
select REGMAP
select MTK_INFRACFG
select PM_GENERIC_DOMAINS if PM
@@ -75,7 +77,7 @@ config MTK_MMSYS
config MTK_SVS
tristate "MediaTek Smart Voltage Scaling(SVS)"
- depends on MTK_EFUSE && NVMEM
+ depends on NVMEM_MTK_EFUSE && NVMEM
help
The Smart Voltage Scaling(SVS) engine is a piece of hardware
which has several controllers(banks) for calculating suitable
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index eb1ad9c37a9c..09b1ccbc0093 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -3,6 +3,12 @@
#ifndef __SOC_MEDIATEK_MT8186_MMSYS_H
#define __SOC_MEDIATEK_MT8186_MMSYS_H
+/* Values for DPI configuration in MMSYS address space */
+#define MT8186_MMSYS_DPI_OUTPUT_FORMAT 0x400
+#define DPI_FORMAT_MASK 0x1
+#define DPI_RGB888_DDR_CON BIT(0)
+#define DPI_RGB565_SDR_CON BIT(1)
+
#define MT8186_MMSYS_OVL_CON 0xF04
#define MT8186_MMSYS_OVL0_CON_MASK 0x3
#define MT8186_MMSYS_OVL0_2L_CON_MASK 0xC
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index 06d8e83a2cb5..d2c7a87aab87 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -227,6 +227,26 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+static void mtk_mmsys_update_bits(struct mtk_mmsys *mmsys, u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(mmsys->regs + offset);
+ tmp = (tmp & ~mask) | val;
+ writel_relaxed(tmp, mmsys->regs + offset);
+}
+
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val)
+{
+ if (val)
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ DPI_RGB888_DDR_CON, DPI_FORMAT_MASK);
+ else
+ mtk_mmsys_update_bits(dev_get_drvdata(dev), MT8186_MMSYS_DPI_OUTPUT_FORMAT,
+ DPI_RGB565_SDR_CON, DPI_FORMAT_MASK);
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_dpi_fmt_config);
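
A hypothetical caller (not part of this patch): a DPI driver holding a handle to the mmsys device could select the output format before enabling the display path. "mmsys_dev" and "use_rgb888_ddr" are assumed names, and the prototype is assumed to be exported through the mmsys header.

	static void example_dpi_select_format(struct device *mmsys_dev,
					      bool use_rgb888_ddr)
	{
		mtk_mmsys_ddp_dpi_fmt_config(mmsys_dev, use_rgb888_ddr ? 1 : 0);
	}
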
+
static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned long id,
bool assert)
{
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 5ea43de4e410..c1a33d52038e 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -91,6 +91,15 @@
#define MT8183_MUTEX_MOD_MDP_AAL0 23
#define MT8183_MUTEX_MOD_MDP_CCORR0 24
+#define MT8186_MUTEX_MOD_MDP_RDMA0 0
+#define MT8186_MUTEX_MOD_MDP_AAL0 2
+#define MT8186_MUTEX_MOD_MDP_HDR0 4
+#define MT8186_MUTEX_MOD_MDP_RSZ0 5
+#define MT8186_MUTEX_MOD_MDP_RSZ1 6
+#define MT8186_MUTEX_MOD_MDP_WROT0 7
+#define MT8186_MUTEX_MOD_MDP_TDSHP0 9
+#define MT8186_MUTEX_MOD_MDP_COLOR0 14
+
#define MT8173_MUTEX_MOD_DISP_OVL0 11
#define MT8173_MUTEX_MOD_DISP_OVL1 12
#define MT8173_MUTEX_MOD_DISP_RDMA0 13
@@ -324,6 +333,17 @@ static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_RDMA1] = MT8186_MUTEX_MOD_DISP_RDMA1,
};
+static const unsigned int mt8186_mdp_mutex_table_mod[MUTEX_MOD_IDX_MAX] = {
+ [MUTEX_MOD_IDX_MDP_RDMA0] = MT8186_MUTEX_MOD_MDP_RDMA0,
+ [MUTEX_MOD_IDX_MDP_RSZ0] = MT8186_MUTEX_MOD_MDP_RSZ0,
+ [MUTEX_MOD_IDX_MDP_RSZ1] = MT8186_MUTEX_MOD_MDP_RSZ1,
+ [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8186_MUTEX_MOD_MDP_TDSHP0,
+ [MUTEX_MOD_IDX_MDP_WROT0] = MT8186_MUTEX_MOD_MDP_WROT0,
+ [MUTEX_MOD_IDX_MDP_HDR0] = MT8186_MUTEX_MOD_MDP_HDR0,
+ [MUTEX_MOD_IDX_MDP_AAL0] = MT8186_MUTEX_MOD_MDP_AAL0,
+ [MUTEX_MOD_IDX_MDP_COLOR0] = MT8186_MUTEX_MOD_MDP_COLOR0,
+};
+
static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
[DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
@@ -380,6 +400,13 @@ static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3,
};
+static const unsigned int mt6795_mutex_sof[DDP_MUTEX_SOF_MAX] = {
+ [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
+ [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
+ [MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1,
+ [MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0,
+};
+
static const unsigned int mt8167_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -434,6 +461,13 @@ static const struct mtk_mutex_data mt2712_mutex_driver_data = {
.mutex_sof_reg = MT2701_MUTEX0_SOF0,
};
+static const struct mtk_mutex_data mt6795_mutex_driver_data = {
+ .mutex_mod = mt8173_mutex_mod,
+ .mutex_sof = mt6795_mutex_sof,
+ .mutex_mod_reg = MT2701_MUTEX0_MOD0,
+ .mutex_sof_reg = MT2701_MUTEX0_SOF0,
+};
+
static const struct mtk_mutex_data mt8167_mutex_driver_data = {
.mutex_mod = mt8167_mutex_mod,
.mutex_sof = mt8167_mutex_sof,
@@ -458,6 +492,12 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.no_clk = true,
};
+static const struct mtk_mutex_data mt8186_mdp_mutex_driver_data = {
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+ .mutex_table_mod = mt8186_mdp_mutex_table_mod,
+};
+
static const struct mtk_mutex_data mt8186_mutex_driver_data = {
.mutex_mod = mt8186_mutex_mod,
.mutex_sof = mt8186_mutex_sof,
@@ -802,6 +842,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt2701_mutex_driver_data},
{ .compatible = "mediatek,mt2712-disp-mutex",
.data = &mt2712_mutex_driver_data},
+ { .compatible = "mediatek,mt6795-disp-mutex",
+ .data = &mt6795_mutex_driver_data},
{ .compatible = "mediatek,mt8167-disp-mutex",
.data = &mt8167_mutex_driver_data},
{ .compatible = "mediatek,mt8173-disp-mutex",
@@ -810,6 +852,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8183_mutex_driver_data},
{ .compatible = "mediatek,mt8186-disp-mutex",
.data = &mt8186_mutex_driver_data},
+ { .compatible = "mediatek,mt8186-mdp3-mutex",
+ .data = &mt8186_mdp_mutex_driver_data},
{ .compatible = "mediatek,mt8192-disp-mutex",
.data = &mt8192_mutex_driver_data},
{ .compatible = "mediatek,mt8195-disp-mutex",
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
index 9734f1091c69..09e3c38b8466 100644
--- a/drivers/soc/mediatek/mtk-pm-domains.c
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -393,7 +393,7 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(scpsys->dev, ret,
- "%pOF: failed to get clk at index %d: %d\n", node, i, ret);
+ "%pOF: failed to get clk at index %d\n", node, i);
goto err_put_clocks;
}
@@ -405,8 +405,8 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(scpsys->dev, ret,
- "%pOF: failed to get clk at index %d: %d\n", node,
- i + clk_ind, ret);
+ "%pOF: failed to get clk at index %d\n", node,
+ i + clk_ind);
goto err_put_subsys_clocks;
}
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index d8cb0f833645..eb82ae06697f 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -2316,7 +2316,7 @@ err_out1:
static struct platform_driver pwrap_drv = {
.driver = {
.name = "mt-pmic-pwrap",
- .of_match_table = of_match_ptr(of_pwrap_match_tbl),
+ .of_match_table = of_pwrap_match_tbl,
},
.probe = pwrap_probe,
};
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index ca75b14931ec..7a668888111c 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -1141,7 +1141,7 @@ static struct platform_driver scpsys_drv = {
.name = "mtk-scpsys",
.suppress_bind_attrs = true,
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(of_scpsys_match_tbl),
+ .of_match_table = of_scpsys_match_tbl,
},
};
builtin_platform_driver(scpsys_drv);
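
The of_match_ptr() removals above go together with the new "depends on OF" lines in the MediaTek Kconfig: once CONFIG_OF is guaranteed, the wrapper is a no-op and only hides the match table from the compiler in !OF builds. For reference, paraphrased from include/linux/of.h:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL	/* table compiled out without OF */
	#endif
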
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index dee8664a12fd..0469c9dfeb04 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022 MediaTek Inc.
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
@@ -53,22 +54,79 @@
#define SVSB_MON_VOLT_IGNORE BIT(16)
#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24)
-/* svs bank register common configuration */
-#define SVSB_DET_MAX 0xffff
+/* svs bank register fields and common configuration */
+#define SVSB_PTPCONFIG_DETMAX GENMASK(15, 0)
+#define SVSB_DET_MAX FIELD_PREP(SVSB_PTPCONFIG_DETMAX, 0xffff)
#define SVSB_DET_WINDOW 0xa28
-#define SVSB_DTHI 0x1
-#define SVSB_DTLO 0xfe
-#define SVSB_EN_INIT01 0x1
-#define SVSB_EN_INIT02 0x5
-#define SVSB_EN_MON 0x2
-#define SVSB_EN_OFF 0x0
-#define SVSB_INTEN_INIT0x 0x00005f01
-#define SVSB_INTEN_MONVOPEN 0x00ff0000
-#define SVSB_INTSTS_CLEAN 0x00ffffff
-#define SVSB_INTSTS_COMPLETE 0x1
-#define SVSB_INTSTS_MONVOP 0x00ff0000
+
+/* DESCHAR */
+#define SVSB_DESCHAR_FLD_MDES GENMASK(7, 0)
+#define SVSB_DESCHAR_FLD_BDES GENMASK(15, 8)
+
+/* TEMPCHAR */
+#define SVSB_TEMPCHAR_FLD_DVT_FIXED GENMASK(7, 0)
+#define SVSB_TEMPCHAR_FLD_MTDES GENMASK(15, 8)
+#define SVSB_TEMPCHAR_FLD_VCO GENMASK(23, 16)
+
+/* DETCHAR */
+#define SVSB_DETCHAR_FLD_DCMDET GENMASK(7, 0)
+#define SVSB_DETCHAR_FLD_DCBDET GENMASK(15, 8)
+
+/* SVSEN (PTPEN) */
+#define SVSB_PTPEN_INIT01 BIT(0)
+#define SVSB_PTPEN_MON BIT(1)
+#define SVSB_PTPEN_INIT02 (SVSB_PTPEN_INIT01 | BIT(2))
+#define SVSB_PTPEN_OFF 0x0
+
+/* FREQPCTS */
+#define SVSB_FREQPCTS_FLD_PCT0_4 GENMASK(7, 0)
+#define SVSB_FREQPCTS_FLD_PCT1_5 GENMASK(15, 8)
+#define SVSB_FREQPCTS_FLD_PCT2_6 GENMASK(23, 16)
+#define SVSB_FREQPCTS_FLD_PCT3_7 GENMASK(31, 24)
+
+/* INTSTS */
+#define SVSB_INTSTS_VAL_CLEAN 0x00ffffff
+#define SVSB_INTSTS_F0_COMPLETE BIT(0)
+#define SVSB_INTSTS_FLD_MONVOP GENMASK(23, 16)
#define SVSB_RUNCONFIG_DEFAULT 0x80000000
+/* LIMITVALS */
+#define SVSB_LIMITVALS_FLD_DTLO GENMASK(7, 0)
+#define SVSB_LIMITVALS_FLD_DTHI GENMASK(15, 8)
+#define SVSB_LIMITVALS_FLD_VMIN GENMASK(23, 16)
+#define SVSB_LIMITVALS_FLD_VMAX GENMASK(31, 24)
+#define SVSB_VAL_DTHI 0x1
+#define SVSB_VAL_DTLO 0xfe
+
+/* INTEN */
+#define SVSB_INTEN_F0EN BIT(0)
+#define SVSB_INTEN_DACK0UPEN BIT(8)
+#define SVSB_INTEN_DC0EN BIT(9)
+#define SVSB_INTEN_DC1EN BIT(10)
+#define SVSB_INTEN_DACK0LOEN BIT(11)
+#define SVSB_INTEN_INITPROD_OVF_EN BIT(12)
+#define SVSB_INTEN_INITSUM_OVF_EN BIT(14)
+#define SVSB_INTEN_MONVOPEN GENMASK(23, 16)
+#define SVSB_INTEN_INIT0x (SVSB_INTEN_F0EN | SVSB_INTEN_DACK0UPEN | \
+ SVSB_INTEN_DC0EN | SVSB_INTEN_DC1EN | \
+ SVSB_INTEN_DACK0LOEN | \
+ SVSB_INTEN_INITPROD_OVF_EN | \
+ SVSB_INTEN_INITSUM_OVF_EN)
+
+/* TSCALCS */
+#define SVSB_TSCALCS_FLD_MTS GENMASK(11, 0)
+#define SVSB_TSCALCS_FLD_BTS GENMASK(23, 12)
+
+/* INIT2VALS */
+#define SVSB_INIT2VALS_FLD_DCVOFFSETIN GENMASK(15, 0)
+#define SVSB_INIT2VALS_FLD_AGEVOFFSETIN GENMASK(31, 16)
+
+/* VOPS */
+#define SVSB_VOPS_FLD_VOP0_4 GENMASK(7, 0)
+#define SVSB_VOPS_FLD_VOP1_5 GENMASK(15, 8)
+#define SVSB_VOPS_FLD_VOP2_6 GENMASK(23, 16)
+#define SVSB_VOPS_FLD_VOP3_7 GENMASK(31, 24)
+
/* svs bank related setting */
#define BITS8 8
#define MAX_OPP_ENTRIES 16
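
The conversion in this file replaces open-coded shifts with the bitfield.h helpers used against the GENMASK() definitions above. A minimal, self-contained illustration (names are examples only):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_FLD_LO	GENMASK(7, 0)
	#define EXAMPLE_FLD_HI	GENMASK(15, 8)

	/* FIELD_PREP() shifts a value into the field position described by the
	 * mask, so "(hi << 8) | lo" needs no magic shift counts. */
	static u32 example_pack(u32 lo, u32 hi)
	{
		return FIELD_PREP(EXAMPLE_FLD_LO, lo) |
		       FIELD_PREP(EXAMPLE_FLD_HI, hi);
	}

	/* FIELD_GET() extracts it again: equivalent to (reg >> 8) & 0xff. */
	static u32 example_unpack_hi(u32 reg)
	{
		return FIELD_GET(EXAMPLE_FLD_HI, reg);
	}
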
@@ -262,7 +320,6 @@ static const u32 svs_regs_v2[] = {
* @rst: svs platform reset control
* @efuse_parsing: svs platform efuse parsing function pointer
* @probe: svs platform probe function pointer
- * @irqflags: svs platform irq settings flags
* @efuse_max: total number of svs efuse
* @tefuse_max: total number of thermal efuse
* @regs: svs platform registers map
@@ -280,7 +337,6 @@ struct svs_platform {
struct reset_control *rst;
bool (*efuse_parsing)(struct svs_platform *svsp);
int (*probe)(struct svs_platform *svsp);
- unsigned long irqflags;
size_t efuse_max;
size_t tefuse_max;
const u32 *regs;
@@ -294,7 +350,6 @@ struct svs_platform_data {
struct svs_bank *banks;
bool (*efuse_parsing)(struct svs_platform *svsp);
int (*probe)(struct svs_platform *svsp);
- unsigned long irqflags;
const u32 *regs;
u32 bank_max;
};
@@ -668,8 +723,8 @@ static ssize_t svs_enable_debug_write(struct file *filp,
svsp->pbank = svsb;
svsb->mode_support = SVSB_MODE_ALL_DISABLE;
svs_switch_bank(svsp);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
spin_unlock_irqrestore(&svs_lock, flags);
svsb->phase = SVSB_PHASE_ERROR;
@@ -830,7 +885,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
} else if (svsb->type == SVSB_LOW) {
/* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */
j = svsb->opp_count - 7;
- svsb->volt[turn_pt] = vop30 & GENMASK(7, 0);
+ svsb->volt[turn_pt] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
shift_byte++;
for (i = j; i < svsb->opp_count; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
@@ -852,7 +907,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp)
if (svsb->type == SVSB_HIGH) {
/* volt[0] + volt[j] ~ volt[turn_pt - 1] */
j = turn_pt - 7;
- svsb->volt[0] = vop30 & GENMASK(7, 0);
+ svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30);
shift_byte++;
for (i = j; i < turn_pt; i++) {
b_sft = BITS8 * (shift_byte % REG_BYTES);
@@ -983,16 +1038,16 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
u32 temp, i;
temp = svs_readl_relaxed(svsp, VOP74);
- svsb->volt[14] = (temp >> 24) & GENMASK(7, 0);
- svsb->volt[12] = (temp >> 16) & GENMASK(7, 0);
- svsb->volt[10] = (temp >> 8) & GENMASK(7, 0);
- svsb->volt[8] = (temp & GENMASK(7, 0));
+ svsb->volt[14] = FIELD_GET(SVSB_VOPS_FLD_VOP3_7, temp);
+ svsb->volt[12] = FIELD_GET(SVSB_VOPS_FLD_VOP2_6, temp);
+ svsb->volt[10] = FIELD_GET(SVSB_VOPS_FLD_VOP1_5, temp);
+ svsb->volt[8] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, temp);
temp = svs_readl_relaxed(svsp, VOP30);
- svsb->volt[6] = (temp >> 24) & GENMASK(7, 0);
- svsb->volt[4] = (temp >> 16) & GENMASK(7, 0);
- svsb->volt[2] = (temp >> 8) & GENMASK(7, 0);
- svsb->volt[0] = (temp & GENMASK(7, 0));
+ svsb->volt[6] = FIELD_GET(SVSB_VOPS_FLD_VOP3_7, temp);
+ svsb->volt[4] = FIELD_GET(SVSB_VOPS_FLD_VOP2_6, temp);
+ svsb->volt[2] = FIELD_GET(SVSB_VOPS_FLD_VOP1_5, temp);
+ svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, temp);
for (i = 0; i <= 12; i += 2)
svsb->volt[i + 1] = interpolate(svsb->freq_pct[i],
@@ -1014,20 +1069,20 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp)
static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp)
{
struct svs_bank *svsb = svsp->pbank;
+ u32 freqpct74_val, freqpct30_val;
+
+ freqpct74_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[8]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT1_5, svsb->freq_pct[10]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT2_6, svsb->freq_pct[12]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT3_7, svsb->freq_pct[14]);
- svs_writel_relaxed(svsp,
- (svsb->freq_pct[14] << 24) |
- (svsb->freq_pct[12] << 16) |
- (svsb->freq_pct[10] << 8) |
- svsb->freq_pct[8],
- FREQPCT74);
-
- svs_writel_relaxed(svsp,
- (svsb->freq_pct[6] << 24) |
- (svsb->freq_pct[4] << 16) |
- (svsb->freq_pct[2] << 8) |
- svsb->freq_pct[0],
- FREQPCT30);
+ freqpct30_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[0]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT1_5, svsb->freq_pct[2]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT2_6, svsb->freq_pct[4]) |
+ FIELD_PREP(SVSB_FREQPCTS_FLD_PCT3_7, svsb->freq_pct[6]);
+
+ svs_writel_relaxed(svsp, freqpct74_val, FREQPCT74);
+ svs_writel_relaxed(svsp, freqpct30_val, FREQPCT30);
}
static void svs_set_bank_phase(struct svs_platform *svsp,
@@ -1038,13 +1093,17 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
svs_switch_bank(svsp);
- des_char = (svsb->bdes << 8) | svsb->mdes;
+ des_char = FIELD_PREP(SVSB_DESCHAR_FLD_BDES, svsb->bdes) |
+ FIELD_PREP(SVSB_DESCHAR_FLD_MDES, svsb->mdes);
svs_writel_relaxed(svsp, des_char, DESCHAR);
- temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed;
+ temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, svsb->vco) |
+ FIELD_PREP(SVSB_TEMPCHAR_FLD_MTDES, svsb->mtdes) |
+ FIELD_PREP(SVSB_TEMPCHAR_FLD_DVT_FIXED, svsb->dvt_fixed);
svs_writel_relaxed(svsp, temp_char, TEMPCHAR);
- det_char = (svsb->dcbdet << 8) | svsb->dcmdet;
+ det_char = FIELD_PREP(SVSB_DETCHAR_FLD_DCBDET, svsb->dcbdet) |
+ FIELD_PREP(SVSB_DETCHAR_FLD_DCMDET, svsb->dcmdet);
svs_writel_relaxed(svsp, det_char, DETCHAR);
svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG);
@@ -1053,33 +1112,37 @@ static void svs_set_bank_phase(struct svs_platform *svsp,
svsb->set_freq_pct(svsp);
- limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) |
- (SVSB_DTHI << 8) | SVSB_DTLO;
+ limit_vals = FIELD_PREP(SVSB_LIMITVALS_FLD_DTLO, SVSB_VAL_DTLO) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_DTHI, SVSB_VAL_DTHI) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_VMIN, svsb->vmin) |
+ FIELD_PREP(SVSB_LIMITVALS_FLD_VMAX, svsb->vmax);
svs_writel_relaxed(svsp, limit_vals, LIMITVALS);
svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW);
svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG);
svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT);
svs_writel_relaxed(svsp, svsb->ctl0, CTL0);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
switch (target_phase) {
case SVSB_PHASE_INIT01:
svs_writel_relaxed(svsp, svsb->vboot, VBOOT);
svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
- svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_INIT01, SVSEN);
break;
case SVSB_PHASE_INIT02:
+ init2vals = FIELD_PREP(SVSB_INIT2VALS_FLD_AGEVOFFSETIN, svsb->age_voffset_in) |
+ FIELD_PREP(SVSB_INIT2VALS_FLD_DCVOFFSETIN, svsb->dc_voffset_in);
svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN);
- init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in;
svs_writel_relaxed(svsp, init2vals, INIT2VALS);
- svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_INIT02, SVSEN);
break;
case SVSB_PHASE_MON:
- ts_calcs = (svsb->bts << 12) | svsb->mts;
+ ts_calcs = FIELD_PREP(SVSB_TSCALCS_FLD_BTS, svsb->bts) |
+ FIELD_PREP(SVSB_TSCALCS_FLD_MTS, svsb->mts);
svs_writel_relaxed(svsp, ts_calcs, TSCALCS);
svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN);
- svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_MON, SVSEN);
break;
default:
dev_err(svsb->dev, "requested unknown target phase: %u\n",
@@ -1115,8 +1178,8 @@ static inline void svs_error_isr_handler(struct svs_platform *svsp)
svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR);
svsb->phase = SVSB_PHASE_ERROR;
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
}
static inline void svs_init01_isr_handler(struct svs_platform *svsp)
@@ -1141,8 +1204,8 @@ static inline void svs_init01_isr_handler(struct svs_platform *svsp)
svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) &
GENMASK(15, 0);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
svsb->core_sel &= ~SVSB_DET_CLK_EN;
}
@@ -1160,8 +1223,8 @@ static inline void svs_init02_isr_handler(struct svs_platform *svsp)
svsb->phase = SVSB_PHASE_INIT02;
svsb->get_volts(svsp);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS);
}
static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
@@ -1174,7 +1237,7 @@ static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp)
svsb->get_volts(svsp);
svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0);
- svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_FLD_MONVOP, INTSTS);
}
static irqreturn_t svs_isr(int irq, void *data)
@@ -1201,13 +1264,13 @@ static irqreturn_t svs_isr(int irq, void *data)
int_sts = svs_readl_relaxed(svsp, INTSTS);
svs_en = svs_readl_relaxed(svsp, SVSEN);
- if (int_sts == SVSB_INTSTS_COMPLETE &&
- svs_en == SVSB_EN_INIT01)
+ if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
+ svs_en == SVSB_PTPEN_INIT01)
svs_init01_isr_handler(svsp);
- else if (int_sts == SVSB_INTSTS_COMPLETE &&
- svs_en == SVSB_EN_INIT02)
+ else if (int_sts == SVSB_INTSTS_F0_COMPLETE &&
+ svs_en == SVSB_PTPEN_INIT02)
svs_init02_isr_handler(svsp);
- else if (int_sts & SVSB_INTSTS_MONVOP)
+ else if (int_sts & SVSB_INTSTS_FLD_MONVOP)
svs_mon_mode_isr_handler(svsp);
else
svs_error_isr_handler(svsp);
@@ -1493,8 +1556,8 @@ static int svs_suspend(struct device *dev)
spin_lock_irqsave(&svs_lock, flags);
svsp->pbank = svsb;
svs_switch_bank(svsp);
- svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN);
- svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS);
+ svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
+ svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
spin_unlock_irqrestore(&svs_lock, flags);
svsb->phase = SVSB_PHASE_ERROR;
@@ -1589,7 +1652,7 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
dev_set_drvdata(svsb->dev, svsp);
- ret = dev_pm_opp_of_add_table(svsb->opp_dev);
+ ret = devm_pm_opp_of_add_table(svsb->opp_dev);
if (ret) {
dev_err(svsb->dev, "add opp table fail: %d\n", ret);
return ret;
@@ -1644,11 +1707,36 @@ static int svs_bank_resource_setup(struct svs_platform *svsp)
return 0;
}
+static int svs_thermal_efuse_get_data(struct svs_platform *svsp)
+{
+ struct nvmem_cell *cell;
+
+ /* Thermal efuse parsing */
+ cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
+ if (IS_ERR_OR_NULL(cell)) {
+ dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n", PTR_ERR(cell));
+ return PTR_ERR(cell);
+ }
+
+ svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
+ if (IS_ERR(svsp->tefuse)) {
+ dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
+ PTR_ERR(svsp->tefuse));
+ nvmem_cell_put(cell);
+ return PTR_ERR(svsp->tefuse);
+ }
+
+ svsp->tefuse_max /= sizeof(u32);
+ nvmem_cell_put(cell);
+
+ return 0;
+}
+
static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
{
struct svs_bank *svsb;
- struct nvmem_cell *cell;
u32 idx, i, vmin, golden_temp;
+ int ret;
for (i = 0; i < svsp->efuse_max; i++)
if (svsp->efuse[i])
@@ -1686,24 +1774,9 @@ static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
svsb->vmax += svsb->dvt_fixed;
}
- /* Thermal efuse parsing */
- cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
- if (IS_ERR_OR_NULL(cell)) {
- dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
- PTR_ERR(cell));
- return false;
- }
-
- svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
- if (IS_ERR(svsp->tefuse)) {
- dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
- PTR_ERR(svsp->tefuse));
- nvmem_cell_put(cell);
+ ret = svs_thermal_efuse_get_data(svsp);
+ if (ret)
return false;
- }
-
- svsp->tefuse_max /= sizeof(u32);
- nvmem_cell_put(cell);
for (i = 0; i < svsp->tefuse_max; i++)
if (svsp->tefuse[i] != 0)
@@ -1726,11 +1799,11 @@ static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp)
static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
{
struct svs_bank *svsb;
- struct nvmem_cell *cell;
int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0;
int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t;
int o_slope, o_slope_sign, ts_id;
u32 idx, i, ft_pgm, mts, temp0, temp1, temp2;
+ int ret;
for (i = 0; i < svsp->efuse_max; i++)
if (svsp->efuse[i])
@@ -1806,24 +1879,9 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp)
}
}
- /* Get thermal efuse by nvmem */
- cell = nvmem_cell_get(svsp->dev, "t-calibration-data");
- if (IS_ERR(cell)) {
- dev_err(svsp->dev, "no \"t-calibration-data\"? %ld\n",
- PTR_ERR(cell));
- goto remove_mt8183_svsb_mon_mode;
- }
-
- svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max);
- if (IS_ERR(svsp->tefuse)) {
- dev_err(svsp->dev, "cannot read thermal efuse: %ld\n",
- PTR_ERR(svsp->tefuse));
- nvmem_cell_put(cell);
- goto remove_mt8183_svsb_mon_mode;
- }
-
- svsp->tefuse_max /= sizeof(u32);
- nvmem_cell_put(cell);
+ ret = svs_thermal_efuse_get_data(svsp);
+ if (ret)
+ return false;
/* Thermal efuse parsing */
adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0);
@@ -2244,7 +2302,6 @@ static const struct svs_platform_data svs_mt8192_platform_data = {
.banks = svs_mt8192_banks,
.efuse_parsing = svs_mt8192_efuse_parsing,
.probe = svs_mt8192_platform_probe,
- .irqflags = IRQF_TRIGGER_HIGH,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8192_banks),
};
@@ -2254,7 +2311,6 @@ static const struct svs_platform_data svs_mt8183_platform_data = {
.banks = svs_mt8183_banks,
.efuse_parsing = svs_mt8183_efuse_parsing,
.probe = svs_mt8183_platform_probe,
- .irqflags = IRQF_TRIGGER_LOW,
.regs = svs_regs_v2,
.bank_max = ARRAY_SIZE(svs_mt8183_banks),
};
@@ -2292,7 +2348,6 @@ static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
svsp->banks = svsp_data->banks;
svsp->efuse_parsing = svsp_data->efuse_parsing;
svsp->probe = svsp_data->probe;
- svsp->irqflags = svsp_data->irqflags;
svsp->regs = svsp_data->regs;
svsp->bank_max = svsp_data->bank_max;
@@ -2306,8 +2361,7 @@ static struct svs_platform *svs_platform_probe(struct platform_device *pdev)
static int svs_probe(struct platform_device *pdev)
{
struct svs_platform *svsp;
- unsigned int svsp_irq;
- int ret;
+ int svsp_irq, ret;
svsp = svs_platform_probe(pdev);
if (IS_ERR(svsp))
@@ -2325,10 +2379,14 @@ static int svs_probe(struct platform_device *pdev)
goto svs_probe_free_resource;
}
- svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0);
+ svsp_irq = platform_get_irq(pdev, 0);
+ if (svsp_irq < 0) {
+ ret = svsp_irq;
+ goto svs_probe_free_resource;
+ }
+
ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
- svsp->irqflags | IRQF_ONESHOT,
- svsp->name, svsp);
+ IRQF_ONESHOT, svsp->name, svsp);
if (ret) {
dev_err(svsp->dev, "register irq(%d) failed: %d\n",
svsp_irq, ret);
@@ -2392,7 +2450,7 @@ static struct platform_driver svs_driver = {
.driver = {
.name = "mtk-svs",
.pm = &svs_pm_ops,
- .of_match_table = of_match_ptr(svs_of_match),
+ .of_match_table = svs_of_match,
},
};
diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c
index 563440315acd..93449fb3519e 100644
--- a/drivers/soc/pxa/ssp.c
+++ b/drivers/soc/pxa/ssp.c
@@ -180,11 +180,7 @@ static int pxa_ssp_probe(struct platform_device *pdev)
static int pxa_ssp_remove(struct platform_device *pdev)
{
- struct ssp_device *ssp;
-
- ssp = platform_get_drvdata(pdev);
- if (ssp == NULL)
- return -ENODEV;
+ struct ssp_device *ssp = platform_get_drvdata(pdev);
mutex_lock(&ssp_lock);
list_del(&ssp->node);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index e0d7a5459562..024e420f1bb7 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -129,7 +129,7 @@ config QCOM_RPMHPD
config QCOM_RPMPD
tristate "Qualcomm RPM Power domain driver"
- depends on PM
+ depends on PM && OF
depends on QCOM_SMD_RPM
select PM_GENERIC_DOMAINS
select PM_GENERIC_DOMAINS_OF
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index 7f8aca533cd3..d07be3700db6 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -5,6 +5,8 @@
* Author: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>, based on
* previous work of Thara Gopinath and msm-4.9 downstream sources.
*/
+
+#include <linux/err.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -13,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regmap.h>
#include <linux/sizes.h>
/*
@@ -31,33 +34,44 @@
/* Internal sampling clock frequency */
#define HW_TIMER_HZ 19200000
-#define BWMON_GLOBAL_IRQ_STATUS 0x0
-#define BWMON_GLOBAL_IRQ_CLEAR 0x8
-#define BWMON_GLOBAL_IRQ_ENABLE 0xc
-#define BWMON_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
-
-#define BWMON_IRQ_STATUS 0x100
-#define BWMON_IRQ_STATUS_ZONE_SHIFT 4
-#define BWMON_IRQ_CLEAR 0x108
-#define BWMON_IRQ_ENABLE 0x10c
-#define BWMON_IRQ_ENABLE_ZONE1_SHIFT 5
-#define BWMON_IRQ_ENABLE_ZONE2_SHIFT 6
-#define BWMON_IRQ_ENABLE_ZONE3_SHIFT 7
-#define BWMON_IRQ_ENABLE_MASK (BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT) | \
- BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT))
-
-#define BWMON_ENABLE 0x2a0
+#define BWMON_V4_GLOBAL_IRQ_CLEAR 0x008
+#define BWMON_V4_GLOBAL_IRQ_ENABLE 0x00c
+/*
+ * All values from here on match regmap fields, so they are relative to the
+ * field rather than absolute register offsets.
+ */
+#define BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE BIT(0)
+
+#define BWMON_V4_IRQ_STATUS 0x100
+#define BWMON_V4_IRQ_CLEAR 0x108
+
+#define BWMON_V4_IRQ_ENABLE 0x10c
+#define BWMON_IRQ_ENABLE_MASK (BIT(1) | BIT(3))
+#define BWMON_V5_IRQ_STATUS 0x000
+#define BWMON_V5_IRQ_CLEAR 0x008
+#define BWMON_V5_IRQ_ENABLE 0x00c
+
+#define BWMON_V4_ENABLE 0x2a0
+#define BWMON_V5_ENABLE 0x010
#define BWMON_ENABLE_ENABLE BIT(0)
-#define BWMON_CLEAR 0x2a4
+#define BWMON_V4_CLEAR 0x2a4
+#define BWMON_V5_CLEAR 0x014
#define BWMON_CLEAR_CLEAR BIT(0)
+#define BWMON_CLEAR_CLEAR_ALL BIT(1)
-#define BWMON_SAMPLE_WINDOW 0x2a8
-#define BWMON_THRESHOLD_HIGH 0x2ac
-#define BWMON_THRESHOLD_MED 0x2b0
-#define BWMON_THRESHOLD_LOW 0x2b4
+#define BWMON_V4_SAMPLE_WINDOW 0x2a8
+#define BWMON_V5_SAMPLE_WINDOW 0x020
-#define BWMON_ZONE_ACTIONS 0x2b8
+#define BWMON_V4_THRESHOLD_HIGH 0x2ac
+#define BWMON_V4_THRESHOLD_MED 0x2b0
+#define BWMON_V4_THRESHOLD_LOW 0x2b4
+#define BWMON_V5_THRESHOLD_HIGH 0x024
+#define BWMON_V5_THRESHOLD_MED 0x028
+#define BWMON_V5_THRESHOLD_LOW 0x02c
+
+#define BWMON_V4_ZONE_ACTIONS 0x2b8
+#define BWMON_V5_ZONE_ACTIONS 0x030
/*
* Actions to perform on some zone 'z' when current zone hits the threshold:
* Increment counter of zone 'z'
@@ -83,55 +97,244 @@
BWMON_ZONE_ACTIONS_CLEAR(2) | \
BWMON_ZONE_ACTIONS_CLEAR(1) | \
BWMON_ZONE_ACTIONS_CLEAR(0))
-/* Value for BWMON_ZONE_ACTIONS */
-#define BWMON_ZONE_ACTIONS_DEFAULT (BWMON_ZONE_ACTIONS_ZONE0 | \
- BWMON_ZONE_ACTIONS_ZONE1 << 8 | \
- BWMON_ZONE_ACTIONS_ZONE2 << 16 | \
- BWMON_ZONE_ACTIONS_ZONE3 << 24)
/*
- * There is no clear documentation/explanation of BWMON_THRESHOLD_COUNT
+ * There is no clear documentation/explanation of BWMON_V4_THRESHOLD_COUNT
* register. Based on observations, this is number of times one threshold has to
* be reached, to trigger interrupt in given zone.
*
* 0xff are maximum values meant to ignore the zones 0 and 2.
*/
-#define BWMON_THRESHOLD_COUNT 0x2bc
-#define BWMON_THRESHOLD_COUNT_ZONE1_SHIFT 8
-#define BWMON_THRESHOLD_COUNT_ZONE2_SHIFT 16
-#define BWMON_THRESHOLD_COUNT_ZONE3_SHIFT 24
+#define BWMON_V4_THRESHOLD_COUNT 0x2bc
+#define BWMON_V5_THRESHOLD_COUNT 0x034
#define BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT 0xff
#define BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT 0xff
-/* BWMONv4 count registers use count unit of 64 kB */
-#define BWMON_COUNT_UNIT_KB 64
-#define BWMON_ZONE_COUNT 0x2d8
-#define BWMON_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+#define BWMON_V4_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
+#define BWMON_V5_ZONE_MAX(zone) (0x044 + 4 * (zone))
+
+/* Quirks for specific BWMON types */
+#define BWMON_HAS_GLOBAL_IRQ BIT(0)
+#define BWMON_NEEDS_FORCE_CLEAR BIT(1)
+
+enum bwmon_fields {
+ F_GLOBAL_IRQ_CLEAR,
+ F_GLOBAL_IRQ_ENABLE,
+ F_IRQ_STATUS,
+ F_IRQ_CLEAR,
+ F_IRQ_ENABLE,
+ F_ENABLE,
+ F_CLEAR,
+ F_SAMPLE_WINDOW,
+ F_THRESHOLD_HIGH,
+ F_THRESHOLD_MED,
+ F_THRESHOLD_LOW,
+ F_ZONE_ACTIONS_ZONE0,
+ F_ZONE_ACTIONS_ZONE1,
+ F_ZONE_ACTIONS_ZONE2,
+ F_ZONE_ACTIONS_ZONE3,
+ F_THRESHOLD_COUNT_ZONE0,
+ F_THRESHOLD_COUNT_ZONE1,
+ F_THRESHOLD_COUNT_ZONE2,
+ F_THRESHOLD_COUNT_ZONE3,
+ F_ZONE0_MAX,
+ F_ZONE1_MAX,
+ F_ZONE2_MAX,
+ F_ZONE3_MAX,
+
+ F_NUM_FIELDS
+};
struct icc_bwmon_data {
unsigned int sample_ms;
+ unsigned int count_unit_kb; /* kbytes */
unsigned int default_highbw_kbps;
unsigned int default_medbw_kbps;
unsigned int default_lowbw_kbps;
u8 zone1_thres_count;
u8 zone3_thres_count;
+ unsigned int quirks;
+
+ const struct regmap_config *regmap_cfg;
+ const struct reg_field *regmap_fields;
};
struct icc_bwmon {
struct device *dev;
- void __iomem *base;
+ const struct icc_bwmon_data *data;
int irq;
- unsigned int default_lowbw_kbps;
- unsigned int sample_ms;
+ struct regmap *regmap;
+ struct regmap_field *regs[F_NUM_FIELDS];
+
unsigned int max_bw_kbps;
unsigned int min_bw_kbps;
unsigned int target_kbps;
unsigned int current_kbps;
};
-static void bwmon_clear_counters(struct icc_bwmon *bwmon)
+/* BWMON v4 */
+static const struct reg_field msm8998_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_CLEAR, 0, 0),
+ [F_GLOBAL_IRQ_ENABLE] = REG_FIELD(BWMON_V4_GLOBAL_IRQ_ENABLE, 0, 0),
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V4_IRQ_STATUS, 4, 7),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V4_IRQ_CLEAR, 4, 7),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V4_IRQ_ENABLE, 4, 7),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V4_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V4_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V4_SAMPLE_WINDOW, 0, 23),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V4_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V4_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V4_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V4_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V4_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V4_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V4_GLOBAL_IRQ_CLEAR, BWMON_V4_GLOBAL_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_IRQ_CLEAR, BWMON_V4_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V4_CLEAR, BWMON_V4_CLEAR),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_read_table = {
+ .no_ranges = msm8998_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(msm8998_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range msm8998_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V4_IRQ_STATUS, BWMON_V4_IRQ_STATUS),
+ regmap_reg_range(BWMON_V4_ZONE_MAX(0), BWMON_V4_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table msm8998_bwmon_reg_volatile_table = {
+ .yes_ranges = msm8998_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(msm8998_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Fill the cache for non-readable registers only as rest does not really
+ * matter and can be read from the device.
+ */
+static const struct reg_default msm8998_bwmon_reg_defaults[] = {
+ { BWMON_V4_GLOBAL_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_IRQ_CLEAR, 0x0 },
+ { BWMON_V4_CLEAR, 0x0 },
+};
+
+static const struct regmap_config msm8998_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &msm8998_bwmon_reg_read_table,
+ .volatile_table = &msm8998_bwmon_reg_volatile_table,
+ .reg_defaults = msm8998_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(msm8998_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
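
The reg_field tables above are turned into regmap_field handles at probe time; that part of the driver is outside this hunk, so the helper below is only a sketch of the usual pattern, using devm_regmap_init_mmio() and devm_regmap_field_bulk_alloc() from the regmap API.

	#include <linux/regmap.h>

	/*
	 * Sketch only: allocate a regmap_field for every entry of the per-SoC
	 * reg_field table so the rest of the driver can use
	 * regmap_field_read()/regmap_field_write() instead of raw offsets.
	 */
	static int example_bwmon_init_regmap(struct device *dev,
					     struct icc_bwmon *bwmon,
					     void __iomem *base)
	{
		bwmon->regmap = devm_regmap_init_mmio(dev, base,
						      bwmon->data->regmap_cfg);
		if (IS_ERR(bwmon->regmap))
			return PTR_ERR(bwmon->regmap);

		return devm_regmap_field_bulk_alloc(dev, bwmon->regmap,
						    bwmon->regs,
						    bwmon->data->regmap_fields,
						    F_NUM_FIELDS);
	}
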
+
+/* BWMON v5 */
+static const struct reg_field sdm845_llcc_bwmon_reg_fields[] = {
+ [F_GLOBAL_IRQ_CLEAR] = {},
+ [F_GLOBAL_IRQ_ENABLE] = {},
+ [F_IRQ_STATUS] = REG_FIELD(BWMON_V5_IRQ_STATUS, 0, 3),
+ [F_IRQ_CLEAR] = REG_FIELD(BWMON_V5_IRQ_CLEAR, 0, 3),
+ [F_IRQ_ENABLE] = REG_FIELD(BWMON_V5_IRQ_ENABLE, 0, 3),
+ /* F_ENABLE covers entire register to disable other features */
+ [F_ENABLE] = REG_FIELD(BWMON_V5_ENABLE, 0, 31),
+ [F_CLEAR] = REG_FIELD(BWMON_V5_CLEAR, 0, 1),
+ [F_SAMPLE_WINDOW] = REG_FIELD(BWMON_V5_SAMPLE_WINDOW, 0, 19),
+ [F_THRESHOLD_HIGH] = REG_FIELD(BWMON_V5_THRESHOLD_HIGH, 0, 11),
+ [F_THRESHOLD_MED] = REG_FIELD(BWMON_V5_THRESHOLD_MED, 0, 11),
+ [F_THRESHOLD_LOW] = REG_FIELD(BWMON_V5_THRESHOLD_LOW, 0, 11),
+ [F_ZONE_ACTIONS_ZONE0] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 0, 7),
+ [F_ZONE_ACTIONS_ZONE1] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 8, 15),
+ [F_ZONE_ACTIONS_ZONE2] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 16, 23),
+ [F_ZONE_ACTIONS_ZONE3] = REG_FIELD(BWMON_V5_ZONE_ACTIONS, 24, 31),
+ [F_THRESHOLD_COUNT_ZONE0] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 0, 7),
+ [F_THRESHOLD_COUNT_ZONE1] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 8, 15),
+ [F_THRESHOLD_COUNT_ZONE2] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 16, 23),
+ [F_THRESHOLD_COUNT_ZONE3] = REG_FIELD(BWMON_V5_THRESHOLD_COUNT, 24, 31),
+ [F_ZONE0_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(0), 0, 11),
+ [F_ZONE1_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(1), 0, 11),
+ [F_ZONE2_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(2), 0, 11),
+ [F_ZONE3_MAX] = REG_FIELD(BWMON_V5_ZONE_MAX(3), 0, 11),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_noread_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_CLEAR, BWMON_V5_IRQ_CLEAR),
+ regmap_reg_range(BWMON_V5_CLEAR, BWMON_V5_CLEAR),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_read_table = {
+ .no_ranges = sdm845_llcc_bwmon_reg_noread_ranges,
+ .n_no_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_noread_ranges),
+};
+
+static const struct regmap_range sdm845_llcc_bwmon_reg_volatile_ranges[] = {
+ regmap_reg_range(BWMON_V5_IRQ_STATUS, BWMON_V5_IRQ_STATUS),
+ regmap_reg_range(BWMON_V5_ZONE_MAX(0), BWMON_V5_ZONE_MAX(3)),
+};
+
+static const struct regmap_access_table sdm845_llcc_bwmon_reg_volatile_table = {
+ .yes_ranges = sdm845_llcc_bwmon_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sdm845_llcc_bwmon_reg_volatile_ranges),
+};
+
+/*
+ * Fill the cache only for the non-readable registers; the rest does not
+ * really matter and can be read back from the device.
+ */
+static const struct reg_default sdm845_llcc_bwmon_reg_defaults[] = {
+ { BWMON_V5_IRQ_CLEAR, 0x0 },
+ { BWMON_V5_CLEAR, 0x0 },
+};
+
+static const struct regmap_config sdm845_llcc_bwmon_regmap_cfg = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ /*
+ * No concurrent access expected - driver has one interrupt handler,
+ * regmap is not shared, no driver or user-space API.
+ */
+ .disable_locking = true,
+ .rd_table = &sdm845_llcc_bwmon_reg_read_table,
+ .volatile_table = &sdm845_llcc_bwmon_reg_volatile_table,
+ .reg_defaults = sdm845_llcc_bwmon_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sdm845_llcc_bwmon_reg_defaults),
+ /*
+ * Cache is necessary for using regmap fields with non-readable
+ * registers.
+ */
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void bwmon_clear_counters(struct icc_bwmon *bwmon, bool clear_all)
{
+ unsigned int val = BWMON_CLEAR_CLEAR;
+
+ if (clear_all)
+ val |= BWMON_CLEAR_CLEAR_ALL;
/*
* Clear counters. The order and barriers are
* important. Quoting downstream Qualcomm msm-4.9 tree:
@@ -140,7 +343,9 @@ static void bwmon_clear_counters(struct icc_bwmon *bwmon)
* region. So, we need to make sure the counter clear is completed
* before we try to clear the IRQ or do any other counter operations.
*/
- writel(BWMON_CLEAR_CLEAR, bwmon->base + BWMON_CLEAR);
+ regmap_field_force_write(bwmon->regs[F_CLEAR], val);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_CLEAR], 0);
}
static void bwmon_clear_irq(struct icc_bwmon *bwmon)
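
The BWMON_NEEDS_FORCE_CLEAR quirk relies on the "force" regmap variants: a plain cached write can be skipped when the new value equals the cached one, while the forced write always reaches the hardware, which matters for write-to-clear registers like CLEAR and IRQ_CLEAR. A minimal illustration, assuming a generic write-to-clear field:

	#include <linux/regmap.h>

	static void example_clear_w1c_field(struct regmap_field *field,
					    unsigned int bits)
	{
		/* May be elided if the cache already holds "bits". */
		regmap_field_write(field, bits);

		/* Always performs the bus access, as bwmon_clear_counters() needs. */
		regmap_field_force_write(field, bits);
	}
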
@@ -161,76 +366,91 @@ static void bwmon_clear_irq(struct icc_bwmon *bwmon)
* clearing here so that local writes don't happen before the
* interrupt is cleared.
*/
- writel(BWMON_IRQ_ENABLE_MASK, bwmon->base + BWMON_IRQ_CLEAR);
- writel(BIT(0), bwmon->base + BWMON_GLOBAL_IRQ_CLEAR);
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], BWMON_IRQ_ENABLE_MASK);
+ if (bwmon->data->quirks & BWMON_NEEDS_FORCE_CLEAR)
+ regmap_field_force_write(bwmon->regs[F_IRQ_CLEAR], 0);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_force_write(bwmon->regs[F_GLOBAL_IRQ_CLEAR],
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
}
static void bwmon_disable(struct icc_bwmon *bwmon)
{
/* Disable interrupts. Strict ordering, see bwmon_clear_irq(). */
- writel(0x0, bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
- writel(0x0, bwmon->base + BWMON_IRQ_ENABLE);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE], 0x0);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], 0x0);
/*
* Disable bwmon. Must happen before bwmon_clear_irq() to avoid spurious
* IRQ.
*/
- writel(0x0, bwmon->base + BWMON_ENABLE);
+ regmap_field_write(bwmon->regs[F_ENABLE], 0x0);
}
static void bwmon_enable(struct icc_bwmon *bwmon, unsigned int irq_enable)
{
/* Enable interrupts */
- writel(BWMON_GLOBAL_IRQ_ENABLE_ENABLE,
- bwmon->base + BWMON_GLOBAL_IRQ_ENABLE);
- writel(irq_enable, bwmon->base + BWMON_IRQ_ENABLE);
+ if (bwmon->data->quirks & BWMON_HAS_GLOBAL_IRQ)
+ regmap_field_write(bwmon->regs[F_GLOBAL_IRQ_ENABLE],
+ BWMON_V4_GLOBAL_IRQ_ENABLE_ENABLE);
+ regmap_field_write(bwmon->regs[F_IRQ_ENABLE], irq_enable);
/* Enable bwmon */
- writel(BWMON_ENABLE_ENABLE, bwmon->base + BWMON_ENABLE);
+ regmap_field_write(bwmon->regs[F_ENABLE], BWMON_ENABLE_ENABLE);
}
-static unsigned int bwmon_kbps_to_count(unsigned int kbps)
+static unsigned int bwmon_kbps_to_count(struct icc_bwmon *bwmon,
+ unsigned int kbps)
{
- return kbps / BWMON_COUNT_UNIT_KB;
+ return kbps / bwmon->data->count_unit_kb;
}
-static void bwmon_set_threshold(struct icc_bwmon *bwmon, unsigned int reg,
- unsigned int kbps)
+static void bwmon_set_threshold(struct icc_bwmon *bwmon,
+ struct regmap_field *reg, unsigned int kbps)
{
unsigned int thres;
- thres = mult_frac(bwmon_kbps_to_count(kbps), bwmon->sample_ms,
- MSEC_PER_SEC);
- writel_relaxed(thres, bwmon->base + reg);
+ thres = mult_frac(bwmon_kbps_to_count(bwmon, kbps),
+ bwmon->data->sample_ms, MSEC_PER_SEC);
+ regmap_field_write(reg, thres);
}
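
Editorial note: the threshold written here is a count of hardware units expected within one sample window: kbps is divided by count_unit_kb and then scaled by sample_ms/MSEC_PER_SEC via mult_frac(). A worked example using the msm8998 defaults defined later in this patch (count_unit_kb = 64, sample_ms = 4), assuming the usual kernel headers for mult_frac() and MSEC_PER_SEC:

/* Editorial worked example, not driver code. */
static unsigned int example_high_threshold(void)
{
	unsigned int count = (4800 * 1024) / 64;	/* default_highbw_kbps / count_unit_kb = 76800 */

	/* 76800 counts/s over a 4 ms window -> 307 counts */
	return mult_frac(count, 4, MSEC_PER_SEC);
}
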
-static void bwmon_start(struct icc_bwmon *bwmon,
- const struct icc_bwmon_data *data)
+static void bwmon_start(struct icc_bwmon *bwmon)
{
- unsigned int thres_count;
+ const struct icc_bwmon_data *data = bwmon->data;
int window;
- bwmon_clear_counters(bwmon);
+ bwmon_clear_counters(bwmon, true);
- window = mult_frac(bwmon->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
- /* Maximum sampling window: 0xfffff */
- writel_relaxed(window, bwmon->base + BWMON_SAMPLE_WINDOW);
+ window = mult_frac(bwmon->data->sample_ms, HW_TIMER_HZ, MSEC_PER_SEC);
+ /* Maximum sampling window: 0xffffff for v4 and 0xfffff for v5 */
+ regmap_field_write(bwmon->regs[F_SAMPLE_WINDOW], window);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH,
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
data->default_highbw_kbps);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED,
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
data->default_medbw_kbps);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_LOW,
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_LOW],
data->default_lowbw_kbps);
- thres_count = data->zone3_thres_count << BWMON_THRESHOLD_COUNT_ZONE3_SHIFT |
- BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT << BWMON_THRESHOLD_COUNT_ZONE2_SHIFT |
- data->zone1_thres_count << BWMON_THRESHOLD_COUNT_ZONE1_SHIFT |
- BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT;
- writel_relaxed(thres_count, bwmon->base + BWMON_THRESHOLD_COUNT);
- writel_relaxed(BWMON_ZONE_ACTIONS_DEFAULT,
- bwmon->base + BWMON_ZONE_ACTIONS);
- /* Write barriers in bwmon_clear_irq() */
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE0],
+ BWMON_THRESHOLD_COUNT_ZONE0_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE1],
+ data->zone1_thres_count);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE2],
+ BWMON_THRESHOLD_COUNT_ZONE2_DEFAULT);
+ regmap_field_write(bwmon->regs[F_THRESHOLD_COUNT_ZONE3],
+ data->zone3_thres_count);
+
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE0],
+ BWMON_ZONE_ACTIONS_ZONE0);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE1],
+ BWMON_ZONE_ACTIONS_ZONE1);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE2],
+ BWMON_ZONE_ACTIONS_ZONE2);
+ regmap_field_write(bwmon->regs[F_ZONE_ACTIONS_ZONE3],
+ BWMON_ZONE_ACTIONS_ZONE3);
bwmon_clear_irq(bwmon);
bwmon_enable(bwmon, BWMON_IRQ_ENABLE_MASK);
@@ -242,7 +462,9 @@ static irqreturn_t bwmon_intr(int irq, void *dev_id)
unsigned int status, max;
int zone;
- status = readl(bwmon->base + BWMON_IRQ_STATUS);
+ if (regmap_field_read(bwmon->regs[F_IRQ_STATUS], &status))
+ return IRQ_NONE;
+
status &= BWMON_IRQ_ENABLE_MASK;
if (!status) {
/*
@@ -259,15 +481,18 @@ static irqreturn_t bwmon_intr(int irq, void *dev_id)
bwmon_disable(bwmon);
- zone = get_bitmask_order(status >> BWMON_IRQ_STATUS_ZONE_SHIFT) - 1;
+ zone = get_bitmask_order(status) - 1;
/*
* Zone max bytes count register returns count units within sampling
* window. Downstream kernel for BWMONv4 (called BWMON type 2 in
* downstream) always increments the max bytes count by one.
*/
- max = readl(bwmon->base + BWMON_ZONE_MAX(zone)) + 1;
- max *= BWMON_COUNT_UNIT_KB;
- bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->sample_ms);
+ if (regmap_field_read(bwmon->regs[F_ZONE0_MAX + zone], &max))
+ return IRQ_NONE;
+
+ max += 1;
+ max *= bwmon->data->count_unit_kb;
+ bwmon->target_kbps = mult_frac(max, MSEC_PER_SEC, bwmon->data->sample_ms);
return IRQ_WAKE_THREAD;
}
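
Editorial note: with the zone shift dropped, the zone index now comes straight from the highest set status bit; get_bitmask_order() is fls(), i.e. one plus the index of the most significant set bit. The zone-max count read back is then converted to a bandwidth the same way the thresholds were computed, only in reverse. An illustrative walk-through with made-up register values (msm8998: count_unit_kb = 64, sample_ms = 4), assuming the usual kernel headers:

/* Editorial walk-through with hypothetical values, not driver code. */
static void example_decode(void)
{
	unsigned int status = BIT(3) | BIT(1);	/* zones 3 and 1 both latched */
	unsigned int max = 299;			/* hypothetical zone-max register reading */
	unsigned int kbps;
	int zone;

	zone = get_bitmask_order(status) - 1;	/* fls(0xa) - 1 = 3: the highest zone wins */

	/* (299 + 1) units * 64 KB over a 4 ms window -> 4800000 kbps */
	kbps = mult_frac((max + 1) * 64, MSEC_PER_SEC, 4);

	pr_debug("zone %d peaked at %u kbps\n", zone, kbps);
}
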
@@ -297,16 +522,17 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
up_kbps = bwmon->target_kbps + 1;
if (bwmon->target_kbps >= bwmon->max_bw_kbps)
- irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE1_SHIFT);
+ irq_enable = BIT(1);
else if (bwmon->target_kbps <= bwmon->min_bw_kbps)
- irq_enable = BIT(BWMON_IRQ_ENABLE_ZONE3_SHIFT);
+ irq_enable = BIT(3);
else
irq_enable = BWMON_IRQ_ENABLE_MASK;
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_HIGH, up_kbps);
- bwmon_set_threshold(bwmon, BWMON_THRESHOLD_MED, down_kbps);
- /* Write barriers in bwmon_clear_counters() */
- bwmon_clear_counters(bwmon);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_HIGH],
+ up_kbps);
+ bwmon_set_threshold(bwmon, bwmon->regs[F_THRESHOLD_MED],
+ down_kbps);
+ bwmon_clear_counters(bwmon, false);
bwmon_clear_irq(bwmon);
bwmon_enable(bwmon, irq_enable);
@@ -324,25 +550,47 @@ out:
return IRQ_HANDLED;
}
+static int bwmon_init_regmap(struct platform_device *pdev,
+ struct icc_bwmon *bwmon)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *map;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to map bwmon registers\n");
+
+ map = devm_regmap_init_mmio(dev, base, bwmon->data->regmap_cfg);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "failed to initialize regmap\n");
+
+ BUILD_BUG_ON(ARRAY_SIZE(msm8998_bwmon_reg_fields) != F_NUM_FIELDS);
+ BUILD_BUG_ON(ARRAY_SIZE(sdm845_llcc_bwmon_reg_fields) != F_NUM_FIELDS);
+
+ return devm_regmap_field_bulk_alloc(dev, map, bwmon->regs,
+ bwmon->data->regmap_fields,
+ F_NUM_FIELDS);
+}
+
static int bwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dev_pm_opp *opp;
struct icc_bwmon *bwmon;
- const struct icc_bwmon_data *data;
int ret;
bwmon = devm_kzalloc(dev, sizeof(*bwmon), GFP_KERNEL);
if (!bwmon)
return -ENOMEM;
- data = of_device_get_match_data(dev);
+ bwmon->data = of_device_get_match_data(dev);
- bwmon->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(bwmon->base)) {
- dev_err(dev, "failed to map bwmon registers\n");
- return PTR_ERR(bwmon->base);
- }
+ ret = bwmon_init_regmap(pdev, bwmon);
+ if (ret)
+ return ret;
bwmon->irq = platform_get_irq(pdev, 0);
if (bwmon->irq < 0)
@@ -362,8 +610,6 @@ static int bwmon_probe(struct platform_device *pdev)
if (IS_ERR(opp))
return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n");
- bwmon->sample_ms = data->sample_ms;
- bwmon->default_lowbw_kbps = data->default_lowbw_kbps;
bwmon->dev = dev;
bwmon_disable(bwmon);
@@ -374,7 +620,7 @@ static int bwmon_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "failed to request IRQ\n");
platform_set_drvdata(pdev, bwmon);
- bwmon_start(bwmon, data);
+ bwmon_start(bwmon);
return 0;
}
@@ -388,18 +634,55 @@ static int bwmon_remove(struct platform_device *pdev)
return 0;
}
-/* BWMON v4 */
static const struct icc_bwmon_data msm8998_bwmon_data = {
.sample_ms = 4,
+ .count_unit_kb = 64,
.default_highbw_kbps = 4800 * 1024, /* 4.8 GBps */
.default_medbw_kbps = 512 * 1024, /* 512 MBps */
.default_lowbw_kbps = 0,
.zone1_thres_count = 16,
.zone3_thres_count = 1,
+ .quirks = BWMON_HAS_GLOBAL_IRQ,
+ .regmap_fields = msm8998_bwmon_reg_fields,
+ .regmap_cfg = &msm8998_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sdm845_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 1024,
+ .default_highbw_kbps = 800 * 1024, /* 800 MBps */
+ .default_medbw_kbps = 256 * 1024, /* 256 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
+};
+
+static const struct icc_bwmon_data sc7280_llcc_bwmon_data = {
+ .sample_ms = 4,
+ .count_unit_kb = 64,
+ .default_highbw_kbps = 800 * 1024, /* 800 MBps */
+ .default_medbw_kbps = 256 * 1024, /* 256 MBps */
+ .default_lowbw_kbps = 0,
+ .zone1_thres_count = 16,
+ .zone3_thres_count = 1,
+ .quirks = BWMON_NEEDS_FORCE_CLEAR,
+ .regmap_fields = sdm845_llcc_bwmon_reg_fields,
+ .regmap_cfg = &sdm845_llcc_bwmon_regmap_cfg,
};
static const struct of_device_id bwmon_of_match[] = {
- { .compatible = "qcom,msm8998-bwmon", .data = &msm8998_bwmon_data },
+ {
+ .compatible = "qcom,msm8998-bwmon",
+ .data = &msm8998_bwmon_data
+ }, {
+ .compatible = "qcom,sdm845-llcc-bwmon",
+ .data = &sdm845_llcc_bwmon_data
+ }, {
+ .compatible = "qcom,sc7280-llcc-bwmon",
+ .data = &sc7280_llcc_bwmon_data
+ },
{}
};
MODULE_DEVICE_TABLE(of, bwmon_of_match);
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 38d7296315a2..8b7e8118f3ce 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -104,6 +104,7 @@ struct qcom_llcc_config {
int size;
bool need_llcc_cfg;
const u32 *reg_offset;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
};
enum llcc_reg_offset {
@@ -296,12 +297,68 @@ static const struct llcc_slice_config sm8450_data[] = {
{LLCC_AENPU, 8, 2048, 1, 1, 0xFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0 },
};
-static const u32 llcc_v1_2_reg_offset[] = {
+static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2304c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3000c,
+ .cmn_interrupt_0_enable = 0x3001c,
+ .cmn_interrupt_2_enable = 0x3003c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x40000,
+ .drp_ecc_error_cntr_clear = 0x40004,
+ .drp_interrupt_status = 0x41000,
+ .drp_interrupt_clear = 0x41008,
+ .drp_interrupt_enable = 0x4100c,
+ .drp_ecc_error_status0 = 0x42044,
+ .drp_ecc_error_status1 = 0x42048,
+ .drp_ecc_sb_err_syn0 = 0x4204c,
+ .drp_ecc_db_err_syn0 = 0x42070,
+};
+
+static const struct llcc_edac_reg_offset llcc_v2_1_edac_reg_offset = {
+ .trp_ecc_error_status0 = 0x20344,
+ .trp_ecc_error_status1 = 0x20348,
+ .trp_ecc_sb_err_syn0 = 0x2034c,
+ .trp_ecc_db_err_syn0 = 0x20370,
+ .trp_ecc_error_cntr_clear = 0x20440,
+ .trp_interrupt_0_status = 0x20480,
+ .trp_interrupt_0_clear = 0x20484,
+ .trp_interrupt_0_enable = 0x20488,
+
+ /* LLCC Common registers */
+ .cmn_status0 = 0x3400c,
+ .cmn_interrupt_0_enable = 0x3401c,
+ .cmn_interrupt_2_enable = 0x3403c,
+
+ /* LLCC DRP registers */
+ .drp_ecc_error_cfg = 0x50000,
+ .drp_ecc_error_cntr_clear = 0x50004,
+ .drp_interrupt_status = 0x50020,
+ .drp_interrupt_clear = 0x50028,
+ .drp_interrupt_enable = 0x5002c,
+ .drp_ecc_error_status0 = 0x520f4,
+ .drp_ecc_error_status1 = 0x520f8,
+ .drp_ecc_sb_err_syn0 = 0x520fc,
+ .drp_ecc_db_err_syn0 = 0x52120,
+};
+
+/* LLCC register offset starting from v1.0.0 */
+static const u32 llcc_v1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00030000,
[LLCC_COMMON_STATUS0] = 0x0003000c,
};
-static const u32 llcc_v21_reg_offset[] = {
+/* LLCC register offset starting from v2.0.1 */
+static const u32 llcc_v2_1_reg_offset[] = {
[LLCC_COMMON_HW_INFO] = 0x00034000,
[LLCC_COMMON_STATUS0] = 0x0003400c,
};
@@ -310,70 +367,80 @@ static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sc7280_cfg = {
.sct_data = sc7280_data,
.size = ARRAY_SIZE(sc7280_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sc8180x_cfg = {
.sct_data = sc8180x_data,
.size = ARRAY_SIZE(sc8180x_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sc8280xp_cfg = {
.sct_data = sc8280xp_data,
.size = ARRAY_SIZE(sc8280xp_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
.need_llcc_cfg = false,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm6350_cfg = {
.sct_data = sm6350_data,
.size = ARRAY_SIZE(sm6350_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8150_cfg = {
.sct_data = sm8150_data,
.size = ARRAY_SIZE(sm8150_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8250_cfg = {
.sct_data = sm8250_data,
.size = ARRAY_SIZE(sm8250_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8350_cfg = {
.sct_data = sm8350_data,
.size = ARRAY_SIZE(sm8350_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v1_2_reg_offset,
+ .reg_offset = llcc_v1_reg_offset,
+ .edac_reg_offset = &llcc_v1_edac_reg_offset,
};
static const struct qcom_llcc_config sm8450_cfg = {
.sct_data = sm8450_data,
.size = ARRAY_SIZE(sm8450_data),
.need_llcc_cfg = true,
- .reg_offset = llcc_v21_reg_offset,
+ .reg_offset = llcc_v2_1_reg_offset,
+ .edac_reg_offset = &llcc_v2_1_edac_reg_offset,
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -774,6 +841,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
+ drv_data->edac_reg_offset = cfg->edac_reg_offset;
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
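
Editorial note: exporting cfg->edac_reg_offset through drv_data lets a consumer such as the Qualcomm LLCC EDAC driver look up the per-version register layout instead of hard-coding v1 offsets. A hedged sketch of how a consumer would use it (the helper is illustrative; the regmap argument is passed in to avoid assuming drv_data internals not shown here):

/* Editorial sketch -- illustrative consumer of the per-version offsets. */
static u32 example_read_drp_status(struct llcc_drv_data *drv, struct regmap *map)
{
	u32 val = 0;

	regmap_read(map, drv->edac_reg_offset->drp_interrupt_status, &val);
	return val;
}
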
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
index d6bfd1bbdc2a..121ea409fafc 100644
--- a/drivers/soc/qcom/qcom_stats.c
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -246,6 +246,14 @@ static const struct stats_config rpm_data_dba0 = {
.subsystem_stats_in_smem = false,
};
+static const struct stats_config rpmh_data_sdm845 = {
+ .stats_offset = 0x48,
+ .num_records = 2,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
static const struct stats_config rpmh_data = {
.stats_offset = 0x48,
.num_records = 3,
@@ -261,6 +269,7 @@ static const struct of_device_id qcom_stats_table[] = {
{ .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 },
{ .compatible = "qcom,rpm-stats", .data = &rpm_data },
{ .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
+ { .compatible = "qcom,sdm845-rpmh-stats", .data = &rpmh_data_sdm845 },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_stats_table);
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 328cc8237191..b7158e3c3a0b 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -57,11 +57,11 @@ do { \
#define TLV_TYPE_SIZE sizeof(u8)
#define OPTIONAL_TLV_TYPE_START 0x10
-static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
const void *in_c_struct, u32 out_buf_len,
int enc_level);
-static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
const void *in_buf, u32 in_buf_len, int dec_level);
/**
@@ -76,10 +76,10 @@ static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
*
* Return: struct info of the next element that can be encoded.
*/
-static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
- int level)
+static const struct qmi_elem_info *
+skip_to_next_elem(const struct qmi_elem_info *ei_array, int level)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u8 tlv_type;
if (level > 1) {
@@ -101,11 +101,11 @@ static struct qmi_elem_info *skip_to_next_elem(struct qmi_elem_info *ei_array,
*
* Return: Expected minimum length of the QMI message or 0 on error.
*/
-static int qmi_calc_min_msg_len(struct qmi_elem_info *ei_array,
+static int qmi_calc_min_msg_len(const struct qmi_elem_info *ei_array,
int level)
{
int min_msg_len = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
if (!ei_array)
return min_msg_len;
@@ -194,13 +194,13 @@ static int qmi_encode_basic_elem(void *buf_dst, const void *buf_src,
* Return: The number of bytes of encoded information on success or negative
* errno on error.
*/
-static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
+static int qmi_encode_struct_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 elem_len, u32 out_buf_len,
int enc_level)
{
int i, rc, encoded_bytes = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
for (i = 0; i < elem_len; i++) {
rc = qmi_encode(temp_ei->ei_array, buf_dst, buf_src,
@@ -233,13 +233,13 @@ static int qmi_encode_struct_elem(struct qmi_elem_info *ei_array,
* Return: The number of bytes of encoded information on success or negative
* errno on error.
*/
-static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
+static int qmi_encode_string_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 out_buf_len, int enc_level)
{
int rc;
int encoded_bytes = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u32 string_len = 0;
u32 string_len_sz = 0;
@@ -289,11 +289,11 @@ static int qmi_encode_string_elem(struct qmi_elem_info *ei_array,
* Return: The number of bytes of encoded information on success or negative
* errno on error.
*/
-static int qmi_encode(struct qmi_elem_info *ei_array, void *out_buf,
+static int qmi_encode(const struct qmi_elem_info *ei_array, void *out_buf,
const void *in_c_struct, u32 out_buf_len,
int enc_level)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u8 opt_flag_value = 0;
u32 data_len_value = 0, data_len_sz;
u8 *buf_dst = (u8 *)out_buf;
@@ -468,13 +468,13 @@ static int qmi_decode_basic_elem(void *buf_dst, const void *buf_src,
* Return: The total size of the decoded data elements on success, negative
* errno on error.
*/
-static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
+static int qmi_decode_struct_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 elem_len, u32 tlv_len,
int dec_level)
{
int i, rc, decoded_bytes = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
rc = qmi_decode(temp_ei->ei_array, buf_dst, buf_src,
@@ -514,7 +514,7 @@ static int qmi_decode_struct_elem(struct qmi_elem_info *ei_array,
* Return: The total size of the decoded data elements on success, negative
* errno on error.
*/
-static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
+static int qmi_decode_string_elem(const struct qmi_elem_info *ei_array,
void *buf_dst, const void *buf_src,
u32 tlv_len, int dec_level)
{
@@ -522,7 +522,7 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
int decoded_bytes = 0;
u32 string_len = 0;
u32 string_len_sz = 0;
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
if (dec_level == 1) {
string_len = tlv_len;
@@ -564,10 +564,10 @@ static int qmi_decode_string_elem(struct qmi_elem_info *ei_array,
*
* Return: Pointer to struct info, if found
*/
-static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
- u32 type)
+static const struct qmi_elem_info *find_ei(const struct qmi_elem_info *ei_array,
+ u32 type)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
while (temp_ei->data_type != QMI_EOTI) {
if (temp_ei->tlv_type == (u8)type)
@@ -590,11 +590,11 @@ static struct qmi_elem_info *find_ei(struct qmi_elem_info *ei_array,
* Return: The number of bytes of decoded information on success, negative
* errno on error.
*/
-static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
+static int qmi_decode(const struct qmi_elem_info *ei_array, void *out_c_struct,
const void *in_buf, u32 in_buf_len,
int dec_level)
{
- struct qmi_elem_info *temp_ei = ei_array;
+ const struct qmi_elem_info *temp_ei = ei_array;
u8 opt_flag_value = 1;
u32 data_len_value = 0, data_len_sz = 0;
u8 *buf_dst = out_c_struct;
@@ -713,7 +713,7 @@ static int qmi_decode(struct qmi_elem_info *ei_array, void *out_c_struct,
* Return: Buffer with encoded message, or negative ERR_PTR() on error
*/
void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
- unsigned int txn_id, struct qmi_elem_info *ei,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
const void *c_struct)
{
struct qmi_header *hdr;
@@ -767,7 +767,7 @@ EXPORT_SYMBOL(qmi_encode_message);
* errno on error.
*/
int qmi_decode_message(const void *buf, size_t len,
- struct qmi_elem_info *ei, void *c_struct)
+ const struct qmi_elem_info *ei, void *c_struct)
{
if (!ei)
return -EINVAL;
@@ -781,7 +781,7 @@ int qmi_decode_message(const void *buf, size_t len,
EXPORT_SYMBOL(qmi_decode_message);
/* Common header in all QMI responses */
-struct qmi_elem_info qmi_response_type_v01_ei[] = {
+const struct qmi_elem_info qmi_response_type_v01_ei[] = {
{
.data_type = QMI_SIGNED_2_BYTE_ENUM,
.elem_len = 1,
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index c8c4c730b135..57052726299d 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(qmi_add_server);
* Return: Transaction id on success, negative errno on failure.
*/
int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
- struct qmi_elem_info *ei, void *c_struct)
+ const struct qmi_elem_info *ei, void *c_struct)
{
int ret;
@@ -736,7 +736,8 @@ EXPORT_SYMBOL(qmi_handle_release);
static ssize_t qmi_send_message(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq, struct qmi_txn *txn,
int type, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct)
+ const struct qmi_elem_info *ei,
+ const void *c_struct)
{
struct msghdr msghdr = {};
struct kvec iv;
@@ -787,7 +788,7 @@ static ssize_t qmi_send_message(struct qmi_handle *qmi,
*/
ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct)
+ const struct qmi_elem_info *ei, const void *c_struct)
{
return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
c_struct);
@@ -808,7 +809,7 @@ EXPORT_SYMBOL(qmi_send_request);
*/
ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct)
+ const struct qmi_elem_info *ei, const void *c_struct)
{
return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
c_struct);
@@ -827,7 +828,8 @@ EXPORT_SYMBOL(qmi_send_response);
* Return: 0 on success, negative errno on failure.
*/
ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
- int msg_id, size_t len, struct qmi_elem_info *ei,
+ int msg_id, size_t len,
+ const struct qmi_elem_info *ei,
const void *c_struct)
{
struct qmi_txn txn;
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 5803038c744e..337b1ad1cd3b 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -29,6 +29,7 @@
#define RPMPD_RWLM 0x6d6c7772
#define RPMPD_RWSC 0x63737772
#define RPMPD_RWSM 0x6d737772
+#define RPMPD_RWGX 0x78677772
/* Operation Keys */
#define KEY_CORNER 0x6e726f63 /* corn */
@@ -433,6 +434,26 @@ static const struct rpmpd_desc sm6125_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
+DEFINE_RPMPD_PAIR(sm6375, vddgx, vddgx_ao, RWGX, LEVEL, 0);
+static struct rpmpd *sm6375_rpmpds[] = {
+ [SM6375_VDDCX] = &sm6125_vddcx,
+ [SM6375_VDDCX_AO] = &sm6125_vddcx_ao,
+ [SM6375_VDDCX_VFL] = &sm6125_vddcx_vfl,
+ [SM6375_VDDMX] = &sm6125_vddmx,
+ [SM6375_VDDMX_AO] = &sm6125_vddmx_ao,
+ [SM6375_VDDMX_VFL] = &sm6125_vddmx_vfl,
+ [SM6375_VDDGX] = &sm6375_vddgx,
+ [SM6375_VDDGX_AO] = &sm6375_vddgx_ao,
+ [SM6375_VDD_LPI_CX] = &sm6115_vdd_lpi_cx,
+ [SM6375_VDD_LPI_MX] = &sm6115_vdd_lpi_mx,
+};
+
+static const struct rpmpd_desc sm6375_desc = {
+ .rpmpds = sm6375_rpmpds,
+ .num_pds = ARRAY_SIZE(sm6375_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR,
+};
+
static struct rpmpd *qcm2290_rpmpds[] = {
[QCM2290_VDDCX] = &sm6115_vddcx,
[QCM2290_VDDCX_AO] = &sm6115_vddcx_ao,
@@ -466,6 +487,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc },
{ .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc },
+ { .compatible = "qcom,sm6375-rpmpd", .data = &sm6375_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c
index 31faf4aa868e..e848cc9a3cf8 100644
--- a/drivers/soc/qcom/smem_state.c
+++ b/drivers/soc/qcom/smem_state.c
@@ -136,6 +136,7 @@ static void qcom_smem_state_release(struct kref *ref)
struct qcom_smem_state *state = container_of(ref, struct qcom_smem_state, refcount);
list_del(&state->list);
+ of_node_put(state->of_node);
kfree(state);
}
@@ -205,7 +206,7 @@ struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
kref_init(&state->refcount);
- state->of_node = of_node;
+ state->of_node = of_node_get(of_node);
state->ops = *ops;
state->priv = priv;
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 9df9bba242f3..3e8994d6110e 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -526,7 +526,7 @@ static int qcom_smsm_probe(struct platform_device *pdev)
for (id = 0; id < smsm->num_hosts; id++) {
ret = smsm_parse_ipc(smsm, id);
if (ret < 0)
- return ret;
+ goto out_put;
}
/* Acquire the main SMSM state vector */
@@ -534,13 +534,14 @@ static int qcom_smsm_probe(struct platform_device *pdev)
smsm->num_entries * sizeof(u32));
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate shared state entry\n");
- return ret;
+ goto out_put;
}
states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
if (IS_ERR(states)) {
dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
- return PTR_ERR(states);
+ ret = PTR_ERR(states);
+ goto out_put;
}
/* Acquire the list of interrupt mask vectors */
@@ -548,13 +549,14 @@ static int qcom_smsm_probe(struct platform_device *pdev)
ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
if (ret < 0 && ret != -EEXIST) {
dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
- return ret;
+ goto out_put;
}
intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
if (IS_ERR(intr_mask)) {
dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
- return PTR_ERR(intr_mask);
+ ret = PTR_ERR(intr_mask);
+ goto out_put;
}
/* Setup the reference to the local state bits */
@@ -565,7 +567,8 @@ static int qcom_smsm_probe(struct platform_device *pdev)
smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
if (IS_ERR(smsm->state)) {
dev_err(smsm->dev, "failed to register qcom_smem_state\n");
- return PTR_ERR(smsm->state);
+ ret = PTR_ERR(smsm->state);
+ goto out_put;
}
/* Register handlers for remote processor entries of interest. */
@@ -595,16 +598,19 @@ static int qcom_smsm_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, smsm);
+ of_node_put(local_node);
return 0;
unwind_interfaces:
+ of_node_put(node);
for (id = 0; id < smsm->num_entries; id++)
if (smsm->entries[id].domain)
irq_domain_remove(smsm->entries[id].domain);
qcom_smem_state_unregister(smsm->state);
-
+out_put:
+ of_node_put(local_node);
return ret;
}
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 4554fb8655d3..aa37e1bad095 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -104,6 +104,7 @@ static const char *const pmic_models[] = {
[36] = "PM8009",
[38] = "PM8150C",
[41] = "SMB2351",
+ [45] = "PM6125",
[47] = "PMK8350",
[48] = "PM8350",
[49] = "PM8350C",
@@ -334,6 +335,7 @@ static const struct soc_id soc_id[] = {
{ 482, "SM8450" },
{ 487, "SC7280" },
{ 495, "SC7180P" },
+ { 507, "SM6375" },
};
static const char *socinfo_machine(struct device *dev, unsigned int id)
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index c50a6ce1b99d..f95a1337450d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,6 +44,7 @@ config ARCH_RZG2L
bool
select PM
select PM_GENERIC_DOMAINS
+ select RENESAS_RZG2L_IRQC
config ARCH_RZN1
bool
@@ -332,6 +333,16 @@ config ARCH_R9A09G011
endif # ARM64
+if RISCV
+
+config ARCH_R9A07G043
+ bool "RISC-V Platform support for RZ/Five"
+ select ARCH_RZG2L
+ help
+ This enables support for the Renesas RZ/Five SoC.
+
+endif # RISCV
+
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index d171f1b635c7..621ceaa047d4 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -50,6 +50,10 @@ static const struct renesas_family fam_rza2 __initconst __maybe_unused = {
.name = "RZ/A2",
};
+static const struct renesas_family fam_rzfive __initconst __maybe_unused = {
+ .name = "RZ/Five",
+};
+
static const struct renesas_family fam_rzg1 __initconst __maybe_unused = {
.name = "RZ/G1",
.reg = 0xff000044, /* PRR (Product Register) */
@@ -102,6 +106,11 @@ static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
.id = 0x40,
};
+static const struct renesas_soc soc_rz_five __initconst __maybe_unused = {
+ .family = &fam_rzfive,
+ .id = 0x847c447,
+};
+
static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = {
.family = &fam_rzg1,
.id = 0x45,
@@ -320,6 +329,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a779m0", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m8", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779mb", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
@@ -358,8 +368,12 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
#endif
#if defined(CONFIG_ARCH_R9A07G043)
+#ifdef CONFIG_RISCV
+ { .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
+#else
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_g2ul },
#endif
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index 9df513d1219b..6619256c2d11 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -491,6 +491,22 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
},
};
+static const struct rockchip_iodomain_soc_data soc_data_rv1126_pmu = {
+ .grf_offset = 0x140,
+ .supply_names = {
+ NULL,
+ "vccio1",
+ "vccio2",
+ "vccio3",
+ "vccio4",
+ "vccio5",
+ "vccio6",
+ "vccio7",
+ "pmuio0",
+ "pmuio1",
+ },
+};
+
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,px30-io-voltage-domain",
@@ -544,6 +560,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
.data = &soc_data_rv1108_pmu
},
+ {
+ .compatible = "rockchip,rv1126-pmu-io-voltage-domain",
+ .data = &soc_data_rv1126_pmu
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 89795abac951..84bc022f9e5b 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <soc/rockchip/pm_domains.h>
#include <dt-bindings/power/px30-power.h>
+#include <dt-bindings/power/rockchip,rv1126-power.h>
#include <dt-bindings/power/rk3036-power.h>
#include <dt-bindings/power/rk3066-power.h>
#include <dt-bindings/power/rk3128-power.h>
@@ -30,6 +31,7 @@
#include <dt-bindings/power/rk3368-power.h>
#include <dt-bindings/power/rk3399-power.h>
#include <dt-bindings/power/rk3568-power.h>
+#include <dt-bindings/power/rk3588-power.h>
struct rockchip_domain_info {
const char *name;
@@ -41,6 +43,9 @@ struct rockchip_domain_info {
bool active_wakeup;
int pwr_w_mask;
int req_w_mask;
+ int repair_status_mask;
+ u32 pwr_offset;
+ u32 req_offset;
};
struct rockchip_pmu_info {
@@ -49,6 +54,7 @@ struct rockchip_pmu_info {
u32 req_offset;
u32 idle_offset;
u32 ack_offset;
+ u32 repair_status_offset;
u32 core_pwrcnt_offset;
u32 gpu_pwrcnt_offset;
@@ -113,6 +119,22 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
+#define DOMAIN_M_O_R(_name, p_offset, pwr, status, r_status, r_offset, req, idle, ack, wakeup) \
+{ \
+ .name = _name, \
+ .pwr_offset = p_offset, \
+ .pwr_w_mask = (pwr) << 16, \
+ .pwr_mask = (pwr), \
+ .status_mask = (status), \
+ .repair_status_mask = (r_status), \
+ .req_offset = r_offset, \
+ .req_w_mask = (req) << 16, \
+ .req_mask = (req), \
+ .idle_mask = (idle), \
+ .ack_mask = (ack), \
+ .active_wakeup = wakeup, \
+}
+
#define DOMAIN_RK3036(_name, req, ack, idle, wakeup) \
{ \
.name = _name, \
@@ -126,6 +148,9 @@ struct rockchip_pmu {
#define DOMAIN_PX30(name, pwr, status, req, wakeup) \
DOMAIN_M(name, pwr, status, req, (req) << 16, req, wakeup)
+#define DOMAIN_RV1126(name, pwr, req, idle, wakeup) \
+ DOMAIN_M(name, pwr, pwr, req, idle, idle, wakeup)
+
#define DOMAIN_RK3288(name, pwr, status, req, wakeup) \
DOMAIN(name, pwr, status, req, req, (req) << 16, wakeup)
@@ -244,6 +269,9 @@ void rockchip_pmu_unblock(void)
}
EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
+#define DOMAIN_RK3588(name, p_offset, pwr, status, r_status, r_offset, req, idle, wakeup) \
+ DOMAIN_M_O_R(name, p_offset, pwr, status, r_status, r_offset, req, idle, idle, wakeup)
+
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
@@ -268,6 +296,7 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
const struct rockchip_domain_info *pd_info = pd->info;
struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
+ u32 pd_req_offset = pd_info->req_offset;
unsigned int target_ack;
unsigned int val;
bool is_idle;
@@ -276,11 +305,11 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
if (pd_info->req_mask == 0)
return 0;
else if (pd_info->req_w_mask)
- regmap_write(pmu->regmap, pmu->info->req_offset,
+ regmap_write(pmu->regmap, pmu->info->req_offset + pd_req_offset,
idle ? (pd_info->req_mask | pd_info->req_w_mask) :
pd_info->req_w_mask);
else
- regmap_update_bits(pmu->regmap, pmu->info->req_offset,
+ regmap_update_bits(pmu->regmap, pmu->info->req_offset + pd_req_offset,
pd_info->req_mask, idle ? -1U : 0);
wmb();
@@ -363,6 +392,12 @@ static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
+ if (pd->info->repair_status_mask) {
+ regmap_read(pmu->regmap, pmu->info->repair_status_offset, &val);
+ /* 1'b1: power on, 1'b0: power off */
+ return val & pd->info->repair_status_mask;
+ }
+
/* check idle status for idle-only domains */
if (pd->info->status_mask == 0)
return !rockchip_pmu_domain_is_idle(pd);
@@ -378,16 +413,17 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
+ u32 pd_pwr_offset = pd->info->pwr_offset;
bool is_on;
if (pd->info->pwr_mask == 0)
return;
else if (pd->info->pwr_w_mask)
- regmap_write(pmu->regmap, pmu->info->pwr_offset,
+ regmap_write(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
on ? pd->info->pwr_w_mask :
(pd->info->pwr_mask | pd->info->pwr_w_mask));
else
- regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
+ regmap_update_bits(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
pd->info->pwr_mask, on ? 0 : -1U);
wmb();
@@ -514,6 +550,9 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
node, id);
return -EINVAL;
}
+ /* RK3588 has domains with two parents (RKVDEC0/RKVDEC1) */
+ if (pmu->genpd_data.domains[id])
+ return 0;
pd_info = &pmu->info->domain_info[id];
if (!pd_info) {
@@ -595,14 +634,6 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
}
}
- error = rockchip_pd_power(pd, true);
- if (error) {
- dev_err(pmu->dev,
- "failed to power on domain '%pOFn': %d\n",
- node, error);
- goto err_unprepare_clocks;
- }
-
if (pd->info->name)
pd->genpd.name = pd->info->name;
else
@@ -614,7 +645,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.flags = GENPD_FLAG_PM_CLK;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
- pm_genpd_init(&pd->genpd, NULL, false);
+ pm_genpd_init(&pd->genpd, NULL, !rockchip_pmu_domain_is_on(pd));
pmu->genpd_data.domains[id] = &pd->genpd;
return 0;
@@ -855,6 +886,16 @@ static const struct rockchip_domain_info px30_pm_domains[] = {
[PX30_PD_GPU] = DOMAIN_PX30("gpu", BIT(15), BIT(15), BIT(2), false),
};
+static const struct rockchip_domain_info rv1126_pm_domains[] = {
+ [RV1126_PD_VEPU] = DOMAIN_RV1126("vepu", BIT(2), BIT(9), BIT(9), false),
+ [RV1126_PD_VI] = DOMAIN_RV1126("vi", BIT(4), BIT(6), BIT(6), false),
+ [RV1126_PD_ISPP] = DOMAIN_RV1126("ispp", BIT(1), BIT(8), BIT(8), false),
+ [RV1126_PD_VDPU] = DOMAIN_RV1126("vdpu", BIT(3), BIT(10), BIT(10), false),
+ [RV1126_PD_NVM] = DOMAIN_RV1126("nvm", BIT(7), BIT(11), BIT(11), false),
+ [RV1126_PD_SDIO] = DOMAIN_RV1126("sdio", BIT(8), BIT(13), BIT(13), false),
+ [RV1126_PD_USB] = DOMAIN_RV1126("usb", BIT(9), BIT(15), BIT(15), false),
+};
+
static const struct rockchip_domain_info rk3036_pm_domains[] = {
[RK3036_PD_MSCH] = DOMAIN_RK3036("msch", BIT(14), BIT(23), BIT(30), true),
[RK3036_PD_CORE] = DOMAIN_RK3036("core", BIT(13), BIT(17), BIT(24), false),
@@ -982,6 +1023,38 @@ static const struct rockchip_domain_info rk3568_pm_domains[] = {
[RK3568_PD_PIPE] = DOMAIN_RK3568("pipe", BIT(8), BIT(11), false),
};
+static const struct rockchip_domain_info rk3588_pm_domains[] = {
+ [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, BIT(1), 0x0, BIT(0), BIT(0), false),
+ [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0, 0x0, 0, 0, false),
+ [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0, 0x0, 0, 0, false),
+ [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, BIT(2), 0x0, BIT(1), BIT(1), false),
+ [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, BIT(3), 0x0, BIT(2), BIT(2), false),
+ [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, BIT(4), 0x0, BIT(3), BIT(3), false),
+ [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, BIT(5), 0x0, BIT(4), BIT(4), false),
+ [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, BIT(6), 0x0, BIT(5), BIT(5), false),
+ [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, BIT(7), 0x0, BIT(6), BIT(6), false),
+ [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, BIT(8), 0x0, BIT(7), BIT(7), false),
+ [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, BIT(9), 0x0, BIT(8), BIT(8), false),
+ [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, BIT(10), 0x0, 0, 0, false),
+ [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, BIT(11), 0x0, BIT(9), BIT(9), false),
+ [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, BIT(12), 0x0, BIT(10), BIT(10), false),
+ [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, BIT(13), 0x0, 0, 0, false),
+ [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, BIT(14), 0x0, BIT(11), BIT(11), false),
+ [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, BIT(15), 0x0, BIT(12), BIT(12), false),
+ [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
+ [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, BIT(17), 0x0, BIT(15), BIT(15), false),
+ [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, BIT(18), 0x4, BIT(0), BIT(16), false),
+ [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, BIT(19), 0x4, BIT(1), BIT(17), false),
+ [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, BIT(20), 0x4, BIT(5), BIT(21), false),
+ [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, BIT(21), 0x0, 0, 0, false),
+ [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, BIT(22), 0x0, 0, 0, true),
+ [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0, 0x4, BIT(2), BIT(18), false),
+ [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, BIT(23), 0x0, 0, 0, false),
+ [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, BIT(24), 0x4, BIT(3), BIT(19), false),
+ [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, BIT(25), 0x4, BIT(4), BIT(20), true),
+ [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, BIT(26), 0x0, 0, 0, false),
+};
+
static const struct rockchip_pmu_info px30_pmu = {
.pwr_offset = 0x18,
.status_offset = 0x20,
@@ -1128,6 +1201,29 @@ static const struct rockchip_pmu_info rk3568_pmu = {
.domain_info = rk3568_pm_domains,
};
+static const struct rockchip_pmu_info rk3588_pmu = {
+ .pwr_offset = 0x14c,
+ .status_offset = 0x180,
+ .req_offset = 0x10c,
+ .idle_offset = 0x120,
+ .ack_offset = 0x118,
+ .repair_status_offset = 0x290,
+
+ .num_domains = ARRAY_SIZE(rk3588_pm_domains),
+ .domain_info = rk3588_pm_domains,
+};
+
+static const struct rockchip_pmu_info rv1126_pmu = {
+ .pwr_offset = 0x110,
+ .status_offset = 0x108,
+ .req_offset = 0xc0,
+ .idle_offset = 0xd8,
+ .ack_offset = 0xd0,
+
+ .num_domains = ARRAY_SIZE(rv1126_pm_domains),
+ .domain_info = rv1126_pm_domains,
+};
+
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
{
.compatible = "rockchip,px30-power-controller",
@@ -1177,6 +1273,14 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.compatible = "rockchip,rk3568-power-controller",
.data = (void *)&rk3568_pmu,
},
+ {
+ .compatible = "rockchip,rk3588-power-controller",
+ .data = (void *)&rk3588_pmu,
+ },
+ {
+ .compatible = "rockchip,rv1126-power-controller",
+ .data = (void *)&rv1126_pmu,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index a8f3876963a0..92f9186c1c42 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -78,8 +78,8 @@ static struct sunxi_sram_desc sun4i_a10_sram_d = {
static struct sunxi_sram_desc sun50i_a64_sram_c = {
.data = SUNXI_SRAM_DATA("C", 0x4, 24, 1,
- SUNXI_SRAM_MAP(0, 1, "cpu"),
- SUNXI_SRAM_MAP(1, 0, "de2")),
+ SUNXI_SRAM_MAP(1, 0, "cpu"),
+ SUNXI_SRAM_MAP(0, 1, "de2")),
};
static const struct of_device_id sunxi_sram_dt_ids[] = {
@@ -254,36 +254,36 @@ int sunxi_sram_claim(struct device *dev)
writel(val | ((device << sram_data->offset) & mask),
base + sram_data->reg);
+ sram_desc->claimed = true;
spin_unlock(&sram_lock);
return 0;
}
EXPORT_SYMBOL(sunxi_sram_claim);
-int sunxi_sram_release(struct device *dev)
+void sunxi_sram_release(struct device *dev)
{
const struct sunxi_sram_data *sram_data;
struct sunxi_sram_desc *sram_desc;
if (!dev || !dev->of_node)
- return -EINVAL;
+ return;
sram_data = sunxi_sram_of_parse(dev->of_node, NULL);
if (IS_ERR(sram_data))
- return -EINVAL;
+ return;
sram_desc = to_sram_desc(sram_data);
spin_lock(&sram_lock);
sram_desc->claimed = false;
spin_unlock(&sram_lock);
-
- return 0;
}
EXPORT_SYMBOL(sunxi_sram_release);
struct sunxi_sramc_variant {
int num_emac_clocks;
+ bool has_ldo_ctrl;
};
static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
@@ -294,6 +294,11 @@ static const struct sunxi_sramc_variant sun8i_h3_sramc_variant = {
.num_emac_clocks = 1,
};
+static const struct sunxi_sramc_variant sun20i_d1_sramc_variant = {
+ .num_emac_clocks = 1,
+ .has_ldo_ctrl = true,
+};
+
static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
.num_emac_clocks = 1,
};
@@ -303,37 +308,38 @@ static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
};
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
+#define SUNXI_SYS_LDO_CTRL_REG 0x150
+
static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
unsigned int reg)
{
- const struct sunxi_sramc_variant *variant;
-
- variant = of_device_get_match_data(dev);
+ const struct sunxi_sramc_variant *variant = dev_get_drvdata(dev);
- if (reg < SUNXI_SRAM_EMAC_CLOCK_REG)
- return false;
- if (reg > SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
- return false;
+ if (reg >= SUNXI_SRAM_EMAC_CLOCK_REG &&
+ reg < SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
+ return true;
+ if (reg == SUNXI_SYS_LDO_CTRL_REG && variant->has_ldo_ctrl)
+ return true;
- return true;
+ return false;
}
-static struct regmap_config sunxi_sram_emac_clock_regmap = {
+static struct regmap_config sunxi_sram_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
/* last defined register */
- .max_register = SUNXI_SRAM_EMAC_CLOCK_REG + 4,
+ .max_register = SUNXI_SYS_LDO_CTRL_REG,
/* other devices have no business accessing other registers */
.readable_reg = sunxi_sram_regmap_accessible_reg,
.writeable_reg = sunxi_sram_regmap_accessible_reg,
};
-static int sunxi_sram_probe(struct platform_device *pdev)
+static int __init sunxi_sram_probe(struct platform_device *pdev)
{
- struct dentry *d;
- struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
sram_dev = &pdev->dev;
@@ -341,24 +347,21 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
+ dev_set_drvdata(dev, (struct sunxi_sramc_variant *)variant);
+
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
- of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-
- d = debugfs_create_file("sram", S_IRUGO, NULL, NULL,
- &sunxi_sram_fops);
- if (!d)
- return -ENOMEM;
+ if (variant->num_emac_clocks || variant->has_ldo_ctrl) {
+ regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ }
- if (variant->num_emac_clocks > 0) {
- emac_clock = devm_regmap_init_mmio(&pdev->dev, base,
- &sunxi_sram_emac_clock_regmap);
+ of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (IS_ERR(emac_clock))
- return PTR_ERR(emac_clock);
- }
+ debugfs_create_file("sram", 0444, NULL, NULL, &sunxi_sram_fops);
return 0;
}
@@ -385,6 +388,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = {
.data = &sun8i_h3_sramc_variant,
},
{
+ .compatible = "allwinner,sun20i-d1-system-control",
+ .data = &sun20i_d1_sramc_variant,
+ },
+ {
.compatible = "allwinner,sun50i-a64-sram-controller",
.data = &sun50i_a64_sramc_variant,
},
@@ -409,9 +416,8 @@ static struct platform_driver sunxi_sram_driver = {
.name = "sunxi-sram",
.of_match_table = sunxi_sram_dt_match,
},
- .probe = sunxi_sram_probe,
};
-module_platform_driver(sunxi_sram_driver);
+builtin_platform_driver_probe(sunxi_sram_driver, sunxi_sram_probe);
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner sunXi SRAM Controller Driver");
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 5725c8ef0406..d1ecadffa1bb 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -136,7 +136,6 @@ config SOC_TEGRA_FUSE
def_bool y
depends on ARCH_TEGRA
select SOC_BUS
- select TEGRA20_APB_DMA if ARCH_TEGRA_2x_SOC
config SOC_TEGRA_FLOWCTRL
bool
@@ -162,3 +161,12 @@ config SOC_TEGRA30_VOLTAGE_COUPLER
bool "Voltage scaling support for Tegra30 SoCs"
depends on ARCH_TEGRA_3x_SOC || COMPILE_TEST
depends on REGULATOR
+
+config SOC_TEGRA_CBB
+ tristate "Tegra driver to handle errors from CBB"
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC
+ default y
+ help
+ Support for handling errors from the Tegra Control Backbone (CBB).
+ This driver handles the errors from CBB and prints debug
+ information about the failed transactions.
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 054e862b63d8..d722f512dc9d 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += fuse/
+obj-y += cbb/
obj-y += common.o
obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o
diff --git a/drivers/soc/tegra/cbb/Makefile b/drivers/soc/tegra/cbb/Makefile
new file mode 100644
index 000000000000..e3ac6cdddf5c
--- /dev/null
+++ b/drivers/soc/tegra/cbb/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Control Backbone Driver code.
+#
+ifdef CONFIG_SOC_TEGRA_CBB
+obj-y += tegra-cbb.o
+obj-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-cbb.o
+obj-$(CONFIG_ARCH_TEGRA_234_SOC) += tegra234-cbb.o
+endif
diff --git a/drivers/soc/tegra/cbb/tegra-cbb.c b/drivers/soc/tegra/cbb/tegra-cbb.c
new file mode 100644
index 000000000000..d200937353c7
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra-cbb.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+void tegra_cbb_print_err(struct seq_file *file, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ if (file) {
+ seq_vprintf(file, fmt, args);
+ } else {
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ pr_crit("%pV", &vaf);
+ }
+
+ va_end(args);
+}
+
+void tegra_cbb_print_cache(struct seq_file *file, u32 cache)
+{
+ const char *buff_str, *mod_str, *rd_str, *wr_str;
+
+ buff_str = (cache & BIT(0)) ? "Bufferable " : "";
+ mod_str = (cache & BIT(1)) ? "Modifiable " : "";
+ rd_str = (cache & BIT(2)) ? "Read-Allocate " : "";
+ wr_str = (cache & BIT(3)) ? "Write-Allocate" : "";
+
+ if (cache == 0x0)
+ buff_str = "Device Non-Bufferable";
+
+ tegra_cbb_print_err(file, "\t Cache\t\t\t: 0x%x -- %s%s%s%s\n",
+ cache, buff_str, mod_str, rd_str, wr_str);
+}
+
+void tegra_cbb_print_prot(struct seq_file *file, u32 prot)
+{
+ const char *data_str, *secure_str, *priv_str;
+
+ data_str = (prot & 0x4) ? "Instruction" : "Data";
+ secure_str = (prot & 0x2) ? "Non-Secure" : "Secure";
+ priv_str = (prot & 0x1) ? "Privileged" : "Unprivileged";
+
+ tegra_cbb_print_err(file, "\t Protection\t\t: 0x%x -- %s, %s, %s Access\n",
+ prot, priv_str, secure_str, data_str);
+}
+
+static int tegra_cbb_err_show(struct seq_file *file, void *data)
+{
+ struct tegra_cbb *cbb = file->private;
+
+ return cbb->ops->debugfs_show(cbb, file, data);
+}
+
+static int tegra_cbb_err_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, tegra_cbb_err_show, inode->i_private);
+}
+
+static const struct file_operations tegra_cbb_err_fops = {
+ .open = tegra_cbb_err_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int tegra_cbb_err_debugfs_init(struct tegra_cbb *cbb)
+{
+ static struct dentry *root;
+
+ if (!root) {
+ root = debugfs_create_file("tegra_cbb_err", 0444, NULL, cbb, &tegra_cbb_err_fops);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("%s(): could not create debugfs node\n", __func__);
+ return PTR_ERR(root);
+ }
+ }
+
+ return 0;
+}
+
+void tegra_cbb_stall_enable(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->stall_enable)
+ cbb->ops->stall_enable(cbb);
+}
+
+void tegra_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->fault_enable)
+ cbb->ops->fault_enable(cbb);
+}
+
+void tegra_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->error_clear)
+ cbb->ops->error_clear(cbb);
+}
+
+u32 tegra_cbb_get_status(struct tegra_cbb *cbb)
+{
+ if (cbb->ops->get_status)
+ return cbb->ops->get_status(cbb);
+
+ return 0;
+}
+
+int tegra_cbb_get_irq(struct platform_device *pdev, unsigned int *nonsec_irq,
+ unsigned int *sec_irq)
+{
+ unsigned int index = 0;
+ int num_intr = 0, irq;
+
+ num_intr = platform_irq_count(pdev);
+ if (!num_intr)
+ return -EINVAL;
+
+ if (num_intr == 2) {
+ irq = platform_get_irq(pdev, index);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get non-secure IRQ: %d\n", irq);
+ return -ENOENT;
+ }
+
+ *nonsec_irq = irq;
+ index++;
+ }
+
+ irq = platform_get_irq(pdev, index);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "failed to get secure IRQ: %d\n", irq);
+ return -ENOENT;
+ }
+
+ *sec_irq = irq;
+
+ if (num_intr == 1)
+ dev_dbg(&pdev->dev, "secure IRQ: %u\n", *sec_irq);
+
+ if (num_intr == 2)
+ dev_dbg(&pdev->dev, "secure IRQ: %u, non-secure IRQ: %u\n", *sec_irq, *nonsec_irq);
+
+ return 0;
+}
+
+int tegra_cbb_register(struct tegra_cbb *cbb)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ ret = tegra_cbb_err_debugfs_init(cbb);
+ if (ret) {
+ dev_err(cbb->dev, "failed to create debugfs\n");
+ return ret;
+ }
+ }
+
+ /* register interrupt handler for errors due to different initiators */
+ ret = cbb->ops->interrupt_enable(cbb);
+ if (ret < 0) {
+ dev_err(cbb->dev, "Failed to register CBB Interrupt ISR\n");
+ return ret;
+ }
+
+ cbb->ops->error_enable(cbb);
+ dsb(sy);
+
+ return 0;
+}
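
Editorial note: tegra_cbb_register() assumes the SoC backend has populated cbb->dev and cbb->ops before calling it; interrupt_enable() and error_enable() are dereferenced unconditionally here, while stall_enable(), fault_enable(), error_clear() and get_status() go through the NULL-checked wrappers above. A hedged sketch of a hypothetical backend's probe path (the ops type comes from <soc/tegra/tegra-cbb.h>; everything named example_* is invented for illustration):

/* Editorial sketch -- hypothetical backend, not part of this patch. */
struct example_cbb {
	struct tegra_cbb base;
	void __iomem *regs;
};

static int example_cbb_interrupt_enable(struct tegra_cbb *cbb)
{
	/* request the secure/non-secure IRQs and hook the error handler */
	return 0;
}

static void example_cbb_error_enable(struct tegra_cbb *cbb)
{
	/* unmask error reporting in the NOC error logger */
}

static const struct tegra_cbb_ops example_cbb_ops = {
	/* a real backend also provides debugfs_show for the tegra_cbb_err node */
	.interrupt_enable = example_cbb_interrupt_enable,
	.error_enable = example_cbb_error_enable,
	/* stall_enable/fault_enable/error_clear/get_status are optional here */
};

static int example_cbb_probe(struct platform_device *pdev)
{
	struct example_cbb *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dev = &pdev->dev;
	priv->base.ops = &example_cbb_ops;

	return tegra_cbb_register(&priv->base);
}
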
diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c
new file mode 100644
index 000000000000..1ae0bd9a1ac1
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra194-cbb.c
@@ -0,0 +1,2364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ *
+ * The driver handles errors from the Control Backbone (CBB) that are
+ * generated due to illegal accesses. When an error is reported from a NOC
+ * within the CBB, the driver checks the ErrVld status of all three error
+ * loggers of that NOC. It then prints debug information about the failed
+ * transaction using the ErrLog registers of the error logger which has
+ * ErrVld set. Currently SLV, DEC, TMO, SEC and UNS are the error codes
+ * supported by the CBB.
+
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+#define ERRLOGGER_0_ID_COREID_0 0x00000000
+#define ERRLOGGER_0_ID_REVISIONID_0 0x00000004
+#define ERRLOGGER_0_FAULTEN_0 0x00000008
+#define ERRLOGGER_0_ERRVLD_0 0x0000000c
+#define ERRLOGGER_0_ERRCLR_0 0x00000010
+#define ERRLOGGER_0_ERRLOG0_0 0x00000014
+#define ERRLOGGER_0_ERRLOG1_0 0x00000018
+#define ERRLOGGER_0_RSVD_00_0 0x0000001c
+#define ERRLOGGER_0_ERRLOG3_0 0x00000020
+#define ERRLOGGER_0_ERRLOG4_0 0x00000024
+#define ERRLOGGER_0_ERRLOG5_0 0x00000028
+#define ERRLOGGER_0_STALLEN_0 0x00000038
+
+#define ERRLOGGER_1_ID_COREID_0 0x00000080
+#define ERRLOGGER_1_ID_REVISIONID_0 0x00000084
+#define ERRLOGGER_1_FAULTEN_0 0x00000088
+#define ERRLOGGER_1_ERRVLD_0 0x0000008c
+#define ERRLOGGER_1_ERRCLR_0 0x00000090
+#define ERRLOGGER_1_ERRLOG0_0 0x00000094
+#define ERRLOGGER_1_ERRLOG1_0 0x00000098
+#define ERRLOGGER_1_RSVD_00_0 0x0000009c
+#define ERRLOGGER_1_ERRLOG3_0 0x000000a0
+#define ERRLOGGER_1_ERRLOG4_0 0x000000a4
+#define ERRLOGGER_1_ERRLOG5_0 0x000000a8
+#define ERRLOGGER_1_STALLEN_0 0x000000b8
+
+#define ERRLOGGER_2_ID_COREID_0 0x00000100
+#define ERRLOGGER_2_ID_REVISIONID_0 0x00000104
+#define ERRLOGGER_2_FAULTEN_0 0x00000108
+#define ERRLOGGER_2_ERRVLD_0 0x0000010c
+#define ERRLOGGER_2_ERRCLR_0 0x00000110
+#define ERRLOGGER_2_ERRLOG0_0 0x00000114
+#define ERRLOGGER_2_ERRLOG1_0 0x00000118
+#define ERRLOGGER_2_RSVD_00_0 0x0000011c
+#define ERRLOGGER_2_ERRLOG3_0 0x00000120
+#define ERRLOGGER_2_ERRLOG4_0 0x00000124
+#define ERRLOGGER_2_ERRLOG5_0 0x00000128
+#define ERRLOGGER_2_STALLEN_0 0x00000138
+
+#define CBB_NOC_INITFLOW GENMASK(23, 20)
+#define CBB_NOC_TARGFLOW GENMASK(19, 16)
+#define CBB_NOC_TARG_SUBRANGE GENMASK(15, 9)
+#define CBB_NOC_SEQID GENMASK(8, 0)
+
+#define BPMP_NOC_INITFLOW GENMASK(20, 18)
+#define BPMP_NOC_TARGFLOW GENMASK(17, 13)
+#define BPMP_NOC_TARG_SUBRANGE GENMASK(12, 9)
+#define BPMP_NOC_SEQID GENMASK(8, 0)
+
+#define AON_NOC_INITFLOW GENMASK(22, 21)
+#define AON_NOC_TARGFLOW GENMASK(20, 15)
+#define AON_NOC_TARG_SUBRANGE GENMASK(14, 9)
+#define AON_NOC_SEQID GENMASK(8, 0)
+
+#define SCE_NOC_INITFLOW GENMASK(21, 19)
+#define SCE_NOC_TARGFLOW GENMASK(18, 14)
+#define SCE_NOC_TARG_SUBRANGE GENMASK(13, 9)
+#define SCE_NOC_SEQID GENMASK(8, 0)
+
+#define CBB_NOC_AXCACHE GENMASK(3, 0)
+#define CBB_NOC_NON_MOD GENMASK(4, 4)
+#define CBB_NOC_AXPROT GENMASK(7, 5)
+#define CBB_NOC_FALCONSEC GENMASK(9, 8)
+#define CBB_NOC_GRPSEC GENMASK(16, 10)
+#define CBB_NOC_VQC GENMASK(18, 17)
+#define CBB_NOC_MSTR_ID GENMASK(22, 19)
+#define CBB_NOC_AXI_ID GENMASK(30, 23)
+
+#define CLUSTER_NOC_AXCACHE GENMASK(3, 0)
+#define CLUSTER_NOC_AXPROT GENMASK(6, 4)
+#define CLUSTER_NOC_FALCONSEC GENMASK(8, 7)
+#define CLUSTER_NOC_GRPSEC GENMASK(15, 9)
+#define CLUSTER_NOC_VQC GENMASK(17, 16)
+#define CLUSTER_NOC_MSTR_ID GENMASK(21, 18)
+
+#define USRBITS_MSTR_ID GENMASK(21, 18)
+
+#define CBB_ERR_OPC GENMASK(4, 1)
+#define CBB_ERR_ERRCODE GENMASK(10, 8)
+#define CBB_ERR_LEN1 GENMASK(27, 16)
+
+#define DMAAPB_X_RAW_INTERRUPT_STATUS 0x2ec
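+
+/*
+ * Illustrative sketch (an assumption about how the masks above are meant to
+ * be used, not a definition from this hunk): the route-ID fields are pulled
+ * out of the ERRLOG3/ERRLOG4 value and the user bits out of ERRLOG5 with
+ * FIELD_GET() from <linux/bitfield.h>, along the lines of:
+ *
+ *	info->initflow = FIELD_GET(CBB_NOC_INITFLOW, routeid);
+ *	info->targflow = FIELD_GET(CBB_NOC_TARGFLOW, routeid);
+ *	info->targ_subrange = FIELD_GET(CBB_NOC_TARG_SUBRANGE, routeid);
+ *	info->seqid = FIELD_GET(CBB_NOC_SEQID, routeid);
+ *	usrbits->mstr_id = FIELD_GET(CBB_NOC_MSTR_ID, elog_5);
+ */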
+
+struct tegra194_cbb_packet_header {
+ bool lock; // [0]
+ u8 opc; // [4:1]
+ u8 errcode; // [10:8]= RD, RDW, RDL, RDX, WR, WRW, WRC, PRE, URG
+ u16 len1; // [27:16]
+ bool format; // [31] = 1 -> FlexNoC versions 2.7 & above
+};
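+
+/*
+ * A minimal decode sketch (assumption, not taken from this hunk): ERRLOG0 is
+ * expected to be unpacked into the header above roughly as follows:
+ *
+ *	hdr.lock = errlog0 & 0x1;
+ *	hdr.opc = FIELD_GET(CBB_ERR_OPC, errlog0);
+ *	hdr.errcode = FIELD_GET(CBB_ERR_ERRCODE, errlog0);
+ *	hdr.len1 = FIELD_GET(CBB_ERR_LEN1, errlog0);
+ *	hdr.format = (errlog0 >> 31) & 0x1;
+ */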
+
+struct tegra194_cbb_aperture {
+ u8 initflow;
+ u8 targflow;
+ u8 targ_subrange;
+ u8 init_mapping;
+ u32 init_localaddress;
+ u8 targ_mapping;
+ u32 targ_localaddress;
+ u16 seqid;
+};
+
+struct tegra194_cbb_userbits {
+ u8 axcache;
+ u8 non_mod;
+ u8 axprot;
+ u8 falconsec;
+ u8 grpsec;
+ u8 vqc;
+ u8 mstr_id;
+ u8 axi_id;
+};
+
+struct tegra194_cbb_noc_data {
+ const char *name;
+ bool erd_mask_inband_err;
+ const char * const *master_id;
+ unsigned int max_aperture;
+ const struct tegra194_cbb_aperture *noc_aperture;
+ const char * const *routeid_initflow;
+ const char * const *routeid_targflow;
+ void (*parse_routeid)(struct tegra194_cbb_aperture *info, u64 routeid);
+ void (*parse_userbits)(struct tegra194_cbb_userbits *usrbits, u32 elog_5);
+};
+
+struct tegra194_axi2apb_bridge {
+ struct resource res;
+ void __iomem *base;
+};
+
+struct tegra194_cbb {
+ struct tegra_cbb base;
+
+ const struct tegra194_cbb_noc_data *noc;
+ struct resource *res;
+
+ void __iomem *regs;
+ unsigned int num_intr;
+ unsigned int sec_irq;
+ unsigned int nonsec_irq;
+ u32 errlog0;
+ u32 errlog1;
+ u32 errlog2;
+ u32 errlog3;
+ u32 errlog4;
+ u32 errlog5;
+
+ struct tegra194_axi2apb_bridge *bridges;
+ unsigned int num_bridges;
+};
+
+static inline struct tegra194_cbb *to_tegra194_cbb(struct tegra_cbb *cbb)
+{
+ return container_of(cbb, struct tegra194_cbb, base);
+}
+
+static LIST_HEAD(cbb_list);
+static DEFINE_SPINLOCK(cbb_lock);
+
+static const char * const tegra194_cbb_trantype[] = {
+ "RD - Read, Incrementing",
+ "RDW - Read, Wrap", /* Not Supported */
+ "RDX - Exclusive Read", /* Not Supported */
+ "RDL - Linked Read", /* Not Supported */
+ "WR - Write, Incrementing",
+ "WRW - Write, Wrap", /* Not Supported */
+ "WRC - Exclusive Write", /* Not Supported */
+ "PRE - Preamble Sequence for Fixed Accesses"
+};
+
+static const char * const tegra194_axi2apb_error[] = {
+ "SFIFONE - Status FIFO Not Empty interrupt",
+ "SFIFOF - Status FIFO Full interrupt",
+ "TIM - Timer (Timeout) interrupt",
+ "SLV - SLVERR interrupt",
+ "NULL",
+ "ERBF - Early response buffer Full interrupt",
+ "NULL",
+ "RDFIFOF - Read Response FIFO Full interrupt",
+ "WRFIFOF - Write Response FIFO Full interrupt",
+ "CH0DFIFOF - Ch0 Data FIFO Full interrupt",
+ "CH1DFIFOF - Ch1 Data FIFO Full interrupt",
+ "CH2DFIFOF - Ch2 Data FIFO Full interrupt",
+ "UAT - Unsupported alignment type error",
+ "UBS - Unsupported burst size error",
+ "UBE - Unsupported Byte Enable error",
+ "UBT - Unsupported burst type error",
+ "BFS - Block Firewall security error",
+ "ARFS - Address Range Firewall security error",
+ "CH0RFIFOF - Ch0 Request FIFO Full interrupt",
+ "CH1RFIFOF - Ch1 Request FIFO Full interrupt",
+ "CH2RFIFOF - Ch2 Request FIFO Full interrupt"
+};
+
+static const char * const tegra194_master_id[] = {
+ [0x0] = "CCPLEX",
+ [0x1] = "CCPLEX_DPMU",
+ [0x2] = "BPMP",
+ [0x3] = "AON",
+ [0x4] = "SCE",
+ [0x5] = "GPCDMA_PERIPHERAL",
+ [0x6] = "TSECA",
+ [0x7] = "TSECB",
+ [0x8] = "JTAGM_DFT",
+ [0x9] = "CORESIGHT_AXIAP",
+ [0xa] = "APE",
+ [0xb] = "PEATR",
+ [0xc] = "NVDEC",
+ [0xd] = "RCE",
+ [0xe] = "NVDEC1"
+};
+
+static const struct tegra_cbb_error tegra194_cbb_errors[] = {
+ {
+ .code = "SLV",
+ .source = "Target",
+ .desc = "Target error detected by CBB slave"
+ }, {
+ .code = "DEC",
+ .source = "Initiator NIU",
+ .desc = "Address decode error"
+ }, {
+ .code = "UNS",
+ .source = "Target NIU",
+ .desc = "Unsupported request. Not a valid transaction"
+ }, {
+ .code = "DISC", /* Not Supported by CBB */
+ .source = "Power Disconnect",
+ .desc = "Disconnected target or domain"
+ }, {
+ .code = "SEC",
+ .source = "Initiator NIU or Firewall",
+ .desc = "Security violation. Firewall error"
+ }, {
+ .code = "HIDE", /* Not Supported by CBB */
+ .source = "Firewall",
+ .desc = "Hidden security violation, reported as OK to initiator"
+ }, {
+ .code = "TMO",
+ .source = "Target NIU",
+ .desc = "Target time-out error"
+ }, {
+ .code = "RSV",
+ .source = "None",
+ .desc = "Reserved"
+ }
+};
+
+/*
+ * CBB NOC aperture lookup table as per file "cbb_central_noc_Structure.info".
+ */
+static const char * const tegra194_cbbcentralnoc_routeid_initflow[] = {
+ [0x0] = "aon_p2ps/I/aon",
+ [0x1] = "ape_p2ps/I/ape_p2ps",
+ [0x2] = "bpmp_p2ps/I/bpmp_p2ps",
+ [0x3] = "ccroc_p2ps/I/ccroc_p2ps",
+ [0x4] = "csite_p2ps/I/0",
+ [0x5] = "gpcdma_mmio_p2ps/I/0",
+ [0x6] = "jtag_p2ps/I/0",
+ [0x7] = "nvdec1_p2ps/I/0",
+ [0x8] = "nvdec_p2ps/I/0",
+ [0x9] = "rce_p2ps/I/rce_p2ps",
+ [0xa] = "sce_p2ps/I/sce_p2ps",
+ [0xb] = "tseca_p2ps/I/0",
+ [0xc] = "tsecb_p2ps/I/0",
+ [0xd] = "RESERVED",
+ [0xe] = "RESERVED",
+ [0xf] = "RESERVED"
+};
+
+static const char * const tegra194_cbbcentralnoc_routeid_targflow[] = {
+ [0x0] = "SVC/T/intreg",
+ [0x1] = "axis_satellite_axi2apb_p2pm/T/axis_satellite_axi2apb_p2pm",
+ [0x2] = "axis_satellite_grout/T/axis_satellite_grout",
+ [0x3] = "cbb_firewall/T/cbb_firewall",
+ [0x4] = "gpu_p2pm/T/gpu_p2pm",
+ [0x5] = "host1x_p2pm/T/host1x_p2pm",
+ [0x6] = "sapb_3_p2pm/T/sapb_3_p2pm",
+ [0x7] = "smmu0_p2pm/T/smmu0_p2pm",
+ [0x8] = "smmu1_p2pm/T/smmu1_p2pm",
+ [0x9] = "smmu2_p2pm/T/smmu2_p2pm",
+ [0xa] = "stm_p2pm/T/stm_p2pm",
+ [0xb] = "RESERVED",
+ [0xc] = "RESERVED",
+ [0xd] = "RESERVED",
+ [0xe] = "RESERVED",
+ [0xf] = "RESERVED"
+};
+
+/*
+ * Fields of CBB NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_cbbcentralnoc_apert_lookup[] = {
+ { 0x0, 0x0, 0x00, 0x0, 0x02300000, 0, 0x00000000 },
+ { 0x0, 0x1, 0x00, 0x0, 0x02003000, 0, 0x02003000 },
+ { 0x0, 0x1, 0x01, 0x0, 0x02006000, 2, 0x02006000 },
+ { 0x0, 0x1, 0x02, 0x0, 0x02016000, 3, 0x02016000 },
+ { 0x0, 0x1, 0x03, 0x0, 0x0201d000, 4, 0x0201d000 },
+ { 0x0, 0x1, 0x04, 0x0, 0x0202b000, 6, 0x0202b000 },
+ { 0x0, 0x1, 0x05, 0x0, 0x02434000, 20, 0x02434000 },
+ { 0x0, 0x1, 0x06, 0x0, 0x02436000, 21, 0x02436000 },
+ { 0x0, 0x1, 0x07, 0x0, 0x02438000, 22, 0x02438000 },
+ { 0x0, 0x1, 0x08, 0x0, 0x02445000, 24, 0x02445000 },
+ { 0x0, 0x1, 0x09, 0x0, 0x02446000, 25, 0x02446000 },
+ { 0x0, 0x1, 0x0a, 0x0, 0x02004000, 1, 0x02004000 },
+ { 0x0, 0x1, 0x0b, 0x0, 0x0201e000, 5, 0x0201e000 },
+ { 0x0, 0x1, 0x0c, 0x0, 0x0202c000, 7, 0x0202c000 },
+ { 0x0, 0x1, 0x0d, 0x0, 0x02204000, 8, 0x02204000 },
+ { 0x0, 0x1, 0x0e, 0x0, 0x02214000, 9, 0x02214000 },
+ { 0x0, 0x1, 0x0f, 0x0, 0x02224000, 10, 0x02224000 },
+ { 0x0, 0x1, 0x10, 0x0, 0x02234000, 11, 0x02234000 },
+ { 0x0, 0x1, 0x11, 0x0, 0x02244000, 12, 0x02244000 },
+ { 0x0, 0x1, 0x12, 0x0, 0x02254000, 13, 0x02254000 },
+ { 0x0, 0x1, 0x13, 0x0, 0x02264000, 14, 0x02264000 },
+ { 0x0, 0x1, 0x14, 0x0, 0x02274000, 15, 0x02274000 },
+ { 0x0, 0x1, 0x15, 0x0, 0x02284000, 16, 0x02284000 },
+ { 0x0, 0x1, 0x16, 0x0, 0x0243a000, 23, 0x0243a000 },
+ { 0x0, 0x1, 0x17, 0x0, 0x02370000, 17, 0x02370000 },
+ { 0x0, 0x1, 0x18, 0x0, 0x023d0000, 18, 0x023d0000 },
+ { 0x0, 0x1, 0x19, 0x0, 0x023e0000, 19, 0x023e0000 },
+ { 0x0, 0x1, 0x1a, 0x0, 0x02450000, 26, 0x02450000 },
+ { 0x0, 0x1, 0x1b, 0x0, 0x02460000, 27, 0x02460000 },
+ { 0x0, 0x1, 0x1c, 0x0, 0x02490000, 28, 0x02490000 },
+ { 0x0, 0x1, 0x1d, 0x0, 0x03130000, 31, 0x03130000 },
+ { 0x0, 0x1, 0x1e, 0x0, 0x03160000, 32, 0x03160000 },
+ { 0x0, 0x1, 0x1f, 0x0, 0x03270000, 33, 0x03270000 },
+ { 0x0, 0x1, 0x20, 0x0, 0x032e0000, 35, 0x032e0000 },
+ { 0x0, 0x1, 0x21, 0x0, 0x03300000, 36, 0x03300000 },
+ { 0x0, 0x1, 0x22, 0x0, 0x13090000, 40, 0x13090000 },
+ { 0x0, 0x1, 0x23, 0x0, 0x20120000, 43, 0x20120000 },
+ { 0x0, 0x1, 0x24, 0x0, 0x20170000, 44, 0x20170000 },
+ { 0x0, 0x1, 0x25, 0x0, 0x20190000, 45, 0x20190000 },
+ { 0x0, 0x1, 0x26, 0x0, 0x201b0000, 46, 0x201b0000 },
+ { 0x0, 0x1, 0x27, 0x0, 0x20250000, 47, 0x20250000 },
+ { 0x0, 0x1, 0x28, 0x0, 0x20260000, 48, 0x20260000 },
+ { 0x0, 0x1, 0x29, 0x0, 0x20420000, 49, 0x20420000 },
+ { 0x0, 0x1, 0x2a, 0x0, 0x20460000, 50, 0x20460000 },
+ { 0x0, 0x1, 0x2b, 0x0, 0x204f0000, 51, 0x204f0000 },
+ { 0x0, 0x1, 0x2c, 0x0, 0x20520000, 52, 0x20520000 },
+ { 0x0, 0x1, 0x2d, 0x0, 0x20580000, 53, 0x20580000 },
+ { 0x0, 0x1, 0x2e, 0x0, 0x205a0000, 54, 0x205a0000 },
+ { 0x0, 0x1, 0x2f, 0x0, 0x205c0000, 55, 0x205c0000 },
+ { 0x0, 0x1, 0x30, 0x0, 0x20690000, 56, 0x20690000 },
+ { 0x0, 0x1, 0x31, 0x0, 0x20770000, 57, 0x20770000 },
+ { 0x0, 0x1, 0x32, 0x0, 0x20790000, 58, 0x20790000 },
+ { 0x0, 0x1, 0x33, 0x0, 0x20880000, 59, 0x20880000 },
+ { 0x0, 0x1, 0x34, 0x0, 0x20990000, 62, 0x20990000 },
+ { 0x0, 0x1, 0x35, 0x0, 0x20e10000, 65, 0x20e10000 },
+ { 0x0, 0x1, 0x36, 0x0, 0x20e70000, 66, 0x20e70000 },
+ { 0x0, 0x1, 0x37, 0x0, 0x20e80000, 67, 0x20e80000 },
+ { 0x0, 0x1, 0x38, 0x0, 0x20f30000, 68, 0x20f30000 },
+ { 0x0, 0x1, 0x39, 0x0, 0x20f50000, 69, 0x20f50000 },
+ { 0x0, 0x1, 0x3a, 0x0, 0x20fc0000, 70, 0x20fc0000 },
+ { 0x0, 0x1, 0x3b, 0x0, 0x21110000, 72, 0x21110000 },
+ { 0x0, 0x1, 0x3c, 0x0, 0x21270000, 73, 0x21270000 },
+ { 0x0, 0x1, 0x3d, 0x0, 0x21290000, 74, 0x21290000 },
+ { 0x0, 0x1, 0x3e, 0x0, 0x21840000, 75, 0x21840000 },
+ { 0x0, 0x1, 0x3f, 0x0, 0x21880000, 76, 0x21880000 },
+ { 0x0, 0x1, 0x40, 0x0, 0x218d0000, 77, 0x218d0000 },
+ { 0x0, 0x1, 0x41, 0x0, 0x21950000, 78, 0x21950000 },
+ { 0x0, 0x1, 0x42, 0x0, 0x21960000, 79, 0x21960000 },
+ { 0x0, 0x1, 0x43, 0x0, 0x21a10000, 80, 0x21a10000 },
+ { 0x0, 0x1, 0x44, 0x0, 0x024a0000, 29, 0x024a0000 },
+ { 0x0, 0x1, 0x45, 0x0, 0x024c0000, 30, 0x024c0000 },
+ { 0x0, 0x1, 0x46, 0x0, 0x032c0000, 34, 0x032c0000 },
+ { 0x0, 0x1, 0x47, 0x0, 0x03400000, 37, 0x03400000 },
+ { 0x0, 0x1, 0x48, 0x0, 0x130a0000, 41, 0x130a0000 },
+ { 0x0, 0x1, 0x49, 0x0, 0x130c0000, 42, 0x130c0000 },
+ { 0x0, 0x1, 0x4a, 0x0, 0x208a0000, 60, 0x208a0000 },
+ { 0x0, 0x1, 0x4b, 0x0, 0x208c0000, 61, 0x208c0000 },
+ { 0x0, 0x1, 0x4c, 0x0, 0x209a0000, 63, 0x209a0000 },
+ { 0x0, 0x1, 0x4d, 0x0, 0x21a40000, 81, 0x21a40000 },
+ { 0x0, 0x1, 0x4e, 0x0, 0x03440000, 38, 0x03440000 },
+ { 0x0, 0x1, 0x4f, 0x0, 0x20d00000, 64, 0x20d00000 },
+ { 0x0, 0x1, 0x50, 0x0, 0x21000000, 71, 0x21000000 },
+ { 0x0, 0x1, 0x51, 0x0, 0x0b000000, 39, 0x0b000000 },
+ { 0x0, 0x2, 0x00, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x3, 0x00, 0x0, 0x02340000, 0, 0x00000000 },
+ { 0x0, 0x4, 0x00, 0x0, 0x17000000, 0, 0x17000000 },
+ { 0x0, 0x4, 0x01, 0x0, 0x18000000, 1, 0x18000000 },
+ { 0x0, 0x5, 0x00, 0x0, 0x13e80000, 1, 0x13e80000 },
+ { 0x0, 0x5, 0x01, 0x0, 0x15810000, 12, 0x15810000 },
+ { 0x0, 0x5, 0x02, 0x0, 0x15840000, 14, 0x15840000 },
+ { 0x0, 0x5, 0x03, 0x0, 0x15a40000, 17, 0x15a40000 },
+ { 0x0, 0x5, 0x04, 0x0, 0x13f00000, 3, 0x13f00000 },
+ { 0x0, 0x5, 0x05, 0x0, 0x15820000, 13, 0x15820000 },
+ { 0x0, 0x5, 0x06, 0x0, 0x13ec0000, 2, 0x13ec0000 },
+ { 0x0, 0x5, 0x07, 0x0, 0x15200000, 6, 0x15200000 },
+ { 0x0, 0x5, 0x08, 0x0, 0x15340000, 7, 0x15340000 },
+ { 0x0, 0x5, 0x09, 0x0, 0x15380000, 8, 0x15380000 },
+ { 0x0, 0x5, 0x0a, 0x0, 0x15500000, 10, 0x15500000 },
+ { 0x0, 0x5, 0x0b, 0x0, 0x155c0000, 11, 0x155c0000 },
+ { 0x0, 0x5, 0x0c, 0x0, 0x15a00000, 16, 0x15a00000 },
+ { 0x0, 0x5, 0x0d, 0x0, 0x13e00000, 0, 0x13e00000 },
+ { 0x0, 0x5, 0x0e, 0x0, 0x15100000, 5, 0x15100000 },
+ { 0x0, 0x5, 0x0f, 0x0, 0x15480000, 9, 0x15480000 },
+ { 0x0, 0x5, 0x10, 0x0, 0x15880000, 15, 0x15880000 },
+ { 0x0, 0x5, 0x11, 0x0, 0x15a80000, 18, 0x15a80000 },
+ { 0x0, 0x5, 0x12, 0x0, 0x15b00000, 19, 0x15b00000 },
+ { 0x0, 0x5, 0x13, 0x0, 0x14800000, 4, 0x14800000 },
+ { 0x0, 0x5, 0x14, 0x0, 0x15c00000, 20, 0x15c00000 },
+ { 0x0, 0x5, 0x15, 0x0, 0x16000000, 21, 0x16000000 },
+ { 0x0, 0x6, 0x00, 0x0, 0x02000000, 4, 0x02000000 },
+ { 0x0, 0x6, 0x01, 0x0, 0x02007000, 5, 0x02007000 },
+ { 0x0, 0x6, 0x02, 0x0, 0x02008000, 6, 0x02008000 },
+ { 0x0, 0x6, 0x03, 0x0, 0x02013000, 7, 0x02013000 },
+ { 0x0, 0x6, 0x04, 0x0, 0x0201c000, 8, 0x0201c000 },
+ { 0x0, 0x6, 0x05, 0x0, 0x02020000, 9, 0x02020000 },
+ { 0x0, 0x6, 0x06, 0x0, 0x0202a000, 10, 0x0202a000 },
+ { 0x0, 0x6, 0x07, 0x0, 0x0202e000, 11, 0x0202e000 },
+ { 0x0, 0x6, 0x08, 0x0, 0x06400000, 33, 0x06400000 },
+ { 0x0, 0x6, 0x09, 0x0, 0x02038000, 12, 0x02038000 },
+ { 0x0, 0x6, 0x0a, 0x0, 0x00100000, 0, 0x00100000 },
+ { 0x0, 0x6, 0x0b, 0x0, 0x023b0000, 13, 0x023b0000 },
+ { 0x0, 0x6, 0x0c, 0x0, 0x02800000, 16, 0x02800000 },
+ { 0x0, 0x6, 0x0d, 0x0, 0x030e0000, 22, 0x030e0000 },
+ { 0x0, 0x6, 0x0e, 0x0, 0x03800000, 23, 0x03800000 },
+ { 0x0, 0x6, 0x0f, 0x0, 0x03980000, 25, 0x03980000 },
+ { 0x0, 0x6, 0x10, 0x0, 0x03a60000, 26, 0x03a60000 },
+ { 0x0, 0x6, 0x11, 0x0, 0x03d80000, 31, 0x03d80000 },
+ { 0x0, 0x6, 0x12, 0x0, 0x20000000, 36, 0x20000000 },
+ { 0x0, 0x6, 0x13, 0x0, 0x20050000, 38, 0x20050000 },
+ { 0x0, 0x6, 0x14, 0x0, 0x201e0000, 40, 0x201e0000 },
+ { 0x0, 0x6, 0x15, 0x0, 0x20280000, 42, 0x20280000 },
+ { 0x0, 0x6, 0x16, 0x0, 0x202c0000, 43, 0x202c0000 },
+ { 0x0, 0x6, 0x17, 0x0, 0x20390000, 44, 0x20390000 },
+ { 0x0, 0x6, 0x18, 0x0, 0x20430000, 45, 0x20430000 },
+ { 0x0, 0x6, 0x19, 0x0, 0x20440000, 46, 0x20440000 },
+ { 0x0, 0x6, 0x1a, 0x0, 0x204e0000, 47, 0x204e0000 },
+ { 0x0, 0x6, 0x1b, 0x0, 0x20550000, 48, 0x20550000 },
+ { 0x0, 0x6, 0x1c, 0x0, 0x20570000, 49, 0x20570000 },
+ { 0x0, 0x6, 0x1d, 0x0, 0x20590000, 50, 0x20590000 },
+ { 0x0, 0x6, 0x1e, 0x0, 0x20730000, 52, 0x20730000 },
+ { 0x0, 0x6, 0x1f, 0x0, 0x209f0000, 54, 0x209f0000 },
+ { 0x0, 0x6, 0x20, 0x0, 0x20e20000, 55, 0x20e20000 },
+ { 0x0, 0x6, 0x21, 0x0, 0x20ed0000, 56, 0x20ed0000 },
+ { 0x0, 0x6, 0x22, 0x0, 0x20fd0000, 57, 0x20fd0000 },
+ { 0x0, 0x6, 0x23, 0x0, 0x21120000, 59, 0x21120000 },
+ { 0x0, 0x6, 0x24, 0x0, 0x211a0000, 60, 0x211a0000 },
+ { 0x0, 0x6, 0x25, 0x0, 0x21850000, 61, 0x21850000 },
+ { 0x0, 0x6, 0x26, 0x0, 0x21860000, 62, 0x21860000 },
+ { 0x0, 0x6, 0x27, 0x0, 0x21890000, 63, 0x21890000 },
+ { 0x0, 0x6, 0x28, 0x0, 0x21970000, 64, 0x21970000 },
+ { 0x0, 0x6, 0x29, 0x0, 0x21990000, 65, 0x21990000 },
+ { 0x0, 0x6, 0x2a, 0x0, 0x21a00000, 66, 0x21a00000 },
+ { 0x0, 0x6, 0x2b, 0x0, 0x21a90000, 68, 0x21a90000 },
+ { 0x0, 0x6, 0x2c, 0x0, 0x21ac0000, 70, 0x21ac0000 },
+ { 0x0, 0x6, 0x2d, 0x0, 0x01f80000, 3, 0x01f80000 },
+ { 0x0, 0x6, 0x2e, 0x0, 0x024e0000, 14, 0x024e0000 },
+ { 0x0, 0x6, 0x2f, 0x0, 0x030c0000, 21, 0x030c0000 },
+ { 0x0, 0x6, 0x30, 0x0, 0x03820000, 24, 0x03820000 },
+ { 0x0, 0x6, 0x31, 0x0, 0x03aa0000, 27, 0x03aa0000 },
+ { 0x0, 0x6, 0x32, 0x0, 0x03c80000, 29, 0x03c80000 },
+ { 0x0, 0x6, 0x33, 0x0, 0x130e0000, 34, 0x130e0000 },
+ { 0x0, 0x6, 0x34, 0x0, 0x20020000, 37, 0x20020000 },
+ { 0x0, 0x6, 0x35, 0x0, 0x20060000, 39, 0x20060000 },
+ { 0x0, 0x6, 0x36, 0x0, 0x20200000, 41, 0x20200000 },
+ { 0x0, 0x6, 0x37, 0x0, 0x206a0000, 51, 0x206a0000 },
+ { 0x0, 0x6, 0x38, 0x0, 0x20740000, 53, 0x20740000 },
+ { 0x0, 0x6, 0x39, 0x0, 0x20fe0000, 58, 0x20fe0000 },
+ { 0x0, 0x6, 0x3a, 0x0, 0x21a20000, 67, 0x21a20000 },
+ { 0x0, 0x6, 0x3b, 0x0, 0x21aa0000, 69, 0x21aa0000 },
+ { 0x0, 0x6, 0x3c, 0x0, 0x02b80000, 17, 0x02b80000 },
+ { 0x0, 0x6, 0x3d, 0x0, 0x03080000, 20, 0x03080000 },
+ { 0x0, 0x6, 0x3e, 0x0, 0x13100000, 35, 0x13100000 },
+ { 0x0, 0x6, 0x3f, 0x0, 0x01f00000, 2, 0x01f00000 },
+ { 0x0, 0x6, 0x40, 0x0, 0x03000000, 19, 0x03000000 },
+ { 0x0, 0x6, 0x41, 0x0, 0x03c00000, 28, 0x03c00000 },
+ { 0x0, 0x6, 0x42, 0x0, 0x03d00000, 30, 0x03d00000 },
+ { 0x0, 0x6, 0x43, 0x0, 0x01700000, 1, 0x01700000 },
+ { 0x0, 0x6, 0x44, 0x0, 0x02c00000, 18, 0x02c00000 },
+ { 0x0, 0x6, 0x45, 0x0, 0x02600000, 15, 0x02600000 },
+ { 0x0, 0x6, 0x46, 0x0, 0x06000000, 32, 0x06000000 },
+ { 0x0, 0x6, 0x47, 0x0, 0x24000000, 71, 0x24000000 },
+ { 0x0, 0x7, 0x00, 0x0, 0x12000000, 0, 0x12000000 },
+ { 0x0, 0x8, 0x00, 0x0, 0x11000000, 0, 0x11000000 },
+ { 0x0, 0x9, 0x00, 0x0, 0x10000000, 0, 0x10000000 },
+ { 0x0, 0xa, 0x00, 0x0, 0x22000000, 0, 0x22000000 }
+};
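+
+/*
+ * Illustrative use of the aperture tables (a sketch under the assumption
+ * that rows are matched on the decoded route-ID fields): the
+ * (initflow, targflow, targ_subrange) triple selects one row, and the
+ * faulting address is reconstructed from that row's local-address base,
+ * roughly:
+ *
+ *	if (ap[i].initflow == initflow && ap[i].targflow == targflow &&
+ *	    ap[i].targ_subrange == targ_subrange)
+ *		addr = ap[i].init_localaddress + local_offset;
+ *
+ * where "local_offset" (the offset within the target aperture) is a
+ * placeholder name used only in this sketch.
+ */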
+
+/*
+ * BPMP NOC aperture lookup table as per file "BPMP_NOC_Structure.info".
+ */
+static const char * const tegra194_bpmpnoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_m_i/I/0",
+ [0x2] = "cpu_p_i/I/0",
+ [0x3] = "cvc_i/I/0",
+ [0x4] = "dma_m_i/I/0",
+ [0x5] = "dma_p_i/I/0",
+ [0x6] = "RESERVED",
+ [0x7] = "RESERVED"
+};
+
+static const char * const tegra194_bpmpnoc_routeid_targflow[] = {
+ [0x00] = "multiport0_t/T/actmon",
+ [0x01] = "multiport0_t/T/ast_0",
+ [0x02] = "multiport0_t/T/ast_1",
+ [0x03] = "multiport0_t/T/atcm_cfg",
+ [0x04] = "multiport0_t/T/car",
+ [0x05] = "multiport0_t/T/central_pwr_mgr",
+ [0x06] = "multiport0_t/T/central_vtg_ctlr",
+ [0x07] = "multiport0_t/T/cfg",
+ [0x08] = "multiport0_t/T/dma",
+ [0x09] = "multiport0_t/T/err_collator",
+ [0x0a] = "multiport0_t/T/err_collator_car",
+ [0x0b] = "multiport0_t/T/fpga_misc",
+ [0x0c] = "multiport0_t/T/fpga_uart",
+ [0x0d] = "multiport0_t/T/gte",
+ [0x0e] = "multiport0_t/T/hsp",
+ [0x0f] = "multiport0_t/T/misc",
+ [0x10] = "multiport0_t/T/pm",
+ [0x11] = "multiport0_t/T/simon0",
+ [0x12] = "multiport0_t/T/simon1",
+ [0x13] = "multiport0_t/T/simon2",
+ [0x14] = "multiport0_t/T/simon3",
+ [0x15] = "multiport0_t/T/simon4",
+ [0x16] = "multiport0_t/T/soc_therm",
+ [0x17] = "multiport0_t/T/tke",
+ [0x18] = "multiport0_t/T/vic_0",
+ [0x19] = "multiport0_t/T/vic_1",
+ [0x1a] = "ast0_t/T/0",
+ [0x1b] = "ast1_t/T/0",
+ [0x1c] = "bpmp_noc_firewall/T/0",
+ [0x1d] = "cbb_t/T/0",
+ [0x1e] = "cpu_t/T/0",
+ [0x1f] = "svc_t/T/0"
+};
+
+/*
+ * Fields of BPMP NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_bpmpnoc_apert_lookup[] = {
+ { 0x0, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x0, 0x1e, 0x0, 0x0, 0x0d400000, 0, 0x0d400000 },
+ { 0x0, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x0, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x0, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x0, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x0, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x0, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x0, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x0, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x0, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x0, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x0, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x0, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x0, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x0, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x0, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x0, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x0, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x0, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x0, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x0, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x0, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x0, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x0, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x0, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x0, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x0, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x0, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x0, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x0, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x0, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x0, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x0, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x0, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x0, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x0, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x0, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x0, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x0, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x0, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x0, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x0, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x1a, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x1, 0x1a, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x1, 0x1a, 0x2, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x2, 0x1d, 0x0, 0x0, 0x20b00000, 8, 0x20b00000 },
+ { 0x2, 0x1d, 0x1, 0x0, 0x20800000, 7, 0x20800000 },
+ { 0x2, 0x1d, 0x2, 0x0, 0x20c00000, 9, 0x20c00000 },
+ { 0x2, 0x1d, 0x3, 0x0, 0x0d800000, 3, 0x0d800000 },
+ { 0x2, 0x1d, 0x4, 0x0, 0x20000000, 6, 0x20000000 },
+ { 0x2, 0x1d, 0x5, 0x0, 0x0c000000, 2, 0x0c000000 },
+ { 0x2, 0x1d, 0x6, 0x0, 0x21000000, 10, 0x21000000 },
+ { 0x2, 0x1d, 0x7, 0x0, 0x0e000000, 4, 0x0e000000 },
+ { 0x2, 0x1d, 0x8, 0x0, 0x22000000, 11, 0x22000000 },
+ { 0x2, 0x1d, 0x9, 0x0, 0x08000000, 1, 0x08000000 },
+ { 0x2, 0x1d, 0xa, 0x0, 0x24000000, 12, 0x24000000 },
+ { 0x2, 0x1d, 0xb, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x1d, 0xc, 0x0, 0x28000000, 13, 0x28000000 },
+ { 0x2, 0x1d, 0xd, 0x0, 0x10000000, 5, 0x10000000 },
+ { 0x2, 0x1d, 0xe, 0x0, 0x30000000, 14, 0x30000000 },
+ { 0x2, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x2, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x2, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x2, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x2, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x2, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x2, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x2, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x2, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x2, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x2, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x2, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x2, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x2, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x2, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x2, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x2, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x2, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x2, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x2, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x2, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x2, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x2, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x2, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x2, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x2, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x2, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x2, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x2, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x2, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x2, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x2, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x2, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x2, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x2, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x2, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x2, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x2, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x2, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x2, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x2, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x2, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x2, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x2, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x2, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x2, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x1b, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x3, 0x1b, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x3, 0x1c, 0x0, 0x2, 0x0d640000, 0, 0x00000000 },
+ { 0x3, 0x1d, 0x0, 0x2, 0x20b00000, 8, 0x20b00000 },
+ { 0x3, 0x1d, 0x1, 0x2, 0x20800000, 7, 0x20800000 },
+ { 0x3, 0x1d, 0x2, 0x2, 0x20c00000, 9, 0x20c00000 },
+ { 0x3, 0x1d, 0x3, 0x2, 0x0d800000, 3, 0x0d800000 },
+ { 0x3, 0x1d, 0x4, 0x2, 0x20000000, 6, 0x20000000 },
+ { 0x3, 0x1d, 0x5, 0x2, 0x0c000000, 2, 0x0c000000 },
+ { 0x3, 0x1d, 0x6, 0x2, 0x21000000, 10, 0x21000000 },
+ { 0x3, 0x1d, 0x7, 0x2, 0x0e000000, 4, 0x0e000000 },
+ { 0x3, 0x1d, 0x8, 0x2, 0x22000000, 11, 0x22000000 },
+ { 0x3, 0x1d, 0x9, 0x2, 0x08000000, 1, 0x08000000 },
+ { 0x3, 0x1d, 0xa, 0x2, 0x24000000, 12, 0x24000000 },
+ { 0x3, 0x1d, 0xb, 0x2, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x1d, 0xc, 0x2, 0x28000000, 13, 0x28000000 },
+ { 0x3, 0x1d, 0xd, 0x2, 0x10000000, 5, 0x10000000 },
+ { 0x3, 0x1d, 0xe, 0x2, 0x30000000, 14, 0x30000000 },
+ { 0x3, 0x1e, 0x0, 0x2, 0x0d400000, 0, 0x0d400000 },
+ { 0x3, 0x00, 0x0, 0x2, 0x0d230000, 0, 0x00000000 },
+ { 0x3, 0x01, 0x0, 0x2, 0x0d040000, 0, 0x00000000 },
+ { 0x3, 0x02, 0x0, 0x2, 0x0d050000, 0, 0x00000000 },
+ { 0x3, 0x03, 0x0, 0x2, 0x0d000000, 0, 0x00000000 },
+ { 0x3, 0x04, 0x0, 0x2, 0x20ae0000, 3, 0x000e0000 },
+ { 0x3, 0x04, 0x1, 0x2, 0x20ac0000, 2, 0x000c0000 },
+ { 0x3, 0x04, 0x2, 0x2, 0x20a80000, 1, 0x00080000 },
+ { 0x3, 0x04, 0x3, 0x2, 0x20a00000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x0, 0x2, 0x0d2a0000, 0, 0x00000000 },
+ { 0x3, 0x06, 0x0, 0x2, 0x0d290000, 0, 0x00000000 },
+ { 0x3, 0x07, 0x0, 0x2, 0x0d2c0000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x0, 0x2, 0x0d0e0000, 4, 0x00080000 },
+ { 0x3, 0x08, 0x1, 0x2, 0x0d060000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x2, 0x2, 0x0d080000, 1, 0x00020000 },
+ { 0x3, 0x08, 0x3, 0x2, 0x0d0a0000, 2, 0x00040000 },
+ { 0x3, 0x08, 0x4, 0x2, 0x0d0c0000, 3, 0x00060000 },
+ { 0x3, 0x09, 0x0, 0x2, 0x0d650000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x0, 0x2, 0x20af0000, 0, 0x00000000 },
+ { 0x3, 0x0b, 0x0, 0x2, 0x0d3e0000, 0, 0x00000000 },
+ { 0x3, 0x0c, 0x0, 0x2, 0x0d3d0000, 0, 0x00000000 },
+ { 0x3, 0x0d, 0x0, 0x2, 0x0d1e0000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x0, 0x2, 0x0d150000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x1, 0x2, 0x0d160000, 1, 0x00010000 },
+ { 0x3, 0x0e, 0x2, 0x2, 0x0d170000, 2, 0x00020000 },
+ { 0x3, 0x0e, 0x3, 0x2, 0x0d180000, 3, 0x00030000 },
+ { 0x3, 0x0e, 0x4, 0x2, 0x0d190000, 4, 0x00040000 },
+ { 0x3, 0x0e, 0x5, 0x2, 0x0d1a0000, 5, 0x00050000 },
+ { 0x3, 0x0e, 0x6, 0x2, 0x0d1b0000, 6, 0x00060000 },
+ { 0x3, 0x0e, 0x7, 0x2, 0x0d1c0000, 7, 0x00070000 },
+ { 0x3, 0x0e, 0x8, 0x2, 0x0d1d0000, 8, 0x00080000 },
+ { 0x3, 0x0f, 0x0, 0x2, 0x0d660000, 0, 0x00000000 },
+ { 0x3, 0x10, 0x0, 0x2, 0x0d1f0000, 0, 0x00000000 },
+ { 0x3, 0x10, 0x1, 0x2, 0x0d200000, 1, 0x00010000 },
+ { 0x3, 0x10, 0x2, 0x2, 0x0d210000, 2, 0x00020000 },
+ { 0x3, 0x10, 0x3, 0x2, 0x0d220000, 3, 0x00030000 },
+ { 0x3, 0x11, 0x0, 0x2, 0x0d240000, 0, 0x00000000 },
+ { 0x3, 0x12, 0x0, 0x2, 0x0d250000, 0, 0x00000000 },
+ { 0x3, 0x13, 0x0, 0x2, 0x0d260000, 0, 0x00000000 },
+ { 0x3, 0x14, 0x0, 0x2, 0x0d270000, 0, 0x00000000 },
+ { 0x3, 0x15, 0x0, 0x2, 0x0d2b0000, 0, 0x00000000 },
+ { 0x3, 0x16, 0x0, 0x2, 0x0d280000, 0, 0x00000000 },
+ { 0x3, 0x17, 0x0, 0x2, 0x0d0f0000, 0, 0x00000000 },
+ { 0x3, 0x17, 0x1, 0x2, 0x0d100000, 1, 0x00010000 },
+ { 0x3, 0x17, 0x2, 0x2, 0x0d110000, 2, 0x00020000 },
+ { 0x3, 0x17, 0x3, 0x2, 0x0d120000, 3, 0x00030000 },
+ { 0x3, 0x17, 0x4, 0x2, 0x0d130000, 4, 0x00040000 },
+ { 0x3, 0x17, 0x5, 0x2, 0x0d140000, 5, 0x00050000 },
+ { 0x3, 0x18, 0x0, 0x2, 0x0d020000, 0, 0x00000000 },
+ { 0x3, 0x19, 0x0, 0x2, 0x0d030000, 0, 0x00000000 },
+ { 0x3, 0x1f, 0x0, 0x2, 0x0d600000, 0, 0x00000000 },
+ { 0x3, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x1b, 0x0, 0x0, 0x40000000, 0, 0x40000000 },
+ { 0x4, 0x1b, 0x1, 0x1, 0x80000000, 1, 0x80000000 },
+ { 0x4, 0x1e, 0x0, 0x2, 0x0d400000, 0, 0x0d400000 },
+ { 0x4, 0x1e, 0x1, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x5, 0x1c, 0x0, 0x0, 0x0d640000, 0, 0x00000000 },
+ { 0x5, 0x1d, 0x0, 0x0, 0x20b00000, 8, 0x20b00000 },
+ { 0x5, 0x1d, 0x1, 0x0, 0x20800000, 7, 0x20800000 },
+ { 0x5, 0x1d, 0x2, 0x0, 0x20c00000, 9, 0x20c00000 },
+ { 0x5, 0x1d, 0x3, 0x0, 0x0d800000, 3, 0x0d800000 },
+ { 0x5, 0x1d, 0x4, 0x0, 0x20000000, 6, 0x20000000 },
+ { 0x5, 0x1d, 0x5, 0x0, 0x0c000000, 2, 0x0c000000 },
+ { 0x5, 0x1d, 0x6, 0x0, 0x21000000, 10, 0x21000000 },
+ { 0x5, 0x1d, 0x7, 0x0, 0x0e000000, 4, 0x0e000000 },
+ { 0x5, 0x1d, 0x8, 0x0, 0x22000000, 11, 0x22000000 },
+ { 0x5, 0x1d, 0x9, 0x0, 0x08000000, 1, 0x08000000 },
+ { 0x5, 0x1d, 0xa, 0x0, 0x24000000, 12, 0x24000000 },
+ { 0x5, 0x1d, 0xb, 0x0, 0x00000000, 0, 0x00000000 },
+ { 0x5, 0x1d, 0xc, 0x0, 0x28000000, 13, 0x28000000 },
+ { 0x5, 0x1d, 0xd, 0x0, 0x10000000, 5, 0x10000000 },
+ { 0x5, 0x1d, 0xe, 0x0, 0x30000000, 14, 0x30000000 },
+ { 0x5, 0x00, 0x0, 0x0, 0x0d230000, 0, 0x00000000 },
+ { 0x5, 0x01, 0x0, 0x0, 0x0d040000, 0, 0x00000000 },
+ { 0x5, 0x02, 0x0, 0x0, 0x0d050000, 0, 0x00000000 },
+ { 0x5, 0x03, 0x0, 0x0, 0x0d000000, 0, 0x00000000 },
+ { 0x5, 0x04, 0x0, 0x0, 0x20ae0000, 3, 0x000e0000 },
+ { 0x5, 0x04, 0x1, 0x0, 0x20ac0000, 2, 0x000c0000 },
+ { 0x5, 0x04, 0x2, 0x0, 0x20a80000, 1, 0x00080000 },
+ { 0x5, 0x04, 0x3, 0x0, 0x20a00000, 0, 0x00000000 },
+ { 0x5, 0x05, 0x0, 0x0, 0x0d2a0000, 0, 0x00000000 },
+ { 0x5, 0x06, 0x0, 0x0, 0x0d290000, 0, 0x00000000 },
+ { 0x5, 0x07, 0x0, 0x0, 0x0d2c0000, 0, 0x00000000 },
+ { 0x5, 0x08, 0x0, 0x0, 0x0d0e0000, 4, 0x00080000 },
+ { 0x5, 0x08, 0x1, 0x0, 0x0d060000, 0, 0x00000000 },
+ { 0x5, 0x08, 0x2, 0x0, 0x0d080000, 1, 0x00020000 },
+ { 0x5, 0x08, 0x3, 0x0, 0x0d0a0000, 2, 0x00040000 },
+ { 0x5, 0x08, 0x4, 0x0, 0x0d0c0000, 3, 0x00060000 },
+ { 0x5, 0x09, 0x0, 0x0, 0x0d650000, 0, 0x00000000 },
+ { 0x5, 0x0a, 0x0, 0x0, 0x20af0000, 0, 0x00000000 },
+ { 0x5, 0x0b, 0x0, 0x0, 0x0d3e0000, 0, 0x00000000 },
+ { 0x5, 0x0c, 0x0, 0x0, 0x0d3d0000, 0, 0x00000000 },
+ { 0x5, 0x0d, 0x0, 0x0, 0x0d1e0000, 0, 0x00000000 },
+ { 0x5, 0x0e, 0x0, 0x0, 0x0d150000, 0, 0x00000000 },
+ { 0x5, 0x0e, 0x1, 0x0, 0x0d160000, 1, 0x00010000 },
+ { 0x5, 0x0e, 0x2, 0x0, 0x0d170000, 2, 0x00020000 },
+ { 0x5, 0x0e, 0x3, 0x0, 0x0d180000, 3, 0x00030000 },
+ { 0x5, 0x0e, 0x4, 0x0, 0x0d190000, 4, 0x00040000 },
+ { 0x5, 0x0e, 0x5, 0x0, 0x0d1a0000, 5, 0x00050000 },
+ { 0x5, 0x0e, 0x6, 0x0, 0x0d1b0000, 6, 0x00060000 },
+ { 0x5, 0x0e, 0x7, 0x0, 0x0d1c0000, 7, 0x00070000 },
+ { 0x5, 0x0e, 0x8, 0x0, 0x0d1d0000, 8, 0x00080000 },
+ { 0x5, 0x0f, 0x0, 0x0, 0x0d660000, 0, 0x00000000 },
+ { 0x5, 0x10, 0x0, 0x0, 0x0d1f0000, 0, 0x00000000 },
+ { 0x5, 0x10, 0x1, 0x0, 0x0d200000, 1, 0x00010000 },
+ { 0x5, 0x10, 0x2, 0x0, 0x0d210000, 2, 0x00020000 },
+ { 0x5, 0x10, 0x3, 0x0, 0x0d220000, 3, 0x00030000 },
+ { 0x5, 0x11, 0x0, 0x0, 0x0d240000, 0, 0x00000000 },
+ { 0x5, 0x12, 0x0, 0x0, 0x0d250000, 0, 0x00000000 },
+ { 0x5, 0x13, 0x0, 0x0, 0x0d260000, 0, 0x00000000 },
+ { 0x5, 0x14, 0x0, 0x0, 0x0d270000, 0, 0x00000000 },
+ { 0x5, 0x15, 0x0, 0x0, 0x0d2b0000, 0, 0x00000000 },
+ { 0x5, 0x16, 0x0, 0x0, 0x0d280000, 0, 0x00000000 },
+ { 0x5, 0x17, 0x0, 0x0, 0x0d0f0000, 0, 0x00000000 },
+ { 0x5, 0x17, 0x1, 0x0, 0x0d100000, 1, 0x00010000 },
+ { 0x5, 0x17, 0x2, 0x0, 0x0d110000, 2, 0x00020000 },
+ { 0x5, 0x17, 0x3, 0x0, 0x0d120000, 3, 0x00030000 },
+ { 0x5, 0x17, 0x4, 0x0, 0x0d130000, 4, 0x00040000 },
+ { 0x5, 0x17, 0x5, 0x0, 0x0d140000, 5, 0x00050000 },
+ { 0x5, 0x18, 0x0, 0x0, 0x0d020000, 0, 0x00000000 },
+ { 0x5, 0x19, 0x0, 0x0, 0x0d030000, 0, 0x00000000 },
+ { 0x5, 0x1f, 0x0, 0x0, 0x0d600000, 0, 0x00000000 },
+ { 0x5, 0x1f, 0x1, 0x0, 0x00000000, 0, 0x00000000 }
+};
+
+/*
+ * AON NOC aperture lookup table as per file "AON_NOC_Structure.info".
+ */
+static const char * const tegra194_aonnoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_p_i/I/0",
+ [0x2] = "dma_m_i/I/0",
+ [0x3] = "dma_p_i/I/0"
+};
+
+static const char * const tegra194_aonnoc_routeid_targflow[] = {
+ [0x00] = "multiport1_t/T/aon_misc",
+ [0x01] = "multiport1_t/T/avic0",
+ [0x02] = "multiport1_t/T/avic1",
+ [0x03] = "multiport1_t/T/can1",
+ [0x04] = "multiport1_t/T/can2",
+ [0x05] = "multiport1_t/T/dma",
+ [0x06] = "multiport1_t/T/dmic",
+ [0x07] = "multiport1_t/T/err_collator",
+ [0x08] = "multiport1_t/T/fpga_misc",
+ [0x09] = "multiport1_t/T/gte",
+ [0x0a] = "multiport1_t/T/hsp",
+ [0x0b] = "multiport1_t/T/i2c2",
+ [0x0c] = "multiport1_t/T/i2c8",
+ [0x0d] = "multiport1_t/T/pwm",
+ [0x0e] = "multiport1_t/T/spi2",
+ [0x0f] = "multiport1_t/T/tke",
+ [0x10] = "multiport1_t/T/uartg",
+ [0x11] = "RESERVED",
+ [0x12] = "RESERVED",
+ [0x13] = "RESERVED",
+ [0x14] = "RESERVED",
+ [0x15] = "RESERVED",
+ [0x16] = "RESERVED",
+ [0x17] = "RESERVED",
+ [0x18] = "RESERVED",
+ [0x19] = "RESERVED",
+ [0x1a] = "RESERVED",
+ [0x1b] = "RESERVED",
+ [0x1c] = "RESERVED",
+ [0x1d] = "RESERVED",
+ [0x1e] = "RESERVED",
+ [0x1f] = "RESERVED",
+ [0x20] = "multiport0_t/T/aovc",
+ [0x21] = "multiport0_t/T/atcm",
+ [0x22] = "multiport0_t/T/cast",
+ [0x23] = "multiport0_t/T/dast",
+ [0x24] = "multiport0_t/T/err_collator_car",
+ [0x25] = "multiport0_t/T/gpio",
+ [0x26] = "multiport0_t/T/i2c10",
+ [0x27] = "multiport0_t/T/mss",
+ [0x28] = "multiport0_t/T/padctl_a12",
+ [0x29] = "multiport0_t/T/padctl_a14",
+ [0x2a] = "multiport0_t/T/padctl_a15",
+ [0x2b] = "multiport0_t/T/rtc",
+ [0x2c] = "multiport0_t/T/tsc",
+ [0x2d] = "RESERVED",
+ [0x2e] = "RESERVED",
+ [0x2f] = "RESERVED",
+ [0x30] = "multiport2_t/T/aon_vref_ro",
+ [0x31] = "multiport2_t/T/aopm",
+ [0x32] = "multiport2_t/T/car",
+ [0x33] = "multiport2_t/T/pmc",
+ [0x34] = "ast1_t/T/0",
+ [0x35] = "cbb_t/T/0",
+ [0x36] = "cpu_t/T/0",
+ [0x37] = "firewall_t/T/0",
+ [0x38] = "svc_t/T/0",
+ [0x39] = "uartc/T/uartc",
+ [0x3a] = "RESERVED",
+ [0x3b] = "RESERVED",
+ [0x3c] = "RESERVED",
+ [0x3d] = "RESERVED",
+ [0x3e] = "RESERVED",
+ [0x3f] = "RESERVED"
+};
+
+/*
+ * Fields of AON NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_aonnoc_aperture_lookup[] = {
+ { 0x0, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x0, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x0, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x0, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x0, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x0, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x0, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x0, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x0, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x0, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x0, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x0, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x0, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x0, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x0, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x0, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x0, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x0, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x0, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x0, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x0, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x0, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x0, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x0, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x0, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x0, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x0, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x0, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x0, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x0, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x0, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x0, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x0, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x0, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x0, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x0, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x0, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x0, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x0, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x0, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x0, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x0, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x0, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x0, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x0, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x0, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x0, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x0, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x0, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x0, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x0, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x0, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x0, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x0, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x0, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x0, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x0, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x0, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x0, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x0, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x0, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x0, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x0, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x0, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x0, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 },
+ { 0x1, 0x35, 0x00, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x35, 0x01, 0, 0x00100000, 1, 0x00100000 },
+ { 0x1, 0x35, 0x02, 0, 0x05a00000, 11, 0x05a00000 },
+ { 0x1, 0x35, 0x03, 0, 0x05b00000, 32, 0x05b00000 },
+ { 0x1, 0x35, 0x04, 0, 0x05c00000, 33, 0x05c00000 },
+ { 0x1, 0x35, 0x05, 0, 0x05d00000, 12, 0x05d00000 },
+ { 0x1, 0x35, 0x06, 0, 0x20000000, 19, 0x20000000 },
+ { 0x1, 0x35, 0x07, 0, 0x20100000, 20, 0x20100000 },
+ { 0x1, 0x35, 0x08, 0, 0x20a00000, 24, 0x20a00000 },
+ { 0x1, 0x35, 0x09, 0, 0x20d00000, 25, 0x20d00000 },
+ { 0x1, 0x35, 0x0a, 0, 0x00200000, 2, 0x00200000 },
+ { 0x1, 0x35, 0x0b, 0, 0x05800000, 10, 0x05800000 },
+ { 0x1, 0x35, 0x0c, 0, 0x05e00000, 13, 0x05e00000 },
+ { 0x1, 0x35, 0x0d, 0, 0x20200000, 21, 0x20200000 },
+ { 0x1, 0x35, 0x0e, 0, 0x20800000, 23, 0x20800000 },
+ { 0x1, 0x35, 0x0f, 0, 0x20e00000, 26, 0x20e00000 },
+ { 0x1, 0x35, 0x10, 0, 0x00400000, 3, 0x00400000 },
+ { 0x1, 0x35, 0x11, 0, 0x20400000, 22, 0x20400000 },
+ { 0x1, 0x35, 0x12, 0, 0x00800000, 4, 0x00800000 },
+ { 0x1, 0x35, 0x13, 0, 0x05000000, 9, 0x05000000 },
+ { 0x1, 0x35, 0x14, 0, 0x0c800000, 34, 0x0c800000 },
+ { 0x1, 0x35, 0x15, 0, 0x01000000, 5, 0x01000000 },
+ { 0x1, 0x35, 0x16, 0, 0x03000000, 7, 0x03000000 },
+ { 0x1, 0x35, 0x17, 0, 0x04000000, 8, 0x04000000 },
+ { 0x1, 0x35, 0x18, 0, 0x0d000000, 16, 0x0d000000 },
+ { 0x1, 0x35, 0x19, 0, 0x21000000, 27, 0x21000000 },
+ { 0x1, 0x35, 0x1a, 0, 0x02000000, 6, 0x02000000 },
+ { 0x1, 0x35, 0x1b, 0, 0x06000000, 14, 0x06000000 },
+ { 0x1, 0x35, 0x1c, 0, 0x0e000000, 17, 0x0e000000 },
+ { 0x1, 0x35, 0x1d, 0, 0x22000000, 28, 0x22000000 },
+ { 0x1, 0x35, 0x1e, 0, 0x08000000, 15, 0x08000000 },
+ { 0x1, 0x35, 0x1f, 0, 0x24000000, 29, 0x24000000 },
+ { 0x1, 0x35, 0x20, 0, 0x28000000, 30, 0x28000000 },
+ { 0x1, 0x35, 0x21, 0, 0x10000000, 18, 0x10000000 },
+ { 0x1, 0x35, 0x22, 0, 0x30000000, 31, 0x30000000 },
+ { 0x1, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x1, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x1, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x1, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x1, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x1, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x1, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x1, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x1, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x1, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x1, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x1, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x1, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x1, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x1, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x1, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x1, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x1, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x1, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x1, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x1, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x1, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x1, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x1, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x1, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x1, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x1, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x1, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x1, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x1, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x1, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x1, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x1, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x1, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x1, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x1, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x1, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x1, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x1, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x1, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x1, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x1, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x1, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x1, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x1, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x1, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x1, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x1, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x1, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x1, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x1, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x1, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x1, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x1, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x1, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x1, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x1, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x1, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x1, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x1, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x1, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x1, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x1, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x1, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x1, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x1, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x1, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 },
+ { 0x2, 0x34, 0x00, 0, 0x40000000, 0, 0x40000000 },
+ { 0x2, 0x34, 0x01, 0, 0x80000000, 1, 0x80000000 },
+ { 0x2, 0x36, 0x00, 0, 0x0c400000, 0, 0x0c400000 },
+ { 0x2, 0x36, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x35, 0x00, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x35, 0x01, 0, 0x00100000, 1, 0x00100000 },
+ { 0x3, 0x35, 0x02, 0, 0x05a00000, 11, 0x05a00000 },
+ { 0x3, 0x35, 0x03, 0, 0x05b00000, 32, 0x05b00000 },
+ { 0x3, 0x35, 0x04, 0, 0x05c00000, 33, 0x05c00000 },
+ { 0x3, 0x35, 0x05, 0, 0x05d00000, 12, 0x05d00000 },
+ { 0x3, 0x35, 0x06, 0, 0x20000000, 19, 0x20000000 },
+ { 0x3, 0x35, 0x07, 0, 0x20100000, 20, 0x20100000 },
+ { 0x3, 0x35, 0x08, 0, 0x20a00000, 24, 0x20a00000 },
+ { 0x3, 0x35, 0x09, 0, 0x20d00000, 25, 0x20d00000 },
+ { 0x3, 0x35, 0x0a, 0, 0x00200000, 2, 0x00200000 },
+ { 0x3, 0x35, 0x0b, 0, 0x05800000, 10, 0x05800000 },
+ { 0x3, 0x35, 0x0c, 0, 0x05e00000, 13, 0x05e00000 },
+ { 0x3, 0x35, 0x0d, 0, 0x20200000, 21, 0x20200000 },
+ { 0x3, 0x35, 0x0e, 0, 0x20800000, 23, 0x20800000 },
+ { 0x3, 0x35, 0x0f, 0, 0x20e00000, 26, 0x20e00000 },
+ { 0x3, 0x35, 0x10, 0, 0x00400000, 3, 0x00400000 },
+ { 0x3, 0x35, 0x11, 0, 0x20400000, 22, 0x20400000 },
+ { 0x3, 0x35, 0x12, 0, 0x00800000, 4, 0x00800000 },
+ { 0x3, 0x35, 0x13, 0, 0x50000000, 9, 0x05000000 },
+ { 0x3, 0x35, 0x14, 0, 0xc0800000, 34, 0x0c800000 },
+ { 0x3, 0x35, 0x15, 0, 0x10000000, 5, 0x01000000 },
+ { 0x3, 0x35, 0x16, 0, 0x30000000, 7, 0x03000000 },
+ { 0x3, 0x35, 0x17, 0, 0x04000000, 8, 0x04000000 },
+ { 0x3, 0x35, 0x18, 0, 0x0d000000, 16, 0x0d000000 },
+ { 0x3, 0x35, 0x19, 0, 0x21000000, 27, 0x21000000 },
+ { 0x3, 0x35, 0x1a, 0, 0x02000000, 6, 0x02000000 },
+ { 0x3, 0x35, 0x1b, 0, 0x06000000, 14, 0x06000000 },
+ { 0x3, 0x35, 0x1c, 0, 0x0e000000, 17, 0x0e000000 },
+ { 0x3, 0x35, 0x1d, 0, 0x22000000, 28, 0x22000000 },
+ { 0x3, 0x35, 0x1e, 0, 0x08000000, 15, 0x08000000 },
+ { 0x3, 0x35, 0x1f, 0, 0x24000000, 29, 0x24000000 },
+ { 0x3, 0x35, 0x20, 0, 0x28000000, 30, 0x28000000 },
+ { 0x3, 0x35, 0x21, 0, 0x10000000, 18, 0x10000000 },
+ { 0x3, 0x35, 0x22, 0, 0x30000000, 31, 0x30000000 },
+ { 0x3, 0x37, 0x00, 0, 0x0c640000, 0, 0x00000000 },
+ { 0x3, 0x20, 0x00, 0, 0x0c3b0000, 0, 0x00000000 },
+ { 0x3, 0x21, 0x00, 0, 0x0c000000, 0, 0x00000000 },
+ { 0x3, 0x22, 0x00, 0, 0x0c040000, 0, 0x00000000 },
+ { 0x3, 0x23, 0x00, 0, 0x0c050000, 0, 0x00000000 },
+ { 0x3, 0x24, 0x00, 0, 0x20cf0000, 0, 0x00000000 },
+ { 0x3, 0x25, 0x00, 0, 0x0c2f0000, 0, 0x00000000 },
+ { 0x3, 0x26, 0x00, 0, 0x0c230000, 0, 0x00000000 },
+ { 0x3, 0x27, 0x00, 0, 0x0c350000, 0, 0x00000000 },
+ { 0x3, 0x28, 0x00, 0, 0x0c301000, 0, 0x00000000 },
+ { 0x3, 0x29, 0x00, 0, 0x0c302000, 0, 0x00000000 },
+ { 0x3, 0x2a, 0x00, 0, 0x0c303000, 0, 0x00000000 },
+ { 0x3, 0x2b, 0x00, 0, 0x0c2a0000, 0, 0x00000000 },
+ { 0x3, 0x2c, 0x00, 0, 0x0c2b0000, 0, 0x00000000 },
+ { 0x3, 0x2c, 0x01, 0, 0x0c2c0000, 1, 0x00010000 },
+ { 0x3, 0x2c, 0x02, 0, 0x0c2d0000, 2, 0x00020000 },
+ { 0x3, 0x2c, 0x03, 0, 0x0c2e0000, 3, 0x00030000 },
+ { 0x3, 0x00, 0x00, 0, 0x0c660000, 0, 0x00000000 },
+ { 0x3, 0x01, 0x00, 0, 0x0c020000, 0, 0x00000000 },
+ { 0x3, 0x02, 0x00, 0, 0x0c030000, 0, 0x00000000 },
+ { 0x3, 0x03, 0x00, 0, 0x0c310000, 0, 0x00000000 },
+ { 0x3, 0x04, 0x00, 0, 0x0c320000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x00, 0, 0x0c0a0000, 2, 0x00040000 },
+ { 0x3, 0x05, 0x01, 0, 0x0c0b0000, 3, 0x00050000 },
+ { 0x3, 0x05, 0x02, 0, 0x0c0e0000, 5, 0x00080000 },
+ { 0x3, 0x05, 0x03, 0, 0x0c060000, 0, 0x00000000 },
+ { 0x3, 0x05, 0x04, 0, 0x0c080000, 1, 0x00020000 },
+ { 0x3, 0x05, 0x05, 0, 0x0c0c0000, 4, 0x00060000 },
+ { 0x3, 0x06, 0x00, 0, 0x0c330000, 0, 0x00000000 },
+ { 0x3, 0x07, 0x00, 0, 0x0c650000, 0, 0x00000000 },
+ { 0x3, 0x08, 0x00, 0, 0x0c3e0000, 0, 0x00000000 },
+ { 0x3, 0x09, 0x00, 0, 0x0c1e0000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x00, 0, 0x0c150000, 0, 0x00000000 },
+ { 0x3, 0x0a, 0x01, 0, 0x0c160000, 1, 0x00010000 },
+ { 0x3, 0x0a, 0x02, 0, 0x0c170000, 2, 0x00020000 },
+ { 0x3, 0x0a, 0x03, 0, 0x0c180000, 3, 0x00030000 },
+ { 0x3, 0x0a, 0x04, 0, 0x0c190000, 4, 0x00040000 },
+ { 0x3, 0x0a, 0x05, 0, 0x0c1a0000, 5, 0x00050000 },
+ { 0x3, 0x0a, 0x06, 0, 0x0c1b0000, 6, 0x00060000 },
+ { 0x3, 0x0a, 0x07, 0, 0x0c1c0000, 7, 0x00070000 },
+ { 0x3, 0x0a, 0x08, 0, 0x0c1d0000, 8, 0x00080000 },
+ { 0x3, 0x0b, 0x00, 0, 0x0c240000, 0, 0x00000000 },
+ { 0x3, 0x0c, 0x00, 0, 0x0c250000, 0, 0x00000000 },
+ { 0x3, 0x0d, 0x00, 0, 0x0c340000, 0, 0x00000000 },
+ { 0x3, 0x0e, 0x00, 0, 0x0c260000, 0, 0x00000000 },
+ { 0x3, 0x0f, 0x00, 0, 0x0c0f0000, 0, 0x00000000 },
+ { 0x3, 0x0f, 0x01, 0, 0x0c100000, 1, 0x00010000 },
+ { 0x3, 0x0f, 0x02, 0, 0x0c110000, 2, 0x00020000 },
+ { 0x3, 0x0f, 0x03, 0, 0x0c120000, 3, 0x00030000 },
+ { 0x3, 0x0f, 0x04, 0, 0x0c130000, 4, 0x00040000 },
+ { 0x3, 0x0f, 0x05, 0, 0x0c140000, 5, 0x00050000 },
+ { 0x3, 0x10, 0x00, 0, 0x0c290000, 0, 0x00000000 },
+ { 0x3, 0x30, 0x00, 0, 0x20ce0000, 0, 0x00000000 },
+ { 0x3, 0x31, 0x00, 0, 0x0c1f0000, 0, 0x00000000 },
+ { 0x3, 0x31, 0x01, 0, 0x0c200000, 1, 0x00010000 },
+ { 0x3, 0x31, 0x02, 0, 0x0c210000, 2, 0x00020000 },
+ { 0x3, 0x31, 0x03, 0, 0x0c220000, 3, 0x00030000 },
+ { 0x3, 0x32, 0x00, 0, 0x20cc0000, 3, 0x001c0000 },
+ { 0x3, 0x32, 0x01, 0, 0x20c80000, 2, 0x00180000 },
+ { 0x3, 0x32, 0x02, 0, 0x20c00000, 1, 0x00100000 },
+ { 0x3, 0x32, 0x03, 0, 0x20b00000, 0, 0x00000000 },
+ { 0x3, 0x33, 0x00, 0, 0x0c360000, 0, 0x00000000 },
+ { 0x3, 0x33, 0x01, 0, 0x0c370000, 1, 0x00010000 },
+ { 0x3, 0x33, 0x02, 0, 0x0c3a0000, 3, 0x00040000 },
+ { 0x3, 0x33, 0x03, 0, 0x0c380000, 2, 0x00020000 },
+ { 0x3, 0x38, 0x00, 0, 0x0c600000, 0, 0x00000000 },
+ { 0x3, 0x38, 0x01, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x39, 0x00, 0, 0x0c280000, 0, 0x00000000 }
+};
+
+/*
+ * SCE/RCE NOC aperture lookup table as per file "AON_NOC_Structure.info".
+ */
+static const char * const tegra194_scenoc_routeid_initflow[] = {
+ [0x0] = "cbb_i/I/0",
+ [0x1] = "cpu_m_i/I/0",
+ [0x2] = "cpu_p_i/I/0",
+ [0x3] = "dma_m_i/I/0",
+ [0x4] = "dma_p_i/I/0",
+ [0x5] = "RESERVED",
+ [0x6] = "RESERVED",
+ [0x7] = "RESERVED"
+};
+
+static const char * const tegra194_scenoc_routeid_targflow[] = {
+ [0x00] = "multiport0_t/T/atcm_cfg",
+ [0x01] = "multiport0_t/T/car",
+ [0x02] = "multiport0_t/T/cast",
+ [0x03] = "multiport0_t/T/cfg",
+ [0x04] = "multiport0_t/T/dast",
+ [0x05] = "multiport0_t/T/dma",
+ [0x06] = "multiport0_t/T/err_collator",
+ [0x07] = "multiport0_t/T/err_collator_car",
+ [0x08] = "multiport0_t/T/fpga_misc",
+ [0x09] = "multiport0_t/T/fpga_uart",
+ [0x0a] = "multiport0_t/T/gte",
+ [0x0b] = "multiport0_t/T/hsp",
+ [0x0c] = "multiport0_t/T/misc",
+ [0x0d] = "multiport0_t/T/pm",
+ [0x0e] = "multiport0_t/T/tke",
+ [0x0f] = "RESERVED",
+ [0x10] = "multiport1_t/T/hsm",
+ [0x11] = "multiport1_t/T/vic0",
+ [0x12] = "multiport1_t/T/vic1",
+ [0x13] = "ast0_t/T/0",
+ [0x14] = "ast1_t/T/0",
+ [0x15] = "cbb_t/T/0",
+ [0x16] = "cpu_t/T/0",
+ [0x17] = "sce_noc_firewall/T/0",
+ [0x18] = "svc_t/T/0",
+ [0x19] = "RESERVED",
+ [0x1a] = "RESERVED",
+ [0x1b] = "RESERVED",
+ [0x1c] = "RESERVED",
+ [0x1d] = "RESERVED",
+ [0x1e] = "RESERVED",
+ [0x1f] = "RESERVED"
+};
+
+/*
+ * Fields of SCE/RCE NOC lookup table:
+ * Init flow, Targ flow, Targ subrange, Init mapping, Init localAddress,
+ * Targ mapping, Targ localAddress
+ * ----------------------------------------------------------------------------
+ */
+static const struct tegra194_cbb_aperture tegra194_scenoc_apert_lookup[] = {
+ { 0x0, 0x16, 0x0, 0, 0x0b400000, 0, 0x0b400000 },
+ { 0x0, 0x16, 0x1, 0, 0x0bc00000, 1, 0x0bc00000 },
+ { 0x0, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x0, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x0, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x0, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x0, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x0, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x0, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x0, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x0, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x0, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x0, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x0, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x0, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x0, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x0, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x0, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x0, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x0, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x0, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x0, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x0, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x0, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x0, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x0, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x0, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x0, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x0, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x0, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x0, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x0, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x0, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x0, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x0, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x0, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x0, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x0, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x0, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x0, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x0, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x0, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x0, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x0, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x0, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x0, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x0, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x0, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x0, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x0, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x0, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x0, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x0, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x0, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x0, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x0, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x0, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x0, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x0, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x0, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x0, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x0, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x0, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x0, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x0, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x0, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x0, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x0, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x0, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x0, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x0, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x0, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x0, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x0, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x0, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x0, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x0, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x0, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x0, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x0, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x0, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x0, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x0, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x0, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x0, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x0, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x0, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x0, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x0, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x0, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x0, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x0, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x0, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x0, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x0, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x0, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x0, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x0, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x1, 0x13, 0x0, 0, 0x40000000, 0, 0x40000000 },
+ { 0x1, 0x13, 0x1, 1, 0x80000000, 1, 0x80000000 },
+ { 0x1, 0x13, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x0, 0, 0x20c00000, 8, 0x20c00000 },
+ { 0x2, 0x15, 0x1, 0, 0x21100000, 22, 0x21100000 },
+ { 0x2, 0x15, 0x2, 0, 0x20e00000, 9, 0x20e00000 },
+ { 0x2, 0x15, 0x3, 0, 0x21200000, 23, 0x21200000 },
+ { 0x2, 0x15, 0x4, 0, 0x20800000, 7, 0x20800000 },
+ { 0x2, 0x15, 0x5, 0, 0x21400000, 24, 0x21400000 },
+ { 0x2, 0x15, 0x6, 0, 0x0b000000, 18, 0x0b000000 },
+ { 0x2, 0x15, 0x7, 0, 0x0b800000, 3, 0x0b800000 },
+ { 0x2, 0x15, 0x8, 0, 0x20000000, 6, 0x20000000 },
+ { 0x2, 0x15, 0x9, 0, 0x21800000, 25, 0x21800000 },
+ { 0x2, 0x15, 0xa, 0, 0x0a000000, 2, 0x0a000000 },
+ { 0x2, 0x15, 0xb, 0, 0x0a000000, 17, 0x0a000000 },
+ { 0x2, 0x15, 0xc, 0, 0x20000000, 21, 0x20000000 },
+ { 0x2, 0x15, 0xd, 0, 0x21000000, 10, 0x21000000 },
+ { 0x2, 0x15, 0xe, 0, 0x08000000, 1, 0x08000000 },
+ { 0x2, 0x15, 0xf, 0, 0x08000000, 16, 0x08000000 },
+ { 0x2, 0x15, 0x10, 0, 0x22000000, 11, 0x22000000 },
+ { 0x2, 0x15, 0x11, 0, 0x22000000, 26, 0x22000000 },
+ { 0x2, 0x15, 0x12, 0, 0x0c000000, 4, 0x0c000000 },
+ { 0x2, 0x15, 0x13, 0, 0x0c000000, 19, 0x0c000000 },
+ { 0x2, 0x15, 0x14, 0, 0x24000000, 12, 0x24000000 },
+ { 0x2, 0x15, 0x15, 0, 0x24000000, 27, 0x24000000 },
+ { 0x2, 0x15, 0x16, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x15, 0x17, 0, 0x00000000, 15, 0x00000000 },
+ { 0x2, 0x15, 0x18, 0, 0x28000000, 13, 0x28000000 },
+ { 0x2, 0x15, 0x19, 0, 0x28000000, 28, 0x28000000 },
+ { 0x2, 0x15, 0x1a, 0, 0x10000000, 5, 0x10000000 },
+ { 0x2, 0x15, 0x1b, 0, 0x10000000, 20, 0x10000000 },
+ { 0x2, 0x15, 0x1c, 0, 0x30000000, 14, 0x30000000 },
+ { 0x2, 0x15, 0x1d, 0, 0x30000000, 29, 0x30000000 },
+ { 0x2, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x2, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x2, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x2, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x2, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x2, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x2, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x2, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x2, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x2, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x2, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x2, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x2, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x2, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x2, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x2, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x2, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x2, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x2, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x2, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x2, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x2, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x2, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x2, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x2, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x2, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x2, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x2, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x2, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x2, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x2, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x2, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x2, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x2, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x2, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x2, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x2, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x2, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x2, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x2, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x2, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x2, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x2, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x2, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x2, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x2, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x2, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x2, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x2, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x2, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x2, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x2, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x2, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x2, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x2, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x2, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x2, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x2, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x2, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x2, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x2, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x2, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x2, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x2, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x2, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x2, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x2, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x2, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x2, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x2, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x2, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x2, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x2, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x2, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x2, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x2, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x2, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x2, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x2, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x2, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x2, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x2, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x2, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x2, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x2, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x2, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x2, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x2, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x2, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x2, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x2, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x2, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x2, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x2, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x2, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x2, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x14, 0x0, 0, 0x40000000, 0, 0x40000000 },
+ { 0x3, 0x14, 0x1, 1, 0x80000000, 1, 0x80000000 },
+ { 0x3, 0x16, 0x0, 2, 0x0b400000, 0, 0x0b400000 },
+ { 0x3, 0x16, 0x1, 2, 0x0bc00000, 1, 0x0bc00000 },
+ { 0x3, 0x16, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x3, 0x16, 0x3, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x15, 0x0, 0, 0x20c00000, 8, 0x20c00000 },
+ { 0x4, 0x15, 0x1, 0, 0x21100000, 22, 0x21100000 },
+ { 0x4, 0x15, 0x2, 0, 0x20e00000, 9, 0x20e00000 },
+ { 0x4, 0x15, 0x3, 0, 0x21200000, 23, 0x21200000 },
+ { 0x4, 0x15, 0x4, 0, 0x20800000, 7, 0x20800000 },
+ { 0x4, 0x15, 0x5, 0, 0x21400000, 24, 0x21400000 },
+ { 0x4, 0x15, 0x6, 0, 0x0b000000, 18, 0x0b000000 },
+ { 0x4, 0x15, 0x7, 0, 0x0b800000, 3, 0x0b800000 },
+ { 0x4, 0x15, 0x8, 0, 0x20000000, 6, 0x20000000 },
+ { 0x4, 0x15, 0x9, 0, 0x21800000, 25, 0x21800000 },
+ { 0x4, 0x15, 0xa, 0, 0x0a000000, 2, 0x0a000000 },
+ { 0x4, 0x15, 0xb, 0, 0x0a000000, 17, 0x0a000000 },
+ { 0x4, 0x15, 0xc, 0, 0x20000000, 21, 0x20000000 },
+ { 0x4, 0x15, 0xd, 0, 0x21000000, 10, 0x21000000 },
+ { 0x4, 0x15, 0xe, 0, 0x08000000, 1, 0x08000000 },
+ { 0x4, 0x15, 0xf, 0, 0x08000000, 16, 0x08000000 },
+ { 0x4, 0x15, 0x10, 0, 0x22000000, 11, 0x22000000 },
+ { 0x4, 0x15, 0x11, 0, 0x22000000, 26, 0x22000000 },
+ { 0x4, 0x15, 0x12, 0, 0x0c000000, 4, 0x0c000000 },
+ { 0x4, 0x15, 0x13, 0, 0x0c000000, 19, 0x0c000000 },
+ { 0x4, 0x15, 0x14, 0, 0x24000000, 12, 0x24000000 },
+ { 0x4, 0x15, 0x15, 0, 0x24000000, 27, 0x24000000 },
+ { 0x4, 0x15, 0x16, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x15, 0x17, 0, 0x00000000, 15, 0x00000000 },
+ { 0x4, 0x15, 0x18, 0, 0x28000000, 13, 0x28000000 },
+ { 0x4, 0x15, 0x19, 0, 0x28000000, 28, 0x28000000 },
+ { 0x4, 0x15, 0x1a, 0, 0x10000000, 5, 0x10000000 },
+ { 0x4, 0x15, 0x1b, 0, 0x10000000, 20, 0x10000000 },
+ { 0x4, 0x15, 0x1c, 0, 0x30000000, 14, 0x30000000 },
+ { 0x4, 0x15, 0x1d, 0, 0x30000000, 29, 0x30000000 },
+ { 0x4, 0x0, 0x0, 0, 0x0b000000, 0, 0x00000000 },
+ { 0x4, 0x0, 0x1, 0, 0x0b800000, 1, 0x00000000 },
+ { 0x4, 0x1, 0x0, 0, 0x20de0000, 3, 0x000e0000 },
+ { 0x4, 0x1, 0x1, 0, 0x210e0000, 7, 0x000e0000 },
+ { 0x4, 0x1, 0x2, 0, 0x20dc0000, 2, 0x000c0000 },
+ { 0x4, 0x1, 0x3, 0, 0x210c0000, 6, 0x000c0000 },
+ { 0x4, 0x1, 0x4, 0, 0x20d80000, 1, 0x00080000 },
+ { 0x4, 0x1, 0x5, 0, 0x21080000, 5, 0x00080000 },
+ { 0x4, 0x1, 0x6, 0, 0x20d00000, 0, 0x00000000 },
+ { 0x4, 0x1, 0x7, 0, 0x21000000, 4, 0x00000000 },
+ { 0x4, 0x2, 0x0, 0, 0x0b040000, 0, 0x00000000 },
+ { 0x4, 0x2, 0x1, 0, 0x0b840000, 1, 0x00000000 },
+ { 0x4, 0x3, 0x0, 0, 0x0b230000, 0, 0x00000000 },
+ { 0x4, 0x3, 0x1, 0, 0x0ba30000, 1, 0x00000000 },
+ { 0x4, 0x4, 0x0, 0, 0x0b050000, 0, 0x00000000 },
+ { 0x4, 0x4, 0x1, 0, 0x0b850000, 1, 0x00000000 },
+ { 0x4, 0x5, 0x0, 0, 0x0b060000, 0, 0x00000000 },
+ { 0x4, 0x5, 0x1, 0, 0x0b070000, 1, 0x00010000 },
+ { 0x4, 0x5, 0x2, 0, 0x0b080000, 2, 0x00020000 },
+ { 0x4, 0x5, 0x3, 0, 0x0b090000, 3, 0x00030000 },
+ { 0x4, 0x5, 0x4, 0, 0x0b0a0000, 4, 0x00040000 },
+ { 0x4, 0x5, 0x5, 0, 0x0b0b0000, 5, 0x00050000 },
+ { 0x4, 0x5, 0x6, 0, 0x0b0c0000, 6, 0x00060000 },
+ { 0x4, 0x5, 0x7, 0, 0x0b0d0000, 7, 0x00070000 },
+ { 0x4, 0x5, 0x8, 0, 0x0b0e0000, 8, 0x00080000 },
+ { 0x4, 0x5, 0x9, 0, 0x0b860000, 9, 0x00000000 },
+ { 0x4, 0x5, 0xa, 0, 0x0b870000, 10, 0x00010000 },
+ { 0x4, 0x5, 0xb, 0, 0x0b880000, 11, 0x00020000 },
+ { 0x4, 0x5, 0xc, 0, 0x0b890000, 12, 0x00030000 },
+ { 0x4, 0x5, 0xd, 0, 0x0b8a0000, 13, 0x00040000 },
+ { 0x4, 0x5, 0xe, 0, 0x0b8b0000, 14, 0x00050000 },
+ { 0x4, 0x5, 0xf, 0, 0x0b8c0000, 15, 0x00060000 },
+ { 0x4, 0x5, 0x10, 0, 0x0b8d0000, 16, 0x00070000 },
+ { 0x4, 0x5, 0x11, 0, 0x0b8e0000, 17, 0x00080000 },
+ { 0x4, 0x6, 0x0, 0, 0x0b650000, 0, 0x00000000 },
+ { 0x4, 0x6, 0x1, 0, 0x0be50000, 1, 0x00000000 },
+ { 0x4, 0x7, 0x0, 0, 0x20df0000, 0, 0x00000000 },
+ { 0x4, 0x7, 0x1, 0, 0x210f0000, 1, 0x00000000 },
+ { 0x4, 0x8, 0x0, 0, 0x0b3e0000, 0, 0x00000000 },
+ { 0x4, 0x8, 0x1, 0, 0x0bbe0000, 1, 0x00000000 },
+ { 0x4, 0x9, 0x0, 0, 0x0b3d0000, 0, 0x00000000 },
+ { 0x4, 0x9, 0x1, 0, 0x0bbd0000, 1, 0x00000000 },
+ { 0x4, 0xa, 0x0, 0, 0x0b1e0000, 0, 0x00000000 },
+ { 0x4, 0xa, 0x1, 0, 0x0b9e0000, 1, 0x00000000 },
+ { 0x4, 0xb, 0x0, 0, 0x0b150000, 0, 0x00000000 },
+ { 0x4, 0xb, 0x1, 0, 0x0b160000, 1, 0x00010000 },
+ { 0x4, 0xb, 0x2, 0, 0x0b170000, 2, 0x00020000 },
+ { 0x4, 0xb, 0x3, 0, 0x0b180000, 3, 0x00030000 },
+ { 0x4, 0xb, 0x4, 0, 0x0b190000, 4, 0x00040000 },
+ { 0x4, 0xb, 0x5, 0, 0x0b1a0000, 5, 0x00050000 },
+ { 0x4, 0xb, 0x6, 0, 0x0b1b0000, 6, 0x00060000 },
+ { 0x4, 0xb, 0x7, 0, 0x0b1c0000, 7, 0x00070000 },
+ { 0x4, 0xb, 0x8, 0, 0x0b1d0000, 8, 0x00080000 },
+ { 0x4, 0xb, 0x9, 0, 0x0b950000, 9, 0x00000000 },
+ { 0x4, 0xb, 0xa, 0, 0x0b960000, 10, 0x00010000 },
+ { 0x4, 0xb, 0xb, 0, 0x0b970000, 11, 0x00020000 },
+ { 0x4, 0xb, 0xc, 0, 0x0b980000, 12, 0x00030000 },
+ { 0x4, 0xb, 0xd, 0, 0x0b990000, 13, 0x00040000 },
+ { 0x4, 0xb, 0xe, 0, 0x0b9a0000, 14, 0x00050000 },
+ { 0x4, 0xb, 0xf, 0, 0x0b9b0000, 15, 0x00060000 },
+ { 0x4, 0xb, 0x10, 0, 0x0b9c0000, 16, 0x00070000 },
+ { 0x4, 0xb, 0x11, 0, 0x0b9d0000, 17, 0x00080000 },
+ { 0x4, 0xc, 0x0, 0, 0x0b660000, 0, 0x00000000 },
+ { 0x4, 0xc, 0x1, 0, 0x0be60000, 1, 0x00000000 },
+ { 0x4, 0xd, 0x0, 0, 0x0b1f0000, 0, 0x00000000 },
+ { 0x4, 0xd, 0x1, 0, 0x0b200000, 1, 0x00010000 },
+ { 0x4, 0xd, 0x2, 0, 0x0b210000, 2, 0x00020000 },
+ { 0x4, 0xd, 0x3, 0, 0x0b220000, 3, 0x00030000 },
+ { 0x4, 0xd, 0x4, 0, 0x0b9f0000, 4, 0x00000000 },
+ { 0x4, 0xd, 0x5, 0, 0x0ba00000, 5, 0x00010000 },
+ { 0x4, 0xd, 0x6, 0, 0x0ba10000, 6, 0x00020000 },
+ { 0x4, 0xd, 0x7, 0, 0x0ba20000, 7, 0x00030000 },
+ { 0x4, 0xe, 0x0, 0, 0x0b0f0000, 0, 0x00000000 },
+ { 0x4, 0xe, 0x1, 0, 0x0b100000, 1, 0x00010000 },
+ { 0x4, 0xe, 0x2, 0, 0x0b110000, 2, 0x00020000 },
+ { 0x4, 0xe, 0x3, 0, 0x0b120000, 3, 0x00030000 },
+ { 0x4, 0xe, 0x4, 0, 0x0b130000, 4, 0x00040000 },
+ { 0x4, 0xe, 0x5, 0, 0x0b140000, 5, 0x00050000 },
+ { 0x4, 0xe, 0x6, 0, 0x0b8f0000, 6, 0x00000000 },
+ { 0x4, 0xe, 0x7, 0, 0x0b900000, 7, 0x00010000 },
+ { 0x4, 0xe, 0x8, 0, 0x0b910000, 8, 0x00020000 },
+ { 0x4, 0xe, 0x9, 0, 0x0b920000, 9, 0x00030000 },
+ { 0x4, 0xe, 0xa, 0, 0x0b930000, 10, 0x00040000 },
+ { 0x4, 0xe, 0xb, 0, 0x0b940000, 11, 0x00050000 },
+ { 0x4, 0x10, 0x0, 0, 0x0b240000, 0, 0x00000000 },
+ { 0x4, 0x10, 0x1, 0, 0x0ba40000, 1, 0x00000000 },
+ { 0x4, 0x11, 0x0, 0, 0x0b020000, 0, 0x00000000 },
+ { 0x4, 0x11, 0x1, 0, 0x0b820000, 1, 0x00000000 },
+ { 0x4, 0x12, 0x0, 0, 0x0b030000, 0, 0x00000000 },
+ { 0x4, 0x12, 0x1, 0, 0x0b830000, 1, 0x00000000 },
+ { 0x4, 0x17, 0x0, 0, 0x0b640000, 0, 0x00000000 },
+ { 0x4, 0x17, 0x1, 0, 0x0be40000, 1, 0x00000000 },
+ { 0x4, 0x18, 0x0, 0, 0x0b600000, 0, 0x00000000 },
+ { 0x4, 0x18, 0x1, 0, 0x0be00000, 1, 0x00000000 },
+ { 0x4, 0x18, 0x2, 0, 0x00000000, 0, 0x00000000 },
+ { 0x4, 0x18, 0x3, 0, 0x00000000, 0, 0x00000000 }
+};
+
+static void cbbcentralnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(CBB_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(CBB_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(CBB_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(CBB_NOC_SEQID, routeid);
+}
+
+static void bpmpnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(BPMP_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(BPMP_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(BPMP_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(BPMP_NOC_SEQID, routeid);
+}
+
+static void aonnoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(AON_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(AON_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(AON_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(AON_NOC_SEQID, routeid);
+}
+
+static void scenoc_parse_routeid(struct tegra194_cbb_aperture *info, u64 routeid)
+{
+ info->initflow = FIELD_GET(SCE_NOC_INITFLOW, routeid);
+ info->targflow = FIELD_GET(SCE_NOC_TARGFLOW, routeid);
+ info->targ_subrange = FIELD_GET(SCE_NOC_TARG_SUBRANGE, routeid);
+ info->seqid = FIELD_GET(SCE_NOC_SEQID, routeid);
+}
+
+static void cbbcentralnoc_parse_userbits(struct tegra194_cbb_userbits *usrbits, u32 elog_5)
+{
+ usrbits->axcache = FIELD_GET(CBB_NOC_AXCACHE, elog_5);
+ usrbits->non_mod = FIELD_GET(CBB_NOC_NON_MOD, elog_5);
+ usrbits->axprot = FIELD_GET(CBB_NOC_AXPROT, elog_5);
+ usrbits->falconsec = FIELD_GET(CBB_NOC_FALCONSEC, elog_5);
+ usrbits->grpsec = FIELD_GET(CBB_NOC_GRPSEC, elog_5);
+ usrbits->vqc = FIELD_GET(CBB_NOC_VQC, elog_5);
+ usrbits->mstr_id = FIELD_GET(CBB_NOC_MSTR_ID, elog_5) - 1;
+ usrbits->axi_id = FIELD_GET(CBB_NOC_AXI_ID, elog_5);
+}
+
+static void clusternoc_parse_userbits(struct tegra194_cbb_userbits *usrbits, u32 elog_5)
+{
+ usrbits->axcache = FIELD_GET(CLUSTER_NOC_AXCACHE, elog_5);
+ usrbits->axprot = FIELD_GET(CLUSTER_NOC_AXPROT, elog_5);
+ usrbits->falconsec = FIELD_GET(CLUSTER_NOC_FALCONSEC, elog_5);
+ usrbits->grpsec = FIELD_GET(CLUSTER_NOC_GRPSEC, elog_5);
+ usrbits->vqc = FIELD_GET(CLUSTER_NOC_VQC, elog_5);
+ usrbits->mstr_id = FIELD_GET(CLUSTER_NOC_MSTR_ID, elog_5) - 1;
+}
+
+static void tegra194_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_FAULTEN_0);
+ writel(1, priv->regs + ERRLOGGER_1_FAULTEN_0);
+ writel(1, priv->regs + ERRLOGGER_2_FAULTEN_0);
+}
+
+static void tegra194_cbb_stall_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_STALLEN_0);
+ writel(1, priv->regs + ERRLOGGER_1_STALLEN_0);
+ writel(1, priv->regs + ERRLOGGER_2_STALLEN_0);
+}
+
+static void tegra194_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+
+ writel(1, priv->regs + ERRLOGGER_0_ERRCLR_0);
+ writel(1, priv->regs + ERRLOGGER_1_ERRCLR_0);
+ writel(1, priv->regs + ERRLOGGER_2_ERRCLR_0);
+ dsb(sy);
+}
+
+static u32 tegra194_cbb_get_status(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+ u32 value;
+
+ value = readl(priv->regs + ERRLOGGER_0_ERRVLD_0);
+ value |= (readl(priv->regs + ERRLOGGER_1_ERRVLD_0) << 1);
+ value |= (readl(priv->regs + ERRLOGGER_2_ERRVLD_0) << 2);
+
+ dsb(sy);
+ return value;
+}
+
+static u32 tegra194_axi2apb_status(void __iomem *addr)
+{
+ u32 value;
+
+ value = readl(addr + DMAAPB_X_RAW_INTERRUPT_STATUS);
+ writel(0xffffffff, addr + DMAAPB_X_RAW_INTERRUPT_STATUS);
+
+ return value;
+}
+
+static bool tegra194_axi2apb_fatal(struct seq_file *file, unsigned int bridge, u32 status)
+{
+ bool is_fatal = true;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tegra194_axi2apb_error); i++) {
+ if (status & BIT(i)) {
+ tegra_cbb_print_err(file, "\t AXI2APB_%d bridge error: %s\n",
+ bridge + 1, tegra194_axi2apb_error[i]);
+ if (strstr(tegra194_axi2apb_error[i], "Firewall"))
+ is_fatal = false;
+ }
+ }
+
+ return is_fatal;
+}
+
+/*
+ * Fetch the InitlocalAddress from the NOC aperture lookup table
+ * using Targflow and Targ_subrange
+ */
+static u32 get_init_localaddress(const struct tegra194_cbb_aperture *info,
+ const struct tegra194_cbb_aperture *aper, unsigned int max)
+{
+ unsigned int t_f = 0, t_sr = 0;
+ u32 addr = 0;
+
+ for (t_f = 0; t_f < max; t_f++) {
+ if (aper[t_f].targflow == info->targflow) {
+ t_sr = t_f;
+
+ do {
+ if (aper[t_sr].targ_subrange == info->targ_subrange) {
+ addr = aper[t_sr].init_localaddress;
+ return addr;
+ }
+
+ if (t_sr >= max)
+ return 0;
+
+ t_sr++;
+ } while (aper[t_sr].targflow == aper[t_sr - 1].targflow);
+
+ t_f = t_sr;
+ }
+ }
+
+ return addr;
+}
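
Because every row in the lookup tables above that repeats a (targflow, targ_subrange) pair across initflow blocks carries the same init_localaddress, a first-match linear scan yields the same result as the grouped walk in get_init_localaddress(). A minimal sketch of that equivalent lookup, using only the aperture fields the function references (the helper name is hypothetical and not part of the patch):

static u32 example_lookup_init_localaddress(const struct tegra194_cbb_aperture *info,
					    const struct tegra194_cbb_aperture *aper,
					    unsigned int max)
{
	unsigned int i;

	/* Match both keys directly instead of walking per-targflow groups. */
	for (i = 0; i < max; i++)
		if (aper[i].targflow == info->targflow &&
		    aper[i].targ_subrange == info->targ_subrange)
			return aper[i].init_localaddress;

	return 0; /* same fallback as get_init_localaddress() */
}
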
+
+static void print_errlog5(struct seq_file *file, struct tegra194_cbb *cbb)
+{
+ struct tegra194_cbb_userbits userbits;
+
+ cbb->noc->parse_userbits(&userbits, cbb->errlog5);
+
+ if (!strcmp(cbb->noc->name, "cbb-noc")) {
+ tegra_cbb_print_err(file, "\t Non-Modify\t\t: %#x\n", userbits.non_mod);
+ tegra_cbb_print_err(file, "\t AXI ID\t\t: %#x\n", userbits.axi_id);
+ }
+
+ tegra_cbb_print_err(file, "\t Master ID\t\t: %s\n",
+ cbb->noc->master_id[userbits.mstr_id]);
+ tegra_cbb_print_err(file, "\t Security Group(GRPSEC): %#x\n", userbits.grpsec);
+ tegra_cbb_print_cache(file, userbits.axcache);
+ tegra_cbb_print_prot(file, userbits.axprot);
+ tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", userbits.falconsec);
+ tegra_cbb_print_err(file, "\t Virtual Queuing Channel(VQC): %#x\n", userbits.vqc);
+}
+
+/*
+ * Fetch the Base Address/InitlocalAddress from the NOC aperture lookup table using the
+ * TargFlow & Targ_subRange extracted from the RouteId. Reconstruct the address as below:
+ *
+ * Address = Base Address + ((u64)ErrLog4 << 32 | ErrLog3)
+ */
+static void
+print_errlog3_4(struct seq_file *file, u32 errlog3, u32 errlog4,
+ const struct tegra194_cbb_aperture *info,
+ const struct tegra194_cbb_aperture *aperture, unsigned int max)
+{
+ u64 addr = (u64)errlog4 << 32 | errlog3;
+
+ /*
+ * If errlog4[7] = "1", then it's a joker entry. Joker entries are a rare phenomenon and
+ * such addresses are not reliable. Debugging should be done using only the RouteId
+ * information.
+ */
+ if (errlog4 & 0x80)
+ tegra_cbb_print_err(file, "\t debug using RouteId alone as below address is a "
+ "joker entry and not reliable");
+
+ addr += get_init_localaddress(info, aperture, max);
+
+ tegra_cbb_print_err(file, "\t Address accessed\t: %#llx\n", addr);
+}
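
To make the reconstruction concrete, a minimal worked sketch with hypothetical register values; the base stands in for whatever get_init_localaddress() returns for the faulting target, and none of these numbers come from hardware:

#include <linux/types.h>

static u64 example_reconstruct_address(void)
{
	u32 errlog3 = 0x00023000; /* low 32 bits of the logged offset (hypothetical) */
	u32 errlog4 = 0x00000000; /* bit 7 clear, so not a joker entry */
	u64 base = 0x0b150000;    /* hypothetical InitlocalAddress from the lookup table */
	u64 addr = (u64)errlog4 << 32 | errlog3;

	return base + addr;       /* 0x0b150000 + 0x23000 = 0x0b173000 */
}
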
+
+/*
+ * Get the RouteId from the ErrLog1+ErrLog2 registers and extract the
+ * InitFlow, TargFlow, Targ_subRange and SeqId fields from it
+ */
+static void
+print_errlog1_2(struct seq_file *file, struct tegra194_cbb *cbb,
+ struct tegra194_cbb_aperture *info)
+{
+ u64 routeid = (u64)cbb->errlog2 << 32 | cbb->errlog1;
+
+ tegra_cbb_print_err(file, "\t RouteId\t\t: %#llx\n", routeid);
+
+ cbb->noc->parse_routeid(info, routeid);
+
+ tegra_cbb_print_err(file, "\t InitFlow\t\t: %s\n",
+ cbb->noc->routeid_initflow[info->initflow]);
+
+ tegra_cbb_print_err(file, "\t Targflow\t\t: %s\n",
+ cbb->noc->routeid_targflow[info->targflow]);
+
+ tegra_cbb_print_err(file, "\t TargSubRange\t\t: %d\n", info->targ_subrange);
+ tegra_cbb_print_err(file, "\t SeqId\t\t\t: %d\n", seqid);
+}
+
+/*
+ * Print the transaction type, error code and description from ErrLog0 for all
+ * errors. For NOC slave errors, all relevant error information is printed using
+ * ErrLog0 only. Additional information is printed for errors from APB slaves
+ * because for them:
+ * - All errors are logged as SLV (slave) errors since APB has only a single
+ * pslverr bit to report all errors.
+ * - The exact cause is printed by reading the DMAAPB_X_RAW_INTERRUPT_STATUS register.
+ * - The driver prints information identifying the AXI2APB bridge and the exact
+ * error only if there is an error in an AXI2APB slave.
+ * - There is still no way to disambiguate a DEC error from the SLV error type.
+ */
+static bool print_errlog0(struct seq_file *file, struct tegra194_cbb *cbb)
+{
+ struct tegra194_cbb_packet_header hdr;
+ bool is_fatal = true;
+
+ hdr.lock = cbb->errlog0 & 0x1;
+ hdr.opc = FIELD_GET(CBB_ERR_OPC, cbb->errlog0);
+ hdr.errcode = FIELD_GET(CBB_ERR_ERRCODE, cbb->errlog0);
+ hdr.len1 = FIELD_GET(CBB_ERR_LEN1, cbb->errlog0);
+ hdr.format = (cbb->errlog0 >> 31);
+
+ tegra_cbb_print_err(file, "\t Transaction Type\t: %s\n",
+ tegra194_cbb_trantype[hdr.opc]);
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].code);
+ tegra_cbb_print_err(file, "\t Error Source\t\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].source);
+ tegra_cbb_print_err(file, "\t Error Description\t: %s\n",
+ tegra194_cbb_errors[hdr.errcode].desc);
+
+ /*
+ * Do not crash the system for errors which are only notifications indicating
+ * that a transaction was not allowed to be attempted.
+ */
+ if (!strcmp(tegra194_cbb_errors[hdr.errcode].code, "SEC") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "DEC") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "UNS") ||
+ !strcmp(tegra194_cbb_errors[hdr.errcode].code, "DISC")) {
+ is_fatal = false;
+ } else if (!strcmp(tegra194_cbb_errors[hdr.errcode].code, "SLV") &&
+ cbb->num_bridges > 0) {
+ unsigned int i;
+ u32 status;
+
+ /* For all SLV errors, read DMAAPB_X_RAW_INTERRUPT_STATUS
+ * register to get error status for all AXI2APB bridges.
+ * Print bridge details if a bit is set in a bridge's
+ * status register due to an error in an APB slave connected
+ * to that bridge. For other NOC slaves, none of the status
+ * registers will be set.
+ */
+
+ for (i = 0; i < cbb->num_bridges; i++) {
+ status = tegra194_axi2apb_status(cbb->bridges[i].base);
+
+ if (status)
+ is_fatal = tegra194_axi2apb_fatal(file, i, status);
+ }
+ }
+
+ tegra_cbb_print_err(file, "\t Packet header Lock\t: %d\n", hdr.lock);
+ tegra_cbb_print_err(file, "\t Packet header Len1\t: %d\n", hdr.len1);
+
+ if (hdr.format)
+ tegra_cbb_print_err(file, "\t NOC protocol version\t: %s\n",
+ "version >= 2.7");
+ else
+ tegra_cbb_print_err(file, "\t NOC protocol version\t: %s\n",
+ "version < 2.7");
+
+ return is_fatal;
+}
+
+/*
+ * Print debug information about the failed transaction using the
+ * ErrLog registers of the error logger that has ErrVld set
+ */
+static bool print_errloggerX_info(struct seq_file *file, struct tegra194_cbb *cbb,
+ int errloggerX)
+{
+ struct tegra194_cbb_aperture info = { 0, };
+ bool is_fatal = true;
+
+ tegra_cbb_print_err(file, "\tError Logger\t\t: %d\n", errloggerX);
+
+ if (errloggerX == 0) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_0_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_0_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_0_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_0_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_0_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_0_ERRLOG5_0);
+ } else if (errloggerX == 1) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_1_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_1_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_1_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_1_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_1_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_1_ERRLOG5_0);
+ } else if (errloggerX == 2) {
+ cbb->errlog0 = readl(cbb->regs + ERRLOGGER_2_ERRLOG0_0);
+ cbb->errlog1 = readl(cbb->regs + ERRLOGGER_2_ERRLOG1_0);
+ cbb->errlog2 = readl(cbb->regs + ERRLOGGER_2_RSVD_00_0);
+ cbb->errlog3 = readl(cbb->regs + ERRLOGGER_2_ERRLOG3_0);
+ cbb->errlog4 = readl(cbb->regs + ERRLOGGER_2_ERRLOG4_0);
+ cbb->errlog5 = readl(cbb->regs + ERRLOGGER_2_ERRLOG5_0);
+ }
+
+ tegra_cbb_print_err(file, "\tErrLog0\t\t\t: %#x\n", cbb->errlog0);
+ is_fatal = print_errlog0(file, cbb);
+
+ tegra_cbb_print_err(file, "\tErrLog1\t\t\t: %#x\n", cbb->errlog1);
+ tegra_cbb_print_err(file, "\tErrLog2\t\t\t: %#x\n", cbb->errlog2);
+ print_errlog1_2(file, cbb, &info);
+
+ tegra_cbb_print_err(file, "\tErrLog3\t\t\t: %#x\n", cbb->errlog3);
+ tegra_cbb_print_err(file, "\tErrLog4\t\t\t: %#x\n", cbb->errlog4);
+ print_errlog3_4(file, cbb->errlog3, cbb->errlog4, &info, cbb->noc->noc_aperture,
+ cbb->noc->max_aperture);
+
+ tegra_cbb_print_err(file, "\tErrLog5\t\t\t: %#x\n", cbb->errlog5);
+
+ if (cbb->errlog5)
+ print_errlog5(file, cbb);
+
+ return is_fatal;
+}
+
+static bool print_errlog(struct seq_file *file, struct tegra194_cbb *cbb, u32 errvld)
+{
+ bool is_fatal = true;
+
+ pr_crit("**************************************\n");
+ pr_crit("CPU:%d, Error:%s\n", smp_processor_id(), cbb->noc->name);
+
+ if (errvld & 0x1)
+ is_fatal = print_errloggerX_info(file, cbb, 0);
+ else if (errvld & 0x2)
+ is_fatal = print_errloggerX_info(file, cbb, 1);
+ else if (errvld & 0x4)
+ is_fatal = print_errloggerX_info(file, cbb, 2);
+
+ tegra_cbb_error_clear(&cbb->base);
+ tegra_cbb_print_err(file, "\t**************************************\n");
+ return is_fatal;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_MUTEX(cbb_err_mutex);
+
+static int tegra194_cbb_debugfs_show(struct tegra_cbb *cbb, struct seq_file *file, void *data)
+{
+ struct tegra_cbb *noc;
+
+ mutex_lock(&cbb_err_mutex);
+
+ list_for_each_entry(noc, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+ u32 status;
+
+ status = tegra_cbb_get_status(noc);
+ if (status)
+ print_errlog(file, priv, status);
+ }
+
+ mutex_unlock(&cbb_err_mutex);
+
+ return 0;
+}
+#endif
+
+/*
+ * Handler for CBB errors from different initiators
+ */
+static irqreturn_t tegra194_cbb_err_isr(int irq, void *data)
+{
+ bool is_inband_err = false, is_fatal = false;
+ //struct tegra194_cbb *cbb = data;
+ struct tegra_cbb *noc;
+ unsigned long flags;
+ u8 mstr_id = 0;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ /* XXX only process interrupts for "cbb" instead of iterating over all NOCs? */
+ list_for_each_entry(noc, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+ u32 status = 0;
+
+ status = tegra_cbb_get_status(noc);
+
+ if (status && ((irq == priv->sec_irq) || (irq == priv->nonsec_irq))) {
+ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
+ smp_processor_id(), priv->noc->name, priv->res->start,
+ irq);
+
+ mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->errlog5) - 1;
+ is_fatal = print_errlog(NULL, priv, status);
+
+ /*
+ * If the illegal request is from the CCPLEX (0x1)
+ * initiator, then call BUG() to crash the system.
+ */
+ if ((mstr_id == 0x1) && priv->noc->erd_mask_inband_err)
+ is_inband_err = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ if (is_inband_err) {
+ if (is_fatal)
+ BUG();
+ else
+ WARN(true, "Warning due to CBB Error\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Register handler for CBB_NONSECURE & CBB_SECURE interrupts
+ * for reporting CBB errors
+ */
+static int tegra194_cbb_interrupt_enable(struct tegra_cbb *cbb)
+{
+ struct tegra194_cbb *priv = to_tegra194_cbb(cbb);
+ struct device *dev = cbb->dev;
+ int err;
+
+ if (priv->sec_irq) {
+ err = devm_request_irq(dev, priv->sec_irq, tegra194_cbb_err_isr, 0, dev_name(dev),
+ priv);
+ if (err) {
+ dev_err(dev, "failed to register interrupt %u: %d\n", priv->sec_irq, err);
+ return err;
+ }
+ }
+
+ if (priv->nonsec_irq) {
+ err = devm_request_irq(dev, priv->nonsec_irq, tegra194_cbb_err_isr, 0,
+ dev_name(dev), priv);
+ if (err) {
+ dev_err(dev, "failed to register interrupt %u: %d\n", priv->nonsec_irq,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra194_cbb_error_enable(struct tegra_cbb *cbb)
+{
+ /*
+ * Set “StallEn=1” to enable queuing of error packets until
+ * the first one is served & cleared
+ */
+ tegra_cbb_stall_enable(cbb);
+
+ /* Set “FaultEn=1” to enable the error reporting signal “Fault” */
+ tegra_cbb_fault_enable(cbb);
+}
+
+static const struct tegra_cbb_ops tegra194_cbb_ops = {
+ .get_status = tegra194_cbb_get_status,
+ .error_clear = tegra194_cbb_error_clear,
+ .fault_enable = tegra194_cbb_fault_enable,
+ .stall_enable = tegra194_cbb_stall_enable,
+ .error_enable = tegra194_cbb_error_enable,
+ .interrupt_enable = tegra194_cbb_interrupt_enable,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_show = tegra194_cbb_debugfs_show,
+#endif
+};
+
+static struct tegra194_cbb_noc_data tegra194_cbb_central_noc_data = {
+ .name = "cbb-noc",
+ .erd_mask_inband_err = true,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_cbbcentralnoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_cbbcentralnoc_apert_lookup),
+ .routeid_initflow = tegra194_cbbcentralnoc_routeid_initflow,
+ .routeid_targflow = tegra194_cbbcentralnoc_routeid_targflow,
+ .parse_routeid = cbbcentralnoc_parse_routeid,
+ .parse_userbits = cbbcentralnoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_aon_noc_data = {
+ .name = "aon-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_aonnoc_aperture_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_aonnoc_aperture_lookup),
+ .routeid_initflow = tegra194_aonnoc_routeid_initflow,
+ .routeid_targflow = tegra194_aonnoc_routeid_targflow,
+ .parse_routeid = aonnoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_bpmp_noc_data = {
+ .name = "bpmp-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_bpmpnoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_bpmpnoc_apert_lookup),
+ .routeid_initflow = tegra194_bpmpnoc_routeid_initflow,
+ .routeid_targflow = tegra194_bpmpnoc_routeid_targflow,
+ .parse_routeid = bpmpnoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_rce_noc_data = {
+ .name = "rce-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_scenoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
+ .routeid_initflow = tegra194_scenoc_routeid_initflow,
+ .routeid_targflow = tegra194_scenoc_routeid_targflow,
+ .parse_routeid = scenoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static struct tegra194_cbb_noc_data tegra194_sce_noc_data = {
+ .name = "sce-noc",
+ .erd_mask_inband_err = false,
+ .master_id = tegra194_master_id,
+ .noc_aperture = tegra194_scenoc_apert_lookup,
+ .max_aperture = ARRAY_SIZE(tegra194_scenoc_apert_lookup),
+ .routeid_initflow = tegra194_scenoc_routeid_initflow,
+ .routeid_targflow = tegra194_scenoc_routeid_targflow,
+ .parse_routeid = scenoc_parse_routeid,
+ .parse_userbits = clusternoc_parse_userbits
+};
+
+static const struct of_device_id tegra194_cbb_match[] = {
+ { .compatible = "nvidia,tegra194-cbb-noc", .data = &tegra194_cbb_central_noc_data },
+ { .compatible = "nvidia,tegra194-aon-noc", .data = &tegra194_aon_noc_data },
+ { .compatible = "nvidia,tegra194-bpmp-noc", .data = &tegra194_bpmp_noc_data },
+ { .compatible = "nvidia,tegra194-rce-noc", .data = &tegra194_rce_noc_data },
+ { .compatible = "nvidia,tegra194-sce-noc", .data = &tegra194_sce_noc_data },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tegra194_cbb_match);
+
+static int tegra194_cbb_get_bridges(struct tegra194_cbb *cbb, struct device_node *np)
+{
+ struct tegra_cbb *entry;
+ struct resource res;
+ unsigned long flags;
+ unsigned int i;
+ int err;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry(entry, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(entry);
+
+ if (priv->bridges) {
+ cbb->num_bridges = priv->num_bridges;
+ cbb->bridges = priv->bridges;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ if (!cbb->bridges) {
+ while (of_address_to_resource(np, cbb->num_bridges, &res) == 0)
+ cbb->num_bridges++;
+
+ cbb->bridges = devm_kcalloc(cbb->base.dev, cbb->num_bridges,
+ sizeof(*cbb->bridges), GFP_KERNEL);
+ if (!cbb->bridges)
+ return -ENOMEM;
+
+ for (i = 0; i < cbb->num_bridges; i++) {
+ err = of_address_to_resource(np, i, &cbb->bridges[i].res);
+ if (err < 0)
+ return err;
+
+ cbb->bridges[i].base = devm_ioremap_resource(cbb->base.dev,
+ &cbb->bridges[i].res);
+ if (IS_ERR(cbb->bridges[i].base)) {
+ dev_err(cbb->base.dev, "failed to map AXI2APB range\n");
+ return PTR_ERR(cbb->bridges[i].base);
+ }
+ }
+ }
+
+ if (cbb->num_bridges > 0) {
+ dev_dbg(cbb->base.dev, "AXI2APB bridge info present:\n");
+
+ for (i = 0; i < cbb->num_bridges; i++)
+ dev_dbg(cbb->base.dev, " %u: %pR\n", i, &cbb->bridges[i].res);
+ }
+
+ return 0;
+}
+
+static int tegra194_cbb_probe(struct platform_device *pdev)
+{
+ const struct tegra194_cbb_noc_data *noc;
+ struct tegra194_cbb *cbb;
+ struct device_node *np;
+ unsigned long flags;
+ int err;
+
+ noc = of_device_get_match_data(&pdev->dev);
+
+ if (noc->erd_mask_inband_err) {
+ /*
+ * Set Error Response Disable(ERD) bit to mask SError/inband
+ * error and only trigger interrupts for illegal access from
+ * CCPLEX initiator.
+ */
+ err = tegra194_miscreg_mask_serror();
+ if (err) {
+ dev_err(&pdev->dev, "couldn't mask inband errors\n");
+ return err;
+ }
+ }
+
+ cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
+ if (!cbb)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbb->base.node);
+ cbb->base.ops = &tegra194_cbb_ops;
+ cbb->base.dev = &pdev->dev;
+ cbb->noc = noc;
+
+ cbb->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &cbb->res);
+ if (IS_ERR(cbb->regs))
+ return PTR_ERR(cbb->regs);
+
+ err = tegra_cbb_get_irq(pdev, &cbb->nonsec_irq, &cbb->sec_irq);
+ if (err)
+ return err;
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,axi2apb", 0);
+ if (np) {
+ err = tegra194_cbb_get_bridges(cbb, np);
+ of_node_put(np);
+ if (err < 0)
+ return err;
+ }
+
+ platform_set_drvdata(pdev, cbb);
+
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ return tegra_cbb_register(&cbb->base);
+}
+
+static int tegra194_cbb_remove(struct platform_device *pdev)
+{
+ struct tegra194_cbb *cbb = platform_get_drvdata(pdev);
+ struct tegra_cbb *noc, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry_safe(noc, tmp, &cbb_list, node) {
+ struct tegra194_cbb *priv = to_tegra194_cbb(noc);
+
+ if (cbb->res->start == priv->res->start) {
+ list_del(&noc->node);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused tegra194_cbb_resume_noirq(struct device *dev)
+{
+ struct tegra194_cbb *cbb = dev_get_drvdata(dev);
+
+ tegra194_cbb_error_enable(&cbb->base);
+ dsb(sy);
+
+ dev_dbg(dev, "%s resumed\n", cbb->noc->name);
+ return 0;
+}
+
+static const struct dev_pm_ops tegra194_cbb_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, tegra194_cbb_resume_noirq)
+};
+
+static struct platform_driver tegra194_cbb_driver = {
+ .probe = tegra194_cbb_probe,
+ .remove = tegra194_cbb_remove,
+ .driver = {
+ .name = "tegra194-cbb",
+ .of_match_table = of_match_ptr(tegra194_cbb_match),
+ .pm = &tegra194_cbb_pm,
+ },
+};
+
+static int __init tegra194_cbb_init(void)
+{
+ return platform_driver_register(&tegra194_cbb_driver);
+}
+pure_initcall(tegra194_cbb_init);
+
+static void __exit tegra194_cbb_exit(void)
+{
+ platform_driver_unregister(&tegra194_cbb_driver);
+}
+module_exit(tegra194_cbb_exit);
+
+MODULE_AUTHOR("Sumit Gupta <sumitg@nvidia.com>");
+MODULE_DESCRIPTION("Control Backbone error handling driver for Tegra194");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
new file mode 100644
index 000000000000..3528f9e15d5c
--- /dev/null
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -0,0 +1,1113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved
+ *
+ * The driver handles errors from Control Backbone (CBB) version 2.0
+ * generated due to illegal accesses. The driver prints debug information
+ * about the failed transaction on receiving an interrupt from the Error Notifier.
+ * Error types supported by CBB2.0 are:
+ * UNSUPPORTED_ERR, PWRDOWN_ERR, TIMEOUT_ERR, FIREWALL_ERR, DECODE_ERR,
+ * SLAVE_ERR
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/cpufeature.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/tegra-cbb.h>
+
+#define FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0 0x0
+#define FABRIC_EN_CFG_STATUS_0_0 0x40
+#define FABRIC_EN_CFG_ADDR_INDEX_0_0 0x60
+#define FABRIC_EN_CFG_ADDR_LOW_0 0x80
+#define FABRIC_EN_CFG_ADDR_HI_0 0x84
+
+#define FABRIC_MN_MASTER_ERR_EN_0 0x200
+#define FABRIC_MN_MASTER_ERR_FORCE_0 0x204
+#define FABRIC_MN_MASTER_ERR_STATUS_0 0x208
+#define FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0 0x20c
+
+#define FABRIC_MN_MASTER_LOG_ERR_STATUS_0 0x300
+#define FABRIC_MN_MASTER_LOG_ADDR_LOW_0 0x304
+#define FABRIC_MN_MASTER_LOG_ADDR_HIGH_0 0x308
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0 0x30c
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0 0x310
+#define FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0 0x314
+#define FABRIC_MN_MASTER_LOG_USER_BITS0_0 0x318
+
+#define AXI_SLV_TIMEOUT_STATUS_0_0 0x8
+#define APB_BLOCK_TMO_STATUS_0 0xc00
+#define APB_BLOCK_NUM_TMO_OFFSET 0x20
+
+#define FAB_EM_EL_MSTRID GENMASK(29, 24)
+#define FAB_EM_EL_VQC GENMASK(17, 16)
+#define FAB_EM_EL_GRPSEC GENMASK(14, 8)
+#define FAB_EM_EL_FALCONSEC GENMASK(1, 0)
+
+#define FAB_EM_EL_FABID GENMASK(20, 16)
+#define FAB_EM_EL_SLAVEID GENMASK(7, 0)
+
+#define FAB_EM_EL_ACCESSID GENMASK(7, 0)
+
+#define FAB_EM_EL_AXCACHE GENMASK(27, 24)
+#define FAB_EM_EL_AXPROT GENMASK(22, 20)
+#define FAB_EM_EL_BURSTLENGTH GENMASK(19, 12)
+#define FAB_EM_EL_BURSTTYPE GENMASK(9, 8)
+#define FAB_EM_EL_BEATSIZE GENMASK(6, 4)
+#define FAB_EM_EL_ACCESSTYPE GENMASK(0, 0)
+
+#define USRBITS_MSTR_ID GENMASK(29, 24)
+
+#define REQ_SOCKET_ID GENMASK(27, 24)
+
+enum tegra234_cbb_fabric_ids {
+ CBB_FAB_ID,
+ SCE_FAB_ID,
+ RCE_FAB_ID,
+ DCE_FAB_ID,
+ AON_FAB_ID,
+ PSC_FAB_ID,
+ BPMP_FAB_ID,
+ FSI_FAB_ID,
+ MAX_FAB_ID,
+};
+
+struct tegra234_slave_lookup {
+ const char *name;
+ unsigned int offset;
+};
+
+struct tegra234_cbb_fabric {
+ const char *name;
+ phys_addr_t off_mask_erd;
+ bool erd_mask_inband_err;
+ const char * const *master_id;
+ unsigned int notifier_offset;
+ const struct tegra_cbb_error *errors;
+ const struct tegra234_slave_lookup *slave_map;
+};
+
+struct tegra234_cbb {
+ struct tegra_cbb base;
+
+ const struct tegra234_cbb_fabric *fabric;
+ struct resource *res;
+ void __iomem *regs;
+
+ int num_intr;
+ int sec_irq;
+
+ /* record */
+ void __iomem *mon;
+ unsigned int type;
+ u32 mask;
+ u64 access;
+ u32 mn_attr0;
+ u32 mn_attr1;
+ u32 mn_attr2;
+ u32 mn_user_bits;
+};
+
+static inline struct tegra234_cbb *to_tegra234_cbb(struct tegra_cbb *cbb)
+{
+ return container_of(cbb, struct tegra234_cbb, base);
+}
+
+static LIST_HEAD(cbb_list);
+static DEFINE_SPINLOCK(cbb_lock);
+
+static void tegra234_cbb_fault_enable(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ void __iomem *addr;
+
+ addr = priv->regs + priv->fabric->notifier_offset;
+ writel(0x1ff, addr + FABRIC_EN_CFG_INTERRUPT_ENABLE_0_0);
+ dsb(sy);
+}
+
+static void tegra234_cbb_error_clear(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+
+ writel(0x3f, priv->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ dsb(sy);
+}
+
+static u32 tegra234_cbb_get_status(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ void __iomem *addr;
+ u32 value;
+
+ addr = priv->regs + priv->fabric->notifier_offset;
+ value = readl(addr + FABRIC_EN_CFG_STATUS_0_0);
+ dsb(sy);
+
+ return value;
+}
+
+static void tegra234_cbb_mask_serror(struct tegra234_cbb *cbb)
+{
+ writel(0x1, cbb->regs + cbb->fabric->off_mask_erd);
+ dsb(sy);
+}
+
+static u32 tegra234_cbb_get_tmo_slv(void __iomem *addr)
+{
+ u32 timeout;
+
+ timeout = readl(addr);
+ return timeout;
+}
+
+static void tegra234_cbb_tmo_slv(struct seq_file *file, const char *slave, void __iomem *addr,
+ u32 status)
+{
+ tegra_cbb_print_err(file, "\t %s : %#x\n", slave, status);
+}
+
+static void tegra234_cbb_lookup_apbslv(struct seq_file *file, const char *slave,
+ void __iomem *base)
+{
+ unsigned int block = 0;
+ void __iomem *addr;
+ char name[64];
+ u32 status;
+
+ status = tegra234_cbb_get_tmo_slv(base);
+ if (status)
+ tegra_cbb_print_err(file, "\t %s_BLOCK_TMO_STATUS : %#x\n", slave, status);
+
+ while (status) {
+ if (status & BIT(0)) {
+ u32 timeout, clients, client = 0;
+
+ addr = base + APB_BLOCK_NUM_TMO_OFFSET + (block * 4);
+ timeout = tegra234_cbb_get_tmo_slv(addr);
+ clients = timeout;
+
+ while (timeout) {
+ if (timeout & BIT(0)) {
+ if (clients != 0xffffffff)
+ clients &= BIT(client);
+
+ sprintf(name, "%s_BLOCK%d_TMO", slave, block);
+
+ tegra234_cbb_tmo_slv(file, name, addr, clients);
+ }
+
+ timeout >>= 1;
+ client++;
+ }
+ }
+
+ status >>= 1;
+ block++;
+ }
+}
+
+static void tegra234_lookup_slave_timeout(struct seq_file *file, struct tegra234_cbb *cbb,
+ u8 slave_id, u8 fab_id)
+{
+ const struct tegra234_slave_lookup *map = cbb->fabric->slave_map;
+ void __iomem *addr;
+
+ /*
+ * 1) Get slave node name and address mapping using slave_id.
+ * 2) Check if the timed-out slave node is APB or AXI.
+ * 3) If AXI, then print the timeout register and reset the AXI slave
+ * using the <FABRIC>_SN_<>_SLV_TIMEOUT_STATUS_0_0 register.
+ * 4) If APB, then perform an additional lookup to find the client
+ * which timed out.
+ * a) Get the block number from the index of the set bit in the
+ * <FABRIC>_SN_AXI2APB_<>_BLOCK_TMO_STATUS_0 register.
+ * b) Get the address of the register corresponding to that block number,
+ * i.e. <FABRIC>_SN_AXI2APB_<>_BLOCK<index-set-bit>_TMO_0.
+ * c) Read the register from the above step to get the client_id which
+ * timed out, as indicated by the set bits.
+ * d) Reset the timed-out client and print the details.
+ * e) Go to step (a) until all set bits have been handled.
+ */
+
+ addr = cbb->regs + map[slave_id].offset;
+
+ if (strstr(map[slave_id].name, "AXI2APB")) {
+ addr += APB_BLOCK_TMO_STATUS_0;
+
+ tegra234_cbb_lookup_apbslv(file, map[slave_id].name, addr);
+ } else {
+ char name[64];
+ u32 status;
+
+ addr += AXI_SLV_TIMEOUT_STATUS_0_0;
+
+ status = tegra234_cbb_get_tmo_slv(addr);
+ if (status) {
+ sprintf(name, "%s_SLV_TIMEOUT_STATUS", map[slave_id].name);
+ tegra234_cbb_tmo_slv(file, name, addr, status);
+ }
+ }
+}
+
+static void tegra234_cbb_print_error(struct seq_file *file, struct tegra234_cbb *cbb, u32 status,
+ u32 overflow)
+{
+ unsigned int type = 0;
+
+ if (status & (status - 1))
+ tegra_cbb_print_err(file, "\t Multiple type of errors reported\n");
+
+ while (status) {
+ if (status & 0x1)
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[type].code);
+
+ status >>= 1;
+ type++;
+ }
+
+ type = 0;
+
+ while (overflow) {
+ if (overflow & 0x1)
+ tegra_cbb_print_err(file, "\t Overflow\t\t: Multiple %s\n",
+ cbb->fabric->errors[type].code);
+
+ overflow >>= 1;
+ type++;
+ }
+}
+
+static void print_errlog_err(struct seq_file *file, struct tegra234_cbb *cbb)
+{
+ u8 cache_type, prot_type, burst_length, mstr_id, grpsec, vqc, falconsec, beat_size;
+ u8 access_type, access_id, requester_socket_id, local_socket_id, slave_id, fab_id;
+ char fabric_name[20];
+ bool is_numa = false;
+ u8 burst_type;
+
+ if (num_possible_nodes() > 1)
+ is_numa = true;
+
+ mstr_id = FIELD_GET(FAB_EM_EL_MSTRID, cbb->mn_user_bits);
+ vqc = FIELD_GET(FAB_EM_EL_VQC, cbb->mn_user_bits);
+ grpsec = FIELD_GET(FAB_EM_EL_GRPSEC, cbb->mn_user_bits);
+ falconsec = FIELD_GET(FAB_EM_EL_FALCONSEC, cbb->mn_user_bits);
+
+ /*
+ * For SoCs with multiple NUMA nodes, print cross-socket access
+ * errors only if the initiator/master_id is CCPLEX, CPMU or GPU.
+ */
+ if (is_numa) {
+ local_socket_id = numa_node_id();
+ requester_socket_id = FIELD_GET(REQ_SOCKET_ID, cbb->mn_attr2);
+
+ if (requester_socket_id != local_socket_id) {
+ if ((mstr_id != 0x1) && (mstr_id != 0x2) && (mstr_id != 0xB))
+ return;
+ }
+ }
+
+ fab_id = FIELD_GET(FAB_EM_EL_FABID, cbb->mn_attr2);
+ slave_id = FIELD_GET(FAB_EM_EL_SLAVEID, cbb->mn_attr2);
+
+ access_id = FIELD_GET(FAB_EM_EL_ACCESSID, cbb->mn_attr1);
+
+ cache_type = FIELD_GET(FAB_EM_EL_AXCACHE, cbb->mn_attr0);
+ prot_type = FIELD_GET(FAB_EM_EL_AXPROT, cbb->mn_attr0);
+ burst_length = FIELD_GET(FAB_EM_EL_BURSTLENGTH, cbb->mn_attr0);
+ burst_type = FIELD_GET(FAB_EM_EL_BURSTTYPE, cbb->mn_attr0);
+ beat_size = FIELD_GET(FAB_EM_EL_BEATSIZE, cbb->mn_attr0);
+ access_type = FIELD_GET(FAB_EM_EL_ACCESSTYPE, cbb->mn_attr0);
+
+ tegra_cbb_print_err(file, "\n");
+ tegra_cbb_print_err(file, "\t Error Code\t\t: %s\n",
+ cbb->fabric->errors[cbb->type].code);
+
+ tegra_cbb_print_err(file, "\t MASTER_ID\t\t: %s\n", cbb->fabric->master_id[mstr_id]);
+ tegra_cbb_print_err(file, "\t Address\t\t: %#llx\n", cbb->access);
+
+ tegra_cbb_print_cache(file, cache_type);
+ tegra_cbb_print_prot(file, prot_type);
+
+ tegra_cbb_print_err(file, "\t Access_Type\t\t: %s", (access_type) ? "Write\n" : "Read\n");
+ tegra_cbb_print_err(file, "\t Access_ID\t\t: %#x", access_id);
+
+ if (fab_id == PSC_FAB_ID)
+ strcpy(fabric_name, "psc-fabric");
+ else if (fab_id == FSI_FAB_ID)
+ strcpy(fabric_name, "fsi-fabric");
+ else
+ strcpy(fabric_name, cbb->fabric->name);
+
+ if (is_numa) {
+ tegra_cbb_print_err(file, "\t Requester_Socket_Id\t: %#x\n",
+ requester_socket_id);
+ tegra_cbb_print_err(file, "\t Local_Socket_Id\t: %#x\n",
+ local_socket_id);
+ tegra_cbb_print_err(file, "\t No. of NUMA_NODES\t: %#x\n",
+ num_possible_nodes());
+ }
+
+ tegra_cbb_print_err(file, "\t Fabric\t\t: %s\n", fabric_name);
+ tegra_cbb_print_err(file, "\t Slave_Id\t\t: %#x\n", slave_id);
+ tegra_cbb_print_err(file, "\t Burst_length\t\t: %#x\n", burst_length);
+ tegra_cbb_print_err(file, "\t Burst_type\t\t: %#x\n", burst_type);
+ tegra_cbb_print_err(file, "\t Beat_size\t\t: %#x\n", beat_size);
+ tegra_cbb_print_err(file, "\t VQC\t\t\t: %#x\n", vqc);
+ tegra_cbb_print_err(file, "\t GRPSEC\t\t: %#x\n", grpsec);
+ tegra_cbb_print_err(file, "\t FALCONSEC\t\t: %#x\n", falconsec);
+
+ if ((fab_id == PSC_FAB_ID) || (fab_id == FSI_FAB_ID))
+ return;
+
+ if (!strcmp(cbb->fabric->errors[cbb->type].code, "TIMEOUT_ERR")) {
+ tegra234_lookup_slave_timeout(file, cbb, slave_id, fab_id);
+ return;
+ }
+
+ tegra_cbb_print_err(file, "\t Slave\t\t\t: %s\n", cbb->fabric->slave_map[slave_id].name);
+}
+
+static int print_errmonX_info(struct seq_file *file, struct tegra234_cbb *cbb)
+{
+ u32 overflow, status, error;
+
+ status = readl(cbb->mon + FABRIC_MN_MASTER_ERR_STATUS_0);
+ if (!status) {
+ pr_err("Error Notifier received a spurious notification\n");
+ return -ENODATA;
+ }
+
+ if (status == 0xffffffff) {
+ pr_err("CBB registers returning all 1's which is invalid\n");
+ return -EINVAL;
+ }
+
+ overflow = readl(cbb->mon + FABRIC_MN_MASTER_ERR_OVERFLOW_STATUS_0);
+
+ tegra234_cbb_print_error(file, cbb, status, overflow);
+
+ error = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ERR_STATUS_0);
+ if (!error) {
+ pr_info("Error Monitor doesn't have Error Logger\n");
+ return -EINVAL;
+ }
+
+ cbb->type = 0;
+
+ while (error) {
+ if (error & BIT(0)) {
+ u32 hi, lo;
+
+ hi = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_HIGH_0);
+ lo = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ADDR_LOW_0);
+
+ cbb->access = (u64)hi << 32 | lo;
+
+ cbb->mn_attr0 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES0_0);
+ cbb->mn_attr1 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES1_0);
+ cbb->mn_attr2 = readl(cbb->mon + FABRIC_MN_MASTER_LOG_ATTRIBUTES2_0);
+ cbb->mn_user_bits = readl(cbb->mon + FABRIC_MN_MASTER_LOG_USER_BITS0_0);
+
+ print_errlog_err(file, cbb);
+ }
+
+ cbb->type++;
+ error >>= 1;
+ }
+
+ return 0;
+}
+
+static int print_err_notifier(struct seq_file *file, struct tegra234_cbb *cbb, u32 status)
+{
+ unsigned int index = 0;
+ int err;
+
+ pr_crit("**************************************\n");
+ pr_crit("CPU:%d, Error:%s, Errmon:%d\n", smp_processor_id(),
+ cbb->fabric->name, status);
+
+ while (status) {
+ if (status & BIT(0)) {
+ unsigned int notifier = cbb->fabric->notifier_offset;
+ u32 hi, lo, mask = BIT(index);
+ phys_addr_t addr;
+ u64 offset;
+
+ writel(mask, cbb->regs + notifier + FABRIC_EN_CFG_ADDR_INDEX_0_0);
+ hi = readl(cbb->regs + notifier + FABRIC_EN_CFG_ADDR_HI_0);
+ lo = readl(cbb->regs + notifier + FABRIC_EN_CFG_ADDR_LOW_0);
+
+ addr = (u64)hi << 32 | lo;
+
+ offset = addr - cbb->res->start;
+ cbb->mon = cbb->regs + offset;
+ cbb->mask = BIT(index);
+
+ err = print_errmonX_info(file, cbb);
+ tegra234_cbb_error_clear(&cbb->base);
+ if (err)
+ return err;
+ }
+
+ status >>= 1;
+ index++;
+ }
+
+ tegra_cbb_print_err(file, "\t**************************************\n");
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_MUTEX(cbb_debugfs_mutex);
+
+static int tegra234_cbb_debugfs_show(struct tegra_cbb *cbb, struct seq_file *file, void *data)
+{
+ int err = 0;
+
+ mutex_lock(&cbb_debugfs_mutex);
+
+ list_for_each_entry(cbb, &cbb_list, node) {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ u32 status;
+
+ status = tegra_cbb_get_status(&priv->base);
+ if (status) {
+ err = print_err_notifier(file, priv, status);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&cbb_debugfs_mutex);
+ return err;
+}
+#endif
+
+/*
+ * Handler for CBB errors
+ */
+static irqreturn_t tegra234_cbb_isr(int irq, void *data)
+{
+ bool is_inband_err = false;
+ struct tegra_cbb *cbb;
+ unsigned long flags;
+ u8 mstr_id;
+ int err;
+
+ spin_lock_irqsave(&cbb_lock, flags);
+
+ list_for_each_entry(cbb, &cbb_list, node) {
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+ u32 status = tegra_cbb_get_status(cbb);
+
+ if (status && (irq == priv->sec_irq)) {
+ tegra_cbb_print_err(NULL, "CPU:%d, Error: %s@%llx, irq=%d\n",
+ smp_processor_id(), priv->fabric->name,
+ priv->res->start, irq);
+
+ err = print_err_notifier(NULL, priv, status);
+ if (err)
+ goto unlock;
+
+ mstr_id = FIELD_GET(USRBITS_MSTR_ID, priv->mn_user_bits);
+
+ /*
+ * If the illegal request is from the CCPLEX (id: 0x1) master,
+ * flag it as an in-band error.
+ */
+ if ((mstr_id == 0x1) && priv->fabric->off_mask_erd)
+ is_inband_err = 1;
+ }
+ }
+
+unlock:
+ spin_unlock_irqrestore(&cbb_lock, flags);
+ WARN_ON(is_inband_err);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Register handler for CBB_SECURE interrupt for reporting errors
+ */
+static int tegra234_cbb_interrupt_enable(struct tegra_cbb *cbb)
+{
+ struct tegra234_cbb *priv = to_tegra234_cbb(cbb);
+
+ if (priv->sec_irq) {
+ int err = devm_request_irq(cbb->dev, priv->sec_irq, tegra234_cbb_isr, 0,
+ dev_name(cbb->dev), priv);
+ if (err) {
+ dev_err(cbb->dev, "failed to register interrupt %u: %d\n", priv->sec_irq,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra234_cbb_error_enable(struct tegra_cbb *cbb)
+{
+ tegra_cbb_fault_enable(cbb);
+}
+
+static const struct tegra_cbb_ops tegra234_cbb_ops = {
+ .get_status = tegra234_cbb_get_status,
+ .error_clear = tegra234_cbb_error_clear,
+ .fault_enable = tegra234_cbb_fault_enable,
+ .error_enable = tegra234_cbb_error_enable,
+ .interrupt_enable = tegra234_cbb_interrupt_enable,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_show = tegra234_cbb_debugfs_show,
+#endif
+};
+
+static const char * const tegra234_master_id[] = {
+ [0x00] = "TZ",
+ [0x01] = "CCPLEX",
+ [0x02] = "CCPMU",
+ [0x03] = "BPMP_FW",
+ [0x04] = "AON",
+ [0x05] = "SCE",
+ [0x06] = "GPCDMA_P",
+ [0x07] = "TSECA_NONSECURE",
+ [0x08] = "TSECA_LIGHTSECURE",
+ [0x09] = "TSECA_HEAVYSECURE",
+ [0x0a] = "CORESIGHT",
+ [0x0b] = "APE",
+ [0x0c] = "PEATRANS",
+ [0x0d] = "JTAGM_DFT",
+ [0x0e] = "RCE",
+ [0x0f] = "DCE",
+ [0x10] = "PSC_FW_USER",
+ [0x11] = "PSC_FW_SUPERVISOR",
+ [0x12] = "PSC_FW_MACHINE",
+ [0x13] = "PSC_BOOT",
+ [0x14] = "BPMP_BOOT",
+ [0x15] = "NVDEC_NONSECURE",
+ [0x16] = "NVDEC_LIGHTSECURE",
+ [0x17] = "NVDEC_HEAVYSECURE",
+ [0x18] = "CBB_INTERNAL",
+ [0x19] = "RSVD"
+};
+
+static const struct tegra_cbb_error tegra234_cbb_errors[] = {
+ {
+ .code = "SLAVE_ERR",
+ .desc = "Slave being accessed responded with an error"
+ }, {
+ .code = "DECODE_ERR",
+ .desc = "Attempt to access an address hole"
+ }, {
+ .code = "FIREWALL_ERR",
+ .desc = "Attempt to access a region which is firewall protected"
+ }, {
+ .code = "TIMEOUT_ERR",
+ .desc = "No response returned by slave"
+ }, {
+ .code = "PWRDOWN_ERR",
+ .desc = "Attempt to access a portion of fabric that is powered down"
+ }, {
+ .code = "UNSUPPORTED_ERR",
+ .desc = "Attempt to access a slave through an unsupported access"
+ }
+};
+
+static const struct tegra234_slave_lookup tegra234_aon_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST", 0x14000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_aon_fabric = {
+ .name = "aon-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_aon_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x17000,
+};
+
+static const struct tegra234_slave_lookup tegra234_bpmp_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_bpmp_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct tegra234_slave_lookup tegra234_cbb_slave_map[] = {
+ { "AON", 0x40000 },
+ { "BPMP", 0x41000 },
+ { "CBB", 0x42000 },
+ { "HOST1X", 0x43000 },
+ { "STM", 0x44000 },
+ { "FSI", 0x45000 },
+ { "PSC", 0x46000 },
+ { "PCIE_C1", 0x47000 },
+ { "PCIE_C2", 0x48000 },
+ { "PCIE_C3", 0x49000 },
+ { "PCIE_C0", 0x4a000 },
+ { "PCIE_C4", 0x4b000 },
+ { "GPU", 0x4c000 },
+ { "SMMU0", 0x4d000 },
+ { "SMMU1", 0x4e000 },
+ { "SMMU2", 0x4f000 },
+ { "SMMU3", 0x50000 },
+ { "SMMU4", 0x51000 },
+ { "PCIE_C10", 0x52000 },
+ { "PCIE_C7", 0x53000 },
+ { "PCIE_C8", 0x54000 },
+ { "PCIE_C9", 0x55000 },
+ { "PCIE_C5", 0x56000 },
+ { "PCIE_C6", 0x57000 },
+ { "DCE", 0x58000 },
+ { "RCE", 0x59000 },
+ { "SCE", 0x5a000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_10", 0x71000 },
+ { "AXI2APB_11", 0x72000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_14", 0x75000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_17", 0x78000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7a000 },
+ { "AXI2APB_2", 0x7b000 },
+ { "AXI2APB_20", 0x7c000 },
+ { "AXI2APB_21", 0x7d000 },
+ { "AXI2APB_22", 0x7e000 },
+ { "AXI2APB_23", 0x7f000 },
+ { "AXI2APB_25", 0x80000 },
+ { "AXI2APB_26", 0x81000 },
+ { "AXI2APB_27", 0x82000 },
+ { "AXI2APB_28", 0x83000 },
+ { "AXI2APB_29", 0x84000 },
+ { "AXI2APB_30", 0x85000 },
+ { "AXI2APB_31", 0x86000 },
+ { "AXI2APB_32", 0x87000 },
+ { "AXI2APB_33", 0x88000 },
+ { "AXI2APB_34", 0x89000 },
+ { "AXI2APB_35", 0x92000 },
+ { "AXI2APB_4", 0x8b000 },
+ { "AXI2APB_5", 0x8c000 },
+ { "AXI2APB_6", 0x8d000 },
+ { "AXI2APB_7", 0x8e000 },
+ { "AXI2APB_8", 0x8f000 },
+ { "AXI2APB_9", 0x90000 },
+ { "AXI2APB_3", 0x91000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_cbb_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x3a004
+};
+
+static const struct tegra234_slave_lookup tegra234_dce_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_dce_fabric = {
+ .name = "dce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_dce_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct tegra234_slave_lookup tegra234_rce_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_rce_fabric = {
+ .name = "rce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_rce_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct tegra234_slave_lookup tegra234_sce_slave_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AST0", 0x15000 },
+ { "AST1", 0x16000 },
+ { "CBB", 0x17000 },
+ { "CPU", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
+ .name = "sce-fabric",
+ .master_id = tegra234_master_id,
+ .slave_map = tegra234_sce_slave_map,
+ .errors = tegra234_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const char * const tegra241_master_id[] = {
+ [0x0] = "TZ",
+ [0x1] = "CCPLEX",
+ [0x2] = "CCPMU",
+ [0x3] = "BPMP_FW",
+ [0x4] = "PSC_FW_USER",
+ [0x5] = "PSC_FW_SUPERVISOR",
+ [0x6] = "PSC_FW_MACHINE",
+ [0x7] = "PSC_BOOT",
+ [0x8] = "BPMP_BOOT",
+ [0x9] = "JTAGM_DFT",
+ [0xa] = "CORESIGHT",
+ [0xb] = "GPU",
+ [0xc] = "PEATRANS",
+ [0xd ... 0x3f] = "RSVD"
+};
+
+/*
+ * Possible causes for Slave and Timeout errors.
+ * SLAVE_ERR:
+ *  The slave being accessed responded with an error. A slave can return
+ *  an error for various reasons: an unsupported access, a clamp setting
+ *  when power-gated, a register-level firewall (SCR), an address hole
+ *  within the slave, etc.
+ *
+ * TIMEOUT_ERR:
+ *  No response was returned by the slave. This can happen when the slave
+ *  is clock-gated, under reset or powered down, or when the slave cannot
+ *  respond due to an internal issue.
+ */
+static const struct tegra_cbb_error tegra241_cbb_errors[] = {
+ {
+ .code = "SLAVE_ERR",
+ .desc = "Slave being accessed responded with an error."
+ }, {
+ .code = "DECODE_ERR",
+ .desc = "Attempt to access an address hole or Reserved region of memory."
+ }, {
+ .code = "FIREWALL_ERR",
+ .desc = "Attempt to access a region which is firewalled."
+ }, {
+ .code = "TIMEOUT_ERR",
+ .desc = "No response returned by slave."
+ }, {
+ .code = "PWRDOWN_ERR",
+ .desc = "Attempt to access a portion of the fabric that is powered down."
+ }, {
+ .code = "UNSUPPORTED_ERR",
+ .desc = "Attempt to access a slave through an unsupported access."
+ }, {
+ .code = "POISON_ERR",
+ .desc = "Slave responds with poison error to indicate error in data."
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "RSVD"
+ }, {
+ .code = "NO_SUCH_ADDRESS_ERR",
+ .desc = "The address belongs to the pri_target range but there is no register "
+ "implemented at the address."
+ }, {
+ .code = "TASK_ERR",
+ .desc = "Attempt to update a PRI task when the current task has still not "
+ "completed."
+ }, {
+ .code = "EXTERNAL_ERR",
+ .desc = "Indicates that an external PRI register access met with an error due to "
+ "any issue in the unit."
+ }, {
+ .code = "INDEX_ERR",
+ .desc = "Applicable to PRI index aperture pair, when the programmed index is "
+ "outside the range defined in the manual."
+ }, {
+ .code = "RESET_ERR",
+ .desc = "Target in Reset Error: Attempt to access a SubPri or external PRI "
+ "register but they are in reset."
+ }, {
+ .code = "REGISTER_RST_ERR",
+ .desc = "Attempt to access a PRI register but the register is partial or "
+ "completely in reset."
+ }, {
+ .code = "POWER_GATED_ERR",
+ .desc = "Returned by external PRI client when the external access goes to a power "
+ "gated domain."
+ }, {
+ .code = "SUBPRI_FS_ERR",
+ .desc = "Subpri is floorswept: Attempt to access a subpri through the main pri "
+ "target but subPri logic is floorswept."
+ }, {
+ .code = "SUBPRI_CLK_OFF_ERR",
+ .desc = "Subpri clock is off: Attempt to access a subpri through the main pri "
+ "target but the subPri's clock is gated/off."
+ },
+};
+
+static const struct tegra234_slave_lookup tegra241_cbb_slave_map[] = {
+ { "CCPLEX", 0x50000 },
+ { "PCIE_C8", 0x51000 },
+ { "PCIE_C9", 0x52000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "AON", 0x5b000 },
+ { "BPMP", 0x5c000 },
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "PSC", 0x5d000 },
+ { "STM", 0x5e000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_10", 0x71000 },
+ { "AXI2APB_11", 0x72000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_14", 0x75000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_17", 0x78000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7a000 },
+ { "AXI2APB_2", 0x7b000 },
+ { "AXI2APB_20", 0x7c000 },
+ { "AXI2APB_4", 0x87000 },
+ { "AXI2APB_5", 0x88000 },
+ { "AXI2APB_6", 0x89000 },
+ { "AXI2APB_7", 0x8a000 },
+ { "AXI2APB_8", 0x8b000 },
+ { "AXI2APB_9", 0x8c000 },
+ { "AXI2APB_3", 0x8d000 },
+ { "AXI2APB_21", 0x7d000 },
+ { "AXI2APB_22", 0x7e000 },
+ { "AXI2APB_23", 0x7f000 },
+ { "AXI2APB_24", 0x80000 },
+ { "AXI2APB_25", 0x81000 },
+ { "AXI2APB_26", 0x82000 },
+ { "AXI2APB_27", 0x83000 },
+ { "AXI2APB_28", 0x84000 },
+ { "PCIE_C4", 0x53000 },
+ { "PCIE_C5", 0x54000 },
+ { "PCIE_C6", 0x55000 },
+ { "PCIE_C7", 0x56000 },
+ { "PCIE_C2", 0x57000 },
+ { "PCIE_C3", 0x58000 },
+ { "PCIE_C0", 0x59000 },
+ { "PCIE_C1", 0x5a000 },
+ { "AXI2APB_29", 0x85000 },
+ { "AXI2APB_30", 0x86000 },
+};
+
+static const struct tegra234_cbb_fabric tegra241_cbb_fabric = {
+ .name = "cbb-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_cbb_slave_map,
+ .errors = tegra241_cbb_errors,
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x40004,
+};
+
+static const struct tegra234_slave_lookup tegra241_bpmp_slave_map[] = {
+ { "RSVD", 0x00000 },
+ { "RSVD", 0x00000 },
+ { "CBB", 0x15000 },
+ { "CPU", 0x16000 },
+ { "AXI2APB", 0x00000 },
+ { "DBB0", 0x17000 },
+ { "DBB1", 0x18000 },
+};
+
+static const struct tegra234_cbb_fabric tegra241_bpmp_fabric = {
+ .name = "bpmp-fabric",
+ .master_id = tegra241_master_id,
+ .slave_map = tegra241_bpmp_slave_map,
+ .errors = tegra241_cbb_errors,
+ .notifier_offset = 0x19000,
+};
+
+static const struct of_device_id tegra234_cbb_dt_ids[] = {
+ { .compatible = "nvidia,tegra234-cbb-fabric", .data = &tegra234_cbb_fabric },
+ { .compatible = "nvidia,tegra234-aon-fabric", .data = &tegra234_aon_fabric },
+ { .compatible = "nvidia,tegra234-bpmp-fabric", .data = &tegra234_bpmp_fabric },
+ { .compatible = "nvidia,tegra234-dce-fabric", .data = &tegra234_dce_fabric },
+ { .compatible = "nvidia,tegra234-rce-fabric", .data = &tegra234_rce_fabric },
+ { .compatible = "nvidia,tegra234-sce-fabric", .data = &tegra234_sce_fabric },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tegra234_cbb_dt_ids);
+
+struct tegra234_cbb_acpi_uid {
+ const char *hid;
+ const char *uid;
+ const struct tegra234_cbb_fabric *fabric;
+};
+
+static const struct tegra234_cbb_acpi_uid tegra234_cbb_acpi_uids[] = {
+ { "NVDA1070", "1", &tegra241_cbb_fabric },
+ { "NVDA1070", "2", &tegra241_bpmp_fabric },
+ { },
+};
+
+static const struct tegra234_cbb_fabric *
+tegra234_cbb_acpi_get_fabric(struct acpi_device *adev)
+{
+ const struct tegra234_cbb_acpi_uid *entry;
+
+ for (entry = tegra234_cbb_acpi_uids; entry->hid; entry++) {
+ if (acpi_dev_hid_uid_match(adev, entry->hid, entry->uid))
+ return entry->fabric;
+ }
+
+ return NULL;
+}
+
+static const struct acpi_device_id tegra241_cbb_acpi_ids[] = {
+ { "NVDA1070" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, tegra241_cbb_acpi_ids);
+
+static int tegra234_cbb_probe(struct platform_device *pdev)
+{
+ const struct tegra234_cbb_fabric *fabric;
+ struct tegra234_cbb *cbb;
+ unsigned long flags = 0;
+ int err;
+
+ if (pdev->dev.of_node) {
+ fabric = of_device_get_match_data(&pdev->dev);
+ } else {
+ struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+
+ if (!device)
+ return -ENODEV;
+
+ fabric = tegra234_cbb_acpi_get_fabric(device);
+ if (!fabric) {
+ dev_err(&pdev->dev, "no device match found\n");
+ return -ENODEV;
+ }
+ }
+
+ cbb = devm_kzalloc(&pdev->dev, sizeof(*cbb), GFP_KERNEL);
+ if (!cbb)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cbb->base.node);
+ cbb->base.ops = &tegra234_cbb_ops;
+ cbb->base.dev = &pdev->dev;
+ cbb->fabric = fabric;
+
+ cbb->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &cbb->res);
+ if (IS_ERR(cbb->regs))
+ return PTR_ERR(cbb->regs);
+
+ err = tegra_cbb_get_irq(pdev, NULL, &cbb->sec_irq);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, cbb);
+
+ spin_lock_irqsave(&cbb_lock, flags);
+ list_add(&cbb->base.node, &cbb_list);
+ spin_unlock_irqrestore(&cbb_lock, flags);
+
+ /* set the ERD bit to mask SError and generate an interrupt to report errors */
+ if (cbb->fabric->off_mask_erd)
+ tegra234_cbb_mask_serror(cbb);
+
+ return tegra_cbb_register(&cbb->base);
+}
+
+static int tegra234_cbb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int __maybe_unused tegra234_cbb_resume_noirq(struct device *dev)
+{
+ struct tegra234_cbb *cbb = dev_get_drvdata(dev);
+
+ tegra234_cbb_error_enable(&cbb->base);
+
+ dev_dbg(dev, "%s resumed\n", cbb->fabric->name);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra234_cbb_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, tegra234_cbb_resume_noirq)
+};
+
+static struct platform_driver tegra234_cbb_driver = {
+ .probe = tegra234_cbb_probe,
+ .remove = tegra234_cbb_remove,
+ .driver = {
+ .name = "tegra234-cbb",
+ .of_match_table = tegra234_cbb_dt_ids,
+ .acpi_match_table = tegra241_cbb_acpi_ids,
+ .pm = &tegra234_cbb_pm,
+ },
+};
+
+static int __init tegra234_cbb_init(void)
+{
+ return platform_driver_register(&tegra234_cbb_driver);
+}
+pure_initcall(tegra234_cbb_init);
+
+static void __exit tegra234_cbb_exit(void)
+{
+ platform_driver_unregister(&tegra234_cbb_driver);
+}
+module_exit(tegra234_cbb_exit);
+
+MODULE_DESCRIPTION("Control Backbone 2.0 error handling driver for Tegra234");
+MODULE_LICENSE("GPL");
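
The slave lookup tables above pair each fabric target with the offset of its
error-logging registers inside the fabric's MMIO window. Below is a minimal
sketch of how such a table could be consulted to turn a captured offset back
into a target name; the helper is hypothetical (not part of this patch) and
the member names 'name' and 'offset' are assumed from the positional
initializers.

/* Hypothetical helper, assuming members 'name' and 'offset'. */
static const char *
tegra234_lookup_slave_name(const struct tegra234_slave_lookup *map,
			   unsigned int count, u32 offset)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (map[i].offset == offset)
			return map[i].name;

	return "UNKNOWN";
}

/*
 * Example: tegra234_lookup_slave_name(tegra234_aon_slave_map,
 * ARRAY_SIZE(tegra234_aon_slave_map), 0x15000) returns "CBB".
 */
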
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index b0a8405dbdb1..6542267a224d 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -568,6 +568,7 @@ static int __init tegra_init_fuse(void)
np = of_find_matching_node(NULL, car_match);
if (np) {
void __iomem *base = of_iomap(np, 0);
+ of_node_put(np);
if (base) {
tegra_enable_fuse_clk(base);
iounmap(base);
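
The one-line addition above follows the usual device-tree refcount rule:
of_find_matching_node() returns a node with its refcount raised, so the
caller must drop it with of_node_put() once done. A small sketch of the
pattern (only the OF helpers are real; the wrapper is made up):

#include <linux/of.h>

static void example_use_matching_node(const struct of_device_id *matches)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, matches);
	if (np) {
		/* ... use the node ... */
		of_node_put(np);	/* balance the implicit of_node_get() */
	}
}
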
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 590c862538d0..3351bd872ab2 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -16,12 +16,16 @@
#define FUSE_SKU_INFO 0x10
+#define ERD_ERR_CONFIG 0x120c
+#define ERD_MASK_INBAND_ERR 0x1
+
#define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT 4
#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_LONG \
(0xf << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
#define PMC_STRAPPING_OPT_A_RAM_CODE_MASK_SHORT \
(0x3 << PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT)
+static void __iomem *apbmisc_base;
static bool long_ram_code;
static u32 strapping;
static u32 chipid;
@@ -93,6 +97,28 @@ u32 tegra_read_ram_code(void)
}
EXPORT_SYMBOL_GPL(tegra_read_ram_code);
+/*
+ * This function sets the ERD (Error Response Disable) bit, which
+ * masks inband errors so that the CBB always sends an OKAY response
+ * to the master that caused the error.
+ */
+int tegra194_miscreg_mask_serror(void)
+{
+ if (!apbmisc_base)
+ return -EPROBE_DEFER;
+
+ if (!of_machine_is_compatible("nvidia,tegra194")) {
+ WARN(1, "Only supported for Tegra194 devices!\n");
+ return -EOPNOTSUPP;
+ }
+
+ writel_relaxed(ERD_MASK_INBAND_ERR,
+ apbmisc_base + ERD_ERR_CONFIG);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra194_miscreg_mask_serror);
+
static const struct of_device_id apbmisc_match[] __initconst = {
{ .compatible = "nvidia,tegra20-apbmisc", },
{ .compatible = "nvidia,tegra186-misc", },
@@ -134,7 +160,7 @@ void __init tegra_init_revision(void)
void __init tegra_init_apbmisc(void)
{
- void __iomem *apbmisc_base, *strapping_base;
+ void __iomem *strapping_base;
struct resource apbmisc, straps;
struct device_node *np;
@@ -182,12 +208,12 @@ void __init tegra_init_apbmisc(void)
*/
if (of_address_to_resource(np, 0, &apbmisc) < 0) {
pr_err("failed to get APBMISC registers\n");
- return;
+ goto put;
}
if (of_address_to_resource(np, 1, &straps) < 0) {
pr_err("failed to get strapping options registers\n");
- return;
+ goto put;
}
}
@@ -196,7 +222,6 @@ void __init tegra_init_apbmisc(void)
pr_err("failed to map APBMISC registers\n");
} else {
chipid = readl_relaxed(apbmisc_base + 4);
- iounmap(apbmisc_base);
}
strapping_base = ioremap(straps.start, resource_size(&straps));
@@ -208,4 +233,7 @@ void __init tegra_init_apbmisc(void)
}
long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+
+put:
+ of_node_put(np);
}
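
As the comment above notes, tegra194_miscreg_mask_serror() converts CBB
in-band errors into interrupt reports instead of SErrors, and returns
-EPROBE_DEFER until tegra_init_apbmisc() has mapped the registers. A hedged
sketch of a probe-time caller (the wrapper itself is hypothetical, not part
of this patch):

static int example_mask_serror(struct platform_device *pdev)
{
	int err;

	err = tegra194_miscreg_mask_serror();
	if (err == -EPROBE_DEFER)
		return err;	/* retry once the APBMISC region is mapped */
	if (err)
		dev_warn(&pdev->dev, "failed to mask SError: %d\n", err);

	return 0;
}
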
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 6a4b8f7e7948..678e8bc8a45d 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -296,6 +296,17 @@ struct tegra_wake_event {
} gpio;
};
+#define TEGRA_WAKE_SIMPLE(_name, _id) \
+ { \
+ .name = _name, \
+ .id = _id, \
+ .irq = 0, \
+ .gpio = { \
+ .instance = UINT_MAX, \
+ .pin = UINT_MAX, \
+ }, \
+ }
+
#define TEGRA_WAKE_IRQ(_name, _id, _irq) \
{ \
.name = _name, \
@@ -2239,6 +2250,7 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < soc->num_wake_events; i++) {
const struct tegra_wake_event *event = &soc->wake_events[i];
+ /* IRQ and simple wake events */
if (fwspec->param_count == 2) {
struct irq_fwspec spec;
@@ -2251,6 +2263,12 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
if (err < 0)
break;
+ /* simple hierarchies stop at the PMC level */
+ if (event->irq == 0) {
+ err = irq_domain_disconnect_hierarchy(domain->parent, virq);
+ break;
+ }
+
spec.fwnode = &pmc->dev->of_node->fwnode;
spec.param_count = 3;
spec.param[0] = GIC_SPI;
@@ -2263,6 +2281,7 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
break;
}
+ /* GPIO wake events */
if (fwspec->param_count == 3) {
if (event->gpio.instance != fwspec->param[0] ||
event->gpio.pin != fwspec->param[1])
@@ -2274,7 +2293,7 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
/* GPIO hierarchies stop at the PMC level */
if (!err && domain->parent)
- err = irq_domain_disconnect_hierarchy(domain->parent,
+ err = irq_domain_disconnect_hierarchy(domain->parent,
virq);
break;
}
@@ -2885,17 +2904,10 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->scratch = base;
}
- pmc->clk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(pmc->clk)) {
- err = PTR_ERR(pmc->clk);
-
- if (err != -ENOENT) {
- dev_err(&pdev->dev, "failed to get pclk: %d\n", err);
- return err;
- }
-
- pmc->clk = NULL;
- }
+ pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(pmc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pmc->clk),
+ "failed to get pclk\n");
/*
* PMC should be last resort for restarting since it soft-resets
@@ -3757,6 +3769,13 @@ static const struct tegra_wake_event tegra194_wake_events[] = {
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
+ TEGRA_WAKE_SIMPLE("usb3-port-0", 76),
+ TEGRA_WAKE_SIMPLE("usb3-port-1", 77),
+ TEGRA_WAKE_SIMPLE("usb3-port-2-3", 78),
+ TEGRA_WAKE_SIMPLE("usb2-port-0", 79),
+ TEGRA_WAKE_SIMPLE("usb2-port-1", 80),
+ TEGRA_WAKE_SIMPLE("usb2-port-2", 81),
+ TEGRA_WAKE_SIMPLE("usb2-port-3", 82),
};
static const struct tegra_pmc_soc tegra194_pmc_soc = {
@@ -4025,7 +4044,7 @@ static int __init tegra_pmc_early_init(void)
return -ENXIO;
}
- if (np) {
+ if (of_device_is_available(np)) {
pmc->soc = match->data;
if (pmc->soc->maybe_tz_only)
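
The pclk change above switches to devm_clk_get_optional(), which returns
NULL rather than an error when the clock is simply absent, combined with
dev_err_probe() so that -EPROBE_DEFER is handled quietly. A small sketch of
the pattern with a made-up wrapper name:

static int example_get_pclk(struct platform_device *pdev, struct clk **clk)
{
	*clk = devm_clk_get_optional(&pdev->dev, "pclk");
	if (IS_ERR(*clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(*clk),
				     "failed to get pclk\n");

	/* *clk is NULL when no "pclk" is described; clk_* calls accept NULL */
	return 0;
}
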
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 8d4000664fa3..76515c33e639 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -11,11 +11,12 @@
#include "bus.h"
#include "sysfs_local.h"
-static DEFINE_IDA(sdw_ida);
+static DEFINE_IDA(sdw_bus_ida);
+static DEFINE_IDA(sdw_peripheral_ida);
static int sdw_get_id(struct sdw_bus *bus)
{
- int rc = ida_alloc(&sdw_ida, GFP_KERNEL);
+ int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
if (rc < 0)
return rc;
@@ -75,7 +76,6 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
/*
* Initialize multi_link flag
- * TODO: populate this flag by reading property from FW node
*/
bus->multi_link = false;
if (bus->ops->read_prop) {
@@ -157,9 +157,11 @@ static int sdw_delete_slave(struct device *dev, void *data)
mutex_lock(&bus->bus_lock);
- if (slave->dev_num) /* clear dev_num if assigned */
+ if (slave->dev_num) { /* clear dev_num if assigned */
clear_bit(slave->dev_num, bus->assigned);
-
+ if (bus->dev_num_ida_min)
+ ida_free(&sdw_peripheral_ida, slave->dev_num);
+ }
list_del_init(&slave->node);
mutex_unlock(&bus->bus_lock);
@@ -179,7 +181,7 @@ void sdw_bus_master_delete(struct sdw_bus *bus)
sdw_master_device_del(bus);
sdw_bus_debugfs_exit(bus);
- ida_free(&sdw_ida, bus->id);
+ ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);
@@ -298,6 +300,38 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
}
/**
+ * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
+ * @bus: SDW bus
+ * @sync_delay: Delay before reading status
+ */
+void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
+{
+ u32 status;
+
+ if (!bus->ops->read_ping_status)
+ return;
+
+ /*
+ * wait for peripheral to sync if desired. 10-15ms should be more than
+ * enough in most cases.
+ */
+ if (sync_delay)
+ usleep_range(10000, 15000);
+
+ mutex_lock(&bus->msg_lock);
+
+ status = bus->ops->read_ping_status(bus);
+
+ mutex_unlock(&bus->msg_lock);
+
+ if (!status)
+ dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
+ else
+ dev_dbg(bus->dev, "PING status: %#x\n", status);
+}
+EXPORT_SYMBOL(sdw_show_ping_status);
+
+/**
* sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
* @bus: SDW bus
* @msg: SDW message to be xfered
@@ -639,10 +673,18 @@ static int sdw_get_device_num(struct sdw_slave *slave)
{
int bit;
- bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
- if (bit == SDW_MAX_DEVICES) {
- bit = -ENODEV;
- goto err;
+ if (slave->bus->dev_num_ida_min) {
+ bit = ida_alloc_range(&sdw_peripheral_ida,
+ slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
+ GFP_KERNEL);
+ if (bit < 0)
+ goto err;
+ } else {
+ bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ if (bit == SDW_MAX_DEVICES) {
+ bit = -ENODEV;
+ goto err;
+ }
}
/*
@@ -719,7 +761,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
}
EXPORT_SYMBOL(sdw_extract_slave_id);
-static int sdw_program_device_num(struct sdw_bus *bus)
+static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
struct sdw_slave *slave, *_s;
@@ -729,6 +771,8 @@ static int sdw_program_device_num(struct sdw_bus *bus)
int count = 0, ret;
u64 addr;
+ *programmed = false;
+
/* No Slave, so use raw xfer api */
ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
@@ -764,6 +808,16 @@ static int sdw_program_device_num(struct sdw_bus *bus)
found = true;
/*
+ * To prevent skipping state-machine stages, don't
+ * program a device until we've seen it report UNATTACHED.
+ * Must return here because no other device on #0
+ * can be detected until this one has been
+ * assigned a device ID.
+ */
+ if (slave->status != SDW_SLAVE_UNATTACHED)
+ return 0;
+
+ /*
* Assign a new dev_num to this Slave and
* not mark it present. It will be marked
* present after it reports ATTACHED on new
@@ -777,6 +831,8 @@ static int sdw_program_device_num(struct sdw_bus *bus)
return ret;
}
+ *programmed = true;
+
break;
}
}
@@ -816,13 +872,13 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
mutex_lock(&bus->bus_lock);
dev_vdbg(bus->dev,
- "%s: changing status slave %d status %d new status %d\n",
- __func__, slave->dev_num, slave->status, status);
+ "changing status slave %d status %d new status %d\n",
+ slave->dev_num, slave->status, status);
if (status == SDW_SLAVE_UNATTACHED) {
dev_dbg(&slave->dev,
- "%s: initializing enumeration and init completion for Slave %d\n",
- __func__, slave->dev_num);
+ "initializing enumeration and init completion for Slave %d\n",
+ slave->dev_num);
init_completion(&slave->enumeration_complete);
init_completion(&slave->initialization_complete);
@@ -830,8 +886,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
} else if ((status == SDW_SLAVE_ATTACHED) &&
(slave->status == SDW_SLAVE_UNATTACHED)) {
dev_dbg(&slave->dev,
- "%s: signaling enumeration completion for Slave %d\n",
- __func__, slave->dev_num);
+ "signaling enumeration completion for Slave %d\n",
+ slave->dev_num);
complete(&slave->enumeration_complete);
}
@@ -1598,7 +1654,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
for_each_set_bit(bit, &port, 8) {
/* scp2 ports start from 4 */
- port_num = bit + 3;
+ port_num = bit + 4;
sdw_handle_port_interrupt(slave,
port_num,
&port_status[port_num]);
@@ -1610,7 +1666,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
for_each_set_bit(bit, &port, 8) {
/* scp3 ports start from 11 */
- port_num = bit + 10;
+ port_num = bit + 11;
sdw_handle_port_interrupt(slave,
port_num,
&port_status[port_num]);
@@ -1736,7 +1792,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
{
enum sdw_slave_status prev_status;
struct sdw_slave *slave;
- bool attached_initializing;
+ bool attached_initializing, id_programmed;
int i, ret = 0;
/* first check if any Slaves fell off the bus */
@@ -1757,19 +1813,33 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
i, slave->status);
sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
+
+ /* Ensure the driver knows that the peripheral is unattached */
+ ret = sdw_update_slave_status(slave, status[i]);
+ if (ret < 0)
+ dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
}
}
if (status[0] == SDW_SLAVE_ATTACHED) {
dev_dbg(bus->dev, "Slave attached, programming device number\n");
- ret = sdw_program_device_num(bus);
- if (ret < 0)
- dev_err(bus->dev, "Slave attach failed: %d\n", ret);
+
/*
- * programming a device number will have side effects,
- * so we deal with other devices at a later time
+ * Programming a device number will have side effects,
+ * so we deal with other devices at a later time.
+ * This relies on those devices reporting ATTACHED, which will
+ * trigger another call to this function. This will only
+ * happen if at least one device ID was programmed.
+ * Error returns from sdw_program_device_num() are currently
+ * ignored because there's no useful recovery that can be done.
+ * Returning the error here could result in the current status
+ * of other devices not being handled, because if no device IDs
+ * were programmed there's nothing to guarantee a status change
+ * to trigger another call to this function.
*/
- return ret;
+ sdw_program_device_num(bus, &id_programmed);
+ if (id_programmed)
+ return 0;
}
/* Continue to check other slave statuses */
@@ -1838,8 +1908,8 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
"Update Slave status failed:%d\n", ret);
if (attached_initializing) {
dev_dbg(&slave->dev,
- "%s: signaling initialization completion for Slave %d\n",
- __func__, slave->dev_num);
+ "signaling initialization completion for Slave %d\n",
+ slave->dev_num);
complete(&slave->initialization_complete);
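
When a bus sets dev_num_ida_min, sdw_get_device_num() above draws device
numbers from a shared IDA instead of scanning the per-bus bitmap, and
sdw_delete_slave() returns them. A minimal, self-contained sketch of that
allocate/free pairing (the IDA and wrapper names are illustrative, not from
this patch):

#include <linux/idr.h>

static DEFINE_IDA(example_dev_num_ida);

/* Returns the lowest free number in [min, max], or a negative errno. */
static int example_alloc_dev_num(unsigned int min, unsigned int max)
{
	return ida_alloc_range(&example_dev_num_ida, min, max, GFP_KERNEL);
}

static void example_free_dev_num(int dev_num)
{
	ida_free(&example_dev_num_ida, dev_num);
}
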
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 4fbb19557f5e..93929f19d083 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -544,9 +544,12 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
return SDW_CMD_IGNORED;
}
- /* fill response */
- for (i = 0; i < count; i++)
- msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]);
+ if (msg->flags == SDW_MSG_FLAG_READ) {
+ /* fill response */
+ for (i = 0; i < count; i++)
+ msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA,
+ cdns->response_buf[i]);
+ }
return SDW_CMD_OK;
}
@@ -566,7 +569,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
}
base = CDNS_MCP_CMD_BASE;
- addr = msg->addr;
+ addr = msg->addr + offset;
for (i = 0; i < count; i++) {
data = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
@@ -705,18 +708,15 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
CDNS_MCP_CMD_LEN, false);
- if (ret < 0)
- goto exit;
+ if (ret != SDW_CMD_OK)
+ return ret;
}
if (!(msg->len % CDNS_MCP_CMD_LEN))
- goto exit;
-
- ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
- msg->len % CDNS_MCP_CMD_LEN, false);
+ return SDW_CMD_OK;
-exit:
- return ret;
+ return _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
+ msg->len % CDNS_MCP_CMD_LEN, false);
}
EXPORT_SYMBOL(cdns_xfer_msg);
@@ -756,6 +756,14 @@ cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num)
}
EXPORT_SYMBOL(cdns_reset_page_addr);
+u32 cdns_read_ping_status(struct sdw_bus *bus)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+
+ return cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
+}
+EXPORT_SYMBOL(cdns_read_ping_status);
+
/*
* IRQ handling
*/
@@ -782,6 +790,7 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
bool is_slave = false;
u32 mask;
+ u32 val;
int i, set_status;
memset(status, 0, sizeof(status));
@@ -789,41 +798,38 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
for (i = 0; i <= SDW_MAX_DEVICES; i++) {
mask = (slave_intstat >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
CDNS_MCP_SLAVE_STATUS_BITS;
- if (!mask)
- continue;
- is_slave = true;
set_status = 0;
- if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
- status[i] = SDW_SLAVE_RESERVED;
- set_status++;
- }
-
- if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
- status[i] = SDW_SLAVE_ATTACHED;
- set_status++;
- }
+ if (mask) {
+ is_slave = true;
- if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
- status[i] = SDW_SLAVE_ALERT;
- set_status++;
- }
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
+ status[i] = SDW_SLAVE_RESERVED;
+ set_status++;
+ }
- if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
- status[i] = SDW_SLAVE_UNATTACHED;
- set_status++;
- }
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
+ status[i] = SDW_SLAVE_ATTACHED;
+ set_status++;
+ }
- /* first check if Slave reported multiple status */
- if (set_status > 1) {
- u32 val;
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
+ status[i] = SDW_SLAVE_ALERT;
+ set_status++;
+ }
- dev_warn_ratelimited(cdns->dev,
- "Slave %d reported multiple Status: %d\n",
- i, mask);
+ if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
+ status[i] = SDW_SLAVE_UNATTACHED;
+ set_status++;
+ }
+ }
- /* check latest status extracted from PING commands */
+ /*
+ * Check that the Slave reported a single status; when it did not,
+ * use the latest status extracted from PING commands.
+ */
+ if (set_status != 1) {
val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
val >>= (i * 2);
@@ -842,11 +848,6 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
status[i] = SDW_SLAVE_RESERVED;
break;
}
-
- dev_warn_ratelimited(cdns->dev,
- "Slave %d status updated to %d\n",
- i, status[i]);
-
}
}
@@ -961,9 +962,22 @@ static void cdns_update_slave_status_work(struct work_struct *work)
u32 device0_status;
int retry_count = 0;
+ /*
+ * Clear main interrupt first so we don't lose any assertions
+ * that happen during this function.
+ */
+ cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
+
slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
+ /*
+ * Clear the bits before handling so we don't lose any
+ * bits that re-assert.
+ */
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
+ cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
+
/* combine the two status */
slave_intstat = ((u64)slave1 << 32) | slave0;
@@ -971,8 +985,6 @@ static void cdns_update_slave_status_work(struct work_struct *work)
update_status:
cdns_update_slave_status(cdns, slave_intstat);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
- cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);
/*
* When there is more than one peripheral per link, it's
@@ -989,6 +1001,11 @@ update_status:
* attention with PING commands. There is no need to check for
* ALERTS since they are not allowed until a non-zero
* device_number is assigned.
+ *
+ * Do not clear the INTSTAT0/1. While looping to enumerate devices on
+ * #0 there could be status changes on other devices - these must
+ * be kept in the INTSTAT so they can be handled when all #0 devices
+ * have been handled.
*/
device0_status = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
@@ -1008,8 +1025,7 @@ update_status:
}
}
- /* clear and unmask Slave interrupt now */
- cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
+ /* unmask Slave interrupt now */
cdns_updatel(cdns, CDNS_MCP_INTMASK,
CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
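
The reordering above reads the Cadence slave interrupt status registers and
clears them before processing, so that any event which re-asserts while the
handler runs simply sets its bit again and is seen on the next pass. A
generic sketch of this read-then-clear pattern for write-1-to-clear status
registers (register offset and function names invented for illustration):

#define EXAMPLE_INTSTAT	0x40	/* made-up W1C status register offset */

static void example_process(u32 status)
{
	/* decode and act on each set bit */
}

static void example_handle_status(void __iomem *base)
{
	u32 status;

	/* snapshot the pending bits */
	status = readl(base + EXAMPLE_INTSTAT);

	/* clear before handling so re-asserted events are not lost */
	writel(status, base + EXAMPLE_INTSTAT);

	example_process(status);
}
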
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 595d72c15d97..ca9e805bab88 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -177,6 +177,8 @@ enum sdw_command_response
cdns_xfer_msg_defer(struct sdw_bus *bus,
struct sdw_msg *msg, struct sdw_defer *defer);
+u32 cdns_read_ping_status(struct sdw_bus *bus);
+
int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
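
cdns_read_ping_status() declared above is wired into sdw_master_ops later in
this patch (see the intel.c change) so that sdw_show_ping_status() can report
which peripherals still answer PING. A sketch of how a peripheral driver
might use it, for example after a firmware download; the wrapper below is
hypothetical:

static void example_check_attach(struct sdw_slave *slave)
{
	/* true: sleep 10-15 ms first to let the peripheral resync */
	sdw_show_ping_status(slave->bus, true);
}
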
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 747983743a14..f81cdd83ec26 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -55,7 +55,26 @@ static const struct adr_remap dell_sku_0A3E[] = {
{}
};
+/*
+ * The HP Omen 16-k0005TX does not expose the correct version of RT711 on link0
+ * and does not expose a RT1316 on link3
+ */
+static const struct adr_remap hp_omen_16[] = {
+ /* rt711-sdca on link0 */
+ {
+ 0x000020025d071100ull,
+ 0x000030025d071101ull
+ },
+ /* rt1316-sdca on link3 */
+ {
+ 0x000120025d071100ull,
+ 0x000330025d131601ull
+ },
+ {}
+};
+
static const struct dmi_system_id adr_remap_quirk_table[] = {
+ /* TGL devices */
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
@@ -78,6 +97,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
},
.driver_data = (void *)dell_sku_0A3E,
},
+ /* ADL devices */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16-k0xxx"),
+ },
+ .driver_data = (void *)hp_omen_16,
+ },
{}
};
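
Each adr_remap table above maps a firmware-reported SoundWire _ADR to the
corrected value, with an empty entry terminating the list. A hypothetical
sketch of how such a table could be applied (member names assumed from the
two-value initializers; not part of this patch):

static u64 example_remap_adr(const struct adr_remap *map, u64 addr)
{
	for (; map->adr; map++)
		if (map->adr == addr)
			return map->remapped_adr;

	return addr;	/* no quirk applies */
}
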
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 89d1d0d021fc..244209358784 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -22,6 +22,9 @@
#include "bus.h"
#include "intel.h"
+/* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */
+#define INTEL_DEV_NUM_IDA_MIN 4
+
#define INTEL_MASTER_SUSPEND_DELAY_MS 3000
#define INTEL_MASTER_RESET_ITERATIONS 10
@@ -135,7 +138,7 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
if (!buf)
return -ENOMEM;
- links = intel_readl(s, SDW_SHIM_LCAP) & GENMASK(2, 0);
+ links = intel_readl(s, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_LCOUNT_MASK;
ret = scnprintf(buf, RD_BUF, "Register Value\n");
ret += scnprintf(buf + ret, RD_BUF - ret, "\nShim\n");
@@ -167,9 +170,8 @@ static int intel_reg_show(struct seq_file *s_file, void *data)
ret += intel_sprintf(s, false, buf, ret,
SDW_SHIM_PCMSYCHC(i, j));
}
- ret += scnprintf(buf + ret, RD_BUF - ret, "\n PDMSCAP, IOCTL, CTMCTL\n");
+ ret += scnprintf(buf + ret, RD_BUF - ret, "\n IOCTL, CTMCTL\n");
- ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_PDMSCAP(i));
ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_IOCTL(i));
ret += intel_sprintf(s, false, buf, ret, SDW_SHIM_CTMCTL(i));
}
@@ -258,86 +260,6 @@ static void intel_debugfs_exit(struct sdw_intel *sdw) {}
/*
* shim ops
*/
-
-static int intel_link_power_up(struct sdw_intel *sdw)
-{
- unsigned int link_id = sdw->instance;
- void __iomem *shim = sdw->link_res->shim;
- u32 *shim_mask = sdw->link_res->shim_mask;
- struct sdw_bus *bus = &sdw->cdns.bus;
- struct sdw_master_prop *prop = &bus->prop;
- u32 spa_mask, cpa_mask;
- u32 link_control;
- int ret = 0;
- u32 syncprd;
- u32 sync_reg;
-
- mutex_lock(sdw->link_res->shim_lock);
-
- /*
- * The hardware relies on an internal counter, typically 4kHz,
- * to generate the SoundWire SSP - which defines a 'safe'
- * synchronization point between commands and audio transport
- * and allows for multi link synchronization. The SYNCPRD value
- * is only dependent on the oscillator clock provided to
- * the IP, so adjust based on _DSD properties reported in DSDT
- * tables. The values reported are based on either 24MHz
- * (CNL/CML) or 38.4 MHz (ICL/TGL+).
- */
- if (prop->mclk_freq % 6000000)
- syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
- else
- syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
-
- if (!*shim_mask) {
- dev_dbg(sdw->cdns.dev, "%s: powering up all links\n", __func__);
-
- /* we first need to program the SyncPRD/CPU registers */
- dev_dbg(sdw->cdns.dev,
- "%s: first link up, programming SYNCPRD\n", __func__);
-
- /* set SyncPRD period */
- sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
- u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
-
- /* Set SyncCPU bit */
- sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
- intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
-
- /* Link power up sequence */
- link_control = intel_readl(shim, SDW_SHIM_LCTL);
-
- /* only power-up enabled links */
- spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
- cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
-
- link_control |= spa_mask;
-
- ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
- goto out;
- }
-
- /* SyncCPU will change once link is active */
- ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
- SDW_SHIM_SYNC_SYNCCPU, 0);
- if (ret < 0) {
- dev_err(sdw->cdns.dev,
- "Failed to set SHIM_SYNC: %d\n", ret);
- goto out;
- }
- }
-
- *shim_mask |= BIT(link_id);
-
- sdw->cdns.link_up = true;
-out:
- mutex_unlock(sdw->link_res->shim_lock);
-
- return ret;
-}
-
/* this needs to be called with shim_lock */
static void intel_shim_glue_to_master_ip(struct sdw_intel *sdw)
{
@@ -389,15 +311,13 @@ static void intel_shim_master_ip_to_glue(struct sdw_intel *sdw)
/* at this point Integration Glue has full control of the I/Os */
}
-static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
+/* this needs to be called with shim_lock */
+static void intel_shim_init(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- int ret = 0;
u16 ioctl = 0, act = 0;
- mutex_lock(sdw->link_res->shim_lock);
-
/* Initialize Shim */
ioctl |= SDW_SHIM_IOCTL_BKE;
intel_writew(shim, SDW_SHIM_IOCTL(link_id), ioctl);
@@ -422,10 +342,17 @@ static int intel_shim_init(struct sdw_intel *sdw, bool clock_stop)
act |= SDW_SHIM_CTMCTL_DODS;
intel_writew(shim, SDW_SHIM_CTMCTL(link_id), act);
usleep_range(10, 15);
+}
- mutex_unlock(sdw->link_res->shim_lock);
+static int intel_shim_check_wake(struct sdw_intel *sdw)
+{
+ void __iomem *shim;
+ u16 wake_sts;
- return ret;
+ shim = sdw->link_res->shim;
+ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
+
+ return wake_sts & BIT(sdw->instance);
}
static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
@@ -454,6 +381,88 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
mutex_unlock(sdw->link_res->shim_lock);
}
+static int intel_link_power_up(struct sdw_intel *sdw)
+{
+ unsigned int link_id = sdw->instance;
+ void __iomem *shim = sdw->link_res->shim;
+ u32 *shim_mask = sdw->link_res->shim_mask;
+ struct sdw_bus *bus = &sdw->cdns.bus;
+ struct sdw_master_prop *prop = &bus->prop;
+ u32 spa_mask, cpa_mask;
+ u32 link_control;
+ int ret = 0;
+ u32 syncprd;
+ u32 sync_reg;
+
+ mutex_lock(sdw->link_res->shim_lock);
+
+ /*
+ * The hardware relies on an internal counter, typically 4kHz,
+ * to generate the SoundWire SSP - which defines a 'safe'
+ * synchronization point between commands and audio transport
+ * and allows for multi link synchronization. The SYNCPRD value
+ * is only dependent on the oscillator clock provided to
+ * the IP, so adjust based on _DSD properties reported in DSDT
+ * tables. The values reported are based on either 24MHz
+ * (CNL/CML) or 38.4 MHz (ICL/TGL+).
+ */
+ if (prop->mclk_freq % 6000000)
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4;
+ else
+ syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24;
+
+ if (!*shim_mask) {
+ dev_dbg(sdw->cdns.dev, "powering up all links\n");
+
+ /* we first need to program the SyncPRD/CPU registers */
+ dev_dbg(sdw->cdns.dev,
+ "first link up, programming SYNCPRD\n");
+
+ /* set SyncPRD period */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ u32p_replace_bits(&sync_reg, syncprd, SDW_SHIM_SYNC_SYNCPRD);
+
+ /* Set SyncCPU bit */
+ sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+
+ /* Link power up sequence */
+ link_control = intel_readl(shim, SDW_SHIM_LCTL);
+
+ /* only power-up enabled links */
+ spa_mask = FIELD_PREP(SDW_SHIM_LCTL_SPA_MASK, sdw->link_res->link_mask);
+ cpa_mask = FIELD_PREP(SDW_SHIM_LCTL_CPA_MASK, sdw->link_res->link_mask);
+
+ link_control |= spa_mask;
+
+ ret = intel_set_bit(shim, SDW_SHIM_LCTL, link_control, cpa_mask);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "Failed to power up link: %d\n", ret);
+ goto out;
+ }
+
+ /* SyncCPU will change once link is active */
+ ret = intel_wait_bit(shim, SDW_SHIM_SYNC,
+ SDW_SHIM_SYNC_SYNCCPU, 0);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev,
+ "Failed to set SHIM_SYNC: %d\n", ret);
+ goto out;
+ }
+ }
+
+ *shim_mask |= BIT(link_id);
+
+ sdw->cdns.link_up = true;
+
+ intel_shim_init(sdw);
+
+out:
+ mutex_unlock(sdw->link_res->shim_lock);
+
+ return ret;
+}
+
static int intel_link_power_down(struct sdw_intel *sdw)
{
u32 link_control, spa_mask, cpa_mask;
@@ -476,7 +485,7 @@ static int intel_link_power_down(struct sdw_intel *sdw)
if (!*shim_mask) {
- dev_dbg(sdw->cdns.dev, "%s: powering down all links\n", __func__);
+ dev_dbg(sdw->cdns.dev, "powering down all links\n");
/* Link power down sequence */
link_control = intel_readl(shim, SDW_SHIM_LCTL);
@@ -1169,11 +1178,20 @@ static int intel_create_dai(struct sdw_cdns *cdns,
static int intel_register_dai(struct sdw_intel *sdw)
{
+ struct sdw_cdns_stream_config config;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_cdns_streams *stream;
struct snd_soc_dai_driver *dais;
int num_dai, ret, off = 0;
+ /* Read the PDI config and initialize cadence PDI */
+ intel_pdi_init(sdw, &config);
+ ret = sdw_cdns_pdi_init(cdns, config);
+ if (ret)
+ return ret;
+
+ intel_pdi_ch_update(sdw);
+
/* DAIs are created based on total number of PDIs supported */
num_dai = cdns->pcm.num_pdi;
@@ -1201,8 +1219,208 @@ static int intel_register_dai(struct sdw_intel *sdw)
if (ret)
return ret;
- return snd_soc_register_component(cdns->dev, &dai_component,
- dais, num_dai);
+ return devm_snd_soc_register_component(cdns->dev, &dai_component,
+ dais, num_dai);
+}
+
+static int intel_start_bus(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ struct sdw_bus *bus = &cdns->bus;
+ int ret;
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid timeouts when
+ * gsync is enabled
+ */
+ if (bus->multi_link)
+ intel_shim_sync_arm(sdw);
+
+ ret = sdw_cdns_init(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
+ goto err_interrupt;
+ }
+
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
+ goto err_interrupt;
+ }
+
+ if (bus->multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
+ goto err_interrupt;
+ }
+ }
+ sdw_cdns_check_self_clearing_bits(cdns, __func__,
+ true, INTEL_MASTER_RESET_ITERATIONS);
+
+ return 0;
+
+err_interrupt:
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
+}
+
+static int intel_start_bus_after_reset(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ struct sdw_bus *bus = &cdns->bus;
+ bool clock_stop0;
+ int status;
+ int ret;
+
+ /*
+ * An exception condition occurs for the CLK_STOP_BUS_RESET
+ * case if one or more masters remain active. In this condition,
+ * all the masters are powered on because they are in the same power
+ * domain. The master can preserve its context for clock stop0, so
+ * there is no need to clear the slave status and reset the bus.
+ */
+ clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
+
+ if (!clock_stop0) {
+
+ /*
+ * make sure all Slaves are tagged as UNATTACHED and
+ * provide reason for reinitialization
+ */
+
+ status = SDW_UNATTACH_REQUEST_MASTER_RESET;
+ sdw_clear_slave_status(bus, status);
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+
+ /*
+ * follow recommended programming flows to avoid
+ * timeouts when gsync is enabled
+ */
+ if (bus->multi_link)
+ intel_shim_sync_arm(sdw);
+
+ /*
+ * Re-initialize the IP since it was powered-off
+ */
+ sdw_cdns_init(&sdw->cdns);
+
+ } else {
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable interrupts during resume\n");
+ return ret;
+ }
+ }
+
+ ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
+ if (ret < 0) {
+ dev_err(dev, "unable to restart clock during resume\n");
+ goto err_interrupt;
+ }
+
+ if (!clock_stop0) {
+ ret = sdw_cdns_exit_reset(cdns);
+ if (ret < 0) {
+ dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ goto err_interrupt;
+ }
+
+ if (bus->multi_link) {
+ ret = intel_shim_sync_go(sdw);
+ if (ret < 0) {
+ dev_err(sdw->cdns.dev, "sync go failed during resume\n");
+ goto err_interrupt;
+ }
+ }
+ }
+ sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
+
+ return 0;
+
+err_interrupt:
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
+}
+
+static void intel_check_clock_stop(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ bool clock_stop0;
+
+ clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
+ if (!clock_stop0)
+ dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
+}
+
+static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ int ret;
+
+ ret = sdw_cdns_enable_interrupt(cdns, true);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = sdw_cdns_clock_restart(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
+ sdw_cdns_enable_interrupt(cdns, false);
+ return ret;
+ }
+
+ sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
+ true, INTEL_MASTER_RESET_ITERATIONS);
+
+ return 0;
+}
+
+static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
+{
+ struct device *dev = sdw->cdns.dev;
+ struct sdw_cdns *cdns = &sdw->cdns;
+ bool wake_enable = false;
+ int ret;
+
+ if (clock_stop) {
+ ret = sdw_cdns_clock_stop(cdns, true);
+ if (ret < 0)
+ dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
+ else
+ wake_enable = true;
+ }
+
+ ret = sdw_cdns_enable_interrupt(cdns, false);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = intel_link_power_down(sdw);
+ if (ret) {
+ dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
+ return ret;
+ }
+
+ intel_shim_wake(sdw, wake_enable);
+
+ return 0;
}
static int sdw_master_read_intel_prop(struct sdw_bus *bus)
@@ -1254,7 +1472,7 @@ static int intel_prop_read(struct sdw_bus *bus)
}
static struct sdw_master_ops sdw_intel_ops = {
- .read_prop = sdw_master_read_prop,
+ .read_prop = intel_prop_read,
.override_adr = sdw_dmi_override_adr,
.xfer_msg = cdns_xfer_msg,
.xfer_msg_defer = cdns_xfer_msg_defer,
@@ -1262,22 +1480,9 @@ static struct sdw_master_ops sdw_intel_ops = {
.set_bus_conf = cdns_bus_conf,
.pre_bank_switch = intel_pre_bank_switch,
.post_bank_switch = intel_post_bank_switch,
+ .read_ping_status = cdns_read_ping_status,
};
-static int intel_init(struct sdw_intel *sdw)
-{
- bool clock_stop;
-
- /* Initialize shim and controller */
- intel_link_power_up(sdw);
-
- clock_stop = sdw_cdns_is_clock_stop(&sdw->cdns);
-
- intel_shim_init(sdw, clock_stop);
-
- return 0;
-}
-
/*
* probe and init (aux_dev_id argument is required by function prototype but not used)
*/
@@ -1307,11 +1512,11 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
cdns->msg_count = 0;
bus->link_id = auxdev->id;
+ bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
sdw_cdns_probe(cdns);
- /* Set property read ops */
- sdw_intel_ops.read_prop = intel_prop_read;
+ /* Set ops */
bus->ops = &sdw_intel_ops;
/* set driver data, accessed by snd_soc_dai_get_drvdata() */
@@ -1344,7 +1549,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
int intel_link_startup(struct auxiliary_device *auxdev)
{
- struct sdw_cdns_stream_config config;
struct device *dev = &auxdev->dev;
struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
@@ -1365,7 +1569,6 @@ int intel_link_startup(struct auxiliary_device *auxdev)
multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
if (!multi_link) {
dev_dbg(dev, "Multi-link is disabled\n");
- bus->multi_link = false;
} else {
/*
* hardware-based synchronization is required regardless
@@ -1373,68 +1576,31 @@ int intel_link_startup(struct auxiliary_device *auxdev)
* synchronization is gated by gsync when the multi-master
* mode is set.
*/
- bus->multi_link = true;
bus->hw_sync_min_links = 1;
}
+ bus->multi_link = multi_link;
/* Initialize shim, controller */
- ret = intel_init(sdw);
- if (ret)
- goto err_init;
-
- /* Read the PDI config and initialize cadence PDI */
- intel_pdi_init(sdw, &config);
- ret = sdw_cdns_pdi_init(cdns, config);
+ ret = intel_link_power_up(sdw);
if (ret)
goto err_init;
- intel_pdi_ch_update(sdw);
-
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts\n");
- goto err_init;
- }
-
- /*
- * follow recommended programming flows to avoid timeouts when
- * gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- ret = sdw_cdns_init(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to initialize Cadence IP\n");
- goto err_interrupt;
- }
-
- ret = sdw_cdns_exit_reset(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence\n");
- goto err_interrupt;
- }
-
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(dev, "sync go failed: %d\n", ret);
- goto err_interrupt;
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, __func__,
- true, INTEL_MASTER_RESET_ITERATIONS);
-
/* Register DAIs */
ret = intel_register_dai(sdw);
if (ret) {
dev_err(dev, "DAI registration failed: %d\n", ret);
- snd_soc_unregister_component(dev);
- goto err_interrupt;
+ goto err_power_up;
}
intel_debugfs_init(sdw);
+ /* start bus */
+ ret = intel_start_bus(sdw);
+ if (ret) {
+ dev_err(dev, "bus start failed: %d\n", ret);
+ goto err_power_up;
+ }
+
/* Enable runtime PM */
if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
pm_runtime_set_autosuspend_delay(dev,
@@ -1479,15 +1645,14 @@ int intel_link_startup(struct auxiliary_device *auxdev)
sdw->startup_done = true;
return 0;
-err_interrupt:
- sdw_cdns_enable_interrupt(cdns, false);
+err_power_up:
+ intel_link_power_down(sdw);
err_init:
return ret;
}
static void intel_link_remove(struct auxiliary_device *auxdev)
{
- struct device *dev = &auxdev->dev;
struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
@@ -1500,7 +1665,6 @@ static void intel_link_remove(struct auxiliary_device *auxdev)
if (!bus->prop.hw_disabled) {
intel_debugfs_exit(sdw);
sdw_cdns_enable_interrupt(cdns, false);
- snd_soc_unregister_component(dev);
}
sdw_bus_master_delete(bus);
}
@@ -1510,8 +1674,6 @@ int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
struct device *dev = &auxdev->dev;
struct sdw_intel *sdw;
struct sdw_bus *bus;
- void __iomem *shim;
- u16 wake_sts;
sdw = auxiliary_get_drvdata(auxdev);
bus = &sdw->cdns.bus;
@@ -1522,10 +1684,7 @@ int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
return 0;
}
- shim = sdw->link_res->shim;
- wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS);
-
- if (!(wake_sts & BIT(sdw->instance)))
+ if (!intel_shim_check_wake(sdw))
return 0;
/* disable WAKEEN interrupt ASAP to prevent interrupt flood */
@@ -1553,11 +1712,11 @@ static int intel_resume_child_device(struct device *dev, void *data)
struct sdw_slave *slave = dev_to_sdw_dev(dev);
if (!slave->probed) {
- dev_dbg(dev, "%s: skipping device, no probed driver\n", __func__);
+ dev_dbg(dev, "skipping device, no probed driver\n");
return 0;
}
if (!slave->dev_num_sticky) {
- dev_dbg(dev, "%s: skipping device, never detected on bus\n", __func__);
+ dev_dbg(dev, "skipping device, never detected on bus\n");
return 0;
}
@@ -1643,7 +1802,7 @@ static int __maybe_unused intel_suspend(struct device *dev)
}
if (pm_runtime_suspended(dev)) {
- dev_dbg(dev, "%s: pm_runtime status: suspended\n", __func__);
+ dev_dbg(dev, "pm_runtime status: suspended\n");
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
@@ -1664,20 +1823,12 @@ static int __maybe_unused intel_suspend(struct device *dev)
return 0;
}
- ret = sdw_cdns_enable_interrupt(cdns, false);
+ ret = intel_stop_bus(sdw, false);
if (ret < 0) {
- dev_err(dev, "cannot disable interrupts on suspend\n");
- return ret;
- }
-
- ret = intel_link_power_down(sdw);
- if (ret) {
- dev_err(dev, "Link power down failed: %d\n", ret);
+ dev_err(dev, "%s: cannot stop bus: %d\n", __func__, ret);
return ret;
}
- intel_shim_wake(sdw, false);
-
return 0;
}
@@ -1698,44 +1849,19 @@ static int __maybe_unused intel_suspend_runtime(struct device *dev)
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
-
- ret = sdw_cdns_enable_interrupt(cdns, false);
+ ret = intel_stop_bus(sdw, false);
if (ret < 0) {
- dev_err(dev, "cannot disable interrupts on suspend\n");
- return ret;
- }
-
- ret = intel_link_power_down(sdw);
- if (ret) {
- dev_err(dev, "Link power down failed: %d\n", ret);
+ dev_err(dev, "%s: cannot stop bus during teardown: %d\n",
+ __func__, ret);
return ret;
}
-
- intel_shim_wake(sdw, false);
-
- } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET ||
- !clock_stop_quirks) {
- bool wake_enable = true;
-
- ret = sdw_cdns_clock_stop(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable clock stop on suspend\n");
- wake_enable = false;
- }
-
- ret = sdw_cdns_enable_interrupt(cdns, false);
+ } else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET || !clock_stop_quirks) {
+ ret = intel_stop_bus(sdw, true);
if (ret < 0) {
- dev_err(dev, "cannot disable interrupts on suspend\n");
- return ret;
- }
-
- ret = intel_link_power_down(sdw);
- if (ret) {
- dev_err(dev, "Link power down failed: %d\n", ret);
+ dev_err(dev, "%s: cannot stop bus during clock_stop: %d\n",
+ __func__, ret);
return ret;
}
-
- intel_shim_wake(sdw, wake_enable);
} else {
dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
__func__, clock_stop_quirks);
@@ -1751,7 +1877,6 @@ static int __maybe_unused intel_resume(struct device *dev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
int link_flags;
- bool multi_link;
int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -1761,10 +1886,9 @@ static int __maybe_unused intel_resume(struct device *dev)
}
link_flags = md_flags >> (bus->link_id * 8);
- multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
if (pm_runtime_suspended(dev)) {
- dev_dbg(dev, "%s: pm_runtime status was suspended, forcing active\n", __func__);
+ dev_dbg(dev, "pm_runtime status was suspended, forcing active\n");
/* follow required sequence from runtime_pm.rst */
pm_runtime_disable(dev);
@@ -1778,7 +1902,7 @@ static int __maybe_unused intel_resume(struct device *dev)
pm_runtime_idle(dev);
}
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
dev_err(dev, "%s failed: %d\n", __func__, ret);
return ret;
@@ -1790,41 +1914,13 @@ static int __maybe_unused intel_resume(struct device *dev)
*/
sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
- /*
- * follow recommended programming flows to avoid timeouts when
- * gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
- if (ret < 0) {
- dev_err(dev, "unable to initialize Cadence IP during resume\n");
- return ret;
- }
-
- ret = sdw_cdns_exit_reset(cdns);
+ ret = intel_start_bus(sdw);
if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ dev_err(dev, "cannot start bus during resume\n");
+ intel_link_power_down(sdw);
return ret;
}
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(dev, "sync go failed during resume\n");
- return ret;
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, __func__,
- true, INTEL_MASTER_RESET_ITERATIONS);
-
/*
* after system resume, the pm_runtime suspend() may kick in
* during the enumeration, before any children device force the
@@ -1837,7 +1933,7 @@ static int __maybe_unused intel_resume(struct device *dev)
*/
pm_runtime_mark_last_busy(dev);
- return ret;
+ return 0;
}
static int __maybe_unused intel_resume_runtime(struct device *dev)
@@ -1846,10 +1942,6 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
u32 clock_stop_quirks;
- bool clock_stop0;
- int link_flags;
- bool multi_link;
- int status;
int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -1861,15 +1953,12 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
/* unconditionally disable WAKEEN interrupt */
intel_shim_wake(sdw, false);
- link_flags = md_flags >> (bus->link_id * 8);
- multi_link = !(link_flags & SDW_INTEL_MASTER_DISABLE_MULTI_LINK);
-
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
if (clock_stop_quirks & SDW_INTEL_CLK_STOP_TEARDOWN) {
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d\n", __func__, ret);
+ dev_err(dev, "%s: power_up failed after teardown: %d\n", __func__, ret);
return ret;
}
@@ -1879,145 +1968,45 @@ static int __maybe_unused intel_resume_runtime(struct device *dev)
*/
sdw_clear_slave_status(bus, SDW_UNATTACH_REQUEST_MASTER_RESET);
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
- /*
- * follow recommended programming flows to avoid
- * timeouts when gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- ret = sdw_cdns_init(&sdw->cdns);
- if (ret < 0) {
- dev_err(dev, "unable to initialize Cadence IP during resume\n");
- return ret;
- }
-
- ret = sdw_cdns_exit_reset(cdns);
+ ret = intel_start_bus(sdw);
if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
+ dev_err(dev, "%s: cannot start bus after teardown: %d\n", __func__, ret);
+ intel_link_power_down(sdw);
return ret;
}
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(dev, "sync go failed during resume\n");
- return ret;
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime TEARDOWN",
- true, INTEL_MASTER_RESET_ITERATIONS);
} else if (clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) {
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d\n", __func__, ret);
+ dev_err(dev, "%s: power_up failed after bus reset: %d\n", __func__, ret);
return ret;
}
- /*
- * An exception condition occurs for the CLK_STOP_BUS_RESET
- * case if one or more masters remain active. In this condition,
- * all the masters are powered on for they are in the same power
- * domain. Master can preserve its context for clock stop0, so
- * there is no need to clear slave status and reset bus.
- */
- clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
-
- if (!clock_stop0) {
-
- /*
- * make sure all Slaves are tagged as UNATTACHED and
- * provide reason for reinitialization
- */
-
- status = SDW_UNATTACH_REQUEST_MASTER_RESET;
- sdw_clear_slave_status(bus, status);
-
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
-
- /*
- * follow recommended programming flows to avoid
- * timeouts when gsync is enabled
- */
- if (multi_link)
- intel_shim_sync_arm(sdw);
-
- /*
- * Re-initialize the IP since it was powered-off
- */
- sdw_cdns_init(&sdw->cdns);
-
- } else {
- ret = sdw_cdns_enable_interrupt(cdns, true);
- if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
- return ret;
- }
- }
-
- ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
+ ret = intel_start_bus_after_reset(sdw);
if (ret < 0) {
- dev_err(dev, "unable to restart clock during resume\n");
+ dev_err(dev, "%s: cannot start bus after reset: %d\n", __func__, ret);
+ intel_link_power_down(sdw);
return ret;
}
-
- if (!clock_stop0) {
- ret = sdw_cdns_exit_reset(cdns);
- if (ret < 0) {
- dev_err(dev, "unable to exit bus reset sequence during resume\n");
- return ret;
- }
-
- if (multi_link) {
- ret = intel_shim_sync_go(sdw);
- if (ret < 0) {
- dev_err(sdw->cdns.dev, "sync go failed during resume\n");
- return ret;
- }
- }
- }
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime BUS_RESET",
- true, INTEL_MASTER_RESET_ITERATIONS);
-
} else if (!clock_stop_quirks) {
- clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
- if (!clock_stop0)
- dev_err(dev, "%s invalid configuration, clock was not stopped", __func__);
+ intel_check_clock_stop(sdw);
- ret = intel_init(sdw);
+ ret = intel_link_power_up(sdw);
if (ret) {
- dev_err(dev, "%s failed: %d\n", __func__, ret);
+ dev_err(dev, "%s: power_up failed: %d\n", __func__, ret);
return ret;
}
- ret = sdw_cdns_enable_interrupt(cdns, true);
+ ret = intel_start_bus_after_clock_stop(sdw);
if (ret < 0) {
- dev_err(dev, "cannot enable interrupts during resume\n");
+ dev_err(dev, "%s: cannot start bus after clock stop: %d\n", __func__, ret);
+ intel_link_power_down(sdw);
return ret;
}
-
- ret = sdw_cdns_clock_restart(cdns, false);
- if (ret < 0) {
- dev_err(dev, "unable to resume master during resume\n");
- return ret;
- }
-
- sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
- true, INTEL_MASTER_RESET_ITERATIONS);
} else {
- dev_err(dev, "%s clock_stop_quirks %x unsupported\n",
+ dev_err(dev, "%s: clock_stop_quirks %x unsupported\n",
__func__, clock_stop_quirks);
ret = -EINVAL;
}
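/*
 * A minimal standalone sketch (not the driver itself) of the shared
 * power-up -> start-bus -> unwind-on-failure pattern that the reworked
 * resume paths above follow.  The helper names mirror the diff; the
 * quirk values and stub bodies are illustrative assumptions only.
 */
#include <stdio.h>

#define CLK_STOP_TEARDOWN	0x1	/* stands in for SDW_INTEL_CLK_STOP_TEARDOWN */
#define CLK_STOP_BUS_RESET	0x2	/* stands in for SDW_INTEL_CLK_STOP_BUS_RESET */

static int link_power_up(void)              { return 0; }	/* intel_link_power_up()   */
static void link_power_down(void)           { }			/* intel_link_power_down() */
static int start_bus(void)                  { return 0; }
static int start_bus_after_reset(void)      { return 0; }
static int start_bus_after_clock_stop(void) { return 0; }

static int resume_runtime(unsigned int quirks)
{
	int ret = link_power_up();

	if (ret)
		return ret;

	if (quirks & CLK_STOP_TEARDOWN)
		ret = start_bus();
	else if (quirks & CLK_STOP_BUS_RESET)
		ret = start_bus_after_reset();
	else
		ret = start_bus_after_clock_stop();

	/* on any start failure the power-up is undone before returning */
	if (ret < 0)
		link_power_down();
	return ret;
}

int main(void)
{
	printf("resume: %d\n", resume_runtime(CLK_STOP_BUS_RESET));
	return 0;
}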
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 824f4f32d4dc..d091513919df 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -306,7 +306,7 @@ sdw_intel_startup_controller(struct sdw_intel_ctx *ctx)
/* Check SNDWLCAP.LCOUNT */
caps = ioread32(ctx->mmio_base + ctx->shim_base + SDW_SHIM_LCAP);
- caps &= GENMASK(2, 0);
+ caps &= SDW_SHIM_LCAP_LCOUNT_MASK;
/* Check HW supported vs property value */
if (caps < ctx->count) {
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 9df970eeca45..b33d5db494a5 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -169,7 +169,7 @@ struct qcom_swrm_ctrl {
u8 wcmd_id;
struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
- enum sdw_slave_status status[SDW_MAX_DEVICES];
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
u32 slave_status;
@@ -420,7 +420,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
- for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
+ for (dev_num = 1; dev_num <= SDW_MAX_DEVICES; dev_num++) {
status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
@@ -440,7 +440,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
ctrl->slave_status = val;
- for (i = 0; i < SDW_MAX_DEVICES; i++) {
+ for (i = 1; i <= SDW_MAX_DEVICES; i++) {
u32 s;
s = (val >> (i * 2));
@@ -573,11 +573,10 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
break;
case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
- dev_err_ratelimited(swrm->dev, "%s: SWR new slave attached\n",
- __func__);
+ dev_dbg_ratelimited(swrm->dev, "SWR new slave attached\n");
swrm->reg_read(swrm, SWRM_MCP_SLV_STATUS, &slave_status);
if (swrm->slave_status == slave_status) {
- dev_err(swrm->dev, "Slave status not changed %x\n",
+ dev_dbg(swrm->dev, "Slave status not changed %x\n",
slave_status);
} else {
qcom_swrm_get_device_status(swrm);
@@ -1356,10 +1355,6 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ctrl->bus.clk_stop_timeout = 300;
- ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr");
- if (IS_ERR(ctrl->audio_cgcr))
- dev_err(dev, "Failed to get audio_cgcr reset required for soundwire-v1.6.0\n");
-
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
goto err_clk;
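/*
 * A minimal sketch of the 1-based SoundWire device numbering that the
 * array sizing and loop-bound changes above account for: device number 0
 * is reserved for unenumerated devices, so per-device state needs
 * SDW_MAX_DEVICES + 1 slots and the loops run from 1 to SDW_MAX_DEVICES
 * inclusive.  The constant and register values below are illustrative.
 */
#include <stdio.h>

#define MAX_DEVICES	11	/* illustrative stand-in for SDW_MAX_DEVICES     */
#define STATUS_SZ	2	/* bits per device, as SWRM_MCP_SLV_STATUS_SZ    */
#define STATUS_MASK	0x3

int main(void)
{
	unsigned int status[MAX_DEVICES + 1] = { 0 };	/* slot 0 stays unused    */
	unsigned int val = 0x24;			/* example status register */
	int dev_num;

	for (dev_num = 1; dev_num <= MAX_DEVICES; dev_num++) {
		status[dev_num] = (val >> (dev_num * STATUS_SZ)) & STATUS_MASK;
		if (status[dev_num])
			printf("dev %d: status %u\n", dev_num, status[dev_num]);
	}
	return 0;
}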
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index e32f6a2058ae..d1bb62f7368b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -591,6 +591,15 @@ config SPI_MICROCHIP_CORE
PolarFire SoC.
If built as a module, it will be called spi-microchip-core.
+config SPI_MICROCHIP_CORE_QSPI
+ tristate "Microchip FPGA QSPI controllers"
+ depends on SPI_MASTER
+ help
+ This enables the QSPI driver for Microchip FPGA QSPI controllers.
+ Say Y or M here if you want to use the QSPI controllers on
+ PolarFire SoC.
+ If built as a module, it will be called spi-microchip-core-qspi.
+
config SPI_MT65XX
tristate "MediaTek SPI controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 15d2f3835e45..4b34e855c841 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
obj-$(CONFIG_SPI_MICROCHIP_CORE) += spi-microchip-core.o
+obj-$(CONFIG_SPI_MICROCHIP_CORE_QSPI) += spi-microchip-core-qspi.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 08df4f8d0531..e23121456c70 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -36,9 +36,17 @@
#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
-/* M_CMD OP codes for SPI */
-#define AMD_SPI_XFER_TX 1
-#define AMD_SPI_XFER_RX 2
+#define AMD_SPI_ENA_REG 0x20
+#define AMD_SPI_ALT_SPD_SHIFT 20
+#define AMD_SPI_ALT_SPD_MASK GENMASK(23, AMD_SPI_ALT_SPD_SHIFT)
+#define AMD_SPI_SPI100_SHIFT 0
+#define AMD_SPI_SPI100_MASK GENMASK(AMD_SPI_SPI100_SHIFT, AMD_SPI_SPI100_SHIFT)
+#define AMD_SPI_SPEED_REG 0x6C
+#define AMD_SPI_SPD7_SHIFT 8
+#define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT)
+
+#define AMD_SPI_MAX_HZ 100000000
+#define AMD_SPI_MIN_HZ 800000
/**
* enum amd_spi_versions - SPI controller versions
@@ -50,14 +58,41 @@ enum amd_spi_versions {
AMD_SPI_V2,
};
+enum amd_spi_speed {
+ F_66_66MHz,
+ F_33_33MHz,
+ F_22_22MHz,
+ F_16_66MHz,
+ F_100MHz,
+ F_800KHz,
+ SPI_SPD7,
+ F_50MHz = 0x4,
+ F_4MHz = 0x32,
+ F_3_17MHz = 0x3F
+};
+
+/**
+ * struct amd_spi_freq - Matches device speed with values to write in regs
+ * @speed_hz: Device frequency
+ * @enable_val: Value to be written to "enable register"
+ * @spd7_val: Some frequencies require a value to be written to the SPISPEED register
+ */
+struct amd_spi_freq {
+ u32 speed_hz;
+ u32 enable_val;
+ u32 spd7_val;
+};
+
/**
* struct amd_spi - SPI driver instance
* @io_remap_addr: Start address of the SPI controller registers
* @version: SPI controller hardware version
+ * @speed_hz: Device frequency
*/
struct amd_spi {
void __iomem *io_remap_addr;
enum amd_spi_versions version;
+ unsigned int speed_hz;
};
static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
@@ -189,65 +224,125 @@ static int amd_spi_master_setup(struct spi_device *spi)
return 0;
}
+static const struct amd_spi_freq amd_spi_freq[] = {
+ { AMD_SPI_MAX_HZ, F_100MHz, 0},
+ { 66660000, F_66_66MHz, 0},
+ { 50000000, SPI_SPD7, F_50MHz},
+ { 33330000, F_33_33MHz, 0},
+ { 22220000, F_22_22MHz, 0},
+ { 16660000, F_16_66MHz, 0},
+ { 4000000, SPI_SPD7, F_4MHz},
+ { 3170000, SPI_SPD7, F_3_17MHz},
+ { AMD_SPI_MIN_HZ, F_800KHz, 0},
+};
+
+static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+{
+ unsigned int i, spd7_val, alt_spd;
+
+ if (speed_hz < AMD_SPI_MIN_HZ)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+ if (speed_hz >= amd_spi_freq[i].speed_hz)
+ break;
+
+ if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
+ return 0;
+
+ amd_spi->speed_hz = amd_spi_freq[i].speed_hz;
+
+ alt_spd = (amd_spi_freq[i].enable_val << AMD_SPI_ALT_SPD_SHIFT)
+ & AMD_SPI_ALT_SPD_MASK;
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, alt_spd,
+ AMD_SPI_ALT_SPD_MASK);
+
+ if (amd_spi->speed_hz == AMD_SPI_MAX_HZ)
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_ENA_REG, 1,
+ AMD_SPI_SPI100_MASK);
+
+ if (amd_spi_freq[i].spd7_val) {
+ spd7_val = (amd_spi_freq[i].spd7_val << AMD_SPI_SPD7_SHIFT)
+ & AMD_SPI_SPD7_MASK;
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
+ AMD_SPI_SPD7_MASK);
+ }
+
+ return 0;
+}
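/*
 * A standalone sketch of the frequency selection added above: the table
 * is ordered from fastest to slowest and the first entry whose speed does
 * not exceed the requested rate wins, so requests are always rounded down
 * to a supported frequency.  The rates are copied from the diff; the
 * helper name is invented for illustration.
 */
#include <stdio.h>

static const unsigned int amd_rates[] = {
	100000000, 66660000, 50000000, 33330000, 22220000,
	16660000, 4000000, 3170000, 800000,
};

static unsigned int pick_rate(unsigned int speed_hz)
{
	size_t i;

	if (speed_hz < 800000)		/* below AMD_SPI_MIN_HZ: reject */
		return 0;

	for (i = 0; i < sizeof(amd_rates) / sizeof(amd_rates[0]); i++)
		if (speed_hz >= amd_rates[i])
			break;

	return amd_rates[i];
}

int main(void)
{
	/* 40 MHz is not in the table and rounds down to 33.33 MHz */
	printf("40 MHz -> %u Hz\n", pick_rate(40000000));
	return 0;
}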
+
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
struct spi_master *master,
struct spi_message *message)
{
struct spi_transfer *xfer = NULL;
- u8 cmd_opcode;
+ struct spi_device *spi = message->spi;
+ u8 cmd_opcode = 0, fifo_pos = AMD_SPI_FIFO_BASE;
u8 *buf = NULL;
- u32 m_cmd = 0;
u32 i = 0;
u32 tx_len = 0, rx_len = 0;
list_for_each_entry(xfer, &message->transfers,
transfer_list) {
- if (xfer->rx_buf)
- m_cmd = AMD_SPI_XFER_RX;
- if (xfer->tx_buf)
- m_cmd = AMD_SPI_XFER_TX;
+ if (xfer->speed_hz)
+ amd_set_spi_freq(amd_spi, xfer->speed_hz);
+ else
+ amd_set_spi_freq(amd_spi, spi->max_speed_hz);
- if (m_cmd & AMD_SPI_XFER_TX) {
+ if (xfer->tx_buf) {
buf = (u8 *)xfer->tx_buf;
- tx_len = xfer->len - 1;
- cmd_opcode = *(u8 *)xfer->tx_buf;
- buf++;
- amd_spi_set_opcode(amd_spi, cmd_opcode);
+ if (!tx_len) {
+ cmd_opcode = *(u8 *)xfer->tx_buf;
+ buf++;
+ xfer->len--;
+ }
+ tx_len += xfer->len;
/* Write data into the FIFO. */
- for (i = 0; i < tx_len; i++) {
- iowrite8(buf[i], ((u8 __iomem *)amd_spi->io_remap_addr +
- AMD_SPI_FIFO_BASE + i));
- }
+ for (i = 0; i < xfer->len; i++)
+ amd_spi_writereg8(amd_spi, fifo_pos + i, buf[i]);
- amd_spi_set_tx_count(amd_spi, tx_len);
- amd_spi_clear_fifo_ptr(amd_spi);
- /* Execute command */
- amd_spi_execute_opcode(amd_spi);
- }
- if (m_cmd & AMD_SPI_XFER_RX) {
- /*
- * Store no. of bytes to be received from
- * FIFO
- */
- rx_len = xfer->len;
- buf = (u8 *)xfer->rx_buf;
- amd_spi_set_rx_count(amd_spi, rx_len);
- amd_spi_clear_fifo_ptr(amd_spi);
- /* Execute command */
- amd_spi_execute_opcode(amd_spi);
- amd_spi_busy_wait(amd_spi);
- /* Read data from FIFO to receive buffer */
- for (i = 0; i < rx_len; i++)
- buf[i] = amd_spi_readreg8(amd_spi, AMD_SPI_FIFO_BASE + tx_len + i);
+ fifo_pos += xfer->len;
}
+
+ /* Store no. of bytes to be received from FIFO */
+ if (xfer->rx_buf)
+ rx_len += xfer->len;
+ }
+
+ if (!buf) {
+ message->status = -EINVAL;
+ goto fin_msg;
+ }
+
+ amd_spi_set_opcode(amd_spi, cmd_opcode);
+ amd_spi_set_tx_count(amd_spi, tx_len);
+ amd_spi_set_rx_count(amd_spi, rx_len);
+
+ /* Execute command */
+ message->status = amd_spi_execute_opcode(amd_spi);
+ if (message->status)
+ goto fin_msg;
+
+ if (rx_len) {
+ message->status = amd_spi_busy_wait(amd_spi);
+ if (message->status)
+ goto fin_msg;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list)
+ if (xfer->rx_buf) {
+ buf = (u8 *)xfer->rx_buf;
+ /* Read data from FIFO to receive buffer */
+ for (i = 0; i < xfer->len; i++)
+ buf[i] = amd_spi_readreg8(amd_spi, fifo_pos + i);
+ fifo_pos += xfer->len;
+ }
}
/* Update statistics */
message->actual_length = tx_len + rx_len + 1;
- /* complete the transaction */
- message->status = 0;
+fin_msg:
switch (amd_spi->version) {
case AMD_SPI_V1:
break;
@@ -260,7 +355,7 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
spi_finalize_current_message(master);
- return 0;
+ return message->status;
}
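/*
 * A standalone model of the reworked FIFO accounting above: the first
 * byte of the first tx transfer is consumed as the opcode, the remaining
 * tx bytes are packed into the FIFO in order, rx transfers are read back
 * from the positions that follow, and actual_length therefore ends up as
 * tx_len + rx_len + 1.  Buffer contents and lengths are made-up examples.
 */
#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 70	/* AMD_SPI_FIFO_SIZE */

int main(void)
{
	unsigned char fifo[FIFO_SIZE];
	const unsigned char tx[] = { 0x03, 0x00, 0x10, 0x00 };	/* read cmd + 24-bit address */
	unsigned int tx_len = 0, rx_len = 8, fifo_pos = 0;
	unsigned char opcode;

	/* the first tx byte is consumed as the opcode, not queued in the FIFO */
	opcode = tx[0];
	tx_len += sizeof(tx) - 1;
	memcpy(fifo + fifo_pos, tx + 1, sizeof(tx) - 1);
	fifo_pos += sizeof(tx) - 1;

	/* rx bytes are later read back from fifo[fifo_pos .. fifo_pos + rx_len) */
	printf("opcode 0x%02x, tx_len %u, rx at offset %u, actual_length %u\n",
	       opcode, tx_len, fifo_pos, tx_len + rx_len + 1);
	return 0;
}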
static int amd_spi_master_transfer(struct spi_master *master,
@@ -275,9 +370,7 @@ static int amd_spi_master_transfer(struct spi_master *master,
* Extract spi_transfers from the spi message and
* program the controller.
*/
- amd_spi_fifo_xfer(amd_spi, master, msg);
-
- return 0;
+ return amd_spi_fifo_xfer(amd_spi, master, msg);
}
static size_t amd_spi_max_transfer_size(struct spi_device *spi)
@@ -312,6 +405,8 @@ static int amd_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
master->mode_bits = 0;
master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->max_speed_hz = AMD_SPI_MAX_HZ;
+ master->min_speed_hz = AMD_SPI_MIN_HZ;
master->setup = amd_spi_master_setup;
master->transfer_one_message = amd_spi_master_transfer;
master->max_transfer_size = amd_spi_max_transfer_size;
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index 3e891bf22470..a334e89add86 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -736,10 +736,8 @@ static int aspeed_spi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
aspi->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(aspi->regs)) {
- dev_err(dev, "missing AHB register window\n");
+ if (IS_ERR(aspi->regs))
return PTR_ERR(aspi->regs);
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
aspi->ahb_base = devm_ioremap_resource(dev, res);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 267342dfa738..2dcbe166df63 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
/* sample LSB (from slave) on leading edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
setsck(spi, cpol);
}
return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* sample LSB (from slave) on trailing edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
}
return word;
}
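/*
 * A minimal sketch of the LSB-first receive fix above.  Each loop
 * iteration shifts the accumulator right and must place the newly
 * sampled MISO bit at the original MSB position, so that position
 * (rxbit) has to be latched as bits - 1 before the loop starts
 * decrementing the bit counter.  The sample values are arbitrary.
 */
#include <stdio.h>

static unsigned int rx_lsb_first(const int *samples, unsigned int bits)
{
	unsigned int word = 0;
	unsigned int rxbit = bits - 1;	/* fixed insertion position */
	unsigned int i;

	for (i = 0; bits; bits--, i++) {
		word >>= 1;
		word |= (unsigned int)samples[i] << rxbit;
	}
	return word;
}

int main(void)
{
	/* bits arrive LSB first: 1,0,1,1,0,0,0,0 -> 0x0d */
	const int miso[8] = { 1, 0, 1, 1, 0, 0, 0, 0 };

	printf("0x%02x\n", rx_lsb_first(miso, 8));
	return 0;
}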
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 72b1a5a2298c..447230547945 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -39,6 +39,7 @@
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+#define CQSPI_SLOW_SRAM BIT(4)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -87,6 +88,7 @@ struct cqspi_st {
bool use_dma_read;
u32 pd_dev_id;
bool wr_completion;
+ bool slow_sram;
};
struct cqspi_driver_platdata {
@@ -333,7 +335,10 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
}
}
- irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else if (!cqspi->slow_sram)
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else
+ irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
@@ -673,7 +678,18 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
- writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ /*
+ * On the SoCFPGA platform, reading the SRAM is slow due to a
+ * hardware limitation and causes a read interrupt storm on the CPU,
+ * so enable only the watermark interrupt here and disable all read
+ * interrupts later, since the "bytes to read" loop should run with
+ * read interrupts disabled for maximum performance.
+ */
+
+ if (!cqspi->slow_sram)
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ else
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
@@ -684,6 +700,13 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
+ /*
+ * Disable all read interrupts until
+ * we are out of "bytes to read"
+ */
+ if (cqspi->slow_sram)
+ writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
if (ret && bytes_to_read == 0) {
@@ -715,8 +738,11 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
}
- if (remaining > 0)
+ if (remaining > 0) {
reinit_completion(&cqspi->transfer_complete);
+ if (cqspi->slow_sram)
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+ }
}
/* Check indirect done status */
@@ -1619,7 +1645,7 @@ static int cqspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
- return ret;
+ goto probe_pm_failed;
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
@@ -1667,6 +1693,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
cqspi->wr_completion = false;
+ if (ddata->quirks & CQSPI_SLOW_SRAM)
+ cqspi->slow_sram = true;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0"))
@@ -1712,6 +1740,7 @@ probe_reset_failed:
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
pm_runtime_put_sync(dev);
+probe_pm_failed:
pm_runtime_disable(dev);
return ret;
}
@@ -1779,7 +1808,9 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
+ .quirks = CQSPI_DISABLE_DAC_MODE
+ | CQSPI_NO_SUPPORT_WR_COMPLETION
+ | CQSPI_SLOW_SRAM,
};
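/*
 * A standalone sketch of the CQSPI_SLOW_SRAM handling introduced above
 * for SoCFPGA: only the watermark interrupt is armed before the indirect
 * read, read interrupts are masked while the CPU drains the SRAM, and
 * the watermark interrupt is re-armed only when bytes remain.  The
 * register accessors and mask values are stand-ins, not the driver's
 * readl()/writel() calls.
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQ_WATERMARK	0x40	/* illustrative mask value              */
#define IRQ_MASK_RD	0x07	/* illustrative "all read interrupts"   */

static unsigned int irq_mask;

static void write_irq_mask(unsigned int m) { irq_mask = m; }
static unsigned int sram_fill_level(void)  { return 32; }	/* pretend 32 bytes arrive */

static void indirect_read(unsigned int remaining, bool slow_sram)
{
	/* slow SRAM: arm only the watermark interrupt up front */
	write_irq_mask(slow_sram ? IRQ_WATERMARK : IRQ_MASK_RD);

	while (remaining) {
		/* ...wait for the watermark/completion interrupt here... */
		if (slow_sram)
			write_irq_mask(0);	/* drain with read IRQs off */

		unsigned int chunk = sram_fill_level();

		if (chunk > remaining)
			chunk = remaining;
		remaining -= chunk;
		printf("drained %u, %u left (irq mask 0x%x)\n",
		       chunk, remaining, irq_mask);

		if (remaining && slow_sram)
			write_irq_mask(IRQ_WATERMARK);	/* re-arm */
	}
}

int main(void)
{
	indirect_read(48, true);
	return 0;
}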
static const struct cqspi_driver_platdata versal_ospi = {
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index 3ab19be83095..9e187f9c6c95 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -565,10 +565,8 @@ static int cdns_xspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
- if (IS_ERR(cdns_xspi->sdmabase)) {
- dev_err(dev, "Failed to remap SDMA address\n");
+ if (IS_ERR(cdns_xspi->sdmabase))
return PTR_ERR(cdns_xspi->sdmabase);
- }
cdns_xspi->sdmasize = resource_size(res);
cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c
index c06553416123..3fb89dee595e 100644
--- a/drivers/spi/spi-dw-bt1.c
+++ b/drivers/spi/spi-dw-bt1.c
@@ -293,8 +293,10 @@ static int dw_spi_bt1_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = dw_spi_add_host(&pdev->dev, dws);
- if (ret)
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
goto err_disable_clk;
+ }
platform_set_drvdata(pdev, dwsbt1);
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index f87d97ccd2d6..99edddf9958b 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -955,7 +955,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
ret = spi_register_controller(master);
if (ret) {
- dev_err(&master->dev, "problem registering spi master\n");
+ dev_err_probe(dev, ret, "problem registering spi master\n");
goto err_dma_exit;
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index fd004c9db9dc..a33e547b7d39 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1294,8 +1294,7 @@ static int dspi_probe(struct platform_device *pdev)
else
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_ctlr_put;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 19b1f3d881b0..e8c1c8a4c6c8 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -855,8 +855,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
init_completion(&fsl_lpspi->xfer_done);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_lpspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(fsl_lpspi->base)) {
ret = PTR_ERR(fsl_lpspi->base);
goto out_controller_put;
@@ -912,7 +911,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
- dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
+ dev_err_probe(&pdev->dev, ret, "spi_register_controller error\n");
goto free_dma;
}
@@ -947,11 +946,8 @@ static int fsl_lpspi_remove(struct platform_device *pdev)
static int __maybe_unused fsl_lpspi_suspend(struct device *dev)
{
- int ret;
-
pinctrl_pm_select_sleep_state(dev);
- ret = pm_runtime_force_suspend(dev);
- return ret;
+ return pm_runtime_force_suspend(dev);
}
static int __maybe_unused fsl_lpspi_resume(struct device *dev)
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 46ae46a944c5..85cc71ba624a 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -867,8 +867,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, q);
/* find the resources */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
- q->iobase = devm_ioremap_resource(dev, res);
+ q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI");
if (IS_ERR(q->iobase)) {
ret = PTR_ERR(q->iobase);
goto err_put_ctrl;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index bdf94cc7be1a..731624f157fc 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -111,32 +111,6 @@ static void fsl_spi_change_mode(struct spi_device *spi)
local_irq_restore(flags);
}
-static void fsl_spi_chipselect(struct spi_device *spi, int value)
-{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_platform_data *pdata;
- struct spi_mpc8xxx_cs *cs = spi->controller_state;
-
- pdata = spi->dev.parent->parent->platform_data;
-
- if (value == BITBANG_CS_INACTIVE) {
- if (pdata->cs_control)
- pdata->cs_control(spi, false);
- }
-
- if (value == BITBANG_CS_ACTIVE) {
- mpc8xxx_spi->rx_shift = cs->rx_shift;
- mpc8xxx_spi->tx_shift = cs->tx_shift;
- mpc8xxx_spi->get_rx = cs->get_rx;
- mpc8xxx_spi->get_tx = cs->get_tx;
-
- fsl_spi_change_mode(spi);
-
- if (pdata->cs_control)
- pdata->cs_control(spi, true);
- }
-}
-
static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
int bits_per_word, int msb_first)
{
@@ -354,15 +328,11 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
return mpc8xxx_spi->count;
}
-static int fsl_spi_do_one_msg(struct spi_master *master,
- struct spi_message *m)
+static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *m)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
- struct spi_device *spi = m->spi;
- struct spi_transfer *t, *first;
- unsigned int cs_change;
- const int nsecs = 50;
- int status, last_bpw;
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(ctlr);
+ struct spi_transfer *t;
/*
* In CPU mode, optimize large byte transfers to use larger
@@ -378,62 +348,30 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
t->bits_per_word = 16;
}
}
+ return 0;
+}
- /* Don't allow changes if CS is active */
- cs_change = 1;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (cs_change)
- first = t;
- cs_change = t->cs_change;
- if (first->speed_hz != t->speed_hz) {
- dev_err(&spi->dev,
- "speed_hz cannot change while CS is active\n");
- return -EINVAL;
- }
- }
-
- last_bpw = -1;
- cs_change = 1;
- status = -EINVAL;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (cs_change || last_bpw != t->bits_per_word)
- status = fsl_spi_setup_transfer(spi, t);
- if (status < 0)
- break;
- last_bpw = t->bits_per_word;
-
- if (cs_change) {
- fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
- ndelay(nsecs);
- }
- cs_change = t->cs_change;
- if (t->len)
- status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
- if (status) {
- status = -EMSGSIZE;
- break;
- }
- m->actual_length += t->len;
-
- spi_transfer_delay_exec(t);
-
- if (cs_change) {
- ndelay(nsecs);
- fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
- }
+static int fsl_spi_transfer_one(struct spi_controller *controller,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int status;
- m->status = status;
+ status = fsl_spi_setup_transfer(spi, t);
+ if (status < 0)
+ return status;
+ if (t->len)
+ status = fsl_spi_bufs(spi, t, !!t->tx_dma || !!t->rx_dma);
+ if (status > 0)
+ return -EMSGSIZE;
- if (status || !cs_change) {
- ndelay(nsecs);
- fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
- }
+ return status;
+}
- fsl_spi_setup_transfer(spi, NULL);
- spi_finalize_current_message(master);
- return 0;
+static int fsl_spi_unprepare_message(struct spi_controller *controller,
+ struct spi_message *msg)
+{
+ return fsl_spi_setup_transfer(msg->spi, NULL);
}
static int fsl_spi_setup(struct spi_device *spi)
@@ -482,9 +420,6 @@ static int fsl_spi_setup(struct spi_device *spi)
return retval;
}
- /* Initialize chipselect - might be active for SPI_CS_HIGH mode */
- fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
-
return 0;
}
@@ -557,9 +492,7 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
u32 slvsel;
u16 cs = spi->chip_select;
- if (spi->cs_gpiod) {
- gpiod_set_value(spi->cs_gpiod, on);
- } else if (cs < mpc8xxx_spi->native_chipselects) {
+ if (cs < mpc8xxx_spi->native_chipselects) {
slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
@@ -568,7 +501,6 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
static void fsl_spi_grlib_probe(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
@@ -588,7 +520,18 @@ static void fsl_spi_grlib_probe(struct device *dev)
mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
}
master->num_chipselect = mpc8xxx_spi->native_chipselects;
- pdata->cs_control = fsl_spi_grlib_cs_control;
+ master->set_cs = fsl_spi_grlib_cs_control;
+}
+
+static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+{
+ struct device *dev = spi->dev.parent->parent;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+
+ if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
+ return;
+ iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
}
static struct spi_master *fsl_spi_probe(struct device *dev,
@@ -613,8 +556,11 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
master->setup = fsl_spi_setup;
master->cleanup = fsl_spi_cleanup;
- master->transfer_one_message = fsl_spi_do_one_msg;
+ master->prepare_message = fsl_spi_prepare_message;
+ master->transfer_one = fsl_spi_transfer_one;
+ master->unprepare_message = fsl_spi_unprepare_message;
master->use_gpio_descriptors = true;
+ master->set_cs = fsl_spi_cs_control;
mpc8xxx_spi = spi_master_get_devdata(master);
mpc8xxx_spi->max_bits_per_word = 32;
@@ -688,21 +634,6 @@ err:
return ERR_PTR(ret);
}
-static void fsl_spi_cs_control(struct spi_device *spi, bool on)
-{
- if (spi->cs_gpiod) {
- gpiod_set_value(spi->cs_gpiod, on);
- } else {
- struct device *dev = spi->dev.parent->parent;
- struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
-
- if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
- return;
- iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
- }
-}
-
static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@@ -744,12 +675,10 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
ret = gpiod_count(dev, "cs");
if (ret < 0)
ret = 0;
- if (ret == 0 && !spisel_boot) {
+ if (ret == 0 && !spisel_boot)
pdata->max_chipselect = 1;
- } else {
+ else
pdata->max_chipselect = ret + spisel_boot;
- pdata->cs_control = fsl_spi_cs_control;
- }
}
ret = of_address_to_resource(np, 0, &mem);
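/*
 * A simplified standalone model of the ownership change above: with
 * prepare_message()/transfer_one()/unprepare_message() and set_cs()
 * registered, the SPI core rather than the driver walks the message,
 * asserting chip select and honouring cs_change between transfers, which
 * is why fsl_spi_do_one_msg() and fsl_spi_chipselect() could be dropped.
 * Everything below is an approximation of the core's loop, not its code.
 */
#include <stdbool.h>
#include <stdio.h>

struct xfer { int len; bool cs_change; };

static void set_cs(bool on)              { printf("CS %s\n", on ? "on" : "off"); }
static int prepare_message(void)         { printf("prepare\n"); return 0; }
static int transfer_one(struct xfer *t)  { printf("xfer %d bytes\n", t->len); return 0; }
static void unprepare_message(void)      { printf("unprepare\n"); }

static int run_message(struct xfer *t, int n)
{
	int i, ret = prepare_message();

	set_cs(true);
	for (i = 0; !ret && i < n; i++) {
		ret = transfer_one(&t[i]);
		if (!ret && t[i].cs_change && i != n - 1) {
			set_cs(false);	/* toggle CS between transfers on request */
			set_cs(true);
		}
	}
	set_cs(false);
	unprepare_message();
	return ret;
}

int main(void)
{
	struct xfer msg[] = { { 2, true }, { 16, false } };

	return run_message(msg, 2);
}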
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
index 9ea355f7d64f..15b110183839 100644
--- a/drivers/spi/spi-gxp.c
+++ b/drivers/spi/spi-gxp.c
@@ -254,7 +254,6 @@ static int gxp_spifi_probe(struct platform_device *pdev)
const struct gxp_spi_data *data;
struct spi_controller *ctlr;
struct gxp_spi *spifi;
- struct resource *res;
int ret;
data = of_device_get_match_data(&pdev->dev);
@@ -269,18 +268,15 @@ static int gxp_spifi_probe(struct platform_device *pdev)
spifi->data = data;
spifi->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spifi->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ spifi->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spifi->reg_base))
return PTR_ERR(spifi->reg_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- spifi->dat_base = devm_ioremap_resource(&pdev->dev, res);
+ spifi->dat_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(spifi->dat_base))
return PTR_ERR(spifi->dat_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- spifi->dir_base = devm_ioremap_resource(&pdev->dev, res);
+ spifi->dir_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(spifi->dir_base))
return PTR_ERR(spifi->dir_base);
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 71376b6df89d..bfd12247f173 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -730,11 +730,9 @@ static int img_spfi_resume(struct device *dev)
struct img_spfi *spfi = spi_master_get_devdata(master);
int ret;
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- }
spfi_reset(spfi);
pm_runtime_put(dev);
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index 66063687ae27..55f4ee2db002 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -116,6 +116,22 @@
#define ERASE_64K_OPCODE_SHIFT 16
#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+/* Flash descriptor fields */
+#define FLVALSIG_MAGIC 0x0ff0a55a
+#define FLMAP0_NC_MASK GENMASK(9, 8)
+#define FLMAP0_NC_SHIFT 8
+#define FLMAP0_FCBA_MASK GENMASK(7, 0)
+
+#define FLCOMP_C0DEN_MASK GENMASK(3, 0)
+#define FLCOMP_C0DEN_512K 0x00
+#define FLCOMP_C0DEN_1M 0x01
+#define FLCOMP_C0DEN_2M 0x02
+#define FLCOMP_C0DEN_4M 0x03
+#define FLCOMP_C0DEN_8M 0x04
+#define FLCOMP_C0DEN_16M 0x05
+#define FLCOMP_C0DEN_32M 0x06
+#define FLCOMP_C0DEN_64M 0x07
+
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -129,6 +145,7 @@
* @master: Pointer to the SPI controller structure
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
+ * @chip0_size: Size of the first flash chip in bytes
* @locked: Is SPI setting locked
* @swseq_reg: Use SW sequencer in register reads/writes
* @swseq_erase: Use SW sequencer in erase operation
@@ -146,6 +163,7 @@ struct intel_spi {
struct spi_controller *master;
size_t nregions;
size_t pr_num;
+ size_t chip0_size;
bool locked;
bool swseq_reg;
bool swseq_erase;
@@ -158,6 +176,7 @@ struct intel_spi_mem_op {
struct spi_mem_op mem_op;
u32 replacement_op;
int (*exec_op)(struct intel_spi *ispi,
+ const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op);
};
@@ -441,7 +460,16 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len,
return 0;
}
-static int intel_spi_read_reg(struct intel_spi *ispi,
+static u32 intel_spi_chip_addr(const struct intel_spi *ispi,
+ const struct spi_mem *mem)
+{
+ /* Pick up the correct start address */
+ if (!mem)
+ return 0;
+ return mem->spi->chip_select == 1 ? ispi->chip0_size : 0;
+}
+
+static int intel_spi_read_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
@@ -449,8 +477,7 @@ static int intel_spi_read_reg(struct intel_spi *ispi,
u8 opcode = op->cmd.opcode;
int ret;
- /* Address of the first chip */
- writel(0, ispi->base + FADDR);
+ writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
if (ispi->swseq_reg)
ret = intel_spi_sw_cycle(ispi, opcode, nbytes,
@@ -464,7 +491,7 @@ static int intel_spi_read_reg(struct intel_spi *ispi,
return intel_spi_read_block(ispi, op->data.buf.in, nbytes);
}
-static int intel_spi_write_reg(struct intel_spi *ispi,
+static int intel_spi_write_reg(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
@@ -511,7 +538,7 @@ static int intel_spi_write_reg(struct intel_spi *ispi,
if (opcode == SPINOR_OP_WRDI)
return 0;
- writel(0, ispi->base + FADDR);
+ writel(intel_spi_chip_addr(ispi, mem), ispi->base + FADDR);
/* Write the value beforehand */
ret = intel_spi_write_block(ispi, op->data.buf.out, nbytes);
@@ -524,13 +551,13 @@ static int intel_spi_write_reg(struct intel_spi *ispi,
return intel_spi_hw_cycle(ispi, opcode, nbytes);
}
-static int intel_spi_read(struct intel_spi *ispi,
+static int intel_spi_read(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
- void *read_buf = op->data.buf.in;
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t block_size, nbytes = op->data.nbytes;
- u32 addr = op->addr.val;
+ void *read_buf = op->data.buf.in;
u32 val, status;
int ret;
@@ -585,13 +612,13 @@ static int intel_spi_read(struct intel_spi *ispi,
return 0;
}
-static int intel_spi_write(struct intel_spi *ispi,
+static int intel_spi_write(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
size_t block_size, nbytes = op->data.nbytes;
const void *write_buf = op->data.buf.out;
- u32 addr = op->addr.val;
u32 val, status;
int ret;
@@ -648,12 +675,12 @@ static int intel_spi_write(struct intel_spi *ispi,
return 0;
}
-static int intel_spi_erase(struct intel_spi *ispi,
+static int intel_spi_erase(struct intel_spi *ispi, const struct spi_mem *mem,
const struct intel_spi_mem_op *iop,
const struct spi_mem_op *op)
{
+ u32 addr = intel_spi_chip_addr(ispi, mem) + op->addr.val;
u8 opcode = op->cmd.opcode;
- u32 addr = op->addr.val;
u32 val, status;
int ret;
@@ -765,7 +792,7 @@ static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *o
if (!iop)
return -EOPNOTSUPP;
- return iop->exec_op(ispi, iop, op);
+ return iop->exec_op(ispi, mem, iop, op);
}
static const char *intel_spi_get_name(struct spi_mem *mem)
@@ -805,7 +832,7 @@ static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
op.data.nbytes = len;
op.data.buf.in = buf;
- ret = iop->exec_op(ispi, iop, &op);
+ ret = iop->exec_op(ispi, desc->mem, iop, &op);
return ret ? ret : len;
}
@@ -821,7 +848,7 @@ static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs
op.data.nbytes = len;
op.data.buf.out = buf;
- ret = iop->exec_op(ispi, iop, &op);
+ ret = iop->exec_op(ispi, desc->mem, iop, &op);
return ret ? ret : len;
}
@@ -1073,6 +1100,7 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->pregs = ispi->base + CNL_PR;
ispi->nregions = CNL_FREG_NUM;
ispi->pr_num = CNL_PR_NUM;
+ erase_64k = true;
break;
default:
@@ -1226,10 +1254,98 @@ static void intel_spi_fill_partition(struct intel_spi *ispi,
}
}
+static int intel_spi_read_desc(struct intel_spi *ispi)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 0),
+ SPI_MEM_OP_ADDR(3, 0, 0),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(0, NULL, 0));
+ u32 buf[2], nc, fcba, flcomp;
+ ssize_t ret;
+
+ op.addr.val = 0x10;
+ op.data.buf.in = buf;
+ op.data.nbytes = sizeof(buf);
+
+ ret = intel_spi_read(ispi, NULL, NULL, &op);
+ if (ret) {
+ dev_warn(ispi->dev, "failed to read descriptor\n");
+ return ret;
+ }
+
+ dev_dbg(ispi->dev, "FLVALSIG=0x%08x\n", buf[0]);
+ dev_dbg(ispi->dev, "FLMAP0=0x%08x\n", buf[1]);
+
+ if (buf[0] != FLVALSIG_MAGIC) {
+ dev_warn(ispi->dev, "descriptor signature not valid\n");
+ return -ENODEV;
+ }
+
+ fcba = (buf[1] & FLMAP0_FCBA_MASK) << 4;
+ dev_dbg(ispi->dev, "FCBA=%#x\n", fcba);
+
+ op.addr.val = fcba;
+ op.data.buf.in = &flcomp;
+ op.data.nbytes = sizeof(flcomp);
+
+ ret = intel_spi_read(ispi, NULL, NULL, &op);
+ if (ret) {
+ dev_warn(ispi->dev, "failed to read FLCOMP\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(ispi->dev, "FLCOMP=0x%08x\n", flcomp);
+
+ switch (flcomp & FLCOMP_C0DEN_MASK) {
+ case FLCOMP_C0DEN_512K:
+ ispi->chip0_size = SZ_512K;
+ break;
+ case FLCOMP_C0DEN_1M:
+ ispi->chip0_size = SZ_1M;
+ break;
+ case FLCOMP_C0DEN_2M:
+ ispi->chip0_size = SZ_2M;
+ break;
+ case FLCOMP_C0DEN_4M:
+ ispi->chip0_size = SZ_4M;
+ break;
+ case FLCOMP_C0DEN_8M:
+ ispi->chip0_size = SZ_8M;
+ break;
+ case FLCOMP_C0DEN_16M:
+ ispi->chip0_size = SZ_16M;
+ break;
+ case FLCOMP_C0DEN_32M:
+ ispi->chip0_size = SZ_32M;
+ break;
+ case FLCOMP_C0DEN_64M:
+ ispi->chip0_size = SZ_64M;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ispi->dev, "chip0 size %zd KB\n", ispi->chip0_size / SZ_1K);
+
+ nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
+ if (!nc)
+ ispi->master->num_chipselect = 1;
+ else if (nc == 1)
+ ispi->master->num_chipselect = 2;
+ else
+ return -EINVAL;
+
+ dev_dbg(ispi->dev, "%u flash components found\n",
+ ispi->master->num_chipselect);
+ return 0;
+}
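/*
 * A standalone sketch of the descriptor decode added above: FLMAP0
 * carries the flash component base address (FCBA, bits 7:0, in 16-byte
 * units) and the component count (NC, bits 9:8), and FLCOMP bits 3:0
 * encode the first chip's density as 512 KB shifted left by the field
 * value.  The register values below are invented examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned int flmap0 = 0x00000103;	/* NC = 1 (two components), FCBA = 0x03 */
	unsigned int flcomp = 0x00000004;	/* C0DEN = 4 -> 8 MB first chip          */

	unsigned int fcba = (flmap0 & 0xff) << 4;
	unsigned int components = ((flmap0 >> 8) & 0x3) + 1;
	unsigned long chip0_size = (512UL * 1024) << (flcomp & 0xf);

	/* a second chip, if present, is addressed starting at chip0_size */
	printf("FCBA %#x, %u component(s), chip0 %lu KB\n",
	       fcba, components, chip0_size / 1024);
	return 0;
}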
+
static int intel_spi_populate_chip(struct intel_spi *ispi)
{
struct flash_platform_data *pdata;
struct spi_board_info chip;
+ int ret;
pdata = devm_kzalloc(ispi->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -1247,7 +1363,23 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
snprintf(chip.modalias, 8, "spi-nor");
chip.platform_data = pdata;
- return spi_new_device(ispi->master, &chip) ? 0 : -ENODEV;
+ if (!spi_new_device(ispi->master, &chip))
+ return -ENODEV;
+
+ /* Add the second chip if present */
+ if (ispi->master->num_chipselect < 2)
+ return 0;
+
+ ret = intel_spi_read_desc(ispi);
+ if (ret)
+ return ret;
+
+ chip.platform_data = NULL;
+ chip.chip_select = 1;
+
+ if (!spi_new_device(ispi->master, &chip))
+ return -ENODEV;
+ return 0;
}
/**
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 4d4f77a186a9..dd7de8fa37d0 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -313,6 +313,33 @@ static struct spi_test spi_tests[] = {
},
},
},
+ {
+ .description = "three tx+rx transfers with overlapping cache lines",
+ .fill_option = FILL_COUNT_8,
+ /*
+ * This should be large enough for the controller driver to
+ * choose to transfer it with DMA.
+ */
+ .iterate_len = { 512, -1 },
+ .iterate_transfer_mask = BIT(1),
+ .transfer_count = 3,
+ .transfers = {
+ {
+ .len = 1,
+ .tx_buf = TX(0),
+ .rx_buf = RX(0),
+ },
+ {
+ .tx_buf = TX(1),
+ .rx_buf = RX(1),
+ },
+ {
+ .len = 1,
+ .tx_buf = TX(513),
+ .rx_buf = RX(513),
+ },
+ },
+ },
{ /* end of tests sequence */ }
};
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index e4cb52e1fe26..bad201510a99 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -537,7 +537,7 @@ static unsigned long meson_spicc_pow2_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
- if (!spicc->master->cur_msg || !spicc->master->busy)
+ if (!spicc->master->cur_msg)
return 0;
return clk_divider_ops.recalc_rate(hw, parent_rate);
@@ -549,7 +549,7 @@ static int meson_spicc_pow2_determine_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
- if (!spicc->master->cur_msg || !spicc->master->busy)
+ if (!spicc->master->cur_msg)
return -EINVAL;
return clk_divider_ops.determine_rate(hw, req);
@@ -561,13 +561,13 @@ static int meson_spicc_pow2_set_rate(struct clk_hw *hw, unsigned long rate,
struct clk_divider *divider = to_clk_divider(hw);
struct meson_spicc_device *spicc = pow2_clk_to_spicc(divider);
- if (!spicc->master->cur_msg || !spicc->master->busy)
+ if (!spicc->master->cur_msg)
return -EINVAL;
return clk_divider_ops.set_rate(hw, rate, parent_rate);
}
-const struct clk_ops meson_spicc_pow2_clk_ops = {
+static const struct clk_ops meson_spicc_pow2_clk_ops = {
.recalc_rate = meson_spicc_pow2_recalc_rate,
.determine_rate = meson_spicc_pow2_determine_rate,
.set_rate = meson_spicc_pow2_set_rate,
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
new file mode 100644
index 000000000000..19a6a46829f6
--- /dev/null
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: (GPL-2.0)
+/*
+ * Microchip coreQSPI QSPI controller driver
+ *
+ * Copyright (C) 2018-2022 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/*
+ * QSPI Control register mask defines
+ */
+#define CONTROL_ENABLE BIT(0)
+#define CONTROL_MASTER BIT(1)
+#define CONTROL_XIP BIT(2)
+#define CONTROL_XIPADDR BIT(3)
+#define CONTROL_CLKIDLE BIT(10)
+#define CONTROL_SAMPLE_MASK GENMASK(12, 11)
+#define CONTROL_MODE0 BIT(13)
+#define CONTROL_MODE12_MASK GENMASK(15, 14)
+#define CONTROL_MODE12_EX_RO BIT(14)
+#define CONTROL_MODE12_EX_RW BIT(15)
+#define CONTROL_MODE12_FULL GENMASK(15, 14)
+#define CONTROL_FLAGSX4 BIT(16)
+#define CONTROL_CLKRATE_MASK GENMASK(27, 24)
+#define CONTROL_CLKRATE_SHIFT 24
+
+/*
+ * QSPI Frames register mask defines
+ */
+#define FRAMES_TOTALBYTES_MASK GENMASK(15, 0)
+#define FRAMES_CMDBYTES_MASK GENMASK(24, 16)
+#define FRAMES_CMDBYTES_SHIFT 16
+#define FRAMES_SHIFT 25
+#define FRAMES_IDLE_MASK GENMASK(29, 26)
+#define FRAMES_IDLE_SHIFT 26
+#define FRAMES_FLAGBYTE BIT(30)
+#define FRAMES_FLAGWORD BIT(31)
+
+/*
+ * QSPI Interrupt Enable register mask defines
+ */
+#define IEN_TXDONE BIT(0)
+#define IEN_RXDONE BIT(1)
+#define IEN_RXAVAILABLE BIT(2)
+#define IEN_TXAVAILABLE BIT(3)
+#define IEN_RXFIFOEMPTY BIT(4)
+#define IEN_TXFIFOFULL BIT(5)
+
+/*
+ * QSPI Status register mask defines
+ */
+#define STATUS_TXDONE BIT(0)
+#define STATUS_RXDONE BIT(1)
+#define STATUS_RXAVAILABLE BIT(2)
+#define STATUS_TXAVAILABLE BIT(3)
+#define STATUS_RXFIFOEMPTY BIT(4)
+#define STATUS_TXFIFOFULL BIT(5)
+#define STATUS_READY BIT(7)
+#define STATUS_FLAGSX4 BIT(8)
+#define STATUS_MASK GENMASK(8, 0)
+
+#define BYTESUPPER_MASK GENMASK(31, 16)
+#define BYTESLOWER_MASK GENMASK(15, 0)
+
+#define MAX_DIVIDER 16
+#define MIN_DIVIDER 0
+#define MAX_DATA_CMD_LEN 256
+
+/* QSPI ready time out value */
+#define TIMEOUT_MS 500
+
+/*
+ * QSPI Register offsets.
+ */
+#define REG_CONTROL (0x00)
+#define REG_FRAMES (0x04)
+#define REG_IEN (0x0c)
+#define REG_STATUS (0x10)
+#define REG_DIRECT_ACCESS (0x14)
+#define REG_UPPER_ACCESS (0x18)
+#define REG_RX_DATA (0x40)
+#define REG_TX_DATA (0x44)
+#define REG_X4_RX_DATA (0x48)
+#define REG_X4_TX_DATA (0x4c)
+#define REG_FRAMESUP (0x50)
+
+/**
+ * struct mchp_coreqspi - Defines qspi driver instance
+ * @regs: Virtual address of the QSPI controller registers
+ * @clk: QSPI Operating clock
+ * @data_completion: completion structure
+ * @op_lock: lock access to the device
+ * @txbuf: TX buffer
+ * @rxbuf: RX buffer
+ * @irq: IRQ number
+ * @tx_len: Number of bytes left to transfer
+ * @rx_len: Number of bytes left to receive
+ */
+struct mchp_coreqspi {
+ void __iomem *regs;
+ struct clk *clk;
+ struct completion data_completion;
+ struct mutex op_lock; /* lock access to the device */
+ u8 *txbuf;
+ u8 *rxbuf;
+ int irq;
+ int tx_len;
+ int rx_len;
+};
+
+static int mchp_coreqspi_set_mode(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+{
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ /*
+ * The operating mode can be configured based on the command that needs to be sent.
+ * bits[15:14]: Sets whether multiple bit SPI operates in normal, extended or full modes.
+ * 00: Normal (single DQ0 TX and single DQ1 RX lines)
+ * 01: Extended RO (command and address bytes on DQ0 only)
+ * 10: Extended RW (command byte on DQ0 only)
+ * 11: Full. (command and address are on all DQ lines)
+ * bit[13]: Sets whether multiple bit SPI uses 2 or 4 bits of data
+ * 0: 2-bits (BSPI)
+ * 1: 4-bits (QSPI)
+ */
+ if (op->data.buswidth == 4 || op->data.buswidth == 2) {
+ control &= ~CONTROL_MODE12_MASK;
+ if (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))
+ control |= CONTROL_MODE12_EX_RO;
+ else if (op->cmd.buswidth == 1)
+ control |= CONTROL_MODE12_EX_RW;
+ else
+ control |= CONTROL_MODE12_FULL;
+
+ control |= CONTROL_MODE0;
+ } else {
+ control &= ~(CONTROL_MODE12_MASK |
+ CONTROL_MODE0);
+ }
+
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
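/*
 * A standalone sketch of the mode selection described in the comment
 * above: for dual/quad data the EX_RO, EX_RW or FULL encoding of
 * control bits [15:14] is chosen from the command/address bus widths.
 * The bit values match the CONTROL_MODE* defines in this patch; the
 * helper and its inputs are illustrative only.
 */
#include <stdio.h>

#define MODE12_EX_RO	(1u << 14)
#define MODE12_EX_RW	(1u << 15)
#define MODE12_FULL	(3u << 14)
#define MODE0		(1u << 13)

static unsigned int mode_bits(int data_bw, int cmd_bw, int addr_bw)
{
	unsigned int control = 0;

	if (data_bw == 4 || data_bw == 2) {
		if (cmd_bw == 1 && (addr_bw == 1 || addr_bw == 0))
			control |= MODE12_EX_RO;	/* cmd + addr on DQ0 */
		else if (cmd_bw == 1)
			control |= MODE12_EX_RW;	/* cmd on DQ0 only   */
		else
			control |= MODE12_FULL;		/* all on DQ lines   */
		control |= MODE0;			/* 4-bit (QSPI) data */
	}
	return control;
}

int main(void)
{
	/* e.g. a 1-1-4 fast read: command and address on DQ0, data on 4 lines */
	printf("control bits 0x%x\n", mode_bits(4, 1, 1));
	return 0;
}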
+
+static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
+{
+ u32 control, data;
+
+ if (!qspi->rx_len)
+ return;
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ /*
+ * Read 4 bytes from the SPI FIFO in a single transaction and then read
+ * the remaining data byte-wise.
+ */
+ control |= CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->rx_len >= 4) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+ ;
+ data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
+ *(u32 *)qspi->rxbuf = data;
+ qspi->rxbuf += 4;
+ qspi->rx_len -= 4;
+ }
+
+ control &= ~CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->rx_len--) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+ ;
+ data = readl_relaxed(qspi->regs + REG_RX_DATA);
+ *qspi->rxbuf++ = (data & 0xFF);
+ }
+}
+
+static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
+{
+ u32 control, data;
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control |= CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->tx_len >= 4) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+ ;
+ data = *(u32 *)qspi->txbuf;
+ qspi->txbuf += 4;
+ qspi->tx_len -= 4;
+ writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
+ }
+
+ control &= ~CONTROL_FLAGSX4;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ while (qspi->tx_len--) {
+ while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+ ;
+ data = *qspi->txbuf++;
+ writel_relaxed(data, qspi->regs + REG_TX_DATA);
+ }
+}
+
+static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
+{
+ u32 mask = IEN_TXDONE |
+ IEN_RXDONE |
+ IEN_RXAVAILABLE;
+
+ writel_relaxed(mask, qspi->regs + REG_IEN);
+}
+
+static void mchp_coreqspi_disable_ints(struct mchp_coreqspi *qspi)
+{
+ writel_relaxed(0, qspi->regs + REG_IEN);
+}
+
+static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
+{
+ struct mchp_coreqspi *qspi = (struct mchp_coreqspi *)dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ int intfield = readl_relaxed(qspi->regs + REG_STATUS) & STATUS_MASK;
+
+ if (intfield == 0)
+ return ret;
+
+ if (intfield & IEN_TXDONE) {
+ writel_relaxed(IEN_TXDONE, qspi->regs + REG_STATUS);
+ ret = IRQ_HANDLED;
+ }
+
+ if (intfield & IEN_RXAVAILABLE) {
+ writel_relaxed(IEN_RXAVAILABLE, qspi->regs + REG_STATUS);
+ mchp_coreqspi_read_op(qspi);
+ ret = IRQ_HANDLED;
+ }
+
+ if (intfield & IEN_RXDONE) {
+ writel_relaxed(IEN_RXDONE, qspi->regs + REG_STATUS);
+ complete(&qspi->data_completion);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi)
+{
+ unsigned long clk_hz;
+ u32 control, baud_rate_val = 0;
+
+ clk_hz = clk_get_rate(qspi->clk);
+ if (!clk_hz)
+ return -EINVAL;
+
+ baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz);
+ if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
+ dev_err(&spi->dev,
+ "could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
+ spi->max_speed_hz, clk_hz);
+ return -EINVAL;
+ }
+
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+ control |= baud_rate_val << CONTROL_CLKRATE_SHIFT;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ if ((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA))
+ control |= CONTROL_CLKIDLE;
+ else
+ control &= ~CONTROL_CLKIDLE;
+
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
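/*
 * A worked example for the divider computation above, assuming (as the
 * DIV_ROUND_UP() call implies) that SCK = clk / (2 * CLKRATE) and that
 * the value must not exceed MAX_DIVIDER (16).  Rounding the divider up
 * rounds the resulting SCK down, so the requested rate is never
 * exceeded.  The clock numbers are examples only.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long clk_hz = 150000000;	/* example system clock */
	unsigned int max_speed_hz = 20000000;	/* requested SPI clock  */
	unsigned long div = DIV_ROUND_UP(clk_hz, 2UL * max_speed_hz);

	if (div > 16) {
		printf("cannot reach %u Hz from %lu Hz\n", max_speed_hz, clk_hz);
		return 1;
	}
	printf("divider %lu -> SCK %lu Hz\n", div, clk_hz / (2UL * div));
	return 0;
}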
+
+static int mchp_coreqspi_setup_op(struct spi_device *spi_dev)
+{
+ struct spi_controller *ctlr = spi_dev->master;
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ control |= (CONTROL_MASTER | CONTROL_ENABLE);
+ control &= ~CONTROL_CLKIDLE;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+ return 0;
+}
+
+static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const struct spi_mem_op *op)
+{
+ u32 idle_cycles = 0;
+ int total_bytes, cmd_bytes, frames, ctrl;
+
+ cmd_bytes = op->cmd.nbytes + op->addr.nbytes;
+ total_bytes = cmd_bytes + op->data.nbytes;
+
+ /*
+ * As per the coreQSPI IP spec, the number of command and data bytes is
+ * controlled by the frames register for each SPI sequence. This supports
+ * the SPI flash memory read and write sequences below, so configure
+ * the cmd and total bytes accordingly.
+ * ---------------------------------------------------------------------
+ * TOTAL BYTES | CMD BYTES | What happens |
+ * ______________________________________________________________________
+ * | | |
+ * 1 | 1 | The SPI core will transmit a single byte |
+ * | | and receive data is discarded |
+ * | | |
+ * 1 | 0 | The SPI core will transmit a single byte |
+ * | | and return a single byte |
+ * | | |
+ * 10 | 4 | The SPI core will transmit 4 command |
+ * | | bytes, discarding the received data, |
+ * | | then transmit 6 dummy bytes and return |
+ * | | the 6 received bytes |
+ * | | |
+ * 10 | 10 | The SPI core will transmit 10 command |
+ * | | bytes and discard the received data |
+ * | | |
+ * 10 | 0 | The SPI core will transmit 10 command |
+ * | | bytes and return 10 received bytes |
+ * ______________________________________________________________________
+ */
+ if (!(op->data.dir == SPI_MEM_DATA_IN))
+ cmd_bytes = total_bytes;
+
+ frames = total_bytes & BYTESUPPER_MASK;
+ writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
+ frames = total_bytes & BYTESLOWER_MASK;
+ frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
+
+ if (op->dummy.buswidth)
+ idle_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+
+ frames |= idle_cycles << FRAMES_IDLE_SHIFT;
+ ctrl = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ if (ctrl & CONTROL_MODE12_MASK)
+ frames |= (1 << FRAMES_SHIFT);
+
+ frames |= FRAMES_FLAGWORD;
+ writel_relaxed(frames, qspi->regs + REG_FRAMES);
+}
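/*
 * A worked example of the frames-register arithmetic described in the
 * table above, for a typical 1-1-x fast read (1 command byte, 3 address
 * bytes, 8 dummy clock cycles, 64 data bytes in).  Shift positions are
 * taken from the FRAMES_* defines in this patch; the operation itself is
 * an assumption for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cmd_nbytes = 1, addr_nbytes = 3, data_nbytes = 64;
	unsigned int dummy_nbytes = 1, dummy_buswidth = 1;	/* 8 dummy cycles */
	int data_in = 1;

	unsigned int cmd_bytes = cmd_nbytes + addr_nbytes;
	unsigned int total_bytes = cmd_bytes + data_nbytes;
	unsigned int idle_cycles = dummy_nbytes * 8 / dummy_buswidth;

	/* for writes the received data is discarded, so cmd covers everything */
	if (!data_in)
		cmd_bytes = total_bytes;

	unsigned int frames = (total_bytes & 0xffff) |
			      (cmd_bytes << 16) |	/* FRAMES_CMDBYTES_SHIFT */
			      (idle_cycles << 26) |	/* FRAMES_IDLE_SHIFT     */
			      (1u << 31);		/* FRAMES_FLAGWORD       */

	printf("total %u, cmd %u, idle %u -> FRAMES 0x%08x\n",
	       total_bytes, cmd_bytes, idle_cycles, frames);
	return 0;
}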
+
+static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
+{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata
+ (mem->spi->master);
+ u32 status;
+ int ret;
+
+ ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
+ (status & STATUS_READY), 0,
+ TIMEOUT_MS);
+ if (ret) {
+ dev_err(&mem->spi->dev,
+ "Timeout waiting on QSPI ready.\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mchp_coreqspi *qspi = spi_controller_get_devdata
+ (mem->spi->master);
+ u32 address = op->addr.val;
+ u8 opcode = op->cmd.opcode;
+ u8 opaddr[5];
+ int err, i;
+
+ mutex_lock(&qspi->op_lock);
+ err = mchp_qspi_wait_for_ready(mem);
+ if (err)
+ goto error;
+
+ err = mchp_coreqspi_setup_clock(qspi, mem->spi);
+ if (err)
+ goto error;
+
+ err = mchp_coreqspi_set_mode(qspi, op);
+ if (err)
+ goto error;
+
+ reinit_completion(&qspi->data_completion);
+ mchp_coreqspi_config_op(qspi, op);
+ if (op->cmd.opcode) {
+ qspi->txbuf = &opcode;
+ qspi->rxbuf = NULL;
+ qspi->tx_len = op->cmd.nbytes;
+ qspi->rx_len = 0;
+ mchp_coreqspi_write_op(qspi, false);
+ }
+
+ qspi->txbuf = &opaddr[0];
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ qspi->txbuf[i] = address >> (8 * (op->addr.nbytes - i - 1));
+
+ qspi->rxbuf = NULL;
+ qspi->tx_len = op->addr.nbytes;
+ qspi->rx_len = 0;
+ mchp_coreqspi_write_op(qspi, false);
+ }
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ qspi->txbuf = (u8 *)op->data.buf.out;
+ qspi->rxbuf = NULL;
+ qspi->rx_len = 0;
+ qspi->tx_len = op->data.nbytes;
+ mchp_coreqspi_write_op(qspi, true);
+ } else {
+ qspi->txbuf = NULL;
+ qspi->rxbuf = (u8 *)op->data.buf.in;
+ qspi->rx_len = op->data.nbytes;
+ qspi->tx_len = 0;
+ }
+ }
+
+ mchp_coreqspi_enable_ints(qspi);
+
+ if (!wait_for_completion_timeout(&qspi->data_completion, msecs_to_jiffies(1000)))
+ err = -ETIMEDOUT;
+
+error:
+ mutex_unlock(&qspi->op_lock);
+ mchp_coreqspi_disable_ints(qspi);
+
+ return err;
+}
+
+static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if ((op->data.buswidth == 4 || op->data.buswidth == 2) &&
+ (op->cmd.buswidth == 1 && (op->addr.buswidth == 1 || op->addr.buswidth == 0))) {
+ /*
+ * If the command and address are on DQ0 only, this
+ * controller doesn't support sending data on dual or
+ * quad lines, but it does support reading data on dual or
+ * quad lines with the command and address still on DQ0.
+ * That is, the control register [15:13] EX_RO (read only)
+ * mode only covers the case where command and address are
+ * on DQ0 and data is read, not written.
+ * For example, 0x34 (Quad Load Program Data) is not
+ * supported; the spi-mem layer will then iterate over
+ * the remaining commands and choose a supported one.
+ */
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return false;
+ }
+
+ return true;
+}
+
+static int mchp_coreqspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_OUT || op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->data.nbytes > MAX_DATA_CMD_LEN)
+ op->data.nbytes = MAX_DATA_CMD_LEN;
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = {
+ .adjust_op_size = mchp_coreqspi_adjust_op_size,
+ .supports_op = mchp_coreqspi_supports_op,
+ .exec_op = mchp_coreqspi_exec_op,
+};
+
+static int mchp_coreqspi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mchp_coreqspi *qspi;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!ctlr)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "unable to allocate master for QSPI controller\n");
+
+ qspi = spi_controller_get_devdata(ctlr);
+ platform_set_drvdata(pdev, qspi);
+
+ qspi->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qspi->regs))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qspi->regs),
+ "failed to map registers\n");
+
+ qspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(qspi->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(qspi->clk),
+ "could not get clock\n");
+
+ ret = clk_prepare_enable(qspi->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to enable clock\n");
+
+ init_completion(&qspi->data_completion);
+ mutex_init(&qspi->op_lock);
+
+ qspi->irq = platform_get_irq(pdev, 0);
+ if (qspi->irq < 0) {
+ ret = qspi->irq;
+ goto out;
+ }
+
+ ret = devm_request_irq(&pdev->dev, qspi->irq, mchp_coreqspi_isr,
+ IRQF_SHARED, pdev->name, qspi);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed %d\n", ret);
+ goto out;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->mem_ops = &mchp_coreqspi_mem_ops;
+ ctlr->setup = mchp_coreqspi_setup_op;
+ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->dev.of_node = np;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "spi_register_controller failed\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ clk_disable_unprepare(qspi->clk);
+
+ return ret;
+}
+
+static int mchp_coreqspi_remove(struct platform_device *pdev)
+{
+ struct mchp_coreqspi *qspi = platform_get_drvdata(pdev);
+ u32 control = readl_relaxed(qspi->regs + REG_CONTROL);
+
+ mchp_coreqspi_disable_ints(qspi);
+ control &= ~CONTROL_ENABLE;
+ writel_relaxed(control, qspi->regs + REG_CONTROL);
+ clk_disable_unprepare(qspi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mchp_coreqspi_of_match[] = {
+ { .compatible = "microchip,coreqspi-rtl-v2" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mchp_coreqspi_of_match);
+
+static struct platform_driver mchp_coreqspi_driver = {
+ .probe = mchp_coreqspi_probe,
+ .driver = {
+ .name = "microchip,coreqspi",
+ .of_match_table = mchp_coreqspi_of_match,
+ },
+ .remove = mchp_coreqspi_remove,
+};
+module_platform_driver(mchp_coreqspi_driver);
+
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasuresh.relli@microchip.com>");
+MODULE_DESCRIPTION("Microchip coreQSPI QSPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index ce4385330b19..d352844c798c 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -548,12 +548,12 @@ static int mchp_corespi_probe(struct platform_device *pdev)
IRQF_SHARED, dev_name(&pdev->dev), master);
if (ret)
return dev_err_probe(&pdev->dev, ret,
- "could not request irq: %d\n", ret);
+ "could not request irq\n");
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(spi->clk),
- "could not get clk: %d\n", ret);
+ "could not get clk\n");
ret = clk_prepare_enable(spi->clk);
if (ret)
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index bc5e36fd4288..cb075c1acbee 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -11,13 +11,14 @@
*/
#include <linux/module.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -89,7 +90,7 @@ struct mpc52xx_spi {
const u8 *tx_buf;
int cs_change;
int gpio_cs_count;
- unsigned int *gpio_cs;
+ struct gpio_desc **gpio_cs;
};
/*
@@ -101,9 +102,10 @@ static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
if (ms->gpio_cs_count > 0) {
cs = ms->message->spi->chip_select;
- gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
- } else
+ gpiod_set_value(ms->gpio_cs[cs], value);
+ } else {
out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
+ }
}
/*
@@ -385,10 +387,10 @@ static int mpc52xx_spi_probe(struct platform_device *op)
{
struct spi_master *master;
struct mpc52xx_spi *ms;
+ struct gpio_desc *gpio_cs;
void __iomem *regs;
u8 ctrl1;
int rc, i = 0;
- int gpio_cs;
/* MMIO registers */
dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
@@ -438,7 +440,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
ms->state = mpc52xx_spi_fsmstate_idle;
ms->ipb_freq = mpc5xxx_get_bus_frequency(&op->dev);
- ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
+ ms->gpio_cs_count = gpiod_count(&op->dev, NULL);
if (ms->gpio_cs_count > 0) {
master->num_chipselect = ms->gpio_cs_count;
ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
@@ -450,23 +452,16 @@ static int mpc52xx_spi_probe(struct platform_device *op)
}
for (i = 0; i < ms->gpio_cs_count; i++) {
- gpio_cs = of_get_gpio(op->dev.of_node, i);
- if (!gpio_is_valid(gpio_cs)) {
- dev_err(&op->dev,
- "could not parse the gpio field in oftree\n");
- rc = -ENODEV;
- goto err_gpio;
- }
-
- rc = gpio_request(gpio_cs, dev_name(&op->dev));
+ gpio_cs = gpiod_get_index(&op->dev,
+ NULL, i, GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(gpio_cs);
if (rc) {
dev_err(&op->dev,
- "can't request spi cs gpio #%d on gpio line %d\n",
- i, gpio_cs);
+ "failed to get spi cs gpio #%d: %d\n",
+ i, rc);
goto err_gpio;
}
- gpio_direction_output(gpio_cs, 1);
ms->gpio_cs[i] = gpio_cs;
}
}
@@ -507,7 +502,7 @@ static int mpc52xx_spi_probe(struct platform_device *op)
dev_err(&ms->master->dev, "initialization failed\n");
err_gpio:
while (i-- > 0)
- gpio_free(ms->gpio_cs[i]);
+ gpiod_put(ms->gpio_cs[i]);
kfree(ms->gpio_cs);
err_alloc_gpio:
@@ -528,7 +523,7 @@ static int mpc52xx_spi_remove(struct platform_device *op)
free_irq(ms->irq1, ms);
for (i = 0; i < ms->gpio_cs_count; i++)
- gpio_free(ms->gpio_cs[i]);
+ gpiod_put(ms->gpio_cs[i]);
kfree(ms->gpio_cs);
spi_unregister_master(master);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0a3b9f7eed30..11aeae7fe7fc 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1184,6 +1184,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
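+ /* Cap the DMA segment size to what the controller can handle: 16 MiB on IPM designs, 256 KiB otherwise */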
+ if (mdata->dev_comp->ipm_design)
+ dma_set_max_seg_size(dev, SZ_16M);
+ else
+ dma_set_max_seg_size(dev, SZ_256K);
+
ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
IRQF_TRIGGER_NONE, dev_name(dev), master);
if (ret)
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index b4b9b7309b5e..c4cc8e2f85e2 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -55,7 +55,6 @@ struct mt7621_spi {
void __iomem *base;
unsigned int sys_freq;
unsigned int speed;
- struct clk *clk;
int pending_write;
};
@@ -327,7 +326,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
struct spi_controller *master;
struct mt7621_spi *rs;
void __iomem *base;
- int status = 0;
struct clk *clk;
int ret;
@@ -339,21 +337,14 @@ static int mt7621_spi_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n",
- status);
- return PTR_ERR(clk);
- }
-
- status = clk_prepare_enable(clk);
- if (status)
- return status;
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "unable to get SYS clock\n");
master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs));
if (!master) {
dev_info(&pdev->dev, "master allocation failed\n");
- clk_disable_unprepare(clk);
return -ENOMEM;
}
@@ -369,38 +360,18 @@ static int mt7621_spi_probe(struct platform_device *pdev)
rs = spi_controller_get_devdata(master);
rs->base = base;
- rs->clk = clk;
rs->master = master;
- rs->sys_freq = clk_get_rate(rs->clk);
+ rs->sys_freq = clk_get_rate(clk);
rs->pending_write = 0;
dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
ret = device_reset(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "SPI reset failed!\n");
- clk_disable_unprepare(clk);
return ret;
}
- ret = spi_register_controller(master);
- if (ret)
- clk_disable_unprepare(clk);
-
- return ret;
-}
-
-static int mt7621_spi_remove(struct platform_device *pdev)
-{
- struct spi_controller *master;
- struct mt7621_spi *rs;
-
- master = dev_get_drvdata(&pdev->dev);
- rs = spi_controller_get_devdata(master);
-
- spi_unregister_controller(master);
- clk_disable_unprepare(rs->clk);
-
- return 0;
+ return devm_spi_register_controller(&pdev->dev, master);
}
MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -411,7 +382,6 @@ static struct platform_driver mt7621_spi_driver = {
.of_match_table = mt7621_spi_match,
},
.probe = mt7621_spi_probe,
- .remove = mt7621_spi_remove,
};
module_platform_driver(mt7621_spi_driver);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index f5d32ec4634e..0709e987bd5a 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -161,6 +161,7 @@ static int spi_mux_probe(struct spi_device *spi)
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
+ ctlr->must_async = true;
ret = devm_spi_register_controller(&spi->dev, ctlr);
if (ret)
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
index 1668a347e003..7f2e4d1b0d43 100644
--- a/drivers/spi/spi-npcm-pspi.c
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -443,6 +443,7 @@ static int npcm_pspi_remove(struct platform_device *pdev)
static const struct of_device_id npcm_pspi_match[] = {
{ .compatible = "nuvoton,npcm750-pspi", .data = NULL },
+ { .compatible = "nuvoton,npcm845-pspi", .data = NULL },
{}
};
MODULE_DEVICE_TABLE(of, npcm_pspi_match);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 2b0301fc971c..d6a65a989ef8 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -588,7 +588,7 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
{
int ret;
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
return 0;
ret = clk_prepare_enable(f->clk_en);
@@ -606,7 +606,7 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
{
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
return 0;
clk_disable_unprepare(f->clk);
@@ -1100,7 +1100,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, f);
/* find the resources - configuration register address space */
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
else
res = platform_get_resource_byname(pdev,
@@ -1113,7 +1113,7 @@ static int nxp_fspi_probe(struct platform_device *pdev)
}
/* find the resources - controller memory mapped space */
- if (is_acpi_node(f->dev->fwnode))
+ if (is_acpi_node(dev_fwnode(f->dev)))
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
else
res = platform_get_resource_byname(pdev,
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 20b047172965..061f7394e5b9 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -412,6 +412,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
return status;
err_fck:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(spi100k->fck);
err_ick:
clk_disable_unprepare(spi100k->ick);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index c42e59df38fe..6ba9b0d7710b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1509,10 +1509,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
status = platform_get_irq(pdev, 0);
- if (status == -EPROBE_DEFER)
- goto free_master;
if (status < 0) {
- dev_err(&pdev->dev, "no irq resource found\n");
+ dev_err_probe(&pdev->dev, status, "no irq resource found\n");
goto free_master;
}
init_completion(&mcspi->txdone);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 838d12e65144..2bf21c2e7a52 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1441,31 +1441,6 @@ static const struct of_device_id pxa2xx_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
-#ifdef CONFIG_ACPI
-
-static int pxa2xx_spi_get_port_id(struct device *dev)
-{
- struct acpi_device *adev;
- unsigned int devid;
- int port_id = -1;
-
- adev = ACPI_COMPANION(dev);
- if (adev && adev->pnp.unique_id &&
- !kstrtouint(adev->pnp.unique_id, 0, &devid))
- port_id = devid;
- return port_id;
-}
-
-#else /* !CONFIG_ACPI */
-
-static int pxa2xx_spi_get_port_id(struct device *dev)
-{
- return -1;
-}
-
-#endif /* CONFIG_ACPI */
-
-
#ifdef CONFIG_PCI
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
@@ -1479,13 +1454,16 @@ static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_controller *pdata;
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
struct ssp_device *ssp;
struct resource *res;
- struct device *parent = pdev->dev.parent;
struct pci_dev *pcidev = dev_is_pci(parent) ? to_pci_dev(parent) : NULL;
const struct pci_device_id *pcidev_id = NULL;
enum pxa_ssp_type type;
const void *match;
+ int status;
+ u64 uid;
if (pcidev)
pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match, pcidev);
@@ -1529,7 +1507,12 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
ssp->type = type;
ssp->dev = &pdev->dev;
- ssp->port_id = pxa2xx_spi_get_port_id(&pdev->dev);
+
+ status = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (status)
+ ssp->port_id = -1;
+ else
+ ssp->port_id = uid;
pdata->is_slave = device_property_read_bool(&pdev->dev, "spi-slave");
pdata->num_chipselect = 1;
@@ -1873,10 +1856,8 @@ static int pxa2xx_spi_runtime_suspend(struct device *dev)
static int pxa2xx_spi_runtime_resume(struct device *dev)
{
struct driver_data *drv_data = dev_get_drvdata(dev);
- int status;
- status = clk_prepare_enable(drv_data->ssp->clk);
- return status;
+ return clk_prepare_enable(drv_data->ssp->clk);
}
#endif
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 00d6084306b4..7d89510dc3f0 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1198,8 +1198,10 @@ static int spi_qup_pm_resume_runtime(struct device *device)
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
/* Disable clocks auto gaiting */
config = readl_relaxed(controller->base + QUP_CONFIG);
@@ -1245,14 +1247,25 @@ static int spi_qup_resume(struct device *device)
return ret;
ret = clk_prepare_enable(controller->cclk);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(controller->iclk);
return ret;
+ }
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret)
- return ret;
+ goto disable_clk;
- return spi_master_resume(master);
+ ret = spi_master_resume(master);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 660aa866af06..ef25b5e93900 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -449,7 +449,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
struct spi_master *master;
int err = 0;
- master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
dev_err(&pdev->dev, "No memory for spi_master\n");
return -ENOMEM;
@@ -463,8 +463,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
if (pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
- err = -ENOENT;
- goto err_no_pdata;
+ return -ENOENT;
}
platform_set_drvdata(pdev, hw);
@@ -499,29 +498,24 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
/* find and map our resources */
hw->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hw->regs)) {
- err = PTR_ERR(hw->regs);
- goto err_no_pdata;
- }
+ if (IS_ERR(hw->regs))
+ return PTR_ERR(hw->regs);
hw->irq = platform_get_irq(pdev, 0);
- if (hw->irq < 0) {
- err = -ENOENT;
- goto err_no_pdata;
- }
+ if (hw->irq < 0)
+ return -ENOENT;
err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
pdev->name, hw);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- goto err_no_pdata;
+ return err;
}
hw->clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(hw->clk)) {
dev_err(&pdev->dev, "No clock for device\n");
- err = PTR_ERR(hw->clk);
- goto err_no_pdata;
+ return PTR_ERR(hw->clk);
}
s3c24xx_spi_initialsetup(hw);
@@ -539,8 +533,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
err_register:
clk_disable(hw->clk);
- err_no_pdata:
- spi_master_put(hw->master);
return err;
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7f346866614a..71d324ec9a70 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -84,6 +84,7 @@
#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+#define S3C64XX_SPI_PACKET_CNT_MASK GENMASK(15, 0)
#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
@@ -389,8 +390,8 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
- sdd->rx_dma.ch = 0;
- sdd->tx_dma.ch = 0;
+ sdd->rx_dma.ch = NULL;
+ sdd->tx_dma.ch = NULL;
}
return 0;
@@ -711,6 +712,13 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
return 0;
}
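+/* DMA transfers are bounded by the 16-bit packet count field; PIO transfers are effectively unbounded */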
+static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
+{
+ struct spi_controller *ctlr = spi->controller;
+
+ return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
+}
+
static int s3c64xx_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
@@ -1152,6 +1160,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
+ master->max_transfer_size = s3c64xx_spi_max_transfer_size;
master->num_chipselect = sci->num_cs;
master->use_gpio_descriptors = true;
master->dma_alignment = 8;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d0012b30410c..9bca3d076f05 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1085,6 +1085,7 @@ static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data },
{ .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data },
+ { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data },
{ .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
{},
};
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index f3fe92300639..9131660c1afb 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
@@ -355,10 +356,10 @@ static int stm32_qspi_get_mode(u8 buswidth)
return buswidth;
}
-static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
+static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
{
- struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
- struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
+ struct stm32_qspi *qspi = spi_controller_get_devdata(spi->master);
+ struct stm32_qspi_flash *flash = &qspi->flash[spi->chip_select];
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
@@ -465,7 +466,7 @@ static int stm32_qspi_poll_status(struct spi_mem *mem, const struct spi_mem_op *
qspi->fmode = CCR_FMODE_APM;
qspi->status_timeout = timeout_ms;
- ret = stm32_qspi_send(mem, op);
+ ret = stm32_qspi_send(mem->spi, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
@@ -489,7 +490,7 @@ static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
else
qspi->fmode = CCR_FMODE_INDW;
- ret = stm32_qspi_send(mem, op);
+ ret = stm32_qspi_send(mem->spi, op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
@@ -545,7 +546,7 @@ static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
else
qspi->fmode = CCR_FMODE_INDR;
- ret = stm32_qspi_send(desc->mem, &op);
+ ret = stm32_qspi_send(desc->mem->spi, &op);
mutex_unlock(&qspi->lock);
pm_runtime_mark_last_busy(qspi->dev);
@@ -554,12 +555,96 @@ static ssize_t stm32_qspi_dirmap_read(struct spi_mem_dirmap_desc *desc,
return ret ?: len;
}
+static int stm32_qspi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ int ret = 0;
+
+ if (!spi->cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(qspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&qspi->lock);
+
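+ /* Drive the chip select by hand for the whole message; stm32_qspi_send() only programs the data phases */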
+ gpiod_set_value_cansleep(spi->cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(qspi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+ /*
+ * The QSPI hardware supports dummy-byte transfers.
+ * If the current transfer carries only dummy bytes, merge it
+ * with the next transfer to satisfy the QSPI block's constraints.
+ */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+ /* If this happens, the message was not built correctly */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ qspi->fmode = CCR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ qspi->fmode = CCR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_qspi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(spi->cs_gpiod, false);
+
+ mutex_unlock(&qspi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ pm_runtime_put_autosuspend(qspi->dev);
+
+ return ret;
+}
+
static int stm32_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
- u32 presc;
+ u32 presc, mode;
int ret;
if (ctrl->busy)
@@ -568,6 +653,16 @@ static int stm32_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
+ mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
+ if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
+ ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
+ gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
+ dev_err(qspi->dev, "configuration not supported\n");
+
+ return -EINVAL;
+ }
+
ret = pm_runtime_resume_and_get(qspi->dev);
if (ret < 0)
return ret;
@@ -580,6 +675,16 @@ static int stm32_qspi_setup(struct spi_device *spi)
mutex_lock(&qspi->lock);
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
+
+ /*
+ * Dual flash mode is only enabled when both SPI_TX_OCTAL and
+ * SPI_RX_OCTAL are set in spi->mode and the "cs-gpios" property
+ * is found in the DT
+ */
+ if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ qspi->cr_reg |= CR_DFM;
+ dev_dbg(qspi->dev, "Dual flash mode enabled");
+ }
+
writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
@@ -741,11 +846,13 @@ static int stm32_qspi_probe(struct platform_device *pdev)
mutex_init(&qspi->lock);
- ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
- | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
+ | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_OCTAL;
ctrl->setup = stm32_qspi_setup;
ctrl->bus_num = -1;
ctrl->mem_ops = &stm32_qspi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_qspi_transfer_one_message;
ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
ctrl->dev.of_node = dev->of_node;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 523edfdf5dcd..7377d3b81302 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -421,7 +421,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
return -ENODEV;
@@ -439,10 +439,8 @@ static int xilinx_spi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xspi->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(xspi->regs)) {
- ret = PTR_ERR(xspi->regs);
- goto put_master;
- }
+ if (IS_ERR(xspi->regs))
+ return PTR_ERR(xspi->regs);
master->bus_num = pdev->id;
master->num_chipselect = num_cs;
@@ -472,14 +470,13 @@ static int xilinx_spi_probe(struct platform_device *pdev)
xspi->irq = platform_get_irq(pdev, 0);
if (xspi->irq < 0 && xspi->irq != -ENXIO) {
- ret = xspi->irq;
- goto put_master;
+ return xspi->irq;
} else if (xspi->irq >= 0) {
/* Register for SPI Interrupt */
ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
dev_name(&pdev->dev), xspi);
if (ret)
- goto put_master;
+ return ret;
}
/* SPI controller initializations */
@@ -488,7 +485,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
ret = spi_bitbang_start(&xspi->bitbang);
if (ret) {
dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
- goto put_master;
+ return ret;
}
dev_info(&pdev->dev, "at %pR, irq=%d\n", res, xspi->irq);
@@ -500,11 +497,6 @@ static int xilinx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
return 0;
-
-put_master:
- spi_master_put(master);
-
- return ret;
}
static int xilinx_spi_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index fc2b5eb7d614..2fa7608f94cd 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -83,7 +83,7 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
- master = spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
if (!master)
return -ENOMEM;
@@ -97,30 +97,24 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
xspi->bitbang.chipselect = xtfpga_spi_chipselect;
xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
xspi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(xspi->regs)) {
- ret = PTR_ERR(xspi->regs);
- goto err;
- }
+ if (IS_ERR(xspi->regs))
+ return PTR_ERR(xspi->regs);
xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
usleep_range(1000, 2000);
if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
dev_err(&pdev->dev, "Device stuck in busy state\n");
- ret = -EBUSY;
- goto err;
+ return -EBUSY;
}
ret = spi_bitbang_start(&xspi->bitbang);
if (ret < 0) {
dev_err(&pdev->dev, "spi_bitbang_start failed\n");
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, master);
return 0;
-err:
- spi_master_put(master);
- return ret;
}
static int xtfpga_spi_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 83da8862b8f2..5f9aedd1f0b6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -753,7 +753,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
- strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
+ strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
proxy->dev.platform_data = (void *) chip->platform_data;
proxy->controller_data = chip->controller_data;
proxy->controller_state = NULL;
@@ -1010,9 +1010,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
}
#ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, void *buf, size_t len,
- enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir, unsigned long attrs)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1078,28 +1078,41 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
sg = sg_next(sg);
}
- ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
- if (!ret)
- ret = -ENOMEM;
+ ret = dma_map_sgtable(dev, sgt, dir, attrs);
if (ret < 0) {
sg_free_table(sgt);
return ret;
}
- sgt->nents = ret;
-
return 0;
}
-void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
- struct sg_table *sgt, enum dma_data_direction dir)
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+ struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
if (sgt->orig_nents) {
- dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ dma_unmap_sgtable(dev, sgt, dir, attrs);
sg_free_table(sgt);
+ sgt->orig_nents = 0;
+ sgt->nents = 0;
}
}
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
@@ -1124,29 +1137,37 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
rx_dev = ctlr->dev.parent;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync is done before each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
if (xfer->tx_buf != NULL) {
- ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf,
+ xfer->len, DMA_TO_DEVICE,
+ attrs);
if (ret != 0)
return ret;
}
if (xfer->rx_buf != NULL) {
- ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
- xfer->rx_buf, xfer->len,
- DMA_FROM_DEVICE);
+ ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE, attrs);
if (ret != 0) {
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
- DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, tx_dev,
+ &xfer->tx_sg, DMA_TO_DEVICE,
+ attrs);
+
return ret;
}
}
}
+ ctlr->cur_rx_dma_dev = rx_dev;
+ ctlr->cur_tx_dma_dev = tx_dev;
ctlr->cur_msg_mapped = true;
return 0;
@@ -1154,38 +1175,60 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
struct spi_transfer *xfer;
- struct device *tx_dev, *rx_dev;
if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
return 0;
- if (ctlr->dma_tx)
- tx_dev = ctlr->dma_tx->device->dev;
- else if (ctlr->dma_map_dev)
- tx_dev = ctlr->dma_map_dev;
- else
- tx_dev = ctlr->dev.parent;
-
- if (ctlr->dma_rx)
- rx_dev = ctlr->dma_rx->device->dev;
- else if (ctlr->dma_map_dev)
- rx_dev = ctlr->dma_map_dev;
- else
- rx_dev = ctlr->dev.parent;
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* The sync has already been done after each transfer. */
+ unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
if (!ctlr->can_dma(ctlr, msg->spi, xfer))
continue;
- spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
- spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+ DMA_FROM_DEVICE, attrs);
+ spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE, attrs);
}
ctlr->cur_msg_mapped = false;
return 0;
}
+
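+/* Sync the mapped buffers to the device before the controller starts the transfer */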
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
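+/* Sync the mapped buffers back to the CPU once the transfer completes or fails */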
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ struct device *rx_dev = ctlr->cur_rx_dma_dev;
+ struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+ if (!ctlr->cur_msg_mapped)
+ return;
+
+ if (xfer->rx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ if (xfer->tx_sg.orig_nents)
+ dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
struct spi_message *msg)
@@ -1198,6 +1241,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
{
return 0;
}
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+ struct spi_transfer *xfer)
+{
+}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1435,7 +1488,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
- spi_set_cs(msg->spi, true, false);
+ xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
+ spi_set_cs(msg->spi, !xfer->cs_off, false);
SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
@@ -1455,8 +1509,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
reinit_completion(&ctlr->xfer_completion);
fallback_pio:
+ spi_dma_sync_for_device(ctlr, xfer);
ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
if (ret < 0) {
+ spi_dma_sync_for_cpu(ctlr, xfer);
+
if (ctlr->cur_msg_mapped &&
(xfer->error & SPI_TRANS_FAIL_NO_START)) {
__spi_unmap_msg(ctlr, msg);
@@ -1479,6 +1536,8 @@ fallback_pio:
if (ret < 0)
msg->status = ret;
}
+
+ spi_dma_sync_for_cpu(ctlr, xfer);
} else {
if (xfer->len)
dev_err(&msg->spi->dev,
@@ -1503,10 +1562,15 @@ fallback_pio:
&msg->transfers)) {
keep_cs = true;
} else {
- spi_set_cs(msg->spi, false, false);
+ if (!xfer->cs_off)
+ spi_set_cs(msg->spi, false, false);
_spi_transfer_cs_change_delay(msg, xfer);
- spi_set_cs(msg->spi, true, false);
+ if (!list_next_entry(xfer, transfer_list)->cs_off)
+ spi_set_cs(msg->spi, true, false);
}
+ } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
+ xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
+ spi_set_cs(msg->spi, xfer->cs_off, false);
}
msg->actual_length += xfer->len;
@@ -1587,6 +1651,15 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
trace_spi_message_start(msg);
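+ /* Split transfers that exceed the controller's reported maximum transfer size */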
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi),
+ GFP_KERNEL | GFP_DMA);
+ if (ret) {
+ msg->status = ret;
+ spi_finalize_current_message(ctlr);
+ return ret;
+ }
+
if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
@@ -1727,8 +1800,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
- if (!ret)
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
ctlr->cur_msg = NULL;
ctlr->fallback = false;
@@ -2330,7 +2402,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
goto err_out;
}
- strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
+ strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
ancillary->chip_select = chip_select;
@@ -2726,7 +2798,7 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
if (!spi)
return -ENOMEM;
- strlcpy(spi->modalias, name, sizeof(spi->modalias));
+ strscpy(spi->modalias, name, sizeof(spi->modalias));
rc = spi_add_device(spi);
if (rc) {
@@ -4033,7 +4105,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
* guard against reentrancy from a different context. The io_mutex
* will catch those cases.
*/
- if (READ_ONCE(ctlr->queue_empty)) {
+ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
message->actual_length = 0;
message->status = -EINPROGRESS;
@@ -4375,7 +4447,7 @@ static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
switch (value) {
case ACPI_RECONFIG_DEVICE_ADD:
- ctlr = acpi_spi_find_controller_by_adev(adev->parent);
+ ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
if (!ctlr)
break;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 2113be40b5a9..2cf3203b2397 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -91,7 +91,7 @@ enum pmic_arb_channel {
/* Maximum number of support PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 512
-#define PMIC_ARB_TIMEOUT_US 100
+#define PMIC_ARB_TIMEOUT_US 1000
#define PMIC_ARB_MAX_TRANS_BYTES (8)
#define PMIC_ARB_APID_MASK 0xFF
@@ -590,23 +590,16 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
u8 per = ppid & 0xFF;
u8 irq_mask = BIT(id);
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "%s apid=%d sid=0x%x per=0x%x irq=%d\n",
+ __func__, apid, sid, per, id);
writel_relaxed(irq_mask, pmic_arb->ver_ops->irq_clear(pmic_arb, apid));
-
- if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
- (per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
- irq_mask, ppid);
-
- if (pmic_arb_write_cmd(pmic_arb->spmic, SPMI_CMD_EXT_WRITEL, sid,
- (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pmic_arb->spmic->dev, "failed to ack irq_mask = 0x%x for ppid = %x\n",
- irq_mask, ppid);
}
-static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
+static int periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
{
unsigned int irq;
u32 status, id;
+ int handled = 0;
u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
@@ -621,7 +614,10 @@ static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
continue;
}
generic_handle_irq(irq);
+ handled++;
}
+
+ return handled;
}
static void pmic_arb_chained_irq(struct irq_desc *desc)
@@ -629,28 +625,66 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
struct spmi_pmic_arb *pmic_arb = irq_desc_get_handler_data(desc);
const struct pmic_arb_ver_ops *ver_ops = pmic_arb->ver_ops;
struct irq_chip *chip = irq_desc_get_chip(desc);
- int first = pmic_arb->min_apid >> 5;
- int last = pmic_arb->max_apid >> 5;
+ int first = pmic_arb->min_apid;
+ int last = pmic_arb->max_apid;
u8 ee = pmic_arb->ee;
- u32 status, enable;
+ u32 status, enable, handled = 0;
int i, id, apid;
+ /* status based dispatch */
+ bool acc_valid = false;
+ u32 irq_status = 0;
chained_irq_enter(chip, desc);
- for (i = first; i <= last; ++i) {
+ for (i = first >> 5; i <= last >> 5; ++i) {
status = readl_relaxed(
ver_ops->owner_acc_status(pmic_arb, ee, i));
+ if (status)
+ acc_valid = true;
+
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
apid = id + i * 32;
+ if (apid < first || apid > last) {
+ WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+ apid);
+ continue;
+ }
enable = readl_relaxed(
ver_ops->acc_enable(pmic_arb, apid));
if (enable & SPMI_PIC_ACC_ENABLE_BIT)
- periph_interrupt(pmic_arb, apid);
+ if (periph_interrupt(pmic_arb, apid) != 0)
+ handled++;
+ }
+ }
+
+ /* ACC_STATUS is empty but an IRQ fired; check IRQ_STATUS */
+ if (!acc_valid) {
+ for (i = first; i <= last; i++) {
+ /* skip if APPS is not the IRQ owner */
+ if (pmic_arb->apid_data[i].irq_ee != pmic_arb->ee)
+ continue;
+
+ irq_status = readl_relaxed(
+ ver_ops->irq_status(pmic_arb, i));
+ if (irq_status) {
+ enable = readl_relaxed(
+ ver_ops->acc_enable(pmic_arb, i));
+ if (enable & SPMI_PIC_ACC_ENABLE_BIT) {
+ dev_dbg(&pmic_arb->spmic->dev,
+ "Dispatching IRQ for apid=%d status=%x\n",
+ i, irq_status);
+ if (periph_interrupt(pmic_arb, i) != 0)
+ handled++;
+ }
+ }
}
}
+ if (handled == 0)
+ handle_bad_irq(desc);
+
chained_irq_exit(chip, desc);
}
@@ -770,6 +804,7 @@ static int qpnpint_irq_domain_activate(struct irq_domain *domain,
u16 apid = hwirq_to_apid(d->hwirq);
u16 sid = hwirq_to_sid(d->hwirq);
u16 irq = hwirq_to_irq(d->hwirq);
+ u8 buf;
if (pmic_arb->apid_data[apid].irq_ee != pmic_arb->ee) {
dev_err(&pmic_arb->spmic->dev, "failed to xlate sid = %#x, periph = %#x, irq = %u: ee=%u but owner=%u\n",
@@ -778,6 +813,10 @@ static int qpnpint_irq_domain_activate(struct irq_domain *domain,
return -ENODEV;
}
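+ /* Start from a clean slate: disable the interrupt and clear any latched status */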
+ buf = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1);
+
return 0;
}
@@ -992,7 +1031,8 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
* version 5, there is more than one APID mapped to each PPID.
* The owner field for each of these mappings specifies the EE which is
* allowed to write to the APID. The owner of the last (highest) APID
- * for a given PPID will receive interrupts from the PPID.
+ * which has the IRQ owner bit set for a given PPID will receive
+ * interrupts from the PPID.
*/
for (i = 0; ; i++, apidd++) {
offset = pmic_arb->ver_ops->apid_map_offset(i);
@@ -1015,16 +1055,16 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pmic_arb)
apid = pmic_arb->ppid_to_apid[ppid] & ~PMIC_ARB_APID_VALID;
prev_apidd = &pmic_arb->apid_data[apid];
- if (valid && is_irq_ee &&
- prev_apidd->write_ee == pmic_arb->ee) {
+ if (!valid || apidd->write_ee == pmic_arb->ee) {
+ /* First PPID mapping or one for this EE */
+ pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
+ } else if (valid && is_irq_ee &&
+ prev_apidd->write_ee == pmic_arb->ee) {
/*
* Duplicate PPID mapping after the one for this EE;
* override the irq owner
*/
prev_apidd->irq_ee = apidd->irq_ee;
- } else if (!valid || is_irq_ee) {
- /* First PPID mapping or duplicate for another EE */
- pmic_arb->ppid_to_apid[ppid] = i | PMIC_ARB_APID_VALID;
}
apidd->ppid = ppid;
@@ -1093,6 +1133,11 @@ static int pmic_arb_offset_v5(struct spmi_pmic_arb *pmic_arb, u8 sid, u16 addr,
offset = 0x10000 * pmic_arb->ee + 0x80 * apid;
break;
case PMIC_ARB_CHANNEL_RW:
+ if (pmic_arb->apid_data[apid].write_ee != pmic_arb->ee) {
+ dev_err(&pmic_arb->spmic->dev, "disallowed SPMI write to sid=%u, addr=0x%04X\n",
+ sid, addr);
+ return -EPERM;
+ }
offset = 0x10000 * apid;
break;
}
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index a456ce5141e1..55381592bb5a 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -35,7 +35,7 @@ static void spmi_ctrl_release(struct device *dev)
{
struct spmi_controller *ctrl = to_spmi_controller(dev);
- ida_simple_remove(&ctrl_ida, ctrl->nr);
+ ida_free(&ctrl_ida, ctrl->nr);
kfree(ctrl);
}
@@ -457,7 +457,7 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent,
ctrl->dev.of_node = parent->of_node;
spmi_controller_set_drvdata(ctrl, &ctrl[1]);
- id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&ctrl_ida, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate SPMI controller identifier.\n");
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3bd80f9695ac..5cfabd5376cc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -60,10 +60,6 @@ source "drivers/staging/board/Kconfig"
source "drivers/staging/gdm724x/Kconfig"
-source "drivers/staging/fwserial/Kconfig"
-
-source "drivers/staging/clocking-wizard/Kconfig"
-
source "drivers/staging/fbtft/Kconfig"
source "drivers/staging/most/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 1d9ae39fea14..f8c3aa9c2418 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -20,8 +20,6 @@ obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
-obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_KS7010) += ks7010/
diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
deleted file mode 100644
index 2324b5d73788..000000000000
--- a/drivers/staging/clocking-wizard/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Xilinx Clocking Wizard Driver
-#
-
-config COMMON_CLK_XLNX_CLKWZRD
- tristate "Xilinx Clocking Wizard"
- depends on COMMON_CLK && OF && HAS_IOMEM
- help
- Support for the Xilinx Clocking Wizard IP core clock generator.
diff --git a/drivers/staging/clocking-wizard/Makefile b/drivers/staging/clocking-wizard/Makefile
deleted file mode 100644
index b1f915224d96..000000000000
--- a/drivers/staging/clocking-wizard/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clk-xlnx-clock-wizard.o
diff --git a/drivers/staging/clocking-wizard/TODO b/drivers/staging/clocking-wizard/TODO
deleted file mode 100644
index c7e1dc58dfba..000000000000
--- a/drivers/staging/clocking-wizard/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO:
- - support for fractional multiplier
- - support for fractional divider (output 0 only)
- - support for set_rate() operations (may benefit from Stephen Boyd's
- refactoring of the clk primitives:
- https://lore.kernel.org/lkml/1409957256-23729-1-git-send-email-sboyd@codeaurora.org)
- - review arithmetic
- - overflow after multiplication?
- - maximize accuracy before divisions
-
-Patches to:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- Sören Brinkmann <soren.brinkmann@xilinx.com>
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
deleted file mode 100644
index efb67ff9f76c..000000000000
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Binding for Xilinx Clocking Wizard IP Core
-
-This binding uses the common clock binding[1]. Details about the devices can be
-found in the product guide[2].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Clocking Wizard Product Guide
-https://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-clk-wiz.pdf
-
-Required properties:
- - compatible: Must be 'xlnx,clocking-wizard'
- - reg: Base and size of the cores register space
- - clocks: Handle to input clock
- - clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
- - clock-output-names: Names for the output clocks
-
-Optional properties:
- - speed-grade: Speed grade of the device (valid values are 1..3)
-
-Example:
- clock-generator@40040000 {
- reg = <0x40040000 0x1000>;
- compatible = "xlnx,clocking-wizard";
- speed-grade = <1>;
- clock-names = "clk_in1", "s_axi_aclk";
- clocks = <&clkc 15>, <&clkc 15>;
- clock-output-names = "clk_out0", "clk_out1", "clk_out2",
- "clk_out3", "clk_out4", "clk_out5",
- "clk_out6", "clk_out7";
- };
diff --git a/drivers/staging/fwserial/Kconfig b/drivers/staging/fwserial/Kconfig
deleted file mode 100644
index 6964aac2a7ed..000000000000
--- a/drivers/staging/fwserial/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config FIREWIRE_SERIAL
- tristate "TTY over Firewire"
- depends on FIREWIRE && TTY
- help
- This enables TTY over IEEE 1394, providing high-speed serial
- connectivity to cabled peers. This driver implements a
- ad-hoc transport protocol and is currently limited to
- Linux-to-Linux communication.
-
- To compile this driver as a module, say M here: the module will
- be called firewire-serial.
-
-if FIREWIRE_SERIAL
-
-config FWTTY_MAX_TOTAL_PORTS
- int "Maximum number of serial ports supported"
- default "64"
- help
- Set this to the maximum number of serial ports you want the
- firewire-serial driver to support.
-
-config FWTTY_MAX_CARD_PORTS
- int "Maximum number of serial ports supported per adapter"
- range 0 FWTTY_MAX_TOTAL_PORTS
- default "32"
- help
- Set this to the maximum number of serial ports each firewire
- adapter supports. The actual number of serial ports registered
- is set with the module parameter "ttys".
-
-endif
diff --git a/drivers/staging/fwserial/Makefile b/drivers/staging/fwserial/Makefile
deleted file mode 100644
index 1cd5c5c7e805..000000000000
--- a/drivers/staging/fwserial/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_FIREWIRE_SERIAL) += firewire-serial.o
-firewire-serial-objs := fwserial.o dma_fifo.o
diff --git a/drivers/staging/fwserial/TODO b/drivers/staging/fwserial/TODO
deleted file mode 100644
index 382a7959407c..000000000000
--- a/drivers/staging/fwserial/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
-TODOs prior to this driver moving out of staging
-------------------------------------------------
-1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
- - I/O is handled asynchronously which presents some issues when error
- conditions occur.
-2. Implement _robust_ console on top of this. The existing prototype console
- driver is not ready for the big leagues yet.
-3. Expose means of controlling attach/detach of peers via sysfs. Include
- GUID-to-port matching/whitelist/blacklist.
-
--- Issues with firewire stack --
-1. This driver uses the same unregistered vendor id that the firewire core does
- (0xd00d1e). Perhaps this could be exposed as a define in
- firewire.h?
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
deleted file mode 100644
index 5dcbab6fd622..000000000000
--- a/drivers/staging/fwserial/dma_fifo.c
+++ /dev/null
@@ -1,294 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * DMA-able FIFO implementation
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/bug.h>
-
-#include "dma_fifo.h"
-
-#ifdef DEBUG_TRACING
-#define df_trace(s, args...) pr_debug(s, ##args)
-#else
-#define df_trace(s, args...)
-#endif
-
-#define FAIL(fifo, condition, format...) ({ \
- fifo->corrupt = !!(condition); \
- WARN(fifo->corrupt, format); \
-})
-
-/*
- * private helper fn to determine if check is in open interval (lo,hi)
- */
-static bool addr_check(unsigned int check, unsigned int lo, unsigned int hi)
-{
- return check - (lo + 1) < (hi - 1) - lo;
-}
-
-/**
- * dma_fifo_init: initialize the fifo to a valid but inoperative state
- * @fifo: address of in-place "struct dma_fifo" object
- */
-void dma_fifo_init(struct dma_fifo *fifo)
-{
- memset(fifo, 0, sizeof(*fifo));
- INIT_LIST_HEAD(&fifo->pending);
-}
-
-/**
- * dma_fifo_alloc - initialize and allocate dma_fifo
- * @fifo: address of in-place "struct dma_fifo" object
- * @size: 'apparent' size, in bytes, of fifo
- * @align: dma alignment to maintain (should be at least cpu cache alignment),
- * must be power of 2
- * @tx_limit: maximum # of bytes transmissible per dma (rounded down to
- * multiple of alignment, but at least align size)
- * @open_limit: maximum # of outstanding dma transactions allowed
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- *
- * The 'apparent' size will be rounded up to next greater aligned size.
- * Returns 0 if no error, otherwise an error code
- */
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
- int tx_limit, int open_limit, gfp_t gfp_mask)
-{
- int capacity;
-
- if (!is_power_of_2(align) || size < 0)
- return -EINVAL;
-
- size = round_up(size, align);
- capacity = size + align * open_limit + align * DMA_FIFO_GUARD;
- fifo->data = kmalloc(capacity, gfp_mask);
- if (!fifo->data)
- return -ENOMEM;
-
- fifo->in = 0;
- fifo->out = 0;
- fifo->done = 0;
- fifo->size = size;
- fifo->avail = size;
- fifo->align = align;
- fifo->tx_limit = max_t(int, round_down(tx_limit, align), align);
- fifo->open = 0;
- fifo->open_limit = open_limit;
- fifo->guard = size + align * open_limit;
- fifo->capacity = capacity;
- fifo->corrupt = 0;
-
- return 0;
-}
-
-/**
- * dma_fifo_free - frees the fifo
- * @fifo: address of in-place "struct dma_fifo" to free
- *
- * Also reinits the fifo to a valid but inoperative state. This
- * allows the fifo to be reused with a different target requiring
- * different fifo parameters.
- */
-void dma_fifo_free(struct dma_fifo *fifo)
-{
- struct dma_pending *pending, *next;
-
- if (!fifo->data)
- return;
-
- list_for_each_entry_safe(pending, next, &fifo->pending, link)
- list_del_init(&pending->link);
- kfree(fifo->data);
- fifo->data = NULL;
-}
-
-/**
- * dma_fifo_reset - dumps the fifo contents and reinits for reuse
- * @fifo: address of in-place "struct dma_fifo" to reset
- */
-void dma_fifo_reset(struct dma_fifo *fifo)
-{
- struct dma_pending *pending, *next;
-
- if (!fifo->data)
- return;
-
- list_for_each_entry_safe(pending, next, &fifo->pending, link)
- list_del_init(&pending->link);
- fifo->in = 0;
- fifo->out = 0;
- fifo->done = 0;
- fifo->avail = fifo->size;
- fifo->open = 0;
- fifo->corrupt = 0;
-}
-
-/**
- * dma_fifo_in - copies data into the fifo
- * @fifo: address of in-place "struct dma_fifo" to write to
- * @src: buffer to copy from
- * @n: # of bytes to copy
- *
- * Returns the # of bytes actually copied, which can be less than requested if
- * the fifo becomes full. If < 0, return is error code.
- */
-int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
-{
- int ofs, l;
-
- if (!fifo->data)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
-
- if (n > fifo->avail)
- n = fifo->avail;
- if (n <= 0)
- return 0;
-
- ofs = fifo->in % fifo->capacity;
- l = min(n, fifo->capacity - ofs);
- memcpy(fifo->data + ofs, src, l);
- memcpy(fifo->data, src + l, n - l);
-
- if (FAIL(fifo, addr_check(fifo->done, fifo->in, fifo->in + n) ||
- fifo->avail < n,
- "fifo corrupt: in:%u out:%u done:%u n:%d avail:%d",
- fifo->in, fifo->out, fifo->done, n, fifo->avail))
- return -ENXIO;
-
- fifo->in += n;
- fifo->avail -= n;
-
- df_trace("in:%u out:%u done:%u n:%d avail:%d", fifo->in, fifo->out,
- fifo->done, n, fifo->avail);
-
- return n;
-}
-
-/**
- * dma_fifo_out_pend - gets address/len of next avail read and marks as pended
- * @fifo: address of in-place "struct dma_fifo" to read from
- * @pended: address of structure to fill with read address/len
- * The data/len fields will be NULL/0 if no dma is pended.
- *
- * Returns the # of used bytes remaining in fifo (ie, if > 0, more data
- * remains in the fifo that was not pended). If < 0, return is error code.
- */
-int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
-{
- unsigned int len, n, ofs, l, limit;
-
- if (!fifo->data)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
-
- pended->len = 0;
- pended->data = NULL;
- pended->out = fifo->out;
-
- len = fifo->in - fifo->out;
- if (!len)
- return -ENODATA;
- if (fifo->open == fifo->open_limit)
- return -EAGAIN;
-
- n = len;
- ofs = fifo->out % fifo->capacity;
- l = fifo->capacity - ofs;
- limit = min_t(unsigned int, l, fifo->tx_limit);
- if (n > limit) {
- n = limit;
- fifo->out += limit;
- } else if (ofs + n > fifo->guard) {
- fifo->out += l;
- fifo->in = fifo->out;
- } else {
- fifo->out += round_up(n, fifo->align);
- fifo->in = fifo->out;
- }
-
- df_trace("in: %u out: %u done: %u n: %d len: %u avail: %d", fifo->in,
- fifo->out, fifo->done, n, len, fifo->avail);
-
- pended->len = n;
- pended->data = fifo->data + ofs;
- pended->next = fifo->out;
- list_add_tail(&pended->link, &fifo->pending);
- ++fifo->open;
-
- if (FAIL(fifo, fifo->open > fifo->open_limit,
- "past open limit:%d (limit:%d)",
- fifo->open, fifo->open_limit))
- return -ENXIO;
- if (FAIL(fifo, fifo->out & (fifo->align - 1),
- "fifo out unaligned:%u (align:%u)",
- fifo->out, fifo->align))
- return -ENXIO;
-
- return len - n;
-}
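[Editor's note] A walk-through of the chunking above with hypothetical parameters (align = 64, tx_limit = 1024, size = 4032, open_limit = 8, hence guard = 4544 and capacity = 4736): with out = 0 and in = 1500, len = 1500 exceeds limit = 1024, so a 1024-byte chunk is pended and out advances to 1024. A second call sees len = 476; ofs + n = 1500 does not reach the guard, so out is rounded up to 1536, in is realigned to match, a 476-byte chunk is pended, and 0 is returned (nothing left unpended). The 36 bytes between 1500 and 1536 are the per-transaction alignment waste described in dma_fifo.h below.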
-
-/**
- * dma_fifo_out_complete - marks pended dma as completed
- * @fifo: address of in-place "struct dma_fifo" which was read from
- * @complete: address of structure for previously pended dma to mark completed
- */
-int dma_fifo_out_complete(struct dma_fifo *fifo, struct dma_pending *complete)
-{
- struct dma_pending *pending, *next, *tmp;
-
- if (!fifo->data)
- return -ENOENT;
- if (fifo->corrupt)
- return -ENXIO;
- if (list_empty(&fifo->pending) && fifo->open == 0)
- return -EINVAL;
-
- if (FAIL(fifo, list_empty(&fifo->pending) != (fifo->open == 0),
- "pending list disagrees with open count:%d",
- fifo->open))
- return -ENXIO;
-
- tmp = complete->data;
- *tmp = *complete;
- list_replace(&complete->link, &tmp->link);
- dp_mark_completed(tmp);
-
- /* Only update the fifo in the original pended order */
- list_for_each_entry_safe(pending, next, &fifo->pending, link) {
- if (!dp_is_completed(pending)) {
- df_trace("still pending: saved out: %u len: %d",
- pending->out, pending->len);
- break;
- }
-
- if (FAIL(fifo, pending->out != fifo->done ||
- addr_check(fifo->in, fifo->done, pending->next),
- "in:%u out:%u done:%u saved:%u next:%u",
- fifo->in, fifo->out, fifo->done, pending->out,
- pending->next))
- return -ENXIO;
-
- list_del_init(&pending->link);
- fifo->done = pending->next;
- fifo->avail += pending->len;
- --fifo->open;
-
- df_trace("in: %u out: %u done: %u len: %u avail: %d", fifo->in,
- fifo->out, fifo->done, pending->len, fifo->avail);
- }
-
- if (FAIL(fifo, fifo->open < 0, "open dma:%d < 0", fifo->open))
- return -ENXIO;
- if (FAIL(fifo, fifo->avail > fifo->size, "fifo avail:%d > size:%d",
- fifo->avail, fifo->size))
- return -ENXIO;
-
- return 0;
-}
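[Editor's note] Note the ordering guarantee above: completions may arrive out of order, but fifo->done and fifo->avail only advance through the pending list in the original pend order. For example (hypothetical), if pends A, B and C are outstanding and B completes first, nothing is reclaimed; once A completes, A and B are retired together in one pass while C stays pending, matching the "stalls if a transaction is never retired" behaviour documented in dma_fifo.h.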
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
deleted file mode 100644
index c46a06336975..000000000000
--- a/drivers/staging/fwserial/dma_fifo.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * DMA-able FIFO interface
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- */
-
-#ifndef _DMA_FIFO_H_
-#define _DMA_FIFO_H_
-
-/**
- * The design basis for the DMA FIFO is to provide an output side that
- * complies with the streaming DMA API and can be DMA'd from directly
- * (without additional copying), coupled with an input side that maintains a
- * logically consistent 'apparent' size (ie, bytes in + bytes avail is static
- * for the lifetime of the FIFO).
- *
- * DMA output transactions originate on a cache line boundary and can be
- * variably-sized. DMA output transactions can be retired out-of-order but
- * the FIFO will only advance the output in the original input sequence.
- * This means the FIFO will eventually stall if a transaction is never retired.
- *
- * Chunking the output side into cache line multiples means that some FIFO
- * memory is unused. For example, if all the avail input has been pended out,
- * then the in and out markers are re-aligned to the next cache line.
- * The maximum possible waste is
- * (cache line alignment - 1) * (max outstanding dma transactions)
- * This potential waste requires additional hidden capacity within the FIFO
- * to be able to accept input while the 'apparent' size has not been reached.
- *
- * Additional cache lines (ie, guard area) are used to minimize DMA
- * fragmentation when wrapping at the end of the FIFO. Input is allowed into the
- * guard area, but the in and out FIFO markers are wrapped when DMA is pended.
- */
-
-#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
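[Editor's note] Plugging hypothetical numbers into dma_fifo_alloc() above: a requested size of 4000 bytes with a 64-byte cache line and open_limit = 8 is rounded up to 4032, giving

    capacity = 4032 + 64 * 8 + 64 * 3 = 4736 bytes

with the guard area starting at offset 4544 (size + align * open_limit). The 512 bytes of hidden capacity cover the worst-case alignment waste described above; the final 192 bytes are the three guard cache lines.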
-
-struct dma_fifo {
- unsigned int in;
- unsigned int out; /* updated when dma is pended */
- unsigned int done; /* updated upon dma completion */
- struct {
- unsigned corrupt:1;
- };
- int size; /* 'apparent' size of fifo */
- int guard; /* ofs of guard area */
- int capacity; /* size + reserved */
- int avail; /* # of unused bytes in fifo */
- unsigned int align; /* must be power of 2 */
- int tx_limit; /* max # of bytes per dma transaction */
- int open_limit; /* max # of outstanding allowed */
- int open; /* # of outstanding dma transactions */
- struct list_head pending; /* fifo markers for outstanding dma */
- void *data;
-};
-
-struct dma_pending {
- struct list_head link;
- void *data;
- unsigned int len;
- unsigned int next;
- unsigned int out;
-};
-
-static inline void dp_mark_completed(struct dma_pending *dp)
-{
- dp->data += 1;
-}
-
-static inline bool dp_is_completed(struct dma_pending *dp)
-{
- return (unsigned long)dp->data & 1UL;
-}
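[Editor's note] dp_mark_completed()/dp_is_completed() borrow the low bit of dp->data as a "completed" flag. This relies on pended data pointers always being align-multiple offsets into the kmalloc'd buffer, so bit 0 is otherwise clear; that holds for the cache-line alignments fwserial passes in, but would not for align == 1.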
-
-void dma_fifo_init(struct dma_fifo *fifo);
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
- int tx_limit, int open_limit, gfp_t gfp_mask);
-void dma_fifo_free(struct dma_fifo *fifo);
-void dma_fifo_reset(struct dma_fifo *fifo);
-int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n);
-int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended);
-int dma_fifo_out_complete(struct dma_fifo *fifo,
- struct dma_pending *complete);
-
-/* returns the # of used bytes in the fifo */
-static inline int dma_fifo_level(struct dma_fifo *fifo)
-{
- return fifo->size - fifo->avail;
-}
-
-/* returns the # of bytes ready for output in the fifo */
-static inline int dma_fifo_out_level(struct dma_fifo *fifo)
-{
- return fifo->in - fifo->out;
-}
-
-/* returns the # of unused bytes in the fifo */
-static inline int dma_fifo_avail(struct dma_fifo *fifo)
-{
- return fifo->avail;
-}
-
-/* returns true if fifo has max # of outstanding dmas */
-static inline bool dma_fifo_busy(struct dma_fifo *fifo)
-{
- return fifo->open == fifo->open_limit;
-}
-
-/* changes the max size of dma returned from dma_fifo_out_pend() */
-static inline int dma_fifo_change_tx_limit(struct dma_fifo *fifo, int tx_limit)
-{
- tx_limit = round_down(tx_limit, fifo->align);
- fifo->tx_limit = max_t(int, tx_limit, fifo->align);
- return 0;
-}
-
-#endif /* _DMA_FIFO_H_ */
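[Editor's note] A minimal usage sketch of the API declared above, loosely mirroring how fwserial.c below drives it. The buffer size, limits and the stubbed-out DMA step are illustrative assumptions, not values from the original driver; real callers (see fwtty_port_activate/fwtty_write below) serialize access with a lock and call dma_fifo_out_complete() from the DMA completion callback.

    #include <linux/slab.h>
    #include <linux/cache.h>
    #include "dma_fifo.h"

    static int example_tx_once(struct dma_fifo *fifo, const void *buf, int len)
    {
        struct dma_pending pend;
        int err;

        dma_fifo_init(fifo);
        err = dma_fifo_alloc(fifo, 4096, cache_line_size(), 1024, 8, GFP_KERNEL);
        if (err)
            return err;

        err = dma_fifo_in(fifo, buf, len);      /* may copy less than len */
        if (err < 0)
            goto out;

        err = dma_fifo_out_pend(fifo, &pend);   /* claim an aligned chunk for DMA */
        if (err >= 0) {
            /* ... hand pend.data / pend.len to the DMA engine here ... */
            dma_fifo_out_complete(fifo, &pend); /* normally from the completion callback */
        }
    out:
        dma_fifo_free(fifo);
        return err < 0 ? err : 0;
    }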
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
deleted file mode 100644
index e8fa7f53cd5e..000000000000
--- a/drivers/staging/fwserial/fwserial.c
+++ /dev/null
@@ -1,2890 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * FireWire Serial driver
- *
- * Copyright (C) 2012 Peter Hurley <peter@hurleysoftware.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/rculist.h>
-#include <linux/workqueue.h>
-#include <linux/ratelimit.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-#include "fwserial.h"
-
-inline u64 be32_to_u64(__be32 hi, __be32 lo)
-{
- return ((u64)be32_to_cpu(hi) << 32 | be32_to_cpu(lo));
-}
-
-#define LINUX_VENDOR_ID 0xd00d1eU /* same id used in card root directory */
-#define FWSERIAL_VERSION 0x00e81cU /* must be unique within LINUX_VENDOR_ID */
-
-/* configurable options */
-static int num_ttys = 4; /* # of std ttys to create per fw_card */
- /* - doubles as loopback port index */
-static bool auto_connect = true; /* try to VIRT_CABLE to every peer */
-static bool create_loop_dev = true; /* create a loopback device for each card */
-
-module_param_named(ttys, num_ttys, int, 0644);
-module_param_named(auto, auto_connect, bool, 0644);
-module_param_named(loop, create_loop_dev, bool, 0644);
-
-/*
- * Threshold below which the tty is woken for writing
- * - should be equal to WAKEUP_CHARS in drivers/tty/n_tty.c because
- * even if the writer is woken, n_tty_poll() won't set EPOLLOUT until
- * our fifo is below this level
- */
-#define WAKEUP_CHARS 256
-
-/*
- * fwserial_list: list of every fw_serial created for each fw_card
- * See discussion in fwserial_probe.
- */
-static LIST_HEAD(fwserial_list);
-static DEFINE_MUTEX(fwserial_list_mutex);
-
-/*
- * port_table: array of tty ports allocated to each fw_card
- *
- * tty ports are allocated during probe when an fw_serial is first
- * created for a given fw_card. Ports are allocated in a contiguous block,
- * each block consisting of 'num_ports' ports.
- */
-static struct fwtty_port *port_table[MAX_TOTAL_PORTS];
-static DEFINE_MUTEX(port_table_lock);
-static bool port_table_corrupt;
-#define FWTTY_INVALID_INDEX MAX_TOTAL_PORTS
-
-#define loop_idx(port) (((port)->index) / num_ports)
-#define table_idx(loop) ((loop) * num_ports + num_ttys)
-
-/* total # of tty ports created per fw_card */
-static int num_ports;
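[Editor's note] For example, assuming num_ports ends up as num_ttys plus one loopback port (5 with the defaults above, set up later in this file), the first card claims port_table[0..4]: indices 0-3 are its regular ttys and table_idx(0) = 0 * 5 + 4 = 4 is its loopback port, while loop_idx() maps index 4 (or 9 for a second card) back to loopback unit 0 (or 1).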
-
-/* slab used as pool for struct fwtty_transactions */
-static struct kmem_cache *fwtty_txn_cache;
-
-struct tty_driver *fwtty_driver;
-static struct tty_driver *fwloop_driver;
-
-static struct dentry *fwserial_debugfs;
-
-struct fwtty_transaction;
-typedef void (*fwtty_transaction_cb)(struct fw_card *card, int rcode,
- void *data, size_t length,
- struct fwtty_transaction *txn);
-
-struct fwtty_transaction {
- struct fw_transaction fw_txn;
- fwtty_transaction_cb callback;
- struct fwtty_port *port;
- union {
- struct dma_pending dma_pended;
- };
-};
-
-#define to_device(a, b) (a->b)
-#define fwtty_err(p, fmt, ...) \
- dev_err(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_info(p, fmt, ...) \
- dev_info(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_notice(p, fmt, ...) \
- dev_notice(to_device(p, device), fmt, ##__VA_ARGS__)
-#define fwtty_dbg(p, fmt, ...) \
- dev_dbg(to_device(p, device), "%s: " fmt, __func__, ##__VA_ARGS__)
-#define fwtty_err_ratelimited(p, fmt, ...) \
- dev_err_ratelimited(to_device(p, device), fmt, ##__VA_ARGS__)
-
-#ifdef DEBUG
-static inline void debug_short_write(struct fwtty_port *port, int c, int n)
-{
- int avail;
-
- if (n < c) {
- spin_lock_bh(&port->lock);
- avail = dma_fifo_avail(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
- fwtty_dbg(port, "short write: avail:%d req:%d wrote:%d\n",
- avail, c, n);
- }
-}
-#else
-#define debug_short_write(port, c, n)
-#endif
-
-static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
- int generation, int id);
-
-#ifdef FWTTY_PROFILING
-
-static void fwtty_profile_fifo(struct fwtty_port *port, unsigned int *stat)
-{
- spin_lock_bh(&port->lock);
- fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
- spin_unlock_bh(&port->lock);
-}
-
-static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
-{
- /* for each stat, print sum of 0 to 2^k, then individually */
- int k = 4;
- unsigned int sum;
- int j;
- char t[10];
-
- snprintf(t, 10, "< %d", 1 << k);
- seq_printf(m, "\n%14s %6s", " ", t);
- for (j = k + 1; j < DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", 1 << j);
-
- ++k;
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->reads[j];
- seq_printf(m, "\n%14s: %6d", "reads", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->reads[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->writes[j];
- seq_printf(m, "\n%14s: %6d", "writes", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->writes[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->txns[j];
- seq_printf(m, "\n%14s: %6d", "txns", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->txns[j]);
-
- for (j = 0, sum = 0; j <= k; ++j)
- sum += stats->unthrottle[j];
- seq_printf(m, "\n%14s: %6d", "avail @ unthr", sum);
- for (j = k + 1; j <= DISTRIBUTION_MAX_INDEX; ++j)
- seq_printf(m, "%6d", stats->unthrottle[j]);
-}
-
-#else
-#define fwtty_profile_fifo(port, stat)
-#define fwtty_dump_profile(m, stats)
-#endif
-
-/*
- * Returns the max receive packet size for the given node
- * Devices which are OHCI v1.0/v1.1/v1.2-draft or RFC 2734 compliant
- * are required by specification to support max_rec of 8 (512 bytes) or more.
- */
-static inline int device_max_receive(struct fw_device *fw_device)
-{
- /* see IEEE 1394-2008 table 8-8 */
- return min(2 << fw_device->max_rec, 4096);
-}
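[Editor's note] In other words, a max_rec of 8 yields 2 << 8 = 512 bytes, 9 yields 1024, 10 yields 2048, and values of 11 or more hit the 4096-byte ceiling used here.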
-
-static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
-{
- switch (rcode) {
- case RCODE_SEND_ERROR:
- fwtty_err_ratelimited(port, "card busy\n");
- break;
- case RCODE_ADDRESS_ERROR:
- fwtty_err_ratelimited(port, "bad unit addr or write length\n");
- break;
- case RCODE_DATA_ERROR:
- fwtty_err_ratelimited(port, "failed rx\n");
- break;
- case RCODE_NO_ACK:
- fwtty_err_ratelimited(port, "missing ack\n");
- break;
- case RCODE_BUSY:
- fwtty_err_ratelimited(port, "remote busy\n");
- break;
- default:
- fwtty_err_ratelimited(port, "failed tx: %d\n", rcode);
- }
-}
-
-static void fwtty_common_callback(struct fw_card *card, int rcode,
- void *payload, size_t len, void *cb_data)
-{
- struct fwtty_transaction *txn = cb_data;
- struct fwtty_port *port = txn->port;
-
- if (port && rcode != RCODE_COMPLETE)
- fwtty_log_tx_error(port, rcode);
- if (txn->callback)
- txn->callback(card, rcode, payload, len, txn);
- kmem_cache_free(fwtty_txn_cache, txn);
-}
-
-static int fwtty_send_data_async(struct fwtty_peer *peer, int tcode,
- unsigned long long addr, void *payload,
- size_t len, fwtty_transaction_cb callback,
- struct fwtty_port *port)
-{
- struct fwtty_transaction *txn;
- int generation;
-
- txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
- if (!txn)
- return -ENOMEM;
-
- txn->callback = callback;
- txn->port = port;
-
- generation = peer->generation;
- smp_rmb();
- fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
- peer->node_id, generation, peer->speed, addr, payload,
- len, fwtty_common_callback, txn);
- return 0;
-}
-
-static void fwtty_send_txn_async(struct fwtty_peer *peer,
- struct fwtty_transaction *txn, int tcode,
- unsigned long long addr, void *payload,
- size_t len, fwtty_transaction_cb callback,
- struct fwtty_port *port)
-{
- int generation;
-
- txn->callback = callback;
- txn->port = port;
-
- generation = peer->generation;
- smp_rmb();
- fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
- peer->node_id, generation, peer->speed, addr, payload,
- len, fwtty_common_callback, txn);
-}
-
-static void __fwtty_restart_tx(struct fwtty_port *port)
-{
- int len, avail;
-
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len)
- schedule_delayed_work(&port->drain, 0);
- avail = dma_fifo_avail(&port->tx_fifo);
-
- fwtty_dbg(port, "fifo len: %d avail: %d\n", len, avail);
-}
-
-static void fwtty_restart_tx(struct fwtty_port *port)
-{
- spin_lock_bh(&port->lock);
- __fwtty_restart_tx(port);
- spin_unlock_bh(&port->lock);
-}
-
-/*
- * fwtty_update_port_status - decodes & dispatches line status changes
- *
- * Note: in loopback, the port->lock is already held. Only call functions that
- * do not attempt to re-acquire the port->lock.
- */
-static void fwtty_update_port_status(struct fwtty_port *port,
- unsigned int status)
-{
- unsigned int delta;
- struct tty_struct *tty;
-
- /* simulated LSR/MSR status from remote */
- status &= ~MCTRL_MASK;
- delta = (port->mstatus ^ status) & ~MCTRL_MASK;
- delta &= ~(status & TIOCM_RNG);
- port->mstatus = status;
-
- if (delta & TIOCM_RNG)
- ++port->icount.rng;
- if (delta & TIOCM_DSR)
- ++port->icount.dsr;
- if (delta & TIOCM_CAR)
- ++port->icount.dcd;
- if (delta & TIOCM_CTS)
- ++port->icount.cts;
-
- fwtty_dbg(port, "status: %x delta: %x\n", status, delta);
-
- if (delta & TIOCM_CAR) {
- tty = tty_port_tty_get(&port->port);
- if (tty && !C_CLOCAL(tty)) {
- if (status & TIOCM_CAR)
- wake_up_interruptible(&port->port.open_wait);
- else
- schedule_work(&port->hangup);
- }
- tty_kref_put(tty);
- }
-
- if (delta & TIOCM_CTS) {
- tty = tty_port_tty_get(&port->port);
- if (tty && C_CRTSCTS(tty)) {
- if (tty->hw_stopped) {
- if (status & TIOCM_CTS) {
- tty->hw_stopped = 0;
- if (port->loopback)
- __fwtty_restart_tx(port);
- else
- fwtty_restart_tx(port);
- }
- } else {
- if (~status & TIOCM_CTS)
- tty->hw_stopped = 1;
- }
- }
- tty_kref_put(tty);
-
- } else if (delta & OOB_TX_THROTTLE) {
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- if (tty->hw_stopped) {
- if (~status & OOB_TX_THROTTLE) {
- tty->hw_stopped = 0;
- if (port->loopback)
- __fwtty_restart_tx(port);
- else
- fwtty_restart_tx(port);
- }
- } else {
- if (status & OOB_TX_THROTTLE)
- tty->hw_stopped = 1;
- }
- }
- tty_kref_put(tty);
- }
-
- if (delta & (UART_LSR_BI << 24)) {
- if (status & (UART_LSR_BI << 24)) {
- port->break_last = jiffies;
- schedule_delayed_work(&port->emit_breaks, 0);
- } else {
- /* run emit_breaks one last time (if pending) */
- mod_delayed_work(system_wq, &port->emit_breaks, 0);
- }
- }
-
- if (delta & (TIOCM_DSR | TIOCM_CAR | TIOCM_CTS | TIOCM_RNG))
- wake_up_interruptible(&port->port.delta_msr_wait);
-}
-
-/*
- * __fwtty_port_line_status - generate 'line status' for indicated port
- *
- * This function returns a remote 'MSR' state based on the local 'MCR' state,
- * as if a null modem cable was attached. The actual status is a mangling
- * of TIOCM_* bits suitable for sending to a peer's status_addr.
- *
- * Note: caller must be holding port lock
- */
-static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
-{
- unsigned int status = 0;
-
- /* TODO: add module param to tie RNG to DTR as well */
-
- if (port->mctrl & TIOCM_DTR)
- status |= TIOCM_DSR | TIOCM_CAR;
- if (port->mctrl & TIOCM_RTS)
- status |= TIOCM_CTS;
- if (port->mctrl & OOB_RX_THROTTLE)
- status |= OOB_TX_THROTTLE;
- /* emulate BRK as add'l line status */
- if (port->break_ctl)
- status |= UART_LSR_BI << 24;
-
- return status;
-}
-
-/*
- * __fwtty_write_port_status - send the port line status to peer
- *
- * Note: caller must be holding the port lock.
- */
-static int __fwtty_write_port_status(struct fwtty_port *port)
-{
- struct fwtty_peer *peer;
- int err = -ENOENT;
- unsigned int status = __fwtty_port_line_status(port);
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (peer) {
- err = fwtty_send_data_async(peer, TCODE_WRITE_QUADLET_REQUEST,
- peer->status_addr, &status,
- sizeof(status), NULL, port);
- }
- rcu_read_unlock();
-
- return err;
-}
-
-/*
- * fwtty_write_port_status - same as above but locked by port lock
- */
-static int fwtty_write_port_status(struct fwtty_port *port)
-{
- int err;
-
- spin_lock_bh(&port->lock);
- err = __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
- return err;
-}
-
-static void fwtty_throttle_port(struct fwtty_port *port)
-{
- struct tty_struct *tty;
- unsigned int old;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return;
-
- spin_lock_bh(&port->lock);
-
- old = port->mctrl;
- port->mctrl |= OOB_RX_THROTTLE;
- if (C_CRTSCTS(tty))
- port->mctrl &= ~TIOCM_RTS;
- if (~old & OOB_RX_THROTTLE)
- __fwtty_write_port_status(port);
-
- spin_unlock_bh(&port->lock);
-
- tty_kref_put(tty);
-}
-
-/*
- * fwtty_do_hangup - wait for ldisc to deliver all pending rx; only then hangup
- *
- * When the remote has finished tx, and all in-flight rx has been received and
- * pushed to the flip buffer, the remote may close its device. This will
- * drop DTR on the remote which will drop carrier here. Typically, the tty is
- * hung up when carrier is dropped or lost.
- *
- * However, there is a race between the hang up and the line discipline
- * delivering its data to the reader. A hangup will cause the ldisc to flush
- * (ie., clear) the read buffer and flip buffer. Because of firewire's
- * relatively high throughput, the ldisc frequently lags well behind the driver,
- * resulting in lost data (which has already been received and written to
- * the flip buffer) when the remote closes its end.
- *
- * Unfortunately, since the flip buffer offers no direct method for determining
- * if it holds data, ensuring the ldisc has delivered all data is problematic.
- */
-
-/* FIXME: drop this workaround when __tty_hangup waits for ldisc completion */
-static void fwtty_do_hangup(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(work, hangup);
- struct tty_struct *tty;
-
- schedule_timeout_uninterruptible(msecs_to_jiffies(50));
-
- tty = tty_port_tty_get(&port->port);
- if (tty)
- tty_vhangup(tty);
- tty_kref_put(tty);
-}
-
-static void fwtty_emit_breaks(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(to_delayed_work(work), emit_breaks);
- static const char buf[16];
- unsigned long now = jiffies;
- unsigned long elapsed = now - port->break_last;
- int n, t, c, brk = 0;
-
- /* generate breaks at the line rate (but at least 1) */
- n = (elapsed * port->cps) / HZ + 1;
- port->break_last = now;
-
- fwtty_dbg(port, "sending %d brks\n", n);
-
- while (n) {
- t = min(n, 16);
- c = tty_insert_flip_string_fixed_flag(&port->port, buf,
- TTY_BREAK, t);
- n -= c;
- brk += c;
- if (c < t)
- break;
- }
- tty_flip_buffer_push(&port->port);
-
- if (port->mstatus & (UART_LSR_BI << 24))
- schedule_delayed_work(&port->emit_breaks, FREQ_BREAKS);
- port->icount.brk += brk;
-}
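[Editor's note] A quick sanity check of the rate math above (hypothetical values): with port->cps = 960 (9600 baud, 8N1 -- see set_termios() below) and half a second since the last run (elapsed = HZ / 2), n = (HZ / 2 * 960) / HZ + 1 = 481 break characters are queued, pushed to the flip buffer in 16-byte chunks.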
-
-static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
-{
- int c, n = len;
- unsigned int lsr;
- int err = 0;
-
- fwtty_dbg(port, "%d\n", n);
- fwtty_profile_data(port->stats.reads, n);
-
- if (port->write_only) {
- n = 0;
- goto out;
- }
-
- /* disregard break status; breaks are generated by emit_breaks work */
- lsr = (port->mstatus >> 24) & ~UART_LSR_BI;
-
- if (port->overrun)
- lsr |= UART_LSR_OE;
-
- if (lsr & UART_LSR_OE)
- ++port->icount.overrun;
-
- lsr &= port->status_mask;
- if (lsr & ~port->ignore_mask & UART_LSR_OE) {
- if (!tty_insert_flip_char(&port->port, 0, TTY_OVERRUN)) {
- err = -EIO;
- goto out;
- }
- }
- port->overrun = false;
-
- if (lsr & port->ignore_mask & ~UART_LSR_OE) {
- /* TODO: don't drop SAK and Magic SysRq here */
- n = 0;
- goto out;
- }
-
- c = tty_insert_flip_string_fixed_flag(&port->port, data, TTY_NORMAL, n);
- if (c > 0)
- tty_flip_buffer_push(&port->port);
- n -= c;
-
- if (n) {
- port->overrun = true;
- err = -EIO;
- fwtty_err_ratelimited(port, "flip buffer overrun\n");
-
- } else {
- /* throttle the sender if remaining flip buffer space has
- * reached high watermark to avoid losing data which may be
- * in-flight. Since the AR request context is 32k, that much
- * data may have _already_ been acked.
- */
- if (tty_buffer_space_avail(&port->port) < HIGH_WATERMARK)
- fwtty_throttle_port(port);
- }
-
-out:
- port->icount.rx += len;
- port->stats.lost += n;
- return err;
-}
-
-/*
- * fwtty_port_handler - bus address handler for port reads/writes
- *
- * This handler is responsible for handling inbound read/write dma from remotes.
- */
-static void fwtty_port_handler(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation,
- unsigned long long addr,
- void *data, size_t len,
- void *callback_data)
-{
- struct fwtty_port *port = callback_data;
- struct fwtty_peer *peer;
- int err;
- int rcode;
-
- /* Only accept rx from the peer virtual-cabled to this port */
- rcu_read_lock();
- peer = __fwserial_peer_by_node_id(card, generation, source);
- rcu_read_unlock();
- if (!peer || peer != rcu_access_pointer(port->peer)) {
- rcode = RCODE_ADDRESS_ERROR;
- fwtty_err_ratelimited(port, "ignoring unauthenticated data\n");
- goto respond;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- if (addr != port->rx_handler.offset || len != 4) {
- rcode = RCODE_ADDRESS_ERROR;
- } else {
- fwtty_update_port_status(port, *(unsigned int *)data);
- rcode = RCODE_COMPLETE;
- }
- break;
-
- case TCODE_WRITE_BLOCK_REQUEST:
- if (addr != port->rx_handler.offset + 4 ||
- len > port->rx_handler.length - 4) {
- rcode = RCODE_ADDRESS_ERROR;
- } else {
- err = fwtty_rx(port, data, len);
- switch (err) {
- case 0:
- rcode = RCODE_COMPLETE;
- break;
- case -EIO:
- rcode = RCODE_DATA_ERROR;
- break;
- default:
- rcode = RCODE_CONFLICT_ERROR;
- break;
- }
- }
- break;
-
- default:
- rcode = RCODE_TYPE_ERROR;
- }
-
-respond:
- fw_send_response(card, request, rcode);
-}
-
-/*
- * fwtty_tx_complete - callback for tx dma
- * @data: ignored, has no meaning for write txns
- * @length: ignored, has no meaning for write txns
- *
- * The writer must be woken here if the fifo has been emptied because it
- * may have slept if chars_in_buffer was != 0
- */
-static void fwtty_tx_complete(struct fw_card *card, int rcode,
- void *data, size_t length,
- struct fwtty_transaction *txn)
-{
- struct fwtty_port *port = txn->port;
- int len;
-
- fwtty_dbg(port, "rcode: %d\n", rcode);
-
- switch (rcode) {
- case RCODE_COMPLETE:
- spin_lock_bh(&port->lock);
- dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- port->icount.tx += txn->dma_pended.len;
- break;
-
- default:
- /* TODO: implement retries */
- spin_lock_bh(&port->lock);
- dma_fifo_out_complete(&port->tx_fifo, &txn->dma_pended);
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- port->stats.dropped += txn->dma_pended.len;
- }
-
- if (len < WAKEUP_CHARS)
- tty_port_tty_wakeup(&port->port);
-}
-
-static int fwtty_tx(struct fwtty_port *port, bool drain)
-{
- struct fwtty_peer *peer;
- struct fwtty_transaction *txn;
- struct tty_struct *tty;
- int n, len;
-
- tty = tty_port_tty_get(&port->port);
- if (!tty)
- return -ENOENT;
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (!peer) {
- n = -EIO;
- goto out;
- }
-
- if (test_and_set_bit(IN_TX, &port->flags)) {
- n = -EALREADY;
- goto out;
- }
-
- /* try to write as many dma transactions out as possible */
- n = -EAGAIN;
- while (!tty->flow.stopped && !tty->hw_stopped &&
- !test_bit(STOP_TX, &port->flags)) {
- txn = kmem_cache_alloc(fwtty_txn_cache, GFP_ATOMIC);
- if (!txn) {
- n = -ENOMEM;
- break;
- }
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_out_pend(&port->tx_fifo, &txn->dma_pended);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "out: %u rem: %d\n", txn->dma_pended.len, n);
-
- if (n < 0) {
- kmem_cache_free(fwtty_txn_cache, txn);
- if (n == -EAGAIN) {
- ++port->stats.tx_stall;
- } else if (n == -ENODATA) {
- fwtty_profile_data(port->stats.txns, 0);
- } else {
- ++port->stats.fifo_errs;
- fwtty_err_ratelimited(port, "fifo err: %d\n",
- n);
- }
- break;
- }
-
- fwtty_profile_data(port->stats.txns, txn->dma_pended.len);
-
- fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
- peer->fifo_addr, txn->dma_pended.data,
- txn->dma_pended.len, fwtty_tx_complete,
- port);
- ++port->stats.sent;
-
- /*
- * Stop tx if the 'last view' of the fifo is empty or if
- * this is the writer and there's not enough data to bother
- */
- if (n == 0 || (!drain && n < WRITER_MINIMUM))
- break;
- }
-
- if (n >= 0 || n == -EAGAIN || n == -ENOMEM || n == -ENODATA) {
- spin_lock_bh(&port->lock);
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len) {
- unsigned long delay = (n == -ENOMEM) ? HZ : 1;
-
- schedule_delayed_work(&port->drain, delay);
- }
- len = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- /* wakeup the writer */
- if (drain && len < WAKEUP_CHARS)
- tty_wakeup(tty);
- }
-
- clear_bit(IN_TX, &port->flags);
- wake_up_interruptible(&port->wait_tx);
-
-out:
- rcu_read_unlock();
- tty_kref_put(tty);
- return n;
-}
-
-static void fwtty_drain_tx(struct work_struct *work)
-{
- struct fwtty_port *port = to_port(to_delayed_work(work), drain);
-
- fwtty_tx(port, true);
-}
-
-static void fwtty_write_xchar(struct fwtty_port *port, char ch)
-{
- struct fwtty_peer *peer;
-
- ++port->stats.xchars;
-
- fwtty_dbg(port, "%02x\n", ch);
-
- rcu_read_lock();
- peer = rcu_dereference(port->peer);
- if (peer) {
- fwtty_send_data_async(peer, TCODE_WRITE_BLOCK_REQUEST,
- peer->fifo_addr, &ch, sizeof(ch),
- NULL, port);
- }
- rcu_read_unlock();
-}
-
-static struct fwtty_port *fwtty_port_get(unsigned int index)
-{
- struct fwtty_port *port;
-
- if (index >= MAX_TOTAL_PORTS)
- return NULL;
-
- mutex_lock(&port_table_lock);
- port = port_table[index];
- if (port)
- kref_get(&port->serial->kref);
- mutex_unlock(&port_table_lock);
- return port;
-}
-
-static int fwtty_ports_add(struct fw_serial *serial)
-{
- int err = -EBUSY;
- int i, j;
-
- if (port_table_corrupt)
- return err;
-
- mutex_lock(&port_table_lock);
- for (i = 0; i + num_ports <= MAX_TOTAL_PORTS; i += num_ports) {
- if (!port_table[i]) {
- for (j = 0; j < num_ports; ++i, ++j) {
- serial->ports[j]->index = i;
- port_table[i] = serial->ports[j];
- }
- err = 0;
- break;
- }
- }
- mutex_unlock(&port_table_lock);
- return err;
-}
-
-static void fwserial_destroy(struct kref *kref)
-{
- struct fw_serial *serial = to_serial(kref, kref);
- struct fwtty_port **ports = serial->ports;
- int j, i = ports[0]->index;
-
- synchronize_rcu();
-
- mutex_lock(&port_table_lock);
- for (j = 0; j < num_ports; ++i, ++j) {
- port_table_corrupt |= port_table[i] != ports[j];
- WARN_ONCE(port_table_corrupt, "port_table[%d]: %p != ports[%d]: %p",
- i, port_table[i], j, ports[j]);
-
- port_table[i] = NULL;
- }
- mutex_unlock(&port_table_lock);
-
- for (j = 0; j < num_ports; ++j) {
- fw_core_remove_address_handler(&ports[j]->rx_handler);
- tty_port_destroy(&ports[j]->port);
- kfree(ports[j]);
- }
- kfree(serial);
-}
-
-static void fwtty_port_put(struct fwtty_port *port)
-{
- kref_put(&port->serial->kref, fwserial_destroy);
-}
-
-static void fwtty_port_dtr_rts(struct tty_port *tty_port, int on)
-{
- struct fwtty_port *port = to_port(tty_port, port);
-
- fwtty_dbg(port, "on/off: %d\n", on);
-
- spin_lock_bh(&port->lock);
- /* Don't change carrier state if this is a console */
- if (!port->port.console) {
- if (on)
- port->mctrl |= TIOCM_DTR | TIOCM_RTS;
- else
- port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
- }
-
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-}
-
-/*
- * fwtty_port_carrier_raised: required tty_port operation
- *
- * This port operation is polled after a tty has been opened and is waiting for
- * carrier detect -- see drivers/tty/tty_port:tty_port_block_til_ready().
- */
-static int fwtty_port_carrier_raised(struct tty_port *tty_port)
-{
- struct fwtty_port *port = to_port(tty_port, port);
- int rc;
-
- rc = (port->mstatus & TIOCM_CAR);
-
- fwtty_dbg(port, "%d\n", rc);
-
- return rc;
-}
-
-static unsigned int set_termios(struct fwtty_port *port, struct tty_struct *tty)
-{
- unsigned int baud, frame;
-
- baud = tty_termios_baud_rate(&tty->termios);
- tty_termios_encode_baud_rate(&tty->termios, baud, baud);
-
- /* compute bit count of 2 frames */
- frame = 12 + ((C_CSTOPB(tty)) ? 4 : 2) + ((C_PARENB(tty)) ? 2 : 0);
-
- switch (C_CSIZE(tty)) {
- case CS5:
- frame -= (C_CSTOPB(tty)) ? 1 : 0;
- break;
- case CS6:
- frame += 2;
- break;
- case CS7:
- frame += 4;
- break;
- case CS8:
- frame += 6;
- break;
- }
-
- port->cps = (baud << 1) / frame;
-
- port->status_mask = UART_LSR_OE;
- if (_I_FLAG(tty, BRKINT | PARMRK))
- port->status_mask |= UART_LSR_BI;
-
- port->ignore_mask = 0;
- if (I_IGNBRK(tty)) {
- port->ignore_mask |= UART_LSR_BI;
- if (I_IGNPAR(tty))
- port->ignore_mask |= UART_LSR_OE;
- }
-
- port->write_only = !C_CREAD(tty);
-
- /* turn off echo and newline xlat if loopback */
- if (port->loopback) {
- tty->termios.c_lflag &= ~(ECHO | ECHOE | ECHOK | ECHOKE |
- ECHONL | ECHOPRT | ECHOCTL);
- tty->termios.c_oflag &= ~ONLCR;
- }
-
- return baud;
-}
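[Editor's note] Worked example of the two-frame bit count above: for 8N1, frame = 12 + 2 + 0 + 6 = 20 bits (10 bits per character), so at 115200 baud port->cps = 230400 / 20 = 11520 characters per second; for 8E2 (CSTOPB | PARENB), frame = 12 + 4 + 2 + 6 = 24 and cps = 9600 at the same baud rate.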
-
-static int fwtty_port_activate(struct tty_port *tty_port,
- struct tty_struct *tty)
-{
- struct fwtty_port *port = to_port(tty_port, port);
- unsigned int baud;
- int err;
-
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- err = dma_fifo_alloc(&port->tx_fifo, FWTTY_PORT_TXFIFO_LEN,
- cache_line_size(),
- port->max_payload,
- FWTTY_PORT_MAX_PEND_DMA,
- GFP_KERNEL);
- if (err)
- return err;
-
- spin_lock_bh(&port->lock);
-
- baud = set_termios(port, tty);
-
- /* if console, don't change carrier state */
- if (!port->port.console) {
- port->mctrl = 0;
- if (baud != 0)
- port->mctrl = TIOCM_DTR | TIOCM_RTS;
- }
-
- if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS)
- tty->hw_stopped = 1;
-
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
-
- return 0;
-}
-
-/*
- * fwtty_port_shutdown
- *
- * Note: the tty port core ensures this is not the console and
- * manages TTY_IO_ERROR properly
- */
-static void fwtty_port_shutdown(struct tty_port *tty_port)
-{
- struct fwtty_port *port = to_port(tty_port, port);
-
- /* TODO: cancel outstanding transactions */
-
- cancel_delayed_work_sync(&port->emit_breaks);
- cancel_delayed_work_sync(&port->drain);
-
- spin_lock_bh(&port->lock);
- port->flags = 0;
- port->break_ctl = 0;
- port->overrun = 0;
- __fwtty_write_port_status(port);
- dma_fifo_free(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-}
-
-static int fwtty_open(struct tty_struct *tty, struct file *fp)
-{
- struct fwtty_port *port = tty->driver_data;
-
- return tty_port_open(&port->port, tty, fp);
-}
-
-static void fwtty_close(struct tty_struct *tty, struct file *fp)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty_port_close(&port->port, tty, fp);
-}
-
-static void fwtty_hangup(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty_port_hangup(&port->port);
-}
-
-static void fwtty_cleanup(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- tty->driver_data = NULL;
- fwtty_port_put(port);
-}
-
-static int fwtty_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct fwtty_port *port = fwtty_port_get(tty->index);
- int err;
-
- err = tty_standard_install(driver, tty);
- if (!err)
- tty->driver_data = port;
- else
- fwtty_port_put(port);
- return err;
-}
-
-static int fwloop_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct fwtty_port *port = fwtty_port_get(table_idx(tty->index));
- int err;
-
- err = tty_standard_install(driver, tty);
- if (!err)
- tty->driver_data = port;
- else
- fwtty_port_put(port);
- return err;
-}
-
-static int fwtty_write(struct tty_struct *tty, const unsigned char *buf, int c)
-{
- struct fwtty_port *port = tty->driver_data;
- int n, len;
-
- fwtty_dbg(port, "%d\n", c);
- fwtty_profile_data(port->stats.writes, c);
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_in(&port->tx_fifo, buf, c);
- len = dma_fifo_out_level(&port->tx_fifo);
- if (len < DRAIN_THRESHOLD)
- schedule_delayed_work(&port->drain, 1);
- spin_unlock_bh(&port->lock);
-
- if (len >= DRAIN_THRESHOLD)
- fwtty_tx(port, false);
-
- debug_short_write(port, c, n);
-
- return (n < 0) ? 0 : n;
-}
-
-static unsigned int fwtty_write_room(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int n;
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_avail(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%u\n", n);
-
- return n;
-}
-
-static unsigned int fwtty_chars_in_buffer(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int n;
-
- spin_lock_bh(&port->lock);
- n = dma_fifo_level(&port->tx_fifo);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%u\n", n);
-
- return n;
-}
-
-static void fwtty_send_xchar(struct tty_struct *tty, char ch)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "%02x\n", ch);
-
- fwtty_write_xchar(port, ch);
-}
-
-static void fwtty_throttle(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- /*
- * Ignore throttling (but not unthrottling).
- * It only makes sense to throttle when data will no longer be
- * accepted by the tty flip buffer. For example, it is
- * possible for received data to overflow the tty buffer long
- * before the line discipline ever has a chance to throttle the driver.
- * Additionally, the driver may have already completed the I/O
- * but the tty buffer is still emptying, so the line discipline is
- * throttling and unthrottling nothing.
- */
-
- ++port->stats.throttled;
-}
-
-static void fwtty_unthrottle(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "CRTSCTS: %d\n", C_CRTSCTS(tty) != 0);
-
- fwtty_profile_fifo(port, port->stats.unthrottle);
-
- spin_lock_bh(&port->lock);
- port->mctrl &= ~OOB_RX_THROTTLE;
- if (C_CRTSCTS(tty))
- port->mctrl |= TIOCM_RTS;
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-}
-
-static int check_msr_delta(struct fwtty_port *port, unsigned long mask,
- struct async_icount *prev)
-{
- struct async_icount now;
- int delta;
-
- now = port->icount;
-
- delta = ((mask & TIOCM_RNG && prev->rng != now.rng) ||
- (mask & TIOCM_DSR && prev->dsr != now.dsr) ||
- (mask & TIOCM_CAR && prev->dcd != now.dcd) ||
- (mask & TIOCM_CTS && prev->cts != now.cts));
-
- *prev = now;
-
- return delta;
-}
-
-static int wait_msr_change(struct fwtty_port *port, unsigned long mask)
-{
- struct async_icount prev;
-
- prev = port->icount;
-
- return wait_event_interruptible(port->port.delta_msr_wait,
- check_msr_delta(port, mask, &prev));
-}
-
-static int get_serial_info(struct tty_struct *tty,
- struct serial_struct *ss)
-{
- struct fwtty_port *port = tty->driver_data;
-
- mutex_lock(&port->port.mutex);
- ss->line = port->index;
- ss->baud_base = 400000000;
- ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
- ss->closing_wait = 3000;
- mutex_unlock(&port->port.mutex);
-
- return 0;
-}
-
-static int set_serial_info(struct tty_struct *tty,
- struct serial_struct *ss)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int cdelay;
-
- cdelay = msecs_to_jiffies(ss->close_delay * 10);
-
- mutex_lock(&port->port.mutex);
- if (!capable(CAP_SYS_ADMIN)) {
- if (cdelay != port->port.close_delay ||
- ((ss->flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK))) {
- mutex_unlock(&port->port.mutex);
- return -EPERM;
- }
- }
- port->port.close_delay = cdelay;
- mutex_unlock(&port->port.mutex);
-
- return 0;
-}
-
-static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct fwtty_port *port = tty->driver_data;
- int err;
-
- switch (cmd) {
- case TIOCMIWAIT:
- err = wait_msr_change(port, arg);
- break;
-
- default:
- err = -ENOIOCTLCMD;
- }
-
- return err;
-}
-
-static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int baud;
-
- spin_lock_bh(&port->lock);
- baud = set_termios(port, tty);
-
- if ((baud == 0) && (old->c_cflag & CBAUD)) {
- port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
- } else if ((baud != 0) && !(old->c_cflag & CBAUD)) {
- if (C_CRTSCTS(tty) || !tty_throttled(tty))
- port->mctrl |= TIOCM_DTR | TIOCM_RTS;
- else
- port->mctrl |= TIOCM_DTR;
- }
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- if (old->c_cflag & CRTSCTS) {
- if (!C_CRTSCTS(tty)) {
- tty->hw_stopped = 0;
- fwtty_restart_tx(port);
- }
- } else if (C_CRTSCTS(tty) && ~port->mstatus & TIOCM_CTS) {
- tty->hw_stopped = 1;
- }
-}
-
-/*
- * fwtty_break_ctl - start/stop sending breaks
- *
- * Signals the remote to start or stop generating simulated breaks.
- * First, stop dequeueing from the fifo and wait for writer/drain to leave tx
- * before signalling the break line status. This guarantees any pending rx will
- * be queued to the line discipline before break is simulated on the remote.
- * Conversely, turning off break_ctl requires signalling the line status change,
- * then enabling tx.
- */
-static int fwtty_break_ctl(struct tty_struct *tty, int state)
-{
- struct fwtty_port *port = tty->driver_data;
- long ret;
-
- fwtty_dbg(port, "%d\n", state);
-
- if (state == -1) {
- set_bit(STOP_TX, &port->flags);
- ret = wait_event_interruptible_timeout(port->wait_tx,
- !test_bit(IN_TX, &port->flags),
- 10);
- if (ret == 0 || ret == -ERESTARTSYS) {
- clear_bit(STOP_TX, &port->flags);
- fwtty_restart_tx(port);
- return -EINTR;
- }
- }
-
- spin_lock_bh(&port->lock);
- port->break_ctl = (state == -1);
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
-
- if (state == 0) {
- spin_lock_bh(&port->lock);
- dma_fifo_reset(&port->tx_fifo);
- clear_bit(STOP_TX, &port->flags);
- spin_unlock_bh(&port->lock);
- }
- return 0;
-}
-
-static int fwtty_tiocmget(struct tty_struct *tty)
-{
- struct fwtty_port *port = tty->driver_data;
- unsigned int tiocm;
-
- spin_lock_bh(&port->lock);
- tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
- spin_unlock_bh(&port->lock);
-
- fwtty_dbg(port, "%x\n", tiocm);
-
- return tiocm;
-}
-
-static int fwtty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct fwtty_port *port = tty->driver_data;
-
- fwtty_dbg(port, "set: %x clear: %x\n", set, clear);
-
- /* TODO: simulate loopback if TIOCM_LOOP set */
-
- spin_lock_bh(&port->lock);
- port->mctrl &= ~(clear & MCTRL_MASK & 0xffff);
- port->mctrl |= set & MCTRL_MASK & 0xffff;
- __fwtty_write_port_status(port);
- spin_unlock_bh(&port->lock);
- return 0;
-}
-
-static int fwtty_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount)
-{
- struct fwtty_port *port = tty->driver_data;
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- icount->cts = port->icount.cts;
- icount->dsr = port->icount.dsr;
- icount->rng = port->icount.rng;
- icount->dcd = port->icount.dcd;
- icount->rx = port->icount.rx;
- icount->tx = port->icount.tx + stats.xchars;
- icount->frame = port->icount.frame;
- icount->overrun = port->icount.overrun;
- icount->parity = port->icount.parity;
- icount->brk = port->icount.brk;
- icount->buf_overrun = port->icount.overrun;
- return 0;
-}
-
-static void fwtty_proc_show_port(struct seq_file *m, struct fwtty_port *port)
-{
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- seq_printf(m, " addr:%012llx tx:%d rx:%d", port->rx_handler.offset,
- port->icount.tx + stats.xchars, port->icount.rx);
- seq_printf(m, " cts:%d dsr:%d rng:%d dcd:%d", port->icount.cts,
- port->icount.dsr, port->icount.rng, port->icount.dcd);
- seq_printf(m, " fe:%d oe:%d pe:%d brk:%d", port->icount.frame,
- port->icount.overrun, port->icount.parity, port->icount.brk);
-}
-
-static void fwtty_debugfs_show_port(struct seq_file *m, struct fwtty_port *port)
-{
- struct stats stats;
-
- memcpy(&stats, &port->stats, sizeof(stats));
- if (port->port.console)
- (*port->fwcon_ops->stats)(&stats, port->con_data);
-
- seq_printf(m, " dr:%d st:%d err:%d lost:%d", stats.dropped,
- stats.tx_stall, stats.fifo_errs, stats.lost);
- seq_printf(m, " pkts:%d thr:%d", stats.sent, stats.throttled);
-
- if (port->port.console) {
- seq_puts(m, "\n ");
- (*port->fwcon_ops->proc_show)(m, port->con_data);
- }
-
- fwtty_dump_profile(m, &port->stats);
-}
-
-static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
-{
- int generation = peer->generation;
-
- smp_rmb();
- seq_printf(m, " %s:", dev_name(&peer->unit->device));
- seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
- seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
- peer->max_payload, (unsigned long long)peer->guid);
- seq_printf(m, " mgmt:%012llx", (unsigned long long)peer->mgmt_addr);
- seq_printf(m, " addr:%012llx", (unsigned long long)peer->status_addr);
- seq_putc(m, '\n');
-}
-
-static int fwtty_proc_show(struct seq_file *m, void *v)
-{
- struct fwtty_port *port;
- int i;
-
- seq_puts(m, "fwserinfo: 1.0 driver: 1.0\n");
- for (i = 0; i < MAX_TOTAL_PORTS && (port = fwtty_port_get(i)); ++i) {
- seq_printf(m, "%2d:", i);
- if (capable(CAP_SYS_ADMIN))
- fwtty_proc_show_port(m, port);
- fwtty_port_put(port);
- seq_puts(m, "\n");
- }
- return 0;
-}
-
-static int fwtty_stats_show(struct seq_file *m, void *v)
-{
- struct fw_serial *serial = m->private;
- struct fwtty_port *port;
- int i;
-
- for (i = 0; i < num_ports; ++i) {
- port = fwtty_port_get(serial->ports[i]->index);
- if (port) {
- seq_printf(m, "%2d:", port->index);
- fwtty_proc_show_port(m, port);
- fwtty_debugfs_show_port(m, port);
- fwtty_port_put(port);
- seq_puts(m, "\n");
- }
- }
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(fwtty_stats);
-
-static int fwtty_peers_show(struct seq_file *m, void *v)
-{
- struct fw_serial *serial = m->private;
- struct fwtty_peer *peer;
-
- rcu_read_lock();
- seq_printf(m, "card: %s guid: %016llx\n",
- dev_name(serial->card->device),
- (unsigned long long)serial->card->guid);
- list_for_each_entry_rcu(peer, &serial->peer_list, list)
- fwtty_debugfs_show_peer(m, peer);
- rcu_read_unlock();
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(fwtty_peers);
-
-static const struct tty_port_operations fwtty_port_ops = {
- .dtr_rts = fwtty_port_dtr_rts,
- .carrier_raised = fwtty_port_carrier_raised,
- .shutdown = fwtty_port_shutdown,
- .activate = fwtty_port_activate,
-};
-
-static const struct tty_operations fwtty_ops = {
- .open = fwtty_open,
- .close = fwtty_close,
- .hangup = fwtty_hangup,
- .cleanup = fwtty_cleanup,
- .install = fwtty_install,
- .write = fwtty_write,
- .write_room = fwtty_write_room,
- .chars_in_buffer = fwtty_chars_in_buffer,
- .send_xchar = fwtty_send_xchar,
- .throttle = fwtty_throttle,
- .unthrottle = fwtty_unthrottle,
- .ioctl = fwtty_ioctl,
- .set_termios = fwtty_set_termios,
- .break_ctl = fwtty_break_ctl,
- .tiocmget = fwtty_tiocmget,
- .tiocmset = fwtty_tiocmset,
- .get_icount = fwtty_get_icount,
- .set_serial = set_serial_info,
- .get_serial = get_serial_info,
- .proc_show = fwtty_proc_show,
-};
-
-static const struct tty_operations fwloop_ops = {
- .open = fwtty_open,
- .close = fwtty_close,
- .hangup = fwtty_hangup,
- .cleanup = fwtty_cleanup,
- .install = fwloop_install,
- .write = fwtty_write,
- .write_room = fwtty_write_room,
- .chars_in_buffer = fwtty_chars_in_buffer,
- .send_xchar = fwtty_send_xchar,
- .throttle = fwtty_throttle,
- .unthrottle = fwtty_unthrottle,
- .ioctl = fwtty_ioctl,
- .set_termios = fwtty_set_termios,
- .break_ctl = fwtty_break_ctl,
- .tiocmget = fwtty_tiocmget,
- .tiocmset = fwtty_tiocmset,
- .get_icount = fwtty_get_icount,
- .set_serial = set_serial_info,
- .get_serial = get_serial_info,
-};
-
-static inline int mgmt_pkt_expected_len(__be16 code)
-{
- static const struct fwserial_mgmt_pkt pkt;
-
- switch (be16_to_cpu(code)) {
- case FWSC_VIRT_CABLE_PLUG:
- return sizeof(pkt.hdr) + sizeof(pkt.plug_req);
-
- case FWSC_VIRT_CABLE_PLUG_RSP: /* | FWSC_RSP_OK */
- return sizeof(pkt.hdr) + sizeof(pkt.plug_rsp);
-
- case FWSC_VIRT_CABLE_UNPLUG:
- case FWSC_VIRT_CABLE_UNPLUG_RSP:
- case FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK:
- case FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK:
- return sizeof(pkt.hdr);
-
- default:
- return -1;
- }
-}
-
-static inline void fill_plug_params(struct virt_plug_params *params,
- struct fwtty_port *port)
-{
- u64 status_addr = port->rx_handler.offset;
- u64 fifo_addr = port->rx_handler.offset + 4;
- size_t fifo_len = port->rx_handler.length - 4;
-
- params->status_hi = cpu_to_be32(status_addr >> 32);
- params->status_lo = cpu_to_be32(status_addr);
- params->fifo_hi = cpu_to_be32(fifo_addr >> 32);
- params->fifo_lo = cpu_to_be32(fifo_addr);
- params->fifo_len = cpu_to_be32(fifo_len);
-}
-
-static inline void fill_plug_req(struct fwserial_mgmt_pkt *pkt,
- struct fwtty_port *port)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
- fill_plug_params(&pkt->plug_req, port);
-}
-
-static inline void fill_plug_rsp_ok(struct fwserial_mgmt_pkt *pkt,
- struct fwtty_port *port)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
- fill_plug_params(&pkt->plug_rsp, port);
-}
-
-static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_PLUG_RSP | FWSC_RSP_NACK);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static inline void fill_unplug_rsp_ok(struct fwserial_mgmt_pkt *pkt)
-{
- pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP);
- pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
-static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
- struct virt_plug_params *params)
-{
- struct fwtty_port *port = peer->port;
-
- peer->status_addr = be32_to_u64(params->status_hi, params->status_lo);
- peer->fifo_addr = be32_to_u64(params->fifo_hi, params->fifo_lo);
- peer->fifo_len = be32_to_cpu(params->fifo_len);
- peer_set_state(peer, FWPS_ATTACHED);
-
- /* reconfigure tx_fifo optimally for this peer */
- spin_lock_bh(&port->lock);
- port->max_payload = min(peer->max_payload, peer->fifo_len);
- dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
- spin_unlock_bh(&peer->port->lock);
-
- if (port->port.console && port->fwcon_ops->notify)
- (*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
-
- fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
- (unsigned long long)peer->guid, dev_name(port->device));
-}
-
-static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
- struct fwserial_mgmt_pkt *pkt)
-{
- int generation;
- int rcode, tries = 5;
-
- do {
- generation = peer->generation;
- smp_rmb();
-
- rcode = fw_run_transaction(peer->serial->card,
- TCODE_WRITE_BLOCK_REQUEST,
- peer->node_id,
- generation, peer->speed,
- peer->mgmt_addr,
- pkt, be16_to_cpu(pkt->hdr.len));
- if (rcode == RCODE_BUSY || rcode == RCODE_SEND_ERROR ||
- rcode == RCODE_GENERATION) {
- fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode);
- continue;
- } else {
- break;
- }
- } while (--tries > 0);
- return rcode;
-}
-
-/*
- * fwserial_claim_port - attempt to claim port @ index for peer
- *
- * Returns ptr to claimed port or error code (as ERR_PTR())
- * Can sleep - must be called from process context
- */
-static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer,
- int index)
-{
- struct fwtty_port *port;
-
- if (index < 0 || index >= num_ports)
- return ERR_PTR(-EINVAL);
-
- /* must guarantee that previous port releases have completed */
- synchronize_rcu();
-
- port = peer->serial->ports[index];
- spin_lock_bh(&port->lock);
- if (!rcu_access_pointer(port->peer))
- rcu_assign_pointer(port->peer, peer);
- else
- port = ERR_PTR(-EBUSY);
- spin_unlock_bh(&port->lock);
-
- return port;
-}
-
-/*
- * fwserial_find_port - find avail port and claim for peer
- *
- * Returns ptr to claimed port or NULL if none avail
- * Can sleep - must be called from process context
- */
-static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer)
-{
- struct fwtty_port **ports = peer->serial->ports;
- int i;
-
- /* must guarantee that previous port releases have completed */
- synchronize_rcu();
-
- /* TODO: implement optional GUID-to-specific port # matching */
-
- /* find an unattached port (but not the loopback port, if present) */
- for (i = 0; i < num_ttys; ++i) {
- spin_lock_bh(&ports[i]->lock);
- if (!ports[i]->peer) {
- /* claim port */
- rcu_assign_pointer(ports[i]->peer, peer);
- spin_unlock_bh(&ports[i]->lock);
- return ports[i];
- }
- spin_unlock_bh(&ports[i]->lock);
- }
- return NULL;
-}
-
-static void fwserial_release_port(struct fwtty_port *port, bool reset)
-{
- /* drop carrier (and all other line status) */
- if (reset)
- fwtty_update_port_status(port, 0);
-
- spin_lock_bh(&port->lock);
-
- /* reset dma fifo max transmission size back to S100 */
- port->max_payload = link_speed_to_max_payload(SCODE_100);
- dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
-
- RCU_INIT_POINTER(port->peer, NULL);
- spin_unlock_bh(&port->lock);
-
- if (port->port.console && port->fwcon_ops->notify)
- (*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
-}
-
-static void fwserial_plug_timeout(struct timer_list *t)
-{
- struct fwtty_peer *peer = from_timer(peer, t, timer);
- struct fwtty_port *port;
-
- spin_lock_bh(&peer->lock);
- if (peer->state != FWPS_PLUG_PENDING) {
- spin_unlock_bh(&peer->lock);
- return;
- }
-
- port = peer_revert_state(peer);
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, false);
-}
-
-/*
- * fwserial_connect_peer - initiate virtual cable with peer
- *
- * Returns 0 if VIRT_CABLE_PLUG request was successfully sent,
- * otherwise error code. Must be called from process context.
- */
-static int fwserial_connect_peer(struct fwtty_peer *peer)
-{
- struct fwtty_port *port;
- struct fwserial_mgmt_pkt *pkt;
- int err, rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
- port = fwserial_find_port(peer);
- if (!port) {
- fwtty_err(&peer->unit, "avail ports in use\n");
- err = -EBUSY;
- goto free_pkt;
- }
-
- spin_lock_bh(&peer->lock);
-
- /* only initiate VIRT_CABLE_PLUG if peer is currently not attached */
- if (peer->state != FWPS_NOT_ATTACHED) {
- err = -EBUSY;
- goto release_port;
- }
-
- peer->port = port;
- peer_set_state(peer, FWPS_PLUG_PENDING);
-
- fill_plug_req(pkt, peer->port);
-
- mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
- spin_unlock_bh(&peer->lock);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_PLUG_PENDING && rcode != RCODE_COMPLETE) {
- if (rcode == RCODE_CONFLICT_ERROR)
- err = -EAGAIN;
- else
- err = -EIO;
- goto cancel_timer;
- }
- spin_unlock_bh(&peer->lock);
-
- kfree(pkt);
- return 0;
-
-cancel_timer:
- del_timer(&peer->timer);
- peer_revert_state(peer);
-release_port:
- spin_unlock_bh(&peer->lock);
- fwserial_release_port(port, false);
-free_pkt:
- kfree(pkt);
- return err;
-}
-
-/*
- * fwserial_close_port -
- * HUP the tty (if the tty exists) and unregister the tty device.
- * Only used by the unit driver upon unit removal to disconnect and
- * cleanup all attached ports
- *
- * The port reference is put by fwtty_cleanup (if a reference was
- * ever taken).
- */
-static void fwserial_close_port(struct tty_driver *driver,
- struct fwtty_port *port)
-{
- struct tty_struct *tty;
-
- mutex_lock(&port->port.mutex);
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_vhangup(tty);
- tty_kref_put(tty);
- }
- mutex_unlock(&port->port.mutex);
-
- if (driver == fwloop_driver)
- tty_unregister_device(driver, loop_idx(port));
- else
- tty_unregister_device(driver, port->index);
-}
-
-/**
- * fwserial_lookup - finds first fw_serial associated with card
- * @card: fw_card to match
- *
- * NB: caller must be holding fwserial_list_mutex
- */
-static struct fw_serial *fwserial_lookup(struct fw_card *card)
-{
- struct fw_serial *serial;
-
- list_for_each_entry(serial, &fwserial_list, list) {
- if (card == serial->card)
- return serial;
- }
-
- return NULL;
-}
-
-/**
- * __fwserial_lookup_rcu - finds first fw_serial associated with card
- * @card: fw_card to match
- *
- * NB: caller must be inside rcu_read_lock() section
- */
-static struct fw_serial *__fwserial_lookup_rcu(struct fw_card *card)
-{
- struct fw_serial *serial;
-
- list_for_each_entry_rcu(serial, &fwserial_list, list) {
- if (card == serial->card)
- return serial;
- }
-
- return NULL;
-}
-
-/*
- * __fwserial_peer_by_node_id - finds a peer matching the given generation + id
- *
- * If a matching peer could not be found for the specified generation/node id,
- * this could be because:
- * a) the generation has changed and one of the nodes hasn't updated yet
- * b) the remote node has created its remote unit device before this
- * local node has created its corresponding remote unit device
- * In either case, the remote node should retry
- *
- * Note: caller must be in rcu_read_lock() section
- */
-static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
- int generation, int id)
-{
- struct fw_serial *serial;
- struct fwtty_peer *peer;
-
- serial = __fwserial_lookup_rcu(card);
- if (!serial) {
- /*
- * Something is very wrong - there should be a matching
- * fw_serial structure for every fw_card. Maybe the remote node
- * has created its remote unit device before this driver has
- * been probed for any unit devices...
- */
- fwtty_err(card, "unknown card (guid %016llx)\n",
- (unsigned long long)card->guid);
- return NULL;
- }
-
- list_for_each_entry_rcu(peer, &serial->peer_list, list) {
- int g = peer->generation;
-
- smp_rmb();
- if (generation == g && id == peer->node_id)
- return peer;
- }
-
- return NULL;
-}
-
-#ifdef DEBUG
-static void __dump_peer_list(struct fw_card *card)
-{
- struct fw_serial *serial;
- struct fwtty_peer *peer;
-
- serial = __fwserial_lookup_rcu(card);
- if (!serial)
- return;
-
- list_for_each_entry_rcu(peer, &serial->peer_list, list) {
- int g = peer->generation;
-
- smp_rmb();
- fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
- g, peer->node_id, (unsigned long long)peer->guid);
- }
-}
-#else
-#define __dump_peer_list(s)
-#endif
-
-static void fwserial_auto_connect(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect);
- int err;
-
- err = fwserial_connect_peer(peer);
- if (err == -EAGAIN && ++peer->connect_retries < MAX_CONNECT_RETRIES)
- schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
-}
-
-static void fwserial_peer_workfn(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
-
- peer->workfn(work);
-}
-
-/**
- * fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
- * @serial: aggregate representing the specific fw_card to add the peer to
- * @unit: 'peer' to create and add to peer_list of serial
- *
- * Adds a 'peer' (ie, a local or remote 'serial' unit device) to the list of
- * peers for a specific fw_card. Optionally, auto-attach this peer to an
- * available tty port. This function is called either directly or indirectly
- * as a result of a 'serial' unit device being created & probed.
- *
- * Note: this function is serialized with fwserial_remove_peer() by the
- * fwserial_list_mutex held in fwserial_probe().
- *
- * A 1:1 correspondence between an fw_unit and an fwtty_peer is maintained
- * via the dev_set_drvdata() for the device of the fw_unit.
- */
-static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
-{
- struct device *dev = &unit->device;
- struct fw_device *parent = fw_parent_device(unit);
- struct fwtty_peer *peer;
- struct fw_csr_iterator ci;
- int key, val;
- int generation;
-
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
- if (!peer)
- return -ENOMEM;
-
- peer_set_state(peer, FWPS_NOT_ATTACHED);
-
- dev_set_drvdata(dev, peer);
- peer->unit = unit;
- peer->guid = (u64)parent->config_rom[3] << 32 | parent->config_rom[4];
- peer->speed = parent->max_speed;
- peer->max_payload = min(device_max_receive(parent),
- link_speed_to_max_payload(peer->speed));
-
- generation = parent->generation;
- smp_rmb();
- peer->node_id = parent->node_id;
- smp_wmb();
- peer->generation = generation;
-
- /* retrieve the mgmt bus addr from the unit directory */
- fw_csr_iterator_init(&ci, unit->directory);
- while (fw_csr_iterator_next(&ci, &key, &val)) {
- if (key == (CSR_OFFSET | CSR_DEPENDENT_INFO)) {
- peer->mgmt_addr = CSR_REGISTER_BASE + 4 * val;
- break;
- }
- }
- if (peer->mgmt_addr == 0ULL) {
- /*
- * No mgmt address effectively disables VIRT_CABLE_PLUG -
- * this peer will not be able to attach to a remote
- */
- peer_set_state(peer, FWPS_NO_MGMT_ADDR);
- }
-
- spin_lock_init(&peer->lock);
- peer->port = NULL;
-
- timer_setup(&peer->timer, fwserial_plug_timeout, 0);
- INIT_WORK(&peer->work, fwserial_peer_workfn);
- INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
-
- /* associate peer with specific fw_card */
- peer->serial = serial;
- list_add_rcu(&peer->list, &serial->peer_list);
-
- fwtty_info(&peer->unit, "peer added (guid:%016llx)\n",
- (unsigned long long)peer->guid);
-
- /* identify the local unit & virt cable to loopback port */
- if (parent->is_local) {
- serial->self = peer;
- if (create_loop_dev) {
- struct fwtty_port *port;
-
- port = fwserial_claim_port(peer, num_ttys);
- if (!IS_ERR(port)) {
- struct virt_plug_params params;
-
- spin_lock_bh(&peer->lock);
- peer->port = port;
- fill_plug_params(&params, port);
- fwserial_virt_plug_complete(peer, &params);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(port);
- }
- }
-
- } else if (auto_connect) {
- /* auto-attach to remote units only (if policy allows) */
- schedule_delayed_work(&peer->connect, 1);
- }
-
- return 0;
-}
-
-/*
- * fwserial_remove_peer - remove a 'serial' unit device as a 'peer'
- *
- * Remove a 'peer' from its list of peers. This function is only
- * called by fwserial_remove() on bus removal of the unit device.
- *
- * Note: this function is serialized with fwserial_add_peer() by the
- * fwserial_list_mutex held in fwserial_remove().
- */
-static void fwserial_remove_peer(struct fwtty_peer *peer)
-{
- struct fwtty_port *port;
-
- spin_lock_bh(&peer->lock);
- peer_set_state(peer, FWPS_GONE);
- spin_unlock_bh(&peer->lock);
-
- cancel_delayed_work_sync(&peer->connect);
- cancel_work_sync(&peer->work);
-
- spin_lock_bh(&peer->lock);
- /* if this unit is the local unit, clear link */
- if (peer == peer->serial->self)
- peer->serial->self = NULL;
-
- /* cancel the request timeout timer (if running) */
- del_timer(&peer->timer);
-
- port = peer->port;
- peer->port = NULL;
-
- list_del_rcu(&peer->list);
-
- fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n",
- (unsigned long long)peer->guid);
-
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, true);
-
- synchronize_rcu();
- kfree(peer);
-}
-
-/**
- * fwserial_create - init everything to create TTYs for a specific fw_card
- * @unit: fw_unit for first 'serial' unit device probed for this fw_card
- *
- * This function inits the aggregate structure (an fw_serial instance)
- * used to manage the TTY ports registered by a specific fw_card. Also, the
- * unit device is added as the first 'peer'.
- *
- * This unit device may represent a local unit device (as specified by the
- * config ROM unit directory) or it may represent a remote unit device
- * (as specified by the reading of the remote node's config ROM).
- *
- * Returns 0 to indicate "ownership" of the unit device, or a negative errno
- * value to indicate which error.
- */
-static int fwserial_create(struct fw_unit *unit)
-{
- struct fw_device *parent = fw_parent_device(unit);
- struct fw_card *card = parent->card;
- struct fw_serial *serial;
- struct fwtty_port *port;
- struct device *tty_dev;
- int i, j;
- int err;
-
- serial = kzalloc(sizeof(*serial), GFP_KERNEL);
- if (!serial)
- return -ENOMEM;
-
- kref_init(&serial->kref);
- serial->card = card;
- INIT_LIST_HEAD(&serial->peer_list);
-
- for (i = 0; i < num_ports; ++i) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port) {
- err = -ENOMEM;
- goto free_ports;
- }
- tty_port_init(&port->port);
- port->index = FWTTY_INVALID_INDEX;
- port->port.ops = &fwtty_port_ops;
- port->serial = serial;
- tty_buffer_set_limit(&port->port, 128 * 1024);
-
- spin_lock_init(&port->lock);
- INIT_DELAYED_WORK(&port->drain, fwtty_drain_tx);
- INIT_DELAYED_WORK(&port->emit_breaks, fwtty_emit_breaks);
- INIT_WORK(&port->hangup, fwtty_do_hangup);
- init_waitqueue_head(&port->wait_tx);
- port->max_payload = link_speed_to_max_payload(SCODE_100);
- dma_fifo_init(&port->tx_fifo);
-
- RCU_INIT_POINTER(port->peer, NULL);
- serial->ports[i] = port;
-
- /* get unique bus addr region for port's status & recv fifo */
- port->rx_handler.length = FWTTY_PORT_RXFIFO_LEN + 4;
- port->rx_handler.address_callback = fwtty_port_handler;
- port->rx_handler.callback_data = port;
- /*
- * XXX: use custom memory region above cpu physical memory addrs
- * this will ease porting to 64-bit firewire adapters
- */
- err = fw_core_add_address_handler(&port->rx_handler,
- &fw_high_memory_region);
- if (err) {
- tty_port_destroy(&port->port);
- kfree(port);
- goto free_ports;
- }
- }
- /* preserve i for error cleanup */
-
- err = fwtty_ports_add(serial);
- if (err) {
- fwtty_err(&unit, "no space in port table\n");
- goto free_ports;
- }
-
- for (j = 0; j < num_ttys; ++j) {
- tty_dev = tty_port_register_device(&serial->ports[j]->port,
- fwtty_driver,
- serial->ports[j]->index,
- card->device);
- if (IS_ERR(tty_dev)) {
- err = PTR_ERR(tty_dev);
- fwtty_err(&unit, "register tty device error (%d)\n",
- err);
- goto unregister_ttys;
- }
-
- serial->ports[j]->device = tty_dev;
- }
- /* preserve j for error cleanup */
-
- if (create_loop_dev) {
- struct device *loop_dev;
-
- loop_dev = tty_port_register_device(&serial->ports[j]->port,
- fwloop_driver,
- loop_idx(serial->ports[j]),
- card->device);
- if (IS_ERR(loop_dev)) {
- err = PTR_ERR(loop_dev);
- fwtty_err(&unit, "create loop device failed (%d)\n",
- err);
- goto unregister_ttys;
- }
- serial->ports[j]->device = loop_dev;
- serial->ports[j]->loopback = true;
- }
-
- if (!IS_ERR_OR_NULL(fwserial_debugfs)) {
- serial->debugfs = debugfs_create_dir(dev_name(&unit->device),
- fwserial_debugfs);
- if (!IS_ERR_OR_NULL(serial->debugfs)) {
- debugfs_create_file("peers", 0444, serial->debugfs,
- serial, &fwtty_peers_fops);
- debugfs_create_file("stats", 0444, serial->debugfs,
- serial, &fwtty_stats_fops);
- }
- }
-
- list_add_rcu(&serial->list, &fwserial_list);
-
- fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
- dev_name(card->device), (unsigned long long)card->guid);
-
- err = fwserial_add_peer(serial, unit);
- if (!err)
- return 0;
-
- fwtty_err(&unit, "unable to add peer unit device (%d)\n", err);
-
- /* fall-through to error processing */
- debugfs_remove_recursive(serial->debugfs);
-
- list_del_rcu(&serial->list);
- if (create_loop_dev)
- tty_unregister_device(fwloop_driver,
- loop_idx(serial->ports[j]));
-unregister_ttys:
- for (--j; j >= 0; --j)
- tty_unregister_device(fwtty_driver, serial->ports[j]->index);
- kref_put(&serial->kref, fwserial_destroy);
- return err;
-
-free_ports:
- for (--i; i >= 0; --i) {
- fw_core_remove_address_handler(&serial->ports[i]->rx_handler);
- tty_port_destroy(&serial->ports[i]->port);
- kfree(serial->ports[i]);
- }
- kfree(serial);
- return err;
-}
-
-/*
- * fwserial_probe: bus probe function for firewire 'serial' unit devices
- *
- * A 'serial' unit device is created and probed as a result of:
- * - declaring a ieee1394 bus id table for 'devices' matching a fabricated
- * 'serial' unit specifier id
- * - adding a unit directory to the config ROM(s) for a 'serial' unit
- *
- * The firewire core registers unit devices by enumerating unit directories
- * of a node's config ROM after reading the config ROM when a new node is
- * added to the bus topology after a bus reset.
- *
- * The practical implications of this are:
- * - this probe is called for both local and remote nodes that have a 'serial'
- * unit directory in their config ROM (that matches the specifiers in
- * fwserial_id_table).
- * - no specific order is enforced for local vs. remote unit devices
- *
- * This unit driver copes with the lack of specific order in the same way the
- * firewire net driver does -- each probe, for either a local or remote unit
- * device, is treated as a 'peer' (has a struct fwtty_peer instance) and the
- * first peer created for a given fw_card (tracked by the global fwserial_list)
- * creates the underlying TTYs (aggregated in a fw_serial instance).
- *
- * NB: an early attempt to differentiate local & remote unit devices by creating
- * peers only for remote units and fw_serial instances (with their
- * associated TTY devices) only for local units was discarded. Managing
- * the peer lifetimes on device removal proved too complicated.
- *
- * fwserial_probe/fwserial_remove are effectively serialized by the
- * fwserial_list_mutex. This is necessary because the addition of the first peer
- * for a given fw_card will trigger the creation of the fw_serial for that
- * fw_card, which must not simultaneously contend with the removal of the
- * last peer for a given fw_card triggering the destruction of the same
- * fw_serial for the same fw_card.
- */
-static int fwserial_probe(struct fw_unit *unit,
- const struct ieee1394_device_id *id)
-{
- struct fw_serial *serial;
- int err;
-
- mutex_lock(&fwserial_list_mutex);
- serial = fwserial_lookup(fw_parent_device(unit)->card);
- if (!serial)
- err = fwserial_create(unit);
- else
- err = fwserial_add_peer(serial, unit);
- mutex_unlock(&fwserial_list_mutex);
- return err;
-}
-
-/*
- * fwserial_remove: bus removal function for firewire 'serial' unit devices
- *
- * The corresponding 'peer' for this unit device is removed from the list of
- * peers for the associated fw_serial (which has a 1:1 correspondence with a
- * specific fw_card). If this is the last peer being removed, then trigger
- * the destruction of the underlying TTYs.
- */
-static void fwserial_remove(struct fw_unit *unit)
-{
- struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
- struct fw_serial *serial = peer->serial;
- int i;
-
- mutex_lock(&fwserial_list_mutex);
- fwserial_remove_peer(peer);
-
- if (list_empty(&serial->peer_list)) {
- /* unlink from the fwserial_list here */
- list_del_rcu(&serial->list);
-
- debugfs_remove_recursive(serial->debugfs);
-
- for (i = 0; i < num_ttys; ++i)
- fwserial_close_port(fwtty_driver, serial->ports[i]);
- if (create_loop_dev)
- fwserial_close_port(fwloop_driver, serial->ports[i]);
- kref_put(&serial->kref, fwserial_destroy);
- }
- mutex_unlock(&fwserial_list_mutex);
-}
-
-/*
- * fwserial_update: bus update function for 'firewire' serial unit devices
- *
- * Updates the new node_id and bus generation for this peer. Note that locking
- * is unnecessary; but careful memory barrier usage is important to enforce the
- * load and store order of generation & node_id.
- *
- * The fw-core orders the write of node_id before generation in the parent
- * fw_device to ensure that a stale node_id cannot be used with a current
- * bus generation. So the generation value must be read before the node_id.
- *
- * In turn, this orders the write of node_id before generation in the peer to
- * also ensure a stale node_id cannot be used with a current bus generation.
- */
-static void fwserial_update(struct fw_unit *unit)
-{
- struct fw_device *parent = fw_parent_device(unit);
- struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
- int generation;
-
- generation = parent->generation;
- smp_rmb();
- peer->node_id = parent->node_id;
- smp_wmb();
- peer->generation = generation;
-}
-
-static const struct ieee1394_device_id fwserial_id_table[] = {
- {
- .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .specifier_id = LINUX_VENDOR_ID,
- .version = FWSERIAL_VERSION,
- },
- { }
-};
-
-static struct fw_driver fwserial_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = KBUILD_MODNAME,
- .bus = &fw_bus_type,
- },
- .probe = fwserial_probe,
- .update = fwserial_update,
- .remove = fwserial_remove,
- .id_table = fwserial_id_table,
-};
-
-#define FW_UNIT_SPECIFIER(id) ((CSR_SPECIFIER_ID << 24) | (id))
-#define FW_UNIT_VERSION(ver) ((CSR_VERSION << 24) | (ver))
-#define FW_UNIT_ADDRESS(ofs) (((CSR_OFFSET | CSR_DEPENDENT_INFO) << 24) \
- | (((ofs) - CSR_REGISTER_BASE) >> 2))
-/* XXX: config ROM definitions could be improved with semi-automated offset
- * and length calculation
- */
-#define FW_ROM_LEN(quads) ((quads) << 16)
-#define FW_ROM_DESCRIPTOR(ofs) (((CSR_LEAF | CSR_DESCRIPTOR) << 24) | (ofs))
-
-struct fwserial_unit_directory_data {
- u32 len_crc;
- u32 unit_specifier;
- u32 unit_sw_version;
- u32 unit_addr_offset;
- u32 desc1_ofs;
- u32 desc1_len_crc;
- u32 desc1_data[5];
-} __packed;
-
-static struct fwserial_unit_directory_data fwserial_unit_directory_data = {
- .len_crc = FW_ROM_LEN(4),
- .unit_specifier = FW_UNIT_SPECIFIER(LINUX_VENDOR_ID),
- .unit_sw_version = FW_UNIT_VERSION(FWSERIAL_VERSION),
- .desc1_ofs = FW_ROM_DESCRIPTOR(1),
- .desc1_len_crc = FW_ROM_LEN(5),
- .desc1_data = {
- 0x00000000, /* type = text */
- 0x00000000, /* enc = ASCII, lang EN */
- 0x4c696e75, /* 'Linux TTY' */
- 0x78205454,
- 0x59000000,
- },
-};
-
-static struct fw_descriptor fwserial_unit_directory = {
- .length = sizeof(fwserial_unit_directory_data) / sizeof(u32),
- .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
- .data = (u32 *)&fwserial_unit_directory_data,
-};
-
-/*
- * The management address is in the unit space region but above other known
- * address users (to keep wild writes from causing havoc)
- */
-static const struct fw_address_region fwserial_mgmt_addr_region = {
- .start = CSR_REGISTER_BASE + 0x1e0000ULL,
- .end = 0x1000000000000ULL,
-};
-
-static struct fw_address_handler fwserial_mgmt_addr_handler;
-
-/**
- * fwserial_handle_plug_req - handle VIRT_CABLE_PLUG request work
- * @work: ptr to peer->work
- *
- * Attempts to complete the VIRT_CABLE_PLUG handshake sequence for this peer.
- *
- * This checks for a collided request-- ie, that a VIRT_CABLE_PLUG request was
- * already sent to this peer. If so, the collision is resolved by comparing
- * guid values; the loser sends the plug response.
- *
- * Note: if an error prevents a response, don't do anything -- the
- * remote will timeout its request.
- */
-static void fwserial_handle_plug_req(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
- struct virt_plug_params *plug_req = &peer->work_params.plug_req;
- struct fwtty_port *port;
- struct fwserial_mgmt_pkt *pkt;
- int rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return;
-
- port = fwserial_find_port(peer);
-
- spin_lock_bh(&peer->lock);
-
- switch (peer->state) {
- case FWPS_NOT_ATTACHED:
- if (!port) {
- fwtty_err(&peer->unit, "no more ports avail\n");
- fill_plug_rsp_nack(pkt);
- } else {
- peer->port = port;
- fill_plug_rsp_ok(pkt, peer->port);
- peer_set_state(peer, FWPS_PLUG_RESPONDING);
- /* don't release claimed port */
- port = NULL;
- }
- break;
-
- case FWPS_PLUG_PENDING:
- if (peer->serial->card->guid > peer->guid)
- goto cleanup;
-
- /* We lost - hijack the already-claimed port and send ok */
- del_timer(&peer->timer);
- fill_plug_rsp_ok(pkt, peer->port);
- peer_set_state(peer, FWPS_PLUG_RESPONDING);
- break;
-
- default:
- fill_plug_rsp_nack(pkt);
- }
-
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, false);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_PLUG_RESPONDING) {
- if (rcode == RCODE_COMPLETE) {
- struct fwtty_port *tmp = peer->port;
-
- fwserial_virt_plug_complete(peer, plug_req);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(tmp);
- spin_lock_bh(&peer->lock);
- } else {
- fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode);
- port = peer_revert_state(peer);
- }
- }
-cleanup:
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, false);
- kfree(pkt);
-}
-
-static void fwserial_handle_unplug_req(struct work_struct *work)
-{
- struct fwtty_peer *peer = to_peer(work, work);
- struct fwtty_port *port = NULL;
- struct fwserial_mgmt_pkt *pkt;
- int rcode;
-
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return;
-
- spin_lock_bh(&peer->lock);
-
- switch (peer->state) {
- case FWPS_ATTACHED:
- fill_unplug_rsp_ok(pkt);
- peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
- break;
-
- case FWPS_UNPLUG_PENDING:
- if (peer->serial->card->guid > peer->guid)
- goto cleanup;
-
- /* We lost - send unplug rsp */
- del_timer(&peer->timer);
- fill_unplug_rsp_ok(pkt);
- peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
- break;
-
- default:
- fill_unplug_rsp_nack(pkt);
- }
-
- spin_unlock_bh(&peer->lock);
-
- rcode = fwserial_send_mgmt_sync(peer, pkt);
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_UNPLUG_RESPONDING) {
- if (rcode != RCODE_COMPLETE)
- fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n",
- rcode);
- port = peer_revert_state(peer);
- }
-cleanup:
- spin_unlock_bh(&peer->lock);
- if (port)
- fwserial_release_port(port, true);
- kfree(pkt);
-}
-
-static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
- struct fwserial_mgmt_pkt *pkt,
- unsigned long long addr,
- size_t len)
-{
- struct fwtty_port *port = NULL;
- bool reset = false;
- int rcode;
-
- if (addr != fwserial_mgmt_addr_handler.offset || len < sizeof(pkt->hdr))
- return RCODE_ADDRESS_ERROR;
-
- if (len != be16_to_cpu(pkt->hdr.len) ||
- len != mgmt_pkt_expected_len(pkt->hdr.code))
- return RCODE_DATA_ERROR;
-
- spin_lock_bh(&peer->lock);
- if (peer->state == FWPS_GONE) {
- /*
- * This should never happen - it would mean that the
- * remote unit that just wrote this transaction was
- * already removed from the bus -- and the removal was
- * processed before we rec'd this transaction
- */
- fwtty_err(&peer->unit, "peer already removed\n");
- spin_unlock_bh(&peer->lock);
- return RCODE_ADDRESS_ERROR;
- }
-
- rcode = RCODE_COMPLETE;
-
- fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04x\n", pkt->hdr.code);
-
- switch (be16_to_cpu(pkt->hdr.code) & FWSC_CODE_MASK) {
- case FWSC_VIRT_CABLE_PLUG:
- if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "plug req: busy\n");
- rcode = RCODE_CONFLICT_ERROR;
-
- } else {
- peer->work_params.plug_req = pkt->plug_req;
- peer->workfn = fwserial_handle_plug_req;
- queue_work(system_unbound_wq, &peer->work);
- }
- break;
-
- case FWSC_VIRT_CABLE_PLUG_RSP:
- if (peer->state != FWPS_PLUG_PENDING) {
- rcode = RCODE_CONFLICT_ERROR;
-
- } else if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK) {
- fwtty_notice(&peer->unit, "NACK plug rsp\n");
- port = peer_revert_state(peer);
-
- } else {
- struct fwtty_port *tmp = peer->port;
-
- fwserial_virt_plug_complete(peer, &pkt->plug_rsp);
- spin_unlock_bh(&peer->lock);
-
- fwtty_write_port_status(tmp);
- spin_lock_bh(&peer->lock);
- }
- break;
-
- case FWSC_VIRT_CABLE_UNPLUG:
- if (work_pending(&peer->work)) {
- fwtty_err(&peer->unit, "unplug req: busy\n");
- rcode = RCODE_CONFLICT_ERROR;
- } else {
- peer->workfn = fwserial_handle_unplug_req;
- queue_work(system_unbound_wq, &peer->work);
- }
- break;
-
- case FWSC_VIRT_CABLE_UNPLUG_RSP:
- if (peer->state != FWPS_UNPLUG_PENDING) {
- rcode = RCODE_CONFLICT_ERROR;
- } else {
- if (be16_to_cpu(pkt->hdr.code) & FWSC_RSP_NACK)
- fwtty_notice(&peer->unit, "NACK unplug?\n");
- port = peer_revert_state(peer);
- reset = true;
- }
- break;
-
- default:
- fwtty_err(&peer->unit, "unknown mgmt code %d\n",
- be16_to_cpu(pkt->hdr.code));
- rcode = RCODE_DATA_ERROR;
- }
- spin_unlock_bh(&peer->lock);
-
- if (port)
- fwserial_release_port(port, reset);
-
- return rcode;
-}
-
-/*
- * fwserial_mgmt_handler: bus address handler for mgmt requests
- *
- * This handler is responsible for handling virtual cable requests from remotes
- * for all cards.
- */
-static void fwserial_mgmt_handler(struct fw_card *card,
- struct fw_request *request,
- int tcode, int destination, int source,
- int generation,
- unsigned long long addr,
- void *data, size_t len,
- void *callback_data)
-{
- struct fwserial_mgmt_pkt *pkt = data;
- struct fwtty_peer *peer;
- int rcode;
-
- rcu_read_lock();
- peer = __fwserial_peer_by_node_id(card, generation, source);
- if (!peer) {
- fwtty_dbg(card, "peer(%d:%x) not found\n", generation, source);
- __dump_peer_list(card);
- rcode = RCODE_CONFLICT_ERROR;
-
- } else {
- switch (tcode) {
- case TCODE_WRITE_BLOCK_REQUEST:
- rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len);
- break;
-
- default:
- rcode = RCODE_TYPE_ERROR;
- }
- }
-
- rcu_read_unlock();
- fw_send_response(card, request, rcode);
-}
-
-static int __init fwserial_init(void)
-{
- int err, num_loops = !!(create_loop_dev);
-
- /* XXX: placeholder for a "firewire" debugfs node */
- fwserial_debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
-
- /* num_ttys/num_ports must not be set above the static alloc avail */
- if (num_ttys + num_loops > MAX_CARD_PORTS)
- num_ttys = MAX_CARD_PORTS - num_loops;
-
- num_ports = num_ttys + num_loops;
-
- fwtty_driver = tty_alloc_driver(MAX_TOTAL_PORTS, TTY_DRIVER_REAL_RAW
- | TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(fwtty_driver)) {
- err = PTR_ERR(fwtty_driver);
- goto remove_debugfs;
- }
-
- fwtty_driver->driver_name = KBUILD_MODNAME;
- fwtty_driver->name = tty_dev_name;
- fwtty_driver->major = 0;
- fwtty_driver->minor_start = 0;
- fwtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fwtty_driver->subtype = SERIAL_TYPE_NORMAL;
- fwtty_driver->init_termios = tty_std_termios;
- fwtty_driver->init_termios.c_cflag |= CLOCAL;
- tty_set_operations(fwtty_driver, &fwtty_ops);
-
- err = tty_register_driver(fwtty_driver);
- if (err) {
- pr_err("register tty driver failed (%d)\n", err);
- goto put_tty;
- }
-
- if (create_loop_dev) {
- fwloop_driver = tty_alloc_driver(MAX_TOTAL_PORTS / num_ports,
- TTY_DRIVER_REAL_RAW
- | TTY_DRIVER_DYNAMIC_DEV);
- if (IS_ERR(fwloop_driver)) {
- err = PTR_ERR(fwloop_driver);
- goto unregister_driver;
- }
-
- fwloop_driver->driver_name = KBUILD_MODNAME "_loop";
- fwloop_driver->name = loop_dev_name;
- fwloop_driver->major = 0;
- fwloop_driver->minor_start = 0;
- fwloop_driver->type = TTY_DRIVER_TYPE_SERIAL;
- fwloop_driver->subtype = SERIAL_TYPE_NORMAL;
- fwloop_driver->init_termios = tty_std_termios;
- fwloop_driver->init_termios.c_cflag |= CLOCAL;
- tty_set_operations(fwloop_driver, &fwloop_ops);
-
- err = tty_register_driver(fwloop_driver);
- if (err) {
- pr_err("register loop driver failed (%d)\n", err);
- goto put_loop;
- }
- }
-
- fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
- sizeof(struct fwtty_transaction),
- 0, 0, NULL);
- if (!fwtty_txn_cache) {
- err = -ENOMEM;
- goto unregister_loop;
- }
-
- /*
- * Ideally, this address handler would be registered per local node
- * (rather than the same handler for all local nodes). However,
- * since the firewire core requires the config rom descriptor *before*
- * the local unit device(s) are created, a single management handler
- * must suffice for all local serial units.
- */
- fwserial_mgmt_addr_handler.length = sizeof(struct fwserial_mgmt_pkt);
- fwserial_mgmt_addr_handler.address_callback = fwserial_mgmt_handler;
-
- err = fw_core_add_address_handler(&fwserial_mgmt_addr_handler,
- &fwserial_mgmt_addr_region);
- if (err) {
- pr_err("add management handler failed (%d)\n", err);
- goto destroy_cache;
- }
-
- fwserial_unit_directory_data.unit_addr_offset =
- FW_UNIT_ADDRESS(fwserial_mgmt_addr_handler.offset);
- err = fw_core_add_descriptor(&fwserial_unit_directory);
- if (err) {
- pr_err("add unit descriptor failed (%d)\n", err);
- goto remove_handler;
- }
-
- err = driver_register(&fwserial_driver.driver);
- if (err) {
- pr_err("register fwserial driver failed (%d)\n", err);
- goto remove_descriptor;
- }
-
- return 0;
-
-remove_descriptor:
- fw_core_remove_descriptor(&fwserial_unit_directory);
-remove_handler:
- fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
-destroy_cache:
- kmem_cache_destroy(fwtty_txn_cache);
-unregister_loop:
- if (create_loop_dev)
- tty_unregister_driver(fwloop_driver);
-put_loop:
- if (create_loop_dev)
- tty_driver_kref_put(fwloop_driver);
-unregister_driver:
- tty_unregister_driver(fwtty_driver);
-put_tty:
- tty_driver_kref_put(fwtty_driver);
-remove_debugfs:
- debugfs_remove_recursive(fwserial_debugfs);
-
- return err;
-}
-
-static void __exit fwserial_exit(void)
-{
- driver_unregister(&fwserial_driver.driver);
- fw_core_remove_descriptor(&fwserial_unit_directory);
- fw_core_remove_address_handler(&fwserial_mgmt_addr_handler);
- kmem_cache_destroy(fwtty_txn_cache);
- if (create_loop_dev) {
- tty_unregister_driver(fwloop_driver);
- tty_driver_kref_put(fwloop_driver);
- }
- tty_unregister_driver(fwtty_driver);
- tty_driver_kref_put(fwtty_driver);
- debugfs_remove_recursive(fwserial_debugfs);
-}
-
-module_init(fwserial_init);
-module_exit(fwserial_exit);
-
-MODULE_AUTHOR("Peter Hurley (peter@hurleysoftware.com)");
-MODULE_DESCRIPTION("FireWire Serial TTY Driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(ieee1394, fwserial_id_table);
-MODULE_PARM_DESC(ttys, "Number of ttys to create for each local firewire node");
-MODULE_PARM_DESC(auto, "Auto-connect a tty to each firewire node discovered");
-MODULE_PARM_DESC(loop, "Create a loopback device, fwloop<n>, with ttys");
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
deleted file mode 100644
index 1d15f183e0fa..000000000000
--- a/drivers/staging/fwserial/fwserial.h
+++ /dev/null
@@ -1,359 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FIREWIRE_FWSERIAL_H
-#define _FIREWIRE_FWSERIAL_H
-
-#include <linux/kernel.h>
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/list.h>
-#include <linux/firewire.h>
-#include <linux/firewire-constants.h>
-#include <linux/spinlock.h>
-#include <linux/rcupdate.h>
-#include <linux/mutex.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-
-#include "dma_fifo.h"
-
-#ifdef FWTTY_PROFILING
-#define DISTRIBUTION_MAX_SIZE 8192
-#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void fwtty_profile_data(unsigned int stat[], unsigned int val)
-{
- int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
- ++stat[n];
-}
-#else
-#define DISTRIBUTION_MAX_INDEX 0
-#define fwtty_profile_data(st, n)
-#endif
-
-/* Parameters for both VIRT_CABLE_PLUG & VIRT_CABLE_PLUG_RSP mgmt codes */
-struct virt_plug_params {
- __be32 status_hi;
- __be32 status_lo;
- __be32 fifo_hi;
- __be32 fifo_lo;
- __be32 fifo_len;
-};
-
-struct peer_work_params {
- union {
- struct virt_plug_params plug_req;
- };
-};
-
-/**
- * fwtty_peer: structure representing local & remote unit devices
- * @unit: unit child device of fw_device node
- * @serial: back pointer to associated fw_serial aggregate
- * @guid: unique 64-bit guid for this unit device
- * @generation: most recent bus generation
- * @node_id: most recent node_id
- * @speed: link speed of peer (0 = S100, 2 = S400, ... 5 = S3200)
- * @mgmt_addr: bus addr region to write mgmt packets to
- * @status_addr: bus addr register to write line status to
- * @fifo_addr: bus addr region to write serial output to
- * @fifo_len: max length for single write to fifo_addr
- * @list: link for insertion into fw_serial's peer_list
- * @rcu: for deferring peer reclamation
- * @lock: spinlock to synchronize changes to state & port fields
- * @work: only one work item can be queued at any one time
- * Note: pending work is canceled prior to removal, so this
- * peer is valid for at least the lifetime of the work function
- * @work_params: parameter block for work functions
- * @timer: timer for resetting peer state if remote request times out
- * @state: current state
- * @connect: work item for auto-connecting
- * @connect_retries: # of connections already attempted
- * @port: associated tty_port (usable if state == FWPS_ATTACHED)
- */
-struct fwtty_peer {
- struct fw_unit *unit;
- struct fw_serial *serial;
- u64 guid;
- int generation;
- int node_id;
- unsigned int speed;
- int max_payload;
- u64 mgmt_addr;
-
- /* these are usable only if state == FWPS_ATTACHED */
- u64 status_addr;
- u64 fifo_addr;
- int fifo_len;
-
- struct list_head list;
- struct rcu_head rcu;
-
- spinlock_t lock;
- work_func_t workfn;
- struct work_struct work;
- struct peer_work_params work_params;
- struct timer_list timer;
- int state;
- struct delayed_work connect;
- int connect_retries;
-
- struct fwtty_port *port;
-};
-
-#define to_peer(ptr, field) (container_of(ptr, struct fwtty_peer, field))
-
-/* state values for fwtty_peer.state field */
-enum fwtty_peer_state {
- FWPS_GONE,
- FWPS_NOT_ATTACHED,
- FWPS_ATTACHED,
- FWPS_PLUG_PENDING,
- FWPS_PLUG_RESPONDING,
- FWPS_UNPLUG_PENDING,
- FWPS_UNPLUG_RESPONDING,
-
- FWPS_NO_MGMT_ADDR = -1,
-};
-
-#define CONNECT_RETRY_DELAY HZ
-#define MAX_CONNECT_RETRIES 10
-
-/* must be holding peer lock for these state funclets */
-static inline void peer_set_state(struct fwtty_peer *peer, int new)
-{
- peer->state = new;
-}
-
-static inline struct fwtty_port *peer_revert_state(struct fwtty_peer *peer)
-{
- struct fwtty_port *port = peer->port;
-
- peer->port = NULL;
- peer_set_state(peer, FWPS_NOT_ATTACHED);
- return port;
-}
-
-struct fwserial_mgmt_pkt {
- struct {
- __be16 len;
- __be16 code;
- } hdr;
- union {
- struct virt_plug_params plug_req;
- struct virt_plug_params plug_rsp;
- };
-} __packed;
-
-/* fwserial_mgmt_packet codes */
-#define FWSC_RSP_OK 0x0000
-#define FWSC_RSP_NACK 0x8000
-#define FWSC_CODE_MASK 0x0fff
-
-#define FWSC_VIRT_CABLE_PLUG 1
-#define FWSC_VIRT_CABLE_UNPLUG 2
-#define FWSC_VIRT_CABLE_PLUG_RSP 3
-#define FWSC_VIRT_CABLE_UNPLUG_RSP 4
-
-/* 1 min. plug timeout -- suitable for userland authorization */
-#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
-
-struct stats {
- unsigned int xchars;
- unsigned int dropped;
- unsigned int tx_stall;
- unsigned int fifo_errs;
- unsigned int sent;
- unsigned int lost;
- unsigned int throttled;
- unsigned int reads[DISTRIBUTION_MAX_INDEX + 1];
- unsigned int writes[DISTRIBUTION_MAX_INDEX + 1];
- unsigned int txns[DISTRIBUTION_MAX_INDEX + 1];
- unsigned int unthrottle[DISTRIBUTION_MAX_INDEX + 1];
-};
-
-struct fwconsole_ops {
- void (*notify)(int code, void *data);
- void (*stats)(struct stats *stats, void *data);
- void (*proc_show)(struct seq_file *m, void *data);
-};
-
-/* codes for console ops notify */
-#define FWCON_NOTIFY_ATTACH 1
-#define FWCON_NOTIFY_DETACH 2
-
-/**
- * fwtty_port: structure used to track/represent underlying tty_port
- * @port: underlying tty_port
- * @device: tty device
- * @index: index into port_table for this particular port
- * note: minor = index + minor_start assigned by tty_alloc_driver()
- * @serial: back pointer to the containing fw_serial
- * @rx_handler: bus address handler for unique addr region used by remotes
- * to communicate with this port. Every port uses
- * fwtty_port_handler() for per port transactions.
- * @fwcon_ops: ops for attached fw_console (if any)
- * @con_data: private data for fw_console
- * @wait_tx: waitqueue for sleeping until writer/drain completes tx
- * @emit_breaks: delayed work responsible for generating breaks when the
- * break line status is active
- * @cps : characters per second computed from the termios settings
- * @break_last: timestamp in jiffies from last emit_breaks
- * @hangup: work responsible for HUPing when carrier is dropped/lost
- * @mstatus: loose virtualization of LSR/MSR
- * bits 15..0 correspond to TIOCM_* bits
- * bits 19..16 reserved for mctrl
- * bit 20 OOB_TX_THROTTLE
- * bits 23..21 reserved
- * bits 31..24 correspond to UART_LSR_* bits
- * @lock: spinlock for protecting concurrent access to fields below it
- * @mctrl: loose virtualization of MCR
- * bits 15..0 correspond to TIOCM_* bits
- * bit 16 OOB_RX_THROTTLE
- * bits 19..17 reserved
- * bits 31..20 reserved for mstatus
- * @drain: delayed work scheduled to ensure that writes are flushed.
- * The work can race with the writer but concurrent sending is
- * prevented with the IN_TX flag. Scheduled under lock to
- * limit scheduling when fifo has just been drained.
- * @tx_fifo: fifo used to store & block-up writes for dma to remote
- * @max_payload: max bytes transmissible per dma (based on peer's max_payload)
- * @status_mask: UART_LSR_* bitmask significant to rx (based on termios)
- * @ignore_mask: UART_LSR_* bitmask of states to ignore (also based on termios)
- * @break_ctl: if set, port is 'sending break' to remote
- * @write_only: self-explanatory
- * @overrun: previous rx was lost (partially or completely)
- * @loopback: if set, port is in loopback mode
- * @flags: atomic bit flags
- * bit 0: IN_TX - gate to allow only one cpu to send from the dma fifo
- * at a time.
- * bit 1: STOP_TX - force tx to exit while sending
- * @peer: rcu-pointer to associated fwtty_peer (if attached)
- * NULL if no peer attached
- * @icount: predefined statistics reported by the TIOCGICOUNT ioctl
- * @stats: additional statistics reported in /proc/tty/driver/firewire_serial
- */
-struct fwtty_port {
- struct tty_port port;
- struct device *device;
- unsigned int index;
- struct fw_serial *serial;
- struct fw_address_handler rx_handler;
-
- struct fwconsole_ops *fwcon_ops;
- void *con_data;
-
- wait_queue_head_t wait_tx;
- struct delayed_work emit_breaks;
- unsigned int cps;
- unsigned long break_last;
-
- struct work_struct hangup;
-
- unsigned int mstatus;
-
- spinlock_t lock;
- unsigned int mctrl;
- struct delayed_work drain;
- struct dma_fifo tx_fifo;
- int max_payload;
- unsigned int status_mask;
- unsigned int ignore_mask;
- unsigned int break_ctl:1,
- write_only:1,
- overrun:1,
- loopback:1;
- unsigned long flags;
-
- struct fwtty_peer __rcu *peer;
-
- struct async_icount icount;
- struct stats stats;
-};
-
-#define to_port(ptr, field) (container_of(ptr, struct fwtty_port, field))
-
-/* bit #s for flags field */
-#define IN_TX 0
-#define STOP_TX 1
-
-/* bitmasks for special mctrl/mstatus bits */
-#define OOB_RX_THROTTLE 0x00010000
-#define MCTRL_RSRVD 0x000e0000
-#define OOB_TX_THROTTLE 0x00100000
-#define MSTATUS_RSRVD 0x00e00000
-
-#define MCTRL_MASK (TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | \
- TIOCM_LOOP | OOB_RX_THROTTLE | MCTRL_RSRVD)
-
-/* XXX even every 1/50th secs. may be unnecessarily accurate */
-/* delay in jiffies between brk emits */
-#define FREQ_BREAKS (HZ / 50)
-
-/* Ports are allocated in blocks of num_ports for each fw_card */
-#define MAX_CARD_PORTS CONFIG_FWTTY_MAX_CARD_PORTS
-#define MAX_TOTAL_PORTS CONFIG_FWTTY_MAX_TOTAL_PORTS
-
-/* tuning parameters */
-#define FWTTY_PORT_TXFIFO_LEN 4096
-#define FWTTY_PORT_MAX_PEND_DMA 8 /* costs a cache line per pend */
-#define DRAIN_THRESHOLD 1024
-#define MAX_ASYNC_PAYLOAD 4096 /* ohci-defined limit */
-#define WRITER_MINIMUM 128
-/* TODO: how to set watermark to AR context size? see fwtty_rx() */
-#define HIGH_WATERMARK 32768 /* AR context is 32K */
-
-/*
- * Size of bus addr region above 4GB used per port as the recv addr
- * - must be at least as big as the MAX_ASYNC_PAYLOAD
- */
-#define FWTTY_PORT_RXFIFO_LEN MAX_ASYNC_PAYLOAD
-
-/**
- * fw_serial: aggregate used to associate tty ports with specific fw_card
- * @card: fw_card associated with this fw_serial device (1:1 association)
- * @kref: reference-counted multi-port management allows delayed destroy
- * @self: local unit device as 'peer'. Not valid until local unit device
- * is enumerated.
- * @list: link for insertion into fwserial_list
- * @peer_list: list of local & remote unit devices attached to this card
- * @ports: fixed array of tty_ports provided by this serial device
- */
-struct fw_serial {
- struct fw_card *card;
- struct kref kref;
-
- struct dentry *debugfs;
- struct fwtty_peer *self;
-
- struct list_head list;
- struct list_head peer_list;
-
- struct fwtty_port *ports[MAX_CARD_PORTS];
-};
-
-#define to_serial(ptr, field) (container_of(ptr, struct fw_serial, field))
-
-#define TTY_DEV_NAME "fwtty" /* ttyFW was taken */
-static const char tty_dev_name[] = TTY_DEV_NAME;
-static const char loop_dev_name[] = "fwloop";
-
-extern struct tty_driver *fwtty_driver;
-
-/*
- * Returns the max send async payload size in bytes based on the unit device
- * link speed. Self-limiting asynchronous bandwidth (via reducing the payload)
- * is not necessary and does not work, because
- * 1) asynchronous traffic will absorb all available bandwidth (less that
- * being used for isochronous traffic)
- * 2) isochronous arbitration always wins.
- */
-static inline int link_speed_to_max_payload(unsigned int speed)
-{
- /* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
- return min(512 << speed, 4096);
-}
-
-#endif /* _FIREWIRE_FWSERIAL_H */
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 05e91e6bc2a0..223987616e07 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -3,7 +3,6 @@
* Greybus Audio Sound SoC helper APIs
*/
-#include <linux/debugfs.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -116,10 +115,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
{
int i;
struct snd_soc_dapm_widget *w, *tmp_w;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *parent = dapm->debugfs_dapm;
- struct dentry *debugfs_w = NULL;
-#endif
mutex_lock(&dapm->card->dapm_mutex);
for (i = 0; i < num; i++) {
@@ -139,12 +134,6 @@ int gbaudio_dapm_free_controls(struct snd_soc_dapm_context *dapm,
continue;
}
widget++;
-#ifdef CONFIG_DEBUG_FS
- if (!parent)
- debugfs_w = debugfs_lookup(w->name, parent);
- debugfs_remove(debugfs_w);
- debugfs_w = NULL;
-#endif
gbaudio_dapm_free_widget(w);
}
mutex_unlock(&dapm->card->dapm_mutex);
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index dc4ed0ff1ae2..90ff07f2cbf7 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -480,7 +480,7 @@ static int gb_tty_break_ctl(struct tty_struct *tty, int state)
}
static void gb_tty_set_termios(struct tty_struct *tty,
- struct ktermios *termios_old)
+ const struct ktermios *termios_old)
{
struct gb_uart_set_line_coding_request newline;
struct gb_tty *gb_tty = tty->driver_data;
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index a8e970db179d..afd05bf3345e 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -8,7 +8,6 @@ menu "IIO staging drivers"
source "drivers/staging/iio/accel/Kconfig"
source "drivers/staging/iio/adc/Kconfig"
source "drivers/staging/iio/addac/Kconfig"
-source "drivers/staging/iio/cdc/Kconfig"
source "drivers/staging/iio/frequency/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index b15904b99581..5ed56fe57e14 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -6,7 +6,6 @@
obj-y += accel/
obj-y += adc/
obj-y += addac/
-obj-y += cdc/
obj-y += frequency/
obj-y += impedance-analyzer/
obj-y += meter/
diff --git a/drivers/staging/iio/cdc/Kconfig b/drivers/staging/iio/cdc/Kconfig
deleted file mode 100644
index a7386bbbcb79..000000000000
--- a/drivers/staging/iio/cdc/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# CDC drivers
-#
-menu "Capacitance to digital converters"
-
-config AD7746
- tristate "Analog Devices AD7745, AD7746, AD7747 capacitive sensor driver"
- depends on I2C
- help
- Say yes here to build support for Analog Devices capacitive sensors.
- (AD7745, AD7746, AD7747) Provides direct access via sysfs.
-
- To compile this driver as a module, choose M here: the
- module will be called ad7746.
-
-endmenu
diff --git a/drivers/staging/iio/cdc/Makefile b/drivers/staging/iio/cdc/Makefile
deleted file mode 100644
index afb7499a7090..000000000000
--- a/drivers/staging/iio/cdc/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for industrial I/O CDC drivers
-#
-
-obj-$(CONFIG_AD7746) += ad7746.o
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index f43464db618a..6f9eebd6c7ee 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -112,10 +112,10 @@ struct ad9832_state {
* transfer buffers to live in their own cache lines.
*/
union {
- __be16 freq_data[4]____cacheline_aligned;
+ __be16 freq_data[4];
__be16 phase_data[2];
__be16 data;
- };
+ } __aligned(IIO_DMA_MINALIGN);
};
static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 94b131ef8a22..2b4267a87e65 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -83,7 +83,7 @@ struct ad9834_state {
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
- __be16 data ____cacheline_aligned;
+ __be16 data __aligned(IIO_DMA_MINALIGN);
__be16 freq_data[2];
};
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index a51e6e3183d3..7a49f8f1016f 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -162,7 +162,7 @@ struct ade7854_state {
int bits);
int irq;
struct mutex buf_lock;
- u8 tx[ADE7854_MAX_TX] ____cacheline_aligned;
+ u8 tx[ADE7854_MAX_TX] __aligned(IIO_DMA_MINALIGN);
u8 rx[ADE7854_MAX_RX];
};
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index c0b2716d0511..e4cf42438487 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -94,8 +94,8 @@ struct ad2s1210_state {
bool hysteresis;
u8 resolution;
enum ad2s1210_mode mode;
- u8 rx[2] ____cacheline_aligned;
- u8 tx[2] ____cacheline_aligned;
+ u8 rx[2] __aligned(IIO_DMA_MINALIGN);
+ u8 tx[2];
};
static const int ad2s1210_mode_vals[4][2] = {
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 421ce9dbf44c..d4f03b203ae5 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -22,10 +22,6 @@ if STAGING_MEDIA && MEDIA_SUPPORT
# Please keep them in alphabetic order
source "drivers/staging/media/atomisp/Kconfig"
-source "drivers/staging/media/av7110/Kconfig"
-
-source "drivers/staging/media/hantro/Kconfig"
-
source "drivers/staging/media/imx/Kconfig"
source "drivers/staging/media/ipu3/Kconfig"
@@ -38,12 +34,31 @@ source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/rkvdec/Kconfig"
-source "drivers/staging/media/stkwebcam/Kconfig"
-
source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-video/Kconfig"
-source "drivers/staging/media/zoran/Kconfig"
+menuconfig STAGING_MEDIA_DEPRECATED
+ bool "Media staging drivers (DEPRECATED)"
+ default n
+ help
+ This option enables deprecated media drivers that are
+ scheduled for future removal from the kernel.
+
+ If you wish to work on these drivers to prevent their removal,
+ then contact the linux-media@vger.kernel.org mailing list.
+
+ If in doubt, say N here.
+
+if STAGING_MEDIA_DEPRECATED
+source "drivers/staging/media/deprecated/cpia2/Kconfig"
+source "drivers/staging/media/deprecated/fsl-viu/Kconfig"
+source "drivers/staging/media/deprecated/meye/Kconfig"
+source "drivers/staging/media/deprecated/saa7146/Kconfig"
+source "drivers/staging/media/deprecated/stkwebcam/Kconfig"
+source "drivers/staging/media/deprecated/tm6000/Kconfig"
+source "drivers/staging/media/deprecated/vpfe_capture/Kconfig"
+source "drivers/staging/media/deprecated/zr364xx/Kconfig"
+endif
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 950e96f10aad..a387692b84f2 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,14 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INTEL_ATOMISP) += atomisp/
+obj-$(CONFIG_VIDEO_CPIA2) += deprecated/cpia2/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
obj-$(CONFIG_VIDEO_MAX96712) += max96712/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
+obj-$(CONFIG_VIDEO_MEYE) += deprecated/meye/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
-obj-$(CONFIG_VIDEO_STKWEBCAM) += stkwebcam/
+obj-$(CONFIG_VIDEO_STKWEBCAM) += deprecated/stkwebcam/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
-obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
-obj-$(CONFIG_VIDEO_ZORAN) += zoran/
-obj-$(CONFIG_DVB_AV7110) += av7110/
+obj-$(CONFIG_VIDEO_TM6000) += deprecated/tm6000/
+obj-$(CONFIG_VIDEO_VIU) += deprecated/fsl-viu/
+obj-$(CONFIG_USB_ZR364XX) += deprecated/zr364xx/
+obj-y += deprecated/vpfe_capture/
+obj-y += deprecated/saa7146/
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index cbc8b1d91995..783f1b88ebf2 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -1194,7 +1194,7 @@ static const struct v4l2_subdev_ops gc0310_ops = {
.sensor = &gc0310_sensor_ops,
};
-static int gc0310_remove(struct i2c_client *client)
+static void gc0310_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct gc0310_device *dev = to_gc0310_sensor(sd);
@@ -1207,8 +1207,6 @@ static int gc0310_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int gc0310_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index 0e6b2e6100d1..4d5a7e335f85 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -952,7 +952,7 @@ static const struct v4l2_subdev_ops gc2235_ops = {
.sensor = &gc2235_sensor_ops,
};
-static int gc2235_remove(struct i2c_client *client)
+static void gc2235_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct gc2235_device *dev = to_gc2235_sensor(sd);
@@ -965,8 +965,6 @@ static int gc2235_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int gc2235_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index e046489cd253..75d16b525294 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -910,7 +910,7 @@ free_flash:
return err;
}
-static int lm3554_remove(struct i2c_client *client)
+static void lm3554_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct lm3554 *flash = to_lm3554(sd);
@@ -926,8 +926,6 @@ static int lm3554_remove(struct i2c_client *client)
lm3554_gpio_uninit(client);
kfree(flash);
-
- return 0;
}
static const struct dev_pm_ops lm3554_pm_ops = {
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 3c81ab73cdae..a0e8e94b2412 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -1713,7 +1713,7 @@ static const struct v4l2_subdev_ops mt9m114_ops = {
.sensor = &mt9m114_sensor_ops,
};
-static int mt9m114_remove(struct i2c_client *client)
+static void mt9m114_remove(struct i2c_client *client)
{
struct mt9m114_device *dev;
struct v4l2_subdev *sd = i2c_get_clientdata(client);
@@ -1724,7 +1724,6 @@ static int mt9m114_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
- return 0;
}
static int mt9m114_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 4ba99c660681..8f48b23be3aa 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -1135,7 +1135,7 @@ static const struct v4l2_subdev_ops ov2680_ops = {
.sensor = &ov2680_sensor_ops,
};
-static int ov2680_remove(struct i2c_client *client)
+static void ov2680_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2680_device *dev = to_ov2680_sensor(sd);
@@ -1148,8 +1148,6 @@ static int ov2680_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int ov2680_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index d5d099ac1b70..887b6f99f6ca 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -1090,7 +1090,7 @@ static const struct v4l2_subdev_ops ov2722_ops = {
.sensor = &ov2722_sensor_ops,
};
-static int ov2722_remove(struct i2c_client *client)
+static void ov2722_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov2722_device *dev = to_ov2722_sensor(sd);
@@ -1103,8 +1103,6 @@ static int ov2722_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
kfree(dev);
-
- return 0;
}
static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 6c95f57a52e9..c1cd631455e6 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1877,7 +1877,7 @@ static const struct v4l2_subdev_ops ov5693_ops = {
.pad = &ov5693_pad_ops,
};
-static int ov5693_remove(struct i2c_client *client)
+static void ov5693_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov5693_device *dev = to_ov5693_sensor(sd);
@@ -1893,8 +1893,6 @@ static int ov5693_remove(struct i2c_client *client)
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
-
- return 0;
}
static int ov5693_probe(struct i2c_client *client)
diff --git a/drivers/staging/media/av7110/TODO b/drivers/staging/media/av7110/TODO
deleted file mode 100644
index 60062d8441b3..000000000000
--- a/drivers/staging/media/av7110/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-- This driver is too old and relies on a different API.
- Drop it from Kernel on a couple of versions.
-- Cleanup patches for the drivers here won't be accepted.
diff --git a/drivers/media/usb/cpia2/Kconfig b/drivers/staging/media/deprecated/cpia2/Kconfig
index da2c6862b4a2..ee3b25a759d4 100644
--- a/drivers/media/usb/cpia2/Kconfig
+++ b/drivers/staging/media/deprecated/cpia2/Kconfig
@@ -1,10 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_CPIA2
- tristate "CPiA2 Video For Linux"
+ tristate "CPiA2 Video For Linux (DEPRECATED)"
depends on USB && VIDEO_DEV
help
This is the video4linux driver for cameras based on Vision's CPiA2
(Colour Processor Interface ASIC), such as the Digital Blue QX5
Microscope. If you have one of these cameras, say Y here
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
This driver is also available as a module (cpia2).
diff --git a/drivers/media/usb/cpia2/Makefile b/drivers/staging/media/deprecated/cpia2/Makefile
index 05664141f4d7..05664141f4d7 100644
--- a/drivers/media/usb/cpia2/Makefile
+++ b/drivers/staging/media/deprecated/cpia2/Makefile
diff --git a/drivers/staging/media/deprecated/cpia2/TODO b/drivers/staging/media/deprecated/cpia2/TODO
new file mode 100644
index 000000000000..92ac8718d164
--- /dev/null
+++ b/drivers/staging/media/deprecated/cpia2/TODO
@@ -0,0 +1,6 @@
+The cpia2 driver does not use the vb2 framework for streaming
+video; instead it implements this in the driver.
+
+To prevent removal of this driver in early 2023, it has to be
+converted to use vb2. Contact the linux-media@vger.kernel.org
+mailing list if you want to do this.
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/staging/media/deprecated/cpia2/cpia2.h
index 57b7f1ea68da..57b7f1ea68da 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/staging/media/deprecated/cpia2/cpia2.h
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/staging/media/deprecated/cpia2/cpia2_core.c
index b5a2d06fb356..b5a2d06fb356 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/staging/media/deprecated/cpia2/cpia2_core.c
diff --git a/drivers/media/usb/cpia2/cpia2_registers.h b/drivers/staging/media/deprecated/cpia2/cpia2_registers.h
index 8c73812a15c9..8c73812a15c9 100644
--- a/drivers/media/usb/cpia2/cpia2_registers.h
+++ b/drivers/staging/media/deprecated/cpia2/cpia2_registers.h
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/staging/media/deprecated/cpia2/cpia2_usb.c
index cba03b286473..cba03b286473 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/staging/media/deprecated/cpia2/cpia2_usb.c
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/staging/media/deprecated/cpia2/cpia2_v4l.c
index 926ecfc9b64a..926ecfc9b64a 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/staging/media/deprecated/cpia2/cpia2_v4l.c
diff --git a/drivers/staging/media/deprecated/fsl-viu/Kconfig b/drivers/staging/media/deprecated/fsl-viu/Kconfig
new file mode 100644
index 000000000000..399892c69a18
--- /dev/null
+++ b/drivers/staging/media/deprecated/fsl-viu/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_VIU
+ tristate "NXP VIU Video Driver (DEPRECATED)"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && (PPC_MPC512x || COMPILE_TEST) && I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Support for Freescale VIU video driver. This device captures
+ video data, or overlays video on DIU frame buffer.
+
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
+ Say Y here if you want to enable the VIU device on MPC5121e Rev2+.
+ If in doubt, say N.
diff --git a/drivers/staging/media/deprecated/fsl-viu/Makefile b/drivers/staging/media/deprecated/fsl-viu/Makefile
new file mode 100644
index 000000000000..931ec56ad08c
--- /dev/null
+++ b/drivers/staging/media/deprecated/fsl-viu/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/staging/media/deprecated/fsl-viu/TODO b/drivers/staging/media/deprecated/fsl-viu/TODO
new file mode 100644
index 000000000000..ecb30a429689
--- /dev/null
+++ b/drivers/staging/media/deprecated/fsl-viu/TODO
@@ -0,0 +1,7 @@
+This is one of the few drivers still not using the vb2
+framework, so it is now deprecated with the intent of
+removing it altogether by the beginning of 2023.
+
+In order to keep this driver, it has to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
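As with the other deprecated drivers, the usual first step of a vb2 conversion is to re-type the driver's private buffer so it embeds struct vb2_v4l2_buffer instead of struct videobuf_buffer. The snippet below is only an illustration; viu_vb2_buf and to_viu_vb2_buf are hypothetical names, and a real conversion of fsl-viu would also move from VIDEOBUF_DMA_CONTIG to the vb2 dma-contig memops:

#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>	/* provides vb2_dma_contig_memops */

/* hypothetical vb2 replacement for a videobuf-based buffer structure */
struct viu_vb2_buf {
	struct vb2_v4l2_buffer vb;	/* must stay first */
	struct list_head list;		/* driver-owned list of queued buffers */
};

static inline struct viu_vb2_buf *to_viu_vb2_buf(struct vb2_buffer *vb2)
{
	return container_of(to_vb2_v4l2_buffer(vb2), struct viu_vb2_buf, vb);
}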
diff --git a/drivers/media/platform/nxp/fsl-viu.c b/drivers/staging/media/deprecated/fsl-viu/fsl-viu.c
index afc96f6db2a1..afc96f6db2a1 100644
--- a/drivers/media/platform/nxp/fsl-viu.c
+++ b/drivers/staging/media/deprecated/fsl-viu/fsl-viu.c
diff --git a/drivers/media/pci/meye/Kconfig b/drivers/staging/media/deprecated/meye/Kconfig
index 3e69b66f1a5b..f135f8568c85 100644
--- a/drivers/media/pci/meye/Kconfig
+++ b/drivers/staging/media/deprecated/meye/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_MEYE
- tristate "Sony Vaio Picturebook Motion Eye Video For Linux"
+ tristate "Sony Vaio Picturebook Motion Eye Video For Linux (DEPRECATED)"
depends on PCI && VIDEO_DEV
depends on SONY_LAPTOP
depends on X86 || COMPILE_TEST
@@ -12,5 +12,8 @@ config VIDEO_MEYE
If you say Y or M here, you need to say Y or M to "Sony Laptop
Extras" in the misc device section.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
To compile this driver as a module, choose M here: the
module will be called meye.
diff --git a/drivers/media/pci/meye/Makefile b/drivers/staging/media/deprecated/meye/Makefile
index 36f1f86f0d58..36f1f86f0d58 100644
--- a/drivers/media/pci/meye/Makefile
+++ b/drivers/staging/media/deprecated/meye/Makefile
diff --git a/drivers/staging/media/deprecated/meye/TODO b/drivers/staging/media/deprecated/meye/TODO
new file mode 100644
index 000000000000..6d1d1433d5a0
--- /dev/null
+++ b/drivers/staging/media/deprecated/meye/TODO
@@ -0,0 +1,6 @@
+The meye driver does not use the vb2 framework for streaming
+video; instead it implements this in the driver.
+
+To prevent removal of this driver in early 2023 it has to be
+converted to use vb2. Contact the linux-media@vger.kernel.org
+mailing list if you want to do this.
diff --git a/drivers/media/pci/meye/meye.c b/drivers/staging/media/deprecated/meye/meye.c
index 5d87efd9b95c..5d87efd9b95c 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/staging/media/deprecated/meye/meye.c
diff --git a/drivers/media/pci/meye/meye.h b/drivers/staging/media/deprecated/meye/meye.h
index 5fa6552cf93d..5fa6552cf93d 100644
--- a/drivers/media/pci/meye/meye.h
+++ b/drivers/staging/media/deprecated/meye/meye.h
diff --git a/drivers/staging/media/deprecated/saa7146/Kconfig b/drivers/staging/media/deprecated/saa7146/Kconfig
new file mode 100644
index 000000000000..54154da79f59
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+source "drivers/staging/media/deprecated/saa7146/common/Kconfig"
+source "drivers/staging/media/deprecated/saa7146/av7110/Kconfig"
+source "drivers/staging/media/deprecated/saa7146/saa7146/Kconfig"
+source "drivers/staging/media/deprecated/saa7146/ttpci/Kconfig"
diff --git a/drivers/staging/media/deprecated/saa7146/Makefile b/drivers/staging/media/deprecated/saa7146/Makefile
new file mode 100644
index 000000000000..68e7aa10c639
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/Makefile
@@ -0,0 +1,2 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+obj-y += common/ av7110/ saa7146/ ttpci/
diff --git a/drivers/staging/media/av7110/Kconfig b/drivers/staging/media/deprecated/saa7146/av7110/Kconfig
index 9faf9d2d4001..1571eab31926 100644
--- a/drivers/staging/media/av7110/Kconfig
+++ b/drivers/staging/media/deprecated/saa7146/av7110/Kconfig
@@ -5,7 +5,7 @@ config DVB_AV7110_IR
default DVB_AV7110
config DVB_AV7110
- tristate "AV7110 cards"
+ tristate "AV7110 cards (DEPRECATED)"
depends on DVB_CORE && PCI && I2C
select TTPCI_EEPROM
select VIDEO_SAA7146_VV
@@ -35,10 +35,13 @@ config DVB_AV7110
kernel image by adding the filename to the EXTRA_FIRMWARE
configuration option string.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
Say Y if you own such a card and want to use it.
config DVB_AV7110_OSD
- bool "AV7110 OSD support"
+ bool "AV7110 OSD support (DEPRECATED)"
depends on DVB_AV7110
default y if DVB_AV7110=y || DVB_AV7110=m
help
@@ -49,10 +52,13 @@ config DVB_AV7110_OSD
Anyway, some popular DVB software like VDR uses this OSD to render
its menus, so say Y if you want to use this software.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
All other people say N.
config DVB_BUDGET_PATCH
- tristate "AV7110 cards with Budget Patch"
+ tristate "AV7110 cards with Budget Patch (DEPRECATED)"
depends on DVB_BUDGET_CORE && I2C
depends on DVB_AV7110
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
@@ -68,6 +74,9 @@ config DVB_BUDGET_PATCH
standard AV7110 driver prior to loading this
driver.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
Say Y if you own such a card and want to use it.
To compile this driver as a module, choose M here: the
@@ -80,7 +89,7 @@ if DVB_AV7110
# it if we drop support for AV7110, as no other driver will use it.
config DVB_SP8870
- tristate "Spase sp8870 based"
+ tristate "Spase sp8870 based (DEPRECATED)"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -91,4 +100,7 @@ config DVB_SP8870
download/extract it, and then copy it to /usr/lib/hotplug/firmware
or /lib/firmware (depending on configuration of firmware hotplug).
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
endif
diff --git a/drivers/staging/media/av7110/Makefile b/drivers/staging/media/deprecated/saa7146/av7110/Makefile
index 307b267598ea..c04cd0a59109 100644
--- a/drivers/staging/media/av7110/Makefile
+++ b/drivers/staging/media/deprecated/saa7146/av7110/Makefile
@@ -18,5 +18,6 @@ obj-$(CONFIG_DVB_SP8870) += sp8870.o
ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
ccflags-y += -I $(srctree)/drivers/media/tuners
-ccflags-y += -I $(srctree)/drivers/media/pci/ttpci
ccflags-y += -I $(srctree)/drivers/media/common
+ccflags-y += -I $(srctree)/drivers/staging/media/deprecated/saa7146/ttpci
+ccflags-y += -I $(srctree)/drivers/staging/media/deprecated/saa7146/common
diff --git a/drivers/staging/media/deprecated/saa7146/av7110/TODO b/drivers/staging/media/deprecated/saa7146/av7110/TODO
new file mode 100644
index 000000000000..38817e04bb67
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/av7110/TODO
@@ -0,0 +1,9 @@
+- This driver is too old and relies on a different API.
+ Drop it from the kernel in a couple of versions.
+- Cleanup patches for the drivers here won't be accepted.
+
+These drivers are now deprecated with the intent of
+removing them altogether by the beginning of 2023.
+
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/staging/media/av7110/audio-bilingual-channel-select.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-bilingual-channel-select.rst
index 33b5363317f1..33b5363317f1 100644
--- a/drivers/staging/media/av7110/audio-bilingual-channel-select.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-bilingual-channel-select.rst
diff --git a/drivers/staging/media/av7110/audio-channel-select.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-channel-select.rst
index 74093df92a68..74093df92a68 100644
--- a/drivers/staging/media/av7110/audio-channel-select.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-channel-select.rst
diff --git a/drivers/staging/media/av7110/audio-clear-buffer.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-clear-buffer.rst
index a0ebb0278260..a0ebb0278260 100644
--- a/drivers/staging/media/av7110/audio-clear-buffer.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-clear-buffer.rst
diff --git a/drivers/staging/media/av7110/audio-continue.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-continue.rst
index a2e9850f37f2..a2e9850f37f2 100644
--- a/drivers/staging/media/av7110/audio-continue.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-continue.rst
diff --git a/drivers/staging/media/av7110/audio-fclose.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-fclose.rst
index 77857d578e83..77857d578e83 100644
--- a/drivers/staging/media/av7110/audio-fclose.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-fclose.rst
diff --git a/drivers/staging/media/av7110/audio-fopen.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-fopen.rst
index 774daaab3bad..774daaab3bad 100644
--- a/drivers/staging/media/av7110/audio-fopen.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-fopen.rst
diff --git a/drivers/staging/media/av7110/audio-fwrite.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-fwrite.rst
index 7b096ac2b6c4..7b096ac2b6c4 100644
--- a/drivers/staging/media/av7110/audio-fwrite.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-fwrite.rst
diff --git a/drivers/staging/media/av7110/audio-get-capabilities.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-get-capabilities.rst
index 6d9eb71dad17..6d9eb71dad17 100644
--- a/drivers/staging/media/av7110/audio-get-capabilities.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-get-capabilities.rst
diff --git a/drivers/staging/media/av7110/audio-get-status.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-get-status.rst
index 7ae8db2e65e9..7ae8db2e65e9 100644
--- a/drivers/staging/media/av7110/audio-get-status.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-get-status.rst
diff --git a/drivers/staging/media/av7110/audio-pause.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-pause.rst
index d37d1ddce4df..d37d1ddce4df 100644
--- a/drivers/staging/media/av7110/audio-pause.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-pause.rst
diff --git a/drivers/staging/media/av7110/audio-play.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-play.rst
index e591930b6ca7..e591930b6ca7 100644
--- a/drivers/staging/media/av7110/audio-play.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-play.rst
diff --git a/drivers/staging/media/av7110/audio-select-source.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-select-source.rst
index 6a0c0f365eb1..6a0c0f365eb1 100644
--- a/drivers/staging/media/av7110/audio-select-source.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-select-source.rst
diff --git a/drivers/staging/media/av7110/audio-set-av-sync.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-av-sync.rst
index 85a8016bf025..85a8016bf025 100644
--- a/drivers/staging/media/av7110/audio-set-av-sync.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-av-sync.rst
diff --git a/drivers/staging/media/av7110/audio-set-bypass-mode.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-bypass-mode.rst
index 80d551a2053a..80d551a2053a 100644
--- a/drivers/staging/media/av7110/audio-set-bypass-mode.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-bypass-mode.rst
diff --git a/drivers/staging/media/av7110/audio-set-id.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-id.rst
index 39ad846d412d..39ad846d412d 100644
--- a/drivers/staging/media/av7110/audio-set-id.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-id.rst
diff --git a/drivers/staging/media/av7110/audio-set-mixer.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-mixer.rst
index 45dbdf4801e0..45dbdf4801e0 100644
--- a/drivers/staging/media/av7110/audio-set-mixer.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-mixer.rst
diff --git a/drivers/staging/media/av7110/audio-set-mute.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-mute.rst
index 987751f92967..987751f92967 100644
--- a/drivers/staging/media/av7110/audio-set-mute.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-mute.rst
diff --git a/drivers/staging/media/av7110/audio-set-streamtype.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-streamtype.rst
index 77d73c74882f..77d73c74882f 100644
--- a/drivers/staging/media/av7110/audio-set-streamtype.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-set-streamtype.rst
diff --git a/drivers/staging/media/av7110/audio-stop.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio-stop.rst
index d77f786fd797..d77f786fd797 100644
--- a/drivers/staging/media/av7110/audio-stop.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio-stop.rst
diff --git a/drivers/staging/media/av7110/audio.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio.rst
index aa753336b31f..aa753336b31f 100644
--- a/drivers/staging/media/av7110/audio.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio.rst
diff --git a/drivers/staging/media/av7110/audio_data_types.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio_data_types.rst
index 4744529136a8..4744529136a8 100644
--- a/drivers/staging/media/av7110/audio_data_types.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio_data_types.rst
diff --git a/drivers/staging/media/av7110/audio_function_calls.rst b/drivers/staging/media/deprecated/saa7146/av7110/audio_function_calls.rst
index fa5ba9539caf..fa5ba9539caf 100644
--- a/drivers/staging/media/av7110/audio_function_calls.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/audio_function_calls.rst
diff --git a/drivers/staging/media/av7110/av7110.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110.c
index df81a9b744c2..df81a9b744c2 100644
--- a/drivers/staging/media/av7110/av7110.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110.c
diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/deprecated/saa7146/av7110/av7110.h
index 809d938ae166..9fde69b38f1c 100644
--- a/drivers/staging/media/av7110/av7110.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110.h
@@ -33,7 +33,7 @@
#include "stv0297.h"
#include "l64781.h"
-#include <media/drv-intf/saa7146_vv.h>
+#include "saa7146_vv.h"
#define ANALOG_TUNER_VES1820 1
diff --git a/drivers/staging/media/av7110/av7110_av.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
index ab7cf496b454..0bf513c26b6b 100644
--- a/drivers/staging/media/av7110/av7110_av.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
@@ -106,7 +106,7 @@ int av7110_av_start_record(struct av7110 *av7110, int av,
int ret = 0;
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
- dprintk(2, "av7110:%p, , dvb_demux_feed:%p\n", av7110, dvbdmxfeed);
+ dprintk(2, "av7110:%p, dvb_demux_feed:%p\n", av7110, dvbdmxfeed);
if (av7110->playing || (av7110->rec_mode & av))
return -EBUSY;
diff --git a/drivers/staging/media/av7110/av7110_av.h b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.h
index 71bbd4391f57..71bbd4391f57 100644
--- a/drivers/staging/media/av7110/av7110_av.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.h
diff --git a/drivers/staging/media/av7110/av7110_ca.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.c
index c1338e074a3d..c1338e074a3d 100644
--- a/drivers/staging/media/av7110/av7110_ca.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.c
diff --git a/drivers/staging/media/av7110/av7110_ca.h b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.h
index a6e3f2955730..a6e3f2955730 100644
--- a/drivers/staging/media/av7110/av7110_ca.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ca.h
diff --git a/drivers/staging/media/av7110/av7110_hw.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.c
index 93ca31e38ddd..93ca31e38ddd 100644
--- a/drivers/staging/media/av7110/av7110_hw.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.c
diff --git a/drivers/staging/media/av7110/av7110_hw.h b/drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.h
index 6380d8950c69..6380d8950c69 100644
--- a/drivers/staging/media/av7110/av7110_hw.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_hw.h
diff --git a/drivers/staging/media/av7110/av7110_ipack.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.c
index 30330ed01ce8..30330ed01ce8 100644
--- a/drivers/staging/media/av7110/av7110_ipack.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.c
diff --git a/drivers/staging/media/av7110/av7110_ipack.h b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.h
index 943ec899bb93..943ec899bb93 100644
--- a/drivers/staging/media/av7110/av7110_ipack.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ipack.h
diff --git a/drivers/staging/media/av7110/av7110_ir.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ir.c
index a851ba328e4a..a851ba328e4a 100644
--- a/drivers/staging/media/av7110/av7110_ir.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_ir.c
diff --git a/drivers/staging/media/av7110/av7110_v4l.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_v4l.c
index c89f536f699c..c89f536f699c 100644
--- a/drivers/staging/media/av7110/av7110_v4l.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_v4l.c
diff --git a/drivers/staging/media/av7110/budget-patch.c b/drivers/staging/media/deprecated/saa7146/av7110/budget-patch.c
index d173c8ade6a7..d173c8ade6a7 100644
--- a/drivers/staging/media/av7110/budget-patch.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/budget-patch.c
diff --git a/drivers/staging/media/av7110/dvb_filter.c b/drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.c
index 8c2eca5dcdc9..8c2eca5dcdc9 100644
--- a/drivers/staging/media/av7110/dvb_filter.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.c
diff --git a/drivers/staging/media/av7110/dvb_filter.h b/drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.h
index 67a3c6333bca..67a3c6333bca 100644
--- a/drivers/staging/media/av7110/dvb_filter.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/dvb_filter.h
diff --git a/drivers/staging/media/av7110/sp8870.c b/drivers/staging/media/deprecated/saa7146/av7110/sp8870.c
index 9767159aeb9b..9767159aeb9b 100644
--- a/drivers/staging/media/av7110/sp8870.c
+++ b/drivers/staging/media/deprecated/saa7146/av7110/sp8870.c
diff --git a/drivers/staging/media/av7110/sp8870.h b/drivers/staging/media/deprecated/saa7146/av7110/sp8870.h
index 5eacf39f425e..5eacf39f425e 100644
--- a/drivers/staging/media/av7110/sp8870.h
+++ b/drivers/staging/media/deprecated/saa7146/av7110/sp8870.h
diff --git a/drivers/staging/media/av7110/video-clear-buffer.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-clear-buffer.rst
index a7730559bbb2..a7730559bbb2 100644
--- a/drivers/staging/media/av7110/video-clear-buffer.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-clear-buffer.rst
diff --git a/drivers/staging/media/av7110/video-command.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-command.rst
index cae9445eb3af..cae9445eb3af 100644
--- a/drivers/staging/media/av7110/video-command.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-command.rst
diff --git a/drivers/staging/media/av7110/video-continue.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-continue.rst
index bc34bf3989e4..bc34bf3989e4 100644
--- a/drivers/staging/media/av7110/video-continue.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-continue.rst
diff --git a/drivers/staging/media/av7110/video-fast-forward.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-fast-forward.rst
index e71fa8d6965b..e71fa8d6965b 100644
--- a/drivers/staging/media/av7110/video-fast-forward.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-fast-forward.rst
diff --git a/drivers/staging/media/av7110/video-fclose.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-fclose.rst
index 01d24d548439..01d24d548439 100644
--- a/drivers/staging/media/av7110/video-fclose.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-fclose.rst
diff --git a/drivers/staging/media/av7110/video-fopen.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-fopen.rst
index 1371b083e4e8..1371b083e4e8 100644
--- a/drivers/staging/media/av7110/video-fopen.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-fopen.rst
diff --git a/drivers/staging/media/av7110/video-freeze.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-freeze.rst
index 4321f257cb70..4321f257cb70 100644
--- a/drivers/staging/media/av7110/video-freeze.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-freeze.rst
diff --git a/drivers/staging/media/av7110/video-fwrite.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-fwrite.rst
index a07fd7d7a40e..a07fd7d7a40e 100644
--- a/drivers/staging/media/av7110/video-fwrite.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-fwrite.rst
diff --git a/drivers/staging/media/av7110/video-get-capabilities.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-get-capabilities.rst
index 01e09f56656c..01e09f56656c 100644
--- a/drivers/staging/media/av7110/video-get-capabilities.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-get-capabilities.rst
diff --git a/drivers/staging/media/av7110/video-get-event.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-get-event.rst
index 90382bc36cfe..90382bc36cfe 100644
--- a/drivers/staging/media/av7110/video-get-event.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-get-event.rst
diff --git a/drivers/staging/media/av7110/video-get-frame-count.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-get-frame-count.rst
index b48ac8c58a41..b48ac8c58a41 100644
--- a/drivers/staging/media/av7110/video-get-frame-count.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-get-frame-count.rst
diff --git a/drivers/staging/media/av7110/video-get-pts.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-get-pts.rst
index fedaff41be0b..fedaff41be0b 100644
--- a/drivers/staging/media/av7110/video-get-pts.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-get-pts.rst
diff --git a/drivers/staging/media/av7110/video-get-size.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-get-size.rst
index de34331c5bd1..de34331c5bd1 100644
--- a/drivers/staging/media/av7110/video-get-size.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-get-size.rst
diff --git a/drivers/staging/media/av7110/video-get-status.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-get-status.rst
index 9b86fbf411d4..9b86fbf411d4 100644
--- a/drivers/staging/media/av7110/video-get-status.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-get-status.rst
diff --git a/drivers/staging/media/av7110/video-play.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-play.rst
index 35ac8b98fdbf..35ac8b98fdbf 100644
--- a/drivers/staging/media/av7110/video-play.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-play.rst
diff --git a/drivers/staging/media/av7110/video-select-source.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-select-source.rst
index 929a20985d53..929a20985d53 100644
--- a/drivers/staging/media/av7110/video-select-source.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-select-source.rst
diff --git a/drivers/staging/media/av7110/video-set-blank.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-set-blank.rst
index 70249a6ba125..70249a6ba125 100644
--- a/drivers/staging/media/av7110/video-set-blank.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-set-blank.rst
diff --git a/drivers/staging/media/av7110/video-set-display-format.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-set-display-format.rst
index 1de4f40ae732..1de4f40ae732 100644
--- a/drivers/staging/media/av7110/video-set-display-format.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-set-display-format.rst
diff --git a/drivers/staging/media/av7110/video-set-format.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-set-format.rst
index bb64e37ae081..bb64e37ae081 100644
--- a/drivers/staging/media/av7110/video-set-format.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-set-format.rst
diff --git a/drivers/staging/media/av7110/video-set-streamtype.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-set-streamtype.rst
index 1f31c048bdbc..1f31c048bdbc 100644
--- a/drivers/staging/media/av7110/video-set-streamtype.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-set-streamtype.rst
diff --git a/drivers/staging/media/av7110/video-slowmotion.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-slowmotion.rst
index 1478fcc30cb8..1478fcc30cb8 100644
--- a/drivers/staging/media/av7110/video-slowmotion.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-slowmotion.rst
diff --git a/drivers/staging/media/av7110/video-stillpicture.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-stillpicture.rst
index d25384222a20..d25384222a20 100644
--- a/drivers/staging/media/av7110/video-stillpicture.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-stillpicture.rst
diff --git a/drivers/staging/media/av7110/video-stop.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-stop.rst
index 96f61c5b48a2..96f61c5b48a2 100644
--- a/drivers/staging/media/av7110/video-stop.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-stop.rst
diff --git a/drivers/staging/media/av7110/video-try-command.rst b/drivers/staging/media/deprecated/saa7146/av7110/video-try-command.rst
index 79bf3dfb8a32..79bf3dfb8a32 100644
--- a/drivers/staging/media/av7110/video-try-command.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video-try-command.rst
diff --git a/drivers/staging/media/av7110/video.rst b/drivers/staging/media/deprecated/saa7146/av7110/video.rst
index 808705b769a1..808705b769a1 100644
--- a/drivers/staging/media/av7110/video.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video.rst
diff --git a/drivers/staging/media/av7110/video_function_calls.rst b/drivers/staging/media/deprecated/saa7146/av7110/video_function_calls.rst
index 20a897be5dca..20a897be5dca 100644
--- a/drivers/staging/media/av7110/video_function_calls.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video_function_calls.rst
diff --git a/drivers/staging/media/av7110/video_types.rst b/drivers/staging/media/deprecated/saa7146/av7110/video_types.rst
index c4557d328b7a..c4557d328b7a 100644
--- a/drivers/staging/media/av7110/video_types.rst
+++ b/drivers/staging/media/deprecated/saa7146/av7110/video_types.rst
diff --git a/drivers/media/common/saa7146/Kconfig b/drivers/staging/media/deprecated/saa7146/common/Kconfig
index a0aa155e5d85..a0aa155e5d85 100644
--- a/drivers/media/common/saa7146/Kconfig
+++ b/drivers/staging/media/deprecated/saa7146/common/Kconfig
diff --git a/drivers/media/common/saa7146/Makefile b/drivers/staging/media/deprecated/saa7146/common/Makefile
index 2a6337feaec8..2a6337feaec8 100644
--- a/drivers/media/common/saa7146/Makefile
+++ b/drivers/staging/media/deprecated/saa7146/common/Makefile
diff --git a/drivers/staging/media/deprecated/saa7146/common/saa7146.h b/drivers/staging/media/deprecated/saa7146/common/saa7146.h
new file mode 100644
index 000000000000..71ce63c99cb4
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146.h
@@ -0,0 +1,472 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAA7146__
+#define __SAA7146__
+
+#include <linux/delay.h> /* for delay-stuff */
+#include <linux/slab.h> /* for kmalloc/kfree */
+#include <linux/pci.h> /* for pci-config-stuff, vendor ids etc. */
+#include <linux/init.h> /* for "__init" */
+#include <linux/interrupt.h> /* for IMMEDIATE_BH */
+#include <linux/kmod.h> /* for kernel module loader */
+#include <linux/i2c.h> /* for i2c subsystem */
+#include <asm/io.h> /* for accessing devices */
+#include <linux/stringify.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include <linux/vmalloc.h> /* for vmalloc() */
+#include <linux/mm.h> /* for vmalloc_to_page() */
+
+#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr)))
+#define saa7146_read(sxy,adr) readl(sxy->mem+(adr))
+
+extern unsigned int saa7146_debug;
+
+#ifndef DEBUG_VARIABLE
+ #define DEBUG_VARIABLE saa7146_debug
+#endif
+
+#define ERR(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
+
+#define _DBG(mask, fmt, ...) \
+do { \
+ if (DEBUG_VARIABLE & mask) \
+ pr_debug("%s(): " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+
+/* simple debug messages */
+#define DEB_S(fmt, ...) _DBG(0x01, fmt, ##__VA_ARGS__)
+/* more detailed debug messages */
+#define DEB_D(fmt, ...) _DBG(0x02, fmt, ##__VA_ARGS__)
+/* print enter and exit of functions */
+#define DEB_EE(fmt, ...) _DBG(0x04, fmt, ##__VA_ARGS__)
+/* i2c debug messages */
+#define DEB_I2C(fmt, ...) _DBG(0x08, fmt, ##__VA_ARGS__)
+/* vbi debug messages */
+#define DEB_VBI(fmt, ...) _DBG(0x10, fmt, ##__VA_ARGS__)
+/* interrupt debug messages */
+#define DEB_INT(fmt, ...) _DBG(0x20, fmt, ##__VA_ARGS__)
+/* capture debug messages */
+#define DEB_CAP(fmt, ...) _DBG(0x40, fmt, ##__VA_ARGS__)
+
+#define SAA7146_ISR_CLEAR(x,y) \
+ saa7146_write(x, ISR, (y));
+
+struct module;
+
+struct saa7146_dev;
+struct saa7146_extension;
+struct saa7146_vv;
+
+/* saa7146 page table */
+struct saa7146_pgtable {
+ unsigned int size;
+ __le32 *cpu;
+ dma_addr_t dma;
+ /* used for offsets for u,v planes for planar capture modes */
+ unsigned long offset;
+ /* used for custom pagetables (used for example by budget dvb cards) */
+ struct scatterlist *slist;
+ int nents;
+};
+
+struct saa7146_pci_extension_data {
+ struct saa7146_extension *ext;
+ void *ext_priv; /* most likely a name string */
+};
+
+#define MAKE_EXTENSION_PCI(x_var, x_vendor, x_device) \
+ { \
+ .vendor = PCI_VENDOR_ID_PHILIPS, \
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7146, \
+ .subvendor = x_vendor, \
+ .subdevice = x_device, \
+ .driver_data = (unsigned long)& x_var, \
+ }
+
+struct saa7146_extension
+{
+ char name[32]; /* name of the device */
+#define SAA7146_USE_I2C_IRQ 0x1
+#define SAA7146_I2C_SHORT_DELAY 0x2
+ int flags;
+
+ /* pairs of subvendor and subdevice ids for
+ supported devices, last entry 0xffff, 0xffff */
+ struct module *module;
+ struct pci_driver driver;
+ const struct pci_device_id *pci_tbl;
+
+ /* extension functions */
+ int (*probe)(struct saa7146_dev *);
+ int (*attach)(struct saa7146_dev *, struct saa7146_pci_extension_data *);
+ int (*detach)(struct saa7146_dev*);
+
+ u32 irq_mask; /* mask to indicate, which irq-events are handled by the extension */
+ void (*irq_func)(struct saa7146_dev*, u32* irq_mask);
+};
+
+struct saa7146_dma
+{
+ dma_addr_t dma_handle;
+ __le32 *cpu_addr;
+};
+
+struct saa7146_dev
+{
+ struct module *module;
+
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* different device locks */
+ spinlock_t slock;
+ struct mutex v4l2_lock;
+
+ unsigned char __iomem *mem; /* pointer to mapped IO memory */
+ u32 revision; /* chip revision; needed for bug-workarounds*/
+
+ /* pci-device & irq stuff*/
+ char name[32];
+ struct pci_dev *pci;
+ u32 int_todo;
+ spinlock_t int_slock;
+
+ /* extension handling */
+ struct saa7146_extension *ext; /* indicates if handled by extension */
+ void *ext_priv; /* pointer for extension private use (most likely some private data) */
+ struct saa7146_ext_vv *ext_vv_data;
+
+ /* per device video/vbi information (if available) */
+ struct saa7146_vv *vv_data;
+ void (*vv_callback)(struct saa7146_dev *dev, unsigned long status);
+
+ /* i2c-stuff */
+ struct mutex i2c_lock;
+
+ u32 i2c_bitrate;
+ struct saa7146_dma d_i2c; /* pointer to i2c memory */
+ wait_queue_head_t i2c_wq;
+ int i2c_op;
+
+ /* memories */
+ struct saa7146_dma d_rps0;
+ struct saa7146_dma d_rps1;
+};
+
+static inline struct saa7146_dev *to_saa7146_dev(struct v4l2_device *v4l2_dev)
+{
+ return container_of(v4l2_dev, struct saa7146_dev, v4l2_dev);
+}
+
+/* from saa7146_i2c.c */
+int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate);
+
+/* from saa7146_core.c */
+int saa7146_register_extension(struct saa7146_extension*);
+int saa7146_unregister_extension(struct saa7146_extension*);
+struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc);
+int saa7146_pgtable_alloc(struct pci_dev *pci, struct saa7146_pgtable *pt);
+void saa7146_pgtable_free(struct pci_dev *pci, struct saa7146_pgtable *pt);
+int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt, struct scatterlist *list, int length );
+void *saa7146_vmalloc_build_pgtable(struct pci_dev *pci, long length, struct saa7146_pgtable *pt);
+void saa7146_vfree_destroy_pgtable(struct pci_dev *pci, void *mem, struct saa7146_pgtable *pt);
+void saa7146_setgpio(struct saa7146_dev *dev, int port, u32 data);
+int saa7146_wait_for_debi_done(struct saa7146_dev *dev, int nobusyloop);
+
+/* some memory sizes */
+#define SAA7146_I2C_MEM ( 1*PAGE_SIZE)
+#define SAA7146_RPS_MEM ( 1*PAGE_SIZE)
+
+/* some i2c constants */
+#define SAA7146_I2C_TIMEOUT 100 /* i2c-timeout-value in ms */
+#define SAA7146_I2C_RETRIES 3 /* how many times shall we retry an i2c-operation? */
+#define SAA7146_I2C_DELAY 5 /* time we wait after certain i2c-operations */
+
+/* unsorted defines */
+#define ME1 0x0000000800
+#define PV1 0x0000000008
+
+/* gpio defines */
+#define SAA7146_GPIO_INPUT 0x00
+#define SAA7146_GPIO_IRQHI 0x10
+#define SAA7146_GPIO_IRQLO 0x20
+#define SAA7146_GPIO_IRQHL 0x30
+#define SAA7146_GPIO_OUTLO 0x40
+#define SAA7146_GPIO_OUTHI 0x50
+
+/* debi defines */
+#define DEBINOSWAP 0x000e0000
+
+/* define for the register programming sequencer (rps) */
+#define CMD_NOP 0x00000000 /* No operation */
+#define CMD_CLR_EVENT 0x00000000 /* Clear event */
+#define CMD_SET_EVENT 0x10000000 /* Set signal event */
+#define CMD_PAUSE 0x20000000 /* Pause */
+#define CMD_CHECK_LATE 0x30000000 /* Check late */
+#define CMD_UPLOAD 0x40000000 /* Upload */
+#define CMD_STOP 0x50000000 /* Stop */
+#define CMD_INTERRUPT 0x60000000 /* Interrupt */
+#define CMD_JUMP 0x80000000 /* Jump */
+#define CMD_WR_REG 0x90000000 /* Write (load) register */
+#define CMD_RD_REG 0xa0000000 /* Read (store) register */
+#define CMD_WR_REG_MASK 0xc0000000 /* Write register with mask */
+
+#define CMD_OAN MASK_27
+#define CMD_INV MASK_26
+#define CMD_SIG4 MASK_25
+#define CMD_SIG3 MASK_24
+#define CMD_SIG2 MASK_23
+#define CMD_SIG1 MASK_22
+#define CMD_SIG0 MASK_21
+#define CMD_O_FID_B MASK_14
+#define CMD_E_FID_B MASK_13
+#define CMD_O_FID_A MASK_12
+#define CMD_E_FID_A MASK_11
+
+/* some events and command modifiers for rps1 squarewave generator */
+#define EVT_HS (1<<15) // Source Line Threshold reached
+#define EVT_VBI_B (1<<9) // VSYNC Event
+#define RPS_OAN (1<<27) // 1: OR events, 0: AND events
+#define RPS_INV (1<<26) // Invert (compound) event
+#define GPIO3_MSK 0xFF000000 // GPIO #3 control bits
+
+/* Bit mask constants */
+#define MASK_00 0x00000001 /* Mask value for bit 0 */
+#define MASK_01 0x00000002 /* Mask value for bit 1 */
+#define MASK_02 0x00000004 /* Mask value for bit 2 */
+#define MASK_03 0x00000008 /* Mask value for bit 3 */
+#define MASK_04 0x00000010 /* Mask value for bit 4 */
+#define MASK_05 0x00000020 /* Mask value for bit 5 */
+#define MASK_06 0x00000040 /* Mask value for bit 6 */
+#define MASK_07 0x00000080 /* Mask value for bit 7 */
+#define MASK_08 0x00000100 /* Mask value for bit 8 */
+#define MASK_09 0x00000200 /* Mask value for bit 9 */
+#define MASK_10 0x00000400 /* Mask value for bit 10 */
+#define MASK_11 0x00000800 /* Mask value for bit 11 */
+#define MASK_12 0x00001000 /* Mask value for bit 12 */
+#define MASK_13 0x00002000 /* Mask value for bit 13 */
+#define MASK_14 0x00004000 /* Mask value for bit 14 */
+#define MASK_15 0x00008000 /* Mask value for bit 15 */
+#define MASK_16 0x00010000 /* Mask value for bit 16 */
+#define MASK_17 0x00020000 /* Mask value for bit 17 */
+#define MASK_18 0x00040000 /* Mask value for bit 18 */
+#define MASK_19 0x00080000 /* Mask value for bit 19 */
+#define MASK_20 0x00100000 /* Mask value for bit 20 */
+#define MASK_21 0x00200000 /* Mask value for bit 21 */
+#define MASK_22 0x00400000 /* Mask value for bit 22 */
+#define MASK_23 0x00800000 /* Mask value for bit 23 */
+#define MASK_24 0x01000000 /* Mask value for bit 24 */
+#define MASK_25 0x02000000 /* Mask value for bit 25 */
+#define MASK_26 0x04000000 /* Mask value for bit 26 */
+#define MASK_27 0x08000000 /* Mask value for bit 27 */
+#define MASK_28 0x10000000 /* Mask value for bit 28 */
+#define MASK_29 0x20000000 /* Mask value for bit 29 */
+#define MASK_30 0x40000000 /* Mask value for bit 30 */
+#define MASK_31 0x80000000 /* Mask value for bit 31 */
+
+#define MASK_B0 0x000000ff /* Mask value for byte 0 */
+#define MASK_B1 0x0000ff00 /* Mask value for byte 1 */
+#define MASK_B2 0x00ff0000 /* Mask value for byte 2 */
+#define MASK_B3 0xff000000 /* Mask value for byte 3 */
+
+#define MASK_W0 0x0000ffff /* Mask value for word 0 */
+#define MASK_W1 0xffff0000 /* Mask value for word 1 */
+
+#define MASK_PA 0xfffffffc /* Mask value for physical address */
+#define MASK_PR 0xfffffffe /* Mask value for protection register */
+#define MASK_ER 0xffffffff /* Mask value for the entire register */
+
+#define MASK_NONE 0x00000000 /* No mask */
+
+/* register aliases */
+#define BASE_ODD1 0x00 /* Video DMA 1 registers */
+#define BASE_EVEN1 0x04
+#define PROT_ADDR1 0x08
+#define PITCH1 0x0C
+#define BASE_PAGE1 0x10 /* Video DMA 1 base page */
+#define NUM_LINE_BYTE1 0x14
+
+#define BASE_ODD2 0x18 /* Video DMA 2 registers */
+#define BASE_EVEN2 0x1C
+#define PROT_ADDR2 0x20
+#define PITCH2 0x24
+#define BASE_PAGE2 0x28 /* Video DMA 2 base page */
+#define NUM_LINE_BYTE2 0x2C
+
+#define BASE_ODD3 0x30 /* Video DMA 3 registers */
+#define BASE_EVEN3 0x34
+#define PROT_ADDR3 0x38
+#define PITCH3 0x3C
+#define BASE_PAGE3 0x40 /* Video DMA 3 base page */
+#define NUM_LINE_BYTE3 0x44
+
+#define PCI_BT_V1 0x48 /* Video/FIFO 1 */
+#define PCI_BT_V2 0x49 /* Video/FIFO 2 */
+#define PCI_BT_V3 0x4A /* Video/FIFO 3 */
+#define PCI_BT_DEBI 0x4B /* DEBI */
+#define PCI_BT_A 0x4C /* Audio */
+
+#define DD1_INIT 0x50 /* Init setting of DD1 interface */
+
+#define DD1_STREAM_B 0x54 /* DD1 B video data stream handling */
+#define DD1_STREAM_A 0x56 /* DD1 A video data stream handling */
+
+#define BRS_CTRL 0x58 /* BRS control register */
+#define HPS_CTRL 0x5C /* HPS control register */
+#define HPS_V_SCALE 0x60 /* HPS vertical scale */
+#define HPS_V_GAIN 0x64 /* HPS vertical ACL and gain */
+#define HPS_H_PRESCALE 0x68 /* HPS horizontal prescale */
+#define HPS_H_SCALE 0x6C /* HPS horizontal scale */
+#define BCS_CTRL 0x70 /* BCS control */
+#define CHROMA_KEY_RANGE 0x74
+#define CLIP_FORMAT_CTRL 0x78 /* HPS outputs formats & clipping */
+
+#define DEBI_CONFIG 0x7C
+#define DEBI_COMMAND 0x80
+#define DEBI_PAGE 0x84
+#define DEBI_AD 0x88
+
+#define I2C_TRANSFER 0x8C
+#define I2C_STATUS 0x90
+
+#define BASE_A1_IN 0x94 /* Audio 1 input DMA */
+#define PROT_A1_IN 0x98
+#define PAGE_A1_IN 0x9C
+
+#define BASE_A1_OUT 0xA0 /* Audio 1 output DMA */
+#define PROT_A1_OUT 0xA4
+#define PAGE_A1_OUT 0xA8
+
+#define BASE_A2_IN 0xAC /* Audio 2 input DMA */
+#define PROT_A2_IN 0xB0
+#define PAGE_A2_IN 0xB4
+
+#define BASE_A2_OUT 0xB8 /* Audio 2 output DMA */
+#define PROT_A2_OUT 0xBC
+#define PAGE_A2_OUT 0xC0
+
+#define RPS_PAGE0 0xC4 /* RPS task 0 page register */
+#define RPS_PAGE1 0xC8 /* RPS task 1 page register */
+
+#define RPS_THRESH0 0xCC /* HBI threshold for task 0 */
+#define RPS_THRESH1 0xD0 /* HBI threshold for task 1 */
+
+#define RPS_TOV0 0xD4 /* RPS timeout for task 0 */
+#define RPS_TOV1 0xD8 /* RPS timeout for task 1 */
+
+#define IER 0xDC /* Interrupt enable register */
+
+#define GPIO_CTRL 0xE0 /* GPIO 0-3 register */
+
+#define EC1SSR 0xE4 /* Event cnt set 1 source select */
+#define EC2SSR 0xE8 /* Event cnt set 2 source select */
+#define ECT1R 0xEC /* Event cnt set 1 thresholds */
+#define ECT2R 0xF0 /* Event cnt set 2 thresholds */
+
+#define ACON1 0xF4
+#define ACON2 0xF8
+
+#define MC1 0xFC /* Main control register 1 */
+#define MC2 0x100 /* Main control register 2 */
+
+#define RPS_ADDR0 0x104 /* RPS task 0 address register */
+#define RPS_ADDR1 0x108 /* RPS task 1 address register */
+
+#define ISR 0x10C /* Interrupt status register */
+#define PSR 0x110 /* Primary status register */
+#define SSR 0x114 /* Secondary status register */
+
+#define EC1R 0x118 /* Event counter set 1 register */
+#define EC2R 0x11C /* Event counter set 2 register */
+
+#define PCI_VDP1 0x120 /* Video DMA pointer of FIFO 1 */
+#define PCI_VDP2 0x124 /* Video DMA pointer of FIFO 2 */
+#define PCI_VDP3 0x128 /* Video DMA pointer of FIFO 3 */
+#define PCI_ADP1 0x12C /* Audio DMA pointer of audio out 1 */
+#define PCI_ADP2 0x130 /* Audio DMA pointer of audio in 1 */
+#define PCI_ADP3 0x134 /* Audio DMA pointer of audio out 2 */
+#define PCI_ADP4 0x138 /* Audio DMA pointer of audio in 2 */
+#define PCI_DMA_DDP 0x13C /* DEBI DMA pointer */
+
+#define LEVEL_REP 0x140,
+#define A_TIME_SLOT1 0x180, /* from 180 - 1BC */
+#define A_TIME_SLOT2 0x1C0, /* from 1C0 - 1FC */
+
+/* isr masks */
+#define SPCI_PPEF 0x80000000 /* PCI parity error */
+#define SPCI_PABO 0x40000000 /* PCI access error (target or master abort) */
+#define SPCI_PPED 0x20000000 /* PCI parity error on 'real time data' */
+#define SPCI_RPS_I1 0x10000000 /* Interrupt issued by RPS1 */
+#define SPCI_RPS_I0 0x08000000 /* Interrupt issued by RPS0 */
+#define SPCI_RPS_LATE1 0x04000000 /* RPS task 1 is late */
+#define SPCI_RPS_LATE0 0x02000000 /* RPS task 0 is late */
+#define SPCI_RPS_E1 0x01000000 /* RPS error from task 1 */
+#define SPCI_RPS_E0 0x00800000 /* RPS error from task 0 */
+#define SPCI_RPS_TO1 0x00400000 /* RPS timeout task 1 */
+#define SPCI_RPS_TO0 0x00200000 /* RPS timeout task 0 */
+#define SPCI_UPLD 0x00100000 /* RPS in upload */
+#define SPCI_DEBI_S 0x00080000 /* DEBI status */
+#define SPCI_DEBI_E 0x00040000 /* DEBI error */
+#define SPCI_IIC_S 0x00020000 /* I2C status */
+#define SPCI_IIC_E 0x00010000 /* I2C error */
+#define SPCI_A2_IN 0x00008000 /* Audio 2 input DMA protection / limit */
+#define SPCI_A2_OUT 0x00004000 /* Audio 2 output DMA protection / limit */
+#define SPCI_A1_IN 0x00002000 /* Audio 1 input DMA protection / limit */
+#define SPCI_A1_OUT 0x00001000 /* Audio 1 output DMA protection / limit */
+#define SPCI_AFOU 0x00000800 /* Audio FIFO over- / underflow */
+#define SPCI_V_PE 0x00000400 /* Video protection address */
+#define SPCI_VFOU 0x00000200 /* Video FIFO over- / underflow */
+#define SPCI_FIDA 0x00000100 /* Field ID video port A */
+#define SPCI_FIDB 0x00000080 /* Field ID video port B */
+#define SPCI_PIN3 0x00000040 /* GPIO pin 3 */
+#define SPCI_PIN2 0x00000020 /* GPIO pin 2 */
+#define SPCI_PIN1 0x00000010 /* GPIO pin 1 */
+#define SPCI_PIN0 0x00000008 /* GPIO pin 0 */
+#define SPCI_ECS 0x00000004 /* Event counter 1, 2, 4, 5 */
+#define SPCI_EC3S 0x00000002 /* Event counter 3 */
+#define SPCI_EC0S 0x00000001 /* Event counter 0 */
+
+/* i2c */
+#define SAA7146_I2C_ABORT (1<<7)
+#define SAA7146_I2C_SPERR (1<<6)
+#define SAA7146_I2C_APERR (1<<5)
+#define SAA7146_I2C_DTERR (1<<4)
+#define SAA7146_I2C_DRERR (1<<3)
+#define SAA7146_I2C_AL (1<<2)
+#define SAA7146_I2C_ERR (1<<1)
+#define SAA7146_I2C_BUSY (1<<0)
+
+#define SAA7146_I2C_START (0x3)
+#define SAA7146_I2C_CONT (0x2)
+#define SAA7146_I2C_STOP (0x1)
+#define SAA7146_I2C_NOP (0x0)
+
+#define SAA7146_I2C_BUS_BIT_RATE_6400 (0x500)
+#define SAA7146_I2C_BUS_BIT_RATE_3200 (0x100)
+#define SAA7146_I2C_BUS_BIT_RATE_480 (0x400)
+#define SAA7146_I2C_BUS_BIT_RATE_320 (0x600)
+#define SAA7146_I2C_BUS_BIT_RATE_240 (0x700)
+#define SAA7146_I2C_BUS_BIT_RATE_120 (0x000)
+#define SAA7146_I2C_BUS_BIT_RATE_80 (0x200)
+#define SAA7146_I2C_BUS_BIT_RATE_60 (0x300)
+
+static inline void SAA7146_IER_DISABLE(struct saa7146_dev *x, unsigned y)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&x->int_slock, flags);
+ saa7146_write(x, IER, saa7146_read(x, IER) & ~y);
+ spin_unlock_irqrestore(&x->int_slock, flags);
+}
+
+static inline void SAA7146_IER_ENABLE(struct saa7146_dev *x, unsigned y)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&x->int_slock, flags);
+ saa7146_write(x, IER, saa7146_read(x, IER) | y);
+ spin_unlock_irqrestore(&x->int_slock, flags);
+}
+
+#endif
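To keep the relocated header readable on its own, here is a compressed sketch of how an extension driver consumes it. Everything prefixed my_ is hypothetical, the subvendor/subdevice IDs are placeholders, and the interrupt/RPS register writes only illustrate the pattern used by the existing saa7146 users (MC1's upper half acts as a write-enable mask for its lower half):

#include <linux/module.h>
#include "saa7146.h"

static struct saa7146_extension my_ext;	/* defined below, referenced by the id table */

/* Unmask the RPS0 interrupt (MASK_27 == SPCI_RPS_I0) and start RPS task 0. */
static int my_attach(struct saa7146_dev *dev,
		     struct saa7146_pci_extension_data *pci_ext)
{
	SAA7146_ISR_CLEAR(dev, MASK_27);
	SAA7146_IER_ENABLE(dev, MASK_27);
	saa7146_write(dev, MC1, (MASK_12 | MASK_28));	/* write-enable + RPS0 run bit */
	return 0;
}

static int my_detach(struct saa7146_dev *dev)
{
	SAA7146_IER_DISABLE(dev, MASK_27);
	return 0;
}

static const struct pci_device_id my_pci_tbl[] = {
	MAKE_EXTENSION_PCI(my_ext, 0x0000, 0x0000),	/* placeholder subvendor/subdevice */
	{ }
};

static struct saa7146_extension my_ext = {
	.name	 = "example extension",
	.flags	 = SAA7146_USE_I2C_IRQ,
	.module	 = THIS_MODULE,
	.pci_tbl = my_pci_tbl,
	.attach	 = my_attach,
	.detach	 = my_detach,
};

static int __init my_init(void)
{
	return saa7146_register_extension(&my_ext);
}

static void __exit my_exit(void)
{
	saa7146_unregister_extension(&my_ext);
}

module_init(my_init);
module_exit(my_exit);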
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/staging/media/deprecated/saa7146/common/saa7146_core.c
index e50fa0ff7c5d..da21d346b870 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_core.c
@@ -8,8 +8,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <media/drv-intf/saa7146.h>
#include <linux/module.h>
+#include "saa7146.h"
static int saa7146_num;
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/staging/media/deprecated/saa7146/common/saa7146_fops.c
index e9a15de6126e..aa14698a9c54 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_fops.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <media/drv-intf/saa7146_vv.h>
#include <linux/module.h>
+#include "saa7146_vv.h"
/****************************************************************************/
/* resource management functions, shamelessly stolen from saa7134 driver */
diff --git a/drivers/media/common/saa7146/saa7146_hlp.c b/drivers/staging/media/deprecated/saa7146/common/saa7146_hlp.c
index 6c9946a402ee..b1222a4cfa4a 100644
--- a/drivers/media/common/saa7146/saa7146_hlp.c
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_hlp.c
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
-#include <media/drv-intf/saa7146_vv.h>
+#include "saa7146_vv.h"
static void calculate_output_format_register(struct saa7146_dev* saa, u32 palette, u32* clip_format)
{
diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/staging/media/deprecated/saa7146/common/saa7146_i2c.c
index df9ebe2a168c..7a33fe51775a 100644
--- a/drivers/media/common/saa7146/saa7146_i2c.c
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_i2c.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <media/drv-intf/saa7146_vv.h>
+#include "saa7146_vv.h"
static u32 saa7146_i2c_func(struct i2c_adapter *adapter)
{
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/staging/media/deprecated/saa7146/common/saa7146_vbi.c
index bd442b984423..2d4a05d7bc5b 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_vbi.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <media/drv-intf/saa7146_vv.h>
+#include "saa7146_vv.h"
static int vbi_pixel_to_capture = 720 * 2;
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/staging/media/deprecated/saa7146/common/saa7146_video.c
index 2296765079a4..4598a44231fa 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_video.c
@@ -1,10 +1,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <media/drv-intf/saa7146_vv.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include "saa7146_vv.h"
static int max_memory = 32;
diff --git a/drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h b/drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h
new file mode 100644
index 000000000000..d7bd916fe3ad
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/common/saa7146_vv.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SAA7146_VV__
+#define __SAA7146_VV__
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf-dma-sg.h>
+#include "saa7146.h"
+
+#define MAX_SAA7146_CAPTURE_BUFFERS 32 /* arbitrary */
+#define BUFFER_TIMEOUT (HZ/2) /* 0.5 seconds */
+
+#define WRITE_RPS0(x) do { \
+ dev->d_rps0.cpu_addr[ count++ ] = cpu_to_le32(x); \
+ } while (0);
+
+#define WRITE_RPS1(x) do { \
+ dev->d_rps1.cpu_addr[ count++ ] = cpu_to_le32(x); \
+ } while (0);
+
+struct saa7146_video_dma {
+ u32 base_odd;
+ u32 base_even;
+ u32 prot_addr;
+ u32 pitch;
+ u32 base_page;
+ u32 num_line_byte;
+};
+
+#define FORMAT_BYTE_SWAP 0x1
+#define FORMAT_IS_PLANAR 0x2
+
+struct saa7146_format {
+ u32 pixelformat;
+ u32 trans;
+ u8 depth;
+ u8 flags;
+ u8 swap;
+};
+
+struct saa7146_standard
+{
+ char *name;
+ v4l2_std_id id;
+
+ int v_offset; /* number of lines of vertical offset before processing */
+ int v_field; /* number of lines in a field for HPS to process */
+
+ int h_offset; /* horizontal offset of processing window */
+ int h_pixels; /* number of horizontal pixels to process */
+
+ int v_max_out;
+ int h_max_out;
+};
+
+/* buffer for one video/vbi frame */
+struct saa7146_buf {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+
+ /* saa7146 specific */
+ struct v4l2_pix_format *fmt;
+ int (*activate)(struct saa7146_dev *dev,
+ struct saa7146_buf *buf,
+ struct saa7146_buf *next);
+
+ /* page tables */
+ struct saa7146_pgtable pt[3];
+};
+
+struct saa7146_dmaqueue {
+ struct saa7146_dev *dev;
+ struct saa7146_buf *curr;
+ struct list_head queue;
+ struct timer_list timeout;
+};
+
+struct saa7146_overlay {
+ struct saa7146_fh *fh;
+ struct v4l2_window win;
+ struct v4l2_clip clips[16];
+ int nclips;
+};
+
+/* per open data */
+struct saa7146_fh {
+ /* Must be the first field! */
+ struct v4l2_fh fh;
+ struct saa7146_dev *dev;
+
+ /* video capture */
+ struct videobuf_queue video_q;
+
+ /* vbi capture */
+ struct videobuf_queue vbi_q;
+
+ unsigned int resources; /* resource management for device open */
+};
+
+#define STATUS_OVERLAY 0x01
+#define STATUS_CAPTURE 0x02
+
+struct saa7146_vv
+{
+ /* vbi capture */
+ struct saa7146_dmaqueue vbi_dmaq;
+ struct v4l2_vbi_format vbi_fmt;
+ struct timer_list vbi_read_timeout;
+ struct file *vbi_read_timeout_file;
+ /* vbi workaround interrupt queue */
+ wait_queue_head_t vbi_wq;
+ int vbi_fieldcount;
+ struct saa7146_fh *vbi_streaming;
+
+ int video_status;
+ struct saa7146_fh *video_fh;
+
+ /* video overlay */
+ struct saa7146_overlay ov;
+ struct v4l2_framebuffer ov_fb;
+ struct saa7146_format *ov_fmt;
+ struct saa7146_fh *ov_suspend;
+
+ /* video capture */
+ struct saa7146_dmaqueue video_dmaq;
+ struct v4l2_pix_format video_fmt;
+ enum v4l2_field last_field;
+
+ /* common: fixme? shouldn't this be in saa7146_fh?
+ (this leads to a more complicated question: shall the driver
+ store the different settings (for example S_INPUT) for every open
+ and restore it appropriately, or should all settings be common for
+ all opens? currently, we do the latter, like all other
+ drivers do... */
+ struct saa7146_standard *standard;
+
+ int vflip;
+ int hflip;
+ int current_hps_source;
+ int current_hps_sync;
+
+ struct saa7146_dma d_clipping; /* pointer to clipping memory */
+
+ unsigned int resources; /* resource management for device */
+};
+
+/* flags */
+#define SAA7146_USE_PORT_B_FOR_VBI 0x2 /* use input port b for vbi hardware bug workaround */
+
+struct saa7146_ext_vv
+{
+ /* information about the video capabilities of the device */
+ int inputs;
+ int audios;
+ u32 capabilities;
+ int flags;
+
+ /* additionally supported transmission standards */
+ struct saa7146_standard *stds;
+ int num_stds;
+ int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
+
+ /* the extension can override this */
+ struct v4l2_ioctl_ops vid_ops;
+ struct v4l2_ioctl_ops vbi_ops;
+ /* pointer to the saa7146 core ops */
+ const struct v4l2_ioctl_ops *core_ops;
+
+ struct v4l2_file_operations vbi_fops;
+};
+
+struct saa7146_use_ops {
+ void (*init)(struct saa7146_dev *, struct saa7146_vv *);
+ int(*open)(struct saa7146_dev *, struct file *);
+ void (*release)(struct saa7146_dev *, struct file *);
+ void (*irq_done)(struct saa7146_dev *, unsigned long status);
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
+};
+
+/* from saa7146_fops.c */
+int saa7146_register_device(struct video_device *vid, struct saa7146_dev *dev, char *name, int type);
+int saa7146_unregister_device(struct video_device *vid, struct saa7146_dev *dev);
+void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
+void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
+int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
+void saa7146_buffer_timeout(struct timer_list *t);
+void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
+ struct saa7146_buf *buf);
+
+int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv);
+int saa7146_vv_release(struct saa7146_dev* dev);
+
+/* from saa7146_hlp.c */
+int saa7146_enable_overlay(struct saa7146_fh *fh);
+void saa7146_disable_overlay(struct saa7146_fh *fh);
+
+void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next);
+void saa7146_write_out_dma(struct saa7146_dev* dev, int which, struct saa7146_video_dma* vdma) ;
+void saa7146_set_hps_source_and_sync(struct saa7146_dev *saa, int source, int sync);
+void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
+
+/* from saa7146_video.c */
+extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
+extern const struct v4l2_ioctl_ops saa7146_vbi_ioctl_ops;
+extern const struct saa7146_use_ops saa7146_video_uops;
+int saa7146_start_preview(struct saa7146_fh *fh);
+int saa7146_stop_preview(struct saa7146_fh *fh);
+long saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg);
+int saa7146_s_ctrl(struct v4l2_ctrl *ctrl);
+
+/* from saa7146_vbi.c */
+extern const struct saa7146_use_ops saa7146_vbi_uops;
+
+/* resource management functions */
+int saa7146_res_get(struct saa7146_fh *fh, unsigned int bit);
+void saa7146_res_free(struct saa7146_fh *fh, unsigned int bits);
+
+#define RESOURCE_DMA1_HPS 0x1
+#define RESOURCE_DMA2_CLP 0x2
+#define RESOURCE_DMA3_BRS 0x4
+
+/* saa7146 source inputs */
+#define SAA7146_HPS_SOURCE_PORT_A 0x00
+#define SAA7146_HPS_SOURCE_PORT_B 0x01
+#define SAA7146_HPS_SOURCE_YPB_CPA 0x02
+#define SAA7146_HPS_SOURCE_YPA_CPB 0x03
+
+/* sync inputs */
+#define SAA7146_HPS_SYNC_PORT_A 0x00
+#define SAA7146_HPS_SYNC_PORT_B 0x01
+
+/* some memory sizes */
+/* max. 16 clipping rectangles */
+#define SAA7146_CLIPPING_MEM (16 * 4 * sizeof(u32))
+
+/* some defines for the various clipping-modes */
+#define SAA7146_CLIPPING_RECT 0x4
+#define SAA7146_CLIPPING_RECT_INVERTED 0x5
+#define SAA7146_CLIPPING_MASK 0x6
+#define SAA7146_CLIPPING_MASK_INVERTED 0x7
+
+/* output formats: each entry holds four information */
+#define RGB08_COMPOSED 0x0217 /* composed is used in the sense of "not-planar" */
+/* this means: planar?=0, yuv2rgb-conversion-mode=2, dither=yes(=1), format-mode = 7 */
+#define RGB15_COMPOSED 0x0213
+#define RGB16_COMPOSED 0x0210
+#define RGB24_COMPOSED 0x0201
+#define RGB32_COMPOSED 0x0202
+
+#define Y8 0x0006
+#define YUV411_COMPOSED 0x0003
+#define YUV422_COMPOSED 0x0000
+/* this means: planar?=1, yuv2rgb-conversion-mode=0, dither=no(=0), format-mode = b */
+#define YUV411_DECOMPOSED 0x100b
+#define YUV422_DECOMPOSED 0x1009
+#define YUV420_DECOMPOSED 0x100a
+
+#define IS_PLANAR(x) (x & 0xf000)
+
+/* misc defines */
+#define SAA7146_NO_SWAP (0x0)
+#define SAA7146_TWO_BYTE_SWAP (0x1)
+#define SAA7146_FOUR_BYTE_SWAP (0x2)
+
+#endif
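And a correspondingly compressed sketch of the video/vbi side, i.e. what a driver fills in before calling saa7146_vv_init() and saa7146_register_device(). All my_* names and numeric values below are placeholders rather than settings for any real board:

#include <linux/kernel.h>
#include "saa7146_vv.h"

static struct saa7146_standard my_standards[] = {
	{
		.name	   = "PAL",
		.id	   = V4L2_STD_PAL,
		.v_offset  = 0x17, .v_field   = 288,
		.h_offset  = 0x14, .h_pixels  = 680,
		.v_max_out = 576,  .h_max_out = 768,
	},
};

static struct saa7146_ext_vv my_ext_vv = {
	.inputs	      = 1,
	.audios	      = 0,
	.capabilities = 0,
	.flags	      = 0,
	.stds	      = my_standards,
	.num_stds     = ARRAY_SIZE(my_standards),
};

/* typically called from the saa7146_extension attach() hook */
static int my_video_attach(struct saa7146_dev *dev, struct video_device *vdev)
{
	int err;

	err = saa7146_vv_init(dev, &my_ext_vv);
	if (err)
		return err;

	err = saa7146_register_device(vdev, dev, "my-card", VFL_TYPE_VIDEO);
	if (err)
		saa7146_vv_release(dev);
	return err;
}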
diff --git a/drivers/media/pci/saa7146/Kconfig b/drivers/staging/media/deprecated/saa7146/saa7146/Kconfig
index 3bbb68a0ed7b..228e8d3f8d2b 100644
--- a/drivers/media/pci/saa7146/Kconfig
+++ b/drivers/staging/media/deprecated/saa7146/saa7146/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_HEXIUM_GEMINI
- tristate "Hexium Gemini frame grabber"
+ tristate "Hexium Gemini frame grabber (DEPRECATED)"
depends on PCI && VIDEO_DEV && I2C
select VIDEO_SAA7146_VV
help
@@ -8,22 +8,28 @@ config VIDEO_HEXIUM_GEMINI
grabber card by Hexium. Please note that the Gemini Dual
card is *not* fully supported.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
To compile this driver as a module, choose M here: the
module will be called hexium_gemini.
config VIDEO_HEXIUM_ORION
- tristate "Hexium HV-PCI6 and Orion frame grabber"
+ tristate "Hexium HV-PCI6 and Orion frame grabber (DEPRECATED)"
depends on PCI && VIDEO_DEV && I2C
select VIDEO_SAA7146_VV
help
This is a video4linux driver for the Hexium HV-PCI6 and
Orion frame grabber cards by Hexium.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
To compile this driver as a module, choose M here: the
module will be called hexium_orion.
config VIDEO_MXB
- tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
+ tristate "Siemens-Nixdorf 'Multimedia eXtension Board' (DEPRECATED)"
depends on PCI && VIDEO_DEV && I2C
select VIDEO_SAA7146_VV
select VIDEO_TUNER
@@ -35,5 +41,8 @@ config VIDEO_MXB
This is a video4linux driver for the 'Multimedia eXtension Board'
TV card by Siemens-Nixdorf.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
To compile this driver as a module, choose M here: the
module will be called mxb.
diff --git a/drivers/media/pci/saa7146/Makefile b/drivers/staging/media/deprecated/saa7146/saa7146/Makefile
index 37c9336f83d5..37c9336f83d5 100644
--- a/drivers/media/pci/saa7146/Makefile
+++ b/drivers/staging/media/deprecated/saa7146/saa7146/Makefile
diff --git a/drivers/staging/media/deprecated/saa7146/saa7146/TODO b/drivers/staging/media/deprecated/saa7146/saa7146/TODO
new file mode 100644
index 000000000000..c9ae2ec79cea
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/saa7146/TODO
@@ -0,0 +1,7 @@
+The saa7146-based drivers are among the few drivers still not using
+the vb2 framework, so these drivers are now deprecated with the intent of
+removing them altogether by the beginning of 2023.
+
+In order to keep these drivers, they have to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/staging/media/deprecated/saa7146/saa7146/hexium_gemini.c
index 3947701cd6c7..124e82bd4507 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/staging/media/deprecated/saa7146/saa7146/hexium_gemini.c
@@ -13,9 +13,9 @@
#define DEBUG_VARIABLE debug
-#include <media/drv-intf/saa7146_vv.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include "../common/saa7146_vv.h"
static int debug;
module_param(debug, int, 0);
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/staging/media/deprecated/saa7146/saa7146/hexium_orion.c
index 2eb4bee16b71..ebd63998ac79 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/staging/media/deprecated/saa7146/saa7146/hexium_orion.c
@@ -13,9 +13,9 @@
#define DEBUG_VARIABLE debug
-#include <media/drv-intf/saa7146_vv.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include "../common/saa7146_vv.h"
static int debug;
module_param(debug, int, 0);
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/staging/media/deprecated/saa7146/saa7146/mxb.c
index 7ded8f5b05cb..3e568f952dae 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/staging/media/deprecated/saa7146/saa7146/mxb.c
@@ -13,13 +13,13 @@
#define DEBUG_VARIABLE debug
-#include <media/drv-intf/saa7146_vv.h>
#include <media/tuner.h>
#include <media/v4l2-common.h>
#include <media/i2c/saa7115.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include "../common/saa7146_vv.h"
#include "tea6415c.h"
#include "tea6420.h"
diff --git a/drivers/media/pci/ttpci/Kconfig b/drivers/staging/media/deprecated/saa7146/ttpci/Kconfig
index 65a6832a6b96..8c85ed58e938 100644
--- a/drivers/media/pci/ttpci/Kconfig
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config DVB_BUDGET_CORE
- tristate "SAA7146 DVB cards (aka Budget, Nova-PCI)"
+ tristate "SAA7146 DVB cards (aka Budget, Nova-PCI) (DEPRECATED)"
depends on DVB_CORE && PCI && I2C
select VIDEO_SAA7146
select TTPCI_EEPROM
@@ -10,7 +10,7 @@ config DVB_BUDGET_CORE
MPEG2 decoder.
config DVB_BUDGET
- tristate "Budget cards"
+ tristate "Budget cards (DEPRECATED)"
depends on DVB_BUDGET_CORE && I2C
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
select DVB_VES1X93 if MEDIA_SUBDRV_AUTOSELECT
@@ -30,13 +30,16 @@ config DVB_BUDGET
or Nova-PCI cards) without onboard MPEG2 decoder, and without
analog inputs or an onboard Common Interface connector.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
Say Y if you own such a card and want to use it.
To compile this driver as a module, choose M here: the
module will be called budget.
config DVB_BUDGET_CI
- tristate "Budget cards with onboard CI connector"
+ tristate "Budget cards with onboard CI connector (DEPRECATED)"
depends on DVB_BUDGET_CORE && I2C
select DVB_STV0297 if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
@@ -57,13 +60,16 @@ config DVB_BUDGET_CI
Note: The Common Interface is not yet supported by this driver
due to lack of information from the vendor.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
Say Y if you own such a card and want to use it.
To compile this driver as a module, choose M here: the
module will be called budget-ci.
config DVB_BUDGET_AV
- tristate "Budget cards with analog video inputs"
+ tristate "Budget cards with analog video inputs (DEPRECATED)"
depends on DVB_BUDGET_CORE && I2C
select VIDEO_SAA7146_VV
depends on VIDEO_DEV # dependencies of VIDEO_SAA7146_VV
@@ -80,6 +86,9 @@ config DVB_BUDGET_AV
(so called Budget- or Nova-PCI cards) without onboard
MPEG2 decoder, but with one or more analog video inputs.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
Say Y if you own such a card and want to use it.
To compile this driver as a module, choose M here: the
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/staging/media/deprecated/saa7146/ttpci/Makefile
index b0708f6e40cc..b0708f6e40cc 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/Makefile
diff --git a/drivers/staging/media/deprecated/saa7146/ttpci/TODO b/drivers/staging/media/deprecated/saa7146/ttpci/TODO
new file mode 100644
index 000000000000..c9ae2ec79cea
--- /dev/null
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/TODO
@@ -0,0 +1,7 @@
+The saa7146-based drivers are among the few drivers still not using
+the vb2 framework, so these drivers are now deprecated with the intent of
+removing them altogether by the beginning of 2023.
+
+In order to keep these drivers, they have to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
index 3cb83005cf09..0c61a2dec221 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget-av.c
@@ -29,7 +29,7 @@
#include "tda1004x.h"
#include "tua6100.h"
#include "dvb-pll.h"
-#include <media/drv-intf/saa7146_vv.h>
+#include "../common/saa7146_vv.h"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget-ci.c
index d59d18647371..d59d18647371 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget-ci.c
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget-core.c
index 5d5796f24469..5d5796f24469 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget-core.c
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/staging/media/deprecated/saa7146/ttpci/budget.c
index a88711a3ac7f..a88711a3ac7f 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget.c
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/staging/media/deprecated/saa7146/ttpci/budget.h
index bd87432e6cde..82cc0df492b3 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/staging/media/deprecated/saa7146/ttpci/budget.h
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
-#include <media/drv-intf/saa7146.h>
+#include "../common/saa7146.h"
extern int budget_debug;
diff --git a/drivers/staging/media/stkwebcam/Kconfig b/drivers/staging/media/deprecated/stkwebcam/Kconfig
index 4450403dff41..4450403dff41 100644
--- a/drivers/staging/media/stkwebcam/Kconfig
+++ b/drivers/staging/media/deprecated/stkwebcam/Kconfig
diff --git a/drivers/staging/media/stkwebcam/Makefile b/drivers/staging/media/deprecated/stkwebcam/Makefile
index 17ad7b6f43d0..17ad7b6f43d0 100644
--- a/drivers/staging/media/stkwebcam/Makefile
+++ b/drivers/staging/media/deprecated/stkwebcam/Makefile
diff --git a/drivers/staging/media/stkwebcam/TODO b/drivers/staging/media/deprecated/stkwebcam/TODO
index 735304a72729..735304a72729 100644
--- a/drivers/staging/media/stkwebcam/TODO
+++ b/drivers/staging/media/deprecated/stkwebcam/TODO
diff --git a/drivers/staging/media/stkwebcam/stk-sensor.c b/drivers/staging/media/deprecated/stkwebcam/stk-sensor.c
index 94aa6a27f934..94aa6a27f934 100644
--- a/drivers/staging/media/stkwebcam/stk-sensor.c
+++ b/drivers/staging/media/deprecated/stkwebcam/stk-sensor.c
diff --git a/drivers/staging/media/stkwebcam/stk-webcam.c b/drivers/staging/media/deprecated/stkwebcam/stk-webcam.c
index 787edb3d47c2..787edb3d47c2 100644
--- a/drivers/staging/media/stkwebcam/stk-webcam.c
+++ b/drivers/staging/media/deprecated/stkwebcam/stk-webcam.c
diff --git a/drivers/staging/media/stkwebcam/stk-webcam.h b/drivers/staging/media/deprecated/stkwebcam/stk-webcam.h
index 136decffe9ce..136decffe9ce 100644
--- a/drivers/staging/media/stkwebcam/stk-webcam.h
+++ b/drivers/staging/media/deprecated/stkwebcam/stk-webcam.h
diff --git a/drivers/media/usb/tm6000/Kconfig b/drivers/staging/media/deprecated/tm6000/Kconfig
index 56e977deba81..73d72e49eb28 100644
--- a/drivers/media/usb/tm6000/Kconfig
+++ b/drivers/staging/media/deprecated/tm6000/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_TM6000
- tristate "TV Master TM5600/6000/6010 driver"
+ tristate "TV Master TM5600/6000/6010 driver (DEPRECATED)"
depends on VIDEO_DEV && I2C && INPUT && RC_CORE && USB
select VIDEO_TUNER
select MEDIA_TUNER_XC2028
@@ -13,6 +13,9 @@ config VIDEO_TM6000
only compressed MPEG data over the usb bus, so you need
an external software decoder to watch TV on your computer.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
Say Y if you own such a device and want to use it.
config VIDEO_TM6000_ALSA
diff --git a/drivers/media/usb/tm6000/Makefile b/drivers/staging/media/deprecated/tm6000/Makefile
index 75247a02a485..75247a02a485 100644
--- a/drivers/media/usb/tm6000/Makefile
+++ b/drivers/staging/media/deprecated/tm6000/Makefile
diff --git a/drivers/staging/media/deprecated/tm6000/TODO b/drivers/staging/media/deprecated/tm6000/TODO
new file mode 100644
index 000000000000..ecb30a429689
--- /dev/null
+++ b/drivers/staging/media/deprecated/tm6000/TODO
@@ -0,0 +1,7 @@
+This is one of the few drivers still not using the vb2
+framework, so this driver is now deprecated with the intent of
+removing it altogether by the beginning of 2023.
+
+In order to keep this driver, it has to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/staging/media/deprecated/tm6000/tm6000-alsa.c
index a19a46770c2b..a19a46770c2b 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-alsa.c
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/staging/media/deprecated/tm6000/tm6000-cards.c
index 98f4a63adc2a..98f4a63adc2a 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-cards.c
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/staging/media/deprecated/tm6000/tm6000-core.c
index 5c8cbc5d6f72..5c8cbc5d6f72 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-core.c
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/staging/media/deprecated/tm6000/tm6000-dvb.c
index ee04973cbf93..ee04973cbf93 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-dvb.c
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/staging/media/deprecated/tm6000/tm6000-i2c.c
index 7554b93b82e6..7554b93b82e6 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-i2c.c
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/staging/media/deprecated/tm6000/tm6000-input.c
index 5136e9e202f1..5136e9e202f1 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-input.c
diff --git a/drivers/media/usb/tm6000/tm6000-regs.h b/drivers/staging/media/deprecated/tm6000/tm6000-regs.h
index 6a181f2e7ef2..6a181f2e7ef2 100644
--- a/drivers/media/usb/tm6000/tm6000-regs.h
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-regs.h
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/staging/media/deprecated/tm6000/tm6000-stds.c
index 858cb4f3a9ca..858cb4f3a9ca 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-stds.c
diff --git a/drivers/media/usb/tm6000/tm6000-usb-isoc.h b/drivers/staging/media/deprecated/tm6000/tm6000-usb-isoc.h
index e3c6933f854d..e3c6933f854d 100644
--- a/drivers/media/usb/tm6000/tm6000-usb-isoc.h
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-usb-isoc.h
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/staging/media/deprecated/tm6000/tm6000-video.c
index d855a19551f3..e06ed21edbdd 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/staging/media/deprecated/tm6000/tm6000-video.c
@@ -916,8 +916,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;
}
- field = f->fmt.pix.field;
-
field = V4L2_FIELD_INTERLACED;
tm6000_get_std_res(dev);
diff --git a/drivers/media/usb/tm6000/tm6000.h b/drivers/staging/media/deprecated/tm6000/tm6000.h
index c08c95312739..c08c95312739 100644
--- a/drivers/media/usb/tm6000/tm6000.h
+++ b/drivers/staging/media/deprecated/tm6000/tm6000.h
diff --git a/drivers/staging/media/deprecated/vpfe_capture/Kconfig b/drivers/staging/media/deprecated/vpfe_capture/Kconfig
new file mode 100644
index 000000000000..10250e7e566b
--- /dev/null
+++ b/drivers/staging/media/deprecated/vpfe_capture/Kconfig
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_DM6446_CCDC
+ tristate "TI DM6446 CCDC video capture driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
+ with decoder modules such as TVP5146 over BT656 or
+ sensor modules such as MT9T001 over a raw interface. This
+ module configures the interface and CCDC/ISIF to do
+ video frame capture from slave decoders.
+
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpfe_capture.ko and dm644x_ccdc.ko
+
+config VIDEO_DM355_CCDC
+ tristate "TI DM355 CCDC video capture driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables DM355 CCD hw module. DM355 CCDC hw interfaces
+ with decoder modules such as TVP5146 over BT656 or
+ sensor modules such as MT9T001 over a raw interface. This
+ module configures the interface and CCDC/ISIF to do
+ video frame capture from slave decoders.
+
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpfe_capture.ko and dm355_ccdc.ko
+
+config VIDEO_DM365_ISIF
+ tristate "TI DM365 ISIF video capture driver"
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ depends on I2C
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables ISIF hw module. This is the hardware module for
+ configuring ISIF in VPFE to capture Raw Bayer RGB data from
+ an image sensor or YUV data from a YUV source.
+
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
+ To compile this driver as a module, choose M here. There will
+ be two modules called vpfe_capture.ko and isif.ko
diff --git a/drivers/staging/media/deprecated/vpfe_capture/Makefile b/drivers/staging/media/deprecated/vpfe_capture/Makefile
new file mode 100644
index 000000000000..609e8dc09ce7
--- /dev/null
+++ b/drivers/staging/media/deprecated/vpfe_capture/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_DM6446_CCDC) += vpfe_capture.o dm644x_ccdc.o
+obj-$(CONFIG_VIDEO_DM355_CCDC) += vpfe_capture.o dm355_ccdc.o
+obj-$(CONFIG_VIDEO_DM365_ISIF) += vpfe_capture.o isif.o
diff --git a/drivers/staging/media/deprecated/vpfe_capture/TODO b/drivers/staging/media/deprecated/vpfe_capture/TODO
new file mode 100644
index 000000000000..ce654d7337af
--- /dev/null
+++ b/drivers/staging/media/deprecated/vpfe_capture/TODO
@@ -0,0 +1,7 @@
+These are among the few drivers still not using the vb2
+framework, so these drivers are now deprecated with the intent of
+removing them altogether by the beginning of 2023.
+
+In order to keep these drivers, they have to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/media/platform/ti/davinci/ccdc_hw_device.h b/drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h
index a545052a95a9..a545052a95a9 100644
--- a/drivers/media/platform/ti/davinci/ccdc_hw_device.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h
diff --git a/drivers/media/platform/ti/davinci/dm355_ccdc.c b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c
index 8fe55d1b972c..da8db53e9498 100644
--- a/drivers/media/platform/ti/davinci/dm355_ccdc.c
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <media/davinci/dm355_ccdc.h>
+#include "dm355_ccdc.h"
#include <media/davinci/vpss.h>
#include "dm355_ccdc_regs.h"
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h
new file mode 100644
index 000000000000..1f3d00aa46d1
--- /dev/null
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2005-2009 Texas Instruments Inc
+ */
+#ifndef _DM355_CCDC_H
+#define _DM355_CCDC_H
+#include <media/davinci/ccdc_types.h>
+#include <media/davinci/vpfe_types.h>
+
+/* enum for number of pixels per line to be averaged in Black Clamping */
+enum ccdc_sample_length {
+ CCDC_SAMPLE_1PIXELS,
+ CCDC_SAMPLE_2PIXELS,
+ CCDC_SAMPLE_4PIXELS,
+ CCDC_SAMPLE_8PIXELS,
+ CCDC_SAMPLE_16PIXELS
+};
+
+/* enum for number of lines in Black Clamping */
+enum ccdc_sample_line {
+ CCDC_SAMPLE_1LINES,
+ CCDC_SAMPLE_2LINES,
+ CCDC_SAMPLE_4LINES,
+ CCDC_SAMPLE_8LINES,
+ CCDC_SAMPLE_16LINES
+};
+
+/* enum for Alaw gamma width */
+enum ccdc_gamma_width {
+ CCDC_GAMMA_BITS_13_4,
+ CCDC_GAMMA_BITS_12_3,
+ CCDC_GAMMA_BITS_11_2,
+ CCDC_GAMMA_BITS_10_1,
+ CCDC_GAMMA_BITS_09_0
+};
+
+enum ccdc_colpats {
+ CCDC_RED,
+ CCDC_GREEN_RED,
+ CCDC_GREEN_BLUE,
+ CCDC_BLUE
+};
+
+struct ccdc_col_pat {
+ enum ccdc_colpats olop;
+ enum ccdc_colpats olep;
+ enum ccdc_colpats elop;
+ enum ccdc_colpats elep;
+};
+
+enum ccdc_datasft {
+ CCDC_DATA_NO_SHIFT,
+ CCDC_DATA_SHIFT_1BIT,
+ CCDC_DATA_SHIFT_2BIT,
+ CCDC_DATA_SHIFT_3BIT,
+ CCDC_DATA_SHIFT_4BIT,
+ CCDC_DATA_SHIFT_5BIT,
+ CCDC_DATA_SHIFT_6BIT
+};
+
+enum ccdc_data_size {
+ CCDC_DATA_16BITS,
+ CCDC_DATA_15BITS,
+ CCDC_DATA_14BITS,
+ CCDC_DATA_13BITS,
+ CCDC_DATA_12BITS,
+ CCDC_DATA_11BITS,
+ CCDC_DATA_10BITS,
+ CCDC_DATA_8BITS
+};
+enum ccdc_mfilt1 {
+ CCDC_NO_MEDIAN_FILTER1,
+ CCDC_AVERAGE_FILTER1,
+ CCDC_MEDIAN_FILTER1
+};
+
+enum ccdc_mfilt2 {
+ CCDC_NO_MEDIAN_FILTER2,
+ CCDC_AVERAGE_FILTER2,
+ CCDC_MEDIAN_FILTER2
+};
+
+/* structure for ALaw */
+struct ccdc_a_law {
+ /* Enable/disable A-Law */
+ unsigned char enable;
+ /* Gamma Width Input */
+ enum ccdc_gamma_width gamma_wd;
+};
+
+/* structure for Black Clamping */
+struct ccdc_black_clamp {
+ /* only if bClampEnable is TRUE */
+ unsigned char b_clamp_enable;
+ /* only if bClampEnable is TRUE */
+ enum ccdc_sample_length sample_pixel;
+ /* only if bClampEnable is TRUE */
+ enum ccdc_sample_line sample_ln;
+ /* only if bClampEnable is TRUE */
+ unsigned short start_pixel;
+ /* only if bClampEnable is FALSE */
+ unsigned short sgain;
+ unsigned short dc_sub;
+};
+
+/* structure for Black Level Compensation */
+struct ccdc_black_compensation {
+ /* Constant value to subtract from Red component */
+ unsigned char r;
+ /* Constant value to subtract from Gr component */
+ unsigned char gr;
+ /* Constant value to subtract from Blue component */
+ unsigned char b;
+ /* Constant value to subtract from Gb component */
+ unsigned char gb;
+};
+
+struct ccdc_float {
+ int integer;
+ unsigned int decimal;
+};
+
+#define CCDC_CSC_COEFF_TABLE_SIZE 16
+/* structure for color space converter */
+struct ccdc_csc {
+ unsigned char enable;
+ /*
+ * S8Q5. Uses 2-digit decimal precision; user values range from -3.00 to 3.99.
+ * Example: to use 1.03, set the integer part to 1 and the decimal part to 3;
+ * to use -1.03, set the integer part to -1 and the decimal part to 3.
+ */
+ struct ccdc_float coeff[CCDC_CSC_COEFF_TABLE_SIZE];
+};
+
+/* Structures for Vertical Defect Correction */
+enum ccdc_vdf_csl {
+ CCDC_VDF_NORMAL,
+ CCDC_VDF_HORZ_INTERPOL_SAT,
+ CCDC_VDF_HORZ_INTERPOL
+};
+
+enum ccdc_vdf_cuda {
+ CCDC_VDF_WHOLE_LINE_CORRECT,
+ CCDC_VDF_UPPER_DISABLE
+};
+
+enum ccdc_dfc_mwr {
+ CCDC_DFC_MWR_WRITE_COMPLETE,
+ CCDC_DFC_WRITE_REG
+};
+
+enum ccdc_dfc_mrd {
+ CCDC_DFC_READ_COMPLETE,
+ CCDC_DFC_READ_REG
+};
+
+enum ccdc_dfc_ma_rst {
+ CCDC_DFC_INCR_ADDR,
+ CCDC_DFC_CLR_ADDR
+};
+
+enum ccdc_dfc_mclr {
+ CCDC_DFC_CLEAR_COMPLETE,
+ CCDC_DFC_CLEAR
+};
+
+struct ccdc_dft_corr_ctl {
+ enum ccdc_vdf_csl vdfcsl;
+ enum ccdc_vdf_cuda vdfcuda;
+ unsigned int vdflsft;
+};
+
+struct ccdc_dft_corr_mem_ctl {
+ enum ccdc_dfc_mwr dfcmwr;
+ enum ccdc_dfc_mrd dfcmrd;
+ enum ccdc_dfc_ma_rst dfcmarst;
+ enum ccdc_dfc_mclr dfcmclr;
+};
+
+#define CCDC_DFT_TABLE_SIZE 16
+/*
+ * Main structure for vertical defect correction. Vertical defect
+ * correction can correct up to 16 defects; if there are fewer than
+ * 16 defects, pad the rest of the table with 0.
+ */
+struct ccdc_vertical_dft {
+ unsigned char ver_dft_en;
+ unsigned char gen_dft_en;
+ unsigned int saturation_ctl;
+ struct ccdc_dft_corr_ctl dft_corr_ctl;
+ struct ccdc_dft_corr_mem_ctl dft_corr_mem_ctl;
+ int table_size;
+ unsigned int dft_corr_horz[CCDC_DFT_TABLE_SIZE];
+ unsigned int dft_corr_vert[CCDC_DFT_TABLE_SIZE];
+ unsigned int dft_corr_sub1[CCDC_DFT_TABLE_SIZE];
+ unsigned int dft_corr_sub2[CCDC_DFT_TABLE_SIZE];
+ unsigned int dft_corr_sub3[CCDC_DFT_TABLE_SIZE];
+};
+
+struct ccdc_data_offset {
+ unsigned char horz_offset;
+ unsigned char vert_offset;
+};
+
+/*
+ * Structure for CCDC configuration parameters for raw capture mode passed
+ * by application
+ */
+struct ccdc_config_params_raw {
+ /* data shift to be applied before storing */
+ enum ccdc_datasft datasft;
+ /* data size value from 8 to 16 bits */
+ enum ccdc_data_size data_sz;
+ /* median filter for sdram */
+ enum ccdc_mfilt1 mfilt1;
+ enum ccdc_mfilt2 mfilt2;
+ /* low pass filter enable/disable */
+ unsigned char lpf_enable;
+ /* Threshold of median filter */
+ int med_filt_thres;
+ /*
+ * horz and vertical data offset. Applicable for defect correction
+ * and lsc
+ */
+ struct ccdc_data_offset data_offset;
+ /* Structure for Optional A-Law */
+ struct ccdc_a_law alaw;
+ /* Structure for Optical Black Clamp */
+ struct ccdc_black_clamp blk_clamp;
+ /* Structure for Black Compensation */
+ struct ccdc_black_compensation blk_comp;
+ /* structure for vertical Defect Correction Module Configuration */
+ struct ccdc_vertical_dft vertical_dft;
+ /* structure for color space converter Module Configuration */
+ struct ccdc_csc csc;
+ /* color patterns for bayer capture */
+ struct ccdc_col_pat col_pat_field0;
+ struct ccdc_col_pat col_pat_field1;
+};
+
+#ifdef __KERNEL__
+#include <linux/io.h>
+
+#define CCDC_WIN_PAL {0, 0, 720, 576}
+#define CCDC_WIN_VGA {0, 0, 640, 480}
+
+struct ccdc_params_ycbcr {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* enable BT.656 embedded sync mode */
+ int bt656_enable;
+ /* cb:y:cr:y or y:cb:y:cr in memory */
+ enum ccdc_pixorder pix_order;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+};
+
+/* Gain applied to Raw Bayer data */
+struct ccdc_gain {
+ unsigned short r_ye;
+ unsigned short gr_cy;
+ unsigned short gb_g;
+ unsigned short b_mg;
+};
+
+/* Structure for CCDC configuration parameters for raw capture mode */
+struct ccdc_params_raw {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+ /* Gain values */
+ struct ccdc_gain gain;
+ /* offset */
+ unsigned int ccdc_offset;
+ /* horizontal flip enable */
+ unsigned char horz_flip_enable;
+ /*
+ * enable to store the image in inverse order in memory
+ * (bottom to top)
+ */
+ unsigned char image_invert_enable;
+ /* Configurable part of raw data */
+ struct ccdc_config_params_raw config_params;
+};
+
+#endif
+#endif /* DM355_CCDC_H */
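
The struct ccdc_csc comment above documents the coefficient convention: an integer part plus a two-decimal-digit fractional part, with S8Q5 values in the range -3.00 to 3.99. The following is only a sketch of that documented convention; the helper name is hypothetical, and the actual S8Q5 register packing done by the driver is not shown:

/* Hypothetical helper: interpret a ccdc_float CSC coefficient as documented
 * in dm355_ccdc.h, i.e. 1.03 -> { .integer = 1, .decimal = 3 } and
 * -1.03 -> { .integer = -1, .decimal = 3 }. Coefficients with a zero
 * integer part are assumed non-negative here, which the comment leaves
 * unspecified. */
static double ccdc_csc_coeff_to_double(const struct ccdc_float *c)
{
	double frac = c->decimal / 100.0;	/* two decimal digits */

	return c->integer < 0 ? c->integer - frac : c->integer + frac;
}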
diff --git a/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h
index eb381f075245..eb381f075245 100644
--- a/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h
diff --git a/drivers/media/platform/ti/davinci/dm644x_ccdc.c b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c
index e4073e99914c..4a93e5ad6415 100644
--- a/drivers/media/platform/ti/davinci/dm644x_ccdc.c
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c
@@ -24,7 +24,7 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <media/davinci/dm644x_ccdc.h>
+#include "dm644x_ccdc.h"
#include <media/davinci/vpss.h>
#include "dm644x_ccdc_regs.h"
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h
new file mode 100644
index 000000000000..c20dba3d76d6
--- /dev/null
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2006-2009 Texas Instruments Inc
+ */
+#ifndef _DM644X_CCDC_H
+#define _DM644X_CCDC_H
+#include <media/davinci/ccdc_types.h>
+#include <media/davinci/vpfe_types.h>
+
+/* enum for number of pixels per line to be averaged in Black Clamping */
+enum ccdc_sample_length {
+ CCDC_SAMPLE_1PIXELS,
+ CCDC_SAMPLE_2PIXELS,
+ CCDC_SAMPLE_4PIXELS,
+ CCDC_SAMPLE_8PIXELS,
+ CCDC_SAMPLE_16PIXELS
+};
+
+/* enum for number of lines in Black Clamping */
+enum ccdc_sample_line {
+ CCDC_SAMPLE_1LINES,
+ CCDC_SAMPLE_2LINES,
+ CCDC_SAMPLE_4LINES,
+ CCDC_SAMPLE_8LINES,
+ CCDC_SAMPLE_16LINES
+};
+
+/* enum for Alaw gamma width */
+enum ccdc_gamma_width {
+ CCDC_GAMMA_BITS_15_6, /* use bits 15-6 for gamma */
+ CCDC_GAMMA_BITS_14_5,
+ CCDC_GAMMA_BITS_13_4,
+ CCDC_GAMMA_BITS_12_3,
+ CCDC_GAMMA_BITS_11_2,
+ CCDC_GAMMA_BITS_10_1,
+ CCDC_GAMMA_BITS_09_0 /* use bits 9-0 for gamma */
+};
+
+/* returns the highest bit used for the gamma */
+static inline u8 ccdc_gamma_width_max_bit(enum ccdc_gamma_width width)
+{
+ return 15 - width;
+}
+
+enum ccdc_data_size {
+ CCDC_DATA_16BITS,
+ CCDC_DATA_15BITS,
+ CCDC_DATA_14BITS,
+ CCDC_DATA_13BITS,
+ CCDC_DATA_12BITS,
+ CCDC_DATA_11BITS,
+ CCDC_DATA_10BITS,
+ CCDC_DATA_8BITS
+};
+
+/* returns the highest bit used for this data size */
+static inline u8 ccdc_data_size_max_bit(enum ccdc_data_size sz)
+{
+ return sz == CCDC_DATA_8BITS ? 7 : 15 - sz;
+}
+
+/* structure for ALaw */
+struct ccdc_a_law {
+ /* Enable/disable A-Law */
+ unsigned char enable;
+ /* Gamma Width Input */
+ enum ccdc_gamma_width gamma_wd;
+};
+
+/* structure for Black Clamping */
+struct ccdc_black_clamp {
+ unsigned char enable;
+ /* only if bClampEnable is TRUE */
+ enum ccdc_sample_length sample_pixel;
+ /* only if bClampEnable is TRUE */
+ enum ccdc_sample_line sample_ln;
+ /* only if bClampEnable is TRUE */
+ unsigned short start_pixel;
+ /* only if bClampEnable is TRUE */
+ unsigned short sgain;
+ /* only if bClampEnable is FALSE */
+ unsigned short dc_sub;
+};
+
+/* structure for Black Level Compensation */
+struct ccdc_black_compensation {
+ /* Constant value to subtract from Red component */
+ char r;
+ /* Constant value to subtract from Gr component */
+ char gr;
+ /* Constant value to subtract from Blue component */
+ char b;
+ /* Constant value to subtract from Gb component */
+ char gb;
+};
+
+/* Structure for CCDC configuration parameters for raw capture mode passed
+ * by application
+ */
+struct ccdc_config_params_raw {
+ /* data size value from 8 to 16 bits */
+ enum ccdc_data_size data_sz;
+ /* Structure for Optional A-Law */
+ struct ccdc_a_law alaw;
+ /* Structure for Optical Black Clamp */
+ struct ccdc_black_clamp blk_clamp;
+ /* Structure for Black Compensation */
+ struct ccdc_black_compensation blk_comp;
+};
+
+
+#ifdef __KERNEL__
+#include <linux/io.h>
+/* Define to enable/disable video port */
+#define FP_NUM_BYTES 4
+/* Define for extra pixel/line and extra lines/frame */
+#define NUM_EXTRAPIXELS 8
+#define NUM_EXTRALINES 8
+
+/* settings for commonly used video formats */
+#define CCDC_WIN_PAL {0, 0, 720, 576}
+/* ntsc square pixel */
+#define CCDC_WIN_VGA {0, 0, (640 + NUM_EXTRAPIXELS), (480 + NUM_EXTRALINES)}
+
+/* Structure for CCDC configuration parameters for raw capture mode */
+struct ccdc_params_raw {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+ /*
+ * enable to store the image in inverse
+ * order in memory (bottom to top)
+ */
+ unsigned char image_invert_enable;
+ /* configurable parameters */
+ struct ccdc_config_params_raw config_params;
+};
+
+struct ccdc_params_ycbcr {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* enable BT.656 embedded sync mode */
+ int bt656_enable;
+ /* cb:y:cr:y or y:cb:y:cr in memory */
+ enum ccdc_pixorder pix_order;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+};
+#endif
+#endif /* _DM644X_CCDC_H */
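
The two inline helpers above reduce the gamma-width and data-size enums to their highest used bit (CCDC_GAMMA_BITS_15_6 maps to 15, CCDC_DATA_8BITS to 7). A short usage sketch follows; the function name is made up, and whether the hardware actually requires the A-Law gamma window to fit inside the configured data size is an assumption of this example, not something stated in the header:

/* Hypothetical sanity check built on ccdc_gamma_width_max_bit() and
 * ccdc_data_size_max_bit() from dm644x_ccdc.h above. */
static bool ccdc_alaw_fits_data_size(const struct ccdc_config_params_raw *cfg)
{
	if (!cfg->alaw.enable)
		return true;

	/* e.g. CCDC_GAMMA_BITS_13_4 (max bit 13) would not fit in
	 * CCDC_DATA_12BITS (max bit 11) */
	return ccdc_gamma_width_max_bit(cfg->alaw.gamma_wd) <=
	       ccdc_data_size_max_bit(cfg->data_sz);
}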
diff --git a/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h
index c4894f6a254e..c4894f6a254e 100644
--- a/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h
diff --git a/drivers/media/platform/ti/davinci/isif.c b/drivers/staging/media/deprecated/vpfe_capture/isif.c
index 69e862de014f..4059891c2824 100644
--- a/drivers/media/platform/ti/davinci/isif.c
+++ b/drivers/staging/media/deprecated/vpfe_capture/isif.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <media/davinci/isif.h>
+#include "isif.h"
#include <media/davinci/vpss.h>
#include "isif_regs.h"
diff --git a/drivers/staging/media/deprecated/vpfe_capture/isif.h b/drivers/staging/media/deprecated/vpfe_capture/isif.h
new file mode 100644
index 000000000000..8369acd26e7e
--- /dev/null
+++ b/drivers/staging/media/deprecated/vpfe_capture/isif.h
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2008-2009 Texas Instruments Inc
+ *
+ * isif header file
+ */
+#ifndef _ISIF_H
+#define _ISIF_H
+
+#include <media/davinci/ccdc_types.h>
+#include <media/davinci/vpfe_types.h>
+
+/* isif float type S8Q8/U8Q8 */
+struct isif_float_8 {
+ /* 8 bit integer part */
+ __u8 integer;
+ /* 8 bit decimal part */
+ __u8 decimal;
+};
+
+/* isif float type U16Q16/S16Q16 */
+struct isif_float_16 {
+ /* 16 bit integer part */
+ __u16 integer;
+ /* 16 bit decimal part */
+ __u16 decimal;
+};
+
+/************************************************************************
+ * Vertical Defect Correction parameters
+ ***********************************************************************/
+/* Defect Correction (DFC) table entry */
+struct isif_vdfc_entry {
+ /* vertical position of defect */
+ __u16 pos_vert;
+ /* horizontal position of defect */
+ __u16 pos_horz;
+ /*
+ * Defect level of Vertical line defect position. This is subtracted
+ * from the data at the defect position
+ */
+ __u8 level_at_pos;
+ /*
+ * Defect level of the pixels upper than the vertical line defect.
+ * This is subtracted from the data
+ */
+ __u8 level_up_pixels;
+ /*
+ * Defect level of the pixels lower than the vertical line defect.
+ * This is subtracted from the data
+ */
+ __u8 level_low_pixels;
+};
+
+#define ISIF_VDFC_TABLE_SIZE 8
+struct isif_dfc {
+ /* enable vertical defect correction */
+ __u8 en;
+ /* Defect level subtraction. Just fed through if saturating */
+#define ISIF_VDFC_NORMAL 0
+ /*
+ * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
+ * if data saturating
+ */
+#define ISIF_VDFC_HORZ_INTERPOL_IF_SAT 1
+ /* Horizontal interpolation (((i-2)+(i+2))/2) */
+#define ISIF_VDFC_HORZ_INTERPOL 2
+ /* one of the vertical defect correction modes above */
+ __u8 corr_mode;
+ /* 0 - whole line corrected, 1 - pixels above the defect are not corrected */
+ __u8 corr_whole_line;
+#define ISIF_VDFC_NO_SHIFT 0
+#define ISIF_VDFC_SHIFT_1 1
+#define ISIF_VDFC_SHIFT_2 2
+#define ISIF_VDFC_SHIFT_3 3
+#define ISIF_VDFC_SHIFT_4 4
+ /*
+ * defect level shift value. level_at_pos, level_upper_pos,
+ * and level_lower_pos can be shifted up by this value. Choose
+ * one of the values above
+ */
+ __u8 def_level_shift;
+ /* defect saturation level */
+ __u16 def_sat_level;
+ /* number of vertical defects. Max is ISIF_VDFC_TABLE_SIZE */
+ __u16 num_vdefects;
+ /* VDFC table ptr */
+ struct isif_vdfc_entry table[ISIF_VDFC_TABLE_SIZE];
+};
+
+struct isif_horz_bclamp {
+
+ /* Horizontal clamp disabled. Only vertical clamp value is subtracted */
+#define ISIF_HORZ_BC_DISABLE 0
+ /*
+ * Horizontal clamp value is calculated and subtracted from image data
+ * along with vertical clamp value
+ */
+#define ISIF_HORZ_BC_CLAMP_CALC_ENABLED 1
+ /*
+ * Horizontal clamp value calculated from previous image is subtracted
+ * from image data along with vertical clamp value.
+ */
+#define ISIF_HORZ_BC_CLAMP_NOT_UPDATED 2
+ /* horizontal clamp mode. One of the values above */
+ __u8 mode;
+ /*
+ * pixel value limit enable.
+ * 0 - limit disabled
+ * 1 - pixel value limited to 1023
+ */
+ __u8 clamp_pix_limit;
+ /* Select Most left window for bc calculation */
+#define ISIF_SEL_MOST_LEFT_WIN 0
+ /* Select Most right window for bc calculation */
+#define ISIF_SEL_MOST_RIGHT_WIN 1
+ /* Select most left or right window for clamp val calculation */
+ __u8 base_win_sel_calc;
+ /* Window count per color for calculation. range 1-32 */
+ __u8 win_count_calc;
+ /* Window start position - horizontal for calculation. 0 - 8191 */
+ __u16 win_start_h_calc;
+ /* Window start position - vertical for calculation 0 - 8191 */
+ __u16 win_start_v_calc;
+#define ISIF_HORZ_BC_SZ_H_2PIXELS 0
+#define ISIF_HORZ_BC_SZ_H_4PIXELS 1
+#define ISIF_HORZ_BC_SZ_H_8PIXELS 2
+#define ISIF_HORZ_BC_SZ_H_16PIXELS 3
+ /* Width of the sample window in pixels for calculation */
+ __u8 win_h_sz_calc;
+#define ISIF_HORZ_BC_SZ_V_32PIXELS 0
+#define ISIF_HORZ_BC_SZ_V_64PIXELS 1
+#define ISIF_HORZ_BC_SZ_V_128PIXELS 2
+#define ISIF_HORZ_BC_SZ_V_256PIXELS 3
+ /* Height of the sample window in pixels for calculation */
+ __u8 win_v_sz_calc;
+};
+
+/************************************************************************
+ * Black Clamp parameters
+ ***********************************************************************/
+struct isif_vert_bclamp {
+ /* Reset value used is the clamp value calculated */
+#define ISIF_VERT_BC_USE_HORZ_CLAMP_VAL 0
+ /* Reset value used is reset_clamp_val configured */
+#define ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL 1
+ /* No update, previous image value is used */
+#define ISIF_VERT_BC_NO_UPDATE 2
+ /*
+ * Reset value selector for vertical clamp calculation. Use one of
+ * the above values
+ */
+ __u8 reset_val_sel;
+ /* U8Q8. Line average coefficient used in vertical clamp calculation */
+ __u8 line_ave_coef;
+ /* Height of the optical black region for calculation */
+ __u16 ob_v_sz_calc;
+ /* Optical black region start position - horizontal. 0 - 8191 */
+ __u16 ob_start_h;
+ /* Optical black region start position - vertical 0 - 8191 */
+ __u16 ob_start_v;
+};
+
+struct isif_black_clamp {
+ /*
+ * This offset value is added irrespective of the clamp enable status.
+ * S13
+ */
+ __u16 dc_offset;
+ /*
+ * Enable black/digital clamp value to be subtracted from the image data
+ */
+ __u8 en;
+ /*
+ * black clamp mode. same/separate clamp for 4 colors
+ * 0 - disable - same clamp value for all colors
+ * 1 - clamp value calculated separately for all colors
+ */
+ __u8 bc_mode_color;
+ /* Vertical start position for bc subtraction */
+ __u16 vert_start_sub;
+ /* Black clamp for horizontal direction */
+ struct isif_horz_bclamp horz;
+ /* Black clamp for vertical direction */
+ struct isif_vert_bclamp vert;
+};
+
+/*************************************************************************
+** Color Space Conversion (CSC)
+*************************************************************************/
+#define ISIF_CSC_NUM_COEFF 16
+struct isif_color_space_conv {
+ /* Enable color space conversion */
+ __u8 en;
+ /*
+ * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
+ * so forth
+ */
+ struct isif_float_8 coeff[ISIF_CSC_NUM_COEFF];
+};
+
+
+/*************************************************************************
+** Black Compensation parameters
+*************************************************************************/
+struct isif_black_comp {
+ /* Comp for Red */
+ __s8 r_comp;
+ /* Comp for Gr */
+ __s8 gr_comp;
+ /* Comp for Blue */
+ __s8 b_comp;
+ /* Comp for Gb */
+ __s8 gb_comp;
+};
+
+/*************************************************************************
+** Gain parameters
+*************************************************************************/
+struct isif_gain {
+ /* Gain for Red or ye */
+ struct isif_float_16 r_ye;
+ /* Gain for Gr or cy */
+ struct isif_float_16 gr_cy;
+ /* Gain for Gb or g */
+ struct isif_float_16 gb_g;
+ /* Gain for Blue or mg */
+ struct isif_float_16 b_mg;
+};
+
+#define ISIF_LINEAR_TAB_SIZE 192
+/*************************************************************************
+** Linearization parameters
+*************************************************************************/
+struct isif_linearize {
+ /* Enable or Disable linearization of data */
+ __u8 en;
+ /* Shift value applied */
+ __u8 corr_shft;
+ /* scale factor applied U11Q10 */
+ struct isif_float_16 scale_fact;
+ /* the linearization table */
+ __u16 table[ISIF_LINEAR_TAB_SIZE];
+};
+
+/* Color patterns */
+#define ISIF_RED 0
+#define ISIF_GREEN_RED 1
+#define ISIF_GREEN_BLUE 2
+#define ISIF_BLUE 3
+struct isif_col_pat {
+ __u8 olop;
+ __u8 olep;
+ __u8 elop;
+ __u8 elep;
+};
+
+/*************************************************************************
+** Data formatter parameters
+*************************************************************************/
+struct isif_fmtplen {
+ /*
+ * number of program entries for SET0, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ __u16 plen0;
+ /*
+ * number of program entries for SET1, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ __u16 plen1;
+ /*
+ * number of program entries for SET2, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ __u16 plen2;
+ /*
+ * number of program entries for SET3, range 1 - 16
+ * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
+ * ISIF_COMBINE
+ */
+ __u16 plen3;
+};
+
+struct isif_fmt_cfg {
+#define ISIF_SPLIT 0
+#define ISIF_COMBINE 1
+ /* Split or combine or line alternate */
+ __u8 fmtmode;
+ /* enable or disable line alternating mode */
+ __u8 ln_alter_en;
+#define ISIF_1LINE 0
+#define ISIF_2LINES 1
+#define ISIF_3LINES 2
+#define ISIF_4LINES 3
+ /* Split/combine line number */
+ __u8 lnum;
+ /* Address increment Range 1 - 16 */
+ __u8 addrinc;
+};
+
+struct isif_fmt_addr_ptr {
+ /* Initial address */
+ __u32 init_addr;
+ /* output line number */
+#define ISIF_1STLINE 0
+#define ISIF_2NDLINE 1
+#define ISIF_3RDLINE 2
+#define ISIF_4THLINE 3
+ __u8 out_line;
+};
+
+struct isif_fmtpgm_ap {
+ /* program address pointer */
+ __u8 pgm_aptr;
+ /* program address increment or decrement */
+ __u8 pgmupdt;
+};
+
+struct isif_data_formatter {
+ /* Enable/Disable data formatter */
+ __u8 en;
+ /* data formatter configuration */
+ struct isif_fmt_cfg cfg;
+ /* Formatter program entries length */
+ struct isif_fmtplen plen;
+ /* first pixel in a line fed to formatter */
+ __u16 fmtrlen;
+ /* HD interval for output line. Only valid when split line */
+ __u16 fmthcnt;
+ /* formatter address pointers */
+ struct isif_fmt_addr_ptr fmtaddr_ptr[16];
+ /* program enable/disable */
+ __u8 pgm_en[32];
+ /* program address pointers */
+ struct isif_fmtpgm_ap fmtpgm_ap[32];
+};
+
+struct isif_df_csc {
+ /* Color Space Conversion configuration, 0 - csc, 1 - df */
+ __u8 df_or_csc;
+ /* csc configuration valid if df_or_csc is 0 */
+ struct isif_color_space_conv csc;
+ /* data formatter configuration valid if df_or_csc is 1 */
+ struct isif_data_formatter df;
+ /* start pixel in a line at the input */
+ __u32 start_pix;
+ /* number of pixels in input line */
+ __u32 num_pixels;
+ /* start line at the input */
+ __u32 start_line;
+ /* number of lines at the input */
+ __u32 num_lines;
+};
+
+struct isif_gain_offsets_adj {
+ /* Gain adjustment per color */
+ struct isif_gain gain;
+ /* Offset adjustment */
+ __u16 offset;
+ /* Enable or Disable Gain adjustment for SDRAM data */
+ __u8 gain_sdram_en;
+ /* Enable or Disable Gain adjustment for IPIPE data */
+ __u8 gain_ipipe_en;
+ /* Enable or Disable Gain adjustment for H3A data */
+ __u8 gain_h3a_en;
+ /* Enable or Disable Offset adjustment for SDRAM data */
+ __u8 offset_sdram_en;
+ /* Enable or Disable Offset adjustment for IPIPE data */
+ __u8 offset_ipipe_en;
+ /* Enable or Disable Offset adjustment for H3A data */
+ __u8 offset_h3a_en;
+};
+
+struct isif_cul {
+ /* Horizontal Cull pattern for odd lines */
+ __u8 hcpat_odd;
+ /* Horizontal Cull pattern for even lines */
+ __u8 hcpat_even;
+ /* Vertical Cull pattern */
+ __u8 vcpat;
+ /* Enable or disable lpf. Apply when cull is enabled */
+ __u8 en_lpf;
+};
+
+struct isif_compress {
+#define ISIF_ALAW 0
+#define ISIF_DPCM 1
+#define ISIF_NO_COMPRESSION 2
+ /* Compression Algorithm used */
+ __u8 alg;
+ /* Choose Predictor1 for DPCM compression */
+#define ISIF_DPCM_PRED1 0
+ /* Choose Predictor2 for DPCM compression */
+#define ISIF_DPCM_PRED2 1
+ /* Predictor for DPCM compression */
+ __u8 pred;
+};
+
+/* all the stuff in this struct will be provided by userland */
+struct isif_config_params_raw {
+ /* Linearization parameters for image sensor data input */
+ struct isif_linearize linearize;
+ /* Data formatter or CSC */
+ struct isif_df_csc df_csc;
+ /* Defect Pixel Correction (DFC) configuration */
+ struct isif_dfc dfc;
+ /* Black/Digital Clamp configuration */
+ struct isif_black_clamp bclamp;
+ /* Gain, offset adjustments */
+ struct isif_gain_offsets_adj gain_offset;
+ /* Culling */
+ struct isif_cul culling;
+ /* A-Law and DPCM compression options */
+ struct isif_compress compress;
+ /* horizontal offset for Gain/LSC/DFC */
+ __u16 horz_offset;
+ /* vertical offset for Gain/LSC/DFC */
+ __u16 vert_offset;
+ /* color pattern for field 0 */
+ struct isif_col_pat col_pat_field0;
+ /* color pattern for field 1 */
+ struct isif_col_pat col_pat_field1;
+#define ISIF_NO_SHIFT 0
+#define ISIF_1BIT_SHIFT 1
+#define ISIF_2BIT_SHIFT 2
+#define ISIF_3BIT_SHIFT 3
+#define ISIF_4BIT_SHIFT 4
+#define ISIF_5BIT_SHIFT 5
+#define ISIF_6BIT_SHIFT 6
+ /* Data shift applied before storing to SDRAM */
+ __u8 data_shift;
+ /* enable input test pattern generation */
+ __u8 test_pat_gen;
+};
+
+#ifdef __KERNEL__
+struct isif_ycbcr_config {
+ /* isif pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* isif frame format */
+ enum ccdc_frmfmt frm_fmt;
+ /* ISIF crop window */
+ struct v4l2_rect win;
+ /* field polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* interface VD polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* interface HD polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* isif pix order. Only used for ycbcr capture */
+ enum ccdc_pixorder pix_order;
+ /* isif buffer type. Only used for ycbcr capture */
+ enum ccdc_buftype buf_type;
+};
+
+/* MSB of image data connected to sensor port */
+enum isif_data_msb {
+ ISIF_BIT_MSB_15,
+ ISIF_BIT_MSB_14,
+ ISIF_BIT_MSB_13,
+ ISIF_BIT_MSB_12,
+ ISIF_BIT_MSB_11,
+ ISIF_BIT_MSB_10,
+ ISIF_BIT_MSB_9,
+ ISIF_BIT_MSB_8,
+ ISIF_BIT_MSB_7
+};
+
+enum isif_cfa_pattern {
+ ISIF_CFA_PAT_MOSAIC,
+ ISIF_CFA_PAT_STRIPE
+};
+
+struct isif_params_raw {
+ /* isif pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* isif frame format */
+ enum ccdc_frmfmt frm_fmt;
+ /* video window */
+ struct v4l2_rect win;
+ /* field polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* interface VD polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* interface HD polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* buffer type. Applicable for interlaced mode */
+ enum ccdc_buftype buf_type;
+ /* Gain values */
+ struct isif_gain gain;
+ /* cfa pattern */
+ enum isif_cfa_pattern cfa_pat;
+ /* Data MSB position */
+ enum isif_data_msb data_msb;
+ /* Enable horizontal flip */
+ unsigned char horz_flip_en;
+ /* Enable image invert vertically */
+ unsigned char image_invert_en;
+
+ /* all the userland defined stuff */
+ struct isif_config_params_raw config_params;
+};
+
+enum isif_data_pack {
+ ISIF_PACK_16BIT,
+ ISIF_PACK_12BIT,
+ ISIF_PACK_8BIT
+};
+
+#define ISIF_WIN_NTSC {0, 0, 720, 480}
+#define ISIF_WIN_VGA {0, 0, 640, 480}
+
+#endif
+#endif
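
The isif_dfc/isif_vdfc_entry definitions above describe the vertical defect correction table: at most ISIF_VDFC_TABLE_SIZE (8) entries, with num_vdefects giving the count and unused slots left zeroed. A filling sketch, where all defect positions and levels are invented example values:

/* Sketch only: describe two vertical line defects for the VDFC block,
 * using the fields and constants defined in isif.h above. */
static void isif_dfc_example(struct isif_dfc *dfc)
{
	*dfc = (struct isif_dfc) {
		.en		 = 1,
		.corr_mode	 = ISIF_VDFC_HORZ_INTERPOL_IF_SAT,
		.corr_whole_line = 0,			/* correct the whole line */
		.def_level_shift = ISIF_VDFC_NO_SHIFT,
		.def_sat_level	 = 1023,		/* example threshold */
		.num_vdefects	 = 2,			/* <= ISIF_VDFC_TABLE_SIZE */
		.table = {
			{ .pos_vert = 40,  .pos_horz = 512,
			  .level_at_pos = 16, .level_up_pixels = 8, .level_low_pixels = 8 },
			{ .pos_vert = 300, .pos_horz = 100,
			  .level_at_pos = 12, .level_up_pixels = 6, .level_low_pixels = 6 },
		},					/* remaining entries stay zeroed */
	};
}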
diff --git a/drivers/media/platform/ti/davinci/isif_regs.h b/drivers/staging/media/deprecated/vpfe_capture/isif_regs.h
index d68d38841ae7..d68d38841ae7 100644
--- a/drivers/media/platform/ti/davinci/isif_regs.h
+++ b/drivers/staging/media/deprecated/vpfe_capture/isif_regs.h
diff --git a/drivers/media/platform/ti/davinci/vpfe_capture.c b/drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c
index 0a2226b321d7..0a2226b321d7 100644
--- a/drivers/media/platform/ti/davinci/vpfe_capture.c
+++ b/drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c
diff --git a/drivers/media/usb/zr364xx/Kconfig b/drivers/staging/media/deprecated/zr364xx/Kconfig
index a9fb02566c4b..ea29c9d8dca2 100644
--- a/drivers/media/usb/zr364xx/Kconfig
+++ b/drivers/staging/media/deprecated/zr364xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config USB_ZR364XX
- tristate "USB ZR364XX Camera support"
- depends on VIDEO_DEV
+ tristate "USB ZR364XX Camera support (DEPRECATED)"
+ depends on USB && VIDEO_DEV
select VIDEOBUF_GEN
select VIDEOBUF_VMALLOC
help
@@ -10,6 +10,9 @@ config USB_ZR364XX
See <file:Documentation/admin-guide/media/zr364xx.rst> for more info
and list of supported cameras.
+ This driver is deprecated and is scheduled for removal by
+ the beginning of 2023. See the TODO file for more information.
+
To compile this driver as a module, choose M here: the
module will be called zr364xx.
diff --git a/drivers/media/usb/zr364xx/Makefile b/drivers/staging/media/deprecated/zr364xx/Makefile
index edab017d499c..edab017d499c 100644
--- a/drivers/media/usb/zr364xx/Makefile
+++ b/drivers/staging/media/deprecated/zr364xx/Makefile
diff --git a/drivers/staging/media/deprecated/zr364xx/TODO b/drivers/staging/media/deprecated/zr364xx/TODO
new file mode 100644
index 000000000000..ecb30a429689
--- /dev/null
+++ b/drivers/staging/media/deprecated/zr364xx/TODO
@@ -0,0 +1,7 @@
+This is one of the few drivers still not using the vb2
+framework, so this driver is now deprecated with the intent of
+removing it altogether by the beginning of 2023.
+
+In order to keep this driver, it has to be converted to vb2.
+If someone is interested in doing this work, then contact the
+linux-media mailing list (https://linuxtv.org/lists.php).
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/staging/media/deprecated/zr364xx/zr364xx.c
index 538a330046ec..538a330046ec 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/staging/media/deprecated/zr364xx/zr364xx.c
diff --git a/drivers/staging/media/hantro/TODO b/drivers/staging/media/hantro/TODO
deleted file mode 100644
index 8483ff482146..000000000000
--- a/drivers/staging/media/hantro/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-The V4L controls for the HEVC CODEC are not yet part of the stable uABI,
-we are keeping this driver in staging until the HEVC uABI has been merged.
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index a0553c24cce4..cbc66ef0eda8 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -160,7 +160,7 @@
#define IMX7_CSI_VIDEO_NAME "imx-capture"
/* In bytes, per queue */
-#define IMX7_CSI_VIDEO_MEM_LIMIT SZ_64M
+#define IMX7_CSI_VIDEO_MEM_LIMIT SZ_512M
#define IMX7_CSI_VIDEO_EOF_TIMEOUT 2000
#define IMX7_CSI_DEF_MBUS_CODE MEDIA_BUS_FMT_UYVY8_2X8
diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c
index 6b5abd958bff..99b333b68198 100644
--- a/drivers/staging/media/max96712/max96712.c
+++ b/drivers/staging/media/max96712/max96712.c
@@ -407,15 +407,13 @@ static int max96712_probe(struct i2c_client *client)
return max96712_v4l2_register(priv);
}
-static int max96712_remove(struct i2c_client *client)
+static void max96712_remove(struct i2c_client *client)
{
struct max96712_priv *priv = i2c_get_clientdata(client);
v4l2_async_unregister_subdev(&priv->sd);
gpiod_set_value_cansleep(priv->gpiod_pwdn, 0);
-
- return 0;
}
static const struct of_device_id max96712_of_table[] = {
diff --git a/drivers/staging/media/meson/vdec/vdec_hevc.c b/drivers/staging/media/meson/vdec/vdec_hevc.c
index 9530e580e57a..afced435c907 100644
--- a/drivers/staging/media/meson/vdec/vdec_hevc.c
+++ b/drivers/staging/media/meson/vdec/vdec_hevc.c
@@ -167,8 +167,12 @@ static int vdec_hevc_start(struct amvdec_session *sess)
clk_set_rate(core->vdec_hevc_clk, 666666666);
ret = clk_prepare_enable(core->vdec_hevc_clk);
- if (ret)
+ if (ret) {
+ if (core->platform->revision == VDEC_REVISION_G12A ||
+ core->platform->revision == VDEC_REVISION_SM1)
+ clk_disable_unprepare(core->vdec_hevcf_clk);
return ret;
+ }
if (core->platform->revision == VDEC_REVISION_SM1)
regmap_update_bits(core->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 9512cd3314f2..842509dcfedf 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -843,7 +843,7 @@ iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
* processing might be possible but requires more testing.
*
* Stream start must be delayed until buffers are available at both the input
- * and output. The pipeline must be started in the videobuf queue callback with
+ * and output. The pipeline must be started in the vb2 queue callback with
* the buffers queue spinlock held. The modules subdev set stream operation must
* not sleep.
*/
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 4af5a831bde0..4fc167b42cf0 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -1162,8 +1162,8 @@ static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
- writel(0xffffffff, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
+ writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
+ writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 960a0130cd62..55c54dfdc585 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -448,6 +448,8 @@ static int cedrus_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, dev);
+
dev->vfd = cedrus_video_device;
dev->dev = &pdev->dev;
dev->pdev = pdev;
@@ -521,8 +523,6 @@ static int cedrus_probe(struct platform_device *pdev)
goto err_m2m_mc;
}
- platform_set_drvdata(pdev, dev);
-
return 0;
err_m2m_mc:
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 084193019350..93a2196006f7 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -237,19 +237,23 @@ static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
}
static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
- int index, unsigned int plane)
+ struct vb2_buffer *buf,
+ unsigned int plane)
{
- struct vb2_buffer *buf = NULL;
- struct vb2_queue *vq;
-
- if (index < 0)
- return 0;
+ return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
+}
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- if (vq)
- buf = vb2_get_buffer(vq, index);
+static inline void cedrus_write_ref_buf_addr(struct cedrus_ctx *ctx,
+ struct vb2_queue *q,
+ u64 timestamp,
+ u32 luma_reg,
+ u32 chroma_reg)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ struct vb2_buffer *buf = vb2_find_buffer(q, timestamp);
- return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
+ cedrus_write(dev, luma_reg, cedrus_dst_buf_addr(ctx, buf, 0));
+ cedrus_write(dev, chroma_reg, cedrus_dst_buf_addr(ctx, buf, 1));
}
static inline struct cedrus_buffer *
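Note: the cedrus hunks above and below replace index-based reference lookups with timestamp-based ones. The snippet here is an illustrative sketch of that vb2_find_buffer() pattern, not code from this patch; example_ref_plane_addr() is an invented helper name.

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

/* Sketch: resolve a reference frame by its vb2 timestamp.  vb2_find_buffer()
 * returns the matching buffer or NULL, replacing the old vb2_find_timestamp()
 * index lookup followed by vb2_get_buffer().
 */
static dma_addr_t example_ref_plane_addr(struct vb2_queue *cap_q,
					 u64 reference_ts,
					 unsigned int plane)
{
	struct vb2_buffer *buf = vb2_find_buffer(cap_q, reference_ts);

	if (!buf)
		return 0;	/* no reference frame: hand the hardware a zero address */

	return vb2_dma_contig_plane_dma_addr(buf, plane);
}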
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 3b6aa78a2985..e7f7602a5ab4 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -106,11 +106,11 @@ void cedrus_device_run(void *priv)
/* Trigger decoding if setup went well, bail out otherwise. */
if (!error) {
- dev->dec_ops[ctx->current_codec]->trigger(ctx);
-
/* Start the watchdog timer. */
schedule_delayed_work(&dev->watchdog_work,
msecs_to_jiffies(2000));
+
+ dev->dec_ops[ctx->current_codec]->trigger(ctx);
} else {
v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev,
ctx->fh.m2m_ctx,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index c345e67ba9bc..a8b236cd3800 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -111,16 +111,16 @@ static void cedrus_write_frame_list(struct cedrus_ctx *ctx,
for (i = 0; i < ARRAY_SIZE(decode->dpb); i++) {
const struct v4l2_h264_dpb_entry *dpb = &decode->dpb[i];
struct cedrus_buffer *cedrus_buf;
- int buf_idx;
+ struct vb2_buffer *buf;
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
continue;
- buf_idx = vb2_find_timestamp(cap_q, dpb->reference_ts, 0);
- if (buf_idx < 0)
+ buf = vb2_find_buffer(cap_q, dpb->reference_ts);
+ if (!buf)
continue;
- cedrus_buf = vb2_to_cedrus_buffer(cap_q->bufs[buf_idx]);
+ cedrus_buf = vb2_to_cedrus_buffer(buf);
position = cedrus_buf->codec.h264.position;
used_dpbs |= BIT(position);
@@ -186,7 +186,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
const struct v4l2_h264_dpb_entry *dpb;
const struct cedrus_buffer *cedrus_buf;
unsigned int position;
- int buf_idx;
+ struct vb2_buffer *buf;
u8 dpb_idx;
dpb_idx = ref_list[i].index;
@@ -195,11 +195,11 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
continue;
- buf_idx = vb2_find_timestamp(cap_q, dpb->reference_ts, 0);
- if (buf_idx < 0)
+ buf = vb2_find_buffer(cap_q, dpb->reference_ts);
+ if (!buf)
continue;
- cedrus_buf = vb2_to_cedrus_buffer(cap_q->bufs[buf_idx]);
+ cedrus_buf = vb2_to_cedrus_buffer(buf);
position = cedrus_buf->codec.h264.position;
sram_array[i] |= position << 1;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index 687f87598f78..4952fc17f3e6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -102,14 +102,14 @@ static void cedrus_h265_frame_info_write_single(struct cedrus_ctx *ctx,
unsigned int index,
bool field_pic,
u32 pic_order_cnt[],
- int buffer_index)
+ struct vb2_buffer *buf)
{
struct cedrus_dev *dev = ctx->dev;
- dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 0);
- dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buffer_index, 1);
+ dma_addr_t dst_luma_addr = cedrus_dst_buf_addr(ctx, buf, 0);
+ dma_addr_t dst_chroma_addr = cedrus_dst_buf_addr(ctx, buf, 1);
dma_addr_t mv_col_buf_addr[2] = {
- cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index, 0),
- cedrus_h265_frame_info_mv_col_buf_addr(ctx, buffer_index,
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index, 0),
+ cedrus_h265_frame_info_mv_col_buf_addr(ctx, buf->index,
field_pic ? 1 : 0)
};
u32 offset = VE_DEC_H265_SRAM_OFFSET_FRAME_INFO +
@@ -141,18 +141,18 @@ static void cedrus_h265_frame_info_write_dpb(struct cedrus_ctx *ctx,
unsigned int i;
for (i = 0; i < num_active_dpb_entries; i++) {
- int buffer_index = vb2_find_timestamp(vq, dpb[i].timestamp, 0);
+ struct vb2_buffer *buf = vb2_find_buffer(vq, dpb[i].timestamp);
u32 pic_order_cnt[2] = {
dpb[i].pic_order_cnt_val,
dpb[i].pic_order_cnt_val
};
- if (buffer_index < 0)
+ if (!buf)
continue;
cedrus_h265_frame_info_write_single(ctx, i, dpb[i].field_pic,
pic_order_cnt,
- buffer_index);
+ buf);
}
}
@@ -234,8 +234,9 @@ static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
cedrus_write(dev, VE_DEC_H265_TRIGGER,
VE_DEC_H265_TRIGGER_FLUSH_BITS |
VE_DEC_H265_TRIGGER_TYPE_N_BITS(tmp));
- while (cedrus_read(dev, VE_DEC_H265_STATUS) & VE_DEC_H265_STATUS_VLD_BUSY)
- udelay(1);
+
+ if (cedrus_wait_for(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_VLD_BUSY))
+ dev_err_ratelimited(dev->dev, "timed out waiting to skip bits\n");
count += tmp;
}
@@ -751,7 +752,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_h265_frame_info_write_single(ctx, output_pic_list_index,
slice_params->pic_struct != 0,
pic_order_cnt,
- run->dst->vb2_buf.index);
+ &run->dst->vb2_buf);
cedrus_write(dev, VE_DEC_H265_OUTPUT_FRAME_IDX, output_pic_list_index);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
index 4cfc4a3c8a7f..c1128d2cd555 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -54,13 +54,9 @@ static int cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
const struct v4l2_ctrl_mpeg2_picture *pic;
const struct v4l2_ctrl_mpeg2_quantisation *quantisation;
dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr;
- dma_addr_t fwd_luma_addr, fwd_chroma_addr;
- dma_addr_t bwd_luma_addr, bwd_chroma_addr;
struct cedrus_dev *dev = ctx->dev;
struct vb2_queue *vq;
const u8 *matrix;
- int forward_idx;
- int backward_idx;
unsigned int i;
u32 reg;
@@ -123,27 +119,19 @@ static int cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_MPEG_PICBOUNDSIZE, reg);
/* Forward and backward prediction reference buffers. */
-
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
- forward_idx = vb2_find_timestamp(vq, pic->forward_ref_ts, 0);
- fwd_luma_addr = cedrus_dst_buf_addr(ctx, forward_idx, 0);
- fwd_chroma_addr = cedrus_dst_buf_addr(ctx, forward_idx, 1);
-
- cedrus_write(dev, VE_DEC_MPEG_FWD_REF_LUMA_ADDR, fwd_luma_addr);
- cedrus_write(dev, VE_DEC_MPEG_FWD_REF_CHROMA_ADDR, fwd_chroma_addr);
-
- backward_idx = vb2_find_timestamp(vq, pic->backward_ref_ts, 0);
- bwd_luma_addr = cedrus_dst_buf_addr(ctx, backward_idx, 0);
- bwd_chroma_addr = cedrus_dst_buf_addr(ctx, backward_idx, 1);
-
- cedrus_write(dev, VE_DEC_MPEG_BWD_REF_LUMA_ADDR, bwd_luma_addr);
- cedrus_write(dev, VE_DEC_MPEG_BWD_REF_CHROMA_ADDR, bwd_chroma_addr);
+ cedrus_write_ref_buf_addr(ctx, vq, pic->forward_ref_ts,
+ VE_DEC_MPEG_FWD_REF_LUMA_ADDR,
+ VE_DEC_MPEG_FWD_REF_CHROMA_ADDR);
+ cedrus_write_ref_buf_addr(ctx, vq, pic->backward_ref_ts,
+ VE_DEC_MPEG_BWD_REF_LUMA_ADDR,
+ VE_DEC_MPEG_BWD_REF_CHROMA_ADDR);
/* Destination luma and chroma buffers. */
- dst_luma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 0);
- dst_chroma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 1);
+ dst_luma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 0);
+ dst_chroma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 1);
cedrus_write(dev, VE_DEC_MPEG_REC_LUMA, dst_luma_addr);
cedrus_write(dev, VE_DEC_MPEG_REC_CHROMA, dst_chroma_addr);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
index 3f750d1795b6..f7714baae37d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_vp8.c
@@ -660,7 +660,6 @@ static int cedrus_vp8_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
dma_addr_t luma_addr, chroma_addr;
dma_addr_t src_buf_addr;
int header_size;
- int qindex;
u32 reg;
cedrus_engine_enable(ctx, CEDRUS_CODEC_VP8);
@@ -804,43 +803,17 @@ static int cedrus_vp8_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
reg |= VE_VP8_LF_DELTA0(slice->lf.mb_mode_delta[0]);
cedrus_write(dev, VE_VP8_MODE_LF_DELTA, reg);
- luma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 0);
- chroma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 1);
+ luma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 0);
+ chroma_addr = cedrus_dst_buf_addr(ctx, &run->dst->vb2_buf, 1);
cedrus_write(dev, VE_VP8_REC_LUMA, luma_addr);
cedrus_write(dev, VE_VP8_REC_CHROMA, chroma_addr);
- qindex = vb2_find_timestamp(cap_q, slice->last_frame_ts, 0);
- if (qindex >= 0) {
- luma_addr = cedrus_dst_buf_addr(ctx, qindex, 0);
- chroma_addr = cedrus_dst_buf_addr(ctx, qindex, 1);
- cedrus_write(dev, VE_VP8_FWD_LUMA, luma_addr);
- cedrus_write(dev, VE_VP8_FWD_CHROMA, chroma_addr);
- } else {
- cedrus_write(dev, VE_VP8_FWD_LUMA, 0);
- cedrus_write(dev, VE_VP8_FWD_CHROMA, 0);
- }
-
- qindex = vb2_find_timestamp(cap_q, slice->golden_frame_ts, 0);
- if (qindex >= 0) {
- luma_addr = cedrus_dst_buf_addr(ctx, qindex, 0);
- chroma_addr = cedrus_dst_buf_addr(ctx, qindex, 1);
- cedrus_write(dev, VE_VP8_BWD_LUMA, luma_addr);
- cedrus_write(dev, VE_VP8_BWD_CHROMA, chroma_addr);
- } else {
- cedrus_write(dev, VE_VP8_BWD_LUMA, 0);
- cedrus_write(dev, VE_VP8_BWD_CHROMA, 0);
- }
-
- qindex = vb2_find_timestamp(cap_q, slice->alt_frame_ts, 0);
- if (qindex >= 0) {
- luma_addr = cedrus_dst_buf_addr(ctx, qindex, 0);
- chroma_addr = cedrus_dst_buf_addr(ctx, qindex, 1);
- cedrus_write(dev, VE_VP8_ALT_LUMA, luma_addr);
- cedrus_write(dev, VE_VP8_ALT_CHROMA, chroma_addr);
- } else {
- cedrus_write(dev, VE_VP8_ALT_LUMA, 0);
- cedrus_write(dev, VE_VP8_ALT_CHROMA, 0);
- }
+ cedrus_write_ref_buf_addr(ctx, cap_q, slice->last_frame_ts,
+ VE_VP8_FWD_LUMA, VE_VP8_FWD_CHROMA);
+ cedrus_write_ref_buf_addr(ctx, cap_q, slice->golden_frame_ts,
+ VE_VP8_BWD_LUMA, VE_VP8_BWD_CHROMA);
+ cedrus_write_ref_buf_addr(ctx, cap_q, slice->alt_frame_ts,
+ VE_VP8_ALT_LUMA, VE_VP8_ALT_CHROMA);
cedrus_write(dev, VE_H264_CTRL, VE_H264_CTRL_VP8 |
VE_H264_CTRL_DECODE_ERR_INT |
diff --git a/drivers/staging/media/zoran/TODO b/drivers/staging/media/zoran/TODO
deleted file mode 100644
index 6992540d3e53..000000000000
--- a/drivers/staging/media/zoran/TODO
+++ /dev/null
@@ -1,19 +0,0 @@
-
-How to test the zoran driver:
-- RAW capture
- mplayer tv:///dev/video0 -tv driver=v4l2
-
-- MJPEG capture (compression)
- mplayer tv:///dev/video0 -tv driver=v4l2:outfmt=mjpeg
- TODO: need two test for both Dcim path
-
-- MJPEG play (decompression)
- ffmpeg -i test.avi -vcodec mjpeg -an -f v4l2 /dev/video0
- Note: only recent ffmpeg has the ability of sending non-raw video via v4l2
-
- The original way of sending video was via mplayer vo_zr/vo_zr2, but it does not compile
- anymore and is a dead end (usage of some old private ffmpeg structures).
-
-TODO
-- fix the v4l compliance "TRY_FMT cannot handle an invalid pixelformat"
-- Filter JPEG data to made output work
diff --git a/drivers/staging/media/zoran/zoran_device.h b/drivers/staging/media/zoran/zoran_device.h
deleted file mode 100644
index 322b04c55d41..000000000000
--- a/drivers/staging/media/zoran/zoran_device.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Zoran zr36057/zr36067 PCI controller driver, for the
- * Pinnacle/Miro DC10/DC10+/DC30/DC30+, Iomega Buz, Linux
- * Media Labs LML33/LML33R10.
- *
- * This part handles card-specific data and detection
- *
- * Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
- */
-
-#ifndef __ZORAN_DEVICE_H__
-#define __ZORAN_DEVICE_H__
-
-/* general purpose I/O */
-extern void GPIO(struct zoran *zr, int bit, unsigned int value);
-
-/* codec (or actually: guest bus) access */
-extern int post_office_wait(struct zoran *zr);
-extern int post_office_write(struct zoran *zr, unsigned int guest, unsigned int reg, unsigned int value);
-extern int post_office_read(struct zoran *zr, unsigned int guest, unsigned int reg);
-
-extern void jpeg_codec_sleep(struct zoran *zr, int sleep);
-extern int jpeg_codec_reset(struct zoran *zr);
-
-/* zr360x7 access to raw capture */
-extern void zr36057_overlay(struct zoran *zr, int on);
-extern void write_overlay_mask(struct zoran_fh *fh, struct v4l2_clip *vp, int count);
-extern void zr36057_set_memgrab(struct zoran *zr, int mode);
-extern int wait_grab_pending(struct zoran *zr);
-
-/* interrupts */
-extern void print_interrupts(struct zoran *zr);
-extern void clear_interrupt_counters(struct zoran *zr);
-extern irqreturn_t zoran_irq(int irq, void *dev_id);
-
-/* JPEG codec access */
-extern void jpeg_start(struct zoran *zr);
-extern void zr36057_enable_jpg(struct zoran *zr,
- enum zoran_codec_mode mode);
-extern void zoran_feed_stat_com(struct zoran *zr);
-
-/* general */
-extern void zoran_set_pci_master(struct zoran *zr, int set_master);
-extern void zoran_init_hardware(struct zoran *zr);
-extern void zr36057_restart(struct zoran *zr);
-
-extern const struct zoran_format zoran_formats[];
-
-extern int v4l_bufsize;
-extern int jpg_bufsize;
-extern int pass_through;
-
-/* i2c */
-#define decoder_call(zr, o, f, args...) \
- v4l2_subdev_call(zr->decoder, o, f, ##args)
-#define encoder_call(zr, o, f, args...) \
- v4l2_subdev_call(zr->encoder, o, f, ##args)
-
-#endif /* __ZORAN_DEVICE_H__ */
diff --git a/drivers/staging/most/i2c/i2c.c b/drivers/staging/most/i2c/i2c.c
index 7042f10887bb..285a071f02be 100644
--- a/drivers/staging/most/i2c/i2c.c
+++ b/drivers/staging/most/i2c/i2c.c
@@ -340,14 +340,12 @@ static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
*
* Unregister the i2c client device as a MOST interface
*/
-static int i2c_remove(struct i2c_client *client)
+static void i2c_remove(struct i2c_client *client)
{
struct hdm_i2c *dev = i2c_get_clientdata(client);
most_deregister_interface(&dev->most_iface);
kfree(dev);
-
- return 0;
}
static const struct i2c_device_id i2c_id[] = {
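Note: the i2c_remove() change above is part of the tree-wide conversion of the I2C remove() callback to a void return. A minimal stand-alone sketch of the resulting callback shape, with invented names, follows.

#include <linux/i2c.h>
#include <linux/slab.h>

struct example_priv {
	int dummy;
};

/* Sketch: remove() can no longer report failure, so it only releases
 * resources that were set up in probe().
 */
static void example_i2c_remove(struct i2c_client *client)
{
	struct example_priv *priv = i2c_get_clientdata(client);

	kfree(priv);
}

static struct i2c_driver example_i2c_driver = {
	.driver	= { .name = "example" },
	.remove	= example_i2c_remove,
};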
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 1ad94c5060b5..a36e36701c74 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -125,7 +125,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
*
* Returns Always returns NETDEV_TX_OK
*/
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr hw_buffer;
@@ -506,7 +506,7 @@ skip_xmit:
* @dev: Device info structure
* Returns Always returns zero
*/
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
void *packet_buffer;
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index 78936e9b33b0..6c524668f65a 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -5,8 +5,8 @@
* Copyright (c) 2003-2007 Cavium Networks
*/
-int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
-int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
int do_free, int qos);
void cvm_oct_tx_initialize(void);
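Note: the prototype changes above align cvm_oct_xmit()/cvm_oct_xmit_pow() with the netdev_tx_t return type expected by .ndo_start_xmit. A hedged sketch of that convention, with invented names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: start_xmit handlers return netdev_tx_t (NETDEV_TX_OK or
 * NETDEV_TX_BUSY) rather than a plain int; a dropped skb still returns
 * NETDEV_TX_OK because the skb has been consumed.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!netif_carrier_ok(dev)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... hand the skb to the hardware queue here ... */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= example_start_xmit,
};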
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 9363c5cfe50f..4fb9b9f10799 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -668,7 +668,7 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
return rc;
}
-static int dcon_remove(struct i2c_client *client)
+static void dcon_remove(struct i2c_client *client)
{
struct dcon_priv *dcon = i2c_get_clientdata(client);
@@ -684,8 +684,6 @@ static int dcon_remove(struct i2c_client *client)
cancel_work_sync(&dcon->switch_source);
kfree(dcon);
-
- return 0;
}
#ifdef CONFIG_PM
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index df02335fdbab..d4e06a3929f3 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1149,19 +1149,7 @@ out_unlock:
return ret;
}
-
-static int pi433_debugfs_regs_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, pi433_debugfs_regs_show, inode->i_private);
-}
-
-static const struct file_operations debugfs_fops = {
- .llseek = seq_lseek,
- .open = pi433_debugfs_regs_open,
- .owner = THIS_MODULE,
- .read = seq_read,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(pi433_debugfs_regs);
/*-------------------------------------------------------------------------*/
@@ -1320,7 +1308,7 @@ static int pi433_probe(struct spi_device *spi)
entry = debugfs_create_dir(dev_name(device->dev),
debugfs_lookup(KBUILD_MODNAME, NULL));
- debugfs_create_file("regs", 0400, entry, device, &debugfs_fops);
+ debugfs_create_file("regs", 0400, entry, device, &pi433_debugfs_regs_fops);
return 0;
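Note: the pi433 hunk above relies on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the single_open()-based file_operations from a *_show() routine. A minimal sketch with a hypothetical name:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Sketch: DEFINE_SHOW_ATTRIBUTE(example_regs) emits example_regs_open()
 * and a file_operations instance named example_regs_fops wired to
 * single_open(), seq_read(), seq_lseek() and single_release().
 */
static int example_regs_show(struct seq_file *m, void *v)
{
	seq_puts(m, "register dump would go here\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_regs);

static void example_debugfs_init(struct dentry *parent, void *priv)
{
	debugfs_create_file("regs", 0400, parent, priv, &example_regs_fops);
}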
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 659c8c1b38fd..8c7fab6a46bb 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -816,7 +816,7 @@ int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
if (size > FIFO_SIZE) {
dev_dbg(&spi->dev,
- "read fifo: passed in buffer bigger then internal buffer\n");
+			"write fifo: passed in buffer bigger than internal buffer\n");
return -EMSGSIZE;
}
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index ca6b966f5dd3..1ead7793062a 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -3041,8 +3041,8 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
- netif_napi_add_weight(qdev->ndev, &rx_ring->napi,
- qlge_napi_poll_msix, 64);
+ netif_napi_add(qdev->ndev, &rx_ring->napi,
+ qlge_napi_poll_msix);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
} else {
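Note: the qlge hunk above drops the explicit weight because the three-argument netif_napi_add() applies the default NAPI budget itself, while netif_napi_add_weight() remains for drivers that need a non-default value. A small illustrative sketch with invented names:

#include <linux/netdevice.h>

/* Sketch: plain netif_napi_add() uses the default poll weight
 * (NAPI_POLL_WEIGHT, i.e. 64), so passing 64 explicitly is redundant.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... process up to @budget completions here ... */
	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}

static void example_napi_setup(struct net_device *ndev,
			       struct napi_struct *napi)
{
	netif_napi_add(ndev, napi, example_poll);
	napi_enable(napi);
}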
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
index eea16eb7caa0..fd494c2299e6 100644
--- a/drivers/staging/r8188eu/Makefile
+++ b/drivers/staging/r8188eu/Makefile
@@ -10,7 +10,6 @@ r8188eu-y = \
hal/hal_com.o \
hal/odm.o \
hal/odm_HWConfig.o \
- hal/odm_RegConfig8188E.o \
hal/odm_RTL8188E.o \
hal/rtl8188e_cmd.o \
hal/rtl8188e_dm.o \
@@ -18,19 +17,14 @@ r8188eu-y = \
hal/rtl8188e_phycfg.o \
hal/rtl8188e_rf6052.o \
hal/rtl8188e_rxdesc.o \
- hal/rtl8188e_xmit.o \
- hal/rtl8188eu_recv.o \
hal/rtl8188eu_xmit.o \
hal/usb_halinit.o \
hal/usb_ops_linux.o \
os_dep/ioctl_linux.o \
- os_dep/mlme_linux.o \
os_dep/os_intfs.o \
os_dep/osdep_service.o \
- os_dep/recv_linux.o \
os_dep/usb_intf.o \
os_dep/usb_ops_linux.o \
- os_dep/xmit_linux.o \
core/rtw_ap.o \
core/rtw_br_ext.o \
core/rtw_cmd.o \
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index 5bd9dfa57cc5..24eb8dce9bfe 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -935,6 +935,48 @@ u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
return beacon_updated;
}
+void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (!psta)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
+}
+
+static void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (!psta)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
+}
+
u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
bool active, u16 reason)
{
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index bca20fe5c983..4c5f30792a46 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -12,7 +12,6 @@
#include "../include/drv_types.h"
#include "../include/rtw_br_ext.h"
#include "../include/usb_osintf.h"
-#include "../include/recv_osdep.h"
#ifndef csum_ipv6_magic
#include "../include/net/ip6_checksum.h"
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 5b6a891b5d67..3fadace33de6 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -5,8 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/rtw_br_ext.h"
#include "../include/rtw_mlme_ext.h"
#include "../include/rtl8188e_dm.h"
@@ -58,8 +56,6 @@ exit:
u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- u32 res = _SUCCESS;
-
init_completion(&pcmdpriv->enqueue_cmd);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
init_completion(&pcmdpriv->start_cmd_thread);
@@ -74,27 +70,24 @@ u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = kzalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
GFP_KERNEL);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = _FAIL;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return _FAIL;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((size_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ - 1));
pcmdpriv->rsp_allocated_buf = kzalloc(MAX_RSPSZ + 4, GFP_KERNEL);
if (!pcmdpriv->rsp_allocated_buf) {
- res = _FAIL;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return _FAIL;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
-exit:
- return res;
+ return _SUCCESS;
}
u32 rtw_init_evt_priv(struct evt_priv *pevtpriv)
@@ -288,8 +281,7 @@ post_process:
* ### NOTE:#### (!!!!)
* MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock
*/
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
- struct rtw_ieee80211_channel *ch, int ch_num)
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num)
{
u8 res = _FAIL;
struct cmd_obj *ph2c;
@@ -331,17 +323,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
}
}
- /* prepare channel list */
- if (ch) {
- int i;
- for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
- if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
- memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
- psurveyPara->ch_num++;
- }
- }
- }
-
set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
@@ -1290,6 +1271,66 @@ exit:
return res;
}
+/* C2H event format:
+ * Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
+ * BITS [127:120] [119:16] [15:8] [7:4] [3:0]
+ */
+static s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
+{
+ s32 ret = _FAIL;
+ struct c2h_evt_hdr *c2h_evt;
+ int i;
+ u8 trigger;
+
+ if (!buf)
+ goto exit;
+
+ ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger);
+ if (ret)
+ return _FAIL;
+
+ if (trigger == C2H_EVT_HOST_CLOSE)
+ goto exit; /* Not ready */
+ else if (trigger != C2H_EVT_FW_CLOSE)
+ goto clear_evt; /* Not a valid value */
+
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ memset(c2h_evt, 0, 16);
+
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+ /* Read the content */
+ for (i = 0; i < c2h_evt->plen; i++) {
+ ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
+ sizeof(*c2h_evt) + i, c2h_evt->payload + i);
+ if (ret) {
+ ret = _FAIL;
+ goto clear_evt;
+ }
+ }
+
+ ret = _SUCCESS;
+
+clear_evt:
+ /* Clear event to notify FW we have read the command.
+ * If this field isn't clear, the FW won't update the next
+ * command message.
+ */
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+exit:
+ return ret;
+}
+
static void c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
{
u8 buf[16];
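Note: the c2h_evt_read() addition above follows the bit layout documented in its comment (CMD_ID in bits [3:0], CMD_LEN in [7:4], CMD_SEQ in [15:8]). The sketch below shows one way to decode the first two header bytes and is illustrative only; struct example_c2h and its fields are invented and need not match the driver's c2h_evt_hdr definition.

#include <linux/types.h>

/* Sketch: decode the first two C2H header bytes per the layout above. */
struct example_c2h {
	u8 id;		/* bits [3:0]  */
	u8 plen;	/* bits [7:4]  */
	u8 seq;		/* bits [15:8] */
};

static void example_c2h_decode(const u8 hdr[2], struct example_c2h *evt)
{
	evt->id   = hdr[0] & 0x0f;
	evt->plen = hdr[0] >> 4;
	evt->seq  = hdr[1];
}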
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
index 95534f9c7a0f..682c65b1e04c 100644
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ b/drivers/staging/r8188eu/core/rtw_fw.c
@@ -236,7 +236,7 @@ static int load_firmware(struct rt_firmware *rtfw, struct device *device)
{
int ret = _SUCCESS;
const struct firmware *fw;
- const char *fw_name = "rtlwifi/rtl8188eufw.bin";
+ const char *fw_name = FW_RTL8188EU;
int err = request_firmware(&fw, fw_name, device);
if (err) {
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 17f6bcbeebf4..55e6b0f41dc3 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -11,8 +11,6 @@
#include "../include/usb_osintf.h"
#include "../include/usb_ops.h"
-extern void indicate_wx_scan_complete_event(struct adapter *padapter);
-
u8 rtw_do_join(struct adapter *padapter)
{
struct list_head *plist, *phead;
@@ -43,7 +41,7 @@ u8 rtw_do_join(struct adapter *padapter)
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
/* submit site_survey_cmd */
- ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1);
if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
@@ -89,7 +87,7 @@ u8 rtw_do_join(struct adapter *padapter)
/* we try to issue sitesurvey firstly */
if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
pmlmepriv->to_roaming > 0) {
- ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1);
if (ret != _SUCCESS)
pmlmepriv->to_join = false;
} else {
@@ -353,14 +351,9 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
/* Scan or linking is in progress, do nothing. */
res = true;
} else {
- if (rtw_is_scan_deny(padapter)) {
- indicate_wx_scan_complete_event(padapter);
- return _SUCCESS;
- }
-
spin_lock_bh(&pmlmepriv->lock);
- res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
+ res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num);
spin_unlock_bh(&pmlmepriv->lock);
}
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index d5c6c5e29621..1e316e6358ea 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -25,9 +25,7 @@ static void ResetLedStatus(struct led_priv *pLed)
pLed->bLedWPSBlinkInProgress = false;
pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
- pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF are. */
- pLed->bLedNoLinkBlinkInProgress = false;
pLed->bLedLinkBlinkInProgress = false;
pLed->bLedScanBlinkInProgress = false;
}
@@ -37,7 +35,7 @@ static void SwLedOn(struct adapter *padapter, struct led_priv *pLed)
u8 LedCfg;
int res;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->bDriverStopped)
return;
res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);
@@ -53,7 +51,7 @@ static void SwLedOff(struct adapter *padapter, struct led_priv *pLed)
u8 LedCfg;
int res;
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ if (padapter->bDriverStopped)
goto exit;
res = rtw_read8(padapter, REG_LEDCFG2, &LedCfg);/* 0x4E */
@@ -79,41 +77,25 @@ static void blink_work(struct work_struct *work)
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
-
if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
SwLedOff(padapter, pLed);
ResetLedStatus(pLed);
return;
}
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
+ if (pLed->bLedOn)
SwLedOff(padapter, pLed);
+ else
+ SwLedOn(padapter, pLed);
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_BLINK_NORMAL:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
break;
case LED_BLINK_SCAN:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -121,7 +103,6 @@ static void blink_work(struct work_struct *work)
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else {
- pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
@@ -131,10 +112,6 @@ static void blink_work(struct work_struct *work)
}
break;
case LED_BLINK_TXRX:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -142,7 +119,6 @@ static void blink_work(struct work_struct *work)
pLed->CurrLedState = LED_BLINK_NORMAL;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else {
- pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
@@ -152,25 +128,16 @@ static void blink_work(struct work_struct *work)
}
break;
case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState != RTW_LED_ON) {
+ if (!pLed->bLedOn) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
pLed->bLedWPSBlinkInProgress = false;
} else {
- pLed->BlinkingLedState = RTW_LED_OFF;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
}
break;
@@ -217,192 +184,110 @@ void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
switch (LedAction) {
case LED_CTL_START_TO_LINK:
case LED_CTL_NO_LINK:
- if (!pLed->bLedNoLinkBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
- }
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_CTL_LINK:
- if (!pLed->bLedLinkBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
- }
+ if (!pLed->bLedLinkBlinkInProgress)
+ return;
+
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = true;
+
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
break;
case LED_CTL_SITE_SURVEY:
- if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
- ;
- } else if (!pLed->bLedScanBlinkInProgress) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED)))
+ return;
+
+ if (pLed->bLedScanBlinkInProgress)
+ return;
+
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = true;
+
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_CTL_TX:
case LED_CTL_RX:
- if (!pLed->bLedBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
- }
+ if (pLed->bLedBlinkInProgress)
+ return;
+
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = true;
+
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
- if (!pLed->bLedWPSBlinkInProgress) {
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
- }
+ if (pLed->bLedWPSBlinkInProgress)
+ return;
+
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_CTL_STOP_WPS:
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress)
- cancel_delayed_work(&pLed->blink_work);
- else
- pLed->bLedWPSBlinkInProgress = true;
+ cancel_delayed_work(&pLed->blink_work);
+
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+ pLed->bLedWPSBlinkInProgress = true;
+
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
} else {
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, 0);
}
break;
case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedWPSBlinkInProgress = false;
- }
- pLed->bLedNoLinkBlinkInProgress = true;
+ cancel_delayed_work(&pLed->blink_work);
+ pLed->bLedWPSBlinkInProgress = false;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_CTL_POWER_OFF:
pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedNoLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- cancel_delayed_work(&pLed->blink_work);
- pLed->bLedScanBlinkInProgress = false;
- }
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedBlinkInProgress = false;
+ pLed->bLedWPSBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+ cancel_delayed_work(&pLed->blink_work);
SwLedOff(padapter, pLed);
break;
default:
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 2705c9d87b14..5ca03d6cac32 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -5,10 +5,7 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/xmit_osdep.h"
#include "../include/hal_intf.h"
-#include "../include/mlme_osdep.h"
#include "../include/sta_info.h"
#include "../include/wifi.h"
#include "../include/wlan_bssdef.h"
@@ -190,6 +187,37 @@ u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
return ie + 8;
}
+static void rtw_join_timeout_handler(struct timer_list *t)
+{
+ struct adapter *adapter = from_timer(adapter, t, mlmepriv.assoc_timer);
+
+ _rtw_join_timeout_handler(adapter);
+}
+
+static void _rtw_scan_timeout_handler(struct timer_list *t)
+{
+ struct adapter *adapter = from_timer(adapter, t, mlmepriv.scan_to_timer);
+
+ rtw_scan_timeout_handler(adapter);
+}
+
+static void _dynamic_check_timer_handlder(struct timer_list *t)
+{
+ struct adapter *adapter = from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
+
+ rtw_dynamic_check_timer_handlder(adapter);
+ _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
+}
+
+static void rtw_init_mlme_timer(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ timer_setup(&pmlmepriv->assoc_timer, rtw_join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, _rtw_scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dynamic_chk_timer, _dynamic_check_timer_handlder, 0);
+}
+
int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
{
int i;
@@ -235,8 +263,6 @@ int rtw_init_mlme_priv(struct adapter *padapter)/* struct mlme_priv *pmlmepriv)
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
- rtw_clear_scan_deny(padapter);
-
rtw_init_mlme_timer(padapter);
exit:
@@ -641,6 +667,23 @@ exit:
spin_unlock_bh(&pmlmepriv->lock);
}
+static void rtw_xmit_schedule(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv;
+
+ if (!padapter)
+ return;
+
+ pxmitpriv = &padapter->xmitpriv;
+
+ spin_lock_bh(&pxmitpriv->lock);
+
+ if (rtw_txframes_pending(padapter))
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+ spin_unlock_bh(&pxmitpriv->lock);
+}
+
void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
{
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -697,7 +740,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
} else {
if (rtw_to_roaming(adapter) != 0) {
if (--pmlmepriv->to_roaming == 0 ||
- rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0) != _SUCCESS) {
+ rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1) != _SUCCESS) {
rtw_set_roaming(adapter, 0);
rtw_free_assoc_resources(adapter, 1);
rtw_indicate_disconnect(adapter);
@@ -719,7 +762,7 @@ void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, _FW_LINKED))
p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
- rtw_os_xmit_schedule(adapter);
+ rtw_xmit_schedule(adapter);
}
static void free_scanqueue(struct mlme_priv *pmlmepriv)
@@ -795,6 +838,48 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
}
+static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
+
+static void rtw_reset_securitypriv(struct adapter *adapter)
+{
+ u8 backup_index;
+ u8 backup_counter;
+ u32 backup_time;
+
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ /* 802.1x */
+ /* We have to backup the PMK information for WiFi PMK Caching test item. */
+ /* Backup the btkip_countermeasure information. */
+		/* When the countermeasure is triggered, the driver has to disconnect from the AP for 60 seconds. */
+ memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ backup_index = adapter->securitypriv.PMKIDIndex;
+ backup_counter = adapter->securitypriv.btkip_countermeasure;
+ backup_time = adapter->securitypriv.btkip_countermeasure_time;
+ memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
+
+ /* Restore the PMK information to securitypriv structure for the following connection. */
+ memcpy(&adapter->securitypriv.PMKIDList[0],
+ &backup_pmkid[0],
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ adapter->securitypriv.PMKIDIndex = backup_index;
+ adapter->securitypriv.btkip_countermeasure = backup_counter;
+ adapter->securitypriv.btkip_countermeasure_time = backup_time;
+ adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+ } else {
+ /* reset values in securitypriv */
+ struct security_priv *psec_priv = &adapter->securitypriv;
+
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psec_priv->dot11PrivacyKeyIndex = 0;
+ psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psec_priv->dot118021XGrpKeyid = 1;
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
+ }
+}
+
/*
*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
*/
@@ -809,12 +894,13 @@ void rtw_indicate_connect(struct adapter *padapter)
rtw_led_control(padapter, LED_CTL_LINK);
- rtw_os_indicate_connect(padapter);
+ rtw_indicate_wx_assoc_event(padapter);
+ netif_carrier_on(padapter->pnetdev);
+ if (padapter->pid[2] != 0)
+ rtw_signal_process(padapter->pid[2], SIGALRM);
}
pmlmepriv->to_roaming = 0;
-
- rtw_set_scan_deny(padapter, 3000);
}
/*
@@ -831,11 +917,14 @@ void rtw_indicate_disconnect(struct adapter *padapter)
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
(pmlmepriv->to_roaming <= 0)) {
- rtw_os_indicate_disconnect(padapter);
+ /* Do it first for tx broadcast pkt after disconnection issue! */
+ netif_carrier_off(padapter->pnetdev);
+
+ rtw_indicate_wx_disassoc_event(padapter);
+ rtw_reset_securitypriv(padapter);
_clr_fwstate_(pmlmepriv, _FW_LINKED);
rtw_led_control(padapter, LED_CTL_NO_LINK);
- rtw_clear_scan_deny(padapter);
}
p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
@@ -843,9 +932,9 @@ void rtw_indicate_disconnect(struct adapter *padapter)
}
-inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
+inline void rtw_indicate_scan_done(struct adapter *padapter)
{
- rtw_os_indicate_scan_done(padapter, aborted);
+ indicate_wx_scan_complete_event(padapter);
}
static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
@@ -1068,8 +1157,7 @@ void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
- rtw_os_xmit_schedule(adapter);
-
+ rtw_xmit_schedule(adapter);
}
void rtw_set_max_rpt_macid(struct adapter *adapter, u8 macid)
@@ -1316,7 +1404,7 @@ void rtw_scan_timeout_handler (struct adapter *adapter)
spin_lock_bh(&pmlmepriv->lock);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
spin_unlock_bh(&pmlmepriv->lock);
- rtw_indicate_scan_done(adapter, true);
+ rtw_indicate_scan_done(adapter);
}
static void rtw_auto_scan_handler(struct adapter *padapter)
@@ -1442,10 +1530,6 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
pmlmepriv->pscanned = phead->next;
while (phead != pmlmepriv->pscanned) {
pnetwork = container_of(pmlmepriv->pscanned, struct wlan_network, list);
- if (!pnetwork) {
- ret = _FAIL;
- goto exit;
- }
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
}
@@ -1639,6 +1723,33 @@ static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie
return ie_len;
}
+static void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
+{
+ uint len;
+ u8 *buff, *p, i;
+ union iwreq_data wrqu;
+
+ buff = NULL;
+ if (authmode == _WPA_IE_ID_) {
+ buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
+ if (!buff)
+ return;
+ p = buff;
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
+ len = sec_ie[1] + 2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+ for (i = 0; i < len; i++)
+ p += sprintf(p, "%02x", sec_ie[i]);
+ p += sprintf(p, ")");
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = p - buff;
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
+ wrqu.data.length : IW_CUSTOM_MAX;
+ wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
+ kfree(buff);
+ }
+}
+
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len)
{
u8 authmode = 0;
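Note: the mlme timer conversion earlier in this file uses the timer_setup()/from_timer() pattern. A compact sketch of that pattern, built around an invented structure, for reference:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_ctx {
	struct timer_list watchdog;
};

/* Sketch: from_timer() recovers the containing structure from the
 * timer_list pointer handed to the callback.
 */
static void example_watchdog_fn(struct timer_list *t)
{
	struct example_ctx *ctx = from_timer(ctx, t, watchdog);

	/* ... handle expiry ... */
	mod_timer(&ctx->watchdog, jiffies + 2 * HZ);
}

static void example_watchdog_start(struct example_ctx *ctx)
{
	timer_setup(&ctx->watchdog, example_watchdog_fn, 0);
	mod_timer(&ctx->watchdog, jiffies + 2 * HZ);
}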
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 32d0e101d0c2..07905e2ae8e0 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -9,8 +9,6 @@
#include "../include/wifi.h"
#include "../include/rtw_mlme_ext.h"
#include "../include/wlan_bssdef.h"
-#include "../include/mlme_osdep.h"
-#include "../include/recv_osdep.h"
#include "../include/rtl8188e_xmit.h"
#include "../include/rtl8188e_dm.h"
@@ -334,6 +332,28 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
return chanset_size;
}
+static void _survey_timer_hdl(struct timer_list *t)
+{
+ struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.survey_timer);
+
+ survey_timer_hdl(padapter);
+}
+
+static void _link_timer_hdl(struct timer_list *t)
+{
+ struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.link_timer);
+
+ link_timer_hdl(padapter);
+}
+
+static void init_mlme_ext_timer(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ timer_setup(&pmlmeext->survey_timer, _survey_timer_hdl, 0);
+ timer_setup(&pmlmeext->link_timer, _link_timer_hdl, 0);
+}
+
void init_mlme_ext_priv(struct adapter *padapter)
{
struct registry_priv *pregistrypriv = &padapter->registrypriv;
@@ -910,6 +930,46 @@ authclnt_fail:
return _FAIL;
}
+static void UpdateBrateTbl(u8 *mbrate)
+{
+ u8 i;
+ u8 rate;
+
+ /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ rate = mbrate[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+static void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
+{
+ u8 i;
+ u8 rate;
+
+ for (i = 0; i < bssratelen; i++) {
+ rate = bssrateset[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
u16 capab_info;
@@ -1320,9 +1380,9 @@ OnAssocReqFail:
unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
uint i;
int res;
- unsigned short status;
struct ndis_802_11_var_ie *pIE;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -1331,7 +1391,7 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
uint pkt_len = precv_frame->len;
/* check A1 matches or not */
- if (memcmp(myid(&padapter->eeprompriv), get_da(pframe), ETH_ALEN))
+ if (memcmp(myid(&padapter->eeprompriv), mgmt->da, ETH_ALEN))
return _SUCCESS;
if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
@@ -1342,28 +1402,24 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
_cancel_timer_ex(&pmlmeext->link_timer);
- /* status */
- status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
- if (status > 0) {
+ if (le16_to_cpu(mgmt->u.assoc_resp.status_code) > 0) {
pmlmeinfo->state = WIFI_FW_NULL_STATE;
res = -4;
goto report_assoc_result;
}
- /* get capabilities */
- pmlmeinfo->capability = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+ pmlmeinfo->capability = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
/* set slot time */
pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
- /* AID */
- pmlmeinfo->aid = (int)(le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 4)) & 0x3fff);
+ pmlmeinfo->aid = le16_to_cpu(mgmt->u.assoc_resp.aid) & 0x3fff;
res = pmlmeinfo->aid;
/* following are moved to join event callback function */
/* to handle HT, WMM, rate adaptive, update MAC reg */
/* for not to handle the synchronous IO in the tasklet */
- for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ for (i = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable); i < pkt_len;) {
pIE = (struct ndis_802_11_var_ie *)(pframe + i);
switch (pIE->ElementID) {
@@ -1391,7 +1447,7 @@ unsigned int OnAssocRsp(struct adapter *padapter, struct recv_frame *precv_frame
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
/* Update Basic Rate Table for spec, 2010-12-28 , by thomas */
- UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
+ UpdateBrateTbl(pmlmeinfo->network.SupportedRates);
report_assoc_result:
report_join_res(padapter, res);
@@ -7858,7 +7914,7 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta_bmc->sleep_q.lock);
}
spin_unlock_bh(&psta_bmc->sleep_q.lock);
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index bd654d4ff8b4..dc159e58f428 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -1883,15 +1883,14 @@ void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
{
- int ret = _SUCCESS;
+ int ret;
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
/* leave IPS/Autosuspend */
- if (rtw_pwr_wakeup(padapter)) {
- ret = _FAIL;
- goto exit;
- }
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
+ return ret;
/* Added by Albert 2011/03/22 */
/* In the P2P mode, the driver should not support the b mode. */
@@ -1902,10 +1901,9 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
init_wifidirect_info(padapter, role);
} else if (role == P2P_ROLE_DISABLE) {
- if (rtw_pwr_wakeup(padapter)) {
- ret = _FAIL;
- goto exit;
- }
+ ret = rtw_pwr_wakeup(padapter);
+ if (ret)
+ return ret;
/* Disable P2P function */
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
@@ -1923,6 +1921,5 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
}
-exit:
- return ret;
+ return 0;
}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 10550bd2c16d..870d81735b8d 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -89,7 +89,7 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
struct wifidirect_info *pwdinfo = &adapter->wdinfo;
bool ret = false;
- if (adapter->pwrctrlpriv.ips_deny_time >= jiffies)
+ if (time_after_eq(adapter->pwrctrlpriv.ips_deny_time, jiffies))
goto exit;
if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE | WIFI_SITE_MONITOR) ||
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index e5a7b7dfc387..bb5c3b3888e0 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -6,8 +6,6 @@
#include <linux/ieee80211.h>
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/usb_ops.h"
#include "../include/wifi.h"
#include "../include/rtl8188e_recv.h"
@@ -37,6 +35,69 @@ void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
}
+static int rtl8188eu_init_recv_priv(struct adapter *padapter)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int i, res = _SUCCESS;
+ struct recv_buf *precvbuf;
+
+ tasklet_init(&precvpriv->recv_tasklet,
+ rtl8188eu_recv_tasklet,
+ (unsigned long)padapter);
+
+ /* init recv_buf */
+ rtw_init_queue(&precvpriv->free_recv_buf_queue);
+
+ precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
+ GFP_KERNEL);
+ if (!precvpriv->pallocated_recv_buf) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!precvbuf->purb) {
+ res = _FAIL;
+ break;
+ }
+ precvbuf->adapter = padapter;
+ precvbuf++;
+ }
+ precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
+ skb_queue_head_init(&precvpriv->rx_skb_queue);
+ {
+ int i;
+ size_t tmpaddr = 0;
+ size_t alignment = 0;
+ struct sk_buff *pskb = NULL;
+
+ skb_queue_head_init(&precvpriv->free_recv_skb_queue);
+
+ for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
+ pskb = __netdev_alloc_skb(padapter->pnetdev,
+ MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
+ if (pskb) {
+ pskb->dev = padapter->pnetdev;
+ tmpaddr = (size_t)pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
+ skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+ pskb = NULL;
+ }
+ }
+exit:
+ return res;
+}
+
int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
{
int i;
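
The init helper moved in above relies on two alignment tricks: the receive-buffer array is over-allocated and rounded up with ALIGN(), and each preallocated skb has its data pointer pushed to a boundary with skb_reserve(). A sketch of both, with an illustrative alignment value (the hunk uses 4 and RECVBUFF_ALIGN_SZ):

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>

#define EXAMPLE_ALIGN 4		/* illustrative alignment */

/* Over-allocate by the alignment, keep the raw pointer for kfree(),
 * and hand back the rounded-up address.
 */
static void *alloc_aligned(size_t size, void **raw)
{
	*raw = kzalloc(size + EXAMPLE_ALIGN, GFP_KERNEL);
	if (!*raw)
		return NULL;

	return (void *)ALIGN((uintptr_t)*raw, EXAMPLE_ALIGN);
}

/* Push skb->data forward so the payload starts on an aligned address. */
static void align_skb_data(struct sk_buff *skb, unsigned int align)
{
	unsigned long off = (unsigned long)skb->data & (align - 1);

	if (off)
		skb_reserve(skb, align - off);
}
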
@@ -91,6 +152,26 @@ exit:
return res;
}
+static void rtl8188eu_free_recv_priv(struct adapter *padapter)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ usb_free_urb(precvbuf->purb);
+ precvbuf++;
+ }
+
+ kfree(precvpriv->pallocated_recv_buf);
+
+ skb_queue_purge(&precvpriv->rx_skb_queue);
+
+ skb_queue_purge(&precvpriv->free_recv_skb_queue);
+}
+
void _rtw_free_recv_priv(struct recv_priv *precvpriv)
{
struct adapter *padapter = precvpriv->adapter;
@@ -244,6 +325,42 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
return cnt;
}
+static void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
+{
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 cur_time = 0;
+
+ if (psecuritypriv->last_mic_err_time == 0) {
+ psecuritypriv->last_mic_err_time = jiffies;
+ } else {
+ cur_time = jiffies;
+
+ if (cur_time - psecuritypriv->last_mic_err_time < 60 * HZ) {
+ psecuritypriv->btkip_countermeasure = true;
+ psecuritypriv->last_mic_err_time = 0;
+ psecuritypriv->btkip_countermeasure_time = cur_time;
+ } else {
+ psecuritypriv->last_mic_err_time = jiffies;
+ }
+ }
+
+ memset(&ev, 0x00, sizeof(ev));
+ if (bgroup)
+ ev.flags |= IW_MICFAILURE_GROUP;
+ else
+ ev.flags |= IW_MICFAILURE_PAIRWISE;
+
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+ memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
+ memset(&wrqu, 0x00, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+ wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
+ &wrqu, (char *)&ev);
+}
+
static int recvframe_chkmic(struct adapter *adapter, struct recv_frame *precvframe)
{
int i, res = _SUCCESS;
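
rtw_handle_tkip_mic_err() above arms TKIP countermeasures when a second MIC failure arrives within 60 seconds of the first. The timing part of that logic, sketched with a wrap-safe comparison; the state struct is only a stand-in for the security_priv fields:

#include <linux/jiffies.h>
#include <linux/types.h>

struct mic_state {
	unsigned long last_err;		/* 0 means no failure pending */
	bool countermeasure;
};

static void note_mic_failure(struct mic_state *st)
{
	unsigned long now = jiffies;

	if (st->last_err && time_before(now, st->last_err + 60 * HZ)) {
		st->countermeasure = true;	/* second failure inside the window */
		st->last_err = 0;
	} else {
		st->last_err = now;		/* start (or restart) the window */
	}
}
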
@@ -1294,7 +1411,6 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
u8 nr_subframes, i;
unsigned char *pdata;
struct rx_pkt_attrib *pattrib;
- unsigned char *data_ptr;
struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
struct recv_priv *precvpriv = &padapter->recvpriv;
@@ -1329,8 +1445,7 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
if (sub_skb) {
skb_reserve(sub_skb, 12);
- data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
- memcpy(data_ptr, pdata, nSubframe_Length);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
} else {
sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
if (sub_skb) {
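
The change above folds skb_put() plus memcpy() into a single skb_put_data() call, which extends the skb and copies the payload in one step. A small sketch mirroring the A-MSDU subframe path (sizes illustrative):

#include <linux/skbuff.h>

static struct sk_buff *build_subframe(const void *payload, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 12);

	if (!skb)
		return NULL;

	skb_reserve(skb, 12);			/* headroom for a header added later */
	skb_put_data(skb, payload, len);	/* extends skb->len and copies */

	return skb;
}
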
@@ -1460,6 +1575,85 @@ static bool enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, s
return true;
}
+static int rtw_recv_indicatepkt(struct adapter *padapter, struct recv_frame *precv_frame)
+{
+ struct recv_priv *precvpriv;
+ struct __queue *pfree_recv_queue;
+ struct sk_buff *skb;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ precvpriv = &padapter->recvpriv;
+ pfree_recv_queue = &precvpriv->free_recv_queue;
+
+ skb = precv_frame->pkt;
+ if (!skb)
+ goto _recv_indicatepkt_drop;
+
+ skb->data = precv_frame->rx_data;
+
+ skb_set_tail_pointer(skb, precv_frame->len);
+
+ skb->len = precv_frame->len;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sk_buff *pskb2 = NULL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
+ bool bmcast = is_multicast_ether_addr(pattrib->dst);
+
+ if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pskb2 = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->dst);
+ }
+
+ if (psta) {
+ struct net_device *pnetdev;
+
+ pnetdev = (struct net_device *)padapter->pnetdev;
+ skb->dev = pnetdev;
+ skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
+
+ rtw_xmit_entry(skb, pnetdev);
+
+ if (bmcast)
+ skb = pskb2;
+ else
+ goto _recv_indicatepkt_end;
+ }
+ }
+ }
+
+ rcu_read_lock();
+ rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->dev = padapter->pnetdev;
+ skb->protocol = eth_type_trans(skb, padapter->pnetdev);
+
+ netif_rx(skb);
+
+_recv_indicatepkt_end:
+
+ /* set precv_frame->pkt to NULL before rtw_free_recvframe() */
+ precv_frame->pkt = NULL;
+
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ return _SUCCESS;
+
+_recv_indicatepkt_drop:
+
+ /* enqueue back to free_recv_queue */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ return _FAIL;
+}
+
static bool recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
{
struct list_head *phead, *plist;
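
rtw_recv_indicatepkt(), moved into core code above, ends with the standard receive hand-off to the network stack. That final step in isolation, as a sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hand a fully assembled Ethernet frame to the network stack. */
static void deliver_to_stack(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->ip_summed = CHECKSUM_NONE;			/* no checksum offload */
	skb->protocol = eth_type_trans(skb, dev);	/* sets pkt_type, pulls the Ethernet header */
	netif_rx(skb);					/* queue for softirq processing */
}
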
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index 357f98e22d8a..98eeb16cab6c 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -5,9 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/xmit_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/sta_info.h"
static void _rtw_init_stainfo(struct sta_info *psta)
@@ -141,6 +138,31 @@ void _rtw_free_sta_priv(struct sta_priv *pstapriv)
}
}
+static void _rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
+{
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ preorder_ctrl = from_timer(preorder_ctrl, t, reordering_ctrl_timer);
+ rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
+}
+
+static void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
+{
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer, _rtw_reordering_ctrl_timeout_handler, 0);
+}
+
+static void _addba_timer_hdl(struct timer_list *t)
+{
+ struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
+
+ addba_timer_hdl(psta);
+}
+
+static void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
+{
+ timer_setup(&psta->addba_retry_timer, _addba_timer_hdl, 0);
+}
+
struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
s32 index;
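
The timer helpers moved in above follow the timer_setup()/from_timer() pattern: the callback receives the timer_list pointer and recovers the enclosing object from it, so no cast of an opaque data argument is needed. A self-contained sketch with an illustrative struct:

#include <linux/timer.h>

struct retry_ctx {
	struct timer_list retry_timer;
	unsigned int retries;
};

static void retry_timeout(struct timer_list *t)
{
	struct retry_ctx *ctx = from_timer(ctx, t, retry_timer);

	ctx->retries++;
}

static void retry_ctx_init(struct retry_ctx *ctx)
{
	ctx->retries = 0;
	timer_setup(&ctx->retry_timer, retry_timeout, 0);
}
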
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 3a002cb6834f..e50631848cab 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -222,46 +222,6 @@ void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrat
memcpy(pbssrate, supportedrates, *bssrate_len);
}
-void UpdateBrateTbl(struct adapter *Adapter, u8 *mbrate)
-{
- u8 i;
- u8 rate;
-
- /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
- for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
- rate = mbrate[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- case IEEE80211_OFDM_RATE_6MB:
- case IEEE80211_OFDM_RATE_12MB:
- case IEEE80211_OFDM_RATE_24MB:
- mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
-void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
-{
- u8 i;
- u8 rate;
-
- for (i = 0; i < bssratelen; i++) {
- rate = bssrateset[i] & 0x7f;
- switch (rate) {
- case IEEE80211_CCK_RATE_1MB:
- case IEEE80211_CCK_RATE_2MB:
- case IEEE80211_CCK_RATE_5MB:
- case IEEE80211_CCK_RATE_11MB:
- bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
- break;
- }
- }
-}
-
void Save_DM_Func_Flag(struct adapter *padapter)
{
struct hal_data_8188e *haldata = &padapter->haldata;
@@ -1578,10 +1538,8 @@ void beacon_timing_control(struct adapter *padapter)
static struct adapter *pbuddy_padapter;
-int rtw_handle_dualmac(struct adapter *adapter, bool init)
+void rtw_handle_dualmac(struct adapter *adapter, bool init)
{
- int status = _SUCCESS;
-
if (init) {
if (!pbuddy_padapter) {
pbuddy_padapter = adapter;
@@ -1594,5 +1552,4 @@ int rtw_handle_dualmac(struct adapter *adapter, bool init)
} else {
pbuddy_padapter = NULL;
}
- return status;
}
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 24401f3ae2a0..873d2c5c3634 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -33,6 +33,32 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
INIT_LIST_HEAD(&psta_xmitpriv->apsd);
}
+static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf,
+ u32 alloc_sz)
+{
+ pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
+ if (!pxmitbuf->pallocated_buf)
+ return _FAIL;
+
+ pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ pxmitbuf->dma_transfer_addr = 0;
+
+ pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!pxmitbuf->pxmit_urb) {
+ kfree(pxmitbuf->pallocated_buf);
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
+
+static void rtw_xmit_resource_free(struct adapter *padapter, struct xmit_buf *pxmitbuf,
+ u32 free_sz)
+{
+ usb_free_urb(pxmitbuf->pxmit_urb);
+ kfree(pxmitbuf->pallocated_buf);
+}
+
s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
{
int i;
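
rtw_xmit_resource_alloc()/rtw_xmit_resource_free() above pair a kzalloc()'d transfer buffer with its URB and undo the first allocation when the second one fails. The same shape as a generic sketch:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

struct xfer_res {
	void *buf;
	struct urb *urb;
};

static int xfer_res_alloc(struct xfer_res *res, size_t len)
{
	res->buf = kzalloc(len, GFP_KERNEL);
	if (!res->buf)
		return -ENOMEM;

	res->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!res->urb) {
		kfree(res->buf);		/* undo the first allocation */
		res->buf = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void xfer_res_free(struct xfer_res *res)
{
	usb_free_urb(res->urb);			/* both calls are NULL-safe */
	kfree(res->buf);
}
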
@@ -108,7 +134,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (!pxmitpriv->pallocated_xmitbuf) {
res = _FAIL;
- goto exit;
+ goto free_frame_buf;
}
pxmitpriv->pxmitbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
@@ -125,12 +151,12 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->ext_tag = false;
/* Tx buf allocation may fail sometimes, so sleep and retry. */
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ res = rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL) {
msleep(10);
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ res = rtw_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
if (res == _FAIL)
- goto exit;
+ goto free_xmitbuf;
}
pxmitbuf->flags = XMIT_VO_QUEUE;
@@ -148,7 +174,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (!pxmitpriv->pallocated_xmit_extbuf) {
res = _FAIL;
- goto exit;
+ goto free_xmitbuf;
}
pxmitpriv->pxmit_extbuf = (u8 *)ALIGN((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
@@ -162,10 +188,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->padapter = padapter;
pxmitbuf->ext_tag = true;
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ res = rtw_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
if (res == _FAIL) {
res = _FAIL;
- goto exit;
+ goto free_xmit_extbuf;
}
list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
@@ -176,7 +202,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (rtw_alloc_hwxmits(padapter)) {
res = _FAIL;
- goto exit;
+ goto free_xmit_extbuf;
}
rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
@@ -200,11 +226,54 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
rtl8188eu_init_xmit_priv(padapter);
-exit:
+ return _SUCCESS;
+free_xmit_extbuf:
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ while (i--) {
+ rtw_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+ vfree(pxmitpriv->pallocated_xmit_extbuf);
+ i = NR_XMITBUFF;
+free_xmitbuf:
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+ while (i--) {
+ rtw_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+ vfree(pxmitpriv->pallocated_xmitbuf);
+free_frame_buf:
+ vfree(pxmitpriv->pallocated_frame_buf);
+exit:
return res;
}
+static void rtw_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
+{
+ u16 queue;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
+ (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+
+ dev_kfree_skb_any(pkt);
+}
+
+void rtw_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
+{
+ if (pxframe->pkt)
+ rtw_pkt_complete(padapter, pxframe->pkt);
+ pxframe->pkt = NULL;
+}
+
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
{
int i;
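
The error path added to _rtw_init_xmit_priv() above replaces a single "exit" label with one label per allocation, so a failure unwinds exactly what has been set up so far, in reverse order. A compact sketch of that pattern with placeholder buffers:

#include <linux/errno.h>
#include <linux/slab.h>

static int init_three_buffers(void **a, void **b, void **c, size_t len)
{
	*a = kzalloc(len, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kzalloc(len, GFP_KERNEL);
	if (!*b)
		goto free_a;

	*c = kzalloc(len, GFP_KERNEL);
	if (!*c)
		goto free_b;

	return 0;

free_b:
	kfree(*b);
free_a:
	kfree(*a);
	return -ENOMEM;
}
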
@@ -218,13 +287,13 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
return;
for (i = 0; i < NR_XMITFRAME; i++) {
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
pxmitframe++;
}
for (i = 0; i < NR_XMITBUFF; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ rtw_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
pxmitbuf++;
}
@@ -234,7 +303,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
for (i = 0; i < num_xmit_extbuf; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ rtw_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
pxmitbuf++;
}
@@ -378,18 +447,59 @@ u8 qos_acm(u8 acm_mask, u8 priority)
return change_priority;
}
+static void rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
+{
+ if (!pktptr) {
+ pr_err("8188eu: pktptr is NULL\n");
+ return;
+ }
+ if (!pfile) {
+ pr_err("8188eu: pfile is NULL\n");
+ return;
+ }
+ pfile->pkt = pktptr;
+ pfile->cur_addr = pktptr->data;
+ pfile->buf_start = pktptr->data;
+ pfile->pkt_len = pktptr->len;
+ pfile->buf_len = pktptr->len;
+
+ pfile->cur_buffer = pfile->buf_start;
+}
+
+static uint rtw_remainder_len(struct pkt_file *pfile)
+{
+ return pfile->buf_len - ((size_t)(pfile->cur_addr) -
+ (size_t)(pfile->buf_start));
+}
+
+static uint rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
+{
+ uint len;
+
+ len = rtw_remainder_len(pfile);
+ len = (rlen > len) ? len : rlen;
+
+ if (rmem)
+ skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);
+
+ pfile->cur_addr += len;
+ pfile->pkt_len -= len;
+
+ return len;
+}
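
rtw_pktfile_read() above uses skb_copy_bits(), which copies from an skb at a given offset and also works for non-linear (paged) skbs, unlike a plain memcpy() from skb->data. A sketch of a bounded read helper (names illustrative):

#include <linux/minmax.h>
#include <linux/skbuff.h>

static unsigned int read_from_skb(const struct sk_buff *skb, unsigned int offset,
				  void *dst, unsigned int want)
{
	unsigned int len;

	if (offset >= skb->len)
		return 0;

	len = min(want, skb->len - offset);
	if (dst && skb_copy_bits(skb, offset, dst, len))
		return 0;		/* should not happen after the bounds check */

	return len;
}
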
+
static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
struct ethhdr etherhdr;
struct iphdr ip_hdr;
s32 user_prio = 0;
- _rtw_open_pktfile(ppktfile->pkt, ppktfile);
- _rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+ rtw_open_pktfile(ppktfile->pkt, ppktfile);
+ rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
/* get user_prio from IP hdr */
if (pattrib->ether_type == 0x0800) {
- _rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
+ rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
/* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
user_prio = ip_hdr.tos >> 5;
} else if (pattrib->ether_type == 0x888e) {
@@ -418,8 +528,8 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
- _rtw_open_pktfile(pkt, &pktfile);
- _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+ rtw_open_pktfile(pkt, &pktfile);
+ rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
pattrib->ether_type = ntohs(etherhdr.h_proto);
@@ -447,7 +557,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
/* to prevent DHCP protocol fail */
u8 tmp[24];
- _rtw_pktfile_read(&pktfile, &tmp[0], 24);
+ rtw_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/* MINIMUM_DHCP_PACKET_SIZE) { */
if (((tmp[21] == 68) && (tmp[23] == 67)) ||
@@ -460,9 +570,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
}
- if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
- rtw_set_scan_deny(padapter, 3000);
-
/* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
@@ -897,8 +1004,8 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
goto exit;
}
- _rtw_open_pktfile(pkt, &pktfile);
- _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+ rtw_open_pktfile(pkt, &pktfile);
+ rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
frg_inx = 0;
frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
@@ -956,9 +1063,9 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
if (bmcst) {
/* don't do fragment to broadcast/multicast packets */
- mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
+ mem_sz = rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
} else {
- mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
+ mem_sz = rtw_pktfile_read(&pktfile, pframe, mpdu_len);
}
pframe += mem_sz;
@@ -970,7 +1077,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
frg_inx++;
- if (bmcst || rtw_endofpktfile(&pktfile)) {
+ if (bmcst || pktfile.pkt_len == 0) {
pattrib->nr_frags = frg_inx;
pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
@@ -1286,7 +1393,7 @@ s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitfram
spin_unlock_bh(&pfree_xmit_queue->lock);
if (pndis_pkt)
- rtw_os_pkt_complete(padapter, pndis_pkt);
+ rtw_pkt_complete(padapter, pndis_pkt);
exit:
@@ -1945,7 +2052,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_unlock_bh(&psta->sleep_q.lock);
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta->sleep_q.lock);
}
@@ -1995,7 +2102,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
spin_unlock_bh(&psta_bmc->sleep_q.lock);
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta_bmc->sleep_q.lock);
}
@@ -2069,7 +2176,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
pxmitframe->attrib.triggered = 1;
if (rtl8188eu_hal_xmit(padapter, pxmitframe))
- rtw_os_xmit_complete(padapter, pxmitframe);
+ rtw_xmit_complete(padapter, pxmitframe);
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -2136,3 +2243,105 @@ void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
if (pxmitpriv->ack_tx)
rtw_sctx_done_err(&pack_tx_ops, status);
}
+
+static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ /* No free space for Tx, tx_worker is too slow */
+ if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (pxmitpriv->free_xmitframe_cnt <= 4) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ }
+}
+
+static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct list_head *phead, *plist;
+ struct sk_buff *newskb;
+ struct sta_info *psta = NULL;
+ s32 res;
+
+ spin_lock_bh(&pstapriv->asoc_list_lock);
+ phead = &pstapriv->asoc_list;
+ plist = phead->next;
+
+ /* walk the association list */
+ while (phead != plist) {
+ psta = container_of(plist, struct sta_info, asoc_list);
+
+ plist = plist->next;
+
+ /* avoid sending the frame back to the station it came from */
+ if (!memcmp(psta->hwaddr, &skb->data[6], 6))
+ continue;
+
+ newskb = skb_copy(skb, GFP_ATOMIC);
+
+ if (newskb) {
+ memcpy(newskb->data, psta->hwaddr, 6);
+ res = rtw_xmit(padapter, &newskb);
+ if (res < 0) {
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(newskb);
+ } else {
+ pxmitpriv->tx_pkts++;
+ }
+ } else {
+ pxmitpriv->tx_drop++;
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ return false; /* caller should transmit this multicast frame via the normal path */
+ }
+ }
+
+ spin_unlock_bh(&pstapriv->asoc_list_lock);
+ dev_kfree_skb_any(skb);
+ return true;
+}
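
rtw_mlcst2unicst() above duplicates a multicast frame once per associated station, skipping the station it came from. The per-station step in isolation, as a sketch; in the real code the station address comes from the driver's association list:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *dup_for_station(struct sk_buff *skb, const u8 *sta_addr)
{
	struct sk_buff *copy;

	/* bytes 6..11 of an Ethernet frame are the source MAC */
	if (ether_addr_equal(sta_addr, skb->data + ETH_ALEN))
		return NULL;			/* never echo a frame back to its sender */

	copy = skb_copy(skb, GFP_ATOMIC);	/* writable copy, header included */
	if (copy)
		ether_addr_copy(copy->data, sta_addr);	/* rewrite the destination MAC */

	return copy;
}
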
+
+netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ s32 res = 0;
+
+ if (!rtw_if_up(padapter))
+ goto drop_packet;
+
+ rtw_check_xmit_resource(padapter, pkt);
+
+ if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
+ (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
+ (padapter->registrypriv.wifi_spec == 0)) {
+ if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
+ res = rtw_mlcst2unicst(padapter, pkt);
+ if (res)
+ goto exit;
+ }
+ }
+
+ res = rtw_xmit(padapter, &pkt);
+ if (res < 0)
+ goto drop_packet;
+
+ pxmitpriv->tx_pkts++;
+ goto exit;
+
+drop_packet:
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(pkt);
+
+exit:
+ return NETDEV_TX_OK;
+}
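
rtw_xmit_entry() above is the driver's ndo_start_xmit handler: it owns the skb, so even a dropped frame is freed locally and NETDEV_TX_OK is returned. A bare-bones sketch of that contract; queue_frame() is a hypothetical stand-in for rtw_xmit():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int queue_frame(struct sk_buff *skb)
{
	return 0;		/* 0 when queued, negative errno when not */
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (queue_frame(skb) < 0) {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);		/* safe in any context */
	}

	return NETDEV_TX_OK;	/* NETDEV_TX_BUSY would mean "requeue", never used here */
}
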
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index 7901d0afa2e7..23b7205722b5 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -166,7 +166,14 @@ static u32 array_agc_tab_1t_8188e[] = {
0xC78, 0x407F0001,
};
-enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
+static void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+}
+
+int ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
@@ -176,7 +183,6 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
struct adapter *adapter = dm_odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
@@ -187,7 +193,7 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -238,10 +244,10 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
printk("~~~ %s IOL_exec_cmds Failed !!!\n", __func__);
- rst = HAL_STATUS_FAILURE;
+ return -1;
}
}
- return rst;
+ return 0;
}
/******************************************************************************
@@ -442,7 +448,31 @@ static u32 array_phy_reg_1t_8188e[] = {
0xF00, 0x00000300,
};
-enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
+static void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ if (Addr == 0xfe) {
+ msleep(50);
+ } else if (Addr == 0xfd) {
+ mdelay(5);
+ } else if (Addr == 0xfc) {
+ mdelay(1);
+ } else if (Addr == 0xfb) {
+ udelay(50);
+ } else if (Addr == 0xfa) {
+ udelay(5);
+ } else if (Addr == 0xf9) {
+ udelay(1);
+ } else {
+ if (Addr == 0xa24)
+ pDM_Odm->RFCalibrateInfo.RegA24 = Data;
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ }
+}
+
+int ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
@@ -452,7 +482,6 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
struct adapter *adapter = dm_odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
hex += 0xFF000000;
@@ -462,7 +491,7 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -544,11 +573,11 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
}
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- rst = HAL_STATUS_FAILURE;
pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ return -1;
}
}
- return rst;
+ return 0;
}
/******************************************************************************
@@ -647,6 +676,25 @@ static u32 array_phy_reg_pg_8188e[] = {
};
+static void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask,
+ u32 Data)
+{
+ if (Addr == 0xfe)
+ msleep(50);
+ else if (Addr == 0xfd)
+ mdelay(5);
+ else if (Addr == 0xfc)
+ mdelay(1);
+ else if (Addr == 0xfb)
+ udelay(50);
+ else if (Addr == 0xfa)
+ udelay(5);
+ else if (Addr == 0xf9)
+ udelay(1);
+ else
+ storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
+}
+
void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index 77b25885c63b..da71867bcca3 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -126,7 +126,12 @@ static u32 array_MAC_REG_8188E[] = {
0x70B, 0x00000087,
};
-enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
+static void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
+{
+ rtw_write8(pDM_Odm->Adapter, Addr, Data);
+}
+
+int ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
{
#define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = array[i]; v2 = array[i + 1]; } while (0)
@@ -139,7 +144,6 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
struct adapter *adapt = dm_odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
hex += 0xFF000000;
@@ -150,7 +154,7 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(adapt);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -201,8 +205,8 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
pr_info("~~~ MAC IOL_exec_cmds Failed !!!\n");
- rst = HAL_STATUS_FAILURE;
+ return -1;
}
}
- return rst;
+ return 0;
}
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 08cbfce3808d..a4c3d3d149f7 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -130,7 +130,37 @@ static u32 Array_RadioA_1T_8188E[] = {
0x000, 0x00033E60,
};
-enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
+static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Data, u32 RegAddr)
+{
+ if (Addr == 0xffe) {
+ msleep(50);
+ } else if (Addr == 0xfd) {
+ mdelay(5);
+ } else if (Addr == 0xfc) {
+ mdelay(1);
+ } else if (Addr == 0xfb) {
+ udelay(50);
+ } else if (Addr == 0xfa) {
+ udelay(5);
+ } else if (Addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RegAddr, bRFRegOffsetMask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+ }
+}
+
+static void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1000; /* RF_Content: radioa_txt */
+ u32 maskforPhySet = (u32)(content & 0xE000);
+
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, Addr | maskforPhySet);
+}
+
+int ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
{
#define READ_NEXT_PAIR(v1, v2, i) do \
{ i += 2; v1 = Array[i]; \
@@ -144,7 +174,6 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
struct adapter *Adapter = pDM_Odm->Adapter;
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
- enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
hex += ODM_ITRF_USB << 8;
hex += ODM_CE << 16;
@@ -155,7 +184,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
pxmit_frame = rtw_IOL_accquire_xmit_frame(Adapter);
if (!pxmit_frame) {
pr_info("rtw_IOL_accquire_xmit_frame failed\n");
- return HAL_STATUS_FAILURE;
+ return -ENOMEM;
}
}
@@ -232,9 +261,9 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
}
if (biol) {
if (!rtl8188e_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
- rst = HAL_STATUS_FAILURE;
pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ return -1;
}
}
- return rst;
+ return 0;
}
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
index 6a1cdc67335b..33967eb3c0d0 100644
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ b/drivers/staging/r8188eu/hal/hal_com.c
@@ -137,176 +137,3 @@ void HalSetBrateCfg(struct adapter *adapt, u8 *brates, u16 *rate_cfg)
}
}
}
-
-static void one_out_pipe(struct adapter *adapter)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-}
-
-static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- if (wifi_cfg) { /* WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 0, 1, 0, 1, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- } else {/* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 1, 0, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
- }
-}
-
-static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
-{
- struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
-
- if (wifi_cfg) {/* for WMM */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 1, 2, 1, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:N, 2:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
-
- } else {/* typical setting */
- /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
- /* 2, 2, 1, 0, 0, 0, 0, 0, 0}; */
- /* 0:H, 1:N, 2:L */
-
- pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
- pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
- pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
- pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2];/* BK */
-
- pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
- pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
- pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
- pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
- }
-}
-
-bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
-{
- struct registry_priv *pregistrypriv = &adapter->registrypriv;
- bool wifi_cfg = pregistrypriv->wifi_spec;
- bool result = true;
-
- switch (numoutpipe) {
- case 2:
- two_out_pipe(adapter, wifi_cfg);
- break;
- case 3:
- three_out_pipe(adapter, wifi_cfg);
- break;
- case 1:
- one_out_pipe(adapter);
- break;
- default:
- result = false;
- break;
- }
- return result;
-}
-
-/*
-* C2H event format:
-* Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
-* BITS [127:120] [119:16] [15:8] [7:4] [3:0]
-*/
-
-s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
-{
- s32 ret = _FAIL;
- struct c2h_evt_hdr *c2h_evt;
- int i;
- u8 trigger;
-
- if (!buf)
- goto exit;
-
- ret = rtw_read8(adapter, REG_C2HEVT_CLEAR, &trigger);
- if (ret)
- return _FAIL;
-
- if (trigger == C2H_EVT_HOST_CLOSE)
- goto exit; /* Not ready */
- else if (trigger != C2H_EVT_FW_CLOSE)
- goto clear_evt; /* Not a valid value */
-
- c2h_evt = (struct c2h_evt_hdr *)buf;
-
- memset(c2h_evt, 0, 16);
-
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL, buf);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
-
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1, buf + 1);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
- /* Read the content */
- for (i = 0; i < c2h_evt->plen; i++) {
- ret = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
- sizeof(*c2h_evt) + i, c2h_evt->payload + i);
- if (ret) {
- ret = _FAIL;
- goto clear_evt;
- }
- }
-
- ret = _SUCCESS;
-
-clear_evt:
- /*
- * Clear event to notify FW we have read the command.
- * If this field isn't clear, the FW won't update the next
- * command message.
- */
- rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
-exit:
- return ret;
-}
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index 54cc3d7789cd..38f357e8aeda 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -3,38 +3,38 @@
#include "../include/drv_types.h"
-static u8 odm_QueryRxPwrPercentage(s8 AntPower)
+static u8 odm_query_rxpwrpercentage(s8 antpower)
{
- if ((AntPower <= -100) || (AntPower >= 20))
- return 0;
- else if (AntPower >= 0)
- return 100;
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
else
- return 100 + AntPower;
+ return 100 + antpower;
}
-static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
+static s32 odm_signal_scale_mapping(struct odm_dm_struct *dm_odm, s32 currsig)
{
- s32 RetSig = 0;
-
- if (CurrSig >= 51 && CurrSig <= 100)
- RetSig = 100;
- else if (CurrSig >= 41 && CurrSig <= 50)
- RetSig = 80 + ((CurrSig - 40) * 2);
- else if (CurrSig >= 31 && CurrSig <= 40)
- RetSig = 66 + (CurrSig - 30);
- else if (CurrSig >= 21 && CurrSig <= 30)
- RetSig = 54 + (CurrSig - 20);
- else if (CurrSig >= 10 && CurrSig <= 20)
- RetSig = 42 + (((CurrSig - 10) * 2) / 3);
- else if (CurrSig >= 5 && CurrSig <= 9)
- RetSig = 22 + (((CurrSig - 5) * 3) / 2);
- else if (CurrSig >= 1 && CurrSig <= 4)
- RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ s32 retsig;
+
+ if (currsig >= 51 && currsig <= 100)
+ retsig = 100;
+ else if (currsig >= 41 && currsig <= 50)
+ retsig = 80 + ((currsig - 40) * 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 10 && currsig <= 20)
+ retsig = 42 + (((currsig - 10) * 2) / 3);
+ else if (currsig >= 5 && currsig <= 9)
+ retsig = 22 + (((currsig - 5) * 3) / 2);
+ else if (currsig >= 1 && currsig <= 4)
+ retsig = 6 + (((currsig - 1) * 3) / 2);
else
- RetSig = CurrSig;
+ retsig = currsig;
- return RetSig;
+ return retsig;
}
static u8 odm_evm_db_to_percentage(s8 value)
@@ -117,7 +117,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
break;
}
rx_pwr_all += 6;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
if (!cck_highpwr) {
if (PWDB_ALL >= 80)
PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80;
@@ -162,7 +162,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxPwr[i] = rx_pwr[i];
/* Translate DBM to percentage. */
- RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
+ RSSI = odm_query_rxpwrpercentage(rx_pwr[i]);
total_rssi += RSSI;
pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
@@ -173,7 +173,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL = odm_query_rxpwrpercentage(rx_pwr_all);
pPhyInfo->RxPWDBAll = PWDB_ALL;
pPhyInfo->RxPower = rx_pwr_all;
@@ -200,10 +200,10 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* UI BSS List signal strength(in percentage), make it good looking, from 0~100. */
/* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
if (isCCKrate) {
- pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
+ pPhyInfo->SignalStrength = (u8)(odm_signal_scale_mapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
} else {
if (rf_rx_num != 0)
- pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, total_rssi /= rf_rx_num));
+ pPhyInfo->SignalStrength = (u8)(odm_signal_scale_mapping(dm_odm, total_rssi /= rf_rx_num));
}
/* For 88E HW Antenna Diversity */
@@ -347,8 +347,3 @@ void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus, pPktinfo, adapt);
odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
}
-
-enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm)
-{
- return ODM_ReadAndConfig_RadioA_1T_8188E(dm_odm);
-}
diff --git a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
deleted file mode 100644
index 0fa17a99f9e9..000000000000
--- a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/drv_types.h"
-
-static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, u32 RegAddr)
-{
- if (Addr == 0xffe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RegAddr, bRFRegOffsetMask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
-
-void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
-{
- u32 content = 0x1000; /* RF_Content: radioa_txt */
- u32 maskforPhySet = (u32)(content & 0xE000);
-
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, Addr | maskforPhySet);
-}
-
-void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
-{
- rtw_write8(pDM_Odm->Adapter, Addr, Data);
-}
-
-void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
-{
- rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
-}
-
-void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data)
-{
- if (Addr == 0xfe)
- msleep(50);
- else if (Addr == 0xfd)
- mdelay(5);
- else if (Addr == 0xfc)
- mdelay(1);
- else if (Addr == 0xfb)
- udelay(50);
- else if (Addr == 0xfa)
- udelay(5);
- else if (Addr == 0xf9)
- udelay(1);
- else
- storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
-}
-
-void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
-{
- if (Addr == 0xfe) {
- msleep(50);
- } else if (Addr == 0xfd) {
- mdelay(5);
- } else if (Addr == 0xfc) {
- mdelay(1);
- } else if (Addr == 0xfb) {
- udelay(50);
- } else if (Addr == 0xfa) {
- udelay(5);
- } else if (Addr == 0xf9) {
- udelay(1);
- } else {
- if (Addr == 0xa24)
- pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
-
- /* Add 1us delay between BB/RF register setting. */
- udelay(1);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index b01ee1695fee..8310d7f53982 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -5,8 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
#include "../include/rtw_ioctl_set.h"
#include "../include/rtl8188e_hal.h"
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 5b8f1a912bbb..158260547f2b 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -526,43 +526,38 @@ void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _size_byte, u8 *pbuf)
Hal_EfuseReadEFuse88E(Adapter, 0, _size_byte, pbuf);
}
-static void dump_chip_info(struct HAL_VERSION chip_vers)
+static void dump_chip_info(struct adapter *adapter, struct HAL_VERSION chip_vers)
{
- uint cnt = 0;
- char buf[128];
-
- cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
- cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
- "Normal_Chip" : "Test_Chip");
- cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
- "TSMC" : "UMC");
+ struct net_device *netdev = adapter->pnetdev;
+ char *cut = NULL;
+ char buf[25];
switch (chip_vers.CUTVersion) {
case A_CUT_VERSION:
- cnt += sprintf((buf + cnt), "A_CUT_");
+ cut = "A_CUT";
break;
case B_CUT_VERSION:
- cnt += sprintf((buf + cnt), "B_CUT_");
+ cut = "B_CUT";
break;
case C_CUT_VERSION:
- cnt += sprintf((buf + cnt), "C_CUT_");
+ cut = "C_CUT";
break;
case D_CUT_VERSION:
- cnt += sprintf((buf + cnt), "D_CUT_");
+ cut = "D_CUT";
break;
case E_CUT_VERSION:
- cnt += sprintf((buf + cnt), "E_CUT_");
+ cut = "E_CUT";
break;
default:
- cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_", chip_vers.CUTVersion);
+ snprintf(buf, sizeof(buf), "UNKNOWN_CUT(%d)", chip_vers.CUTVersion);
+ cut = buf;
break;
}
- cnt += sprintf((buf + cnt), "1T1R_");
-
- cnt += sprintf((buf + cnt), "RomVer(%d)\n", 0);
-
- pr_info("%s", buf);
+ netdev_dbg(netdev, "Chip Version Info: CHIP_8188E_%s_%s_%s_1T1R_RomVer(%d)\n",
+ IS_NORMAL_CHIP(chip_vers) ? "Normal_Chip" : "Test_Chip",
+ IS_CHIP_VENDOR_TSMC(chip_vers) ? "TSMC" : "UMC",
+ cut, 0);
}
void rtl8188e_read_chip_version(struct adapter *padapter)
@@ -581,7 +576,7 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
- dump_chip_info(ChipVersion);
+ dump_chip_info(padapter, ChipVersion);
pHalData->VersionID = ChipVersion;
}
@@ -688,6 +683,7 @@ Hal_EfuseParseIDCode88E(
)
{
struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
+ struct net_device *netdev = padapter->pnetdev;
u16 EEPROMId;
/* Check 0x8129 again for making sure autoload status!! */
@@ -699,7 +695,7 @@ Hal_EfuseParseIDCode88E(
pEEPROM->bautoload_fail_flag = false;
}
- pr_info("EEPROM ID = 0x%04x\n", EEPROMId);
+ netdev_dbg(netdev, "EEPROM ID = 0x%04x\n", EEPROMId);
}
static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, u8 *PROMContent, bool AutoLoadFail)
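
Both hunks in this file switch from pr_info()/sprintf() to netdev_dbg(), which ties the message to the net_device, goes through dynamic debug, and lets one bounded format string replace the hand-built buffer. A sketch in the same style:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void log_chip_cut(struct net_device *netdev, int cut)
{
	static const char * const cut_names[] = { "A", "B", "C", "D", "E" };

	if (cut >= 0 && cut < (int)ARRAY_SIZE(cut_names))
		netdev_dbg(netdev, "chip cut: %s_CUT\n", cut_names[cut]);
	else
		netdev_dbg(netdev, "chip cut: UNKNOWN_CUT(%d)\n", cut);
}
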
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index dea6d915a1f4..532c63bce0bf 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -12,26 +12,12 @@
/* 1. BB register R/W API */
/* */
-/**
-* Function: phy_CalculateBitShift
-*
-* OverView: Get shifted position of the BitMask
-*
-* Input:
-* u32 BitMask,
-*
-* Output: none
-* Return: u32 Return the shift bit bit position of the mask
-*/
-static u32 phy_CalculateBitShift(u32 BitMask)
+/* Get shifted position of the bit mask */
+static u32 phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((BitMask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
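
phy_calculate_bit_shift() above now uses ffs(), which returns the 1-based index of the least significant set bit (0 for an empty mask), so "ffs(mask) - 1" is the shift that right-aligns a masked register field. A sketch with a worked value:

#include <linux/bitops.h>
#include <linux/types.h>

static u32 field_get(u32 reg, u32 mask)
{
	int shift = ffs(mask);

	if (!shift)
		return 0;			/* empty mask: nothing to extract */

	return (reg & mask) >> (shift - 1);
}

/* Example: field_get(0x12345678, 0x00FF0000) == 0x34. */
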
/**
@@ -62,7 +48,7 @@ rtl8188e_PHY_QueryBBReg(
if (res)
return 0;
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
ReturnValue = (OriginalValue & BitMask) >> BitShift;
return ReturnValue;
}
@@ -95,7 +81,7 @@ void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u3
if (res)
return;
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
}
@@ -267,7 +253,7 @@ u32 rtl8188e_PHY_QueryRFReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask)
Original_Value = phy_RFSerialRead(Adapter, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
Readback_Value = (Original_Value & BitMask) >> BitShift;
return Readback_Value;
}
@@ -302,7 +288,7 @@ rtl8188e_PHY_SetRFReg(
/* RF data is 12 bits only */
if (BitMask != bRFRegOffsetMask) {
Original_Value = phy_RFSerialRead(Adapter, RegAddr);
- BitShift = phy_CalculateBitShift(BitMask);
+ BitShift = phy_calculate_bit_shift(BitMask);
Data = ((Original_Value & (~BitMask)) | (Data << BitShift));
}
@@ -337,7 +323,7 @@ s32 PHY_MACConfig8188E(struct adapter *Adapter)
/* */
/* Config MAC */
/* */
- if (HAL_STATUS_FAILURE == ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv))
rtStatus = _FAIL;
/* 2010.07.13 AMPDU aggregation number B */
@@ -469,7 +455,7 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
/* 1. Read PHY_REG.TXT BB INIT!! */
/* We will separate as 88C / 92C according to chip version */
/* */
- if (HAL_STATUS_FAILURE == ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv))
return _FAIL;
/* 2. If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
@@ -479,7 +465,7 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
}
/* 3. BB AGC table Initialization */
- if (HAL_STATUS_FAILURE == ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv))
return _FAIL;
return _SUCCESS;
@@ -521,15 +507,6 @@ PHY_BBConfig8188E(
return rtStatus;
}
-int PHY_RFConfig8188E(struct adapter *Adapter)
-{
- int rtStatus = _SUCCESS;
-
- /* RF config */
- rtStatus = PHY_RF6052_Config8188E(Adapter);
- return rtStatus;
-}
-
static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
u8 *BW40PowerLevel)
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
index d043b7bc4142..e5ec6e563fbd 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
@@ -366,7 +366,7 @@ rtl8188e_PHY_RF6052SetOFDMTxPower(
}
}
-static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
+int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
{
struct bb_reg_def *pPhyReg;
struct hal_data_8188e *pHalData = &Adapter->haldata;
@@ -396,7 +396,7 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF from configuration file----*/
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv))
+ if (ODM_ReadAndConfig_RadioA_1T_8188E(&pHalData->odmpriv))
rtStatus = _FAIL;
/*----Restore RFENV control type----*/;
@@ -404,14 +404,3 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
return rtStatus;
}
-
-int PHY_RF6052_Config8188E(struct adapter *Adapter)
-{
- int rtStatus = _SUCCESS;
-
- /* */
- /* Config BB and RF */
- /* */
- rtStatus = phy_RF6052_Config_ParaFile(Adapter);
- return rtStatus;
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_xmit.c b/drivers/staging/r8188eu/hal/rtl8188e_xmit.c
deleted file mode 100644
index 46b871f3f631..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_xmit.c
+++ /dev/null
@@ -1,22 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_XMIT_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-
-void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
-{
- struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
-
- if (txrpt_ccx->int_ccx) {
- if (txrpt_ccx->pkt_ok)
- rtw_ack_tx_done(&adapter->xmitpriv,
- RTW_SCTX_DONE_SUCCESS);
- else
- rtw_ack_tx_done(&adapter->xmitpriv,
- RTW_SCTX_DONE_CCX_PKT_FAIL);
- }
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
deleted file mode 100644
index def6d0d6e402..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
+++ /dev/null
@@ -1,91 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188EU_RECV_C_
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/mlme_osdep.h"
-
-#include "../include/usb_ops.h"
-#include "../include/wifi.h"
-
-#include "../include/rtl8188e_hal.h"
-
-int rtl8188eu_init_recv_priv(struct adapter *padapter)
-{
- struct recv_priv *precvpriv = &padapter->recvpriv;
- int i, res = _SUCCESS;
- struct recv_buf *precvbuf;
-
- tasklet_init(&precvpriv->recv_tasklet,
- rtl8188eu_recv_tasklet,
- (unsigned long)padapter);
-
- /* init recv_buf */
- rtw_init_queue(&precvpriv->free_recv_buf_queue);
-
- precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
- GFP_KERNEL);
- if (!precvpriv->pallocated_recv_buf) {
- res = _FAIL;
- goto exit;
- }
-
- precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
- if (res == _FAIL)
- break;
- precvbuf->adapter = padapter;
- precvbuf++;
- }
- precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
- skb_queue_head_init(&precvpriv->rx_skb_queue);
- {
- int i;
- size_t tmpaddr = 0;
- size_t alignment = 0;
- struct sk_buff *pskb = NULL;
-
- skb_queue_head_init(&precvpriv->free_recv_skb_queue);
-
- for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
- pskb = __netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
- if (pskb) {
- pskb->dev = padapter->pnetdev;
- tmpaddr = (size_t)pskb->data;
- alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
- skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
-
- skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
- }
- pskb = NULL;
- }
- }
-exit:
- return res;
-}
-
-void rtl8188eu_free_recv_priv(struct adapter *padapter)
-{
- int i;
- struct recv_buf *precvbuf;
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- precvbuf = (struct recv_buf *)precvpriv->precv_buf;
-
- for (i = 0; i < NR_RECVBUFF; i++) {
- rtw_os_recvbuf_resource_free(padapter, precvbuf);
- precvbuf++;
- }
-
- kfree(precvpriv->pallocated_recv_buf);
-
- skb_queue_purge(&precvpriv->rx_skb_queue);
-
- skb_queue_purge(&precvpriv->free_recv_skb_queue);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index bdfa51949289..8e4a5acc0b18 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -431,7 +431,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
/* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_os_xmit_complete(adapt, pxmitframe);
+ rtw_xmit_complete(adapt, pxmitframe);
/* 3 2. aggregate same priority and same DA(AP or STA) frames */
pfirstframe = pxmitframe;
@@ -501,7 +501,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
/* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_os_xmit_complete(adapt, pxmitframe);
+ rtw_xmit_complete(adapt, pxmitframe);
/* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index ff074d246dab..d28b4dc2a767 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -13,40 +13,77 @@
#include "../include/usb_osintf.h"
#include "../include/HalPwrSeqCmd.h"
-static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
+static void one_out_pipe(struct adapter *adapter)
{
- struct hal_data_8188e *haldata = &adapt->haldata;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
- switch (NumOutPipe) {
- case 3:
- haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_LQ | TX_SELE_NQ;
- haldata->OutEpNumber = 3;
- break;
- case 2:
- haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_NQ;
- haldata->OutEpNumber = 2;
- break;
- case 1:
- haldata->OutEpQueueSel = TX_SELE_HQ;
- haldata->OutEpNumber = 1;
- break;
- default:
- break;
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+}
+
+static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ /* 0:H, 1:L */
+
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+
+ if (wifi_cfg) {
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+ } else {
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
}
}
-static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumOutPipe)
+static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ /* 0:H, 1:N, 2:L */
- _ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
- return Hal_MappingOutPipe(adapt, NumOutPipe);
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+
+ pdvobjpriv->Queue2Pipe[3] = wifi_cfg ?
+ pdvobjpriv->RtOutPipe[1] : pdvobjpriv->RtOutPipe[2];/* BK */
}
-void rtl8188eu_interface_configure(struct adapter *adapt)
+int rtl8188eu_interface_configure(struct adapter *adapt)
{
+ struct registry_priv *pregistrypriv = &adapt->registrypriv;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
+ bool wifi_cfg = pregistrypriv->wifi_spec;
- HalUsbSetQueuePipeMapping8188EUsb(adapt, pdvobjpriv->RtNumOutPipes);
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ switch (pdvobjpriv->RtNumOutPipes) {
+ case 3:
+ haldata->out_ep_extra_queues = TX_SELE_LQ | TX_SELE_NQ;
+ three_out_pipe(adapt, wifi_cfg);
+ break;
+ case 2:
+ haldata->out_ep_extra_queues = TX_SELE_NQ;
+ two_out_pipe(adapt, wifi_cfg);
+ break;
+ case 1:
+ one_out_pipe(adapt);
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
}
u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
@@ -116,32 +153,24 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u32 numHQ = 0;
- u32 numLQ = 0;
- u32 numNQ = 0;
- u32 numPubQ;
- u32 value32;
- u8 value8;
- bool bWiFiConfig = pregistrypriv->wifi_spec;
-
- if (bWiFiConfig) {
- if (haldata->OutEpQueueSel & TX_SELE_HQ)
- numHQ = 0x29;
+ u8 numLQ = 0;
+ u8 numNQ = 0;
+ u8 numPubQ;
- if (haldata->OutEpQueueSel & TX_SELE_LQ)
+ if (pregistrypriv->wifi_spec) {
+ if (haldata->out_ep_extra_queues & TX_SELE_LQ)
numLQ = 0x1C;
/* NOTE: This step shall be proceed before writing REG_RQPN. */
- if (haldata->OutEpQueueSel & TX_SELE_NQ)
+ if (haldata->out_ep_extra_queues & TX_SELE_NQ)
numNQ = 0x1C;
- value8 = (u8)_NPQ(numNQ);
- rtw_write8(Adapter, REG_RQPN_NPQ, value8);
- numPubQ = 0xA8 - numHQ - numLQ - numNQ;
+ rtw_write8(Adapter, REG_RQPN_NPQ, numNQ);
+
+ numPubQ = 0xA8 - NUM_HQ - numLQ - numNQ;
/* TX DMA */
- value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
- rtw_write32(Adapter, REG_RQPN, value32);
+ rtw_write32(Adapter, REG_RQPN, LD_RQPN | numPubQ << 16 | numLQ << 8 | NUM_HQ);
} else {
rtw_write16(Adapter, REG_RQPN_NPQ, 0x0000);/* Just follow MP Team,??? Georgia 03/28 */
rtw_write16(Adapter, REG_RQPN_NPQ, 0x0d);
@@ -187,69 +216,20 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
}
-static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = &Adapter->haldata;
-
- u16 value = 0;
- switch (haldata->OutEpQueueSel) {
- case TX_SELE_HQ:
- value = QUEUE_HIGH;
- break;
- case TX_SELE_LQ:
- value = QUEUE_LOW;
- break;
- case TX_SELE_NQ:
- value = QUEUE_NORMAL;
- break;
- default:
- break;
- }
- _InitNormalChipRegPriority(Adapter, value, value, value, value,
- value, value);
-}
-
static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
- u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
- u16 valueHi = 0;
- u16 valueLow = 0;
-
- switch (haldata->OutEpQueueSel) {
- case (TX_SELE_HQ | TX_SELE_LQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_LOW;
- break;
- case (TX_SELE_NQ | TX_SELE_LQ):
- valueHi = QUEUE_NORMAL;
- valueLow = QUEUE_LOW;
- break;
- case (TX_SELE_HQ | TX_SELE_NQ):
- valueHi = QUEUE_HIGH;
- valueLow = QUEUE_NORMAL;
- break;
- default:
- break;
- }
+ u16 bkQ, voQ;
if (!pregistrypriv->wifi_spec) {
- beQ = valueLow;
- bkQ = valueLow;
- viQ = valueHi;
- voQ = valueHi;
- mgtQ = valueHi;
- hiQ = valueHi;
+ bkQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
} else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
- beQ = valueLow;
- bkQ = valueHi;
- viQ = valueHi;
- voQ = valueLow;
- mgtQ = valueHi;
- hiQ = valueHi;
+ bkQ = QUEUE_HIGH;
+ voQ = QUEUE_NORMAL;
}
- _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+ _InitNormalChipRegPriority(Adapter, QUEUE_NORMAL, bkQ, QUEUE_HIGH,
+ voQ, QUEUE_HIGH, QUEUE_HIGH);
}
static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
@@ -277,11 +257,12 @@ static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
static void _InitQueuePriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = &Adapter->haldata;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(Adapter);
- switch (haldata->OutEpNumber) {
+ switch (pdvobjpriv->RtNumOutPipes) {
case 1:
- _InitNormalChipOneOutEpPriority(Adapter);
+ _InitNormalChipRegPriority(Adapter, QUEUE_HIGH, QUEUE_HIGH, QUEUE_HIGH,
+ QUEUE_HIGH, QUEUE_HIGH, QUEUE_HIGH);
break;
case 2:
_InitNormalChipTwoOutEpPriority(Adapter);
@@ -515,8 +496,7 @@ static int _InitBeaconParameters(struct adapter *Adapter)
return 0;
}
-static void _BeaconFunctionEnable(struct adapter *Adapter,
- bool Enable, bool Linked)
+static void _BeaconFunctionEnable(struct adapter *Adapter)
{
rtw_write8(Adapter, REG_BCN_CTRL, (BIT(4) | BIT(3) | BIT(1)));
@@ -567,7 +547,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
{
u8 value8 = 0;
u16 value16;
- u8 txpktbuf_bndy;
u32 status = _SUCCESS;
int res;
struct hal_data_8188e *haldata = &Adapter->haldata;
@@ -600,13 +579,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* HW GPIO pin. Before PHY_RFConfig8192C. */
/* 2010/08/26 MH If Efuse does not support sective suspend then disable the function. */
- if (!pregistrypriv->wifi_spec) {
- txpktbuf_bndy = TX_PAGE_BOUNDARY_88E;
- } else {
- /* for WMM */
- txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
- }
-
_InitQueueReservedPage(Adapter);
_InitQueuePriority(Adapter);
_InitPageBoundary(Adapter);
@@ -639,7 +611,7 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
if (status == _FAIL)
goto exit;
- status = PHY_RFConfig8188E(Adapter);
+ status = phy_RF6052_Config_ParaFile(Adapter);
if (status == _FAIL)
goto exit;
@@ -647,9 +619,9 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
if (status == _FAIL)
goto exit;
- _InitTxBufferBoundary(Adapter, txpktbuf_bndy);
+ _InitTxBufferBoundary(Adapter, TX_PAGE_BOUNDARY_88E);
- status = InitLLTTable(Adapter, txpktbuf_bndy);
+ status = InitLLTTable(Adapter, TX_PAGE_BOUNDARY_88E);
if (status == _FAIL)
goto exit;
@@ -922,7 +894,7 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
}
}
-void ReadAdapterInfo8188EU(struct adapter *Adapter)
+int ReadAdapterInfo8188EU(struct adapter *Adapter)
{
struct eeprom_priv *eeprom = &Adapter->eeprompriv;
struct led_priv *ledpriv = &Adapter->ledpriv;
@@ -933,13 +905,13 @@ void ReadAdapterInfo8188EU(struct adapter *Adapter)
/* check system boot selection */
res = rtw_read8(Adapter, REG_9346CR, &eeValue);
if (res)
- return;
+ return res;
eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
efuse_buf = kmalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
if (!efuse_buf)
- return;
+ return -ENOMEM;
memset(efuse_buf, 0xFF, EFUSE_MAP_LEN_88E);
if (!(eeValue & BOOT_FROM_EEPROM) && !eeprom->bautoload_fail_flag) {
@@ -961,6 +933,7 @@ void ReadAdapterInfo8188EU(struct adapter *Adapter)
ledpriv->bRegUseLed = true;
kfree(efuse_buf);
+ return 0;
}
void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
@@ -1069,7 +1042,7 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
rtw_write8(adapt, REG_RXTSF_OFFSET_CCK, 0x50);
rtw_write8(adapt, REG_RXTSF_OFFSET_OFDM, 0x50);
- _BeaconFunctionEnable(adapt, true, true);
+ _BeaconFunctionEnable(adapt);
rtw_resume_tx_beacon(adapt);
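The hunks above replace the OutEpQueueSel/OutEpNumber bookkeeping with direct Queue2Pipe[] assignments keyed on RtNumOutPipes, and rtl8188eu_interface_configure() now reports an unsupported endpoint count as -ENXIO instead of silently carrying on. As a stand-alone illustration (not code added by the patch), the mapping that one_out_pipe()/two_out_pipe() establish can be restated like this; pipe 0 is the high-priority bulk-out endpoint, pipe 1 the low-priority one:

	/* Illustration only, derived from two_out_pipe() above.
	 * Returns the RtOutPipe[] index used for a given access class. */
	static int example_two_pipe_map(int ac /* 0=VO 1=VI 2=BE 3=BK */, bool wifi_spec)
	{
		static const int normal_map[4] = { 0, 0, 1, 1 };	/* VO VI BE BK */
		static const int wmm_map[4]    = { 1, 0, 1, 0 };

		return wifi_spec ? wmm_map[ac] : normal_map[ac];
	}

BCN, MGT, HIGH and TXCMD (Queue2Pipe[4..7]) always go through RtOutPipe[0], and the single-pipe case maps every access class to RtOutPipe[0].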
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index c1a4d023f627..7c72f5e04d9b 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -5,7 +5,6 @@
#include "../include/drv_types.h"
#include "../include/osdep_intf.h"
#include "../include/usb_ops.h"
-#include "../include/recv_osdep.h"
#include "../include/rtl8188e_hal.h"
static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
@@ -190,6 +189,20 @@ int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
return RTW_STATUS_CODE(ret);
}
+static void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
+{
+ struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+
+ if (txrpt_ccx->int_ccx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
+
static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
{
u8 *pbuf;
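handle_txrpt_ccx_88e() becomes a local helper of the USB receive path here (its old declaration is dropped from rtl8188e_xmit.h further down). It closes the loop for senders that requested a hardware TX report: such a sender parks in rtw_ack_tx_wait() until this helper calls rtw_ack_tx_done() with either RTW_SCTX_DONE_SUCCESS or RTW_SCTX_DONE_CCX_PKT_FAIL. A hedged sketch of such a waiter; the concrete call sites and the timeout value are not part of this hunk and are illustrative only:

	static int example_wait_for_tx_report(struct adapter *adapter)
	{
		struct xmit_priv *pxmitpriv = &adapter->xmitpriv;

		/* ...frame already queued with its TX-report bit set... */

		/* unblocks once handle_txrpt_ccx_88e() reports the outcome */
		return rtw_ack_tx_wait(pxmitpriv, 500 /* ms, illustrative */);
	}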
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
index 9e6f2361b090..4a0b782c33be 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
@@ -80,7 +80,6 @@ void rtl8188e_PHY_SetRFReg(struct adapter *adapter, u32 regaddr, u32 mask, u32 d
/* MAC/BB/RF HAL config */
int PHY_MACConfig8188E(struct adapter *adapter);
int PHY_BBConfig8188E(struct adapter *adapter);
-int PHY_RFConfig8188E(struct adapter *adapter);
/* BB TX Power R/W */
void PHY_SetTxPowerLevel8188E(struct adapter *adapter, u8 channel);
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h b/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
index 8270fdbc2844..0a290bc31c4d 100644
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
+++ b/drivers/staging/r8188eu/include/HalHWImg8188E_BB.h
@@ -10,13 +10,13 @@
* AGC_TAB_1T.TXT
******************************************************************************/
-enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
+int ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
/******************************************************************************
* PHY_REG_1T.TXT
******************************************************************************/
-enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
+int ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
/******************************************************************************
* PHY_REG_PG.TXT
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h b/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
index 391c1754b0b6..b3d67c1a8050 100644
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
+++ b/drivers/staging/r8188eu/include/HalHWImg8188E_MAC.h
@@ -7,7 +7,6 @@
/******************************************************************************
* MAC_REG.TXT
******************************************************************************/
-
-enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
+int ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h b/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
index 0c67c3df20b9..880feadb4340 100644
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
+++ b/drivers/staging/r8188eu/include/HalHWImg8188E_RF.h
@@ -8,6 +8,6 @@
* RadioA_1T.TXT
******************************************************************************/
-enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
+int ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index bba88a0ede61..1bd0c8f3a358 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -10,8 +10,6 @@
#ifndef __DRV_TYPES_H__
#define __DRV_TYPES_H__
-#define DRV_NAME "r8188eu"
-
#include "osdep_service.h"
#include "wlan_bssdef.h"
#include "rtw_ht.h"
@@ -36,10 +34,9 @@
#include "rtl8188e_hal.h"
#include "rtw_fw.h"
-#define DRIVERVERSION "v4.1.4_6773.20130222"
+#define FW_RTL8188EU "rtlwifi/rtl8188eufw.bin"
struct registry_priv {
- u8 chip_version;
u8 rfintfs;
u8 lbkmode;
u8 hci;
@@ -222,7 +219,7 @@ struct adapter {
#define adapter_to_dvobj(adapter) (adapter->dvobj)
-int rtw_handle_dualmac(struct adapter *adapter, bool init);
+void rtw_handle_dualmac(struct adapter *adapter, bool init);
static inline u8 *myid(struct eeprom_priv *peepriv)
{
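DRIVERVERSION goes away and FW_RTL8188EU becomes the single definition of the firmware path; os_intfs.c below switches from MODULE_VERSION(DRIVERVERSION) to MODULE_FIRMWARE(FW_RTL8188EU) accordingly. The driver's actual download code is not part of this patch, but as a minimal sketch of how such a define is typically consumed (the function name and surrounding logic are illustrative, not the driver's):

	#include <linux/firmware.h>

	static int example_request_fw(struct device *dev)
	{
		const struct firmware *fw;
		int ret;

		ret = request_firmware(&fw, FW_RTL8188EU, dev);
		if (ret)
			return ret;	/* rtlwifi/rtl8188eufw.bin not found in the firmware search path */

		/* ...push fw->data (fw->size bytes) to the chip... */

		release_firmware(fw);
		return 0;
	}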
diff --git a/drivers/staging/r8188eu/include/hal_com.h b/drivers/staging/r8188eu/include/hal_com.h
index d7e333f6ce39..cd3f845e146a 100644
--- a/drivers/staging/r8188eu/include/hal_com.h
+++ b/drivers/staging/r8188eu/include/hal_com.h
@@ -143,8 +143,4 @@ u8 MRateToHwRate(u8 rate);
void HalSetBrateCfg(struct adapter *Adapter, u8 *mBratesOS, u16 *pBrateCfg);
-bool Hal_MappingOutPipe(struct adapter *pAdapter, u8 NumOutPipe);
-
-s32 c2h_evt_read(struct adapter *adapter, u8 *buf);
-
#endif /* __HAL_COMMON_H__ */
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index ab6856d8a090..ac6e3f95c5b7 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -10,8 +10,8 @@
typedef s32 (*c2h_id_filter)(u8 id);
-void rtl8188eu_interface_configure(struct adapter *adapt);
-void ReadAdapterInfo8188EU(struct adapter *Adapter);
+int rtl8188eu_interface_configure(struct adapter *adapt);
+int ReadAdapterInfo8188EU(struct adapter *Adapter);
void rtl8188eu_init_default_value(struct adapter *adapt);
void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet);
u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
@@ -39,7 +39,6 @@ void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
void rtw_hal_clone_data(struct adapter *dst_adapt,
struct adapter *src_adapt);
-void indicate_wx_scan_complete_event(struct adapter *padapter);
u8 rtw_do_join(struct adapter *padapter);
#endif /* __HAL_INTF_H__ */
diff --git a/drivers/staging/r8188eu/include/ioctl_cfg80211.h b/drivers/staging/r8188eu/include/ioctl_cfg80211.h
deleted file mode 100644
index 738f645f9bbc..000000000000
--- a/drivers/staging/r8188eu/include/ioctl_cfg80211.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#ifndef __IOCTL_CFG80211_H__
-#define __IOCTL_CFG80211_H__
-
-struct rtw_wdev_invit_info {
- u8 token;
- u8 flags;
- u8 status;
- u8 req_op_ch;
- u8 rsp_op_ch;
-};
-
-#define rtw_wdev_invit_info_init(invit_info) \
- do { \
- (invit_info)->token = 0; \
- (invit_info)->flags = 0x00; \
- (invit_info)->status = 0xff; \
- (invit_info)->req_op_ch = 0; \
- (invit_info)->rsp_op_ch = 0; \
- } while (0)
-
-struct rtw_wdev_priv {
- struct wireless_dev *rtw_wdev;
-
- struct adapter *padapter;
-
- struct cfg80211_scan_request *scan_request;
- spinlock_t scan_req_lock;
-
- struct net_device *pmon_ndev;/* for monitor interface */
- char ifname_mon[IFNAMSIZ + 1]; /* name of monitor interface */
-
- u8 p2p_enabled;
-
- u8 provdisc_req_issued;
-
- struct rtw_wdev_invit_info invit_info;
-
- u8 bandroid_scan;
- bool block;
- bool power_mgmt;
-};
-
-#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
-
-#define wiphy_to_wdev(x) \
-((struct wireless_dev *)(((struct rtw_wdev_priv *)wiphy_priv(x))->rtw_wdev))
-
-int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
-void rtw_wdev_free(struct wireless_dev *wdev);
-void rtw_wdev_unregister(struct wireless_dev *wdev);
-
-void rtw_cfg80211_init_wiphy(struct adapter *padapter);
-
-void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
-
-void rtw_cfg80211_indicate_connect(struct adapter *padapter);
-void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
-void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
- bool aborted);
-
-void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
- unsigned char *da,
- unsigned short reason);
-
-void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
- const u8 *buf, size_t len);
-void rtw_cfg80211_rx_p2p_action_public(struct adapter *padapter,
- u8 *pmgmt_frame, uint frame_len);
-void rtw_cfg80211_rx_action_p2p(struct adapter *padapter, u8 *pmgmt_frame,
- uint frame_len);
-void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame,
- uint frame_len, const char *msg);
-
-int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net,
- char *buf, int len, int type);
-
-bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
-
-#define rtw_cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp) \
- cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp)
-#define rtw_cfg80211_send_rx_assoc(dev, bss, buf, len) \
- cfg80211_send_rx_assoc(dev, bss, buf, len)
-
-#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/r8188eu/include/mlme_osdep.h b/drivers/staging/r8188eu/include/mlme_osdep.h
deleted file mode 100644
index 5b9f688f9424..000000000000
--- a/drivers/staging/r8188eu/include/mlme_osdep.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __MLME_OSDEP_H_
-#define __MLME_OSDEP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-void rtw_init_mlme_timer(struct adapter *padapter);
-void rtw_os_indicate_disconnect(struct adapter *adapter);
-void rtw_os_indicate_connect(struct adapter *adapter);
-void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted);
-void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie);
-
-void rtw_reset_securitypriv(struct adapter *adapter);
-void indicate_wx_scan_complete_event(struct adapter *padapter);
-
-#endif /* _MLME_OSDEP_H_ */
diff --git a/drivers/staging/r8188eu/include/odm_HWConfig.h b/drivers/staging/r8188eu/include/odm_HWConfig.h
index b37962edb2ed..3f7185780e87 100644
--- a/drivers/staging/r8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/r8188eu/include/odm_HWConfig.h
@@ -66,5 +66,4 @@ void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
struct odm_per_pkt_info *pPktinfo,
struct adapter *adapt);
-enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm);
#endif
diff --git a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
deleted file mode 100644
index 683fa4a07956..000000000000
--- a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_ODM_REGCONFIG_H_8188E
-#define __INC_ODM_REGCONFIG_H_8188E
-
-void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
- u32 Addr, u32 Data);
-
-void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data);
-
-void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data);
-
-void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data);
-
-void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Bitmask, u32 Data);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm_types.h b/drivers/staging/r8188eu/include/odm_types.h
index 08ba7a418ba8..76302df4b330 100644
--- a/drivers/staging/r8188eu/include/odm_types.h
+++ b/drivers/staging/r8188eu/include/odm_types.h
@@ -6,11 +6,6 @@
#define ODM_CE 0x04 /* BIT(2) */
-enum HAL_STATUS {
- HAL_STATUS_SUCCESS,
- HAL_STATUS_FAILURE,
-};
-
#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \
le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24))
#define SET_TX_DESC_ANTSEL_B_88E(__ptxdesc, __value) \
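Dropping enum HAL_STATUS here is the counterpart of the prototype changes above: the ODM_ReadAndConfig_* helpers now return int. Assuming they follow the 0 / negative-errno convention used elsewhere in this series (their new bodies are not shown in this patch), a caller conversion looks roughly like this illustrative sketch:

	static int example_odm_config(struct odm_dm_struct *odm)
	{
		int ret;

		ret = ODM_ReadAndConfig_MAC_REG_8188E(odm);
		if (ret)		/* formerly: ret == HAL_STATUS_FAILURE */
			return ret;

		ret = ODM_ReadAndConfig_PHY_REG_1T_8188E(odm);
		if (ret)
			return ret;

		return ODM_ReadAndConfig_AGC_TAB_1T_8188E(odm);
	}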
diff --git a/drivers/staging/r8188eu/include/osdep_intf.h b/drivers/staging/r8188eu/include/osdep_intf.h
index 0d7009269aab..36511c469546 100644
--- a/drivers/staging/r8188eu/include/osdep_intf.h
+++ b/drivers/staging/r8188eu/include/osdep_intf.h
@@ -39,6 +39,9 @@ The protection mechanism is through the pending queue.
u8 bio_timer_cancel;
};
+int netdev_open(struct net_device *pnetdev);
+int netdev_close(struct net_device *pnetdev);
+
u8 rtw_init_drv_sw(struct adapter *padapter);
u8 rtw_free_drv_sw(struct adapter *padapter);
u8 rtw_reset_drv_sw(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/recv_osdep.h b/drivers/staging/r8188eu/include/recv_osdep.h
deleted file mode 100644
index ca8a613508fd..000000000000
--- a/drivers/staging/r8188eu/include/recv_osdep.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RECV_OSDEP_H_
-#define __RECV_OSDEP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void _rtw_free_recv_priv(struct recv_priv *precvpriv);
-
-s32 rtw_recv_entry(struct recv_frame *precv_frame);
-int rtw_recv_indicatepkt(struct adapter *adapter, struct recv_frame *recv_frame);
-void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
-
-void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
-
-int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
-void rtw_free_recv_priv(struct recv_priv *precvpriv);
-
-int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
-int rtw_os_recvbuf_resource_free(struct adapter *adapt, struct recv_buf *buf);
-
-void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
-int _netdev_open(struct net_device *pnetdev);
-int netdev_open(struct net_device *pnetdev);
-int netdev_close(struct net_device *pnetdev);
-
-#endif /* */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index 5cd62b216720..ed4091e7cc7e 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -23,7 +23,6 @@
#include "HalHWImg8188E_MAC.h"
#include "HalHWImg8188E_RF.h"
#include "HalHWImg8188E_BB.h"
-#include "odm_RegConfig8188E.h"
#include "odm_RTL8188E.h"
#define DRVINFO_SZ 4 /* unit is 8bytes */
@@ -36,7 +35,6 @@
0x2400 /* 9k for 88E nornal chip , MaxRxBuff=10k-max(TxReportSize(64*8),
* WOLPattern(16*24)) */
-#define TX_SELE_HQ BIT(0) /* High Queue */
#define TX_SELE_LQ BIT(1) /* Low Queue */
#define TX_SELE_NQ BIT(2) /* Normal Queue */
@@ -51,12 +49,6 @@
#define TX_PAGE_BOUNDARY_88E (TX_TOTAL_PAGE_NUMBER_88E + 1)
-/* Note: For Normal Chip Setting ,modify later */
-#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER \
- TX_TOTAL_PAGE_NUMBER_88E /* 0xA9 , 0xb0=>176=>22k */
-#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
- (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
-
#include "HalVerDef.h"
#include "hal_com.h"
@@ -155,8 +147,7 @@ struct hal_data_8188e {
u8 AntDivCfg;
u8 TRxAntDivType;
- u8 OutEpQueueSel;
- u8 OutEpNumber;
+ u8 out_ep_extra_queues;
struct P2P_PS_Offload_t p2p_ps_offload;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_recv.h b/drivers/staging/r8188eu/include/rtl8188e_recv.h
index b752c5c06309..dc4f358f646d 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_recv.h
@@ -33,8 +33,6 @@ enum rx_packet_type {
HIS_REPORT,/* USB HISR RPT */
};
-s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
-void rtl8188eu_free_recv_priv(struct adapter * padapter);
void rtl8188eu_recv_tasklet(unsigned long priv);
void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
void update_recvframe_attrib_88e(struct recv_frame *fra, struct recv_stat *stat);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_rf.h b/drivers/staging/r8188eu/include/rtl8188e_rf.h
index 04556496baad..63ac0acc68fd 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_rf.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_rf.h
@@ -8,7 +8,7 @@
#define RF6052_MAX_REG 0x3F
#define RF6052_MAX_PATH 2
-int PHY_RF6052_Config8188E(struct adapter *Adapter);
+int phy_RF6052_Config_ParaFile(struct adapter *Adapter);
void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
enum ht_channel_width Bandwidth);
void rtl8188e_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *level);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index 9e7b1f89037c..e34619140e33 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -924,15 +924,9 @@ Current IOREG MAP
#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
/* 0x0200h ~ 0x027Fh TXDMA Configuration */
-/* 2RQPN */
-#define _HPQ(x) ((x) & 0xFF)
-#define _LPQ(x) (((x) & 0xFF) << 8)
-#define _PUBQ(x) (((x) & 0xFF) << 16)
-/* NOTE: in RQPN_NPQ register */
-#define _NPQ(x) ((x) & 0xFF)
-
-#define HPQ_PUBLIC_DIS BIT(24)
-#define LPQ_PUBLIC_DIS BIT(25)
+
+#define NUM_HQ 0x29
+
#define LD_RQPN BIT(31)
/* 2TDECTRL */
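With _HPQ()/_LPQ()/_PUBQ() and _NPQ() gone, _InitQueueReservedPage() earlier in this patch composes the REG_RQPN value by hand and writes numNQ to REG_RQPN_NPQ directly. Restating the field layout implied by the removed macros as a helper, purely for readability (an illustration, not code added by the patch):

	/* REG_RQPN: bits 0-7 high-queue pages, bits 8-15 low-queue pages,
	 * bits 16-23 public-queue pages, bit 31 LD_RQPN (latch new values). */
	static inline u32 example_rqpn(u8 hq, u8 lq, u8 pubq)
	{
		return LD_RQPN | pubq << 16 | lq << 8 | hq;
	}

example_rqpn(NUM_HQ, numLQ, numPubQ) yields the same value as the old _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN for the page counts used here.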
diff --git a/drivers/staging/r8188eu/include/rtl8188e_xmit.h b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
index 8adb672f7a07..6db7fabebea9 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
@@ -83,12 +83,6 @@
/* OFFSET 20 */
#define RTY_LMT_EN BIT(17)
-enum TXDESC_SC {
- SC_DONT_CARE = 0x00,
- SC_UPPER = 0x01,
- SC_LOWER = 0x02,
- SC_DUPLICATE = 0x03
-};
/* OFFSET 20 */
#define SGI BIT(6)
#define USB_TXAGG_NUM_SHT 24
@@ -147,6 +141,4 @@ bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
-void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
-
#endif /* __RTL8188E_XMIT_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_ap.h b/drivers/staging/r8188eu/include/rtw_ap.h
index 724229fe84aa..8b4134eb3095 100644
--- a/drivers/staging/r8188eu/include/rtw_ap.h
+++ b/drivers/staging/r8188eu/include/rtw_ap.h
@@ -10,8 +10,6 @@
/* external function */
void rtw_indicate_sta_assoc_event(struct adapter *padapter,
struct sta_info *psta);
-void rtw_indicate_sta_disassoc_event(struct adapter *padapter,
- struct sta_info *psta);
void init_mlme_ap_info(struct adapter *padapter);
void free_mlme_ap_info(struct adapter *padapter);
void update_beacon(struct adapter *padapter, u8 ie_id,
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index 6b6d560d7143..9a76aa85de94 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -730,9 +730,7 @@ Result:
#define H2C_CMD_OVERFLOW 0x06
#define H2C_RESERVED 0x07
-u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
- int ssid_num, struct rtw_ieee80211_channel *ch,
- int ch_num);
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num);
u8 rtw_createbss_cmd(struct adapter *padapter);
u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
index d6b0c1c2f9a2..8520f022a67f 100644
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ b/drivers/staging/r8188eu/include/rtw_led.h
@@ -21,20 +21,15 @@ enum LED_CTL_MODE {
};
enum LED_STATE_871x {
- LED_UNKNOWN = 0,
- RTW_LED_ON = 1,
RTW_LED_OFF = 2,
LED_BLINK_NORMAL = 3,
LED_BLINK_SLOWLY = 4,
LED_BLINK_SCAN = 6, /* LED is blinking during scanning period,
* the # of times to blink is depend on time
* for scanning. */
- LED_BLINK_StartToBlink = 8,/* Customzied for Sercomm Printer
- * Server case */
LED_BLINK_TXRX = 9,
LED_BLINK_WPS = 10, /* LED is blinkg during WPS communication */
LED_BLINK_WPS_STOP = 11,
- LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
};
struct led_priv {
@@ -43,8 +38,6 @@ struct led_priv {
bool bRegUseLed;
enum LED_STATE_871x CurrLedState; /* Current LED state. */
- enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
- * either RTW_LED_ON or RTW_LED_OFF are. */
bool bLedOn; /* true if LED is ON, false if LED is OFF. */
@@ -54,7 +47,6 @@ struct led_priv {
u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
- bool bLedNoLinkBlinkInProgress;
bool bLedLinkBlinkInProgress;
bool bLedScanBlinkInProgress;
struct delayed_work blink_work;
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index d81668498e46..b69989cbab21 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -5,7 +5,6 @@
#define __RTW_MLME_H_
#include "osdep_service.h"
-#include "mlme_osdep.h"
#include "drv_types.h"
#include "wlan_bssdef.h"
@@ -64,17 +63,6 @@ enum rt_scan_type {
SCAN_MIX,
};
-enum SCAN_RESULT_TYPE {
- SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
- SCAN_RESULT_ALL = 1, /* Will return all the scanned device,
- * include AP. */
- SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD
- * device. */
- /* If this device is Miracast sink
- * device, it will just return all the
- * Miracast source devices. */
-};
-
/*
there are several "locks" in mlme_priv,
since mlme_priv is a shared resource between many threads,
@@ -433,8 +421,6 @@ void indicate_wx_scan_complete_event(struct adapter *padapter);
void rtw_indicate_wx_assoc_event(struct adapter *padapter);
void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
int event_thread(void *context);
-void rtw_join_timeout_handler (struct timer_list *t);
-void _rtw_scan_timeout_handler (struct timer_list *t);
void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
int rtw_init_mlme_priv(struct adapter *adapter);
void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv);
@@ -537,7 +523,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
void rtw_indicate_disconnect(struct adapter *adapter);
void rtw_indicate_connect(struct adapter *adapter);
-void rtw_indicate_scan_done( struct adapter *padapter, bool aborted);
+void rtw_indicate_scan_done(struct adapter *padapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
uint in_len);
@@ -551,10 +537,6 @@ void _rtw_join_timeout_handler(struct adapter *adapter);
void rtw_scan_timeout_handler(struct adapter *adapter);
void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
-#define rtw_is_scan_deny(adapter) false
-#define rtw_clear_scan_deny(adapter) do {} while (0)
-#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
-#define rtw_set_scan_deny(adapter, ms) do {} while (0)
void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 343ce1ce4b3d..b322d0848db9 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -388,15 +388,11 @@ struct mlme_ext_priv {
void init_mlme_ext_priv(struct adapter *adapter);
int init_hw_mlme_ext(struct adapter *padapter);
void free_mlme_ext_priv (struct mlme_ext_priv *pmlmeext);
-extern void init_mlme_ext_timer(struct adapter *padapter);
-extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
unsigned char networktype_to_raid(unsigned char network_type);
u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *len);
-void UpdateBrateTbl(struct adapter *padapter, u8 *mBratesOS);
-void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
void Save_DM_Func_Flag(struct adapter *padapter);
void Restore_DM_Func_Flag(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index 66d240a7123d..7768b0c5988c 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -243,6 +243,9 @@ struct recv_frame {
struct recv_reorder_ctrl *preorder_ctrl;
};
+int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void _rtw_free_recv_priv(struct recv_priv *precvpriv);
+s32 rtw_recv_entry(struct recv_frame *precv_frame);
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
int rtw_free_recvframe(struct recv_frame *precvframe,
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index 034a9f8f51c9..82efcd54af3f 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -7,6 +7,9 @@
#include "osdep_service.h"
#include "drv_types.h"
+#define NR_XMITFRAME 256
+#define WMM_XMIT_THRESHOLD (NR_XMITFRAME * 2 / 5)
+
#define MAX_XMITBUF_SZ (20480) /* 20k */
#define NR_XMITBUFF (4)
@@ -304,6 +307,15 @@ struct xmit_priv {
struct submit_ctx ack_tx_ops;
};
+struct pkt_file {
+ struct sk_buff *pkt;
+ size_t pkt_len; /* the remainder length of the open_file */
+ unsigned char *cur_buffer;
+ u8 *buf_start;
+ u8 *cur_addr;
+ size_t buf_len;
+};
+
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
@@ -355,7 +367,7 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
-/* include after declaring struct xmit_buf, in order to avoid warning */
-#include "xmit_osdep.h"
+void rtw_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe);
+netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
#endif /* _RTL871X_XMIT_H_ */
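struct pkt_file, NR_XMITFRAME and WMM_XMIT_THRESHOLD move into rtw_xmit.h now that xmit_osdep.h is deleted, and the os_dep completion helpers are replaced by the rtw_xmit_complete() declared above. Assuming the consolidated helper keeps the behaviour of the deleted rtw_os_pkt_complete() (its old body appears near the end of this patch), the wake-up logic being carried over is roughly:

	static void example_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
	{
		struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
		u16 queue = skb_get_queue_mapping(pkt);
		bool stopped = __netif_subqueue_stopped(padapter->pnetdev, queue);

		if (padapter->registrypriv.wifi_spec) {
			/* under WMM, wake only when the per-queue backlog drops
			 * below 2/5 of NR_XMITFRAME */
			if (stopped && pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)
				netif_wake_subqueue(padapter->pnetdev, queue);
		} else if (stopped) {
			netif_wake_subqueue(padapter->pnetdev, queue);
		}

		dev_kfree_skb_any(pkt);
	}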
diff --git a/drivers/staging/r8188eu/include/wlan_bssdef.h b/drivers/staging/r8188eu/include/wlan_bssdef.h
index 9d1c9e763287..81bda91a4136 100644
--- a/drivers/staging/r8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/r8188eu/include/wlan_bssdef.h
@@ -133,10 +133,6 @@ struct ndis_802_11_assoc_info {
u32 OffsetResponseIEs;
};
-enum ndis_802_11_reload_def {
- Ndis802_11ReloadWEPKeys
-};
-
/* Key mapping keys require a BSSID */
struct ndis_802_11_key {
u32 Length; /* Length of this structure */
diff --git a/drivers/staging/r8188eu/include/xmit_osdep.h b/drivers/staging/r8188eu/include/xmit_osdep.h
deleted file mode 100644
index 00658681fef9..000000000000
--- a/drivers/staging/r8188eu/include/xmit_osdep.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __XMIT_OSDEP_H_
-#define __XMIT_OSDEP_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-struct pkt_file {
- struct sk_buff *pkt;
- size_t pkt_len; /* the remainder length of the open_file */
- unsigned char *cur_buffer;
- u8 *buf_start;
- u8 *cur_addr;
- size_t buf_len;
-};
-
-extern int rtw_ht_enable;
-extern int rtw_cbw40_enable;
-extern int rtw_ampdu_enable;/* for enable tx_ampdu */
-
-#define NR_XMITFRAME 256
-
-struct xmit_priv;
-struct pkt_attrib;
-struct sta_xmit_priv;
-struct xmit_frame;
-struct xmit_buf;
-
-int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
-
-void rtw_os_xmit_schedule(struct adapter *padapter);
-
-int rtw_os_xmit_resource_alloc(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 alloc_sz);
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz);
-
-uint rtw_remainder_len(struct pkt_file *pfile);
-void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
-uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
-bool rtw_endofpktfile(struct pkt_file *pfile);
-
-void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
-void rtw_os_xmit_complete(struct adapter *padapter,
- struct xmit_frame *pxframe);
-
-#endif /* __XMIT_OSDEP_H_ */
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 7f91dac2e41b..2de2e1e32738 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -1099,7 +1099,7 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
spin_lock_bh(&pmlmepriv->lock);
- _status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
+ _status = rtw_sitesurvey_cmd(padapter, ssid, 1);
spin_unlock_bh(&pmlmepriv->lock);
}
@@ -1836,7 +1836,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
goto out;
}
- strlcpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+ strscpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
param->u.crypt.set_tx = 1;
@@ -2079,7 +2079,7 @@ static int rtw_wext_p2p_enable(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- int ret = 0;
+ int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -2094,10 +2094,9 @@ static int rtw_wext_p2p_enable(struct net_device *dev,
else if (*extra == '3')
init_role = P2P_ROLE_GO;
- if (_FAIL == rtw_p2p_enable(padapter, init_role)) {
- ret = -EFAULT;
- goto exit;
- }
+ ret = rtw_p2p_enable(padapter, init_role);
+ if (ret)
+ return ret;
/* set channel/bandwidth */
if (init_role != P2P_ROLE_DISABLE) {
@@ -2121,8 +2120,7 @@ static int rtw_wext_p2p_enable(struct net_device *dev,
set_channel_bwmode(padapter, channel, ch_offset, bwmode);
}
-exit:
- return ret;
+ return 0;
}
static void rtw_p2p_set_go_nego_ssid(struct net_device *dev,
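Two small cleanups in this file: rtw_wext_p2p_enable() now passes the error code from rtw_p2p_enable() straight through instead of collapsing every failure to -EFAULT, and the deprecated strlcpy() becomes strscpy(). Unlike strlcpy(), strscpy() never reads past an unterminated source and reports truncation through its return value; a minimal stand-alone illustration (the names below are not from the driver):

	static void example_copy_alg_name(char *dst, size_t dst_len, const char *alg_name)
	{
		ssize_t n = strscpy(dst, alg_name, dst_len);

		if (n == -E2BIG)
			pr_debug("algorithm name truncated to \"%s\"\n", dst);
	}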
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
deleted file mode 100644
index 899d8e9c3834..000000000000
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ /dev/null
@@ -1,205 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#define _MLME_OSDEP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/mlme_osdep.h"
-
-void rtw_join_timeout_handler (struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.assoc_timer);
-
- _rtw_join_timeout_handler(adapter);
-}
-
-void _rtw_scan_timeout_handler (struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.scan_to_timer);
-
- rtw_scan_timeout_handler(adapter);
-}
-
-static void _dynamic_check_timer_handlder(struct timer_list *t)
-{
- struct adapter *adapter = from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
-
- rtw_dynamic_check_timer_handlder(adapter);
- _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
-}
-
-void rtw_init_mlme_timer(struct adapter *padapter)
-{
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- timer_setup(&pmlmepriv->assoc_timer, rtw_join_timeout_handler, 0);
- timer_setup(&pmlmepriv->scan_to_timer, _rtw_scan_timeout_handler, 0);
- timer_setup(&pmlmepriv->dynamic_chk_timer, _dynamic_check_timer_handlder, 0);
-}
-
-void rtw_os_indicate_connect(struct adapter *adapter)
-{
-
- rtw_indicate_wx_assoc_event(adapter);
- netif_carrier_on(adapter->pnetdev);
- if (adapter->pid[2] != 0)
- rtw_signal_process(adapter->pid[2], SIGALRM);
-
-}
-
-void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
-{
- indicate_wx_scan_complete_event(padapter);
-}
-
-static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
-
-void rtw_reset_securitypriv(struct adapter *adapter)
-{
- u8 backup_index = 0;
- u8 backup_counter = 0x00;
- u32 backup_time = 0;
-
- if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
- /* 802.1x */
- /* We have to backup the PMK information for WiFi PMK Caching test item. */
- /* Backup the btkip_countermeasure information. */
- /* When the countermeasure is trigger, the driver have to disconnect with AP for 60 seconds. */
- memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- backup_index = adapter->securitypriv.PMKIDIndex;
- backup_counter = adapter->securitypriv.btkip_countermeasure;
- backup_time = adapter->securitypriv.btkip_countermeasure_time;
- memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
-
- /* Restore the PMK information to securitypriv structure for the following connection. */
- memcpy(&adapter->securitypriv.PMKIDList[0],
- &backup_pmkid[0],
- sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
- adapter->securitypriv.PMKIDIndex = backup_index;
- adapter->securitypriv.btkip_countermeasure = backup_counter;
- adapter->securitypriv.btkip_countermeasure_time = backup_time;
- adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
- adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
- } else {
- /* reset values in securitypriv */
- struct security_priv *psec_priv = &adapter->securitypriv;
-
- psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
- psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
- psec_priv->dot11PrivacyKeyIndex = 0;
- psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
- psec_priv->dot118021XGrpKeyid = 1;
- psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
- psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
- }
-}
-
-void rtw_os_indicate_disconnect(struct adapter *adapter)
-{
-
- netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
- rtw_indicate_wx_disassoc_event(adapter);
- rtw_reset_securitypriv(adapter);
-}
-
-void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
-{
- uint len;
- u8 *buff, *p, i;
- union iwreq_data wrqu;
-
- buff = NULL;
- if (authmode == _WPA_IE_ID_) {
- buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
- if (!buff)
- return;
- p = buff;
- p += sprintf(p, "ASSOCINFO(ReqIEs =");
- len = sec_ie[1] + 2;
- len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
- for (i = 0; i < len; i++)
- p += sprintf(p, "%02x", sec_ie[i]);
- p += sprintf(p, ")");
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = p - buff;
- wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
- wrqu.data.length : IW_CUSTOM_MAX;
- wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
- kfree(buff);
- }
-}
-
-static void _survey_timer_hdl(struct timer_list *t)
-{
- struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.survey_timer);
-
- survey_timer_hdl(padapter);
-}
-
-static void _link_timer_hdl(struct timer_list *t)
-{
- struct adapter *padapter = from_timer(padapter, t, mlmeextpriv.link_timer);
- link_timer_hdl(padapter);
-}
-
-static void _addba_timer_hdl(struct timer_list *t)
-{
- struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
- addba_timer_hdl(psta);
-}
-
-void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
-{
- timer_setup(&psta->addba_retry_timer, _addba_timer_hdl, 0);
-}
-
-void init_mlme_ext_timer(struct adapter *padapter)
-{
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- timer_setup(&pmlmeext->survey_timer, _survey_timer_hdl, 0);
- timer_setup(&pmlmeext->link_timer, _link_timer_hdl, 0);
-}
-
-void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
-{
- union iwreq_data wrqu;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- if (psta->aid > NUM_STA)
- return;
-
- if (pstapriv->sta_aid[psta->aid - 1] != psta)
- return;
-
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
-}
-
-void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
-{
- union iwreq_data wrqu;
- struct sta_priv *pstapriv = &padapter->stapriv;
-
- if (!psta)
- return;
-
- if (psta->aid > NUM_STA)
- return;
-
- if (pstapriv->sta_aid[psta->aid - 1] != psta)
- return;
-
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
-
- wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
-}
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index cac9553666e6..6a45315d01a2 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -5,8 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/xmit_osdep.h"
-#include "../include/recv_osdep.h"
#include "../include/hal_intf.h"
#include "../include/rtw_ioctl.h"
#include "../include/usb_osintf.h"
@@ -17,13 +15,12 @@
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
-MODULE_VERSION(DRIVERVERSION);
+MODULE_FIRMWARE(FW_RTL8188EU);
#define CONFIG_BR_EXT_BRNAME "br0"
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
/* module param defaults */
-static int rtw_chip_version = 0x00;
static int rtw_rfintfs = HWPI;
static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure; infra, ad-hoc, auto */
@@ -66,9 +63,9 @@ static int rtw_uapsd_acvo_en;
static int rtw_led_enable = 1;
-int rtw_ht_enable = 1;
-int rtw_cbw40_enable = 3; /* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
-int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
+static int rtw_ht_enable = 1;
+static int rtw_cbw40_enable = 3; /* 0 :disable, bit(0): enable 2.4g, bit(1): enable 5g */
+static int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
static int rtw_rx_stbc = 1;/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, default is set to enable 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */
static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
@@ -105,7 +102,6 @@ char *rtw_initmac; /* temp mac address if users want to use instead of the mac
module_param(rtw_initmac, charp, 0644);
module_param(rtw_channel_plan, int, 0644);
-module_param(rtw_chip_version, int, 0644);
module_param(rtw_rfintfs, int, 0644);
module_param(rtw_lbkmode, int, 0644);
module_param(rtw_network_mode, int, 0644);
@@ -152,7 +148,6 @@ static uint loadparam(struct adapter *padapter)
{
struct registry_priv *registry_par = &padapter->registrypriv;
- registry_par->chip_version = (u8)rtw_chip_version;
registry_par->rfintfs = (u8)rtw_rfintfs;
registry_par->lbkmode = (u8)rtw_lbkmode;
registry_par->network_mode = (u8)rtw_network_mode;
@@ -621,7 +616,7 @@ void netdev_br_init(struct net_device *netdev)
rcu_read_unlock();
}
-int _netdev_open(struct net_device *pnetdev)
+static int _netdev_open(struct net_device *pnetdev)
{
uint status;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -635,7 +630,7 @@ int _netdev_open(struct net_device *pnetdev)
if (status == _FAIL)
goto netdev_open_error;
- pr_info("MAC Address = %pM\n", pnetdev->dev_addr);
+ netdev_dbg(pnetdev, "MAC Address = %pM\n", pnetdev->dev_addr);
status = rtw_start_drv_threads(padapter);
if (status == _FAIL) {
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
index 3504a0a9ba87..88271f956b52 100644
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/r8188eu/os_dep/osdep_service.c
@@ -5,7 +5,6 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
#include "../include/rtw_ioctl_set.h"
/*
@@ -54,14 +53,13 @@ struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
if (!pnetdev)
- goto RETURN;
+ return NULL;
pnetdev->dev.type = &wlan_type;
pnpi = netdev_priv(pnetdev);
pnpi->priv = old_priv;
pnpi->sizeof_priv = sizeof_priv;
-RETURN:
return pnetdev;
}
@@ -72,19 +70,18 @@ struct net_device *rtw_alloc_etherdev(int sizeof_priv)
pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
if (!pnetdev)
- goto RETURN;
+ return NULL;
pnpi = netdev_priv(pnetdev);
pnpi->priv = vzalloc(sizeof_priv);
if (!pnpi->priv) {
free_netdev(pnetdev);
- pnetdev = NULL;
- goto RETURN;
+ return NULL;
}
pnpi->sizeof_priv = sizeof_priv;
-RETURN:
+
return pnetdev;
}
diff --git a/drivers/staging/r8188eu/os_dep/recv_linux.c b/drivers/staging/r8188eu/os_dep/recv_linux.c
deleted file mode 100644
index 1e14b6d49795..000000000000
--- a/drivers/staging/r8188eu/os_dep/recv_linux.c
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RECV_OSDEP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-
-#include "../include/wifi.h"
-#include "../include/recv_osdep.h"
-
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-
-/* alloc os related resource in struct recv_buf */
-int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
- struct recv_buf *precvbuf)
-{
- int res = _SUCCESS;
-
- precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
- if (!precvbuf->purb)
- res = _FAIL;
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
- return res;
-}
-
-/* free os related resource in struct recv_buf */
-int rtw_os_recvbuf_resource_free(struct adapter *padapter,
- struct recv_buf *precvbuf)
-{
- usb_free_urb(precvbuf->purb);
- return _SUCCESS;
-}
-
-void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
-{
- union iwreq_data wrqu;
- struct iw_michaelmicfailure ev;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- u32 cur_time = 0;
-
- if (psecuritypriv->last_mic_err_time == 0) {
- psecuritypriv->last_mic_err_time = jiffies;
- } else {
- cur_time = jiffies;
-
- if (cur_time - psecuritypriv->last_mic_err_time < 60 * HZ) {
- psecuritypriv->btkip_countermeasure = true;
- psecuritypriv->last_mic_err_time = 0;
- psecuritypriv->btkip_countermeasure_time = cur_time;
- } else {
- psecuritypriv->last_mic_err_time = jiffies;
- }
- }
-
- memset(&ev, 0x00, sizeof(ev));
- if (bgroup)
- ev.flags |= IW_MICFAILURE_GROUP;
- else
- ev.flags |= IW_MICFAILURE_PAIRWISE;
-
- ev.src_addr.sa_family = ARPHRD_ETHER;
- memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
- memset(&wrqu, 0x00, sizeof(wrqu));
- wrqu.data.length = sizeof(ev);
- wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
- &wrqu, (char *)&ev);
-}
-
-int rtw_recv_indicatepkt(struct adapter *padapter,
- struct recv_frame *precv_frame)
-{
- struct recv_priv *precvpriv;
- struct __queue *pfree_recv_queue;
- struct sk_buff *skb;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- precvpriv = &padapter->recvpriv;
- pfree_recv_queue = &precvpriv->free_recv_queue;
-
- skb = precv_frame->pkt;
- if (!skb)
- goto _recv_indicatepkt_drop;
-
- skb->data = precv_frame->rx_data;
-
- skb_set_tail_pointer(skb, precv_frame->len);
-
- skb->len = precv_frame->len;
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- struct sk_buff *pskb2 = NULL;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
- bool bmcast = is_multicast_ether_addr(pattrib->dst);
-
- if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
- ETH_ALEN)) {
- if (bmcast) {
- psta = rtw_get_bcmc_stainfo(padapter);
- pskb2 = skb_clone(skb, GFP_ATOMIC);
- } else {
- psta = rtw_get_stainfo(pstapriv, pattrib->dst);
- }
-
- if (psta) {
- struct net_device *pnetdev;
-
- pnetdev = (struct net_device *)padapter->pnetdev;
- skb->dev = pnetdev;
- skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
-
- rtw_xmit_entry(skb, pnetdev);
-
- if (bmcast)
- skb = pskb2;
- else
- goto _recv_indicatepkt_end;
- }
- }
- }
-
- rcu_read_lock();
- rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
-
- skb->ip_summed = CHECKSUM_NONE;
- skb->dev = padapter->pnetdev;
- skb->protocol = eth_type_trans(skb, padapter->pnetdev);
-
- netif_rx(skb);
-
-_recv_indicatepkt_end:
-
- /* pointers to NULL before rtw_free_recvframe() */
- precv_frame->pkt = NULL;
-
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
-
- return _SUCCESS;
-
-_recv_indicatepkt_drop:
-
- /* enqueue back to free_recv_queue */
- rtw_free_recvframe(precv_frame, pfree_recv_queue);
-
- return _FAIL;
-}
-
-static void _rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
-{
- struct recv_reorder_ctrl *preorder_ctrl;
-
- preorder_ctrl = from_timer(preorder_ctrl, t, reordering_ctrl_timer);
- rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
-}
-
-void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
-{
- timer_setup(&preorder_ctrl->reordering_ctrl_timer, _rtw_reordering_ctrl_timeout_handler, 0);
-}
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index cc2b44f60c46..5fbfbcd95de2 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -4,8 +4,6 @@
#include <linux/usb.h>
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
-#include "../include/recv_osdep.h"
-#include "../include/xmit_osdep.h"
#include "../include/hal_intf.h"
#include "../include/osdep_intf.h"
#include "../include/usb_ops.h"
@@ -28,6 +26,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
@@ -54,7 +53,7 @@ struct rtw_usb_drv {
};
static struct rtw_usb_drv rtl8188e_usb_drv = {
- .usbdrv.name = "r8188eu",
+ .usbdrv.name = KBUILD_MODNAME,
.usbdrv.probe = rtw_drv_init,
.usbdrv.disconnect = rtw_dev_remove,
.usbdrv.id_table = rtw_usb_id_tbl,
@@ -231,7 +230,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
mutex_unlock(&pwrpriv->lock);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
- rtw_indicate_scan_done(padapter, 1);
+ rtw_indicate_scan_done(padapter);
if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
rtw_indicate_disconnect(padapter);
@@ -287,17 +286,17 @@ exit:
* We accept the new device by returning 0.
*/
-static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
- struct usb_interface *pusb_intf)
+static int rtw_usb_if1_init(struct dvobj_priv *dvobj, struct usb_interface *pusb_intf)
{
struct adapter *padapter = NULL;
struct net_device *pnetdev = NULL;
struct io_priv *piopriv;
struct intf_hdl *pintf;
+ int ret;
padapter = vzalloc(sizeof(*padapter));
if (!padapter)
- return NULL;
+ return -ENOMEM;
padapter->dvobj = dvobj;
dvobj->if1 = padapter;
@@ -306,12 +305,13 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
- if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
- goto free_adapter;
+ rtw_handle_dualmac(padapter, 1);
pnetdev = rtw_init_netdev(padapter);
- if (!pnetdev)
+ if (!pnetdev) {
+ ret = -ENODEV;
goto handle_dualmac;
+ }
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
@@ -329,14 +329,20 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
rtl8188e_read_chip_version(padapter);
/* step usb endpoint mapping */
- rtl8188eu_interface_configure(padapter);
+ ret = rtl8188eu_interface_configure(padapter);
+ if (ret)
+ goto handle_dualmac;
/* step read efuse/eeprom data and get mac_addr */
- ReadAdapterInfo8188EU(padapter);
+ ret = ReadAdapterInfo8188EU(padapter);
+ if (ret)
+ goto handle_dualmac;
/* step 5. */
- if (rtw_init_drv_sw(padapter) == _FAIL)
+ if (rtw_init_drv_sw(padapter) == _FAIL) {
+ ret = -ENODEV;
goto handle_dualmac;
+ }
#ifdef CONFIG_PM
if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
@@ -351,7 +357,8 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
usb_autopm_get_interface(pusb_intf);
/* alloc dev name after read efuse. */
- if (rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname) < 0)
+ ret = rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
+ if (ret)
goto free_drv_sw;
rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
rtw_init_wifidirect_addrs(padapter, padapter->eeprompriv.mac_addr,
@@ -359,23 +366,23 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
/* step 6. Tell the network stack we exist */
- if (register_netdev(pnetdev) != 0)
+ ret = register_netdev(pnetdev);
+ if (ret)
goto free_drv_sw;
- return padapter;
+ return 0;
free_drv_sw:
rtw_cancel_all_timer(padapter);
rtw_free_drv_sw(padapter);
handle_dualmac:
rtw_handle_dualmac(padapter, 0);
-free_adapter:
if (pnetdev)
rtw_free_netdev(pnetdev);
else
vfree(padapter);
- return NULL;
+ return ret;
}
static void rtw_usb_if1_deinit(struct adapter *if1)
@@ -403,27 +410,24 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
{
- struct adapter *if1 = NULL;
struct dvobj_priv *dvobj;
+ int ret;
/* Initialize dvobj_priv */
dvobj = usb_dvobj_init(pusb_intf);
if (!dvobj)
- goto err;
+ return -ENODEV;
- if1 = rtw_usb_if1_init(dvobj, pusb_intf);
- if (!if1)
- goto free_dvobj;
+ ret = rtw_usb_if1_init(dvobj, pusb_intf);
+ if (ret) {
+ usb_dvobj_deinit(pusb_intf);
+ return ret;
+ }
if (ui_pid[1] != 0)
rtw_signal_process(ui_pid[1], SIGUSR2);
return 0;
-
-free_dvobj:
- usb_dvobj_deinit(pusb_intf);
-err:
- return -ENODEV;
}
/*
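The hunks above convert rtw_usb_if1_init() and rtw_drv_init() from returning a pointer (NULL on failure) to returning a negative errno, unwinding any partially completed setup through goto labels. A minimal userspace sketch of that shape, with hypothetical names (demo_ctx, demo_init) that are not taken from the driver:

#include <errno.h>
#include <stdlib.h>

struct demo_ctx { void *a; void *b; };

static int demo_init(struct demo_ctx *ctx)
{
	int ret;

	ctx->a = malloc(64);
	if (!ctx->a)
		return -ENOMEM;

	ctx->b = malloc(64);
	if (!ctx->b) {
		ret = -ENOMEM;
		goto free_a;	/* undo only what already succeeded */
	}

	return 0;

free_a:
	free(ctx->a);
	return ret;
}

Each later failure jumps to a label that releases the resources acquired before it, so the error paths stay in reverse order of the setup steps.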
diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c
deleted file mode 100644
index 91a1e4e3219a..000000000000
--- a/drivers/staging/r8188eu/os_dep/xmit_linux.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _XMIT_OSDEP_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/wifi.h"
-#include "../include/mlme_osdep.h"
-#include "../include/xmit_osdep.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_osintf.h"
-
-uint rtw_remainder_len(struct pkt_file *pfile)
-{
- return pfile->buf_len - ((size_t)(pfile->cur_addr) -
- (size_t)(pfile->buf_start));
-}
-
-void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
-{
-
- if (!pktptr) {
- pr_err("8188eu: pktptr is NULL\n");
- return;
- }
- if (!pfile) {
- pr_err("8188eu: pfile is NULL\n");
- return;
- }
- pfile->pkt = pktptr;
- pfile->cur_addr = pktptr->data;
- pfile->buf_start = pktptr->data;
- pfile->pkt_len = pktptr->len;
- pfile->buf_len = pktptr->len;
-
- pfile->cur_buffer = pfile->buf_start;
-
-}
-
-uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
-{
- uint len = 0;
-
- len = rtw_remainder_len(pfile);
- len = (rlen > len) ? len : rlen;
-
- if (rmem)
- skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);
-
- pfile->cur_addr += len;
- pfile->pkt_len -= len;
-
- return len;
-}
-
-bool rtw_endofpktfile(struct pkt_file *pfile)
-{
-
- if (pfile->pkt_len == 0) {
-
- return true;
- }
-
- return false;
-}
-
-int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
-{
- pxmitbuf->pallocated_buf = kzalloc(alloc_sz, GFP_KERNEL);
- if (!pxmitbuf->pallocated_buf)
- return _FAIL;
-
- pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
- pxmitbuf->dma_transfer_addr = 0;
-
- pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!pxmitbuf->pxmit_urb)
- return _FAIL;
-
- return _SUCCESS;
-}
-
-void rtw_os_xmit_resource_free(struct adapter *padapter,
- struct xmit_buf *pxmitbuf, u32 free_sz)
-{
- usb_free_urb(pxmitbuf->pxmit_urb);
-
- kfree(pxmitbuf->pallocated_buf);
-}
-
-#define WMM_XMIT_THRESHOLD (NR_XMITFRAME * 2 / 5)
-
-void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
-{
- u16 queue;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
- (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
- netif_wake_subqueue(padapter->pnetdev, queue);
- } else {
- if (__netif_subqueue_stopped(padapter->pnetdev, queue))
- netif_wake_subqueue(padapter->pnetdev, queue);
- }
-
- dev_kfree_skb_any(pkt);
-}
-
-void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
-{
- if (pxframe->pkt)
- rtw_os_pkt_complete(padapter, pxframe->pkt);
- pxframe->pkt = NULL;
-}
-
-void rtw_os_xmit_schedule(struct adapter *padapter)
-{
- struct xmit_priv *pxmitpriv;
-
- if (!padapter)
- return;
-
- pxmitpriv = &padapter->xmitpriv;
-
- spin_lock_bh(&pxmitpriv->lock);
-
- if (rtw_txframes_pending(padapter))
- tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
- spin_unlock_bh(&pxmitpriv->lock);
-}
-
-static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
-{
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- u16 queue;
-
- queue = skb_get_queue_mapping(pkt);
- if (padapter->registrypriv.wifi_spec) {
- /* No free space for Tx, tx_worker is too slow */
- if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
- netif_stop_subqueue(padapter->pnetdev, queue);
- } else {
- if (pxmitpriv->free_xmitframe_cnt <= 4) {
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
- netif_stop_subqueue(padapter->pnetdev, queue);
- }
- }
-}
-
-static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct list_head *phead, *plist;
- struct sk_buff *newskb;
- struct sta_info *psta = NULL;
- s32 res;
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* free sta asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- plist = plist->next;
-
- /* avoid come from STA1 and send back STA1 */
- if (!memcmp(psta->hwaddr, &skb->data[6], 6))
- continue;
-
- newskb = skb_copy(skb, GFP_ATOMIC);
-
- if (newskb) {
- memcpy(newskb->data, psta->hwaddr, 6);
- res = rtw_xmit(padapter, &newskb);
- if (res < 0) {
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(newskb);
- } else {
- pxmitpriv->tx_pkts++;
- }
- } else {
- pxmitpriv->tx_drop++;
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- return false; /* Caller shall tx this multicast frame via normal way. */
- }
- }
-
- spin_unlock_bh(&pstapriv->asoc_list_lock);
- dev_kfree_skb_any(skb);
- return true;
-}
-
-int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
-{
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- s32 res = 0;
-
- if (!rtw_if_up(padapter))
- goto drop_packet;
-
- rtw_check_xmit_resource(padapter, pkt);
-
- if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
- (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
- (padapter->registrypriv.wifi_spec == 0)) {
- if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
- res = rtw_mlcst2unicst(padapter, pkt);
- if (res)
- goto exit;
- }
- }
-
- res = rtw_xmit(padapter, &pkt);
- if (res < 0)
- goto drop_packet;
-
- pxmitpriv->tx_pkts++;
- goto exit;
-
-drop_packet:
- pxmitpriv->tx_drop++;
- dev_kfree_skb_any(pkt);
-
-exit:
-
- return 0;
-}
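The deleted xmit_linux.c wrapped sk_buff access in a small cursor abstraction (rtw_remainder_len(), _rtw_pktfile_read()): clamp the requested length to what is left, copy it out, advance the cursor. A plain-C sketch of that behaviour, with illustrative names only:

#include <stddef.h>
#include <string.h>

struct pkt_cursor { const unsigned char *buf; size_t len, off; };

static size_t cursor_read(struct pkt_cursor *c, void *dst, size_t rlen)
{
	size_t left = c->len - c->off;
	size_t n = rlen < left ? rlen : left;	/* never read past the end */

	if (dst)
		memcpy(dst, c->buf + c->off, n);
	c->off += n;
	return n;
}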
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 39f5a6a7346a..e06c189b4ce4 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -10,6 +10,9 @@ config RTLLIB
If unsure, say N.
+ This option builds rtllib, the 802.11 support library used by
+ Realtek wireless drivers. Only the rtl8192e uses it at present.
+
if RTLLIB
config RTLLIB_CRYPTO_CCMP
@@ -23,6 +26,8 @@ config RTLLIB_CRYPTO_CCMP
CCMP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
+ Adds support for the Counter Mode with CBC-MAC Protocol (CCMP)
+ crypto driver used by rtllib-based wireless drivers.
config RTLLIB_CRYPTO_TKIP
tristate "Support for rtllib TKIP crypto"
@@ -35,6 +40,8 @@ config RTLLIB_CRYPTO_TKIP
TKIP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
+ Adds support for the Temporal Key Integrity Protocol (TKIP)
+ defined in the IEEE 802.11i standard for wireless cards.
config RTLLIB_CRYPTO_WEP
tristate "Support for rtllib WEP crypto"
@@ -42,9 +49,12 @@ config RTLLIB_CRYPTO_WEP
depends on RTLLIB
default y
help
- TKIP crypto driver for rtllib.
+ WEP crypto driver for rtllib.
If you enabled RTLLIB, you want this.
+ Adds support for the legacy Wired Equivalent Privacy (WEP)
+ crypto protocol for wireless cards.
+ NOTE: WEP is considered insecure; enable it only for
+ compatibility with old networks.
source "drivers/staging/rtl8192e/rtl8192e/Kconfig"
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 4abec7b42993..ab2e9b729883 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -10,7 +10,7 @@
#include "r8190P_rtl8256.h"
void rtl92e_set_bandwidth(struct net_device *dev,
- enum ht_channel_width Bandwidth)
+ enum ht_channel_width bandwidth)
{
u8 eRFPath;
struct r8192_priv *priv = rtllib_priv(dev);
@@ -25,7 +25,7 @@ void rtl92e_set_bandwidth(struct net_device *dev,
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
continue;
- switch (Bandwidth) {
+ switch (bandwidth) {
case HT_CHANNEL_WIDTH_20:
rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
0x0b, bMask12Bits, 0x100);
@@ -44,7 +44,7 @@ void rtl92e_set_bandwidth(struct net_device *dev,
break;
default:
netdev_err(dev, "%s(): Unknown bandwidth: %#X\n",
- __func__, Bandwidth);
+ __func__, bandwidth);
break;
}
}
@@ -115,10 +115,6 @@ bool rtl92e_config_rf(struct net_device *dev)
(enum rf90_radio_path)eRFPath,
RegOffSetToBeCheck,
bMask12Bits);
- RT_TRACE(COMP_RF,
- "RF %d %d register final value: %x\n",
- eRFPath, RegOffSetToBeCheck,
- RF3_Final_Value);
RetryTimes--;
}
@@ -142,8 +138,6 @@ bool rtl92e_config_rf(struct net_device *dev)
goto fail;
}
}
-
- RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
return true;
fail:
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 4cb483f1a152..3c52e2b43095 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
@@ -9,7 +9,7 @@
#define RTL819X_TOTAL_RF_PATH 2
void rtl92e_set_bandwidth(struct net_device *dev,
- enum ht_channel_width Bandwidth);
+ enum ht_channel_width bandwidth);
bool rtl92e_config_rf(struct net_device *dev);
void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel);
void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index cd8bbc358d01..8bf06f736ffb 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -21,8 +21,6 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
- RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, len);
-
do {
if ((len - frag_offset) > CMDPACKET_FRAG_SIZE) {
frag_length = CMDPACKET_FRAG_SIZE;
@@ -61,8 +59,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
tcb_desc->txbuf_size = frag_length;
}
- seg_ptr = skb_put(skb, frag_length);
- memcpy(seg_ptr, data, (u32)frag_length);
+ skb_put_data(skb, data, frag_length);
if (type == DESC_PACKET_TYPE_INIT &&
(!priv->rtllib->check_nic_enough_desc(dev, TXCMD_QUEUE) ||
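The hunk above folds the skb_put() + memcpy() pair into skb_put_data(), which reserves tailroom and copies in one call. A kernel-context sketch of the two equivalent forms (the copy_frag_* helpers and their parameters are hypothetical, not part of the patch):

#include <linux/skbuff.h>
#include <linux/string.h>

/* Old form: grow the skb, then copy into the returned tail pointer. */
static void copy_frag_old(struct sk_buff *skb, const void *frag, unsigned int frag_len)
{
	u8 *seg = skb_put(skb, frag_len);

	memcpy(seg, frag, frag_len);
}

/* New form: skb_put_data() does the grow-and-copy in one step. */
static void copy_frag_new(struct sk_buff *skb, const void *frag, unsigned int frag_len)
{
	skb_put_data(skb, frag, frag_len);
}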
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 4b9249195b5a..18e4e5d84878 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -186,8 +186,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
AC_PARAM_ECW_MIN_OFFSET) |
(((u32)u1bAIFS) << AC_PARAM_AIFS_OFFSET);
- RT_TRACE(COMP_DBG, "%s():HW_VAR_AC_PARAM eACI:%x:%x\n",
- __func__, eACI, u4bAcParam);
switch (eACI) {
case AC1_BK:
rtl92e_writel(dev, EDCAPARA_BK, u4bAcParam);
@@ -226,8 +224,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
u8 acm = pAciAifsn->f.acm;
u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
- RT_TRACE(COMP_DBG, "===========>%s():HW_VAR_ACM_CTRL:%x\n",
- __func__, eACI);
AcmCtrl = AcmCtrl | ((priv->AcmMethod == 2) ? 0x0 : 0x1);
if (acm) {
@@ -243,12 +239,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
case AC3_VO:
AcmCtrl |= AcmHw_VoqEn;
break;
-
- default:
- RT_TRACE(COMP_QOS,
- "SetHwReg8185(): [HW_VAR_ACM_CTRL] acm set failed: eACI is %d\n",
- eACI);
- break;
}
} else {
switch (eACI) {
@@ -268,10 +258,6 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
break;
}
}
-
- RT_TRACE(COMP_QOS,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- AcmCtrl);
rtl92e_writeb(dev, AcmHwCtrl, AcmCtrl);
break;
}
@@ -304,8 +290,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
u16 i, usValue, IC_Version;
u16 EEPROMId;
- RT_TRACE(COMP_INIT, "====> %s\n", __func__);
-
EEPROMId = rtl92e_eeprom_read(dev, 0);
if (EEPROMId != RTL8190_EEPROM_ID) {
netdev_err(dev, "%s(): Invalid EEPROM ID: %x\n", __func__,
@@ -329,8 +313,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
ICVer8192 = IC_Version & 0xf;
ICVer8256 = (IC_Version & 0xf0)>>4;
- RT_TRACE(COMP_INIT, "\nICVer8192 = 0x%x\n", ICVer8192);
- RT_TRACE(COMP_INIT, "\nICVer8256 = 0x%x\n", ICVer8256);
if (ICVer8192 == 0x2) {
if (ICVer8256 == 0x5)
priv->card_8192_version = VERSION_8190_BE;
@@ -343,22 +325,14 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->card_8192_version = VERSION_8190_BD;
break;
}
- RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n",
- priv->card_8192_version);
} else {
priv->card_8192_version = VERSION_8190_BD;
priv->eeprom_vid = 0;
priv->eeprom_did = 0;
priv->eeprom_CustomerID = 0;
priv->eeprom_ChannelPlan = 0;
- RT_TRACE(COMP_INIT, "\nIC Version = 0x%x\n", 0xff);
}
- RT_TRACE(COMP_INIT, "EEPROM VID = 0x%4x\n", priv->eeprom_vid);
- RT_TRACE(COMP_INIT, "EEPROM DID = 0x%4x\n", priv->eeprom_did);
- RT_TRACE(COMP_INIT, "EEPROM Customer ID: 0x%2x\n",
- priv->eeprom_CustomerID);
-
if (!priv->AutoloadFailFlag) {
u8 addr[ETH_ALEN];
@@ -372,9 +346,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
eth_hw_addr_set(dev, bMac_Tmp_Addr);
}
- RT_TRACE(COMP_INIT, "Permanent Address = %pM\n",
- dev->dev_addr);
-
if (priv->card_8192_version > VERSION_8190_BD)
priv->bTXPowerDataReadFromEEPORM = true;
else
@@ -395,8 +366,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
} else {
priv->EEPROMLegacyHTTxPowerDiff = 0x04;
}
- RT_TRACE(COMP_INIT, "EEPROMLegacyHTTxPowerDiff = %d\n",
- priv->EEPROMLegacyHTTxPowerDiff);
if (!priv->AutoloadFailFlag)
priv->EEPROMThermalMeter = ((rtl92e_eeprom_read(dev,
@@ -404,8 +373,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
0xff00) >> 8;
else
priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- RT_TRACE(COMP_INIT, "ThermalMeter = %d\n",
- priv->EEPROMThermalMeter);
priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
if (priv->epromtype == EEPROM_93C46) {
@@ -421,10 +388,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->EEPROMCrystalCap =
EEPROM_Default_TxPwDiff_CrystalCap;
}
- RT_TRACE(COMP_INIT, "EEPROMAntPwDiff = %d\n",
- priv->EEPROMAntPwDiff);
- RT_TRACE(COMP_INIT, "EEPROMCrystalCap = %d\n",
- priv->EEPROMCrystalCap);
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
@@ -434,12 +397,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
usValue = EEPROM_Default_TxPower;
*((u16 *)(&priv->EEPROMTxPowerLevelCCK[i])) =
usValue;
- RT_TRACE(COMP_INIT,
- "CCK Tx Power Level, Index %d = 0x%02x\n",
- i, priv->EEPROMTxPowerLevelCCK[i]);
- RT_TRACE(COMP_INIT,
- "CCK Tx Power Level, Index %d = 0x%02x\n",
- i+1, priv->EEPROMTxPowerLevelCCK[i+1]);
}
for (i = 0; i < 14; i += 2) {
if (!priv->AutoloadFailFlag)
@@ -449,13 +406,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
usValue = EEPROM_Default_TxPower;
*((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[i]))
= usValue;
- RT_TRACE(COMP_INIT,
- "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n",
- i, priv->EEPROMTxPowerLevelOFDM24G[i]);
- RT_TRACE(COMP_INIT,
- "OFDM 2.4G Tx Power Level, Index %d = 0x%02x\n",
- i + 1,
- priv->EEPROMTxPowerLevelOFDM24G[i+1]);
}
}
if (priv->epromtype == EEPROM_93C46) {
@@ -508,22 +458,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->TxPowerLevelOFDM24G_C[i] =
priv->EEPROMRfCOfdmChnlTxPwLevel[2];
}
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelCCK_A[%d] = 0x%x\n",
- i, priv->TxPowerLevelCCK_A[i]);
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelOFDM24G_A[%d] = 0x%x\n",
- i, priv->TxPowerLevelOFDM24G_A[i]);
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelCCK_C[%d] = 0x%x\n",
- i, priv->TxPowerLevelCCK_C[i]);
- for (i = 0; i < 14; i++)
- RT_TRACE(COMP_INIT,
- "priv->TxPowerLevelOFDM24G_C[%d] = 0x%x\n",
- i, priv->TxPowerLevelOFDM24G_C[i]);
priv->LegacyHTTxPowerDiff =
priv->EEPROMLegacyHTTxPowerDiff;
priv->AntennaTxPwDiff[0] = 0;
@@ -536,13 +470,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
}
}
- if (priv->rf_type == RF_1T2R) {
- /* no matter what checkpatch says, the braces are needed */
- RT_TRACE(COMP_INIT, "\n1T2R config\n");
- } else if (priv->rf_type == RF_2T4R) {
- RT_TRACE(COMP_INIT, "\n2T4R config\n");
- }
-
rtl92e_init_adaptive_rate(dev);
priv->rf_chip = RF_8256;
@@ -574,8 +501,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->ChannelPlan = priv->eeprom_ChannelPlan&0x7f;
else
priv->ChannelPlan = 0x0;
- RT_TRACE(COMP_INIT, "Toshiba ChannelPlan = 0x%x\n",
- priv->ChannelPlan);
break;
case EEPROM_CID_Nettronix:
priv->ScanDelay = 100;
@@ -602,10 +527,6 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->rtllib->bSupportRemoteWakeUp = true;
else
priv->rtllib->bSupportRemoteWakeUp = false;
-
- RT_TRACE(COMP_INIT, "RegChannelPlan(%d)\n", priv->RegChannelPlan);
- RT_TRACE(COMP_INIT, "ChannelPlan = %d\n", priv->ChannelPlan);
- RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
}
void rtl92e_get_eeprom_size(struct net_device *dev)
@@ -613,14 +534,9 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
u16 curCR;
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
curCR = rtl92e_readw(dev, EPROM_CMD);
- RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
- curCR);
priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
EEPROM_93C46;
- RT_TRACE(COMP_INIT, "<===========%s(), epromtype:%d\n", __func__,
- priv->epromtype);
_rtl92e_read_eeprom_info(dev);
}
@@ -697,7 +613,6 @@ bool rtl92e_start_adapter(struct net_device *dev)
int i = 0;
u32 retry_times = 0;
- RT_TRACE(COMP_INIT, "====>%s()\n", __func__);
priv->being_init_adapter = true;
start:
@@ -710,7 +625,7 @@ start:
priv->pFirmware->status = FW_STATUS_0_INIT;
if (priv->RegRfOff)
- priv->rtllib->eRFPowerState = eRfOff;
+ priv->rtllib->rf_power_state = rf_off;
ulRegRead = rtl92e_readl(dev, CPU_GEN);
if (priv->pFirmware->status == FW_STATUS_0_INIT)
@@ -732,13 +647,11 @@ start:
rtl92e_writeb(dev, SWREGULATOR, 0xb8);
}
}
- RT_TRACE(COMP_INIT, "BB Config Start!\n");
rtStatus = rtl92e_config_bb(dev);
if (!rtStatus) {
netdev_warn(dev, "%s(): Failed to configure BB\n", __func__);
return rtStatus;
}
- RT_TRACE(COMP_INIT, "BB Config Finished!\n");
priv->LoopbackMode = RTL819X_NO_LOOPBACK;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
@@ -818,19 +731,7 @@ start:
tmpvalue = rtl92e_readb(dev, IC_VERRSION);
priv->IC_Cut = tmpvalue;
- RT_TRACE(COMP_INIT, "priv->IC_Cut= 0x%x\n", priv->IC_Cut);
- if (priv->IC_Cut >= IC_VersionCut_D) {
- if (priv->IC_Cut == IC_VersionCut_D) {
- /* no matter what checkpatch says, braces are needed */
- RT_TRACE(COMP_INIT, "D-cut\n");
- } else if (priv->IC_Cut == IC_VersionCut_E) {
- RT_TRACE(COMP_INIT, "E-cut\n");
- }
- } else {
- RT_TRACE(COMP_INIT, "Before C-cut\n");
- }
- RT_TRACE(COMP_INIT, "Load Firmware!\n");
bfirmwareok = rtl92e_init_fw(dev);
if (!bfirmwareok) {
if (retry_times < 10) {
@@ -841,15 +742,13 @@ start:
goto end;
}
}
- RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
+
if (priv->ResetProgress == RESET_TYPE_NORESET) {
- RT_TRACE(COMP_INIT, "RF Config Started!\n");
rtStatus = rtl92e_config_phy(dev);
if (!rtStatus) {
netdev_info(dev, "RF Config failed\n");
return rtStatus;
}
- RT_TRACE(COMP_INIT, "RF Config Finished!\n");
}
rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
@@ -858,25 +757,14 @@ start:
rtl92e_writeb(dev, 0x87, 0x0);
if (priv->RegRfOff) {
- RT_TRACE((COMP_INIT | COMP_RF | COMP_POWER),
- "%s(): Turn off RF for RegRfOff ----------\n",
- __func__);
- rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_SW);
- } else if (priv->rtllib->RfOffReason > RF_CHANGE_BY_PS) {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
- "%s(): Turn off RF for RfOffReason(%d) ----------\n",
- __func__, priv->rtllib->RfOffReason);
- rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
- } else if (priv->rtllib->RfOffReason >= RF_CHANGE_BY_IPS) {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
- "%s(): Turn off RF for RfOffReason(%d) ----------\n",
- __func__, priv->rtllib->RfOffReason);
- rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
+ rtl92e_set_rf_state(dev, rf_off, RF_CHANGE_BY_SW);
+ } else if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_PS) {
+ rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason);
+ } else if (priv->rtllib->rf_off_reason >= RF_CHANGE_BY_IPS) {
+ rtl92e_set_rf_state(dev, rf_off, priv->rtllib->rf_off_reason);
} else {
- RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON\n",
- __func__);
- priv->rtllib->eRFPowerState = eRfOn;
- priv->rtllib->RfOffReason = 0;
+ priv->rtllib->rf_power_state = rf_on;
+ priv->rtllib->rf_off_reason = 0;
}
if (priv->rtllib->FwRWRF)
@@ -915,18 +803,6 @@ start:
priv->CCKPresentAttentuation_difference = 0;
priv->CCKPresentAttentuation =
priv->CCKPresentAttentuation_20Mdefault;
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_initial = %d\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_real__initial = %d\n",
- priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_difference_initial = %d\n",
- priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_initial = %d\n",
- priv->CCKPresentAttentuation);
priv->btxpower_tracking = false;
}
}
@@ -946,7 +822,7 @@ static void _rtl92e_net_update(struct net_device *dev)
net = &priv->rtllib->current_network;
rtl92e_config_rate(dev, &rate_config);
- priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->dot11_current_preamble_mode = PREAMBLE_AUTO;
priv->basic_rate = rate_config &= 0x15f;
rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
@@ -1237,7 +1113,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
static u8 tmp;
if (!tmp) {
- RT_TRACE(COMP_DBG, "==>================hw sec\n");
tmp = 1;
}
switch (priv->rtllib->pairwise_key_type) {
@@ -1350,12 +1225,6 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
case DESC90_RATE54M:
ret_rate = MGN_54M;
break;
-
- default:
- RT_TRACE(COMP_RECV,
- "%s: Non supportedRate [%x], bIsHT = %d!!!\n",
- __func__, rate, bIsHT);
- break;
}
} else {
@@ -1411,12 +1280,6 @@ static u8 _rtl92e_rate_hw_to_mgn(bool bIsHT, u8 rate)
case DESC90_RATEMCS32:
ret_rate = 0x80 | 0x20;
break;
-
- default:
- RT_TRACE(COMP_RECV,
- "%s: Non supported Rate [%x], bIsHT = %d!!!\n",
- __func__, rate, bIsHT);
- break;
}
}
@@ -1721,9 +1584,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++) {
if (!rtl92e_is_legal_rf_path(priv->rtllib->dev, rfpath))
continue;
- RT_TRACE(COMP_DBG,
- "Jacken -> pPreviousstats->RxMIMOSignalStrength[rfpath] = %d\n",
- prev_st->RxMIMOSignalStrength[rfpath]);
if (priv->stats.rx_rssi_percentage[rfpath] == 0) {
priv->stats.rx_rssi_percentage[rfpath] =
prev_st->RxMIMOSignalStrength[rfpath];
@@ -1745,9 +1605,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
(prev_st->RxMIMOSignalStrength[rfpath])) /
(RX_SMOOTH);
}
- RT_TRACE(COMP_DBG,
- "Jacken -> priv->RxStats.RxRSSIPercentage[rfPath] = %d\n",
- priv->stats.rx_rssi_percentage[rfpath]);
}
}
@@ -1772,11 +1629,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (prev_st->RxPWDBAll >= 3)
prev_st->RxPWDBAll -= 3;
}
-
- RT_TRACE(COMP_RXDESC, "Smooth %s PWDB = %d\n",
- prev_st->bIsCCK ? "CCK" : "OFDM",
- prev_st->RxPWDBAll);
-
if (prev_st->bPacketToSelf || prev_st->bPacketBeacon ||
prev_st->bToSelfBA) {
if (priv->undecorated_smoothed_pwdb < 0)
@@ -2052,11 +1904,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->RxIs40MHzPacket = pDrvInfo->BW;
_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
-
- if (pDrvInfo->FirstAGGR == 1 || pDrvInfo->PartAggr == 1)
- RT_TRACE(COMP_RXDESC,
- "pDrvInfo->FirstAGGR = %d, pDrvInfo->PartAggr = %d\n",
- pDrvInfo->FirstAGGR, pDrvInfo->PartAggr);
skb_trim(skb, skb->len - 4/*sCrcLng*/);
@@ -2138,7 +1985,7 @@ void rtl92e_update_ratr_table(struct net_device *dev)
break;
case IEEE_N_24G:
case IEEE_N_5G:
- if (ieee->pHTInfo->PeerMimoPs == 0) {
+ if (ieee->pHTInfo->peer_mimo_ps == 0) {
ratr_value &= 0x0007F007;
} else {
if (priv->rf_type == RF_1T2R)
@@ -2151,10 +1998,10 @@ void rtl92e_update_ratr_table(struct net_device *dev)
break;
}
ratr_value &= 0x0FFFFFFF;
- if (ieee->pHTInfo->bCurTxBW40MHz &&
+ if (ieee->pHTInfo->cur_tx_bw40mhz &&
ieee->pHTInfo->bCurShortGI40MHz)
ratr_value |= 0x80000000;
- else if (!ieee->pHTInfo->bCurTxBW40MHz &&
+ else if (!ieee->pHTInfo->cur_tx_bw40mhz &&
ieee->pHTInfo->bCurShortGI20MHz)
ratr_value |= 0x80000000;
rtl92e_writel(dev, RATR0+rate_index*4, ratr_value);
@@ -2261,9 +2108,6 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
u8 i;
u8 SilentResetRxSoltNum = 4;
- RT_TRACE(COMP_RESET, "%s(): RegRxCounter is %d, RxCounter is %d\n",
- __func__, RegRxCounter, priv->RxCounter);
-
rx_chk_cnt++;
if (priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5)) {
rx_chk_cnt = 0;
@@ -2321,9 +2165,6 @@ bool rtl92e_is_tx_stuck(struct net_device *dev)
bool bStuck = false;
u16 RegTxCounter = rtl92e_readw(dev, 0x128);
- RT_TRACE(COMP_RESET, "%s():RegTxCounter is %d,TxCounter is %d\n",
- __func__, RegTxCounter, priv->TxCounter);
-
if (priv->TxCounter == RegTxCounter)
bStuck = true;
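The surviving logic in _rtl92e_process_phyinfo() above keeps a running RSSI average per RF path: the first sample seeds it, and each later reading is blended in with weight 1/RX_SMOOTH. A sketch of that smoothing step (the RX_SMOOTH value and helper name here are assumptions for illustration, not read from the driver):

#define RX_SMOOTH 20	/* assumed weight; the driver defines its own value */

static unsigned int smooth_rssi(unsigned int prev, unsigned int sample)
{
	if (prev == 0)				/* first sample seeds the average */
		return sample;
	return (prev * (RX_SMOOTH - 1) + sample) / RX_SMOOTH;
}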
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 38110fa4f36d..789d288d7503 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -77,10 +77,6 @@ static bool _rtl92e_fw_check_ready(struct net_device *dev,
rt_status = _rtl92e_wait_for_fw(dev, CPU_GEN_FIRM_RDY, 20);
if (rt_status)
pfirmware->status = FW_STATUS_5_READY;
- else
- RT_TRACE(COMP_FIRMWARE,
- "_rtl92e_is_fw_ready fail(%d)!\n",
- rt_status);
break;
default:
rt_status = false;
@@ -149,9 +145,6 @@ bool rtl92e_init_fw(struct net_device *dev)
} else if (pfirmware->status == FW_STATUS_5_READY) {
rst_opt = OPT_FIRMWARE_RESET;
starting_state = FW_INIT_STEP2_DATA;
- } else {
- RT_TRACE(COMP_FIRMWARE,
- "PlatformInitFirmware: undefined firmware state\n");
}
for (i = starting_state; i <= FW_INIT_STEP2_DATA; i++) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index f92551094738..1b592258e640 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -117,8 +117,6 @@ static u32 _rtl92e_phy_rf_read(struct net_device *dev,
} else
NewOffset = Offset;
} else {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "check RF type here, need to be 8256\n");
NewOffset = Offset;
}
rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
@@ -173,8 +171,6 @@ static void _rtl92e_phy_rf_write(struct net_device *dev,
} else
NewOffset = Offset;
} else {
- RT_TRACE((COMP_PHY|COMP_ERR),
- "check RF type here, need to be 8256\n");
NewOffset = Offset;
}
@@ -204,10 +200,9 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
return;
- if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
+ if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
return;
- RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n");
if (priv->Rf_Mode == RF_OP_By_FW) {
if (BitMask != bMask12Bits) {
Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath,
@@ -242,7 +237,7 @@ u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
return 0;
- if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
+ if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
return 0;
mutex_lock(&priv->rf_mutex);
if (priv->Rf_Mode == RF_OP_By_FW) {
@@ -312,19 +307,14 @@ void rtl92e_config_mac(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
if (priv->bTXPowerDataReadFromEEPORM) {
- RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
dwArrayLen = MACPHY_Array_PGLength;
pdwArray = Rtl819XMACPHY_Array_PG;
} else {
- RT_TRACE(COMP_PHY, "Read rtl819XMACPHY_Array\n");
dwArrayLen = MACPHY_ArrayLength;
pdwArray = Rtl819XMACPHY_Array;
}
for (i = 0; i < dwArrayLen; i += 3) {
- RT_TRACE(COMP_DBG,
- "The Rtl8190MACPHY_Array[0] is %x Rtl8190MACPHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n",
- pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
if (pdwArray[i] == 0x318)
pdwArray[i+2] = 0x00000800;
rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i+1],
@@ -357,20 +347,12 @@ static void _rtl92e_phy_config_bb(struct net_device *dev, u8 ConfigType)
rtl92e_set_bb_reg(dev, Rtl819XPHY_REGArray_Table[i],
bMaskDWord,
Rtl819XPHY_REGArray_Table[i+1]);
- RT_TRACE(COMP_DBG,
- "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n",
- i, Rtl819XPHY_REGArray_Table[i],
- Rtl819XPHY_REGArray_Table[i+1]);
}
} else if (ConfigType == BaseBand_Config_AGC_TAB) {
for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
rtl92e_set_bb_reg(dev, Rtl819XAGCTAB_Array_Table[i],
bMaskDWord,
Rtl819XAGCTAB_Array_Table[i+1]);
- RT_TRACE(COMP_DBG,
- "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x\n",
- i, Rtl819XAGCTAB_Array_Table[i],
- Rtl819XAGCTAB_Array_Table[i+1]);
}
}
}
@@ -478,8 +460,6 @@ bool rtl92e_check_bb_and_rf(struct net_device *dev, enum hw90_block CheckBlock,
WriteAddr[HW90_BLOCK_PHY0] = 0x900;
WriteAddr[HW90_BLOCK_PHY1] = 0x800;
WriteAddr[HW90_BLOCK_RF] = 0x3;
- RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __func__,
- CheckBlock);
if (CheckBlock == HW90_BLOCK_MAC) {
netdev_warn(dev, "%s(): No checks available for MAC block.\n",
@@ -543,9 +523,6 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
(enum hw90_block)eCheckItem,
(enum rf90_radio_path)0);
if (!rtStatus) {
- RT_TRACE((COMP_ERR | COMP_PHY),
- "rtl92e_config_rf():Check PHY%d Fail!!\n",
- eCheckItem-1);
return rtStatus;
}
}
@@ -602,15 +579,9 @@ void rtl92e_get_tx_power(struct net_device *dev)
priv->DefaultInitialGain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
priv->DefaultInitialGain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
priv->DefaultInitialGain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
- RT_TRACE(COMP_INIT,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
- priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
- priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
priv->framesync = rtl92e_readb(dev, rOFDM0_RxDetector3);
priv->framesyncC34 = rtl92e_readl(dev, rOFDM0_RxDetector2);
- RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
- rOFDM0_RxDetector3, priv->framesync);
priv->SifsTime = rtl92e_readw(dev, SIFS);
}
@@ -813,9 +784,6 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
struct sw_chnl_cmd *CurrentCmd = NULL;
u8 eRFPath;
- RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n",
- __func__, *stage, *step, channel);
-
if (!rtllib_legal_channel(priv->rtllib, channel)) {
netdev_err(dev, "Invalid channel requested: %d\n", channel);
return true;
@@ -976,21 +944,13 @@ static void _rtl92e_phy_switch_channel_work_item(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n");
-
- RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __func__,
- priv->chan, priv);
-
_rtl92e_phy_switch_channel(dev, priv->chan);
-
- RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n");
}
u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_PHY, "=====>%s()\n", __func__);
if (!priv->up) {
netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
return false;
@@ -1060,10 +1020,6 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
if (priv->CCKPresentAttentuation < 0)
priv->CCKPresentAttentuation = 0;
- RT_TRACE(COMP_POWER_TRACKING,
- "20M, priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
-
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
@@ -1082,9 +1038,6 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
priv->CCKPresentAttentuation_40Mdefault +
priv->CCKPresentAttentuation_difference;
- RT_TRACE(COMP_POWER_TRACKING,
- "40M, priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
if (priv->CCKPresentAttentuation >
(CCKTxBBGainTableLength - 1))
priv->CCKPresentAttentuation =
@@ -1123,16 +1076,10 @@ static void _rtl92e_cck_tx_power_track_bw_switch_thermal(struct net_device *dev)
if (priv->Record_CCK_20Mindex == 0)
priv->Record_CCK_20Mindex = 6;
priv->CCK_index = priv->Record_CCK_20Mindex;
- RT_TRACE(COMP_POWER_TRACKING,
- "20MHz, %s,CCK_index = %d\n", __func__,
- priv->CCK_index);
break;
case HT_CHANNEL_WIDTH_20_40:
priv->CCK_index = priv->Record_CCK_40Mindex;
- RT_TRACE(COMP_POWER_TRACKING,
- "40MHz, %s, CCK_index = %d\n", __func__,
- priv->CCK_index);
break;
}
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
@@ -1154,12 +1101,6 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
u8 regBwOpMode;
- RT_TRACE(COMP_SWBW,
- "==>%s Switch to %s bandwidth\n", __func__,
- priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
-
-
if (priv->rf_chip == RF_PSEUDO_11N) {
priv->SetBWModeInProgress = false;
return;
@@ -1251,11 +1192,9 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
atomic_dec(&(priv->rtllib->atm_swbw));
priv->SetBWModeInProgress = false;
-
- RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()");
}
-void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width Bandwidth,
+void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1267,7 +1206,7 @@ void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width Bandwidth,
atomic_inc(&(priv->rtllib->atm_swbw));
priv->SetBWModeInProgress = true;
- priv->CurrentChannelBW = Bandwidth;
+ priv->CurrentChannelBW = bandwidth;
if (Offset == HT_EXTCHNL_OFFSET_LOWER)
priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
@@ -1291,8 +1230,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
if (priv->up) {
switch (Operation) {
case IG_Backup:
- RT_TRACE(COMP_SCAN,
- "IG_Backup, backup the initial gain.\n");
initial_gain = SCAN_RX_INITIAL_GAIN;
BitMask = bMaskByte0;
if (dm_digtable.dig_algorithm ==
@@ -1314,35 +1251,13 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
rCCK0_CCA, BitMask);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan InitialGainBackup 0xa0a is %x\n",
- priv->initgain_backup.cca);
-
- RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n",
- initial_gain);
rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
- RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n",
- POWER_DETECTION_TH);
rtl92e_writeb(dev, 0xa0a, POWER_DETECTION_TH);
break;
case IG_Restore:
- RT_TRACE(COMP_SCAN,
- "IG_Restore, restore the initial gain.\n");
BitMask = 0x7f;
if (dm_digtable.dig_algorithm ==
DIG_ALGO_BY_FALSE_ALARM)
@@ -1360,22 +1275,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
(u32)priv->initgain_backup.cca);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_SCAN,
- "Scan BBInitialGainRestore 0xa0a is %x\n",
- priv->initgain_backup.cca);
-
rtl92e_set_tx_power(dev,
priv->rtllib->current_network.channel);
@@ -1383,9 +1282,6 @@ void rtl92e_init_gain(struct net_device *dev, u8 Operation)
DIG_ALGO_BY_FALSE_ALARM)
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
break;
- default:
- RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
- break;
}
}
}
@@ -1405,7 +1301,7 @@ void rtl92e_set_rf_off(struct net_device *dev)
}
static bool _rtl92e_set_rf_power_state(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState)
+ enum rt_rf_power_state rf_power_state)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
@@ -1416,15 +1312,13 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
if (priv->SetRFPowerStateInProgress)
return false;
- RT_TRACE(COMP_PS, "===========> %s!\n", __func__);
priv->SetRFPowerStateInProgress = true;
switch (priv->rf_chip) {
case RF_8256:
- switch (eRFPowerState) {
- case eRfOn:
- RT_TRACE(COMP_PS, "%s eRfOn!\n", __func__);
- if ((priv->rtllib->eRFPowerState == eRfOff) &&
+ switch (rf_power_state) {
+ case rf_on:
+ if ((priv->rtllib->rf_power_state == rf_off) &&
RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus;
u32 InitilizeCount = 3;
@@ -1469,8 +1363,8 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
break;
- case eRfSleep:
- if (priv->rtllib->eRFPowerState == eRfOff)
+ case rf_sleep:
+ if (priv->rtllib->rf_power_state == rf_off)
break;
@@ -1481,25 +1375,18 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
QueueID++;
continue;
} else {
- RT_TRACE((COMP_POWER|COMP_RF),
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 before doze!\n",
- (i+1), QueueID);
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(COMP_POWER, "\n\n\n TimeOut!! %s: eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n",
- __func__, MAX_DOZE_WAITING_TIMES_9x, QueueID);
break;
}
}
rtl92e_set_rf_off(dev);
break;
- case eRfOff:
- RT_TRACE(COMP_PS, "%s eRfOff/Sleep !\n", __func__);
-
+ case rf_off:
for (QueueID = 0, i = 0; QueueID < MAX_TX_QUEUE; ) {
ring = &priv->tx_ring[QueueID];
@@ -1507,18 +1394,11 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
QueueID++;
continue;
} else {
- RT_TRACE(COMP_POWER,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 before doze!\n",
- (i+1), QueueID);
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(COMP_POWER,
- "\n\n\n SetZebra: RFPowerState8185B(): eRfOff: %d times TcbBusyQueue[%d] != 0 !!!\n",
- MAX_DOZE_WAITING_TIMES_9x,
- QueueID);
break;
}
}
@@ -1538,7 +1418,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
bResult = false;
netdev_warn(dev,
"%s(): Unknown state requested: 0x%X.\n",
- __func__, eRFPowerState);
+ __func__, rf_power_state);
break;
}
@@ -1550,7 +1430,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
if (bResult) {
- priv->rtllib->eRFPowerState = eRFPowerState;
+ priv->rtllib->rf_power_state = rf_power_state;
switch (priv->rf_chip) {
case RF_8256:
@@ -1563,30 +1443,22 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
priv->SetRFPowerStateInProgress = false;
- RT_TRACE(COMP_PS, "<=========== %s bResult = %d!\n", __func__, bResult);
return bResult;
}
bool rtl92e_set_rf_power_state(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState)
+ enum rt_rf_power_state rf_power_state)
{
struct r8192_priv *priv = rtllib_priv(dev);
bool bResult = false;
- RT_TRACE(COMP_PS,
- "---------> %s: eRFPowerState(%d)\n", __func__, eRFPowerState);
- if (eRFPowerState == priv->rtllib->eRFPowerState &&
+ if (rf_power_state == priv->rtllib->rf_power_state &&
priv->bHwRfOffAction == 0) {
- RT_TRACE(COMP_PS, "<--------- %s: discard the request for eRFPowerState(%d) is the same.\n",
- __func__, eRFPowerState);
return bResult;
}
- bResult = _rtl92e_set_rf_power_state(dev, eRFPowerState);
-
- RT_TRACE(COMP_PS, "<--------- %s: bResult(%d)\n", __func__, bResult);
-
+ bResult = _rtl92e_set_rf_power_state(dev, rf_power_state);
return bResult;
}
@@ -1603,10 +1475,6 @@ void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
case SCAN_OPT_RESTORE:
priv->rtllib->InitialGainHandler(dev, IG_Restore);
break;
-
- default:
- RT_TRACE(COMP_SCAN, "Unknown Scan Backup Operation.\n");
- break;
}
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 7c9148e033d8..75629f5df954 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -75,15 +75,14 @@ u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath);
u8 rtl92e_set_channel(struct net_device *dev, u8 channel);
void rtl92e_set_bw_mode(struct net_device *dev,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void rtl92e_init_gain(struct net_device *dev, u8 Operation);
void rtl92e_set_rf_off(struct net_device *dev);
bool rtl92e_set_rf_power_state(struct net_device *dev,
- enum rt_rf_power_state eRFPowerState);
-#define PHY_SetRFPowerState rtl92e_set_rf_power_state
+ enum rt_rf_power_state rf_power_state);
void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index d7630f02a910..41faeb4b9b9b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -42,14 +42,10 @@ void rtl92e_enable_hw_security_config(struct net_device *dev)
ieee->hwsec_active = 1;
- if ((ieee->pHTInfo->IOTAction&HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
+ if ((ieee->pHTInfo->iot_action & HT_IOT_ACT_PURE_N_MODE) || !hwwep) {
ieee->hwsec_active = 0;
SECR_value &= ~SCR_RxDecEnable;
}
-
- RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n",
- __func__, ieee->hwsec_active, ieee->pairwise_key_type,
- SECR_value);
rtl92e_writeb(dev, SECR, SECR_value);
}
@@ -60,10 +56,6 @@ void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- RT_TRACE(COMP_DBG,
- "===========>%s():EntryNo is %d,KeyIndex is %d,KeyType is %d,is_mesh is %d\n",
- __func__, EntryNo, KeyIndex, KeyType, is_mesh);
-
if (EntryNo >= TOTAL_CAM_ENTRY)
return;
@@ -86,12 +78,12 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
u16 usConfig = 0;
u8 i;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
return;
@@ -107,10 +99,6 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
return;
}
- RT_TRACE(COMP_SEC,
- "====>to %s, dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
- __func__, dev, EntryNo, KeyIndex, KeyType, MacAddr);
-
if (DefaultKey)
usConfig |= BIT15 | (KeyType<<2);
else
@@ -144,7 +132,6 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
}
}
}
- RT_TRACE(COMP_SEC, "=========>after set key, usconfig:%x\n", usConfig);
}
void rtl92e_cam_restore(struct net_device *dev)
@@ -163,9 +150,6 @@ void rtl92e_cam_restore(struct net_device *dev)
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
- RT_TRACE(COMP_SEC, "%s:\n", __func__);
-
-
if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->rtllib->pairwise_key_type == KEY_TYPE_WEP104)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index b9ce71848023..89bc989cffba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -25,7 +25,6 @@
int hwwep = 1;
static char *ifname = "wlan%d";
-
static const struct rtl819x_ops rtl819xp_ops = {
.nic_type = NIC_8192E,
.get_eeprom_size = rtl92e_get_eeprom_size,
@@ -44,8 +43,8 @@ static const struct rtl819x_ops rtl819xp_ops = {
.rx_enable = rtl92e_enable_rx,
.tx_enable = rtl92e_enable_tx,
.interrupt_recognized = rtl92e_ack_irq,
- .TxCheckStuckHandler = rtl92e_is_tx_stuck,
- .RxCheckStuckHandler = rtl92e_is_rx_stuck,
+ .tx_check_stuck_handler = rtl92e_is_tx_stuck,
+ .rx_check_stuck_handler = rtl92e_is_rx_stuck,
};
static struct pci_device_id rtl8192_pci_id_tbl[] = {
@@ -133,36 +132,27 @@ void rtl92e_writew(struct net_device *dev, int x, u16 y)
* -----------------------------GENERAL FUNCTION-------------------------
****************************************************************************/
bool rtl92e_set_rf_state(struct net_device *dev,
- enum rt_rf_power_state StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource)
+ enum rt_rf_power_state state_to_set,
+ RT_RF_CHANGE_SOURCE change_source)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- bool bActionAllowed = false;
- bool bConnectBySSID = false;
- enum rt_rf_power_state rtState;
- u16 RFWaitCounter = 0;
+ bool action_allowed = false;
+ bool connect_by_ssid = false;
+ enum rt_rf_power_state rt_state;
+ u16 rf_wait_counter = 0;
unsigned long flag;
- RT_TRACE((COMP_PS | COMP_RF),
- "===>%s: StateToSet(%d)\n", __func__, StateToSet);
-
while (true) {
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: RF Change in progress! Wait to set..StateToSet(%d).\n",
- __func__, StateToSet);
-
- while (priv->RFChangeInProgress) {
- RFWaitCounter++;
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: Wait 1 ms (%d times)...\n",
- __func__, RFWaitCounter);
+
+ while (priv->rf_change_in_progress) {
+ rf_wait_counter++;
mdelay(1);
- if (RFWaitCounter > 100) {
+ if (rf_wait_counter > 100) {
netdev_warn(dev,
"%s(): Timeout waiting for RF change.\n",
__func__);
@@ -170,43 +160,37 @@ bool rtl92e_set_rf_state(struct net_device *dev,
}
}
} else {
- priv->RFChangeInProgress = true;
+ priv->rf_change_in_progress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
break;
}
}
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
- switch (StateToSet) {
- case eRfOn:
- priv->rtllib->RfOffReason &= (~ChangeSource);
+ switch (state_to_set) {
+ case rf_on:
+ priv->rtllib->rf_off_reason &= (~change_source);
- if ((ChangeSource == RF_CHANGE_BY_HW) && priv->bHwRadioOff)
- priv->bHwRadioOff = false;
+ if ((change_source == RF_CHANGE_BY_HW) && priv->hw_radio_off)
+ priv->hw_radio_off = false;
- if (!priv->rtllib->RfOffReason) {
- priv->rtllib->RfOffReason = 0;
- bActionAllowed = true;
-
-
- if (rtState == eRfOff &&
- ChangeSource >= RF_CHANGE_BY_HW)
- bConnectBySSID = true;
- } else {
- RT_TRACE((COMP_PS | COMP_RF),
- "%s - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
- __func__, priv->rtllib->RfOffReason, ChangeSource);
- }
+ if (!priv->rtllib->rf_off_reason) {
+ priv->rtllib->rf_off_reason = 0;
+ action_allowed = true;
+ if (rt_state == rf_off &&
+ change_source >= RF_CHANGE_BY_HW)
+ connect_by_ssid = true;
+ }
break;
- case eRfOff:
+ case rf_off:
if ((priv->rtllib->iw_mode == IW_MODE_INFRA) ||
(priv->rtllib->iw_mode == IW_MODE_ADHOC)) {
- if ((priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) ||
- (ChangeSource > RF_CHANGE_BY_IPS)) {
+ if ((priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) ||
+ (change_source > RF_CHANGE_BY_IPS)) {
if (ieee->state == RTLLIB_LINKED)
priv->blinked_ingpio = true;
else
@@ -215,46 +199,36 @@ bool rtl92e_set_rf_state(struct net_device *dev,
WLAN_REASON_DISASSOC_STA_HAS_LEFT);
}
}
- if ((ChangeSource == RF_CHANGE_BY_HW) && !priv->bHwRadioOff)
- priv->bHwRadioOff = true;
- priv->rtllib->RfOffReason |= ChangeSource;
- bActionAllowed = true;
+ if ((change_source == RF_CHANGE_BY_HW) && !priv->hw_radio_off)
+ priv->hw_radio_off = true;
+ priv->rtllib->rf_off_reason |= change_source;
+ action_allowed = true;
break;
- case eRfSleep:
- priv->rtllib->RfOffReason |= ChangeSource;
- bActionAllowed = true;
+ case rf_sleep:
+ priv->rtllib->rf_off_reason |= change_source;
+ action_allowed = true;
break;
default:
break;
}
- if (bActionAllowed) {
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
- __func__, StateToSet, priv->rtllib->RfOffReason);
- PHY_SetRFPowerState(dev, StateToSet);
- if (StateToSet == eRfOn) {
-
- if (bConnectBySSID && priv->blinked_ingpio) {
+ if (action_allowed) {
+ rtl92e_set_rf_power_state(dev, state_to_set);
+ if (state_to_set == rf_on) {
+ if (connect_by_ssid && priv->blinked_ingpio) {
schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
priv->blinked_ingpio = false;
}
}
- } else {
- RT_TRACE((COMP_PS | COMP_RF),
- "%s: Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
- __func__, StateToSet, ChangeSource, priv->rtllib->RfOffReason);
}
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- priv->RFChangeInProgress = false;
+ priv->rf_change_in_progress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
-
- RT_TRACE((COMP_PS | COMP_RF), "<===%s\n", __func__);
- return bActionAllowed;
+ return action_allowed;
}
static short _rtl92e_check_nic_enough_desc(struct net_device *dev, int prio)
@@ -297,7 +271,6 @@ static void _rtl92e_set_chan(struct net_device *dev, short ch)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
if (priv->chan_forced)
return;
@@ -314,22 +287,16 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
bool ShortPreamble;
if (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- if (priv->dot11CurrentPreambleMode != PREAMBLE_SHORT) {
+ if (priv->dot11_current_preamble_mode != PREAMBLE_SHORT) {
ShortPreamble = true;
- priv->dot11CurrentPreambleMode = PREAMBLE_SHORT;
- RT_TRACE(COMP_DBG,
- "%s(): WLAN_CAPABILITY_SHORT_PREAMBLE\n",
- __func__);
+ priv->dot11_current_preamble_mode = PREAMBLE_SHORT;
priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
} else {
- if (priv->dot11CurrentPreambleMode != PREAMBLE_LONG) {
+ if (priv->dot11_current_preamble_mode != PREAMBLE_LONG) {
ShortPreamble = false;
- priv->dot11CurrentPreambleMode = PREAMBLE_LONG;
- RT_TRACE(COMP_DBG,
- "%s(): WLAN_CAPABILITY_LONG_PREAMBLE\n",
- __func__);
+ priv->dot11_current_preamble_mode = PREAMBLE_LONG;
priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
@@ -337,17 +304,17 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
if (net->mode & (IEEE_G | IEEE_N_24G)) {
u8 slot_time_val;
- u8 CurSlotTime = priv->slot_time;
+ u8 cur_slot_time = priv->slot_time;
if ((cap & WLAN_CAPABILITY_SHORT_SLOT_TIME) &&
- (!priv->rtllib->pHTInfo->bCurrentRT2RTLongSlotTime)) {
- if (CurSlotTime != SHORT_SLOT_TIME) {
+ (!priv->rtllib->pHTInfo->current_rt2rt_long_slot_time)) {
+ if (cur_slot_time != SHORT_SLOT_TIME) {
slot_time_val = SHORT_SLOT_TIME;
priv->rtllib->SetHwRegHandler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
}
} else {
- if (CurSlotTime != NON_SHORT_SLOT_TIME) {
+ if (cur_slot_time != NON_SHORT_SLOT_TIME) {
slot_time_val = NON_SHORT_SLOT_TIME;
priv->rtllib->SetHwRegHandler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
@@ -374,7 +341,7 @@ static void _rtl92e_update_beacon(void *data)
if (ieee->pHTInfo->bCurrentHTSupport)
HT_update_self_and_peer_setting(ieee, net);
- ieee->pHTInfo->bCurrentRT2RTLongSlotTime = net->bssht.bd_rt2rt_long_slot_time;
+ ieee->pHTInfo->current_rt2rt_long_slot_time = net->bssht.bd_rt2rt_long_slot_time;
ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.rt2rt_ht_mode;
_rtl92e_update_cap(dev, net->capability);
}
@@ -389,13 +356,10 @@ static void _rtl92e_qos_activate(void *data)
mutex_lock(&priv->mutex);
if (priv->rtllib->state != RTLLIB_LINKED)
goto success;
- RT_TRACE(COMP_QOS,
- "qos active process with associate response received\n");
for (i = 0; i < QOS_QUEUE_NUM; i++)
priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
-
success:
mutex_unlock(&priv->mutex);
}
@@ -426,18 +390,14 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
network->qos_data.param_count;
priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
schedule_work(&priv->qos_activate);
- RT_TRACE(COMP_QOS,
- "QoS parameters change call qos_activate\n");
}
} else {
memcpy(&priv->rtllib->current_network.qos_data.parameters,
&def_qos_parameters, size);
- if ((network->qos_data.active == 1) && (active_network == 1)) {
+ if ((network->qos_data.active == 1) && (active_network == 1))
schedule_work(&priv->qos_activate);
- RT_TRACE(COMP_QOS,
- "QoS was disabled call qos_activate\n");
- }
+
network->qos_data.active = 0;
network->qos_data.supported = 0;
}
@@ -455,7 +415,6 @@ static int _rtl92e_handle_beacon(struct net_device *dev,
schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
-
}
static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
@@ -496,8 +455,6 @@ static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
spin_unlock_irqrestore(&priv->rtllib->lock, flags);
- RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
- network->flags, priv->rtllib->current_network.qos_data.active);
if (set_qos_param == 1) {
rtl92e_dm_init_edca_turbo(priv->rtllib->dev);
schedule_work(&priv->qos_activate);
@@ -716,15 +673,9 @@ void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
if ((wireless_mode == WIRELESS_MODE_N_24G) ||
(wireless_mode == WIRELESS_MODE_N_5G)) {
priv->rtllib->pHTInfo->bEnableHT = 1;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 1\n",
- __func__, wireless_mode);
} else {
priv->rtllib->pHTInfo->bEnableHT = 0;
- RT_TRACE(COMP_DBG, "%s(), wireless_mode:%x, bEnableHT = 0\n",
- __func__, wireless_mode);
}
-
- RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
_rtl92e_refresh_support_rate(priv);
}
@@ -742,7 +693,6 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
priv->rtllib->ieee_up = 1;
priv->up_first_time = 0;
- RT_TRACE(COMP_INIT, "Bringing up iface");
priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
if (!init_status) {
@@ -751,7 +701,6 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
return -1;
}
- RT_TRACE(COMP_INIT, "start adapter finished\n");
RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
priv->bfirst_init = false;
@@ -790,7 +739,6 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
priv->up = 0;
priv->rtllib->ieee_up = 0;
priv->bfirst_after_down = true;
- RT_TRACE(COMP_DOWN, "==========>%s()\n", __func__);
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
@@ -807,29 +755,25 @@ static int _rtl92e_sta_down(struct net_device *dev, bool shutdownrf)
rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- while (priv->RFChangeInProgress) {
+ while (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
if (RFInProgressTimeOut > 100) {
spin_lock_irqsave(&priv->rf_ps_lock, flags);
break;
}
- RT_TRACE(COMP_DBG,
- "===>%s():RF is in progress, need to wait until rf change is done.\n",
- __func__);
mdelay(1);
RFInProgressTimeOut++;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
}
- priv->RFChangeInProgress = true;
+ priv->rf_change_in_progress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
priv->ops->stop_adapter(dev, false);
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- priv->RFChangeInProgress = false;
+ priv->rf_change_in_progress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
udelay(100);
memset(&priv->rtllib->current_network, 0,
offsetof(struct rtllib_network, list));
- RT_TRACE(COMP_DOWN, "<==========%s()\n", __func__);
return 0;
}
@@ -883,14 +827,13 @@ static void _rtl92e_init_priv_constant(struct net_device *dev)
pPSC->RegMaxLPSAwakeIntvl = 5;
}
-
static void _rtl92e_init_priv_variable(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 i;
priv->AcmMethod = eAcmWay2_SW;
- priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
+ priv->dot11_current_preamble_mode = PREAMBLE_AUTO;
priv->rtllib->status = 0;
priv->polling_timer_on = 0;
priv->up_first_time = 1;
@@ -935,12 +878,12 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
memset(&priv->InterruptLog, 0, sizeof(struct log_int_8190));
priv->RxCounter = 0;
priv->rtllib->wx_set_enc = 0;
- priv->bHwRadioOff = false;
+ priv->hw_radio_off = false;
priv->RegRfOff = false;
priv->isRFOff = false;
priv->bInPowerSaveMode = false;
- priv->rtllib->RfOffReason = 0;
- priv->RFChangeInProgress = false;
+ priv->rtllib->rf_off_reason = 0;
+ priv->rf_change_in_progress = false;
priv->bHwRfOffAction = 0;
priv->SetRFPowerStateInProgress = false;
priv->rtllib->PowerSaveControl.bInactivePs = true;
@@ -949,7 +892,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->rtllib->PowerSaveControl.bFwCtrlLPS = false;
priv->rtllib->LPSDelayCnt = 0;
priv->rtllib->sta_sleep = LPS_IS_WAKE;
- priv->rtllib->eRFPowerState = eRfOn;
+ priv->rtllib->rf_power_state = rf_on;
priv->rtllib->current_network.beacon_interval = DEFAULT_BEACONINTERVAL;
priv->rtllib->iw_mode = IW_MODE_INFRA;
@@ -1032,7 +975,6 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
"rtl819x_init:Error channel plan! Set to default.\n");
priv->ChannelPlan = COUNTRY_CODE_FCC;
}
- RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
dot11d_init(priv->rtllib);
dot11d_channel_map(priv->ChannelPlan, priv->rtllib);
for (i = 1; i <= 11; i++)
@@ -1072,7 +1014,6 @@ static short _rtl92e_init(struct net_device *dev)
}
priv->irq = dev->irq;
- RT_TRACE(COMP_INIT, "IRQ %d\n", dev->irq);
if (_rtl92e_pci_initdescring(dev) != 0) {
netdev_err(dev, "Endopoints initialization failed");
@@ -1149,11 +1090,8 @@ static enum reset_type _rtl92e_tx_check_stuck(struct net_device *dev)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
if (bCheckFwTxCnt) {
- if (priv->ops->TxCheckStuckHandler(dev)) {
- RT_TRACE(COMP_RESET,
- "TxCheckStuck(): Fw indicates no Tx condition!\n");
+ if (priv->ops->tx_check_stuck_handler(dev))
return RESET_TYPE_SILENT;
- }
}
return RESET_TYPE_NORESET;
@@ -1163,10 +1101,8 @@ static enum reset_type _rtl92e_rx_check_stuck(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->ops->RxCheckStuckHandler(dev)) {
- RT_TRACE(COMP_RESET, "RxStuck Condition\n");
+ if (priv->ops->rx_check_stuck_handler(dev))
return RESET_TYPE_SILENT;
- }
return RESET_TYPE_NORESET;
}
@@ -1178,12 +1114,12 @@ static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
enum reset_type RxResetType = RESET_TYPE_NORESET;
enum rt_rf_power_state rfState;
- rfState = priv->rtllib->eRFPowerState;
+ rfState = priv->rtllib->rf_power_state;
- if (rfState == eRfOn)
+ if (rfState == rf_on)
TxResetType = _rtl92e_tx_check_stuck(dev);
- if (rfState == eRfOn &&
+ if (rfState == rf_on &&
(priv->rtllib->iw_mode == IW_MODE_INFRA) &&
(priv->rtllib->state == RTLLIB_LINKED))
RxResetType = _rtl92e_rx_check_stuck(dev);
@@ -1201,7 +1137,6 @@ static enum reset_type _rtl92e_if_check_reset(struct net_device *dev)
} else {
return RESET_TYPE_NORESET;
}
-
}
static void _rtl92e_if_silent_reset(struct net_device *dev)
@@ -1213,17 +1148,14 @@ static void _rtl92e_if_silent_reset(struct net_device *dev)
unsigned long flag;
if (priv->ResetProgress == RESET_TYPE_NORESET) {
-
- RT_TRACE(COMP_RESET, "=========>Reset progress!!\n");
-
priv->ResetProgress = RESET_TYPE_SILENT;
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
goto END;
}
- priv->RFChangeInProgress = true;
+ priv->rf_change_in_progress = true;
priv->bResetInProgress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
@@ -1242,12 +1174,7 @@ RESET_START:
}
priv->up = 0;
- RT_TRACE(COMP_RESET, "%s():======>start to down the driver\n",
- __func__);
mdelay(1000);
- RT_TRACE(COMP_RESET,
- "%s():111111111111111111111111======>start to down the driver\n",
- __func__);
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
@@ -1275,16 +1202,8 @@ RESET_START:
rtl92e_dm_backup_state(dev);
mutex_unlock(&priv->wx_mutex);
- RT_TRACE(COMP_RESET,
- "%s():<==========down process is finished\n",
- __func__);
-
- RT_TRACE(COMP_RESET, "%s():<===========up process start\n",
- __func__);
reset_status = _rtl92e_up(dev, true);
- RT_TRACE(COMP_RESET,
- "%s():<===========up process is finished\n", __func__);
if (reset_status == -1) {
if (reset_times < 3) {
reset_times++;
@@ -1298,7 +1217,7 @@ RESET_START:
ieee->is_silent_reset = 1;
spin_lock_irqsave(&priv->rf_ps_lock, flag);
- priv->RFChangeInProgress = false;
+ priv->rf_change_in_progress = false;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
rtl92e_enable_hw_security_config(dev);
@@ -1333,8 +1252,6 @@ END:
priv->bResetInProgress = false;
rtl92e_writeb(dev, UFWP, 1);
- RT_TRACE(COMP_RESET, "Reset finished!! ====>[%d]\n",
- priv->reset_count);
}
}
@@ -1375,7 +1292,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
bool bHigherBusyRxTraffic = false;
bool bEnterPS = false;
- if (!priv->up || priv->bHwRadioOff)
+ if (!priv->up || priv->hw_radio_off)
return;
if (priv->rtllib->state >= RTLLIB_LINKED) {
@@ -1390,13 +1307,11 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if (!rtllib_act_scanning(priv->rtllib, false)) {
if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
RTLLIB_NOLINK) &&
- (ieee->eRFPowerState == eRfOn) && !ieee->is_set_key &&
+ (ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc) {
if ((ieee->PowerSaveControl.ReturnPoint ==
IPS_CALLBACK_NONE) &&
(!ieee->bNetPromiscuousMode)) {
- RT_TRACE(COMP_PS,
- "====================>haha: rtl92e_ips_enter()\n");
rtl92e_ips_enter(dev);
}
}
@@ -1407,7 +1322,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->LinkDetectInfo.NumTxOkInPeriod > 100)
bBusyTraffic = true;
-
if (ieee->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
ieee->LinkDetectInfo.NumTxOkInPeriod > 4000) {
bHigherBusyTraffic = true;
@@ -1433,7 +1347,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
rtl92e_leisure_ps_leave(dev);
} else {
- RT_TRACE(COMP_LPS, "====>no link LPS leave\n");
rtl92e_leisure_ps_leave(dev);
}
@@ -1456,9 +1369,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
else
priv->check_roaming_cnt = 0;
-
if (priv->check_roaming_cnt > 0) {
- if (ieee->eRFPowerState == eRfOff)
+ if (ieee->rf_power_state == rf_off)
netdev_info(dev, "%s(): RF is off\n", __func__);
netdev_info(dev,
@@ -1487,12 +1399,11 @@ static void _rtl92e_watchdog_wq_cb(void *data)
}
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
ieee->LinkDetectInfo.NumRecvDataInPeriod = 0;
-
}
spin_lock_irqsave(&priv->tx_lock, flags);
if ((check_reset_cnt++ >= 3) && (!ieee->is_roaming) &&
- (!priv->RFChangeInProgress) && (!pPSC->bSwRfProcessing)) {
+ (!priv->rf_change_in_progress) && (!pPSC->bSwRfProcessing)) {
ResetType = _rtl92e_if_check_reset(dev);
check_reset_cnt = 3;
}
@@ -1500,7 +1411,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if (!priv->bDisableNormalResetCheck && ResetType == RESET_TYPE_NORMAL) {
priv->ResetProgress = RESET_TYPE_NORMAL;
- RT_TRACE(COMP_RESET, "%s(): NOMAL RESET\n", __func__);
return;
}
@@ -1510,7 +1420,6 @@ static void _rtl92e_watchdog_wq_cb(void *data)
priv->force_reset = false;
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
- RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
@@ -1541,7 +1450,6 @@ void rtl92e_tx_enable(struct net_device *dev)
rtllib_reset_queue(priv->rtllib);
}
-
static void _rtl92e_free_rx_ring(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -1599,7 +1507,7 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
MAX_DEV_ADDR_SIZE);
u8 queue_index = tcb_desc->queue_index;
- if ((priv->rtllib->eRFPowerState == eRfOff) || !priv->up ||
+ if ((priv->rtllib->rf_power_state == rf_off) || !priv->up ||
priv->bResetInProgress) {
kfree_skb(skb);
return;
@@ -1632,7 +1540,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
u8 queue_index = tcb_desc->queue_index;
if (queue_index != TXCMD_QUEUE) {
- if ((priv->rtllib->eRFPowerState == eRfOff) ||
+ if ((priv->rtllib->rf_power_state == rf_off) ||
!priv->up || priv->bResetInProgress) {
kfree_skb(skb);
return 0;
@@ -1936,13 +1844,11 @@ long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
return signal_power;
}
-
void rtl92e_update_rx_statistics(struct r8192_priv *priv,
struct rtllib_rx_stats *pprevious_stats)
{
int weighting = 0;
-
if (priv->stats.recv_signal_power == 0)
priv->stats.recv_signal_power =
pprevious_stats->RecvSignalPower;
@@ -1985,8 +1891,6 @@ void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
}
-
-
static void _rtl92e_rx_normal(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2086,7 +1990,6 @@ done:
priv->rx_idx[rx_queue_idx] = (priv->rx_idx[rx_queue_idx] + 1) %
priv->rxringcount;
}
-
}
static void _rtl92e_tx_resume(struct net_device *dev)
@@ -2151,7 +2054,6 @@ static int _rtl92e_open(struct net_device *dev)
ret = _rtl92e_try_up(dev);
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
static int _rtl92e_try_up(struct net_device *dev)
@@ -2163,7 +2065,6 @@ static int _rtl92e_try_up(struct net_device *dev)
return _rtl92e_up(dev, false);
}
-
static int _rtl92e_close(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2181,7 +2082,6 @@ static int _rtl92e_close(struct net_device *dev)
mutex_unlock(&priv->wx_mutex);
return ret;
-
}
static int _rtl92e_down(struct net_device *dev, bool shutdownrf)
@@ -2224,10 +2124,8 @@ static void _rtl92e_set_multicast(struct net_device *dev)
promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
priv->promisc = promisc;
-
}
-
static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
{
struct r8192_priv *priv = rtllib_priv(dev);
@@ -2278,21 +2176,13 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
goto done;
}
- if (inta & IMR_TBDOK) {
- RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
+ if (inta & IMR_TBDOK)
priv->stats.txbeaconokint++;
- }
- if (inta & IMR_TBDER) {
- RT_TRACE(COMP_INTR, "beacon ok interrupt!\n");
+ if (inta & IMR_TBDER)
priv->stats.txbeaconerr++;
- }
-
- if (inta & IMR_BDOK)
- RT_TRACE(COMP_INTR, "beacon interrupt!\n");
if (inta & IMR_MGNTDOK) {
- RT_TRACE(COMP_INTR, "Manage ok interrupt!\n");
priv->stats.txmanageokint++;
_rtl92e_tx_isr(dev, MGNT_QUEUE);
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
@@ -2319,13 +2209,10 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
tasklet_schedule(&priv->irq_rx_tasklet);
}
- if (inta & IMR_BcnInt) {
- RT_TRACE(COMP_INTR, "prepare beacon for interrupt!\n");
+ if (inta & IMR_BcnInt)
tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
- }
if (inta & IMR_RDU) {
- RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
priv->stats.rxrdu++;
rtl92e_writel(dev, INTA_MASK,
rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
@@ -2333,7 +2220,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
}
if (inta & IMR_RXFOVW) {
- RT_TRACE(COMP_INTR, "rx overflow !\n");
priv->stats.rxoverflow++;
tasklet_schedule(&priv->irq_rx_tasklet);
}
@@ -2342,21 +2228,18 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
priv->stats.txoverflow++;
if (inta & IMR_BKDOK) {
- RT_TRACE(COMP_INTR, "BK Tx OK interrupt!\n");
priv->stats.txbkokint++;
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, BK_QUEUE);
}
if (inta & IMR_BEDOK) {
- RT_TRACE(COMP_INTR, "BE TX OK interrupt!\n");
priv->stats.txbeokint++;
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, BE_QUEUE);
}
if (inta & IMR_VIDOK) {
- RT_TRACE(COMP_INTR, "VI TX OK interrupt!\n");
priv->stats.txviokint++;
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, VI_QUEUE);
@@ -2364,7 +2247,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
if (inta & IMR_VODOK) {
priv->stats.txvookint++;
- RT_TRACE(COMP_INTR, "Vo TX OK interrupt!\n");
priv->rtllib->LinkDetectInfo.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, VO_QUEUE);
}
@@ -2376,8 +2258,6 @@ done:
return IRQ_HANDLED;
}
-
-
/****************************************************************************
* ---------------------------- PCI_STUFF---------------------------
****************************************************************************/
@@ -2402,8 +2282,6 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
int err = -ENOMEM;
u8 revision_id;
- RT_TRACE(COMP_INIT, "Configuring chip resources");
-
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Failed to enable PCI device");
return -EIO;
@@ -2452,7 +2330,6 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
goto err_rel_rtllib;
}
-
ioaddr = (unsigned long)ioremap(pmem_start, pmem_len);
if (ioaddr == (unsigned long)NULL) {
netdev_err(dev, "ioremap failed!");
@@ -2483,13 +2360,9 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
dev->type = ARPHRD_ETHER;
dev->watchdog_timeo = HZ * 3;
- if (dev_alloc_name(dev, ifname) < 0) {
- RT_TRACE(COMP_INIT,
- "Oops: devname already taken! Trying wlan%%d...\n");
+ if (dev_alloc_name(dev, ifname) < 0)
dev_alloc_name(dev, ifname);
- }
- RT_TRACE(COMP_INIT, "Driver probe completed1\n");
if (_rtl92e_init(dev) != 0) {
netdev_warn(dev, "Initialization failed");
goto err_free_irq;
@@ -2500,12 +2373,10 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
if (register_netdev(dev))
goto err_free_irq;
- RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
if (priv->polling_timer_on == 0)
rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
- RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
err_free_irq:
@@ -2560,7 +2431,6 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
}
pci_disable_device(pdev);
- RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
bool rtl92e_enable_nic(struct net_device *dev)
@@ -2576,7 +2446,6 @@ bool rtl92e_enable_nic(struct net_device *dev)
return false;
}
- RT_TRACE(COMP_PS, "===========>%s()\n", __func__);
priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
if (!init_status) {
@@ -2584,13 +2453,11 @@ bool rtl92e_enable_nic(struct net_device *dev)
priv->bdisable_nic = false;
return false;
}
- RT_TRACE(COMP_INIT, "start adapter finished\n");
RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
priv->bfirst_init = false;
rtl92e_irq_enable(dev);
priv->bdisable_nic = false;
- RT_TRACE(COMP_PS, "<===========%s()\n", __func__);
return init_status;
}
@@ -2599,7 +2466,6 @@ bool rtl92e_disable_nic(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
u8 tmp_state = 0;
- RT_TRACE(COMP_PS, "=========>%s()\n", __func__);
priv->bdisable_nic = true;
tmp_state = priv->rtllib->state;
rtllib_softmac_stop_protocol(priv->rtllib, 0, false);
@@ -2608,8 +2474,6 @@ bool rtl92e_disable_nic(struct net_device *dev)
rtl92e_irq_disable(dev);
priv->ops->stop_adapter(dev, false);
- RT_TRACE(COMP_PS, "<=========%s()\n", __func__);
-
return true;
}
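The rtl_core.c hunks above drop the RT_TRACE() calls and rename RFChangeInProgress to rf_change_in_progress, while keeping the rf_ps_lock hand-off around that flag as-is. For reference, the claim/release pattern used in _rtl92e_sta_down() and _rtl92e_if_silent_reset(), condensed into a self-contained sketch; the rf_guard struct and the function names below are stand-ins, and only the lock, the flag and the bounded wait are taken from the hunks:

	#include <linux/spinlock.h>
	#include <linux/delay.h>

	/* Stand-in for the two r8192_priv fields this pattern uses. */
	struct rf_guard {
		spinlock_t rf_ps_lock;
		bool rf_change_in_progress;
	};

	/* Wait (bounded, ~100 ms) for a concurrent RF change, then claim the flag. */
	static void rf_change_begin(struct rf_guard *g)
	{
		unsigned long flags;
		unsigned int timeout = 0;

		spin_lock_irqsave(&g->rf_ps_lock, flags);
		while (g->rf_change_in_progress) {
			spin_unlock_irqrestore(&g->rf_ps_lock, flags);
			if (timeout > 100) {
				spin_lock_irqsave(&g->rf_ps_lock, flags);
				break;
			}
			mdelay(1);	/* delay with the spinlock dropped */
			timeout++;
			spin_lock_irqsave(&g->rf_ps_lock, flags);
		}
		g->rf_change_in_progress = true;
		spin_unlock_irqrestore(&g->rf_ps_lock, flags);
	}

	/* Release the flag once the adapter has been stopped or brought back up. */
	static void rf_change_end(struct rf_guard *g)
	{
		unsigned long flags;

		spin_lock_irqsave(&g->rf_ps_lock, flags);
		g->rf_change_in_progress = false;
		spin_unlock_irqrestore(&g->rf_ps_lock, flags);
	}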
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 698552a92100..7021f9c435d9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -299,8 +299,8 @@ struct rtl819x_ops {
void (*tx_enable)(struct net_device *dev);
void (*interrupt_recognized)(struct net_device *dev,
u32 *p_inta, u32 *p_intb);
- bool (*TxCheckStuckHandler)(struct net_device *dev);
- bool (*RxCheckStuckHandler)(struct net_device *dev);
+ bool (*tx_check_stuck_handler)(struct net_device *dev);
+ bool (*rx_check_stuck_handler)(struct net_device *dev);
};
struct r8192_priv {
@@ -392,7 +392,7 @@ struct r8192_priv {
u16 ShortRetryLimit;
u16 LongRetryLimit;
- bool bHwRadioOff;
+ bool hw_radio_off;
bool blinked_ingpio;
u8 polling_timer_on;
@@ -430,7 +430,7 @@ struct r8192_priv {
u16 basic_rate;
u8 short_preamble;
- u8 dot11CurrentPreambleMode;
+ u8 dot11_current_preamble_mode;
u8 slot_time;
u16 SifsTime;
@@ -478,7 +478,7 @@ struct r8192_priv {
bool bInPowerSaveMode;
u8 bHwRfOffAction;
- bool RFChangeInProgress;
+ bool rf_change_in_progress;
bool SetRFPowerStateInProgress;
bool bdisable_nic;
@@ -598,6 +598,6 @@ bool rtl92e_enable_nic(struct net_device *dev);
bool rtl92e_disable_nic(struct net_device *dev);
bool rtl92e_set_rf_state(struct net_device *dev,
- enum rt_rf_power_state StateToSet,
- RT_RF_CHANGE_SOURCE ChangeSource);
+ enum rt_rf_power_state state_to_set,
+ RT_RF_CHANGE_SOURCE change_source);
#endif
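The rtl819x_ops members above keep their bool (*)(struct net_device *) signature; only the names change case. A reduced sketch of how the renamed hooks could be wired up and called, mirroring the call sites in _rtl92e_tx_check_stuck() and _rtl92e_rx_check_stuck(); the cut-down struct and the demo_* stubs are assumptions for illustration, not driver code:

	#include <linux/netdevice.h>

	/* Reduced copy of the two renamed rtl819x_ops members. */
	struct stuck_check_ops {
		bool (*tx_check_stuck_handler)(struct net_device *dev);
		bool (*rx_check_stuck_handler)(struct net_device *dev);
	};

	static bool demo_tx_stuck(struct net_device *dev) { return false; }
	static bool demo_rx_stuck(struct net_device *dev) { return false; }

	static const struct stuck_check_ops demo_ops = {
		.tx_check_stuck_handler = demo_tx_stuck,
		.rx_check_stuck_handler = demo_rx_stuck,
	};

	/* Same shape as the stuck checks feeding _rtl92e_if_check_reset() above. */
	static bool demo_any_queue_stuck(struct net_device *dev)
	{
		return demo_ops.tx_check_stuck_handler(dev) ||
		       demo_ops.rx_check_stuck_handler(dev);
	}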
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index d58800d06e8f..702551056227 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -268,8 +268,6 @@ static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
NULL};
if (priv->ResetProgress == RESET_TYPE_SILENT) {
- RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),
- "GPIOChangeRFWorkItemCallBack(): Silent Reset!!!!!!!\n");
return;
}
@@ -333,8 +331,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
static u8 ping_rssi_state;
if (!priv->up) {
- RT_TRACE(COMP_RATE,
- "<---- %s: driver is going to unload\n", __func__);
return;
}
@@ -347,9 +343,9 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (priv->rtllib->state == RTLLIB_LINKED) {
- bshort_gi_enabled = (pHTInfo->bCurTxBW40MHz &&
+ bshort_gi_enabled = (pHTInfo->cur_tx_bw40mhz &&
pHTInfo->bCurShortGI40MHz) ||
- (!pHTInfo->bCurTxBW40MHz &&
+ (!pHTInfo->cur_tx_bw40mhz &&
pHTInfo->bCurShortGI20MHz);
pra->upper_rssi_threshold_ratr =
@@ -423,9 +419,6 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
u32 ratr_value;
ratr_value = targetRATR;
- RT_TRACE(COMP_RATE,
- "currentRATR = %x, targetRATR = %x\n",
- currentRATR, targetRATR);
if (priv->rf_type == RF_1T2R)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
rtl92e_writel(dev, RATR0, ratr_value);
@@ -628,7 +621,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
u32 delta = 0;
- RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
@@ -637,10 +629,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
RF_Type = priv->rf_type;
Value = (RF_Type<<8) | powerlevelOFDM24G;
- RT_TRACE(COMP_POWER_TRACKING, "powerlevelOFDM24G = %x\n",
- powerlevelOFDM24G);
-
-
for (j = 0; j <= 30; j++) {
tx_cmd.Op = TXCMD_SET_TX_PWR_TRACKING;
@@ -656,15 +644,11 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
mdelay(1);
if (priv->bResetInProgress) {
- RT_TRACE(COMP_POWER_TRACKING,
- "we are in silent reset progress, so return\n");
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
}
- if (priv->rtllib->eRFPowerState != eRfOn) {
- RT_TRACE(COMP_POWER_TRACKING,
- "we are in power save, so return\n");
+ if (priv->rtllib->rf_power_state != rf_on) {
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
@@ -689,10 +673,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
tmp_report[k] = rtl92e_readb(dev,
Tssi_Report_Value2);
- RT_TRACE(COMP_POWER_TRACKING,
- "TSSI_report_value = %d\n",
- tmp_report[k]);
-
if (tmp_report[k] <= 20) {
viviflag = true;
break;
@@ -702,8 +682,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
if (viviflag) {
rtl92e_writeb(dev, Pw_Track_Flag, 0);
viviflag = false;
- RT_TRACE(COMP_POWER_TRACKING,
- "we filted this data\n");
for (k = 0; k < 5; k++)
tmp_report[k] = 0;
break;
@@ -713,12 +691,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
Avg_TSSI_Meas_from_driver += tmp_report[k];
Avg_TSSI_Meas_from_driver *= 100 / 5;
- RT_TRACE(COMP_POWER_TRACKING,
- "Avg_TSSI_Meas_from_driver = %d\n",
- Avg_TSSI_Meas_from_driver);
TSSI_13dBm = priv->TSSI_13dBm;
- RT_TRACE(COMP_POWER_TRACKING, "TSSI_13dBm = %d\n",
- TSSI_13dBm);
if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
@@ -729,20 +702,6 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
priv->rtllib->bdynamic_txpower_enable = true;
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
- RT_TRACE(COMP_POWER_TRACKING,
- "tx power track is done\n");
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex = %d\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_real = %d\n",
- priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_difference = %d\n",
- priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
return;
}
if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
@@ -785,26 +744,12 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
} else
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex = %d\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->rfa_txpowertrackingindex_real = %d\n",
- priv->rfa_txpowertrackingindex_real);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation_difference = %d\n",
- priv->CCKPresentAttentuation_difference);
- RT_TRACE(COMP_POWER_TRACKING,
- "priv->CCKPresentAttentuation = %d\n",
- priv->CCKPresentAttentuation);
if (priv->CCKPresentAttentuation_difference <= -12 ||
priv->CCKPresentAttentuation_difference >= 24) {
priv->rtllib->bdynamic_txpower_enable = true;
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
- RT_TRACE(COMP_POWER_TRACKING,
- "tx power track--->limited\n");
return;
}
@@ -834,10 +779,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
for (i = 0; i < OFDM_Table_Length; i++) {
if (tmpRegA == OFDMSwingTable[i]) {
priv->OFDM_index[0] = i;
- RT_TRACE(COMP_POWER_TRACKING,
- "Initial reg0x%x = 0x%x, OFDM_index = 0x%x\n",
- rOFDM0_XATxIQImbalance, tmpRegA,
- priv->OFDM_index[0]);
}
}
@@ -845,10 +786,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
for (i = 0; i < CCK_Table_length; i++) {
if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
priv->CCK_index = i;
- RT_TRACE(COMP_POWER_TRACKING,
- "Initial reg0x%x = 0x%x, CCK_index = 0x%x\n",
- rCCK0_TxFilter1, TempCCk,
- priv->CCK_index);
break;
}
}
@@ -857,12 +794,10 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
}
tmpRegA = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
- RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
if (tmpRegA < 3 || tmpRegA > 13)
return;
if (tmpRegA >= 12)
tmpRegA = 12;
- RT_TRACE(COMP_POWER_TRACKING, "Valid ThermalMeterA = %d\n", tmpRegA);
priv->ThermalMeter[0] = ThermalMeterVal;
priv->ThermalMeter[1] = ThermalMeterVal;
@@ -894,9 +829,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
priv->Record_CCK_20Mindex = tmpCCK20Mindex;
priv->Record_CCK_40Mindex = tmpCCK40Mindex;
- RT_TRACE(COMP_POWER_TRACKING,
- "Record_CCK_20Mindex / Record_CCK_40Mindex = %d / %d.\n",
- priv->Record_CCK_20Mindex, priv->Record_CCK_40Mindex);
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
@@ -919,9 +851,6 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
priv->OFDM_index[0] = tmpOFDMindex;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
OFDMSwingTable[priv->OFDM_index[0]]);
- RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
- priv->OFDM_index[0],
- OFDMSwingTable[priv->OFDM_index[0]]);
}
priv->txpower_count = 0;
}
@@ -960,8 +889,6 @@ static void _rtl92e_dm_init_tx_power_tracking_thermal(struct net_device *dev)
priv->btxpower_tracking = false;
priv->txpower_count = 0;
priv->btxpower_trackingInit = false;
- RT_TRACE(COMP_POWER_TRACKING, "pMgntInfo->bTXPowerTracking = %d\n",
- priv->btxpower_tracking);
}
void rtl92e_dm_init_txpower_tracking(struct net_device *dev)
@@ -979,7 +906,6 @@ static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
static u32 tx_power_track_counter;
- RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
if (rtl92e_readb(dev, 0x11e) == 1)
return;
if (!priv->btxpower_tracking)
@@ -1086,44 +1012,29 @@ static void _rtl92e_dm_cck_tx_power_adjust_thermal_meter(struct net_device *dev,
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][1] << 8);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,
- "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter1,
- TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][3] << 8) +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][4] << 16)+
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,
- "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter2,
- TempVal);
TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
(CCKSwingTable_Ch1_Ch13[priv->CCK_index][7] << 8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING,
- "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_DebugPort,
- TempVal);
} else {
TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
(CCKSwingTable_Ch14[priv->CCK_index][1] << 8);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter1, TempVal);
TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
(CCKSwingTable_Ch14[priv->CCK_index][3] << 8) +
(CCKSwingTable_Ch14[priv->CCK_index][4] << 16)+
(CCKSwingTable_Ch14[priv->CCK_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_TxFilter2, TempVal);
TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
(CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
- RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
- rCCK0_DebugPort, TempVal);
}
}
@@ -1141,32 +1052,12 @@ static void _rtl92e_dm_tx_power_reset_recovery(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",
- dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery: Fill in RFA_txPowerTrackingIndex is %x\n",
- priv->rfa_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery : RF A I/Q Amplify Gain is %d\n",
- dm_tx_bb_gain_idx_to_amplify(priv->rfa_txpowertrackingindex));
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery: CCK Attenuation is %d dB\n",
- priv->CCKPresentAttentuation);
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",
- dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery: Fill in RFC_txPowerTrackingIndex is %x\n",
- priv->rfc_txpowertrackingindex);
- RT_TRACE(COMP_POWER_TRACKING,
- "Reset Recovery : RF C I/Q Amplify Gain is %d\n",
- dm_tx_bb_gain_idx_to_amplify(priv->rfc_txpowertrackingindex));
}
void rtl92e_dm_restore_state(struct net_device *dev)
@@ -1176,8 +1067,6 @@ void rtl92e_dm_restore_state(struct net_device *dev)
u32 ratr_value;
if (!priv->up) {
- RT_TRACE(COMP_RATE,
- "<---- %s: driver is going to unload\n", __func__);
return;
}
@@ -1218,17 +1107,6 @@ static void _rtl92e_dm_bb_initialgain_restore(struct net_device *dev)
bit_mask = bMaskByte2;
rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
(u32)priv->initgain_backup.cca);
-
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",
- priv->initgain_backup.cca);
rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
}
@@ -1251,17 +1129,6 @@ void rtl92e_dm_backup_state(struct net_device *dev)
priv->initgain_backup.xdagccore1 = rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
bit_mask = bMaskByte2;
priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
-
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",
- priv->initgain_backup.xaagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc58 is %x\n",
- priv->initgain_backup.xbagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc60 is %x\n",
- priv->initgain_backup.xcagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc68 is %x\n",
- priv->initgain_backup.xdagccore1);
- RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xa0a is %x\n",
- priv->initgain_backup.cca);
}
static void _rtl92e_dm_dig_init(struct net_device *dev)
@@ -1681,13 +1548,13 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
goto dm_CheckEdcaTurbo_EXIT;
if (priv->rtllib->state != RTLLIB_LINKED)
goto dm_CheckEdcaTurbo_EXIT;
- if (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_DISABLE_EDCA_TURBO)
+ if (priv->rtllib->pHTInfo->iot_action & HT_IOT_ACT_DISABLE_EDCA_TURBO)
goto dm_CheckEdcaTurbo_EXIT;
if (!priv->rtllib->bis_any_nonbepkts) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
- if (pHTInfo->IOTAction & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
+ if (pHTInfo->iot_action & HT_IOT_ACT_EDCA_BIAS_ON_RX) {
if (curTxOkCnt > 4*curRxOkCnt) {
if (priv->bis_cur_rdlstate ||
!priv->bcurrent_turbo_EDCA) {
@@ -1766,16 +1633,16 @@ static void _rtl92e_dm_cts_to_self(struct net_device *dev)
unsigned long curRxOkCnt = 0;
if (!priv->rtllib->bCTSToSelfEnable) {
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
+ pHTInfo->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
return;
}
if (pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
if (curRxOkCnt > 4*curTxOkCnt)
- pHTInfo->IOTAction &= ~HT_IOT_ACT_FORCED_CTS2SELF;
+ pHTInfo->iot_action &= ~HT_IOT_ACT_FORCED_CTS2SELF;
else
- pHTInfo->IOTAction |= HT_IOT_ACT_FORCED_CTS2SELF;
+ pHTInfo->iot_action |= HT_IOT_ACT_FORCED_CTS2SELF;
lastTxOkCnt = priv->stats.txbytesunicast;
lastRxOkCnt = priv->stats.rxbytesunicast;
@@ -1798,7 +1665,7 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
struct r8192_priv, gpio_change_rf_wq);
struct net_device *dev = priv->rtllib->dev;
u8 tmp1byte;
- enum rt_rf_power_state eRfPowerStateToSet;
+ enum rt_rf_power_state rf_power_state_to_set;
bool bActuallySet = false;
char *argv[3];
static const char RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
@@ -1817,25 +1684,23 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
tmp1byte = rtl92e_readb(dev, GPI);
- eRfPowerStateToSet = (tmp1byte&BIT1) ? eRfOn : eRfOff;
+ rf_power_state_to_set = (tmp1byte&BIT1) ? rf_on : rf_off;
- if (priv->bHwRadioOff && (eRfPowerStateToSet == eRfOn)) {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio ON\n");
+ if (priv->hw_radio_off && (rf_power_state_to_set == rf_on)) {
netdev_info(dev, "gpiochangeRF - HW Radio ON\n");
- priv->bHwRadioOff = false;
+ priv->hw_radio_off = false;
bActuallySet = true;
- } else if (!priv->bHwRadioOff && (eRfPowerStateToSet == eRfOff)) {
- RT_TRACE(COMP_RF, "gpiochangeRF - HW Radio OFF\n");
+ } else if (!priv->hw_radio_off && (rf_power_state_to_set == rf_off)) {
netdev_info(dev, "gpiochangeRF - HW Radio OFF\n");
- priv->bHwRadioOff = true;
+ priv->hw_radio_off = true;
bActuallySet = true;
}
if (bActuallySet) {
mdelay(1000);
priv->bHwRfOffAction = 1;
- rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
- if (priv->bHwRadioOff)
+ rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW);
+ if (priv->hw_radio_off)
argv[1] = "RFOFF";
else
argv[1] = "RFON";
@@ -2132,7 +1997,7 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
if (priv->rtllib->state == RTLLIB_LINKED &&
priv->rtllib->bfsync_enable &&
- (priv->rtllib->pHTInfo->IOTAction & HT_IOT_ACT_CDD_FSYNC)) {
+ (priv->rtllib->pHTInfo->iot_action & HT_IOT_ACT_CDD_FSYNC)) {
u32 rate_bitmap;
for (rate_index = 0; rate_index <= 27; rate_index++) {
@@ -2173,10 +2038,6 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
}
priv->rate_record = rate_count;
priv->rateCountDiffRecord = rate_count_diff;
- RT_TRACE(COMP_HALDM,
- "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n",
- priv->rate_record, rate_count, rate_count_diff,
- priv->bswitch_fsync);
if (priv->undecorated_smoothed_pwdb >
priv->rtllib->fsync_rssi_threshold &&
bSwitchFromCountDiff) {
@@ -2220,11 +2081,6 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
priv->ContinueDiffCount = 0;
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
- RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
- RT_TRACE(COMP_HALDM,
- "rateRecord %d rateCount %d, rateCountdiff %d bSwitchFsync %d\n",
- priv->rate_record, rate_count, rate_count_diff,
- priv->bswitch_fsync);
}
static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
@@ -2232,7 +2088,6 @@ static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
u8 rf_timing = 0x77;
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
(u8 *)(&rf_timing));
@@ -2244,7 +2099,6 @@ static void _rtl92e_dm_end_hw_fsync(struct net_device *dev)
u8 rf_timing = 0xaa;
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
(&rf_timing));
@@ -2255,7 +2109,6 @@ static void _rtl92e_dm_end_sw_fsync(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
del_timer_sync(&(priv->fsync_timer));
if (priv->bswitch_fsync) {
@@ -2276,7 +2129,6 @@ static void _rtl92e_dm_start_sw_fsync(struct net_device *dev)
u32 rateIndex;
u32 rateBitmap;
- RT_TRACE(COMP_HALDM, "%s\n", __func__);
priv->rate_record = 0;
priv->ContinueDiffCount = 0;
priv->rateCountDiffRecord = 0;
@@ -2315,17 +2167,6 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
static u8 reg_c38_State = RegC38_Default;
static u32 reset_cnt;
- RT_TRACE(COMP_HALDM,
- "RSSI %d TimeInterval %d MultipleTimeInterval %d\n",
- priv->rtllib->fsync_rssi_threshold,
- priv->rtllib->fsync_time_interval,
- priv->rtllib->fsync_multiple_timeinterval);
- RT_TRACE(COMP_HALDM,
- "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n",
- priv->rtllib->fsync_rate_bitmap,
- priv->rtllib->fsync_firstdiff_ratethreshold,
- priv->rtllib->fsync_seconddiff_ratethreshold);
-
if (priv->rtllib->state == RTLLIB_LINKED &&
priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) {
if (priv->rtllib->bfsync_enable == 0) {
@@ -2461,9 +2302,6 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
- RT_TRACE(COMP_TXAGC, "priv->undecorated_smoothed_pwdb = %ld\n",
- priv->undecorated_smoothed_pwdb);
-
if (priv->rtllib->state == RTLLIB_LINKED) {
if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
priv->bDynamicTxHighPower = true;
@@ -2484,9 +2322,6 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
(priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) {
- RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190() channel = %d\n",
- priv->rtllib->current_network.channel);
-
rtl92e_set_tx_power(dev, priv->rtllib->current_network.channel);
}
priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
@@ -2499,14 +2334,9 @@ static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- ieee->softmac_stats.CurrentShowTxate = rtl92e_readb(dev,
- Current_Tx_Rate_Reg);
-
- ieee->softmac_stats.last_packet_rate = rtl92e_readb(dev,
- Initial_Tx_Rate_Reg);
-
- ieee->softmac_stats.txretrycount = rtl92e_readl(dev,
- Tx_Retry_Count_Reg);
+ ieee->softmac_stats.CurrentShowTxate = rtl92e_readb(dev, CURRENT_TX_RATE_REG);
+ ieee->softmac_stats.last_packet_rate = rtl92e_readb(dev, INITIAL_TX_RATE_REG);
+ ieee->softmac_stats.txretrycount = rtl92e_readl(dev, TX_RETRY_COUNT_REG);
}
static void _rtl92e_dm_send_rssi_to_fw(struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index ea1b14bbcdcd..51e295d389a8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -42,9 +42,9 @@
#define TX_POWER_ATHEROAP_THRESH_HIGH 78
#define TX_POWER_ATHEROAP_THRESH_LOW 72
-#define Current_Tx_Rate_Reg 0x1e0
-#define Initial_Tx_Rate_Reg 0x1e1
-#define Tx_Retry_Count_Reg 0x1ac
+#define CURRENT_TX_RATE_REG 0x1e0
+#define INITIAL_TX_RATE_REG 0x1e1
+#define TX_RETRY_COUNT_REG 0x1ac
#define RegC38_TH 20
#define DM_Type_ByDriver 1
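The register-offset macros above change name only; the offsets 0x1e0, 0x1e1 and 0x1ac are unchanged, so callers switching to the upper-case names read the same registers. A compile-time check of that equivalence; the lower-case names are restated here purely for the comparison, since they no longer exist in the header after this patch:

	/* Removed names, restated only to assert the values are unchanged. */
	#define Current_Tx_Rate_Reg	0x1e0
	#define Initial_Tx_Rate_Reg	0x1e1
	#define Tx_Retry_Count_Reg	0x1ac

	#define CURRENT_TX_RATE_REG	0x1e0
	#define INITIAL_TX_RATE_REG	0x1e1
	#define TX_RETRY_COUNT_REG	0x1ac

	_Static_assert(Current_Tx_Rate_Reg == CURRENT_TX_RATE_REG, "0x1e0 unchanged");
	_Static_assert(Initial_Tx_Rate_Reg == INITIAL_TX_RATE_REG, "0x1e1 unchanged");
	_Static_assert(Tx_Retry_Count_Reg == TX_RETRY_COUNT_REG, "0x1ac unchanged");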
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 1d992d5c4e17..81e1bb856c60 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -16,11 +16,9 @@ static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u8 tmp;
- u16 LinkCtrlReg;
+ u16 link_ctrl_reg;
- pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg);
-
- RT_TRACE(COMP_INIT, "Link Control Register =%x\n", LinkCtrlReg);
+ pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &link_ctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT4;
@@ -33,28 +31,28 @@ static void _rtl92e_parse_pci_configuration(struct pci_dev *pdev,
bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- u16 DeviceID;
- u8 RevisionID;
- u16 IrqLine;
+ u16 device_id;
+ u8 revision_id;
+ u16 irq_line;
- DeviceID = pdev->device;
- RevisionID = pdev->revision;
- pci_read_config_word(pdev, 0x3C, &IrqLine);
+ device_id = pdev->device;
+ revision_id = pdev->revision;
+ pci_read_config_word(pdev, 0x3C, &irq_line);
priv->card_8192 = priv->ops->nic_type;
- if (DeviceID == 0x8192) {
- switch (RevisionID) {
+ if (device_id == 0x8192) {
+ switch (revision_id) {
case HAL_HW_PCI_REVISION_ID_8192PCIE:
dev_info(&pdev->dev,
"Adapter(8192 PCI-E) is found - DeviceID=%x\n",
- DeviceID);
+ device_id);
priv->card_8192 = NIC_8192E;
break;
case HAL_HW_PCI_REVISION_ID_8192SE:
dev_info(&pdev->dev,
"Adapter(8192SE) is found - DeviceID=%x\n",
- DeviceID);
+ device_id);
priv->card_8192 = NIC_8192SE;
break;
default:
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 5575186caebd..82b45c61ac75 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -32,7 +32,7 @@ int rtl92e_suspend(struct device *dev_d)
netif_device_detach(dev);
if (!priv->rtllib->bSupportRemoteWakeUp) {
- rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_INIT);
+ rtl92e_set_rf_state(dev, rf_off, RF_CHANGE_BY_INIT);
ulRegRead = rtl92e_readl(dev, CPU_GEN);
ulRegRead |= CPU_GEN_SYSTEM_RESET;
rtl92e_writel(dev, CPU_GEN, ulRegRead);
@@ -83,10 +83,9 @@ int rtl92e_resume(struct device *dev_d)
dev->netdev_ops->ndo_open(dev);
if (!priv->rtllib->bSupportRemoteWakeUp)
- rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_INIT);
+ rtl92e_set_rf_state(dev, rf_on, RF_CHANGE_BY_INIT);
out:
- RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
return 0;
}
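rtl_pm.c and rtl_ps.c above switch from the eRfOn/eRfOff/eRfSleep spellings to rf_on/rf_off/rf_sleep. The enum itself, enum rt_rf_power_state, is defined in a header outside this diff; a representative declaration consistent with the names used in these hunks might look like the following (the ordering is an assumption):

	/* Illustrative only; the real definition is not part of this diff.
	 * Only the identifier names are taken from the hunks above. */
	enum rt_rf_power_state {
		rf_on,
		rf_sleep,
		rf_off,
	};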
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c5e89eb40342..8c00b111ddb2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -21,16 +21,12 @@ static void _rtl92e_hw_sleep(struct net_device *dev)
unsigned long flags = 0;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_DBG,
- "%s(): RF Change in progress!\n", __func__);
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_DBG, "%s()============>come to sleep down\n", __func__);
-
- rtl92e_set_rf_state(dev, eRfSleep, RF_CHANGE_BY_PS);
+ rtl92e_set_rf_state(dev, rf_sleep, RF_CHANGE_BY_PS);
}
void rtl92e_hw_sleep_wq(void *data)
@@ -48,17 +44,14 @@ void rtl92e_hw_wakeup(struct net_device *dev)
unsigned long flags = 0;
spin_lock_irqsave(&priv->rf_ps_lock, flags);
- if (priv->RFChangeInProgress) {
+ if (priv->rf_change_in_progress) {
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_DBG,
- "%s(): RF Change in progress!\n", __func__);
schedule_delayed_work(&priv->rtllib->hw_wakeup_wq,
msecs_to_jiffies(10));
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
- RT_TRACE(COMP_PS, "%s()============>come to wake up\n", __func__);
- rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_PS);
+ rtl92e_set_rf_state(dev, rf_on, RF_CHANGE_BY_PS);
}
void rtl92e_hw_wakeup_wq(void *data)
@@ -110,15 +103,10 @@ static void _rtl92e_ps_update_rf_state(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "%s() --------->\n", __func__);
pPSC->bSwRfProcessing = true;
-
- RT_TRACE(COMP_PS, "%s(): Set RF to %s.\n", __func__,
- pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
rtl92e_set_rf_state(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
pPSC->bSwRfProcessing = false;
- RT_TRACE(COMP_PS, "%s() <---------\n", __func__);
}
void rtl92e_ips_enter(struct net_device *dev)
@@ -126,15 +114,14 @@ void rtl92e_ips_enter(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
if (pPSC->bInactivePs) {
- rtState = priv->rtllib->eRFPowerState;
- if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
+ rt_state = priv->rtllib->rf_power_state;
+ if (rt_state == rf_on && !pPSC->bSwRfProcessing &&
(priv->rtllib->state != RTLLIB_LINKED) &&
(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
- RT_TRACE(COMP_PS, "%s(): Turn off RF.\n", __func__);
- pPSC->eInactivePowerState = eRfOff;
+ pPSC->eInactivePowerState = rf_off;
priv->isRFOff = true;
priv->bInPowerSaveMode = true;
_rtl92e_ps_update_rf_state(dev);
@@ -147,14 +134,13 @@ void rtl92e_ips_leave(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
if (pPSC->bInactivePs) {
- rtState = priv->rtllib->eRFPowerState;
- if (rtState != eRfOn && !pPSC->bSwRfProcessing &&
- priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
- RT_TRACE(COMP_PS, "%s(): Turn on RF.\n", __func__);
- pPSC->eInactivePowerState = eRfOn;
+ rt_state = priv->rtllib->rf_power_state;
+ if (rt_state != rf_on && !pPSC->bSwRfProcessing &&
+ priv->rtllib->rf_off_reason <= RF_CHANGE_BY_IPS) {
+ pPSC->eInactivePowerState = rf_on;
priv->bInPowerSaveMode = false;
_rtl92e_ps_update_rf_state(dev);
}
@@ -176,13 +162,13 @@ void rtl92e_ips_leave_wq(void *data)
void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason > RF_CHANGE_BY_IPS) {
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason > RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
return;
@@ -210,7 +196,6 @@ static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
if (priv->rtllib->iw_mode == IW_MODE_ADHOC)
return false;
- RT_TRACE(COMP_LPS, "%s(): set ieee->ps = %x\n", __func__, rtPsMode);
if (!priv->ps_force)
priv->rtllib->ps = rtPsMode;
if (priv->rtllib->sta_sleep != LPS_IS_WAKE &&
@@ -221,8 +206,6 @@ static bool _rtl92e_ps_set_mode(struct net_device *dev, u8 rtPsMode)
priv->rtllib->sta_sleep = LPS_IS_WAKE;
spin_lock_irqsave(&(priv->rtllib->mgmt_tx_lock), flags);
- RT_TRACE(COMP_DBG,
- "LPS leave: notify AP we are awaked ++++++++++ SendNullFunctionData\n");
rtllib_sta_ps_send_null_frame(priv->rtllib, 0);
spin_unlock_irqrestore(&(priv->rtllib->mgmt_tx_lock), flags);
}
@@ -236,12 +219,6 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
- RT_TRACE(COMP_PS, "%s()...\n", __func__);
- RT_TRACE(COMP_PS,
- "pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdleCount is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
- pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
- RT_CHECK_FOR_HANG_PERIOD);
-
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
(priv->rtllib->state == RTLLIB_LINKED))
|| (priv->rtllib->iw_mode == IW_MODE_ADHOC) ||
@@ -252,10 +229,6 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
if (pPSC->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {
if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
-
- RT_TRACE(COMP_LPS,
- "%s(): Enter 802.11 power save mode...\n", __func__);
-
if (!pPSC->bFwCtrlLPS) {
if (priv->rtllib->SetFwCmdHandler)
priv->rtllib->SetFwCmdHandler(
@@ -275,15 +248,8 @@ void rtl92e_leisure_ps_leave(struct net_device *dev)
struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
&(priv->rtllib->PowerSaveControl);
-
- RT_TRACE(COMP_PS, "%s()...\n", __func__);
- RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
- pPSC->bLeisurePs, priv->rtllib->ps);
-
if (pPSC->bLeisurePs) {
if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
- RT_TRACE(COMP_LPS,
- "%s(): Busy Traffic , Leave 802.11 power save..\n", __func__);
_rtl92e_ps_set_mode(dev, RTLLIB_PS_DISABLED);
if (!pPSC->bFwCtrlLPS) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 407effde5e71..4920cb49e381 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -52,7 +52,7 @@ static int _rtl92e_wx_set_rate(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -71,7 +71,7 @@ static int _rtl92e_wx_set_rts(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -99,7 +99,7 @@ static int _rtl92e_wx_set_power(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff) {
+ if (priv->hw_radio_off) {
netdev_warn(dev, "%s(): Can't set Power: Radio is Off.\n",
__func__);
return 0;
@@ -129,7 +129,7 @@ static int _rtl92e_wx_set_rawtx(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
int ret;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -149,8 +149,6 @@ static int _rtl92e_wx_force_reset(struct net_device *dev,
mutex_lock(&priv->wx_mutex);
- RT_TRACE(COMP_DBG, "%s(): force reset ! extra is %d\n",
- __func__, *extra);
priv->force_reset = *extra;
mutex_unlock(&priv->wx_mutex);
return 0;
@@ -167,8 +165,6 @@ static int _rtl92e_wx_adapter_power_status(struct net_device *dev,
mutex_lock(&priv->wx_mutex);
- RT_TRACE(COMP_POWER, "%s(): %s\n", __func__, (*extra == 6) ?
- "DC power" : "AC power");
if (*extra || priv->force_lps) {
priv->ps_force = false;
pPSC->bLeisurePs = true;
@@ -228,7 +224,7 @@ static int _rtl92e_wx_set_debug(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
u8 c = *extra;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
netdev_info(dev, "=====>%s(), *extra:%x, debugflag:%x\n", __func__,
@@ -247,18 +243,18 @@ static int _rtl92e_wx_set_mode(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = netdev_priv_rsl(dev);
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
int ret;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
mutex_lock(&priv->wx_mutex);
if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
ieee->bNetPromiscuousMode) {
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason >
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason >
RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
@@ -379,7 +375,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- enum rt_rf_power_state rtState;
+ enum rt_rf_power_state rt_state;
int ret;
if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
@@ -391,12 +387,12 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
return 0;
}
- if (priv->bHwRadioOff) {
+ if (priv->hw_radio_off) {
netdev_info(dev, "================>%s(): hwradio off\n",
__func__);
return 0;
}
- rtState = priv->rtllib->eRFPowerState;
+ rt_state = priv->rtllib->rf_power_state;
if (!priv->up)
return -ENETDOWN;
if (priv->rtllib->LinkDetectInfo.bBusyTraffic == true)
@@ -419,17 +415,14 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
if (priv->rtllib->state != RTLLIB_LINKED) {
if (priv->rtllib->PowerSaveControl.bInactivePs) {
- if (rtState == eRfOff) {
- if (priv->rtllib->RfOffReason >
+ if (rt_state == rf_off) {
+ if (priv->rtllib->rf_off_reason >
RF_CHANGE_BY_IPS) {
netdev_warn(dev, "%s(): RF is OFF.\n",
__func__);
mutex_unlock(&priv->wx_mutex);
return -1;
}
- RT_TRACE(COMP_PS,
- "=========>%s(): rtl92e_ips_leave\n",
- __func__);
mutex_lock(&priv->rtllib->ips_mutex);
rtl92e_ips_leave(dev);
mutex_unlock(&priv->rtllib->ips_mutex);
@@ -440,7 +433,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
priv->rtllib->LedControlHandler(dev,
LED_CTL_SITE_SURVEY);
- if (priv->rtllib->eRFPowerState != eRfOff) {
+ if (priv->rtllib->rf_power_state != rf_off) {
priv->rtllib->actscanning = true;
if (ieee->ScanOperationBackupHandler)
@@ -473,7 +466,7 @@ static int _rtl92e_wx_get_scan(struct net_device *dev,
if (!priv->up)
return -ENETDOWN;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -492,9 +485,9 @@ static int _rtl92e_wx_set_essid(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
int ret;
- if (priv->bHwRadioOff) {
+ if (priv->hw_radio_off) {
netdev_info(dev,
- "=========>%s():hw radio off,or Rf state is eRfOff, return\n",
+ "=========>%s():hw radio off,or Rf state is rf_off, return\n",
__func__);
return 0;
}
@@ -560,7 +553,7 @@ static int _rtl92e_wx_set_freq(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -586,7 +579,7 @@ static int _rtl92e_wx_set_frag(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
if (wrqu->frag.disabled)
@@ -622,7 +615,7 @@ static int _rtl92e_wx_set_wap(struct net_device *dev,
int ret;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -669,7 +662,7 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
int i;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
if (!priv->up)
@@ -681,7 +674,6 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
mutex_unlock(&priv->rtllib->ips_mutex);
mutex_lock(&priv->wx_mutex);
- RT_TRACE(COMP_SEC, "Setting SW wep key");
ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key);
mutex_unlock(&priv->wx_mutex);
@@ -754,7 +746,7 @@ static int _rtl92e_wx_set_scan_type(struct net_device *dev,
int *parms = (int *)p;
int mode = parms[0];
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
priv->rtllib->active_scan = mode;
@@ -770,7 +762,7 @@ static int _rtl92e_wx_set_retry(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
int err = 0;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -843,7 +835,7 @@ static int _rtl92e_wx_set_sens(struct net_device *dev,
short err = 0;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -870,7 +862,7 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -950,7 +942,7 @@ static int _rtl92e_wx_set_auth(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -967,7 +959,7 @@ static int _rtl92e_wx_set_mlme(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
@@ -984,7 +976,7 @@ static int _rtl92e_wx_set_gen_ie(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bHwRadioOff)
+ if (priv->hw_radio_off)
return 0;
mutex_lock(&priv->wx_mutex);
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 7d04966afdd9..19d13b3fcecf 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -100,8 +100,6 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
*tag++ = pBA->dialog_token;
if (type == ACT_ADDBARSP) {
- RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");
-
put_unaligned_le16(StatusCode, tag);
tag += 2;
}
@@ -183,7 +181,6 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
if (skb) {
- RT_TRACE(COMP_DBG, "====>to send ADDBAREQ!!!!!\n");
softmac_mgmt_xmit(skb, ieee);
} else {
netdev_dbg(ieee->dev, "Failed to generate ADDBAReq packet.\n");
@@ -247,10 +244,9 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
pBaTimeoutVal = (u16 *)(tag + 5);
pBaStartSeqCtrl = (union sequence_control *)(req + 7);
- RT_TRACE(COMP_DBG, "====>rx ADDBAREQ from : %pM\n", dst);
if (!ieee->current_network.qos_data.active ||
!ieee->pHTInfo->bCurrentHTSupport ||
- (ieee->pHTInfo->IOTAction & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
+ (ieee->pHTInfo->iot_action & HT_IOT_ACT_REJECT_ADDBA_REQ)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev,
"Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n",
@@ -282,7 +278,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
pBA->ba_start_seq_ctrl = *pBaStartSeqCtrl;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
- (ieee->pHTInfo->IOTAction & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
+ (ieee->pHTInfo->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
pBA->ba_param_set.field.buffer_size = 1;
else
pBA->ba_param_set.field.buffer_size = 32;
@@ -330,7 +326,6 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pBaParamSet = (union ba_param_set *)(tag + 5);
pBaTimeoutVal = (u16 *)(tag + 7);
- RT_TRACE(COMP_DBG, "====>rx ADDBARSP from : %pM\n", dst);
if (!ieee->current_network.qos_data.active ||
!ieee->pHTInfo->bCurrentHTSupport ||
!ieee->pHTInfo->bCurrentAMPDUEnable) {
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index ce13b41074a7..76bc9c5a6d83 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -131,51 +131,40 @@ struct rt_hi_throughput {
u8 AMPDU_Factor;
u8 CurrentAMPDUFactor;
u8 MPDU_Density;
- u8 CurrentMPDUDensity;
+ u8 current_mpdu_density;
enum ht_aggre_mode ForcedAMPDUMode;
- u8 ForcedAMPDUFactor;
- u8 ForcedMPDUDensity;
+ u8 forced_ampdu_factor;
+ u8 forced_mpdu_density;
enum ht_aggre_mode ForcedAMSDUMode;
-	u16 ForcedAMSDUMaxSize;
-	u8 bForcedShortGI;
-	u8 CurrentOpMode;
-
-	u8 SelfMimoPs;
-	u8 PeerMimoPs;
+	u8 forced_short_gi;
+	u8 current_op_mode;
+	u8 self_mimo_ps;
+	u8 peer_mimo_ps;
enum ht_extchnl_offset CurSTAExtChnlOffset;
- u8 bCurTxBW40MHz;
- u8 PeerBandwidth;
-
- u8 bSwBwInProgress;
- u8 SwBwStep;
-
- u8 bRegRT2RTAggregation;
+ u8 cur_tx_bw40mhz;
+ u8 sw_bw_in_progress;
+ u8 reg_rt2rt_aggregation;
u8 RT2RT_HT_Mode;
- u8 bCurrentRT2RTAggregation;
- u8 bCurrentRT2RTLongSlotTime;
- u8 szRT2RTAggBuffer[10];
-
- u8 bRegRxReorderEnable;
- u8 bCurRxReorderEnable;
- u8 RxReorderWinSize;
- u8 RxReorderPendingTime;
- u16 RxReorderDropCounter;
-
- u8 bIsPeerBcm;
-
+ u8 current_rt2rt_aggregation;
+ u8 current_rt2rt_long_slot_time;
+ u8 sz_rt2rt_agg_buf[10];
+
+ u8 reg_rx_reorder_enable;
+ u8 cur_rx_reorder_enable;
+ u8 rx_reorder_win_size;
+ u8 rx_reorder_pending_time;
+ u16 rx_reorder_drop_counter;
u8 IOTPeer;
- u32 IOTAction;
- u8 IOTRaFunc;
+ u32 iot_action;
+ u8 iot_ra_func;
u8 bWAIotBroadcom;
u8 WAIotTH;
-
- u8 bAcceptAddbaReq;
} __packed;
struct bss_ht {
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index 3b8efaf9b88c..ef3dca51cf99 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -70,9 +70,6 @@ static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
void HTUpdateDefaultSetting(struct rtllib_device *ieee)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
-
- pHTInfo->bAcceptAddbaReq = 1;
-
pHTInfo->bRegShortGI20MHz = 1;
pHTInfo->bRegShortGI40MHz = 1;
@@ -90,19 +87,19 @@ void HTUpdateDefaultSetting(struct rtllib_device *ieee)
pHTInfo->AMPDU_Factor = 2;
pHTInfo->MPDU_Density = 0;
- pHTInfo->SelfMimoPs = 3;
- if (pHTInfo->SelfMimoPs == 2)
- pHTInfo->SelfMimoPs = 3;
+ pHTInfo->self_mimo_ps = 3;
+ if (pHTInfo->self_mimo_ps == 2)
+ pHTInfo->self_mimo_ps = 3;
ieee->bTxDisableRateFallBack = 0;
ieee->bTxUseDriverAssingedRate = 0;
ieee->bTxEnableFwCalcDur = 1;
- pHTInfo->bRegRT2RTAggregation = 1;
+ pHTInfo->reg_rt2rt_aggregation = 1;
- pHTInfo->bRegRxReorderEnable = 1;
- pHTInfo->RxReorderWinSize = 64;
- pHTInfo->RxReorderPendingTime = 30;
+ pHTInfo->reg_rx_reorder_enable = 1;
+ pHTInfo->rx_reorder_win_size = 64;
+ pHTInfo->rx_reorder_pending_time = 30;
}
static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
@@ -254,20 +251,20 @@ static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
- pHTInfo->IOTRaFunc &= HT_IOT_RAFUNC_DISABLE_ALL;
+ pHTInfo->iot_ra_func &= HT_IOT_RAFUNC_DISABLE_ALL;
if (pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
- pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_PEER_1R;
+ pHTInfo->iot_ra_func |= HT_IOT_RAFUNC_PEER_1R;
- if (pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE)
- pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_TX_AMSDU;
+ if (pHTInfo->iot_action & HT_IOT_ACT_AMSDU_ENABLE)
+ pHTInfo->iot_ra_func |= HT_IOT_RAFUNC_TX_AMSDU;
}
void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo)
{
- pHTInfo->IOTAction = 0;
+ pHTInfo->iot_action = 0;
pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
- pHTInfo->IOTRaFunc = 0;
+ pHTInfo->iot_ra_func = 0;
}
void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
@@ -300,7 +297,7 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
else
pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);
- pCapELE->MimoPwrSave = pHT->SelfMimoPs;
+ pCapELE->MimoPwrSave = pHT->self_mimo_ps;
pCapELE->GreenField = 0;
pCapELE->ShortGI20Mhz = 1;
pCapELE->ShortGI40Mhz = 1;
@@ -332,16 +329,16 @@ void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
pCapELE->ASCap = 0;
if (bAssoc) {
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS15)
pCapELE->MCS[1] &= 0x7f;
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_MCS14)
pCapELE->MCS[1] &= 0xbf;
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_ALL_2SS)
pCapELE->MCS[1] &= 0x00;
- if (pHT->IOTAction & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
+ if (pHT->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
pCapELE->ShortGI40Mhz = 0;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
@@ -377,7 +374,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
pHTInfoEle->RIFS = 0;
pHTInfoEle->PSMPAccessOnly = 0;
pHTInfoEle->SrvIntGranularity = 0;
- pHTInfoEle->OptMode = pHT->CurrentOpMode;
+ pHTInfoEle->OptMode = pHT->current_op_mode;
pHTInfoEle->NonGFDevPresent = 0;
pHTInfoEle->DualBeacon = 0;
pHTInfoEle->SecondaryBeacon = 0;
@@ -506,7 +503,7 @@ static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
}
void HTSetConnectBwMode(struct rtllib_device *ieee,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void HTOnAssocRsp(struct rtllib_device *ieee)
@@ -543,7 +540,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
#endif
HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
(enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
- pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
+ pHTInfo->cur_tx_bw40mhz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
true : false);
pHTInfo->bCurShortGI20MHz = ((pHTInfo->bRegShortGI20MHz) ?
@@ -574,7 +571,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
pHTInfo->bCurrentAMPDUEnable = false;
}
- if (!pHTInfo->bRegRT2RTAggregation) {
+ if (!pHTInfo->reg_rt2rt_aggregation) {
if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
pHTInfo->CurrentAMPDUFactor =
pPeerHTCap->MaxRxAMPDUFactor;
@@ -597,15 +594,14 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
}
}
if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
+ pHTInfo->current_mpdu_density = pHTInfo->MPDU_Density;
else
- pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
- if (pHTInfo->IOTAction & HT_IOT_ACT_TX_USE_AMSDU_8K) {
+ pHTInfo->current_mpdu_density = pPeerHTCap->MPDUDensity;
+ if (pHTInfo->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
pHTInfo->bCurrentAMPDUEnable = false;
pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
- pHTInfo->ForcedAMSDUMaxSize = 7935;
}
- pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;
+ pHTInfo->cur_rx_reorder_enable = pHTInfo->reg_rx_reorder_enable;
if (pPeerHTCap->MCS[0] == 0)
pPeerHTCap->MCS[0] = 0xff;
@@ -614,8 +610,8 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);
- pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
- if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
+ pHTInfo->peer_mimo_ps = pPeerHTCap->MimoPwrSave;
+ if (pHTInfo->peer_mimo_ps == MIMO_PS_STATIC)
pMcsFilter = MCS_FILTER_1SS;
else
pMcsFilter = MCS_FILTER_ALL;
@@ -623,7 +619,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
ieee->dot11HTOperationalRateSet, pMcsFilter);
ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
+ pHTInfo->current_op_mode = pPeerHTInfo->OptMode;
}
void HTInitializeHTInfo(struct rtllib_device *ieee)
@@ -633,17 +629,17 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
pHTInfo->bCurrentHTSupport = false;
pHTInfo->bCurBW40MHz = false;
- pHTInfo->bCurTxBW40MHz = false;
+ pHTInfo->cur_tx_bw40mhz = false;
pHTInfo->bCurShortGI20MHz = false;
pHTInfo->bCurShortGI40MHz = false;
- pHTInfo->bForcedShortGI = false;
+ pHTInfo->forced_short_gi = false;
pHTInfo->bCurSuppCCK = true;
pHTInfo->bCurrent_AMSDU_Support = false;
pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
- pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
+ pHTInfo->current_mpdu_density = pHTInfo->MPDU_Density;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
memset((void *)(&(pHTInfo->SelfHTCap)), 0,
@@ -655,17 +651,17 @@ void HTInitializeHTInfo(struct rtllib_device *ieee)
memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0,
sizeof(pHTInfo->PeerHTInfoBuf));
- pHTInfo->bSwBwInProgress = false;
+ pHTInfo->sw_bw_in_progress = false;
pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->current_rt2rt_aggregation = false;
+ pHTInfo->current_rt2rt_long_slot_time = false;
pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
pHTInfo->IOTPeer = 0;
- pHTInfo->IOTAction = 0;
- pHTInfo->IOTRaFunc = 0;
+ pHTInfo->iot_action = 0;
+ pHTInfo->iot_ra_func = 0;
{
u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);
@@ -717,51 +713,51 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
pNetwork->bssht.bd_ht_info_buf,
pNetwork->bssht.bd_ht_info_len);
- if (pHTInfo->bRegRT2RTAggregation) {
- pHTInfo->bCurrentRT2RTAggregation =
+ if (pHTInfo->reg_rt2rt_aggregation) {
+ pHTInfo->current_rt2rt_aggregation =
pNetwork->bssht.bd_rt2rt_aggregation;
- pHTInfo->bCurrentRT2RTLongSlotTime =
+ pHTInfo->current_rt2rt_long_slot_time =
pNetwork->bssht.bd_rt2rt_long_slot_time;
pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.rt2rt_ht_mode;
} else {
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->current_rt2rt_aggregation = false;
+ pHTInfo->current_rt2rt_long_slot_time = false;
pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
}
HTIOTPeerDetermine(ieee);
- pHTInfo->IOTAction = 0;
+ pHTInfo->iot_action = 0;
bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_MCS14;
bIOTAction = HTIOTActIsDisableMCS15(ieee);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_MCS15;
bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_ALL_2SS;
bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
+ pHTInfo->iot_action |= HT_IOT_ACT_DISABLE_EDCA_TURBO;
bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
+ pHTInfo->iot_action |= HT_IOT_ACT_MGNT_USE_CCK_6M;
bIOTAction = HTIOTActIsCCDFsync(ieee);
if (bIOTAction)
- pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
+ pHTInfo->iot_action |= HT_IOT_ACT_CDD_FSYNC;
} else {
pHTInfo->bCurrentHTSupport = false;
- pHTInfo->bCurrentRT2RTAggregation = false;
- pHTInfo->bCurrentRT2RTLongSlotTime = false;
+ pHTInfo->current_rt2rt_aggregation = false;
+ pHTInfo->current_rt2rt_long_slot_time = false;
pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
- pHTInfo->IOTAction = 0;
- pHTInfo->IOTRaFunc = 0;
+ pHTInfo->iot_action = 0;
+ pHTInfo->iot_ra_func = 0;
}
}
@@ -774,7 +770,7 @@ void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
if (pHTInfo->bCurrentHTSupport) {
if (pNetwork->bssht.bd_ht_info_len != 0)
- pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
+ pHTInfo->current_op_mode = pPeerHTInfo->OptMode;
}
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
@@ -801,7 +797,7 @@ void HTUseDefaultSetting(struct rtllib_device *ieee)
pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
- pHTInfo->CurrentMPDUDensity = pHTInfo->CurrentMPDUDensity;
+ pHTInfo->current_mpdu_density = pHTInfo->current_mpdu_density;
HTFilterMCSRate(ieee, ieee->Regdot11TxHTOperationalRateSet,
ieee->dot11HTOperationalRateSet);
@@ -850,11 +846,11 @@ static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
HT_EXTCHNL_OFFSET_NO_EXT);
}
- pHTInfo->bSwBwInProgress = false;
+ pHTInfo->sw_bw_in_progress = false;
}
void HTSetConnectBwMode(struct rtllib_device *ieee,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
@@ -863,13 +859,13 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
return;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
- Bandwidth = HT_CHANNEL_WIDTH_20;
+ bandwidth = HT_CHANNEL_WIDTH_20;
- if (pHTInfo->bSwBwInProgress) {
- pr_info("%s: bSwBwInProgress!!\n", __func__);
+ if (pHTInfo->sw_bw_in_progress) {
+ pr_info("%s: sw_bw_in_progress!!\n", __func__);
return;
}
- if (Bandwidth == HT_CHANNEL_WIDTH_20_40) {
+ if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
if (ieee->current_network.channel < 2 &&
Offset == HT_EXTCHNL_OFFSET_LOWER)
Offset = HT_EXTCHNL_OFFSET_NO_EXT;
@@ -889,7 +885,7 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "%s():pHTInfo->bCurBW40MHz:%x\n", __func__,
pHTInfo->bCurBW40MHz);
- pHTInfo->bSwBwInProgress = true;
+ pHTInfo->sw_bw_in_progress = true;
HTSetConnectBwModeCallback(ieee);
}
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 34b00a76b6bd..05c7e822f372 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -83,7 +83,7 @@ static void RxPktPendingTimeout(struct timer_list *t)
if (bPktInBuf && (pRxTs->rx_timeout_indicate_seq == 0xffff)) {
pRxTs->rx_timeout_indicate_seq = pRxTs->rx_indicate_seq;
mod_timer(&pRxTs->rx_pkt_pending_timer, jiffies +
- msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime)
+ msecs_to_jiffies(ieee->pHTInfo->rx_reorder_pending_time)
);
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 0ecd81a81866..3c72ed2a30a4 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1244,9 +1244,9 @@ enum ips_callback_function {
};
enum rt_rf_power_state {
- eRfOn,
- eRfSleep,
- eRfOff
+ rf_on,
+ rf_sleep,
+ rf_off
};
struct rt_pwr_save_ctrl {
@@ -1434,8 +1434,8 @@ struct rtllib_device {
bool FirstIe_InScan;
bool be_scan_inprogress;
bool beinretry;
- enum rt_rf_power_state eRFPowerState;
- RT_RF_CHANGE_SOURCE RfOffReason;
+ enum rt_rf_power_state rf_power_state;
+ RT_RF_CHANGE_SOURCE rf_off_reason;
bool is_set_key;
bool wx_set_enc;
struct rt_hi_throughput *pHTInfo;
@@ -1765,7 +1765,7 @@ struct rtllib_device {
/* check whether Tx hw resource available */
short (*check_nic_enough_desc)(struct net_device *dev, int queue_index);
void (*SetBWModeHandler)(struct net_device *dev,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
void (*SetWirelessMode)(struct net_device *dev, u8 wireless_mode);
@@ -1938,7 +1938,7 @@ int rtllib_encrypt_fragment(
struct sk_buff *frag,
int hdr_len);
-int rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev);
void rtllib_txb_free(struct rtllib_txb *txb);
/* rtllib_rx.c */
@@ -2073,7 +2073,7 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
#define MAX_RECEIVE_BUFFER_SIZE 9100
void HTSetConnectBwMode(struct rtllib_device *ieee,
- enum ht_channel_width Bandwidth,
+ enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
void HTUpdateDefaultSetting(struct rtllib_device *ieee);
void HTConstructCapabilityElement(struct rtllib_device *ieee,
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index e3e8302945eb..f6b23defe225 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -46,10 +46,4 @@ enum RTL_DEBUG {
COMP_ERR = BIT(31)
};
-#define RT_TRACE(component, x, args...) \
-do { \
- if (rt_global_debug_component & component) \
- printk(KERN_DEBUG DRV_NAME ":" x "\n", ##args);\
-} while (0)
-
#endif
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index abe5c153f74e..46d75e925ee9 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -569,7 +569,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
struct rx_reorder_entry *pReorderEntry = NULL;
- u8 WinSize = pHTInfo->RxReorderWinSize;
+ u8 WinSize = pHTInfo->rx_reorder_win_size;
u16 WinEnd = 0;
u8 index = 0;
bool bMatchWinStart = false, bPktInBuf = false;
@@ -591,7 +591,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
netdev_dbg(ieee->dev,
"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
pTS->rx_indicate_seq, SeqNum);
- pHTInfo->RxReorderDropCounter++;
+ pHTInfo->rx_reorder_drop_counter++;
{
int i;
@@ -755,7 +755,7 @@ static void RxReorderIndicatePacket(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "%s(): SET rx timeout timer\n", __func__);
pTS->rx_timeout_indicate_seq = pTS->rx_indicate_seq;
mod_timer(&pTS->rx_pkt_pending_timer, jiffies +
- msecs_to_jiffies(pHTInfo->RxReorderPendingTime));
+ msecs_to_jiffies(pHTInfo->rx_reorder_pending_time));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
@@ -924,7 +924,7 @@ static int rtllib_rx_check_duplicate(struct rtllib_device *ieee,
sc = le16_to_cpu(hdr->seq_ctl);
frag = WLAN_GET_SEQ_FRAG(sc);
- if (!ieee->pHTInfo->bCurRxReorderEnable ||
+ if (!ieee->pHTInfo->cur_rx_reorder_enable ||
!ieee->current_network.qos_data.active ||
!IsDataFrame(skb->data) ||
IsLegacyDataFrame(skb->data)) {
@@ -1442,7 +1442,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
}
/* Indicate packets to upper layer or Rx Reorder */
- if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL || bToOtherSTA)
+ if (!ieee->pHTInfo->cur_rx_reorder_enable || pTS == NULL || bToOtherSTA)
rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
else
RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b5f4d35954a9..1a3ca3e57623 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -180,7 +180,7 @@ static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
u8 rate;
- if (pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
+ if (pHTInfo->iot_action & HT_IOT_ACT_MGNT_USE_CCK_6M)
rate = 0x0c;
else
rate = ieee->basic_rate & 0x7f;
@@ -586,9 +586,9 @@ static void rtllib_softmac_scan_wq(void *data)
mutex_lock(&ieee->scan_mutex);
- if (ieee->eRFPowerState == eRfOff) {
+ if (ieee->rf_power_state == rf_off) {
netdev_info(ieee->dev,
- "======>%s():rf state is eRfOff, return\n",
+ "======>%s():rf state is rf_off, return\n",
__func__);
goto out1;
}
@@ -865,10 +865,10 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
HTConstructInfoElement(ieee, tmp_ht_info_buf, &tmp_ht_info_len,
encrypt);
- if (pHTInfo->bRegRT2RTAggregation) {
- tmp_generic_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ if (pHTInfo->reg_rt2rt_aggregation) {
+ tmp_generic_ie_buf = ieee->pHTInfo->sz_rt2rt_agg_buf;
tmp_generic_ie_len =
- sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ sizeof(ieee->pHTInfo->sz_rt2rt_agg_buf);
HTConstructRT2RTAggElement(ieee, tmp_generic_ie_buf,
&tmp_generic_ie_len);
}
@@ -1189,10 +1189,10 @@ rtllib_association_req(struct rtllib_network *beacon,
ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
HTConstructCapabilityElement(ieee, ht_cap_buf, &ht_cap_len,
encrypt, true);
- if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
- realtek_ie_buf = ieee->pHTInfo->szRT2RTAggBuffer;
+ if (ieee->pHTInfo->current_rt2rt_aggregation) {
+ realtek_ie_buf = ieee->pHTInfo->sz_rt2rt_agg_buf;
realtek_ie_len =
- sizeof(ieee->pHTInfo->szRT2RTAggBuffer);
+ sizeof(ieee->pHTInfo->sz_rt2rt_agg_buf);
HTConstructRT2RTAggElement(ieee, realtek_ie_buf,
&realtek_ie_len);
}
@@ -1368,7 +1368,7 @@ rtllib_association_req(struct rtllib_network *beacon,
tag += ht_cap_len - 2;
}
- if (ieee->pHTInfo->bCurrentRT2RTAggregation) {
+ if (ieee->pHTInfo->current_rt2rt_aggregation) {
tag = skb_put(skb, realtek_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
*tag++ = realtek_ie_len - 2;
@@ -1584,13 +1584,8 @@ static void rtllib_associate_procedure_wq(void *data)
ieee->data_hard_stop(ieee->dev);
rtllib_stop_scan(ieee);
- RT_TRACE(COMP_DBG, "===>%s(), chan:%d\n", __func__,
- ieee->current_network.channel);
HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
- if (ieee->eRFPowerState == eRfOff) {
- RT_TRACE(COMP_DBG,
- "=============>%s():Rf state is eRfOff, schedule ipsleave wq again,return\n",
- __func__);
+ if (ieee->rf_power_state == rf_off) {
if (ieee->rtllib_ips_leave_wq != NULL)
ieee->rtllib_ips_leave_wq(ieee->dev);
mutex_unlock(&ieee->wx_mutex);
@@ -1611,7 +1606,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
short apset, ssidset, ssidbroad, apmatch, ssidmatch;
- /* we are interested in new new only if we are not associated
+ /* we are interested in new only if we are not associated
* and we are not associating / authenticating
*/
if (ieee->state != RTLLIB_NOLINK)
@@ -1899,7 +1894,7 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
((ieee->mode == IEEE_G) &&
(ieee->current_network.mode == IEEE_N_24G) &&
(ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT-1)))) {
- ieee->pHTInfo->IOTAction |= HT_IOT_ACT_PURE_N_MODE;
+ ieee->pHTInfo->iot_action |= HT_IOT_ACT_PURE_N_MODE;
} else {
ieee->AsocRetryCount = 0;
}
@@ -2062,9 +2057,6 @@ static inline void rtllib_sta_ps(struct work_struct *work)
if ((ieee->ps == RTLLIB_PS_DISABLED ||
ieee->iw_mode != IW_MODE_INFRA ||
ieee->state != RTLLIB_LINKED)) {
- RT_TRACE(COMP_DBG,
- "=====>%s(): no need to ps,wake up!! ieee->ps is %d, ieee->iw_mode is %d, ieee->state is %d\n",
- __func__, ieee->ps, ieee->iw_mode, ieee->state);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
rtllib_sta_wakeup(ieee, 1);
@@ -2109,7 +2101,7 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
{
if (ieee->sta_sleep == LPS_IS_WAKE) {
if (nl) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
ieee->ack_tx_to_ieee = 1;
rtllib_sta_ps_send_null_frame(ieee, 0);
@@ -2125,7 +2117,7 @@ static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
if (ieee->sta_sleep == LPS_IS_SLEEP)
ieee->sta_wake_up(ieee->dev);
if (nl) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_NULL_DATA_POWER_SAVING) {
ieee->ack_tx_to_ieee = 1;
rtllib_sta_ps_send_null_frame(ieee, 0);
@@ -2160,7 +2152,7 @@ void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
if ((ieee->sta_sleep == LPS_IS_WAKE) && !success) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_NULL_DATA_POWER_SAVING)
rtllib_sta_ps_send_null_frame(ieee, 0);
else
@@ -2304,7 +2296,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
if (ieee->open_wep || !challenge) {
ieee->state = RTLLIB_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
- if (!(ieee->pHTInfo->IOTAction & HT_IOT_ACT_PURE_N_MODE)) {
+ if (!(ieee->pHTInfo->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
if (IsHTHalfNmodeAPs(ieee)) {
bSupportNmode = true;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 70a62ca0f69a..f9589c5b62ba 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -364,8 +364,6 @@ void rtllib_wx_sync_scan_wq(void *data)
b40M = 1;
chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset;
bandwidth = (enum ht_channel_width)ieee->pHTInfo->bCurBW40MHz;
- RT_TRACE(COMP_DBG, "Scan in 40M, force to 20M first:%d, %d\n",
- chan_offset, bandwidth);
ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
HT_EXTCHNL_OFFSET_NO_EXT);
}
@@ -373,7 +371,6 @@ void rtllib_wx_sync_scan_wq(void *data)
rtllib_start_scan_syncro(ieee, 0);
if (b40M) {
- RT_TRACE(COMP_DBG, "Scan in 20M, back to 40M\n");
if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
ieee->set_chan(ieee->dev, chan + 2);
else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER)
@@ -571,14 +568,11 @@ int rtllib_wx_set_power(struct rtllib_device *ieee,
mutex_lock(&ieee->wx_mutex);
if (wrqu->power.disabled) {
- RT_TRACE(COMP_DBG, "===>%s(): power disable\n", __func__);
ieee->ps = RTLLIB_PS_DISABLED;
goto exit;
}
if (wrqu->power.flags & IW_POWER_TIMEOUT) {
ieee->ps_timeout = wrqu->power.value / 1000;
- RT_TRACE(COMP_DBG, "===>%s():ps_timeout is %d\n", __func__,
- ieee->ps_timeout);
}
if (wrqu->power.flags & IW_POWER_PERIOD)
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 42f81b23a144..e307020580a0 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -284,7 +284,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
return;
- if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
+ if (pHTInfo->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION)
return;
if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
@@ -315,7 +315,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (ieee->iw_mode == IW_MODE_INFRA) {
tcb_desc->bAMPDUEnable = true;
tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
- tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
+ tcb_desc->ampdu_density = pHTInfo->current_mpdu_density;
}
}
FORCED_AGG_SETTING:
@@ -325,8 +325,8 @@ FORCED_AGG_SETTING:
case HT_AGG_FORCE_ENABLE:
tcb_desc->bAMPDUEnable = true;
- tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
- tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
+ tcb_desc->ampdu_density = pHTInfo->forced_mpdu_density;
+ tcb_desc->ampdu_factor = pHTInfo->forced_ampdu_factor;
break;
case HT_AGG_FORCE_DISABLE:
@@ -358,7 +358,7 @@ static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
return;
- if (pHTInfo->bForcedShortGI) {
+ if (pHTInfo->forced_short_gi) {
tcb_desc->bUseShortGI = true;
return;
}
@@ -384,7 +384,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
if ((tcb_desc->data_rate & 0x80) == 0)
return;
- if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
+ if (pHTInfo->bCurBW40MHz && pHTInfo->cur_tx_bw40mhz &&
!ieee->bandwidth_auto_switch.bforced_tx20Mhz)
tcb_desc->bPacketBW = true;
}
@@ -422,12 +422,12 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
pHTInfo = ieee->pHTInfo;
while (true) {
- if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
+ if (pHTInfo->iot_action & HT_IOT_ACT_FORCED_CTS2SELF) {
tcb_desc->bCTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
tcb_desc->bRTSEnable = true;
break;
- } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
+ } else if (pHTInfo->iot_action & (HT_IOT_ACT_FORCED_RTS |
HT_IOT_ACT_PURE_N_MODE)) {
tcb_desc->bRTSEnable = true;
tcb_desc->rts_rate = MGN_24M;
@@ -440,7 +440,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
break;
}
if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
- u8 HTOpMode = pHTInfo->CurrentOpMode;
+ u8 HTOpMode = pHTInfo->current_op_mode;
if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
HTOpMode == 3)) ||
@@ -885,7 +885,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->priority = skb->priority;
if (ether_type == ETH_P_PAE) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
MgntQuery_TxRateExcludeCCKRates(ieee);
@@ -910,7 +910,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->data_rate = rtllib_current_rate(ieee);
if (bdhcp) {
- if (ieee->pHTInfo->IOTAction &
+ if (ieee->pHTInfo->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
MgntQuery_TxRateExcludeCCKRates(ieee);
@@ -962,9 +962,9 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
-int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
memset(skb->cb, 0, sizeof(skb->cb));
- return rtllib_xmit_inter(skb, dev);
+ return rtllib_xmit_inter(skb, dev) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
EXPORT_SYMBOL(rtllib_xmit);
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index 0be7426b6ebc..d32dfd89a606 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -8,6 +8,7 @@ ccflags-y += -DTHOMAS_BEACON -DTHOMAS_TASKLET -DTHOMAS_SKB -DTHOMAS_TURBO
r8192u_usb-y := r8192U_core.o r8180_93cx6.o r8192U_wx.o \
r8190_rtl8256.o r819xU_phy.o r819xU_firmware.o \
r819xU_cmdpkt.o r8192U_dm.o r819xU_firmware_img.o \
+ r8192U_debugfs.o \
ieee80211/ieee80211_crypt.o \
ieee80211/ieee80211_crypt_tkip.o \
ieee80211/ieee80211_crypt_ccmp.o \
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index b577f9c81f85..9cd4b1896745 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2178,7 +2178,7 @@ int ieee80211_set_encryption(struct ieee80211_device *ieee);
int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
struct sk_buff *frag, int hdr_len);
-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
void ieee80211_txb_free(struct ieee80211_txb *txb);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 8602e3a6c837..e4b6454809a0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -526,7 +526,7 @@ static void ieee80211_query_seqnum(struct ieee80211_device *ieee,
}
}
-int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee80211_device *ieee = netdev_priv(dev);
struct ieee80211_txb *txb = NULL;
@@ -822,13 +822,13 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
stats->tx_packets++;
stats->tx_bytes += __le16_to_cpu(txb->payload_size);
- return 0;
+ return NETDEV_TX_OK;
}
ieee80211_txb_free(txb);
}
}
- return 0;
+ return NETDEV_TX_OK;
failed:
spin_unlock_irqrestore(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 1942cb849374..ff0ada00bf41 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1061,6 +1061,9 @@ typedef struct r8192_priv {
struct delayed_work gpio_change_rf_wq;
struct delayed_work initialgain_operate_wq;
struct workqueue_struct *priv_wq;
+
+ /* debugfs */
+ struct dentry *debugfs_dir;
} r8192_priv;
/* For rtl8187B */
@@ -1117,4 +1120,10 @@ void EnableHWSecurityConfig8192(struct net_device *dev);
void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
+void rtl8192_debugfs_init_one(struct net_device *dev);
+void rtl8192_debugfs_exit_one(struct net_device *dev);
+void rtl8192_debugfs_rename_one(struct net_device *dev);
+void rtl8192_debugfs_init(void);
+void rtl8192_debugfs_exit(void);
+
#endif
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 2ca925f35830..0a60ef20107c 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -56,7 +56,6 @@ double __extendsfdf2(float a)
#include "r8192U_dm.h"
#include <linux/usb.h>
#include <linux/slab.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* FIXME: check if 2.6.7 is ok */
@@ -453,179 +452,6 @@ static void rtl8192_restart(struct work_struct *work);
static void watch_dog_timer_callback(struct timer_list *t);
/****************************************************************************
- * -----------------------------PROCFS STUFF-------------------------
- ****************************************************************************/
-
-static struct proc_dir_entry *rtl8192_proc;
-
-static int __maybe_unused proc_get_stats_ap(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- struct ieee80211_device *ieee = priv->ieee80211;
- struct ieee80211_network *target;
-
- list_for_each_entry(target, &ieee->network_list, list) {
- const char *wpa = "non_WPA";
-
- if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
- wpa = "WPA";
-
- seq_printf(m, "%s %s\n", target->ssid, wpa);
- }
-
- return 0;
-}
-
-static int __maybe_unused proc_get_registers(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- int i, n, max = 0xff;
- u8 byte_rd;
-
- seq_puts(m, "\n####################page 0##################\n ");
-
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x000 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_puts(m, "\n####################page 1##################\n ");
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x100 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_puts(m, "\n####################page 3##################\n ");
- for (n = 0; n <= max;) {
- seq_printf(m, "\nD: %2x > ", n);
-
- for (i = 0; i < 16 && n <= max; i++, n++) {
- read_nic_byte(dev, 0x300 | n, &byte_rd);
- seq_printf(m, "%2x ", byte_rd);
- }
- }
-
- seq_putc(m, '\n');
- return 0;
-}
-
-static int __maybe_unused proc_get_stats_tx(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
- seq_printf(m,
- "TX VI priority ok int: %lu\n"
- "TX VI priority error int: %lu\n"
- "TX VO priority ok int: %lu\n"
- "TX VO priority error int: %lu\n"
- "TX BE priority ok int: %lu\n"
- "TX BE priority error int: %lu\n"
- "TX BK priority ok int: %lu\n"
- "TX BK priority error int: %lu\n"
- "TX MANAGE priority ok int: %lu\n"
- "TX MANAGE priority error int: %lu\n"
- "TX BEACON priority ok int: %lu\n"
- "TX BEACON priority error int: %lu\n"
- "TX queue resume: %lu\n"
- "TX queue stopped?: %d\n"
- "TX fifo overflow: %lu\n"
- "TX VI queue: %d\n"
- "TX VO queue: %d\n"
- "TX BE queue: %d\n"
- "TX BK queue: %d\n"
- "TX VI dropped: %lu\n"
- "TX VO dropped: %lu\n"
- "TX BE dropped: %lu\n"
- "TX BK dropped: %lu\n"
- "TX total data packets %lu\n",
- priv->stats.txviokint,
- priv->stats.txvierr,
- priv->stats.txvookint,
- priv->stats.txvoerr,
- priv->stats.txbeokint,
- priv->stats.txbeerr,
- priv->stats.txbkokint,
- priv->stats.txbkerr,
- priv->stats.txmanageokint,
- priv->stats.txmanageerr,
- priv->stats.txbeaconokint,
- priv->stats.txbeaconerr,
- priv->stats.txresumed,
- netif_queue_stopped(dev),
- priv->stats.txoverflow,
- atomic_read(&(priv->tx_pending[VI_PRIORITY])),
- atomic_read(&(priv->tx_pending[VO_PRIORITY])),
- atomic_read(&(priv->tx_pending[BE_PRIORITY])),
- atomic_read(&(priv->tx_pending[BK_PRIORITY])),
- priv->stats.txvidrop,
- priv->stats.txvodrop,
- priv->stats.txbedrop,
- priv->stats.txbkdrop,
- priv->stats.txdatapkt
- );
-
- return 0;
-}
-
-static int __maybe_unused proc_get_stats_rx(struct seq_file *m, void *v)
-{
- struct net_device *dev = m->private;
- struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
-
- seq_printf(m,
- "RX packets: %lu\n"
- "RX urb status error: %lu\n"
- "RX invalid urb error: %lu\n",
- priv->stats.rxoktotal,
- priv->stats.rxstaterr,
- priv->stats.rxurberr);
-
- return 0;
-}
-
-static void rtl8192_proc_module_init(void)
-{
- RT_TRACE(COMP_INIT, "Initializing proc filesystem");
- rtl8192_proc = proc_mkdir(RTL819XU_MODULE_NAME, init_net.proc_net);
-}
-
-static void rtl8192_proc_init_one(struct net_device *dev)
-{
- struct proc_dir_entry *dir;
-
- if (!rtl8192_proc)
- return;
-
- dir = proc_mkdir_data(dev->name, 0, rtl8192_proc, dev);
- if (!dir)
- return;
-
- proc_create_single("stats-rx", S_IFREG | 0444, dir,
- proc_get_stats_rx);
- proc_create_single("stats-tx", S_IFREG | 0444, dir,
- proc_get_stats_tx);
- proc_create_single("stats-ap", S_IFREG | 0444, dir,
- proc_get_stats_ap);
- proc_create_single("registers", S_IFREG | 0444, dir,
- proc_get_registers);
-}
-
-static void rtl8192_proc_remove_one(struct net_device *dev)
-{
- remove_proc_subtree(dev->name, rtl8192_proc);
-}
-
-/****************************************************************************
* -----------------------------MISC STUFF-------------------------
*****************************************************************************/
@@ -4730,7 +4556,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
goto fail2;
RT_TRACE(COMP_INIT, "dev name=======> %s\n", dev->name);
- rtl8192_proc_init_one(dev);
+ rtl8192_debugfs_init_one(dev);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -4764,10 +4590,11 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
struct net_device *dev = usb_get_intfdata(intf);
struct r8192_priv *priv = ieee80211_priv(dev);
- unregister_netdev(dev);
RT_TRACE(COMP_DOWN, "=============>wlan driver to be removed\n");
- rtl8192_proc_remove_one(dev);
+ rtl8192_debugfs_exit_one(dev);
+
+ unregister_netdev(dev);
rtl8192_down(dev);
kfree(priv->pFirmware);
@@ -4779,6 +4606,30 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
RT_TRACE(COMP_DOWN, "wlan driver removed\n");
}
+static int rtl8192_usb_netdev_event(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(data);
+
+ if (netdev->netdev_ops != &rtl8192_netdev_ops)
+ goto out;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ rtl8192_debugfs_rename_one(netdev);
+ break;
+ default:
+ break;
+ }
+
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rtl8192_usb_netdev_notifier = {
+ .notifier_call = rtl8192_usb_netdev_event,
+};
+
static int __init rtl8192_usb_module_init(void)
{
int ret;
@@ -4788,10 +4639,17 @@ static int __init rtl8192_usb_module_init(void)
RT_TRACE(COMP_INIT, "Initializing module");
RT_TRACE(COMP_INIT, "Wireless extensions version %d", WIRELESS_EXT);
+ ret = register_netdevice_notifier(&rtl8192_usb_netdev_notifier);
+ if (ret) {
+ pr_err("register_netdevice_notifier failed %d\n", ret);
+ return ret;
+ }
+
+ rtl8192_debugfs_init();
ret = ieee80211_debug_init();
if (ret) {
pr_err("ieee80211_debug_init() failed %d\n", ret);
- return ret;
+ goto debugfs_exit;
}
ret = ieee80211_crypto_init();
@@ -4818,14 +4676,12 @@ static int __init rtl8192_usb_module_init(void)
goto crypto_ccmp_exit;
}
- rtl8192_proc_module_init();
ret = usb_register(&rtl8192_usb_driver);
if (ret)
- goto rtl8192_proc_module_exit;
+ goto crypto_wep_exit;
return ret;
-rtl8192_proc_module_exit:
- remove_proc_entry(RTL819XU_MODULE_NAME, init_net.proc_net);
+crypto_wep_exit:
ieee80211_crypto_wep_exit();
crypto_ccmp_exit:
ieee80211_crypto_ccmp_exit();
@@ -4835,18 +4691,22 @@ crypto_exit:
ieee80211_crypto_deinit();
debug_exit:
ieee80211_debug_exit();
+debugfs_exit:
+ rtl8192_debugfs_exit();
+ unregister_netdevice_notifier(&rtl8192_usb_netdev_notifier);
return ret;
}
static void __exit rtl8192_usb_module_exit(void)
{
usb_deregister(&rtl8192_usb_driver);
- remove_proc_entry(RTL819XU_MODULE_NAME, init_net.proc_net);
ieee80211_crypto_wep_exit();
ieee80211_crypto_ccmp_exit();
ieee80211_crypto_tkip_exit();
ieee80211_crypto_deinit();
ieee80211_debug_exit();
+ rtl8192_debugfs_exit();
+ unregister_netdevice_notifier(&rtl8192_usb_netdev_notifier);
RT_TRACE(COMP_DOWN, "Exiting");
}
diff --git a/drivers/staging/rtl8192u/r8192U_debugfs.c b/drivers/staging/rtl8192u/r8192U_debugfs.c
new file mode 100644
index 000000000000..fe8ef72506ee
--- /dev/null
+++ b/drivers/staging/rtl8192u/r8192U_debugfs.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/****************************************************************************
+ * -----------------------------DEBUGFS STUFF-------------------------
+ ****************************************************************************/
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "r8192U.h"
+
+#define KBUILD_MODNAME "r8192u_usb"
+
+static int rtl8192_usb_stats_ap_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct ieee80211_device *ieee = priv->ieee80211;
+ struct ieee80211_network *target;
+
+ list_for_each_entry(target, &ieee->network_list, list) {
+ const char *wpa = "non_WPA";
+
+ if (target->wpa_ie_len > 0 || target->rsn_ie_len > 0)
+ wpa = "WPA";
+
+ seq_printf(m, "%s %s\n", target->ssid, wpa);
+ }
+
+ return 0;
+}
+
+static int rtl8192_usb_registers_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ int i, n, max = 0xff;
+ u8 byte_rd;
+
+ seq_puts(m, "\n####################page 0##################\n ");
+
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
+
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x000 | n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
+ }
+
+ seq_puts(m, "\n####################page 1##################\n ");
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
+
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x100 | n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
+ }
+
+ seq_puts(m, "\n####################page 3##################\n ");
+ for (n = 0; n <= max;) {
+ seq_printf(m, "\nD: %2x > ", n);
+
+ for (i = 0; i < 16 && n <= max; i++, n++) {
+ read_nic_byte(dev, 0x300 | n, &byte_rd);
+ seq_printf(m, "%2x ", byte_rd);
+ }
+ }
+
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static int rtl8192_usb_stats_tx_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ seq_printf(m,
+ "TX VI priority ok int: %lu\n"
+ "TX VI priority error int: %lu\n"
+ "TX VO priority ok int: %lu\n"
+ "TX VO priority error int: %lu\n"
+ "TX BE priority ok int: %lu\n"
+ "TX BE priority error int: %lu\n"
+ "TX BK priority ok int: %lu\n"
+ "TX BK priority error int: %lu\n"
+ "TX MANAGE priority ok int: %lu\n"
+ "TX MANAGE priority error int: %lu\n"
+ "TX BEACON priority ok int: %lu\n"
+ "TX BEACON priority error int: %lu\n"
+ "TX queue resume: %lu\n"
+ "TX queue stopped?: %d\n"
+ "TX fifo overflow: %lu\n"
+ "TX VI queue: %d\n"
+ "TX VO queue: %d\n"
+ "TX BE queue: %d\n"
+ "TX BK queue: %d\n"
+ "TX VI dropped: %lu\n"
+ "TX VO dropped: %lu\n"
+ "TX BE dropped: %lu\n"
+ "TX BK dropped: %lu\n"
+ "TX total data packets %lu\n",
+ priv->stats.txviokint,
+ priv->stats.txvierr,
+ priv->stats.txvookint,
+ priv->stats.txvoerr,
+ priv->stats.txbeokint,
+ priv->stats.txbeerr,
+ priv->stats.txbkokint,
+ priv->stats.txbkerr,
+ priv->stats.txmanageokint,
+ priv->stats.txmanageerr,
+ priv->stats.txbeaconokint,
+ priv->stats.txbeaconerr,
+ priv->stats.txresumed,
+ netif_queue_stopped(dev),
+ priv->stats.txoverflow,
+ atomic_read(&(priv->tx_pending[VI_PRIORITY])),
+ atomic_read(&(priv->tx_pending[VO_PRIORITY])),
+ atomic_read(&(priv->tx_pending[BE_PRIORITY])),
+ atomic_read(&(priv->tx_pending[BK_PRIORITY])),
+ priv->stats.txvidrop,
+ priv->stats.txvodrop,
+ priv->stats.txbedrop,
+ priv->stats.txbkdrop,
+ priv->stats.txdatapkt
+ );
+
+ return 0;
+}
+
+static int rtl8192_usb_stats_rx_show(struct seq_file *m, void *v)
+{
+ struct net_device *dev = m->private;
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ seq_printf(m,
+ "RX packets: %lu\n"
+ "RX urb status error: %lu\n"
+ "RX invalid urb error: %lu\n",
+ priv->stats.rxoktotal,
+ priv->stats.rxstaterr,
+ priv->stats.rxurberr);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_rx);
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_tx);
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_stats_ap);
+DEFINE_SHOW_ATTRIBUTE(rtl8192_usb_registers);
+
+void rtl8192_debugfs_init_one(struct net_device *dev)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct dentry *parent_dir = debugfs_lookup(KBUILD_MODNAME, NULL);
+ struct dentry *dir = debugfs_create_dir(dev->name, parent_dir);
+
+ debugfs_create_file("stats-rx", 0444, dir, dev, &rtl8192_usb_stats_rx_fops);
+ debugfs_create_file("stats-tx", 0444, dir, dev, &rtl8192_usb_stats_tx_fops);
+ debugfs_create_file("stats-ap", 0444, dir, dev, &rtl8192_usb_stats_ap_fops);
+ debugfs_create_file("registers", 0444, dir, dev, &rtl8192_usb_registers_fops);
+
+ priv->debugfs_dir = dir;
+}
+
+void rtl8192_debugfs_exit_one(struct net_device *dev)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+
+ debugfs_remove_recursive(priv->debugfs_dir);
+}
+
+void rtl8192_debugfs_rename_one(struct net_device *dev)
+{
+ struct r8192_priv *priv = ieee80211_priv(dev);
+ struct dentry *parent_dir = debugfs_lookup(KBUILD_MODNAME, NULL);
+
+ debugfs_rename(parent_dir, priv->debugfs_dir, parent_dir, dev->name);
+}
+
+void rtl8192_debugfs_init(void)
+{
+ debugfs_create_dir(KBUILD_MODNAME, NULL);
+}
+
+void rtl8192_debugfs_exit(void)
+{
+ debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL));
+}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 2326aae6709e..bb7db96ed821 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index 4a93839bf947..132afbf49dde 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -66,16 +66,16 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
{
struct ethhdr etherhdr;
struct iphdr ip_hdr;
- u16 UserPriority = 0;
+ u16 user_priority = 0;
_r8712_open_pktfile(ppktfile->pkt, ppktfile);
_r8712_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
- /* get UserPriority from IP hdr*/
+ /* get user_priority from IP hdr*/
if (pattrib->ether_type == 0x0800) {
_r8712_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
- /*UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3 ;*/
- UserPriority = ip_hdr.tos >> 5;
+ /*user_priority = (ntohs(ip_hdr.tos) >> 5) & 0x3 ;*/
+ user_priority = ip_hdr.tos >> 5;
} else {
/* "When priority processing of data frames is supported,
* a STA's SME should send EAPOL-Key frames at the highest
@@ -83,9 +83,9 @@ void r8712_set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
*/
if (pattrib->ether_type == 0x888e)
- UserPriority = 7;
+ user_priority = 7;
}
- pattrib->priority = UserPriority;
+ pattrib->priority = user_priority;
pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
pattrib->subtype = WIFI_QOS_DATA_TYPE;
}
@@ -140,7 +140,7 @@ void r8712_xmit_complete(struct _adapter *padapter, struct xmit_frame *pxframe)
pxframe->pkt = NULL;
}
-int r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
+netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
{
struct xmit_frame *xmitframe = NULL;
struct _adapter *adapter = netdev_priv(netdev);
@@ -165,11 +165,11 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *netdev)
}
xmitpriv->tx_pkts++;
xmitpriv->tx_bytes += xmitframe->attrib.last_txcmdsz;
- return 0;
+ return NETDEV_TX_OK;
_xmit_entry_drop:
if (xmitframe)
r8712_free_xmitframe(xmitpriv, xmitframe);
xmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
- return 0;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/staging/rtl8712/xmit_osdep.h b/drivers/staging/rtl8712/xmit_osdep.h
index b76021b568f8..1ad42658c883 100644
--- a/drivers/staging/rtl8712/xmit_osdep.h
+++ b/drivers/staging/rtl8712/xmit_osdep.h
@@ -34,7 +34,7 @@ struct sta_xmit_priv;
struct xmit_frame;
struct xmit_buf;
-int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev);
+netdev_tx_t r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev);
void r8712_SetFilter(struct work_struct *work);
int r8712_xmit_resource_alloc(struct _adapter *padapter,
struct xmit_buf *pxmitbuf);
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index 159ca1b9016b..590bde02058c 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -10,7 +10,6 @@ r8723bs-y = \
core/rtw_ieee80211.o \
core/rtw_mlme.o \
core/rtw_mlme_ext.o \
- core/rtw_odm.o \
core/rtw_pwrctrl.o \
core/rtw_recv.o \
core/rtw_rf.o \
@@ -33,7 +32,6 @@ r8723bs-y = \
hal/odm_DynamicTxPower.o \
hal/odm_EdcaTurboCheck.o \
hal/odm_HWConfig.o \
- hal/odm_NoiseMonitor.o \
hal/odm_RegConfig8723B.o \
hal/rtl8723b_cmd.o \
hal/rtl8723b_dm.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index b4170f64d118..d3f10a3cf972 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -161,8 +161,6 @@ static struct cmd_hdl wlancmds[] = {
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- int res = 0;
-
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
@@ -175,18 +173,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = -ENOMEM;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return -ENOMEM;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
if (!pcmdpriv->rsp_allocated_buf) {
- res = -ENOMEM;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return -ENOMEM;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
@@ -196,8 +192,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
-exit:
- return res;
+
+ return 0;
}
static void c2h_wk_callback(struct work_struct *work);
@@ -593,35 +589,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
return res;
}
-u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
-{
- struct cmd_obj *ph2c;
- struct setdatarate_parm *pbsetdataratepara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- pbsetdataratepara = rtw_zmalloc(sizeof(struct setdatarate_parm));
- if (!pbsetdataratepara) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate));
- pbsetdataratepara->mac_id = 5;
- memcpy(pbsetdataratepara->datarates, rateset, NumRates);
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
- return res;
-}
-
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
/* rtw_free_cmd_obj(pcmd); */
@@ -1140,61 +1107,6 @@ exit:
return res;
}
-u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconfig)
-{
- struct cmd_obj *pcmdobj;
- struct SetChannelPlan_param *setChannelPlan_param;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- /* check if allow software config */
- if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter)) {
- res = _FAIL;
- goto exit;
- }
-
- /* check input parameter */
- if (!rtw_is_channel_plan_valid(chplan)) {
- res = _FAIL;
- goto exit;
- }
-
- /* prepare cmd parameter */
- setChannelPlan_param = rtw_zmalloc(sizeof(struct SetChannelPlan_param));
- if (!setChannelPlan_param) {
- res = _FAIL;
- goto exit;
- }
- setChannelPlan_param->channel_plan = chplan;
-
- if (enqueue) {
- /* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (!pcmdobj) {
- kfree(setChannelPlan_param);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
- res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
- } else {
- /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- if (set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param) != H2C_SUCCESS)
- res = _FAIL;
-
- kfree(setChannelPlan_param);
- }
-
- /* do something based on res... */
- if (res == _SUCCESS)
- padapter->mlmepriv.ChannelPlan = chplan;
-
-exit:
- return res;
-}
-
static void collect_traffic_statistics(struct adapter *padapter)
{
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 68e41d99679d..3d8a64f69448 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -634,23 +634,6 @@ void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie
}
}
-u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
-{
- u8 match = false;
- u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
-
- if (!ie_ptr)
- return match;
-
- eid = ie_ptr[0];
-
- if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
- *wps_ielen = ie_ptr[1]+2;
- match = true;
- }
- return match;
-}
-
/**
* rtw_get_wps_ie - Search WPS IE from a series of IEs
* @in_ie: Address of IEs to search
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index f2242cf2dfb4..6498fd17e1d3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -2521,7 +2521,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
{
u8 issued;
int priority;
- struct sta_info *psta = NULL;
+ struct sta_info *psta;
struct ht_priv *phtpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
s32 bmcst = IS_MCAST(pattrib->ra);
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index f878b04076d8..8e74b4f47b94 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -5945,27 +5945,6 @@ int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset
return connect_allow ? _SUCCESS : _FAIL;
}
-/* Find union about ch, bw, ch_offset of all linked/linking interfaces */
-int rtw_get_ch_setting_union(struct adapter *adapter, u8 *ch, u8 *bw, u8 *offset)
-{
- struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
- struct adapter *iface;
-
- if (ch)
- *ch = 0;
- if (bw)
- *bw = CHANNEL_WIDTH_20;
- if (offset)
- *offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- iface = dvobj->padapters;
-
- if (!check_fwstate(&iface->mlmepriv, _FW_LINKED|_FW_UNDER_LINKING))
- return 0;
-
- return 1;
-}
-
u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
{
struct set_ch_parm *set_ch_parm;
diff --git a/drivers/staging/rtl8723bs/core/rtw_odm.c b/drivers/staging/rtl8723bs/core/rtw_odm.c
deleted file mode 100644
index f6b73a2a0270..000000000000
--- a/drivers/staging/rtl8723bs/core/rtw_odm.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2013 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include <drv_types.h>
-#include <rtw_debug.h>
-#include <rtw_odm.h>
-#include <hal_data.h>
-
-static const char * const odm_comp_str[] = {
- /* BIT0 */"ODM_COMP_DIG",
- /* BIT1 */"ODM_COMP_RA_MASK",
- /* BIT2 */"ODM_COMP_DYNAMIC_TXPWR",
- /* BIT3 */"ODM_COMP_FA_CNT",
- /* BIT4 */"ODM_COMP_RSSI_MONITOR",
- /* BIT5 */"ODM_COMP_CCK_PD",
- /* BIT6 */"ODM_COMP_ANT_DIV",
- /* BIT7 */"ODM_COMP_PWR_SAVE",
- /* BIT8 */"ODM_COMP_PWR_TRAIN",
- /* BIT9 */"ODM_COMP_RATE_ADAPTIVE",
- /* BIT10 */"ODM_COMP_PATH_DIV",
- /* BIT11 */"ODM_COMP_PSD",
- /* BIT12 */"ODM_COMP_DYNAMIC_PRICCA",
- /* BIT13 */"ODM_COMP_RXHP",
- /* BIT14 */"ODM_COMP_MP",
- /* BIT15 */"ODM_COMP_DYNAMIC_ATC",
- /* BIT16 */"ODM_COMP_EDCA_TURBO",
- /* BIT17 */"ODM_COMP_EARLY_MODE",
- /* BIT18 */NULL,
- /* BIT19 */NULL,
- /* BIT20 */NULL,
- /* BIT21 */NULL,
- /* BIT22 */NULL,
- /* BIT23 */NULL,
- /* BIT24 */"ODM_COMP_TX_PWR_TRACK",
- /* BIT25 */"ODM_COMP_RX_GAIN_TRACK",
- /* BIT26 */"ODM_COMP_CALIBRATION",
- /* BIT27 */NULL,
- /* BIT28 */NULL,
- /* BIT29 */NULL,
- /* BIT30 */"ODM_COMP_COMMON",
- /* BIT31 */"ODM_COMP_INIT",
-};
-
-#define RTW_ODM_COMP_MAX 32
-
-static const char * const odm_ability_str[] = {
- /* BIT0 */"ODM_BB_DIG",
- /* BIT1 */"ODM_BB_RA_MASK",
- /* BIT2 */"ODM_BB_DYNAMIC_TXPWR",
- /* BIT3 */"ODM_BB_FA_CNT",
- /* BIT4 */"ODM_BB_RSSI_MONITOR",
- /* BIT5 */"ODM_BB_CCK_PD",
- /* BIT6 */"ODM_BB_ANT_DIV",
- /* BIT7 */"ODM_BB_PWR_SAVE",
- /* BIT8 */"ODM_BB_PWR_TRAIN",
- /* BIT9 */"ODM_BB_RATE_ADAPTIVE",
- /* BIT10 */"ODM_BB_PATH_DIV",
- /* BIT11 */"ODM_BB_PSD",
- /* BIT12 */"ODM_BB_RXHP",
- /* BIT13 */"ODM_BB_ADAPTIVITY",
- /* BIT14 */"ODM_BB_DYNAMIC_ATC",
- /* BIT15 */NULL,
- /* BIT16 */"ODM_MAC_EDCA_TURBO",
- /* BIT17 */"ODM_MAC_EARLY_MODE",
- /* BIT18 */NULL,
- /* BIT19 */NULL,
- /* BIT20 */NULL,
- /* BIT21 */NULL,
- /* BIT22 */NULL,
- /* BIT23 */NULL,
- /* BIT24 */"ODM_RF_TX_PWR_TRACK",
- /* BIT25 */"ODM_RF_RX_GAIN_TRACK",
- /* BIT26 */"ODM_RF_CALIBRATION",
-};
-
-#define RTW_ODM_ABILITY_MAX 27
-
-static const char * const odm_dbg_level_str[] = {
- NULL,
- "ODM_DBG_OFF",
- "ODM_DBG_SERIOUS",
- "ODM_DBG_WARNING",
- "ODM_DBG_LOUD",
- "ODM_DBG_TRACE",
-};
-
-#define RTW_ODM_DBG_LEVEL_NUM 6
-
-void rtw_odm_dbg_comp_msg(struct adapter *adapter)
-{
- u64 dbg_comp;
- int i;
-
- rtw_hal_get_def_var(adapter, HW_DEF_ODM_DBG_FLAG, &dbg_comp);
- netdev_dbg(adapter->pnetdev, "odm.DebugComponents = 0x%016llx\n",
- dbg_comp);
- for (i = 0; i < RTW_ODM_COMP_MAX; i++) {
- if (odm_comp_str[i])
- netdev_dbg(adapter->pnetdev, "%cBIT%-2d %s\n",
- (BIT0 << i) & dbg_comp ? '+' : ' ', i,
- odm_comp_str[i]);
- }
-}
-
-inline void rtw_odm_dbg_comp_set(struct adapter *adapter, u64 comps)
-{
- rtw_hal_set_def_var(adapter, HW_DEF_ODM_DBG_FLAG, &comps);
-}
-
-void rtw_odm_dbg_level_msg(void *sel, struct adapter *adapter)
-{
- u32 dbg_level;
- int i;
-
- rtw_hal_get_def_var(adapter, HW_DEF_ODM_DBG_LEVEL, &dbg_level);
- netdev_dbg(adapter->pnetdev, "odm.DebugLevel = %u\n", dbg_level);
- for (i = 0; i < RTW_ODM_DBG_LEVEL_NUM; i++) {
- if (odm_dbg_level_str[i])
- netdev_dbg(adapter->pnetdev, "%u %s\n", i,
- odm_dbg_level_str[i]);
- }
-}
-
-inline void rtw_odm_dbg_level_set(struct adapter *adapter, u32 level)
-{
- rtw_hal_set_def_var(adapter, HW_DEF_ODM_DBG_LEVEL, &level);
-}
-
-void rtw_odm_ability_msg(void *sel, struct adapter *adapter)
-{
- u32 ability = 0;
- int i;
-
- rtw_hal_get_hwreg(adapter, HW_VAR_DM_FLAG, (u8 *)&ability);
- netdev_dbg(adapter->pnetdev, "odm.SupportAbility = 0x%08x\n", ability);
- for (i = 0; i < RTW_ODM_ABILITY_MAX; i++) {
- if (odm_ability_str[i])
- netdev_dbg(adapter->pnetdev, "%cBIT%-2d %s\n",
- (BIT0 << i) & ability ? '+' : ' ', i,
- odm_ability_str[i]);
- }
-}
-
-inline void rtw_odm_ability_set(struct adapter *adapter, u32 ability)
-{
- rtw_hal_set_hwreg(adapter, HW_VAR_DM_FLAG, (u8 *)&ability);
-}
-
-void rtw_odm_adaptivity_parm_msg(void *sel, struct adapter *adapter)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &pHalData->odmpriv;
-
- netdev_dbg(adapter->pnetdev, "%10s %16s %8s %10s %11s %14s\n",
- "TH_L2H_ini", "TH_EDCCA_HL_diff", "IGI_Base", "ForceEDCCA",
- "AdapEn_RSSI", "IGI_LowerBound");
- netdev_dbg(adapter->pnetdev,
- "0x%-8x %-16d 0x%-6x %-10d %-11u %-14u\n",
- (u8)odm->TH_L2H_ini,
- odm->TH_EDCCA_HL_diff,
- odm->IGI_Base,
- odm->ForceEDCCA,
- odm->AdapEn_RSSI,
- odm->IGI_LowerBound);
-}
-
-void rtw_odm_adaptivity_parm_set(struct adapter *adapter, s8 TH_L2H_ini,
- s8 TH_EDCCA_HL_diff, s8 IGI_Base,
- bool ForceEDCCA, u8 AdapEn_RSSI,
- u8 IGI_LowerBound)
-{
- struct hal_com_data *pHalData = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &pHalData->odmpriv;
-
- odm->TH_L2H_ini = TH_L2H_ini;
- odm->TH_EDCCA_HL_diff = TH_EDCCA_HL_diff;
- odm->IGI_Base = IGI_Base;
- odm->ForceEDCCA = ForceEDCCA;
- odm->AdapEn_RSSI = AdapEn_RSSI;
- odm->IGI_LowerBound = IGI_LowerBound;
-}
-
-void rtw_odm_get_perpkt_rssi(void *sel, struct adapter *adapter)
-{
- struct hal_com_data *hal_data = GET_HAL_DATA(adapter);
- struct dm_odm_t *odm = &hal_data->odmpriv;
-
- netdev_dbg(adapter->pnetdev,
- "RxRate = %s, RSSI_A = %d(%%), RSSI_B = %d(%%)\n",
- HDATA_RATE(odm->RxRate), odm->RSSI_A, odm->RSSI_B);
-}
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index d8d394b67eeb..2825375bff94 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -203,22 +203,12 @@ signed int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *q
}
/*
-signed int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
-{
- return rtw_free_recvframe(precvframe, queue);
-}
-*/
-
-
-
-
-/*
-caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread (passive)
-
-using spinlock to protect
-
-*/
+ * caller : defrag ; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue : will be accessed in recv_thread (passive)
+ *
+ * using spinlock to protect
+ *
+ */
void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
{
@@ -245,6 +235,7 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
{
u32 cnt = 0;
union recv_frame *pending_frame;
+
while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
cnt++;
@@ -397,6 +388,7 @@ static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *p
if (prxattrib->encrypt > 0) {
u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
+
prxattrib->key_index = (((iv[3])>>6)&0x3);
if (prxattrib->key_index > WEP_KEYS) {
@@ -882,6 +874,7 @@ static signed int sta2ap_data_frame(struct adapter *adapter, union recv_frame *p
}
} else {
u8 *myhwaddr = myid(&adapter->eeprompriv);
+
if (memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
ret = RTW_RX_HANDLED;
goto exit;
@@ -1125,6 +1118,7 @@ static union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union re
psta = rtw_get_stainfo(pstapriv, psta_addr);
if (!psta) {
u8 type = GetFrameType(pfhdr->rx_data);
+
if (type != WIFI_DATA_TYPE) {
psta = rtw_get_bcmc_stainfo(padapter);
pdefrag_q = &psta->sta_recvpriv.defrag_q;
@@ -1207,6 +1201,7 @@ static signed int validate_recv_mgnt_frame(struct adapter *padapter, union recv_
{
/* for rx pkt statistics */
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+
if (psta) {
psta->sta_stats.rx_mgnt_pkts++;
if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON)
@@ -1374,9 +1369,8 @@ static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame
/* actual management data frame body */
data_len = pattrib->pkt_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
mgmt_DATA = rtw_zmalloc(data_len);
- if (!mgmt_DATA) {
+ if (!mgmt_DATA)
goto validate_80211w_fail;
- }
precv_frame = decryptor(adapter, precv_frame);
/* save actual management data frame body */
memcpy(mgmt_DATA, ptr+pattrib->hdrlen+pattrib->iv_len, data_len);
@@ -1385,9 +1379,8 @@ static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame
/* remove the iv and icv length */
pattrib->pkt_len = pattrib->pkt_len - pattrib->iv_len - pattrib->icv_len;
kfree(mgmt_DATA);
- if (!precv_frame) {
+ if (!precv_frame)
goto validate_80211w_fail;
- }
} else if (IS_MCAST(GetAddr1Ptr(ptr)) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)) {
signed int BIP_ret = _SUCCESS;
@@ -1480,6 +1473,7 @@ static signed int validate_recv_frame(struct adapter *adapter, union recv_frame
retval = validate_recv_data_frame(adapter, precv_frame);
if (retval == _FAIL) {
struct recv_priv *precvpriv = &adapter->recvpriv;
+
precvpriv->rx_drop++;
} else if (retval == _SUCCESS) {
#ifdef DBG_RX_DUMP_EAP
@@ -1651,14 +1645,12 @@ static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_n
u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
/* Rx Reorder initialize condition. */
- if (preorder_ctrl->indicate_seq == 0xFFFF) {
+ if (preorder_ctrl->indicate_seq == 0xFFFF)
preorder_ctrl->indicate_seq = seq_num;
- }
/* Drop out the packet which SeqNum is smaller than WinStart */
- if (SN_LESS(seq_num, preorder_ctrl->indicate_seq)) {
+ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
return false;
- }
/* */
/* Sliding window manipulation. Conditions includes: */
@@ -2084,10 +2076,8 @@ s32 rtw_recv_entry(union recv_frame *precvframe)
precvpriv = &padapter->recvpriv;
ret = recv_func(padapter, precvframe);
- if (ret == _FAIL) {
+ if (ret == _FAIL)
goto _recv_entry_drop;
- }
-
precvpriv->rx_pkts++;
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 9acd49323c7c..e36f8c369a04 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -1283,11 +1283,6 @@ s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter)
return (s32)GLBtCoexist.btInfo.bBtCtrlAggBufSize;
}
-void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual)
-{
- GLBtCoexist.bManualControl = bmanual;
-}
-
bool hal_btcoex_IsBtControlLps(struct adapter *padapter)
{
if (!hal_btcoex_IsBtExist(padapter))
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 909b37bcc897..e42556d03bce 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -861,25 +861,6 @@ bool eqNByte(u8 *str1, u8 *str2, u32 num)
/* */
/* Description: */
-/* Return true if chTmp is represent for hex digit and */
-/* false otherwise. */
-/* */
-/* */
-bool IsHexDigit(char chTmp)
-{
- if (
- (chTmp >= '0' && chTmp <= '9') ||
- (chTmp >= 'a' && chTmp <= 'f') ||
- (chTmp >= 'A' && chTmp <= 'F')
- )
- return true;
- else
- return false;
-}
-
-
-/* */
-/* Description: */
/* Translate a character to hex digit. */
/* */
u32 MapCharToHexDigit(char chTmp)
@@ -894,106 +875,6 @@ u32 MapCharToHexDigit(char chTmp)
return 0;
}
-
-
-/* Description: */
-/* Parse hex number from the string pucStr. */
-bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove)
-{
- char *szScan = szStr;
-
- /* Check input parameter. */
- if (!szStr || !pu4bVal || !pu4bMove)
- return false;
-
- /* Initialize output. */
- *pu4bMove = 0;
- *pu4bVal = 0;
-
- /* Skip leading space. */
- while (*szScan != '\0' && (*szScan == ' ' || *szScan == '\t')) {
- szScan++;
- (*pu4bMove)++;
- }
-
- /* Skip leading '0x' or '0X'. */
- if (*szScan == '0' && (*(szScan+1) == 'x' || *(szScan+1) == 'X')) {
- szScan += 2;
- (*pu4bMove) += 2;
- }
-
- /* Check if szScan is now pointer to a character for hex digit, */
- /* if not, it means this is not a valid hex number. */
- if (!IsHexDigit(*szScan))
- return false;
-
- /* Parse each digit. */
- do {
- (*pu4bVal) <<= 4;
- *pu4bVal += MapCharToHexDigit(*szScan);
-
- szScan++;
- (*pu4bMove)++;
- } while (IsHexDigit(*szScan));
-
- return true;
-}
-
-bool GetFractionValueFromString(
- char *szStr, u8 *pInteger, u8 *pFraction, u32 *pu4bMove
-)
-{
- char *szScan = szStr;
-
- /* Initialize output. */
- *pu4bMove = 0;
- *pInteger = 0;
- *pFraction = 0;
-
- /* Skip leading space. */
- while (*szScan != '\0' && (*szScan == ' ' || *szScan == '\t')) {
- ++szScan;
- ++(*pu4bMove);
- }
-
- /* Parse each digit. */
- do {
- (*pInteger) *= 10;
- *pInteger += (*szScan - '0');
-
- ++szScan;
- ++(*pu4bMove);
-
- if (*szScan == '.') {
- ++szScan;
- ++(*pu4bMove);
-
- if (*szScan < '0' || *szScan > '9')
- return false;
- else {
- *pFraction = *szScan - '0';
- ++szScan;
- ++(*pu4bMove);
- return true;
- }
- }
- } while (*szScan >= '0' && *szScan <= '9');
-
- return true;
-}
-
-/* */
-/* Description: */
-/* Return true if szStr is comment out with leading "//". */
-/* */
-bool IsCommentString(char *szStr)
-{
- if (*szStr == '/' && *(szStr+1) == '/')
- return true;
- else
- return false;
-}
-
bool GetU1ByteIntegerFromStringInDecimal(char *Str, u8 *pInt)
{
u16 i = 0;
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 94ecefb9113d..6bb0ff8d7c78 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -400,11 +400,6 @@ c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter)
return adapter->HalFunc.c2h_id_filter_ccx;
}
-s32 rtw_hal_is_disable_sw_channel_plan(struct adapter *padapter)
-{
- return GET_HAL_DATA(padapter)->bDisableSWChannelPlan;
-}
-
s32 rtw_hal_macid_sleep(struct adapter *padapter, u32 macid)
{
u8 support;
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index 19cfc2915458..fe9782d2d4fd 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -14,7 +14,6 @@
#include "odm_DynamicBBPowerSaving.h"
#include "odm_DynamicTxPower.h"
#include "odm_CfoTracking.h"
-#include "odm_NoiseMonitor.h"
#define TP_MODE 0
#define RSSI_MODE 1
@@ -863,7 +862,6 @@ struct dm_odm_t { /* DM_Out_Source_Dynamic_Mechanism_Structure */
u8 Adaptivity_IGI_upper;
u8 NHM_cnt_0;
- struct odm_noise_monitor noise_level;/* ODM_MAX_CHANNEL_NUM]; */
/* */
/* 2 Define STA info. */
/* _ODM_STA_INFO */
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 7e92c373cddb..07edf74ccfe5 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -309,63 +309,6 @@ void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI)
}
-void odm_PauseDIG(
- void *pDM_VOID,
- enum ODM_Pause_DIG_TYPE PauseType,
- u8 IGIValue
-)
-{
- struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
- struct dig_t *pDM_DigTable = &pDM_Odm->DM_DigTable;
- static bool bPaused;
-
- if (
- (pDM_Odm->SupportAbility & ODM_BB_ADAPTIVITY) &&
- pDM_Odm->TxHangFlg == true
- ) {
- return;
- }
-
- if (
- !bPaused && (!(pDM_Odm->SupportAbility & ODM_BB_DIG) ||
- !(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
- ){
- return;
- }
-
- switch (PauseType) {
- /* 1 Pause DIG */
- case ODM_PAUSE_DIG:
- /* 2 Disable DIG */
- ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility & (~ODM_BB_DIG));
-
- /* 2 Backup IGI value */
- if (!bPaused) {
- pDM_DigTable->IGIBackup = pDM_DigTable->CurIGValue;
- bPaused = true;
- }
-
- /* 2 Write new IGI value */
- ODM_Write_DIG(pDM_Odm, IGIValue);
- break;
-
- /* 1 Resume DIG */
- case ODM_RESUME_DIG:
- if (bPaused) {
- /* 2 Write backup IGI value */
- ODM_Write_DIG(pDM_Odm, pDM_DigTable->IGIBackup);
- bPaused = false;
-
- /* 2 Enable DIG */
- ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_ABILITY, pDM_Odm->SupportAbility | ODM_BB_DIG);
- }
- break;
-
- default:
- break;
- }
-}
-
bool odm_DigAbort(void *pDM_VOID)
{
struct dm_odm_t *pDM_Odm = (struct dm_odm_t *)pDM_VOID;
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.h b/drivers/staging/rtl8723bs/hal/odm_DIG.h
index 88cfd542df16..a5b041101c89 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.h
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.h
@@ -141,8 +141,6 @@ void odm_Adaptivity(void *pDM_VOID, u8 IGI);
void ODM_Write_DIG(void *pDM_VOID, u8 CurrentIGI);
-void odm_PauseDIG(void *pDM_VOID, enum ODM_Pause_DIG_TYPE PauseType, u8 IGIValue);
-
void odm_DIGInit(void *pDM_VOID);
void odm_DIG(void *pDM_VOID);
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
deleted file mode 100644
index 392cc8a398f5..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-
-#include "odm_precomp.h"
-
-/* This function is for inband noise test utility only */
-/* To obtain the inband noise level(dbm), do the following. */
-/* 1. disable DIG and Power Saving */
-/* 2. Set initial gain = 0x1a */
-/* 3. Stop updating idle time pwer report (for driver read) */
-/* - 0x80c[25] */
-
-#define Valid_Min -35
-#define Valid_Max 10
-#define ValidCnt 5
-
-static s16 odm_InbandNoise_Monitor_NSeries(
- struct dm_odm_t *pDM_Odm,
- u8 bPauseDIG,
- u8 IGIValue,
- u32 max_time
-)
-{
- u32 tmp4b;
- u8 max_rf_path = 0, rf_path;
- u8 reg_c50, reg_c58, valid_done = 0;
- struct noise_level noise_data;
- u32 start = 0;
-
- pDM_Odm->noise_level.noise_all = 0;
-
- max_rf_path = 1;
-
- memset(&noise_data, 0, sizeof(struct noise_level));
-
- /* */
- /* Step 1. Disable DIG && Set initial gain. */
- /* */
-
- if (bPauseDIG)
- odm_PauseDIG(pDM_Odm, ODM_PAUSE_DIG, IGIValue);
- /* */
- /* Step 2. Disable all power save for read registers */
- /* */
- /* dcmd_DebugControlPowerSave(padapter, PSDisable); */
-
- /* */
- /* Step 3. Get noise power level */
- /* */
- start = jiffies;
- while (1) {
-
- /* Stop updating idle time pwer report (for driver read) */
- PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_TxGainStage, BIT25, 1);
-
- /* Read Noise Floor Report */
- tmp4b = PHY_QueryBBReg(pDM_Odm->Adapter, 0x8f8, bMaskDWord);
-
- /* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0, TestInitialGain); */
- /* if (max_rf_path == 2) */
- /* PHY_SetBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0, TestInitialGain); */
-
- /* update idle time pwer report per 5us */
- PHY_SetBBReg(pDM_Odm->Adapter, rFPGA0_TxGainStage, BIT25, 0);
-
- noise_data.value[RF_PATH_A] = (u8)(tmp4b&0xff);
- noise_data.value[RF_PATH_B] = (u8)((tmp4b&0xff00)>>8);
-
- for (rf_path = RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- noise_data.sval[rf_path] = (s8)noise_data.value[rf_path];
- noise_data.sval[rf_path] /= 2;
- }
- /* mdelay(10); */
- /* msleep(10); */
-
- for (rf_path = RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- if ((noise_data.valid_cnt[rf_path] < ValidCnt) && (noise_data.sval[rf_path] < Valid_Max && noise_data.sval[rf_path] >= Valid_Min)) {
- noise_data.valid_cnt[rf_path]++;
- noise_data.sum[rf_path] += noise_data.sval[rf_path];
- if (noise_data.valid_cnt[rf_path] == ValidCnt) {
- valid_done++;
- }
-
- }
-
- }
-
- /* printk("####### valid_done:%d #############\n", valid_done); */
- if ((valid_done == max_rf_path) || (jiffies_to_msecs(jiffies - start) > max_time)) {
- for (rf_path = RF_PATH_A; rf_path < max_rf_path; rf_path++) {
- /* printk("%s PATH_%d - sum = %d, valid_cnt = %d\n", __func__, rf_path, noise_data.sum[rf_path], noise_data.valid_cnt[rf_path]); */
- if (noise_data.valid_cnt[rf_path])
- noise_data.sum[rf_path] /= noise_data.valid_cnt[rf_path];
- else
- noise_data.sum[rf_path] = 0;
- }
- break;
- }
- }
- reg_c50 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
- reg_c50 &= ~BIT7;
- pDM_Odm->noise_level.noise[RF_PATH_A] = -110 + reg_c50 + noise_data.sum[RF_PATH_A];
- pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[RF_PATH_A];
-
- if (max_rf_path == 2) {
- reg_c58 = (s32)PHY_QueryBBReg(pDM_Odm->Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
- reg_c58 &= ~BIT7;
- pDM_Odm->noise_level.noise[RF_PATH_B] = -110 + reg_c58 + noise_data.sum[RF_PATH_B];
- pDM_Odm->noise_level.noise_all += pDM_Odm->noise_level.noise[RF_PATH_B];
- }
- pDM_Odm->noise_level.noise_all /= max_rf_path;
-
- /* */
- /* Step 4. Recover the Dig */
- /* */
- if (bPauseDIG)
- odm_PauseDIG(pDM_Odm, ODM_RESUME_DIG, IGIValue);
-
- return pDM_Odm->noise_level.noise_all;
-
-}
-
-s16 ODM_InbandNoise_Monitor(void *pDM_VOID, u8 bPauseDIG, u8 IGIValue, u32 max_time)
-{
- return odm_InbandNoise_Monitor_NSeries(pDM_VOID, bPauseDIG, IGIValue, max_time);
-}
diff --git a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h b/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
deleted file mode 100644
index ab114543f39c..000000000000
--- a/drivers/staging/rtl8723bs/hal/odm_NoiseMonitor.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- *****************************************************************************/
-#ifndef __ODMNOISEMONITOR_H__
-#define __ODMNOISEMONITOR_H__
-
-#define ODM_MAX_CHANNEL_NUM 38/* 14+24 */
-struct noise_level {
- /* u8 value_a, value_b; */
- u8 value[MAX_RF_PATH];
- /* s8 sval_a, sval_b; */
- s8 sval[MAX_RF_PATH];
-
- /* s32 noise_a = 0, noise_b = 0, sum_a = 0, sum_b = 0; */
- /* s32 noise[ODM_RF_PATH_MAX]; */
- s32 sum[MAX_RF_PATH];
- /* u8 valid_cnt_a = 0, valid_cnt_b = 0, */
- u8 valid[MAX_RF_PATH];
- u8 valid_cnt[MAX_RF_PATH];
-
-};
-
-
-struct odm_noise_monitor {
- s8 noise[MAX_RF_PATH];
- s16 noise_all;
-};
-
-s16 ODM_InbandNoise_Monitor(
- void *pDM_VOID,
- u8 bPauseDIG,
- u8 IGIValue,
- u32 max_time
-);
-
-#endif
diff --git a/drivers/staging/rtl8723bs/hal/odm_precomp.h b/drivers/staging/rtl8723bs/hal/odm_precomp.h
index edce506022a5..2987857a8761 100644
--- a/drivers/staging/rtl8723bs/hal/odm_precomp.h
+++ b/drivers/staging/rtl8723bs/hal/odm_precomp.h
@@ -33,7 +33,6 @@
#include "odm_DynamicBBPowerSaving.h"
#include "odm_DynamicTxPower.h"
#include "odm_CfoTracking.h"
-#include "odm_NoiseMonitor.h"
#include "HalPhyRf.h"
#include "HalPhyRf_8723B.h"/* for IQK, LCK, Power-tracking */
#include "rtl8723b_hal.h"
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 0bbbdebdf157..82159e1c7f9b 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -50,7 +50,6 @@
#include <rtw_mlme_ext.h>
#include <rtw_ap.h>
#include <rtw_version.h>
-#include <rtw_odm.h>
#include "ioctl_cfg80211.h"
@@ -493,8 +492,6 @@ static inline u8 *myid(struct eeprom_priv *peepriv)
#include <rtw_btcoex.h>
-int rtw_change_ifname(struct adapter *padapter, const char *ifname);
-
extern char *rtw_initmac;
extern int rtw_mc2u_disable;
extern int rtw_ht_enable;
diff --git a/drivers/staging/rtl8723bs/include/hal_btcoex.h b/drivers/staging/rtl8723bs/include/hal_btcoex.h
index 78599d3521bf..fb167642da01 100644
--- a/drivers/staging/rtl8723bs/include/hal_btcoex.h
+++ b/drivers/staging/rtl8723bs/include/hal_btcoex.h
@@ -45,7 +45,6 @@ void hal_btcoex_HaltNotify(struct adapter *padapter);
void hal_btcoex_Handler(struct adapter *padapter);
s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(struct adapter *padapter);
-void hal_btcoex_SetManualControl(struct adapter *padapter, u8 bmanual);
bool hal_btcoex_IsBtControlLps(struct adapter *padapter);
bool hal_btcoex_IsLpsOn(struct adapter *padapter);
u8 hal_btcoex_RpwmVal(struct adapter *);
diff --git a/drivers/staging/rtl8723bs/include/hal_com.h b/drivers/staging/rtl8723bs/include/hal_com.h
index 7be0ea20bca4..6356b8c2ef81 100644
--- a/drivers/staging/rtl8723bs/include/hal_com.h
+++ b/drivers/staging/rtl8723bs/include/hal_com.h
@@ -147,17 +147,8 @@ u8 GetHalDefVar(struct adapter *adapter, enum hal_def_variable variable,
bool eqNByte(u8 *str1, u8 *str2, u32 num);
-bool IsHexDigit(char chTmp);
-
u32 MapCharToHexDigit(char chTmp);
-bool GetHexValueFromString(char *szStr, u32 *pu4bVal, u32 *pu4bMove);
-
-bool GetFractionValueFromString(char *szStr, u8 *pInteger, u8 *pFraction,
- u32 *pu4bMove);
-
-bool IsCommentString(char *szStr);
-
bool ParseQualifiedString(char *In, u32 *Start, char *Out, char LeftQualifier,
char RightQualifier);
diff --git a/drivers/staging/rtl8723bs/include/hal_intf.h b/drivers/staging/rtl8723bs/include/hal_intf.h
index 45bebbadb7ca..5cffab2d06ff 100644
--- a/drivers/staging/rtl8723bs/include/hal_intf.h
+++ b/drivers/staging/rtl8723bs/include/hal_intf.h
@@ -353,8 +353,6 @@ bool rtw_hal_c2h_valid(struct adapter *adapter, u8 *buf);
s32 rtw_hal_c2h_handler(struct adapter *adapter, u8 *c2h_evt);
c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter);
-s32 rtw_hal_is_disable_sw_channel_plan(struct adapter *padapter);
-
s32 rtw_hal_macid_sleep(struct adapter *padapter, u32 macid);
s32 rtw_hal_macid_wakeup(struct adapter *padapter, u32 macid);
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index 1e627dc0044d..9041d8dc5fb1 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -746,7 +746,6 @@ int rtw_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwi
void rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len);
-u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr);
u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content);
diff --git a/drivers/staging/rtl8723bs/include/rtw_cmd.h b/drivers/staging/rtl8723bs/include/rtw_cmd.h
index 1bf030cbbbbe..fe1b03101203 100644
--- a/drivers/staging/rtl8723bs/include/rtw_cmd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_cmd.h
@@ -591,7 +591,6 @@ extern u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8
extern u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
extern u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infrastructure networktype, bool enqueue);
-extern u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
extern u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
extern u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
@@ -613,8 +612,6 @@ extern u8 rtw_ps_cmd(struct adapter *padapter);
u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
-extern u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconfig);
-
extern u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length);
extern u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index 89b389d4c44b..65e138a5238f 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -662,7 +662,6 @@ extern void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint
extern u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer);
int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset);
-int rtw_get_ch_setting_union(struct adapter *adapter, u8 *ch, u8 *bw, u8 *offset);
struct cmd_hdl {
uint parmsize;
diff --git a/drivers/staging/rtl8723bs/include/rtw_odm.h b/drivers/staging/rtl8723bs/include/rtw_odm.h
deleted file mode 100644
index 94fc68a5c424..000000000000
--- a/drivers/staging/rtl8723bs/include/rtw_odm.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/******************************************************************************
- *
- * Copyright(c) 2013 Realtek Corporation. All rights reserved.
- *
- ******************************************************************************/
-#ifndef __RTW_ODM_H__
-#define __RTW_ODM_H__
-
-#include <drv_types.h>
-
-/*
-* This file provides utilities/wrappers for rtw driver to use ODM
-*/
-
-void rtw_odm_dbg_comp_msg(struct adapter *adapter);
-void rtw_odm_dbg_comp_set(struct adapter *adapter, u64 comps);
-void rtw_odm_dbg_level_msg(void *sel, struct adapter *adapter);
-void rtw_odm_dbg_level_set(struct adapter *adapter, u32 level);
-
-void rtw_odm_ability_msg(void *sel, struct adapter *adapter);
-void rtw_odm_ability_set(struct adapter *adapter, u32 ability);
-
-void rtw_odm_adaptivity_parm_msg(void *sel, struct adapter *adapter);
-void rtw_odm_adaptivity_parm_set(struct adapter *adapter, s8 TH_L2H_ini, s8 TH_EDCCA_HL_diff,
- s8 IGI_Base, bool ForceEDCCA, u8 AdapEn_RSSI, u8 IGI_LowerBound);
-void rtw_odm_get_perpkt_rssi(void *sel, struct adapter *adapter);
-#endif /* __RTW_ODM_H__ */
diff --git a/drivers/staging/rtl8723bs/include/xmit_osdep.h b/drivers/staging/rtl8723bs/include/xmit_osdep.h
index e781cd5dfd01..8704dced593a 100644
--- a/drivers/staging/rtl8723bs/include/xmit_osdep.h
+++ b/drivers/staging/rtl8723bs/include/xmit_osdep.h
@@ -25,8 +25,8 @@ struct sta_xmit_priv;
struct xmit_frame;
struct xmit_buf;
-extern int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
-extern int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
+extern void _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
+extern netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
void rtw_os_xmit_schedule(struct adapter *padapter);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index cb6d287f580d..6aeb169c6ebf 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -198,7 +198,7 @@ static int rtw_ieee80211_channel_to_frequency(int chan, int band)
if (band == NL80211_BAND_2GHZ) {
if (chan == 14)
return 2484;
- else if (chan < 14)
+ else if (chan < 14)
return 2407 + chan * 5;
}
@@ -810,7 +810,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
- padapter->securitypriv.binstallGrpkey = true;
+ padapter->securitypriv.binstallGrpkey = true;
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1, true);
@@ -850,8 +850,8 @@ exit:
}
static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
char *alg_name;
u32 param_len;
@@ -920,9 +920,9 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true
- || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
- ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
- }
+ || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ ret = rtw_cfg80211_set_encryption(ndev, param, param_len);
+ }
addkey_end:
kfree(param);
@@ -932,8 +932,8 @@ addkey_end:
}
static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie,
struct key_params*))
{
@@ -941,7 +941,8 @@ static int cfg80211_rtw_get_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct adapter *padapter = rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -955,7 +956,7 @@ static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_index
+ struct net_device *ndev, int link_id, u8 key_index
, bool unicast, bool multicast
)
{
@@ -1065,7 +1066,7 @@ static int cfg80211_rtw_change_iface(struct wiphy *wiphy,
}
}
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EPERM;
goto exit;
}
@@ -1239,7 +1240,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
}
rtw_ps_deny(padapter, PS_DENY_SCAN);
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
need_indicate_scan_done = true;
goto check_need_indicate_scan_done;
}
@@ -1499,49 +1500,49 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
pairwise_cipher = WPA_CIPHER_NONE;
switch (group_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
}
switch (pairwise_cipher) {
- case WPA_CIPHER_NONE:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
- break;
- case WPA_CIPHER_WEP40:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
- case WPA_CIPHER_TKIP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
- break;
- case WPA_CIPHER_CCMP:
- padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
- break;
- case WPA_CIPHER_WEP104:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
- break;
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
}
{/* handle wps_ie */
@@ -1582,7 +1583,7 @@ static int cfg80211_rtw_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ret = 0;
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EPERM;
goto exit;
}
@@ -1673,7 +1674,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
}
rtw_ps_deny(padapter, PS_DENY_JOIN);
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EPERM;
goto exit;
}
@@ -1848,6 +1849,7 @@ static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
inline bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter)
{
struct rtw_wdev_priv *rtw_wdev_priv = adapter_wdev_data(adapter);
+
return rtw_wdev_priv->power_mgmt;
}
@@ -1953,6 +1955,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter, u8 *pmgmt_frame,
{
struct station_info sinfo = {};
u8 ie_offset;
+
if (GetFrameSubType(pmgmt_frame) == WIFI_ASSOCREQ)
ie_offset = _ASOCREQ_IE_OFFSET_;
else /* WIFI_REASSOCREQ */
@@ -2084,7 +2087,8 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
/* Use the real net device to transmit the packet */
- return _rtw_xmit_entry(skb, padapter->pnetdev);
+ _rtw_xmit_entry(skb, padapter->pnetdev);
+ return NETDEV_TX_OK;
} else if ((frame_control & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE)) ==
(IEEE80211_FTYPE_MGMT|IEEE80211_STYPE_ACTION)) {
@@ -2347,7 +2351,7 @@ static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_beacon_data *info)
+ struct cfg80211_beacon_data *info)
{
struct adapter *adapter = rtw_netdev_priv(ndev);
@@ -2466,7 +2470,7 @@ static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *nde
spin_lock_bh(&pstapriv->asoc_list_lock);
psta = rtw_sta_info_get_by_idx(idx, pstapriv);
spin_unlock_bh(&pstapriv->asoc_list_lock);
- if (NULL == psta) {
+ if (psta == NULL) {
ret = -ENOENT;
goto exit;
}
@@ -2601,7 +2605,7 @@ static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
goto exit;
rtw_ps_deny(padapter, PS_DENY_MGNT_TX);
- if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ if (rtw_pwr_wakeup(padapter) == _FAIL) {
ret = -EFAULT;
goto cancel_ps_deny;
}
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 380d8c9e1239..68bba3c0e757 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -664,51 +664,36 @@ void rtw_reset_drv_sw(struct adapter *padapter)
u8 rtw_init_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
-
rtw_init_default_value(padapter);
rtw_init_hal_com_default_value(padapter);
- if (rtw_init_cmd_priv(&padapter->cmdpriv)) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_cmd_priv(&padapter->cmdpriv))
+ return _FAIL;
padapter->cmdpriv.padapter = padapter;
- if (rtw_init_evt_priv(&padapter->evtpriv)) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_evt_priv(&padapter->evtpriv))
+ goto free_cmd_priv;
-
- if (rtw_init_mlme_priv(padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_mlme_priv(padapter) == _FAIL)
+ goto free_evt_priv;
init_mlme_ext_priv(padapter);
- if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL)
+ goto free_mlme_ext;
- if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL)
+ goto free_xmit_priv;
/* add for CONFIG_IEEE80211W, none 11w also can use */
spin_lock_init(&padapter->security_key_mutex);
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
/* memset((unsigned char *)&padapter->securitypriv, 0, sizeof (struct security_priv)); */
- if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL)
+ goto free_recv_priv;
padapter->stapriv.padapter = padapter;
padapter->setband = GHZ24_50;
@@ -719,9 +704,26 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_hal_dm_init(padapter);
-exit:
+ return _SUCCESS;
+
+free_recv_priv:
+ _rtw_free_recv_priv(&padapter->recvpriv);
+
+free_xmit_priv:
+ _rtw_free_xmit_priv(&padapter->xmitpriv);
+
+free_mlme_ext:
+ free_mlme_ext_priv(&padapter->mlmeextpriv);
- return ret8;
+ rtw_free_mlme_priv(&padapter->mlmepriv);
+
+free_evt_priv:
+ rtw_free_evt_priv(&padapter->evtpriv);
+
+free_cmd_priv:
+ rtw_free_cmd_priv(&padapter->cmdpriv);
+
+ return _FAIL;
}
void rtw_cancel_all_timer(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index 4fbfa75c05d7..f09c1324c39c 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -108,56 +108,6 @@ RETURN:
return;
}
-int rtw_change_ifname(struct adapter *padapter, const char *ifname)
-{
- struct net_device *pnetdev;
- struct net_device *cur_pnetdev;
- struct rereg_nd_name_data *rereg_priv;
- int ret;
-
- if (!padapter)
- goto error;
-
- cur_pnetdev = padapter->pnetdev;
- rereg_priv = &padapter->rereg_nd_name_priv;
-
- /* free the old_pnetdev */
- if (rereg_priv->old_pnetdev) {
- free_netdev(rereg_priv->old_pnetdev);
- rereg_priv->old_pnetdev = NULL;
- }
-
- if (!rtnl_is_locked())
- unregister_netdev(cur_pnetdev);
- else
- unregister_netdevice(cur_pnetdev);
-
- rereg_priv->old_pnetdev = cur_pnetdev;
-
- pnetdev = rtw_init_netdev(padapter);
- if (!pnetdev)
- goto error;
-
- SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
-
- rtw_init_netdev_name(pnetdev, ifname);
-
- eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
-
- if (!rtnl_is_locked())
- ret = register_netdev(pnetdev);
- else
- ret = register_netdevice(pnetdev);
-
- if (ret != 0)
- goto error;
-
- return 0;
-
-error:
- return -1;
-}
-
void rtw_buf_free(u8 **buf, u32 *buf_len)
{
if (!buf || !buf_len)
diff --git a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
index 530e7a6c67c5..1eeabfffd6d2 100644
--- a/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/xmit_linux.c
@@ -181,7 +181,7 @@ static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
return true;
}
-int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+void _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
struct adapter *padapter = rtw_netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -202,7 +202,7 @@ int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
res = rtw_mlcst2unicst(padapter, pkt);
if (res)
- goto exit;
+ return;
}
}
@@ -210,22 +210,17 @@ int _rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
if (res < 0)
goto drop_packet;
- goto exit;
+ return;
drop_packet:
pxmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
-
-exit:
- return 0;
}
-int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
{
- int ret = 0;
-
if (pkt)
- ret = _rtw_xmit_entry(pkt, pnetdev);
+ _rtw_xmit_entry(pkt, pnetdev);
- return ret;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index dbd1159a2ef0..168ae2e9005d 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -386,7 +387,8 @@ static int lynxfb_ops_set_par(struct fb_info *info)
ret = lynxfb_set_color_offsets(info);
- var->height = var->width = -1;
+ var->height = -1;
+ var->width = -1;
var->accel_flags = 0;/*FB_ACCELF_TEXT;*/
if (ret) {
@@ -498,7 +500,8 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
return ret;
}
- var->height = var->width = -1;
+ var->height = -1;
+ var->width = -1;
var->accel_flags = 0;/* FB_ACCELF_TEXT; */
/* check if current fb's video memory big enough to hold the onscreen*/
@@ -723,7 +726,8 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
0x800f0 + (int)crtc->channel * 0x140;
pr_info("crtc->cursor.mmio = %p\n", crtc->cursor.mmio);
- crtc->cursor.max_h = crtc->cursor.max_w = 64;
+ crtc->cursor.max_h = 64;
+ crtc->cursor.max_w = 64;
crtc->cursor.size = crtc->cursor.max_h * crtc->cursor.max_w * 2 / 8;
crtc->cursor.vstart = sm750_dev->pvMem + crtc->cursor.offset;
@@ -987,22 +991,16 @@ release_fb:
static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
{
- struct apertures_struct *ap;
+ resource_size_t base = pci_resource_start(pdev, 0);
+ resource_size_t size = pci_resource_len(pdev, 0);
bool primary = false;
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags &
IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
- kfree(ap);
- return 0;
+
+ return aperture_remove_conflicting_devices(base, size, primary, "sm750_fb1");
}
static int lynxfb_pci_probe(struct pci_dev *pdev,
@@ -1027,7 +1025,8 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
if (!sm750_dev)
return err;
- sm750_dev->fbinfo[0] = sm750_dev->fbinfo[1] = NULL;
+ sm750_dev->fbinfo[0] = NULL;
+ sm750_dev->fbinfo[1] = NULL;
sm750_dev->devid = pdev->device;
sm750_dev->revid = pdev->revision;
sm750_dev->pdev = pdev;
diff --git a/drivers/staging/vme_user/vme_tsi148.c b/drivers/staging/vme_user/vme_tsi148.c
index 956476213241..020e0b3bce64 100644
--- a/drivers/staging/vme_user/vme_tsi148.c
+++ b/drivers/staging/vme_user/vme_tsi148.c
@@ -125,8 +125,8 @@ static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_MBS[i]) {
val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
- dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
- ": 0x%x\n", i, val);
+ dev_err(tsi148_bridge->parent, "VME Mailbox %d received: 0x%x\n",
+ i, val);
serviced |= TSI148_LCSR_INTC_MBC[i];
}
}
@@ -143,14 +143,12 @@ static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
bridge = tsi148_bridge->driver_priv;
- dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
- "attributes: %08x\n",
+ dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPAU),
ioread32be(bridge->base + TSI148_LCSR_EDPAL),
ioread32be(bridge->base + TSI148_LCSR_EDPAT));
- dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
- "completion reg: %08x\n",
+ dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPXA),
ioread32be(bridge->base + TSI148_LCSR_EDPXS));
@@ -180,10 +178,8 @@ static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
reg_join(error_addr_high, error_addr_low, &error_addr);
/* Check for exception register overflow (we have lost error data) */
- if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
- dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
- "Occurred\n");
- }
+ if (error_attrib & TSI148_LCSR_VEAT_VEOF)
+ dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow Occurred\n");
if (err_chk)
vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
@@ -317,8 +313,8 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
IRQF_SHARED,
driver_name, tsi148_bridge);
if (result) {
- dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
- "vector %02X\n", pdev->irq);
+ dev_err(tsi148_bridge->parent, "Can't get assigned pci irq vector %02X\n",
+ pdev->irq);
return result;
}
@@ -529,8 +525,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
return -EINVAL;
}
if (pci_offset_low & (granularity - 1)) {
- dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
- "alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid PCI Offset alignment\n");
return -EINVAL;
}
@@ -588,7 +583,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
temp_ctl &= ~0xF;
if (cycle & VME_SUPER)
- temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
+ temp_ctl |= TSI148_LCSR_ITAT_SUPR;
if (cycle & VME_USER)
temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
if (cycle & VME_PROG)
@@ -762,8 +757,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
- dev_err(tsi148_bridge->parent, "Failed to allocate mem "
- "resource for window %d size 0x%lx start 0x%lx\n",
+ dev_err(tsi148_bridge->parent, "Failed to allocate mem resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
@@ -827,15 +821,13 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
/* Verify input data */
if (vme_base & 0xFFFF) {
- dev_err(tsi148_bridge->parent, "Invalid VME Window "
- "alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME Window alignment\n");
retval = -EINVAL;
goto err_window;
}
if ((size == 0) && (enabled != 0)) {
- dev_err(tsi148_bridge->parent, "Size must be non-zero for "
- "enabled windows\n");
+ dev_err(tsi148_bridge->parent, "Size must be non-zero for enabled windows\n");
retval = -EINVAL;
goto err_window;
}
@@ -849,8 +841,7 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
retval = tsi148_alloc_resource(image, size);
if (retval) {
spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
- "resource\n");
+ dev_err(tsi148_bridge->parent, "Unable to allocate memory for resource\n");
goto err_res;
}
@@ -890,8 +881,7 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
}
if (vme_offset_low & 0xFFFF) {
spin_unlock(&image->lock);
- dev_err(tsi148_bridge->parent, "Invalid VME Offset "
- "alignment\n");
+ dev_err(tsi148_bridge->parent, "Invalid VME Offset alignment\n");
retval = -EINVAL;
goto err_gran;
}
@@ -937,8 +927,7 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
}
if (cycle & VME_2eSSTB) {
- dev_warn(tsi148_bridge->parent, "Currently not setting "
- "Broadcast Select Registers\n");
+ dev_warn(tsi148_bridge->parent, "Currently not setting Broadcast Select Registers\n");
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
}
@@ -1451,8 +1440,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
val |= TSI148_LCSR_DSAT_TM_2eSST;
if (cycle & VME_2eSSTB) {
- dev_err(dev, "Currently not setting Broadcast Select "
- "Registers\n");
+ dev_err(dev, "Currently not setting Broadcast Select Registers\n");
val |= TSI148_LCSR_DSAT_TM_2eSSTB;
}
@@ -1550,8 +1538,7 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
val |= TSI148_LCSR_DDAT_TM_2eSST;
if (cycle & VME_2eSSTB) {
- dev_err(dev, "Currently not setting Broadcast Select "
- "Registers\n");
+ dev_err(dev, "Currently not setting Broadcast Select Registers\n");
val |= TSI148_LCSR_DDAT_TM_2eSSTB;
}
@@ -1639,8 +1626,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Test descriptor alignment */
if ((unsigned long)&entry->descriptor & 0x7) {
- dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
- "byte boundary as required: %p\n",
+ dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 byte boundary as required: %p\n",
&entry->descriptor);
retval = -EINVAL;
goto err_align;
@@ -1827,10 +1813,10 @@ static int tsi148_dma_list_exec(struct vme_dma_list *list)
/* Need to add to pending here */
mutex_unlock(&ctrlr->mtx);
return -EBUSY;
- } else {
- list_add(&list->list, &ctrlr->running);
}
+ list_add(&list->list, &ctrlr->running);
+
/* Get first bus address and write into registers */
entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
list);
@@ -1935,8 +1921,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Location monitor "
- "callback attached, can't reset\n");
+ dev_err(tsi148_bridge->parent, "Location monitor callback attached, can't reset\n");
return -EBUSY;
}
}
@@ -1961,7 +1946,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
}
if (cycle & VME_SUPER)
- lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
+ lm_ctl |= TSI148_LCSR_LMAT_SUPR;
if (cycle & VME_USER)
lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
if (cycle & VME_PROG)
@@ -2051,8 +2036,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
mutex_unlock(&lm->mtx);
- dev_err(tsi148_bridge->parent, "Location monitor not properly "
- "configured\n");
+ dev_err(tsi148_bridge->parent, "Location monitor not properly configured\n");
return -EINVAL;
}
@@ -2196,8 +2180,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus, GFP_KERNEL);
if (!bridge->crcsr_kernel) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "CR/CSR image\n");
+ dev_err(tsi148_bridge->parent, "Failed to allocate memory for CR/CSR image\n");
return -ENOMEM;
}
@@ -2237,8 +2220,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
VME_D16);
if (retval)
- dev_err(tsi148_bridge->parent, "Configuring flush image"
- " failed\n");
+ dev_err(tsi148_bridge->parent, "Configuring flush image failed\n");
}
return 0;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 5de841cb776c..6ce41983dcf4 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -2083,7 +2083,7 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->dbm_threshold[2] = 0;
priv->dbm_threshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
- MACvSetRFLE_LatchBase(iobase);
+ vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT);
/* {{ RobertYu: 20050104 */
} else {
/* No VGA Table now */
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 846469cc06bb..c680925b9c92 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -55,9 +55,15 @@ static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
/*--------------------- Static Functions --------------------------*/
-static void s_vCalculateOFDMRParameter(unsigned char rate, u8 bb_type,
- unsigned char *pbyTxRate,
- unsigned char *pbyRsvTime);
+static void vt6655_mac_set_bb_type(void __iomem *iobase, u32 mask)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + MAC_REG_ENCFG);
+ reg_value = reg_value & ~ENCFG_BBTYPE_MASK;
+ reg_value = reg_value | mask;
+ iowrite32(reg_value, iobase + MAC_REG_ENCFG);
+}
/*--------------------- Export Functions --------------------------*/
@@ -186,21 +192,21 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
/* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
if (bb_type == BB_TYPE_11A) {
- MACvSetBBType(priv->port_offset, BB_TYPE_11A);
+ vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11A);
bb_write_embedded(priv, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
bySIFS = C_SIFS_A;
byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
byCWMaxMin = 0xA4;
} else if (bb_type == BB_TYPE_11B) {
- MACvSetBBType(priv->port_offset, BB_TYPE_11B);
+ vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11B);
bb_write_embedded(priv, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
byCWMaxMin = 0xA5;
} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
- MACvSetBBType(priv->port_offset, BB_TYPE_11G);
+ vt6655_mac_set_bb_type(priv->port_offset, BB_TYPE_11G);
bb_write_embedded(priv, 0x88, 0x08);
bySIFS = C_SIFS_BG;
@@ -403,9 +409,9 @@ void CARDvSafeResetTx(struct vnt_private *priv)
}
/* set MAC TD pointer */
- MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
+ vt6655_mac_set_curr_tx_desc_addr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
- MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
+ vt6655_mac_set_curr_tx_desc_addr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
/* set MAC Beacon TX pointer */
iowrite32((u32)priv->tx_beacon_dma, priv->port_offset + MAC_REG_BCNDMAPTR);
@@ -452,9 +458,9 @@ void CARDvSafeResetRx(struct vnt_private *priv)
iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL0);
iowrite32(RX_PERPKT, priv->port_offset + MAC_REG_RXDMACTL1);
/* set MAC RD pointer */
- MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma);
+ vt6655_mac_set_curr_rx_0_desc_addr(priv, priv->rd0_pool_dma);
- MACvSetCurrRx1DescAddr(priv, priv->rd1_pool_dma);
+ vt6655_mac_set_curr_rx_1_desc_addr(priv, priv->rd1_pool_dma);
}
/*
@@ -539,7 +545,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
spin_lock_irqsave(&priv->lock, flags);
/* Set to Page1 */
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
/* RSPINF_b_1 */
vnt_get_phy_field(priv, 14,
@@ -637,7 +643,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
&byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72);
/* Set to Page0 */
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index e926f9829a15..4122875ebcaa 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -116,12 +116,12 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
spin_lock_irqsave(&priv->lock, flags);
/* set HW default power register */
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWRCCK);
RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
iowrite8(priv->byCurPwr, priv->port_offset + MAC_REG_PWROFDM);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index bab08a40fe66..56c3cf3ba53d 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -205,6 +205,55 @@ static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
iowrite8(0, iobase + MAC_REG_PAGE1SEL);
}
+static void vt6655_mac_dma_ctl(void __iomem *iobase, u8 reg_index)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + reg_index);
+ if (reg_value & DMACTL_RUN)
+ iowrite32(DMACTL_WAKE, iobase + reg_index);
+ else
+ iowrite32(DMACTL_RUN, iobase + reg_index);
+}
+
+static void vt6655_mac_set_bits(void __iomem *iobase, u32 mask)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + MAC_REG_ENCFG);
+ reg_value = reg_value | mask;
+ iowrite32(reg_value, iobase + MAC_REG_ENCFG);
+}
+
+static void vt6655_mac_clear_bits(void __iomem *iobase, u32 mask)
+{
+ u32 reg_value;
+
+ reg_value = ioread32(iobase + MAC_REG_ENCFG);
+ reg_value = reg_value & ~mask;
+ iowrite32(reg_value, iobase + MAC_REG_ENCFG);
+}
+
+static void vt6655_mac_en_protect_md(void __iomem *iobase)
+{
+ vt6655_mac_set_bits(iobase, ENCFG_PROTECTMD);
+}
+
+static void vt6655_mac_dis_protect_md(void __iomem *iobase)
+{
+ vt6655_mac_clear_bits(iobase, ENCFG_PROTECTMD);
+}
+
+static void vt6655_mac_en_barker_preamble_md(void __iomem *iobase)
+{
+ vt6655_mac_set_bits(iobase, ENCFG_BARKERPREAM);
+}
+
+static void vt6655_mac_dis_barker_preamble_md(void __iomem *iobase)
+{
+ vt6655_mac_clear_bits(iobase, ENCFG_BARKERPREAM);
+}
+
/*
* Initialisation of MAC & BBP registers
*/
@@ -351,11 +400,11 @@ static void device_init_registers(struct vnt_private *priv)
}
if (priv->local_id > REV_ID_VT3253_B1) {
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
}
/* use relative tx timeout and 802.11i D4 */
@@ -363,7 +412,7 @@ static void device_init_registers(struct vnt_private *priv)
(CFG_TKIPOPT | CFG_NOTXTIMEOUT));
/* set performance parameter by registry */
- MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
+ vt6655_mac_set_short_retry_limit(priv, priv->byShortRetryLimit);
MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
/* reset TSF counter */
@@ -420,8 +469,8 @@ static void device_init_registers(struct vnt_private *priv)
vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
/* Turn On Rx DMA */
- MACvReceive0(priv->port_offset);
- MACvReceive1(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);
/* start the adapter */
iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
@@ -537,13 +586,12 @@ static void device_free_rings(struct vnt_private *priv)
priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
priv->aRD0Ring, priv->pool_dma);
- if (priv->tx0_bufs)
- dma_free_coherent(&priv->pcid->dev,
- priv->opts.tx_descs[0] * PKT_BUF_SZ +
- priv->opts.tx_descs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE,
- priv->tx0_bufs, priv->tx_bufs_dma0);
+ dma_free_coherent(&priv->pcid->dev,
+ priv->opts.tx_descs[0] * PKT_BUF_SZ +
+ priv->opts.tx_descs[1] * PKT_BUF_SZ +
+ CB_BEACON_BUF_SIZE +
+ CB_MAX_BUF_SIZE,
+ priv->tx0_bufs, priv->tx_bufs_dma0);
}
static int device_init_rd0_ring(struct vnt_private *priv)
@@ -583,7 +631,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD0Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -629,7 +677,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD1Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -694,7 +742,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD0Rings[i];
kfree(desc->td_info);
}
@@ -734,7 +782,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD1Rings[i];
kfree(desc->td_info);
}
@@ -1135,8 +1183,8 @@ static void vnt_interrupt_process(struct vnt_private *priv)
isr = ioread32(priv->port_offset + MAC_REG_ISR);
- MACvReceive0(priv->port_offset);
- MACvReceive1(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL0);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_RXDMACTL1);
if (max_count > priv->opts.int_works)
break;
@@ -1218,9 +1266,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
wmb(); /* second memory barrier */
if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
- MACvTransmitAC0(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_AC0DMACTL);
else
- MACvTransmit0(priv->port_offset);
+ vt6655_mac_dma_ctl(priv->port_offset, MAC_REG_TXDMACTL0);
priv->iTDUsed[dma_idx]++;
@@ -1440,19 +1488,19 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (conf->use_short_preamble) {
- MACvEnableBarkerPreambleMd(priv->port_offset);
+ vt6655_mac_en_barker_preamble_md(priv->port_offset);
priv->preamble_type = true;
} else {
- MACvDisableBarkerPreambleMd(priv->port_offset);
+ vt6655_mac_dis_barker_preamble_md(priv->port_offset);
priv->preamble_type = false;
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (conf->use_cts_prot)
- MACvEnableProtectMD(priv->port_offset);
+ vt6655_mac_en_protect_md(priv->port_offset);
else
- MACvDisableProtectMD(priv->port_offset);
+ vt6655_mac_dis_protect_md(priv->port_offset);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -1538,21 +1586,21 @@ static void vnt_configure(struct ieee80211_hw *hw,
spin_lock_irqsave(&priv->lock, flags);
if (priv->mc_list_count > 2) {
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
} else {
- MACvSelectPage1(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE1(priv->port_offset);
multicast = le64_to_cpu(multicast);
iowrite32((u32)multicast, priv->port_offset + MAC_REG_MAR0);
iowrite32((u32)(multicast >> 32),
priv->port_offset + MAC_REG_MAR0 + 4);
- MACvSelectPage0(priv->port_offset);
+ VT6655_MAC_SELECT_PAGE0(priv->port_offset);
}
spin_unlock_irqrestore(&priv->lock, flags);
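The error-unwind loops changed above in device_init_rd0_ring(), device_init_rd1_ring(), device_init_td0_ring() and device_init_td1_ring() switch from while (--i) to while (i--). With the pre-decrement form the entry at index 0 is never released, and a failure at the very first entry would drive the index negative; the post-decrement form walks back over exactly the entries that were set up. A stand-alone sketch of the two unwind orders (plain C, hypothetical "slots" instead of the driver's descriptors):

#include <stdio.h>

static void unwind_predecrement(int i)
{
	/* while (--i): releases i-1 .. 1, never slot 0 */
	while (--i)
		printf("release slot %d\n", i);
}

static void unwind_postdecrement(int i)
{
	/* while (i--): releases i-1 .. 0, exactly the initialised slots */
	while (i--)
		printf("release slot %d\n", i);
}

int main(void)
{
	unwind_predecrement(3);		/* prints 2, 1 */
	unwind_postdecrement(3);	/* prints 2, 1, 0 */
	return 0;
}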
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index dcc649532737..b4ebc7d31961 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -10,17 +10,16 @@
* Date: May 21, 1996
*
* Functions:
- * MACbIsRegBitsOff - Test if All test Bits Off
- * MACbIsIntDisable - Test if MAC interrupt disable
- * MACvSetShortRetryLimit - Set 802.11 Short Retry limit
+ * vt6655_mac_is_reg_bits_off - Test if All test Bits Off
+ * vt6655_mac_set_short_retry_limit - Set 802.11 Short Retry limit
* MACvSetLongRetryLimit - Set 802.11 Long Retry limit
- * MACvSetLoopbackMode - Set MAC Loopback Mode
- * MACvSaveContext - Save Context of MAC Registers
- * MACvRestoreContext - Restore Context of MAC Registers
+ * vt6655_mac_set_loopback_mode - Set MAC Loopback Mode
+ * vt6655_mac_save_context - Save Context of MAC Registers
+ * vt6655_mac_restore_context - Restore Context of MAC Registers
* MACbSoftwareReset - Software Reset MAC
- * MACbSafeRxOff - Turn Off MAC Rx
- * MACbSafeTxOff - Turn Off MAC Tx
- * MACbSafeStop - Stop MAC function
+ * vt6655_mac_safe_rx_off - Turn Off MAC Rx
+ * vt6655_mac_safe_tx_off - Turn Off MAC Tx
+ * vt6655_mac_safe_stop - Stop MAC function
* MACbShutdown - Shut down MAC
* MACvInitialize - Initialize MAC
* MACvSetCurrRxDescAddr - Set Rx Descriptors Address
@@ -86,43 +85,21 @@ static void vt6655_mac_clear_stck_ds(void __iomem *iobase)
* Parameters:
* In:
* io_base - Base Address for MAC
- * byRegOfs - Offset of MAC Register
- * byTestBits - Test bits
+ * reg_offset - Offset of MAC Register
+ * mask - Test bits
* Out:
* none
*
* Return Value: true if all test bits Off; otherwise false
*
*/
-bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
- unsigned char byTestBits)
+static bool vt6655_mac_is_reg_bits_off(struct vnt_private *priv,
+ unsigned char reg_offset,
+ unsigned char mask)
{
void __iomem *io_base = priv->port_offset;
- return !(ioread8(io_base + byRegOfs) & byTestBits);
-}
-
-/*
- * Description:
- * Test if MAC interrupt disable
- *
- * Parameters:
- * In:
- * io_base - Base Address for MAC
- * Out:
- * none
- *
- * Return Value: true if interrupt is disable; otherwise false
- *
- */
-bool MACbIsIntDisable(struct vnt_private *priv)
-{
- void __iomem *io_base = priv->port_offset;
-
- if (ioread32(io_base + MAC_REG_IMR))
- return false;
-
- return true;
+ return !(ioread8(io_base + reg_offset) & mask);
}
/*
@@ -132,19 +109,18 @@ bool MACbIsIntDisable(struct vnt_private *priv)
* Parameters:
* In:
* io_base - Base Address for MAC
- * byRetryLimit- Retry Limit
+ * retry_limit - Retry Limit
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetShortRetryLimit(struct vnt_private *priv,
- unsigned char byRetryLimit)
+void vt6655_mac_set_short_retry_limit(struct vnt_private *priv, unsigned char retry_limit)
{
void __iomem *io_base = priv->port_offset;
/* set SRT */
- iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
+ iowrite8(retry_limit, io_base + MAC_REG_SRT);
}
/*
@@ -176,21 +152,20 @@ void MACvSetLongRetryLimit(struct vnt_private *priv,
* Parameters:
* In:
* io_base - Base Address for MAC
- * byLoopbackMode - Loopback Mode
+ * loopback_mode - Loopback Mode
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
+static void vt6655_mac_set_loopback_mode(struct vnt_private *priv, u8 loopback_mode)
{
void __iomem *io_base = priv->port_offset;
- byLoopbackMode <<= 6;
+ loopback_mode <<= 6;
/* set TCR */
- iowrite8((ioread8(io_base + MAC_REG_TEST) & 0x3f) | byLoopbackMode,
- io_base + MAC_REG_TEST);
+ iowrite8((ioread8(io_base + MAC_REG_TEST) & 0x3f) | loopback_mode, io_base + MAC_REG_TEST);
}
/*
@@ -206,20 +181,20 @@ void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
* Return Value: none
*
*/
-void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
+static void vt6655_mac_save_context(struct vnt_private *priv, u8 *cxt_buf)
{
void __iomem *io_base = priv->port_offset;
/* read page0 register */
memcpy_fromio(cxt_buf, io_base, MAC_MAX_CONTEXT_SIZE_PAGE0);
- MACvSelectPage1(io_base);
+ VT6655_MAC_SELECT_PAGE1(io_base);
/* read page1 register */
memcpy_fromio(cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0, io_base,
MAC_MAX_CONTEXT_SIZE_PAGE1);
- MACvSelectPage0(io_base);
+ VT6655_MAC_SELECT_PAGE0(io_base);
}
/*
@@ -236,16 +211,16 @@ void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
* Return Value: none
*
*/
-void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf)
+static void vt6655_mac_restore_context(struct vnt_private *priv, u8 *cxt_buf)
{
void __iomem *io_base = priv->port_offset;
- MACvSelectPage1(io_base);
+ VT6655_MAC_SELECT_PAGE1(io_base);
/* restore page1 */
memcpy_toio(io_base, cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0,
MAC_MAX_CONTEXT_SIZE_PAGE1);
- MACvSelectPage0(io_base);
+ VT6655_MAC_SELECT_PAGE0(io_base);
/* restore RCR,TCR,IMR... */
memcpy_toio(io_base + MAC_REG_RCR, cxt_buf + MAC_REG_RCR,
@@ -318,23 +293,20 @@ bool MACbSoftwareReset(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeSoftwareReset(struct vnt_private *priv)
+static void vt6655_mac_save_soft_reset(struct vnt_private *priv)
{
- unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
- bool bRetVal;
+ u8 tmp_reg_data[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
/* PATCH....
* save some important register's value, then do
* reset, then restore register's value
*/
/* save MAC context */
- MACvSaveContext(priv, abyTmpRegData);
+ vt6655_mac_save_context(priv, tmp_reg_data);
/* do reset */
- bRetVal = MACbSoftwareReset(priv);
+ MACbSoftwareReset(priv);
/* restore MAC context, except CR0 */
- MACvRestoreContext(priv, abyTmpRegData);
-
- return bRetVal;
+ vt6655_mac_restore_context(priv, tmp_reg_data);
}
/*
@@ -350,7 +322,7 @@ bool MACbSafeSoftwareReset(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeRxOff(struct vnt_private *priv)
+static bool vt6655_mac_safe_rx_off(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -404,7 +376,7 @@ bool MACbSafeRxOff(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeTxOff(struct vnt_private *priv)
+static bool vt6655_mac_safe_tx_off(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -460,20 +432,20 @@ bool MACbSafeTxOff(struct vnt_private *priv)
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeStop(struct vnt_private *priv)
+static bool vt6655_mac_safe_stop(struct vnt_private *priv)
{
void __iomem *io_base = priv->port_offset;
vt6655_mac_reg_bits_off(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);
- if (!MACbSafeRxOff(priv)) {
- pr_debug(" MACbSafeRxOff == false)\n");
- MACbSafeSoftwareReset(priv);
+ if (!vt6655_mac_safe_rx_off(priv)) {
+ pr_debug(" vt6655_mac_safe_rx_off == false)\n");
+ vt6655_mac_save_soft_reset(priv);
return false;
}
- if (!MACbSafeTxOff(priv)) {
- pr_debug(" MACbSafeTxOff == false)\n");
- MACbSafeSoftwareReset(priv);
+ if (!vt6655_mac_safe_tx_off(priv)) {
+ pr_debug(" vt6655_mac_safe_tx_off == false)\n");
+ vt6655_mac_save_soft_reset(priv);
return false;
}
@@ -500,13 +472,13 @@ bool MACbShutdown(struct vnt_private *priv)
void __iomem *io_base = priv->port_offset;
/* disable MAC IMR */
iowrite32(0, io_base + MAC_REG_IMR);
- MACvSetLoopbackMode(priv, MAC_LB_INTERNAL);
+ vt6655_mac_set_loopback_mode(priv, MAC_LB_INTERNAL);
/* stop the adapter */
- if (!MACbSafeStop(priv)) {
- MACvSetLoopbackMode(priv, MAC_LB_NONE);
+ if (!vt6655_mac_safe_stop(priv)) {
+ vt6655_mac_set_loopback_mode(priv, MAC_LB_NONE);
return false;
}
- MACvSetLoopbackMode(priv, MAC_LB_NONE);
+ vt6655_mac_set_loopback_mode(priv, MAC_LB_NONE);
return true;
}
@@ -555,7 +527,7 @@ void MACvInitialize(struct vnt_private *priv)
* Return Value: none
*
*/
-void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
+void vt6655_mac_set_curr_rx_0_desc_addr(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -589,7 +561,7 @@ void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
* Return Value: none
*
*/
-void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
+void vt6655_mac_set_curr_rx_1_desc_addr(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -623,8 +595,7 @@ void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
* Return Value: none
*
*/
-void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr)
+static void vt6655_mac_set_curr_tx_0_desc_addr_ex(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -659,8 +630,7 @@ void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
*
*/
/* TxDMA1 = AC0DMA */
-void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr)
+static void vt6655_mac_set_curr_ac_0_desc_addr_ex(struct vnt_private *priv, u32 curr_desc_addr)
{
void __iomem *io_base = priv->port_offset;
unsigned short ww;
@@ -681,13 +651,12 @@ void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL);
}
-void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
- u32 curr_desc_addr)
+void vt6655_mac_set_curr_tx_desc_addr(int tx_type, struct vnt_private *priv, u32 curr_desc_addr)
{
- if (iTxType == TYPE_AC0DMA)
- MACvSetCurrAC0DescAddrEx(priv, curr_desc_addr);
- else if (iTxType == TYPE_TXDMA0)
- MACvSetCurrTx0DescAddrEx(priv, curr_desc_addr);
+ if (tx_type == TYPE_AC0DMA)
+ vt6655_mac_set_curr_ac_0_desc_addr_ex(priv, curr_desc_addr);
+ else if (tx_type == TYPE_TXDMA0)
+ vt6655_mac_set_curr_tx_0_desc_addr_ex(priv, curr_desc_addr);
}
/*
@@ -767,7 +736,7 @@ bool MACbPSWakeup(struct vnt_private *priv)
void __iomem *io_base = priv->port_offset;
unsigned int ww;
/* Read PSCTL */
- if (MACbIsRegBitsOff(priv, MAC_REG_PSCTL, PSCTL_PS))
+ if (vt6655_mac_is_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_PS))
return true;
/* Disable PS */
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 0122c4603c66..acf931c3f5fd 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -12,7 +12,7 @@
* Revision History:
* 07-01-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-25-2003 Kyle Hsu: Porting MAC functions from sim53.
- * 09-03-2003 Bryan YC Fan: Add MACvDisableProtectMD & MACvEnableProtectMD
+ * 09-03-2003 Bryan YC Fan: Add vt6655_mac_dis_protect_md & vt6655_mac_en_protect_md
*/
#ifndef __MAC_H__
@@ -537,95 +537,9 @@
/*--------------------- Export Macros ------------------------------*/
-#define MACvReceive0(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_RXDMACTL0); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL0); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL0); \
-} while (0)
-
-#define MACvReceive1(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_RXDMACTL1); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_RXDMACTL1); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_RXDMACTL1); \
-} while (0)
-
-#define MACvTransmit0(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_TXDMACTL0); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_TXDMACTL0); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_TXDMACTL0); \
-} while (0)
-
-#define MACvTransmitAC0(iobase) \
-do { \
- unsigned long dwData; \
- dwData = ioread32(iobase + MAC_REG_AC0DMACTL); \
- if (dwData & DMACTL_RUN) \
- iowrite32(DMACTL_WAKE, iobase + MAC_REG_AC0DMACTL); \
- else \
- iowrite32(DMACTL_RUN, iobase + MAC_REG_AC0DMACTL); \
-} while (0)
-
-#define MACvSelectPage0(iobase) \
- iowrite8(0, iobase + MAC_REG_PAGE1SEL)
-
-#define MACvSelectPage1(iobase) \
- iowrite8(1, iobase + MAC_REG_PAGE1SEL)
-
-#define MACvEnableProtectMD(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue | ENCFG_PROTECTMD; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvDisableProtectMD(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue & ~ENCFG_PROTECTMD; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvEnableBarkerPreambleMd(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue | ENCFG_BARKERPREAM; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvDisableBarkerPreambleMd(iobase) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue & ~ENCFG_BARKERPREAM; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvSetBBType(iobase, byTyp) \
-do { \
- unsigned long dwOrgValue; \
- dwOrgValue = ioread32(iobase + MAC_REG_ENCFG); \
- dwOrgValue = dwOrgValue & ~ENCFG_BBTYPE_MASK; \
- dwOrgValue = dwOrgValue | (unsigned long)byTyp; \
- iowrite32((u32)dwOrgValue, iobase + MAC_REG_ENCFG); \
-} while (0)
-
-#define MACvSetRFLE_LatchBase(iobase) \
- vt6655_mac_word_reg_bits_on(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define VT6655_MAC_SELECT_PAGE0(iobase) iowrite8(0, iobase + MAC_REG_PAGE1SEL)
+
+#define VT6655_MAC_SELECT_PAGE1(iobase) iowrite8(1, iobase + MAC_REG_PAGE1SEL)
#define MAKEWORD(lb, hb) \
((unsigned short)(((unsigned char)(lb)) | (((unsigned short)((unsigned char)(hb))) << 8)))
@@ -635,38 +549,16 @@ void vt6655_mac_word_reg_bits_on(void __iomem *iobase, const u8 reg_offset, cons
void vt6655_mac_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u8 bit_mask);
void vt6655_mac_word_reg_bits_off(void __iomem *iobase, const u8 reg_offset, const u16 bit_mask);
-bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
- unsigned char byTestBits);
-
-bool MACbIsIntDisable(struct vnt_private *priv);
-
-void MACvSetShortRetryLimit(struct vnt_private *priv,
- unsigned char byRetryLimit);
+void vt6655_mac_set_short_retry_limit(struct vnt_private *priv, unsigned char retry_limit);
void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit);
-void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode);
-
-void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf);
-void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf);
-
bool MACbSoftwareReset(struct vnt_private *priv);
-bool MACbSafeSoftwareReset(struct vnt_private *priv);
-bool MACbSafeRxOff(struct vnt_private *priv);
-bool MACbSafeTxOff(struct vnt_private *priv);
-bool MACbSafeStop(struct vnt_private *priv);
bool MACbShutdown(struct vnt_private *priv);
void MACvInitialize(struct vnt_private *priv);
-void MACvSetCurrRx0DescAddr(struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrRx1DescAddr(struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr);
-void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
- u32 curr_desc_addr);
+void vt6655_mac_set_curr_rx_0_desc_addr(struct vnt_private *priv, u32 curr_desc_addr);
+void vt6655_mac_set_curr_rx_1_desc_addr(struct vnt_private *priv, u32 curr_desc_addr);
+void vt6655_mac_set_curr_tx_desc_addr(int tx_type, struct vnt_private *priv, u32 curr_desc_addr);
void MACvSetCurrSyncDescAddrEx(struct vnt_private *priv,
u32 curr_desc_addr);
void MACvSetCurrATIMDescAddrEx(struct vnt_private *priv,
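The mac.h hunk above removes the remaining register-twiddling macros (MACvReceive0/1, MACvTransmit0, MACvTransmitAC0, the ENCFG protect/Barker-preamble helpers, MACvSetBBType, MACvSetRFLE_LatchBase); their work now lives in the static helpers added to device_main.c and card.c earlier in this series (vt6655_mac_dma_ctl(), vt6655_mac_set_bits(), vt6655_mac_clear_bits(), vt6655_mac_set_bb_type()). The conversion is the same in every case: an untyped do { } while (0) macro around an ioread32()/iowrite32() read-modify-write becomes a small function with a typed parameter, so arguments are checked and the sequence exists in one place. A stand-alone illustration of that pattern (ordinary memory instead of MMIO, bit value chosen for the example only):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PROTECTMD	0x00000020u	/* illustrative bit, not the real ENCFG value */

/* old style: untyped macro, nothing checks what 'reg' is */
#define DEMO_ENABLE_PROTECT_MD(reg)		\
do {						\
	uint32_t v = *(reg);			\
	*(reg) = v | DEMO_PROTECTMD;		\
} while (0)

/* new style: typed helper, a single read-modify-write in one place */
static void demo_enable_protect_md(uint32_t *reg)
{
	uint32_t v = *reg;

	*reg = v | DEMO_PROTECTMD;
}

int main(void)
{
	uint32_t encfg = 0;

	DEMO_ENABLE_PROTECT_MD(&encfg);
	encfg = 0;
	demo_enable_protect_md(&encfg);
	printf("0x%08x\n", encfg);
	return 0;
}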
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index b7b56d8406d1..471bb310176f 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -143,8 +143,8 @@ exit:
}
static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr, struct key_params *params)
{
struct wlandevice *wlandev = dev->ml_priv;
u32 did;
@@ -172,7 +172,7 @@ static int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
}
static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise,
+ int link_id, u8 key_index, bool pairwise,
const u8 *mac_addr, void *cookie,
void (*callback)(void *cookie, struct key_params*))
{
@@ -202,7 +202,8 @@ static int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
}
static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
+ int link_id, u8 key_index, bool pairwise,
+ const u8 *mac_addr)
{
struct wlandevice *wlandev = dev->ml_priv;
u32 did;
@@ -227,7 +228,8 @@ static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
}
static int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
- u8 key_index, bool unicast, bool multicast)
+ int link_id, u8 key_index, bool unicast,
+ bool multicast)
{
struct wlandevice *wlandev = dev->ml_priv;
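The prism2 key callbacks above gain the int link_id parameter that cfg80211 now passes to per-key operations for MLO-capable interfaces; a single-link driver such as this one keeps its behaviour and only has to match the new struct cfg80211_ops prototypes. A reduced kernel-style sketch of the adjusted shape (the demo_ name is hypothetical; the signature is the one in the hunk above and builds only in-tree against net/cfg80211.h):

#include <net/cfg80211.h>

static int demo_del_key(struct wiphy *wiphy, struct net_device *dev,
			int link_id, u8 key_index, bool pairwise,
			const u8 *mac_addr)
{
	/* link_id identifies the MLO link; a non-MLO driver can ignore it */
	return 0;
}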
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 5654dc54ae91..1cee51a1075e 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -137,8 +137,6 @@ struct p80211_frmrx {
/* called by /proc/net/wireless */
struct iw_statistics *p80211wext_get_wireless_stats(struct net_device *dev);
-/* wireless extensions' ioctls */
-extern struct iw_handler_def p80211wext_handler_def;
/* WEP stuff */
#define NUM_WEPKEYS 4
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fb91423a4e2e..c8470e7c0e10 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -164,6 +164,9 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
+ /* Skip empty port groups */
+ if (!tg_pt_gp->tg_pt_gp_members)
+ continue;
/*
* Check if the Target port group and Target port descriptor list
* based on tg_pt_gp_members count will fit into the response payload.
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index a889a6237d9c..30fcf69e1a1d 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -133,8 +133,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl);
/* target_core_transport.c */
-extern struct kmem_cache *se_tmr_req_cache;
-
int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e6a967ddc08c..69a4c9581e80 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -39,7 +39,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
}
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, blk_status_t);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
/* pscsi_attach_hba():
*
@@ -500,7 +500,7 @@ static int pscsi_configure_device(struct se_device *dev)
continue;
/*
* Functions will release the held struct scsi_host->host_lock
- * before calling calling pscsi_add_device_to_list() to register
+ * before calling pscsi_add_device_to_list() to register
* struct scsi_device with target_core_mod.
*/
switch (sd->type) {
@@ -1002,7 +1002,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
return 0;
}
-static void pscsi_req_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret pscsi_req_done(struct request *req,
+ blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
@@ -1029,6 +1030,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
}
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static const struct target_backend_ops pscsi_ops = {
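The pscsi_req_done() change above follows the block layer's conversion of the request end_io hook from void to enum rq_end_io_ret: a callback that frees the request itself returns RQ_END_IO_NONE so blk-mq does not touch it again. A reduced kernel-style sketch of the new shape (demo_ name is hypothetical, the calls are the ones used in the hunk):

#include <linux/blk-mq.h>

static enum rq_end_io_ret demo_req_done(struct request *req,
					blk_status_t status)
{
	void *priv = req->end_io_data;	/* per-command context, as before */

	(void)priv;			/* completion bookkeeping would go here */
	blk_mq_free_request(req);	/* the callback still releases the request */
	return RQ_END_IO_NONE;		/* tell blk-mq not to free it a second time */
}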
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index c14441c89bed..7cca3b15472b 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -115,6 +115,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[5] |= 0x1;
}
+ /*
+ * Set MULTIP bit to indicate presence of multiple SCSI target ports
+ */
+ if (dev->export_count > 1)
+ buf[6] |= 0x10;
+
buf[7] = 0x2; /* CmdQue=1 */
/*
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 3deaeecb712e..2940559c3086 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -486,6 +486,7 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.netnsok = true,
.small_ops = tcmu_genl_ops,
.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .resv_start_op = TCMU_CMD_SET_FEATURES + 1,
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 7ab31740cff8..0828240f27e6 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -271,8 +271,8 @@ static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
unsigned long start)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
struct ffa_mem_region_attributes mem_attr = {
.receiver = ffa_dev->vm_id,
.attrs = FFA_MEM_RW,
@@ -294,14 +294,14 @@ static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
if (rc)
return rc;
args.sg = sgt.sgl;
- rc = ffa_ops->memory_share(ffa_dev, &args);
+ rc = mem_ops->memory_share(&args);
sg_free_table(&sgt);
if (rc)
return rc;
rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
if (rc) {
- ffa_ops->memory_reclaim(args.g_handle, 0);
+ mem_ops->memory_reclaim(args.g_handle, 0);
return rc;
}
@@ -314,8 +314,9 @@ static int optee_ffa_shm_unregister(struct tee_context *ctx,
struct tee_shm *shm)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
+ const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;
u64 global_handle = shm->sec_world_id;
struct ffa_send_direct_data data = {
.data0 = OPTEE_FFA_UNREGISTER_SHM,
@@ -327,11 +328,11 @@ static int optee_ffa_shm_unregister(struct tee_context *ctx,
optee_shm_rem_ffa_handle(optee, global_handle);
shm->sec_world_id = 0;
- rc = ffa_ops->sync_send_receive(ffa_dev, &data);
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc)
pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
- rc = ffa_ops->memory_reclaim(global_handle, 0);
+ rc = mem_ops->memory_reclaim(global_handle, 0);
if (rc)
pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
@@ -342,7 +343,7 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
struct tee_shm *shm)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ const struct ffa_mem_ops *mem_ops;
u64 global_handle = shm->sec_world_id;
int rc;
@@ -353,7 +354,8 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
*/
optee_shm_rem_ffa_handle(optee, global_handle);
- rc = ffa_ops->memory_reclaim(global_handle, 0);
+ mem_ops = optee->ffa.ffa_dev->ops->mem_ops;
+ rc = mem_ops->memory_reclaim(global_handle, 0);
if (rc)
pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
@@ -529,8 +531,8 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
struct optee_msg_arg *rpc_arg)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;
struct optee_call_waiter w;
u32 cmd = data->data0;
u32 w4 = data->data1;
@@ -541,7 +543,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
/* Initialize waiter */
optee_cq_wait_init(&optee->call_queue, &w);
while (true) {
- rc = ffa_ops->sync_send_receive(ffa_dev, data);
+ rc = msg_ops->sync_send_receive(ffa_dev, data);
if (rc)
goto done;
@@ -576,7 +578,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
* OP-TEE has returned with a RPC request.
*
* Note that data->data4 (passed in register w7) is already
- * filled in by ffa_ops->sync_send_receive() returning
+ * filled in by ffa_mem_ops->sync_send_receive() returning
* above.
*/
cond_resched();
@@ -652,14 +654,15 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
*/
static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
- const struct ffa_dev_ops *ops)
+ const struct ffa_ops *ops)
{
+ const struct ffa_msg_ops *msg_ops = ops->msg_ops;
struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
int rc;
- ops->mode_32bit_set(ffa_dev);
+ msg_ops->mode_32bit_set(ffa_dev);
- rc = ops->sync_send_receive(ffa_dev, &data);
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d\n", rc);
return false;
@@ -672,7 +675,7 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
}
data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
- rc = ops->sync_send_receive(ffa_dev, &data);
+ rc = msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d\n", rc);
return false;
@@ -687,14 +690,14 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
}
static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
- const struct ffa_dev_ops *ops,
+ const struct ffa_ops *ops,
u32 *sec_caps,
unsigned int *rpc_param_count)
{
struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
int rc;
- rc = ops->sync_send_receive(ffa_dev, &data);
+ rc = ops->msg_ops->sync_send_receive(ffa_dev, &data);
if (rc) {
pr_err("Unexpected error %d", rc);
return false;
@@ -783,7 +786,7 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
- const struct ffa_dev_ops *ffa_ops;
+ const struct ffa_ops *ffa_ops;
unsigned int rpc_param_count;
struct tee_shm_pool *pool;
struct tee_device *teedev;
@@ -793,11 +796,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
u32 sec_caps;
int rc;
- ffa_ops = ffa_dev_ops_get(ffa_dev);
- if (!ffa_ops) {
- pr_warn("failed \"method\" init: ffa\n");
- return -ENOENT;
- }
+ ffa_ops = ffa_dev->ops;
if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
return -EINVAL;
@@ -821,7 +820,6 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
- optee->ffa.ffa_ops = ffa_ops;
optee->rpc_param_count = rpc_param_count;
teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index a33d98d17cfd..04ae58892608 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -111,7 +111,6 @@ struct optee_smc {
*/
struct optee_ffa {
struct ffa_device *ffa_dev;
- const struct ffa_dev_ops *ffa_ops;
/* Serializes access to @global_ids */
struct mutex mutex;
struct rhashtable global_ids;
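The OP-TEE hunks above track the FF-A core dropping ffa_dev_ops_get(): the operations now hang off the device as ffa_dev->ops, split into msg_ops and mem_ops groups, so optee_ffa no longer caches a separate ops pointer in struct optee_ffa. A reduced kernel-style sketch of the post-split access pattern (demo_ wrappers are hypothetical, the calls mirror the hunks):

#include <linux/arm_ffa.h>

static int demo_send(struct ffa_device *ffa_dev,
		     struct ffa_send_direct_data *data)
{
	const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops;

	/* direct message to the secure world, as in optee_ffa_yielding_call() */
	return msg_ops->sync_send_receive(ffa_dev, data);
}

static int demo_reclaim(struct ffa_device *ffa_dev, u64 handle)
{
	const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops;

	/* reclaim previously shared memory, as in the shm unregister paths */
	return mem_ops->memory_reclaim(handle, 0);
}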
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 1175f3a46859..27295bda3e0b 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index def8e1a0399c..2506c6c8ca83 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o
obj-y += intel/
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-y += st/
-obj-$(CONFIG_QCOM_TSENS) += qcom/
+obj-y += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index e61b91d14ad1..d30cb791e63c 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -179,12 +179,12 @@ static int amlogic_thermal_disable(struct amlogic_thermal *data)
return 0;
}
-static int amlogic_thermal_get_temp(void *data, int *temp)
+static int amlogic_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
unsigned int tval;
- struct amlogic_thermal *pdata = data;
+ struct amlogic_thermal *pdata = tz->devdata;
- if (!data)
+ if (!pdata)
return -EINVAL;
regmap_read(pdata->regmap, TSENSOR_STAT0, &tval);
@@ -195,7 +195,7 @@ static int amlogic_thermal_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops amlogic_thermal_ops = {
+static const struct thermal_zone_device_ops amlogic_thermal_ops = {
.get_temp = amlogic_thermal_get_temp,
};
@@ -276,10 +276,10 @@ static int amlogic_thermal_probe(struct platform_device *pdev)
return PTR_ERR(pdata->sec_ao_map);
}
- pdata->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- 0,
- pdata,
- &amlogic_thermal_ops);
+ pdata->tzd = devm_thermal_of_zone_register(&pdev->dev,
+ 0,
+ pdata,
+ &amlogic_thermal_ops);
if (IS_ERR(pdata->tzd)) {
ret = PTR_ERR(pdata->tzd);
dev_err(dev, "Failed to register tsensor: %d\n", ret);
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index c2ebfb5be4b3..52d63b3997fe 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -420,9 +420,9 @@ static struct thermal_zone_device_ops legacy_ops = {
.get_temp = armada_get_temp_legacy,
};
-static int armada_get_temp(void *_sensor, int *temp)
+static int armada_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct armada_thermal_sensor *sensor = _sensor;
+ struct armada_thermal_sensor *sensor = tz->devdata;
struct armada_thermal_priv *priv = sensor->priv;
int ret;
@@ -450,7 +450,7 @@ unlock_mutex:
return ret;
}
-static const struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_device_ops of_ops = {
.get_temp = armada_get_temp,
};
@@ -928,9 +928,9 @@ static int armada_thermal_probe(struct platform_device *pdev)
/* Register the sensor */
sensor->priv = priv;
sensor->id = sensor_id;
- tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, sensor,
- &of_ops);
+ tz = devm_thermal_of_zone_register(&pdev->dev,
+ sensor->id, sensor,
+ &of_ops);
if (IS_ERR(tz)) {
dev_info(&pdev->dev, "Thermal sensor %d unavailable\n",
sensor_id);
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
index e9bef5c3414b..1f8651d15160 100644
--- a/drivers/thermal/broadcom/bcm2711_thermal.c
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -31,11 +31,11 @@ struct bcm2711_thermal_priv {
struct thermal_zone_device *thermal;
};
-static int bcm2711_get_temp(void *data, int *temp)
+static int bcm2711_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct bcm2711_thermal_priv *priv = data;
- int slope = thermal_zone_get_slope(priv->thermal);
- int offset = thermal_zone_get_offset(priv->thermal);
+ struct bcm2711_thermal_priv *priv = tz->devdata;
+ int slope = thermal_zone_get_slope(tz);
+ int offset = thermal_zone_get_offset(tz);
u32 val;
int ret;
@@ -54,7 +54,7 @@ static int bcm2711_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = {
+static const struct thermal_zone_device_ops bcm2711_thermal_of_ops = {
.get_temp = bcm2711_get_temp,
};
@@ -88,8 +88,8 @@ static int bcm2711_thermal_probe(struct platform_device *pdev)
}
priv->regmap = regmap;
- thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv,
- &bcm2711_thermal_of_ops);
+ thermal = devm_thermal_of_zone_register(dev, 0, priv,
+ &bcm2711_thermal_of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(dev, "could not register sensor: %d\n", ret);
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index c8e4344d5a3d..2c67841a1115 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -88,9 +88,9 @@ static int bcm2835_thermal_temp2adc(int temp, int offset, int slope)
return temp;
}
-static int bcm2835_thermal_get_temp(void *d, int *temp)
+static int bcm2835_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct bcm2835_thermal_data *data = d;
+ struct bcm2835_thermal_data *data = tz->devdata;
u32 val = readl(data->regs + BCM2835_TS_TSENSSTAT);
if (!(val & BCM2835_TS_TSENSSTAT_VALID))
@@ -135,7 +135,7 @@ static void bcm2835_thermal_debugfs(struct platform_device *pdev)
debugfs_create_regset32("regset", 0444, data->debugfsdir, regset);
}
-static const struct thermal_zone_of_device_ops bcm2835_thermal_ops = {
+static const struct thermal_zone_device_ops bcm2835_thermal_ops = {
.get_temp = bcm2835_thermal_get_temp,
};
@@ -206,8 +206,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
data->clk, rate);
/* register of thermal sensor and get info from DT */
- tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
- &bcm2835_thermal_ops);
+ tz = devm_thermal_of_zone_register(&pdev->dev, 0, data,
+ &bcm2835_thermal_ops);
if (IS_ERR(tz)) {
err = PTR_ERR(tz);
dev_err(&pdev->dev,
@@ -277,7 +277,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
return 0;
err_tz:
- thermal_zone_of_sensor_unregister(&pdev->dev, tz);
+ thermal_of_zone_unregister(tz);
err_clk:
clk_disable_unprepare(data->clk);
@@ -290,7 +290,7 @@ static int bcm2835_thermal_remove(struct platform_device *pdev)
struct thermal_zone_device *tz = data->tz;
debugfs_remove_recursive(data->debugfsdir);
- thermal_zone_of_sensor_unregister(&pdev->dev, tz);
+ thermal_of_zone_unregister(tz);
clk_disable_unprepare(data->clk);
return 0;
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 0cedb8b4f00a..c79c6cfdd74d 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -105,7 +105,7 @@ static struct avs_tmon_trip avs_tmon_trips[] = {
struct brcmstb_thermal_params {
unsigned int offset;
unsigned int mult;
- const struct thermal_zone_of_device_ops *of_ops;
+ const struct thermal_zone_device_ops *of_ops;
};
struct brcmstb_thermal_priv {
@@ -150,9 +150,9 @@ static inline u32 avs_tmon_temp_to_code(struct brcmstb_thermal_priv *priv,
return (u32)((offset - temp) / mult);
}
-static int brcmstb_get_temp(void *data, int *temp)
+static int brcmstb_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct brcmstb_thermal_priv *priv = data;
+ struct brcmstb_thermal_priv *priv = tz->devdata;
u32 val;
long t;
@@ -260,9 +260,9 @@ static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int brcmstb_set_trips(void *data, int low, int high)
+static int brcmstb_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct brcmstb_thermal_priv *priv = data;
+ struct brcmstb_thermal_priv *priv = tz->devdata;
dev_dbg(priv->dev, "set trips %d <--> %d\n", low, high);
@@ -288,7 +288,7 @@ static int brcmstb_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops brcmstb_16nm_of_ops = {
+static const struct thermal_zone_device_ops brcmstb_16nm_of_ops = {
.get_temp = brcmstb_get_temp,
};
@@ -298,7 +298,7 @@ static const struct brcmstb_thermal_params brcmstb_16nm_params = {
.of_ops = &brcmstb_16nm_of_ops,
};
-static const struct thermal_zone_of_device_ops brcmstb_28nm_of_ops = {
+static const struct thermal_zone_device_ops brcmstb_28nm_of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
@@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
static int brcmstb_thermal_probe(struct platform_device *pdev)
{
- const struct thermal_zone_of_device_ops *of_ops;
+ const struct thermal_zone_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
struct resource *res;
@@ -341,8 +341,8 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
of_ops = priv->temp_params->of_ops;
- thermal = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv,
- of_ops);
+ thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv,
+ of_ops);
if (IS_ERR(thermal)) {
ret = PTR_ERR(thermal);
dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
diff --git a/drivers/thermal/broadcom/ns-thermal.c b/drivers/thermal/broadcom/ns-thermal.c
index c9468ba9d449..07a8a3f49bd0 100644
--- a/drivers/thermal/broadcom/ns-thermal.c
+++ b/drivers/thermal/broadcom/ns-thermal.c
@@ -14,19 +14,14 @@
#define PVTMON_CONTROL0_SEL_TEST_MODE 0x0000000e
#define PVTMON_STATUS 0x08
-struct ns_thermal {
- struct thermal_zone_device *tz;
- void __iomem *pvtmon;
-};
-
-static int ns_thermal_get_temp(void *data, int *temp)
+static int ns_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct ns_thermal *ns_thermal = data;
- int offset = thermal_zone_get_offset(ns_thermal->tz);
- int slope = thermal_zone_get_slope(ns_thermal->tz);
+ void __iomem *pvtmon = tz->devdata;
+ int offset = thermal_zone_get_offset(tz);
+ int slope = thermal_zone_get_slope(tz);
u32 val;
- val = readl(ns_thermal->pvtmon + PVTMON_CONTROL0);
+ val = readl(pvtmon + PVTMON_CONTROL0);
if ((val & PVTMON_CONTROL0_SEL_MASK) != PVTMON_CONTROL0_SEL_TEMP_MONITOR) {
/* Clear current mode selection */
val &= ~PVTMON_CONTROL0_SEL_MASK;
@@ -34,50 +29,47 @@ static int ns_thermal_get_temp(void *data, int *temp)
/* Set temp monitor mode (it's the default actually) */
val |= PVTMON_CONTROL0_SEL_TEMP_MONITOR;
- writel(val, ns_thermal->pvtmon + PVTMON_CONTROL0);
+ writel(val, pvtmon + PVTMON_CONTROL0);
}
- val = readl(ns_thermal->pvtmon + PVTMON_STATUS);
+ val = readl(pvtmon + PVTMON_STATUS);
*temp = slope * val + offset;
return 0;
}
-static const struct thermal_zone_of_device_ops ns_thermal_ops = {
+static const struct thermal_zone_device_ops ns_thermal_ops = {
.get_temp = ns_thermal_get_temp,
};
static int ns_thermal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ns_thermal *ns_thermal;
-
- ns_thermal = devm_kzalloc(dev, sizeof(*ns_thermal), GFP_KERNEL);
- if (!ns_thermal)
- return -ENOMEM;
+ struct thermal_zone_device *tz;
+ void __iomem *pvtmon;
- ns_thermal->pvtmon = of_iomap(dev_of_node(dev), 0);
- if (WARN_ON(!ns_thermal->pvtmon))
+ pvtmon = of_iomap(dev_of_node(dev), 0);
+ if (WARN_ON(!pvtmon))
return -ENOENT;
- ns_thermal->tz = devm_thermal_zone_of_sensor_register(dev, 0,
- ns_thermal,
- &ns_thermal_ops);
- if (IS_ERR(ns_thermal->tz)) {
- iounmap(ns_thermal->pvtmon);
- return PTR_ERR(ns_thermal->tz);
+ tz = devm_thermal_of_zone_register(dev, 0,
+ pvtmon,
+ &ns_thermal_ops);
+ if (IS_ERR(tz)) {
+ iounmap(pvtmon);
+ return PTR_ERR(tz);
}
- platform_set_drvdata(pdev, ns_thermal);
+ platform_set_drvdata(pdev, pvtmon);
return 0;
}
static int ns_thermal_remove(struct platform_device *pdev)
{
- struct ns_thermal *ns_thermal = platform_get_drvdata(pdev);
+ void __iomem *pvtmon = platform_get_drvdata(pdev);
- iounmap(ns_thermal->pvtmon);
+ iounmap(pvtmon);
return 0;
}
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 85ab9edd580c..2b93502543ff 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -19,7 +19,6 @@
#define SR_TMON_MAX_LIST 6
struct sr_tmon {
- struct thermal_zone_device *tz;
unsigned int crit_temp;
unsigned int tmon_id;
struct sr_thermal *priv;
@@ -31,9 +30,9 @@ struct sr_thermal {
struct sr_tmon tmon[SR_TMON_MAX_LIST];
};
-static int sr_get_temp(void *data, int *temp)
+static int sr_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct sr_tmon *tmon = data;
+ struct sr_tmon *tmon = tz->devdata;
struct sr_thermal *sr_thermal = tmon->priv;
*temp = readl(sr_thermal->regs + SR_TMON_TEMP_BASE(tmon->tmon_id));
@@ -41,13 +40,14 @@ static int sr_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops sr_tz_ops = {
+static const struct thermal_zone_device_ops sr_tz_ops = {
.get_temp = sr_get_temp,
};
static int sr_thermal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct thermal_zone_device *tz;
struct sr_thermal *sr_thermal;
struct sr_tmon *tmon;
struct resource *res;
@@ -84,10 +84,10 @@ static int sr_thermal_probe(struct platform_device *pdev)
writel(0, sr_thermal->regs + SR_TMON_TEMP_BASE(i));
tmon->tmon_id = i;
tmon->priv = sr_thermal;
- tmon->tz = devm_thermal_zone_of_sensor_register(dev, i, tmon,
- &sr_tz_ops);
- if (IS_ERR(tmon->tz))
- return PTR_ERR(tmon->tz);
+ tz = devm_thermal_of_zone_register(dev, i, tmon,
+ &sr_tz_ops);
+ if (IS_ERR(tz))
+ return PTR_ERR(tz);
dev_dbg(dev, "thermal sensor %d registered\n", i);
}
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index b76293cc989c..9f8b438fcf8f 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -475,7 +475,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
- * @np: a valid struct device_node to the cooling device device tree node
+ * @np: a valid struct device_node to the cooling device tree node
* @policy: cpufreq policy
* Normally this should be same as cpufreq policy->related_cpus.
* @em: Energy Model of the cpufreq policy
@@ -501,17 +501,17 @@ __cpufreq_cooling_register(struct device_node *np,
struct thermal_cooling_device_ops *cooling_ops;
char *name;
+ if (IS_ERR_OR_NULL(policy)) {
+ pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
+ return ERR_PTR(-EINVAL);
+ }
+
dev = get_cpu_device(policy->cpu);
if (unlikely(!dev)) {
pr_warn("No cpu device for cpu %d\n", policy->cpu);
return ERR_PTR(-ENODEV);
}
- if (IS_ERR_OR_NULL(policy)) {
- pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
- return ERR_PTR(-EINVAL);
- }
-
i = cpufreq_table_count_valid_entries(policy);
if (!i) {
pr_debug("%s: CPUFreq table not found or has no valid entries\n",
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index 180edec34e07..7dcfde7a9f2c 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -248,10 +248,9 @@ static int da9062_thermal_probe(struct platform_device *pdev)
jiffies_to_msecs(thermal->zone->passive_delay_jiffies));
ret = platform_get_irq_byname(pdev, "THERMAL");
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get platform IRQ.\n");
+ if (ret < 0)
goto err_zone;
- }
+
thermal->irq = ret;
ret = request_threaded_irq(thermal->irq, NULL,
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index 121cf853e545..cb10e280681f 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -58,9 +58,9 @@ struct db8500_thermal_zone {
};
/* Callback to get current temperature */
-static int db8500_thermal_get_temp(void *data, int *temp)
+static int db8500_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct db8500_thermal_zone *th = data;
+ struct db8500_thermal_zone *th = tz->devdata;
/*
* TODO: There is no PRCMU interface to get temperature data currently,
@@ -72,7 +72,7 @@ static int db8500_thermal_get_temp(void *data, int *temp)
return 0;
}
-static struct thermal_zone_of_device_ops thdev_ops = {
+static const struct thermal_zone_device_ops thdev_ops = {
.get_temp = db8500_thermal_get_temp,
};
@@ -182,7 +182,7 @@ static int db8500_thermal_probe(struct platform_device *pdev)
}
/* register of thermal sensor and get info from DT */
- th->tz = devm_thermal_zone_of_sensor_register(dev, 0, th, &thdev_ops);
+ th->tz = devm_thermal_of_zone_register(dev, 0, th, &thdev_ops);
if (IS_ERR(th->tz)) {
dev_err(dev, "register thermal zone sensor failed\n");
return PTR_ERR(th->tz);
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 991a1c54296d..a08bbe33be96 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -31,8 +31,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
trip, trip_temp, tz->temperature,
trip_hyst);
- mutex_lock(&tz->lock);
-
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -65,8 +63,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
instance->cdev->updated = false; /* cdev needs update */
mutex_unlock(&instance->cdev->lock);
}
-
- mutex_unlock(&tz->lock);
}
/**
@@ -100,15 +96,13 @@ static int bang_bang_control(struct thermal_zone_device *tz, int trip)
{
struct thermal_instance *instance;
- thermal_zone_trip_update(tz, trip);
+ lockdep_assert_held(&tz->lock);
- mutex_lock(&tz->lock);
+ thermal_zone_trip_update(tz, trip);
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
- mutex_unlock(&tz->lock);
-
return 0;
}
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index 6a2abcfc648f..a4ee4661e9cc 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -82,7 +82,7 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
int total_instance = 0;
int cur_trip_level = get_trip_level(tz);
- mutex_lock(&tz->lock);
+ lockdep_assert_held(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
@@ -112,7 +112,6 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
mutex_unlock(&cdev->lock);
}
- mutex_unlock(&tz->lock);
return 0;
}
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 1d5052470967..2d1aeaba38a8 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -392,8 +392,6 @@ static int allocate_power(struct thermal_zone_device *tz,
int i, num_actors, total_weight, ret = 0;
int trip_max_desired_temperature = params->trip_max_desired_temperature;
- mutex_lock(&tz->lock);
-
num_actors = 0;
total_weight = 0;
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
@@ -404,10 +402,8 @@ static int allocate_power(struct thermal_zone_device *tz,
}
}
- if (!num_actors) {
- ret = -ENODEV;
- goto unlock;
- }
+ if (!num_actors)
+ return -ENODEV;
/*
* We need to allocate five arrays of the same size:
@@ -421,10 +417,8 @@ static int allocate_power(struct thermal_zone_device *tz,
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
- if (!req_power) {
- ret = -ENOMEM;
- goto unlock;
- }
+ if (!req_power)
+ return -ENOMEM;
max_power = &req_power[num_actors];
granted_power = &req_power[2 * num_actors];
@@ -496,8 +490,6 @@ static int allocate_power(struct thermal_zone_device *tz,
control_temp - tz->temperature);
kfree(req_power);
-unlock:
- mutex_unlock(&tz->lock);
return ret;
}
@@ -576,7 +568,6 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
struct power_allocator_params *params = tz->governor_data;
u32 req_power;
- mutex_lock(&tz->lock);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
struct thermal_cooling_device *cdev = instance->cdev;
@@ -598,7 +589,6 @@ static void allow_maximum_power(struct thermal_zone_device *tz, bool update)
mutex_unlock(&instance->cdev->lock);
}
- mutex_unlock(&tz->lock);
}
/**
@@ -712,6 +702,8 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
struct power_allocator_params *params = tz->governor_data;
bool update;
+ lockdep_assert_held(&tz->lock);
+
/*
* We get called for every trip point but we only need to do
* our calculations once
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index 9729b46d0258..cdd3354bc27f 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -117,8 +117,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
trip, trip_type, trip_temp, trend, throttle);
- mutex_lock(&tz->lock);
-
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
if (instance->trip != trip)
continue;
@@ -145,8 +143,6 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
instance->cdev->updated = false; /* cdev needs update */
mutex_unlock(&instance->cdev->lock);
}
-
- mutex_unlock(&tz->lock);
}
/**
@@ -164,15 +160,13 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip)
{
struct thermal_instance *instance;
- thermal_zone_trip_update(tz, trip);
+ lockdep_assert_held(&tz->lock);
- mutex_lock(&tz->lock);
+ thermal_zone_trip_update(tz, trip);
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
- mutex_unlock(&tz->lock);
-
return 0;
}
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index a62a4e90bd3f..8bc1c22aaf03 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -34,7 +34,8 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip)
char *thermal_prop[5];
int i;
- mutex_lock(&tz->lock);
+ lockdep_assert_held(&tz->lock);
+
thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", tz->type);
thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", tz->temperature);
thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=%d", trip);
@@ -43,7 +44,7 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip)
kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, thermal_prop);
for (i = 0; i < 4; ++i)
kfree(thermal_prop[i]);
- mutex_unlock(&tz->lock);
+
return 0;
}
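For reference, every governor hunk above makes the same change: the ->throttle() callback is now entered with tz->lock already held by thermal_zone_device_update(), so the per-governor mutex_lock()/mutex_unlock() pairs become a single lockdep assertion. A minimal sketch of the resulting callback shape, assuming the thermal_core internals used above (example_throttle is an illustrative name, not part of the patch):

#include <linux/lockdep.h>
#include <linux/thermal.h>

#include "thermal_core.h"	/* struct thermal_instance, thermal_cdev_update() */

static int example_throttle(struct thermal_zone_device *tz, int trip)
{
	struct thermal_instance *instance;

	/* the caller, thermal_zone_device_update(), holds tz->lock */
	lockdep_assert_held(&tz->lock);

	/* governor-specific policy for this trip point would run here */

	list_for_each_entry(instance, &tz->thermal_instances, tz_node)
		thermal_cdev_update(instance->cdev);

	return 0;
}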
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 19a242c69ce6..d6974db7aaf7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -434,9 +434,9 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
return 0;
}
-static int hisi_thermal_get_temp(void *__data, int *temp)
+static int hisi_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct hisi_thermal_sensor *sensor = __data;
+ struct hisi_thermal_sensor *sensor = tz->devdata;
struct hisi_thermal_data *data = sensor->data;
*temp = data->ops->get_temp(sensor);
@@ -447,7 +447,7 @@ static int hisi_thermal_get_temp(void *__data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
+static const struct thermal_zone_device_ops hisi_of_thermal_ops = {
.get_temp = hisi_thermal_get_temp,
};
@@ -459,7 +459,7 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
data->ops->irq_handler(sensor);
- hisi_thermal_get_temp(sensor, &temp);
+ temp = data->ops->get_temp(sensor);
if (temp >= sensor->thres_temp) {
dev_crit(&data->pdev->dev,
@@ -484,9 +484,9 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
int ret, i;
const struct thermal_trip *trip;
- sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, sensor,
- &hisi_of_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
+ sensor->id, sensor,
+ &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
sensor->tzd = NULL;
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index af666bd9e8d4..e2c2673025a7 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -96,15 +96,15 @@ static int imx8mp_tmu_get_temp(void *data, int *temp)
return 0;
}
-static int tmu_get_temp(void *data, int *temp)
+static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct tmu_sensor *sensor = data;
+ struct tmu_sensor *sensor = tz->devdata;
struct imx8mm_tmu *tmu = sensor->priv;
- return tmu->socdata->get_temp(data, temp);
+ return tmu->socdata->get_temp(sensor, temp);
}
-static struct thermal_zone_of_device_ops tmu_tz_ops = {
+static const struct thermal_zone_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
@@ -165,9 +165,9 @@ static int imx8mm_tmu_probe(struct platform_device *pdev)
for (i = 0; i < data->num_sensors; i++) {
tmu->sensors[i].priv = tmu;
tmu->sensors[i].tzd =
- devm_thermal_zone_of_sensor_register(&pdev->dev, i,
- &tmu->sensors[i],
- &tmu_tz_ops);
+ devm_thermal_of_zone_register(&pdev->dev, i,
+ &tmu->sensors[i],
+ &tmu_tz_ops);
if (IS_ERR(tmu->sensors[i].tzd)) {
ret = PTR_ERR(tmu->sensors[i].tzd);
dev_err(&pdev->dev,
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 331a241eb0ef..5d92b70a5d53 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -43,11 +43,11 @@ struct imx_sc_msg_misc_get_temp {
} data;
} __packed __aligned(4);
-static int imx_sc_thermal_get_temp(void *data, int *temp)
+static int imx_sc_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct imx_sc_msg_misc_get_temp msg;
struct imx_sc_rpc_msg *hdr = &msg.hdr;
- struct imx_sc_sensor *sensor = data;
+ struct imx_sc_sensor *sensor = tz->devdata;
int ret;
msg.data.req.resource_id = sensor->resource_id;
@@ -70,65 +70,61 @@ static int imx_sc_thermal_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops imx_sc_thermal_ops = {
+static const struct thermal_zone_device_ops imx_sc_thermal_ops = {
.get_temp = imx_sc_thermal_get_temp,
};
static int imx_sc_thermal_probe(struct platform_device *pdev)
{
- struct device_node *np, *child, *sensor_np;
struct imx_sc_sensor *sensor;
- int ret;
+ const int *resource_id;
+ int i, ret;
ret = imx_scu_get_handle(&thermal_ipc_handle);
if (ret)
return ret;
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np)
- return -ENODEV;
+ resource_id = of_device_get_match_data(&pdev->dev);
+ if (!resource_id)
+ return -EINVAL;
- sensor_np = of_node_get(pdev->dev.of_node);
+ for (i = 0; resource_id[i] > 0; i++) {
- for_each_available_child_of_node(np, child) {
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
- if (!sensor) {
- of_node_put(child);
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!sensor)
+ return -ENOMEM;
- ret = thermal_zone_of_get_sensor_id(child,
- sensor_np,
- &sensor->resource_id);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to get valid sensor resource id: %d\n",
- ret);
- of_node_put(child);
- break;
- }
+ sensor->resource_id = resource_id[i];
- sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->resource_id,
- sensor,
- &imx_sc_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, sensor->resource_id,
+ sensor, &imx_sc_thermal_ops);
if (IS_ERR(sensor->tzd)) {
- dev_err(&pdev->dev, "failed to register thermal zone\n");
+ /*
+ * Save the error value before freeing the
+ * sensor pointer, otherwise we end up with a
+ * use-after-free error
+ */
ret = PTR_ERR(sensor->tzd);
- of_node_put(child);
- break;
+
+ devm_kfree(&pdev->dev, sensor);
+
+ /*
+ * The thermal framework notifies us there is
+ * no thermal zone description for such a
+ * sensor id
+ */
+ if (ret == -ENODEV)
+ continue;
+
+ dev_err(&pdev->dev, "failed to register thermal zone\n");
+ return ret;
}
if (devm_thermal_add_hwmon_sysfs(sensor->tzd))
dev_warn(&pdev->dev, "failed to add hwmon sysfs attributes\n");
}
-put_node:
- of_node_put(sensor_np);
- of_node_put(np);
-
- return ret;
+ return 0;
}
static int imx_sc_thermal_remove(struct platform_device *pdev)
@@ -136,8 +132,10 @@ static int imx_sc_thermal_remove(struct platform_device *pdev)
return 0;
}
+static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
+
static const struct of_device_id imx_sc_thermal_table[] = {
- { .compatible = "fsl,imx-sc-thermal", },
+ { .compatible = "fsl,imx-sc-thermal", .data = imx_sc_sensors },
{}
};
MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);
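The driver conversions in this series all follow one pattern: callbacks that took an opaque void * now take the struct thermal_zone_device itself and fetch their private data from tz->devdata, the ops table becomes a struct thermal_zone_device_ops, and registration goes through devm_thermal_of_zone_register(). A minimal skeleton of the converted form, assuming a platform driver with a single sensor (the example_* names are illustrative only):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct example_sensor {
	int id;
};

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct example_sensor *s = tz->devdata;	/* replaces the old void *data argument */

	*temp = 42000;	/* placeholder: read the hardware here, in millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_device_ops example_tz_ops = {
	.get_temp = example_get_temp,
};

static int example_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tzd;
	struct example_sensor *s;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* same (dev, sensor id, private data, ops) arguments as the calls above */
	tzd = devm_thermal_of_zone_register(&pdev->dev, s->id, s, &example_tz_ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	return 0;
}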
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 365489bf4b8c..db8a6f63657d 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -614,9 +614,8 @@ static int int3400_thermal_probe(struct platform_device *pdev)
free_sysfs:
cleanup_odvp(priv);
- if (priv->data_vault) {
- if (!ZERO_OR_NULL_PTR(priv->data_vault))
- sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
+ if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
+ sysfs_remove_group(&pdev->dev.kobj, &data_attribute_group);
kfree(priv->data_vault);
}
free_uuid:
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index c2dc4c158b9d..bf1b1cdfade4 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -373,18 +373,7 @@ static struct pci_driver proc_thermal_pci_driver = {
.driver.pm = &proc_thermal_pci_pm,
};
-static int __init proc_thermal_init(void)
-{
- return pci_register_driver(&proc_thermal_pci_driver);
-}
-
-static void __exit proc_thermal_exit(void)
-{
- pci_unregister_driver(&proc_thermal_pci_driver);
-}
-
-module_init(proc_thermal_init);
-module_exit(proc_thermal_exit);
+module_pci_driver(proc_thermal_pci_driver);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
index 4571a1a53b84..09e032f822f3 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
@@ -151,18 +151,7 @@ static struct pci_driver proc_thermal_pci_driver = {
.driver.pm = &proc_thermal_pci_pm,
};
-static int __init proc_thermal_init(void)
-{
- return pci_register_driver(&proc_thermal_pci_driver);
-}
-
-static void __exit proc_thermal_exit(void)
-{
- pci_unregister_driver(&proc_thermal_pci_driver);
-}
-
-module_init(proc_thermal_init);
-module_exit(proc_thermal_exit);
+module_pci_driver(proc_thermal_pci_driver);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index c841ab37e7c6..2a5570b9799a 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -62,8 +62,7 @@ static struct dentry *debug_dir;
static unsigned int set_target_ratio;
static unsigned int current_ratio;
static bool should_skip;
-static bool reduce_irq;
-static atomic_t idle_wakeup_counter;
+
static unsigned int control_cpu; /* The cpu assigned to collect stat and update
* control parameters. default to BSP but BSP
* can be offlined.
@@ -285,9 +284,6 @@ static unsigned int get_compensation(int ratio)
cal_data[ratio + 1].steady_comp) / 3;
}
- /* REVISIT: simple penalty of double idle injection */
- if (reduce_irq)
- comp = ratio;
/* do not exceed limit */
if (comp + ratio >= MAX_TARGET_RATIO)
comp = MAX_TARGET_RATIO - ratio - 1;
@@ -301,13 +297,9 @@ static void adjust_compensation(int target_ratio, unsigned int win)
struct powerclamp_calibration_data *d = &cal_data[target_ratio];
/*
- * adjust compensations if confidence level has not been reached or
- * there are too many wakeups during the last idle injection period, we
- * cannot trust the data for compensation.
+ * adjust compensations if confidence level has not been reached.
*/
- if (d->confidence >= CONFIDENCE_OK ||
- atomic_read(&idle_wakeup_counter) >
- win * num_online_cpus())
+ if (d->confidence >= CONFIDENCE_OK)
return;
delta = set_target_ratio - current_ratio;
@@ -347,14 +339,7 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
tsc_last = tsc_now;
adjust_compensation(target_ratio, win);
- /*
- * too many external interrupts, set flag such
- * that we can take measure later.
- */
- reduce_irq = atomic_read(&idle_wakeup_counter) >=
- 2 * win * num_online_cpus();
- atomic_set(&idle_wakeup_counter, 0);
/* if we are above target+guard, skip */
return set_target_ratio + guard <= current_ratio;
}
@@ -532,8 +517,10 @@ static int start_power_clamp(void)
/* prefer BSP */
control_cpu = 0;
- if (!cpu_online(control_cpu))
- control_cpu = smp_processor_id();
+ if (!cpu_online(control_cpu)) {
+ control_cpu = get_cpu();
+ put_cpu();
+ }
clamping = true;
schedule_delayed_work(&poll_pkg_cstate_work, 0);
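The control_cpu change above swaps smp_processor_id() for a get_cpu()/put_cpu() pair because the caller can be preemptible, where smp_processor_id() is not allowed. A short sketch of that idiom (example_pick_cpu is an illustrative name):

#include <linux/smp.h>

static unsigned int example_pick_cpu(void)
{
	unsigned int cpu = get_cpu();	/* disables preemption and returns the current CPU */

	put_cpu();			/* re-enable preemption; 'cpu' is only a hint afterwards */
	return cpu;
}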
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index 5d0b3ffc6f46..22c9bcb899c3 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -139,9 +139,9 @@ static int k3_bgp_read_temp(struct k3_thermal_data *devdata,
return 0;
}
-static int k3_thermal_get_temp(void *devdata, int *temp)
+static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct k3_thermal_data *data = devdata;
+ struct k3_thermal_data *data = tz->devdata;
int ret = 0;
ret = k3_bgp_read_temp(data, temp);
@@ -151,7 +151,7 @@ static int k3_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static const struct thermal_zone_of_device_ops k3_of_thermal_ops = {
+static const struct thermal_zone_device_ops k3_of_thermal_ops = {
.get_temp = k3_thermal_get_temp,
};
@@ -213,9 +213,9 @@ static int k3_bandgap_probe(struct platform_device *pdev)
writel(val, data[id].bgp->base + data[id].ctrl_offset);
data[id].tzd =
- devm_thermal_zone_of_sensor_register(dev, id,
- &data[id],
- &k3_of_thermal_ops);
+ devm_thermal_of_zone_register(dev, id,
+ &data[id],
+ &k3_of_thermal_ops);
if (IS_ERR(data[id].tzd)) {
dev_err(dev, "thermal zone device is NULL\n");
ret = PTR_ERR(data[id].tzd);
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index 115a44eb4fbf..16b6bcf1bf4f 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -247,9 +247,9 @@ static inline int k3_bgp_read_temp(struct k3_thermal_data *devdata,
}
/* Get temperature callback function for thermal zone */
-static int k3_thermal_get_temp(void *devdata, int *temp)
+static int k3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct k3_thermal_data *data = devdata;
+ struct k3_thermal_data *data = tz->devdata;
int ret = 0;
ret = k3_bgp_read_temp(data, temp);
@@ -259,7 +259,7 @@ static int k3_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static const struct thermal_zone_of_device_ops k3_of_thermal_ops = {
+static const struct thermal_zone_device_ops k3_of_thermal_ops = {
.get_temp = k3_thermal_get_temp,
};
@@ -474,10 +474,8 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
writel(val, data[id].bgp->cfg2_base + data[id].ctrl_offset);
bgp->ts_data[id] = &data[id];
- ti_thermal =
- devm_thermal_zone_of_sensor_register(bgp->dev, id,
- &data[id],
- &k3_of_thermal_ops);
+ ti_thermal = devm_thermal_of_zone_register(bgp->dev, id, &data[id],
+ &k3_of_thermal_ops);
if (IS_ERR(ti_thermal)) {
dev_err(bgp->dev, "thermal zone device is NULL\n");
ret = PTR_ERR(ti_thermal);
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 82d06c7411eb..6451a55eb582 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -44,9 +44,9 @@ struct max77620_therm_info {
* Return 0 on success otherwise error number to show reason of failure.
*/
-static int max77620_thermal_read_temp(void *data, int *temp)
+static int max77620_thermal_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct max77620_therm_info *mtherm = data;
+ struct max77620_therm_info *mtherm = tz->devdata;
unsigned int val;
int ret;
@@ -66,7 +66,7 @@ static int max77620_thermal_read_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops max77620_thermal_ops = {
+static const struct thermal_zone_device_ops max77620_thermal_ops = {
.get_temp = max77620_thermal_read_temp,
};
@@ -114,7 +114,7 @@ static int max77620_thermal_probe(struct platform_device *pdev)
*/
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
- mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+ mtherm->tz_device = devm_thermal_of_zone_register(&pdev->dev, 0,
mtherm, &max77620_thermal_ops);
if (IS_ERR(mtherm->tz_device)) {
ret = PTR_ERR(mtherm->tz_device);
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index ede94eadddda..8440692e3890 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -679,9 +679,9 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
return max;
}
-static int mtk_read_temp(void *data, int *temperature)
+static int mtk_read_temp(struct thermal_zone_device *tz, int *temperature)
{
- struct mtk_thermal *mt = data;
+ struct mtk_thermal *mt = tz->devdata;
int i;
int tempmax = INT_MIN;
@@ -700,7 +700,7 @@ static int mtk_read_temp(void *data, int *temperature)
return 0;
}
-static const struct thermal_zone_of_device_ops mtk_thermal_ops = {
+static const struct thermal_zone_device_ops mtk_thermal_ops = {
.get_temp = mtk_read_temp,
};
@@ -1082,8 +1082,8 @@ static int mtk_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mt);
- tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
- &mtk_thermal_ops);
+ tzdev = devm_thermal_of_zone_register(&pdev->dev, 0, mt,
+ &mtk_thermal_ops);
if (IS_ERR(tzdev)) {
ret = PTR_ERR(tzdev);
goto err_disable_clk_peri_therm;
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index bfd889422dd3..2c7f3f9a26eb 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config QCOM_TSENS
tristate "Qualcomm TSENS Temperature Alarm"
- depends on QCOM_QFPROM
+ depends on NVMEM_QCOM_QFPROM
depends on ARCH_QCOM || COMPILE_TEST
help
This enables the thermal sysfs driver for the TSENS device. It shows
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 073943cbcc2b..1b2c43eab27d 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -357,9 +357,9 @@ static irqreturn_t adc_tm5_gen2_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int adc_tm5_get_temp(void *data, int *temp)
+static int adc_tm5_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct adc_tm5_channel *channel = data;
+ struct adc_tm5_channel *channel = tz->devdata;
int ret;
if (!channel || !channel->iio)
@@ -639,9 +639,9 @@ config_fail:
return ret;
}
-static int adc_tm5_set_trips(void *data, int low, int high)
+static int adc_tm5_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct adc_tm5_channel *channel = data;
+ struct adc_tm5_channel *channel = tz->devdata;
struct adc_tm5_chip *chip;
int ret;
@@ -660,7 +660,7 @@ static int adc_tm5_set_trips(void *data, int low, int high)
return ret;
}
-static struct thermal_zone_of_device_ops adc_tm5_thermal_ops = {
+static const struct thermal_zone_device_ops adc_tm5_thermal_ops = {
.get_temp = adc_tm5_get_temp,
.set_trips = adc_tm5_set_trips,
};
@@ -672,11 +672,10 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
for (i = 0; i < adc_tm->nchannels; i++) {
adc_tm->channels[i].chip = adc_tm;
-
- tzd = devm_thermal_zone_of_sensor_register(adc_tm->dev,
- adc_tm->channels[i].channel,
- &adc_tm->channels[i],
- &adc_tm5_thermal_ops);
+ tzd = devm_thermal_of_zone_register(adc_tm->dev,
+ adc_tm->channels[i].channel,
+ &adc_tm->channels[i],
+ &adc_tm5_thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -ENODEV) {
dev_warn(adc_tm->dev, "thermal sensor on channel %d is not used\n",
@@ -830,7 +829,8 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm,
}
channel->adc_channel = args.args[0];
- channel->iio = devm_of_iio_channel_get_by_name(adc_tm->dev, node, NULL);
+ channel->iio = devm_fwnode_iio_channel_get_by_name(adc_tm->dev,
+ of_fwnode_handle(node), NULL);
if (IS_ERR(channel->iio)) {
ret = PTR_ERR(channel->iio);
if (ret != -EPROBE_DEFER)
@@ -1026,10 +1026,8 @@ static int adc_tm5_probe(struct platform_device *pdev)
adc_tm->base = reg;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "get_irq failed: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = adc_tm5_get_dt_data(adc_tm, node);
if (ret) {
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 770f82cc9bca..be785ab37e53 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -186,9 +186,9 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
return 0;
}
-static int qpnp_tm_get_temp(void *data, int *temp)
+static int qpnp_tm_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct qpnp_tm_chip *chip = data;
+ struct qpnp_tm_chip *chip = tz->devdata;
int ret, mili_celsius;
if (!temp)
@@ -263,9 +263,9 @@ skip:
return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
}
-static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
+static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp)
{
- struct qpnp_tm_chip *chip = data;
+ struct qpnp_tm_chip *chip = tz->devdata;
const struct thermal_trip *trip_points;
int ret;
@@ -283,7 +283,7 @@ static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
return ret;
}
-static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
+static const struct thermal_zone_device_ops qpnp_tm_sensor_ops = {
.get_temp = qpnp_tm_get_temp,
.set_trip_temp = qpnp_tm_set_trip_temp,
};
@@ -446,7 +446,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
* read the trip points. get_temp() returns the default temperature
* before the hardware initialization is completed.
*/
- chip->tz_dev = devm_thermal_zone_of_sensor_register(
+ chip->tz_dev = devm_thermal_of_zone_register(
&pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
if (IS_ERR(chip->tz_dev)) {
dev_err(&pdev->dev, "failed to register sensor\n");
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index f136cb350238..327f37202c69 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -604,7 +604,7 @@ static const struct tsens_ops ops_8939 = {
struct tsens_plat_data data_8939 = {
.num_sensors = 10,
.ops = &ops_8939,
- .hw_ids = (unsigned int []){ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10 },
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index e49f58e83513..b1b10005fb28 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -532,9 +532,9 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct tsens_sensor *s = _sensor;
+ struct tsens_sensor *s = tz->devdata;
struct tsens_priv *priv = s->priv;
struct device *dev = priv->dev;
struct tsens_irq_data d;
@@ -925,9 +925,9 @@ err_put_device:
return ret;
}
-static int tsens_get_temp(void *data, int *temp)
+static int tsens_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct tsens_sensor *s = data;
+ struct tsens_sensor *s = tz->devdata;
struct tsens_priv *priv = s->priv;
return priv->ops->get_temp(s, temp);
@@ -991,7 +991,7 @@ static const struct of_device_id tsens_table[] = {
};
MODULE_DEVICE_TABLE(of, tsens_table);
-static const struct thermal_zone_of_device_ops tsens_of_ops = {
+static const struct thermal_zone_device_ops tsens_of_ops = {
.get_temp = tsens_get_temp,
.set_trips = tsens_set_trips,
};
@@ -1044,9 +1044,9 @@ static int tsens_register(struct tsens_priv *priv)
for (i = 0; i < priv->num_sensors; i++) {
priv->sensor[i].priv = priv;
- tzd = devm_thermal_zone_of_sensor_register(priv->dev, priv->sensor[i].hw_id,
- &priv->sensor[i],
- &tsens_of_ops);
+ tzd = devm_thermal_of_zone_register(priv->dev, priv->sensor[i].hw_id,
+ &priv->sensor[i],
+ &tsens_of_ops);
if (IS_ERR(tzd))
continue;
priv->sensor[i].tzd = tzd;
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 73049f9bea25..d111e218f362 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -82,9 +82,9 @@ static struct qoriq_tmu_data *qoriq_sensor_to_data(struct qoriq_sensor *s)
return container_of(s, struct qoriq_tmu_data, sensor[s->id]);
}
-static int tmu_get_temp(void *p, int *temp)
+static int tmu_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct qoriq_sensor *qsensor = p;
+ struct qoriq_sensor *qsensor = tz->devdata;
struct qoriq_tmu_data *qdata = qoriq_sensor_to_data(qsensor);
u32 val;
/*
@@ -122,7 +122,7 @@ static int tmu_get_temp(void *p, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops tmu_tz_ops = {
+static const struct thermal_zone_device_ops tmu_tz_ops = {
.get_temp = tmu_get_temp,
};
@@ -146,9 +146,9 @@ static int qoriq_tmu_register_tmu_zone(struct device *dev,
sensor->id = id;
- tzd = devm_thermal_zone_of_sensor_register(dev, id,
- sensor,
- &tmu_tz_ops);
+ tzd = devm_thermal_of_zone_register(dev, id,
+ sensor,
+ &tmu_tz_ops);
ret = PTR_ERR_OR_ZERO(tzd);
if (ret) {
if (ret == -ENODEV)
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index cda7c52f2319..4c1c6f89aa2f 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -164,9 +164,9 @@ static int rcar_gen3_thermal_round(int temp)
return result * RCAR3_THERMAL_GRAN;
}
-static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
+static int rcar_gen3_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct rcar_gen3_thermal_tsc *tsc = devdata;
+ struct rcar_gen3_thermal_tsc *tsc = tz->devdata;
int mcelsius, val;
int reg;
@@ -203,9 +203,9 @@ static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
return INT_FIXPT(val);
}
-static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+static int rcar_gen3_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct rcar_gen3_thermal_tsc *tsc = devdata;
+ struct rcar_gen3_thermal_tsc *tsc = tz->devdata;
u32 irqmsk = 0;
if (low != -INT_MAX) {
@@ -225,7 +225,7 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
return 0;
}
-static struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
+static struct thermal_zone_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
.set_trips = rcar_gen3_thermal_set_trips,
};
@@ -508,8 +508,8 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
- &rcar_gen3_tz_of_ops);
+ zone = devm_thermal_of_zone_register(dev, i, tsc,
+ &rcar_gen3_tz_of_ops);
if (IS_ERR(zone)) {
dev_err(dev, "Sensor %u: Can't register thermal zone\n", i);
ret = PTR_ERR(zone);
@@ -560,7 +560,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
priv->thermal_init(tsc);
if (zone->ops->set_trips)
- rcar_gen3_thermal_set_trips(tsc, zone->prev_low_trip,
+ rcar_gen3_thermal_set_trips(zone, zone->prev_low_trip,
zone->prev_high_trip);
}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 1d729ed4d685..61c2b8855cb8 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -271,13 +271,6 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
return 0;
}
-static int rcar_thermal_of_get_temp(void *data, int *temp)
-{
- struct rcar_thermal_priv *priv = data;
-
- return rcar_thermal_get_current_temp(priv, temp);
-}
-
static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
{
struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
@@ -323,8 +316,8 @@ static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
return 0;
}
-static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
- .get_temp = rcar_thermal_of_get_temp,
+static const struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
+ .get_temp = rcar_thermal_get_temp,
};
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
@@ -534,7 +527,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
goto error_unregister;
if (chip->use_of_thermal) {
- priv->zone = devm_thermal_zone_of_sensor_register(
+ priv->zone = devm_thermal_of_zone_register(
dev, i, priv,
&rcar_thermal_zone_of_ops);
} else {
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index dc3a9c276a09..819e059cde71 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1211,9 +1211,9 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
+static int rockchip_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_sensor *sensor = tz->devdata;
struct rockchip_thermal_data *thermal = sensor->thermal;
const struct rockchip_tsadc_chip *tsadc = thermal->chip;
@@ -1224,9 +1224,9 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
sensor->id, thermal->regs, high);
}
-static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
+static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct rockchip_thermal_sensor *sensor = _sensor;
+ struct rockchip_thermal_sensor *sensor = tz->devdata;
struct rockchip_thermal_data *thermal = sensor->thermal;
const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
int retval;
@@ -1239,7 +1239,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
return retval;
}
-static const struct thermal_zone_of_device_ops rockchip_of_thermal_ops = {
+static const struct thermal_zone_device_ops rockchip_of_thermal_ops = {
.get_temp = rockchip_thermal_get_temp,
.set_trips = rockchip_thermal_set_trips,
};
@@ -1326,8 +1326,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
sensor->thermal = thermal;
sensor->id = id;
- sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id,
- sensor, &rockchip_of_thermal_ops);
+ sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, id, sensor,
+ &rockchip_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
error = PTR_ERR(sensor->tzd);
dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c
index 51ae80eda6af..2e0649f38506 100644
--- a/drivers/thermal/rzg2l_thermal.c
+++ b/drivers/thermal/rzg2l_thermal.c
@@ -73,9 +73,9 @@ static inline void rzg2l_thermal_write(struct rzg2l_thermal_priv *priv, u32 reg,
iowrite32(data, priv->base + reg);
}
-static int rzg2l_thermal_get_temp(void *devdata, int *temp)
+static int rzg2l_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct rzg2l_thermal_priv *priv = devdata;
+ struct rzg2l_thermal_priv *priv = tz->devdata;
u32 result = 0, dsensor, ts_code_ave;
int val, i;
@@ -114,7 +114,7 @@ static int rzg2l_thermal_get_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops rzg2l_tz_of_ops = {
+static const struct thermal_zone_device_ops rzg2l_tz_of_ops = {
.get_temp = rzg2l_thermal_get_temp,
};
@@ -207,8 +207,8 @@ static int rzg2l_thermal_probe(struct platform_device *pdev)
goto err;
}
- zone = devm_thermal_zone_of_sensor_register(dev, 0, priv,
- &rzg2l_tz_of_ops);
+ zone = devm_thermal_of_zone_register(dev, 0, priv,
+ &rzg2l_tz_of_ops);
if (IS_ERR(zone)) {
dev_err(dev, "Can't register thermal zone");
ret = PTR_ERR(zone);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index f4ab4c5b4b62..51874d0a284c 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -650,9 +650,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}
-static int exynos_get_temp(void *p, int *temp)
+static int exynos_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct exynos_tmu_data *data = p;
+ struct exynos_tmu_data *data = tz->devdata;
int value, ret = 0;
if (!data || !data->tmu_read)
@@ -728,9 +728,9 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
writel(val, data->base + emul_con);
}
-static int exynos_tmu_set_emulation(void *drv_data, int temp)
+static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp)
{
- struct exynos_tmu_data *data = drv_data;
+ struct exynos_tmu_data *data = tz->devdata;
int ret = -EINVAL;
if (data->soc == SOC_ARCH_EXYNOS4210)
@@ -750,7 +750,7 @@ out:
}
#else
#define exynos4412_tmu_set_emulation NULL
-static int exynos_tmu_set_emulation(void *drv_data, int temp)
+static int exynos_tmu_set_emulation(struct thermal_zone_device *tz, int temp)
{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */
@@ -997,7 +997,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return 0;
}
-static const struct thermal_zone_of_device_ops exynos_sensor_ops = {
+static const struct thermal_zone_device_ops exynos_sensor_ops = {
.get_temp = exynos_get_temp,
.set_emul_temp = exynos_tmu_set_emulation,
};
@@ -1091,8 +1091,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
* data->tzd must be registered before calling exynos_tmu_initialize(),
* requesting irq and calling exynos_tmu_control().
*/
- data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
- &exynos_sensor_ops);
+ data->tzd = devm_thermal_of_zone_register(&pdev->dev, 0, data,
+ &exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
ret = PTR_ERR(data->tzd);
if (ret != -EPROBE_DEFER)
@@ -1104,21 +1104,19 @@ static int exynos_tmu_probe(struct platform_device *pdev)
ret = exynos_tmu_initialize(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize TMU\n");
- goto err_thermal;
+ goto err_sclk;
}
ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
- goto err_thermal;
+ goto err_sclk;
}
exynos_tmu_control(pdev, true);
return 0;
-err_thermal:
- thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
err_sclk:
clk_disable_unprepare(data->sclk);
err_clk:
@@ -1136,9 +1134,7 @@ err_sensor:
static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- struct thermal_zone_device *tzd = data->tzd;
- thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
exynos_tmu_control(pdev, false);
clk_disable_unprepare(data->sclk);
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
index fff80fc18002..ac884514f116 100644
--- a/drivers/thermal/sprd_thermal.c
+++ b/drivers/thermal/sprd_thermal.c
@@ -204,9 +204,9 @@ static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen)
return clamp(val, val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1));
}
-static int sprd_thm_read_temp(void *devdata, int *temp)
+static int sprd_thm_read_temp(struct thermal_zone_device *tz, int *temp)
{
- struct sprd_thermal_sensor *sen = devdata;
+ struct sprd_thermal_sensor *sen = tz->devdata;
u32 data;
data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) &
@@ -217,7 +217,7 @@ static int sprd_thm_read_temp(void *devdata, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops sprd_thm_ops = {
+static const struct thermal_zone_device_ops sprd_thm_ops = {
.get_temp = sprd_thm_read_temp,
};
@@ -408,10 +408,10 @@ static int sprd_thm_probe(struct platform_device *pdev)
sprd_thm_sensor_init(thm, sen);
- sen->tzd = devm_thermal_zone_of_sensor_register(sen->dev,
- sen->id,
- sen,
- &sprd_thm_ops);
+ sen->tzd = devm_thermal_of_zone_register(sen->dev,
+ sen->id,
+ sen,
+ &sprd_thm_ops);
if (IS_ERR(sen->tzd)) {
dev_err(&pdev->dev, "register thermal zone failed %d\n",
sen->id);
@@ -523,8 +523,8 @@ static int sprd_thm_remove(struct platform_device *pdev)
for (i = 0; i < thm->nr_sensors; i++) {
sprd_thm_toggle_sensor(thm->sensor[i], false);
- devm_thermal_zone_of_sensor_unregister(&pdev->dev,
- thm->sensor[i]->tzd);
+ devm_thermal_of_zone_unregister(&pdev->dev,
+ thm->sensor[i]->tzd);
}
clk_disable_unprepare(thm->clk);
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 5fd3fb8912a6..78feb802a87d 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -302,9 +302,9 @@ static int stm_disable_irq(struct stm_thermal_sensor *sensor)
return 0;
}
-static int stm_thermal_set_trips(void *data, int low, int high)
+static int stm_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- struct stm_thermal_sensor *sensor = data;
+ struct stm_thermal_sensor *sensor = tz->devdata;
u32 itr1, th;
int ret;
@@ -350,9 +350,9 @@ static int stm_thermal_set_trips(void *data, int low, int high)
}
/* Callback to get temperature from HW */
-static int stm_thermal_get_temp(void *data, int *temp)
+static int stm_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct stm_thermal_sensor *sensor = data;
+ struct stm_thermal_sensor *sensor = tz->devdata;
u32 periods;
int freqM, ret;
@@ -474,7 +474,7 @@ static int stm_thermal_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
stm_thermal_suspend, stm_thermal_resume);
-static const struct thermal_zone_of_device_ops stm_tz_ops = {
+static const struct thermal_zone_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
.set_trips = stm_thermal_set_trips,
};
@@ -539,9 +539,9 @@ static int stm_thermal_probe(struct platform_device *pdev)
return ret;
}
- sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
- sensor,
- &stm_tz_ops);
+ sensor->th_dev = devm_thermal_of_zone_register(&pdev->dev, 0,
+ sensor,
+ &stm_tz_ops);
if (IS_ERR(sensor->th_dev)) {
dev_err(&pdev->dev, "%s: thermal zone sensor registering KO\n",
@@ -572,7 +572,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
return 0;
err_tz:
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
return ret;
}
@@ -582,7 +581,6 @@ static int stm_thermal_remove(struct platform_device *pdev)
stm_thermal_sensor_off(sensor);
thermal_remove_hwmon_sysfs(sensor->th_dev);
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
return 0;
}
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 212c87e63a66..e64d06d1328c 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -108,9 +108,9 @@ static int sun50i_h5_calc_temp(struct ths_device *tmdev,
return -1590 * reg / 10 + 276000;
}
-static int sun8i_ths_get_temp(void *data, int *temp)
+static int sun8i_ths_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct tsensor *s = data;
+ struct tsensor *s = tz->devdata;
struct ths_device *tmdev = s->tmdev;
int val = 0;
@@ -135,7 +135,7 @@ static int sun8i_ths_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops ths_ops = {
+static const struct thermal_zone_device_ops ths_ops = {
.get_temp = sun8i_ths_get_temp,
};
@@ -468,10 +468,10 @@ static int sun8i_ths_register(struct ths_device *tmdev)
tmdev->sensor[i].tmdev = tmdev;
tmdev->sensor[i].id = i;
tmdev->sensor[i].tzd =
- devm_thermal_zone_of_sensor_register(tmdev->dev,
- i,
- &tmdev->sensor[i],
- &ths_ops);
+ devm_thermal_of_zone_register(tmdev->dev,
+ i,
+ &tmdev->sensor[i],
+ &ths_ops);
if (IS_ERR(tmdev->sensor[i].tzd))
return PTR_ERR(tmdev->sensor[i].tzd);
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 825eab526619..1efe470f31e9 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -421,9 +421,9 @@ static int translate_temp(u16 val)
return t;
}
-static int tegra_thermctl_get_temp(void *data, int *out_temp)
+static int tegra_thermctl_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct tegra_thermctl_zone *zone = data;
+ struct tegra_thermctl_zone *zone = tz->devdata;
u32 val;
val = readl(zone->reg);
@@ -582,10 +582,9 @@ static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
return temp;
}
-static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
+static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp)
{
- struct tegra_thermctl_zone *zone = data;
- struct thermal_zone_device *tz = zone->tz;
+ struct tegra_thermctl_zone *zone = tz->devdata;
struct tegra_soctherm *ts = zone->ts;
const struct tegra_tsensor_group *sg = zone->sg;
struct device *dev = zone->dev;
@@ -657,9 +656,9 @@ static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
mutex_unlock(&zn->ts->thermctl_lock);
}
-static int tegra_thermctl_set_trips(void *data, int lo, int hi)
+static int tegra_thermctl_set_trips(struct thermal_zone_device *tz, int lo, int hi)
{
- struct tegra_thermctl_zone *zone = data;
+ struct tegra_thermctl_zone *zone = tz->devdata;
u32 r;
thermal_irq_disable(zone);
@@ -682,7 +681,7 @@ static int tegra_thermctl_set_trips(void *data, int lo, int hi)
return 0;
}
-static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+static const struct thermal_zone_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
.set_trips = tegra_thermctl_set_trips,
@@ -2194,9 +2193,9 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
zone->sg = soc->ttgs[i];
zone->ts = tegra;
- z = devm_thermal_zone_of_sensor_register(&pdev->dev,
- soc->ttgs[i]->id, zone,
- &tegra_of_thermal_ops);
+ z = devm_thermal_of_zone_register(&pdev->dev,
+ soc->ttgs[i]->id, zone,
+ &tegra_of_thermal_ops);
if (IS_ERR(z)) {
err = PTR_ERR(z);
dev_err(&pdev->dev, "failed to register sensor: %d\n",
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
index 5affc3d196be..eb84f0b9dc7c 100644
--- a/drivers/thermal/tegra/tegra-bpmp-thermal.c
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -30,9 +30,9 @@ struct tegra_bpmp_thermal {
struct tegra_bpmp_thermal_zone **zones;
};
-static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
+static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone,
+ int *out_temp)
{
- struct tegra_bpmp_thermal_zone *zone = data;
struct mrq_thermal_host_to_bpmp_request req;
union mrq_thermal_bpmp_to_host_response reply;
struct tegra_bpmp_message msg;
@@ -60,9 +60,14 @@ static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
return 0;
}
-static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
+static int tegra_bpmp_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct tegra_bpmp_thermal_zone *zone = data;
+ return __tegra_bpmp_thermal_get_temp(tz->devdata, out_temp);
+}
+
+static int tegra_bpmp_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct tegra_bpmp_thermal_zone *zone = tz->devdata;
struct mrq_thermal_host_to_bpmp_request req;
struct tegra_bpmp_message msg;
int err;
@@ -157,7 +162,7 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
return 0;
}
-static const struct thermal_zone_of_device_ops tegra_bpmp_of_thermal_ops = {
+static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops = {
.get_temp = tegra_bpmp_thermal_get_temp,
.set_trips = tegra_bpmp_thermal_set_trips,
};
@@ -200,13 +205,13 @@ static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
zone->idx = i;
zone->tegra = tegra;
- err = tegra_bpmp_thermal_get_temp(zone, &temp);
+ err = __tegra_bpmp_thermal_get_temp(zone, &temp);
if (err < 0) {
devm_kfree(&pdev->dev, zone);
continue;
}
- tzd = devm_thermal_zone_of_sensor_register(
+ tzd = devm_thermal_of_zone_register(
&pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -EPROBE_DEFER)
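The tegra-bpmp hunks keep a driver-internal helper that takes the private zone structure, with the ops callback reduced to a thin wrapper around tz->devdata; that way probe code can still query the temperature before the thermal zone has been registered. Roughly the same shape, again with illustrative example_* names:

#include <linux/thermal.h>

struct example_zone {
	unsigned int idx;
};

/* internal helper: usable from probe, before any thermal zone exists */
static int __example_get_temp(struct example_zone *zone, int *out_temp)
{
	*out_temp = 42000;	/* placeholder for the real firmware/hardware query */
	return 0;
}

/* thin wrapper matching the new ops prototype */
static int example_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
	return __example_get_temp(tz->devdata, out_temp);
}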
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index 05886684f429..c34501287e96 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -159,9 +159,9 @@ static void devm_tegra_tsensor_hw_disable(void *data)
tegra_tsensor_hw_disable(ts);
}
-static int tegra_tsensor_get_temp(void *data, int *temp)
+static int tegra_tsensor_get_temp(struct thermal_zone_device *tz, int *temp)
{
- const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor_channel *tsc = tz->devdata;
const struct tegra_tsensor *ts = tsc->ts;
int err, c1, c2, c3, c4, counter;
u32 val;
@@ -217,9 +217,9 @@ static int tegra_tsensor_temp_to_counter(const struct tegra_tsensor *ts, int tem
return DIV_ROUND_CLOSEST(c2 * 1000000 - ts->calib.b, ts->calib.a);
}
-static int tegra_tsensor_set_trips(void *data, int low, int high)
+static int tegra_tsensor_set_trips(struct thermal_zone_device *tz, int low, int high)
{
- const struct tegra_tsensor_channel *tsc = data;
+ const struct tegra_tsensor_channel *tsc = tz->devdata;
const struct tegra_tsensor *ts = tsc->ts;
u32 val;
@@ -240,7 +240,7 @@ static int tegra_tsensor_set_trips(void *data, int low, int high)
return 0;
}
-static const struct thermal_zone_of_device_ops ops = {
+static const struct thermal_zone_device_ops ops = {
.get_temp = tegra_tsensor_get_temp,
.set_trips = tegra_tsensor_set_trips,
};
@@ -516,7 +516,7 @@ static int tegra_tsensor_register_channel(struct tegra_tsensor *ts,
tsc->id = id;
tsc->regs = ts->regs + 0x40 * (hw_id + 1);
- tsc->tzd = devm_thermal_zone_of_sensor_register(ts->dev, id, tsc, &ops);
+ tsc->tzd = devm_thermal_of_zone_register(ts->dev, id, tsc, &ops);
if (IS_ERR(tsc->tzd)) {
if (PTR_ERR(tsc->tzd) != -ENODEV)
return dev_err_probe(ts->dev, PTR_ERR(tsc->tzd),
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index 73665c3ccfe0..323e273e3298 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -52,9 +52,9 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
return temp;
}
-static int gadc_thermal_get_temp(void *data, int *temp)
+static int gadc_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
- struct gadc_thermal_info *gti = data;
+ struct gadc_thermal_info *gti = tz->devdata;
int val;
int ret;
@@ -68,7 +68,7 @@ static int gadc_thermal_get_temp(void *data, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops gadc_thermal_ops = {
+static const struct thermal_zone_device_ops gadc_thermal_ops = {
.get_temp = gadc_thermal_get_temp,
};
@@ -143,8 +143,8 @@ static int gadc_thermal_probe(struct platform_device *pdev)
gti->dev = &pdev->dev;
platform_set_drvdata(pdev, gti);
- gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
- &gadc_thermal_ops);
+ gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti,
+ &gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
ret = PTR_ERR(gti->tz_dev);
if (ret != -EPROBE_DEFER)
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 50d50cec7774..117eeaf7dd24 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -295,27 +295,14 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
cancel_delayed_work(&tz->poll_queue);
}
-static inline bool should_stop_polling(struct thermal_zone_device *tz)
-{
- return !thermal_zone_device_is_enabled(tz);
-}
-
static void monitor_thermal_zone(struct thermal_zone_device *tz)
{
- bool stop;
-
- stop = should_stop_polling(tz);
-
- mutex_lock(&tz->lock);
-
- if (!stop && tz->passive)
+ if (tz->mode != THERMAL_DEVICE_ENABLED)
+ thermal_zone_device_set_polling(tz, 0);
+ else if (tz->passive)
thermal_zone_device_set_polling(tz, tz->passive_delay_jiffies);
- else if (!stop && tz->polling_delay_jiffies)
+ else if (tz->polling_delay_jiffies)
thermal_zone_device_set_polling(tz, tz->polling_delay_jiffies);
- else
- thermal_zone_device_set_polling(tz, 0);
-
- mutex_unlock(&tz->lock);
}
static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip)
@@ -383,18 +370,13 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
handle_critical_trips(tz, trip, trip_temp, type);
else
handle_non_critical_trips(tz, trip);
- /*
- * Alright, we handled this trip successfully.
- * So, start monitoring again.
- */
- monitor_thermal_zone(tz);
}
static void update_temperature(struct thermal_zone_device *tz)
{
int temp, ret;
- ret = thermal_zone_get_temp(tz, &temp);
+ ret = __thermal_zone_get_temp(tz, &temp);
if (ret) {
if (ret != -EAGAIN)
dev_warn(&tz->device,
@@ -403,10 +385,8 @@ static void update_temperature(struct thermal_zone_device *tz)
return;
}
- mutex_lock(&tz->lock);
tz->last_temperature = tz->temperature;
tz->temperature = temp;
- mutex_unlock(&tz->lock);
trace_thermal_temperature(tz);
@@ -469,15 +449,9 @@ EXPORT_SYMBOL_GPL(thermal_zone_device_disable);
int thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
{
- enum thermal_device_mode mode;
-
- mutex_lock(&tz->lock);
-
- mode = tz->mode;
+ lockdep_assert_held(&tz->lock);
- mutex_unlock(&tz->lock);
-
- return mode == THERMAL_DEVICE_ENABLED;
+ return tz->mode == THERMAL_DEVICE_ENABLED;
}
void thermal_zone_device_update(struct thermal_zone_device *tz,
@@ -485,9 +459,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
{
int count;
- if (should_stop_polling(tz))
- return;
-
if (atomic_read(&in_suspend))
return;
@@ -495,14 +466,23 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
"'get_temp' ops set\n", __func__))
return;
+ mutex_lock(&tz->lock);
+
+ if (!thermal_zone_device_is_enabled(tz))
+ goto out;
+
update_temperature(tz);
- thermal_zone_set_trips(tz);
+ __thermal_zone_set_trips(tz);
tz->notify_event = event;
for (count = 0; count < tz->num_trips; count++)
handle_thermal_trip(tz, count);
+
+ monitor_thermal_zone(tz);
+out:
+ mutex_unlock(&tz->lock);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
@@ -1206,13 +1186,26 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (type && strlen(type) >= THERMAL_NAME_LENGTH) {
+ if (strlen(type) >= THERMAL_NAME_LENGTH) {
pr_err("Thermal zone name (%s) too long, should be under %d chars\n",
type, THERMAL_NAME_LENGTH);
return ERR_PTR(-EINVAL);
}
- if (num_trips > THERMAL_MAX_TRIPS || num_trips < 0 || mask >> num_trips) {
+ /*
+ * The maximum number of trips is capped at 31 because of the
+ * "mask >> num_trips" check below: shifting a 32-bit int by 32 is
+ * undefined and triggers a compiler warning:
+ * warning: right shift count >= width of type [-Wshift-count-overflow]
+ *
+ * Such a shift would also make the check unreliable. E.g. with
+ * mask = 0x80000000 (trip id 31 writable), "mask >> 32" may still
+ * evaluate to 0x80000000, so the check below would spuriously reject
+ * a valid mask whenever bit 31 is set.
+ *
+ * In short, a 32-bit shift overflows the 4-byte integer.
+ */
+ if (num_trips > (BITS_PER_TYPE(int) - 1) || num_trips < 0 || mask >> num_trips) {
pr_err("Incorrect number of thermal trips\n");
return ERR_PTR(-EINVAL);
}
@@ -1239,7 +1232,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
}
tz->id = id;
- strlcpy(tz->type, type, sizeof(tz->type));
+ strscpy(tz->type, type, sizeof(tz->type));
result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
if (result)
@@ -1458,9 +1451,6 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
- if (!thermal_zone_device_is_enabled(tz))
- continue;
-
thermal_zone_device_init(tz);
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
@@ -1492,10 +1482,6 @@ static int __init thermal_init(void)
if (result)
goto unregister_governors;
- result = of_parse_thermal_zones();
- if (result)
- goto unregister_class;
-
result = register_pm_notifier(&thermal_pm_nb);
if (result)
pr_warn("Thermal: Can not register suspend notifier, return %d\n",
@@ -1503,8 +1489,6 @@ static int __init thermal_init(void)
return 0;
-unregister_class:
- class_unregister(&thermal_class);
unregister_governors:
thermal_unregister_governors();
error:
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index c991bb290512..1571917bd3c8 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -112,6 +112,8 @@ int thermal_build_list_of_policies(char *buf);
/* Helpers */
void thermal_zone_set_trips(struct thermal_zone_device *tz);
+void __thermal_zone_set_trips(struct thermal_zone_device *tz);
+int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
/* sysfs I/F */
int thermal_zone_create_device_groups(struct thermal_zone_device *, int);
@@ -135,13 +137,11 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
/* device tree support */
#ifdef CONFIG_THERMAL_OF
-int of_parse_thermal_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
-static inline int of_parse_thermal_zones(void) { return 0; }
static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
{
return 0;
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 690890f054a3..c65cdce8f856 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -64,27 +64,17 @@ get_thermal_instance(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL(get_thermal_instance);
-/**
- * thermal_zone_get_temp() - returns the temperature of a thermal zone
- * @tz: a valid pointer to a struct thermal_zone_device
- * @temp: a valid pointer to where to store the resulting temperature.
- *
- * When a valid thermal zone reference is passed, it will fetch its
- * temperature and fill @temp.
- *
- * Return: On success returns 0, an error code otherwise
- */
-int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
{
int ret = -EINVAL;
int count;
int crit_temp = INT_MAX;
enum thermal_trip_type type;
- if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
- goto exit;
+ lockdep_assert_held(&tz->lock);
- mutex_lock(&tz->lock);
+ if (!tz || IS_ERR(tz) || !tz->ops->get_temp)
+ return -EINVAL;
ret = tz->ops->get_temp(tz, temp);
@@ -107,35 +97,42 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
*temp = tz->emul_temperature;
}
- mutex_unlock(&tz->lock);
-exit:
return ret;
}
-EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
/**
- * thermal_zone_set_trips - Computes the next trip points for the driver
- * @tz: a pointer to a thermal zone device structure
+ * thermal_zone_get_temp() - returns the temperature of a thermal zone
+ * @tz: a valid pointer to a struct thermal_zone_device
+ * @temp: a valid pointer to where to store the resulting temperature.
*
- * The function computes the next temperature boundaries by browsing
- * the trip points. The result is the closer low and high trip points
- * to the current temperature. These values are passed to the backend
- * driver to let it set its own notification mechanism (usually an
- * interrupt).
+ * When a valid thermal zone reference is passed, it will fetch its
+ * temperature and fill @temp.
*
- * It does not return a value
+ * Return: On success returns 0, an error code otherwise
*/
-void thermal_zone_set_trips(struct thermal_zone_device *tz)
+int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ int ret;
+
+ mutex_lock(&tz->lock);
+ ret = __thermal_zone_get_temp(tz, temp);
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
+
+void __thermal_zone_set_trips(struct thermal_zone_device *tz)
{
int low = -INT_MAX;
int high = INT_MAX;
int trip_temp, hysteresis;
int i, ret;
- mutex_lock(&tz->lock);
+ lockdep_assert_held(&tz->lock);
if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
- goto exit;
+ return;
for (i = 0; i < tz->num_trips; i++) {
int trip_low;
@@ -154,7 +151,7 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
/* No need to change trip points */
if (tz->prev_low_trip == low && tz->prev_high_trip == high)
- goto exit;
+ return;
tz->prev_low_trip = low;
tz->prev_high_trip = high;
@@ -169,8 +166,24 @@ void thermal_zone_set_trips(struct thermal_zone_device *tz)
ret = tz->ops->set_trips(tz, low, high);
if (ret)
dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+}
-exit:
+/**
+ * thermal_zone_set_trips - Computes the next trip points for the driver
+ * @tz: a pointer to a thermal zone device structure
+ *
+ * The function computes the next temperature boundaries by browsing
+ * the trip points. The result is the closer low and high trip points
+ * to the current temperature. These values are passed to the backend
+ * driver to let it set its own notification mechanism (usually an
+ * interrupt).
+ *
+ * It does not return a value
+ */
+void thermal_zone_set_trips(struct thermal_zone_device *tz)
+{
+ mutex_lock(&tz->lock);
+ __thermal_zone_set_trips(tz);
mutex_unlock(&tz->lock);
}
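
The two hunks above split each helper into a public self-locking wrapper and a lock-annotated __ variant. A minimal sketch of how a caller that already holds no lock would combine the new variants in one critical section (hypothetical helper, not part of this patch):

	/*
	 * Read the zone temperature and refresh the trip window inside a
	 * single locked section, using the __ variants instead of the
	 * self-locking wrappers.
	 */
	static void example_update_zone(struct thermal_zone_device *tz)
	{
		int temp;

		mutex_lock(&tz->lock);

		if (!__thermal_zone_get_temp(tz, &temp))
			__thermal_zone_set_trips(tz);

		mutex_unlock(&tz->lock);
	}
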
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index 09e49ec8b6f4..f53f4ceb6a5d 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -147,7 +147,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
return -ENOMEM;
INIT_LIST_HEAD(&hwmon->tz_list);
- strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
+ strscpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
strreplace(hwmon->type, '-', '_');
hwmon->device = hwmon_device_register_for_thermal(&tz->device,
hwmon->type, hwmon);
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index 360b0dfdc3b0..39c921415989 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -20,11 +20,10 @@ static u32 thermal_mmio_readb(void __iomem *mmio_base)
return readb(mmio_base);
}
-static int thermal_mmio_get_temperature(void *private, int *temp)
+static int thermal_mmio_get_temperature(struct thermal_zone_device *tz, int *temp)
{
int t;
- struct thermal_mmio *sensor =
- (struct thermal_mmio *)private;
+ struct thermal_mmio *sensor = tz->devdata;
t = sensor->read_mmio(sensor->mmio_base) & sensor->mask;
t *= sensor->factor;
@@ -34,7 +33,7 @@ static int thermal_mmio_get_temperature(void *private, int *temp)
return 0;
}
-static const struct thermal_zone_of_device_ops thermal_mmio_ops = {
+static const struct thermal_zone_device_ops thermal_mmio_ops = {
.get_temp = thermal_mmio_get_temperature,
};
@@ -68,10 +67,10 @@ static int thermal_mmio_probe(struct platform_device *pdev)
}
}
- thermal_zone = devm_thermal_zone_of_sensor_register(&pdev->dev,
- 0,
- sensor,
- &thermal_mmio_ops);
+ thermal_zone = devm_thermal_of_zone_register(&pdev->dev,
+ 0,
+ sensor,
+ &thermal_mmio_ops);
if (IS_ERR(thermal_zone)) {
dev_err(&pdev->dev,
"failed to register sensor (%ld)\n",
@@ -79,7 +78,7 @@ static int thermal_mmio_probe(struct platform_device *pdev)
return PTR_ERR(thermal_zone);
}
- thermal_mmio_get_temperature(sensor, &temperature);
+ thermal_mmio_get_temperature(thermal_zone, &temperature);
dev_info(&pdev->dev,
"thermal mmio sensor %s registered, current temperature: %d\n",
pdev->name, temperature);
@@ -107,7 +106,7 @@ static struct platform_driver thermal_mmio_driver = {
.probe = thermal_mmio_probe,
.driver = {
.name = "thermal-mmio",
- .of_match_table = of_match_ptr(thermal_mmio_id_table),
+ .of_match_table = thermal_mmio_id_table,
},
};
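
The thermal_mmio conversion above shows the new sensor contract: callbacks receive the struct thermal_zone_device itself and recover the driver context from tz->devdata, and registration goes through devm_thermal_of_zone_register() with a plain struct thermal_zone_device_ops. A condensed sketch of the same pattern for a hypothetical driver (the foo_* names and register layout are assumptions, not from this patch):

	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/thermal.h>

	struct foo_sensor {
		void __iomem *base;
	};

	static int foo_get_temp(struct thermal_zone_device *tz, int *temp)
	{
		struct foo_sensor *s = tz->devdata;

		/* assumed layout: the register holds millidegrees Celsius */
		*temp = readl(s->base);
		return 0;
	}

	static const struct thermal_zone_device_ops foo_ops = {
		.get_temp = foo_get_temp,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct thermal_zone_device *tzd;
		struct foo_sensor *s;

		s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
		if (!s)
			return -ENOMEM;

		s->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(s->base))
			return PTR_ERR(s->base);

		/* sensor id 0, ops carry no OF-specific wrappers anymore */
		tzd = devm_thermal_of_zone_register(&pdev->dev, 0, s, &foo_ops);
		return PTR_ERR_OR_ZERO(tzd);
	}
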
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 050d243a5fa1..e2d78a996b5f 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -693,6 +693,7 @@ static struct genl_family thermal_gnl_family __ro_after_init = {
.policy = thermal_genl_policy,
.small_ops = thermal_genl_ops,
.n_small_ops = ARRAY_SIZE(thermal_genl_ops),
+ .resv_start_op = THERMAL_GENL_CMD_CDEV_GET + 1,
.mcgrps = thermal_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps),
};
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 802c30b72a92..d4b6335ace15 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -19,93 +19,6 @@
#include "thermal_core.h"
-/*** Private data structures to represent thermal device tree data ***/
-
-/**
- * struct __thermal_cooling_bind_param - a cooling device for a trip point
- * @cooling_device: a pointer to identify the referred cooling device
- * @min: minimum cooling state used at this trip point
- * @max: maximum cooling state used at this trip point
- */
-
-struct __thermal_cooling_bind_param {
- struct device_node *cooling_device;
- unsigned long min;
- unsigned long max;
-};
-
-/**
- * struct __thermal_bind_params - a match between trip and cooling device
- * @tcbp: a pointer to an array of cooling devices
- * @count: number of elements in array
- * @trip_id: the trip point index
- * @usage: the percentage (from 0 to 100) of cooling contribution
- */
-
-struct __thermal_bind_params {
- struct __thermal_cooling_bind_param *tcbp;
- unsigned int count;
- unsigned int trip_id;
- unsigned int usage;
-};
-
-/**
- * struct __thermal_zone - internal representation of a thermal zone
- * @passive_delay: polling interval while passive cooling is activated
- * @polling_delay: zone polling interval
- * @slope: slope of the temperature adjustment curve
- * @offset: offset of the temperature adjustment curve
- * @ntrips: number of trip points
- * @trips: an array of trip points (0..ntrips - 1)
- * @num_tbps: number of thermal bind params
- * @tbps: an array of thermal bind params (0..num_tbps - 1)
- * @sensor_data: sensor private data used while reading temperature and trend
- * @ops: set of callbacks to handle the thermal zone based on DT
- */
-
-struct __thermal_zone {
- int passive_delay;
- int polling_delay;
- int slope;
- int offset;
-
- /* trip data */
- int ntrips;
- struct thermal_trip *trips;
-
- /* cooling binding data */
- int num_tbps;
- struct __thermal_bind_params *tbps;
-
- /* sensor interface */
- void *sensor_data;
- const struct thermal_zone_of_device_ops *ops;
-};
-
-/*** DT thermal zone device callbacks ***/
-
-static int of_thermal_get_temp(struct thermal_zone_device *tz,
- int *temp)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->get_temp)
- return -EINVAL;
-
- return data->ops->get_temp(data->sensor_data, temp);
-}
-
-static int of_thermal_set_trips(struct thermal_zone_device *tz,
- int low, int high)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->set_trips)
- return -EINVAL;
-
- return data->ops->set_trips(data->sensor_data, low, high);
-}
-
/**
* of_thermal_get_ntrips - function to export number of available trip
* points.
@@ -158,114 +71,6 @@ of_thermal_get_trip_points(struct thermal_zone_device *tz)
}
EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
-/**
- * of_thermal_set_emul_temp - function to set emulated temperature
- *
- * @tz: pointer to a thermal zone
- * @temp: temperature to set
- *
- * This function gives the ability to set emulated value of temperature,
- * which is handy for debugging
- *
- * Return: zero on success, error code otherwise
- */
-static int of_thermal_set_emul_temp(struct thermal_zone_device *tz,
- int temp)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->set_emul_temp)
- return -EINVAL;
-
- return data->ops->set_emul_temp(data->sensor_data, temp);
-}
-
-static int of_thermal_get_trend(struct thermal_zone_device *tz, int trip,
- enum thermal_trend *trend)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (!data->ops || !data->ops->get_trend)
- return -EINVAL;
-
- return data->ops->get_trend(data->sensor_data, trip, trend);
-}
-
-static int of_thermal_change_mode(struct thermal_zone_device *tz,
- enum thermal_device_mode mode)
-{
- struct __thermal_zone *data = tz->devdata;
-
- return data->ops->change_mode(data->sensor_data, mode);
-}
-
-static int of_thermal_bind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- struct __thermal_zone *data = thermal->devdata;
- struct __thermal_bind_params *tbp;
- struct __thermal_cooling_bind_param *tcbp;
- int i, j;
-
- if (!data || IS_ERR(data))
- return -ENODEV;
-
- /* find where to bind */
- for (i = 0; i < data->num_tbps; i++) {
- tbp = data->tbps + i;
-
- for (j = 0; j < tbp->count; j++) {
- tcbp = tbp->tcbp + j;
-
- if (tcbp->cooling_device == cdev->np) {
- int ret;
-
- ret = thermal_zone_bind_cooling_device(thermal,
- tbp->trip_id, cdev,
- tcbp->max,
- tcbp->min,
- tbp->usage);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static int of_thermal_unbind(struct thermal_zone_device *thermal,
- struct thermal_cooling_device *cdev)
-{
- struct __thermal_zone *data = thermal->devdata;
- struct __thermal_bind_params *tbp;
- struct __thermal_cooling_bind_param *tcbp;
- int i, j;
-
- if (!data || IS_ERR(data))
- return -ENODEV;
-
- /* find where to unbind */
- for (i = 0; i < data->num_tbps; i++) {
- tbp = data->tbps + i;
-
- for (j = 0; j < tbp->count; j++) {
- tcbp = tbp->tcbp + j;
-
- if (tcbp->cooling_device == cdev->np) {
- int ret;
-
- ret = thermal_zone_unbind_cooling_device(thermal,
- tbp->trip_id, cdev);
- if (ret)
- return ret;
- }
- }
- }
-
- return 0;
-}
-
static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
enum thermal_trip_type *type)
{
@@ -288,28 +93,6 @@ static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
return 0;
}
-static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
- int temp)
-{
- struct __thermal_zone *data = tz->devdata;
-
- if (trip >= tz->num_trips || trip < 0)
- return -EDOM;
-
- if (data->ops && data->ops->set_trip_temp) {
- int ret;
-
- ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
- if (ret)
- return ret;
- }
-
- /* thermal framework should take care of data->mask & (1 << trip) */
- tz->trips[trip].temperature = temp;
-
- return 0;
-}
-
static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
int *hyst)
{
@@ -347,307 +130,6 @@ static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
return -EINVAL;
}
-static struct thermal_zone_device_ops of_thermal_ops = {
- .get_trip_type = of_thermal_get_trip_type,
- .get_trip_temp = of_thermal_get_trip_temp,
- .set_trip_temp = of_thermal_set_trip_temp,
- .get_trip_hyst = of_thermal_get_trip_hyst,
- .set_trip_hyst = of_thermal_set_trip_hyst,
- .get_crit_temp = of_thermal_get_crit_temp,
-
- .bind = of_thermal_bind,
- .unbind = of_thermal_unbind,
-};
-
-/*** sensor API ***/
-
-static struct thermal_zone_device *
-thermal_zone_of_add_sensor(struct device_node *zone,
- struct device_node *sensor, void *data,
- const struct thermal_zone_of_device_ops *ops)
-{
- struct thermal_zone_device *tzd;
- struct __thermal_zone *tz;
-
- tzd = thermal_zone_get_zone_by_name(zone->name);
- if (IS_ERR(tzd))
- return ERR_PTR(-EPROBE_DEFER);
-
- tz = tzd->devdata;
-
- if (!ops)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&tzd->lock);
- tz->ops = ops;
- tz->sensor_data = data;
-
- tzd->ops->get_temp = of_thermal_get_temp;
- tzd->ops->get_trend = of_thermal_get_trend;
-
- /*
- * The thermal zone core will calculate the window if they have set the
- * optional set_trips pointer.
- */
- if (ops->set_trips)
- tzd->ops->set_trips = of_thermal_set_trips;
-
- if (ops->set_emul_temp)
- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
-
- if (ops->change_mode)
- tzd->ops->change_mode = of_thermal_change_mode;
-
- mutex_unlock(&tzd->lock);
-
- return tzd;
-}
-
-/**
- * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone
- * @tz_np: a valid thermal zone device node.
- * @sensor_np: a sensor node of a valid sensor device.
- * @id: the sensor ID returned if success.
- *
- * This function will get sensor ID from a given thermal zone node and
- * the sensor node must match the temperature provider @sensor_np.
- *
- * Return: 0 on success, proper error code otherwise.
- */
-
-int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
- struct device_node *sensor_np,
- u32 *id)
-{
- struct of_phandle_args sensor_specs;
- int ret;
-
- ret = of_parse_phandle_with_args(tz_np,
- "thermal-sensors",
- "#thermal-sensor-cells",
- 0,
- &sensor_specs);
- if (ret)
- return ret;
-
- if (sensor_specs.np != sensor_np) {
- of_node_put(sensor_specs.np);
- return -ENODEV;
- }
-
- if (sensor_specs.args_count > 1)
- pr_warn("%pOFn: too many cells in sensor specifier %d\n",
- sensor_specs.np, sensor_specs.args_count);
-
- *id = sensor_specs.args_count ? sensor_specs.args[0] : 0;
-
- of_node_put(sensor_specs.np);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id);
-
-/**
- * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone
- * @dev: a valid struct device pointer of a sensor device. Must contain
- * a valid .of_node, for the sensor node.
- * @sensor_id: a sensor identifier, in case the sensor IP has more
- * than one sensors
- * @data: a private pointer (owned by the caller) that will be passed
- * back, when a temperature reading is needed.
- * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
- *
- * This function will search the list of thermal zones described in device
- * tree and look for the zone that refer to the sensor device pointed by
- * @dev->of_node as temperature providers. For the zone pointing to the
- * sensor node, the sensor will be added to the DT thermal zone device.
- *
- * The thermal zone temperature is provided by the @get_temp function
- * pointer. When called, it will have the private pointer @data back.
- *
- * The thermal zone temperature trend is provided by the @get_trend function
- * pointer. When called, it will have the private pointer @data back.
- *
- * TODO:
- * 01 - This function must enqueue the new sensor instead of using
- * it as the only source of temperature values.
- *
- * 02 - There must be a way to match the sensor with all thermal zones
- * that refer to it.
- *
- * Return: On success returns a valid struct thermal_zone_device,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
- * check the return value with help of IS_ERR() helper.
- */
-struct thermal_zone_device *
-thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
- const struct thermal_zone_of_device_ops *ops)
-{
- struct device_node *np, *child, *sensor_np;
- struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np)
- return ERR_PTR(-ENODEV);
-
- if (!dev || !dev->of_node) {
- of_node_put(np);
- return ERR_PTR(-ENODEV);
- }
-
- sensor_np = of_node_get(dev->of_node);
-
- for_each_available_child_of_node(np, child) {
- int ret, id;
-
- /* For now, thermal framework supports only 1 sensor per zone */
- ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id);
- if (ret)
- continue;
-
- if (id == sensor_id) {
- tzd = thermal_zone_of_add_sensor(child, sensor_np,
- data, ops);
- if (!IS_ERR(tzd))
- thermal_zone_device_enable(tzd);
-
- of_node_put(child);
- goto exit;
- }
- }
-exit:
- of_node_put(sensor_np);
- of_node_put(np);
-
- return tzd;
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
-
-/**
- * thermal_zone_of_sensor_unregister - unregisters a sensor from a DT thermal zone
- * @dev: a valid struct device pointer of a sensor device. Must contain
- * a valid .of_node, for the sensor node.
- * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
- *
- * This function removes the sensor callbacks and private data from the
- * thermal zone device registered with thermal_zone_of_sensor_register()
- * API. It will also silent the zone by remove the .get_temp() and .get_trend()
- * thermal zone device callbacks.
- *
- * TODO: When the support to several sensors per zone is added, this
- * function must search the sensor list based on @dev parameter.
- *
- */
-void thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tzd)
-{
- struct __thermal_zone *tz;
-
- if (!dev || !tzd || !tzd->devdata)
- return;
-
- tz = tzd->devdata;
-
- /* no __thermal_zone, nothing to be done */
- if (!tz)
- return;
-
- /* stop temperature polling */
- thermal_zone_device_disable(tzd);
-
- mutex_lock(&tzd->lock);
- tzd->ops->get_temp = NULL;
- tzd->ops->get_trend = NULL;
- tzd->ops->set_emul_temp = NULL;
- tzd->ops->change_mode = NULL;
-
- tz->ops = NULL;
- tz->sensor_data = NULL;
- mutex_unlock(&tzd->lock);
-}
-EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
-
-static void devm_thermal_zone_of_sensor_release(struct device *dev, void *res)
-{
- thermal_zone_of_sensor_unregister(dev,
- *(struct thermal_zone_device **)res);
-}
-
-static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res,
- void *data)
-{
- struct thermal_zone_device **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
-/**
- * devm_thermal_zone_of_sensor_register - Resource managed version of
- * thermal_zone_of_sensor_register()
- * @dev: a valid struct device pointer of a sensor device. Must contain
- * a valid .of_node, for the sensor node.
- * @sensor_id: a sensor identifier, in case the sensor IP has more
- * than one sensors
- * @data: a private pointer (owned by the caller) that will be passed
- * back, when a temperature reading is needed.
- * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
- *
- * Refer thermal_zone_of_sensor_register() for more details.
- *
- * Return: On success returns a valid struct thermal_zone_device,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
- * check the return value with help of IS_ERR() helper.
- * Registered thermal_zone_device device will automatically be
- * released when device is unbounded.
- */
-struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
- struct device *dev, int sensor_id,
- void *data, const struct thermal_zone_of_device_ops *ops)
-{
- struct thermal_zone_device **ptr, *tzd;
-
- ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return ERR_PTR(-ENOMEM);
-
- tzd = thermal_zone_of_sensor_register(dev, sensor_id, data, ops);
- if (IS_ERR(tzd)) {
- devres_free(ptr);
- return tzd;
- }
-
- *ptr = tzd;
- devres_add(dev, ptr);
-
- return tzd;
-}
-EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_register);
-
-/**
- * devm_thermal_zone_of_sensor_unregister - Resource managed version of
- * thermal_zone_of_sensor_unregister().
- * @dev: Device for which which resource was allocated.
- * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
- *
- * This function removes the sensor callbacks and private data from the
- * thermal zone device registered with devm_thermal_zone_of_sensor_register()
- * API. It will also silent the zone by remove the .get_temp() and .get_trend()
- * thermal zone device callbacks.
- * Normally this function will not need to be called and the resource
- * management code will ensure that the resource is freed.
- */
-void devm_thermal_zone_of_sensor_unregister(struct device *dev,
- struct thermal_zone_device *tzd)
-{
- WARN_ON(devres_release(dev, devm_thermal_zone_of_sensor_release,
- devm_thermal_zone_of_sensor_match, tzd));
-}
-EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister);
-
/*** functions parsing device tree nodes ***/
static int of_find_trip_id(struct device_node *np, struct device_node *trip)
@@ -679,98 +161,6 @@ out:
return i;
}
-/**
- * thermal_of_populate_bind_params - parse and fill cooling map data
- * @np: DT node containing a cooling-map node
- * @__tbp: data structure to be filled with cooling map info
- * @trips: array of thermal zone trip points
- * @ntrips: number of trip points inside trips.
- *
- * This function parses a cooling-map type of node represented by
- * @np parameter and fills the read data into @__tbp data structure.
- * It needs the already parsed array of trip points of the thermal zone
- * in consideration.
- *
- * Return: 0 on success, proper error code otherwise
- */
-static int thermal_of_populate_bind_params(struct device_node *tz_np,
- struct device_node *np,
- struct __thermal_bind_params *__tbp)
-{
- struct of_phandle_args cooling_spec;
- struct __thermal_cooling_bind_param *__tcbp;
- struct device_node *trip;
- int ret, i, count;
- int trip_id;
- u32 prop;
-
- /* Default weight. Usage is optional */
- __tbp->usage = THERMAL_WEIGHT_DEFAULT;
- ret = of_property_read_u32(np, "contribution", &prop);
- if (ret == 0)
- __tbp->usage = prop;
-
- trip = of_parse_phandle(np, "trip", 0);
- if (!trip) {
- pr_err("missing trip property\n");
- return -ENODEV;
- }
-
- trip_id = of_find_trip_id(tz_np, trip);
- if (trip_id < 0) {
- ret = trip_id;
- goto end;
- }
-
- __tbp->trip_id = trip_id;
-
- count = of_count_phandle_with_args(np, "cooling-device",
- "#cooling-cells");
- if (count <= 0) {
- pr_err("Add a cooling_device property with at least one device\n");
- ret = -ENOENT;
- goto end;
- }
-
- __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
- if (!__tcbp) {
- ret = -ENOMEM;
- goto end;
- }
-
- for (i = 0; i < count; i++) {
- ret = of_parse_phandle_with_args(np, "cooling-device",
- "#cooling-cells", i, &cooling_spec);
- if (ret < 0) {
- pr_err("Invalid cooling-device entry\n");
- goto free_tcbp;
- }
-
- __tcbp[i].cooling_device = cooling_spec.np;
-
- if (cooling_spec.args_count >= 2) { /* at least min and max */
- __tcbp[i].min = cooling_spec.args[0];
- __tcbp[i].max = cooling_spec.args[1];
- } else {
- pr_err("wrong reference to cooling device, missing limits\n");
- }
- }
-
- __tbp->tcbp = __tcbp;
- __tbp->count = count;
-
- goto end;
-
-free_tcbp:
- for (i = i - 1; i >= 0; i--)
- of_node_put(__tcbp[i].cooling_device);
- kfree(__tcbp);
-end:
- of_node_put(trip);
-
- return ret;
-}
-
/*
* It maps 'enum thermal_trip_type' found in include/linux/thermal.h
* into the device tree binding of 'trip', property type.
@@ -811,16 +201,6 @@ static int thermal_of_get_trip_type(struct device_node *np,
return -ENODEV;
}
-/**
- * thermal_of_populate_trip - parse and fill one trip point data
- * @np: DT node containing a trip point node
- * @trip: trip point data structure to be filled up
- *
- * This function parses a trip point type of node represented by
- * @np parameter and fills the read data into @trip data structure.
- *
- * Return: 0 on success, proper error code otherwise
- */
static int thermal_of_populate_trip(struct device_node *np,
struct thermal_trip *trip)
{
@@ -897,258 +277,458 @@ out_of_node_put:
return ERR_PTR(ret);
}
-/**
- * thermal_of_build_thermal_zone - parse and fill one thermal zone data
- * @np: DT node containing a thermal zone node
- *
- * This function parses a thermal zone type of node represented by
- * @np parameter and fills the read data into a __thermal_zone data structure
- * and return this pointer.
- *
- * TODO: Missing properties to parse: thermal-sensor-names
- *
- * Return: On success returns a valid struct __thermal_zone,
- * otherwise, it returns a corresponding ERR_PTR(). Caller must
- * check the return value with help of IS_ERR() helper.
- */
-static struct __thermal_zone
-__init *thermal_of_build_thermal_zone(struct device_node *np)
+static struct device_node *of_thermal_zone_find(struct device_node *sensor, int id)
{
- struct device_node *child = NULL, *gchild;
- struct __thermal_zone *tz;
- int ret, i;
- u32 prop, coef[2];
+ struct device_node *np, *tz;
+ struct of_phandle_args sensor_specs;
+ np = of_find_node_by_name(NULL, "thermal-zones");
if (!np) {
- pr_err("no thermal zone np\n");
- return ERR_PTR(-EINVAL);
+ pr_debug("No thermal zones description\n");
+ return ERR_PTR(-ENODEV);
}
- tz = kzalloc(sizeof(*tz), GFP_KERNEL);
- if (!tz)
- return ERR_PTR(-ENOMEM);
+ /*
+ * For each thermal zone, look for a sensor definition
+ * corresponding to the one passed as a parameter
+ */
+ for_each_available_child_of_node(np, tz) {
+
+ int count, i;
+
+ count = of_count_phandle_with_args(tz, "thermal-sensors",
+ "#thermal-sensor-cells");
+ if (count <= 0) {
+ pr_err("%pOFn: missing thermal sensor\n", tz);
+ tz = ERR_PTR(-EINVAL);
+ goto out;
+ }
- ret = of_property_read_u32(np, "polling-delay-passive", &prop);
+ for (i = 0; i < count; i++) {
+
+ int ret;
+
+ ret = of_parse_phandle_with_args(tz, "thermal-sensors",
+ "#thermal-sensor-cells",
+ i, &sensor_specs);
+ if (ret < 0) {
+ pr_err("%pOFn: Failed to read thermal-sensors cells: %d\n", tz, ret);
+ tz = ERR_PTR(ret);
+ goto out;
+ }
+
+ if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ?
+ sensor_specs.args[0] : 0)) {
+ pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, tz);
+ goto out;
+ }
+ }
+ }
+ tz = ERR_PTR(-ENODEV);
+out:
+ of_node_put(np);
+ return tz;
+}
+
+static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdelay)
+{
+ int ret;
+
+ ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
if (ret < 0) {
pr_err("%pOFn: missing polling-delay-passive property\n", np);
- goto free_tz;
+ return ret;
}
- tz->passive_delay = prop;
- ret = of_property_read_u32(np, "polling-delay", &prop);
+ ret = of_property_read_u32(np, "polling-delay", delay);
if (ret < 0) {
pr_err("%pOFn: missing polling-delay property\n", np);
- goto free_tz;
+ return ret;
}
- tz->polling_delay = prop;
+
+ return 0;
+}
+
+static struct thermal_zone_params *thermal_of_parameters_init(struct device_node *np)
+{
+ struct thermal_zone_params *tzp;
+ int coef[2];
+ int ncoef = ARRAY_SIZE(coef);
+ int prop, ret;
+
+ tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
+ if (!tzp)
+ return ERR_PTR(-ENOMEM);
+
+ tzp->no_hwmon = true;
+
+ if (!of_property_read_u32(np, "sustainable-power", &prop))
+ tzp->sustainable_power = prop;
/*
- * REVIST: for now, the thermal framework supports only
- * one sensor per thermal zone. Thus, we are considering
- * only the first two values as slope and offset.
+ * For now, the thermal framework supports only one sensor per
+ * thermal zone. Thus, we are considering only the first two
+ * values as slope and offset.
*/
- ret = of_property_read_u32_array(np, "coefficients", coef, 2);
- if (ret == 0) {
- tz->slope = coef[0];
- tz->offset = coef[1];
- } else {
- tz->slope = 1;
- tz->offset = 0;
+ ret = of_property_read_u32_array(np, "coefficients", coef, ncoef);
+ if (ret) {
+ coef[0] = 1;
+ coef[1] = 0;
}
- tz->trips = thermal_of_trips_init(np, &tz->ntrips);
- if (IS_ERR(tz->trips)) {
- ret = PTR_ERR(tz->trips);
- goto finish;
+ tzp->slope = coef[0];
+ tzp->offset = coef[1];
+
+ return tzp;
+}
+
+static struct device_node *thermal_of_zone_get_by_name(struct thermal_zone_device *tz)
+{
+ struct device_node *np, *tz_np;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ tz_np = of_get_child_by_name(np, tz->type);
+
+ of_node_put(np);
+
+ if (!tz_np)
+ return ERR_PTR(-ENODEV);
+
+ return tz_np;
+}
+
+static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_id,
+ struct thermal_zone_device *tz, struct thermal_cooling_device *cdev)
+{
+ struct of_phandle_args cooling_spec;
+ int ret;
+
+ ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ index, &cooling_spec);
+
+ of_node_put(cooling_spec.np);
+
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ return ret;
}
- /* cooling-maps */
- child = of_get_child_by_name(np, "cooling-maps");
+ if (cooling_spec.args_count < 2) {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ return -EINVAL;
+ }
- /* cooling-maps not provided */
- if (!child)
- goto finish;
+ if (cooling_spec.np != cdev->np)
+ return 0;
- tz->num_tbps = of_get_child_count(child);
- if (tz->num_tbps == 0)
- goto finish;
+ ret = thermal_zone_unbind_cooling_device(tz, trip_id, cdev);
+ if (ret)
+ pr_err("Failed to unbind '%s' with '%s': %d\n", tz->type, cdev->type, ret);
- tz->tbps = kcalloc(tz->num_tbps, sizeof(*tz->tbps), GFP_KERNEL);
- if (!tz->tbps) {
- ret = -ENOMEM;
- goto free_trips;
+ return ret;
+}
+
+static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
+ struct thermal_zone_device *tz, struct thermal_cooling_device *cdev)
+{
+ struct of_phandle_args cooling_spec;
+ int ret, weight = THERMAL_WEIGHT_DEFAULT;
+
+ of_property_read_u32(map_np, "contribution", &weight);
+
+ ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
+ index, &cooling_spec);
+
+ of_node_put(cooling_spec.np);
+
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ return ret;
}
- i = 0;
- for_each_child_of_node(child, gchild) {
- ret = thermal_of_populate_bind_params(np, gchild, &tz->tbps[i++]);
- if (ret) {
- of_node_put(gchild);
- goto free_tbps;
- }
+ if (cooling_spec.args_count < 2) {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ return -EINVAL;
}
-finish:
- of_node_put(child);
+ if (cooling_spec.np != cdev->np)
+ return 0;
- return tz;
+ ret = thermal_zone_bind_cooling_device(tz, trip_id, cdev, cooling_spec.args[1],
+ cooling_spec.args[0],
+ weight);
+ if (ret)
+ pr_err("Failed to bind '%s' with '%s': %d\n", tz->type, cdev->type, ret);
-free_tbps:
- for (i = i - 1; i >= 0; i--) {
- struct __thermal_bind_params *tbp = tz->tbps + i;
- int j;
+ return ret;
+}
+
+static int thermal_of_for_each_cooling_device(struct device_node *tz_np, struct device_node *map_np,
+ struct thermal_zone_device *tz, struct thermal_cooling_device *cdev,
+ int (*action)(struct device_node *, int, int,
+ struct thermal_zone_device *, struct thermal_cooling_device *))
+{
+ struct device_node *tr_np;
+ int count, i, trip_id;
- for (j = 0; j < tbp->count; j++)
- of_node_put(tbp->tcbp[j].cooling_device);
+ tr_np = of_parse_phandle(map_np, "trip", 0);
+ if (!tr_np)
+ return -ENODEV;
- kfree(tbp->tcbp);
+ trip_id = of_find_trip_id(tz_np, tr_np);
+ if (trip_id < 0)
+ return trip_id;
+
+ count = of_count_phandle_with_args(map_np, "cooling-device", "#cooling-cells");
+ if (count <= 0) {
+ pr_err("Add a cooling_device property with at least one device\n");
+ return -ENOENT;
}
- kfree(tz->tbps);
-free_trips:
- kfree(tz->trips);
-free_tz:
- kfree(tz);
- of_node_put(child);
+ /*
+ * At this point, we don't want to bail out when there is an
+ * error; we will try to bind/unbind as many cooling devices
+ * as possible.
+ */
+ for (i = 0; i < count; i++)
+ action(map_np, i, trip_id, tz, cdev);
- return ERR_PTR(ret);
+ return 0;
}
-static __init void of_thermal_free_zone(struct __thermal_zone *tz)
+static int thermal_of_for_each_cooling_maps(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev,
+ int (*action)(struct device_node *, int, int,
+ struct thermal_zone_device *, struct thermal_cooling_device *))
{
- struct __thermal_bind_params *tbp;
- int i, j;
+ struct device_node *tz_np, *cm_np, *child;
+ int ret = 0;
- for (i = 0; i < tz->num_tbps; i++) {
- tbp = tz->tbps + i;
+ tz_np = thermal_of_zone_get_by_name(tz);
+ if (IS_ERR(tz_np)) {
+ pr_err("Failed to get node tz by name\n");
+ return PTR_ERR(tz_np);
+ }
- for (j = 0; j < tbp->count; j++)
- of_node_put(tbp->tcbp[j].cooling_device);
+ cm_np = of_get_child_by_name(tz_np, "cooling-maps");
+ if (!cm_np)
+ goto out;
- kfree(tbp->tcbp);
+ for_each_child_of_node(cm_np, child) {
+ ret = thermal_of_for_each_cooling_device(tz_np, child, tz, cdev, action);
+ if (ret)
+ break;
}
- kfree(tz->tbps);
- kfree(tz->trips);
- kfree(tz);
+ of_node_put(cm_np);
+out:
+ of_node_put(tz_np);
+
+ return ret;
+}
+
+static int thermal_of_bind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_bind);
+}
+
+static int thermal_of_unbind(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev)
+{
+ return thermal_of_for_each_cooling_maps(tz, cdev, __thermal_of_unbind);
}
/**
- * of_thermal_destroy_zones - remove all zones parsed and allocated resources
+ * thermal_of_zone_unregister - Clean up the resources allocated for the zone
*
- * Finds all zones parsed and added to the thermal framework and remove them
- * from the system, together with their resources.
+ * This function disables the thermal zone and frees the
+ * resources allocated specifically for the thermal OF zone.
*
+ * @tz: a pointer to the thermal zone structure
*/
-static __init void of_thermal_destroy_zones(void)
+void thermal_of_zone_unregister(struct thermal_zone_device *tz)
{
- struct device_node *np, *child;
+ struct thermal_trip *trips = tz->trips;
+ struct thermal_zone_params *tzp = tz->tzp;
+ struct thermal_zone_device_ops *ops = tz->ops;
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return;
+ thermal_zone_device_disable(tz);
+ thermal_zone_device_unregister(tz);
+ kfree(trips);
+ kfree(tzp);
+ kfree(ops);
+}
+EXPORT_SYMBOL_GPL(thermal_of_zone_unregister);
+
+/**
+ * thermal_of_zone_register - Register a thermal zone with device node sensor
+ *
+ * thermal_of_zone_register() parses the device tree, given a sensor
+ * device node and an identifier. It searches for the thermal zone
+ * associated with the sensor/id pair, retrieves all the thermal zone
+ * properties and registers a new thermal zone with those properties.
+ *
+ * @sensor: A device node pointer corresponding to the sensor in the device tree
+ * @id: An integer as sensor identifier
+ * @data: A private data to be stored in the thermal zone dedicated private area
+ * @ops: A set of thermal sensor ops
+ *
+ * Return: a valid thermal zone structure pointer on success.
+ * - EINVAL: if the device tree thermal description is malformed
+ * - ENOMEM: if one structure cannot be allocated
+ * - Other negative errors are returned by the underlying called functions
+ */
+struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor, int id, void *data,
+ const struct thermal_zone_device_ops *ops)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_trip *trips;
+ struct thermal_zone_params *tzp;
+ struct thermal_zone_device_ops *of_ops;
+ struct device_node *np;
+ int delay, pdelay;
+ int ntrips, mask;
+ int ret;
+
+ of_ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
+ if (!of_ops)
+ return ERR_PTR(-ENOMEM);
+
+ np = of_thermal_zone_find(sensor, id);
+ if (IS_ERR(np)) {
+ if (PTR_ERR(np) != -ENODEV)
+ pr_err("Failed to find thermal zone for %pOFn id=%d\n", sensor, id);
+ /* of_ops is not yet owned by a registered zone, free it here */
+ kfree(of_ops);
+ return ERR_CAST(np);
}
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
+ trips = thermal_of_trips_init(np, &ntrips);
+ if (IS_ERR(trips)) {
+ pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id);
+ kfree(of_ops);
+ return ERR_CAST(trips);
+ }
- zone = thermal_zone_get_zone_by_name(child->name);
- if (IS_ERR(zone))
- continue;
+ ret = thermal_of_monitor_init(np, &delay, &pdelay);
+ if (ret) {
+ pr_err("Failed to initialize monitoring delays from %pOFn\n", np);
+ goto out_kfree_trips;
+ }
- thermal_zone_device_unregister(zone);
- kfree(zone->tzp);
- kfree(zone->ops);
- of_thermal_free_zone(zone->devdata);
+ tzp = thermal_of_parameters_init(np);
+ if (IS_ERR(tzp)) {
+ ret = PTR_ERR(tzp);
+ pr_err("Failed to initialize parameter from %pOFn: %d\n", np, ret);
+ goto out_kfree_trips;
}
- of_node_put(np);
+
+ of_ops->get_trip_type = of_ops->get_trip_type ? : of_thermal_get_trip_type;
+ of_ops->get_trip_temp = of_ops->get_trip_temp ? : of_thermal_get_trip_temp;
+ of_ops->get_trip_hyst = of_ops->get_trip_hyst ? : of_thermal_get_trip_hyst;
+ of_ops->set_trip_hyst = of_ops->set_trip_hyst ? : of_thermal_set_trip_hyst;
+ of_ops->get_crit_temp = of_ops->get_crit_temp ? : of_thermal_get_crit_temp;
+ of_ops->bind = thermal_of_bind;
+ of_ops->unbind = thermal_of_unbind;
+
+ mask = GENMASK_ULL((ntrips) - 1, 0);
+
+ tz = thermal_zone_device_register_with_trips(np->name, trips, ntrips,
+ mask, data, of_ops, tzp,
+ pdelay, delay);
+ if (IS_ERR(tz)) {
+ ret = PTR_ERR(tz);
+ pr_err("Failed to register thermal zone %pOFn: %d\n", np, ret);
+ goto out_kfree_tzp;
+ }
+
+ ret = thermal_zone_device_enable(tz);
+ if (ret) {
+ pr_err("Failed to enable thermal zone '%s', id=%d: %d\n",
+ tz->type, tz->id, ret);
+ thermal_of_zone_unregister(tz);
+ return ERR_PTR(ret);
+ }
+
+ return tz;
+
+out_kfree_tzp:
+ kfree(tzp);
+out_kfree_trips:
+ kfree(trips);
+ kfree(of_ops);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(thermal_of_zone_register);
+
+static void devm_thermal_of_zone_release(struct device *dev, void *res)
+{
+ thermal_of_zone_unregister(*(struct thermal_zone_device **)res);
+}
+
+static int devm_thermal_of_zone_match(struct device *dev, void *res,
+ void *data)
+{
+ struct thermal_zone_device **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
}
/**
- * of_parse_thermal_zones - parse device tree thermal data
- *
- * Initialization function that can be called by machine initialization
- * code to parse thermal data and populate the thermal framework
- * with hardware thermal zones info. This function only parses thermal zones.
- * Cooling devices and sensor devices nodes are supposed to be parsed
- * by their respective drivers.
+ * devm_thermal_of_zone_register - register a thermal zone tied to the sensor life cycle
*
- * Return: 0 on success, proper error code otherwise
+ * This function is the resource-managed version of the thermal_of_zone_register() function.
*
+ * @dev: a device structure pointer to sensor to be tied with the thermal zone OF life cycle
+ * @sensor_id: the sensor identifier
+ * @data: a pointer to a private data to be stored in the thermal zone 'devdata' field
+ * @ops: a pointer to the ops structure associated with the sensor
*/
-int __init of_parse_thermal_zones(void)
+struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int sensor_id, void *data,
+ const struct thermal_zone_device_ops *ops)
{
- struct device_node *np, *child;
- struct __thermal_zone *tz;
- struct thermal_zone_device_ops *ops;
-
- np = of_find_node_by_name(NULL, "thermal-zones");
- if (!np) {
- pr_debug("unable to find thermal zones\n");
- return 0; /* Run successfully on systems without thermal DT */
- }
-
- for_each_available_child_of_node(np, child) {
- struct thermal_zone_device *zone;
- struct thermal_zone_params *tzp;
- int i, mask = 0;
- u32 prop;
-
- tz = thermal_of_build_thermal_zone(child);
- if (IS_ERR(tz)) {
- pr_err("failed to build thermal zone %pOFn: %ld\n",
- child,
- PTR_ERR(tz));
- continue;
- }
-
- ops = kmemdup(&of_thermal_ops, sizeof(*ops), GFP_KERNEL);
- if (!ops)
- goto exit_free;
+ struct thermal_zone_device **ptr, *tzd;
- tzp = kzalloc(sizeof(*tzp), GFP_KERNEL);
- if (!tzp) {
- kfree(ops);
- goto exit_free;
- }
+ ptr = devres_alloc(devm_thermal_of_zone_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
- /* No hwmon because there might be hwmon drivers registering */
- tzp->no_hwmon = true;
-
- if (!of_property_read_u32(child, "sustainable-power", &prop))
- tzp->sustainable_power = prop;
-
- for (i = 0; i < tz->ntrips; i++)
- mask |= 1 << i;
-
- /* these two are left for temperature drivers to use */
- tzp->slope = tz->slope;
- tzp->offset = tz->offset;
-
- zone = thermal_zone_device_register_with_trips(child->name, tz->trips, tz->ntrips,
- mask, tz, ops, tzp, tz->passive_delay,
- tz->polling_delay);
- if (IS_ERR(zone)) {
- pr_err("Failed to build %pOFn zone %ld\n", child,
- PTR_ERR(zone));
- kfree(tzp);
- kfree(ops);
- of_thermal_free_zone(tz);
- /* attempting to build remaining zones still */
- }
+ tzd = thermal_of_zone_register(dev->of_node, sensor_id, data, ops);
+ if (IS_ERR(tzd)) {
+ devres_free(ptr);
+ return tzd;
}
- of_node_put(np);
- return 0;
-
-exit_free:
- of_node_put(child);
- of_node_put(np);
- of_thermal_free_zone(tz);
+ *ptr = tzd;
+ devres_add(dev, ptr);
- /* no memory available, so free what we have built */
- of_thermal_destroy_zones();
+ return tzd;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_of_zone_register);
- return -ENOMEM;
+/**
+ * devm_thermal_of_zone_unregister - Resource managed version of
+ * thermal_of_zone_unregister().
+ * @dev: Device for which the resource was allocated.
+ * @tz: a pointer to struct thermal_zone where the sensor is registered.
+ *
+ * This function unregisters the thermal zone previously registered with
+ * the devm_thermal_of_zone_register() API and frees the resources
+ * allocated for it.
+ * Normally this function will not need to be called and the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz)
+{
+ WARN_ON(devres_release(dev, devm_thermal_of_zone_release,
+ devm_thermal_of_zone_match, tz));
}
+EXPORT_SYMBOL_GPL(devm_thermal_of_zone_unregister);
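
Drivers that cannot rely on devres use the non-managed pair symmetrically: thermal_of_zone_register() against the sensor's of_node in probe, thermal_of_zone_unregister() in remove. A sketch under the assumption of a hypothetical bar_* driver (illustrative only, not part of this patch):

	#include <linux/platform_device.h>
	#include <linux/thermal.h>

	struct bar_priv {
		struct thermal_zone_device *tzd;
		/* hypothetical sensor state */
	};

	static int bar_get_temp(struct thermal_zone_device *tz, int *temp)
	{
		*temp = 25000;	/* placeholder reading, millidegrees Celsius */
		return 0;
	}

	static const struct thermal_zone_device_ops bar_ops = {
		.get_temp = bar_get_temp,
	};

	static int bar_probe(struct platform_device *pdev)
	{
		struct bar_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->tzd = thermal_of_zone_register(pdev->dev.of_node, 0,
						     priv, &bar_ops);
		if (IS_ERR(priv->tzd))
			return PTR_ERR(priv->tzd);

		platform_set_drvdata(pdev, priv);
		return 0;
	}

	static int bar_remove(struct platform_device *pdev)
	{
		struct bar_priv *priv = platform_get_drvdata(pdev);

		thermal_of_zone_unregister(priv->tzd);
		return 0;
	}
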
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 3a8d6e747c25..ec495c7dff03 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -49,7 +49,11 @@ static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int enabled = thermal_zone_device_is_enabled(tz);
+ int enabled;
+
+ mutex_lock(&tz->lock);
+ enabled = thermal_zone_device_is_enabled(tz);
+ mutex_unlock(&tz->lock);
return sprintf(buf, "%s\n", enabled ? "enabled" : "disabled");
}
@@ -115,7 +119,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
int temperature, hyst = 0;
enum thermal_trip_type type;
- if (!tz->ops->set_trip_temp)
+ if (!tz->ops->set_trip_temp && !tz->trips)
return -EPERM;
if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1)
@@ -124,9 +128,14 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (kstrtoint(buf, 10, &temperature))
return -EINVAL;
- ret = tz->ops->set_trip_temp(tz, trip, temperature);
- if (ret)
- return ret;
+ if (tz->ops->set_trip_temp) {
+ ret = tz->ops->set_trip_temp(tz, trip, temperature);
+ if (ret)
+ return ret;
+ }
+
+ if (tz->trips)
+ tz->trips[trip].temperature = temperature;
if (tz->ops->get_trip_hyst) {
ret = tz->ops->get_trip_hyst(tz, trip, &hyst);
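
One practical effect of the trip_point_temp_store() change above: a zone registered with a trips[] array no longer needs a .set_trip_temp callback for sysfs trip writes to take effect, because the core now updates the cached trip point directly. Sketch (hypothetical ops structure, illustrative only):

	static const struct thermal_zone_device_ops baz_ops = {
		.get_temp = baz_get_temp,	/* hypothetical sensor callback */
		/*
		 * No .set_trip_temp: with the hunk above, a write to
		 * trip_point_N_temp updates tz->trips[N].temperature
		 * instead of failing with -EPERM, provided the zone was
		 * registered with a trips[] array.
		 */
	};
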
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 703039d8b937..8a9055bd376e 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -65,10 +65,10 @@ static inline int ti_thermal_hotspot_temperature(int t, int s, int c)
/* thermal zone ops */
/* Get temperature callback function for thermal zone */
-static inline int __ti_thermal_get_temp(void *devdata, int *temp)
+static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
{
struct thermal_zone_device *pcb_tz = NULL;
- struct ti_thermal_data *data = devdata;
+ struct ti_thermal_data *data = tz->devdata;
struct ti_bandgap *bgp;
const struct ti_temp_sensor *s;
int ret, tmp, slope, constant;
@@ -85,8 +85,8 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp)
return ret;
/* Default constants */
- slope = thermal_zone_get_slope(data->ti_thermal);
- constant = thermal_zone_get_offset(data->ti_thermal);
+ slope = thermal_zone_get_slope(tz);
+ constant = thermal_zone_get_offset(tz);
pcb_tz = data->pcb_tz;
/* In case pcb zone is available, use the extrapolation rule with it */
@@ -107,9 +107,9 @@ static inline int __ti_thermal_get_temp(void *devdata, int *temp)
return ret;
}
-static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
+static int __ti_thermal_get_trend(struct thermal_zone_device *tz, int trip, enum thermal_trend *trend)
{
- struct ti_thermal_data *data = p;
+ struct ti_thermal_data *data = tz->devdata;
struct ti_bandgap *bgp;
int id, tr, ret = 0;
@@ -130,7 +130,7 @@ static int __ti_thermal_get_trend(void *p, int trip, enum thermal_trend *trend)
return 0;
}
-static const struct thermal_zone_of_device_ops ti_of_thermal_ops = {
+static const struct thermal_zone_device_ops ti_of_thermal_ops = {
.get_temp = __ti_thermal_get_temp,
.get_trend = __ti_thermal_get_trend,
};
@@ -170,7 +170,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
return -EINVAL;
/* in case this is specified by DT */
- data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id,
+ data->ti_thermal = devm_thermal_of_zone_register(bgp->dev, id,
data, &ti_of_thermal_ops);
if (IS_ERR(data->ti_thermal)) {
dev_err(bgp->dev, "thermal zone device is NULL\n");
diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c
index 4cae5561a2a3..4111d99ef50e 100644
--- a/drivers/thermal/uniphier_thermal.c
+++ b/drivers/thermal/uniphier_thermal.c
@@ -187,9 +187,9 @@ static void uniphier_tm_disable_sensor(struct uniphier_tm_dev *tdev)
usleep_range(1000, 2000); /* The spec note says at least 1ms */
}
-static int uniphier_tm_get_temp(void *data, int *out_temp)
+static int uniphier_tm_get_temp(struct thermal_zone_device *tz, int *out_temp)
{
- struct uniphier_tm_dev *tdev = data;
+ struct uniphier_tm_dev *tdev = tz->devdata;
struct regmap *map = tdev->regmap;
int ret;
u32 temp;
@@ -204,7 +204,7 @@ static int uniphier_tm_get_temp(void *data, int *out_temp)
return 0;
}
-static const struct thermal_zone_of_device_ops uniphier_of_thermal_ops = {
+static const struct thermal_zone_device_ops uniphier_of_thermal_ops = {
.get_temp = uniphier_tm_get_temp,
};
@@ -289,8 +289,8 @@ static int uniphier_tm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tdev);
- tdev->tz_dev = devm_thermal_zone_of_sensor_register(dev, 0, tdev,
- &uniphier_of_thermal_ops);
+ tdev->tz_dev = devm_thermal_of_zone_register(dev, 0, tdev,
+ &uniphier_of_thermal_ops);
if (IS_ERR(tdev->tz_dev)) {
dev_err(dev, "failed to register sensor device\n");
return PTR_ERR(tdev->tz_dev);
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index e76a6c173637..448fd2ec8f6e 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -27,10 +27,19 @@ config USB4_DEBUGFS_WRITE
Only enable this if you know what you are doing! Never enable
this for production systems or distro kernels.
+config USB4_DEBUGFS_MARGINING
+ bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)"
+ depends on DEBUG_FS
+ depends on USB4_DEBUGFS_WRITE
+ help
+ Enables hardware and software based receiver lane margining support
+ under each USB4 port. Used for electrical quality and robustness
+ validation during manufacturing. Should not be enabled by distro
+ kernels.
+
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
- depends on (USB4=m || KUNIT=y)
- depends on KUNIT
+ depends on USB4 && KUNIT=y
default KUNIT_ALL_TESTS
config USB4_DMA_TEST
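
For reference, a minimal configuration fragment implied by the dependencies of the new symbol (validation builds only; CONFIG_USB4=y and the debugfs options are assumptions about a typical test configuration):

	CONFIG_USB4=y
	CONFIG_DEBUG_FS=y
	CONFIG_USB4_DEBUGFS_WRITE=y
	CONFIG_USB4_DEBUGFS_MARGINING=y
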
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index b1f0dc8df47c..7a8adf5ad5a0 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -42,7 +42,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
*/
dev = acpi_get_first_physical_node(adev);
while (!dev) {
- adev = adev->parent;
+ adev = acpi_dev_parent(adev);
if (!adev)
break;
dev = acpi_get_first_physical_node(adev);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index e5ede5debfb0..0c661a706160 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -407,7 +407,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
static int tb_async_error(const struct ctl_pkg *pkg)
{
- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+ const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index c850b0ac098c..834bcad42e9f 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include "tb.h"
+#include "sb_regs.h"
#define PORT_CAP_PCIE_LEN 1
#define PORT_CAP_POWER_LEN 2
@@ -187,6 +188,828 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
#define DEBUGFS_MODE 0400
#endif
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
+/**
+ * struct tb_margining - Lane margining support
+ * @caps: Port lane margining capabilities
+ * @results: Last lane margining results
+ * @lanes: %0, %1 or %7 (all)
+ * @min_ber_level: Minimum supported BER level contour value
+ * @max_ber_level: Maximum supported BER level contour value
+ * @ber_level: Current BER level contour value
+ * @voltage_steps: Number of mandatory voltage steps
+ * @max_voltage_offset: Maximum mandatory voltage offset (in mV)
+ * @time_steps: Number of time margin steps
+ * @max_time_offset: Maximum time margin offset (in mUI)
+ * @software: %true if software margining is used instead of hardware
+ * @time: %true if time margining is used instead of voltage
+ * @right_high: %false if left/low margin test is performed, %true if
+ * right/high
+ */
+struct tb_margining {
+ u32 caps[2];
+ u32 results[2];
+ unsigned int lanes;
+ unsigned int min_ber_level;
+ unsigned int max_ber_level;
+ unsigned int ber_level;
+ unsigned int voltage_steps;
+ unsigned int max_voltage_offset;
+ unsigned int time_steps;
+ unsigned int max_time_offset;
+ bool software;
+ bool time;
+ bool right_high;
+};
+
+static bool supports_software(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
+}
+
+static bool supports_hardware(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
+}
+
+static bool both_lanes(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES;
+}
+
+static unsigned int independent_voltage_margins(const struct usb4_port *usb4)
+{
+ return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >>
+ USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT;
+}
+
+static bool supports_time(const struct usb4_port *usb4)
+{
+ return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
+}
+
+/* Only applicable if supports_time() returns true */
+static unsigned int independent_time_margins(const struct usb4_port *usb4)
+{
+ return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >>
+ USB4_MARGIN_CAP_1_TIME_INDP_SHIFT;
+}
+
+static ssize_t
+margining_ber_level_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ unsigned int val;
+ int ret = 0;
+ char *buf;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (usb4->margining->software) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto out_unlock;
+ }
+
+ buf[count - 1] = '\0';
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ goto out_free;
+
+ if (val < usb4->margining->min_ber_level ||
+ val > usb4->margining->max_ber_level) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ usb4->margining->ber_level = val;
+
+out_free:
+ free_page((unsigned long)buf);
+out_unlock:
+ mutex_unlock(&tb->lock);
+
+ return ret < 0 ? ret : count;
+}
+
+static void ber_level_show(struct seq_file *s, unsigned int val)
+{
+ if (val % 2)
+ seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val);
+ else
+ seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val);
+}
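
Worked examples of the BER contour encoding implemented above, derived directly from the two printf branches (the calls are illustrative only):

	ber_level_show(s, 0);	/* even value: prints "1e-12 (0)"    */
	ber_level_show(s, 4);	/* even value: prints "1e-10 (4)"    */
	ber_level_show(s, 5);	/* odd value:  prints "3 * 1e-9 (5)" */
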
+
+static int margining_ber_level_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+
+ if (usb4->margining->software)
+ return -EINVAL;
+ ber_level_show(s, usb4->margining->ber_level);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_ber_level);
+
+static int margining_caps_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ u32 cap0, cap1;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ /* Dump the raw caps first */
+ cap0 = usb4->margining->caps[0];
+ seq_printf(s, "0x%08x\n", cap0);
+ cap1 = usb4->margining->caps[1];
+ seq_printf(s, "0x%08x\n", cap1);
+
+ seq_printf(s, "# software margining: %s\n",
+ supports_software(usb4) ? "yes" : "no");
+ if (supports_hardware(usb4)) {
+ seq_puts(s, "# hardware margining: yes\n");
+ seq_puts(s, "# minimum BER level contour: ");
+ ber_level_show(s, usb4->margining->min_ber_level);
+ seq_puts(s, "# maximum BER level contour: ");
+ ber_level_show(s, usb4->margining->max_ber_level);
+ } else {
+ seq_puts(s, "# hardware margining: no\n");
+ }
+
+ seq_printf(s, "# both lanes simultaneously: %s\n",
+ both_lanes(usb4) ? "yes" : "no");
+ seq_printf(s, "# voltage margin steps: %u\n",
+ usb4->margining->voltage_steps);
+ seq_printf(s, "# maximum voltage offset: %u mV\n",
+ usb4->margining->max_voltage_offset);
+
+ switch (independent_voltage_margins(usb4)) {
+ case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
+ seq_puts(s, "# returns minimum between high and low voltage margins\n");
+ break;
+ case USB4_MARGIN_CAP_0_VOLTAGE_HL:
+ seq_puts(s, "# returns high or low voltage margin\n");
+ break;
+ case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
+ seq_puts(s, "# returns both high and low margins\n");
+ break;
+ }
+
+ if (supports_time(usb4)) {
+ seq_puts(s, "# time margining: yes\n");
+ seq_printf(s, "# time margining is destructive: %s\n",
+ cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no");
+
+ switch (independent_time_margins(usb4)) {
+ case USB4_MARGIN_CAP_1_TIME_MIN:
+ seq_puts(s, "# returns minimum between left and right time margins\n");
+ break;
+ case USB4_MARGIN_CAP_1_TIME_LR:
+ seq_puts(s, "# returns left or right margin\n");
+ break;
+ case USB4_MARGIN_CAP_1_TIME_BOTH:
+ seq_puts(s, "# returns both left and right margins\n");
+ break;
+ }
+
+ seq_printf(s, "# time margin steps: %u\n",
+ usb4->margining->time_steps);
+ seq_printf(s, "# maximum time offset: %u mUI\n",
+ usb4->margining->max_time_offset);
+ } else {
+ seq_puts(s, "# time margining: no\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RO(margining_caps);
+
+static ssize_t
+margining_lanes_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (!strcmp(buf, "0")) {
+ usb4->margining->lanes = 0;
+ } else if (!strcmp(buf, "1")) {
+ usb4->margining->lanes = 1;
+ } else if (!strcmp(buf, "all")) {
+ /* Needs to be supported */
+ if (both_lanes(usb4))
+ usb4->margining->lanes = 7;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret < 0 ? ret : count;
+}
+
+static int margining_lanes_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ unsigned int lanes;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ lanes = usb4->margining->lanes;
+ if (both_lanes(usb4)) {
+ if (!lanes)
+ seq_puts(s, "[0] 1 all\n");
+ else if (lanes == 1)
+ seq_puts(s, "0 [1] all\n");
+ else
+ seq_puts(s, "0 1 [all]\n");
+ } else {
+ if (!lanes)
+ seq_puts(s, "[0] 1\n");
+ else
+ seq_puts(s, "0 [1]\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_lanes);
+
+static ssize_t margining_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (!strcmp(buf, "software")) {
+ if (supports_software(usb4))
+ usb4->margining->software = true;
+ else
+ ret = -EINVAL;
+ } else if (!strcmp(buf, "hardware")) {
+ if (supports_hardware(usb4))
+ usb4->margining->software = false;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_mode_show(struct seq_file *s, void *not_used)
+{
+ const struct tb_port *port = s->private;
+ const struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ const char *space = "";
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (supports_software(usb4)) {
+ if (usb4->margining->software)
+ seq_puts(s, "[software]");
+ else
+ seq_puts(s, "software");
+ space = " ";
+ }
+ if (supports_hardware(usb4)) {
+ if (usb4->margining->software)
+ seq_printf(s, "%shardware", space);
+ else
+ seq_printf(s, "%s[hardware]", space);
+ }
+
+ mutex_unlock(&tb->lock);
+
+ seq_puts(s, "\n");
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_mode);
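+/*
+ * In hardware mode the router runs the margining itself and reports
+ * the measured margins in the second result dword; in software mode
+ * the driver drives the margining and reads back the error counters
+ * instead (see margining_run_write() and margining_results_show()).
+ */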
+
+static int margining_run_write(void *data, u64 val)
+{
+ struct tb_port *port = data;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb_switch *sw = port->sw;
+ struct tb_margining *margining;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ /*
+ * CL states may interfere with lane margining so inform the user
+ * and bail out.
+ */
+ if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) {
+ tb_port_warn(port,
+ "CL states are enabled, Disable them with clx=0 and re-connect\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ margining = usb4->margining;
+
+ if (margining->software) {
+ tb_port_dbg(port, "running software %s lane margining for lanes %u\n",
+ margining->time ? "time" : "voltage", margining->lanes);
+ ret = usb4_port_sw_margin(port, margining->lanes, margining->time,
+ margining->right_high,
+ USB4_MARGIN_SW_COUNTER_CLEAR);
+ if (ret)
+ goto out_unlock;
+
+ ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
+ } else {
+ tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n",
+ margining->time ? "time" : "voltage", margining->lanes);
+ /* Clear the results */
+ margining->results[0] = 0;
+ margining->results[1] = 0;
+ ret = usb4_port_hw_margin(port, margining->lanes,
+ margining->ber_level, margining->time,
+ margining->right_high, margining->results);
+ }
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write,
+ "%llu\n");
+
+static ssize_t margining_results_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ /* Just clear the results */
+ usb4->margining->results[0] = 0;
+ usb4->margining->results[1] = 0;
+
+ mutex_unlock(&tb->lock);
+ return count;
+}
+
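+/*
+ * The helpers below convert the raw step count reported in the
+ * hardware margining results into physical units, using the step
+ * count and maximum offset read from the margining capabilities in
+ * margining_port_init().
+ */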
+static void voltage_margin_show(struct seq_file *s,
+ const struct tb_margining *margining, u8 val)
+{
+ unsigned int tmp, voltage;
+
+ tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
+ seq_printf(s, "%u mV (%u)", voltage, tmp);
+ if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ seq_puts(s, " exceeds maximum");
+ seq_puts(s, "\n");
+}
+
+static void time_margin_show(struct seq_file *s,
+ const struct tb_margining *margining, u8 val)
+{
+ unsigned int tmp, interval;
+
+ tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ interval = tmp * margining->max_time_offset / margining->time_steps;
+ seq_printf(s, "%u mUI (%u)", interval, tmp);
+ if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ seq_puts(s, " exceeds maximum");
+ seq_puts(s, "\n");
+}
+
+static int margining_results_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ margining = usb4->margining;
+ /* Dump the raw results first */
+ seq_printf(s, "0x%08x\n", margining->results[0]);
+ /* Only the hardware margining has two result dwords */
+ if (!margining->software) {
+ unsigned int val;
+
+ seq_printf(s, "0x%08x\n", margining->results[1]);
+
+ if (margining->time) {
+ if (!margining->lanes || margining->lanes == 7) {
+ val = margining->results[1];
+ seq_puts(s, "# lane 0 right time margin: ");
+ time_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 0 left time margin: ");
+ time_margin_show(s, margining, val);
+ }
+ if (margining->lanes == 1 || margining->lanes == 7) {
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 right time margin: ");
+ time_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 left time margin: ");
+ time_margin_show(s, margining, val);
+ }
+ } else {
+ if (!margining->lanes || margining->lanes == 7) {
+ val = margining->results[1];
+ seq_puts(s, "# lane 0 high voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 0 low voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ }
+ if (margining->lanes == 1 || margining->lanes == 7) {
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 high voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ val = margining->results[1] >>
+ USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
+ seq_puts(s, "# lane 1 low voltage margin: ");
+ voltage_margin_show(s, margining, val);
+ }
+ }
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_results);
+
+static ssize_t margining_test_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (!strcmp(buf, "time") && supports_time(usb4))
+ usb4->margining->time = true;
+ else if (!strcmp(buf, "voltage"))
+ usb4->margining->time = false;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_test_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (supports_time(usb4)) {
+ if (usb4->margining->time)
+ seq_puts(s, "voltage [time]\n");
+ else
+ seq_puts(s, "[voltage] time\n");
+ } else {
+ seq_puts(s, "[voltage]\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_test);
+
+static ssize_t margining_margin_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_free;
+ }
+
+ if (usb4->margining->time) {
+ if (!strcmp(buf, "left"))
+ usb4->margining->right_high = false;
+ else if (!strcmp(buf, "right"))
+ usb4->margining->right_high = true;
+ else
+ ret = -EINVAL;
+ } else {
+ if (!strcmp(buf, "low"))
+ usb4->margining->right_high = false;
+ else if (!strcmp(buf, "high"))
+ usb4->margining->right_high = true;
+ else
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out_free:
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_margin_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ if (mutex_lock_interruptible(&tb->lock))
+ return -ERESTARTSYS;
+
+ if (usb4->margining->time) {
+ if (usb4->margining->right_high)
+ seq_puts(s, "left [right]\n");
+ else
+ seq_puts(s, "[left] right\n");
+ } else {
+ if (usb4->margining->right_high)
+ seq_puts(s, "low [high]\n");
+ else
+ seq_puts(s, "[low] high\n");
+ }
+
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_margin);
+
+static void margining_port_init(struct tb_port *port)
+{
+ struct tb_margining *margining;
+ struct dentry *dir, *parent;
+ struct usb4_port *usb4;
+ char dir_name[10];
+ unsigned int val;
+ int ret;
+
+ usb4 = port->usb4;
+ if (!usb4)
+ return;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+
+ margining = kzalloc(sizeof(*margining), GFP_KERNEL);
+ if (!margining)
+ return;
+
+ ret = usb4_port_margining_caps(port, margining->caps);
+ if (ret) {
+ kfree(margining);
+ return;
+ }
+
+ usb4->margining = margining;
+
+ /* Set the initial mode */
+ if (supports_software(usb4))
+ margining->software = true;
+
+ val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
+ USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;
+ margining->voltage_steps = val;
+ val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >>
+ USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT;
+ margining->max_voltage_offset = 74 + val * 2;
+
+ if (supports_time(usb4)) {
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >>
+ USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT;
+ margining->time_steps = val;
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >>
+ USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT;
+ /*
+ * Store it as mUI (milli Unit Interval) because we want
+ * to keep it as an integer.
+ */
+ margining->max_time_offset = 200 + 10 * val;
+ }
+
+ dir = debugfs_create_dir("margining", parent);
+ if (supports_hardware(usb4)) {
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >>
+ USB4_MARGIN_CAP_1_MIN_BER_SHIFT;
+ margining->min_ber_level = val;
+ val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >>
+ USB4_MARGIN_CAP_1_MAX_BER_SHIFT;
+ margining->max_ber_level = val;
+
+ /* Set the default to minimum */
+ margining->ber_level = margining->min_ber_level;
+
+ debugfs_create_file("ber_level_contour", 0400, dir, port,
+ &margining_ber_level_fops);
+ }
+ debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops);
+ debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops);
+ debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops);
+ debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
+ debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
+ debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
+ if (independent_voltage_margins(usb4) ||
+ (supports_time(usb4) && independent_time_margins(usb4)))
+ debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+}
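+/*
+ * The resulting debugfs layout under the router directory is
+ * portN/margining/ with caps, lanes, mode, run, results and test,
+ * plus ber_level_contour and margin when the capabilities allow.
+ */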
+
+static void margining_port_remove(struct tb_port *port)
+{
+ struct dentry *parent;
+ char dir_name[10];
+
+ if (!port->usb4)
+ return;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ debugfs_remove_recursive(debugfs_lookup("margining", parent));
+
+ kfree(port->usb4->margining);
+ port->usb4->margining = NULL;
+}
+
+static void margining_switch_init(struct tb_switch *sw)
+{
+ struct tb_port *upstream, *downstream;
+ struct tb_switch *parent_sw;
+ u64 route = tb_route(sw);
+
+ if (!route)
+ return;
+
+ upstream = tb_upstream_port(sw);
+ parent_sw = tb_switch_parent(sw);
+ downstream = tb_port_at(route, parent_sw);
+
+ margining_port_init(downstream);
+ margining_port_init(upstream);
+}
+
+static void margining_switch_remove(struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+ struct tb_port *downstream;
+ u64 route = tb_route(sw);
+
+ if (!route)
+ return;
+
+ /*
+ * Upstream is removed with the router itself but we need to
+ * remove the downstream port margining directory.
+ */
+ parent_sw = tb_switch_parent(sw);
+ downstream = tb_port_at(route, parent_sw);
+ margining_port_remove(downstream);
+}
+
+static void margining_xdomain_init(struct tb_xdomain *xd)
+{
+ struct tb_switch *parent_sw;
+ struct tb_port *downstream;
+
+ parent_sw = tb_xdomain_parent(xd);
+ downstream = tb_port_at(xd->route, parent_sw);
+
+ margining_port_init(downstream);
+}
+
+static void margining_xdomain_remove(struct tb_xdomain *xd)
+{
+ struct tb_switch *parent_sw;
+ struct tb_port *downstream;
+
+ parent_sw = tb_xdomain_parent(xd);
+ downstream = tb_port_at(xd->route, parent_sw);
+ margining_port_remove(downstream);
+}
+#else
+static inline void margining_switch_init(struct tb_switch *sw) { }
+static inline void margining_switch_remove(struct tb_switch *sw) { }
+static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
+static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
+#endif
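+/*
+ * The stubs above keep the switch and xdomain debugfs hooks building
+ * when lane margining support is not configured in.
+ */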
+
static int port_clear_all_counters(struct tb_port *port)
{
u32 *buf;
@@ -689,6 +1512,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
}
+
+ margining_switch_init(sw);
}
/**
@@ -699,9 +1524,20 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
*/
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
+ margining_switch_remove(sw);
debugfs_remove_recursive(sw->debugfs_dir);
}
+void tb_xdomain_debugfs_init(struct tb_xdomain *xd)
+{
+ margining_xdomain_init(xd);
+}
+
+void tb_xdomain_debugfs_remove(struct tb_xdomain *xd)
+{
+ margining_xdomain_remove(xd);
+}
+
/**
* tb_service_debugfs_init() - Add debugfs directory for service
* @svc: Thunderbolt service pointer
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 99211f35a5cd..ec7b5f65804e 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -144,11 +144,9 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
if (!uuid_is_null(&uuids[i]))
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
- &uuids[i]);
+ ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
- i < tb->nboot_acl - 1 ? "," : "\n");
+ ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
}
out:
@@ -247,7 +245,7 @@ static ssize_t deauthorization_show(struct device *dev,
tb->security_level == TB_SECURITY_SECURE)
deauthorization = !!tb->cm_ops->disapprove_switch;
- return sprintf(buf, "%d\n", deauthorization);
+ return sysfs_emit(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);
@@ -270,7 +268,7 @@ static ssize_t security_show(struct device *dev, struct device_attribute *attr,
if (tb->security_level < ARRAY_SIZE(tb_security_names))
name = tb_security_names[tb->security_level];
- return sprintf(buf, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index ae38f0d25a8d..86521ebb2579 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2518,6 +2518,9 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_ADL_NHI1:
case PCI_DEVICE_ID_INTEL_RPL_NHI0:
case PCI_DEVICE_ID_INTEL_RPL_NHI1:
+ case PCI_DEVICE_ID_INTEL_MTL_M_NHI0:
+ case PCI_DEVICE_ID_INTEL_MTL_P_NHI0:
+ case PCI_DEVICE_ID_INTEL_MTL_P_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
@@ -2529,6 +2532,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
tb->cm_ops = &icm_icl_ops;
break;
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index cb8c9c4ae93a..4dce2edd86ea 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -28,7 +28,11 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1
-
+/*
+ * Used with QUIRK_E2E to specify an unused HopID to which the Rx
+ * credits are transferred.
+ */
+#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
@@ -38,7 +42,9 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
+/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0)
+#define QUIRK_E2E BIT(1)
static int ring_interrupt_index(struct tb_ring *ring)
{
@@ -458,8 +464,18 @@ static void ring_release_msix(struct tb_ring *ring)
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
+ unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0;
+ if (nhi->quirks & QUIRK_E2E) {
+ start_hop = RING_FIRST_USABLE_HOPID + 1;
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
+ ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
+ ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
+ }
+ }
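+ /*
+ * With the E2E quirk the first usable HopID is reserved for the
+ * Rx credit flow above, so normal allocation below starts one
+ * past it.
+ */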
+
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
@@ -469,7 +485,7 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
* Automatically allocate HopID from the non-reserved
* range 1 .. hop_count - 1.
*/
- for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) {
if (!nhi->tx_rings[i]) {
ring->hop = i;
@@ -484,6 +500,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
}
}
+ if (ring->hop > 0 && ring->hop < start_hop) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
@@ -1097,12 +1118,26 @@ static void nhi_shutdown(struct tb_nhi *nhi)
static void nhi_check_quirks(struct tb_nhi *nhi)
{
- /*
- * Intel hardware supports auto clear of the interrupt status
- * reqister right after interrupt is being issued.
- */
- if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL)
+ if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ /*
+ * Intel hardware supports auto clear of the interrupt
+ * status register right after the interrupt has been
+ * issued.
+ */
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ /*
+ * Falcon Ridge controller needs the end-to-end
+ * flow control workaround to avoid losing Rx
+ * packets when RING_FLAG_E2E is set.
+ */
+ nhi->quirks |= QUIRK_E2E;
+ break;
+ }
+ }
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
@@ -1149,6 +1184,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
+ struct device *dev = &pdev->dev;
int res, irq, nvec;
/* In case someone left them on. */
@@ -1179,10 +1215,8 @@ static int nhi_init_msi(struct tb_nhi *nhi)
res = devm_request_irq(&pdev->dev, irq, nhi_msi,
IRQF_NO_SUSPEND, "thunderbolt", nhi);
- if (res) {
- dev_err(&pdev->dev, "request_irq failed, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "request_irq failed, aborting\n");
}
return 0;
@@ -1223,26 +1257,21 @@ static struct tb *nhi_select_cm(struct tb_nhi *nhi)
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
int res;
- if (!nhi_imr_valid(pdev)) {
- dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
- return -ENODEV;
- }
+ if (!nhi_imr_valid(pdev))
+ return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");
res = pcim_enable_device(pdev);
- if (res) {
- dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res) {
- dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
@@ -1253,7 +1282,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
- dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
+ dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1266,18 +1295,14 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_iommu(nhi);
res = nhi_init_msi(nhi);
- if (res) {
- dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
spin_lock_init(&nhi->lock);
res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (res) {
- dev_err(&pdev->dev, "failed to set DMA mask\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "failed to set DMA mask\n");
pci_set_master(pdev);
@@ -1288,13 +1313,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
tb = nhi_select_cm(nhi);
- if (!tb) {
- dev_err(&nhi->pdev->dev,
+ if (!tb)
+ return dev_err_probe(dev, -ENODEV,
"failed to determine connection manager, aborting\n");
- return -ENODEV;
- }
- dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
@@ -1398,6 +1421,7 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Thunderbolt 4 */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
@@ -1414,6 +1438,12 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index f09da5b62233..b0718020c6f5 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -55,6 +55,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware.
*/
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
@@ -74,6 +75,9 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
+#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
+#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
+#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index b3f310389378..3dd5f81bd629 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -12,19 +12,315 @@
#include "tb.h"
+/* Intel specific NVM offsets */
+#define INTEL_NVM_DEVID 0x05
+#define INTEL_NVM_VERSION 0x08
+#define INTEL_NVM_CSS 0x10
+#define INTEL_NVM_FLASH_SIZE 0x45
+
+/* ASMedia specific NVM offsets */
+#define ASMEDIA_NVM_DATE 0x1c
+#define ASMEDIA_NVM_VERSION 0x28
+
static DEFINE_IDA(nvm_ida);
/**
+ * struct tb_nvm_vendor_ops - Vendor specific NVM operations
+ * @read_version: Reads out NVM version from the flash
+ * @validate: Validates the NVM image before update (optional)
+ * @write_headers: Writes headers before the rest of the image (optional)
+ */
+struct tb_nvm_vendor_ops {
+ int (*read_version)(struct tb_nvm *nvm);
+ int (*validate)(struct tb_nvm *nvm);
+ int (*write_headers)(struct tb_nvm *nvm);
+};
+
+/**
+ * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
+ * @vendor: Vendor ID
+ * @vops: Vendor specific NVM operations
+ *
+ * Maps vendor ID to NVM vendor operations. If there is no mapping then
+ * NVM firmware upgrade is disabled for the device.
+ */
+struct tb_nvm_vendor {
+ u16 vendor;
+ const struct tb_nvm_vendor_ops *vops;
+};
+
+static int intel_switch_nvm_version(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ u32 val, nvm_size, hdr_size;
+ int ret;
+
+ /*
+ * If the switch is in safe-mode the only accessible portion of
+ * the NVM is the non-active one where userspace is expected to
+ * write new functional NVM.
+ */
+ if (sw->safe_mode)
+ return 0;
+
+ ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
+ if (ret)
+ return ret;
+
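+ /*
+ * The low bits of the flash size field give the total NVM size
+ * as a power-of-two number of Mbits; what remains after the
+ * header is split into active and non-active halves.
+ */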
+ hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - hdr_size) / 2;
+
+ ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->major = (val >> 16) & 0xff;
+ nvm->minor = (val >> 8) & 0xff;
+ nvm->active_size = nvm_size;
+
+ return 0;
+}
+
+static int intel_switch_nvm_validate(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ unsigned int image_size, hdr_size;
+ u16 ds_size, device_id;
+ u8 *buf = nvm->buf;
+
+ image_size = nvm->buf_data_size;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ if (sw->safe_mode)
+ return 0;
+
+ /*
+ * Make sure the device ID in the image matches the one
+ * we read from the switch config space.
+ */
+ device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
+ if (device_id != sw->config.device_id)
+ return -EINVAL;
+
+ /* Skip headers in the image */
+ nvm->buf_data_start = buf + hdr_size;
+ nvm->buf_data_size = image_size - hdr_size;
+
+ return 0;
+}
+
+static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+
+ if (sw->generation < 3) {
+ int ret;
+
+ /* Write CSS headers first */
+ ret = dma_port_flash_write(sw->dma_port,
+ DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
+ DMA_PORT_CSS_MAX_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
+ .read_version = intel_switch_nvm_version,
+ .validate = intel_switch_nvm_validate,
+ .write_headers = intel_switch_nvm_write_headers,
+};
+
+static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
+{
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ u32 val;
+ int ret;
+
+ ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->major = (val << 16) & 0xff0000;
+ nvm->major |= val & 0x00ff00;
+ nvm->major |= (val >> 16) & 0x0000ff;
+
+ ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->minor = (val << 16) & 0xff0000;
+ nvm->minor |= val & 0x00ff00;
+ nvm->minor |= (val >> 16) & 0x0000ff;
+
+ /* ASMedia NVM size is fixed to 512k */
+ nvm->active_size = SZ_512K;
+
+ return 0;
+}
+
+static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
+ .read_version = asmedia_switch_nvm_version,
+};
+
+/* Router vendor NVM support table */
+static const struct tb_nvm_vendor switch_nvm_vendors[] = {
+ { 0x174c, &asmedia_switch_nvm_ops },
+ { PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
+ { 0x8087, &intel_switch_nvm_ops },
+};
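+/*
+ * 0x174c is the ASMedia vendor ID; 0x8087 is an additional vendor ID
+ * used by Intel Thunderbolt routers alongside PCI_VENDOR_ID_INTEL, so
+ * both map to the same Intel ops.
+ */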
+
+static int intel_retimer_nvm_version(struct tb_nvm *nvm)
+{
+ struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+ u32 val, nvm_size;
+ int ret;
+
+ ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm->major = (val >> 16) & 0xff;
+ nvm->minor = (val >> 8) & 0xff;
+
+ ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - SZ_16K) / 2;
+ nvm->active_size = nvm_size;
+
+ return 0;
+}
+
+static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
+{
+ struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+ unsigned int image_size, hdr_size;
+ u8 *buf = nvm->buf;
+ u16 ds_size, device;
+
+ image_size = nvm->buf_data_size;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ /*
+ * Make sure the device ID in the image matches the retimer
+ * hardware.
+ */
+ device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
+ if (device != rt->device)
+ return -EINVAL;
+
+ /* Skip headers in the image */
+ nvm->buf_data_start = buf + hdr_size;
+ nvm->buf_data_size = image_size - hdr_size;
+
+ return 0;
+}
+
+static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
+ .read_version = intel_retimer_nvm_version,
+ .validate = intel_retimer_nvm_validate,
+};
+
+/* Retimer vendor NVM support table */
+static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
+ { 0x8087, &intel_retimer_nvm_ops },
+};
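+/*
+ * Only the Intel retimer NVM format is known here; tb_nvm_alloc()
+ * returns %-EOPNOTSUPP for retimers from other vendors and NVM
+ * upgrade stays disabled for them.
+ */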
+
+/**
* tb_nvm_alloc() - Allocate new NVM structure
* @dev: Device owning the NVM
*
* Allocates new NVM structure with unique @id and returns it. In case
- * of error returns ERR_PTR().
+ * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
+ * NVM format of the @dev is not known by the kernel.
*/
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
+ const struct tb_nvm_vendor_ops *vops = NULL;
struct tb_nvm *nvm;
- int ret;
+ int ret, i;
+
+ if (tb_is_switch(dev)) {
+ const struct tb_switch *sw = tb_to_switch(dev);
+
+ for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
+ const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];
+
+ if (v->vendor == sw->config.vendor_id) {
+ vops = v->vops;
+ break;
+ }
+ }
+
+ if (!vops) {
+ tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
+ sw->config.vendor_id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ } else if (tb_is_retimer(dev)) {
+ const struct tb_retimer *rt = tb_to_retimer(dev);
+
+ for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
+ const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];
+
+ if (v->vendor == rt->vendor) {
+ vops = v->vops;
+ break;
+ }
+ }
+
+ if (!vops) {
+ dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
+ rt->vendor);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ } else {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -38,14 +334,85 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
nvm->id = ret;
nvm->dev = dev;
+ nvm->vops = vops;
return nvm;
}
/**
+ * tb_nvm_read_version() - Read and populate NVM version
+ * @nvm: NVM structure
+ *
+ * Uses vendor specific means to read out and fill in the existing
+ * active NVM version. Returns %0 in case of success and negative errno
+ * otherwise.
+ */
+int tb_nvm_read_version(struct tb_nvm *nvm)
+{
+ const struct tb_nvm_vendor_ops *vops = nvm->vops;
+
+ if (vops && vops->read_version)
+ return vops->read_version(nvm);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * tb_nvm_validate() - Validate new NVM image
+ * @nvm: NVM structure
+ *
+ * Runs vendor specific validation over the new NVM image and if all
+ * checks pass returns %0. As a side effect, updates @nvm->buf_data_start
+ * and @nvm->buf_data_size fields to match the actual data to be written
+ * to the NVM.
+ *
+ * If the validation does not pass then returns negative errno.
+ */
+int tb_nvm_validate(struct tb_nvm *nvm)
+{
+ const struct tb_nvm_vendor_ops *vops = nvm->vops;
+ unsigned int image_size;
+ u8 *buf = nvm->buf;
+
+ if (!buf)
+ return -EINVAL;
+ if (!vops)
+ return -EOPNOTSUPP;
+
+ /* Just do basic image size checks */
+ image_size = nvm->buf_data_size;
+ if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+ return -EINVAL;
+
+ /*
+ * Set the default data start in the buffer. The validate method
+ * below can change this if needed.
+ */
+ nvm->buf_data_start = buf;
+
+ return vops->validate ? vops->validate(nvm) : 0;
+}
+
+/**
+ * tb_nvm_write_headers() - Write headers before the rest of the image
+ * @nvm: NVM structure
+ *
+ * If the vendor NVM format requires writing headers before the rest of
+ * the image, this function does that. Can be called even if the device
+ * does not need this.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_nvm_write_headers(struct tb_nvm *nvm)
+{
+ const struct tb_nvm_vendor_ops *vops = nvm->vops;
+
+ return vops->write_headers ? vops->write_headers(nvm) : 0;
+}
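+/*
+ * Expected call sequence for the helpers above: tb_nvm_alloc() picks
+ * the vendor ops, tb_nvm_read_version() fills in the active NVM
+ * version, and on flashing tb_nvm_validate() (and, for routers,
+ * tb_nvm_write_headers()) run before the image data is written.
+ */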
+
+/**
* tb_nvm_add_active() - Adds active NVMem device to NVM
* @nvm: NVM structure
- * @size: Size of the active NVM in bytes
* @reg_read: Pointer to the function to read the NVM (passed directly to the
* NVMem device)
*
@@ -54,7 +421,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
* needed. The first parameter passed to @reg_read is @nvm structure.
* Returns %0 in success and negative errno otherwise.
*/
-int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read)
+int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
@@ -67,7 +434,7 @@ int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
- config.size = size;
+ config.size = nvm->active_size;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
@@ -109,17 +476,17 @@ int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
/**
* tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
* @nvm: NVM structure
- * @size: Size of the non-active NVM in bytes
* @reg_write: Pointer to the function to write the NVM (passed directly
* to the NVMem device)
*
* Registers new non-active NVmem device for @nvm. The @reg_write is called
* directly from NVMem so it must handle possible concurrent access if
* needed. The first parameter passed to @reg_write is @nvm structure.
+ * The size of the NVMem device is set to %NVM_MAX_SIZE.
+ *
* Returns %0 in success and negative errno otherwise.
*/
-int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
- nvmem_reg_write_t reg_write)
+int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
@@ -132,7 +499,7 @@ int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
- config.size = size;
+ config.size = NVM_MAX_SIZE;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 8c29bd556ae0..81252e31014a 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -16,8 +16,23 @@
#define TB_MAX_RETIMER_INDEX 6
-static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
+/**
+ * tb_retimer_nvm_read() - Read contents of retimer NVM
+ * @rt: Retimer device
+ * @address: NVM address (in bytes) to start reading
+ * @buf: Data read from NVM is stored here
+ * @size: Number of bytes to read
+ *
+ * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
+ * read was successful and negative errno in case of failure.
+ */
+int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
+}
+
+static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
@@ -30,7 +45,7 @@ static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
+ ret = tb_retimer_nvm_read(rt, offset, val, bytes);
mutex_unlock(&rt->tb->lock);
out:
@@ -40,8 +55,7 @@ out:
return ret;
}
-static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
- size_t bytes)
+static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
@@ -59,34 +73,23 @@ static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
struct tb_nvm *nvm;
- u32 val, nvm_size;
int ret;
nvm = tb_nvm_alloc(&rt->dev);
- if (IS_ERR(nvm))
- return PTR_ERR(nvm);
-
- ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
- sizeof(val));
- if (ret)
+ if (IS_ERR(nvm)) {
+ ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
goto err_nvm;
+ }
- nvm->major = val >> 16;
- nvm->minor = val >> 8;
-
- ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
- &val, sizeof(val));
+ ret = tb_nvm_read_version(nvm);
if (ret)
goto err_nvm;
- nvm_size = (SZ_1M << (val & 7)) / 8;
- nvm_size = (nvm_size - SZ_16K) / 2;
-
- ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
+ ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
@@ -94,59 +97,33 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
return 0;
err_nvm:
- tb_nvm_free(nvm);
+ dev_dbg(&rt->dev, "NVM upgrade disabled\n");
+ if (!IS_ERR(nvm))
+ tb_nvm_free(nvm);
+
return ret;
}
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
- unsigned int image_size, hdr_size;
- const u8 *buf = rt->nvm->buf;
- u16 ds_size, device;
+ unsigned int image_size;
+ const u8 *buf;
int ret;
- image_size = rt->nvm->buf_data_size;
- if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
- return -EINVAL;
-
- /*
- * FARB pointer must point inside the image and must at least
- * contain parts of the digital section we will be reading here.
- */
- hdr_size = (*(u32 *)buf) & 0xffffff;
- if (hdr_size + NVM_DEVID + 2 >= image_size)
- return -EINVAL;
-
- /* Digital section start should be aligned to 4k page */
- if (!IS_ALIGNED(hdr_size, SZ_4K))
- return -EINVAL;
-
- /*
- * Read digital section size and check that it also fits inside
- * the image.
- */
- ds_size = *(u16 *)(buf + hdr_size);
- if (ds_size >= image_size)
- return -EINVAL;
-
- /*
- * Make sure the device ID in the image matches the retimer
- * hardware.
- */
- device = *(u16 *)(buf + hdr_size + NVM_DEVID);
- if (device != rt->device)
- return -EINVAL;
+ ret = tb_nvm_validate(rt->nvm);
+ if (ret)
+ return ret;
- /* Skip headers in the image */
- buf += hdr_size;
- image_size -= hdr_size;
+ buf = rt->nvm->buf_data_start;
+ image_size = rt->nvm->buf_data_size;
ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
image_size);
- if (!ret)
- rt->nvm->flushed = true;
+ if (ret)
+ return ret;
- return ret;
+ rt->nvm->flushed = true;
+ return 0;
}
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
@@ -185,7 +162,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_retimer *rt = tb_to_retimer(dev);
- return sprintf(buf, "%#x\n", rt->device);
+ return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);
@@ -200,8 +177,10 @@ static ssize_t nvm_authenticate_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
+ else if (rt->no_nvm_upgrade)
+ ret = -EOPNOTSUPP;
else
- ret = sprintf(buf, "%#x\n", rt->auth_status);
+ ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
mutex_unlock(&rt->tb->lock);
@@ -276,7 +255,7 @@ static ssize_t nvm_version_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
else
- ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
+ ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
mutex_unlock(&rt->tb->lock);
return ret;
@@ -288,7 +267,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_retimer *rt = tb_to_retimer(dev);
- return sprintf(buf, "%#x\n", rt->vendor);
+ return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index bda889ff3bda..5185cf3e4d97 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -26,10 +26,68 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */
USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541, /* "AUTH" */
USB4_SB_OPCODE_NVM_READ = 0x52524641, /* "AFRR" */
+ USB4_SB_OPCODE_READ_LANE_MARGINING_CAP = 0x50434452, /* "RDCP" */
+ USB4_SB_OPCODE_RUN_HW_LANE_MARGINING = 0x474d4852, /* "RHMG" */
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING = 0x474d5352, /* "RSMG" */
+ USB4_SB_OPCODE_READ_SW_MARGIN_ERR = 0x57534452, /* "RDSW" */
};
#define USB4_SB_METADATA 0x09
#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0)
#define USB4_SB_DATA 0x12
+/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */
+#define USB4_MARGIN_CAP_0_MODES_HW BIT(0)
+#define USB4_MARGIN_CAP_0_MODES_SW BIT(1)
+#define USB4_MARGIN_CAP_0_2_LANES BIT(2)
+#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3)
+#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3
+#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0
+#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1
+#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2
+#define USB4_MARGIN_CAP_0_TIME BIT(5)
+#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
+#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT 6
+#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
+#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT 13
+#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
+#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
+#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT 9
+#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
+#define USB4_MARGIN_CAP_1_TIME_LR 0x1
+#define USB4_MARGIN_CAP_1_TIME_BOTH 0x2
+#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK GENMASK(15, 11)
+#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT 11
+#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK GENMASK(20, 16)
+#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT 16
+#define USB4_MARGIN_CAP_1_MIN_BER_MASK GENMASK(25, 21)
+#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT 21
+#define USB4_MARGIN_CAP_1_MAX_BER_MASK GENMASK(30, 26)
+#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
+
+/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */
+#define USB4_MARGIN_HW_TIME BIT(3)
+#define USB4_MARGIN_HW_RH BIT(4)
+#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
+#define USB4_MARGIN_HW_BER_SHIFT 5
+
+/* Applicable to all margin values */
+#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
+#define USB4_MARGIN_HW_RES_1_EXCEEDS BIT(7)
+/* Different lane margin shifts */
+#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT 8
+#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT 16
+#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
+
+/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_TIME BIT(3)
+#define USB4_MARGIN_SW_RH BIT(4)
+#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
+#define USB4_MARGIN_SW_COUNTER_SHIFT 13
+#define USB4_MARGIN_SW_COUNTER_NOP 0x0
+#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
+#define USB4_MARGIN_SW_COUNTER_START 0x2
+#define USB4_MARGIN_SW_COUNTER_STOP 0x3
+
#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 244f8cd38b25..60da5c23ccaf 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -19,8 +19,6 @@
/* Switch NVM support */
-#define NVM_CSS 0x10
-
struct nvm_auth_status {
struct list_head list;
uuid_t uuid;
@@ -102,70 +100,30 @@ static void nvm_clear_auth_status(const struct tb_switch *sw)
static int nvm_validate_and_write(struct tb_switch *sw)
{
- unsigned int image_size, hdr_size;
- const u8 *buf = sw->nvm->buf;
- u16 ds_size;
+ unsigned int image_size;
+ const u8 *buf;
int ret;
- if (!buf)
- return -EINVAL;
-
- image_size = sw->nvm->buf_data_size;
- if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
- return -EINVAL;
-
- /*
- * FARB pointer must point inside the image and must at least
- * contain parts of the digital section we will be reading here.
- */
- hdr_size = (*(u32 *)buf) & 0xffffff;
- if (hdr_size + NVM_DEVID + 2 >= image_size)
- return -EINVAL;
-
- /* Digital section start should be aligned to 4k page */
- if (!IS_ALIGNED(hdr_size, SZ_4K))
- return -EINVAL;
-
- /*
- * Read digital section size and check that it also fits inside
- * the image.
- */
- ds_size = *(u16 *)(buf + hdr_size);
- if (ds_size >= image_size)
- return -EINVAL;
-
- if (!sw->safe_mode) {
- u16 device_id;
+ ret = tb_nvm_validate(sw->nvm);
+ if (ret)
+ return ret;
- /*
- * Make sure the device ID in the image matches the one
- * we read from the switch config space.
- */
- device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
- if (device_id != sw->config.device_id)
- return -EINVAL;
-
- if (sw->generation < 3) {
- /* Write CSS headers first */
- ret = dma_port_flash_write(sw->dma_port,
- DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
- DMA_PORT_CSS_MAX_SIZE);
- if (ret)
- return ret;
- }
+ ret = tb_nvm_write_headers(sw->nvm);
+ if (ret)
+ return ret;
- /* Skip headers in the image */
- buf += hdr_size;
- image_size -= hdr_size;
- }
+ buf = sw->nvm->buf_data_start;
+ image_size = sw->nvm->buf_data_size;
if (tb_switch_is_usb4(sw))
ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
else
ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
- if (!ret)
- sw->nvm->flushed = true;
- return ret;
+ if (ret)
+ return ret;
+
+ sw->nvm->flushed = true;
+ return 0;
}
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
@@ -300,14 +258,6 @@ static inline bool nvm_upgradeable(struct tb_switch *sw)
return nvm_readable(sw);
}
-static inline int nvm_read(struct tb_switch *sw, unsigned int address,
- void *buf, size_t size)
-{
- if (tb_switch_is_usb4(sw))
- return usb4_switch_nvm_read(sw, address, buf, size);
- return dma_port_flash_read(sw->dma_port, address, buf, size);
-}
-
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
int ret;
@@ -335,8 +285,26 @@ static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
return ret;
}
-static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
+/**
+ * tb_switch_nvm_read() - Read router NVM
+ * @sw: Router whose NVM to read
+ * @address: Start address on the NVM
+ * @buf: Buffer where the read data is copied
+ * @size: Size of the buffer in bytes
+ *
+ * Reads from router NVM and returns the requested data in @buf. Locking
+ * is up to the caller. Returns %0 in success and negative errno in case
+ * of failure.
+ */
+int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_read(sw, address, buf, size);
+ return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
@@ -349,7 +317,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
goto out;
}
- ret = nvm_read(sw, offset, val, bytes);
+ ret = tb_switch_nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
@@ -359,8 +327,7 @@ out:
return ret;
}
-static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
- size_t bytes)
+static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
@@ -384,28 +351,20 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
static int tb_switch_nvm_add(struct tb_switch *sw)
{
struct tb_nvm *nvm;
- u32 val;
int ret;
if (!nvm_readable(sw))
return 0;
- /*
- * The NVM format of non-Intel hardware is not known so
- * currently restrict NVM upgrade for Intel hardware. We may
- * relax this in the future when we learn other NVM formats.
- */
- if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
- sw->config.vendor_id != 0x8087) {
- dev_info(&sw->dev,
- "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
- sw->config.vendor_id);
- return 0;
+ nvm = tb_nvm_alloc(&sw->dev);
+ if (IS_ERR(nvm)) {
+ ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
+ goto err_nvm;
}
- nvm = tb_nvm_alloc(&sw->dev);
- if (IS_ERR(nvm))
- return PTR_ERR(nvm);
+ ret = tb_nvm_read_version(nvm);
+ if (ret)
+ goto err_nvm;
/*
* If the switch is in safe-mode the only accessible portion of
@@ -413,31 +372,13 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
* write new functional NVM.
*/
if (!sw->safe_mode) {
- u32 nvm_size, hdr_size;
-
- ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
- if (ret)
- goto err_nvm;
-
- hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
- nvm_size = (SZ_1M << (val & 7)) / 8;
- nvm_size = (nvm_size - hdr_size) / 2;
-
- ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
- if (ret)
- goto err_nvm;
-
- nvm->major = val >> 16;
- nvm->minor = val >> 8;
-
- ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
+ ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
}
if (!sw->no_nvm_upgrade) {
- ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
- tb_switch_nvm_write);
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
}
@@ -446,7 +387,11 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
return 0;
err_nvm:
- tb_nvm_free(nvm);
+ tb_sw_dbg(sw, "NVM upgrade disabled\n");
+ sw->no_nvm_upgrade = true;
+ if (!IS_ERR(nvm))
+ tb_nvm_free(nvm);
+
return ret;
}
@@ -1229,6 +1174,135 @@ int tb_port_update_credits(struct tb_port *port)
return tb_port_do_update_credits(port->dual_link_port);
}
+static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
+{
+ u32 phy;
+ int ret;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (secondary)
+ phy |= LANE_ADP_CS_1_PMS;
+ else
+ phy &= ~LANE_ADP_CS_1_PMS;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_pm_secondary_enable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, true);
+}
+
+static int tb_port_pm_secondary_disable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, false);
+}
+
+/* Called for USB4 or Titan Ridge routers only */
+static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
+{
+ u32 val, mask = 0;
+ bool ret;
+
+ /* Don't enable CLx in case of two single-lane links */
+ if (!port->bonded && port->dual_link_port)
+ return false;
+
+ /* Don't enable CLx in case of inter-domain link */
+ if (port->xdomain)
+ return false;
+
+ if (tb_switch_is_usb4(port->sw)) {
+ if (!usb4_port_clx_supported(port))
+ return false;
+ } else if (!tb_lc_is_clx_supported(port)) {
+ return false;
+ }
+
+ if (clx_mask & TB_CL1) {
+ /* CL0s and CL1 are enabled and supported together */
+ mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
+ }
+ if (clx_mask & TB_CL2)
+ mask |= LANE_ADP_CS_0_CL2_SUPPORT;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
+
+static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
+{
+ u32 phy, mask;
+ int ret;
+
+ /* CL0s and CL1 are enabled and supported together */
+ if (clx == TB_CL1)
+ mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ else
+ /* For now we support only CL0s and CL1. Not CL2 */
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ phy |= mask;
+ else
+ phy &= ~mask;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
+{
+ return __tb_port_clx_set(port, clx, false);
+}
+
+static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
+{
+ return __tb_port_clx_set(port, clx, true);
+}
+
+/**
+ * tb_port_is_clx_enabled() - Is given CL state enabled
+ * @port: USB4 port to check
+ * @clx_mask: Mask of CL states to check
+ *
+ * Returns true if any of the given CL states is enabled for @port.
+ */
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
+{
+ u32 val, mask = 0;
+ int ret;
+
+ if (!tb_port_clx_supported(port, clx_mask))
+ return false;
+
+ if (clx_mask & TB_CL1)
+ mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ if (clx_mask & TB_CL2)
+ mask |= LANE_ADP_CS_1_CL2_ENABLE;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
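+/*
+ * The lane margining debugfs code uses this, for example, to refuse
+ * running margining while CL states are enabled on the link.
+ */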
+
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
@@ -1620,7 +1694,7 @@ static ssize_t authorized_show(struct device *dev,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->authorized);
+ return sysfs_emit(buf, "%u\n", sw->authorized);
}
static int disapprove_switch(struct device *dev, void *not_used)
@@ -1730,7 +1804,7 @@ static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->boot);
+ return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);
@@ -1739,7 +1813,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%#x\n", sw->device);
+ return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);
@@ -1748,7 +1822,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
+ return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);
@@ -1757,7 +1831,7 @@ generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->generation);
+ return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);
@@ -1771,9 +1845,9 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
return restart_syscall();
if (sw->key)
- ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+ ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
else
- ret = sprintf(buf, "\n");
+ ret = sysfs_emit(buf, "\n");
mutex_unlock(&sw->tb->lock);
return ret;
@@ -1818,7 +1892,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+ return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}
/*
@@ -1833,7 +1907,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%u\n", sw->link_width);
+ return sysfs_emit(buf, "%u\n", sw->link_width);
}
/*
@@ -1850,7 +1924,7 @@ static ssize_t nvm_authenticate_show(struct device *dev,
u32 status;
nvm_get_auth_status(sw, &status);
- return sprintf(buf, "%#x\n", status);
+ return sysfs_emit(buf, "%#x\n", status);
}
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
@@ -1866,6 +1940,11 @@ static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
goto exit_rpm;
}
+ if (sw->no_nvm_upgrade) {
+ ret = -EOPNOTSUPP;
+ goto exit_unlock;
+ }
+
/* If NVMem devices are not yet added */
if (!sw->nvm) {
ret = -EAGAIN;
@@ -1954,7 +2033,7 @@ static ssize_t nvm_version_show(struct device *dev,
else if (!sw->nvm)
ret = -EAGAIN;
else
- ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+ ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
mutex_unlock(&sw->tb->lock);
@@ -1967,7 +2046,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%#x\n", sw->vendor);
+ return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -1976,7 +2055,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
+ return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);
@@ -1985,7 +2064,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_switch *sw = tb_to_switch(dev);
- return sprintf(buf, "%pUb\n", sw->uuid);
+ return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);
@@ -2413,6 +2492,7 @@ int tb_switch_configure(struct tb_switch *sw)
* additional capabilities.
*/
sw->config.cmuv = USB4_VERSION_1_0;
+ sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
@@ -2821,6 +2901,26 @@ static void tb_switch_credits_init(struct tb_switch *sw)
tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}
+static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ tb_switch_for_each_port(sw, port) {
+ int res;
+
+ if (!port->cap_usb4)
+ continue;
+
+ res = usb4_port_hotplug_enable(port);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
@@ -2890,6 +2990,10 @@ int tb_switch_add(struct tb_switch *sw)
return ret;
}
+ ret = tb_switch_port_hotplug_enable(sw);
+ if (ret)
+ return ret;
+
ret = device_add(&sw->dev);
if (ret) {
dev_err(&sw->dev, "failed to add device: %d\n", ret);
@@ -3361,35 +3465,6 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
-static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
-{
- u32 phy;
- int ret;
-
- ret = tb_port_read(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- if (secondary)
- phy |= LANE_ADP_CS_1_PMS;
- else
- phy &= ~LANE_ADP_CS_1_PMS;
-
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
-}
-
-static int tb_port_pm_secondary_enable(struct tb_port *port)
-{
- return __tb_port_pm_secondary_set(port, true);
-}
-
-static int tb_port_pm_secondary_disable(struct tb_port *port)
-{
- return __tb_port_pm_secondary_set(port, false);
-}
-
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
@@ -3408,83 +3483,6 @@ static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
return tb_port_pm_secondary_disable(down);
}
-/* Called for USB4 or Titan Ridge routers only */
-static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
-{
- u32 mask, val;
- bool ret;
-
- /* Don't enable CLx in case of two single-lane links */
- if (!port->bonded && port->dual_link_port)
- return false;
-
- /* Don't enable CLx in case of inter-domain link */
- if (port->xdomain)
- return false;
-
- if (tb_switch_is_usb4(port->sw)) {
- if (!usb4_port_clx_supported(port))
- return false;
- } else if (!tb_lc_is_clx_supported(port)) {
- return false;
- }
-
- switch (clx) {
- case TB_CL1:
- /* CL0s and CL1 are enabled and supported together */
- mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
- break;
-
- /* For now we support only CL0s and CL1. Not CL2 */
- case TB_CL2:
- default:
- return false;
- }
-
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_0, 1);
- if (ret)
- return false;
-
- return !!(val & mask);
-}
-
-static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
-{
- u32 phy, mask;
- int ret;
-
- /* CL0s and CL1 are enabled and supported together */
- if (clx == TB_CL1)
- mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
- else
- /* For now we support only CL0s and CL1. Not CL2 */
- return -EOPNOTSUPP;
-
- ret = tb_port_read(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
- if (ret)
- return ret;
-
- if (enable)
- phy |= mask;
- else
- phy &= ~mask;
-
- return tb_port_write(port, &phy, TB_CFG_PORT,
- port->cap_phy + LANE_ADP_CS_1, 1);
-}
-
-static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
-{
- return __tb_port_clx_set(port, clx, false);
-}
-
-static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
-{
- return __tb_port_clx_set(port, clx, true);
-}
-
static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
struct tb_switch *parent = tb_switch_parent(sw);
@@ -3786,14 +3784,18 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
*/
int tb_switch_xhci_connect(struct tb_switch *sw)
{
- bool usb_port1, usb_port3, xhci_port1, xhci_port3;
struct tb_port *port1, *port3;
int ret;
+ if (sw->generation != 3)
+ return 0;
+
port1 = &sw->ports[1];
port3 = &sw->ports[3];
if (tb_switch_is_alpine_ridge(sw)) {
+ bool usb_port1, usb_port3, xhci_port1, xhci_port3;
+
usb_port1 = tb_lc_is_usb_plugged(port1);
usb_port3 = tb_lc_is_usb_plugged(port3);
xhci_port1 = tb_lc_is_xhci_connected(port1);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 9853f6c7e81d..462845804427 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -105,6 +105,32 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
}
}
+static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available discovered\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+}
+
+static void tb_discover_dp_resources(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel))
+ tb_discover_dp_resource(tb, tunnel->dst_port);
+ }
+}
+
static void tb_switch_discover_tunnels(struct tb_switch *sw,
struct list_head *list,
bool alloc_hopids)
@@ -174,10 +200,10 @@ static void tb_discover_tunnels(struct tb *tb)
}
}
-static int tb_port_configure_xdomain(struct tb_port *port)
+static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
- return usb4_port_configure_xdomain(port);
+ return usb4_port_configure_xdomain(port, xd);
return tb_lc_configure_xdomain(port);
}
@@ -212,7 +238,7 @@ static void tb_scan_xdomain(struct tb_port *port)
NULL);
if (xd) {
tb_port_at(route, sw)->xdomain = xd;
- tb_port_configure_xdomain(port);
+ tb_port_configure_xdomain(port, xd);
tb_xdomain_add(xd);
}
}
@@ -1416,8 +1442,11 @@ static int tb_start(struct tb *tb)
* ICM firmware upgrade needs running firmware and in native
* mode that is not available so disable firmware upgrade of the
* root switch.
+ *
+ * However, USB4 routers support NVM firmware upgrade if they
+ * implement the necessary router operations.
*/
- tb->root_switch->no_nvm_upgrade = true;
+ tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
/* All USB4 routers support runtime PM */
tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
@@ -1446,6 +1475,8 @@ static int tb_start(struct tb *tb)
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -1516,7 +1547,7 @@ static void tb_restore_children(struct tb_switch *sw)
tb_restore_children(port->remote->sw);
} else if (port->xdomain) {
- tb_port_configure_xdomain(port);
+ tb_port_configure_xdomain(port, port->xdomain);
}
}
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5db76de40cc1..f9786976f5ec 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,11 +23,6 @@
#define NVM_MAX_SIZE SZ_512K
#define NVM_DATA_DWORDS 16
-/* Intel specific NVM offsets */
-#define NVM_DEVID 0x05
-#define NVM_VERSION 0x08
-#define NVM_FLASH_SIZE 0x45
-
/**
* struct tb_nvm - Structure holding NVM information
* @dev: Owner of the NVM
@@ -35,28 +30,35 @@
* @minor: Minor version number of the active NVM portion
* @id: Identifier used with both NVM portions
* @active: Active portion NVMem device
+ * @active_size: Size in bytes of the active NVM
* @non_active: Non-active portion NVMem device
* @buf: Buffer where the NVM image is stored before it is written to
* the actual NVM flash device
+ * @buf_data_start: Where the actual image starts after skipping
+ * possible headers
* @buf_data_size: Number of bytes actually consumed by the new NVM
* image
* @authenticating: The device is authenticating the new NVM
* @flushed: The image has been flushed to the storage area
+ * @vops: Router vendor specific NVM operations (optional)
*
* The user of this structure needs to handle serialization of possible
* concurrent access.
*/
struct tb_nvm {
struct device *dev;
- u8 major;
- u8 minor;
+ u32 major;
+ u32 minor;
int id;
struct nvmem_device *active;
+ size_t active_size;
struct nvmem_device *non_active;
void *buf;
+ void *buf_data_start;
size_t buf_data_size;
bool authenticating;
bool flushed;
+ const struct tb_nvm_vendor_ops *vops;
};
enum tb_nvm_write_ops {
@@ -113,8 +115,8 @@ struct tb_switch_tmu {
enum tb_clx {
TB_CLX_DISABLE,
/* CL0s and CL1 are enabled and supported together */
- TB_CL1,
- TB_CL2,
+ TB_CL1 = BIT(0),
+ TB_CL2 = BIT(1),
};
/**
@@ -279,12 +281,16 @@ struct tb_port {
* @can_offline: Does the port have necessary platform support to move
* it into offline mode and back
* @offline: The port is currently in offline mode
+ * @margining: Pointer to margining structure if enabled
*/
struct usb4_port {
struct device dev;
struct tb_port *port;
bool can_offline;
bool offline;
+#ifdef CONFIG_USB4_DEBUGFS_MARGINING
+ struct tb_margining *margining;
+#endif
};
/**
@@ -296,6 +302,7 @@ struct usb4_port {
* @device: Device ID of the retimer
* @port: Pointer to the lane 0 adapter
* @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this retimer
* @auth_status: Status of last NVM authentication
*/
struct tb_retimer {
@@ -306,6 +313,7 @@ struct tb_retimer {
u32 device;
struct tb_port *port;
struct tb_nvm *nvm;
+ bool no_nvm_upgrade;
u32 auth_status;
};
@@ -737,11 +745,13 @@ static inline void tb_domain_put(struct tb *tb)
}
struct tb_nvm *tb_nvm_alloc(struct device *dev);
-int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
+int tb_nvm_read_version(struct tb_nvm *nvm);
+int tb_nvm_validate(struct tb_nvm *nvm);
+int tb_nvm_write_headers(struct tb_nvm *nvm);
+int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read);
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
size_t bytes);
-int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
- nvmem_reg_write_t reg_write);
+int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write);
void tb_nvm_free(struct tb_nvm *nvm);
void tb_nvm_exit(void);
@@ -755,6 +765,8 @@ int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
unsigned int retries, write_block_fn write_next_block,
void *write_block_data);
+int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route);
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
@@ -1035,6 +1047,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -1132,6 +1145,13 @@ void tb_xdomain_remove(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
+static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
+{
+ return tb_to_switch(xd->dev.parent);
+}
+
+int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
+ size_t size);
int tb_retimer_scan(struct tb_port *port, bool add);
void tb_retimer_remove_all(struct tb_port *port);
@@ -1174,14 +1194,22 @@ int usb4_switch_add_ports(struct tb_switch *sw);
void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
+int usb4_port_hotplug_enable(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
-int usb4_port_configure_xdomain(struct tb_port *port);
+int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
+int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
+int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
+ unsigned int ber_level, bool timing, bool right_high,
+ u32 *results);
+int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
+ bool right_high, u32 counter);
+int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
@@ -1264,6 +1292,8 @@ void tb_debugfs_init(void);
void tb_debugfs_exit(void);
void tb_switch_debugfs_init(struct tb_switch *sw);
void tb_switch_debugfs_remove(struct tb_switch *sw);
+void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
+void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
#else
@@ -1271,6 +1301,8 @@ static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
+static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
+static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 166054110388..86319dca0f8c 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -308,6 +308,7 @@ struct tb_regs_port_header {
#define ADP_CS_5 0x05
#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
#define ADP_CS_5_LCA_SHIFT 22
+#define ADP_CS_5_DHP BIT(31)
/* TMU adapter registers */
#define TMU_ADP_CS_3 0x03
@@ -324,6 +325,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2
#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
+#define LANE_ADP_CS_0_CL2_SUPPORT BIT(28)
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
@@ -333,6 +335,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
+#define LANE_ADP_CS_1_CL2_ENABLE BIT(12)
#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 3a2e7126db9d..f986854aa207 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1046,6 +1046,26 @@ int usb4_port_unlock(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
+/**
+ * usb4_port_hotplug_enable() - Enables hotplug for a port
+ * @port: USB4 port to operate on
+ *
+ * Enables hot plug events on a given port. This is only intended
+ * to be used on lane, DP-IN, and DP-OUT adapters.
+ */
+int usb4_port_hotplug_enable(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_5_DHP;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+}
+
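For illustration only, no such helper exists in this patch: the inverse operation would set the same DHP bit that usb4_port_hotplug_enable() above clears. A sketch under that assumption, with an invented name:

static int usb4_port_hotplug_disable_sketch(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	/* Setting ADP_CS_5_DHP suppresses hot plug events on this adapter */
	val |= ADP_CS_5_DHP;
	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
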
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -1115,12 +1135,14 @@ static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
/**
* usb4_port_configure_xdomain() - Configure port for XDomain
* @port: USB4 port connected to another host
+ * @xd: XDomain that is connected to the port
*
- * Marks the USB4 port as being connected to another host. Returns %0 in
- * success and negative errno in failure.
+ * Marks the USB4 port as being connected to another host and updates
+ * the link type. Returns %0 on success and negative errno on failure.
*/
-int usb4_port_configure_xdomain(struct tb_port *port)
+int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
+ xd->link_usb4 = link_is_usb4(port);
return usb4_set_xdomain_configured(port, true);
}
@@ -1384,6 +1406,126 @@ bool usb4_port_clx_supported(struct tb_port *port)
return !!(val & PORT_CS_18_CPS);
}
+/**
+ * usb4_port_margining_caps() - Read USB4 port margining capabilities
+ * @port: USB4 port
+ * @caps: Array with at least two elements to hold the results
+ *
+ * Reads the USB4 port lane margining capabilities into @caps.
+ */
+int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
+{
+ int ret;
+
+ ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_DATA, caps, sizeof(*caps) * 2);
+}
+
+/**
+ * usb4_port_hw_margin() - Run hardware lane margining on port
+ * @port: USB4 port
+ * @lanes: Which lanes to run (must match the port capabilities). Can be
+ * %0, %1 or %7.
+ * @ber_level: BER level contour value
+ * @timing: Perform timing margining instead of voltage
+ * @right_high: Use Right/high margin instead of left/low
+ * @results: Array with at least two elements to hold the results
+ *
+ * Runs hardware lane margining on the USB4 port and returns the results in
+ * @results.
+ */
+int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
+ unsigned int ber_level, bool timing, bool right_high,
+ u32 *results)
+{
+ u32 val;
+ int ret;
+
+ val = lanes;
+ if (timing)
+ val |= USB4_MARGIN_HW_TIME;
+ if (right_high)
+ val |= USB4_MARGIN_HW_RH;
+ if (ber_level)
+ val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
+ USB4_MARGIN_HW_BER_MASK;
+
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_DATA, results, sizeof(*results) * 2);
+}
+
+/**
+ * usb4_port_sw_margin() - Run software lane margining on port
+ * @port: USB4 port
+ * @lanes: Which lanes to run (must match the port capabilities). Can be
+ * %0, %1 or %7.
+ * @timing: Perform timing margining instead of voltage
+ * @right_high: Use Right/high margin instead of left/low
+ * @counter: What to do with the error counter
+ *
+ * Runs software lane margining on the USB4 port. Read back the error
+ * counters by calling usb4_port_sw_margin_errors(). Returns %0 on
+ * success and negative errno otherwise.
+ */
+int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
+ bool right_high, u32 counter)
+{
+ u32 val;
+ int ret;
+
+ val = lanes;
+ if (timing)
+ val |= USB4_MARGIN_SW_TIME;
+ if (right_high)
+ val |= USB4_MARGIN_SW_RH;
+ val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
+ USB4_MARGIN_SW_COUNTER_MASK;
+
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+}
+
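Another illustrative sketch, not from this patch: software margining is a two-step flow, running the margin first and then fetching the counters with usb4_port_sw_margin_errors() (documented right below). The wrapper name is invented and the counter action value 0 is an assumption.

static int usb4_port_sw_margin_sketch(struct tb_port *port)
{
	u32 errors;
	int ret;

	/* Lane 0, voltage margining, left/low side, counter action 0 */
	ret = usb4_port_sw_margin(port, 0, false, false, 0);
	if (ret)
		return ret;

	ret = usb4_port_sw_margin_errors(port, &errors);
	if (ret)
		return ret;

	tb_port_dbg(port, "SW margining errors %#x\n", errors);
	return 0;
}
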
+/**
+ * usb4_port_sw_margin_errors() - Read the software margining error counters
+ * @port: USB4 port
+ * @errors: Error metadata is copied here.
+ *
+ * This reads back the software margining error counters from the port.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
+{
+ int ret;
+
+ ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
+ if (ret)
+ return ret;
+
+ return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_METADATA, errors, sizeof(*errors));
+}
+
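A hedged sketch, not part of this patch, of how the hardware margining helpers above chain together: read the capabilities, then run a timing margin on lane 0. The function name is invented and the parameter choices are examples only.

static int usb4_port_hw_margin_sketch(struct tb_port *port)
{
	u32 caps[2], results[2];
	int ret;

	ret = usb4_port_margining_caps(port, caps);
	if (ret)
		return ret;

	/* Lane 0, default BER level contour, timing margin, left/low side */
	ret = usb4_port_hw_margin(port, 0, 0, true, false, results);
	if (ret)
		return ret;

	tb_port_dbg(port, "HW margining results %#x %#x\n",
		    results[0], results[1]);
	return 0;
}
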
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
enum usb4_sb_opcode opcode,
int timeout_msec)
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index 6b02945624ee..1a30c0a23286 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -53,6 +53,8 @@ static ssize_t link_show(struct device *dev, struct device_attribute *attr,
link = port->sw->link_usb4 ? "usb4" : "tbt";
else if (tb_port_has_remote(port))
link = port->remote->sw->link_usb4 ? "usb4" : "tbt";
+ else if (port->xdomain)
+ link = port->xdomain->link_usb4 ? "usb4" : "tbt";
else
link = "none";
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index c31c0d94d8b3..bbb248a2686f 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -877,7 +877,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
* It should be null terminated but anything else is pretty much
* allowed.
*/
- return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
+ return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
@@ -903,7 +903,7 @@ static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcid);
+ return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
@@ -912,7 +912,7 @@ static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcvers);
+ return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
@@ -921,7 +921,7 @@ static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcrevs);
+ return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
@@ -930,7 +930,7 @@ static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "0x%08x\n", svc->prtcstns);
+ return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
@@ -1131,11 +1131,6 @@ static int populate_properties(struct tb_xdomain *xd,
return 0;
}
-static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
-{
- return tb_to_switch(xd->dev.parent);
-}
-
static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
bool change = false;
@@ -1440,6 +1435,8 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
if (xd->vendor_name && xd->device_name)
dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
xd->device_name);
+
+ tb_xdomain_debugfs_init(xd);
} else {
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
}
@@ -1664,7 +1661,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%#x\n", xd->device);
+ return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);
@@ -1676,7 +1673,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
- ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+ ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -1688,7 +1685,7 @@ static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%d\n", xd->remote_max_hopid);
+ return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);
@@ -1697,7 +1694,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%#x\n", xd->vendor);
+ return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -1709,7 +1706,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
- ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+ ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -1721,7 +1718,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%pUb\n", xd->remote_uuid);
+ return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
@@ -1730,7 +1727,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
+ return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
@@ -1741,7 +1738,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%u\n", xd->link_width);
+ return sysfs_emit(buf, "%u\n", xd->link_width);
}
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
@@ -1940,6 +1937,8 @@ static int unregister_service(struct device *dev, void *data)
*/
void tb_xdomain_remove(struct tb_xdomain *xd)
{
+ tb_xdomain_debugfs_remove(xd);
+
stop_handshake(xd);
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 81e7f64c1739..f52266766df9 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -94,7 +94,7 @@ static struct tty_driver *serial_driver;
static unsigned char current_ctl_bits;
static void change_speed(struct tty_struct *tty, struct serial_state *info,
- struct ktermios *old);
+ const struct ktermios *old);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -566,7 +566,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
* the specified baud rate for a serial port.
*/
static void change_speed(struct tty_struct *tty, struct serial_state *info,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct tty_port *port = &info->tport;
int quot = 0, baud_base, baud;
@@ -1169,7 +1169,7 @@ static int rs_ioctl(struct tty_struct *tty,
return 0;
}
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void rs_set_termios(struct tty_struct *tty, const struct ktermios *old_termios)
{
struct serial_state *info = tty->driver_data;
unsigned long flags;
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 32366caca662..7d49a872de48 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -29,7 +29,6 @@
/* General device driver settings */
-#define HVC_IUCV_MAGIC 0xc9e4c3e5
#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
@@ -131,9 +130,9 @@ static struct iucv_handler hvc_iucv_handler = {
*/
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
- if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
+ if (num > hvc_iucv_devices)
return NULL;
- return hvc_iucv_table[num - HVC_IUCV_MAGIC];
+ return hvc_iucv_table[num];
}
/**
@@ -1072,8 +1071,8 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->is_console = is_console;
/* allocate hvc device */
- priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
- HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
+ priv->hvc = hvc_alloc(id, /* PAGE_SIZE */
+ id, &hvc_iucv_ops, 256);
if (IS_ERR(priv->hvc)) {
rc = PTR_ERR(priv->hvc);
goto out_error_hvc;
@@ -1371,7 +1370,7 @@ static int __init hvc_iucv_init(void)
/* register the first terminal device as console
* (must be done before allocating hvc terminal devices) */
- rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
+ rc = hvc_instantiate(0, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
if (rc) {
pr_err("Registering HVC terminal device as "
"Linux console failed\n");
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 9b7e8246a464..4ba24963685e 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -69,6 +69,7 @@
#include <asm/hvconsole.h>
#include <asm/hvcserver.h>
#include <linux/uaccess.h>
+#include <linux/termios_internal.h>
#include <asm/vio.h>
/*
@@ -839,7 +840,7 @@ static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd)
hvcsd->p_partition_ID = pi->partition_ID;
/* copy the null-term char too */
- strlcpy(hvcsd->p_location_code, pi->location_code,
+ strscpy(hvcsd->p_location_code, pi->location_code,
sizeof(hvcsd->p_location_code));
}
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index f3c72ab1476c..35b6fddf0341 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -491,7 +491,7 @@ static int moxa_write(struct tty_struct *, const unsigned char *, int);
static unsigned int moxa_write_room(struct tty_struct *);
static void moxa_flush_buffer(struct tty_struct *);
static unsigned int moxa_chars_in_buffer(struct tty_struct *);
-static void moxa_set_termios(struct tty_struct *, struct ktermios *);
+static void moxa_set_termios(struct tty_struct *, const struct ktermios *);
static void moxa_stop(struct tty_struct *);
static void moxa_start(struct tty_struct *);
static void moxa_hangup(struct tty_struct *);
@@ -499,7 +499,7 @@ static int moxa_tiocmget(struct tty_struct *tty);
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static void moxa_poll(struct timer_list *);
-static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
+static void moxa_set_tty_param(struct tty_struct *, const struct ktermios *);
static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
static void moxa_dtr_rts(struct tty_port *, int);
@@ -1602,7 +1602,7 @@ static int moxa_tiocmset(struct tty_struct *tty,
}
static void moxa_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct moxa_port *ch = tty->driver_data;
@@ -1761,7 +1761,8 @@ static void moxa_poll(struct timer_list *unused)
/******************************************************************************/
-static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios)
+static void moxa_set_tty_param(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
register struct ktermios *ts = &tty->termios;
struct moxa_port *ch = tty->driver_data;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 70b982b2c6b2..2436e0b10f9a 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -398,7 +398,7 @@ static enum mxser_must_hwid mxser_must_get_hwid(unsigned long io)
oldmcr = inb(io + UART_MCR);
outb(0, io + UART_MCR);
mxser_set_must_xon1_value(io, 0x11);
- if ((hwid = inb(io + UART_MCR)) != 0) {
+ if (inb(io + UART_MCR) != 0) {
outb(oldmcr, io + UART_MCR);
return MOXA_OTHER_UART;
}
@@ -571,7 +571,8 @@ static void mxser_handle_cts(struct tty_struct *tty, struct mxser_port *info,
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
-static void mxser_change_speed(struct tty_struct *tty, struct ktermios *old_termios)
+static void mxser_change_speed(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval;
@@ -1348,7 +1349,8 @@ static void mxser_start(struct tty_struct *tty)
spin_unlock_irqrestore(&info->slock, flags);
}
-static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void mxser_set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index caa5c14ed57f..5e516f5cac5a 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -63,6 +63,14 @@
static int debug;
module_param(debug, int, 0600);
+/* Module debug bits */
+#define DBG_DUMP BIT(0) /* Data transmission dump. */
+#define DBG_CD_ON BIT(1) /* Always assume CD line on. */
+#define DBG_DATA BIT(2) /* Data transmission details. */
+#define DBG_ERRORS BIT(3) /* Details for fail conditions. */
+#define DBG_TTY BIT(4) /* Transmission statistics for DLCI TTYs. */
+#define DBG_PAYLOAD BIT(5) /* Limits DBG_DUMP to payload frames. */
+
/* Defaults: these are from the specification */
#define T1 10 /* 100mS */
@@ -164,6 +172,9 @@ struct gsm_dlci {
struct net_device *net; /* network interface, if created */
};
+/* Total number of supported devices */
+#define GSM_TTY_MINORS 256
+
/* DLCI 0, 62/63 are special or reserved see gsmtty_open */
#define NUM_DLCI 64
@@ -184,6 +195,11 @@ struct gsm_control {
int error; /* Error if any */
};
+enum gsm_encoding {
+ GSM_BASIC_OPT,
+ GSM_ADV_OPT,
+};
+
enum gsm_mux_state {
GSM_SEARCH,
GSM_START,
@@ -230,7 +246,7 @@ struct gsm_mux {
unsigned int address;
unsigned int count;
bool escape;
- int encoding;
+ enum gsm_encoding encoding;
u8 control;
u8 fcs;
u8 *txframe; /* TX framing buffer */
@@ -248,7 +264,7 @@ struct gsm_mux {
bool constipated; /* Asked by remote to shut up */
bool has_devices; /* Devices were registered */
- spinlock_t tx_lock;
+ struct mutex tx_mutex;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
@@ -256,7 +272,7 @@ struct gsm_mux {
struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
- struct timer_list kick_timer; /* Kick TX queuing on timeout */
+ struct delayed_work kick_timeout; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -527,7 +543,7 @@ static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
*/
dev = tty_register_device(gsm_tty_driver, base + i, NULL);
if (IS_ERR(dev)) {
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_info("%s failed to register device minor %u",
__func__, base + i);
for (i--; i >= 1; i--)
@@ -581,8 +597,12 @@ static void gsm_unregister_devices(struct tty_driver *driver,
static void gsm_print_packet(const char *hdr, int addr, int cr,
u8 control, const u8 *data, int dlen)
{
- if (!(debug & 1))
+ if (!(debug & DBG_DUMP))
return;
+ /* Only show user payload frames if debug & DBG_PAYLOAD */
+ if (!(debug & DBG_PAYLOAD) && addr != 0)
+ if ((control & ~PF) == UI || (control & ~PF) == UIH)
+ return;
pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);
@@ -680,7 +700,6 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
struct gsm_msg *msg;
u8 *dp;
int ocr;
- unsigned long flags;
msg = gsm_data_alloc(gsm, addr, 0, control);
if (!msg)
@@ -694,7 +713,7 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
*dp++ = (addr << 2) | (ocr << 1) | EA;
*dp++ = control;
- if (gsm->encoding == 0)
+ if (gsm->encoding == GSM_BASIC_OPT)
*dp++ = EA; /* Length of data = 0 */
*dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
@@ -702,10 +721,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
gsm_print_packet("Q->", addr, cr, control, NULL, 0);
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
gsm->tx_bytes += msg->len;
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
gsmld_write_trigger(gsm);
return 0;
@@ -730,7 +749,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
spin_unlock_irqrestore(&dlci->lock, flags);
/* Clear data packets in MUX write queue */
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
if (msg->addr != addr)
continue;
@@ -738,7 +757,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
list_del(&msg->list);
kfree(msg);
}
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -813,7 +832,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
int len, ret;
- if (gsm->encoding == 0) {
+ if (gsm->encoding == GSM_BASIC_OPT) {
gsm->txframe[0] = GSM0_SOF;
memcpy(gsm->txframe + 1, msg->data, msg->len);
gsm->txframe[msg->len + 1] = GSM0_SOF;
@@ -825,7 +844,7 @@ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
len += 2;
}
- if (debug & 4)
+ if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, gsm->txframe, len);
gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
msg->len);
@@ -965,7 +984,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
u8 *fcs = dp + msg->len;
/* Fill in the header */
- if (gsm->encoding == 0) {
+ if (gsm->encoding == GSM_BASIC_OPT) {
if (msg->len < 128)
*--dp = (msg->len << 1) | EA;
else {
@@ -1009,7 +1028,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
gsm->tx_bytes += msg->len;
gsmld_write_trigger(gsm);
- mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
+ schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
}
/**
@@ -1024,10 +1043,9 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
- unsigned long flags;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
__gsm_data_queue(dlci, msg);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/**
@@ -1039,7 +1057,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
@@ -1099,7 +1117,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -1115,7 +1133,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
if (dlci->adaption == 4)
overhead = 1;
- /* dlci->skb is locked by tx_lock */
+ /* dlci->skb is locked by tx_mutex */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
@@ -1169,7 +1187,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
* Push an empty frame in to the transmit queue to update the modem status
* bits and to transmit an optional break.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
@@ -1283,13 +1301,12 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
- unsigned long flags;
int sweep;
if (dlci->constipated)
return;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
@@ -1300,7 +1317,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/*
@@ -1309,6 +1326,31 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
/**
+ * gsm_control_command - send a command frame to a control
+ * @gsm: gsm channel
+ * @cmd: the command to use
+ * @data: data to follow encoded info
+ * @dlen: length of data
+ *
+ * Encode up and queue a UI/UIH frame containing our command.
+ */
+static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data,
+ int dlen)
+{
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype);
+
+ if (msg == NULL)
+ return -ENOMEM;
+
+ msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */
+ msg->data[1] = (dlen << 1) | EA;
+ memcpy(msg->data + 2, data, dlen);
+ gsm_data_queue(gsm->dlci[0], msg);
+
+ return 0;
+}
+
+/**
* gsm_control_reply - send a response frame to a control
* @gsm: gsm channel
* @cmd: the command to use
@@ -1410,18 +1452,12 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
unsigned int modem = 0;
struct gsm_dlci *dlci;
int len = clen;
- int slen;
+ int cl = clen;
const u8 *dp = data;
struct tty_struct *tty;
- while (gsm_read_ea(&addr, *dp++) == 0) {
- len--;
- if (len == 0)
- return;
- }
- /* Must be at least one byte following the EA */
- len--;
- if (len <= 0)
+ len = gsm_read_ea_val(&addr, data, cl);
+ if (len < 1)
return;
addr >>= 1;
@@ -1430,15 +1466,20 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
dlci = gsm->dlci[addr];
- slen = len;
- while (gsm_read_ea(&modem, *dp++) == 0) {
- len--;
- if (len == 0)
- return;
- }
- len--;
+ /* Must be at least one byte following the EA */
+ if ((cl - len) < 1)
+ return;
+
+ dp += len;
+ cl -= len;
+
+ /* get the modem status */
+ len = gsm_read_ea_val(&modem, dp, cl);
+ if (len < 1)
+ return;
+
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, slen - len);
+ gsm_process_modem(tty, dlci, modem, cl);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1614,13 +1655,7 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
- struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype);
- if (msg == NULL)
- return;
- msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */
- msg->data[1] = (ctrl->len << 1) | EA;
- memcpy(msg->data + 2, ctrl->data, ctrl->len);
- gsm_data_queue(gsm->dlci[0], msg);
+ gsm_control_command(gsm, ctrl->cmd, ctrl->data, ctrl->len);
}
/**
@@ -1740,7 +1775,7 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
del_timer(&dlci->t1);
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
/* Prevent us from sending data before the link is up again */
@@ -1774,7 +1809,7 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
/* This will let a tty open continue */
dlci->state = DLCI_OPEN;
dlci->constipated = false;
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
if (dlci->addr)
@@ -1810,7 +1845,7 @@ static void gsm_dlci_t1(struct timer_list *t)
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else if (!dlci->addr && gsm->control == (DM | PF)) {
- if (debug & 8)
+ if (debug & DBG_ERRORS)
pr_info("DLCI %d opening in ADM mode.\n",
dlci->addr);
dlci->mode = DLCI_MODE_ADM;
@@ -1913,11 +1948,10 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
struct tty_port *port = &dlci->port;
struct tty_struct *tty;
unsigned int modem = 0;
- int len = clen;
- int slen = 0;
+ int len;
- if (debug & 16)
- pr_debug("%d bytes for tty\n", len);
+ if (debug & DBG_TTY)
+ pr_debug("%d bytes for tty\n", clen);
switch (dlci->adaption) {
/* Unsupported types */
case 4: /* Packetised interruptible data */
@@ -1925,24 +1959,22 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
case 3: /* Packetised uininterruptible voice/data */
break;
case 2: /* Asynchronous serial with line state in each frame */
- while (gsm_read_ea(&modem, *data++) == 0) {
- len--;
- slen++;
- if (len == 0)
- return;
- }
- len--;
- slen++;
+ len = gsm_read_ea_val(&modem, data, clen);
+ if (len < 1)
+ return;
tty = tty_port_tty_get(port);
if (tty) {
- gsm_process_modem(tty, dlci, modem, slen);
+ gsm_process_modem(tty, dlci, modem, len);
tty_wakeup(tty);
tty_kref_put(tty);
}
+ /* Skip processed modem data */
+ data += len;
+ clen -= len;
fallthrough;
case 1: /* Line state will go via DLCI 0 controls only */
default:
- tty_insert_flip_string(port, data, len);
+ tty_insert_flip_string(port, data, clen);
tty_flip_buffer_push(port);
}
}
@@ -1963,47 +1995,49 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
{
/* See what command is involved */
unsigned int command = 0;
- while (len-- > 0) {
- if (gsm_read_ea(&command, *data++) == 1) {
- int clen = *data++;
- len--;
- /* FIXME: this is properly an EA */
- clen >>= 1;
- /* Malformed command ? */
- if (clen > len)
- return;
- if (command & 1)
- gsm_control_message(dlci->gsm, command,
- data, clen);
- else
- gsm_control_response(dlci->gsm, command,
- data, clen);
- return;
- }
- }
+ unsigned int clen = 0;
+ unsigned int dlen;
+
+ /* read the command */
+ dlen = gsm_read_ea_val(&command, data, len);
+ len -= dlen;
+ data += dlen;
+
+ /* read any control data */
+ dlen = gsm_read_ea_val(&clen, data, len);
+ len -= dlen;
+ data += dlen;
+
+ /* Malformed command? */
+ if (clen > len)
+ return;
+
+ if (command & 1)
+ gsm_control_message(dlci->gsm, command, data, clen);
+ else
+ gsm_control_response(dlci->gsm, command, data, clen);
}
/**
- * gsm_kick_timer - transmit if possible
- * @t: timer contained in our gsm object
+ * gsm_kick_timeout - transmit if possible
+ * @work: work contained in our gsm object
*
* Transmit data from DLCIs if the queue is empty. We can't rely on
* a tty wakeup except when we filled the pipe so we need to fire off
* new data ourselves in other cases.
*/
-static void gsm_kick_timer(struct timer_list *t)
+static void gsm_kick_timeout(struct work_struct *work)
{
- struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
- unsigned long flags;
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
int sent = 0;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
if (gsm->tx_bytes < TX_THRESH_LO)
sent = gsm_dlci_data_sweep(gsm);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
- if (sent && debug & 4)
+ if (sent && debug & DBG_DATA)
pr_info("%s TX queue stalled\n", __func__);
}
@@ -2137,7 +2171,7 @@ static void gsm_queue(struct gsm_mux *gsm)
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
- if (debug & 4)
+ if (debug & DBG_DATA)
pr_debug("BAD FCS %02x\n", gsm->fcs);
return;
}
@@ -2458,7 +2492,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
- del_timer_sync(&gsm->kick_timer);
+ cancel_delayed_work_sync(&gsm->kick_timeout);
del_timer_sync(&gsm->t2_timer);
/* Finish writing to ldisc */
@@ -2501,14 +2535,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
if (dlci == NULL)
return -ENOMEM;
- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- INIT_WORK(&gsm->tx_work, gsmld_write_task);
- init_waitqueue_head(&gsm->event);
- spin_lock_init(&gsm->control_lock);
- spin_lock_init(&gsm->tx_lock);
-
- if (gsm->encoding == 0)
+ if (gsm->encoding == GSM_BASIC_OPT)
gsm->receive = gsm0_receive;
else
gsm->receive = gsm1_receive;
@@ -2538,6 +2565,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
break;
}
}
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2609,16 +2637,22 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
+ mutex_init(&gsm->tx_mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
INIT_LIST_HEAD(&gsm->tx_data_list);
+ INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
gsm->t1 = T1;
gsm->t2 = T2;
gsm->n2 = N2;
gsm->ftype = UIH;
gsm->adaption = 1;
- gsm->encoding = 1;
+ gsm->encoding = GSM_ADV_OPT;
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
@@ -2636,6 +2670,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_unlock(&gsm_mux_lock);
if (i == MAX_MUX) {
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2719,7 +2754,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
gsm->initiator = c->initiator;
gsm->mru = c->mru;
gsm->mtu = c->mtu;
- gsm->encoding = c->encapsulation;
+ gsm->encoding = c->encapsulation ? GSM_ADV_OPT : GSM_BASIC_OPT;
gsm->adaption = c->adaption;
gsm->n2 = c->n2;
@@ -2763,7 +2798,7 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
return -ENOSPC;
}
- if (debug & 4)
+ if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, data, len);
return gsm->tty->ops->write(gsm->tty, data, len);
}
@@ -2791,17 +2826,16 @@ static void gsmld_write_trigger(struct gsm_mux *gsm)
static void gsmld_write_task(struct work_struct *work)
{
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
- unsigned long flags;
int i, ret;
/* All outstanding control channel and control messages and one data
* frame is sent.
*/
ret = -ENODEV;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
if (gsm->tty)
ret = gsm_data_kick(gsm);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
if (ret >= 0)
for (i = 0; i < NUM_DLCI; i++)
@@ -2850,7 +2884,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
struct gsm_mux *gsm = tty->disc_data;
char flags = TTY_NORMAL;
- if (debug & 4)
+ if (debug & DBG_DATA)
gsm_hex_dump_bytes(__func__, cp, count);
for (; count; count--, cp++) {
@@ -2858,7 +2892,8 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
flags = *fp++;
switch (flags) {
case TTY_NORMAL:
- gsm->receive(gsm, *cp);
+ if (gsm->receive)
+ gsm->receive(gsm, *cp);
break;
case TTY_OVERRUN:
case TTY_BREAK:
@@ -2942,14 +2977,9 @@ static int gsmld_open(struct tty_struct *tty)
tty->receive_room = 65536;
/* Attach the initial passive connection */
- gsm->encoding = 1;
-
+ gsm->encoding = GSM_ADV_OPT;
gsmld_attach_gsm(tty, gsm);
- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- INIT_WORK(&gsm->tx_work, gsmld_write_task);
-
return 0;
}
@@ -3012,7 +3042,6 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
int space;
int ret;
@@ -3020,13 +3049,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
return -ENODEV;
ret = -ENOBUFS;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
space = tty_write_room(tty);
if (space >= nr)
ret = tty->ops->write(tty, buf, nr);
else
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
return ret;
}
@@ -3323,14 +3352,13 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
struct gsm_mux *gsm = dlci->gsm;
- unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
return;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
gsm_dlci_modem_output(gsm, dlci, brk);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -3345,7 +3373,7 @@ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
struct gsm_control *ctrl;
int len = 2;
- if (dlci->gsm->encoding != 0)
+ if (dlci->gsm->encoding != GSM_BASIC_OPT)
return 0;
modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
@@ -3374,7 +3402,7 @@ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
/* Send convergence layer type 2 empty data frame. */
gsm_modem_upd_via_data(dlci, brk);
return 0;
- } else if (dlci->gsm->encoding == 0) {
+ } else if (dlci->gsm->encoding == GSM_BASIC_OPT) {
/* Send as MSC control message. */
return gsm_modem_upd_via_msc(dlci, brk);
}
@@ -3391,15 +3419,15 @@ static int gsm_carrier_raised(struct tty_port *port)
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
- if (debug & 2)
+ if (debug & DBG_CD_ON)
return 1;
/*
* Basic mode with control channel in ADM mode may not respond
* to CMD_MSC at all and modem_rx is empty.
*/
- if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
- !dlci->modem_rx)
+ if (gsm->encoding == GSM_BASIC_OPT &&
+ gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx)
return 1;
return dlci->modem_rx & TIOCM_CD;
@@ -3647,7 +3675,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
}
}
-static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void gsmtty_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
@@ -3745,7 +3774,7 @@ static int __init gsm_init(void)
return status;
}
- gsm_tty_driver = tty_alloc_driver(256, TTY_DRIVER_REAL_RAW |
+ gsm_tty_driver = tty_alloc_driver(GSM_TTY_MINORS, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
if (IS_ERR(gsm_tty_driver)) {
pr_err("gsm_init: tty allocation failed.\n");
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 94c1ec2dd754..46b09bfb6f3a 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -76,8 +76,6 @@
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define HDLC_MAGIC 0x239e
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -98,7 +96,6 @@
#include <linux/if.h>
#include <linux/bitops.h>
-#include <asm/termios.h>
#include <linux/uaccess.h>
#include "tty.h"
@@ -124,7 +121,6 @@ struct n_hdlc_buf_list {
/**
* struct n_hdlc - per device instance data structure
- * @magic: magic value for structure
* @tbusy: reentrancy flag for tx wakeup code
* @woke_up: tx wakeup needs to be run again as it was called while @tbusy
* @tx_buf_list: list of pending transmit frame buffers
@@ -133,7 +129,6 @@ struct n_hdlc_buf_list {
* @rx_free_buf_list: list unused received frame buffers
*/
struct n_hdlc {
- int magic;
bool tbusy;
bool woke_up;
struct n_hdlc_buf_list tx_buf_list;
@@ -200,10 +195,6 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
- if (n_hdlc->magic != HDLC_MAGIC) {
- pr_warn("n_hdlc: trying to close unopened tty!\n");
- return;
- }
#if defined(TTY_NO_WRITE_SPLIT)
clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
@@ -386,12 +377,6 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
pr_debug("%s() called count=%d\n", __func__, count);
- /* verify line is using HDLC discipline */
- if (n_hdlc->magic != HDLC_MAGIC) {
- pr_err("line not using HDLC discipline\n");
- return;
- }
-
if (count > maxframe) {
pr_debug("rx count>maxframesize, data discarded\n");
return;
@@ -542,9 +527,6 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
pr_debug("%s() called count=%zd\n", __func__, count);
- if (n_hdlc->magic != HDLC_MAGIC)
- return -EIO;
-
/* verify frame size */
if (count > maxframe) {
pr_debug("%s: truncating user packet from %zu to %d\n",
@@ -609,10 +591,6 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
pr_debug("%s() called %d\n", __func__, cmd);
- /* Verify the status of the device */
- if (n_hdlc->magic != HDLC_MAGIC)
- return -EBADF;
-
switch (cmd) {
case FIONREAD:
/* report count of read data available */
@@ -673,9 +651,6 @@ static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
struct n_hdlc *n_hdlc = tty->disc_data;
__poll_t mask = 0;
- if (n_hdlc->magic != HDLC_MAGIC)
- return 0;
-
/*
* queue the current process into any wait queue that may awaken in the
* future (read and write)
@@ -739,9 +714,6 @@ static struct n_hdlc *n_hdlc_alloc(void)
n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");
- /* Initialize the control block */
- n_hdlc->magic = HDLC_MAGIC;
-
return n_hdlc;
} /* end of n_hdlc_alloc() */
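The removed HDLC_MAGIC checks were purely defensive: the line-discipline core never calls the read/write/ioctl/poll callbacks before the open callback has installed tty->disc_data, so the per-operation validation could not fail in practice. A simplified sketch of the ordering the removal relies on (hypothetical function name, not the driver's full open path):

static int hdlc_open_sketch(struct tty_struct *tty)
{
        struct n_hdlc *n_hdlc = n_hdlc_alloc();

        if (!n_hdlc)
                return -ENFILE;

        /* installed before any other ldisc callback can run on this tty */
        tty->disc_data = n_hdlc;
        return 0;
}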
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 3afdd9033a9c..597019690ae6 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1758,7 +1758,7 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
*
* Locking: Caller holds @tty->termios_rwsem
*/
-static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void n_tty_set_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 752dab3356d7..07394fdaf522 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -240,7 +240,7 @@ out:
}
static void pty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
/* See if packet mode change of state. */
if (tty->link && tty->link->ctrl.packet) {
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 7520cc02fd4d..c7d34823f715 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -243,7 +243,7 @@ static void serial21285_shutdown(struct uart_port *port)
static void
serial21285_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, quot, h_lcr, b;
@@ -461,9 +461,6 @@ static int __init serial21285_console_setup(struct console *co, char *options)
int parity = 'n';
int flow = 'n';
- if (machine_is_personal_server())
- baud = 57600;
-
/*
* Check whether an invalid uart number has been specified, and
* if so, search for the first available port that does have
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 8efdc271eb75..fa8ccf204d86 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -755,7 +755,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
static void brcmstb_set_termios(struct uart_port *up,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_8250_port *p8250 = up_to_u8250p(up);
struct brcmuart_priv *priv = up->private_data;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 2e83e7367441..94fbf0add2ce 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -298,10 +298,9 @@ static void serial8250_backup_timeout(struct timer_list *t)
jiffies + uart_poll_timeout(&up->port) + HZ / 5);
}
-static int univ8250_setup_irq(struct uart_8250_port *up)
+static void univ8250_setup_timer(struct uart_8250_port *up)
{
struct uart_port *port = &up->port;
- int retval = 0;
/*
* The above check will only give an accurate result the first time
@@ -322,10 +321,16 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
*/
if (!port->irq)
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
- else
- retval = serial_link_irq_chain(up);
+}
- return retval;
+static int univ8250_setup_irq(struct uart_8250_port *up)
+{
+ struct uart_port *port = &up->port;
+
+ if (port->irq)
+ return serial_link_irq_chain(up);
+
+ return 0;
}
static void univ8250_release_irq(struct uart_8250_port *up)
@@ -381,6 +386,7 @@ static struct uart_ops univ8250_port_ops;
static const struct uart_8250_ops univ8250_driver_ops = {
.setup_irq = univ8250_setup_irq,
.release_irq = univ8250_release_irq,
+ .setup_timer = univ8250_setup_timer,
};
static struct uart_8250_port serial8250_ports[UART_NR];
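Splitting univ8250_setup_irq() into an IRQ part and a timer part lets serial8250_do_startup() (see the 8250_port.c hunks further down) request the interrupt early, where a failure can still be handled, and only arm the backup poll timer once the port is known usable. A rough outline of the ordering the split enables, assuming the simplified wrapper below:

static int startup_order_sketch(struct uart_8250_port *up)
{
        int retval = up->ops->setup_irq(up);    /* can fail: bail out early */

        if (retval)
                return retval;

        /* ... IIR/THRE probing that needs the IRQ wired up ... */

        up->ops->setup_timer(up);               /* void: poll timer for irq-less ports */
        return 0;
}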
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index a8dba4a0a8fb..b85c82616e8c 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -26,9 +26,7 @@ static void __dma_tx_complete(void *param)
dma->tx_running = 0;
- xmit->tail += dma->tx_size;
- xmit->tail &= UART_XMIT_SIZE - 1;
- p->port.icount.tx += dma->tx_size;
+ uart_xmit_advance(&p->port, dma->tx_size);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&p->port);
@@ -107,8 +105,7 @@ int serial8250_tx_dma(struct uart_8250_port *p)
dma_async_issue_pending(dma->txchan);
serial8250_clear_THRI(p);
- if (dma->tx_err)
- dma->tx_err = 0;
+ dma->tx_err = 0;
return 0;
err:
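uart_xmit_advance() folds the open-coded tail update and TX accounting into one helper; its effect is the same as the lines it replaces. A standalone illustration of the circular-buffer arithmetic (UART_XMIT_SIZE is a power of two, so masking wraps the tail):

#include <stdio.h>

#define UART_XMIT_SIZE 4096     /* power of two, as in the kernel */

int main(void)
{
        unsigned int tail = 4090, tx_count = 0, sent = 10;

        /* what advancing the xmit buffer by 'sent' characters does */
        tail = (tail + sent) & (UART_XMIT_SIZE - 1);
        tx_count += sent;

        printf("tail=%u tx=%u\n", tail, tx_count);   /* tail wraps to 4 */
        return 0;
}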
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a604b42e4458..7db51781289e 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -350,7 +350,7 @@ dw8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
}
static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long newrate = tty_termios_baud_rate(termios) * 16;
struct dw8250_data *d = to_dw8250_data(p->private_data);
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index dbe4d44f60d4..75f32f054ebb 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -92,7 +92,8 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud,
serial8250_do_set_divisor(p, baud, quot, quot_frac);
}
-void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old)
+void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios,
+ const struct ktermios *old)
{
p->status &= ~UPSTAT_AUTOCTS;
if (termios->c_cflag & CRTSCTS)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index 055bfdc87985..f13e91f2cace 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -47,7 +47,7 @@ struct dw8250_data {
unsigned int uart_16550_compatible:1;
};
-void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, struct ktermios *old);
+void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 65b6b3cbaff6..e2aa2a1a02dd 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -278,7 +278,7 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
static void fintek_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct fintek_8250 *pdata = port->private_data;
unsigned int baud = tty_termios_baud_rate(termios);
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 4ba43bef9933..44cc755b1a29 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -70,7 +70,7 @@ static inline struct lpss8250 *to_lpss8250(struct dw8250_port_data *data)
}
static void byt_set_termios(struct uart_port *p, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
struct lpss8250 *lpss = to_lpss8250(p->private_data);
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index 737c4c31e8a0..f46ca13ff4aa 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -7,7 +7,6 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
-#include <uapi/linux/serial_core.h>
#define MEN_UART_ID_Z025 0x19
#define MEN_UART_ID_Z057 0x39
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index a2a03acb04ad..2cc78a4bf7a1 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -206,9 +206,8 @@ static void dnv_exit(struct mid8250 *mid)
/*****************************************************************************/
-static void mid8250_set_termios(struct uart_port *p,
- struct ktermios *termios,
- struct ktermios *old)
+static void mid8250_set_termios(struct uart_port *p, struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
struct mid8250 *mid = p->private_data;
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 54051ec7b499..fb1d5ec0940e 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -291,7 +291,7 @@ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
static void
mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
static const unsigned short fraction_L_mapping[] = {
0, 1, 0x5, 0x15, 0x55, 0x57, 0x57, 0x77, 0x7F, 0xFF, 0xFF
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 0dcecbbc3967..41b8c6b27136 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -342,6 +342,9 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
omap8250_update_mdr1(up, priv);
up->port.ops->set_mctrl(&up->port, up->port.mctrl);
+
+ if (up->port.rs485.flags & SER_RS485_ENABLED)
+ serial8250_em485_stop_tx(up);
}
/*
@@ -350,7 +353,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
*/
static void omap_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
@@ -984,9 +987,7 @@ static void omap_8250_dma_tx_complete(void *param)
dma->tx_running = 0;
- xmit->tail += dma->tx_size;
- xmit->tail &= UART_XMIT_SIZE - 1;
- p->port.icount.tx += dma->tx_size;
+ uart_xmit_advance(&p->port, dma->tx_size);
if (priv->delayed_restore) {
priv->delayed_restore = 0;
@@ -1334,6 +1335,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
up.port.rs485_config = serial8250_em485_config;
+ up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = serial8250_em485_start_tx;
up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 6f66dc2ebacc..8e9f247590bd 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1232,6 +1232,10 @@ static void pci_oxsemi_tornado_set_mctrl(struct uart_port *port,
serial8250_do_set_mctrl(port, mctrl);
}
+/*
+ * We require EFR features for clock programming, so set UPF_FULL_PROBE
+ * for full probing regardless of CONFIG_SERIAL_8250_16550A_VARIANTS setting.
+ */
static int pci_oxsemi_tornado_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *up, int idx)
@@ -1239,6 +1243,7 @@ static int pci_oxsemi_tornado_setup(struct serial_private *priv,
struct pci_dev *dev = priv->dev;
if (pci_oxsemi_tornado_p(dev)) {
+ up->port.flags |= UPF_FULL_PROBE;
up->port.get_divisor = pci_oxsemi_tornado_get_divisor;
up->port.set_divisor = pci_oxsemi_tornado_set_divisor;
up->port.set_mctrl = pci_oxsemi_tornado_set_mctrl;
@@ -1627,7 +1632,6 @@ static int pci_fintek_init(struct pci_dev *dev)
resource_size_t bar_data[3];
u8 config_base;
struct serial_private *priv = pci_get_drvdata(dev);
- struct uart_8250_port *port;
if (!(pci_resource_flags(dev, 5) & IORESOURCE_IO) ||
!(pci_resource_flags(dev, 4) & IORESOURCE_IO) ||
@@ -1674,13 +1678,7 @@ static int pci_fintek_init(struct pci_dev *dev)
pci_write_config_byte(dev, config_base + 0x06, dev->irq);
- if (priv) {
- /* re-apply RS232/485 mode when
- * pciserial_resume_ports()
- */
- port = serial8250_get_port(priv->line[i]);
- uart_rs485_config(&port->port);
- } else {
+ if (!priv) {
/* First init without port data
* force init to RS232 Mode
*/
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 39b35a61958c..fe8662cd9402 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -600,7 +600,7 @@ EXPORT_SYMBOL_GPL(serial8250_rpm_put);
static int serial8250_em485_init(struct uart_8250_port *p)
{
if (p->em485)
- return 0;
+ goto deassert_rts;
p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
if (!p->em485)
@@ -616,7 +616,9 @@ static int serial8250_em485_init(struct uart_8250_port *p)
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
- p->rs485_stop_tx(p);
+deassert_rts:
+ if (p->em485->tx_stopped)
+ p->rs485_stop_tx(p);
return 0;
}
@@ -752,6 +754,14 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
serial8250_rpm_put(p);
}
+static void serial8250_clear_IER(struct uart_8250_port *up)
+{
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+ else
+ serial_out(up, UART_IER, 0);
+}
+
#ifdef CONFIG_SERIAL_8250_RSA
/*
* Attempts to turn on the RSA FIFO. Returns zero on failure.
@@ -1021,7 +1031,8 @@ static void autoconfig_16550a(struct uart_8250_port *up)
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
- if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS))
+ if (!IS_ENABLED(CONFIG_SERIAL_8250_16550A_VARIANTS) &&
+ !(up->port.flags & UPF_FULL_PROBE))
return;
/*
@@ -1133,7 +1144,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
* internal UARTs.
* We're going to explicitly set the UUE bit to 0 before
* trying to write and read a 1 just to make sure it's not
- * already a 1 and maybe locked there before we even start start.
+ * already a 1 and maybe locked there before we even start.
*/
iersave = serial_in(up, UART_IER);
serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
@@ -1329,10 +1340,7 @@ static void autoconfig(struct uart_8250_port *up)
serial8250_out_MCR(up, save_mcr);
serial8250_clear_fifos(up);
serial_in(up, UART_RX);
- if (up->capabilities & UART_CAP_UUE)
- serial_out(up, UART_IER, UART_IER_UUE);
- else
- serial_out(up, UART_IER, 0);
+ serial8250_clear_IER(up);
out_unlock:
spin_unlock_irqrestore(&port->lock, flags);
@@ -2042,6 +2050,9 @@ EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ return;
+
if (port->set_mctrl)
port->set_mctrl(port, mctrl);
else
@@ -2142,10 +2153,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
- if (up->capabilities & UART_CAP_UUE)
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
+ serial8250_clear_IER(up);
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
@@ -2294,6 +2302,10 @@ int serial8250_do_startup(struct uart_port *port)
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
+ retval = up->ops->setup_irq(up);
+ if (retval)
+ goto out;
+
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
@@ -2336,9 +2348,7 @@ int serial8250_do_startup(struct uart_port *port)
}
}
- retval = up->ops->setup_irq(up);
- if (retval)
- goto out;
+ up->ops->setup_timer(up);
/*
* Now, initialize the UART
@@ -2651,7 +2661,7 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int tolerance = port->uartclk / 100;
unsigned int min;
@@ -2737,7 +2747,7 @@ EXPORT_SYMBOL_GPL(serial8250_update_uartclk);
void
serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char cval;
@@ -2875,7 +2885,7 @@ EXPORT_SYMBOL(serial8250_do_set_termios);
static void
serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
if (port->set_termios)
port->set_termios(port, termios, old);
@@ -3187,9 +3197,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (flags & UART_CONFIG_TYPE)
autoconfig(up);
- if (port->rs485.flags & SER_RS485_ENABLED)
- uart_rs485_config(port);
-
/* if access method is AU, it is a 16550 with a quirk */
if (port->type == PORT_16550A && port->iotype == UPIO_AU)
up->bugs |= UART_BUG_NOMSR;
@@ -3314,8 +3321,13 @@ static void serial8250_console_restore(struct uart_8250_port *up)
unsigned int baud, quot, frac = 0;
termios.c_cflag = port->cons->cflag;
- if (port->state->port.tty && termios.c_cflag == 0)
+ termios.c_ispeed = port->cons->ispeed;
+ termios.c_ospeed = port->cons->ospeed;
+ if (port->state->port.tty && termios.c_cflag == 0) {
termios.c_cflag = port->state->port.tty->termios.c_cflag;
+ termios.c_ispeed = port->state->port.tty->termios.c_ispeed;
+ termios.c_ospeed = port->state->port.tty->termios.c_ospeed;
+ }
baud = serial8250_get_baud_rate(port, &termios, NULL);
quot = serial8250_get_divisor(port, baud, &frac);
@@ -3383,11 +3395,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
* First save the IER then disable the interrupts
*/
ier = serial_port_in(port, UART_IER);
-
- if (up->capabilities & UART_CAP_UUE)
- serial_port_out(port, UART_IER, UART_IER_UUE);
- else
- serial_port_out(port, UART_IER, 0);
+ serial8250_clear_IER(up);
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 877173907c53..13cdd9def087 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -127,6 +127,7 @@ config SERIAL_SB1250_DUART_CONSOLE
config SERIAL_ATMEL
bool "AT91 on-chip serial port support"
+ depends on COMMON_CLK
depends on ARCH_AT91 || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
@@ -427,7 +428,7 @@ config SERIAL_PXA
config SERIAL_PXA_NON8250
bool
- depends on !SERIAL_8250
+ depends on !SERIAL_8250 || COMPILE_TEST
config SERIAL_PXA_CONSOLE
bool "Console on PXA serial port (DEPRECATED)"
@@ -1083,8 +1084,8 @@ config SERIAL_TIMBERDALE
config SERIAL_BCM63XX
tristate "Broadcom BCM63xx/BCM33xx UART support"
select SERIAL_CORE
- depends on ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
- default ARCH_BCM4908 || ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC
+ depends on ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ default ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC
help
This enables the driver for the onchip UART core found on
the following chipsets:
@@ -1325,7 +1326,7 @@ config SERIAL_FSL_LPUART
config SERIAL_FSL_LPUART_CONSOLE
bool "Console on Freescale lpuart serial port"
- depends on SERIAL_FSL_LPUART=y
+ depends on SERIAL_FSL_LPUART
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index cb791c5149a3..c2d154d78e54 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -9,6 +9,7 @@
* (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
*/
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -48,7 +49,6 @@
#define ALTERA_JTAGUART_CONTROL_WI_MSK 0x00000200
#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
-#define ALTERA_JTAGUART_CONTROL_WSPACE_OFF 16
/*
* Local per-uart structure.
@@ -59,10 +59,19 @@ struct altera_jtaguart {
unsigned long imr; /* Local IMR mirror */
};
+static unsigned int altera_jtaguart_tx_space(struct uart_port *port, u32 *ctlp)
+{
+ u32 ctl = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG);
+
+ if (ctlp)
+ *ctlp = ctl;
+
+ return FIELD_GET(ALTERA_JTAGUART_CONTROL_WSPACE_MSK, ctl);
+}
+
static unsigned int altera_jtaguart_tx_empty(struct uart_port *port)
{
- return (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) ? TIOCSER_TEMT : 0;
+ return altera_jtaguart_tx_space(port, NULL) ? TIOCSER_TEMT : 0;
}
static unsigned int altera_jtaguart_get_mctrl(struct uart_port *port)
@@ -106,8 +115,8 @@ static void altera_jtaguart_break_ctl(struct uart_port *port, int break_state)
}
static void altera_jtaguart_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
/* Just copy the old termios settings back */
if (old)
@@ -150,9 +159,7 @@ static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
pending = uart_circ_chars_pending(xmit);
if (pending > 0) {
- count = (readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) >>
- ALTERA_JTAGUART_CONTROL_WSPACE_OFF;
+ count = altera_jtaguart_tx_space(port, NULL);
if (count > pending)
count = pending;
if (count > 0) {
@@ -298,17 +305,17 @@ static struct altera_jtaguart altera_jtaguart_ports[ALTERA_JTAGUART_MAXPORTS];
#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c)
{
- unsigned long status;
unsigned long flags;
+ u32 status;
spin_lock_irqsave(&port->lock, flags);
- while (((status = readl(port->membase + ALTERA_JTAGUART_CONTROL_REG)) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ while (!altera_jtaguart_tx_space(port, &status)) {
+ spin_unlock_irqrestore(&port->lock, flags);
+
if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0) {
- spin_unlock_irqrestore(&port->lock, flags);
return; /* no connection activity */
}
- spin_unlock_irqrestore(&port->lock, flags);
+
cpu_relax();
spin_lock_irqsave(&port->lock, flags);
}
@@ -321,8 +328,7 @@ static void altera_jtaguart_console_putc(struct uart_port *port, unsigned char c
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- while ((readl(port->membase + ALTERA_JTAGUART_CONTROL_REG) &
- ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
+ while (!altera_jtaguart_tx_space(port, NULL)) {
spin_unlock_irqrestore(&port->lock, flags);
cpu_relax();
spin_lock_irqsave(&port->lock, flags);
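altera_jtaguart_tx_space() uses FIELD_GET() instead of an explicit mask-and-shift; for a contiguous mask the two are interchangeable, which is why the separate *_WSPACE_OFF constant can go. A standalone check of the equivalence for the 0xFFFF0000 write-space field (field_get is a userspace stand-in for the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define WSPACE_MSK 0xFFFF0000u

/* stand-in for FIELD_GET() with a constant, contiguous mask */
static unsigned int field_get(uint32_t mask, uint32_t reg)
{
        return (reg & mask) / (mask & -mask);   /* divide by lowest set bit */
}

int main(void)
{
        uint32_t ctl = 0x00400123;              /* 64 words of TX space */

        printf("%u %u\n", field_get(WSPACE_MSK, ctl),
               (ctl & WSPACE_MSK) >> 16);       /* both print 64 */
        return 0;
}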
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 8b749ed557c6..82f2790de28d 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -175,7 +175,7 @@ static void altera_uart_break_ctl(struct uart_port *port, int break_state)
static void altera_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
@@ -199,9 +199,8 @@ static void altera_uart_set_termios(struct uart_port *port,
*/
}
-static void altera_uart_rx_chars(struct altera_uart *pp)
+static void altera_uart_rx_chars(struct uart_port *port)
{
- struct uart_port *port = &pp->port;
unsigned char ch, flag;
unsigned short status;
@@ -246,9 +245,8 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
tty_flip_buffer_push(&port->state->port);
}
-static void altera_uart_tx_chars(struct altera_uart *pp)
+static void altera_uart_tx_chars(struct uart_port *port)
{
- struct uart_port *port = &pp->port;
struct circ_buf *xmit = &port->state->xmit;
if (port->x_char) {
@@ -272,10 +270,8 @@ static void altera_uart_tx_chars(struct altera_uart *pp)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (xmit->head == xmit->tail) {
- pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK;
- altera_uart_update_ctrl_reg(pp);
- }
+ if (uart_circ_empty(xmit))
+ altera_uart_stop_tx(port);
}
static irqreturn_t altera_uart_interrupt(int irq, void *data)
@@ -288,9 +284,9 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
spin_lock(&port->lock);
if (isr & ALTERA_UART_STATUS_RRDY_MSK)
- altera_uart_rx_chars(pp);
+ altera_uart_rx_chars(port);
if (isr & ALTERA_UART_STATUS_TRDY_MSK)
- altera_uart_tx_chars(pp);
+ altera_uart_tx_chars(port);
spin_unlock(&port->lock);
return IRQ_RETVAL(isr);
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index fae0b581ff42..af27fb8ec145 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -370,7 +370,7 @@ static void pl010_shutdown(struct uart_port *port)
static void
pl010_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int lcr_h, old_cr;
unsigned long flags;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 15f0e4d88c5a..5cdced39eafd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2030,7 +2030,7 @@ pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -2162,7 +2162,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
@@ -2777,6 +2777,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
int portnr, ret;
+ u32 val;
portnr = pl011_find_free_port();
if (portnr < 0)
@@ -2801,6 +2802,21 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.rs485_supported = pl011_rs485_supported;
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
+ if (device_property_read_u32(&dev->dev, "reg-io-width", &val) == 0) {
+ switch (val) {
+ case 1:
+ uap->port.iotype = UPIO_MEM;
+ break;
+ case 4:
+ uap->port.iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&dev->dev, "unsupported reg-io-width (%d)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
return ret;
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 9ef82d870ff2..450f4edfda0f 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -228,7 +228,7 @@ static void apbuart_shutdown(struct uart_port *port)
}
static void apbuart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios, const struct ktermios *old)
{
unsigned int cr;
unsigned long flags;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 32caeac12985..925484a42c82 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -283,7 +283,7 @@ static void ar933x_uart_get_scale_step(unsigned int clk,
static void ar933x_uart_set_termios(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct ar933x_uart_port *up =
container_of(port, struct ar933x_uart_port, port);
@@ -583,6 +583,13 @@ static const struct uart_ops ar933x_uart_ops = {
static int ar933x_config_rs485(struct uart_port *port, struct ktermios *termios,
struct serial_rs485 *rs485conf)
{
+ struct ar933x_uart_port *up =
+ container_of(port, struct ar933x_uart_port, port);
+
+ if (port->rs485.flags & SER_RS485_ENABLED)
+ gpiod_set_value(up->rts_gpiod,
+ !!(rs485conf->flags & SER_RS485_RTS_AFTER_SEND));
+
return 0;
}
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 2a09e92ef9ed..2a65ea2660e1 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -351,7 +351,7 @@ static void arc_serial_shutdown(struct uart_port *port)
static void
arc_serial_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct arc_uart_port *uart = to_arc_port(port);
unsigned int baud, uartl, uarth, hw_val;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 30ba9eef7b39..bd07f79a2df9 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
@@ -110,6 +111,7 @@ struct atmel_uart_char {
struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
+ struct clk *gclk; /* uart generic clock */
int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
@@ -150,6 +152,7 @@ struct atmel_uart_port {
u32 rts_low;
bool ms_irq_enabled;
u32 rtor; /* address of receiver timeout register if it exists */
+ bool is_usart;
bool has_frac_baudrate;
bool has_hw_timer;
struct timer_list uart_timer;
@@ -228,6 +231,11 @@ static inline int atmel_uart_is_half_duplex(struct uart_port *port)
(port->iso7816.flags & SER_ISO7816_ENABLED);
}
+static inline int atmel_error_rate(int desired_value, int actual_value)
+{
+ return 100 - (desired_value * 100) / actual_value;
+}
+
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_pdc_rx(struct uart_port *port)
{
@@ -294,9 +302,6 @@ static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (rs485conf->flags & SER_RS485_RX_DURING_TX)
@@ -306,6 +311,7 @@ static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -1827,6 +1833,7 @@ static void atmel_get_ip_name(struct uart_port *port)
*/
atmel_port->has_frac_baudrate = false;
atmel_port->has_hw_timer = false;
+ atmel_port->is_usart = false;
if (name == new_uart) {
dev_dbg(port->dev, "Uart with hw timer");
@@ -1836,6 +1843,7 @@ static void atmel_get_ip_name(struct uart_port *port)
dev_dbg(port->dev, "Usart\n");
atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
+ atmel_port->is_usart = true;
atmel_port->rtor = ATMEL_US_RTOR;
version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
@@ -1865,6 +1873,7 @@ static void atmel_get_ip_name(struct uart_port *port)
dev_dbg(port->dev, "This version is usart\n");
atmel_port->has_frac_baudrate = true;
atmel_port->has_hw_timer = true;
+ atmel_port->is_usart = true;
atmel_port->rtor = ATMEL_US_RTOR;
break;
case 0x203:
@@ -2115,6 +2124,8 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
* This is called on uart_close() or a suspend event.
*/
clk_disable_unprepare(atmel_port->clk);
+ if (__clk_is_enabled(atmel_port->gclk))
+ clk_disable_unprepare(atmel_port->gclk);
break;
default:
dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
@@ -2124,19 +2135,25 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
/*
* Change the port parameters
*/
-static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static void atmel_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned long flags;
- unsigned int old_mode, mode, imr, quot, baud, div, cd, fp = 0;
+ unsigned int old_mode, mode, imr, quot, div, cd, fp = 0;
+ unsigned int baud, actual_baud, gclk_rate;
+ int ret;
/* save the current mode register */
mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
/* reset the mode, clock divisor, parity, stop bits and data size */
- mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
- ATMEL_US_PAR | ATMEL_US_USMODE);
+ if (atmel_port->is_usart)
+ mode &= ~(ATMEL_US_NBSTOP | ATMEL_US_PAR | ATMEL_US_CHRL |
+ ATMEL_US_USCLKS | ATMEL_US_USMODE);
+ else
+ mode &= ~(ATMEL_UA_BRSRCCK | ATMEL_US_PAR | ATMEL_UA_FILTER);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
@@ -2284,10 +2301,60 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
cd = uart_get_divisor(port, baud);
}
- if (cd > 65535) { /* BRGR is 16-bit, so switch to slower clock */
+ /*
+ * If the current value of the Clock Divisor surpasses the 16 bit
+ * ATMEL_US_CD mask and the IP is USART, switch to the Peripheral
+ * Clock implicitly divided by 8.
+ * If the IP is UART however, keep the highest possible value for
+ * the CD and avoid needless division of CD, since UART IP's do not
+ * support implicit division of the Peripheral Clock.
+ */
+ if (atmel_port->is_usart && cd > ATMEL_US_CD) {
cd /= 8;
mode |= ATMEL_US_USCLKS_MCK_DIV8;
+ } else {
+ cd = min_t(unsigned int, cd, ATMEL_US_CD);
+ }
+
+ /*
+ * If there is no Fractional Part, there is a high chance that
+ * we may be able to generate a baudrate closer to the desired one
+ * if we use the GCLK as the clock source driving the baudrate
+ * generator.
+ */
+ if (!atmel_port->has_frac_baudrate) {
+ if (__clk_is_enabled(atmel_port->gclk))
+ clk_disable_unprepare(atmel_port->gclk);
+ gclk_rate = clk_round_rate(atmel_port->gclk, 16 * baud);
+ actual_baud = clk_get_rate(atmel_port->clk) / (16 * cd);
+ if (gclk_rate && abs(atmel_error_rate(baud, actual_baud)) >
+ abs(atmel_error_rate(baud, gclk_rate / 16))) {
+ clk_set_rate(atmel_port->gclk, 16 * baud);
+ ret = clk_prepare_enable(atmel_port->gclk);
+ if (ret)
+ goto gclk_fail;
+
+ if (atmel_port->is_usart) {
+ mode &= ~ATMEL_US_USCLKS;
+ mode |= ATMEL_US_USCLKS_GCLK;
+ } else {
+ mode |= ATMEL_UA_BRSRCCK;
+ }
+
+ /*
+ * Set the Clock Divisor for GCLK to 1.
+ * Since we were able to generate the smallest
+ * multiple of the desired baudrate times 16,
+ * then we surely can generate a bigger multiple
+ * with the exact error rate for an equally increased
+ * CD. Thus no need to take into account
+ * a higher value for CD.
+ */
+ cd = 1;
+ }
}
+
+gclk_fail:
quot = cd | fp << ATMEL_US_FP_OFFSET;
if (!(port->iso7816.flags & SER_ISO7816_ENABLED))
@@ -2883,6 +2950,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err;
+ atmel_port->gclk = devm_clk_get_optional(&pdev->dev, "gclk");
+ if (IS_ERR(atmel_port->gclk)) {
+ ret = PTR_ERR(atmel_port->gclk);
+ goto err_clk_disable_unprepare;
+ }
+
ret = atmel_init_port(atmel_port, pdev);
if (ret)
goto err_clk_disable_unprepare;
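The GCLK path added above picks whichever clock source yields the smaller relative baud error, using atmel_error_rate() = 100 - desired*100/actual. A standalone worked example of that comparison; the clock figures are illustrative assumptions (a 24 MHz peripheral clock with CD = 13 versus a GCLK rounded to 16 * baud), not datasheet values:

#include <stdio.h>
#include <stdlib.h>

static int error_rate(int desired, int actual)
{
        return 100 - (desired * 100) / actual;  /* same formula as the patch */
}

int main(void)
{
        int baud = 115200;
        int actual_peripheral = 115384;   /* 24 MHz / (16 * 13) */
        int actual_gclk = 115200;         /* clk_round_rate(gclk, 16*baud) / 16 */

        if (abs(error_rate(baud, actual_gclk)) <
            abs(error_rate(baud, actual_peripheral)))
                printf("use GCLK (error %d%% vs %d%%)\n",
                       error_rate(baud, actual_gclk),
                       error_rate(baud, actual_peripheral));
        return 0;
}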
diff --git a/drivers/tty/serial/atmel_serial.h b/drivers/tty/serial/atmel_serial.h
index 0d8a0f9cc5c3..87f8f7996307 100644
--- a/drivers/tty/serial/atmel_serial.h
+++ b/drivers/tty/serial/atmel_serial.h
@@ -9,6 +9,8 @@
* Based on AT91RM9200 datasheet revision E.
*/
+#include <linux/bitfield.h>
+
#ifndef ATMEL_SERIAL_H
#define ATMEL_SERIAL_H
@@ -39,39 +41,42 @@
#define ATMEL_US_MR 0x04 /* Mode Register */
#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
+#define ATMEL_US_USMODE_NORMAL FIELD_PREP(ATMEL_US_USMODE, 0)
+#define ATMEL_US_USMODE_RS485 FIELD_PREP(ATMEL_US_USMODE, 1)
+#define ATMEL_US_USMODE_HWHS FIELD_PREP(ATMEL_US_USMODE, 2)
+#define ATMEL_US_USMODE_MODEM FIELD_PREP(ATMEL_US_USMODE, 3)
+#define ATMEL_US_USMODE_ISO7816_T0 FIELD_PREP(ATMEL_US_USMODE, 4)
+#define ATMEL_US_USMODE_ISO7816_T1 FIELD_PREP(ATMEL_US_USMODE, 6)
+#define ATMEL_US_USMODE_IRDA FIELD_PREP(ATMEL_US_USMODE, 8)
#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
+#define ATMEL_US_USCLKS_MCK FIELD_PREP(ATMEL_US_USCLKS, 0)
+#define ATMEL_US_USCLKS_MCK_DIV8 FIELD_PREP(ATMEL_US_USCLKS, 1)
+#define ATMEL_US_USCLKS_GCLK FIELD_PREP(ATMEL_US_USCLKS, 2)
+#define ATMEL_US_USCLKS_SCK FIELD_PREP(ATMEL_US_USCLKS, 3)
+#define ATMEL_UA_FILTER BIT(4)
#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
+#define ATMEL_US_CHRL_5 FIELD_PREP(ATMEL_US_CHRL, 0)
+#define ATMEL_US_CHRL_6 FIELD_PREP(ATMEL_US_CHRL, 1)
+#define ATMEL_US_CHRL_7 FIELD_PREP(ATMEL_US_CHRL, 2)
+#define ATMEL_US_CHRL_8 FIELD_PREP(ATMEL_US_CHRL, 3)
#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
+#define ATMEL_US_PAR_EVEN FIELD_PREP(ATMEL_US_PAR, 0)
+#define ATMEL_US_PAR_ODD FIELD_PREP(ATMEL_US_PAR, 1)
+#define ATMEL_US_PAR_SPACE FIELD_PREP(ATMEL_US_PAR, 2)
+#define ATMEL_US_PAR_MARK FIELD_PREP(ATMEL_US_PAR, 3)
+#define ATMEL_US_PAR_NONE FIELD_PREP(ATMEL_US_PAR, 4)
+#define ATMEL_US_PAR_MULTI_DROP FIELD_PREP(ATMEL_US_PAR, 6)
#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
+#define ATMEL_US_NBSTOP_1 FIELD_PREP(ATMEL_US_NBSTOP, 0)
+#define ATMEL_US_NBSTOP_1_5 FIELD_PREP(ATMEL_US_NBSTOP, 1)
+#define ATMEL_US_NBSTOP_2 FIELD_PREP(ATMEL_US_NBSTOP, 2)
+#define ATMEL_UA_BRSRCCK BIT(12) /* Clock Selection for UART */
#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
+#define ATMEL_US_CHMODE_NORMAL FIELD_PREP(ATMEL_US_CHMODE, 0)
+#define ATMEL_US_CHMODE_ECHO FIELD_PREP(ATMEL_US_CHMODE, 1)
+#define ATMEL_US_CHMODE_LOC_LOOP FIELD_PREP(ATMEL_US_CHMODE, 2)
+#define ATMEL_US_CHMODE_REM_LOOP FIELD_PREP(ATMEL_US_CHMODE, 3)
#define ATMEL_US_MSBF BIT(16) /* Bit Order */
#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
@@ -79,7 +84,7 @@
#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
#define ATMEL_US_MAX_ITER_MASK GENMASK(26, 24) /* Max Iterations */
-#define ATMEL_US_MAX_ITER(n) (((n) << 24) & ATMEL_US_MAX_ITER_MASK)
+#define ATMEL_US_MAX_ITER(n) FIELD_PREP(ATMEL_US_MAX_ITER_MASK, (n))
#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
@@ -131,19 +136,19 @@
#define ATMEL_US_CMPR 0x90 /* Comparaison Register */
#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
-#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
-#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
+#define ATMEL_US_TXRDYM(data) FIELD_PREP(GENMASK(1, 0), (data)) /* TX Ready Mode */
+#define ATMEL_US_RXRDYM(data) FIELD_PREP(GENMASK(5, 4), (data)) /* RX Ready Mode */
#define ATMEL_US_ONE_DATA 0x0
#define ATMEL_US_TWO_DATA 0x1
#define ATMEL_US_FOUR_DATA 0x2
#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
-#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
-#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
-#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
+#define ATMEL_US_TXFTHRES(thr) FIELD_PREP(GENMASK(13, 8), (thr)) /* TX FIFO Threshold */
+#define ATMEL_US_RXFTHRES(thr) FIELD_PREP(GENMASK(21, 16), (thr)) /* RX FIFO Threshold */
+#define ATMEL_US_RXFTHRES2(thr) FIELD_PREP(GENMASK(29, 24), (thr)) /* RX FIFO Threshold2 */
#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
-#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
-#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
+#define ATMEL_US_TXFL(reg) FIELD_GET(GENMASK(5, 0), (reg)) /* TX FIFO Level */
+#define ATMEL_US_RXFL(reg) FIELD_GET(GENMASK(21, 16), (reg)) /* RX FIFO Level */
#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
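The register-definition rewrite above replaces hand-shifted field values with FIELD_PREP()/FIELD_GET(), which derive the shift from the mask itself, so a value and its field can no longer drift apart. A quick standalone check that the encodings are unchanged (GENMASK and FIELD_PREP are userspace stand-ins for the kernel macros):

#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

/* stand-in for FIELD_PREP() with a constant, contiguous mask */
#define FIELD_PREP(mask, val)  (((val) * ((mask) & -(mask))) & (mask))

#define US_USCLKS          GENMASK(5, 4)
#define US_USCLKS_MCK_DIV8 FIELD_PREP(US_USCLKS, 1)

int main(void)
{
        /* matches the old open-coded (1 << 4) encoding */
        printf("0x%x\n", US_USCLKS_MCK_DIV8);   /* prints 0x10 */
        return 0;
}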
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 53b43174aa40..5d9737c2d1f2 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -492,9 +492,8 @@ static void bcm_uart_shutdown(struct uart_port *port)
/*
* serial core request to change current uart setting
*/
-static void bcm_uart_set_termios(struct uart_port *port,
- struct ktermios *new,
- struct ktermios *old)
+static void bcm_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ const struct ktermios *old)
{
unsigned int ctl, baud, quot, ier;
unsigned long flags;
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b9b66ad31a08..404b43a5ae33 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -251,7 +251,7 @@ static void uart_clps711x_shutdown(struct uart_port *port)
static void uart_clps711x_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
u32 ubrlcr;
unsigned int baud, quot;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 8c582779cf22..0577618e78c0 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -87,7 +87,6 @@ struct uart_cpm_port {
struct gpio_desc *gpios[NUM_GPIOS];
};
-extern int cpm_uart_nr;
extern struct uart_cpm_port cpm_uart_ports[UART_NR];
/* these are located in their respective files */
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index db07d6a5d764..b4369ed45ae2 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -484,12 +484,11 @@ static void cpm_uart_shutdown(struct uart_port *port)
static void cpm_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
int baud;
unsigned long flags;
u16 cval, scval, prev_mode;
- int bits, sbits;
struct uart_cpm_port *pinfo =
container_of(port, struct uart_cpm_port, port);
smc_t __iomem *smcp = pinfo->smcp;
@@ -515,28 +514,17 @@ static void cpm_uart_set_termios(struct uart_port *port,
if (maxidl > 0x10)
maxidl = 0x10;
- /* Character length programmed into the mode register is the
- * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
- * 1 or 2 stop bits, minus 1.
- * The value 'bits' counts this for us.
- */
cval = 0;
scval = 0;
- /* byte size */
- bits = tty_get_char_size(termios->c_cflag);
- sbits = bits - 5;
-
if (termios->c_cflag & CSTOPB) {
cval |= SMCMR_SL; /* Two stops */
scval |= SCU_PSMR_SL;
- bits++;
}
if (termios->c_cflag & PARENB) {
cval |= SMCMR_PEN;
scval |= SCU_PSMR_PEN;
- bits++;
if (!(termios->c_cflag & PARODD)) {
cval |= SMCMR_PM_EVEN;
scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP);
@@ -580,12 +568,9 @@ static void cpm_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- /* Start bit has not been added (so don't, because we would just
- * subtract it later), and we need to add one for the number of
- * stops bits (there is always at least one).
- */
- bits++;
if (IS_SMC(pinfo)) {
+ unsigned int bits = tty_get_frame_size(termios->c_cflag);
+
/*
* MRBLR can be changed while an SMC/SCC is operating only
* if it is done in a single bus cycle with one 16-bit move
@@ -604,13 +589,17 @@ static void cpm_uart_set_termios(struct uart_port *port,
*/
prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN);
/* Output in *one* operation, so we don't interrupt RX/TX if they
- * were already enabled. */
- out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval |
- SMCMR_SM_UART | prev_mode);
+ * were already enabled.
+ * Character length programmed into the register is frame bits minus 1.
+ */
+ out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits - 1) | cval |
+ SMCMR_SM_UART | prev_mode);
} else {
+ unsigned int bits = tty_get_char_size(termios->c_cflag);
+
out_be16(&pinfo->sccup->scc_genscc.scc_mrblr, pinfo->rx_fifosize);
out_be16(&pinfo->sccup->scc_maxidl, maxidl);
- out_be16(&sccp->scc_psmr, (sbits << 12) | scval);
+ out_be16(&sccp->scc_psmr, (UART_LCR_WLEN(bits) << 12) | scval);
}
if (pinfo->clk)
@@ -1214,12 +1203,6 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize;
spin_lock_init(&pinfo->port.lock);
- pinfo->port.irq = irq_of_parse_and_map(np, 0);
- if (pinfo->port.irq == NO_IRQ) {
- ret = -EINVAL;
- goto out_pram;
- }
-
for (i = 0; i < NUM_GPIOS; i++) {
struct gpio_desc *gpiod;
@@ -1229,7 +1212,7 @@ static int cpm_uart_init_port(struct device_node *np,
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
- goto out_irq;
+ goto out_pram;
}
if (gpiod) {
@@ -1255,8 +1238,6 @@ static int cpm_uart_init_port(struct device_node *np,
return cpm_uart_request_port(&pinfo->port);
-out_irq:
- irq_dispose_mapping(pinfo->port.irq);
out_pram:
cpm_uart_unmap_pram(pinfo, pram);
out_mem:
@@ -1436,11 +1417,17 @@ static int cpm_uart_probe(struct platform_device *ofdev)
/* initialize the device pointer for the port */
pinfo->port.dev = &ofdev->dev;
+ pinfo->port.irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!pinfo->port.irq)
+ return -EINVAL;
+
ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo);
- if (ret)
- return ret;
+ if (!ret)
+ return uart_add_one_port(&cpm_reg, &pinfo->port);
+
+ irq_dispose_mapping(pinfo->port.irq);
- return uart_add_one_port(&cpm_reg, &pinfo->port);
+ return ret;
}
static int cpm_uart_remove(struct platform_device *ofdev)
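The cpm_uart hunk above (and the fsl_lpuart DMA-sizing hunk further down) drop the hand-counted "bits" bookkeeping in favour of tty_get_frame_size(), which returns the full frame length: one start bit, the data bits, an optional parity bit and one or two stop bits. A standalone illustration of the same arithmetic for 8N1 versus 7E2 (the real helper lives in include/linux/tty.h):

#include <stdio.h>

/* same arithmetic as tty_get_frame_size(): start + data + parity + stop */
static unsigned int frame_size(unsigned int data_bits, int parity, int two_stop)
{
        return 1 + data_bits + (parity ? 1 : 0) + (two_stop ? 2 : 1);
}

int main(void)
{
        printf("8N1: %u bits\n", frame_size(8, 0, 0));  /* 10 */
        printf("7E2: %u bits\n", frame_size(7, 1, 1));  /* 11 */
        return 0;
}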
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index af951e6a2ef4..0c0a62346f23 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -287,7 +287,7 @@ static void digicolor_uart_shutdown(struct uart_port *port)
static void digicolor_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud, divisor;
u8 config = 0;
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 2e21acf39720..829b452daee9 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -559,7 +559,7 @@ static void dz_reset(struct dz_port *dport)
}
static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct dz_port *dport = to_dport(uport);
unsigned long flags;
@@ -592,9 +592,12 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
bflag = dz_encode_baud_rate(baud);
- if (bflag < 0) { /* Try to keep unchanged. */
- baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
- bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) {
+ if (old_termios) {
+ /* Keep unchanged. */
+ baud = tty_termios_baud_rate(old_termios);
+ bflag = dz_encode_baud_rate(baud);
+ }
if (bflag < 0) { /* Resort to 9600. */
baud = 9600;
bflag = DZ_B9600;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 88d08ba1ca83..a5f380584cda 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -67,7 +67,7 @@ static void __init earlycon_init(struct earlycon_device *device,
if (*s)
earlycon->index = simple_strtoul(s, NULL, 10);
len = s - name;
- strlcpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
+ strscpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
earlycon->data = &early_console_dev;
}
@@ -123,7 +123,7 @@ static int __init parse_options(struct earlycon_device *device, char *options)
device->baud = simple_strtoul(options, NULL, 0);
length = min(strcspn(options, " ") + 1,
(size_t)(sizeof(device->options)));
- strlcpy(device->options, options, length);
+ strscpy(device->options, options, length);
}
return 0;
@@ -304,7 +304,7 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
if (options) {
early_console_dev.baud = simple_strtoul(options, NULL, 0);
- strlcpy(early_console_dev.options, options,
+ strscpy(early_console_dev.options, options,
sizeof(early_console_dev.options));
}
earlycon_init(&early_console_dev, match->name);
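The strlcpy()→strscpy() conversions here do not change behaviour at these call sites, since the return value is ignored; strscpy() is preferred because it never reads past a bounded source and reports truncation as -E2BIG instead of returning the would-be source length. A userspace model of that return convention, for illustration only:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* models the kernel's strscpy() return convention */
static long strscpy_model(char *dst, const char *src, size_t size)
{
        size_t len = strnlen(src, size);

        if (!size)
                return -E2BIG;
        if (len == size) {              /* would truncate */
                len = size - 1;
                memcpy(dst, src, len);
                dst[len] = '\0';
                return -E2BIG;
        }
        memcpy(dst, src, len + 1);
        return len;                     /* chars copied, NUL excluded */
}

int main(void)
{
        char buf[8];

        printf("%ld\n", strscpy_model(buf, "ttyS0", sizeof(buf)));        /* 5 */
        printf("%ld\n", strscpy_model(buf, "verylongname", sizeof(buf))); /* -E2BIG */
        return 0;
}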
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 98bb0c315e13..84e8153e5420 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -401,7 +401,7 @@ static void linflex_shutdown(struct uart_port *port)
static void
linflex_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned long cr, old_cr, cr1;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f6c33cd228c8..67fa113f77d4 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1284,17 +1284,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
struct dma_slave_config dma_rx_sconfig = {};
struct circ_buf *ring = &sport->rx_ring;
int ret, nent;
- int bits, baud;
struct tty_port *port = &sport->port.state->port;
struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
struct dma_chan *chan = sport->dma_rx_chan;
-
- baud = tty_get_baud_rate(tty);
-
- bits = (termios->c_cflag & CSIZE) == CS7 ? 9 : 10;
- if (termios->c_cflag & PARENB)
- bits++;
+ unsigned int bits = tty_get_frame_size(termios->c_cflag);
+ unsigned int baud = tty_get_baud_rate(tty);
/*
* Calculate length of one DMA buffer size to keep latency below
@@ -1394,9 +1389,9 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
writeb(modem, sport->port.membase + UARTMODEM);
@@ -1776,6 +1771,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
if (sport->lpuart_dma_rx_use) {
del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
+ sport->lpuart_dma_rx_use = false;
}
if (sport->lpuart_dma_tx_use) {
@@ -1784,6 +1780,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
+ sport->lpuart_dma_tx_use = false;
}
if (sport->dma_tx_chan)
@@ -1833,7 +1830,7 @@ static void lpuart32_shutdown(struct uart_port *port)
static void
lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
@@ -2073,7 +2070,7 @@ static void lpuart32_serial_setbrg(struct lpuart_port *sport,
static void
lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
@@ -2191,6 +2188,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
/* wait transmit engin complete */
+ lpuart32_write(&sport->port, 0, UARTMODIR);
lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
@@ -2723,9 +2721,6 @@ static int lpuart_probe(struct platform_device *pdev)
lpuart_reg.cons = LPUART_CONSOLE;
handler = lpuart_int;
}
- ret = uart_add_one_port(&lpuart_reg, &sport->port);
- if (ret)
- goto failed_attach_port;
ret = lpuart_global_reset(sport);
if (ret)
@@ -2735,7 +2730,9 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_get_rs485;
- uart_rs485_config(&sport->port);
+ ret = uart_add_one_port(&lpuart_reg, &sport->port);
+ if (ret)
+ goto failed_attach_port;
ret = devm_request_irq(&pdev->dev, sport->port.irq, handler, 0,
DRIVER_NAME, sport);
@@ -2745,10 +2742,10 @@ static int lpuart_probe(struct platform_device *pdev)
return 0;
failed_irq_request:
-failed_get_rs485:
-failed_reset:
uart_remove_one_port(&lpuart_reg, &sport->port);
failed_attach_port:
+failed_get_rs485:
+failed_reset:
lpuart_disable_clks(sport);
return ret;
}
@@ -2798,7 +2795,7 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
* EDMA driver during suspend will forcefully release any
* non-idle DMA channels. If port wakeup is enabled or if port
* is console port or 'no_console_suspend' is set the Rx DMA
- * cannot resume as as expected, hence gracefully release the
+ * cannot resume as expected, hence gracefully release the
* Rx DMA path before suspend and start Rx DMA path on resume.
*/
if (irq_wake) {
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 45df29947fe8..819f957b6b84 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1351,9 +1351,8 @@ static void icom_close(struct uart_port *port)
kref_put(&icom_port->adapter->kref, icom_kref_release);
}
-static void icom_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old_termios)
+static void icom_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old_termios)
{
struct icom_port *icom_port = to_icom_port(port);
int baud;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 522445a8f666..05b432dc7a85 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -380,8 +380,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
- sport->port.mctrl |= TIOCM_RTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
}
/* called with port.lock taken and irqs caller dependent */
@@ -390,8 +389,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
*ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS;
- sport->port.mctrl &= ~TIOCM_RTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
}
static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
@@ -1620,7 +1618,7 @@ static void imx_uart_flush_buffer(struct uart_port *port)
static void
imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
@@ -2347,8 +2345,6 @@ static int imx_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"low-active RTS not possible when receiver is off, enabling receiver\n");
- uart_rs485_config(&sport->port);
-
/* Disable interrupts before requesting them */
ucr1 = imx_uart_readl(sport, UCR1);
ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 655e64b26852..dd0a8915ce4f 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -873,7 +873,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
/* The port lock is not held. */
static void
ip22zilog_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_ip22zilog_port *up =
container_of(port, struct uart_ip22zilog_port, port);
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 0ea799bf8dbb..417a5b6bffc3 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -211,7 +211,8 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
default:
- return -ENXIO;
+ rc = -ENXIO;
+ goto out_kfree_brd;
}
rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd);
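In the jsm hunk the unknown-device case now sets an error code and jumps to the existing cleanup label instead of returning directly, so the board structure allocated earlier in the function is freed. Schematically, with hypothetical names:

static int foo_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct foo_board *brd;
	int rc;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		return -ENOMEM;

	switch (ent->device) {
	case FOO_DEVICE_2PORT:
		brd->maxports = 2;
		break;
	default:
		rc = -ENXIO;
		goto out_kfree_brd;	/* a bare return here would leak brd */
	}

	return 0;

out_kfree_brd:
	kfree(brd);
	return rc;
}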
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index cb58bdec2f43..222afc270c88 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -300,8 +300,8 @@ static void jsm_tty_close(struct uart_port *port)
}
static void jsm_tty_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old_termios)
+ struct ktermios *termios,
+ const struct ktermios *old_termios)
{
unsigned long lock_flags;
struct jsm_channel *channel =
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index a3120c3347dd..c892f3c7d1ab 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -8,6 +8,7 @@
* Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/device.h>
@@ -93,7 +94,6 @@
#define ASCFSTAT_RXFFLMASK 0x003F
#define ASCFSTAT_TXFFLMASK 0x3F00
#define ASCFSTAT_TXFREEMASK 0x3F000000
-#define ASCFSTAT_TXFREEOFF 24
static void lqasc_tx_chars(struct uart_port *port);
static struct ltq_uart_port *lqasc_port[MAXPORTS];
@@ -139,6 +139,13 @@ lqasc_stop_tx(struct uart_port *port)
return;
}
+static bool lqasc_tx_ready(struct uart_port *port)
+{
+ u32 fstat = __raw_readl(port->membase + LTQ_ASC_FSTAT);
+
+ return FIELD_GET(ASCFSTAT_TXFREEMASK, fstat);
+}
+
static void
lqasc_start_tx(struct uart_port *port)
{
@@ -228,8 +235,7 @@ lqasc_tx_chars(struct uart_port *port)
return;
}
- while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
- ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
+ while (lqasc_tx_ready(port)) {
if (port->x_char) {
writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
port->icount.tx++;
@@ -405,8 +411,8 @@ lqasc_shutdown(struct uart_port *port)
}
static void
-lqasc_set_termios(struct uart_port *port,
- struct ktermios *new, struct ktermios *old)
+lqasc_set_termios(struct uart_port *port, struct ktermios *new,
+ const struct ktermios *old)
{
unsigned int cflag;
unsigned int iflag;
@@ -600,15 +606,12 @@ static const struct uart_ops lqasc_pops = {
static void
lqasc_console_putchar(struct uart_port *port, unsigned char ch)
{
- int fifofree;
-
if (!port->membase)
return;
- do {
- fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
- & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
- } while (fifofree == 0);
+ while (!lqasc_tx_ready(port))
+ ;
+
writeb(ch, port->membase + LTQ_ASC_TBUF);
}
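The lantiq hunks replace the open-coded mask-and-shift with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the (compile-time constant) mask, so the separate ASCFSTAT_TXFREEOFF define can be dropped. Roughly, for a hypothetical FOO_FSTAT_TXFREE field:

#include <linux/bitfield.h>

#define FOO_FSTAT_TXFREE	GENMASK(29, 24)		/* hypothetical register field */

static bool foo_tx_ready(void __iomem *fstat_reg)
{
	u32 fstat = __raw_readl(fstat_reg);

	/* FIELD_GET(m, v) extracts (v & m) shifted down by the mask's low bit */
	return FIELD_GET(FOO_FSTAT_TXFREE, fstat);
}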
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 328b50521f14..4c0604325ee9 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -178,7 +178,7 @@ static void liteuart_shutdown(struct uart_port *port)
}
static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned long flags;
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 93140cac1ca1..ed47f4768338 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -278,6 +278,13 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
static void serial_lpc32xx_stop_tx(struct uart_port *port);
+static bool serial_lpc32xx_tx_ready(struct uart_port *port)
+{
+ u32 level = readl(LPC32XX_HSUART_LEVEL(port->membase));
+
+ return LPC32XX_HSU_TX_LEV(level) < 64;
+}
+
static void __serial_lpc32xx_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -293,8 +300,7 @@ static void __serial_lpc32xx_tx(struct uart_port *port)
goto exit_tx;
/* Transfer data */
- while (LPC32XX_HSU_TX_LEV(readl(
- LPC32XX_HSUART_LEVEL(port->membase))) < 64) {
+ while (serial_lpc32xx_tx_ready(port)) {
writel((u32) xmit->buf[xmit->tail],
LPC32XX_HSUART_FIFO(port->membase));
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -493,7 +499,7 @@ static void serial_lpc32xx_shutdown(struct uart_port *port)
/* port->lock is not held. */
static void serial_lpc32xx_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, quot;
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 0b5f21fbb53d..c69602f356fd 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -418,7 +418,7 @@ static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void
max3100_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct max3100_port *s = container_of(port,
struct max3100_port,
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index ab10ca4a45b5..fbf6e2b3161c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -906,7 +906,7 @@ static void max310x_break_ctl(struct uart_port *port, int break_state)
static void max310x_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int lcr = 0, flow = 0;
int baud;
@@ -1616,11 +1616,9 @@ static int max310x_i2c_probe(struct i2c_client *client)
regmaps, client->irq);
}
-static int max310x_i2c_remove(struct i2c_client *client)
+static void max310x_i2c_remove(struct i2c_client *client)
{
max310x_remove(&client->dev);
-
- return 0;
}
static struct i2c_driver max310x_i2c_driver = {
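The max310x hunk (and the sc16is7xx one further down) follows the tree-wide conversion of the I2C remove callback to return void: the I2C core never acted on the returned int, so remove callbacks simply stop returning a value. A sketch of the new shape, with placeholder foo_* names:

#include <linux/i2c.h>

static void foo_i2c_remove(struct i2c_client *client)
{
	foo_remove(&client->dev);	/* no status to return any more */
}

static struct i2c_driver foo_i2c_driver = {
	.driver = {
		.name = "foo",
	},
	.remove = foo_i2c_remove,
};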
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index f4aaaadd0742..b1cd9a76dd93 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -192,7 +192,7 @@ static void mcf_shutdown(struct uart_port *port)
/****************************************************************************/
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, baudclk;
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 12117b596e73..3690f5cf0f43 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -646,8 +646,8 @@ static void men_z135_shutdown(struct uart_port *port)
}
static void men_z135_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct men_z135_port *uart = to_men_z135(port);
unsigned int baud;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 6c8db19fd572..056243c12836 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -335,7 +335,7 @@ static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
static void meson_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int cflags, iflags, baud;
unsigned long flags;
@@ -667,29 +667,6 @@ static struct uart_driver meson_uart_driver = {
.cons = MESON_SERIAL_CONSOLE,
};
-static inline struct clk *meson_uart_probe_clock(struct device *dev,
- const char *id)
-{
- struct clk *clk = NULL;
- int ret;
-
- clk = devm_clk_get(dev, id);
- if (IS_ERR(clk))
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "couldn't enable clk\n");
- return ERR_PTR(ret);
- }
-
- devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
-
- return clk;
-}
-
static int meson_uart_probe_clocks(struct platform_device *pdev,
struct uart_port *port)
{
@@ -697,15 +674,15 @@ static int meson_uart_probe_clocks(struct platform_device *pdev,
struct clk *clk_pclk = NULL;
struct clk *clk_baud = NULL;
- clk_pclk = meson_uart_probe_clock(&pdev->dev, "pclk");
+ clk_pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
if (IS_ERR(clk_pclk))
return PTR_ERR(clk_pclk);
- clk_xtal = meson_uart_probe_clock(&pdev->dev, "xtal");
+ clk_xtal = devm_clk_get_enabled(&pdev->dev, "xtal");
if (IS_ERR(clk_xtal))
return PTR_ERR(clk_xtal);
- clk_baud = meson_uart_probe_clock(&pdev->dev, "baud");
+ clk_baud = devm_clk_get_enabled(&pdev->dev, "baud");
if (IS_ERR(clk_baud))
return PTR_ERR(clk_baud);
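meson_uart drops its local clock helper in favour of devm_clk_get_enabled(), which looks the clock up, prepares and enables it, and registers a devres action to disable and unprepare it again on detach, replacing the open-coded devm_add_action_or_reset() dance. A minimal sketch (foo_port and the "pclk" name are placeholders):

#include <linux/clk.h>

static int foo_probe_clock(struct device *dev, struct foo_port *port)
{
	struct clk *pclk;

	/* get + prepare_enable + automatic disable/unprepare on driver detach */
	pclk = devm_clk_get_enabled(dev, "pclk");
	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk), "couldn't enable pclk\n");

	port->uartclk = clk_get_rate(pclk);
	return 0;
}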
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 347088bb380e..c15e0d84dc7e 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -298,7 +298,8 @@ static void mlb_usio_shutdown(struct uart_port *port)
}
static void mlb_usio_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int escr, smr = MLB_USIO_SMR_SOE;
unsigned long flags, baud, quot;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 3f1986c89694..73362d4bc45d 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -101,7 +101,7 @@ struct psc_ops {
void (*cw_restore_ints)(struct uart_port *port);
unsigned int (*set_baudrate)(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
int (*clock_alloc)(struct uart_port *port);
void (*clock_relse)(struct uart_port *port);
int (*clock)(struct uart_port *port, int enable);
@@ -287,7 +287,7 @@ static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -305,7 +305,7 @@ static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port,
static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -533,7 +533,7 @@ static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -880,7 +880,7 @@ static inline void mpc5125_set_divisor(struct mpc5125_psc __iomem *psc,
static unsigned int mpc5125_psc_set_baudrate(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud;
unsigned int divisor;
@@ -1167,7 +1167,7 @@ mpc52xx_uart_shutdown(struct uart_port *port)
static void
mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned char mr1, mr2;
@@ -1364,7 +1364,7 @@ static const struct uart_ops mpc52xx_uart_ops = {
/* Interrupt handling */
/* ======================================================================== */
-static inline unsigned int
+static inline bool
mpc52xx_uart_int_rx_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
@@ -1425,7 +1425,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
return psc_ops->raw_rx_rdy(port);
}
-static inline int
+static inline bool
mpc52xx_uart_int_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -1435,13 +1435,13 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
psc_ops->write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
- return 1;
+ return true;
}
/* Nothing to do ? */
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
mpc52xx_uart_stop_tx(port);
- return 0;
+ return false;
}
/* Send chars */
@@ -1460,23 +1460,23 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
/* Maybe we're done after all */
if (uart_circ_empty(xmit)) {
mpc52xx_uart_stop_tx(port);
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static irqreturn_t
mpc5xxx_uart_process_int(struct uart_port *port)
{
unsigned long pass = ISR_PASS_LIMIT;
- unsigned int keepgoing;
+ bool keepgoing;
u8 status;
/* While we have stuff to do, we continue */
do {
/* If we don't find anything to do, we stop */
- keepgoing = 0;
+ keepgoing = false;
psc_ops->rx_clr_irq(port);
if (psc_ops->rx_rdy(port))
@@ -1495,7 +1495,7 @@ mpc5xxx_uart_process_int(struct uart_port *port)
/* Limit number of iteration */
if (!(--pass))
- keepgoing = 0;
+ keepgoing = false;
} while (keepgoing);
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index 5e9429dcc51f..2e3e6cf16817 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -358,7 +358,7 @@ static void mps2_uart_shutdown(struct uart_port *port)
static void
mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, bauddiv;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 3159889ddae1..7dd19a281579 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1263,7 +1263,7 @@ static void msm_shutdown(struct uart_port *port)
}
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 0ba0f4d9459d..ed0e763f622a 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -289,7 +289,7 @@ static void mux_shutdown(struct uart_port *port)
*/
static void
mux_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
}
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 65eaecd10b7c..ba16e1da6bd3 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -564,7 +564,7 @@ static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned in
static void mvebu_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned long flags;
unsigned int baud, min_baud, max_baud;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 1944daf8593a..d21a4f3ef2fe 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -959,7 +959,7 @@ err_out:
#define CTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_CTS)
static void mxs_auart_settermios(struct uart_port *u,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct mxs_auart_port *s = to_auart_port(u);
u32 ctrl, ctrl2, div;
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 0aa666e247d5..7d0d2718ef59 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -300,8 +300,7 @@ static void serial_omap_stop_tx(struct uart_port *port)
serial_out(up, UART_OMAP_SCR, up->scr);
res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
1 : 0;
- if (up->rts_gpiod &&
- gpiod_get_value(up->rts_gpiod) != res) {
+ if (gpiod_get_value(up->rts_gpiod) != res) {
if (port->rs485.delay_rts_after_send > 0)
mdelay(
port->rs485.delay_rts_after_send);
@@ -337,19 +336,24 @@ static void serial_omap_stop_rx(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
}
+static void serial_omap_put_char(struct uart_omap_port *up, unsigned char ch)
+{
+ serial_out(up, UART_TX, ch);
+
+ if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
+ !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ up->rs485_tx_filter_count++;
+}
+
static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
if (up->port.x_char) {
- serial_out(up, UART_TX, up->port.x_char);
+ serial_omap_put_char(up, up->port.x_char);
up->port.icount.tx++;
up->port.x_char = 0;
- if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
- !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
- up->rs485_tx_filter_count++;
-
return;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
@@ -358,12 +362,9 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
}
count = up->port.fifosize / 4;
do {
- serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+ serial_omap_put_char(up, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
up->port.icount.tx++;
- if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
- !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
- up->rs485_tx_filter_count++;
if (uart_circ_empty(xmit))
break;
@@ -397,7 +398,7 @@ static void serial_omap_start_tx(struct uart_port *port)
/* if rts not already enabled */
res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
- if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
+ if (gpiod_get_value(up->rts_gpiod) != res) {
gpiod_set_value(up->rts_gpiod, res);
if (port->rs485.delay_rts_before_send > 0)
mdelay(port->rs485.delay_rts_before_send);
@@ -802,7 +803,7 @@ static void serial_omap_uart_qos_work(struct work_struct *work)
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned char cval = 0;
@@ -1336,13 +1337,11 @@ serial_omap_config_rs485(struct uart_port *port, struct ktermios *termios,
up->ier = 0;
serial_out(up, UART_IER, 0);
- if (up->rts_gpiod) {
- /* enable / disable rts */
- val = (rs485->flags & SER_RS485_ENABLED) ?
- SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
- val = (rs485->flags & val) ? 1 : 0;
- gpiod_set_value(up->rts_gpiod, val);
- }
+ /* enable / disable rts */
+ val = (rs485->flags & SER_RS485_ENABLED) ?
+ SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
+ val = (rs485->flags & val) ? 1 : 0;
+ gpiod_set_value(up->rts_gpiod, val);
/* Enable interrupts */
up->ier = mode;
@@ -1547,11 +1546,13 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
ret = PTR_ERR(up->rts_gpiod);
if (ret == -EPROBE_DEFER)
return ret;
- /*
- * FIXME: the code historically ignored any other error than
- * -EPROBE_DEFER and just went on without GPIO.
- */
+
up->rts_gpiod = NULL;
+ up->port.rs485_supported = (const struct serial_rs485) { };
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ dev_err(dev, "disabling RS-485 (rts-gpio missing in device tree)\n");
+ memset(rs485conf, 0, sizeof(*rs485conf));
+ }
} else {
gpiod_set_consumer_name(up->rts_gpiod, "omap-serial");
}
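The omap-serial hunks fold the repeated "write the TX byte, then bump the RS-485 echo-filter count" pair into serial_omap_put_char(), and drop the rts_gpiod NULL checks, which can no longer trigger because probe now disables RS-485 outright when the RTS GPIO is missing. The helper pattern, schematically (foo_write_tx() is a placeholder):

static void foo_put_char(struct foo_port *up, unsigned char ch)
{
	foo_write_tx(up, ch);

	/* count bytes that will be echoed back while the transceiver loops TX to RX */
	if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
	    !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
		up->rs485_tx_filter_count++;
}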
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index 888e17e3f25f..fde39cc1145d 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -328,7 +328,7 @@ static void owl_uart_change_baudrate(struct owl_uart_port *owl_port,
static void owl_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct owl_uart_port *owl_port = to_owl_uart_port(port);
unsigned int baud;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 8a9065e4a903..c59ce7886579 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -898,9 +898,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
fifo_size--;
}
- bytes = min((int)CIRC_CNT(xmit->head, xmit->tail,
- UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head,
- xmit->tail, UART_XMIT_SIZE));
+ bytes = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
if (!bytes) {
dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__);
pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT);
@@ -1301,7 +1299,8 @@ static void pch_uart_shutdown(struct uart_port *port)
*bits. Update read_status_mask and ignore_status_mask to indicate
*the types of events we are interested in receiving. */
static void pch_uart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
int rtn;
unsigned int baud, parity, bits, stb;
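pch_uart (and sh-sci further down) drops the min() of CIRC_CNT() and CIRC_CNT_TO_END(): the contiguous-to-end count can never exceed the total count, so the minimum is always CIRC_CNT_TO_END(), which is exactly what one linear DMA transfer can take. For reference:

#include <linux/circ_buf.h>
#include <linux/serial_core.h>

static unsigned int foo_dma_tx_len(const struct circ_buf *xmit)
{
	/*
	 * CIRC_CNT() is the total number of queued bytes; CIRC_CNT_TO_END()
	 * is the part of that count that is contiguous up to the end of the
	 * ring, so it is always the smaller of the two.
	 */
	return CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
}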
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index f418f1de66b3..2beada66c824 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -50,7 +50,7 @@
* @irq_rx_name: irq rx name
* @irq_tx: virtual tx interrupt number
* @irq_tx_name: irq tx name
- * @cts_gpio: clear to send gpio
+ * @cts_gpiod: clear to send GPIO
* @dev: device descriptor
**/
struct pic32_sport {
@@ -65,8 +65,7 @@ struct pic32_sport {
const char *irq_tx_name;
bool enable_tx_irq;
- bool hw_flow_ctrl;
- int cts_gpio;
+ struct gpio_desc *cts_gpiod;
struct clk *clk;
@@ -158,25 +157,16 @@ static void pic32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
PIC32_UART_MODE_LPBK);
}
-/* get the state of CTS input pin for this port */
-static unsigned int get_cts_state(struct pic32_sport *sport)
-{
- /* read and invert UxCTS */
- if (gpio_is_valid(sport->cts_gpio))
- return !gpio_get_value(sport->cts_gpio);
-
- return 1;
-}
-
/* serial core request to return the state of misc UART input pins */
static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
{
struct pic32_sport *sport = to_pic32_sport(port);
unsigned int mctrl = 0;
- if (!sport->hw_flow_ctrl)
+ /* get the state of CTS input pin for this port */
+ if (!sport->cts_gpiod)
mctrl |= TIOCM_CTS;
- else if (get_cts_state(sport))
+ else if (gpiod_get_value(sport->cts_gpiod))
mctrl |= TIOCM_CTS;
/* DSR and CD are not supported in PIC32, so return 1
@@ -609,7 +599,7 @@ static void pic32_uart_shutdown(struct uart_port *port)
/* serial core request to change current uart setting */
static void pic32_uart_set_termios(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct pic32_sport *sport = to_pic32_sport(port);
unsigned int baud;
@@ -648,7 +638,7 @@ static void pic32_uart_set_termios(struct uart_port *port,
PIC32_UART_MODE_PDSEL0);
}
/* if hw flow ctrl, then the pins must be specified in device tree */
- if ((new->c_cflag & CRTSCTS) && sport->hw_flow_ctrl) {
+ if ((new->c_cflag & CRTSCTS) && sport->cts_gpiod) {
/* enable hardware flow control */
pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
PIC32_UART_MODE_UEN1);
@@ -875,7 +865,8 @@ static struct uart_driver pic32_uart_driver = {
static int pic32_uart_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct pic32_sport *sport;
int uart_idx = 0;
struct resource *res_mem;
@@ -904,25 +895,10 @@ static int pic32_uart_probe(struct platform_device *pdev)
/* Hardware flow control: gpios
* !Note: Basically, CTS is needed for reading the status.
*/
- sport->hw_flow_ctrl = false;
- sport->cts_gpio = of_get_named_gpio(np, "cts-gpios", 0);
- if (gpio_is_valid(sport->cts_gpio)) {
- sport->hw_flow_ctrl = true;
-
- ret = devm_gpio_request(sport->dev,
- sport->cts_gpio, "CTS");
- if (ret) {
- dev_err(&pdev->dev,
- "error requesting CTS GPIO\n");
- goto err;
- }
-
- ret = gpio_direction_input(sport->cts_gpio);
- if (ret) {
- dev_err(&pdev->dev, "error setting CTS GPIO\n");
- goto err;
- }
- }
+ sport->cts_gpiod = devm_gpiod_get_optional(dev, "cts", GPIOD_IN);
+ if (IS_ERR(sport->cts_gpiod))
+ return dev_err_probe(dev, PTR_ERR(sport->cts_gpiod), "error requesting CTS GPIO\n");
+ gpiod_set_consumer_name(sport->cts_gpiod, "CTS");
pic32_sports[uart_idx] = sport;
port = &sport->port;
@@ -943,7 +919,7 @@ static int pic32_uart_probe(struct platform_device *pdev)
}
#ifdef CONFIG_SERIAL_PIC32_CONSOLE
- if (uart_console(port) && (pic32_console.flags & CON_ENABLED)) {
+ if (uart_console_enabled(port)) {
/* The peripheral clock has been enabled by console_setup,
* so disable it till the port is used.
*/
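pic32_uart moves from the legacy integer GPIO calls (of_get_named_gpio(), devm_gpio_request(), gpio_direction_input()) to a single devm_gpiod_get_optional(), and the descriptor itself now doubles as the "hardware flow control available" flag, so the separate hw_flow_ctrl bool goes away. A sketch of the lookup (foo_port is a placeholder):

#include <linux/gpio/consumer.h>

static int foo_request_cts(struct device *dev, struct foo_port *sport)
{
	/* con_id "cts" maps to a "cts-gpios" DT property; NULL if absent, not an error */
	sport->cts_gpiod = devm_gpiod_get_optional(dev, "cts", GPIOD_IN);
	if (IS_ERR(sport->cts_gpiod))
		return dev_err_probe(dev, PTR_ERR(sport->cts_gpiod),
				     "error requesting CTS GPIO\n");
	gpiod_set_consumer_name(sport->cts_gpiod, "CTS");

	return 0;
}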
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index f63257b8e872..fe2e4ec423f7 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1202,7 +1202,7 @@ static void pmz_irda_setup(struct uart_pmac_port *uap, unsigned long *baud)
static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_pmac_port *uap = to_pmz(port);
unsigned long baud;
@@ -1244,7 +1244,7 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
/* The port lock is not held. */
static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_pmac_port *uap = to_pmz(port);
unsigned long flags;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 9309ffd87c8e..2d25231fad84 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -423,7 +423,7 @@ static void serial_pxa_shutdown(struct uart_port *port)
static void
serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
unsigned char cval, fcr = 0;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index f4698a064a4d..83b66b73303a 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <dt-bindings/interconnect/qcom,icc.h>
/* UART specific GENI registers */
#define SE_UART_LOOPBACK_CFG 0x22c
@@ -1005,7 +1006,8 @@ static unsigned long get_clk_div_rate(struct clk *clk, unsigned int baud,
}
static void qcom_geni_serial_set_termios(struct uart_port *uport,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud;
u32 bits_per_char;
@@ -1524,7 +1526,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
* even with no_console_suspend
*/
if (uart_console(uport)) {
- geni_icc_set_tag(&port->se, 0x3);
+ geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ACTIVE_ONLY);
geni_icc_set_bw(&port->se);
}
return uart_suspend_port(private_data->drv, uport);
@@ -1539,7 +1541,7 @@ static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
ret = uart_resume_port(private_data->drv, uport);
if (uart_console(uport)) {
- geni_icc_set_tag(&port->se, 0x7);
+ geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
geni_icc_set_bw(&port->se);
}
return ret;
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index feb2054aba37..0e387e2144fa 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -238,7 +238,7 @@ static void rda_uart_change_baudrate(struct rda_uart_port *rda_port,
static void rda_uart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct rda_uart_port *rda_port = to_rda_uart_port(port);
unsigned long flags;
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 6689d8add8f7..b81afb06f1f4 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -370,9 +370,8 @@ static void __rp2_uart_set_termios(struct rp2_uart_port *up,
up->ucode + RP2_RX_SWFLOW);
}
-static void rp2_uart_set_termios(struct uart_port *port,
- struct ktermios *new,
- struct ktermios *old)
+static void rp2_uart_set_termios(struct uart_port *port, struct ktermios *new,
+ const struct ktermios *old)
{
struct rp2_uart_port *up = port_to_up(port);
unsigned long flags;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index e64e42a19d1a..dd9e3253cab4 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -409,7 +409,7 @@ static void sa1100_shutdown(struct uart_port *port)
static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct sa1100_port *sport =
container_of(port, struct sa1100_port, port);
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index b7a4b47ce74e..77d1363029f5 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -1530,7 +1530,7 @@ static const u16 udivslot_table[16] = {
static void s3c24xx_serial_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
const struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 2cf8533ef760..c5d2b6cdcb4a 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -531,7 +531,7 @@ static void sbd_init_port(struct sbd_port *sport)
}
static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct sbd_port *sport = to_sport(uport);
unsigned int mode1 = 0, mode2 = 0, aux = 0;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 259e08cc347c..524921360ca7 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1015,7 +1015,7 @@ static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
static void sc16is7xx_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
@@ -1689,11 +1689,9 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
}
-static int sc16is7xx_i2c_remove(struct i2c_client *client)
+static void sc16is7xx_i2c_remove(struct i2c_client *client)
{
sc16is7xx_remove(&client->dev);
-
- return 0;
}
static const struct i2c_device_id sc16is7xx_i2c_id_table[] = {
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index c56de2e104d4..dd98509f52e5 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -636,7 +636,8 @@ static void sccnxp_break_ctl(struct uart_port *port, int break_state)
}
static void sccnxp_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index ad4f3567ff90..b7170cb9a544 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args)
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&tup->uport);
@@ -613,7 +613,6 @@ static unsigned int tegra_uart_tx_empty(struct uart_port *u)
static void tegra_uart_stop_tx(struct uart_port *u)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned int count;
@@ -624,7 +623,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(&tup->uport, count);
tup->tx_in_progress = 0;
}
@@ -1271,14 +1270,14 @@ static void tegra_uart_enable_ms(struct uart_port *u)
}
static void tegra_uart_set_termios(struct uart_port *u,
- struct ktermios *termios, struct ktermios *oldtermios)
+ struct ktermios *termios,
+ const struct ktermios *oldtermios)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
unsigned int baud;
unsigned long flags;
unsigned int lcr;
unsigned char char_bits;
- int symb_bit = 1;
struct clk *parent_clk = clk_get_parent(tup->uart_clk);
unsigned long parent_clk_rate = clk_get_rate(parent_clk);
int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;
@@ -1305,7 +1304,6 @@ static void tegra_uart_set_termios(struct uart_port *u,
termios->c_cflag &= ~CMSPAR;
if ((termios->c_cflag & PARENB) == PARENB) {
- symb_bit++;
if (termios->c_cflag & PARODD) {
lcr |= UART_LCR_PARITY;
lcr &= ~UART_LCR_EPAR;
@@ -1318,22 +1316,18 @@ static void tegra_uart_set_termios(struct uart_port *u,
}
char_bits = tty_get_char_size(termios->c_cflag);
- symb_bit += char_bits;
lcr &= ~UART_LCR_WLEN8;
lcr |= UART_LCR_WLEN(char_bits);
/* Stop bits */
- if (termios->c_cflag & CSTOPB) {
+ if (termios->c_cflag & CSTOPB)
lcr |= UART_LCR_STOP;
- symb_bit += 2;
- } else {
+ else
lcr &= ~UART_LCR_STOP;
- symb_bit++;
- }
tegra_uart_write(tup, lcr, UART_LCR);
tup->lcr_shadow = lcr;
- tup->symb_bit = symb_bit;
+ tup->symb_bit = tty_get_frame_size(termios->c_cflag);
/* Baud rate. */
baud = uart_get_baud_rate(u, termios, oldtermios,
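serial-tegra switches its DMA-completion paths to uart_xmit_advance(), a new serial-core helper that wraps the circular-buffer tail and accounts icount.tx in one place; tegra-tcu further down gets the same conversion. Roughly (WAKEUP_CHARS is the driver-local wakeup threshold):

#include <linux/serial_core.h>

#define WAKEUP_CHARS	256	/* conventional per-driver threshold */

static void foo_dma_tx_done(struct uart_port *port, unsigned int count)
{
	/* replaces the manual "(tail + count) & (UART_XMIT_SIZE - 1)" arithmetic */
	uart_xmit_advance(port, count);

	if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}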
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 12c87cd201a7..179ee199df34 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -49,7 +49,7 @@ static struct lock_class_key port_lock_key;
#define RS485_MAX_RTS_DELAY 100 /* msecs */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
- struct ktermios *old_termios);
+ const struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state,
enum uart_pm_state pm_state);
@@ -158,15 +158,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
- if (old != port->mctrl)
+ if (old != port->mctrl && !(port->rs485.flags & SER_RS485_ENABLED))
port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -380,7 +375,7 @@ EXPORT_SYMBOL(uart_update_timeout);
*/
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min, unsigned int max)
+ const struct ktermios *old, unsigned int min, unsigned int max)
{
unsigned int try;
unsigned int baud;
@@ -492,7 +487,7 @@ EXPORT_SYMBOL(uart_get_divisor);
/* Caller holds port mutex */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct uart_port *uport = uart_port_check(state);
struct ktermios *termios;
@@ -1391,7 +1386,7 @@ static void uart_set_rs485_termination(struct uart_port *port,
!!(rs485->flags & SER_RS485_TERMINATE_BUS));
}
-int uart_rs485_config(struct uart_port *port)
+static int uart_rs485_config(struct uart_port *port)
{
struct serial_rs485 *rs485 = &port->rs485;
int ret;
@@ -1405,7 +1400,6 @@ int uart_rs485_config(struct uart_port *port)
return ret;
}
-EXPORT_SYMBOL_GPL(uart_rs485_config);
static int uart_get_rs485_config(struct uart_port *port,
struct serial_rs485 __user *rs485)
@@ -1444,8 +1438,13 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
ret = port->rs485_config(port, &tty->termios, &rs485);
- if (!ret)
+ if (!ret) {
port->rs485 = rs485;
+
+ /* Reset RTS and other mctrl lines when disabling RS485 */
+ if (!(rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ }
spin_unlock_irqrestore(&port->lock, flags);
if (ret)
return ret;
@@ -1619,7 +1618,7 @@ static void uart_set_ldisc(struct tty_struct *tty)
}
static void uart_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport;
@@ -2352,7 +2351,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
- ops->set_mctrl(uport, 0);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
/* save mctrl so it can be restored on resume */
mctrl = uport->mctrl;
uport->mctrl = 0;
@@ -2440,7 +2440,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_change_pm(state, UART_PM_STATE_ON);
spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, 0);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, 0);
spin_unlock_irq(&uport->lock);
if (console_suspend_enabled || !uart_console(uport)) {
/* Protected by port mutex for now */
@@ -2451,7 +2452,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
if (tty)
uart_change_speed(tty, state, NULL);
spin_lock_irq(&uport->lock);
- ops->set_mctrl(uport, uport->mctrl);
+ if (!(uport->rs485.flags & SER_RS485_ENABLED))
+ ops->set_mctrl(uport, uport->mctrl);
+ else
+ uart_rs485_config(uport);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
tty_port_set_initialized(port, 1);
@@ -2497,7 +2501,7 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
"MMIO 0x%llx", (unsigned long long)port->mapbase);
break;
default:
- strlcpy(address, "*unknown*", sizeof(address));
+ strscpy(address, "*unknown*", sizeof(address));
break;
}
@@ -2558,10 +2562,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
*/
spin_lock_irqsave(&port->lock, flags);
port->mctrl &= TIOCM_DTR;
- if (port->rs485.flags & SER_RS485_ENABLED &&
- !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
- port->mctrl |= TIOCM_RTS;
- port->ops->set_mctrl(port, port->mctrl);
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
+ port->ops->set_mctrl(port, port->mctrl);
+ else
+ uart_rs485_config(port);
spin_unlock_irqrestore(&port->lock, flags);
/*
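The serial_core hunks make the RS-485 code the owner of the modem-control lines: uart_update_mctrl(), suspend/resume and uart_configure_port() only call the driver's set_mctrl() when RS-485 is disabled, and re-apply the RS-485 configuration otherwise (uart_rs485_config() becomes static now that no driver calls it from probe any more). The gate, in isolation:

static void foo_apply_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* while RS-485 owns RTS, the driver's set_mctrl() must not fight it */
	if (!(port->rs485.flags & SER_RS485_ENABLED))
		port->ops->set_mctrl(port, mctrl);
}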
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 228e380db080..e12f1dc18c38 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -594,7 +594,7 @@ static void serial_txx9_shutdown(struct uart_port *up)
static void
serial_txx9_set_termios(struct uart_port *up, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int cval, fcr = 0;
unsigned long flags;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0075a1420005..62f773286d44 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1407,10 +1407,8 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
spin_lock_irq(&port->lock);
head = xmit->head;
tail = xmit->tail;
- buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
- s->tx_dma_len = min_t(unsigned int,
- CIRC_CNT(head, tail, UART_XMIT_SIZE),
- CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+ buf = s->tx_dma_addr + tail;
+ s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
if (!s->tx_dma_len) {
/* Transmit buffer has been flushed */
spin_unlock_irq(&port->lock);
@@ -2367,7 +2365,7 @@ static void sci_reset(struct uart_port *port)
}
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i, bits;
unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index 5c3a07546a58..7fb6760b5c37 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -646,7 +646,7 @@ static int sifive_serial_clk_notifier(struct notifier_block *nb,
static void sifive_serial_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct sifive_serial_port *ssp = port_to_sifive_serial_port(port);
unsigned long flags;
@@ -945,7 +945,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "unable to find controller clock\n");
return PTR_ERR(clk);
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 4329b9c9cbf0..342a87967631 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -771,9 +771,8 @@ static void sprd_shutdown(struct uart_port *port)
devm_free_irq(port->dev, port->irq, port);
}
-static void sprd_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+static void sprd_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud, quot;
unsigned int lcr = 0, fc;
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index cce42f4c9bc2..fcecea689a0d 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -500,7 +500,7 @@ static void asc_pm(struct uart_port *port, unsigned int state,
}
static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
struct gpio_desc *gpiod;
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 2c85dbf165c4..dfdbcf092fac 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -37,7 +37,7 @@
/* Register offsets */
-static struct stm32_usart_info stm32f4_info = {
+static struct stm32_usart_info __maybe_unused stm32f4_info = {
.ofs = {
.isr = 0x00,
.rdr = 0x04,
@@ -58,7 +58,7 @@ static struct stm32_usart_info stm32f4_info = {
}
};
-static struct stm32_usart_info stm32f7_info = {
+static struct stm32_usart_info __maybe_unused stm32f7_info = {
.ofs = {
.cr1 = 0x00,
.cr2 = 0x04,
@@ -80,7 +80,7 @@ static struct stm32_usart_info stm32f7_info = {
}
};
-static struct stm32_usart_info stm32h7_info = {
+static struct stm32_usart_info __maybe_unused stm32h7_info = {
.ofs = {
.cr1 = 0x00,
.cr2 = 0x04,
@@ -131,6 +131,53 @@ static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
writel_relaxed(val, port->membase + reg);
}
+static unsigned int stm32_usart_tx_empty(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
+ return TIOCSER_TEMT;
+
+ return 0;
+}
+
+static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ }
+}
+
+static void stm32_usart_rs485_rts_disable(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct serial_rs485 *rs485conf = &port->rs485;
+
+ if (stm32_port->hw_flow_control ||
+ !(rs485conf->flags & SER_RS485_ENABLED))
+ return;
+
+ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl & ~TIOCM_RTS);
+ } else {
+ mctrl_gpio_set(stm32_port->gpios,
+ stm32_port->port.mctrl | TIOCM_RTS);
+ }
+}
+
static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
u32 delay_DDE, u32 baud)
{
@@ -214,6 +261,12 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+ /* Adjust RTS polarity in case it's driven in software */
+ if (stm32_usart_tx_empty(port))
+ stm32_usart_rs485_rts_disable(port);
+ else
+ stm32_usart_rs485_rts_enable(port);
+
return 0;
}
@@ -529,42 +582,6 @@ static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}
-static void stm32_usart_rs485_rts_enable(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
-
- if (stm32_port->hw_flow_control ||
- !(rs485conf->flags & SER_RS485_ENABLED))
- return;
-
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- }
-}
-
-static void stm32_usart_rs485_rts_disable(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- struct serial_rs485 *rs485conf = &port->rs485;
-
- if (stm32_port->hw_flow_control ||
- !(rs485conf->flags & SER_RS485_ENABLED))
- return;
-
- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl & ~TIOCM_RTS);
- } else {
- mctrl_gpio_set(stm32_port->gpios,
- stm32_port->port.mctrl | TIOCM_RTS);
- }
-}
-
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -807,17 +824,6 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
-static unsigned int stm32_usart_tx_empty(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
-
- if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
- return TIOCSER_TEMT;
-
- return 0;
-}
-
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1089,7 +1095,7 @@ static void stm32_usart_shutdown(struct uart_port *port)
static void stm32_usart_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index eafada8fb6fa..1938ba5e98c0 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -323,7 +323,7 @@ static void sunhv_shutdown(struct uart_port *port)
/* port->lock is not held. */
static void sunhv_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
unsigned int quot = uart_get_divisor(port, baud);
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 60c73662f955..7afe61a0e72e 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -333,7 +333,7 @@ static void sunplus_shutdown(struct uart_port *port)
static void sunplus_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *oldtermios)
+ const struct ktermios *oldtermios)
{
u32 ext, div, div_l, div_h, baud, lcr;
u32 clk = port->uartclk;
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 6ea52293d9f3..99608b2a2b74 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -681,27 +681,23 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
unsigned int quot)
{
unsigned char dafo;
- int bits, n, m;
+ int n, m;
/* Byte size and parity */
switch (cflag & CSIZE) {
- case CS5: dafo = SAB82532_DAFO_CHL5; bits = 7; break;
- case CS6: dafo = SAB82532_DAFO_CHL6; bits = 8; break;
- case CS7: dafo = SAB82532_DAFO_CHL7; bits = 9; break;
- case CS8: dafo = SAB82532_DAFO_CHL8; bits = 10; break;
+ case CS5: dafo = SAB82532_DAFO_CHL5; break;
+ case CS6: dafo = SAB82532_DAFO_CHL6; break;
+ case CS7: dafo = SAB82532_DAFO_CHL7; break;
+ case CS8: dafo = SAB82532_DAFO_CHL8; break;
/* Never happens, but GCC is too dumb to figure it out */
- default: dafo = SAB82532_DAFO_CHL5; bits = 7; break;
+ default: dafo = SAB82532_DAFO_CHL5; break;
}
- if (cflag & CSTOPB) {
+ if (cflag & CSTOPB)
dafo |= SAB82532_DAFO_STOP;
- bits++;
- }
- if (cflag & PARENB) {
+ if (cflag & PARENB)
dafo |= SAB82532_DAFO_PARE;
- bits++;
- }
if (cflag & PARODD) {
dafo |= SAB82532_DAFO_PAR_ODD;
@@ -776,7 +772,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
/* port->lock is not held. */
static void sunsab_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_sunsab_port *up =
container_of(port, struct uart_sunsab_port, port);
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 84d545e5a8c7..9ea7e567540d 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -897,7 +897,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
static void
sunsu_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
unsigned int baud, quot;
@@ -1217,13 +1217,13 @@ static int sunsu_kbd_ms_init(struct uart_sunsu_port *up)
serio->id.type = SERIO_RS232;
if (up->su_type == SU_PORT_KBD) {
serio->id.proto = SERIO_SUNKBD;
- strlcpy(serio->name, "sukbd", sizeof(serio->name));
+ strscpy(serio->name, "sukbd", sizeof(serio->name));
} else {
serio->id.proto = SERIO_SUN;
serio->id.extra = 1;
- strlcpy(serio->name, "sums", sizeof(serio->name));
+ strscpy(serio->name, "sums", sizeof(serio->name));
}
- strlcpy(serio->phys,
+ strscpy(serio->phys,
(!(up->port.line & 1) ? "su/serio0" : "su/serio1"),
sizeof(serio->phys));
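sunsu and sunzilog (like serial_core above) move from strlcpy() to strscpy(): both always NUL-terminate, but strscpy() returns the number of bytes copied, or -E2BIG on truncation, instead of the would-be source length, and it never reads the source past the destination size. Usage is a drop-in:

#include <linux/string.h>

static void foo_set_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret;

	ret = strscpy(dst, src, dst_size);	/* dst is always NUL-terminated */
	if (ret == -E2BIG)
		pr_warn("name \"%s\" truncated\n", src);
}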
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index c14275d83b0b..87425290687d 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -938,7 +938,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
/* The port lock is not held. */
static void
sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct uart_sunzilog_port *up =
container_of(port, struct uart_sunzilog_port, port);
@@ -1307,13 +1307,13 @@ static void sunzilog_register_serio(struct uart_sunzilog_port *up)
serio->id.type = SERIO_RS232;
if (up->flags & SUNZILOG_FLAG_CONS_KEYB) {
serio->id.proto = SERIO_SUNKBD;
- strlcpy(serio->name, "zskbd", sizeof(serio->name));
+ strscpy(serio->name, "zskbd", sizeof(serio->name));
} else {
serio->id.proto = SERIO_SUN;
serio->id.extra = 1;
- strlcpy(serio->name, "zsms", sizeof(serio->name));
+ strscpy(serio->name, "zsms", sizeof(serio->name));
}
- strlcpy(serio->phys,
+ strscpy(serio->phys,
((up->flags & SUNZILOG_FLAG_CONS_KEYB) ?
"zs/serio0" : "zs/serio1"),
sizeof(serio->phys));
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 4877c54c613d..23500b342da7 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -101,7 +101,7 @@ static void tegra_tcu_uart_start_tx(struct uart_port *port)
break;
tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count);
- xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+ uart_xmit_advance(port, count);
}
uart_write_wakeup(port);
@@ -126,7 +126,7 @@ static void tegra_tcu_uart_shutdown(struct uart_port *port)
static void tegra_tcu_uart_set_termios(struct uart_port *port,
struct ktermios *new,
- struct ktermios *old)
+ const struct ktermios *old)
{
}
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 08941eabe7b1..bb19ed012def 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -275,8 +275,8 @@ static int get_bindex(int baud)
}
static void timbuart_set_termios(struct uart_port *port,
- struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned int baud;
short bindex;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 880e2afbb97b..eca41ac5477c 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -314,8 +314,9 @@ static void ulite_shutdown(struct uart_port *port)
clk_disable(pdata->clk);
}
-static void ulite_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static void ulite_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ const struct ktermios *old)
{
unsigned long flags;
struct uartlite_data *pdata = port->private_data;
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 3cc9ef08455c..82cf14dd3d43 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -843,7 +843,8 @@ static void qe_uart_shutdown(struct uart_port *port)
* Set the serial port parameters.
*/
static void qe_uart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
@@ -853,13 +854,6 @@ static void qe_uart_set_termios(struct uart_port *port,
u16 upsmr = ioread16be(&uccp->upsmr);
struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
u16 supsmr = ioread16be(&uccup->supsmr);
- u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
-
- /* Character length programmed into the mode register is the
- * sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
- * 1 or 2 stop bits, minus 1.
- * The value 'bits' counts this for us.
- */
/* byte size */
upsmr &= UCC_UART_UPSMR_CL_MASK;
@@ -869,22 +863,18 @@ static void qe_uart_set_termios(struct uart_port *port,
case CS5:
upsmr |= UCC_UART_UPSMR_CL_5;
supsmr |= UCC_UART_SUPSMR_CL_5;
- char_length += 5;
break;
case CS6:
upsmr |= UCC_UART_UPSMR_CL_6;
supsmr |= UCC_UART_SUPSMR_CL_6;
- char_length += 6;
break;
case CS7:
upsmr |= UCC_UART_UPSMR_CL_7;
supsmr |= UCC_UART_SUPSMR_CL_7;
- char_length += 7;
break;
default: /* case CS8 */
upsmr |= UCC_UART_UPSMR_CL_8;
supsmr |= UCC_UART_SUPSMR_CL_8;
- char_length += 8;
break;
}
@@ -892,13 +882,11 @@ static void qe_uart_set_termios(struct uart_port *port,
if (termios->c_cflag & CSTOPB) {
upsmr |= UCC_UART_UPSMR_SL;
supsmr |= UCC_UART_SUPSMR_SL;
- char_length++; /* + SL */
}
if (termios->c_cflag & PARENB) {
upsmr |= UCC_UART_UPSMR_PEN;
supsmr |= UCC_UART_SUPSMR_PEN;
- char_length++; /* + PEN */
if (!(termios->c_cflag & PARODD)) {
upsmr &= ~(UCC_UART_UPSMR_RPM_MASK |
@@ -953,7 +941,7 @@ static void qe_uart_set_termios(struct uart_port *port,
iowrite16be(upsmr, &uccp->upsmr);
if (soft_uart) {
iowrite16be(supsmr, &uccup->supsmr);
- iowrite8(char_length, &uccup->rx_length);
+ iowrite8(tty_get_frame_size(termios->c_cflag), &uccup->rx_length);
/* Soft-UART requires a 1X multiplier for TX */
qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
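ucc_uart (like serial-tegra above) drops the hand-maintained char_length counter in favour of tty_get_frame_size(), which computes the full frame length from c_cflag: one start bit, the data bits, an optional parity bit and one or two stop bits. A sketch (struct foo_regs and its rx_length field are hypothetical):

#include <linux/tty.h>

static void foo_program_rx_length(struct foo_regs __iomem *regs,
				  const struct ktermios *termios)
{
	/* e.g. CS8 | PARENB | CSTOPB -> 1 + 8 + 1 + 2 = 12 bit times per character */
	iowrite8(tty_get_frame_size(termios->c_cflag), &regs->rx_length);
}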
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 6f08136ce78a..10fbdb09965f 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -187,6 +187,13 @@ static void handle_rx(struct uart_port *port)
tty_flip_buffer_push(tport);
}
+static unsigned int vt8500_tx_empty(struct uart_port *port)
+{
+ unsigned int idx = vt8500_read(port, VT8500_URFIDX) & 0x1f;
+
+ return idx < 16 ? TIOCSER_TEMT : 0;
+}
+
static void handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
@@ -201,7 +208,7 @@ static void handle_tx(struct uart_port *port)
return;
}
- while ((vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16) {
+ while (vt8500_tx_empty(port)) {
if (uart_circ_empty(xmit))
break;
@@ -260,12 +267,6 @@ static irqreturn_t vt8500_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static unsigned int vt8500_tx_empty(struct uart_port *port)
-{
- return (vt8500_read(port, VT8500_URFIDX) & 0x1f) < 16 ?
- TIOCSER_TEMT : 0;
-}
-
static unsigned int vt8500_get_mctrl(struct uart_port *port)
{
unsigned int usr;
@@ -355,7 +356,7 @@ static void vt8500_shutdown(struct uart_port *port)
static void vt8500_set_termios(struct uart_port *port,
struct ktermios *termios,
- struct ktermios *old)
+ const struct ktermios *old)
{
struct vt8500_port *vt8500_port =
container_of(port, struct vt8500_port, uart);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 9e01fe6c0ab8..2eff7cff57c4 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -2,7 +2,7 @@
/*
* Cadence UART driver (found in Xilinx Zynq)
*
- * 2011 - 2014 (C) Xilinx Inc.
+ * Copyright (c) 2011 - 2014 Xilinx, Inc.
*
* This driver has originally been pushed by Xilinx using a Zynq-branding. This
* still shows in the naming of this file, the kconfig symbols and some symbols
@@ -361,6 +361,8 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
+ isrstatus &= port->read_status_mask;
+ isrstatus &= ~port->ignore_status_mask;
/*
* Skip RX processing if RX is disabled as RXEMPTY will never be set
* as read bytes will not be removed from the FIFO.
@@ -675,7 +677,8 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
* @old: Values of the previously saved termios structure
*/
static void cdns_uart_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
+ struct ktermios *termios,
+ const struct ktermios *old)
{
u32 cval = 0;
unsigned int baud, minbaud, maxbaud;
@@ -1130,8 +1133,35 @@ static struct uart_driver cdns_uart_uart_driver;
*/
static void cdns_uart_console_putchar(struct uart_port *port, unsigned char ch)
{
- while (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)
+ unsigned int ctrl_reg;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (1) {
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
+ if (!(ctrl_reg & CDNS_UART_CR_TX_DIS))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_warn(port->dev,
+ "timeout waiting for Enable\n");
+ return;
+ }
+ cpu_relax();
+ }
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (1) {
+ ctrl_reg = readl(port->membase + CDNS_UART_SR);
+
+ if (!(ctrl_reg & CDNS_UART_SR_TXFULL))
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_warn(port->dev,
+ "timeout waiting for TX fifo\n");
+ return;
+ }
cpu_relax();
+ }
writel(ch, port->membase + CDNS_UART_FIFO);
}
@@ -1329,12 +1359,20 @@ static int cdns_uart_resume(struct device *device)
unsigned long flags;
u32 ctrl_reg;
int may_wake;
+ int ret;
may_wake = device_may_wakeup(device);
if (console_suspend_enabled && uart_console(port) && !may_wake) {
- clk_enable(cdns_uart->pclk);
- clk_enable(cdns_uart->uartclk);
+ ret = clk_enable(cdns_uart->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(cdns_uart->uartclk);
+ if (ret) {
+ clk_disable(cdns_uart->pclk);
+ return ret;
+ }
spin_lock_irqsave(&port->lock, flags);
@@ -1383,9 +1421,17 @@ static int __maybe_unused cdns_runtime_resume(struct device *dev)
{
struct uart_port *port = dev_get_drvdata(dev);
struct cdns_uart *cdns_uart = port->private_data;
+ int ret;
- clk_enable(cdns_uart->pclk);
- clk_enable(cdns_uart->uartclk);
+ ret = clk_enable(cdns_uart->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(cdns_uart->uartclk);
+ if (ret) {
+ clk_disable(cdns_uart->pclk);
+ return ret;
+ }
return 0;
};
@@ -1551,6 +1597,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
+ port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
+ CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
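The two bounded waits added to cdns_uart_console_putchar() are open-coded jiffies loops. For the TX-FIFO wait, a hypothetical equivalent using readl_poll_timeout_atomic() from <linux/iopoll.h> is sketched below; the helper name is made up, and whether ktime-based polling is acceptable that early in the console path is likely why the patch sticks with jiffies.

#include <linux/iopoll.h>

/* Hypothetical alternative to the second loop above: poll the status
 * register until the TX FIFO has room, give up after one second.
 * Assumes the driver's CDNS_UART_SR/CDNS_UART_SR_TXFULL definitions.
 */
static void example_cdns_wait_tx_room(struct uart_port *port)
{
	u32 sr;

	if (readl_poll_timeout_atomic(port->membase + CDNS_UART_SR, sr,
				      !(sr & CDNS_UART_SR_TXFULL),
				      0, 1000000))
		dev_warn(port->dev, "timeout waiting for TX fifo\n");
}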
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 5bc58591665a..688db7d8b748 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -846,7 +846,7 @@ static void zs_reset(struct zs_port *zport)
}
static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct zs_port *zport = to_zport(uport);
struct zs_scc *scc = zport->scc;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 9bc2a9265277..25e9befdda3a 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -91,7 +91,6 @@ static char *driver_name = "SyncLink GT";
static char *slgt_driver_name = "synclink_gt";
static char *tty_dev_prefix = "ttySLG";
MODULE_LICENSE("GPL");
-#define MGSL_MAGIC 0x5401
#define MAX_DEVICES 32
static const struct pci_device_id pci_table[] = {
@@ -215,8 +214,6 @@ struct slgt_info {
struct slgt_info *next_device; /* device list link */
- int magic;
-
char device_name[25];
struct pci_dev *pdev;
@@ -554,10 +551,6 @@ static inline int sanity_check(struct slgt_info *info, char *devname, const char
printk("null struct slgt_info for (%s) in %s\n", devname, name);
return 1;
}
- if (info->magic != MGSL_MAGIC) {
- printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
- return 1;
- }
#else
if (!info)
return 1;
@@ -707,7 +700,8 @@ static void hangup(struct tty_struct *tty)
wake_up_interruptible(&info->port.open_wait);
}
-static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
struct slgt_info *info = tty->driver_data;
unsigned long flags;
@@ -3498,7 +3492,6 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
} else {
tty_port_init(&info->port);
info->port.ops = &slgt_port_ops;
- info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, bh_handler);
info->max_frame_size = 4096;
info->base_clock = 14745600;
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index f310a8274df1..1c08c9b67b16 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -73,7 +73,7 @@ void tty_buffer_set_lock_subclass(struct tty_port *port);
bool tty_buffer_restart_work(struct tty_port *port);
bool tty_buffer_cancel_work(struct tty_port *port);
void tty_buffer_flush_work(struct tty_port *port);
-speed_t tty_termios_input_baud_rate(struct ktermios *termios);
+speed_t tty_termios_input_baud_rate(const struct ktermios *termios);
void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
int tty_ldisc_reinit(struct tty_struct *tty, int disc);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 3cd99ed7c710..f9b49939c27b 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -49,13 +49,13 @@ static int n_baud_table = ARRAY_SIZE(baud_table);
*
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data. Device drivers can call this
- * function but should use ->c_[io]speed directly as they are updated.
+ * structure. Device drivers can call this function but should use
+ * ->c_[io]speed directly as they are updated.
*
* Locking: none
*/
-speed_t tty_termios_baud_rate(struct ktermios *termios)
+speed_t tty_termios_baud_rate(const struct ktermios *termios)
{
unsigned int cbaud;
@@ -67,11 +67,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~CBAUDEX;
- else
- cbaud += 15;
+ cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
@@ -83,30 +79,26 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
*
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
- * structure. May change the termios data. Device drivers can call this
- * function but should use ->c_[io]speed directly as they are updated.
+ * structure. Device drivers can call this function but should use
+ * ->c_[io]speed directly as they are updated.
*
* Locking: none
*/
-speed_t tty_termios_input_baud_rate(struct ktermios *termios)
+speed_t tty_termios_input_baud_rate(const struct ktermios *termios)
{
unsigned int cbaud = (termios->c_cflag >> IBSHIFT) & CBAUD;
if (cbaud == B0)
return tty_termios_baud_rate(termios);
- /* Magic token for arbitrary speed via c_ispeed*/
+ /* Magic token for arbitrary speed via c_ispeed */
if (cbaud == BOTHER)
return termios->c_ispeed;
if (cbaud & CBAUDEX) {
cbaud &= ~CBAUDEX;
-
- if (cbaud < 1 || cbaud + 15 > n_baud_table)
- termios->c_cflag &= ~(CBAUDEX << IBSHIFT);
- else
- cbaud += 15;
+ cbaud += 15;
}
return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
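With the termios argument now const, tty_termios_baud_rate() can no longer rewrite c_cflag when a CBAUDEX code is out of range, so the clamp is dropped and a bad index simply fails the final bounds test and yields 0. A self-contained illustration of the index mapping, assuming the conventional baud_table layout (B0..B38400 at indices 0..15, CBAUDEX codes continuing from 16); the names here are illustrative:

#include <asm/termbits.h>	/* CBAUD, CBAUDEX */

static const unsigned int example_baud_table[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
	4800, 9600, 19200, 38400,		/* indices 0..15 */
	57600, 115200, 230400, 460800,		/* CBAUDEX|1 .. CBAUDEX|4 */
};
#define EXAMPLE_N_BAUD (sizeof(example_baud_table) / sizeof(example_baud_table[0]))

static unsigned int example_baud_lookup(unsigned int cflag)
{
	unsigned int cbaud = cflag & CBAUD;

	if (cbaud & CBAUDEX)				/* B57600 and above */
		cbaud = (cbaud & ~CBAUDEX) + 15;	/* CBAUDEX|1 -> 16, ... */

	return cbaud >= EXAMPLE_N_BAUD ? 0 : example_baud_table[cbaud];
}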
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9fdecc795b6b..5e287dedce01 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -470,7 +470,6 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
while (head) {
struct tty_buffer *next;
- unsigned char *p, *f = NULL;
unsigned int count;
/*
@@ -489,11 +488,16 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
continue;
}
- p = char_buf_ptr(head, head->lookahead);
- if (~head->flags & TTYB_NORMAL)
- f = flag_buf_ptr(head, head->lookahead);
+ if (port->client_ops->lookahead_buf) {
+ unsigned char *p, *f = NULL;
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ }
- port->client_ops->lookahead_buf(port, p, f, count);
head->lookahead += count;
}
}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 82a8855981f7..de06c3c2ff70 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -99,8 +99,8 @@
#include <linux/serial.h>
#include <linux/ratelimit.h>
#include <linux/compat.h>
-
#include <linux/uaccess.h>
+#include <linux/termios_internal.h>
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
@@ -170,7 +170,6 @@ static void free_tty_struct(struct tty_struct *tty)
tty_ldisc_deinit(tty);
put_device(tty->dev);
kvfree(tty->write_buf);
- tty->magic = 0xDEADDEAD;
kfree(tty);
}
@@ -265,11 +264,6 @@ static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
imajor(inode), iminor(inode), routine);
return 1;
}
- if (tty->magic != TTY_MAGIC) {
- pr_warn("(%d:%d): %s: bad magic number\n",
- imajor(inode), iminor(inode), routine);
- return 1;
- }
#endif
return 0;
}
@@ -1533,7 +1527,6 @@ static void release_one_tty(struct work_struct *work)
if (tty->ops->cleanup)
tty->ops->cleanup(tty);
- tty->magic = 0;
tty_driver_kref_put(driver);
module_put(owner);
@@ -3093,7 +3086,6 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
return NULL;
kref_init(&tty->kref);
- tty->magic = TTY_MAGIC;
if (tty_ldisc_init(tty)) {
kfree(tty);
return NULL;
@@ -3329,7 +3321,6 @@ struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
return ERR_PTR(-ENOMEM);
kref_init(&driver->kref);
- driver->magic = TTY_DRIVER_MAGIC;
driver->num = lines;
driver->owner = owner;
driver->flags = flags;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 2a76b330e108..ce511557b98b 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -21,6 +21,7 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/compat.h>
+#include <linux/termios_internal.h>
#include "tty.h"
#include <asm/io.h>
@@ -219,7 +220,7 @@ EXPORT_SYMBOL(tty_wait_until_sent);
* Termios Helper Methods
*/
-static void unset_locked_termios(struct tty_struct *tty, struct ktermios *old)
+static void unset_locked_termios(struct tty_struct *tty, const struct ktermios *old)
{
struct ktermios *termios = &tty->termios;
struct ktermios *locked = &tty->termios_locked;
@@ -249,7 +250,7 @@ static void unset_locked_termios(struct tty_struct *tty, struct ktermios *old)
* in some cases where only minimal reconfiguration is supported
*/
-void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old)
+void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old)
{
/* The bits a dumb device handles in software. Smart devices need
to always provide a set_termios method */
@@ -374,6 +375,80 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
}
EXPORT_SYMBOL_GPL(tty_set_termios);
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+__weak int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ struct termio v;
+
+ if (copy_from_user(&v, termio, sizeof(struct termio)))
+ return -EFAULT;
+
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | v.c_iflag;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | v.c_oflag;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | v.c_cflag;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | v.c_lflag;
+ termios->c_line = (0xffff0000 & termios->c_lflag) | v.c_line;
+ memcpy(termios->c_cc, v.c_cc, NCC);
+ return 0;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+__weak int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ struct termio v;
+ memset(&v, 0, sizeof(struct termio));
+ v.c_iflag = termios->c_iflag;
+ v.c_oflag = termios->c_oflag;
+ v.c_cflag = termios->c_cflag;
+ v.c_lflag = termios->c_lflag;
+ v.c_line = termios->c_line;
+ memcpy(v.c_cc, termios->c_cc, NCC);
+ return copy_to_user(termio, &v, sizeof(struct termio));
+}
+
+#ifdef TCGETS2
+__weak int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios2));
+}
+__weak int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios2));
+}
+__weak int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+__weak int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+
+#else
+
+__weak int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+__weak int kernel_termios_to_user_termios(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+#endif /* TCGETS2 */
+
/**
* set_termios - set termios values for a tty
* @tty: terminal device
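The __weak helpers above provide the default termio/termios/termios2 copy routines behind ioctls such as TCGETA and TCSETS2 on architectures that do not override them. From user space, the termios2 path is the usual way to request an arbitrary baud rate; a small illustrative program follows (the device path and the 250000 rate are only examples):

#include <asm/termbits.h>	/* struct termios2, CBAUD, BOTHER */
#include <asm/ioctls.h>		/* TCGETS2, TCSETS2 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct termios2 tio;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* example device */

	if (fd < 0 || ioctl(fd, TCGETS2, &tio))
		return 1;

	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;			/* "arbitrary speed" marker */
	tio.c_ispeed = tio.c_ospeed = 250000;

	if (ioctl(fd, TCSETS2, &tio))
		perror("TCSETS2");
	return 0;
}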
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 393518a24cfe..784e46a0a3b1 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -14,8 +14,6 @@
void tty_lock(struct tty_struct *tty)
{
- if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
- return;
tty_kref_get(tty);
mutex_lock(&tty->legacy_mutex);
}
@@ -25,8 +23,6 @@ int tty_lock_interruptible(struct tty_struct *tty)
{
int ret;
- if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
- return -EIO;
tty_kref_get(tty);
ret = mutex_lock_interruptible(&tty->legacy_mutex);
if (ret)
@@ -36,8 +32,6 @@ int tty_lock_interruptible(struct tty_struct *tty)
void tty_unlock(struct tty_struct *tty)
{
- if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
- return;
mutex_unlock(&tty->legacy_mutex);
tty_kref_put(tty);
}
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index e11383ae1e7e..34ba6e54789a 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -11,6 +11,7 @@
#include <linux/sysfs.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/termios_internal.h>
#include <asm/vio.h>
#include <asm/ldc.h>
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ae9c926acd6f..981d2bfcf9a5 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -154,10 +154,10 @@ static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
+static void unblank_screen(void);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
-static int printable; /* Is console ready for printing? */
int default_utf8 = true;
module_param(default_utf8, int, S_IRUGO | S_IWUSR);
int global_cursor_default = -1;
@@ -3084,9 +3084,9 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
ushort start_x, cnt;
int kmsg_console;
- /* console busy or not yet initialized */
- if (!printable)
- return;
+ WARN_CONSOLE_UNLOCKED();
+
+ /* this protects against concurrent oops only */
if (!spin_trylock(&printing_lock))
return;
@@ -3537,7 +3537,6 @@ static int __init con_init(void)
pr_info("Console: %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
display_desc, vc->vc_cols, vc->vc_rows);
- printable = 1;
console_unlock();
@@ -4452,7 +4451,7 @@ EXPORT_SYMBOL(do_unblank_screen);
* call it with 1 as an argument and so force a mode restore... that may kill
* X or at least garbage the screen but would also make the Oops visible...
*/
-void unblank_screen(void)
+static void unblank_screen(void)
{
do_unblank_screen(0);
}
@@ -4662,9 +4661,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
- else if (vc->vc_sw->con_font_set)
+ else if (vc->vc_sw->con_font_set) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
- else
+ } else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
@@ -4691,9 +4692,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
console_unlock();
return -EINVAL;
}
- if (vc->vc_sw->con_font_default)
+ if (vc->vc_sw->con_font_default) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
- else
+ } else
rc = -ENOSYS;
console_unlock();
if (!rc) {
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 0a088b47d557..53aea56d1de1 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -225,12 +225,13 @@ static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
unsigned int wb_enable;
ssize_t res;
- if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
+ if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
+ && ufshcd_enable_wb_if_scaling_up(hba))) {
/*
* If the platform supports UFSHCD_CAP_CLK_SCALING, turn WB
* on/off will be done while clock scaling up/down.
*/
- dev_warn(dev, "To control WB through wb_on is not allowed!\n");
+ dev_warn(dev, "It is not allowed to configure WB!\n");
return -EOPNOTSUPP;
}
@@ -254,6 +255,49 @@ out:
return res < 0 ? res : count;
}
+static ssize_t enable_wb_buf_flush_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
+}
+
+static ssize_t enable_wb_buf_flush_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int enable_wb_buf_flush;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
+ dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (kstrtouint(buf, 0, &enable_wb_buf_flush))
+ return -EINVAL;
+
+ if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ ufshcd_rpm_get_sync(hba);
+ res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
+ ufshcd_rpm_put_sync(hba);
+
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -262,6 +306,7 @@ static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
+static DEVICE_ATTR_RW(enable_wb_buf_flush);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -272,6 +317,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
&dev_attr_wb_on.attr,
+ &dev_attr_enable_wb_buf_flush.attr,
NULL
};
@@ -279,6 +325,40 @@ static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
+static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
+}
+
+static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
+}
+
+static DEVICE_ATTR_RO(clock_scaling);
+static DEVICE_ATTR_RO(write_booster);
+
+/*
+ * See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
+ * group.
+ */
+static struct attribute *ufs_sysfs_capabilities_attrs[] = {
+ &dev_attr_clock_scaling.attr,
+ &dev_attr_write_booster.attr,
+ NULL
+};
+
+static const struct attribute_group ufs_sysfs_capabilities_group = {
+ .name = "capabilities",
+ .attrs = ufs_sysfs_capabilities_attrs,
+};
+
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1134,6 +1214,7 @@ static const struct attribute_group ufs_sysfs_attributes_group = {
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
+ &ufs_sysfs_capabilities_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 8f67db202d7b..f68ca33f6ac7 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -26,6 +26,12 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
return 0;
}
+static inline bool ufshcd_is_wb_buf_flush_allowed(struct ufs_hba *hba)
+{
+ return ufshcd_is_wb_allowed(hba) &&
+ !(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL);
+}
+
#ifdef CONFIG_SCSI_UFS_HWMON
void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
void ufs_hwmon_remove(struct ufs_hba *hba);
@@ -36,6 +42,11 @@ static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
#endif
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode,
+ enum desc_idn idn, u8 index,
+ u8 selector,
+ u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a202d7d5240d..7256e6c43ca6 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/sched/clock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
@@ -265,8 +266,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
+ bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -286,16 +287,17 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
-static inline void ufshcd_wb_config(struct ufs_hba *hba)
+static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
return;
ufshcd_wb_toggle(hba, true);
- ufshcd_wb_toggle_flush_during_h8(hba, true);
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, true);
+ ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
+
+ if (ufshcd_is_wb_buf_flush_allowed(hba))
+ ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -457,7 +459,7 @@ static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
if (e->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- e->val[p], ktime_to_us(e->tstamp[p]));
+ e->val[p], div_u64(e->tstamp[p], 1000));
found = true;
}
@@ -502,9 +504,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
lrbp = &hba->lrb[tag];
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, ktime_to_us(lrbp->issue_time_stamp));
+ tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, ktime_to_us(lrbp->compl_time_stamp));
+ tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -566,10 +568,10 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
dev_err(hba->dev,
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- ktime_to_us(hba->ufs_stats.last_intr_ts),
+ div_u64(hba->ufs_stats.last_intr_ts, 1000),
hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
@@ -1298,9 +1300,11 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
+ if (ufshcd_enable_wb_if_scaling_up(hba)) {
+ downgrade_write(&hba->clk_scaling_lock);
+ is_writelock = false;
+ ufshcd_wb_toggle(hba, scale_up);
+ }
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, is_writelock);
@@ -2140,7 +2144,9 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
+ lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
+ lrbp->compl_time_stamp_local_clock = 0;
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -2701,9 +2707,9 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
* Associate the UFS controller queue with the default and poll HCTX types.
* Initialize the mq_map[] arrays.
*/
-static int ufshcd_map_queues(struct Scsi_Host *shost)
+static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i, ret;
+ int i;
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -2720,11 +2726,8 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
WARN_ON_ONCE(true);
}
map->queue_offset = 0;
- ret = blk_mq_map_queues(map);
- WARN_ON_ONCE(ret);
+ blk_mq_map_queues(map);
}
-
- return 0;
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
@@ -4222,7 +4225,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
hba->ufs_stats.hibern8_exit_cnt++;
}
@@ -4724,7 +4727,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
- e->tstamp[e->pos] = ktime_get();
+ e->tstamp[e->pos] = local_clock();
e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
@@ -5357,6 +5360,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
lrbp->compl_time_stamp = ktime_get();
+ lrbp->compl_time_stamp_local_clock = local_clock();
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@@ -5752,60 +5756,60 @@ int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
int ret;
- if (!ufshcd_is_wb_allowed(hba))
- return 0;
-
- if (!(enable ^ hba->dev_info.wb_enabled))
+ if (!ufshcd_is_wb_allowed(hba) ||
+ hba->dev_info.wb_enabled == enable)
return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
if (ret) {
- dev_err(hba->dev, "%s Write Booster %s failed %d\n",
- __func__, enable ? "enable" : "disable", ret);
+ dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
return ret;
}
hba->dev_info.wb_enabled = enable;
- dev_dbg(hba->dev, "%s Write Booster %s\n",
+ dev_dbg(hba->dev, "%s: Write Booster %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
}
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
+ bool enable)
{
int ret;
- ret = __ufshcd_wb_toggle(hba, set,
+ ret = __ufshcd_wb_toggle(hba, enable,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
if (ret) {
- dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
- __func__, set ? "enable" : "disable", ret);
+ dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
return;
}
- dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
- __func__, set ? "enabled" : "disabled");
+ dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
+ __func__, enable ? "enabled" : "disabled");
}
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
int ret;
if (!ufshcd_is_wb_allowed(hba) ||
hba->dev_info.wb_buf_flush_enabled == enable)
- return;
+ return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
if (ret) {
- dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
- enable ? "enable" : "disable", ret);
- return;
+ dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
+ __func__, enable ? "enabling" : "disabling", ret);
+ return ret;
}
hba->dev_info.wb_buf_flush_enabled = enable;
-
- dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
+ dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
__func__, enable ? "enabled" : "disabled");
+
+ return ret;
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5820,7 +5824,7 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
index, 0, &cur_buf);
if (ret) {
- dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
@@ -5836,10 +5840,10 @@ static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, false);
+ if (ufshcd_is_wb_buf_flush_allowed(hba))
+ ufshcd_wb_toggle_buf_flush(hba, false);
- ufshcd_wb_toggle_flush_during_h8(hba, false);
+ ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
ufshcd_wb_toggle(hba, false);
hba->caps &= ~UFSHCD_CAP_WB_EN;
@@ -5905,7 +5909,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
index, 0, &avail_buf);
if (ret) {
- dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
@@ -6645,7 +6649,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = ktime_get();
+ hba->ufs_stats.last_intr_ts = local_clock();
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -8236,7 +8240,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
*/
ufshcd_set_active_icc_lvl(hba);
- ufshcd_wb_config(hba);
+ /* Enable UFS Write Booster if supported */
+ ufshcd_configure_wb(hba);
+
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
/* Enable Auto-Hibernate if configured */
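Several ktime_to_us() call sites above become div_u64(..., 1000) because the timestamps now come from local_clock(), which returns nanoseconds from the per-CPU scheduler clock rather than a ktime_t. A one-line sketch of the conversion (example_now_us is a made-up name):

#include <linux/sched/clock.h>
#include <linux/math64.h>

/* Illustrative only: current per-CPU sched-clock time in microseconds,
 * matching what the dev_err() lines above print.
 */
static inline u64 example_now_us(void)
{
	return div_u64(local_clock(), 1000);
}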
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index a1a7a1175a5a..3d69a81c5b17 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -613,14 +613,17 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
srgn->srgn_state = HPB_SRGN_VALID;
}
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
ufshpb_put_req(umap_req->hpb, umap_req);
+ return RQ_END_IO_NONE;
}
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
+static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
+ blk_status_t error)
{
struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
struct ufshpb_lu *hpb = map_req->hpb;
@@ -636,6 +639,7 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
ufshpb_put_map_req(map_req->hpb, map_req);
+ return RQ_END_IO_NONE;
}
static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
diff --git a/drivers/ufs/host/ufs-mediatek-trace.h b/drivers/ufs/host/ufs-mediatek-trace.h
index 7e010848dc99..b5f2ec314074 100644
--- a/drivers/ufs/host/ufs-mediatek-trace.h
+++ b/drivers/ufs/host/ufs-mediatek-trace.h
@@ -24,9 +24,32 @@ TRACE_EVENT(ufs_mtk_event,
__entry->data = data;
),
- TP_printk("ufs:event=%u data=%u",
+ TP_printk("ufs: event=%u data=%u",
__entry->type, __entry->data)
- );
+);
+
+TRACE_EVENT(ufs_mtk_clk_scale,
+ TP_PROTO(const char *name, bool scale_up, unsigned long clk_rate),
+ TP_ARGS(name, scale_up, clk_rate),
+
+ TP_STRUCT__entry(
+ __field(const char*, name)
+ __field(bool, scale_up)
+ __field(unsigned long, clk_rate)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->scale_up = scale_up;
+ __entry->clk_rate = clk_rate;
+ ),
+
+ TP_printk("ufs: clk (%s) scaled %s @ %lu",
+ __entry->name,
+ __entry->scale_up ? "up" : "down",
+ __entry->clk_rate)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index c958279bdd8f..7309f3f87eac 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -19,7 +19,6 @@
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
-#include <linux/sched/clock.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <ufs/ufshcd.h>
@@ -47,6 +46,44 @@ static const struct of_device_id ufs_mtk_of_match[] = {
{},
};
+/*
+ * Details of UIC Errors
+ */
+static const char *const ufs_uic_err_str[] = {
+ "PHY Adapter Layer",
+ "Data Link Layer",
+ "Network Link Layer",
+ "Transport Link Layer",
+ "DME"
+};
+
+static const char *const ufs_uic_pa_err_str[] = {
+ "PHY error on Lane 0",
+ "PHY error on Lane 1",
+ "PHY error on Lane 2",
+ "PHY error on Lane 3",
+ "Generic PHY Adapter Error. This should be the LINERESET indication"
+};
+
+static const char *const ufs_uic_dl_err_str[] = {
+ "NAC_RECEIVED",
+ "TCx_REPLAY_TIMER_EXPIRED",
+ "AFCx_REQUEST_TIMER_EXPIRED",
+ "FCx_PROTECTION_TIMER_EXPIRED",
+ "CRC_ERROR",
+ "RX_BUFFER_OVERFLOW",
+ "MAX_FRAME_LENGTH_EXCEEDED",
+ "WRONG_SEQUENCE_NUMBER",
+ "AFC_FRAME_SYNTAX_ERROR",
+ "NAC_FRAME_SYNTAX_ERROR",
+ "EOF_SYNTAX_ERROR",
+ "FRAME_SYNTAX_ERROR",
+ "BAD_CTRL_SYMBOL_TYPE",
+ "PA_INIT_ERROR",
+ "PA_ERROR_IND_RECEIVED",
+ "PA_INIT"
+};
+
static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -598,6 +635,12 @@ static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
boost ? 0 : PM_QOS_DEFAULT_VALUE);
}
+static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
+{
+ ufs_mtk_boost_crypt(hba, scale_up);
+ ufs_mtk_boost_pm_qos(hba, scale_up);
+}
+
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -605,11 +648,11 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
if (on) {
phy_power_on(host->mphy);
ufs_mtk_setup_ref_clk(hba, on);
- ufs_mtk_boost_crypt(hba, on);
- ufs_mtk_boost_pm_qos(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
} else {
- ufs_mtk_boost_pm_qos(hba, on);
- ufs_mtk_boost_crypt(hba, on);
+ if (!ufshcd_is_clkscaling_supported(hba))
+ ufs_mtk_scale_perf(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
}
@@ -695,6 +738,46 @@ static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
return hba->ufs_version;
}
+/**
+ * ufs_mtk_init_clocks - Init mtk driver private clocks
+ *
+ * @hba: per adapter instance
+ */
+static void ufs_mtk_init_clocks(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct list_head *head = &hba->clk_list_head;
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki, *clki_tmp;
+
+ /*
+ * Find private clocks and store them in struct ufs_mtk_clk.
+ * Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
+ * being switched on/off in clock gating.
+ */
+ list_for_each_entry_safe(clki, clki_tmp, head, list) {
+ if (!strcmp(clki->name, "ufs_sel")) {
+ host->mclk.ufs_sel_clki = clki;
+ } else if (!strcmp(clki->name, "ufs_sel_max_src")) {
+ host->mclk.ufs_sel_max_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ } else if (!strcmp(clki->name, "ufs_sel_min_src")) {
+ host->mclk.ufs_sel_min_clki = clki;
+ clk_disable_unprepare(clki->clk);
+ list_del(&clki->list);
+ }
+ }
+
+ if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
+ !mclk->ufs_sel_min_clki) {
+ hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+ dev_info(hba->dev,
+ "%s: Clk-scaling not ready. Feature disabled.",
+ __func__);
+ }
+}
+
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
@@ -815,12 +898,18 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable WriteBooster */
hba->caps |= UFSHCD_CAP_WB_EN;
+
+ /* Enable clk scaling */
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+ ufs_mtk_init_clocks(hba);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -833,6 +922,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
+ /* Initialize pm-qos request */
+ cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ host->pm_qos_init = true;
+
goto out;
out_variant_clear:
@@ -1247,13 +1340,16 @@ fail:
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
- ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
+ /* Dump ufshci register 0x140 ~ 0x14C */
+ ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
+ "XOUFS Ctrl (0x140): ");
ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
+ /* Dump ufshci register 0x2200 ~ 0x22AC */
ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
- "MPHY Ctrl ");
+ "MPHY Ctrl (0x2200): ");
/* Direct debugging information to REG_MTK_PROBE */
ufs_mtk_dbg_sel(hba);
@@ -1310,8 +1406,101 @@ static void ufs_mtk_event_notify(struct ufs_hba *hba,
enum ufs_event_type evt, void *data)
{
unsigned int val = *(u32 *)data;
+ unsigned long reg;
+ u8 bit;
trace_ufs_mtk_event(evt, val);
+
+ /* Print details of UIC Errors */
+ if (evt <= UFS_EVT_DME_ERR) {
+ dev_info(hba->dev,
+ "Host UIC Error Code (%s): %08x\n",
+ ufs_uic_err_str[evt], val);
+ reg = val;
+ }
+
+ if (evt == UFS_EVT_PA_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
+ }
+
+ if (evt == UFS_EVT_DL_ERR) {
+ for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
+ dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
+ }
+}
+
+static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_simple_ondemand_data *data)
+{
+ /* Customize min gear in clk scaling */
+ hba->clk_scaling.min_gear = UFS_HS_G4;
+
+ hba->vps->devfreq_profile.polling_ms = 200;
+ hba->vps->ondemand_data.upthreshold = 50;
+ hba->vps->ondemand_data.downdifferential = 20;
+}
+
+/**
+ * ufs_mtk_clk_scale - Internal clk scaling operation
+ *
+ * The MTK platform supports clk scaling by switching the parent of
+ * ufs_sel (a mux). ufs_sel feeds ufs_ck, which clocks the UFS hardware
+ * directly. The maximum and minimum rates of ufs_sel defined in the DTS
+ * should match the rates of "ufs_sel_max_src" and "ufs_sel_min_src"
+ * respectively, so that scaling never changes the rate of a shared PLL.
+ *
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ */
+static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_clk *mclk = &host->mclk;
+ struct ufs_clk_info *clki = mclk->ufs_sel_clki;
+ int ret = 0;
+
+ ret = clk_prepare_enable(clki->clk);
+ if (ret) {
+ dev_info(hba->dev,
+ "clk_prepare_enable() fail, ret: %d\n", ret);
+ return;
+ }
+
+ if (scale_up) {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
+ clki->curr_freq = clki->max_freq;
+ } else {
+ ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
+ clki->curr_freq = clki->min_freq;
+ }
+
+ if (ret) {
+ dev_info(hba->dev,
+ "Failed to set ufs_sel_clki, ret: %d\n", ret);
+ }
+
+ clk_disable_unprepare(clki->clk);
+
+ trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
+}
+
+static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ enum ufs_notify_change_status status)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return 0;
+
+ if (status == PRE_CHANGE) {
+ /* Switch parent before clk_set_rate() */
+ ufs_mtk_clk_scale(hba, scale_up);
+ } else {
+ /* Request interrupt latency QoS accordingly */
+ ufs_mtk_scale_perf(hba, scale_up);
+ }
+
+ return 0;
}
/*
@@ -1335,6 +1524,8 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.dbg_register_dump = ufs_mtk_dbg_register_dump,
.device_reset = ufs_mtk_device_reset,
.event_notify = ufs_mtk_event_notify,
+ .config_scaling_param = ufs_mtk_config_scaling_param,
+ .clk_scale_notify = ufs_mtk_clk_scale_notify,
};
/**
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index aa26d415527b..2fc6d7b87694 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -124,6 +124,12 @@ struct ufs_mtk_crypt_cfg {
int vcore_volt;
};
+struct ufs_mtk_clk {
+ struct ufs_clk_info *ufs_sel_clki; /* Mux */
+ struct ufs_clk_info *ufs_sel_max_clki; /* Max src */
+ struct ufs_clk_info *ufs_sel_min_clki; /* Min src */
+};
+
struct ufs_mtk_hw_ver {
u8 step;
u8 minor;
@@ -139,6 +145,7 @@ struct ufs_mtk_host {
struct reset_control *crypto_reset;
struct ufs_hba *hba;
struct ufs_mtk_crypt_cfg *crypt;
+ struct ufs_mtk_clk mclk;
struct ufs_mtk_hw_ver hw_ver;
enum ufs_mtk_host_caps caps;
bool mphy_powered_on;
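The ufs_mtk_clk bookkeeping added here backs the mux-reparenting scheme described for ufs_mtk_clk_scale(): scaling switches ufs_sel between two fixed-rate parents instead of changing a shared PLL's rate. Stripped of the driver specifics, the pattern looks roughly like this (all names are illustrative):

#include <linux/clk.h>

/* Illustrative only: scale a consumer by reparenting its mux clock
 * between a fast and a slow fixed-rate source.
 */
static int example_scale_by_reparent(struct clk *mux, struct clk *fast_parent,
				     struct clk *slow_parent, bool scale_up)
{
	int ret;

	/* Mirror the driver: keep the mux enabled while its parent switches. */
	ret = clk_prepare_enable(mux);
	if (ret)
		return ret;

	ret = clk_set_parent(mux, scale_up ? fast_parent : slow_parent);

	clk_disable_unprepare(mux);
	return ret;
}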
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 473fad83701e..8ad1415e10b6 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -846,7 +846,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_CRYPTO;
diff --git a/drivers/uio/uio_dfl.c b/drivers/uio/uio_dfl.c
index 8f39cc8bb034..69e93f3e7faf 100644
--- a/drivers/uio/uio_dfl.c
+++ b/drivers/uio/uio_dfl.c
@@ -46,10 +46,12 @@ static int uio_dfl_probe(struct dfl_device *ddev)
#define FME_FEATURE_ID_ETH_GROUP 0x10
#define FME_FEATURE_ID_HSSI_SUBSYS 0x15
+#define PORT_FEATURE_ID_IOPLL_USRCLK 0x14
static const struct dfl_device_id uio_dfl_ids[] = {
{ FME_ID, FME_FEATURE_ID_ETH_GROUP },
{ FME_ID, FME_FEATURE_ID_HSSI_SUBSYS },
+ { PORT_ID, PORT_FEATURE_ID_IOPLL_USRCLK },
{ }
};
MODULE_DEVICE_TABLE(dfl, uio_dfl_ids);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 362217189ef3..1cdb8758ae01 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1026,7 +1026,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
/* public fields */
instance->driver = driver;
- strlcpy(instance->driver_name, driver->driver_name,
+ strscpy(instance->driver_name, driver->driver_name,
sizeof(instance->driver_name));
instance->usb_dev = usb_dev;
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d21b69997e75..5adcb349718c 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1530,7 +1530,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 &&
- le32_to_cpu(trb->control) & TRB_SMM)
+ le32_to_cpu(trb->control) & TRB_SMM &&
+ le32_to_cpu(trb->control) & TRB_CHAIN)
transfer_end = true;
cdns3_ep_inc_deq(priv_ep);
@@ -1690,6 +1691,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index dc068e940ed5..2bc5d094548b 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -110,8 +110,6 @@ static int cdns3_plat_probe(struct platform_device *pdev)
cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
if (cdns->wakeup_irq == -EPROBE_DEFER)
return cdns->wakeup_irq;
- else if (cdns->wakeup_irq == 0)
- return -EINVAL;
if (cdns->wakeup_irq < 0) {
dev_dbg(dev, "couldn't get wakeup irq\n");
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 661818e8fed6..c815824a0b2d 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -34,26 +34,26 @@ config USB_CHIPIDEA_HOST
ChipIdea driver.
config USB_CHIPIDEA_PCI
- tristate "Enable PCI glue driver" if EMBEDDED
+ tristate "Enable PCI glue driver" if EXPERT
depends on USB_PCI
depends on NOP_USB_XCEIV
default USB_CHIPIDEA
config USB_CHIPIDEA_MSM
- tristate "Enable MSM hsusb glue driver" if EMBEDDED
+ tristate "Enable MSM hsusb glue driver" if EXPERT
default USB_CHIPIDEA
config USB_CHIPIDEA_IMX
- tristate "Enable i.MX USB glue driver" if EMBEDDED
+ tristate "Enable i.MX USB glue driver" if EXPERT
depends on OF
default USB_CHIPIDEA
config USB_CHIPIDEA_GENERIC
- tristate "Enable generic USB2 glue driver" if EMBEDDED
+ tristate "Enable generic USB2 glue driver" if EXPERT
default USB_CHIPIDEA
config USB_CHIPIDEA_TEGRA
- tristate "Enable Tegra USB glue driver" if EMBEDDED
+ tristate "Enable Tegra USB glue driver" if EXPERT
depends on OF
default USB_CHIPIDEA
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 89e1d82d739b..dc86b12060b5 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -30,6 +30,7 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
static const struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_PHY_VBUS_CONTROL,
};
static const struct ci_hdrc_platform_data ci_zevio_pdata = {
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index bdc3885c0d49..bc3634a54c6b 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -63,6 +63,13 @@ static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
priv->enabled = enable;
}
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL) {
+ if (enable)
+ usb_phy_vbus_on(ci->usb_phy);
+ else
+ usb_phy_vbus_off(ci->usb_phy);
+ }
+
if (enable && (ci->platdata->phy_mode == USBPHY_INTERFACE_MODE_HSIC)) {
/*
* Marvell 28nm HSIC PHY requires forcing the port to HS mode.
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 61b157b9c662..ada78daba6df 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -471,6 +471,10 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
return;
}
}
+
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL)
+ usb_phy_vbus_on(ci->usb_phy);
+
/* Disable data pulse irq */
hw_write_otgsc(ci, OTGSC_DPIE, 0);
@@ -480,6 +484,9 @@ static void ci_otg_drv_vbus(struct otg_fsm *fsm, int on)
if (ci->platdata->reg_vbus)
regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->flags & CI_HDRC_PHY_VBUS_CONTROL)
+ usb_phy_vbus_off(ci->usb_phy);
+
fsm->a_bus_drop = 1;
fsm->a_bus_req = 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 483bcb1213f7..36bf051b345b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,7 +51,7 @@ static DEFINE_IDR(acm_minors);
static DEFINE_MUTEX(acm_minors_lock);
static void acm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *termios_old);
+ const struct ktermios *termios_old);
/*
* acm_minors accessors
@@ -1049,7 +1049,7 @@ static int acm_tty_ioctl(struct tty_struct *tty,
}
static void acm_tty_set_termios(struct tty_struct *tty,
- struct ktermios *termios_old)
+ const struct ktermios *termios_old)
{
struct acm *acm = tty->driver_data;
struct ktermios *termios = &tty->termios;
@@ -1810,6 +1810,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Serie */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index eebe782380fb..1f0951be15ab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -958,7 +958,7 @@ static void wdm_wwan_rx(struct wdm_device *desc, int length)
if (!skb)
return;
- memcpy(skb_put(skb, length), desc->inbuf, length);
+ skb_put_data(skb, desc->inbuf, length);
wwan_port_rx(port, skb);
/* inbuf has been copied, it is safe to check for outstanding data */
diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
index 075f6b1b2a1a..f204cec8d380 100644
--- a/drivers/usb/common/debug.c
+++ b/drivers/usb/common/debug.c
@@ -208,30 +208,28 @@ static void usb_decode_set_isoch_delay(__u8 wValue, char *str, size_t size)
snprintf(str, size, "Set Isochronous Delay(Delay = %d ns)", wValue);
}
-/**
- * usb_decode_ctrl - Returns human readable representation of control request.
- * @str: buffer to return a human-readable representation of control request.
- * This buffer should have about 200 bytes.
- * @size: size of str buffer.
- * @bRequestType: matches the USB bmRequestType field
- * @bRequest: matches the USB bRequest field
- * @wValue: matches the USB wValue field (CPU byte order)
- * @wIndex: matches the USB wIndex field (CPU byte order)
- * @wLength: matches the USB wLength field (CPU byte order)
- *
- * Function returns decoded, formatted and human-readable description of
- * control request packet.
- *
- * The usage scenario for this is for tracepoints, so function as a return
- * use the same value as in parameters. This approach allows to use this
- * function in TP_printk
- *
- * Important: wValue, wIndex, wLength parameters before invoking this function
- * should be processed by le16_to_cpu macro.
- */
-const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
- __u8 bRequest, __u16 wValue, __u16 wIndex,
- __u16 wLength)
+static void usb_decode_ctrl_generic(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ u8 recip = bRequestType & USB_RECIP_MASK;
+ u8 type = bRequestType & USB_TYPE_MASK;
+
+ snprintf(str, size,
+ "Type=%s Recipient=%s Dir=%s bRequest=%u wValue=%u wIndex=%u wLength=%u",
+ (type == USB_TYPE_STANDARD) ? "Standard" :
+ (type == USB_TYPE_VENDOR) ? "Vendor" :
+ (type == USB_TYPE_CLASS) ? "Class" : "Unknown",
+ (recip == USB_RECIP_DEVICE) ? "Device" :
+ (recip == USB_RECIP_INTERFACE) ? "Interface" :
+ (recip == USB_RECIP_ENDPOINT) ? "Endpoint" : "Unknown",
+ (bRequestType & USB_DIR_IN) ? "IN" : "OUT",
+ bRequest, wValue, wIndex, wLength);
+}
+
+static void usb_decode_ctrl_standard(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
{
switch (bRequest) {
case USB_REQ_GET_STATUS:
@@ -272,14 +270,48 @@ const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
usb_decode_set_isoch_delay(wValue, str, size);
break;
default:
- snprintf(str, size, "%02x %02x %02x %02x %02x %02x %02x %02x",
- bRequestType, bRequest,
- (u8)(cpu_to_le16(wValue) & 0xff),
- (u8)(cpu_to_le16(wValue) >> 8),
- (u8)(cpu_to_le16(wIndex) & 0xff),
- (u8)(cpu_to_le16(wIndex) >> 8),
- (u8)(cpu_to_le16(wLength) & 0xff),
- (u8)(cpu_to_le16(wLength) >> 8));
+ usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
+ }
+}
+
+/**
+ * usb_decode_ctrl - Returns human readable representation of control request.
+ * @str: buffer to return a human-readable representation of control request.
+ * This buffer should have about 200 bytes.
+ * @size: size of str buffer.
+ * @bRequestType: matches the USB bmRequestType field
+ * @bRequest: matches the USB bRequest field
+ * @wValue: matches the USB wValue field (CPU byte order)
+ * @wIndex: matches the USB wIndex field (CPU byte order)
+ * @wLength: matches the USB wLength field (CPU byte order)
+ *
+ * Function returns decoded, formatted and human-readable description of
+ * control request packet.
+ *
+ * The usage scenario for this is for tracepoints, so function as a return
+ * use the same value as in parameters. This approach allows to use this
+ * function in TP_printk
+ *
+ * Important: wValue, wIndex, wLength parameters before invoking this function
+ * should be processed by le16_to_cpu macro.
+ */
+const char *usb_decode_ctrl(char *str, size_t size, __u8 bRequestType,
+ __u8 bRequest, __u16 wValue, __u16 wIndex,
+ __u16 wLength)
+{
+ switch (bRequestType & USB_TYPE_MASK) {
+ case USB_TYPE_STANDARD:
+ usb_decode_ctrl_standard(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
+ case USB_TYPE_VENDOR:
+ case USB_TYPE_CLASS:
+ default:
+ usb_decode_ctrl_generic(str, size, bRequestType, bRequest,
+ wValue, wIndex, wLength);
+ break;
}
return str;
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0a4f441aff8f..d7c8461976ce 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -233,7 +233,7 @@ err:
return 0;
}
-static int ulpi_regs_read(struct seq_file *seq, void *data)
+static int ulpi_regs_show(struct seq_file *seq, void *data)
{
struct ulpi *ulpi = seq->private;
@@ -269,21 +269,7 @@ static int ulpi_regs_read(struct seq_file *seq, void *data)
return 0;
}
-
-static int ulpi_regs_open(struct inode *inode, struct file *f)
-{
- struct ulpi *ulpi = inode->i_private;
-
- return single_open(f, ulpi_regs_read, ulpi);
-}
-
-static const struct file_operations ulpi_regs_ops = {
- .owner = THIS_MODULE,
- .open = ulpi_regs_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
+DEFINE_SHOW_ATTRIBUTE(ulpi_regs);
#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL)
@@ -316,7 +302,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
}
root = debugfs_create_dir(dev_name(dev), ULPI_ROOT);
- debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_ops);
+ debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
ulpi->id.vendor, ulpi->id.product);
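DEFINE_SHOW_ATTRIBUTE(ulpi_regs) generates the boilerplate that the hunk above deletes by hand: given ulpi_regs_show(), it emits an open routine built on single_open() plus an ulpi_regs_fops with the usual seq_file operations. Roughly (simplified from the macro in include/linux/seq_file.h):

```c
/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(ulpi_regs), for reference. */
static int ulpi_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ulpi_regs_show, inode->i_private);
}

static const struct file_operations ulpi_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= ulpi_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
```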
diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c
index b39c9f1c375d..e20874caba36 100644
--- a/drivers/usb/common/usb-conn-gpio.c
+++ b/drivers/usb/common/usb-conn-gpio.c
@@ -208,10 +208,8 @@ static int usb_conn_probe(struct platform_device *pdev)
if (PTR_ERR(info->vbus) == -ENODEV)
info->vbus = NULL;
- if (IS_ERR(info->vbus)) {
- ret = PTR_ERR(info->vbus);
- return dev_err_probe(dev, ret, "failed to get vbus :%d\n", ret);
- }
+ if (IS_ERR(info->vbus))
+ return dev_err_probe(dev, PTR_ERR(info->vbus), "failed to get vbus\n");
info->role_sw = usb_role_switch_get(dev);
if (IS_ERR(info->role_sw))
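dev_err_probe() already prints the error code (and stays quiet for -EPROBE_DEFER, recording the reason in the devices_deferred debugfs entry instead), and it returns the error it was passed, so the intermediate ret and the ":%d" suffix were redundant. A sketch of the resulting idiom; example_get_vbus() is a purely illustrative wrapper:

```c
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_get_vbus(struct device *dev, struct regulator **vbus)
{
	*vbus = devm_regulator_get_optional(dev, "vbus");
	if (PTR_ERR(*vbus) == -ENODEV)	/* supply genuinely absent: not an error */
		*vbus = NULL;

	if (IS_ERR(*vbus))
		/* Logs the failure (quietly for -EPROBE_DEFER) and returns it. */
		return dev_err_probe(dev, PTR_ERR(*vbus), "failed to get vbus\n");

	return 0;
}
```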
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index b5b85bf80329..837f3e57f580 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1434,7 +1434,7 @@ static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
if (!intf || !intf->dev.driver)
ret = -ENODATA;
else {
- strlcpy(gd.driver, intf->dev.driver->name,
+ strscpy(gd.driver, intf->dev.driver->name,
sizeof(gd.driver));
ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0);
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 482dae72ef1c..9b77f49b3560 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -157,7 +157,6 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
/**
* usb_hcd_pci_probe - initialize PCI-based HCDs
* @dev: USB Host Controller being probed
- * @id: pci hotplug id connecting controller to HCD framework
* @driver: USB HC driver handle
*
* Context: task context, might sleep
@@ -170,8 +169,7 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
*
* Return: 0 if successful.
*/
-int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id,
- const struct hc_driver *driver)
+int usb_hcd_pci_probe(struct pci_dev *dev, const struct hc_driver *driver)
{
struct usb_hcd *hcd;
int retval;
@@ -180,9 +178,6 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id,
if (usb_disabled())
return -ENODEV;
- if (!id)
- return -EINVAL;
-
if (!driver)
return -EINVAL;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 94b305bbd621..faeaace0d197 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1474,7 +1474,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
urb->sg,
urb->num_sgs,
dir);
- if (n <= 0)
+ if (!n)
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SG;
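The check changes because dma_map_sg() never returns a negative value; it reports failure by returning 0 mapped entries. A hedged sketch of that contract, with example_map() as an illustrative name:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!n)			/* 0 means the mapping failed */
		return -EAGAIN;

	return n;		/* number of DMA segments actually mapped */
}
```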
@@ -2158,21 +2158,14 @@ static struct urb *request_single_step_set_feature_urb(
{
struct urb *urb;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- struct usb_host_endpoint *ep;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return NULL;
urb->pipe = usb_rcvctrlpipe(udev, 0);
- ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
- [usb_pipeendpoint(urb->pipe)];
- if (!ep) {
- usb_free_urb(urb);
- return NULL;
- }
- urb->ep = ep;
+ urb->ep = &udev->ep0;
urb->dev = udev;
urb->setup_packet = (void *)dr;
urb->transfer_buffer = buf;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2633acde7ac1..bbab424b0d55 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6038,6 +6038,11 @@ re_enumerate:
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, when a
+ * driver without pre_reset() or post_reset() callbacks is unbound or
+ * re-bound while the reset is running, and its disconnect() or probe()
+ * routine then attempts a second, nested reset), the routine returns
+ * -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -6071,6 +6076,10 @@ int usb_reset_device(struct usb_device *udev)
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -6135,6 +6144,7 @@ int usb_reset_device(struct usb_device *udev)
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
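With the new reset_in_progress flag, a nested call made from a driver's disconnect() or probe() path while usb_reset_device() is already running fails fast with -EINPROGRESS instead of recursing. A caller on such a path could treat that value as "the reset is already being handled"; a hypothetical example:

```c
#include <linux/usb.h>

/* Illustrative only: tolerate a reset that is already underway. */
static int example_try_reset(struct usb_device *udev)
{
	int ret;

	ret = usb_reset_device(udev);
	if (ret == -EINPROGRESS)
		return 0;	/* another context is already resetting the device */

	return ret;
}
```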
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f99a65a64588..0722d2131305 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -388,6 +388,15 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Kingston DataTraveler 3.0 */
{ USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+ /* NVIDIA Jetson devices in Force Recovery mode */
+ { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
@@ -437,6 +446,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo ThinkPad OneLink+ Dock twin hub controllers (VIA Labs VL812) */
+ { USB_DEVICE(0x17ef, 0x1018), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x17ef, 0x1019), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
{ USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index dc4fc72ab1b6..5635e4d7ec88 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -3,36 +3,6 @@
* core.c - DesignWare HS OTG Controller common routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 0683852e47e4..40cf2880d7e5 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -3,36 +3,6 @@
* core.h - DesignWare HS OTG Controller common declarations
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DWC2_CORE_H__
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index a5c52b237e72..158ede753854 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -3,36 +3,6 @@
* core_intr.c - DesignWare HS OTG Controller common interrupt handling
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index aaf7b9fc4d34..657f1f659ffa 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3,36 +3,6 @@
* hcd.c - DesignWare HS OTG Controller host-mode routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index ea02ee63ac6d..b7254d94fdc3 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -3,37 +3,8 @@
* hcd.h - DesignWare HS OTG Controller host-mode declarations
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#ifndef __DWC2_HCD_H__
#define __DWC2_HCD_H__
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index a858b5f9c1d6..6b4d825e97a2 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -3,36 +3,6 @@
* hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index d5f4ec1b73b1..c9740caa5974 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -3,36 +3,6 @@
* hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 24beff610cf2..0a1145592fc7 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -3,36 +3,6 @@
* hcd_queue.c - DesignWare HS OTG Controller host queuing routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 6b16fbf98bc6..13abdd5f6752 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -3,36 +3,6 @@
* hw.h - DesignWare HS OTG Controller hardware definitions
*
* Copyright 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __DWC2_HW_H__
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index fdb8a42fff86..8eab5f38b110 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -1,36 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2004-2016 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index a93559b4ecdb..b7306ed8be4c 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -3,36 +3,6 @@
* pci.c - DesignWare HS OTG Controller PCI driver
*
* Copyright (C) 2004-2013 Synopsys, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c8ba87df7abe..ec4ace0107f5 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -3,36 +3,6 @@
* platform.c - DesignWare HS OTG Controller platform driver
*
* Copyright (C) Matthijs Kooijman <matthijs@stdin.nl>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
@@ -154,9 +124,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_power_on(hsotg->phy);
+ ret = phy_init(hsotg->phy);
if (ret == 0)
- ret = phy_init(hsotg->phy);
+ ret = phy_power_on(hsotg->phy);
}
return ret;
@@ -188,9 +158,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_exit(hsotg->phy);
+ ret = phy_power_off(hsotg->phy);
if (ret == 0)
- ret = phy_power_off(hsotg->phy);
+ ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
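Both hunks fix dwc2 to follow the generic PHY framework's expected ordering: phy_init() before phy_power_on() on the way up, and the mirror image, phy_power_off() before phy_exit(), on the way down. A minimal sketch of that pairing; the helper names are illustrative:

```c
#include <linux/phy/phy.h>

static int example_phy_up(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);		/* initialise first ... */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* ... then power on */
	if (ret)
		phy_exit(phy);		/* unwind on failure */

	return ret;
}

static void example_phy_down(struct phy *phy)
{
	phy_power_off(phy);		/* mirror order on teardown */
	phy_exit(phy);
}
```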
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c5c238ab3083..ea51624461b5 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_graph.h>
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
@@ -86,7 +85,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
* mode. If the controller supports DRD but the dr_mode is not
* specified or set to OTG, then set the mode to peripheral.
*/
- if (mode == USB_DR_MODE_OTG && !dwc->edev &&
+ if (mode == USB_DR_MODE_OTG &&
(!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
!device_property_read_bool(dwc->dev, "usb-role-switch")) &&
!DWC3_VER_IS_PRIOR(DWC3, 330A))
@@ -408,6 +407,10 @@ static void dwc3_ref_clk_period(struct dwc3 *dwc)
reg |= FIELD_PREP(DWC3_GFLADJ_REFCLK_FLADJ_MASK, fladj)
| FIELD_PREP(DWC3_GFLADJ_240MHZDECR, decr >> 1)
| FIELD_PREP(DWC3_GFLADJ_240MHZDECR_PLS1, decr & 1);
+
+ if (dwc->gfladj_refclk_lpm_sel)
+ reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+
dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
}
@@ -789,7 +792,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
else
reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
- if (dwc->dis_u2_freeclk_exists_quirk)
+ if (dwc->dis_u2_freeclk_exists_quirk || dwc->gfladj_refclk_lpm_sel)
reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
@@ -833,15 +836,16 @@ static void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
dwc3_clk_disable(dwc);
reset_control_assert(dwc->reset);
}
@@ -1179,6 +1183,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
+ /*
+ * When configured in host mode, the controller fails to send a proper
+ * CRC checksum in the CRC5 field after issuing a U3/L2 exit. This
+ * triggers a Transaction Error, which causes a reset and re-enumeration
+ * of the attached USB device. termsel, xcvrsel and opmode all become 0
+ * at the end of resume. Setting bit 10 of GUCTL1 corrects the problem.
+ * This option exists to support certain legacy ULPI PHYs.
+ */
+ if (dwc->resume_hs_terminations) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_RESUME_OPMODE_HS_HOST;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+ }
+
if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
@@ -1522,8 +1541,12 @@ static void dwc3_get_properties(struct dwc3 *dwc)
"snps,dis-del-phy-power-chg-quirk");
dwc->dis_tx_ipgap_linecheck_quirk = device_property_read_bool(dev,
"snps,dis-tx-ipgap-linecheck-quirk");
+ dwc->resume_hs_terminations = device_property_read_bool(dev,
+ "snps,resume-hs-terminations");
dwc->parkmode_disable_ss_quirk = device_property_read_bool(dev,
"snps,parkmode-disable-ss-quirk");
+ dwc->gfladj_refclk_lpm_sel = device_property_read_bool(dev,
+ "snps,gfladj-refclk-lpm-sel-quirk");
dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
"snps,tx_de_emphasis_quirk");
@@ -1667,46 +1690,6 @@ static void dwc3_check_params(struct dwc3 *dwc)
}
}
-static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
-{
- struct device *dev = dwc->dev;
- struct device_node *np_phy;
- struct extcon_dev *edev = NULL;
- const char *name;
-
- if (device_property_read_bool(dev, "extcon"))
- return extcon_get_edev_by_phandle(dev, 0);
-
- /*
- * Device tree platforms should get extcon via phandle.
- * On ACPI platforms, we get the name from a device property.
- * This device property is for kernel internal use only and
- * is expected to be set by the glue code.
- */
- if (device_property_read_string(dev, "linux,extcon-name", &name) == 0)
- return extcon_get_extcon_dev(name);
-
- /*
- * Try to get an extcon device from the USB PHY controller's "port"
- * node. Check if it has the "port" node first, to avoid printing the
- * error message from underlying code, as it's a valid case: extcon
- * device (and "port" node) may be missing in case of "usb-role-switch"
- * or OTG mode.
- */
- np_phy = of_parse_phandle(dev->of_node, "phys", 0);
- if (of_graph_is_present(np_phy)) {
- struct device_node *np_conn;
-
- np_conn = of_graph_get_remote_node(np_phy, -1, -1);
- if (np_conn)
- edev = extcon_find_edev_by_node(np_conn);
- of_node_put(np_conn);
- }
- of_node_put(np_phy);
-
- return edev;
-}
-
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1751,15 +1734,11 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- if (!dwc->sysdev_is_parent) {
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
- }
-
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
- if (IS_ERR(dwc->reset))
- return PTR_ERR(dwc->reset);
+ if (IS_ERR(dwc->reset)) {
+ ret = PTR_ERR(dwc->reset);
+ goto put_usb_psy;
+ }
if (dev->of_node) {
/*
@@ -1769,45 +1748,57 @@ static int dwc3_probe(struct platform_device *pdev)
* check for them to retain backwards compatibility.
*/
dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
- if (IS_ERR(dwc->bus_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
- "could not get bus clock\n");
+ if (IS_ERR(dwc->bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ goto put_usb_psy;
+ }
if (dwc->bus_clk == NULL) {
dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
- if (IS_ERR(dwc->bus_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
- "could not get bus clock\n");
+ if (IS_ERR(dwc->bus_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ goto put_usb_psy;
+ }
}
dwc->ref_clk = devm_clk_get_optional(dev, "ref");
- if (IS_ERR(dwc->ref_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
- "could not get ref clock\n");
+ if (IS_ERR(dwc->ref_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ goto put_usb_psy;
+ }
if (dwc->ref_clk == NULL) {
dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
- if (IS_ERR(dwc->ref_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
- "could not get ref clock\n");
+ if (IS_ERR(dwc->ref_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ goto put_usb_psy;
+ }
}
dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
- if (IS_ERR(dwc->susp_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
- "could not get suspend clock\n");
+ if (IS_ERR(dwc->susp_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ goto put_usb_psy;
+ }
if (dwc->susp_clk == NULL) {
dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
- if (IS_ERR(dwc->susp_clk))
- return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
- "could not get suspend clock\n");
+ if (IS_ERR(dwc->susp_clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ goto put_usb_psy;
+ }
}
}
ret = reset_control_deassert(dwc->reset);
if (ret)
- return ret;
+ goto put_usb_psy;
ret = dwc3_clk_enable(dwc);
if (ret)
@@ -1821,7 +1812,13 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
- device_init_wakeup(&pdev->dev, of_property_read_bool(dev->of_node, "wakeup-source"));
+
+ if (!dwc->sysdev_is_parent &&
+ DWC3_GHWPARAMS0_AWIDTH(dwc->hwparams.hwparams0) == 64) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ goto disable_clks;
+ }
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);
@@ -1843,13 +1840,6 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
- dwc->edev = dwc3_get_extcon(dwc);
- if (IS_ERR(dwc->edev)) {
- ret = PTR_ERR(dwc->edev);
- dev_err_probe(dwc->dev, ret, "failed to get extcon\n");
- goto err3;
- }
-
ret = dwc3_get_dr_mode(dwc);
if (ret)
goto err3;
@@ -1879,16 +1869,16 @@ err5:
dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1908,7 +1898,7 @@ disable_clks:
dwc3_clk_disable(dwc);
assert_reset:
reset_control_assert(dwc->reset);
-
+put_usb_psy:
if (dwc->usb_psy)
power_supply_put(dwc->usb_psy);
@@ -1976,14 +1966,12 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
case DWC3_GCTL_PRTCAP_DEVICE:
if (pm_runtime_suspended(dwc->dev))
break;
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
synchronize_irq(dwc->irq_gadget);
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
dwc3_core_exit(dwc);
break;
}
@@ -2039,12 +2027,10 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
return ret;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
- spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
- spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4fe4287dc934..8f9959ba9fd4 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -263,6 +263,7 @@
#define DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK BIT(26)
#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
#define DWC3_GUCTL1_PARKMODE_DISABLE_SS BIT(17)
+#define DWC3_GUCTL1_RESUME_OPMODE_HS_HOST BIT(10)
/* Global Status Register */
#define DWC3_GSTS_OTG_IP BIT(10)
@@ -391,6 +392,7 @@
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
#define DWC3_GFLADJ_REFCLK_FLADJ_MASK GENMASK(21, 8)
+#define DWC3_GFLADJ_REFCLK_LPM_SEL BIT(23)
#define DWC3_GFLADJ_240MHZDECR GENMASK(30, 24)
#define DWC3_GFLADJ_240MHZDECR_PLS1 BIT(31)
@@ -1096,6 +1098,8 @@ struct dwc3_scratchpad_array {
* change quirk.
* @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate
* check during HS transmit.
+ * @resume_hs_terminations: set if we enable the quirk that fixes improper
+ * CRC generation after resume from suspend.
* @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed
* instances in park mode.
* @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk
@@ -1311,7 +1315,9 @@ struct dwc3 {
unsigned dis_u2_freeclk_exists_quirk:1;
unsigned dis_del_phy_power_chg_quirk:1;
unsigned dis_tx_ipgap_linecheck_quirk:1;
+ unsigned resume_hs_terminations:1;
unsigned parkmode_disable_ss_quirk:1;
+ unsigned gfladj_refclk_lpm_sel:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
@@ -1560,6 +1566,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
u32 param);
void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc);
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index d223c54115f4..48b44b88dc25 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -278,7 +278,7 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
break;
case DWC3_DEPEVT_XFERINPROGRESS:
scnprintf(str + len, size - len,
- "Transfer In Progress [%d] (%c%c%c)",
+ "Transfer In Progress [%08x] (%c%c%c)",
event->parameters,
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
@@ -286,7 +286,7 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
break;
case DWC3_DEPEVT_XFERNOTREADY:
len += scnprintf(str + len, size - len,
- "Transfer Not Ready [%d]%s",
+ "Transfer Not Ready [%08x]%s",
event->parameters,
status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
" (Active)" : " (Not Active)");
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 039bf241769a..8cad9e7d3368 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -8,6 +8,7 @@
*/
#include <linux/extcon.h>
+#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -438,6 +439,51 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
+{
+ struct device *dev = dwc->dev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
+ const char *name;
+
+ if (device_property_read_bool(dev, "extcon"))
+ return extcon_get_edev_by_phandle(dev, 0);
+
+ /*
+ * Device tree platforms should get extcon via phandle.
+ * On ACPI platforms, we get the name from a device property.
+ * This device property is for kernel internal use only and
+ * is expected to be set by the glue code.
+ */
+ if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) {
+ edev = extcon_get_extcon_dev(name);
+ if (!edev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return edev;
+ }
+
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
+ np_phy = of_parse_phandle(dev->of_node, "phys", 0);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
+
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
+ of_node_put(np_phy);
+
+ return edev;
+}
+
#if IS_ENABLED(CONFIG_USB_ROLE_SWITCH)
#define ROLE_SWITCH 1
static int dwc3_usb_role_switch_set(struct usb_role_switch *sw,
@@ -542,6 +588,10 @@ int dwc3_drd_init(struct dwc3 *dwc)
device_property_read_bool(dwc->dev, "usb-role-switch"))
return dwc3_setup_role_switch(dwc);
+ dwc->edev = dwc3_get_extcon(dwc);
+ if (IS_ERR(dwc->edev))
+ return PTR_ERR(dwc->edev);
+
if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6b018048fe2e..fb14511b1e10 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -40,10 +40,12 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
-#define PCI_DEVICE_ID_INTEL_ADL 0x465e
-#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
-#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
+#define PCI_DEVICE_ID_INTEL_ADL 0x460e
+#define PCI_DEVICE_ID_INTEL_ADL_PCH 0x51ee
+#define PCI_DEVICE_ID_INTEL_ADLN 0x465e
+#define PCI_DEVICE_ID_INTEL_ADLN_PCH 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPL 0x460e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
@@ -447,15 +449,21 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_PCH),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLN),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLN_PCH),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index c5e482f53e9d..7c40f3ffc054 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/pm_domain.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
@@ -244,6 +243,7 @@ static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom)
*/
static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
{
+ enum usb_device_speed max_speed;
struct device *dev = qcom->dev;
int ret;
@@ -253,7 +253,7 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
dev_err(dev, "failed to get usb-ddr path: %ld\n",
- PTR_ERR(qcom->icc_path_ddr));
+ PTR_ERR(qcom->icc_path_ddr));
return PTR_ERR(qcom->icc_path_ddr);
}
@@ -264,21 +264,20 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
return PTR_ERR(qcom->icc_path_apps);
}
- if (usb_get_maximum_speed(&qcom->dwc3->dev) >= USB_SPEED_SUPER ||
- usb_get_maximum_speed(&qcom->dwc3->dev) == USB_SPEED_UNKNOWN)
+ max_speed = usb_get_maximum_speed(&qcom->dwc3->dev);
+ if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) {
ret = icc_set_bw(qcom->icc_path_ddr,
- USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
- else
+ USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW);
+ } else {
ret = icc_set_bw(qcom->icc_path_ddr,
- USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
-
+ USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW);
+ }
if (ret) {
dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret);
return ret;
}
- ret = icc_set_bw(qcom->icc_path_apps,
- APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
+ ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW);
if (ret) {
dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret);
return ret;
@@ -299,11 +298,24 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
icc_put(qcom->icc_path_apps);
}
+/* Only usable in contexts where the role cannot change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ return dwc->xhci;
+}
+
static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
{
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
- struct usb_hcd *hcd = platform_get_drvdata(dwc->xhci);
struct usb_device *udev;
+ struct usb_hcd __maybe_unused *hcd;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ hcd = platform_get_drvdata(dwc->xhci);
/*
* It is possible to query the speed of all children of
@@ -311,8 +323,11 @@ static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
* currently supports only 1 port per controller. So
* this is sufficient.
*/
+#ifdef CONFIG_USB
udev = usb_hub_find_child(hcd->self.root_hub, 1);
-
+#else
+ udev = NULL;
+#endif
if (!udev)
return USB_SPEED_UNKNOWN;
@@ -387,7 +402,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
}
-static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
+static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
{
u32 val;
int i, ret;
@@ -406,7 +421,11 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
if (ret)
dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
- if (device_may_wakeup(qcom->dev)) {
+ /*
+ * The role is stable during suspend as role switching is done from a
+ * freezable workqueue.
+ */
+ if (dwc3_qcom_is_host(qcom) && wakeup) {
qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
dwc3_qcom_enable_interrupts(qcom);
}
@@ -416,7 +435,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
return 0;
}
-static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
+static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
{
int ret;
int i;
@@ -424,7 +443,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
if (!qcom->is_suspended)
return 0;
- if (device_may_wakeup(qcom->dev))
+ if (dwc3_qcom_is_host(qcom) && wakeup)
dwc3_qcom_disable_interrupts(qcom);
for (i = 0; i < qcom->num_clocks; i++) {
@@ -458,7 +477,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
if (qcom->pm_suspended)
return IRQ_HANDLED;
- if (dwc->xhci)
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
@@ -757,13 +780,13 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
static int dwc3_qcom_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- int ret, i;
- bool ignore_pipe_clk;
- struct generic_pm_domain *genpd;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ struct resource *res, *parent_res = NULL;
+ int ret, i;
+ bool ignore_pipe_clk;
+ bool wakeup_source;
qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
if (!qcom)
@@ -772,8 +795,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- genpd = pd_to_genpd(qcom->dev->pm_domain);
-
if (has_acpi_companion(dev)) {
qcom->acpi_pdata = acpi_device_get_match_data(dev);
if (!qcom->acpi_pdata) {
@@ -881,16 +902,9 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret)
goto interconnect_exit;
- if (device_can_wakeup(&qcom->dwc3->dev)) {
- /*
- * Setting GENPD_FLAG_ALWAYS_ON flag takes care of keeping
- * genpd on in both runtime suspend and system suspend cases.
- */
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- device_init_wakeup(&pdev->dev, true);
- } else {
- genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
- }
+ wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
+ device_init_wakeup(&pdev->dev, wakeup_source);
+ device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
qcom->is_suspended = false;
pm_runtime_set_active(dev);
@@ -944,39 +958,45 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- int ret = 0;
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
- ret = dwc3_qcom_suspend(qcom);
- if (!ret)
- qcom->pm_suspended = true;
+ ret = dwc3_qcom_suspend(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = true;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
int ret;
- ret = dwc3_qcom_resume(qcom);
- if (!ret)
- qcom->pm_suspended = false;
+ ret = dwc3_qcom_resume(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = false;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_suspend(qcom);
+ return dwc3_qcom_suspend(qcom, true);
}
static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_resume(qcom);
+ return dwc3_qcom_resume(qcom, true);
}
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
@@ -987,10 +1007,6 @@ static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
static const struct of_device_id dwc3_qcom_of_match[] = {
{ .compatible = "qcom,dwc3" },
- { .compatible = "qcom,msm8996-dwc3" },
- { .compatible = "qcom,msm8998-dwc3" },
- { .compatible = "qcom,sdm660-dwc3" },
- { .compatible = "qcom,sdm845-dwc3" },
{ }
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 166b5bde45cb..6c14a79279f9 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
/* Manage SoftReset */
reset_control_deassert(dwc3_data->rstc_rst);
- child = of_get_child_by_name(node, "dwc3");
+ child = of_get_child_by_name(node, "usb");
if (!child) {
dev_err(&pdev->dev, "failed to find dwc3 core node\n");
ret = -ENODEV;
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 67b237c7a76a..8607d4c23283 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -47,6 +47,7 @@ struct dwc3_xlnx {
struct device *dev;
void __iomem *regs;
int (*pltfm_init)(struct dwc3_xlnx *data);
+ struct phy *usb3_phy;
};
static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
@@ -100,13 +101,12 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct gpio_desc *reset_gpio;
- struct phy *usb3_phy;
int ret = 0;
u32 reg;
- usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
- if (IS_ERR(usb3_phy)) {
- ret = PTR_ERR(usb3_phy);
+ priv_data->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(priv_data->usb3_phy)) {
+ ret = PTR_ERR(priv_data->usb3_phy);
dev_err_probe(dev, ret,
"failed to get USB3 PHY\n");
goto err;
@@ -121,7 +121,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
* in use but the usb3-phy entry is missing from the device tree.
* Therefore, skip these operations in this case.
*/
- if (!usb3_phy)
+ if (!priv_data->usb3_phy)
goto skip_usb3_phy;
crst = devm_reset_control_get_exclusive(dev, "usb_crst");
@@ -166,9 +166,9 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
- ret = phy_init(usb3_phy);
+ ret = phy_init(priv_data->usb3_phy);
if (ret < 0) {
- phy_exit(usb3_phy);
+ phy_exit(priv_data->usb3_phy);
goto err;
}
@@ -196,9 +196,9 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
- ret = phy_power_on(usb3_phy);
+ ret = phy_power_on(priv_data->usb3_phy);
if (ret < 0) {
- phy_exit(usb3_phy);
+ phy_exit(priv_data->usb3_phy);
goto err;
}
@@ -322,7 +322,7 @@ static int dwc3_xlnx_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused dwc3_xlnx_suspend_common(struct device *dev)
+static int __maybe_unused dwc3_xlnx_runtime_suspend(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
@@ -331,7 +331,7 @@ static int __maybe_unused dwc3_xlnx_suspend_common(struct device *dev)
return 0;
}
-static int __maybe_unused dwc3_xlnx_resume_common(struct device *dev)
+static int __maybe_unused dwc3_xlnx_runtime_resume(struct device *dev)
{
struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
@@ -346,8 +346,45 @@ static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
return 0;
}
-static UNIVERSAL_DEV_PM_OPS(dwc3_xlnx_dev_pm_ops, dwc3_xlnx_suspend_common,
- dwc3_xlnx_resume_common, dwc3_xlnx_runtime_idle);
+static int __maybe_unused dwc3_xlnx_suspend(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+
+ phy_exit(priv_data->usb3_phy);
+
+ /* Disable the clocks */
+ clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_xlnx_resume(struct device *dev)
+{
+ struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
+ if (ret)
+ return ret;
+
+ ret = phy_init(priv_data->usb3_phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(priv_data->usb3_phy);
+ if (ret < 0) {
+ phy_exit(priv_data->usb3_phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops dwc3_xlnx_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_xlnx_suspend, dwc3_xlnx_resume)
+ SET_RUNTIME_PM_OPS(dwc3_xlnx_runtime_suspend,
+ dwc3_xlnx_runtime_resume, dwc3_xlnx_runtime_idle)
+};
static struct platform_driver dwc3_xlnx_driver = {
.probe = dwc3_xlnx_probe,
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 197af63f8d05..61de693461da 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -197,7 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int ret;
spin_lock_irqsave(&dwc->lock, flags);
- if (!dep->endpoint.desc || !dwc->pullups_connected) {
+ if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
ret = -ESHUTDOWN;
@@ -293,7 +293,10 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
continue;
dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
- dwc3_stop_active_transfer(dwc3_ep, true, true);
+ if (dwc->connected)
+ dwc3_stop_active_transfer(dwc3_ep, true, true);
+ else
+ dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
}
}
@@ -815,7 +818,7 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
int ret = -EINVAL;
u32 len;
- if (!dwc->gadget_driver || !dwc->connected)
+ if (!dwc->gadget_driver || !dwc->softconnect || !dwc->connected)
goto out;
trace_dwc3_ctrl_req(ctrl);
@@ -1118,6 +1121,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
{
switch (event->status) {
case DEPEVT_STATUS_CONTROL_DATA:
+ if (!dwc->softconnect || !dwc->connected)
+ return;
/*
* We already have a DATA transfer in the controller's cache,
* if we receive a XferNotReady(DATA) we will ignore it, unless
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index aeeec751c53c..079cd333632e 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -366,7 +366,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
- if (!(cmd & DWC3_DEPCMD_CMDACT)) {
+ if (!(cmd & DWC3_DEPCMD_CMDACT) ||
+ (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
+ !(cmd & DWC3_DEPCMD_CMDIOC))) {
ret = 0;
goto skip_status;
}
@@ -965,29 +967,33 @@ out:
return 0;
}
-static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
{
struct dwc3_request *req;
dwc3_stop_active_transfer(dep, true, false);
+ /* If endxfer is delayed, avoid unmapping requests */
+ if (dep->flags & DWC3_EP_DELAY_STOP)
+ return;
+
/* - giveback all requests to gadget driver */
while (!list_empty(&dep->started_list)) {
req = next_request(&dep->started_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
while (!list_empty(&dep->pending_list)) {
req = next_request(&dep->pending_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
while (!list_empty(&dep->cancelled_list)) {
req = next_request(&dep->cancelled_list);
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ dwc3_gadget_giveback(dep, req, status);
}
}
@@ -1005,6 +1011,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
+ u32 mask;
trace_dwc3_gadget_ep_disable(dep);
@@ -1022,11 +1029,19 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->endpoint.desc = NULL;
}
- dwc3_remove_requests(dwc, dep);
+ dwc3_remove_requests(dwc, dep, -ECONNRESET);
dep->stream_capable = false;
dep->type = 0;
- dep->flags &= DWC3_EP_TXFIFO_RESIZED;
+ mask = DWC3_EP_TXFIFO_RESIZED;
+ /*
+ * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
+ * set. Do not clear DEP flags, so that the end transfer command will
+ * be reattempted during the next SETUP stage.
+ */
+ if (dep->flags & DWC3_EP_DELAY_STOP)
+ mask |= (DWC3_EP_DELAY_STOP | DWC3_EP_TRANSFER_STARTED);
+ dep->flags &= mask;
return 0;
}
@@ -2340,7 +2355,7 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
if (!dep)
continue;
- dwc3_remove_requests(dwc, dep);
+ dwc3_remove_requests(dwc, dep, -ESHUTDOWN);
}
}
@@ -2440,7 +2455,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
- u32 timeout = 500;
+ u32 timeout = 2000;
if (pm_runtime_suspended(dwc->dev))
return 0;
@@ -2473,6 +2488,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc3_gadget_dctl_write_safe(dwc, reg);
do {
+ usleep_range(1000, 2000);
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
@@ -2501,6 +2517,9 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
if (dwc->ep0state != EP0_SETUP_PHASE) {
int ret;
+ if (dwc->delayed_status)
+ dwc3_ep0_send_delayed_status(dwc);
+
reinit_completion(&dwc->ep0_in_setup);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2539,9 +2558,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
- if (dwc->pullups_connected == is_on)
- return 0;
-
dwc->softconnect = is_on;
/*
@@ -2566,6 +2582,13 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
+ synchronize_irq(dwc->irq_gadget);
+
if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc);
} else {
@@ -2716,6 +2739,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
+ dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
@@ -2723,6 +2747,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
}
dep = dwc->eps[1];
+ dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
@@ -3566,7 +3591,7 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
* streams are updated, and the device controller will not be
* triggered to generate ERDY to move the next stream data. To
* workaround this and maintain compatibility with various
- * hosts, force to reinitate the stream until the host is ready
+ * hosts, force to reinitiate the stream until the host is ready
* instead of waiting for the host to prime the endpoint.
*/
if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
@@ -3594,11 +3619,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep = dwc->eps[epnum];
if (!(dep->flags & DWC3_EP_ENABLED)) {
- if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
+ if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
return;
/* Handle only EPCMDCMPLT when EP disabled */
- if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
+ if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
+ !(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
return;
}
@@ -3693,7 +3719,7 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* timeout. Delay issuing the End Transfer command until the Setup TRB is
* prepared.
*/
- if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) {
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
dep->flags |= DWC3_EP_DELAY_STOP;
return;
}
@@ -3761,13 +3787,24 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
reg &= ~DWC3_DCTL_INITU2ENA;
dwc3_gadget_dctl_write_safe(dwc, reg);
+ dwc->connected = false;
+
dwc3_disconnect_gadget(dwc);
dwc->gadget->speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
- dwc->connected = false;
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ unsigned int dir;
+
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+ dwc3_ep0_stall_and_restart(dwc);
+ }
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -3865,6 +3902,9 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
u8 lanes = 1;
u8 speed;
+ if (!dwc->softconnect)
+ return;
+
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
@@ -4127,7 +4167,7 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
unsigned int is_ss = evtinfo & BIT(4);
/*
- * WORKAROUND: DWC3 revison 2.20a with hibernation support
+ * WORKAROUND: DWC3 revision 2.20a with hibernation support
* have a known issue which can cause USB CV TD.9.23 to fail
* randomly.
*
@@ -4505,12 +4545,17 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
+ unsigned long flags;
+
if (!dwc->gadget_driver)
return 0;
dwc3_gadget_run_stop(dwc, false, false);
+
+ spin_lock_irqsave(&dwc->lock, flags);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f56c30cf151e..a7154fe8206d 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,13 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-plat.h"
#include "core.h"
+static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
+ .quirks = XHCI_SKIP_PHY_INIT,
+};
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -92,6 +97,11 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
+ sizeof(dwc3_xhci_plat_priv));
+ if (ret)
+ goto err;
+
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
@@ -135,4 +145,5 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
}
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index cb998ba50fea..1975aec8d36d 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -241,7 +241,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
__entry->enqueue = dep->trb_enqueue;
__entry->dequeue = dep->trb_dequeue;
),
- TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
+ TP_printk("%s: trb %p (E%d:D%d) buf %08x%08x size %s%d ctrl %08x sofn %08x (%c%c%c%c:%c%c:%s)",
__get_str(name), __entry->trb, __entry->enqueue,
__entry->dequeue, __entry->bph, __entry->bpl,
({char *s;
@@ -267,6 +267,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
s = "";
} s; }),
DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
+ DWC3_TRB_CTRL_GET_SID_SOFN(__entry->ctrl),
__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e0fa4b186ec6..73dc10a77cde 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2645,10 +2645,10 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
unsigned i = 0;
vla_group(d);
vla_item(d, struct usb_gadget_strings *, stringtabs,
- lang_count + 1);
+ size_add(lang_count, 1));
vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
vla_item(d, struct usb_string, strings,
- lang_count*(needed_count+1));
+ size_mul(lang_count, (needed_count + 1)));
char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
@@ -3700,7 +3700,7 @@ int ffs_name_dev(struct ffs_dev *dev, const char *name)
existing = _ffs_do_find_dev(name);
if (!existing)
- strlcpy(dev->name, name, ARRAY_SIZE(dev->name));
+ strscpy(dev->name, name, ARRAY_SIZE(dev->name));
else if (existing != dev)
ret = -EBUSY;
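
On the __ffs_data_got_strings() sizing above: size_add() and size_mul() come from <linux/overflow.h> and saturate at SIZE_MAX on overflow, so the subsequent kmalloc() fails cleanly instead of returning an undersized buffer for userspace-supplied counts. A rough standalone illustration of the saturating idea, not the kernel's implementation:

#include <stddef.h>
#include <stdint.h>

/* Saturating size arithmetic: overflow yields SIZE_MAX, which no allocator
 * will satisfy, instead of a silently wrapped-around small value. */
static size_t sat_mul(size_t a, size_t b)
{
	if (b && a > SIZE_MAX / b)
		return SIZE_MAX;
	return a * b;
}

static size_t sat_add(size_t a, size_t b)
{
	if (a > SIZE_MAX - b)
		return SIZE_MAX;
	return a + b;
}
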
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 925e99f9775c..3abf7f586e2a 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2662,11 +2662,16 @@ static ssize_t forced_eject_store(struct device *dev,
}
static DEVICE_ATTR_RW(nofua);
-/* mode wil be set in fsg_lun_attr_is_visible() */
-static DEVICE_ATTR(ro, 0, ro_show, ro_store);
-static DEVICE_ATTR(file, 0, file_show, file_store);
static DEVICE_ATTR_WO(forced_eject);
+/*
+ * Mode of the ro and file attribute files will be overridden in
+ * fsg_lun_dev_is_visible() depending on whether this is a cdrom or a
+ * removable device.
+ */
+static DEVICE_ATTR_RW(ro);
+static DEVICE_ATTR_RW(file);
+
/****************************** FSG COMMON ******************************/
static void fsg_lun_release(struct device *dev)
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index dc8f078f918c..c36bcfa0e9b4 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -450,39 +450,35 @@ struct ndp_parser_opts {
unsigned next_ndp_index;
};
-#define INIT_NDP16_OPTS { \
- .nth_sign = USB_CDC_NCM_NTH16_SIGN, \
- .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN, \
- .nth_size = sizeof(struct usb_cdc_ncm_nth16), \
- .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \
- .dpe_size = sizeof(struct usb_cdc_ncm_dpe16), \
- .ndplen_align = 4, \
- .dgram_item_len = 1, \
- .block_length = 1, \
- .ndp_index = 1, \
- .reserved1 = 0, \
- .reserved2 = 0, \
- .next_ndp_index = 1, \
- }
-
-
-#define INIT_NDP32_OPTS { \
- .nth_sign = USB_CDC_NCM_NTH32_SIGN, \
- .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN, \
- .nth_size = sizeof(struct usb_cdc_ncm_nth32), \
- .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \
- .dpe_size = sizeof(struct usb_cdc_ncm_dpe32), \
- .ndplen_align = 8, \
- .dgram_item_len = 2, \
- .block_length = 2, \
- .ndp_index = 2, \
- .reserved1 = 1, \
- .reserved2 = 2, \
- .next_ndp_index = 2, \
- }
+static const struct ndp_parser_opts ndp16_opts = {
+ .nth_sign = USB_CDC_NCM_NTH16_SIGN,
+ .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN,
+ .nth_size = sizeof(struct usb_cdc_ncm_nth16),
+ .ndp_size = sizeof(struct usb_cdc_ncm_ndp16),
+ .dpe_size = sizeof(struct usb_cdc_ncm_dpe16),
+ .ndplen_align = 4,
+ .dgram_item_len = 1,
+ .block_length = 1,
+ .ndp_index = 1,
+ .reserved1 = 0,
+ .reserved2 = 0,
+ .next_ndp_index = 1,
+};
-static const struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS;
-static const struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS;
+static const struct ndp_parser_opts ndp32_opts = {
+ .nth_sign = USB_CDC_NCM_NTH32_SIGN,
+ .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN,
+ .nth_size = sizeof(struct usb_cdc_ncm_nth32),
+ .ndp_size = sizeof(struct usb_cdc_ncm_ndp32),
+ .dpe_size = sizeof(struct usb_cdc_ncm_dpe32),
+ .ndplen_align = 8,
+ .dgram_item_len = 2,
+ .block_length = 2,
+ .ndp_index = 2,
+ .reserved1 = 1,
+ .reserved2 = 2,
+ .next_ndp_index = 2,
+};
static inline void put_ncm(__le16 **p, unsigned size, unsigned val)
{
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index abec5c58f525..a881c69b1f2b 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -89,7 +89,7 @@ struct printer_dev {
u8 printer_cdev_open;
wait_queue_head_t wait;
unsigned q_len;
- char *pnp_string; /* We don't own memory! */
+ char **pnp_string; /* We don't own memory! */
struct usb_function function;
};
@@ -1000,16 +1000,16 @@ static int printer_func_setup(struct usb_function *f,
if ((wIndex>>8) != dev->interface)
break;
- if (!dev->pnp_string) {
+ if (!*dev->pnp_string) {
value = 0;
break;
}
- value = strlen(dev->pnp_string);
+ value = strlen(*dev->pnp_string);
buf[0] = (value >> 8) & 0xFF;
buf[1] = value & 0xFF;
- memcpy(buf + 2, dev->pnp_string, value);
+ memcpy(buf + 2, *dev->pnp_string, value);
DBG(dev, "1284 PNP String: %x %s\n", value,
- dev->pnp_string);
+ *dev->pnp_string);
break;
case GET_PORT_STATUS: /* Get Port Status */
@@ -1475,7 +1475,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi)
kref_init(&dev->kref);
++opts->refcnt;
dev->minor = opts->minor;
- dev->pnp_string = opts->pnp_string;
+ dev->pnp_string = &opts->pnp_string;
dev->q_len = opts->q_len;
mutex_unlock(&opts->lock);
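
The char ** change above means printer_func_setup() always dereferences the pnp_string currently held in the f_printer options, so a string rewritten through configfs after binding is what gets reported to the host. A tiny standalone illustration of the extra indirection; all names here are made up:

#include <stdio.h>

struct opts { char *pnp_string; };
struct prn  { char **pnp_string; };	/* points at the option, not its value */

int main(void)
{
	struct opts opts = { .pnp_string = "old" };
	struct prn prn = { .pnp_string = &opts.pnp_string };

	opts.pnp_string = "new";		/* e.g. rewritten via configfs */
	printf("%s\n", *prn.pnp_string);	/* prints "new", not "old" */
	return 0;
}
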
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 8e17ac831be0..658e2e21fdd0 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -2306,7 +2306,7 @@ static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
-static int tcm_init(void)
+static int __init tcm_init(void)
{
int ret;
@@ -2322,7 +2322,7 @@ static int tcm_init(void)
}
module_init(tcm_init);
-static void tcm_exit(void)
+static void __exit tcm_exit(void)
{
target_unregister_template(&usbg_ops);
usb_function_unregister(&tcmusb_func);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 1905a8d8e0c9..08726e4c68a5 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -291,6 +291,12 @@ static struct usb_endpoint_descriptor ss_ep_int_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor ss_ep_int_desc_comp = {
+ .bLength = sizeof(ss_ep_int_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .wBytesPerInterval = cpu_to_le16(6),
+};
+
/* Audio Streaming OUT Interface - Alt0 */
static struct usb_interface_descriptor std_as_out_if0_desc = {
.bLength = sizeof std_as_out_if0_desc,
@@ -604,7 +610,8 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
- (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc_comp,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
@@ -800,6 +807,7 @@ static void setup_headers(struct f_uac2_opts *opts,
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *ep_int_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
@@ -827,6 +835,7 @@ static void setup_headers(struct f_uac2_opts *opts,
epin_fback_desc = &ss_epin_fback_desc;
epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
+ ep_int_desc_comp = &ss_ep_int_desc_comp;
}
i = 0;
@@ -855,8 +864,11 @@ static void setup_headers(struct f_uac2_opts *opts,
if (EPOUT_EN(opts))
headers[i++] = USBDHDR(&io_out_ot_desc);
- if (FUOUT_EN(opts) || FUIN_EN(opts))
+ if (FUOUT_EN(opts) || FUIN_EN(opts)) {
headers[i++] = USBDHDR(ep_int_desc);
+ if (ep_int_desc_comp)
+ headers[i++] = USBDHDR(ep_int_desc_comp);
+ }
if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&std_as_out_if0_desc);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 71669e0e4d00..6e196e06181e 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -421,7 +421,7 @@ uvc_register_video(struct uvc_device *uvc)
int ret;
/* TODO reference counting. */
- memset(&uvc->vdev, 0, sizeof(uvc->video));
+ memset(&uvc->vdev, 0, sizeof(uvc->vdev));
uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev;
uvc->vdev.fops = &uvc_v4l2_fops;
@@ -430,7 +430,7 @@ uvc_register_video(struct uvc_device *uvc)
uvc->vdev.vfl_dir = VFL_DIR_TX;
uvc->vdev.lock = &uvc->video.mutex;
uvc->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
- strlcpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
+ strscpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name));
video_set_drvdata(&uvc->vdev, uvc);
@@ -888,6 +888,7 @@ static void uvc_free(struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts,
func_inst);
+ config_item_put(&uvc->header->item);
--opts->refcnt;
kfree(uvc);
}
@@ -897,10 +898,14 @@ static void uvc_function_unbind(struct usb_configuration *c,
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ struct uvc_video *video = &uvc->video;
long wait_ret = 1;
uvcg_info(f, "%s()\n", __func__);
+ if (video->async_wq)
+ destroy_workqueue(video->async_wq);
+
/*
* If we know we're connected via v4l2, then there should be a cleanup
* of the device from userspace either via UVC_EVENT_DISCONNECT or
@@ -941,6 +946,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
struct uvc_device *uvc;
struct f_uvc_opts *opts;
struct uvc_descriptor_header **strm_cls;
+ struct config_item *streaming, *header, *h;
uvc = kzalloc(sizeof(*uvc), GFP_KERNEL);
if (uvc == NULL)
@@ -973,6 +979,28 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
uvc->desc.fs_streaming = opts->fs_streaming;
uvc->desc.hs_streaming = opts->hs_streaming;
uvc->desc.ss_streaming = opts->ss_streaming;
+
+ streaming = config_group_find_item(&opts->func_inst.group, "streaming");
+ if (!streaming)
+ goto err_config;
+
+ header = config_group_find_item(to_config_group(streaming), "header");
+ config_item_put(streaming);
+ if (!header)
+ goto err_config;
+
+ h = config_group_find_item(to_config_group(header), "h");
+ config_item_put(header);
+ if (!h)
+ goto err_config;
+
+ uvc->header = to_uvcg_streaming_header(h);
+ if (!uvc->header->linked) {
+ mutex_unlock(&opts->lock);
+ kfree(uvc);
+ return ERR_PTR(-EBUSY);
+ }
+
++opts->refcnt;
mutex_unlock(&opts->lock);
@@ -988,6 +1016,11 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
uvc->func.bind_deactivated = true;
return &uvc->func;
+
+err_config:
+ mutex_unlock(&opts->lock);
+ kfree(uvc);
+ return ERR_PTR(-ENOENT);
}
DECLARE_USB_FUNCTION_INIT(uvc, uvc_alloc_inst, uvc_alloc);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 713efd9aefde..29bf8664bf58 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -869,7 +869,7 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser);
static inline int rndis_get_nr(void)
{
- return ida_simple_get(&rndis_ida, 0, 0, GFP_KERNEL);
+ return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
}
static inline void rndis_put_nr(int nr)
@@ -1105,7 +1105,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
"used : %s\n"
"state : %s\n"
"medium : 0x%08X\n"
- "speed : %d\n"
+ "speed : %u\n"
"cable : %s\n"
"vendor ID : 0x%08X\n"
"vendor : %s\n",
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 03035dbbe97b..208c6a92780a 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
+ /*
+ * Convert to Minutes-Seconds-Frames.
+ * Sector size is already set to 2048 bytes.
+ */
addr += 2*75; /* Lead-in occupies 2 seconds */
dest[3] = addr % 75; /* Frames */
addr /= 75;
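
On store_cdrom_address() above: the block address passed in is already in 2048-byte sectors, i.e. already a frame count, so the removed addr >>= 2 (which appears to have assumed 512-byte units) scaled the address down four-fold. What remains is plain MSF arithmetic: 75 frames per second plus the 150-frame (2 second) lead-in. A small standalone version of the same arithmetic; the output array layout is illustrative:

#include <stdio.h>
#include <stdint.h>

/* LBA in 2048-byte sectors -> Minutes:Seconds:Frames at 75 frames/s,
 * counting the 2-second (150-frame) lead-in. */
static void lba_to_msf(uint32_t lba, uint8_t msf[3])
{
	lba += 2 * 75;		/* lead-in occupies 2 seconds */
	msf[2] = lba % 75;	/* frames */
	lba /= 75;
	msf[1] = lba % 60;	/* seconds */
	msf[0] = lba / 60;	/* minutes */
}

int main(void)
{
	uint8_t msf[3];

	lba_to_msf(16, msf);	/* sector 16 -> 0:02:16 */
	printf("%d:%02d:%02d\n", msf[0], msf[1], msf[2]);
	return 0;
}
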
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 7887def05dc2..e06022873df1 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -144,10 +144,10 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
struct eth_dev *dev = netdev_priv(net);
- strlcpy(p->driver, "g_ether", sizeof(p->driver));
- strlcpy(p->version, UETH__VERSION, sizeof(p->version));
- strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
- strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
+ strscpy(p->driver, "g_ether", sizeof(p->driver));
+ strscpy(p->version, UETH__VERSION, sizeof(p->version));
+ strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+ strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
/* REVISIT can also support:
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 6f68cbeeee7c..7538279f9817 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1443,7 +1443,7 @@ void gserial_resume(struct gserial *gser)
}
EXPORT_SYMBOL_GPL(gserial_resume);
-static int userial_init(void)
+static int __init userial_init(void)
{
struct tty_driver *driver;
unsigned i;
@@ -1496,7 +1496,7 @@ fail:
}
module_init(userial_init);
-static void userial_cleanup(void)
+static void __exit userial_cleanup(void)
{
tty_unregister_driver(gs_tty_driver);
tty_driver_kref_put(gs_tty_driver);
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index 60ae8b2d3f6a..dd21c251542c 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -158,8 +158,8 @@ size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
snd_pcm_sframes_t frames;
try_again:
- if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
- runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
+ if (runtime->state == SNDRV_PCM_STATE_XRUN ||
+ runtime->state == SNDRV_PCM_STATE_SUSPENDED) {
result = snd_pcm_kernel_ioctl(substream,
SNDRV_PCM_IOCTL_PREPARE, NULL);
if (result < 0) {
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 58e383afdd44..40226b1f7e14 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -88,6 +88,7 @@ struct uvc_video {
struct usb_ep *ep;
struct work_struct pump;
+ struct workqueue_struct *async_wq;
/* Frame parameters */
u8 bpp;
@@ -133,6 +134,8 @@ struct uvc_device {
bool func_connected;
wait_queue_head_t func_connected_queue;
+ struct uvcg_streaming_header *header;
+
/* Descriptors */
struct {
const struct uvc_descriptor_header * const *fs_control;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index fd8f73bb726d..c4ed48d6b8a4 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -18,12 +18,161 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-uvc.h>
#include "f_uvc.h"
#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "uvc_v4l2.h"
+#include "uvc_configfs.h"
+
+static struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
+{
+ char guid[16] = UVC_GUID_FORMAT_MJPEG;
+ struct uvc_format_desc *format;
+ struct uvcg_uncompressed *unc;
+
+ if (uformat->type == UVCG_UNCOMPRESSED) {
+ unc = to_uvcg_uncompressed(&uformat->group.cg_item);
+ if (!unc)
+ return ERR_PTR(-EINVAL);
+
+ memcpy(guid, unc->desc.guidFormat, sizeof(guid));
+ }
+
+ format = uvc_format_by_guid(guid);
+ if (!format)
+ return ERR_PTR(-EINVAL);
+
+ return format;
+}
+
+static int uvc_v4l2_get_bytesperline(struct uvcg_format *uformat,
+ struct uvcg_frame *uframe)
+{
+ struct uvcg_uncompressed *u;
+
+ if (uformat->type == UVCG_UNCOMPRESSED) {
+ u = to_uvcg_uncompressed(&uformat->group.cg_item);
+ if (!u)
+ return 0;
+
+ return u->desc.bBitsPerPixel * uframe->frame.w_width / 8;
+ }
+
+ return 0;
+}
+
+static int uvc_get_frame_size(struct uvcg_format *uformat,
+ struct uvcg_frame *uframe)
+{
+ unsigned int bpl = uvc_v4l2_get_bytesperline(uformat, uframe);
+
+ return bpl ? bpl * uframe->frame.w_height :
+ uframe->frame.dw_max_video_frame_buffer_size;
+}
+
+static struct uvcg_format *find_format_by_index(struct uvc_device *uvc, int index)
+{
+ struct uvcg_format_ptr *format;
+ struct uvcg_format *uformat = NULL;
+ int i = 1;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ if (index == i) {
+ uformat = format->fmt;
+ break;
+ }
+ i++;
+ }
+
+ return uformat;
+}
+
+static struct uvcg_frame *find_frame_by_index(struct uvc_device *uvc,
+ struct uvcg_format *uformat,
+ int index)
+{
+ struct uvcg_format_ptr *format;
+ struct uvcg_frame_ptr *frame;
+ struct uvcg_frame *uframe = NULL;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ if (format->fmt->type != uformat->type)
+ continue;
+ list_for_each_entry(frame, &format->fmt->frames, entry) {
+ if (index == frame->frm->frame.b_frame_index) {
+ uframe = frame->frm;
+ break;
+ }
+ }
+ }
+
+ return uframe;
+}
+
+static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
+ u32 pixelformat)
+{
+ struct uvcg_format_ptr *format;
+ struct uvcg_format *uformat = NULL;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
+
+ if (fmtdesc->fcc == pixelformat) {
+ uformat = format->fmt;
+ break;
+ }
+ }
+
+ return uformat;
+}
+
+static struct uvcg_frame *find_closest_frame_by_size(struct uvc_device *uvc,
+ struct uvcg_format *uformat,
+ u16 rw, u16 rh)
+{
+ struct uvc_video *video = &uvc->video;
+ struct uvcg_format_ptr *format;
+ struct uvcg_frame_ptr *frame;
+ struct uvcg_frame *uframe = NULL;
+ unsigned int d, maxd;
+
+ /* Find the closest image size. The distance between image sizes is
+ * the size in pixels of the non-overlapping regions between the
+ * requested size and the frame-specified size.
+ */
+ maxd = (unsigned int)-1;
+
+ list_for_each_entry(format, &uvc->header->formats, entry) {
+ if (format->fmt->type != uformat->type)
+ continue;
+
+ list_for_each_entry(frame, &format->fmt->frames, entry) {
+ u16 w, h;
+
+ w = frame->frm->frame.w_width;
+ h = frame->frm->frame.w_height;
+
+ d = min(w, rw) * min(h, rh);
+ d = w*h + rw*rh - 2*d;
+ if (d < maxd) {
+ maxd = d;
+ uframe = frame->frm;
+ }
+
+ if (maxd == 0)
+ break;
+ }
+ }
+
+ if (!uframe)
+ uvcg_dbg(&video->uvc->func, "Unsupported size %ux%u\n", rw, rh);
+
+ return uframe;
+}
/* --------------------------------------------------------------------------
* Requests handling
@@ -67,9 +216,9 @@ uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
struct uvc_device *uvc = video_get_drvdata(vdev);
struct usb_composite_dev *cdev = uvc->func.config->cdev;
- strlcpy(cap->driver, "g_uvc", sizeof(cap->driver));
- strlcpy(cap->card, cdev->gadget->name, sizeof(cap->card));
- strlcpy(cap->bus_info, dev_name(&cdev->gadget->dev),
+ strscpy(cap->driver, "g_uvc", sizeof(cap->driver));
+ strscpy(cap->card, cdev->gadget->name, sizeof(cap->card));
+ strscpy(cap->bus_info, dev_name(&cdev->gadget->dev),
sizeof(cap->bus_info));
return 0;
}
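
The find_closest_frame_by_size() helper added above scores each candidate as d = w*h + rw*rh - 2*min(w,rw)*min(h,rh), i.e. the number of pixels in the non-overlapping regions when the requested and candidate sizes are overlaid at the origin; d is zero only for an exact match, which lets the search stop early. A standalone check of that metric:

#include <stdio.h>

/* Area of the non-overlapping regions of two sizes anchored at the origin. */
static unsigned int size_distance(unsigned int w, unsigned int h,
				  unsigned int rw, unsigned int rh)
{
	unsigned int overlap = (w < rw ? w : rw) * (h < rh ? h : rh);

	return w * h + rw * rh - 2 * overlap;
}

int main(void)
{
	/* Requesting 640x480: the exact frame scores 0, 1280x720 scores 614400. */
	printf("%u %u\n", size_distance(640, 480, 640, 480),
	       size_distance(1280, 720, 640, 480));
	return 0;
}
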
@@ -135,6 +284,139 @@ uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
static int
+uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_video *video = &uvc->video;
+ struct uvcg_format *uformat;
+ struct uvcg_frame *uframe;
+ u8 *fcc;
+
+ if (fmt->type != video->queue.queue.type)
+ return -EINVAL;
+
+ fcc = (u8 *)&fmt->fmt.pix.pixelformat;
+ uvcg_dbg(&uvc->func, "Trying format 0x%08x (%c%c%c%c): %ux%u\n",
+ fmt->fmt.pix.pixelformat,
+ fcc[0], fcc[1], fcc[2], fcc[3],
+ fmt->fmt.pix.width, fmt->fmt.pix.height);
+
+ uformat = find_format_by_pix(uvc, fmt->fmt.pix.pixelformat);
+ if (!uformat)
+ return -EINVAL;
+
+ uframe = find_closest_frame_by_size(uvc, uformat,
+ fmt->fmt.pix.width, fmt->fmt.pix.height);
+ if (!uframe)
+ return -EINVAL;
+
+ fmt->fmt.pix.width = uframe->frame.w_width;
+ fmt->fmt.pix.height = uframe->frame.w_height;
+ fmt->fmt.pix.field = V4L2_FIELD_NONE;
+ fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe);
+ fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe);
+ fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc;
+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->fmt.pix.priv = 0;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvcg_format *uformat = NULL;
+ struct uvcg_frame *uframe = NULL;
+ struct uvcg_frame_ptr *frame;
+
+ uformat = find_format_by_pix(uvc, fival->pixel_format);
+ if (!uformat)
+ return -EINVAL;
+
+ list_for_each_entry(frame, &uformat->frames, entry) {
+ if (frame->frm->frame.w_width == fival->width &&
+ frame->frm->frame.w_height == fival->height) {
+ uframe = frame->frm;
+ break;
+ }
+ }
+ if (!uframe)
+ return -EINVAL;
+
+ if (fival->index >= uframe->frame.b_frame_interval_type)
+ return -EINVAL;
+
+ fival->discrete.numerator =
+ uframe->dw_frame_interval[fival->index];
+
+ /* TODO: handle V4L2_FRMIVAL_TYPE_STEPWISE */
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete.denominator = 10000000;
+ v4l2_simplify_fraction(&fival->discrete.numerator,
+ &fival->discrete.denominator, 8, 333);
+
+ return 0;
+}
+
+static int
+uvc_v4l2_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvcg_format *uformat = NULL;
+ struct uvcg_frame *uframe = NULL;
+
+ uformat = find_format_by_pix(uvc, fsize->pixel_format);
+ if (!uformat)
+ return -EINVAL;
+
+ if (fsize->index >= uformat->num_frames)
+ return -EINVAL;
+
+ uframe = find_frame_by_index(uvc, uformat, fsize->index + 1);
+ if (!uframe)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = uframe->frame.w_width;
+ fsize->discrete.height = uframe->frame.w_height;
+
+ return 0;
+}
+
+static int
+uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct uvc_device *uvc = video_get_drvdata(vdev);
+ struct uvc_format_desc *fmtdesc;
+ struct uvcg_format *uformat;
+
+ if (f->index >= uvc->header->num_fmt)
+ return -EINVAL;
+
+ uformat = find_format_by_index(uvc, f->index + 1);
+ if (!uformat)
+ return -EINVAL;
+
+ if (uformat->type != UVCG_UNCOMPRESSED)
+ f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+
+ fmtdesc = to_uvc_format(uformat);
+ f->pixelformat = fmtdesc->fcc;
+
+ strscpy(f->description, fmtdesc->name, sizeof(f->description));
+ f->description[strlen(fmtdesc->name) - 1] = 0;
+
+ return 0;
+}
+
+static int
uvc_v4l2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b)
{
struct video_device *vdev = video_devdata(file);
@@ -170,7 +452,7 @@ uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
return ret;
if (uvc->state == UVC_STATE_STREAMING)
- schedule_work(&video->pump);
+ queue_work(video->async_wq, &video->pump);
return ret;
}
@@ -298,8 +580,12 @@ uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio,
const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = {
.vidioc_querycap = uvc_v4l2_querycap,
+ .vidioc_try_fmt_vid_out = uvc_v4l2_try_format,
.vidioc_g_fmt_vid_out = uvc_v4l2_get_format,
.vidioc_s_fmt_vid_out = uvc_v4l2_set_format,
+ .vidioc_enum_frameintervals = uvc_v4l2_enum_frameintervals,
+ .vidioc_enum_framesizes = uvc_v4l2_enum_framesizes,
+ .vidioc_enum_fmt_vid_out = uvc_v4l2_enum_format,
.vidioc_reqbufs = uvc_v4l2_reqbufs,
.vidioc_querybuf = uvc_v4l2_querybuf,
.vidioc_qbuf = uvc_v4l2_qbuf,
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index c00ce0e91f5d..bb037fcc90e6 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -277,7 +277,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
spin_unlock_irqrestore(&video->req_lock, flags);
if (uvc->state == UVC_STATE_STREAMING)
- schedule_work(&video->pump);
+ queue_work(video->async_wq, &video->pump);
}
static int
@@ -485,7 +485,7 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
video->req_int_count = 0;
- schedule_work(&video->pump);
+ queue_work(video->async_wq, &video->pump);
return ret;
}
@@ -499,6 +499,11 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
spin_lock_init(&video->req_lock);
INIT_WORK(&video->pump, uvcg_video_pump);
+ /* Allocate a work queue for the asynchronous video pump handler. */
+ video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!video->async_wq)
+ return -EINVAL;
+
video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
video->bpp = 16;
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 728987280373..a9a7b3fc60ec 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -994,7 +994,7 @@ static const struct usb_gadget_ops at91_udc_ops = {
.udc_stop = at91_stop,
/*
- * VBUS-powered devices may also also want to support bigger
+ * VBUS-powered devices may also want to support bigger
* power budgets after an appropriate SET_CONFIGURATION.
*/
/* .vbus_power = at91_vbus_power, */
@@ -1779,12 +1779,14 @@ static void at91udc_of_init(struct at91_udc *udc, struct device_node *np)
if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
board->vbus_polled = 1;
- board->vbus_pin = gpiod_get_from_of_node(np, "atmel,vbus-gpio", 0,
- GPIOD_IN, "udc_vbus");
+ board->vbus_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
+ "atmel,vbus", 0, GPIOD_IN,
+ "udc_vbus");
if (IS_ERR(board->vbus_pin))
board->vbus_pin = NULL;
- board->pullup_pin = gpiod_get_from_of_node(np, "atmel,pullup-gpio", 0,
+ board->pullup_pin = fwnode_gpiod_get_index(of_fwnode_handle(np),
+ "atmel,pullup", 0,
GPIOD_ASIS, "udc_pullup");
if (IS_ERR(board->pullup_pin))
board->pullup_pin = NULL;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index cafcf260394c..c63c0c2cf649 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -736,7 +736,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
ret = gadget->ops->pullup(gadget, 0);
if (!ret) {
gadget->connected = 0;
- gadget->udc->driver->disconnect(gadget);
+ mutex_lock(&udc_lock);
+ if (gadget->udc->driver)
+ gadget->udc->driver->disconnect(gadget);
+ mutex_unlock(&udc_lock);
}
out:
@@ -1489,7 +1492,6 @@ static int gadget_bind_driver(struct device *dev)
usb_gadget_udc_set_speed(udc, driver->max_speed);
- mutex_lock(&udc_lock);
ret = driver->bind(udc->gadget, driver);
if (ret)
goto err_bind;
@@ -1499,7 +1501,6 @@ static int gadget_bind_driver(struct device *dev)
goto err_start;
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
- mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
@@ -1512,6 +1513,7 @@ static int gadget_bind_driver(struct device *dev)
dev_err(&udc->dev, "failed to start %s: %d\n",
driver->function, ret);
+ mutex_lock(&udc_lock);
udc->driver = NULL;
driver->is_bound = false;
mutex_unlock(&udc_lock);
@@ -1529,7 +1531,6 @@ static void gadget_unbind_driver(struct device *dev)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- mutex_lock(&udc_lock);
usb_gadget_disconnect(gadget);
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
@@ -1537,6 +1538,7 @@ static void gadget_unbind_driver(struct device *dev)
udc->driver->unbind(gadget);
usb_gadget_udc_stop(udc);
+ mutex_lock(&udc_lock);
driver->is_bound = false;
udc->driver = NULL;
mutex_unlock(&udc_lock);
@@ -1612,7 +1614,7 @@ static ssize_t soft_connect_store(struct device *dev,
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
ssize_t ret;
- mutex_lock(&udc_lock);
+ device_lock(&udc->gadget->dev);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
ret = -EOPNOTSUPP;
@@ -1633,7 +1635,7 @@ static ssize_t soft_connect_store(struct device *dev,
ret = n;
out:
- mutex_unlock(&udc_lock);
+ device_unlock(&udc->gadget->dev);
return ret;
}
static DEVICE_ATTR_WO(soft_connect);
@@ -1652,11 +1654,15 @@ static ssize_t function_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- struct usb_gadget_driver *drv = udc->driver;
+ struct usb_gadget_driver *drv;
+ int rc = 0;
- if (!drv || !drv->function)
- return 0;
- return scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_lock(&udc_lock);
+ drv = udc->driver;
+ if (drv && drv->function)
+ rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_unlock(&udc_lock);
+ return rc;
}
static DEVICE_ATTR_RO(function);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index c97cd4bc817c..84605a4d0715 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -91,7 +91,7 @@ module_param(dma_mode, ushort, 0644);
* mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
* mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
*/
-static ushort fifo_mode = 0;
+static ushort fifo_mode;
module_param(fifo_mode, ushort, 0644);
/*
@@ -100,7 +100,7 @@ module_param(fifo_mode, ushort, 0644);
* USB suspend requests will be ignored. This is acceptable for
* self-powered devices. For bus powered devices set this to 1.
*/
-static ushort enable_suspend = 0;
+static ushort enable_suspend;
module_param(enable_suspend, ushort, 0644);
static void assert_out_naking(struct net2272_ep *ep, const char *where)
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 61cabb9de6ae..bea346e362b2 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2234,7 +2234,7 @@ static int proc_otg_show(struct seq_file *s)
char *ctrl_name = "(UNKNOWN)";
tmp = omap_readl(OTG_REV);
- ctrl_name = "tranceiver_ctrl";
+ ctrl_name = "transceiver_ctrl";
trans = omap_readw(USB_TRANSCEIVER_CTRL);
seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
tmp >> 4, tmp & 0xf, ctrl_name, trans);
@@ -2558,7 +2558,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
/* set up driver data structures */
BUG_ON(strlen(name) >= sizeof ep->name);
- strlcpy(ep->name, name, sizeof ep->name);
+ strscpy(ep->name, name, sizeof(ep->name));
INIT_LIST_HEAD(&ep->queue);
INIT_LIST_HEAD(&ep->iso);
ep->bEndpointAddress = addr;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 648be3fd476a..615ba0a6fbee 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -17,6 +17,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -38,16 +39,16 @@
#define USB3_USB20_CON 0x204
#define USB3_USB30_CON 0x208
#define USB3_USB_STA 0x210
-#define USB3_DRD_CON 0x218
+#define USB3_DRD_CON(p) ((p)->is_rzv2m ? 0x400 : 0x218)
#define USB3_USB_INT_STA_1 0x220
#define USB3_USB_INT_STA_2 0x224
#define USB3_USB_INT_ENA_1 0x228
#define USB3_USB_INT_ENA_2 0x22c
#define USB3_STUP_DAT_0 0x230
#define USB3_STUP_DAT_1 0x234
-#define USB3_USB_OTG_STA 0x268
-#define USB3_USB_OTG_INT_STA 0x26c
-#define USB3_USB_OTG_INT_ENA 0x270
+#define USB3_USB_OTG_STA(p) ((p)->is_rzv2m ? 0x410 : 0x268)
+#define USB3_USB_OTG_INT_STA(p) ((p)->is_rzv2m ? 0x414 : 0x26c)
+#define USB3_USB_OTG_INT_ENA(p) ((p)->is_rzv2m ? 0x418 : 0x270)
#define USB3_P0_MOD 0x280
#define USB3_P0_CON 0x288
#define USB3_P0_STA 0x28c
@@ -135,6 +136,8 @@
#define USB_STA_VBUS_STA BIT(0)
/* DRD_CON */
+#define DRD_CON_PERI_RST BIT(31) /* rzv2m only */
+#define DRD_CON_HOST_RST BIT(30) /* rzv2m only */
#define DRD_CON_PERI_CON BIT(24)
#define DRD_CON_VBOUT BIT(0)
@@ -155,7 +158,7 @@
#define USB_INT_2_PIPE(n) BIT(n)
/* USB_OTG_STA, USB_OTG_INT_STA and USB_OTG_INT_ENA */
-#define USB_OTG_IDMON BIT(4)
+#define USB_OTG_IDMON(p) ((p)->is_rzv2m ? BIT(0) : BIT(4))
/* P0_MOD */
#define P0_MOD_DIR BIT(6)
@@ -255,7 +258,7 @@
#define USB3_EP0_SS_MAX_PACKET_SIZE 512
#define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
#define USB3_EP0_BUF_SIZE 8
-#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
+#define USB3_MAX_NUM_PIPES(p) ((p)->is_rzv2m ? 16 : 6) /* This includes PIPE 0 */
#define USB3_WAIT_US 3
#define USB3_DMA_NUM_SETTING_AREA 4
/*
@@ -326,10 +329,13 @@ struct renesas_usb3_priv {
int num_ramif;
int ramsize_per_pipe; /* unit = bytes */
bool workaround_for_vbus; /* if true, don't check vbus signal */
+ bool is_rzv2m; /* if true, RZ/V2M SoC */
};
struct renesas_usb3 {
void __iomem *reg;
+ struct reset_control *drd_rstc;
+ struct reset_control *usbp_rstc;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
@@ -363,6 +369,7 @@ struct renesas_usb3 {
bool forced_b_device;
bool start_to_connect;
bool role_sw_by_connector;
+ bool is_rzv2m;
};
#define gadget_to_renesas_usb3(_gadget) \
@@ -467,7 +474,7 @@ static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
static bool usb3_is_host(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_DRD_CON) & DRD_CON_PERI_CON);
+ return !(usb3_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
}
static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
@@ -674,10 +681,20 @@ static void renesas_usb3_role_work(struct work_struct *work)
static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
{
+ if (usb3->is_rzv2m) {
+ if (host) {
+ usb3_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ } else {
+ usb3_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ }
+ }
+
if (host)
- usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+ usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
else
- usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+ usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
}
static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
@@ -693,9 +710,9 @@ static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
{
if (enable)
- usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+ usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
else
- usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
+ usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
}
static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
@@ -716,7 +733,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
static bool usb3_is_a_device(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_USB_OTG_STA) & USB_OTG_IDMON);
+ return !(usb3_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
}
static void usb3_check_id(struct renesas_usb3 *usb3)
@@ -739,8 +756,8 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
USB3_USB_COM_CON);
- usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
- usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
+ usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
+ usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
usb3_check_id(usb3);
usb3_check_vbus(usb3);
@@ -750,7 +767,7 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
{
usb3_disconnect(usb3);
usb3_write(usb3, 0, USB3_P0_INT_ENA);
- usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
+ usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
usb3_write(usb3, 0, USB3_AXI_INT_ENA);
@@ -2005,9 +2022,15 @@ static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
usb3_check_id(usb3);
}
-static void usb3_irq_otg_int(struct renesas_usb3 *usb3, u32 otg_int_sta)
+static void usb3_irq_otg_int(struct renesas_usb3 *usb3)
{
- if (otg_int_sta & USB_OTG_IDMON)
+ u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA(usb3));
+
+ otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
+ if (otg_int_sta)
+ usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
+
+ if (otg_int_sta & USB_OTG_IDMON(usb3))
usb3_irq_idmon_change(usb3);
}
@@ -2015,7 +2038,6 @@ static void usb3_irq_epc(struct renesas_usb3 *usb3)
{
u32 int_sta_1 = usb3_read(usb3, USB3_USB_INT_STA_1);
u32 int_sta_2 = usb3_read(usb3, USB3_USB_INT_STA_2);
- u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA);
int_sta_1 &= usb3_read(usb3, USB3_USB_INT_ENA_1);
if (int_sta_1) {
@@ -2027,11 +2049,8 @@ static void usb3_irq_epc(struct renesas_usb3 *usb3)
if (int_sta_2)
usb3_irq_epc_int_2(usb3, int_sta_2);
- otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA);
- if (otg_int_sta) {
- usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA);
- usb3_irq_otg_int(usb3, otg_int_sta);
- }
+ if (!usb3->is_rzv2m)
+ usb3_irq_otg_int(usb3);
}
static void usb3_irq_dma_int(struct renesas_usb3 *usb3, u32 dma_sta)
@@ -2085,6 +2104,15 @@ static irqreturn_t renesas_usb3_irq(int irq, void *_usb3)
return ret;
}
+static irqreturn_t renesas_usb3_otg_irq(int irq, void *_usb3)
+{
+ struct renesas_usb3 *usb3 = _usb3;
+
+ usb3_irq_otg_int(usb3);
+
+ return IRQ_HANDLED;
+}
+
static void usb3_write_pn_mod(struct renesas_usb3_ep *usb3_ep,
const struct usb_endpoint_descriptor *desc)
{
@@ -2571,6 +2599,8 @@ static int renesas_usb3_remove(struct platform_device *pdev)
usb_role_switch_unregister(usb3->role_sw);
usb_del_gadget_udc(&usb3->gadget);
+ reset_control_assert(usb3->usbp_rstc);
+ reset_control_assert(usb3->drd_rstc);
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2589,8 +2619,8 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
usb3->num_usb3_eps = priv->ramsize_per_ramif * priv->num_ramif * 2 /
priv->ramsize_per_pipe + 1;
- if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES)
- usb3->num_usb3_eps = USB3_MAX_NUM_PIPES;
+ if (usb3->num_usb3_eps > USB3_MAX_NUM_PIPES(usb3))
+ usb3->num_usb3_eps = USB3_MAX_NUM_PIPES(usb3);
usb3->usb3_ep = devm_kcalloc(dev,
usb3->num_usb3_eps, sizeof(*usb3_ep),
@@ -2707,6 +2737,13 @@ static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
.workaround_for_vbus = true,
};
+static const struct renesas_usb3_priv renesas_usb3_priv_rzv2m = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 1,
+ .ramsize_per_pipe = SZ_4K,
+ .is_rzv2m = true,
+};
+
static const struct of_device_id usb3_of_match[] = {
{
.compatible = "renesas,r8a774c0-usb3-peri",
@@ -2718,6 +2755,9 @@ static const struct of_device_id usb3_of_match[] = {
.compatible = "renesas,r8a77990-usb3-peri",
.data = &renesas_usb3_priv_r8a77990,
}, {
+ .compatible = "renesas,rzv2m-usb3-peri",
+ .data = &renesas_usb3_priv_rzv2m,
+ }, {
.compatible = "renesas,rcar-gen3-usb3-peri",
.data = &renesas_usb3_priv_gen3,
},
@@ -2748,7 +2788,7 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- int irq, ret;
+ int irq, drd_irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2762,10 +2802,18 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ if (priv->is_rzv2m) {
+ drd_irq = platform_get_irq_byname(pdev, "drd");
+ if (drd_irq < 0)
+ return drd_irq;
+ }
+
usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
if (!usb3)
return -ENOMEM;
+ usb3->is_rzv2m = priv->is_rzv2m;
+
usb3->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(usb3->reg))
return PTR_ERR(usb3->reg);
@@ -2787,6 +2835,14 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ if (usb3->is_rzv2m) {
+ ret = devm_request_irq(&pdev->dev, drd_irq,
+ renesas_usb3_otg_irq, 0,
+ dev_name(&pdev->dev), usb3);
+ if (ret < 0)
+ return ret;
+ }
+
INIT_WORK(&usb3->extcon_work, renesas_usb3_extcon_work);
usb3->extcon = devm_extcon_dev_allocate(&pdev->dev, renesas_usb3_cable);
if (IS_ERR(usb3->extcon))
@@ -2817,10 +2873,27 @@ static int renesas_usb3_probe(struct platform_device *pdev)
goto err_add_udc;
}
+ usb3->drd_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
+ "drd_reset");
+ if (IS_ERR(usb3->drd_rstc)) {
+ ret = PTR_ERR(usb3->drd_rstc);
+ goto err_add_udc;
+ }
+
+ usb3->usbp_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
+ "aresetn_p");
+ if (IS_ERR(usb3->usbp_rstc)) {
+ ret = PTR_ERR(usb3->usbp_rstc);
+ goto err_add_udc;
+ }
+
+ reset_control_deassert(usb3->drd_rstc);
+ reset_control_deassert(usb3->usbp_rstc);
+
pm_runtime_enable(&pdev->dev);
ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
if (ret < 0)
- goto err_add_udc;
+ goto err_reset;
ret = device_create_file(&pdev->dev, &dev_attr_role);
if (ret < 0)
@@ -2858,6 +2931,10 @@ static int renesas_usb3_probe(struct platform_device *pdev)
err_dev_create:
usb_del_gadget_udc(&usb3->gadget);
+err_reset:
+ reset_control_assert(usb3->usbp_rstc);
+ reset_control_assert(usb3->drd_rstc);
+
err_add_udc:
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index c6625aeb7bca..8c57b191e52b 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -23,7 +23,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/io.h>
@@ -1419,8 +1419,7 @@ static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
{
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- if (udc_info && (udc_info->udc_command ||
- gpio_is_valid(udc_info->pullup_pin))) {
+ if (udc_info && (udc_info->udc_command || udc->pullup_gpiod)) {
if (is_on)
s3c2410_udc_enable(udc);
@@ -1467,9 +1466,7 @@ static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- value = gpio_get_value(udc_info->vbus_pin) ? 1 : 0;
- if (udc_info->vbus_pin_inverted)
- value = !value;
+ value = gpiod_get_value(dev->vbus_gpiod);
if (value != dev->vbus)
s3c2410_udc_vbus_session(&dev->gadget, value);
@@ -1504,14 +1501,15 @@ static const struct usb_gadget_ops s3c2410_ops = {
.udc_stop = s3c2410_udc_stop,
};
-static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
+static void s3c2410_udc_command(struct s3c2410_udc *udc,
+ enum s3c2410_udc_cmd_e cmd)
{
if (!udc_info)
return;
if (udc_info->udc_command) {
udc_info->udc_command(cmd);
- } else if (gpio_is_valid(udc_info->pullup_pin)) {
+ } else if (udc->pullup_gpiod) {
int value;
switch (cmd) {
@@ -1524,9 +1522,8 @@ static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
default:
return;
}
- value ^= udc_info->pullup_pin_inverted;
- gpio_set_value(udc_info->pullup_pin, value);
+ gpiod_set_value(udc->pullup_gpiod, value);
}
}
@@ -1551,7 +1548,7 @@ static void s3c2410_udc_disable(struct s3c2410_udc *dev)
udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
/* Good bye, cruel world */
- s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
+ s3c2410_udc_command(dev, S3C2410_UDC_P_DISABLE);
/* Set speed to unknown */
dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1613,7 +1610,7 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
/* time to say "hello, world" */
- s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
+ s3c2410_udc_command(dev, S3C2410_UDC_P_ENABLE);
}
static int s3c2410_udc_start(struct usb_gadget *g,
@@ -1802,14 +1799,15 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
dev_dbg(dev, "got irq %i\n", irq_usbd);
- if (udc_info && udc_info->vbus_pin > 0) {
- retval = gpio_request(udc_info->vbus_pin, "udc vbus");
- if (retval < 0) {
- dev_err(dev, "cannot claim vbus pin\n");
- goto err_int;
- }
+ udc->vbus_gpiod = gpiod_get_optional(dev, "vbus", GPIOD_IN);
+ if (IS_ERR(udc->vbus_gpiod)) {
+ retval = PTR_ERR(udc->vbus_gpiod);
+ goto err_int;
+ }
+ if (udc->vbus_gpiod) {
+ gpiod_set_consumer_name(udc->vbus_gpiod, "udc vbus");
- irq = gpio_to_irq(udc_info->vbus_pin);
+ irq = gpiod_to_irq(udc->vbus_gpiod);
if (irq < 0) {
dev_err(dev, "no irq for gpio vbus pin\n");
retval = irq;
@@ -1833,16 +1831,12 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
udc->vbus = 1;
}
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin)) {
-
- retval = gpio_request_one(udc_info->pullup_pin,
- udc_info->vbus_pin_inverted ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "udc pullup");
- if (retval)
- goto err_vbus_irq;
+ udc->pullup_gpiod = gpiod_get_optional(dev, "pullup", GPIOD_OUT_LOW);
+ if (IS_ERR(udc->pullup_gpiod)) {
+ retval = PTR_ERR(udc->pullup_gpiod);
+ goto err_vbus_irq;
}
+ gpiod_set_consumer_name(udc->pullup_gpiod, "udc pullup");
retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (retval)
@@ -1856,15 +1850,10 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
return 0;
err_add_udc:
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin))
- gpio_free(udc_info->pullup_pin);
err_vbus_irq:
- if (udc_info && udc_info->vbus_pin > 0)
- free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
+ if (udc->vbus_gpiod)
+ free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
err_gpio_claim:
- if (udc_info && udc_info->vbus_pin > 0)
- gpio_free(udc_info->vbus_pin);
err_int:
free_irq(irq_usbd, udc);
err_udc_clk:
@@ -1885,7 +1874,6 @@ err_usb_bus_clk:
static int s3c2410_udc_remove(struct platform_device *pdev)
{
struct s3c2410_udc *udc = platform_get_drvdata(pdev);
- unsigned int irq;
dev_dbg(&pdev->dev, "%s()\n", __func__);
@@ -1895,14 +1883,8 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
usb_del_gadget_udc(&udc->gadget);
debugfs_remove(debugfs_lookup("registers", s3c2410_udc_debugfs_root));
- if (udc_info && !udc_info->udc_command &&
- gpio_is_valid(udc_info->pullup_pin))
- gpio_free(udc_info->pullup_pin);
-
- if (udc_info && udc_info->vbus_pin > 0) {
- irq = gpio_to_irq(udc_info->vbus_pin);
- free_irq(irq, udc);
- }
+ if (udc->vbus_gpiod)
+ free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
free_irq(irq_usbd, udc);
@@ -1926,14 +1908,18 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
static int
s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
{
- s3c2410_udc_command(S3C2410_UDC_P_DISABLE);
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ s3c2410_udc_command(udc, S3C2410_UDC_P_DISABLE);
return 0;
}
static int s3c2410_udc_resume(struct platform_device *pdev)
{
- s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
+ struct s3c2410_udc *udc = platform_get_drvdata(pdev);
+
+ s3c2410_udc_command(udc, S3C2410_UDC_P_ENABLE);
return 0;
}
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
index 135a5bff3c74..cdbf202e5ee8 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -83,6 +83,9 @@ struct s3c2410_udc {
u32 port_status;
int ep0state;
+ struct gpio_desc *vbus_gpiod;
+ struct gpio_desc *pullup_gpiod;
+
unsigned got_irq : 1;
unsigned req_std : 1;
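The s3c2410 changes above are the standard migration from legacy integer GPIOs to GPIO descriptors: polarity bookkeeping (the old *_inverted flags) moves into gpiolib, and "optional" lines come back as NULL instead of an invalid number. A minimal sketch of the consumer-side pattern, with illustrative connection IDs and struct names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

struct demo_udc {
	struct gpio_desc *vbus_gpiod;
	struct gpio_desc *pullup_gpiod;
};

static int demo_get_gpios(struct device *dev, struct demo_udc *udc)
{
	/* NULL when the line is not described, ERR_PTR() on real errors */
	udc->vbus_gpiod = devm_gpiod_get_optional(dev, "vbus", GPIOD_IN);
	if (IS_ERR(udc->vbus_gpiod))
		return PTR_ERR(udc->vbus_gpiod);

	udc->pullup_gpiod = devm_gpiod_get_optional(dev, "pullup", GPIOD_OUT_LOW);
	if (IS_ERR(udc->pullup_gpiod))
		return PTR_ERR(udc->pullup_gpiod);

	/* gpiod_get_value()/gpiod_set_value() already speak in logical
	 * (active-high) terms, so no manual inversion is needed. */
	if (udc->vbus_gpiod) {
		int irq = gpiod_to_irq(udc->vbus_gpiod);	/* for VBUS sensing */

		if (irq < 0)
			return irq;
		/* request_irq(irq, ...) would go here */
	}

	return 0;
}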
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 3c37effdfa64..76919d7570d2 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -2,7 +2,7 @@
/*
* NVIDIA Tegra XUSB device mode controller
*
- * Copyright (c) 2013-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2013-2022, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015, Google Inc.
*/
@@ -702,6 +702,8 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
pm_runtime_get_sync(xudc->dev);
+ tegra_phy_xusb_utmi_pad_power_on(xudc->curr_utmi_phy);
+
err = phy_power_on(xudc->curr_utmi_phy);
if (err < 0)
dev_err(xudc->dev, "UTMI power on failed: %d\n", err);
@@ -756,6 +758,8 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
/* Make sure interrupt handler has completed before powergating. */
synchronize_irq(xudc->irq);
+ tegra_phy_xusb_utmi_pad_power_down(xudc->curr_utmi_phy);
+
err = phy_power_off(xudc->curr_utmi_phy);
if (err < 0)
dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index fd9264cf6c87..247568bc17a2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -205,12 +205,12 @@ config USB_EHCI_FSL
Variation of ARC USB block used in some Freescale chips.
config USB_EHCI_HCD_NPCM7XX
- tristate "Support for Nuvoton NPCM7XX on-chip EHCI USB controller"
- depends on (USB_EHCI_HCD && ARCH_NPCM7XX) || COMPILE_TEST
- default y if (USB_EHCI_HCD && ARCH_NPCM7XX)
+ tristate "Support for Nuvoton NPCM on-chip EHCI USB controller"
+ depends on (USB_EHCI_HCD && ARCH_NPCM) || COMPILE_TEST
+ default y if (USB_EHCI_HCD && ARCH_NPCM)
help
Enables support for the on-chip EHCI controller on
- Nuvoton NPCM7XX chips.
+ Nuvoton NPCM chips.
config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 05d41fd65f25..8b775e7bab06 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -25,8 +25,6 @@
#define DRIVER_DESC "EHCI Atmel driver"
-static const char hcd_name[] = "ehci-atmel";
-
#define EHCI_INSNREG(index) ((index) * 4 + 0x90)
#define EHCI_INSNREG08_HSIC_EN BIT(2)
@@ -239,7 +237,6 @@ static int __init ehci_atmel_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ehci_init_driver(&ehci_atmel_hc_driver, &ehci_atmel_drv_overrides);
return platform_driver_register(&ehci_atmel_driver);
}
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 1a9b7572e17f..a333231616f4 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
@@ -32,7 +32,6 @@
(EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 | \
EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)
-static const char hcd_name[] = "ehci-exynos";
static struct hc_driver __read_mostly exynos_ehci_hc_driver;
#define PHY_NUMBER 3
@@ -132,20 +131,13 @@ static void exynos_ehci_phy_disable(struct device *dev)
static void exynos_setup_vbus_gpio(struct device *dev)
{
+ struct gpio_desc *gpio;
int err;
- int gpio;
- if (!dev->of_node)
- return;
-
- gpio = of_get_named_gpio(dev->of_node, "samsung,vbus-gpio", 0);
- if (!gpio_is_valid(gpio))
- return;
-
- err = devm_gpio_request_one(dev, gpio, GPIOF_OUT_INIT_HIGH,
- "ehci_vbus_gpio");
+ gpio = devm_gpiod_get_optional(dev, "samsung,vbus", GPIOD_OUT_HIGH);
+ err = PTR_ERR_OR_ZERO(gpio);
if (err)
- dev_err(dev, "can't request ehci vbus gpio %d", gpio);
+ dev_err(dev, "can't request ehci vbus gpio: %d\n", err);
}
static int exynos_ehci_probe(struct platform_device *pdev)
@@ -347,7 +339,6 @@ static int __init ehci_exynos_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ehci_init_driver(&exynos_ehci_hc_driver, &exynos_overrides);
return platform_driver_register(&exynos_ehci_driver);
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 896c0d107f72..9cea785934e5 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -722,8 +722,6 @@ static int __init ehci_fsl_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info(DRV_NAME ": " DRIVER_DESC "\n");
-
ehci_init_driver(&fsl_ehci_hc_driver, &ehci_fsl_overrides);
fsl_ehci_hc_driver.product_desc =
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 684164fa9716..a1930db0da1c 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1351,7 +1351,6 @@ static int __init ehci_hcd_init(void)
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
index 6b5a7a873e01..63af1a827fcb 100644
--- a/drivers/usb/host/ehci-npcm7xx.c
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -22,19 +22,8 @@
#include "ehci.h"
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-
#define DRIVER_DESC "EHCI npcm7xx driver"
-static const char hcd_name[] = "npcm7xx-ehci";
-
-#define USB2PHYCTL_OFFSET 0x144
-
-#define IPSRST2_OFFSET 0x24
-#define IPSRST3_OFFSET 0x34
-
-
static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver;
static int __maybe_unused ehci_npcm7xx_drv_suspend(struct device *dev)
@@ -60,52 +49,12 @@ static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct resource *res;
- struct regmap *gcr_regmap;
- struct regmap *rst_regmap;
const struct hc_driver *driver = &ehci_npcm7xx_hc_driver;
int irq;
int retval;
dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
- gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
- if (IS_ERR(gcr_regmap)) {
- dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-gcr\n",
- __func__);
- return PTR_ERR(gcr_regmap);
- }
-
- rst_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
- if (IS_ERR(rst_regmap)) {
- dev_err(&pdev->dev, "%s: failed to find nuvoton,npcm750-rst\n",
- __func__);
- return PTR_ERR(rst_regmap);
- }
-
- /********* phy init ******/
- // reset usb host
- regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
- (0x1 << 26), (0x1 << 26));
- regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
- (0x1 << 25), (0x1 << 25));
- regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
- (0x1 << 28), 0);
-
- udelay(1);
-
- // enable phy
- regmap_update_bits(rst_regmap, IPSRST3_OFFSET,
- (0x1 << 25), 0);
-
- udelay(50); // enable phy
-
- regmap_update_bits(gcr_regmap, USB2PHYCTL_OFFSET,
- (0x1 << 28), (0x1 << 28));
-
- // enable host
- regmap_update_bits(rst_regmap, IPSRST2_OFFSET,
- (0x1 << 26), 0);
-
if (usb_disabled())
return -ENODEV;
@@ -191,8 +140,6 @@ static int __init ehci_npcm7xx_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_npcm7xx_hc_driver, NULL);
return platform_driver_register(&npcm7xx_ehci_hcd_driver);
}
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 8c45bc17a580..7dd984722a7f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -284,8 +284,6 @@ static int __init ehci_omap_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_omap_hc_driver, &ehci_omap_overrides);
return platform_driver_register(&ehci_hcd_omap_driver);
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 3626758b3e2a..a3454a3ea4e0 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -65,8 +65,6 @@ struct orion_ehci_hcd {
struct phy *phy;
};
-static const char hcd_name[] = "ehci-orion";
-
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
@@ -361,8 +359,6 @@ static int __init ehci_orion_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_orion_hc_driver, &orion_overrides);
return platform_driver_register(&ehci_orion_driver);
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 9937c5a7efc2..17f8b6ea0c35 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -382,7 +382,7 @@ static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
if (is_bypassed_id(pdev))
return -ENODEV;
- return usb_hcd_pci_probe(pdev, id, &ehci_pci_hc_driver);
+ return usb_hcd_pci_probe(pdev, &ehci_pci_hc_driver);
}
static void ehci_pci_remove(struct pci_dev *pdev)
@@ -423,8 +423,6 @@ static int __init ehci_pci_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides);
/* Entries for the PCI suspend/resume callbacks are special */
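Several PCI HCD glue drivers in this series are adapted to the two-argument usb_hcd_pci_probe(), which no longer takes the matched pci_device_id. A typical probe wrapper now reduces to the following (driver name is illustrative):

#include <linux/pci.h>
#include <linux/usb/hcd.h>

static const struct hc_driver demo_hc_driver;	/* normally filled in by the
						 * ehci/ohci/xhci init helpers */

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* The pci_device_id argument stays in the pci_driver callback
	 * prototype but is no longer forwarded; usb_hcd_pci_probe() only
	 * needs the device and the hc_driver. Driver-specific filtering,
	 * as in ehci-pci's is_bypassed_id(), can still be done on pdev. */
	return usb_hcd_pci_probe(pdev, &demo_hc_driver);
}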
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 6924f0316e9a..fe497c876d76 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -53,8 +53,6 @@ struct ehci_platform_priv {
struct delayed_work poll_work;
};
-static const char hcd_name[] = "ehci-platform";
-
static int ehci_platform_reset(struct usb_hcd *hcd)
{
struct platform_device *pdev = to_platform_device(hcd->self.controller);
@@ -529,8 +527,6 @@ static int __init ehci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_platform_driver);
}
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 807e64991e3e..666f5c4db25a 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -645,7 +645,7 @@ qh_urb_transaction (
token |= (1 /* "in" */ << 8);
/* else it's already initted to "out" pid (0 << 8) */
- maxpacket = usb_maxpacket(urb->dev, urb->pipe);
+ maxpacket = usb_endpoint_maxp(&urb->ep->desc);
/*
* buffer gets wrapped in one or more qtds;
@@ -1218,7 +1218,7 @@ static int ehci_submit_single_step_set_feature(
token |= (1 /* "in" */ << 8); /*This is IN stage*/
- maxpacket = usb_maxpacket(urb->dev, urb->pipe);
+ maxpacket = usb_endpoint_maxp(&urb->ep->desc);
qtd_fill(ehci, qtd, buf, len, token, maxpacket);
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 3694e450a11a..c4ddd1022f60 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -24,8 +24,6 @@
#define DRIVER_DESC "EHCI SPEAr driver"
-static const char hcd_name[] = "SPEAr-ehci";
-
struct spear_ehci {
struct clk *clk;
};
@@ -167,8 +165,6 @@ static int __init ehci_spear_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_spear_hc_driver, &spear_overrides);
return platform_driver_register(&spear_ehci_hcd_driver);
}
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index f74433aac948..f731dc98c533 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -42,8 +42,6 @@ struct st_ehci_platform_priv {
#define hcd_to_ehci_priv(h) \
((struct st_ehci_platform_priv *)hcd_to_ehci(h)->priv)
-static const char hcd_name[] = "ehci-st";
-
#define EHCI_CAPS_SIZE 0x10
#define AHB2STBUS_INSREG01 (EHCI_CAPS_SIZE + 0x84)
@@ -346,8 +344,6 @@ static int __init ehci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ehci_platform_driver);
}
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 2ba09c3fbc2f..95a44462bed0 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -25,8 +25,8 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include <asm/fsl_gtm.h>
#include "fhci.h"
@@ -150,15 +150,15 @@ int fhci_ioports_check_bus_state(struct fhci_hcd *fhci)
u8 bits = 0;
/* check USBOE,if transmitting,exit */
- if (!gpio_get_value(fhci->gpios[GPIO_USBOE]))
+ if (!gpiod_get_value(fhci->gpiods[GPIO_USBOE]))
return -1;
/* check USBRP */
- if (gpio_get_value(fhci->gpios[GPIO_USBRP]))
+ if (gpiod_get_value(fhci->gpiods[GPIO_USBRP]))
bits |= 0x2;
/* check USBRN */
- if (gpio_get_value(fhci->gpios[GPIO_USBRN]))
+ if (gpiod_get_value(fhci->gpiods[GPIO_USBRN]))
bits |= 0x1;
return bits;
@@ -630,40 +630,23 @@ static int of_fhci_probe(struct platform_device *ofdev)
/* GPIOs and pins */
for (i = 0; i < NUM_GPIOS; i++) {
- int gpio;
- enum of_gpio_flags flags;
-
- gpio = of_get_gpio_flags(node, i, &flags);
- fhci->gpios[i] = gpio;
- fhci->alow_gpios[i] = flags & OF_GPIO_ACTIVE_LOW;
-
- if (!gpio_is_valid(gpio)) {
- if (i < GPIO_SPEED) {
- dev_err(dev, "incorrect GPIO%d: %d\n",
- i, gpio);
- goto err_gpios;
- } else {
- dev_info(dev, "assuming board doesn't have "
- "%s gpio\n", i == GPIO_SPEED ?
- "speed" : "power");
- continue;
- }
- }
+ if (i < GPIO_SPEED)
+ fhci->gpiods[i] = devm_gpiod_get_index(dev,
+ NULL, i, GPIOD_IN);
+
+ else
+ fhci->gpiods[i] = devm_gpiod_get_index_optional(dev,
+ NULL, i, GPIOD_OUT_LOW);
- ret = gpio_request(gpio, dev_name(dev));
- if (ret) {
- dev_err(dev, "failed to request gpio %d", i);
+ if (IS_ERR(fhci->gpiods[i])) {
+ dev_err(dev, "incorrect GPIO%d: %ld\n",
+ i, PTR_ERR(fhci->gpiods[i]));
goto err_gpios;
}
-
- if (i >= GPIO_SPEED) {
- ret = gpio_direction_output(gpio, 0);
- if (ret) {
- dev_err(dev, "failed to set gpio %d as "
- "an output\n", i);
- i++;
- goto err_gpios;
- }
+ if (!fhci->gpiods[i]) {
+ dev_info(dev, "assuming board doesn't have "
+ "%s gpio\n", i == GPIO_SPEED ?
+ "speed" : "power");
}
}
@@ -766,10 +749,6 @@ err_pins:
while (--j >= 0)
qe_pin_free(fhci->pins[j]);
err_gpios:
- while (--i >= 0) {
- if (gpio_is_valid(fhci->gpios[i]))
- gpio_free(fhci->gpios[i]);
- }
cpm_muram_free(pram_addr);
err_pram:
iounmap(hcd->regs);
@@ -782,18 +761,12 @@ static int fhci_remove(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct fhci_hcd *fhci = hcd_to_fhci(hcd);
- int i;
int j;
usb_remove_hcd(hcd);
free_irq(fhci->timer->irq, hcd);
gtm_put_timer16(fhci->timer);
cpm_muram_free(cpm_muram_offset(fhci->pram));
- for (i = 0; i < NUM_GPIOS; i++) {
- if (!gpio_is_valid(fhci->gpios[i]))
- continue;
- gpio_free(fhci->gpios[i]);
- }
for (j = 0; j < NUM_PINS; j++)
qe_pin_free(fhci->pins[j]);
fhci_dfs_destroy(fhci);
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index c359dcdb9b13..5f48660ebdfa 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -19,7 +19,7 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include "fhci.h"
@@ -38,13 +38,12 @@ static u8 root_hub_des[] = {
static void fhci_gpio_set_value(struct fhci_hcd *fhci, int gpio_nr, bool on)
{
- int gpio = fhci->gpios[gpio_nr];
- bool alow = fhci->alow_gpios[gpio_nr];
+ struct gpio_desc *gpiod = fhci->gpiods[gpio_nr];
- if (!gpio_is_valid(gpio))
+ if (!gpiod)
return;
- gpio_set_value(gpio, on ^ alow);
+ gpiod_set_value(gpiod, on);
mdelay(5);
}
@@ -129,9 +128,9 @@ void fhci_io_port_generate_reset(struct fhci_hcd *fhci)
{
fhci_dbg(fhci, "-> %s\n", __func__);
- gpio_direction_output(fhci->gpios[GPIO_USBOE], 0);
- gpio_direction_output(fhci->gpios[GPIO_USBTP], 0);
- gpio_direction_output(fhci->gpios[GPIO_USBTN], 0);
+ gpiod_direction_output(fhci->gpiods[GPIO_USBOE], 0);
+ gpiod_direction_output(fhci->gpiods[GPIO_USBTP], 0);
+ gpiod_direction_output(fhci->gpiods[GPIO_USBTN], 0);
mdelay(5);
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 81fbc019a9b3..1f57b0989485 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/immap_qe.h>
@@ -242,8 +243,7 @@ struct fhci_hcd {
enum qe_clock fullspeed_clk;
enum qe_clock lowspeed_clk;
struct qe_pin *pins[NUM_PINS];
- int gpios[NUM_GPIOS];
- bool alow_gpios[NUM_GPIOS];
+ struct gpio_desc *gpiods[NUM_GPIOS];
struct qe_usb_ctlr __iomem *regs; /* I/O memory used to communicate */
struct fhci_pram __iomem *pram; /* Parameter RAM */
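The fhci conversion above fetches its lines by position rather than by name, with the leading entries mandatory and the trailing speed/power lines optional. A hedged sketch of that index-based lookup; the array size and split point are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

#define DEMO_NUM_GPIOS		6
#define DEMO_FIRST_OPTIONAL	4

static int demo_get_indexed_gpios(struct device *dev,
				  struct gpio_desc *gpiods[DEMO_NUM_GPIOS])
{
	int i;

	for (i = 0; i < DEMO_NUM_GPIOS; i++) {
		if (i < DEMO_FIRST_OPTIONAL)
			/* required: any error, including -ENOENT, fails the probe */
			gpiods[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_IN);
		else
			/* optional: NULL when the board does not wire it up */
			gpiods[i] = devm_gpiod_get_index_optional(dev, NULL, i,
								  GPIOD_OUT_LOW);
		if (IS_ERR(gpiods[i]))
			return PTR_ERR(gpiods[i]);
	}

	return 0;
}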
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index f8c111e08a0d..3d1dbcf4c073 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5692,7 +5692,6 @@ static int __init fotg210_hcd_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 98326465e2dc..533537ef3c21 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -62,8 +62,6 @@ struct ohci_at91_priv {
#define DRIVER_DESC "OHCI Atmel driver"
-static const char hcd_name[] = "ohci-atmel";
-
static struct hc_driver __read_mostly ohci_at91_hc_driver;
static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst = {
@@ -699,7 +697,6 @@ static int __init ohci_at91_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_at91_hc_driver, &ohci_at91_drv_overrides);
/*
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 1371b0c249ec..d4818e8d652b 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -551,7 +551,6 @@ static int __init ohci_da8xx_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", DRV_NAME);
ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
/*
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 5f5e8a64c8e2..8d7977fd5d3b 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -21,7 +21,6 @@
#define DRIVER_DESC "OHCI Exynos driver"
-static const char hcd_name[] = "ohci-exynos";
static struct hc_driver __read_mostly exynos_ohci_hc_driver;
#define to_exynos_ohci(hcd) (struct exynos_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
@@ -310,7 +309,6 @@ static int __init ohci_exynos_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&exynos_ohci_hc_driver, &exynos_overrides);
return platform_driver_register(&exynos_ohci_driver);
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c4c821c2288c..0457dd9f6c19 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1276,7 +1276,6 @@ static int __init ohci_hcd_mod_init(void)
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
pr_debug ("%s: block sizes: ed %zd td %zd\n", hcd_name,
sizeof (struct ed), sizeof (struct td));
set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 106a6bcefb08..5b32e683e367 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -275,8 +275,6 @@ static int __init ohci_nxp_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_nxp_hc_driver, NULL);
return platform_driver_register(&ohci_hcd_nxp_driver);
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index f5bc9c8bdc9a..cb29701df911 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -423,8 +423,6 @@ static int __init ohci_omap_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_omap_hc_driver, &omap_overrides);
return platform_driver_register(&ohci_hcd_omap_driver);
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 41efe927d8f3..d7b4f40f9ff4 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -282,7 +282,7 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
static int ohci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- return usb_hcd_pci_probe(dev, id, &ohci_pci_hc_driver);
+ return usb_hcd_pci_probe(dev, &ohci_pci_hc_driver);
}
/* pci driver glue; this is a "new style" PCI driver module */
@@ -306,8 +306,6 @@ static int __init ohci_pci_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 0adae6265127..a84305091c43 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -41,8 +41,6 @@ struct ohci_platform_priv {
struct reset_control *resets;
};
-static const char hcd_name[] = "ohci-platform";
-
static int ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -289,7 +287,7 @@ static int ohci_platform_suspend(struct device *dev)
return ret;
}
-static int ohci_platform_resume(struct device *dev)
+static int ohci_platform_resume_common(struct device *dev, bool hibernated)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
@@ -301,7 +299,7 @@ static int ohci_platform_resume(struct device *dev)
return err;
}
- ohci_resume(hcd, false);
+ ohci_resume(hcd, hibernated);
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
@@ -309,6 +307,16 @@ static int ohci_platform_resume(struct device *dev)
return 0;
}
+
+static int ohci_platform_resume(struct device *dev)
+{
+ return ohci_platform_resume_common(dev, false);
+}
+
+static int ohci_platform_restore(struct device *dev)
+{
+ return ohci_platform_resume_common(dev, true);
+}
#endif /* CONFIG_PM_SLEEP */
static const struct of_device_id ohci_platform_ids[] = {
@@ -325,8 +333,16 @@ static const struct platform_device_id ohci_platform_table[] = {
};
MODULE_DEVICE_TABLE(platform, ohci_platform_table);
-static SIMPLE_DEV_PM_OPS(ohci_platform_pm_ops, ohci_platform_suspend,
- ohci_platform_resume);
+#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops ohci_platform_pm_ops = {
+ .suspend = ohci_platform_suspend,
+ .resume = ohci_platform_resume,
+ .freeze = ohci_platform_suspend,
+ .thaw = ohci_platform_resume,
+ .poweroff = ohci_platform_suspend,
+ .restore = ohci_platform_restore,
+};
+#endif
static struct platform_driver ohci_platform_driver = {
.id_table = ohci_platform_table,
@@ -335,7 +351,9 @@ static struct platform_driver ohci_platform_driver = {
.shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-platform",
+#ifdef CONFIG_PM_SLEEP
.pm = &ohci_platform_pm_ops,
+#endif
.of_match_table = ohci_platform_ids,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
}
@@ -346,8 +364,6 @@ static int __init ohci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ohci_platform_driver);
}
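The ohci-platform change above splits ordinary resume from hibernation restore so that ohci_resume() is told whether the controller may have lost power. A condensed sketch of that dev_pm_ops wiring, with shortened names and a stubbed suspend handler:

#include <linux/pm.h>
#include <linux/usb/hcd.h>
/* ohci_resume() comes from the OHCI core ("ohci.h") */

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev)
{
	return 0;	/* stop the HC, keep wakeup configuration */
}

static int demo_resume_common(struct device *dev, bool hibernated)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	/* 'hibernated' tells the OHCI core to fully reinitialize the HC
	 * instead of assuming register state survived. */
	ohci_resume(hcd, hibernated);
	return 0;
}

static int demo_resume(struct device *dev)  { return demo_resume_common(dev, false); }
static int demo_restore(struct device *dev) { return demo_resume_common(dev, true);  }

static const struct dev_pm_ops demo_pm_ops = {
	.suspend  = demo_suspend,	/* reused for freeze and poweroff */
	.resume   = demo_resume,
	.freeze   = demo_suspend,
	.thaw     = demo_resume,
	.poweroff = demo_suspend,
	.restore  = demo_restore,	/* only restore passes hibernated=true */
};
#endif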
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index ab4f610a0140..a1dad8745622 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -114,8 +114,6 @@
#define PXA_UHC_MAX_PORTNUM 3
-static const char hcd_name[] = "ohci-pxa27x";
-
static struct hc_driver __read_mostly ohci_pxa27x_hc_driver;
struct pxa27x_ohci {
@@ -608,8 +606,6 @@ static int __init ohci_pxa27x_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_pxa27x_hc_driver, &pxa27x_overrides);
ohci_pxa27x_hc_driver.hub_control = pxa27x_ohci_hub_control;
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 12264c048601..85a0a9ae0095 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -39,8 +39,6 @@
#define DRIVER_DESC "OHCI S3C2410 driver"
-static const char hcd_name[] = "ohci-s3c2410";
-
static struct clk *clk;
static struct clk *usb_clk;
@@ -474,7 +472,6 @@ static int __init ohci_s3c2410_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_s3c2410_hc_driver, NULL);
/*
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 9b81f420656d..196951a27f3f 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -23,7 +23,6 @@
#define DRIVER_DESC "OHCI SPEAr driver"
-static const char hcd_name[] = "SPEAr-ohci";
struct spear_ohci {
struct clk *clk;
};
@@ -179,8 +178,6 @@ static int __init ohci_spear_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_spear_hc_driver, &spear_overrides);
return platform_driver_register(&spear_ohci_hcd_driver);
}
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index ac796ccd93ef..82eef3c62e11 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -40,8 +40,6 @@ struct st_ohci_platform_priv {
#define hcd_to_ohci_priv(h) \
((struct st_ohci_platform_priv *)hcd_to_ohci(h)->priv)
-static const char hcd_name[] = "ohci-st";
-
static int st_ohci_platform_power_on(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
@@ -324,8 +322,6 @@ static int __init ohci_platform_init(void)
if (usb_disabled())
return -ENODEV;
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
-
ohci_init_driver(&ohci_platform_hc_driver, &platform_overrides);
return platform_driver_register(&ohci_platform_driver);
}
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index d879d6af5710..95240c9c45bd 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3190,7 +3190,6 @@ static int __init u132_hcd_init(void)
u132_exiting = 0;
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "driver %s\n", hcd_name);
workqueue = create_singlethread_workqueue("u132");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index d90b869f5f40..c22b51af83fc 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -867,8 +867,6 @@ static int __init uhci_hcd_init(void)
if (usb_disabled())
return -ENODEV;
- printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n",
- ignore_oc ? ", overcurrent ignored" : "");
set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
#ifdef CONFIG_DYNAMIC_DEBUG
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 9b88745d247f..3592f757fe05 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -294,7 +294,7 @@ MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
static int uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- return usb_hcd_pci_probe(dev, id, &uhci_driver);
+ return usb_hcd_pci_probe(dev, &uhci_driver);
}
static struct pci_driver uhci_pci_driver = {
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index e61155fa6379..f1367b53b260 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -988,7 +988,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
dbc->driver = driver;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
- return NULL;
+ goto err;
INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
spin_lock_init(&dbc->lock);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0fdc014c9401..4619d5e89d5b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-aquire the lock while calling ACPI
* method.
*/
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
@@ -1648,6 +1648,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status = bus_state->resuming_ports;
+ /*
+ * SS devices are only visible to roothub after link training completes.
+ * Keep polling roothubs for a grace period after xHC start
+ */
+ if (xhci->run_graceperiod) {
+ if (time_before(jiffies, xhci->run_graceperiod))
+ status = 1;
+ else
+ xhci->run_graceperiod = 0;
+ }
+
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8c19e151a945..9e56aa28efcd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -641,7 +641,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
- goto cleanup_ctx;
+ goto cleanup_ring_array;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
@@ -702,6 +702,11 @@ cleanup_rings:
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
+ xhci_free_stream_ctx(xhci,
+ stream_info->num_stream_ctxs,
+ stream_info->stream_ctx_array,
+ stream_info->ctx_array_dma);
+cleanup_ring_array:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 06a6b19acaae..579899eb24c1 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -425,7 +425,6 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- u32 extra_cs_count;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
@@ -461,18 +460,12 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (last_cs > 7)
return -ESCH_CS_OVERFLOW;
- if (sch_ep->ep_type == ISOC_IN_EP)
- extra_cs_count = (last_cs == 7) ? 1 : 2;
- else /* ep_type : INTR IN / INTR OUT */
- extra_cs_count = 1;
-
- cs_count += extra_cs_count;
if (cs_count > 7)
cs_count = 7; /* HW limit */
sch_ep->cs_count = cs_count;
- /* one for ss, the other for idle */
- sch_ep->num_budget_microframes = cs_count + 2;
+ /* ss, idle are ignored */
+ sch_ep->num_budget_microframes = cs_count;
/*
* if interval=1, maxp >752, num_budge_micoframe is larger
@@ -771,8 +764,8 @@ int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
if (ret)
return ret;
- if (ep->hcpriv)
- drop_ep_quirk(hcd, udev, ep);
+ /* needn't check @ep->hcpriv, xhci_endpoint_disable set it NULL */
+ drop_ep_quirk(hcd, udev, ep);
return 0;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index dce6c0ec8d34..40228a3d77a0 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -431,7 +431,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* to say USB 2.0, but I'm not sure what the implications would be in
* the other parts of the HCD code.
*/
- retval = usb_hcd_pci_probe(dev, id, &xhci_pci_hc_driver);
+ retval = usb_hcd_pci_probe(dev, &xhci_pci_hc_driver);
if (retval)
goto put_runtime_pm;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 044855818cb1..5fb55bf19493 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -123,7 +123,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
};
static const struct xhci_plat_priv xhci_plat_brcm = {
- .quirks = XHCI_RESET_ON_RESUME,
+ .quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
};
static const struct of_device_id usb_xhci_of_match[] = {
@@ -398,12 +398,17 @@ static int xhci_plat_remove(struct platform_device *dev)
pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
- usb_remove_hcd(shared_hcd);
- xhci->shared_hcd = NULL;
+ if (shared_hcd) {
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
+ }
+
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(shared_hcd);
+
+ if (shared_hcd)
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
@@ -432,7 +437,16 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
* xhci_suspend() needs `do_wakeup` to know whether host is allowed
* to do wakeup during suspend.
*/
- return xhci_suspend(xhci, device_may_wakeup(dev));
+ ret = xhci_suspend(xhci, device_may_wakeup(dev));
+ if (ret)
+ return ret;
+
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_disable_unprepare(xhci->clk);
+ clk_disable_unprepare(xhci->reg_clk);
+ }
+
+ return 0;
}
static int __maybe_unused xhci_plat_resume(struct device *dev)
@@ -441,6 +455,11 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
+ if (!device_may_wakeup(dev) && (xhci->quirks & XHCI_SUSPEND_RESUME_CLKS)) {
+ clk_prepare_enable(xhci->clk);
+ clk_prepare_enable(xhci->reg_clk);
+ }
+
ret = xhci_priv_resume_quirk(hcd);
if (ret)
return ret;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 65858f607437..5176765c4013 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -151,9 +151,11 @@ int xhci_start(struct xhci_hcd *xhci)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
+ }
return ret;
}
@@ -791,8 +793,6 @@ static void xhci_stop(struct usb_hcd *hcd)
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- unsigned long flags;
- int i;
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -808,21 +808,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
-
- /* Power off USB2 ports*/
- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
-
- /* Power off USB3 ports*/
- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
-
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
@@ -1192,7 +1183,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* re-initialize the HC on Restore Error, or Host Controller Error */
if (temp & (STS_SRE | STS_HCE)) {
reinit_xhc = true;
- xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ if (!xhci->broken_suspend)
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
}
if (reinit_xhc) {
@@ -1491,7 +1483,7 @@ EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
* address from the XHCI endpoint index.
*/
-unsigned int xhci_get_endpoint_address(unsigned int ep_index)
+static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
unsigned int number = DIV_ROUND_UP(ep_index, 2);
unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
@@ -4104,7 +4096,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
slot_id = command->slot_id;
if (!slot_id || command->status != COMP_SUCCESS) {
- xhci_err(xhci, "Error while assigning device slot ID\n");
+ xhci_err(xhci, "Error while assigning device slot ID: %s\n",
+ xhci_trb_comp_code_string(command->status));
xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
HCS_MAX_SLOTS(
readl(&xhci->cap_regs->hcs_params1)));
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1960b47acfb2..c0964fe8ac12 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1807,8 +1807,6 @@ struct xhci_hcd {
struct xhci_erst erst;
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
- /* Store LPM test failed devices' information */
- struct list_head lpm_failed_devs;
/* slot enabling and address device helpers */
/* these are not thread safe so use mutex */
@@ -1826,8 +1824,7 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
-
- u32 command;
+ unsigned long run_graceperiod;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
*
@@ -1899,6 +1896,7 @@ struct xhci_hcd {
#define XHCI_NO_SOFT_RETRY BIT_ULL(40)
#define XHCI_BROKEN_D3COLD BIT_ULL(41)
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
+#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -2041,7 +2039,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
-unsigned int xhci_get_endpoint_address(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
@@ -2196,8 +2193,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
- bool on, unsigned long *flags);
void xhci_hc_died(struct xhci_hcd *xhci);
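The roothub grace period added to xhci above is a small jiffies-based pattern: record a deadline when the controller starts, then keep reporting "status changed" until the deadline passes, clearing it afterwards. A generic sketch, assuming the 500 ms window used in the patch:

#include <linux/jiffies.h>

static unsigned long demo_graceperiod;	/* 0 means "no grace period active" */

static void demo_on_start(void)
{
	demo_graceperiod = jiffies + msecs_to_jiffies(500);
}

static bool demo_force_polling(void)
{
	if (!demo_graceperiod)
		return false;

	if (time_before(jiffies, demo_graceperiod))
		return true;	/* still inside the window: keep polling */

	demo_graceperiod = 0;	/* window elapsed: clear it and stop forcing */
	return false;
}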
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index e9437a176518..ea39243efee3 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -177,10 +177,6 @@ static int idmouse_create_image(struct usb_idmouse *dev)
bytes_read += bulk_read;
}
- /* reset the device */
-reset:
- ftip_command(dev, FTIP_RELEASE, 0, 0);
-
/* check for valid image */
/* right border should be black (0x00) */
for (bytes_read = sizeof(HEADER)-1 + WIDTH-1; bytes_read < IMGSIZE; bytes_read += WIDTH)
@@ -192,6 +188,10 @@ reset:
if (dev->bulk_in_buffer[bytes_read] != 0xFF)
return -EAGAIN;
+ /* reset the device */
+reset:
+ ftip_command(dev, FTIP_RELEASE, 0, 0);
+
/* should be IMGSIZE == 65040 */
dev_dbg(&dev->interface->dev, "read %d bytes fingerprint data\n",
bytes_read);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index d1df153e7f5a..d63c63942af1 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -71,10 +71,7 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
{
int err;
- if (hub->reset_gpio) {
- gpiod_set_value_cansleep(hub->reset_gpio, 1);
- fsleep(hub->pdata->reset_us);
- }
+ gpiod_set_value_cansleep(hub->reset_gpio, 1);
err = regulator_disable(hub->vdd);
if (err) {
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 04c4e3fed094..54337d72bb9f 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -400,7 +400,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
{
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
- int len, err;
+ int len;
u32 property_u32 = 0;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
@@ -416,13 +416,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->skip_config = 0;
hub->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (PTR_ERR(hub->gpio_reset) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(hub->gpio_reset)) {
- err = PTR_ERR(hub->gpio_reset);
- dev_err(dev, "unable to request GPIO reset pin (%d)\n", err);
- return err;
- }
+ if (IS_ERR(hub->gpio_reset))
+ return dev_err_probe(dev, PTR_ERR(hub->gpio_reset),
+ "unable to request GPIO reset pin\n");
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -547,7 +543,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->boost_up = USB251XB_DEF_BOOST_UP;
cproperty_char = of_get_property(np, "manufacturer", NULL);
- strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
+ strscpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
hub->manufacturer_len = strlen(str) & 0xFF;
memset(hub->manufacturer, 0, USB251XB_STRING_BUFSIZE);
@@ -557,7 +553,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
USB251XB_STRING_BUFSIZE);
cproperty_char = of_get_property(np, "product", NULL);
- strlcpy(str, cproperty_char ? : data->product_str, sizeof(str));
+ strscpy(str, cproperty_char ? : data->product_str, sizeof(str));
hub->product_len = strlen(str) & 0xFF;
memset(hub->product, 0, USB251XB_STRING_BUFSIZE);
len = min_t(size_t, USB251XB_STRING_BUFSIZE / 2, strlen(str));
@@ -566,7 +562,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
USB251XB_STRING_BUFSIZE);
cproperty_char = of_get_property(np, "serial", NULL);
- strlcpy(str, cproperty_char ? : USB251XB_DEF_SERIAL_STRING,
+ strscpy(str, cproperty_char ? : USB251XB_DEF_SERIAL_STRING,
sizeof(str));
hub->serial_len = strlen(str) & 0xFF;
memset(hub->serial, 0, USB251XB_STRING_BUFSIZE);
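The strlcpy to strscpy swaps above use the recommended replacement: strscpy() always NUL-terminates, never reads past the source buffer looking for its length, and reports truncation instead of the would-be length. A small sketch of the difference in return values:

#include <linux/string.h>
#include <linux/types.h>

static void demo_copy(void)
{
	char buf[8];
	ssize_t n;

	n = strscpy(buf, "manufacturer", sizeof(buf));
	/* n == -E2BIG: the source was truncated; buf holds "manufac" + '\0'.
	 * strlcpy() would have returned strlen("manufacturer") instead,
	 * which is easy to misuse as a copied-length value. */

	n = strscpy(buf, "usb", sizeof(buf));
	/* n == 3: characters copied, excluding the terminator. */
	(void)n;
}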
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 330f494cd158..c70ca475c7c7 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -160,6 +160,7 @@ static int usb3503_probe(struct usb3503 *hub)
struct usb3503_platform_data *pdata = dev_get_platdata(dev);
struct device_node *np = dev->of_node;
int err;
+ bool is_clk_enabled = false;
u32 mode = USB3503_MODE_HUB;
const u32 *property;
enum gpiod_flags flags;
@@ -217,6 +218,7 @@ static int usb3503_probe(struct usb3503 *hub)
return err;
}
+ is_clk_enabled = true;
property = of_get_property(np, "disabled-ports", &len);
if (property && (len / sizeof(u32)) > 0) {
int i;
@@ -236,20 +238,26 @@ static int usb3503_probe(struct usb3503 *hub)
else
flags = GPIOD_OUT_HIGH;
hub->intn = devm_gpiod_get_optional(dev, "intn", flags);
- if (IS_ERR(hub->intn))
- return PTR_ERR(hub->intn);
+ if (IS_ERR(hub->intn)) {
+ err = PTR_ERR(hub->intn);
+ goto err_clk;
+ }
if (hub->intn)
gpiod_set_consumer_name(hub->intn, "usb3503 intn");
hub->connect = devm_gpiod_get_optional(dev, "connect", GPIOD_OUT_LOW);
- if (IS_ERR(hub->connect))
- return PTR_ERR(hub->connect);
+ if (IS_ERR(hub->connect)) {
+ err = PTR_ERR(hub->connect);
+ goto err_clk;
+ }
if (hub->connect)
gpiod_set_consumer_name(hub->connect, "usb3503 connect");
hub->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(hub->reset))
- return PTR_ERR(hub->reset);
+ if (IS_ERR(hub->reset)) {
+ err = PTR_ERR(hub->reset);
+ goto err_clk;
+ }
if (hub->reset) {
/* Datasheet defines a hardware reset to be at least 100us */
usleep_range(100, 10000);
@@ -265,6 +273,11 @@ static int usb3503_probe(struct usb3503 *hub)
(hub->mode == USB3503_MODE_HUB) ? "hub" : "standby");
return 0;
+
+err_clk:
+ if (is_clk_enabled)
+ clk_disable_unprepare(hub->clk);
+ return err;
}
static int usb3503_i2c_probe(struct i2c_client *i2c,
@@ -289,14 +302,12 @@ static int usb3503_i2c_probe(struct i2c_client *i2c,
return usb3503_probe(hub);
}
-static int usb3503_i2c_remove(struct i2c_client *i2c)
+static void usb3503_i2c_remove(struct i2c_client *i2c)
{
struct usb3503 *hub;
hub = i2c_get_clientdata(i2c);
clk_disable_unprepare(hub->clk);
-
- return 0;
}
static int usb3503_platform_probe(struct platform_device *pdev)
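The usb3503 and isp1301 hunks above track the I2C core change that made the remove() callback return void, since the core never acted on the returned status anyway. A minimal driver skeleton under that convention; all names are illustrative, and probe keeps the (client, id) form used by the drivers in this series:

#include <linux/i2c.h>
#include <linux/module.h>

static int demo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	return 0;
}

static void demo_remove(struct i2c_client *client)
{
	/* tear down; there is no status to report and no caller to act on one */
}

static const struct i2c_device_id demo_id[] = {
	{ "demo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_id);

static struct i2c_driver demo_driver = {
	.driver   = { .name = "demo" },
	.probe    = demo_probe,
	.remove   = demo_remove,
	.id_table = demo_id,
};
module_i2c_driver(demo_driver);
MODULE_LICENSE("GPL");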
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 0be8efcda15d..b00d92db5dfd 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -502,7 +502,7 @@ static size_t parport_uss720_epp_write_data(struct parport *pp, const void *buf,
#else
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
@@ -563,7 +563,7 @@ static size_t parport_uss720_ecp_write_data(struct parport *pp, const void *buff
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
@@ -581,7 +581,7 @@ static size_t parport_uss720_ecp_read_data(struct parport *pp, void *buffer, siz
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
@@ -614,7 +614,7 @@ static size_t parport_uss720_write_compat(struct parport *pp, const void *buffer
{
struct parport_uss720_private *priv = pp->private_data;
struct usb_device *usbdev = priv->usbdev;
- int rlen;
+ int rlen = 0;
int i;
if (!usbdev)
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f48a23adbc35..094e812e9e69 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1268,6 +1268,11 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
/* don't do anything here: "fault" will set up page table entries */
vma->vm_ops = &mon_bin_vm_ops;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ vma->vm_flags &= ~VM_MAYWRITE;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
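The usbmon mmap change above is the usual read-only mapping hardening: refuse VM_WRITE up front and clear VM_MAYWRITE so userspace cannot re-add write permission later via mprotect(). The shape of that check in an mmap handler (ops setup and private data are driver-specific and omitted here):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_WRITE)
		return -EPERM;		/* no writable mappings of the buffer */

	vma->vm_flags &= ~VM_MAYWRITE;	/* block a later mprotect(PROT_WRITE) */
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = filp->private_data;
	return 0;
}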
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 0ca173af87bb..a3a6282893d0 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -978,8 +978,6 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
goto irq_err;
}
- device_init_wakeup(dev, true);
-
/* power down device IP for power saving by default */
mtu3_stop(mtu);
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 4cb65346789d..d78ae52b4e26 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -356,6 +356,8 @@ static int mtu3_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
+ device_init_wakeup(dev, true);
+
ret = ssusb_rscs_init(ssusb);
if (ret)
goto comm_init_err;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f906dfd360d3..6c8f7763e75e 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -86,7 +86,7 @@ config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
depends on ARCH_OMAP2PLUS || COMPILE_TEST
- depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
+ depends on NOP_USB_XCEIV!=m || USB_MUSB_HDRC=m
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index fd4ae2dd24e5..a4e55b0c52cf 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -523,11 +523,9 @@ static int da8xx_probe(struct platform_device *pdev)
}
glue->phy = devm_phy_get(&pdev->dev, "usb-phy");
- if (IS_ERR(glue->phy)) {
- if (PTR_ERR(glue->phy) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get phy\n");
- return PTR_ERR(glue->phy);
- }
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
+ "failed to get phy\n");
glue->dev = &pdev->dev;
glue->clk = clk;
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 417c30bff9ca..d1e4e0deb753 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -105,7 +105,6 @@ static int jz4740_musb_init(struct musb *musb)
.driver_data = glue,
.fwnode = dev_fwnode(dev),
};
- int err;
glue->musb = musb;
@@ -113,12 +112,9 @@ static int jz4740_musb_init(struct musb *musb)
musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
else
musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
- if (IS_ERR(musb->xceiv)) {
- err = PTR_ERR(musb->xceiv);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "No transceiver configured: %d\n", err);
- return err;
- }
+ if (IS_ERR(musb->xceiv))
+ return dev_err_probe(dev, PTR_ERR(musb->xceiv),
+ "No transceiver configured\n");
glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
if (IS_ERR(glue->role_sw)) {
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index bbbcfd49fb35..03027c6fa3ab 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2595,9 +2595,7 @@ fail2:
musb_platform_exit(musb);
fail1:
- if (status != -EPROBE_DEFER)
- dev_err(musb->controller,
- "%s failed with status %d\n", __func__, status);
+ dev_err_probe(musb->controller, status, "%s failed\n", __func__);
musb_free(musb);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index c963cb8565f2..9589243e8951 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -718,10 +718,8 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
dc = dma_request_chan(dev->parent, str);
if (IS_ERR(dc)) {
- ret = PTR_ERR(dc);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to request %s: %d.\n",
- str, ret);
+ ret = dev_err_probe(dev, PTR_ERR(dc),
+ "Failed to request %s.\n", str);
goto err;
}
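Many probe paths in this series replace the open-coded "silent on -EPROBE_DEFER, log everything else" idiom with dev_err_probe(), which logs real errors, records the deferral reason, and returns the error code in one call. A self-contained sketch of the conversion, using the same "usb-phy" lookup as the da8xx hunk above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int demo_get_phy(struct device *dev, struct phy **out)
{
	struct phy *phy = devm_phy_get(dev, "usb-phy");

	/*
	 * Old idiom:
	 *	if (PTR_ERR(phy) != -EPROBE_DEFER)
	 *		dev_err(dev, "failed to get phy\n");
	 *	return PTR_ERR(phy);
	 *
	 * dev_err_probe() folds that into one call: it logs real errors,
	 * stores the reason for -EPROBE_DEFER (visible in
	 * /sys/kernel/debug/devices_deferred), and returns the error.
	 */
	if (IS_ERR(phy))
		return dev_err_probe(dev, PTR_ERR(phy), "failed to get phy\n");

	*out = phy;
	return 0;
}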
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index daada4b66a92..6704a62a1665 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -760,6 +760,9 @@ static void rxstate(struct musb *musb, struct musb_request *req)
musb_writew(epio, MUSB_RXCSR, csr);
buffer_aint_mapped:
+ fifo_count = min_t(unsigned int,
+ request->length - request->actual,
+ (unsigned int)fifo_count);
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 961c858fb349..7f9a999cd5ff 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -743,31 +743,20 @@ static int sunxi_musb_probe(struct platform_device *pdev)
if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
glue->rst = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(glue->rst)) {
- if (PTR_ERR(glue->rst) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Error getting reset %ld\n",
- PTR_ERR(glue->rst));
- return PTR_ERR(glue->rst);
- }
+ if (IS_ERR(glue->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->rst),
+ "Error getting reset\n");
}
glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
- if (IS_ERR(glue->extcon)) {
- if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Invalid or missing extcon\n");
- return PTR_ERR(glue->extcon);
- }
+ if (IS_ERR(glue->extcon))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->extcon),
+ "Invalid or missing extcon\n");
glue->phy = devm_phy_get(&pdev->dev, "usb");
- if (IS_ERR(glue->phy)) {
- if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "Error getting phy %ld\n",
- PTR_ERR(glue->phy));
- return PTR_ERR(glue->phy);
- }
+ if (IS_ERR(glue->phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(glue->phy),
+ "Error getting phy\n");
glue->usb_phy = usb_phy_generic_register();
if (IS_ERR(glue->usb_phy)) {
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 34b9f8140187..3dc5c04e7cbf 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -230,12 +230,9 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
}
- if (err == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (err) {
- dev_err(dev, "Error requesting RESET or VBUS GPIO\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "Error requesting RESET or VBUS GPIO\n");
if (nop->gpiod_reset)
gpiod_direction_output(nop->gpiod_reset, 1);
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index f8bd93fe69cd..e5d3f206097c 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -1196,7 +1196,7 @@ static void isp1301_release(struct device *dev)
static struct isp1301 *the_transceiver;
-static int isp1301_remove(struct i2c_client *i2c)
+static void isp1301_remove(struct i2c_client *i2c)
{
struct isp1301 *isp;
@@ -1214,8 +1214,6 @@ static int isp1301_remove(struct i2c_client *i2c)
put_device(&i2c->dev);
the_transceiver = NULL;
-
- return 0;
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index ad3d57f1c273..c2777a5c1f4e 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -120,14 +120,12 @@ static int isp1301_probe(struct i2c_client *client,
return 0;
}
-static int isp1301_remove(struct i2c_client *client)
+static void isp1301_remove(struct i2c_client *client)
{
struct isp1301 *isp = i2c_get_clientdata(client);
usb_remove_phy(&isp->phy);
isp1301_i2c_client = NULL;
-
- return 0;
}
static struct i2c_driver isp1301_driver = {
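
Both isp1301 conversions above follow the tree-wide switch of the I2C remove() callback to a void return type: a non-zero return could not stop the removal anyway, so the trailing return 0 is simply dropped. A minimal sketch of the new callback shape, using a hypothetical driver name:

#include <linux/i2c.h>
#include <linux/module.h>

static void example_remove(struct i2c_client *client)
{
        /* release driver state here; no status can be reported to the core */
}

static struct i2c_driver example_driver = {
        .driver = { .name = "example" },
        .remove = example_remove,
};
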
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
index 4025da20b3fd..f16adcacdce3 100644
--- a/drivers/usb/phy/phy-jz4770.c
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -321,27 +321,18 @@ static int jz4770_phy_probe(struct platform_device *pdev)
}
priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock\n");
- return err;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Failed to get clock\n");
priv->vcc_supply = devm_regulator_get(dev, "vcc");
- if (IS_ERR(priv->vcc_supply)) {
- err = PTR_ERR(priv->vcc_supply);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get regulator\n");
- return err;
- }
+ if (IS_ERR(priv->vcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vcc_supply),
+ "Failed to get regulator\n");
err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Unable to register PHY\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Unable to register PHY\n");
return devm_add_action_or_reset(dev, ingenic_usb_phy_remove, &priv->phy);
}
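
The musb, sunxi, phy-generic and jz4770 hunks above all collapse the open-coded -EPROBE_DEFER checks into dev_err_probe(), which returns the error passed to it and only logs at error level when that error is not -EPROBE_DEFER (deferrals are recorded as the deferral reason instead). A minimal sketch of the pattern, with illustrative names not taken from the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_resources(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, NULL);
        if (IS_ERR(*clk))
                /* prints only when the error is not -EPROBE_DEFER */
                return dev_err_probe(dev, PTR_ERR(*clk), "Failed to get clock\n");

        return 0;
}
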
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8a262c5a0408..d2836ef5d15c 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -144,8 +144,8 @@
#define MXS_PHY_NEED_IP_FIX BIT(3)
/* Minimum and maximum values for device tree entries */
-#define MXS_PHY_TX_CAL45_MIN 30
-#define MXS_PHY_TX_CAL45_MAX 55
+#define MXS_PHY_TX_CAL45_MIN 35
+#define MXS_PHY_TX_CAL45_MAX 54
#define MXS_PHY_TX_D_CAL_MIN 79
#define MXS_PHY_TX_D_CAL_MAX 119
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 68cd4b68e3a2..f0240107edb1 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -1440,16 +1440,22 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return err;
}
- gpiod = devm_gpiod_get_from_of_node(&pdev->dev, np,
- "nvidia,phy-reset-gpio",
- 0, GPIOD_OUT_HIGH,
- "ulpi_phy_reset_b");
+ gpiod = devm_gpiod_get(&pdev->dev, "nvidia,phy-reset",
+ GPIOD_OUT_HIGH);
err = PTR_ERR_OR_ZERO(gpiod);
if (err) {
dev_err(&pdev->dev,
"Request failed for reset GPIO: %d\n", err);
return err;
}
+
+ err = gpiod_set_consumer_name(gpiod, "ulpi_phy_reset_b");
+ if (err) {
+ dev_err(&pdev->dev,
+ "Failed to set up reset GPIO name: %d\n", err);
+ return err;
+ }
+
tegra_phy->reset_gpio = gpiod;
phy = devm_otg_ulpi_create(&pdev->dev,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 39eaa7b97c40..9452291f1703 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -189,7 +189,7 @@ static void ark3116_port_remove(struct usb_serial_port *port)
static void ark3116_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct ark3116_private *priv = usb_get_serial_port_data(port);
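
ark3116 is the first of many drivers in this series whose set_termios() implementations gain a const qualifier on old_termios; the tty layer now hands the saved termios to drivers read-only, and only the prototypes change. A sketch of the updated callback shape (hypothetical driver name):

#include <linux/tty.h>
#include <linux/usb/serial.h>

static void example_set_termios(struct tty_struct *tty,
                                struct usb_serial_port *port,
                                const struct ktermios *old_termios)
{
        /* program the device from tty->termios; old_termios is only compared */
}
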
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 8107e4b5b03b..9331a562dac0 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -44,7 +44,8 @@ static void belkin_sa_close(struct usb_serial_port *port);
static void belkin_sa_read_int_callback(struct urb *urb);
static void belkin_sa_process_read_urb(struct urb *urb);
static void belkin_sa_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios * old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void belkin_sa_break_ctl(struct tty_struct *tty, int break_state);
static int belkin_sa_tiocmget(struct tty_struct *tty);
static int belkin_sa_tiocmset(struct tty_struct *tty,
@@ -273,7 +274,8 @@ static void belkin_sa_process_read_urb(struct urb *urb)
}
static void belkin_sa_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct belkin_sa_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2798fca71261..6e1b87e67304 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -97,13 +97,16 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+
unsigned long quirks;
+ u8 version;
+
unsigned long break_end;
};
static void ch341_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios);
+ const struct ktermios *old_termios);
static int ch341_control_out(struct usb_device *dev, u8 request,
u16 value, u16 index)
@@ -250,8 +253,12 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
+ *
+ * At least one device with version 0x27 appears to have this bit
+ * inverted.
*/
- val |= BIT(7);
+ if (priv->version > 0x27)
+ val |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
@@ -265,6 +272,9 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
* (stop bits, parity and word length). Version 0x30 and above use
* CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
*/
+ if (priv->version < 0x30)
+ return 0;
+
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
if (r)
@@ -308,7 +318,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r)
return r;
- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
+
+ priv->version = buffer[0];
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
@@ -470,7 +482,8 @@ err_kill_interrupt_urb:
* tty->termios contains the new setting to be used.
*/
static void ch341_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct ch341_private *priv = usb_get_serial_port_data(port);
unsigned baud_rate;
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index b97aa40ca4d1..da19a5fa414f 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -189,8 +189,8 @@ static int usb_console_setup(struct console *co, char *options)
info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
- usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
+ usb_serial_put(serial);
return retval;
}
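
The console hunk above drops the usb_serial reference only after disc_mutex has been released: usb_serial_put() can drop the last reference and free the usb_serial structure that embeds the mutex, so unlocking afterwards would touch freed memory in this error path. A generic sketch of the ordering, with illustrative types that are not from the patch:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_obj {
        struct kref kref;
        struct mutex lock;
};

static void example_release(struct kref *kref)
{
        kfree(container_of(kref, struct example_obj, kref));
}

static void example_error_path(struct example_obj *obj)
{
        mutex_unlock(&obj->lock);               /* unlock while the object is known to be alive */
        kref_put(&obj->kref, example_release);  /* final put may free obj, including obj->lock */
}
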
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c374620a486f..3bcec419f463 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -31,9 +31,9 @@
static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
- struct ktermios *);
+ const struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
- struct ktermios*);
+ const struct ktermios *);
static bool cp210x_tx_empty(struct usb_serial_port *port);
static int cp210x_tiocmget(struct tty_struct *);
static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
@@ -1039,7 +1040,8 @@ static speed_t cp210x_get_actual_rate(speed_t baud)
* otherwise.
*/
static void cp210x_change_speed(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -1121,7 +1123,8 @@ static bool cp210x_termios_change(const struct ktermios *a, const struct ktermio
}
static void cp210x_set_flow_control(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
@@ -1231,7 +1234,8 @@ out_unlock:
}
static void cp210x_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
u16 bits;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 5fbcc155e8f5..1e0c028c5ec9 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -125,7 +125,8 @@ static void cypress_send(struct usb_serial_port *port);
static unsigned int cypress_write_room(struct tty_struct *tty);
static void cypress_earthmate_init_termios(struct tty_struct *tty);
static void cypress_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int cypress_tiocmget(struct tty_struct *tty);
static int cypress_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -859,7 +860,8 @@ static void cypress_earthmate_init_termios(struct tty_struct *tty)
}
static void cypress_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index af65eb863d70..45d688e9b93f 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -215,7 +215,8 @@ static int digi_transmit_idle(struct usb_serial_port *port,
static void digi_rx_throttle(struct tty_struct *tty);
static void digi_rx_unthrottle(struct tty_struct *tty);
static void digi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void digi_break_ctl(struct tty_struct *tty, int break_state);
static int digi_tiocmget(struct tty_struct *tty);
static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
@@ -649,7 +650,8 @@ static void digi_rx_unthrottle(struct tty_struct *tty)
static void digi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct digi_port *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index d9f20256a6a8..2dd58cd9f0cc 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -603,7 +603,8 @@ static int f81232_port_disable(struct usb_serial_port *port)
}
static void f81232_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct f81232_private *priv = usb_get_serial_port_data(port);
u8 new_lcr = 0;
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index d789c1ec87b3..ddfcd72eb0ae 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -944,8 +944,8 @@ static int f81534_calc_num_ports(struct usb_serial *serial,
}
static void f81534_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
u8 new_lcr = 0;
int status;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d5a3986dfee7..05e28a5ce42b 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -47,10 +47,27 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
+enum ftdi_chip_type {
+ SIO,
+ FT232A,
+ FT232B,
+ FT2232C,
+ FT232R,
+ FT232H,
+ FT2232H,
+ FT4232H,
+ FT4232HA,
+ FT232HP,
+ FT233HP,
+ FT2232HP,
+ FT2233HP,
+ FT4232HP,
+ FT4233HP,
+ FTX,
+};
struct ftdi_private {
enum ftdi_chip_type chip_type;
- /* type of device, either SIO or FT8U232AM */
int baud_base; /* baud base clock for divisor setting */
int custom_divisor; /* custom_divisor kludge, this is for
baud_base (different from what goes to the
@@ -62,8 +79,7 @@ struct ftdi_private {
unsigned long last_dtr_rts; /* saved modem control outputs */
char prev_status; /* Used for TIOCMIWAIT */
char transmit_empty; /* If transmitter is empty or not */
- u16 interface; /* FT2232C, FT2232H or FT4232H port interface
- (0 for FT232/245) */
+ u16 channel; /* channel index, or 0 for legacy types */
speed_t force_baud; /* if non-zero, force the baud rate to
this value */
@@ -84,8 +100,7 @@ struct ftdi_private {
#endif
};
-/* struct ftdi_sio_quirk is used by devices requiring special attention. */
-struct ftdi_sio_quirk {
+struct ftdi_quirk {
int (*probe)(struct usb_serial *);
/* Special settings for probed ports. */
void (*port_probe)(struct ftdi_private *);
@@ -98,27 +113,27 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
-static const struct ftdi_sio_quirk ftdi_jtag_quirk = {
+static const struct ftdi_quirk ftdi_jtag_quirk = {
.probe = ftdi_jtag_probe,
};
-static const struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
+static const struct ftdi_quirk ftdi_NDI_device_quirk = {
.probe = ftdi_NDI_device_setup,
};
-static const struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
+static const struct ftdi_quirk ftdi_USB_UIRT_quirk = {
.port_probe = ftdi_USB_UIRT_setup,
};
-static const struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
+static const struct ftdi_quirk ftdi_HE_TIRA1_quirk = {
.port_probe = ftdi_HE_TIRA1_setup,
};
-static const struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+static const struct ftdi_quirk ftdi_stmclite_quirk = {
.probe = ftdi_stmclite_probe,
};
-static const struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+static const struct ftdi_quirk ftdi_8u2232c_quirk = {
.probe = ftdi_8u2232c_probe,
};
@@ -180,6 +195,13 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_FTX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT2233HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT4233HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT2232HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT4232HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT233HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT232HP_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_FT4232HA_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -1045,6 +1067,8 @@ static const struct usb_device_id id_table_combined[] = {
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ /* Omron devices */
+ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
@@ -1059,15 +1083,22 @@ static const struct usb_device_id id_table_combined[] = {
MODULE_DEVICE_TABLE(usb, id_table_combined);
static const char *ftdi_chip_name[] = {
- [SIO] = "SIO", /* the serial part of FT8U100AX */
- [FT8U232AM] = "FT8U232AM",
- [FT232BM] = "FT232BM",
- [FT2232C] = "FT2232C",
- [FT232RL] = "FT232RL",
- [FT2232H] = "FT2232H",
- [FT4232H] = "FT4232H",
- [FT232H] = "FT232H",
- [FTX] = "FT-X"
+ [SIO] = "SIO", /* the serial part of FT8U100AX */
+ [FT232A] = "FT232A",
+ [FT232B] = "FT232B",
+ [FT2232C] = "FT2232C/D",
+ [FT232R] = "FT232R",
+ [FT232H] = "FT232H",
+ [FT2232H] = "FT2232H",
+ [FT4232H] = "FT4232H",
+ [FT4232HA] = "FT4232HA",
+ [FT232HP] = "FT232HP",
+ [FT233HP] = "FT233HP",
+ [FT2232HP] = "FT2232HP",
+ [FT2233HP] = "FT2233HP",
+ [FT4232HP] = "FT4232HP",
+ [FT4233HP] = "FT4233HP",
+ [FTX] = "FT-X",
};
@@ -1076,74 +1107,12 @@ static const char *ftdi_chip_name[] = {
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
/* End TIOCMIWAIT */
-/* function prototypes for a FTDI serial converter */
-static int ftdi_sio_probe(struct usb_serial *serial,
- const struct usb_device_id *id);
-static int ftdi_sio_port_probe(struct usb_serial_port *port);
-static void ftdi_sio_port_remove(struct usb_serial_port *port);
-static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port);
-static void ftdi_dtr_rts(struct usb_serial_port *port, int on);
-static void ftdi_process_read_urb(struct urb *urb);
-static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
- void *dest, size_t size);
static void ftdi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
-static int ftdi_tiocmget(struct tty_struct *tty);
-static int ftdi_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-static int ftdi_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
-static void get_serial_info(struct tty_struct *tty, struct serial_struct *ss);
-static int set_serial_info(struct tty_struct *tty,
- struct serial_struct *ss);
-static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
-static bool ftdi_tx_empty(struct usb_serial_port *port);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int ftdi_get_modem_status(struct usb_serial_port *port,
unsigned char status[2]);
-static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
-static unsigned short int ftdi_232am_baud_to_divisor(int baud);
-static u32 ftdi_232bm_baud_base_to_divisor(int baud, int base);
-static u32 ftdi_232bm_baud_to_divisor(int baud);
-static u32 ftdi_2232h_baud_base_to_divisor(int baud, int base);
-static u32 ftdi_2232h_baud_to_divisor(int baud);
-
-static struct usb_serial_driver ftdi_sio_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "ftdi_sio",
- },
- .description = "FTDI USB Serial Device",
- .id_table = id_table_combined,
- .num_ports = 1,
- .bulk_in_size = 512,
- .bulk_out_size = 256,
- .probe = ftdi_sio_probe,
- .port_probe = ftdi_sio_port_probe,
- .port_remove = ftdi_sio_port_remove,
- .open = ftdi_open,
- .dtr_rts = ftdi_dtr_rts,
- .throttle = usb_serial_generic_throttle,
- .unthrottle = usb_serial_generic_unthrottle,
- .process_read_urb = ftdi_process_read_urb,
- .prepare_write_buffer = ftdi_prepare_write_buffer,
- .tiocmget = ftdi_tiocmget,
- .tiocmset = ftdi_tiocmset,
- .tiocmiwait = usb_serial_generic_tiocmiwait,
- .get_icount = usb_serial_generic_get_icount,
- .ioctl = ftdi_ioctl,
- .get_serial = get_serial_info,
- .set_serial = set_serial_info,
- .set_termios = ftdi_set_termios,
- .break_ctl = ftdi_break_ctl,
- .tx_empty = ftdi_tx_empty,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &ftdi_sio_device, NULL
-};
-
-
#define WDR_TIMEOUT 5000 /* default urb timeout */
#define WDR_SHORT_TIMEOUT 1000 /* shorter urb timeout */
@@ -1259,7 +1228,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_MODEM_CTRL_REQUEST,
FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
- value, priv->interface,
+ value, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dev_dbg(dev, "%s Error from MODEM_CTRL urb: DTR %s, RTS %s\n",
@@ -1305,7 +1274,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
if (!baud)
baud = 9600;
switch (priv->chip_type) {
- case SIO: /* SIO chip */
+ case SIO:
switch (baud) {
case 300: div_value = ftdi_sio_b300; break;
case 600: div_value = ftdi_sio_b600; break;
@@ -1317,8 +1286,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
case 38400: div_value = ftdi_sio_b38400; break;
case 57600: div_value = ftdi_sio_b57600; break;
case 115200: div_value = ftdi_sio_b115200; break;
- } /* baud */
- if (div_value == 0) {
+ default:
dev_dbg(dev, "%s - Baudrate (%d) requested is not supported\n",
__func__, baud);
div_value = ftdi_sio_b9600;
@@ -1326,7 +1294,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
div_okay = 0;
}
break;
- case FT8U232AM: /* 8U232AM chip */
+ case FT232A:
if (baud <= 3000000) {
div_value = ftdi_232am_baud_to_divisor(baud);
} else {
@@ -1336,10 +1304,10 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
div_okay = 0;
}
break;
- case FT232BM: /* FT232BM chip */
- case FT2232C: /* FT2232C chip */
- case FT232RL: /* FT232RL chip */
- case FTX: /* FT-X series */
+ case FT232B:
+ case FT2232C:
+ case FT232R:
+ case FTX:
if (baud <= 3000000) {
u16 product_id = le16_to_cpu(
port->serial->dev->descriptor.idProduct);
@@ -1359,9 +1327,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
baud = 9600;
}
break;
- case FT2232H: /* FT2232H chip */
- case FT4232H: /* FT4232H chip */
- case FT232H: /* FT232H chip */
+ default:
if ((baud <= 12000000) && (baud >= 1200)) {
div_value = ftdi_2232h_baud_to_divisor(baud);
} else if (baud < 1200) {
@@ -1373,7 +1339,7 @@ static u32 get_ftdi_divisor(struct tty_struct *tty,
baud = 9600;
}
break;
- } /* priv->chip_type */
+ }
if (div_okay) {
dev_dbg(dev, "%s - Baud rate set to %d (divisor 0x%lX) on chip %s\n",
@@ -1396,13 +1362,8 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
index_value = get_ftdi_divisor(tty, port);
value = (u16)index_value;
index = (u16)(index_value >> 16);
- if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H || priv->chip_type == FT232H ||
- priv->chip_type == FTX) {
- /* Probably the BM type needs the MSB of the encoded fractional
- * divider also moved like for the chips above. Any infos? */
- index = (u16)((index << 8) | priv->interface);
- }
+ if (priv->channel)
+ index = (u16)((index << 8) | priv->channel);
rv = usb_control_msg(port->serial->dev,
usb_sndctrlpipe(port->serial->dev, 0),
@@ -1420,7 +1381,7 @@ static int write_latency_timer(struct usb_serial_port *port)
int rv;
int l = priv->latency;
- if (priv->chip_type == SIO || priv->chip_type == FT8U232AM)
+ if (priv->chip_type == SIO || priv->chip_type == FT232A)
return -EINVAL;
if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1432,7 +1393,7 @@ static int write_latency_timer(struct usb_serial_port *port)
usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- l, priv->interface,
+ l, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0)
dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
@@ -1448,7 +1409,7 @@ static int _read_latency_timer(struct usb_serial_port *port)
rv = usb_control_msg_recv(udev, 0, FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0,
- priv->interface, &buf, 1, WDR_TIMEOUT,
+ priv->channel, &buf, 1, WDR_TIMEOUT,
GFP_KERNEL);
if (rv == 0)
rv = buf;
@@ -1461,7 +1422,7 @@ static int read_latency_timer(struct usb_serial_port *port)
struct ftdi_private *priv = usb_get_serial_port_data(port);
int rv;
- if (priv->chip_type == SIO || priv->chip_type == FT8U232AM)
+ if (priv->chip_type == SIO || priv->chip_type == FT232A)
return -EINVAL;
rv = _read_latency_timer(port);
@@ -1536,90 +1497,97 @@ static int get_lsr_info(struct usb_serial_port *port,
return 0;
}
-
-/* Determine type of FTDI chip based on USB config and descriptor. */
-static void ftdi_determine_type(struct usb_serial_port *port)
+static int ftdi_determine_type(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
struct usb_device *udev = serial->dev;
- unsigned version;
- unsigned interfaces;
-
- /* Assume it is not the original SIO device for now. */
- priv->baud_base = 48000000 / 2;
+ unsigned int version, ifnum;
version = le16_to_cpu(udev->descriptor.bcdDevice);
- interfaces = udev->actconfig->desc.bNumInterfaces;
- dev_dbg(&port->dev, "%s: bcdDevice = 0x%x, bNumInterfaces = %u\n", __func__,
- version, interfaces);
- if (interfaces > 1) {
- struct usb_interface *intf = serial->interface;
- int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-
- /* Multiple interfaces.*/
- if (version == 0x0800) {
- priv->chip_type = FT4232H;
- /* Hi-speed - baud clock runs at 120MHz */
- priv->baud_base = 120000000 / 2;
- } else if (version == 0x0700) {
- priv->chip_type = FT2232H;
- /* Hi-speed - baud clock runs at 120MHz */
- priv->baud_base = 120000000 / 2;
- } else
- priv->chip_type = FT2232C;
-
- /* Determine interface code. */
- if (ifnum == 0)
- priv->interface = INTERFACE_A;
- else if (ifnum == 1)
- priv->interface = INTERFACE_B;
- else if (ifnum == 2)
- priv->interface = INTERFACE_C;
- else if (ifnum == 3)
- priv->interface = INTERFACE_D;
-
- /* BM-type devices have a bug where bcdDevice gets set
- * to 0x200 when iSerialNumber is 0. */
- if (version < 0x500) {
- dev_dbg(&port->dev,
- "%s: something fishy - bcdDevice too low for multi-interface device\n",
- __func__);
- }
- } else if (version < 0x200) {
- /* Old device. Assume it's the original SIO. */
- priv->chip_type = SIO;
- priv->baud_base = 12000000 / 16;
- } else if (version < 0x400) {
- /* Assume it's an FT8U232AM (or FT8U245AM) */
- priv->chip_type = FT8U232AM;
+ ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+
+ /* Assume Hi-Speed type */
+ priv->baud_base = 120000000 / 2;
+ priv->channel = CHANNEL_A + ifnum;
+
+ switch (version) {
+ case 0x200:
+ priv->chip_type = FT232A;
+ priv->baud_base = 48000000 / 2;
+ priv->channel = 0;
/*
- * It might be a BM type because of the iSerialNumber bug.
- * If iSerialNumber==0 and the latency timer is readable,
- * assume it is BM type.
+ * FT232B devices have a bug where bcdDevice gets set to 0x200
+ * when iSerialNumber is 0. Assume it is an FT232B in case the
+ * latency timer is readable.
*/
if (udev->descriptor.iSerialNumber == 0 &&
_read_latency_timer(port) >= 0) {
- dev_dbg(&port->dev,
- "%s: has latency timer so not an AM type\n",
- __func__);
- priv->chip_type = FT232BM;
+ priv->chip_type = FT232B;
}
- } else if (version < 0x600) {
- /* Assume it's an FT232BM (or FT245BM) */
- priv->chip_type = FT232BM;
- } else if (version < 0x900) {
- /* Assume it's an FT232RL */
- priv->chip_type = FT232RL;
- } else if (version < 0x1000) {
- /* Assume it's an FT232H */
+ break;
+ case 0x400:
+ priv->chip_type = FT232B;
+ priv->baud_base = 48000000 / 2;
+ priv->channel = 0;
+ break;
+ case 0x500:
+ priv->chip_type = FT2232C;
+ priv->baud_base = 48000000 / 2;
+ break;
+ case 0x600:
+ priv->chip_type = FT232R;
+ priv->baud_base = 48000000 / 2;
+ priv->channel = 0;
+ break;
+ case 0x700:
+ priv->chip_type = FT2232H;
+ break;
+ case 0x800:
+ priv->chip_type = FT4232H;
+ break;
+ case 0x900:
priv->chip_type = FT232H;
- } else {
- /* Assume it's an FT-X series device */
+ break;
+ case 0x1000:
priv->chip_type = FTX;
+ priv->baud_base = 48000000 / 2;
+ break;
+ case 0x2800:
+ priv->chip_type = FT2233HP;
+ break;
+ case 0x2900:
+ priv->chip_type = FT4233HP;
+ break;
+ case 0x3000:
+ priv->chip_type = FT2232HP;
+ break;
+ case 0x3100:
+ priv->chip_type = FT4232HP;
+ break;
+ case 0x3200:
+ priv->chip_type = FT233HP;
+ break;
+ case 0x3300:
+ priv->chip_type = FT232HP;
+ break;
+ case 0x3600:
+ priv->chip_type = FT4232HA;
+ break;
+ default:
+ if (version < 0x200) {
+ priv->chip_type = SIO;
+ priv->baud_base = 12000000 / 16;
+ priv->channel = 0;
+ } else {
+ dev_err(&port->dev, "unknown device type: 0x%02x\n", version);
+ return -ENODEV;
+ }
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
+
+ return 0;
}
@@ -1718,7 +1686,7 @@ static ssize_t event_char_store(struct device *dev,
usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_EVENT_CHAR_REQUEST,
FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
- v, priv->interface,
+ v, priv->channel,
NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dev_dbg(&port->dev, "Unable to write event character: %i\n", rv);
@@ -1729,51 +1697,42 @@ static ssize_t event_char_store(struct device *dev,
}
static DEVICE_ATTR_WO(event_char);
-static int create_sysfs_attrs(struct usb_serial_port *port)
-{
- struct ftdi_private *priv = usb_get_serial_port_data(port);
- int retval = 0;
-
- /* XXX I've no idea if the original SIO supports the event_char
- * sysfs parameter, so I'm playing it safe. */
- if (priv->chip_type != SIO) {
- dev_dbg(&port->dev, "sysfs attributes for %s\n", ftdi_chip_name[priv->chip_type]);
- retval = device_create_file(&port->dev, &dev_attr_event_char);
- if ((!retval) &&
- (priv->chip_type == FT232BM ||
- priv->chip_type == FT2232C ||
- priv->chip_type == FT232RL ||
- priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H ||
- priv->chip_type == FT232H ||
- priv->chip_type == FTX)) {
- retval = device_create_file(&port->dev,
- &dev_attr_latency_timer);
- }
- }
- return retval;
-}
+static struct attribute *ftdi_attrs[] = {
+ &dev_attr_event_char.attr,
+ &dev_attr_latency_timer.attr,
+ NULL
+};
-static void remove_sysfs_attrs(struct usb_serial_port *port)
+static umode_t ftdi_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
+ enum ftdi_chip_type type = priv->chip_type;
- /* XXX see create_sysfs_attrs */
- if (priv->chip_type != SIO) {
- device_remove_file(&port->dev, &dev_attr_event_char);
- if (priv->chip_type == FT232BM ||
- priv->chip_type == FT2232C ||
- priv->chip_type == FT232RL ||
- priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H ||
- priv->chip_type == FT232H ||
- priv->chip_type == FTX) {
- device_remove_file(&port->dev, &dev_attr_latency_timer);
- }
+ if (attr == &dev_attr_event_char.attr) {
+ if (type == SIO)
+ return 0;
+ }
+
+ if (attr == &dev_attr_latency_timer.attr) {
+ if (type == SIO || type == FT232A)
+ return 0;
}
+ return attr->mode;
}
+static const struct attribute_group ftdi_group = {
+ .attrs = ftdi_attrs,
+ .is_visible = ftdi_is_visible,
+};
+
+static const struct attribute_group *ftdi_groups[] = {
+ &ftdi_group,
+ NULL
+};
+
#ifdef CONFIG_GPIOLIB
static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
@@ -1792,7 +1751,7 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
usb_sndctrlpipe(serial->dev, 0),
FTDI_SIO_SET_BITMODE_REQUEST,
FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val,
- priv->interface, NULL, 0, WDR_TIMEOUT);
+ priv->channel, NULL, 0, WDR_TIMEOUT);
if (result < 0) {
dev_err(&serial->interface->dev,
"bitmode request failed for value 0x%04x: %d\n",
@@ -1856,7 +1815,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
result = usb_control_msg_recv(serial->dev, 0,
FTDI_SIO_READ_PINS_REQUEST,
FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
- priv->interface, &buf, 1, WDR_TIMEOUT,
+ priv->channel, &buf, 1, WDR_TIMEOUT,
GFP_KERNEL);
if (result == 0)
result = buf;
@@ -2141,7 +2100,7 @@ static int ftdi_gpio_init(struct usb_serial_port *port)
case FT232H:
result = ftdi_gpio_init_ft232h(port);
break;
- case FT232RL:
+ case FT232R:
result = ftdi_gpio_init_ft232r(port);
break;
case FTX:
@@ -2211,12 +2170,9 @@ static void ftdi_gpio_remove(struct usb_serial_port *port) { }
* ***************************************************************************
*/
-/* Probe function to check for special devices */
-static int ftdi_sio_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
+static int ftdi_probe(struct usb_serial *serial, const struct usb_device_id *id)
{
- const struct ftdi_sio_quirk *quirk =
- (struct ftdi_sio_quirk *)id->driver_info;
+ const struct ftdi_quirk *quirk = (struct ftdi_quirk *)id->driver_info;
if (quirk && quirk->probe) {
int ret = quirk->probe(serial);
@@ -2229,10 +2185,10 @@ static int ftdi_sio_probe(struct usb_serial *serial,
return 0;
}
-static int ftdi_sio_port_probe(struct usb_serial_port *port)
+static int ftdi_port_probe(struct usb_serial_port *port)
{
+ const struct ftdi_quirk *quirk = usb_get_serial_data(port->serial);
struct ftdi_private *priv;
- const struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
int result;
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
@@ -2246,12 +2202,14 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
- ftdi_determine_type(port);
+ result = ftdi_determine_type(port);
+ if (result)
+ goto err_free;
+
ftdi_set_max_packet_size(port);
if (read_latency_timer(port) < 0)
priv->latency = 16;
write_latency_timer(port);
- create_sysfs_attrs(port);
result = ftdi_gpio_init(port);
if (result < 0) {
@@ -2261,6 +2219,11 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
}
return 0;
+
+err_free:
+ kfree(priv);
+
+ return result;
}
/* Setup for the USB-UIRT device, which requires hardwired
@@ -2371,14 +2334,12 @@ static int ftdi_stmclite_probe(struct usb_serial *serial)
return 0;
}
-static void ftdi_sio_port_remove(struct usb_serial_port *port)
+static void ftdi_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
ftdi_gpio_remove(port);
- remove_sysfs_attrs(port);
-
kfree(priv);
}
@@ -2392,7 +2353,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
FTDI_SIO_RESET_SIO,
- priv->interface, NULL, 0, WDR_TIMEOUT);
+ priv->channel, NULL, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
port->tty->termios - this would lose speed settings, etc.
@@ -2415,7 +2376,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, NULL, 0,
+ 0, priv->channel, NULL, 0,
WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "error from flowcontrol urb\n");
}
@@ -2608,7 +2569,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
- value , priv->interface,
+ value, priv->channel,
NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state (state was %d)\n",
__func__, break_state);
@@ -2638,7 +2599,8 @@ static bool ftdi_tx_empty(struct usb_serial_port *port)
* WARNING: set_termios calls this with old_termios in kernel space
*/
static void ftdi_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct device *ddev = &port->dev;
@@ -2744,7 +2706,7 @@ no_skip:
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
- value , priv->interface,
+ value, priv->channel,
NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
dev_err(ddev, "%s FAILED to set databits/stopbits/parity\n",
__func__);
@@ -2757,7 +2719,7 @@ no_data_parity_stop_changes:
if (usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface,
+ 0, priv->channel,
NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(ddev, "%s error from disable flowcontrol urb\n",
__func__);
@@ -2791,7 +2753,7 @@ no_c_cflag_changes:
index = FTDI_SIO_DISABLE_FLOW_CTRL;
}
- index |= priv->interface;
+ index |= priv->channel;
ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
@@ -2819,33 +2781,19 @@ static int ftdi_get_modem_status(struct usb_serial_port *port,
if (!buf)
return -ENOMEM;
/*
- * The 8U232AM returns a two byte value (the SIO a 1 byte value) in
- * the same format as the data returned from the in point.
+ * The device returns a two byte value (the SIO a 1 byte value) in the
+ * same format as the data returned from the IN endpoint.
*/
- switch (priv->chip_type) {
- case SIO:
+ if (priv->chip_type == SIO)
len = 1;
- break;
- case FT8U232AM:
- case FT232BM:
- case FT2232C:
- case FT232RL:
- case FT2232H:
- case FT4232H:
- case FT232H:
- case FTX:
+ else
len = 2;
- break;
- default:
- ret = -EFAULT;
- goto out;
- }
ret = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
FTDI_SIO_GET_MODEM_STATUS_REQUEST,
FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, priv->interface,
+ 0, priv->channel,
buf, len, WDR_TIMEOUT);
/* NOTE: We allow short responses and handle that below. */
@@ -2915,6 +2863,41 @@ static int ftdi_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
+static struct usb_serial_driver ftdi_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ftdi_sio",
+ .dev_groups = ftdi_groups,
+ },
+ .description = "FTDI USB Serial Device",
+ .id_table = id_table_combined,
+ .num_ports = 1,
+ .bulk_in_size = 512,
+ .bulk_out_size = 256,
+ .probe = ftdi_probe,
+ .port_probe = ftdi_port_probe,
+ .port_remove = ftdi_port_remove,
+ .open = ftdi_open,
+ .dtr_rts = ftdi_dtr_rts,
+ .throttle = usb_serial_generic_throttle,
+ .unthrottle = usb_serial_generic_unthrottle,
+ .process_read_urb = ftdi_process_read_urb,
+ .prepare_write_buffer = ftdi_prepare_write_buffer,
+ .tiocmget = ftdi_tiocmget,
+ .tiocmset = ftdi_tiocmset,
+ .tiocmiwait = usb_serial_generic_tiocmiwait,
+ .get_icount = usb_serial_generic_get_icount,
+ .ioctl = ftdi_ioctl,
+ .get_serial = get_serial_info,
+ .set_serial = set_serial_info,
+ .set_termios = ftdi_set_termios,
+ .break_ctl = ftdi_break_ctl,
+ .tx_empty = ftdi_tx_empty,
+};
+
+static struct usb_serial_driver * const serial_drivers[] = {
+ &ftdi_device, NULL
+};
module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
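
The sysfs rework above replaces the hand-rolled create_sysfs_attrs()/remove_sysfs_attrs() pair with a static attribute group: ftdi_is_visible() returns 0 for attributes that do not apply to the detected chip type, and hanging ftdi_groups off .dev_groups lets the driver core create and remove the visible attributes together with the port device. A compressed sketch of the contract, with hypothetical names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL
};

static umode_t example_is_visible(struct kobject *kobj, struct attribute *attr,
                                  int idx)
{
        /* return 0 to hide this attribute on this device, attr->mode to show it */
        return attr->mode;
}

static const struct attribute_group example_group = {
        .attrs          = example_attrs,
        .is_visible     = example_is_visible,
};
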
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index be1641e0408b..55ea61264f91 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -40,11 +40,11 @@
#define FTDI_SIO_READ_PINS 0x0c /* Read immediate value of pins */
#define FTDI_SIO_READ_EEPROM 0x90 /* Read EEPROM */
-/* Interface indices for FT2232, FT2232H and FT4232H devices */
-#define INTERFACE_A 1
-#define INTERFACE_B 2
-#define INTERFACE_C 3
-#define INTERFACE_D 4
+/* Channel indices for FT2232, FT2232H and FT4232H devices */
+#define CHANNEL_A 1
+#define CHANNEL_B 2
+#define CHANNEL_C 3
+#define CHANNEL_D 4
/*
@@ -153,18 +153,6 @@
* not supported by the FT8U232AM).
*/
-enum ftdi_chip_type {
- SIO = 1,
- FT8U232AM = 2,
- FT232BM = 3,
- FT2232C = 4,
- FT232RL = 5,
- FT2232H = 6,
- FT4232H = 7,
- FT232H = 8,
- FTX = 9,
-};
-
enum ftdi_sio_baudrate {
ftdi_sio_b300 = 0,
ftdi_sio_b600 = 1,
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e92c165c86b..e2099445db70 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -25,6 +25,13 @@
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
#define FTDI_FTX_PID 0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */
+#define FTDI_FT2233HP_PID 0x6040 /* Dual channel hi-speed device with PD */
+#define FTDI_FT4233HP_PID 0x6041 /* Quad channel hi-speed device with PD */
+#define FTDI_FT2232HP_PID 0x6042 /* Dual channel hi-speed device with PD */
+#define FTDI_FT4232HP_PID 0x6043 /* Quad channel hi-speed device with PD */
+#define FTDI_FT233HP_PID 0x6044 /* Dual channel hi-speed device with PD */
+#define FTDI_FT232HP_PID 0x6045 /* Dual channel hi-speed device with PD */
+#define FTDI_FT4232HA_PID 0x6048 /* Quad channel automotive grade hi-speed device */
#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
@@ -662,6 +669,12 @@
#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
+ * Omron corporation (https://www.omron.com)
+ */
+ #define OMRON_VID 0x0590
+ #define OMRON_CS1W_CIF31_PID 0x00b2
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index ffa622539a25..3a4c0febf335 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -281,7 +281,7 @@ static int send_iosp_ext_cmd(struct edgeport_port *edge_port, __u8 command,
static int calc_baud_rate_divisor(struct device *dev, int baud_rate, int *divisor);
static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port,
- struct ktermios *old_termios);
+ const struct ktermios *old_termios);
static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
__u8 regNum, __u8 regValue);
static int write_cmd_usb(struct edgeport_port *edge_port,
@@ -1441,7 +1441,8 @@ static void edge_unthrottle(struct tty_struct *tty)
* the termios structure
*****************************************************************************/
static void edge_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
@@ -2325,7 +2326,7 @@ static int send_cmd_write_uart_register(struct edgeport_port *edge_port,
*****************************************************************************/
static void change_port_settings(struct tty_struct *tty,
- struct edgeport_port *edge_port, struct ktermios *old_termios)
+ struct edgeport_port *edge_port, const struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct edgeport_serial *edge_serial =
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index feba2a8d1233..bc3c24ea42c1 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -221,7 +221,8 @@ static void stop_read(struct edgeport_port *edge_port);
static int restart_read(struct edgeport_port *edge_port);
static void edge_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void edge_send(struct usb_serial_port *port, struct tty_struct *tty);
static int do_download_mode(struct edgeport_serial *serial,
@@ -2210,7 +2211,7 @@ static int restart_read(struct edgeport_port *edge_port)
}
static void change_port_settings(struct tty_struct *tty,
- struct edgeport_port *edge_port, struct ktermios *old_termios)
+ struct edgeport_port *edge_port, const struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct ump_uart_config *config;
@@ -2351,7 +2352,8 @@ static void change_port_settings(struct tty_struct *tty,
}
static void edge_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 7b44dbea95cd..82f108134e6f 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -51,7 +51,8 @@ static unsigned int ir_write_room(struct tty_struct *tty);
static void ir_write_bulk_callback(struct urb *urb);
static void ir_process_read_urb(struct urb *urb);
static void ir_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
/* Not that this lot means you can only have one per system */
static u8 ir_baud;
@@ -376,7 +377,8 @@ static void ir_process_read_urb(struct urb *urb)
}
static void ir_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *udev = port->serial->dev;
unsigned char *transfer_buffer;
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 0be3b5e1eaf3..77cba71bcccb 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -879,7 +879,8 @@ static int iuu_uart_baud(struct usb_serial_port *port, u32 baud_base,
}
static void iuu_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
const u32 supported_mask = CMSPAR|PARENB|PARODD;
struct iuu_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 1cfcd805f286..2966e0c4941e 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -616,7 +616,8 @@ static void keyspan_break_ctl(struct tty_struct *tty, int break_state)
static void keyspan_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
int baud_rate, device_port;
struct keyspan_port_private *p_priv;
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 3e7628becdcd..6fd15cd9e1eb 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -321,7 +321,8 @@ static void keyspan_pda_break_ctl(struct tty_struct *tty, int break_state)
}
static void keyspan_pda_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
speed_t speed;
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index edcc57bd9b5e..394b3189e003 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -56,7 +56,8 @@ static void klsi_105_port_remove(struct usb_serial_port *port);
static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port);
static void klsi_105_close(struct usb_serial_port *port);
static void klsi_105_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int klsi_105_tiocmget(struct tty_struct *tty);
static void klsi_105_process_read_urb(struct urb *urb);
static int klsi_105_prepare_write_buffer(struct usb_serial_port *port,
@@ -366,7 +367,7 @@ static void klsi_105_process_read_urb(struct urb *urb)
static void klsi_105_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct klsi_105_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 4ed8b8b0a361..5e775f68fcb8 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -62,7 +62,8 @@ static int kobil_tiocmset(struct tty_struct *tty,
static void kobil_read_int_callback(struct urb *urb);
static void kobil_write_int_callback(struct urb *urb);
static void kobil_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
static const struct usb_device_id id_table[] = {
@@ -474,7 +475,8 @@ static int kobil_tiocmset(struct tty_struct *tty,
}
static void kobil_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old)
+ struct usb_serial_port *port,
+ const struct ktermios *old)
{
struct kobil_private *priv;
int result;
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index ecd5b921e374..d3852feb81a4 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -45,7 +45,8 @@ static void mct_u232_close(struct usb_serial_port *port);
static void mct_u232_dtr_rts(struct usb_serial_port *port, int on);
static void mct_u232_read_int_callback(struct urb *urb);
static void mct_u232_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void mct_u232_break_ctl(struct tty_struct *tty, int break_state);
static int mct_u232_tiocmget(struct tty_struct *tty);
static int mct_u232_tiocmset(struct tty_struct *tty,
@@ -593,7 +594,7 @@ exit:
static void mct_u232_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct mct_u232_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 23ccbba716c7..1d1f85fabc28 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1356,7 +1356,7 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
*/
static void change_port_settings(struct tty_struct *tty,
struct moschip_port *mos7720_port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial_port *port;
struct usb_serial *serial;
@@ -1494,7 +1494,8 @@ static void change_port_settings(struct tty_struct *tty,
* termios structure.
*/
static void mos7720_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
int status;
struct moschip_port *mos7720_port;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 925067a7978d..6b12bb4648b8 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1188,7 +1188,8 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
*****************************************************************************/
static void mos7840_change_port_settings(struct tty_struct *tty,
- struct moschip_port *mos7840_port, struct ktermios *old_termios)
+ struct moschip_port *mos7840_port,
+ const struct ktermios *old_termios)
{
struct usb_serial_port *port = mos7840_port->port;
int baud;
@@ -1330,7 +1331,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
static void mos7840_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
int status;
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index eb45a9b0005c..faa0eedfe245 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -760,7 +760,7 @@ static int mxuport_tiocmget(struct tty_struct *tty)
}
static int mxuport_set_termios_flow(struct tty_struct *tty,
- struct ktermios *old_termios,
+ const struct ktermios *old_termios,
struct usb_serial_port *port,
struct usb_serial *serial)
{
@@ -834,7 +834,7 @@ out:
static void mxuport_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
- struct ktermios *old_termios)
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
u8 *buf;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index de59fa919540..697683e3fbff 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -253,8 +253,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
+#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
+#define QUECTEL_PRODUCT_RM520N 0x0801
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
#define QUECTEL_PRODUCT_EC200T 0x6026
#define QUECTEL_PRODUCT_RM500K 0x7001
@@ -438,6 +440,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
+#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
+#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -573,6 +577,10 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
+/* OPPO products */
+#define OPPO_VENDOR_ID 0x22d9
+#define OPPO_PRODUCT_R11 0x276c
+
/* Device flags */
@@ -1131,6 +1139,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
.driver_info = NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0203, 0xff), /* BG95-M3 */
+ .driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -1138,6 +1148,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1149,6 +1162,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
.driver_info = ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
@@ -1993,8 +2009,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2155,6 +2175,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
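The option.c additions above all follow one pattern: match a vendor-specific (class 0xff) interface on a known VID/PID and, where needed, pass quirks through .driver_info — RSVD(n) keeps the serial driver off interface n (typically the rmnet/network function), while ZLP makes bulk-out transfers end with a zero-length packet. A minimal sketch of such an entry, using placeholder IDs (EXAMPLE_*) rather than any real device:

#define EXAMPLE_VENDOR_ID	0x1234	/* placeholder, not a real vendor */
#define EXAMPLE_PRODUCT_X	0x5678	/* placeholder, not a real product */

static const struct usb_device_id example_ids[] = {
	/* bind all 0xff interfaces, stay off interface 4, send ZLPs on bulk out */
	{ USB_DEVICE_INTERFACE_CLASS(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_X, 0xff),
	  .driver_info = RSVD(4) | ZLP },
	{ }	/* terminating entry */
};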
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a5caedbe72e2..6365cfe5402c 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -119,7 +119,8 @@ struct oti6858_control_pkt {
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port);
static void oti6858_close(struct usb_serial_port *port);
static void oti6858_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static void oti6858_init_termios(struct tty_struct *tty);
static void oti6858_read_int_callback(struct urb *urb);
static void oti6858_read_bulk_callback(struct urb *urb);
@@ -395,7 +396,8 @@ static void oti6858_init_termios(struct tty_struct *tty)
}
static void oti6858_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 40b1ab3d284d..8949c1891164 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -789,7 +789,8 @@ static bool pl2303_enable_xonxoff(struct tty_struct *tty, const struct pl2303_ty
}
static void pl2303_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 586ef5551e76..b1e844bf31f8 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */
{DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 36b1e064e51f..6fca40ace83a 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -261,8 +261,8 @@ static int qt2_calc_num_ports(struct usb_serial *serial,
}
static void qt2_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct qt2_port_private *port_priv;
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 7039dc918827..09a972a838ee 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -283,7 +283,8 @@ static void spcp8x5_init_termios(struct tty_struct *tty)
}
static void spcp8x5_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 181e302136a5..1e1888b66305 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -214,8 +214,8 @@ out: kfree(data);
static void ssu100_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct usb_device *dev = port->serial->dev;
struct ktermios *termios = &tty->termios;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 18c0bd853392..b99f78224846 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -314,7 +314,8 @@ static bool ti_tx_empty(struct usb_serial_port *port);
static void ti_throttle(struct tty_struct *tty);
static void ti_unthrottle(struct tty_struct *tty);
static void ti_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int ti_tiocmget(struct tty_struct *tty);
static int ti_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -892,7 +893,8 @@ static void ti_unthrottle(struct tty_struct *tty)
}
static void ti_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct ti_port *tport = usb_get_serial_port_data(port);
struct ti_uart_config *config;
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 63d4a784ae45..c47439bd90fa 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -296,8 +296,8 @@ static speed_t upd78f0730_get_baud_rate(struct tty_struct *tty)
}
static void upd78f0730_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct device *dev = &port->dev;
struct upd78f0730_line_control request;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index e35bea2235c1..164521ee10c6 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -519,7 +519,8 @@ static int serial_ioctl(struct tty_struct *tty,
return retval;
}
-static void serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+static void serial_set_termios(struct tty_struct *tty,
+ const struct ktermios *old)
{
struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 332fb92ae575..7f82d40753ee 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -82,7 +82,8 @@ static void whiteheat_close(struct usb_serial_port *port);
static void whiteheat_get_serial(struct tty_struct *tty,
struct serial_struct *ss);
static void whiteheat_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
static int whiteheat_tiocmget(struct tty_struct *tty);
static int whiteheat_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
@@ -442,7 +443,8 @@ static void whiteheat_get_serial(struct tty_struct *tty, struct serial_struct *s
static void whiteheat_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
firm_setup_port(tty);
}
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
index 6853cd56d8dc..f3811e060a44 100644
--- a/drivers/usb/serial/xr_serial.c
+++ b/drivers/usb/serial/xr_serial.c
@@ -104,7 +104,8 @@ static int xr21v141x_uart_enable(struct usb_serial_port *port);
static int xr21v141x_uart_disable(struct usb_serial_port *port);
static int xr21v141x_fifo_reset(struct usb_serial_port *port);
static void xr21v141x_set_line_settings(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
struct xr_type {
int reg_width;
@@ -133,8 +134,8 @@ struct xr_type {
int (*disable)(struct usb_serial_port *port);
int (*fifo_reset)(struct usb_serial_port *port);
void (*set_line_settings)(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios);
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios);
};
enum xr_type_id {
@@ -622,8 +623,8 @@ static int xr21v141x_set_baudrate(struct tty_struct *tty, struct usb_serial_port
}
static void xr_set_flow_mode(struct tty_struct *tty,
- struct usb_serial_port *port,
- struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
@@ -674,7 +675,8 @@ static void xr_set_flow_mode(struct tty_struct *tty,
}
static void xr21v141x_set_line_settings(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct ktermios *termios = &tty->termios;
u8 bits = 0;
@@ -732,7 +734,8 @@ static void xr21v141x_set_line_settings(struct tty_struct *tty,
}
static void xr_cdc_set_line_coding(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
struct usb_host_interface *alt = port->serial->interface->cur_altsetting;
@@ -809,7 +812,8 @@ static void xr_cdc_set_line_coding(struct tty_struct *tty,
}
static void xr_set_termios(struct tty_struct *tty,
- struct usb_serial_port *port, struct ktermios *old_termios)
+ struct usb_serial_port *port,
+ const struct ktermios *old_termios)
{
struct xr_data *data = usb_get_serial_port_data(port);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 1db2eefeea22..01f3c2779ccf 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -201,7 +201,7 @@ static int onetouch_connect_input(struct us_data *ss)
onetouch->dev = input_dev;
if (udev->manufacturer)
- strlcpy(onetouch->name, udev->manufacturer,
+ strscpy(onetouch->name, udev->manufacturer,
sizeof(onetouch->name));
if (udev->product) {
if (udev->manufacturer)
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 84dc270f6f73..de3836412bf3 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -283,7 +283,7 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *
set_host_byte(cmnd, DID_OK);
break;
case RC_TMF_NOT_SUPPORTED:
- set_host_byte(cmnd, DID_TARGET_FAILURE);
+ set_host_byte(cmnd, DID_BAD_TARGET);
break;
default:
uas_log_cmd_state(cmnd, "response iu", response_code);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1a05e3dcfec8..20dcbccb290b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1275,12 +1275,6 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
-UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
- "Samsung",
- "Flash Drive FIT",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_MAX_SECTORS_64),
-
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
@@ -2294,6 +2288,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */
+UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
+ "NXP Semiconductors",
+ "PN7462AU",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Supplied with some Castlewood ORB removable drives */
UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
"Double-H Technology",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 4051c8cd0cd8..251778d14e2d 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
* commands in UAS mode. Observed with the 1.28 firmware; are there others?
@@ -62,6 +69,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+/* Reported-by: Tom Hu <huxiaoying@kylinos.cn> */
+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
+ "ASUS",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
@@ -69,6 +83,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_LUNS),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
+ "Hiksemi",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
@@ -111,6 +132,13 @@ UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
+UNUSUAL_DEV(0x17ef, 0x3899, 0x0000, 0x9999,
+ "Thinkplus",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 5defdfead653..831e7049977d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_ANX7411
tristate "Analogix ANX7411 Type-C DRP Port controller driver"
depends on I2C
depends on USB_ROLE_SWITCH
+ depends on POWER_SUPPLY
help
Say Y or M here if your system has Analogix ANX7411 Type-C DRP Port
controller driver.
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index c1d8c23baa39..de66a2949e33 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -99,8 +99,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
break;
default:
break;
diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c
index c0f0842d443c..b8f3b75fd7eb 100644
--- a/drivers/usb/typec/anx7411.c
+++ b/drivers/usb/typec/anx7411.c
@@ -1105,7 +1105,7 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
int ret;
struct device_node *node;
- node = of_find_node_by_name(dev->of_node, "orientation_switch");
+ node = of_get_child_by_name(dev->of_node, "orientation_switch");
if (!node)
return 0;
@@ -1115,7 +1115,7 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx,
return ret;
}
- node = of_find_node_by_name(dev->of_node, "mode_switch");
+ node = of_get_child_by_name(dev->of_node, "mode_switch");
if (!node) {
dev_err(dev, "no typec mux exist");
ret = -ENODEV;
@@ -1541,7 +1541,7 @@ free_i2c_dummy:
return ret;
}
-static int anx7411_i2c_remove(struct i2c_client *client)
+static void anx7411_i2c_remove(struct i2c_client *client)
{
struct anx7411_data *plat = i2c_get_clientdata(client);
@@ -1565,8 +1565,6 @@ static int anx7411_i2c_remove(struct i2c_client *client)
typec_unregister_port(plat->typec.port);
anx7411_port_unregister_altmodes(plat->typec.port_amode);
-
- return 0;
}
static const struct i2c_device_id anx7411_id[] = {
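Many of the I2C driver hunks from here on make the same mechanical change: the i2c_driver .remove() callback now returns void, so the trailing "return 0;" disappears and any failure has to be logged rather than returned. A minimal sketch of the new-style callback, with hypothetical driver names:

static void example_remove(struct i2c_client *client)
{
	struct example_chip *chip = i2c_get_clientdata(client);

	/* undo whatever probe registered; nothing is returned anymore */
	example_unregister_port(chip);
}

static struct i2c_driver example_driver = {
	.driver	= { .name = "example" },
	.remove	= example_remove,
};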
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebc29ec20e3f..bd5e5dd70431 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2346,6 +2346,7 @@ static void __exit typec_exit(void)
ida_destroy(&typec_index_ida);
bus_unregister(&typec_bus);
class_unregister(&typec_mux_class);
+ class_unregister(&retimer_class);
}
module_exit(typec_exit);
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index cd47c3597e19..2a58185fb14c 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -245,14 +245,12 @@ err_put_fwnode:
return ret;
}
-static int hd3ss3220_remove(struct i2c_client *client)
+static void hd3ss3220_remove(struct i2c_client *client)
{
struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
typec_unregister_port(hd3ss3220->port);
usb_role_switch_put(hd3ss3220->role_sw);
-
- return 0;
}
static const struct of_device_id dev_ids[] = {
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 464330776cd6..941735c73161 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -29,7 +29,7 @@ static int switch_fwnode_match(struct device *dev, const void *fwnode)
if (!is_typec_switch_dev(dev))
return 0;
- return dev_fwnode(dev) == fwnode;
+ return device_match_fwnode(dev, fwnode);
}
static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
@@ -259,7 +259,7 @@ static int mux_fwnode_match(struct device *dev, const void *fwnode)
if (!is_typec_mux_dev(dev))
return 0;
- return dev_fwnode(dev) == fwnode;
+ return device_match_fwnode(dev, fwnode);
}
static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c
index 6184f5367190..d6495e533e58 100644
--- a/drivers/usb/typec/mux/fsa4480.c
+++ b/drivers/usb/typec/mux/fsa4480.c
@@ -181,14 +181,12 @@ static int fsa4480_probe(struct i2c_client *client)
return 0;
}
-static int fsa4480_remove(struct i2c_client *client)
+static void fsa4480_remove(struct i2c_client *client)
{
struct fsa4480 *fsa = i2c_get_clientdata(client);
typec_mux_unregister(fsa->mux);
typec_switch_unregister(fsa->sw);
-
- return 0;
}
static const struct i2c_device_id fsa4480_table[] = {
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 47b733f78fb0..e1f4df7238bf 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -569,13 +569,6 @@ err_unregister_switch:
return ret;
}
-static int is_memory(struct acpi_resource *res, void *data)
-{
- struct resource r;
-
- return !acpi_dev_resource_memory(res, &r);
-}
-
/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
static const struct acpi_device_id iom_acpi_ids[] = {
/* TigerLake */
@@ -583,6 +576,9 @@ static const struct acpi_device_id iom_acpi_ids[] = {
/* AlderLake */
{ "INTC1079", 0x160, },
+
+ /* Meteor Lake */
+ { "INTC107A", 0x160, },
{}
};
@@ -606,7 +602,7 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
return -ENODEV;
INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0)
return ret;
diff --git a/drivers/usb/typec/mux/pi3usb30532.c b/drivers/usb/typec/mux/pi3usb30532.c
index 6ce9f282594e..1cd388b55c30 100644
--- a/drivers/usb/typec/mux/pi3usb30532.c
+++ b/drivers/usb/typec/mux/pi3usb30532.c
@@ -160,13 +160,12 @@ static int pi3usb30532_probe(struct i2c_client *client)
return 0;
}
-static int pi3usb30532_remove(struct i2c_client *client)
+static void pi3usb30532_remove(struct i2c_client *client)
{
struct pi3usb30532 *pi = i2c_get_clientdata(client);
typec_mux_unregister(pi->mux);
typec_switch_unregister(pi->sw);
- return 0;
}
static const struct i2c_device_id pi3usb30532_table[] = {
diff --git a/drivers/usb/typec/qcom-pmic-typec.c b/drivers/usb/typec/qcom-pmic-typec.c
index a0454a80c4a2..432ea62f1bab 100644
--- a/drivers/usb/typec/qcom-pmic-typec.c
+++ b/drivers/usb/typec/qcom-pmic-typec.c
@@ -195,9 +195,8 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
qcom_usb->role_sw = fwnode_usb_role_switch_get(dev_fwnode(qcom_usb->dev));
if (IS_ERR(qcom_usb->role_sw)) {
- if (PTR_ERR(qcom_usb->role_sw) != -EPROBE_DEFER)
- dev_err(dev, "failed to get role switch\n");
- ret = PTR_ERR(qcom_usb->role_sw);
+ ret = dev_err_probe(dev, PTR_ERR(qcom_usb->role_sw),
+ "failed to get role switch\n");
goto err_typec_port;
}
diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c
index 2003731f1bee..ee94dbbe4745 100644
--- a/drivers/usb/typec/retimer.c
+++ b/drivers/usb/typec/retimer.c
@@ -31,7 +31,7 @@ static bool dev_name_ends_with(struct device *dev, const char *suffix)
static int retimer_fwnode_match(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-retimer");
+ return device_match_fwnode(dev, fwnode) && dev_name_ends_with(dev, "-retimer");
}
static void *typec_retimer_match(struct fwnode_handle *fwnode, const char *id, void *data)
diff --git a/drivers/usb/typec/rt1719.c b/drivers/usb/typec/rt1719.c
index f1b698edd7eb..ea8b700b0ceb 100644
--- a/drivers/usb/typec/rt1719.c
+++ b/drivers/usb/typec/rt1719.c
@@ -930,14 +930,12 @@ err_fwnode_put:
return ret;
}
-static int rt1719_remove(struct i2c_client *i2c)
+static void rt1719_remove(struct i2c_client *i2c)
{
struct rt1719_data *data = i2c_get_clientdata(i2c);
typec_unregister_port(data->port);
usb_role_switch_put(data->role_sw);
-
- return 0;
}
static const struct of_device_id __maybe_unused rt1719_device_table[] = {
diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
index e7745d1c2a5c..494b371151e0 100644
--- a/drivers/usb/typec/stusb160x.c
+++ b/drivers/usb/typec/stusb160x.c
@@ -750,11 +750,8 @@ static int stusb160x_probe(struct i2c_client *client)
if (client->irq) {
chip->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(chip->role_sw)) {
- ret = PTR_ERR(chip->role_sw);
- if (ret != -EPROBE_DEFER)
- dev_err(chip->dev,
- "Failed to get usb role switch: %d\n",
- ret);
+ ret = dev_err_probe(chip->dev, PTR_ERR(chip->role_sw),
+ "Failed to get usb role switch\n");
goto port_unregister;
}
@@ -801,7 +798,7 @@ fwnode_put:
return ret;
}
-static int stusb160x_remove(struct i2c_client *client)
+static void stusb160x_remove(struct i2c_client *client)
{
struct stusb160x *chip = i2c_get_clientdata(client);
@@ -823,8 +820,6 @@ static int stusb160x_remove(struct i2c_client *client)
if (chip->main_supply)
regulator_disable(chip->main_supply);
-
- return 0;
}
static int __maybe_unused stusb160x_suspend(struct device *dev)
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 073fd2ea5e0b..e6b88ca4a4b9 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -35,6 +35,17 @@ config TYPEC_MT6360
USB Type-C. It works with Type-C Port Controller Manager
to provide USB PD and USB Type-C functionalities.
+config TYPEC_TCPCI_MT6370
+ tristate "MediaTek MT6370 Type-C driver"
+ depends on MFD_MT6370
+ help
+ MediaTek MT6370 is a multi-functional IC that includes
+ USB Type-C. It works with Type-C Port Controller Manager
+ to provide USB PD and USB Type-C functionalities.
+
+ This driver can also be built as a module. The module
+ will be called "tcpci_mt6370".
+
config TYPEC_TCPCI_MAXIM
tristate "Maxim TCPCI based Type-C chip driver"
help
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
index 7d499f3569fd..906d9dced8e7 100644
--- a/drivers/usb/typec/tcpm/Makefile
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -6,4 +6,5 @@ typec_wcove-y := wcove.o
obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o
+obj-$(CONFIG_TYPEC_TCPCI_MT6370) += tcpci_mt6370.o
obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 96c55eaf3f80..721b2a548084 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -151,7 +151,7 @@ static void _fusb302_log(struct fusb302_chip *chip, const char *fmt,
if (fusb302_log_full(chip)) {
chip->logbuffer_head = max(chip->logbuffer_head - 1, 0);
- strlcpy(tmpbuffer, "overflow", sizeof(tmpbuffer));
+ strscpy(tmpbuffer, "overflow", sizeof(tmpbuffer));
}
if (chip->logbuffer_head < 0 ||
@@ -1743,9 +1743,8 @@ static int fusb302_probe(struct i2c_client *client,
chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
if (IS_ERR(chip->tcpm_port)) {
fwnode_handle_put(chip->tcpc_dev.fwnode);
- ret = PTR_ERR(chip->tcpm_port);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "cannot register tcpm port, ret=%d", ret);
+ ret = dev_err_probe(dev, PTR_ERR(chip->tcpm_port),
+ "cannot register tcpm port\n");
goto destroy_workqueue;
}
@@ -1771,7 +1770,7 @@ destroy_workqueue:
return ret;
}
-static int fusb302_remove(struct i2c_client *client)
+static void fusb302_remove(struct i2c_client *client)
{
struct fusb302_chip *chip = i2c_get_clientdata(client);
@@ -1783,8 +1782,6 @@ static int fusb302_remove(struct i2c_client *client)
fwnode_handle_put(chip->tcpc_dev.fwnode);
destroy_workqueue(chip->wq);
fusb302_debugfs_exit(chip);
-
- return 0;
}
static int fusb302_pm_suspend(struct device *dev)
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 812784702d53..b2bfcebe218f 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -27,11 +27,6 @@
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
-#define tcpc_presenting_rd(reg, cc) \
- (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
- (((reg) & (TCPC_ROLE_CTRL_## cc ##_MASK << TCPC_ROLE_CTRL_## cc ##_SHIFT)) == \
- (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_## cc ##_SHIFT)))
-
struct tcpci {
struct device *dev;
@@ -218,23 +213,6 @@ static int tcpci_start_toggling(struct tcpc_dev *tcpc,
TCPC_CMD_LOOK4CONNECTION);
}
-static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
-{
- switch (cc) {
- case 0x1:
- return sink ? TYPEC_CC_RP_DEF : TYPEC_CC_RA;
- case 0x2:
- return sink ? TYPEC_CC_RP_1_5 : TYPEC_CC_RD;
- case 0x3:
- if (sink)
- return TYPEC_CC_RP_3_0;
- fallthrough;
- case 0x0:
- default:
- return TYPEC_CC_OPEN;
- }
-}
-
static int tcpci_get_cc(struct tcpc_dev *tcpc,
enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
@@ -868,7 +846,7 @@ static int tcpci_probe(struct i2c_client *client,
return 0;
}
-static int tcpci_remove(struct i2c_client *client)
+static void tcpci_remove(struct i2c_client *client)
{
struct tcpci_chip *chip = i2c_get_clientdata(client);
int err;
@@ -879,8 +857,6 @@ static int tcpci_remove(struct i2c_client *client)
dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));
tcpci_unregister_port(chip->tcpci);
-
- return 0;
}
static const struct i2c_device_id tcpci_id[] = {
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index 4b6705f3d7b7..03f89e6f1a78 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -492,14 +492,12 @@ unreg_port:
return ret;
}
-static int max_tcpci_remove(struct i2c_client *client)
+static void max_tcpci_remove(struct i2c_client *client)
{
struct max_tcpci_chip *chip = i2c_get_clientdata(client);
if (!IS_ERR_OR_NULL(chip->tcpci))
tcpci_unregister_port(chip->tcpci);
-
- return 0;
}
static const struct i2c_device_id max_tcpci_id[] = {
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c
new file mode 100644
index 000000000000..c5bb201a5163
--- /dev/null
+++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/tcpci.h>
+#include <linux/usb/tcpm.h>
+
+#define MT6370_REG_SYSCTRL8 0x9B
+
+#define MT6370_AUTOIDLE_MASK BIT(3)
+
+#define MT6370_VENDOR_ID 0x29CF
+#define MT6370_TCPC_DID_A 0x2170
+
+struct mt6370_priv {
+ struct device *dev;
+ struct regulator *vbus;
+ struct tcpci *tcpci;
+ struct tcpci_data tcpci_data;
+};
+
+static const struct reg_sequence mt6370_reg_init[] = {
+ REG_SEQ(0xA0, 0x1, 1000),
+ REG_SEQ(0x81, 0x38, 0),
+ REG_SEQ(0x82, 0x82, 0),
+ REG_SEQ(0xBA, 0xFC, 0),
+ REG_SEQ(0xBB, 0x50, 0),
+ REG_SEQ(0x9E, 0x8F, 0),
+ REG_SEQ(0xA1, 0x5, 0),
+ REG_SEQ(0xA2, 0x4, 0),
+ REG_SEQ(0xA3, 0x4A, 0),
+ REG_SEQ(0xA4, 0x01, 0),
+ REG_SEQ(0x95, 0x01, 0),
+ REG_SEQ(0x80, 0x71, 0),
+ REG_SEQ(0x9B, 0x3A, 1000),
+};
+
+static int mt6370_tcpc_init(struct tcpci *tcpci, struct tcpci_data *data)
+{
+ u16 did;
+ int ret;
+
+ ret = regmap_register_patch(data->regmap, mt6370_reg_init,
+ ARRAY_SIZE(mt6370_reg_init));
+ if (ret)
+ return ret;
+
+ ret = regmap_raw_read(data->regmap, TCPC_BCD_DEV, &did, sizeof(u16));
+ if (ret)
+ return ret;
+
+ if (did == MT6370_TCPC_DID_A)
+ return regmap_write(data->regmap, TCPC_FAULT_CTRL, 0x80);
+
+ return 0;
+}
+
+static int mt6370_tcpc_set_vconn(struct tcpci *tcpci, struct tcpci_data *data,
+ bool enable)
+{
+ return regmap_update_bits(data->regmap, MT6370_REG_SYSCTRL8,
+ MT6370_AUTOIDLE_MASK,
+ enable ? 0 : MT6370_AUTOIDLE_MASK);
+}
+
+static int mt6370_tcpc_set_vbus(struct tcpci *tcpci, struct tcpci_data *data,
+ bool source, bool sink)
+{
+ struct mt6370_priv *priv = container_of(data, struct mt6370_priv,
+ tcpci_data);
+ int ret;
+
+ ret = regulator_is_enabled(priv->vbus);
+ if (ret < 0)
+ return ret;
+
+ if (ret && !source)
+ return regulator_disable(priv->vbus);
+
+ if (!ret && source)
+ return regulator_enable(priv->vbus);
+
+ return 0;
+}
+
+static irqreturn_t mt6370_irq_handler(int irq, void *dev_id)
+{
+ struct mt6370_priv *priv = dev_id;
+
+ return tcpci_irq(priv->tcpci);
+}
+
+static int mt6370_check_vendor_info(struct mt6370_priv *priv)
+{
+ struct regmap *regmap = priv->tcpci_data.regmap;
+ u16 vid;
+ int ret;
+
+ ret = regmap_raw_read(regmap, TCPC_VENDOR_ID, &vid, sizeof(u16));
+ if (ret)
+ return ret;
+
+ if (vid != MT6370_VENDOR_ID)
+ return dev_err_probe(priv->dev, -ENODEV,
+ "Vendor ID not correct 0x%02x\n", vid);
+
+ return 0;
+}
+
+static void mt6370_unregister_tcpci_port(void *tcpci)
+{
+ tcpci_unregister_port(tcpci);
+}
+
+static int mt6370_tcpc_probe(struct platform_device *pdev)
+{
+ struct mt6370_priv *priv;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->tcpci_data.regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->tcpci_data.regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to init regmap\n");
+
+ ret = mt6370_check_vendor_info(priv);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get irq\n");
+
+ /* Assign TCPCI feature and ops */
+ priv->tcpci_data.auto_discharge_disconnect = 1;
+ priv->tcpci_data.init = mt6370_tcpc_init;
+ priv->tcpci_data.set_vconn = mt6370_tcpc_set_vconn;
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (!IS_ERR(priv->vbus))
+ priv->tcpci_data.set_vbus = mt6370_tcpc_set_vbus;
+
+ priv->tcpci = tcpci_register_port(dev, &priv->tcpci_data);
+ if (IS_ERR(priv->tcpci))
+ return dev_err_probe(dev, PTR_ERR(priv->tcpci),
+ "Failed to register tcpci port\n");
+
+ ret = devm_add_action_or_reset(dev, mt6370_unregister_tcpci_port, priv->tcpci);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mt6370_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to allocate irq\n");
+
+ device_init_wakeup(dev, true);
+ dev_pm_set_wake_irq(dev, irq);
+
+ return 0;
+}
+
+static int mt6370_tcpc_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+
+ return 0;
+}
+
+static const struct of_device_id mt6370_tcpc_devid_table[] = {
+ { .compatible = "mediatek,mt6370-tcpc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_tcpc_devid_table);
+
+static struct platform_driver mt6370_tcpc_driver = {
+ .driver = {
+ .name = "mt6370-tcpc",
+ .of_match_table = mt6370_tcpc_devid_table,
+ },
+ .probe = mt6370_tcpc_probe,
+ .remove = mt6370_tcpc_remove,
+};
+module_platform_driver(mt6370_tcpc_driver);
+
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_DESCRIPTION("MT6370 USB Type-C Port Controller Interface Driver");
+MODULE_LICENSE("GPL v2");
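Since tcpci_register_port() has no devm_-managed counterpart, the new mt6370 driver ties unregistration to device teardown with devm_add_action_or_reset(), which keeps the rest of the probe path free of manual unwind labels. The pattern in isolation, with hypothetical resource and helper names:

static void example_release(void *res)
{
	/* runs automatically on driver unbind or on a later probe failure */
	example_destroy(res);
}

static int example_probe_step(struct device *dev)
{
	struct example *res = example_create(dev);

	if (IS_ERR(res))
		return PTR_ERR(res);

	/* if registering the action fails, example_release() is called at once */
	return devm_add_action_or_reset(dev, example_release, res);
}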
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 3291ca4948da..7b217c712c11 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -5,6 +5,7 @@
* Richtek RT1711H Type-C Chip Driver
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
@@ -13,16 +14,27 @@
#include <linux/usb/tcpci.h>
#include <linux/usb/tcpm.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#define RT1711H_VID 0x29CF
#define RT1711H_PID 0x1711
+#define RT1711H_DID 0x2171
+#define RT1715_DID 0x2173
-#define RT1711H_RTCTRL8 0x9B
+#define RT1711H_PHYCTRL1 0x80
+#define RT1711H_PHYCTRL2 0x81
+
+#define RT1711H_RTCTRL4 0x93
+/* rx threshold of rd/rp: 1b0 for level 0.4V/0.7V, 1b1 for 0.35V/0.75V */
+#define RT1711H_BMCIO_RXDZSEL BIT(0)
+#define RT1711H_RTCTRL8 0x9B
/* Autoidle timeout = (tout * 2 + 1) * 6.4ms */
#define RT1711H_RTCTRL8_SET(ck300, ship_off, auto_idle, tout) \
(((ck300) << 7) | ((ship_off) << 5) | \
((auto_idle) << 3) | ((tout) & 0x07))
+#define RT1711H_AUTOIDLEEN BIT(3)
+#define RT1711H_ENEXTMSG BIT(4)
#define RT1711H_RTCTRL11 0x9E
@@ -35,10 +47,17 @@
#define RT1711H_RTCTRL15 0xA2
#define RT1711H_RTCTRL16 0xA3
+#define RT1711H_RTCTRL18 0xAF
+/* 1b0 as fixed rx threshold of rd/rp 0.55V, 1b1 depends on RTCTRL4[0] */
+#define BMCIO_RXDZEN BIT(0)
+
struct rt1711h_chip {
struct tcpci_data data;
struct tcpci *tcpci;
struct device *dev;
+ struct regulator *vbus;
+ bool src_en;
+ u16 did;
};
static int rt1711h_read16(struct rt1711h_chip *chip, unsigned int reg, u16 *val)
@@ -75,8 +94,9 @@ static struct rt1711h_chip *tdata_to_rt1711h(struct tcpci_data *tdata)
static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
{
- int ret;
struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
+ struct regmap *regmap = chip->data.regmap;
+ int ret;
/* CK 300K from 320K, shipping off, auto_idle enable, tout = 32ms */
ret = rt1711h_write8(chip, RT1711H_RTCTRL8,
@@ -84,6 +104,14 @@ static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
if (ret < 0)
return ret;
+ /* Enable PD30 extended message for RT1715 */
+ if (chip->did == RT1715_DID) {
+ ret = regmap_update_bits(regmap, RT1711H_RTCTRL8,
+ RT1711H_ENEXTMSG, RT1711H_ENEXTMSG);
+ if (ret < 0)
+ return ret;
+ }
+
/* I2C reset : (val + 1) * 12.5ms */
ret = rt1711h_write8(chip, RT1711H_RTCTRL11,
RT1711H_RTCTRL11_SET(1, 0x0F));
@@ -101,7 +129,37 @@ static int rt1711h_init(struct tcpci *tcpci, struct tcpci_data *tdata)
return ret;
/* dcSRC.DRP : 33% */
- return rt1711h_write16(chip, RT1711H_RTCTRL16, 330);
+ ret = rt1711h_write16(chip, RT1711H_RTCTRL16, 330);
+ if (ret < 0)
+ return ret;
+
+ /* Enable phy discard retry, retry count 7, rx filter deglitch 100 us */
+ ret = rt1711h_write8(chip, RT1711H_PHYCTRL1, 0xF1);
+ if (ret < 0)
+ return ret;
+
+ /* Decrease wait time of BMC-encoded 1 bit from 2.67us to 2.55us */
+ /* wait time : (val * .4167) us */
+ return rt1711h_write8(chip, RT1711H_PHYCTRL2, 62);
+}
+
+static int rt1711h_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata,
+ bool src, bool snk)
+{
+ struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
+ int ret;
+
+ if (chip->src_en == src)
+ return 0;
+
+ if (src)
+ ret = regulator_enable(chip->vbus);
+ else
+ ret = regulator_disable(chip->vbus);
+
+ if (!ret)
+ chip->src_en = src;
+ return ret;
}
static int rt1711h_set_vconn(struct tcpci *tcpci, struct tcpci_data *tdata,
@@ -109,8 +167,55 @@ static int rt1711h_set_vconn(struct tcpci *tcpci, struct tcpci_data *tdata,
{
struct rt1711h_chip *chip = tdata_to_rt1711h(tdata);
- return rt1711h_write8(chip, RT1711H_RTCTRL8,
- RT1711H_RTCTRL8_SET(0, 1, !enable, 2));
+ return regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL8,
+ RT1711H_AUTOIDLEEN, enable ? 0 : RT1711H_AUTOIDLEEN);
+}
+
+/*
+ * Selects the CC PHY noise filter voltage level according to the current
+ * remote CC voltage level.
+ *
+ * @status: The port's current cc status read from IC
+ * Return 0 if writes succeed; failure code otherwise
+ */
+static inline int rt1711h_init_cc_params(struct rt1711h_chip *chip, u8 status)
+{
+ int ret, cc1, cc2;
+ u8 role = 0;
+ u32 rxdz_en, rxdz_sel;
+
+ ret = rt1711h_read8(chip, TCPC_ROLE_CTRL, &role);
+ if (ret < 0)
+ return ret;
+
+ cc1 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC1_SHIFT) &
+ TCPC_CC_STATUS_CC1_MASK,
+ status & TCPC_CC_STATUS_TERM ||
+ tcpc_presenting_rd(role, CC1));
+ cc2 = tcpci_to_typec_cc((status >> TCPC_CC_STATUS_CC2_SHIFT) &
+ TCPC_CC_STATUS_CC2_MASK,
+ status & TCPC_CC_STATUS_TERM ||
+ tcpc_presenting_rd(role, CC2));
+
+ if ((cc1 >= TYPEC_CC_RP_1_5 && cc2 < TYPEC_CC_RP_DEF) ||
+ (cc2 >= TYPEC_CC_RP_1_5 && cc1 < TYPEC_CC_RP_DEF)) {
+ rxdz_en = BMCIO_RXDZEN;
+ if (chip->did == RT1715_DID)
+ rxdz_sel = RT1711H_BMCIO_RXDZSEL;
+ else
+ rxdz_sel = 0;
+ } else {
+ rxdz_en = 0;
+ rxdz_sel = RT1711H_BMCIO_RXDZSEL;
+ }
+
+ ret = regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL18,
+ BMCIO_RXDZEN, rxdz_en);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(chip->data.regmap, RT1711H_RTCTRL4,
+ RT1711H_BMCIO_RXDZSEL, rxdz_sel);
}
static int rt1711h_start_drp_toggling(struct tcpci *tcpci,
@@ -173,6 +278,8 @@ static irqreturn_t rt1711h_irq(int irq, void *dev_id)
/* Clear cc change event triggered by starting toggling */
if (status & TCPC_CC_STATUS_TOGGLING)
rt1711h_write8(chip, TCPC_ALERT, TCPC_ALERT_CC_STATUS);
+ else
+ rt1711h_init_cc_params(chip, status);
}
out:
@@ -191,7 +298,7 @@ static int rt1711h_sw_reset(struct rt1711h_chip *chip)
return 0;
}
-static int rt1711h_check_revision(struct i2c_client *i2c)
+static int rt1711h_check_revision(struct i2c_client *i2c, struct rt1711h_chip *chip)
{
int ret;
@@ -209,7 +316,15 @@ static int rt1711h_check_revision(struct i2c_client *i2c)
dev_err(&i2c->dev, "pid is not correct, 0x%04x\n", ret);
return -ENODEV;
}
- return 0;
+ ret = i2c_smbus_read_word_data(i2c, TCPC_BCD_DEV);
+ if (ret < 0)
+ return ret;
+ if (ret != chip->did) {
+ dev_err(&i2c->dev, "did is not correct, 0x%04x\n", ret);
+ return -ENODEV;
+ }
+ dev_dbg(&i2c->dev, "did is 0x%04x\n", ret);
+ return ret;
}
static int rt1711h_probe(struct i2c_client *client,
@@ -218,16 +333,18 @@ static int rt1711h_probe(struct i2c_client *client,
int ret;
struct rt1711h_chip *chip;
- ret = rt1711h_check_revision(client);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->did = (size_t)device_get_match_data(&client->dev);
+
+ ret = rt1711h_check_revision(client, chip);
if (ret < 0) {
dev_err(&client->dev, "check vid/pid fail\n");
return ret;
}
- chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
chip->data.regmap = devm_regmap_init_i2c(client,
&rt1711h_regmap_config);
if (IS_ERR(chip->data.regmap))
@@ -245,7 +362,12 @@ static int rt1711h_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ chip->vbus = devm_regulator_get(&client->dev, "vbus");
+ if (IS_ERR(chip->vbus))
+ return PTR_ERR(chip->vbus);
+
chip->data.init = rt1711h_init;
+ chip->data.set_vbus = rt1711h_set_vbus;
chip->data.set_vconn = rt1711h_set_vconn;
chip->data.start_drp_toggling = rt1711h_start_drp_toggling;
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
@@ -263,23 +385,24 @@ static int rt1711h_probe(struct i2c_client *client,
return 0;
}
-static int rt1711h_remove(struct i2c_client *client)
+static void rt1711h_remove(struct i2c_client *client)
{
struct rt1711h_chip *chip = i2c_get_clientdata(client);
tcpci_unregister_port(chip->tcpci);
- return 0;
}
static const struct i2c_device_id rt1711h_id[] = {
{ "rt1711h", 0 },
+ { "rt1715", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt1711h_id);
#ifdef CONFIG_OF
static const struct of_device_id rt1711h_of_match[] = {
- { .compatible = "richtek,rt1711h", },
+ { .compatible = "richtek,rt1711h", .data = (void *)RT1711H_DID },
+ { .compatible = "richtek,rt1715", .data = (void *)RT1715_DID },
{},
};
MODULE_DEVICE_TABLE(of, rt1711h_of_match);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index ea5a917c51b1..904c7b4ce2f0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -6320,6 +6320,13 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
struct tcpm_port *port = power_supply_get_drvdata(psy);
int ret;
+ /*
+ * All the properties below are related to USB PD. The check needs to be
+ * property specific when a non-pd related property is added.
+ */
+ if (!port->pd_supported)
+ return -EOPNOTSUPP;
+
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
ret = tcpm_psy_set_online(port, val);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index dfbba5ae9487..b637e8b378b3 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -857,15 +857,13 @@ err_clear_mask:
return ret;
}
-static int tps6598x_remove(struct i2c_client *client)
+static void tps6598x_remove(struct i2c_client *client)
{
struct tps6598x *tps = i2c_get_clientdata(client);
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
usb_role_switch_put(tps->role_sw);
-
- return 0;
}
static const struct of_device_id tps6598x_of_match[] = {
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1aea46493b85..74fb5a4c6f21 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -588,8 +588,6 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
- if (ret == 0 && offset == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
return ret;
}
@@ -1069,11 +1067,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
cap->fwnode = ucsi_find_fwnode(con);
con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
- if (IS_ERR(con->usb_role_sw)) {
- dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
- con->num);
- return PTR_ERR(con->usb_role_sw);
- }
+ if (IS_ERR(con->usb_role_sw))
+ return dev_err_probe(ucsi->dev, PTR_ERR(con->usb_role_sw),
+ "con%d: failed to get usb role switch\n", con->num);
/* Delay other interactions with the con until registration is complete */
mutex_lock(&con->lock);
@@ -1200,32 +1196,6 @@ out_unlock:
return ret;
}
-static void ucsi_unregister_connectors(struct ucsi *ucsi)
-{
- struct ucsi_connector *con;
- int i;
-
- if (!ucsi->connector)
- return;
-
- for (i = 0; i < ucsi->cap.num_connectors; i++) {
- con = &ucsi->connector[i];
-
- if (!con->wq)
- break;
-
- cancel_work_sync(&con->work);
- ucsi_unregister_partner(con);
- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(con);
- destroy_workqueue(con->wq);
- typec_unregister_port(con->port);
- }
-
- kfree(ucsi->connector);
- ucsi->connector = NULL;
-}
-
/**
* ucsi_init - Initialize UCSI interface
* @ucsi: UCSI to be initialized
@@ -1234,6 +1204,7 @@ static void ucsi_unregister_connectors(struct ucsi *ucsi)
*/
static int ucsi_init(struct ucsi *ucsi)
{
+ struct ucsi_connector *con;
u64 command;
int ret;
int i;
@@ -1264,7 +1235,7 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors,
+ ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
ret = -ENOMEM;
@@ -1288,7 +1259,15 @@ static int ucsi_init(struct ucsi *ucsi)
return 0;
err_unregister:
- ucsi_unregister_connectors(ucsi);
+ for (con = ucsi->connector; con->port; con++) {
+ ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
+ if (con->wq)
+ destroy_workqueue(con->wq);
+ typec_unregister_port(con->port);
+ con->port = NULL;
+ }
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1402,6 +1381,7 @@ EXPORT_SYMBOL_GPL(ucsi_register);
void ucsi_unregister(struct ucsi *ucsi)
{
u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
+ int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_delayed_work_sync(&ucsi->work);
@@ -1409,7 +1389,18 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
- ucsi_unregister_connectors(ucsi);
+ for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ cancel_work_sync(&ucsi->connector[i].work);
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+ if (ucsi->connector[i].wq)
+ destroy_workqueue(ucsi->connector[i].wq);
+ typec_unregister_port(ucsi->connector[i].port);
+ }
+
+ kfree(ucsi->connector);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
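The reworked ucsi_init() error path leans on a small data-structure trick: the connector array is now allocated with num_connectors + 1 zeroed entries, so the spare element acts as a sentinel and the cleanup loop walks forward until it reaches a connector whose port is NULL. In isolation the pattern looks roughly like this (names hypothetical):

static struct item *alloc_items(unsigned int nr)
{
	/* one spare zero-filled element beyond the nr real entries */
	return kcalloc(nr + 1, sizeof(struct item), GFP_KERNEL);
}

static void cleanup_all(struct item *items)
{
	struct item *it;

	for (it = items; it->port; it++)
		cleanup_one(it);	/* stops at the sentinel, never walks off the end */
}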
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 5c0bf48be766..835f1c4372ba 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -125,6 +125,11 @@ struct version_format {
#define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v')
#define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
+/* Firmware for Tegra doesn't support the UCSI ALT command; the NVIDIA Tegra
+ * build has a known issue of reporting wrong capability info.
+ */
+#define CCG_FW_BUILD_NVIDIA_TEGRA (('g' << 8) | 'n')
+
/* Altmode offset for NVIDIA Function Test Board (FTB) */
#define NVIDIA_FTB_DP_OFFSET (2)
#define NVIDIA_FTB_DBG_OFFSET (3)
@@ -513,6 +518,7 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
{
struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
+ struct ucsi_capability *cap;
struct ucsi_altmode *alt;
int ret;
@@ -536,6 +542,12 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
ucsi_ccg_nvidia_altmode(uc, alt);
}
break;
+ case UCSI_GET_CAPABILITY:
+ if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
+ cap = val;
+ cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
+ }
+ break;
default:
break;
}
@@ -1403,7 +1415,7 @@ out_ucsi_destroy:
return status;
}
-static int ucsi_ccg_remove(struct i2c_client *client)
+static void ucsi_ccg_remove(struct i2c_client *client)
{
struct ucsi_ccg *uc = i2c_get_clientdata(client);
@@ -1413,8 +1425,6 @@ static int ucsi_ccg_remove(struct i2c_client *client)
ucsi_unregister(uc->ucsi);
ucsi_destroy(uc->ucsi);
free_irq(uc->irq, uc);
-
- return 0;
}
static const struct i2c_device_id ucsi_ccg_device_id[] = {
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 061551d464f1..7b92f0c8de70 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -599,7 +599,7 @@ static int ucsi_stm32g0_probe_bootloader(struct ucsi *ucsi)
g0->i2c_bl = i2c_new_dummy_device(g0->client->adapter, STM32G0_I2C_BL_ADDR);
if (IS_ERR(g0->i2c_bl)) {
ret = dev_err_probe(g0->dev, PTR_ERR(g0->i2c_bl),
- "Failed to register booloader I2C address\n");
+ "Failed to register bootloader I2C address\n");
return ret;
}
}
@@ -688,7 +688,7 @@ destroy:
return ret;
}
-static int ucsi_stm32g0_remove(struct i2c_client *client)
+static void ucsi_stm32g0_remove(struct i2c_client *client)
{
struct ucsi_stm32g0 *g0 = i2c_get_clientdata(client);
@@ -697,8 +697,6 @@ static int ucsi_stm32g0_remove(struct i2c_client *client)
if (g0->fw_name)
i2c_unregister_device(g0->i2c_bl);
ucsi_destroy(g0->ucsi);
-
- return 0;
}
static int ucsi_stm32g0_suspend(struct device *dev)
diff --git a/drivers/usb/typec/wusb3801.c b/drivers/usb/typec/wusb3801.c
index e63509f8b01e..3cc7a15ecbd3 100644
--- a/drivers/usb/typec/wusb3801.c
+++ b/drivers/usb/typec/wusb3801.c
@@ -399,7 +399,7 @@ err_put_connector:
return ret;
}
-static int wusb3801_remove(struct i2c_client *client)
+static void wusb3801_remove(struct i2c_client *client)
{
struct wusb3801 *wusb3801 = i2c_get_clientdata(client);
@@ -411,8 +411,6 @@ static int wusb3801_remove(struct i2c_client *client)
if (wusb3801->vbus_on)
regulator_disable(wusb3801->vbus_supply);
-
- return 0;
}
static const struct of_device_id wusb3801_of_match[] = {
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 77a5b3f8736a..e8c3131a8543 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -100,7 +100,7 @@ static int add_match_busid(char *busid)
for (i = 0; i < MAX_BUSID; i++) {
spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
- strlcpy(busid_table[i].name, busid, BUSID_SIZE);
+ strscpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 5dd41e8215e0..fc01b31bbb87 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -464,7 +464,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
int nents;
int num_urbs = 1;
int pipe = get_pipe(sdev, pdu);
- int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
+ int use_sg = pdu->u.cmd_submit.transfer_flags & USBIP_URB_DMA_MAP_SG;
int support_sg = 1;
int np = 0;
int ret, i;
@@ -514,7 +514,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
num_urbs = nents;
priv->completed_urbs = 0;
pdu->u.cmd_submit.transfer_flags &=
- ~URB_DMA_MAP_SG;
+ ~USBIP_URB_DMA_MAP_SG;
}
} else {
buffer = kzalloc(buf_len, GFP_KERNEL);
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 2ab99244bc31..053a2bca4c47 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -344,6 +344,91 @@ static unsigned int tweak_transfer_flags(unsigned int flags)
return flags;
}
+/*
+ * USBIP driver packs URB transfer flags in PDUs that are exchanged
+ * between Server (usbip_host) and Client (vhci_hcd). URB_* flags
+ * are internal to the kernel and could change, whereas the USBIP_URB_* flags
+ * exchanged in PDUs are part of the USBIP user API and must not change.
+ *
+ * USBIP_URB* flags are exported as explicit API and client and server
+ * do mapping from kernel flags to USBIP_URB*. Details as follows:
+ *
+ * Client tx path (USBIP_CMD_SUBMIT):
+ * - Maps URB_* to USBIP_URB_* when it sends USBIP_CMD_SUBMIT packet.
+ *
+ * Server rx path (USBIP_CMD_SUBMIT):
+ * - Maps USBIP_URB_* to URB_* when it receives USBIP_CMD_SUBMIT packet.
+ *
+ * Flags aren't included in USBIP_CMD_UNLINK and USBIP_RET_SUBMIT packets
+ * and no special handling is needed for them in the following cases:
+ * - Server rx path (USBIP_CMD_UNLINK)
+ * - Client rx path & Server tx path (USBIP_RET_SUBMIT)
+ *
+ * Code paths:
+ * usbip_pack_pdu() is the common routine that handles packing pdu from
+ * urb and unpack pdu to an urb.
+ *
+ * usbip_pack_cmd_submit() and usbip_pack_ret_submit() handle
+ * USBIP_CMD_SUBMIT and USBIP_RET_SUBMIT respectively.
+ *
+ * usbip_map_urb_to_usbip() and usbip_map_usbip_to_urb() are used
+ * by usbip_pack_cmd_submit() and usbip_pack_ret_submit() to map
+ * flags.
+ */
+
+struct urb_to_usbip_flags {
+ u32 urb_flag;
+ u32 usbip_flag;
+};
+
+#define NUM_USBIP_FLAGS 17
+
+static const struct urb_to_usbip_flags flag_map[NUM_USBIP_FLAGS] = {
+ {URB_SHORT_NOT_OK, USBIP_URB_SHORT_NOT_OK},
+ {URB_ISO_ASAP, USBIP_URB_ISO_ASAP},
+ {URB_NO_TRANSFER_DMA_MAP, USBIP_URB_NO_TRANSFER_DMA_MAP},
+ {URB_ZERO_PACKET, USBIP_URB_ZERO_PACKET},
+ {URB_NO_INTERRUPT, USBIP_URB_NO_INTERRUPT},
+ {URB_FREE_BUFFER, USBIP_URB_FREE_BUFFER},
+ {URB_DIR_IN, USBIP_URB_DIR_IN},
+ {URB_DIR_OUT, USBIP_URB_DIR_OUT},
+ {URB_DIR_MASK, USBIP_URB_DIR_MASK},
+ {URB_DMA_MAP_SINGLE, USBIP_URB_DMA_MAP_SINGLE},
+ {URB_DMA_MAP_PAGE, USBIP_URB_DMA_MAP_PAGE},
+ {URB_DMA_MAP_SG, USBIP_URB_DMA_MAP_SG},
+ {URB_MAP_LOCAL, USBIP_URB_MAP_LOCAL},
+ {URB_SETUP_MAP_SINGLE, USBIP_URB_SETUP_MAP_SINGLE},
+ {URB_SETUP_MAP_LOCAL, USBIP_URB_SETUP_MAP_LOCAL},
+ {URB_DMA_SG_COMBINED, USBIP_URB_DMA_SG_COMBINED},
+ {URB_ALIGNED_TEMP_BUFFER, USBIP_URB_ALIGNED_TEMP_BUFFER},
+};
+
+static unsigned int urb_to_usbip(unsigned int flags)
+{
+ unsigned int map_flags = 0;
+ int loop;
+
+ for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) {
+ if (flags & flag_map[loop].urb_flag)
+ map_flags |= flag_map[loop].usbip_flag;
+ }
+
+ return map_flags;
+}
+
+static unsigned int usbip_to_urb(unsigned int flags)
+{
+ unsigned int map_flags = 0;
+ int loop;
+
+ for (loop = 0; loop < NUM_USBIP_FLAGS; loop++) {
+ if (flags & flag_map[loop].usbip_flag)
+ map_flags |= flag_map[loop].urb_flag;
+ }
+
+ return map_flags;
+}
+
static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb,
int pack)
{
@@ -354,14 +439,14 @@ static void usbip_pack_cmd_submit(struct usbip_header *pdu, struct urb *urb,
* will be discussed when usbip is ported to other operating systems.
*/
if (pack) {
- spdu->transfer_flags =
- tweak_transfer_flags(urb->transfer_flags);
+ /* map after tweaking the urb flags */
+ spdu->transfer_flags = urb_to_usbip(tweak_transfer_flags(urb->transfer_flags));
spdu->transfer_buffer_length = urb->transfer_buffer_length;
spdu->start_frame = urb->start_frame;
spdu->number_of_packets = urb->number_of_packets;
spdu->interval = urb->interval;
} else {
- urb->transfer_flags = spdu->transfer_flags;
+ urb->transfer_flags = usbip_to_urb(spdu->transfer_flags);
urb->transfer_buffer_length = spdu->transfer_buffer_length;
urb->start_frame = spdu->start_frame;
urb->number_of_packets = spdu->number_of_packets;
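The long comment added above spells out the contract: URB_* values are kernel-internal and free to change, while the USBIP_URB_* values carried in CMD_SUBMIT PDUs are user-visible wire format, and the two static helpers translate between them. A small illustration of the intended round trip (not part of the patch; it would have to live in usbip_common.c since the helpers are static there):

static void usbip_flag_roundtrip_example(void)
{
	/* client tx: kernel flags -> wire flags put into the CMD_SUBMIT PDU */
	unsigned int wire = urb_to_usbip(URB_ZERO_PACKET | URB_DIR_IN);
	/* wire == USBIP_URB_ZERO_PACKET | USBIP_URB_DIR_IN */

	/* server rx: wire flags -> whatever URB_* happens to be in this kernel */
	unsigned int kflags = usbip_to_urb(wire);
	/* kflags == URB_ZERO_PACKET | URB_DIR_IN, regardless of how URB_*
	 * may be renumbered in future kernels
	 */
	(void)kflags;
}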
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 75a703b803a2..3e4486bfa0b7 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -323,7 +323,7 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
last_avail_idx = vp_ioread16(avail_idx_addr);
@@ -337,7 +337,7 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
u32 q_pair_id;
ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
- q_pair_id = qid / hw->nr_vring;
+ q_pair_id = qid / 2;
avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
hw->vring[qid].last_avail_idx = num;
vp_iowrite16(num, avail_idx_addr);
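The ifcvf fix above is clearest with concrete numbers. The live-migration config is laid out per queue pair, with idx_addr[0] and idx_addr[1] covering the two vrings of a pair, so the pair index has to be qid / 2; dividing by the total vring count collapsed every qid into pair 0. A worked example, assuming a device with three data queue pairs (hw->nr_vring == 6):

/*
 * qid 5 (second vring of pair 2):
 *   old: q_pair_id = qid / hw->nr_vring = 5 / 6 = 0  -> pair 0's idx_addr[1] (wrong slot)
 *   new: q_pair_id = qid / 2            = 5 / 2 = 2  -> pair 2's idx_addr[5 % 2] = idx_addr[1]
 */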
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index ed100a35e596..90913365def4 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1320,6 +1320,8 @@ static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *
static int create_rqt(struct mlx5_vdpa_net *ndev)
{
+ int rqt_table_size = roundup_pow_of_two(ndev->rqt_size);
+ int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1327,7 +1329,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + rqt_table_size * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1336,12 +1338,12 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
- MLX5_SET(rqtc, rqtc, rqt_max_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, rqt_table_size);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1354,6 +1356,7 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
{
+ int act_sz = roundup_pow_of_two(num / 2);
__be32 *list;
void *rqtc;
int inlen;
@@ -1361,7 +1364,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + ndev->rqt_size * MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + act_sz * MLX5_ST_SZ_BYTES(rq_num);
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -1372,10 +1375,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; i < ndev->rqt_size; i++, j += 2)
+ for (i = 0, j = 0; i < act_sz; i++, j += 2)
list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- MLX5_SET(rqtc, rqtc, rqt_actual_size, ndev->rqt_size);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, act_sz);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
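
Both create_rqt() and modify_rqt() now size the table with roundup_pow_of_two(), since the RQT max/actual sizes the device accepts must be powers of two, and the actual size follows the currently active queue pairs instead of the configured maximum. A small userspace model of the sizing, with made-up queue counts:

#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int rqt_size = 5;    /* assumed configured max queue pairs */
        unsigned int cur_num_vqs = 6; /* assumed currently active data virtqueues */

        /* The device wants power-of-two sizes; the actual size tracks the
         * active pairs while the max size tracks the configured limit.
         */
        printf("rqt_max_size    = %u\n", roundup_pow2(rqt_size));        /* 8 */
        printf("rqt_actual_size = %u\n", roundup_pow2(cur_num_vqs / 2)); /* 4 */
        return 0;
}
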
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index c06c02704461..febdc99b51a7 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -600,6 +600,11 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
}
config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
+ if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) {
+ config.device_features =
+ nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);
+ }
/* Skip checking capability if user didn't prefer to configure any
* device networking attributes. It is likely that user might have used
@@ -799,51 +804,76 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
return msg->len;
}
-static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
- struct sk_buff *msg, u64 features,
+static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features,
const struct virtio_net_config *config)
{
u16 val_u16;
- if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
+ if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 &&
+ (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0)
return 0;
- val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
+ val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs);
+
return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
}
+static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ u16 val_u16;
+
+ if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0)
+ return 0;
+
+ val_u16 = __virtio16_to_cpu(true, config->mtu);
+
+ return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16);
+}
+
+static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0)
+ return 0;
+
+ return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR,
+ sizeof(config->mac), config->mac);
+}
+
static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
{
struct virtio_net_config config = {};
- u64 features;
+ u64 features_device;
u16 val_u16;
- vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
-
- if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
- config.mac))
- return -EMSGSIZE;
+ vdev->config->get_config(vdev, 0, &config, sizeof(config));
val_u16 = __virtio16_to_cpu(true, config.status);
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
return -EMSGSIZE;
- val_u16 = __virtio16_to_cpu(true, config.mtu);
- if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
- return -EMSGSIZE;
+ features_device = vdev->config->get_device_features(vdev);
- features = vdev->config->get_driver_features(vdev);
- if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
VDPA_ATTR_PAD))
return -EMSGSIZE;
- return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
+ if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_net_mac_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ return vdpa_dev_net_mq_config_fill(msg, features_device, &config);
}
static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
+ u64 features_driver;
+ u8 status = 0;
u32 device_id;
void *hdr;
int err;
@@ -867,6 +897,17 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
goto msg_err;
}
+ /* only read driver features after the feature negotiation is done */
+ status = vdev->config->get_status(vdev);
+ if (status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ features_driver = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver,
+ VDPA_ATTR_PAD)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+ }
+
switch (device_id) {
case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg);
@@ -1183,6 +1224,7 @@ static struct genl_family vdpa_nl_family __ro_after_init = {
.module = THIS_MODULE,
.ops = vdpa_nl_ops,
.n_ops = ARRAY_SIZE(vdpa_nl_ops),
+ .resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1,
};
static int vdpa_init(void)
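
The refactor above splits the netlink config fill into per-field helpers that emit an attribute only when the device actually offers the matching feature bit, instead of unconditionally reporting MAC and MTU. A userspace sketch of that feature-gated pattern; the feature bit numbers and field names are illustrative, not the exact virtio definitions:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Illustrative feature bits standing in for VIRTIO_NET_F_MTU/MAC/MQ. */
enum { F_MTU = 3, F_MAC = 5, F_MQ = 22 };

struct net_config {
        uint16_t mtu;
        uint8_t mac[6];
        uint16_t max_vq_pairs;
};

/* Emit a field only when the device offers the matching feature bit,
 * mirroring the per-field fill helpers added in the patch.
 */
static void fill_config(uint64_t features, const struct net_config *cfg)
{
        if (features & BIT_ULL(F_MTU))
                printf("mtu: %u\n", cfg->mtu);
        if (features & BIT_ULL(F_MAC))
                printf("mac: starts with %02x\n", cfg->mac[0]);
        if (features & BIT_ULL(F_MQ))
                printf("max_vq_pairs: %u\n", cfg->max_vq_pairs);
}

int main(void)
{
        struct net_config cfg = { .mtu = 1500, .mac = { 0x52 }, .max_vq_pairs = 4 };

        /* Device offers MTU and MQ but not MAC: the MAC line is skipped. */
        fill_config(BIT_ULL(F_MTU) | BIT_ULL(F_MQ), &cfg);
        return 0;
}
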
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 225b7f5d8be3..b071f0d842fb 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -18,6 +18,7 @@
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
+#include <uapi/linux/vdpa.h>
#include "vdpa_sim.h"
@@ -245,13 +246,22 @@ static const struct dma_map_ops vdpasim_dma_ops = {
static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;
-struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
+ const struct vdpa_dev_set_config *config)
{
const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
int i, ret = -ENOMEM;
+ if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (config->device_features &
+ ~dev_attr->supported_features)
+ return ERR_PTR(-EINVAL);
+ dev_attr->supported_features =
+ config->device_features;
+ }
+
if (batch_mapping)
ops = &vdpasim_batch_config_ops;
else
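
vdpasim_create() now accepts a provisioned feature set and rejects it unless it is a subset of what the simulator supports; the vp_vdpa change further below applies the same rule to real hardware. A compact userspace model of that subset check:

#include <stdint.h>
#include <stdio.h>

/* Accept a provisioned feature set only if it is a subset of what the
 * device supports; on success the advertised set is narrowed to it.
 */
static int provision_features(uint64_t *supported, uint64_t requested)
{
        if (requested & ~*supported)
                return -1;      /* -EINVAL in the kernel */
        *supported = requested;
        return 0;
}

int main(void)
{
        uint64_t supported = 0xf0;

        printf("subset:   %d\n", provision_features(&supported, 0x30)); /* accepted */
        printf("superset: %d\n", provision_features(&supported, 0x31)); /* rejected */
        printf("now advertising 0x%llx\n", (unsigned long long)supported);
        return 0;
}
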
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index 061986f30911..0e78737dcc16 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -71,7 +71,8 @@ struct vdpasim {
spinlock_t iommu_lock;
};
-struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr);
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr,
+ const struct vdpa_dev_set_config *config);
/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index c8bfea3b7db2..c6db1a1baf76 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -383,7 +383,7 @@ static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.work_fn = vdpasim_blk_work;
dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
- simdev = vdpasim_create(&dev_attr);
+ simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 886449e88502..c3cb225ea469 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -254,7 +254,7 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dev_attr.work_fn = vdpasim_net_work;
dev_attr.buffer_size = PAGE_SIZE;
- simdev = vdpasim_create(&dev_attr);
+ simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
@@ -294,7 +294,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.id_table = id_table,
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
- 1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+ 1 << VDPA_ATTR_DEV_NET_CFG_MTU |
+ 1 << VDPA_ATTR_DEV_FEATURES),
.max_supported_vqs = VDPASIM_NET_VQ_NUM,
.supported_features = VDPASIM_NET_FEATURES,
};
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 41c0b29739f1..35dceee3ed56 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -673,10 +673,15 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- if (offset > dev->config_size ||
- len > dev->config_size - offset)
+ /* Initialize the buffer in case of partial copy. */
+ memset(buf, 0, len);
+
+ if (offset > dev->config_size)
return;
+ if (len > dev->config_size - offset)
+ len = dev->config_size - offset;
+
memcpy(buf, dev->config + offset, len);
}
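
The VDUSE change above makes partial config reads safe: the destination is zeroed first, an out-of-range offset returns silently, and the length is clamped to the remaining config space before copying. A userspace sketch of the same clamping:

#include <stdio.h>
#include <string.h>

/* Zero the destination first, then copy only the bytes that actually fall
 * inside the config space, so short or out-of-range reads stay well defined.
 */
static void get_config(const unsigned char *cfg, size_t cfg_size,
                       size_t offset, unsigned char *buf, size_t len)
{
        memset(buf, 0, len);

        if (offset > cfg_size)
                return;
        if (len > cfg_size - offset)
                len = cfg_size - offset;

        memcpy(buf, cfg + offset, len);
}

int main(void)
{
        unsigned char cfg[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char buf[6];
        size_t i;

        /* Request crosses the end of config space: bytes 7, 8, then zero padding. */
        get_config(cfg, sizeof(cfg), 6, buf, sizeof(buf));
        for (i = 0; i < sizeof(buf); i++)
                printf("%u ", buf[i]);
        printf("\n"); /* 7 8 0 0 0 0 */
        return 0;
}
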
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 04522077735b..d448db0c4de3 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -17,6 +17,7 @@
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>
+#include <uapi/linux/vdpa.h>
#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
@@ -35,6 +36,7 @@ struct vp_vdpa {
struct virtio_pci_modern_device *mdev;
struct vp_vring *vring;
struct vdpa_callback config_cb;
+ u64 device_features;
char msix_name[VP_VDPA_NAME_SIZE];
int config_irq;
int queues;
@@ -66,9 +68,9 @@ static struct virtio_pci_modern_device *vp_vdpa_to_mdev(struct vp_vdpa *vp_vdpa)
static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
- struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
- return vp_modern_get_features(mdev);
+ return vp_vdpa->device_features;
}
static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
@@ -475,6 +477,7 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
struct pci_dev *pdev = mdev->pci_dev;
struct device *dev = &pdev->dev;
struct vp_vdpa *vp_vdpa = NULL;
+ u64 device_features;
int ret, i;
vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
@@ -491,6 +494,20 @@ static int vp_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
vp_vdpa->queues = vp_modern_get_num_queues(mdev);
vp_vdpa->mdev = mdev;
+ device_features = vp_modern_get_features(mdev);
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
+ if (add_config->device_features & ~device_features) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Try to provision features "
+ "that are not supported by the device: "
+ "device_features 0x%llx provisioned 0x%llx\n",
+ device_features, add_config->device_features);
+ goto err;
+ }
+ device_features = add_config->device_features;
+ }
+ vp_vdpa->device_features = device_features;
+
ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
if (ret) {
dev_err(&pdev->dev,
@@ -599,6 +616,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mgtdev->id_table = mdev_id;
mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev);
mgtdev->supported_features = vp_modern_get_features(mdev);
+ mgtdev->config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
pci_set_master(pdev);
pci_set_drvdata(pdev, vp_vdpa_mgtdev);
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index e163aa9f6144..0cbdcd14f1c8 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -151,7 +151,10 @@ int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
if (!vdev->vdev.kvm)
return 0;
- return kvm_s390_pci_register_kvm(zdev, vdev->vdev.kvm);
+ if (zpci_kvm_hook.kvm_register)
+ return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm);
+
+ return -ENOENT;
}
void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
@@ -161,5 +164,6 @@ void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
if (!zdev || !vdev->vdev.kvm)
return;
- kvm_s390_pci_unregister_kvm(zdev);
+ if (zpci_kvm_hook.kvm_unregister)
+ zpci_kvm_hook.kvm_unregister(zdev);
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index db516c90a977..23c24fe98c00 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -37,7 +37,6 @@
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
-#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>
#include "vfio.h"
@@ -558,6 +557,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
pages, NULL, NULL);
if (ret > 0) {
+ int i;
+
+ /*
+ * The zero page is always resident, we don't need to pin it
+ * and it falls into our invalid/reserved test so we don't
+ * unpin in put_pfn(). Unpin all zero pages in the batch here.
+ */
+ for (i = 0; i < ret; i++) {
+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+ unpin_user_page(pages[i]);
+ }
+
*pfn = page_to_pfn(pages[0]);
goto done;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 68e4ecd1cc0e..20265393aee7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -118,7 +118,7 @@ struct vhost_net_virtqueue {
/* Number of XDP frames batched */
int batched_xdp;
/* an array of userspace buffers info */
- struct ubuf_info *ubuf_info;
+ struct ubuf_info_msgzc *ubuf_info;
/* Reference counting for outstanding ubufs.
* Protected by vq mutex. Writers must also take device mutex. */
struct vhost_net_ubuf_ref *ubufs;
@@ -382,8 +382,9 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net,
}
static void vhost_zerocopy_callback(struct sk_buff *skb,
- struct ubuf_info *ubuf, bool success)
+ struct ubuf_info *ubuf_base, bool success)
{
+ struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
int cnt;
@@ -871,7 +872,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
- struct ubuf_info *ubuf;
+ struct ubuf_info_msgzc *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -907,14 +908,14 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
ubuf = nvq->ubuf_info + nvq->upend_idx;
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
- ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
ubuf->desc = nvq->upend_idx;
- ubuf->flags = SKBFL_ZEROCOPY_FRAG;
- refcount_set(&ubuf->refcnt, 1);
+ ubuf->ubuf.callback = vhost_zerocopy_callback;
+ ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
+ refcount_set(&ubuf->ubuf.refcnt, 1);
msg.msg_control = &ctl;
ctl.type = TUN_MSG_UBUF;
- ctl.ptr = ubuf;
+ ctl.ptr = &ubuf->ubuf;
msg.msg_controllen = sizeof(ctl);
ubufs = nvq->ubufs;
atomic_inc(&ubufs->refcount);
@@ -1781,7 +1782,7 @@ static struct miscdevice vhost_net_misc = {
.fops = &vhost_net_fops,
};
-static int vhost_net_init(void)
+static int __init vhost_net_init(void)
{
if (experimental_zcopytx)
vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
@@ -1789,7 +1790,7 @@ static int vhost_net_init(void)
}
module_init(vhost_net_init);
-static void vhost_net_exit(void)
+static void __exit vhost_net_exit(void)
{
misc_deregister(&vhost_net_misc);
}
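
The vhost-net conversion above follows the ubuf_info split: the zero-copy bookkeeping now lives in ubuf_info_msgzc, which embeds the generic ubuf_info as its first member, and callbacks recover the outer struct with uarg_to_msgzc(). A userspace sketch of that embed-and-recover pattern using container_of; the struct and field names here are illustrative:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ubuf_base {
        void (*callback)(struct ubuf_base *base);
};

struct ubuf_ext {
        struct ubuf_base ubuf;  /* must stay the first member */
        int desc;
};

static void zerocopy_cb(struct ubuf_base *base)
{
        /* Recover the outer struct from the embedded base pointer. */
        struct ubuf_ext *ext = container_of(base, struct ubuf_ext, ubuf);

        printf("desc = %d\n", ext->desc);
}

int main(void)
{
        struct ubuf_ext u = { .ubuf.callback = zerocopy_cb, .desc = 42 };

        /* The networking core only ever sees the embedded &u.ubuf. */
        u.ubuf.callback(&u.ubuf);
        return 0;
}
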
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 368330417bde..5703775af129 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -393,7 +393,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+ pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
if (!pkt->buf) {
kfree(pkt);
return NULL;
diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c
index 538f2d40acda..9e6bcc03a1a4 100644
--- a/drivers/video/aperture.c
+++ b/drivers/video/aperture.c
@@ -2,15 +2,17 @@
#include <linux/aperture.h>
#include <linux/device.h>
-#include <linux/fb.h> /* for old fbdev helpers */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sysfb.h>
#include <linux/types.h>
#include <linux/vgaarb.h>
+#include <video/vga.h>
+
/**
* DOC: overview
*
@@ -283,26 +285,27 @@ static void aperture_detach_devices(resource_size_t base, resource_size_t size)
int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
bool primary, const char *name)
{
-#if IS_REACHABLE(CONFIG_FB)
- struct apertures_struct *a;
- int ret;
-
- a = alloc_apertures(1);
- if (!a)
- return -ENOMEM;
-
- a->ranges[0].base = base;
- a->ranges[0].size = size;
-
- ret = remove_conflicting_framebuffers(a, name, primary);
- kfree(a);
-
- if (ret)
- return ret;
-#endif
+ /*
+ * If a driver asked to unregister a platform device registered by
+ * sysfb, then can be assumed that this is a driver for a display
+ * that is set up by the system firmware and has a generic driver.
+ *
+ * Drivers for devices that don't have a generic driver will never
+ * ask for this, so let's assume that a real driver for the display
+ * was already probed and prevent sysfb from registering devices later.
+ */
+ sysfb_disable();
aperture_detach_devices(base, size);
+ /*
+ * If this is the primary adapter, there could be a VGA device
+ * that consumes the VGA framebuffer I/O range. Remove this device
+ * as well.
+ */
+ if (primary)
+ aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
+
return 0;
}
EXPORT_SYMBOL(aperture_remove_conflicting_devices);
@@ -321,30 +324,36 @@ EXPORT_SYMBOL(aperture_remove_conflicting_devices);
*/
int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
{
+ bool primary = false;
resource_size_t base, size;
int bar, ret;
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, name);
- if (ret)
- return ret;
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- ret = vga_remove_vgacon(pdev);
- if (ret)
- return ret;
for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
continue;
+
base = pci_resource_start(pdev, bar);
size = pci_resource_len(pdev, bar);
- aperture_detach_devices(base, size);
+ ret = aperture_remove_conflicting_devices(base, size, primary, name);
+ if (ret)
+ break;
}
+ if (ret)
+ return ret;
+
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
+ ret = vga_remove_vgacon(pdev);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index a003e02e13ce..936ba1e4d35e 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -268,6 +268,19 @@ config BACKLIGHT_MAX8925
If you have a LCD backlight connected to the WLED output of MAX8925
WLED output, say Y here to enable this driver.
+config BACKLIGHT_MT6370
+ tristate "MediaTek MT6370 Backlight Driver"
+ depends on MFD_MT6370
+ help
+ This enables support for the MediaTek MT6370 backlight driver.
+ It's commonly used to drive the display WLED. There are 4 channels
+ inside, and each channel supports up to 30mA of current capability
+ with 2048 current steps (only for MT6370/MT6371) or 16384 current
+ steps (only for MT6372) in exponential or linear mapping curves.
+
+ This driver can also be built as a module. If so, the module
+ will be called "mt6370-backlight".
+
config BACKLIGHT_APPLE
tristate "Apple Backlight Driver"
depends on X86 && ACPI
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index cae2c83422ae..e815f3f1deff 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o
obj-$(CONFIG_BACKLIGHT_LV5207LP) += lv5207lp.o
obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
+obj-$(CONFIG_BACKLIGHT_MT6370) += mt6370-backlight.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 8ec19425671f..b0fe02273e87 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -753,7 +753,7 @@ out:
return ret;
}
-static int adp8860_remove(struct i2c_client *client)
+static void adp8860_remove(struct i2c_client *client)
{
struct adp8860_bl *data = i2c_get_clientdata(client);
@@ -765,8 +765,6 @@ static int adp8860_remove(struct i2c_client *client)
if (data->en_ambl_sens)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8860_bl_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 8b5213a39527..5becace3fd0f 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -925,7 +925,7 @@ out:
return ret;
}
-static int adp8870_remove(struct i2c_client *client)
+static void adp8870_remove(struct i2c_client *client)
{
struct adp8870_bl *data = i2c_get_clientdata(client);
@@ -937,8 +937,6 @@ static int adp8870_remove(struct i2c_client *client)
if (data->pdata->en_ambl_sens)
sysfs_remove_group(&data->bl->dev.kobj,
&adp8870_bl_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/backlight/arcxcnn_bl.c b/drivers/video/backlight/arcxcnn_bl.c
index 7b1c0a0e6cad..060c0eef6a52 100644
--- a/drivers/video/backlight/arcxcnn_bl.c
+++ b/drivers/video/backlight/arcxcnn_bl.c
@@ -362,7 +362,7 @@ probe_err:
return ret;
}
-static int arcxcnn_remove(struct i2c_client *cl)
+static void arcxcnn_remove(struct i2c_client *cl)
{
struct arcxcnn *lp = i2c_get_clientdata(cl);
@@ -376,8 +376,6 @@ static int arcxcnn_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
-
- return 0;
}
static const struct of_device_id arcxcnn_dt_ids[] = {
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 515184fbe33a..a506872d4396 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -175,14 +175,12 @@ static int bd6107_probe(struct i2c_client *client,
return 0;
}
-static int bd6107_remove(struct i2c_client *client)
+static void bd6107_remove(struct i2c_client *client)
{
struct backlight_device *backlight = i2c_get_clientdata(client);
backlight->props.brightness = 0;
backlight_update_status(backlight);
-
- return 0;
}
static const struct i2c_device_id bd6107_ids[] = {
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 1d17c439430e..475f35635bf6 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -579,7 +579,7 @@ static int lm3630a_probe(struct i2c_client *client,
return 0;
}
-static int lm3630a_remove(struct i2c_client *client)
+static void lm3630a_remove(struct i2c_client *client)
{
int rval;
struct lm3630a_chip *pchip = i2c_get_clientdata(client);
@@ -596,7 +596,6 @@ static int lm3630a_remove(struct i2c_client *client)
free_irq(pchip->irq, pchip);
destroy_workqueue(pchip->irqthread);
}
- return 0;
}
static const struct i2c_device_id lm3630a_id[] = {
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 48c04155a5f9..6580911671a3 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -390,7 +390,7 @@ err_out:
return ret;
}
-static int lm3639_remove(struct i2c_client *client)
+static void lm3639_remove(struct i2c_client *client)
{
struct lm3639_chip_data *pchip = i2c_get_clientdata(client);
@@ -400,7 +400,6 @@ static int lm3639_remove(struct i2c_client *client)
led_classdev_unregister(&pchip->cdev_flash);
if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
- return 0;
}
static const struct i2c_device_id lm3639_id[] = {
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index fc02c5c16055..bd0bdeae23a4 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -534,7 +534,7 @@ disable_supply:
return ret;
}
-static int lp855x_remove(struct i2c_client *cl)
+static void lp855x_remove(struct i2c_client *cl)
{
struct lp855x *lp = i2c_get_clientdata(cl);
@@ -545,8 +545,6 @@ static int lp855x_remove(struct i2c_client *cl)
if (lp->supply)
regulator_disable(lp->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
-
- return 0;
}
static const struct of_device_id lp855x_dt_ids[] = {
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 1842ae9a55f8..767b800d79fa 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -124,14 +124,12 @@ static int lv5207lp_probe(struct i2c_client *client,
return 0;
}
-static int lv5207lp_remove(struct i2c_client *client)
+static void lv5207lp_remove(struct i2c_client *client)
{
struct backlight_device *backlight = i2c_get_clientdata(client);
backlight->props.brightness = 0;
backlight_update_status(backlight);
-
- return 0;
}
static const struct i2c_device_id lv5207lp_ids[] = {
diff --git a/drivers/video/backlight/mt6370-backlight.c b/drivers/video/backlight/mt6370-backlight.c
new file mode 100644
index 000000000000..623d4f2baca2
--- /dev/null
+++ b/drivers/video/backlight/mt6370-backlight.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiaEn Wu <chiaen_wu@richtek.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MT6370_REG_DEV_INFO 0x100
+#define MT6370_REG_BL_EN 0x1A0
+#define MT6370_REG_BL_BSTCTRL 0x1A1
+#define MT6370_REG_BL_PWM 0x1A2
+#define MT6370_REG_BL_DIM2 0x1A4
+
+#define MT6370_VENID_MASK GENMASK(7, 4)
+#define MT6370_BL_EXT_EN_MASK BIT(7)
+#define MT6370_BL_EN_MASK BIT(6)
+#define MT6370_BL_CODE_MASK BIT(0)
+#define MT6370_BL_CH_MASK GENMASK(5, 2)
+#define MT6370_BL_CH_SHIFT 2
+#define MT6370_BL_DIM2_COMMON_MASK GENMASK(2, 0)
+#define MT6370_BL_DIM2_COMMON_SHIFT 3
+#define MT6370_BL_DIM2_6372_MASK GENMASK(5, 0)
+#define MT6370_BL_DIM2_6372_SHIFT 6
+#define MT6370_BL_PWM_EN_MASK BIT(7)
+#define MT6370_BL_PWM_HYS_EN_MASK BIT(2)
+#define MT6370_BL_PWM_HYS_SEL_MASK GENMASK(1, 0)
+#define MT6370_BL_OVP_EN_MASK BIT(7)
+#define MT6370_BL_OVP_SEL_MASK GENMASK(6, 5)
+#define MT6370_BL_OVP_SEL_SHIFT 5
+#define MT6370_BL_OC_EN_MASK BIT(3)
+#define MT6370_BL_OC_SEL_MASK GENMASK(2, 1)
+#define MT6370_BL_OC_SEL_SHIFT 1
+
+#define MT6370_BL_PWM_HYS_TH_MIN_STEP 1
+#define MT6370_BL_PWM_HYS_TH_MAX_STEP 64
+#define MT6370_BL_OVP_MIN_UV 17000000
+#define MT6370_BL_OVP_MAX_UV 29000000
+#define MT6370_BL_OVP_STEP_UV 4000000
+#define MT6370_BL_OCP_MIN_UA 900000
+#define MT6370_BL_OCP_MAX_UA 1800000
+#define MT6370_BL_OCP_STEP_UA 300000
+#define MT6370_BL_MAX_COMMON_BRIGHTNESS 2048
+#define MT6370_BL_MAX_6372_BRIGHTNESS 16384
+#define MT6370_BL_MAX_CH 15
+
+enum {
+ MT6370_VID_COMMON = 1,
+ MT6370_VID_6372,
+};
+
+struct mt6370_priv {
+ u8 dim2_mask;
+ u8 dim2_shift;
+ int def_max_brightness;
+ struct backlight_device *bl;
+ struct device *dev;
+ struct gpio_desc *enable_gpio;
+ struct regmap *regmap;
+};
+
+static int mt6370_bl_update_status(struct backlight_device *bl_dev)
+{
+ struct mt6370_priv *priv = bl_get_data(bl_dev);
+ int brightness = backlight_get_brightness(bl_dev);
+ unsigned int enable_val;
+ u8 brightness_val[2];
+ int ret;
+
+ if (brightness) {
+ brightness_val[0] = (brightness - 1) & priv->dim2_mask;
+ brightness_val[1] = (brightness - 1) >> priv->dim2_shift;
+
+ ret = regmap_raw_write(priv->regmap, MT6370_REG_BL_DIM2,
+ brightness_val, sizeof(brightness_val));
+ if (ret)
+ return ret;
+ }
+
+ gpiod_set_value(priv->enable_gpio, !!brightness);
+
+ enable_val = brightness ? MT6370_BL_EN_MASK : 0;
+ return regmap_update_bits(priv->regmap, MT6370_REG_BL_EN,
+ MT6370_BL_EN_MASK, enable_val);
+}
+
+static int mt6370_bl_get_brightness(struct backlight_device *bl_dev)
+{
+ struct mt6370_priv *priv = bl_get_data(bl_dev);
+ unsigned int enable;
+ u8 brightness_val[2];
+ int brightness, ret;
+
+ ret = regmap_read(priv->regmap, MT6370_REG_BL_EN, &enable);
+ if (ret)
+ return ret;
+
+ if (!(enable & MT6370_BL_EN_MASK))
+ return 0;
+
+ ret = regmap_raw_read(priv->regmap, MT6370_REG_BL_DIM2,
+ brightness_val, sizeof(brightness_val));
+ if (ret)
+ return ret;
+
+ brightness = brightness_val[1] << priv->dim2_shift;
+ brightness += brightness_val[0] & priv->dim2_mask;
+
+ return brightness + 1;
+}
+
+static const struct backlight_ops mt6370_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = mt6370_bl_update_status,
+ .get_brightness = mt6370_bl_get_brightness,
+};
+
+static int mt6370_init_backlight_properties(struct mt6370_priv *priv,
+ struct backlight_properties *props)
+{
+ struct device *dev = priv->dev;
+ u8 prop_val;
+ u32 brightness, ovp_uV, ocp_uA;
+ unsigned int mask, val;
+ int ret;
+
+ /* Vendor optional properties */
+ val = 0;
+ if (device_property_read_bool(dev, "mediatek,bled-pwm-enable"))
+ val |= MT6370_BL_PWM_EN_MASK;
+
+ if (device_property_read_bool(dev, "mediatek,bled-pwm-hys-enable"))
+ val |= MT6370_BL_PWM_HYS_EN_MASK;
+
+ ret = device_property_read_u8(dev,
+ "mediatek,bled-pwm-hys-input-th-steps",
+ &prop_val);
+ if (!ret) {
+ prop_val = clamp_val(prop_val,
+ MT6370_BL_PWM_HYS_TH_MIN_STEP,
+ MT6370_BL_PWM_HYS_TH_MAX_STEP);
+ prop_val = prop_val <= 1 ? 0 :
+ prop_val <= 4 ? 1 :
+ prop_val <= 16 ? 2 : 3;
+ val |= prop_val;
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6370_REG_BL_PWM,
+ val, val);
+ if (ret)
+ return ret;
+
+ val = 0;
+ if (device_property_read_bool(dev, "mediatek,bled-ovp-shutdown"))
+ val |= MT6370_BL_OVP_EN_MASK;
+
+ ret = device_property_read_u32(dev, "mediatek,bled-ovp-microvolt",
+ &ovp_uV);
+ if (!ret) {
+ ovp_uV = clamp_val(ovp_uV, MT6370_BL_OVP_MIN_UV,
+ MT6370_BL_OVP_MAX_UV);
+ ovp_uV = DIV_ROUND_UP(ovp_uV - MT6370_BL_OVP_MIN_UV,
+ MT6370_BL_OVP_STEP_UV);
+ val |= ovp_uV << MT6370_BL_OVP_SEL_SHIFT;
+ }
+
+ if (device_property_read_bool(dev, "mediatek,bled-ocp-shutdown"))
+ val |= MT6370_BL_OC_EN_MASK;
+
+ ret = device_property_read_u32(dev, "mediatek,bled-ocp-microamp",
+ &ocp_uA);
+ if (!ret) {
+ ocp_uA = clamp_val(ocp_uA, MT6370_BL_OCP_MIN_UA,
+ MT6370_BL_OCP_MAX_UA);
+ ocp_uA = DIV_ROUND_UP(ocp_uA - MT6370_BL_OCP_MIN_UA,
+ MT6370_BL_OCP_STEP_UA);
+ val |= ocp_uA << MT6370_BL_OC_SEL_SHIFT;
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6370_REG_BL_BSTCTRL,
+ val, val);
+ if (ret)
+ return ret;
+
+ /* Common properties */
+ ret = device_property_read_u32(dev, "max-brightness", &brightness);
+ if (ret)
+ brightness = priv->def_max_brightness;
+
+ props->max_brightness = min_t(u32, brightness, priv->def_max_brightness);
+
+ ret = device_property_read_u32(dev, "default-brightness", &brightness);
+ if (ret)
+ brightness = props->max_brightness;
+
+ props->brightness = min_t(u32, brightness, props->max_brightness);
+
+ val = 0;
+ if (device_property_read_bool(dev, "mediatek,bled-exponential-mode-enable")) {
+ val |= MT6370_BL_CODE_MASK;
+ props->scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props->scale = BACKLIGHT_SCALE_LINEAR;
+
+ ret = device_property_read_u8(dev, "mediatek,bled-channel-use",
+ &prop_val);
+ if (ret) {
+ dev_err(dev, "mediatek,bled-channel-use DT property missing\n");
+ return ret;
+ }
+
+ if (!prop_val || prop_val > MT6370_BL_MAX_CH) {
+ dev_err(dev,
+ "No channel specified or over than upper bound (%d)\n",
+ prop_val);
+ return -EINVAL;
+ }
+
+ mask = MT6370_BL_EXT_EN_MASK | MT6370_BL_CH_MASK;
+ val |= prop_val << MT6370_BL_CH_SHIFT;
+
+ if (priv->enable_gpio)
+ val |= MT6370_BL_EXT_EN_MASK;
+
+ return regmap_update_bits(priv->regmap, MT6370_REG_BL_EN, mask, val);
+}
+
+static int mt6370_check_vendor_info(struct mt6370_priv *priv)
+{
+ /*
+ * MT6372 uses 14 bits to control the brightness, while MT6370
+ * and MT6371 use 11 bits. This function checks the vendor ID
+ * and sets the corresponding hardware mask, shift and default
+ * maximum brightness value that should be used.
+ */
+ unsigned int dev_info, hw_vid, of_vid;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
+ if (ret)
+ return ret;
+
+ of_vid = (uintptr_t)device_get_match_data(priv->dev);
+ hw_vid = FIELD_GET(MT6370_VENID_MASK, dev_info);
+ hw_vid = (hw_vid == 0x9 || hw_vid == 0xb) ? MT6370_VID_6372 : MT6370_VID_COMMON;
+ if (hw_vid != of_vid)
+ return dev_err_probe(priv->dev, -EINVAL,
+ "Buggy DT, wrong compatible string\n");
+
+ if (hw_vid == MT6370_VID_6372) {
+ priv->dim2_mask = MT6370_BL_DIM2_6372_MASK;
+ priv->dim2_shift = MT6370_BL_DIM2_6372_SHIFT;
+ priv->def_max_brightness = MT6370_BL_MAX_6372_BRIGHTNESS;
+ } else {
+ priv->dim2_mask = MT6370_BL_DIM2_COMMON_MASK;
+ priv->dim2_shift = MT6370_BL_DIM2_COMMON_SHIFT;
+ priv->def_max_brightness = MT6370_BL_MAX_COMMON_BRIGHTNESS;
+ }
+
+ return 0;
+}
+
+static int mt6370_bl_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ };
+ struct device *dev = &pdev->dev;
+ struct mt6370_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap)
+ return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n");
+
+ ret = mt6370_check_vendor_info(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to check vendor info\n");
+
+ priv->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->enable_gpio),
+ "Failed to get 'enable' gpio\n");
+
+ ret = mt6370_init_backlight_properties(priv, &props);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to init backlight properties\n");
+
+ priv->bl = devm_backlight_device_register(dev, pdev->name, dev, priv,
+ &mt6370_bl_ops, &props);
+ if (IS_ERR(priv->bl))
+ return dev_err_probe(dev, PTR_ERR(priv->bl),
+ "Failed to register backlight\n");
+
+ backlight_update_status(priv->bl);
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int mt6370_bl_remove(struct platform_device *pdev)
+{
+ struct mt6370_priv *priv = platform_get_drvdata(pdev);
+ struct backlight_device *bl_dev = priv->bl;
+
+ bl_dev->props.brightness = 0;
+ backlight_update_status(priv->bl);
+
+ return 0;
+}
+
+static const struct of_device_id mt6370_bl_of_match[] = {
+ { .compatible = "mediatek,mt6370-backlight", .data = (void *)MT6370_VID_COMMON },
+ { .compatible = "mediatek,mt6372-backlight", .data = (void *)MT6370_VID_6372 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6370_bl_of_match);
+
+static struct platform_driver mt6370_bl_driver = {
+ .driver = {
+ .name = "mt6370-backlight",
+ .of_match_table = mt6370_bl_of_match,
+ },
+ .probe = mt6370_bl_probe,
+ .remove = mt6370_bl_remove,
+};
+module_platform_driver(mt6370_bl_driver);
+
+MODULE_AUTHOR("ChiaEn Wu <chiaen_wu@richtek.com>");
+MODULE_DESCRIPTION("MediaTek MT6370 Backlight Driver");
+MODULE_LICENSE("GPL v2");
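
In the new driver, mt6370_bl_update_status() writes the zero-based brightness split across two DIM2 registers: the low field is masked with dim2_mask and the remainder is shifted down by dim2_shift, and mt6370_bl_get_brightness() reverses the split. A standalone sketch of that encoding, assuming the MT6372 mask/shift values from the defines above:

#include <assert.h>
#include <stdio.h>

#define DIM2_MASK  0x3f /* GENMASK(5, 0), MT6372 case */
#define DIM2_SHIFT 6

int main(void)
{
        unsigned int brightness = 10000; /* 1..16384 on MT6372 */
        unsigned char regs[2];
        unsigned int readback;

        /* Split the zero-based value: low bits into DIM2, the rest shifted up. */
        regs[0] = (brightness - 1) & DIM2_MASK;
        regs[1] = (brightness - 1) >> DIM2_SHIFT;

        /* Reading back reverses the split, as the get_brightness path does. */
        readback = ((unsigned int)regs[1] << DIM2_SHIFT) + (regs[0] & DIM2_MASK) + 1;

        assert(readback == brightness);
        printf("low 0x%02x high 0x%02x -> %u\n", regs[0], regs[1], readback);
        return 0;
}
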
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 6df6fcd132e3..f55b3d616a87 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -121,12 +121,11 @@ err_reg:
return ret;
}
-static int tosa_bl_remove(struct i2c_client *client)
+static void tosa_bl_remove(struct i2c_client *client)
{
struct tosa_bl_data *data = i2c_get_clientdata(client);
data->bl = NULL;
- return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index a2a381631628..a317d9fe1d67 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -11,6 +11,7 @@
* Code is based on s3fb
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -956,6 +957,10 @@ static int ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
int rc;
u8 regval;
+ rc = aperture_remove_conflicting_pci_devices(dev, "arkfb");
+ if (rc < 0)
+ return rc;
+
/* Ignore secondary VGA device because there is no VGA arbitration */
if (! svga_primary_device(dev)) {
dev_info(&(dev->dev), "ignoring secondary device\n");
diff --git a/drivers/video/fbdev/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index f8ef62542f7f..3818437a8f69 100644
--- a/drivers/video/fbdev/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
@@ -29,6 +29,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -545,6 +546,10 @@ static int asiliantfb_pci_init(struct pci_dev *dp,
struct fb_info *p;
int err;
+ err = aperture_remove_conflicting_pci_devices(dp, "asiliantfb");
+ if (err)
+ return err;
+
if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
return -ENODEV;
addr = pci_resource_start(dp, 0);
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index b26c81233b6b..57e398fe7a81 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -23,7 +23,7 @@
* - Convert to new framebuffer API,
* fix colormap setting at 16 bits/pixel (565)
*
- * Paul Mundt
+ * Paul Mundt
* - PCI hotplug
*
* Jon Smirl <jonsmirl@yahoo.com>
@@ -47,6 +47,7 @@
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -520,13 +521,13 @@ static const struct fb_ops aty128fb_ops = {
* - endian conversions may possibly be avoided by
* using the other register aperture. TODO.
*/
-static inline u32 _aty_ld_le32(volatile unsigned int regindex,
+static inline u32 _aty_ld_le32(volatile unsigned int regindex,
const struct aty128fb_par *par)
{
return readl (par->regbase + regindex);
}
-static inline void _aty_st_le32(volatile unsigned int regindex, u32 val,
+static inline void _aty_st_le32(volatile unsigned int regindex, u32 val,
const struct aty128fb_par *par)
{
writel (val, par->regbase + regindex);
@@ -559,12 +560,12 @@ static inline void _aty_st_8(unsigned int regindex, u8 val,
static u32 _aty_ld_pll(unsigned int pll_index,
const struct aty128fb_par *par)
-{
+{
aty_st_8(CLOCK_CNTL_INDEX, pll_index & 0x3F);
return aty_ld_le32(CLOCK_CNTL_DATA);
}
-
+
static void _aty_st_pll(unsigned int pll_index, u32 val,
const struct aty128fb_par *par)
{
@@ -619,7 +620,7 @@ static int register_test(const struct aty128fb_par *par)
aty_st_le32(BIOS_0_SCRATCH, 0xAAAAAAAA);
if (aty_ld_le32(BIOS_0_SCRATCH) == 0xAAAAAAAA)
- flag = 1;
+ flag = 1;
}
aty_st_le32(BIOS_0_SCRATCH, val); // restore value
@@ -901,7 +902,7 @@ static void aty128_get_pllinfo(struct aty128fb_par *par,
bios_hdr = BIOS_IN16(0x48);
bios_pll = BIOS_IN16(bios_hdr + 0x30);
-
+
par->constants.ppll_max = BIOS_IN32(bios_pll + 0x16);
par->constants.ppll_min = BIOS_IN32(bios_pll + 0x12);
par->constants.xclk = BIOS_IN16(bios_pll + 0x08);
@@ -913,7 +914,7 @@ static void aty128_get_pllinfo(struct aty128fb_par *par,
par->constants.xclk, par->constants.ref_divider,
par->constants.ref_clk);
-}
+}
#ifdef CONFIG_X86
static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par)
@@ -925,7 +926,7 @@ static void __iomem *aty128_find_mem_vbios(struct aty128fb_par *par)
*/
u32 segstart;
unsigned char __iomem *rom_base = NULL;
-
+
for (segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) {
rom_base = ioremap(segstart, 0x10000);
if (rom_base == NULL)
@@ -1118,12 +1119,12 @@ static int aty128_var_to_crtc(const struct fb_var_screeninfo *var,
v_sync_wid = 1;
else if (v_sync_wid > 0x1f) /* 0x1f = max vwidth */
v_sync_wid = 0x1f;
-
+
v_sync_strt = v_disp + lower;
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
+
c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
crtc->gen_cntl = 0x3000000L | c_sync | (dst << 8);
@@ -1301,11 +1302,11 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
aty_st_le32(LVDS_GEN_CNTL, reg);
#ifdef CONFIG_FB_ATY128_BACKLIGHT
aty128_bl_set_power(info, FB_BLANK_UNBLANK);
-#endif
+#endif
} else {
#ifdef CONFIG_FB_ATY128_BACKLIGHT
aty128_bl_set_power(info, FB_BLANK_POWERDOWN);
-#endif
+#endif
reg = aty_ld_le32(LVDS_GEN_CNTL);
reg |= LVDS_DISPLAY_DIS;
aty_st_le32(LVDS_GEN_CNTL, reg);
@@ -1481,7 +1482,7 @@ static int aty128_ddafifo(struct aty128_ddafifo *dsp,
* This actually sets the video mode.
*/
static int aty128fb_set_par(struct fb_info *info)
-{
+{
struct aty128fb_par *par = info->par;
u32 config;
int err;
@@ -1595,7 +1596,7 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
var->accel_flags = par->accel_flags;
return 0;
-}
+}
static int aty128fb_check_var(struct fb_var_screeninfo *var,
@@ -1979,12 +1980,12 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PowerBook Titanium */
if (of_machine_is_compatible("PowerBook3,2"))
default_vmode = VMODE_1152_768_60;
-
- if (default_cmode > 16)
+
+ if (default_cmode > 16)
default_cmode = CMODE_32;
- else if (default_cmode > 8)
+ else if (default_cmode > 8)
default_cmode = CMODE_16;
- else
+ else
default_cmode = CMODE_8;
if (mac_vmode_to_var(default_vmode, default_cmode, &var))
@@ -1994,7 +1995,7 @@ static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_PPC_PMAC */
{
if (mode_option)
- if (fb_find_mode(&var, info, mode_option, NULL,
+ if (fb_find_mode(&var, info, mode_option, NULL,
0, &defaultmode, 8) == 0)
var = default_var;
}
@@ -2055,6 +2056,10 @@ static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *bios = NULL;
#endif
+ err = aperture_remove_conflicting_pci_devices(pdev, "aty128fb");
+ if (err)
+ return err;
+
/* Enable device in PCI config */
if ((err = pci_enable_device(pdev))) {
printk(KERN_ERR "aty128fb: Cannot enable PCI device: %d\n",
@@ -2301,7 +2306,7 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
struct aty128fb_par *par = info->par;
u32 value;
int rc;
-
+
switch (cmd) {
case FBIO_ATY128_SET_MIRROR:
if (par->chip_gen != rage_M3)
@@ -2313,8 +2318,8 @@ static int aty128fb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
par->crt_on = (value & 0x02) != 0;
if (!par->crt_on && !par->lcd_on)
par->lcd_on = 1;
- aty128_set_crt_enable(par, par->crt_on);
- aty128_set_lcd_enable(par, par->lcd_on);
+ aty128_set_crt_enable(par, par->crt_on);
+ aty128_set_lcd_enable(par, par->lcd_on);
return 0;
case FBIO_ATY128_GET_MIRROR:
if (par->chip_gen != rage_M3)
@@ -2331,7 +2336,7 @@ static void aty128_set_suspend(struct aty128fb_par *par, int suspend)
if (!par->pdev->pm_cap)
return;
-
+
/* Set the chip into the appropriate suspend mode (we use D2,
* D3 would require a complete re-initialisation of the chip,
* including PCI config registers, clocks, AGP configuration, ...)
@@ -2376,12 +2381,12 @@ static int aty128_pci_suspend_late(struct device *dev, pm_message_t state)
*/
return 0;
#endif /* CONFIG_PPC_PMAC */
-
+
if (state.event == pdev->dev.power.power_state.event)
return 0;
printk(KERN_DEBUG "aty128fb: suspending...\n");
-
+
console_lock();
fb_set_suspend(info, 1);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 14eb718bd67c..b3463d137152 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -48,6 +48,7 @@
******************************************************************************/
+#include <linux/aperture.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -3533,7 +3534,11 @@ static int atyfb_pci_probe(struct pci_dev *pdev,
struct fb_info *info;
struct resource *rp;
struct atyfb_par *par;
- int rc = -ENOMEM;
+ int rc;
+
+ rc = aperture_remove_conflicting_pci_devices(pdev, "atyfb");
+ if (rc)
+ return rc;
/* Enable device in PCI config */
if (pci_enable_device(pdev)) {
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index a14a8d73035c..8b28c9bddd97 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -7,7 +7,7 @@
* Copyright 2000 Ani Joshi <ajoshi@kernel.crashing.org>
*
* i2c bits from Luca Tettamanti <kronos@kronoz.cjb.net>
- *
+ *
* Special thanks to ATI DevRel team for their hardware donations.
*
* ...Insert GPL boilerplate here...
@@ -54,6 +54,7 @@
#include "radeonfb.h"
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -110,7 +111,7 @@ static const struct pci_device_id radeonfb_pci_table[] = {
/* Radeon IGP320M (U1) */
CHIP_DEF(PCI_CHIP_RS100_4336, RS100, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
/* Radeon IGP320 (A3) */
- CHIP_DEF(PCI_CHIP_RS100_4136, RS100, CHIP_HAS_CRTC2 | CHIP_IS_IGP),
+ CHIP_DEF(PCI_CHIP_RS100_4136, RS100, CHIP_HAS_CRTC2 | CHIP_IS_IGP),
/* IGP330M/340M/350M (U2) */
CHIP_DEF(PCI_CHIP_RS200_4337, RS200, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
/* IGP330/340/350 (A4) */
@@ -240,7 +241,7 @@ typedef struct {
* interfere with anything
*/
static reg_val common_regs[] = {
- { OVR_CLR, 0 },
+ { OVR_CLR, 0 },
{ OVR_WID_LEFT_RIGHT, 0 },
{ OVR_WID_TOP_BOTTOM, 0 },
{ OV0_SCALE_CNTL, 0 },
@@ -255,7 +256,7 @@ static reg_val common_regs[] = {
/*
* globals
*/
-
+
static char *mode_option;
static char *monitor_layout;
static bool noaccel = 0;
@@ -422,7 +423,7 @@ static int radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
* ROM somewhere in the first meg. We will just ignore the copy
* and use the ROM directly.
*/
-
+
/* Fix from ATI for problem with Radeon hardware not leaving ROM enabled */
unsigned int temp;
temp = INREG(MPP_TB_CONFIG);
@@ -430,14 +431,14 @@ static int radeon_map_ROM(struct radeonfb_info *rinfo, struct pci_dev *dev)
temp |= 0x04 << 24;
OUTREG(MPP_TB_CONFIG, temp);
temp = INREG(MPP_TB_CONFIG);
-
+
rom = pci_map_rom(dev, &rom_size);
if (!rom) {
printk(KERN_ERR "radeonfb (%s): ROM failed to map\n",
pci_name(rinfo->pdev));
return -ENOMEM;
}
-
+
rinfo->bios_seg = rom;
/* Very simple test to make sure it appeared */
@@ -515,7 +516,7 @@ static int radeon_find_mem_vbios(struct radeonfb_info *rinfo)
*/
u32 segstart;
void __iomem *rom_base = NULL;
-
+
for(segstart=0x000c0000; segstart<0x000f0000; segstart+=0x00001000) {
rom_base = ioremap(segstart, 0x10000);
if (rom_base == NULL)
@@ -605,16 +606,16 @@ static int radeon_probe_pll_params(struct radeonfb_info *rinfo)
for(i=0; i<1000000; i++)
if (((INREG(CRTC_VLINE_CRNT_VLINE) >> 16) & 0x3ff) == 0)
break;
-
+
stop_time = ktime_get();
-
+
local_irq_enable();
total_usecs = ktime_us_delta(stop_time, start_time);
if (total_usecs >= 10 * USEC_PER_SEC || total_usecs == 0)
return -1;
hz = USEC_PER_SEC/(u32)total_usecs;
-
+
hTotal = ((INREG(CRTC_H_TOTAL_DISP) & 0x1ff) + 1) * 8;
vTotal = ((INREG(CRTC_V_TOTAL_DISP) & 0x3ff) + 1);
vclk = (long long)hTotal * (long long)vTotal * hz;
@@ -662,7 +663,7 @@ static int radeon_probe_pll_params(struct radeonfb_info *rinfo)
denom *= 3;
break;
case 6:
- denom *= 6;
+ denom *= 6;
break;
case 7:
denom *= 12;
@@ -878,7 +879,7 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
v.green.length = 6;
v.blue.length = 5;
v.transp.offset = v.transp.length = 0;
- break;
+ break;
case 24:
nom = 4;
den = 1;
@@ -908,7 +909,7 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
v.yres_virtual = v.yres;
if (v.xres_virtual < v.xres)
v.xres_virtual = v.xres;
-
+
/* XXX I'm adjusting xres_virtual to the pitch, that may help XFree
* with some panels, though I don't quite like this solution
@@ -929,14 +930,14 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
if (v.xoffset > v.xres_virtual - v.xres)
v.xoffset = v.xres_virtual - v.xres - 1;
-
+
if (v.yoffset > v.yres_virtual - v.yres)
v.yoffset = v.yres_virtual - v.yres - 1;
-
+
v.red.msb_right = v.green.msb_right = v.blue.msb_right =
v.transp.offset = v.transp.length =
v.transp.msb_right = 0;
-
+
memcpy(var, &v, sizeof(v));
return 0;
@@ -951,7 +952,7 @@ static int radeonfb_pan_display (struct fb_var_screeninfo *var,
if ((var->xoffset + info->var.xres > info->var.xres_virtual)
|| (var->yoffset + info->var.yres > info->var.yres_virtual))
return -EINVAL;
-
+
if (rinfo->asleep)
return 0;
@@ -1151,7 +1152,7 @@ static int radeonfb_blank (int blank, struct fb_info *info)
if (rinfo->asleep)
return 0;
-
+
return radeon_screen_blank(rinfo, blank, 0);
}
@@ -1401,7 +1402,7 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
} else {
/* R300 uses ref_div_acc field as real ref divider */
OUTPLLP(PPLL_REF_DIV,
- (mode->ppll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
+ (mode->ppll_ref_div << R300_PPLL_REF_DIV_ACC_SHIFT),
~R300_PPLL_REF_DIV_ACC_MASK);
}
} else
@@ -1423,7 +1424,7 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
workaround shouldn't have any effect on them. */
for (i = 0; (i < 10000 && INPLL(PPLL_REF_DIV) & PPLL_ATOMIC_UPDATE_R); i++)
;
-
+
OUTPLL(HTOTAL_CNTL, 0);
/* Clear reset & atomic update */
@@ -1510,7 +1511,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
radeon_fifo_wait(2);
OUTPLL(VCLK_ECP_CNTL, mode->vclk_ecp_cntl);
-
+
return;
}
@@ -1735,7 +1736,7 @@ static int radeonfb_set_par(struct fb_info *info)
/* Clear auto-center etc... */
newmode->crtc_more_cntl = rinfo->init_state.crtc_more_cntl;
newmode->crtc_more_cntl &= 0xfffffff0;
-
+
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD)) {
newmode->crtc_ext_cntl = VGA_ATI_LINEAR | XCRT_CNT_EN;
if (mirror)
@@ -1793,7 +1794,7 @@ static int radeonfb_set_par(struct fb_info *info)
newmode->surface_cntl |= NONSURF_AP0_SWP_16BPP;
newmode->surface_cntl |= NONSURF_AP1_SWP_16BPP;
break;
- case 24:
+ case 24:
case 32:
newmode->surface_cntl |= NONSURF_AP0_SWP_32BPP;
newmode->surface_cntl |= NONSURF_AP1_SWP_32BPP;
@@ -2028,7 +2029,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
}
save_crtc_gen_cntl = INREG(CRTC_GEN_CNTL);
save_crtc_ext_cntl = INREG(CRTC_EXT_CNTL);
-
+
OUTREG(CRTC_EXT_CNTL, save_crtc_ext_cntl | CRTC_DISPLAY_DIS);
OUTREG(CRTC_GEN_CNTL, save_crtc_gen_cntl | CRTC_DISP_REQ_EN_B);
mdelay(100);
@@ -2038,7 +2039,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
#ifdef SET_MC_FB_FROM_APERTURE
/* Set framebuffer to be at the same address as set in PCI BAR */
- OUTREG(MC_FB_LOCATION,
+ OUTREG(MC_FB_LOCATION,
((aper_base + aper_size - 1) & 0xffff0000) | (aper_base >> 16));
rinfo->fb_local_base = aper_base;
#else
@@ -2079,7 +2080,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
OUTREG(CRTC_GEN_CNTL, save_crtc_gen_cntl);
OUTREG(CRTC_EXT_CNTL, save_crtc_ext_cntl);
if (rinfo->has_CRTC2)
- OUTREG(CRTC2_GEN_CNTL, save_crtc2_gen_cntl);
+ OUTREG(CRTC2_GEN_CNTL, save_crtc2_gen_cntl);
pr_debug("aper_base: %08x MC_FB_LOC to: %08x, MC_AGP_LOC to: %08x\n",
aper_base,
@@ -2239,20 +2240,10 @@ static const struct bin_attribute edid2_attr = {
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
+ resource_size_t base = pci_resource_start(pdev, 0);
+ resource_size_t size = pci_resource_len(pdev, 0);
- remove_conflicting_framebuffers(ap, KBUILD_MODNAME, false);
-
- kfree(ap);
-
- return 0;
+ return aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
}
static int radeonfb_pci_register(struct pci_dev *pdev,
@@ -2265,7 +2256,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
int err = 0;
pr_debug("radeonfb_pci_register BEGIN\n");
-
+
/* Enable device in PCI config */
ret = pci_enable_device(pdev);
if (ret < 0) {
@@ -2280,9 +2271,9 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
goto err_disable;
}
rinfo = info->par;
- rinfo->info = info;
+ rinfo->info = info;
rinfo->pdev = pdev;
-
+
spin_lock_init(&rinfo->reg_lock);
timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0);
@@ -2521,7 +2512,7 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct radeonfb_info *rinfo = info->par;
-
+
if (!rinfo)
return;
@@ -2540,7 +2531,7 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
iounmap(rinfo->mmio_base);
iounmap(rinfo->fb_base);
-
+
pci_release_region(pdev, 2);
pci_release_region(pdev, 0);
@@ -2550,7 +2541,7 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
fb_destroy_modedb(rinfo->mon1_modedb);
#ifdef CONFIG_FB_RADEON_I2C
radeon_delete_i2c_busses(rinfo);
-#endif
+#endif
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
diff --git a/drivers/video/fbdev/carminefb.c b/drivers/video/fbdev/carminefb.c
index 3a1c2e0739a1..4651b48a87f9 100644
--- a/drivers/video/fbdev/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
@@ -7,6 +7,7 @@
* - FB1 is display 1 with unique memory area
* - both display use 32 bit colors
*/
+#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fb.h>
@@ -614,6 +615,10 @@ static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
struct fb_info *info;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(dev, "carminefb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(dev);
if (ret)
return ret;
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 2b00a9d554fc..f1c1c95c1fdf 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -14,6 +14,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -122,7 +123,7 @@ static int chipsfb_set_par(struct fb_info *info)
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 5;
-
+
} else {
/* p->var.bits_per_pixel == 8 */
write_cr(0x13, 100); // Set line length (doublewords)
@@ -131,13 +132,13 @@ static int chipsfb_set_par(struct fb_info *info)
write_xr(0x20, 0x00); // 8 bit blitter mode
info->fix.line_length = 800;
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->var.red.offset = info->var.green.offset =
info->var.blue.offset = 0;
info->var.red.length = info->var.green.length =
info->var.blue.length = 8;
-
+
}
return 0;
}
@@ -351,7 +352,11 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
struct fb_info *p;
unsigned long addr;
unsigned short cmd;
- int rc = -ENODEV;
+ int rc;
+
+ rc = aperture_remove_conflicting_pci_devices(dp, "chipsfb");
+ if (rc)
+ return rc;
if (pci_enable_device(dp) < 0) {
dev_err(&dp->dev, "Cannot enable PCI device\n");
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 2a9fa06881b5..b08bee43779a 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -34,6 +34,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -2085,6 +2086,10 @@ static int cirrusfb_pci_register(struct pci_dev *pdev,
unsigned long board_addr, board_size;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "cirrusfb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret < 0) {
printk(KERN_ERR "cirrusfb: Cannot enable PCI device\n");
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 02b0cf2cfafe..1e70d8c67653 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -13,13 +13,13 @@
#include <linux/module.h>
+#include <linux/aperture.h>
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
-#include <linux/sysfb.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vt.h>
@@ -40,6 +40,7 @@
#include <asm/fb.h>
+#include <video/vga.h>
/*
* Frame buffer device initialization and setup routines
@@ -50,10 +51,10 @@
static DEFINE_MUTEX(registration_lock);
struct fb_info *registered_fb[FB_MAX] __read_mostly;
-EXPORT_SYMBOL(registered_fb);
-
int num_registered_fb __read_mostly;
-EXPORT_SYMBOL(num_registered_fb);
+#define for_each_registered_fb(i) \
+ for (i = 0; i < FB_MAX; i++) \
+ if (!registered_fb[i]) {} else
bool fb_center_logo __read_mostly;
@@ -1525,103 +1526,6 @@ static int fb_check_foreignness(struct fb_info *fi)
return 0;
}
-static bool apertures_overlap(struct aperture *gen, struct aperture *hw)
-{
- /* is the generic aperture base the same as the HW one */
- if (gen->base == hw->base)
- return true;
- /* is the generic aperture base inside the hw base->hw base+size */
- if (gen->base > hw->base && gen->base < hw->base + hw->size)
- return true;
- return false;
-}
-
-static bool fb_do_apertures_overlap(struct apertures_struct *gena,
- struct apertures_struct *hwa)
-{
- int i, j;
- if (!hwa || !gena)
- return false;
-
- for (i = 0; i < hwa->count; ++i) {
- struct aperture *h = &hwa->ranges[i];
- for (j = 0; j < gena->count; ++j) {
- struct aperture *g = &gena->ranges[j];
- printk(KERN_DEBUG "checking generic (%llx %llx) vs hw (%llx %llx)\n",
- (unsigned long long)g->base,
- (unsigned long long)g->size,
- (unsigned long long)h->base,
- (unsigned long long)h->size);
- if (apertures_overlap(g, h))
- return true;
- }
- }
-
- return false;
-}
-
-static void do_unregister_framebuffer(struct fb_info *fb_info);
-
-#define VGA_FB_PHYS 0xA0000
-static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
-{
- int i;
-
-restart_removal:
- /* check all firmware fbs and kick off if the base addr overlaps */
- for_each_registered_fb(i) {
- struct apertures_struct *gen_aper;
- struct device *device;
-
- if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
- continue;
-
- gen_aper = registered_fb[i]->apertures;
- device = registered_fb[i]->device;
- if (fb_do_apertures_overlap(gen_aper, a) ||
- (primary && gen_aper && gen_aper->count &&
- gen_aper->ranges[0].base == VGA_FB_PHYS)) {
-
- printk(KERN_INFO "fb%d: switching to %s from %s\n",
- i, name, registered_fb[i]->fix.id);
-
- /*
- * If we kick-out a firmware driver, we also want to remove
- * the underlying platform device, such as simple-framebuffer,
- * VESA, EFI, etc. A native driver will then be able to
- * allocate the memory range.
- *
- * If it's not a platform device, at least print a warning. A
- * fix would add code to remove the device from the system. For
- * framebuffers without any Linux device, print a warning as
- * well.
- */
- if (!device) {
- pr_warn("fb%d: no device set\n", i);
- do_unregister_framebuffer(registered_fb[i]);
- } else if (dev_is_platform(device)) {
- /*
- * Drop the lock because if the device is unregistered, its
- * driver will call to unregister_framebuffer(), that takes
- * this lock.
- */
- mutex_unlock(&registration_lock);
- platform_device_unregister(to_platform_device(device));
- mutex_lock(&registration_lock);
- } else {
- pr_warn("fb%d: cannot remove device\n", i);
- do_unregister_framebuffer(registered_fb[i]);
- }
- /*
- * Restart the removal loop now that the device has been
- * unregistered and its associated framebuffer gone.
- */
- goto restart_removal;
- }
- }
-}
-
static int do_register_framebuffer(struct fb_info *fb_info)
{
int i;
@@ -1630,10 +1534,6 @@ static int do_register_framebuffer(struct fb_info *fb_info)
if (fb_check_foreignness(fb_info))
return -ENOSYS;
- do_remove_conflicting_framebuffers(fb_info->apertures,
- fb_info->fix.id,
- fb_is_primary_device(fb_info));
-
if (num_registered_fb == FB_MAX)
return -ENXIO;
@@ -1752,100 +1652,31 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
put_fb_info(fb_info);
}
-/**
- * remove_conflicting_framebuffers - remove firmware-configured framebuffers
- * @a: memory range, users of which are to be removed
- * @name: requesting driver name
- * @primary: also kick vga16fb if present
- *
- * This function removes framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a is NULL all such devices are
- * removed.
- */
-int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
+static int fb_aperture_acquire_for_platform_device(struct fb_info *fb_info)
{
- bool do_free = false;
-
- if (!a) {
- a = alloc_apertures(1);
- if (!a)
- return -ENOMEM;
-
- a->ranges[0].base = 0;
- a->ranges[0].size = ~0;
- do_free = true;
- }
-
- /*
- * If a driver asked to unregister a platform device registered by
- * sysfb, then can be assumed that this is a driver for a display
- * that is set up by the system firmware and has a generic driver.
- *
- * Drivers for devices that don't have a generic driver will never
- * ask for this, so let's assume that a real driver for the display
- * was already probed and prevent sysfb to register devices later.
- */
- sysfb_disable();
-
- mutex_lock(&registration_lock);
- do_remove_conflicting_framebuffers(a, name, primary);
- mutex_unlock(&registration_lock);
-
- if (do_free)
- kfree(a);
-
- return 0;
-}
-EXPORT_SYMBOL(remove_conflicting_framebuffers);
+ struct apertures_struct *ap = fb_info->apertures;
+ struct device *dev = fb_info->device;
+ struct platform_device *pdev;
+ unsigned int i;
+ int ret;
-/**
- * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
- * @pdev: PCI device
- * @name: requesting driver name
- *
- * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for any of @pdev's memory bars.
- *
- * The function assumes that PCI device with shadowed ROM drives a primary
- * display and so kicks out vga16fb.
- */
-int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
-{
- struct apertures_struct *ap;
- bool primary = false;
- int err, idx, bar;
+ if (!ap)
+ return 0;
- for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
- continue;
- idx++;
- }
+ if (!dev_is_platform(dev))
+ return 0;
- ap = alloc_apertures(idx);
- if (!ap)
- return -ENOMEM;
+ pdev = to_platform_device(dev);
- for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
- continue;
- ap->ranges[idx].base = pci_resource_start(pdev, bar);
- ap->ranges[idx].size = pci_resource_len(pdev, bar);
- pci_dbg(pdev, "%s: bar %d: 0x%lx -> 0x%lx\n", __func__, bar,
- (unsigned long)pci_resource_start(pdev, bar),
- (unsigned long)pci_resource_end(pdev, bar));
- idx++;
+ for (ret = 0, i = 0; i < ap->count; ++i) {
+ ret = devm_aperture_acquire_for_platform_device(pdev, ap->ranges[i].base,
+ ap->ranges[i].size);
+ if (ret)
+ break;
}
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags &
- IORESOURCE_ROM_SHADOW;
-#endif
- err = remove_conflicting_framebuffers(ap, name, primary);
- kfree(ap);
- return err;
+ return ret;
}
-EXPORT_SYMBOL(remove_conflicting_pci_framebuffers);
/**
* register_framebuffer - registers a frame buffer device
@@ -1861,6 +1692,12 @@ register_framebuffer(struct fb_info *fb_info)
{
int ret;
+ if (fb_info->flags & FBINFO_MISC_FIRMWARE) {
+ ret = fb_aperture_acquire_for_platform_device(fb_info);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&registration_lock);
ret = do_register_framebuffer(fb_info);
mutex_unlock(&registration_lock);
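
With do_remove_conflicting_framebuffers() and the exported remove_conflicting_*() helpers gone, fbmem.c only has to tie firmware framebuffers into the aperture infrastructure: for FBINFO_MISC_FIRMWARE devices, register_framebuffer() now passes each aperture range to devm_aperture_acquire_for_platform_device(), and the aperture helpers unregister that platform device once a native driver claims the range. A minimal sketch of a firmware-framebuffer probe relying on this; the "fwfb" name, the address and size values, and the elided fb_info setup are placeholders, not taken from this series:

#include <linux/aperture.h>
#include <linux/fb.h>
#include <linux/platform_device.h>

static int fwfb_probe(struct platform_device *pdev)
{
        struct fb_info *info;

        info = framebuffer_alloc(0, &pdev->dev);
        if (!info)
                return -ENOMEM;

        info->apertures = alloc_apertures(1);
        if (!info->apertures) {
                framebuffer_release(info);
                return -ENOMEM;
        }
        info->apertures->ranges[0].base = 0xe0000000;         /* placeholder */
        info->apertures->ranges[0].size = 8 * 1024 * 1024;    /* placeholder */

        info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE;

        /* ... fbops, screen_base, var/fix setup elided ... */

        /*
         * register_framebuffer() now feeds these ranges to
         * devm_aperture_acquire_for_platform_device(), so a native driver's
         * aperture_remove_conflicting_*() call unplugs this platform device
         * instead of fbmem.c scanning registered_fb[] itself.
         */
        return register_framebuffer(info);
}
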
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 8f041f9b14c7..585af90a68a5 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -33,6 +33,7 @@
* (which, incidentally, is about the same saving as a 2.5in hard disk
* entering standby mode.)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1720,6 +1721,10 @@ static int cyberpro_pci_probe(struct pci_dev *dev,
sprintf(name, "CyberPro%4X", id->device);
+ err = aperture_remove_conflicting_pci_devices(dev, name);
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err)
return err;
diff --git a/drivers/video/fbdev/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index e41204ecb0e3..1514c653a84f 100644
--- a/drivers/video/fbdev/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
@@ -6,6 +6,7 @@
* Copyright (C) 2005 Arcom Control Systems Ltd.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -320,6 +321,10 @@ static int gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_info *info;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "gx1fb");
+ if (ret)
+ return ret;
+
info = gx1fb_init_fbinfo(&pdev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 44089b331f91..2527bd80ec5f 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -15,6 +15,7 @@
*
* 16 MiB of framebuffer memory is assumed to be available.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -364,6 +365,10 @@ static int gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_videomode *modedb_ptr;
unsigned int modedb_size;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "gxfb");
+ if (ret)
+ return ret;
+
info = gxfb_init_fbinfo(&pdev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 66c81262d18f..9d26592dbfce 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -6,6 +6,7 @@
* Built from gxfb (which is Copyright (C) 2006 Arcom Control Systems Ltd.)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -484,6 +485,10 @@ static int lxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_videomode *modedb_ptr;
unsigned int modedb_size;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "lxfb");
+ if (ret)
+ return ret;
+
info = lxfb_init_fbinfo(&pdev->dev);
if (info == NULL)
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 94588b809ebf..0dcef4bec8d7 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -6,6 +6,7 @@
* Copyright (C) 2006 Paul Mackerras, IBM Corp. <paulus@samba.org>
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fb.h>
@@ -621,6 +622,10 @@ static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct fb_var_screeninfo var;
enum gxt_cards cardtype;
+ err = aperture_remove_conflicting_pci_devices(pdev, "gxt4500fb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "gxt4500: cannot enable PCI device: %d\n",
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 886c564787f1..072ce07ba9e0 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -45,6 +45,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
@@ -74,10 +75,6 @@
#define SYNTHVID_DEPTH_WIN8 32
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
enum pipe_msg_type {
PIPE_MSG_INVALID,
PIPE_MSG_DATA,
@@ -1074,8 +1071,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- remove_conflicting_framebuffers(info->apertures,
- KBUILD_MODNAME, false);
+ aperture_remove_conflicting_devices(info->apertures->ranges[0].base,
+ info->apertures->ranges[0].size,
+ false, KBUILD_MODNAME);
if (gen2vm) {
/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
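
hyperv_fb is the one non-PCI conversion here: instead of handing info->apertures to remove_conflicting_framebuffers(), it passes the first aperture's base and size to aperture_remove_conflicting_devices(), keeping the old primary=false argument. A minimal sketch of that call with the four-argument signature used at this point in the series; the function and driver names are placeholders:

#include <linux/aperture.h>
#include <linux/types.h>

static int example_claim_fb_range(resource_size_t base, resource_size_t size)
{
        /*
         * The third argument carries over the old primary=false from
         * remove_conflicting_framebuffers(): only the given range is
         * detached, not the legacy VGA framebuffer range as well.
         */
        return aperture_remove_conflicting_devices(base, size, false,
                                                   "examplefb");
}
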
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index bd30d8314b68..b795f6503cb6 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -12,6 +12,7 @@
* i740fb by Patrick LERDA, v0.9
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1013,6 +1014,10 @@ static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
bool found = false;
u8 *edid;
+ ret = aperture_remove_conflicting_pci_devices(dev, "i740fb");
+ if (ret)
+ return ret;
+
info = framebuffer_alloc(sizeof(struct i740fb_par), &(dev->dev));
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index 13bbf7fe13bf..ff09f8c20bfc 100644
--- a/drivers/video/fbdev/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
@@ -2,12 +2,12 @@
* linux/drivers/video/i810_main.c -- Intel 810 frame buffer device
*
* Copyright (C) 2001 Antonino Daplas<adaplas@pol.net>
- * All Rights Reserved
+ * All Rights Reserved
*
* Contributors:
* Michael Vogt <mvogt@acm.org> - added support for Intel 815 chipsets
- * and enabling the power-on state of
- * external VGA connectors for
+ * and enabling the power-on state of
+ * external VGA connectors for
* secondary displays
*
* Fredrik Andersson <krueger@shell.linux.se> - alpha testing of
@@ -17,10 +17,10 @@
* timings support
*
* The code framework is a modification of vfb.c by Geert Uytterhoeven.
- * DotClock and PLL calculations are partly based on i810_driver.c
+ * DotClock and PLL calculations are partly based on i810_driver.c
* in xfree86 v4.0.3 by Precision Insight.
- * Watermark calculation and tables are based on i810_wmark.c
- * in xfre86 v4.0.3 by Precision Insight. Slight modifications
+ * Watermark calculation and tables are based on i810_wmark.c
+ * in xfre86 v4.0.3 by Precision Insight. Slight modifications
* only to allow for integer operations instead of floating point.
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -28,6 +28,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -204,8 +205,8 @@ static void i810_dram_off(u8 __iomem *mmio, u8 mode)
* @mode: protect/unprotect
*
* DESCRIPTION:
- * The IBM VGA standard allows protection of certain VGA registers.
- * This will protect or unprotect them.
+ * The IBM VGA standard allows protection of certain VGA registers.
+ * This will protect or unprotect them.
*/
static void i810_protect_regs(u8 __iomem *mmio, int mode)
{
@@ -215,7 +216,7 @@ static void i810_protect_regs(u8 __iomem *mmio, int mode)
reg = i810_readb(CR_DATA_CGA, mmio);
reg = (mode == OFF) ? reg & ~0x80 :
reg | 0x80;
-
+
i810_writeb(CR_INDEX_CGA, mmio, CR11);
i810_writeb(CR_DATA_CGA, mmio, reg);
}
@@ -225,18 +226,18 @@ static void i810_protect_regs(u8 __iomem *mmio, int mode)
* @par: pointer to i810fb_par structure
*
* DESCRIPTION:
- * Loads the P, M, and N registers.
+ * Loads the P, M, and N registers.
*/
static void i810_load_pll(struct i810fb_par *par)
{
u32 tmp1, tmp2;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
tmp1 = par->regs.M | par->regs.N << 16;
tmp2 = i810_readl(DCLK_2D, mmio);
tmp2 &= ~MN_MASK;
i810_writel(DCLK_2D, mmio, tmp1 | tmp2);
-
+
tmp1 = par->regs.P;
tmp2 = i810_readl(DCLK_0DS, mmio);
tmp2 &= ~(P_OR << 16);
@@ -254,7 +255,7 @@ static void i810_load_pll(struct i810fb_par *par)
* Load values to VGA registers
*/
static void i810_load_vga(struct i810fb_par *par)
-{
+{
u8 __iomem *mmio = par->mmio_start_virtual;
/* interlace */
@@ -327,7 +328,7 @@ static void i810_load_2d(struct i810fb_par *par)
u8 tmp8;
u8 __iomem *mmio = par->mmio_start_virtual;
- i810_writel(FW_BLC, mmio, par->watermark);
+ i810_writel(FW_BLC, mmio, par->watermark);
tmp = i810_readl(PIXCONF, mmio);
tmp |= 1 | 1 << 20;
i810_writel(PIXCONF, mmio, tmp);
@@ -339,7 +340,7 @@ static void i810_load_2d(struct i810fb_par *par)
tmp8 |= 2;
i810_writeb(GR_INDEX, mmio, GR10);
i810_writeb(GR_DATA, mmio, tmp8);
-}
+}
/**
* i810_hires - enables high resolution mode
@@ -348,7 +349,7 @@ static void i810_load_2d(struct i810fb_par *par)
static void i810_hires(u8 __iomem *mmio)
{
u8 val;
-
+
i810_writeb(CR_INDEX_CGA, mmio, CR80);
val = i810_readb(CR_DATA_CGA, mmio);
i810_writeb(CR_INDEX_CGA, mmio, CR80);
@@ -363,13 +364,13 @@ static void i810_hires(u8 __iomem *mmio)
*
* DESCRIPTION:
* Loads the characters per line
- */
+ */
static void i810_load_pitch(struct i810fb_par *par)
{
u32 tmp, pitch;
u8 val;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
pitch = par->pitch >> 3;
i810_writeb(SR_INDEX, mmio, SR01);
val = i810_readb(SR_DATA, mmio);
@@ -381,7 +382,7 @@ static void i810_load_pitch(struct i810fb_par *par)
tmp = pitch & 0xFF;
i810_writeb(CR_INDEX_CGA, mmio, CR13);
i810_writeb(CR_DATA_CGA, mmio, (u8) tmp);
-
+
tmp = pitch >> 8;
i810_writeb(CR_INDEX_CGA, mmio, CR41);
val = i810_readb(CR_DATA_CGA, mmio) & ~0x0F;
@@ -414,7 +415,7 @@ static void i810_load_color(struct i810fb_par *par)
/**
* i810_load_regs - loads all registers for the mode
* @par: pointer to i810fb_par structure
- *
+ *
* DESCRIPTION:
* Loads registers
*/
@@ -428,7 +429,7 @@ static void i810_load_regs(struct i810fb_par *par)
i810_load_pll(par);
i810_load_vga(par);
i810_load_vgax(par);
- i810_dram_off(mmio, ON);
+ i810_dram_off(mmio, ON);
i810_load_2d(par);
i810_hires(mmio);
i810_screen_off(mmio, ON);
@@ -443,7 +444,7 @@ static void i810_write_dac(u8 regno, u8 red, u8 green, u8 blue,
i810_writeb(CLUT_INDEX_WRITE, mmio, regno);
i810_writeb(CLUT_DATA, mmio, red);
i810_writeb(CLUT_DATA, mmio, green);
- i810_writeb(CLUT_DATA, mmio, blue);
+ i810_writeb(CLUT_DATA, mmio, blue);
}
static void i810_read_dac(u8 regno, u8 *red, u8 *green, u8 *blue,
@@ -456,13 +457,13 @@ static void i810_read_dac(u8 regno, u8 *red, u8 *green, u8 *blue,
}
/************************************************************
- * VGA State Restore *
+ * VGA State Restore *
************************************************************/
static void i810_restore_pll(struct i810fb_par *par)
{
u32 tmp1, tmp2;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
tmp1 = par->hw_state.dclk_2d;
tmp2 = i810_readl(DCLK_2D, mmio);
tmp1 &= ~MN_MASK;
@@ -494,7 +495,7 @@ static void i810_restore_vgax(struct i810fb_par *par)
{
u8 i, j;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
for (i = 0; i < 4; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR30+i);
i810_writeb(CR_DATA_CGA, mmio, *(&(par->hw_state.cr30) + i));
@@ -528,7 +529,7 @@ static void i810_restore_vga(struct i810fb_par *par)
{
u8 i;
u8 __iomem *mmio = par->mmio_start_virtual;
-
+
for (i = 0; i < 10; i++) {
i810_writeb(CR_INDEX_CGA, mmio, CR00 + i);
i810_writeb(CR_DATA_CGA, mmio, *((&par->hw_state.cr00) + i));
@@ -559,10 +560,10 @@ static void i810_restore_2d(struct i810fb_par *par)
u8 __iomem *mmio = par->mmio_start_virtual;
tmp_word = i810_readw(BLTCNTL, mmio);
- tmp_word &= ~(3 << 4);
+ tmp_word &= ~(3 << 4);
tmp_word |= par->hw_state.bltcntl;
i810_writew(BLTCNTL, mmio, tmp_word);
-
+
i810_dram_off(mmio, OFF);
i810_writel(PIXCONF, mmio, par->hw_state.pixconf);
i810_dram_off(mmio, ON);
@@ -577,7 +578,7 @@ static void i810_restore_2d(struct i810fb_par *par)
tmp_long |= par->hw_state.fw_blc;
i810_writel(FW_BLC, mmio, tmp_long);
- i810_writel(HWS_PGA, mmio, par->hw_state.hws_pga);
+ i810_writel(HWS_PGA, mmio, par->hw_state.hws_pga);
i810_writew(IER, mmio, par->hw_state.ier);
i810_writew(IMR, mmio, par->hw_state.imr);
i810_writel(DPLYSTAS, mmio, par->hw_state.dplystas);
@@ -621,7 +622,7 @@ static void i810_save_vgax(struct i810fb_par *par)
i810_writeb(CR_INDEX_CGA, mmio, CR41);
par->hw_state.cr41 = i810_readb(CR_DATA_CGA, mmio);
i810_writeb(CR_INDEX_CGA, mmio, CR70);
- par->hw_state.cr70 = i810_readb(CR_DATA_CGA, mmio);
+ par->hw_state.cr70 = i810_readb(CR_DATA_CGA, mmio);
par->hw_state.msr = i810_readb(MSR_READ, mmio);
i810_writeb(CR_INDEX_CGA, mmio, CR80);
par->hw_state.cr80 = i810_readb(CR_DATA_CGA, mmio);
@@ -654,8 +655,8 @@ static void i810_save_2d(struct i810fb_par *par)
par->hw_state.pixconf = i810_readl(PIXCONF, mmio);
par->hw_state.fw_blc = i810_readl(FW_BLC, mmio);
par->hw_state.bltcntl = i810_readw(BLTCNTL, mmio);
- par->hw_state.hwstam = i810_readw(HWSTAM, mmio);
- par->hw_state.hws_pga = i810_readl(HWS_PGA, mmio);
+ par->hw_state.hwstam = i810_readw(HWSTAM, mmio);
+ par->hw_state.hws_pga = i810_readl(HWS_PGA, mmio);
par->hw_state.ier = i810_readw(IER, mmio);
par->hw_state.imr = i810_readw(IMR, mmio);
par->hw_state.dplystas = i810_readl(DPLYSTAS, mmio);
@@ -669,7 +670,7 @@ static void i810_save_vga_state(struct i810fb_par *par)
}
/************************************************************
- * Helpers *
+ * Helpers *
************************************************************/
/**
* get_line_length - calculates buffer pitch in bytes
@@ -678,12 +679,12 @@ static void i810_save_vga_state(struct i810fb_par *par)
* @bpp: bits per pixel
*
* DESCRIPTION:
- * Calculates buffer pitch in bytes.
+ * Calculates buffer pitch in bytes.
*/
static u32 get_line_length(struct i810fb_par *par, int xres_virtual, int bpp)
{
u32 length;
-
+
length = xres_virtual*bpp;
length = (length+31)&-32;
length >>= 3;
@@ -716,17 +717,17 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
n_target_max = 30;
/*
- * find P such that target freq is 16x reference freq (Hz).
+ * find P such that target freq is 16x reference freq (Hz).
*/
p_divisor = 1;
p_target = 0;
- while(!((1000000 * p_divisor)/(16 * 24 * target_freq)) &&
+ while(!((1000000 * p_divisor)/(16 * 24 * target_freq)) &&
p_divisor <= 32) {
p_divisor <<= 1;
p_target++;
}
- n_reg = m_reg = n_target = 3;
+ n_reg = m_reg = n_target = 3;
while (diff_min && mod_min && (n_target < n_target_max)) {
f_out = (p_divisor * n_reg * 1000000)/(4 * 24 * m_reg);
mod = (p_divisor * n_reg * 1000000) % (4 * 24 * m_reg);
@@ -744,14 +745,14 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
diff_min = diff;
n_best = n_target;
m_best = m_target;
- }
+ }
if (!diff && mod_min > mod) {
mod_min = mod;
n_best = n_target;
m_best = m_target;
}
- }
+ }
if (m) *m = (m_best - 2) & 0x3FF;
if (n) *n = (n_best - 2) & 0x3FF;
if (p) *p = (p_target << 4);
@@ -772,7 +773,7 @@ static void i810_calc_dclk(u32 freq, u32 *m, u32 *n, u32 *p)
static void i810_enable_cursor(u8 __iomem *mmio, int mode)
{
u32 temp;
-
+
temp = i810_readl(PIXCONF, mmio);
temp = (mode == ON) ? temp | CURSOR_ENABLE_MASK :
temp & ~CURSOR_ENABLE_MASK;
@@ -786,10 +787,10 @@ static void i810_reset_cursor_image(struct i810fb_par *par)
int i, j;
for (i = 64; i--; ) {
- for (j = 0; j < 8; j++) {
- i810_writeb(j, addr, 0xff);
- i810_writeb(j+8, addr, 0x00);
- }
+ for (j = 0; j < 8; j++) {
+ i810_writeb(j, addr, 0xff);
+ i810_writeb(j+8, addr, 0x00);
+ }
addr +=16;
}
}
@@ -800,9 +801,9 @@ static void i810_load_cursor_image(int width, int height, u8 *data,
u8 __iomem *addr = par->cursor_heap.virtual;
int i, j, w = width/8;
int mod = width % 8, t_mask, d_mask;
-
+
t_mask = 0xff >> mod;
- d_mask = ~(0xff >> mod);
+ d_mask = ~(0xff >> mod);
for (i = height; i--; ) {
for (j = 0; j < w; j++) {
i810_writeb(j+0, addr, 0x00);
@@ -854,7 +855,7 @@ static void i810_init_cursor(struct i810fb_par *par)
i810_enable_cursor(mmio, OFF);
i810_writel(CURBASE, mmio, par->cursor_heap.physical);
i810_writew(CURCNTR, mmio, COORD_ACTIVE | CURSOR_MODE_64_XOR);
-}
+}
/*********************************************************************
* Framebuffer hook helpers *
@@ -873,7 +874,7 @@ static void i810_round_off(struct fb_var_screeninfo *var)
u32 xres, yres, vxres, vyres;
/*
- * Presently supports only these configurations
+ * Presently supports only these configurations
*/
xres = var->xres;
@@ -883,20 +884,20 @@ static void i810_round_off(struct fb_var_screeninfo *var)
var->bits_per_pixel += 7;
var->bits_per_pixel &= ~7;
-
+
if (var->bits_per_pixel < 8)
var->bits_per_pixel = 8;
- if (var->bits_per_pixel > 32)
+ if (var->bits_per_pixel > 32)
var->bits_per_pixel = 32;
round_off_xres(&xres);
if (xres < 40)
xres = 40;
- if (xres > 2048)
+ if (xres > 2048)
xres = 2048;
xres = (xres + 7) & ~7;
- if (vxres < xres)
+ if (vxres < xres)
vxres = xres;
round_off_yres(&xres, &yres);
@@ -905,7 +906,7 @@ static void i810_round_off(struct fb_var_screeninfo *var)
if (yres >= 2048)
yres = 2048;
- if (vyres < yres)
+ if (vyres < yres)
vyres = yres;
if (var->bits_per_pixel == 32)
@@ -917,30 +918,30 @@ static void i810_round_off(struct fb_var_screeninfo *var)
var->hsync_len = (var->hsync_len + 4) & ~7;
if (var->vmode & FB_VMODE_INTERLACED) {
- if (!((yres + var->upper_margin + var->vsync_len +
+ if (!((yres + var->upper_margin + var->vsync_len +
var->lower_margin) & 1))
var->upper_margin++;
}
-
+
var->xres = xres;
var->yres = yres;
var->xres_virtual = vxres;
var->yres_virtual = vyres;
-}
+}
/**
* set_color_bitfields - sets rgba fields
* @var: pointer to fb_var_screeninfo
*
* DESCRIPTION:
- * The length, offset and ordering for each color field
- * (red, green, blue) will be set as specified
+ * The length, offset and ordering for each color field
+ * (red, green, blue) will be set as specified
* by the hardware
- */
+ */
static void set_color_bitfields(struct fb_var_screeninfo *var)
{
switch (var->bits_per_pixel) {
- case 8:
+ case 8:
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
@@ -984,11 +985,11 @@ static void set_color_bitfields(struct fb_var_screeninfo *var)
* @info: pointer to fb_info
*
* DESCRIPTION:
- * This will check if the framebuffer size is sufficient
- * for the current mode and if the user's monitor has the
+ * This will check if the framebuffer size is sufficient
+ * for the current mode and if the user's monitor has the
* required specifications to display the current mode.
*/
-static int i810_check_params(struct fb_var_screeninfo *var,
+static int i810_check_params(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
@@ -1007,14 +1008,14 @@ static int i810_check_params(struct fb_var_screeninfo *var,
vyres = info->var.yres;
vxres = par->fb.size/vyres;
vxres /= var->bits_per_pixel >> 3;
- line_length = get_line_length(par, vxres,
+ line_length = get_line_length(par, vxres,
var->bits_per_pixel);
vidmem = line_length * info->var.yres;
if (vxres < var->xres) {
printk("i810fb: required video memory, "
"%d bytes, for %dx%d-%d (virtual) "
- "is out of range\n",
- vidmem, vxres, vyres,
+ "is out of range\n",
+ vidmem, vxres, vyres,
var->bits_per_pixel);
return -ENOMEM;
}
@@ -1074,7 +1075,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
}
return retval;
-}
+}
/**
* encode_fix - fill up fb_fix_screeninfo structure
@@ -1131,9 +1132,9 @@ static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
*
* DESCRIPTION:
* Based on the contents of @var, @par will be dynamically filled up.
- * @par contains all information necessary to modify the hardware.
+ * @par contains all information necessary to modify the hardware.
*/
-static void decode_var(const struct fb_var_screeninfo *var,
+static void decode_var(const struct fb_var_screeninfo *var,
struct i810fb_par *par)
{
u32 xres, yres, vxres, vyres;
@@ -1175,13 +1176,13 @@ static void decode_var(const struct fb_var_screeninfo *var,
if (var->nonstd && var->bits_per_pixel != 8)
par->pixconf |= 1 << 27;
- i810_calc_dclk(var->pixclock, &par->regs.M,
+ i810_calc_dclk(var->pixclock, &par->regs.M,
&par->regs.N, &par->regs.P);
i810fb_encode_registers(var, par, xres, yres);
par->watermark = i810_get_watermark(var, par);
par->pitch = get_line_length(par, vxres, var->bits_per_pixel);
-}
+}
/**
* i810fb_getcolreg - gets red, green and blue values of the hardware DAC
@@ -1196,7 +1197,7 @@ static void decode_var(const struct fb_var_screeninfo *var,
* Gets the red, green and blue values of the hardware DAC as pointed by @regno
* and writes them to @red, @green and @blue respectively
*/
-static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
+static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
u8 *transp, struct fb_info *info)
{
struct i810fb_par *par = info->par;
@@ -1212,18 +1213,18 @@ static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
temp = i810_readb(PIXCONF1, mmio);
i810_writeb(PIXCONF1, mmio, temp & ~EXTENDED_PALETTE);
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
- info->var.green.length == 5)
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ info->var.green.length == 5)
i810_read_dac(regno * 8, red, green, blue, mmio);
- else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
info->var.green.length == 6) {
u8 tmp;
i810_read_dac(regno * 8, red, &tmp, blue, mmio);
i810_read_dac(regno * 4, &tmp, green, &tmp, mmio);
}
- else
+ else
i810_read_dac(regno, red, green, blue, mmio);
*transp = 0;
@@ -1232,7 +1233,7 @@ static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
return 0;
}
-/******************************************************************
+/******************************************************************
* Framebuffer device-specific hooks *
******************************************************************/
@@ -1252,7 +1253,7 @@ static int i810fb_open(struct fb_info *info, int user)
par->use_count++;
mutex_unlock(&par->open_lock);
-
+
return 0;
}
@@ -1273,13 +1274,13 @@ static int i810fb_release(struct fb_info *info, int user)
par->use_count--;
mutex_unlock(&par->open_lock);
-
+
return 0;
}
-static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
+static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
@@ -1302,24 +1303,24 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
temp = i810_readb(PIXCONF1, mmio);
i810_writeb(PIXCONF1, mmio, temp & ~EXTENDED_PALETTE);
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
info->var.green.length == 5) {
- for (i = 0; i < 8; i++)
- i810_write_dac((u8) (regno * 8) + i, (u8) red,
+ for (i = 0; i < 8; i++)
+ i810_write_dac((u8) (regno * 8) + i, (u8) red,
(u8) green, (u8) blue, mmio);
- } else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
+ } else if (info->fix.visual == FB_VISUAL_DIRECTCOLOR &&
info->var.green.length == 6) {
u8 r, g, b;
if (regno < 32) {
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++)
i810_write_dac((u8) (regno * 8) + i,
- (u8) red, (u8) green,
+ (u8) red, (u8) green,
(u8) blue, mmio);
}
i810_read_dac((u8) (regno*4), &r, &g, &b, mmio);
- for (i = 0; i < 4; i++)
- i810_write_dac((u8) (regno*4) + i, r, (u8) green,
+ for (i = 0; i < 4; i++)
+ i810_write_dac((u8) (regno*4) + i, r, (u8) green,
b, mmio);
} else if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) {
i810_write_dac((u8) regno, (u8) red, (u8) green,
@@ -1330,20 +1331,20 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
if (regno < 16) {
switch (info->var.bits_per_pixel) {
- case 16:
+ case 16:
if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- if (info->var.green.length == 5)
- ((u32 *)info->pseudo_palette)[regno] =
+ if (info->var.green.length == 5)
+ ((u32 *)info->pseudo_palette)[regno] =
(regno << 10) | (regno << 5) |
regno;
else
- ((u32 *)info->pseudo_palette)[regno] =
+ ((u32 *)info->pseudo_palette)[regno] =
(regno << 11) | (regno << 5) |
regno;
} else {
if (info->var.green.length == 5) {
/* RGB 555 */
- ((u32 *)info->pseudo_palette)[regno] =
+ ((u32 *)info->pseudo_palette)[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
@@ -1358,12 +1359,12 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
break;
case 24: /* RGB 888 */
case 32: /* RGBA 8888 */
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
- ((u32 *)info->pseudo_palette)[regno] =
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ ((u32 *)info->pseudo_palette)[regno] =
(regno << 16) | (regno << 8) |
regno;
- else
- ((u32 *)info->pseudo_palette)[regno] =
+ else
+ ((u32 *)info->pseudo_palette)[regno] =
((red & 0xff00) << 8) |
(green & 0xff00) |
((blue & 0xff00) >> 8);
@@ -1373,13 +1374,13 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-static int i810fb_pan_display(struct fb_var_screeninfo *var,
+static int i810fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct i810fb_par *par = info->par;
u32 total;
-
- total = var->xoffset * par->depth +
+
+ total = var->xoffset * par->depth +
var->yoffset * info->fix.line_length;
i810fb_load_front(total, info);
@@ -1391,7 +1392,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info)
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
int mode = 0, pwr, scr_off = 0;
-
+
pwr = i810_readl(PWR_CLKC, mmio);
switch (blank_mode) {
@@ -1421,7 +1422,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info)
scr_off = OFF;
break;
default:
- return -EINVAL;
+ return -EINVAL;
}
i810_screen_off(mmio, scr_off);
@@ -1452,7 +1453,7 @@ static int i810fb_set_par(struct fb_info *info)
return 0;
}
-static int i810fb_check_var(struct fb_var_screeninfo *var,
+static int i810fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int err;
@@ -1550,7 +1551,7 @@ static const struct fb_ops i810fb_ops = {
.fb_set_par = i810fb_set_par,
.fb_setcolreg = i810fb_setcolreg,
.fb_blank = i810fb_blank,
- .fb_pan_display = i810fb_pan_display,
+ .fb_pan_display = i810fb_pan_display,
.fb_fillrect = i810fb_fillrect,
.fb_copyarea = i810fb_copyarea,
.fb_imageblit = i810fb_imageblit,
@@ -1593,7 +1594,7 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t mesg)
return 0;
}
-static int i810fb_resume(struct pci_dev *dev)
+static int i810fb_resume(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
struct i810fb_par *par = info->par;
@@ -1628,14 +1629,14 @@ fail:
/***********************************************************************
* AGP resource allocation *
***********************************************************************/
-
+
static void i810_fix_pointers(struct i810fb_par *par)
{
par->fb.physical = par->aperture.physical+(par->fb.offset << 12);
par->fb.virtual = par->aperture.virtual+(par->fb.offset << 12);
- par->iring.physical = par->aperture.physical +
+ par->iring.physical = par->aperture.physical +
(par->iring.offset << 12);
- par->iring.virtual = par->aperture.virtual +
+ par->iring.virtual = par->aperture.virtual +
(par->iring.offset << 12);
par->cursor_heap.virtual = par->aperture.virtual+
(par->cursor_heap.offset << 12);
@@ -1666,7 +1667,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
struct i810fb_par *par = info->par;
int size;
struct agp_bridge_data *bridge;
-
+
i810_fix_offsets(par);
size = par->fb.size + par->iring.size;
@@ -1674,7 +1675,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
printk("i810fb_alloc_fbmem: cannot acquire agpgart\n");
return -ENODEV;
}
- if (!(par->i810_gtt.i810_fb_memory =
+ if (!(par->i810_gtt.i810_fb_memory =
agp_allocate_memory(bridge, size >> 12, AGP_NORMAL_MEMORY))) {
printk("i810fb_alloc_fbmem: can't allocate framebuffer "
"memory\n");
@@ -1686,9 +1687,9 @@ static int i810_alloc_agp_mem(struct fb_info *info)
printk("i810fb_alloc_fbmem: can't bind framebuffer memory\n");
agp_backend_release(bridge);
return -EBUSY;
- }
-
- if (!(par->i810_gtt.i810_cursor_memory =
+ }
+
+ if (!(par->i810_gtt.i810_cursor_memory =
agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
AGP_PHYSICAL_MEMORY))) {
printk("i810fb_alloc_cursormem: can't allocate "
@@ -1701,7 +1702,7 @@ static int i810_alloc_agp_mem(struct fb_info *info)
printk("i810fb_alloc_cursormem: cannot bind cursor memory\n");
agp_backend_release(bridge);
return -EBUSY;
- }
+ }
par->cursor_heap.physical = par->i810_gtt.i810_cursor_memory->physical;
@@ -1712,8 +1713,8 @@ static int i810_alloc_agp_mem(struct fb_info *info)
return 0;
}
-/***************************************************************
- * Initialization *
+/***************************************************************
+ * Initialization *
***************************************************************/
/**
@@ -1728,7 +1729,7 @@ static void i810_init_monspecs(struct fb_info *info)
{
if (!hsync1)
hsync1 = HFMIN;
- if (!hsync2)
+ if (!hsync2)
hsync2 = HFMAX;
if (!info->monspecs.hfmax)
info->monspecs.hfmax = hsync2;
@@ -1739,7 +1740,7 @@ static void i810_init_monspecs(struct fb_info *info)
if (!vsync1)
vsync1 = VFMIN;
- if (!vsync2)
+ if (!vsync2)
vsync2 = VFMAX;
if (IS_DVT && vsync1 < 60)
vsync1 = 60;
@@ -1747,7 +1748,7 @@ static void i810_init_monspecs(struct fb_info *info)
info->monspecs.vfmax = vsync2;
if (!info->monspecs.vfmin)
info->monspecs.vfmin = vsync1;
- if (vsync2 < vsync1)
+ if (vsync2 < vsync1)
info->monspecs.vfmin = vsync2;
}
@@ -1760,27 +1761,27 @@ static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
{
mutex_init(&par->open_lock);
- if (voffset)
+ if (voffset)
v_offset_default = voffset;
else if (par->aperture.size > 32 * 1024 * 1024)
v_offset_default = 16;
else
v_offset_default = 8;
- if (!vram)
+ if (!vram)
vram = 1;
- if (accel)
+ if (accel)
par->dev_flags |= HAS_ACCELERATION;
- if (sync)
+ if (sync)
par->dev_flags |= ALWAYS_SYNC;
par->ddc_num = (ddc3 ? 3 : 2);
if (bpp < 8)
bpp = 8;
-
+
par->i810fb_ops = i810fb_ops;
if (xres)
@@ -1793,7 +1794,7 @@ static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
else
info->var.yres = 480;
- if (!vyres)
+ if (!vyres)
vyres = (vram << 20)/(info->var.xres*bpp >> 3);
info->var.yres_virtual = vyres;
@@ -1802,12 +1803,12 @@ static void i810_init_defaults(struct i810fb_par *par, struct fb_info *info)
if (dcolor)
info->var.nonstd = 1;
- if (par->dev_flags & HAS_ACCELERATION)
+ if (par->dev_flags & HAS_ACCELERATION)
info->var.accel_flags = 1;
i810_init_monspecs(info);
}
-
+
/**
* i810_init_device - initialize device
* @par: pointer to i810fb_par structure
@@ -1840,9 +1841,9 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
{
int err;
- if ((err = pci_enable_device(par->dev))) {
+ if ((err = pci_enable_device(par->dev))) {
printk("i810fb_init: cannot enable device\n");
- return err;
+ return err;
}
par->res_flags |= PCI_DEVICE_ENABLED;
@@ -1860,8 +1861,8 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
return -ENOMEM;
}
- if (!request_mem_region(par->aperture.physical,
- par->aperture.size,
+ if (!request_mem_region(par->aperture.physical,
+ par->aperture.size,
i810_pci_list[entry->driver_data])) {
printk("i810fb_init: cannot request framebuffer region\n");
return -ENODEV;
@@ -1874,16 +1875,16 @@ static int i810_allocate_pci_resource(struct i810fb_par *par,
printk("i810fb_init: cannot remap framebuffer region\n");
return -ENODEV;
}
-
- if (!request_mem_region(par->mmio_start_phys,
- MMIO_SIZE,
+
+ if (!request_mem_region(par->mmio_start_phys,
+ MMIO_SIZE,
i810_pci_list[entry->driver_data])) {
printk("i810fb_init: cannot request mmio region\n");
return -ENODEV;
}
par->res_flags |= MMIO_REQ;
- par->mmio_start_virtual = ioremap(par->mmio_start_phys,
+ par->mmio_start_virtual = ioremap(par->mmio_start_phys,
MMIO_SIZE);
if (!par->mmio_start_virtual) {
printk("i810fb_init: cannot remap mmio region\n");
@@ -1963,7 +1964,7 @@ static int i810fb_setup(char *options)
if (!options || !*options)
return 0;
-
+
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "mtrr", 4))
mtrr = true;
@@ -1987,13 +1988,13 @@ static int i810fb_setup(char *options)
bpp = simple_strtoul(this_opt+4, NULL, 0);
else if (!strncmp(this_opt, "hsync1:", 7)) {
hsync1 = simple_strtoul(this_opt+7, &suffix, 0);
- if (strncmp(suffix, "H", 1))
+ if (strncmp(suffix, "H", 1))
hsync1 *= 1000;
} else if (!strncmp(this_opt, "hsync2:", 7)) {
hsync2 = simple_strtoul(this_opt+7, &suffix, 0);
- if (strncmp(suffix, "H", 1))
+ if (strncmp(suffix, "H", 1))
hsync2 *= 1000;
- } else if (!strncmp(this_opt, "vsync1:", 7))
+ } else if (!strncmp(this_opt, "vsync1:", 7))
vsync1 = simple_strtoul(this_opt+7, NULL, 0);
else if (!strncmp(this_opt, "vsync2:", 7))
vsync2 = simple_strtoul(this_opt+7, NULL, 0);
@@ -2016,6 +2017,10 @@ static int i810fb_init_pci(struct pci_dev *dev,
struct fb_videomode mode;
int err = -1, vfreq, hfreq, pixclock;
+ err = aperture_remove_conflicting_pci_devices(dev, "i810fb");
+ if (err)
+ return err;
+
info = framebuffer_alloc(sizeof(struct i810fb_par), &dev->dev);
if (!info)
return -ENOMEM;
@@ -2044,7 +2049,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
return err;
}
- i810_init_device(par);
+ i810_init_device(par);
info->screen_base = par->fb.virtual;
info->fbops = &par->i810fb_ops;
@@ -2064,21 +2069,21 @@ static int i810fb_init_pci(struct pci_dev *dev,
err = register_framebuffer(info);
if (err < 0) {
- i810fb_release_resource(info, par);
+ i810fb_release_resource(info, par);
printk("i810fb_init: cannot register framebuffer device\n");
- return err;
- }
+ return err;
+ }
pci_set_drvdata(dev, info);
pixclock = 1000000000/(info->var.pixclock);
pixclock *= 1000;
- hfreq = pixclock/(info->var.xres + info->var.left_margin +
+ hfreq = pixclock/(info->var.xres + info->var.left_margin +
info->var.hsync_len + info->var.right_margin);
vfreq = hfreq/(info->var.yres + info->var.upper_margin +
info->var.vsync_len + info->var.lower_margin);
printk("I810FB: fb%d : %s v%d.%d.%d%s\n"
- "I810FB: Video RAM : %dK\n"
+ "I810FB: Video RAM : %dK\n"
"I810FB: Monitor : H: %d-%d KHz V: %d-%d Hz\n"
"I810FB: Mode : %dx%d-%dbpp@%dHz\n",
info->node,
@@ -2086,7 +2091,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
VERSION_MAJOR, VERSION_MINOR, VERSION_TEENIE, BRANCH_VERSION,
(int) par->fb.size>>10, info->monspecs.hfmin/1000,
info->monspecs.hfmax/1000, info->monspecs.vfmin,
- info->monspecs.vfmax, info->var.xres,
+ info->monspecs.vfmax, info->var.xres,
info->var.yres, info->var.bits_per_pixel, vfreq);
return 0;
}
@@ -2095,7 +2100,7 @@ static int i810fb_init_pci(struct pci_dev *dev,
* De-initialization *
***************************************************************/
-static void i810fb_release_resource(struct fb_info *info,
+static void i810fb_release_resource(struct fb_info *info,
struct i810fb_par *par)
{
struct gtt_data *gtt = &par->i810_gtt;
@@ -2128,10 +2133,10 @@ static void i810fb_remove_pci(struct pci_dev *dev)
struct fb_info *info = pci_get_drvdata(dev);
struct i810fb_par *par = info->par;
- unregister_framebuffer(info);
+ unregister_framebuffer(info);
i810fb_release_resource(info, par);
printk("cleanup_module: unloaded i810 framebuffer device\n");
-}
+}
#ifndef MODULE
static int i810fb_init(void)
@@ -2144,7 +2149,7 @@ static int i810fb_init(void)
return pci_register_driver(&i810fb_driver);
}
-#endif
+#endif
/*********************************************************************
* Modularization *
@@ -2161,7 +2166,7 @@ static int i810fb_init(void)
}
module_param(vram, int, 0);
-MODULE_PARM_DESC(vram, "System RAM to allocate to framebuffer in MiB"
+MODULE_PARM_DESC(vram, "System RAM to allocate to framebuffer in MiB"
" (default=4)");
module_param(voffset, int, 0);
MODULE_PARM_DESC(voffset, "at what offset to place start of framebuffer "
@@ -2186,7 +2191,7 @@ module_param(vsync1, int, 0);
MODULE_PARM_DESC(vsync1, "Minimum vertical frequency of monitor in Hz"
" (default = 50)");
module_param(vsync2, int, 0);
-MODULE_PARM_DESC(vsync2, "Maximum vertical frequency of monitor in Hz"
+MODULE_PARM_DESC(vsync2, "Maximum vertical frequency of monitor in Hz"
" (default = 60)");
module_param(accel, bool, 0);
MODULE_PARM_DESC(accel, "Use Acceleration (BLIT) engine (default = 0)");
@@ -2208,7 +2213,7 @@ MODULE_PARM_DESC(mode_option, "Specify initial video mode");
MODULE_AUTHOR("Tony A. Daplas");
MODULE_DESCRIPTION("Framebuffer device for the Intel 810/815 and"
" compatible cards");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL");
static void __exit i810fb_exit(void)
{
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 16f272a50811..d7edb9c5d3a3 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -16,6 +16,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -86,7 +87,7 @@ enum {
SSTATUS = 36, /* 0x90 */
PRC = 37, /* 0x94 */
-#if 0
+#if 0
/* PCI Registers */
DVID = 0x00000000L,
SC = 0x00000004L,
@@ -103,8 +104,8 @@ enum {
PDATA = 0x04,
PPMASK = 0x08,
PADDRR = 0x0c,
- PIDXLO = 0x10,
- PIDXHI = 0x14,
+ PIDXLO = 0x10,
+ PIDXHI = 0x14,
PIDXDATA= 0x18,
PIDXCTL = 0x1c
};
@@ -131,7 +132,7 @@ enum {
SYSCLKC = 0x18, /* () System Clock C */
/*
* Dot clock rate is 20MHz * (m + 1) / ((n + 1) * (p ? 2 * p : 1)
- * c is charge pump bias which depends on the VCO frequency
+ * c is charge pump bias which depends on the VCO frequency
*/
PIXM0 = 0x20, /* () Pixel M 0 */
PIXN0 = 0x21, /* () Pixel N 0 */
@@ -320,7 +321,7 @@ struct imstt_par {
__u32 ramdac;
__u32 palette[16];
};
-
+
enum {
IBM = 0,
TVP = 1
@@ -373,7 +374,7 @@ static struct imstt_regvals tvp_reg_init_17 = {
static struct imstt_regvals tvp_reg_init_18 = {
1152,
- 0x0009, 0x0011, 0x059, 0x5b, 0x0003, 0x0031, 0x0397, 0x039a, 0x0000,
+ 0x0009, 0x0011, 0x059, 0x5b, 0x0003, 0x0031, 0x0397, 0x039a, 0x0000,
0xfd, 0x3a, 0xf1,
{ 0x39, 0x38, 0x38 }, { 0xf3, 0xf3, 0xf2 }
};
@@ -856,10 +857,10 @@ imsttfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
static int
-imsttfb_set_par(struct fb_info *info)
+imsttfb_set_par(struct fb_info *info)
{
struct imstt_par *par = info->par;
-
+
if (!compute_imstt_regvals(par, info->var.xres, info->var.yres))
return -EINVAL;
@@ -930,7 +931,7 @@ imsttfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-static int
+static int
imsttfb_blank(int blank, struct fb_info *info)
{
struct imstt_par *par = info->par;
@@ -986,7 +987,7 @@ imsttfb_blank(int blank, struct fb_info *info)
static void
imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
+{
struct imstt_par *par = info->par;
__u32 Bpp, line_pitch, bgc, dx, dy, width, height;
@@ -1192,7 +1193,7 @@ imstt_set_cursor(struct imstt_par *par, struct fb_image *d, int on)
}
}
-static int
+static int
imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct imstt_par *par = info->par;
@@ -1200,7 +1201,7 @@ imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
if (cursor->dest == NULL && cursor->rop == ROP_XOR)
return 1;
-
+
imstt_set_cursor(info, cursor, 0);
if (flags & FB_CUR_SETPOS) {
@@ -1469,8 +1470,13 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct imstt_par *par;
struct fb_info *info;
struct device_node *dp;
- int ret = -ENOMEM;
-
+ int ret;
+
+ ret = aperture_remove_conflicting_pci_devices(pdev, "imsttfb");
+ if (ret)
+ return ret;
+ ret = -ENOMEM;
+
dp = pci_device_to_OF_node(pdev);
if(dp)
printk(KERN_INFO "%s: OF name %pOFn\n",__func__, dp);
@@ -1619,7 +1625,7 @@ static int __init imsttfb_init(void)
#endif
return pci_register_driver(&imsttfb_pci_driver);
}
-
+
static void __exit imsttfb_exit(void)
{
pci_unregister_driver(&imsttfb_pci_driver);
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index 5647fca8c49a..d4a2891a9a7a 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -107,6 +107,7 @@
* Add support for 945GME. (Phil Endecott <spam_from_intelfb@chezphil.org>)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -483,6 +484,10 @@ static int intelfb_pci_register(struct pci_dev *pdev,
DBG_MSG("intelfb_pci_register\n");
+ err = aperture_remove_conflicting_pci_devices(pdev, "intelfb");
+ if (err)
+ return err;
+
num_registered++;
if (num_registered != 1) {
ERR_MSG("Attempted to register %d devices "
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index d57772f96ad2..b4b93054c520 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -9,6 +9,7 @@
* for more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -676,6 +677,10 @@ static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long size;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "kyrofb");
+ if (err)
+ return err;
+
if ((err = pci_enable_device(pdev))) {
printk(KERN_WARNING "kyrofb: Can't enable pdev: %d\n", err);
return err;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 68bba2688f4c..775d34115e2d 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -100,6 +100,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/version.h>
#include "matroxfb_base.h"
@@ -2044,6 +2045,10 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
u_int32_t cmd;
DBG(__func__)
+ err = aperture_remove_conflicting_pci_devices(pdev, "matroxfb");
+ if (err)
+ return err;
+
svid = pdev->subsystem_vendor;
sid = pdev->subsystem_device;
for (b = dev_list; b->vendor; b++) {
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index 9a98c4a6ba33..f2e02958673d 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
@@ -1276,11 +1276,10 @@ ERROR0:;
return err;
}
-static int maven_remove(struct i2c_client *client)
+static void maven_remove(struct i2c_client *client)
{
maven_shutdown_client(client);
kfree(i2c_get_clientdata(client));
- return 0;
}
static const struct i2c_device_id maven_id[] = {
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index a7508f5be343..96800c9c9cd9 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -10,6 +10,7 @@
#undef DEBUG
+#include <linux/aperture.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
@@ -999,6 +1000,10 @@ static int mb862xx_pci_probe(struct pci_dev *pdev,
struct device *dev = &pdev->dev;
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "mb862xxfb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret < 0) {
dev_err(dev, "Cannot enable PCI device\n");
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 28d32cbf496b..93a2d2d1abe8 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -23,9 +23,9 @@
*
* 0.3.3
* - Porting over to new fbdev api. (jsimmons)
- *
+ *
* 0.3.2
- * - got rid of all floating point (dok)
+ * - got rid of all floating point (dok)
*
* 0.3.1
* - added module license (dok)
@@ -54,6 +54,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1154,14 +1155,14 @@ static int neofb_set_par(struct fb_info *info)
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
- case FB_ACCEL_NEOMAGIC_NM2380:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_accel_init(info, &info->var);
break;
default:
break;
- }
+ }
return 0;
}
@@ -1493,15 +1494,15 @@ neofb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_fillrect(info, rect);
break;
default:
cfb_fillrect(info, rect);
break;
- }
+ }
}
static void
@@ -1509,15 +1510,15 @@ neofb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
- case FB_ACCEL_NEOMAGIC_NM2380:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_copyarea(info, area);
break;
default:
cfb_copyarea(info, area);
break;
- }
+ }
}
static void
@@ -1536,20 +1537,20 @@ neofb_imageblit(struct fb_info *info, const struct fb_image *image)
}
}
-static int
+static int
neofb_sync(struct fb_info *info)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
- case FB_ACCEL_NEOMAGIC_NM2230:
- case FB_ACCEL_NEOMAGIC_NM2360:
- case FB_ACCEL_NEOMAGIC_NM2380:
+ case FB_ACCEL_NEOMAGIC_NM2230:
+ case FB_ACCEL_NEOMAGIC_NM2360:
+ case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_sync(info);
break;
default:
break;
}
- return 0;
+ return 0;
}
/*
@@ -2029,6 +2030,10 @@ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
DBG("neofb_probe");
+ err = aperture_remove_conflicting_pci_devices(dev, "neofb");
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err)
return err;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index a372a183c1f0..329e2e8133c6 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1276,11 +1277,15 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
struct nvidia_par *par;
struct fb_info *info;
unsigned short cmd;
-
+ int ret;
NVTRACE_ENTER();
assert(pd != NULL);
+ ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
+ if (ret)
+ return ret;
+
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
if (!info)
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 8fd79deb1e2a..7da715d31a93 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -27,6 +27,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -1521,6 +1522,10 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int err;
int retval = -ENXIO;
+ err = aperture_remove_conflicting_pci_devices(pdev, "pm2fb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err) {
printk(KERN_WARNING "pm2fb: Can't enable pdev: %d\n", err);
diff --git a/drivers/video/fbdev/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index a8faf46adeb1..ba69846d444f 100644
--- a/drivers/video/fbdev/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1315,6 +1316,10 @@ static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
int err;
int retval = -ENXIO;
+ err = aperture_remove_conflicting_pci_devices(dev, "pm3fb");
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err) {
printk(KERN_WARNING "pm3fb: Can't enable PCI dev: %d\n", err);
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index f4add36cb5f4..b73ad14efa20 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -45,6 +45,7 @@
#undef DEBUG
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -942,6 +943,10 @@ static int pvr2fb_pci_probe(struct pci_dev *pdev,
{
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "pvrfb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret) {
printk(KERN_ERR "pvr2fb: PCI enable failed\n");
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 84d5e23ad7d3..0ea74e28f915 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -29,6 +29,7 @@
* doublescan modes are broken
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -474,7 +475,7 @@ static inline void reverse_order(u32 *l)
* DESCRIPTiON:
* Loads cursor image based on a monochrome source and mask bitmap. The
* image bits determines the color of the pixel, 0 for background, 1 for
- * foreground. Only the affected region (as determined by @w and @h
+ * foreground. Only the affected region (as determined by @w and @h
* parameters) will be updated.
*
* CALLED FROM:
@@ -494,7 +495,7 @@ static void rivafb_load_cursor_image(struct riva_par *par, u8 *data8,
for (i = 0; i < h; i++) {
b = *data++;
reverse_order(&b);
-
+
for (j = 0; j < w/2; j++) {
tmp = 0;
#if defined (__BIG_ENDIAN)
@@ -562,7 +563,7 @@ static void riva_rclut(RIVA_HW_INST *chip,
unsigned char regnum, unsigned char *red,
unsigned char *green, unsigned char *blue)
{
-
+
VGA_WR08(chip->PDIO, 0x3c7, regnum);
*red = VGA_RD08(chip->PDIO, 0x3c9);
*green = VGA_RD08(chip->PDIO, 0x3c9);
@@ -673,7 +674,7 @@ static int riva_load_video_mode(struct fb_info *info)
int rc;
struct riva_par *par = info->par;
struct riva_regs newmode;
-
+
NVTRACE_ENTER();
/* time to calculate */
rivafb_blank(FB_BLANK_NORMAL, info);
@@ -717,7 +718,7 @@ static int riva_load_video_mode(struct fb_info *info)
hBlankEnd = hTotal + 4;
}
- newmode.crtc[0x0] = Set8Bits (hTotal);
+ newmode.crtc[0x0] = Set8Bits (hTotal);
newmode.crtc[0x1] = Set8Bits (hDisplay);
newmode.crtc[0x2] = Set8Bits (hBlankStart);
newmode.crtc[0x3] = SetBitField (hBlankEnd, 4: 0, 4:0) | SetBit (7);
@@ -748,20 +749,20 @@ static int riva_load_video_mode(struct fb_info *info)
| SetBitField(vStart,10:10,2:2)
| SetBitField(vDisplay,10:10,1:1)
| SetBitField(vTotal,10:10,0:0);
- newmode.ext.horiz = SetBitField(hTotal,8:8,0:0)
+ newmode.ext.horiz = SetBitField(hTotal,8:8,0:0)
| SetBitField(hDisplay,8:8,1:1)
| SetBitField(hBlankStart,8:8,2:2)
| SetBitField(hStart,8:8,3:3);
newmode.ext.extra = SetBitField(vTotal,11:11,0:0)
| SetBitField(vDisplay,11:11,2:2)
| SetBitField(vStart,11:11,4:4)
- | SetBitField(vBlankStart,11:11,6:6);
+ | SetBitField(vBlankStart,11:11,6:6);
if ((info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
int tmp = (hTotal >> 1) & ~1;
newmode.ext.interlace = Set8Bits(tmp);
newmode.ext.horiz |= SetBitField(tmp, 8:8,4:4);
- } else
+ } else
newmode.ext.interlace = 0xff; /* interlace off */
if (par->riva.Architecture >= NV_ARCH_10)
@@ -774,7 +775,7 @@ static int riva_load_video_mode(struct fb_info *info)
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
newmode.misc_output &= ~0x80;
else
- newmode.misc_output |= 0x80;
+ newmode.misc_output |= 0x80;
rc = CalcStateExt(&par->riva, &newmode.ext, par->pdev, bpp, width,
hDisplaySize, height, dotClock);
@@ -841,7 +842,7 @@ static void riva_update_var(struct fb_var_screeninfo *var,
}
/**
- * rivafb_do_maximize -
+ * rivafb_do_maximize -
* @info: pointer to fb_info object containing info for current riva board
* @var: standard kernel fb changeable data
* @nom: nom
@@ -852,7 +853,7 @@ static void riva_update_var(struct fb_var_screeninfo *var,
*
* RETURNS:
* -EINVAL on failure, 0 on success
- *
+ *
*
* CALLED FROM:
* rivafb_check_var()
@@ -916,14 +917,14 @@ static int rivafb_do_maximize(struct fb_info *info,
return -EINVAL;
}
}
-
+
if (var->xres_virtual * nom / den >= 8192) {
printk(KERN_WARNING PFX
"virtual X resolution (%d) is too high, lowering to %d\n",
var->xres_virtual, 8192 * den / nom - 16);
var->xres_virtual = 8192 * den / nom - 16;
}
-
+
if (var->xres_virtual < var->xres) {
printk(KERN_ERR PFX
"virtual X resolution (%d) is smaller than real\n", var->xres_virtual);
@@ -1010,7 +1011,7 @@ static int riva_get_cmap_len(const struct fb_var_screeninfo *var)
break;
case 6:
rc = 64; /* 64 entries (2^6), 16 bpp, RGB565 */
- break;
+ break;
default:
/* should not occur */
break;
@@ -1042,7 +1043,7 @@ static int rivafb_open(struct fb_info *info, int user)
/* vgaHWunlock() + riva unlock (0x7F) */
CRTCout(par, 0x11, 0xFF);
par->riva.LockUnlock(&par->riva, 0);
-
+
riva_save_state(par, &par->initial_state);
}
par->ref_count++;
@@ -1082,7 +1083,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
struct riva_par *par = info->par;
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
-
+
NVTRACE_ENTER();
if (!var->pixclock)
return -EINVAL;
@@ -1176,7 +1177,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
if (var->yoffset > var->yres_virtual - var->yres)
var->yoffset = var->yres_virtual - var->yres - 1;
- var->red.msb_right =
+ var->red.msb_right =
var->green.msb_right =
var->blue.msb_right =
var->transp.offset = var->transp.length = var->transp.msb_right = 0;
@@ -1198,7 +1199,7 @@ static int rivafb_set_par(struct fb_info *info)
goto out;
if(!(info->flags & FBINFO_HWACCEL_DISABLED))
riva_setup_accel(info);
-
+
par->cursor_reset = 1;
info->fix.line_length = (info->var.xres_virtual * (info->var.bits_per_pixel >> 3));
info->fix.visual = (info->var.bits_per_pixel == 8) ?
@@ -1486,7 +1487,7 @@ static inline void convert_bgcolor_16(u32 *col)
* CALLED FROM:
* framebuffer hook
*/
-static void rivafb_imageblit(struct fb_info *info,
+static void rivafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct riva_par *par = info->par;
@@ -1515,7 +1516,7 @@ static void rivafb_imageblit(struct fb_info *info,
bgx = par->palette[image->bg_color];
}
if (info->var.green.length == 6)
- convert_bgcolor_16(&bgx);
+ convert_bgcolor_16(&bgx);
break;
}
@@ -1612,7 +1613,7 @@ static int rivafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
u8 *dat = (u8 *) cursor->image.data;
u8 *msk = (u8 *) cursor->mask;
u8 *src;
-
+
src = kmalloc_array(s_pitch, cursor->image.height, GFP_ATOMIC);
if (src) {
@@ -1683,7 +1684,7 @@ static const struct fb_ops riva_fb_ops = {
.fb_fillrect = rivafb_fillrect,
.fb_copyarea = rivafb_copyarea,
.fb_imageblit = rivafb_imageblit,
- .fb_cursor = rivafb_cursor,
+ .fb_cursor = rivafb_cursor,
.fb_sync = rivafb_sync,
};
@@ -1713,7 +1714,7 @@ static int riva_set_fbinfo(struct fb_info *info)
info->pseudo_palette = par->pseudo_palette;
cmap_len = riva_get_cmap_len(&info->var);
- fb_alloc_cmap(&info->cmap, cmap_len, 0);
+ fb_alloc_cmap(&info->cmap, cmap_len, 0);
info->pixmap.size = 8 * 1024;
info->pixmap.buf_align = 4;
@@ -1898,6 +1899,10 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
NVTRACE_ENTER();
assert(pd != NULL);
+ ret = aperture_remove_conflicting_pci_devices(pd, "rivafb");
+ if (ret)
+ return ret;
+
info = framebuffer_alloc(sizeof(struct riva_par), &pd->dev);
if (!info) {
ret = -ENOMEM;
@@ -1929,7 +1934,7 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
default_par->Chipset = (pd->vendor << 16) | pd->device;
printk(KERN_INFO PFX "nVidia device/chipset %X\n",default_par->Chipset);
-
+
if(default_par->riva.Architecture == 0) {
printk(KERN_ERR PFX "unknown NV_ARCH\n");
ret=-ENODEV;
@@ -1947,7 +1952,7 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
if (flatpanel == 1)
printk(KERN_INFO PFX "flatpanel support enabled\n");
default_par->forceCRTC = forceCRTC;
-
+
rivafb_fix.mmio_len = pci_resource_len(pd, 0);
rivafb_fix.smem_len = pci_resource_len(pd, 1);
@@ -1959,7 +1964,7 @@ static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
pci_write_config_word(pd, PCI_COMMAND, cmd);
}
-
+
rivafb_fix.mmio_start = pci_resource_start(pd, 0);
rivafb_fix.smem_start = pci_resource_start(pd, 1);
@@ -2058,7 +2063,7 @@ err_iounmap_screen_base:
#endif
iounmap(info->screen_base);
err_iounmap_pramin:
- if (default_par->riva.Architecture == NV_ARCH_03)
+ if (default_par->riva.Architecture == NV_ARCH_03)
iounmap(default_par->riva.PRAMIN);
err_iounmap_ctrl_base:
iounmap(default_par->ctrl_base);
@@ -2077,7 +2082,7 @@ static void rivafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
struct riva_par *par = info->par;
-
+
NVTRACE_ENTER();
#ifdef CONFIG_FB_RIVA_I2C
@@ -2117,11 +2122,11 @@ static int rivafb_setup(char *options)
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "forceCRTC", 9)) {
char *p;
-
+
p = this_opt + 9;
- if (!*p || !*(++p)) continue;
+ if (!*p || !*(++p)) continue;
forceCRTC = *p - '0';
- if (forceCRTC < 0 || forceCRTC > 1)
+ if (forceCRTC < 0 || forceCRTC > 1)
forceCRTC = -1;
} else if (!strncmp(this_opt, "flatpanel", 9)) {
flatpanel = 1;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 67b63a753cb2..7713274bd04c 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -11,6 +11,7 @@
* which is based on the code of neofb.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1131,6 +1132,10 @@ static int s3_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ rc = aperture_remove_conflicting_pci_devices(dev, "s3fb");
+ if (rc)
+ return rc;
+
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct s3fb_info), &(dev->dev));
if (!info)
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 8114c921ceb8..b7818b652698 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -41,6 +41,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -2176,6 +2177,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
DBG("savagefb_probe");
+ err = aperture_remove_conflicting_pci_devices(dev, "savagefb");
+ if (err)
+ return err;
+
info = framebuffer_alloc(sizeof(struct savagefb_par), &dev->dev);
if (!info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index c9e77429dfa3..1c197c3f9538 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -19,6 +19,7 @@
* which is (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -5861,6 +5862,10 @@ static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if(sisfb_off)
return -ENXIO;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "sisfb");
+ if (ret)
+ return ret;
+
sis_fb_info = framebuffer_alloc(sizeof(*ivideo), &pdev->dev);
if(!sis_fb_info)
return -ENOMEM;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 8ab9a3fbd281..a10f1057293b 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -10,38 +10,39 @@
* The primary goal is to remove the console code from fbdev and place it
* into fbcon.c. This reduces the code and makes writing a new fbdev driver
* easy since the author doesn't need to worry about console internals. It
- * also allows the ability to run fbdev without a console/tty system on top
- * of it.
+ * also allows the ability to run fbdev without a console/tty system on top
+ * of it.
*
* First the roles of struct fb_info and struct display have changed. Struct
* display will go away. The way the new framebuffer console code will
- * work is that it will act to translate data about the tty/console in
+ * work is that it will act to translate data about the tty/console in
* struct vc_data to data in a device independent way in struct fb_info. Then
- * various functions in struct fb_ops will be called to store the device
- * dependent state in the par field in struct fb_info and to change the
+ * various functions in struct fb_ops will be called to store the device
+ * dependent state in the par field in struct fb_info and to change the
* hardware to that state. This allows a very clean separation of the fbdev
* layer from the console layer. It also allows one to use fbdev on its own
- * which is a bounus for embedded devices. The reason this approach works is
+ * which is a bounus for embedded devices. The reason this approach works is
* for each framebuffer device when used as a tty/console device is allocated
- * a set of virtual terminals to it. Only one virtual terminal can be active
- * per framebuffer device. We already have all the data we need in struct
+ * a set of virtual terminals to it. Only one virtual terminal can be active
+ * per framebuffer device. We already have all the data we need in struct
* vc_data so why store a bunch of colormaps and other fbdev specific data
- * per virtual terminal.
+ * per virtual terminal.
*
* As you can see doing this makes the con parameter pretty much useless
- * for struct fb_ops functions, as it should be. Also having struct
- * fb_var_screeninfo and other data in fb_info pretty much eliminates the
+ * for struct fb_ops functions, as it should be. Also having struct
+ * fb_var_screeninfo and other data in fb_info pretty much eliminates the
* need for get_fix and get_var. Once all drivers use the fix, var, and cmap
* fbcon can be written around these fields. This will also eliminate the
* need to regenerate struct fb_var_screeninfo, struct fb_fix_screeninfo
* struct fb_cmap every time get_var, get_fix, get_cmap functions are called
- * as many drivers do now.
+ * as many drivers do now.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -66,68 +67,68 @@
static char *mode_option;
/*
- * If your driver supports multiple boards, you should make the
- * below data types arrays, or allocate them dynamically (using kmalloc()).
- */
+ * If your driver supports multiple boards, you should make the
+ * below data types arrays, or allocate them dynamically (using kmalloc()).
+ */
-/*
+/*
* This structure defines the hardware state of the graphics card. Normally
* you place this in a header file in linux/include/video. This file usually
* also includes register information. That allows other driver subsystems
- * and userland applications the ability to use the same header file to
- * avoid duplicate work and easy porting of software.
+ * and userland applications the ability to use the same header file to
+ * avoid duplicate work and easy porting of software.
*/
struct xxx_par;
/*
* Here we define the default structs fb_fix_screeninfo and fb_var_screeninfo
* if we don't use modedb. If we do use modedb see xxxfb_init how to use it
- * to get a fb_var_screeninfo. Otherwise define a default var as well.
+ * to get a fb_var_screeninfo. Otherwise define a default var as well.
*/
static const struct fb_fix_screeninfo xxxfb_fix = {
- .id = "FB's name",
+ .id = "FB's name",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 1,
.ypanstep = 1,
- .ywrapstep = 1,
+ .ywrapstep = 1,
.accel = FB_ACCEL_NONE,
};
/*
- * Modern graphical hardware not only supports pipelines but some
+ * Modern graphical hardware not only supports pipelines but some
* also support multiple monitors where each display can have
- * its own unique data. In this case each display could be
- * represented by a separate framebuffer device thus a separate
+ * its own unique data. In this case each display could be
+ * represented by a separate framebuffer device thus a separate
* struct fb_info. Now the struct xxx_par represents the graphics
- * hardware state thus only one exist per card. In this case the
- * struct xxx_par for each graphics card would be shared between
- * every struct fb_info that represents a framebuffer on that card.
- * This allows when one display changes it video resolution (info->var)
+ * hardware state thus only one exist per card. In this case the
+ * struct xxx_par for each graphics card would be shared between
+ * every struct fb_info that represents a framebuffer on that card.
+ * This allows when one display changes it video resolution (info->var)
* the other displays know instantly. Each display can always be
* aware of the entire hardware state that affects it because they share
* the same xxx_par struct. The other side of the coin is multiple
* graphics cards that pass data around until it is finally displayed
* on one monitor. Such examples are the voodoo 1 cards and high end
* NUMA graphics servers. For this case we have a bunch of pars, each
- * one that represents a graphics state, that belong to one struct
+ * one that represents a graphics state, that belong to one struct
* fb_info. Their you would want to have *par point to a array of device
- * states and have each struct fb_ops function deal with all those
+ * states and have each struct fb_ops function deal with all those
* states. I hope this covers every possible hardware design. If not
- * feel free to send your ideas at jsimmons@users.sf.net
+ * feel free to send your ideas at jsimmons@users.sf.net
*/
/*
- * If your driver supports multiple boards or it supports multiple
- * framebuffers, you should make these arrays, or allocate them
+ * If your driver supports multiple boards or it supports multiple
+ * framebuffers, you should make these arrays, or allocate them
* dynamically using framebuffer_alloc() and free them with
* framebuffer_release().
- */
+ */
static struct fb_info info;
- /*
+ /*
* Each one represents the state of the hardware. Most hardware have
- * just one hardware state. These here represent the default state(s).
+ * just one hardware state. These here represent the default state(s).
*/
static struct xxx_par __initdata current_par;
@@ -136,12 +137,12 @@ static struct xxx_par __initdata current_par;
* first accessed.
* @info: frame buffer structure that represents a single frame buffer
* @user: tell us if the userland (value=1) or the console is accessing
- * the framebuffer.
+ * the framebuffer.
*
* This function is the first function called in the framebuffer api.
- * Usually you don't need to provide this function. The case where it
+ * Usually you don't need to provide this function. The case where it
* is used is to change from a text mode hardware state to a graphics
- * mode state.
+ * mode state.
*
* Returns negative errno on error, or zero on success.
*/
@@ -151,13 +152,13 @@ static int xxxfb_open(struct fb_info *info, int user)
}
/**
- * xxxfb_release - Optional function. Called when the framebuffer
- * device is closed.
+ * xxxfb_release - Optional function. Called when the framebuffer
+ * device is closed.
* @info: frame buffer structure that represents a single frame buffer
* @user: tell us if the userland (value=1) or the console is accessing
- * the framebuffer.
- *
- * Thus function is called when we close /dev/fb or the framebuffer
+ * the framebuffer.
+ *
+ * Thus function is called when we close /dev/fb or the framebuffer
* console system is released. Usually you don't need this function.
* The case where it is usually used is to go from a graphics state
* to a text mode state.
@@ -170,17 +171,17 @@ static int xxxfb_release(struct fb_info *info, int user)
}
/**
- * xxxfb_check_var - Optional function. Validates a var passed in.
+ * xxxfb_check_var - Optional function. Validates a var passed in.
* @var: frame buffer variable screen structure
- * @info: frame buffer structure that represents a single frame buffer
+ * @info: frame buffer structure that represents a single frame buffer
*
* Checks to see if the hardware supports the state requested by
- * var passed in. This function does not alter the hardware state!!!
- * This means the data stored in struct fb_info and struct xxx_par do
- * not change. This includes the var inside of struct fb_info.
+ * var passed in. This function does not alter the hardware state!!!
+ * This means the data stored in struct fb_info and struct xxx_par do
+ * not change. This includes the var inside of struct fb_info.
* Do NOT change these. This function can be called on its own if we
- * intent to only test a mode and not actually set it. The stuff in
- * modedb.c is a example of this. If the var passed in is slightly
+ * intent to only test a mode and not actually set it. The stuff in
+ * modedb.c is a example of this. If the var passed in is slightly
* off by what the hardware can support then we alter the var PASSED in
* to what we can do.
*
@@ -208,7 +209,7 @@ static int xxxfb_release(struct fb_info *info, int user)
static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
/* ... */
- return 0;
+ return 0;
}
/**
@@ -217,9 +218,9 @@ static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
*
* Using the fb_var_screeninfo in fb_info we set the resolution of the
* this particular framebuffer. This function alters the par AND the
- * fb_fix_screeninfo stored in fb_info. It doesn't not alter var in
+ * fb_fix_screeninfo stored in fb_info. It doesn't not alter var in
* fb_info since we are using that data. This means we depend on the
- * data in var inside fb_info to be supported by the hardware.
+ * data in var inside fb_info to be supported by the hardware.
*
* This function is also used to recover/restore the hardware to a
* known working state.
@@ -254,20 +255,20 @@ static int xxxfb_set_par(struct fb_info *info)
{
struct xxx_par *par = info->par;
/* ... */
- return 0;
+ return 0;
}
/**
* xxxfb_setcolreg - Optional function. Sets a color register.
- * @regno: Which register in the CLUT we are programming
- * @red: The red value which can be up to 16 bits wide
- * @green: The green value which can be up to 16 bits wide
+ * @regno: Which register in the CLUT we are programming
+ * @red: The red value which can be up to 16 bits wide
+ * @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported, the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
- *
+ *
* Set a single color register. The values supplied have a 16 bit
- * magnitude which needs to be scaled in this function for the hardware.
+ * magnitude which needs to be scaled in this function for the hardware.
* Things to take into consideration are how many color registers, if
* any, are supported with the current color visual. With truecolor mode
* no color palettes are supported. Here a pseudo palette is created
@@ -275,8 +276,8 @@ static int xxxfb_set_par(struct fb_info *info)
* pseudocolor mode we have a limited color palette. To deal with this
* we can program what color is displayed for a particular pixel value.
* DirectColor is similar in that we can program each color field. If
- * we have a static colormap we don't need to implement this function.
- *
+ * we have a static colormap we don't need to implement this function.
+ *
* Returns negative errno on error, or zero on success.
*/
static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -440,7 +441,7 @@ static int xxxfb_pan_display(struct fb_var_screeninfo *var,
/**
* xxxfb_blank - NOT a required function. Blanks the display.
- * @blank_mode: the blank mode we want.
+ * @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*
* Blank the screen if blank_mode != FB_BLANK_UNBLANK, else unblank.
@@ -469,22 +470,22 @@ static int xxxfb_blank(int blank_mode, struct fb_info *info)
/*
* We provide our own functions if we have hardware acceleration
- * or non packed pixel format layouts. If we have no hardware
+ * or non packed pixel format layouts. If we have no hardware
* acceleration, we can use a generic unaccelerated function. If using
- * a pack pixel format just use the functions in cfb_*.c. Each file
+ * a pack pixel format just use the functions in cfb_*.c. Each file
* has one of the three different accel functions we support.
*/
/**
- * xxxfb_fillrect - REQUIRED function. Can use generic routines if
+ * xxxfb_fillrect - REQUIRED function. Can use generic routines if
* non acclerated hardware and packed pixel based.
- * Draws a rectangle on the screen.
+ * Draws a rectangle on the screen.
*
* @info: frame buffer structure that represents a single frame buffer
- * @region: The structure representing the rectangular region we
+ * @region: The structure representing the rectangular region we
* wish to draw to.
*
- * This drawing operation places/removes a retangle on the screen
+ * This drawing operation places/removes a retangle on the screen
* depending on the rastering operation with the value of color which
* is in the current color depth format.
*/
@@ -492,13 +493,13 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
{
/* Meaning of struct fb_fillrect
*
- * @dx: The x and y corrdinates of the upper left hand corner of the
- * @dy: area we want to draw to.
+ * @dx: The x and y corrdinates of the upper left hand corner of the
+ * @dy: area we want to draw to.
* @width: How wide the rectangle is we want to draw.
* @height: How tall the rectangle is we want to draw.
- * @color: The color to fill in the rectangle with.
+ * @color: The color to fill in the rectangle with.
* @rop: The raster operation. We can draw the rectangle with a COPY
- * of XOR which provides erasing effect.
+ * of XOR which provides erasing effect.
*/
}
@@ -514,7 +515,7 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
* This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
-void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
/*
* @dx: The x and y coordinates of the upper left hand corner of the
@@ -530,28 +531,28 @@ void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
/**
* xxxfb_imageblit - REQUIRED function. Can use generic routines if
* non acclerated hardware and packed pixel based.
- * Copies a image from system memory to the screen.
+ * Copies a image from system memory to the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @image: structure defining the image.
*
- * This drawing operation draws a image on the screen. It can be a
+ * This drawing operation draws a image on the screen. It can be a
* mono image (needed for font handling) or a color image (needed for
- * tux).
+ * tux).
*/
-void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
+void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
/*
* @dx: The x and y coordinates of the upper left hand corner of the
* @dy: destination area to place the image on the screen.
* @width: How wide the image is we want to copy.
* @height: How tall the image is we want to copy.
- * @fg_color: For mono bitmap images this is color data for
+ * @fg_color: For mono bitmap images this is color data for
* @bg_color: the foreground and background of the image to
* write directly to the frmaebuffer.
* @depth: How many bits represent a single pixel for this image.
* @data: The actual data used to construct the image on the display.
- * @cmap: The colormap used for color images.
+ * @cmap: The colormap used for color images.
*/
/*
@@ -578,13 +579,13 @@ void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
/*
- * @set: Which fields we are altering in struct fb_cursor
- * @enable: Disable or enable the cursor
- * @rop: The bit operation we want to do.
- * @mask: This is the cursor mask bitmap.
+ * @set: Which fields we are altering in struct fb_cursor
+ * @enable: Disable or enable the cursor
+ * @rop: The bit operation we want to do.
+ * @mask: This is the cursor mask bitmap.
* @dest: A image of the area we are going to display the cursor.
- * Used internally by the driver.
- * @hot: The hot spot.
+ * Used internally by the driver.
+ * @hot: The hot spot.
* @image: The actual data for the cursor image.
*
* NOTES ON FLAGS (cursor->set):
@@ -612,11 +613,11 @@ int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
}
/**
- * xxxfb_sync - NOT a required function. Normally the accel engine
+ * xxxfb_sync - NOT a required function. Normally the accel engine
* for a graphics card take a specific amount of time.
* Often we have to wait for the accelerator to finish
* its operation before we can write to the framebuffer
- * so we can have consistent display output.
+ * so we can have consistent display output.
*
* @info: frame buffer structure that represents a single frame buffer
*
@@ -664,8 +665,15 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
struct fb_info *info;
struct xxx_par *par;
struct device *device = &dev->dev; /* or &pdev->dev */
- int cmap_len, retval;
-
+ int cmap_len, retval;
+
+ /*
+ * Remove firmware-based drivers that create resource conflicts.
+ */
+ retval = aperture_remove_conflicting_pci_devices(dev, "xxxfb");
+ if (retval)
+ return retval;
+
/*
* Dynamically allocate info and par
*/
@@ -677,11 +685,11 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
par = info->par;
- /*
+ /*
* Here we set the screen_base to the virtual memory address
* for the framebuffer. Usually we obtain the resource address
* from the bus layer and then translate it to virtual memory
- * space via ioremap. Consult ioport.h.
+ * space via ioremap. Consult ioport.h.
*/
info->screen_base = framebuffer_virtual_memory;
info->fbops = &xxxfb_ops;
@@ -765,24 +773,24 @@ static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent)
/*
* This should give a reasonable default video mode. The following is
- * done when we can set a video mode.
+ * done when we can set a video mode.
*/
if (!mode_option)
- mode_option = "640x480@60";
+ mode_option = "640x480@60";
retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
-
+
if (!retval || retval == 4)
- return -EINVAL;
+ return -EINVAL;
/* This has to be done! */
if (fb_alloc_cmap(&info->cmap, cmap_len, 0))
return -ENOMEM;
-
- /*
- * The following is done in the case of having hardware with a static
- * mode. If we are setting the mode ourselves we don't call this.
- */
+
+ /*
+ * The following is done in the case of having hardware with a static
+ * mode. If we are setting the mode ourselves we don't call this.
+ */
info->var = xxxfb_var;
/*
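The xxxfb_setcolreg() documentation in the skeleton above spells out the contract: the 16-bit components passed in have to be scaled down to the field widths the hardware advertises in info->var, and for truecolor visuals the result lands in a pseudo palette rather than a real CLUT. A minimal sketch of a truecolor implementation of that contract (placeholder names, not part of this patch):

#include <linux/errno.h>
#include <linux/fb.h>

static int examplefb_setcolreg(unsigned regno, unsigned red, unsigned green,
			       unsigned blue, unsigned transp,
			       struct fb_info *info)
{
	u32 *pal = info->pseudo_palette;

	if (regno >= 16)
		return -EINVAL;

	/* Scale each 16-bit component to the width declared in var. */
	red   >>= 16 - info->var.red.length;
	green >>= 16 - info->var.green.length;
	blue  >>= 16 - info->var.blue.length;

	pal[regno] = (red << info->var.red.offset) |
		     (green << info->var.green.offset) |
		     (blue << info->var.blue.offset);

	return 0;
}

With a pseudocolor visual the same hook would instead program the hardware CLUT entry for regno.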
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
index 092a1caa1208..3baf33635e65 100644
--- a/drivers/video/fbdev/sm712fb.c
+++ b/drivers/video/fbdev/sm712fb.c
@@ -18,6 +18,7 @@
* Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
*/
+#include <linux/aperture.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -1502,6 +1503,10 @@ static int smtcfb_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Silicon Motion display driver.\n");
+ err = aperture_remove_conflicting_pci_devices(pdev, "smtcfb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev); /* enable SMTC chip */
if (err)
return err;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 52e4ed9da78c..5c891aa00d59 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -817,7 +817,7 @@ fb_alloc_error:
return ret;
}
-static int ssd1307fb_remove(struct i2c_client *client)
+static void ssd1307fb_remove(struct i2c_client *client)
{
struct fb_info *info = i2c_get_clientdata(client);
struct ssd1307fb_par *par = info->par;
@@ -836,8 +836,6 @@ static int ssd1307fb_remove(struct i2c_client *client)
fb_deferred_io_cleanup(info);
__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
framebuffer_release(info);
-
- return 0;
}
static const struct i2c_device_id ssd1307fb_i2c_id[] = {
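The ssd1307fb hunk above is part of the I2C core conversion that changed the remove() callback to return void; the return statement is dropped because the return value was not meaningfully used by the core. The resulting callback shape, sketched with placeholder names:

#include <linux/fb.h>
#include <linux/i2c.h>

static void examplefb_i2c_remove(struct i2c_client *client)
{
	struct fb_info *info = i2c_get_clientdata(client);

	unregister_framebuffer(info);
	framebuffer_release(info);
	/* Nothing to return: errors here could not be acted on. */
}

static struct i2c_driver examplefb_i2c_driver = {
	.driver	= { .name = "examplefb" },
	.remove	= examplefb_i2c_remove,
	/* .probe and .id_table omitted from this sketch */
};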
diff --git a/drivers/video/fbdev/sstfb.c b/drivers/video/fbdev/sstfb.c
index cd4d640f9477..a56b24288566 100644
--- a/drivers/video/fbdev/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
@@ -80,6 +80,7 @@
* Includes
*/
+#include <linux/aperture.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -364,7 +365,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
var->pixclock = KHZ2PICOS(freq);
-
+
if (var->vmode & FB_VMODE_INTERLACED)
vBackPorch += (vBackPorch % 2);
if (var->vmode & FB_VMODE_DOUBLE) {
@@ -382,7 +383,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
printk(KERN_ERR "sstfb: Unsupported bpp %d\n", var->bits_per_pixel);
return -EINVAL;
}
-
+
/* validity tests */
if (var->xres <= 1 || yDim <= 0 || var->hsync_len <= 1 ||
hSyncOff <= 1 || var->left_margin <= 2 || vSyncOn <= 0 ||
@@ -392,7 +393,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
if (IS_VOODOO2(par)) {
/* Voodoo 2 limits */
- tiles_in_X = (var->xres + 63 ) / 64 * 2;
+ tiles_in_X = (var->xres + 63 ) / 64 * 2;
if (var->xres > POW2(11) || yDim >= POW2(11)) {
printk(KERN_ERR "sstfb: Unsupported resolution %dx%d\n",
@@ -631,7 +632,7 @@ static int sstfb_set_par(struct fb_info *info)
lfbmode |= ( LFB_WORD_SWIZZLE_WR | LFB_BYTE_SWIZZLE_WR |
LFB_WORD_SWIZZLE_RD | LFB_BYTE_SWIZZLE_RD );
#endif
-
+
if (clipping) {
sst_write(LFBMODE, lfbmode | EN_PXL_PIPELINE);
/*
@@ -684,7 +685,7 @@ static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
| (green << info->var.green.offset)
| (blue << info->var.blue.offset)
| (transp << info->var.transp.offset);
-
+
par->palette[regno] = col;
return 0;
@@ -773,7 +774,7 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
struct sstfb_par *par = info->par;
u32 stride = info->fix.line_length;
-
+
if (!IS_VOODOO2(par))
return;
@@ -795,17 +796,17 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
* FillRect 2D command (solidfill or invert (via ROP_XOR)) - Voodoo2 only
*/
#if 0
-static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct sstfb_par *par = info->par;
u32 stride = info->fix.line_length;
if (!IS_VOODOO2(par))
return;
-
+
sst_write(BLTCLIPX, info->var.xres);
sst_write(BLTCLIPY, info->var.yres);
-
+
sst_write(BLTDSTBASEADDR, 0);
sst_write(BLTCOLOR, rect->color);
sst_write(BLTROP, rect->rop == ROP_COPY ? BLTROP_COPY : BLTROP_XOR);
@@ -820,8 +821,8 @@ static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-/*
- * get lfb size
+/*
+ * get lfb size
*/
static int sst_get_memsize(struct fb_info *info, __u32 *memsize)
{
@@ -859,8 +860,8 @@ static int sst_get_memsize(struct fb_info *info, __u32 *memsize)
}
-/*
- * DAC detection routines
+/*
+ * DAC detection routines
*/
/* fbi should be idle, and fifo emty and mem disabled */
@@ -963,7 +964,7 @@ static int sst_detect_ics(struct fb_info *info)
* see detect_dac
*/
-static int sst_set_pll_att_ti(struct fb_info *info,
+static int sst_set_pll_att_ti(struct fb_info *info,
const struct pll_timing *t, const int clock)
{
struct sstfb_par *par = info->par;
@@ -1326,6 +1327,10 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct sst_spec *spec;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "sstfb");
+ if (err)
+ return err;
+
/* Enable device in PCI config. */
if ((err=pci_enable_device(pdev))) {
printk(KERN_ERR "cannot enable device\n");
@@ -1338,10 +1343,10 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
pci_set_drvdata(pdev, info);
-
+
par = info->par;
fix = &info->fix;
-
+
par->type = id->driver_data;
spec = &voodoo_spec[par->type];
f_ddprintk("found device : %s\n", spec->name);
@@ -1407,7 +1412,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* fact dithered to 16bit).
*/
fix->line_length = 2048; /* default value, for 24 or 32bit: 4096 */
-
+
fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 16);
if (sstfb_check_var(&info->var, info)) {
@@ -1419,7 +1424,7 @@ static int sstfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_ERR "sstfb: can't set default video mode.\n");
goto fail;
}
-
+
if (fb_alloc_cmap(&info->cmap, 256, 0)) {
printk(KERN_ERR "sstfb: can't alloc cmap memory.\n");
goto fail;
@@ -1465,7 +1470,7 @@ static void sstfb_remove(struct pci_dev *pdev)
info = pci_get_drvdata(pdev);
par = info->par;
-
+
device_remove_file(info->dev, &device_attrs[0]);
sst_shutdown(info);
iounmap(info->screen_base);
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 1279b02234f8..f4059529c602 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -123,6 +124,10 @@ static int s3d_pci_register(struct pci_dev *pdev,
struct s3d_info *sp;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "s3dfb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err < 0) {
printk(KERN_ERR "s3d: Cannot enable PCI device %s\n",
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index f7b463633ba0..b0c8cf0c535a 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -249,6 +250,10 @@ static int e3d_pci_register(struct pci_dev *pdev,
unsigned int line_length;
int err;
+ err = aperture_remove_conflicting_pci_devices(pdev, "e3dfb");
+ if (err)
+ return err;
+
of_node = pci_device_to_OF_node(pdev);
if (!of_node) {
printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
diff --git a/drivers/video/fbdev/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index 8a8122f8bfeb..592a913d0718 100644
--- a/drivers/video/fbdev/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
@@ -64,6 +64,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1376,6 +1377,10 @@ static int tdfxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct fb_monspecs *specs;
bool found;
+ err = aperture_remove_conflicting_pci_devices(pdev, "tdfxfb");
+ if (err)
+ return err;
+
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "tdfxfb: Can't enable pdev: %d\n", err);
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index 1fff5fd7ab51..251dbd282f5e 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -12,6 +12,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/bitrev.h>
#include <linux/compiler.h>
#include <linux/delay.h>
@@ -106,6 +107,12 @@ static struct pci_driver tgafb_pci_driver = {
static int tgafb_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ int ret;
+
+ ret = aperture_remove_conflicting_pci_devices(pdev, "tgafb");
+ if (ret)
+ return ret;
+
return tgafb_register(&pdev->dev);
}
@@ -729,7 +736,7 @@ tgafb_mono_imageblit(struct fb_info *info, const struct fb_image *image)
/* Handle another common case in which accel_putcs
generates a large bitmap, which happens to be aligned.
- Allow the tail to be misaligned. This case is
+ Allow the tail to be misaligned. This case is
interesting because we've not got to hold partial
bytes across the words being written. */
@@ -908,9 +915,9 @@ tgafb_imageblit(struct fb_info *info, const struct fb_image *image)
}
/**
- * tgafb_fillrect - REQUIRED function. Can use generic routines if
+ * tgafb_fillrect - REQUIRED function. Can use generic routines if
* non acclerated hardware and packed pixel based.
- * Draws a rectangle on the screen.
+ * Draws a rectangle on the screen.
*
* @info: frame buffer structure that represents a single frame buffer
* @rect: structure defining the rectagle and operation.
@@ -1044,7 +1051,7 @@ tgafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
/* Handle the special case of copying entire lines, e.g. during scrolling.
We can avoid a lot of needless computation in this case. In the 8bpp
- case we need to use the COPY64 registers instead of mask writes into
+ case we need to use the COPY64 registers instead of mask writes into
the frame buffer to achieve maximum performance. */
static inline void
@@ -1251,7 +1258,7 @@ copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
}
static void
-tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
unsigned long dx, dy, width, height, sx, sy, vxres, vyres;
unsigned long line_length, bpp;
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index cda095420ee8..f9c3b1d38fc2 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -16,6 +16,7 @@
* timing value tweaking so it looks good on every monitor in every mode
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/init.h>
@@ -1470,6 +1471,10 @@ static int trident_pci_probe(struct pci_dev *dev,
int chip_id;
bool found = false;
+ err = aperture_remove_conflicting_pci_devices(dev, "tridentfb");
+ if (err)
+ return err;
+
err = pci_enable_device(dev);
if (err)
return err;
diff --git a/drivers/video/fbdev/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index ff61605b8764..82b36dbb5b1a 100644
--- a/drivers/video/fbdev/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
@@ -14,6 +14,7 @@
* Alan Hourihane <alanh-at-tungstengraphics-dot-com>
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -442,7 +443,11 @@ static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vml_info *vinfo;
struct fb_info *info;
struct vml_par *par;
- int err = 0;
+ int err;
+
+ err = aperture_remove_conflicting_pci_devices(dev, "vmlfb");
+ if (err)
+ return err;
par = kzalloc(sizeof(*par), GFP_KERNEL);
if (par == NULL)
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index d21f68f3ee44..35cf51ae3292 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1,13 +1,13 @@
/*
* linux/drivers/video/vga16.c -- VGA 16-color framebuffer driver
- *
+ *
* Copyright 1999 Ben Pfaff <pfaffben@debian.org> and Petr Vandrovec <VANDROVE@vc.cvut.cz>
* Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm
* Based on VESA framebuffer (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
- * archive for more details.
+ * archive for more details.
*/
#include <linux/module.h>
@@ -25,9 +25,6 @@
#include <asm/io.h>
#include <video/vga.h>
-#define VGA_FB_PHYS 0xA0000
-#define VGA_FB_PHYS_LEN 65536
-
#define MODE_SKIP4 1
#define MODE_8BPP 2
#define MODE_CFB 4
@@ -70,7 +67,7 @@ static struct fb_var_screeninfo vga16fb_defined = {
.yres = 480,
.xres_virtual = 640,
.yres_virtual = 480,
- .bits_per_pixel = 4,
+ .bits_per_pixel = 4,
.activate = FB_ACTIVATE_TEST,
.height = -1,
.width = -1,
@@ -87,8 +84,8 @@ static struct fb_var_screeninfo vga16fb_defined = {
/* name should not depend on EGA/VGA */
static const struct fb_fix_screeninfo vga16fb_fix = {
.id = "VGA16 VGA",
- .smem_start = VGA_FB_PHYS,
- .smem_len = VGA_FB_PHYS_LEN,
+ .smem_start = VGA_FB_PHYS_BASE,
+ .smem_len = VGA_FB_PHYS_SIZE,
.type = FB_TYPE_VGA_PLANES,
.type_aux = FB_AUX_VGA_PLANES_VGA4,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -120,7 +117,7 @@ static inline void rmw(volatile char __iomem *p)
static inline int setmode(int mode)
{
int oldmode;
-
+
oldmode = vga_io_rgfx(VGA_GFX_MODE);
vga_io_w(VGA_GFX_D, mode);
return oldmode;
@@ -139,19 +136,19 @@ static inline void setmask(int mask)
vga_io_w(VGA_GFX_D, mask);
}
-/* Set the Data Rotate Register and return its old value.
+/* Set the Data Rotate Register and return its old value.
Bits 0-2 are rotate count, bits 3-4 are logical operation
(0=NOP, 1=AND, 2=OR, 3=XOR). */
static inline int setop(int op)
{
int oldop;
-
+
oldop = vga_io_rgfx(VGA_GFX_DATA_ROTATE);
vga_io_w(VGA_GFX_D, op);
return oldop;
}
-/* Set the Enable Set/Reset Register and return its old value.
+/* Set the Enable Set/Reset Register and return its old value.
The code here always uses value 0xf for this register. */
static inline int setsr(int sr)
{
@@ -185,25 +182,25 @@ static inline void setindex(int index)
}
/* Check if the video mode is supported by the driver */
-static inline int check_mode_supported(void)
+static inline int check_mode_supported(const struct screen_info *si)
{
/* non-x86 architectures treat orig_video_isVGA as a boolean flag */
#if defined(CONFIG_X86)
/* only EGA and VGA in 16 color graphic mode are supported */
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
- screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
+ if (si->orig_video_isVGA != VIDEO_TYPE_EGAC &&
+ si->orig_video_isVGA != VIDEO_TYPE_VGAC)
return -ENODEV;
- if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
- screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */
- screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
- screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */
+ if (si->orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
+ si->orig_video_mode != 0x0E && /* 640x200/4 (EGA) */
+ si->orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
+ si->orig_video_mode != 0x12) /* 640x480/4 (VGA) */
return -ENODEV;
#endif
return 0;
}
-static void vga16fb_pan_var(struct fb_info *info,
+static void vga16fb_pan_var(struct fb_info *info,
struct fb_var_screeninfo *var)
{
struct vga16fb_par *par = info->par;
@@ -296,7 +293,7 @@ static void vga16fb_clock_chip(struct vga16fb_par *par,
par->clkdiv = best->seq_clock_mode;
*pixclock = (best->pixclock * div) / mul;
}
-
+
#define FAIL(X) return -EINVAL
static int vga16fb_open(struct fb_info *info, int user)
@@ -511,7 +508,7 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
par->misc &= ~0x40;
if (var->sync & FB_SYNC_VERT_HIGH_ACT)
par->misc &= ~0x80;
-
+
par->mode = mode;
if (mode & MODE_8BPP)
@@ -520,8 +517,8 @@ static int vga16fb_check_var(struct fb_var_screeninfo *var,
else
/* pixel clock == vga clock */
vga16fb_clock_chip(par, &var->pixclock, info, 1, 1);
-
- var->red.offset = var->green.offset = var->blue.offset =
+
+ var->red.offset = var->green.offset = var->blue.offset =
var->transp.offset = 0;
var->red.length = var->green.length = var->blue.length =
(par->isVGA) ? 6 : 2;
@@ -588,10 +585,10 @@ static int vga16fb_set_par(struct fb_info *info)
else
atc[VGA_ATC_PEL] = info->var.xoffset & 7;
atc[VGA_ATC_COLOR_PAGE] = 0x00;
-
+
if (par->mode & MODE_TEXT) {
- fh = 16; // FIXME !!! Fudge font height.
- par->crtc[VGA_CRTC_MAX_SCAN] = (par->crtc[VGA_CRTC_MAX_SCAN]
+ fh = 16; // FIXME !!! Fudge font height.
+ par->crtc[VGA_CRTC_MAX_SCAN] = (par->crtc[VGA_CRTC_MAX_SCAN]
& ~0x1F) | (fh - 1);
}
@@ -602,10 +599,10 @@ static int vga16fb_set_par(struct fb_info *info)
vga_io_w(EGA_GFX_E0, 0x00);
vga_io_w(EGA_GFX_E1, 0x01);
}
-
+
/* update misc output register */
vga_io_w(VGA_MIS_W, par->misc);
-
+
/* synchronous reset on */
vga_io_wseq(0x00, 0x01);
@@ -617,7 +614,7 @@ static int vga16fb_set_par(struct fb_info *info)
for (i = 2; i < VGA_SEQ_C; i++) {
vga_io_wseq(i, seq[i]);
}
-
+
/* synchronous reset off */
vga_io_wseq(0x00, 0x03);
@@ -628,12 +625,12 @@ static int vga16fb_set_par(struct fb_info *info)
for (i = 0; i < VGA_CRTC_REGS; i++) {
vga_io_wcrt(i, par->crtc[i]);
}
-
+
/* write graphics controller registers */
for (i = 0; i < VGA_GFX_C; i++) {
vga_io_wgfx(i, gdc[i]);
}
-
+
/* write attribute controller registers */
for (i = 0; i < VGA_ATT_C; i++) {
vga_io_r(VGA_IS1_RC); /* reset flip-flop */
@@ -656,7 +653,7 @@ static void ega16_setpalette(int regno, unsigned red, unsigned green, unsigned b
{
static const unsigned char map[] = { 000, 001, 010, 011 };
int val;
-
+
if (regno >= 16)
return;
val = map[red>>14] | ((map[green>>14]) << 1) | ((map[blue>>14]) << 2);
@@ -687,17 +684,17 @@ static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (according to the entries in the `var' structure). Return
* != 0 for invalid regno.
*/
-
+
if (regno >= 256)
return 1;
gray = info->var.grayscale;
-
+
if (gray) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
}
- if (par->isVGA)
+ if (par->isVGA)
vga16_setpalette(regno,red,green,blue);
else
ega16_setpalette(regno,red,green,blue);
@@ -705,7 +702,7 @@ static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
}
static int vga16fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
+ struct fb_info *info)
{
vga16fb_pan_var(info, var);
return 0;
@@ -720,7 +717,7 @@ static void vga_vesa_blank(struct vga16fb_par *par, int mode)
{
unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
-
+
/* save original values of VGA controller registers */
if(!par->vesa_blanked) {
par->vga_state.CrtMiscIO = vga_io_r(VGA_MIS_R);
@@ -776,7 +773,7 @@ static void vga_vesa_unblank(struct vga16fb_par *par)
{
unsigned char SeqCtrlIndex = vga_io_r(VGA_SEQ_I);
unsigned char CrtCtrlIndex = vga_io_r(VGA_CRT_IC);
-
+
/* restore original values of VGA controller registers */
vga_io_w(VGA_MIS_W, par->vga_state.CrtMiscIO);
@@ -962,7 +959,7 @@ static void vga16fb_fillrect(struct fb_info *info, const struct fb_fillrect *rec
}
break;
}
- } else
+ } else
vga_8planes_fillrect(info, rect);
break;
case FB_TYPE_PACKED_PIXELS:
@@ -1029,7 +1026,7 @@ static void vga_8planes_copyarea(struct fb_info *info, const struct fb_copyarea
static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
+ u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
int x, x2, y2, old_dx, old_dy, vxres, vyres;
int height, width, line_ofs;
char __iomem *dst = NULL;
@@ -1094,9 +1091,9 @@ static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
dst += line_ofs;
}
} else {
- dst = info->screen_base + (dx/8) + width +
+ dst = info->screen_base + (dx/8) + width +
(dy + height - 1) * info->fix.line_length;
- src = info->screen_base + (sx/8) + width +
+ src = info->screen_base + (sx/8) + width +
(sy + height - 1) * info->fix.line_length;
while (height--) {
for (x = 0; x < width; x++) {
@@ -1109,7 +1106,7 @@ static void vga16fb_copyarea(struct fb_info *info, const struct fb_copyarea *are
dst -= line_ofs;
}
}
- } else
+ } else
vga_8planes_copyarea(info, area);
break;
case FB_TYPE_PACKED_PIXELS:
@@ -1182,7 +1179,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
setsr(0xf);
setcolor(image->fg_color);
selectmask();
-
+
setmask(0xff);
writeb(image->bg_color, where);
rmb();
@@ -1191,7 +1188,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
wmb();
for (y = 0; y < image->height; y++) {
dst = where;
- for (x = image->width/8; x--;)
+ for (x = image->width/8; x--;)
writeb(*cdat++, dst++);
where += info->fix.line_length;
}
@@ -1202,7 +1199,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
setsr(0xf);
setcolor(image->bg_color);
selectmask();
-
+
setmask(0xff);
for (y = 0; y < image->height; y++) {
dst = where;
@@ -1218,7 +1215,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
where += info->fix.line_length;
}
}
- } else
+ } else
vga_8planes_imageblit(info, image);
break;
case FB_TYPE_PACKED_PIXELS:
@@ -1231,7 +1228,7 @@ static void vga_imageblit_expand(struct fb_info *info, const struct fb_image *im
static void vga_imageblit_color(struct fb_info *info, const struct fb_image *image)
{
/*
- * Draw logo
+ * Draw logo
*/
struct vga16fb_par *par = info->par;
char __iomem *where =
@@ -1248,7 +1245,7 @@ static void vga_imageblit_color(struct fb_info *info, const struct fb_image *ima
setsr(0xf);
setop(0);
setmode(0);
-
+
for (y = 0; y < image->height; y++) {
for (x = 0; x < image->width; x++) {
dst = where + x/8;
@@ -1272,7 +1269,7 @@ static void vga_imageblit_color(struct fb_info *info, const struct fb_image *ima
break;
}
}
-
+
static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
if (image->depth == 1)
@@ -1304,28 +1301,22 @@ static const struct fb_ops vga16fb_ops = {
.fb_imageblit = vga16fb_imageblit,
};
-#ifndef MODULE
-static int __init vga16fb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt) continue;
- }
- return 0;
-}
-#endif
-
static int vga16fb_probe(struct platform_device *dev)
{
+ struct screen_info *si;
struct fb_info *info;
struct vga16fb_par *par;
int i;
int ret = 0;
+ si = dev_get_platdata(&dev->dev);
+ if (!si)
+ return -ENODEV;
+
+ ret = check_mode_supported(si);
+ if (ret)
+ return ret;
+
printk(KERN_DEBUG "vga16fb: initializing\n");
info = framebuffer_alloc(sizeof(struct vga16fb_par), &dev->dev);
@@ -1339,8 +1330,8 @@ static int vga16fb_probe(struct platform_device *dev)
goto err_ioremap;
}
- /* XXX share VGA_FB_PHYS and I/O region with vgacon and others */
- info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS, 0);
+ /* XXX share VGA_FB_PHYS_BASE and I/O region with vgacon and others */
+ info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS_BASE, 0);
if (!info->screen_base) {
printk(KERN_ERR "vga16fb: unable to map device\n");
@@ -1352,19 +1343,19 @@ static int vga16fb_probe(struct platform_device *dev)
par = info->par;
#if defined(CONFIG_X86)
- par->isVGA = screen_info.orig_video_isVGA == VIDEO_TYPE_VGAC;
+ par->isVGA = si->orig_video_isVGA == VIDEO_TYPE_VGAC;
#else
/* non-x86 architectures treat orig_video_isVGA as a boolean flag */
- par->isVGA = screen_info.orig_video_isVGA;
+ par->isVGA = si->orig_video_isVGA;
#endif
par->palette_blanked = 0;
par->vesa_blanked = 0;
i = par->isVGA? 6 : 2;
-
+
vga16fb_defined.red.length = i;
vga16fb_defined.green.length = i;
- vga16fb_defined.blue.length = i;
+ vga16fb_defined.blue.length = i;
/* name should not depend on EGA/VGA */
info->fbops = &vga16fb_ops;
@@ -1391,8 +1382,8 @@ static int vga16fb_probe(struct platform_device *dev)
vga16fb_update_fix(info);
- info->apertures->ranges[0].base = VGA_FB_PHYS;
- info->apertures->ranges[0].size = VGA_FB_PHYS_LEN;
+ info->apertures->ranges[0].base = VGA_FB_PHYS_BASE;
+ info->apertures->ranges[0].size = VGA_FB_PHYS_SIZE;
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
@@ -1425,58 +1416,22 @@ static int vga16fb_remove(struct platform_device *dev)
return 0;
}
+static const struct platform_device_id vga16fb_driver_id_table[] = {
+ {"ega-framebuffer", 0},
+ {"vga-framebuffer", 0},
+ { }
+};
+
static struct platform_driver vga16fb_driver = {
.probe = vga16fb_probe,
.remove = vga16fb_remove,
.driver = {
.name = "vga16fb",
},
+ .id_table = vga16fb_driver_id_table,
};
-static struct platform_device *vga16fb_device;
-
-static int __init vga16fb_init(void)
-{
- int ret;
-#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("vga16fb", &option))
- return -ENODEV;
-
- vga16fb_setup(option);
-#endif
-
- ret = check_mode_supported();
- if (ret)
- return ret;
-
- ret = platform_driver_register(&vga16fb_driver);
-
- if (!ret) {
- vga16fb_device = platform_device_alloc("vga16fb", 0);
-
- if (vga16fb_device)
- ret = platform_device_add(vga16fb_device);
- else
- ret = -ENOMEM;
-
- if (ret) {
- platform_device_put(vga16fb_device);
- platform_driver_unregister(&vga16fb_driver);
- }
- }
-
- return ret;
-}
-
-static void __exit vga16fb_exit(void)
-{
- platform_device_unregister(vga16fb_device);
- platform_driver_unregister(&vga16fb_driver);
-}
+module_platform_driver(vga16fb_driver);
MODULE_DESCRIPTION("Legacy VGA framebuffer device driver");
MODULE_LICENSE("GPL");
-module_init(vga16fb_init);
-module_exit(vga16fb_exit);
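With the hunks above, vga16fb no longer registers its own platform device at module init; it binds to "vga-framebuffer"/"ega-framebuffer" devices and reads the struct screen_info from platform data instead of the global. In mainline that device is created by the sysfb setup code; purely as an illustration of the binding (not part of this patch), such a registration would look like:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>

static int __init example_register_vga16fb_device(void)
{
	struct platform_device *pd;

	/* Hand the boot-time screen_info to the driver as platform data. */
	pd = platform_device_register_data(NULL, "vga-framebuffer",
					   PLATFORM_DEVID_NONE,
					   &screen_info,
					   sizeof(screen_info));
	return PTR_ERR_OR_ZERO(pd);
}

Because the driver now validates the mode in probe() via check_mode_supported(si), an unsupported console mode simply fails the probe instead of blocking module load.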
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index 89d75079b730..2ee8fcae08df 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -8,6 +8,7 @@
/*
* Core code for the Via multifunction framebuffer device.
*/
+#include <linux/aperture.h>
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
@@ -617,6 +618,10 @@ static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
+ ret = aperture_remove_conflicting_pci_devices(pdev, "viafb");
+ if (ret)
+ return ret;
+
ret = pci_enable_device(pdev);
if (ret)
return ret;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 4274c6efb249..49b9f148d3a1 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -12,6 +12,7 @@
* (http://davesdomain.org.uk/viafb/)
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -672,6 +673,10 @@ static int vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
+ rc = aperture_remove_conflicting_pci_devices(dev, "vt8623fb");
+ if (rc)
+ return rc;
+
/* Allocate and fill driver data structure */
info = framebuffer_alloc(sizeof(struct vt8623fb_info), &(dev->dev));
if (!info)
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 947be761dfa4..03c7f27dde49 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/display/drm_dp.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
@@ -381,12 +382,34 @@ static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *fr
*
* Returns 0 on success or a negative error code on failure.
*/
-int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame)
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame)
{
return hdmi_audio_infoframe_check_only(frame);
}
EXPORT_SYMBOL(hdmi_audio_infoframe_check);
+static void
+hdmi_audio_infoframe_pack_payload(const struct hdmi_audio_infoframe *frame,
+ u8 *buffer)
+{
+ u8 channels;
+
+ if (frame->channels >= 2)
+ channels = frame->channels - 1;
+ else
+ channels = 0;
+
+ buffer[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
+ buffer[1] = ((frame->sample_frequency & 0x7) << 2) |
+ (frame->sample_size & 0x3);
+ buffer[2] = frame->coding_type_ext & 0x1f;
+ buffer[3] = frame->channel_allocation;
+ buffer[4] = (frame->level_shift_value & 0xf) << 3;
+
+ if (frame->downmix_inhibit)
+ buffer[4] |= BIT(7);
+}
+
/**
* hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
@@ -404,7 +427,6 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_check);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size)
{
- unsigned char channels;
u8 *ptr = buffer;
size_t length;
int ret;
@@ -420,28 +442,13 @@ ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
memset(buffer, 0, size);
- if (frame->channels >= 2)
- channels = frame->channels - 1;
- else
- channels = 0;
-
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
- /* start infoframe payload */
- ptr += HDMI_INFOFRAME_HEADER_SIZE;
-
- ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
- ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
- (frame->sample_size & 0x3);
- ptr[2] = frame->coding_type_ext & 0x1f;
- ptr[3] = frame->channel_allocation;
- ptr[4] = (frame->level_shift_value & 0xf) << 3;
-
- if (frame->downmix_inhibit)
- ptr[4] |= BIT(7);
+ hdmi_audio_infoframe_pack_payload(frame,
+ ptr + HDMI_INFOFRAME_HEADER_SIZE);
hdmi_infoframe_set_checksum(buffer, length);
@@ -480,6 +487,43 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
+ * hdmi_audio_infoframe_pack_for_dp - Pack a HDMI Audio infoframe for DisplayPort
+ *
+ * @frame: HDMI Audio infoframe
+ * @sdp: Secondary data packet for DisplayPort.
+ * @dp_version: DisplayPort version to be encoded in the header
+ *
+ * Packs a HDMI Audio Infoframe to be sent over DisplayPort. This function
+ * fills the secondary data packet to be used for DisplayPort.
+ *
+ * Return: Number of total written bytes or a negative errno on failure.
+ */
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version)
+{
+ int ret;
+
+ ret = hdmi_audio_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ memset(sdp->db, 0, sizeof(sdp->db));
+
+ /* Secondary-data packet header */
+ sdp->sdp_header.HB0 = 0;
+ sdp->sdp_header.HB1 = frame->type;
+ sdp->sdp_header.HB2 = DP_SDP_AUDIO_INFOFRAME_HB2;
+ sdp->sdp_header.HB3 = (dp_version & 0x3f) << 2;
+
+ hdmi_audio_infoframe_pack_payload(frame, sdp->db);
+
+ /* Return size = frame length + four HB for sdp_header */
+ return frame->length + 4;
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_for_dp);
+
+/**
* hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
*
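
The new hdmi_audio_infoframe_pack_for_dp() helper reuses the shared payload packer but fills a DisplayPort secondary data packet (header bytes HB0-HB3 plus the data block) instead of a raw byte buffer. A possible caller, sketched with assumed field values; the surrounding function is hypothetical.

#include <linux/hdmi.h>
#include <drm/display/drm_dp.h>

/* Sketch: pack a stereo audio infoframe into a DP SDP. */
static ssize_t example_pack_audio_sdp(struct dp_sdp *sdp)
{
	struct hdmi_audio_infoframe frame;

	hdmi_audio_infoframe_init(&frame);
	frame.channels = 2;
	frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;

	/* 0x13 is assumed here as the sink's DP version. */
	return hdmi_audio_infoframe_pack_for_dp(&frame, sdp, 0x13);
}
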
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index ce91add81401..dc4d25c26256 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -17,7 +17,7 @@ config NITRO_ENCLAVES
config NITRO_ENCLAVES_MISC_DEV_TEST
bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
- depends on NITRO_ENCLAVES && KUNIT
+ depends on NITRO_ENCLAVES && KUNIT=y
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the misc device functionality of the Nitro
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 0b43efddea22..dfd69bd77f53 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -198,7 +198,7 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
req2->additions_revision = VBG_SVN_REV;
req2->additions_features =
VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
- strlcpy(req2->name, VBG_VERSION_STRING,
+ strscpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
/*
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 4ccfd30c2a30..c47e62dc55da 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -270,6 +270,13 @@ static ssize_t host_features_show(struct device *dev,
static DEVICE_ATTR_RO(host_version);
static DEVICE_ATTR_RO(host_features);
+static struct attribute *vbg_pci_attrs[] = {
+ &dev_attr_host_version.attr,
+ &dev_attr_host_features.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vbg_pci);
+
/**
* Does the PCI detection and init of the device.
*
@@ -390,12 +397,6 @@ static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
}
pci_set_drvdata(pci, gdev);
- device_create_file(dev, &dev_attr_host_version);
- device_create_file(dev, &dev_attr_host_features);
-
- vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %pap (size %pap)\n",
- gdev->misc_device.minor, pci->irq, gdev->io_port,
- &mmio, &mmio_len);
return 0;
@@ -422,8 +423,6 @@ static void vbg_pci_remove(struct pci_dev *pci)
mutex_unlock(&vbg_gdev_mutex);
free_irq(pci->irq, gdev);
- device_remove_file(gdev->dev, &dev_attr_host_features);
- device_remove_file(gdev->dev, &dev_attr_host_version);
misc_deregister(&gdev->misc_device_user);
misc_deregister(&gdev->misc_device);
vbg_core_exit(gdev);
@@ -488,6 +487,7 @@ MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
static struct pci_driver vbg_pci_driver = {
.name = DEVICE_NAME,
+ .dev_groups = vbg_pci_groups,
.id_table = vbg_pci_ids,
.probe = vbg_pci_probe,
.remove = vbg_pci_remove,
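
The vboxguest change replaces manual device_create_file()/device_remove_file() calls with a static attribute group wired into the PCI driver's dev_groups pointer, so the driver core creates and removes the sysfs files around probe and remove. The same pattern, sketched with a made-up attribute:

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute. */
static ssize_t example_info_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "example\n");
}
static DEVICE_ATTR_RO(example_info);

static struct attribute *example_pci_attrs[] = {
	&dev_attr_example_info.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_pci);	/* defines example_pci_groups[] */

static struct pci_driver example_pci_driver = {
	.name = "example",
	.dev_groups = example_pci_groups,	/* core handles the files */
	/* .id_table, .probe and .remove omitted from this sketch */
};
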
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index ad258a9d3b9f..4df77eeb4d16 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -409,6 +409,9 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
if (!err)
return 0;
+ /* Is there an interrupt pin? If not give up. */
+ if (!(to_vp_device(vdev)->pci_dev->pin))
+ return err;
/* Finally fall back to regular interrupts. */
return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 8974c34b40fd..2e7689bb933b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1074,7 +1074,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
if (!queue) {
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
- &dma_addr, GFP_KERNEL|__GFP_ZERO);
+ &dma_addr, GFP_KERNEL | __GFP_ZERO);
}
if (!queue)
return -ENOMEM;
@@ -1875,7 +1875,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!ring)
goto err;
@@ -1887,7 +1887,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!driver)
goto err;
@@ -1897,7 +1897,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!device)
goto err;
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 6c962e88501c..62c44616d8a9 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -525,7 +525,7 @@ exit:
return err;
}
-static int ds2482_remove(struct i2c_client *client)
+static void ds2482_remove(struct i2c_client *client)
{
struct ds2482_data *data = i2c_get_clientdata(client);
int idx;
@@ -538,7 +538,6 @@ static int ds2482_remove(struct i2c_client *client)
/* Free the memory */
kfree(data);
- return 0;
}
/*
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index fa490aa4407c..db110cc442b1 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -611,7 +611,8 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
}
atomic_set(&block->refcnt, 1);
block->portid = nsp->portid;
- memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
+ block->request_cn = *cn;
+ memcpy(block->request_cn.data, cn->data, cn->len);
node = (struct w1_cb_node *)(block->request_cn.data + cn->len);
/* Sneeky, when not bundling, reply_size is the allocated space
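
The w1_netlink fix splits what used to be a single memcpy of sizeof(*cn) + cn->len bytes out of a struct cn_msg into two copies: a struct assignment for the fixed header and a memcpy for the variable-length payload, so the source struct is never read past its declared size (which FORTIFY_SOURCE would flag). The shape of the pattern, with illustrative types rather than the real ones:

#include <linux/string.h>
#include <linux/types.h>

struct example_msg {
	u32 len;
	u8  data[];		/* flexible array member */
};

/* Caller must have allocated dst with room for src->len payload bytes. */
static void example_copy_msg(struct example_msg *dst,
			     const struct example_msg *src)
{
	*dst = *src;				/* fixed-size header */
	memcpy(dst->data, src->data, src->len);	/* trailing payload */
}
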
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9295492d24f7..76c3500b21c7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1799,7 +1799,7 @@ config BCM7038_WDT
tristate "BCM63xx/BCM7038 Watchdog"
select WATCHDOG_CORE
depends on HAS_IOMEM
- depends on ARCH_BCM4908 || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
+ depends on ARCH_BCMBCA || ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
help
Watchdog driver for the built-in hardware in Broadcom 7038 and
later SoCs used in set-top boxes. BCM7038 was made public
@@ -1935,10 +1935,10 @@ config BOOKE_WDT
config BOOKE_WDT_DEFAULT_TIMEOUT
int "PowerPC Book-E Watchdog Timer Default Timeout"
depends on BOOKE_WDT
- default 38 if PPC_FSL_BOOK3E
- range 0 63 if PPC_FSL_BOOK3E
- default 3 if !PPC_FSL_BOOK3E
- range 0 3 if !PPC_FSL_BOOK3E
+ default 38 if PPC_E500
+ range 0 63 if PPC_E500
+ default 3 if !PPC_E500
+ range 0 3 if !PPC_E500
help
Select the default watchdog timer period to be used by the PowerPC
Book-E watchdog driver. A watchdog "event" occurs when the bit
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 75da5cd02615..932a03f4436a 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -27,7 +27,7 @@
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
#else
@@ -45,7 +45,7 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
/* For the specified period, determine the number of seconds
* corresponding to the reset time. There will be a watchdog
@@ -88,7 +88,7 @@ static unsigned int sec_to_period(unsigned int secs)
#define MAX_WDT_TIMEOUT period_to_sec(1)
-#else /* CONFIG_PPC_FSL_BOOK3E */
+#else /* CONFIG_PPC_E500 */
static unsigned long long period_to_sec(unsigned int period)
{
@@ -102,7 +102,7 @@ static unsigned int sec_to_period(unsigned int secs)
#define MAX_WDT_TIMEOUT 3 /* from Kconfig */
-#endif /* !CONFIG_PPC_FSL_BOOK3E */
+#endif /* !CONFIG_PPC_E500 */
static void __booke_wdt_set(void *data)
{
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index c5a9b820d43a..d0e88875443a 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -708,13 +708,11 @@ static int ziirave_wdt_probe(struct i2c_client *client,
return ret;
}
-static int ziirave_wdt_remove(struct i2c_client *client)
+static void ziirave_wdt_remove(struct i2c_client *client)
{
struct ziirave_wdt_data *w_priv = i2c_get_clientdata(client);
watchdog_unregister_device(&w_priv->wdd);
-
- return 0;
}
static const struct i2c_device_id ziirave_wdt_id[] = {
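
ds2482 and ziirave_wdt are both switched to the I2C subsystem's updated remove callback, which returns void because the core never acted on the returned value. Skeleton of the new prototype, with hypothetical names:

#include <linux/i2c.h>
#include <linux/slab.h>

struct example_data {
	int dummy;	/* placeholder driver state */
};

static void example_i2c_remove(struct i2c_client *client)
{
	struct example_data *data = i2c_get_clientdata(client);

	/* Tear down and free; there is nothing useful to return. */
	kfree(data);
}

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name = "example",
	},
	.remove = example_i2c_remove,
	/* .probe and .id_table omitted from this sketch */
};
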
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 738029de3c67..e1ec725c2819 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1047,6 +1047,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
size_t size;
int i, ret;
+ if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
+ return -ENOMEM;
+
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
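
The grant-table change validates nr_pages before the left shift that turns it into a byte count, rejecting negative values and values whose shift by PAGE_SHIFT would overflow an int. The guard has to precede the shift, as in this sketch (function name invented):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static int example_pages_to_size(int nr_pages, size_t *size)
{
	/* Reject counts whose shift would overflow before computing it. */
	if (nr_pages < 0 || nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	*size = (size_t)nr_pages << PAGE_SHIFT;
	return 0;
}
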
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d3dcda344989..6106ed93817d 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -333,18 +333,6 @@ static int32_t scsiback_result(int32_t result)
case DID_TRANSPORT_FAILFAST:
host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST;
break;
- case DID_TARGET_FAILURE:
- host_status = XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE;
- break;
- case DID_NEXUS_FAILURE:
- host_status = XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE;
- break;
- case DID_ALLOC_FAILURE:
- host_status = XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE;
- break;
- case DID_MEDIUM_ERROR:
- host_status = XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR;
- break;
case DID_TRANSPORT_MARGINAL:
host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL;
break;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index d5f3f763717e..d4b251925796 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -382,9 +382,10 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;
grant_ref_t gref_head;
unsigned int i;
+ void *addr;
int ret;
- *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
+ addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO);
if (!*vaddr) {
ret = -ENOMEM;
goto err;
@@ -401,13 +402,15 @@ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr,
unsigned long gfn;
if (is_vmalloc_addr(*vaddr))
- gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i]));
+ gfn = pfn_to_gfn(vmalloc_to_pfn(addr));
else
- gfn = virt_to_gfn(vaddr[i]);
+ gfn = virt_to_gfn(addr);
grefs[i] = gnttab_claim_grant_reference(&gref_head);
gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
gfn, 0);
+
+ addr += XEN_PAGE_SIZE;
}
return 0;
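
The xenbus_setup_ring() fix replaces the incorrect vaddr[i] indexing, which stepped through the void ** holding the base pointer rather than through the ring memory, with a local cursor advanced by XEN_PAGE_SIZE per grant. The per-page walk looks roughly like the sketch below; grant_one_page() is a stand-in for the real grant-table calls, not an existing helper.

#include <xen/page.h>

static void example_grant_ring(void *ring, unsigned int nr_pages,
			       void (*grant_one_page)(void *page))
{
	void *addr = ring;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		grant_one_page(addr);	/* grant this XEN_PAGE_SIZE chunk */
		addr += XEN_PAGE_SIZE;
	}
}
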